Diffstat
-rw-r--r--.gitignore3
-rw-r--r--.mailmap4
-rw-r--r--CREDITS13
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio23
-rw-r--r--Documentation/ABI/testing/sysfs-platform-dell-laptop60
-rw-r--r--Documentation/DocBook/drm.tmpl434
-rw-r--r--Documentation/DocBook/media/v4l/compat.xml12
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt.xml36
-rw-r--r--Documentation/DocBook/media/v4l/subdev-formats.xml18
-rw-r--r--Documentation/DocBook/media/v4l/v4l2.xml11
-rw-r--r--Documentation/devicetree/bindings/arm/gic-v3.txt39
-rw-r--r--Documentation/devicetree/bindings/arm/gic.txt53
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,sysirq.txt28
-rw-r--r--Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt11
-rw-r--r--Documentation/devicetree/bindings/drm/imx/fsl-imx-drm.txt (renamed from Documentation/devicetree/bindings/staging/imx-drm/fsl-imx-drm.txt)0
-rw-r--r--Documentation/devicetree/bindings/drm/imx/hdmi.txt (renamed from Documentation/devicetree/bindings/staging/imx-drm/hdmi.txt)0
-rw-r--r--Documentation/devicetree/bindings/drm/imx/ldb.txt (renamed from Documentation/devicetree/bindings/staging/imx-drm/ldb.txt)0
-rw-r--r--Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt2
-rw-r--r--Documentation/devicetree/bindings/gpu/st,stih4xx.txt29
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-opal.txt37
-rw-r--r--Documentation/devicetree/bindings/iio/adc/qcom,spmi-iadc.txt46
-rw-r--r--Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt2
-rw-r--r--Documentation/devicetree/bindings/input/cap11xx.txt (renamed from Documentation/devicetree/bindings/input/cap1106.txt)26
-rw-r--r--Documentation/devicetree/bindings/input/elan_i2c.txt34
-rw-r--r--Documentation/devicetree/bindings/input/elants_i2c.txt33
-rw-r--r--Documentation/devicetree/bindings/input/gpio-keys.txt10
-rw-r--r--Documentation/devicetree/bindings/leds/leds-lp8860.txt29
-rw-r--r--Documentation/devicetree/bindings/media/rcar_vin.txt2
-rw-r--r--Documentation/devicetree/bindings/mtd/atmel-nand.txt6
-rw-r--r--Documentation/devicetree/bindings/mtd/diskonchip.txt15
-rw-r--r--Documentation/devicetree/bindings/mtd/gpio-control-nand.txt14
-rw-r--r--Documentation/devicetree/bindings/mtd/sunxi-nand.txt45
-rw-r--r--Documentation/devicetree/bindings/panel/auo,b116xw03.txt7
-rw-r--r--Documentation/devicetree/bindings/panel/hannstar,hsd070pww1.txt7
-rw-r--r--Documentation/devicetree/bindings/panel/hit,tx23d38vm0caa.txt7
-rw-r--r--Documentation/devicetree/bindings/panel/innolux,g121i1-l01.txt7
-rw-r--r--Documentation/devicetree/bindings/panel/sharp,lq101r1sx01.txt49
-rw-r--r--Documentation/devicetree/bindings/power_supply/gpio-charger.txt27
-rw-r--r--Documentation/devicetree/bindings/pwm/atmel-hlcdc-pwm.txt29
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-bcm2835.txt30
-rw-r--r--Documentation/devicetree/bindings/thermal/armada-thermal.txt8
-rw-r--r--Documentation/devicetree/bindings/thermal/rockchip-thermal.txt68
-rw-r--r--Documentation/devicetree/bindings/thermal/tegra-soctherm.txt53
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt3
-rw-r--r--Documentation/devicetree/bindings/video/adi,adv7511.txt88
-rw-r--r--Documentation/devicetree/bindings/video/exynos_dsim.txt1
-rw-r--r--Documentation/devicetree/bindings/video/rockchip-drm.txt19
-rw-r--r--Documentation/devicetree/bindings/video/rockchip-vop.txt58
-rw-r--r--Documentation/devicetree/bindings/video/samsung-fimd.txt1
-rw-r--r--Documentation/ia64/kvm.txt83
-rw-r--r--Documentation/kernel-parameters.txt27
-rw-r--r--Documentation/networking/fib_trie.txt4
-rw-r--r--Documentation/video4linux/vivid.txt15
-rw-r--r--Documentation/virtual/kvm/api.txt102
-rw-r--r--Documentation/virtual/kvm/devices/vm.txt10
-rw-r--r--Documentation/virtual/kvm/msr.txt2
-rw-r--r--Documentation/x86/intel_mpx.txt18
-rw-r--r--MAINTAINERS71
-rw-r--r--Makefile17
-rw-r--r--arch/arc/Kconfig1
-rw-r--r--arch/arc/Makefile2
-rw-r--r--arch/arc/boot/dts/nsimosci.dts18
-rw-r--r--arch/arc/configs/fpga_noramfs_defconfig63
-rw-r--r--arch/arc/configs/nsim_700_defconfig (renamed from arch/arc/configs/fpga_defconfig)0
-rw-r--r--arch/arc/include/asm/irqflags.h9
-rw-r--r--arch/arc/kernel/smp.c2
-rw-r--r--arch/arm/boot/dts/am437x-sk-evm.dts15
-rw-r--r--arch/arm/boot/dts/armada-375.dtsi11
-rw-r--r--arch/arm/boot/dts/at91-sama5d4ek.dts4
-rw-r--r--arch/arm/boot/dts/at91sam9260.dtsi14
-rw-r--r--arch/arm/boot/dts/at91sam9261.dtsi14
-rw-r--r--arch/arm/boot/dts/at91sam9263.dtsi21
-rw-r--r--arch/arm/boot/dts/at91sam9g20ek_common.dtsi14
-rw-r--r--arch/arm/boot/dts/at91sam9g45.dtsi46
-rw-r--r--arch/arm/boot/dts/at91sam9m10g45ek.dts9
-rw-r--r--arch/arm/boot/dts/at91sam9rl.dtsi21
-rw-r--r--arch/arm/boot/dts/dra7-evm.dts4
-rw-r--r--arch/arm/boot/dts/dra7.dtsi2
-rw-r--r--arch/arm/boot/dts/dra72-evm.dts2
-rw-r--r--arch/arm/boot/dts/dra7xx-clocks.dtsi6
-rw-r--r--arch/arm/boot/dts/exynos3250.dtsi3
-rw-r--r--arch/arm/boot/dts/exynos4x12.dtsi3
-rw-r--r--arch/arm/boot/dts/exynos5250.dtsi3
-rw-r--r--arch/arm/boot/dts/exynos5420.dtsi3
-rw-r--r--arch/arm/boot/dts/omap2430-sdp.dts32
-rw-r--r--arch/arm/boot/dts/sama5d4.dtsi75
-rw-r--r--arch/arm/boot/dts/tegra114.dtsi23
-rw-r--r--arch/arm/boot/dts/tegra124-jetson-tk1.dts44
-rw-r--r--arch/arm/boot/dts/tegra124.dtsi66
-rw-r--r--arch/arm/boot/dts/tegra30.dtsi25
-rw-r--r--arch/arm/configs/ape6evm_defconfig2
-rw-r--r--arch/arm/configs/armadillo800eva_defconfig2
-rw-r--r--arch/arm/configs/bcm_defconfig2
-rw-r--r--arch/arm/configs/bockw_defconfig2
-rw-r--r--arch/arm/configs/davinci_all_defconfig2
-rw-r--r--arch/arm/configs/exynos_defconfig2
-rw-r--r--arch/arm/configs/ezx_defconfig1
-rw-r--r--arch/arm/configs/hisi_defconfig2
-rw-r--r--arch/arm/configs/imote2_defconfig1
-rw-r--r--arch/arm/configs/imx_v6_v7_defconfig2
-rw-r--r--arch/arm/configs/keystone_defconfig2
-rw-r--r--arch/arm/configs/kzm9g_defconfig2
-rw-r--r--arch/arm/configs/lager_defconfig2
-rw-r--r--arch/arm/configs/mackerel_defconfig1
-rw-r--r--arch/arm/configs/marzen_defconfig2
-rw-r--r--arch/arm/configs/multi_v7_defconfig2
-rw-r--r--arch/arm/configs/omap1_defconfig1
-rw-r--r--arch/arm/configs/omap2plus_defconfig2
-rw-r--r--arch/arm/configs/prima2_defconfig2
-rw-r--r--arch/arm/configs/sama5_defconfig2
-rw-r--r--arch/arm/configs/shmobile_defconfig5
-rw-r--r--arch/arm/configs/sunxi_defconfig2
-rw-r--r--arch/arm/configs/tegra_defconfig2
-rw-r--r--arch/arm/configs/u8500_defconfig2
-rw-r--r--arch/arm/configs/vt8500_v6_v7_defconfig2
-rw-r--r--arch/arm/include/asm/dma-mapping.h13
-rw-r--r--arch/arm/include/asm/kvm_emulate.h5
-rw-r--r--arch/arm/include/asm/kvm_host.h2
-rw-r--r--arch/arm/include/asm/kvm_mmu.h6
-rw-r--r--arch/arm/kernel/perf_event.c2
-rw-r--r--arch/arm/kernel/setup.c2
-rw-r--r--arch/arm/kvm/arm.c78
-rw-r--r--arch/arm/kvm/guest.c26
-rw-r--r--arch/arm/kvm/mmio.c15
-rw-r--r--arch/arm/kvm/mmu.c92
-rw-r--r--arch/arm/kvm/psci.c18
-rw-r--r--arch/arm/mach-davinci/pm_domain.c2
-rw-r--r--arch/arm/mach-exynos/Kconfig2
-rw-r--r--arch/arm/mach-keystone/pm_domain.c2
-rw-r--r--arch/arm/mach-omap1/pm_bus.c4
-rw-r--r--arch/arm/mach-omap2/Kconfig6
-rw-r--r--arch/arm/mach-omap2/clock.h2
-rw-r--r--arch/arm/mach-omap2/id.c8
-rw-r--r--arch/arm/mach-omap2/io.c2
-rw-r--r--arch/arm/mach-omap2/omap_device.c2
-rw-r--r--arch/arm/mach-omap2/soc.h1
-rw-r--r--arch/arm/mach-shmobile/board-lager.c58
-rw-r--r--arch/arm/mach-shmobile/board-marzen.c58
-rw-r--r--arch/arm/mm/dma-mapping.c84
-rw-r--r--arch/arm64/Kconfig2
-rw-r--r--arch/arm64/include/asm/Kbuild1
-rw-r--r--arch/arm64/include/asm/kvm_emulate.h5
-rw-r--r--arch/arm64/include/asm/kvm_host.h3
-rw-r--r--arch/arm64/include/asm/kvm_mmu.h6
-rw-r--r--arch/arm64/kernel/psci.c2
-rw-r--r--arch/arm64/kvm/guest.c26
-rw-r--r--arch/arm64/mm/dump.c5
-rw-r--r--arch/cris/arch-v10/lib/usercopy.c14
-rw-r--r--arch/cris/arch-v32/drivers/Kconfig8
-rw-r--r--arch/cris/arch-v32/drivers/Makefile1
-rw-r--r--arch/cris/arch-v32/drivers/i2c.h1
-rw-r--r--arch/cris/arch-v32/drivers/sync_serial.c1430
-rw-r--r--arch/cris/arch-v32/kernel/debugport.c82
-rw-r--r--arch/cris/arch-v32/kernel/time.c29
-rw-r--r--arch/cris/arch-v32/lib/usercopy.c15
-rw-r--r--arch/cris/arch-v32/mach-fs/pinmux.c152
-rw-r--r--arch/cris/include/arch-v32/mach-fs/mach/pinmux.h2
-rw-r--r--arch/cris/include/asm/Kbuild4
-rw-r--r--arch/cris/include/uapi/asm/Kbuild4
-rw-r--r--arch/cris/kernel/crisksyms.c9
-rw-r--r--arch/cris/kernel/traps.c61
-rw-r--r--arch/cris/mm/init.c38
-rw-r--r--arch/cris/mm/ioremap.c3
-rw-r--r--arch/hexagon/include/asm/cache.h4
-rw-r--r--arch/hexagon/include/asm/cacheflush.h36
-rw-r--r--arch/hexagon/include/asm/io.h5
-rw-r--r--arch/hexagon/kernel/setup.c1
-rw-r--r--arch/hexagon/kernel/traps.c4
-rw-r--r--arch/hexagon/kernel/vmlinux.lds.S4
-rw-r--r--arch/hexagon/mm/cache.c10
-rw-r--r--arch/hexagon/mm/ioremap.c1
-rw-r--r--arch/ia64/Kconfig5
-rw-r--r--arch/ia64/Makefile1
-rw-r--r--arch/ia64/include/asm/kvm_host.h609
-rw-r--r--arch/ia64/include/asm/percpu.h4
-rw-r--r--arch/ia64/include/asm/pvclock-abi.h48
-rw-r--r--arch/ia64/include/uapi/asm/kvm.h268
-rw-r--r--arch/ia64/kernel/perfmon.c10
-rw-r--r--arch/ia64/kvm/Kconfig66
-rw-r--r--arch/ia64/kvm/Makefile67
-rw-r--r--arch/ia64/kvm/asm-offsets.c241
-rw-r--r--arch/ia64/kvm/irq.h33
-rw-r--r--arch/ia64/kvm/kvm-ia64.c1942
-rw-r--r--arch/ia64/kvm/kvm_fw.c674
-rw-r--r--arch/ia64/kvm/kvm_lib.c21
-rw-r--r--arch/ia64/kvm/kvm_minstate.h266
-rw-r--r--arch/ia64/kvm/lapic.h30
-rw-r--r--arch/ia64/kvm/memcpy.S1
-rw-r--r--arch/ia64/kvm/memset.S1
-rw-r--r--arch/ia64/kvm/misc.h94
-rw-r--r--arch/ia64/kvm/mmio.c336
-rw-r--r--arch/ia64/kvm/optvfault.S1090
-rw-r--r--arch/ia64/kvm/process.c1024
-rw-r--r--arch/ia64/kvm/trampoline.S1038
-rw-r--r--arch/ia64/kvm/vcpu.c2209
-rw-r--r--arch/ia64/kvm/vcpu.h752
-rw-r--r--arch/ia64/kvm/vmm.c99
-rw-r--r--arch/ia64/kvm/vmm_ivt.S1392
-rw-r--r--arch/ia64/kvm/vti.h290
-rw-r--r--arch/ia64/kvm/vtlb.c640
-rw-r--r--arch/microblaze/include/asm/pgtable.h1
-rw-r--r--arch/microblaze/kernel/dma.c27
-rw-r--r--arch/microblaze/mm/consistent.c25
-rw-r--r--arch/mips/configs/db1xxx_defconfig2
-rw-r--r--arch/mips/configs/lemote2f_defconfig1
-rw-r--r--arch/mips/configs/loongson3_defconfig2
-rw-r--r--arch/mips/configs/nlm_xlp_defconfig2
-rw-r--r--arch/mips/configs/nlm_xlr_defconfig2
-rw-r--r--arch/nios2/Makefile2
-rw-r--r--arch/nios2/include/asm/io.h2
-rw-r--r--arch/nios2/include/asm/uaccess.h4
-rw-r--r--arch/powerpc/configs/ps3_defconfig2
-rw-r--r--arch/powerpc/include/asm/cpuidle.h20
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h2
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_64.h3
-rw-r--r--arch/powerpc/include/asm/kvm_host.h18
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h2
-rw-r--r--arch/powerpc/include/asm/opal.h42
-rw-r--r--arch/powerpc/include/asm/paca.h10
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h2
-rw-r--r--arch/powerpc/include/asm/processor.h3
-rw-r--r--arch/powerpc/include/asm/reg.h4
-rw-r--r--arch/powerpc/include/asm/syscall.h6
-rw-r--r--arch/powerpc/include/asm/uaccess.h6
-rw-r--r--arch/powerpc/kernel/asm-offsets.c13
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S35
-rw-r--r--arch/powerpc/kernel/idle_power7.S344
-rw-r--r--arch/powerpc/kernel/smp.c9
-rw-r--r--arch/powerpc/kvm/Kconfig1
-rw-r--r--arch/powerpc/kvm/book3s.c8
-rw-r--r--arch/powerpc/kvm/book3s_32_mmu.c5
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c224
-rw-r--r--arch/powerpc/kvm/book3s_hv.c438
-rw-r--r--arch/powerpc/kvm/book3s_hv_builtin.c136
-rw-r--r--arch/powerpc/kvm/book3s_hv_interrupts.S39
-rw-r--r--arch/powerpc/kvm/book3s_hv_ras.c5
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_mmu.c150
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_xics.c36
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S251
-rw-r--r--arch/powerpc/kvm/book3s_paired_singles.c8
-rw-r--r--arch/powerpc/kvm/book3s_pr.c5
-rw-r--r--arch/powerpc/kvm/book3s_xics.c30
-rw-r--r--arch/powerpc/kvm/book3s_xics.h1
-rw-r--r--arch/powerpc/kvm/e500.c10
-rw-r--r--arch/powerpc/kvm/powerpc.c10
-rw-r--r--arch/powerpc/kvm/trace_book3s.h32
-rw-r--r--arch/powerpc/kvm/trace_booke.h47
-rw-r--r--arch/powerpc/kvm/trace_hv.h477
-rw-r--r--arch/powerpc/kvm/trace_pr.h25
-rw-r--r--arch/powerpc/perf/hv-24x7.c23
-rw-r--r--arch/powerpc/platforms/powernv/opal-wrappers.S39
-rw-r--r--arch/powerpc/platforms/powernv/opal.c50
-rw-r--r--arch/powerpc/platforms/powernv/powernv.h2
-rw-r--r--arch/powerpc/platforms/powernv/setup.c166
-rw-r--r--arch/powerpc/platforms/powernv/smp.c29
-rw-r--r--arch/powerpc/platforms/powernv/subcore.c34
-rw-r--r--arch/powerpc/platforms/powernv/subcore.h9
-rw-r--r--arch/s390/include/asm/kvm_host.h99
-rw-r--r--arch/s390/include/asm/pgalloc.h1
-rw-r--r--arch/s390/include/asm/sigp.h1
-rw-r--r--arch/s390/kernel/compat_linux.c2
-rw-r--r--arch/s390/kvm/gaccess.c40
-rw-r--r--arch/s390/kvm/intercept.c20
-rw-r--r--arch/s390/kvm/interrupt.c1044
-rw-r--r--arch/s390/kvm/kvm-s390.c22
-rw-r--r--arch/s390/kvm/kvm-s390.h11
-rw-r--r--arch/s390/kvm/priv.c95
-rw-r--r--arch/s390/kvm/sigp.c305
-rw-r--r--arch/s390/mm/pgtable.c41
-rw-r--r--arch/sh/Kconfig2
-rw-r--r--arch/sh/configs/apsh4ad0a_defconfig2
-rw-r--r--arch/sh/configs/sdk7786_defconfig2
-rw-r--r--arch/sparc/mm/srmmu.c11
-rw-r--r--arch/tile/gxio/mpipe.c4
-rw-r--r--arch/tile/include/asm/io.h5
-rw-r--r--arch/tile/include/asm/pgtable.h4
-rw-r--r--arch/tile/include/asm/pgtable_64.h2
-rw-r--r--arch/tile/include/uapi/asm/ptrace.h16
-rw-r--r--arch/tile/include/uapi/asm/sigcontext.h14
-rw-r--r--arch/tile/kernel/hardwall.c6
-rw-r--r--arch/tile/kernel/irq.c5
-rw-r--r--arch/tile/kernel/kgdb.c6
-rw-r--r--arch/tile/kernel/kprobes.c3
-rw-r--r--arch/tile/kernel/machine_kexec.c28
-rw-r--r--arch/tile/kernel/messaging.c5
-rw-r--r--arch/tile/kernel/module.c12
-rw-r--r--arch/tile/kernel/pci.c7
-rw-r--r--arch/tile/kernel/pci_gx.c95
-rw-r--r--arch/tile/kernel/process.c16
-rw-r--r--arch/tile/kernel/setup.c36
-rw-r--r--arch/tile/kernel/signal.c20
-rw-r--r--arch/tile/kernel/single_step.c6
-rw-r--r--arch/tile/kernel/smpboot.c5
-rw-r--r--arch/tile/kernel/stack.c7
-rw-r--r--arch/tile/kernel/time.c4
-rw-r--r--arch/tile/kernel/traps.c10
-rw-r--r--arch/tile/kernel/unaligned.c22
-rw-r--r--arch/tile/mm/fault.c34
-rw-r--r--arch/tile/mm/homecache.c6
-rw-r--r--arch/tile/mm/hugetlbpage.c18
-rw-r--r--arch/tile/mm/init.c32
-rw-r--r--arch/tile/mm/pgtable.c4
-rw-r--r--arch/x86/Kconfig36
-rw-r--r--arch/x86/include/asm/hw_irq.h84
-rw-r--r--arch/x86/include/asm/io_apic.h35
-rw-r--r--arch/x86/include/asm/irq_vectors.h6
-rw-r--r--arch/x86/include/asm/kvm_host.h37
-rw-r--r--arch/x86/include/asm/pci.h3
-rw-r--r--arch/x86/include/asm/pci_x86.h2
-rw-r--r--arch/x86/include/asm/pgtable_types.h1
-rw-r--r--arch/x86/include/asm/vmx.h3
-rw-r--r--arch/x86/include/asm/xen/page.h64
-rw-r--r--arch/x86/include/asm/xsave.h1
-rw-r--r--arch/x86/include/uapi/asm/ldt.h7
-rw-r--r--arch/x86/include/uapi/asm/vmx.h6
-rw-r--r--arch/x86/kernel/acpi/boot.c99
-rw-r--r--arch/x86/kernel/apic/Makefile4
-rw-r--r--arch/x86/kernel/apic/apic.c22
-rw-r--r--arch/x86/kernel/apic/htirq.c107
-rw-r--r--arch/x86/kernel/apic/io_apic.c1356
-rw-r--r--arch/x86/kernel/apic/msi.c286
-rw-r--r--arch/x86/kernel/apic/vector.c719
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.c22
-rw-r--r--arch/x86/kernel/crash.c1
-rw-r--r--arch/x86/kernel/early-quirks.c23
-rw-r--r--arch/x86/kernel/entry_32.S4
-rw-r--r--arch/x86/kernel/entry_64.S4
-rw-r--r--arch/x86/kernel/irqinit.c35
-rw-r--r--arch/x86/kernel/kvm.c9
-rw-r--r--arch/x86/kernel/kvmclock.c20
-rw-r--r--arch/x86/kernel/machine_kexec_32.c1
-rw-r--r--arch/x86/kernel/machine_kexec_64.c1
-rw-r--r--arch/x86/kernel/reboot.c1
-rw-r--r--arch/x86/kernel/smpboot.c8
-rw-r--r--arch/x86/kernel/tls.c6
-rw-r--r--arch/x86/kernel/traps.c2
-rw-r--r--arch/x86/kernel/xsave.c1
-rw-r--r--arch/x86/kvm/Makefile7
-rw-r--r--arch/x86/kvm/assigned-dev.c (renamed from virt/kvm/assigned-dev.c)30
-rw-r--r--arch/x86/kvm/assigned-dev.h32
-rw-r--r--arch/x86/kvm/cpuid.c57
-rw-r--r--arch/x86/kvm/emulate.c408
-rw-r--r--arch/x86/kvm/ioapic.c (renamed from virt/kvm/ioapic.c)12
-rw-r--r--arch/x86/kvm/ioapic.h (renamed from virt/kvm/ioapic.h)21
-rw-r--r--arch/x86/kvm/iommu.c (renamed from virt/kvm/iommu.c)11
-rw-r--r--arch/x86/kvm/irq_comm.c (renamed from virt/kvm/irq_comm.c)45
-rw-r--r--arch/x86/kvm/lapic.c210
-rw-r--r--arch/x86/kvm/lapic.h14
-rw-r--r--arch/x86/kvm/mmu.c7
-rw-r--r--arch/x86/kvm/svm.c24
-rw-r--r--arch/x86/kvm/trace.h37
-rw-r--r--arch/x86/kvm/vmx.c608
-rw-r--r--arch/x86/kvm/x86.c226
-rw-r--r--arch/x86/kvm/x86.h3
-rw-r--r--arch/x86/lguest/boot.c2
-rw-r--r--arch/x86/mm/fault.c64
-rw-r--r--arch/x86/mm/pageattr.c20
-rw-r--r--arch/x86/pci/intel_mid_pci.c10
-rw-r--r--arch/x86/pci/irq.c25
-rw-r--r--arch/x86/platform/uv/uv_irq.c6
-rw-r--r--arch/x86/xen/mmu.c40
-rw-r--r--arch/x86/xen/p2m.c1172
-rw-r--r--arch/x86/xen/setup.c441
-rw-r--r--arch/x86/xen/xen-ops.h6
-rw-r--r--arch/xtensa/Kconfig56
-rw-r--r--arch/xtensa/Kconfig.debug4
-rw-r--r--arch/xtensa/Makefile1
-rw-r--r--arch/xtensa/boot/boot-elf/boot.lds.S2
-rw-r--r--arch/xtensa/boot/boot-elf/bootstrap.S10
-rw-r--r--arch/xtensa/boot/boot-uboot/Makefile4
-rw-r--r--arch/xtensa/configs/iss_defconfig3
-rw-r--r--arch/xtensa/configs/s6105_defconfig615
-rw-r--r--arch/xtensa/include/asm/cacheflush.h7
-rw-r--r--arch/xtensa/include/asm/highmem.h2
-rw-r--r--arch/xtensa/include/asm/initialize_mmu.h40
-rw-r--r--arch/xtensa/include/asm/mmu_context.h4
-rw-r--r--arch/xtensa/include/asm/nommu_context.h4
-rw-r--r--arch/xtensa/include/asm/page.h12
-rw-r--r--arch/xtensa/include/asm/pgtable.h1
-rw-r--r--arch/xtensa/include/asm/uaccess.h4
-rw-r--r--arch/xtensa/include/asm/vectors.h7
-rw-r--r--arch/xtensa/include/uapi/asm/mman.h6
-rw-r--r--arch/xtensa/kernel/head.S5
-rw-r--r--arch/xtensa/kernel/syscall.c2
-rw-r--r--arch/xtensa/mm/Makefile4
-rw-r--r--arch/xtensa/mm/init.c19
-rw-r--r--arch/xtensa/platforms/s6105/Makefile3
-rw-r--r--arch/xtensa/platforms/s6105/device.c161
-rw-r--r--arch/xtensa/platforms/s6105/include/platform/gpio.h27
-rw-r--r--arch/xtensa/platforms/s6105/include/platform/hardware.h11
-rw-r--r--arch/xtensa/platforms/s6105/include/platform/serial.h8
-rw-r--r--arch/xtensa/platforms/s6105/setup.c73
-rw-r--r--arch/xtensa/platforms/xtfpga/include/platform/hardware.h4
-rw-r--r--arch/xtensa/variants/s6000/Makefile4
-rw-r--r--arch/xtensa/variants/s6000/delay.c25
-rw-r--r--arch/xtensa/variants/s6000/dmac.c173
-rw-r--r--arch/xtensa/variants/s6000/gpio.c230
-rw-r--r--arch/xtensa/variants/s6000/include/variant/core.h431
-rw-r--r--arch/xtensa/variants/s6000/include/variant/dmac.h387
-rw-r--r--arch/xtensa/variants/s6000/include/variant/gpio.h6
-rw-r--r--arch/xtensa/variants/s6000/include/variant/hardware.h259
-rw-r--r--arch/xtensa/variants/s6000/include/variant/irq.h8
-rw-r--r--arch/xtensa/variants/s6000/include/variant/tie-asm.h304
-rw-r--r--arch/xtensa/variants/s6000/include/variant/tie.h191
-rw-r--r--arch/xtensa/variants/s6000/irq.c74
-rw-r--r--block/blk-mq-tag.c4
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/acpi/blacklist.c54
-rw-r--r--drivers/acpi/device_pm.c12
-rw-r--r--drivers/acpi/ec.c2
-rw-r--r--drivers/acpi/fan.c8
-rw-r--r--drivers/acpi/pci_irq.c11
-rw-r--r--drivers/acpi/processor_core.c9
-rw-r--r--drivers/acpi/resource.c2
-rw-r--r--drivers/acpi/scan.c4
-rw-r--r--drivers/acpi/utils.c12
-rw-r--r--drivers/acpi/video.c10
-rw-r--r--drivers/android/Kconfig37
-rw-r--r--drivers/android/Makefile3
-rw-r--r--drivers/android/binder.c (renamed from drivers/staging/android/binder.c)6
-rw-r--r--drivers/android/binder_trace.h (renamed from drivers/staging/android/binder_trace.h)0
-rw-r--r--drivers/ata/Kconfig2
-rw-r--r--drivers/base/power/opp.c78
-rw-r--r--drivers/block/rbd.c11
-rw-r--r--drivers/bluetooth/ath3k.c2
-rw-r--r--drivers/bluetooth/btusb.c1
-rw-r--r--drivers/char/agp/intel-gtt.c4
-rw-r--r--drivers/cpufreq/intel_pstate.c38
-rw-r--r--drivers/cpufreq/longhaul.c4
-rw-r--r--drivers/cpufreq/powernow-k6.c2
-rw-r--r--drivers/cpufreq/powernow-k7.c3
-rw-r--r--drivers/cpufreq/speedstep-ich.c3
-rw-r--r--drivers/cpuidle/cpuidle-powernv.c10
-rw-r--r--drivers/firewire/core-device.c3
-rw-r--r--drivers/firewire/ohci.c6
-rw-r--r--drivers/firewire/sbp2.c67
-rw-r--r--drivers/gpu/drm/Kconfig6
-rw-r--r--drivers/gpu/drm/Makefile7
-rw-r--r--drivers/gpu/drm/README.drm43
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Kconfig9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Makefile14
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cik_regs.h221
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c595
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.h294
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c308
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c1062
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h146
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c256
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c356
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c176
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c353
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h69
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_module.c159
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c346
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h91
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c565
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_pasid.c96
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h405
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_pm4_opcodes.h107
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h600
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c410
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c343
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_queue.c85
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c1235
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.h168
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h185
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c1
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c1
-rw-r--r--drivers/gpu/drm/bochs/bochs_fbdev.c18
-rw-r--r--drivers/gpu/drm/bochs/bochs_hw.c23
-rw-r--r--drivers/gpu/drm/bochs/bochs_kms.c22
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.h3
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_fbdev.c5
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_main.c40
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_mode.c1
-rw-r--r--drivers/gpu/drm/drm_atomic.c657
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c1966
-rw-r--r--drivers/gpu/drm/drm_crtc.c581
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c132
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c201
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c68
-rw-r--r--drivers/gpu/drm/drm_drv.c7
-rw-r--r--drivers/gpu/drm/drm_edid.c231
-rw-r--r--drivers/gpu/drm/drm_edid_load.c3
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c135
-rw-r--r--drivers/gpu/drm/drm_flip_work.c105
-rw-r--r--drivers/gpu/drm/drm_fops.c13
-rw-r--r--drivers/gpu/drm/drm_gem.c13
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c259
-rw-r--r--drivers/gpu/drm/drm_irq.c9
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c660
-rw-r--r--drivers/gpu/drm/drm_modes.c2
-rw-r--r--drivers/gpu/drm/drm_modeset_lock.c43
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c203
-rw-r--r--drivers/gpu/drm/drm_prime.c6
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp_core.c132
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp_core.h5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.h5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dpi.c42
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c252
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h83
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c129
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.h2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c266
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_iommu.h1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c150
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c65
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c126
-rw-r--r--drivers/gpu/drm/gma500/Makefile1
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c195
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c75
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h12
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds.c31
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c170
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c20
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h3
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c1
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h1
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c49
-rw-r--r--drivers/gpu/drm/i2c/Kconfig6
-rw-r--r--drivers/gpu/drm/i2c/Makefile2
-rw-r--r--drivers/gpu/drm/i2c/adv7511.c1010
-rw-r--r--drivers/gpu/drm/i2c/adv7511.h289
-rw-r--r--drivers/gpu/drm/i915/Makefile13
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c39
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c270
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c1070
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c359
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h311
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c645
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c18
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c87
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c96
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h10
-rw-r--r--drivers/gpu/drm/i915/i915_gem_render_state.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c8
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c60
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c43
-rw-r--r--drivers/gpu/drm/i915/i915_ioc32.c2
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c1000
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h643
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c57
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c22
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h104
-rw-r--r--drivers/gpu/drm/i915/i915_ums.c14
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c463
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h10
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c4
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c719
-rw-r--r--drivers/gpu/drm/i915/intel_display.c2851
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c985
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c16
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h212
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c2
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c44
-rw-r--r--drivers/gpu/drm/i915/intel_fifo_underrun.c381
-rw-r--r--drivers/gpu/drm/i915/intel_frontbuffer.c279
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c120
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c338
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.h6
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c4
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c136
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c2653
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c481
-rw-r--r--drivers/gpu/drm/i915/intel_renderstate.h1
-rw-r--r--drivers/gpu/drm/i915/intel_renderstate_gen8.c792
-rw-r--r--drivers/gpu/drm/i915/intel_renderstate_gen9.c974
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c419
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h12
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c1406
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c47
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c605
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c9
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c495
-rw-r--r--drivers/gpu/drm/imx/Kconfig (renamed from drivers/staging/imx-drm/Kconfig)7
-rw-r--r--drivers/gpu/drm/imx/Makefile (renamed from drivers/staging/imx-drm/Makefile)0
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c (renamed from drivers/staging/imx-drm/imx-drm-core.c)6
-rw-r--r--drivers/gpu/drm/imx/imx-drm.h (renamed from drivers/staging/imx-drm/imx-drm.h)0
-rw-r--r--drivers/gpu/drm/imx/imx-hdmi.c (renamed from drivers/staging/imx-drm/imx-hdmi.c)0
-rw-r--r--drivers/gpu/drm/imx/imx-hdmi.h (renamed from drivers/staging/imx-drm/imx-hdmi.h)0
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c (renamed from drivers/staging/imx-drm/imx-ldb.c)5
-rw-r--r--drivers/gpu/drm/imx/imx-tve.c (renamed from drivers/staging/imx-drm/imx-tve.c)8
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c (renamed from drivers/staging/imx-drm/ipuv3-crtc.c)5
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c (renamed from drivers/staging/imx-drm/ipuv3-plane.c)43
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.h (renamed from drivers/staging/imx-drm/ipuv3-plane.h)2
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c (renamed from drivers/staging/imx-drm/parallel-display.c)13
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c1
-rw-r--r--drivers/gpu/drm/msm/Kconfig1
-rw-r--r--drivers/gpu/drm/msm/Makefile4
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx.xml.h26
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx.xml.h247
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c91
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx.xml.h2144
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.c604
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.h34
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_common.xml.h17
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c13
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c31
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h126
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h75
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.xml.h8
-rw-r--r--drivers/gpu/drm/msm/dsi/mmss_cc.xml.h8
-rw-r--r--drivers/gpu/drm/msm/dsi/sfpb.xml.h8
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c144
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h17
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.xml.h8
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_bridge.c3
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_connector.c7
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c2
-rw-r--r--drivers/gpu/drm/msm/hdmi/qfprom.xml.h8
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h8
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c348
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c17
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h17
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c3
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c121
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h10
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c207
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h91
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c466
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c322
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h122
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c24
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c93
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c273
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h131
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c328
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c241
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h23
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c163
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c25
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h35
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c45
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c3
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c40
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h13
-rw-r--r--drivers/gpu/drm/msm/msm_gem_prime.c13
-rw-r--r--drivers/gpu/drm/nouveau/Makefile18
-rw-r--r--drivers/gpu/drm/nouveau/core/core/handle.c113
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/base.c14
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/gm100.c43
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nve0.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/dport.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/gm107.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/gm204.c114
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.c94
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.h63
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv84.c40
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv94.c30
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nva0.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nva3.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c99
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nve0.c30
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/outp.c5
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sorgm204.c144
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c48
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/device.h9
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/handle.h5
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/object.h17
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/disp.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/M0203.h31
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h14
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/image.h13
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/npde.h12
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/pcir.h18
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/pmu.h37
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h23
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/devinit.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/i2c.h3
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/pwr.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/volt.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/os.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/M0203.c129
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/base.c369
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c27
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/disp.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/dp.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c45
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/image.c78
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/init.c56
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/npde.c59
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/pcir.c69
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/pmu.c135
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/priv.h25
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c13
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/shadow.c270
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/shadowacpi.c111
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/shadowof.c71
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/shadowpci.c108
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c112
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/shadowrom.c69
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/timing.c42
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/gk20a.c17
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/gm107.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/gm204.c173
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv84.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv98.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nvaf.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/base.c37
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/gddr3.c117
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h16
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c813
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/sddr2.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/base.c97
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/gm204.c221
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h6
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c13
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nve0.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/padgm204.c86
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc111
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h738
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h863
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h828
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h754
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h5
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c37
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/volt/base.c67
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/volt/gk20a.c199
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/overlay.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c26
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c196
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c30
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c248
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c36
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c15
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv17_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c134
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fence.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvif/class.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvif/client.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvif/driver.h1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c15
-rw-r--r--drivers/gpu/drm/panel/Kconfig13
-rw-r--r--drivers/gpu/drm/panel/Makefile1
-rw-r--r--drivers/gpu/drm/panel/panel-ld9040.c13
-rw-r--r--drivers/gpu/drm/panel/panel-s6e8aa0.c30
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c464
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c133
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c36
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c3
-rw-r--r--drivers/gpu/drm/r128/r128_state.c4
-rw-r--r--drivers/gpu/drm/radeon/Makefile4
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c1
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c752
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.h8
-rw-r--r--drivers/gpu/drm/radeon/ci_smc.c2
-rw-r--r--drivers/gpu/drm/radeon/cik.c214
-rw-r--r--drivers/gpu/drm/radeon/cik_reg.h136
-rw-r--r--drivers/gpu/drm/radeon/cik_sdma.c42
-rw-r--r--drivers/gpu/drm/radeon/cikd.h93
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c14
-rw-r--r--drivers/gpu/drm/radeon/evergreen_dma.c18
-rw-r--r--drivers/gpu/drm/radeon/ni.c20
-rw-r--r--drivers/gpu/drm/radeon/ni_dma.c17
-rw-r--r--drivers/gpu/drm/radeon/ppsmc.h18
-rw-r--r--drivers/gpu/drm/radeon/pptable.h8
-rw-r--r--drivers/gpu/drm/radeon/r100.c10
-rw-r--r--drivers/gpu/drm/radeon/r200.c2
-rw-r--r--drivers/gpu/drm/radeon/r300.c6
-rw-r--r--drivers/gpu/drm/radeon/r600.c18
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c26
-rw-r--r--drivers/gpu/drm/radeon/r600_dma.c18
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.c9
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon.h162
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h18
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c21
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c121
-rw-r--r--drivers/gpu/drm/radeon/radeon_cursor.c268
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c32
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c32
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c92
-rw-r--r--drivers/gpu/drm/radeon/radeon_ib.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_kfd.c563
-rw-r--r--drivers/gpu/drm/radeon/radeon_kfd.h47
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h20
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c83
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_semaphore.c154
-rw-r--r--drivers/gpu/drm/radeon/radeon_sync.c220
-rw-r--r--drivers/gpu/drm/radeon/radeon_trace.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c27
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_vce.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_vm.c236
-rw-r--r--drivers/gpu/drm/radeon/rv770_dma.c18
-rw-r--r--drivers/gpu/drm/radeon/si.c24
-rw-r--r--drivers/gpu/drm/radeon/si_dma.c37
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c381
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.h5
-rw-r--r--drivers/gpu/drm/radeon/si_smc.c2
-rw-r--r--drivers/gpu/drm/radeon/sid.h40
-rw-r--r--drivers/gpu/drm/radeon/sislands_smc.h25
-rw-r--r--drivers/gpu/drm/radeon/smu7_discrete.h30
-rw-r--r--drivers/gpu/drm/rcar-du/Kconfig11
-rw-r--r--drivers/gpu/drm/rcar-du/Makefile2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c3
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.h10
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c4
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.h2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c45
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.h23
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c121
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_hdmicon.h31
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c151
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_hdmienc.h35
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c57
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c31
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h1
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vgacon.c5
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig17
-rw-r--r--drivers/gpu/drm/rockchip/Makefile8
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c551
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.h68
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.c201
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.h28
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c210
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h (renamed from drivers/staging/android/binder.h)23
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c294
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.h54
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c1455
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h201
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c1
-rw-r--r--drivers/gpu/drm/sti/Kconfig1
-rw-r--r--drivers/gpu/drm/sti/Makefile4
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.c20
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.h2
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.c242
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.h12
-rw-r--r--drivers/gpu/drm/sti/sti_drm_crtc.c24
-rw-r--r--drivers/gpu/drm/sti/sti_drm_drv.c6
-rw-r--r--drivers/gpu/drm/sti/sti_drm_plane.c4
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c62
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c84
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.h6
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c1073
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.h12
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp_lut.h373
-rw-r--r--drivers/gpu/drm/sti/sti_layer.c18
-rw-r--r--drivers/gpu/drm/sti/sti_layer.h12
-rw-r--r--drivers/gpu/drm/sti/sti_mixer.c17
-rw-r--r--drivers/gpu/drm/sti/sti_mixer.h3
-rw-r--r--drivers/gpu/drm/sti/sti_tvout.c104
-rw-r--r--drivers/gpu/drm/sti/sti_vtg.c31
-rw-r--r--drivers/gpu/drm/tegra/Kconfig1
-rw-r--r--drivers/gpu/drm/tegra/dc.c596
-rw-r--r--drivers/gpu/drm/tegra/drm.c46
-rw-r--r--drivers/gpu/drm/tegra/drm.h18
-rw-r--r--drivers/gpu/drm/tegra/dsi.c811
-rw-r--r--drivers/gpu/drm/tegra/dsi.h14
-rw-r--r--drivers/gpu/drm/tegra/fb.c52
-rw-r--r--drivers/gpu/drm/tegra/gem.c366
-rw-r--r--drivers/gpu/drm/tegra/gem.h14
-rw-r--r--drivers/gpu/drm/tegra/output.c35
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c7
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_manager.c8
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c10
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c26
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c25
-rw-r--r--drivers/gpu/drm/udl/Makefile2
-rw-r--r--drivers/gpu/drm/udl/udl_dmabuf.c276
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c2
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h8
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c97
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c11
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c39
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c2
-rw-r--r--drivers/gpu/host1x/cdma.c2
-rw-r--r--drivers/gpu/host1x/cdma.h2
-rw-r--r--drivers/gpu/host1x/hw/cdma_hw.c10
-rw-r--r--drivers/gpu/host1x/hw/channel_hw.c12
-rw-r--r--drivers/gpu/host1x/hw/debug_hw.c4
-rw-r--r--drivers/gpu/host1x/job.h2
-rw-r--r--drivers/gpu/host1x/mipi.c148
-rw-r--r--drivers/hsi/clients/nokia-modem.c8
-rw-r--r--drivers/hsi/controllers/omap_ssi_port.c3
-rw-r--r--drivers/hwmon/lm75.c9
-rw-r--r--drivers/hwmon/ntc_thermistor.c6
-rw-r--r--drivers/hwmon/tmp102.c6
-rw-r--r--drivers/i2c/busses/Kconfig11
-rw-r--r--drivers/i2c/busses/Makefile1
-rw-r--r--drivers/i2c/busses/i2c-opal.c294
-rw-r--r--drivers/iio/accel/st_accel.h3
-rw-r--r--drivers/iio/accel/st_accel_core.c22
-rw-r--r--drivers/iio/accel/st_accel_i2c.c3
-rw-r--r--drivers/iio/accel/st_accel_spi.c3
-rw-r--r--drivers/iio/adc/Kconfig14
-rw-r--r--drivers/iio/adc/Makefile1
-rw-r--r--drivers/iio/adc/exynos_adc.c62
-rw-r--r--drivers/iio/adc/mcp320x.c222
-rw-r--r--drivers/iio/adc/qcom-spmi-iadc.c595
-rw-r--r--drivers/iio/adc/rockchip_saradc.c64
-rw-r--r--drivers/iio/adc/vf610_adc.c45
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c126
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_i2c.c1
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_spi.c1
-rw-r--r--drivers/iio/gyro/st_gyro.h3
-rw-r--r--drivers/iio/gyro/st_gyro_core.c19
-rw-r--r--drivers/iio/gyro/st_gyro_i2c.c4
-rw-r--r--drivers/iio/gyro/st_gyro_spi.c4
-rw-r--r--drivers/iio/humidity/Kconfig10
-rw-r--r--drivers/iio/humidity/Makefile1
-rw-r--r--drivers/iio/humidity/si7020.c161
-rw-r--r--drivers/iio/inkern.c33
-rw-r--r--drivers/iio/magnetometer/st_magn.h3
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c18
-rw-r--r--drivers/iio/magnetometer/st_magn_i2c.c3
-rw-r--r--drivers/iio/magnetometer/st_magn_spi.c3
-rw-r--r--drivers/iio/pressure/Kconfig11
-rw-r--r--drivers/iio/pressure/Makefile1
-rw-r--r--drivers/iio/pressure/bmp280.c455
-rw-r--r--drivers/iio/pressure/st_pressure.h3
-rw-r--r--drivers/iio/pressure/st_pressure_buffer.c12
-rw-r--r--drivers/iio/pressure/st_pressure_core.c49
-rw-r--r--drivers/iio/pressure/st_pressure_i2c.c11
-rw-r--r--drivers/iio/pressure/st_pressure_spi.c11
-rw-r--r--drivers/iio/proximity/as3935.c16
-rw-r--r--drivers/infiniband/Kconfig11
-rw-r--r--drivers/infiniband/core/Makefile1
-rw-r--r--drivers/infiniband/core/addr.c4
-rw-r--r--drivers/infiniband/core/multicast.c11
-rw-r--r--drivers/infiniband/core/umem.c72
-rw-r--r--drivers/infiniband/core/umem_odp.c668
-rw-r--r--drivers/infiniband/core/umem_rbtree.c94
-rw-r--r--drivers/infiniband/core/uverbs.h1
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c171
-rw-r--r--drivers/infiniband/core/uverbs_main.c5
-rw-r--r--drivers/infiniband/core/verbs.c3
-rw-r--r--drivers/infiniband/hw/amso1100/c2_provider.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c7
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c28
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_mrmw.c2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_mr.c2
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c1
-rw-r--r--drivers/infiniband/hw/mlx5/Makefile1
-rw-r--r--drivers/infiniband/hw/mlx5/main.c45
-rw-r--r--drivers/infiniband/hw/mlx5/mem.c69
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h116
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c323
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c798
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c197
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c6
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.c5
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_mr.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h19
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c18
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c27
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c49
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c239
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c22
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c104
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h30
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c6
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c102
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c91
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c1599
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.h80
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c2
-rw-r--r--drivers/input/gameport/gameport.c4
-rw-r--r--drivers/input/input.c4
-rw-r--r--drivers/input/joystick/xpad.c8
-rw-r--r--drivers/input/keyboard/Kconfig8
-rw-r--r--drivers/input/keyboard/Makefile2
-rw-r--r--drivers/input/keyboard/amikbd.c47
-rw-r--r--drivers/input/keyboard/atkbd.c6
-rw-r--r--drivers/input/keyboard/cap1106.c341
-rw-r--r--drivers/input/keyboard/cap11xx.c376
-rw-r--r--drivers/input/keyboard/gpio_keys.c37
-rw-r--r--drivers/input/keyboard/lm8323.c2
-rw-r--r--drivers/input/keyboard/lpc32xx-keys.c92
-rw-r--r--drivers/input/keyboard/mpr121_touchkey.c42
-rw-r--r--drivers/input/keyboard/pxa27x_keypad.c84
-rw-r--r--drivers/input/misc/88pm860x_onkey.c6
-rw-r--r--drivers/input/misc/ad714x-i2c.c6
-rw-r--r--drivers/input/misc/ad714x-spi.c6
-rw-r--r--drivers/input/misc/adxl34x-i2c.c6
-rw-r--r--drivers/input/misc/adxl34x-spi.c6
-rw-r--r--drivers/input/misc/drv260x.c6
-rw-r--r--drivers/input/misc/drv2667.c6
-rw-r--r--drivers/input/misc/gp2ap002a00f.c6
-rw-r--r--drivers/input/misc/ims-pcu.c4
-rw-r--r--drivers/input/misc/kxtj9.c6
-rw-r--r--drivers/input/misc/max77693-haptic.c6
-rw-r--r--drivers/input/misc/max8925_onkey.c6
-rw-r--r--drivers/input/misc/max8997_haptic.c4
-rw-r--r--drivers/input/misc/palmas-pwrbutton.c6
-rw-r--r--drivers/input/misc/pm8xxx-vibrator.c4
-rw-r--r--drivers/input/misc/pmic8xxx-pwrkey.c6
-rw-r--r--drivers/input/misc/pwm-beeper.c6
-rw-r--r--drivers/input/misc/sirfsoc-onkey.c4
-rw-r--r--drivers/input/misc/twl4030-vibra.c6
-rw-r--r--drivers/input/misc/twl6040-vibra.c4
-rw-r--r--drivers/input/mouse/Kconfig30
-rw-r--r--drivers/input/mouse/Makefile5
-rw-r--r--drivers/input/mouse/cyapa.c289
-rw-r--r--drivers/input/mouse/elan_i2c.h86
-rw-r--r--drivers/input/mouse/elan_i2c_core.c1137
-rw-r--r--drivers/input/mouse/elan_i2c_i2c.c611
-rw-r--r--drivers/input/mouse/elan_i2c_smbus.c514
-rw-r--r--drivers/input/mouse/lifebook.h6
-rw-r--r--drivers/input/mouse/navpoint.c6
-rw-r--r--drivers/input/mouse/synaptics_i2c.c6
-rw-r--r--drivers/input/serio/altera_ps2.c81
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h10
-rw-r--r--drivers/input/serio/serio.c4
-rw-r--r--drivers/input/serio/serio_raw.c4
-rw-r--r--drivers/input/touchscreen/Kconfig25
-rw-r--r--drivers/input/touchscreen/Makefile2
-rw-r--r--drivers/input/touchscreen/ad7877.c6
-rw-r--r--drivers/input/touchscreen/ad7879.c6
-rw-r--r--drivers/input/touchscreen/ads7846.c6
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c6
-rw-r--r--drivers/input/touchscreen/auo-pixcir-ts.c6
-rw-r--r--drivers/input/touchscreen/cy8ctmg110_ts.c6
-rw-r--r--drivers/input/touchscreen/cyttsp_core.c7
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c6
-rw-r--r--drivers/input/touchscreen/eeti_ts.c6
-rw-r--r--drivers/input/touchscreen/egalax_ts.c6
-rw-r--r--drivers/input/touchscreen/elants_i2c.c1271
-rw-r--r--drivers/input/touchscreen/goodix.c395
-rw-r--r--drivers/input/touchscreen/ili210x.c6
-rw-r--r--drivers/input/touchscreen/ipaq-micro-ts.c6
-rw-r--r--drivers/input/touchscreen/mms114.c6
-rw-r--r--drivers/input/touchscreen/pixcir_i2c_ts.c6
-rw-r--r--drivers/input/touchscreen/st1232.c7
-rw-r--r--drivers/input/touchscreen/tsc2005.c6
-rw-r--r--drivers/input/touchscreen/ucb1400_ts.c6
-rw-r--r--drivers/input/touchscreen/wacom_i2c.c6
-rw-r--r--drivers/input/touchscreen/zforce_ts.c6
-rw-r--r--drivers/iommu/Kconfig4
-rw-r--r--drivers/iommu/amd_iommu.c14
-rw-r--r--drivers/iommu/amd_iommu_v2.c61
-rw-r--r--drivers/iommu/intel_irq_remapping.c10
-rw-r--r--drivers/iommu/iommu.c2
-rw-r--r--drivers/iommu/irq_remapping.c6
-rw-r--r--drivers/iommu/of_iommu.c89
-rw-r--r--drivers/irqchip/Kconfig12
-rw-r--r--drivers/irqchip/Makefile3
-rw-r--r--drivers/irqchip/irq-gic-v2m.c333
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c1425
-rw-r--r--drivers/irqchip/irq-gic-v3.c156
-rw-r--r--drivers/irqchip/irq-gic.c81
-rw-r--r--drivers/irqchip/irq-mtk-sysirq.c163
-rw-r--r--drivers/leds/Kconfig11
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/led-class.c29
-rw-r--r--drivers/leds/led-core.c36
-rw-r--r--drivers/leds/led-triggers.c16
-rw-r--r--drivers/leds/leds-gpio.c2
-rw-r--r--drivers/leds/leds-lp8860.c491
-rw-r--r--drivers/leds/leds-regulator.c18
-rw-r--r--drivers/leds/leds-syscon.c71
-rw-r--r--drivers/leds/leds.h20
-rw-r--r--drivers/leds/trigger/ledtrig-backlight.c8
-rw-r--r--drivers/leds/trigger/ledtrig-default-on.c2
-rw-r--r--drivers/leds/trigger/ledtrig-gpio.c6
-rw-r--r--drivers/leds/trigger/ledtrig-heartbeat.c2
-rw-r--r--drivers/leds/trigger/ledtrig-oneshot.c4
-rw-r--r--drivers/leds/trigger/ledtrig-transient.c10
-rw-r--r--drivers/media/Kconfig1
-rw-r--r--drivers/media/Makefile2
-rw-r--r--drivers/media/i2c/Kconfig9
-rw-r--r--drivers/media/i2c/Makefile1
-rw-r--r--drivers/media/pci/cx88/cx88-blackbird.c4
-rw-r--r--drivers/media/pci/cx88/cx88-dvb.c4
-rw-r--r--drivers/media/pci/cx88/cx88-mpeg.c11
-rw-r--r--drivers/media/pci/cx88/cx88-vbi.c9
-rw-r--r--drivers/media/pci/cx88/cx88-video.c18
-rw-r--r--drivers/media/pci/cx88/cx88.h2
-rw-r--r--drivers/media/platform/Kconfig10
-rw-r--r--drivers/media/platform/Makefile3
-rw-r--r--drivers/media/platform/s5p-tv/Kconfig2
-rw-r--r--drivers/media/platform/soc_camera/rcar_vin.c466
-rw-r--r--drivers/media/platform/vivid/vivid-vid-out.c2
-rw-r--r--drivers/media/usb/Kconfig1
-rw-r--r--drivers/media/usb/Makefile1
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c6
-rw-r--r--drivers/memory/fsl_ifc.c13
-rw-r--r--drivers/misc/cxl/context.c26
-rw-r--r--drivers/misc/cxl/cxl.h9
-rw-r--r--drivers/misc/cxl/file.c6
-rw-r--r--drivers/misc/cxl/native.c12
-rw-r--r--drivers/misc/cxl/pci.c2
-rw-r--r--drivers/misc/cxl/sysfs.c10
-rw-r--r--drivers/misc/mic/host/mic_debugfs.c18
-rw-r--r--drivers/mmc/host/atmel-mci.c2
-rw-r--r--drivers/mtd/Kconfig2
-rw-r--r--drivers/mtd/bcm47xxpart.c28
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c3
-rw-r--r--drivers/mtd/devices/docg3.c122
-rw-r--r--drivers/mtd/devices/m25p80.c30
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c6
-rw-r--r--drivers/mtd/devices/phram.c2
-rw-r--r--drivers/mtd/devices/pmc551.c3
-rw-r--r--drivers/mtd/inftlmount.c2
-rw-r--r--drivers/mtd/maps/bfin-async-flash.c1
-rw-r--r--drivers/mtd/maps/physmap_of.c8
-rw-r--r--drivers/mtd/nand/Kconfig12
-rw-r--r--drivers/mtd/nand/Makefile1
-rw-r--r--drivers/mtd/nand/atmel_nand.c120
-rw-r--r--drivers/mtd/nand/atmel_nand_ecc.h4
-rw-r--r--drivers/mtd/nand/cafe_nand.c45
-rw-r--r--drivers/mtd/nand/fsl_ifc_nand.c10
-rw-r--r--drivers/mtd/nand/gpio.c4
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-lib.c153
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c201
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.h6
-rw-r--r--drivers/mtd/nand/mxc_nand.c10
-rw-r--r--drivers/mtd/nand/nand_base.c12
-rw-r--r--drivers/mtd/nand/nand_ids.c1
-rw-r--r--drivers/mtd/nand/nandsim.c42
-rw-r--r--drivers/mtd/nand/omap2.c24
-rw-r--r--drivers/mtd/nand/orion_nand.c39
-rw-r--r--drivers/mtd/nand/sunxi_nand.c1432
-rw-r--r--drivers/mtd/spi-nor/fsl-quadspi.c23
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c313
-rw-r--r--drivers/mtd/tests/oobtest.c77
-rw-r--r--drivers/mtd/tests/torturetest.c4
-rw-r--r--drivers/net/dsa/Kconfig2
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h4
-rw-r--r--drivers/net/ethernet/cadence/macb.c25
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h2
-rw-r--r--drivers/net/ethernet/cirrus/cs89x0.c27
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c2
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c19
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c119
-rw-r--r--drivers/net/ethernet/smsc/Kconfig2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c10
-rw-r--r--drivers/net/macvtap.c30
-rw-r--r--drivers/net/phy/Kconfig4
-rw-r--r--drivers/net/phy/Makefile2
-rw-r--r--drivers/net/phy/fixed_phy.c (renamed from drivers/net/phy/fixed.c)0
-rw-r--r--drivers/net/tun.c26
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_cs.c15
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8821ae/dm.c11
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.c6
-rw-r--r--drivers/net/xen-netback/common.h4
-rw-r--r--drivers/net/xen-netback/interface.c4
-rw-r--r--drivers/net/xen-netback/netback.c27
-rw-r--r--drivers/net/xen-netback/xenbus.c12
-rw-r--r--drivers/net/xen-netfront.c11
-rw-r--r--drivers/nfc/trf7970a.c2
-rw-r--r--drivers/of/platform.c50
-rw-r--r--drivers/pci/Kconfig9
-rw-r--r--drivers/pci/Makefile2
-rw-r--r--drivers/pci/hotplug/ibmphp_core.c6
-rw-r--r--drivers/pci/ioapic.c121
-rw-r--r--drivers/phy/phy-omap-usb2.c2
-rw-r--r--drivers/phy/phy-ti-pipe3.c2
-rw-r--r--drivers/platform/x86/Kconfig7
-rw-r--r--drivers/platform/x86/acerhdf.c265
-rw-r--r--drivers/platform/x86/asus-laptop.c3
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c9
-rw-r--r--drivers/platform/x86/asus-wmi.c3
-rw-r--r--drivers/platform/x86/dell-laptop.c1057
-rw-r--r--drivers/platform/x86/dell-smo8800.c10
-rw-r--r--drivers/platform/x86/dell-wmi.c176
-rw-r--r--drivers/platform/x86/eeepc-laptop.c213
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c6
-rw-r--r--drivers/platform/x86/hp-wireless.c3
-rw-r--r--drivers/platform/x86/hp_accel.c1
-rw-r--r--drivers/platform/x86/ideapad-laptop.c3
-rw-r--r--drivers/platform/x86/intel_ips.c2
-rw-r--r--drivers/platform/x86/intel_oaktrail.c3
-rw-r--r--drivers/platform/x86/msi-laptop.c2
-rw-r--r--drivers/platform/x86/msi-wmi.c3
-rw-r--r--drivers/platform/x86/sony-laptop.c6
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c116
-rw-r--r--drivers/platform/x86/toshiba_acpi.c166
-rw-r--r--drivers/power/ds2782_battery.c8
-rw-r--r--drivers/power/gpio-charger.c72
-rw-r--r--drivers/power/pm2301_charger.c4
-rw-r--r--drivers/power/reset/axxia-reset.c21
-rw-r--r--drivers/power/reset/brcmstb-reboot.c28
-rw-r--r--drivers/power/reset/hisi-reboot.c20
-rw-r--r--drivers/power/reset/keystone-reset.c20
-rw-r--r--drivers/power/reset/syscon-reboot.c2
-rw-r--r--drivers/power/reset/vexpress-poweroff.c40
-rw-r--r--drivers/power/reset/xgene-reboot.c56
-rw-r--r--drivers/pwm/Kconfig22
-rw-r--r--drivers/pwm/Makefile2
-rw-r--r--drivers/pwm/pwm-atmel-hlcdc.c299
-rw-r--r--drivers/pwm/pwm-bcm2835.c205
-rw-r--r--drivers/pwm/pwm-fsl-ftm.c64
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c44
-rw-r--r--drivers/scsi/scsi_pm.c10
-rw-r--r--drivers/scsi/scsi_priv.h5
-rw-r--r--drivers/scsi/ufs/ufshcd-pci.c11
-rw-r--r--drivers/scsi/ufs/ufshcd-pltfrm.c11
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra.c1
-rw-r--r--drivers/spi/spi-coldfire-qspi.c2
-rw-r--r--drivers/spi/spi-img-spfi.c4
-rw-r--r--drivers/spi/spi-meson-spifc.c4
-rw-r--r--drivers/spi/spi-orion.c2
-rw-r--r--drivers/spi/spi-pxa2xx.c2
-rw-r--r--drivers/spi/spi-qup.c4
-rw-r--r--drivers/spi/spi-rockchip.c4
-rw-r--r--drivers/spi/spi-s3c64xx.c4
-rw-r--r--drivers/staging/Kconfig6
-rw-r--r--drivers/staging/Makefile3
-rw-r--r--drivers/staging/android/Kconfig30
-rw-r--r--drivers/staging/android/Makefile1
-rw-r--r--drivers/staging/android/TODO7
-rw-r--r--drivers/staging/android/ashmem.c2
-rw-r--r--drivers/staging/android/ion/ion.c10
-rw-r--r--drivers/staging/android/ion/ion.h2
-rw-r--r--drivers/staging/android/ion/ion_dummy_driver.c6
-rw-r--r--drivers/staging/android/ion/ion_page_pool.c2
-rw-r--r--drivers/staging/android/ion/ion_priv.h4
-rw-r--r--drivers/staging/android/ion/tegra/tegra_ion.c6
-rw-r--r--drivers/staging/android/sync_debug.c5
-rw-r--r--drivers/staging/android/timed_gpio.c7
-rw-r--r--drivers/staging/bcm/Adapter.h474
-rw-r--r--drivers/staging/bcm/Bcmchar.c2652
-rw-r--r--drivers/staging/bcm/Bcmnet.c240
-rw-r--r--drivers/staging/bcm/CmHost.c2254
-rw-r--r--drivers/staging/bcm/CmHost.h62
-rw-r--r--drivers/staging/bcm/DDRInit.c1355
-rw-r--r--drivers/staging/bcm/DDRInit.h9
-rw-r--r--drivers/staging/bcm/Debug.h242
-rw-r--r--drivers/staging/bcm/HandleControlPacket.c241
-rw-r--r--drivers/staging/bcm/HostMIBSInterface.h192
-rw-r--r--drivers/staging/bcm/IPv6Protocol.c476
-rw-r--r--drivers/staging/bcm/IPv6ProtocolHdr.h85
-rw-r--r--drivers/staging/bcm/InterfaceAdapter.h79
-rw-r--r--drivers/staging/bcm/InterfaceDld.c317
-rw-r--r--drivers/staging/bcm/InterfaceIdleMode.c274
-rw-r--r--drivers/staging/bcm/InterfaceIdleMode.h15
-rw-r--r--drivers/staging/bcm/InterfaceInit.c729
-rw-r--r--drivers/staging/bcm/InterfaceInit.h26
-rw-r--r--drivers/staging/bcm/InterfaceIsr.c190
-rw-r--r--drivers/staging/bcm/InterfaceIsr.h15
-rw-r--r--drivers/staging/bcm/InterfaceMacros.h18
-rw-r--r--drivers/staging/bcm/InterfaceMisc.c247
-rw-r--r--drivers/staging/bcm/InterfaceMisc.h42
-rw-r--r--drivers/staging/bcm/InterfaceRx.c289
-rw-r--r--drivers/staging/bcm/InterfaceRx.h7
-rw-r--r--drivers/staging/bcm/InterfaceTx.c213
-rw-r--r--drivers/staging/bcm/InterfaceTx.h7
-rw-r--r--drivers/staging/bcm/Ioctl.h226
-rw-r--r--drivers/staging/bcm/Kconfig6
-rw-r--r--drivers/staging/bcm/LeakyBucket.c364
-rw-r--r--drivers/staging/bcm/Macros.h352
-rw-r--r--drivers/staging/bcm/Makefile12
-rw-r--r--drivers/staging/bcm/Misc.c1587
-rw-r--r--drivers/staging/bcm/PHSDefines.h94
-rw-r--r--drivers/staging/bcm/PHSModule.c1703
-rw-r--r--drivers/staging/bcm/PHSModule.h59
-rw-r--r--drivers/staging/bcm/Protocol.h128
-rw-r--r--drivers/staging/bcm/Prototypes.h217
-rw-r--r--drivers/staging/bcm/Qos.c1200
-rw-r--r--drivers/staging/bcm/Queue.h29
-rw-r--r--drivers/staging/bcm/TODO26
-rw-r--r--drivers/staging/bcm/Transmit.c271
-rw-r--r--drivers/staging/bcm/Typedefs.h47
-rw-r--r--drivers/staging/bcm/cntrl_SignalingInterface.h311
-rw-r--r--drivers/staging/bcm/headers.h78
-rw-r--r--drivers/staging/bcm/hostmibs.c164
-rw-r--r--drivers/staging/bcm/led_control.c952
-rw-r--r--drivers/staging/bcm/led_control.h84
-rw-r--r--drivers/staging/bcm/nvm.c4661
-rw-r--r--drivers/staging/bcm/nvm.h286
-rw-r--r--drivers/staging/bcm/sort.c52
-rw-r--r--drivers/staging/bcm/target_params.h57
-rw-r--r--drivers/staging/bcm/vendorspecificextn.c145
-rw-r--r--drivers/staging/bcm/vendorspecificextn.h18
-rw-r--r--drivers/staging/clocking-wizard/Kconfig9
-rw-r--r--drivers/staging/clocking-wizard/Makefile1
-rw-r--r--drivers/staging/clocking-wizard/TODO12
-rw-r--r--drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c341
-rw-r--r--drivers/staging/clocking-wizard/dt-binding.txt30
-rw-r--r--drivers/staging/comedi/Kconfig28
-rw-r--r--drivers/staging/comedi/Makefile7
-rw-r--r--drivers/staging/comedi/comedi.h13
-rw-r--r--drivers/staging/comedi/comedi_buf.c157
-rw-r--r--drivers/staging/comedi/comedi_compat32.c2
-rw-r--r--drivers/staging/comedi/comedi_fops.c279
-rw-r--r--drivers/staging/comedi/comedi_pci.c16
-rw-r--r--drivers/staging/comedi/comedi_pcmcia.c16
-rw-r--r--drivers/staging/comedi/comedi_usb.c16
-rw-r--r--drivers/staging/comedi/comedidev.h139
-rw-r--r--drivers/staging/comedi/drivers.c102
-rw-r--r--drivers/staging/comedi/drivers/Makefile1
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_common.c274
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_common.h144
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_eeprom.c360
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.c480
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c28
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c381
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c2050
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c3003
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_035.c77
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1032.c5
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1500.c117
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1516.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1564.c268
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_16xx.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_2032.c32
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_2200.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3120.c1153
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3200.c125
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3501.c7
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3xxx.c11
-rw-r--r--drivers/staging/comedi/drivers/addi_tcw.h56
-rw-r--r--drivers/staging/comedi/drivers/addi_watchdog.c32
-rw-r--r--drivers/staging/comedi/drivers/adl_pci6208.c1
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9111.c40
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9118.c202
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1710.c76
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1723.c338
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1724.c471
-rw-r--r--drivers/staging/comedi/drivers/adv_pci_dio.c8
-rw-r--r--drivers/staging/comedi/drivers/aio_aio12_8.c1
-rw-r--r--drivers/staging/comedi/drivers/amcc_s5933.h2
-rw-r--r--drivers/staging/comedi/drivers/amplc_dio200_common.c34
-rw-r--r--drivers/staging/comedi/drivers/amplc_pc236_common.c5
-rw-r--r--drivers/staging/comedi/drivers/amplc_pc263.c2
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci224.c53
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci230.c193
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci263.c2
-rw-r--r--drivers/staging/comedi/drivers/c6xdigio.c2
-rw-r--r--drivers/staging/comedi/drivers/cb_das16_cs.c1
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas.c283
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas64.c295
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdas.c83
-rw-r--r--drivers/staging/comedi/drivers/comedi_bond.c9
-rw-r--r--drivers/staging/comedi/drivers/comedi_fc.h43
-rw-r--r--drivers/staging/comedi/drivers/comedi_parport.c5
-rw-r--r--drivers/staging/comedi/drivers/comedi_test.c63
-rw-r--r--drivers/staging/comedi/drivers/dac02.c1
-rw-r--r--drivers/staging/comedi/drivers/daqboard2000.c1
-rw-r--r--drivers/staging/comedi/drivers/das08.c3
-rw-r--r--drivers/staging/comedi/drivers/das16.c23
-rw-r--r--drivers/staging/comedi/drivers/das16m1.c7
-rw-r--r--drivers/staging/comedi/drivers/das1800.c62
-rw-r--r--drivers/staging/comedi/drivers/das6402.c202
-rw-r--r--drivers/staging/comedi/drivers/das800.c38
-rw-r--r--drivers/staging/comedi/drivers/dmm32at.c702
-rw-r--r--drivers/staging/comedi/drivers/dt2801.c1
-rw-r--r--drivers/staging/comedi/drivers/dt2811.c1
-rw-r--r--drivers/staging/comedi/drivers/dt2814.c2
-rw-r--r--drivers/staging/comedi/drivers/dt282x.c62
-rw-r--r--drivers/staging/comedi/drivers/dt3000.c9
-rw-r--r--drivers/staging/comedi/drivers/dt9812.c4
-rw-r--r--drivers/staging/comedi/drivers/dyna_pci10xx.c4
-rw-r--r--drivers/staging/comedi/drivers/fl512.c1
-rw-r--r--drivers/staging/comedi/drivers/gsc_hpdi.c8
-rw-r--r--drivers/staging/comedi/drivers/icp_multi.c9
-rw-r--r--drivers/staging/comedi/drivers/ii_pci20kc.c1
-rw-r--r--drivers/staging/comedi/drivers/me4000.c120
-rw-r--r--drivers/staging/comedi/drivers/me_daq.c5
-rw-r--r--drivers/staging/comedi/drivers/mf6x4.c1
-rw-r--r--drivers/staging/comedi/drivers/mite.c9
-rw-r--r--drivers/staging/comedi/drivers/multiq3.c1
-rw-r--r--drivers/staging/comedi/drivers/ni_6527.c8
-rw-r--r--drivers/staging/comedi/drivers/ni_65xx.c9
-rw-r--r--drivers/staging/comedi/drivers/ni_660x.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_670x.c1
-rw-r--r--drivers/staging/comedi/drivers/ni_at_a2150.c21
-rw-r--r--drivers/staging/comedi/drivers/ni_at_ao.c56
-rw-r--r--drivers/staging/comedi/drivers/ni_atmio16d.c8
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc.h6
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_common.c152
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_isadma.c7
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c166
-rw-r--r--drivers/staging/comedi/drivers/ni_pcidio.c14
-rw-r--r--drivers/staging/comedi/drivers/ni_stc.h16
-rw-r--r--drivers/staging/comedi/drivers/ni_tiocmd.c6
-rw-r--r--drivers/staging/comedi/drivers/ni_usb6501.c2
-rw-r--r--drivers/staging/comedi/drivers/pcl711.c25
-rw-r--r--drivers/staging/comedi/drivers/pcl726.c6
-rw-r--r--drivers/staging/comedi/drivers/pcl812.c40
-rw-r--r--drivers/staging/comedi/drivers/pcl816.c26
-rw-r--r--drivers/staging/comedi/drivers/pcl818.c34
-rw-r--r--drivers/staging/comedi/drivers/pcmmio.c32
-rw-r--r--drivers/staging/comedi/drivers/pcmuio.c34
-rw-r--r--drivers/staging/comedi/drivers/quatech_daqp_cs.c38
-rw-r--r--drivers/staging/comedi/drivers/rtd520.c77
-rw-r--r--drivers/staging/comedi/drivers/rti800.c1
-rw-r--r--drivers/staging/comedi/drivers/rti802.c1
-rw-r--r--drivers/staging/comedi/drivers/s526.c1
-rw-r--r--drivers/staging/comedi/drivers/s626.c38
-rw-r--r--drivers/staging/comedi/drivers/serial2002.c4
-rw-r--r--drivers/staging/comedi/drivers/usbdux.c400
-rw-r--r--drivers/staging/comedi/drivers/usbduxfast.c145
-rw-r--r--drivers/staging/comedi/drivers/usbduxsigma.c350
-rw-r--r--drivers/staging/comedi/drivers/vmk80xx.c8
-rw-r--r--drivers/staging/comedi/range.c36
-rw-r--r--drivers/staging/cptm1217/clearpad_tm1217.c2
-rw-r--r--drivers/staging/dgap/dgap.c7462
-rw-r--r--drivers/staging/dgap/dgap.h3
-rw-r--r--drivers/staging/dgnc/dgnc_cls.c30
-rw-r--r--drivers/staging/dgnc/dgnc_driver.c33
-rw-r--r--drivers/staging/dgnc/dgnc_driver.h61
-rw-r--r--drivers/staging/dgnc/dgnc_kcompat.h18
-rw-r--r--drivers/staging/dgnc/dgnc_neo.c30
-rw-r--r--drivers/staging/dgnc/dgnc_sysfs.c37
-rw-r--r--drivers/staging/dgnc/dgnc_tty.c146
-rw-r--r--drivers/staging/dgnc/dgnc_tty.h2
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.c7
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/boot.h34
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000.h30
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_cs.c50
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_dnld.c212
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c935
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_debug.c1204
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_download.c400
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_hw.c436
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_ioctl.h60
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_usb.c97
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_usb.h4
-rw-r--r--drivers/staging/fwserial/fwserial.c3
-rw-r--r--drivers/staging/gdm724x/gdm_lte.c10
-rw-r--r--drivers/staging/gdm724x/gdm_mux.h8
-rw-r--r--drivers/staging/gdm72xx/Kconfig2
-rw-r--r--drivers/staging/gdm72xx/gdm_wimax.c6
-rw-r--r--drivers/staging/gs_fpgaboot/gs_fpgaboot.c57
-rw-r--r--drivers/staging/iio/Documentation/generic_buffer.c81
-rw-r--r--drivers/staging/iio/Documentation/iio_event_monitor.c32
-rw-r--r--drivers/staging/iio/Documentation/iio_utils.h7
-rw-r--r--drivers/staging/iio/Documentation/lsiio.c20
-rw-r--r--drivers/staging/iio/accel/Kconfig39
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_ring.c1
-rw-r--r--drivers/staging/iio/adc/ad7192.c3
-rw-r--r--drivers/staging/iio/adc/ad7280a.c2
-rw-r--r--drivers/staging/iio/adc/ad7606_spi.c2
-rw-r--r--drivers/staging/iio/adc/ad7816.c3
-rw-r--r--drivers/staging/iio/adc/lpc32xx_adc.c2
-rw-r--r--drivers/staging/iio/adc/mxs-lradc.c6
-rw-r--r--drivers/staging/iio/adc/spear_adc.c4
-rw-r--r--drivers/staging/iio/addac/Kconfig9
-rw-r--r--drivers/staging/iio/addac/adt7316.h3
-rw-r--r--drivers/staging/iio/gyro/Kconfig5
-rw-r--r--drivers/staging/iio/light/tsl2x7x_core.c17
-rw-r--r--drivers/staging/iio/meter/Kconfig15
-rw-r--r--drivers/staging/iio/trigger/Kconfig5
-rw-r--r--drivers/staging/iio/trigger/iio-trig-periodic-rtc.c3
-rw-r--r--drivers/staging/imx-drm/TODO17
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h12
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c17
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c34
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c3
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c60
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c89
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c15
-rw-r--r--drivers/staging/lustre/lnet/lnet/api-ni.c18
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-md.c2
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-move.c98
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-ptl.c3
-rw-r--r--drivers/staging/lustre/lnet/lnet/lo.c8
-rw-r--r--drivers/staging/lustre/lnet/lnet/module.c10
-rw-r--r--drivers/staging/lustre/lnet/lnet/router.c52
-rw-r--r--drivers/staging/lustre/lnet/lnet/router_proc.c25
-rw-r--r--drivers/staging/lustre/lnet/selftest/brw_test.c8
-rw-r--r--drivers/staging/lustre/lnet/selftest/conctl.c34
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.c20
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.c6
-rw-r--r--drivers/staging/lustre/lnet/selftest/framework.c75
-rw-r--r--drivers/staging/lustre/lnet/selftest/module.c19
-rw-r--r--drivers/staging/lustre/lnet/selftest/ping_test.c2
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.c63
-rw-r--r--drivers/staging/lustre/lnet/selftest/timer.c8
-rw-r--r--drivers/staging/lustre/lustre/Kconfig2
-rw-r--r--drivers/staging/lustre/lustre/include/dt_object.h4
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_compat25.h32
-rw-r--r--drivers/staging/lustre/lustre/include/linux/obd.h3
-rw-r--r--drivers/staging/lustre/lustre/include/lprocfs_status.h8
-rw-r--r--drivers/staging/lustre/lustre/include/lu_object.h4
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_capa.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_disk.h3
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_eacl.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lib.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_net.h7
-rw-r--r--drivers/staging/lustre/lustre/include/obd_class.h2
-rw-r--r--drivers/staging/lustre/lustre/ldlm/interval_tree.c5
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_extent.c4
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_flock.c10
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_internal.h8
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lib.c7
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lock.c219
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c28
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_pool.c63
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_request.c78
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_resource.c45
-rw-r--r--drivers/staging/lustre/lustre/libcfs/debug.c18
-rw-r--r--drivers/staging/lustre/lustre/libcfs/fail.c24
-rw-r--r--drivers/staging/lustre/lustre/libcfs/hash.c28
-rw-r--r--drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c2
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c32
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c6
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c2
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-tcpip.c3
-rw-r--r--drivers/staging/lustre/lustre/libcfs/tracefile.c38
-rw-r--r--drivers/staging/lustre/lustre/llite/dcache.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c98
-rw-r--r--drivers/staging/lustre/lustre/llite/file.c22
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_capa.c3
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_close.c16
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h30
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c131
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_mmap.c9
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_rmtacl.c7
-rw-r--r--drivers/staging/lustre/lustre/llite/lloop.c8
-rw-r--r--drivers/staging/lustre/lustre/llite/lproc_llite.c89
-rw-r--r--drivers/staging/lustre/lustre/llite/namei.c10
-rw-r--r--drivers/staging/lustre/lustre/llite/remote_perm.c5
-rw-r--r--drivers/staging/lustre/lustre/llite/rw.c22
-rw-r--r--drivers/staging/lustre/lustre/llite/rw26.c6
-rw-r--r--drivers/staging/lustre/lustre/llite/statahead.c16
-rw-r--r--drivers/staging/lustre/lustre/llite/super25.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/symlink.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_io.c7
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_page.c3
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr.c6
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr_cache.c5
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_fld.c3
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_intent.c8
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_internal.h4
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_obd.c41
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_ea.c5
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_obd.c35
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pack.c3
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_lib.c4
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_locks.c4
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_request.c12
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_request.c14
-rw-r--r--drivers/staging/lustre/lustre/obdclass/acl.c8
-rw-r--r--drivers/staging/lustre/lustre/obdclass/capa.c14
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_io.c3
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_lock.c3
-rw-r--r--drivers/staging/lustre/lustre/obdclass/class_obd.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/debug.c12
-rw-r--r--drivers/staging/lustre/lustre/obdclass/dt_object.c15
-rw-r--r--drivers/staging/lustre/lustre/obdclass/genops.c28
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-module.c4
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c5
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog.c4
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_cat.c10
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_obd.c2
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_swab.c3
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lprocfs_status.c14
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_object.c3
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_config.c11
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_mount.c38
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_client.c140
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cache.c176
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cl_internal.h2
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_lock.c6
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_object.c3
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_page.c11
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_request.c51
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/client.c48
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/events.c7
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/import.c115
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/layout.c37
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/llog_client.c3
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c18
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/niobuf.c7
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/nrs.c33
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pack_generic.c59
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pinger.c22
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c7
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/recover.c7
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec.c27
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c3
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_null.c18
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_plain.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/service.c90
-rw-r--r--drivers/staging/media/Kconfig6
-rw-r--r--drivers/staging/media/Makefile3
-rw-r--r--drivers/staging/media/cxd2099/cxd2099.c2
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe.h4
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c62
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.h58
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipeif.c4
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipeif.h2
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_isif.c11
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_isif.h6
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_resizer.c8
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_resizer.h2
-rw-r--r--drivers/staging/media/lirc/lirc_bt829.c14
-rw-r--r--drivers/staging/media/lirc/lirc_imon.c27
-rw-r--r--drivers/staging/media/lirc/lirc_sasem.c4
-rw-r--r--drivers/staging/media/lirc/lirc_sir.c18
-rw-r--r--drivers/staging/media/lirc/lirc_zilog.c137
-rw-r--r--drivers/staging/media/omap4iss/iss.c4
-rw-r--r--drivers/staging/media/omap4iss/iss_csi2.c2
-rw-r--r--drivers/staging/media/parport/Kconfig (renamed from drivers/media/parport/Kconfig)24
-rw-r--r--drivers/staging/media/parport/Makefile (renamed from drivers/media/parport/Makefile)0
-rw-r--r--drivers/staging/media/parport/bw-qcam.c (renamed from drivers/media/parport/bw-qcam.c)0
-rw-r--r--drivers/staging/media/parport/c-qcam.c (renamed from drivers/media/parport/c-qcam.c)0
-rw-r--r--drivers/staging/media/parport/pms.c (renamed from drivers/media/parport/pms.c)0
-rw-r--r--drivers/staging/media/parport/w9966.c (renamed from drivers/media/parport/w9966.c)0
-rw-r--r--drivers/staging/media/tlg2300/Kconfig (renamed from drivers/media/usb/tlg2300/Kconfig)6
-rw-r--r--drivers/staging/media/tlg2300/Makefile (renamed from drivers/media/usb/tlg2300/Makefile)0
-rw-r--r--drivers/staging/media/tlg2300/pd-alsa.c (renamed from drivers/media/usb/tlg2300/pd-alsa.c)0
-rw-r--r--drivers/staging/media/tlg2300/pd-common.h (renamed from drivers/media/usb/tlg2300/pd-common.h)0
-rw-r--r--drivers/staging/media/tlg2300/pd-dvb.c (renamed from drivers/media/usb/tlg2300/pd-dvb.c)0
-rw-r--r--drivers/staging/media/tlg2300/pd-main.c (renamed from drivers/media/usb/tlg2300/pd-main.c)0
-rw-r--r--drivers/staging/media/tlg2300/pd-radio.c (renamed from drivers/media/usb/tlg2300/pd-radio.c)0
-rw-r--r--drivers/staging/media/tlg2300/pd-video.c (renamed from drivers/media/usb/tlg2300/pd-video.c)0
-rw-r--r--drivers/staging/media/tlg2300/vendorcmds.h (renamed from drivers/media/usb/tlg2300/vendorcmds.h)0
-rw-r--r--drivers/staging/media/vino/Kconfig24
-rw-r--r--drivers/staging/media/vino/Makefile3
-rw-r--r--drivers/staging/media/vino/indycam.c (renamed from drivers/media/platform/indycam.c)0
-rw-r--r--drivers/staging/media/vino/indycam.h (renamed from drivers/media/platform/indycam.h)0
-rw-r--r--drivers/staging/media/vino/saa7191.c (renamed from drivers/media/i2c/saa7191.c)0
-rw-r--r--drivers/staging/media/vino/saa7191.h (renamed from drivers/media/i2c/saa7191.h)0
-rw-r--r--drivers/staging/media/vino/vino.c (renamed from drivers/media/platform/vino.c)0
-rw-r--r--drivers/staging/media/vino/vino.h (renamed from drivers/media/platform/vino.h)0
-rw-r--r--drivers/staging/octeon-usb/octeon-hcd.c69
-rw-r--r--drivers/staging/octeon/ethernet-rx.c156
-rw-r--r--drivers/staging/octeon/ethernet-tx.c11
-rw-r--r--drivers/staging/octeon/ethernet.c8
-rw-r--r--drivers/staging/octeon/octeon-ethernet.h1
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c3
-rw-r--r--drivers/staging/ozwpan/ozhcd.c22
-rw-r--r--drivers/staging/ozwpan/ozusbsvc1.c12
-rw-r--r--drivers/staging/panel/TODO1
-rw-r--r--drivers/staging/panel/panel.c807
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c17
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_cmd.c9
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_debug.c14
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_efuse.c47
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ieee80211.c16
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ioctl_set.c1
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_led.c12
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme.c10
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c34
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_pwrctrl.c7
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_recv.c20
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_security.c24
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_sta_mgt.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_wlan_util.c1
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_xmit.c14
-rw-r--r--drivers/staging/rtl8188eu/hal/bb_cfg.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/fw.c8
-rw-r--r--drivers/staging/rtl8188eu/hal/hal_intf.c16
-rw-r--r--drivers/staging/rtl8188eu/hal/mac_cfg.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/odm.c8
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_HWConfig.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_RTL8188E.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/phy.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/rf.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/rf_cfg.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c6
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_dm.c30
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c3
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/usb_halinit.c4
-rw-r--r--drivers/staging/rtl8188eu/include/hal_intf.h3
-rw-r--r--drivers/staging/rtl8188eu/include/ieee80211_ext.h20
-rw-r--r--drivers/staging/rtl8188eu/include/odm_debug.h31
-rw-r--r--drivers/staging/rtl8188eu/include/osdep_service.h4
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_debug.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_led.h8
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme_ext.h26
-rw-r--r--drivers/staging/rtl8188eu/include/wifi.h36
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c31
-rw-r--r--drivers/staging/rtl8188eu/os_dep/os_intfs.c11
-rw-r--r--drivers/staging/rtl8188eu/os_dep/osdep_service.c2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/rtw_android.c41
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c35
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c8
-rw-r--r--drivers/staging/rtl8188eu/os_dep/xmit_linux.c10
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c4
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c8
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BAProc.c24
-rw-r--r--drivers/staging/rtl8192e/rtllib.h6
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c6
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c12
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c22
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c26
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c3
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c20
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c11
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c22
-rw-r--r--drivers/staging/rtl8192u/r8192U_dm.c91
-rw-r--r--drivers/staging/rtl8192u/r8192U_wx.c6
-rw-r--r--drivers/staging/rtl8192u/r819xU_firmware.c20
-rw-r--r--drivers/staging/rtl8192u/r819xU_firmware.h8
-rw-r--r--drivers/staging/rtl8192u/r819xU_phy.c1
-rw-r--r--drivers/staging/rtl8712/hal_init.c11
-rw-r--r--drivers/staging/rtl8712/ieee80211.c17
-rw-r--r--drivers/staging/rtl8712/osdep_service.h8
-rw-r--r--drivers/staging/rtl8712/recv_linux.c2
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.c13
-rw-r--r--drivers/staging/rtl8712/rtl8712_efuse.c43
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.c24
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.c58
-rw-r--r--drivers/staging/rtl8712/rtl871x_io.c6
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c23
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_set.c24
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.c27
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp.c4
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp_ioctl.c3
-rw-r--r--drivers/staging/rtl8712/rtl871x_pwrctrl.c4
-rw-r--r--drivers/staging/rtl8712/rtl871x_recv.c19
-rw-r--r--drivers/staging/rtl8712/rtl871x_recv.h3
-rw-r--r--drivers/staging/rtl8712/rtl871x_security.c10
-rw-r--r--drivers/staging/rtl8712/rtl871x_sta_mgt.c11
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.c15
-rw-r--r--drivers/staging/rtl8712/usb_intf.c16
-rw-r--r--drivers/staging/rtl8712/usb_ops_linux.c9
-rw-r--r--drivers/staging/rtl8712/xmit_linux.c4
-rw-r--r--drivers/staging/rtl8723au/Makefile3
-rw-r--r--drivers/staging/rtl8723au/core/rtw_ap.c159
-rw-r--r--drivers/staging/rtl8723au/core/rtw_cmd.c181
-rw-r--r--drivers/staging/rtl8723au/core/rtw_efuse.c23
-rw-r--r--drivers/staging/rtl8723au/core/rtw_ieee80211.c4
-rw-r--r--drivers/staging/rtl8723au/core/rtw_led.c1893
-rw-r--r--drivers/staging/rtl8723au/core/rtw_mlme.c76
-rw-r--r--drivers/staging/rtl8723au/core/rtw_mlme_ext.c67
-rw-r--r--drivers/staging/rtl8723au/core/rtw_pwrctrl.c8
-rw-r--r--drivers/staging/rtl8723au/core/rtw_recv.c165
-rw-r--r--drivers/staging/rtl8723au/core/rtw_security.c98
-rw-r--r--drivers/staging/rtl8723au/core/rtw_sreset.c2
-rw-r--r--drivers/staging/rtl8723au/core/rtw_wlan_util.c15
-rw-r--r--drivers/staging/rtl8723au/core/rtw_xmit.c94
-rw-r--r--drivers/staging/rtl8723au/hal/HalDMOutSrc8723A_CE.c2
-rw-r--r--drivers/staging/rtl8723au/hal/hal_com.c4
-rw-r--r--drivers/staging/rtl8723au/hal/odm_HWConfig.c13
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_bt-coexist.c44
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_cmd.c7
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c493
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_phycfg.c3
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_rf6052.c229
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_xmit.c31
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723au_led.c124
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723au_xmit.c33
-rw-r--r--drivers/staging/rtl8723au/hal/usb_halinit.c479
-rw-r--r--drivers/staging/rtl8723au/hal/usb_ops_linux.c21
-rw-r--r--drivers/staging/rtl8723au/include/Hal8723UHWImg_CE.h2
-rw-r--r--drivers/staging/rtl8723au/include/drv_types.h2
-rw-r--r--drivers/staging/rtl8723au/include/odm_debug.h30
-rw-r--r--drivers/staging/rtl8723au/include/rtl8723a_dm.h3
-rw-r--r--drivers/staging/rtl8723au/include/rtl8723a_hal.h11
-rw-r--r--drivers/staging/rtl8723au/include/rtl8723a_led.h30
-rw-r--r--drivers/staging/rtl8723au/include/rtl8723a_recv.h6
-rw-r--r--drivers/staging/rtl8723au/include/rtl8723a_xmit.h1
-rw-r--r--drivers/staging/rtl8723au/include/rtw_cmd.h4
-rw-r--r--drivers/staging/rtl8723au/include/rtw_ht.h3
-rw-r--r--drivers/staging/rtl8723au/include/rtw_led.h181
-rw-r--r--drivers/staging/rtl8723au/include/rtw_mlme.h2
-rw-r--r--drivers/staging/rtl8723au/include/rtw_mlme_ext.h6
-rw-r--r--drivers/staging/rtl8723au/include/rtw_recv.h3
-rw-r--r--drivers/staging/rtl8723au/include/rtw_xmit.h18
-rw-r--r--drivers/staging/rtl8723au/include/usb_ops.h2
-rw-r--r--drivers/staging/rtl8723au/include/usb_ops_linux.h4
-rw-r--r--drivers/staging/rtl8723au/include/wlan_bssdef.h21
-rw-r--r--drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c20
-rw-r--r--drivers/staging/rtl8723au/os_dep/os_intfs.c19
-rw-r--r--drivers/staging/rtl8723au/os_dep/usb_intf.c37
-rw-r--r--drivers/staging/rtl8723au/os_dep/usb_ops_linux.c23
-rw-r--r--drivers/staging/rts5208/ms.c4
-rw-r--r--drivers/staging/rts5208/rtsx.c4
-rw-r--r--drivers/staging/rts5208/rtsx_chip.c459
-rw-r--r--drivers/staging/rts5208/rtsx_scsi.c3
-rw-r--r--drivers/staging/rts5208/rtsx_transport.c8
-rw-r--r--drivers/staging/rts5208/rtsx_transport.h2
-rw-r--r--drivers/staging/skein/Kconfig24
-rw-r--r--drivers/staging/skein/Makefile13
-rw-r--r--drivers/staging/skein/skein_api.c2
-rw-r--r--drivers/staging/skein/skein_api.h2
-rw-r--r--drivers/staging/skein/skein_base.c (renamed from drivers/staging/skein/skein.c)23
-rw-r--r--drivers/staging/skein/skein_base.h (renamed from drivers/staging/skein/skein.h)39
-rw-r--r--drivers/staging/skein/skein_block.c932
-rw-r--r--drivers/staging/skein/skein_block.h2
-rw-r--r--drivers/staging/skein/skein_generic.c216
-rw-r--r--drivers/staging/skein/skein_iv.h2
-rw-r--r--drivers/staging/skein/threefish_api.h2
-rw-r--r--drivers/staging/slicoss/slicoss.c46
-rw-r--r--drivers/staging/speakup/kobjects.c6
-rw-r--r--drivers/staging/speakup/main.c2
-rw-r--r--drivers/staging/speakup/speakup_dtlk.c2
-rw-r--r--drivers/staging/speakup/speakup_keypc.c2
-rw-r--r--drivers/staging/unisys/channels/channel.c114
-rw-r--r--drivers/staging/unisys/channels/chanstub.c8
-rw-r--r--drivers/staging/unisys/channels/chanstub.h8
-rw-r--r--drivers/staging/unisys/common-spar/include/channels/channel.h505
-rw-r--r--drivers/staging/unisys/common-spar/include/channels/channel_guid.h27
-rw-r--r--drivers/staging/unisys/common-spar/include/channels/controlframework.h47
-rw-r--r--drivers/staging/unisys/common-spar/include/channels/controlvmchannel.h623
-rw-r--r--drivers/staging/unisys/common-spar/include/channels/diagchannel.h168
-rw-r--r--drivers/staging/unisys/common-spar/include/channels/iochannel.h292
-rw-r--r--drivers/staging/unisys/common-spar/include/channels/vbuschannel.h80
-rw-r--r--drivers/staging/unisys/common-spar/include/vbusdeviceinfo.h37
-rw-r--r--drivers/staging/unisys/common-spar/include/vmcallinterface.h47
-rw-r--r--drivers/staging/unisys/include/timskmod.h17
-rw-r--r--drivers/staging/unisys/include/uisqueue.h75
-rw-r--r--drivers/staging/unisys/include/uisutils.h162
-rw-r--r--drivers/staging/unisys/include/vbushelper.h16
-rw-r--r--drivers/staging/unisys/uislib/uislib.c521
-rw-r--r--drivers/staging/unisys/uislib/uisqueue.c8
-rw-r--r--drivers/staging/unisys/uislib/uisutils.c109
-rw-r--r--drivers/staging/unisys/virthba/virthba.c104
-rw-r--r--drivers/staging/unisys/virthba/virthba.h4
-rw-r--r--drivers/staging/unisys/virtpci/virtpci.c332
-rw-r--r--drivers/staging/unisys/virtpci/virtpci.h20
-rw-r--r--drivers/staging/unisys/visorchannel/globals.h1
-rw-r--r--drivers/staging/unisys/visorchannel/visorchannel.h2
-rw-r--r--drivers/staging/unisys/visorchannel/visorchannel_funcs.c301
-rw-r--r--drivers/staging/unisys/visorchipset/file.c6
-rw-r--r--drivers/staging/unisys/visorchipset/parser.c52
-rw-r--r--drivers/staging/unisys/visorchipset/testing.h5
-rw-r--r--drivers/staging/unisys/visorchipset/visorchipset.h275
-rw-r--r--drivers/staging/unisys/visorchipset/visorchipset_main.c702
-rw-r--r--drivers/staging/unisys/visorutil/charqueue.c40
-rw-r--r--drivers/staging/unisys/visorutil/charqueue.h17
-rw-r--r--drivers/staging/unisys/visorutil/easyproc.c6
-rw-r--r--drivers/staging/unisys/visorutil/memregion.h28
-rw-r--r--drivers/staging/unisys/visorutil/memregion_direct.c64
-rw-r--r--drivers/staging/unisys/visorutil/periodic_work.c19
-rw-r--r--drivers/staging/unisys/visorutil/procobjecttree.c15
-rw-r--r--drivers/staging/unisys/visorutil/visorkmodutils.c26
-rw-r--r--drivers/staging/vme/devices/Kconfig3
-rw-r--r--drivers/staging/vme/devices/vme_pio2_gpio.c4
-rw-r--r--drivers/staging/vt6655/80211hdr.h318
-rw-r--r--drivers/staging/vt6655/80211mgr.c1019
-rw-r--r--drivers/staging/vt6655/80211mgr.h725
-rw-r--r--drivers/staging/vt6655/IEEE11h.c141
-rw-r--r--drivers/staging/vt6655/IEEE11h.h42
-rw-r--r--drivers/staging/vt6655/Kconfig4
-rw-r--r--drivers/staging/vt6655/Makefile23
-rw-r--r--drivers/staging/vt6655/aes_ccmp.c374
-rw-r--r--drivers/staging/vt6655/aes_ccmp.h37
-rw-r--r--drivers/staging/vt6655/baseband.c777
-rw-r--r--drivers/staging/vt6655/baseband.h39
-rw-r--r--drivers/staging/vt6655/bssdb.c1512
-rw-r--r--drivers/staging/vt6655/bssdb.h326
-rw-r--r--drivers/staging/vt6655/card.c1259
-rw-r--r--drivers/staging/vt6655/card.h120
-rw-r--r--drivers/staging/vt6655/channel.c844
-rw-r--r--drivers/staging/vt6655/channel.h22
-rw-r--r--drivers/staging/vt6655/country.h161
-rw-r--r--drivers/staging/vt6655/datarate.c410
-rw-r--r--drivers/staging/vt6655/datarate.h78
-rw-r--r--drivers/staging/vt6655/desc.h134
-rw-r--r--drivers/staging/vt6655/device.h476
-rw-r--r--drivers/staging/vt6655/device_cfg.h2
-rw-r--r--drivers/staging/vt6655/device_main.c2533
-rw-r--r--drivers/staging/vt6655/dpc.c1312
-rw-r--r--drivers/staging/vt6655/dpc.h10
-rw-r--r--drivers/staging/vt6655/hostap.c765
-rw-r--r--drivers/staging/vt6655/hostap.h58
-rw-r--r--drivers/staging/vt6655/iocmd.h408
-rw-r--r--drivers/staging/vt6655/ioctl.c658
-rw-r--r--drivers/staging/vt6655/ioctl.h36
-rw-r--r--drivers/staging/vt6655/iowpa.h130
-rw-r--r--drivers/staging/vt6655/iwctl.c1937
-rw-r--r--drivers/staging/vt6655/iwctl.h206
-rw-r--r--drivers/staging/vt6655/key.c838
-rw-r--r--drivers/staging/vt6655/key.h131
-rw-r--r--drivers/staging/vt6655/mac.c780
-rw-r--r--drivers/staging/vt6655/mac.h53
-rw-r--r--drivers/staging/vt6655/mib.c423
-rw-r--r--drivers/staging/vt6655/mib.h261
-rw-r--r--drivers/staging/vt6655/michael.c148
-rw-r--r--drivers/staging/vt6655/michael.h52
-rw-r--r--drivers/staging/vt6655/power.c208
-rw-r--r--drivers/staging/vt6655/power.h16
-rw-r--r--drivers/staging/vt6655/rc4.c88
-rw-r--r--drivers/staging/vt6655/rc4.h47
-rw-r--r--drivers/staging/vt6655/rf.c381
-rw-r--r--drivers/staging/vt6655/rf.h9
-rw-r--r--drivers/staging/vt6655/rxtx.c2112
-rw-r--r--drivers/staging/vt6655/rxtx.h52
-rw-r--r--drivers/staging/vt6655/srom.c252
-rw-r--r--drivers/staging/vt6655/srom.h49
-rw-r--r--drivers/staging/vt6655/tcrc.c191
-rw-r--r--drivers/staging/vt6655/tcrc.h50
-rw-r--r--drivers/staging/vt6655/tether.c105
-rw-r--r--drivers/staging/vt6655/tether.h192
-rw-r--r--drivers/staging/vt6655/tkip.c268
-rw-r--r--drivers/staging/vt6655/tkip.h57
-rw-r--r--drivers/staging/vt6655/tmacro.h2
-rw-r--r--drivers/staging/vt6655/ttype.h42
-rw-r--r--drivers/staging/vt6655/upc.h1
-rw-r--r--drivers/staging/vt6655/vntconfiguration.dat1
-rw-r--r--drivers/staging/vt6655/vntwifi.c700
-rw-r--r--drivers/staging/vt6655/vntwifi.h273
-rw-r--r--drivers/staging/vt6655/wcmd.c1023
-rw-r--r--drivers/staging/vt6655/wcmd.h123
-rw-r--r--drivers/staging/vt6655/wctl.c233
-rw-r--r--drivers/staging/vt6655/wctl.h105
-rw-r--r--drivers/staging/vt6655/wmgr.c4602
-rw-r--r--drivers/staging/vt6655/wmgr.h420
-rw-r--r--drivers/staging/vt6655/wpa.c300
-rw-r--r--drivers/staging/vt6655/wpa.h83
-rw-r--r--drivers/staging/vt6655/wpa2.c359
-rw-r--r--drivers/staging/vt6655/wpa2.h77
-rw-r--r--drivers/staging/vt6655/wpactl.c896
-rw-r--r--drivers/staging/vt6655/wpactl.h64
-rw-r--r--drivers/staging/vt6655/wroute.c187
-rw-r--r--drivers/staging/vt6655/wroute.h45
-rw-r--r--drivers/staging/vt6656/main_usb.c10
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h3
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c6
-rw-r--r--drivers/staging/wlan-ng/p80211conv.c2
-rw-r--r--drivers/staging/wlan-ng/p80211hdr.h2
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c4
-rw-r--r--drivers/staging/wlan-ng/prism2fw.c9
-rw-r--r--drivers/staging/xgifb/XGI_main_26.c2
-rw-r--r--drivers/staging/xgifb/vb_def.h1
-rw-r--r--drivers/staging/xgifb/vb_setmode.c2
-rw-r--r--drivers/staging/xgifb/vb_util.c5
-rw-r--r--drivers/target/iscsi/iscsi_target.c1
-rw-r--r--drivers/target/iscsi/iscsi_target_core.h1
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c11
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c1
-rw-r--r--drivers/target/iscsi/iscsi_target_transport.c3
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c26
-rw-r--r--drivers/target/loopback/tcm_loop.c66
-rw-r--r--drivers/target/loopback/tcm_loop.h7
-rw-r--r--drivers/target/target_core_configfs.c344
-rw-r--r--drivers/target/target_core_device.c90
-rw-r--r--drivers/target/target_core_file.c42
-rw-r--r--drivers/target/target_core_hba.c7
-rw-r--r--drivers/target/target_core_iblock.c42
-rw-r--r--drivers/target/target_core_internal.h28
-rw-r--r--drivers/target/target_core_pr.c125
-rw-r--r--drivers/target/target_core_pscsi.c26
-rw-r--r--drivers/target/target_core_rd.c41
-rw-r--r--drivers/target/target_core_user.c42
-rw-r--r--drivers/thermal/Kconfig32
-rw-r--r--drivers/thermal/Makefile5
-rw-r--r--drivers/thermal/armada_thermal.c20
-rw-r--r--drivers/thermal/clock_cooling.c485
-rw-r--r--drivers/thermal/int340x_thermal/acpi_thermal_rel.c12
-rw-r--r--drivers/thermal/int340x_thermal/int3400_thermal.c80
-rw-r--r--drivers/thermal/int340x_thermal/int3403_thermal.c4
-rw-r--r--drivers/thermal/intel_powerclamp.c3
-rw-r--r--drivers/thermal/intel_soc_dts_thermal.c12
-rw-r--r--drivers/thermal/of-thermal.c148
-rw-r--r--drivers/thermal/rockchip_thermal.c693
-rw-r--r--drivers/thermal/samsung/exynos_thermal_common.h1
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c692
-rw-r--r--drivers/thermal/samsung/exynos_tmu.h123
-rw-r--r--drivers/thermal/samsung/exynos_tmu_data.c239
-rw-r--r--drivers/thermal/samsung/exynos_tmu_data.h159
-rw-r--r--drivers/thermal/tegra_soctherm.c476
-rw-r--r--drivers/thermal/thermal_core.c8
-rw-r--r--drivers/thermal/thermal_core.h18
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-thermal-common.c8
-rw-r--r--drivers/tty/serial/8250/8250_dw.c2
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c2
-rw-r--r--drivers/tty/serial/8250/8250_omap.c14
-rw-r--r--drivers/tty/serial/mfd.c7
-rw-r--r--drivers/tty/serial/msm_serial_hs.c2
-rw-r--r--drivers/tty/serial/omap-serial.c2
-rw-r--r--drivers/usb/core/Kconfig2
-rw-r--r--drivers/usb/host/isp1760-hcd.c2
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c2
-rw-r--r--drivers/usb/phy/Kconfig4
-rw-r--r--drivers/usb/storage/Kconfig2
-rw-r--r--drivers/vfio/Kconfig2
-rw-r--r--drivers/vfio/pci/Kconfig8
-rw-r--r--drivers/vfio/pci/vfio_pci.c5
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c7
-rw-r--r--drivers/vhost/vringh.c125
-rw-r--r--drivers/video/fbdev/s3c-fb.c2
-rw-r--r--drivers/video/fbdev/sh_mobile_meram.c4
-rw-r--r--drivers/virtio/virtio.c37
-rw-r--r--drivers/virtio/virtio_balloon.c57
-rw-r--r--drivers/virtio/virtio_pci_common.c39
-rw-r--r--drivers/virtio/virtio_pci_common.h7
-rw-r--r--drivers/virtio/virtio_pci_legacy.c24
-rw-r--r--drivers/watchdog/imx2_wdt.c47
-rw-r--r--fs/Makefile2
-rw-r--r--fs/binfmt_misc.c7
-rw-r--r--fs/btrfs/ctree.h4
-rw-r--r--fs/btrfs/disk-io.c6
-rw-r--r--fs/btrfs/extent-tree.c23
-rw-r--r--fs/btrfs/free-space-cache.c12
-rw-r--r--fs/btrfs/volumes.c2
-rw-r--r--fs/ceph/addr.c273
-rw-r--r--fs/ceph/caps.c132
-rw-r--r--fs/ceph/dir.c27
-rw-r--r--fs/ceph/file.c97
-rw-r--r--fs/ceph/inode.c59
-rw-r--r--fs/ceph/locks.c64
-rw-r--r--fs/ceph/mds_client.c41
-rw-r--r--fs/ceph/mds_client.h10
-rw-r--r--fs/ceph/snap.c37
-rw-r--r--fs/ceph/super.c16
-rw-r--r--fs/ceph/super.h55
-rw-r--r--fs/ceph/xattr.c7
-rw-r--r--fs/coda/dir.c4
-rw-r--r--fs/ecryptfs/crypto.c1
-rw-r--r--fs/ecryptfs/file.c12
-rw-r--r--fs/ecryptfs/keystore.c6
-rw-r--r--fs/ecryptfs/main.c16
-rw-r--r--fs/ext4/move_extent.c4
-rw-r--r--fs/fuse/cuse.c2
-rw-r--r--fs/fuse/dev.c29
-rw-r--r--fs/fuse/dir.c538
-rw-r--r--fs/fuse/file.c230
-rw-r--r--fs/fuse/fuse_i.h45
-rw-r--r--fs/fuse/inode.c39
-rw-r--r--fs/hfsplus/catalog.c89
-rw-r--r--fs/hfsplus/dir.c11
-rw-r--r--fs/hfsplus/hfsplus_fs.h4
-rw-r--r--fs/hfsplus/super.c4
-rw-r--r--fs/inode.c11
-rw-r--r--fs/internal.h5
-rw-r--r--fs/ioctl.c2
-rw-r--r--fs/isofs/rock.c6
-rw-r--r--fs/jffs2/readinode.c2
-rw-r--r--fs/jffs2/summary.c1
-rw-r--r--fs/kernfs/file.c22
-rw-r--r--fs/lockd/mon.c2
-rw-r--r--fs/lockd/svc.c2
-rw-r--r--fs/mount.h3
-rw-r--r--fs/namei.c98
-rw-r--r--fs/namespace.c71
-rw-r--r--fs/nfsd/nfs4proc.c57
-rw-r--r--fs/nfsd/nfs4state.c68
-rw-r--r--fs/nfsd/nfs4xdr.c34
-rw-r--r--fs/nfsd/nfscache.c4
-rw-r--r--fs/nfsd/nfsctl.c6
-rw-r--r--fs/nfsd/nfsfh.c2
-rw-r--r--fs/nfsd/nfssvc.c2
-rw-r--r--fs/nfsd/state.h19
-rw-r--r--fs/nfsd/vfs.c37
-rw-r--r--fs/nfsd/vfs.h2
-rw-r--r--fs/nfsd/xdr4.h9
-rw-r--r--fs/nsfs.c161
-rw-r--r--fs/ocfs2/alloc.c28
-rw-r--r--fs/ocfs2/alloc.h2
-rw-r--r--fs/ocfs2/aops.c16
-rw-r--r--fs/ocfs2/dir.c2
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c12
-rw-r--r--fs/ocfs2/file.c2
-rw-r--r--fs/open.c5
-rw-r--r--fs/pnode.c1
-rw-r--r--fs/proc/base.c53
-rw-r--r--fs/proc/inode.c10
-rw-r--r--fs/proc/internal.h2
-rw-r--r--fs/proc/meminfo.c15
-rw-r--r--fs/proc/namespaces.c153
-rw-r--r--fs/proc/stat.c2
-rw-r--r--fs/proc_namespace.c16
-rw-r--r--fs/reiserfs/super.c3
-rw-r--r--include/acpi/acpi_bus.h1
-rw-r--r--include/asm-generic/vmlinux.lds.h2
-rw-r--r--include/drm/drmP.h27
-rw-r--r--include/drm/drm_atomic.h69
-rw-r--r--include/drm/drm_atomic_helper.h126
-rw-r--r--include/drm/drm_crtc.h327
-rw-r--r--include/drm/drm_crtc_helper.h13
-rw-r--r--include/drm/drm_displayid.h76
-rw-r--r--include/drm/drm_dp_helper.h26
-rw-r--r--include/drm/drm_dp_mst_helper.h8
-rw-r--r--include/drm/drm_edid.h109
-rw-r--r--include/drm/drm_fb_helper.h6
-rw-r--r--include/drm/drm_flip_work.h33
-rw-r--r--include/drm/drm_gem.h7
-rw-r--r--include/drm/drm_gem_cma_helper.h30
-rw-r--r--include/drm/drm_mipi_dsi.h94
-rw-r--r--include/drm/drm_modeset_lock.h5
-rw-r--r--include/drm/drm_plane_helper.h44
-rw-r--r--include/drm/i915_pciids.h17
-rw-r--r--include/drm/ttm/ttm_execbuf_util.h9
-rw-r--r--include/dt-bindings/thermal/tegra124-soctherm.h13
-rw-r--r--include/kvm/arm_arch_timer.h10
-rw-r--r--include/kvm/arm_vgic.h12
-rw-r--r--include/linux/acpi.h1
-rw-r--r--include/linux/ceph/auth.h26
-rw-r--r--include/linux/ceph/buffer.h3
-rw-r--r--include/linux/ceph/ceph_features.h1
-rw-r--r--include/linux/ceph/ceph_fs.h10
-rw-r--r--include/linux/ceph/libceph.h2
-rw-r--r--include/linux/ceph/messenger.h9
-rw-r--r--include/linux/ceph/msgr.h11
-rw-r--r--include/linux/ceph/osd_client.h13
-rw-r--r--include/linux/ceph/pagelist.h4
-rw-r--r--include/linux/clock_cooling.h65
-rw-r--r--include/linux/cma.h1
-rw-r--r--include/linux/cred.h1
-rw-r--r--include/linux/devfreq.h2
-rw-r--r--include/linux/dma-mapping.h13
-rw-r--r--include/linux/fs.h3
-rw-r--r--include/linux/fsl_ifc.h21
-rw-r--r--include/linux/ftrace.h7
-rw-r--r--include/linux/hdmi.h21
-rw-r--r--include/linux/iio/common/st_sensors.h10
-rw-r--r--include/linux/iio/iio.h8
-rw-r--r--include/linux/iommu.h8
-rw-r--r--include/linux/ipc_namespace.h3
-rw-r--r--include/linux/irqchip/arm-gic-v3.h128
-rw-r--r--include/linux/irqchip/arm-gic.h4
-rw-r--r--include/linux/kernel_stat.h1
-rw-r--r--include/linux/kvm_host.h106
-rw-r--r--include/linux/kvm_types.h27
-rw-r--r--include/linux/leds.h40
-rw-r--r--include/linux/migrate.h10
-rw-r--r--include/linux/mlx5/device.h72
-rw-r--r--include/linux/mlx5/driver.h14
-rw-r--r--include/linux/mlx5/qp.h65
-rw-r--r--include/linux/mm.h2
-rw-r--r--include/linux/mmu_notifier.h88
-rw-r--r--include/linux/module.h16
-rw-r--r--include/linux/mtd/nand.h18
-rw-r--r--include/linux/mtd/spi-nor.h11
-rw-r--r--include/linux/namei.h25
-rw-r--r--include/linux/ns_common.h12
-rw-r--r--include/linux/of_iommu.h23
-rw-r--r--include/linux/pci.h1
-rw-r--r--include/linux/phy_fixed.h2
-rw-r--r--include/linux/pid_namespace.h3
-rw-r--r--include/linux/platform_data/rcar-du.h74
-rw-r--r--include/linux/pm.h2
-rw-r--r--include/linux/proc_ns.h43
-rw-r--r--include/linux/sunrpc/svc.h34
-rw-r--r--include/linux/sunrpc/svc_xprt.h7
-rw-r--r--include/linux/thermal.h73
-rw-r--r--include/linux/uio.h5
-rw-r--r--include/linux/user_namespace.h15
-rw-r--r--include/linux/utsname.h3
-rw-r--r--include/linux/virtio.h2
-rw-r--r--include/linux/virtio_config.h29
-rw-r--r--include/linux/vringh.h37
-rw-r--r--include/net/net_namespace.h3
-rw-r--r--include/rdma/ib_umem.h34
-rw-r--r--include/rdma/ib_umem_odp.h160
-rw-r--r--include/rdma/ib_verbs.h54
-rw-r--r--include/scsi/scsi_device.h4
-rw-r--r--include/target/target_core_backend.h43
-rw-r--r--include/target/target_core_backend_configfs.h120
-rw-r--r--include/trace/events/host1x.h27
-rw-r--r--include/trace/events/module.h2
-rw-r--r--include/trace/events/sunrpc.h120
-rw-r--r--include/uapi/drm/drm_mode.h2
-rw-r--r--include/uapi/drm/i915_drm.h7
-rw-r--r--include/uapi/linux/Kbuild3
-rw-r--r--include/uapi/linux/android/Kbuild2
-rw-r--r--include/uapi/linux/android/binder.h (renamed from drivers/staging/android/uapi/binder.h)1
-rw-r--r--include/uapi/linux/audit.h2
-rw-r--r--include/uapi/linux/if_tun.h3
-rw-r--r--include/uapi/linux/kcmp.h (renamed from include/linux/kcmp.h)6
-rw-r--r--include/uapi/linux/kfd_ioctl.h154
-rw-r--r--include/uapi/linux/kvm.h11
-rw-r--r--include/uapi/linux/magic.h1
-rw-r--r--include/uapi/linux/target_core_user.h4
-rw-r--r--include/uapi/linux/thermal.h35
-rw-r--r--include/uapi/linux/v4l2-mediabus.h6
-rw-r--r--include/uapi/linux/virtio_balloon.h1
-rw-r--r--include/uapi/linux/virtio_pci.h15
-rw-r--r--include/uapi/rdma/ib_user_verbs.h29
-rw-r--r--init/do_mounts.c6
-rw-r--r--init/main.c6
-rw-r--r--init/version.c5
-rw-r--r--ipc/msgutil.c5
-rw-r--r--ipc/namespace.c32
-rw-r--r--kernel/events/core.c4
-rw-r--r--kernel/events/uprobes.c2
-rw-r--r--kernel/groups.c11
-rw-r--r--kernel/irq/internals.h4
-rw-r--r--kernel/irq/irqdesc.c52
-rw-r--r--kernel/irq/proc.c22
-rw-r--r--kernel/module.c170
-rw-r--r--kernel/nsproxy.c10
-rw-r--r--kernel/params.c97
-rw-r--r--kernel/pid.c5
-rw-r--r--kernel/pid_namespace.c29
-rw-r--r--kernel/power/Kconfig16
-rw-r--r--kernel/sysctl.c7
-rw-r--r--kernel/time/tick-sched.c2
-rw-r--r--kernel/time/time.c1
-rw-r--r--kernel/trace/Makefile2
-rw-r--r--kernel/trace/trace.c25
-rw-r--r--kernel/trace/trace.h14
-rw-r--r--kernel/trace/trace_events.c42
-rw-r--r--kernel/trace/trace_syscalls.c7
-rw-r--r--kernel/uid16.c2
-rw-r--r--kernel/user.c6
-rw-r--r--kernel/user_namespace.c153
-rw-r--r--kernel/utsname.c31
-rw-r--r--lib/bug.c20
-rw-r--r--lib/show_mem.c6
-rw-r--r--mm/cma.c1
-rw-r--r--mm/filemap.c2
-rw-r--r--mm/fremap.c2
-rw-r--r--mm/huge_memory.c9
-rw-r--r--mm/hugetlb.c7
-rw-r--r--mm/ksm.c4
-rw-r--r--mm/madvise.c2
-rw-r--r--mm/memory.c15
-rw-r--r--mm/mempolicy.c15
-rw-r--r--mm/migrate.c24
-rw-r--r--mm/mmu_notifier.c25
-rw-r--r--mm/page_alloc.c6
-rw-r--r--mm/rmap.c2
-rw-r--r--mm/shmem.c2
-rw-r--r--mm/zsmalloc.c374
-rw-r--r--net/Makefile2
-rw-r--r--net/bluetooth/hci_conn.c2
-rw-r--r--net/bluetooth/hci_core.c60
-rw-r--r--net/bluetooth/hci_event.c20
-rw-r--r--net/bluetooth/l2cap_core.c5
-rw-r--r--net/bluetooth/mgmt.c99
-rw-r--r--net/bluetooth/smp.c5
-rw-r--r--net/ceph/auth_x.c76
-rw-r--r--net/ceph/auth_x.h1
-rw-r--r--net/ceph/buffer.c4
-rw-r--r--net/ceph/ceph_common.c21
-rw-r--r--net/ceph/messenger.c34
-rw-r--r--net/ceph/osd_client.c118
-rw-r--r--net/core/net_namespace.c39
-rw-r--r--net/core/rtnetlink.c5
-rw-r--r--net/ipv4/geneve.c30
-rw-r--r--net/ipv4/ip_gre.c9
-rw-r--r--net/ipv4/ip_tunnel.c9
-rw-r--r--net/mac80211/chan.c4
-rw-r--r--net/mac80211/key.c2
-rw-r--r--net/mac80211/mlme.c1
-rw-r--r--net/mac80211/rx.c11
-rw-r--r--net/netlink/af_netlink.c54
-rw-r--r--net/nonet.c26
-rw-r--r--net/rds/message.c3
-rw-r--r--net/socket.c20
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c2
-rw-r--r--net/sunrpc/cache.c26
-rw-r--r--net/sunrpc/svc.c42
-rw-r--r--net/sunrpc/svc_xprt.c292
-rw-r--r--net/sunrpc/svcsock.c5
-rw-r--r--net/sunrpc/xdr.c9
-rw-r--r--net/wireless/chan.c9
-rw-r--r--net/wireless/nl80211.c2
-rw-r--r--net/wireless/reg.c20
-rw-r--r--scripts/Kbuild.include12
-rw-r--r--scripts/Makefile.clean10
-rw-r--r--scripts/Makefile.headersinst1
-rw-r--r--scripts/coccinelle/misc/bugon.cocci2
-rwxr-xr-xscripts/headers.sh2
-rw-r--r--scripts/kconfig/mconf.c4
-rw-r--r--scripts/kconfig/menu.c4
-rwxr-xr-xscripts/package/mkspec4
-rw-r--r--security/integrity/ima/Kconfig2
-rw-r--r--security/keys/encrypted-keys/encrypted.c5
-rw-r--r--security/keys/key.c10
-rw-r--r--sound/firewire/oxfw/oxfw-pcm.c6
-rw-r--r--sound/firewire/oxfw/oxfw-proc.c2
-rw-r--r--sound/firewire/oxfw/oxfw-stream.c3
-rw-r--r--sound/firewire/oxfw/oxfw.c2
-rw-r--r--sound/pci/asihpi/hpi_internal.h6
-rw-r--r--sound/pci/asihpi/hpi_version.h6
-rw-r--r--sound/pci/asihpi/hpidspcd.c26
-rw-r--r--sound/pci/hda/hda_controller.c2
-rw-r--r--sound/pci/hda/hda_generic.c10
-rw-r--r--sound/pci/hda/hda_generic.h9
-rw-r--r--sound/pci/hda/hda_intel.c5
-rw-r--r--sound/pci/hda/hda_sysfs.c2
-rw-r--r--sound/pci/hda/patch_analog.c42
-rw-r--r--sound/pci/hda/patch_conexant.c4
-rw-r--r--sound/pci/hda/patch_hdmi.c6
-rw-r--r--sound/pci/hda/patch_realtek.c15
-rw-r--r--sound/pci/hda/patch_via.c2
-rw-r--r--sound/soc/atmel/atmel_ssc_dai.c2
-rw-r--r--sound/soc/codecs/Kconfig2
-rw-r--r--sound/soc/codecs/cs35l32.c2
-rw-r--r--sound/soc/codecs/cs42xx8.c2
-rw-r--r--sound/soc/codecs/max98090.c2
-rw-r--r--sound/soc/codecs/pcm512x-i2c.c7
-rw-r--r--sound/soc/codecs/pcm512x.c2
-rw-r--r--sound/soc/codecs/rt5645.c4
-rw-r--r--sound/soc/codecs/tas2552.c4
-rw-r--r--sound/soc/codecs/wm2200.c2
-rw-r--r--sound/soc/codecs/wm5100.c2
-rw-r--r--sound/soc/codecs/wm8962.c2
-rw-r--r--sound/soc/fsl/fsl_asrc.c4
-rw-r--r--sound/soc/intel/sst-haswell-pcm.c4
-rw-r--r--sound/soc/intel/sst/sst_acpi.c10
-rw-r--r--sound/soc/samsung/i2s.c6
-rw-r--r--sound/usb/mixer_maps.c15
-rw-r--r--sound/usb/mixer_scarlett.c2
-rw-r--r--sound/usb/quirks.c5
-rw-r--r--tools/include/asm-generic/bitops.h27
-rw-r--r--tools/include/asm-generic/bitops/__ffs.h43
-rw-r--r--tools/include/asm-generic/bitops/__fls.h1
-rw-r--r--tools/include/asm-generic/bitops/atomic.h22
-rw-r--r--tools/include/asm-generic/bitops/find.h33
-rw-r--r--tools/include/asm-generic/bitops/fls.h1
-rw-r--r--tools/include/asm-generic/bitops/fls64.h1
-rw-r--r--tools/include/linux/bitops.h53
-rw-r--r--tools/include/linux/log2.h185
-rw-r--r--tools/lib/api/fs/fs.c34
-rw-r--r--tools/lib/api/fs/fs.h3
-rw-r--r--tools/lib/util/find_next_bit.c89
-rw-r--r--tools/perf/Documentation/perf.txt4
-rw-r--r--tools/perf/MANIFEST16
-rw-r--r--tools/perf/Makefile.perf15
-rw-r--r--tools/perf/bench/mem-memcpy.c286
-rw-r--r--tools/perf/bench/mem-memset.c304
-rw-r--r--tools/perf/builtin-buildid-cache.c13
-rw-r--r--tools/perf/builtin-kvm.c3
-rw-r--r--tools/perf/builtin-trace.c14
-rw-r--r--tools/perf/perf.c14
-rw-r--r--tools/perf/tests/attr/base-record2
-rw-r--r--tools/perf/tests/attr/base-stat2
-rw-r--r--tools/perf/ui/browsers/hists.c2
-rw-r--r--tools/perf/ui/hist.c4
-rw-r--r--tools/perf/util/build-id.c9
-rw-r--r--tools/perf/util/callchain.c2
-rw-r--r--tools/perf/util/config.c10
-rw-r--r--tools/perf/util/evlist.c57
-rw-r--r--tools/perf/util/evlist.h1
-rw-r--r--tools/perf/util/include/linux/bitops.h162
-rw-r--r--tools/perf/util/machine.c72
-rw-r--r--tools/perf/util/record.c11
-rw-r--r--tools/perf/util/srcline.c12
-rw-r--r--tools/perf/util/symbol-minimal.c8
-rw-r--r--tools/perf/util/util.c26
-rw-r--r--tools/perf/util/util.h34
-rw-r--r--tools/power/cpupower/utils/cpuidle-info.c8
-rw-r--r--tools/testing/selftests/Makefile16
-rw-r--r--tools/testing/selftests/breakpoints/breakpoint_test.c10
-rw-r--r--tools/testing/selftests/ipc/msgque.c26
-rw-r--r--tools/testing/selftests/kcmp/Makefile22
-rw-r--r--tools/testing/selftests/kcmp/kcmp_test.c27
-rw-r--r--tools/testing/selftests/kselftest.h62
-rw-r--r--tools/testing/selftests/mount/unprivileged-remount-test.c204
-rw-r--r--tools/testing/selftests/net/Makefile8
-rwxr-xr-xtools/testing/selftests/net/test_bpf.sh10
-rw-r--r--tools/testing/selftests/size/.gitignore1
-rw-r--r--tools/testing/selftests/size/Makefile12
-rw-r--r--tools/testing/selftests/size/get_size.c100
-rw-r--r--tools/testing/selftests/timers/posix_timers.c14
-rw-r--r--tools/testing/selftests/user/Makefile8
-rwxr-xr-xtools/testing/selftests/user/test_user_copy.sh10
-rw-r--r--tools/thermal/tmon/sysfs.c6
-rw-r--r--tools/virtio/Makefile2
-rw-r--r--tools/virtio/linux/virtio.h1
-rw-r--r--tools/virtio/linux/virtio_byteorder.h8
-rw-r--r--tools/virtio/linux/virtio_config.h70
-rw-r--r--tools/virtio/uapi/linux/virtio_types.h1
-rw-r--r--tools/virtio/virtio_test.c15
-rw-r--r--tools/virtio/vringh_test.c5
-rw-r--r--virt/kvm/arm/arch_timer.c30
-rw-r--r--virt/kvm/arm/vgic.c116
-rw-r--r--virt/kvm/eventfd.c7
-rw-r--r--virt/kvm/kvm_main.c133
2411 files changed, 110804 insertions, 128433 deletions
diff --git a/.gitignore b/.gitignore
index e213b27f392..ce57b79670a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -96,3 +96,6 @@ x509.genkey
# Kconfig presets
all.config
+
+# Kdevelop4
+*.kdev4
diff --git a/.mailmap b/.mailmap
index 1ad68731fb4..ada8ad696b2 100644
--- a/.mailmap
+++ b/.mailmap
@@ -17,7 +17,7 @@ Aleksey Gorelov <aleksey_gorelov@phoenix.com>
Al Viro <viro@ftp.linux.org.uk>
Al Viro <viro@zenIV.linux.org.uk>
Andreas Herrmann <aherrman@de.ibm.com>
-Andrew Morton <akpm@osdl.org>
+Andrew Morton <akpm@linux-foundation.org>
Andrew Vasquez <andrew.vasquez@qlogic.com>
Andy Adamson <andros@citi.umich.edu>
Archit Taneja <archit@ti.com>
@@ -102,6 +102,8 @@ Rudolf Marek <R.Marek@sh.cvut.cz>
Rui Saraiva <rmps@joel.ist.utl.pt>
Sachin P Sant <ssant@in.ibm.com>
Sam Ravnborg <sam@mars.ravnborg.org>
+Santosh Shilimkar <ssantosh@kernel.org>
+Santosh Shilimkar <santosh.shilimkar@oracle.org>
Sascha Hauer <s.hauer@pengutronix.de>
S.Çağlar Onur <caglar@pardus.org.tr>
Shiraz Hashim <shiraz.linux.kernel@gmail.com> <shiraz.hashim@st.com>
diff --git a/CREDITS b/CREDITS
index bb6278884f8..96935df0b6f 100644
--- a/CREDITS
+++ b/CREDITS
@@ -1197,6 +1197,13 @@ S: R. Tocantins, 89 - Cristo Rei
S: 80050-430 - Curitiba - Paraná
S: Brazil
+N: Oded Gabbay
+E: oded.gabbay@gmail.com
+D: AMD KFD maintainer
+S: 12 Shraga Raphaeli
+S: Petah-Tikva, 4906418
+S: Israel
+
N: Kumar Gala
E: galak@kernel.crashing.org
D: Embedded PowerPC 6xx/7xx/74xx/82xx/83xx/85xx support
@@ -1727,14 +1734,14 @@ S: Chapel Hill, North Carolina 27514-4818
S: USA
N: Dave Jones
-E: davej@redhat.com
+E: davej@codemonkey.org.uk
W: http://www.codemonkey.org.uk
D: Assorted VIA x86 support.
D: 2.5 AGPGART overhaul.
D: CPUFREQ maintenance.
-D: Fedora kernel maintenance.
+D: Fedora kernel maintenance (2003-2014).
+D: 'Trinity' and similar fuzz testing work.
D: Misc/Other.
-S: 314 Littleton Rd, Westford, MA 01886, USA
N: Martin Josfsson
E: gandalf@wlug.westbo.se
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index d760b0224ef..117521dbf2b 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -200,6 +200,13 @@ Description:
Raw pressure measurement from channel Y. Units after
application of scale and offset are kilopascal.
+What: /sys/bus/iio/devices/iio:deviceX/in_pressureY_input
+What: /sys/bus/iio/devices/iio:deviceX/in_pressure_input
+KernelVersion: 3.8
+Contact: linux-iio@vger.kernel.org
+Description:
+ Scaled pressure measurement from channel Y, in kilopascal.
+
What: /sys/bus/iio/devices/iio:deviceX/in_humidityrelative_raw
KernelVersion: 3.14
Contact: linux-iio@vger.kernel.org
@@ -231,6 +238,7 @@ What: /sys/bus/iio/devices/iio:deviceX/in_tempY_offset
What: /sys/bus/iio/devices/iio:deviceX/in_temp_offset
What: /sys/bus/iio/devices/iio:deviceX/in_pressureY_offset
What: /sys/bus/iio/devices/iio:deviceX/in_pressure_offset
+What: /sys/bus/iio/devices/iio:deviceX/in_humidityrelative_offset
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
@@ -251,6 +259,7 @@ Description:
What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_scale
What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_supply_scale
What: /sys/bus/iio/devices/iio:deviceX/in_voltage_scale
+What: /sys/bus/iio/devices/iio:deviceX/in_voltage-voltage_scale
What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_scale
What: /sys/bus/iio/devices/iio:deviceX/out_altvoltageY_scale
What: /sys/bus/iio/devices/iio:deviceX/in_accel_scale
@@ -266,6 +275,7 @@ What: /sys/bus/iio/devices/iio:deviceX/in_rot_from_north_magnetic_tilt_comp_sca
What: /sys/bus/iio/devices/iio:deviceX/in_rot_from_north_true_tilt_comp_scale
What: /sys/bus/iio/devices/iio:deviceX/in_pressureY_scale
What: /sys/bus/iio/devices/iio:deviceX/in_pressure_scale
+What: /sys/bus/iio/devices/iio:deviceX/in_humidityrelative_scale
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
@@ -328,6 +338,10 @@ Description:
are listed in this attribute.
What /sys/bus/iio/devices/iio:deviceX/out_voltageY_hardwaregain
+What: /sys/bus/iio/devices/iio:deviceX/in_intensity_red_hardwaregain
+What: /sys/bus/iio/devices/iio:deviceX/in_intensity_green_hardwaregain
+What: /sys/bus/iio/devices/iio:deviceX/in_intensity_blue_hardwaregain
+What: /sys/bus/iio/devices/iio:deviceX/in_intensity_clear_hardwaregain
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
@@ -1028,3 +1042,12 @@ Contact: linux-iio@vger.kernel.org
Description:
Raw value of rotation from true/magnetic north measured with
or without compensation from tilt sensors.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_currentX_raw
+KernelVersion: 3.18
+Contact: linux-iio@vger.kernel.org
+Description:
+ Raw current measurement from channel X. Units are in milliamps
+		after application of scale and offset. If no offset or scale is
+		present, the value should be treated as already processed, with
+		the unit in milliamps.
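
The entry above relies on the usual IIO sysfs convention that the processed value
is (raw + offset) * scale. A hedged userspace sketch of that computation follows;
the device index, channel number and fallback defaults are assumptions for
illustration and are not part of the patch itself.

/* Minimal sketch: apply value = (raw + offset) * scale to an IIO current channel. */
#include <stdio.h>

static double read_sysfs_double(const char *path, double fallback)
{
	FILE *f = fopen(path, "r");
	double v = fallback;

	if (f) {
		if (fscanf(f, "%lf", &v) != 1)
			v = fallback;
		fclose(f);
	}
	return v;
}

int main(void)
{
	const char *base = "/sys/bus/iio/devices/iio:device0";	/* assumed device */
	char path[256];
	double raw, offset, scale;

	snprintf(path, sizeof(path), "%s/in_current0_raw", base);
	raw = read_sysfs_double(path, 0.0);
	snprintf(path, sizeof(path), "%s/in_current0_offset", base);
	offset = read_sysfs_double(path, 0.0);	/* missing offset file -> 0 */
	snprintf(path, sizeof(path), "%s/in_current0_scale", base);
	scale = read_sysfs_double(path, 1.0);	/* missing scale file -> 1 */

	printf("current: %.3f mA\n", (raw + offset) * scale);
	return 0;
}

If neither offset nor scale is present, the raw file already holds the processed
value in milliamps, which is exactly what the fallbacks above reduce to.
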
diff --git a/Documentation/ABI/testing/sysfs-platform-dell-laptop b/Documentation/ABI/testing/sysfs-platform-dell-laptop
new file mode 100644
index 00000000000..7969443ef0e
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-dell-laptop
@@ -0,0 +1,60 @@
+What: /sys/class/leds/dell::kbd_backlight/als_setting
+Date: December 2014
+KernelVersion: 3.19
+Contact: Gabriele Mazzotta <gabriele.mzt@gmail.com>,
+ Pali Rohár <pali.rohar@gmail.com>
+Description:
+		This file controls the automatic keyboard illumination
+		mode on some systems that have an ambient light sensor.
+		Write 1 to this file to enable the automatic mode, 0 to
+		disable it.
+
+What: /sys/class/leds/dell::kbd_backlight/start_triggers
+Date: December 2014
+KernelVersion: 3.19
+Contact: Gabriele Mazzotta <gabriele.mzt@gmail.com>,
+ Pali Rohár <pali.rohar@gmail.com>
+Description:
+		This file controls the input triggers that turn the
+		keyboard backlight back on after it has been disabled
+		because of inactivity.
+		Read the file to see the available triggers. The ones
+		enabled are preceded by '+', those disabled by '-'.
+
+ To enable a trigger, write its name preceded by '+' to
+ this file. To disable a trigger, write its name preceded
+ by '-' instead.
+
+		For example, to enable the keyboard as a trigger, run:
+ echo +keyboard > /sys/class/leds/dell::kbd_backlight/start_triggers
+ To disable it:
+ echo -keyboard > /sys/class/leds/dell::kbd_backlight/start_triggers
+
+ Note that not all the available triggers can be configured.
+
+What: /sys/class/leds/dell::kbd_backlight/stop_timeout
+Date: December 2014
+KernelVersion: 3.19
+Contact: Gabriele Mazzotta <gabriele.mzt@gmail.com>,
+ Pali Rohár <pali.rohar@gmail.com>
+Description:
+		This file specifies the interval after which the
+		keyboard illumination is disabled because of inactivity.
+ The timeouts are expressed in seconds, minutes, hours and
+ days, for which the symbols are 's', 'm', 'h' and 'd'
+ respectively.
+
+		To configure the timeout, write a value to this file along
+		with any of the above units. If no unit is specified, the
+		value is assumed to be expressed in seconds.
+
+		For example, to set the timeout to 10 minutes, run:
+ echo 10m > /sys/class/leds/dell::kbd_backlight/stop_timeout
+
+ Note that when this file is read, the returned value might be
+ expressed in a different unit than the one used when the timeout
+ was set.
+
+ Also note that only some timeouts are supported and that
+ some systems might fall back to a specific timeout in case
+ an invalid timeout is written to this file.
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index be35bc328b7..4b592ffbafe 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -492,10 +492,10 @@ char *date;</synopsis>
<sect2>
<title>The Translation Table Manager (TTM)</title>
<para>
- TTM design background and information belongs here.
+ TTM design background and information belongs here.
</para>
<sect3>
- <title>TTM initialization</title>
+ <title>TTM initialization</title>
<warning><para>This section is outdated.</para></warning>
<para>
Drivers wishing to support TTM must fill out a drm_bo_driver
@@ -503,42 +503,42 @@ char *date;</synopsis>
pointers for initializing the TTM, allocating and freeing memory,
waiting for command completion and fence synchronization, and memory
migration. See the radeon_ttm.c file for an example of usage.
- </para>
- <para>
- The ttm_global_reference structure is made up of several fields:
- </para>
- <programlisting>
- struct ttm_global_reference {
- enum ttm_global_types global_type;
- size_t size;
- void *object;
- int (*init) (struct ttm_global_reference *);
- void (*release) (struct ttm_global_reference *);
- };
- </programlisting>
- <para>
- There should be one global reference structure for your memory
- manager as a whole, and there will be others for each object
- created by the memory manager at runtime. Your global TTM should
- have a type of TTM_GLOBAL_TTM_MEM. The size field for the global
- object should be sizeof(struct ttm_mem_global), and the init and
- release hooks should point at your driver-specific init and
- release routines, which probably eventually call
- ttm_mem_global_init and ttm_mem_global_release, respectively.
- </para>
- <para>
- Once your global TTM accounting structure is set up and initialized
- by calling ttm_global_item_ref() on it,
- you need to create a buffer object TTM to
- provide a pool for buffer object allocation by clients and the
- kernel itself. The type of this object should be TTM_GLOBAL_TTM_BO,
- and its size should be sizeof(struct ttm_bo_global). Again,
- driver-specific init and release functions may be provided,
- likely eventually calling ttm_bo_global_init() and
- ttm_bo_global_release(), respectively. Also, like the previous
- object, ttm_global_item_ref() is used to create an initial reference
- count for the TTM, which will call your initialization function.
- </para>
+ </para>
+ <para>
+ The ttm_global_reference structure is made up of several fields:
+ </para>
+ <programlisting>
+ struct ttm_global_reference {
+ enum ttm_global_types global_type;
+ size_t size;
+ void *object;
+ int (*init) (struct ttm_global_reference *);
+ void (*release) (struct ttm_global_reference *);
+ };
+ </programlisting>
+ <para>
+ There should be one global reference structure for your memory
+ manager as a whole, and there will be others for each object
+ created by the memory manager at runtime. Your global TTM should
+ have a type of TTM_GLOBAL_TTM_MEM. The size field for the global
+ object should be sizeof(struct ttm_mem_global), and the init and
+ release hooks should point at your driver-specific init and
+ release routines, which probably eventually call
+ ttm_mem_global_init and ttm_mem_global_release, respectively.
+ </para>
+ <para>
+ Once your global TTM accounting structure is set up and initialized
+ by calling ttm_global_item_ref() on it,
+ you need to create a buffer object TTM to
+ provide a pool for buffer object allocation by clients and the
+ kernel itself. The type of this object should be TTM_GLOBAL_TTM_BO,
+ and its size should be sizeof(struct ttm_bo_global). Again,
+ driver-specific init and release functions may be provided,
+ likely eventually calling ttm_bo_global_init() and
+ ttm_bo_global_release(), respectively. Also, like the previous
+ object, ttm_global_item_ref() is used to create an initial reference
+ count for the TTM, which will call your initialization function.
+ </para>
</sect3>
</sect2>
<sect2 id="drm-gem">
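
The TTM initialization text in the hunk above can be condensed into a short
sketch. The fragment below is a hedged illustration assembled only from the
names that text mentions (ttm_global_reference, TTM_GLOBAL_TTM_MEM,
ttm_mem_global_init/release, ttm_global_item_ref), so it reflects the older TTM
interface the section itself flags as outdated; struct my_driver and its
mem_global_ref field are hypothetical containers, not kernel API.

/* Sketch only: one global reference for the TTM memory accounting object. */
static int my_ttm_mem_global_init(struct ttm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void my_ttm_mem_global_release(struct ttm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

static int my_driver_global_init(struct my_driver *drv)
{
	drv->mem_global_ref.global_type = TTM_GLOBAL_TTM_MEM;
	drv->mem_global_ref.size = sizeof(struct ttm_mem_global);
	drv->mem_global_ref.init = &my_ttm_mem_global_init;
	drv->mem_global_ref.release = &my_ttm_mem_global_release;

	/* Takes the initial reference and invokes the init hook above. */
	return ttm_global_item_ref(&drv->mem_global_ref);
}
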
@@ -566,19 +566,19 @@ char *date;</synopsis>
using driver-specific ioctls.
</para>
<para>
- On a fundamental level, GEM involves several operations:
- <itemizedlist>
- <listitem>Memory allocation and freeing</listitem>
- <listitem>Command execution</listitem>
- <listitem>Aperture management at command execution time</listitem>
- </itemizedlist>
- Buffer object allocation is relatively straightforward and largely
+ On a fundamental level, GEM involves several operations:
+ <itemizedlist>
+ <listitem>Memory allocation and freeing</listitem>
+ <listitem>Command execution</listitem>
+ <listitem>Aperture management at command execution time</listitem>
+ </itemizedlist>
+ Buffer object allocation is relatively straightforward and largely
provided by Linux's shmem layer, which provides memory to back each
object.
</para>
<para>
Device-specific operations, such as command execution, pinning, buffer
- read &amp; write, mapping, and domain ownership transfers are left to
+ read &amp; write, mapping, and domain ownership transfers are left to
driver-specific ioctls.
</para>
<sect3>
@@ -738,16 +738,16 @@ char *date;</synopsis>
respectively. The conversion is handled by the DRM core without any
driver-specific support.
</para>
- <para>
- GEM also supports buffer sharing with dma-buf file descriptors through
- PRIME. GEM-based drivers must use the provided helpers functions to
- implement the exporting and importing correctly. See <xref linkend="drm-prime-support" />.
- Since sharing file descriptors is inherently more secure than the
- easily guessable and global GEM names it is the preferred buffer
- sharing mechanism. Sharing buffers through GEM names is only supported
- for legacy userspace. Furthermore PRIME also allows cross-device
- buffer sharing since it is based on dma-bufs.
- </para>
+ <para>
+ GEM also supports buffer sharing with dma-buf file descriptors through
+      PRIME. GEM-based drivers must use the provided helper functions to
+ implement the exporting and importing correctly. See <xref linkend="drm-prime-support" />.
+ Since sharing file descriptors is inherently more secure than the
+ easily guessable and global GEM names it is the preferred buffer
+ sharing mechanism. Sharing buffers through GEM names is only supported
+ for legacy userspace. Furthermore PRIME also allows cross-device
+ buffer sharing since it is based on dma-bufs.
+ </para>
</sect3>
<sect3 id="drm-gem-objects-mapping">
<title>GEM Objects Mapping</title>
@@ -852,7 +852,7 @@ char *date;</synopsis>
<sect3>
<title>Command Execution</title>
<para>
- Perhaps the most important GEM function for GPU devices is providing a
+ Perhaps the most important GEM function for GPU devices is providing a
command execution interface to clients. Client programs construct
command buffers containing references to previously allocated memory
objects, and then submit them to GEM. At that point, GEM takes care to
@@ -874,95 +874,101 @@ char *date;</synopsis>
<title>GEM Function Reference</title>
!Edrivers/gpu/drm/drm_gem.c
</sect3>
- </sect2>
- <sect2>
- <title>VMA Offset Manager</title>
+ </sect2>
+ <sect2>
+ <title>VMA Offset Manager</title>
!Pdrivers/gpu/drm/drm_vma_manager.c vma offset manager
!Edrivers/gpu/drm/drm_vma_manager.c
!Iinclude/drm/drm_vma_manager.h
- </sect2>
- <sect2 id="drm-prime-support">
- <title>PRIME Buffer Sharing</title>
- <para>
- PRIME is the cross device buffer sharing framework in drm, originally
- created for the OPTIMUS range of multi-gpu platforms. To userspace
- PRIME buffers are dma-buf based file descriptors.
- </para>
- <sect3>
- <title>Overview and Driver Interface</title>
- <para>
- Similar to GEM global names, PRIME file descriptors are
- also used to share buffer objects across processes. They offer
- additional security: as file descriptors must be explicitly sent over
- UNIX domain sockets to be shared between applications, they can't be
- guessed like the globally unique GEM names.
- </para>
- <para>
- Drivers that support the PRIME
- API must set the DRIVER_PRIME bit in the struct
- <structname>drm_driver</structname>
- <structfield>driver_features</structfield> field, and implement the
- <methodname>prime_handle_to_fd</methodname> and
- <methodname>prime_fd_to_handle</methodname> operations.
- </para>
- <para>
- <synopsis>int (*prime_handle_to_fd)(struct drm_device *dev,
- struct drm_file *file_priv, uint32_t handle,
- uint32_t flags, int *prime_fd);
+ </sect2>
+ <sect2 id="drm-prime-support">
+ <title>PRIME Buffer Sharing</title>
+ <para>
+ PRIME is the cross device buffer sharing framework in drm, originally
+ created for the OPTIMUS range of multi-gpu platforms. To userspace
+ PRIME buffers are dma-buf based file descriptors.
+ </para>
+ <sect3>
+ <title>Overview and Driver Interface</title>
+ <para>
+ Similar to GEM global names, PRIME file descriptors are
+ also used to share buffer objects across processes. They offer
+ additional security: as file descriptors must be explicitly sent over
+ UNIX domain sockets to be shared between applications, they can't be
+ guessed like the globally unique GEM names.
+ </para>
+ <para>
+ Drivers that support the PRIME
+ API must set the DRIVER_PRIME bit in the struct
+ <structname>drm_driver</structname>
+ <structfield>driver_features</structfield> field, and implement the
+ <methodname>prime_handle_to_fd</methodname> and
+ <methodname>prime_fd_to_handle</methodname> operations.
+ </para>
+ <para>
+ <synopsis>int (*prime_handle_to_fd)(struct drm_device *dev,
+ struct drm_file *file_priv, uint32_t handle,
+ uint32_t flags, int *prime_fd);
int (*prime_fd_to_handle)(struct drm_device *dev,
- struct drm_file *file_priv, int prime_fd,
- uint32_t *handle);</synopsis>
- Those two operations convert a handle to a PRIME file descriptor and
- vice versa. Drivers must use the kernel dma-buf buffer sharing framework
- to manage the PRIME file descriptors. Similar to the mode setting
- API PRIME is agnostic to the underlying buffer object manager, as
- long as handles are 32bit unsigned integers.
- </para>
- <para>
- While non-GEM drivers must implement the operations themselves, GEM
- drivers must use the <function>drm_gem_prime_handle_to_fd</function>
- and <function>drm_gem_prime_fd_to_handle</function> helper functions.
- Those helpers rely on the driver
- <methodname>gem_prime_export</methodname> and
- <methodname>gem_prime_import</methodname> operations to create a dma-buf
- instance from a GEM object (dma-buf exporter role) and to create a GEM
- object from a dma-buf instance (dma-buf importer role).
- </para>
- <para>
- <synopsis>struct dma_buf * (*gem_prime_export)(struct drm_device *dev,
- struct drm_gem_object *obj,
- int flags);
+ struct drm_file *file_priv, int prime_fd,
+ uint32_t *handle);</synopsis>
+ Those two operations convert a handle to a PRIME file descriptor and
+ vice versa. Drivers must use the kernel dma-buf buffer sharing framework
+ to manage the PRIME file descriptors. Similar to the mode setting
+ API PRIME is agnostic to the underlying buffer object manager, as
+ long as handles are 32bit unsigned integers.
+ </para>
+ <para>
+ While non-GEM drivers must implement the operations themselves, GEM
+ drivers must use the <function>drm_gem_prime_handle_to_fd</function>
+ and <function>drm_gem_prime_fd_to_handle</function> helper functions.
+ Those helpers rely on the driver
+ <methodname>gem_prime_export</methodname> and
+ <methodname>gem_prime_import</methodname> operations to create a dma-buf
+ instance from a GEM object (dma-buf exporter role) and to create a GEM
+ object from a dma-buf instance (dma-buf importer role).
+ </para>
+ <para>
+ <synopsis>struct dma_buf * (*gem_prime_export)(struct drm_device *dev,
+ struct drm_gem_object *obj,
+ int flags);
struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
- struct dma_buf *dma_buf);</synopsis>
- These two operations are mandatory for GEM drivers that support
- PRIME.
- </para>
- </sect3>
- <sect3>
- <title>PRIME Helper Functions</title>
-!Pdrivers/gpu/drm/drm_prime.c PRIME Helpers
+ struct dma_buf *dma_buf);</synopsis>
+ These two operations are mandatory for GEM drivers that support
+ PRIME.
+ </para>
</sect3>
- </sect2>
- <sect2>
- <title>PRIME Function References</title>
+ <sect3>
+ <title>PRIME Helper Functions</title>
+!Pdrivers/gpu/drm/drm_prime.c PRIME Helpers
+ </sect3>
+ </sect2>
+ <sect2>
+ <title>PRIME Function References</title>
!Edrivers/gpu/drm/drm_prime.c
- </sect2>
- <sect2>
- <title>DRM MM Range Allocator</title>
- <sect3>
- <title>Overview</title>
+ </sect2>
+ <sect2>
+ <title>DRM MM Range Allocator</title>
+ <sect3>
+ <title>Overview</title>
!Pdrivers/gpu/drm/drm_mm.c Overview
- </sect3>
- <sect3>
- <title>LRU Scan/Eviction Support</title>
+ </sect3>
+ <sect3>
+ <title>LRU Scan/Eviction Support</title>
!Pdrivers/gpu/drm/drm_mm.c lru scan roaster
- </sect3>
+ </sect3>
</sect2>
- <sect2>
- <title>DRM MM Range Allocator Function References</title>
+ <sect2>
+ <title>DRM MM Range Allocator Function References</title>
!Edrivers/gpu/drm/drm_mm.c
!Iinclude/drm/drm_mm.h
- </sect2>
+ </sect2>
+ <sect2>
+ <title>CMA Helper Functions Reference</title>
+!Pdrivers/gpu/drm/drm_gem_cma_helper.c cma helpers
+!Edrivers/gpu/drm/drm_gem_cma_helper.c
+!Iinclude/drm/drm_gem_cma_helper.h
+ </sect2>
</sect1>
<!-- Internals: mode setting -->
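
The PRIME paragraphs reindented above name one feature flag, two driver fields
and two helper functions. A minimal sketch of how a GEM driver might wire them
together follows; the foo_* symbols and any omitted drm_driver fields are
placeholders, not taken from the patch.

/* Placeholder driver hooks; their bodies are driver-specific (see text above). */
extern struct dma_buf *foo_gem_prime_export(struct drm_device *dev,
					    struct drm_gem_object *obj, int flags);
extern struct drm_gem_object *foo_gem_prime_import(struct drm_device *dev,
						   struct dma_buf *dma_buf);

static struct drm_driver foo_drm_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_PRIME,

	/* handle <-> PRIME fd conversion via the generic GEM helpers */
	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,

	/* exporter/importer used by those helpers */
	.gem_prime_export	= foo_gem_prime_export,
	.gem_prime_import	= foo_gem_prime_import,

	/* name, fops, ioctls and the remaining GEM hooks are elided */
};
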
@@ -996,6 +1002,10 @@ int max_width, max_height;</synopsis>
!Edrivers/gpu/drm/drm_modes.c
</sect2>
<sect2>
+ <title>Atomic Mode Setting Function Reference</title>
+!Edrivers/gpu/drm/drm_atomic.c
+ </sect2>
+ <sect2>
<title>Frame Buffer Creation</title>
<synopsis>struct drm_framebuffer *(*fb_create)(struct drm_device *dev,
struct drm_file *file_priv,
@@ -1827,6 +1837,10 @@ void intel_crt_init(struct drm_device *dev)
!Edrivers/gpu/drm/drm_crtc.c
</sect2>
<sect2>
+ <title>KMS Data Structures</title>
+!Iinclude/drm/drm_crtc.h
+ </sect2>
+ <sect2>
<title>KMS Locking</title>
!Pdrivers/gpu/drm/drm_modeset_lock.c kms locking
!Iinclude/drm/drm_modeset_lock.h
@@ -1933,10 +1947,16 @@ void intel_crt_init(struct drm_device *dev)
and then retrieves a list of modes by calling the connector
<methodname>get_modes</methodname> helper operation.
</para>
+ <para>
+ If the helper operation returns no mode, and if the connector status
+ is connector_status_connected, standard VESA DMT modes up to
+ 1024x768 are automatically added to the modes list by a call to
+ <function>drm_add_modes_noedid</function>.
+ </para>
<para>
- The function filters out modes larger than
+ The function then filters out modes larger than
<parameter>max_width</parameter> and <parameter>max_height</parameter>
- if specified. It then calls the optional connector
+ if specified. It finally calls the optional connector
<methodname>mode_valid</methodname> helper operation for each mode in
the probed list to check whether the mode is valid for the connector.
</para>
@@ -2076,12 +2096,20 @@ void intel_crt_init(struct drm_device *dev)
<synopsis>int (*get_modes)(struct drm_connector *connector);</synopsis>
<para>
Fill the connector's <structfield>probed_modes</structfield> list
- by parsing EDID data with <function>drm_add_edid_modes</function> or
- calling <function>drm_mode_probed_add</function> directly for every
+ by parsing EDID data with <function>drm_add_edid_modes</function>,
+ adding standard VESA DMT modes with <function>drm_add_modes_noedid</function>,
+ or calling <function>drm_mode_probed_add</function> directly for every
supported mode and return the number of modes it has detected. This
operation is mandatory.
</para>
<para>
+ Note that the caller function will automatically add standard VESA
+ DMT modes up to 1024x768 if the <methodname>get_modes</methodname>
+ helper operation returns no mode and if the connector status is
+ connector_status_connected. There is no need to call
+ <function>drm_add_edid_modes</function> manually in that case.
+ </para>
+ <para>
When adding modes manually the driver creates each mode with a call to
<function>drm_mode_create</function> and must fill the following fields.
<itemizedlist>
@@ -2278,7 +2306,7 @@ void intel_crt_init(struct drm_device *dev)
<function>drm_helper_probe_single_connector_modes</function>.
</para>
<para>
- When parsing EDID data, <function>drm_add_edid_modes</function> fill the
+ When parsing EDID data, <function>drm_add_edid_modes</function> fills the
connector <structfield>display_info</structfield>
<structfield>width_mm</structfield> and
<structfield>height_mm</structfield> fields. When creating modes
@@ -2316,8 +2344,26 @@ void intel_crt_init(struct drm_device *dev)
</itemizedlist>
</sect2>
<sect2>
+ <title>Atomic Modeset Helper Functions Reference</title>
+ <sect3>
+ <title>Overview</title>
+!Pdrivers/gpu/drm/drm_atomic_helper.c overview
+ </sect3>
+ <sect3>
+ <title>Implementing Asynchronous Atomic Commit</title>
+!Pdrivers/gpu/drm/drm_atomic_helper.c implementing async commit
+ </sect3>
+ <sect3>
+ <title>Atomic State Reset and Initialization</title>
+!Pdrivers/gpu/drm/drm_atomic_helper.c atomic state reset and initialization
+ </sect3>
+!Iinclude/drm/drm_atomic_helper.h
+!Edrivers/gpu/drm/drm_atomic_helper.c
+ </sect2>
+ <sect2>
<title>Modeset Helper Functions Reference</title>
!Edrivers/gpu/drm/drm_crtc_helper.c
+!Pdrivers/gpu/drm/drm_crtc_helper.c overview
</sect2>
<sect2>
<title>Output Probing Helper Functions Reference</title>
@@ -2343,6 +2389,12 @@ void intel_crt_init(struct drm_device *dev)
!Edrivers/gpu/drm/drm_dp_mst_topology.c
</sect2>
<sect2>
+ <title>MIPI DSI Helper Functions Reference</title>
+!Pdrivers/gpu/drm/drm_mipi_dsi.c dsi helpers
+!Iinclude/drm/drm_mipi_dsi.h
+!Edrivers/gpu/drm/drm_mipi_dsi.c
+ </sect2>
+ <sect2>
<title>EDID Helper Functions Reference</title>
!Edrivers/gpu/drm/drm_edid.c
</sect2>
@@ -2371,7 +2423,12 @@ void intel_crt_init(struct drm_device *dev)
</sect2>
<sect2>
<title id="drm-kms-planehelpers">Plane Helper Reference</title>
-!Edrivers/gpu/drm/drm_plane_helper.c Plane Helpers
+!Edrivers/gpu/drm/drm_plane_helper.c
+!Pdrivers/gpu/drm/drm_plane_helper.c overview
+ </sect2>
+ <sect2>
+ <title>Tile group</title>
+!Pdrivers/gpu/drm/drm_crtc.c Tile group
</sect2>
</sect1>
@@ -2507,8 +2564,8 @@ void intel_crt_init(struct drm_device *dev)
<td valign="top" >Description/Restrictions</td>
</tr>
<tr>
- <td rowspan="21" valign="top" >DRM</td>
- <td rowspan="2" valign="top" >Generic</td>
+ <td rowspan="25" valign="top" >DRM</td>
+ <td rowspan="4" valign="top" >Generic</td>
<td valign="top" >“EDID”</td>
<td valign="top" >BLOB | IMMUTABLE</td>
<td valign="top" >0</td>
@@ -2523,6 +2580,20 @@ void intel_crt_init(struct drm_device *dev)
<td valign="top" >Contains DPMS operation mode value.</td>
</tr>
<tr>
+ <td valign="top" >“PATH”</td>
+ <td valign="top" >BLOB | IMMUTABLE</td>
+ <td valign="top" >0</td>
+ <td valign="top" >Connector</td>
+ <td valign="top" >Contains topology path to a connector.</td>
+ </tr>
+ <tr>
+ <td valign="top" >“TILE”</td>
+ <td valign="top" >BLOB | IMMUTABLE</td>
+ <td valign="top" >0</td>
+ <td valign="top" >Connector</td>
+ <td valign="top" >Contains tiling information for a connector.</td>
+ </tr>
+ <tr>
<td rowspan="1" valign="top" >Plane</td>
<td valign="top" >“type”</td>
<td valign="top" >ENUM | IMMUTABLE</td>
@@ -2638,6 +2709,21 @@ void intel_crt_init(struct drm_device *dev)
<td valign="top" >TBD</td>
</tr>
<tr>
+ <td rowspan="2" valign="top" >Virtual GPU</td>
+ <td valign="top" >“suggested X”</td>
+ <td valign="top" >RANGE</td>
+ <td valign="top" >Min=0, Max=0xffffffff</td>
+ <td valign="top" >Connector</td>
+ <td valign="top" >property to suggest an X offset for a connector</td>
+ </tr>
+ <tr>
+ <td valign="top" >“suggested Y”</td>
+ <td valign="top" >RANGE</td>
+ <td valign="top" >Min=0, Max=0xffffffff</td>
+ <td valign="top" >Connector</td>
+	<td valign="top" >property to suggest a Y offset for a connector</td>
+ </tr>
+ <tr>
<td rowspan="3" valign="top" >Optional</td>
<td valign="top" >“scaling mode”</td>
<td valign="top" >ENUM</td>
@@ -3788,6 +3874,26 @@ int num_ioctls;</synopsis>
those have basic support through the gma500 drm driver.
</para>
<sect1>
+ <title>Core Driver Infrastructure</title>
+ <para>
+ This section covers core driver infrastructure used by both the display
+ and the GEM parts of the driver.
+ </para>
+ <sect2>
+ <title>Runtime Power Management</title>
+!Pdrivers/gpu/drm/i915/intel_runtime_pm.c runtime pm
+!Idrivers/gpu/drm/i915/intel_runtime_pm.c
+ </sect2>
+ <sect2>
+ <title>Interrupt Handling</title>
+!Pdrivers/gpu/drm/i915/i915_irq.c interrupt handling
+!Fdrivers/gpu/drm/i915/i915_irq.c intel_irq_init intel_irq_init_hw intel_hpd_init
+!Fdrivers/gpu/drm/i915/i915_irq.c intel_irq_fini
+!Fdrivers/gpu/drm/i915/i915_irq.c intel_runtime_pm_disable_interrupts
+!Fdrivers/gpu/drm/i915/i915_irq.c intel_runtime_pm_enable_interrupts
+ </sect2>
+ </sect1>
+ <sect1>
<title>Display Hardware Handling</title>
<para>
This section covers everything related to the display hardware including
@@ -3804,6 +3910,18 @@ int num_ioctls;</synopsis>
</para>
</sect2>
<sect2>
+ <title>Frontbuffer Tracking</title>
+!Pdrivers/gpu/drm/i915/intel_frontbuffer.c frontbuffer tracking
+!Idrivers/gpu/drm/i915/intel_frontbuffer.c
+!Fdrivers/gpu/drm/i915/intel_drv.h intel_frontbuffer_flip
+!Fdrivers/gpu/drm/i915/i915_gem.c i915_gem_track_fb
+ </sect2>
+ <sect2>
+ <title>Display FIFO Underrun Reporting</title>
+!Pdrivers/gpu/drm/i915/intel_fifo_underrun.c fifo underrun handling
+!Idrivers/gpu/drm/i915/intel_fifo_underrun.c
+ </sect2>
+ <sect2>
<title>Plane Configuration</title>
<para>
This section covers plane configuration and composition with the
@@ -3823,6 +3941,16 @@ int num_ioctls;</synopsis>
</para>
</sect2>
<sect2>
+ <title>High Definition Audio</title>
+!Pdrivers/gpu/drm/i915/intel_audio.c High Definition Audio over HDMI and Display Port
+!Idrivers/gpu/drm/i915/intel_audio.c
+ </sect2>
+ <sect2>
+ <title>Panel Self Refresh PSR (PSR/SRD)</title>
+!Pdrivers/gpu/drm/i915/intel_psr.c Panel Self Refresh (PSR/SRD)
+!Idrivers/gpu/drm/i915/intel_psr.c
+ </sect2>
+ <sect2>
<title>DPIO</title>
!Pdrivers/gpu/drm/i915/i915_reg.h DPIO
<table id="dpiox2">
@@ -3931,6 +4059,28 @@ int num_ioctls;</synopsis>
!Idrivers/gpu/drm/i915/intel_lrc.c
</sect2>
</sect1>
+
+ <sect1>
+ <title> Tracing </title>
+ <para>
+    This section covers all things related to the tracepoints implemented in
+ the i915 driver.
+ </para>
+ <sect2>
+ <title> i915_ppgtt_create and i915_ppgtt_release </title>
+!Pdrivers/gpu/drm/i915/i915_trace.h i915_ppgtt_create and i915_ppgtt_release tracepoints
+ </sect2>
+ <sect2>
+ <title> i915_context_create and i915_context_free </title>
+!Pdrivers/gpu/drm/i915/i915_trace.h i915_context_create and i915_context_free tracepoints
+ </sect2>
+ <sect2>
+ <title> switch_mm </title>
+!Pdrivers/gpu/drm/i915/i915_trace.h switch_mm tracepoint
+ </sect2>
+ </sect1>
+
</chapter>
+!Cdrivers/gpu/drm/i915/i915_irq.c
</part>
</book>
diff --git a/Documentation/DocBook/media/v4l/compat.xml b/Documentation/DocBook/media/v4l/compat.xml
index 0a2debfa68f..350dfb3d71e 100644
--- a/Documentation/DocBook/media/v4l/compat.xml
+++ b/Documentation/DocBook/media/v4l/compat.xml
@@ -2579,6 +2579,18 @@ fields changed from _s32 to _u32.
</orderedlist>
</section>
+ <section>
+ <title>V4L2 in Linux 3.19</title>
+ <orderedlist>
+ <listitem>
+ <para>Rewrote Colorspace chapter, added new &v4l2-ycbcr-encoding;
+and &v4l2-quantization; fields to &v4l2-pix-format;, &v4l2-pix-format-mplane;
+and &v4l2-mbus-framefmt;.
+ </para>
+ </listitem>
+ </orderedlist>
+ </section>
+
<section id="other">
<title>Relation of V4L2 to other Linux multimedia APIs</title>
diff --git a/Documentation/DocBook/media/v4l/pixfmt.xml b/Documentation/DocBook/media/v4l/pixfmt.xml
index ccf6053c1ae..d5eca4b8f74 100644
--- a/Documentation/DocBook/media/v4l/pixfmt.xml
+++ b/Documentation/DocBook/media/v4l/pixfmt.xml
@@ -138,9 +138,25 @@ applicable values.</para></entry>
<row>
<entry>__u32</entry>
<entry><structfield>flags</structfield></entry>
- <entry>Flags set by the application or driver, see <xref
+ <entry>Flags set by the application or driver, see <xref
linkend="format-flags" />.</entry>
</row>
+ <row>
+ <entry>&v4l2-ycbcr-encoding;</entry>
+ <entry><structfield>ycbcr_enc</structfield></entry>
+ <entry>This information supplements the
+<structfield>colorspace</structfield> and must be set by the driver for
+capture streams and by the application for output streams,
+see <xref linkend="colorspaces" />.</entry>
+ </row>
+ <row>
+ <entry>&v4l2-quantization;</entry>
+ <entry><structfield>quantization</structfield></entry>
+ <entry>This information supplements the
+<structfield>colorspace</structfield> and must be set by the driver for
+capture streams and by the application for output streams,
+see <xref linkend="colorspaces" />.</entry>
+ </row>
</tbody>
</tgroup>
</table>
@@ -232,9 +248,25 @@ codes can be used.</entry>
<entry>Flags set by the application or driver, see <xref
linkend="format-flags" />.</entry>
</row>
+ <row>
+ <entry>&v4l2-ycbcr-encoding;</entry>
+ <entry><structfield>ycbcr_enc</structfield></entry>
+ <entry>This information supplements the
+<structfield>colorspace</structfield> and must be set by the driver for
+capture streams and by the application for output streams,
+see <xref linkend="colorspaces" />.</entry>
+ </row>
+ <row>
+ <entry>&v4l2-quantization;</entry>
+ <entry><structfield>quantization</structfield></entry>
+ <entry>This information supplements the
+<structfield>colorspace</structfield> and must be set by the driver for
+capture streams and by the application for output streams,
+see <xref linkend="colorspaces" />.</entry>
+ </row>
<row>
<entry>__u8</entry>
- <entry><structfield>reserved[10]</structfield></entry>
+ <entry><structfield>reserved[8]</structfield></entry>
<entry>Reserved for future extensions. Should be zeroed by the
application.</entry>
</row>
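
The ycbcr_enc and quantization fields documented above are set by the
application for output streams. A hedged userspace sketch of what that looks
like with VIDIOC_S_FMT follows; the device path, resolution, pixel format and
the particular encoding/quantization values are illustrative assumptions, not
mandated by the patch.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int set_output_format(const char *dev)
{
	struct v4l2_format fmt;
	int fd, ret;

	fd = open(dev, O_RDWR);
	if (fd < 0)
		return -1;

	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	fmt.fmt.pix.width = 1280;
	fmt.fmt.pix.height = 720;
	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_NV12;
	fmt.fmt.pix.field = V4L2_FIELD_NONE;
	fmt.fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
	/* The two fields below are the ones added in 3.19. */
	fmt.fmt.pix.ycbcr_enc = V4L2_YCBCR_ENC_709;
	fmt.fmt.pix.quantization = V4L2_QUANTIZATION_LIM_RANGE;

	ret = ioctl(fd, VIDIOC_S_FMT, &fmt);
	close(fd);
	return ret;
}

For capture streams the driver fills these fields instead, and the application
simply reads them back after VIDIOC_G_FMT.
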
diff --git a/Documentation/DocBook/media/v4l/subdev-formats.xml b/Documentation/DocBook/media/v4l/subdev-formats.xml
index 18730b96e1e..c5ea868e390 100644
--- a/Documentation/DocBook/media/v4l/subdev-formats.xml
+++ b/Documentation/DocBook/media/v4l/subdev-formats.xml
@@ -34,8 +34,24 @@
<xref linkend="colorspaces" /> for details.</entry>
</row>
<row>
+ <entry>&v4l2-ycbcr-encoding;</entry>
+ <entry><structfield>ycbcr_enc</structfield></entry>
+ <entry>This information supplements the
+<structfield>colorspace</structfield> and must be set by the driver for
+capture streams and by the application for output streams,
+see <xref linkend="colorspaces" />.</entry>
+ </row>
+ <row>
+ <entry>&v4l2-quantization;</entry>
+ <entry><structfield>quantization</structfield></entry>
+ <entry>This information supplements the
+<structfield>colorspace</structfield> and must be set by the driver for
+capture streams and by the application for output streams,
+see <xref linkend="colorspaces" />.</entry>
+ </row>
+ <row>
<entry>__u32</entry>
- <entry><structfield>reserved</structfield>[7]</entry>
+ <entry><structfield>reserved</structfield>[6]</entry>
<entry>Reserved for future extensions. Applications and drivers must
set the array to zero.</entry>
</row>
diff --git a/Documentation/DocBook/media/v4l/v4l2.xml b/Documentation/DocBook/media/v4l/v4l2.xml
index 7cfe618f754..ac0f8d9d2a4 100644
--- a/Documentation/DocBook/media/v4l/v4l2.xml
+++ b/Documentation/DocBook/media/v4l/v4l2.xml
@@ -152,6 +152,15 @@ structs, ioctls) must be noted in more detail in the history chapter
applications. -->
<revision>
+ <revnumber>3.19</revnumber>
+ <date>2014-12-05</date>
+ <authorinitials>hv</authorinitials>
+ <revremark>Rewrote Colorspace chapter, added new &v4l2-ycbcr-encoding; and &v4l2-quantization; fields
+to &v4l2-pix-format;, &v4l2-pix-format-mplane; and &v4l2-mbus-framefmt;.
+ </revremark>
+ </revision>
+
+ <revision>
<revnumber>3.17</revnumber>
<date>2014-08-04</date>
<authorinitials>lp, hv</authorinitials>
@@ -539,7 +548,7 @@ and discussions on the V4L mailing list.</revremark>
</partinfo>
<title>Video for Linux Two API Specification</title>
- <subtitle>Revision 3.17</subtitle>
+ <subtitle>Revision 3.19</subtitle>
<chapter id="common">
&sub-common;
diff --git a/Documentation/devicetree/bindings/arm/gic-v3.txt b/Documentation/devicetree/bindings/arm/gic-v3.txt
index 33cd05e6c12..ddfade40ac5 100644
--- a/Documentation/devicetree/bindings/arm/gic-v3.txt
+++ b/Documentation/devicetree/bindings/arm/gic-v3.txt
@@ -49,11 +49,29 @@ Optional
occupied by the redistributors. Required if more than one such
region is present.
+Sub-nodes:
+
+GICv3 has one or more Interrupt Translation Services (ITS) that are
+used to route Message Signalled Interrupts (MSI) to the CPUs.
+
+These nodes must have the following properties:
+- compatible : Should at least contain "arm,gic-v3-its".
+- msi-controller : Boolean property. Identifies the node as an MSI controller
+- reg: Specifies the base physical address and size of the ITS
+ registers.
+
+The main GIC node must contain the appropriate #address-cells,
+#size-cells and ranges properties for the reg property of all ITS
+nodes.
+
Examples:
gic: interrupt-controller@2cf00000 {
compatible = "arm,gic-v3";
#interrupt-cells = <3>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
interrupt-controller;
reg = <0x0 0x2f000000 0 0x10000>, // GICD
<0x0 0x2f100000 0 0x200000>, // GICR
@@ -61,11 +79,20 @@ Examples:
<0x0 0x2c010000 0 0x2000>, // GICH
<0x0 0x2c020000 0 0x2000>; // GICV
interrupts = <1 9 4>;
+
+ gic-its@2c200000 {
+ compatible = "arm,gic-v3-its";
+ msi-controller;
+ reg = <0x0 0x2c200000 0 0x200000>;
+ };
};
gic: interrupt-controller@2c010000 {
compatible = "arm,gic-v3";
#interrupt-cells = <3>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
interrupt-controller;
redistributor-stride = <0x0 0x40000>; // 256kB stride
#redistributor-regions = <2>;
@@ -76,4 +103,16 @@ Examples:
<0x0 0x2c060000 0 0x2000>, // GICH
<0x0 0x2c080000 0 0x2000>; // GICV
interrupts = <1 9 4>;
+
+ gic-its@2c200000 {
+ compatible = "arm,gic-v3-its";
+ msi-controller;
+ reg = <0x0 0x2c200000 0 0x200000>;
+ };
+
+ gic-its@2c400000 {
+ compatible = "arm,gic-v3-its";
+ msi-controller;
+ reg = <0x0 0x2c400000 0 0x200000>;
+ };
};
diff --git a/Documentation/devicetree/bindings/arm/gic.txt b/Documentation/devicetree/bindings/arm/gic.txt
index b38608af66d..8112d0c3675 100644
--- a/Documentation/devicetree/bindings/arm/gic.txt
+++ b/Documentation/devicetree/bindings/arm/gic.txt
@@ -97,3 +97,56 @@ Example:
<0x2c006000 0x2000>;
interrupts = <1 9 0xf04>;
};
+
+
+* GICv2m extension for MSI/MSI-x support (Optional)
+
+Certain revisions of GIC-400 support MSI/MSI-x via V2M register frame(s).
+This is enabled by specifying v2m sub-node(s).
+
+Required properties:
+
+- compatible : The value here should contain "arm,gic-v2m-frame".
+
+- msi-controller : Identifies the node as an MSI controller.
+
+- reg : GICv2m MSI interface register base and size
+
+Optional properties:
+
+- arm,msi-base-spi : When the MSI_TYPER register contains an incorrect
+ value, this property should contain the SPI base of
+ the MSI frame, overriding the HW value.
+
+- arm,msi-num-spis : When the MSI_TYPER register contains an incorrect
+ value, this property should contain the number of
+ SPIs assigned to the frame, overriding the HW value.
+
+Example:
+
+ interrupt-controller@e1101000 {
+ compatible = "arm,gic-400";
+ #interrupt-cells = <3>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-controller;
+ interrupts = <1 8 0xf04>;
+ ranges = <0 0 0 0xe1100000 0 0x100000>;
+ reg = <0x0 0xe1110000 0 0x01000>,
+ <0x0 0xe112f000 0 0x02000>,
+ <0x0 0xe1140000 0 0x10000>,
+ <0x0 0xe1160000 0 0x10000>;
+ v2m0: v2m@0x8000 {
+ compatible = "arm,gic-v2m-frame";
+ msi-controller;
+ reg = <0x0 0x80000 0 0x1000>;
+ };
+
+ ....
+
+ v2mN: v2m@0x9000 {
+ compatible = "arm,gic-v2m-frame";
+ msi-controller;
+ reg = <0x0 0x90000 0 0x1000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,sysirq.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,sysirq.txt
new file mode 100644
index 00000000000..d680b07ec6e
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,sysirq.txt
@@ -0,0 +1,28 @@
+Mediatek 65xx/81xx sysirq
+
+The sysirq block in Mediatek SoCs provides a controllable IRQ inverter for
+each GIC SPI interrupt.
+
+Required properties:
+- compatible: should be one of:
+ "mediatek,mt8135-sysirq"
+ "mediatek,mt8127-sysirq"
+ "mediatek,mt6589-sysirq"
+ "mediatek,mt6582-sysirq"
+ "mediatek,mt6577-sysirq"
+- interrupt-controller : Identifies the node as an interrupt controller
+- #interrupt-cells : Use the same format as specified by GIC in
+ Documentation/devicetree/bindings/arm/gic.txt
+- interrupt-parent: phandle of irq parent for sysirq. The parent must
+ use the same interrupt-cells format as GIC.
+- reg: Physical base address of the intpol registers and length of memory
+ mapped region.
+
+Example:
+ sysirq: interrupt-controller@10200100 {
+ compatible = "mediatek,mt6589-sysirq", "mediatek,mt6577-sysirq";
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ interrupt-parent = <&gic>;
+ reg = <0 0x10200100 0 0x1c>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt b/Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt
index 709efaa3084..f46ca9a316a 100644
--- a/Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt
+++ b/Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt
@@ -16,6 +16,8 @@ Required properties:
future controllers.
Must be "samsung,exynos3250-adc" for
controllers compatible with ADC of Exynos3250.
+ Must be "samsung,exynos7-adc" for
+ the ADC in Exynos7 and compatibles
Must be "samsung,s3c2410-adc" for
the ADC in s3c2410 and compatibles
Must be "samsung,s3c2416-adc" for
@@ -43,13 +45,16 @@ Required properties:
compatible ADC block)
- vdd-supply VDD input supply.
+- samsung,syscon-phandle Contains the PMU system controller node
+ (To access the ADC_PHY register on Exynos5250/5420/5800/3250)
+
Note: child nodes can be added for auto probing from device tree.
Example: adding device info in dtsi file
adc: adc@12D10000 {
compatible = "samsung,exynos-adc-v1";
- reg = <0x12D10000 0x100>, <0x10040718 0x4>;
+ reg = <0x12D10000 0x100>;
interrupts = <0 106 0>;
#io-channel-cells = <1>;
io-channel-ranges;
@@ -58,13 +63,14 @@ adc: adc@12D10000 {
clock-names = "adc";
vdd-supply = <&buck5_reg>;
+ samsung,syscon-phandle = <&pmu_system_controller>;
};
Example: adding device info in dtsi file for Exynos3250 with additional sclk
adc: adc@126C0000 {
compatible = "samsung,exynos3250-adc", "samsung,exynos-adc-v2;
- reg = <0x126C0000 0x100>, <0x10020718 0x4>;
+ reg = <0x126C0000 0x100>;
interrupts = <0 137 0>;
#io-channel-cells = <1>;
io-channel-ranges;
@@ -73,6 +79,7 @@ adc: adc@126C0000 {
clock-names = "adc", "sclk";
vdd-supply = <&buck5_reg>;
+ samsung,syscon-phandle = <&pmu_system_controller>;
};
Example: Adding child nodes in dts file
diff --git a/Documentation/devicetree/bindings/staging/imx-drm/fsl-imx-drm.txt b/Documentation/devicetree/bindings/drm/imx/fsl-imx-drm.txt
index e75f0e549ff..e75f0e549ff 100644
--- a/Documentation/devicetree/bindings/staging/imx-drm/fsl-imx-drm.txt
+++ b/Documentation/devicetree/bindings/drm/imx/fsl-imx-drm.txt
diff --git a/Documentation/devicetree/bindings/staging/imx-drm/hdmi.txt b/Documentation/devicetree/bindings/drm/imx/hdmi.txt
index 1b756cf9afb..1b756cf9afb 100644
--- a/Documentation/devicetree/bindings/staging/imx-drm/hdmi.txt
+++ b/Documentation/devicetree/bindings/drm/imx/hdmi.txt
diff --git a/Documentation/devicetree/bindings/staging/imx-drm/ldb.txt b/Documentation/devicetree/bindings/drm/imx/ldb.txt
index 443bcb6134d..443bcb6134d 100644
--- a/Documentation/devicetree/bindings/staging/imx-drm/ldb.txt
+++ b/Documentation/devicetree/bindings/drm/imx/ldb.txt
diff --git a/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt b/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
index b48f4ef31d9..4c32ef0b7db 100644
--- a/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
+++ b/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
@@ -191,6 +191,8 @@ of the following host1x client modules:
- nvidia,hpd-gpio: specifies a GPIO used for hotplug detection
- nvidia,edid: supplies a binary EDID blob
- nvidia,panel: phandle of a display panel
+ - nvidia,ganged-mode: contains a phandle to a second DSI controller to gang
+ up with in order to support up to 8 data lanes
- sor: serial output resource
diff --git a/Documentation/devicetree/bindings/gpu/st,stih4xx.txt b/Documentation/devicetree/bindings/gpu/st,stih4xx.txt
index 2d150c311a0..c99eb34e640 100644
--- a/Documentation/devicetree/bindings/gpu/st,stih4xx.txt
+++ b/Documentation/devicetree/bindings/gpu/st,stih4xx.txt
@@ -68,7 +68,7 @@ STMicroelectronics stih4xx platforms
number of clocks may depend of the SoC type.
- clock-names: names of the clocks listed in clocks property in the same
order.
- - hdmi,hpd-gpio: gpio id to detect if an hdmi cable is plugged or not.
+ - ddc: phandle of an I2C controller used for DDC EDID probing
sti-hda:
Required properties:
@@ -83,6 +83,22 @@ sti-hda:
- clock-names: names of the clocks listed in clocks property in the same
order.
+sti-hqvdp:
+ must be a child of sti-display-subsystem
+ Required properties:
+ - compatible: "st,stih<chip>-hqvdp"
+ - reg: Physical base address of the IP registers and length of memory mapped region.
+ - clocks: from common clock binding: handle hardware IP needed clocks, the
+   number of clocks may depend on the SoC type.
+ See ../clocks/clock-bindings.txt for details.
+ - clock-names: names of the clocks listed in clocks property in the same
+ order.
+ - resets: resets to be used by the device
+ See ../reset/reset.txt for details.
+ - reset-names: names of the resets listed in resets property in the same
+ order.
+ - st,vtg: phandle of the vtg main device node.
+
Example:
/ {
@@ -173,7 +189,6 @@ Example:
interrupt-names = "irq";
clock-names = "pix", "tmds", "phy", "audio";
clocks = <&clockgen_c_vcc CLK_S_PIX_HDMI>, <&clockgen_c_vcc CLK_S_TMDS_HDMI>, <&clockgen_c_vcc CLK_S_HDMI_REJECT_PLL>, <&clockgen_b1 CLK_S_PCM_0>;
- hdmi,hpd-gpio = <&PIO2 5>;
};
sti-hda@fe85a000 {
@@ -184,6 +199,16 @@ Example:
clocks = <&clockgen_c_vcc CLK_S_PIX_HD>, <&clockgen_c_vcc CLK_S_HDDAC>;
};
};
+
+ sti-hqvdp@9c000000 {
+ compatible = "st,stih407-hqvdp";
+ reg = <0x9C00000 0x100000>;
+ clock-names = "hqvdp", "pix_main";
+ clocks = <&clk_s_c0_flexgen CLK_MAIN_DISP>, <&clk_s_d2_flexgen CLK_PIX_MAIN_DISP>;
+ reset-names = "hqvdp";
+ resets = <&softreset STIH407_HDQVDP_SOFTRESET>;
+ st,vtg = <&vtg_main>;
+ };
};
...
};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-opal.txt b/Documentation/devicetree/bindings/i2c/i2c-opal.txt
new file mode 100644
index 00000000000..12bc61465ee
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-opal.txt
@@ -0,0 +1,37 @@
+Device-tree bindings for I2C OPAL driver
+----------------------------------------
+
+Most of the device node and property layout is specific to the firmware and
+is used by the firmware itself to configure the port. From the Linux
+perspective, the properties of interest are "ibm,port-name" and "ibm,opal-id".
+
+Required properties:
+
+- reg: Port-id within a given master
+- compatible: must be "ibm,opal-i2c"
+- ibm,opal-id: Refers to a specific bus and is used to identify it when
+  calling the relevant OPAL functions.
+- bus-frequency: Operating frequency of the I2C bus (in Hz). Informational
+  for Linux; used by the firmware.
+
+Optional properties:
+- ibm,port-name: Name provided by the firmware that uniquely identifies the
+  I2C port.
+
+The node contains a number of other properties that are used by the FW itself
+and depend on the specific hardware implementation. The example below depicts
+a P8 on-chip bus.
+
+Example:
+
+i2c-bus@0 {
+ reg = <0x0>;
+ bus-frequency = <0x61a80>;
+ compatible = "ibm,power8-i2c-port", "ibm,opal-i2c";
+ ibm,opal-id = <0x1>;
+ ibm,port-name = "p8_00000000_e1p0";
+ #address-cells = <0x1>;
+ phandle = <0x10000006>;
+ #size-cells = <0x0>;
+ linux,phandle = <0x10000006>;
+};
diff --git a/Documentation/devicetree/bindings/iio/adc/qcom,spmi-iadc.txt b/Documentation/devicetree/bindings/iio/adc/qcom,spmi-iadc.txt
new file mode 100644
index 00000000000..4e36d6e2f7b
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/qcom,spmi-iadc.txt
@@ -0,0 +1,46 @@
+Qualcomm's SPMI PMIC current ADC
+
+The QPNP PMIC current ADC (IADC) provides an interface for clients to read
+current. A 16-bit ADC is used for current measurements. The IADC can measure
+the current through an external resistor (channel 1) or an internal
+(built-in) resistor (channel 0). An external resistor must be described by
+the qcom,external-resistor-micro-ohms property.
+
+IADC node:
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: Should contain "qcom,spmi-iadc".
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: IADC base address and length in the SPMI PMIC register map
+
+- interrupts:
+ Usage: optional
+ Value type: <prop-encoded-array>
+ Definition: End of ADC conversion.
+
+- qcom,external-resistor-micro-ohms:
+ Usage: optional
+ Value type: <u32>
+	Definition: Sense resistor value in micro-ohms.
+		    If not defined, a value of 10000 micro-ohms is used.
+
+Example:
+ /* IADC node */
+ pmic_iadc: iadc@3600 {
+ compatible = "qcom,spmi-iadc";
+ reg = <0x3600 0x100>;
+ interrupts = <0x0 0x36 0x0 IRQ_TYPE_EDGE_RISING>;
+ qcom,external-resistor-micro-ohms = <10000>;
+ #io-channel-cells = <1>;
+ };
+
+ /* IIO client node */
+ bat {
+ io-channels = <&pmic_iadc 0>;
+ io-channel-names = "iadc";
+ };
diff --git a/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt b/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt
index 5d3ec1df226..a9a5fe19ff2 100644
--- a/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt
@@ -1,7 +1,7 @@
Rockchip Successive Approximation Register (SAR) A/D Converter bindings
Required properties:
-- compatible: Should be "rockchip,saradc"
+- compatible: Should be "rockchip,saradc" or "rockchip,rk3066-tsadc"
- reg: physical base address of the controller and length of memory mapped
region.
- interrupts: The interrupt number to the cpu. The interrupt specifier format
diff --git a/Documentation/devicetree/bindings/input/cap1106.txt b/Documentation/devicetree/bindings/input/cap11xx.txt
index 4b463904cba..7d0a3009771 100644
--- a/Documentation/devicetree/bindings/input/cap1106.txt
+++ b/Documentation/devicetree/bindings/input/cap11xx.txt
@@ -1,14 +1,16 @@
-Device tree bindings for Microchip CAP1106, 6 channel capacitive touch sensor
+Device tree bindings for Microchip CAP11xx based capacitive touch sensors
-The node for this driver must be a child of a I2C controller node, as the
+The node for this device must be a child of an I2C controller node, as the
 device communicates via I2C only.
Required properties:
- compatible: Must be "microchip,cap1106"
+ compatible: Must contain one of:
+ "microchip,cap1106"
+ "microchip,cap1126"
+ "microchip,cap1188"
reg: The I2C slave address of the device.
- Only 0x28 is valid.
interrupts: Property describing the interrupt line the
device's ALERT#/CM_IRQ# pin is connected to.
@@ -26,6 +28,10 @@ Optional properties:
Valid values are 1, 2, 4, and 8.
By default, a gain of 1 is set.
+ microchip,irq-active-high: By default the interrupt pin is active low
+ open drain. This property allows using the active
+ high push-pull output.
+
linux,keycodes: Specifies an array of numeric keycode values to
be used for the channels. If this property is
omitted, KEY_A, KEY_B, etc are used as
@@ -43,11 +49,11 @@ i2c_controller {
autorepeat;
microchip,sensor-gain = <2>;
- linux,keycodes = <103 /* KEY_UP */
- 106 /* KEY_RIGHT */
- 108 /* KEY_DOWN */
- 105 /* KEY_LEFT */
- 109 /* KEY_PAGEDOWN */
- 104>; /* KEY_PAGEUP */
+ linux,keycodes = <103>, /* KEY_UP */
+ <106>, /* KEY_RIGHT */
+ <108>, /* KEY_DOWN */
+ <105>, /* KEY_LEFT */
+ <109>, /* KEY_PAGEDOWN */
+ <104>; /* KEY_PAGEUP */
};
}
diff --git a/Documentation/devicetree/bindings/input/elan_i2c.txt b/Documentation/devicetree/bindings/input/elan_i2c.txt
new file mode 100644
index 00000000000..ee3242c4ba6
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/elan_i2c.txt
@@ -0,0 +1,34 @@
+Elantech I2C Touchpad
+
+Required properties:
+- compatible: must be "elan,ekth3000".
+- reg: I2C address of the chip.
+- interrupt-parent: a phandle for the interrupt controller (see interrupt
+ binding[0]).
+- interrupts: interrupt to which the chip is connected (see interrupt
+ binding[0]).
+
+Optional properties:
+- wakeup-source: touchpad can be used as a wakeup source.
+- pinctrl-names: should be "default" (see pinctrl binding [1]).
+- pinctrl-0: a phandle pointing to the pin settings for the device (see
+ pinctrl binding [1]).
+- vcc-supply: a phandle for the regulator supplying 3.3V power.
+
+[0]: Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+[1]: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+
+Example:
+ &i2c1 {
+ /* ... */
+
+ touchpad@15 {
+ compatible = "elan,ekth3000";
+ reg = <0x15>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <0x0 IRQ_TYPE_EDGE_FALLING>;
+ wakeup-source;
+ };
+
+ /* ... */
+ };
diff --git a/Documentation/devicetree/bindings/input/elants_i2c.txt b/Documentation/devicetree/bindings/input/elants_i2c.txt
new file mode 100644
index 00000000000..a765232e644
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/elants_i2c.txt
@@ -0,0 +1,33 @@
+Elantech I2C Touchscreen
+
+Required properties:
+- compatible: must be "elan,ekth3500".
+- reg: I2C address of the chip.
+- interrupt-parent: a phandle for the interrupt controller (see interrupt
+ binding[0]).
+- interrupts: interrupt to which the chip is connected (see interrupt
+ binding[0]).
+
+Optional properties:
+- wakeup-source: touchscreen can be used as a wakeup source.
+- pinctrl-names: should be "default" (see pinctrl binding [1]).
+- pinctrl-0: a phandle pointing to the pin settings for the device (see
+ pinctrl binding [1]).
+
+[0]: Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+[1]: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+
+Example:
+ &i2c1 {
+ /* ... */
+
+ touchscreen@10 {
+ compatible = "elan,ekth3500";
+ reg = <0x10>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <0x0 IRQ_TYPE_EDGE_FALLING>;
+ wakeup-source;
+ };
+
+ /* ... */
+ };
diff --git a/Documentation/devicetree/bindings/input/gpio-keys.txt b/Documentation/devicetree/bindings/input/gpio-keys.txt
index 5c2c02140a6..a4a38fcf2ed 100644
--- a/Documentation/devicetree/bindings/input/gpio-keys.txt
+++ b/Documentation/devicetree/bindings/input/gpio-keys.txt
@@ -10,10 +10,13 @@ Optional properties:
Each button (key) is represented as a sub-node of "gpio-keys":
Subnode properties:
- - gpios: OF device-tree gpio specification.
- label: Descriptive name of the key.
- linux,code: Keycode to emit.
+Required mutually exclusive subnode-properties:
+ - gpios: OF device-tree gpio specification.
+ - interrupts: the interrupt line for that input
+
Optional subnode-properties:
- linux,input-type: Specify event type this button/key generates.
If not specified defaults to <1> == EV_KEY.
@@ -33,4 +36,9 @@ Example nodes:
linux,code = <103>;
gpios = <&gpio1 0 1>;
};
+ button@22 {
+ label = "GPIO Key DOWN";
+ linux,code = <108>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+ };
...
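A sketch of a complete node that mixes a GPIO-backed button with the new
interrupt-backed variant (the interrupt-parent and cell values are illustrative
assumptions):

	gpio-keys {
		compatible = "gpio-keys";
		interrupt-parent = <&intc>;	/* assumed interrupt controller */

		button@21 {
			label = "GPIO Key UP";
			linux,code = <103>;
			gpios = <&gpio1 0 1>;
		};

		button@22 {
			label = "GPIO Key DOWN";
			linux,code = <108>;
			interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
		};
	};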
diff --git a/Documentation/devicetree/bindings/leds/leds-lp8860.txt b/Documentation/devicetree/bindings/leds/leds-lp8860.txt
new file mode 100644
index 00000000000..aad38dd94d4
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-lp8860.txt
@@ -0,0 +1,29 @@
+* Texas Instruments - lp8860 4-Channel LED Driver
+
+The LP8860-Q1 is a high-efficiency LED
+driver with boost controller. It has 4 high-precision
+current sinks that can be controlled by a PWM input
+signal, a SPI/I2C master, or both.
+
+Required properties:
+ - compatible:
+ "ti,lp8860"
+ - reg - I2C slave address
+ - label - Used for naming LEDs
+
+Optional properties:
+ - enable-gpio - gpio pin to enable/disable the device.
+	- vled-supply - LED supply regulator
+
+Example:
+
+leds: leds@2d {
+ compatible = "ti,lp8860";
+ reg = <0x2d>;
+ label = "display_cluster";
+ enable-gpio = <&gpio1 28 GPIO_ACTIVE_HIGH>;
+ vled-supply = <&vbatt>;
+};
+
+For more product information please see the link below:
+http://www.ti.com/product/lp8860-q1
diff --git a/Documentation/devicetree/bindings/media/rcar_vin.txt b/Documentation/devicetree/bindings/media/rcar_vin.txt
index ba61782c2af..9dafe6b06cd 100644
--- a/Documentation/devicetree/bindings/media/rcar_vin.txt
+++ b/Documentation/devicetree/bindings/media/rcar_vin.txt
@@ -6,6 +6,8 @@ family of devices. The current blocks are always slaves and suppot one input
channel which can be either RGB, YUYV or BT656.
- compatible: Must be one of the following
+ - "renesas,vin-r8a7794" for the R8A7794 device
+ - "renesas,vin-r8a7793" for the R8A7793 device
- "renesas,vin-r8a7791" for the R8A7791 device
- "renesas,vin-r8a7790" for the R8A7790 device
- "renesas,vin-r8a7779" for the R8A7779 device
diff --git a/Documentation/devicetree/bindings/mtd/atmel-nand.txt b/Documentation/devicetree/bindings/mtd/atmel-nand.txt
index 6edc3b616e9..1fe6dde9849 100644
--- a/Documentation/devicetree/bindings/mtd/atmel-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/atmel-nand.txt
@@ -5,7 +5,9 @@ Required properties:
- reg : should specify localbus address and size used for the chip,
and hardware ECC controller if available.
If the hardware ECC is PMECC, it should contain address and size for
- PMECC, PMECC Error Location controller and ROM which has lookup tables.
+ PMECC and PMECC Error Location controller.
+ The PMECC lookup table address and size in ROM is optional. If not
+ specified, driver will build it in runtime.
- atmel,nand-addr-offset : offset for the address latch.
- atmel,nand-cmd-offset : offset for the command latch.
- #address-cells, #size-cells : Must be present if the device has sub-nodes
@@ -27,7 +29,7 @@ Optional properties:
are: 512, 1024.
- atmel,pmecc-lookup-table-offset : includes two offsets of lookup table in ROM
for different sector size. First one is for sector size 512, the next is for
- sector size 1024.
+ sector size 1024. If not specified, driver will build the table in runtime.
- nand-bus-width : 8 or 16 bus width if not present 8
- nand-on-flash-bbt: boolean to enable on flash bbt option if not present false
- Nand Flash Controller(NFC) is a slave driver under Atmel nand flash
diff --git a/Documentation/devicetree/bindings/mtd/diskonchip.txt b/Documentation/devicetree/bindings/mtd/diskonchip.txt
new file mode 100644
index 00000000000..3e13bfdbea5
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/diskonchip.txt
@@ -0,0 +1,15 @@
+M-Systems and Sandisk DiskOnChip devices
+
+M-System DiskOnChip G3
+======================
+The Sandisk (formerly M-Systems) docg3 is a NAND flash device of 64MB to 256MB.
+
+Required properties:
+ - compatible: should be "m-systems,diskonchip-g3"
+ - reg: register base and size
+
+Example:
+ docg3: flash@0 {
+ compatible = "m-systems,diskonchip-g3";
+ reg = <0x0 0x2000>;
+ };
diff --git a/Documentation/devicetree/bindings/mtd/gpio-control-nand.txt b/Documentation/devicetree/bindings/mtd/gpio-control-nand.txt
index 36ef07d3c90..af8915b41cc 100644
--- a/Documentation/devicetree/bindings/mtd/gpio-control-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/gpio-control-nand.txt
@@ -11,8 +11,8 @@ Required properties:
are made in native endianness.
- #address-cells, #size-cells : Must be present if the device has sub-nodes
representing partitions.
-- gpios : specifies the gpio pins to control the NAND device. nwp is an
- optional gpio and may be set to 0 if not present.
+- gpios : Specifies the GPIO pins to control the NAND device. The order of
+ GPIO references is: RDY, nCE, ALE, CLE, and an optional nWP.
Optional properties:
- bank-width : Width (in bytes) of the device. If not present, the width
@@ -35,11 +35,11 @@ gpio-nand@1,0 {
reg = <1 0x0000 0x2>;
#address-cells = <1>;
#size-cells = <1>;
- gpios = <&banka 1 0 /* rdy */
- &banka 2 0 /* nce */
- &banka 3 0 /* ale */
- &banka 4 0 /* cle */
- 0 /* nwp */>;
+ gpios = <&banka 1 0>, /* RDY */
+ <&banka 2 0>, /* nCE */
+ <&banka 3 0>, /* ALE */
+ <&banka 4 0>, /* CLE */
+ <0>; /* nWP */
partition@0 {
...
diff --git a/Documentation/devicetree/bindings/mtd/sunxi-nand.txt b/Documentation/devicetree/bindings/mtd/sunxi-nand.txt
new file mode 100644
index 00000000000..0273adb8638
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/sunxi-nand.txt
@@ -0,0 +1,45 @@
+Allwinner NAND Flash Controller (NFC)
+
+Required properties:
+- compatible : "allwinner,sun4i-a10-nand".
+- reg : shall contain registers location and length for data and reg.
+- interrupts : shall define the nand controller interrupt.
+- #address-cells: shall be set to 1. Encode the nand CS.
+- #size-cells : shall be set to 0.
+- clocks : shall reference nand controller clocks.
+- clock-names : nand controller internal clock names. Shall contain :
+ * "ahb" : AHB gating clock
+ * "mod" : nand controller clock
+
+Optional child nodes:
+Child nodes represent the available NAND chips.
+
+Optional properties:
+- allwinner,rb : shall contain the native Ready/Busy ids.
+ or
+- rb-gpios : shall contain the gpios used as R/B pins.
+- nand-ecc-mode : one of the supported ECC modes ("hw", "hw_syndrome", "soft",
+ "soft_bch" or "none")
+
+See Documentation/devicetree/bindings/mtd/nand.txt for generic bindings.
+
+
+Examples:
+nfc: nand@01c03000 {
+ compatible = "allwinner,sun4i-a10-nand";
+ reg = <0x01c03000 0x1000>;
+ interrupts = <0 37 1>;
+ clocks = <&ahb_gates 13>, <&nand_clk>;
+ clock-names = "ahb", "mod";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&nand_pins_a &nand_cs0_pins_a &nand_rb0_pins_a>;
+ status = "okay";
+
+ nand@0 {
+ reg = <0>;
+ allwinner,rb = <0>;
+ nand-ecc-mode = "soft_bch";
+ };
+};
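For a board where the Ready/Busy line goes to an ordinary GPIO instead of a
native R/B input, the chip node could use the rb-gpios alternative; a sketch
(the GPIO reference is an assumption for illustration):

	nand@0 {
		reg = <0>;
		rb-gpios = <&pio 6 10 GPIO_ACTIVE_HIGH>;	/* assumed R/B GPIO */
		nand-ecc-mode = "soft_bch";
	};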
diff --git a/Documentation/devicetree/bindings/panel/auo,b116xw03.txt b/Documentation/devicetree/bindings/panel/auo,b116xw03.txt
new file mode 100644
index 00000000000..690d0a568ef
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/auo,b116xw03.txt
@@ -0,0 +1,7 @@
+AU Optronics Corporation 11.6" HD (1366x768) color TFT-LCD panel
+
+Required properties:
+- compatible: should be "auo,b116xw03"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
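Since the panel follows the simple-panel binding, a typical node is just the
compatible string plus the generic simple-panel properties; a sketch (the
regulator and backlight phandles are assumptions):

	panel {
		compatible = "auo,b116xw03";
		power-supply = <&panel_regulator>;	/* assumed supply */
		backlight = <&backlight>;		/* assumed backlight node */
	};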
diff --git a/Documentation/devicetree/bindings/panel/hannstar,hsd070pww1.txt b/Documentation/devicetree/bindings/panel/hannstar,hsd070pww1.txt
new file mode 100644
index 00000000000..7da1d5c038f
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/hannstar,hsd070pww1.txt
@@ -0,0 +1,7 @@
+HannStar Display Corp. HSD070PWW1 7.0" WXGA TFT LCD panel
+
+Required properties:
+- compatible: should be "hannstar,hsd070pww1"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/hit,tx23d38vm0caa.txt b/Documentation/devicetree/bindings/panel/hit,tx23d38vm0caa.txt
new file mode 100644
index 00000000000..04caaae19af
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/hit,tx23d38vm0caa.txt
@@ -0,0 +1,7 @@
+Hitachi Ltd. Corporation 9" WVGA (800x480) TFT LCD panel
+
+Required properties:
+- compatible: should be "hit,tx23d38vm0caa"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/innolux,g121i1-l01.txt b/Documentation/devicetree/bindings/panel/innolux,g121i1-l01.txt
new file mode 100644
index 00000000000..2743b07cd2f
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/innolux,g121i1-l01.txt
@@ -0,0 +1,7 @@
+Innolux Corporation 12.1" WXGA (1280x800) TFT LCD panel
+
+Required properties:
+- compatible: should be "innolux,g121i1-l01"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/sharp,lq101r1sx01.txt b/Documentation/devicetree/bindings/panel/sharp,lq101r1sx01.txt
new file mode 100644
index 00000000000..f522bb8e47e
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/sharp,lq101r1sx01.txt
@@ -0,0 +1,49 @@
+Sharp Microelectronics 10.1" WQXGA TFT LCD panel
+
+This panel requires a dual-channel DSI host to operate. It supports two modes:
+- left-right: each channel drives the left or right half of the screen
+- even-odd: each channel drives the even or odd lines of the screen
+
+Each of the DSI channels controls a separate DSI peripheral. The peripheral
+driven by the first link (DSI-LINK1), left or even, is considered the primary
+peripheral and controls the device. The 'link2' property contains a phandle
+to the peripheral driven by the second link (DSI-LINK2, right or odd).
+
+Note that in video mode the DSI-LINK1 interface always provides the left/even
+pixels and DSI-LINK2 always provides the right/odd pixels. In command mode it
+is possible to program either link to drive the left/even or right/odd pixels
+but for the sake of consistency this binding assumes that the same assignment
+is chosen as for video mode.
+
+Required properties:
+- compatible: should be "sharp,lq101r1sx01"
+- reg: DSI virtual channel of the peripheral
+
+Required properties (for DSI-LINK1 only):
+- link2: phandle to the DSI peripheral on the secondary link. Note that the
+ presence of this property marks the containing node as DSI-LINK1.
+- power-supply: phandle of the regulator that provides the supply voltage
+
+Optional properties (for DSI-LINK1 only):
+- backlight: phandle of the backlight device attached to the panel
+
+Example:
+
+ dsi@54300000 {
+ panel: panel@0 {
+ compatible = "sharp,lq101r1sx01";
+ reg = <0>;
+
+ link2 = <&secondary>;
+
+ power-supply = <...>;
+ backlight = <...>;
+ };
+ };
+
+ dsi@54400000 {
+ secondary: panel@0 {
+ compatible = "sharp,lq101r1sx01";
+ reg = <0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/power_supply/gpio-charger.txt b/Documentation/devicetree/bindings/power_supply/gpio-charger.txt
new file mode 100644
index 00000000000..adbb5dc5b6e
--- /dev/null
+++ b/Documentation/devicetree/bindings/power_supply/gpio-charger.txt
@@ -0,0 +1,27 @@
+gpio-charger
+
+Required properties :
+ - compatible : "gpio-charger"
+ - gpios : GPIO indicating the charger presence.
+ See GPIO binding in bindings/gpio/gpio.txt .
+ - charger-type : power supply type, one of
+ unknown
+ battery
+ ups
+ mains
+ usb-sdp (USB standard downstream port)
+ usb-dcp (USB dedicated charging port)
+ usb-cdp (USB charging downstream port)
+ usb-aca (USB accessory charger adapter)
+
+Example:
+
+ usb_charger: charger {
+ compatible = "gpio-charger";
+ charger-type = "usb-sdp";
+ gpios = <&gpf0 2 0 0 0>;
+	};
+
+ battery {
+ power-supplies = <&usb_charger>;
+ };
diff --git a/Documentation/devicetree/bindings/pwm/atmel-hlcdc-pwm.txt b/Documentation/devicetree/bindings/pwm/atmel-hlcdc-pwm.txt
new file mode 100644
index 00000000000..cfda0d57d30
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/atmel-hlcdc-pwm.txt
@@ -0,0 +1,29 @@
+Device-Tree bindings for Atmel's HLCDC (High-end LCD Controller) PWM driver
+
+The Atmel HLCDC PWM is a subdevice of the HLCDC MFD device.
+See ../mfd/atmel-hlcdc.txt for more details.
+
+Required properties:
+ - compatible: value should be one of the following:
+ "atmel,hlcdc-pwm"
+ - pinctrl-names: the pin control state names. Should contain "default".
+ - pinctrl-0: should contain the pinctrl states described by pinctrl
+ default.
+ - #pwm-cells: should be set to 3. This PWM chip uses the default three-cell
+   binding defined in pwm.txt in this directory.
+
+Example:
+
+ hlcdc: hlcdc@f0030000 {
+ compatible = "atmel,sama5d3-hlcdc";
+ reg = <0xf0030000 0x2000>;
+ clocks = <&lcdc_clk>, <&lcdck>, <&clk32k>;
+ clock-names = "periph_clk","sys_clk", "slow_clk";
+
+ hlcdc_pwm: hlcdc-pwm {
+ compatible = "atmel,hlcdc-pwm";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lcd_pwm>;
+ #pwm-cells = <3>;
+ };
+ };
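With #pwm-cells = <3>, a consumer passes the channel index, the period in
nanoseconds and flags; a hedged consumer sketch (the backlight node and the
values are illustrative only):

	backlight: backlight {
		compatible = "pwm-backlight";
		/* channel 0, 50000 ns period, inverted polarity */
		pwms = <&hlcdc_pwm 0 50000 PWM_POLARITY_INVERTED>;
		brightness-levels = <0 4 8 16 32 64 128 255>;
		default-brightness-level = <6>;
	};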
diff --git a/Documentation/devicetree/bindings/pwm/pwm-bcm2835.txt b/Documentation/devicetree/bindings/pwm/pwm-bcm2835.txt
new file mode 100644
index 00000000000..fb6fb31bc4c
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/pwm-bcm2835.txt
@@ -0,0 +1,30 @@
+BCM2835 PWM controller (Raspberry Pi controller)
+
+Required properties:
+- compatible: should be "brcm,bcm2835-pwm"
+- reg: physical base address and length of the controller's registers
+- clocks: the clock that defines the base clock frequency of the PWM hardware.
+  The period and the duty cycle of the PWM signal are multiples of the base
+  period.
+- #pwm-cells: Should be 2. See pwm.txt in this directory for a description of
+ the cells format.
+
+Examples:
+
+pwm@2020c000 {
+ compatible = "brcm,bcm2835-pwm";
+ reg = <0x2020c000 0x28>;
+ clocks = <&clk_pwm>;
+ #pwm-cells = <2>;
+};
+
+clocks {
+ ....
+ clk_pwm: pwm {
+ compatible = "fixed-clock";
+ reg = <3>;
+ #clock-cells = <0>;
+ clock-frequency = <9200000>;
+ };
+ ....
+};
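With #pwm-cells = <2>, a consumer reference carries only the channel and the
period in nanoseconds; a sketch of a pwm-leds consumer, assuming the controller
node above is given a label such as "pwm" (values are illustrative):

	pwmleds {
		compatible = "pwm-leds";

		led {
			label = "status";
			pwms = <&pwm 0 5000000>;	/* channel 0, 5 ms period */
			max-brightness = <255>;
		};
	};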
diff --git a/Documentation/devicetree/bindings/thermal/armada-thermal.txt b/Documentation/devicetree/bindings/thermal/armada-thermal.txt
index 4cf024929a3..4698e0edc20 100644
--- a/Documentation/devicetree/bindings/thermal/armada-thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/armada-thermal.txt
@@ -5,17 +5,9 @@ Required properties:
- compatible: Should be set to one of the following:
marvell,armada370-thermal
marvell,armada375-thermal
- marvell,armada375-z1-thermal
marvell,armada380-thermal
marvell,armadaxp-thermal
- Note: As the name suggests, "marvell,armada375-z1-thermal"
- applies for the SoC Z1 stepping only. On such stepping
- some quirks need to be done and the register offset differs
- from the one in the A0 stepping.
- The operating system may auto-detect the SoC stepping and
- update the compatible and register offsets at runtime.
-
- reg: Device's register space.
Two entries are expected, see the examples below.
The first one is required for the sensor register;
diff --git a/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt b/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt
new file mode 100644
index 00000000000..ef802de4957
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt
@@ -0,0 +1,68 @@
+* Temperature Sensor ADC (TSADC) on rockchip SoCs
+
+Required properties:
+- compatible : "rockchip,rk3288-tsadc"
+- reg : physical base address of the controller and length of memory mapped
+ region.
+- interrupts : The interrupt number to the cpu. The interrupt specifier format
+ depends on the interrupt controller.
+- clocks : Must contain an entry for each entry in clock-names.
+- clock-names : Shall be "tsadc" for the converter-clock, and "apb_pclk" for
+ the peripheral clock.
+- resets : Must contain an entry for each entry in reset-names.
+ See ../reset/reset.txt for details.
+- reset-names : Must include the name "tsadc-apb".
+- #thermal-sensor-cells : Should be 1. See ./thermal.txt for a description.
+- rockchip,hw-tshut-temp : The hardware-controlled shutdown temperature value.
+- rockchip,hw-tshut-mode : The hardware-controlled shutdown mode 0:CRU 1:GPIO.
+- rockchip,hw-tshut-polarity : The hardware-controlled active polarity 0:LOW
+ 1:HIGH.
+
+Example:
+tsadc: tsadc@ff280000 {
+ compatible = "rockchip,rk3288-tsadc";
+ reg = <0xff280000 0x100>;
+ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_TSADC>, <&cru PCLK_TSADC>;
+ clock-names = "tsadc", "apb_pclk";
+ resets = <&cru SRST_TSADC>;
+ reset-names = "tsadc-apb";
+ pinctrl-names = "default";
+ pinctrl-0 = <&otp_out>;
+ #thermal-sensor-cells = <1>;
+ rockchip,hw-tshut-temp = <95000>;
+ rockchip,hw-tshut-mode = <0>;
+ rockchip,hw-tshut-polarity = <0>;
+};
+
+Example: referring to thermal sensors:
+thermal-zones {
+ cpu_thermal: cpu_thermal {
+ polling-delay-passive = <1000>; /* milliseconds */
+ polling-delay = <5000>; /* milliseconds */
+
+ /* sensor ID */
+ thermal-sensors = <&tsadc 1>;
+
+ trips {
+ cpu_alert0: cpu_alert {
+ temperature = <70000>; /* millicelsius */
+ hysteresis = <2000>; /* millicelsius */
+ type = "passive";
+ };
+ cpu_crit: cpu_crit {
+ temperature = <90000>; /* millicelsius */
+ hysteresis = <2000>; /* millicelsius */
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_alert0>;
+ cooling-device =
+ <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/thermal/tegra-soctherm.txt b/Documentation/devicetree/bindings/thermal/tegra-soctherm.txt
new file mode 100644
index 00000000000..ecf3ed76cd4
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/tegra-soctherm.txt
@@ -0,0 +1,53 @@
+Tegra124 SOCTHERM thermal management system
+
+The SOCTHERM IP block contains thermal sensors, support for polled
+or interrupt-based thermal monitoring, CPU and GPU throttling based
+on temperature trip points, and handling external overcurrent
+notifications. It is also used to manage emergency shutdown in an
+overheating situation.
+
+Required properties :
+- compatible : "nvidia,tegra124-soctherm".
+- reg : Should contain 1 entry:
+ - SOCTHERM register set
+- interrupts : Defines the interrupt used by SOCTHERM
+- clocks : Must contain an entry for each entry in clock-names.
+ See ../clocks/clock-bindings.txt for details.
+- clock-names : Must include the following entries:
+ - tsensor
+ - soctherm
+- resets : Must contain an entry for each entry in reset-names.
+ See ../reset/reset.txt for details.
+- reset-names : Must include the following entries:
+ - soctherm
+- #thermal-sensor-cells : Should be 1. See ./thermal.txt for a description
+ of this property. See <dt-bindings/thermal/tegra124-soctherm.h> for a
+ list of valid values when referring to thermal sensors.
+
+
+Example :
+
+ soctherm@0,700e2000 {
+ compatible = "nvidia,tegra124-soctherm";
+ reg = <0x0 0x700e2000 0x0 0x1000>;
+ interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&tegra_car TEGRA124_CLK_TSENSOR>,
+ <&tegra_car TEGRA124_CLK_SOC_THERM>;
+ clock-names = "tsensor", "soctherm";
+ resets = <&tegra_car 78>;
+ reset-names = "soctherm";
+
+ #thermal-sensor-cells = <1>;
+ };
+
+Example: referring to thermal sensors :
+
+ thermal-zones {
+ cpu {
+ polling-delay-passive = <1000>;
+ polling-delay = <1000>;
+
+ thermal-sensors =
+ <&soctherm TEGRA124_SOCTHERM_SENSOR_CPU>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index cc6151c431c..b1df0ad1306 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -47,6 +47,7 @@ dlink D-Link Corporation
dmo Data Modul AG
ebv EBV Elektronik
edt Emerging Display Technologies
+elan Elan Microelectronic Corp.
emmicro EM Microelectronic
energymicro Silicon Laboratories (formerly Energy Micro AS)
epcos EPCOS AG
@@ -66,8 +67,10 @@ gmt Global Mixed-mode Technology, Inc.
google Google, Inc.
gumstix Gumstix, Inc.
gw Gateworks Corporation
+hannstar HannStar Display Corporation
haoyu Haoyu Microelectronic Co. Ltd.
hisilicon Hisilicon Limited.
+hit Hitachi Ltd.
honeywell Honeywell
hp Hewlett Packard
i2se I2SE GmbH
diff --git a/Documentation/devicetree/bindings/video/adi,adv7511.txt b/Documentation/devicetree/bindings/video/adi,adv7511.txt
new file mode 100644
index 00000000000..96c25ee0150
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/adi,adv7511.txt
@@ -0,0 +1,88 @@
+Analog Devices ADV7511(W)/13 HDMI Encoders
+------------------------------------------
+
+The ADV7511, ADV7511W and ADV7513 are HDMI audio and video transmitters
+compatible with HDMI 1.4 and DVI 1.0. They support color space conversion,
+S/PDIF, CEC and HDCP.
+
+Required properties:
+
+- compatible: Should be one of "adi,adv7511", "adi,adv7511w" or "adi,adv7513"
+- reg: I2C slave address
+
+The ADV7511 supports a large number of input data formats that differ by their
+color depth, color format, clock mode, bit justification and random
+arrangement of components on the data bus. The combination of the following
+properties describe the input and map directly to the video input tables of the
+ADV7511 datasheet that document all the supported combinations.
+
+- adi,input-depth: Number of bits per color component at the input (8, 10 or
+ 12).
+- adi,input-colorspace: The input color space, one of "rgb", "yuv422" or
+ "yuv444".
+- adi,input-clock: The input clock type, one of "1x" (one clock cycle per
+ pixel), "2x" (two clock cycles per pixel), "ddr" (one clock cycle per pixel,
+ data driven on both edges).
+
+The following input format properties are required except in "rgb 1x" and
+"yuv444 1x" modes, in which case they must not be specified.
+
+- adi,input-style: The input components arrangement variant (1, 2 or 3), as
+ listed in the input format tables in the datasheet.
+- adi,input-justification: The input bit justification ("left", "evenly",
+ "right").
+
+Optional properties:
+
+- interrupts: Specifier for the ADV7511 interrupt
+- pd-gpios: Specifier for the GPIO connected to the power down signal
+
+- adi,clock-delay: Video data clock delay relative to the pixel clock, in ps
+ (-1200 ps .. 1600 ps). Defaults to no delay.
+- adi,embedded-sync: The input uses synchronization signals embedded in the
+ data stream (similar to BT.656). Defaults to separate H/V synchronization
+ signals.
+
+Required nodes:
+
+The ADV7511 has two video ports. Their connections are modelled using the OF
+graph bindings specified in Documentation/devicetree/bindings/graph.txt.
+
+- Video port 0 for the RGB or YUV input
+- Video port 1 for the HDMI output
+
+
+Example
+-------
+
+ adv7511w: hdmi@39 {
+ compatible = "adi,adv7511w";
+		reg = <0x39>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <29 IRQ_TYPE_EDGE_FALLING>;
+
+ adi,input-depth = <8>;
+ adi,input-colorspace = "rgb";
+ adi,input-clock = "1x";
+ adi,input-style = <1>;
+ adi,input-justification = "evenly";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ adv7511w_in: endpoint {
+ remote-endpoint = <&dpi_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ adv7511_out: endpoint {
+ remote-endpoint = <&hdmi_connector_in>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/video/exynos_dsim.txt b/Documentation/devicetree/bindings/video/exynos_dsim.txt
index e74243b4b31..ca2b4aacd9a 100644
--- a/Documentation/devicetree/bindings/video/exynos_dsim.txt
+++ b/Documentation/devicetree/bindings/video/exynos_dsim.txt
@@ -4,6 +4,7 @@ Required properties:
- compatible: value should be one of the following
"samsung,exynos3250-mipi-dsi" /* for Exynos3250/3472 SoCs */
"samsung,exynos4210-mipi-dsi" /* for Exynos4 SoCs */
+ "samsung,exynos4415-mipi-dsi" /* for Exynos4415 SoC */
"samsung,exynos5410-mipi-dsi" /* for Exynos5410/5420/5440 SoCs */
- reg: physical base address and length of the registers set for the device
- interrupts: should contain DSI interrupt
diff --git a/Documentation/devicetree/bindings/video/rockchip-drm.txt b/Documentation/devicetree/bindings/video/rockchip-drm.txt
new file mode 100644
index 00000000000..7fff582495a
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/rockchip-drm.txt
@@ -0,0 +1,19 @@
+Rockchip DRM master device
+================================
+
+The Rockchip DRM master device is a virtual device needed to list all
+vop devices or other display interface nodes that comprise the
+graphics subsystem.
+
+Required properties:
+- compatible: Should be "rockchip,display-subsystem"
+- ports: Should contain a list of phandles pointing to the display interface
+  ports of vop devices. vop definitions are as described in
+  Documentation/devicetree/bindings/video/rockchip-vop.txt
+
+Example:
+
+display-subsystem {
+ compatible = "rockchip,display-subsystem";
+ ports = <&vopl_out>, <&vopb_out>;
+};
diff --git a/Documentation/devicetree/bindings/video/rockchip-vop.txt b/Documentation/devicetree/bindings/video/rockchip-vop.txt
new file mode 100644
index 00000000000..d15351f2313
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/rockchip-vop.txt
@@ -0,0 +1,58 @@
+device-tree bindings for rockchip soc display controller (vop)
+
+VOP (Visual Output Processor) is the Display Controller for the Rockchip
+series of SoCs which transfers the image data from a video memory
+buffer to an external LCD interface.
+
+Required properties:
+- compatible: value should be one of the following
+ "rockchip,rk3288-vop";
+
+- interrupts: should contain a list of all VOP IP block interrupts in the
+ order: VSYNC, LCD_SYSTEM. The interrupt specifier
+ format depends on the interrupt controller used.
+
+- clocks: must include clock specifiers corresponding to entries in the
+ clock-names property.
+
+- clock-names: Must contain
+ aclk_vop: for ddr buffer transfer.
+ hclk_vop: for ahb bus to R/W the phy regs.
+ dclk_vop: pixel clock.
+
+- resets: Must contain an entry for each entry in reset-names.
+ See ../reset/reset.txt for details.
+- reset-names: Must include the following entries:
+ - axi
+ - ahb
+ - dclk
+
+- iommus: required property, a phandle to the iommu node that serves the VOP
+
+- port: A port node with endpoint definitions as defined in
+ Documentation/devicetree/bindings/media/video-interfaces.txt.
+
+Example:
+SoC specific DT entry:
+ vopb: vopb@ff930000 {
+ compatible = "rockchip,rk3288-vop";
+ reg = <0xff930000 0x19c>;
+ interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru ACLK_VOP0>, <&cru DCLK_VOP0>, <&cru HCLK_VOP0>;
+ clock-names = "aclk_vop", "dclk_vop", "hclk_vop";
+ resets = <&cru SRST_LCDC1_AXI>, <&cru SRST_LCDC1_AHB>, <&cru SRST_LCDC1_DCLK>;
+ reset-names = "axi", "ahb", "dclk";
+ iommus = <&vopb_mmu>;
+ vopb_out: port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ vopb_out_edp: endpoint@0 {
+ reg = <0>;
+ remote-endpoint=<&edp_in_vopb>;
+ };
+ vopb_out_hdmi: endpoint@1 {
+ reg = <1>;
+ remote-endpoint=<&hdmi_in_vopb>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/video/samsung-fimd.txt b/Documentation/devicetree/bindings/video/samsung-fimd.txt
index 4e6c77c8554..cf1af637102 100644
--- a/Documentation/devicetree/bindings/video/samsung-fimd.txt
+++ b/Documentation/devicetree/bindings/video/samsung-fimd.txt
@@ -11,6 +11,7 @@ Required properties:
"samsung,s5pv210-fimd"; /* for S5PV210 SoC */
"samsung,exynos3250-fimd"; /* for Exynos3250/3472 SoCs */
"samsung,exynos4210-fimd"; /* for Exynos4 SoCs */
+ "samsung,exynos4415-fimd"; /* for Exynos4415 SoC */
"samsung,exynos5250-fimd"; /* for Exynos5 SoCs */
- reg: physical base address and length of the FIMD registers set.
diff --git a/Documentation/ia64/kvm.txt b/Documentation/ia64/kvm.txt
deleted file mode 100644
index ffb5c80bec3..00000000000
--- a/Documentation/ia64/kvm.txt
+++ /dev/null
@@ -1,83 +0,0 @@
-Currently, kvm module is in EXPERIMENTAL stage on IA64. This means that
-interfaces are not stable enough to use. So, please don't run critical
-applications in virtual machine.
-We will try our best to improve it in future versions!
-
- Guide: How to boot up guests on kvm/ia64
-
-This guide is to describe how to enable kvm support for IA-64 systems.
-
-1. Get the kvm source from git.kernel.org.
- Userspace source:
- git clone git://git.kernel.org/pub/scm/virt/kvm/kvm-userspace.git
- Kernel Source:
- git clone git://git.kernel.org/pub/scm/linux/kernel/git/xiantao/kvm-ia64.git
-
-2. Compile the source code.
- 2.1 Compile userspace code:
- (1)cd ./kvm-userspace
- (2)./configure
- (3)cd kernel
- (4)make sync LINUX= $kernel_dir (kernel_dir is the directory of kernel source.)
- (5)cd ..
- (6)make qemu
- (7)cd qemu; make install
-
- 2.2 Compile kernel source code:
- (1) cd ./$kernel_dir
- (2) Make menuconfig
- (3) Enter into virtualization option, and choose kvm.
- (4) make
- (5) Once (4) done, make modules_install
- (6) Make initrd, and use new kernel to reboot up host machine.
- (7) Once (6) done, cd $kernel_dir/arch/ia64/kvm
- (8) insmod kvm.ko; insmod kvm-intel.ko
-
-Note: For step 2, please make sure that host page size == TARGET_PAGE_SIZE of qemu, otherwise, may fail.
-
-3. Get Guest Firmware named as Flash.fd, and put it under right place:
- (1) If you have the guest firmware (binary) released by Intel Corp for Xen, use it directly.
-
- (2) If you have no firmware at hand, Please download its source from
- hg clone http://xenbits.xensource.com/ext/efi-vfirmware.hg
- you can get the firmware's binary in the directory of efi-vfirmware.hg/binaries.
-
- (3) Rename the firmware you owned to Flash.fd, and copy it to /usr/local/share/qemu
-
-4. Boot up Linux or Windows guests:
- 4.1 Create or install a image for guest boot. If you have xen experience, it should be easy.
-
- 4.2 Boot up guests use the following command.
- /usr/local/bin/qemu-system-ia64 -smp xx -m 512 -hda $your_image
- (xx is the number of virtual processors for the guest, now the maximum value is 4)
-
-5. Known possible issue on some platforms with old Firmware.
-
-In the event of strange host crash issues, try to solve it through either of the following ways:
-
-(1): Upgrade your Firmware to the latest one.
-
-(2): Applying the below patch to kernel source.
-diff --git a/arch/ia64/kernel/pal.S b/arch/ia64/kernel/pal.S
-index 0b53344..f02b0f7 100644
---- a/arch/ia64/kernel/pal.S
-+++ b/arch/ia64/kernel/pal.S
-@@ -84,7 +84,8 @@ GLOBAL_ENTRY(ia64_pal_call_static)
- mov ar.pfs = loc1
- mov rp = loc0
- ;;
-- srlz.d // serialize restoration of psr.l
-+ srlz.i // serialize restoration of psr.l
-+ ;;
- br.ret.sptk.many b0
- END(ia64_pal_call_static)
-
-6. Bug report:
- If you found any issues when use kvm/ia64, Please post the bug info to kvm-ia64-devel mailing list.
- https://lists.sourceforge.net/lists/listinfo/kvm-ia64-devel/
-
-Thanks for your interest! Let's work together, and make kvm/ia64 stronger and stronger!
-
-
- Xiantao Zhang <xiantao.zhang@intel.com>
- 2008.3.10
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 1de833556d2..4df73da11ad 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1457,6 +1457,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
disable
Do not enable intel_pstate as the default
scaling driver for the supported processors
+ force
+ Enable intel_pstate on systems that prohibit it by default
+ in favor of acpi-cpufreq. Forcing the intel_pstate driver
+ instead of acpi-cpufreq may disable platform features, such
+ as thermal controls and power capping, that rely on ACPI
+ P-States information being indicated to OSPM and therefore
+ should be used with caution. This option does not work with
+ processors that aren't supported by the intel_pstate driver
+ or on platforms that use pcc-cpufreq instead of acpi-cpufreq.
no_hwp
Do not enable hardware P state control (HWP)
if available.
@@ -3570,6 +3579,24 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
See also Documentation/trace/ftrace.txt "trace options"
section.
+ tp_printk[FTRACE]
+ Have the tracepoints sent to printk as well as the
+ tracing ring buffer. This is useful for early boot up
+ where the system hangs or reboots and does not give the
+ option for reading the tracing buffer or performing a
+ ftrace_dump_on_oops.
+
+ To turn off having tracepoints sent to printk,
+ echo 0 > /proc/sys/kernel/tracepoint_printk
+ Note, echoing 1 into this file without the
+ tracepoint_printk kernel cmdline option has no effect.
+
+ ** CAUTION **
+
+ Having tracepoints sent to printk() and activating high
+ frequency tracepoints such as irq or sched, can cause
+ the system to live lock.
+
traceoff_on_warning
[FTRACE] enable this option to disable tracing when a
warning is hit. This turns off "tracing_on". Tracing can
diff --git a/Documentation/networking/fib_trie.txt b/Documentation/networking/fib_trie.txt
index 0723db7f849..fe719388518 100644
--- a/Documentation/networking/fib_trie.txt
+++ b/Documentation/networking/fib_trie.txt
@@ -73,8 +73,8 @@ trie_leaf_remove()
trie_rebalance()
The key function for the dynamic trie after any change in the trie
- it is run to optimize and reorganize. Tt will walk the trie upwards
- towards the root from a given tnode, doing a resize() at each step
+ it is run to optimize and reorganize. It will walk the trie upwards
+ towards the root from a given tnode, doing a resize() at each step
to implement level compression.
resize()
diff --git a/Documentation/video4linux/vivid.txt b/Documentation/video4linux/vivid.txt
index e5a940e3d30..6cfc8541a36 100644
--- a/Documentation/video4linux/vivid.txt
+++ b/Documentation/video4linux/vivid.txt
@@ -640,6 +640,21 @@ Colorspace: selects which colorspace should be used when generating the image.
Changing the colorspace will result in the V4L2_EVENT_SOURCE_CHANGE
to be sent since it emulates a detected colorspace change.
+Y'CbCr Encoding: selects which Y'CbCr encoding should be used when generating
+ a Y'CbCr image. This only applies if the CSC Colorbar test pattern is
+ selected, and if the format is set to a Y'CbCr format as opposed to an
+ RGB format.
+
+ Changing the Y'CbCr encoding will result in the V4L2_EVENT_SOURCE_CHANGE
+ to be sent since it emulates a detected colorspace change.
+
+Quantization: selects which quantization should be used for the RGB or Y'CbCr
+ encoding when generating the test pattern. This only applies if the CSC
+ Colorbar test pattern is selected.
+
+ Changing the quantization will result in the V4L2_EVENT_SOURCE_CHANGE
+ to be sent since it emulates a detected colorspace change.
+
Limited RGB Range (16-235): selects if the RGB range of the HDMI source should
be limited or full range. This combines with the Digital Video 'Rx RGB
Quantization Range' control and can be used to test what happens if
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 7610eaa4d49..0007fef4ed8 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -68,9 +68,12 @@ description:
Capability: which KVM extension provides this ioctl. Can be 'basic',
 which means that it will be provided by any kernel that supports
- API version 12 (see section 4.1), or a KVM_CAP_xyz constant, which
+ API version 12 (see section 4.1), a KVM_CAP_xyz constant, which
means availability needs to be checked with KVM_CHECK_EXTENSION
- (see section 4.4).
+ (see section 4.4), or 'none' which means that while not all kernels
+ support this ioctl, there's no capability bit to check its
+ availability: for kernels that don't support the ioctl,
+ the ioctl returns -ENOTTY.
Architectures: which instruction set architectures provide this ioctl.
x86 includes both i386 and x86_64.
@@ -604,7 +607,7 @@ struct kvm_fpu {
4.24 KVM_CREATE_IRQCHIP
Capability: KVM_CAP_IRQCHIP, KVM_CAP_S390_IRQCHIP (s390)
-Architectures: x86, ia64, ARM, arm64, s390
+Architectures: x86, ARM, arm64, s390
Type: vm ioctl
Parameters: none
Returns: 0 on success, -1 on error
@@ -612,7 +615,7 @@ Returns: 0 on success, -1 on error
Creates an interrupt controller model in the kernel. On x86, creates a virtual
ioapic, a virtual PIC (two PICs, nested), and sets up future vcpus to have a
local APIC. IRQ routing for GSIs 0-15 is set to both PIC and IOAPIC; GSI 16-23
-only go to the IOAPIC. On ia64, a IOSAPIC is created. On ARM/arm64, a GIC is
+only go to the IOAPIC. On ARM/arm64, a GIC is
created. On s390, a dummy irq routing table is created.
Note that on s390 the KVM_CAP_S390_IRQCHIP vm capability needs to be enabled
@@ -622,7 +625,7 @@ before KVM_CREATE_IRQCHIP can be used.
4.25 KVM_IRQ_LINE
Capability: KVM_CAP_IRQCHIP
-Architectures: x86, ia64, arm, arm64
+Architectures: x86, arm, arm64
Type: vm ioctl
Parameters: struct kvm_irq_level
Returns: 0 on success, -1 on error
@@ -676,7 +679,7 @@ struct kvm_irq_level {
4.26 KVM_GET_IRQCHIP
Capability: KVM_CAP_IRQCHIP
-Architectures: x86, ia64
+Architectures: x86
Type: vm ioctl
Parameters: struct kvm_irqchip (in/out)
Returns: 0 on success, -1 on error
@@ -698,7 +701,7 @@ struct kvm_irqchip {
4.27 KVM_SET_IRQCHIP
Capability: KVM_CAP_IRQCHIP
-Architectures: x86, ia64
+Architectures: x86
Type: vm ioctl
Parameters: struct kvm_irqchip (in)
Returns: 0 on success, -1 on error
@@ -991,7 +994,7 @@ for vm-wide capabilities.
4.38 KVM_GET_MP_STATE
Capability: KVM_CAP_MP_STATE
-Architectures: x86, ia64, s390
+Architectures: x86, s390
Type: vcpu ioctl
Parameters: struct kvm_mp_state (out)
Returns: 0 on success; -1 on error
@@ -1005,16 +1008,15 @@ uniprocessor guests).
Possible values are:
- - KVM_MP_STATE_RUNNABLE: the vcpu is currently running [x86, ia64]
+ - KVM_MP_STATE_RUNNABLE: the vcpu is currently running [x86]
- KVM_MP_STATE_UNINITIALIZED: the vcpu is an application processor (AP)
- which has not yet received an INIT signal [x86,
- ia64]
+ which has not yet received an INIT signal [x86]
- KVM_MP_STATE_INIT_RECEIVED: the vcpu has received an INIT signal, and is
- now ready for a SIPI [x86, ia64]
+ now ready for a SIPI [x86]
- KVM_MP_STATE_HALTED: the vcpu has executed a HLT instruction and
- is waiting for an interrupt [x86, ia64]
+ is waiting for an interrupt [x86]
- KVM_MP_STATE_SIPI_RECEIVED: the vcpu has just received a SIPI (vector
- accessible via KVM_GET_VCPU_EVENTS) [x86, ia64]
+ accessible via KVM_GET_VCPU_EVENTS) [x86]
- KVM_MP_STATE_STOPPED: the vcpu is stopped [s390]
- KVM_MP_STATE_CHECK_STOP: the vcpu is in a special error state [s390]
- KVM_MP_STATE_OPERATING: the vcpu is operating (running or halted)
@@ -1022,7 +1024,7 @@ Possible values are:
- KVM_MP_STATE_LOAD: the vcpu is in a special load/startup state
[s390]
-On x86 and ia64, this ioctl is only useful after KVM_CREATE_IRQCHIP. Without an
+On x86, this ioctl is only useful after KVM_CREATE_IRQCHIP. Without an
in-kernel irqchip, the multiprocessing state must be maintained by userspace on
these architectures.
@@ -1030,7 +1032,7 @@ these architectures.
4.39 KVM_SET_MP_STATE
Capability: KVM_CAP_MP_STATE
-Architectures: x86, ia64, s390
+Architectures: x86, s390
Type: vcpu ioctl
Parameters: struct kvm_mp_state (in)
Returns: 0 on success; -1 on error
@@ -1038,7 +1040,7 @@ Returns: 0 on success; -1 on error
Sets the vcpu's current "multiprocessing state"; see KVM_GET_MP_STATE for
arguments.
-On x86 and ia64, this ioctl is only useful after KVM_CREATE_IRQCHIP. Without an
+On x86, this ioctl is only useful after KVM_CREATE_IRQCHIP. Without an
in-kernel irqchip, the multiprocessing state must be maintained by userspace on
these architectures.
@@ -1065,7 +1067,7 @@ documentation when it pops into existence).
4.41 KVM_SET_BOOT_CPU_ID
Capability: KVM_CAP_SET_BOOT_CPU_ID
-Architectures: x86, ia64
+Architectures: x86
Type: vm ioctl
Parameters: unsigned long vcpu_id
Returns: 0 on success, -1 on error
@@ -1257,8 +1259,8 @@ The flags bitmap is defined as:
4.48 KVM_ASSIGN_PCI_DEVICE
-Capability: KVM_CAP_DEVICE_ASSIGNMENT
-Architectures: x86 ia64
+Capability: none
+Architectures: x86
Type: vm ioctl
Parameters: struct kvm_assigned_pci_dev (in)
Returns: 0 on success, -1 on error
@@ -1298,25 +1300,36 @@ Only PCI header type 0 devices with PCI BAR resources are supported by
device assignment. The user requesting this ioctl must have read/write
access to the PCI sysfs resource files associated with the device.
+Errors:
+ ENOTTY: kernel does not support this ioctl
+
+ Other error conditions may be defined by individual device types or
+ have their standard meanings.
+
4.49 KVM_DEASSIGN_PCI_DEVICE
-Capability: KVM_CAP_DEVICE_DEASSIGNMENT
-Architectures: x86 ia64
+Capability: none
+Architectures: x86
Type: vm ioctl
Parameters: struct kvm_assigned_pci_dev (in)
Returns: 0 on success, -1 on error
Ends PCI device assignment, releasing all associated resources.
-See KVM_CAP_DEVICE_ASSIGNMENT for the data structure. Only assigned_dev_id is
+See KVM_ASSIGN_PCI_DEVICE for the data structure. Only assigned_dev_id is
used in kvm_assigned_pci_dev to identify the device.
+Errors:
+ ENOTTY: kernel does not support this ioctl
+
+ Other error conditions may be defined by individual device types or
+ have their standard meanings.
4.50 KVM_ASSIGN_DEV_IRQ
Capability: KVM_CAP_ASSIGN_DEV_IRQ
-Architectures: x86 ia64
+Architectures: x86
Type: vm ioctl
Parameters: struct kvm_assigned_irq (in)
Returns: 0 on success, -1 on error
@@ -1346,11 +1359,17 @@ The following flags are defined:
It is not valid to specify multiple types per host or guest IRQ. However, the
IRQ type of host and guest can differ or can even be null.
+Errors:
+ ENOTTY: kernel does not support this ioctl
+
+ Other error conditions may be defined by individual device types or
+ have their standard meanings.
+
4.51 KVM_DEASSIGN_DEV_IRQ
Capability: KVM_CAP_ASSIGN_DEV_IRQ
-Architectures: x86 ia64
+Architectures: x86
Type: vm ioctl
Parameters: struct kvm_assigned_irq (in)
Returns: 0 on success, -1 on error
@@ -1365,7 +1384,7 @@ KVM_ASSIGN_DEV_IRQ. Partial deassignment of host or guest IRQ is allowed.
4.52 KVM_SET_GSI_ROUTING
Capability: KVM_CAP_IRQ_ROUTING
-Architectures: x86 ia64 s390
+Architectures: x86 s390
Type: vm ioctl
Parameters: struct kvm_irq_routing (in)
Returns: 0 on success, -1 on error
@@ -1423,8 +1442,8 @@ struct kvm_irq_routing_s390_adapter {
4.53 KVM_ASSIGN_SET_MSIX_NR
-Capability: KVM_CAP_DEVICE_MSIX
-Architectures: x86 ia64
+Capability: none
+Architectures: x86
Type: vm ioctl
Parameters: struct kvm_assigned_msix_nr (in)
Returns: 0 on success, -1 on error
@@ -1445,8 +1464,8 @@ struct kvm_assigned_msix_nr {
4.54 KVM_ASSIGN_SET_MSIX_ENTRY
-Capability: KVM_CAP_DEVICE_MSIX
-Architectures: x86 ia64
+Capability: none
+Architectures: x86
Type: vm ioctl
Parameters: struct kvm_assigned_msix_entry (in)
Returns: 0 on success, -1 on error
@@ -1461,6 +1480,12 @@ struct kvm_assigned_msix_entry {
__u16 padding[3];
};
+Errors:
+ ENOTTY: kernel does not support this ioctl
+
+ Other error conditions may be defined by individual device types or
+ have their standard meanings.
+
4.55 KVM_SET_TSC_KHZ
@@ -2453,9 +2478,15 @@ return ENOEXEC for that vcpu.
Note that because some registers reflect machine topology, all vcpus
should be created before this ioctl is invoked.
+Userspace can call this function multiple times for a given vcpu, including
+after the vcpu has been run. This will reset the vcpu to its initial
+state. All calls to this function after the initial call must use the same
+target and same set of feature flags, otherwise EINVAL will be returned.
+
Possible features:
- KVM_ARM_VCPU_POWER_OFF: Starts the CPU in a power-off state.
- Depends on KVM_CAP_ARM_PSCI.
+ Depends on KVM_CAP_ARM_PSCI. If not set, the CPU will be powered on
+ and execute guest code when KVM_RUN is called.
- KVM_ARM_VCPU_EL1_32BIT: Starts the CPU in a 32bit mode.
Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only).
- KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 for the CPU.
@@ -2951,6 +2982,15 @@ HVC instruction based PSCI call from the vcpu. The 'type' field describes
the system-level event type. The 'flags' field describes architecture
specific flags for the system-level event.
+Valid values for 'type' are:
+ KVM_SYSTEM_EVENT_SHUTDOWN -- the guest has requested a shutdown of the
+  VM. Userspace is not obliged to honour this, and if it does honour it,
+  it does not need to destroy the VM synchronously (i.e. it may call
+  KVM_RUN again before shutdown finally occurs).
+ KVM_SYSTEM_EVENT_RESET -- the guest has requested a reset of the VM.
+ As with SHUTDOWN, userspace can choose to ignore the request, or
+ to schedule the reset to occur in the future and may call KVM_RUN again.
+
/* Fix the size of the union. */
char padding[256];
};
diff --git a/Documentation/virtual/kvm/devices/vm.txt b/Documentation/virtual/kvm/devices/vm.txt
index 0d16f96c0ea..d426fc87fe9 100644
--- a/Documentation/virtual/kvm/devices/vm.txt
+++ b/Documentation/virtual/kvm/devices/vm.txt
@@ -12,14 +12,14 @@ specific.
1. GROUP: KVM_S390_VM_MEM_CTRL
Architectures: s390
-1.1. ATTRIBUTE: KVM_S390_VM_MEM_CTRL
+1.1. ATTRIBUTE: KVM_S390_VM_MEM_ENABLE_CMMA
Parameters: none
-Returns: -EBUSY if already a vcpus is defined, otherwise 0
+Returns: -EBUSY if a vcpu is already defined, otherwise 0
-Enables CMMA for the virtual machine
+Enables Collaborative Memory Management Assist (CMMA) for the virtual machine.
-1.2. ATTRIBUTE: KVM_S390_VM_CLR_CMMA
-Parameteres: none
+1.2. ATTRIBUTE: KVM_S390_VM_MEM_CLR_CMMA
+Parameters: none
Returns: 0
Clear the CMMA status for all guest pages, so any pages the guest marked
diff --git a/Documentation/virtual/kvm/msr.txt b/Documentation/virtual/kvm/msr.txt
index 6d470ae7b07..2a71c8f29f6 100644
--- a/Documentation/virtual/kvm/msr.txt
+++ b/Documentation/virtual/kvm/msr.txt
@@ -168,7 +168,7 @@ MSR_KVM_ASYNC_PF_EN: 0x4b564d02
64 byte memory area which must be in guest RAM and must be
zeroed. Bits 5-2 are reserved and should be zero. Bit 0 is 1
when asynchronous page faults are enabled on the vcpu 0 when
- disabled. Bit 2 is 1 if asynchronous page faults can be injected
+ disabled. Bit 1 is 1 if asynchronous page faults can be injected
when vcpu is in cpl == 0.
First 4 byte of 64 byte memory location will be written to by
diff --git a/Documentation/x86/intel_mpx.txt b/Documentation/x86/intel_mpx.txt
index 4472ed2ad92..818518a3ff0 100644
--- a/Documentation/x86/intel_mpx.txt
+++ b/Documentation/x86/intel_mpx.txt
@@ -7,11 +7,15 @@ that can be used in conjunction with compiler changes to check memory
references, for those references whose compile-time normal intentions are
usurped at runtime due to buffer overflow or underflow.
+You can tell if your CPU supports MPX by looking in /proc/cpuinfo:
+
+ cat /proc/cpuinfo | grep ' mpx '
+
For more information, please refer to Intel(R) Architecture Instruction
Set Extensions Programming Reference, Chapter 9: Intel(R) Memory Protection
Extensions.
-Note: Currently no hardware with MPX ISA is available but it is always
+Note: As of December 2014, no hardware with MPX is available but it is
possible to use SDE (Intel(R) Software Development Emulator) instead, which
can be downloaded from
http://software.intel.com/en-us/articles/intel-software-development-emulator
@@ -30,9 +34,15 @@ is how we expect the compiler, application and kernel to work together.
instrumentation as well as some setup code called early after the app
starts. New instruction prefixes are noops for old CPUs.
2) That setup code allocates (virtual) space for the "bounds directory",
- points the "bndcfgu" register to the directory and notifies the kernel
- (via the new prctl(PR_MPX_ENABLE_MANAGEMENT)) that the app will be using
- MPX.
+ points the "bndcfgu" register to the directory (must also set the valid
+ bit) and notifies the kernel (via the new prctl(PR_MPX_ENABLE_MANAGEMENT))
+ that the app will be using MPX. The app must be careful not to access
+ the bounds tables between the time when it populates "bndcfgu" and
+ when it calls the prctl(). This might be hard to guarantee if the app
+ is compiled with MPX. You can add "__attribute__((bnd_legacy))" to
+ the function to disable MPX instrumentation to help guarantee this.
+ Also be careful not to call out to any other code which might be
+ MPX-instrumented.
3) The kernel detects that the CPU has MPX, allows the new prctl() to
succeed, and notes the location of the bounds directory. Userspace is
   expected to keep the bounds directory at that location. We note it
diff --git a/MAINTAINERS b/MAINTAINERS
index fdffe962a16..08f671dad3e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -618,6 +618,16 @@ S: Maintained
F: drivers/iommu/amd_iommu*.[ch]
F: include/linux/amd-iommu.h
+AMD KFD
+M: Oded Gabbay <oded.gabbay@amd.com>
+L: dri-devel@lists.freedesktop.org
+T: git git://people.freedesktop.org/~gabbayo/linux.git
+S: Supported
+F: drivers/gpu/drm/amd/amdkfd/
+F: drivers/gpu/drm/radeon/radeon_kfd.c
+F: drivers/gpu/drm/radeon/radeon_kfd.h
+F: include/uapi/linux/kfd_ioctl.h
+
AMD MICROCODE UPDATE SUPPORT
M: Andreas Herrmann <herrmann.der.user@googlemail.com>
L: amd64-microcode@amd64.org
@@ -1915,13 +1925,6 @@ W: http://bcache.evilpiepirate.org
S: Maintained:
F: drivers/md/bcache/
-BECEEM BCS200/BCS220-3/BCSM250 WIMAX SUPPORT
-M: Kevin McKinney <klmckinney1@gmail.com>
-M: Matthias Beyer <mail@beyermatthias.de>
-L: devel@driverdev.osuosl.org
-S: Maintained
-F: drivers/staging/bcm*
-
BEFS FILE SYSTEM
S: Orphan
F: Documentation/filesystems/befs.txt
@@ -3297,6 +3300,13 @@ F: drivers/gpu/drm/exynos/
F: include/drm/exynos*
F: include/uapi/drm/exynos*
+DRM DRIVERS FOR FREESCALE IMX
+M: Philipp Zabel <p.zabel@pengutronix.de>
+L: dri-devel@lists.freedesktop.org
+S: Maintained
+F: drivers/gpu/drm/imx/
+F: Documentation/devicetree/bindings/drm/imx/
+
DRM DRIVERS FOR NVIDIA TEGRA
M: Thierry Reding <thierry.reding@gmail.com>
M: Terje Bergström <tbergstrom@nvidia.com>
@@ -4245,6 +4255,12 @@ L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/usb/go7007/
+GOODIX TOUCHSCREEN
+M: Bastien Nocera <hadess@hadess.net>
+L: linux-input@vger.kernel.org
+S: Maintained
+F: drivers/input/touchscreen/goodix.c
+
GPIO SUBSYSTEM
M: Linus Walleij <linus.walleij@linaro.org>
M: Alexandre Courbot <gnurou@gmail.com>
@@ -4947,6 +4963,12 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux.git
S: Supported
F: drivers/idle/intel_idle.c
+INTEL PSTATE DRIVER
+M: Kristen Carlson Accardi <kristen@linux.intel.com>
+L: linux-pm@vger.kernel.org
+S: Supported
+F: drivers/cpufreq/intel_pstate.c
+
INTEL FRAMEBUFFER DRIVER (excluding 810 and 815)
M: Maik Broemme <mbroemme@plusserver.de>
L: linux-fbdev@vger.kernel.org
@@ -5479,15 +5501,6 @@ S: Supported
F: arch/powerpc/include/asm/kvm*
F: arch/powerpc/kvm/
-KERNEL VIRTUAL MACHINE For Itanium (KVM/IA64)
-M: Xiantao Zhang <xiantao.zhang@intel.com>
-L: kvm-ia64@vger.kernel.org
-W: http://kvm.qumranet.com
-S: Supported
-F: Documentation/ia64/kvm.txt
-F: arch/ia64/include/asm/kvm*
-F: arch/ia64/kvm/
-
KERNEL VIRTUAL MACHINE for s390 (KVM/s390)
M: Christian Borntraeger <borntraeger@de.ibm.com>
M: Cornelia Huck <cornelia.huck@de.ibm.com>
@@ -6603,19 +6616,8 @@ L: netdev@vger.kernel.org
S: Maintained
NETWORKING [WIRELESS]
-M: "John W. Linville" <linville@tuxdriver.com>
L: linux-wireless@vger.kernel.org
Q: http://patchwork.kernel.org/project/linux-wireless/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless.git
-S: Maintained
-F: net/mac80211/
-F: net/rfkill/
-F: net/wireless/
-F: include/net/ieee80211*
-F: include/linux/wireless.h
-F: include/uapi/linux/wireless.h
-F: include/net/iw_handler.h
-F: drivers/net/wireless/
NETWORKING DRIVERS
L: netdev@vger.kernel.org
@@ -6636,6 +6638,14 @@ F: include/linux/inetdevice.h
F: include/uapi/linux/if_*
F: include/uapi/linux/netdevice.h
+NETWORKING DRIVERS (WIRELESS)
+M: Kalle Valo <kvalo@codeaurora.org>
+L: linux-wireless@vger.kernel.org
+Q: http://patchwork.kernel.org/project/linux-wireless/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers.git/
+S: Maintained
+F: drivers/net/wireless/
+
NETXEN (1/10) GbE SUPPORT
M: Manish Chopra <manish.chopra@qlogic.com>
M: Sony Chacko <sony.chacko@qlogic.com>
@@ -9500,6 +9510,7 @@ Q: https://patchwork.kernel.org/project/linux-pm/list/
S: Supported
F: drivers/thermal/
F: include/linux/thermal.h
+F: include/uapi/linux/thermal.h
F: include/linux/cpu_cooling.h
F: Documentation/devicetree/bindings/thermal/
@@ -9604,7 +9615,7 @@ F: include/uapi/linux/tipc*.h
F: net/tipc/
TILE ARCHITECTURE
-M: Chris Metcalf <cmetcalf@tilera.com>
+M: Chris Metcalf <cmetcalf@ezchip.com>
W: http://www.tilera.com/scm/
S: Supported
F: arch/tile/
@@ -10232,13 +10243,13 @@ L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/via/via-velocity.*
-VIVI VIRTUAL VIDEO DRIVER
+VIVID VIRTUAL VIDEO DRIVER
M: Hans Verkuil <hverkuil@xs4all.nl>
L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
W: http://linuxtv.org
S: Maintained
-F: drivers/media/platform/vivi*
+F: drivers/media/platform/vivid/*
VLAN (802.1Q)
M: Patrick McHardy <kaber@trash.net>
diff --git a/Makefile b/Makefile
index fd80c6e9bc2..fa9604d3e48 100644
--- a/Makefile
+++ b/Makefile
@@ -481,9 +481,10 @@ asm-generic:
# of make so .config is not included in this case either (for *config).
version_h := include/generated/uapi/linux/version.h
+old_version_h := include/linux/version.h
no-dot-config-targets := clean mrproper distclean \
- cscope gtags TAGS tags help %docs check% coccicheck \
+ cscope gtags TAGS tags help% %docs check% coccicheck \
$(version_h) headers_% archheaders archscripts \
kernelversion %src-pkg
@@ -1005,6 +1006,7 @@ endef
$(version_h): $(srctree)/Makefile FORCE
$(call filechk,version.h)
+ $(Q)rm -f $(old_version_h)
include/generated/utsrelease.h: include/config/kernel.release FORCE
$(call filechk,utsrelease.h)
@@ -1036,8 +1038,6 @@ firmware_install: FORCE
#Default location for installed headers
export INSTALL_HDR_PATH = $(objtree)/usr
-hdr-inst := -rR -f $(srctree)/scripts/Makefile.headersinst obj
-
# If we do an all arch process set dst to asm-$(hdr-arch)
hdr-dst = $(if $(KBUILD_HEADERS), dst=include/asm-$(hdr-arch), dst=include/asm)
@@ -1175,7 +1175,7 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
signing_key.priv signing_key.x509 x509.genkey \
extra_certificates signing_key.x509.keyid \
- signing_key.x509.signer include/linux/version.h
+ signing_key.x509.signer
# clean - Delete most, but leave enough to build external modules
#
@@ -1235,7 +1235,7 @@ rpm: include/config/kernel.release FORCE
# ---------------------------------------------------------------------------
boards := $(wildcard $(srctree)/arch/$(SRCARCH)/configs/*_defconfig)
-boards := $(notdir $(boards))
+boards := $(sort $(notdir $(boards)))
board-dirs := $(dir $(wildcard $(srctree)/arch/$(SRCARCH)/configs/*/*_defconfig))
board-dirs := $(sort $(notdir $(board-dirs:/=)))
@@ -1326,7 +1326,7 @@ help-board-dirs := $(addprefix help-,$(board-dirs))
help-boards: $(help-board-dirs)
-boards-per-dir = $(notdir $(wildcard $(srctree)/arch/$(SRCARCH)/configs/$*/*_defconfig))
+boards-per-dir = $(sort $(notdir $(wildcard $(srctree)/arch/$(SRCARCH)/configs/$*/*_defconfig)))
$(help-board-dirs): help-%:
@echo 'Architecture specific targets ($(SRCARCH) $*):'
@@ -1581,11 +1581,6 @@ ifneq ($(cmd_files),)
include $(cmd_files)
endif
-# Shorthand for $(Q)$(MAKE) -f scripts/Makefile.clean obj=dir
-# Usage:
-# $(Q)$(MAKE) $(clean)=dir
-clean := -f $(srctree)/scripts/Makefile.clean obj
-
endif # skip-makefile
PHONY += FORCE
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index fe44b249460..df94ac1f75b 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -428,3 +428,4 @@ source "arch/arc/Kconfig.debug"
source "security/Kconfig"
source "crypto/Kconfig"
source "lib/Kconfig"
+source "kernel/power/Kconfig"
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 10bc3d4e8a4..db72fec0e16 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -12,7 +12,7 @@ ifeq ($(CROSS_COMPILE),)
CROSS_COMPILE := arc-linux-uclibc-
endif
-KBUILD_DEFCONFIG := fpga_defconfig
+KBUILD_DEFCONFIG := nsim_700_defconfig
cflags-y += -mA7 -fno-common -pipe -fno-builtin -D__linux__
diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts
index cfaedd9c61c..1c169dc74ad 100644
--- a/arch/arc/boot/dts/nsimosci.dts
+++ b/arch/arc/boot/dts/nsimosci.dts
@@ -20,7 +20,7 @@
/* this is for console on PGU */
/* bootargs = "console=tty0 consoleblank=0"; */
/* this is for console on serial */
- bootargs = "earlycon=uart8250,mmio32,0xc0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug";
+ bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug";
};
aliases {
@@ -41,9 +41,9 @@
#interrupt-cells = <1>;
};
- uart0: serial@c0000000 {
+ uart0: serial@f0000000 {
compatible = "ns8250";
- reg = <0xc0000000 0x2000>;
+ reg = <0xf0000000 0x2000>;
interrupts = <11>;
clock-frequency = <3686400>;
baud = <115200>;
@@ -52,21 +52,21 @@
no-loopback-test = <1>;
};
- pgu0: pgu@c9000000 {
+ pgu0: pgu@f9000000 {
compatible = "snps,arcpgufb";
- reg = <0xc9000000 0x400>;
+ reg = <0xf9000000 0x400>;
};
- ps2: ps2@c9001000 {
+ ps2: ps2@f9001000 {
compatible = "snps,arc_ps2";
- reg = <0xc9000400 0x14>;
+ reg = <0xf9000400 0x14>;
interrupts = <13>;
interrupt-names = "arc_ps2_irq";
};
- eth0: ethernet@c0003000 {
+ eth0: ethernet@f0003000 {
compatible = "snps,oscilan";
- reg = <0xc0003000 0x44>;
+ reg = <0xf0003000 0x44>;
interrupts = <7>, <8>;
interrupt-names = "rx", "tx";
};
diff --git a/arch/arc/configs/fpga_noramfs_defconfig b/arch/arc/configs/fpga_noramfs_defconfig
deleted file mode 100644
index 49c93011ab9..00000000000
--- a/arch/arc/configs/fpga_noramfs_defconfig
+++ /dev/null
@@ -1,63 +0,0 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
-# CONFIG_SWAP is not set
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_NAMESPACES=y
-# CONFIG_UTS_NS is not set
-# CONFIG_PID_NS is not set
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_EMBEDDED=y
-# CONFIG_SLUB_DEBUG is not set
-# CONFIG_COMPAT_BRK is not set
-CONFIG_KPROBES=y
-CONFIG_MODULES=y
-# CONFIG_LBDAF is not set
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARC_PLAT_FPGA_LEGACY=y
-# CONFIG_ARC_HAS_RTSC is not set
-CONFIG_ARC_BUILTIN_DTB_NAME="angel4"
-CONFIG_PREEMPT=y
-# CONFIG_COMPACTION is not set
-# CONFIG_CROSS_MEMORY_ATTACH is not set
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_UNIX_DIAG=y
-CONFIG_NET_KEY=y
-CONFIG_INET=y
-# CONFIG_IPV6 is not set
-# CONFIG_STANDALONE is not set
-# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-# CONFIG_FIRMWARE_IN_KERNEL is not set
-# CONFIG_BLK_DEV is not set
-CONFIG_NETDEVICES=y
-CONFIG_ARC_EMAC=y
-CONFIG_LXT_PHY=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-# CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
-CONFIG_SERIAL_ARC=y
-CONFIG_SERIAL_ARC_CONSOLE=y
-# CONFIG_HW_RANDOM is not set
-# CONFIG_HWMON is not set
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_HID is not set
-# CONFIG_USB_SUPPORT is not set
-# CONFIG_IOMMU_SUPPORT is not set
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_TMPFS=y
-# CONFIG_MISC_FILESYSTEMS is not set
-CONFIG_NFS_FS=y
-# CONFIG_ENABLE_WARN_DEPRECATED is not set
-# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_XZ_DEC=y
diff --git a/arch/arc/configs/fpga_defconfig b/arch/arc/configs/nsim_700_defconfig
index ef4d3bc7b6c..ef4d3bc7b6c 100644
--- a/arch/arc/configs/fpga_defconfig
+++ b/arch/arc/configs/nsim_700_defconfig
diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h
index 742816f1b21..27ecc6975a5 100644
--- a/arch/arc/include/asm/irqflags.h
+++ b/arch/arc/include/asm/irqflags.h
@@ -41,6 +41,15 @@
/******************************************************************
* IRQ Control Macros
+ *
+ * All of them have "memory" clobber (compiler barrier) which is needed to
+ * ensure that LD/ST requiring irq safety (R-M-W when LLSC is not available)
+ * are redone after IRQs are re-enabled (and gcc doesn't reuse stale register)
+ *
+ * Noted at the time of Abilis Timer List corruption
+ * Orig Bug + Rejected solution : https://lkml.org/lkml/2013/3/29/67
+ * Reasoning : https://lkml.org/lkml/2013/4/8/15
+ *
******************************************************************/
/*
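The comment added above is about compiler reordering rather than CPU ordering: the "memory" clobber on the IRQ-control inline asm acts as a compiler barrier, so values loaded before interrupts were disabled cannot be silently reused afterwards. A minimal sketch of the pattern being protected, assuming the usual arch_local_irq_save()/arch_local_irq_restore() helpers from <linux/irqflags.h> (this function is illustrative and not part of the hunk):

/*
 * Hypothetical read-modify-write made irq-safe by disabling interrupts.
 * Because the irq helpers clobber "memory", the compiler must reload *p
 * inside the critical section instead of reusing a register it loaded
 * before interrupts were disabled.
 */
static inline void rmw_set_bits(unsigned long *p, unsigned long mask)
{
	unsigned long flags;

	flags = arch_local_irq_save();	/* compiler barrier via "memory" */
	*p |= mask;			/* fresh load, modify, store */
	arch_local_irq_restore(flags);	/* compiler barrier via "memory" */
}

Without the clobber, gcc could hoist the load of *p above the irq-disable, which is exactly the stale-register problem described in the LKML threads referenced above.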
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index d01df0c517a..20ebb602ea2 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -26,8 +26,10 @@
#include <asm/setup.h>
#include <asm/mach_desc.h>
+#ifndef CONFIG_ARC_HAS_LLSC
arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
arch_spinlock_t smp_bitops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+#endif
struct plat_smp_ops plat_smp_ops;
diff --git a/arch/arm/boot/dts/am437x-sk-evm.dts b/arch/arm/boot/dts/am437x-sk-evm.dts
index 87aa4f3b8b3..53bbfc90b26 100644
--- a/arch/arm/boot/dts/am437x-sk-evm.dts
+++ b/arch/arm/boot/dts/am437x-sk-evm.dts
@@ -100,7 +100,7 @@
};
lcd0: display {
- compatible = "osddisplays,osd057T0559-34ts", "panel-dpi";
+ compatible = "newhaven,nhd-4.3-480272ef-atxl", "panel-dpi";
label = "lcd";
pinctrl-names = "default";
@@ -112,11 +112,11 @@
clock-frequency = <9000000>;
hactive = <480>;
vactive = <272>;
- hfront-porch = <8>;
- hback-porch = <43>;
- hsync-len = <4>;
- vback-porch = <12>;
- vfront-porch = <4>;
+ hfront-porch = <2>;
+ hback-porch = <2>;
+ hsync-len = <41>;
+ vfront-porch = <2>;
+ vback-porch = <2>;
vsync-len = <10>;
hsync-active = <0>;
vsync-active = <0>;
@@ -320,8 +320,7 @@
lcd_pins: lcd_pins {
pinctrl-single,pins = <
- /* GPIO 5_8 to select LCD / HDMI */
- 0x238 (PIN_OUTPUT_PULLUP | MUX_MODE7)
+ 0x1c (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_ad7.gpio1_7 */
>;
};
};
diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi
index 9721e55384c..50096d3427e 100644
--- a/arch/arm/boot/dts/armada-375.dtsi
+++ b/arch/arm/boot/dts/armada-375.dtsi
@@ -14,6 +14,7 @@
#include "skeleton.dtsi"
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/phy/phy.h>
#define MBUS_ID(target,attributes) (((target) << 24) | ((attributes) << 16))
@@ -348,6 +349,12 @@
#clock-cells = <1>;
};
+ usbcluster: usb-cluster@18400 {
+ compatible = "marvell,armada-375-usb-cluster";
+ reg = <0x18400 0x4>;
+ #phy-cells = <1>;
+ };
+
mbusc: mbus-controller@20000 {
compatible = "marvell,mbus-controller";
reg = <0x20000 0x100>, <0x20180 0x20>;
@@ -398,6 +405,8 @@
reg = <0x50000 0x500>;
interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gateclk 18>;
+ phys = <&usbcluster PHY_TYPE_USB2>;
+ phy-names = "usb";
status = "disabled";
};
@@ -414,6 +423,8 @@
reg = <0x58000 0x20000>,<0x5b880 0x80>;
interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gateclk 16>;
+ phys = <&usbcluster PHY_TYPE_USB3>;
+ phy-names = "usb";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/at91-sama5d4ek.dts b/arch/arm/boot/dts/at91-sama5d4ek.dts
index b5b84006469..9198b719d0e 100644
--- a/arch/arm/boot/dts/at91-sama5d4ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d4ek.dts
@@ -9,12 +9,12 @@
* licensing only applies to this file, and not this project as a
* whole.
*
- * a) This library is free software; you can redistribute it and/or
+ * a) This file is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * This file is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi
index cb100b03a36..dd1313cbc31 100644
--- a/arch/arm/boot/dts/at91sam9260.dtsi
+++ b/arch/arm/boot/dts/at91sam9260.dtsi
@@ -956,6 +956,14 @@
};
};
+ rtc@fffffd20 {
+ compatible = "atmel,at91sam9260-rtt";
+ reg = <0xfffffd20 0x10>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+ clocks = <&clk32k>;
+ status = "disabled";
+ };
+
watchdog@fffffd40 {
compatible = "atmel,at91sam9260-wdt";
reg = <0xfffffd40 0x10>;
@@ -966,6 +974,12 @@
atmel,idle-halt;
status = "disabled";
};
+
+ gpbr: syscon@fffffd50 {
+ compatible = "atmel,at91sam9260-gpbr", "syscon";
+ reg = <0xfffffd50 0x10>;
+ status = "disabled";
+ };
};
nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9261.dtsi b/arch/arm/boot/dts/at91sam9261.dtsi
index a81aab4281a..cdb9ed61210 100644
--- a/arch/arm/boot/dts/at91sam9261.dtsi
+++ b/arch/arm/boot/dts/at91sam9261.dtsi
@@ -828,12 +828,26 @@
clocks = <&mck>;
};
+ rtc@fffffd20 {
+ compatible = "atmel,at91sam9260-rtt";
+ reg = <0xfffffd20 0x10>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+ clocks = <&slow_xtal>;
+ status = "disabled";
+ };
+
watchdog@fffffd40 {
compatible = "atmel,at91sam9260-wdt";
reg = <0xfffffd40 0x10>;
interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
status = "disabled";
};
+
+ gpbr: syscon@fffffd50 {
+ compatible = "atmel,at91sam9260-gpbr", "syscon";
+ reg = <0xfffffd50 0x10>;
+ status = "disabled";
+ };
};
};
diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
index 653e4395b7c..1467750e337 100644
--- a/arch/arm/boot/dts/at91sam9263.dtsi
+++ b/arch/arm/boot/dts/at91sam9263.dtsi
@@ -922,6 +922,27 @@
pinctrl-0 = <&pinctrl_can_rx_tx>;
clocks = <&can_clk>;
clock-names = "can_clk";
+ };
+
+ rtc@fffffd20 {
+ compatible = "atmel,at91sam9260-rtt";
+ reg = <0xfffffd20 0x10>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+ clocks = <&slow_xtal>;
+ status = "disabled";
+ };
+
+ rtc@fffffd50 {
+ compatible = "atmel,at91sam9260-rtt";
+ reg = <0xfffffd50 0x10>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+ clocks = <&slow_xtal>;
+ status = "disabled";
+ };
+
+ gpbr: syscon@fffffd60 {
+ compatible = "atmel,at91sam9260-gpbr", "syscon";
+ reg = <0xfffffd60 0x50>;
status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
index d2919108e92..dfaacb113f2 100644
--- a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
+++ b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
@@ -112,9 +112,23 @@
};
};
+ shdwc@fffffd10 {
+ atmel,wakeup-counter = <10>;
+ atmel,wakeup-rtt-timer;
+ };
+
+ rtc@fffffd20 {
+ atmel,rtt-rtc-time-reg = <&gpbr 0x0>;
+ status = "okay";
+ };
+
watchdog@fffffd40 {
status = "okay";
};
+
+ gpbr: syscon@fffffd50 {
+ status = "okay";
+ };
};
nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
index 6c0637a4bda..2a8da8a884b 100644
--- a/arch/arm/boot/dts/at91sam9g45.dtsi
+++ b/arch/arm/boot/dts/at91sam9g45.dtsi
@@ -492,6 +492,27 @@
};
};
+ isi {
+ pinctrl_isi: isi-0 {
+ atmel,pins = <AT91_PIOB 8 AT91_PERIPH_B AT91_PINCTRL_NONE /* D8 */
+ AT91_PIOB 9 AT91_PERIPH_B AT91_PINCTRL_NONE /* D9 */
+ AT91_PIOB 10 AT91_PERIPH_B AT91_PINCTRL_NONE /* D10 */
+ AT91_PIOB 11 AT91_PERIPH_B AT91_PINCTRL_NONE /* D11 */
+ AT91_PIOB 20 AT91_PERIPH_A AT91_PINCTRL_NONE /* D0 */
+ AT91_PIOB 21 AT91_PERIPH_A AT91_PINCTRL_NONE /* D1 */
+ AT91_PIOB 22 AT91_PERIPH_A AT91_PINCTRL_NONE /* D2 */
+ AT91_PIOB 23 AT91_PERIPH_A AT91_PINCTRL_NONE /* D3 */
+ AT91_PIOB 24 AT91_PERIPH_A AT91_PINCTRL_NONE /* D4 */
+ AT91_PIOB 25 AT91_PERIPH_A AT91_PINCTRL_NONE /* D5 */
+ AT91_PIOB 26 AT91_PERIPH_A AT91_PINCTRL_NONE /* D6 */
+ AT91_PIOB 27 AT91_PERIPH_A AT91_PINCTRL_NONE /* D7 */
+ AT91_PIOB 28 AT91_PERIPH_A AT91_PINCTRL_NONE /* PCK */
+ AT91_PIOB 29 AT91_PERIPH_A AT91_PINCTRL_NONE /* VSYNC */
+ AT91_PIOB 30 AT91_PERIPH_A AT91_PINCTRL_NONE /* HSYNC */
+ AT91_PIOB 31 AT91_PERIPH_A AT91_PINCTRL_NONE /* MCK */>;
+ };
+ };
+
usart0 {
pinctrl_usart0: usart0-0 {
atmel,pins =
@@ -1035,6 +1056,17 @@
};
};
+ isi@fffb4000 {
+ compatible = "atmel,at91sam9g45-isi";
+ reg = <0xfffb4000 0x4000>;
+ interrupts = <26 IRQ_TYPE_LEVEL_HIGH 5>;
+ clocks = <&isi_clk>;
+ clock-names = "isi_clk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_isi>;
+ status = "disabled";
+ };
+
pwm0: pwm@fffb8000 {
compatible = "atmel,at91sam9rl-pwm";
reg = <0xfffb8000 0x300>;
@@ -1199,12 +1231,26 @@
};
};
+ rtc@fffffd20 {
+ compatible = "atmel,at91sam9260-rtt";
+ reg = <0xfffffd20 0x10>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+ clocks = <&clk32k>;
+ status = "disabled";
+ };
+
rtc@fffffdb0 {
compatible = "atmel,at91rm9200-rtc";
reg = <0xfffffdb0 0x30>;
interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
status = "disabled";
};
+
+ gpbr: syscon@fffffd60 {
+ compatible = "atmel,at91sam9260-gpbr", "syscon";
+ reg = <0xfffffd60 0x10>;
+ status = "disabled";
+ };
};
fb0: fb@0x00500000 {
diff --git a/arch/arm/boot/dts/at91sam9m10g45ek.dts b/arch/arm/boot/dts/at91sam9m10g45ek.dts
index d8dd2265109..33ce7ca2c40 100644
--- a/arch/arm/boot/dts/at91sam9m10g45ek.dts
+++ b/arch/arm/boot/dts/at91sam9m10g45ek.dts
@@ -161,6 +161,15 @@
pinctrl-0 = <&pinctrl_pwm_leds>;
};
+ rtc@fffffd20 {
+ atmel,rtt-rtc-time-reg = <&gpbr 0x0>;
+ status = "okay";
+ };
+
+ gpbr: syscon@fffffd60 {
+ status = "okay";
+ };
+
rtc@fffffdb0 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/at91sam9rl.dtsi b/arch/arm/boot/dts/at91sam9rl.dtsi
index f0b4352650e..72424371413 100644
--- a/arch/arm/boot/dts/at91sam9rl.dtsi
+++ b/arch/arm/boot/dts/at91sam9rl.dtsi
@@ -1059,6 +1059,27 @@
clocks = <&slow_rc_osc &slow_osc>;
};
};
+
+ rtc@fffffeb0 {
+ compatible = "atmel,at91rm9200-rtc";
+ reg = <0xfffffeb0 0x40>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+ status = "disabled";
+ };
+
+ rtc@fffffd20 {
+ compatible = "atmel,at91sam9260-rtt";
+ reg = <0xfffffd20 0x10>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+ clocks = <&clk32k>;
+ status = "disabled";
+ };
+
+ gpbr: syscon@fffffd60 {
+ compatible = "atmel,at91sam9260-gpbr", "syscon";
+ reg = <0xfffffd60 0x10>;
+ status = "disabled";
+ };
};
};
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
index 736092b1a53..10b725c7bfc 100644
--- a/arch/arm/boot/dts/dra7-evm.dts
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -304,7 +304,7 @@
/* VDD_GPU - over VDD_SMPS6 */
regulator-name = "smps6";
regulator-min-microvolt = <850000>;
- regulator-max-microvolt = <12500000>;
+ regulator-max-microvolt = <1250000>;
regulator-always-on;
regulator-boot-on;
};
@@ -313,7 +313,7 @@
/* CORE_VDD */
regulator-name = "smps7";
regulator-min-microvolt = <850000>;
- regulator-max-microvolt = <1030000>;
+ regulator-max-microvolt = <1060000>;
regulator-always-on;
regulator-boot-on;
};
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 63bf99be176..22771bc1643 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -742,7 +742,7 @@
};
wdt2: wdt@4ae14000 {
- compatible = "ti,omap4-wdt";
+ compatible = "ti,omap3-wdt";
reg = <0x4ae14000 0x80>;
interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
ti,hwmods = "wd_timer2";
diff --git a/arch/arm/boot/dts/dra72-evm.dts b/arch/arm/boot/dts/dra72-evm.dts
index afc74fd4bb5..89085d066c6 100644
--- a/arch/arm/boot/dts/dra72-evm.dts
+++ b/arch/arm/boot/dts/dra72-evm.dts
@@ -160,7 +160,7 @@
/* VDD_CORE */
regulator-name = "smps2";
regulator-min-microvolt = <850000>;
- regulator-max-microvolt = <1030000>;
+ regulator-max-microvolt = <1060000>;
regulator-boot-on;
regulator-always-on;
};
diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi
index 2c05b3f017f..4bdcbd61ce4 100644
--- a/arch/arm/boot/dts/dra7xx-clocks.dtsi
+++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi
@@ -1042,7 +1042,7 @@
#clock-cells = <0>;
compatible = "ti,mux-clock";
clocks = <&sys_clkin1>, <&sys_clkin2>;
- reg = <0x01a4>;
+ reg = <0x0164>;
};
mlb_clk: mlb_clk {
@@ -1084,14 +1084,14 @@
#clock-cells = <0>;
compatible = "ti,mux-clock";
clocks = <&sys_clkin1>, <&sys_clkin2>;
- reg = <0x01d0>;
+ reg = <0x0168>;
};
video2_dpll_clk_mux: video2_dpll_clk_mux {
#clock-cells = <0>;
compatible = "ti,mux-clock";
clocks = <&sys_clkin1>, <&sys_clkin2>;
- reg = <0x01d4>;
+ reg = <0x016c>;
};
wkupaon_iclk_mux: wkupaon_iclk_mux {
diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi
index 242ddda0a8c..22465494b79 100644
--- a/arch/arm/boot/dts/exynos3250.dtsi
+++ b/arch/arm/boot/dts/exynos3250.dtsi
@@ -311,12 +311,13 @@
adc: adc@126C0000 {
compatible = "samsung,exynos3250-adc",
"samsung,exynos-adc-v2";
- reg = <0x126C0000 0x100>, <0x10020718 0x4>;
+ reg = <0x126C0000 0x100>;
interrupts = <0 137 0>;
clock-names = "adc", "sclk";
clocks = <&cmu CLK_TSADC>, <&cmu CLK_SCLK_TSADC>;
#io-channel-cells = <1>;
io-channel-ranges;
+ samsung,syscon-phandle = <&pmu_system_controller>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/exynos4x12.dtsi b/arch/arm/boot/dts/exynos4x12.dtsi
index 2e9f1f7be77..93b70402e94 100644
--- a/arch/arm/boot/dts/exynos4x12.dtsi
+++ b/arch/arm/boot/dts/exynos4x12.dtsi
@@ -108,13 +108,14 @@
adc: adc@126C0000 {
compatible = "samsung,exynos-adc-v1";
- reg = <0x126C0000 0x100>, <0x10020718 0x4>;
+ reg = <0x126C0000 0x100>;
interrupt-parent = <&combiner>;
interrupts = <10 3>;
clocks = <&clock CLK_TSADC>;
clock-names = "adc";
#io-channel-cells = <1>;
io-channel-ranges;
+ samsung,syscon-phandle = <&pmu_system_controller>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index d45a07ea340..0a229fcd7ac 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -754,12 +754,13 @@
adc: adc@12D10000 {
compatible = "samsung,exynos-adc-v1";
- reg = <0x12D10000 0x100>, <0x10040718 0x4>;
+ reg = <0x12D10000 0x100>;
interrupts = <0 106 0>;
clocks = <&clock CLK_ADC>;
clock-names = "adc";
#io-channel-cells = <1>;
io-channel-ranges;
+ samsung,syscon-phandle = <&pmu_system_controller>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index 90bf4011e31..517e50f6760 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -541,12 +541,13 @@
adc: adc@12D10000 {
compatible = "samsung,exynos-adc-v2";
- reg = <0x12D10000 0x100>, <0x10040720 0x4>;
+ reg = <0x12D10000 0x100>;
interrupts = <0 106 0>;
clocks = <&clock CLK_TSADC>;
clock-names = "adc";
#io-channel-cells = <1>;
io-channel-ranges;
+ samsung,syscon-phandle = <&pmu_system_controller>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/omap2430-sdp.dts b/arch/arm/boot/dts/omap2430-sdp.dts
index 05eca2e4430..6b36ede5848 100644
--- a/arch/arm/boot/dts/omap2430-sdp.dts
+++ b/arch/arm/boot/dts/omap2430-sdp.dts
@@ -48,22 +48,22 @@
gpmc,device-width = <1>;
gpmc,cycle2cycle-samecsen = <1>;
gpmc,cycle2cycle-diffcsen = <1>;
- gpmc,cs-on-ns = <7>;
- gpmc,cs-rd-off-ns = <233>;
- gpmc,cs-wr-off-ns = <233>;
- gpmc,adv-on-ns = <22>;
- gpmc,adv-rd-off-ns = <60>;
- gpmc,adv-wr-off-ns = <60>;
- gpmc,oe-on-ns = <67>;
- gpmc,oe-off-ns = <210>;
- gpmc,we-on-ns = <67>;
- gpmc,we-off-ns = <210>;
- gpmc,rd-cycle-ns = <233>;
- gpmc,wr-cycle-ns = <233>;
- gpmc,access-ns = <233>;
- gpmc,page-burst-access-ns = <30>;
- gpmc,bus-turnaround-ns = <30>;
- gpmc,cycle2cycle-delay-ns = <30>;
+ gpmc,cs-on-ns = <6>;
+ gpmc,cs-rd-off-ns = <187>;
+ gpmc,cs-wr-off-ns = <187>;
+ gpmc,adv-on-ns = <18>;
+ gpmc,adv-rd-off-ns = <48>;
+ gpmc,adv-wr-off-ns = <48>;
+ gpmc,oe-on-ns = <60>;
+ gpmc,oe-off-ns = <169>;
+ gpmc,we-on-ns = <66>;
+ gpmc,we-off-ns = <169>;
+ gpmc,rd-cycle-ns = <187>;
+ gpmc,wr-cycle-ns = <187>;
+ gpmc,access-ns = <187>;
+ gpmc,page-burst-access-ns = <24>;
+ gpmc,bus-turnaround-ns = <24>;
+ gpmc,cycle2cycle-delay-ns = <24>;
gpmc,wait-monitoring-ns = <0>;
gpmc,clk-activation-ns = <0>;
gpmc,wr-data-mux-bus-ns = <0>;
diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
index e0157b0f075..1b0f30c2c4a 100644
--- a/arch/arm/boot/dts/sama5d4.dtsi
+++ b/arch/arm/boot/dts/sama5d4.dtsi
@@ -9,12 +9,12 @@
* licensing only applies to this file, and not this project as a
* whole.
*
- * a) This library is free software; you can redistribute it and/or
+ * a) This file is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * This file is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
@@ -45,6 +45,7 @@
#include "skeleton.dtsi"
#include <dt-bindings/clock/at91.h>
+#include <dt-bindings/dma/at91.h>
#include <dt-bindings/pinctrl/at91.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/gpio/gpio.h>
@@ -302,6 +303,15 @@
#size-cells = <1>;
ranges;
+ dma1: dma-controller@f0004000 {
+ compatible = "atmel,sama5d4-dma";
+ reg = <0xf0004000 0x200>;
+ interrupts = <50 IRQ_TYPE_LEVEL_HIGH 0>;
+ #dma-cells = <1>;
+ clocks = <&dma1_clk>;
+ clock-names = "dma_clk";
+ };
+
ramc0: ramc@f0010000 {
compatible = "atmel,sama5d3-ddramc";
reg = <0xf0010000 0x200>;
@@ -309,6 +319,15 @@
clock-names = "ddrck", "mpddr";
};
+ dma0: dma-controller@f0014000 {
+ compatible = "atmel,sama5d4-dma";
+ reg = <0xf0014000 0x200>;
+ interrupts = <8 IRQ_TYPE_LEVEL_HIGH 0>;
+ #dma-cells = <1>;
+ clocks = <&dma0_clk>;
+ clock-names = "dma_clk";
+ };
+
pmc: pmc@f0018000 {
compatible = "atmel,sama5d3-pmc";
reg = <0xf0018000 0x120>;
@@ -761,6 +780,10 @@
compatible = "atmel,hsmci";
reg = <0xf8000000 0x600>;
interrupts = <35 IRQ_TYPE_LEVEL_HIGH 0>;
+ dmas = <&dma1
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+ | AT91_XDMAC_DT_PERID(0))>;
+ dma-names = "rxtx";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_mmc0_clk_cmd_dat0 &pinctrl_mmc0_dat1_3>;
status = "disabled";
@@ -776,6 +799,13 @@
compatible = "atmel,at91rm9200-spi";
reg = <0xf8010000 0x100>;
interrupts = <37 IRQ_TYPE_LEVEL_HIGH 3>;
+ dmas = <&dma1
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+ | AT91_XDMAC_DT_PERID(10))>,
+ <&dma1
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+ | AT91_XDMAC_DT_PERID(11))>;
+ dma-names = "tx", "rx";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi0>;
clocks = <&spi0_clk>;
@@ -787,6 +817,13 @@
compatible = "atmel,at91sam9x5-i2c";
reg = <0xf8014000 0x4000>;
interrupts = <32 IRQ_TYPE_LEVEL_HIGH 6>;
+ dmas = <&dma1
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+ | AT91_XDMAC_DT_PERID(2))>,
+ <&dma1
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+ | AT91_XDMAC_DT_PERID(3))>;
+ dma-names = "tx", "rx";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c0>;
#address-cells = <1>;
@@ -817,7 +854,14 @@
i2c2: i2c@f8024000 {
compatible = "atmel,at91sam9x5-i2c";
reg = <0xf8024000 0x4000>;
- interrupts = <34 4 6>;
+ interrupts = <34 IRQ_TYPE_LEVEL_HIGH 6>;
+ dmas = <&dma1
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+ | AT91_XDMAC_DT_PERID(6))>,
+ <&dma1
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+ | AT91_XDMAC_DT_PERID(7))>;
+ dma-names = "tx", "rx";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c2>;
#address-cells = <1>;
@@ -830,6 +874,10 @@
compatible = "atmel,hsmci";
reg = <0xfc000000 0x600>;
interrupts = <36 IRQ_TYPE_LEVEL_HIGH 0>;
+ dmas = <&dma1
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+ | AT91_XDMAC_DT_PERID(1))>;
+ dma-names = "rxtx";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_mmc1_clk_cmd_dat0 &pinctrl_mmc1_dat1_3>;
status = "disabled";
@@ -843,6 +891,13 @@
compatible = "atmel,at91sam9260-usart";
reg = <0xfc008000 0x100>;
interrupts = <29 IRQ_TYPE_LEVEL_HIGH 5>;
+ dmas = <&dma1
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+ | AT91_XDMAC_DT_PERID(16))>,
+ <&dma1
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+ | AT91_XDMAC_DT_PERID(17))>;
+ dma-names = "tx", "rx";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usart2 &pinctrl_usart2_rts &pinctrl_usart2_cts>;
clocks = <&usart2_clk>;
@@ -854,6 +909,13 @@
compatible = "atmel,at91sam9260-usart";
reg = <0xfc00c000 0x100>;
interrupts = <30 IRQ_TYPE_LEVEL_HIGH 5>;
+ dmas = <&dma1
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+ | AT91_XDMAC_DT_PERID(18))>,
+ <&dma1
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+ | AT91_XDMAC_DT_PERID(19))>;
+ dma-names = "tx", "rx";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usart3>;
clocks = <&usart3_clk>;
@@ -865,6 +927,13 @@
compatible = "atmel,at91sam9260-usart";
reg = <0xfc010000 0x100>;
interrupts = <31 IRQ_TYPE_LEVEL_HIGH 5>;
+ dmas = <&dma1
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+ | AT91_XDMAC_DT_PERID(20))>,
+ <&dma1
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+ | AT91_XDMAC_DT_PERID(21))>;
+ dma-names = "tx", "rx";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usart4>;
clocks = <&usart4_clk>;
diff --git a/arch/arm/boot/dts/tegra114.dtsi b/arch/arm/boot/dts/tegra114.dtsi
index 222f3b3f4dd..4296b5398bf 100644
--- a/arch/arm/boot/dts/tegra114.dtsi
+++ b/arch/arm/boot/dts/tegra114.dtsi
@@ -1,5 +1,6 @@
#include <dt-bindings/clock/tegra114-car.h>
#include <dt-bindings/gpio/tegra-gpio.h>
+#include <dt-bindings/memory/tegra114-mc.h>
#include <dt-bindings/pinctrl/pinctrl-tegra.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
@@ -50,6 +51,8 @@
resets = <&tegra_car 27>;
reset-names = "dc";
+ iommus = <&mc TEGRA_SWGROUP_DC>;
+
nvidia,head = <0>;
rgb {
@@ -67,6 +70,8 @@
resets = <&tegra_car 26>;
reset-names = "dc";
+ iommus = <&mc TEGRA_SWGROUP_DCB>;
+
nvidia,head = <1>;
rgb {
@@ -498,15 +503,15 @@
reset-names = "fuse";
};
- iommu@70019010 {
- compatible = "nvidia,tegra114-smmu", "nvidia,tegra30-smmu";
- reg = <0x70019010 0x02c
- 0x700191f0 0x010
- 0x70019228 0x074>;
- nvidia,#asids = <4>;
- dma-window = <0 0x40000000>;
- nvidia,swgroups = <0x18659fe>;
- nvidia,ahb = <&ahb>;
+ mc: memory-controller@70019000 {
+ compatible = "nvidia,tegra114-mc";
+ reg = <0x70019000 0x1000>;
+ clocks = <&tegra_car TEGRA114_CLK_MC>;
+ clock-names = "mc";
+
+ interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
+
+ #iommu-cells = <1>;
};
ahub@70080000 {
diff --git a/arch/arm/boot/dts/tegra124-jetson-tk1.dts b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
index 51b373ff106..4eb540be368 100644
--- a/arch/arm/boot/dts/tegra124-jetson-tk1.dts
+++ b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
@@ -1942,4 +1942,48 @@
<&tegra_car TEGRA124_CLK_EXTERN1>;
clock-names = "pll_a", "pll_a_out0", "mclk";
};
+
+ thermal-zones {
+ cpu {
+ trips {
+ trip@0 {
+ temperature = <101000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ /* There are currently no cooling maps because there are no cooling devices */
+ };
+ };
+
+ mem {
+ trips {
+ trip@0 {
+ temperature = <101000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ /* There are currently no cooling maps because there are no cooling devices */
+ };
+ };
+
+ gpu {
+ trips {
+ trip@0 {
+ temperature = <101000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ /* There are currently no cooling maps because there are no cooling devices */
+ };
+ };
+ };
};
diff --git a/arch/arm/boot/dts/tegra124.dtsi b/arch/arm/boot/dts/tegra124.dtsi
index df2b06b2998..4be06c6ea0c 100644
--- a/arch/arm/boot/dts/tegra124.dtsi
+++ b/arch/arm/boot/dts/tegra124.dtsi
@@ -1,8 +1,10 @@
#include <dt-bindings/clock/tegra124-car.h>
#include <dt-bindings/gpio/tegra-gpio.h>
+#include <dt-bindings/memory/tegra124-mc.h>
#include <dt-bindings/pinctrl/pinctrl-tegra.h>
#include <dt-bindings/pinctrl/pinctrl-tegra-xusb.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/thermal/tegra124-soctherm.h>
#include "skeleton.dtsi"
@@ -102,6 +104,8 @@
resets = <&tegra_car 27>;
reset-names = "dc";
+ iommus = <&mc TEGRA_SWGROUP_DC>;
+
nvidia,head = <0>;
};
@@ -115,6 +119,8 @@
resets = <&tegra_car 26>;
reset-names = "dc";
+ iommus = <&mc TEGRA_SWGROUP_DCB>;
+
nvidia,head = <1>;
};
@@ -275,7 +281,8 @@
pinmux: pinmux@0,70000868 {
compatible = "nvidia,tegra124-pinmux";
reg = <0x0 0x70000868 0x0 0x164>, /* Pad control registers */
- <0x0 0x70003000 0x0 0x434>; /* Mux registers */
+ <0x0 0x70003000 0x0 0x434>, /* Mux registers */
+ <0x0 0x70000820 0x0 0x008>; /* MIPI pad control */
};
/*
@@ -551,6 +558,17 @@
reset-names = "fuse";
};
+ mc: memory-controller@0,70019000 {
+ compatible = "nvidia,tegra124-mc";
+ reg = <0x0 0x70019000 0x0 0x1000>;
+ clocks = <&tegra_car TEGRA124_CLK_MC>;
+ clock-names = "mc";
+
+ interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
+
+ #iommu-cells = <1>;
+ };
+
sata@0,70020000 {
compatible = "nvidia,tegra124-ahci";
@@ -640,6 +658,18 @@
status = "disabled";
};
+ soctherm: thermal-sensor@0,700e2000 {
+ compatible = "nvidia,tegra124-soctherm";
+ reg = <0x0 0x700e2000 0x0 0x1000>;
+ interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&tegra_car TEGRA124_CLK_TSENSOR>,
+ <&tegra_car TEGRA124_CLK_SOC_THERM>;
+ clock-names = "tsensor", "soctherm";
+ resets = <&tegra_car 78>;
+ reset-names = "soctherm";
+ #thermal-sensor-cells = <1>;
+ };
+
ahub@0,70300000 {
compatible = "nvidia,tegra124-ahub";
reg = <0x0 0x70300000 0x0 0x200>,
@@ -881,6 +911,40 @@
};
};
+ thermal-zones {
+ cpu {
+ polling-delay-passive = <1000>;
+ polling-delay = <1000>;
+
+ thermal-sensors =
+ <&soctherm TEGRA124_SOCTHERM_SENSOR_CPU>;
+ };
+
+ mem {
+ polling-delay-passive = <1000>;
+ polling-delay = <1000>;
+
+ thermal-sensors =
+ <&soctherm TEGRA124_SOCTHERM_SENSOR_MEM>;
+ };
+
+ gpu {
+ polling-delay-passive = <1000>;
+ polling-delay = <1000>;
+
+ thermal-sensors =
+ <&soctherm TEGRA124_SOCTHERM_SENSOR_GPU>;
+ };
+
+ pllx {
+ polling-delay-passive = <1000>;
+ polling-delay = <1000>;
+
+ thermal-sensors =
+ <&soctherm TEGRA124_SOCTHERM_SENSOR_PLLX>;
+ };
+ };
+
timer {
compatible = "arm,armv7-timer";
interrupts = <GIC_PPI 13
diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi
index b270b9e3d45..99475f6e76a 100644
--- a/arch/arm/boot/dts/tegra30.dtsi
+++ b/arch/arm/boot/dts/tegra30.dtsi
@@ -1,5 +1,6 @@
#include <dt-bindings/clock/tegra30-car.h>
#include <dt-bindings/gpio/tegra-gpio.h>
+#include <dt-bindings/memory/tegra30-mc.h>
#include <dt-bindings/pinctrl/pinctrl-tegra.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
@@ -166,6 +167,8 @@
resets = <&tegra_car 27>;
reset-names = "dc";
+ iommus = <&mc TEGRA_SWGROUP_DC>;
+
nvidia,head = <0>;
rgb {
@@ -183,6 +186,8 @@
resets = <&tegra_car 26>;
reset-names = "dc";
+ iommus = <&mc TEGRA_SWGROUP_DCB>;
+
nvidia,head = <1>;
rgb {
@@ -615,23 +620,15 @@
clock-names = "pclk", "clk32k_in";
};
- memory-controller@7000f000 {
+ mc: memory-controller@7000f000 {
compatible = "nvidia,tegra30-mc";
- reg = <0x7000f000 0x010
- 0x7000f03c 0x1b4
- 0x7000f200 0x028
- 0x7000f284 0x17c>;
+ reg = <0x7000f000 0x400>;
+ clocks = <&tegra_car TEGRA30_CLK_MC>;
+ clock-names = "mc";
+
interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
- };
- iommu@7000f010 {
- compatible = "nvidia,tegra30-smmu";
- reg = <0x7000f010 0x02c
- 0x7000f1f0 0x010
- 0x7000f228 0x05c>;
- nvidia,#asids = <4>; /* # of ASIDs */
- dma-window = <0 0x40000000>; /* IOVA start & length */
- nvidia,ahb = <&ahb>;
+ #iommu-cells = <1>;
};
fuse@7000f800 {
diff --git a/arch/arm/configs/ape6evm_defconfig b/arch/arm/configs/ape6evm_defconfig
index db81d8ce4c0..9e9a72e3d30 100644
--- a/arch/arm/configs/ape6evm_defconfig
+++ b/arch/arm/configs/ape6evm_defconfig
@@ -33,7 +33,7 @@ CONFIG_ARM_APPENDED_DTB=y
CONFIG_VFP=y
CONFIG_NEON=y
CONFIG_BINFMT_MISC=y
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/arm/configs/armadillo800eva_defconfig b/arch/arm/configs/armadillo800eva_defconfig
index d9675c68a39..5666e3700a8 100644
--- a/arch/arm/configs/armadillo800eva_defconfig
+++ b/arch/arm/configs/armadillo800eva_defconfig
@@ -43,7 +43,7 @@ CONFIG_KEXEC=y
CONFIG_VFP=y
CONFIG_NEON=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/arm/configs/bcm_defconfig b/arch/arm/configs/bcm_defconfig
index 83a87e48901..7117662bab2 100644
--- a/arch/arm/configs/bcm_defconfig
+++ b/arch/arm/configs/bcm_defconfig
@@ -39,7 +39,7 @@ CONFIG_CPU_IDLE=y
CONFIG_VFP=y
CONFIG_NEON=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_PACKET_DIAG=y
diff --git a/arch/arm/configs/bockw_defconfig b/arch/arm/configs/bockw_defconfig
index 1dde5daa84f..3125e00f05a 100644
--- a/arch/arm/configs/bockw_defconfig
+++ b/arch/arm/configs/bockw_defconfig
@@ -29,7 +29,7 @@ CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_ARM_APPENDED_DTB=y
CONFIG_VFP=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/arm/configs/davinci_all_defconfig b/arch/arm/configs/davinci_all_defconfig
index 759f9b0053e..235842c9ba9 100644
--- a/arch/arm/configs/davinci_all_defconfig
+++ b/arch/arm/configs/davinci_all_defconfig
@@ -49,7 +49,7 @@ CONFIG_CPU_FREQ_GOV_PERFORMANCE=m
CONFIG_CPU_FREQ_GOV_POWERSAVE=m
CONFIG_CPU_FREQ_GOV_ONDEMAND=m
CONFIG_CPU_IDLE=y
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
index c4199072902..5ef14de00a2 100644
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -27,7 +27,7 @@ CONFIG_ARM_ATAG_DTB_COMPAT=y
CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M console=ttySAC1,115200 init=/linuxrc mem=256M"
CONFIG_VFP=y
CONFIG_NEON=y
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/arm/configs/ezx_defconfig b/arch/arm/configs/ezx_defconfig
index eb440aae428..ea316c4b890 100644
--- a/arch/arm/configs/ezx_defconfig
+++ b/arch/arm/configs/ezx_defconfig
@@ -39,7 +39,6 @@ CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
CONFIG_PM=y
CONFIG_APM_EMULATION=y
-CONFIG_PM_RUNTIME=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/arm/configs/hisi_defconfig b/arch/arm/configs/hisi_defconfig
index 1fe3621faf6..112543665dd 100644
--- a/arch/arm/configs/hisi_defconfig
+++ b/arch/arm/configs/hisi_defconfig
@@ -18,7 +18,7 @@ CONFIG_ARM_APPENDED_DTB=y
CONFIG_ARM_ATAG_DTB_COMPAT=y
CONFIG_NEON=y
CONFIG_ARM_ATAG_DTB_COMPAT_CMDLINE_FROM_BOOTLOADER=y
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/arm/configs/imote2_defconfig b/arch/arm/configs/imote2_defconfig
index 182e5469266..18e59feaa30 100644
--- a/arch/arm/configs/imote2_defconfig
+++ b/arch/arm/configs/imote2_defconfig
@@ -31,7 +31,6 @@ CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
CONFIG_PM=y
CONFIG_APM_EMULATION=y
-CONFIG_PM_RUNTIME=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index f707cd2691c..7c2075a07eb 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -54,7 +54,7 @@ CONFIG_ARM_IMX6Q_CPUFREQ=y
CONFIG_VFP=y
CONFIG_NEON=y
CONFIG_BINFMT_MISC=m
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
CONFIG_PM_DEBUG=y
CONFIG_PM_TEST_SUSPEND=y
CONFIG_NET=y
diff --git a/arch/arm/configs/keystone_defconfig b/arch/arm/configs/keystone_defconfig
index 20a3ff99fae..a2067cbfe17 100644
--- a/arch/arm/configs/keystone_defconfig
+++ b/arch/arm/configs/keystone_defconfig
@@ -30,7 +30,7 @@ CONFIG_HIGHMEM=y
CONFIG_VFP=y
CONFIG_NEON=y
# CONFIG_SUSPEND is not set
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/arm/configs/kzm9g_defconfig b/arch/arm/configs/kzm9g_defconfig
index 8cb115d74fd..5d63fc5d2d4 100644
--- a/arch/arm/configs/kzm9g_defconfig
+++ b/arch/arm/configs/kzm9g_defconfig
@@ -43,7 +43,7 @@ CONFIG_KEXEC=y
CONFIG_VFP=y
CONFIG_NEON=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/arm/configs/lager_defconfig b/arch/arm/configs/lager_defconfig
index 929c571ea29..a82afc916a8 100644
--- a/arch/arm/configs/lager_defconfig
+++ b/arch/arm/configs/lager_defconfig
@@ -37,7 +37,7 @@ CONFIG_AUTO_ZRELADDR=y
CONFIG_VFP=y
CONFIG_NEON=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/arm/configs/mackerel_defconfig b/arch/arm/configs/mackerel_defconfig
index 57ececba2ae..05a529311b4 100644
--- a/arch/arm/configs/mackerel_defconfig
+++ b/arch/arm/configs/mackerel_defconfig
@@ -28,7 +28,6 @@ CONFIG_KEXEC=y
CONFIG_VFP=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_PM=y
-CONFIG_PM_RUNTIME=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/arm/configs/marzen_defconfig b/arch/arm/configs/marzen_defconfig
index ff91630d34e..3c8b6d82318 100644
--- a/arch/arm/configs/marzen_defconfig
+++ b/arch/arm/configs/marzen_defconfig
@@ -33,7 +33,7 @@ CONFIG_ARM_APPENDED_DTB=y
CONFIG_VFP=y
CONFIG_KEXEC=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index d7896580f3b..2328fe752e9 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -479,4 +479,4 @@ CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_LOCKUP_DETECTOR=y
CONFIG_CRYPTO_DEV_TEGRA_AES=y
-CONFIG_GENERIC_CPUFREQ_CPU0=y
+CONFIG_CPUFREQ_DT=y
diff --git a/arch/arm/configs/omap1_defconfig b/arch/arm/configs/omap1_defconfig
index 115cda9f326..a7dce674f1b 100644
--- a/arch/arm/configs/omap1_defconfig
+++ b/arch/arm/configs/omap1_defconfig
@@ -63,7 +63,6 @@ CONFIG_FPE_NWFPE=y
CONFIG_BINFMT_MISC=y
CONFIG_PM=y
# CONFIG_SUSPEND is not set
-CONFIG_PM_RUNTIME=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 3e09286f7ff..c2c3a852af9 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -127,6 +127,8 @@ CONFIG_SRAM=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI_PLATFORM=y
CONFIG_MD=y
CONFIG_NETDEVICES=y
# CONFIG_NET_VENDOR_ARC is not set
diff --git a/arch/arm/configs/prima2_defconfig b/arch/arm/configs/prima2_defconfig
index 23591dba47a..f610230b9c1 100644
--- a/arch/arm/configs/prima2_defconfig
+++ b/arch/arm/configs/prima2_defconfig
@@ -18,7 +18,7 @@ CONFIG_PREEMPT=y
CONFIG_AEABI=y
CONFIG_KEXEC=y
CONFIG_BINFMT_MISC=y
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
diff --git a/arch/arm/configs/sama5_defconfig b/arch/arm/configs/sama5_defconfig
index b58fb32770a..afa24799477 100644
--- a/arch/arm/configs/sama5_defconfig
+++ b/arch/arm/configs/sama5_defconfig
@@ -32,7 +32,7 @@ CONFIG_VFP=y
CONFIG_NEON=y
CONFIG_KERNEL_MODE_NEON=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
CONFIG_PM_DEBUG=y
CONFIG_PM_ADVANCED_DEBUG=y
CONFIG_NET=y
diff --git a/arch/arm/configs/shmobile_defconfig b/arch/arm/configs/shmobile_defconfig
index 63fb5316ff0..3df6ca0c1d1 100644
--- a/arch/arm/configs/shmobile_defconfig
+++ b/arch/arm/configs/shmobile_defconfig
@@ -39,7 +39,7 @@ CONFIG_KEXEC=y
CONFIG_VFP=y
CONFIG_NEON=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -146,7 +146,6 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_S35390A=y
CONFIG_DMADEVICES=y
CONFIG_SH_DMAE=y
-CONFIG_RCAR_AUDMAC_PP=y
CONFIG_RCAR_DMAC=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_PWM=y
@@ -178,5 +177,5 @@ CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
CONFIG_CPU_THERMAL=y
-CONFIG_GENERIC_CPUFREQ_CPU0=y
+CONFIG_CPUFREQ_DT=y
CONFIG_REGULATOR_DA9210=y
diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig
index f7ac0379850..7a342d2780a 100644
--- a/arch/arm/configs/sunxi_defconfig
+++ b/arch/arm/configs/sunxi_defconfig
@@ -11,7 +11,7 @@ CONFIG_ARM_APPENDED_DTB=y
CONFIG_ARM_ATAG_DTB_COMPAT=y
CONFIG_VFP=y
CONFIG_NEON=y
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
index 40750f93aa8..3ea9c3377cc 100644
--- a/arch/arm/configs/tegra_defconfig
+++ b/arch/arm/configs/tegra_defconfig
@@ -46,7 +46,7 @@ CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_CPU_IDLE=y
CONFIG_VFP=y
CONFIG_NEON=y
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig
index d219d6a4323..6a1c9898fd0 100644
--- a/arch/arm/configs/u8500_defconfig
+++ b/arch/arm/configs/u8500_defconfig
@@ -25,7 +25,7 @@ CONFIG_CPU_IDLE=y
CONFIG_ARM_U8500_CPUIDLE=y
CONFIG_VFP=y
CONFIG_NEON=y
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/arm/configs/vt8500_v6_v7_defconfig b/arch/arm/configs/vt8500_v6_v7_defconfig
index 9e7a2563969..1bfaa7bfc39 100644
--- a/arch/arm/configs/vt8500_v6_v7_defconfig
+++ b/arch/arm/configs/vt8500_v6_v7_defconfig
@@ -16,7 +16,7 @@ CONFIG_ARM_APPENDED_DTB=y
CONFIG_ARM_ATAG_DTB_COMPAT=y
CONFIG_VFP=y
CONFIG_NEON=y
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
CONFIG_NET=y
CONFIG_UNIX=y
CONFIG_INET=y
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index e6e3446abdf..b52101d37ec 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -121,13 +121,12 @@ static inline unsigned long dma_max_pfn(struct device *dev)
}
#define dma_max_pfn(dev) dma_max_pfn(dev)
-static inline int set_arch_dma_coherent_ops(struct device *dev)
-{
- dev->archdata.dma_coherent = true;
- set_dma_ops(dev, &arm_coherent_dma_ops);
- return 0;
-}
-#define set_arch_dma_coherent_ops(dev) set_arch_dma_coherent_ops(dev)
+#define arch_setup_dma_ops arch_setup_dma_ops
+extern void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+ struct iommu_ops *iommu, bool coherent);
+
+#define arch_teardown_dma_ops arch_teardown_dma_ops
+extern void arch_teardown_dma_ops(struct device *dev);
/* do not use this function in a driver */
static inline bool is_device_dma_coherent(struct device *dev)
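With set_arch_dma_coherent_ops() removed, callers now describe the device's DMA window and coherency once and let the architecture choose the appropriate dma_map_ops. A rough sketch of the caller side, assuming only the signature declared above (the real consumer is the OF/device-tree DMA configuration code, which is not part of this hunk):

/*
 * Hypothetical bus-level helper: hand the parsed dma-ranges and the
 * "dma-coherent" property over to the architecture.  No IOMMU in this
 * sketch, so NULL is passed for the iommu_ops argument.
 */
static void bus_dma_configure(struct device *dev, u64 dma_base, u64 size,
			      bool coherent)
{
	arch_setup_dma_ops(dev, dma_base, size, NULL, coherent);
}

The matching teardown on device removal would call arch_teardown_dma_ops(dev).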
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index b9db269c6e6..66ce17655bb 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -33,6 +33,11 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.hcr = HCR_GUEST_MASK;
+}
+
static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
{
return 1;
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 53036e21756..254e0650e48 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -150,8 +150,6 @@ struct kvm_vcpu_stat {
u32 halt_wakeup;
};
-int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
- const struct kvm_vcpu_init *init);
int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index acb0d571271..63e0ecc0490 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -52,6 +52,7 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_boot_hyp_pgd(void);
void free_hyp_pgds(void);
+void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
@@ -161,9 +162,10 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
}
static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
- unsigned long size)
+ unsigned long size,
+ bool ipa_uncached)
{
- if (!vcpu_has_cache_enabled(vcpu))
+ if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
kvm_flush_dcache_to_poc((void *)hva, size);
/*
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index e34934f63a4..f7c65adaa42 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -484,7 +484,7 @@ static void armpmu_disable(struct pmu *pmu)
armpmu->stop(armpmu);
}
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static int armpmu_runtime_resume(struct device *dev)
{
struct arm_pmu_platdata *plat = dev_get_platdata(dev);
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 8361652b6da..f9c86391103 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -18,6 +18,7 @@
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
+#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
@@ -806,6 +807,7 @@ static int __init customize_machine(void)
* machine from the device tree, if no callback is provided,
* otherwise we would always need an init_machine callback.
*/
+ of_iommu_init();
if (machine_desc->init_machine)
machine_desc->init_machine();
#ifdef CONFIG_OF
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 9e193c8a959..2d6d9100106 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -213,6 +213,11 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
int err;
struct kvm_vcpu *vcpu;
+ if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) {
+ err = -EBUSY;
+ goto out;
+ }
+
vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
if (!vcpu) {
err = -ENOMEM;
@@ -263,6 +268,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
/* Force users to call KVM_ARM_VCPU_INIT */
vcpu->arch.target = -1;
+ bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
/* Set up the timer */
kvm_timer_vcpu_init(vcpu);
@@ -419,6 +425,7 @@ static void update_vttbr(struct kvm *kvm)
static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
{
+ struct kvm *kvm = vcpu->kvm;
int ret;
if (likely(vcpu->arch.has_run_once))
@@ -427,15 +434,23 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
vcpu->arch.has_run_once = true;
/*
- * Initialize the VGIC before running a vcpu the first time on
- * this VM.
+ * Map the VGIC hardware resources before running a vcpu the first
+ * time on this VM.
*/
- if (unlikely(!vgic_initialized(vcpu->kvm))) {
- ret = kvm_vgic_init(vcpu->kvm);
+ if (unlikely(!vgic_ready(kvm))) {
+ ret = kvm_vgic_map_resources(kvm);
if (ret)
return ret;
}
+ /*
+ * Enable the arch timers only if we have an in-kernel VGIC
+ * and it has been properly initialized, since we cannot handle
+ * interrupts from the virtual timer with a userspace gic.
+ */
+ if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
+ kvm_timer_enable(kvm);
+
return 0;
}
@@ -649,6 +664,48 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
return -EINVAL;
}
+static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
+ const struct kvm_vcpu_init *init)
+{
+ unsigned int i;
+ int phys_target = kvm_target_cpu();
+
+ if (init->target != phys_target)
+ return -EINVAL;
+
+ /*
+ * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
+ * use the same target.
+ */
+ if (vcpu->arch.target != -1 && vcpu->arch.target != init->target)
+ return -EINVAL;
+
+ /* -ENOENT for unknown features, -EINVAL for invalid combinations. */
+ for (i = 0; i < sizeof(init->features) * 8; i++) {
+ bool set = (init->features[i / 32] & (1 << (i % 32)));
+
+ if (set && i >= KVM_VCPU_MAX_FEATURES)
+ return -ENOENT;
+
+ /*
+ * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
+ * use the same feature set.
+ */
+ if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES &&
+ test_bit(i, vcpu->arch.features) != set)
+ return -EINVAL;
+
+ if (set)
+ set_bit(i, vcpu->arch.features);
+ }
+
+ vcpu->arch.target = phys_target;
+
+ /* Now we know what it is, we can reset it. */
+ return kvm_reset_vcpu(vcpu);
+}
+
+
static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
struct kvm_vcpu_init *init)
{
@@ -659,10 +716,21 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
return ret;
/*
+ * Ensure a rebooted VM will fault in RAM pages and detect if the
+ * guest MMU is turned off and flush the caches as needed.
+ */
+ if (vcpu->arch.has_run_once)
+ stage2_unmap_vm(vcpu->kvm);
+
+ vcpu_reset_hcr(vcpu);
+
+ /*
* Handle the "start in power-off" case by marking the VCPU as paused.
*/
- if (__test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
+ if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
vcpu->arch.pause = true;
+ else
+ vcpu->arch.pause = false;
return 0;
}
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index cc0b78769bd..384bab67c46 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -38,7 +38,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
- vcpu->arch.hcr = HCR_GUEST_MASK;
return 0;
}
@@ -274,31 +273,6 @@ int __attribute_const__ kvm_target_cpu(void)
}
}
-int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
- const struct kvm_vcpu_init *init)
-{
- unsigned int i;
-
- /* We can only cope with guest==host and only on A15/A7 (for now). */
- if (init->target != kvm_target_cpu())
- return -EINVAL;
-
- vcpu->arch.target = init->target;
- bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
-
- /* -ENOENT for unknown features, -EINVAL for invalid combinations. */
- for (i = 0; i < sizeof(init->features) * 8; i++) {
- if (test_bit(i, (void *)init->features)) {
- if (i >= KVM_VCPU_MAX_FEATURES)
- return -ENOENT;
- set_bit(i, vcpu->arch.features);
- }
- }
-
- /* Now we know what it is, we can reset it. */
- return kvm_reset_vcpu(vcpu);
-}
-
int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
{
int target = kvm_target_cpu();
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 4cb5a93182e..5d3bfc0eb3f 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -187,15 +187,18 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
}
rt = vcpu->arch.mmio_decode.rt;
- data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), mmio.len);
- trace_kvm_mmio((mmio.is_write) ? KVM_TRACE_MMIO_WRITE :
- KVM_TRACE_MMIO_READ_UNSATISFIED,
- mmio.len, fault_ipa,
- (mmio.is_write) ? data : 0);
+ if (mmio.is_write) {
+ data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt),
+ mmio.len);
- if (mmio.is_write)
+ trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, mmio.len,
+ fault_ipa, data);
mmio_write_buf(mmio.data, mmio.len, data);
+ } else {
+ trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, mmio.len,
+ fault_ipa, 0);
+ }
if (vgic_handle_mmio(vcpu, run, &mmio))
return 1;
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 8664ff17cbb..1dc9778a00a 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -612,6 +612,71 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
unmap_range(kvm, kvm->arch.pgd, start, size);
}
+static void stage2_unmap_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *memslot)
+{
+ hva_t hva = memslot->userspace_addr;
+ phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
+ phys_addr_t size = PAGE_SIZE * memslot->npages;
+ hva_t reg_end = hva + size;
+
+ /*
+ * A memory region could potentially cover multiple VMAs, and any holes
+ * between them, so iterate over all of them to find out if we should
+ * unmap any of them.
+ *
+ * +--------------------------------------------+
+ * +---------------+----------------+ +----------------+
+ * | : VMA 1 | VMA 2 | | VMA 3 : |
+ * +---------------+----------------+ +----------------+
+ * | memory region |
+ * +--------------------------------------------+
+ */
+ do {
+ struct vm_area_struct *vma = find_vma(current->mm, hva);
+ hva_t vm_start, vm_end;
+
+ if (!vma || vma->vm_start >= reg_end)
+ break;
+
+ /*
+ * Take the intersection of this VMA with the memory region
+ */
+ vm_start = max(hva, vma->vm_start);
+ vm_end = min(reg_end, vma->vm_end);
+
+ if (!(vma->vm_flags & VM_PFNMAP)) {
+ gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
+ unmap_stage2_range(kvm, gpa, vm_end - vm_start);
+ }
+ hva = vm_end;
+ } while (hva < reg_end);
+}
+
+/**
+ * stage2_unmap_vm - Unmap Stage-2 RAM mappings
+ * @kvm: The struct kvm pointer
+ *
+ * Go through the memregions and unmap any regular RAM
+ * backing memory already mapped to the VM.
+ */
+void stage2_unmap_vm(struct kvm *kvm)
+{
+ struct kvm_memslots *slots;
+ struct kvm_memory_slot *memslot;
+ int idx;
+
+ idx = srcu_read_lock(&kvm->srcu);
+ spin_lock(&kvm->mmu_lock);
+
+ slots = kvm_memslots(kvm);
+ kvm_for_each_memslot(memslot, slots)
+ stage2_unmap_memslot(kvm, memslot);
+
+ spin_unlock(&kvm->mmu_lock);
+ srcu_read_unlock(&kvm->srcu, idx);
+}
+
/**
* kvm_free_stage2_pgd - free all stage-2 tables
* @kvm: The KVM struct pointer for the VM.
@@ -853,6 +918,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct vm_area_struct *vma;
pfn_t pfn;
pgprot_t mem_type = PAGE_S2;
+ bool fault_ipa_uncached;
write_fault = kvm_is_write_fault(vcpu);
if (fault_status == FSC_PERM && !write_fault) {
@@ -919,6 +985,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (!hugetlb && !force_pte)
hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);
+ fault_ipa_uncached = memslot->flags & KVM_MEMSLOT_INCOHERENT;
+
if (hugetlb) {
pmd_t new_pmd = pfn_pmd(pfn, mem_type);
new_pmd = pmd_mkhuge(new_pmd);
@@ -926,7 +994,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
kvm_set_s2pmd_writable(&new_pmd);
kvm_set_pfn_dirty(pfn);
}
- coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE);
+ coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE,
+ fault_ipa_uncached);
ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
} else {
pte_t new_pte = pfn_pte(pfn, mem_type);
@@ -934,7 +1003,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
kvm_set_s2pte_writable(&new_pte);
kvm_set_pfn_dirty(pfn);
}
- coherent_cache_guest_page(vcpu, hva, PAGE_SIZE);
+ coherent_cache_guest_page(vcpu, hva, PAGE_SIZE,
+ fault_ipa_uncached);
ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE));
}
@@ -1294,11 +1364,12 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
hva = vm_end;
} while (hva < reg_end);
- if (ret) {
- spin_lock(&kvm->mmu_lock);
+ spin_lock(&kvm->mmu_lock);
+ if (ret)
unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
- spin_unlock(&kvm->mmu_lock);
- }
+ else
+ stage2_flush_memslot(kvm, memslot);
+ spin_unlock(&kvm->mmu_lock);
return ret;
}
@@ -1310,6 +1381,15 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
unsigned long npages)
{
+ /*
+ * Readonly memslots are not incoherent with the caches by definition,
+ * but in practice, they are used mostly to emulate ROMs or NOR flashes
+ * that the guest may consider devices and hence map as uncached.
+ * To prevent incoherency issues in these cases, tag all readonly
+ * regions as incoherent.
+ */
+ if (slot->flags & KVM_MEM_READONLY)
+ slot->flags |= KVM_MEMSLOT_INCOHERENT;
return 0;
}
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index 09cf37737ee..58cb3248d27 100644
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -15,6 +15,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/preempt.h>
#include <linux/kvm_host.h>
#include <linux/wait.h>
@@ -166,6 +167,23 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
{
+ int i;
+ struct kvm_vcpu *tmp;
+
+ /*
+ * The KVM ABI specifies that a system event exit may call KVM_RUN
+ * again and may perform shutdown/reboot at a later time than when the
+ * actual request is made. Since we are implementing PSCI and a
+ * caller of PSCI reboot and shutdown expects that the system shuts
+ * down or reboots immediately, let's make sure that VCPUs are not run
+ * after this call is handled and before the VCPUs have been
+ * re-initialized.
+ */
+ kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
+ tmp->arch.pause = true;
+ kvm_vcpu_kick(tmp);
+ }
+
memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
vcpu->run->system_event.type = type;
vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
diff --git a/arch/arm/mach-davinci/pm_domain.c b/arch/arm/mach-davinci/pm_domain.c
index 6b98413cebd..641edc31393 100644
--- a/arch/arm/mach-davinci/pm_domain.c
+++ b/arch/arm/mach-davinci/pm_domain.c
@@ -14,7 +14,7 @@
#include <linux/pm_clock.h>
#include <linux/platform_device.h>
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static int davinci_pm_runtime_suspend(struct device *dev)
{
int ret;
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index e4a00bafffc..603820e5aba 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -21,7 +21,7 @@ menuconfig ARCH_EXYNOS
select HAVE_S3C_RTC if RTC_CLASS
select PINCTRL
select PINCTRL_EXYNOS
- select PM_GENERIC_DOMAINS if PM_RUNTIME
+ select PM_GENERIC_DOMAINS if PM
select S5P_DEV_MFC
select SRAM
select MFD_SYSCON
diff --git a/arch/arm/mach-keystone/pm_domain.c b/arch/arm/mach-keystone/pm_domain.c
index ca79ddac38b..ef6041e7e67 100644
--- a/arch/arm/mach-keystone/pm_domain.c
+++ b/arch/arm/mach-keystone/pm_domain.c
@@ -19,7 +19,7 @@
#include <linux/clk-provider.h>
#include <linux/of.h>
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static int keystone_pm_runtime_suspend(struct device *dev)
{
int ret;
diff --git a/arch/arm/mach-omap1/pm_bus.c b/arch/arm/mach-omap1/pm_bus.c
index 3f2d3967239..c40e209de65 100644
--- a/arch/arm/mach-omap1/pm_bus.c
+++ b/arch/arm/mach-omap1/pm_bus.c
@@ -21,7 +21,7 @@
#include "soc.h"
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static int omap1_pm_runtime_suspend(struct device *dev)
{
int ret;
@@ -59,7 +59,7 @@ static struct dev_pm_domain default_pm_domain = {
#define OMAP1_PM_DOMAIN (&default_pm_domain)
#else
#define OMAP1_PM_DOMAIN NULL
-#endif /* CONFIG_PM_RUNTIME */
+#endif /* CONFIG_PM */
static struct pm_clk_notifier_block platform_bus_notifier = {
.pm_domain = OMAP1_PM_DOMAIN,
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index f0edec199cd..6ab656cc4f1 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -15,7 +15,7 @@ config ARCH_OMAP3
select ARM_CPU_SUSPEND if PM
select OMAP_INTERCONNECT
select PM_OPP if PM
- select PM_RUNTIME if CPU_IDLE
+ select PM if CPU_IDLE
select SOC_HAS_OMAP2_SDRC
config ARCH_OMAP4
@@ -32,7 +32,7 @@ config ARCH_OMAP4
select PL310_ERRATA_588369 if CACHE_L2X0
select PL310_ERRATA_727915 if CACHE_L2X0
select PM_OPP if PM
- select PM_RUNTIME if CPU_IDLE
+ select PM if CPU_IDLE
select ARM_ERRATA_754322
select ARM_ERRATA_775420
@@ -103,7 +103,7 @@ config ARCH_OMAP2PLUS_TYPICAL
select I2C_OMAP
select MENELAUS if ARCH_OMAP2
select NEON if CPU_V7
- select PM_RUNTIME
+ select PM
select REGULATOR
select TWL4030_CORE if ARCH_OMAP3 || ARCH_OMAP4
select TWL4030_POWER if ARCH_OMAP3 || ARCH_OMAP4
diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h
index 641337c6cde..a4282e79143 100644
--- a/arch/arm/mach-omap2/clock.h
+++ b/arch/arm/mach-omap2/clock.h
@@ -270,8 +270,6 @@ extern const struct clksel_rate div31_1to31_rates[];
extern void __iomem *clk_memmaps[];
-extern int am33xx_clk_init(void);
-
extern int omap2_clkops_enable_clkdm(struct clk_hw *hw);
extern void omap2_clkops_disable_clkdm(struct clk_hw *hw);
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 53841dea80e..c25feba0581 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -471,11 +471,15 @@ void __init omap3xxx_check_revision(void)
cpu_rev = "1.0";
break;
case 1:
- /* FALLTHROUGH */
- default:
omap_revision = AM437X_REV_ES1_1;
cpu_rev = "1.1";
break;
+ case 2:
+ /* FALLTHROUGH */
+ default:
+ omap_revision = AM437X_REV_ES1_2;
+ cpu_rev = "1.2";
+ break;
}
break;
case 0xb8f2:
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 4fc838354e3..a1bd6affb50 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -361,7 +361,7 @@ static void __init omap_hwmod_init_postsetup(void)
u8 postsetup_state;
/* Set the default postsetup state for all hwmods */
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
postsetup_state = _HWMOD_STATE_IDLE;
#else
postsetup_state = _HWMOD_STATE_ENABLED;
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index 8c58b71c272..be9541e1865 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -588,7 +588,7 @@ odbs_exit:
return ERR_PTR(ret);
}
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static int _od_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
diff --git a/arch/arm/mach-omap2/soc.h b/arch/arm/mach-omap2/soc.h
index 4376f59626d..c1a3b441631 100644
--- a/arch/arm/mach-omap2/soc.h
+++ b/arch/arm/mach-omap2/soc.h
@@ -446,6 +446,7 @@ IS_OMAP_TYPE(3430, 0x3430)
#define AM437X_CLASS 0x43700000
#define AM437X_REV_ES1_0 (AM437X_CLASS | (0x10 << 8))
#define AM437X_REV_ES1_1 (AM437X_CLASS | (0x11 << 8))
+#define AM437X_REV_ES1_2 (AM437X_CLASS | (0x12 << 8))
#define OMAP443X_CLASS 0x44300044
#define OMAP4430_REV_ES1_0 (OMAP443X_CLASS | (0x10 << 8))
diff --git a/arch/arm/mach-shmobile/board-lager.c b/arch/arm/mach-shmobile/board-lager.c
index b47262afb24..f8197eb6e56 100644
--- a/arch/arm/mach-shmobile/board-lager.c
+++ b/arch/arm/mach-shmobile/board-lager.c
@@ -32,7 +32,6 @@
#include <linux/pinctrl/machine.h>
#include <linux/platform_data/camera-rcar.h>
#include <linux/platform_data/gpio-rcar.h>
-#include <linux/platform_data/rcar-du.h>
#include <linux/platform_data/usb-rcar-gen2-phy.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
@@ -83,61 +82,6 @@
*
*/
-/* DU */
-static struct rcar_du_encoder_data lager_du_encoders[] = {
- {
- .type = RCAR_DU_ENCODER_VGA,
- .output = RCAR_DU_OUTPUT_DPAD0,
- }, {
- .type = RCAR_DU_ENCODER_NONE,
- .output = RCAR_DU_OUTPUT_LVDS1,
- .connector.lvds.panel = {
- .width_mm = 210,
- .height_mm = 158,
- .mode = {
- .pixelclock = 65000000,
- .hactive = 1024,
- .hfront_porch = 20,
- .hback_porch = 160,
- .hsync_len = 136,
- .vactive = 768,
- .vfront_porch = 3,
- .vback_porch = 29,
- .vsync_len = 6,
- },
- },
- },
-};
-
-static const struct rcar_du_platform_data lager_du_pdata __initconst = {
- .encoders = lager_du_encoders,
- .num_encoders = ARRAY_SIZE(lager_du_encoders),
-};
-
-static const struct resource du_resources[] __initconst = {
- DEFINE_RES_MEM(0xfeb00000, 0x70000),
- DEFINE_RES_MEM_NAMED(0xfeb90000, 0x1c, "lvds.0"),
- DEFINE_RES_MEM_NAMED(0xfeb94000, 0x1c, "lvds.1"),
- DEFINE_RES_IRQ(gic_spi(256)),
- DEFINE_RES_IRQ(gic_spi(268)),
- DEFINE_RES_IRQ(gic_spi(269)),
-};
-
-static void __init lager_add_du_device(void)
-{
- struct platform_device_info info = {
- .name = "rcar-du-r8a7790",
- .id = -1,
- .res = du_resources,
- .num_res = ARRAY_SIZE(du_resources),
- .data = &lager_du_pdata,
- .size_data = sizeof(lager_du_pdata),
- .dma_mask = DMA_BIT_MASK(32),
- };
-
- platform_device_register_full(&info);
-}
-
/* LEDS */
static struct gpio_led lager_leds[] = {
{
@@ -800,8 +744,6 @@ static void __init lager_add_standard_devices(void)
platform_device_register_full(&ether_info);
- lager_add_du_device();
-
platform_device_register_resndata(NULL, "qspi", 0,
qspi_resources,
ARRAY_SIZE(qspi_resources),
diff --git a/arch/arm/mach-shmobile/board-marzen.c b/arch/arm/mach-shmobile/board-marzen.c
index 994dc7d86ae..598f704f76a 100644
--- a/arch/arm/mach-shmobile/board-marzen.c
+++ b/arch/arm/mach-shmobile/board-marzen.c
@@ -27,7 +27,6 @@
#include <linux/pinctrl/machine.h>
#include <linux/platform_data/camera-rcar.h>
#include <linux/platform_data/gpio-rcar.h>
-#include <linux/platform_data/rcar-du.h>
#include <linux/platform_data/usb-rcar-phy.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
@@ -171,62 +170,6 @@ static struct platform_device hspi_device = {
.num_resources = ARRAY_SIZE(hspi_resources),
};
-/*
- * DU
- *
- * The panel only specifies the [hv]display and [hv]total values. The position
- * and width of the sync pulses don't matter, they're copied from VESA timings.
- */
-static struct rcar_du_encoder_data du_encoders[] = {
- {
- .type = RCAR_DU_ENCODER_VGA,
- .output = RCAR_DU_OUTPUT_DPAD0,
- }, {
- .type = RCAR_DU_ENCODER_LVDS,
- .output = RCAR_DU_OUTPUT_DPAD1,
- .connector.lvds.panel = {
- .width_mm = 210,
- .height_mm = 158,
- .mode = {
- .pixelclock = 65000000,
- .hactive = 1024,
- .hfront_porch = 20,
- .hback_porch = 160,
- .hsync_len = 136,
- .vactive = 768,
- .vfront_porch = 3,
- .vback_porch = 29,
- .vsync_len = 6,
- },
- },
- },
-};
-
-static const struct rcar_du_platform_data du_pdata __initconst = {
- .encoders = du_encoders,
- .num_encoders = ARRAY_SIZE(du_encoders),
-};
-
-static const struct resource du_resources[] __initconst = {
- DEFINE_RES_MEM(0xfff80000, 0x40000),
- DEFINE_RES_IRQ(gic_iid(0x3f)),
-};
-
-static void __init marzen_add_du_device(void)
-{
- struct platform_device_info info = {
- .name = "rcar-du-r8a7779",
- .id = -1,
- .res = du_resources,
- .num_res = ARRAY_SIZE(du_resources),
- .data = &du_pdata,
- .size_data = sizeof(du_pdata),
- .dma_mask = DMA_BIT_MASK(32),
- };
-
- platform_device_register_full(&info);
-}
-
/* LEDS */
static struct gpio_led marzen_leds[] = {
{
@@ -385,7 +328,6 @@ static void __init marzen_init(void)
platform_device_register_full(&vin1_info);
platform_device_register_full(&vin3_info);
platform_add_devices(marzen_devices, ARRAY_SIZE(marzen_devices));
- marzen_add_du_device();
}
static const char *marzen_boards_compat_dt[] __initdata = {
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e8907117861..7864797609b 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1947,9 +1947,8 @@ EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
* arm_iommu_create_mapping)
*
* Attaches specified io address space mapping to the provided device,
- * this replaces the dma operations (dma_map_ops pointer) with the
- * IOMMU aware version. More than one client might be attached to
- * the same io address space mapping.
+ * More than one client might be attached to the same io address space
+ * mapping.
*/
int arm_iommu_attach_device(struct device *dev,
struct dma_iommu_mapping *mapping)
@@ -1962,7 +1961,6 @@ int arm_iommu_attach_device(struct device *dev,
kref_get(&mapping->kref);
dev->archdata.mapping = mapping;
- set_dma_ops(dev, &iommu_ops);
pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
return 0;
@@ -1974,7 +1972,6 @@ EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
* @dev: valid struct device pointer
*
* Detaches the provided device from a previously attached map.
- * This voids the dma operations (dma_map_ops pointer)
*/
void arm_iommu_detach_device(struct device *dev)
{
@@ -1989,10 +1986,83 @@ void arm_iommu_detach_device(struct device *dev)
iommu_detach_device(mapping->domain, dev);
kref_put(&mapping->kref, release_iommu_mapping);
dev->archdata.mapping = NULL;
- set_dma_ops(dev, NULL);
pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
}
EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
-#endif
+static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
+{
+ return coherent ? &iommu_coherent_ops : &iommu_ops;
+}
+
+static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
+ struct iommu_ops *iommu)
+{
+ struct dma_iommu_mapping *mapping;
+
+ if (!iommu)
+ return false;
+
+ mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
+ if (IS_ERR(mapping)) {
+ pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
+ size, dev_name(dev));
+ return false;
+ }
+
+ if (arm_iommu_attach_device(dev, mapping)) {
+ pr_warn("Failed to attach device %s to IOMMU mapping\n",
+ dev_name(dev));
+ arm_iommu_release_mapping(mapping);
+ return false;
+ }
+
+ return true;
+}
+
+static void arm_teardown_iommu_dma_ops(struct device *dev)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+
+ arm_iommu_detach_device(dev);
+ arm_iommu_release_mapping(mapping);
+}
+
+#else
+
+static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
+ struct iommu_ops *iommu)
+{
+ return false;
+}
+
+static void arm_teardown_iommu_dma_ops(struct device *dev) { }
+
+#define arm_get_iommu_dma_map_ops arm_get_dma_map_ops
+
+#endif /* CONFIG_ARM_DMA_USE_IOMMU */
+
+static struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
+{
+ return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
+}
+
+void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+ struct iommu_ops *iommu, bool coherent)
+{
+ struct dma_map_ops *dma_ops;
+
+ dev->archdata.dma_coherent = coherent;
+ if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
+ dma_ops = arm_get_iommu_dma_map_ops(coherent);
+ else
+ dma_ops = arm_get_dma_map_ops(coherent);
+
+ set_dma_ops(dev, dma_ops);
+}
+
+void arch_teardown_dma_ops(struct device *dev)
+{
+ arm_teardown_iommu_dma_ops(dev);
+}
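
For reference, a hedged sketch of the intended caller: with set_dma_ops() removed from attach/detach, the per-device DMA ops are now chosen once by the bus code (e.g. OF's of_dma_configure(), not part of this diff) through arch_setup_dma_ops(); function names below are assumptions for illustration:

	/* Bus probe path (sketch): dma_base/size come from the device's
	 * dma-ranges; the IOMMU, if any, is discovered from the DT. */
	bool coherent = of_dma_is_coherent(dev->of_node);
	struct iommu_ops *iommu = of_iommu_configure(dev);

	/* Installs iommu_ops/iommu_coherent_ops if an IOMMU mapping could be
	 * created and attached, otherwise arm_dma_ops/arm_coherent_dma_ops. */
	arch_setup_dma_ops(dev, dma_base, size, iommu, coherent);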
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 688db03ef5b..b1f9a20a367 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -14,7 +14,9 @@ config ARM64
select ARM_ARCH_TIMER
select ARM_GIC
select AUDIT_ARCH_COMPAT_GENERIC
+ select ARM_GIC_V2M if PCI_MSI
select ARM_GIC_V3
+ select ARM_GIC_V3_ITS if PCI_MSI
select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS
select COMMON_CLK
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 6b61091c7f4..55103e50c51 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -27,6 +27,7 @@ generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mman.h
generic-y += msgbuf.h
+generic-y += msi.h
generic-y += mutex.h
generic-y += pci.h
generic-y += pci-bridge.h
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 5674a55b551..8127e45e263 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -38,6 +38,11 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
+}
+
static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 2012c4ba8d6..0b7dfdb931d 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -165,8 +165,6 @@ struct kvm_vcpu_stat {
u32 halt_wakeup;
};
-int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
- const struct kvm_vcpu_init *init);
int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
@@ -200,6 +198,7 @@ struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
u64 kvm_call_hyp(void *hypfn, ...);
+void force_vm_exit(const cpumask_t *mask);
int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
int exception_index);
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 0caf7a59f6a..14a74f13627 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -83,6 +83,7 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_boot_hyp_pgd(void);
void free_hyp_pgds(void);
+void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
@@ -243,9 +244,10 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
}
static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
- unsigned long size)
+ unsigned long size,
+ bool ipa_uncached)
{
- if (!vcpu_has_cache_enabled(vcpu))
+ if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
kvm_flush_dcache_to_poc((void *)hva, size);
if (!icache_is_aliasing()) { /* PIPT */
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 3425f311c49..f1dbca7d5c9 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -540,6 +540,8 @@ const struct cpu_operations cpu_psci_ops = {
.name = "psci",
#ifdef CONFIG_CPU_IDLE
.cpu_init_idle = cpu_psci_cpu_init_idle,
+#endif
+#ifdef CONFIG_ARM64_CPU_SUSPEND
.cpu_suspend = cpu_psci_cpu_suspend,
#endif
#ifdef CONFIG_SMP
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 76794692c20..9535bd555d1 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -38,7 +38,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
- vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
return 0;
}
@@ -297,31 +296,6 @@ int __attribute_const__ kvm_target_cpu(void)
return -EINVAL;
}
-int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
- const struct kvm_vcpu_init *init)
-{
- unsigned int i;
- int phys_target = kvm_target_cpu();
-
- if (init->target != phys_target)
- return -EINVAL;
-
- vcpu->arch.target = phys_target;
- bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
-
- /* -ENOENT for unknown features, -EINVAL for invalid combinations. */
- for (i = 0; i < sizeof(init->features) * 8; i++) {
- if (init->features[i / 32] & (1 << (i % 32))) {
- if (i >= KVM_VCPU_MAX_FEATURES)
- return -ENOENT;
- set_bit(i, vcpu->arch.features);
- }
- }
-
- /* Now we know what it is, we can reset it. */
- return kvm_reset_vcpu(vcpu);
-}
-
int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
{
int target = kvm_target_cpu();
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index bf69601be54..cf33f33333c 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -182,9 +182,6 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
static const char units[] = "KMGTPE";
u64 prot = val & pg_level[level].mask;
- if (addr < LOWEST_ADDR)
- return;
-
if (!st->level) {
st->level = level;
st->current_prot = prot;
@@ -272,7 +269,7 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
static void walk_pgd(struct pg_state *st, struct mm_struct *mm, unsigned long start)
{
- pgd_t *pgd = pgd_offset(mm, 0);
+ pgd_t *pgd = pgd_offset(mm, 0UL);
unsigned i;
unsigned long addr;
diff --git a/arch/cris/arch-v10/lib/usercopy.c b/arch/cris/arch-v10/lib/usercopy.c
index b0a608da7bd..b964c667ace 100644
--- a/arch/cris/arch-v10/lib/usercopy.c
+++ b/arch/cris/arch-v10/lib/usercopy.c
@@ -30,8 +30,7 @@
/* Copy to userspace. This is based on the memcpy used for
kernel-to-kernel copying; see "string.c". */
-unsigned long
-__copy_user (void __user *pdst, const void *psrc, unsigned long pn)
+unsigned long __copy_user(void __user *pdst, const void *psrc, unsigned long pn)
{
/* We want the parameters put in special registers.
Make sure the compiler is able to make something useful of this.
@@ -187,13 +186,14 @@ __copy_user (void __user *pdst, const void *psrc, unsigned long pn)
return retn;
}
+EXPORT_SYMBOL(__copy_user);
/* Copy from user to kernel, zeroing the bytes that were inaccessible in
userland. The return-value is the number of bytes that were
inaccessible. */
-unsigned long
-__copy_user_zeroing(void *pdst, const void __user *psrc, unsigned long pn)
+unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+ unsigned long pn)
{
/* We want the parameters put in special registers.
Make sure the compiler is able to make something useful of this.
@@ -369,11 +369,10 @@ copy_exception_bytes:
return retn + n;
}
+EXPORT_SYMBOL(__copy_user_zeroing);
/* Zero userspace. */
-
-unsigned long
-__do_clear_user (void __user *pto, unsigned long pn)
+unsigned long __do_clear_user(void __user *pto, unsigned long pn)
{
/* We want the parameters put in special registers.
Make sure the compiler is able to make something useful of this.
@@ -521,3 +520,4 @@ __do_clear_user (void __user *pto, unsigned long pn)
return retn;
}
+EXPORT_SYMBOL(__do_clear_user);
diff --git a/arch/cris/arch-v32/drivers/Kconfig b/arch/cris/arch-v32/drivers/Kconfig
index 15a9ed1d579..4fc16b44fff 100644
--- a/arch/cris/arch-v32/drivers/Kconfig
+++ b/arch/cris/arch-v32/drivers/Kconfig
@@ -108,6 +108,7 @@ config ETRAX_AXISFLASHMAP
select MTD_JEDECPROBE
select MTD_BLOCK
select MTD_COMPLEX_MAPPINGS
+ select MTD_MTDRAM
help
This option enables MTD mapping of flash devices. Needed to use
flash memories. If unsure, say Y.
@@ -358,13 +359,6 @@ config ETRAX_SPI_MMC
default MMC
select SPI
select MMC_SPI
- select ETRAX_SPI_MMC_BOARD
-
-# For the parts that can't be a module (due to restrictions in
-# framework elsewhere).
-config ETRAX_SPI_MMC_BOARD
- boolean
- default n
# While the board info is MMC_SPI only, the drivers are written to be
# independent of MMC_SPI, so we'll keep SPI non-dependent on the
diff --git a/arch/cris/arch-v32/drivers/Makefile b/arch/cris/arch-v32/drivers/Makefile
index 39aa3c117a8..15fbfefced2 100644
--- a/arch/cris/arch-v32/drivers/Makefile
+++ b/arch/cris/arch-v32/drivers/Makefile
@@ -10,4 +10,3 @@ obj-$(CONFIG_ETRAX_IOP_FW_LOAD) += iop_fw_load.o
obj-$(CONFIG_ETRAX_I2C) += i2c.o
obj-$(CONFIG_ETRAX_SYNCHRONOUS_SERIAL) += sync_serial.o
obj-$(CONFIG_PCI) += pci/
-obj-$(CONFIG_ETRAX_SPI_MMC_BOARD) += board_mmcspi.o
diff --git a/arch/cris/arch-v32/drivers/i2c.h b/arch/cris/arch-v32/drivers/i2c.h
index c073cf4ba01..d9cc856f89f 100644
--- a/arch/cris/arch-v32/drivers/i2c.h
+++ b/arch/cris/arch-v32/drivers/i2c.h
@@ -2,7 +2,6 @@
#include <linux/init.h>
/* High level I2C actions */
-int __init i2c_init(void);
int i2c_write(unsigned char theSlave, void *data, size_t nbytes);
int i2c_read(unsigned char theSlave, void *data, size_t nbytes);
int i2c_writereg(unsigned char theSlave, unsigned char theReg, unsigned char theValue);
diff --git a/arch/cris/arch-v32/drivers/sync_serial.c b/arch/cris/arch-v32/drivers/sync_serial.c
index 5a149134cfb..08a313fc224 100644
--- a/arch/cris/arch-v32/drivers/sync_serial.c
+++ b/arch/cris/arch-v32/drivers/sync_serial.c
@@ -1,8 +1,7 @@
/*
- * Simple synchronous serial port driver for ETRAX FS and Artpec-3.
- *
- * Copyright (c) 2005 Axis Communications AB
+ * Simple synchronous serial port driver for ETRAX FS and ARTPEC-3.
*
+ * Copyright (c) 2005, 2008 Axis Communications AB
* Author: Mikael Starvik
*
*/
@@ -16,16 +15,17 @@
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
-#include <linux/init.h>
-#include <linux/timer.h>
-#include <linux/spinlock.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
#include <linux/wait.h>
#include <asm/io.h>
-#include <dma.h>
+#include <mach/dma.h>
#include <pinmux.h>
#include <hwregs/reg_rdwr.h>
#include <hwregs/sser_defs.h>
+#include <hwregs/timer_defs.h>
#include <hwregs/dma_defs.h>
#include <hwregs/dma.h>
#include <hwregs/intr_vect_defs.h>
@@ -59,22 +59,23 @@
/* the rest of the data pointed out by Descr1 and set readp to the start */
/* of Descr2 */
-#define SYNC_SERIAL_MAJOR 125
-
/* IN_BUFFER_SIZE should be a multiple of 6 to make sure that 24 bit */
/* words can be handled */
-#define IN_BUFFER_SIZE 12288
-#define IN_DESCR_SIZE 256
-#define NBR_IN_DESCR (IN_BUFFER_SIZE/IN_DESCR_SIZE)
+#define IN_DESCR_SIZE SSP_INPUT_CHUNK_SIZE
+#define NBR_IN_DESCR (8*6)
+#define IN_BUFFER_SIZE (IN_DESCR_SIZE * NBR_IN_DESCR)
-#define OUT_BUFFER_SIZE 1024*8
#define NBR_OUT_DESCR 8
+#define OUT_BUFFER_SIZE (1024 * NBR_OUT_DESCR)
#define DEFAULT_FRAME_RATE 0
#define DEFAULT_WORD_RATE 7
+/* To be removed when we move to pure udev. */
+#define SYNC_SERIAL_MAJOR 125
+
/* NOTE: Enabling some debug will likely cause overrun or underrun,
- * especially if manual mode is use.
+ * especially if manual mode is used.
*/
#define DEBUG(x)
#define DEBUGREAD(x)
@@ -85,11 +86,28 @@
#define DEBUGTRDMA(x)
#define DEBUGOUTBUF(x)
-typedef struct sync_port
-{
- reg_scope_instances regi_sser;
- reg_scope_instances regi_dmain;
- reg_scope_instances regi_dmaout;
+enum syncser_irq_setup {
+ no_irq_setup = 0,
+ dma_irq_setup = 1,
+ manual_irq_setup = 2,
+};
+
+struct sync_port {
+ unsigned long regi_sser;
+ unsigned long regi_dmain;
+ unsigned long regi_dmaout;
+
+ /* Interrupt vectors. */
+ unsigned long dma_in_intr_vect; /* Used for DMA in. */
+ unsigned long dma_out_intr_vect; /* Used for DMA out. */
+ unsigned long syncser_intr_vect; /* Used when no DMA. */
+
+ /* DMA number for in and out. */
+ unsigned int dma_in_nbr;
+ unsigned int dma_out_nbr;
+
+ /* DMA owner. */
+ enum dma_owner req_dma;
char started; /* 1 if port has been started */
char port_nbr; /* Port 0 or 1 */
@@ -99,22 +117,29 @@ typedef struct sync_port
char use_dma; /* 1 if port uses dma */
char tr_running;
- char init_irqs;
+ enum syncser_irq_setup init_irqs;
int output;
int input;
/* Next byte to be read by application */
- volatile unsigned char *volatile readp;
+ unsigned char *readp;
/* Next byte to be written by etrax */
- volatile unsigned char *volatile writep;
+ unsigned char *writep;
unsigned int in_buffer_size;
+ unsigned int in_buffer_len;
unsigned int inbufchunk;
- unsigned char out_buffer[OUT_BUFFER_SIZE] __attribute__ ((aligned(32)));
- unsigned char in_buffer[IN_BUFFER_SIZE]__attribute__ ((aligned(32)));
- unsigned char flip[IN_BUFFER_SIZE] __attribute__ ((aligned(32)));
- struct dma_descr_data* next_rx_desc;
- struct dma_descr_data* prev_rx_desc;
+ /* Data buffers for in and output. */
+ unsigned char out_buffer[OUT_BUFFER_SIZE] __aligned(32);
+ unsigned char in_buffer[IN_BUFFER_SIZE] __aligned(32);
+ unsigned char flip[IN_BUFFER_SIZE] __aligned(32);
+ struct timespec timestamp[NBR_IN_DESCR];
+ struct dma_descr_data *next_rx_desc;
+ struct dma_descr_data *prev_rx_desc;
+
+ struct timeval last_timestamp;
+ int read_ts_idx;
+ int write_ts_idx;
/* Pointer to the first available descriptor in the ring,
* unless active_tr_descr == catch_tr_descr and a dma
@@ -135,114 +160,138 @@ typedef struct sync_port
/* Number of bytes currently locked for being read by DMA */
int out_buf_count;
- dma_descr_data in_descr[NBR_IN_DESCR] __attribute__ ((__aligned__(16)));
- dma_descr_context in_context __attribute__ ((__aligned__(32)));
- dma_descr_data out_descr[NBR_OUT_DESCR]
- __attribute__ ((__aligned__(16)));
- dma_descr_context out_context __attribute__ ((__aligned__(32)));
+ dma_descr_context in_context __aligned(32);
+ dma_descr_context out_context __aligned(32);
+ dma_descr_data in_descr[NBR_IN_DESCR] __aligned(16);
+ dma_descr_data out_descr[NBR_OUT_DESCR] __aligned(16);
+
wait_queue_head_t out_wait_q;
wait_queue_head_t in_wait_q;
spinlock_t lock;
-} sync_port;
+};
static DEFINE_MUTEX(sync_serial_mutex);
static int etrax_sync_serial_init(void);
static void initialize_port(int portnbr);
static inline int sync_data_avail(struct sync_port *port);
-static int sync_serial_open(struct inode *, struct file*);
-static int sync_serial_release(struct inode*, struct file*);
+static int sync_serial_open(struct inode *, struct file *);
+static int sync_serial_release(struct inode *, struct file *);
static unsigned int sync_serial_poll(struct file *filp, poll_table *wait);
-static int sync_serial_ioctl(struct file *,
- unsigned int cmd, unsigned long arg);
-static ssize_t sync_serial_write(struct file * file, const char * buf,
+static long sync_serial_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg);
+static int sync_serial_ioctl_unlocked(struct file *file,
+ unsigned int cmd, unsigned long arg);
+static ssize_t sync_serial_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos);
-static ssize_t sync_serial_read(struct file *file, char *buf,
+static ssize_t sync_serial_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos);
-#if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
- defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
- (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
- defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))
+#if ((defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
+ defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
+ (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
+ defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)))
#define SYNC_SER_DMA
+#else
+#define SYNC_SER_MANUAL
#endif
-static void send_word(sync_port* port);
-static void start_dma_out(struct sync_port *port, const char *data, int count);
-static void start_dma_in(sync_port* port);
#ifdef SYNC_SER_DMA
+static void start_dma_out(struct sync_port *port, const char *data, int count);
+static void start_dma_in(struct sync_port *port);
static irqreturn_t tr_interrupt(int irq, void *dev_id);
static irqreturn_t rx_interrupt(int irq, void *dev_id);
#endif
-
-#if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
- !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
- (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
- !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))
-#define SYNC_SER_MANUAL
-#endif
#ifdef SYNC_SER_MANUAL
+static void send_word(struct sync_port *port);
static irqreturn_t manual_interrupt(int irq, void *dev_id);
#endif
-#ifdef CONFIG_ETRAXFS /* ETRAX FS */
-#define OUT_DMA_NBR 4
-#define IN_DMA_NBR 5
-#define PINMUX_SSER pinmux_sser0
-#define SYNCSER_INST regi_sser0
-#define SYNCSER_INTR_VECT SSER0_INTR_VECT
-#define OUT_DMA_INST regi_dma4
-#define IN_DMA_INST regi_dma5
-#define DMA_OUT_INTR_VECT DMA4_INTR_VECT
-#define DMA_IN_INTR_VECT DMA5_INTR_VECT
-#define REQ_DMA_SYNCSER dma_sser0
-#else /* Artpec-3 */
-#define OUT_DMA_NBR 6
-#define IN_DMA_NBR 7
-#define PINMUX_SSER pinmux_sser
-#define SYNCSER_INST regi_sser
-#define SYNCSER_INTR_VECT SSER_INTR_VECT
-#define OUT_DMA_INST regi_dma6
-#define IN_DMA_INST regi_dma7
-#define DMA_OUT_INTR_VECT DMA6_INTR_VECT
-#define DMA_IN_INTR_VECT DMA7_INTR_VECT
-#define REQ_DMA_SYNCSER dma_sser
+#define artpec_pinmux_alloc_fixed crisv32_pinmux_alloc_fixed
+#define artpec_request_dma crisv32_request_dma
+#define artpec_free_dma crisv32_free_dma
+
+#ifdef CONFIG_ETRAXFS
+/* ETRAX FS */
+#define DMA_OUT_NBR0 SYNC_SER0_TX_DMA_NBR
+#define DMA_IN_NBR0 SYNC_SER0_RX_DMA_NBR
+#define DMA_OUT_NBR1 SYNC_SER1_TX_DMA_NBR
+#define DMA_IN_NBR1 SYNC_SER1_RX_DMA_NBR
+#define PINMUX_SSER0 pinmux_sser0
+#define PINMUX_SSER1 pinmux_sser1
+#define SYNCSER_INST0 regi_sser0
+#define SYNCSER_INST1 regi_sser1
+#define SYNCSER_INTR_VECT0 SSER0_INTR_VECT
+#define SYNCSER_INTR_VECT1 SSER1_INTR_VECT
+#define OUT_DMA_INST0 regi_dma4
+#define IN_DMA_INST0 regi_dma5
+#define DMA_OUT_INTR_VECT0 DMA4_INTR_VECT
+#define DMA_OUT_INTR_VECT1 DMA7_INTR_VECT
+#define DMA_IN_INTR_VECT0 DMA5_INTR_VECT
+#define DMA_IN_INTR_VECT1 DMA6_INTR_VECT
+#define REQ_DMA_SYNCSER0 dma_sser0
+#define REQ_DMA_SYNCSER1 dma_sser1
+#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)
+#define PORT1_DMA 1
+#else
+#define PORT1_DMA 0
+#endif
+#elif defined(CONFIG_CRIS_MACH_ARTPEC3)
+/* ARTPEC-3 */
+#define DMA_OUT_NBR0 SYNC_SER_TX_DMA_NBR
+#define DMA_IN_NBR0 SYNC_SER_RX_DMA_NBR
+#define PINMUX_SSER0 pinmux_sser
+#define SYNCSER_INST0 regi_sser
+#define SYNCSER_INTR_VECT0 SSER_INTR_VECT
+#define OUT_DMA_INST0 regi_dma6
+#define IN_DMA_INST0 regi_dma7
+#define DMA_OUT_INTR_VECT0 DMA6_INTR_VECT
+#define DMA_IN_INTR_VECT0 DMA7_INTR_VECT
+#define REQ_DMA_SYNCSER0 dma_sser
+#define REQ_DMA_SYNCSER1 dma_sser
#endif
-/* The ports */
-static struct sync_port ports[]=
-{
- {
- .regi_sser = SYNCSER_INST,
- .regi_dmaout = OUT_DMA_INST,
- .regi_dmain = IN_DMA_INST,
#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)
- .use_dma = 1,
+#define PORT0_DMA 1
#else
- .use_dma = 0,
+#define PORT0_DMA 0
#endif
- }
-#ifdef CONFIG_ETRAXFS
- ,
+/* The ports */
+static struct sync_port ports[] = {
{
- .regi_sser = regi_sser1,
- .regi_dmaout = regi_dma6,
- .regi_dmain = regi_dma7,
-#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)
- .use_dma = 1,
-#else
- .use_dma = 0,
-#endif
- }
+ .regi_sser = SYNCSER_INST0,
+ .regi_dmaout = OUT_DMA_INST0,
+ .regi_dmain = IN_DMA_INST0,
+ .use_dma = PORT0_DMA,
+ .dma_in_intr_vect = DMA_IN_INTR_VECT0,
+ .dma_out_intr_vect = DMA_OUT_INTR_VECT0,
+ .dma_in_nbr = DMA_IN_NBR0,
+ .dma_out_nbr = DMA_OUT_NBR0,
+ .req_dma = REQ_DMA_SYNCSER0,
+ .syncser_intr_vect = SYNCSER_INTR_VECT0,
+ },
+#ifdef CONFIG_ETRAXFS
+ {
+ .regi_sser = SYNCSER_INST1,
+ .regi_dmaout = regi_dma6,
+ .regi_dmain = regi_dma7,
+ .use_dma = PORT1_DMA,
+ .dma_in_intr_vect = DMA_IN_INTR_VECT1,
+ .dma_out_intr_vect = DMA_OUT_INTR_VECT1,
+ .dma_in_nbr = DMA_IN_NBR1,
+ .dma_out_nbr = DMA_OUT_NBR1,
+ .req_dma = REQ_DMA_SYNCSER1,
+ .syncser_intr_vect = SYNCSER_INTR_VECT1,
+ },
#endif
};
#define NBR_PORTS ARRAY_SIZE(ports)
-static const struct file_operations sync_serial_fops = {
+static const struct file_operations syncser_fops = {
.owner = THIS_MODULE,
.write = sync_serial_write,
.read = sync_serial_read,
@@ -253,61 +302,40 @@ static const struct file_operations sync_serial_fops = {
.llseek = noop_llseek,
};
-static int __init etrax_sync_serial_init(void)
-{
- ports[0].enabled = 0;
-#ifdef CONFIG_ETRAXFS
- ports[1].enabled = 0;
-#endif
- if (register_chrdev(SYNC_SERIAL_MAJOR, "sync serial",
- &sync_serial_fops) < 0) {
- printk(KERN_WARNING
- "Unable to get major for synchronous serial port\n");
- return -EBUSY;
- }
-
- /* Initialize Ports */
-#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0)
- if (crisv32_pinmux_alloc_fixed(PINMUX_SSER)) {
- printk(KERN_WARNING
- "Unable to alloc pins for synchronous serial port 0\n");
- return -EIO;
- }
- ports[0].enabled = 1;
- initialize_port(0);
-#endif
-
-#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1)
- if (crisv32_pinmux_alloc_fixed(pinmux_sser1)) {
- printk(KERN_WARNING
- "Unable to alloc pins for synchronous serial port 0\n");
- return -EIO;
- }
- ports[1].enabled = 1;
- initialize_port(1);
-#endif
+static dev_t syncser_first;
+static int minor_count = NBR_PORTS;
+#define SYNCSER_NAME "syncser"
+static struct cdev *syncser_cdev;
+static struct class *syncser_class;
-#ifdef CONFIG_ETRAXFS
- printk(KERN_INFO "ETRAX FS synchronous serial port driver\n");
-#else
- printk(KERN_INFO "Artpec-3 synchronous serial port driver\n");
-#endif
- return 0;
+static void sync_serial_start_port(struct sync_port *port)
+{
+ reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
+ reg_sser_rw_tr_cfg tr_cfg =
+ REG_RD(sser, port->regi_sser, rw_tr_cfg);
+ reg_sser_rw_rec_cfg rec_cfg =
+ REG_RD(sser, port->regi_sser, rw_rec_cfg);
+ cfg.en = regk_sser_yes;
+ tr_cfg.tr_en = regk_sser_yes;
+ rec_cfg.rec_en = regk_sser_yes;
+ REG_WR(sser, port->regi_sser, rw_cfg, cfg);
+ REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
+ REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
+ port->started = 1;
}
static void __init initialize_port(int portnbr)
{
- int __attribute__((unused)) i;
struct sync_port *port = &ports[portnbr];
- reg_sser_rw_cfg cfg = {0};
- reg_sser_rw_frm_cfg frm_cfg = {0};
- reg_sser_rw_tr_cfg tr_cfg = {0};
- reg_sser_rw_rec_cfg rec_cfg = {0};
+ reg_sser_rw_cfg cfg = { 0 };
+ reg_sser_rw_frm_cfg frm_cfg = { 0 };
+ reg_sser_rw_tr_cfg tr_cfg = { 0 };
+ reg_sser_rw_rec_cfg rec_cfg = { 0 };
- DEBUG(printk(KERN_DEBUG "Init sync serial port %d\n", portnbr));
+ DEBUG(pr_info("Init sync serial port %d\n", portnbr));
port->port_nbr = portnbr;
- port->init_irqs = 1;
+ port->init_irqs = no_irq_setup;
port->out_rd_ptr = port->out_buffer;
port->out_buf_count = 0;
@@ -318,10 +346,11 @@ static void __init initialize_port(int portnbr)
port->readp = port->flip;
port->writep = port->flip;
port->in_buffer_size = IN_BUFFER_SIZE;
+ port->in_buffer_len = 0;
port->inbufchunk = IN_DESCR_SIZE;
- port->next_rx_desc = &port->in_descr[0];
- port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR-1];
- port->prev_rx_desc->eol = 1;
+
+ port->read_ts_idx = 0;
+ port->write_ts_idx = 0;
init_waitqueue_head(&port->out_wait_q);
init_waitqueue_head(&port->in_wait_q);
@@ -368,14 +397,18 @@ static void __init initialize_port(int portnbr)
REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
#ifdef SYNC_SER_DMA
- /* Setup the descriptor ring for dma out/transmit. */
- for (i = 0; i < NBR_OUT_DESCR; i++) {
- port->out_descr[i].wait = 0;
- port->out_descr[i].intr = 1;
- port->out_descr[i].eol = 0;
- port->out_descr[i].out_eop = 0;
- port->out_descr[i].next =
- (dma_descr_data *)virt_to_phys(&port->out_descr[i+1]);
+ {
+ int i;
+ /* Setup the descriptor ring for dma out/transmit. */
+ for (i = 0; i < NBR_OUT_DESCR; i++) {
+ dma_descr_data *descr = &port->out_descr[i];
+ descr->wait = 0;
+ descr->intr = 1;
+ descr->eol = 0;
+ descr->out_eop = 0;
+ descr->next =
+ (dma_descr_data *)virt_to_phys(&port->out_descr[i+1]);
+ }
}
/* Create a ring from the list. */
@@ -391,201 +424,116 @@ static void __init initialize_port(int portnbr)
static inline int sync_data_avail(struct sync_port *port)
{
- int avail;
- unsigned char *start;
- unsigned char *end;
-
- start = (unsigned char*)port->readp; /* cast away volatile */
- end = (unsigned char*)port->writep; /* cast away volatile */
- /* 0123456789 0123456789
- * ----- - -----
- * ^rp ^wp ^wp ^rp
- */
-
- if (end >= start)
- avail = end - start;
- else
- avail = port->in_buffer_size - (start - end);
- return avail;
-}
-
-static inline int sync_data_avail_to_end(struct sync_port *port)
-{
- int avail;
- unsigned char *start;
- unsigned char *end;
-
- start = (unsigned char*)port->readp; /* cast away volatile */
- end = (unsigned char*)port->writep; /* cast away volatile */
- /* 0123456789 0123456789
- * ----- -----
- * ^rp ^wp ^wp ^rp
- */
-
- if (end >= start)
- avail = end - start;
- else
- avail = port->flip + port->in_buffer_size - start;
- return avail;
+ return port->in_buffer_len;
}
static int sync_serial_open(struct inode *inode, struct file *file)
{
+ int ret = 0;
int dev = iminor(inode);
- int ret = -EBUSY;
- sync_port *port;
- reg_dma_rw_cfg cfg = {.en = regk_dma_yes};
- reg_dma_rw_intr_mask intr_mask = {.data = regk_dma_yes};
+ struct sync_port *port;
+#ifdef SYNC_SER_DMA
+ reg_dma_rw_cfg cfg = { .en = regk_dma_yes };
+ reg_dma_rw_intr_mask intr_mask = { .data = regk_dma_yes };
+#endif
- mutex_lock(&sync_serial_mutex);
- DEBUG(printk(KERN_DEBUG "Open sync serial port %d\n", dev));
+ DEBUG(pr_debug("Open sync serial port %d\n", dev));
- if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
- {
- DEBUG(printk(KERN_DEBUG "Invalid minor %d\n", dev));
- ret = -ENODEV;
- goto out;
+ if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
+ DEBUG(pr_info("Invalid minor %d\n", dev));
+ return -ENODEV;
}
port = &ports[dev];
/* Allow open this device twice (assuming one reader and one writer) */
- if (port->busy == 2)
- {
- DEBUG(printk(KERN_DEBUG "Device is busy.. \n"));
- goto out;
+ if (port->busy == 2) {
+ DEBUG(pr_info("syncser%d is busy\n", dev));
+ return -EBUSY;
}
+ mutex_lock(&sync_serial_mutex);
- if (port->init_irqs) {
- if (port->use_dma) {
- if (port == &ports[0]) {
-#ifdef SYNC_SER_DMA
- if (request_irq(DMA_OUT_INTR_VECT,
- tr_interrupt,
- 0,
- "synchronous serial 0 dma tr",
- &ports[0])) {
- printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
- goto out;
- } else if (request_irq(DMA_IN_INTR_VECT,
- rx_interrupt,
- 0,
- "synchronous serial 1 dma rx",
- &ports[0])) {
- free_irq(DMA_OUT_INTR_VECT, &port[0]);
- printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
- goto out;
- } else if (crisv32_request_dma(OUT_DMA_NBR,
- "synchronous serial 0 dma tr",
- DMA_VERBOSE_ON_ERROR,
- 0,
- REQ_DMA_SYNCSER)) {
- free_irq(DMA_OUT_INTR_VECT, &port[0]);
- free_irq(DMA_IN_INTR_VECT, &port[0]);
- printk(KERN_CRIT "Can't allocate sync serial port 0 TX DMA channel");
- goto out;
- } else if (crisv32_request_dma(IN_DMA_NBR,
- "synchronous serial 0 dma rec",
- DMA_VERBOSE_ON_ERROR,
- 0,
- REQ_DMA_SYNCSER)) {
- crisv32_free_dma(OUT_DMA_NBR);
- free_irq(DMA_OUT_INTR_VECT, &port[0]);
- free_irq(DMA_IN_INTR_VECT, &port[0]);
- printk(KERN_CRIT "Can't allocate sync serial port 1 RX DMA channel");
- goto out;
- }
-#endif
- }
-#ifdef CONFIG_ETRAXFS
- else if (port == &ports[1]) {
+ /* Clear any stale date left in the flip buffer */
+ port->readp = port->writep = port->flip;
+ port->in_buffer_len = 0;
+ port->read_ts_idx = 0;
+ port->write_ts_idx = 0;
+
+ if (port->init_irqs != no_irq_setup) {
+ /* Init only on first call. */
+ port->busy++;
+ mutex_unlock(&sync_serial_mutex);
+ return 0;
+ }
+ if (port->use_dma) {
#ifdef SYNC_SER_DMA
- if (request_irq(DMA6_INTR_VECT,
- tr_interrupt,
- 0,
- "synchronous serial 1 dma tr",
- &ports[1])) {
- printk(KERN_CRIT "Can't allocate sync serial port 1 IRQ");
- goto out;
- } else if (request_irq(DMA7_INTR_VECT,
- rx_interrupt,
- 0,
- "synchronous serial 1 dma rx",
- &ports[1])) {
- free_irq(DMA6_INTR_VECT, &ports[1]);
- printk(KERN_CRIT "Can't allocate sync serial port 3 IRQ");
- goto out;
- } else if (crisv32_request_dma(
- SYNC_SER1_TX_DMA_NBR,
- "synchronous serial 1 dma tr",
- DMA_VERBOSE_ON_ERROR,
- 0,
- dma_sser1)) {
- free_irq(DMA6_INTR_VECT, &ports[1]);
- free_irq(DMA7_INTR_VECT, &ports[1]);
- printk(KERN_CRIT "Can't allocate sync serial port 3 TX DMA channel");
- goto out;
- } else if (crisv32_request_dma(
- SYNC_SER1_RX_DMA_NBR,
- "synchronous serial 3 dma rec",
- DMA_VERBOSE_ON_ERROR,
- 0,
- dma_sser1)) {
- crisv32_free_dma(SYNC_SER1_TX_DMA_NBR);
- free_irq(DMA6_INTR_VECT, &ports[1]);
- free_irq(DMA7_INTR_VECT, &ports[1]);
- printk(KERN_CRIT "Can't allocate sync serial port 3 RX DMA channel");
- goto out;
- }
-#endif
- }
+ const char *tmp;
+ DEBUG(pr_info("Using DMA for syncser%d\n", dev));
+
+ tmp = dev == 0 ? "syncser0 tx" : "syncser1 tx";
+ if (request_irq(port->dma_out_intr_vect, tr_interrupt, 0,
+ tmp, port)) {
+ pr_err("Can't alloc syncser%d TX IRQ", dev);
+ ret = -EBUSY;
+ goto unlock_and_exit;
+ }
+ if (artpec_request_dma(port->dma_out_nbr, tmp,
+ DMA_VERBOSE_ON_ERROR, 0, port->req_dma)) {
+ free_irq(port->dma_out_intr_vect, port);
+ pr_err("Can't alloc syncser%d TX DMA", dev);
+ ret = -EBUSY;
+ goto unlock_and_exit;
+ }
+ tmp = dev == 0 ? "syncser0 rx" : "syncser1 rx";
+ if (request_irq(port->dma_in_intr_vect, rx_interrupt, 0,
+ tmp, port)) {
+ artpec_free_dma(port->dma_out_nbr);
+ free_irq(port->dma_out_intr_vect, port);
+ pr_err("Can't alloc syncser%d RX IRQ", dev);
+ ret = -EBUSY;
+ goto unlock_and_exit;
+ }
+ if (artpec_request_dma(port->dma_in_nbr, tmp,
+ DMA_VERBOSE_ON_ERROR, 0, port->req_dma)) {
+ artpec_free_dma(port->dma_out_nbr);
+ free_irq(port->dma_out_intr_vect, port);
+ free_irq(port->dma_in_intr_vect, port);
+ pr_err("Can't alloc syncser%d RX DMA", dev);
+ ret = -EBUSY;
+ goto unlock_and_exit;
+ }
+ /* Enable DMAs */
+ REG_WR(dma, port->regi_dmain, rw_cfg, cfg);
+ REG_WR(dma, port->regi_dmaout, rw_cfg, cfg);
+ /* Enable DMA IRQs */
+ REG_WR(dma, port->regi_dmain, rw_intr_mask, intr_mask);
+ REG_WR(dma, port->regi_dmaout, rw_intr_mask, intr_mask);
+ /* Set up wordsize = 1 for DMAs. */
+ DMA_WR_CMD(port->regi_dmain, regk_dma_set_w_size1);
+ DMA_WR_CMD(port->regi_dmaout, regk_dma_set_w_size1);
+
+ start_dma_in(port);
+ port->init_irqs = dma_irq_setup;
#endif
- /* Enable DMAs */
- REG_WR(dma, port->regi_dmain, rw_cfg, cfg);
- REG_WR(dma, port->regi_dmaout, rw_cfg, cfg);
- /* Enable DMA IRQs */
- REG_WR(dma, port->regi_dmain, rw_intr_mask, intr_mask);
- REG_WR(dma, port->regi_dmaout, rw_intr_mask, intr_mask);
- /* Set up wordsize = 1 for DMAs. */
- DMA_WR_CMD (port->regi_dmain, regk_dma_set_w_size1);
- DMA_WR_CMD (port->regi_dmaout, regk_dma_set_w_size1);
-
- start_dma_in(port);
- port->init_irqs = 0;
- } else { /* !port->use_dma */
+ } else { /* !port->use_dma */
#ifdef SYNC_SER_MANUAL
- if (port == &ports[0]) {
- if (request_irq(SYNCSER_INTR_VECT,
- manual_interrupt,
- 0,
- "synchronous serial manual irq",
- &ports[0])) {
- printk("Can't allocate sync serial manual irq");
- goto out;
- }
- }
-#ifdef CONFIG_ETRAXFS
- else if (port == &ports[1]) {
- if (request_irq(SSER1_INTR_VECT,
- manual_interrupt,
- 0,
- "synchronous serial manual irq",
- &ports[1])) {
- printk(KERN_CRIT "Can't allocate sync serial manual irq");
- goto out;
- }
- }
-#endif
- port->init_irqs = 0;
+ const char *tmp = dev == 0 ? "syncser0 manual irq" :
+ "syncser1 manual irq";
+ if (request_irq(port->syncser_intr_vect, manual_interrupt,
+ 0, tmp, port)) {
+ pr_err("Can't alloc syncser%d manual irq",
+ dev);
+ ret = -EBUSY;
+ goto unlock_and_exit;
+ }
+ port->init_irqs = manual_irq_setup;
#else
- panic("sync_serial: Manual mode not supported.\n");
+ panic("sync_serial: Manual mode not supported\n");
#endif /* SYNC_SER_MANUAL */
- }
-
- } /* port->init_irqs */
-
+ }
port->busy++;
ret = 0;
-out:
+
+unlock_and_exit:
mutex_unlock(&sync_serial_mutex);
return ret;
}
@@ -593,18 +541,17 @@ out:
static int sync_serial_release(struct inode *inode, struct file *file)
{
int dev = iminor(inode);
- sync_port *port;
+ struct sync_port *port;
- if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
- {
- DEBUG(printk("Invalid minor %d\n", dev));
+ if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
+ DEBUG(pr_info("Invalid minor %d\n", dev));
return -ENODEV;
}
port = &ports[dev];
if (port->busy)
port->busy--;
if (!port->busy)
- /* XXX */ ;
+ /* XXX */;
return 0;
}
@@ -612,21 +559,15 @@ static unsigned int sync_serial_poll(struct file *file, poll_table *wait)
{
int dev = iminor(file_inode(file));
unsigned int mask = 0;
- sync_port *port;
- DEBUGPOLL( static unsigned int prev_mask = 0; );
+ struct sync_port *port;
+ DEBUGPOLL(
+ static unsigned int prev_mask;
+ );
port = &ports[dev];
- if (!port->started) {
- reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
- reg_sser_rw_rec_cfg rec_cfg =
- REG_RD(sser, port->regi_sser, rw_rec_cfg);
- cfg.en = regk_sser_yes;
- rec_cfg.rec_en = port->input;
- REG_WR(sser, port->regi_sser, rw_cfg, cfg);
- REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
- port->started = 1;
- }
+ if (!port->started)
+ sync_serial_start_port(port);
poll_wait(file, &port->out_wait_q, wait);
poll_wait(file, &port->in_wait_q, wait);
@@ -645,33 +586,175 @@ static unsigned int sync_serial_poll(struct file *file, poll_table *wait)
if (port->input && sync_data_avail(port) >= port->inbufchunk)
mask |= POLLIN | POLLRDNORM;
- DEBUGPOLL(if (mask != prev_mask)
- printk("sync_serial_poll: mask 0x%08X %s %s\n", mask,
- mask&POLLOUT?"POLLOUT":"", mask&POLLIN?"POLLIN":"");
- prev_mask = mask;
- );
+ DEBUGPOLL(
+ if (mask != prev_mask)
+ pr_info("sync_serial_poll: mask 0x%08X %s %s\n",
+ mask,
+ mask & POLLOUT ? "POLLOUT" : "",
+ mask & POLLIN ? "POLLIN" : "");
+ prev_mask = mask;
+ );
return mask;
}
-static int sync_serial_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
+static ssize_t __sync_serial_read(struct file *file,
+ char __user *buf,
+ size_t count,
+ loff_t *ppos,
+ struct timespec *ts)
+{
+ unsigned long flags;
+ int dev = MINOR(file->f_dentry->d_inode->i_rdev);
+ int avail;
+ struct sync_port *port;
+ unsigned char *start;
+ unsigned char *end;
+
+ if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
+ DEBUG(pr_info("Invalid minor %d\n", dev));
+ return -ENODEV;
+ }
+ port = &ports[dev];
+
+ if (!port->started)
+ sync_serial_start_port(port);
+
+ /* Calculate number of available bytes */
+ /* Save pointers to avoid that they are modified by interrupt */
+ spin_lock_irqsave(&port->lock, flags);
+ start = port->readp;
+ end = port->writep;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ while ((start == end) && !port->in_buffer_len) {
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ wait_event_interruptible(port->in_wait_q,
+ !(start == end && !port->full));
+
+ if (signal_pending(current))
+ return -EINTR;
+
+ spin_lock_irqsave(&port->lock, flags);
+ start = port->readp;
+ end = port->writep;
+ spin_unlock_irqrestore(&port->lock, flags);
+ }
+
+ DEBUGREAD(pr_info("R%d c %d ri %u wi %u /%u\n",
+ dev, count,
+ start - port->flip, end - port->flip,
+ port->in_buffer_size));
+
+ /* Lazy read, never return wrapped data. */
+ if (end > start)
+ avail = end - start;
+ else
+ avail = port->flip + port->in_buffer_size - start;
+
+ count = count > avail ? avail : count;
+ if (copy_to_user(buf, start, count))
+ return -EFAULT;
+
+ /* If timestamp requested, find timestamp of first returned byte
+ * and copy it.
+ * N.B: Applications that request timstamps MUST read data in
+ * chunks that are multiples of IN_DESCR_SIZE.
+ * Otherwise the timestamps will not be aligned to the data read.
+ */
+ if (ts != NULL) {
+ int idx = port->read_ts_idx;
+ memcpy(ts, &port->timestamp[idx], sizeof(struct timespec));
+ port->read_ts_idx += count / IN_DESCR_SIZE;
+ if (port->read_ts_idx >= NBR_IN_DESCR)
+ port->read_ts_idx = 0;
+ }
+
+ spin_lock_irqsave(&port->lock, flags);
+ port->readp += count;
+ /* Check for wrap */
+ if (port->readp >= port->flip + port->in_buffer_size)
+ port->readp = port->flip;
+ port->in_buffer_len -= count;
+ port->full = 0;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ DEBUGREAD(pr_info("r %d\n", count));
+
+ return count;
+}
+
+static ssize_t sync_serial_input(struct file *file, unsigned long arg)
+{
+ struct ssp_request req;
+ int count;
+ int ret;
+
+ /* Copy the request structure from user-mode. */
+ ret = copy_from_user(&req, (struct ssp_request __user *)arg,
+ sizeof(struct ssp_request));
+
+ if (ret) {
+ DEBUG(pr_info("sync_serial_input copy from user failed\n"));
+ return -EFAULT;
+ }
+
+ /* To get the timestamps aligned, make sure that 'len'
+ * is a multiple of IN_DESCR_SIZE.
+ */
+ if ((req.len % IN_DESCR_SIZE) != 0) {
+ DEBUG(pr_info("sync_serial: req.len %x, IN_DESCR_SIZE %x\n",
+ req.len, IN_DESCR_SIZE));
+ return -EFAULT;
+ }
+
+ /* Do the actual read. */
+ /* Note that req.buf is actually a pointer to user space. */
+ count = __sync_serial_read(file, req.buf, req.len,
+ NULL, &req.ts);
+
+ if (count < 0) {
+ DEBUG(pr_info("sync_serial_input read failed\n"));
+ return count;
+ }
+
+ /* Copy the request back to user-mode. */
+ ret = copy_to_user((struct ssp_request __user *)arg, &req,
+ sizeof(struct ssp_request));
+
+ if (ret) {
+ DEBUG(pr_info("syncser input copy2user failed\n"));
+ return -EFAULT;
+ }
+
+ /* Return the number of bytes read. */
+ return count;
+}
+
+
+static int sync_serial_ioctl_unlocked(struct file *file,
+ unsigned int cmd, unsigned long arg)
{
int return_val = 0;
int dma_w_size = regk_dma_set_w_size1;
int dev = iminor(file_inode(file));
- sync_port *port;
+ struct sync_port *port;
reg_sser_rw_tr_cfg tr_cfg;
reg_sser_rw_rec_cfg rec_cfg;
reg_sser_rw_frm_cfg frm_cfg;
reg_sser_rw_cfg gen_cfg;
reg_sser_rw_intr_mask intr_mask;
- if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
- {
- DEBUG(printk("Invalid minor %d\n", dev));
+ if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
+ DEBUG(pr_info("Invalid minor %d\n", dev));
return -1;
}
- port = &ports[dev];
+
+ if (cmd == SSP_INPUT)
+ return sync_serial_input(file, arg);
+
+ port = &ports[dev];
spin_lock_irq(&port->lock);
tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
@@ -680,11 +763,9 @@ static int sync_serial_ioctl(struct file *file,
gen_cfg = REG_RD(sser, port->regi_sser, rw_cfg);
intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
- switch(cmd)
- {
+ switch (cmd) {
case SSP_SPEED:
- if (GET_SPEED(arg) == CODEC)
- {
+ if (GET_SPEED(arg) == CODEC) {
unsigned int freq;
gen_cfg.base_freq = regk_sser_f32;
@@ -701,15 +782,25 @@ static int sync_serial_ioctl(struct file *file,
case FREQ_256kHz:
gen_cfg.clk_div = 125 *
(1 << (freq - FREQ_256kHz)) - 1;
- break;
+ break;
case FREQ_512kHz:
gen_cfg.clk_div = 62;
- break;
+ break;
case FREQ_1MHz:
case FREQ_2MHz:
case FREQ_4MHz:
gen_cfg.clk_div = 8 * (1 << freq) - 1;
- break;
+ break;
+ }
+ } else if (GET_SPEED(arg) == CODEC_f32768) {
+ gen_cfg.base_freq = regk_sser_f32_768;
+ switch (GET_FREQ(arg)) {
+ case FREQ_4096kHz:
+ gen_cfg.clk_div = 7;
+ break;
+ default:
+ spin_unlock_irq(&port->lock);
+ return -EINVAL;
}
} else {
gen_cfg.base_freq = regk_sser_f29_493;
@@ -767,62 +858,64 @@ static int sync_serial_ioctl(struct file *file,
break;
case SSP_MODE:
- switch(arg)
- {
- case MASTER_OUTPUT:
- port->output = 1;
- port->input = 0;
- frm_cfg.out_on = regk_sser_tr;
- frm_cfg.frame_pin_dir = regk_sser_out;
- gen_cfg.clk_dir = regk_sser_out;
- break;
- case SLAVE_OUTPUT:
- port->output = 1;
- port->input = 0;
- frm_cfg.frame_pin_dir = regk_sser_in;
- gen_cfg.clk_dir = regk_sser_in;
- break;
- case MASTER_INPUT:
- port->output = 0;
- port->input = 1;
- frm_cfg.frame_pin_dir = regk_sser_out;
- frm_cfg.out_on = regk_sser_intern_tb;
- gen_cfg.clk_dir = regk_sser_out;
- break;
- case SLAVE_INPUT:
- port->output = 0;
- port->input = 1;
- frm_cfg.frame_pin_dir = regk_sser_in;
- gen_cfg.clk_dir = regk_sser_in;
- break;
- case MASTER_BIDIR:
- port->output = 1;
- port->input = 1;
- frm_cfg.frame_pin_dir = regk_sser_out;
- frm_cfg.out_on = regk_sser_intern_tb;
- gen_cfg.clk_dir = regk_sser_out;
- break;
- case SLAVE_BIDIR:
- port->output = 1;
- port->input = 1;
- frm_cfg.frame_pin_dir = regk_sser_in;
- gen_cfg.clk_dir = regk_sser_in;
- break;
- default:
- spin_unlock_irq(&port->lock);
- return -EINVAL;
+ switch (arg) {
+ case MASTER_OUTPUT:
+ port->output = 1;
+ port->input = 0;
+ frm_cfg.out_on = regk_sser_tr;
+ frm_cfg.frame_pin_dir = regk_sser_out;
+ gen_cfg.clk_dir = regk_sser_out;
+ break;
+ case SLAVE_OUTPUT:
+ port->output = 1;
+ port->input = 0;
+ frm_cfg.frame_pin_dir = regk_sser_in;
+ gen_cfg.clk_dir = regk_sser_in;
+ break;
+ case MASTER_INPUT:
+ port->output = 0;
+ port->input = 1;
+ frm_cfg.frame_pin_dir = regk_sser_out;
+ frm_cfg.out_on = regk_sser_intern_tb;
+ gen_cfg.clk_dir = regk_sser_out;
+ break;
+ case SLAVE_INPUT:
+ port->output = 0;
+ port->input = 1;
+ frm_cfg.frame_pin_dir = regk_sser_in;
+ gen_cfg.clk_dir = regk_sser_in;
+ break;
+ case MASTER_BIDIR:
+ port->output = 1;
+ port->input = 1;
+ frm_cfg.frame_pin_dir = regk_sser_out;
+ frm_cfg.out_on = regk_sser_intern_tb;
+ gen_cfg.clk_dir = regk_sser_out;
+ break;
+ case SLAVE_BIDIR:
+ port->output = 1;
+ port->input = 1;
+ frm_cfg.frame_pin_dir = regk_sser_in;
+ gen_cfg.clk_dir = regk_sser_in;
+ break;
+ default:
+ spin_unlock_irq(&port->lock);
+ return -EINVAL;
}
- if (!port->use_dma || (arg == MASTER_OUTPUT || arg == SLAVE_OUTPUT))
+ if (!port->use_dma || arg == MASTER_OUTPUT ||
+ arg == SLAVE_OUTPUT)
intr_mask.rdav = regk_sser_yes;
break;
case SSP_FRAME_SYNC:
if (arg & NORMAL_SYNC) {
frm_cfg.rec_delay = 1;
frm_cfg.tr_delay = 1;
- }
- else if (arg & EARLY_SYNC)
+ } else if (arg & EARLY_SYNC)
frm_cfg.rec_delay = frm_cfg.tr_delay = 0;
- else if (arg & SECOND_WORD_SYNC) {
+ else if (arg & LATE_SYNC) {
+ frm_cfg.tr_delay = 2;
+ frm_cfg.rec_delay = 2;
+ } else if (arg & SECOND_WORD_SYNC) {
frm_cfg.rec_delay = 7;
frm_cfg.tr_delay = 1;
}
@@ -914,15 +1007,12 @@ static int sync_serial_ioctl(struct file *file,
frm_cfg.type = regk_sser_level;
frm_cfg.tr_delay = 1;
frm_cfg.level = regk_sser_neg_lo;
- if (arg & SPI_SLAVE)
- {
+ if (arg & SPI_SLAVE) {
rec_cfg.clk_pol = regk_sser_neg;
gen_cfg.clk_dir = regk_sser_in;
port->input = 1;
port->output = 0;
- }
- else
- {
+ } else {
gen_cfg.out_clk_pol = regk_sser_pos;
port->input = 0;
port->output = 1;
@@ -965,19 +1055,19 @@ static int sync_serial_ioctl(struct file *file,
}
static long sync_serial_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
+ unsigned int cmd, unsigned long arg)
{
- long ret;
+ long ret;
- mutex_lock(&sync_serial_mutex);
- ret = sync_serial_ioctl_unlocked(file, cmd, arg);
- mutex_unlock(&sync_serial_mutex);
+ mutex_lock(&sync_serial_mutex);
+ ret = sync_serial_ioctl_unlocked(file, cmd, arg);
+ mutex_unlock(&sync_serial_mutex);
- return ret;
+ return ret;
}
/* NOTE: sync_serial_write does not support concurrency */
-static ssize_t sync_serial_write(struct file *file, const char *buf,
+static ssize_t sync_serial_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
int dev = iminor(file_inode(file));
@@ -993,7 +1083,7 @@ static ssize_t sync_serial_write(struct file *file, const char *buf,
unsigned char *buf_stop_ptr; /* Last byte + 1 */
if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
- DEBUG(printk("Invalid minor %d\n", dev));
+ DEBUG(pr_info("Invalid minor %d\n", dev));
return -ENODEV;
}
port = &ports[dev];
@@ -1006,9 +1096,9 @@ static ssize_t sync_serial_write(struct file *file, const char *buf,
* |_________|___________________|________________________|
* ^ rd_ptr ^ wr_ptr
*/
- DEBUGWRITE(printk(KERN_DEBUG "W d%d c %lu a: %p c: %p\n",
- port->port_nbr, count, port->active_tr_descr,
- port->catch_tr_descr));
+ DEBUGWRITE(pr_info("W d%d c %u a: %p c: %p\n",
+ port->port_nbr, count, port->active_tr_descr,
+ port->catch_tr_descr));
/* Read variables that may be updated by interrupts */
spin_lock_irqsave(&port->lock, flags);
@@ -1020,7 +1110,7 @@ static ssize_t sync_serial_write(struct file *file, const char *buf,
if (port->tr_running &&
((port->use_dma && port->active_tr_descr == port->catch_tr_descr) ||
out_buf_count >= OUT_BUFFER_SIZE)) {
- DEBUGWRITE(printk(KERN_DEBUG "sser%d full\n", dev));
+ DEBUGWRITE(pr_info("sser%d full\n", dev));
return -EAGAIN;
}
@@ -1043,15 +1133,16 @@ static ssize_t sync_serial_write(struct file *file, const char *buf,
if (copy_from_user(wr_ptr, buf, trunc_count))
return -EFAULT;
- DEBUGOUTBUF(printk(KERN_DEBUG "%-4d + %-4d = %-4d %p %p %p\n",
- out_buf_count, trunc_count,
- port->out_buf_count, port->out_buffer,
- wr_ptr, buf_stop_ptr));
+ DEBUGOUTBUF(pr_info("%-4d + %-4d = %-4d %p %p %p\n",
+ out_buf_count, trunc_count,
+ port->out_buf_count, port->out_buffer,
+ wr_ptr, buf_stop_ptr));
/* Make sure transmitter/receiver is running */
if (!port->started) {
reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
- reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
+ reg_sser_rw_rec_cfg rec_cfg =
+ REG_RD(sser, port->regi_sser, rw_rec_cfg);
cfg.en = regk_sser_yes;
rec_cfg.rec_en = port->input;
REG_WR(sser, port->regi_sser, rw_cfg, cfg);
@@ -1068,8 +1159,11 @@ static ssize_t sync_serial_write(struct file *file, const char *buf,
spin_lock_irqsave(&port->lock, flags);
port->out_buf_count += trunc_count;
if (port->use_dma) {
+#ifdef SYNC_SER_DMA
start_dma_out(port, wr_ptr, trunc_count);
+#endif
} else if (!port->tr_running) {
+#ifdef SYNC_SER_MANUAL
reg_sser_rw_intr_mask intr_mask;
intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
/* Start sender by writing data */
@@ -1077,14 +1171,15 @@ static ssize_t sync_serial_write(struct file *file, const char *buf,
/* and enable transmitter ready IRQ */
intr_mask.trdy = 1;
REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
+#endif
}
spin_unlock_irqrestore(&port->lock, flags);
/* Exit if non blocking */
if (file->f_flags & O_NONBLOCK) {
- DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu %08x\n",
- port->port_nbr, trunc_count,
- REG_RD_INT(dma, port->regi_dmaout, r_intr)));
+ DEBUGWRITE(pr_info("w d%d c %u %08x\n",
+ port->port_nbr, trunc_count,
+ REG_RD_INT(dma, port->regi_dmaout, r_intr)));
return trunc_count;
}
@@ -1094,105 +1189,32 @@ static ssize_t sync_serial_write(struct file *file, const char *buf,
if (signal_pending(current))
return -EINTR;
- DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu\n",
- port->port_nbr, trunc_count));
+ DEBUGWRITE(pr_info("w d%d c %u\n", port->port_nbr, trunc_count));
return trunc_count;
}
-static ssize_t sync_serial_read(struct file * file, char * buf,
+static ssize_t sync_serial_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- int dev = iminor(file_inode(file));
- int avail;
- sync_port *port;
- unsigned char* start;
- unsigned char* end;
- unsigned long flags;
-
- if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
- {
- DEBUG(printk("Invalid minor %d\n", dev));
- return -ENODEV;
- }
- port = &ports[dev];
-
- DEBUGREAD(printk("R%d c %d ri %lu wi %lu /%lu\n", dev, count, port->readp - port->flip, port->writep - port->flip, port->in_buffer_size));
-
- if (!port->started)
- {
- reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
- reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
- reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
- cfg.en = regk_sser_yes;
- tr_cfg.tr_en = regk_sser_yes;
- rec_cfg.rec_en = regk_sser_yes;
- REG_WR(sser, port->regi_sser, rw_cfg, cfg);
- REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
- REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
- port->started = 1;
- }
-
- /* Calculate number of available bytes */
- /* Save pointers to avoid that they are modified by interrupt */
- spin_lock_irqsave(&port->lock, flags);
- start = (unsigned char*)port->readp; /* cast away volatile */
- end = (unsigned char*)port->writep; /* cast away volatile */
- spin_unlock_irqrestore(&port->lock, flags);
- while ((start == end) && !port->full) /* No data */
- {
- DEBUGREAD(printk(KERN_DEBUG "&"));
- if (file->f_flags & O_NONBLOCK)
- return -EAGAIN;
-
- wait_event_interruptible(port->in_wait_q,
- !(start == end && !port->full));
- if (signal_pending(current))
- return -EINTR;
-
- spin_lock_irqsave(&port->lock, flags);
- start = (unsigned char*)port->readp; /* cast away volatile */
- end = (unsigned char*)port->writep; /* cast away volatile */
- spin_unlock_irqrestore(&port->lock, flags);
- }
-
- /* Lazy read, never return wrapped data. */
- if (port->full)
- avail = port->in_buffer_size;
- else if (end > start)
- avail = end - start;
- else
- avail = port->flip + port->in_buffer_size - start;
-
- count = count > avail ? avail : count;
- if (copy_to_user(buf, start, count))
- return -EFAULT;
- /* Disable interrupts while updating readp */
- spin_lock_irqsave(&port->lock, flags);
- port->readp += count;
- if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */
- port->readp = port->flip;
- port->full = 0;
- spin_unlock_irqrestore(&port->lock, flags);
- DEBUGREAD(printk("r %d\n", count));
- return count;
+ return __sync_serial_read(file, buf, count, ppos, NULL);
}
-static void send_word(sync_port* port)
+#ifdef SYNC_SER_MANUAL
+static void send_word(struct sync_port *port)
{
reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
reg_sser_rw_tr_data tr_data = {0};
- switch(tr_cfg.sample_size)
+ switch (tr_cfg.sample_size) {
+ case 8:
+ port->out_buf_count--;
+ tr_data.data = *port->out_rd_ptr++;
+ REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
+ if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
+ port->out_rd_ptr = port->out_buffer;
+ break;
+ case 12:
{
- case 8:
- port->out_buf_count--;
- tr_data.data = *port->out_rd_ptr++;
- REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
- if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
- port->out_rd_ptr = port->out_buffer;
- break;
- case 12:
- {
int data = (*port->out_rd_ptr++) << 8;
data |= *port->out_rd_ptr++;
port->out_buf_count -= 2;
@@ -1200,8 +1222,8 @@ static void send_word(sync_port* port)
REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
port->out_rd_ptr = port->out_buffer;
+ break;
}
- break;
case 16:
port->out_buf_count -= 2;
tr_data.data = *(unsigned short *)port->out_rd_ptr;
@@ -1233,27 +1255,28 @@ static void send_word(sync_port* port)
break;
}
}
+#endif
-static void start_dma_out(struct sync_port *port,
- const char *data, int count)
+#ifdef SYNC_SER_DMA
+static void start_dma_out(struct sync_port *port, const char *data, int count)
{
- port->active_tr_descr->buf = (char *) virt_to_phys((char *) data);
+ port->active_tr_descr->buf = (char *)virt_to_phys((char *)data);
port->active_tr_descr->after = port->active_tr_descr->buf + count;
port->active_tr_descr->intr = 1;
port->active_tr_descr->eol = 1;
port->prev_tr_descr->eol = 0;
- DEBUGTRDMA(printk(KERN_DEBUG "Inserting eolr:%p eol@:%p\n",
+ DEBUGTRDMA(pr_info("Inserting eolr:%p eol@:%p\n",
port->prev_tr_descr, port->active_tr_descr));
port->prev_tr_descr = port->active_tr_descr;
- port->active_tr_descr = phys_to_virt((int) port->active_tr_descr->next);
+ port->active_tr_descr = phys_to_virt((int)port->active_tr_descr->next);
if (!port->tr_running) {
reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser,
rw_tr_cfg);
- port->out_context.next = 0;
+ port->out_context.next = NULL;
port->out_context.saved_data =
(dma_descr_data *)virt_to_phys(port->prev_tr_descr);
port->out_context.saved_data_buf = port->prev_tr_descr->buf;
@@ -1263,57 +1286,58 @@ static void start_dma_out(struct sync_port *port,
tr_cfg.tr_en = regk_sser_yes;
REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
- DEBUGTRDMA(printk(KERN_DEBUG "dma s\n"););
+ DEBUGTRDMA(pr_info("dma s\n"););
} else {
DMA_CONTINUE_DATA(port->regi_dmaout);
- DEBUGTRDMA(printk(KERN_DEBUG "dma c\n"););
+ DEBUGTRDMA(pr_info("dma c\n"););
}
port->tr_running = 1;
}
-static void start_dma_in(sync_port *port)
+static void start_dma_in(struct sync_port *port)
{
int i;
char *buf;
+ unsigned long flags;
+ spin_lock_irqsave(&port->lock, flags);
port->writep = port->flip;
+ spin_unlock_irqrestore(&port->lock, flags);
- if (port->writep > port->flip + port->in_buffer_size) {
- panic("Offset too large in sync serial driver\n");
- return;
- }
- buf = (char*)virt_to_phys(port->in_buffer);
+ buf = (char *)virt_to_phys(port->in_buffer);
for (i = 0; i < NBR_IN_DESCR; i++) {
port->in_descr[i].buf = buf;
port->in_descr[i].after = buf + port->inbufchunk;
port->in_descr[i].intr = 1;
- port->in_descr[i].next = (dma_descr_data*)virt_to_phys(&port->in_descr[i+1]);
+ port->in_descr[i].next =
+ (dma_descr_data *)virt_to_phys(&port->in_descr[i+1]);
port->in_descr[i].buf = buf;
buf += port->inbufchunk;
}
/* Link the last descriptor to the first */
- port->in_descr[i-1].next = (dma_descr_data*)virt_to_phys(&port->in_descr[0]);
+ port->in_descr[i-1].next =
+ (dma_descr_data *)virt_to_phys(&port->in_descr[0]);
port->in_descr[i-1].eol = regk_sser_yes;
port->next_rx_desc = &port->in_descr[0];
port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR - 1];
- port->in_context.saved_data = (dma_descr_data*)virt_to_phys(&port->in_descr[0]);
+ port->in_context.saved_data =
+ (dma_descr_data *)virt_to_phys(&port->in_descr[0]);
port->in_context.saved_data_buf = port->in_descr[0].buf;
DMA_START_CONTEXT(port->regi_dmain, virt_to_phys(&port->in_context));
}
-#ifdef SYNC_SER_DMA
static irqreturn_t tr_interrupt(int irq, void *dev_id)
{
reg_dma_r_masked_intr masked;
- reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes};
+ reg_dma_rw_ack_intr ack_intr = { .data = regk_dma_yes };
reg_dma_rw_stat stat;
int i;
int found = 0;
int stop_sser = 0;
for (i = 0; i < NBR_PORTS; i++) {
- sync_port *port = &ports[i];
- if (!port->enabled || !port->use_dma)
+ struct sync_port *port = &ports[i];
+ if (!port->enabled || !port->use_dma)
continue;
/* IRQ active for the port? */
@@ -1338,19 +1362,20 @@ static irqreturn_t tr_interrupt(int irq, void *dev_id)
int sent;
sent = port->catch_tr_descr->after -
port->catch_tr_descr->buf;
- DEBUGTXINT(printk(KERN_DEBUG "%-4d - %-4d = %-4d\t"
- "in descr %p (ac: %p)\n",
- port->out_buf_count, sent,
- port->out_buf_count - sent,
- port->catch_tr_descr,
- port->active_tr_descr););
+ DEBUGTXINT(pr_info("%-4d - %-4d = %-4d\t"
+ "in descr %p (ac: %p)\n",
+ port->out_buf_count, sent,
+ port->out_buf_count - sent,
+ port->catch_tr_descr,
+ port->active_tr_descr););
port->out_buf_count -= sent;
port->catch_tr_descr =
phys_to_virt((int) port->catch_tr_descr->next);
port->out_rd_ptr =
phys_to_virt((int) port->catch_tr_descr->buf);
} else {
- int i, sent;
+ reg_sser_rw_tr_cfg tr_cfg;
+ int j, sent;
/* EOL handler.
* Note that if an EOL was encountered during the irq
* locked section of sync_ser_write the DMA will be
@@ -1358,11 +1383,11 @@ static irqreturn_t tr_interrupt(int irq, void *dev_id)
* The remaining descriptors will be traversed by
* the descriptor interrupts as usual.
*/
- i = 0;
+ j = 0;
while (!port->catch_tr_descr->eol) {
sent = port->catch_tr_descr->after -
port->catch_tr_descr->buf;
- DEBUGOUTBUF(printk(KERN_DEBUG
+ DEBUGOUTBUF(pr_info(
"traversing descr %p -%d (%d)\n",
port->catch_tr_descr,
sent,
@@ -1370,16 +1395,15 @@ static irqreturn_t tr_interrupt(int irq, void *dev_id)
port->out_buf_count -= sent;
port->catch_tr_descr = phys_to_virt(
(int)port->catch_tr_descr->next);
- i++;
- if (i >= NBR_OUT_DESCR) {
+ j++;
+ if (j >= NBR_OUT_DESCR) {
/* TODO: Reset and recover */
panic("sync_serial: missing eol");
}
}
sent = port->catch_tr_descr->after -
port->catch_tr_descr->buf;
- DEBUGOUTBUF(printk(KERN_DEBUG
- "eol at descr %p -%d (%d)\n",
+ DEBUGOUTBUF(pr_info("eol at descr %p -%d (%d)\n",
port->catch_tr_descr,
sent,
port->out_buf_count));
@@ -1394,15 +1418,13 @@ static irqreturn_t tr_interrupt(int irq, void *dev_id)
OUT_BUFFER_SIZE)
port->out_rd_ptr = port->out_buffer;
- reg_sser_rw_tr_cfg tr_cfg =
- REG_RD(sser, port->regi_sser, rw_tr_cfg);
- DEBUGTXINT(printk(KERN_DEBUG
+ tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
+ DEBUGTXINT(pr_info(
"tr_int DMA stop %d, set catch @ %p\n",
port->out_buf_count,
port->active_tr_descr));
if (port->out_buf_count != 0)
- printk(KERN_CRIT "sync_ser: buffer not "
- "empty after eol.\n");
+ pr_err("sync_ser: buf not empty after eol\n");
port->catch_tr_descr = port->active_tr_descr;
port->tr_running = 0;
tr_cfg.tr_en = regk_sser_no;
@@ -1414,62 +1436,79 @@ static irqreturn_t tr_interrupt(int irq, void *dev_id)
return IRQ_RETVAL(found);
} /* tr_interrupt */
+
+static inline void handle_rx_packet(struct sync_port *port)
+{
+ int idx;
+ reg_dma_rw_ack_intr ack_intr = { .data = regk_dma_yes };
+ unsigned long flags;
+
+ DEBUGRXINT(pr_info("!"));
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* If we overrun, the user experience is poor regardless of
+  * whether we drop new or old data. It is much easier to get it
+  * right when dropping new data, so let's do that.
+  */
+ if ((port->writep + port->inbufchunk <=
+ port->flip + port->in_buffer_size) &&
+ (port->in_buffer_len + port->inbufchunk < IN_BUFFER_SIZE)) {
+ memcpy(port->writep,
+ phys_to_virt((unsigned)port->next_rx_desc->buf),
+ port->inbufchunk);
+ port->writep += port->inbufchunk;
+ if (port->writep >= port->flip + port->in_buffer_size)
+ port->writep = port->flip;
+
+ /* Timestamp the new data chunk. */
+ if (port->write_ts_idx == NBR_IN_DESCR)
+ port->write_ts_idx = 0;
+ idx = port->write_ts_idx++;
+ do_posix_clock_monotonic_gettime(&port->timestamp[idx]);
+ port->in_buffer_len += port->inbufchunk;
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ port->next_rx_desc->eol = 1;
+ port->prev_rx_desc->eol = 0;
+ /* Cache bug workaround */
+ flush_dma_descr(port->prev_rx_desc, 0);
+ port->prev_rx_desc = port->next_rx_desc;
+ port->next_rx_desc = phys_to_virt((unsigned)port->next_rx_desc->next);
+ /* Cache bug workaround */
+ flush_dma_descr(port->prev_rx_desc, 1);
+ /* wake up the waiting process */
+ wake_up_interruptible(&port->in_wait_q);
+ DMA_CONTINUE(port->regi_dmain);
+ REG_WR(dma, port->regi_dmain, rw_ack_intr, ack_intr);
+}
+
static irqreturn_t rx_interrupt(int irq, void *dev_id)
{
reg_dma_r_masked_intr masked;
- reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes};
int i;
int found = 0;
- for (i = 0; i < NBR_PORTS; i++)
- {
- sync_port *port = &ports[i];
+ DEBUG(pr_info("rx_interrupt\n"));
+
+ for (i = 0; i < NBR_PORTS; i++) {
+ struct sync_port *port = &ports[i];
- if (!port->enabled || !port->use_dma )
+ if (!port->enabled || !port->use_dma)
continue;
masked = REG_RD(dma, port->regi_dmain, r_masked_intr);
- if (masked.data) /* Descriptor interrupt */
- {
- found = 1;
- while (REG_RD(dma, port->regi_dmain, rw_data) !=
- virt_to_phys(port->next_rx_desc)) {
- DEBUGRXINT(printk(KERN_DEBUG "!"));
- if (port->writep + port->inbufchunk > port->flip + port->in_buffer_size) {
- int first_size = port->flip + port->in_buffer_size - port->writep;
- memcpy((char*)port->writep, phys_to_virt((unsigned)port->next_rx_desc->buf), first_size);
- memcpy(port->flip, phys_to_virt((unsigned)port->next_rx_desc->buf+first_size), port->inbufchunk - first_size);
- port->writep = port->flip + port->inbufchunk - first_size;
- } else {
- memcpy((char*)port->writep,
- phys_to_virt((unsigned)port->next_rx_desc->buf),
- port->inbufchunk);
- port->writep += port->inbufchunk;
- if (port->writep >= port->flip + port->in_buffer_size)
- port->writep = port->flip;
- }
- if (port->writep == port->readp)
- {
- port->full = 1;
- }
-
- port->next_rx_desc->eol = 1;
- port->prev_rx_desc->eol = 0;
- /* Cache bug workaround */
- flush_dma_descr(port->prev_rx_desc, 0);
- port->prev_rx_desc = port->next_rx_desc;
- port->next_rx_desc = phys_to_virt((unsigned)port->next_rx_desc->next);
- /* Cache bug workaround */
- flush_dma_descr(port->prev_rx_desc, 1);
- /* wake up the waiting process */
- wake_up_interruptible(&port->in_wait_q);
- DMA_CONTINUE(port->regi_dmain);
- REG_WR(dma, port->regi_dmain, rw_ack_intr, ack_intr);
+ if (!masked.data)
+ continue;
- }
- }
+ /* Descriptor interrupt */
+ found = 1;
+ while (REG_RD(dma, port->regi_dmain, rw_data) !=
+ virt_to_phys(port->next_rx_desc))
+ handle_rx_packet(port);
}
return IRQ_RETVAL(found);
} /* rx_interrupt */
@@ -1478,75 +1517,83 @@ static irqreturn_t rx_interrupt(int irq, void *dev_id)
#ifdef SYNC_SER_MANUAL
static irqreturn_t manual_interrupt(int irq, void *dev_id)
{
+ unsigned long flags;
int i;
int found = 0;
reg_sser_r_masked_intr masked;
- for (i = 0; i < NBR_PORTS; i++)
- {
- sync_port *port = &ports[i];
+ for (i = 0; i < NBR_PORTS; i++) {
+ struct sync_port *port = &ports[i];
if (!port->enabled || port->use_dma)
- {
continue;
- }
masked = REG_RD(sser, port->regi_sser, r_masked_intr);
- if (masked.rdav) /* Data received? */
- {
- reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
- reg_sser_r_rec_data data = REG_RD(sser, port->regi_sser, r_rec_data);
+ /* Data received? */
+ if (masked.rdav) {
+ reg_sser_rw_rec_cfg rec_cfg =
+ REG_RD(sser, port->regi_sser, rw_rec_cfg);
+ reg_sser_r_rec_data data = REG_RD(sser,
+ port->regi_sser, r_rec_data);
found = 1;
/* Read data */
- switch(rec_cfg.sample_size)
- {
+ spin_lock_irqsave(&port->lock, flags);
+ switch (rec_cfg.sample_size) {
case 8:
*port->writep++ = data.data & 0xff;
break;
case 12:
*port->writep = (data.data & 0x0ff0) >> 4;
*(port->writep + 1) = data.data & 0x0f;
- port->writep+=2;
+ port->writep += 2;
break;
case 16:
- *(unsigned short*)port->writep = data.data;
- port->writep+=2;
+ *(unsigned short *)port->writep = data.data;
+ port->writep += 2;
break;
case 24:
- *(unsigned int*)port->writep = data.data;
- port->writep+=3;
+ *(unsigned int *)port->writep = data.data;
+ port->writep += 3;
break;
case 32:
- *(unsigned int*)port->writep = data.data;
- port->writep+=4;
+ *(unsigned int *)port->writep = data.data;
+ port->writep += 4;
break;
}
- if (port->writep >= port->flip + port->in_buffer_size) /* Wrap? */
+ /* Wrap? */
+ if (port->writep >= port->flip + port->in_buffer_size)
port->writep = port->flip;
if (port->writep == port->readp) {
- /* receive buffer overrun, discard oldest data
- */
+ /* Receive buf overrun, discard oldest data */
port->readp++;
- if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */
+ /* Wrap? */
+ if (port->readp >= port->flip +
+ port->in_buffer_size)
port->readp = port->flip;
}
+ spin_unlock_irqrestore(&port->lock, flags);
if (sync_data_avail(port) >= port->inbufchunk)
- wake_up_interruptible(&port->in_wait_q); /* Wake up application */
+ /* Wake up application */
+ wake_up_interruptible(&port->in_wait_q);
}
- if (masked.trdy) /* Transmitter ready? */
- {
+ /* Transmitter ready? */
+ if (masked.trdy) {
found = 1;
- if (port->out_buf_count > 0) /* More data to send */
+ /* More data to send */
+ if (port->out_buf_count > 0)
send_word(port);
- else /* transmission finished */
- {
+ else {
+ /* Transmission finished */
reg_sser_rw_intr_mask intr_mask;
- intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
+ intr_mask = REG_RD(sser, port->regi_sser,
+ rw_intr_mask);
intr_mask.trdy = 0;
- REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
- wake_up_interruptible(&port->out_wait_q); /* Wake up application */
+ REG_WR(sser, port->regi_sser,
+ rw_intr_mask, intr_mask);
+ /* Wake up application */
+ wake_up_interruptible(&port->out_wait_q);
}
}
}
@@ -1554,4 +1601,109 @@ static irqreturn_t manual_interrupt(int irq, void *dev_id)
}
#endif
+static int __init etrax_sync_serial_init(void)
+{
+#if 1
+ /* This code will be removed when we move to udev for all devices. */
+ syncser_first = MKDEV(SYNC_SERIAL_MAJOR, 0);
+ if (register_chrdev_region(syncser_first, minor_count, SYNCSER_NAME)) {
+ pr_err("Failed to register major %d\n", SYNC_SERIAL_MAJOR);
+ return -1;
+ }
+#else
+ /* Allocate dynamic major number. */
+ if (alloc_chrdev_region(&syncser_first, 0, minor_count, SYNCSER_NAME)) {
+ pr_err("Failed to allocate character device region\n");
+ return -1;
+ }
+#endif
+ syncser_cdev = cdev_alloc();
+ if (!syncser_cdev) {
+ pr_err("Failed to allocate cdev for syncser\n");
+ unregister_chrdev_region(syncser_first, minor_count);
+ return -1;
+ }
+ cdev_init(syncser_cdev, &syncser_fops);
+
+ /* Create a sysfs class for syncser */
+ syncser_class = class_create(THIS_MODULE, "syncser_class");
+
+ /* Initialize Ports */
+#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0)
+ if (artpec_pinmux_alloc_fixed(PINMUX_SSER0)) {
+ pr_warn("Unable to alloc pins for synchronous serial port 0\n");
+ unregister_chrdev_region(syncser_first, minor_count);
+ return -EIO;
+ }
+ initialize_port(0);
+ ports[0].enabled = 1;
+ /* Register with sysfs so udev can pick it up. */
+ device_create(syncser_class, NULL, syncser_first, NULL,
+ "%s%d", SYNCSER_NAME, 0);
+#endif
+
+#if defined(CONFIG_ETRAXFS) && defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1)
+ if (artpec_pinmux_alloc_fixed(PINMUX_SSER1)) {
+ pr_warn("Unable to alloc pins for synchronous serial port 1\n");
+ unregister_chrdev_region(syncser_first, minor_count);
+ class_destroy(syncser_class);
+ return -EIO;
+ }
+ initialize_port(1);
+ ports[1].enabled = 1;
+ /* Register with sysfs so udev can pick it up. */
+ device_create(syncser_class, NULL, syncser_first + 1, NULL,
+      "%s%d", SYNCSER_NAME, 1);
+#endif
+
+ /* Add it to system */
+ if (cdev_add(syncser_cdev, syncser_first, minor_count) < 0) {
+ pr_err("Failed to add syncser as char device\n");
+ device_destroy(syncser_class, syncser_first);
+ class_destroy(syncser_class);
+ cdev_del(syncser_cdev);
+ unregister_chrdev_region(syncser_first, minor_count);
+ return -1;
+ }
+
+ pr_info("ARTPEC synchronous serial port (%s: %d, %d)\n",
+ SYNCSER_NAME, MAJOR(syncser_first), MINOR(syncser_first));
+
+ return 0;
+}
+
+static void __exit etrax_sync_serial_exit(void)
+{
+ int i;
+ device_destroy(syncser_class, syncser_first);
+ class_destroy(syncser_class);
+
+ if (syncser_cdev) {
+ cdev_del(syncser_cdev);
+ unregister_chrdev_region(syncser_first, minor_count);
+ }
+ for (i = 0; i < NBR_PORTS; i++) {
+ struct sync_port *port = &ports[i];
+ if (port->init_irqs == dma_irq_setup) {
+ /* Free dma irqs and dma channels. */
+#ifdef SYNC_SER_DMA
+ artpec_free_dma(port->dma_in_nbr);
+ artpec_free_dma(port->dma_out_nbr);
+ free_irq(port->dma_out_intr_vect, port);
+ free_irq(port->dma_in_intr_vect, port);
+#endif
+ } else if (port->init_irqs == manual_irq_setup) {
+ /* Free manual irq. */
+ free_irq(port->syncser_intr_vect, port);
+ }
+ }
+
+ pr_info("ARTPEC synchronous serial port unregistered\n");
+}
+
module_init(etrax_sync_serial_init);
+module_exit(etrax_sync_serial_exit);
+
+MODULE_LICENSE("GPL");
+
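The sync_serial_ioctl() rework above replaces the single ioctl entry point with a thin locked wrapper around an unlocked worker, serializing all ioctl callers with a driver-local mutex. A minimal sketch of that pattern, using hypothetical names rather than the driver's own, looks like this:

#include <linux/fs.h>
#include <linux/mutex.h>

/* Hypothetical example, not the driver's code. */
static DEFINE_MUTEX(example_ioctl_mutex);

static long example_ioctl_unlocked(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	/* Per-command handling goes here; no locking needed. */
	return 0;
}

static long example_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	long ret;

	/* Serialize all ioctl callers against each other. */
	mutex_lock(&example_ioctl_mutex);
	ret = example_ioctl_unlocked(file, cmd, arg);
	mutex_unlock(&example_ioctl_mutex);

	return ret;
}

Keeping the lock in the thin wrapper makes it obvious that every command handler runs serialized, without scattering lock/unlock pairs through the command switch.
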
diff --git a/arch/cris/arch-v32/kernel/debugport.c b/arch/cris/arch-v32/kernel/debugport.c
index 610909b003f..02e33ebe51e 100644
--- a/arch/cris/arch-v32/kernel/debugport.c
+++ b/arch/cris/arch-v32/kernel/debugport.c
@@ -3,7 +3,9 @@
*/
#include <linux/console.h>
+#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/string.h>
#include <hwregs/reg_rdwr.h>
#include <hwregs/reg_map.h>
#include <hwregs/ser_defs.h>
@@ -65,6 +67,7 @@ struct dbg_port ports[] =
},
#endif
};
+
static struct dbg_port *port =
#if defined(CONFIG_ETRAX_DEBUG_PORT0)
&ports[0];
@@ -97,14 +100,19 @@ static struct dbg_port *kgdb_port =
#endif
#endif
-static void
-start_port(struct dbg_port* p)
+static void start_port(struct dbg_port *p)
{
- if (!p)
- return;
+ /* Set up serial port registers */
+ reg_ser_rw_tr_ctrl tr_ctrl = {0};
+ reg_ser_rw_tr_dma_en tr_dma_en = {0};
- if (p->started)
+ reg_ser_rw_rec_ctrl rec_ctrl = {0};
+ reg_ser_rw_tr_baud_div tr_baud_div = {0};
+ reg_ser_rw_rec_baud_div rec_baud_div = {0};
+
+ if (!p || p->started)
return;
+
p->started = 1;
if (p->nbr == 1)
@@ -118,36 +126,24 @@ start_port(struct dbg_port* p)
crisv32_pinmux_alloc_fixed(pinmux_ser4);
#endif
- /* Set up serial port registers */
- reg_ser_rw_tr_ctrl tr_ctrl = {0};
- reg_ser_rw_tr_dma_en tr_dma_en = {0};
-
- reg_ser_rw_rec_ctrl rec_ctrl = {0};
- reg_ser_rw_tr_baud_div tr_baud_div = {0};
- reg_ser_rw_rec_baud_div rec_baud_div = {0};
-
tr_ctrl.base_freq = rec_ctrl.base_freq = regk_ser_f29_493;
tr_dma_en.en = rec_ctrl.dma_mode = regk_ser_no;
tr_baud_div.div = rec_baud_div.div = 29493000 / p->baudrate / 8;
tr_ctrl.en = rec_ctrl.en = 1;
- if (p->parity == 'O')
- {
+ if (p->parity == 'O') {
tr_ctrl.par_en = regk_ser_yes;
tr_ctrl.par = regk_ser_odd;
rec_ctrl.par_en = regk_ser_yes;
rec_ctrl.par = regk_ser_odd;
- }
- else if (p->parity == 'E')
- {
+ } else if (p->parity == 'E') {
tr_ctrl.par_en = regk_ser_yes;
tr_ctrl.par = regk_ser_even;
rec_ctrl.par_en = regk_ser_yes;
rec_ctrl.par = regk_ser_odd;
}
- if (p->bits == 7)
- {
+ if (p->bits == 7) {
tr_ctrl.data_bits = regk_ser_bits7;
rec_ctrl.data_bits = regk_ser_bits7;
}
@@ -161,8 +157,7 @@ start_port(struct dbg_port* p)
#ifdef CONFIG_ETRAX_KGDB
/* Use polling to get a single character from the kernel debug port */
-int
-getDebugChar(void)
+int getDebugChar(void)
{
reg_ser_rs_stat_din stat;
reg_ser_rw_ack_intr ack_intr = { 0 };
@@ -179,8 +174,7 @@ getDebugChar(void)
}
/* Use polling to put a single character to the kernel debug port */
-void
-putDebugChar(int val)
+void putDebugChar(int val)
{
reg_ser_r_stat_din stat;
do {
@@ -190,12 +184,48 @@ putDebugChar(int val)
}
#endif /* CONFIG_ETRAX_KGDB */
+static void __init early_putch(int c)
+{
+ reg_ser_r_stat_din stat;
+ /* Wait until transmitter is ready and send. */
+ do
+ stat = REG_RD(ser, port->instance, r_stat_din);
+ while (!stat.tr_rdy);
+ REG_WR_INT(ser, port->instance, rw_dout, c);
+}
+
+static void __init
+early_console_write(struct console *con, const char *s, unsigned n)
+{
+ extern void reset_watchdog(void);
+ int i;
+
+ /* Send data. */
+ for (i = 0; i < n; i++) {
+ /* TODO: the '\n' -> '\n\r' translation should be done at the
+ receiver. Remove it when the serial driver removes it. */
+ if (s[i] == '\n')
+ early_putch('\r');
+ early_putch(s[i]);
+ reset_watchdog();
+ }
+}
+
+static struct console early_console_dev __initdata = {
+ .name = "early",
+ .write = early_console_write,
+ .flags = CON_PRINTBUFFER | CON_BOOT,
+ .index = -1
+};
+
/* Register console for printk's, etc. */
-int __init
-init_etrax_debug(void)
+int __init init_etrax_debug(void)
{
start_port(port);
+ /* Register an early console if a debug port was chosen. */
+ register_console(&early_console_dev);
+
#ifdef CONFIG_ETRAX_KGDB
start_port(kgdb_port);
#endif /* CONFIG_ETRAX_KGDB */
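The debugport.c hunks above add an early boot console (early_console_dev) that emits characters by polling the UART until a regular console driver takes over. The general shape of such a console, with hypothetical names and the hardware access stubbed out, is roughly:

#include <linux/console.h>
#include <linux/init.h>

/* Hypothetical polled output routine; real code pokes the UART registers. */
static void example_early_write(struct console *con, const char *s,
				unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		/* Busy-wait until the transmitter is ready, then send s[i]. */
	}
}

static struct console example_early_console __initdata = {
	.name	= "early",
	.write	= example_early_write,
	.flags	= CON_PRINTBUFFER | CON_BOOT,
	.index	= -1,
};

/* Called from the architecture's early setup code. */
void __init example_register_early_console(void)
{
	register_console(&example_early_console);
}

Because the console carries CON_BOOT, the console core unregisters it automatically once a real console registers, so the polled path only serves very early boot messages.
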
diff --git a/arch/cris/arch-v32/kernel/time.c b/arch/cris/arch-v32/kernel/time.c
index ee66866538f..eb74dabbeb9 100644
--- a/arch/cris/arch-v32/kernel/time.c
+++ b/arch/cris/arch-v32/kernel/time.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/threads.h>
#include <linux/cpufreq.h>
+#include <linux/mm.h>
#include <asm/types.h>
#include <asm/signal.h>
#include <asm/io.h>
@@ -56,7 +57,6 @@ static int __init etrax_init_cont_rotime(void)
}
arch_initcall(etrax_init_cont_rotime);
-
unsigned long timer_regs[NR_CPUS] =
{
regi_timer0,
@@ -68,9 +68,8 @@ unsigned long timer_regs[NR_CPUS] =
extern int set_rtc_mmss(unsigned long nowtime);
#ifdef CONFIG_CPU_FREQ
-static int
-cris_time_freq_notifier(struct notifier_block *nb, unsigned long val,
- void *data);
+static int cris_time_freq_notifier(struct notifier_block *nb,
+ unsigned long val, void *data);
static struct notifier_block cris_time_freq_notifier_block = {
.notifier_call = cris_time_freq_notifier,
@@ -87,7 +86,6 @@ unsigned long get_ns_in_jiffie(void)
return ns;
}
-
/* From timer MDS describing the hardware watchdog:
* 4.3.1 Watchdog Operation
* The watchdog timer is an 8-bit timer with a configurable start value.
@@ -109,11 +107,18 @@ static short int watchdog_key = 42; /* arbitrary 7 bit number */
* is used though, so set this really low. */
#define WATCHDOG_MIN_FREE_PAGES 8
+/* for reliable NICE_DOGGY behaviour */
+static int bite_in_progress;
+
void reset_watchdog(void)
{
#if defined(CONFIG_ETRAX_WATCHDOG)
reg_timer_rw_wd_ctrl wd_ctrl = { 0 };
+#if defined(CONFIG_ETRAX_WATCHDOG_NICE_DOGGY)
+ if (unlikely(bite_in_progress))
+ return;
+#endif
/* Only keep watchdog happy as long as we have memory left! */
if(nr_free_pages() > WATCHDOG_MIN_FREE_PAGES) {
/* Reset the watchdog with the inverse of the old key */
@@ -148,7 +153,9 @@ void handle_watchdog_bite(struct pt_regs *regs)
#if defined(CONFIG_ETRAX_WATCHDOG)
extern int cause_of_death;
+ nmi_enter();
oops_in_progress = 1;
+ bite_in_progress = 1;
printk(KERN_WARNING "Watchdog bite\n");
/* Check if forced restart or unexpected watchdog */
@@ -170,6 +177,7 @@ void handle_watchdog_bite(struct pt_regs *regs)
printk(KERN_WARNING "Oops: bitten by watchdog\n");
show_registers(regs);
oops_in_progress = 0;
+ printk("\n"); /* Flush mtdoops. */
#ifndef CONFIG_ETRAX_WATCHDOG_NICE_DOGGY
reset_watchdog();
#endif
@@ -202,7 +210,7 @@ static inline irqreturn_t timer_interrupt(int irq, void *dev_id)
/* Reset watchdog otherwise it resets us! */
reset_watchdog();
- /* Update statistics. */
+ /* Update statistics. */
update_process_times(user_mode(regs));
cris_do_profile(regs); /* Save profiling information */
@@ -213,7 +221,7 @@ static inline irqreturn_t timer_interrupt(int irq, void *dev_id)
/* Call the real timer interrupt handler */
xtime_update(1);
- return IRQ_HANDLED;
+ return IRQ_HANDLED;
}
/* Timer is IRQF_SHARED so drivers can add stuff to the timer irq chain. */
@@ -293,14 +301,13 @@ void __init time_init(void)
#ifdef CONFIG_CPU_FREQ
cpufreq_register_notifier(&cris_time_freq_notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
+ CPUFREQ_TRANSITION_NOTIFIER);
#endif
}
#ifdef CONFIG_CPU_FREQ
-static int
-cris_time_freq_notifier(struct notifier_block *nb, unsigned long val,
- void *data)
+static int cris_time_freq_notifier(struct notifier_block *nb,
+ unsigned long val, void *data)
{
struct cpufreq_freqs *freqs = data;
if (val == CPUFREQ_POSTCHANGE) {
diff --git a/arch/cris/arch-v32/lib/usercopy.c b/arch/cris/arch-v32/lib/usercopy.c
index 0b5b70d5f58..f0f335d8aa7 100644
--- a/arch/cris/arch-v32/lib/usercopy.c
+++ b/arch/cris/arch-v32/lib/usercopy.c
@@ -26,8 +26,7 @@
/* Copy to userspace. This is based on the memcpy used for
kernel-to-kernel copying; see "string.c". */
-unsigned long
-__copy_user (void __user *pdst, const void *psrc, unsigned long pn)
+unsigned long __copy_user(void __user *pdst, const void *psrc, unsigned long pn)
{
/* We want the parameters put in special registers.
Make sure the compiler is able to make something useful of this.
@@ -155,13 +154,13 @@ __copy_user (void __user *pdst, const void *psrc, unsigned long pn)
return retn;
}
+EXPORT_SYMBOL(__copy_user);
/* Copy from user to kernel, zeroing the bytes that were inaccessible in
userland. The return-value is the number of bytes that were
inaccessible. */
-
-unsigned long
-__copy_user_zeroing(void *pdst, const void __user *psrc, unsigned long pn)
+unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+ unsigned long pn)
{
/* We want the parameters put in special registers.
Make sure the compiler is able to make something useful of this.
@@ -321,11 +320,10 @@ copy_exception_bytes:
return retn + n;
}
+EXPORT_SYMBOL(__copy_user_zeroing);
/* Zero userspace. */
-
-unsigned long
-__do_clear_user (void __user *pto, unsigned long pn)
+unsigned long __do_clear_user(void __user *pto, unsigned long pn)
{
/* We want the parameters put in special registers.
Make sure the compiler is able to make something useful of this.
@@ -468,3 +466,4 @@ __do_clear_user (void __user *pto, unsigned long pn)
return retn;
}
+EXPORT_SYMBOL(__do_clear_user);
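The usercopy.c change above moves the EXPORT_SYMBOL() statements next to the function definitions instead of keeping them in a separate crisksyms.c list. The convention, with an illustrative helper name only, looks like this:

#include <linux/export.h>
#include <linux/string.h>

/* Illustrative helper only. */
unsigned long example_copy_helper(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);	/* stand-in for the real copy loop */
	return 0;		/* bytes that could not be copied */
}
/* Export right after the definition instead of in a separate ksyms file. */
EXPORT_SYMBOL(example_copy_helper);
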
diff --git a/arch/cris/arch-v32/mach-fs/pinmux.c b/arch/cris/arch-v32/mach-fs/pinmux.c
index 38f29eec14a..05a04708b8e 100644
--- a/arch/cris/arch-v32/mach-fs/pinmux.c
+++ b/arch/cris/arch-v32/mach-fs/pinmux.c
@@ -26,7 +26,29 @@ static DEFINE_SPINLOCK(pinmux_lock);
static void crisv32_pinmux_set(int port);
-int crisv32_pinmux_init(void)
+static int __crisv32_pinmux_alloc(int port, int first_pin, int last_pin,
+ enum pin_mode mode)
+{
+ int i;
+
+ for (i = first_pin; i <= last_pin; i++) {
+ if ((pins[port][i] != pinmux_none)
+ && (pins[port][i] != pinmux_gpio)
+ && (pins[port][i] != mode)) {
+#ifdef DEBUG
+ panic("Pinmux alloc failed!\n");
+#endif
+ return -EPERM;
+ }
+ }
+
+ for (i = first_pin; i <= last_pin; i++)
+ pins[port][i] = mode;
+
+ crisv32_pinmux_set(port);
+
+ return 0;
+}
+
+static int crisv32_pinmux_init(void)
{
static int initialized;
@@ -37,20 +59,20 @@ int crisv32_pinmux_init(void)
pa.pa0 = pa.pa1 = pa.pa2 = pa.pa3 =
pa.pa4 = pa.pa5 = pa.pa6 = pa.pa7 = regk_pinmux_yes;
REG_WR(pinmux, regi_pinmux, rw_pa, pa);
- crisv32_pinmux_alloc(PORT_B, 0, PORT_PINS - 1, pinmux_gpio);
- crisv32_pinmux_alloc(PORT_C, 0, PORT_PINS - 1, pinmux_gpio);
- crisv32_pinmux_alloc(PORT_D, 0, PORT_PINS - 1, pinmux_gpio);
- crisv32_pinmux_alloc(PORT_E, 0, PORT_PINS - 1, pinmux_gpio);
+ __crisv32_pinmux_alloc(PORT_B, 0, PORT_PINS - 1, pinmux_gpio);
+ __crisv32_pinmux_alloc(PORT_C, 0, PORT_PINS - 1, pinmux_gpio);
+ __crisv32_pinmux_alloc(PORT_D, 0, PORT_PINS - 1, pinmux_gpio);
+ __crisv32_pinmux_alloc(PORT_E, 0, PORT_PINS - 1, pinmux_gpio);
}
return 0;
}
-int
-crisv32_pinmux_alloc(int port, int first_pin, int last_pin, enum pin_mode mode)
+int crisv32_pinmux_alloc(int port, int first_pin, int last_pin,
+ enum pin_mode mode)
{
- int i;
unsigned long flags;
+ int ret;
crisv32_pinmux_init();
@@ -59,26 +81,11 @@ crisv32_pinmux_alloc(int port, int first_pin, int last_pin, enum pin_mode mode)
spin_lock_irqsave(&pinmux_lock, flags);
- for (i = first_pin; i <= last_pin; i++) {
- if ((pins[port][i] != pinmux_none)
- && (pins[port][i] != pinmux_gpio)
- && (pins[port][i] != mode)) {
- spin_unlock_irqrestore(&pinmux_lock, flags);
-#ifdef DEBUG
- panic("Pinmux alloc failed!\n");
-#endif
- return -EPERM;
- }
- }
-
- for (i = first_pin; i <= last_pin; i++)
- pins[port][i] = mode;
-
- crisv32_pinmux_set(port);
+ ret = __crisv32_pinmux_alloc(port, first_pin, last_pin, mode);
spin_unlock_irqrestore(&pinmux_lock, flags);
- return 0;
+ return ret;
}
int crisv32_pinmux_alloc_fixed(enum fixed_function function)
@@ -98,58 +105,58 @@ int crisv32_pinmux_alloc_fixed(enum fixed_function function)
switch (function) {
case pinmux_ser1:
- ret = crisv32_pinmux_alloc(PORT_C, 4, 7, pinmux_fixed);
+ ret = __crisv32_pinmux_alloc(PORT_C, 4, 7, pinmux_fixed);
hwprot.ser1 = regk_pinmux_yes;
break;
case pinmux_ser2:
- ret = crisv32_pinmux_alloc(PORT_C, 8, 11, pinmux_fixed);
+ ret = __crisv32_pinmux_alloc(PORT_C, 8, 11, pinmux_fixed);
hwprot.ser2 = regk_pinmux_yes;
break;
case pinmux_ser3:
- ret = crisv32_pinmux_alloc(PORT_C, 12, 15, pinmux_fixed);
+ ret = __crisv32_pinmux_alloc(PORT_C, 12, 15, pinmux_fixed);
hwprot.ser3 = regk_pinmux_yes;
break;
case pinmux_sser0:
- ret = crisv32_pinmux_alloc(PORT_C, 0, 3, pinmux_fixed);
- ret |= crisv32_pinmux_alloc(PORT_C, 16, 16, pinmux_fixed);
+ ret = __crisv32_pinmux_alloc(PORT_C, 0, 3, pinmux_fixed);
+ ret |= __crisv32_pinmux_alloc(PORT_C, 16, 16, pinmux_fixed);
hwprot.sser0 = regk_pinmux_yes;
break;
case pinmux_sser1:
- ret = crisv32_pinmux_alloc(PORT_D, 0, 4, pinmux_fixed);
+ ret = __crisv32_pinmux_alloc(PORT_D, 0, 4, pinmux_fixed);
hwprot.sser1 = regk_pinmux_yes;
break;
case pinmux_ata0:
- ret = crisv32_pinmux_alloc(PORT_D, 5, 7, pinmux_fixed);
- ret |= crisv32_pinmux_alloc(PORT_D, 15, 17, pinmux_fixed);
+ ret = __crisv32_pinmux_alloc(PORT_D, 5, 7, pinmux_fixed);
+ ret |= __crisv32_pinmux_alloc(PORT_D, 15, 17, pinmux_fixed);
hwprot.ata0 = regk_pinmux_yes;
break;
case pinmux_ata1:
- ret = crisv32_pinmux_alloc(PORT_D, 0, 4, pinmux_fixed);
- ret |= crisv32_pinmux_alloc(PORT_E, 17, 17, pinmux_fixed);
+ ret = __crisv32_pinmux_alloc(PORT_D, 0, 4, pinmux_fixed);
+ ret |= __crisv32_pinmux_alloc(PORT_E, 17, 17, pinmux_fixed);
hwprot.ata1 = regk_pinmux_yes;
break;
case pinmux_ata2:
- ret = crisv32_pinmux_alloc(PORT_C, 11, 15, pinmux_fixed);
- ret |= crisv32_pinmux_alloc(PORT_E, 3, 3, pinmux_fixed);
+ ret = __crisv32_pinmux_alloc(PORT_C, 11, 15, pinmux_fixed);
+ ret |= __crisv32_pinmux_alloc(PORT_E, 3, 3, pinmux_fixed);
hwprot.ata2 = regk_pinmux_yes;
break;
case pinmux_ata3:
- ret = crisv32_pinmux_alloc(PORT_C, 8, 10, pinmux_fixed);
- ret |= crisv32_pinmux_alloc(PORT_C, 0, 2, pinmux_fixed);
+ ret = __crisv32_pinmux_alloc(PORT_C, 8, 10, pinmux_fixed);
+ ret |= __crisv32_pinmux_alloc(PORT_C, 0, 2, pinmux_fixed);
hwprot.ata2 = regk_pinmux_yes;
break;
case pinmux_ata:
- ret = crisv32_pinmux_alloc(PORT_B, 0, 15, pinmux_fixed);
- ret |= crisv32_pinmux_alloc(PORT_D, 8, 15, pinmux_fixed);
+ ret = __crisv32_pinmux_alloc(PORT_B, 0, 15, pinmux_fixed);
+ ret |= __crisv32_pinmux_alloc(PORT_D, 8, 15, pinmux_fixed);
hwprot.ata = regk_pinmux_yes;
break;
case pinmux_eth1:
- ret = crisv32_pinmux_alloc(PORT_E, 0, 17, pinmux_fixed);
+ ret = __crisv32_pinmux_alloc(PORT_E, 0, 17, pinmux_fixed);
hwprot.eth1 = regk_pinmux_yes;
hwprot.eth1_mgm = regk_pinmux_yes;
break;
case pinmux_timer:
- ret = crisv32_pinmux_alloc(PORT_C, 16, 16, pinmux_fixed);
+ ret = __crisv32_pinmux_alloc(PORT_C, 16, 16, pinmux_fixed);
hwprot.timer = regk_pinmux_yes;
spin_unlock_irqrestore(&pinmux_lock, flags);
return ret;
@@ -188,9 +195,19 @@ void crisv32_pinmux_set(int port)
#endif
}
-int crisv32_pinmux_dealloc(int port, int first_pin, int last_pin)
+static int __crisv32_pinmux_dealloc(int port, int first_pin, int last_pin)
{
int i;
+
+ for (i = first_pin; i <= last_pin; i++)
+ pins[port][i] = pinmux_none;
+
+ crisv32_pinmux_set(port);
+ return 0;
+}
+
+int crisv32_pinmux_dealloc(int port, int first_pin, int last_pin)
+{
unsigned long flags;
crisv32_pinmux_init();
@@ -199,11 +216,7 @@ int crisv32_pinmux_dealloc(int port, int first_pin, int last_pin)
return -EINVAL;
spin_lock_irqsave(&pinmux_lock, flags);
-
- for (i = first_pin; i <= last_pin; i++)
- pins[port][i] = pinmux_none;
-
- crisv32_pinmux_set(port);
+ __crisv32_pinmux_dealloc(port, first_pin, last_pin);
spin_unlock_irqrestore(&pinmux_lock, flags);
return 0;
@@ -226,58 +239,58 @@ int crisv32_pinmux_dealloc_fixed(enum fixed_function function)
switch (function) {
case pinmux_ser1:
- ret = crisv32_pinmux_dealloc(PORT_C, 4, 7);
+ ret = __crisv32_pinmux_dealloc(PORT_C, 4, 7);
hwprot.ser1 = regk_pinmux_no;
break;
case pinmux_ser2:
- ret = crisv32_pinmux_dealloc(PORT_C, 8, 11);
+ ret = __crisv32_pinmux_dealloc(PORT_C, 8, 11);
hwprot.ser2 = regk_pinmux_no;
break;
case pinmux_ser3:
- ret = crisv32_pinmux_dealloc(PORT_C, 12, 15);
+ ret = __crisv32_pinmux_dealloc(PORT_C, 12, 15);
hwprot.ser3 = regk_pinmux_no;
break;
case pinmux_sser0:
- ret = crisv32_pinmux_dealloc(PORT_C, 0, 3);
- ret |= crisv32_pinmux_dealloc(PORT_C, 16, 16);
+ ret = __crisv32_pinmux_dealloc(PORT_C, 0, 3);
+ ret |= __crisv32_pinmux_dealloc(PORT_C, 16, 16);
hwprot.sser0 = regk_pinmux_no;
break;
case pinmux_sser1:
- ret = crisv32_pinmux_dealloc(PORT_D, 0, 4);
+ ret = __crisv32_pinmux_dealloc(PORT_D, 0, 4);
hwprot.sser1 = regk_pinmux_no;
break;
case pinmux_ata0:
- ret = crisv32_pinmux_dealloc(PORT_D, 5, 7);
- ret |= crisv32_pinmux_dealloc(PORT_D, 15, 17);
+ ret = __crisv32_pinmux_dealloc(PORT_D, 5, 7);
+ ret |= __crisv32_pinmux_dealloc(PORT_D, 15, 17);
hwprot.ata0 = regk_pinmux_no;
break;
case pinmux_ata1:
- ret = crisv32_pinmux_dealloc(PORT_D, 0, 4);
- ret |= crisv32_pinmux_dealloc(PORT_E, 17, 17);
+ ret = __crisv32_pinmux_dealloc(PORT_D, 0, 4);
+ ret |= __crisv32_pinmux_dealloc(PORT_E, 17, 17);
hwprot.ata1 = regk_pinmux_no;
break;
case pinmux_ata2:
- ret = crisv32_pinmux_dealloc(PORT_C, 11, 15);
- ret |= crisv32_pinmux_dealloc(PORT_E, 3, 3);
+ ret = __crisv32_pinmux_dealloc(PORT_C, 11, 15);
+ ret |= __crisv32_pinmux_dealloc(PORT_E, 3, 3);
hwprot.ata2 = regk_pinmux_no;
break;
case pinmux_ata3:
- ret = crisv32_pinmux_dealloc(PORT_C, 8, 10);
- ret |= crisv32_pinmux_dealloc(PORT_C, 0, 2);
+ ret = __crisv32_pinmux_dealloc(PORT_C, 8, 10);
+ ret |= __crisv32_pinmux_dealloc(PORT_C, 0, 2);
hwprot.ata2 = regk_pinmux_no;
break;
case pinmux_ata:
- ret = crisv32_pinmux_dealloc(PORT_B, 0, 15);
- ret |= crisv32_pinmux_dealloc(PORT_D, 8, 15);
+ ret = __crisv32_pinmux_dealloc(PORT_B, 0, 15);
+ ret |= __crisv32_pinmux_dealloc(PORT_D, 8, 15);
hwprot.ata = regk_pinmux_no;
break;
case pinmux_eth1:
- ret = crisv32_pinmux_dealloc(PORT_E, 0, 17);
+ ret = __crisv32_pinmux_dealloc(PORT_E, 0, 17);
hwprot.eth1 = regk_pinmux_no;
hwprot.eth1_mgm = regk_pinmux_no;
break;
case pinmux_timer:
- ret = crisv32_pinmux_dealloc(PORT_C, 16, 16);
+ ret = __crisv32_pinmux_dealloc(PORT_C, 16, 16);
hwprot.timer = regk_pinmux_no;
spin_unlock_irqrestore(&pinmux_lock, flags);
return ret;
@@ -293,7 +306,8 @@ int crisv32_pinmux_dealloc_fixed(enum fixed_function function)
return ret;
}
-void crisv32_pinmux_dump(void)
+#ifdef DEBUG
+static void crisv32_pinmux_dump(void)
{
int i, j;
@@ -305,5 +319,5 @@ void crisv32_pinmux_dump(void)
printk(KERN_DEBUG " Pin %d = %d\n", j, pins[i][j]);
}
}
-
+#endif
__initcall(crisv32_pinmux_init);
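The pinmux.c rework above splits each locked entry point into a __-prefixed worker that expects pinmux_lock to be held by the caller, so crisv32_pinmux_alloc_fixed() and crisv32_pinmux_dealloc_fixed() can reuse the workers while already holding the lock. A generic sketch of the pattern, with made-up names, is:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static int example_state[16];

/* Worker: caller must already hold example_lock. */
static int __example_set_range(int first, int last, int val)
{
	int i;

	for (i = first; i <= last; i++)
		example_state[i] = val;

	return 0;
}

/* Public entry point: takes the lock, then calls the worker. */
int example_set_range(int first, int last, int val)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&example_lock, flags);
	ret = __example_set_range(first, last, val);
	spin_unlock_irqrestore(&example_lock, flags);

	return ret;
}

Previously the fixed-function paths called the public allocator while already holding pinmux_lock, which would self-deadlock on a non-recursive spinlock; routing them through the unlocked worker removes that hazard.
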
diff --git a/arch/cris/include/arch-v32/mach-fs/mach/pinmux.h b/arch/cris/include/arch-v32/mach-fs/mach/pinmux.h
index c2b3036779d..09bf0c90d2d 100644
--- a/arch/cris/include/arch-v32/mach-fs/mach/pinmux.h
+++ b/arch/cris/include/arch-v32/mach-fs/mach/pinmux.h
@@ -28,11 +28,9 @@ enum fixed_function {
pinmux_timer
};
-int crisv32_pinmux_init(void);
int crisv32_pinmux_alloc(int port, int first_pin, int last_pin, enum pin_mode);
int crisv32_pinmux_alloc_fixed(enum fixed_function function);
int crisv32_pinmux_dealloc(int port, int first_pin, int last_pin);
int crisv32_pinmux_dealloc_fixed(enum fixed_function function);
-void crisv32_pinmux_dump(void);
#endif
diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
index d5f124832fd..889f2de050a 100644
--- a/arch/cris/include/asm/Kbuild
+++ b/arch/cris/include/asm/Kbuild
@@ -1,8 +1,4 @@
-header-y += arch-v10/
-header-y += arch-v32/
-
-
generic-y += barrier.h
generic-y += clkdev.h
generic-y += cputime.h
diff --git a/arch/cris/include/uapi/asm/Kbuild b/arch/cris/include/uapi/asm/Kbuild
index 7d47b366ad8..01f66b8f15e 100644
--- a/arch/cris/include/uapi/asm/Kbuild
+++ b/arch/cris/include/uapi/asm/Kbuild
@@ -1,8 +1,8 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-header-y += arch-v10/
-header-y += arch-v32/
+header-y += ../arch-v10/arch/
+header-y += ../arch-v32/arch/
header-y += auxvec.h
header-y += bitsperlong.h
header-y += byteorder.h
diff --git a/arch/cris/kernel/crisksyms.c b/arch/cris/kernel/crisksyms.c
index 5868cee20eb..3908b942fd4 100644
--- a/arch/cris/kernel/crisksyms.c
+++ b/arch/cris/kernel/crisksyms.c
@@ -47,16 +47,16 @@ EXPORT_SYMBOL(__negdi2);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);
-/* Userspace access functions */
-EXPORT_SYMBOL(__copy_user_zeroing);
-EXPORT_SYMBOL(__copy_user);
-
#undef memcpy
#undef memset
extern void * memset(void *, int, __kernel_size_t);
extern void * memcpy(void *, const void *, __kernel_size_t);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
+#ifdef CONFIG_ETRAX_ARCH_V32
+#undef strcmp
+EXPORT_SYMBOL(strcmp);
+#endif
#ifdef CONFIG_ETRAX_FAST_TIMER
/* Fast timer functions */
@@ -66,3 +66,4 @@ EXPORT_SYMBOL(del_fast_timer);
EXPORT_SYMBOL(schedule_usleep);
#endif
EXPORT_SYMBOL(csum_partial);
+EXPORT_SYMBOL(csum_partial_copy_from_user);
diff --git a/arch/cris/kernel/traps.c b/arch/cris/kernel/traps.c
index 0ffda73734f..da4c72401e2 100644
--- a/arch/cris/kernel/traps.c
+++ b/arch/cris/kernel/traps.c
@@ -14,6 +14,10 @@
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/utsname.h>
+#ifdef CONFIG_KALLSYMS
+#include <linux/kallsyms.h>
+#endif
#include <asm/pgtable.h>
#include <asm/uaccess.h>
@@ -34,25 +38,24 @@ static int kstack_depth_to_print = 24;
void (*nmi_handler)(struct pt_regs *);
-void
-show_trace(unsigned long *stack)
+void show_trace(unsigned long *stack)
{
unsigned long addr, module_start, module_end;
extern char _stext, _etext;
int i;
- printk("\nCall Trace: ");
+ pr_err("\nCall Trace: ");
i = 1;
module_start = VMALLOC_START;
module_end = VMALLOC_END;
- while (((long)stack & (THREAD_SIZE-1)) != 0) {
+ while (((long)stack & (THREAD_SIZE - 1)) != 0) {
if (__get_user(addr, stack)) {
/* This message matches "failing address" marked
s390 in ksymoops, so lines containing it will
not be filtered out by ksymoops. */
- printk("Failing address 0x%lx\n", (unsigned long)stack);
+ pr_err("Failing address 0x%lx\n", (unsigned long)stack);
break;
}
stack++;
@@ -68,10 +71,14 @@ show_trace(unsigned long *stack)
if (((addr >= (unsigned long)&_stext) &&
(addr <= (unsigned long)&_etext)) ||
((addr >= module_start) && (addr <= module_end))) {
+#ifdef CONFIG_KALLSYMS
+ print_ip_sym(addr);
+#else
if (i && ((i % 8) == 0))
- printk("\n ");
- printk("[<%08lx>] ", addr);
+ pr_err("\n ");
+ pr_err("[<%08lx>] ", addr);
i++;
+#endif
}
}
}
@@ -111,21 +118,21 @@ show_stack(struct task_struct *task, unsigned long *sp)
stack = sp;
- printk("\nStack from %08lx:\n ", (unsigned long)stack);
+ pr_err("\nStack from %08lx:\n ", (unsigned long)stack);
for (i = 0; i < kstack_depth_to_print; i++) {
if (((long)stack & (THREAD_SIZE-1)) == 0)
break;
if (i && ((i % 8) == 0))
- printk("\n ");
+ pr_err("\n ");
if (__get_user(addr, stack)) {
/* This message matches "failing address" marked
s390 in ksymoops, so lines containing it will
not be filtered out by ksymoops. */
- printk("Failing address 0x%lx\n", (unsigned long)stack);
+ pr_err("Failing address 0x%lx\n", (unsigned long)stack);
break;
}
stack++;
- printk("%08lx ", addr);
+ pr_err("%08lx ", addr);
}
show_trace(sp);
}
@@ -139,33 +146,32 @@ show_stack(void)
unsigned long *sp = (unsigned long *)rdusp();
int i;
- printk("Stack dump [0x%08lx]:\n", (unsigned long)sp);
+ pr_err("Stack dump [0x%08lx]:\n", (unsigned long)sp);
for (i = 0; i < 16; i++)
- printk("sp + %d: 0x%08lx\n", i*4, sp[i]);
+ pr_err("sp + %d: 0x%08lx\n", i*4, sp[i]);
return 0;
}
#endif
-void
-set_nmi_handler(void (*handler)(struct pt_regs *))
+void set_nmi_handler(void (*handler)(struct pt_regs *))
{
nmi_handler = handler;
arch_enable_nmi();
}
#ifdef CONFIG_DEBUG_NMI_OOPS
-void
-oops_nmi_handler(struct pt_regs *regs)
+void oops_nmi_handler(struct pt_regs *regs)
{
stop_watchdog();
oops_in_progress = 1;
- printk("NMI!\n");
+ pr_err("NMI!\n");
show_registers(regs);
oops_in_progress = 0;
+ oops_exit();
+ pr_err("\n"); /* Flush mtdoops. */
}
-static int __init
-oops_nmi_register(void)
+static int __init oops_nmi_register(void)
{
set_nmi_handler(oops_nmi_handler);
return 0;
@@ -180,8 +186,7 @@ __initcall(oops_nmi_register);
* similar to an Oops dump, and if the kernel is configured to be a nice
* doggy, then halt instead of reboot.
*/
-void
-watchdog_bite_hook(struct pt_regs *regs)
+void watchdog_bite_hook(struct pt_regs *regs)
{
#ifdef CONFIG_ETRAX_WATCHDOG_NICE_DOGGY
local_irq_disable();
@@ -196,8 +201,7 @@ watchdog_bite_hook(struct pt_regs *regs)
}
/* This is normally the Oops function. */
-void
-die_if_kernel(const char *str, struct pt_regs *regs, long err)
+void die_if_kernel(const char *str, struct pt_regs *regs, long err)
{
if (user_mode(regs))
return;
@@ -211,13 +215,17 @@ die_if_kernel(const char *str, struct pt_regs *regs, long err)
stop_watchdog();
#endif
+ oops_enter();
handle_BUG(regs);
- printk("%s: %04lx\n", str, err & 0xffff);
+ pr_err("Linux %s %s\n", utsname()->release, utsname()->version);
+ pr_err("%s: %04lx\n", str, err & 0xffff);
show_registers(regs);
+ oops_exit();
oops_in_progress = 0;
+ pr_err("\n"); /* Flush mtdoops. */
#ifdef CONFIG_ETRAX_WATCHDOG_NICE_DOGGY
reset_watchdog();
@@ -225,8 +233,7 @@ die_if_kernel(const char *str, struct pt_regs *regs, long err)
do_exit(SIGSEGV);
}
-void __init
-trap_init(void)
+void __init trap_init(void)
{
/* Nothing needs to be done */
}
diff --git a/arch/cris/mm/init.c b/arch/cris/mm/init.c
index c81af5bd916..1e7fd45b60f 100644
--- a/arch/cris/mm/init.c
+++ b/arch/cris/mm/init.c
@@ -11,13 +11,15 @@
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/bootmem.h>
+#include <linux/proc_fs.h>
+#include <linux/kcore.h>
#include <asm/tlb.h>
#include <asm/sections.h>
unsigned long empty_zero_page;
+EXPORT_SYMBOL(empty_zero_page);
-void __init
-mem_init(void)
+void __init mem_init(void)
{
BUG_ON(!mem_map);
@@ -31,10 +33,36 @@ mem_init(void)
mem_init_print_info(NULL);
}
-/* free the pages occupied by initialization code */
+/* Free a range of init pages. Virtual addresses. */
-void
-free_initmem(void)
+void free_init_pages(const char *what, unsigned long begin, unsigned long end)
+{
+ unsigned long addr;
+
+ for (addr = begin; addr < end; addr += PAGE_SIZE) {
+ ClearPageReserved(virt_to_page(addr));
+ init_page_count(virt_to_page(addr));
+ free_page(addr);
+ totalram_pages++;
+ }
+
+ printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
+}
+
+/* Free the pages occupied by initialization code. */
+
+void free_initmem(void)
{
free_initmem_default(-1);
}
+
+/* Free the pages occupied by initrd code. */
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+ free_init_pages("initrd memory",
+ start,
+ end);
+}
+#endif
diff --git a/arch/cris/mm/ioremap.c b/arch/cris/mm/ioremap.c
index f9ca44bdea2..80fdb995a8c 100644
--- a/arch/cris/mm/ioremap.c
+++ b/arch/cris/mm/ioremap.c
@@ -76,10 +76,11 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
* Must be freed with iounmap.
*/
-void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
+void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
return __ioremap(phys_addr | MEM_NON_CACHEABLE, size, 0);
}
+EXPORT_SYMBOL(ioremap_nocache);
void iounmap(volatile void __iomem *addr)
{
diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
index 263511719a4..69952c18420 100644
--- a/arch/hexagon/include/asm/cache.h
+++ b/arch/hexagon/include/asm/cache.h
@@ -1,7 +1,7 @@
/*
* Cache definitions for the Hexagon architecture
*
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2011,2014 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -25,6 +25,8 @@
#define L1_CACHE_SHIFT (5)
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+
#define __cacheline_aligned __aligned(L1_CACHE_BYTES)
#define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
diff --git a/arch/hexagon/include/asm/cacheflush.h b/arch/hexagon/include/asm/cacheflush.h
index 49e0896ec24..b86f9f300e9 100644
--- a/arch/hexagon/include/asm/cacheflush.h
+++ b/arch/hexagon/include/asm/cacheflush.h
@@ -21,10 +21,7 @@
#ifndef _ASM_CACHEFLUSH_H
#define _ASM_CACHEFLUSH_H
-#include <linux/cache.h>
-#include <linux/mm.h>
-#include <asm/string.h>
-#include <asm-generic/cacheflush.h>
+#include <linux/mm_types.h>
/* Cache flushing:
*
@@ -41,6 +38,20 @@
#define LINESIZE 32
#define LINEBITS 5
+#define flush_cache_all() do { } while (0)
+#define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
+#define flush_cache_range(vma, start, end) do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+#define flush_dcache_page(page) do { } while (0)
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+#define flush_icache_page(vma, pg) do { } while (0)
+#define flush_icache_user_range(vma, pg, adr, len) do { } while (0)
+#define flush_cache_vmap(start, end) do { } while (0)
+#define flush_cache_vunmap(start, end) do { } while (0)
+
/*
* Flush Dcache range through current map.
*/
@@ -49,7 +60,6 @@ extern void flush_dcache_range(unsigned long start, unsigned long end);
/*
* Flush Icache range through current map.
*/
-#undef flush_icache_range
extern void flush_icache_range(unsigned long start, unsigned long end);
/*
@@ -79,19 +89,11 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
/* generic_ptrace_pokedata doesn't wind up here, does it? */
}
-#undef copy_to_user_page
-static inline void copy_to_user_page(struct vm_area_struct *vma,
- struct page *page,
- unsigned long vaddr,
- void *dst, void *src, int len)
-{
- memcpy(dst, src, len);
- if (vma->vm_flags & VM_EXEC) {
- flush_icache_range((unsigned long) dst,
- (unsigned long) dst + len);
- }
-}
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long vaddr, void *dst, void *src, int len);
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy(dst, src, len)
extern void hexagon_inv_dcache_range(unsigned long start, unsigned long end);
extern void hexagon_clean_dcache_range(unsigned long start, unsigned long end);
diff --git a/arch/hexagon/include/asm/io.h b/arch/hexagon/include/asm/io.h
index 70298996e9b..66f5e9a61ef 100644
--- a/arch/hexagon/include/asm/io.h
+++ b/arch/hexagon/include/asm/io.h
@@ -24,14 +24,9 @@
#ifdef __KERNEL__
#include <linux/types.h>
-#include <linux/delay.h>
-#include <linux/vmalloc.h>
-#include <asm/string.h>
-#include <asm/mem-layout.h>
#include <asm/iomap.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
/*
* We don't have PCI yet.
diff --git a/arch/hexagon/kernel/setup.c b/arch/hexagon/kernel/setup.c
index 0e7c1dbb37b..6981949f5df 100644
--- a/arch/hexagon/kernel/setup.c
+++ b/arch/hexagon/kernel/setup.c
@@ -19,6 +19,7 @@
*/
#include <linux/init.h>
+#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/mm.h>
diff --git a/arch/hexagon/kernel/traps.c b/arch/hexagon/kernel/traps.c
index 7858663352b..110dab152f8 100644
--- a/arch/hexagon/kernel/traps.c
+++ b/arch/hexagon/kernel/traps.c
@@ -1,7 +1,7 @@
/*
* Kernel traps/events for Hexagon processor
*
- * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -423,7 +423,7 @@ void do_trap0(struct pt_regs *regs)
*/
info.si_code = TRAP_BRKPT;
info.si_addr = (void __user *) pt_elr(regs);
- send_sig_info(SIGTRAP, &info, current);
+ force_sig_info(SIGTRAP, &info, current);
} else {
#ifdef CONFIG_KGDB
kgdb_handle_exception(pt_cause(regs), SIGTRAP,
diff --git a/arch/hexagon/kernel/vmlinux.lds.S b/arch/hexagon/kernel/vmlinux.lds.S
index 44d8c47bae2..5f268c1071b 100644
--- a/arch/hexagon/kernel/vmlinux.lds.S
+++ b/arch/hexagon/kernel/vmlinux.lds.S
@@ -1,7 +1,7 @@
/*
* Linker script for Hexagon kernel
*
- * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -59,7 +59,7 @@ SECTIONS
INIT_DATA_SECTION(PAGE_SIZE)
_sdata = .;
- RW_DATA_SECTION(32,PAGE_SIZE,PAGE_SIZE)
+ RW_DATA_SECTION(32,PAGE_SIZE,_THREAD_SIZE)
RO_DATA_SECTION(PAGE_SIZE)
_edata = .;
diff --git a/arch/hexagon/mm/cache.c b/arch/hexagon/mm/cache.c
index 0c76c802e31..a7c6d827d8b 100644
--- a/arch/hexagon/mm/cache.c
+++ b/arch/hexagon/mm/cache.c
@@ -127,3 +127,13 @@ void flush_cache_all_hexagon(void)
local_irq_restore(flags);
mb();
}
+
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long vaddr, void *dst, void *src, int len)
+{
+ memcpy(dst, src, len);
+ if (vma->vm_flags & VM_EXEC) {
+ flush_icache_range((unsigned long) dst,
+ (unsigned long) dst + len);
+ }
+}
diff --git a/arch/hexagon/mm/ioremap.c b/arch/hexagon/mm/ioremap.c
index 5905fd5f97f..d27d6722404 100644
--- a/arch/hexagon/mm/ioremap.c
+++ b/arch/hexagon/mm/ioremap.c
@@ -20,6 +20,7 @@
#include <linux/io.h>
#include <linux/vmalloc.h>
+#include <linux/mm.h>
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 536d13b0bea..074e52bf815 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -20,7 +20,6 @@ config IA64
select HAVE_DYNAMIC_FTRACE if (!ITANIUM)
select HAVE_FUNCTION_TRACER
select HAVE_DMA_ATTRS
- select HAVE_KVM
select TTY
select HAVE_ARCH_TRACEHOOK
select HAVE_DMA_API_DEBUG
@@ -232,7 +231,7 @@ config IA64_SGI_UV
config IA64_HP_SIM
bool "Ski-simulator"
select SWIOTLB
- depends on !PM_RUNTIME
+ depends on !PM
endchoice
@@ -640,8 +639,6 @@ source "security/Kconfig"
source "crypto/Kconfig"
-source "arch/ia64/kvm/Kconfig"
-
source "lib/Kconfig"
config IOMMU_HELPER
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index 5441b14994f..970d0bd9962 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -53,7 +53,6 @@ core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/
core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/
core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/
core-$(CONFIG_IA64_SGI_UV) += arch/ia64/uv/
-core-$(CONFIG_KVM) += arch/ia64/kvm/
drivers-$(CONFIG_PCI) += arch/ia64/pci/
drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
deleted file mode 100644
index 4729752b725..00000000000
--- a/arch/ia64/include/asm/kvm_host.h
+++ /dev/null
@@ -1,609 +0,0 @@
-/*
- * kvm_host.h: used for kvm module, and hold ia64-specific sections.
- *
- * Copyright (C) 2007, Intel Corporation.
- *
- * Xiantao Zhang <xiantao.zhang@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- */
-
-#ifndef __ASM_KVM_HOST_H
-#define __ASM_KVM_HOST_H
-
-#define KVM_USER_MEM_SLOTS 32
-
-#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
-#define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS
-
-/* define exit reasons from vmm to kvm*/
-#define EXIT_REASON_VM_PANIC 0
-#define EXIT_REASON_MMIO_INSTRUCTION 1
-#define EXIT_REASON_PAL_CALL 2
-#define EXIT_REASON_SAL_CALL 3
-#define EXIT_REASON_SWITCH_RR6 4
-#define EXIT_REASON_VM_DESTROY 5
-#define EXIT_REASON_EXTERNAL_INTERRUPT 6
-#define EXIT_REASON_IPI 7
-#define EXIT_REASON_PTC_G 8
-#define EXIT_REASON_DEBUG 20
-
-/*Define vmm address space and vm data space.*/
-#define KVM_VMM_SIZE (__IA64_UL_CONST(16)<<20)
-#define KVM_VMM_SHIFT 24
-#define KVM_VMM_BASE 0xD000000000000000
-#define VMM_SIZE (__IA64_UL_CONST(8)<<20)
-
-/*
- * Define vm_buffer, used by PAL Services, base address.
- * Note: vm_buffer is in the VMM-BLOCK, the size must be < 8M
- */
-#define KVM_VM_BUFFER_BASE (KVM_VMM_BASE + VMM_SIZE)
-#define KVM_VM_BUFFER_SIZE (__IA64_UL_CONST(8)<<20)
-
-/*
- * kvm guest's data area looks as follows:
- *
- * +----------------------+ ------- KVM_VM_DATA_SIZE
- * | vcpu[n]'s data | | ___________________KVM_STK_OFFSET
- * | | | / |
- * | .......... | | /vcpu's struct&stack |
- * | .......... | | /---------------------|---- 0
- * | vcpu[5]'s data | | / vpd |
- * | vcpu[4]'s data | |/-----------------------|
- * | vcpu[3]'s data | / vtlb |
- * | vcpu[2]'s data | /|------------------------|
- * | vcpu[1]'s data |/ | vhpt |
- * | vcpu[0]'s data |____________________________|
- * +----------------------+ |
- * | memory dirty log | |
- * +----------------------+ |
- * | vm's data struct | |
- * +----------------------+ |
- * | | |
- * | | |
- * | | |
- * | | |
- * | | |
- * | | |
- * | | |
- * | vm's p2m table | |
- * | | |
- * | | |
- * | | | |
- * vm's data->| | | |
- * +----------------------+ ------- 0
- * To support more guest memory, the size of the p2m table needs to
- * be increased. To support more vcpus, there must be enough space
- * to hold their data.
- */
-
-#define KVM_VM_DATA_SHIFT 26
-#define KVM_VM_DATA_SIZE (__IA64_UL_CONST(1) << KVM_VM_DATA_SHIFT)
-#define KVM_VM_DATA_BASE (KVM_VMM_BASE + KVM_VM_DATA_SIZE)
-
-#define KVM_P2M_BASE KVM_VM_DATA_BASE
-#define KVM_P2M_SIZE (__IA64_UL_CONST(24) << 20)
-
-#define VHPT_SHIFT 16
-#define VHPT_SIZE (__IA64_UL_CONST(1) << VHPT_SHIFT)
-#define VHPT_NUM_ENTRIES (__IA64_UL_CONST(1) << (VHPT_SHIFT-5))
-
-#define VTLB_SHIFT 16
-#define VTLB_SIZE (__IA64_UL_CONST(1) << VTLB_SHIFT)
-#define VTLB_NUM_ENTRIES (1UL << (VHPT_SHIFT-5))
-
-#define VPD_SHIFT 16
-#define VPD_SIZE (__IA64_UL_CONST(1) << VPD_SHIFT)
-
-#define VCPU_STRUCT_SHIFT 16
-#define VCPU_STRUCT_SIZE (__IA64_UL_CONST(1) << VCPU_STRUCT_SHIFT)
-
-/*
- * This must match KVM_IA64_VCPU_STACK_{SHIFT,SIZE} arch/ia64/include/asm/kvm.h
- */
-#define KVM_STK_SHIFT 16
-#define KVM_STK_OFFSET (__IA64_UL_CONST(1)<< KVM_STK_SHIFT)
-
-#define KVM_VM_STRUCT_SHIFT 19
-#define KVM_VM_STRUCT_SIZE (__IA64_UL_CONST(1) << KVM_VM_STRUCT_SHIFT)
-
-#define KVM_MEM_DIRY_LOG_SHIFT 19
-#define KVM_MEM_DIRTY_LOG_SIZE (__IA64_UL_CONST(1) << KVM_MEM_DIRY_LOG_SHIFT)
-
-#ifndef __ASSEMBLY__
-
-/*Define the max vcpus and memory for Guests.*/
-#define KVM_MAX_VCPUS (KVM_VM_DATA_SIZE - KVM_P2M_SIZE - KVM_VM_STRUCT_SIZE -\
- KVM_MEM_DIRTY_LOG_SIZE) / sizeof(struct kvm_vcpu_data)
-#define KVM_MAX_MEM_SIZE (KVM_P2M_SIZE >> 3 << PAGE_SHIFT)
-
-#define VMM_LOG_LEN 256
-
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/kvm.h>
-#include <linux/kvm_para.h>
-#include <linux/kvm_types.h>
-
-#include <asm/pal.h>
-#include <asm/sal.h>
-#include <asm/page.h>
-
-struct kvm_vcpu_data {
- char vcpu_vhpt[VHPT_SIZE];
- char vcpu_vtlb[VTLB_SIZE];
- char vcpu_vpd[VPD_SIZE];
- char vcpu_struct[VCPU_STRUCT_SIZE];
-};
-
-struct kvm_vm_data {
- char kvm_p2m[KVM_P2M_SIZE];
- char kvm_vm_struct[KVM_VM_STRUCT_SIZE];
- char kvm_mem_dirty_log[KVM_MEM_DIRTY_LOG_SIZE];
- struct kvm_vcpu_data vcpu_data[KVM_MAX_VCPUS];
-};
-
-#define VCPU_BASE(n) (KVM_VM_DATA_BASE + \
- offsetof(struct kvm_vm_data, vcpu_data[n]))
-#define KVM_VM_BASE (KVM_VM_DATA_BASE + \
- offsetof(struct kvm_vm_data, kvm_vm_struct))
-#define KVM_MEM_DIRTY_LOG_BASE KVM_VM_DATA_BASE + \
- offsetof(struct kvm_vm_data, kvm_mem_dirty_log)
-
-#define VHPT_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vhpt))
-#define VTLB_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vtlb))
-#define VPD_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vpd))
-#define VCPU_STRUCT_BASE(n) (VCPU_BASE(n) + \
- offsetof(struct kvm_vcpu_data, vcpu_struct))
-
-/*IO section definitions*/
-#define IOREQ_READ 1
-#define IOREQ_WRITE 0
-
-#define STATE_IOREQ_NONE 0
-#define STATE_IOREQ_READY 1
-#define STATE_IOREQ_INPROCESS 2
-#define STATE_IORESP_READY 3
-
-/*Guest Physical address layout.*/
-#define GPFN_MEM (0UL << 60) /* Guest pfn is normal mem */
-#define GPFN_FRAME_BUFFER (1UL << 60) /* VGA framebuffer */
-#define GPFN_LOW_MMIO (2UL << 60) /* Low MMIO range */
-#define GPFN_PIB (3UL << 60) /* PIB base */
-#define GPFN_IOSAPIC (4UL << 60) /* IOSAPIC base */
-#define GPFN_LEGACY_IO (5UL << 60) /* Legacy I/O base */
-#define GPFN_GFW (6UL << 60) /* Guest Firmware */
-#define GPFN_PHYS_MMIO (7UL << 60) /* Directed MMIO Range */
-
-#define GPFN_IO_MASK (7UL << 60) /* Guest pfn is I/O type */
-#define GPFN_INV_MASK (1UL << 63) /* Guest pfn is invalid */
-#define INVALID_MFN (~0UL)
-#define MEM_G (1UL << 30)
-#define MEM_M (1UL << 20)
-#define MMIO_START (3 * MEM_G)
-#define MMIO_SIZE (512 * MEM_M)
-#define VGA_IO_START 0xA0000UL
-#define VGA_IO_SIZE 0x20000
-#define LEGACY_IO_START (MMIO_START + MMIO_SIZE)
-#define LEGACY_IO_SIZE (64 * MEM_M)
-#define IO_SAPIC_START 0xfec00000UL
-#define IO_SAPIC_SIZE 0x100000
-#define PIB_START 0xfee00000UL
-#define PIB_SIZE 0x200000
-#define GFW_START (4 * MEM_G - 16 * MEM_M)
-#define GFW_SIZE (16 * MEM_M)
-
-/*Deliver mode, defined for ioapic.c*/
-#define dest_Fixed IOSAPIC_FIXED
-#define dest_LowestPrio IOSAPIC_LOWEST_PRIORITY
-
-#define NMI_VECTOR 2
-#define ExtINT_VECTOR 0
-#define NULL_VECTOR (-1)
-#define IA64_SPURIOUS_INT_VECTOR 0x0f
-
-#define VCPU_LID(v) (((u64)(v)->vcpu_id) << 24)
-
-/*
- *Delivery mode
- */
-#define SAPIC_DELIV_SHIFT 8
-#define SAPIC_FIXED 0x0
-#define SAPIC_LOWEST_PRIORITY 0x1
-#define SAPIC_PMI 0x2
-#define SAPIC_NMI 0x4
-#define SAPIC_INIT 0x5
-#define SAPIC_EXTINT 0x7
-
-/*
- * vcpu->requests bit members for arch
- */
-#define KVM_REQ_PTC_G 32
-#define KVM_REQ_RESUME 33
-
-struct kvm_mmio_req {
- uint64_t addr; /* physical address */
- uint64_t size; /* size in bytes */
- uint64_t data; /* data (or paddr of data) */
- uint8_t state:4;
- uint8_t dir:1; /* 1=read, 0=write */
-};
-
-/*Pal data struct */
-struct kvm_pal_call{
- /*In area*/
- uint64_t gr28;
- uint64_t gr29;
- uint64_t gr30;
- uint64_t gr31;
- /*Out area*/
- struct ia64_pal_retval ret;
-};
-
-/* Sal data structure */
-struct kvm_sal_call{
- /*In area*/
- uint64_t in0;
- uint64_t in1;
- uint64_t in2;
- uint64_t in3;
- uint64_t in4;
- uint64_t in5;
- uint64_t in6;
- uint64_t in7;
- struct sal_ret_values ret;
-};
-
-/*Guest change rr6*/
-struct kvm_switch_rr6 {
- uint64_t old_rr;
- uint64_t new_rr;
-};
-
-union ia64_ipi_a{
- unsigned long val;
- struct {
- unsigned long rv : 3;
- unsigned long ir : 1;
- unsigned long eid : 8;
- unsigned long id : 8;
- unsigned long ib_base : 44;
- };
-};
-
-union ia64_ipi_d {
- unsigned long val;
- struct {
- unsigned long vector : 8;
- unsigned long dm : 3;
- unsigned long ig : 53;
- };
-};
-
-/*ipi check exit data*/
-struct kvm_ipi_data{
- union ia64_ipi_a addr;
- union ia64_ipi_d data;
-};
-
-/*global purge data*/
-struct kvm_ptc_g {
- unsigned long vaddr;
- unsigned long rr;
- unsigned long ps;
- struct kvm_vcpu *vcpu;
-};
-
-/*Exit control data */
-struct exit_ctl_data{
- uint32_t exit_reason;
- uint32_t vm_status;
- union {
- struct kvm_mmio_req ioreq;
- struct kvm_pal_call pal_data;
- struct kvm_sal_call sal_data;
- struct kvm_switch_rr6 rr_data;
- struct kvm_ipi_data ipi_data;
- struct kvm_ptc_g ptc_g_data;
- } u;
-};
-
-union pte_flags {
- unsigned long val;
- struct {
- unsigned long p : 1; /*0 */
- unsigned long : 1; /* 1 */
- unsigned long ma : 3; /* 2-4 */
- unsigned long a : 1; /* 5 */
- unsigned long d : 1; /* 6 */
- unsigned long pl : 2; /* 7-8 */
- unsigned long ar : 3; /* 9-11 */
- unsigned long ppn : 38; /* 12-49 */
- unsigned long : 2; /* 50-51 */
- unsigned long ed : 1; /* 52 */
- };
-};
-
-union ia64_pta {
- unsigned long val;
- struct {
- unsigned long ve : 1;
- unsigned long reserved0 : 1;
- unsigned long size : 6;
- unsigned long vf : 1;
- unsigned long reserved1 : 6;
- unsigned long base : 49;
- };
-};
-
-struct thash_cb {
- /* THASH base information */
- struct thash_data *hash; /* hash table pointer */
- union ia64_pta pta;
- int num;
-};
-
-struct kvm_vcpu_stat {
- u32 halt_wakeup;
-};
-
-struct kvm_vcpu_arch {
- int launched;
- int last_exit;
- int last_run_cpu;
- int vmm_tr_slot;
- int vm_tr_slot;
- int sn_rtc_tr_slot;
-
-#define KVM_MP_STATE_RUNNABLE 0
-#define KVM_MP_STATE_UNINITIALIZED 1
-#define KVM_MP_STATE_INIT_RECEIVED 2
-#define KVM_MP_STATE_HALTED 3
- int mp_state;
-
-#define MAX_PTC_G_NUM 3
- int ptc_g_count;
- struct kvm_ptc_g ptc_g_data[MAX_PTC_G_NUM];
-
- /*halt timer to wake up sleepy vcpus*/
- struct hrtimer hlt_timer;
- long ht_active;
-
- struct kvm_lapic *apic; /* kernel irqchip context */
- struct vpd *vpd;
-
- /* Exit data for vmm_transition*/
- struct exit_ctl_data exit_data;
-
- cpumask_t cache_coherent_map;
-
- unsigned long vmm_rr;
- unsigned long host_rr6;
- unsigned long psbits[8];
- unsigned long cr_iipa;
- unsigned long cr_isr;
- unsigned long vsa_base;
- unsigned long dirty_log_lock_pa;
- unsigned long __gp;
- /* TR and TC. */
- struct thash_data itrs[NITRS];
- struct thash_data dtrs[NDTRS];
- /* Bit is set if there is a tr/tc for the region. */
- unsigned char itr_regions;
- unsigned char dtr_regions;
- unsigned char tc_regions;
- /* purge all */
- unsigned long ptce_base;
- unsigned long ptce_count[2];
- unsigned long ptce_stride[2];
- /* itc/itm */
- unsigned long last_itc;
- long itc_offset;
- unsigned long itc_check;
- unsigned long timer_check;
- unsigned int timer_pending;
- unsigned int timer_fired;
-
- unsigned long vrr[8];
- unsigned long ibr[8];
- unsigned long dbr[8];
- unsigned long insvc[4]; /* Interrupt in service. */
- unsigned long xtp;
-
- unsigned long metaphysical_rr0; /* from kvm_arch (so is pinned) */
- unsigned long metaphysical_rr4; /* from kvm_arch (so is pinned) */
- unsigned long metaphysical_saved_rr0; /* from kvm_arch */
- unsigned long metaphysical_saved_rr4; /* from kvm_arch */
- unsigned long fp_psr; /*used for lazy float register */
- unsigned long saved_gp;
- /* for physical emulation */
- int mode_flags;
- struct thash_cb vtlb;
- struct thash_cb vhpt;
- char irq_check;
- char irq_new_pending;
-
- unsigned long opcode;
- unsigned long cause;
- char log_buf[VMM_LOG_LEN];
- union context host;
- union context guest;
-
- char mmio_data[8];
-};
-
-struct kvm_vm_stat {
- u64 remote_tlb_flush;
-};
-
-struct kvm_sal_data {
- unsigned long boot_ip;
- unsigned long boot_gp;
-};
-
-struct kvm_arch_memory_slot {
-};
-
-struct kvm_arch {
- spinlock_t dirty_log_lock;
-
- unsigned long vm_base;
- unsigned long metaphysical_rr0;
- unsigned long metaphysical_rr4;
- unsigned long vmm_init_rr;
-
- int is_sn2;
-
- struct kvm_ioapic *vioapic;
- struct kvm_vm_stat stat;
- struct kvm_sal_data rdv_sal_data;
-
- struct list_head assigned_dev_head;
- struct iommu_domain *iommu_domain;
- bool iommu_noncoherent;
-
- unsigned long irq_sources_bitmap;
- unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
-};
-
-union cpuid3_t {
- u64 value;
- struct {
- u64 number : 8;
- u64 revision : 8;
- u64 model : 8;
- u64 family : 8;
- u64 archrev : 8;
- u64 rv : 24;
- };
-};
-
-struct kvm_pt_regs {
- /* The following registers are saved by SAVE_MIN: */
- unsigned long b6; /* scratch */
- unsigned long b7; /* scratch */
-
- unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */
- unsigned long ar_ssd; /* reserved for future use (scratch) */
-
- unsigned long r8; /* scratch (return value register 0) */
- unsigned long r9; /* scratch (return value register 1) */
- unsigned long r10; /* scratch (return value register 2) */
- unsigned long r11; /* scratch (return value register 3) */
-
- unsigned long cr_ipsr; /* interrupted task's psr */
- unsigned long cr_iip; /* interrupted task's instruction pointer */
- unsigned long cr_ifs; /* interrupted task's function state */
-
- unsigned long ar_unat; /* interrupted task's NaT register (preserved) */
- unsigned long ar_pfs; /* prev function state */
- unsigned long ar_rsc; /* RSE configuration */
- /* The following two are valid only if cr_ipsr.cpl > 0: */
- unsigned long ar_rnat; /* RSE NaT */
- unsigned long ar_bspstore; /* RSE bspstore */
-
- unsigned long pr; /* 64 predicate registers (1 bit each) */
- unsigned long b0; /* return pointer (bp) */
- unsigned long loadrs; /* size of dirty partition << 16 */
-
- unsigned long r1; /* the gp pointer */
- unsigned long r12; /* interrupted task's memory stack pointer */
- unsigned long r13; /* thread pointer */
-
- unsigned long ar_fpsr; /* floating point status (preserved) */
- unsigned long r15; /* scratch */
-
- /* The remaining registers are NOT saved for system calls. */
- unsigned long r14; /* scratch */
- unsigned long r2; /* scratch */
- unsigned long r3; /* scratch */
- unsigned long r16; /* scratch */
- unsigned long r17; /* scratch */
- unsigned long r18; /* scratch */
- unsigned long r19; /* scratch */
- unsigned long r20; /* scratch */
- unsigned long r21; /* scratch */
- unsigned long r22; /* scratch */
- unsigned long r23; /* scratch */
- unsigned long r24; /* scratch */
- unsigned long r25; /* scratch */
- unsigned long r26; /* scratch */
- unsigned long r27; /* scratch */
- unsigned long r28; /* scratch */
- unsigned long r29; /* scratch */
- unsigned long r30; /* scratch */
- unsigned long r31; /* scratch */
- unsigned long ar_ccv; /* compare/exchange value (scratch) */
-
- /*
- * Floating point registers that the kernel considers scratch:
- */
- struct ia64_fpreg f6; /* scratch */
- struct ia64_fpreg f7; /* scratch */
- struct ia64_fpreg f8; /* scratch */
- struct ia64_fpreg f9; /* scratch */
- struct ia64_fpreg f10; /* scratch */
- struct ia64_fpreg f11; /* scratch */
-
- unsigned long r4; /* preserved */
- unsigned long r5; /* preserved */
- unsigned long r6; /* preserved */
- unsigned long r7; /* preserved */
- unsigned long eml_unat; /* used for emulating instruction */
- unsigned long pad0; /* alignment pad */
-};
-
-static inline struct kvm_pt_regs *vcpu_regs(struct kvm_vcpu *v)
-{
- return (struct kvm_pt_regs *) ((unsigned long) v + KVM_STK_OFFSET) - 1;
-}
-
-typedef int kvm_vmm_entry(void);
-typedef void kvm_tramp_entry(union context *host, union context *guest);
-
-struct kvm_vmm_info{
- struct module *module;
- kvm_vmm_entry *vmm_entry;
- kvm_tramp_entry *tramp_entry;
- unsigned long vmm_ivt;
- unsigned long patch_mov_ar;
- unsigned long patch_mov_ar_sn2;
-};
-
-int kvm_highest_pending_irq(struct kvm_vcpu *vcpu);
-int kvm_emulate_halt(struct kvm_vcpu *vcpu);
-int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
-void kvm_sal_emul(struct kvm_vcpu *vcpu);
-
-#define __KVM_HAVE_ARCH_VM_ALLOC 1
-struct kvm *kvm_arch_alloc_vm(void);
-void kvm_arch_free_vm(struct kvm *kvm);
-
-static inline void kvm_arch_sync_events(struct kvm *kvm) {}
-static inline void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arch_free_memslot(struct kvm *kvm,
- struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
-static inline void kvm_arch_memslots_updated(struct kvm *kvm) {}
-static inline void kvm_arch_commit_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem,
- const struct kvm_memory_slot *old,
- enum kvm_mr_change change) {}
-static inline void kvm_arch_hardware_unsetup(void) {}
-
-#endif /* __ASSEMBLY__*/
-
-#endif
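For reference, the fixed layout the deleted header encoded works out by straight arithmetic from the macros above: struct kvm_vcpu_data is VHPT_SIZE + VTLB_SIZE + VPD_SIZE + VCPU_STRUCT_SIZE = 4 x 64 KiB = 256 KiB, so KVM_MAX_VCPUS = (64 MiB - 24 MiB - 512 KiB - 512 KiB) / 256 KiB = 156 vcpus, and with 8-byte p2m entries KVM_MAX_MEM_SIZE = (24 MiB / 8) pages, i.e. about 48 GiB of guest memory at the (assumed default) 16 KiB page size.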
diff --git a/arch/ia64/include/asm/percpu.h b/arch/ia64/include/asm/percpu.h
index 14aa1c58912..0ec484d2dcb 100644
--- a/arch/ia64/include/asm/percpu.h
+++ b/arch/ia64/include/asm/percpu.h
@@ -35,8 +35,8 @@ extern void *per_cpu_init(void);
/*
* Be extremely careful when taking the address of this variable! Due to virtual
- * remapping, it is different from the canonical address returned by __get_cpu_var(var)!
- * On the positive side, using __ia64_per_cpu_var() instead of __get_cpu_var() is slightly
+ * remapping, it is different from the canonical address returned by this_cpu_ptr(&var)!
+ * On the positive side, using __ia64_per_cpu_var() instead of this_cpu_ptr() is slightly
* more efficient.
*/
#define __ia64_per_cpu_var(var) (*({ \
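The two access forms contrasted in the reworded comment look roughly like the sketch below; the per-CPU variable is a hypothetical example, not something defined in this header:

	DEFINE_PER_CPU(int, example_counter);	/* hypothetical variable */

	static int read_fast(void)
	{
		/* direct access through the pinned virtual remapping */
		return __ia64_per_cpu_var(example_counter);
	}

	static int read_canonical(void)
	{
		/* canonical address, as returned by this_cpu_ptr() */
		return *this_cpu_ptr(&example_counter);
	}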
diff --git a/arch/ia64/include/asm/pvclock-abi.h b/arch/ia64/include/asm/pvclock-abi.h
deleted file mode 100644
index 42b233bedeb..00000000000
--- a/arch/ia64/include/asm/pvclock-abi.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Same structure as x86's.
- * Ideally asm-x86/pvclock-abi.h would be moved somewhere more generic.
- * For now, keep these duplicated definitions.
- */
-
-#ifndef _ASM_IA64__PVCLOCK_ABI_H
-#define _ASM_IA64__PVCLOCK_ABI_H
-#ifndef __ASSEMBLY__
-
-/*
- * These structs MUST NOT be changed.
- * They are the ABI between hypervisor and guest OS.
- * KVM is using this.
- *
- * pvclock_vcpu_time_info holds the system time and the tsc timestamp
- * of the last update. So the guest can use the tsc delta to get a
- * more precise system time. There is one per virtual cpu.
- *
- * pvclock_wall_clock references the point in time when the system
- * time was zero (usually boot time), thus the guest calculates the
- * current wall clock by adding the system time.
- *
- * Protocol for the "version" fields is: hypervisor raises it (making
- * it uneven) before it starts updating the fields and raises it again
- * (making it even) when it is done. Thus the guest can make sure the
- * time values it got are consistent by checking the version before
- * and after reading them.
- */
-
-struct pvclock_vcpu_time_info {
- u32 version;
- u32 pad0;
- u64 tsc_timestamp;
- u64 system_time;
- u32 tsc_to_system_mul;
- s8 tsc_shift;
- u8 pad[3];
-} __attribute__((__packed__)); /* 32 bytes */
-
-struct pvclock_wall_clock {
- u32 version;
- u32 sec;
- u32 nsec;
-} __attribute__((__packed__));
-
-#endif /* __ASSEMBLY__ */
-#endif /* _ASM_IA64__PVCLOCK_ABI_H */
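The comment block in the deleted header describes a seqlock-style version protocol; a minimal sketch of the guest-side read loop it implies is shown below (an illustration, not code that existed in this file):

	/* Retry while the hypervisor is mid-update (odd version) or the
	 * version changed while we were copying the payload. */
	static void pvclock_read_time(const struct pvclock_vcpu_time_info *src,
				      struct pvclock_vcpu_time_info *dst)
	{
		u32 version;

		do {
			version = src->version;
			rmb();		/* read the payload only after version */
			*dst = *src;
			rmb();		/* re-check the version after the copy */
		} while ((version & 1) || version != src->version);
	}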
diff --git a/arch/ia64/include/uapi/asm/kvm.h b/arch/ia64/include/uapi/asm/kvm.h
deleted file mode 100644
index 99503c28440..00000000000
--- a/arch/ia64/include/uapi/asm/kvm.h
+++ /dev/null
@@ -1,268 +0,0 @@
-#ifndef __ASM_IA64_KVM_H
-#define __ASM_IA64_KVM_H
-
-/*
- * kvm structure definitions for ia64
- *
- * Copyright (C) 2007 Xiantao Zhang <xiantao.zhang@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- */
-
-#include <linux/types.h>
-#include <linux/ioctl.h>
-
-/* Select x86 specific features in <linux/kvm.h> */
-#define __KVM_HAVE_IOAPIC
-#define __KVM_HAVE_IRQ_LINE
-
-/* Architectural interrupt line count. */
-#define KVM_NR_INTERRUPTS 256
-
-#define KVM_IOAPIC_NUM_PINS 48
-
-struct kvm_ioapic_state {
- __u64 base_address;
- __u32 ioregsel;
- __u32 id;
- __u32 irr;
- __u32 pad;
- union {
- __u64 bits;
- struct {
- __u8 vector;
- __u8 delivery_mode:3;
- __u8 dest_mode:1;
- __u8 delivery_status:1;
- __u8 polarity:1;
- __u8 remote_irr:1;
- __u8 trig_mode:1;
- __u8 mask:1;
- __u8 reserve:7;
- __u8 reserved[4];
- __u8 dest_id;
- } fields;
- } redirtbl[KVM_IOAPIC_NUM_PINS];
-};
-
-#define KVM_IRQCHIP_PIC_MASTER 0
-#define KVM_IRQCHIP_PIC_SLAVE 1
-#define KVM_IRQCHIP_IOAPIC 2
-#define KVM_NR_IRQCHIPS 3
-
-#define KVM_CONTEXT_SIZE 8*1024
-
-struct kvm_fpreg {
- union {
- unsigned long bits[2];
- long double __dummy; /* force 16-byte alignment */
- } u;
-};
-
-union context {
- /* 8K size */
- char dummy[KVM_CONTEXT_SIZE];
- struct {
- unsigned long psr;
- unsigned long pr;
- unsigned long caller_unat;
- unsigned long pad;
- unsigned long gr[32];
- unsigned long ar[128];
- unsigned long br[8];
- unsigned long cr[128];
- unsigned long rr[8];
- unsigned long ibr[8];
- unsigned long dbr[8];
- unsigned long pkr[8];
- struct kvm_fpreg fr[128];
- };
-};
-
-struct thash_data {
- union {
- struct {
- unsigned long p : 1; /* 0 */
- unsigned long rv1 : 1; /* 1 */
- unsigned long ma : 3; /* 2-4 */
- unsigned long a : 1; /* 5 */
- unsigned long d : 1; /* 6 */
- unsigned long pl : 2; /* 7-8 */
- unsigned long ar : 3; /* 9-11 */
- unsigned long ppn : 38; /* 12-49 */
- unsigned long rv2 : 2; /* 50-51 */
- unsigned long ed : 1; /* 52 */
- unsigned long ig1 : 11; /* 53-63 */
- };
- struct {
- unsigned long __rv1 : 53; /* 0-52 */
- unsigned long contiguous : 1; /*53 */
- unsigned long tc : 1; /* 54 TR or TC */
- unsigned long cl : 1;
- /* 55 I side or D side cache line */
- unsigned long len : 4; /* 56-59 */
- unsigned long io : 1; /* 60 entry is for io or not */
- unsigned long nomap : 1;
- /* 61 entry can't be inserted into machine TLB. */
- unsigned long checked : 1;
- /* 62 for VTLB/VHPT sanity check */
- unsigned long invalid : 1;
- /* 63 invalid entry */
- };
- unsigned long page_flags;
- }; /* same for VHPT and TLB */
-
- union {
- struct {
- unsigned long rv3 : 2;
- unsigned long ps : 6;
- unsigned long key : 24;
- unsigned long rv4 : 32;
- };
- unsigned long itir;
- };
- union {
- struct {
- unsigned long ig2 : 12;
- unsigned long vpn : 49;
- unsigned long vrn : 3;
- };
- unsigned long ifa;
- unsigned long vadr;
- struct {
- unsigned long tag : 63;
- unsigned long ti : 1;
- };
- unsigned long etag;
- };
- union {
- struct thash_data *next;
- unsigned long rid;
- unsigned long gpaddr;
- };
-};
-
-#define NITRS 8
-#define NDTRS 8
-
-struct saved_vpd {
- unsigned long vhpi;
- unsigned long vgr[16];
- unsigned long vbgr[16];
- unsigned long vnat;
- unsigned long vbnat;
- unsigned long vcpuid[5];
- unsigned long vpsr;
- unsigned long vpr;
- union {
- unsigned long vcr[128];
- struct {
- unsigned long dcr;
- unsigned long itm;
- unsigned long iva;
- unsigned long rsv1[5];
- unsigned long pta;
- unsigned long rsv2[7];
- unsigned long ipsr;
- unsigned long isr;
- unsigned long rsv3;
- unsigned long iip;
- unsigned long ifa;
- unsigned long itir;
- unsigned long iipa;
- unsigned long ifs;
- unsigned long iim;
- unsigned long iha;
- unsigned long rsv4[38];
- unsigned long lid;
- unsigned long ivr;
- unsigned long tpr;
- unsigned long eoi;
- unsigned long irr[4];
- unsigned long itv;
- unsigned long pmv;
- unsigned long cmcv;
- unsigned long rsv5[5];
- unsigned long lrr0;
- unsigned long lrr1;
- unsigned long rsv6[46];
- };
- };
-};
-
-struct kvm_regs {
- struct saved_vpd vpd;
- /*Arch-regs*/
- int mp_state;
- unsigned long vmm_rr;
- /* TR and TC. */
- struct thash_data itrs[NITRS];
- struct thash_data dtrs[NDTRS];
- /* Bit is set if there is a tr/tc for the region. */
- unsigned char itr_regions;
- unsigned char dtr_regions;
- unsigned char tc_regions;
-
- char irq_check;
- unsigned long saved_itc;
- unsigned long itc_check;
- unsigned long timer_check;
- unsigned long timer_pending;
- unsigned long last_itc;
-
- unsigned long vrr[8];
- unsigned long ibr[8];
- unsigned long dbr[8];
- unsigned long insvc[4]; /* Interrupt in service. */
- unsigned long xtp;
-
- unsigned long metaphysical_rr0; /* from kvm_arch (so is pinned) */
- unsigned long metaphysical_rr4; /* from kvm_arch (so is pinned) */
- unsigned long metaphysical_saved_rr0; /* from kvm_arch */
- unsigned long metaphysical_saved_rr4; /* from kvm_arch */
- unsigned long fp_psr; /*used for lazy float register */
- unsigned long saved_gp;
- /* for physical emulation */
-
- union context saved_guest;
-
- unsigned long reserved[64]; /* for future use */
-};
-
-struct kvm_sregs {
-};
-
-struct kvm_fpu {
-};
-
-#define KVM_IA64_VCPU_STACK_SHIFT 16
-#define KVM_IA64_VCPU_STACK_SIZE (1UL << KVM_IA64_VCPU_STACK_SHIFT)
-
-struct kvm_ia64_vcpu_stack {
- unsigned char stack[KVM_IA64_VCPU_STACK_SIZE];
-};
-
-struct kvm_debug_exit_arch {
-};
-
-/* for KVM_SET_GUEST_DEBUG */
-struct kvm_guest_debug_arch {
-};
-
-/* definition of registers in kvm_run */
-struct kvm_sync_regs {
-};
-
-#endif
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index dc063fe6646..5f4243f0acf 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2145,22 +2145,12 @@ doit:
return 0;
}
-static int
-pfm_no_open(struct inode *irrelevant, struct file *dontcare)
-{
- DPRINT(("pfm_no_open called\n"));
- return -ENXIO;
-}
-
-
-
static const struct file_operations pfm_file_ops = {
.llseek = no_llseek,
.read = pfm_read,
.write = pfm_write,
.poll = pfm_poll,
.unlocked_ioctl = pfm_ioctl,
- .open = pfm_no_open, /* special open code to disallow open via /proc */
.fasync = pfm_fasync,
.release = pfm_close,
.flush = pfm_flush
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
deleted file mode 100644
index 3d50ea955c4..00000000000
--- a/arch/ia64/kvm/Kconfig
+++ /dev/null
@@ -1,66 +0,0 @@
-#
-# KVM configuration
-#
-
-source "virt/kvm/Kconfig"
-
-menuconfig VIRTUALIZATION
- bool "Virtualization"
- depends on HAVE_KVM || IA64
- default y
- ---help---
- Say Y here to get to see options for using your Linux host to run other
- operating systems inside virtual machines (guests).
- This option alone does not add any kernel code.
-
- If you say N, all options in this submenu will be skipped and disabled.
-
-if VIRTUALIZATION
-
-config KVM
- tristate "Kernel-based Virtual Machine (KVM) support"
- depends on BROKEN
- depends on HAVE_KVM && MODULES
- depends on BROKEN
- select PREEMPT_NOTIFIERS
- select ANON_INODES
- select HAVE_KVM_IRQCHIP
- select HAVE_KVM_IRQFD
- select HAVE_KVM_IRQ_ROUTING
- select KVM_APIC_ARCHITECTURE
- select KVM_MMIO
- ---help---
- Support hosting fully virtualized guest machines using hardware
- virtualization extensions. You will need a fairly recent
- processor equipped with virtualization extensions. You will also
- need to select one or more of the processor modules below.
-
- This module provides access to the hardware capabilities through
- a character device node named /dev/kvm.
-
- To compile this as a module, choose M here: the module
- will be called kvm.
-
- If unsure, say N.
-
-config KVM_INTEL
- tristate "KVM for Intel Itanium 2 processors support"
- depends on KVM && m
- ---help---
- Provides support for KVM on Itanium 2 processors equipped with the VT
- extensions.
-
-config KVM_DEVICE_ASSIGNMENT
- bool "KVM legacy PCI device assignment support"
- depends on KVM && PCI && IOMMU_API
- default y
- ---help---
- Provide support for legacy PCI device assignment through KVM. The
- kernel now also supports a full featured userspace device driver
- framework through VFIO, which supersedes much of this support.
-
- If unsure, say Y.
-
-source drivers/vhost/Kconfig
-
-endif # VIRTUALIZATION
diff --git a/arch/ia64/kvm/Makefile b/arch/ia64/kvm/Makefile
deleted file mode 100644
index 18e45ec49bb..00000000000
--- a/arch/ia64/kvm/Makefile
+++ /dev/null
@@ -1,67 +0,0 @@
-# This Makefile generates asm-offsets.h and builds the source.
-#
-
-#Generate asm-offsets.h for vmm module build
-offsets-file := asm-offsets.h
-
-always := $(offsets-file)
-targets := $(offsets-file)
-targets += arch/ia64/kvm/asm-offsets.s
-
-# Default sed regexp - multiline due to syntax constraints
-define sed-y
- "/^->/{s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; s:->::; p;}"
-endef
-
-quiet_cmd_offsets = GEN $@
-define cmd_offsets
- (set -e; \
- echo "#ifndef __ASM_KVM_OFFSETS_H__"; \
- echo "#define __ASM_KVM_OFFSETS_H__"; \
- echo "/*"; \
- echo " * DO NOT MODIFY."; \
- echo " *"; \
- echo " * This file was generated by Makefile"; \
- echo " *"; \
- echo " */"; \
- echo ""; \
- sed -ne $(sed-y) $<; \
- echo ""; \
- echo "#endif" ) > $@
-endef
-
-# We use internal rules to avoid the "is up to date" message from make
-arch/ia64/kvm/asm-offsets.s: arch/ia64/kvm/asm-offsets.c \
- $(wildcard $(srctree)/arch/ia64/include/asm/*.h)\
- $(wildcard $(srctree)/include/linux/*.h)
- $(call if_changed_dep,cc_s_c)
-
-$(obj)/$(offsets-file): arch/ia64/kvm/asm-offsets.s
- $(call cmd,offsets)
-
-FORCE : $(obj)/$(offsets-file)
-
-#
-# Makefile for Kernel-based Virtual Machine module
-#
-
-ccflags-y := -Ivirt/kvm -Iarch/ia64/kvm/
-asflags-y := -Ivirt/kvm -Iarch/ia64/kvm/
-KVM := ../../../virt/kvm
-
-common-objs = $(KVM)/kvm_main.o $(KVM)/ioapic.o \
- $(KVM)/coalesced_mmio.o $(KVM)/irq_comm.o
-
-ifeq ($(CONFIG_KVM_DEVICE_ASSIGNMENT),y)
-common-objs += $(KVM)/assigned-dev.o $(KVM)/iommu.o
-endif
-
-kvm-objs := $(common-objs) kvm-ia64.o kvm_fw.o
-obj-$(CONFIG_KVM) += kvm.o
-
-CFLAGS_vcpu.o += -mfixed-range=f2-f5,f12-f127
-kvm-intel-objs = vmm.o vmm_ivt.o trampoline.o vcpu.o optvfault.o mmio.o \
- vtlb.o process.o kvm_lib.o
-# Link in memcpy and memset to avoid possible structure-assignment errors
-kvm-intel-objs += memcpy.o memset.o
-obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
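The sed-y rule in the deleted Makefile is the usual kbuild asm-offsets trick: asm-offsets.c emits "->NAME value comment" marker lines into asm-offsets.s, and sed rewrites each marker into a C #define in the generated header. An illustration with hypothetical values:

	/* Compiler output in asm-offsets.s (value and immediate prefix are
	 * hypothetical and vary by target):
	 *     ->VMM_TASK_SIZE $4096 sizeof(struct kvm_vcpu)
	 * becomes the following line in the generated asm-offsets.h: */
	#define VMM_TASK_SIZE 4096 /* sizeof(struct kvm_vcpu) */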
diff --git a/arch/ia64/kvm/asm-offsets.c b/arch/ia64/kvm/asm-offsets.c
deleted file mode 100644
index 9324c875caf..00000000000
--- a/arch/ia64/kvm/asm-offsets.c
+++ /dev/null
@@ -1,241 +0,0 @@
-/*
- * asm-offsets.c Generate definitions needed by assembly language modules.
- * This code generates raw asm output which is post-processed
- * to extract and format the required data.
- *
- * Anthony Xu <anthony.xu@intel.com>
- * Xiantao Zhang <xiantao.zhang@intel.com>
- * Copyright (c) 2007 Intel Corporation KVM support.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- */
-
-#include <linux/kvm_host.h>
-#include <linux/kbuild.h>
-
-#include "vcpu.h"
-
-void foo(void)
-{
- DEFINE(VMM_TASK_SIZE, sizeof(struct kvm_vcpu));
- DEFINE(VMM_PT_REGS_SIZE, sizeof(struct kvm_pt_regs));
-
- BLANK();
-
- DEFINE(VMM_VCPU_META_RR0_OFFSET,
- offsetof(struct kvm_vcpu, arch.metaphysical_rr0));
- DEFINE(VMM_VCPU_META_SAVED_RR0_OFFSET,
- offsetof(struct kvm_vcpu,
- arch.metaphysical_saved_rr0));
- DEFINE(VMM_VCPU_VRR0_OFFSET,
- offsetof(struct kvm_vcpu, arch.vrr[0]));
- DEFINE(VMM_VPD_IRR0_OFFSET,
- offsetof(struct vpd, irr[0]));
- DEFINE(VMM_VCPU_ITC_CHECK_OFFSET,
- offsetof(struct kvm_vcpu, arch.itc_check));
- DEFINE(VMM_VCPU_IRQ_CHECK_OFFSET,
- offsetof(struct kvm_vcpu, arch.irq_check));
- DEFINE(VMM_VPD_VHPI_OFFSET,
- offsetof(struct vpd, vhpi));
- DEFINE(VMM_VCPU_VSA_BASE_OFFSET,
- offsetof(struct kvm_vcpu, arch.vsa_base));
- DEFINE(VMM_VCPU_VPD_OFFSET,
- offsetof(struct kvm_vcpu, arch.vpd));
- DEFINE(VMM_VCPU_IRQ_CHECK,
- offsetof(struct kvm_vcpu, arch.irq_check));
- DEFINE(VMM_VCPU_TIMER_PENDING,
- offsetof(struct kvm_vcpu, arch.timer_pending));
- DEFINE(VMM_VCPU_META_SAVED_RR0_OFFSET,
- offsetof(struct kvm_vcpu, arch.metaphysical_saved_rr0));
- DEFINE(VMM_VCPU_MODE_FLAGS_OFFSET,
- offsetof(struct kvm_vcpu, arch.mode_flags));
- DEFINE(VMM_VCPU_ITC_OFS_OFFSET,
- offsetof(struct kvm_vcpu, arch.itc_offset));
- DEFINE(VMM_VCPU_LAST_ITC_OFFSET,
- offsetof(struct kvm_vcpu, arch.last_itc));
- DEFINE(VMM_VCPU_SAVED_GP_OFFSET,
- offsetof(struct kvm_vcpu, arch.saved_gp));
-
- BLANK();
-
- DEFINE(VMM_PT_REGS_B6_OFFSET,
- offsetof(struct kvm_pt_regs, b6));
- DEFINE(VMM_PT_REGS_B7_OFFSET,
- offsetof(struct kvm_pt_regs, b7));
- DEFINE(VMM_PT_REGS_AR_CSD_OFFSET,
- offsetof(struct kvm_pt_regs, ar_csd));
- DEFINE(VMM_PT_REGS_AR_SSD_OFFSET,
- offsetof(struct kvm_pt_regs, ar_ssd));
- DEFINE(VMM_PT_REGS_R8_OFFSET,
- offsetof(struct kvm_pt_regs, r8));
- DEFINE(VMM_PT_REGS_R9_OFFSET,
- offsetof(struct kvm_pt_regs, r9));
- DEFINE(VMM_PT_REGS_R10_OFFSET,
- offsetof(struct kvm_pt_regs, r10));
- DEFINE(VMM_PT_REGS_R11_OFFSET,
- offsetof(struct kvm_pt_regs, r11));
- DEFINE(VMM_PT_REGS_CR_IPSR_OFFSET,
- offsetof(struct kvm_pt_regs, cr_ipsr));
- DEFINE(VMM_PT_REGS_CR_IIP_OFFSET,
- offsetof(struct kvm_pt_regs, cr_iip));
- DEFINE(VMM_PT_REGS_CR_IFS_OFFSET,
- offsetof(struct kvm_pt_regs, cr_ifs));
- DEFINE(VMM_PT_REGS_AR_UNAT_OFFSET,
- offsetof(struct kvm_pt_regs, ar_unat));
- DEFINE(VMM_PT_REGS_AR_PFS_OFFSET,
- offsetof(struct kvm_pt_regs, ar_pfs));
- DEFINE(VMM_PT_REGS_AR_RSC_OFFSET,
- offsetof(struct kvm_pt_regs, ar_rsc));
- DEFINE(VMM_PT_REGS_AR_RNAT_OFFSET,
- offsetof(struct kvm_pt_regs, ar_rnat));
-
- DEFINE(VMM_PT_REGS_AR_BSPSTORE_OFFSET,
- offsetof(struct kvm_pt_regs, ar_bspstore));
- DEFINE(VMM_PT_REGS_PR_OFFSET,
- offsetof(struct kvm_pt_regs, pr));
- DEFINE(VMM_PT_REGS_B0_OFFSET,
- offsetof(struct kvm_pt_regs, b0));
- DEFINE(VMM_PT_REGS_LOADRS_OFFSET,
- offsetof(struct kvm_pt_regs, loadrs));
- DEFINE(VMM_PT_REGS_R1_OFFSET,
- offsetof(struct kvm_pt_regs, r1));
- DEFINE(VMM_PT_REGS_R12_OFFSET,
- offsetof(struct kvm_pt_regs, r12));
- DEFINE(VMM_PT_REGS_R13_OFFSET,
- offsetof(struct kvm_pt_regs, r13));
- DEFINE(VMM_PT_REGS_AR_FPSR_OFFSET,
- offsetof(struct kvm_pt_regs, ar_fpsr));
- DEFINE(VMM_PT_REGS_R15_OFFSET,
- offsetof(struct kvm_pt_regs, r15));
- DEFINE(VMM_PT_REGS_R14_OFFSET,
- offsetof(struct kvm_pt_regs, r14));
- DEFINE(VMM_PT_REGS_R2_OFFSET,
- offsetof(struct kvm_pt_regs, r2));
- DEFINE(VMM_PT_REGS_R3_OFFSET,
- offsetof(struct kvm_pt_regs, r3));
- DEFINE(VMM_PT_REGS_R16_OFFSET,
- offsetof(struct kvm_pt_regs, r16));
- DEFINE(VMM_PT_REGS_R17_OFFSET,
- offsetof(struct kvm_pt_regs, r17));
- DEFINE(VMM_PT_REGS_R18_OFFSET,
- offsetof(struct kvm_pt_regs, r18));
- DEFINE(VMM_PT_REGS_R19_OFFSET,
- offsetof(struct kvm_pt_regs, r19));
- DEFINE(VMM_PT_REGS_R20_OFFSET,
- offsetof(struct kvm_pt_regs, r20));
- DEFINE(VMM_PT_REGS_R21_OFFSET,
- offsetof(struct kvm_pt_regs, r21));
- DEFINE(VMM_PT_REGS_R22_OFFSET,
- offsetof(struct kvm_pt_regs, r22));
- DEFINE(VMM_PT_REGS_R23_OFFSET,
- offsetof(struct kvm_pt_regs, r23));
- DEFINE(VMM_PT_REGS_R24_OFFSET,
- offsetof(struct kvm_pt_regs, r24));
- DEFINE(VMM_PT_REGS_R25_OFFSET,
- offsetof(struct kvm_pt_regs, r25));
- DEFINE(VMM_PT_REGS_R26_OFFSET,
- offsetof(struct kvm_pt_regs, r26));
- DEFINE(VMM_PT_REGS_R27_OFFSET,
- offsetof(struct kvm_pt_regs, r27));
- DEFINE(VMM_PT_REGS_R28_OFFSET,
- offsetof(struct kvm_pt_regs, r28));
- DEFINE(VMM_PT_REGS_R29_OFFSET,
- offsetof(struct kvm_pt_regs, r29));
- DEFINE(VMM_PT_REGS_R30_OFFSET,
- offsetof(struct kvm_pt_regs, r30));
- DEFINE(VMM_PT_REGS_R31_OFFSET,
- offsetof(struct kvm_pt_regs, r31));
- DEFINE(VMM_PT_REGS_AR_CCV_OFFSET,
- offsetof(struct kvm_pt_regs, ar_ccv));
- DEFINE(VMM_PT_REGS_F6_OFFSET,
- offsetof(struct kvm_pt_regs, f6));
- DEFINE(VMM_PT_REGS_F7_OFFSET,
- offsetof(struct kvm_pt_regs, f7));
- DEFINE(VMM_PT_REGS_F8_OFFSET,
- offsetof(struct kvm_pt_regs, f8));
- DEFINE(VMM_PT_REGS_F9_OFFSET,
- offsetof(struct kvm_pt_regs, f9));
- DEFINE(VMM_PT_REGS_F10_OFFSET,
- offsetof(struct kvm_pt_regs, f10));
- DEFINE(VMM_PT_REGS_F11_OFFSET,
- offsetof(struct kvm_pt_regs, f11));
- DEFINE(VMM_PT_REGS_R4_OFFSET,
- offsetof(struct kvm_pt_regs, r4));
- DEFINE(VMM_PT_REGS_R5_OFFSET,
- offsetof(struct kvm_pt_regs, r5));
- DEFINE(VMM_PT_REGS_R6_OFFSET,
- offsetof(struct kvm_pt_regs, r6));
- DEFINE(VMM_PT_REGS_R7_OFFSET,
- offsetof(struct kvm_pt_regs, r7));
- DEFINE(VMM_PT_REGS_EML_UNAT_OFFSET,
- offsetof(struct kvm_pt_regs, eml_unat));
- DEFINE(VMM_VCPU_IIPA_OFFSET,
- offsetof(struct kvm_vcpu, arch.cr_iipa));
- DEFINE(VMM_VCPU_OPCODE_OFFSET,
- offsetof(struct kvm_vcpu, arch.opcode));
- DEFINE(VMM_VCPU_CAUSE_OFFSET, offsetof(struct kvm_vcpu, arch.cause));
- DEFINE(VMM_VCPU_ISR_OFFSET,
- offsetof(struct kvm_vcpu, arch.cr_isr));
- DEFINE(VMM_PT_REGS_R16_SLOT,
- (((offsetof(struct kvm_pt_regs, r16)
- - sizeof(struct kvm_pt_regs)) >> 3) & 0x3f));
- DEFINE(VMM_VCPU_MODE_FLAGS_OFFSET,
- offsetof(struct kvm_vcpu, arch.mode_flags));
- DEFINE(VMM_VCPU_GP_OFFSET, offsetof(struct kvm_vcpu, arch.__gp));
- BLANK();
-
- DEFINE(VMM_VPD_BASE_OFFSET, offsetof(struct kvm_vcpu, arch.vpd));
- DEFINE(VMM_VPD_VIFS_OFFSET, offsetof(struct vpd, ifs));
- DEFINE(VMM_VLSAPIC_INSVC_BASE_OFFSET,
- offsetof(struct kvm_vcpu, arch.insvc[0]));
- DEFINE(VMM_VPD_VPTA_OFFSET, offsetof(struct vpd, pta));
- DEFINE(VMM_VPD_VPSR_OFFSET, offsetof(struct vpd, vpsr));
-
- DEFINE(VMM_CTX_R4_OFFSET, offsetof(union context, gr[4]));
- DEFINE(VMM_CTX_R5_OFFSET, offsetof(union context, gr[5]));
- DEFINE(VMM_CTX_R12_OFFSET, offsetof(union context, gr[12]));
- DEFINE(VMM_CTX_R13_OFFSET, offsetof(union context, gr[13]));
- DEFINE(VMM_CTX_KR0_OFFSET, offsetof(union context, ar[0]));
- DEFINE(VMM_CTX_KR1_OFFSET, offsetof(union context, ar[1]));
- DEFINE(VMM_CTX_B0_OFFSET, offsetof(union context, br[0]));
- DEFINE(VMM_CTX_B1_OFFSET, offsetof(union context, br[1]));
- DEFINE(VMM_CTX_B2_OFFSET, offsetof(union context, br[2]));
- DEFINE(VMM_CTX_RR0_OFFSET, offsetof(union context, rr[0]));
- DEFINE(VMM_CTX_RSC_OFFSET, offsetof(union context, ar[16]));
- DEFINE(VMM_CTX_BSPSTORE_OFFSET, offsetof(union context, ar[18]));
- DEFINE(VMM_CTX_RNAT_OFFSET, offsetof(union context, ar[19]));
- DEFINE(VMM_CTX_FCR_OFFSET, offsetof(union context, ar[21]));
- DEFINE(VMM_CTX_EFLAG_OFFSET, offsetof(union context, ar[24]));
- DEFINE(VMM_CTX_CFLG_OFFSET, offsetof(union context, ar[27]));
- DEFINE(VMM_CTX_FSR_OFFSET, offsetof(union context, ar[28]));
- DEFINE(VMM_CTX_FIR_OFFSET, offsetof(union context, ar[29]));
- DEFINE(VMM_CTX_FDR_OFFSET, offsetof(union context, ar[30]));
- DEFINE(VMM_CTX_UNAT_OFFSET, offsetof(union context, ar[36]));
- DEFINE(VMM_CTX_FPSR_OFFSET, offsetof(union context, ar[40]));
- DEFINE(VMM_CTX_PFS_OFFSET, offsetof(union context, ar[64]));
- DEFINE(VMM_CTX_LC_OFFSET, offsetof(union context, ar[65]));
- DEFINE(VMM_CTX_DCR_OFFSET, offsetof(union context, cr[0]));
- DEFINE(VMM_CTX_IVA_OFFSET, offsetof(union context, cr[2]));
- DEFINE(VMM_CTX_PTA_OFFSET, offsetof(union context, cr[8]));
- DEFINE(VMM_CTX_IBR0_OFFSET, offsetof(union context, ibr[0]));
- DEFINE(VMM_CTX_DBR0_OFFSET, offsetof(union context, dbr[0]));
- DEFINE(VMM_CTX_F2_OFFSET, offsetof(union context, fr[2]));
- DEFINE(VMM_CTX_F3_OFFSET, offsetof(union context, fr[3]));
- DEFINE(VMM_CTX_F32_OFFSET, offsetof(union context, fr[32]));
- DEFINE(VMM_CTX_F33_OFFSET, offsetof(union context, fr[33]));
- DEFINE(VMM_CTX_PKR0_OFFSET, offsetof(union context, pkr[0]));
- DEFINE(VMM_CTX_PSR_OFFSET, offsetof(union context, psr));
- BLANK();
-}
diff --git a/arch/ia64/kvm/irq.h b/arch/ia64/kvm/irq.h
deleted file mode 100644
index c0785a72827..00000000000
--- a/arch/ia64/kvm/irq.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * irq.h: In-kernel interrupt controller related definitions
- * Copyright (c) 2008, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Authors:
- * Xiantao Zhang <xiantao.zhang@intel.com>
- *
- */
-
-#ifndef __IRQ_H
-#define __IRQ_H
-
-#include "lapic.h"
-
-static inline int irqchip_in_kernel(struct kvm *kvm)
-{
- return 1;
-}
-
-#endif
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
deleted file mode 100644
index dbe46f43884..00000000000
--- a/arch/ia64/kvm/kvm-ia64.c
+++ /dev/null
@@ -1,1942 +0,0 @@
-/*
- * kvm_ia64.c: Basic KVM support On Itanium series processors
- *
- *
- * Copyright (C) 2007, Intel Corporation.
- * Xiantao Zhang (xiantao.zhang@intel.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- */
-
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/percpu.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/smp.h>
-#include <linux/kvm_host.h>
-#include <linux/kvm.h>
-#include <linux/bitops.h>
-#include <linux/hrtimer.h>
-#include <linux/uaccess.h>
-#include <linux/iommu.h>
-#include <linux/intel-iommu.h>
-#include <linux/pci.h>
-
-#include <asm/pgtable.h>
-#include <asm/gcc_intrin.h>
-#include <asm/pal.h>
-#include <asm/cacheflush.h>
-#include <asm/div64.h>
-#include <asm/tlb.h>
-#include <asm/elf.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/clksupport.h>
-#include <asm/sn/shub_mmr.h>
-
-#include "misc.h"
-#include "vti.h"
-#include "iodev.h"
-#include "ioapic.h"
-#include "lapic.h"
-#include "irq.h"
-
-static unsigned long kvm_vmm_base;
-static unsigned long kvm_vsa_base;
-static unsigned long kvm_vm_buffer;
-static unsigned long kvm_vm_buffer_size;
-unsigned long kvm_vmm_gp;
-
-static long vp_env_info;
-
-static struct kvm_vmm_info *kvm_vmm_info;
-
-static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu);
-
-struct kvm_stats_debugfs_item debugfs_entries[] = {
- { NULL }
-};
-
-static unsigned long kvm_get_itc(struct kvm_vcpu *vcpu)
-{
-#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
- if (vcpu->kvm->arch.is_sn2)
- return rtc_time();
- else
-#endif
- return ia64_getreg(_IA64_REG_AR_ITC);
-}
-
-static void kvm_flush_icache(unsigned long start, unsigned long len)
-{
- int l;
-
- for (l = 0; l < (len + 32); l += 32)
- ia64_fc((void *)(start + l));
-
- ia64_sync_i();
- ia64_srlz_i();
-}
-
-static void kvm_flush_tlb_all(void)
-{
- unsigned long i, j, count0, count1, stride0, stride1, addr;
- long flags;
-
- addr = local_cpu_data->ptce_base;
- count0 = local_cpu_data->ptce_count[0];
- count1 = local_cpu_data->ptce_count[1];
- stride0 = local_cpu_data->ptce_stride[0];
- stride1 = local_cpu_data->ptce_stride[1];
-
- local_irq_save(flags);
- for (i = 0; i < count0; ++i) {
- for (j = 0; j < count1; ++j) {
- ia64_ptce(addr);
- addr += stride1;
- }
- addr += stride0;
- }
- local_irq_restore(flags);
- ia64_srlz_i(); /* srlz.i implies srlz.d */
-}
-
-long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler)
-{
- struct ia64_pal_retval iprv;
-
- PAL_CALL_STK(iprv, PAL_VP_CREATE, (u64)vpd, (u64)host_iva,
- (u64)opt_handler);
-
- return iprv.status;
-}
-
-static DEFINE_SPINLOCK(vp_lock);
-
-int kvm_arch_hardware_enable(void)
-{
- long status;
- long tmp_base;
- unsigned long pte;
- unsigned long saved_psr;
- int slot;
-
- pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
- local_irq_save(saved_psr);
- slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
- local_irq_restore(saved_psr);
- if (slot < 0)
- return -EINVAL;
-
- spin_lock(&vp_lock);
- status = ia64_pal_vp_init_env(kvm_vsa_base ?
- VP_INIT_ENV : VP_INIT_ENV_INITALIZE,
- __pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base);
- if (status != 0) {
- spin_unlock(&vp_lock);
- printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n");
- return -EINVAL;
- }
-
- if (!kvm_vsa_base) {
- kvm_vsa_base = tmp_base;
- printk(KERN_INFO"kvm: kvm_vsa_base:0x%lx\n", kvm_vsa_base);
- }
- spin_unlock(&vp_lock);
- ia64_ptr_entry(0x3, slot);
-
- return 0;
-}
-
-void kvm_arch_hardware_disable(void)
-{
-
- long status;
- int slot;
- unsigned long pte;
- unsigned long saved_psr;
- unsigned long host_iva = ia64_getreg(_IA64_REG_CR_IVA);
-
- pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base),
- PAGE_KERNEL));
-
- local_irq_save(saved_psr);
- slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
- local_irq_restore(saved_psr);
- if (slot < 0)
- return;
-
- status = ia64_pal_vp_exit_env(host_iva);
- if (status)
- printk(KERN_DEBUG"kvm: Failed to disable VT support! :%ld\n",
- status);
- ia64_ptr_entry(0x3, slot);
-}
-
-void kvm_arch_check_processor_compat(void *rtn)
-{
- *(int *)rtn = 0;
-}
-
-int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
-{
-
- int r;
-
- switch (ext) {
- case KVM_CAP_IRQCHIP:
- case KVM_CAP_MP_STATE:
- case KVM_CAP_IRQ_INJECT_STATUS:
- case KVM_CAP_IOAPIC_POLARITY_IGNORED:
- r = 1;
- break;
- case KVM_CAP_COALESCED_MMIO:
- r = KVM_COALESCED_MMIO_PAGE_OFFSET;
- break;
-#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
- case KVM_CAP_IOMMU:
- r = iommu_present(&pci_bus_type);
- break;
-#endif
- default:
- r = 0;
- }
- return r;
-
-}
-
-static int handle_vm_error(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
- kvm_run->hw.hardware_exit_reason = 1;
- return 0;
-}
-
-static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- struct kvm_mmio_req *p;
- struct kvm_io_device *mmio_dev;
- int r;
-
- p = kvm_get_vcpu_ioreq(vcpu);
-
- if ((p->addr & PAGE_MASK) == IOAPIC_DEFAULT_BASE_ADDRESS)
- goto mmio;
- vcpu->mmio_needed = 1;
- vcpu->mmio_fragments[0].gpa = kvm_run->mmio.phys_addr = p->addr;
- vcpu->mmio_fragments[0].len = kvm_run->mmio.len = p->size;
- vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir;
-
- if (vcpu->mmio_is_write)
- memcpy(vcpu->arch.mmio_data, &p->data, p->size);
- memcpy(kvm_run->mmio.data, &p->data, p->size);
- kvm_run->exit_reason = KVM_EXIT_MMIO;
- return 0;
-mmio:
- if (p->dir)
- r = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, p->addr,
- p->size, &p->data);
- else
- r = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, p->addr,
- p->size, &p->data);
- if (r)
- printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr);
- p->state = STATE_IORESP_READY;
-
- return 1;
-}
-
-static int handle_pal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- struct exit_ctl_data *p;
-
- p = kvm_get_exit_data(vcpu);
-
- if (p->exit_reason == EXIT_REASON_PAL_CALL)
- return kvm_pal_emul(vcpu, kvm_run);
- else {
- kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
- kvm_run->hw.hardware_exit_reason = 2;
- return 0;
- }
-}
-
-static int handle_sal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- struct exit_ctl_data *p;
-
- p = kvm_get_exit_data(vcpu);
-
- if (p->exit_reason == EXIT_REASON_SAL_CALL) {
- kvm_sal_emul(vcpu);
- return 1;
- } else {
- kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
- kvm_run->hw.hardware_exit_reason = 3;
- return 0;
- }
-
-}
-
-static int __apic_accept_irq(struct kvm_vcpu *vcpu, uint64_t vector)
-{
- struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
-
- if (!test_and_set_bit(vector, &vpd->irr[0])) {
- vcpu->arch.irq_new_pending = 1;
- kvm_vcpu_kick(vcpu);
- return 1;
- }
- return 0;
-}
-
-/*
- * offset: address offset to IPI space.
- * value: deliver value.
- */
-static void vcpu_deliver_ipi(struct kvm_vcpu *vcpu, uint64_t dm,
- uint64_t vector)
-{
- switch (dm) {
- case SAPIC_FIXED:
- break;
- case SAPIC_NMI:
- vector = 2;
- break;
- case SAPIC_EXTINT:
- vector = 0;
- break;
- case SAPIC_INIT:
- case SAPIC_PMI:
- default:
- printk(KERN_ERR"kvm: Unimplemented Deliver reserved IPI!\n");
- return;
- }
- __apic_accept_irq(vcpu, vector);
-}
-
-static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
- unsigned long eid)
-{
- union ia64_lid lid;
- int i;
- struct kvm_vcpu *vcpu;
-
- kvm_for_each_vcpu(i, vcpu, kvm) {
- lid.val = VCPU_LID(vcpu);
- if (lid.id == id && lid.eid == eid)
- return vcpu;
- }
-
- return NULL;
-}
-
-static int handle_ipi(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
- struct kvm_vcpu *target_vcpu;
- struct kvm_pt_regs *regs;
- union ia64_ipi_a addr = p->u.ipi_data.addr;
- union ia64_ipi_d data = p->u.ipi_data.data;
-
- target_vcpu = lid_to_vcpu(vcpu->kvm, addr.id, addr.eid);
- if (!target_vcpu)
- return handle_vm_error(vcpu, kvm_run);
-
- if (!target_vcpu->arch.launched) {
- regs = vcpu_regs(target_vcpu);
-
- regs->cr_iip = vcpu->kvm->arch.rdv_sal_data.boot_ip;
- regs->r1 = vcpu->kvm->arch.rdv_sal_data.boot_gp;
-
- target_vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
- if (waitqueue_active(&target_vcpu->wq))
- wake_up_interruptible(&target_vcpu->wq);
- } else {
- vcpu_deliver_ipi(target_vcpu, data.dm, data.vector);
- if (target_vcpu != vcpu)
- kvm_vcpu_kick(target_vcpu);
- }
-
- return 1;
-}
-
-struct call_data {
- struct kvm_ptc_g ptc_g_data;
- struct kvm_vcpu *vcpu;
-};
-
-static void vcpu_global_purge(void *info)
-{
- struct call_data *p = (struct call_data *)info;
- struct kvm_vcpu *vcpu = p->vcpu;
-
- if (test_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
- return;
-
- set_bit(KVM_REQ_PTC_G, &vcpu->requests);
- if (vcpu->arch.ptc_g_count < MAX_PTC_G_NUM) {
- vcpu->arch.ptc_g_data[vcpu->arch.ptc_g_count++] =
- p->ptc_g_data;
- } else {
- clear_bit(KVM_REQ_PTC_G, &vcpu->requests);
- vcpu->arch.ptc_g_count = 0;
- set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
- }
-}
-
-static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
- struct kvm *kvm = vcpu->kvm;
- struct call_data call_data;
- int i;
- struct kvm_vcpu *vcpui;
-
- call_data.ptc_g_data = p->u.ptc_g_data;
-
- kvm_for_each_vcpu(i, vcpui, kvm) {
- if (vcpui->arch.mp_state == KVM_MP_STATE_UNINITIALIZED ||
- vcpu == vcpui)
- continue;
-
- if (waitqueue_active(&vcpui->wq))
- wake_up_interruptible(&vcpui->wq);
-
- if (vcpui->cpu != -1) {
- call_data.vcpu = vcpui;
- smp_call_function_single(vcpui->cpu,
- vcpu_global_purge, &call_data, 1);
- } else
- printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n");
-
- }
- return 1;
-}
-
-static int handle_switch_rr6(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- return 1;
-}
-
-static int kvm_sn2_setup_mappings(struct kvm_vcpu *vcpu)
-{
- unsigned long pte, rtc_phys_addr, map_addr;
- int slot;
-
- map_addr = KVM_VMM_BASE + (1UL << KVM_VMM_SHIFT);
- rtc_phys_addr = LOCAL_MMR_OFFSET | SH_RTC;
- pte = pte_val(mk_pte_phys(rtc_phys_addr, PAGE_KERNEL_UC));
- slot = ia64_itr_entry(0x3, map_addr, pte, PAGE_SHIFT);
- vcpu->arch.sn_rtc_tr_slot = slot;
- if (slot < 0) {
- printk(KERN_ERR "Mayday mayday! RTC mapping failed!\n");
- slot = 0;
- }
- return slot;
-}
-
-int kvm_emulate_halt(struct kvm_vcpu *vcpu)
-{
-
- ktime_t kt;
- long itc_diff;
- unsigned long vcpu_now_itc;
- unsigned long expires;
- struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
- unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec;
- struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
-
- if (irqchip_in_kernel(vcpu->kvm)) {
-
- vcpu_now_itc = kvm_get_itc(vcpu) + vcpu->arch.itc_offset;
-
- if (time_after(vcpu_now_itc, vpd->itm)) {
- vcpu->arch.timer_check = 1;
- return 1;
- }
- itc_diff = vpd->itm - vcpu_now_itc;
- if (itc_diff < 0)
- itc_diff = -itc_diff;
-
- expires = div64_u64(itc_diff, cyc_per_usec);
- kt = ktime_set(0, 1000 * expires);
-
- vcpu->arch.ht_active = 1;
- hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
-
- vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
- kvm_vcpu_block(vcpu);
- hrtimer_cancel(p_ht);
- vcpu->arch.ht_active = 0;
-
- if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests) ||
- kvm_cpu_has_pending_timer(vcpu))
- if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
- vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
-
- if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
- return -EINTR;
- return 1;
- } else {
- printk(KERN_ERR"kvm: Unsupported userspace halt!");
- return 0;
- }
-}
-
-static int handle_vm_shutdown(struct kvm_vcpu *vcpu,
- struct kvm_run *kvm_run)
-{
- kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
- return 0;
-}
-
-static int handle_external_interrupt(struct kvm_vcpu *vcpu,
- struct kvm_run *kvm_run)
-{
- return 1;
-}
-
-static int handle_vcpu_debug(struct kvm_vcpu *vcpu,
- struct kvm_run *kvm_run)
-{
- printk("VMM: %s", vcpu->arch.log_buf);
- return 1;
-}
-
-static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
- struct kvm_run *kvm_run) = {
- [EXIT_REASON_VM_PANIC] = handle_vm_error,
- [EXIT_REASON_MMIO_INSTRUCTION] = handle_mmio,
- [EXIT_REASON_PAL_CALL] = handle_pal_call,
- [EXIT_REASON_SAL_CALL] = handle_sal_call,
- [EXIT_REASON_SWITCH_RR6] = handle_switch_rr6,
- [EXIT_REASON_VM_DESTROY] = handle_vm_shutdown,
- [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt,
- [EXIT_REASON_IPI] = handle_ipi,
- [EXIT_REASON_PTC_G] = handle_global_purge,
- [EXIT_REASON_DEBUG] = handle_vcpu_debug,
-
-};
-
-static const int kvm_vti_max_exit_handlers =
- sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers);
-
-static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu)
-{
- struct exit_ctl_data *p_exit_data;
-
- p_exit_data = kvm_get_exit_data(vcpu);
- return p_exit_data->exit_reason;
-}
-
-/*
- * The guest has exited. See if we can fix it or if we need userspace
- * assistance.
- */
-static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
-{
- u32 exit_reason = kvm_get_exit_reason(vcpu);
- vcpu->arch.last_exit = exit_reason;
-
- if (exit_reason < kvm_vti_max_exit_handlers
- && kvm_vti_exit_handlers[exit_reason])
- return kvm_vti_exit_handlers[exit_reason](vcpu, kvm_run);
- else {
- kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
- kvm_run->hw.hardware_exit_reason = exit_reason;
- }
- return 0;
-}
-
-static inline void vti_set_rr6(unsigned long rr6)
-{
- ia64_set_rr(RR6, rr6);
- ia64_srlz_i();
-}
-
-static int kvm_insert_vmm_mapping(struct kvm_vcpu *vcpu)
-{
- unsigned long pte;
- struct kvm *kvm = vcpu->kvm;
- int r;
-
- /*Insert a pair of tr to map vmm*/
- pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
- r = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
- if (r < 0)
- goto out;
- vcpu->arch.vmm_tr_slot = r;
- /* Insert a pair of tr to map the VM's data */
- pte = pte_val(mk_pte_phys(__pa(kvm->arch.vm_base), PAGE_KERNEL));
- r = ia64_itr_entry(0x3, KVM_VM_DATA_BASE,
- pte, KVM_VM_DATA_SHIFT);
- if (r < 0)
- goto out;
- vcpu->arch.vm_tr_slot = r;
-
-#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
- if (kvm->arch.is_sn2) {
- r = kvm_sn2_setup_mappings(vcpu);
- if (r < 0)
- goto out;
- }
-#endif
-
- r = 0;
-out:
- return r;
-}
-
-static void kvm_purge_vmm_mapping(struct kvm_vcpu *vcpu)
-{
- struct kvm *kvm = vcpu->kvm;
- ia64_ptr_entry(0x3, vcpu->arch.vmm_tr_slot);
- ia64_ptr_entry(0x3, vcpu->arch.vm_tr_slot);
-#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
- if (kvm->arch.is_sn2)
- ia64_ptr_entry(0x3, vcpu->arch.sn_rtc_tr_slot);
-#endif
-}
-
-static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu)
-{
- unsigned long psr;
- int r;
- int cpu = smp_processor_id();
-
- if (vcpu->arch.last_run_cpu != cpu ||
- per_cpu(last_vcpu, cpu) != vcpu) {
- per_cpu(last_vcpu, cpu) = vcpu;
- vcpu->arch.last_run_cpu = cpu;
- kvm_flush_tlb_all();
- }
-
- vcpu->arch.host_rr6 = ia64_get_rr(RR6);
- vti_set_rr6(vcpu->arch.vmm_rr);
- local_irq_save(psr);
- r = kvm_insert_vmm_mapping(vcpu);
- local_irq_restore(psr);
- return r;
-}
-
-static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu)
-{
- kvm_purge_vmm_mapping(vcpu);
- vti_set_rr6(vcpu->arch.host_rr6);
-}
-
-static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- union context *host_ctx, *guest_ctx;
- int r, idx;
-
- idx = srcu_read_lock(&vcpu->kvm->srcu);
-
-again:
- if (signal_pending(current)) {
- r = -EINTR;
- kvm_run->exit_reason = KVM_EXIT_INTR;
- goto out;
- }
-
- preempt_disable();
- local_irq_disable();
-
- /*Get host and guest context with guest address space.*/
- host_ctx = kvm_get_host_context(vcpu);
- guest_ctx = kvm_get_guest_context(vcpu);
-
- clear_bit(KVM_REQ_KICK, &vcpu->requests);
-
- r = kvm_vcpu_pre_transition(vcpu);
- if (r < 0)
- goto vcpu_run_fail;
-
- srcu_read_unlock(&vcpu->kvm->srcu, idx);
- vcpu->mode = IN_GUEST_MODE;
- kvm_guest_enter();
-
- /*
- * Transition to the guest
- */
- kvm_vmm_info->tramp_entry(host_ctx, guest_ctx);
-
- kvm_vcpu_post_transition(vcpu);
-
- vcpu->arch.launched = 1;
- set_bit(KVM_REQ_KICK, &vcpu->requests);
- local_irq_enable();
-
- /*
- * We must have an instruction between local_irq_enable() and
- * kvm_guest_exit(), so the timer interrupt isn't delayed by
- * the interrupt shadow. The stat.exits increment will do nicely.
- * But we need to prevent reordering, hence this barrier():
- */
- barrier();
- kvm_guest_exit();
- vcpu->mode = OUTSIDE_GUEST_MODE;
- preempt_enable();
-
- idx = srcu_read_lock(&vcpu->kvm->srcu);
-
- r = kvm_handle_exit(kvm_run, vcpu);
-
- if (r > 0) {
- if (!need_resched())
- goto again;
- }
-
-out:
- srcu_read_unlock(&vcpu->kvm->srcu, idx);
- if (r > 0) {
- cond_resched();
- idx = srcu_read_lock(&vcpu->kvm->srcu);
- goto again;
- }
-
- return r;
-
-vcpu_run_fail:
- local_irq_enable();
- preempt_enable();
- kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
- goto out;
-}
-
-static void kvm_set_mmio_data(struct kvm_vcpu *vcpu)
-{
- struct kvm_mmio_req *p = kvm_get_vcpu_ioreq(vcpu);
-
- if (!vcpu->mmio_is_write)
- memcpy(&p->data, vcpu->arch.mmio_data, 8);
- p->state = STATE_IORESP_READY;
-}
-
-int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- int r;
- sigset_t sigsaved;
-
- if (vcpu->sigset_active)
- sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
-
- if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
- kvm_vcpu_block(vcpu);
- clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
- r = -EAGAIN;
- goto out;
- }
-
- if (vcpu->mmio_needed) {
- memcpy(vcpu->arch.mmio_data, kvm_run->mmio.data, 8);
- kvm_set_mmio_data(vcpu);
- vcpu->mmio_read_completed = 1;
- vcpu->mmio_needed = 0;
- }
- r = __vcpu_run(vcpu, kvm_run);
-out:
- if (vcpu->sigset_active)
- sigprocmask(SIG_SETMASK, &sigsaved, NULL);
-
- return r;
-}
-
-struct kvm *kvm_arch_alloc_vm(void)
-{
-
- struct kvm *kvm;
- uint64_t vm_base;
-
- BUG_ON(sizeof(struct kvm) > KVM_VM_STRUCT_SIZE);
-
- vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE));
-
- if (!vm_base)
- return NULL;
-
- memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
- kvm = (struct kvm *)(vm_base +
- offsetof(struct kvm_vm_data, kvm_vm_struct));
- kvm->arch.vm_base = vm_base;
- printk(KERN_DEBUG"kvm: vm's data area:0x%lx\n", vm_base);
-
- return kvm;
-}
-
-struct kvm_ia64_io_range {
- unsigned long start;
- unsigned long size;
- unsigned long type;
-};
-
-static const struct kvm_ia64_io_range io_ranges[] = {
- {VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER},
- {MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO},
- {LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO},
- {IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC},
- {PIB_START, PIB_SIZE, GPFN_PIB},
-};
-
-static void kvm_build_io_pmt(struct kvm *kvm)
-{
- unsigned long i, j;
-
- /* Mark I/O ranges */
- for (i = 0; i < (sizeof(io_ranges) / sizeof(struct kvm_ia64_io_range));
- i++) {
- for (j = io_ranges[i].start;
- j < io_ranges[i].start + io_ranges[i].size;
- j += PAGE_SIZE)
- kvm_set_pmt_entry(kvm, j >> PAGE_SHIFT,
- io_ranges[i].type, 0);
- }
-
-}
-
-/*Use unused rids to virtualize guest rid.*/
-#define GUEST_PHYSICAL_RR0 0x1739
-#define GUEST_PHYSICAL_RR4 0x2739
-#define VMM_INIT_RR 0x1660
-
-int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
-{
- BUG_ON(!kvm);
-
- if (type)
- return -EINVAL;
-
- kvm->arch.is_sn2 = ia64_platform_is("sn2");
-
- kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0;
- kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4;
- kvm->arch.vmm_init_rr = VMM_INIT_RR;
-
- /*
- *Fill P2M entries for MMIO/IO ranges
- */
- kvm_build_io_pmt(kvm);
-
- INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
-
- /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
- set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
-
- return 0;
-}
-
-static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm,
- struct kvm_irqchip *chip)
-{
- int r;
-
- r = 0;
- switch (chip->chip_id) {
- case KVM_IRQCHIP_IOAPIC:
- r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
- break;
- default:
- r = -EINVAL;
- break;
- }
- return r;
-}
-
-static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
-{
- int r;
-
- r = 0;
- switch (chip->chip_id) {
- case KVM_IRQCHIP_IOAPIC:
- r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
- break;
- default:
- r = -EINVAL;
- break;
- }
- return r;
-}
-
-#define RESTORE_REGS(_x) vcpu->arch._x = regs->_x
-
-int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
-{
- struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
- int i;
-
- for (i = 0; i < 16; i++) {
- vpd->vgr[i] = regs->vpd.vgr[i];
- vpd->vbgr[i] = regs->vpd.vbgr[i];
- }
- for (i = 0; i < 128; i++)
- vpd->vcr[i] = regs->vpd.vcr[i];
- vpd->vhpi = regs->vpd.vhpi;
- vpd->vnat = regs->vpd.vnat;
- vpd->vbnat = regs->vpd.vbnat;
- vpd->vpsr = regs->vpd.vpsr;
-
- vpd->vpr = regs->vpd.vpr;
-
- memcpy(&vcpu->arch.guest, &regs->saved_guest, sizeof(union context));
-
- RESTORE_REGS(mp_state);
- RESTORE_REGS(vmm_rr);
- memcpy(vcpu->arch.itrs, regs->itrs, sizeof(struct thash_data) * NITRS);
- memcpy(vcpu->arch.dtrs, regs->dtrs, sizeof(struct thash_data) * NDTRS);
- RESTORE_REGS(itr_regions);
- RESTORE_REGS(dtr_regions);
- RESTORE_REGS(tc_regions);
- RESTORE_REGS(irq_check);
- RESTORE_REGS(itc_check);
- RESTORE_REGS(timer_check);
- RESTORE_REGS(timer_pending);
- RESTORE_REGS(last_itc);
- for (i = 0; i < 8; i++) {
- vcpu->arch.vrr[i] = regs->vrr[i];
- vcpu->arch.ibr[i] = regs->ibr[i];
- vcpu->arch.dbr[i] = regs->dbr[i];
- }
- for (i = 0; i < 4; i++)
- vcpu->arch.insvc[i] = regs->insvc[i];
- RESTORE_REGS(xtp);
- RESTORE_REGS(metaphysical_rr0);
- RESTORE_REGS(metaphysical_rr4);
- RESTORE_REGS(metaphysical_saved_rr0);
- RESTORE_REGS(metaphysical_saved_rr4);
- RESTORE_REGS(fp_psr);
- RESTORE_REGS(saved_gp);
-
- vcpu->arch.irq_new_pending = 1;
- vcpu->arch.itc_offset = regs->saved_itc - kvm_get_itc(vcpu);
- set_bit(KVM_REQ_RESUME, &vcpu->requests);
-
- return 0;
-}
-
-int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
- bool line_status)
-{
- if (!irqchip_in_kernel(kvm))
- return -ENXIO;
-
- irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
- irq_event->irq, irq_event->level,
- line_status);
- return 0;
-}
-
-long kvm_arch_vm_ioctl(struct file *filp,
- unsigned int ioctl, unsigned long arg)
-{
- struct kvm *kvm = filp->private_data;
- void __user *argp = (void __user *)arg;
- int r = -ENOTTY;
-
- switch (ioctl) {
- case KVM_CREATE_IRQCHIP:
- r = -EFAULT;
- r = kvm_ioapic_init(kvm);
- if (r)
- goto out;
- r = kvm_setup_default_irq_routing(kvm);
- if (r) {
- mutex_lock(&kvm->slots_lock);
- kvm_ioapic_destroy(kvm);
- mutex_unlock(&kvm->slots_lock);
- goto out;
- }
- break;
- case KVM_GET_IRQCHIP: {
- /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
- struct kvm_irqchip chip;
-
- r = -EFAULT;
- if (copy_from_user(&chip, argp, sizeof chip))
- goto out;
- r = -ENXIO;
- if (!irqchip_in_kernel(kvm))
- goto out;
- r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
- if (r)
- goto out;
- r = -EFAULT;
- if (copy_to_user(argp, &chip, sizeof chip))
- goto out;
- r = 0;
- break;
- }
- case KVM_SET_IRQCHIP: {
- /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
- struct kvm_irqchip chip;
-
- r = -EFAULT;
- if (copy_from_user(&chip, argp, sizeof chip))
- goto out;
- r = -ENXIO;
- if (!irqchip_in_kernel(kvm))
- goto out;
- r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
- if (r)
- goto out;
- r = 0;
- break;
- }
- default:
- ;
- }
-out:
- return r;
-}
-
-int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
- struct kvm_sregs *sregs)
-{
- return -EINVAL;
-}
-
-int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
- struct kvm_sregs *sregs)
-{
- return -EINVAL;
-
-}
-int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
- struct kvm_translation *tr)
-{
-
- return -EINVAL;
-}
-
-static int kvm_alloc_vmm_area(void)
-{
- if (!kvm_vmm_base && (kvm_vm_buffer_size < KVM_VM_BUFFER_SIZE)) {
- kvm_vmm_base = __get_free_pages(GFP_KERNEL,
- get_order(KVM_VMM_SIZE));
- if (!kvm_vmm_base)
- return -ENOMEM;
-
- memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
- kvm_vm_buffer = kvm_vmm_base + VMM_SIZE;
-
- printk(KERN_DEBUG"kvm:VMM's Base Addr:0x%lx, vm_buffer:0x%lx\n",
- kvm_vmm_base, kvm_vm_buffer);
- }
-
- return 0;
-}
-
-static void kvm_free_vmm_area(void)
-{
- if (kvm_vmm_base) {
- /* Zero this area before freeing to avoid leaking data */
- memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
- free_pages(kvm_vmm_base, get_order(KVM_VMM_SIZE));
- kvm_vmm_base = 0;
- kvm_vm_buffer = 0;
- kvm_vsa_base = 0;
- }
-}
-
-static int vti_init_vpd(struct kvm_vcpu *vcpu)
-{
- int i;
- union cpuid3_t cpuid3;
- struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
-
- if (IS_ERR(vpd))
- return PTR_ERR(vpd);
-
- /* CPUID init */
- for (i = 0; i < 5; i++)
- vpd->vcpuid[i] = ia64_get_cpuid(i);
-
- /* Limit the CPUID number to 5 */
- cpuid3.value = vpd->vcpuid[3];
- cpuid3.number = 4; /* 5 - 1 */
- vpd->vcpuid[3] = cpuid3.value;
-
- /*Set vac and vdc fields*/
- vpd->vac.a_from_int_cr = 1;
- vpd->vac.a_to_int_cr = 1;
- vpd->vac.a_from_psr = 1;
- vpd->vac.a_from_cpuid = 1;
- vpd->vac.a_cover = 1;
- vpd->vac.a_bsw = 1;
- vpd->vac.a_int = 1;
- vpd->vdc.d_vmsw = 1;
-
- /*Set virtual buffer*/
- vpd->virt_env_vaddr = KVM_VM_BUFFER_BASE;
-
- return 0;
-}
-
-static int vti_create_vp(struct kvm_vcpu *vcpu)
-{
- long ret;
- struct vpd *vpd = vcpu->arch.vpd;
- unsigned long vmm_ivt;
-
- vmm_ivt = kvm_vmm_info->vmm_ivt;
-
- printk(KERN_DEBUG "kvm: vcpu:%p,ivt: 0x%lx\n", vcpu, vmm_ivt);
-
- ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)vmm_ivt, 0);
-
- if (ret) {
- printk(KERN_ERR"kvm: ia64_pal_vp_create failed!\n");
- return -EINVAL;
- }
- return 0;
-}
-
-static void init_ptce_info(struct kvm_vcpu *vcpu)
-{
- ia64_ptce_info_t ptce = {0};
-
- ia64_get_ptce(&ptce);
- vcpu->arch.ptce_base = ptce.base;
- vcpu->arch.ptce_count[0] = ptce.count[0];
- vcpu->arch.ptce_count[1] = ptce.count[1];
- vcpu->arch.ptce_stride[0] = ptce.stride[0];
- vcpu->arch.ptce_stride[1] = ptce.stride[1];
-}
-
-static void kvm_migrate_hlt_timer(struct kvm_vcpu *vcpu)
-{
- struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
-
- if (hrtimer_cancel(p_ht))
- hrtimer_start_expires(p_ht, HRTIMER_MODE_ABS);
-}
-
-static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data)
-{
- struct kvm_vcpu *vcpu;
- wait_queue_head_t *q;
-
- vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer);
- q = &vcpu->wq;
-
- if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED)
- goto out;
-
- if (waitqueue_active(q))
- wake_up_interruptible(q);
-
-out:
- vcpu->arch.timer_fired = 1;
- vcpu->arch.timer_check = 1;
- return HRTIMER_NORESTART;
-}
-
-#define PALE_RESET_ENTRY 0x80000000ffffffb0UL
-
-bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
-{
- return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
-}
-
-int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
-{
- struct kvm_vcpu *v;
- int r;
- int i;
- long itc_offset;
- struct kvm *kvm = vcpu->kvm;
- struct kvm_pt_regs *regs = vcpu_regs(vcpu);
-
- union context *p_ctx = &vcpu->arch.guest;
- struct kvm_vcpu *vmm_vcpu = to_guest(vcpu->kvm, vcpu);
-
- /*Init vcpu context for first run.*/
- if (IS_ERR(vmm_vcpu))
- return PTR_ERR(vmm_vcpu);
-
- if (kvm_vcpu_is_bsp(vcpu)) {
- vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
-
- /*Set entry address for first run.*/
- regs->cr_iip = PALE_RESET_ENTRY;
-
- /*Initialize itc offset for vcpus*/
- itc_offset = 0UL - kvm_get_itc(vcpu);
- for (i = 0; i < KVM_MAX_VCPUS; i++) {
- v = (struct kvm_vcpu *)((char *)vcpu +
- sizeof(struct kvm_vcpu_data) * i);
- v->arch.itc_offset = itc_offset;
- v->arch.last_itc = 0;
- }
- } else
- vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
-
- r = -ENOMEM;
- vcpu->arch.apic = kzalloc(sizeof(struct kvm_lapic), GFP_KERNEL);
- if (!vcpu->arch.apic)
- goto out;
- vcpu->arch.apic->vcpu = vcpu;
-
- p_ctx->gr[1] = 0;
- p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + KVM_STK_OFFSET);
- p_ctx->gr[13] = (unsigned long)vmm_vcpu;
- p_ctx->psr = 0x1008522000UL;
- p_ctx->ar[40] = FPSR_DEFAULT; /*fpsr*/
- p_ctx->caller_unat = 0;
- p_ctx->pr = 0x0;
- p_ctx->ar[36] = 0x0; /*unat*/
- p_ctx->ar[19] = 0x0; /*rnat*/
- p_ctx->ar[18] = (unsigned long)vmm_vcpu +
- ((sizeof(struct kvm_vcpu)+15) & ~15);
- p_ctx->ar[64] = 0x0; /*pfs*/
- p_ctx->cr[0] = 0x7e04UL;
- p_ctx->cr[2] = (unsigned long)kvm_vmm_info->vmm_ivt;
- p_ctx->cr[8] = 0x3c;
-
- /*Initialize region register*/
- p_ctx->rr[0] = 0x30;
- p_ctx->rr[1] = 0x30;
- p_ctx->rr[2] = 0x30;
- p_ctx->rr[3] = 0x30;
- p_ctx->rr[4] = 0x30;
- p_ctx->rr[5] = 0x30;
- p_ctx->rr[7] = 0x30;
-
- /*Initialize branch register 0*/
- p_ctx->br[0] = *(unsigned long *)kvm_vmm_info->vmm_entry;
-
- vcpu->arch.vmm_rr = kvm->arch.vmm_init_rr;
- vcpu->arch.metaphysical_rr0 = kvm->arch.metaphysical_rr0;
- vcpu->arch.metaphysical_rr4 = kvm->arch.metaphysical_rr4;
-
- hrtimer_init(&vcpu->arch.hlt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- vcpu->arch.hlt_timer.function = hlt_timer_fn;
-
- vcpu->arch.last_run_cpu = -1;
- vcpu->arch.vpd = (struct vpd *)VPD_BASE(vcpu->vcpu_id);
- vcpu->arch.vsa_base = kvm_vsa_base;
- vcpu->arch.__gp = kvm_vmm_gp;
- vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock);
- vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_BASE(vcpu->vcpu_id);
- vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_BASE(vcpu->vcpu_id);
- init_ptce_info(vcpu);
-
- r = 0;
-out:
- return r;
-}
-
-static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id)
-{
- unsigned long psr;
- int r;
-
- local_irq_save(psr);
- r = kvm_insert_vmm_mapping(vcpu);
- local_irq_restore(psr);
- if (r)
- goto fail;
- r = kvm_vcpu_init(vcpu, vcpu->kvm, id);
- if (r)
- goto fail;
-
- r = vti_init_vpd(vcpu);
- if (r) {
- printk(KERN_DEBUG"kvm: vpd init error!!\n");
- goto uninit;
- }
-
- r = vti_create_vp(vcpu);
- if (r)
- goto uninit;
-
- kvm_purge_vmm_mapping(vcpu);
-
- return 0;
-uninit:
- kvm_vcpu_uninit(vcpu);
-fail:
- return r;
-}
-
-struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
- unsigned int id)
-{
- struct kvm_vcpu *vcpu;
- unsigned long vm_base = kvm->arch.vm_base;
- int r;
- int cpu;
-
- BUG_ON(sizeof(struct kvm_vcpu) > VCPU_STRUCT_SIZE/2);
-
- r = -EINVAL;
- if (id >= KVM_MAX_VCPUS) {
- printk(KERN_ERR"kvm: Can't configure vcpus > %ld",
- KVM_MAX_VCPUS);
- goto fail;
- }
-
- r = -ENOMEM;
- if (!vm_base) {
- printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id);
- goto fail;
- }
- vcpu = (struct kvm_vcpu *)(vm_base + offsetof(struct kvm_vm_data,
- vcpu_data[id].vcpu_struct));
- vcpu->kvm = kvm;
-
- cpu = get_cpu();
- r = vti_vcpu_setup(vcpu, id);
- put_cpu();
-
- if (r) {
- printk(KERN_DEBUG"kvm: vcpu_setup error!!\n");
- goto fail;
- }
-
- return vcpu;
-fail:
- return ERR_PTR(r);
-}
-
-int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
-{
- return 0;
-}
-
-int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
-{
- return 0;
-}
-
-int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
-{
- return -EINVAL;
-}
-
-int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
-{
- return -EINVAL;
-}
-
-int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
- struct kvm_guest_debug *dbg)
-{
- return -EINVAL;
-}
-
-void kvm_arch_free_vm(struct kvm *kvm)
-{
- unsigned long vm_base = kvm->arch.vm_base;
-
- if (vm_base) {
- memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
- free_pages(vm_base, get_order(KVM_VM_DATA_SIZE));
- }
-
-}
-
-static void kvm_release_vm_pages(struct kvm *kvm)
-{
- struct kvm_memslots *slots;
- struct kvm_memory_slot *memslot;
- int j;
-
- slots = kvm_memslots(kvm);
- kvm_for_each_memslot(memslot, slots) {
- for (j = 0; j < memslot->npages; j++) {
- if (memslot->rmap[j])
- put_page((struct page *)memslot->rmap[j]);
- }
- }
-}
-
-void kvm_arch_destroy_vm(struct kvm *kvm)
-{
- kvm_iommu_unmap_guest(kvm);
- kvm_free_all_assigned_devices(kvm);
- kfree(kvm->arch.vioapic);
- kvm_release_vm_pages(kvm);
-}
-
-void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
-{
- if (cpu != vcpu->cpu) {
- vcpu->cpu = cpu;
- if (vcpu->arch.ht_active)
- kvm_migrate_hlt_timer(vcpu);
- }
-}
-
-#define SAVE_REGS(_x) regs->_x = vcpu->arch._x
-
-int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
-{
- struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
- int i;
-
- vcpu_load(vcpu);
-
- for (i = 0; i < 16; i++) {
- regs->vpd.vgr[i] = vpd->vgr[i];
- regs->vpd.vbgr[i] = vpd->vbgr[i];
- }
- for (i = 0; i < 128; i++)
- regs->vpd.vcr[i] = vpd->vcr[i];
- regs->vpd.vhpi = vpd->vhpi;
- regs->vpd.vnat = vpd->vnat;
- regs->vpd.vbnat = vpd->vbnat;
- regs->vpd.vpsr = vpd->vpsr;
- regs->vpd.vpr = vpd->vpr;
-
- memcpy(&regs->saved_guest, &vcpu->arch.guest, sizeof(union context));
-
- SAVE_REGS(mp_state);
- SAVE_REGS(vmm_rr);
- memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS);
- memcpy(regs->dtrs, vcpu->arch.dtrs, sizeof(struct thash_data) * NDTRS);
- SAVE_REGS(itr_regions);
- SAVE_REGS(dtr_regions);
- SAVE_REGS(tc_regions);
- SAVE_REGS(irq_check);
- SAVE_REGS(itc_check);
- SAVE_REGS(timer_check);
- SAVE_REGS(timer_pending);
- SAVE_REGS(last_itc);
- for (i = 0; i < 8; i++) {
- regs->vrr[i] = vcpu->arch.vrr[i];
- regs->ibr[i] = vcpu->arch.ibr[i];
- regs->dbr[i] = vcpu->arch.dbr[i];
- }
- for (i = 0; i < 4; i++)
- regs->insvc[i] = vcpu->arch.insvc[i];
- regs->saved_itc = vcpu->arch.itc_offset + kvm_get_itc(vcpu);
- SAVE_REGS(xtp);
- SAVE_REGS(metaphysical_rr0);
- SAVE_REGS(metaphysical_rr4);
- SAVE_REGS(metaphysical_saved_rr0);
- SAVE_REGS(metaphysical_saved_rr4);
- SAVE_REGS(fp_psr);
- SAVE_REGS(saved_gp);
-
- vcpu_put(vcpu);
- return 0;
-}
-
-int kvm_arch_vcpu_ioctl_get_stack(struct kvm_vcpu *vcpu,
- struct kvm_ia64_vcpu_stack *stack)
-{
- memcpy(stack, vcpu, sizeof(struct kvm_ia64_vcpu_stack));
- return 0;
-}
-
-int kvm_arch_vcpu_ioctl_set_stack(struct kvm_vcpu *vcpu,
- struct kvm_ia64_vcpu_stack *stack)
-{
- memcpy(vcpu + 1, &stack->stack[0] + sizeof(struct kvm_vcpu),
- sizeof(struct kvm_ia64_vcpu_stack) - sizeof(struct kvm_vcpu));
-
- vcpu->arch.exit_data = ((struct kvm_vcpu *)stack)->arch.exit_data;
- return 0;
-}
-
-void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
-{
-
- hrtimer_cancel(&vcpu->arch.hlt_timer);
- kfree(vcpu->arch.apic);
-}
-
-long kvm_arch_vcpu_ioctl(struct file *filp,
- unsigned int ioctl, unsigned long arg)
-{
- struct kvm_vcpu *vcpu = filp->private_data;
- void __user *argp = (void __user *)arg;
- struct kvm_ia64_vcpu_stack *stack = NULL;
- long r;
-
- switch (ioctl) {
- case KVM_IA64_VCPU_GET_STACK: {
- struct kvm_ia64_vcpu_stack __user *user_stack;
- void __user *first_p = argp;
-
- r = -EFAULT;
- if (copy_from_user(&user_stack, first_p, sizeof(void *)))
- goto out;
-
- if (!access_ok(VERIFY_WRITE, user_stack,
- sizeof(struct kvm_ia64_vcpu_stack))) {
- printk(KERN_INFO "KVM_IA64_VCPU_GET_STACK: "
- "Illegal user destination address for stack\n");
- goto out;
- }
- stack = kzalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL);
- if (!stack) {
- r = -ENOMEM;
- goto out;
- }
-
- r = kvm_arch_vcpu_ioctl_get_stack(vcpu, stack);
- if (r)
- goto out;
-
- if (copy_to_user(user_stack, stack,
- sizeof(struct kvm_ia64_vcpu_stack))) {
- r = -EFAULT;
- goto out;
- }
-
- break;
- }
- case KVM_IA64_VCPU_SET_STACK: {
- struct kvm_ia64_vcpu_stack __user *user_stack;
- void __user *first_p = argp;
-
- r = -EFAULT;
- if (copy_from_user(&user_stack, first_p, sizeof(void *)))
- goto out;
-
- if (!access_ok(VERIFY_READ, user_stack,
- sizeof(struct kvm_ia64_vcpu_stack))) {
- printk(KERN_INFO "KVM_IA64_VCPU_SET_STACK: "
- "Illegal user address for stack\n");
- goto out;
- }
- stack = kmalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL);
- if (!stack) {
- r = -ENOMEM;
- goto out;
- }
- if (copy_from_user(stack, user_stack,
- sizeof(struct kvm_ia64_vcpu_stack)))
- goto out;
-
- r = kvm_arch_vcpu_ioctl_set_stack(vcpu, stack);
- break;
- }
-
- default:
- r = -EINVAL;
- }
-
-out:
- kfree(stack);
- return r;
-}
-
-int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
-{
- return VM_FAULT_SIGBUS;
-}
-
-int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
- unsigned long npages)
-{
- return 0;
-}
-
-int kvm_arch_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- struct kvm_userspace_memory_region *mem,
- enum kvm_mr_change change)
-{
- unsigned long i;
- unsigned long pfn;
- int npages = memslot->npages;
- unsigned long base_gfn = memslot->base_gfn;
-
- if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT))
- return -ENOMEM;
-
- for (i = 0; i < npages; i++) {
- pfn = gfn_to_pfn(kvm, base_gfn + i);
- if (!kvm_is_reserved_pfn(pfn)) {
- kvm_set_pmt_entry(kvm, base_gfn + i,
- pfn << PAGE_SHIFT,
- _PAGE_AR_RWX | _PAGE_MA_WB);
- memslot->rmap[i] = (unsigned long)pfn_to_page(pfn);
- } else {
- kvm_set_pmt_entry(kvm, base_gfn + i,
- GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT),
- _PAGE_MA_UC);
- memslot->rmap[i] = 0;
- }
- }
-
- return 0;
-}
-
-void kvm_arch_flush_shadow_all(struct kvm *kvm)
-{
- kvm_flush_remote_tlbs(kvm);
-}
-
-void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
- struct kvm_memory_slot *slot)
-{
- kvm_arch_flush_shadow_all(kvm);
-}
-
-long kvm_arch_dev_ioctl(struct file *filp,
- unsigned int ioctl, unsigned long arg)
-{
- return -EINVAL;
-}
-
-void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
-{
- kvm_vcpu_uninit(vcpu);
-}
-
-static int vti_cpu_has_kvm_support(void)
-{
- long avail = 1, status = 1, control = 1;
- long ret;
-
- ret = ia64_pal_proc_get_features(&avail, &status, &control, 0);
- if (ret)
- goto out;
-
- if (!(avail & PAL_PROC_VM_BIT))
- goto out;
-
- printk(KERN_DEBUG"kvm: Hardware Supports VT\n");
-
- ret = ia64_pal_vp_env_info(&kvm_vm_buffer_size, &vp_env_info);
- if (ret)
- goto out;
- printk(KERN_DEBUG"kvm: VM Buffer Size:0x%lx\n", kvm_vm_buffer_size);
-
- if (!(vp_env_info & VP_OPCODE)) {
- printk(KERN_WARNING"kvm: No opcode ability on hardware, "
- "vm_env_info:0x%lx\n", vp_env_info);
- }
-
- return 1;
-out:
- return 0;
-}
-
-
-/*
- * On SN2, the ITC isn't stable, so copy in fast path code to use the
- * SN2 RTC, replacing the ITC-based default version.
- */
-static void kvm_patch_vmm(struct kvm_vmm_info *vmm_info,
- struct module *module)
-{
- unsigned long new_ar, new_ar_sn2;
- unsigned long module_base;
-
- if (!ia64_platform_is("sn2"))
- return;
-
- module_base = (unsigned long)module->module_core;
-
- new_ar = kvm_vmm_base + vmm_info->patch_mov_ar - module_base;
- new_ar_sn2 = kvm_vmm_base + vmm_info->patch_mov_ar_sn2 - module_base;
-
- printk(KERN_INFO "kvm: Patching ITC emulation to use SGI SN2 RTC "
- "as source\n");
-
- /*
- * Copy the SN2 version of mov_ar into place. They are both
- * the same size, so 6 bundles is sufficient (6 * 0x10).
- */
- memcpy((void *)new_ar, (void *)new_ar_sn2, 0x60);
-}
-
-static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info,
- struct module *module)
-{
- unsigned long module_base;
- unsigned long vmm_size;
-
- unsigned long vmm_offset, func_offset, fdesc_offset;
- struct fdesc *p_fdesc;
-
- BUG_ON(!module);
-
- if (!kvm_vmm_base) {
- printk("kvm: kvm area hasn't been initialized yet!!\n");
- return -EFAULT;
- }
-
- /*Calculate new position of relocated vmm module.*/
- module_base = (unsigned long)module->module_core;
- vmm_size = module->core_size;
- if (unlikely(vmm_size > KVM_VMM_SIZE))
- return -EFAULT;
-
- memcpy((void *)kvm_vmm_base, (void *)module_base, vmm_size);
- kvm_patch_vmm(vmm_info, module);
- kvm_flush_icache(kvm_vmm_base, vmm_size);
-
- /*Recalculate kvm_vmm_info based on new VMM*/
- vmm_offset = vmm_info->vmm_ivt - module_base;
- kvm_vmm_info->vmm_ivt = KVM_VMM_BASE + vmm_offset;
- printk(KERN_DEBUG"kvm: Relocated VMM's IVT Base Addr:%lx\n",
- kvm_vmm_info->vmm_ivt);
-
- fdesc_offset = (unsigned long)vmm_info->vmm_entry - module_base;
- kvm_vmm_info->vmm_entry = (kvm_vmm_entry *)(KVM_VMM_BASE +
- fdesc_offset);
- func_offset = *(unsigned long *)vmm_info->vmm_entry - module_base;
- p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
- p_fdesc->ip = KVM_VMM_BASE + func_offset;
- p_fdesc->gp = KVM_VMM_BASE+(p_fdesc->gp - module_base);
-
- printk(KERN_DEBUG"kvm: Relocated VMM's Init Entry Addr:%lx\n",
- KVM_VMM_BASE+func_offset);
-
- fdesc_offset = (unsigned long)vmm_info->tramp_entry - module_base;
- kvm_vmm_info->tramp_entry = (kvm_tramp_entry *)(KVM_VMM_BASE +
- fdesc_offset);
- func_offset = *(unsigned long *)vmm_info->tramp_entry - module_base;
- p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
- p_fdesc->ip = KVM_VMM_BASE + func_offset;
- p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base);
-
- kvm_vmm_gp = p_fdesc->gp;
-
- printk(KERN_DEBUG"kvm: Relocated VMM's Entry IP:%p\n",
- kvm_vmm_info->vmm_entry);
- printk(KERN_DEBUG"kvm: Relocated VMM's Trampoline Entry IP:0x%lx\n",
- KVM_VMM_BASE + func_offset);
-
- return 0;
-}
-
-int kvm_arch_init(void *opaque)
-{
- int r;
- struct kvm_vmm_info *vmm_info = (struct kvm_vmm_info *)opaque;
-
- if (!vti_cpu_has_kvm_support()) {
- printk(KERN_ERR "kvm: No Hardware Virtualization Support!\n");
- r = -EOPNOTSUPP;
- goto out;
- }
-
- if (kvm_vmm_info) {
- printk(KERN_ERR "kvm: Already loaded VMM module!\n");
- r = -EEXIST;
- goto out;
- }
-
- r = -ENOMEM;
- kvm_vmm_info = kzalloc(sizeof(struct kvm_vmm_info), GFP_KERNEL);
- if (!kvm_vmm_info)
- goto out;
-
- if (kvm_alloc_vmm_area())
- goto out_free0;
-
- r = kvm_relocate_vmm(vmm_info, vmm_info->module);
- if (r)
- goto out_free1;
-
- return 0;
-
-out_free1:
- kvm_free_vmm_area();
-out_free0:
- kfree(kvm_vmm_info);
-out:
- return r;
-}
-
-void kvm_arch_exit(void)
-{
- kvm_free_vmm_area();
- kfree(kvm_vmm_info);
- kvm_vmm_info = NULL;
-}
-
-static void kvm_ia64_sync_dirty_log(struct kvm *kvm,
- struct kvm_memory_slot *memslot)
-{
- int i;
- long base;
- unsigned long n;
- unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
- offsetof(struct kvm_vm_data, kvm_mem_dirty_log));
-
- n = kvm_dirty_bitmap_bytes(memslot);
- base = memslot->base_gfn / BITS_PER_LONG;
-
- spin_lock(&kvm->arch.dirty_log_lock);
- for (i = 0; i < n/sizeof(long); ++i) {
- memslot->dirty_bitmap[i] = dirty_bitmap[base + i];
- dirty_bitmap[base + i] = 0;
- }
- spin_unlock(&kvm->arch.dirty_log_lock);
-}
-
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
- struct kvm_dirty_log *log)
-{
- int r;
- unsigned long n;
- struct kvm_memory_slot *memslot;
- int is_dirty = 0;
-
- mutex_lock(&kvm->slots_lock);
-
- r = -EINVAL;
- if (log->slot >= KVM_USER_MEM_SLOTS)
- goto out;
-
- memslot = id_to_memslot(kvm->memslots, log->slot);
- r = -ENOENT;
- if (!memslot->dirty_bitmap)
- goto out;
-
- kvm_ia64_sync_dirty_log(kvm, memslot);
- r = kvm_get_dirty_log(kvm, log, &is_dirty);
- if (r)
- goto out;
-
- /* If nothing is dirty, don't bother messing with page tables. */
- if (is_dirty) {
- kvm_flush_remote_tlbs(kvm);
- n = kvm_dirty_bitmap_bytes(memslot);
- memset(memslot->dirty_bitmap, 0, n);
- }
- r = 0;
-out:
- mutex_unlock(&kvm->slots_lock);
- return r;
-}
-
-int kvm_arch_hardware_setup(void)
-{
- return 0;
-}
-
-int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq)
-{
- return __apic_accept_irq(vcpu, irq->vector);
-}
-
-int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
-{
- return apic->vcpu->vcpu_id == dest;
-}
-
-int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
-{
- return 0;
-}
-
-int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
-{
- return vcpu1->arch.xtp - vcpu2->arch.xtp;
-}
-
-int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
- int short_hand, int dest, int dest_mode)
-{
- struct kvm_lapic *target = vcpu->arch.apic;
- return (dest_mode == 0) ?
- kvm_apic_match_physical_addr(target, dest) :
- kvm_apic_match_logical_addr(target, dest);
-}
-
-static int find_highest_bits(int *dat)
-{
- u32 bits, bitnum;
- int i;
-
- /* loop for all 256 bits */
- for (i = 7; i >= 0 ; i--) {
- bits = dat[i];
- if (bits) {
- bitnum = fls(bits);
- return i * 32 + bitnum - 1;
- }
- }
-
- return -1;
-}
-
-int kvm_highest_pending_irq(struct kvm_vcpu *vcpu)
-{
- struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
-
- if (vpd->irr[0] & (1UL << NMI_VECTOR))
- return NMI_VECTOR;
- if (vpd->irr[0] & (1UL << ExtINT_VECTOR))
- return ExtINT_VECTOR;
-
- return find_highest_bits((int *)&vpd->irr[0]);
-}
-
-int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.timer_fired;
-}
-
-int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
-{
- return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE) ||
- (kvm_highest_pending_irq(vcpu) != -1);
-}
-
-int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
-{
- return (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests));
-}
-
-int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
- struct kvm_mp_state *mp_state)
-{
- mp_state->mp_state = vcpu->arch.mp_state;
- return 0;
-}
-
-static int vcpu_reset(struct kvm_vcpu *vcpu)
-{
- int r;
- long psr;
- local_irq_save(psr);
- r = kvm_insert_vmm_mapping(vcpu);
- local_irq_restore(psr);
- if (r)
- goto fail;
-
- vcpu->arch.launched = 0;
- kvm_arch_vcpu_uninit(vcpu);
- r = kvm_arch_vcpu_init(vcpu);
- if (r)
- goto fail;
-
- kvm_purge_vmm_mapping(vcpu);
- r = 0;
-fail:
- return r;
-}
-
-int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
- struct kvm_mp_state *mp_state)
-{
- int r = 0;
-
- vcpu->arch.mp_state = mp_state->mp_state;
- if (vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)
- r = vcpu_reset(vcpu);
- return r;
-}
diff --git a/arch/ia64/kvm/kvm_fw.c b/arch/ia64/kvm/kvm_fw.c
deleted file mode 100644
index cb548ee9fca..00000000000
--- a/arch/ia64/kvm/kvm_fw.c
+++ /dev/null
@@ -1,674 +0,0 @@
-/*
- * PAL/SAL call delegation
- *
- * Copyright (c) 2004 Li Susie <susie.li@intel.com>
- * Copyright (c) 2005 Yu Ke <ke.yu@intel.com>
- * Copyright (c) 2007 Xiantao Zhang <xiantao.zhang@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- */
-
-#include <linux/kvm_host.h>
-#include <linux/smp.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/clksupport.h>
-#include <asm/sn/shub_mmr.h>
-
-#include "vti.h"
-#include "misc.h"
-
-#include <asm/pal.h>
-#include <asm/sal.h>
-#include <asm/tlb.h>
-
-/*
- * Handy macros to make sure that the PAL return values start out
- * as something meaningful.
- */
-#define INIT_PAL_STATUS_UNIMPLEMENTED(x) \
- { \
- x.status = PAL_STATUS_UNIMPLEMENTED; \
- x.v0 = 0; \
- x.v1 = 0; \
- x.v2 = 0; \
- }
-
-#define INIT_PAL_STATUS_SUCCESS(x) \
- { \
- x.status = PAL_STATUS_SUCCESS; \
- x.v0 = 0; \
- x.v1 = 0; \
- x.v2 = 0; \
- }
-
-static void kvm_get_pal_call_data(struct kvm_vcpu *vcpu,
- u64 *gr28, u64 *gr29, u64 *gr30, u64 *gr31) {
- struct exit_ctl_data *p;
-
- if (vcpu) {
- p = &vcpu->arch.exit_data;
- if (p->exit_reason == EXIT_REASON_PAL_CALL) {
- *gr28 = p->u.pal_data.gr28;
- *gr29 = p->u.pal_data.gr29;
- *gr30 = p->u.pal_data.gr30;
- *gr31 = p->u.pal_data.gr31;
- return ;
- }
- }
- printk(KERN_DEBUG"Failed to get vcpu pal data!!!\n");
-}
-
-static void set_pal_result(struct kvm_vcpu *vcpu,
- struct ia64_pal_retval result) {
-
- struct exit_ctl_data *p;
-
- p = kvm_get_exit_data(vcpu);
- if (p->exit_reason == EXIT_REASON_PAL_CALL) {
- p->u.pal_data.ret = result;
- return ;
- }
- INIT_PAL_STATUS_UNIMPLEMENTED(p->u.pal_data.ret);
-}
-
-static void set_sal_result(struct kvm_vcpu *vcpu,
- struct sal_ret_values result) {
- struct exit_ctl_data *p;
-
- p = kvm_get_exit_data(vcpu);
- if (p->exit_reason == EXIT_REASON_SAL_CALL) {
- p->u.sal_data.ret = result;
- return ;
- }
- printk(KERN_WARNING"Failed to set sal result!!\n");
-}
-
-struct cache_flush_args {
- u64 cache_type;
- u64 operation;
- u64 progress;
- long status;
-};
-
-cpumask_t cpu_cache_coherent_map;
-
-static void remote_pal_cache_flush(void *data)
-{
- struct cache_flush_args *args = data;
- long status;
- u64 progress = args->progress;
-
- status = ia64_pal_cache_flush(args->cache_type, args->operation,
- &progress, NULL);
- if (status != 0)
- args->status = status;
-}
-
-static struct ia64_pal_retval pal_cache_flush(struct kvm_vcpu *vcpu)
-{
- u64 gr28, gr29, gr30, gr31;
- struct ia64_pal_retval result = {0, 0, 0, 0};
- struct cache_flush_args args = {0, 0, 0, 0};
- long psr;
-
- gr28 = gr29 = gr30 = gr31 = 0;
- kvm_get_pal_call_data(vcpu, &gr28, &gr29, &gr30, &gr31);
-
- if (gr31 != 0)
- printk(KERN_ERR"vcpu:%p called cache_flush error!\n", vcpu);
-
- /* Always call Host Pal in int=1 */
- gr30 &= ~PAL_CACHE_FLUSH_CHK_INTRS;
- args.cache_type = gr29;
- args.operation = gr30;
- smp_call_function(remote_pal_cache_flush,
- (void *)&args, 1);
- if (args.status != 0)
- printk(KERN_ERR"pal_cache_flush error!,"
- "status:0x%lx\n", args.status);
- /*
- * Call Host PAL cache flush
- * Clear psr.ic when call PAL_CACHE_FLUSH
- */
- local_irq_save(psr);
- result.status = ia64_pal_cache_flush(gr29, gr30, &result.v1,
- &result.v0);
- local_irq_restore(psr);
- if (result.status != 0)
- printk(KERN_ERR"vcpu:%p crashed due to cache_flush err:%ld"
- "in1:%lx,in2:%lx\n",
- vcpu, result.status, gr29, gr30);
-
-#if 0
- if (gr29 == PAL_CACHE_TYPE_COHERENT) {
- cpus_setall(vcpu->arch.cache_coherent_map);
- cpu_clear(vcpu->cpu, vcpu->arch.cache_coherent_map);
- cpus_setall(cpu_cache_coherent_map);
- cpu_clear(vcpu->cpu, cpu_cache_coherent_map);
- }
-#endif
- return result;
-}
-
-struct ia64_pal_retval pal_cache_summary(struct kvm_vcpu *vcpu)
-{
-
- struct ia64_pal_retval result;
-
- PAL_CALL(result, PAL_CACHE_SUMMARY, 0, 0, 0);
- return result;
-}
-
-static struct ia64_pal_retval pal_freq_base(struct kvm_vcpu *vcpu)
-{
-
- struct ia64_pal_retval result;
-
- PAL_CALL(result, PAL_FREQ_BASE, 0, 0, 0);
-
- /*
- * PAL_FREQ_BASE may not be implemented on some platforms;
- * call SAL instead.
- */
- if (result.v0 == 0) {
- result.status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
- &result.v0,
- &result.v1);
- result.v2 = 0;
- }
-
- return result;
-}
-
-/*
- * On the SGI SN2, the ITC isn't stable. Emulation backed by the SN2
- * RTC is used instead. This function patches the ratios from SAL
- * to match the RTC before providing them to the guest.
- */
-static void sn2_patch_itc_freq_ratios(struct ia64_pal_retval *result)
-{
- struct pal_freq_ratio *ratio;
- unsigned long sal_freq, sal_drift, factor;
-
- result->status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
- &sal_freq, &sal_drift);
- ratio = (struct pal_freq_ratio *)&result->v2;
- factor = ((sal_freq * 3) + (sn_rtc_cycles_per_second / 2)) /
- sn_rtc_cycles_per_second;
-
- ratio->num = 3;
- ratio->den = factor;
-}
-
-static struct ia64_pal_retval pal_freq_ratios(struct kvm_vcpu *vcpu)
-{
- struct ia64_pal_retval result;
-
- PAL_CALL(result, PAL_FREQ_RATIOS, 0, 0, 0);
-
- if (vcpu->kvm->arch.is_sn2)
- sn2_patch_itc_freq_ratios(&result);
-
- return result;
-}
-
-static struct ia64_pal_retval pal_logical_to_physica(struct kvm_vcpu *vcpu)
-{
- struct ia64_pal_retval result;
-
- INIT_PAL_STATUS_UNIMPLEMENTED(result);
- return result;
-}
-
-static struct ia64_pal_retval pal_platform_addr(struct kvm_vcpu *vcpu)
-{
-
- struct ia64_pal_retval result;
-
- INIT_PAL_STATUS_SUCCESS(result);
- return result;
-}
-
-static struct ia64_pal_retval pal_proc_get_features(struct kvm_vcpu *vcpu)
-{
-
- struct ia64_pal_retval result = {0, 0, 0, 0};
- long in0, in1, in2, in3;
-
- kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
- result.status = ia64_pal_proc_get_features(&result.v0, &result.v1,
- &result.v2, in2);
-
- return result;
-}
-
-static struct ia64_pal_retval pal_register_info(struct kvm_vcpu *vcpu)
-{
-
- struct ia64_pal_retval result = {0, 0, 0, 0};
- long in0, in1, in2, in3;
-
- kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
- result.status = ia64_pal_register_info(in1, &result.v1, &result.v2);
-
- return result;
-}
-
-static struct ia64_pal_retval pal_cache_info(struct kvm_vcpu *vcpu)
-{
-
- pal_cache_config_info_t ci;
- long status;
- unsigned long in0, in1, in2, in3, r9, r10;
-
- kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
- status = ia64_pal_cache_config_info(in1, in2, &ci);
- r9 = ci.pcci_info_1.pcci1_data;
- r10 = ci.pcci_info_2.pcci2_data;
- return ((struct ia64_pal_retval){status, r9, r10, 0});
-}
-
-#define GUEST_IMPL_VA_MSB 59
-#define GUEST_RID_BITS 18
-
-static struct ia64_pal_retval pal_vm_summary(struct kvm_vcpu *vcpu)
-{
-
- pal_vm_info_1_u_t vminfo1;
- pal_vm_info_2_u_t vminfo2;
- struct ia64_pal_retval result;
-
- PAL_CALL(result, PAL_VM_SUMMARY, 0, 0, 0);
- if (!result.status) {
- vminfo1.pvi1_val = result.v0;
- vminfo1.pal_vm_info_1_s.max_itr_entry = 8;
- vminfo1.pal_vm_info_1_s.max_dtr_entry = 8;
- result.v0 = vminfo1.pvi1_val;
- vminfo2.pal_vm_info_2_s.impl_va_msb = GUEST_IMPL_VA_MSB;
- vminfo2.pal_vm_info_2_s.rid_size = GUEST_RID_BITS;
- result.v1 = vminfo2.pvi2_val;
- }
-
- return result;
-}
-
-static struct ia64_pal_retval pal_vm_info(struct kvm_vcpu *vcpu)
-{
- struct ia64_pal_retval result;
- unsigned long in0, in1, in2, in3;
-
- kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
-
- result.status = ia64_pal_vm_info(in1, in2,
- (pal_tc_info_u_t *)&result.v1, &result.v2);
-
- return result;
-}
-
-static u64 kvm_get_pal_call_index(struct kvm_vcpu *vcpu)
-{
- u64 index = 0;
- struct exit_ctl_data *p;
-
- p = kvm_get_exit_data(vcpu);
- if (p->exit_reason == EXIT_REASON_PAL_CALL)
- index = p->u.pal_data.gr28;
-
- return index;
-}
-
-static void prepare_for_halt(struct kvm_vcpu *vcpu)
-{
- vcpu->arch.timer_pending = 1;
- vcpu->arch.timer_fired = 0;
-}
-
-static struct ia64_pal_retval pal_perf_mon_info(struct kvm_vcpu *vcpu)
-{
- long status;
- unsigned long in0, in1, in2, in3, r9;
- unsigned long pm_buffer[16];
-
- kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
- status = ia64_pal_perf_mon_info(pm_buffer,
- (pal_perf_mon_info_u_t *) &r9);
- if (status != 0) {
- printk(KERN_DEBUG"PAL_PERF_MON_INFO fails ret=%ld\n", status);
- } else {
- if (in1)
- memcpy((void *)in1, pm_buffer, sizeof(pm_buffer));
- else {
- status = PAL_STATUS_EINVAL;
- printk(KERN_WARNING"Invalid parameters "
- "for PAL call:0x%lx!\n", in0);
- }
- }
- return (struct ia64_pal_retval){status, r9, 0, 0};
-}
-
-static struct ia64_pal_retval pal_halt_info(struct kvm_vcpu *vcpu)
-{
- unsigned long in0, in1, in2, in3;
- long status;
- unsigned long res = 1000UL | (1000UL << 16) | (10UL << 32)
- | (1UL << 61) | (1UL << 60);
-
- kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
- if (in1) {
- memcpy((void *)in1, &res, sizeof(res));
- status = 0;
- } else{
- status = PAL_STATUS_EINVAL;
- printk(KERN_WARNING"Invalid parameters "
- "for PAL call:0x%lx!\n", in0);
- }
-
- return (struct ia64_pal_retval){status, 0, 0, 0};
-}
-
-static struct ia64_pal_retval pal_mem_attrib(struct kvm_vcpu *vcpu)
-{
- unsigned long r9;
- long status;
-
- status = ia64_pal_mem_attrib(&r9);
-
- return (struct ia64_pal_retval){status, r9, 0, 0};
-}
-
-static void remote_pal_prefetch_visibility(void *v)
-{
- s64 trans_type = (s64)v;
- ia64_pal_prefetch_visibility(trans_type);
-}
-
-static struct ia64_pal_retval pal_prefetch_visibility(struct kvm_vcpu *vcpu)
-{
- struct ia64_pal_retval result = {0, 0, 0, 0};
- unsigned long in0, in1, in2, in3;
- kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
- result.status = ia64_pal_prefetch_visibility(in1);
- if (result.status == 0) {
- /* Must be performed on all remote processors
- in the coherence domain. */
- smp_call_function(remote_pal_prefetch_visibility,
- (void *)in1, 1);
- /* Unnecessary on remote processor for other vcpus!*/
- result.status = 1;
- }
- return result;
-}
-
-static void remote_pal_mc_drain(void *v)
-{
- ia64_pal_mc_drain();
-}
-
-static struct ia64_pal_retval pal_get_brand_info(struct kvm_vcpu *vcpu)
-{
- struct ia64_pal_retval result = {0, 0, 0, 0};
- unsigned long in0, in1, in2, in3;
-
- kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
-
- if (in1 == 0 && in2) {
- char brand_info[128];
- result.status = ia64_pal_get_brand_info(brand_info);
- if (result.status == PAL_STATUS_SUCCESS)
- memcpy((void *)in2, brand_info, 128);
- } else {
- result.status = PAL_STATUS_REQUIRES_MEMORY;
- printk(KERN_WARNING"Invalid parameters for "
- "PAL call:0x%lx!\n", in0);
- }
-
- return result;
-}
-
-int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-
- u64 gr28;
- struct ia64_pal_retval result;
- int ret = 1;
-
- gr28 = kvm_get_pal_call_index(vcpu);
- switch (gr28) {
- case PAL_CACHE_FLUSH:
- result = pal_cache_flush(vcpu);
- break;
- case PAL_MEM_ATTRIB:
- result = pal_mem_attrib(vcpu);
- break;
- case PAL_CACHE_SUMMARY:
- result = pal_cache_summary(vcpu);
- break;
- case PAL_PERF_MON_INFO:
- result = pal_perf_mon_info(vcpu);
- break;
- case PAL_HALT_INFO:
- result = pal_halt_info(vcpu);
- break;
- case PAL_HALT_LIGHT:
- {
- INIT_PAL_STATUS_SUCCESS(result);
- prepare_for_halt(vcpu);
- if (kvm_highest_pending_irq(vcpu) == -1)
- ret = kvm_emulate_halt(vcpu);
- }
- break;
-
- case PAL_PREFETCH_VISIBILITY:
- result = pal_prefetch_visibility(vcpu);
- break;
- case PAL_MC_DRAIN:
- result.status = ia64_pal_mc_drain();
- /* FIXME: All vcpus likely call PAL_MC_DRAIN.
- That causes the congestion. */
- smp_call_function(remote_pal_mc_drain, NULL, 1);
- break;
-
- case PAL_FREQ_RATIOS:
- result = pal_freq_ratios(vcpu);
- break;
-
- case PAL_FREQ_BASE:
- result = pal_freq_base(vcpu);
- break;
-
- case PAL_LOGICAL_TO_PHYSICAL :
- result = pal_logical_to_physica(vcpu);
- break;
-
- case PAL_VM_SUMMARY :
- result = pal_vm_summary(vcpu);
- break;
-
- case PAL_VM_INFO :
- result = pal_vm_info(vcpu);
- break;
- case PAL_PLATFORM_ADDR :
- result = pal_platform_addr(vcpu);
- break;
- case PAL_CACHE_INFO:
- result = pal_cache_info(vcpu);
- break;
- case PAL_PTCE_INFO:
- INIT_PAL_STATUS_SUCCESS(result);
- result.v1 = (1L << 32) | 1L;
- break;
- case PAL_REGISTER_INFO:
- result = pal_register_info(vcpu);
- break;
- case PAL_VM_PAGE_SIZE:
- result.status = ia64_pal_vm_page_size(&result.v0,
- &result.v1);
- break;
- case PAL_RSE_INFO:
- result.status = ia64_pal_rse_info(&result.v0,
- (pal_hints_u_t *)&result.v1);
- break;
- case PAL_PROC_GET_FEATURES:
- result = pal_proc_get_features(vcpu);
- break;
- case PAL_DEBUG_INFO:
- result.status = ia64_pal_debug_info(&result.v0,
- &result.v1);
- break;
- case PAL_VERSION:
- result.status = ia64_pal_version(
- (pal_version_u_t *)&result.v0,
- (pal_version_u_t *)&result.v1);
- break;
- case PAL_FIXED_ADDR:
- result.status = PAL_STATUS_SUCCESS;
- result.v0 = vcpu->vcpu_id;
- break;
- case PAL_BRAND_INFO:
- result = pal_get_brand_info(vcpu);
- break;
- case PAL_GET_PSTATE:
- case PAL_CACHE_SHARED_INFO:
- INIT_PAL_STATUS_UNIMPLEMENTED(result);
- break;
- default:
- INIT_PAL_STATUS_UNIMPLEMENTED(result);
- printk(KERN_WARNING"kvm: Unsupported pal call,"
- " index:0x%lx\n", gr28);
- }
- set_pal_result(vcpu, result);
- return ret;
-}
-
-static struct sal_ret_values sal_emulator(struct kvm *kvm,
- long index, unsigned long in1,
- unsigned long in2, unsigned long in3,
- unsigned long in4, unsigned long in5,
- unsigned long in6, unsigned long in7)
-{
- unsigned long r9 = 0;
- unsigned long r10 = 0;
- long r11 = 0;
- long status;
-
- status = 0;
- switch (index) {
- case SAL_FREQ_BASE:
- status = ia64_sal_freq_base(in1, &r9, &r10);
- break;
- case SAL_PCI_CONFIG_READ:
- printk(KERN_WARNING"kvm: Not allowed to call here!"
- " SAL_PCI_CONFIG_READ\n");
- break;
- case SAL_PCI_CONFIG_WRITE:
- printk(KERN_WARNING"kvm: Not allowed to call here!"
- " SAL_PCI_CONFIG_WRITE\n");
- break;
- case SAL_SET_VECTORS:
- if (in1 == SAL_VECTOR_OS_BOOT_RENDEZ) {
- if (in4 != 0 || in5 != 0 || in6 != 0 || in7 != 0) {
- status = -2;
- } else {
- kvm->arch.rdv_sal_data.boot_ip = in2;
- kvm->arch.rdv_sal_data.boot_gp = in3;
- }
- printk("Rendvous called! iip:%lx\n\n", in2);
- } else
- printk(KERN_WARNING"kvm: CALLED SAL_SET_VECTORS %lu."
- "ignored...\n", in1);
- break;
- case SAL_GET_STATE_INFO:
- /* No more info. */
- status = -5;
- r9 = 0;
- break;
- case SAL_GET_STATE_INFO_SIZE:
- /* Return a dummy size. */
- status = 0;
- r9 = 128;
- break;
- case SAL_CLEAR_STATE_INFO:
- /* Noop. */
- break;
- case SAL_MC_RENDEZ:
- printk(KERN_WARNING
- "kvm: called SAL_MC_RENDEZ. ignored...\n");
- break;
- case SAL_MC_SET_PARAMS:
- printk(KERN_WARNING
- "kvm: called SAL_MC_SET_PARAMS.ignored!\n");
- break;
- case SAL_CACHE_FLUSH:
- if (1) {
- /*Flush using SAL.
- This method is faster but has a side
- effect on other vcpus running on
- this cpu. */
- status = ia64_sal_cache_flush(in1);
- } else {
- /* Maybe we need to implement a method
- without this side effect. */
- status = 0;
- }
- break;
- case SAL_CACHE_INIT:
- printk(KERN_WARNING
- "kvm: called SAL_CACHE_INIT. ignored...\n");
- break;
- case SAL_UPDATE_PAL:
- printk(KERN_WARNING
- "kvm: CALLED SAL_UPDATE_PAL. ignored...\n");
- break;
- default:
- printk(KERN_WARNING"kvm: called SAL_CALL with unknown index."
- " index:%ld\n", index);
- status = -1;
- break;
- }
- return ((struct sal_ret_values) {status, r9, r10, r11});
-}
-
-static void kvm_get_sal_call_data(struct kvm_vcpu *vcpu, u64 *in0, u64 *in1,
- u64 *in2, u64 *in3, u64 *in4, u64 *in5, u64 *in6, u64 *in7){
-
- struct exit_ctl_data *p;
-
- p = kvm_get_exit_data(vcpu);
-
- if (p->exit_reason == EXIT_REASON_SAL_CALL) {
- *in0 = p->u.sal_data.in0;
- *in1 = p->u.sal_data.in1;
- *in2 = p->u.sal_data.in2;
- *in3 = p->u.sal_data.in3;
- *in4 = p->u.sal_data.in4;
- *in5 = p->u.sal_data.in5;
- *in6 = p->u.sal_data.in6;
- *in7 = p->u.sal_data.in7;
- return ;
- }
- *in0 = 0;
-}
-
-void kvm_sal_emul(struct kvm_vcpu *vcpu)
-{
-
- struct sal_ret_values result;
- u64 index, in1, in2, in3, in4, in5, in6, in7;
-
- kvm_get_sal_call_data(vcpu, &index, &in1, &in2,
- &in3, &in4, &in5, &in6, &in7);
- result = sal_emulator(vcpu->kvm, index, in1, in2, in3,
- in4, in5, in6, in7);
- set_sal_result(vcpu, result);
-}
diff --git a/arch/ia64/kvm/kvm_lib.c b/arch/ia64/kvm/kvm_lib.c
deleted file mode 100644
index f1268b8e6f9..00000000000
--- a/arch/ia64/kvm/kvm_lib.c
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * kvm_lib.c: Compile some libraries for kvm-intel module.
- *
- * Just include kernel's library, and disable symbols export.
- * Copyright (C) 2008, Intel Corporation.
- * Xiantao Zhang (xiantao.zhang@intel.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-#undef CONFIG_MODULES
-#include <linux/module.h>
-#undef CONFIG_KALLSYMS
-#undef EXPORT_SYMBOL
-#undef EXPORT_SYMBOL_GPL
-#define EXPORT_SYMBOL(sym)
-#define EXPORT_SYMBOL_GPL(sym)
-#include "../../../lib/vsprintf.c"
-#include "../../../lib/ctype.c"
diff --git a/arch/ia64/kvm/kvm_minstate.h b/arch/ia64/kvm/kvm_minstate.h
deleted file mode 100644
index b2bcaa2787a..00000000000
--- a/arch/ia64/kvm/kvm_minstate.h
+++ /dev/null
@@ -1,266 +0,0 @@
-/*
- * kvm_minstate.h: min save macros
- * Copyright (c) 2007, Intel Corporation.
- *
- * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
- * Xiantao Zhang (xiantao.zhang@intel.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- */
-
-
-#include <asm/asmmacro.h>
-#include <asm/types.h>
-#include <asm/kregs.h>
-#include <asm/kvm_host.h>
-
-#include "asm-offsets.h"
-
-#define KVM_MINSTATE_START_SAVE_MIN \
- mov ar.rsc = 0;/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */\
- ;; \
- mov.m r28 = ar.rnat; \
- addl r22 = VMM_RBS_OFFSET,r1; /* compute base of RBS */ \
- ;; \
- lfetch.fault.excl.nt1 [r22]; \
- addl r1 = KVM_STK_OFFSET-VMM_PT_REGS_SIZE, r1; \
- mov r23 = ar.bspstore; /* save ar.bspstore */ \
- ;; \
- mov ar.bspstore = r22; /* switch to kernel RBS */\
- ;; \
- mov r18 = ar.bsp; \
- mov ar.rsc = 0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */
-
-
-
-#define KVM_MINSTATE_END_SAVE_MIN \
- bsw.1; /* switch back to bank 1 (must be last in insn group) */\
- ;;
-
-
-#define PAL_VSA_SYNC_READ \
- /* begin to call pal vps sync_read */ \
-{.mii; \
- add r25 = VMM_VPD_BASE_OFFSET, r21; \
- nop 0x0; \
- mov r24=ip; \
- ;; \
-} \
-{.mmb \
- add r24=0x20, r24; \
- ld8 r25 = [r25]; /* read vpd base */ \
- br.cond.sptk kvm_vps_sync_read; /*call the service*/ \
- ;; \
-}; \
-
-
-#define KVM_MINSTATE_GET_CURRENT(reg) mov reg=r21
-
-/*
- * KVM_DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
- * the minimum state necessary that allows us to turn psr.ic back
- * on.
- *
- * Assumed state upon entry:
- * psr.ic: off
- * r31: contains saved predicates (pr)
- *
- * Upon exit, the state is as follows:
- * psr.ic: off
- * r2 = points to &pt_regs.r16
- * r8 = contents of ar.ccv
- * r9 = contents of ar.csd
- * r10 = contents of ar.ssd
- * r11 = FPSR_DEFAULT
- * r12 = kernel sp (kernel virtual address)
- * r13 = points to current task_struct (kernel virtual address)
- * p15 = TRUE if psr.i is set in cr.ipsr
- * predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
- * preserved
- *
- * Note that psr.ic is NOT turned on by this macro. This is so that
- * we can pass interruption state as arguments to a handler.
- */
-
-
-#define PT(f) (VMM_PT_REGS_##f##_OFFSET)
-
-#define KVM_DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \
- KVM_MINSTATE_GET_CURRENT(r16); /* M (or M;;I) */ \
- mov r27 = ar.rsc; /* M */ \
- mov r20 = r1; /* A */ \
- mov r25 = ar.unat; /* M */ \
- mov r29 = cr.ipsr; /* M */ \
- mov r26 = ar.pfs; /* I */ \
- mov r18 = cr.isr; \
- COVER; /* B;; (or nothing) */ \
- ;; \
- tbit.z p0,p15 = r29,IA64_PSR_I_BIT; \
- mov r1 = r16; \
-/* mov r21=r16; */ \
- /* switch from user to kernel RBS: */ \
- ;; \
- invala; /* M */ \
- SAVE_IFS; \
- ;; \
- KVM_MINSTATE_START_SAVE_MIN \
- adds r17 = 2*L1_CACHE_BYTES,r1;/* cache-line size */ \
- adds r16 = PT(CR_IPSR),r1; \
- ;; \
- lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
- st8 [r16] = r29; /* save cr.ipsr */ \
- ;; \
- lfetch.fault.excl.nt1 [r17]; \
- tbit.nz p15,p0 = r29,IA64_PSR_I_BIT; \
- mov r29 = b0 \
- ;; \
- adds r16 = PT(R8),r1; /* initialize first base pointer */\
- adds r17 = PT(R9),r1; /* initialize second base pointer */\
- ;; \
-.mem.offset 0,0; st8.spill [r16] = r8,16; \
-.mem.offset 8,0; st8.spill [r17] = r9,16; \
- ;; \
-.mem.offset 0,0; st8.spill [r16] = r10,24; \
-.mem.offset 8,0; st8.spill [r17] = r11,24; \
- ;; \
- mov r9 = cr.iip; /* M */ \
- mov r10 = ar.fpsr; /* M */ \
- ;; \
- st8 [r16] = r9,16; /* save cr.iip */ \
- st8 [r17] = r30,16; /* save cr.ifs */ \
- sub r18 = r18,r22; /* r18=RSE.ndirty*8 */ \
- ;; \
- st8 [r16] = r25,16; /* save ar.unat */ \
- st8 [r17] = r26,16; /* save ar.pfs */ \
- shl r18 = r18,16; /* compute ar.rsc used for "loadrs" */\
- ;; \
- st8 [r16] = r27,16; /* save ar.rsc */ \
- st8 [r17] = r28,16; /* save ar.rnat */ \
- ;; /* avoid RAW on r16 & r17 */ \
- st8 [r16] = r23,16; /* save ar.bspstore */ \
- st8 [r17] = r31,16; /* save predicates */ \
- ;; \
- st8 [r16] = r29,16; /* save b0 */ \
- st8 [r17] = r18,16; /* save ar.rsc value for "loadrs" */\
- ;; \
-.mem.offset 0,0; st8.spill [r16] = r20,16;/* save original r1 */ \
-.mem.offset 8,0; st8.spill [r17] = r12,16; \
- adds r12 = -16,r1; /* switch to kernel memory stack */ \
- ;; \
-.mem.offset 0,0; st8.spill [r16] = r13,16; \
-.mem.offset 8,0; st8.spill [r17] = r10,16; /* save ar.fpsr */\
- mov r13 = r21; /* establish `current' */ \
- ;; \
-.mem.offset 0,0; st8.spill [r16] = r15,16; \
-.mem.offset 8,0; st8.spill [r17] = r14,16; \
- ;; \
-.mem.offset 0,0; st8.spill [r16] = r2,16; \
-.mem.offset 8,0; st8.spill [r17] = r3,16; \
- adds r2 = VMM_PT_REGS_R16_OFFSET,r1; \
- ;; \
- adds r16 = VMM_VCPU_IIPA_OFFSET,r13; \
- adds r17 = VMM_VCPU_ISR_OFFSET,r13; \
- mov r26 = cr.iipa; \
- mov r27 = cr.isr; \
- ;; \
- st8 [r16] = r26; \
- st8 [r17] = r27; \
- ;; \
- EXTRA; \
- mov r8 = ar.ccv; \
- mov r9 = ar.csd; \
- mov r10 = ar.ssd; \
- movl r11 = FPSR_DEFAULT; /* L-unit */ \
- adds r17 = VMM_VCPU_GP_OFFSET,r13; \
- ;; \
- ld8 r1 = [r17];/* establish kernel global pointer */ \
- ;; \
- PAL_VSA_SYNC_READ \
- KVM_MINSTATE_END_SAVE_MIN
-
-/*
- * SAVE_REST saves the remainder of pt_regs (with psr.ic on).
- *
- * Assumed state upon entry:
- * psr.ic: on
- * r2: points to &pt_regs.f6
- * r3: points to &pt_regs.f7
- * r8: contents of ar.ccv
- * r9: contents of ar.csd
- * r10: contents of ar.ssd
- * r11: FPSR_DEFAULT
- *
- * Registers r14 and r15 are guaranteed not to be touched by SAVE_REST.
- */
-#define KVM_SAVE_REST \
-.mem.offset 0,0; st8.spill [r2] = r16,16; \
-.mem.offset 8,0; st8.spill [r3] = r17,16; \
- ;; \
-.mem.offset 0,0; st8.spill [r2] = r18,16; \
-.mem.offset 8,0; st8.spill [r3] = r19,16; \
- ;; \
-.mem.offset 0,0; st8.spill [r2] = r20,16; \
-.mem.offset 8,0; st8.spill [r3] = r21,16; \
- mov r18=b6; \
- ;; \
-.mem.offset 0,0; st8.spill [r2] = r22,16; \
-.mem.offset 8,0; st8.spill [r3] = r23,16; \
- mov r19 = b7; \
- ;; \
-.mem.offset 0,0; st8.spill [r2] = r24,16; \
-.mem.offset 8,0; st8.spill [r3] = r25,16; \
- ;; \
-.mem.offset 0,0; st8.spill [r2] = r26,16; \
-.mem.offset 8,0; st8.spill [r3] = r27,16; \
- ;; \
-.mem.offset 0,0; st8.spill [r2] = r28,16; \
-.mem.offset 8,0; st8.spill [r3] = r29,16; \
- ;; \
-.mem.offset 0,0; st8.spill [r2] = r30,16; \
-.mem.offset 8,0; st8.spill [r3] = r31,32; \
- ;; \
- mov ar.fpsr = r11; \
- st8 [r2] = r8,8; \
- adds r24 = PT(B6)-PT(F7),r3; \
- adds r25 = PT(B7)-PT(F7),r3; \
- ;; \
- st8 [r24] = r18,16; /* b6 */ \
- st8 [r25] = r19,16; /* b7 */ \
- adds r2 = PT(R4)-PT(F6),r2; \
- adds r3 = PT(R5)-PT(F7),r3; \
- ;; \
- st8 [r24] = r9; /* ar.csd */ \
- st8 [r25] = r10; /* ar.ssd */ \
- ;; \
- mov r18 = ar.unat; \
- adds r19 = PT(EML_UNAT)-PT(R4),r2; \
- ;; \
- st8 [r19] = r18; /* eml_unat */ \
-
-
-#define KVM_SAVE_EXTRA \
-.mem.offset 0,0; st8.spill [r2] = r4,16; \
-.mem.offset 8,0; st8.spill [r3] = r5,16; \
- ;; \
-.mem.offset 0,0; st8.spill [r2] = r6,16; \
-.mem.offset 8,0; st8.spill [r3] = r7; \
- ;; \
- mov r26 = ar.unat; \
- ;; \
- st8 [r2] = r26;/* eml_unat */ \
-
-#define KVM_SAVE_MIN_WITH_COVER KVM_DO_SAVE_MIN(cover, mov r30 = cr.ifs,)
-#define KVM_SAVE_MIN_WITH_COVER_R19 KVM_DO_SAVE_MIN(cover, mov r30 = cr.ifs, mov r15 = r19)
-#define KVM_SAVE_MIN KVM_DO_SAVE_MIN( , mov r30 = r0, )
diff --git a/arch/ia64/kvm/lapic.h b/arch/ia64/kvm/lapic.h
deleted file mode 100644
index c5f92a926a9..00000000000
--- a/arch/ia64/kvm/lapic.h
+++ /dev/null
@@ -1,30 +0,0 @@
-#ifndef __KVM_IA64_LAPIC_H
-#define __KVM_IA64_LAPIC_H
-
-#include <linux/kvm_host.h>
-
-/*
- * vlsapic
- */
-struct kvm_lapic{
- struct kvm_vcpu *vcpu;
- uint64_t insvc[4];
- uint64_t vhpi;
- uint8_t xtp;
- uint8_t pal_init_pending;
- uint8_t pad[2];
-};
-
-int kvm_create_lapic(struct kvm_vcpu *vcpu);
-void kvm_free_lapic(struct kvm_vcpu *vcpu);
-
-int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
-int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
-int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
- int short_hand, int dest, int dest_mode);
-int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
-int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq);
-#define kvm_apic_present(x) (true)
-#define kvm_lapic_enabled(x) (true)
-
-#endif
diff --git a/arch/ia64/kvm/memcpy.S b/arch/ia64/kvm/memcpy.S
deleted file mode 100644
index c04cdbe9f80..00000000000
--- a/arch/ia64/kvm/memcpy.S
+++ /dev/null
@@ -1 +0,0 @@
-#include "../lib/memcpy.S"
diff --git a/arch/ia64/kvm/memset.S b/arch/ia64/kvm/memset.S
deleted file mode 100644
index 83c3066d844..00000000000
--- a/arch/ia64/kvm/memset.S
+++ /dev/null
@@ -1 +0,0 @@
-#include "../lib/memset.S"
diff --git a/arch/ia64/kvm/misc.h b/arch/ia64/kvm/misc.h
deleted file mode 100644
index dd979e00b57..00000000000
--- a/arch/ia64/kvm/misc.h
+++ /dev/null
@@ -1,94 +0,0 @@
-#ifndef __KVM_IA64_MISC_H
-#define __KVM_IA64_MISC_H
-
-#include <linux/kvm_host.h>
-/*
- * misc.h
- * Copyright (C) 2007, Intel Corporation.
- * Xiantao Zhang (xiantao.zhang@intel.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- */
-
-/*
- * Return the p2m base address on the host side.
- */
-static inline uint64_t *kvm_host_get_pmt(struct kvm *kvm)
-{
- return (uint64_t *)(kvm->arch.vm_base +
- offsetof(struct kvm_vm_data, kvm_p2m));
-}
-
-static inline void kvm_set_pmt_entry(struct kvm *kvm, gfn_t gfn,
- u64 paddr, u64 mem_flags)
-{
- uint64_t *pmt_base = kvm_host_get_pmt(kvm);
- unsigned long pte;
-
- pte = PAGE_ALIGN(paddr) | mem_flags;
- pmt_base[gfn] = pte;
-}
-
-/*Function for translating host address to guest address*/
-
-static inline void *to_guest(struct kvm *kvm, void *addr)
-{
- return (void *)((unsigned long)(addr) - kvm->arch.vm_base +
- KVM_VM_DATA_BASE);
-}
-
-/*Function for translating guest address to host address*/
-
-static inline void *to_host(struct kvm *kvm, void *addr)
-{
- return (void *)((unsigned long)addr - KVM_VM_DATA_BASE
- + kvm->arch.vm_base);
-}
-
-/* Get host context of the vcpu */
-static inline union context *kvm_get_host_context(struct kvm_vcpu *vcpu)
-{
- union context *ctx = &vcpu->arch.host;
- return to_guest(vcpu->kvm, ctx);
-}
-
-/* Get guest context of the vcpu */
-static inline union context *kvm_get_guest_context(struct kvm_vcpu *vcpu)
-{
- union context *ctx = &vcpu->arch.guest;
- return to_guest(vcpu->kvm, ctx);
-}
-
-/* kvm get exit data from gvmm! */
-static inline struct exit_ctl_data *kvm_get_exit_data(struct kvm_vcpu *vcpu)
-{
- return &vcpu->arch.exit_data;
-}
-
-/*kvm get vcpu ioreq for kvm module!*/
-static inline struct kvm_mmio_req *kvm_get_vcpu_ioreq(struct kvm_vcpu *vcpu)
-{
- struct exit_ctl_data *p_ctl_data;
-
- if (vcpu) {
- p_ctl_data = kvm_get_exit_data(vcpu);
- if (p_ctl_data->exit_reason == EXIT_REASON_MMIO_INSTRUCTION)
- return &p_ctl_data->u.ioreq;
- }
-
- return NULL;
-}
-
-#endif
diff --git a/arch/ia64/kvm/mmio.c b/arch/ia64/kvm/mmio.c
deleted file mode 100644
index f1e17d3d6cd..00000000000
--- a/arch/ia64/kvm/mmio.c
+++ /dev/null
@@ -1,336 +0,0 @@
-/*
- * mmio.c: MMIO emulation components.
- * Copyright (c) 2004, Intel Corporation.
- * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
- * Kun Tian (Kevin Tian) (Kevin.tian@intel.com)
- *
- * Copyright (c) 2007 Intel Corporation KVM support.
- * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
- * Xiantao Zhang (xiantao.zhang@intel.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- */
-
-#include <linux/kvm_host.h>
-
-#include "vcpu.h"
-
-static void vlsapic_write_xtp(struct kvm_vcpu *v, uint8_t val)
-{
- VLSAPIC_XTP(v) = val;
-}
-
-/*
- * LSAPIC OFFSET
- */
-#define PIB_LOW_HALF(ofst) !(ofst & (1 << 20))
-#define PIB_OFST_INTA 0x1E0000
-#define PIB_OFST_XTP 0x1E0008
-
-/*
- * execute write IPI op.
- */
-static void vlsapic_write_ipi(struct kvm_vcpu *vcpu,
- uint64_t addr, uint64_t data)
-{
- struct exit_ctl_data *p = &current_vcpu->arch.exit_data;
- unsigned long psr;
-
- local_irq_save(psr);
-
- p->exit_reason = EXIT_REASON_IPI;
- p->u.ipi_data.addr.val = addr;
- p->u.ipi_data.data.val = data;
- vmm_transition(current_vcpu);
-
- local_irq_restore(psr);
-
-}
-
-void lsapic_write(struct kvm_vcpu *v, unsigned long addr,
- unsigned long length, unsigned long val)
-{
- addr &= (PIB_SIZE - 1);
-
- switch (addr) {
- case PIB_OFST_INTA:
- panic_vm(v, "Undefined write on PIB INTA\n");
- break;
- case PIB_OFST_XTP:
- if (length == 1) {
- vlsapic_write_xtp(v, val);
- } else {
- panic_vm(v, "Undefined write on PIB XTP\n");
- }
- break;
- default:
- if (PIB_LOW_HALF(addr)) {
- /*Lower half */
- if (length != 8)
- panic_vm(v, "Can't LHF write with size %ld!\n",
- length);
- else
- vlsapic_write_ipi(v, addr, val);
- } else { /*Upper half */
- panic_vm(v, "IPI-UHF write %lx\n", addr);
- }
- break;
- }
-}
-
-unsigned long lsapic_read(struct kvm_vcpu *v, unsigned long addr,
- unsigned long length)
-{
- uint64_t result = 0;
-
- addr &= (PIB_SIZE - 1);
-
- switch (addr) {
- case PIB_OFST_INTA:
- if (length == 1) /* 1 byte load */
- ; /* There is no i8259, there is no INTA access*/
- else
- panic_vm(v, "Undefined read on PIB INTA\n");
-
- break;
- case PIB_OFST_XTP:
- if (length == 1) {
- result = VLSAPIC_XTP(v);
- } else {
- panic_vm(v, "Undefined read on PIB XTP\n");
- }
- break;
- default:
- panic_vm(v, "Undefined addr access for lsapic!\n");
- break;
- }
- return result;
-}
-
-static void mmio_access(struct kvm_vcpu *vcpu, u64 src_pa, u64 *dest,
- u16 s, int ma, int dir)
-{
- unsigned long iot;
- struct exit_ctl_data *p = &vcpu->arch.exit_data;
- unsigned long psr;
-
- iot = __gpfn_is_io(src_pa >> PAGE_SHIFT);
-
- local_irq_save(psr);
-
- /*Intercept the access for PIB range*/
- if (iot == GPFN_PIB) {
- if (!dir)
- lsapic_write(vcpu, src_pa, s, *dest);
- else
- *dest = lsapic_read(vcpu, src_pa, s);
- goto out;
- }
- p->exit_reason = EXIT_REASON_MMIO_INSTRUCTION;
- p->u.ioreq.addr = src_pa;
- p->u.ioreq.size = s;
- p->u.ioreq.dir = dir;
- if (dir == IOREQ_WRITE)
- p->u.ioreq.data = *dest;
- p->u.ioreq.state = STATE_IOREQ_READY;
- vmm_transition(vcpu);
-
- if (p->u.ioreq.state == STATE_IORESP_READY) {
- if (dir == IOREQ_READ)
- /* it's necessary to ensure zero extending */
- *dest = p->u.ioreq.data & (~0UL >> (64-(s*8)));
- } else
- panic_vm(vcpu, "Unhandled mmio access returned!\n");
-out:
- local_irq_restore(psr);
- return ;
-}
-
-/*
- dir 1: read 0:write
- inst_type 0:integer 1:floating point
- */
-#define SL_INTEGER 0 /* store/load integer*/
-#define SL_FLOATING 1 /* store/load floating*/
-
-void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma)
-{
- struct kvm_pt_regs *regs;
- IA64_BUNDLE bundle;
- int slot, dir = 0;
- int inst_type = -1;
- u16 size = 0;
- u64 data, slot1a, slot1b, temp, update_reg;
- s32 imm;
- INST64 inst;
-
- regs = vcpu_regs(vcpu);
-
- if (fetch_code(vcpu, regs->cr_iip, &bundle)) {
- /* if fetch code fail, return and try again */
- return;
- }
- slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
- if (!slot)
- inst.inst = bundle.slot0;
- else if (slot == 1) {
- slot1a = bundle.slot1a;
- slot1b = bundle.slot1b;
- inst.inst = slot1a + (slot1b << 18);
- } else if (slot == 2)
- inst.inst = bundle.slot2;
-
- /* Integer Load/Store */
- if (inst.M1.major == 4 && inst.M1.m == 0 && inst.M1.x == 0) {
- inst_type = SL_INTEGER;
- size = (inst.M1.x6 & 0x3);
- if ((inst.M1.x6 >> 2) > 0xb) {
- /*write*/
- dir = IOREQ_WRITE;
- data = vcpu_get_gr(vcpu, inst.M4.r2);
- } else if ((inst.M1.x6 >> 2) < 0xb) {
- /*read*/
- dir = IOREQ_READ;
- }
- } else if (inst.M2.major == 4 && inst.M2.m == 1 && inst.M2.x == 0) {
- /* Integer Load + Reg update */
- inst_type = SL_INTEGER;
- dir = IOREQ_READ;
- size = (inst.M2.x6 & 0x3);
- temp = vcpu_get_gr(vcpu, inst.M2.r3);
- update_reg = vcpu_get_gr(vcpu, inst.M2.r2);
- temp += update_reg;
- vcpu_set_gr(vcpu, inst.M2.r3, temp, 0);
- } else if (inst.M3.major == 5) {
- /*Integer Load/Store + Imm update*/
- inst_type = SL_INTEGER;
- size = (inst.M3.x6&0x3);
- if ((inst.M5.x6 >> 2) > 0xb) {
- /*write*/
- dir = IOREQ_WRITE;
- data = vcpu_get_gr(vcpu, inst.M5.r2);
- temp = vcpu_get_gr(vcpu, inst.M5.r3);
- imm = (inst.M5.s << 31) | (inst.M5.i << 30) |
- (inst.M5.imm7 << 23);
- temp += imm >> 23;
- vcpu_set_gr(vcpu, inst.M5.r3, temp, 0);
-
- } else if ((inst.M3.x6 >> 2) < 0xb) {
- /*read*/
- dir = IOREQ_READ;
- temp = vcpu_get_gr(vcpu, inst.M3.r3);
- imm = (inst.M3.s << 31) | (inst.M3.i << 30) |
- (inst.M3.imm7 << 23);
- temp += imm >> 23;
- vcpu_set_gr(vcpu, inst.M3.r3, temp, 0);
-
- }
- } else if (inst.M9.major == 6 && inst.M9.x6 == 0x3B
- && inst.M9.m == 0 && inst.M9.x == 0) {
- /* Floating-point spill*/
- struct ia64_fpreg v;
-
- inst_type = SL_FLOATING;
- dir = IOREQ_WRITE;
- vcpu_get_fpreg(vcpu, inst.M9.f2, &v);
- /* Write high word. FIXME: this is a kludge! */
- v.u.bits[1] &= 0x3ffff;
- mmio_access(vcpu, padr + 8, (u64 *)&v.u.bits[1], 8,
- ma, IOREQ_WRITE);
- data = v.u.bits[0];
- size = 3;
- } else if (inst.M10.major == 7 && inst.M10.x6 == 0x3B) {
- /* Floating-point spill + Imm update */
- struct ia64_fpreg v;
-
- inst_type = SL_FLOATING;
- dir = IOREQ_WRITE;
- vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
- temp = vcpu_get_gr(vcpu, inst.M10.r3);
- imm = (inst.M10.s << 31) | (inst.M10.i << 30) |
- (inst.M10.imm7 << 23);
- temp += imm >> 23;
- vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);
-
- /* Write high word. FIXME: this is a kludge! */
- v.u.bits[1] &= 0x3ffff;
- mmio_access(vcpu, padr + 8, (u64 *)&v.u.bits[1],
- 8, ma, IOREQ_WRITE);
- data = v.u.bits[0];
- size = 3;
- } else if (inst.M10.major == 7 && inst.M10.x6 == 0x31) {
- /* Floating-point stf8 + Imm update */
- struct ia64_fpreg v;
- inst_type = SL_FLOATING;
- dir = IOREQ_WRITE;
- size = 3;
- vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
- data = v.u.bits[0]; /* Significand. */
- temp = vcpu_get_gr(vcpu, inst.M10.r3);
- imm = (inst.M10.s << 31) | (inst.M10.i << 30) |
- (inst.M10.imm7 << 23);
- temp += imm >> 23;
- vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);
- } else if (inst.M15.major == 7 && inst.M15.x6 >= 0x2c
- && inst.M15.x6 <= 0x2f) {
- temp = vcpu_get_gr(vcpu, inst.M15.r3);
- imm = (inst.M15.s << 31) | (inst.M15.i << 30) |
- (inst.M15.imm7 << 23);
- temp += imm >> 23;
- vcpu_set_gr(vcpu, inst.M15.r3, temp, 0);
-
- vcpu_increment_iip(vcpu);
- return;
- } else if (inst.M12.major == 6 && inst.M12.m == 1
- && inst.M12.x == 1 && inst.M12.x6 == 1) {
- /* Floating-point Load Pair + Imm ldfp8 M12*/
- struct ia64_fpreg v;
-
- inst_type = SL_FLOATING;
- dir = IOREQ_READ;
- size = 8; /*ldfd*/
- mmio_access(vcpu, padr, &data, size, ma, dir);
- v.u.bits[0] = data;
- v.u.bits[1] = 0x1003E;
- vcpu_set_fpreg(vcpu, inst.M12.f1, &v);
- padr += 8;
- mmio_access(vcpu, padr, &data, size, ma, dir);
- v.u.bits[0] = data;
- v.u.bits[1] = 0x1003E;
- vcpu_set_fpreg(vcpu, inst.M12.f2, &v);
- padr += 8;
- vcpu_set_gr(vcpu, inst.M12.r3, padr, 0);
- vcpu_increment_iip(vcpu);
- return;
- } else {
- inst_type = -1;
- panic_vm(vcpu, "Unsupported MMIO access instruction! "
- "Bundle[0]=0x%lx, Bundle[1]=0x%lx\n",
- bundle.i64[0], bundle.i64[1]);
- }
-
- size = 1 << size;
- if (dir == IOREQ_WRITE) {
- mmio_access(vcpu, padr, &data, size, ma, dir);
- } else {
- mmio_access(vcpu, padr, &data, size, ma, dir);
- if (inst_type == SL_INTEGER)
- vcpu_set_gr(vcpu, inst.M1.r1, data, 0);
- else
- panic_vm(vcpu, "Unsupported instruction type!\n");
-
- }
- vcpu_increment_iip(vcpu);
-}
diff --git a/arch/ia64/kvm/optvfault.S b/arch/ia64/kvm/optvfault.S
deleted file mode 100644
index f793be3efff..00000000000
--- a/arch/ia64/kvm/optvfault.S
+++ /dev/null
@@ -1,1090 +0,0 @@
-/*
- * arch/ia64/kvm/optvfault.S
- * optimize virtualization fault handler
- *
- * Copyright (C) 2006 Intel Co
- * Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
- * Copyright (C) 2008 Intel Co
- * Add the support for Tukwila processors.
- * Xiantao Zhang <xiantao.zhang@intel.com>
- */
-
-#include <asm/asmmacro.h>
-#include <asm/processor.h>
-#include <asm/kvm_host.h>
-
-#include "vti.h"
-#include "asm-offsets.h"
-
-#define ACCE_MOV_FROM_AR
-#define ACCE_MOV_FROM_RR
-#define ACCE_MOV_TO_RR
-#define ACCE_RSM
-#define ACCE_SSM
-#define ACCE_MOV_TO_PSR
-#define ACCE_THASH
-
-#define VMX_VPS_SYNC_READ \
- add r16=VMM_VPD_BASE_OFFSET,r21; \
- mov r17 = b0; \
- mov r18 = r24; \
- mov r19 = r25; \
- mov r20 = r31; \
- ;; \
-{.mii; \
- ld8 r16 = [r16]; \
- nop 0x0; \
- mov r24 = ip; \
- ;; \
-}; \
-{.mmb; \
- add r24=0x20, r24; \
- mov r25 =r16; \
- br.sptk.many kvm_vps_sync_read; \
-}; \
- mov b0 = r17; \
- mov r24 = r18; \
- mov r25 = r19; \
- mov r31 = r20
-
-ENTRY(kvm_vps_entry)
- adds r29 = VMM_VCPU_VSA_BASE_OFFSET,r21
- ;;
- ld8 r29 = [r29]
- ;;
- add r29 = r29, r30
- ;;
- mov b0 = r29
- br.sptk.many b0
-END(kvm_vps_entry)
-
-/*
- * Inputs:
- * r24 : return address
- * r25 : vpd
- * r29 : scratch
- *
- */
-GLOBAL_ENTRY(kvm_vps_sync_read)
- movl r30 = PAL_VPS_SYNC_READ
- ;;
- br.sptk.many kvm_vps_entry
-END(kvm_vps_sync_read)
-
-/*
- * Inputs:
- * r24 : return address
- * r25 : vpd
- * r29 : scratch
- *
- */
-GLOBAL_ENTRY(kvm_vps_sync_write)
- movl r30 = PAL_VPS_SYNC_WRITE
- ;;
- br.sptk.many kvm_vps_entry
-END(kvm_vps_sync_write)
-
-/*
- * Inputs:
- * r23 : pr
- * r24 : guest b0
- * r25 : vpd
- *
- */
-GLOBAL_ENTRY(kvm_vps_resume_normal)
- movl r30 = PAL_VPS_RESUME_NORMAL
- ;;
- mov pr=r23,-2
- br.sptk.many kvm_vps_entry
-END(kvm_vps_resume_normal)
-
-/*
- * Inputs:
- * r23 : pr
- * r24 : guest b0
- * r25 : vpd
- * r17 : isr
- */
-GLOBAL_ENTRY(kvm_vps_resume_handler)
- movl r30 = PAL_VPS_RESUME_HANDLER
- ;;
- ld8 r26=[r25]
- shr r17=r17,IA64_ISR_IR_BIT
- ;;
- dep r26=r17,r26,63,1 // bit 63 of r26 indicate whether enable CFLE
- mov pr=r23,-2
- br.sptk.many kvm_vps_entry
-END(kvm_vps_resume_handler)
-
-//mov r1=ar3
-GLOBAL_ENTRY(kvm_asm_mov_from_ar)
-#ifndef ACCE_MOV_FROM_AR
- br.many kvm_virtualization_fault_back
-#endif
- add r18=VMM_VCPU_ITC_OFS_OFFSET, r21
- add r16=VMM_VCPU_LAST_ITC_OFFSET,r21
- extr.u r17=r25,6,7
- ;;
- ld8 r18=[r18]
- mov r19=ar.itc
- mov r24=b0
- ;;
- add r19=r19,r18
- addl r20=@gprel(asm_mov_to_reg),gp
- ;;
- st8 [r16] = r19
- adds r30=kvm_resume_to_guest-asm_mov_to_reg,r20
- shladd r17=r17,4,r20
- ;;
- mov b0=r17
- br.sptk.few b0
- ;;
-END(kvm_asm_mov_from_ar)
-
-/*
- * Special SGI SN2 optimized version of mov_from_ar using the SN2 RTC
- * clock as its source for emulating the ITC. This version will be
- * copied on top of the original version if the host is determined to
- * be an SN2.
- */
-GLOBAL_ENTRY(kvm_asm_mov_from_ar_sn2)
- add r18=VMM_VCPU_ITC_OFS_OFFSET, r21
- movl r19 = (KVM_VMM_BASE+(1<<KVM_VMM_SHIFT))
-
- add r16=VMM_VCPU_LAST_ITC_OFFSET,r21
- extr.u r17=r25,6,7
- mov r24=b0
- ;;
- ld8 r18=[r18]
- ld8 r19=[r19]
- addl r20=@gprel(asm_mov_to_reg),gp
- ;;
- add r19=r19,r18
- shladd r17=r17,4,r20
- ;;
- adds r30=kvm_resume_to_guest-asm_mov_to_reg,r20
- st8 [r16] = r19
- mov b0=r17
- br.sptk.few b0
- ;;
-END(kvm_asm_mov_from_ar_sn2)
-
-
-
-// mov r1=rr[r3]
-GLOBAL_ENTRY(kvm_asm_mov_from_rr)
-#ifndef ACCE_MOV_FROM_RR
- br.many kvm_virtualization_fault_back
-#endif
- extr.u r16=r25,20,7
- extr.u r17=r25,6,7
- addl r20=@gprel(asm_mov_from_reg),gp
- ;;
- adds r30=kvm_asm_mov_from_rr_back_1-asm_mov_from_reg,r20
- shladd r16=r16,4,r20
- mov r24=b0
- ;;
- add r27=VMM_VCPU_VRR0_OFFSET,r21
- mov b0=r16
- br.many b0
- ;;
-kvm_asm_mov_from_rr_back_1:
- adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
- adds r22=asm_mov_to_reg-asm_mov_from_reg,r20
- shr.u r26=r19,61
- ;;
- shladd r17=r17,4,r22
- shladd r27=r26,3,r27
- ;;
- ld8 r19=[r27]
- mov b0=r17
- br.many b0
-END(kvm_asm_mov_from_rr)
-
-
-// mov rr[r3]=r2
-GLOBAL_ENTRY(kvm_asm_mov_to_rr)
-#ifndef ACCE_MOV_TO_RR
- br.many kvm_virtualization_fault_back
-#endif
- extr.u r16=r25,20,7
- extr.u r17=r25,13,7
- addl r20=@gprel(asm_mov_from_reg),gp
- ;;
- adds r30=kvm_asm_mov_to_rr_back_1-asm_mov_from_reg,r20
- shladd r16=r16,4,r20
- mov r22=b0
- ;;
- add r27=VMM_VCPU_VRR0_OFFSET,r21
- mov b0=r16
- br.many b0
- ;;
-kvm_asm_mov_to_rr_back_1:
- adds r30=kvm_asm_mov_to_rr_back_2-asm_mov_from_reg,r20
- shr.u r23=r19,61
- shladd r17=r17,4,r20
- ;;
- //if rr6, go back
- cmp.eq p6,p0=6,r23
- mov b0=r22
- (p6) br.cond.dpnt.many kvm_virtualization_fault_back
- ;;
- mov r28=r19
- mov b0=r17
- br.many b0
-kvm_asm_mov_to_rr_back_2:
- adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
- shladd r27=r23,3,r27
- ;; // vrr.rid<<4 |0xe
- st8 [r27]=r19
- mov b0=r30
- ;;
- extr.u r16=r19,8,26
- extr.u r18 =r19,2,6
- mov r17 =0xe
- ;;
- shladd r16 = r16, 4, r17
- extr.u r19 =r19,0,8
- ;;
- shl r16 = r16,8
- ;;
- add r19 = r19, r16
- ;; //set ve 1
- dep r19=-1,r19,0,1
- cmp.lt p6,p0=14,r18
- ;;
- (p6) mov r18=14
- ;;
- (p6) dep r19=r18,r19,2,6
- ;;
- cmp.eq p6,p0=0,r23
- ;;
- cmp.eq.or p6,p0=4,r23
- ;;
- adds r16=VMM_VCPU_MODE_FLAGS_OFFSET,r21
- (p6) adds r17=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
- ;;
- ld4 r16=[r16]
- cmp.eq p7,p0=r0,r0
- (p6) shladd r17=r23,1,r17
- ;;
- (p6) st8 [r17]=r19
- (p6) tbit.nz p6,p7=r16,0
- ;;
- (p7) mov rr[r28]=r19
- mov r24=r22
- br.many b0
-END(kvm_asm_mov_to_rr)
-
-
-//rsm
-GLOBAL_ENTRY(kvm_asm_rsm)
-#ifndef ACCE_RSM
- br.many kvm_virtualization_fault_back
-#endif
- VMX_VPS_SYNC_READ
- ;;
- extr.u r26=r25,6,21
- extr.u r27=r25,31,2
- ;;
- extr.u r28=r25,36,1
- dep r26=r27,r26,21,2
- ;;
- add r17=VPD_VPSR_START_OFFSET,r16
- add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
- //r26 is imm24
- dep r26=r28,r26,23,1
- ;;
- ld8 r18=[r17]
- movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI
- ld4 r23=[r22]
- sub r27=-1,r26
- mov r24=b0
- ;;
- mov r20=cr.ipsr
- or r28=r27,r28
- and r19=r18,r27
- ;;
- st8 [r17]=r19
- and r20=r20,r28
- /* Commented out due to lack of fp lazy algorithm support
- adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
- ;;
- ld8 r27=[r27]
- ;;
- tbit.nz p8,p0= r27,IA64_PSR_DFH_BIT
- ;;
- (p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1
- */
- ;;
- mov cr.ipsr=r20
- tbit.nz p6,p0=r23,0
- ;;
- tbit.z.or p6,p0=r26,IA64_PSR_DT_BIT
- (p6) br.dptk kvm_resume_to_guest_with_sync
- ;;
- add r26=VMM_VCPU_META_RR0_OFFSET,r21
- add r27=VMM_VCPU_META_RR0_OFFSET+8,r21
- dep r23=-1,r23,0,1
- ;;
- ld8 r26=[r26]
- ld8 r27=[r27]
- st4 [r22]=r23
- dep.z r28=4,61,3
- ;;
- mov rr[r0]=r26
- ;;
- mov rr[r28]=r27
- ;;
- srlz.d
- br.many kvm_resume_to_guest_with_sync
-END(kvm_asm_rsm)
-
-
-//ssm
-GLOBAL_ENTRY(kvm_asm_ssm)
-#ifndef ACCE_SSM
- br.many kvm_virtualization_fault_back
-#endif
- VMX_VPS_SYNC_READ
- ;;
- extr.u r26=r25,6,21
- extr.u r27=r25,31,2
- ;;
- extr.u r28=r25,36,1
- dep r26=r27,r26,21,2
- ;; //r26 is imm24
- add r27=VPD_VPSR_START_OFFSET,r16
- dep r26=r28,r26,23,1
- ;; //r19 vpsr
- ld8 r29=[r27]
- mov r24=b0
- ;;
- add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
- mov r20=cr.ipsr
- or r19=r29,r26
- ;;
- ld4 r23=[r22]
- st8 [r27]=r19
- or r20=r20,r26
- ;;
- mov cr.ipsr=r20
- movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
- ;;
- and r19=r28,r19
- tbit.z p6,p0=r23,0
- ;;
- cmp.ne.or p6,p0=r28,r19
- (p6) br.dptk kvm_asm_ssm_1
- ;;
- add r26=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
- add r27=VMM_VCPU_META_SAVED_RR0_OFFSET+8,r21
- dep r23=0,r23,0,1
- ;;
- ld8 r26=[r26]
- ld8 r27=[r27]
- st4 [r22]=r23
- dep.z r28=4,61,3
- ;;
- mov rr[r0]=r26
- ;;
- mov rr[r28]=r27
- ;;
- srlz.d
- ;;
-kvm_asm_ssm_1:
- tbit.nz p6,p0=r29,IA64_PSR_I_BIT
- ;;
- tbit.z.or p6,p0=r19,IA64_PSR_I_BIT
- (p6) br.dptk kvm_resume_to_guest_with_sync
- ;;
- add r29=VPD_VTPR_START_OFFSET,r16
- add r30=VPD_VHPI_START_OFFSET,r16
- ;;
- ld8 r29=[r29]
- ld8 r30=[r30]
- ;;
- extr.u r17=r29,4,4
- extr.u r18=r29,16,1
- ;;
- dep r17=r18,r17,4,1
- ;;
- cmp.gt p6,p0=r30,r17
- (p6) br.dpnt.few kvm_asm_dispatch_vexirq
- br.many kvm_resume_to_guest_with_sync
-END(kvm_asm_ssm)
-
-
-//mov psr.l=r2
-GLOBAL_ENTRY(kvm_asm_mov_to_psr)
-#ifndef ACCE_MOV_TO_PSR
- br.many kvm_virtualization_fault_back
-#endif
- VMX_VPS_SYNC_READ
- ;;
- extr.u r26=r25,13,7 //r2
- addl r20=@gprel(asm_mov_from_reg),gp
- ;;
- adds r30=kvm_asm_mov_to_psr_back-asm_mov_from_reg,r20
- shladd r26=r26,4,r20
- mov r24=b0
- ;;
- add r27=VPD_VPSR_START_OFFSET,r16
- mov b0=r26
- br.many b0
- ;;
-kvm_asm_mov_to_psr_back:
- ld8 r17=[r27]
- add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
- dep r19=0,r19,32,32
- ;;
- ld4 r23=[r22]
- dep r18=0,r17,0,32
- ;;
- add r30=r18,r19
- movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
- ;;
- st8 [r27]=r30
- and r27=r28,r30
- and r29=r28,r17
- ;;
- cmp.eq p5,p0=r29,r27
- cmp.eq p6,p7=r28,r27
- (p5) br.many kvm_asm_mov_to_psr_1
- ;;
- //virtual to physical
- (p7) add r26=VMM_VCPU_META_RR0_OFFSET,r21
- (p7) add r27=VMM_VCPU_META_RR0_OFFSET+8,r21
- (p7) dep r23=-1,r23,0,1
- ;;
- //physical to virtual
- (p6) add r26=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
- (p6) add r27=VMM_VCPU_META_SAVED_RR0_OFFSET+8,r21
- (p6) dep r23=0,r23,0,1
- ;;
- ld8 r26=[r26]
- ld8 r27=[r27]
- st4 [r22]=r23
- dep.z r28=4,61,3
- ;;
- mov rr[r0]=r26
- ;;
- mov rr[r28]=r27
- ;;
- srlz.d
- ;;
-kvm_asm_mov_to_psr_1:
- mov r20=cr.ipsr
- movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI+IA64_PSR_RT
- ;;
- or r19=r19,r28
- dep r20=0,r20,0,32
- ;;
- add r20=r19,r20
- mov b0=r24
- ;;
- /* Commented out due to lack of fp lazy algorithm support
- adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
- ;;
- ld8 r27=[r27]
- ;;
- tbit.nz p8,p0=r27,IA64_PSR_DFH_BIT
- ;;
- (p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1
- ;;
- */
- mov cr.ipsr=r20
- cmp.ne p6,p0=r0,r0
- ;;
- tbit.nz.or p6,p0=r17,IA64_PSR_I_BIT
- tbit.z.or p6,p0=r30,IA64_PSR_I_BIT
- (p6) br.dpnt.few kvm_resume_to_guest_with_sync
- ;;
- add r29=VPD_VTPR_START_OFFSET,r16
- add r30=VPD_VHPI_START_OFFSET,r16
- ;;
- ld8 r29=[r29]
- ld8 r30=[r30]
- ;;
- extr.u r17=r29,4,4
- extr.u r18=r29,16,1
- ;;
- dep r17=r18,r17,4,1
- ;;
- cmp.gt p6,p0=r30,r17
- (p6) br.dpnt.few kvm_asm_dispatch_vexirq
- br.many kvm_resume_to_guest_with_sync
-END(kvm_asm_mov_to_psr)
-
-
-ENTRY(kvm_asm_dispatch_vexirq)
-//increment iip
- mov r17 = b0
- mov r18 = r31
-{.mii
- add r25=VMM_VPD_BASE_OFFSET,r21
- nop 0x0
- mov r24 = ip
- ;;
-}
-{.mmb
- add r24 = 0x20, r24
- ld8 r25 = [r25]
- br.sptk.many kvm_vps_sync_write
-}
- mov b0 =r17
- mov r16=cr.ipsr
- mov r31 = r18
- mov r19 = 37
- ;;
- extr.u r17=r16,IA64_PSR_RI_BIT,2
- tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
- ;;
- (p6) mov r18=cr.iip
- (p6) mov r17=r0
- (p7) add r17=1,r17
- ;;
- (p6) add r18=0x10,r18
- dep r16=r17,r16,IA64_PSR_RI_BIT,2
- ;;
- (p6) mov cr.iip=r18
- mov cr.ipsr=r16
- mov r30 =1
- br.many kvm_dispatch_vexirq
-END(kvm_asm_dispatch_vexirq)
-
-// thash
-// TODO: add support when pta.vf = 1
-GLOBAL_ENTRY(kvm_asm_thash)
-#ifndef ACCE_THASH
- br.many kvm_virtualization_fault_back
-#endif
- extr.u r17=r25,20,7 // get r3 from opcode in r25
- extr.u r18=r25,6,7 // get r1 from opcode in r25
- addl r20=@gprel(asm_mov_from_reg),gp
- ;;
- adds r30=kvm_asm_thash_back1-asm_mov_from_reg,r20
- shladd r17=r17,4,r20 // get addr of MOVE_FROM_REG(r17)
- adds r16=VMM_VPD_BASE_OFFSET,r21 // get vcpu.arch.priveregs
- ;;
- mov r24=b0
- ;;
- ld8 r16=[r16] // get VPD addr
- mov b0=r17
- br.many b0 // r19 return value
- ;;
-kvm_asm_thash_back1:
- shr.u r23=r19,61 // get RR number
- adds r28=VMM_VCPU_VRR0_OFFSET,r21 // get vcpu->arch.vrr[0]'s addr
- adds r16=VMM_VPD_VPTA_OFFSET,r16 // get vpta
- ;;
- shladd r27=r23,3,r28 // get vcpu->arch.vrr[r23]'s addr
- ld8 r17=[r16] // get PTA
- mov r26=1
- ;;
- extr.u r29=r17,2,6 // get pta.size
- ld8 r28=[r27] // get vcpu->arch.vrr[r23]'s value
- ;;
- mov b0=r24
- //Fallback to C if pta.vf is set
- tbit.nz p6,p0=r17, 8
- ;;
- (p6) mov r24=EVENT_THASH
- (p6) br.cond.dpnt.many kvm_virtualization_fault_back
- extr.u r28=r28,2,6 // get rr.ps
- shl r22=r26,r29 // 1UL << pta.size
- ;;
- shr.u r23=r19,r28 // vaddr >> rr.ps
- adds r26=3,r29 // pta.size + 3
- shl r27=r17,3 // pta << 3
- ;;
- shl r23=r23,3 // (vaddr >> rr.ps) << 3
- shr.u r27=r27,r26 // (pta << 3) >> (pta.size+3)
- movl r16=7<<61
- ;;
- adds r22=-1,r22 // (1UL << pta.size) - 1
- shl r27=r27,r29 // ((pta<<3)>>(pta.size+3))<<pta.size
- and r19=r19,r16 // vaddr & VRN_MASK
- ;;
- and r22=r22,r23 // vhpt_offset
- or r19=r19,r27 // (vadr&VRN_MASK)|(((pta<<3)>>(pta.size + 3))<<pta.size)
- adds r26=asm_mov_to_reg-asm_mov_from_reg,r20
- ;;
- or r19=r19,r22 // calc pval
- shladd r17=r18,4,r26
- adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
- ;;
- mov b0=r17
- br.many b0
-END(kvm_asm_thash)
-
-#define MOV_TO_REG0 \
-{; \
- nop.b 0x0; \
- nop.b 0x0; \
- nop.b 0x0; \
- ;; \
-};
-
-
-#define MOV_TO_REG(n) \
-{; \
- mov r##n##=r19; \
- mov b0=r30; \
- br.sptk.many b0; \
- ;; \
-};
-
-
-#define MOV_FROM_REG(n) \
-{; \
- mov r19=r##n##; \
- mov b0=r30; \
- br.sptk.many b0; \
- ;; \
-};
-
-
-#define MOV_TO_BANK0_REG(n) \
-ENTRY_MIN_ALIGN(asm_mov_to_bank0_reg##n##); \
-{; \
- mov r26=r2; \
- mov r2=r19; \
- bsw.1; \
- ;; \
-}; \
-{; \
- mov r##n##=r2; \
- nop.b 0x0; \
- bsw.0; \
- ;; \
-}; \
-{; \
- mov r2=r26; \
- mov b0=r30; \
- br.sptk.many b0; \
- ;; \
-}; \
-END(asm_mov_to_bank0_reg##n##)
-
-
-#define MOV_FROM_BANK0_REG(n) \
-ENTRY_MIN_ALIGN(asm_mov_from_bank0_reg##n##); \
-{; \
- mov r26=r2; \
- nop.b 0x0; \
- bsw.1; \
- ;; \
-}; \
-{; \
- mov r2=r##n##; \
- nop.b 0x0; \
- bsw.0; \
- ;; \
-}; \
-{; \
- mov r19=r2; \
- mov r2=r26; \
- mov b0=r30; \
-}; \
-{; \
- nop.b 0x0; \
- nop.b 0x0; \
- br.sptk.many b0; \
- ;; \
-}; \
-END(asm_mov_from_bank0_reg##n##)
-
-
-#define JMP_TO_MOV_TO_BANK0_REG(n) \
-{; \
- nop.b 0x0; \
- nop.b 0x0; \
- br.sptk.many asm_mov_to_bank0_reg##n##; \
- ;; \
-}
-
-
-#define JMP_TO_MOV_FROM_BANK0_REG(n) \
-{; \
- nop.b 0x0; \
- nop.b 0x0; \
- br.sptk.many asm_mov_from_bank0_reg##n##; \
- ;; \
-}
-
-
-MOV_FROM_BANK0_REG(16)
-MOV_FROM_BANK0_REG(17)
-MOV_FROM_BANK0_REG(18)
-MOV_FROM_BANK0_REG(19)
-MOV_FROM_BANK0_REG(20)
-MOV_FROM_BANK0_REG(21)
-MOV_FROM_BANK0_REG(22)
-MOV_FROM_BANK0_REG(23)
-MOV_FROM_BANK0_REG(24)
-MOV_FROM_BANK0_REG(25)
-MOV_FROM_BANK0_REG(26)
-MOV_FROM_BANK0_REG(27)
-MOV_FROM_BANK0_REG(28)
-MOV_FROM_BANK0_REG(29)
-MOV_FROM_BANK0_REG(30)
-MOV_FROM_BANK0_REG(31)
-
-
-// mov from reg table
-ENTRY(asm_mov_from_reg)
- MOV_FROM_REG(0)
- MOV_FROM_REG(1)
- MOV_FROM_REG(2)
- MOV_FROM_REG(3)
- MOV_FROM_REG(4)
- MOV_FROM_REG(5)
- MOV_FROM_REG(6)
- MOV_FROM_REG(7)
- MOV_FROM_REG(8)
- MOV_FROM_REG(9)
- MOV_FROM_REG(10)
- MOV_FROM_REG(11)
- MOV_FROM_REG(12)
- MOV_FROM_REG(13)
- MOV_FROM_REG(14)
- MOV_FROM_REG(15)
- JMP_TO_MOV_FROM_BANK0_REG(16)
- JMP_TO_MOV_FROM_BANK0_REG(17)
- JMP_TO_MOV_FROM_BANK0_REG(18)
- JMP_TO_MOV_FROM_BANK0_REG(19)
- JMP_TO_MOV_FROM_BANK0_REG(20)
- JMP_TO_MOV_FROM_BANK0_REG(21)
- JMP_TO_MOV_FROM_BANK0_REG(22)
- JMP_TO_MOV_FROM_BANK0_REG(23)
- JMP_TO_MOV_FROM_BANK0_REG(24)
- JMP_TO_MOV_FROM_BANK0_REG(25)
- JMP_TO_MOV_FROM_BANK0_REG(26)
- JMP_TO_MOV_FROM_BANK0_REG(27)
- JMP_TO_MOV_FROM_BANK0_REG(28)
- JMP_TO_MOV_FROM_BANK0_REG(29)
- JMP_TO_MOV_FROM_BANK0_REG(30)
- JMP_TO_MOV_FROM_BANK0_REG(31)
- MOV_FROM_REG(32)
- MOV_FROM_REG(33)
- MOV_FROM_REG(34)
- MOV_FROM_REG(35)
- MOV_FROM_REG(36)
- MOV_FROM_REG(37)
- MOV_FROM_REG(38)
- MOV_FROM_REG(39)
- MOV_FROM_REG(40)
- MOV_FROM_REG(41)
- MOV_FROM_REG(42)
- MOV_FROM_REG(43)
- MOV_FROM_REG(44)
- MOV_FROM_REG(45)
- MOV_FROM_REG(46)
- MOV_FROM_REG(47)
- MOV_FROM_REG(48)
- MOV_FROM_REG(49)
- MOV_FROM_REG(50)
- MOV_FROM_REG(51)
- MOV_FROM_REG(52)
- MOV_FROM_REG(53)
- MOV_FROM_REG(54)
- MOV_FROM_REG(55)
- MOV_FROM_REG(56)
- MOV_FROM_REG(57)
- MOV_FROM_REG(58)
- MOV_FROM_REG(59)
- MOV_FROM_REG(60)
- MOV_FROM_REG(61)
- MOV_FROM_REG(62)
- MOV_FROM_REG(63)
- MOV_FROM_REG(64)
- MOV_FROM_REG(65)
- MOV_FROM_REG(66)
- MOV_FROM_REG(67)
- MOV_FROM_REG(68)
- MOV_FROM_REG(69)
- MOV_FROM_REG(70)
- MOV_FROM_REG(71)
- MOV_FROM_REG(72)
- MOV_FROM_REG(73)
- MOV_FROM_REG(74)
- MOV_FROM_REG(75)
- MOV_FROM_REG(76)
- MOV_FROM_REG(77)
- MOV_FROM_REG(78)
- MOV_FROM_REG(79)
- MOV_FROM_REG(80)
- MOV_FROM_REG(81)
- MOV_FROM_REG(82)
- MOV_FROM_REG(83)
- MOV_FROM_REG(84)
- MOV_FROM_REG(85)
- MOV_FROM_REG(86)
- MOV_FROM_REG(87)
- MOV_FROM_REG(88)
- MOV_FROM_REG(89)
- MOV_FROM_REG(90)
- MOV_FROM_REG(91)
- MOV_FROM_REG(92)
- MOV_FROM_REG(93)
- MOV_FROM_REG(94)
- MOV_FROM_REG(95)
- MOV_FROM_REG(96)
- MOV_FROM_REG(97)
- MOV_FROM_REG(98)
- MOV_FROM_REG(99)
- MOV_FROM_REG(100)
- MOV_FROM_REG(101)
- MOV_FROM_REG(102)
- MOV_FROM_REG(103)
- MOV_FROM_REG(104)
- MOV_FROM_REG(105)
- MOV_FROM_REG(106)
- MOV_FROM_REG(107)
- MOV_FROM_REG(108)
- MOV_FROM_REG(109)
- MOV_FROM_REG(110)
- MOV_FROM_REG(111)
- MOV_FROM_REG(112)
- MOV_FROM_REG(113)
- MOV_FROM_REG(114)
- MOV_FROM_REG(115)
- MOV_FROM_REG(116)
- MOV_FROM_REG(117)
- MOV_FROM_REG(118)
- MOV_FROM_REG(119)
- MOV_FROM_REG(120)
- MOV_FROM_REG(121)
- MOV_FROM_REG(122)
- MOV_FROM_REG(123)
- MOV_FROM_REG(124)
- MOV_FROM_REG(125)
- MOV_FROM_REG(126)
- MOV_FROM_REG(127)
-END(asm_mov_from_reg)
-
-
-/* must be in bank 0
- * parameter:
- * r31: pr
- * r24: b0
- */
-ENTRY(kvm_resume_to_guest_with_sync)
- adds r19=VMM_VPD_BASE_OFFSET,r21
- mov r16 = r31
- mov r17 = r24
- ;;
-{.mii
- ld8 r25 =[r19]
- nop 0x0
- mov r24 = ip
- ;;
-}
-{.mmb
- add r24 =0x20, r24
- nop 0x0
- br.sptk.many kvm_vps_sync_write
-}
-
- mov r31 = r16
- mov r24 =r17
- ;;
- br.sptk.many kvm_resume_to_guest
-END(kvm_resume_to_guest_with_sync)
-
-ENTRY(kvm_resume_to_guest)
- adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
- ;;
- ld8 r1 =[r16]
- adds r20 = VMM_VCPU_VSA_BASE_OFFSET,r21
- ;;
- mov r16=cr.ipsr
- ;;
- ld8 r20 = [r20]
- adds r19=VMM_VPD_BASE_OFFSET,r21
- ;;
- ld8 r25=[r19]
- extr.u r17=r16,IA64_PSR_RI_BIT,2
- tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
- ;;
- (p6) mov r18=cr.iip
- (p6) mov r17=r0
- ;;
- (p6) add r18=0x10,r18
- (p7) add r17=1,r17
- ;;
- (p6) mov cr.iip=r18
- dep r16=r17,r16,IA64_PSR_RI_BIT,2
- ;;
- mov cr.ipsr=r16
- adds r19= VPD_VPSR_START_OFFSET,r25
- add r28=PAL_VPS_RESUME_NORMAL,r20
- add r29=PAL_VPS_RESUME_HANDLER,r20
- ;;
- ld8 r19=[r19]
- mov b0=r29
- mov r27=cr.isr
- ;;
- tbit.z p6,p7 = r19,IA64_PSR_IC_BIT // p7=vpsr.ic
- shr r27=r27,IA64_ISR_IR_BIT
- ;;
- (p6) ld8 r26=[r25]
- (p7) mov b0=r28
- ;;
- (p6) dep r26=r27,r26,63,1
- mov pr=r31,-2
- br.sptk.many b0 // call pal service
- ;;
-END(kvm_resume_to_guest)
-
-
-MOV_TO_BANK0_REG(16)
-MOV_TO_BANK0_REG(17)
-MOV_TO_BANK0_REG(18)
-MOV_TO_BANK0_REG(19)
-MOV_TO_BANK0_REG(20)
-MOV_TO_BANK0_REG(21)
-MOV_TO_BANK0_REG(22)
-MOV_TO_BANK0_REG(23)
-MOV_TO_BANK0_REG(24)
-MOV_TO_BANK0_REG(25)
-MOV_TO_BANK0_REG(26)
-MOV_TO_BANK0_REG(27)
-MOV_TO_BANK0_REG(28)
-MOV_TO_BANK0_REG(29)
-MOV_TO_BANK0_REG(30)
-MOV_TO_BANK0_REG(31)
-
-
-// mov to reg table
-ENTRY(asm_mov_to_reg)
- MOV_TO_REG0
- MOV_TO_REG(1)
- MOV_TO_REG(2)
- MOV_TO_REG(3)
- MOV_TO_REG(4)
- MOV_TO_REG(5)
- MOV_TO_REG(6)
- MOV_TO_REG(7)
- MOV_TO_REG(8)
- MOV_TO_REG(9)
- MOV_TO_REG(10)
- MOV_TO_REG(11)
- MOV_TO_REG(12)
- MOV_TO_REG(13)
- MOV_TO_REG(14)
- MOV_TO_REG(15)
- JMP_TO_MOV_TO_BANK0_REG(16)
- JMP_TO_MOV_TO_BANK0_REG(17)
- JMP_TO_MOV_TO_BANK0_REG(18)
- JMP_TO_MOV_TO_BANK0_REG(19)
- JMP_TO_MOV_TO_BANK0_REG(20)
- JMP_TO_MOV_TO_BANK0_REG(21)
- JMP_TO_MOV_TO_BANK0_REG(22)
- JMP_TO_MOV_TO_BANK0_REG(23)
- JMP_TO_MOV_TO_BANK0_REG(24)
- JMP_TO_MOV_TO_BANK0_REG(25)
- JMP_TO_MOV_TO_BANK0_REG(26)
- JMP_TO_MOV_TO_BANK0_REG(27)
- JMP_TO_MOV_TO_BANK0_REG(28)
- JMP_TO_MOV_TO_BANK0_REG(29)
- JMP_TO_MOV_TO_BANK0_REG(30)
- JMP_TO_MOV_TO_BANK0_REG(31)
- MOV_TO_REG(32)
- MOV_TO_REG(33)
- MOV_TO_REG(34)
- MOV_TO_REG(35)
- MOV_TO_REG(36)
- MOV_TO_REG(37)
- MOV_TO_REG(38)
- MOV_TO_REG(39)
- MOV_TO_REG(40)
- MOV_TO_REG(41)
- MOV_TO_REG(42)
- MOV_TO_REG(43)
- MOV_TO_REG(44)
- MOV_TO_REG(45)
- MOV_TO_REG(46)
- MOV_TO_REG(47)
- MOV_TO_REG(48)
- MOV_TO_REG(49)
- MOV_TO_REG(50)
- MOV_TO_REG(51)
- MOV_TO_REG(52)
- MOV_TO_REG(53)
- MOV_TO_REG(54)
- MOV_TO_REG(55)
- MOV_TO_REG(56)
- MOV_TO_REG(57)
- MOV_TO_REG(58)
- MOV_TO_REG(59)
- MOV_TO_REG(60)
- MOV_TO_REG(61)
- MOV_TO_REG(62)
- MOV_TO_REG(63)
- MOV_TO_REG(64)
- MOV_TO_REG(65)
- MOV_TO_REG(66)
- MOV_TO_REG(67)
- MOV_TO_REG(68)
- MOV_TO_REG(69)
- MOV_TO_REG(70)
- MOV_TO_REG(71)
- MOV_TO_REG(72)
- MOV_TO_REG(73)
- MOV_TO_REG(74)
- MOV_TO_REG(75)
- MOV_TO_REG(76)
- MOV_TO_REG(77)
- MOV_TO_REG(78)
- MOV_TO_REG(79)
- MOV_TO_REG(80)
- MOV_TO_REG(81)
- MOV_TO_REG(82)
- MOV_TO_REG(83)
- MOV_TO_REG(84)
- MOV_TO_REG(85)
- MOV_TO_REG(86)
- MOV_TO_REG(87)
- MOV_TO_REG(88)
- MOV_TO_REG(89)
- MOV_TO_REG(90)
- MOV_TO_REG(91)
- MOV_TO_REG(92)
- MOV_TO_REG(93)
- MOV_TO_REG(94)
- MOV_TO_REG(95)
- MOV_TO_REG(96)
- MOV_TO_REG(97)
- MOV_TO_REG(98)
- MOV_TO_REG(99)
- MOV_TO_REG(100)
- MOV_TO_REG(101)
- MOV_TO_REG(102)
- MOV_TO_REG(103)
- MOV_TO_REG(104)
- MOV_TO_REG(105)
- MOV_TO_REG(106)
- MOV_TO_REG(107)
- MOV_TO_REG(108)
- MOV_TO_REG(109)
- MOV_TO_REG(110)
- MOV_TO_REG(111)
- MOV_TO_REG(112)
- MOV_TO_REG(113)
- MOV_TO_REG(114)
- MOV_TO_REG(115)
- MOV_TO_REG(116)
- MOV_TO_REG(117)
- MOV_TO_REG(118)
- MOV_TO_REG(119)
- MOV_TO_REG(120)
- MOV_TO_REG(121)
- MOV_TO_REG(122)
- MOV_TO_REG(123)
- MOV_TO_REG(124)
- MOV_TO_REG(125)
- MOV_TO_REG(126)
- MOV_TO_REG(127)
-END(asm_mov_to_reg)
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c
deleted file mode 100644
index b0398740b48..00000000000
--- a/arch/ia64/kvm/process.c
+++ /dev/null
@@ -1,1024 +0,0 @@
-/*
- * process.c: handle interruption injection for guests.
- * Copyright (c) 2005, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Shaofan Li (Susue Li) <susie.li@intel.com>
- * Xiaoyan Feng (Fleming Feng) <fleming.feng@intel.com>
- * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
- * Xiantao Zhang (xiantao.zhang@intel.com)
- */
-#include "vcpu.h"
-
-#include <asm/pal.h>
-#include <asm/sal.h>
-#include <asm/fpswa.h>
-#include <asm/kregs.h>
-#include <asm/tlb.h>
-
-fpswa_interface_t *vmm_fpswa_interface;
-
-#define IA64_VHPT_TRANS_VECTOR 0x0000
-#define IA64_INST_TLB_VECTOR 0x0400
-#define IA64_DATA_TLB_VECTOR 0x0800
-#define IA64_ALT_INST_TLB_VECTOR 0x0c00
-#define IA64_ALT_DATA_TLB_VECTOR 0x1000
-#define IA64_DATA_NESTED_TLB_VECTOR 0x1400
-#define IA64_INST_KEY_MISS_VECTOR 0x1800
-#define IA64_DATA_KEY_MISS_VECTOR 0x1c00
-#define IA64_DIRTY_BIT_VECTOR 0x2000
-#define IA64_INST_ACCESS_BIT_VECTOR 0x2400
-#define IA64_DATA_ACCESS_BIT_VECTOR 0x2800
-#define IA64_BREAK_VECTOR 0x2c00
-#define IA64_EXTINT_VECTOR 0x3000
-#define IA64_PAGE_NOT_PRESENT_VECTOR 0x5000
-#define IA64_KEY_PERMISSION_VECTOR 0x5100
-#define IA64_INST_ACCESS_RIGHTS_VECTOR 0x5200
-#define IA64_DATA_ACCESS_RIGHTS_VECTOR 0x5300
-#define IA64_GENEX_VECTOR 0x5400
-#define IA64_DISABLED_FPREG_VECTOR 0x5500
-#define IA64_NAT_CONSUMPTION_VECTOR 0x5600
-#define IA64_SPECULATION_VECTOR 0x5700 /* UNUSED */
-#define IA64_DEBUG_VECTOR 0x5900
-#define IA64_UNALIGNED_REF_VECTOR 0x5a00
-#define IA64_UNSUPPORTED_DATA_REF_VECTOR 0x5b00
-#define IA64_FP_FAULT_VECTOR 0x5c00
-#define IA64_FP_TRAP_VECTOR 0x5d00
-#define IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR 0x5e00
-#define IA64_TAKEN_BRANCH_TRAP_VECTOR 0x5f00
-#define IA64_SINGLE_STEP_TRAP_VECTOR 0x6000
-
-/* SDM vol2 5.5 - IVA based interruption handling */
-#define INITIAL_PSR_VALUE_AT_INTERRUPTION (IA64_PSR_UP | IA64_PSR_MFL |\
- IA64_PSR_MFH | IA64_PSR_PK | IA64_PSR_DT | \
- IA64_PSR_RT | IA64_PSR_MC|IA64_PSR_IT)
-
-#define DOMN_PAL_REQUEST 0x110000
-#define DOMN_SAL_REQUEST 0x110001
-
-static u64 vec2off[68] = {0x0, 0x400, 0x800, 0xc00, 0x1000, 0x1400, 0x1800,
- 0x1c00, 0x2000, 0x2400, 0x2800, 0x2c00, 0x3000, 0x3400, 0x3800, 0x3c00,
- 0x4000, 0x4400, 0x4800, 0x4c00, 0x5000, 0x5100, 0x5200, 0x5300, 0x5400,
- 0x5500, 0x5600, 0x5700, 0x5800, 0x5900, 0x5a00, 0x5b00, 0x5c00, 0x5d00,
- 0x5e00, 0x5f00, 0x6000, 0x6100, 0x6200, 0x6300, 0x6400, 0x6500, 0x6600,
- 0x6700, 0x6800, 0x6900, 0x6a00, 0x6b00, 0x6c00, 0x6d00, 0x6e00, 0x6f00,
- 0x7000, 0x7100, 0x7200, 0x7300, 0x7400, 0x7500, 0x7600, 0x7700, 0x7800,
- 0x7900, 0x7a00, 0x7b00, 0x7c00, 0x7d00, 0x7e00, 0x7f00
-};
-
-static void collect_interruption(struct kvm_vcpu *vcpu)
-{
- u64 ipsr;
- u64 vdcr;
- u64 vifs;
- unsigned long vpsr;
- struct kvm_pt_regs *regs = vcpu_regs(vcpu);
-
- vpsr = vcpu_get_psr(vcpu);
- vcpu_bsw0(vcpu);
- if (vpsr & IA64_PSR_IC) {
-
- /* Sync mpsr id/da/dd/ss/ed bits to vipsr,
- * since after the guest does rfi we still want these bits
- * set in mpsr
- */
-
- ipsr = regs->cr_ipsr;
- vpsr = vpsr | (ipsr & (IA64_PSR_ID | IA64_PSR_DA
- | IA64_PSR_DD | IA64_PSR_SS
- | IA64_PSR_ED));
- vcpu_set_ipsr(vcpu, vpsr);
-
- /* Currently, for a trap, we do not advance IIP to the next
- * instruction, because we assume the caller has already
- * set up IIP correctly
- */
-
- vcpu_set_iip(vcpu , regs->cr_iip);
-
- /* set vifs.v to zero */
- vifs = VCPU(vcpu, ifs);
- vifs &= ~IA64_IFS_V;
- vcpu_set_ifs(vcpu, vifs);
-
- vcpu_set_iipa(vcpu, VMX(vcpu, cr_iipa));
- }
-
- vdcr = VCPU(vcpu, dcr);
-
- /* Set guest psr
- * up/mfl/mfh/pk/dt/rt/mc/it keeps unchanged
- * be: set to the value of dcr.be
- * pp: set to the value of dcr.pp
- */
- vpsr &= INITIAL_PSR_VALUE_AT_INTERRUPTION;
- vpsr |= (vdcr & IA64_DCR_BE);
-
- /* VDCR pp bit position is different from VPSR pp bit */
- if (vdcr & IA64_DCR_PP) {
- vpsr |= IA64_PSR_PP;
- } else {
- vpsr &= ~IA64_PSR_PP;
- }
-
- vcpu_set_psr(vcpu, vpsr);
-
-}
-
-void inject_guest_interruption(struct kvm_vcpu *vcpu, u64 vec)
-{
- u64 viva;
- struct kvm_pt_regs *regs;
- union ia64_isr pt_isr;
-
- regs = vcpu_regs(vcpu);
-
- /* clear cr.isr.ir (incomplete register frame)*/
- pt_isr.val = VMX(vcpu, cr_isr);
- pt_isr.ir = 0;
- VMX(vcpu, cr_isr) = pt_isr.val;
-
- collect_interruption(vcpu);
-
- viva = vcpu_get_iva(vcpu);
- regs->cr_iip = viva + vec;
-}
-
-static u64 vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, u64 ifa)
-{
- union ia64_rr rr, rr1;
-
- rr.val = vcpu_get_rr(vcpu, ifa);
- rr1.val = 0;
- rr1.ps = rr.ps;
- rr1.rid = rr.rid;
- return (rr1.val);
-}
-
-/*
- * Set vIFA & vITIR & vIHA, when vPSR.ic =1
- * Parameter:
- * set_ifa: if true, set vIFA
- * set_itir: if true, set vITIR
- * set_iha: if true, set vIHA
- */
-void set_ifa_itir_iha(struct kvm_vcpu *vcpu, u64 vadr,
- int set_ifa, int set_itir, int set_iha)
-{
- long vpsr;
- u64 value;
-
- vpsr = VCPU(vcpu, vpsr);
- /* Vol2, Table 8-1 */
- if (vpsr & IA64_PSR_IC) {
- if (set_ifa)
- vcpu_set_ifa(vcpu, vadr);
- if (set_itir) {
- value = vcpu_get_itir_on_fault(vcpu, vadr);
- vcpu_set_itir(vcpu, value);
- }
-
- if (set_iha) {
- value = vcpu_thash(vcpu, vadr);
- vcpu_set_iha(vcpu, value);
- }
- }
-}
-
-/*
- * Data TLB Fault
- * @ Data TLB vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void dtlb_fault(struct kvm_vcpu *vcpu, u64 vadr)
-{
- /* If vPSR.ic, IFA, ITIR, IHA */
- set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
- inject_guest_interruption(vcpu, IA64_DATA_TLB_VECTOR);
-}
-
-/*
- * Instruction TLB Fault
- * @ Instruction TLB vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void itlb_fault(struct kvm_vcpu *vcpu, u64 vadr)
-{
- /* If vPSR.ic, IFA, ITIR, IHA */
- set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
- inject_guest_interruption(vcpu, IA64_INST_TLB_VECTOR);
-}
-
-/*
- * Data Nested TLB Fault
- * @ Data Nested TLB Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void nested_dtlb(struct kvm_vcpu *vcpu)
-{
- inject_guest_interruption(vcpu, IA64_DATA_NESTED_TLB_VECTOR);
-}
-
-/*
- * Alternate Data TLB Fault
- * @ Alternate Data TLB vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void alt_dtlb(struct kvm_vcpu *vcpu, u64 vadr)
-{
- set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
- inject_guest_interruption(vcpu, IA64_ALT_DATA_TLB_VECTOR);
-}
-
-/*
- * Data TLB Fault
- * @ Data TLB vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void alt_itlb(struct kvm_vcpu *vcpu, u64 vadr)
-{
- set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
- inject_guest_interruption(vcpu, IA64_ALT_INST_TLB_VECTOR);
-}
-
-/* Deal with:
- * VHPT Translation Vector
- */
-static void _vhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
-{
- /* If vPSR.ic, IFA, ITIR, IHA*/
- set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
- inject_guest_interruption(vcpu, IA64_VHPT_TRANS_VECTOR);
-}
-
-/*
- * VHPT Instruction Fault
- * @ VHPT Translation vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void ivhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
-{
- _vhpt_fault(vcpu, vadr);
-}
-
-/*
- * VHPT Data Fault
- * @ VHPT Translation vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void dvhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
-{
- _vhpt_fault(vcpu, vadr);
-}
-
-/*
- * Deal with:
- * General Exception vector
- */
-void _general_exception(struct kvm_vcpu *vcpu)
-{
- inject_guest_interruption(vcpu, IA64_GENEX_VECTOR);
-}
-
-/*
- * Illegal Operation Fault
- * @ General Exception Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void illegal_op(struct kvm_vcpu *vcpu)
-{
- _general_exception(vcpu);
-}
-
-/*
- * Illegal Dependency Fault
- * @ General Exception Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void illegal_dep(struct kvm_vcpu *vcpu)
-{
- _general_exception(vcpu);
-}
-
-/*
- * Reserved Register/Field Fault
- * @ General Exception Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void rsv_reg_field(struct kvm_vcpu *vcpu)
-{
- _general_exception(vcpu);
-}
-/*
- * Privileged Operation Fault
- * @ General Exception Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-
-void privilege_op(struct kvm_vcpu *vcpu)
-{
- _general_exception(vcpu);
-}
-
-/*
- * Unimplemented Data Address Fault
- * @ General Exception Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void unimpl_daddr(struct kvm_vcpu *vcpu)
-{
- _general_exception(vcpu);
-}
-
-/*
- * Privileged Register Fault
- * @ General Exception Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void privilege_reg(struct kvm_vcpu *vcpu)
-{
- _general_exception(vcpu);
-}
-
-/* Deal with
- * Nat consumption vector
- * Parameter:
- * vaddr: Optional, if t == REGISTER
- */
-static void _nat_consumption_fault(struct kvm_vcpu *vcpu, u64 vadr,
- enum tlb_miss_type t)
-{
- /* If vPSR.ic && t == DATA/INST, IFA */
- if (t == DATA || t == INSTRUCTION) {
- /* IFA */
- set_ifa_itir_iha(vcpu, vadr, 1, 0, 0);
- }
-
- inject_guest_interruption(vcpu, IA64_NAT_CONSUMPTION_VECTOR);
-}
-
-/*
- * Instruction Nat Page Consumption Fault
- * @ Nat Consumption Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void inat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr)
-{
- _nat_consumption_fault(vcpu, vadr, INSTRUCTION);
-}
-
-/*
- * Register Nat Consumption Fault
- * @ Nat Consumption Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void rnat_consumption(struct kvm_vcpu *vcpu)
-{
- _nat_consumption_fault(vcpu, 0, REGISTER);
-}
-
-/*
- * Data Nat Page Consumption Fault
- * @ Nat Consumption Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void dnat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr)
-{
- _nat_consumption_fault(vcpu, vadr, DATA);
-}
-
-/* Deal with
- * Page not present vector
- */
-static void __page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
-{
- /* If vPSR.ic, IFA, ITIR */
- set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
- inject_guest_interruption(vcpu, IA64_PAGE_NOT_PRESENT_VECTOR);
-}
-
-void data_page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
-{
- __page_not_present(vcpu, vadr);
-}
-
-void inst_page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
-{
- __page_not_present(vcpu, vadr);
-}
-
-/* Deal with
- * Data access rights vector
- */
-void data_access_rights(struct kvm_vcpu *vcpu, u64 vadr)
-{
- /* If vPSR.ic, IFA, ITIR */
- set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
- inject_guest_interruption(vcpu, IA64_DATA_ACCESS_RIGHTS_VECTOR);
-}
-
-fpswa_ret_t vmm_fp_emulate(int fp_fault, void *bundle, unsigned long *ipsr,
- unsigned long *fpsr, unsigned long *isr, unsigned long *pr,
- unsigned long *ifs, struct kvm_pt_regs *regs)
-{
- fp_state_t fp_state;
- fpswa_ret_t ret;
- struct kvm_vcpu *vcpu = current_vcpu;
-
- uint64_t old_rr7 = ia64_get_rr(7UL<<61);
-
- if (!vmm_fpswa_interface)
- return (fpswa_ret_t) {-1, 0, 0, 0};
-
- memset(&fp_state, 0, sizeof(fp_state_t));
-
- /*
- * compute fp_state. only FP registers f6 - f11 are used by the
- * vmm, so set those bits in the mask and set the low volatile
- * pointer to point to these registers.
- */
- fp_state.bitmask_low64 = 0xfc0; /* bit6..bit11 */
-
- fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) &regs->f6;
-
- /*
- * unsigned long (*EFI_FPSWA) (
- * unsigned long trap_type,
- * void *Bundle,
- * unsigned long *pipsr,
- * unsigned long *pfsr,
- * unsigned long *pisr,
- * unsigned long *ppreds,
- * unsigned long *pifs,
- * void *fp_state);
- */
- /*Call host fpswa interface directly to virtualize
- *guest fpswa request!
- */
- ia64_set_rr(7UL << 61, vcpu->arch.host.rr[7]);
- ia64_srlz_d();
-
- ret = (*vmm_fpswa_interface->fpswa) (fp_fault, bundle,
- ipsr, fpsr, isr, pr, ifs, &fp_state);
- ia64_set_rr(7UL << 61, old_rr7);
- ia64_srlz_d();
- return ret;
-}
-
-/*
- * Handle floating-point assist faults and traps for domain.
- */
-unsigned long vmm_handle_fpu_swa(int fp_fault, struct kvm_pt_regs *regs,
- unsigned long isr)
-{
- struct kvm_vcpu *v = current_vcpu;
- IA64_BUNDLE bundle;
- unsigned long fault_ip;
- fpswa_ret_t ret;
-
- fault_ip = regs->cr_iip;
- /*
- * When the FP trap occurs, the trapping instruction is completed.
- * If ipsr.ri == 0, there is the trapping instruction in previous
- * bundle.
- */
- if (!fp_fault && (ia64_psr(regs)->ri == 0))
- fault_ip -= 16;
-
- if (fetch_code(v, fault_ip, &bundle))
- return -EAGAIN;
-
- if (!bundle.i64[0] && !bundle.i64[1])
- return -EACCES;
-
- ret = vmm_fp_emulate(fp_fault, &bundle, &regs->cr_ipsr, &regs->ar_fpsr,
- &isr, &regs->pr, &regs->cr_ifs, regs);
- return ret.status;
-}
-
-void reflect_interruption(u64 ifa, u64 isr, u64 iim,
- u64 vec, struct kvm_pt_regs *regs)
-{
- u64 vector;
- int status ;
- struct kvm_vcpu *vcpu = current_vcpu;
- u64 vpsr = VCPU(vcpu, vpsr);
-
- vector = vec2off[vec];
-
- if (!(vpsr & IA64_PSR_IC) && (vector != IA64_DATA_NESTED_TLB_VECTOR)) {
- panic_vm(vcpu, "Interruption with vector :0x%lx occurs "
- "with psr.ic = 0\n", vector);
- return;
- }
-
- switch (vec) {
- case 32: /*IA64_FP_FAULT_VECTOR*/
- status = vmm_handle_fpu_swa(1, regs, isr);
- if (!status) {
- vcpu_increment_iip(vcpu);
- return;
- } else if (-EAGAIN == status)
- return;
- break;
- case 33: /*IA64_FP_TRAP_VECTOR*/
- status = vmm_handle_fpu_swa(0, regs, isr);
- if (!status)
- return ;
- break;
- }
-
- VCPU(vcpu, isr) = isr;
- VCPU(vcpu, iipa) = regs->cr_iip;
- if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR)
- VCPU(vcpu, iim) = iim;
- else
- set_ifa_itir_iha(vcpu, ifa, 1, 1, 1);
-
- inject_guest_interruption(vcpu, vector);
-}
-
-static unsigned long kvm_trans_pal_call_args(struct kvm_vcpu *vcpu,
- unsigned long arg)
-{
- struct thash_data *data;
- unsigned long gpa, poff;
-
- if (!is_physical_mode(vcpu)) {
- /* Depends on caller to provide the DTR or DTC mapping.*/
- data = vtlb_lookup(vcpu, arg, D_TLB);
- if (data)
- gpa = data->page_flags & _PAGE_PPN_MASK;
- else {
- data = vhpt_lookup(arg);
- if (!data)
- return 0;
- gpa = data->gpaddr & _PAGE_PPN_MASK;
- }
-
- poff = arg & (PSIZE(data->ps) - 1);
- arg = PAGEALIGN(gpa, data->ps) | poff;
- }
- arg = kvm_gpa_to_mpa(arg << 1 >> 1);
-
- return (unsigned long)__va(arg);
-}
-
-static void set_pal_call_data(struct kvm_vcpu *vcpu)
-{
- struct exit_ctl_data *p = &vcpu->arch.exit_data;
- unsigned long gr28 = vcpu_get_gr(vcpu, 28);
- unsigned long gr29 = vcpu_get_gr(vcpu, 29);
- unsigned long gr30 = vcpu_get_gr(vcpu, 30);
-
- /* FIXME: For the static and stacked conventions, firmware
- * has put the parameters in gr28-gr31 before
- * breaking to the vmm. */
-
- switch (gr28) {
- case PAL_PERF_MON_INFO:
- case PAL_HALT_INFO:
- p->u.pal_data.gr29 = kvm_trans_pal_call_args(vcpu, gr29);
- p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30);
- break;
- case PAL_BRAND_INFO:
- p->u.pal_data.gr29 = gr29;
- p->u.pal_data.gr30 = kvm_trans_pal_call_args(vcpu, gr30);
- break;
- default:
- p->u.pal_data.gr29 = gr29;
- p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30);
- }
- p->u.pal_data.gr28 = gr28;
- p->u.pal_data.gr31 = vcpu_get_gr(vcpu, 31);
-
- p->exit_reason = EXIT_REASON_PAL_CALL;
-}
-
-static void get_pal_call_result(struct kvm_vcpu *vcpu)
-{
- struct exit_ctl_data *p = &vcpu->arch.exit_data;
-
- if (p->exit_reason == EXIT_REASON_PAL_CALL) {
- vcpu_set_gr(vcpu, 8, p->u.pal_data.ret.status, 0);
- vcpu_set_gr(vcpu, 9, p->u.pal_data.ret.v0, 0);
- vcpu_set_gr(vcpu, 10, p->u.pal_data.ret.v1, 0);
- vcpu_set_gr(vcpu, 11, p->u.pal_data.ret.v2, 0);
- } else
- panic_vm(vcpu, "Mis-set for exit reason!\n");
-}
-
-static void set_sal_call_data(struct kvm_vcpu *vcpu)
-{
- struct exit_ctl_data *p = &vcpu->arch.exit_data;
-
- p->u.sal_data.in0 = vcpu_get_gr(vcpu, 32);
- p->u.sal_data.in1 = vcpu_get_gr(vcpu, 33);
- p->u.sal_data.in2 = vcpu_get_gr(vcpu, 34);
- p->u.sal_data.in3 = vcpu_get_gr(vcpu, 35);
- p->u.sal_data.in4 = vcpu_get_gr(vcpu, 36);
- p->u.sal_data.in5 = vcpu_get_gr(vcpu, 37);
- p->u.sal_data.in6 = vcpu_get_gr(vcpu, 38);
- p->u.sal_data.in7 = vcpu_get_gr(vcpu, 39);
- p->exit_reason = EXIT_REASON_SAL_CALL;
-}
-
-static void get_sal_call_result(struct kvm_vcpu *vcpu)
-{
- struct exit_ctl_data *p = &vcpu->arch.exit_data;
-
- if (p->exit_reason == EXIT_REASON_SAL_CALL) {
- vcpu_set_gr(vcpu, 8, p->u.sal_data.ret.r8, 0);
- vcpu_set_gr(vcpu, 9, p->u.sal_data.ret.r9, 0);
- vcpu_set_gr(vcpu, 10, p->u.sal_data.ret.r10, 0);
- vcpu_set_gr(vcpu, 11, p->u.sal_data.ret.r11, 0);
- } else
- panic_vm(vcpu, "Mis-set for exit reason!\n");
-}
-
-void kvm_ia64_handle_break(unsigned long ifa, struct kvm_pt_regs *regs,
- unsigned long isr, unsigned long iim)
-{
- struct kvm_vcpu *v = current_vcpu;
- long psr;
-
- if (ia64_psr(regs)->cpl == 0) {
- /* Allow hypercalls only when cpl = 0. */
- if (iim == DOMN_PAL_REQUEST) {
- local_irq_save(psr);
- set_pal_call_data(v);
- vmm_transition(v);
- get_pal_call_result(v);
- vcpu_increment_iip(v);
- local_irq_restore(psr);
- return;
- } else if (iim == DOMN_SAL_REQUEST) {
- local_irq_save(psr);
- set_sal_call_data(v);
- vmm_transition(v);
- get_sal_call_result(v);
- vcpu_increment_iip(v);
- local_irq_restore(psr);
- return;
- }
- }
- reflect_interruption(ifa, isr, iim, 11, regs);
-}
-
-void check_pending_irq(struct kvm_vcpu *vcpu)
-{
- int mask, h_pending, h_inservice;
- u64 isr;
- unsigned long vpsr;
- struct kvm_pt_regs *regs = vcpu_regs(vcpu);
-
- h_pending = highest_pending_irq(vcpu);
- if (h_pending == NULL_VECTOR) {
- update_vhpi(vcpu, NULL_VECTOR);
- return;
- }
- h_inservice = highest_inservice_irq(vcpu);
-
- vpsr = VCPU(vcpu, vpsr);
- mask = irq_masked(vcpu, h_pending, h_inservice);
- if ((vpsr & IA64_PSR_I) && IRQ_NO_MASKED == mask) {
- isr = vpsr & IA64_PSR_RI;
- update_vhpi(vcpu, h_pending);
- reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */
- } else if (mask == IRQ_MASKED_BY_INSVC) {
- if (VCPU(vcpu, vhpi))
- update_vhpi(vcpu, NULL_VECTOR);
- } else {
- /* masked by vpsr.i or vtpr.*/
- update_vhpi(vcpu, h_pending);
- }
-}
-
-static void generate_exirq(struct kvm_vcpu *vcpu)
-{
- unsigned vpsr;
- uint64_t isr;
-
- struct kvm_pt_regs *regs = vcpu_regs(vcpu);
-
- vpsr = VCPU(vcpu, vpsr);
- isr = vpsr & IA64_PSR_RI;
- if (!(vpsr & IA64_PSR_IC))
- panic_vm(vcpu, "Trying to inject one IRQ with psr.ic=0\n");
- reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */
-}
-
-void vhpi_detection(struct kvm_vcpu *vcpu)
-{
- uint64_t threshold, vhpi;
- union ia64_tpr vtpr;
- struct ia64_psr vpsr;
-
- vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
- vtpr.val = VCPU(vcpu, tpr);
-
- threshold = ((!vpsr.i) << 5) | (vtpr.mmi << 4) | vtpr.mic;
- vhpi = VCPU(vcpu, vhpi);
- if (vhpi > threshold) {
- /* interrupt activated */
- generate_exirq(vcpu);
- }
-}
-
-void leave_hypervisor_tail(void)
-{
- struct kvm_vcpu *v = current_vcpu;
-
- if (VMX(v, timer_check)) {
- VMX(v, timer_check) = 0;
- if (VMX(v, itc_check)) {
- if (vcpu_get_itc(v) > VCPU(v, itm)) {
- if (!(VCPU(v, itv) & (1 << 16))) {
- vcpu_pend_interrupt(v, VCPU(v, itv)
- & 0xff);
- VMX(v, itc_check) = 0;
- } else {
- v->arch.timer_pending = 1;
- }
- VMX(v, last_itc) = VCPU(v, itm) + 1;
- }
- }
- }
-
- rmb();
- if (v->arch.irq_new_pending) {
- v->arch.irq_new_pending = 0;
- VMX(v, irq_check) = 0;
- check_pending_irq(v);
- return;
- }
- if (VMX(v, irq_check)) {
- VMX(v, irq_check) = 0;
- vhpi_detection(v);
- }
-}
-
-static inline void handle_lds(struct kvm_pt_regs *regs)
-{
- regs->cr_ipsr |= IA64_PSR_ED;
-}
-
-void physical_tlb_miss(struct kvm_vcpu *vcpu, unsigned long vadr, int type)
-{
- unsigned long pte;
- union ia64_rr rr;
-
- rr.val = ia64_get_rr(vadr);
- pte = vadr & _PAGE_PPN_MASK;
- pte = pte | PHY_PAGE_WB;
- thash_vhpt_insert(vcpu, pte, (u64)(rr.ps << 2), vadr, type);
- return;
-}
-
-void kvm_page_fault(u64 vadr , u64 vec, struct kvm_pt_regs *regs)
-{
- unsigned long vpsr;
- int type;
-
- u64 vhpt_adr, gppa, pteval, rr, itir;
- union ia64_isr misr;
- union ia64_pta vpta;
- struct thash_data *data;
- struct kvm_vcpu *v = current_vcpu;
-
- vpsr = VCPU(v, vpsr);
- misr.val = VMX(v, cr_isr);
-
- type = vec;
-
- if (is_physical_mode(v) && (!(vadr << 1 >> 62))) {
- if (vec == 2) {
- if (__gpfn_is_io((vadr << 1) >> (PAGE_SHIFT + 1))) {
- emulate_io_inst(v, ((vadr << 1) >> 1), 4);
- return;
- }
- }
- physical_tlb_miss(v, vadr, type);
- return;
- }
- data = vtlb_lookup(v, vadr, type);
- if (data != 0) {
- if (type == D_TLB) {
- gppa = (vadr & ((1UL << data->ps) - 1))
- + (data->ppn >> (data->ps - 12) << data->ps);
- if (__gpfn_is_io(gppa >> PAGE_SHIFT)) {
- if (data->pl >= ((regs->cr_ipsr >>
- IA64_PSR_CPL0_BIT) & 3))
- emulate_io_inst(v, gppa, data->ma);
- else {
- vcpu_set_isr(v, misr.val);
- data_access_rights(v, vadr);
- }
- return ;
- }
- }
- thash_vhpt_insert(v, data->page_flags, data->itir, vadr, type);
-
- } else if (type == D_TLB) {
- if (misr.sp) {
- handle_lds(regs);
- return;
- }
-
- rr = vcpu_get_rr(v, vadr);
- itir = rr & (RR_RID_MASK | RR_PS_MASK);
-
- if (!vhpt_enabled(v, vadr, misr.rs ? RSE_REF : DATA_REF)) {
- if (vpsr & IA64_PSR_IC) {
- vcpu_set_isr(v, misr.val);
- alt_dtlb(v, vadr);
- } else {
- nested_dtlb(v);
- }
- return ;
- }
-
- vpta.val = vcpu_get_pta(v);
- /* avoid recursively walking (short format) VHPT */
-
- vhpt_adr = vcpu_thash(v, vadr);
- if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
- /* VHPT successfully read. */
- if (!(pteval & _PAGE_P)) {
- if (vpsr & IA64_PSR_IC) {
- vcpu_set_isr(v, misr.val);
- dtlb_fault(v, vadr);
- } else {
- nested_dtlb(v);
- }
- } else if ((pteval & _PAGE_MA_MASK) != _PAGE_MA_ST) {
- thash_purge_and_insert(v, pteval, itir,
- vadr, D_TLB);
- } else if (vpsr & IA64_PSR_IC) {
- vcpu_set_isr(v, misr.val);
- dtlb_fault(v, vadr);
- } else {
- nested_dtlb(v);
- }
- } else {
- /* Can't read VHPT. */
- if (vpsr & IA64_PSR_IC) {
- vcpu_set_isr(v, misr.val);
- dvhpt_fault(v, vadr);
- } else {
- nested_dtlb(v);
- }
- }
- } else if (type == I_TLB) {
- if (!(vpsr & IA64_PSR_IC))
- misr.ni = 1;
- if (!vhpt_enabled(v, vadr, INST_REF)) {
- vcpu_set_isr(v, misr.val);
- alt_itlb(v, vadr);
- return;
- }
-
- vpta.val = vcpu_get_pta(v);
-
- vhpt_adr = vcpu_thash(v, vadr);
- if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
- /* VHPT successfully read. */
- if (pteval & _PAGE_P) {
- if ((pteval & _PAGE_MA_MASK) == _PAGE_MA_ST) {
- vcpu_set_isr(v, misr.val);
- itlb_fault(v, vadr);
- return ;
- }
- rr = vcpu_get_rr(v, vadr);
- itir = rr & (RR_RID_MASK | RR_PS_MASK);
- thash_purge_and_insert(v, pteval, itir,
- vadr, I_TLB);
- } else {
- vcpu_set_isr(v, misr.val);
- inst_page_not_present(v, vadr);
- }
- } else {
- vcpu_set_isr(v, misr.val);
- ivhpt_fault(v, vadr);
- }
- }
-}
-
-void kvm_vexirq(struct kvm_vcpu *vcpu)
-{
- u64 vpsr, isr;
- struct kvm_pt_regs *regs;
-
- regs = vcpu_regs(vcpu);
- vpsr = VCPU(vcpu, vpsr);
- isr = vpsr & IA64_PSR_RI;
- reflect_interruption(0, isr, 0, 12, regs); /*EXT IRQ*/
-}
-
-void kvm_ia64_handle_irq(struct kvm_vcpu *v)
-{
- struct exit_ctl_data *p = &v->arch.exit_data;
- long psr;
-
- local_irq_save(psr);
- p->exit_reason = EXIT_REASON_EXTERNAL_INTERRUPT;
- vmm_transition(v);
- local_irq_restore(psr);
-
- VMX(v, timer_check) = 1;
-
-}
-
-static void ptc_ga_remote_func(struct kvm_vcpu *v, int pos)
-{
- u64 oldrid, moldrid, oldpsbits, vaddr;
- struct kvm_ptc_g *p = &v->arch.ptc_g_data[pos];
- vaddr = p->vaddr;
-
- oldrid = VMX(v, vrr[0]);
- VMX(v, vrr[0]) = p->rr;
- oldpsbits = VMX(v, psbits[0]);
- VMX(v, psbits[0]) = VMX(v, psbits[REGION_NUMBER(vaddr)]);
- moldrid = ia64_get_rr(0x0);
- ia64_set_rr(0x0, vrrtomrr(p->rr));
- ia64_srlz_d();
-
- vaddr = PAGEALIGN(vaddr, p->ps);
- thash_purge_entries_remote(v, vaddr, p->ps);
-
- VMX(v, vrr[0]) = oldrid;
- VMX(v, psbits[0]) = oldpsbits;
- ia64_set_rr(0x0, moldrid);
- ia64_dv_serialize_data();
-}
-
-static void vcpu_do_resume(struct kvm_vcpu *vcpu)
-{
-	/* Re-init VHPT and VTLB once on resume */
- vcpu->arch.vhpt.num = VHPT_NUM_ENTRIES;
- thash_init(&vcpu->arch.vhpt, VHPT_SHIFT);
- vcpu->arch.vtlb.num = VTLB_NUM_ENTRIES;
- thash_init(&vcpu->arch.vtlb, VTLB_SHIFT);
-
- ia64_set_pta(vcpu->arch.vhpt.pta.val);
-}
-
-static void vmm_sanity_check(struct kvm_vcpu *vcpu)
-{
- struct exit_ctl_data *p = &vcpu->arch.exit_data;
-
- if (!vmm_sanity && p->exit_reason != EXIT_REASON_DEBUG) {
- panic_vm(vcpu, "Failed to do vmm sanity check,"
- "it maybe caused by crashed vmm!!\n\n");
- }
-}
-
-static void kvm_do_resume_op(struct kvm_vcpu *vcpu)
-{
-	vmm_sanity_check(vcpu); /* Guarantee the vcpu is running on a healthy vmm! */
-
- if (test_and_clear_bit(KVM_REQ_RESUME, &vcpu->requests)) {
- vcpu_do_resume(vcpu);
- return;
- }
-
- if (unlikely(test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))) {
- thash_purge_all(vcpu);
- return;
- }
-
- if (test_and_clear_bit(KVM_REQ_PTC_G, &vcpu->requests)) {
- while (vcpu->arch.ptc_g_count > 0)
- ptc_ga_remote_func(vcpu, --vcpu->arch.ptc_g_count);
- }
-}
-
-void vmm_transition(struct kvm_vcpu *vcpu)
-{
- ia64_call_vsa(PAL_VPS_SAVE, (unsigned long)vcpu->arch.vpd,
- 1, 0, 0, 0, 0, 0);
- vmm_trampoline(&vcpu->arch.guest, &vcpu->arch.host);
- ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)vcpu->arch.vpd,
- 1, 0, 0, 0, 0, 0);
- kvm_do_resume_op(vcpu);
-}
-
-void vmm_panic_handler(u64 vec)
-{
- struct kvm_vcpu *vcpu = current_vcpu;
- vmm_sanity = 0;
- panic_vm(vcpu, "Unexpected interruption occurs in VMM, vector:0x%lx\n",
- vec2off[vec]);
-}
diff --git a/arch/ia64/kvm/trampoline.S b/arch/ia64/kvm/trampoline.S
deleted file mode 100644
index 30897d44d61..00000000000
--- a/arch/ia64/kvm/trampoline.S
+++ /dev/null
@@ -1,1038 +0,0 @@
-/* Save all processor states
- *
- * Copyright (c) 2007 Fleming Feng <fleming.feng@intel.com>
- * Copyright (c) 2007 Anthony Xu <anthony.xu@intel.com>
- */
-
-#include <asm/asmmacro.h>
-#include "asm-offsets.h"
-
-
-#define CTX(name) VMM_CTX_##name##_OFFSET
-
- /*
- * r32: context_t base address
- */
-#define SAVE_BRANCH_REGS \
- add r2 = CTX(B0),r32; \
- add r3 = CTX(B1),r32; \
- mov r16 = b0; \
- mov r17 = b1; \
- ;; \
- st8 [r2]=r16,16; \
- st8 [r3]=r17,16; \
- ;; \
- mov r16 = b2; \
- mov r17 = b3; \
- ;; \
- st8 [r2]=r16,16; \
- st8 [r3]=r17,16; \
- ;; \
- mov r16 = b4; \
- mov r17 = b5; \
- ;; \
- st8 [r2]=r16; \
- st8 [r3]=r17; \
- ;;
-
- /*
- * r33: context_t base address
- */
-#define RESTORE_BRANCH_REGS \
- add r2 = CTX(B0),r33; \
- add r3 = CTX(B1),r33; \
- ;; \
- ld8 r16=[r2],16; \
- ld8 r17=[r3],16; \
- ;; \
- mov b0 = r16; \
- mov b1 = r17; \
- ;; \
- ld8 r16=[r2],16; \
- ld8 r17=[r3],16; \
- ;; \
- mov b2 = r16; \
- mov b3 = r17; \
- ;; \
- ld8 r16=[r2]; \
- ld8 r17=[r3]; \
- ;; \
- mov b4=r16; \
- mov b5=r17; \
- ;;
-
-
- /*
- * r32: context_t base address
- * bsw == 1
- * Save all bank1 general registers, r4 ~ r7
- */
-#define SAVE_GENERAL_REGS \
- add r2=CTX(R4),r32; \
- add r3=CTX(R5),r32; \
- ;; \
-.mem.offset 0,0; \
- st8.spill [r2]=r4,16; \
-.mem.offset 8,0; \
- st8.spill [r3]=r5,16; \
- ;; \
-.mem.offset 0,0; \
- st8.spill [r2]=r6,48; \
-.mem.offset 8,0; \
- st8.spill [r3]=r7,48; \
- ;; \
-.mem.offset 0,0; \
- st8.spill [r2]=r12; \
-.mem.offset 8,0; \
- st8.spill [r3]=r13; \
- ;;
-
- /*
- * r33: context_t base address
- * bsw == 1
- */
-#define RESTORE_GENERAL_REGS \
- add r2=CTX(R4),r33; \
- add r3=CTX(R5),r33; \
- ;; \
- ld8.fill r4=[r2],16; \
- ld8.fill r5=[r3],16; \
- ;; \
- ld8.fill r6=[r2],48; \
- ld8.fill r7=[r3],48; \
- ;; \
- ld8.fill r12=[r2]; \
- ld8.fill r13 =[r3]; \
- ;;
-
-
-
-
- /*
- * r32: context_t base address
- */
-#define SAVE_KERNEL_REGS \
- add r2 = CTX(KR0),r32; \
- add r3 = CTX(KR1),r32; \
- mov r16 = ar.k0; \
- mov r17 = ar.k1; \
- ;; \
- st8 [r2] = r16,16; \
- st8 [r3] = r17,16; \
- ;; \
- mov r16 = ar.k2; \
- mov r17 = ar.k3; \
- ;; \
- st8 [r2] = r16,16; \
- st8 [r3] = r17,16; \
- ;; \
- mov r16 = ar.k4; \
- mov r17 = ar.k5; \
- ;; \
- st8 [r2] = r16,16; \
- st8 [r3] = r17,16; \
- ;; \
- mov r16 = ar.k6; \
- mov r17 = ar.k7; \
- ;; \
- st8 [r2] = r16; \
- st8 [r3] = r17; \
- ;;
-
-
-
- /*
- * r33: context_t base address
- */
-#define RESTORE_KERNEL_REGS \
- add r2 = CTX(KR0),r33; \
- add r3 = CTX(KR1),r33; \
- ;; \
- ld8 r16=[r2],16; \
- ld8 r17=[r3],16; \
- ;; \
- mov ar.k0=r16; \
- mov ar.k1=r17; \
- ;; \
- ld8 r16=[r2],16; \
- ld8 r17=[r3],16; \
- ;; \
- mov ar.k2=r16; \
- mov ar.k3=r17; \
- ;; \
- ld8 r16=[r2],16; \
- ld8 r17=[r3],16; \
- ;; \
- mov ar.k4=r16; \
- mov ar.k5=r17; \
- ;; \
- ld8 r16=[r2],16; \
- ld8 r17=[r3],16; \
- ;; \
- mov ar.k6=r16; \
- mov ar.k7=r17; \
- ;;
-
-
-
- /*
- * r32: context_t base address
- */
-#define SAVE_APP_REGS \
- add r2 = CTX(BSPSTORE),r32; \
- mov r16 = ar.bspstore; \
- ;; \
- st8 [r2] = r16,CTX(RNAT)-CTX(BSPSTORE);\
- mov r16 = ar.rnat; \
- ;; \
- st8 [r2] = r16,CTX(FCR)-CTX(RNAT); \
- mov r16 = ar.fcr; \
- ;; \
- st8 [r2] = r16,CTX(EFLAG)-CTX(FCR); \
- mov r16 = ar.eflag; \
- ;; \
- st8 [r2] = r16,CTX(CFLG)-CTX(EFLAG); \
- mov r16 = ar.cflg; \
- ;; \
- st8 [r2] = r16,CTX(FSR)-CTX(CFLG); \
- mov r16 = ar.fsr; \
- ;; \
- st8 [r2] = r16,CTX(FIR)-CTX(FSR); \
- mov r16 = ar.fir; \
- ;; \
- st8 [r2] = r16,CTX(FDR)-CTX(FIR); \
- mov r16 = ar.fdr; \
- ;; \
- st8 [r2] = r16,CTX(UNAT)-CTX(FDR); \
- mov r16 = ar.unat; \
- ;; \
- st8 [r2] = r16,CTX(FPSR)-CTX(UNAT); \
- mov r16 = ar.fpsr; \
- ;; \
- st8 [r2] = r16,CTX(PFS)-CTX(FPSR); \
- mov r16 = ar.pfs; \
- ;; \
- st8 [r2] = r16,CTX(LC)-CTX(PFS); \
- mov r16 = ar.lc; \
- ;; \
- st8 [r2] = r16; \
- ;;
-
- /*
- * r33: context_t base address
- */
-#define RESTORE_APP_REGS \
- add r2=CTX(BSPSTORE),r33; \
- ;; \
- ld8 r16=[r2],CTX(RNAT)-CTX(BSPSTORE); \
- ;; \
- mov ar.bspstore=r16; \
- ld8 r16=[r2],CTX(FCR)-CTX(RNAT); \
- ;; \
- mov ar.rnat=r16; \
- ld8 r16=[r2],CTX(EFLAG)-CTX(FCR); \
- ;; \
- mov ar.fcr=r16; \
- ld8 r16=[r2],CTX(CFLG)-CTX(EFLAG); \
- ;; \
- mov ar.eflag=r16; \
- ld8 r16=[r2],CTX(FSR)-CTX(CFLG); \
- ;; \
- mov ar.cflg=r16; \
- ld8 r16=[r2],CTX(FIR)-CTX(FSR); \
- ;; \
- mov ar.fsr=r16; \
- ld8 r16=[r2],CTX(FDR)-CTX(FIR); \
- ;; \
- mov ar.fir=r16; \
- ld8 r16=[r2],CTX(UNAT)-CTX(FDR); \
- ;; \
- mov ar.fdr=r16; \
- ld8 r16=[r2],CTX(FPSR)-CTX(UNAT); \
- ;; \
- mov ar.unat=r16; \
- ld8 r16=[r2],CTX(PFS)-CTX(FPSR); \
- ;; \
- mov ar.fpsr=r16; \
- ld8 r16=[r2],CTX(LC)-CTX(PFS); \
- ;; \
- mov ar.pfs=r16; \
- ld8 r16=[r2]; \
- ;; \
- mov ar.lc=r16; \
- ;;
-
- /*
- * r32: context_t base address
- */
-#define SAVE_CTL_REGS \
- add r2 = CTX(DCR),r32; \
- mov r16 = cr.dcr; \
- ;; \
- st8 [r2] = r16,CTX(IVA)-CTX(DCR); \
- ;; \
- mov r16 = cr.iva; \
- ;; \
- st8 [r2] = r16,CTX(PTA)-CTX(IVA); \
- ;; \
- mov r16 = cr.pta; \
- ;; \
- st8 [r2] = r16 ; \
- ;;
-
- /*
- * r33: context_t base address
- */
-#define RESTORE_CTL_REGS \
- add r2 = CTX(DCR),r33; \
- ;; \
- ld8 r16 = [r2],CTX(IVA)-CTX(DCR); \
- ;; \
- mov cr.dcr = r16; \
- dv_serialize_data; \
- ;; \
- ld8 r16 = [r2],CTX(PTA)-CTX(IVA); \
- ;; \
- mov cr.iva = r16; \
- dv_serialize_data; \
- ;; \
- ld8 r16 = [r2]; \
- ;; \
- mov cr.pta = r16; \
- dv_serialize_data; \
- ;;
-
-
- /*
- * r32: context_t base address
- */
-#define SAVE_REGION_REGS \
- add r2=CTX(RR0),r32; \
- mov r16=rr[r0]; \
- dep.z r18=1,61,3; \
- ;; \
- st8 [r2]=r16,8; \
- mov r17=rr[r18]; \
- dep.z r18=2,61,3; \
- ;; \
- st8 [r2]=r17,8; \
- mov r16=rr[r18]; \
- dep.z r18=3,61,3; \
- ;; \
- st8 [r2]=r16,8; \
- mov r17=rr[r18]; \
- dep.z r18=4,61,3; \
- ;; \
- st8 [r2]=r17,8; \
- mov r16=rr[r18]; \
- dep.z r18=5,61,3; \
- ;; \
- st8 [r2]=r16,8; \
- mov r17=rr[r18]; \
- dep.z r18=7,61,3; \
- ;; \
- st8 [r2]=r17,16; \
- mov r16=rr[r18]; \
- ;; \
- st8 [r2]=r16,8; \
- ;;
-
- /*
- * r33:context_t base address
- */
-#define RESTORE_REGION_REGS \
- add r2=CTX(RR0),r33;\
- mov r18=r0; \
- ;; \
- ld8 r20=[r2],8; \
- ;; /* rr0 */ \
- ld8 r21=[r2],8; \
- ;; /* rr1 */ \
- ld8 r22=[r2],8; \
- ;; /* rr2 */ \
- ld8 r23=[r2],8; \
- ;; /* rr3 */ \
- ld8 r24=[r2],8; \
- ;; /* rr4 */ \
- ld8 r25=[r2],16; \
- ;; /* rr5 */ \
- ld8 r27=[r2]; \
- ;; /* rr7 */ \
- mov rr[r18]=r20; \
- dep.z r18=1,61,3; \
- ;; /* rr1 */ \
- mov rr[r18]=r21; \
- dep.z r18=2,61,3; \
- ;; /* rr2 */ \
- mov rr[r18]=r22; \
- dep.z r18=3,61,3; \
- ;; /* rr3 */ \
- mov rr[r18]=r23; \
- dep.z r18=4,61,3; \
- ;; /* rr4 */ \
- mov rr[r18]=r24; \
- dep.z r18=5,61,3; \
- ;; /* rr5 */ \
- mov rr[r18]=r25; \
- dep.z r18=7,61,3; \
- ;; /* rr7 */ \
- mov rr[r18]=r27; \
- ;; \
- srlz.i; \
- ;;
-
-
-
- /*
- * r32: context_t base address
- * r36~r39:scratch registers
- */
-#define SAVE_DEBUG_REGS \
- add r2=CTX(IBR0),r32; \
- add r3=CTX(DBR0),r32; \
- mov r16=ibr[r0]; \
- mov r17=dbr[r0]; \
- ;; \
- st8 [r2]=r16,8; \
- st8 [r3]=r17,8; \
- add r18=1,r0; \
- ;; \
- mov r16=ibr[r18]; \
- mov r17=dbr[r18]; \
- ;; \
- st8 [r2]=r16,8; \
- st8 [r3]=r17,8; \
- add r18=2,r0; \
- ;; \
- mov r16=ibr[r18]; \
- mov r17=dbr[r18]; \
- ;; \
- st8 [r2]=r16,8; \
- st8 [r3]=r17,8; \
- add r18=2,r0; \
- ;; \
- mov r16=ibr[r18]; \
- mov r17=dbr[r18]; \
- ;; \
- st8 [r2]=r16,8; \
- st8 [r3]=r17,8; \
- add r18=3,r0; \
- ;; \
- mov r16=ibr[r18]; \
- mov r17=dbr[r18]; \
- ;; \
- st8 [r2]=r16,8; \
- st8 [r3]=r17,8; \
- add r18=4,r0; \
- ;; \
- mov r16=ibr[r18]; \
- mov r17=dbr[r18]; \
- ;; \
- st8 [r2]=r16,8; \
- st8 [r3]=r17,8; \
- add r18=5,r0; \
- ;; \
- mov r16=ibr[r18]; \
- mov r17=dbr[r18]; \
- ;; \
- st8 [r2]=r16,8; \
- st8 [r3]=r17,8; \
- add r18=6,r0; \
- ;; \
- mov r16=ibr[r18]; \
- mov r17=dbr[r18]; \
- ;; \
- st8 [r2]=r16,8; \
- st8 [r3]=r17,8; \
- add r18=7,r0; \
- ;; \
- mov r16=ibr[r18]; \
- mov r17=dbr[r18]; \
- ;; \
- st8 [r2]=r16,8; \
- st8 [r3]=r17,8; \
- ;;
-
-
-/*
- * r33: point to context_t structure
- *	ar.lc is corrupted.
- */
-#define RESTORE_DEBUG_REGS \
- add r2=CTX(IBR0),r33; \
- add r3=CTX(DBR0),r33; \
- mov r16=7; \
- mov r17=r0; \
- ;; \
- mov ar.lc = r16; \
- ;; \
-1: \
- ld8 r18=[r2],8; \
- ld8 r19=[r3],8; \
- ;; \
- mov ibr[r17]=r18; \
- mov dbr[r17]=r19; \
- ;; \
- srlz.i; \
- ;; \
- add r17=1,r17; \
- br.cloop.sptk 1b; \
- ;;
-
-
- /*
- * r32: context_t base address
- */
-#define SAVE_FPU_LOW \
- add r2=CTX(F2),r32; \
- add r3=CTX(F3),r32; \
- ;; \
- stf.spill.nta [r2]=f2,32; \
- stf.spill.nta [r3]=f3,32; \
- ;; \
- stf.spill.nta [r2]=f4,32; \
- stf.spill.nta [r3]=f5,32; \
- ;; \
- stf.spill.nta [r2]=f6,32; \
- stf.spill.nta [r3]=f7,32; \
- ;; \
- stf.spill.nta [r2]=f8,32; \
- stf.spill.nta [r3]=f9,32; \
- ;; \
- stf.spill.nta [r2]=f10,32; \
- stf.spill.nta [r3]=f11,32; \
- ;; \
- stf.spill.nta [r2]=f12,32; \
- stf.spill.nta [r3]=f13,32; \
- ;; \
- stf.spill.nta [r2]=f14,32; \
- stf.spill.nta [r3]=f15,32; \
- ;; \
- stf.spill.nta [r2]=f16,32; \
- stf.spill.nta [r3]=f17,32; \
- ;; \
- stf.spill.nta [r2]=f18,32; \
- stf.spill.nta [r3]=f19,32; \
- ;; \
- stf.spill.nta [r2]=f20,32; \
- stf.spill.nta [r3]=f21,32; \
- ;; \
- stf.spill.nta [r2]=f22,32; \
- stf.spill.nta [r3]=f23,32; \
- ;; \
- stf.spill.nta [r2]=f24,32; \
- stf.spill.nta [r3]=f25,32; \
- ;; \
- stf.spill.nta [r2]=f26,32; \
- stf.spill.nta [r3]=f27,32; \
- ;; \
- stf.spill.nta [r2]=f28,32; \
- stf.spill.nta [r3]=f29,32; \
- ;; \
- stf.spill.nta [r2]=f30; \
- stf.spill.nta [r3]=f31; \
- ;;
-
- /*
- * r32: context_t base address
- */
-#define SAVE_FPU_HIGH \
- add r2=CTX(F32),r32; \
- add r3=CTX(F33),r32; \
- ;; \
- stf.spill.nta [r2]=f32,32; \
- stf.spill.nta [r3]=f33,32; \
- ;; \
- stf.spill.nta [r2]=f34,32; \
- stf.spill.nta [r3]=f35,32; \
- ;; \
- stf.spill.nta [r2]=f36,32; \
- stf.spill.nta [r3]=f37,32; \
- ;; \
- stf.spill.nta [r2]=f38,32; \
- stf.spill.nta [r3]=f39,32; \
- ;; \
- stf.spill.nta [r2]=f40,32; \
- stf.spill.nta [r3]=f41,32; \
- ;; \
- stf.spill.nta [r2]=f42,32; \
- stf.spill.nta [r3]=f43,32; \
- ;; \
- stf.spill.nta [r2]=f44,32; \
- stf.spill.nta [r3]=f45,32; \
- ;; \
- stf.spill.nta [r2]=f46,32; \
- stf.spill.nta [r3]=f47,32; \
- ;; \
- stf.spill.nta [r2]=f48,32; \
- stf.spill.nta [r3]=f49,32; \
- ;; \
- stf.spill.nta [r2]=f50,32; \
- stf.spill.nta [r3]=f51,32; \
- ;; \
- stf.spill.nta [r2]=f52,32; \
- stf.spill.nta [r3]=f53,32; \
- ;; \
- stf.spill.nta [r2]=f54,32; \
- stf.spill.nta [r3]=f55,32; \
- ;; \
- stf.spill.nta [r2]=f56,32; \
- stf.spill.nta [r3]=f57,32; \
- ;; \
- stf.spill.nta [r2]=f58,32; \
- stf.spill.nta [r3]=f59,32; \
- ;; \
- stf.spill.nta [r2]=f60,32; \
- stf.spill.nta [r3]=f61,32; \
- ;; \
- stf.spill.nta [r2]=f62,32; \
- stf.spill.nta [r3]=f63,32; \
- ;; \
- stf.spill.nta [r2]=f64,32; \
- stf.spill.nta [r3]=f65,32; \
- ;; \
- stf.spill.nta [r2]=f66,32; \
- stf.spill.nta [r3]=f67,32; \
- ;; \
- stf.spill.nta [r2]=f68,32; \
- stf.spill.nta [r3]=f69,32; \
- ;; \
- stf.spill.nta [r2]=f70,32; \
- stf.spill.nta [r3]=f71,32; \
- ;; \
- stf.spill.nta [r2]=f72,32; \
- stf.spill.nta [r3]=f73,32; \
- ;; \
- stf.spill.nta [r2]=f74,32; \
- stf.spill.nta [r3]=f75,32; \
- ;; \
- stf.spill.nta [r2]=f76,32; \
- stf.spill.nta [r3]=f77,32; \
- ;; \
- stf.spill.nta [r2]=f78,32; \
- stf.spill.nta [r3]=f79,32; \
- ;; \
- stf.spill.nta [r2]=f80,32; \
- stf.spill.nta [r3]=f81,32; \
- ;; \
- stf.spill.nta [r2]=f82,32; \
- stf.spill.nta [r3]=f83,32; \
- ;; \
- stf.spill.nta [r2]=f84,32; \
- stf.spill.nta [r3]=f85,32; \
- ;; \
- stf.spill.nta [r2]=f86,32; \
- stf.spill.nta [r3]=f87,32; \
- ;; \
- stf.spill.nta [r2]=f88,32; \
- stf.spill.nta [r3]=f89,32; \
- ;; \
- stf.spill.nta [r2]=f90,32; \
- stf.spill.nta [r3]=f91,32; \
- ;; \
- stf.spill.nta [r2]=f92,32; \
- stf.spill.nta [r3]=f93,32; \
- ;; \
- stf.spill.nta [r2]=f94,32; \
- stf.spill.nta [r3]=f95,32; \
- ;; \
- stf.spill.nta [r2]=f96,32; \
- stf.spill.nta [r3]=f97,32; \
- ;; \
- stf.spill.nta [r2]=f98,32; \
- stf.spill.nta [r3]=f99,32; \
- ;; \
- stf.spill.nta [r2]=f100,32; \
- stf.spill.nta [r3]=f101,32; \
- ;; \
- stf.spill.nta [r2]=f102,32; \
- stf.spill.nta [r3]=f103,32; \
- ;; \
- stf.spill.nta [r2]=f104,32; \
- stf.spill.nta [r3]=f105,32; \
- ;; \
- stf.spill.nta [r2]=f106,32; \
- stf.spill.nta [r3]=f107,32; \
- ;; \
- stf.spill.nta [r2]=f108,32; \
- stf.spill.nta [r3]=f109,32; \
- ;; \
- stf.spill.nta [r2]=f110,32; \
- stf.spill.nta [r3]=f111,32; \
- ;; \
- stf.spill.nta [r2]=f112,32; \
- stf.spill.nta [r3]=f113,32; \
- ;; \
- stf.spill.nta [r2]=f114,32; \
- stf.spill.nta [r3]=f115,32; \
- ;; \
- stf.spill.nta [r2]=f116,32; \
- stf.spill.nta [r3]=f117,32; \
- ;; \
- stf.spill.nta [r2]=f118,32; \
- stf.spill.nta [r3]=f119,32; \
- ;; \
- stf.spill.nta [r2]=f120,32; \
- stf.spill.nta [r3]=f121,32; \
- ;; \
- stf.spill.nta [r2]=f122,32; \
- stf.spill.nta [r3]=f123,32; \
- ;; \
- stf.spill.nta [r2]=f124,32; \
- stf.spill.nta [r3]=f125,32; \
- ;; \
- stf.spill.nta [r2]=f126; \
- stf.spill.nta [r3]=f127; \
- ;;
-
- /*
- * r33: point to context_t structure
- */
-#define RESTORE_FPU_LOW \
- add r2 = CTX(F2), r33; \
- add r3 = CTX(F3), r33; \
- ;; \
- ldf.fill.nta f2 = [r2], 32; \
- ldf.fill.nta f3 = [r3], 32; \
- ;; \
- ldf.fill.nta f4 = [r2], 32; \
- ldf.fill.nta f5 = [r3], 32; \
- ;; \
- ldf.fill.nta f6 = [r2], 32; \
- ldf.fill.nta f7 = [r3], 32; \
- ;; \
- ldf.fill.nta f8 = [r2], 32; \
- ldf.fill.nta f9 = [r3], 32; \
- ;; \
- ldf.fill.nta f10 = [r2], 32; \
- ldf.fill.nta f11 = [r3], 32; \
- ;; \
- ldf.fill.nta f12 = [r2], 32; \
- ldf.fill.nta f13 = [r3], 32; \
- ;; \
- ldf.fill.nta f14 = [r2], 32; \
- ldf.fill.nta f15 = [r3], 32; \
- ;; \
- ldf.fill.nta f16 = [r2], 32; \
- ldf.fill.nta f17 = [r3], 32; \
- ;; \
- ldf.fill.nta f18 = [r2], 32; \
- ldf.fill.nta f19 = [r3], 32; \
- ;; \
- ldf.fill.nta f20 = [r2], 32; \
- ldf.fill.nta f21 = [r3], 32; \
- ;; \
- ldf.fill.nta f22 = [r2], 32; \
- ldf.fill.nta f23 = [r3], 32; \
- ;; \
- ldf.fill.nta f24 = [r2], 32; \
- ldf.fill.nta f25 = [r3], 32; \
- ;; \
- ldf.fill.nta f26 = [r2], 32; \
- ldf.fill.nta f27 = [r3], 32; \
- ;; \
- ldf.fill.nta f28 = [r2], 32; \
- ldf.fill.nta f29 = [r3], 32; \
- ;; \
- ldf.fill.nta f30 = [r2], 32; \
- ldf.fill.nta f31 = [r3], 32; \
- ;;
-
-
-
- /*
- * r33: point to context_t structure
- */
-#define RESTORE_FPU_HIGH \
- add r2 = CTX(F32), r33; \
- add r3 = CTX(F33), r33; \
- ;; \
- ldf.fill.nta f32 = [r2], 32; \
- ldf.fill.nta f33 = [r3], 32; \
- ;; \
- ldf.fill.nta f34 = [r2], 32; \
- ldf.fill.nta f35 = [r3], 32; \
- ;; \
- ldf.fill.nta f36 = [r2], 32; \
- ldf.fill.nta f37 = [r3], 32; \
- ;; \
- ldf.fill.nta f38 = [r2], 32; \
- ldf.fill.nta f39 = [r3], 32; \
- ;; \
- ldf.fill.nta f40 = [r2], 32; \
- ldf.fill.nta f41 = [r3], 32; \
- ;; \
- ldf.fill.nta f42 = [r2], 32; \
- ldf.fill.nta f43 = [r3], 32; \
- ;; \
- ldf.fill.nta f44 = [r2], 32; \
- ldf.fill.nta f45 = [r3], 32; \
- ;; \
- ldf.fill.nta f46 = [r2], 32; \
- ldf.fill.nta f47 = [r3], 32; \
- ;; \
- ldf.fill.nta f48 = [r2], 32; \
- ldf.fill.nta f49 = [r3], 32; \
- ;; \
- ldf.fill.nta f50 = [r2], 32; \
- ldf.fill.nta f51 = [r3], 32; \
- ;; \
- ldf.fill.nta f52 = [r2], 32; \
- ldf.fill.nta f53 = [r3], 32; \
- ;; \
- ldf.fill.nta f54 = [r2], 32; \
- ldf.fill.nta f55 = [r3], 32; \
- ;; \
- ldf.fill.nta f56 = [r2], 32; \
- ldf.fill.nta f57 = [r3], 32; \
- ;; \
- ldf.fill.nta f58 = [r2], 32; \
- ldf.fill.nta f59 = [r3], 32; \
- ;; \
- ldf.fill.nta f60 = [r2], 32; \
- ldf.fill.nta f61 = [r3], 32; \
- ;; \
- ldf.fill.nta f62 = [r2], 32; \
- ldf.fill.nta f63 = [r3], 32; \
- ;; \
- ldf.fill.nta f64 = [r2], 32; \
- ldf.fill.nta f65 = [r3], 32; \
- ;; \
- ldf.fill.nta f66 = [r2], 32; \
- ldf.fill.nta f67 = [r3], 32; \
- ;; \
- ldf.fill.nta f68 = [r2], 32; \
- ldf.fill.nta f69 = [r3], 32; \
- ;; \
- ldf.fill.nta f70 = [r2], 32; \
- ldf.fill.nta f71 = [r3], 32; \
- ;; \
- ldf.fill.nta f72 = [r2], 32; \
- ldf.fill.nta f73 = [r3], 32; \
- ;; \
- ldf.fill.nta f74 = [r2], 32; \
- ldf.fill.nta f75 = [r3], 32; \
- ;; \
- ldf.fill.nta f76 = [r2], 32; \
- ldf.fill.nta f77 = [r3], 32; \
- ;; \
- ldf.fill.nta f78 = [r2], 32; \
- ldf.fill.nta f79 = [r3], 32; \
- ;; \
- ldf.fill.nta f80 = [r2], 32; \
- ldf.fill.nta f81 = [r3], 32; \
- ;; \
- ldf.fill.nta f82 = [r2], 32; \
- ldf.fill.nta f83 = [r3], 32; \
- ;; \
- ldf.fill.nta f84 = [r2], 32; \
- ldf.fill.nta f85 = [r3], 32; \
- ;; \
- ldf.fill.nta f86 = [r2], 32; \
- ldf.fill.nta f87 = [r3], 32; \
- ;; \
- ldf.fill.nta f88 = [r2], 32; \
- ldf.fill.nta f89 = [r3], 32; \
- ;; \
- ldf.fill.nta f90 = [r2], 32; \
- ldf.fill.nta f91 = [r3], 32; \
- ;; \
- ldf.fill.nta f92 = [r2], 32; \
- ldf.fill.nta f93 = [r3], 32; \
- ;; \
- ldf.fill.nta f94 = [r2], 32; \
- ldf.fill.nta f95 = [r3], 32; \
- ;; \
- ldf.fill.nta f96 = [r2], 32; \
- ldf.fill.nta f97 = [r3], 32; \
- ;; \
- ldf.fill.nta f98 = [r2], 32; \
- ldf.fill.nta f99 = [r3], 32; \
- ;; \
- ldf.fill.nta f100 = [r2], 32; \
- ldf.fill.nta f101 = [r3], 32; \
- ;; \
- ldf.fill.nta f102 = [r2], 32; \
- ldf.fill.nta f103 = [r3], 32; \
- ;; \
- ldf.fill.nta f104 = [r2], 32; \
- ldf.fill.nta f105 = [r3], 32; \
- ;; \
- ldf.fill.nta f106 = [r2], 32; \
- ldf.fill.nta f107 = [r3], 32; \
- ;; \
- ldf.fill.nta f108 = [r2], 32; \
- ldf.fill.nta f109 = [r3], 32; \
- ;; \
- ldf.fill.nta f110 = [r2], 32; \
- ldf.fill.nta f111 = [r3], 32; \
- ;; \
- ldf.fill.nta f112 = [r2], 32; \
- ldf.fill.nta f113 = [r3], 32; \
- ;; \
- ldf.fill.nta f114 = [r2], 32; \
- ldf.fill.nta f115 = [r3], 32; \
- ;; \
- ldf.fill.nta f116 = [r2], 32; \
- ldf.fill.nta f117 = [r3], 32; \
- ;; \
- ldf.fill.nta f118 = [r2], 32; \
- ldf.fill.nta f119 = [r3], 32; \
- ;; \
- ldf.fill.nta f120 = [r2], 32; \
- ldf.fill.nta f121 = [r3], 32; \
- ;; \
- ldf.fill.nta f122 = [r2], 32; \
- ldf.fill.nta f123 = [r3], 32; \
- ;; \
- ldf.fill.nta f124 = [r2], 32; \
- ldf.fill.nta f125 = [r3], 32; \
- ;; \
- ldf.fill.nta f126 = [r2], 32; \
- ldf.fill.nta f127 = [r3], 32; \
- ;;
-
- /*
- * r32: context_t base address
- */
-#define SAVE_PTK_REGS \
- add r2=CTX(PKR0), r32; \
- mov r16=7; \
- ;; \
- mov ar.lc=r16; \
- mov r17=r0; \
- ;; \
-1: \
- mov r18=pkr[r17]; \
- ;; \
- srlz.i; \
- ;; \
- st8 [r2]=r18, 8; \
- ;; \
- add r17 =1,r17; \
- ;; \
- br.cloop.sptk 1b; \
- ;;
-
-/*
- * r33: point to context_t structure
- * ar.lc is corrupted.
- */
-#define RESTORE_PTK_REGS \
- add r2=CTX(PKR0), r33; \
- mov r16=7; \
- ;; \
- mov ar.lc=r16; \
- mov r17=r0; \
- ;; \
-1: \
- ld8 r18=[r2], 8; \
- ;; \
- mov pkr[r17]=r18; \
- ;; \
- srlz.i; \
- ;; \
- add r17 =1,r17; \
- ;; \
- br.cloop.sptk 1b; \
- ;;
-
-
-/*
- * void vmm_trampoline( context_t * from,
- * context_t * to)
- *
- * from: r32
- * to: r33
- *	note: interrupts are disabled before calling this function.
- */
-GLOBAL_ENTRY(vmm_trampoline)
- mov r16 = psr
- adds r2 = CTX(PSR), r32
- ;;
- st8 [r2] = r16, 8 // psr
- mov r17 = pr
- ;;
- st8 [r2] = r17, 8 // pr
- mov r18 = ar.unat
- ;;
- st8 [r2] = r18
- mov r17 = ar.rsc
- ;;
- adds r2 = CTX(RSC),r32
- ;;
- st8 [r2]= r17
- mov ar.rsc =0
- flushrs
- ;;
- SAVE_GENERAL_REGS
- ;;
- SAVE_KERNEL_REGS
- ;;
- SAVE_APP_REGS
- ;;
- SAVE_BRANCH_REGS
- ;;
- SAVE_CTL_REGS
- ;;
- SAVE_REGION_REGS
- ;;
- //SAVE_DEBUG_REGS
- ;;
- rsm psr.dfl
- ;;
- srlz.d
- ;;
- SAVE_FPU_LOW
- ;;
- rsm psr.dfh
- ;;
- srlz.d
- ;;
- SAVE_FPU_HIGH
- ;;
- SAVE_PTK_REGS
- ;;
- RESTORE_PTK_REGS
- ;;
- RESTORE_FPU_HIGH
- ;;
- RESTORE_FPU_LOW
- ;;
- //RESTORE_DEBUG_REGS
- ;;
- RESTORE_REGION_REGS
- ;;
- RESTORE_CTL_REGS
- ;;
- RESTORE_BRANCH_REGS
- ;;
- RESTORE_APP_REGS
- ;;
- RESTORE_KERNEL_REGS
- ;;
- RESTORE_GENERAL_REGS
- ;;
- adds r2=CTX(PSR), r33
- ;;
- ld8 r16=[r2], 8 // psr
- ;;
- mov psr.l=r16
- ;;
- srlz.d
- ;;
- ld8 r16=[r2], 8 // pr
- ;;
- mov pr =r16,-1
- ld8 r16=[r2] // unat
- ;;
- mov ar.unat=r16
- ;;
- adds r2=CTX(RSC),r33
- ;;
- ld8 r16 =[r2]
- ;;
- mov ar.rsc = r16
- ;;
- br.ret.sptk.few b0
-END(vmm_trampoline)
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c
deleted file mode 100644
index 958815c9787..00000000000
--- a/arch/ia64/kvm/vcpu.c
+++ /dev/null
@@ -1,2209 +0,0 @@
-/*
- * kvm_vcpu.c: handling all virtual cpu related things.
- * Copyright (c) 2005, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Shaofan Li (Susue Li) <susie.li@intel.com>
- * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
- * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
- * Xiantao Zhang <xiantao.zhang@intel.com>
- */
-
-#include <linux/kvm_host.h>
-#include <linux/types.h>
-
-#include <asm/processor.h>
-#include <asm/ia64regs.h>
-#include <asm/gcc_intrin.h>
-#include <asm/kregs.h>
-#include <asm/pgtable.h>
-#include <asm/tlb.h>
-
-#include "asm-offsets.h"
-#include "vcpu.h"
-
-/*
- * Special notes:
- * - Indexed by the it/dt/rt sequence
- * - Only existing mode transitions are allowed in this table
- * - RSE is placed in lazy mode when emulating guest partial mode
- * - If gva happens to be in rr0 or rr4, the only allowed case is identity
- *   mapping (gva=gpa), or panic! (How?)
- */
-int mm_switch_table[8][8] = {
- /* 2004/09/12(Kevin): Allow switch to self */
- /*
- * (it,dt,rt): (0,0,0) -> (1,1,1)
- * This kind of transition usually occurs in the very early
- * stage of Linux boot up procedure. Another case is in efi
- * and pal calls. (see "arch/ia64/kernel/head.S")
- *
- * (it,dt,rt): (0,0,0) -> (0,1,1)
- * This kind of transition is found when OSYa exits efi boot
- * service. Due to gva = gpa in this case (Same region),
- *     data access can be satisfied even though the itlb entry for
- *     physical emulation is hit.
- */
- {SW_SELF, 0, 0, SW_NOP, 0, 0, 0, SW_P2V},
- {0, 0, 0, 0, 0, 0, 0, 0},
- {0, 0, 0, 0, 0, 0, 0, 0},
- /*
- * (it,dt,rt): (0,1,1) -> (1,1,1)
- * This kind of transition is found in OSYa.
- *
- * (it,dt,rt): (0,1,1) -> (0,0,0)
- * This kind of transition is found in OSYa
- */
- {SW_NOP, 0, 0, SW_SELF, 0, 0, 0, SW_P2V},
- /* (1,0,0)->(1,1,1) */
- {0, 0, 0, 0, 0, 0, 0, SW_P2V},
- /*
- * (it,dt,rt): (1,0,1) -> (1,1,1)
- * This kind of transition usually occurs when Linux returns
- * from the low level TLB miss handlers.
- * (see "arch/ia64/kernel/ivt.S")
- */
- {0, 0, 0, 0, 0, SW_SELF, 0, SW_P2V},
- {0, 0, 0, 0, 0, 0, 0, 0},
- /*
- * (it,dt,rt): (1,1,1) -> (1,0,1)
- * This kind of transition usually occurs in Linux low level
- * TLB miss handler. (see "arch/ia64/kernel/ivt.S")
- *
- * (it,dt,rt): (1,1,1) -> (0,0,0)
- * This kind of transition usually occurs in pal and efi calls,
- * which requires running in physical mode.
- * (see "arch/ia64/kernel/head.S")
- * (1,1,1)->(1,0,0)
- */
-
- {SW_V2P, 0, 0, 0, SW_V2P, SW_V2P, 0, SW_SELF},
-};
-
-void physical_mode_init(struct kvm_vcpu *vcpu)
-{
- vcpu->arch.mode_flags = GUEST_IN_PHY;
-}
-
-void switch_to_physical_rid(struct kvm_vcpu *vcpu)
-{
- unsigned long psr;
-
- /* Save original virtual mode rr[0] and rr[4] */
- psr = ia64_clear_ic();
- ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_rr0);
- ia64_srlz_d();
- ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_rr4);
- ia64_srlz_d();
-
- ia64_set_psr(psr);
- return;
-}
-
-void switch_to_virtual_rid(struct kvm_vcpu *vcpu)
-{
- unsigned long psr;
-
- psr = ia64_clear_ic();
- ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
- ia64_srlz_d();
- ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
- ia64_srlz_d();
- ia64_set_psr(psr);
- return;
-}
-
-static int mm_switch_action(struct ia64_psr opsr, struct ia64_psr npsr)
-{
- return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)];
-}
-
-void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
- struct ia64_psr new_psr)
-{
- int act;
- act = mm_switch_action(old_psr, new_psr);
- switch (act) {
- case SW_V2P:
- /*printk("V -> P mode transition: (0x%lx -> 0x%lx)\n",
- old_psr.val, new_psr.val);*/
- switch_to_physical_rid(vcpu);
- /*
-		 * Set rse to enforced lazy mode, to prevent active rse
-		 * save/restore when in guest physical mode.
- */
- vcpu->arch.mode_flags |= GUEST_IN_PHY;
- break;
- case SW_P2V:
- switch_to_virtual_rid(vcpu);
- /*
-		 * Recover the old mode, which was saved when entering
-		 * guest physical mode.
- */
- vcpu->arch.mode_flags &= ~GUEST_IN_PHY;
- break;
- case SW_SELF:
- break;
- case SW_NOP:
- break;
- default:
- /* Sanity check */
- break;
- }
- return;
-}
-
-/*
- * In physical mode, inserting tc/tr entries for regions 0 and 4 uses
- * RID[0] and RID[4], which are reserved for physical mode emulation.
- * However, what those inserted tc/tr entries need is the rid for
- * virtual mode, so the original virtual rid must be restored
- * before the insert.
- *
- * Operations which require such a switch include:
- * - insertions (itc.*, itr.*)
- * - purges (ptc.* and ptr.*)
- * - tpa
- * - tak
- * - thash?, ttag?
- * All of the above need the actual virtual rid for the destination entry.
- */
-
-void check_mm_mode_switch(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
- struct ia64_psr new_psr)
-{
-
- if ((old_psr.dt != new_psr.dt)
- || (old_psr.it != new_psr.it)
- || (old_psr.rt != new_psr.rt))
- switch_mm_mode(vcpu, old_psr, new_psr);
-
- return;
-}
-
-
-/*
- * In physical mode, inserting tc/tr entries for regions 0 and 4 uses
- * RID[0] and RID[4], which are reserved for physical mode emulation.
- * However, what those inserted tc/tr entries need is the rid for
- * virtual mode, so the original virtual rid must be restored
- * before the insert.
- *
- * Operations which require such a switch include:
- * - insertions (itc.*, itr.*)
- * - purges (ptc.* and ptr.*)
- * - tpa
- * - tak
- * - thash?, ttag?
- * All of the above need the actual virtual rid for the destination entry.
- */
-
-void prepare_if_physical_mode(struct kvm_vcpu *vcpu)
-{
- if (is_physical_mode(vcpu)) {
- vcpu->arch.mode_flags |= GUEST_PHY_EMUL;
- switch_to_virtual_rid(vcpu);
- }
- return;
-}
-
-/* Recover always follows prepare */
-void recover_if_physical_mode(struct kvm_vcpu *vcpu)
-{
- if (is_physical_mode(vcpu))
- switch_to_physical_rid(vcpu);
- vcpu->arch.mode_flags &= ~GUEST_PHY_EMUL;
- return;
-}
-
-#define RPT(x) ((u16) &((struct kvm_pt_regs *)0)->x)
-
-static u16 gr_info[32] = {
- 0, /* r0 is read-only : WE SHOULD NEVER GET THIS */
- RPT(r1), RPT(r2), RPT(r3),
- RPT(r4), RPT(r5), RPT(r6), RPT(r7),
- RPT(r8), RPT(r9), RPT(r10), RPT(r11),
- RPT(r12), RPT(r13), RPT(r14), RPT(r15),
- RPT(r16), RPT(r17), RPT(r18), RPT(r19),
- RPT(r20), RPT(r21), RPT(r22), RPT(r23),
- RPT(r24), RPT(r25), RPT(r26), RPT(r27),
- RPT(r28), RPT(r29), RPT(r30), RPT(r31)
-};
-
-#define IA64_FIRST_STACKED_GR 32
-#define IA64_FIRST_ROTATING_FR 32
-
-static inline unsigned long
-rotate_reg(unsigned long sor, unsigned long rrb, unsigned long reg)
-{
- reg += rrb;
- if (reg >= sor)
- reg -= sor;
- return reg;
-}
-
-/*
- * Return the (rotated) index for the floating point register
- * REGNUM (REGNUM must range from 32-127;
- * the result is in the range 0-95).
- */
-static inline unsigned long fph_index(struct kvm_pt_regs *regs,
- long regnum)
-{
- unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f;
- return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
-}
-
-/*
- * The inverse of the above: given bspstore and the number of
- * registers, calculate ar.bsp.
- */
-static inline unsigned long *kvm_rse_skip_regs(unsigned long *addr,
- long num_regs)
-{
- long delta = ia64_rse_slot_num(addr) + num_regs;
- int i = 0;
-
- if (num_regs < 0)
- delta -= 0x3e;
- if (delta < 0) {
- while (delta <= -0x3f) {
- i--;
- delta += 0x3f;
- }
- } else {
- while (delta >= 0x3f) {
- i++;
- delta -= 0x3f;
- }
- }
-
- return addr + num_regs + i;
-}
-
-static void get_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
- unsigned long *val, int *nat)
-{
- unsigned long *bsp, *addr, *rnat_addr, *bspstore;
- unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
- unsigned long nat_mask;
- unsigned long old_rsc, new_rsc;
- long sof = (regs->cr_ifs) & 0x7f;
- long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
- long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
- long ridx = r1 - 32;
-
- if (ridx < sor)
- ridx = rotate_reg(sor, rrb_gr, ridx);
-
- old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
- new_rsc = old_rsc&(~(0x3));
- ia64_setreg(_IA64_REG_AR_RSC, new_rsc);
-
- bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
- bsp = kbs + (regs->loadrs >> 19);
-
- addr = kvm_rse_skip_regs(bsp, -sof + ridx);
- nat_mask = 1UL << ia64_rse_slot_num(addr);
- rnat_addr = ia64_rse_rnat_addr(addr);
-
- if (addr >= bspstore) {
- ia64_flushrs();
- ia64_mf();
- bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
- }
- *val = *addr;
- if (nat) {
- if (bspstore < rnat_addr)
- *nat = (int)!!(ia64_getreg(_IA64_REG_AR_RNAT)
- & nat_mask);
- else
- *nat = (int)!!((*rnat_addr) & nat_mask);
- ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
- }
-}
-
-void set_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
- unsigned long val, unsigned long nat)
-{
- unsigned long *bsp, *bspstore, *addr, *rnat_addr;
- unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
- unsigned long nat_mask;
- unsigned long old_rsc, new_rsc, psr;
- unsigned long rnat;
- long sof = (regs->cr_ifs) & 0x7f;
- long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
- long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
- long ridx = r1 - 32;
-
- if (ridx < sor)
- ridx = rotate_reg(sor, rrb_gr, ridx);
-
- old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
- /* put RSC to lazy mode, and set loadrs 0 */
- new_rsc = old_rsc & (~0x3fff0003);
- ia64_setreg(_IA64_REG_AR_RSC, new_rsc);
- bsp = kbs + (regs->loadrs >> 19); /* 16 + 3 */
-
- addr = kvm_rse_skip_regs(bsp, -sof + ridx);
- nat_mask = 1UL << ia64_rse_slot_num(addr);
- rnat_addr = ia64_rse_rnat_addr(addr);
-
- local_irq_save(psr);
- bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
- if (addr >= bspstore) {
-
- ia64_flushrs();
- ia64_mf();
- *addr = val;
- bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
- rnat = ia64_getreg(_IA64_REG_AR_RNAT);
- if (bspstore < rnat_addr)
- rnat = rnat & (~nat_mask);
- else
- *rnat_addr = (*rnat_addr)&(~nat_mask);
-
- ia64_mf();
- ia64_loadrs();
- ia64_setreg(_IA64_REG_AR_RNAT, rnat);
- } else {
- rnat = ia64_getreg(_IA64_REG_AR_RNAT);
- *addr = val;
- if (bspstore < rnat_addr)
- rnat = rnat&(~nat_mask);
- else
- *rnat_addr = (*rnat_addr) & (~nat_mask);
-
- ia64_setreg(_IA64_REG_AR_BSPSTORE, (unsigned long)bspstore);
- ia64_setreg(_IA64_REG_AR_RNAT, rnat);
- }
- local_irq_restore(psr);
- ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
-}
-
-void getreg(unsigned long regnum, unsigned long *val,
- int *nat, struct kvm_pt_regs *regs)
-{
- unsigned long addr, *unat;
- if (regnum >= IA64_FIRST_STACKED_GR) {
- get_rse_reg(regs, regnum, val, nat);
- return;
- }
-
- /*
- * Now look at registers in [0-31] range and init correct UNAT
- */
- addr = (unsigned long)regs;
- unat = &regs->eml_unat;
-
- addr += gr_info[regnum];
-
- *val = *(unsigned long *)addr;
- /*
- * do it only when requested
- */
- if (nat)
- *nat = (*unat >> ((addr >> 3) & 0x3f)) & 0x1UL;
-}
-
-void setreg(unsigned long regnum, unsigned long val,
- int nat, struct kvm_pt_regs *regs)
-{
- unsigned long addr;
- unsigned long bitmask;
- unsigned long *unat;
-
- /*
- * First takes care of stacked registers
- */
- if (regnum >= IA64_FIRST_STACKED_GR) {
- set_rse_reg(regs, regnum, val, nat);
- return;
- }
-
- /*
- * Now look at registers in [0-31] range and init correct UNAT
- */
- addr = (unsigned long)regs;
- unat = &regs->eml_unat;
- /*
- * add offset from base of struct
- * and do it !
- */
- addr += gr_info[regnum];
-
- *(unsigned long *)addr = val;
-
- /*
- * We need to clear the corresponding UNAT bit to fully emulate the load
-	 * UNAT bit_pos = GR[r3]{8:3} from EAS-2.4
- */
- bitmask = 1UL << ((addr >> 3) & 0x3f);
- if (nat)
- *unat |= bitmask;
- else
- *unat &= ~bitmask;
-
-}
-
-u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg)
-{
- struct kvm_pt_regs *regs = vcpu_regs(vcpu);
- unsigned long val;
-
- if (!reg)
- return 0;
- getreg(reg, &val, 0, regs);
- return val;
-}
-
-void vcpu_set_gr(struct kvm_vcpu *vcpu, unsigned long reg, u64 value, int nat)
-{
- struct kvm_pt_regs *regs = vcpu_regs(vcpu);
- long sof = (regs->cr_ifs) & 0x7f;
-
- if (!reg)
- return;
- if (reg >= sof + 32)
- return;
- setreg(reg, value, nat, regs); /* FIXME: handle NATs later*/
-}
-
-void getfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
- struct kvm_pt_regs *regs)
-{
- /* Take floating register rotation into consideration*/
- if (regnum >= IA64_FIRST_ROTATING_FR)
- regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);
-#define CASE_FIXED_FP(reg) \
- case (reg) : \
- ia64_stf_spill(fpval, reg); \
- break
-
- switch (regnum) {
- CASE_FIXED_FP(0);
- CASE_FIXED_FP(1);
- CASE_FIXED_FP(2);
- CASE_FIXED_FP(3);
- CASE_FIXED_FP(4);
- CASE_FIXED_FP(5);
-
- CASE_FIXED_FP(6);
- CASE_FIXED_FP(7);
- CASE_FIXED_FP(8);
- CASE_FIXED_FP(9);
- CASE_FIXED_FP(10);
- CASE_FIXED_FP(11);
-
- CASE_FIXED_FP(12);
- CASE_FIXED_FP(13);
- CASE_FIXED_FP(14);
- CASE_FIXED_FP(15);
- CASE_FIXED_FP(16);
- CASE_FIXED_FP(17);
- CASE_FIXED_FP(18);
- CASE_FIXED_FP(19);
- CASE_FIXED_FP(20);
- CASE_FIXED_FP(21);
- CASE_FIXED_FP(22);
- CASE_FIXED_FP(23);
- CASE_FIXED_FP(24);
- CASE_FIXED_FP(25);
- CASE_FIXED_FP(26);
- CASE_FIXED_FP(27);
- CASE_FIXED_FP(28);
- CASE_FIXED_FP(29);
- CASE_FIXED_FP(30);
- CASE_FIXED_FP(31);
- CASE_FIXED_FP(32);
- CASE_FIXED_FP(33);
- CASE_FIXED_FP(34);
- CASE_FIXED_FP(35);
- CASE_FIXED_FP(36);
- CASE_FIXED_FP(37);
- CASE_FIXED_FP(38);
- CASE_FIXED_FP(39);
- CASE_FIXED_FP(40);
- CASE_FIXED_FP(41);
- CASE_FIXED_FP(42);
- CASE_FIXED_FP(43);
- CASE_FIXED_FP(44);
- CASE_FIXED_FP(45);
- CASE_FIXED_FP(46);
- CASE_FIXED_FP(47);
- CASE_FIXED_FP(48);
- CASE_FIXED_FP(49);
- CASE_FIXED_FP(50);
- CASE_FIXED_FP(51);
- CASE_FIXED_FP(52);
- CASE_FIXED_FP(53);
- CASE_FIXED_FP(54);
- CASE_FIXED_FP(55);
- CASE_FIXED_FP(56);
- CASE_FIXED_FP(57);
- CASE_FIXED_FP(58);
- CASE_FIXED_FP(59);
- CASE_FIXED_FP(60);
- CASE_FIXED_FP(61);
- CASE_FIXED_FP(62);
- CASE_FIXED_FP(63);
- CASE_FIXED_FP(64);
- CASE_FIXED_FP(65);
- CASE_FIXED_FP(66);
- CASE_FIXED_FP(67);
- CASE_FIXED_FP(68);
- CASE_FIXED_FP(69);
- CASE_FIXED_FP(70);
- CASE_FIXED_FP(71);
- CASE_FIXED_FP(72);
- CASE_FIXED_FP(73);
- CASE_FIXED_FP(74);
- CASE_FIXED_FP(75);
- CASE_FIXED_FP(76);
- CASE_FIXED_FP(77);
- CASE_FIXED_FP(78);
- CASE_FIXED_FP(79);
- CASE_FIXED_FP(80);
- CASE_FIXED_FP(81);
- CASE_FIXED_FP(82);
- CASE_FIXED_FP(83);
- CASE_FIXED_FP(84);
- CASE_FIXED_FP(85);
- CASE_FIXED_FP(86);
- CASE_FIXED_FP(87);
- CASE_FIXED_FP(88);
- CASE_FIXED_FP(89);
- CASE_FIXED_FP(90);
- CASE_FIXED_FP(91);
- CASE_FIXED_FP(92);
- CASE_FIXED_FP(93);
- CASE_FIXED_FP(94);
- CASE_FIXED_FP(95);
- CASE_FIXED_FP(96);
- CASE_FIXED_FP(97);
- CASE_FIXED_FP(98);
- CASE_FIXED_FP(99);
- CASE_FIXED_FP(100);
- CASE_FIXED_FP(101);
- CASE_FIXED_FP(102);
- CASE_FIXED_FP(103);
- CASE_FIXED_FP(104);
- CASE_FIXED_FP(105);
- CASE_FIXED_FP(106);
- CASE_FIXED_FP(107);
- CASE_FIXED_FP(108);
- CASE_FIXED_FP(109);
- CASE_FIXED_FP(110);
- CASE_FIXED_FP(111);
- CASE_FIXED_FP(112);
- CASE_FIXED_FP(113);
- CASE_FIXED_FP(114);
- CASE_FIXED_FP(115);
- CASE_FIXED_FP(116);
- CASE_FIXED_FP(117);
- CASE_FIXED_FP(118);
- CASE_FIXED_FP(119);
- CASE_FIXED_FP(120);
- CASE_FIXED_FP(121);
- CASE_FIXED_FP(122);
- CASE_FIXED_FP(123);
- CASE_FIXED_FP(124);
- CASE_FIXED_FP(125);
- CASE_FIXED_FP(126);
- CASE_FIXED_FP(127);
- }
-#undef CASE_FIXED_FP
-}
-
-void setfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
- struct kvm_pt_regs *regs)
-{
- /* Take floating register rotation into consideration*/
- if (regnum >= IA64_FIRST_ROTATING_FR)
- regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);
-
-#define CASE_FIXED_FP(reg) \
- case (reg) : \
- ia64_ldf_fill(reg, fpval); \
- break
-
- switch (regnum) {
- CASE_FIXED_FP(2);
- CASE_FIXED_FP(3);
- CASE_FIXED_FP(4);
- CASE_FIXED_FP(5);
-
- CASE_FIXED_FP(6);
- CASE_FIXED_FP(7);
- CASE_FIXED_FP(8);
- CASE_FIXED_FP(9);
- CASE_FIXED_FP(10);
- CASE_FIXED_FP(11);
-
- CASE_FIXED_FP(12);
- CASE_FIXED_FP(13);
- CASE_FIXED_FP(14);
- CASE_FIXED_FP(15);
- CASE_FIXED_FP(16);
- CASE_FIXED_FP(17);
- CASE_FIXED_FP(18);
- CASE_FIXED_FP(19);
- CASE_FIXED_FP(20);
- CASE_FIXED_FP(21);
- CASE_FIXED_FP(22);
- CASE_FIXED_FP(23);
- CASE_FIXED_FP(24);
- CASE_FIXED_FP(25);
- CASE_FIXED_FP(26);
- CASE_FIXED_FP(27);
- CASE_FIXED_FP(28);
- CASE_FIXED_FP(29);
- CASE_FIXED_FP(30);
- CASE_FIXED_FP(31);
- CASE_FIXED_FP(32);
- CASE_FIXED_FP(33);
- CASE_FIXED_FP(34);
- CASE_FIXED_FP(35);
- CASE_FIXED_FP(36);
- CASE_FIXED_FP(37);
- CASE_FIXED_FP(38);
- CASE_FIXED_FP(39);
- CASE_FIXED_FP(40);
- CASE_FIXED_FP(41);
- CASE_FIXED_FP(42);
- CASE_FIXED_FP(43);
- CASE_FIXED_FP(44);
- CASE_FIXED_FP(45);
- CASE_FIXED_FP(46);
- CASE_FIXED_FP(47);
- CASE_FIXED_FP(48);
- CASE_FIXED_FP(49);
- CASE_FIXED_FP(50);
- CASE_FIXED_FP(51);
- CASE_FIXED_FP(52);
- CASE_FIXED_FP(53);
- CASE_FIXED_FP(54);
- CASE_FIXED_FP(55);
- CASE_FIXED_FP(56);
- CASE_FIXED_FP(57);
- CASE_FIXED_FP(58);
- CASE_FIXED_FP(59);
- CASE_FIXED_FP(60);
- CASE_FIXED_FP(61);
- CASE_FIXED_FP(62);
- CASE_FIXED_FP(63);
- CASE_FIXED_FP(64);
- CASE_FIXED_FP(65);
- CASE_FIXED_FP(66);
- CASE_FIXED_FP(67);
- CASE_FIXED_FP(68);
- CASE_FIXED_FP(69);
- CASE_FIXED_FP(70);
- CASE_FIXED_FP(71);
- CASE_FIXED_FP(72);
- CASE_FIXED_FP(73);
- CASE_FIXED_FP(74);
- CASE_FIXED_FP(75);
- CASE_FIXED_FP(76);
- CASE_FIXED_FP(77);
- CASE_FIXED_FP(78);
- CASE_FIXED_FP(79);
- CASE_FIXED_FP(80);
- CASE_FIXED_FP(81);
- CASE_FIXED_FP(82);
- CASE_FIXED_FP(83);
- CASE_FIXED_FP(84);
- CASE_FIXED_FP(85);
- CASE_FIXED_FP(86);
- CASE_FIXED_FP(87);
- CASE_FIXED_FP(88);
- CASE_FIXED_FP(89);
- CASE_FIXED_FP(90);
- CASE_FIXED_FP(91);
- CASE_FIXED_FP(92);
- CASE_FIXED_FP(93);
- CASE_FIXED_FP(94);
- CASE_FIXED_FP(95);
- CASE_FIXED_FP(96);
- CASE_FIXED_FP(97);
- CASE_FIXED_FP(98);
- CASE_FIXED_FP(99);
- CASE_FIXED_FP(100);
- CASE_FIXED_FP(101);
- CASE_FIXED_FP(102);
- CASE_FIXED_FP(103);
- CASE_FIXED_FP(104);
- CASE_FIXED_FP(105);
- CASE_FIXED_FP(106);
- CASE_FIXED_FP(107);
- CASE_FIXED_FP(108);
- CASE_FIXED_FP(109);
- CASE_FIXED_FP(110);
- CASE_FIXED_FP(111);
- CASE_FIXED_FP(112);
- CASE_FIXED_FP(113);
- CASE_FIXED_FP(114);
- CASE_FIXED_FP(115);
- CASE_FIXED_FP(116);
- CASE_FIXED_FP(117);
- CASE_FIXED_FP(118);
- CASE_FIXED_FP(119);
- CASE_FIXED_FP(120);
- CASE_FIXED_FP(121);
- CASE_FIXED_FP(122);
- CASE_FIXED_FP(123);
- CASE_FIXED_FP(124);
- CASE_FIXED_FP(125);
- CASE_FIXED_FP(126);
- CASE_FIXED_FP(127);
- }
-}
-
-void vcpu_get_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
- struct ia64_fpreg *val)
-{
- struct kvm_pt_regs *regs = vcpu_regs(vcpu);
-
- getfpreg(reg, val, regs); /* FIXME: handle NATs later*/
-}
-
-void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
- struct ia64_fpreg *val)
-{
- struct kvm_pt_regs *regs = vcpu_regs(vcpu);
-
- if (reg > 1)
- setfpreg(reg, val, regs); /* FIXME: handle NATs later*/
-}
-
-/*
- * The Altix RTC is mapped specially here for the vmm module
- */
-#define SN_RTC_BASE (u64 *)(KVM_VMM_BASE+(1UL<<KVM_VMM_SHIFT))
-static long kvm_get_itc(struct kvm_vcpu *vcpu)
-{
-#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
- struct kvm *kvm = (struct kvm *)KVM_VM_BASE;
-
- if (kvm->arch.is_sn2)
- return (*SN_RTC_BASE);
- else
-#endif
- return ia64_getreg(_IA64_REG_AR_ITC);
-}
-
-/************************************************************************
- * lsapic timer
- ***********************************************************************/
-u64 vcpu_get_itc(struct kvm_vcpu *vcpu)
-{
- unsigned long guest_itc;
- guest_itc = VMX(vcpu, itc_offset) + kvm_get_itc(vcpu);
-
- if (guest_itc >= VMX(vcpu, last_itc)) {
- VMX(vcpu, last_itc) = guest_itc;
- return guest_itc;
- } else
- return VMX(vcpu, last_itc);
-}
-
-static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val);
-static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
-{
- struct kvm_vcpu *v;
- struct kvm *kvm;
- int i;
- long itc_offset = val - kvm_get_itc(vcpu);
- unsigned long vitv = VCPU(vcpu, itv);
-
- kvm = (struct kvm *)KVM_VM_BASE;
-
- if (kvm_vcpu_is_bsp(vcpu)) {
- for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) {
- v = (struct kvm_vcpu *)((char *)vcpu +
- sizeof(struct kvm_vcpu_data) * i);
- VMX(v, itc_offset) = itc_offset;
- VMX(v, last_itc) = 0;
- }
- }
- VMX(vcpu, last_itc) = 0;
- if (VCPU(vcpu, itm) <= val) {
- VMX(vcpu, itc_check) = 0;
- vcpu_unpend_interrupt(vcpu, vitv);
- } else {
- VMX(vcpu, itc_check) = 1;
- vcpu_set_itm(vcpu, VCPU(vcpu, itm));
- }
-
-}
-
-static inline u64 vcpu_get_itm(struct kvm_vcpu *vcpu)
-{
- return ((u64)VCPU(vcpu, itm));
-}
-
-static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val)
-{
- unsigned long vitv = VCPU(vcpu, itv);
- VCPU(vcpu, itm) = val;
-
- if (val > vcpu_get_itc(vcpu)) {
- VMX(vcpu, itc_check) = 1;
- vcpu_unpend_interrupt(vcpu, vitv);
- VMX(vcpu, timer_pending) = 0;
- } else
- VMX(vcpu, itc_check) = 0;
-}
-
-#define ITV_VECTOR(itv) (itv&0xff)
-#define ITV_IRQ_MASK(itv) (itv&(1<<16))
-
-static inline void vcpu_set_itv(struct kvm_vcpu *vcpu, u64 val)
-{
- VCPU(vcpu, itv) = val;
- if (!ITV_IRQ_MASK(val) && vcpu->arch.timer_pending) {
- vcpu_pend_interrupt(vcpu, ITV_VECTOR(val));
- vcpu->arch.timer_pending = 0;
- }
-}
-
-static inline void vcpu_set_eoi(struct kvm_vcpu *vcpu, u64 val)
-{
- int vec;
-
- vec = highest_inservice_irq(vcpu);
- if (vec == NULL_VECTOR)
- return;
- VMX(vcpu, insvc[vec >> 6]) &= ~(1UL << (vec & 63));
- VCPU(vcpu, eoi) = 0;
- vcpu->arch.irq_new_pending = 1;
-
-}
-
-/* See Table 5-8 in SDM vol2 for the definition */
-int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice)
-{
- union ia64_tpr vtpr;
-
- vtpr.val = VCPU(vcpu, tpr);
-
- if (h_inservice == NMI_VECTOR)
- return IRQ_MASKED_BY_INSVC;
-
- if (h_pending == NMI_VECTOR) {
- /* Non Maskable Interrupt */
- return IRQ_NO_MASKED;
- }
-
- if (h_inservice == ExtINT_VECTOR)
- return IRQ_MASKED_BY_INSVC;
-
- if (h_pending == ExtINT_VECTOR) {
- if (vtpr.mmi) {
- /* mask all external IRQ */
- return IRQ_MASKED_BY_VTPR;
- } else
- return IRQ_NO_MASKED;
- }
-
- if (is_higher_irq(h_pending, h_inservice)) {
- if (is_higher_class(h_pending, vtpr.mic + (vtpr.mmi << 4)))
- return IRQ_NO_MASKED;
- else
- return IRQ_MASKED_BY_VTPR;
- } else {
- return IRQ_MASKED_BY_INSVC;
- }
-}
-
-void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
-{
- long spsr;
- int ret;
-
- local_irq_save(spsr);
- ret = test_and_set_bit(vec, &VCPU(vcpu, irr[0]));
- local_irq_restore(spsr);
-
- vcpu->arch.irq_new_pending = 1;
-}
-
-void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
-{
- long spsr;
- int ret;
-
- local_irq_save(spsr);
- ret = test_and_clear_bit(vec, &VCPU(vcpu, irr[0]));
- local_irq_restore(spsr);
- if (ret) {
- vcpu->arch.irq_new_pending = 1;
- wmb();
- }
-}
-
-void update_vhpi(struct kvm_vcpu *vcpu, int vec)
-{
- u64 vhpi;
-
- if (vec == NULL_VECTOR)
- vhpi = 0;
- else if (vec == NMI_VECTOR)
- vhpi = 32;
- else if (vec == ExtINT_VECTOR)
- vhpi = 16;
- else
- vhpi = vec >> 4;
-
- VCPU(vcpu, vhpi) = vhpi;
- if (VCPU(vcpu, vac).a_int)
- ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT,
- (u64)vcpu->arch.vpd, 0, 0, 0, 0, 0, 0);
-}
-
-u64 vcpu_get_ivr(struct kvm_vcpu *vcpu)
-{
- int vec, h_inservice, mask;
-
- vec = highest_pending_irq(vcpu);
- h_inservice = highest_inservice_irq(vcpu);
- mask = irq_masked(vcpu, vec, h_inservice);
- if (vec == NULL_VECTOR || mask == IRQ_MASKED_BY_INSVC) {
- if (VCPU(vcpu, vhpi))
- update_vhpi(vcpu, NULL_VECTOR);
- return IA64_SPURIOUS_INT_VECTOR;
- }
- if (mask == IRQ_MASKED_BY_VTPR) {
- update_vhpi(vcpu, vec);
- return IA64_SPURIOUS_INT_VECTOR;
- }
- VMX(vcpu, insvc[vec >> 6]) |= (1UL << (vec & 63));
- vcpu_unpend_interrupt(vcpu, vec);
- return (u64)vec;
-}
-
-/**************************************************************************
- Privileged operation emulation routines
- **************************************************************************/
-u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr)
-{
- union ia64_pta vpta;
- union ia64_rr vrr;
- u64 pval;
- u64 vhpt_offset;
-
- vpta.val = vcpu_get_pta(vcpu);
- vrr.val = vcpu_get_rr(vcpu, vadr);
- vhpt_offset = ((vadr >> vrr.ps) << 3) & ((1UL << (vpta.size)) - 1);
- if (vpta.vf) {
- pval = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr.val,
- vpta.val, 0, 0, 0, 0);
- } else {
- pval = (vadr & VRN_MASK) | vhpt_offset |
- (vpta.val << 3 >> (vpta.size + 3) << (vpta.size));
- }
- return pval;
-}
-
-u64 vcpu_ttag(struct kvm_vcpu *vcpu, u64 vadr)
-{
- union ia64_rr vrr;
- union ia64_pta vpta;
- u64 pval;
-
- vpta.val = vcpu_get_pta(vcpu);
- vrr.val = vcpu_get_rr(vcpu, vadr);
- if (vpta.vf) {
- pval = ia64_call_vsa(PAL_VPS_TTAG, vadr, vrr.val,
- 0, 0, 0, 0, 0);
- } else
- pval = 1;
-
- return pval;
-}
-
-u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr)
-{
- struct thash_data *data;
- union ia64_pta vpta;
- u64 key;
-
- vpta.val = vcpu_get_pta(vcpu);
- if (vpta.vf == 0) {
- key = 1;
- return key;
- }
- data = vtlb_lookup(vcpu, vadr, D_TLB);
- if (!data || !data->p)
- key = 1;
- else
- key = data->key;
-
- return key;
-}
-
-void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long thash, vadr;
-
- vadr = vcpu_get_gr(vcpu, inst.M46.r3);
- thash = vcpu_thash(vcpu, vadr);
- vcpu_set_gr(vcpu, inst.M46.r1, thash, 0);
-}
-
-void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long tag, vadr;
-
- vadr = vcpu_get_gr(vcpu, inst.M46.r3);
- tag = vcpu_ttag(vcpu, vadr);
- vcpu_set_gr(vcpu, inst.M46.r1, tag, 0);
-}
-
-int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, unsigned long *padr)
-{
- struct thash_data *data;
- union ia64_isr visr, pt_isr;
- struct kvm_pt_regs *regs;
- struct ia64_psr vpsr;
-
- regs = vcpu_regs(vcpu);
- pt_isr.val = VMX(vcpu, cr_isr);
- visr.val = 0;
- visr.ei = pt_isr.ei;
- visr.ir = pt_isr.ir;
- vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
- visr.na = 1;
-
- data = vhpt_lookup(vadr);
- if (data) {
- if (data->p == 0) {
- vcpu_set_isr(vcpu, visr.val);
- data_page_not_present(vcpu, vadr);
- return IA64_FAULT;
- } else if (data->ma == VA_MATTR_NATPAGE) {
- vcpu_set_isr(vcpu, visr.val);
- dnat_page_consumption(vcpu, vadr);
- return IA64_FAULT;
- } else {
- *padr = (data->gpaddr >> data->ps << data->ps) |
- (vadr & (PSIZE(data->ps) - 1));
- return IA64_NO_FAULT;
- }
- }
-
- data = vtlb_lookup(vcpu, vadr, D_TLB);
- if (data) {
- if (data->p == 0) {
- vcpu_set_isr(vcpu, visr.val);
- data_page_not_present(vcpu, vadr);
- return IA64_FAULT;
- } else if (data->ma == VA_MATTR_NATPAGE) {
- vcpu_set_isr(vcpu, visr.val);
- dnat_page_consumption(vcpu, vadr);
- return IA64_FAULT;
- } else{
- *padr = ((data->ppn >> (data->ps - 12)) << data->ps)
- | (vadr & (PSIZE(data->ps) - 1));
- return IA64_NO_FAULT;
- }
- }
- if (!vhpt_enabled(vcpu, vadr, NA_REF)) {
- if (vpsr.ic) {
- vcpu_set_isr(vcpu, visr.val);
- alt_dtlb(vcpu, vadr);
- return IA64_FAULT;
- } else {
- nested_dtlb(vcpu);
- return IA64_FAULT;
- }
- } else {
- if (vpsr.ic) {
- vcpu_set_isr(vcpu, visr.val);
- dvhpt_fault(vcpu, vadr);
- return IA64_FAULT;
- } else{
- nested_dtlb(vcpu);
- return IA64_FAULT;
- }
- }
-
- return IA64_NO_FAULT;
-}
-
-int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long r1, r3;
-
- r3 = vcpu_get_gr(vcpu, inst.M46.r3);
-
- if (vcpu_tpa(vcpu, r3, &r1))
- return IA64_FAULT;
-
- vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
- return(IA64_NO_FAULT);
-}
-
-void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long r1, r3;
-
- r3 = vcpu_get_gr(vcpu, inst.M46.r3);
- r1 = vcpu_tak(vcpu, r3);
- vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
-}
-
-/************************************
- * Insert/Purge translation register/cache
- ************************************/
-void vcpu_itc_i(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
-{
- thash_purge_and_insert(vcpu, pte, itir, ifa, I_TLB);
-}
-
-void vcpu_itc_d(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
-{
- thash_purge_and_insert(vcpu, pte, itir, ifa, D_TLB);
-}
-
-void vcpu_itr_i(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
-{
- u64 ps, va, rid;
- struct thash_data *p_itr;
-
- ps = itir_ps(itir);
- va = PAGEALIGN(ifa, ps);
- pte &= ~PAGE_FLAGS_RV_MASK;
- rid = vcpu_get_rr(vcpu, ifa);
- rid = rid & RR_RID_MASK;
- p_itr = (struct thash_data *)&vcpu->arch.itrs[slot];
- vcpu_set_tr(p_itr, pte, itir, va, rid);
- vcpu_quick_region_set(VMX(vcpu, itr_regions), va);
-}
-
-
-void vcpu_itr_d(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
-{
- u64 gpfn;
- u64 ps, va, rid;
- struct thash_data *p_dtr;
-
- ps = itir_ps(itir);
- va = PAGEALIGN(ifa, ps);
- pte &= ~PAGE_FLAGS_RV_MASK;
-
- if (ps != _PAGE_SIZE_16M)
- thash_purge_entries(vcpu, va, ps);
- gpfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
- if (__gpfn_is_io(gpfn))
- pte |= VTLB_PTE_IO;
- rid = vcpu_get_rr(vcpu, va);
- rid = rid & RR_RID_MASK;
- p_dtr = (struct thash_data *)&vcpu->arch.dtrs[slot];
- vcpu_set_tr((struct thash_data *)&vcpu->arch.dtrs[slot],
- pte, itir, va, rid);
- vcpu_quick_region_set(VMX(vcpu, dtr_regions), va);
-}
-
-void vcpu_ptr_d(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
-{
- int index;
- u64 va;
-
- va = PAGEALIGN(ifa, ps);
- while ((index = vtr_find_overlap(vcpu, va, ps, D_TLB)) >= 0)
- vcpu->arch.dtrs[index].page_flags = 0;
-
- thash_purge_entries(vcpu, va, ps);
-}
-
-void vcpu_ptr_i(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
-{
- int index;
- u64 va;
-
- va = PAGEALIGN(ifa, ps);
- while ((index = vtr_find_overlap(vcpu, va, ps, I_TLB)) >= 0)
- vcpu->arch.itrs[index].page_flags = 0;
-
- thash_purge_entries(vcpu, va, ps);
-}
-
-void vcpu_ptc_l(struct kvm_vcpu *vcpu, u64 va, u64 ps)
-{
- va = PAGEALIGN(va, ps);
- thash_purge_entries(vcpu, va, ps);
-}
-
-void vcpu_ptc_e(struct kvm_vcpu *vcpu, u64 va)
-{
- thash_purge_all(vcpu);
-}
-
-void vcpu_ptc_ga(struct kvm_vcpu *vcpu, u64 va, u64 ps)
-{
- struct exit_ctl_data *p = &vcpu->arch.exit_data;
- long psr;
- local_irq_save(psr);
- p->exit_reason = EXIT_REASON_PTC_G;
-
- p->u.ptc_g_data.rr = vcpu_get_rr(vcpu, va);
- p->u.ptc_g_data.vaddr = va;
- p->u.ptc_g_data.ps = ps;
- vmm_transition(vcpu);
- /* Do Local Purge Here*/
- vcpu_ptc_l(vcpu, va, ps);
- local_irq_restore(psr);
-}
-
-
-void vcpu_ptc_g(struct kvm_vcpu *vcpu, u64 va, u64 ps)
-{
- vcpu_ptc_ga(vcpu, va, ps);
-}
-
-void kvm_ptc_e(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long ifa;
-
- ifa = vcpu_get_gr(vcpu, inst.M45.r3);
- vcpu_ptc_e(vcpu, ifa);
-}
-
-void kvm_ptc_g(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long ifa, itir;
-
- ifa = vcpu_get_gr(vcpu, inst.M45.r3);
- itir = vcpu_get_gr(vcpu, inst.M45.r2);
- vcpu_ptc_g(vcpu, ifa, itir_ps(itir));
-}
-
-void kvm_ptc_ga(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long ifa, itir;
-
- ifa = vcpu_get_gr(vcpu, inst.M45.r3);
- itir = vcpu_get_gr(vcpu, inst.M45.r2);
- vcpu_ptc_ga(vcpu, ifa, itir_ps(itir));
-}
-
-void kvm_ptc_l(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long ifa, itir;
-
- ifa = vcpu_get_gr(vcpu, inst.M45.r3);
- itir = vcpu_get_gr(vcpu, inst.M45.r2);
- vcpu_ptc_l(vcpu, ifa, itir_ps(itir));
-}
-
-void kvm_ptr_d(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long ifa, itir;
-
- ifa = vcpu_get_gr(vcpu, inst.M45.r3);
- itir = vcpu_get_gr(vcpu, inst.M45.r2);
- vcpu_ptr_d(vcpu, ifa, itir_ps(itir));
-}
-
-void kvm_ptr_i(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long ifa, itir;
-
- ifa = vcpu_get_gr(vcpu, inst.M45.r3);
- itir = vcpu_get_gr(vcpu, inst.M45.r2);
- vcpu_ptr_i(vcpu, ifa, itir_ps(itir));
-}
-
-void kvm_itr_d(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long itir, ifa, pte, slot;
-
- slot = vcpu_get_gr(vcpu, inst.M45.r3);
- pte = vcpu_get_gr(vcpu, inst.M45.r2);
- itir = vcpu_get_itir(vcpu);
- ifa = vcpu_get_ifa(vcpu);
- vcpu_itr_d(vcpu, slot, pte, itir, ifa);
-}
-
-
-
-void kvm_itr_i(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long itir, ifa, pte, slot;
-
- slot = vcpu_get_gr(vcpu, inst.M45.r3);
- pte = vcpu_get_gr(vcpu, inst.M45.r2);
- itir = vcpu_get_itir(vcpu);
- ifa = vcpu_get_ifa(vcpu);
- vcpu_itr_i(vcpu, slot, pte, itir, ifa);
-}
-
-void kvm_itc_d(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long itir, ifa, pte;
-
- itir = vcpu_get_itir(vcpu);
- ifa = vcpu_get_ifa(vcpu);
- pte = vcpu_get_gr(vcpu, inst.M45.r2);
- vcpu_itc_d(vcpu, pte, itir, ifa);
-}
-
-void kvm_itc_i(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long itir, ifa, pte;
-
- itir = vcpu_get_itir(vcpu);
- ifa = vcpu_get_ifa(vcpu);
- pte = vcpu_get_gr(vcpu, inst.M45.r2);
- vcpu_itc_i(vcpu, pte, itir, ifa);
-}
-
-/*************************************
- * Moves to semi-privileged registers
- *************************************/
-
-void kvm_mov_to_ar_imm(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long imm;
-
- if (inst.M30.s)
- imm = -inst.M30.imm;
- else
- imm = inst.M30.imm;
-
- vcpu_set_itc(vcpu, imm);
-}
-
-void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long r2;
-
- r2 = vcpu_get_gr(vcpu, inst.M29.r2);
- vcpu_set_itc(vcpu, r2);
-}
-
-void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long r1;
-
- r1 = vcpu_get_itc(vcpu);
- vcpu_set_gr(vcpu, inst.M31.r1, r1, 0);
-}
-
-/**************************************************************************
- struct kvm_vcpu protection key register access routines
- **************************************************************************/
-
-unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg)
-{
- return ((unsigned long)ia64_get_pkr(reg));
-}
-
-void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val)
-{
- ia64_set_pkr(reg, val);
-}
-
-/********************************
- * Moves to privileged registers
- ********************************/
-unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg,
- unsigned long val)
-{
- union ia64_rr oldrr, newrr;
- unsigned long rrval;
- struct exit_ctl_data *p = &vcpu->arch.exit_data;
- unsigned long psr;
-
- oldrr.val = vcpu_get_rr(vcpu, reg);
- newrr.val = val;
- vcpu->arch.vrr[reg >> VRN_SHIFT] = val;
-
- switch ((unsigned long)(reg >> VRN_SHIFT)) {
- case VRN6:
- vcpu->arch.vmm_rr = vrrtomrr(val);
- local_irq_save(psr);
- p->exit_reason = EXIT_REASON_SWITCH_RR6;
- vmm_transition(vcpu);
- local_irq_restore(psr);
- break;
- case VRN4:
- rrval = vrrtomrr(val);
- vcpu->arch.metaphysical_saved_rr4 = rrval;
- if (!is_physical_mode(vcpu))
- ia64_set_rr(reg, rrval);
- break;
- case VRN0:
- rrval = vrrtomrr(val);
- vcpu->arch.metaphysical_saved_rr0 = rrval;
- if (!is_physical_mode(vcpu))
- ia64_set_rr(reg, rrval);
- break;
- default:
- ia64_set_rr(reg, vrrtomrr(val));
- break;
- }
-
- return (IA64_NO_FAULT);
-}
-
-void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long r3, r2;
-
- r3 = vcpu_get_gr(vcpu, inst.M42.r3);
- r2 = vcpu_get_gr(vcpu, inst.M42.r2);
- vcpu_set_rr(vcpu, r3, r2);
-}
-
-void kvm_mov_to_dbr(struct kvm_vcpu *vcpu, INST64 inst)
-{
-}
-
-void kvm_mov_to_ibr(struct kvm_vcpu *vcpu, INST64 inst)
-{
-}
-
-void kvm_mov_to_pmc(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long r3, r2;
-
- r3 = vcpu_get_gr(vcpu, inst.M42.r3);
- r2 = vcpu_get_gr(vcpu, inst.M42.r2);
- vcpu_set_pmc(vcpu, r3, r2);
-}
-
-void kvm_mov_to_pmd(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long r3, r2;
-
- r3 = vcpu_get_gr(vcpu, inst.M42.r3);
- r2 = vcpu_get_gr(vcpu, inst.M42.r2);
- vcpu_set_pmd(vcpu, r3, r2);
-}
-
-void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst)
-{
- u64 r3, r2;
-
- r3 = vcpu_get_gr(vcpu, inst.M42.r3);
- r2 = vcpu_get_gr(vcpu, inst.M42.r2);
- vcpu_set_pkr(vcpu, r3, r2);
-}
-
-void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long r3, r1;
-
- r3 = vcpu_get_gr(vcpu, inst.M43.r3);
- r1 = vcpu_get_rr(vcpu, r3);
- vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
-}
-
-void kvm_mov_from_pkr(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long r3, r1;
-
- r3 = vcpu_get_gr(vcpu, inst.M43.r3);
- r1 = vcpu_get_pkr(vcpu, r3);
- vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
-}
-
-void kvm_mov_from_dbr(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long r3, r1;
-
- r3 = vcpu_get_gr(vcpu, inst.M43.r3);
- r1 = vcpu_get_dbr(vcpu, r3);
- vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
-}
-
-void kvm_mov_from_ibr(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long r3, r1;
-
- r3 = vcpu_get_gr(vcpu, inst.M43.r3);
- r1 = vcpu_get_ibr(vcpu, r3);
- vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
-}
-
-void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long r3, r1;
-
- r3 = vcpu_get_gr(vcpu, inst.M43.r3);
- r1 = vcpu_get_pmc(vcpu, r3);
- vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
-}
-
-unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg)
-{
- /* FIXME: This could get called as a result of a rsvd-reg fault */
- if (reg > (ia64_get_cpuid(3) & 0xff))
- return 0;
- else
- return ia64_get_cpuid(reg);
-}
-
-void kvm_mov_from_cpuid(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long r3, r1;
-
- r3 = vcpu_get_gr(vcpu, inst.M43.r3);
- r1 = vcpu_get_cpuid(vcpu, r3);
- vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
-}
-
-void vcpu_set_tpr(struct kvm_vcpu *vcpu, unsigned long val)
-{
- VCPU(vcpu, tpr) = val;
- vcpu->arch.irq_check = 1;
-}
-
-unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long r2;
-
- r2 = vcpu_get_gr(vcpu, inst.M32.r2);
- VCPU(vcpu, vcr[inst.M32.cr3]) = r2;
-
- switch (inst.M32.cr3) {
- case 0:
- vcpu_set_dcr(vcpu, r2);
- break;
- case 1:
- vcpu_set_itm(vcpu, r2);
- break;
- case 66:
- vcpu_set_tpr(vcpu, r2);
- break;
- case 67:
- vcpu_set_eoi(vcpu, r2);
- break;
- default:
- break;
- }
-
- return 0;
-}
-
-unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long tgt = inst.M33.r1;
- unsigned long val;
-
- switch (inst.M33.cr3) {
- case 65:
- val = vcpu_get_ivr(vcpu);
- vcpu_set_gr(vcpu, tgt, val, 0);
- break;
-
- case 67:
- vcpu_set_gr(vcpu, tgt, 0L, 0);
- break;
- default:
- val = VCPU(vcpu, vcr[inst.M33.cr3]);
- vcpu_set_gr(vcpu, tgt, val, 0);
- break;
- }
-
- return 0;
-}
-
-void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
-{
-
- unsigned long mask;
- struct kvm_pt_regs *regs;
- struct ia64_psr old_psr, new_psr;
-
- old_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
-
- regs = vcpu_regs(vcpu);
- /* We only support guests with:
- * vpsr.pk = 0
- * vpsr.is = 0
- * Otherwise panic.
- */
- if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM))
- panic_vm(vcpu, "Only support guests with vpsr.pk =0 "
- "& vpsr.is=0\n");
-
- /*
- * For the IA64_PSR bits id/da/dd/ss/ed/ia:
- * since these bits become 0 after each instruction executes
- * successfully, they are not kept in the virtual PSR but are
- * set in mIA64_PSR instead.
- */
- VCPU(vcpu, vpsr) = val
- & (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |
- IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA));
-
- if (!old_psr.i && (val & IA64_PSR_I)) {
- /* vpsr.i 0->1 */
- vcpu->arch.irq_check = 1;
- }
- new_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
-
- /*
- * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr),
- * except for the following bits:
- * ic/i/dt/si/rt/mc/it/bn/vm
- */
- mask = IA64_PSR_IC + IA64_PSR_I + IA64_PSR_DT + IA64_PSR_SI +
- IA64_PSR_RT + IA64_PSR_MC + IA64_PSR_IT + IA64_PSR_BN +
- IA64_PSR_VM;
-
- regs->cr_ipsr = (regs->cr_ipsr & mask) | (val & (~mask));
-
- check_mm_mode_switch(vcpu, old_psr, new_psr);
-
- return ;
-}
-
-unsigned long vcpu_cover(struct kvm_vcpu *vcpu)
-{
- struct ia64_psr vpsr;
-
- struct kvm_pt_regs *regs = vcpu_regs(vcpu);
- vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
-
- if (!vpsr.ic)
- VCPU(vcpu, ifs) = regs->cr_ifs;
- regs->cr_ifs = IA64_IFS_V;
- return (IA64_NO_FAULT);
-}
-
-
-
-/**************************************************************************
- VCPU banked general register access routines
- **************************************************************************/
-#define vcpu_bsw0_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT) \
- do { \
- __asm__ __volatile__ ( \
- ";;extr.u %0 = %3,%6,16;;\n" \
- "dep %1 = %0, %1, 0, 16;;\n" \
- "st8 [%4] = %1\n" \
- "extr.u %0 = %2, 16, 16;;\n" \
- "dep %3 = %0, %3, %6, 16;;\n" \
- "st8 [%5] = %3\n" \
- ::"r"(i), "r"(*b1unat), "r"(*b0unat), \
- "r"(*runat), "r"(b1unat), "r"(runat), \
- "i"(VMM_PT_REGS_R16_SLOT) : "memory"); \
- } while (0)
-
-void vcpu_bsw0(struct kvm_vcpu *vcpu)
-{
- unsigned long i;
-
- struct kvm_pt_regs *regs = vcpu_regs(vcpu);
- unsigned long *r = &regs->r16;
- unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
- unsigned long *b1 = &VCPU(vcpu, vgr[0]);
- unsigned long *runat = &regs->eml_unat;
- unsigned long *b0unat = &VCPU(vcpu, vbnat);
- unsigned long *b1unat = &VCPU(vcpu, vnat);
-
-
- if (VCPU(vcpu, vpsr) & IA64_PSR_BN) {
- for (i = 0; i < 16; i++) {
- *b1++ = *r;
- *r++ = *b0++;
- }
- vcpu_bsw0_unat(i, b0unat, b1unat, runat,
- VMM_PT_REGS_R16_SLOT);
- VCPU(vcpu, vpsr) &= ~IA64_PSR_BN;
- }
-}
-
-#define vcpu_bsw1_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT) \
- do { \
- __asm__ __volatile__ (";;extr.u %0 = %3, %6, 16;;\n" \
- "dep %1 = %0, %1, 16, 16;;\n" \
- "st8 [%4] = %1\n" \
- "extr.u %0 = %2, 0, 16;;\n" \
- "dep %3 = %0, %3, %6, 16;;\n" \
- "st8 [%5] = %3\n" \
- ::"r"(i), "r"(*b0unat), "r"(*b1unat), \
- "r"(*runat), "r"(b0unat), "r"(runat), \
- "i"(VMM_PT_REGS_R16_SLOT) : "memory"); \
- } while (0)
-
-void vcpu_bsw1(struct kvm_vcpu *vcpu)
-{
- unsigned long i;
- struct kvm_pt_regs *regs = vcpu_regs(vcpu);
- unsigned long *r = &regs->r16;
- unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
- unsigned long *b1 = &VCPU(vcpu, vgr[0]);
- unsigned long *runat = &regs->eml_unat;
- unsigned long *b0unat = &VCPU(vcpu, vbnat);
- unsigned long *b1unat = &VCPU(vcpu, vnat);
-
- if (!(VCPU(vcpu, vpsr) & IA64_PSR_BN)) {
- for (i = 0; i < 16; i++) {
- *b0++ = *r;
- *r++ = *b1++;
- }
- vcpu_bsw1_unat(i, b0unat, b1unat, runat,
- VMM_PT_REGS_R16_SLOT);
- VCPU(vcpu, vpsr) |= IA64_PSR_BN;
- }
-}
-
-void vcpu_rfi(struct kvm_vcpu *vcpu)
-{
- unsigned long ifs, psr;
- struct kvm_pt_regs *regs = vcpu_regs(vcpu);
-
- psr = VCPU(vcpu, ipsr);
- if (psr & IA64_PSR_BN)
- vcpu_bsw1(vcpu);
- else
- vcpu_bsw0(vcpu);
- vcpu_set_psr(vcpu, psr);
- ifs = VCPU(vcpu, ifs);
- if (ifs >> 63)
- regs->cr_ifs = ifs;
- regs->cr_iip = VCPU(vcpu, iip);
-}
-
-/*
- * The VPSR cannot track the guest PSR bits listed below, so this
- * function reconstructs the full guest PSR from vpsr and cr.ipsr.
- */
-
-unsigned long vcpu_get_psr(struct kvm_vcpu *vcpu)
-{
- unsigned long mask;
- struct kvm_pt_regs *regs = vcpu_regs(vcpu);
-
- mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
- IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI;
- return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask);
-}
-
-void kvm_rsm(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long vpsr;
- unsigned long imm24 = (inst.M44.i<<23) | (inst.M44.i2<<21)
- | inst.M44.imm;
-
- vpsr = vcpu_get_psr(vcpu);
- vpsr &= (~imm24);
- vcpu_set_psr(vcpu, vpsr);
-}
-
-void kvm_ssm(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long vpsr;
- unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21)
- | inst.M44.imm;
-
- vpsr = vcpu_get_psr(vcpu);
- vpsr |= imm24;
- vcpu_set_psr(vcpu, vpsr);
-}
-
-/* Generate Mask
- * Parameter:
- * bit -- starting bit
- * len -- how many bits
- */
-#define MASK(bit,len) \
-({ \
- __u64 ret; \
- \
- __asm __volatile("dep %0=-1, r0, %1, %2"\
- : "=r" (ret): \
- "M" (bit), \
- "M" (len)); \
- ret; \
-})
-
-void vcpu_set_psr_l(struct kvm_vcpu *vcpu, unsigned long val)
-{
- val = (val & MASK(0, 32)) | (vcpu_get_psr(vcpu) & MASK(32, 32));
- vcpu_set_psr(vcpu, val);
-}
-
-void kvm_mov_to_psr(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long val;
-
- val = vcpu_get_gr(vcpu, inst.M35.r2);
- vcpu_set_psr_l(vcpu, val);
-}
-
-void kvm_mov_from_psr(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long val;
-
- val = vcpu_get_psr(vcpu);
- val = (val & MASK(0, 32)) | (val & MASK(35, 2));
- vcpu_set_gr(vcpu, inst.M33.r1, val, 0);
-}
-
-void vcpu_increment_iip(struct kvm_vcpu *vcpu)
-{
- struct kvm_pt_regs *regs = vcpu_regs(vcpu);
- struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
- if (ipsr->ri == 2) {
- ipsr->ri = 0;
- regs->cr_iip += 16;
- } else
- ipsr->ri++;
-}
-
-void vcpu_decrement_iip(struct kvm_vcpu *vcpu)
-{
- struct kvm_pt_regs *regs = vcpu_regs(vcpu);
- struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
-
- if (ipsr->ri == 0) {
- ipsr->ri = 2;
- regs->cr_iip -= 16;
- } else
- ipsr->ri--;
-}
-
-/** Emulate a privileged operation.
- *
- * @param vcpu virtual cpu
- * @cause the reason that caused the virtualization fault
- * @opcode the instruction code that caused the virtualization fault
- */
-
-void kvm_emulate(struct kvm_vcpu *vcpu, struct kvm_pt_regs *regs)
-{
- unsigned long status, cause, opcode ;
- INST64 inst;
-
- status = IA64_NO_FAULT;
- cause = VMX(vcpu, cause);
- opcode = VMX(vcpu, opcode);
- inst.inst = opcode;
- /*
- * Switch to actual virtual rid in rr0 and rr4,
- * which is required by some tlb related instructions.
- */
- prepare_if_physical_mode(vcpu);
-
- switch (cause) {
- case EVENT_RSM:
- kvm_rsm(vcpu, inst);
- break;
- case EVENT_SSM:
- kvm_ssm(vcpu, inst);
- break;
- case EVENT_MOV_TO_PSR:
- kvm_mov_to_psr(vcpu, inst);
- break;
- case EVENT_MOV_FROM_PSR:
- kvm_mov_from_psr(vcpu, inst);
- break;
- case EVENT_MOV_FROM_CR:
- kvm_mov_from_cr(vcpu, inst);
- break;
- case EVENT_MOV_TO_CR:
- kvm_mov_to_cr(vcpu, inst);
- break;
- case EVENT_BSW_0:
- vcpu_bsw0(vcpu);
- break;
- case EVENT_BSW_1:
- vcpu_bsw1(vcpu);
- break;
- case EVENT_COVER:
- vcpu_cover(vcpu);
- break;
- case EVENT_RFI:
- vcpu_rfi(vcpu);
- break;
- case EVENT_ITR_D:
- kvm_itr_d(vcpu, inst);
- break;
- case EVENT_ITR_I:
- kvm_itr_i(vcpu, inst);
- break;
- case EVENT_PTR_D:
- kvm_ptr_d(vcpu, inst);
- break;
- case EVENT_PTR_I:
- kvm_ptr_i(vcpu, inst);
- break;
- case EVENT_ITC_D:
- kvm_itc_d(vcpu, inst);
- break;
- case EVENT_ITC_I:
- kvm_itc_i(vcpu, inst);
- break;
- case EVENT_PTC_L:
- kvm_ptc_l(vcpu, inst);
- break;
- case EVENT_PTC_G:
- kvm_ptc_g(vcpu, inst);
- break;
- case EVENT_PTC_GA:
- kvm_ptc_ga(vcpu, inst);
- break;
- case EVENT_PTC_E:
- kvm_ptc_e(vcpu, inst);
- break;
- case EVENT_MOV_TO_RR:
- kvm_mov_to_rr(vcpu, inst);
- break;
- case EVENT_MOV_FROM_RR:
- kvm_mov_from_rr(vcpu, inst);
- break;
- case EVENT_THASH:
- kvm_thash(vcpu, inst);
- break;
- case EVENT_TTAG:
- kvm_ttag(vcpu, inst);
- break;
- case EVENT_TPA:
- status = kvm_tpa(vcpu, inst);
- break;
- case EVENT_TAK:
- kvm_tak(vcpu, inst);
- break;
- case EVENT_MOV_TO_AR_IMM:
- kvm_mov_to_ar_imm(vcpu, inst);
- break;
- case EVENT_MOV_TO_AR:
- kvm_mov_to_ar_reg(vcpu, inst);
- break;
- case EVENT_MOV_FROM_AR:
- kvm_mov_from_ar_reg(vcpu, inst);
- break;
- case EVENT_MOV_TO_DBR:
- kvm_mov_to_dbr(vcpu, inst);
- break;
- case EVENT_MOV_TO_IBR:
- kvm_mov_to_ibr(vcpu, inst);
- break;
- case EVENT_MOV_TO_PMC:
- kvm_mov_to_pmc(vcpu, inst);
- break;
- case EVENT_MOV_TO_PMD:
- kvm_mov_to_pmd(vcpu, inst);
- break;
- case EVENT_MOV_TO_PKR:
- kvm_mov_to_pkr(vcpu, inst);
- break;
- case EVENT_MOV_FROM_DBR:
- kvm_mov_from_dbr(vcpu, inst);
- break;
- case EVENT_MOV_FROM_IBR:
- kvm_mov_from_ibr(vcpu, inst);
- break;
- case EVENT_MOV_FROM_PMC:
- kvm_mov_from_pmc(vcpu, inst);
- break;
- case EVENT_MOV_FROM_PKR:
- kvm_mov_from_pkr(vcpu, inst);
- break;
- case EVENT_MOV_FROM_CPUID:
- kvm_mov_from_cpuid(vcpu, inst);
- break;
- case EVENT_VMSW:
- status = IA64_FAULT;
- break;
- default:
- break;
- };
- /* Assume every status is NO_FAULT? */
- if (status == IA64_NO_FAULT && cause != EVENT_RFI)
- vcpu_increment_iip(vcpu);
-
- recover_if_physical_mode(vcpu);
-}
-
-void init_vcpu(struct kvm_vcpu *vcpu)
-{
- int i;
-
- vcpu->arch.mode_flags = GUEST_IN_PHY;
- VMX(vcpu, vrr[0]) = 0x38;
- VMX(vcpu, vrr[1]) = 0x38;
- VMX(vcpu, vrr[2]) = 0x38;
- VMX(vcpu, vrr[3]) = 0x38;
- VMX(vcpu, vrr[4]) = 0x38;
- VMX(vcpu, vrr[5]) = 0x38;
- VMX(vcpu, vrr[6]) = 0x38;
- VMX(vcpu, vrr[7]) = 0x38;
- VCPU(vcpu, vpsr) = IA64_PSR_BN;
- VCPU(vcpu, dcr) = 0;
- /* pta.size must not be 0. The minimum is 15 (32k) */
- VCPU(vcpu, pta) = 15 << 2;
- VCPU(vcpu, itv) = 0x10000;
- VCPU(vcpu, itm) = 0;
- VMX(vcpu, last_itc) = 0;
-
- VCPU(vcpu, lid) = VCPU_LID(vcpu);
- VCPU(vcpu, ivr) = 0;
- VCPU(vcpu, tpr) = 0x10000;
- VCPU(vcpu, eoi) = 0;
- VCPU(vcpu, irr[0]) = 0;
- VCPU(vcpu, irr[1]) = 0;
- VCPU(vcpu, irr[2]) = 0;
- VCPU(vcpu, irr[3]) = 0;
- VCPU(vcpu, pmv) = 0x10000;
- VCPU(vcpu, cmcv) = 0x10000;
- VCPU(vcpu, lrr0) = 0x10000; /* default reset value? */
- VCPU(vcpu, lrr1) = 0x10000; /* default reset value? */
- update_vhpi(vcpu, NULL_VECTOR);
- VLSAPIC_XTP(vcpu) = 0x80; /* disabled */
-
- for (i = 0; i < 4; i++)
- VLSAPIC_INSVC(vcpu, i) = 0;
-}
-
-void kvm_init_all_rr(struct kvm_vcpu *vcpu)
-{
- unsigned long psr;
-
- local_irq_save(psr);
-
- /* WARNING: virtual mode and physical mode must not coexist
- * in the same region
- */
-
- vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(VMX(vcpu, vrr[VRN0]));
- vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(VMX(vcpu, vrr[VRN4]));
-
- if (is_physical_mode(vcpu)) {
- if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
- panic_vm(vcpu, "Machine Status conflicts!\n");
-
- ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0);
- ia64_dv_serialize_data();
- ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rr4);
- ia64_dv_serialize_data();
- } else {
- ia64_set_rr((VRN0 << VRN_SHIFT),
- vcpu->arch.metaphysical_saved_rr0);
- ia64_dv_serialize_data();
- ia64_set_rr((VRN4 << VRN_SHIFT),
- vcpu->arch.metaphysical_saved_rr4);
- ia64_dv_serialize_data();
- }
- ia64_set_rr((VRN1 << VRN_SHIFT),
- vrrtomrr(VMX(vcpu, vrr[VRN1])));
- ia64_dv_serialize_data();
- ia64_set_rr((VRN2 << VRN_SHIFT),
- vrrtomrr(VMX(vcpu, vrr[VRN2])));
- ia64_dv_serialize_data();
- ia64_set_rr((VRN3 << VRN_SHIFT),
- vrrtomrr(VMX(vcpu, vrr[VRN3])));
- ia64_dv_serialize_data();
- ia64_set_rr((VRN5 << VRN_SHIFT),
- vrrtomrr(VMX(vcpu, vrr[VRN5])));
- ia64_dv_serialize_data();
- ia64_set_rr((VRN7 << VRN_SHIFT),
- vrrtomrr(VMX(vcpu, vrr[VRN7])));
- ia64_dv_serialize_data();
- ia64_srlz_d();
- ia64_set_psr(psr);
-}
-
-int vmm_entry(void)
-{
- struct kvm_vcpu *v;
- v = current_vcpu;
-
- ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)v->arch.vpd,
- 0, 0, 0, 0, 0, 0);
- kvm_init_vtlb(v);
- kvm_init_vhpt(v);
- init_vcpu(v);
- kvm_init_all_rr(v);
- vmm_reset_entry();
-
- return 0;
-}
-
-static void kvm_show_registers(struct kvm_pt_regs *regs)
-{
- unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;
-
- struct kvm_vcpu *vcpu = current_vcpu;
- if (vcpu != NULL)
- printk("vcpu 0x%p vcpu %d\n",
- vcpu, vcpu->vcpu_id);
-
- printk("psr : %016lx ifs : %016lx ip : [<%016lx>]\n",
- regs->cr_ipsr, regs->cr_ifs, ip);
-
- printk("unat: %016lx pfs : %016lx rsc : %016lx\n",
- regs->ar_unat, regs->ar_pfs, regs->ar_rsc);
- printk("rnat: %016lx bspstore: %016lx pr : %016lx\n",
- regs->ar_rnat, regs->ar_bspstore, regs->pr);
- printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n",
- regs->loadrs, regs->ar_ccv, regs->ar_fpsr);
- printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd);
- printk("b0 : %016lx b6 : %016lx b7 : %016lx\n", regs->b0,
- regs->b6, regs->b7);
- printk("f6 : %05lx%016lx f7 : %05lx%016lx\n",
- regs->f6.u.bits[1], regs->f6.u.bits[0],
- regs->f7.u.bits[1], regs->f7.u.bits[0]);
- printk("f8 : %05lx%016lx f9 : %05lx%016lx\n",
- regs->f8.u.bits[1], regs->f8.u.bits[0],
- regs->f9.u.bits[1], regs->f9.u.bits[0]);
- printk("f10 : %05lx%016lx f11 : %05lx%016lx\n",
- regs->f10.u.bits[1], regs->f10.u.bits[0],
- regs->f11.u.bits[1], regs->f11.u.bits[0]);
-
- printk("r1 : %016lx r2 : %016lx r3 : %016lx\n", regs->r1,
- regs->r2, regs->r3);
- printk("r8 : %016lx r9 : %016lx r10 : %016lx\n", regs->r8,
- regs->r9, regs->r10);
- printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", regs->r11,
- regs->r12, regs->r13);
- printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14,
- regs->r15, regs->r16);
- printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17,
- regs->r18, regs->r19);
- printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20,
- regs->r21, regs->r22);
- printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23,
- regs->r24, regs->r25);
- printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26,
- regs->r27, regs->r28);
- printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29,
- regs->r30, regs->r31);
-
-}
-
-void panic_vm(struct kvm_vcpu *v, const char *fmt, ...)
-{
- va_list args;
- char buf[256];
-
- struct kvm_pt_regs *regs = vcpu_regs(v);
- struct exit_ctl_data *p = &v->arch.exit_data;
- va_start(args, fmt);
- vsnprintf(buf, sizeof(buf), fmt, args);
- va_end(args);
- printk(buf);
- kvm_show_registers(regs);
- p->exit_reason = EXIT_REASON_VM_PANIC;
- vmm_transition(v);
- /* Never returns */
- while (1);
-}
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h
deleted file mode 100644
index 988911b4cc7..00000000000
--- a/arch/ia64/kvm/vcpu.h
+++ /dev/null
@@ -1,752 +0,0 @@
-/*
- * vcpu.h: vcpu routines
- * Copyright (c) 2005, Intel Corporation.
- * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
- * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
- *
- * Copyright (c) 2007, Intel Corporation.
- * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
- * Xiantao Zhang (xiantao.zhang@intel.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- */
-
-
-#ifndef __KVM_VCPU_H__
-#define __KVM_VCPU_H__
-
-#include <asm/types.h>
-#include <asm/fpu.h>
-#include <asm/processor.h>
-
-#ifndef __ASSEMBLY__
-#include "vti.h"
-
-#include <linux/kvm_host.h>
-#include <linux/spinlock.h>
-
-typedef unsigned long IA64_INST;
-
-typedef union U_IA64_BUNDLE {
- unsigned long i64[2];
- struct { unsigned long template:5, slot0:41, slot1a:18,
- slot1b:23, slot2:41; };
- /* NOTE: the following doesn't work because bitfields can't cross
- natural size boundaries:
- struct { unsigned long template:5, slot0:41, slot1:41, slot2:41; }; */
-} IA64_BUNDLE;
-
-typedef union U_INST64_A5 {
- IA64_INST inst;
- struct { unsigned long qp:6, r1:7, imm7b:7, r3:2, imm5c:5,
- imm9d:9, s:1, major:4; };
-} INST64_A5;
-
-typedef union U_INST64_B4 {
- IA64_INST inst;
- struct { unsigned long qp:6, btype:3, un3:3, p:1, b2:3, un11:11, x6:6,
- wh:2, d:1, un1:1, major:4; };
-} INST64_B4;
-
-typedef union U_INST64_B8 {
- IA64_INST inst;
- struct { unsigned long qp:6, un21:21, x6:6, un4:4, major:4; };
-} INST64_B8;
-
-typedef union U_INST64_B9 {
- IA64_INST inst;
- struct { unsigned long qp:6, imm20:20, :1, x6:6, :3, i:1, major:4; };
-} INST64_B9;
-
-typedef union U_INST64_I19 {
- IA64_INST inst;
- struct { unsigned long qp:6, imm20:20, :1, x6:6, x3:3, i:1, major:4; };
-} INST64_I19;
-
-typedef union U_INST64_I26 {
- IA64_INST inst;
- struct { unsigned long qp:6, :7, r2:7, ar3:7, x6:6, x3:3, :1, major:4; };
-} INST64_I26;
-
-typedef union U_INST64_I27 {
- IA64_INST inst;
- struct { unsigned long qp:6, :7, imm:7, ar3:7, x6:6, x3:3, s:1, major:4; };
-} INST64_I27;
-
-typedef union U_INST64_I28 { /* not privileged (mov from AR) */
- IA64_INST inst;
- struct { unsigned long qp:6, r1:7, :7, ar3:7, x6:6, x3:3, :1, major:4; };
-} INST64_I28;
-
-typedef union U_INST64_M28 {
- IA64_INST inst;
- struct { unsigned long qp:6, :14, r3:7, x6:6, x3:3, :1, major:4; };
-} INST64_M28;
-
-typedef union U_INST64_M29 {
- IA64_INST inst;
- struct { unsigned long qp:6, :7, r2:7, ar3:7, x6:6, x3:3, :1, major:4; };
-} INST64_M29;
-
-typedef union U_INST64_M30 {
- IA64_INST inst;
- struct { unsigned long qp:6, :7, imm:7, ar3:7, x4:4, x2:2,
- x3:3, s:1, major:4; };
-} INST64_M30;
-
-typedef union U_INST64_M31 {
- IA64_INST inst;
- struct { unsigned long qp:6, r1:7, :7, ar3:7, x6:6, x3:3, :1, major:4; };
-} INST64_M31;
-
-typedef union U_INST64_M32 {
- IA64_INST inst;
- struct { unsigned long qp:6, :7, r2:7, cr3:7, x6:6, x3:3, :1, major:4; };
-} INST64_M32;
-
-typedef union U_INST64_M33 {
- IA64_INST inst;
- struct { unsigned long qp:6, r1:7, :7, cr3:7, x6:6, x3:3, :1, major:4; };
-} INST64_M33;
-
-typedef union U_INST64_M35 {
- IA64_INST inst;
- struct { unsigned long qp:6, :7, r2:7, :7, x6:6, x3:3, :1, major:4; };
-
-} INST64_M35;
-
-typedef union U_INST64_M36 {
- IA64_INST inst;
- struct { unsigned long qp:6, r1:7, :14, x6:6, x3:3, :1, major:4; };
-} INST64_M36;
-
-typedef union U_INST64_M37 {
- IA64_INST inst;
- struct { unsigned long qp:6, imm20a:20, :1, x4:4, x2:2, x3:3,
- i:1, major:4; };
-} INST64_M37;
-
-typedef union U_INST64_M41 {
- IA64_INST inst;
- struct { unsigned long qp:6, :7, r2:7, :7, x6:6, x3:3, :1, major:4; };
-} INST64_M41;
-
-typedef union U_INST64_M42 {
- IA64_INST inst;
- struct { unsigned long qp:6, :7, r2:7, r3:7, x6:6, x3:3, :1, major:4; };
-} INST64_M42;
-
-typedef union U_INST64_M43 {
- IA64_INST inst;
- struct { unsigned long qp:6, r1:7, :7, r3:7, x6:6, x3:3, :1, major:4; };
-} INST64_M43;
-
-typedef union U_INST64_M44 {
- IA64_INST inst;
- struct { unsigned long qp:6, imm:21, x4:4, i2:2, x3:3, i:1, major:4; };
-} INST64_M44;
-
-typedef union U_INST64_M45 {
- IA64_INST inst;
- struct { unsigned long qp:6, :7, r2:7, r3:7, x6:6, x3:3, :1, major:4; };
-} INST64_M45;
-
-typedef union U_INST64_M46 {
- IA64_INST inst;
- struct { unsigned long qp:6, r1:7, un7:7, r3:7, x6:6,
- x3:3, un1:1, major:4; };
-} INST64_M46;
-
-typedef union U_INST64_M47 {
- IA64_INST inst;
- struct { unsigned long qp:6, un14:14, r3:7, x6:6, x3:3, un1:1, major:4; };
-} INST64_M47;
-
-typedef union U_INST64_M1{
- IA64_INST inst;
- struct { unsigned long qp:6, r1:7, un7:7, r3:7, x:1, hint:2,
- x6:6, m:1, major:4; };
-} INST64_M1;
-
-typedef union U_INST64_M2{
- IA64_INST inst;
- struct { unsigned long qp:6, r1:7, r2:7, r3:7, x:1, hint:2,
- x6:6, m:1, major:4; };
-} INST64_M2;
-
-typedef union U_INST64_M3{
- IA64_INST inst;
- struct { unsigned long qp:6, r1:7, imm7:7, r3:7, i:1, hint:2,
- x6:6, s:1, major:4; };
-} INST64_M3;
-
-typedef union U_INST64_M4 {
- IA64_INST inst;
- struct { unsigned long qp:6, un7:7, r2:7, r3:7, x:1, hint:2,
- x6:6, m:1, major:4; };
-} INST64_M4;
-
-typedef union U_INST64_M5 {
- IA64_INST inst;
- struct { unsigned long qp:6, imm7:7, r2:7, r3:7, i:1, hint:2,
- x6:6, s:1, major:4; };
-} INST64_M5;
-
-typedef union U_INST64_M6 {
- IA64_INST inst;
- struct { unsigned long qp:6, f1:7, un7:7, r3:7, x:1, hint:2,
- x6:6, m:1, major:4; };
-} INST64_M6;
-
-typedef union U_INST64_M9 {
- IA64_INST inst;
- struct { unsigned long qp:6, :7, f2:7, r3:7, x:1, hint:2,
- x6:6, m:1, major:4; };
-} INST64_M9;
-
-typedef union U_INST64_M10 {
- IA64_INST inst;
- struct { unsigned long qp:6, imm7:7, f2:7, r3:7, i:1, hint:2,
- x6:6, s:1, major:4; };
-} INST64_M10;
-
-typedef union U_INST64_M12 {
- IA64_INST inst;
- struct { unsigned long qp:6, f1:7, f2:7, r3:7, x:1, hint:2,
- x6:6, m:1, major:4; };
-} INST64_M12;
-
-typedef union U_INST64_M15 {
- IA64_INST inst;
- struct { unsigned long qp:6, :7, imm7:7, r3:7, i:1, hint:2,
- x6:6, s:1, major:4; };
-} INST64_M15;
-
-typedef union U_INST64 {
- IA64_INST inst;
- struct { unsigned long :37, major:4; } generic;
- INST64_A5 A5; /* used in build_hypercall_bundle only */
- INST64_B4 B4; /* used in build_hypercall_bundle only */
- INST64_B8 B8; /* rfi, bsw.[01] */
- INST64_B9 B9; /* break.b */
- INST64_I19 I19; /* used in build_hypercall_bundle only */
- INST64_I26 I26; /* mov register to ar (I unit) */
- INST64_I27 I27; /* mov immediate to ar (I unit) */
- INST64_I28 I28; /* mov from ar (I unit) */
- INST64_M1 M1; /* ld integer */
- INST64_M2 M2;
- INST64_M3 M3;
- INST64_M4 M4; /* st integer */
- INST64_M5 M5;
- INST64_M6 M6; /* ldfd floating point */
- INST64_M9 M9; /* stfd floating point */
- INST64_M10 M10; /* stfd floating point */
- INST64_M12 M12; /* ldfd pair floating point */
- INST64_M15 M15; /* lfetch + imm update */
- INST64_M28 M28; /* purge translation cache entry */
- INST64_M29 M29; /* mov register to ar (M unit) */
- INST64_M30 M30; /* mov immediate to ar (M unit) */
- INST64_M31 M31; /* mov from ar (M unit) */
- INST64_M32 M32; /* mov reg to cr */
- INST64_M33 M33; /* mov from cr */
- INST64_M35 M35; /* mov to psr */
- INST64_M36 M36; /* mov from psr */
- INST64_M37 M37; /* break.m */
- INST64_M41 M41; /* translation cache insert */
- INST64_M42 M42; /* mov to indirect reg/translation reg insert*/
- INST64_M43 M43; /* mov from indirect reg */
- INST64_M44 M44; /* set/reset system mask */
- INST64_M45 M45; /* translation purge */
- INST64_M46 M46; /* translation access (tpa,tak) */
- INST64_M47 M47; /* purge translation entry */
-} INST64;
-
-#define MASK_41 ((unsigned long)0x1ffffffffff)
-
-/* Virtual address memory attributes encoding */
-#define VA_MATTR_WB 0x0
-#define VA_MATTR_UC 0x4
-#define VA_MATTR_UCE 0x5
-#define VA_MATTR_WC 0x6
-#define VA_MATTR_NATPAGE 0x7
-
-#define PMASK(size) (~((size) - 1))
-#define PSIZE(size) (1UL<<(size))
-#define CLEARLSB(ppn, nbits) (((ppn) >> (nbits)) << (nbits))
-#define PAGEALIGN(va, ps) CLEARLSB(va, ps)
-#define PAGE_FLAGS_RV_MASK (0x2|(0x3UL<<50)|(((1UL<<11)-1)<<53))
-#define _PAGE_MA_ST (0x1 << 2) /* is reserved for software use */
-
-#define ARCH_PAGE_SHIFT 12
-
-#define INVALID_TI_TAG (1UL << 63)
-
-#define VTLB_PTE_P_BIT 0
-#define VTLB_PTE_IO_BIT 60
-#define VTLB_PTE_IO (1UL<<VTLB_PTE_IO_BIT)
-#define VTLB_PTE_P (1UL<<VTLB_PTE_P_BIT)
-
-#define vcpu_quick_region_check(_tr_regions,_ifa) \
- (_tr_regions & (1 << ((unsigned long)_ifa >> 61)))
-
-#define vcpu_quick_region_set(_tr_regions,_ifa) \
- do {_tr_regions |= (1 << ((unsigned long)_ifa >> 61)); } while (0)
-
-static inline void vcpu_set_tr(struct thash_data *trp, u64 pte, u64 itir,
- u64 va, u64 rid)
-{
- trp->page_flags = pte;
- trp->itir = itir;
- trp->vadr = va;
- trp->rid = rid;
-}
-
-extern u64 kvm_get_mpt_entry(u64 gpfn);
-
-/* Return the I/O attribute of the gpfn, or 0 for ordinary memory */
-static inline u64 __gpfn_is_io(u64 gpfn)
-{
- u64 pte;
- pte = kvm_get_mpt_entry(gpfn);
- if (!(pte & GPFN_INV_MASK)) {
- pte = pte & GPFN_IO_MASK;
- if (pte != GPFN_PHYS_MMIO)
- return pte;
- }
- return 0;
-}
-#endif
-#define IA64_NO_FAULT 0
-#define IA64_FAULT 1
-
-#define VMM_RBS_OFFSET ((VMM_TASK_SIZE + 15) & ~15)
-
-#define SW_BAD 0 /* Bad mode transition */
-#define SW_V2P 1 /* Physical mode emulation is activated */
-#define SW_P2V 2 /* Exit physical mode emulation */
-#define SW_SELF 3 /* No mode transition */
-#define SW_NOP 4 /* Mode transition, but no action required */
-
-#define GUEST_IN_PHY 0x1
-#define GUEST_PHY_EMUL 0x2
-
-#define current_vcpu ((struct kvm_vcpu *) ia64_getreg(_IA64_REG_TP))
-
-#define VRN_SHIFT 61
-#define VRN_MASK 0xe000000000000000
-#define VRN0 0x0UL
-#define VRN1 0x1UL
-#define VRN2 0x2UL
-#define VRN3 0x3UL
-#define VRN4 0x4UL
-#define VRN5 0x5UL
-#define VRN6 0x6UL
-#define VRN7 0x7UL
-
-#define IRQ_NO_MASKED 0
-#define IRQ_MASKED_BY_VTPR 1
-#define IRQ_MASKED_BY_INSVC 2 /* masked by inservice IRQ */
-
-#define PTA_BASE_SHIFT 15
-
-#define IA64_PSR_VM_BIT 46
-#define IA64_PSR_VM (__IA64_UL(1) << IA64_PSR_VM_BIT)
-
-/* Interruption Function State */
-#define IA64_IFS_V_BIT 63
-#define IA64_IFS_V (__IA64_UL(1) << IA64_IFS_V_BIT)
-
-#define PHY_PAGE_UC (_PAGE_A|_PAGE_D|_PAGE_P|_PAGE_MA_UC|_PAGE_AR_RWX)
-#define PHY_PAGE_WB (_PAGE_A|_PAGE_D|_PAGE_P|_PAGE_MA_WB|_PAGE_AR_RWX)
-
-#ifndef __ASSEMBLY__
-
-#include <asm/gcc_intrin.h>
-
-#define is_physical_mode(v) \
- ((v->arch.mode_flags) & GUEST_IN_PHY)
-
-#define is_virtual_mode(v) \
- (!is_physical_mode(v))
-
-#define MODE_IND(psr) \
- (((psr).it << 2) + ((psr).dt << 1) + (psr).rt)
-
-#ifndef CONFIG_SMP
-#define _vmm_raw_spin_lock(x) do {}while(0)
-#define _vmm_raw_spin_unlock(x) do {}while(0)
-#else
-typedef struct {
- volatile unsigned int lock;
-} vmm_spinlock_t;
-#define _vmm_raw_spin_lock(x) \
- do { \
- __u32 *ia64_spinlock_ptr = (__u32 *) (x); \
- __u64 ia64_spinlock_val; \
- ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);\
- if (unlikely(ia64_spinlock_val)) { \
- do { \
- while (*ia64_spinlock_ptr) \
- ia64_barrier(); \
- ia64_spinlock_val = \
- ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);\
- } while (ia64_spinlock_val); \
- } \
- } while (0)
-
-#define _vmm_raw_spin_unlock(x) \
- do { barrier(); \
- ((vmm_spinlock_t *)x)->lock = 0; } \
-while (0)
-#endif
-
-void vmm_spin_lock(vmm_spinlock_t *lock);
-void vmm_spin_unlock(vmm_spinlock_t *lock);
-enum {
- I_TLB = 1,
- D_TLB = 2
-};
-
-union kvm_va {
- struct {
- unsigned long off : 60; /* intra-region offset */
- unsigned long reg : 4; /* region number */
- } f;
- unsigned long l;
- void *p;
-};
-
-#define __kvm_pa(x) ({union kvm_va _v; _v.l = (long) (x); \
- _v.f.reg = 0; _v.l; })
-#define __kvm_va(x) ({union kvm_va _v; _v.l = (long) (x); \
- _v.f.reg = -1; _v.p; })
-
-#define _REGION_ID(x) ({union ia64_rr _v; _v.val = (long)(x); \
- _v.rid; })
-#define _REGION_PAGE_SIZE(x) ({union ia64_rr _v; _v.val = (long)(x); \
- _v.ps; })
-#define _REGION_HW_WALKER(x) ({union ia64_rr _v; _v.val = (long)(x); \
- _v.ve; })
-
-enum vhpt_ref{ DATA_REF, NA_REF, INST_REF, RSE_REF };
-enum tlb_miss_type { INSTRUCTION, DATA, REGISTER };
-
-#define VCPU(_v, _x) ((_v)->arch.vpd->_x)
-#define VMX(_v, _x) ((_v)->arch._x)
-
-#define VLSAPIC_INSVC(vcpu, i) ((vcpu)->arch.insvc[i])
-#define VLSAPIC_XTP(_v) VMX(_v, xtp)
-
-static inline unsigned long itir_ps(unsigned long itir)
-{
- return ((itir >> 2) & 0x3f);
-}
-
-
-/**************************************************************************
- VCPU control register access routines
- **************************************************************************/
-
-static inline u64 vcpu_get_itir(struct kvm_vcpu *vcpu)
-{
- return ((u64)VCPU(vcpu, itir));
-}
-
-static inline void vcpu_set_itir(struct kvm_vcpu *vcpu, u64 val)
-{
- VCPU(vcpu, itir) = val;
-}
-
-static inline u64 vcpu_get_ifa(struct kvm_vcpu *vcpu)
-{
- return ((u64)VCPU(vcpu, ifa));
-}
-
-static inline void vcpu_set_ifa(struct kvm_vcpu *vcpu, u64 val)
-{
- VCPU(vcpu, ifa) = val;
-}
-
-static inline u64 vcpu_get_iva(struct kvm_vcpu *vcpu)
-{
- return ((u64)VCPU(vcpu, iva));
-}
-
-static inline u64 vcpu_get_pta(struct kvm_vcpu *vcpu)
-{
- return ((u64)VCPU(vcpu, pta));
-}
-
-static inline u64 vcpu_get_lid(struct kvm_vcpu *vcpu)
-{
- return ((u64)VCPU(vcpu, lid));
-}
-
-static inline u64 vcpu_get_tpr(struct kvm_vcpu *vcpu)
-{
- return ((u64)VCPU(vcpu, tpr));
-}
-
-static inline u64 vcpu_get_eoi(struct kvm_vcpu *vcpu)
-{
- return (0UL); /*reads of eoi always return 0 */
-}
-
-static inline u64 vcpu_get_irr0(struct kvm_vcpu *vcpu)
-{
- return ((u64)VCPU(vcpu, irr[0]));
-}
-
-static inline u64 vcpu_get_irr1(struct kvm_vcpu *vcpu)
-{
- return ((u64)VCPU(vcpu, irr[1]));
-}
-
-static inline u64 vcpu_get_irr2(struct kvm_vcpu *vcpu)
-{
- return ((u64)VCPU(vcpu, irr[2]));
-}
-
-static inline u64 vcpu_get_irr3(struct kvm_vcpu *vcpu)
-{
- return ((u64)VCPU(vcpu, irr[3]));
-}
-
-static inline void vcpu_set_dcr(struct kvm_vcpu *vcpu, u64 val)
-{
- ia64_setreg(_IA64_REG_CR_DCR, val);
-}
-
-static inline void vcpu_set_isr(struct kvm_vcpu *vcpu, u64 val)
-{
- VCPU(vcpu, isr) = val;
-}
-
-static inline void vcpu_set_lid(struct kvm_vcpu *vcpu, u64 val)
-{
- VCPU(vcpu, lid) = val;
-}
-
-static inline void vcpu_set_ipsr(struct kvm_vcpu *vcpu, u64 val)
-{
- VCPU(vcpu, ipsr) = val;
-}
-
-static inline void vcpu_set_iip(struct kvm_vcpu *vcpu, u64 val)
-{
- VCPU(vcpu, iip) = val;
-}
-
-static inline void vcpu_set_ifs(struct kvm_vcpu *vcpu, u64 val)
-{
- VCPU(vcpu, ifs) = val;
-}
-
-static inline void vcpu_set_iipa(struct kvm_vcpu *vcpu, u64 val)
-{
- VCPU(vcpu, iipa) = val;
-}
-
-static inline void vcpu_set_iha(struct kvm_vcpu *vcpu, u64 val)
-{
- VCPU(vcpu, iha) = val;
-}
-
-
-static inline u64 vcpu_get_rr(struct kvm_vcpu *vcpu, u64 reg)
-{
- return vcpu->arch.vrr[reg>>61];
-}
-
-/**************************************************************************
- VCPU debug breakpoint register access routines
- **************************************************************************/
-
-static inline void vcpu_set_dbr(struct kvm_vcpu *vcpu, u64 reg, u64 val)
-{
- __ia64_set_dbr(reg, val);
-}
-
-static inline void vcpu_set_ibr(struct kvm_vcpu *vcpu, u64 reg, u64 val)
-{
- ia64_set_ibr(reg, val);
-}
-
-static inline u64 vcpu_get_dbr(struct kvm_vcpu *vcpu, u64 reg)
-{
- return ((u64)__ia64_get_dbr(reg));
-}
-
-static inline u64 vcpu_get_ibr(struct kvm_vcpu *vcpu, u64 reg)
-{
- return ((u64)ia64_get_ibr(reg));
-}
-
-/**************************************************************************
- VCPU performance monitor register access routines
- **************************************************************************/
-static inline void vcpu_set_pmc(struct kvm_vcpu *vcpu, u64 reg, u64 val)
-{
- /* NOTE: Writes to unimplemented PMC registers are discarded */
- ia64_set_pmc(reg, val);
-}
-
-static inline void vcpu_set_pmd(struct kvm_vcpu *vcpu, u64 reg, u64 val)
-{
- /* NOTE: Writes to unimplemented PMD registers are discarded */
- ia64_set_pmd(reg, val);
-}
-
-static inline u64 vcpu_get_pmc(struct kvm_vcpu *vcpu, u64 reg)
-{
- /* NOTE: Reads from unimplemented PMC registers return zero */
- return ((u64)ia64_get_pmc(reg));
-}
-
-static inline u64 vcpu_get_pmd(struct kvm_vcpu *vcpu, u64 reg)
-{
- /* NOTE: Reads from unimplemented PMD registers return zero */
- return ((u64)ia64_get_pmd(reg));
-}
-
-static inline unsigned long vrrtomrr(unsigned long val)
-{
- union ia64_rr rr;
- rr.val = val;
- rr.rid = (rr.rid << 4) | 0xe;
- if (rr.ps > PAGE_SHIFT)
- rr.ps = PAGE_SHIFT;
- rr.ve = 1;
- return rr.val;
-}
-
-
-static inline int highest_bits(int *dat)
-{
- u32 bits, bitnum;
- int i;
-
- /* loop for all 256 bits */
- for (i = 7; i >= 0 ; i--) {
- bits = dat[i];
- if (bits) {
- bitnum = fls(bits);
- return i * 32 + bitnum - 1;
- }
- }
- return NULL_VECTOR;
-}
-
-/*
- * Return nonzero if the pending irq has higher priority than the
- * in-service one.
- */
-static inline int is_higher_irq(int pending, int inservice)
-{
- return ((pending > inservice)
- || ((pending != NULL_VECTOR)
- && (inservice == NULL_VECTOR)));
-}
-
-static inline int is_higher_class(int pending, int mic)
-{
- return ((pending >> 4) > mic);
-}
-
-/*
- * Return the highest pending irq vector (0-255), or
- * NULL_VECTOR when nothing is pending.
- */
-static inline int highest_pending_irq(struct kvm_vcpu *vcpu)
-{
- if (VCPU(vcpu, irr[0]) & (1UL<<NMI_VECTOR))
- return NMI_VECTOR;
- if (VCPU(vcpu, irr[0]) & (1UL<<ExtINT_VECTOR))
- return ExtINT_VECTOR;
-
- return highest_bits((int *)&VCPU(vcpu, irr[0]));
-}
-
-static inline int highest_inservice_irq(struct kvm_vcpu *vcpu)
-{
- if (VMX(vcpu, insvc[0]) & (1UL<<NMI_VECTOR))
- return NMI_VECTOR;
- if (VMX(vcpu, insvc[0]) & (1UL<<ExtINT_VECTOR))
- return ExtINT_VECTOR;
-
- return highest_bits((int *)&(VMX(vcpu, insvc[0])));
-}
-
-extern void vcpu_get_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
- struct ia64_fpreg *val);
-extern void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
- struct ia64_fpreg *val);
-extern u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg);
-extern void vcpu_set_gr(struct kvm_vcpu *vcpu, unsigned long reg,
- u64 val, int nat);
-extern unsigned long vcpu_get_psr(struct kvm_vcpu *vcpu);
-extern void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val);
-extern u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr);
-extern void vcpu_bsw0(struct kvm_vcpu *vcpu);
-extern void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte,
- u64 itir, u64 va, int type);
-extern struct thash_data *vhpt_lookup(u64 va);
-extern u64 guest_vhpt_lookup(u64 iha, u64 *pte);
-extern void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps);
-extern void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps);
-extern u64 translate_phy_pte(u64 *pte, u64 itir, u64 va);
-extern void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte,
- u64 itir, u64 ifa, int type);
-extern void thash_purge_all(struct kvm_vcpu *v);
-extern struct thash_data *vtlb_lookup(struct kvm_vcpu *v,
- u64 va, int is_data);
-extern int vtr_find_overlap(struct kvm_vcpu *vcpu, u64 va,
- u64 ps, int is_data);
-
-extern void vcpu_increment_iip(struct kvm_vcpu *v);
-extern void vcpu_decrement_iip(struct kvm_vcpu *vcpu);
-extern void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec);
-extern void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec);
-extern void data_page_not_present(struct kvm_vcpu *vcpu, u64 vadr);
-extern void dnat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr);
-extern void alt_dtlb(struct kvm_vcpu *vcpu, u64 vadr);
-extern void nested_dtlb(struct kvm_vcpu *vcpu);
-extern void dvhpt_fault(struct kvm_vcpu *vcpu, u64 vadr);
-extern int vhpt_enabled(struct kvm_vcpu *vcpu, u64 vadr, enum vhpt_ref ref);
-
-extern void update_vhpi(struct kvm_vcpu *vcpu, int vec);
-extern int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice);
-
-extern int fetch_code(struct kvm_vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle);
-extern void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma);
-extern void vmm_transition(struct kvm_vcpu *vcpu);
-extern void vmm_trampoline(union context *from, union context *to);
-extern int vmm_entry(void);
-extern u64 vcpu_get_itc(struct kvm_vcpu *vcpu);
-
-extern void vmm_reset_entry(void);
-void kvm_init_vtlb(struct kvm_vcpu *v);
-void kvm_init_vhpt(struct kvm_vcpu *v);
-void thash_init(struct thash_cb *hcb, u64 sz);
-
-void panic_vm(struct kvm_vcpu *v, const char *fmt, ...);
-u64 kvm_gpa_to_mpa(u64 gpa);
-extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, u64 arg3,
- u64 arg4, u64 arg5, u64 arg6, u64 arg7);
-
-extern long vmm_sanity;
-
-#endif
-#endif /* __VCPU_H__ */
diff --git a/arch/ia64/kvm/vmm.c b/arch/ia64/kvm/vmm.c
deleted file mode 100644
index 176a12cd56d..00000000000
--- a/arch/ia64/kvm/vmm.c
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * vmm.c: vmm module interface with kvm module
- *
- * Copyright (c) 2007, Intel Corporation.
- *
- * Xiantao Zhang (xiantao.zhang@intel.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- */
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <asm/fpswa.h>
-
-#include "vcpu.h"
-
-MODULE_AUTHOR("Intel");
-MODULE_LICENSE("GPL");
-
-extern char kvm_ia64_ivt;
-extern char kvm_asm_mov_from_ar;
-extern char kvm_asm_mov_from_ar_sn2;
-extern fpswa_interface_t *vmm_fpswa_interface;
-
-long vmm_sanity = 1;
-
-struct kvm_vmm_info vmm_info = {
- .module = THIS_MODULE,
- .vmm_entry = vmm_entry,
- .tramp_entry = vmm_trampoline,
- .vmm_ivt = (unsigned long)&kvm_ia64_ivt,
- .patch_mov_ar = (unsigned long)&kvm_asm_mov_from_ar,
- .patch_mov_ar_sn2 = (unsigned long)&kvm_asm_mov_from_ar_sn2,
-};
-
-static int __init kvm_vmm_init(void)
-{
-
- vmm_fpswa_interface = fpswa_interface;
-
- /* Register vmm data with the kvm side */
- return kvm_init(&vmm_info, 1024, 0, THIS_MODULE);
-}
-
-static void __exit kvm_vmm_exit(void)
-{
- kvm_exit();
- return ;
-}
-
-void vmm_spin_lock(vmm_spinlock_t *lock)
-{
- _vmm_raw_spin_lock(lock);
-}
-
-void vmm_spin_unlock(vmm_spinlock_t *lock)
-{
- _vmm_raw_spin_unlock(lock);
-}
-
-static void vcpu_debug_exit(struct kvm_vcpu *vcpu)
-{
- struct exit_ctl_data *p = &vcpu->arch.exit_data;
- long psr;
-
- local_irq_save(psr);
- p->exit_reason = EXIT_REASON_DEBUG;
- vmm_transition(vcpu);
- local_irq_restore(psr);
-}
-
-asmlinkage int printk(const char *fmt, ...)
-{
- struct kvm_vcpu *vcpu = current_vcpu;
- va_list args;
- int r;
-
- memset(vcpu->arch.log_buf, 0, VMM_LOG_LEN);
- va_start(args, fmt);
- r = vsnprintf(vcpu->arch.log_buf, VMM_LOG_LEN, fmt, args);
- va_end(args);
- vcpu_debug_exit(vcpu);
- return r;
-}
-
-module_init(kvm_vmm_init)
-module_exit(kvm_vmm_exit)
diff --git a/arch/ia64/kvm/vmm_ivt.S b/arch/ia64/kvm/vmm_ivt.S
deleted file mode 100644
index 397e34a63e1..00000000000
--- a/arch/ia64/kvm/vmm_ivt.S
+++ /dev/null
@@ -1,1392 +0,0 @@
-/*
- * arch/ia64/kvm/vmm_ivt.S
- *
- * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
- * Stephane Eranian <eranian@hpl.hp.com>
- * David Mosberger <davidm@hpl.hp.com>
- * Copyright (C) 2000, 2002-2003 Intel Co
- * Asit Mallick <asit.k.mallick@intel.com>
- * Suresh Siddha <suresh.b.siddha@intel.com>
- * Kenneth Chen <kenneth.w.chen@intel.com>
- * Fenghua Yu <fenghua.yu@intel.com>
- *
- *
- * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling
- * for SMP
- * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB
- * handler now uses virtual PT.
- *
- * 07/6/20 Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
- * Supporting Intel virtualization architecture
- *
- */
-
-/*
- * This file defines the interruption vector table used by the CPU.
- * It does not include one entry per possible cause of interruption.
- *
- * The first 20 entries of the table contain 64 bundles each while the
- * remaining 48 entries contain only 16 bundles each.
- *
- * The 64 bundles are used to allow inlining the whole handler for
- * critical interruptions like TLB misses.
- *
- * For each entry, the comment is as follows:
- *
- * // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
- * entry offset ----/     /         /                  /  /
- * entry number ---------/         /                  /  /
- * size of the entry -------------/                  /  /
- * vector name -------------------------------------/  /
- * interruptions triggering this vector ----------------/
- *
- * The table is 32KB in size and must be aligned on a 32KB boundary.
- * (The CPU ignores the 15 lower bits of the address.)
- *
- * Table is based upon EAS2.6 (Oct 1999)
- */
-
-
-#include <asm/asmmacro.h>
-#include <asm/cache.h>
-#include <asm/pgtable.h>
-
-#include "asm-offsets.h"
-#include "vcpu.h"
-#include "kvm_minstate.h"
-#include "vti.h"
-
-#if 0
-# define PSR_DEFAULT_BITS psr.ac
-#else
-# define PSR_DEFAULT_BITS 0
-#endif
-
-#define KVM_FAULT(n) \
- kvm_fault_##n:; \
- mov r19=n;; \
- br.sptk.many kvm_vmm_panic; \
- ;; \
-
-#define KVM_REFLECT(n) \
- mov r31=pr; \
- mov r19=n; /* prepare to save predicates */ \
- mov r29=cr.ipsr; \
- ;; \
- tbit.z p6,p7=r29,IA64_PSR_VM_BIT; \
-(p7) br.sptk.many kvm_dispatch_reflection; \
- br.sptk.many kvm_vmm_panic; \
-
-GLOBAL_ENTRY(kvm_vmm_panic)
- KVM_SAVE_MIN_WITH_COVER_R19
- alloc r14=ar.pfs,0,0,1,0
- mov out0=r15
- adds r3=8,r2 // set up second base pointer
- ;;
- ssm psr.ic
- ;;
- srlz.i // guarantee that interruption collection is on
- ;;
- (p15) ssm psr.i // restore psr.
- addl r14=@gprel(ia64_leave_hypervisor),gp
- ;;
- KVM_SAVE_REST
- mov rp=r14
- ;;
- br.call.sptk.many b6=vmm_panic_handler;
-END(kvm_vmm_panic)
-
- .section .text..ivt,"ax"
-
- .align 32768 // align on 32KB boundary
- .global kvm_ia64_ivt
-kvm_ia64_ivt:
-///////////////////////////////////////////////////////////////
-// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
-ENTRY(kvm_vhpt_miss)
- KVM_FAULT(0)
-END(kvm_vhpt_miss)
-
- .org kvm_ia64_ivt+0x400
-////////////////////////////////////////////////////////////////
-// 0x0400 Entry 1 (size 64 bundles) ITLB (21)
-ENTRY(kvm_itlb_miss)
- mov r31 = pr
- mov r29=cr.ipsr;
- ;;
- tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
-(p6) br.sptk kvm_alt_itlb_miss
- mov r19 = 1
- br.sptk kvm_itlb_miss_dispatch
- KVM_FAULT(1);
-END(kvm_itlb_miss)
-
- .org kvm_ia64_ivt+0x0800
-//////////////////////////////////////////////////////////////////
-// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
-ENTRY(kvm_dtlb_miss)
- mov r31 = pr
- mov r29=cr.ipsr;
- ;;
- tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
-(p6) br.sptk kvm_alt_dtlb_miss
- br.sptk kvm_dtlb_miss_dispatch
-END(kvm_dtlb_miss)
-
- .org kvm_ia64_ivt+0x0c00
-////////////////////////////////////////////////////////////////////
-// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
-ENTRY(kvm_alt_itlb_miss)
- mov r16=cr.ifa // get address that caused the TLB miss
- ;;
- movl r17=PAGE_KERNEL
- mov r24=cr.ipsr
- movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
- ;;
- and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
- ;;
- or r19=r17,r19 // insert PTE control bits into r19
- ;;
- movl r20=IA64_GRANULE_SHIFT<<2
- ;;
- mov cr.itir=r20
- ;;
- itc.i r19 // insert the TLB entry
- mov pr=r31,-1
- rfi
-END(kvm_alt_itlb_miss)
-
- .org kvm_ia64_ivt+0x1000
-/////////////////////////////////////////////////////////////////////
-// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
-ENTRY(kvm_alt_dtlb_miss)
- mov r16=cr.ifa // get address that caused the TLB miss
- ;;
- movl r17=PAGE_KERNEL
- movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
- mov r24=cr.ipsr
- ;;
- and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
- ;;
- or r19=r19,r17 // insert PTE control bits into r19
- ;;
- movl r20=IA64_GRANULE_SHIFT<<2
- ;;
- mov cr.itir=r20
- ;;
- itc.d r19 // insert the TLB entry
- mov pr=r31,-1
- rfi
-END(kvm_alt_dtlb_miss)
-
- .org kvm_ia64_ivt+0x1400
-//////////////////////////////////////////////////////////////////////
-// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
-ENTRY(kvm_nested_dtlb_miss)
- KVM_FAULT(5)
-END(kvm_nested_dtlb_miss)
-
- .org kvm_ia64_ivt+0x1800
-/////////////////////////////////////////////////////////////////////
-// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
-ENTRY(kvm_ikey_miss)
- KVM_REFLECT(6)
-END(kvm_ikey_miss)
-
- .org kvm_ia64_ivt+0x1c00
-/////////////////////////////////////////////////////////////////////
-// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
-ENTRY(kvm_dkey_miss)
- KVM_REFLECT(7)
-END(kvm_dkey_miss)
-
- .org kvm_ia64_ivt+0x2000
-////////////////////////////////////////////////////////////////////
-// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
-ENTRY(kvm_dirty_bit)
- KVM_REFLECT(8)
-END(kvm_dirty_bit)
-
- .org kvm_ia64_ivt+0x2400
-////////////////////////////////////////////////////////////////////
-// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
-ENTRY(kvm_iaccess_bit)
- KVM_REFLECT(9)
-END(kvm_iaccess_bit)
-
- .org kvm_ia64_ivt+0x2800
-///////////////////////////////////////////////////////////////////
-// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
-ENTRY(kvm_daccess_bit)
- KVM_REFLECT(10)
-END(kvm_daccess_bit)
-
- .org kvm_ia64_ivt+0x2c00
-/////////////////////////////////////////////////////////////////
-// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
-ENTRY(kvm_break_fault)
- mov r31=pr
- mov r19=11
- mov r29=cr.ipsr
- ;;
- KVM_SAVE_MIN_WITH_COVER_R19
- ;;
- alloc r14=ar.pfs,0,0,4,0 //(must be first in insn group!)
- mov out0=cr.ifa
- mov out2=cr.isr // FIXME: pity to make this slow access twice
- mov out3=cr.iim // FIXME: pity to make this slow access twice
- adds r3=8,r2 // set up second base pointer
- ;;
- ssm psr.ic
- ;;
- srlz.i // guarantee that interruption collection is on
- ;;
- (p15)ssm psr.i // restore psr.i
- addl r14=@gprel(ia64_leave_hypervisor),gp
- ;;
- KVM_SAVE_REST
- mov rp=r14
- ;;
- adds out1=16,sp
- br.call.sptk.many b6=kvm_ia64_handle_break
- ;;
-END(kvm_break_fault)
-
- .org kvm_ia64_ivt+0x3000
-/////////////////////////////////////////////////////////////////
-// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
-ENTRY(kvm_interrupt)
- mov r31=pr // prepare to save predicates
- mov r19=12
- mov r29=cr.ipsr
- ;;
- tbit.z p6,p7=r29,IA64_PSR_VM_BIT
- tbit.z p0,p15=r29,IA64_PSR_I_BIT
- ;;
-(p7) br.sptk kvm_dispatch_interrupt
- ;;
- mov r27=ar.rsc /* M */
- mov r20=r1 /* A */
- mov r25=ar.unat /* M */
- mov r26=ar.pfs /* I */
- mov r28=cr.iip /* M */
- cover /* B (or nothing) */
- ;;
- mov r1=sp
- ;;
- invala /* M */
- mov r30=cr.ifs
- ;;
- addl r1=-VMM_PT_REGS_SIZE,r1
- ;;
- adds r17=2*L1_CACHE_BYTES,r1 /* really: biggest cache-line size */
- adds r16=PT(CR_IPSR),r1
- ;;
- lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES
- st8 [r16]=r29 /* save cr.ipsr */
- ;;
- lfetch.fault.excl.nt1 [r17]
- mov r29=b0
- ;;
- adds r16=PT(R8),r1 /* initialize first base pointer */
- adds r17=PT(R9),r1 /* initialize second base pointer */
- mov r18=r0 /* make sure r18 isn't NaT */
- ;;
-.mem.offset 0,0; st8.spill [r16]=r8,16
-.mem.offset 8,0; st8.spill [r17]=r9,16
- ;;
-.mem.offset 0,0; st8.spill [r16]=r10,24
-.mem.offset 8,0; st8.spill [r17]=r11,24
- ;;
- st8 [r16]=r28,16 /* save cr.iip */
- st8 [r17]=r30,16 /* save cr.ifs */
- mov r8=ar.fpsr /* M */
- mov r9=ar.csd
- mov r10=ar.ssd
- movl r11=FPSR_DEFAULT /* L-unit */
- ;;
- st8 [r16]=r25,16 /* save ar.unat */
- st8 [r17]=r26,16 /* save ar.pfs */
- shl r18=r18,16 /* compute ar.rsc to be used for "loadrs" */
- ;;
- st8 [r16]=r27,16 /* save ar.rsc */
- adds r17=16,r17 /* skip over ar_rnat field */
- ;;
- st8 [r17]=r31,16 /* save predicates */
- adds r16=16,r16 /* skip over ar_bspstore field */
- ;;
- st8 [r16]=r29,16 /* save b0 */
- st8 [r17]=r18,16 /* save ar.rsc value for "loadrs" */
- ;;
-.mem.offset 0,0; st8.spill [r16]=r20,16 /* save original r1 */
-.mem.offset 8,0; st8.spill [r17]=r12,16
- adds r12=-16,r1
- /* switch to kernel memory stack (with 16 bytes of scratch) */
- ;;
-.mem.offset 0,0; st8.spill [r16]=r13,16
-.mem.offset 8,0; st8.spill [r17]=r8,16 /* save ar.fpsr */
- ;;
-.mem.offset 0,0; st8.spill [r16]=r15,16
-.mem.offset 8,0; st8.spill [r17]=r14,16
- dep r14=-1,r0,60,4
- ;;
-.mem.offset 0,0; st8.spill [r16]=r2,16
-.mem.offset 8,0; st8.spill [r17]=r3,16
- adds r2=VMM_PT_REGS_R16_OFFSET,r1
- adds r14 = VMM_VCPU_GP_OFFSET,r13
- ;;
- mov r8=ar.ccv
- ld8 r14 = [r14]
- ;;
- mov r1=r14 /* establish kernel global pointer */
- ;; \
- bsw.1
- ;;
- alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group
- mov out0=r13
- ;;
- ssm psr.ic
- ;;
- srlz.i
- ;;
- //(p15) ssm psr.i
- adds r3=8,r2 // set up second base pointer for SAVE_REST
- srlz.i // ensure everybody knows psr.ic is back on
- ;;
-.mem.offset 0,0; st8.spill [r2]=r16,16
-.mem.offset 8,0; st8.spill [r3]=r17,16
- ;;
-.mem.offset 0,0; st8.spill [r2]=r18,16
-.mem.offset 8,0; st8.spill [r3]=r19,16
- ;;
-.mem.offset 0,0; st8.spill [r2]=r20,16
-.mem.offset 8,0; st8.spill [r3]=r21,16
- mov r18=b6
- ;;
-.mem.offset 0,0; st8.spill [r2]=r22,16
-.mem.offset 8,0; st8.spill [r3]=r23,16
- mov r19=b7
- ;;
-.mem.offset 0,0; st8.spill [r2]=r24,16
-.mem.offset 8,0; st8.spill [r3]=r25,16
- ;;
-.mem.offset 0,0; st8.spill [r2]=r26,16
-.mem.offset 8,0; st8.spill [r3]=r27,16
- ;;
-.mem.offset 0,0; st8.spill [r2]=r28,16
-.mem.offset 8,0; st8.spill [r3]=r29,16
- ;;
-.mem.offset 0,0; st8.spill [r2]=r30,16
-.mem.offset 8,0; st8.spill [r3]=r31,32
- ;;
- mov ar.fpsr=r11 /* M-unit */
- st8 [r2]=r8,8 /* ar.ccv */
- adds r24=PT(B6)-PT(F7),r3
- ;;
- stf.spill [r2]=f6,32
- stf.spill [r3]=f7,32
- ;;
- stf.spill [r2]=f8,32
- stf.spill [r3]=f9,32
- ;;
- stf.spill [r2]=f10
- stf.spill [r3]=f11
- adds r25=PT(B7)-PT(F11),r3
- ;;
- st8 [r24]=r18,16 /* b6 */
- st8 [r25]=r19,16 /* b7 */
- ;;
- st8 [r24]=r9 /* ar.csd */
- st8 [r25]=r10 /* ar.ssd */
- ;;
- srlz.d // make sure we see the effect of cr.ivr
- addl r14=@gprel(ia64_leave_nested),gp
- ;;
- mov rp=r14
- br.call.sptk.many b6=kvm_ia64_handle_irq
- ;;
-END(kvm_interrupt)
-
- .global kvm_dispatch_vexirq
- .org kvm_ia64_ivt+0x3400
-//////////////////////////////////////////////////////////////////////
-// 0x3400 Entry 13 (size 64 bundles) Reserved
-ENTRY(kvm_virtual_exirq)
- mov r31=pr
- mov r19=13
- mov r30 =r0
- ;;
-kvm_dispatch_vexirq:
- cmp.eq p6,p0 = 1,r30
- ;;
-(p6) add r29 = VMM_VCPU_SAVED_GP_OFFSET,r21
- ;;
-(p6) ld8 r1 = [r29]
- ;;
- KVM_SAVE_MIN_WITH_COVER_R19
- alloc r14=ar.pfs,0,0,1,0
- mov out0=r13
-
- ssm psr.ic
- ;;
- srlz.i // guarantee that interruption collection is on
- ;;
- (p15) ssm psr.i // restore psr.i
- adds r3=8,r2 // set up second base pointer
- ;;
- KVM_SAVE_REST
- addl r14=@gprel(ia64_leave_hypervisor),gp
- ;;
- mov rp=r14
- br.call.sptk.many b6=kvm_vexirq
-END(kvm_virtual_exirq)
-
- .org kvm_ia64_ivt+0x3800
-/////////////////////////////////////////////////////////////////////
-// 0x3800 Entry 14 (size 64 bundles) Reserved
- KVM_FAULT(14)
- // this code segment is from 2.6.16.13
-
- .org kvm_ia64_ivt+0x3c00
-///////////////////////////////////////////////////////////////////////
-// 0x3c00 Entry 15 (size 64 bundles) Reserved
- KVM_FAULT(15)
-
- .org kvm_ia64_ivt+0x4000
-///////////////////////////////////////////////////////////////////////
-// 0x4000 Entry 16 (size 64 bundles) Reserved
- KVM_FAULT(16)
-
- .org kvm_ia64_ivt+0x4400
-//////////////////////////////////////////////////////////////////////
-// 0x4400 Entry 17 (size 64 bundles) Reserved
- KVM_FAULT(17)
-
- .org kvm_ia64_ivt+0x4800
-//////////////////////////////////////////////////////////////////////
-// 0x4800 Entry 18 (size 64 bundles) Reserved
- KVM_FAULT(18)
-
- .org kvm_ia64_ivt+0x4c00
-//////////////////////////////////////////////////////////////////////
-// 0x4c00 Entry 19 (size 64 bundles) Reserved
- KVM_FAULT(19)
-
- .org kvm_ia64_ivt+0x5000
-//////////////////////////////////////////////////////////////////////
-// 0x5000 Entry 20 (size 16 bundles) Page Not Present
-ENTRY(kvm_page_not_present)
- KVM_REFLECT(20)
-END(kvm_page_not_present)
-
- .org kvm_ia64_ivt+0x5100
-///////////////////////////////////////////////////////////////////////
-// 0x5100 Entry 21 (size 16 bundles) Key Permission vector
-ENTRY(kvm_key_permission)
- KVM_REFLECT(21)
-END(kvm_key_permission)
-
- .org kvm_ia64_ivt+0x5200
-//////////////////////////////////////////////////////////////////////
-// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
-ENTRY(kvm_iaccess_rights)
- KVM_REFLECT(22)
-END(kvm_iaccess_rights)
-
- .org kvm_ia64_ivt+0x5300
-//////////////////////////////////////////////////////////////////////
-// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
-ENTRY(kvm_daccess_rights)
- KVM_REFLECT(23)
-END(kvm_daccess_rights)
-
- .org kvm_ia64_ivt+0x5400
-/////////////////////////////////////////////////////////////////////
-// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
-ENTRY(kvm_general_exception)
- KVM_REFLECT(24)
- KVM_FAULT(24)
-END(kvm_general_exception)
-
- .org kvm_ia64_ivt+0x5500
-//////////////////////////////////////////////////////////////////////
-// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
-ENTRY(kvm_disabled_fp_reg)
- KVM_REFLECT(25)
-END(kvm_disabled_fp_reg)
-
- .org kvm_ia64_ivt+0x5600
-////////////////////////////////////////////////////////////////////
-// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
-ENTRY(kvm_nat_consumption)
- KVM_REFLECT(26)
-END(kvm_nat_consumption)
-
- .org kvm_ia64_ivt+0x5700
-/////////////////////////////////////////////////////////////////////
-// 0x5700 Entry 27 (size 16 bundles) Speculation (40)
-ENTRY(kvm_speculation_vector)
- KVM_REFLECT(27)
-END(kvm_speculation_vector)
-
- .org kvm_ia64_ivt+0x5800
-/////////////////////////////////////////////////////////////////////
-// 0x5800 Entry 28 (size 16 bundles) Reserved
- KVM_FAULT(28)
-
- .org kvm_ia64_ivt+0x5900
-///////////////////////////////////////////////////////////////////
-// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
-ENTRY(kvm_debug_vector)
- KVM_FAULT(29)
-END(kvm_debug_vector)
-
- .org kvm_ia64_ivt+0x5a00
-///////////////////////////////////////////////////////////////
-// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
-ENTRY(kvm_unaligned_access)
- KVM_REFLECT(30)
-END(kvm_unaligned_access)
-
- .org kvm_ia64_ivt+0x5b00
-//////////////////////////////////////////////////////////////////////
-// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
-ENTRY(kvm_unsupported_data_reference)
- KVM_REFLECT(31)
-END(kvm_unsupported_data_reference)
-
- .org kvm_ia64_ivt+0x5c00
-////////////////////////////////////////////////////////////////////
-// 0x5c00 Entry 32 (size 16 bundles) Floating Point FAULT (65)
-ENTRY(kvm_floating_point_fault)
- KVM_REFLECT(32)
-END(kvm_floating_point_fault)
-
- .org kvm_ia64_ivt+0x5d00
-/////////////////////////////////////////////////////////////////////
-// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
-ENTRY(kvm_floating_point_trap)
- KVM_REFLECT(33)
-END(kvm_floating_point_trap)
-
- .org kvm_ia64_ivt+0x5e00
-//////////////////////////////////////////////////////////////////////
-// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
-ENTRY(kvm_lower_privilege_trap)
- KVM_REFLECT(34)
-END(kvm_lower_privilege_trap)
-
- .org kvm_ia64_ivt+0x5f00
-//////////////////////////////////////////////////////////////////////
-// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
-ENTRY(kvm_taken_branch_trap)
- KVM_REFLECT(35)
-END(kvm_taken_branch_trap)
-
- .org kvm_ia64_ivt+0x6000
-////////////////////////////////////////////////////////////////////
-// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
-ENTRY(kvm_single_step_trap)
- KVM_REFLECT(36)
-END(kvm_single_step_trap)
- .global kvm_virtualization_fault_back
- .org kvm_ia64_ivt+0x6100
-/////////////////////////////////////////////////////////////////////
-// 0x6100 Entry 37 (size 16 bundles) Virtualization Fault
-ENTRY(kvm_virtualization_fault)
- mov r31=pr
- adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
- ;;
- st8 [r16] = r1
- adds r17 = VMM_VCPU_GP_OFFSET, r21
- ;;
- ld8 r1 = [r17]
- cmp.eq p6,p0=EVENT_MOV_FROM_AR,r24
- cmp.eq p7,p0=EVENT_MOV_FROM_RR,r24
- cmp.eq p8,p0=EVENT_MOV_TO_RR,r24
- cmp.eq p9,p0=EVENT_RSM,r24
- cmp.eq p10,p0=EVENT_SSM,r24
- cmp.eq p11,p0=EVENT_MOV_TO_PSR,r24
- cmp.eq p12,p0=EVENT_THASH,r24
-(p6) br.dptk.many kvm_asm_mov_from_ar
-(p7) br.dptk.many kvm_asm_mov_from_rr
-(p8) br.dptk.many kvm_asm_mov_to_rr
-(p9) br.dptk.many kvm_asm_rsm
-(p10) br.dptk.many kvm_asm_ssm
-(p11) br.dptk.many kvm_asm_mov_to_psr
-(p12) br.dptk.many kvm_asm_thash
- ;;
-kvm_virtualization_fault_back:
- adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
- ;;
- ld8 r1 = [r16]
- ;;
- mov r19=37
- adds r16 = VMM_VCPU_CAUSE_OFFSET,r21
- adds r17 = VMM_VCPU_OPCODE_OFFSET,r21
- ;;
- st8 [r16] = r24
- st8 [r17] = r25
- ;;
- cmp.ne p6,p0=EVENT_RFI, r24
-(p6) br.sptk kvm_dispatch_virtualization_fault
- ;;
- adds r18=VMM_VPD_BASE_OFFSET,r21
- ;;
- ld8 r18=[r18]
- ;;
- adds r18=VMM_VPD_VIFS_OFFSET,r18
- ;;
- ld8 r18=[r18]
- ;;
- tbit.z p6,p0=r18,63
-(p6) br.sptk kvm_dispatch_virtualization_fault
- ;;
-// if vifs.v = 1, discard the current register frame
- alloc r18=ar.pfs,0,0,0,0
- br.sptk kvm_dispatch_virtualization_fault
-END(kvm_virtualization_fault)
-
- .org kvm_ia64_ivt+0x6200
-//////////////////////////////////////////////////////////////
-// 0x6200 Entry 38 (size 16 bundles) Reserved
- KVM_FAULT(38)
-
- .org kvm_ia64_ivt+0x6300
-/////////////////////////////////////////////////////////////////
-// 0x6300 Entry 39 (size 16 bundles) Reserved
- KVM_FAULT(39)
-
- .org kvm_ia64_ivt+0x6400
-/////////////////////////////////////////////////////////////////
-// 0x6400 Entry 40 (size 16 bundles) Reserved
- KVM_FAULT(40)
-
- .org kvm_ia64_ivt+0x6500
-//////////////////////////////////////////////////////////////////
-// 0x6500 Entry 41 (size 16 bundles) Reserved
- KVM_FAULT(41)
-
- .org kvm_ia64_ivt+0x6600
-//////////////////////////////////////////////////////////////////
-// 0x6600 Entry 42 (size 16 bundles) Reserved
- KVM_FAULT(42)
-
- .org kvm_ia64_ivt+0x6700
-//////////////////////////////////////////////////////////////////
-// 0x6700 Entry 43 (size 16 bundles) Reserved
- KVM_FAULT(43)
-
- .org kvm_ia64_ivt+0x6800
-//////////////////////////////////////////////////////////////////
-// 0x6800 Entry 44 (size 16 bundles) Reserved
- KVM_FAULT(44)
-
- .org kvm_ia64_ivt+0x6900
-///////////////////////////////////////////////////////////////////
-// 0x6900 Entry 45 (size 16 bundles) IA-32 Exception
-//(17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
-ENTRY(kvm_ia32_exception)
- KVM_FAULT(45)
-END(kvm_ia32_exception)
-
- .org kvm_ia64_ivt+0x6a00
-////////////////////////////////////////////////////////////////////
-// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
-ENTRY(kvm_ia32_intercept)
- KVM_FAULT(47)
-END(kvm_ia32_intercept)
-
- .org kvm_ia64_ivt+0x6c00
-/////////////////////////////////////////////////////////////////////
-// 0x6c00 Entry 48 (size 16 bundles) Reserved
- KVM_FAULT(48)
-
- .org kvm_ia64_ivt+0x6d00
-//////////////////////////////////////////////////////////////////////
-// 0x6d00 Entry 49 (size 16 bundles) Reserved
- KVM_FAULT(49)
-
- .org kvm_ia64_ivt+0x6e00
-//////////////////////////////////////////////////////////////////////
-// 0x6e00 Entry 50 (size 16 bundles) Reserved
- KVM_FAULT(50)
-
- .org kvm_ia64_ivt+0x6f00
-/////////////////////////////////////////////////////////////////////
-// 0x6f00 Entry 51 (size 16 bundles) Reserved
- KVM_FAULT(52)
-
- .org kvm_ia64_ivt+0x7100
-////////////////////////////////////////////////////////////////////
-// 0x7100 Entry 53 (size 16 bundles) Reserved
- KVM_FAULT(53)
-
- .org kvm_ia64_ivt+0x7200
-/////////////////////////////////////////////////////////////////////
-// 0x7200 Entry 54 (size 16 bundles) Reserved
- KVM_FAULT(54)
-
- .org kvm_ia64_ivt+0x7300
-////////////////////////////////////////////////////////////////////
-// 0x7300 Entry 55 (size 16 bundles) Reserved
- KVM_FAULT(55)
-
- .org kvm_ia64_ivt+0x7400
-////////////////////////////////////////////////////////////////////
-// 0x7400 Entry 56 (size 16 bundles) Reserved
- KVM_FAULT(56)
-
- .org kvm_ia64_ivt+0x7500
-/////////////////////////////////////////////////////////////////////
-// 0x7500 Entry 57 (size 16 bundles) Reserved
- KVM_FAULT(57)
-
- .org kvm_ia64_ivt+0x7600
-/////////////////////////////////////////////////////////////////////
-// 0x7600 Entry 58 (size 16 bundles) Reserved
- KVM_FAULT(58)
-
- .org kvm_ia64_ivt+0x7700
-////////////////////////////////////////////////////////////////////
-// 0x7700 Entry 59 (size 16 bundles) Reserved
- KVM_FAULT(59)
-
- .org kvm_ia64_ivt+0x7800
-////////////////////////////////////////////////////////////////////
-// 0x7800 Entry 60 (size 16 bundles) Reserved
- KVM_FAULT(60)
-
- .org kvm_ia64_ivt+0x7900
-/////////////////////////////////////////////////////////////////////
-// 0x7900 Entry 61 (size 16 bundles) Reserved
- KVM_FAULT(61)
-
- .org kvm_ia64_ivt+0x7a00
-/////////////////////////////////////////////////////////////////////
-// 0x7a00 Entry 62 (size 16 bundles) Reserved
- KVM_FAULT(62)
-
- .org kvm_ia64_ivt+0x7b00
-/////////////////////////////////////////////////////////////////////
-// 0x7b00 Entry 63 (size 16 bundles) Reserved
- KVM_FAULT(63)
-
- .org kvm_ia64_ivt+0x7c00
-////////////////////////////////////////////////////////////////////
-// 0x7c00 Entry 64 (size 16 bundles) Reserved
- KVM_FAULT(64)
-
- .org kvm_ia64_ivt+0x7d00
-/////////////////////////////////////////////////////////////////////
-// 0x7d00 Entry 65 (size 16 bundles) Reserved
- KVM_FAULT(65)
-
- .org kvm_ia64_ivt+0x7e00
-/////////////////////////////////////////////////////////////////////
-// 0x7e00 Entry 66 (size 16 bundles) Reserved
- KVM_FAULT(66)
-
- .org kvm_ia64_ivt+0x7f00
-////////////////////////////////////////////////////////////////////
-// 0x7f00 Entry 67 (size 16 bundles) Reserved
- KVM_FAULT(67)
-
- .org kvm_ia64_ivt+0x8000
-// There is no particular reason for this code to be here, other than that
-// there happens to be space here that would go unused otherwise. If this
-// fault ever gets "unreserved", simply move the following code to a more
-// suitable spot...
-
-
-ENTRY(kvm_dtlb_miss_dispatch)
- mov r19 = 2
- KVM_SAVE_MIN_WITH_COVER_R19
- alloc r14=ar.pfs,0,0,3,0
- mov out0=cr.ifa
- mov out1=r15
- adds r3=8,r2 // set up second base pointer
- ;;
- ssm psr.ic
- ;;
- srlz.i // guarantee that interruption collection is on
- ;;
- (p15) ssm psr.i // restore psr.i
- addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
- ;;
- KVM_SAVE_REST
- KVM_SAVE_EXTRA
- mov rp=r14
- ;;
- adds out2=16,r12
- br.call.sptk.many b6=kvm_page_fault
-END(kvm_dtlb_miss_dispatch)
-
-ENTRY(kvm_itlb_miss_dispatch)
-
- KVM_SAVE_MIN_WITH_COVER_R19
- alloc r14=ar.pfs,0,0,3,0
- mov out0=cr.ifa
- mov out1=r15
- adds r3=8,r2 // set up second base pointer
- ;;
- ssm psr.ic
- ;;
- srlz.i // guarantee that interruption collection is on
- ;;
- (p15) ssm psr.i // restore psr.i
- addl r14=@gprel(ia64_leave_hypervisor),gp
- ;;
- KVM_SAVE_REST
- mov rp=r14
- ;;
- adds out2=16,r12
- br.call.sptk.many b6=kvm_page_fault
-END(kvm_itlb_miss_dispatch)
-
-ENTRY(kvm_dispatch_reflection)
-/*
- * Input:
- * psr.ic: off
- * r19: intr type (offset into ivt, see ia64_int.h)
- * r31: contains saved predicates (pr)
- */
- KVM_SAVE_MIN_WITH_COVER_R19
- alloc r14=ar.pfs,0,0,5,0
- mov out0=cr.ifa
- mov out1=cr.isr
- mov out2=cr.iim
- mov out3=r15
- adds r3=8,r2 // set up second base pointer
- ;;
- ssm psr.ic
- ;;
- srlz.i // guarantee that interruption collection is on
- ;;
- (p15) ssm psr.i // restore psr.i
- addl r14=@gprel(ia64_leave_hypervisor),gp
- ;;
- KVM_SAVE_REST
- mov rp=r14
- ;;
- adds out4=16,r12
- br.call.sptk.many b6=reflect_interruption
-END(kvm_dispatch_reflection)
-
-ENTRY(kvm_dispatch_virtualization_fault)
- adds r16 = VMM_VCPU_CAUSE_OFFSET,r21
- adds r17 = VMM_VCPU_OPCODE_OFFSET,r21
- ;;
- st8 [r16] = r24
- st8 [r17] = r25
- ;;
- KVM_SAVE_MIN_WITH_COVER_R19
- ;;
- alloc r14=ar.pfs,0,0,2,0 // (must be first in insn group!)
- mov out0=r13 //vcpu
- adds r3=8,r2 // set up second base pointer
- ;;
- ssm psr.ic
- ;;
- srlz.i // guarantee that interruption collection is on
- ;;
- (p15) ssm psr.i // restore psr.i
- addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
- ;;
- KVM_SAVE_REST
- KVM_SAVE_EXTRA
- mov rp=r14
- ;;
- adds out1=16,sp //regs
- br.call.sptk.many b6=kvm_emulate
-END(kvm_dispatch_virtualization_fault)
-
-
-ENTRY(kvm_dispatch_interrupt)
- KVM_SAVE_MIN_WITH_COVER_R19 // uses r31; defines r2 and r3
- ;;
- alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group
- adds r3=8,r2 // set up second base pointer for SAVE_REST
- ;;
- ssm psr.ic
- ;;
- srlz.i
- ;;
- (p15) ssm psr.i
- addl r14=@gprel(ia64_leave_hypervisor),gp
- ;;
- KVM_SAVE_REST
- mov rp=r14
- ;;
- mov out0=r13 // pass the vcpu pointer as the first argument
- br.call.sptk.many b6=kvm_ia64_handle_irq
-END(kvm_dispatch_interrupt)
-
-GLOBAL_ENTRY(ia64_leave_nested)
- rsm psr.i
- ;;
- adds r21=PT(PR)+16,r12
- ;;
- lfetch [r21],PT(CR_IPSR)-PT(PR)
- adds r2=PT(B6)+16,r12
- adds r3=PT(R16)+16,r12
- ;;
- lfetch [r21]
- ld8 r28=[r2],8 // load b6
- adds r29=PT(R24)+16,r12
-
- ld8.fill r16=[r3]
- adds r3=PT(AR_CSD)-PT(R16),r3
- adds r30=PT(AR_CCV)+16,r12
- ;;
- ld8.fill r24=[r29]
- ld8 r15=[r30] // load ar.ccv
- ;;
- ld8 r29=[r2],16 // load b7
- ld8 r30=[r3],16 // load ar.csd
- ;;
- ld8 r31=[r2],16 // load ar.ssd
- ld8.fill r8=[r3],16
- ;;
- ld8.fill r9=[r2],16
- ld8.fill r10=[r3],PT(R17)-PT(R10)
- ;;
- ld8.fill r11=[r2],PT(R18)-PT(R11)
- ld8.fill r17=[r3],16
- ;;
- ld8.fill r18=[r2],16
- ld8.fill r19=[r3],16
- ;;
- ld8.fill r20=[r2],16
- ld8.fill r21=[r3],16
- mov ar.csd=r30
- mov ar.ssd=r31
- ;;
- rsm psr.i | psr.ic
- // initiate turning off of interrupt and interruption collection
- invala // invalidate ALAT
- ;;
- srlz.i
- ;;
- ld8.fill r22=[r2],24
- ld8.fill r23=[r3],24
- mov b6=r28
- ;;
- ld8.fill r25=[r2],16
- ld8.fill r26=[r3],16
- mov b7=r29
- ;;
- ld8.fill r27=[r2],16
- ld8.fill r28=[r3],16
- ;;
- ld8.fill r29=[r2],16
- ld8.fill r30=[r3],24
- ;;
- ld8.fill r31=[r2],PT(F9)-PT(R31)
- adds r3=PT(F10)-PT(F6),r3
- ;;
- ldf.fill f9=[r2],PT(F6)-PT(F9)
- ldf.fill f10=[r3],PT(F8)-PT(F10)
- ;;
- ldf.fill f6=[r2],PT(F7)-PT(F6)
- ;;
- ldf.fill f7=[r2],PT(F11)-PT(F7)
- ldf.fill f8=[r3],32
- ;;
- srlz.i // ensure interruption collection is off
- mov ar.ccv=r15
- ;;
- bsw.0 // switch back to bank 0 (no stop bit required beforehand...)
- ;;
- ldf.fill f11=[r2]
-// mov r18=r13
-// mov r21=r13
- adds r16=PT(CR_IPSR)+16,r12
- adds r17=PT(CR_IIP)+16,r12
- ;;
- ld8 r29=[r16],16 // load cr.ipsr
- ld8 r28=[r17],16 // load cr.iip
- ;;
- ld8 r30=[r16],16 // load cr.ifs
- ld8 r25=[r17],16 // load ar.unat
- ;;
- ld8 r26=[r16],16 // load ar.pfs
- ld8 r27=[r17],16 // load ar.rsc
- cmp.eq p9,p0=r0,r0
- // set p9 to indicate that we should restore cr.ifs
- ;;
- ld8 r24=[r16],16 // load ar.rnat (may be garbage)
- ld8 r23=[r17],16// load ar.bspstore (may be garbage)
- ;;
- ld8 r31=[r16],16 // load predicates
- ld8 r22=[r17],16 // load b0
- ;;
- ld8 r19=[r16],16 // load ar.rsc value for "loadrs"
- ld8.fill r1=[r17],16 // load r1
- ;;
- ld8.fill r12=[r16],16
- ld8.fill r13=[r17],16
- ;;
- ld8 r20=[r16],16 // ar.fpsr
- ld8.fill r15=[r17],16
- ;;
- ld8.fill r14=[r16],16
- ld8.fill r2=[r17]
- ;;
- ld8.fill r3=[r16]
- ;;
- mov r16=ar.bsp // get existing backing store pointer
- ;;
- mov b0=r22
- mov ar.pfs=r26
- mov cr.ifs=r30
- mov cr.ipsr=r29
- mov ar.fpsr=r20
- mov cr.iip=r28
- ;;
- mov ar.rsc=r27
- mov ar.unat=r25
- mov pr=r31,-1
- rfi
-END(ia64_leave_nested)
-
-GLOBAL_ENTRY(ia64_leave_hypervisor_prepare)
-/*
- * work.need_resched etc. mustn't get changed
- * by this CPU before it returns to
- * user- or fsys-mode, hence we disable interrupts early on:
- */
- adds r2 = PT(R4)+16,r12
- adds r3 = PT(R5)+16,r12
- adds r8 = PT(EML_UNAT)+16,r12
- ;;
- ld8 r8 = [r8]
- ;;
- mov ar.unat=r8
- ;;
- ld8.fill r4=[r2],16 //load r4
- ld8.fill r5=[r3],16 //load r5
- ;;
- ld8.fill r6=[r2] //load r6
- ld8.fill r7=[r3] //load r7
- ;;
-END(ia64_leave_hypervisor_prepare)
-//fall through
-GLOBAL_ENTRY(ia64_leave_hypervisor)
- rsm psr.i
- ;;
- br.call.sptk.many b0=leave_hypervisor_tail
- ;;
- adds r20=PT(PR)+16,r12
- adds r8=PT(EML_UNAT)+16,r12
- ;;
- ld8 r8=[r8]
- ;;
- mov ar.unat=r8
- ;;
- lfetch [r20],PT(CR_IPSR)-PT(PR)
- adds r2 = PT(B6)+16,r12
- adds r3 = PT(B7)+16,r12
- ;;
- lfetch [r20]
- ;;
- ld8 r24=[r2],16 /* B6 */
- ld8 r25=[r3],16 /* B7 */
- ;;
- ld8 r26=[r2],16 /* ar_csd */
- ld8 r27=[r3],16 /* ar_ssd */
- mov b6 = r24
- ;;
- ld8.fill r8=[r2],16
- ld8.fill r9=[r3],16
- mov b7 = r25
- ;;
- mov ar.csd = r26
- mov ar.ssd = r27
- ;;
- ld8.fill r10=[r2],PT(R15)-PT(R10)
- ld8.fill r11=[r3],PT(R14)-PT(R11)
- ;;
- ld8.fill r15=[r2],PT(R16)-PT(R15)
- ld8.fill r14=[r3],PT(R17)-PT(R14)
- ;;
- ld8.fill r16=[r2],16
- ld8.fill r17=[r3],16
- ;;
- ld8.fill r18=[r2],16
- ld8.fill r19=[r3],16
- ;;
- ld8.fill r20=[r2],16
- ld8.fill r21=[r3],16
- ;;
- ld8.fill r22=[r2],16
- ld8.fill r23=[r3],16
- ;;
- ld8.fill r24=[r2],16
- ld8.fill r25=[r3],16
- ;;
- ld8.fill r26=[r2],16
- ld8.fill r27=[r3],16
- ;;
- ld8.fill r28=[r2],16
- ld8.fill r29=[r3],16
- ;;
- ld8.fill r30=[r2],PT(F6)-PT(R30)
- ld8.fill r31=[r3],PT(F7)-PT(R31)
- ;;
- rsm psr.i | psr.ic
- // initiate turning off of interrupt and interruption collection
- invala // invalidate ALAT
- ;;
- srlz.i // ensure interruption collection is off
- ;;
- bsw.0
- ;;
- adds r16 = PT(CR_IPSR)+16,r12
- adds r17 = PT(CR_IIP)+16,r12
- mov r21=r13 // get current
- ;;
- ld8 r31=[r16],16 // load cr.ipsr
- ld8 r30=[r17],16 // load cr.iip
- ;;
- ld8 r29=[r16],16 // load cr.ifs
- ld8 r28=[r17],16 // load ar.unat
- ;;
- ld8 r27=[r16],16 // load ar.pfs
- ld8 r26=[r17],16 // load ar.rsc
- ;;
- ld8 r25=[r16],16 // load ar.rnat
- ld8 r24=[r17],16 // load ar.bspstore
- ;;
- ld8 r23=[r16],16 // load predicates
- ld8 r22=[r17],16 // load b0
- ;;
- ld8 r20=[r16],16 // load ar.rsc value for "loadrs"
- ld8.fill r1=[r17],16 //load r1
- ;;
- ld8.fill r12=[r16],16 //load r12
- ld8.fill r13=[r17],PT(R2)-PT(R13) //load r13
- ;;
- ld8 r19=[r16],PT(R3)-PT(AR_FPSR) //load ar_fpsr
- ld8.fill r2=[r17],PT(AR_CCV)-PT(R2) //load r2
- ;;
- ld8.fill r3=[r16] //load r3
- ld8 r18=[r17] //load ar_ccv
- ;;
- mov ar.fpsr=r19
- mov ar.ccv=r18
- shr.u r18=r20,16
- ;;
-kvm_rbs_switch:
- mov r19=96
-
-kvm_dont_preserve_current_frame:
-/*
- * To prevent leaking bits between the hypervisor and guest domain,
- * we must clear the stacked registers in the "invalid" partition here.
- * (clearing proceeds at roughly 5 registers/cycle on McKinley).
- */
-# define pRecurse p6
-# define pReturn p7
-# define Nregs 14
-
- alloc loc0=ar.pfs,2,Nregs-2,2,0
- shr.u loc1=r18,9 // RNaTslots <= floor(dirtySize / (64*8))
- sub r19=r19,r18 // r19 = (physStackedSize + 8) - dirtySize
- ;;
- mov ar.rsc=r20 // load ar.rsc to be used for "loadrs"
- shladd in0=loc1,3,r19
- mov in1=0
- ;;
- TEXT_ALIGN(32)
-kvm_rse_clear_invalid:
- alloc loc0=ar.pfs,2,Nregs-2,2,0
- cmp.lt pRecurse,p0=Nregs*8,in0
- // if more than Nregs regs left to clear, (re)curse
- add out0=-Nregs*8,in0
- add out1=1,in1 // increment recursion count
- mov loc1=0
- mov loc2=0
- ;;
- mov loc3=0
- mov loc4=0
- mov loc5=0
- mov loc6=0
- mov loc7=0
-(pRecurse) br.call.dptk.few b0=kvm_rse_clear_invalid
- ;;
- mov loc8=0
- mov loc9=0
- cmp.ne pReturn,p0=r0,in1
- // if recursion count != 0, we need to do a br.ret
- mov loc10=0
- mov loc11=0
-(pReturn) br.ret.dptk.many b0
-
-# undef pRecurse
-# undef pReturn
-
-// loadrs has already been shifted
- alloc r16=ar.pfs,0,0,0,0 // drop current register frame
- ;;
- loadrs
- ;;
- mov ar.bspstore=r24
- ;;
- mov ar.unat=r28
- mov ar.rnat=r25
- mov ar.rsc=r26
- ;;
- mov cr.ipsr=r31
- mov cr.iip=r30
- mov cr.ifs=r29
- mov ar.pfs=r27
- adds r18=VMM_VPD_BASE_OFFSET,r21
- ;;
- ld8 r18=[r18] //vpd
- adds r17=VMM_VCPU_ISR_OFFSET,r21
- ;;
- ld8 r17=[r17]
- adds r19=VMM_VPD_VPSR_OFFSET,r18
- ;;
- ld8 r19=[r19] //vpsr
- mov r25=r18
- adds r16= VMM_VCPU_GP_OFFSET,r21
- ;;
- ld8 r16= [r16] // load gp (folded into r24 below)
- movl r24=@gprel(ia64_vmm_entry) // calculate return address
- ;;
- add r24=r24,r16
- ;;
- br.sptk.many kvm_vps_sync_write // call the service
- ;;
-END(ia64_leave_hypervisor)
-// fall through
-GLOBAL_ENTRY(ia64_vmm_entry)
-/*
- * must be at bank 0
- * parameter:
- * r17:cr.isr
- * r18:vpd
- * r19:vpsr
- * r22:b0
- * r23:predicate
- */
- mov r24=r22
- mov r25=r18
- tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT // p1=vpsr.ic
-(p1) br.cond.sptk.few kvm_vps_resume_normal
-(p2) br.cond.sptk.many kvm_vps_resume_handler
- ;;
-END(ia64_vmm_entry)
-
-/*
- * extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2,
- * u64 arg3, u64 arg4, u64 arg5,
- * u64 arg6, u64 arg7);
- *
- * XXX: The currently defined services use at most 4 args. The
- * rest are not consumed.
- */
-GLOBAL_ENTRY(ia64_call_vsa)
- .regstk 4,4,0,0
-
-rpsave = loc0
-pfssave = loc1
-psrsave = loc2
-entry = loc3
-hostret = r24
-
- alloc pfssave=ar.pfs,4,4,0,0
- mov rpsave=rp
- adds entry=VMM_VCPU_VSA_BASE_OFFSET, r13
- ;;
- ld8 entry=[entry]
-1: mov hostret=ip
- mov r25=in1 // copy arguments
- mov r26=in2
- mov r27=in3
- mov psrsave=psr
- ;;
- tbit.nz p6,p0=psrsave,14 // IA64_PSR_I
- tbit.nz p7,p0=psrsave,13 // IA64_PSR_IC
- ;;
- add hostret=2f-1b,hostret // calculate return address
- add entry=entry,in0
- ;;
- rsm psr.i | psr.ic
- ;;
- srlz.i
- mov b6=entry
- br.cond.sptk b6 // call the service
-2:
-// Architectural sequence for enabling interrupts if necessary
-(p7) ssm psr.ic
- ;;
-(p7) srlz.i
- ;;
-(p6) ssm psr.i
- ;;
- mov rp=rpsave
- mov ar.pfs=pfssave
- mov r8=r31
- ;;
- srlz.d
- br.ret.sptk rp
-
-END(ia64_call_vsa)
-
-#define INIT_BSPSTORE ((4<<30)-(12<<20)-0x100)
-
-GLOBAL_ENTRY(vmm_reset_entry)
- //set up ipsr, iip, vpd.vpsr, dcr
- // For IPSR: it/dt/rt=1, i/ic=1, si=1, vm/bn=1
- // For DCR: all bits 0
- bsw.0
- ;;
- mov r21 =r13
- adds r14=-VMM_PT_REGS_SIZE, r12
- ;;
- movl r6=0x501008826000 // IPSR dt/rt/it:1;i/ic:1, si:1, vm/bn:1
- movl r10=0x8000000000000000
- adds r16=PT(CR_IIP), r14
- adds r20=PT(R1), r14
- ;;
- rsm psr.ic | psr.i
- ;;
- srlz.i
- ;;
- mov ar.rsc = 0
- ;;
- flushrs
- ;;
- mov ar.bspstore = 0
- // clear BSPSTORE
- ;;
- mov cr.ipsr=r6
- mov cr.ifs=r10
- ld8 r4 = [r16] // Set init iip for first run.
- ld8 r1 = [r20]
- ;;
- mov cr.iip=r4
- adds r16=VMM_VPD_BASE_OFFSET,r13
- ;;
- ld8 r18=[r16]
- ;;
- adds r19=VMM_VPD_VPSR_OFFSET,r18
- ;;
- ld8 r19=[r19]
- mov r17=r0
- mov r22=r0
- mov r23=r0
- br.cond.sptk ia64_vmm_entry
- br.ret.sptk b0
-END(vmm_reset_entry)
diff --git a/arch/ia64/kvm/vti.h b/arch/ia64/kvm/vti.h
deleted file mode 100644
index b214b5b0432..00000000000
--- a/arch/ia64/kvm/vti.h
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- * vti.h: prototypes for the general VT-related interface
- * Copyright (c) 2004, Intel Corporation.
- *
- * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
- * Fred Yang (fred.yang@intel.com)
- * Kun Tian (Kevin Tian) (kevin.tian@intel.com)
- *
- * Copyright (c) 2007, Intel Corporation.
- * Zhang xiantao <xiantao.zhang@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- */
-#ifndef _KVM_VT_I_H
-#define _KVM_VT_I_H
-
-#ifndef __ASSEMBLY__
-#include <asm/page.h>
-
-#include <linux/kvm_host.h>
-
-/* define itr.i and itr.d in ia64_itr function */
-#define ITR 0x01
-#define DTR 0x02
-#define IaDTR 0x03
-
-#define IA64_TR_VMM 6 /*itr6, dtr6 : maps vmm code, vmbuffer*/
-#define IA64_TR_VM_DATA 7 /*dtr7 : maps current vm data*/
-
-#define RR6 (6UL<<61)
-#define RR7 (7UL<<61)
-
-
-/* config_options in pal_vp_init_env */
-#define VP_INITIALIZE 1UL
-#define VP_FR_PMC 1UL<<1
-#define VP_OPCODE 1UL<<8
-#define VP_CAUSE 1UL<<9
-#define VP_FW_ACC 1UL<<63
-
-/* init vp env with initializing vm_buffer */
-#define VP_INIT_ENV_INITALIZE (VP_INITIALIZE | VP_FR_PMC |\
- VP_OPCODE | VP_CAUSE | VP_FW_ACC)
-/* init vp env without initializing vm_buffer */
-#define VP_INIT_ENV VP_FR_PMC | VP_OPCODE | VP_CAUSE | VP_FW_ACC
-
-#define PAL_VP_CREATE 265
-/* Stacked Virt. Initializes a new VPD for the operation of
- * a new virtual processor in the virtual environment.
- */
-#define PAL_VP_ENV_INFO 266
-/*Stacked Virt. Returns the parameters needed to enter a virtual environment.*/
-#define PAL_VP_EXIT_ENV 267
-/*Stacked Virt. Allows a logical processor to exit a virtual environment.*/
-#define PAL_VP_INIT_ENV 268
-/*Stacked Virt. Allows a logical processor to enter a virtual environment.*/
-#define PAL_VP_REGISTER 269
-/*Stacked Virt. Register a different host IVT for the virtual processor.*/
-#define PAL_VP_RESUME 270
-/* Renamed from PAL_VP_RESUME */
-#define PAL_VP_RESTORE 270
-/*Stacked Virt. Resumes virtual processor operation on the logical processor.*/
-#define PAL_VP_SUSPEND 271
-/* Renamed from PAL_VP_SUSPEND */
-#define PAL_VP_SAVE 271
-/* Stacked Virt. Suspends operation for the specified virtual processor on
- * the logical processor.
- */
-#define PAL_VP_TERMINATE 272
-/* Stacked Virt. Terminates operation for the specified virtual processor.*/
-
-union vac {
- unsigned long value;
- struct {
- unsigned int a_int:1;
- unsigned int a_from_int_cr:1;
- unsigned int a_to_int_cr:1;
- unsigned int a_from_psr:1;
- unsigned int a_from_cpuid:1;
- unsigned int a_cover:1;
- unsigned int a_bsw:1;
- long reserved:57;
- };
-};
-
-union vdc {
- unsigned long value;
- struct {
- unsigned int d_vmsw:1;
- unsigned int d_extint:1;
- unsigned int d_ibr_dbr:1;
- unsigned int d_pmc:1;
- unsigned int d_to_pmd:1;
- unsigned int d_itm:1;
- long reserved:58;
- };
-};
-
-struct vpd {
- union vac vac;
- union vdc vdc;
- unsigned long virt_env_vaddr;
- unsigned long reserved1[29];
- unsigned long vhpi;
- unsigned long reserved2[95];
- unsigned long vgr[16];
- unsigned long vbgr[16];
- unsigned long vnat;
- unsigned long vbnat;
- unsigned long vcpuid[5];
- unsigned long reserved3[11];
- unsigned long vpsr;
- unsigned long vpr;
- unsigned long reserved4[76];
- union {
- unsigned long vcr[128];
- struct {
- unsigned long dcr;
- unsigned long itm;
- unsigned long iva;
- unsigned long rsv1[5];
- unsigned long pta;
- unsigned long rsv2[7];
- unsigned long ipsr;
- unsigned long isr;
- unsigned long rsv3;
- unsigned long iip;
- unsigned long ifa;
- unsigned long itir;
- unsigned long iipa;
- unsigned long ifs;
- unsigned long iim;
- unsigned long iha;
- unsigned long rsv4[38];
- unsigned long lid;
- unsigned long ivr;
- unsigned long tpr;
- unsigned long eoi;
- unsigned long irr[4];
- unsigned long itv;
- unsigned long pmv;
- unsigned long cmcv;
- unsigned long rsv5[5];
- unsigned long lrr0;
- unsigned long lrr1;
- unsigned long rsv6[46];
- };
- };
- unsigned long reserved5[128];
- unsigned long reserved6[3456];
- unsigned long vmm_avail[128];
- unsigned long reserved7[4096];
-};
-
-#define PAL_PROC_VM_BIT (1UL << 40)
-#define PAL_PROC_VMSW_BIT (1UL << 54)
-
-static inline s64 ia64_pal_vp_env_info(u64 *buffer_size,
- u64 *vp_env_info)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL_STK(iprv, PAL_VP_ENV_INFO, 0, 0, 0);
- *buffer_size = iprv.v0;
- *vp_env_info = iprv.v1;
- return iprv.status;
-}
-
-static inline s64 ia64_pal_vp_exit_env(u64 iva)
-{
- struct ia64_pal_retval iprv;
-
- PAL_CALL_STK(iprv, PAL_VP_EXIT_ENV, (u64)iva, 0, 0);
- return iprv.status;
-}
-
-static inline s64 ia64_pal_vp_init_env(u64 config_options, u64 pbase_addr,
- u64 vbase_addr, u64 *vsa_base)
-{
- struct ia64_pal_retval iprv;
-
- PAL_CALL_STK(iprv, PAL_VP_INIT_ENV, config_options, pbase_addr,
- vbase_addr);
- *vsa_base = iprv.v0;
-
- return iprv.status;
-}
-
-static inline s64 ia64_pal_vp_restore(u64 *vpd, u64 pal_proc_vector)
-{
- struct ia64_pal_retval iprv;
-
- PAL_CALL_STK(iprv, PAL_VP_RESTORE, (u64)vpd, pal_proc_vector, 0);
-
- return iprv.status;
-}
-
-static inline s64 ia64_pal_vp_save(u64 *vpd, u64 pal_proc_vector)
-{
- struct ia64_pal_retval iprv;
-
- PAL_CALL_STK(iprv, PAL_VP_SAVE, (u64)vpd, pal_proc_vector, 0);
-
- return iprv.status;
-}
-
-#endif
-
-/*VPD field offset*/
-#define VPD_VAC_START_OFFSET 0
-#define VPD_VDC_START_OFFSET 8
-#define VPD_VHPI_START_OFFSET 256
-#define VPD_VGR_START_OFFSET 1024
-#define VPD_VBGR_START_OFFSET 1152
-#define VPD_VNAT_START_OFFSET 1280
-#define VPD_VBNAT_START_OFFSET 1288
-#define VPD_VCPUID_START_OFFSET 1296
-#define VPD_VPSR_START_OFFSET 1424
-#define VPD_VPR_START_OFFSET 1432
-#define VPD_VRSE_CFLE_START_OFFSET 1440
-#define VPD_VCR_START_OFFSET 2048
-#define VPD_VTPR_START_OFFSET 2576
-#define VPD_VRR_START_OFFSET 3072
-#define VPD_VMM_VAIL_START_OFFSET 31744
-
-/*Virtualization faults*/
-
-#define EVENT_MOV_TO_AR 1
-#define EVENT_MOV_TO_AR_IMM 2
-#define EVENT_MOV_FROM_AR 3
-#define EVENT_MOV_TO_CR 4
-#define EVENT_MOV_FROM_CR 5
-#define EVENT_MOV_TO_PSR 6
-#define EVENT_MOV_FROM_PSR 7
-#define EVENT_ITC_D 8
-#define EVENT_ITC_I 9
-#define EVENT_MOV_TO_RR 10
-#define EVENT_MOV_TO_DBR 11
-#define EVENT_MOV_TO_IBR 12
-#define EVENT_MOV_TO_PKR 13
-#define EVENT_MOV_TO_PMC 14
-#define EVENT_MOV_TO_PMD 15
-#define EVENT_ITR_D 16
-#define EVENT_ITR_I 17
-#define EVENT_MOV_FROM_RR 18
-#define EVENT_MOV_FROM_DBR 19
-#define EVENT_MOV_FROM_IBR 20
-#define EVENT_MOV_FROM_PKR 21
-#define EVENT_MOV_FROM_PMC 22
-#define EVENT_MOV_FROM_CPUID 23
-#define EVENT_SSM 24
-#define EVENT_RSM 25
-#define EVENT_PTC_L 26
-#define EVENT_PTC_G 27
-#define EVENT_PTC_GA 28
-#define EVENT_PTR_D 29
-#define EVENT_PTR_I 30
-#define EVENT_THASH 31
-#define EVENT_TTAG 32
-#define EVENT_TPA 33
-#define EVENT_TAK 34
-#define EVENT_PTC_E 35
-#define EVENT_COVER 36
-#define EVENT_RFI 37
-#define EVENT_BSW_0 38
-#define EVENT_BSW_1 39
-#define EVENT_VMSW 40
-
-/* PAL virtual services offsets */
-#define PAL_VPS_RESUME_NORMAL 0x0000
-#define PAL_VPS_RESUME_HANDLER 0x0400
-#define PAL_VPS_SYNC_READ 0x0800
-#define PAL_VPS_SYNC_WRITE 0x0c00
-#define PAL_VPS_SET_PENDING_INTERRUPT 0x1000
-#define PAL_VPS_THASH 0x1400
-#define PAL_VPS_TTAG 0x1800
-#define PAL_VPS_RESTORE 0x1c00
-#define PAL_VPS_SAVE 0x2000
-
-#endif /* _KVM_VT_I_H */
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
deleted file mode 100644
index a7869f8f49a..00000000000
--- a/arch/ia64/kvm/vtlb.c
+++ /dev/null
@@ -1,640 +0,0 @@
-/*
- * vtlb.c: guest virtual tlb handling module.
- * Copyright (c) 2004, Intel Corporation.
- * Yaozu Dong (Eddie Dong) <Eddie.dong@intel.com>
- * Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
- *
- * Copyright (c) 2007, Intel Corporation.
- * Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
- * Xiantao Zhang <xiantao.zhang@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- */
-
-#include "vcpu.h"
-
-#include <linux/rwsem.h>
-
-#include <asm/tlb.h>
-
-/*
- * Check to see if the address rid:va is translated by the TLB
- */
-
-static int __is_tr_translated(struct thash_data *trp, u64 rid, u64 va)
-{
- return ((trp->p) && (trp->rid == rid)
- && ((va-trp->vadr) < PSIZE(trp->ps)));
-}
-
-/*
- * Only for GUEST TR format.
- */
-static int __is_tr_overlap(struct thash_data *trp, u64 rid, u64 sva, u64 eva)
-{
- u64 sa1, ea1;
-
- if (!trp->p || trp->rid != rid)
- return 0;
-
- sa1 = trp->vadr;
- ea1 = sa1 + PSIZE(trp->ps) - 1;
- eva -= 1;
- if ((sva > ea1) || (sa1 > eva))
- return 0;
- else
- return 1;
-
-}
-
-void machine_tlb_purge(u64 va, u64 ps)
-{
- ia64_ptcl(va, ps << 2);
-}
-
-void local_flush_tlb_all(void)
-{
- int i, j;
- unsigned long flags, count0, count1;
- unsigned long stride0, stride1, addr;
-
- addr = current_vcpu->arch.ptce_base;
- count0 = current_vcpu->arch.ptce_count[0];
- count1 = current_vcpu->arch.ptce_count[1];
- stride0 = current_vcpu->arch.ptce_stride[0];
- stride1 = current_vcpu->arch.ptce_stride[1];
-
- local_irq_save(flags);
- for (i = 0; i < count0; ++i) {
- for (j = 0; j < count1; ++j) {
- ia64_ptce(addr);
- addr += stride1;
- }
- addr += stride0;
- }
- local_irq_restore(flags);
- ia64_srlz_i(); /* srlz.i implies srlz.d */
-}
-
-int vhpt_enabled(struct kvm_vcpu *vcpu, u64 vadr, enum vhpt_ref ref)
-{
- union ia64_rr vrr;
- union ia64_pta vpta;
- struct ia64_psr vpsr;
-
- vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
- vrr.val = vcpu_get_rr(vcpu, vadr);
- vpta.val = vcpu_get_pta(vcpu);
-
- if (vrr.ve & vpta.ve) {
- switch (ref) {
- case DATA_REF:
- case NA_REF:
- return vpsr.dt;
- case INST_REF:
- return vpsr.dt && vpsr.it && vpsr.ic;
- case RSE_REF:
- return vpsr.dt && vpsr.rt;
-
- }
- }
- return 0;
-}
-
-struct thash_data *vsa_thash(union ia64_pta vpta, u64 va, u64 vrr, u64 *tag)
-{
- u64 index, pfn, rid, pfn_bits;
-
- pfn_bits = vpta.size - 5 - 8;
- pfn = REGION_OFFSET(va) >> _REGION_PAGE_SIZE(vrr);
- rid = _REGION_ID(vrr);
- index = ((rid & 0xff) << pfn_bits)|(pfn & ((1UL << pfn_bits) - 1));
- *tag = ((rid >> 8) & 0xffff) | ((pfn >> pfn_bits) << 16);
-
- return (struct thash_data *)((vpta.base << PTA_BASE_SHIFT) +
- (index << 5));
-}
-
-struct thash_data *__vtr_lookup(struct kvm_vcpu *vcpu, u64 va, int type)
-{
-
- struct thash_data *trp;
- int i;
- u64 rid;
-
- rid = vcpu_get_rr(vcpu, va);
- rid = rid & RR_RID_MASK;
- if (type == D_TLB) {
- if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
- for (trp = (struct thash_data *)&vcpu->arch.dtrs, i = 0;
- i < NDTRS; i++, trp++) {
- if (__is_tr_translated(trp, rid, va))
- return trp;
- }
- }
- } else {
- if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) {
- for (trp = (struct thash_data *)&vcpu->arch.itrs, i = 0;
- i < NITRS; i++, trp++) {
- if (__is_tr_translated(trp, rid, va))
- return trp;
- }
- }
- }
-
- return NULL;
-}
-
-static void vhpt_insert(u64 pte, u64 itir, u64 ifa, u64 gpte)
-{
- union ia64_rr rr;
- struct thash_data *head;
- unsigned long ps, gpaddr;
-
- ps = itir_ps(itir);
- rr.val = ia64_get_rr(ifa);
-
- gpaddr = ((gpte & _PAGE_PPN_MASK) >> ps << ps) |
- (ifa & ((1UL << ps) - 1));
-
- head = (struct thash_data *)ia64_thash(ifa);
- head->etag = INVALID_TI_TAG;
- ia64_mf();
- head->page_flags = pte & ~PAGE_FLAGS_RV_MASK;
- head->itir = rr.ps << 2;
- head->etag = ia64_ttag(ifa);
- head->gpaddr = gpaddr;
-}
-
-void mark_pages_dirty(struct kvm_vcpu *v, u64 pte, u64 ps)
-{
- u64 i, dirty_pages = 1;
- u64 base_gfn = (pte&_PAGE_PPN_MASK) >> PAGE_SHIFT;
- vmm_spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
- void *dirty_bitmap = (void *)KVM_MEM_DIRTY_LOG_BASE;
-
- dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT;
-
- vmm_spin_lock(lock);
- for (i = 0; i < dirty_pages; i++) {
- /* avoid RMW */
- if (!test_bit(base_gfn + i, dirty_bitmap))
- set_bit(base_gfn + i , dirty_bitmap);
- }
- vmm_spin_unlock(lock);
-}
-
-void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va, int type)
-{
- u64 phy_pte, psr;
- union ia64_rr mrr;
-
- mrr.val = ia64_get_rr(va);
- phy_pte = translate_phy_pte(&pte, itir, va);
-
- if (itir_ps(itir) >= mrr.ps) {
- vhpt_insert(phy_pte, itir, va, pte);
- } else {
- phy_pte &= ~PAGE_FLAGS_RV_MASK;
- psr = ia64_clear_ic();
- ia64_itc(type, va, phy_pte, itir_ps(itir));
- paravirt_dv_serialize_data();
- ia64_set_psr(psr);
- }
-
- if (!(pte&VTLB_PTE_IO))
- mark_pages_dirty(v, pte, itir_ps(itir));
-}
-
-/*
- * vhpt lookup
- */
-struct thash_data *vhpt_lookup(u64 va)
-{
- struct thash_data *head;
- u64 tag;
-
- head = (struct thash_data *)ia64_thash(va);
- tag = ia64_ttag(va);
- if (head->etag == tag)
- return head;
- return NULL;
-}
-
-u64 guest_vhpt_lookup(u64 iha, u64 *pte)
-{
- u64 ret;
- struct thash_data *data;
-
- data = __vtr_lookup(current_vcpu, iha, D_TLB);
- if (data != NULL)
- thash_vhpt_insert(current_vcpu, data->page_flags,
- data->itir, iha, D_TLB);
-
- asm volatile ("rsm psr.ic|psr.i;;"
- "srlz.d;;"
- "ld8.s r9=[%1];;"
- "tnat.nz p6,p7=r9;;"
- "(p6) mov %0=1;"
- "(p6) mov r9=r0;"
- "(p7) extr.u r9=r9,0,53;;"
- "(p7) mov %0=r0;"
- "(p7) st8 [%2]=r9;;"
- "ssm psr.ic;;"
- "srlz.d;;"
- "ssm psr.i;;"
- "srlz.d;;"
- : "=&r"(ret) : "r"(iha), "r"(pte) : "memory");
-
- return ret;
-}
-
-/*
- * purge software guest tlb
- */
-
-static void vtlb_purge(struct kvm_vcpu *v, u64 va, u64 ps)
-{
- struct thash_data *cur;
- u64 start, curadr, size, psbits, tag, rr_ps, num;
- union ia64_rr vrr;
- struct thash_cb *hcb = &v->arch.vtlb;
-
- vrr.val = vcpu_get_rr(v, va);
- psbits = VMX(v, psbits[(va >> 61)]);
- start = va & ~((1UL << ps) - 1);
- while (psbits) {
- curadr = start;
- rr_ps = __ffs(psbits);
- psbits &= ~(1UL << rr_ps);
- num = 1UL << ((ps < rr_ps) ? 0 : (ps - rr_ps));
- size = PSIZE(rr_ps);
- vrr.ps = rr_ps;
- while (num) {
- cur = vsa_thash(hcb->pta, curadr, vrr.val, &tag);
- if (cur->etag == tag && cur->ps == rr_ps)
- cur->etag = INVALID_TI_TAG;
- curadr += size;
- num--;
- }
- }
-}
-
-
-/*
- * purge VHPT and machine TLB
- */
-static void vhpt_purge(struct kvm_vcpu *v, u64 va, u64 ps)
-{
- struct thash_data *cur;
- u64 start, size, tag, num;
- union ia64_rr rr;
-
- start = va & ~((1UL << ps) - 1);
- rr.val = ia64_get_rr(va);
- size = PSIZE(rr.ps);
- num = 1UL << ((ps < rr.ps) ? 0 : (ps - rr.ps));
- while (num) {
- cur = (struct thash_data *)ia64_thash(start);
- tag = ia64_ttag(start);
- if (cur->etag == tag)
- cur->etag = INVALID_TI_TAG;
- start += size;
- num--;
- }
- machine_tlb_purge(va, ps);
-}
-
-/*
- * Insert an entry into hash TLB or VHPT.
- * NOTES:
- * 1: When inserting a VHPT entry into thash, "va" must be an address
- *    covered by the inserted machine VHPT entry.
- * 2: The format of the entry is always the TLB format.
- * 3: The caller needs to make sure the new entry will not overlap
- *    with any existing entry.
- */
-void vtlb_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va)
-{
- struct thash_data *head;
- union ia64_rr vrr;
- u64 tag;
- struct thash_cb *hcb = &v->arch.vtlb;
-
- vrr.val = vcpu_get_rr(v, va);
- vrr.ps = itir_ps(itir);
- VMX(v, psbits[va >> 61]) |= (1UL << vrr.ps);
- head = vsa_thash(hcb->pta, va, vrr.val, &tag);
- head->page_flags = pte;
- head->itir = itir;
- head->etag = tag;
-}
-
-int vtr_find_overlap(struct kvm_vcpu *vcpu, u64 va, u64 ps, int type)
-{
- struct thash_data *trp;
- int i;
- u64 end, rid;
-
- rid = vcpu_get_rr(vcpu, va);
- rid = rid & RR_RID_MASK;
- end = va + PSIZE(ps);
- if (type == D_TLB) {
- if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
- for (trp = (struct thash_data *)&vcpu->arch.dtrs, i = 0;
- i < NDTRS; i++, trp++) {
- if (__is_tr_overlap(trp, rid, va, end))
- return i;
- }
- }
- } else {
- if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) {
- for (trp = (struct thash_data *)&vcpu->arch.itrs, i = 0;
- i < NITRS; i++, trp++) {
- if (__is_tr_overlap(trp, rid, va, end))
- return i;
- }
- }
- }
- return -1;
-}
-
-/*
- * Purge entries in VTLB and VHPT
- */
-void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps)
-{
- if (vcpu_quick_region_check(v->arch.tc_regions, va))
- vtlb_purge(v, va, ps);
- vhpt_purge(v, va, ps);
-}
-
-void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps)
-{
- u64 old_va = va;
- va = REGION_OFFSET(va);
- if (vcpu_quick_region_check(v->arch.tc_regions, old_va))
- vtlb_purge(v, va, ps);
- vhpt_purge(v, va, ps);
-}
-
-u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
-{
- u64 ps, ps_mask, paddr, maddr, io_mask;
- union pte_flags phy_pte;
-
- ps = itir_ps(itir);
- ps_mask = ~((1UL << ps) - 1);
- phy_pte.val = *pte;
- paddr = *pte;
- paddr = ((paddr & _PAGE_PPN_MASK) & ps_mask) | (va & ~ps_mask);
- maddr = kvm_get_mpt_entry(paddr >> PAGE_SHIFT);
- io_mask = maddr & GPFN_IO_MASK;
- if (io_mask && (io_mask != GPFN_PHYS_MMIO)) {
- *pte |= VTLB_PTE_IO;
- return -1;
- }
- maddr = ((maddr & _PAGE_PPN_MASK) & PAGE_MASK) |
- (paddr & ~PAGE_MASK);
- phy_pte.ppn = maddr >> ARCH_PAGE_SHIFT;
- return phy_pte.val;
-}
-
-/*
- * Purge overlapping TCs and then insert the new entry to emulate itc ops.
- * Note: Only TC entries can be purged and inserted.
- */
-void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
- u64 ifa, int type)
-{
- u64 ps;
- u64 phy_pte, io_mask, index;
- union ia64_rr vrr, mrr;
-
- ps = itir_ps(itir);
- vrr.val = vcpu_get_rr(v, ifa);
- mrr.val = ia64_get_rr(ifa);
-
- index = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
- io_mask = kvm_get_mpt_entry(index) & GPFN_IO_MASK;
- phy_pte = translate_phy_pte(&pte, itir, ifa);
-
- /* Ensure WB attribute if pte is related to a normal mem page,
- * which is required by vga acceleration since qemu maps shared
- * vram buffer with WB.
- */
- if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT) &&
- io_mask != GPFN_PHYS_MMIO) {
- pte &= ~_PAGE_MA_MASK;
- phy_pte &= ~_PAGE_MA_MASK;
- }
-
- vtlb_purge(v, ifa, ps);
- vhpt_purge(v, ifa, ps);
-
- if ((ps != mrr.ps) || (pte & VTLB_PTE_IO)) {
- vtlb_insert(v, pte, itir, ifa);
- vcpu_quick_region_set(VMX(v, tc_regions), ifa);
- }
- if (pte & VTLB_PTE_IO)
- return;
-
- if (ps >= mrr.ps)
- vhpt_insert(phy_pte, itir, ifa, pte);
- else {
- u64 psr;
- phy_pte &= ~PAGE_FLAGS_RV_MASK;
- psr = ia64_clear_ic();
- ia64_itc(type, ifa, phy_pte, ps);
- paravirt_dv_serialize_data();
- ia64_set_psr(psr);
- }
- if (!(pte&VTLB_PTE_IO))
- mark_pages_dirty(v, pte, ps);
-
-}
-
-/*
- * Purge all TCs or VHPT entries including those in Hash table.
- *
- */
-
-void thash_purge_all(struct kvm_vcpu *v)
-{
- int i;
- struct thash_data *head;
- struct thash_cb *vtlb, *vhpt;
- vtlb = &v->arch.vtlb;
- vhpt = &v->arch.vhpt;
-
- for (i = 0; i < 8; i++)
- VMX(v, psbits[i]) = 0;
-
- head = vtlb->hash;
- for (i = 0; i < vtlb->num; i++) {
- head->page_flags = 0;
- head->etag = INVALID_TI_TAG;
- head->itir = 0;
- head->next = 0;
- head++;
- };
-
- head = vhpt->hash;
- for (i = 0; i < vhpt->num; i++) {
- head->page_flags = 0;
- head->etag = INVALID_TI_TAG;
- head->itir = 0;
- head->next = 0;
- head++;
- };
-
- local_flush_tlb_all();
-}
-
-/*
- * Lookup the hash table and its collision chain to find an entry
- * covering this address rid:va.
- *
- * INPUT:
- * in: TLB format for both VHPT & TLB.
- */
-struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data)
-{
- struct thash_data *cch;
- u64 psbits, ps, tag;
- union ia64_rr vrr;
-
- struct thash_cb *hcb = &v->arch.vtlb;
-
- cch = __vtr_lookup(v, va, is_data);
- if (cch)
- return cch;
-
- if (vcpu_quick_region_check(v->arch.tc_regions, va) == 0)
- return NULL;
-
- psbits = VMX(v, psbits[(va >> 61)]);
- vrr.val = vcpu_get_rr(v, va);
- while (psbits) {
- ps = __ffs(psbits);
- psbits &= ~(1UL << ps);
- vrr.ps = ps;
- cch = vsa_thash(hcb->pta, va, vrr.val, &tag);
- if (cch->etag == tag && cch->ps == ps)
- return cch;
- }
-
- return NULL;
-}
-
-/*
- * Initialize internal control data before service.
- */
-void thash_init(struct thash_cb *hcb, u64 sz)
-{
- int i;
- struct thash_data *head;
-
- hcb->pta.val = (unsigned long)hcb->hash;
- hcb->pta.vf = 1;
- hcb->pta.ve = 1;
- hcb->pta.size = sz;
- head = hcb->hash;
- for (i = 0; i < hcb->num; i++) {
- head->page_flags = 0;
- head->itir = 0;
- head->etag = INVALID_TI_TAG;
- head->next = 0;
- head++;
- }
-}
-
-u64 kvm_get_mpt_entry(u64 gpfn)
-{
- u64 *base = (u64 *) KVM_P2M_BASE;
-
- if (gpfn >= (KVM_P2M_SIZE >> 3))
- panic_vm(current_vcpu, "Invalid gpfn =%lx\n", gpfn);
-
- return *(base + gpfn);
-}
-
-u64 kvm_lookup_mpa(u64 gpfn)
-{
- u64 maddr;
- maddr = kvm_get_mpt_entry(gpfn);
- return maddr&_PAGE_PPN_MASK;
-}
-
-u64 kvm_gpa_to_mpa(u64 gpa)
-{
- u64 pte = kvm_lookup_mpa(gpa >> PAGE_SHIFT);
- return (pte >> PAGE_SHIFT << PAGE_SHIFT) | (gpa & ~PAGE_MASK);
-}
-
-/*
- * Fetch guest bundle code.
- * INPUT:
- * gip: guest ip
- * pbundle: used to return fetched bundle.
- */
-int fetch_code(struct kvm_vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle)
-{
- u64 gpip = 0; /* guest physical IP*/
- u64 *vpa;
- struct thash_data *tlb;
- u64 maddr;
-
- if (!(VCPU(vcpu, vpsr) & IA64_PSR_IT)) {
- /* I-side physical mode */
- gpip = gip;
- } else {
- tlb = vtlb_lookup(vcpu, gip, I_TLB);
- if (tlb)
- gpip = (tlb->ppn >> (tlb->ps - 12) << tlb->ps) |
- (gip & (PSIZE(tlb->ps) - 1));
- }
- if (gpip) {
- maddr = kvm_gpa_to_mpa(gpip);
- } else {
- tlb = vhpt_lookup(gip);
- if (tlb == NULL) {
- ia64_ptcl(gip, ARCH_PAGE_SHIFT << 2);
- return IA64_FAULT;
- }
- maddr = (tlb->ppn >> (tlb->ps - 12) << tlb->ps)
- | (gip & (PSIZE(tlb->ps) - 1));
- }
- vpa = (u64 *)__kvm_va(maddr);
-
- pbundle->i64[0] = *vpa++;
- pbundle->i64[1] = *vpa;
-
- return IA64_NO_FAULT;
-}
-
-void kvm_init_vhpt(struct kvm_vcpu *v)
-{
- v->arch.vhpt.num = VHPT_NUM_ENTRIES;
- thash_init(&v->arch.vhpt, VHPT_SHIFT);
- ia64_set_pta(v->arch.vhpt.pta.val);
- /*Enable VHPT here?*/
-}
-
-void kvm_init_vtlb(struct kvm_vcpu *v)
-{
- v->arch.vtlb.num = VTLB_NUM_ENTRIES;
- thash_init(&v->arch.vtlb, VTLB_SHIFT);
-}
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index 95cef0b5f83..df19d0c47be 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -565,6 +565,7 @@ void consistent_free(size_t size, void *vaddr);
void consistent_sync(void *vaddr, size_t size, int direction);
void consistent_sync_page(struct page *page, unsigned long offset,
size_t size, int direction);
+unsigned long consistent_virt_to_pfn(void *vaddr);
void setup_memory(void);
#endif /* __ASSEMBLY__ */
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index 4633c36c1b3..ed7ba8a1182 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -154,9 +154,36 @@ dma_direct_sync_sg_for_device(struct device *dev,
__dma_sync(sg->dma_address, sg->length, direction);
}
+int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t handle, size_t size,
+ struct dma_attrs *attrs)
+{
+#ifdef CONFIG_MMU
+ unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ unsigned long off = vma->vm_pgoff;
+ unsigned long pfn;
+
+ if (off >= count || user_count > (count - off))
+ return -ENXIO;
+
+#ifdef NOT_COHERENT_CACHE
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ pfn = consistent_virt_to_pfn(cpu_addr);
+#else
+ pfn = virt_to_pfn(cpu_addr);
+#endif
+ return remap_pfn_range(vma, vma->vm_start, pfn + off,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot);
+#else
+ return -ENXIO;
+#endif
+}
+
struct dma_map_ops dma_direct_ops = {
.alloc = dma_direct_alloc_coherent,
.free = dma_direct_free_coherent,
+ .mmap = dma_direct_mmap_coherent,
.map_sg = dma_direct_map_sg,
.dma_supported = dma_direct_dma_supported,
.map_page = dma_direct_map_page,
diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c
index e10ad930895..b06c3a7faf2 100644
--- a/arch/microblaze/mm/consistent.c
+++ b/arch/microblaze/mm/consistent.c
@@ -156,6 +156,25 @@ void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle)
}
EXPORT_SYMBOL(consistent_alloc);
+#ifdef CONFIG_MMU
+static pte_t *consistent_virt_to_pte(void *vaddr)
+{
+ unsigned long addr = (unsigned long)vaddr;
+
+ return pte_offset_kernel(pmd_offset(pgd_offset_k(addr), addr), addr);
+}
+
+unsigned long consistent_virt_to_pfn(void *vaddr)
+{
+ pte_t *ptep = consistent_virt_to_pte(vaddr);
+
+ if (pte_none(*ptep) || !pte_present(*ptep))
+ return 0;
+
+ return pte_pfn(*ptep);
+}
+#endif
+
/*
* free page(s) as defined by the above mapping.
*/
@@ -181,13 +200,9 @@ void consistent_free(size_t size, void *vaddr)
} while (size -= PAGE_SIZE);
#else
do {
- pte_t *ptep;
+ pte_t *ptep = consistent_virt_to_pte(vaddr);
unsigned long pfn;
- ptep = pte_offset_kernel(pmd_offset(pgd_offset_k(
- (unsigned int)vaddr),
- (unsigned int)vaddr),
- (unsigned int)vaddr);
if (!pte_none(*ptep) && pte_present(*ptep)) {
pfn = pte_pfn(*ptep);
pte_clear(&init_mm, (unsigned int)vaddr, ptep);
diff --git a/arch/mips/configs/db1xxx_defconfig b/arch/mips/configs/db1xxx_defconfig
index 46e8f7676a1..3bdb72a7036 100644
--- a/arch/mips/configs/db1xxx_defconfig
+++ b/arch/mips/configs/db1xxx_defconfig
@@ -36,7 +36,7 @@ CONFIG_PCI=y
CONFIG_PCI_REALLOC_ENABLE_AUTO=y
CONFIG_PCCARD=y
CONFIG_PCMCIA_ALCHEMY_DEVBOARD=y
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_PACKET_DIAG=y
diff --git a/arch/mips/configs/lemote2f_defconfig b/arch/mips/configs/lemote2f_defconfig
index 227a9de3224..e51aad9a94b 100644
--- a/arch/mips/configs/lemote2f_defconfig
+++ b/arch/mips/configs/lemote2f_defconfig
@@ -37,7 +37,6 @@ CONFIG_MIPS32_N32=y
CONFIG_PM=y
CONFIG_HIBERNATION=y
CONFIG_PM_STD_PARTITION="/dev/hda3"
-CONFIG_PM_RUNTIME=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_DEBUG=y
CONFIG_CPU_FREQ_STAT=m
diff --git a/arch/mips/configs/loongson3_defconfig b/arch/mips/configs/loongson3_defconfig
index 1c6191ebd58..7eabcd2031e 100644
--- a/arch/mips/configs/loongson3_defconfig
+++ b/arch/mips/configs/loongson3_defconfig
@@ -58,7 +58,7 @@ CONFIG_BINFMT_MISC=m
CONFIG_MIPS32_COMPAT=y
CONFIG_MIPS32_O32=y
CONFIG_MIPS32_N32=y
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/mips/configs/nlm_xlp_defconfig b/arch/mips/configs/nlm_xlp_defconfig
index 70509a48df8..b3d1d37f85e 100644
--- a/arch/mips/configs/nlm_xlp_defconfig
+++ b/arch/mips/configs/nlm_xlp_defconfig
@@ -61,7 +61,7 @@ CONFIG_BINFMT_MISC=y
CONFIG_MIPS32_COMPAT=y
CONFIG_MIPS32_O32=y
CONFIG_MIPS32_N32=y
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
CONFIG_PM_DEBUG=y
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/mips/configs/nlm_xlr_defconfig b/arch/mips/configs/nlm_xlr_defconfig
index 82207e8079f..3d8016d6cf3 100644
--- a/arch/mips/configs/nlm_xlr_defconfig
+++ b/arch/mips/configs/nlm_xlr_defconfig
@@ -41,7 +41,7 @@ CONFIG_PCI=y
CONFIG_PCI_MSI=y
CONFIG_PCI_DEBUG=y
CONFIG_BINFMT_MISC=m
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
CONFIG_PM_DEBUG=y
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/nios2/Makefile b/arch/nios2/Makefile
index e142c9ee51f..2328f82ba2a 100644
--- a/arch/nios2/Makefile
+++ b/arch/nios2/Makefile
@@ -14,6 +14,8 @@
# Nios2 port by Wind River Systems Inc through:
# fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
+KBUILD_DEFCONFIG := 3c120_defconfig
+
UTS_SYSNAME = Linux
export MMU
diff --git a/arch/nios2/include/asm/io.h b/arch/nios2/include/asm/io.h
index 9102bfd3fa1..6e24d7cceb0 100644
--- a/arch/nios2/include/asm/io.h
+++ b/arch/nios2/include/asm/io.h
@@ -45,6 +45,8 @@ static inline void iounmap(void __iomem *addr)
__iounmap(addr);
}
+#define ioremap_wc ioremap_nocache
+
/* Pages to physical address... */
#define page_to_phys(page) virt_to_phys(page_to_virt(page))
#define page_to_bus(page) page_to_virt(page)
diff --git a/arch/nios2/include/asm/uaccess.h b/arch/nios2/include/asm/uaccess.h
index acedc0a2860..caa51ff85a3 100644
--- a/arch/nios2/include/asm/uaccess.h
+++ b/arch/nios2/include/asm/uaccess.h
@@ -168,7 +168,7 @@ do { \
const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
unsigned long __gu_val; \
__get_user_common(__gu_val, sizeof(*(ptr)), __gu_ptr, __gu_err);\
- (x) = (__typeof__(x))__gu_val; \
+ (x) = (__force __typeof__(x))__gu_val; \
__gu_err; \
})
@@ -180,7 +180,7 @@ do { \
if (access_ok(VERIFY_READ, __gu_ptr, sizeof(*__gu_ptr))) \
__get_user_common(__gu_val, sizeof(*__gu_ptr), \
__gu_ptr, __gu_err); \
- (x) = (__typeof__(x))__gu_val; \
+ (x) = (__force __typeof__(x))__gu_val; \
__gu_err; \
})
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index 2e637c881d2..879de5efb07 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -36,7 +36,7 @@ CONFIG_KEXEC=y
CONFIG_SCHED_SMT=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE=""
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
CONFIG_PM_DEBUG=y
# CONFIG_SECCOMP is not set
# CONFIG_PCI is not set
diff --git a/arch/powerpc/include/asm/cpuidle.h b/arch/powerpc/include/asm/cpuidle.h
new file mode 100644
index 00000000000..d2f99ca1e3a
--- /dev/null
+++ b/arch/powerpc/include/asm/cpuidle.h
@@ -0,0 +1,20 @@
+#ifndef _ASM_POWERPC_CPUIDLE_H
+#define _ASM_POWERPC_CPUIDLE_H
+
+#ifdef CONFIG_PPC_POWERNV
+/* Used in powernv idle state management */
+#define PNV_THREAD_RUNNING 0
+#define PNV_THREAD_NAP 1
+#define PNV_THREAD_SLEEP 2
+#define PNV_THREAD_WINKLE 3
+#define PNV_CORE_IDLE_LOCK_BIT 0x100
+#define PNV_CORE_IDLE_THREAD_BITS 0x0FF
+
+#ifndef __ASSEMBLY__
+extern u32 pnv_fastsleep_workaround_at_entry[];
+extern u32 pnv_fastsleep_workaround_at_exit[];
+#endif
+
+#endif
+
+#endif
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 6acf0c2a0f9..942c7b1678e 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -170,8 +170,6 @@ extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
unsigned long *nb_ret);
extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr,
unsigned long gpa, bool dirty);
-extern long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
- long pte_index, unsigned long pteh, unsigned long ptel);
extern long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
long pte_index, unsigned long pteh, unsigned long ptel,
pgd_t *pgdir, bool realmode, unsigned long *idx_ret);
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 0aa817933e6..2d81e202bdc 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -37,7 +37,6 @@ static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
#define KVM_DEFAULT_HPT_ORDER 24 /* 16MB HPT by default */
-extern unsigned long kvm_rma_pages;
#endif
#define VRMA_VSID 0x1ffffffUL /* 1TB VSID reserved for VRMA */
@@ -148,7 +147,7 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
/* This covers 14..54 bits of va*/
rb = (v & ~0x7fUL) << 16; /* AVA field */
- rb |= v >> (62 - 8); /* B field */
+ rb |= (v >> HPTE_V_SSIZE_SHIFT) << 8; /* B field */
/*
* AVA in v had cleared lower 23 bits. We need to derive
* that from pteg index
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 047855619cc..7efd666a3fa 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -180,11 +180,6 @@ struct kvmppc_spapr_tce_table {
struct page *pages[0];
};
-struct kvm_rma_info {
- atomic_t use_count;
- unsigned long base_pfn;
-};
-
/* XICS components, defined in book3s_xics.c */
struct kvmppc_xics;
struct kvmppc_icp;
@@ -214,16 +209,9 @@ struct revmap_entry {
#define KVMPPC_RMAP_PRESENT 0x100000000ul
#define KVMPPC_RMAP_INDEX 0xfffffffful
-/* Low-order bits in memslot->arch.slot_phys[] */
-#define KVMPPC_PAGE_ORDER_MASK 0x1f
-#define KVMPPC_PAGE_NO_CACHE HPTE_R_I /* 0x20 */
-#define KVMPPC_PAGE_WRITETHRU HPTE_R_W /* 0x40 */
-#define KVMPPC_GOT_PAGE 0x80
-
struct kvm_arch_memory_slot {
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
unsigned long *rmap;
- unsigned long *slot_phys;
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
};
@@ -242,14 +230,12 @@ struct kvm_arch {
struct kvm_rma_info *rma;
unsigned long vrma_slb_v;
int rma_setup_done;
- int using_mmu_notifiers;
u32 hpt_order;
atomic_t vcpus_running;
u32 online_vcores;
unsigned long hpt_npte;
unsigned long hpt_mask;
atomic_t hpte_mod_interest;
- spinlock_t slot_phys_lock;
cpumask_t need_tlb_flush;
int hpt_cma_alloc;
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
@@ -297,6 +283,7 @@ struct kvmppc_vcore {
struct list_head runnable_threads;
spinlock_t lock;
wait_queue_head_t wq;
+ spinlock_t stoltb_lock; /* protects stolen_tb and preempt_tb */
u64 stolen_tb;
u64 preempt_tb;
struct kvm_vcpu *runner;
@@ -308,6 +295,7 @@ struct kvmppc_vcore {
ulong dpdes; /* doorbell state (POWER8) */
void *mpp_buffer; /* Micro Partition Prefetch buffer */
bool mpp_buffer_is_valid;
+ ulong conferring_threads;
};
#define VCORE_ENTRY_COUNT(vc) ((vc)->entry_exit_count & 0xff)
@@ -664,6 +652,8 @@ struct kvm_vcpu_arch {
spinlock_t tbacct_lock;
u64 busy_stolen;
u64 busy_preempt;
+
+ u32 emul_inst;
#endif
};
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index a6dcdb6d13c..46bf652c916 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -170,8 +170,6 @@ extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
unsigned long ioba, unsigned long tce);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
unsigned long ioba);
-extern struct kvm_rma_info *kvm_alloc_rma(void);
-extern void kvm_release_rma(struct kvm_rma_info *ri);
extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
extern void kvm_release_hpt(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 5cd8d2fddba..eb95b675109 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -56,6 +56,14 @@ struct opal_sg_list {
#define OPAL_HARDWARE_FROZEN -13
#define OPAL_WRONG_STATE -14
#define OPAL_ASYNC_COMPLETION -15
+#define OPAL_I2C_TIMEOUT -17
+#define OPAL_I2C_INVALID_CMD -18
+#define OPAL_I2C_LBUS_PARITY -19
+#define OPAL_I2C_BKEND_OVERRUN -20
+#define OPAL_I2C_BKEND_ACCESS -21
+#define OPAL_I2C_ARBT_LOST -22
+#define OPAL_I2C_NACK_RCVD -23
+#define OPAL_I2C_STOP_ERR -24
/* API Tokens (in r0) */
#define OPAL_INVALID_CALL -1
@@ -152,12 +160,25 @@ struct opal_sg_list {
#define OPAL_PCI_ERR_INJECT 96
#define OPAL_PCI_EEH_FREEZE_SET 97
#define OPAL_HANDLE_HMI 98
+#define OPAL_CONFIG_CPU_IDLE_STATE 99
+#define OPAL_SLW_SET_REG 100
#define OPAL_REGISTER_DUMP_REGION 101
#define OPAL_UNREGISTER_DUMP_REGION 102
#define OPAL_WRITE_TPO 103
#define OPAL_READ_TPO 104
#define OPAL_IPMI_SEND 107
#define OPAL_IPMI_RECV 108
+#define OPAL_I2C_REQUEST 109
+
+/* Device tree flags */
+
+/* Flags set in power-mgmt nodes in device tree if
+ * respective idle states are supported in the platform.
+ */
+#define OPAL_PM_NAP_ENABLED 0x00010000
+#define OPAL_PM_SLEEP_ENABLED 0x00020000
+#define OPAL_PM_WINKLE_ENABLED 0x00040000
+#define OPAL_PM_SLEEP_ENABLED_ER1 0x00080000
#ifndef __ASSEMBLY__
@@ -712,6 +733,24 @@ typedef struct oppanel_line {
uint64_t line_len;
} oppanel_line_t;
+/* OPAL I2C request */
+struct opal_i2c_request {
+ uint8_t type;
+#define OPAL_I2C_RAW_READ 0
+#define OPAL_I2C_RAW_WRITE 1
+#define OPAL_I2C_SM_READ 2
+#define OPAL_I2C_SM_WRITE 3
+ uint8_t flags;
+#define OPAL_I2C_ADDR_10 0x01 /* Not supported yet */
+ uint8_t subaddr_sz; /* Max 4 */
+ uint8_t reserved;
+ __be16 addr; /* 7 or 10 bit address */
+ __be16 reserved2;
+ __be32 subaddr; /* Sub-address if any */
+ __be32 size; /* Data size */
+ __be64 buffer_ra; /* Buffer real address */
+};
+
/* /sys/firmware/opal */
extern struct kobject *opal_kobj;
@@ -876,11 +915,14 @@ int64_t opal_sensor_read(uint32_t sensor_hndl, int token, __be32 *sensor_data);
int64_t opal_handle_hmi(void);
int64_t opal_register_dump_region(uint32_t id, uint64_t start, uint64_t end);
int64_t opal_unregister_dump_region(uint32_t id);
+int64_t opal_slw_set_reg(uint64_t cpu_pir, uint64_t sprn, uint64_t val);
int64_t opal_pci_set_phb_cxl_mode(uint64_t phb_id, uint64_t mode, uint64_t pe_number);
int64_t opal_ipmi_send(uint64_t interface, struct opal_ipmi_msg *msg,
uint64_t msg_len);
int64_t opal_ipmi_recv(uint64_t interface, struct opal_ipmi_msg *msg,
uint64_t *msg_len);
+int64_t opal_i2c_request(uint64_t async_token, uint32_t bus_id,
+ struct opal_i2c_request *oreq);
/* Internal functions */
extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
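The new OPAL_I2C_REQUEST token, struct opal_i2c_request and opal_i2c_request() prototype above define the firmware-side I2C path. The sketch below is illustrative only and is not part of this patch: it shows how a caller might fill the request for a one-byte sub-addressed read; the helper name and the async-token handling are assumptions (it presumes the usual OPAL async completion flow and <linux/string.h>, <asm/page.h>, <asm/opal.h>).

static int64_t example_opal_i2c_read_byte(uint32_t bus_id, u16 dev_addr,
					  u8 reg, u8 *buf, int async_token)
{
	struct opal_i2c_request req;
	int64_t rc;

	memset(&req, 0, sizeof(req));
	req.type = OPAL_I2C_SM_READ;		/* sub-addressed read */
	req.addr = cpu_to_be16(dev_addr);	/* 7-bit address */
	req.subaddr = cpu_to_be32(reg);
	req.subaddr_sz = 1;
	req.size = cpu_to_be32(1);
	req.buffer_ra = cpu_to_be64(__pa(buf));

	rc = opal_i2c_request(async_token, bus_id, &req);
	/*
	 * OPAL_ASYNC_COMPLETION means the result is delivered later through
	 * the async notification path; other values map onto the OPAL_I2C_*
	 * error codes added above.
	 */
	return rc;
}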
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 24a386cbb92..e5f22c6c4bf 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -152,6 +152,16 @@ struct paca_struct {
u64 tm_scratch; /* TM scratch area for reclaim */
#endif
+#ifdef CONFIG_PPC_POWERNV
+ /* Per-core mask tracking idle threads and a lock bit: [L][TTTTTTTT] */
+ u32 *core_idle_state_ptr;
+ u8 thread_idle_state; /* PNV_THREAD_RUNNING/NAP/SLEEP */
+ /* Mask to indicate thread id in core */
+ u8 thread_mask;
+ /* Mask to denote subcore sibling threads */
+ u8 subcore_sibling_mask;
+#endif
+
#ifdef CONFIG_PPC_BOOK3S_64
/* Exclusive emergency stack pointer for machine check exception. */
void *mc_emergency_sp;
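The new paca fields above are per-thread bookkeeping for the idle entry/exit paths. A hedged sketch of how platform setup code might populate them follows; it is illustrative rather than this patch's actual setup routine, and the example_core_idle_state storage is a placeholder (assumes <asm/paca.h>, <asm/cputhreads.h>, <linux/threads.h>).

static u32 example_core_idle_state[NR_CPUS];	/* one word per core, indexed by first sibling */

static void example_init_idle_pacas(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int first = cpu_first_thread_sibling(cpu);

		paca[cpu].core_idle_state_ptr = &example_core_idle_state[first];
		paca[cpu].thread_idle_state = PNV_THREAD_RUNNING;
		paca[cpu].thread_mask = 1 << cpu_thread_in_core(cpu);
	}
}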
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 1a5287759fc..03cd858a401 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -194,6 +194,7 @@
#define PPC_INST_NAP 0x4c000364
#define PPC_INST_SLEEP 0x4c0003a4
+#define PPC_INST_WINKLE 0x4c0003e4
/* A2 specific instructions */
#define PPC_INST_ERATWE 0x7c0001a6
@@ -375,6 +376,7 @@
#define PPC_NAP stringify_in_c(.long PPC_INST_NAP)
#define PPC_SLEEP stringify_in_c(.long PPC_INST_SLEEP)
+#define PPC_WINKLE stringify_in_c(.long PPC_INST_WINKLE)
/* BHRB instructions */
#define PPC_CLRBHRB stringify_in_c(.long PPC_INST_CLRBHRB)
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 29c3798cf80..bf117d8fb45 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -452,7 +452,8 @@ enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};
extern int powersave_nap; /* set if nap mode can be used in idle loop */
extern unsigned long power7_nap(int check_irq);
-extern void power7_sleep(void);
+extern unsigned long power7_sleep(void);
+extern unsigned long power7_winkle(void);
extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
extern void poweroff_now(void);
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index c998279bd85..1c874fb533b 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -118,8 +118,10 @@
#define __MSR (MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV)
#ifdef __BIG_ENDIAN__
#define MSR_ __MSR
+#define MSR_IDLE (MSR_ME | MSR_SF | MSR_HV)
#else
#define MSR_ (__MSR | MSR_LE)
+#define MSR_IDLE (MSR_ME | MSR_SF | MSR_HV | MSR_LE)
#endif
#define MSR_KERNEL (MSR_ | MSR_64BIT)
#define MSR_USER32 (MSR_ | MSR_PR | MSR_EE)
@@ -371,6 +373,7 @@
#define SPRN_DBAT7L 0x23F /* Data BAT 7 Lower Register */
#define SPRN_DBAT7U 0x23E /* Data BAT 7 Upper Register */
#define SPRN_PPR 0x380 /* SMT Thread status Register */
+#define SPRN_TSCR 0x399 /* Thread Switch Control Register */
#define SPRN_DEC 0x016 /* Decrement Register */
#define SPRN_DER 0x095 /* Debug Enable Register */
@@ -728,6 +731,7 @@
#define SPRN_BESCR 806 /* Branch event status and control register */
#define BESCR_GE 0x8000000000000000ULL /* Global Enable */
#define SPRN_WORT 895 /* Workload optimization register - thread */
+#define SPRN_WORC 863 /* Workload optimization register - core */
#define SPRN_PMC1 787
#define SPRN_PMC2 788
diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h
index 6240698fee9..ff21b7a2f0c 100644
--- a/arch/powerpc/include/asm/syscall.h
+++ b/arch/powerpc/include/asm/syscall.h
@@ -90,6 +90,10 @@ static inline void syscall_set_arguments(struct task_struct *task,
static inline int syscall_get_arch(void)
{
- return is_32bit_task() ? AUDIT_ARCH_PPC : AUDIT_ARCH_PPC64;
+ int arch = is_32bit_task() ? AUDIT_ARCH_PPC : AUDIT_ARCH_PPC64;
+#ifdef __LITTLE_ENDIAN__
+ arch |= __AUDIT_ARCH_LE;
+#endif
+ return arch;
}
#endif /* _ASM_SYSCALL_H */
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 9485b43a7c0..a0c071d24e0 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -284,7 +284,7 @@ do { \
if (!is_kernel_addr((unsigned long)__gu_addr)) \
might_fault(); \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
- (x) = (__typeof__(*(ptr)))__gu_val; \
+ (x) = (__force __typeof__(*(ptr)))__gu_val; \
__gu_err; \
})
#endif /* __powerpc64__ */
@@ -297,7 +297,7 @@ do { \
might_fault(); \
if (access_ok(VERIFY_READ, __gu_addr, (size))) \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
- (x) = (__typeof__(*(ptr)))__gu_val; \
+ (x) = (__force __typeof__(*(ptr)))__gu_val; \
__gu_err; \
})
@@ -308,7 +308,7 @@ do { \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
__chk_user_ptr(ptr); \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
- (x) = (__typeof__(*(ptr)))__gu_val; \
+ (x) = (__force __typeof__(*(ptr)))__gu_val; \
__gu_err; \
})
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index c161ef3f28a..e624f964635 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -489,7 +489,6 @@ int main(void)
DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid));
DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));
DEFINE(KVM_HOST_SDR1, offsetof(struct kvm, arch.host_sdr1));
- DEFINE(KVM_TLBIE_LOCK, offsetof(struct kvm, arch.tlbie_lock));
DEFINE(KVM_NEED_FLUSH, offsetof(struct kvm, arch.need_tlb_flush.bits));
DEFINE(KVM_ENABLED_HCALLS, offsetof(struct kvm, arch.enabled_hcalls));
DEFINE(KVM_LPCR, offsetof(struct kvm, arch.lpcr));
@@ -499,6 +498,7 @@ int main(void)
DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr));
DEFINE(VCPU_VPA_DIRTY, offsetof(struct kvm_vcpu, arch.vpa.dirty));
+ DEFINE(VCPU_HEIR, offsetof(struct kvm_vcpu, arch.emul_inst));
#endif
#ifdef CONFIG_PPC_BOOK3S
DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
@@ -726,5 +726,16 @@ int main(void)
arch.timing_last_enter.tv32.tbl));
#endif
+#ifdef CONFIG_PPC_POWERNV
+ DEFINE(PACA_CORE_IDLE_STATE_PTR,
+ offsetof(struct paca_struct, core_idle_state_ptr));
+ DEFINE(PACA_THREAD_IDLE_STATE,
+ offsetof(struct paca_struct, thread_idle_state));
+ DEFINE(PACA_THREAD_MASK,
+ offsetof(struct paca_struct, thread_mask));
+ DEFINE(PACA_SUBCORE_SIBLING_MASK,
+ offsetof(struct paca_struct, subcore_sibling_mask));
+#endif
+
return 0;
}
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index db08382e19f..c2df8150bd7 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -15,6 +15,7 @@
#include <asm/hw_irq.h>
#include <asm/exception-64s.h>
#include <asm/ptrace.h>
+#include <asm/cpuidle.h>
/*
* We layout physical memory as follows:
@@ -101,23 +102,34 @@ system_reset_pSeries:
#ifdef CONFIG_PPC_P7_NAP
BEGIN_FTR_SECTION
/* Running native on arch 2.06 or later, check if we are
- * waking up from nap. We only handle no state loss and
- * supervisor state loss. We do -not- handle hypervisor
- * state loss at this time.
+ * waking up from nap/sleep/winkle.
*/
mfspr r13,SPRN_SRR1
rlwinm. r13,r13,47-31,30,31
beq 9f
- /* waking up from powersave (nap) state */
- cmpwi cr1,r13,2
- /* Total loss of HV state is fatal, we could try to use the
- * PIR to locate a PACA, then use an emergency stack etc...
- * OPAL v3 based powernv platforms have new idle states
- * which fall in this catagory.
+ cmpwi cr3,r13,2
+
+ /*
+ * Check if the last bit of HSPRG0 is set. This indicates whether we are
+ * waking up from winkle.
*/
- bgt cr1,8f
GET_PACA(r13)
+ clrldi r5,r13,63
+ clrrdi r13,r13,1
+ cmpwi cr4,r5,1
+ mtspr SPRN_HSPRG0,r13
+
+ lbz r0,PACA_THREAD_IDLE_STATE(r13)
+ cmpwi cr2,r0,PNV_THREAD_NAP
+ bgt cr2,8f /* Either sleep or Winkle */
+
+ /* Waking up from nap should not cause hypervisor state loss */
+ bgt cr3,.
+
+ /* Waking up from nap */
+ li r0,PNV_THREAD_RUNNING
+ stb r0,PACA_THREAD_IDLE_STATE(r13) /* Clear thread state */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
li r0,KVM_HWTHREAD_IN_KERNEL
@@ -133,7 +145,7 @@ BEGIN_FTR_SECTION
/* Return SRR1 from power7_nap() */
mfspr r3,SPRN_SRR1
- beq cr1,2f
+ beq cr3,2f
b power7_wakeup_noloss
2: b power7_wakeup_loss
@@ -1382,6 +1394,7 @@ machine_check_handle_early:
MACHINE_CHECK_HANDLER_WINDUP
GET_PACA(r13)
ld r1,PACAR1(r13)
+ li r3,PNV_THREAD_NAP
b power7_enter_nap_mode
4:
#endif
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index 18c0687e5ab..05adc8bbdef 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -18,9 +18,25 @@
#include <asm/hw_irq.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/opal.h>
+#include <asm/cpuidle.h>
+#include <asm/mmu-hash64.h>
#undef DEBUG
+/*
+ * Use unused space in the interrupt stack to save and restore
+ * registers for winkle support.
+ */
+#define _SDR1 GPR3
+#define _RPR GPR4
+#define _SPURR GPR5
+#define _PURR GPR6
+#define _TSCR GPR7
+#define _DSCR GPR8
+#define _AMOR GPR9
+#define _WORT GPR10
+#define _WORC GPR11
+
/* Idle state entry routines */
#define IDLE_STATE_ENTER_SEQ(IDLE_INST) \
@@ -37,8 +53,7 @@
/*
* Pass requested state in r3:
- * 0 - nap
- * 1 - sleep
+ * r3 - PNV_THREAD_NAP/SLEEP/WINKLE
*
* To check IRQ_HAPPENED in r4
* 0 - don't check
@@ -101,18 +116,105 @@ _GLOBAL(power7_powersave_common)
std r9,_MSR(r1)
std r1,PACAR1(r13)
-_GLOBAL(power7_enter_nap_mode)
+ /*
+ * Go to real mode to do the nap, as required by the architecture.
+ * Also, we need to be in real mode before setting hwthread_state,
+ * because as soon as we do that, another thread can switch
+ * the MMU context to the guest.
+ */
+ LOAD_REG_IMMEDIATE(r5, MSR_IDLE)
+ li r6, MSR_RI
+ andc r6, r9, r6
+ LOAD_REG_ADDR(r7, power7_enter_nap_mode)
+ mtmsrd r6, 1 /* clear RI before setting SRR0/1 */
+ mtspr SPRN_SRR0, r7
+ mtspr SPRN_SRR1, r5
+ rfid
+
+ .globl power7_enter_nap_mode
+power7_enter_nap_mode:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/* Tell KVM we're napping */
li r4,KVM_HWTHREAD_IN_NAP
stb r4,HSTATE_HWTHREAD_STATE(r13)
#endif
- cmpwi cr0,r3,1
- beq 2f
+ stb r3,PACA_THREAD_IDLE_STATE(r13)
+ cmpwi cr3,r3,PNV_THREAD_SLEEP
+ bge cr3,2f
IDLE_STATE_ENTER_SEQ(PPC_NAP)
/* No return */
-2: IDLE_STATE_ENTER_SEQ(PPC_SLEEP)
- /* No return */
+2:
+ /* Sleep or winkle */
+ lbz r7,PACA_THREAD_MASK(r13)
+ ld r14,PACA_CORE_IDLE_STATE_PTR(r13)
+lwarx_loop1:
+ lwarx r15,0,r14
+ andc r15,r15,r7 /* Clear thread bit */
+
+ andi. r15,r15,PNV_CORE_IDLE_THREAD_BITS
+
+/*
+ * If cr0 = 0, then the current thread is the last thread of the core entering
+ * sleep. The last thread needs to execute the hardware bug workaround code if
+ * required by the platform.
+ * Make the workaround call unconditionally here. The branch below is
+ * patched out when the idle states are discovered if the platform does not
+ * require the workaround.
+ */
+.global pnv_fastsleep_workaround_at_entry
+pnv_fastsleep_workaround_at_entry:
+ beq fastsleep_workaround_at_entry
+
+ stwcx. r15,0,r14
+ bne- lwarx_loop1
+ isync
+
+common_enter: /* common code for all the threads entering sleep or winkle */
+ bgt cr3,enter_winkle
+ IDLE_STATE_ENTER_SEQ(PPC_SLEEP)
+
+fastsleep_workaround_at_entry:
+ ori r15,r15,PNV_CORE_IDLE_LOCK_BIT
+ stwcx. r15,0,r14
+ bne- lwarx_loop1
+ isync
+
+ /* Fast sleep workaround */
+ li r3,1
+ li r4,1
+ li r0,OPAL_CONFIG_CPU_IDLE_STATE
+ bl opal_call_realmode
+
+ /* Clear Lock bit */
+ li r0,0
+ lwsync
+ stw r0,0(r14)
+ b common_enter
+
+enter_winkle:
+ /*
+ * Note that all registers (per-core, per-subcore or per-thread) are saved
+ * here, since any thread in the core might wake up first.
+ */
+ mfspr r3,SPRN_SDR1
+ std r3,_SDR1(r1)
+ mfspr r3,SPRN_RPR
+ std r3,_RPR(r1)
+ mfspr r3,SPRN_SPURR
+ std r3,_SPURR(r1)
+ mfspr r3,SPRN_PURR
+ std r3,_PURR(r1)
+ mfspr r3,SPRN_TSCR
+ std r3,_TSCR(r1)
+ mfspr r3,SPRN_DSCR
+ std r3,_DSCR(r1)
+ mfspr r3,SPRN_AMOR
+ std r3,_AMOR(r1)
+ mfspr r3,SPRN_WORT
+ std r3,_WORT(r1)
+ mfspr r3,SPRN_WORC
+ std r3,_WORC(r1)
+ IDLE_STATE_ENTER_SEQ(PPC_WINKLE)
_GLOBAL(power7_idle)
/* Now check if user or arch enabled NAP mode */
@@ -125,48 +227,21 @@ _GLOBAL(power7_idle)
_GLOBAL(power7_nap)
mr r4,r3
- li r3,0
+ li r3,PNV_THREAD_NAP
b power7_powersave_common
/* No return */
_GLOBAL(power7_sleep)
- li r3,1
+ li r3,PNV_THREAD_SLEEP
li r4,1
b power7_powersave_common
/* No return */
-/*
- * Make opal call in realmode. This is a generic function to be called
- * from realmode from reset vector. It handles endianess.
- *
- * r13 - paca pointer
- * r1 - stack pointer
- * r3 - opal token
- */
-opal_call_realmode:
- mflr r12
- std r12,_LINK(r1)
- ld r2,PACATOC(r13)
- /* Set opal return address */
- LOAD_REG_ADDR(r0,return_from_opal_call)
- mtlr r0
- /* Handle endian-ness */
- li r0,MSR_LE
- mfmsr r12
- andc r12,r12,r0
- mtspr SPRN_HSRR1,r12
- mr r0,r3 /* Move opal token to r0 */
- LOAD_REG_ADDR(r11,opal)
- ld r12,8(r11)
- ld r2,0(r11)
- mtspr SPRN_HSRR0,r12
- hrfid
-
-return_from_opal_call:
- FIXUP_ENDIAN
- ld r0,_LINK(r1)
- mtlr r0
- blr
+_GLOBAL(power7_winkle)
+ li r3,PNV_THREAD_WINKLE
+ li r4,1
+ b power7_powersave_common
+ /* No return */
#define CHECK_HMI_INTERRUPT \
mfspr r0,SPRN_SRR1; \
@@ -181,7 +256,7 @@ ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66); \
ld r2,PACATOC(r13); \
ld r1,PACAR1(r13); \
std r3,ORIG_GPR3(r1); /* Save original r3 */ \
- li r3,OPAL_HANDLE_HMI; /* Pass opal token argument*/ \
+ li r0,OPAL_HANDLE_HMI; /* Pass opal token argument*/ \
bl opal_call_realmode; \
ld r3,ORIG_GPR3(r1); /* Restore original r3 */ \
20: nop;
@@ -190,16 +265,190 @@ ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66); \
_GLOBAL(power7_wakeup_tb_loss)
ld r2,PACATOC(r13);
ld r1,PACAR1(r13)
+ /*
+ * Before entering any idle state, the NVGPRs are saved in the stack
+ * and they are restored before switching to the process context. Hence
+ * until they are restored, they are free to be used.
+ *
+ * Save SRR1 in a NVGPR as it might be clobbered in opal_call_realmode
+ * (called in CHECK_HMI_INTERRUPT). SRR1 is required to determine the
+ * wakeup reason if we branch to kvm_start_guest.
+ */
+ mfspr r16,SPRN_SRR1
BEGIN_FTR_SECTION
CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
+
+ lbz r7,PACA_THREAD_MASK(r13)
+ ld r14,PACA_CORE_IDLE_STATE_PTR(r13)
+lwarx_loop2:
+ lwarx r15,0,r14
+ andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT
+ /*
+ * The lock bit is set in one of the 2 cases:
+ * a. In the sleep/winkle enter path, the last thread is executing the
+ * fastsleep workaround code.
+ * b. In the wakeup path, another thread is executing the fastsleep
+ * workaround undo code, resyncing the timebase or restoring context.
+ * In either case, loop until the lock bit is cleared.
+ */
+ bne core_idle_lock_held
+
+ cmpwi cr2,r15,0
+ lbz r4,PACA_SUBCORE_SIBLING_MASK(r13)
+ and r4,r4,r15
+ cmpwi cr1,r4,0 /* Check if first in subcore */
+
+ /*
+ * At this stage:
+ * cr1 - 0b0100 if first thread to wake up in the subcore
+ * cr2 - 0b0100 if first thread to wake up in the core
+ * cr3 - 0b0010 if waking up from sleep or winkle
+ * cr4 - 0b0100 if waking up from winkle
+ */
+
+ or r15,r15,r7 /* Set thread bit */
+
+ beq cr1,first_thread_in_subcore
+
+ /* Not first thread in subcore to wake up */
+ stwcx. r15,0,r14
+ bne- lwarx_loop2
+ isync
+ b common_exit
+
+core_idle_lock_held:
+ HMT_LOW
+core_idle_lock_loop:
+ lwz r15,0(r14)
+ andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT
+ bne core_idle_lock_loop
+ HMT_MEDIUM
+ b lwarx_loop2
+
+first_thread_in_subcore:
+ /* First thread in subcore to wakeup */
+ ori r15,r15,PNV_CORE_IDLE_LOCK_BIT
+ stwcx. r15,0,r14
+ bne- lwarx_loop2
+ isync
+
+ /*
+ * If waking up from sleep, subcore state is not lost. Hence
+ * skip subcore state restore
+ */
+ bne cr4,subcore_state_restored
+
+ /* Restore per-subcore state */
+ ld r4,_SDR1(r1)
+ mtspr SPRN_SDR1,r4
+ ld r4,_RPR(r1)
+ mtspr SPRN_RPR,r4
+ ld r4,_AMOR(r1)
+ mtspr SPRN_AMOR,r4
+
+subcore_state_restored:
+ /*
+ * Check if the thread is also the first thread in the core. If not,
+ * skip to clear_lock.
+ */
+ bne cr2,clear_lock
+
+first_thread_in_core:
+
+ /*
+ * First thread in the core waking up from fastsleep. It needs to
+ * call the fastsleep workaround code if the platform requires it.
+ * Call it unconditionally here. The branch instruction below will
+ * be patched out when the idle states are discovered if the platform
+ * does not require the workaround.
+ */
+.global pnv_fastsleep_workaround_at_exit
+pnv_fastsleep_workaround_at_exit:
+ b fastsleep_workaround_at_exit
+
+timebase_resync:
+ /* Do timebase resync if we are waking up from sleep. Use cr3 value
+ * set in exceptions-64s.S */
+ ble cr3,clear_lock
/* Time base re-sync */
- li r3,OPAL_RESYNC_TIMEBASE
+ li r0,OPAL_RESYNC_TIMEBASE
bl opal_call_realmode;
-
/* TODO: Check r3 for failure */
+ /*
+ * If waking up from sleep, per core state is not lost, skip to
+ * clear_lock.
+ */
+ bne cr4,clear_lock
+
+ /* Restore per core state */
+ ld r4,_TSCR(r1)
+ mtspr SPRN_TSCR,r4
+ ld r4,_WORC(r1)
+ mtspr SPRN_WORC,r4
+
+clear_lock:
+ andi. r15,r15,PNV_CORE_IDLE_THREAD_BITS
+ lwsync
+ stw r15,0(r14)
+
+common_exit:
+ /*
+ * Common to all threads.
+ *
+ * If waking up from sleep, hypervisor state is not lost. Hence
+ * skip hypervisor state restore.
+ */
+ bne cr4,hypervisor_state_restored
+
+ /* Waking up from winkle */
+
+ /* Restore per thread state */
+ bl __restore_cpu_power8
+
+ /* Restore SLB from PACA */
+ ld r8,PACA_SLBSHADOWPTR(r13)
+
+ .rept SLB_NUM_BOLTED
+ li r3, SLBSHADOW_SAVEAREA
+ LDX_BE r5, r8, r3
+ addi r3, r3, 8
+ LDX_BE r6, r8, r3
+ andis. r7,r5,SLB_ESID_V@h
+ beq 1f
+ slbmte r6,r5
+1: addi r8,r8,16
+ .endr
+
+ ld r4,_SPURR(r1)
+ mtspr SPRN_SPURR,r4
+ ld r4,_PURR(r1)
+ mtspr SPRN_PURR,r4
+ ld r4,_DSCR(r1)
+ mtspr SPRN_DSCR,r4
+ ld r4,_WORT(r1)
+ mtspr SPRN_WORT,r4
+
+hypervisor_state_restored:
+
+ li r5,PNV_THREAD_RUNNING
+ stb r5,PACA_THREAD_IDLE_STATE(r13)
+
+ mtspr SPRN_SRR1,r16
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ li r0,KVM_HWTHREAD_IN_KERNEL
+ stb r0,HSTATE_HWTHREAD_STATE(r13)
+ /* Order setting hwthread_state vs. testing hwthread_req */
+ sync
+ lbz r0,HSTATE_HWTHREAD_REQ(r13)
+ cmpwi r0,0
+ beq 6f
+ b kvm_start_guest
+6:
+#endif
+
REST_NVGPRS(r1)
REST_GPR(2, r1)
ld r3,_CCR(r1)
@@ -212,6 +461,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
mtspr SPRN_SRR0,r5
rfid
+fastsleep_workaround_at_exit:
+ li r3,1
+ li r4,0
+ li r0,OPAL_CONFIG_CPU_IDLE_STATE
+ bl opal_call_realmode
+ b timebase_resync
+
/*
* R3 here contains the value that will be returned to the caller
* of power7_nap.
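The per-core idle-state word manipulated by the lwarx/stwcx. loops above packs a mask of idle threads plus a lock bit that serializes the fastsleep workaround and the per-core restore path. The C sketch below is only a conceptual rendering of the entry-side protocol: the EX_* constants and the apply_fastsleep_workaround() helper are assumptions standing in for the asm/cpuidle.h definitions and the OPAL call, not code taken from this hunk.

/* assumes <linux/atomic.h>; EX_* values are illustrative placeholders */
#define EX_CORE_IDLE_LOCK_BIT		0x100
#define EX_CORE_IDLE_THREAD_BITS	0x0ff

static void example_fastsleep_entry(u32 *core_idle_state, u8 thread_mask)
{
	u32 old, new;

	do {
		old = READ_ONCE(*core_idle_state);
		new = old & ~thread_mask;		/* clear our thread bit */
		if (!(new & EX_CORE_IDLE_THREAD_BITS))
			new |= EX_CORE_IDLE_LOCK_BIT;	/* last thread takes the lock */
	} while (cmpxchg(core_idle_state, old, new) != old);

	if (new & EX_CORE_IDLE_LOCK_BIT) {
		apply_fastsleep_workaround();		/* hypothetical helper for the OPAL call */
		smp_mb();
		WRITE_ONCE(*core_idle_state, new & ~EX_CORE_IDLE_LOCK_BIT);
	}
	/* ...then enter sleep/winkle... */
}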
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 8b2d2dc8ef1..8ec017cb444 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -700,7 +700,6 @@ void start_secondary(void *unused)
smp_store_cpu_info(cpu);
set_dec(tb_ticks_per_jiffy);
preempt_disable();
- cpu_callin_map[cpu] = 1;
if (smp_ops->setup_cpu)
smp_ops->setup_cpu(cpu);
@@ -739,6 +738,14 @@ void start_secondary(void *unused)
notify_cpu_starting(cpu);
set_cpu_online(cpu, true);
+ /*
+ * CPU must be marked active and online before we signal back to the
+ * master, because the scheduler needs to see the cpu_online and
+ * cpu_active bits set.
+ */
+ smp_wmb();
+ cpu_callin_map[cpu] = 1;
+
local_irq_enable();
cpu_startup_entry(CPUHP_ONLINE);
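For context, the smp_wmb() added above pairs with the boot CPU's wait on cpu_callin_map. A hedged sketch of the consuming side follows; it shows the generic pattern only, not the exact loop in __cpu_up() (assumes <linux/delay.h>).

/* Illustrative only: how the boot CPU's wait pairs with the new barrier */
static int example_wait_for_callin(int cpu)
{
	int i;

	for (i = 0; i < 100000 && !cpu_callin_map[cpu]; i++)
		udelay(100);
	if (!cpu_callin_map[cpu])
		return -ENOENT;
	smp_rmb();	/* pairs with smp_wmb() before cpu_callin_map[cpu] = 1 */
	return 0;
}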
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 602eb51d20b..f5769f19ae2 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -172,6 +172,7 @@ config KVM_XICS
depends on KVM_BOOK3S_64 && !KVM_MPIC
select HAVE_KVM_IRQCHIP
select HAVE_KVM_IRQFD
+ default y
---help---
Include support for the XICS (eXternal Interrupt Controller
Specification) interrupt controller architecture used on
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index b32db4b9536..888bf466d8c 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -64,14 +64,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ NULL }
};
-void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
-{
-}
-
-void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
-{
-}
-
void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
{
if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index cd0b0730e29..a2eb6d354a5 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -78,11 +78,6 @@ static inline bool sr_kp(u32 sr_raw)
return (sr_raw & 0x20000000) ? true: false;
}
-static inline bool sr_nx(u32 sr_raw)
-{
- return (sr_raw & 0x10000000) ? true: false;
-}
-
static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
struct kvmppc_pte *pte, bool data,
bool iswrite);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index d40770248b6..534acb3c6c3 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -37,8 +37,7 @@
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>
-/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
-#define MAX_LPID_970 63
+#include "trace_hv.h"
/* Power architecture requires HPT is at least 256kB */
#define PPC_MIN_HPT_ORDER 18
@@ -229,14 +228,9 @@ int kvmppc_mmu_hv_init(void)
if (!cpu_has_feature(CPU_FTR_HVMODE))
return -EINVAL;
- /* POWER7 has 10-bit LPIDs, PPC970 and e500mc have 6-bit LPIDs */
- if (cpu_has_feature(CPU_FTR_ARCH_206)) {
- host_lpid = mfspr(SPRN_LPID); /* POWER7 */
- rsvd_lpid = LPID_RSVD;
- } else {
- host_lpid = 0; /* PPC970 */
- rsvd_lpid = MAX_LPID_970;
- }
+ /* POWER7 has 10-bit LPIDs (12-bit in POWER8) */
+ host_lpid = mfspr(SPRN_LPID);
+ rsvd_lpid = LPID_RSVD;
kvmppc_init_lpid(rsvd_lpid + 1);
@@ -259,130 +253,12 @@ static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
kvmppc_set_msr(vcpu, msr);
}
-/*
- * This is called to get a reference to a guest page if there isn't
- * one already in the memslot->arch.slot_phys[] array.
- */
-static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
- struct kvm_memory_slot *memslot,
- unsigned long psize)
-{
- unsigned long start;
- long np, err;
- struct page *page, *hpage, *pages[1];
- unsigned long s, pgsize;
- unsigned long *physp;
- unsigned int is_io, got, pgorder;
- struct vm_area_struct *vma;
- unsigned long pfn, i, npages;
-
- physp = memslot->arch.slot_phys;
- if (!physp)
- return -EINVAL;
- if (physp[gfn - memslot->base_gfn])
- return 0;
-
- is_io = 0;
- got = 0;
- page = NULL;
- pgsize = psize;
- err = -EINVAL;
- start = gfn_to_hva_memslot(memslot, gfn);
-
- /* Instantiate and get the page we want access to */
- np = get_user_pages_fast(start, 1, 1, pages);
- if (np != 1) {
- /* Look up the vma for the page */
- down_read(&current->mm->mmap_sem);
- vma = find_vma(current->mm, start);
- if (!vma || vma->vm_start > start ||
- start + psize > vma->vm_end ||
- !(vma->vm_flags & VM_PFNMAP))
- goto up_err;
- is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
- pfn = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
- /* check alignment of pfn vs. requested page size */
- if (psize > PAGE_SIZE && (pfn & ((psize >> PAGE_SHIFT) - 1)))
- goto up_err;
- up_read(&current->mm->mmap_sem);
-
- } else {
- page = pages[0];
- got = KVMPPC_GOT_PAGE;
-
- /* See if this is a large page */
- s = PAGE_SIZE;
- if (PageHuge(page)) {
- hpage = compound_head(page);
- s <<= compound_order(hpage);
- /* Get the whole large page if slot alignment is ok */
- if (s > psize && slot_is_aligned(memslot, s) &&
- !(memslot->userspace_addr & (s - 1))) {
- start &= ~(s - 1);
- pgsize = s;
- get_page(hpage);
- put_page(page);
- page = hpage;
- }
- }
- if (s < psize)
- goto out;
- pfn = page_to_pfn(page);
- }
-
- npages = pgsize >> PAGE_SHIFT;
- pgorder = __ilog2(npages);
- physp += (gfn - memslot->base_gfn) & ~(npages - 1);
- spin_lock(&kvm->arch.slot_phys_lock);
- for (i = 0; i < npages; ++i) {
- if (!physp[i]) {
- physp[i] = ((pfn + i) << PAGE_SHIFT) +
- got + is_io + pgorder;
- got = 0;
- }
- }
- spin_unlock(&kvm->arch.slot_phys_lock);
- err = 0;
-
- out:
- if (got)
- put_page(page);
- return err;
-
- up_err:
- up_read(&current->mm->mmap_sem);
- return err;
-}
-
long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
long pte_index, unsigned long pteh,
unsigned long ptel, unsigned long *pte_idx_ret)
{
- unsigned long psize, gpa, gfn;
- struct kvm_memory_slot *memslot;
long ret;
- if (kvm->arch.using_mmu_notifiers)
- goto do_insert;
-
- psize = hpte_page_size(pteh, ptel);
- if (!psize)
- return H_PARAMETER;
-
- pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
-
- /* Find the memslot (if any) for this address */
- gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
- gfn = gpa >> PAGE_SHIFT;
- memslot = gfn_to_memslot(kvm, gfn);
- if (memslot && !(memslot->flags & KVM_MEMSLOT_INVALID)) {
- if (!slot_is_aligned(memslot, psize))
- return H_PARAMETER;
- if (kvmppc_get_guest_page(kvm, gfn, memslot, psize) < 0)
- return H_PARAMETER;
- }
-
- do_insert:
/* Protect linux PTE lookup from page table destruction */
rcu_read_lock_sched(); /* this disables preemption too */
ret = kvmppc_do_h_enter(kvm, flags, pte_index, pteh, ptel,
@@ -397,19 +273,6 @@ long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
}
-/*
- * We come here on a H_ENTER call from the guest when we are not
- * using mmu notifiers and we don't have the requested page pinned
- * already.
- */
-long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
- long pte_index, unsigned long pteh,
- unsigned long ptel)
-{
- return kvmppc_virtmode_do_h_enter(vcpu->kvm, flags, pte_index,
- pteh, ptel, &vcpu->arch.gpr[4]);
-}
-
static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu,
gva_t eaddr)
{
@@ -494,7 +357,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G));
/* Storage key permission check for POWER7 */
- if (data && virtmode && cpu_has_feature(CPU_FTR_ARCH_206)) {
+ if (data && virtmode) {
int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr);
if (amrfield & 1)
gpte->may_read = 0;
@@ -622,14 +485,13 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
gfn = gpa >> PAGE_SHIFT;
memslot = gfn_to_memslot(kvm, gfn);
+ trace_kvm_page_fault_enter(vcpu, hpte, memslot, ea, dsisr);
+
/* No memslot means it's an emulated MMIO region */
if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
dsisr & DSISR_ISSTORE);
- if (!kvm->arch.using_mmu_notifiers)
- return -EFAULT; /* should never get here */
-
/*
* This should never happen, because of the slot_is_aligned()
* check in kvmppc_do_h_enter().
@@ -641,6 +503,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
mmu_seq = kvm->mmu_notifier_seq;
smp_rmb();
+ ret = -EFAULT;
is_io = 0;
pfn = 0;
page = NULL;
@@ -664,7 +527,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
up_read(&current->mm->mmap_sem);
if (!pfn)
- return -EFAULT;
+ goto out_put;
} else {
page = pages[0];
pfn = page_to_pfn(page);
@@ -694,14 +557,14 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
}
- ret = -EFAULT;
if (psize > pte_size)
goto out_put;
/* Check WIMG vs. the actual page we're accessing */
if (!hpte_cache_flags_ok(r, is_io)) {
if (is_io)
- return -EFAULT;
+ goto out_put;
+
/*
* Allow guest to map emulated device memory as
* uncacheable, but actually make it cacheable.
@@ -765,6 +628,8 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
SetPageDirty(page);
out_put:
+ trace_kvm_page_fault_exit(vcpu, hpte, ret);
+
if (page) {
/*
* We drop pages[0] here, not page because page might
@@ -895,8 +760,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
psize = hpte_page_size(be64_to_cpu(hptep[0]), ptel);
if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) &&
hpte_rpn(ptel, psize) == gfn) {
- if (kvm->arch.using_mmu_notifiers)
- hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
+ hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
kvmppc_invalidate_hpte(kvm, hptep, i);
/* Harvest R and C */
rcbits = be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
@@ -914,15 +778,13 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
int kvm_unmap_hva_hv(struct kvm *kvm, unsigned long hva)
{
- if (kvm->arch.using_mmu_notifiers)
- kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
+ kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
return 0;
}
int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start, unsigned long end)
{
- if (kvm->arch.using_mmu_notifiers)
- kvm_handle_hva_range(kvm, start, end, kvm_unmap_rmapp);
+ kvm_handle_hva_range(kvm, start, end, kvm_unmap_rmapp);
return 0;
}
@@ -1004,8 +866,6 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
int kvm_age_hva_hv(struct kvm *kvm, unsigned long start, unsigned long end)
{
- if (!kvm->arch.using_mmu_notifiers)
- return 0;
return kvm_handle_hva_range(kvm, start, end, kvm_age_rmapp);
}
@@ -1042,15 +902,11 @@ static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva)
{
- if (!kvm->arch.using_mmu_notifiers)
- return 0;
return kvm_handle_hva(kvm, hva, kvm_test_age_rmapp);
}
void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte)
{
- if (!kvm->arch.using_mmu_notifiers)
- return;
kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
}
@@ -1117,8 +973,11 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
}
/* Now check and modify the HPTE */
- if (!(hptep[0] & cpu_to_be64(HPTE_V_VALID)))
+ if (!(hptep[0] & cpu_to_be64(HPTE_V_VALID))) {
+ /* unlock and continue */
+ hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
continue;
+ }
/* need to make it temporarily absent so C is stable */
hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
@@ -1206,35 +1065,17 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
struct page *page, *pages[1];
int npages;
unsigned long hva, offset;
- unsigned long pa;
- unsigned long *physp;
int srcu_idx;
srcu_idx = srcu_read_lock(&kvm->srcu);
memslot = gfn_to_memslot(kvm, gfn);
if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
goto err;
- if (!kvm->arch.using_mmu_notifiers) {
- physp = memslot->arch.slot_phys;
- if (!physp)
- goto err;
- physp += gfn - memslot->base_gfn;
- pa = *physp;
- if (!pa) {
- if (kvmppc_get_guest_page(kvm, gfn, memslot,
- PAGE_SIZE) < 0)
- goto err;
- pa = *physp;
- }
- page = pfn_to_page(pa >> PAGE_SHIFT);
- get_page(page);
- } else {
- hva = gfn_to_hva_memslot(memslot, gfn);
- npages = get_user_pages_fast(hva, 1, 1, pages);
- if (npages < 1)
- goto err;
- page = pages[0];
- }
+ hva = gfn_to_hva_memslot(memslot, gfn);
+ npages = get_user_pages_fast(hva, 1, 1, pages);
+ if (npages < 1)
+ goto err;
+ page = pages[0];
srcu_read_unlock(&kvm->srcu, srcu_idx);
offset = gpa & (PAGE_SIZE - 1);
@@ -1258,7 +1099,7 @@ void kvmppc_unpin_guest_page(struct kvm *kvm, void *va, unsigned long gpa,
put_page(page);
- if (!dirty || !kvm->arch.using_mmu_notifiers)
+ if (!dirty)
return;
/* We need to mark this page dirty in the rmap chain */
@@ -1539,9 +1380,15 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
lbuf = (unsigned long __user *)buf;
for (j = 0; j < hdr.n_valid; ++j) {
+ __be64 hpte_v;
+ __be64 hpte_r;
+
err = -EFAULT;
- if (__get_user(v, lbuf) || __get_user(r, lbuf + 1))
+ if (__get_user(hpte_v, lbuf) ||
+ __get_user(hpte_r, lbuf + 1))
goto out;
+ v = be64_to_cpu(hpte_v);
+ r = be64_to_cpu(hpte_r);
err = -EINVAL;
if (!(v & HPTE_V_VALID))
goto out;
@@ -1652,10 +1499,7 @@ void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
{
struct kvmppc_mmu *mmu = &vcpu->arch.mmu;
- if (cpu_has_feature(CPU_FTR_ARCH_206))
- vcpu->arch.slb_nr = 32; /* POWER7 */
- else
- vcpu->arch.slb_nr = 64;
+ vcpu->arch.slb_nr = 32; /* POWER7/POWER8 */
mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index e63587d30b7..de4018a1bc4 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -58,6 +58,9 @@
#include "book3s.h"
+#define CREATE_TRACE_POINTS
+#include "trace_hv.h"
+
/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */
/* #define EXIT_DEBUG_INT */
@@ -135,11 +138,10 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
* stolen.
*
* Updates to busy_stolen are protected by arch.tbacct_lock;
- * updates to vc->stolen_tb are protected by the arch.tbacct_lock
- * of the vcpu that has taken responsibility for running the vcore
- * (i.e. vc->runner). The stolen times are measured in units of
- * timebase ticks. (Note that the != TB_NIL checks below are
- * purely defensive; they should never fail.)
+ * updates to vc->stolen_tb are protected by the vcore->stoltb_lock
+ * lock. The stolen times are measured in units of timebase ticks.
+ * (Note that the != TB_NIL checks below are purely defensive;
+ * they should never fail.)
*/
static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
@@ -147,12 +149,21 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
struct kvmppc_vcore *vc = vcpu->arch.vcore;
unsigned long flags;
- spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
- if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE &&
- vc->preempt_tb != TB_NIL) {
- vc->stolen_tb += mftb() - vc->preempt_tb;
- vc->preempt_tb = TB_NIL;
+ /*
+ * We can test vc->runner without taking the vcore lock,
+ * because only this task ever sets vc->runner to this
+ * vcpu, and once it is set to this vcpu, only this task
+ * ever sets it to NULL.
+ */
+ if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) {
+ spin_lock_irqsave(&vc->stoltb_lock, flags);
+ if (vc->preempt_tb != TB_NIL) {
+ vc->stolen_tb += mftb() - vc->preempt_tb;
+ vc->preempt_tb = TB_NIL;
+ }
+ spin_unlock_irqrestore(&vc->stoltb_lock, flags);
}
+ spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&
vcpu->arch.busy_preempt != TB_NIL) {
vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
@@ -166,9 +177,12 @@ static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
struct kvmppc_vcore *vc = vcpu->arch.vcore;
unsigned long flags;
- spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
- if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
+ if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) {
+ spin_lock_irqsave(&vc->stoltb_lock, flags);
vc->preempt_tb = mftb();
+ spin_unlock_irqrestore(&vc->stoltb_lock, flags);
+ }
+ spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
vcpu->arch.busy_preempt = mftb();
spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
@@ -191,9 +205,6 @@ int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
struct kvmppc_vcore *vc = vcpu->arch.vcore;
if (arch_compat) {
- if (!cpu_has_feature(CPU_FTR_ARCH_206))
- return -EINVAL; /* 970 has no compat mode support */
-
switch (arch_compat) {
case PVR_ARCH_205:
/*
@@ -505,25 +516,14 @@ static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
{
u64 p;
+ unsigned long flags;
- /*
- * If we are the task running the vcore, then since we hold
- * the vcore lock, we can't be preempted, so stolen_tb/preempt_tb
- * can't be updated, so we don't need the tbacct_lock.
- * If the vcore is inactive, it can't become active (since we
- * hold the vcore lock), so the vcpu load/put functions won't
- * update stolen_tb/preempt_tb, and we don't need tbacct_lock.
- */
+ spin_lock_irqsave(&vc->stoltb_lock, flags);
+ p = vc->stolen_tb;
if (vc->vcore_state != VCORE_INACTIVE &&
- vc->runner->arch.run_task != current) {
- spin_lock_irq(&vc->runner->arch.tbacct_lock);
- p = vc->stolen_tb;
- if (vc->preempt_tb != TB_NIL)
- p += now - vc->preempt_tb;
- spin_unlock_irq(&vc->runner->arch.tbacct_lock);
- } else {
- p = vc->stolen_tb;
- }
+ vc->preempt_tb != TB_NIL)
+ p += now - vc->preempt_tb;
+ spin_unlock_irqrestore(&vc->stoltb_lock, flags);
return p;
}
@@ -607,10 +607,45 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
}
}
+static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target)
+{
+ struct kvmppc_vcore *vcore = target->arch.vcore;
+
+ /*
+ * We expect to have been called by the real mode handler
+ * (kvmppc_rm_h_confer()) which would have directly returned
+ * H_SUCCESS if the source vcore wasn't idle (e.g. if it may
+ * have useful work to do and should not confer) so we don't
+ * recheck that here.
+ */
+
+ spin_lock(&vcore->lock);
+ if (target->arch.state == KVMPPC_VCPU_RUNNABLE &&
+ vcore->vcore_state != VCORE_INACTIVE)
+ target = vcore->runner;
+ spin_unlock(&vcore->lock);
+
+ return kvm_vcpu_yield_to(target);
+}
+
+static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
+{
+ int yield_count = 0;
+ struct lppaca *lppaca;
+
+ spin_lock(&vcpu->arch.vpa_update_lock);
+ lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr;
+ if (lppaca)
+ yield_count = be32_to_cpu(lppaca->yield_count);
+ spin_unlock(&vcpu->arch.vpa_update_lock);
+ return yield_count;
+}
+
int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
{
unsigned long req = kvmppc_get_gpr(vcpu, 3);
unsigned long target, ret = H_SUCCESS;
+ int yield_count;
struct kvm_vcpu *tvcpu;
int idx, rc;
@@ -619,14 +654,6 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
return RESUME_HOST;
switch (req) {
- case H_ENTER:
- idx = srcu_read_lock(&vcpu->kvm->srcu);
- ret = kvmppc_virtmode_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4),
- kvmppc_get_gpr(vcpu, 5),
- kvmppc_get_gpr(vcpu, 6),
- kvmppc_get_gpr(vcpu, 7));
- srcu_read_unlock(&vcpu->kvm->srcu, idx);
- break;
case H_CEDE:
break;
case H_PROD:
@@ -654,7 +681,10 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
ret = H_PARAMETER;
break;
}
- kvm_vcpu_yield_to(tvcpu);
+ yield_count = kvmppc_get_gpr(vcpu, 5);
+ if (kvmppc_get_yield_count(tvcpu) != yield_count)
+ break;
+ kvm_arch_vcpu_yield_to(tvcpu);
break;
case H_REGISTER_VPA:
ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
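The yield-count check added above matches what a pseries guest passes when it confers its time slice to a lock holder. A hedged guest-side sketch, mirroring the standard __spin_yield()-style logic rather than code from this patch (assumes <asm/hvcall.h>, <asm/lppaca.h>, <asm/smp.h>):

static void example_confer_to(int holder_cpu)
{
	u32 yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);

	/* An even yield count means the holder vcpu is currently running. */
	if (!(yield_count & 1))
		return;
	plpar_hcall_norets(H_CONFER,
			   get_hard_smp_processor_id(holder_cpu),
			   yield_count);
}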
@@ -769,6 +799,8 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu->stat.ext_intr_exits++;
r = RESUME_GUEST;
break;
+ /* HMI is a hypervisor interrupt that the host has already handled. Resume guest. */
+ case BOOK3S_INTERRUPT_HMI:
case BOOK3S_INTERRUPT_PERFMON:
r = RESUME_GUEST;
break;
@@ -837,6 +869,10 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
* Accordingly return to Guest or Host.
*/
case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
+ if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED)
+ vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ?
+ swab32(vcpu->arch.emul_inst) :
+ vcpu->arch.emul_inst;
if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
r = kvmppc_emulate_debug_inst(run, vcpu);
} else {
@@ -1357,6 +1393,7 @@ static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
INIT_LIST_HEAD(&vcore->runnable_threads);
spin_lock_init(&vcore->lock);
+ spin_lock_init(&vcore->stoltb_lock);
init_waitqueue_head(&vcore->wq);
vcore->preempt_tb = TB_NIL;
vcore->lpcr = kvm->arch.lpcr;
@@ -1694,9 +1731,11 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
vc->n_woken = 0;
vc->nap_count = 0;
vc->entry_exit_count = 0;
+ vc->preempt_tb = TB_NIL;
vc->vcore_state = VCORE_STARTING;
vc->in_guest = 0;
vc->napping_threads = 0;
+ vc->conferring_threads = 0;
/*
* Updating any of the vpas requires calling kvmppc_pin_guest_page,
@@ -1726,6 +1765,7 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
kvmppc_start_thread(vcpu);
kvmppc_create_dtl_entry(vcpu, vc);
+ trace_kvm_guest_enter(vcpu);
}
/* Set this explicitly in case thread 0 doesn't have a vcpu */
@@ -1734,6 +1774,9 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
vc->vcore_state = VCORE_RUNNING;
preempt_disable();
+
+ trace_kvmppc_run_core(vc, 0);
+
spin_unlock(&vc->lock);
kvm_guest_enter();
@@ -1779,6 +1822,8 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
kvmppc_core_pending_dec(vcpu))
kvmppc_core_dequeue_dec(vcpu);
+ trace_kvm_guest_exit(vcpu);
+
ret = RESUME_GUEST;
if (vcpu->arch.trap)
ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
@@ -1804,6 +1849,8 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
wake_up(&vcpu->arch.cpu_run);
}
}
+
+ trace_kvmppc_run_core(vc, 1);
}
/*
@@ -1826,15 +1873,37 @@ static void kvmppc_wait_for_exec(struct kvm_vcpu *vcpu, int wait_state)
*/
static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
{
+ struct kvm_vcpu *vcpu;
+ int do_sleep = 1;
+
DEFINE_WAIT(wait);
prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
+
+ /*
+ * Check one last time for pending exceptions and ceded state after
+ * we put ourselves on the wait queue
+ */
+ list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
+ if (vcpu->arch.pending_exceptions || !vcpu->arch.ceded) {
+ do_sleep = 0;
+ break;
+ }
+ }
+
+ if (!do_sleep) {
+ finish_wait(&vc->wq, &wait);
+ return;
+ }
+
vc->vcore_state = VCORE_SLEEPING;
+ trace_kvmppc_vcore_blocked(vc, 0);
spin_unlock(&vc->lock);
schedule();
finish_wait(&vc->wq, &wait);
spin_lock(&vc->lock);
vc->vcore_state = VCORE_INACTIVE;
+ trace_kvmppc_vcore_blocked(vc, 1);
}
static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
@@ -1843,6 +1912,8 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
struct kvmppc_vcore *vc;
struct kvm_vcpu *v, *vn;
+ trace_kvmppc_run_vcpu_enter(vcpu);
+
kvm_run->exit_reason = 0;
vcpu->arch.ret = RESUME_GUEST;
vcpu->arch.trap = 0;
@@ -1872,6 +1943,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
VCORE_EXIT_COUNT(vc) == 0) {
kvmppc_create_dtl_entry(vcpu, vc);
kvmppc_start_thread(vcpu);
+ trace_kvm_guest_enter(vcpu);
} else if (vc->vcore_state == VCORE_SLEEPING) {
wake_up(&vc->wq);
}
@@ -1936,6 +2008,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
wake_up(&v->arch.cpu_run);
}
+ trace_kvmppc_run_vcpu_exit(vcpu, kvm_run);
spin_unlock(&vc->lock);
return vcpu->arch.ret;
}
@@ -1962,7 +2035,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
/* Order vcpus_running vs. rma_setup_done, see kvmppc_alloc_reset_hpt */
smp_mb();
- /* On the first time here, set up HTAB and VRMA or RMA */
+ /* On the first time here, set up HTAB and VRMA */
if (!vcpu->kvm->arch.rma_setup_done) {
r = kvmppc_hv_setup_htab_rma(vcpu);
if (r)
@@ -1981,7 +2054,9 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
!(vcpu->arch.shregs.msr & MSR_PR)) {
+ trace_kvm_hcall_enter(vcpu);
r = kvmppc_pseries_do_hcall(vcpu);
+ trace_kvm_hcall_exit(vcpu, r);
kvmppc_core_prepare_to_enter(vcpu);
} else if (r == RESUME_PAGE_FAULT) {
srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
@@ -1997,98 +2072,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
return r;
}
-
-/* Work out RMLS (real mode limit selector) field value for a given RMA size.
- Assumes POWER7 or PPC970. */
-static inline int lpcr_rmls(unsigned long rma_size)
-{
- switch (rma_size) {
- case 32ul << 20: /* 32 MB */
- if (cpu_has_feature(CPU_FTR_ARCH_206))
- return 8; /* only supported on POWER7 */
- return -1;
- case 64ul << 20: /* 64 MB */
- return 3;
- case 128ul << 20: /* 128 MB */
- return 7;
- case 256ul << 20: /* 256 MB */
- return 4;
- case 1ul << 30: /* 1 GB */
- return 2;
- case 16ul << 30: /* 16 GB */
- return 1;
- case 256ul << 30: /* 256 GB */
- return 0;
- default:
- return -1;
- }
-}
-
-static int kvm_rma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct page *page;
- struct kvm_rma_info *ri = vma->vm_file->private_data;
-
- if (vmf->pgoff >= kvm_rma_pages)
- return VM_FAULT_SIGBUS;
-
- page = pfn_to_page(ri->base_pfn + vmf->pgoff);
- get_page(page);
- vmf->page = page;
- return 0;
-}
-
-static const struct vm_operations_struct kvm_rma_vm_ops = {
- .fault = kvm_rma_fault,
-};
-
-static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma)
-{
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
- vma->vm_ops = &kvm_rma_vm_ops;
- return 0;
-}
-
-static int kvm_rma_release(struct inode *inode, struct file *filp)
-{
- struct kvm_rma_info *ri = filp->private_data;
-
- kvm_release_rma(ri);
- return 0;
-}
-
-static const struct file_operations kvm_rma_fops = {
- .mmap = kvm_rma_mmap,
- .release = kvm_rma_release,
-};
-
-static long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
- struct kvm_allocate_rma *ret)
-{
- long fd;
- struct kvm_rma_info *ri;
- /*
- * Only do this on PPC970 in HV mode
- */
- if (!cpu_has_feature(CPU_FTR_HVMODE) ||
- !cpu_has_feature(CPU_FTR_ARCH_201))
- return -EINVAL;
-
- if (!kvm_rma_pages)
- return -EINVAL;
-
- ri = kvm_alloc_rma();
- if (!ri)
- return -ENOMEM;
-
- fd = anon_inode_getfd("kvm-rma", &kvm_rma_fops, ri, O_RDWR | O_CLOEXEC);
- if (fd < 0)
- kvm_release_rma(ri);
-
- ret->rma_size = kvm_rma_pages << PAGE_SHIFT;
- return fd;
-}
-
static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
int linux_psize)
{
@@ -2167,26 +2150,6 @@ out:
return r;
}
-static void unpin_slot(struct kvm_memory_slot *memslot)
-{
- unsigned long *physp;
- unsigned long j, npages, pfn;
- struct page *page;
-
- physp = memslot->arch.slot_phys;
- npages = memslot->npages;
- if (!physp)
- return;
- for (j = 0; j < npages; j++) {
- if (!(physp[j] & KVMPPC_GOT_PAGE))
- continue;
- pfn = physp[j] >> PAGE_SHIFT;
- page = pfn_to_page(pfn);
- SetPageDirty(page);
- put_page(page);
- }
-}
-
static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *free,
struct kvm_memory_slot *dont)
{
@@ -2194,11 +2157,6 @@ static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *free,
vfree(free->arch.rmap);
free->arch.rmap = NULL;
}
- if (!dont || free->arch.slot_phys != dont->arch.slot_phys) {
- unpin_slot(free);
- vfree(free->arch.slot_phys);
- free->arch.slot_phys = NULL;
- }
}
static int kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot,
@@ -2207,7 +2165,6 @@ static int kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot,
slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
if (!slot->arch.rmap)
return -ENOMEM;
- slot->arch.slot_phys = NULL;
return 0;
}
@@ -2216,17 +2173,6 @@ static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm,
struct kvm_memory_slot *memslot,
struct kvm_userspace_memory_region *mem)
{
- unsigned long *phys;
-
- /* Allocate a slot_phys array if needed */
- phys = memslot->arch.slot_phys;
- if (!kvm->arch.using_mmu_notifiers && !phys && memslot->npages) {
- phys = vzalloc(memslot->npages * sizeof(unsigned long));
- if (!phys)
- return -ENOMEM;
- memslot->arch.slot_phys = phys;
- }
-
return 0;
}
@@ -2284,17 +2230,11 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
{
int err = 0;
struct kvm *kvm = vcpu->kvm;
- struct kvm_rma_info *ri = NULL;
unsigned long hva;
struct kvm_memory_slot *memslot;
struct vm_area_struct *vma;
unsigned long lpcr = 0, senc;
- unsigned long lpcr_mask = 0;
unsigned long psize, porder;
- unsigned long rma_size;
- unsigned long rmls;
- unsigned long *physp;
- unsigned long i, npages;
int srcu_idx;
mutex_lock(&kvm->lock);
@@ -2329,88 +2269,25 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
psize = vma_kernel_pagesize(vma);
porder = __ilog2(psize);
- /* Is this one of our preallocated RMAs? */
- if (vma->vm_file && vma->vm_file->f_op == &kvm_rma_fops &&
- hva == vma->vm_start)
- ri = vma->vm_file->private_data;
-
up_read(&current->mm->mmap_sem);
- if (!ri) {
- /* On POWER7, use VRMA; on PPC970, give up */
- err = -EPERM;
- if (cpu_has_feature(CPU_FTR_ARCH_201)) {
- pr_err("KVM: CPU requires an RMO\n");
- goto out_srcu;
- }
+ /* We can handle 4k, 64k or 16M pages in the VRMA */
+ err = -EINVAL;
+ if (!(psize == 0x1000 || psize == 0x10000 ||
+ psize == 0x1000000))
+ goto out_srcu;
- /* We can handle 4k, 64k or 16M pages in the VRMA */
- err = -EINVAL;
- if (!(psize == 0x1000 || psize == 0x10000 ||
- psize == 0x1000000))
- goto out_srcu;
+ /* Update VRMASD field in the LPCR */
+ senc = slb_pgsize_encoding(psize);
+ kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
+ (VRMA_VSID << SLB_VSID_SHIFT_1T);
+ /* the -4 is to account for senc values starting at 0x10 */
+ lpcr = senc << (LPCR_VRMASD_SH - 4);
- /* Update VRMASD field in the LPCR */
- senc = slb_pgsize_encoding(psize);
- kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
- (VRMA_VSID << SLB_VSID_SHIFT_1T);
- lpcr_mask = LPCR_VRMASD;
- /* the -4 is to account for senc values starting at 0x10 */
- lpcr = senc << (LPCR_VRMASD_SH - 4);
+ /* Create HPTEs in the hash page table for the VRMA */
+ kvmppc_map_vrma(vcpu, memslot, porder);
- /* Create HPTEs in the hash page table for the VRMA */
- kvmppc_map_vrma(vcpu, memslot, porder);
-
- } else {
- /* Set up to use an RMO region */
- rma_size = kvm_rma_pages;
- if (rma_size > memslot->npages)
- rma_size = memslot->npages;
- rma_size <<= PAGE_SHIFT;
- rmls = lpcr_rmls(rma_size);
- err = -EINVAL;
- if ((long)rmls < 0) {
- pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size);
- goto out_srcu;
- }
- atomic_inc(&ri->use_count);
- kvm->arch.rma = ri;
-
- /* Update LPCR and RMOR */
- if (cpu_has_feature(CPU_FTR_ARCH_201)) {
- /* PPC970; insert RMLS value (split field) in HID4 */
- lpcr_mask = (1ul << HID4_RMLS0_SH) |
- (3ul << HID4_RMLS2_SH) | HID4_RMOR;
- lpcr = ((rmls >> 2) << HID4_RMLS0_SH) |
- ((rmls & 3) << HID4_RMLS2_SH);
- /* RMOR is also in HID4 */
- lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff)
- << HID4_RMOR_SH;
- } else {
- /* POWER7 */
- lpcr_mask = LPCR_VPM0 | LPCR_VRMA_L | LPCR_RMLS;
- lpcr = rmls << LPCR_RMLS_SH;
- kvm->arch.rmor = ri->base_pfn << PAGE_SHIFT;
- }
- pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n",
- ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);
-
- /* Initialize phys addrs of pages in RMO */
- npages = kvm_rma_pages;
- porder = __ilog2(npages);
- physp = memslot->arch.slot_phys;
- if (physp) {
- if (npages > memslot->npages)
- npages = memslot->npages;
- spin_lock(&kvm->arch.slot_phys_lock);
- for (i = 0; i < npages; ++i)
- physp[i] = ((ri->base_pfn + i) << PAGE_SHIFT) +
- porder;
- spin_unlock(&kvm->arch.slot_phys_lock);
- }
- }
-
- kvmppc_update_lpcr(kvm, lpcr, lpcr_mask);
+ kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
/* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */
smp_wmb();
@@ -2449,35 +2326,21 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls,
sizeof(kvm->arch.enabled_hcalls));
- kvm->arch.rma = NULL;
-
kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
- if (cpu_has_feature(CPU_FTR_ARCH_201)) {
- /* PPC970; HID4 is effectively the LPCR */
- kvm->arch.host_lpid = 0;
- kvm->arch.host_lpcr = lpcr = mfspr(SPRN_HID4);
- lpcr &= ~((3 << HID4_LPID1_SH) | (0xful << HID4_LPID5_SH));
- lpcr |= ((lpid >> 4) << HID4_LPID1_SH) |
- ((lpid & 0xf) << HID4_LPID5_SH);
- } else {
- /* POWER7; init LPCR for virtual RMA mode */
- kvm->arch.host_lpid = mfspr(SPRN_LPID);
- kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
- lpcr &= LPCR_PECE | LPCR_LPES;
- lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
- LPCR_VPM0 | LPCR_VPM1;
- kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
- (VRMA_VSID << SLB_VSID_SHIFT_1T);
- /* On POWER8 turn on online bit to enable PURR/SPURR */
- if (cpu_has_feature(CPU_FTR_ARCH_207S))
- lpcr |= LPCR_ONL;
- }
+ /* Init LPCR for virtual RMA mode */
+ kvm->arch.host_lpid = mfspr(SPRN_LPID);
+ kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
+ lpcr &= LPCR_PECE | LPCR_LPES;
+ lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
+ LPCR_VPM0 | LPCR_VPM1;
+ kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
+ (VRMA_VSID << SLB_VSID_SHIFT_1T);
+ /* On POWER8 turn on online bit to enable PURR/SPURR */
+ if (cpu_has_feature(CPU_FTR_ARCH_207S))
+ lpcr |= LPCR_ONL;
kvm->arch.lpcr = lpcr;
- kvm->arch.using_mmu_notifiers = !!cpu_has_feature(CPU_FTR_ARCH_206);
- spin_lock_init(&kvm->arch.slot_phys_lock);
-
/*
* Track that we now have a HV mode VM active. This blocks secondary
* CPU threads from coming online.
@@ -2507,10 +2370,6 @@ static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
kvm_hv_vm_deactivated();
kvmppc_free_vcores(kvm);
- if (kvm->arch.rma) {
- kvm_release_rma(kvm->arch.rma);
- kvm->arch.rma = NULL;
- }
kvmppc_free_hpt(kvm);
}
@@ -2536,7 +2395,8 @@ static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn,
static int kvmppc_core_check_processor_compat_hv(void)
{
- if (!cpu_has_feature(CPU_FTR_HVMODE))
+ if (!cpu_has_feature(CPU_FTR_HVMODE) ||
+ !cpu_has_feature(CPU_FTR_ARCH_206))
return -EIO;
return 0;
}
@@ -2550,16 +2410,6 @@ static long kvm_arch_vm_ioctl_hv(struct file *filp,
switch (ioctl) {
- case KVM_ALLOCATE_RMA: {
- struct kvm_allocate_rma rma;
- struct kvm *kvm = filp->private_data;
-
- r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
- if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
- r = -EFAULT;
- break;
- }
-
case KVM_PPC_ALLOCATE_HTAB: {
u32 htab_order;
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 3f1bb5a36c2..1f083ff8a61 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -16,6 +16,7 @@
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>
+#include <linux/bitops.h>
#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
@@ -32,95 +33,9 @@
* By default we reserve 5% of memory for hash pagetable allocation.
*/
static unsigned long kvm_cma_resv_ratio = 5;
-/*
- * We allocate RMAs (real mode areas) for KVM guests from the KVM CMA area.
- * Each RMA has to be physically contiguous and of a size that the
- * hardware supports. PPC970 and POWER7 support 64MB, 128MB and 256MB,
- * and other larger sizes. Since we are unlikely to be allocate that
- * much physically contiguous memory after the system is up and running,
- * we preallocate a set of RMAs in early boot using CMA.
- * should be power of 2.
- */
-unsigned long kvm_rma_pages = (1 << 27) >> PAGE_SHIFT; /* 128MB */
-EXPORT_SYMBOL_GPL(kvm_rma_pages);
static struct cma *kvm_cma;
-/* Work out RMLS (real mode limit selector) field value for a given RMA size.
- Assumes POWER7 or PPC970. */
-static inline int lpcr_rmls(unsigned long rma_size)
-{
- switch (rma_size) {
- case 32ul << 20: /* 32 MB */
- if (cpu_has_feature(CPU_FTR_ARCH_206))
- return 8; /* only supported on POWER7 */
- return -1;
- case 64ul << 20: /* 64 MB */
- return 3;
- case 128ul << 20: /* 128 MB */
- return 7;
- case 256ul << 20: /* 256 MB */
- return 4;
- case 1ul << 30: /* 1 GB */
- return 2;
- case 16ul << 30: /* 16 GB */
- return 1;
- case 256ul << 30: /* 256 GB */
- return 0;
- default:
- return -1;
- }
-}
-
-static int __init early_parse_rma_size(char *p)
-{
- unsigned long kvm_rma_size;
-
- pr_debug("%s(%s)\n", __func__, p);
- if (!p)
- return -EINVAL;
- kvm_rma_size = memparse(p, &p);
- /*
- * Check that the requested size is one supported in hardware
- */
- if (lpcr_rmls(kvm_rma_size) < 0) {
- pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size);
- return -EINVAL;
- }
- kvm_rma_pages = kvm_rma_size >> PAGE_SHIFT;
- return 0;
-}
-early_param("kvm_rma_size", early_parse_rma_size);
-
-struct kvm_rma_info *kvm_alloc_rma()
-{
- struct page *page;
- struct kvm_rma_info *ri;
-
- ri = kmalloc(sizeof(struct kvm_rma_info), GFP_KERNEL);
- if (!ri)
- return NULL;
- page = cma_alloc(kvm_cma, kvm_rma_pages, order_base_2(kvm_rma_pages));
- if (!page)
- goto err_out;
- atomic_set(&ri->use_count, 1);
- ri->base_pfn = page_to_pfn(page);
- return ri;
-err_out:
- kfree(ri);
- return NULL;
-}
-EXPORT_SYMBOL_GPL(kvm_alloc_rma);
-
-void kvm_release_rma(struct kvm_rma_info *ri)
-{
- if (atomic_dec_and_test(&ri->use_count)) {
- cma_release(kvm_cma, pfn_to_page(ri->base_pfn), kvm_rma_pages);
- kfree(ri);
- }
-}
-EXPORT_SYMBOL_GPL(kvm_release_rma);
-
static int __init early_parse_kvm_cma_resv(char *p)
{
pr_debug("%s(%s)\n", __func__, p);
@@ -132,14 +47,9 @@ early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);
struct page *kvm_alloc_hpt(unsigned long nr_pages)
{
- unsigned long align_pages = HPT_ALIGN_PAGES;
-
VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
- /* Old CPUs require HPT aligned on a multiple of its size */
- if (!cpu_has_feature(CPU_FTR_ARCH_206))
- align_pages = nr_pages;
- return cma_alloc(kvm_cma, nr_pages, order_base_2(align_pages));
+ return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES));
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt);
@@ -180,22 +90,44 @@ void __init kvm_cma_reserve(void)
if (selected_size) {
pr_debug("%s: reserving %ld MiB for global area\n", __func__,
(unsigned long)selected_size / SZ_1M);
- /*
- * Old CPUs require HPT aligned on a multiple of its size. So for them
- * make the alignment as max size we could request.
- */
- if (!cpu_has_feature(CPU_FTR_ARCH_206))
- align_size = __rounddown_pow_of_two(selected_size);
- else
- align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
-
- align_size = max(kvm_rma_pages << PAGE_SHIFT, align_size);
+ align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
cma_declare_contiguous(0, selected_size, 0, align_size,
KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, &kvm_cma);
}
}
/*
+ * Real-mode H_CONFER implementation.
+ * We check if we are the only vcpu out of this virtual core
+ * still running in the guest and not ceded. If so, we pop up
+ * to the virtual-mode implementation; if not, just return to
+ * the guest.
+ */
+long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
+ unsigned int yield_count)
+{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ int threads_running;
+ int threads_ceded;
+ int threads_conferring;
+ u64 stop = get_tb() + 10 * tb_ticks_per_usec;
+ int rv = H_SUCCESS; /* => don't yield */
+
+ set_bit(vcpu->arch.ptid, &vc->conferring_threads);
+ while ((get_tb() < stop) && (VCORE_EXIT_COUNT(vc) == 0)) {
+ threads_running = VCORE_ENTRY_COUNT(vc);
+ threads_ceded = hweight32(vc->napping_threads);
+ threads_conferring = hweight32(vc->conferring_threads);
+ if (threads_ceded + threads_conferring >= threads_running) {
+ rv = H_TOO_HARD; /* => do yield */
+ break;
+ }
+ }
+ clear_bit(vcpu->arch.ptid, &vc->conferring_threads);
+ return rv;
+}
+
+/*
* When running HV mode KVM we need to block certain operations while KVM VMs
* exist in the system. We use a counter of VMs to track this.
*
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 731be7478b2..36540a99d17 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -52,10 +52,8 @@ _GLOBAL(__kvmppc_vcore_entry)
std r3, _CCR(r1)
/* Save host DSCR */
-BEGIN_FTR_SECTION
mfspr r3, SPRN_DSCR
std r3, HSTATE_DSCR(r13)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
BEGIN_FTR_SECTION
/* Save host DABR */
@@ -84,11 +82,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
mfspr r7, SPRN_MMCR0 /* save MMCR0 */
mtspr SPRN_MMCR0, r3 /* freeze all counters, disable interrupts */
mfspr r6, SPRN_MMCRA
-BEGIN_FTR_SECTION
- /* On P7, clear MMCRA in order to disable SDAR updates */
+ /* Clear MMCRA in order to disable SDAR updates */
li r5, 0
mtspr SPRN_MMCRA, r5
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
isync
ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */
lbz r5, LPPACA_PMCINUSE(r3)
@@ -113,20 +109,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
mfspr r7, SPRN_PMC4
mfspr r8, SPRN_PMC5
mfspr r9, SPRN_PMC6
-BEGIN_FTR_SECTION
- mfspr r10, SPRN_PMC7
- mfspr r11, SPRN_PMC8
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
stw r3, HSTATE_PMC(r13)
stw r5, HSTATE_PMC + 4(r13)
stw r6, HSTATE_PMC + 8(r13)
stw r7, HSTATE_PMC + 12(r13)
stw r8, HSTATE_PMC + 16(r13)
stw r9, HSTATE_PMC + 20(r13)
-BEGIN_FTR_SECTION
- stw r10, HSTATE_PMC + 24(r13)
- stw r11, HSTATE_PMC + 28(r13)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
31:
/*
@@ -140,31 +128,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
add r8,r8,r7
std r8,HSTATE_DECEXP(r13)
-#ifdef CONFIG_SMP
- /*
- * On PPC970, if the guest vcpu has an external interrupt pending,
- * send ourselves an IPI so as to interrupt the guest once it
- * enables interrupts. (It must have interrupts disabled,
- * otherwise we would already have delivered the interrupt.)
- *
- * XXX If this is a UP build, smp_send_reschedule is not available,
- * so the interrupt will be delayed until the next time the vcpu
- * enters the guest with interrupts enabled.
- */
-BEGIN_FTR_SECTION
- ld r4, HSTATE_KVM_VCPU(r13)
- ld r0, VCPU_PENDING_EXC(r4)
- li r7, (1 << BOOK3S_IRQPRIO_EXTERNAL)
- oris r7, r7, (1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
- and. r0, r0, r7
- beq 32f
- lhz r3, PACAPACAINDEX(r13)
- bl smp_send_reschedule
- nop
-32:
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
-#endif /* CONFIG_SMP */
-
/* Jump to partition switch code */
bl kvmppc_hv_entry_trampoline
nop
diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c
index d562c8e2bc3..60081bd7584 100644
--- a/arch/powerpc/kvm/book3s_hv_ras.c
+++ b/arch/powerpc/kvm/book3s_hv_ras.c
@@ -138,8 +138,5 @@ out:
long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu)
{
- if (cpu_has_feature(CPU_FTR_ARCH_206))
- return kvmppc_realmode_mc_power7(vcpu);
-
- return 0;
+ return kvmppc_realmode_mc_power7(vcpu);
}
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 084ad54c73c..510bdfbc407 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -45,16 +45,12 @@ static int global_invalidates(struct kvm *kvm, unsigned long flags)
* as indicated by local_paca->kvm_hstate.kvm_vcpu being set,
* we can use tlbiel as long as we mark all other physical
* cores as potentially having stale TLB entries for this lpid.
- * If we're not using MMU notifiers, we never take pages away
- * from the guest, so we can use tlbiel if requested.
* Otherwise, don't use tlbiel.
*/
if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcpu)
global = 0;
- else if (kvm->arch.using_mmu_notifiers)
- global = 1;
else
- global = !(flags & H_LOCAL);
+ global = 1;
if (!global) {
/* any other core might now have stale TLB entries... */
@@ -170,7 +166,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
struct revmap_entry *rev;
unsigned long g_ptel;
struct kvm_memory_slot *memslot;
- unsigned long *physp, pte_size;
+ unsigned long pte_size;
unsigned long is_io;
unsigned long *rmap;
pte_t pte;
@@ -198,9 +194,6 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
is_io = ~0ul;
rmap = NULL;
if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
- /* PPC970 can't do emulated MMIO */
- if (!cpu_has_feature(CPU_FTR_ARCH_206))
- return H_PARAMETER;
/* Emulated MMIO - mark this with key=31 */
pteh |= HPTE_V_ABSENT;
ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO;
@@ -213,37 +206,20 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
slot_fn = gfn - memslot->base_gfn;
rmap = &memslot->arch.rmap[slot_fn];
- if (!kvm->arch.using_mmu_notifiers) {
- physp = memslot->arch.slot_phys;
- if (!physp)
- return H_PARAMETER;
- physp += slot_fn;
- if (realmode)
- physp = real_vmalloc_addr(physp);
- pa = *physp;
- if (!pa)
- return H_TOO_HARD;
- is_io = pa & (HPTE_R_I | HPTE_R_W);
- pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
- pa &= PAGE_MASK;
+ /* Translate to host virtual address */
+ hva = __gfn_to_hva_memslot(memslot, gfn);
+
+ /* Look up the Linux PTE for the backing page */
+ pte_size = psize;
+ pte = lookup_linux_pte_and_update(pgdir, hva, writing, &pte_size);
+ if (pte_present(pte) && !pte_numa(pte)) {
+ if (writing && !pte_write(pte))
+ /* make the actual HPTE be read-only */
+ ptel = hpte_make_readonly(ptel);
+ is_io = hpte_cache_bits(pte_val(pte));
+ pa = pte_pfn(pte) << PAGE_SHIFT;
+ pa |= hva & (pte_size - 1);
pa |= gpa & ~PAGE_MASK;
- } else {
- /* Translate to host virtual address */
- hva = __gfn_to_hva_memslot(memslot, gfn);
-
- /* Look up the Linux PTE for the backing page */
- pte_size = psize;
- pte = lookup_linux_pte_and_update(pgdir, hva, writing,
- &pte_size);
- if (pte_present(pte) && !pte_numa(pte)) {
- if (writing && !pte_write(pte))
- /* make the actual HPTE be read-only */
- ptel = hpte_make_readonly(ptel);
- is_io = hpte_cache_bits(pte_val(pte));
- pa = pte_pfn(pte) << PAGE_SHIFT;
- pa |= hva & (pte_size - 1);
- pa |= gpa & ~PAGE_MASK;
- }
}
if (pte_size < psize)
@@ -337,8 +313,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
rmap = real_vmalloc_addr(rmap);
lock_rmap(rmap);
/* Check for pending invalidations under the rmap chain lock */
- if (kvm->arch.using_mmu_notifiers &&
- mmu_notifier_retry(kvm, mmu_seq)) {
+ if (mmu_notifier_retry(kvm, mmu_seq)) {
/* inval in progress, write a non-present HPTE */
pteh |= HPTE_V_ABSENT;
pteh &= ~HPTE_V_VALID;
@@ -395,61 +370,11 @@ static inline int try_lock_tlbie(unsigned int *lock)
return old == 0;
}
-/*
- * tlbie/tlbiel is a bit different on the PPC970 compared to later
- * processors such as POWER7; the large page bit is in the instruction
- * not RB, and the top 16 bits and the bottom 12 bits of the VA
- * in RB must be 0.
- */
-static void do_tlbies_970(struct kvm *kvm, unsigned long *rbvalues,
- long npages, int global, bool need_sync)
-{
- long i;
-
- if (global) {
- while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
- cpu_relax();
- if (need_sync)
- asm volatile("ptesync" : : : "memory");
- for (i = 0; i < npages; ++i) {
- unsigned long rb = rbvalues[i];
-
- if (rb & 1) /* large page */
- asm volatile("tlbie %0,1" : :
- "r" (rb & 0x0000fffffffff000ul));
- else
- asm volatile("tlbie %0,0" : :
- "r" (rb & 0x0000fffffffff000ul));
- }
- asm volatile("eieio; tlbsync; ptesync" : : : "memory");
- kvm->arch.tlbie_lock = 0;
- } else {
- if (need_sync)
- asm volatile("ptesync" : : : "memory");
- for (i = 0; i < npages; ++i) {
- unsigned long rb = rbvalues[i];
-
- if (rb & 1) /* large page */
- asm volatile("tlbiel %0,1" : :
- "r" (rb & 0x0000fffffffff000ul));
- else
- asm volatile("tlbiel %0,0" : :
- "r" (rb & 0x0000fffffffff000ul));
- }
- asm volatile("ptesync" : : : "memory");
- }
-}
-
static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
long npages, int global, bool need_sync)
{
long i;
- if (cpu_has_feature(CPU_FTR_ARCH_201)) {
- /* PPC970 tlbie instruction is a bit different */
- do_tlbies_970(kvm, rbvalues, npages, global, need_sync);
- return;
- }
if (global) {
while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
cpu_relax();
@@ -667,40 +592,29 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
rev->guest_rpte = r;
note_hpte_modification(kvm, rev);
}
- r = (be64_to_cpu(hpte[1]) & ~mask) | bits;
/* Update HPTE */
if (v & HPTE_V_VALID) {
- rb = compute_tlbie_rb(v, r, pte_index);
- hpte[0] = cpu_to_be64(v & ~HPTE_V_VALID);
- do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
/*
- * If the host has this page as readonly but the guest
- * wants to make it read/write, reduce the permissions.
- * Checking the host permissions involves finding the
- * memslot and then the Linux PTE for the page.
+ * If the page is valid, don't let it transition from
+ * readonly to writable. If it should be writable, we'll
+ * take a trap and let the page fault code sort it out.
*/
- if (hpte_is_writable(r) && kvm->arch.using_mmu_notifiers) {
- unsigned long psize, gfn, hva;
- struct kvm_memory_slot *memslot;
- pgd_t *pgdir = vcpu->arch.pgdir;
- pte_t pte;
-
- psize = hpte_page_size(v, r);
- gfn = ((r & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
- memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
- if (memslot) {
- hva = __gfn_to_hva_memslot(memslot, gfn);
- pte = lookup_linux_pte_and_update(pgdir, hva,
- 1, &psize);
- if (pte_present(pte) && !pte_write(pte))
- r = hpte_make_readonly(r);
- }
+ pte = be64_to_cpu(hpte[1]);
+ r = (pte & ~mask) | bits;
+ if (hpte_is_writable(r) && !hpte_is_writable(pte))
+ r = hpte_make_readonly(r);
+ /* If the PTE is changing, invalidate it first */
+ if (r != pte) {
+ rb = compute_tlbie_rb(v, r, pte_index);
+ hpte[0] = cpu_to_be64((v & ~HPTE_V_VALID) |
+ HPTE_V_ABSENT);
+ do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags),
+ true);
+ hpte[1] = cpu_to_be64(r);
}
}
- hpte[1] = cpu_to_be64(r);
- eieio();
- hpte[0] = cpu_to_be64(v & ~HPTE_V_HVLOCK);
+ unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
asm volatile("ptesync" : : : "memory");
return H_SUCCESS;
}
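[Editorial note] The reworked H_PROTECT path above no longer walks the memslot and Linux page tables in real mode to decide whether a read-only page may become writable; it simply refuses the upgrade, writes the HPTE back read-only, and lets the subsequent guest store fault into the page-fault code, which already knows how to check host permissions. A hedged sketch of just that update rule, with a hypothetical one-bit protection encoding standing in for the real PP bits:

/* --- illustrative sketch, not part of the patch --- */
#include <stdint.h>
#include <stdbool.h>

#define PP_RO 0x1ULL	/* hypothetical read-only page-protection encoding */

static bool     r_is_writable(uint64_t r)   { return !(r & PP_RO); }
static uint64_t r_make_readonly(uint64_t r) { return r | PP_RO; }

/* New H_PROTECT rule: never upgrade a live HPTE to writable in real mode. */
static uint64_t protect_update(uint64_t old_r, uint64_t mask, uint64_t bits)
{
	uint64_t new_r = (old_r & ~mask) | bits;

	if (r_is_writable(new_r) && !r_is_writable(old_r))
		new_r = r_make_readonly(new_r);	/* page-fault path upgrades it later */
	return new_r;
}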
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index 3ee38e6e884..7b066f6b02a 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -183,8 +183,10 @@ static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
* state update in HW (ie bus transactions) so we can handle them
* separately here as well.
*/
- if (resend)
+ if (resend) {
icp->rm_action |= XICS_RM_CHECK_RESEND;
+ icp->rm_resend_icp = icp;
+ }
}
@@ -254,10 +256,25 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
* nothing needs to be done as there can be no XISR to
* reject.
*
+ * ICP state: Check_IPI
+ *
* If the CPPR is less favored, then we might be replacing
- * an interrupt, and thus need to possibly reject it as in
+ * an interrupt, and thus need to possibly reject it.
*
- * ICP state: Check_IPI
+ * ICP State: IPI
+ *
+ * Besides rejecting any pending interrupts, we also
+ * update XISR and pending_pri to mark IPI as pending.
+ *
+ * PAPR does not describe this state, but if the MFRR is being
+ * made less favored than its earlier value, there might be
+ * a previously-rejected interrupt needing to be resent.
+ * Ideally, we would want to resend only if
+ * prio(pending_interrupt) < mfrr &&
+ * prio(pending_interrupt) < cppr
+ * where pending interrupt is the one that was rejected. But
+ * we don't have that state, so we simply trigger a resend
+ * whenever the MFRR is made less favored.
*/
do {
old_state = new_state = ACCESS_ONCE(icp->state);
@@ -270,13 +287,14 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
resend = false;
if (mfrr < new_state.cppr) {
/* Reject a pending interrupt if not an IPI */
- if (mfrr <= new_state.pending_pri)
+ if (mfrr <= new_state.pending_pri) {
reject = new_state.xisr;
- new_state.pending_pri = mfrr;
- new_state.xisr = XICS_IPI;
+ new_state.pending_pri = mfrr;
+ new_state.xisr = XICS_IPI;
+ }
}
- if (mfrr > old_state.mfrr && mfrr > new_state.cppr) {
+ if (mfrr > old_state.mfrr) {
resend = new_state.need_resend;
new_state.need_resend = 0;
}
@@ -289,8 +307,10 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
}
/* Pass resends to virtual mode */
- if (resend)
+ if (resend) {
this_icp->rm_action |= XICS_RM_CHECK_RESEND;
+ this_icp->rm_resend_icp = icp;
+ }
return check_too_hard(xics, this_icp);
}
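[Editorial note] Both H_IPI changes in this patch (here and in the virtual-mode copy in book3s_xics.c) drop the `mfrr > new_state.cppr` condition from the resend test: the ICP does not remember the priority of the interrupt it previously rejected, so the safe choice is to trigger a resend whenever the MFRR is made less favored at all (numerically larger, since lower values are more favored in XICS). A small sketch of the new rule, using hypothetical field names:

/* --- illustrative sketch, not part of the patch --- */
#include <stdbool.h>
#include <stdint.h>

struct icp_view {		/* hypothetical stand-in for the ICP state word */
	uint8_t cppr;		/* current processor priority */
	uint8_t mfrr;		/* most-favored request register */
	bool    need_resend;	/* a previously rejected interrupt may exist */
};

/*
 * Old rule: resend only if new_mfrr > mfrr && new_mfrr > cppr.
 * New rule: resend whenever the MFRR becomes less favored, because the
 * priority of the rejected interrupt is not known at this point.
 */
static bool mfrr_should_resend(const struct icp_view *icp, uint8_t new_mfrr)
{
	return new_mfrr > icp->mfrr && icp->need_resend;
}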
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 65c105b17a2..10554df1385 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -94,20 +94,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
lwz r6, HSTATE_PMC + 12(r13)
lwz r8, HSTATE_PMC + 16(r13)
lwz r9, HSTATE_PMC + 20(r13)
-BEGIN_FTR_SECTION
- lwz r10, HSTATE_PMC + 24(r13)
- lwz r11, HSTATE_PMC + 28(r13)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
mtspr SPRN_PMC1, r3
mtspr SPRN_PMC2, r4
mtspr SPRN_PMC3, r5
mtspr SPRN_PMC4, r6
mtspr SPRN_PMC5, r8
mtspr SPRN_PMC6, r9
-BEGIN_FTR_SECTION
- mtspr SPRN_PMC7, r10
- mtspr SPRN_PMC8, r11
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
ld r3, HSTATE_MMCR(r13)
ld r4, HSTATE_MMCR + 8(r13)
ld r5, HSTATE_MMCR + 16(r13)
@@ -153,11 +145,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
cmpwi cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
-BEGIN_FTR_SECTION
beq 11f
cmpwi cr2, r12, BOOK3S_INTERRUPT_HMI
beq cr2, 14f /* HMI check */
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
/* RFI into the highmem handler, or branch to interrupt handler */
mfmsr r6
@@ -166,7 +156,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
mtmsrd r6, 1 /* Clear RI in MSR */
mtsrr0 r8
mtsrr1 r7
- beqa 0x500 /* external interrupt (PPC970) */
beq cr1, 13f /* machine check */
RFI
@@ -393,11 +382,8 @@ kvmppc_hv_entry:
slbia
ptesync
-BEGIN_FTR_SECTION
- b 30f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
/*
- * POWER7 host -> guest partition switch code.
+ * POWER7/POWER8 host -> guest partition switch code.
* We don't have to lock against concurrent tlbies,
* but we do have to coordinate across hardware threads.
*/
@@ -505,97 +491,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
cmpwi r3,512 /* 1 microsecond */
li r12,BOOK3S_INTERRUPT_HV_DECREMENTER
blt hdec_soon
- b 31f
-
- /*
- * PPC970 host -> guest partition switch code.
- * We have to lock against concurrent tlbies,
- * using native_tlbie_lock to lock against host tlbies
- * and kvm->arch.tlbie_lock to lock against guest tlbies.
- * We also have to invalidate the TLB since its
- * entries aren't tagged with the LPID.
- */
-30: ld r5,HSTATE_KVM_VCORE(r13)
- ld r9,VCORE_KVM(r5) /* pointer to struct kvm */
-
- /* first take native_tlbie_lock */
- .section ".toc","aw"
-toc_tlbie_lock:
- .tc native_tlbie_lock[TC],native_tlbie_lock
- .previous
- ld r3,toc_tlbie_lock@toc(r2)
-#ifdef __BIG_ENDIAN__
- lwz r8,PACA_LOCK_TOKEN(r13)
-#else
- lwz r8,PACAPACAINDEX(r13)
-#endif
-24: lwarx r0,0,r3
- cmpwi r0,0
- bne 24b
- stwcx. r8,0,r3
- bne 24b
- isync
-
- ld r5,HSTATE_KVM_VCORE(r13)
- ld r7,VCORE_LPCR(r5) /* use vcore->lpcr to store HID4 */
- li r0,0x18f
- rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */
- or r0,r7,r0
- ptesync
- sync
- mtspr SPRN_HID4,r0 /* switch to reserved LPID */
- isync
- li r0,0
- stw r0,0(r3) /* drop native_tlbie_lock */
-
- /* invalidate the whole TLB */
- li r0,256
- mtctr r0
- li r6,0
-25: tlbiel r6
- addi r6,r6,0x1000
- bdnz 25b
- ptesync
- /* Take the guest's tlbie_lock */
- addi r3,r9,KVM_TLBIE_LOCK
-24: lwarx r0,0,r3
- cmpwi r0,0
- bne 24b
- stwcx. r8,0,r3
- bne 24b
- isync
- ld r6,KVM_SDR1(r9)
- mtspr SPRN_SDR1,r6 /* switch to partition page table */
-
- /* Set up HID4 with the guest's LPID etc. */
- sync
- mtspr SPRN_HID4,r7
- isync
-
- /* drop the guest's tlbie_lock */
- li r0,0
- stw r0,0(r3)
-
- /* Check if HDEC expires soon */
- mfspr r3,SPRN_HDEC
- cmpwi r3,10
- li r12,BOOK3S_INTERRUPT_HV_DECREMENTER
- blt hdec_soon
-
- /* Enable HDEC interrupts */
- mfspr r0,SPRN_HID0
- li r3,1
- rldimi r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
- sync
- mtspr SPRN_HID0,r0
- mfspr r0,SPRN_HID0
- mfspr r0,SPRN_HID0
- mfspr r0,SPRN_HID0
- mfspr r0,SPRN_HID0
- mfspr r0,SPRN_HID0
- mfspr r0,SPRN_HID0
-31:
/* Do we have a guest vcpu to run? */
cmpdi r4, 0
beq kvmppc_primary_no_guest
@@ -625,7 +521,6 @@ kvmppc_got_guest:
stb r6, VCPU_VPA_DIRTY(r4)
25:
-BEGIN_FTR_SECTION
/* Save purr/spurr */
mfspr r5,SPRN_PURR
mfspr r6,SPRN_SPURR
@@ -635,7 +530,6 @@ BEGIN_FTR_SECTION
ld r8,VCPU_SPURR(r4)
mtspr SPRN_PURR,r7
mtspr SPRN_SPURR,r8
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
BEGIN_FTR_SECTION
/* Set partition DABR */
@@ -644,9 +538,7 @@ BEGIN_FTR_SECTION
ld r6,VCPU_DABR(r4)
mtspr SPRN_DABRX,r5
mtspr SPRN_DABR,r6
- BEGIN_FTR_SECTION_NESTED(89)
isync
- END_FTR_SECTION_NESTED(CPU_FTR_ARCH_206, CPU_FTR_ARCH_206, 89)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
@@ -777,20 +669,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
lwz r7, VCPU_PMC + 12(r4)
lwz r8, VCPU_PMC + 16(r4)
lwz r9, VCPU_PMC + 20(r4)
-BEGIN_FTR_SECTION
- lwz r10, VCPU_PMC + 24(r4)
- lwz r11, VCPU_PMC + 28(r4)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
mtspr SPRN_PMC1, r3
mtspr SPRN_PMC2, r5
mtspr SPRN_PMC3, r6
mtspr SPRN_PMC4, r7
mtspr SPRN_PMC5, r8
mtspr SPRN_PMC6, r9
-BEGIN_FTR_SECTION
- mtspr SPRN_PMC7, r10
- mtspr SPRN_PMC8, r11
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
ld r3, VCPU_MMCR(r4)
ld r5, VCPU_MMCR + 8(r4)
ld r6, VCPU_MMCR + 16(r4)
@@ -837,14 +721,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
ld r30, VCPU_GPR(R30)(r4)
ld r31, VCPU_GPR(R31)(r4)
-BEGIN_FTR_SECTION
/* Switch DSCR to guest value */
ld r5, VCPU_DSCR(r4)
mtspr SPRN_DSCR, r5
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
BEGIN_FTR_SECTION
- /* Skip next section on POWER7 or PPC970 */
+ /* Skip next section on POWER7 */
b 8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
@@ -920,7 +802,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
mtspr SPRN_DAR, r5
mtspr SPRN_DSISR, r6
-BEGIN_FTR_SECTION
/* Restore AMR and UAMOR, set AMOR to all 1s */
ld r5,VCPU_AMR(r4)
ld r6,VCPU_UAMOR(r4)
@@ -928,7 +809,6 @@ BEGIN_FTR_SECTION
mtspr SPRN_AMR,r5
mtspr SPRN_UAMOR,r6
mtspr SPRN_AMOR,r7
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
/* Restore state of CTRL run bit; assume 1 on entry */
lwz r5,VCPU_CTRL(r4)
@@ -963,13 +843,11 @@ deliver_guest_interrupt:
rldicl r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
cmpdi cr1, r0, 0
andi. r8, r11, MSR_EE
-BEGIN_FTR_SECTION
mfspr r8, SPRN_LPCR
/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
rldimi r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
mtspr SPRN_LPCR, r8
isync
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
beq 5f
li r0, BOOK3S_INTERRUPT_EXTERNAL
bne cr1, 12f
@@ -1124,15 +1002,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
stw r12,VCPU_TRAP(r9)
- /* Save HEIR (HV emulation assist reg) in last_inst
+ /* Save HEIR (HV emulation assist reg) in emul_inst
if this is an HEI (HV emulation interrupt, e40) */
li r3,KVM_INST_FETCH_FAILED
-BEGIN_FTR_SECTION
cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
bne 11f
mfspr r3,SPRN_HEIR
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
-11: stw r3,VCPU_LAST_INST(r9)
+11: stw r3,VCPU_HEIR(r9)
/* these are volatile across C function calls */
mfctr r3
@@ -1140,13 +1016,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
std r3, VCPU_CTR(r9)
stw r4, VCPU_XER(r9)
-BEGIN_FTR_SECTION
/* If this is a page table miss then see if it's theirs or ours */
cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
beq kvmppc_hdsi
cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE
beq kvmppc_hisi
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
/* See if this is a leftover HDEC interrupt */
cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
@@ -1159,11 +1033,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
cmpwi r12,BOOK3S_INTERRUPT_SYSCALL
beq hcall_try_real_mode
- /* Only handle external interrupts here on arch 206 and later */
-BEGIN_FTR_SECTION
- b ext_interrupt_to_host
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
-
/* External interrupt ? */
cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
bne+ ext_interrupt_to_host
@@ -1193,11 +1062,9 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
mfdsisr r7
std r6, VCPU_DAR(r9)
stw r7, VCPU_DSISR(r9)
-BEGIN_FTR_SECTION
/* don't overwrite fault_dar/fault_dsisr if HDSI */
cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
beq 6f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
std r6, VCPU_FAULT_DAR(r9)
stw r7, VCPU_FAULT_DSISR(r9)
@@ -1236,7 +1103,6 @@ mc_cont:
/*
* Save the guest PURR/SPURR
*/
-BEGIN_FTR_SECTION
mfspr r5,SPRN_PURR
mfspr r6,SPRN_SPURR
ld r7,VCPU_PURR(r9)
@@ -1256,7 +1122,6 @@ BEGIN_FTR_SECTION
add r4,r4,r6
mtspr SPRN_PURR,r3
mtspr SPRN_SPURR,r4
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)
/* Save DEC */
mfspr r5,SPRN_DEC
@@ -1306,22 +1171,18 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
8:
/* Save and reset AMR and UAMOR before turning on the MMU */
-BEGIN_FTR_SECTION
mfspr r5,SPRN_AMR
mfspr r6,SPRN_UAMOR
std r5,VCPU_AMR(r9)
std r6,VCPU_UAMOR(r9)
li r6,0
mtspr SPRN_AMR,r6
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
/* Switch DSCR back to host value */
-BEGIN_FTR_SECTION
mfspr r8, SPRN_DSCR
ld r7, HSTATE_DSCR(r13)
std r8, VCPU_DSCR(r9)
mtspr SPRN_DSCR, r7
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
/* Save non-volatile GPRs */
std r14, VCPU_GPR(R14)(r9)
@@ -1503,11 +1364,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
mfspr r4, SPRN_MMCR0 /* save MMCR0 */
mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
mfspr r6, SPRN_MMCRA
-BEGIN_FTR_SECTION
- /* On P7, clear MMCRA in order to disable SDAR updates */
+ /* Clear MMCRA in order to disable SDAR updates */
li r7, 0
mtspr SPRN_MMCRA, r7
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
isync
beq 21f /* if no VPA, save PMU stuff anyway */
lbz r7, LPPACA_PMCINUSE(r8)
@@ -1532,10 +1391,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
mfspr r6, SPRN_PMC4
mfspr r7, SPRN_PMC5
mfspr r8, SPRN_PMC6
-BEGIN_FTR_SECTION
- mfspr r10, SPRN_PMC7
- mfspr r11, SPRN_PMC8
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
stw r3, VCPU_PMC(r9)
stw r4, VCPU_PMC + 4(r9)
stw r5, VCPU_PMC + 8(r9)
@@ -1543,10 +1398,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
stw r7, VCPU_PMC + 16(r9)
stw r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
- stw r10, VCPU_PMC + 24(r9)
- stw r11, VCPU_PMC + 28(r9)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
-BEGIN_FTR_SECTION
mfspr r5, SPRN_SIER
mfspr r6, SPRN_SPMC1
mfspr r7, SPRN_SPMC2
@@ -1566,11 +1417,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
ptesync
hdec_soon: /* r12 = trap, r13 = paca */
-BEGIN_FTR_SECTION
- b 32f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
/*
- * POWER7 guest -> host partition switch code.
+ * POWER7/POWER8 guest -> host partition switch code.
* We don't have to lock against tlbies but we do
* have to coordinate the hardware threads.
*/
@@ -1698,87 +1546,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
16: ld r8,KVM_HOST_LPCR(r4)
mtspr SPRN_LPCR,r8
isync
- b 33f
-
- /*
- * PPC970 guest -> host partition switch code.
- * We have to lock against concurrent tlbies, and
- * we have to flush the whole TLB.
- */
-32: ld r5,HSTATE_KVM_VCORE(r13)
- ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
-
- /* Take the guest's tlbie_lock */
-#ifdef __BIG_ENDIAN__
- lwz r8,PACA_LOCK_TOKEN(r13)
-#else
- lwz r8,PACAPACAINDEX(r13)
-#endif
- addi r3,r4,KVM_TLBIE_LOCK
-24: lwarx r0,0,r3
- cmpwi r0,0
- bne 24b
- stwcx. r8,0,r3
- bne 24b
- isync
-
- ld r7,KVM_HOST_LPCR(r4) /* use kvm->arch.host_lpcr for HID4 */
- li r0,0x18f
- rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */
- or r0,r7,r0
- ptesync
- sync
- mtspr SPRN_HID4,r0 /* switch to reserved LPID */
- isync
- li r0,0
- stw r0,0(r3) /* drop guest tlbie_lock */
-
- /* invalidate the whole TLB */
- li r0,256
- mtctr r0
- li r6,0
-25: tlbiel r6
- addi r6,r6,0x1000
- bdnz 25b
- ptesync
-
- /* take native_tlbie_lock */
- ld r3,toc_tlbie_lock@toc(2)
-24: lwarx r0,0,r3
- cmpwi r0,0
- bne 24b
- stwcx. r8,0,r3
- bne 24b
- isync
-
- ld r6,KVM_HOST_SDR1(r4)
- mtspr SPRN_SDR1,r6 /* switch to host page table */
-
- /* Set up host HID4 value */
- sync
- mtspr SPRN_HID4,r7
- isync
- li r0,0
- stw r0,0(r3) /* drop native_tlbie_lock */
-
- lis r8,0x7fff /* MAX_INT@h */
- mtspr SPRN_HDEC,r8
-
- /* Disable HDEC interrupts */
- mfspr r0,SPRN_HID0
- li r3,0
- rldimi r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
- sync
- mtspr SPRN_HID0,r0
- mfspr r0,SPRN_HID0
- mfspr r0,SPRN_HID0
- mfspr r0,SPRN_HID0
- mfspr r0,SPRN_HID0
- mfspr r0,SPRN_HID0
- mfspr r0,SPRN_HID0
/* load host SLB entries */
-33: ld r8,PACA_SLBSHADOWPTR(r13)
+ ld r8,PACA_SLBSHADOWPTR(r13)
.rept SLB_NUM_BOLTED
li r3, SLBSHADOW_SAVEAREA
@@ -2047,7 +1817,7 @@ hcall_real_table:
.long 0 /* 0xd8 */
.long 0 /* 0xdc */
.long DOTSYM(kvmppc_h_cede) - hcall_real_table
- .long 0 /* 0xe4 */
+ .long DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
.long 0 /* 0xe8 */
.long 0 /* 0xec */
.long 0 /* 0xf0 */
@@ -2126,9 +1896,6 @@ _GLOBAL(kvmppc_h_cede)
stw r0,VCPU_TRAP(r3)
li r0,H_SUCCESS
std r0,VCPU_GPR(R3)(r3)
-BEGIN_FTR_SECTION
- b kvm_cede_exit /* just send it up to host on 970 */
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
/*
* Set our bit in the bitmask of napping threads unless all the
@@ -2455,7 +2222,6 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
mtmsrd r8
- isync
addi r3,r3,VCPU_FPRS
bl store_fp_state
#ifdef CONFIG_ALTIVEC
@@ -2491,7 +2257,6 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
mtmsrd r8
- isync
addi r3,r4,VCPU_FPRS
bl load_fp_state
#ifdef CONFIG_ALTIVEC
diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c
index bfb8035314e..bd6ab1672ae 100644
--- a/arch/powerpc/kvm/book3s_paired_singles.c
+++ b/arch/powerpc/kvm/book3s_paired_singles.c
@@ -352,14 +352,6 @@ static inline u32 inst_get_field(u32 inst, int msb, int lsb)
return kvmppc_get_field(inst, msb + 32, lsb + 32);
}
-/*
- * Replaces inst bits with ordering according to spec.
- */
-static inline u32 inst_set_field(u32 inst, int msb, int lsb, int value)
-{
- return kvmppc_set_field(inst, msb + 32, lsb + 32, value);
-}
-
bool kvmppc_inst_is_paired_single(struct kvm_vcpu *vcpu, u32 inst)
{
if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index cf2eb16846d..f57383941d0 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -644,11 +644,6 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
return r;
}
-static inline int get_fpr_index(int i)
-{
- return i * TS_FPRWIDTH;
-}
-
/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index eaeb78047fb..807351f76f8 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -613,10 +613,25 @@ static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
* there might be a previously-rejected interrupt needing
* to be resent.
*
+ * ICP state: Check_IPI
+ *
* If the CPPR is less favored, then we might be replacing
- * an interrupt, and thus need to possibly reject it as in
+ * an interrupt, and thus need to possibly reject it.
*
- * ICP state: Check_IPI
+ * ICP State: IPI
+ *
+ * Besides rejecting any pending interrupts, we also
+ * update XISR and pending_pri to mark IPI as pending.
+ *
+ * PAPR does not describe this state, but if the MFRR is being
+ * made less favored than its earlier value, there might be
+ * a previously-rejected interrupt needing to be resent.
+ * Ideally, we would want to resend only if
+ * prio(pending_interrupt) < mfrr &&
+ * prio(pending_interrupt) < cppr
+ * where pending interrupt is the one that was rejected. But
+ * we don't have that state, so we simply trigger a resend
+ * whenever the MFRR is made less favored.
*/
do {
old_state = new_state = ACCESS_ONCE(icp->state);
@@ -629,13 +644,14 @@ static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
resend = false;
if (mfrr < new_state.cppr) {
/* Reject a pending interrupt if not an IPI */
- if (mfrr <= new_state.pending_pri)
+ if (mfrr <= new_state.pending_pri) {
reject = new_state.xisr;
- new_state.pending_pri = mfrr;
- new_state.xisr = XICS_IPI;
+ new_state.pending_pri = mfrr;
+ new_state.xisr = XICS_IPI;
+ }
}
- if (mfrr > old_state.mfrr && mfrr > new_state.cppr) {
+ if (mfrr > old_state.mfrr) {
resend = new_state.need_resend;
new_state.need_resend = 0;
}
@@ -789,7 +805,7 @@ static noinline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
if (icp->rm_action & XICS_RM_KICK_VCPU)
kvmppc_fast_vcpu_kick(icp->rm_kick_target);
if (icp->rm_action & XICS_RM_CHECK_RESEND)
- icp_check_resend(xics, icp);
+ icp_check_resend(xics, icp->rm_resend_icp);
if (icp->rm_action & XICS_RM_REJECT)
icp_deliver_irq(xics, icp, icp->rm_reject);
if (icp->rm_action & XICS_RM_NOTIFY_EOI)
diff --git a/arch/powerpc/kvm/book3s_xics.h b/arch/powerpc/kvm/book3s_xics.h
index e8aaa7a3f20..73f0f2723c0 100644
--- a/arch/powerpc/kvm/book3s_xics.h
+++ b/arch/powerpc/kvm/book3s_xics.h
@@ -74,6 +74,7 @@ struct kvmppc_icp {
#define XICS_RM_NOTIFY_EOI 0x8
u32 rm_action;
struct kvm_vcpu *rm_kick_target;
+ struct kvmppc_icp *rm_resend_icp;
u32 rm_reject;
u32 rm_eoied_irq;
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index 16095841afe..b29ce752c7d 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -78,7 +78,7 @@ static inline int local_sid_setup_one(struct id *entry)
sid = __this_cpu_inc_return(pcpu_last_used_sid);
if (sid < NUM_TIDS) {
- __this_cpu_write(pcpu_sids)entry[sid], entry);
+ __this_cpu_write(pcpu_sids.entry[sid], entry);
entry->val = sid;
entry->pentry = this_cpu_ptr(&pcpu_sids.entry[sid]);
ret = sid;
@@ -299,14 +299,6 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
kvmppc_e500_recalc_shadow_pid(to_e500(vcpu));
}
-void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
-{
-}
-
-void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
-{
-}
-
static void kvmppc_core_vcpu_load_e500(struct kvm_vcpu *vcpu, int cpu)
{
kvmppc_booke_vcpu_load(vcpu, cpu);
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index c1f8f53cd31..c45eaab752b 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -527,18 +527,12 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = 0;
break;
case KVM_CAP_PPC_RMA:
- r = hv_enabled;
- /* PPC970 requires an RMA */
- if (r && cpu_has_feature(CPU_FTR_ARCH_201))
- r = 2;
+ r = 0;
break;
#endif
case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
- if (hv_enabled)
- r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
- else
- r = 0;
+ r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
r = 1;
#else
diff --git a/arch/powerpc/kvm/trace_book3s.h b/arch/powerpc/kvm/trace_book3s.h
new file mode 100644
index 00000000000..f647ce0f428
--- /dev/null
+++ b/arch/powerpc/kvm/trace_book3s.h
@@ -0,0 +1,32 @@
+#if !defined(_TRACE_KVM_BOOK3S_H)
+#define _TRACE_KVM_BOOK3S_H
+
+/*
+ * Common defines used by the trace macros in trace_pr.h and trace_hv.h
+ */
+
+#define kvm_trace_symbol_exit \
+ {0x100, "SYSTEM_RESET"}, \
+ {0x200, "MACHINE_CHECK"}, \
+ {0x300, "DATA_STORAGE"}, \
+ {0x380, "DATA_SEGMENT"}, \
+ {0x400, "INST_STORAGE"}, \
+ {0x480, "INST_SEGMENT"}, \
+ {0x500, "EXTERNAL"}, \
+ {0x501, "EXTERNAL_LEVEL"}, \
+ {0x502, "EXTERNAL_HV"}, \
+ {0x600, "ALIGNMENT"}, \
+ {0x700, "PROGRAM"}, \
+ {0x800, "FP_UNAVAIL"}, \
+ {0x900, "DECREMENTER"}, \
+ {0x980, "HV_DECREMENTER"}, \
+ {0xc00, "SYSCALL"}, \
+ {0xd00, "TRACE"}, \
+ {0xe00, "H_DATA_STORAGE"}, \
+ {0xe20, "H_INST_STORAGE"}, \
+ {0xe40, "H_EMUL_ASSIST"}, \
+ {0xf00, "PERFMON"}, \
+ {0xf20, "ALTIVEC"}, \
+ {0xf40, "VSX"}
+
+#endif
diff --git a/arch/powerpc/kvm/trace_booke.h b/arch/powerpc/kvm/trace_booke.h
index f7537cf26ce..7ec534d1db9 100644
--- a/arch/powerpc/kvm/trace_booke.h
+++ b/arch/powerpc/kvm/trace_booke.h
@@ -151,6 +151,47 @@ TRACE_EVENT(kvm_booke206_ref_release,
__entry->pfn, __entry->flags)
);
+#ifdef CONFIG_SPE_POSSIBLE
+#define kvm_trace_symbol_irqprio_spe \
+ {BOOKE_IRQPRIO_SPE_UNAVAIL, "SPE_UNAVAIL"}, \
+ {BOOKE_IRQPRIO_SPE_FP_DATA, "SPE_FP_DATA"}, \
+ {BOOKE_IRQPRIO_SPE_FP_ROUND, "SPE_FP_ROUND"},
+#else
+#define kvm_trace_symbol_irqprio_spe
+#endif
+
+#ifdef CONFIG_PPC_E500MC
+#define kvm_trace_symbol_irqprio_e500mc \
+ {BOOKE_IRQPRIO_ALTIVEC_UNAVAIL, "ALTIVEC_UNAVAIL"}, \
+ {BOOKE_IRQPRIO_ALTIVEC_ASSIST, "ALTIVEC_ASSIST"},
+#else
+#define kvm_trace_symbol_irqprio_e500mc
+#endif
+
+#define kvm_trace_symbol_irqprio \
+ kvm_trace_symbol_irqprio_spe \
+ kvm_trace_symbol_irqprio_e500mc \
+ {BOOKE_IRQPRIO_DATA_STORAGE, "DATA_STORAGE"}, \
+ {BOOKE_IRQPRIO_INST_STORAGE, "INST_STORAGE"}, \
+ {BOOKE_IRQPRIO_ALIGNMENT, "ALIGNMENT"}, \
+ {BOOKE_IRQPRIO_PROGRAM, "PROGRAM"}, \
+ {BOOKE_IRQPRIO_FP_UNAVAIL, "FP_UNAVAIL"}, \
+ {BOOKE_IRQPRIO_SYSCALL, "SYSCALL"}, \
+ {BOOKE_IRQPRIO_AP_UNAVAIL, "AP_UNAVAIL"}, \
+ {BOOKE_IRQPRIO_DTLB_MISS, "DTLB_MISS"}, \
+ {BOOKE_IRQPRIO_ITLB_MISS, "ITLB_MISS"}, \
+ {BOOKE_IRQPRIO_MACHINE_CHECK, "MACHINE_CHECK"}, \
+ {BOOKE_IRQPRIO_DEBUG, "DEBUG"}, \
+ {BOOKE_IRQPRIO_CRITICAL, "CRITICAL"}, \
+ {BOOKE_IRQPRIO_WATCHDOG, "WATCHDOG"}, \
+ {BOOKE_IRQPRIO_EXTERNAL, "EXTERNAL"}, \
+ {BOOKE_IRQPRIO_FIT, "FIT"}, \
+ {BOOKE_IRQPRIO_DECREMENTER, "DECREMENTER"}, \
+ {BOOKE_IRQPRIO_PERFORMANCE_MONITOR, "PERFORMANCE_MONITOR"}, \
+ {BOOKE_IRQPRIO_EXTERNAL_LEVEL, "EXTERNAL_LEVEL"}, \
+ {BOOKE_IRQPRIO_DBELL, "DBELL"}, \
+ {BOOKE_IRQPRIO_DBELL_CRIT, "DBELL_CRIT"} \
+
TRACE_EVENT(kvm_booke_queue_irqprio,
TP_PROTO(struct kvm_vcpu *vcpu, unsigned int priority),
TP_ARGS(vcpu, priority),
@@ -167,8 +208,10 @@ TRACE_EVENT(kvm_booke_queue_irqprio,
__entry->pending = vcpu->arch.pending_exceptions;
),
- TP_printk("vcpu=%x prio=%x pending=%lx",
- __entry->cpu_nr, __entry->priority, __entry->pending)
+ TP_printk("vcpu=%x prio=%s pending=%lx",
+ __entry->cpu_nr,
+ __print_symbolic(__entry->priority, kvm_trace_symbol_irqprio),
+ __entry->pending)
);
#endif
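[Editorial note] The TP_printk() change above relies on __print_symbolic(), which resolves the numeric priority against the {value, "name"} pairs in kvm_trace_symbol_irqprio when the ring buffer is formatted, so prio is reported as a name such as DECREMENTER instead of a raw number. Conceptually this is just a table lookup; a hedged, user-space approximation:

/* --- illustrative sketch, not the tracing core's implementation --- */
#include <stddef.h>

struct trace_symbol { unsigned long value; const char *name; };

static const char *print_symbolic(unsigned long val,
				  const struct trace_symbol *tbl, size_t n)
{
	for (size_t i = 0; i < n; i++)
		if (tbl[i].value == val)
			return tbl[i].name;
	return "UNKNOWN";
}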
diff --git a/arch/powerpc/kvm/trace_hv.h b/arch/powerpc/kvm/trace_hv.h
new file mode 100644
index 00000000000..33d9daff578
--- /dev/null
+++ b/arch/powerpc/kvm/trace_hv.h
@@ -0,0 +1,477 @@
+#if !defined(_TRACE_KVM_HV_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KVM_HV_H
+
+#include <linux/tracepoint.h>
+#include "trace_book3s.h"
+#include <asm/hvcall.h>
+#include <asm/kvm_asm.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm_hv
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_hv
+
+#define kvm_trace_symbol_hcall \
+ {H_REMOVE, "H_REMOVE"}, \
+ {H_ENTER, "H_ENTER"}, \
+ {H_READ, "H_READ"}, \
+ {H_CLEAR_MOD, "H_CLEAR_MOD"}, \
+ {H_CLEAR_REF, "H_CLEAR_REF"}, \
+ {H_PROTECT, "H_PROTECT"}, \
+ {H_GET_TCE, "H_GET_TCE"}, \
+ {H_PUT_TCE, "H_PUT_TCE"}, \
+ {H_SET_SPRG0, "H_SET_SPRG0"}, \
+ {H_SET_DABR, "H_SET_DABR"}, \
+ {H_PAGE_INIT, "H_PAGE_INIT"}, \
+ {H_SET_ASR, "H_SET_ASR"}, \
+ {H_ASR_ON, "H_ASR_ON"}, \
+ {H_ASR_OFF, "H_ASR_OFF"}, \
+ {H_LOGICAL_CI_LOAD, "H_LOGICAL_CI_LOAD"}, \
+ {H_LOGICAL_CI_STORE, "H_LOGICAL_CI_STORE"}, \
+ {H_LOGICAL_CACHE_LOAD, "H_LOGICAL_CACHE_LOAD"}, \
+ {H_LOGICAL_CACHE_STORE, "H_LOGICAL_CACHE_STORE"}, \
+ {H_LOGICAL_ICBI, "H_LOGICAL_ICBI"}, \
+ {H_LOGICAL_DCBF, "H_LOGICAL_DCBF"}, \
+ {H_GET_TERM_CHAR, "H_GET_TERM_CHAR"}, \
+ {H_PUT_TERM_CHAR, "H_PUT_TERM_CHAR"}, \
+ {H_REAL_TO_LOGICAL, "H_REAL_TO_LOGICAL"}, \
+ {H_HYPERVISOR_DATA, "H_HYPERVISOR_DATA"}, \
+ {H_EOI, "H_EOI"}, \
+ {H_CPPR, "H_CPPR"}, \
+ {H_IPI, "H_IPI"}, \
+ {H_IPOLL, "H_IPOLL"}, \
+ {H_XIRR, "H_XIRR"}, \
+ {H_PERFMON, "H_PERFMON"}, \
+ {H_MIGRATE_DMA, "H_MIGRATE_DMA"}, \
+ {H_REGISTER_VPA, "H_REGISTER_VPA"}, \
+ {H_CEDE, "H_CEDE"}, \
+ {H_CONFER, "H_CONFER"}, \
+ {H_PROD, "H_PROD"}, \
+ {H_GET_PPP, "H_GET_PPP"}, \
+ {H_SET_PPP, "H_SET_PPP"}, \
+ {H_PURR, "H_PURR"}, \
+ {H_PIC, "H_PIC"}, \
+ {H_REG_CRQ, "H_REG_CRQ"}, \
+ {H_FREE_CRQ, "H_FREE_CRQ"}, \
+ {H_VIO_SIGNAL, "H_VIO_SIGNAL"}, \
+ {H_SEND_CRQ, "H_SEND_CRQ"}, \
+ {H_COPY_RDMA, "H_COPY_RDMA"}, \
+ {H_REGISTER_LOGICAL_LAN, "H_REGISTER_LOGICAL_LAN"}, \
+ {H_FREE_LOGICAL_LAN, "H_FREE_LOGICAL_LAN"}, \
+ {H_ADD_LOGICAL_LAN_BUFFER, "H_ADD_LOGICAL_LAN_BUFFER"}, \
+ {H_SEND_LOGICAL_LAN, "H_SEND_LOGICAL_LAN"}, \
+ {H_BULK_REMOVE, "H_BULK_REMOVE"}, \
+ {H_MULTICAST_CTRL, "H_MULTICAST_CTRL"}, \
+ {H_SET_XDABR, "H_SET_XDABR"}, \
+ {H_STUFF_TCE, "H_STUFF_TCE"}, \
+ {H_PUT_TCE_INDIRECT, "H_PUT_TCE_INDIRECT"}, \
+ {H_CHANGE_LOGICAL_LAN_MAC, "H_CHANGE_LOGICAL_LAN_MAC"}, \
+ {H_VTERM_PARTNER_INFO, "H_VTERM_PARTNER_INFO"}, \
+ {H_REGISTER_VTERM, "H_REGISTER_VTERM"}, \
+ {H_FREE_VTERM, "H_FREE_VTERM"}, \
+ {H_RESET_EVENTS, "H_RESET_EVENTS"}, \
+ {H_ALLOC_RESOURCE, "H_ALLOC_RESOURCE"}, \
+ {H_FREE_RESOURCE, "H_FREE_RESOURCE"}, \
+ {H_MODIFY_QP, "H_MODIFY_QP"}, \
+ {H_QUERY_QP, "H_QUERY_QP"}, \
+ {H_REREGISTER_PMR, "H_REREGISTER_PMR"}, \
+ {H_REGISTER_SMR, "H_REGISTER_SMR"}, \
+ {H_QUERY_MR, "H_QUERY_MR"}, \
+ {H_QUERY_MW, "H_QUERY_MW"}, \
+ {H_QUERY_HCA, "H_QUERY_HCA"}, \
+ {H_QUERY_PORT, "H_QUERY_PORT"}, \
+ {H_MODIFY_PORT, "H_MODIFY_PORT"}, \
+ {H_DEFINE_AQP1, "H_DEFINE_AQP1"}, \
+ {H_GET_TRACE_BUFFER, "H_GET_TRACE_BUFFER"}, \
+ {H_DEFINE_AQP0, "H_DEFINE_AQP0"}, \
+ {H_RESIZE_MR, "H_RESIZE_MR"}, \
+ {H_ATTACH_MCQP, "H_ATTACH_MCQP"}, \
+ {H_DETACH_MCQP, "H_DETACH_MCQP"}, \
+ {H_CREATE_RPT, "H_CREATE_RPT"}, \
+ {H_REMOVE_RPT, "H_REMOVE_RPT"}, \
+ {H_REGISTER_RPAGES, "H_REGISTER_RPAGES"}, \
+ {H_DISABLE_AND_GETC, "H_DISABLE_AND_GETC"}, \
+ {H_ERROR_DATA, "H_ERROR_DATA"}, \
+ {H_GET_HCA_INFO, "H_GET_HCA_INFO"}, \
+ {H_GET_PERF_COUNT, "H_GET_PERF_COUNT"}, \
+ {H_MANAGE_TRACE, "H_MANAGE_TRACE"}, \
+ {H_FREE_LOGICAL_LAN_BUFFER, "H_FREE_LOGICAL_LAN_BUFFER"}, \
+ {H_QUERY_INT_STATE, "H_QUERY_INT_STATE"}, \
+ {H_POLL_PENDING, "H_POLL_PENDING"}, \
+ {H_ILLAN_ATTRIBUTES, "H_ILLAN_ATTRIBUTES"}, \
+ {H_MODIFY_HEA_QP, "H_MODIFY_HEA_QP"}, \
+ {H_QUERY_HEA_QP, "H_QUERY_HEA_QP"}, \
+ {H_QUERY_HEA, "H_QUERY_HEA"}, \
+ {H_QUERY_HEA_PORT, "H_QUERY_HEA_PORT"}, \
+ {H_MODIFY_HEA_PORT, "H_MODIFY_HEA_PORT"}, \
+ {H_REG_BCMC, "H_REG_BCMC"}, \
+ {H_DEREG_BCMC, "H_DEREG_BCMC"}, \
+ {H_REGISTER_HEA_RPAGES, "H_REGISTER_HEA_RPAGES"}, \
+ {H_DISABLE_AND_GET_HEA, "H_DISABLE_AND_GET_HEA"}, \
+ {H_GET_HEA_INFO, "H_GET_HEA_INFO"}, \
+ {H_ALLOC_HEA_RESOURCE, "H_ALLOC_HEA_RESOURCE"}, \
+ {H_ADD_CONN, "H_ADD_CONN"}, \
+ {H_DEL_CONN, "H_DEL_CONN"}, \
+ {H_JOIN, "H_JOIN"}, \
+ {H_VASI_STATE, "H_VASI_STATE"}, \
+ {H_ENABLE_CRQ, "H_ENABLE_CRQ"}, \
+ {H_GET_EM_PARMS, "H_GET_EM_PARMS"}, \
+ {H_SET_MPP, "H_SET_MPP"}, \
+ {H_GET_MPP, "H_GET_MPP"}, \
+ {H_HOME_NODE_ASSOCIATIVITY, "H_HOME_NODE_ASSOCIATIVITY"}, \
+ {H_BEST_ENERGY, "H_BEST_ENERGY"}, \
+ {H_XIRR_X, "H_XIRR_X"}, \
+ {H_RANDOM, "H_RANDOM"}, \
+ {H_COP, "H_COP"}, \
+ {H_GET_MPP_X, "H_GET_MPP_X"}, \
+ {H_SET_MODE, "H_SET_MODE"}, \
+ {H_RTAS, "H_RTAS"}
+
+#define kvm_trace_symbol_kvmret \
+ {RESUME_GUEST, "RESUME_GUEST"}, \
+ {RESUME_GUEST_NV, "RESUME_GUEST_NV"}, \
+ {RESUME_HOST, "RESUME_HOST"}, \
+ {RESUME_HOST_NV, "RESUME_HOST_NV"}
+
+#define kvm_trace_symbol_hcall_rc \
+ {H_SUCCESS, "H_SUCCESS"}, \
+ {H_BUSY, "H_BUSY"}, \
+ {H_CLOSED, "H_CLOSED"}, \
+ {H_NOT_AVAILABLE, "H_NOT_AVAILABLE"}, \
+ {H_CONSTRAINED, "H_CONSTRAINED"}, \
+ {H_PARTIAL, "H_PARTIAL"}, \
+ {H_IN_PROGRESS, "H_IN_PROGRESS"}, \
+ {H_PAGE_REGISTERED, "H_PAGE_REGISTERED"}, \
+ {H_PARTIAL_STORE, "H_PARTIAL_STORE"}, \
+ {H_PENDING, "H_PENDING"}, \
+ {H_CONTINUE, "H_CONTINUE"}, \
+ {H_LONG_BUSY_START_RANGE, "H_LONG_BUSY_START_RANGE"}, \
+ {H_LONG_BUSY_ORDER_1_MSEC, "H_LONG_BUSY_ORDER_1_MSEC"}, \
+ {H_LONG_BUSY_ORDER_10_MSEC, "H_LONG_BUSY_ORDER_10_MSEC"}, \
+ {H_LONG_BUSY_ORDER_100_MSEC, "H_LONG_BUSY_ORDER_100_MSEC"}, \
+ {H_LONG_BUSY_ORDER_1_SEC, "H_LONG_BUSY_ORDER_1_SEC"}, \
+ {H_LONG_BUSY_ORDER_10_SEC, "H_LONG_BUSY_ORDER_10_SEC"}, \
+ {H_LONG_BUSY_ORDER_100_SEC, "H_LONG_BUSY_ORDER_100_SEC"}, \
+ {H_LONG_BUSY_END_RANGE, "H_LONG_BUSY_END_RANGE"}, \
+ {H_TOO_HARD, "H_TOO_HARD"}, \
+ {H_HARDWARE, "H_HARDWARE"}, \
+ {H_FUNCTION, "H_FUNCTION"}, \
+ {H_PRIVILEGE, "H_PRIVILEGE"}, \
+ {H_PARAMETER, "H_PARAMETER"}, \
+ {H_BAD_MODE, "H_BAD_MODE"}, \
+ {H_PTEG_FULL, "H_PTEG_FULL"}, \
+ {H_NOT_FOUND, "H_NOT_FOUND"}, \
+ {H_RESERVED_DABR, "H_RESERVED_DABR"}, \
+ {H_NO_MEM, "H_NO_MEM"}, \
+ {H_AUTHORITY, "H_AUTHORITY"}, \
+ {H_PERMISSION, "H_PERMISSION"}, \
+ {H_DROPPED, "H_DROPPED"}, \
+ {H_SOURCE_PARM, "H_SOURCE_PARM"}, \
+ {H_DEST_PARM, "H_DEST_PARM"}, \
+ {H_REMOTE_PARM, "H_REMOTE_PARM"}, \
+ {H_RESOURCE, "H_RESOURCE"}, \
+ {H_ADAPTER_PARM, "H_ADAPTER_PARM"}, \
+ {H_RH_PARM, "H_RH_PARM"}, \
+ {H_RCQ_PARM, "H_RCQ_PARM"}, \
+ {H_SCQ_PARM, "H_SCQ_PARM"}, \
+ {H_EQ_PARM, "H_EQ_PARM"}, \
+ {H_RT_PARM, "H_RT_PARM"}, \
+ {H_ST_PARM, "H_ST_PARM"}, \
+ {H_SIGT_PARM, "H_SIGT_PARM"}, \
+ {H_TOKEN_PARM, "H_TOKEN_PARM"}, \
+ {H_MLENGTH_PARM, "H_MLENGTH_PARM"}, \
+ {H_MEM_PARM, "H_MEM_PARM"}, \
+ {H_MEM_ACCESS_PARM, "H_MEM_ACCESS_PARM"}, \
+ {H_ATTR_PARM, "H_ATTR_PARM"}, \
+ {H_PORT_PARM, "H_PORT_PARM"}, \
+ {H_MCG_PARM, "H_MCG_PARM"}, \
+ {H_VL_PARM, "H_VL_PARM"}, \
+ {H_TSIZE_PARM, "H_TSIZE_PARM"}, \
+ {H_TRACE_PARM, "H_TRACE_PARM"}, \
+ {H_MASK_PARM, "H_MASK_PARM"}, \
+ {H_MCG_FULL, "H_MCG_FULL"}, \
+ {H_ALIAS_EXIST, "H_ALIAS_EXIST"}, \
+ {H_P_COUNTER, "H_P_COUNTER"}, \
+ {H_TABLE_FULL, "H_TABLE_FULL"}, \
+ {H_ALT_TABLE, "H_ALT_TABLE"}, \
+ {H_MR_CONDITION, "H_MR_CONDITION"}, \
+ {H_NOT_ENOUGH_RESOURCES, "H_NOT_ENOUGH_RESOURCES"}, \
+ {H_R_STATE, "H_R_STATE"}, \
+ {H_RESCINDED, "H_RESCINDED"}, \
+ {H_P2, "H_P2"}, \
+ {H_P3, "H_P3"}, \
+ {H_P4, "H_P4"}, \
+ {H_P5, "H_P5"}, \
+ {H_P6, "H_P6"}, \
+ {H_P7, "H_P7"}, \
+ {H_P8, "H_P8"}, \
+ {H_P9, "H_P9"}, \
+ {H_TOO_BIG, "H_TOO_BIG"}, \
+ {H_OVERLAP, "H_OVERLAP"}, \
+ {H_INTERRUPT, "H_INTERRUPT"}, \
+ {H_BAD_DATA, "H_BAD_DATA"}, \
+ {H_NOT_ACTIVE, "H_NOT_ACTIVE"}, \
+ {H_SG_LIST, "H_SG_LIST"}, \
+ {H_OP_MODE, "H_OP_MODE"}, \
+ {H_COP_HW, "H_COP_HW"}, \
+ {H_UNSUPPORTED_FLAG_START, "H_UNSUPPORTED_FLAG_START"}, \
+ {H_UNSUPPORTED_FLAG_END, "H_UNSUPPORTED_FLAG_END"}, \
+ {H_MULTI_THREADS_ACTIVE, "H_MULTI_THREADS_ACTIVE"}, \
+ {H_OUTSTANDING_COP_OPS, "H_OUTSTANDING_COP_OPS"}
+
+TRACE_EVENT(kvm_guest_enter,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+ TP_ARGS(vcpu),
+
+ TP_STRUCT__entry(
+ __field(int, vcpu_id)
+ __field(unsigned long, pc)
+ __field(unsigned long, pending_exceptions)
+ __field(u8, ceded)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ __entry->pc = kvmppc_get_pc(vcpu);
+ __entry->ceded = vcpu->arch.ceded;
+ __entry->pending_exceptions = vcpu->arch.pending_exceptions;
+ ),
+
+ TP_printk("VCPU %d: pc=0x%lx pexcp=0x%lx ceded=%d",
+ __entry->vcpu_id,
+ __entry->pc,
+ __entry->pending_exceptions, __entry->ceded)
+);
+
+TRACE_EVENT(kvm_guest_exit,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+ TP_ARGS(vcpu),
+
+ TP_STRUCT__entry(
+ __field(int, vcpu_id)
+ __field(int, trap)
+ __field(unsigned long, pc)
+ __field(unsigned long, msr)
+ __field(u8, ceded)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ __entry->trap = vcpu->arch.trap;
+ __entry->ceded = vcpu->arch.ceded;
+ __entry->pc = kvmppc_get_pc(vcpu);
+ __entry->msr = vcpu->arch.shregs.msr;
+ ),
+
+ TP_printk("VCPU %d: trap=%s pc=0x%lx msr=0x%lx, ceded=%d",
+ __entry->vcpu_id,
+ __print_symbolic(__entry->trap, kvm_trace_symbol_exit),
+ __entry->pc, __entry->msr, __entry->ceded
+ )
+);
+
+TRACE_EVENT(kvm_page_fault_enter,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned long *hptep,
+ struct kvm_memory_slot *memslot, unsigned long ea,
+ unsigned long dsisr),
+
+ TP_ARGS(vcpu, hptep, memslot, ea, dsisr),
+
+ TP_STRUCT__entry(
+ __field(int, vcpu_id)
+ __field(unsigned long, hpte_v)
+ __field(unsigned long, hpte_r)
+ __field(unsigned long, gpte_r)
+ __field(unsigned long, ea)
+ __field(u64, base_gfn)
+ __field(u32, slot_flags)
+ __field(u32, dsisr)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ __entry->hpte_v = hptep[0];
+ __entry->hpte_r = hptep[1];
+ __entry->gpte_r = hptep[2];
+ __entry->ea = ea;
+ __entry->dsisr = dsisr;
+ __entry->base_gfn = memslot ? memslot->base_gfn : -1UL;
+ __entry->slot_flags = memslot ? memslot->flags : 0;
+ ),
+
+ TP_printk("VCPU %d: hpte=0x%lx:0x%lx guest=0x%lx ea=0x%lx,%x slot=0x%llx,0x%x",
+ __entry->vcpu_id,
+ __entry->hpte_v, __entry->hpte_r, __entry->gpte_r,
+ __entry->ea, __entry->dsisr,
+ __entry->base_gfn, __entry->slot_flags)
+);
+
+TRACE_EVENT(kvm_page_fault_exit,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned long *hptep, long ret),
+
+ TP_ARGS(vcpu, hptep, ret),
+
+ TP_STRUCT__entry(
+ __field(int, vcpu_id)
+ __field(unsigned long, hpte_v)
+ __field(unsigned long, hpte_r)
+ __field(long, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ __entry->hpte_v = hptep[0];
+ __entry->hpte_r = hptep[1];
+ __entry->ret = ret;
+ ),
+
+ TP_printk("VCPU %d: hpte=0x%lx:0x%lx ret=0x%lx",
+ __entry->vcpu_id,
+ __entry->hpte_v, __entry->hpte_r, __entry->ret)
+);
+
+TRACE_EVENT(kvm_hcall_enter,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+
+ TP_ARGS(vcpu),
+
+ TP_STRUCT__entry(
+ __field(int, vcpu_id)
+ __field(unsigned long, req)
+ __field(unsigned long, gpr4)
+ __field(unsigned long, gpr5)
+ __field(unsigned long, gpr6)
+ __field(unsigned long, gpr7)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ __entry->req = kvmppc_get_gpr(vcpu, 3);
+ __entry->gpr4 = kvmppc_get_gpr(vcpu, 4);
+ __entry->gpr5 = kvmppc_get_gpr(vcpu, 5);
+ __entry->gpr6 = kvmppc_get_gpr(vcpu, 6);
+ __entry->gpr7 = kvmppc_get_gpr(vcpu, 7);
+ ),
+
+ TP_printk("VCPU %d: hcall=%s GPR4-7=0x%lx,0x%lx,0x%lx,0x%lx",
+ __entry->vcpu_id,
+ __print_symbolic(__entry->req, kvm_trace_symbol_hcall),
+ __entry->gpr4, __entry->gpr5, __entry->gpr6, __entry->gpr7)
+);
+
+TRACE_EVENT(kvm_hcall_exit,
+ TP_PROTO(struct kvm_vcpu *vcpu, int ret),
+
+ TP_ARGS(vcpu, ret),
+
+ TP_STRUCT__entry(
+ __field(int, vcpu_id)
+ __field(unsigned long, ret)
+ __field(unsigned long, hcall_rc)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ __entry->ret = ret;
+ __entry->hcall_rc = kvmppc_get_gpr(vcpu, 3);
+ ),
+
+ TP_printk("VCPU %d: ret=%s hcall_rc=%s",
+ __entry->vcpu_id,
+ __print_symbolic(__entry->ret, kvm_trace_symbol_kvmret),
+ __print_symbolic(__entry->ret & RESUME_FLAG_HOST ?
+ H_TOO_HARD : __entry->hcall_rc,
+ kvm_trace_symbol_hcall_rc))
+);
+
+TRACE_EVENT(kvmppc_run_core,
+ TP_PROTO(struct kvmppc_vcore *vc, int where),
+
+ TP_ARGS(vc, where),
+
+ TP_STRUCT__entry(
+ __field(int, n_runnable)
+ __field(int, runner_vcpu)
+ __field(int, where)
+ __field(pid_t, tgid)
+ ),
+
+ TP_fast_assign(
+ __entry->runner_vcpu = vc->runner->vcpu_id;
+ __entry->n_runnable = vc->n_runnable;
+ __entry->where = where;
+ __entry->tgid = current->tgid;
+ ),
+
+ TP_printk("%s runner_vcpu==%d runnable=%d tgid=%d",
+ __entry->where ? "Exit" : "Enter",
+ __entry->runner_vcpu, __entry->n_runnable, __entry->tgid)
+);
+
+TRACE_EVENT(kvmppc_vcore_blocked,
+ TP_PROTO(struct kvmppc_vcore *vc, int where),
+
+ TP_ARGS(vc, where),
+
+ TP_STRUCT__entry(
+ __field(int, n_runnable)
+ __field(int, runner_vcpu)
+ __field(int, where)
+ __field(pid_t, tgid)
+ ),
+
+ TP_fast_assign(
+ __entry->runner_vcpu = vc->runner->vcpu_id;
+ __entry->n_runnable = vc->n_runnable;
+ __entry->where = where;
+ __entry->tgid = current->tgid;
+ ),
+
+ TP_printk("%s runner_vcpu=%d runnable=%d tgid=%d",
+ __entry->where ? "Exit" : "Enter",
+ __entry->runner_vcpu, __entry->n_runnable, __entry->tgid)
+);
+
+TRACE_EVENT(kvmppc_run_vcpu_enter,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+
+ TP_ARGS(vcpu),
+
+ TP_STRUCT__entry(
+ __field(int, vcpu_id)
+ __field(pid_t, tgid)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ __entry->tgid = current->tgid;
+ ),
+
+ TP_printk("VCPU %d: tgid=%d", __entry->vcpu_id, __entry->tgid)
+);
+
+TRACE_EVENT(kvmppc_run_vcpu_exit,
+ TP_PROTO(struct kvm_vcpu *vcpu, struct kvm_run *run),
+
+ TP_ARGS(vcpu, run),
+
+ TP_STRUCT__entry(
+ __field(int, vcpu_id)
+ __field(int, exit)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ __entry->exit = run->exit_reason;
+ __entry->ret = vcpu->arch.ret;
+ ),
+
+ TP_printk("VCPU %d: exit=%d, ret=%d",
+ __entry->vcpu_id, __entry->exit, __entry->ret)
+);
+
+#endif /* _TRACE_KVM_HV_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
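[Editorial note] Because the new header sets TRACE_SYSTEM to kvm_hv, each TRACE_EVENT() above becomes a trace_<name>() hook the HV code can call, and the whole group is controllable from user space through tracefs. A hedged example of switching the group on from a small C program; the mount point is an assumption (tracefs is commonly at /sys/kernel/tracing, or /sys/kernel/debug/tracing on older setups):

/* --- illustrative example, path is an assumption --- */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/tracing/events/kvm_hv/enable";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fputs("1\n", f);	/* write "0" to disable the group again */
	fclose(f);
	return 0;
}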
diff --git a/arch/powerpc/kvm/trace_pr.h b/arch/powerpc/kvm/trace_pr.h
index e1357cd8dc1..810507cb688 100644
--- a/arch/powerpc/kvm/trace_pr.h
+++ b/arch/powerpc/kvm/trace_pr.h
@@ -3,36 +3,13 @@
#define _TRACE_KVM_PR_H
#include <linux/tracepoint.h>
+#include "trace_book3s.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm_pr
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_pr
-#define kvm_trace_symbol_exit \
- {0x100, "SYSTEM_RESET"}, \
- {0x200, "MACHINE_CHECK"}, \
- {0x300, "DATA_STORAGE"}, \
- {0x380, "DATA_SEGMENT"}, \
- {0x400, "INST_STORAGE"}, \
- {0x480, "INST_SEGMENT"}, \
- {0x500, "EXTERNAL"}, \
- {0x501, "EXTERNAL_LEVEL"}, \
- {0x502, "EXTERNAL_HV"}, \
- {0x600, "ALIGNMENT"}, \
- {0x700, "PROGRAM"}, \
- {0x800, "FP_UNAVAIL"}, \
- {0x900, "DECREMENTER"}, \
- {0x980, "HV_DECREMENTER"}, \
- {0xc00, "SYSCALL"}, \
- {0xd00, "TRACE"}, \
- {0xe00, "H_DATA_STORAGE"}, \
- {0xe20, "H_INST_STORAGE"}, \
- {0xe40, "H_EMUL_ASSIST"}, \
- {0xf00, "PERFMON"}, \
- {0xf20, "ALTIVEC"}, \
- {0xf40, "VSX"}
-
TRACE_EVENT(kvm_book3s_reenter,
TP_PROTO(int r, struct kvm_vcpu *vcpu),
TP_ARGS(r, vcpu),
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index dba34088da2..f162d0b8eea 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -177,7 +177,7 @@ static ssize_t _name##_show(struct device *dev, \
} \
ret = sprintf(buf, _fmt, _expr); \
e_free: \
- kfree(page); \
+ kmem_cache_free(hv_page_cache, page); \
return ret; \
} \
static DEVICE_ATTR_RO(_name)
@@ -217,11 +217,14 @@ static bool is_physical_domain(int domain)
domain == HV_24X7_PERF_DOMAIN_PHYSICAL_CORE;
}
+DEFINE_PER_CPU(char, hv_24x7_reqb[4096]) __aligned(4096);
+DEFINE_PER_CPU(char, hv_24x7_resb[4096]) __aligned(4096);
+
static unsigned long single_24x7_request(u8 domain, u32 offset, u16 ix,
u16 lpar, u64 *res,
bool success_expected)
{
- unsigned long ret = -ENOMEM;
+ unsigned long ret;
/*
* request_buffer and result_buffer are not required to be 4k aligned,
@@ -243,13 +246,11 @@ static unsigned long single_24x7_request(u8 domain, u32 offset, u16 ix,
BUILD_BUG_ON(sizeof(*request_buffer) > 4096);
BUILD_BUG_ON(sizeof(*result_buffer) > 4096);
- request_buffer = kmem_cache_zalloc(hv_page_cache, GFP_USER);
- if (!request_buffer)
- goto out;
+ request_buffer = (void *)get_cpu_var(hv_24x7_reqb);
+ result_buffer = (void *)get_cpu_var(hv_24x7_resb);
- result_buffer = kmem_cache_zalloc(hv_page_cache, GFP_USER);
- if (!result_buffer)
- goto out_free_request_buffer;
+ memset(request_buffer, 0, 4096);
+ memset(result_buffer, 0, 4096);
*request_buffer = (struct reqb) {
.buf = {
@@ -278,15 +279,11 @@ static unsigned long single_24x7_request(u8 domain, u32 offset, u16 ix,
domain, offset, ix, lpar, ret, ret,
result_buffer->buf.detailed_rc,
result_buffer->buf.failing_request_ix);
- goto out_free_result_buffer;
+ goto out;
}
*res = be64_to_cpu(result_buffer->result);
-out_free_result_buffer:
- kfree(result_buffer);
-out_free_request_buffer:
- kfree(request_buffer);
out:
return ret;
}
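[Editorial note] The hv-24x7 change above replaces per-call kmem_cache_zalloc()/kfree() with two statically reserved, 4 KiB-aligned per-CPU buffers, so the request path can no longer fail with -ENOMEM and the alignment the hypervisor interface expects is guaranteed by the declaration itself. A rough user-space analogue of that idea (DEFINE_PER_CPU and get_cpu_var are kernel-only; the names below are made up):

/* --- rough analogue, not kernel code --- */
#include <stdalign.h>
#include <string.h>

struct h24x7_bufs {
	alignas(4096) char reqb[4096];	/* request buffer, page aligned by type */
	alignas(4096) char resb[4096];	/* result buffer, page aligned by type */
};

static struct h24x7_bufs bufs;		/* one pair per CPU in the real code */

static void prepare_buffers(struct h24x7_bufs *b)
{
	/* zeroing on every call replaces the old zalloc */
	memset(b->reqb, 0, sizeof(b->reqb));
	memset(b->resb, 0, sizeof(b->resb));
}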
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index 0a299be588a..54eca8b3b28 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -158,6 +158,43 @@ opal_tracepoint_return:
blr
#endif
+/*
+ * Make opal call in realmode. This is a generic function to be called
+ * from realmode. It handles endianness.
+ *
+ * r13 - paca pointer
+ * r1 - stack pointer
+ * r0 - opal token
+ */
+_GLOBAL(opal_call_realmode)
+ mflr r12
+ std r12,PPC_LR_STKOFF(r1)
+ ld r2,PACATOC(r13)
+ /* Set opal return address */
+ LOAD_REG_ADDR(r12,return_from_opal_call)
+ mtlr r12
+
+ mfmsr r12
+#ifdef __LITTLE_ENDIAN__
+ /* Handle endian-ness */
+ li r11,MSR_LE
+ andc r12,r12,r11
+#endif
+ mtspr SPRN_HSRR1,r12
+ LOAD_REG_ADDR(r11,opal)
+ ld r12,8(r11)
+ ld r2,0(r11)
+ mtspr SPRN_HSRR0,r12
+ hrfid
+
+return_from_opal_call:
+#ifdef __LITTLE_ENDIAN__
+ FIXUP_ENDIAN
+#endif
+ ld r12,PPC_LR_STKOFF(r1)
+ mtlr r12
+ blr
+
OPAL_CALL(opal_invalid_call, OPAL_INVALID_CALL);
OPAL_CALL(opal_console_write, OPAL_CONSOLE_WRITE);
OPAL_CALL(opal_console_read, OPAL_CONSOLE_READ);
@@ -247,6 +284,7 @@ OPAL_CALL(opal_sensor_read, OPAL_SENSOR_READ);
OPAL_CALL(opal_get_param, OPAL_GET_PARAM);
OPAL_CALL(opal_set_param, OPAL_SET_PARAM);
OPAL_CALL(opal_handle_hmi, OPAL_HANDLE_HMI);
+OPAL_CALL(opal_slw_set_reg, OPAL_SLW_SET_REG);
OPAL_CALL(opal_register_dump_region, OPAL_REGISTER_DUMP_REGION);
OPAL_CALL(opal_unregister_dump_region, OPAL_UNREGISTER_DUMP_REGION);
OPAL_CALL(opal_pci_set_phb_cxl_mode, OPAL_PCI_SET_PHB_CXL_MODE);
@@ -254,3 +292,4 @@ OPAL_CALL(opal_tpo_write, OPAL_WRITE_TPO);
OPAL_CALL(opal_tpo_read, OPAL_READ_TPO);
OPAL_CALL(opal_ipmi_send, OPAL_IPMI_SEND);
OPAL_CALL(opal_ipmi_recv, OPAL_IPMI_RECV);
+OPAL_CALL(opal_i2c_request, OPAL_I2C_REQUEST);
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index cb0b6de79cd..f10b9ec8c1f 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -9,8 +9,9 @@
* 2 of the License, or (at your option) any later version.
*/
-#undef DEBUG
+#define pr_fmt(fmt) "opal: " fmt
+#include <linux/printk.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
@@ -625,6 +626,39 @@ static int opal_sysfs_init(void)
return 0;
}
+static ssize_t symbol_map_read(struct file *fp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ return memory_read_from_buffer(buf, count, &off, bin_attr->private,
+ bin_attr->size);
+}
+
+static BIN_ATTR_RO(symbol_map, 0);
+
+static void opal_export_symmap(void)
+{
+ const __be64 *syms;
+ unsigned int size;
+ struct device_node *fw;
+ int rc;
+
+ fw = of_find_node_by_path("/ibm,opal/firmware");
+ if (!fw)
+ return;
+ syms = of_get_property(fw, "symbol-map", &size);
+ if (!syms || size != 2 * sizeof(__be64))
+ return;
+
+ /* Setup attributes */
+ bin_attr_symbol_map.private = __va(be64_to_cpu(syms[0]));
+ bin_attr_symbol_map.size = be64_to_cpu(syms[1]);
+
+ rc = sysfs_create_bin_file(opal_kobj, &bin_attr_symbol_map);
+ if (rc)
+ pr_warn("Error %d creating OPAL symbols file\n", rc);
+}
+
static void __init opal_dump_region_init(void)
{
void *addr;
@@ -653,6 +687,14 @@ static void opal_ipmi_init(struct device_node *opal_node)
of_platform_device_create(np, NULL, NULL);
}
+static void opal_i2c_create_devs(void)
+{
+ struct device_node *np;
+
+ for_each_compatible_node(np, NULL, "ibm,opal-i2c")
+ of_platform_device_create(np, NULL, NULL);
+}
+
static int __init opal_init(void)
{
struct device_node *np, *consoles;
@@ -679,6 +721,9 @@ static int __init opal_init(void)
of_node_put(consoles);
}
+ /* Create i2c platform devices */
+ opal_i2c_create_devs();
+
/* Find all OPAL interrupts and request them */
irqs = of_get_property(opal_node, "opal-interrupts", &irqlen);
pr_debug("opal: Found %d interrupts reserved for OPAL\n",
@@ -702,6 +747,8 @@ static int __init opal_init(void)
/* Create "opal" kobject under /sys/firmware */
rc = opal_sysfs_init();
if (rc == 0) {
+ /* Export symbol map to userspace */
+ opal_export_symmap();
/* Setup dump region interface */
opal_dump_region_init();
/* Setup error log interface */
@@ -824,3 +871,4 @@ EXPORT_SYMBOL_GPL(opal_rtc_read);
EXPORT_SYMBOL_GPL(opal_rtc_write);
EXPORT_SYMBOL_GPL(opal_tpo_read);
EXPORT_SYMBOL_GPL(opal_tpo_write);
+EXPORT_SYMBOL_GPL(opal_i2c_request);
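The symbol_map attribute added above hangs off the "opal" kobject created later in opal_init(), so the firmware symbol map becomes readable from userspace. A minimal userspace sketch; the /sys/firmware/opal/symbol_map path is inferred from the kobject and attribute names in this patch, not stated elsewhere:

	/* Minimal sketch: dump the OPAL symbol map exported by opal_export_symmap().
	 * The path below is an assumption derived from the kobject/attribute names. */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/firmware/opal/symbol_map", "rb");
		char buf[4096];
		size_t n;

		if (!f) {
			perror("symbol_map");
			return 1;
		}
		while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
			fwrite(buf, 1, n, stdout);
		fclose(f);
		return 0;
	}
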
diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h
index 6c8e2d188cd..604c48e7879 100644
--- a/arch/powerpc/platforms/powernv/powernv.h
+++ b/arch/powerpc/platforms/powernv/powernv.h
@@ -29,6 +29,8 @@ static inline u64 pnv_pci_dma_get_required_mask(struct pci_dev *pdev)
}
#endif
+extern u32 pnv_get_supported_cpuidle_states(void);
+
extern void pnv_lpc_init(void);
bool cpu_core_split_required(void);
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 30b1c3e298a..b700a329c31 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -36,8 +36,12 @@
#include <asm/opal.h>
#include <asm/kexec.h>
#include <asm/smp.h>
+#include <asm/cputhreads.h>
+#include <asm/cpuidle.h>
+#include <asm/code-patching.h>
#include "powernv.h"
+#include "subcore.h"
static void __init pnv_setup_arch(void)
{
@@ -288,6 +292,168 @@ static void __init pnv_setup_machdep_rtas(void)
}
#endif /* CONFIG_PPC_POWERNV_RTAS */
+static u32 supported_cpuidle_states;
+
+int pnv_save_sprs_for_winkle(void)
+{
+ int cpu;
+ int rc;
+
+ /*
+ * hid0, hid1, hid4, hid5, hmeer and lpcr values are symmetric across
+ * all CPUs at boot. Read these register values on the current CPU and
+ * use the same values across all CPUs.
+ */
+ uint64_t lpcr_val = mfspr(SPRN_LPCR);
+ uint64_t hid0_val = mfspr(SPRN_HID0);
+ uint64_t hid1_val = mfspr(SPRN_HID1);
+ uint64_t hid4_val = mfspr(SPRN_HID4);
+ uint64_t hid5_val = mfspr(SPRN_HID5);
+ uint64_t hmeer_val = mfspr(SPRN_HMEER);
+
+ for_each_possible_cpu(cpu) {
+ uint64_t pir = get_hard_smp_processor_id(cpu);
+ uint64_t hsprg0_val = (uint64_t)&paca[cpu];
+
+ /*
+ * HSPRG0 is used to store the CPU's paca pointer, so its last
+ * 3 bits are guaranteed to be 0. Program the SLW to restore HSPRG0
+ * with the 63rd bit set, so that when a thread wakes up at 0x100 we
+ * can use this bit to distinguish between fastsleep and
+ * deep winkle.
+ */
+ hsprg0_val |= 1;
+
+ rc = opal_slw_set_reg(pir, SPRN_HSPRG0, hsprg0_val);
+ if (rc != 0)
+ return rc;
+
+ rc = opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val);
+ if (rc != 0)
+ return rc;
+
+ /* HIDs are per core registers */
+ if (cpu_thread_in_core(cpu) == 0) {
+
+ rc = opal_slw_set_reg(pir, SPRN_HMEER, hmeer_val);
+ if (rc != 0)
+ return rc;
+
+ rc = opal_slw_set_reg(pir, SPRN_HID0, hid0_val);
+ if (rc != 0)
+ return rc;
+
+ rc = opal_slw_set_reg(pir, SPRN_HID1, hid1_val);
+ if (rc != 0)
+ return rc;
+
+ rc = opal_slw_set_reg(pir, SPRN_HID4, hid4_val);
+ if (rc != 0)
+ return rc;
+
+ rc = opal_slw_set_reg(pir, SPRN_HID5, hid5_val);
+ if (rc != 0)
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static void pnv_alloc_idle_core_states(void)
+{
+ int i, j;
+ int nr_cores = cpu_nr_cores();
+ u32 *core_idle_state;
+
+ /*
+ * core_idle_state - The first 8 bits track the idle state of each thread
+ * of the core; the bit above them is the lock bit. Initially all thread
+ * bits are set. They are cleared when a thread enters a deep idle state
+ * such as sleep or winkle. Initially the lock bit is cleared.
+ * The lock bit has two purposes:
+ * a. While the first thread is restoring core state, it prevents
+ * other threads in the core from switching to process context.
+ * b. While the last thread in the core is saving the core state, it
+ * prevents a different thread from waking up.
+ */
+ for (i = 0; i < nr_cores; i++) {
+ int first_cpu = i * threads_per_core;
+ int node = cpu_to_node(first_cpu);
+
+ core_idle_state = kmalloc_node(sizeof(u32), GFP_KERNEL, node);
+ *core_idle_state = PNV_CORE_IDLE_THREAD_BITS;
+
+ for (j = 0; j < threads_per_core; j++) {
+ int cpu = first_cpu + j;
+
+ paca[cpu].core_idle_state_ptr = core_idle_state;
+ paca[cpu].thread_idle_state = PNV_THREAD_RUNNING;
+ paca[cpu].thread_mask = 1 << j;
+ }
+ }
+
+ update_subcore_sibling_mask();
+
+ if (supported_cpuidle_states & OPAL_PM_WINKLE_ENABLED)
+ pnv_save_sprs_for_winkle();
+}
+
+u32 pnv_get_supported_cpuidle_states(void)
+{
+ return supported_cpuidle_states;
+}
+EXPORT_SYMBOL_GPL(pnv_get_supported_cpuidle_states);
+
+static int __init pnv_init_idle_states(void)
+{
+ struct device_node *power_mgt;
+ int dt_idle_states;
+ const __be32 *idle_state_flags;
+ u32 len_flags, flags;
+ int i;
+
+ supported_cpuidle_states = 0;
+
+ if (cpuidle_disable != IDLE_NO_OVERRIDE)
+ return 0;
+
+ if (!firmware_has_feature(FW_FEATURE_OPALv3))
+ return 0;
+
+ power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
+ if (!power_mgt) {
+ pr_warn("opal: PowerMgmt Node not found\n");
+ return 0;
+ }
+
+ idle_state_flags = of_get_property(power_mgt,
+ "ibm,cpu-idle-state-flags", &len_flags);
+ if (!idle_state_flags) {
+ pr_warn("DT-PowerMgmt: missing ibm,cpu-idle-state-flags\n");
+ return 0;
+ }
+
+ dt_idle_states = len_flags / sizeof(u32);
+
+ for (i = 0; i < dt_idle_states; i++) {
+ flags = be32_to_cpu(idle_state_flags[i]);
+ supported_cpuidle_states |= flags;
+ }
+ if (!(supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
+ patch_instruction(
+ (unsigned int *)pnv_fastsleep_workaround_at_entry,
+ PPC_INST_NOP);
+ patch_instruction(
+ (unsigned int *)pnv_fastsleep_workaround_at_exit,
+ PPC_INST_NOP);
+ }
+ pnv_alloc_idle_core_states();
+ return 0;
+}
+
+subsys_initcall(pnv_init_idle_states);
+
static int __init pnv_probe(void)
{
unsigned long root = of_get_flat_dt_root();
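A rough C illustration of the per-core idle word described in pnv_alloc_idle_core_states(): thread bits in the low byte, a lock bit above them. The macro values and the compare-and-swap helper are assumptions made for the sketch; the real fastsleep/winkle entry path manipulates this word in assembly, not C.

	/* Illustrative only: how the per-core idle word could be driven from C.
	 * Bit layout mirrors the comment above; values are assumed. */
	#define CORE_IDLE_THREAD_BITS	0xffu		/* assumed value */
	#define CORE_IDLE_LOCK_BIT	(1u << 8)	/* assumed value */

	static int core_enter_deep_idle(unsigned int *core_idle_state,
					unsigned int thread_mask)
	{
		unsigned int old, new;

		do {
			old = *core_idle_state;
			new = old & ~thread_mask;	/* mark this thread idle */
		} while (!__sync_bool_compare_and_swap(core_idle_state, old, new));

		/* The last thread going down saves the core state,
		 * serialized by the lock bit. */
		return (new & CORE_IDLE_THREAD_BITS) == 0;
	}
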
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index b716f666e48..fc34025ef82 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -150,6 +150,7 @@ static void pnv_smp_cpu_kill_self(void)
{
unsigned int cpu;
unsigned long srr1;
+ u32 idle_states;
/* Standard hot unplug procedure */
local_irq_disable();
@@ -160,13 +161,23 @@ static void pnv_smp_cpu_kill_self(void)
generic_set_cpu_dead(cpu);
smp_wmb();
+ idle_states = pnv_get_supported_cpuidle_states();
/* We don't want to take decrementer interrupts while we are offline,
* so clear LPCR:PECE1. We keep PECE2 enabled.
*/
mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
while (!generic_check_cpu_restart(cpu)) {
+
ppc64_runlatch_off();
- srr1 = power7_nap(1);
+
+ if (idle_states & OPAL_PM_WINKLE_ENABLED)
+ srr1 = power7_winkle();
+ else if ((idle_states & OPAL_PM_SLEEP_ENABLED) ||
+ (idle_states & OPAL_PM_SLEEP_ENABLED_ER1))
+ srr1 = power7_sleep();
+ else
+ srr1 = power7_nap(1);
+
ppc64_runlatch_on();
/*
@@ -198,13 +209,27 @@ static void pnv_smp_cpu_kill_self(void)
#endif /* CONFIG_HOTPLUG_CPU */
+static int pnv_cpu_bootable(unsigned int nr)
+{
+ /*
+ * Starting with POWER8, the subcore logic relies on all threads of a
+ * core being booted so that they can participate in split mode
+ * switches. So on those machines we ignore the smt_enabled_at_boot
+ * setting (smt-enabled on the kernel command line).
+ */
+ if (cpu_has_feature(CPU_FTR_ARCH_207S))
+ return 1;
+
+ return smp_generic_cpu_bootable(nr);
+}
+
static struct smp_ops_t pnv_smp_ops = {
.message_pass = smp_muxed_ipi_message_pass,
.cause_ipi = NULL, /* Filled at runtime by xics_smp_probe() */
.probe = xics_smp_probe,
.kick_cpu = pnv_smp_kick_cpu,
.setup_cpu = pnv_smp_setup_cpu,
- .cpu_bootable = smp_generic_cpu_bootable,
+ .cpu_bootable = pnv_cpu_bootable,
#ifdef CONFIG_HOTPLUG_CPU
.cpu_disable = pnv_smp_cpu_disable,
.cpu_die = generic_cpu_die,
diff --git a/arch/powerpc/platforms/powernv/subcore.c b/arch/powerpc/platforms/powernv/subcore.c
index c87f96b79d1..f60f80ada90 100644
--- a/arch/powerpc/platforms/powernv/subcore.c
+++ b/arch/powerpc/platforms/powernv/subcore.c
@@ -160,6 +160,18 @@ static void wait_for_sync_step(int step)
mb();
}
+static void update_hid_in_slw(u64 hid0)
+{
+ u64 idle_states = pnv_get_supported_cpuidle_states();
+
+ if (idle_states & OPAL_PM_WINKLE_ENABLED) {
+ /* OPAL call to patch slw with the new HID0 value */
+ u64 cpu_pir = hard_smp_processor_id();
+
+ opal_slw_set_reg(cpu_pir, SPRN_HID0, hid0);
+ }
+}
+
static void unsplit_core(void)
{
u64 hid0, mask;
@@ -179,6 +191,7 @@ static void unsplit_core(void)
hid0 = mfspr(SPRN_HID0);
hid0 &= ~HID0_POWER8_DYNLPARDIS;
mtspr(SPRN_HID0, hid0);
+ update_hid_in_slw(hid0);
while (mfspr(SPRN_HID0) & mask)
cpu_relax();
@@ -215,6 +228,7 @@ static void split_core(int new_mode)
hid0 = mfspr(SPRN_HID0);
hid0 |= HID0_POWER8_DYNLPARDIS | split_parms[i].value;
mtspr(SPRN_HID0, hid0);
+ update_hid_in_slw(hid0);
/* Wait for it to happen */
while (!(mfspr(SPRN_HID0) & split_parms[i].mask))
@@ -251,6 +265,25 @@ bool cpu_core_split_required(void)
return true;
}
+void update_subcore_sibling_mask(void)
+{
+ int cpu;
+ /*
+ * Sibling mask for the first CPU of a subcore. Left-shift it by the
+ * required number of bits to get the sibling mask for the remaining CPUs.
+ */
+ int sibling_mask_first_cpu = (1 << threads_per_subcore) - 1;
+
+ for_each_possible_cpu(cpu) {
+ int tid = cpu_thread_in_core(cpu);
+ int offset = (tid / threads_per_subcore) * threads_per_subcore;
+ int mask = sibling_mask_first_cpu << offset;
+
+ paca[cpu].subcore_sibling_mask = mask;
+
+ }
+}
+
static int cpu_update_split_mode(void *data)
{
int cpu, new_mode = *(int *)data;
@@ -284,6 +317,7 @@ static int cpu_update_split_mode(void *data)
/* Make the new mode public */
subcores_per_core = new_mode;
threads_per_subcore = threads_per_core / subcores_per_core;
+ update_subcore_sibling_mask();
/* Make sure the new mode is written before we exit */
mb();
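A short worked example of the mask computation in update_subcore_sibling_mask(), assuming a core of 8 threads split into subcores of 4 threads (values are illustrative only):

	/* Illustrative: with threads_per_subcore = 4, the first-CPU mask is 0x0f;
	 * a thread with tid 5 sits in the second subcore, so offset = 4 and the
	 * mask becomes 0xf0. */
	#include <stdio.h>

	int main(void)
	{
		int threads_per_subcore = 4;	/* assumed split mode */
		int first_mask = (1 << threads_per_subcore) - 1;
		int tid;

		for (tid = 0; tid < 8; tid++) {
			int offset = (tid / threads_per_subcore) * threads_per_subcore;
			printf("tid %d -> sibling mask 0x%02x\n",
			       tid, first_mask << offset);
		}
		return 0;
	}
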
diff --git a/arch/powerpc/platforms/powernv/subcore.h b/arch/powerpc/platforms/powernv/subcore.h
index 148abc91deb..84e02ae5289 100644
--- a/arch/powerpc/platforms/powernv/subcore.h
+++ b/arch/powerpc/platforms/powernv/subcore.h
@@ -14,5 +14,12 @@
#define SYNC_STEP_FINISHED 3 /* Set by secondary when split/unsplit is done */
#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_SMP
void split_core_secondary_loop(u8 *state);
-#endif
+extern void update_subcore_sibling_mask(void);
+#else
+static inline void update_subcore_sibling_mask(void) { };
+#endif /* CONFIG_SMP */
+
+#endif /* __ASSEMBLY__ */
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 2175f911a73..9cba74d5d85 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -123,7 +123,7 @@ struct kvm_s390_sie_block {
#define ICPT_PARTEXEC 0x38
#define ICPT_IOINST 0x40
__u8 icptcode; /* 0x0050 */
- __u8 reserved51; /* 0x0051 */
+ __u8 icptstatus; /* 0x0051 */
__u16 ihcpu; /* 0x0052 */
__u8 reserved54[2]; /* 0x0054 */
__u16 ipa; /* 0x0056 */
@@ -226,10 +226,17 @@ struct kvm_vcpu_stat {
u32 instruction_sigp_sense_running;
u32 instruction_sigp_external_call;
u32 instruction_sigp_emergency;
+ u32 instruction_sigp_cond_emergency;
+ u32 instruction_sigp_start;
u32 instruction_sigp_stop;
+ u32 instruction_sigp_stop_store_status;
+ u32 instruction_sigp_store_status;
u32 instruction_sigp_arch;
u32 instruction_sigp_prefix;
u32 instruction_sigp_restart;
+ u32 instruction_sigp_init_cpu_reset;
+ u32 instruction_sigp_cpu_reset;
+ u32 instruction_sigp_unknown;
u32 diagnose_10;
u32 diagnose_44;
u32 diagnose_9c;
@@ -288,6 +295,79 @@ struct kvm_vcpu_stat {
#define PGM_PER 0x80
#define PGM_CRYPTO_OPERATION 0x119
+/* irq types in order of priority */
+enum irq_types {
+ IRQ_PEND_MCHK_EX = 0,
+ IRQ_PEND_SVC,
+ IRQ_PEND_PROG,
+ IRQ_PEND_MCHK_REP,
+ IRQ_PEND_EXT_IRQ_KEY,
+ IRQ_PEND_EXT_MALFUNC,
+ IRQ_PEND_EXT_EMERGENCY,
+ IRQ_PEND_EXT_EXTERNAL,
+ IRQ_PEND_EXT_CLOCK_COMP,
+ IRQ_PEND_EXT_CPU_TIMER,
+ IRQ_PEND_EXT_TIMING,
+ IRQ_PEND_EXT_SERVICE,
+ IRQ_PEND_EXT_HOST,
+ IRQ_PEND_PFAULT_INIT,
+ IRQ_PEND_PFAULT_DONE,
+ IRQ_PEND_VIRTIO,
+ IRQ_PEND_IO_ISC_0,
+ IRQ_PEND_IO_ISC_1,
+ IRQ_PEND_IO_ISC_2,
+ IRQ_PEND_IO_ISC_3,
+ IRQ_PEND_IO_ISC_4,
+ IRQ_PEND_IO_ISC_5,
+ IRQ_PEND_IO_ISC_6,
+ IRQ_PEND_IO_ISC_7,
+ IRQ_PEND_SIGP_STOP,
+ IRQ_PEND_RESTART,
+ IRQ_PEND_SET_PREFIX,
+ IRQ_PEND_COUNT
+};
+
+/*
+ * Repressible (non-floating) machine check interrupts
+ * subclass bits in MCIC
+ */
+#define MCHK_EXTD_BIT 58
+#define MCHK_DEGR_BIT 56
+#define MCHK_WARN_BIT 55
+#define MCHK_REP_MASK ((1UL << MCHK_DEGR_BIT) | \
+ (1UL << MCHK_EXTD_BIT) | \
+ (1UL << MCHK_WARN_BIT))
+
+/* Exigent machine check interrupts subclass bits in MCIC */
+#define MCHK_SD_BIT 63
+#define MCHK_PD_BIT 62
+#define MCHK_EX_MASK ((1UL << MCHK_SD_BIT) | (1UL << MCHK_PD_BIT))
+
+#define IRQ_PEND_EXT_MASK ((1UL << IRQ_PEND_EXT_IRQ_KEY) | \
+ (1UL << IRQ_PEND_EXT_CLOCK_COMP) | \
+ (1UL << IRQ_PEND_EXT_CPU_TIMER) | \
+ (1UL << IRQ_PEND_EXT_MALFUNC) | \
+ (1UL << IRQ_PEND_EXT_EMERGENCY) | \
+ (1UL << IRQ_PEND_EXT_EXTERNAL) | \
+ (1UL << IRQ_PEND_EXT_TIMING) | \
+ (1UL << IRQ_PEND_EXT_HOST) | \
+ (1UL << IRQ_PEND_EXT_SERVICE) | \
+ (1UL << IRQ_PEND_VIRTIO) | \
+ (1UL << IRQ_PEND_PFAULT_INIT) | \
+ (1UL << IRQ_PEND_PFAULT_DONE))
+
+#define IRQ_PEND_IO_MASK ((1UL << IRQ_PEND_IO_ISC_0) | \
+ (1UL << IRQ_PEND_IO_ISC_1) | \
+ (1UL << IRQ_PEND_IO_ISC_2) | \
+ (1UL << IRQ_PEND_IO_ISC_3) | \
+ (1UL << IRQ_PEND_IO_ISC_4) | \
+ (1UL << IRQ_PEND_IO_ISC_5) | \
+ (1UL << IRQ_PEND_IO_ISC_6) | \
+ (1UL << IRQ_PEND_IO_ISC_7))
+
+#define IRQ_PEND_MCHK_MASK ((1UL << IRQ_PEND_MCHK_REP) | \
+ (1UL << IRQ_PEND_MCHK_EX))
+
struct kvm_s390_interrupt_info {
struct list_head list;
u64 type;
@@ -306,14 +386,25 @@ struct kvm_s390_interrupt_info {
#define ACTION_STORE_ON_STOP (1<<0)
#define ACTION_STOP_ON_STOP (1<<1)
+struct kvm_s390_irq_payload {
+ struct kvm_s390_io_info io;
+ struct kvm_s390_ext_info ext;
+ struct kvm_s390_pgm_info pgm;
+ struct kvm_s390_emerg_info emerg;
+ struct kvm_s390_extcall_info extcall;
+ struct kvm_s390_prefix_info prefix;
+ struct kvm_s390_mchk_info mchk;
+};
+
struct kvm_s390_local_interrupt {
spinlock_t lock;
- struct list_head list;
- atomic_t active;
struct kvm_s390_float_interrupt *float_int;
wait_queue_head_t *wq;
atomic_t *cpuflags;
unsigned int action_bits;
+ DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
+ struct kvm_s390_irq_payload irq;
+ unsigned long pending_irqs;
};
struct kvm_s390_float_interrupt {
@@ -434,6 +525,8 @@ struct kvm_arch{
int user_cpu_state_ctrl;
struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
wait_queue_head_t ipte_wq;
+ int ipte_lock_count;
+ struct mutex ipte_mutex;
spinlock_t start_stop_lock;
struct kvm_s390_crypto crypto;
};
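Because enum irq_types above is declared in priority order, the lowest set bit in pending_irqs is always the next interrupt to deliver. A small sketch of that selection; the helper name and the blocked_mask parameter are illustrative, the real scan lives in interrupt.c:

	/* Illustrative: pick the highest-priority deliverable local interrupt. */
	static inline int find_next_deliverable(unsigned long pending,
						unsigned long blocked_mask)
	{
		unsigned long deliverable = pending & ~blocked_mask;

		if (!deliverable)
			return IRQ_PEND_COUNT;		/* nothing to deliver */
		return __builtin_ctzl(deliverable);	/* lowest bit = highest prio */
	}
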
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index e510b9460ef..3009c2ba46d 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -24,6 +24,7 @@ void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
unsigned long key, bool nq);
+unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr);
static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
{
diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h
index 49576115dbb..fad4ae23ece 100644
--- a/arch/s390/include/asm/sigp.h
+++ b/arch/s390/include/asm/sigp.h
@@ -10,6 +10,7 @@
#define SIGP_RESTART 6
#define SIGP_STOP_AND_STORE_STATUS 9
#define SIGP_INITIAL_CPU_RESET 11
+#define SIGP_CPU_RESET 12
#define SIGP_SET_PREFIX 13
#define SIGP_STORE_STATUS_AT_ADDRESS 14
#define SIGP_SET_ARCHITECTURE 18
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index ca38139423a..437e6115927 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -249,7 +249,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setgroups16, int, gidsetsize, u16 __user *, grouplis
struct group_info *group_info;
int retval;
- if (!capable(CAP_SETGID))
+ if (!may_setgroups())
return -EPERM;
if ((unsigned)gidsetsize > NGROUPS_MAX)
return -EINVAL;
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 0f961a1c64b..8b9ccf02a2c 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -207,8 +207,6 @@ union raddress {
unsigned long pfra : 52; /* Page-Frame Real Address */
};
-static int ipte_lock_count;
-static DEFINE_MUTEX(ipte_mutex);
int ipte_lock_held(struct kvm_vcpu *vcpu)
{
@@ -216,47 +214,51 @@ int ipte_lock_held(struct kvm_vcpu *vcpu)
if (vcpu->arch.sie_block->eca & 1)
return ic->kh != 0;
- return ipte_lock_count != 0;
+ return vcpu->kvm->arch.ipte_lock_count != 0;
}
static void ipte_lock_simple(struct kvm_vcpu *vcpu)
{
union ipte_control old, new, *ic;
- mutex_lock(&ipte_mutex);
- ipte_lock_count++;
- if (ipte_lock_count > 1)
+ mutex_lock(&vcpu->kvm->arch.ipte_mutex);
+ vcpu->kvm->arch.ipte_lock_count++;
+ if (vcpu->kvm->arch.ipte_lock_count > 1)
goto out;
ic = &vcpu->kvm->arch.sca->ipte_control;
do {
- old = ACCESS_ONCE(*ic);
+ old = *ic;
+ barrier();
while (old.k) {
cond_resched();
- old = ACCESS_ONCE(*ic);
+ old = *ic;
+ barrier();
}
new = old;
new.k = 1;
} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
out:
- mutex_unlock(&ipte_mutex);
+ mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
}
static void ipte_unlock_simple(struct kvm_vcpu *vcpu)
{
union ipte_control old, new, *ic;
- mutex_lock(&ipte_mutex);
- ipte_lock_count--;
- if (ipte_lock_count)
+ mutex_lock(&vcpu->kvm->arch.ipte_mutex);
+ vcpu->kvm->arch.ipte_lock_count--;
+ if (vcpu->kvm->arch.ipte_lock_count)
goto out;
ic = &vcpu->kvm->arch.sca->ipte_control;
do {
- new = old = ACCESS_ONCE(*ic);
+ old = *ic;
+ barrier();
+ new = old;
new.k = 0;
} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
wake_up(&vcpu->kvm->arch.ipte_wq);
out:
- mutex_unlock(&ipte_mutex);
+ mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
}
static void ipte_lock_siif(struct kvm_vcpu *vcpu)
@@ -265,10 +267,12 @@ static void ipte_lock_siif(struct kvm_vcpu *vcpu)
ic = &vcpu->kvm->arch.sca->ipte_control;
do {
- old = ACCESS_ONCE(*ic);
+ old = *ic;
+ barrier();
while (old.kg) {
cond_resched();
- old = ACCESS_ONCE(*ic);
+ old = *ic;
+ barrier();
}
new = old;
new.k = 1;
@@ -282,7 +286,9 @@ static void ipte_unlock_siif(struct kvm_vcpu *vcpu)
ic = &vcpu->kvm->arch.sca->ipte_control;
do {
- new = old = ACCESS_ONCE(*ic);
+ old = *ic;
+ barrier();
+ new = old;
new.kh--;
if (!new.kh)
new.k = 0;
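The gaccess.c changes move the ipte lock state into struct kvm and replace the ACCESS_ONCE() reads of the ipte_control union with a plain read followed by barrier(), presumably because ACCESS_ONCE() is only well defined for scalar types. A condensed sketch of the resulting lock pattern, mirroring ipte_lock_simple() above (illustrative only):

	/* Pattern sketch: spin until the whole control word can be swapped
	 * with the "k" bit set; the read plus barrier() stands in for
	 * ACCESS_ONCE() on the union. */
	static void ipte_lock_sketch(union ipte_control *ic)
	{
		union ipte_control old, new;

		do {
			old = *ic;
			barrier();		/* force a fresh read */
			while (old.k) {		/* someone else holds the lock */
				cpu_relax();
				old = *ic;
				barrier();
			}
			new = old;
			new.k = 1;
		} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	}
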
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index eaf46291d36..81c77ab8102 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -38,6 +38,19 @@ static const intercept_handler_t instruction_handlers[256] = {
[0xeb] = kvm_s390_handle_eb,
};
+void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilc)
+{
+ struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
+
+ /* Use the length of the EXECUTE instruction if necessary */
+ if (sie_block->icptstatus & 1) {
+ ilc = (sie_block->icptstatus >> 4) & 0x6;
+ if (!ilc)
+ ilc = 4;
+ }
+ sie_block->gpsw.addr = __rewind_psw(sie_block->gpsw, ilc);
+}
+
static int handle_noop(struct kvm_vcpu *vcpu)
{
switch (vcpu->arch.sie_block->icptcode) {
@@ -244,7 +257,7 @@ static int handle_instruction_and_prog(struct kvm_vcpu *vcpu)
static int handle_external_interrupt(struct kvm_vcpu *vcpu)
{
u16 eic = vcpu->arch.sie_block->eic;
- struct kvm_s390_interrupt irq;
+ struct kvm_s390_irq irq;
psw_t newpsw;
int rc;
@@ -269,7 +282,7 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu)
if (kvm_s390_si_ext_call_pending(vcpu))
return 0;
irq.type = KVM_S390_INT_EXTERNAL_CALL;
- irq.parm = vcpu->arch.sie_block->extcpuaddr;
+ irq.u.extcall.code = vcpu->arch.sie_block->extcpuaddr;
break;
default:
return -EOPNOTSUPP;
@@ -288,7 +301,6 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu)
*/
static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
{
- psw_t *psw = &vcpu->arch.sie_block->gpsw;
unsigned long srcaddr, dstaddr;
int reg1, reg2, rc;
@@ -310,7 +322,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
if (rc != 0)
return rc;
- psw->addr = __rewind_psw(*psw, 4);
+ kvm_s390_rewind_psw(vcpu, 4);
return 0;
}
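A worked restatement of the ilc handling in kvm_s390_rewind_psw(): bit 0 of icptstatus flags an instruction reached via EXECUTE, in which case a few higher bits encode the target length, with 0 falling back to 4 bytes. The helper below is illustrative only:

	/* Illustrative restatement of the rewind-length selection above. */
	static unsigned int rewind_amount(unsigned char icptstatus, unsigned int ilc)
	{
		if (icptstatus & 1) {		/* target reached via EXECUTE */
			ilc = (icptstatus >> 4) & 0x6;
			if (!ilc)
				ilc = 4;	/* 0 means a 4-byte target */
		}
		return ilc;
	}
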
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index a39838457f0..f00f31e66cd 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -16,6 +16,7 @@
#include <linux/mmu_context.h>
#include <linux/signal.h>
#include <linux/slab.h>
+#include <linux/bitmap.h>
#include <asm/asm-offsets.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"
@@ -27,8 +28,8 @@
#define IOINT_CSSID_MASK 0x03fc0000
#define IOINT_AI_MASK 0x04000000
#define PFAULT_INIT 0x0600
-
-static int __must_check deliver_ckc_interrupt(struct kvm_vcpu *vcpu);
+#define PFAULT_DONE 0x0680
+#define VIRTIO_PARAM 0x0d00
static int is_ioint(u64 type)
{
@@ -136,6 +137,31 @@ static int __must_check __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
return 0;
}
+static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.local_int.pending_irqs;
+}
+
+static unsigned long deliverable_local_irqs(struct kvm_vcpu *vcpu)
+{
+ unsigned long active_mask = pending_local_irqs(vcpu);
+
+ if (psw_extint_disabled(vcpu))
+ active_mask &= ~IRQ_PEND_EXT_MASK;
+ if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
+ __clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
+ if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))
+ __clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
+ if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
+ __clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
+ if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul))
+ __clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
+ if (psw_mchk_disabled(vcpu))
+ active_mask &= ~IRQ_PEND_MCHK_MASK;
+
+ return active_mask;
+}
+
static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
@@ -170,26 +196,45 @@ static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}
+static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
+{
+ if (!(pending_local_irqs(vcpu) & IRQ_PEND_EXT_MASK))
+ return;
+ if (psw_extint_disabled(vcpu))
+ __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
+ else
+ vcpu->arch.sie_block->lctl |= LCTL_CR0;
+}
+
+static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
+{
+ if (!(pending_local_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
+ return;
+ if (psw_mchk_disabled(vcpu))
+ vcpu->arch.sie_block->ictl |= ICTL_LPSW;
+ else
+ vcpu->arch.sie_block->lctl |= LCTL_CR14;
+}
+
+/* Set interception request for non-deliverable local interrupts */
+static void set_intercept_indicators_local(struct kvm_vcpu *vcpu)
+{
+ set_intercept_indicators_ext(vcpu);
+ set_intercept_indicators_mchk(vcpu);
+}
+
static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
struct kvm_s390_interrupt_info *inti)
{
switch (inti->type) {
- case KVM_S390_INT_EXTERNAL_CALL:
- case KVM_S390_INT_EMERGENCY:
case KVM_S390_INT_SERVICE:
- case KVM_S390_INT_PFAULT_INIT:
case KVM_S390_INT_PFAULT_DONE:
case KVM_S390_INT_VIRTIO:
- case KVM_S390_INT_CLOCK_COMP:
- case KVM_S390_INT_CPU_TIMER:
if (psw_extint_disabled(vcpu))
__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
else
vcpu->arch.sie_block->lctl |= LCTL_CR0;
break;
- case KVM_S390_SIGP_STOP:
- __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
- break;
case KVM_S390_MCHK:
if (psw_mchk_disabled(vcpu))
vcpu->arch.sie_block->ictl |= ICTL_LPSW;
@@ -226,13 +271,236 @@ static u16 get_ilc(struct kvm_vcpu *vcpu)
}
}
-static int __must_check __deliver_prog_irq(struct kvm_vcpu *vcpu,
- struct kvm_s390_pgm_info *pgm_info)
+static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ int rc;
+
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
+ 0, 0);
+
+ rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
+ (u16 *)__LC_EXT_INT_CODE);
+ rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
+ rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
+ return rc ? -EFAULT : 0;
+}
+
+static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ int rc;
+
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
+ 0, 0);
+
+ rc = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
+ (u16 __user *)__LC_EXT_INT_CODE);
+ rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
+ rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
+ return rc ? -EFAULT : 0;
+}
+
+static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ struct kvm_s390_ext_info ext;
+ int rc;
+
+ spin_lock(&li->lock);
+ ext = li->irq.ext;
+ clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
+ li->irq.ext.ext_params2 = 0;
+ spin_unlock(&li->lock);
+
+ VCPU_EVENT(vcpu, 4, "interrupt: pfault init parm:%x,parm64:%llx",
+ 0, ext.ext_params2);
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
+ KVM_S390_INT_PFAULT_INIT,
+ 0, ext.ext_params2);
+
+ rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
+ rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
+ rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
+ return rc ? -EFAULT : 0;
+}
+
+static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ struct kvm_s390_mchk_info mchk;
+ int rc;
+
+ spin_lock(&li->lock);
+ mchk = li->irq.mchk;
+ /*
+ * If there was an exigent machine check pending, then any repressible
+ * machine checks that might have been pending are indicated along
+ * with it, so always clear both bits.
+ */
+ clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
+ clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
+ memset(&li->irq.mchk, 0, sizeof(mchk));
+ spin_unlock(&li->lock);
+
+ VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
+ mchk.mcic);
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK,
+ mchk.cr14, mchk.mcic);
+
+ rc = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED);
+ rc |= put_guest_lc(vcpu, mchk.mcic,
+ (u64 __user *) __LC_MCCK_CODE);
+ rc |= put_guest_lc(vcpu, mchk.failing_storage_address,
+ (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
+ rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
+ &mchk.fixed_logout, sizeof(mchk.fixed_logout));
+ rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ return rc ? -EFAULT : 0;
+}
+
+static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ int rc;
+
+ VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
+ vcpu->stat.deliver_restart_signal++;
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);
+
+ rc = write_guest_lc(vcpu,
+ offsetof(struct _lowcore, restart_old_psw),
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw),
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
+ return rc ? -EFAULT : 0;
+}
+
+static int __must_check __deliver_stop(struct kvm_vcpu *vcpu)
+{
+ VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
+ vcpu->stat.deliver_stop_signal++;
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_SIGP_STOP,
+ 0, 0);
+
+ __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
+ clear_bit(IRQ_PEND_SIGP_STOP, &vcpu->arch.local_int.pending_irqs);
+ return 0;
+}
+
+static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ struct kvm_s390_prefix_info prefix;
+
+ spin_lock(&li->lock);
+ prefix = li->irq.prefix;
+ li->irq.prefix.address = 0;
+ clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
+ spin_unlock(&li->lock);
+
+ VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x", prefix.address);
+ vcpu->stat.deliver_prefix_signal++;
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
+ KVM_S390_SIGP_SET_PREFIX,
+ prefix.address, 0);
+
+ kvm_s390_set_prefix(vcpu, prefix.address);
+ return 0;
+}
+
+static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ int rc;
+ int cpu_addr;
+
+ spin_lock(&li->lock);
+ cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
+ clear_bit(cpu_addr, li->sigp_emerg_pending);
+ if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
+ clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
+ spin_unlock(&li->lock);
+
+ VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
+ vcpu->stat.deliver_emergency_signal++;
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
+ cpu_addr, 0);
+
+ rc = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
+ (u16 *)__LC_EXT_INT_CODE);
+ rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
+ rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ return rc ? -EFAULT : 0;
+}
+
+static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ struct kvm_s390_extcall_info extcall;
+ int rc;
+
+ spin_lock(&li->lock);
+ extcall = li->irq.extcall;
+ li->irq.extcall.code = 0;
+ clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
+ spin_unlock(&li->lock);
+
+ VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
+ vcpu->stat.deliver_external_call++;
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
+ KVM_S390_INT_EXTERNAL_CALL,
+ extcall.code, 0);
+
+ rc = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
+ (u16 *)__LC_EXT_INT_CODE);
+ rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
+ rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
+ sizeof(psw_t));
+ return rc ? -EFAULT : 0;
+}
+
+static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ struct kvm_s390_pgm_info pgm_info;
int rc = 0;
u16 ilc = get_ilc(vcpu);
- switch (pgm_info->code & ~PGM_PER) {
+ spin_lock(&li->lock);
+ pgm_info = li->irq.pgm;
+ clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
+ memset(&li->irq.pgm, 0, sizeof(pgm_info));
+ spin_unlock(&li->lock);
+
+ VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
+ pgm_info.code, ilc);
+ vcpu->stat.deliver_program_int++;
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
+ pgm_info.code, 0);
+
+ switch (pgm_info.code & ~PGM_PER) {
case PGM_AFX_TRANSLATION:
case PGM_ASX_TRANSLATION:
case PGM_EX_TRANSLATION:
@@ -243,7 +511,7 @@ static int __must_check __deliver_prog_irq(struct kvm_vcpu *vcpu,
case PGM_PRIMARY_AUTHORITY:
case PGM_SECONDARY_AUTHORITY:
case PGM_SPACE_SWITCH:
- rc = put_guest_lc(vcpu, pgm_info->trans_exc_code,
+ rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
(u64 *)__LC_TRANS_EXC_CODE);
break;
case PGM_ALEN_TRANSLATION:
@@ -252,7 +520,7 @@ static int __must_check __deliver_prog_irq(struct kvm_vcpu *vcpu,
case PGM_ASTE_SEQUENCE:
case PGM_ASTE_VALIDITY:
case PGM_EXTENDED_AUTHORITY:
- rc = put_guest_lc(vcpu, pgm_info->exc_access_id,
+ rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
(u8 *)__LC_EXC_ACCESS_ID);
break;
case PGM_ASCE_TYPE:
@@ -261,247 +529,208 @@ static int __must_check __deliver_prog_irq(struct kvm_vcpu *vcpu,
case PGM_REGION_SECOND_TRANS:
case PGM_REGION_THIRD_TRANS:
case PGM_SEGMENT_TRANSLATION:
- rc = put_guest_lc(vcpu, pgm_info->trans_exc_code,
+ rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
(u64 *)__LC_TRANS_EXC_CODE);
- rc |= put_guest_lc(vcpu, pgm_info->exc_access_id,
+ rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
(u8 *)__LC_EXC_ACCESS_ID);
- rc |= put_guest_lc(vcpu, pgm_info->op_access_id,
+ rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
(u8 *)__LC_OP_ACCESS_ID);
break;
case PGM_MONITOR:
- rc = put_guest_lc(vcpu, pgm_info->mon_class_nr,
- (u64 *)__LC_MON_CLASS_NR);
- rc |= put_guest_lc(vcpu, pgm_info->mon_code,
+ rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
+ (u16 *)__LC_MON_CLASS_NR);
+ rc |= put_guest_lc(vcpu, pgm_info.mon_code,
(u64 *)__LC_MON_CODE);
break;
case PGM_DATA:
- rc = put_guest_lc(vcpu, pgm_info->data_exc_code,
+ rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
(u32 *)__LC_DATA_EXC_CODE);
break;
case PGM_PROTECTION:
- rc = put_guest_lc(vcpu, pgm_info->trans_exc_code,
+ rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
(u64 *)__LC_TRANS_EXC_CODE);
- rc |= put_guest_lc(vcpu, pgm_info->exc_access_id,
+ rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
(u8 *)__LC_EXC_ACCESS_ID);
break;
}
- if (pgm_info->code & PGM_PER) {
- rc |= put_guest_lc(vcpu, pgm_info->per_code,
+ if (pgm_info.code & PGM_PER) {
+ rc |= put_guest_lc(vcpu, pgm_info.per_code,
(u8 *) __LC_PER_CODE);
- rc |= put_guest_lc(vcpu, pgm_info->per_atmid,
+ rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
(u8 *)__LC_PER_ATMID);
- rc |= put_guest_lc(vcpu, pgm_info->per_address,
+ rc |= put_guest_lc(vcpu, pgm_info.per_address,
(u64 *) __LC_PER_ADDRESS);
- rc |= put_guest_lc(vcpu, pgm_info->per_access_id,
+ rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
(u8 *) __LC_PER_ACCESS_ID);
}
rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC);
- rc |= put_guest_lc(vcpu, pgm_info->code,
+ rc |= put_guest_lc(vcpu, pgm_info.code,
(u16 *)__LC_PGM_INT_CODE);
rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ return rc ? -EFAULT : 0;
+}
- return rc;
+static int __must_check __deliver_service(struct kvm_vcpu *vcpu,
+ struct kvm_s390_interrupt_info *inti)
+{
+ int rc;
+
+ VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
+ inti->ext.ext_params);
+ vcpu->stat.deliver_service_signal++;
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
+ inti->ext.ext_params, 0);
+
+ rc = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
+ rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
+ rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= put_guest_lc(vcpu, inti->ext.ext_params,
+ (u32 *)__LC_EXT_PARAMS);
+ return rc ? -EFAULT : 0;
}
-static int __must_check __do_deliver_interrupt(struct kvm_vcpu *vcpu,
- struct kvm_s390_interrupt_info *inti)
+static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu,
+ struct kvm_s390_interrupt_info *inti)
{
- const unsigned short table[] = { 2, 4, 4, 6 };
- int rc = 0;
+ int rc;
+
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
+ KVM_S390_INT_PFAULT_DONE, 0,
+ inti->ext.ext_params2);
+
+ rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE);
+ rc |= put_guest_lc(vcpu, PFAULT_DONE, (u16 *)__LC_EXT_CPU_ADDR);
+ rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
+ (u64 *)__LC_EXT_PARAMS2);
+ return rc ? -EFAULT : 0;
+}
+
+static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu,
+ struct kvm_s390_interrupt_info *inti)
+{
+ int rc;
+
+ VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
+ inti->ext.ext_params, inti->ext.ext_params2);
+ vcpu->stat.deliver_virtio_interrupt++;
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
+ inti->ext.ext_params,
+ inti->ext.ext_params2);
+
+ rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE);
+ rc |= put_guest_lc(vcpu, VIRTIO_PARAM, (u16 *)__LC_EXT_CPU_ADDR);
+ rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= put_guest_lc(vcpu, inti->ext.ext_params,
+ (u32 *)__LC_EXT_PARAMS);
+ rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
+ (u64 *)__LC_EXT_PARAMS2);
+ return rc ? -EFAULT : 0;
+}
+
+static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
+ struct kvm_s390_interrupt_info *inti)
+{
+ int rc;
+
+ VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
+ vcpu->stat.deliver_io_int++;
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
+ ((__u32)inti->io.subchannel_id << 16) |
+ inti->io.subchannel_nr,
+ ((__u64)inti->io.io_int_parm << 32) |
+ inti->io.io_int_word);
+
+ rc = put_guest_lc(vcpu, inti->io.subchannel_id,
+ (u16 *)__LC_SUBCHANNEL_ID);
+ rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
+ (u16 *)__LC_SUBCHANNEL_NR);
+ rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
+ (u32 *)__LC_IO_INT_PARM);
+ rc |= put_guest_lc(vcpu, inti->io.io_int_word,
+ (u32 *)__LC_IO_INT_WORD);
+ rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ return rc ? -EFAULT : 0;
+}
+
+static int __must_check __deliver_mchk_floating(struct kvm_vcpu *vcpu,
+ struct kvm_s390_interrupt_info *inti)
+{
+ struct kvm_s390_mchk_info *mchk = &inti->mchk;
+ int rc;
+
+ VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
+ mchk->mcic);
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK,
+ mchk->cr14, mchk->mcic);
+
+ rc = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED);
+ rc |= put_guest_lc(vcpu, mchk->mcic,
+ (u64 __user *) __LC_MCCK_CODE);
+ rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
+ (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
+ rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
+ &mchk->fixed_logout, sizeof(mchk->fixed_logout));
+ rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ return rc ? -EFAULT : 0;
+}
+
+typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);
+
+static const deliver_irq_t deliver_irq_funcs[] = {
+ [IRQ_PEND_MCHK_EX] = __deliver_machine_check,
+ [IRQ_PEND_PROG] = __deliver_prog,
+ [IRQ_PEND_EXT_EMERGENCY] = __deliver_emergency_signal,
+ [IRQ_PEND_EXT_EXTERNAL] = __deliver_external_call,
+ [IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc,
+ [IRQ_PEND_EXT_CPU_TIMER] = __deliver_cpu_timer,
+ [IRQ_PEND_RESTART] = __deliver_restart,
+ [IRQ_PEND_SIGP_STOP] = __deliver_stop,
+ [IRQ_PEND_SET_PREFIX] = __deliver_set_prefix,
+ [IRQ_PEND_PFAULT_INIT] = __deliver_pfault_init,
+};
+
+static int __must_check __deliver_floating_interrupt(struct kvm_vcpu *vcpu,
+ struct kvm_s390_interrupt_info *inti)
+{
+ int rc;
switch (inti->type) {
- case KVM_S390_INT_EMERGENCY:
- VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
- vcpu->stat.deliver_emergency_signal++;
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
- inti->emerg.code, 0);
- rc = put_guest_lc(vcpu, 0x1201, (u16 *)__LC_EXT_INT_CODE);
- rc |= put_guest_lc(vcpu, inti->emerg.code,
- (u16 *)__LC_EXT_CPU_ADDR);
- rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- break;
- case KVM_S390_INT_EXTERNAL_CALL:
- VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
- vcpu->stat.deliver_external_call++;
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
- inti->extcall.code, 0);
- rc = put_guest_lc(vcpu, 0x1202, (u16 *)__LC_EXT_INT_CODE);
- rc |= put_guest_lc(vcpu, inti->extcall.code,
- (u16 *)__LC_EXT_CPU_ADDR);
- rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
- &vcpu->arch.sie_block->gpsw,
- sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
- &vcpu->arch.sie_block->gpsw,
- sizeof(psw_t));
- break;
- case KVM_S390_INT_CLOCK_COMP:
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
- inti->ext.ext_params, 0);
- rc = deliver_ckc_interrupt(vcpu);
- break;
- case KVM_S390_INT_CPU_TIMER:
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
- inti->ext.ext_params, 0);
- rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
- (u16 *)__LC_EXT_INT_CODE);
- rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
- &vcpu->arch.sie_block->gpsw,
- sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= put_guest_lc(vcpu, inti->ext.ext_params,
- (u32 *)__LC_EXT_PARAMS);
- break;
case KVM_S390_INT_SERVICE:
- VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
- inti->ext.ext_params);
- vcpu->stat.deliver_service_signal++;
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
- inti->ext.ext_params, 0);
- rc = put_guest_lc(vcpu, 0x2401, (u16 *)__LC_EXT_INT_CODE);
- rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
- &vcpu->arch.sie_block->gpsw,
- sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= put_guest_lc(vcpu, inti->ext.ext_params,
- (u32 *)__LC_EXT_PARAMS);
- break;
- case KVM_S390_INT_PFAULT_INIT:
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0,
- inti->ext.ext_params2);
- rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
- (u16 *) __LC_EXT_INT_CODE);
- rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
- rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
- (u64 *) __LC_EXT_PARAMS2);
+ rc = __deliver_service(vcpu, inti);
break;
case KVM_S390_INT_PFAULT_DONE:
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0,
- inti->ext.ext_params2);
- rc = put_guest_lc(vcpu, 0x2603, (u16 *)__LC_EXT_INT_CODE);
- rc |= put_guest_lc(vcpu, 0x0680, (u16 *)__LC_EXT_CPU_ADDR);
- rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
- &vcpu->arch.sie_block->gpsw,
- sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
- (u64 *)__LC_EXT_PARAMS2);
+ rc = __deliver_pfault_done(vcpu, inti);
break;
case KVM_S390_INT_VIRTIO:
- VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
- inti->ext.ext_params, inti->ext.ext_params2);
- vcpu->stat.deliver_virtio_interrupt++;
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
- inti->ext.ext_params,
- inti->ext.ext_params2);
- rc = put_guest_lc(vcpu, 0x2603, (u16 *)__LC_EXT_INT_CODE);
- rc |= put_guest_lc(vcpu, 0x0d00, (u16 *)__LC_EXT_CPU_ADDR);
- rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
- &vcpu->arch.sie_block->gpsw,
- sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= put_guest_lc(vcpu, inti->ext.ext_params,
- (u32 *)__LC_EXT_PARAMS);
- rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
- (u64 *)__LC_EXT_PARAMS2);
- break;
- case KVM_S390_SIGP_STOP:
- VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
- vcpu->stat.deliver_stop_signal++;
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
- 0, 0);
- __set_intercept_indicator(vcpu, inti);
- break;
-
- case KVM_S390_SIGP_SET_PREFIX:
- VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
- inti->prefix.address);
- vcpu->stat.deliver_prefix_signal++;
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
- inti->prefix.address, 0);
- kvm_s390_set_prefix(vcpu, inti->prefix.address);
- break;
-
- case KVM_S390_RESTART:
- VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
- vcpu->stat.deliver_restart_signal++;
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
- 0, 0);
- rc = write_guest_lc(vcpu,
- offsetof(struct _lowcore, restart_old_psw),
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw),
- &vcpu->arch.sie_block->gpsw,
- sizeof(psw_t));
+ rc = __deliver_virtio(vcpu, inti);
break;
- case KVM_S390_PROGRAM_INT:
- VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
- inti->pgm.code,
- table[vcpu->arch.sie_block->ipa >> 14]);
- vcpu->stat.deliver_program_int++;
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
- inti->pgm.code, 0);
- rc = __deliver_prog_irq(vcpu, &inti->pgm);
- break;
-
case KVM_S390_MCHK:
- VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
- inti->mchk.mcic);
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
- inti->mchk.cr14,
- inti->mchk.mcic);
- rc = kvm_s390_vcpu_store_status(vcpu,
- KVM_S390_STORE_STATUS_PREFIXED);
- rc |= put_guest_lc(vcpu, inti->mchk.mcic, (u64 *)__LC_MCCK_CODE);
- rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
- &vcpu->arch.sie_block->gpsw,
- sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc = __deliver_mchk_floating(vcpu, inti);
break;
-
case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
- {
- __u32 param0 = ((__u32)inti->io.subchannel_id << 16) |
- inti->io.subchannel_nr;
- __u64 param1 = ((__u64)inti->io.io_int_parm << 32) |
- inti->io.io_int_word;
- VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
- vcpu->stat.deliver_io_int++;
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
- param0, param1);
- rc = put_guest_lc(vcpu, inti->io.subchannel_id,
- (u16 *)__LC_SUBCHANNEL_ID);
- rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
- (u16 *)__LC_SUBCHANNEL_NR);
- rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
- (u32 *)__LC_IO_INT_PARM);
- rc |= put_guest_lc(vcpu, inti->io.io_int_word,
- (u32 *)__LC_IO_INT_WORD);
- rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
- &vcpu->arch.sie_block->gpsw,
- sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
- &vcpu->arch.sie_block->gpsw,
- sizeof(psw_t));
+ rc = __deliver_io(vcpu, inti);
break;
- }
default:
BUG();
}
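Putting the pieces together: the per-type __deliver_* helpers above are reached through the deliver_irq_funcs table, driven by a priority scan of pending_irqs in kvm_s390_deliver_pending_interrupts() further below. A condensed sketch of that loop, with locking and error handling simplified (illustrative only):

	/* Condensed sketch of the new local delivery loop. */
	static int deliver_pending_sketch(struct kvm_vcpu *vcpu)
	{
		unsigned long pending, irq_type;
		int rc = 0;

		for (;;) {
			pending = deliverable_local_irqs(vcpu);
			irq_type = find_first_bit(&pending, IRQ_PEND_COUNT);
			if (irq_type == IRQ_PEND_COUNT)
				break;
			if (deliver_irq_funcs[irq_type])
				rc = deliver_irq_funcs[irq_type](vcpu);
			else	/* defensive: drop an IRQ nobody can deliver */
				clear_bit(irq_type,
					  &vcpu->arch.local_int.pending_irqs);
			if (rc)
				break;
		}
		return rc;
	}
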
@@ -509,19 +738,6 @@ static int __must_check __do_deliver_interrupt(struct kvm_vcpu *vcpu,
return rc;
}
-static int __must_check deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
-{
- int rc;
-
- rc = put_guest_lc(vcpu, 0x1004, (u16 __user *)__LC_EXT_INT_CODE);
- rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
- &vcpu->arch.sie_block->gpsw,
- sizeof(psw_t));
- return rc;
-}
-
/* Check whether SIGP interpretation facility has an external call pending */
int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu)
{
@@ -538,20 +754,11 @@ int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu)
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
{
- struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
struct kvm_s390_interrupt_info *inti;
- int rc = 0;
+ int rc;
- if (atomic_read(&li->active)) {
- spin_lock(&li->lock);
- list_for_each_entry(inti, &li->list, list)
- if (__interrupt_is_deliverable(vcpu, inti)) {
- rc = 1;
- break;
- }
- spin_unlock(&li->lock);
- }
+ rc = !!deliverable_local_irqs(vcpu);
if ((!rc) && atomic_read(&fi->active)) {
spin_lock(&fi->lock);
@@ -643,18 +850,15 @@ enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
- struct kvm_s390_interrupt_info *n, *inti = NULL;
spin_lock(&li->lock);
- list_for_each_entry_safe(inti, n, &li->list, list) {
- list_del(&inti->list);
- kfree(inti);
- }
- atomic_set(&li->active, 0);
+ li->pending_irqs = 0;
+ bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
+ memset(&li->irq, 0, sizeof(li->irq));
spin_unlock(&li->lock);
/* clear pending external calls set by sigp interpretation facility */
- atomic_clear_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
+ atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags);
atomic_clear_mask(SIGP_CTRL_C,
&vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl);
}
@@ -664,34 +868,35 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
struct kvm_s390_interrupt_info *n, *inti = NULL;
+ deliver_irq_t func;
int deliver;
int rc = 0;
+ unsigned long irq_type;
+ unsigned long deliverable_irqs;
__reset_intercept_indicators(vcpu);
- if (atomic_read(&li->active)) {
- do {
- deliver = 0;
- spin_lock(&li->lock);
- list_for_each_entry_safe(inti, n, &li->list, list) {
- if (__interrupt_is_deliverable(vcpu, inti)) {
- list_del(&inti->list);
- deliver = 1;
- break;
- }
- __set_intercept_indicator(vcpu, inti);
- }
- if (list_empty(&li->list))
- atomic_set(&li->active, 0);
- spin_unlock(&li->lock);
- if (deliver) {
- rc = __do_deliver_interrupt(vcpu, inti);
- kfree(inti);
- }
- } while (!rc && deliver);
- }
- if (!rc && kvm_cpu_has_pending_timer(vcpu))
- rc = deliver_ckc_interrupt(vcpu);
+ /* pending ckc conditions might have been invalidated */
+ clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
+ if (kvm_cpu_has_pending_timer(vcpu))
+ set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
+
+ do {
+ deliverable_irqs = deliverable_local_irqs(vcpu);
+ /* bits are in the order of interrupt priority */
+ irq_type = find_first_bit(&deliverable_irqs, IRQ_PEND_COUNT);
+ if (irq_type == IRQ_PEND_COUNT)
+ break;
+ func = deliver_irq_funcs[irq_type];
+ if (!func) {
+ WARN_ON_ONCE(func == NULL);
+ clear_bit(irq_type, &li->pending_irqs);
+ continue;
+ }
+ rc = func(vcpu);
+ } while (!rc && irq_type != IRQ_PEND_COUNT);
+
+ set_intercept_indicators_local(vcpu);
if (!rc && atomic_read(&fi->active)) {
do {
@@ -710,7 +915,7 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
atomic_set(&fi->active, 0);
spin_unlock(&fi->lock);
if (deliver) {
- rc = __do_deliver_interrupt(vcpu, inti);
+ rc = __deliver_floating_interrupt(vcpu, inti);
kfree(inti);
}
} while (!rc && deliver);
@@ -719,23 +924,26 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
return rc;
}
-int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
+static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
- struct kvm_s390_interrupt_info *inti;
- inti = kzalloc(sizeof(*inti), GFP_KERNEL);
- if (!inti)
- return -ENOMEM;
+ li->irq.pgm = irq->u.pgm;
+ set_bit(IRQ_PEND_PROG, &li->pending_irqs);
+ return 0;
+}
- inti->type = KVM_S390_PROGRAM_INT;
- inti->pgm.code = code;
+int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ struct kvm_s390_irq irq;
VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
- trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1);
+ trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, code,
+ 0, 1);
spin_lock(&li->lock);
- list_add(&inti->list, &li->list);
- atomic_set(&li->active, 1);
+ irq.u.pgm.code = code;
+ __inject_prog(vcpu, &irq);
BUG_ON(waitqueue_active(li->wq));
spin_unlock(&li->lock);
return 0;
@@ -745,27 +953,166 @@ int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
struct kvm_s390_pgm_info *pgm_info)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
- struct kvm_s390_interrupt_info *inti;
-
- inti = kzalloc(sizeof(*inti), GFP_KERNEL);
- if (!inti)
- return -ENOMEM;
+ struct kvm_s390_irq irq;
+ int rc;
VCPU_EVENT(vcpu, 3, "inject: prog irq %d (from kernel)",
pgm_info->code);
trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
pgm_info->code, 0, 1);
-
- inti->type = KVM_S390_PROGRAM_INT;
- memcpy(&inti->pgm, pgm_info, sizeof(inti->pgm));
spin_lock(&li->lock);
- list_add(&inti->list, &li->list);
- atomic_set(&li->active, 1);
+ irq.u.pgm = *pgm_info;
+ rc = __inject_prog(vcpu, &irq);
BUG_ON(waitqueue_active(li->wq));
spin_unlock(&li->lock);
+ return rc;
+}
+
+static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+
+ VCPU_EVENT(vcpu, 3, "inject: external irq params:%x, params2:%llx",
+ irq->u.ext.ext_params, irq->u.ext.ext_params2);
+ trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
+ irq->u.ext.ext_params,
+ irq->u.ext.ext_params2, 2);
+
+ li->irq.ext = irq->u.ext;
+ set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
+ atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
return 0;
}
+int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
+
+ VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u",
+ irq->u.extcall.code);
+ trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
+ irq->u.extcall.code, 0, 2);
+
+ *extcall = irq->u.extcall;
+ set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
+ atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+ return 0;
+}
+
+static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ struct kvm_s390_prefix_info *prefix = &li->irq.prefix;
+
+ VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
+ prefix->address);
+ trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
+ prefix->address, 0, 2);
+
+ *prefix = irq->u.prefix;
+ set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
+ return 0;
+}
+
+static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+
+ trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0, 2);
+
+ li->action_bits |= ACTION_STOP_ON_STOP;
+ set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
+ return 0;
+}
+
+static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
+ struct kvm_s390_irq *irq)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+
+ VCPU_EVENT(vcpu, 3, "inject: restart type %llx", irq->type);
+ trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0, 2);
+
+ set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
+ return 0;
+}
+
+static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
+ struct kvm_s390_irq *irq)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ struct kvm_s390_emerg_info *emerg = &li->irq.emerg;
+
+ VCPU_EVENT(vcpu, 3, "inject: emergency %u\n",
+ irq->u.emerg.code);
+ trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
+ emerg->code, 0, 2);
+
+ set_bit(emerg->code, li->sigp_emerg_pending);
+ set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
+ atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+ return 0;
+}
+
+static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ struct kvm_s390_mchk_info *mchk = &li->irq.mchk;
+
+ VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
+ mchk->mcic);
+ trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
+ mchk->mcic, 2);
+
+ /*
+ * Because repressible machine checks can be indicated along with
+ * exigent machine checks (PoP, Chapter 11, Interruption action),
+ * we need to combine cr14, mcic and the external damage code.
+ * The failing storage address and the logout area should not be OR'ed
+ * together; we just indicate the last occurrence of the corresponding
+ * machine check.
+ */
+ mchk->cr14 |= irq->u.mchk.cr14;
+ mchk->mcic |= irq->u.mchk.mcic;
+ mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
+ mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
+ memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
+ sizeof(mchk->fixed_logout));
+ if (mchk->mcic & MCHK_EX_MASK)
+ set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
+ else if (mchk->mcic & MCHK_REP_MASK)
+ set_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
+ return 0;
+}
+
+static int __inject_ckc(struct kvm_vcpu *vcpu)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+
+ VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CLOCK_COMP);
+ trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
+ 0, 0, 2);
+
+ set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
+ atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+ return 0;
+}
+
+static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+
+ VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CPU_TIMER);
+ trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
+ 0, 0, 2);
+
+ set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
+ atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+ return 0;
+}
+
+
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
u64 cr6, u64 schid)
{
@@ -851,7 +1198,17 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
li = &dst_vcpu->arch.local_int;
spin_lock(&li->lock);
- atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+ switch (inti->type) {
+ case KVM_S390_MCHK:
+ atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
+ break;
+ case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
+ atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
+ break;
+ default:
+ atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+ break;
+ }
spin_unlock(&li->lock);
kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
unlock_fi:
@@ -920,92 +1277,85 @@ void kvm_s390_reinject_io_int(struct kvm *kvm,
__inject_vm(kvm, inti);
}
-int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
- struct kvm_s390_interrupt *s390int)
+int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
+ struct kvm_s390_irq *irq)
{
- struct kvm_s390_local_interrupt *li;
- struct kvm_s390_interrupt_info *inti;
+ irq->type = s390int->type;
+ switch (irq->type) {
+ case KVM_S390_PROGRAM_INT:
+ if (s390int->parm & 0xffff0000)
+ return -EINVAL;
+ irq->u.pgm.code = s390int->parm;
+ break;
+ case KVM_S390_SIGP_SET_PREFIX:
+ irq->u.prefix.address = s390int->parm;
+ break;
+ case KVM_S390_INT_EXTERNAL_CALL:
+ if (irq->u.extcall.code & 0xffff0000)
+ return -EINVAL;
+ irq->u.extcall.code = s390int->parm;
+ break;
+ case KVM_S390_INT_EMERGENCY:
+ if (irq->u.emerg.code & 0xffff0000)
+ return -EINVAL;
+ irq->u.emerg.code = s390int->parm;
+ break;
+ case KVM_S390_MCHK:
+ irq->u.mchk.mcic = s390int->parm64;
+ break;
+ }
+ return 0;
+}
- inti = kzalloc(sizeof(*inti), GFP_KERNEL);
- if (!inti)
- return -ENOMEM;
+int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ int rc;
- switch (s390int->type) {
+ spin_lock(&li->lock);
+ switch (irq->type) {
case KVM_S390_PROGRAM_INT:
- if (s390int->parm & 0xffff0000) {
- kfree(inti);
- return -EINVAL;
- }
- inti->type = s390int->type;
- inti->pgm.code = s390int->parm;
VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
- s390int->parm);
+ irq->u.pgm.code);
+ rc = __inject_prog(vcpu, irq);
break;
case KVM_S390_SIGP_SET_PREFIX:
- inti->prefix.address = s390int->parm;
- inti->type = s390int->type;
- VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
- s390int->parm);
+ rc = __inject_set_prefix(vcpu, irq);
break;
case KVM_S390_SIGP_STOP:
+ rc = __inject_sigp_stop(vcpu, irq);
+ break;
case KVM_S390_RESTART:
+ rc = __inject_sigp_restart(vcpu, irq);
+ break;
case KVM_S390_INT_CLOCK_COMP:
+ rc = __inject_ckc(vcpu);
+ break;
case KVM_S390_INT_CPU_TIMER:
- VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
- inti->type = s390int->type;
+ rc = __inject_cpu_timer(vcpu);
break;
case KVM_S390_INT_EXTERNAL_CALL:
- if (s390int->parm & 0xffff0000) {
- kfree(inti);
- return -EINVAL;
- }
- VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u",
- s390int->parm);
- inti->type = s390int->type;
- inti->extcall.code = s390int->parm;
+ rc = __inject_extcall(vcpu, irq);
break;
case KVM_S390_INT_EMERGENCY:
- if (s390int->parm & 0xffff0000) {
- kfree(inti);
- return -EINVAL;
- }
- VCPU_EVENT(vcpu, 3, "inject: emergency %u\n", s390int->parm);
- inti->type = s390int->type;
- inti->emerg.code = s390int->parm;
+ rc = __inject_sigp_emergency(vcpu, irq);
break;
case KVM_S390_MCHK:
- VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
- s390int->parm64);
- inti->type = s390int->type;
- inti->mchk.mcic = s390int->parm64;
+ rc = __inject_mchk(vcpu, irq);
break;
case KVM_S390_INT_PFAULT_INIT:
- inti->type = s390int->type;
- inti->ext.ext_params2 = s390int->parm64;
+ rc = __inject_pfault_init(vcpu, irq);
break;
case KVM_S390_INT_VIRTIO:
case KVM_S390_INT_SERVICE:
case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
default:
- kfree(inti);
- return -EINVAL;
+ rc = -EINVAL;
}
- trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, s390int->type, s390int->parm,
- s390int->parm64, 2);
-
- li = &vcpu->arch.local_int;
- spin_lock(&li->lock);
- if (inti->type == KVM_S390_PROGRAM_INT)
- list_add(&inti->list, &li->list);
- else
- list_add_tail(&inti->list, &li->list);
- atomic_set(&li->active, 1);
- if (inti->type == KVM_S390_SIGP_STOP)
- li->action_bits |= ACTION_STOP_ON_STOP;
- atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
spin_unlock(&li->lock);
- kvm_s390_vcpu_wakeup(vcpu);
- return 0;
+ if (!rc)
+ kvm_s390_vcpu_wakeup(vcpu);
+ return rc;
}
void kvm_s390_clear_float_irqs(struct kvm *kvm)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 6b049ee75a5..3e09801e310 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -81,10 +81,17 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
+ { "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
+ { "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
+ { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
+ { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
+ { "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
+ { "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
+ { "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
{ "diagnose_10", VCPU_STAT(diagnose_10) },
{ "diagnose_44", VCPU_STAT(diagnose_44) },
{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
@@ -453,6 +460,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
spin_lock_init(&kvm->arch.float_int.lock);
INIT_LIST_HEAD(&kvm->arch.float_int.list);
init_waitqueue_head(&kvm->arch.ipte_wq);
+ mutex_init(&kvm->arch.ipte_mutex);
debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
VM_EVENT(kvm, 3, "%s", "vm created");
@@ -711,7 +719,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
}
spin_lock_init(&vcpu->arch.local_int.lock);
- INIT_LIST_HEAD(&vcpu->arch.local_int.list);
vcpu->arch.local_int.float_int = &kvm->arch.float_int;
vcpu->arch.local_int.wq = &vcpu->wq;
vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
@@ -1114,13 +1121,15 @@ static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
unsigned long token)
{
struct kvm_s390_interrupt inti;
- inti.parm64 = token;
+ struct kvm_s390_irq irq;
if (start_token) {
- inti.type = KVM_S390_INT_PFAULT_INIT;
- WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti));
+ irq.u.ext.ext_params2 = token;
+ irq.type = KVM_S390_INT_PFAULT_INIT;
+ WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
} else {
inti.type = KVM_S390_INT_PFAULT_DONE;
+ inti.parm64 = token;
WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
}
}
@@ -1614,11 +1623,14 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
switch (ioctl) {
case KVM_S390_INTERRUPT: {
struct kvm_s390_interrupt s390int;
+ struct kvm_s390_irq s390irq;
r = -EFAULT;
if (copy_from_user(&s390int, argp, sizeof(s390int)))
break;
- r = kvm_s390_inject_vcpu(vcpu, &s390int);
+ if (s390int_to_s390irq(&s390int, &s390irq))
+ return -EINVAL;
+ r = kvm_s390_inject_vcpu(vcpu, &s390irq);
break;
}
case KVM_S390_STORE_STATUS:
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 244d0230318..a8f3d9b71c1 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -24,8 +24,6 @@ typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);
/* declare vfacilities extern */
extern unsigned long *vfacilities;
-int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu);
-
/* Transactional Memory Execution related macros */
#define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & 0x10))
#define TDB_FORMAT1 1
@@ -144,7 +142,7 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm);
int __must_check kvm_s390_inject_vm(struct kvm *kvm,
struct kvm_s390_interrupt *s390int);
int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
- struct kvm_s390_interrupt *s390int);
+ struct kvm_s390_irq *irq);
int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
u64 cr6, u64 schid);
@@ -152,6 +150,10 @@ void kvm_s390_reinject_io_int(struct kvm *kvm,
struct kvm_s390_interrupt_info *inti);
int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked);
+/* implemented in intercept.c */
+void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilc);
+int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu);
+
/* implemented in priv.c */
int is_valid_psw(psw_t *psw);
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
@@ -222,6 +224,9 @@ static inline int kvm_s390_inject_prog_cond(struct kvm_vcpu *vcpu, int rc)
return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
}
+int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
+ struct kvm_s390_irq *s390irq);
+
/* implemented in interrupt.c */
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int psw_extint_disabled(struct kvm_vcpu *vcpu);
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index f47cb0c6d90..1be578d64df 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -180,21 +180,18 @@ static int handle_skey(struct kvm_vcpu *vcpu)
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- vcpu->arch.sie_block->gpsw.addr =
- __rewind_psw(vcpu->arch.sie_block->gpsw, 4);
+ kvm_s390_rewind_psw(vcpu, 4);
VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
return 0;
}
static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
- psw_t *psw = &vcpu->arch.sie_block->gpsw;
-
vcpu->stat.instruction_ipte_interlock++;
- if (psw_bits(*psw).p)
+ if (psw_bits(vcpu->arch.sie_block->gpsw).p)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
- psw->addr = __rewind_psw(*psw, 4);
+ kvm_s390_rewind_psw(vcpu, 4);
VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
return 0;
}
@@ -650,10 +647,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
- if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
- if (kvm_s390_check_low_addr_protection(vcpu, start))
- return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
- }
+ start = kvm_s390_logical_to_effective(vcpu, start);
switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
case 0x00000000:
@@ -669,6 +663,12 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
default:
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
}
+
+ if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
+ if (kvm_s390_check_low_addr_protection(vcpu, start))
+ return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
+ }
+
while (start < end) {
unsigned long useraddr, abs_addr;
@@ -725,8 +725,7 @@ static int handle_essa(struct kvm_vcpu *vcpu)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
/* Rewind PSW to repeat the ESSA instruction */
- vcpu->arch.sie_block->gpsw.addr =
- __rewind_psw(vcpu->arch.sie_block->gpsw, 4);
+ kvm_s390_rewind_psw(vcpu, 4);
vcpu->arch.sie_block->cbrlo &= PAGE_MASK; /* reset nceo */
cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
down_read(&gmap->mm->mmap_sem);
@@ -769,8 +768,8 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
- u32 val = 0;
- int reg, rc;
+ int reg, rc, nr_regs;
+ u32 ctl_array[16];
u64 ga;
vcpu->stat.instruction_lctl++;
@@ -786,19 +785,20 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);
+ nr_regs = ((reg3 - reg1) & 0xf) + 1;
+ rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32));
+ if (rc)
+ return kvm_s390_inject_prog_cond(vcpu, rc);
reg = reg1;
+ nr_regs = 0;
do {
- rc = read_guest(vcpu, ga, &val, sizeof(val));
- if (rc)
- return kvm_s390_inject_prog_cond(vcpu, rc);
vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
- vcpu->arch.sie_block->gcr[reg] |= val;
- ga += 4;
+ vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
if (reg == reg3)
break;
reg = (reg + 1) % 16;
} while (1);
-
+ kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
return 0;
}
@@ -806,9 +806,9 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
{
int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
+ int reg, rc, nr_regs;
+ u32 ctl_array[16];
u64 ga;
- u32 val;
- int reg, rc;
vcpu->stat.instruction_stctl++;
@@ -824,26 +824,24 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);
reg = reg1;
+ nr_regs = 0;
do {
- val = vcpu->arch.sie_block->gcr[reg] & 0x00000000fffffffful;
- rc = write_guest(vcpu, ga, &val, sizeof(val));
- if (rc)
- return kvm_s390_inject_prog_cond(vcpu, rc);
- ga += 4;
+ ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
if (reg == reg3)
break;
reg = (reg + 1) % 16;
} while (1);
-
- return 0;
+ rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32));
+ return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}
static int handle_lctlg(struct kvm_vcpu *vcpu)
{
int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
- u64 ga, val;
- int reg, rc;
+ int reg, rc, nr_regs;
+ u64 ctl_array[16];
+ u64 ga;
vcpu->stat.instruction_lctlg++;
@@ -855,22 +853,22 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
if (ga & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- reg = reg1;
-
VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);
+ nr_regs = ((reg3 - reg1) & 0xf) + 1;
+ rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64));
+ if (rc)
+ return kvm_s390_inject_prog_cond(vcpu, rc);
+ reg = reg1;
+ nr_regs = 0;
do {
- rc = read_guest(vcpu, ga, &val, sizeof(val));
- if (rc)
- return kvm_s390_inject_prog_cond(vcpu, rc);
- vcpu->arch.sie_block->gcr[reg] = val;
- ga += 8;
+ vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];
if (reg == reg3)
break;
reg = (reg + 1) % 16;
} while (1);
-
+ kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
return 0;
}
@@ -878,8 +876,9 @@ static int handle_stctg(struct kvm_vcpu *vcpu)
{
int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
- u64 ga, val;
- int reg, rc;
+ int reg, rc, nr_regs;
+ u64 ctl_array[16];
+ u64 ga;
vcpu->stat.instruction_stctg++;
@@ -891,23 +890,19 @@ static int handle_stctg(struct kvm_vcpu *vcpu)
if (ga & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- reg = reg1;
-
VCPU_EVENT(vcpu, 5, "stctg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);
+ reg = reg1;
+ nr_regs = 0;
do {
- val = vcpu->arch.sie_block->gcr[reg];
- rc = write_guest(vcpu, ga, &val, sizeof(val));
- if (rc)
- return kvm_s390_inject_prog_cond(vcpu, rc);
- ga += 8;
+ ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
if (reg == reg3)
break;
reg = (reg + 1) % 16;
} while (1);
-
- return 0;
+ rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64));
+ return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}
static const intercept_handler_t eb_handlers[256] = {
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index cf243ba3d50..6651f9f7397 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -20,20 +20,13 @@
#include "kvm-s390.h"
#include "trace.h"
-static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
+static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
u64 *reg)
{
struct kvm_s390_local_interrupt *li;
- struct kvm_vcpu *dst_vcpu = NULL;
int cpuflags;
int rc;
- if (cpu_addr >= KVM_MAX_VCPUS)
- return SIGP_CC_NOT_OPERATIONAL;
-
- dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
- if (!dst_vcpu)
- return SIGP_CC_NOT_OPERATIONAL;
li = &dst_vcpu->arch.local_int;
cpuflags = atomic_read(li->cpuflags);
@@ -48,55 +41,53 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
rc = SIGP_CC_STATUS_STORED;
}
- VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
+ VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", dst_vcpu->vcpu_id,
+ rc);
return rc;
}
-static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
+static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu *dst_vcpu)
{
- struct kvm_s390_interrupt s390int = {
+ struct kvm_s390_irq irq = {
.type = KVM_S390_INT_EMERGENCY,
- .parm = vcpu->vcpu_id,
+ .u.emerg.code = vcpu->vcpu_id,
};
- struct kvm_vcpu *dst_vcpu = NULL;
int rc = 0;
- if (cpu_addr < KVM_MAX_VCPUS)
- dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
- if (!dst_vcpu)
- return SIGP_CC_NOT_OPERATIONAL;
-
- rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int);
+ rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
if (!rc)
- VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
+ VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x",
+ dst_vcpu->vcpu_id);
return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}
-static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
+static int __sigp_emergency(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
+{
+ return __inject_sigp_emergency(vcpu, dst_vcpu);
+}
+
+static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu *dst_vcpu,
u16 asn, u64 *reg)
{
- struct kvm_vcpu *dst_vcpu = NULL;
const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
u16 p_asn, s_asn;
psw_t *psw;
u32 flags;
- if (cpu_addr < KVM_MAX_VCPUS)
- dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
- if (!dst_vcpu)
- return SIGP_CC_NOT_OPERATIONAL;
flags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags);
psw = &dst_vcpu->arch.sie_block->gpsw;
p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff; /* Primary ASN */
s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff; /* Secondary ASN */
- /* Deliver the emergency signal? */
+ /* Inject the emergency signal? */
if (!(flags & CPUSTAT_STOPPED)
|| (psw->mask & psw_int_mask) != psw_int_mask
|| ((flags & CPUSTAT_WAIT) && psw->addr != 0)
|| (!(flags & CPUSTAT_WAIT) && (asn == p_asn || asn == s_asn))) {
- return __sigp_emergency(vcpu, cpu_addr);
+ return __inject_sigp_emergency(vcpu, dst_vcpu);
} else {
*reg &= 0xffffffff00000000UL;
*reg |= SIGP_STATUS_INCORRECT_STATE;
@@ -104,23 +95,19 @@ static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
}
}
-static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
+static int __sigp_external_call(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu *dst_vcpu)
{
- struct kvm_s390_interrupt s390int = {
+ struct kvm_s390_irq irq = {
.type = KVM_S390_INT_EXTERNAL_CALL,
- .parm = vcpu->vcpu_id,
+ .u.extcall.code = vcpu->vcpu_id,
};
- struct kvm_vcpu *dst_vcpu = NULL;
int rc;
- if (cpu_addr < KVM_MAX_VCPUS)
- dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
- if (!dst_vcpu)
- return SIGP_CC_NOT_OPERATIONAL;
-
- rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int);
+ rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
if (!rc)
- VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
+ VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x",
+ dst_vcpu->vcpu_id);
return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}
@@ -128,29 +115,20 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
static int __inject_sigp_stop(struct kvm_vcpu *dst_vcpu, int action)
{
struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
- struct kvm_s390_interrupt_info *inti;
int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
- inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
- if (!inti)
- return -ENOMEM;
- inti->type = KVM_S390_SIGP_STOP;
-
spin_lock(&li->lock);
if (li->action_bits & ACTION_STOP_ON_STOP) {
/* another SIGP STOP is pending */
- kfree(inti);
rc = SIGP_CC_BUSY;
goto out;
}
if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
- kfree(inti);
if ((action & ACTION_STORE_ON_STOP) != 0)
rc = -ESHUTDOWN;
goto out;
}
- list_add_tail(&inti->list, &li->list);
- atomic_set(&li->active, 1);
+ set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
li->action_bits |= action;
atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
kvm_s390_vcpu_wakeup(dst_vcpu);
@@ -160,23 +138,27 @@ out:
return rc;
}
-static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
+static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
{
- struct kvm_vcpu *dst_vcpu = NULL;
int rc;
- if (cpu_addr >= KVM_MAX_VCPUS)
- return SIGP_CC_NOT_OPERATIONAL;
+ rc = __inject_sigp_stop(dst_vcpu, ACTION_STOP_ON_STOP);
+ VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", dst_vcpu->vcpu_id);
- dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
- if (!dst_vcpu)
- return SIGP_CC_NOT_OPERATIONAL;
+ return rc;
+}
- rc = __inject_sigp_stop(dst_vcpu, action);
+static int __sigp_stop_and_store_status(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu *dst_vcpu, u64 *reg)
+{
+ int rc;
- VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
+ rc = __inject_sigp_stop(dst_vcpu, ACTION_STOP_ON_STOP |
+ ACTION_STORE_ON_STOP);
+ VCPU_EVENT(vcpu, 4, "sent sigp stop and store status to cpu %x",
+ dst_vcpu->vcpu_id);
- if ((action & ACTION_STORE_ON_STOP) != 0 && rc == -ESHUTDOWN) {
+ if (rc == -ESHUTDOWN) {
/* If the CPU has already been stopped, we still have
* to save the status when doing stop-and-store. This
* has to be done after unlocking all spinlocks. */
@@ -212,18 +194,12 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
return rc;
}
-static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
- u64 *reg)
+static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
+ u32 address, u64 *reg)
{
struct kvm_s390_local_interrupt *li;
- struct kvm_vcpu *dst_vcpu = NULL;
- struct kvm_s390_interrupt_info *inti;
int rc;
- if (cpu_addr < KVM_MAX_VCPUS)
- dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
- if (!dst_vcpu)
- return SIGP_CC_NOT_OPERATIONAL;
li = &dst_vcpu->arch.local_int;
/*
@@ -238,46 +214,34 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
return SIGP_CC_STATUS_STORED;
}
- inti = kzalloc(sizeof(*inti), GFP_KERNEL);
- if (!inti)
- return SIGP_CC_BUSY;
-
spin_lock(&li->lock);
/* cpu must be in stopped state */
if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
*reg &= 0xffffffff00000000UL;
*reg |= SIGP_STATUS_INCORRECT_STATE;
rc = SIGP_CC_STATUS_STORED;
- kfree(inti);
goto out_li;
}
- inti->type = KVM_S390_SIGP_SET_PREFIX;
- inti->prefix.address = address;
-
- list_add_tail(&inti->list, &li->list);
- atomic_set(&li->active, 1);
+ li->irq.prefix.address = address;
+ set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
kvm_s390_vcpu_wakeup(dst_vcpu);
rc = SIGP_CC_ORDER_CODE_ACCEPTED;
- VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
+ VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", dst_vcpu->vcpu_id,
+ address);
out_li:
spin_unlock(&li->lock);
return rc;
}
-static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id,
- u32 addr, u64 *reg)
+static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu *dst_vcpu,
+ u32 addr, u64 *reg)
{
- struct kvm_vcpu *dst_vcpu = NULL;
int flags;
int rc;
- if (cpu_id < KVM_MAX_VCPUS)
- dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_id);
- if (!dst_vcpu)
- return SIGP_CC_NOT_OPERATIONAL;
-
spin_lock(&dst_vcpu->arch.local_int.lock);
flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
spin_unlock(&dst_vcpu->arch.local_int.lock);
@@ -297,19 +261,12 @@ static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id,
return rc;
}
-static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
- u64 *reg)
+static int __sigp_sense_running(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu *dst_vcpu, u64 *reg)
{
struct kvm_s390_local_interrupt *li;
- struct kvm_vcpu *dst_vcpu = NULL;
int rc;
- if (cpu_addr >= KVM_MAX_VCPUS)
- return SIGP_CC_NOT_OPERATIONAL;
-
- dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
- if (!dst_vcpu)
- return SIGP_CC_NOT_OPERATIONAL;
li = &dst_vcpu->arch.local_int;
if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
/* running */
@@ -321,26 +278,19 @@ static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
rc = SIGP_CC_STATUS_STORED;
}
- VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
- rc);
+ VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x",
+ dst_vcpu->vcpu_id, rc);
return rc;
}
-/* Test whether the destination CPU is available and not busy */
-static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr)
+static int __prepare_sigp_re_start(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu *dst_vcpu, u8 order_code)
{
- struct kvm_s390_local_interrupt *li;
- int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
- struct kvm_vcpu *dst_vcpu = NULL;
-
- if (cpu_addr >= KVM_MAX_VCPUS)
- return SIGP_CC_NOT_OPERATIONAL;
+ struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
+ /* handle (RE)START in user space */
+ int rc = -EOPNOTSUPP;
- dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
- if (!dst_vcpu)
- return SIGP_CC_NOT_OPERATIONAL;
- li = &dst_vcpu->arch.local_int;
spin_lock(&li->lock);
if (li->action_bits & ACTION_STOP_ON_STOP)
rc = SIGP_CC_BUSY;
@@ -349,90 +299,131 @@ static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr)
return rc;
}
-int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
+static int __prepare_sigp_cpu_reset(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu *dst_vcpu, u8 order_code)
{
- int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
- int r3 = vcpu->arch.sie_block->ipa & 0x000f;
- u32 parameter;
- u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
- u8 order_code;
- int rc;
+ /* handle (INITIAL) CPU RESET in user space */
+ return -EOPNOTSUPP;
+}
- /* sigp in userspace can exit */
- if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
- return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+static int __prepare_sigp_unknown(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu *dst_vcpu)
+{
+ /* handle unknown orders in user space */
+ return -EOPNOTSUPP;
+}
- order_code = kvm_s390_get_base_disp_rs(vcpu);
+static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
+ u16 cpu_addr, u32 parameter, u64 *status_reg)
+{
+ int rc;
+ struct kvm_vcpu *dst_vcpu;
- if (r1 % 2)
- parameter = vcpu->run->s.regs.gprs[r1];
- else
- parameter = vcpu->run->s.regs.gprs[r1 + 1];
+ if (cpu_addr >= KVM_MAX_VCPUS)
+ return SIGP_CC_NOT_OPERATIONAL;
+
+ dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+ if (!dst_vcpu)
+ return SIGP_CC_NOT_OPERATIONAL;
- trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
switch (order_code) {
case SIGP_SENSE:
vcpu->stat.instruction_sigp_sense++;
- rc = __sigp_sense(vcpu, cpu_addr,
- &vcpu->run->s.regs.gprs[r1]);
+ rc = __sigp_sense(vcpu, dst_vcpu, status_reg);
break;
case SIGP_EXTERNAL_CALL:
vcpu->stat.instruction_sigp_external_call++;
- rc = __sigp_external_call(vcpu, cpu_addr);
+ rc = __sigp_external_call(vcpu, dst_vcpu);
break;
case SIGP_EMERGENCY_SIGNAL:
vcpu->stat.instruction_sigp_emergency++;
- rc = __sigp_emergency(vcpu, cpu_addr);
+ rc = __sigp_emergency(vcpu, dst_vcpu);
break;
case SIGP_STOP:
vcpu->stat.instruction_sigp_stop++;
- rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP);
+ rc = __sigp_stop(vcpu, dst_vcpu);
break;
case SIGP_STOP_AND_STORE_STATUS:
- vcpu->stat.instruction_sigp_stop++;
- rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP |
- ACTION_STOP_ON_STOP);
+ vcpu->stat.instruction_sigp_stop_store_status++;
+ rc = __sigp_stop_and_store_status(vcpu, dst_vcpu, status_reg);
break;
case SIGP_STORE_STATUS_AT_ADDRESS:
- rc = __sigp_store_status_at_addr(vcpu, cpu_addr, parameter,
- &vcpu->run->s.regs.gprs[r1]);
- break;
- case SIGP_SET_ARCHITECTURE:
- vcpu->stat.instruction_sigp_arch++;
- rc = __sigp_set_arch(vcpu, parameter);
+ vcpu->stat.instruction_sigp_store_status++;
+ rc = __sigp_store_status_at_addr(vcpu, dst_vcpu, parameter,
+ status_reg);
break;
case SIGP_SET_PREFIX:
vcpu->stat.instruction_sigp_prefix++;
- rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
- &vcpu->run->s.regs.gprs[r1]);
+ rc = __sigp_set_prefix(vcpu, dst_vcpu, parameter, status_reg);
break;
case SIGP_COND_EMERGENCY_SIGNAL:
- rc = __sigp_conditional_emergency(vcpu, cpu_addr, parameter,
- &vcpu->run->s.regs.gprs[r1]);
+ vcpu->stat.instruction_sigp_cond_emergency++;
+ rc = __sigp_conditional_emergency(vcpu, dst_vcpu, parameter,
+ status_reg);
break;
case SIGP_SENSE_RUNNING:
vcpu->stat.instruction_sigp_sense_running++;
- rc = __sigp_sense_running(vcpu, cpu_addr,
- &vcpu->run->s.regs.gprs[r1]);
+ rc = __sigp_sense_running(vcpu, dst_vcpu, status_reg);
break;
case SIGP_START:
- rc = sigp_check_callable(vcpu, cpu_addr);
- if (rc == SIGP_CC_ORDER_CODE_ACCEPTED)
- rc = -EOPNOTSUPP; /* Handle START in user space */
+ vcpu->stat.instruction_sigp_start++;
+ rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
break;
case SIGP_RESTART:
vcpu->stat.instruction_sigp_restart++;
- rc = sigp_check_callable(vcpu, cpu_addr);
- if (rc == SIGP_CC_ORDER_CODE_ACCEPTED) {
- VCPU_EVENT(vcpu, 4,
- "sigp restart %x to handle userspace",
- cpu_addr);
- /* user space must know about restart */
- rc = -EOPNOTSUPP;
- }
+ rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
+ break;
+ case SIGP_INITIAL_CPU_RESET:
+ vcpu->stat.instruction_sigp_init_cpu_reset++;
+ rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
+ break;
+ case SIGP_CPU_RESET:
+ vcpu->stat.instruction_sigp_cpu_reset++;
+ rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
+ break;
+ default:
+ vcpu->stat.instruction_sigp_unknown++;
+ rc = __prepare_sigp_unknown(vcpu, dst_vcpu);
+ }
+
+ if (rc == -EOPNOTSUPP)
+ VCPU_EVENT(vcpu, 4,
+ "sigp order %u -> cpu %x: handled in user space",
+ order_code, dst_vcpu->vcpu_id);
+
+ return rc;
+}
+
+int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
+{
+ int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
+ int r3 = vcpu->arch.sie_block->ipa & 0x000f;
+ u32 parameter;
+ u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
+ u8 order_code;
+ int rc;
+
+ /* sigp in userspace can exit */
+ if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+ return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
+ order_code = kvm_s390_get_base_disp_rs(vcpu);
+
+ if (r1 % 2)
+ parameter = vcpu->run->s.regs.gprs[r1];
+ else
+ parameter = vcpu->run->s.regs.gprs[r1 + 1];
+
+ trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
+ switch (order_code) {
+ case SIGP_SET_ARCHITECTURE:
+ vcpu->stat.instruction_sigp_arch++;
+ rc = __sigp_set_arch(vcpu, parameter);
break;
default:
- return -EOPNOTSUPP;
+ rc = handle_sigp_dst(vcpu, order_code, cpu_addr,
+ parameter,
+ &vcpu->run->s.regs.gprs[r1]);
}
if (rc < 0)
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 71c7eff2c89..be99357d238 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -844,7 +844,7 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
down_read(&mm->mmap_sem);
retry:
- ptep = get_locked_pte(current->mm, addr, &ptl);
+ ptep = get_locked_pte(mm, addr, &ptl);
if (unlikely(!ptep)) {
up_read(&mm->mmap_sem);
return -EFAULT;
@@ -888,6 +888,45 @@ retry:
}
EXPORT_SYMBOL(set_guest_storage_key);
+unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr)
+{
+ spinlock_t *ptl;
+ pgste_t pgste;
+ pte_t *ptep;
+ uint64_t physaddr;
+ unsigned long key = 0;
+
+ down_read(&mm->mmap_sem);
+ ptep = get_locked_pte(mm, addr, &ptl);
+ if (unlikely(!ptep)) {
+ up_read(&mm->mmap_sem);
+ return -EFAULT;
+ }
+ pgste = pgste_get_lock(ptep);
+
+ if (pte_val(*ptep) & _PAGE_INVALID) {
+ key |= (pgste_val(pgste) & PGSTE_ACC_BITS) >> 56;
+ key |= (pgste_val(pgste) & PGSTE_FP_BIT) >> 56;
+ key |= (pgste_val(pgste) & PGSTE_GR_BIT) >> 48;
+ key |= (pgste_val(pgste) & PGSTE_GC_BIT) >> 48;
+ } else {
+ physaddr = pte_val(*ptep) & PAGE_MASK;
+ key = page_get_storage_key(physaddr);
+
+ /* Reflect guest's logical view, not physical */
+ if (pgste_val(pgste) & PGSTE_GR_BIT)
+ key |= _PAGE_REFERENCED;
+ if (pgste_val(pgste) & PGSTE_GC_BIT)
+ key |= _PAGE_CHANGED;
+ }
+
+ pgste_set_unlock(ptep, pgste);
+ pte_unmap_unlock(ptep, ptl);
+ up_read(&mm->mmap_sem);
+ return key;
+}
+EXPORT_SYMBOL(get_guest_storage_key);
+
#else /* CONFIG_PGSTE */
static inline int page_table_with_pgste(struct page *page)
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index c6b6ee5f38b..0f09f5285d5 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -223,7 +223,7 @@ config CPU_SHX3
config ARCH_SHMOBILE
bool
select ARCH_SUSPEND_POSSIBLE
- select PM_RUNTIME
+ select PM
config CPU_HAS_PMU
depends on CPU_SH4 || CPU_SH4A
diff --git a/arch/sh/configs/apsh4ad0a_defconfig b/arch/sh/configs/apsh4ad0a_defconfig
index ec70475da89..a8d975793b6 100644
--- a/arch/sh/configs/apsh4ad0a_defconfig
+++ b/arch/sh/configs/apsh4ad0a_defconfig
@@ -47,7 +47,7 @@ CONFIG_PREEMPT=y
CONFIG_BINFMT_MISC=y
CONFIG_PM=y
CONFIG_PM_DEBUG=y
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
CONFIG_CPU_IDLE=y
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/sh/configs/sdk7786_defconfig b/arch/sh/configs/sdk7786_defconfig
index 76a76a295d7..e7e56a4131b 100644
--- a/arch/sh/configs/sdk7786_defconfig
+++ b/arch/sh/configs/sdk7786_defconfig
@@ -82,7 +82,7 @@ CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
CONFIG_BINFMT_MISC=y
CONFIG_PM=y
CONFIG_PM_DEBUG=y
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
CONFIG_CPU_IDLE=y
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index be65f035d18..5cbc96d801f 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -460,10 +460,12 @@ static void __init sparc_context_init(int numctx)
void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
struct task_struct *tsk)
{
+ unsigned long flags;
+
if (mm->context == NO_CONTEXT) {
- spin_lock(&srmmu_context_spinlock);
+ spin_lock_irqsave(&srmmu_context_spinlock, flags);
alloc_context(old_mm, mm);
- spin_unlock(&srmmu_context_spinlock);
+ spin_unlock_irqrestore(&srmmu_context_spinlock, flags);
srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
}
@@ -986,14 +988,15 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
void destroy_context(struct mm_struct *mm)
{
+ unsigned long flags;
if (mm->context != NO_CONTEXT) {
flush_cache_mm(mm);
srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
flush_tlb_mm(mm);
- spin_lock(&srmmu_context_spinlock);
+ spin_lock_irqsave(&srmmu_context_spinlock, flags);
free_context(mm->context);
- spin_unlock(&srmmu_context_spinlock);
+ spin_unlock_irqrestore(&srmmu_context_spinlock, flags);
mm->context = NO_CONTEXT;
}
}
diff --git a/arch/tile/gxio/mpipe.c b/arch/tile/gxio/mpipe.c
index 320ff5e6e61..6f00e985063 100644
--- a/arch/tile/gxio/mpipe.c
+++ b/arch/tile/gxio/mpipe.c
@@ -463,6 +463,7 @@ int gxio_mpipe_set_timestamp(gxio_mpipe_context_t *context,
(uint64_t)ts->tv_nsec,
(uint64_t)cycles);
}
+EXPORT_SYMBOL_GPL(gxio_mpipe_set_timestamp);
int gxio_mpipe_get_timestamp(gxio_mpipe_context_t *context,
struct timespec *ts)
@@ -485,11 +486,13 @@ int gxio_mpipe_get_timestamp(gxio_mpipe_context_t *context,
}
return ret;
}
+EXPORT_SYMBOL_GPL(gxio_mpipe_get_timestamp);
int gxio_mpipe_adjust_timestamp(gxio_mpipe_context_t *context, int64_t delta)
{
return gxio_mpipe_adjust_timestamp_aux(context, delta);
}
+EXPORT_SYMBOL_GPL(gxio_mpipe_adjust_timestamp);
/* Get our internal context used for link name access. This context is
* special in that it is not associated with an mPIPE service domain.
@@ -542,6 +545,7 @@ int gxio_mpipe_link_instance(const char *link_name)
return gxio_mpipe_info_instance_aux(context, name);
}
+EXPORT_SYMBOL_GPL(gxio_mpipe_link_instance);
int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac)
{
diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h
index d372641054d..6ef4ecab1df 100644
--- a/arch/tile/include/asm/io.h
+++ b/arch/tile/include/asm/io.h
@@ -396,8 +396,7 @@ extern void ioport_unmap(void __iomem *addr);
static inline long ioport_panic(void)
{
#ifdef __tilegx__
- panic("PCI IO space support is disabled. Configure the kernel with"
- " CONFIG_TILE_PCI_IO to enable it");
+ panic("PCI IO space support is disabled. Configure the kernel with CONFIG_TILE_PCI_IO to enable it");
#else
panic("inb/outb and friends do not exist on tile");
#endif
@@ -406,7 +405,7 @@ static inline long ioport_panic(void)
static inline void __iomem *ioport_map(unsigned long port, unsigned int len)
{
- pr_info("ioport_map: mapping IO resources is unsupported on tile.\n");
+ pr_info("ioport_map: mapping IO resources is unsupported on tile\n");
return NULL;
}
diff --git a/arch/tile/include/asm/pgtable.h b/arch/tile/include/asm/pgtable.h
index 33587f16c15..5d1950788c6 100644
--- a/arch/tile/include/asm/pgtable.h
+++ b/arch/tile/include/asm/pgtable.h
@@ -235,9 +235,9 @@ static inline void __pte_clear(pte_t *ptep)
#define pte_donemigrate(x) hv_pte_set_present(hv_pte_clear_migrating(x))
#define pte_ERROR(e) \
- pr_err("%s:%d: bad pte 0x%016llx.\n", __FILE__, __LINE__, pte_val(e))
+ pr_err("%s:%d: bad pte 0x%016llx\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
- pr_err("%s:%d: bad pgd 0x%016llx.\n", __FILE__, __LINE__, pgd_val(e))
+ pr_err("%s:%d: bad pgd 0x%016llx\n", __FILE__, __LINE__, pgd_val(e))
/* Return PA and protection info for a given kernel VA. */
int va_to_cpa_and_pte(void *va, phys_addr_t *cpa, pte_t *pte);
diff --git a/arch/tile/include/asm/pgtable_64.h b/arch/tile/include/asm/pgtable_64.h
index 2c8a9cd102d..e96cec52f6d 100644
--- a/arch/tile/include/asm/pgtable_64.h
+++ b/arch/tile/include/asm/pgtable_64.h
@@ -86,7 +86,7 @@ static inline int pud_huge_page(pud_t pud)
}
#define pmd_ERROR(e) \
- pr_err("%s:%d: bad pmd 0x%016llx.\n", __FILE__, __LINE__, pmd_val(e))
+ pr_err("%s:%d: bad pmd 0x%016llx\n", __FILE__, __LINE__, pmd_val(e))
static inline void pud_clear(pud_t *pudp)
{
diff --git a/arch/tile/include/uapi/asm/ptrace.h b/arch/tile/include/uapi/asm/ptrace.h
index 7757e1985fb..d03b829857e 100644
--- a/arch/tile/include/uapi/asm/ptrace.h
+++ b/arch/tile/include/uapi/asm/ptrace.h
@@ -52,12 +52,16 @@ typedef uint_reg_t pt_reg_t;
* system call or exception. "struct sigcontext" has the same shape.
*/
struct pt_regs {
- /* Saved main processor registers; 56..63 are special. */
- /* tp, sp, and lr must immediately follow regs[] for aliasing. */
- pt_reg_t regs[53];
- pt_reg_t tp; /* aliases regs[TREG_TP] */
- pt_reg_t sp; /* aliases regs[TREG_SP] */
- pt_reg_t lr; /* aliases regs[TREG_LR] */
+ union {
+ /* Saved main processor registers; 56..63 are special. */
+ pt_reg_t regs[56];
+ struct {
+ pt_reg_t __regs[53];
+ pt_reg_t tp; /* aliases regs[TREG_TP] */
+ pt_reg_t sp; /* aliases regs[TREG_SP] */
+ pt_reg_t lr; /* aliases regs[TREG_LR] */
+ };
+ };
/* Saved special registers. */
pt_reg_t pc; /* stored in EX_CONTEXT_K_0 */
diff --git a/arch/tile/include/uapi/asm/sigcontext.h b/arch/tile/include/uapi/asm/sigcontext.h
index 6348e59d372..39ff5d1a232 100644
--- a/arch/tile/include/uapi/asm/sigcontext.h
+++ b/arch/tile/include/uapi/asm/sigcontext.h
@@ -24,10 +24,16 @@
* but is simplified since we know the fault is from userspace.
*/
struct sigcontext {
- __uint_reg_t gregs[53]; /* General-purpose registers. */
- __uint_reg_t tp; /* Aliases gregs[TREG_TP]. */
- __uint_reg_t sp; /* Aliases gregs[TREG_SP]. */
- __uint_reg_t lr; /* Aliases gregs[TREG_LR]. */
+ __extension__ union {
+ /* General-purpose registers. */
+ __uint_reg_t gregs[56];
+ __extension__ struct {
+ __uint_reg_t __gregs[53];
+ __uint_reg_t tp; /* Aliases gregs[TREG_TP]. */
+ __uint_reg_t sp; /* Aliases gregs[TREG_SP]. */
+ __uint_reg_t lr; /* Aliases gregs[TREG_LR]. */
+ };
+ };
__uint_reg_t pc; /* Program counter. */
__uint_reg_t ics; /* In Interrupt Critical Section? */
__uint_reg_t faultnum; /* Fault number. */
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c
index aca6000bca7..c4646bb9934 100644
--- a/arch/tile/kernel/hardwall.c
+++ b/arch/tile/kernel/hardwall.c
@@ -365,8 +365,7 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
* to quiesce.
*/
if (rect->teardown_in_progress) {
- pr_notice("cpu %d: detected %s hardwall violation %#lx"
- " while teardown already in progress\n",
+ pr_notice("cpu %d: detected %s hardwall violation %#lx while teardown already in progress\n",
cpu, hwt->name,
(long)mfspr_XDN(hwt, DIRECTION_PROTECT));
goto done;
@@ -630,8 +629,7 @@ static void _hardwall_deactivate(struct hardwall_type *hwt,
struct thread_struct *ts = &task->thread;
if (cpumask_weight(&task->cpus_allowed) != 1) {
- pr_err("pid %d (%s) releasing %s hardwall with"
- " an affinity mask containing %d cpus!\n",
+ pr_err("pid %d (%s) releasing %s hardwall with an affinity mask containing %d cpus!\n",
task->pid, task->comm, hwt->name,
cpumask_weight(&task->cpus_allowed));
BUG();
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
index ba85765e143..22044fc691e 100644
--- a/arch/tile/kernel/irq.c
+++ b/arch/tile/kernel/irq.c
@@ -107,9 +107,8 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
{
long sp = stack_pointer - (long) current_thread_info();
if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
- pr_emerg("tile_dev_intr: "
- "stack overflow: %ld\n",
- sp - sizeof(struct thread_info));
+ pr_emerg("%s: stack overflow: %ld\n",
+ __func__, sp - sizeof(struct thread_info));
dump_stack();
}
}
diff --git a/arch/tile/kernel/kgdb.c b/arch/tile/kernel/kgdb.c
index 4cd88381a83..ff5335ae050 100644
--- a/arch/tile/kernel/kgdb.c
+++ b/arch/tile/kernel/kgdb.c
@@ -125,9 +125,7 @@ int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
void
sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
{
- int reg;
struct pt_regs *thread_regs;
- unsigned long *ptr = gdb_regs;
if (task == NULL)
return;
@@ -136,9 +134,7 @@ sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
memset(gdb_regs, 0, NUMREGBYTES);
thread_regs = task_pt_regs(task);
- for (reg = 0; reg <= TREG_LAST_GPR; reg++)
- *(ptr++) = thread_regs->regs[reg];
-
+ memcpy(gdb_regs, thread_regs, TREG_LAST_GPR * sizeof(unsigned long));
gdb_regs[TILEGX_PC_REGNUM] = thread_regs->pc;
gdb_regs[TILEGX_FAULTNUM_REGNUM] = thread_regs->faultnum;
}
diff --git a/arch/tile/kernel/kprobes.c b/arch/tile/kernel/kprobes.c
index 27cdcacbe81..f8a45c51e9e 100644
--- a/arch/tile/kernel/kprobes.c
+++ b/arch/tile/kernel/kprobes.c
@@ -90,8 +90,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
return -EINVAL;
if (insn_has_control(*p->addr)) {
- pr_notice("Kprobes for control instructions are not "
- "supported\n");
+ pr_notice("Kprobes for control instructions are not supported\n");
return -EINVAL;
}
diff --git a/arch/tile/kernel/machine_kexec.c b/arch/tile/kernel/machine_kexec.c
index f0b54a93471..008aa2faef5 100644
--- a/arch/tile/kernel/machine_kexec.c
+++ b/arch/tile/kernel/machine_kexec.c
@@ -77,16 +77,13 @@ void machine_crash_shutdown(struct pt_regs *regs)
int machine_kexec_prepare(struct kimage *image)
{
if (num_online_cpus() > 1) {
- pr_warning("%s: detected attempt to kexec "
- "with num_online_cpus() > 1\n",
- __func__);
+ pr_warn("%s: detected attempt to kexec with num_online_cpus() > 1\n",
+ __func__);
return -ENOSYS;
}
if (image->type != KEXEC_TYPE_DEFAULT) {
- pr_warning("%s: detected attempt to kexec "
- "with unsupported type: %d\n",
- __func__,
- image->type);
+ pr_warn("%s: detected attempt to kexec with unsupported type: %d\n",
+ __func__, image->type);
return -ENOSYS;
}
return 0;
@@ -131,8 +128,8 @@ static unsigned char *kexec_bn2cl(void *pg)
*/
csum = ip_compute_csum(pg, bhdrp->b_size);
if (csum != 0) {
- pr_warning("%s: bad checksum %#x (size %d)\n",
- __func__, csum, bhdrp->b_size);
+ pr_warn("%s: bad checksum %#x (size %d)\n",
+ __func__, csum, bhdrp->b_size);
return 0;
}
@@ -160,8 +157,7 @@ static unsigned char *kexec_bn2cl(void *pg)
while (*desc != '\0') {
desc++;
if (((unsigned long)desc & PAGE_MASK) != (unsigned long)pg) {
- pr_info("%s: ran off end of page\n",
- __func__);
+ pr_info("%s: ran off end of page\n", __func__);
return 0;
}
}
@@ -195,20 +191,18 @@ static void kexec_find_and_set_command_line(struct kimage *image)
}
if (command_line != 0) {
- pr_info("setting new command line to \"%s\"\n",
- command_line);
+ pr_info("setting new command line to \"%s\"\n", command_line);
hverr = hv_set_command_line(
(HV_VirtAddr) command_line, strlen(command_line));
kunmap_atomic(command_line);
} else {
- pr_info("%s: no command line found; making empty\n",
- __func__);
+ pr_info("%s: no command line found; making empty\n", __func__);
hverr = hv_set_command_line((HV_VirtAddr) command_line, 0);
}
if (hverr)
- pr_warning("%s: hv_set_command_line returned error: %d\n",
- __func__, hverr);
+ pr_warn("%s: hv_set_command_line returned error: %d\n",
+ __func__, hverr);
}
/*
diff --git a/arch/tile/kernel/messaging.c b/arch/tile/kernel/messaging.c
index ac950be1318..7475af3aace 100644
--- a/arch/tile/kernel/messaging.c
+++ b/arch/tile/kernel/messaging.c
@@ -59,9 +59,8 @@ void hv_message_intr(struct pt_regs *regs, int intnum)
{
long sp = stack_pointer - (long) current_thread_info();
if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
- pr_emerg("hv_message_intr: "
- "stack overflow: %ld\n",
- sp - sizeof(struct thread_info));
+ pr_emerg("%s: stack overflow: %ld\n",
+ __func__, sp - sizeof(struct thread_info));
dump_stack();
}
}
diff --git a/arch/tile/kernel/module.c b/arch/tile/kernel/module.c
index d19b13e3a59..96447c9160a 100644
--- a/arch/tile/kernel/module.c
+++ b/arch/tile/kernel/module.c
@@ -96,8 +96,8 @@ void module_free(struct module *mod, void *module_region)
static int validate_hw2_last(long value, struct module *me)
{
if (((value << 16) >> 16) != value) {
- pr_warning("module %s: Out of range HW2_LAST value %#lx\n",
- me->name, value);
+ pr_warn("module %s: Out of range HW2_LAST value %#lx\n",
+ me->name, value);
return 0;
}
return 1;
@@ -210,10 +210,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
value -= (unsigned long) location; /* pc-relative */
value = (long) value >> 3; /* count by instrs */
if (!validate_jumpoff(value)) {
- pr_warning("module %s: Out of range jump to"
- " %#llx at %#llx (%p)\n", me->name,
- sym->st_value + rel[i].r_addend,
- rel[i].r_offset, location);
+ pr_warn("module %s: Out of range jump to %#llx at %#llx (%p)\n",
+ me->name,
+ sym->st_value + rel[i].r_addend,
+ rel[i].r_offset, location);
return -ENOEXEC;
}
MUNGE(create_JumpOff_X1);
diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c
index 1f80a88c75a..f70c7892fa2 100644
--- a/arch/tile/kernel/pci.c
+++ b/arch/tile/kernel/pci.c
@@ -178,8 +178,8 @@ int __init tile_pci_init(void)
continue;
hv_cfg_fd1 = tile_pcie_open(i, 1);
if (hv_cfg_fd1 < 0) {
- pr_err("PCI: Couldn't open config fd to HV "
- "for controller %d\n", i);
+ pr_err("PCI: Couldn't open config fd to HV for controller %d\n",
+ i);
goto err_cont;
}
@@ -423,8 +423,7 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
for (i = 0; i < 6; i++) {
r = &dev->resource[i];
if (r->flags & IORESOURCE_UNSET) {
- pr_err("PCI: Device %s not available "
- "because of resource collisions\n",
+ pr_err("PCI: Device %s not available because of resource collisions\n",
pci_name(dev));
return -EINVAL;
}
diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c
index e717af20dad..2c95f37ebbe 100644
--- a/arch/tile/kernel/pci_gx.c
+++ b/arch/tile/kernel/pci_gx.c
@@ -131,8 +131,7 @@ static int tile_irq_cpu(int irq)
count = cpumask_weight(&intr_cpus_map);
if (unlikely(count == 0)) {
- pr_warning("intr_cpus_map empty, interrupts will be"
- " delievered to dataplane tiles\n");
+ pr_warn("intr_cpus_map empty, interrupts will be delievered to dataplane tiles\n");
return irq % (smp_height * smp_width);
}
@@ -197,16 +196,16 @@ static int tile_pcie_open(int trio_index)
/* Get the properties of the PCIe ports on this TRIO instance. */
ret = gxio_trio_get_port_property(context, &pcie_ports[trio_index]);
if (ret < 0) {
- pr_err("PCI: PCIE_GET_PORT_PROPERTY failure, error %d,"
- " on TRIO %d\n", ret, trio_index);
+ pr_err("PCI: PCIE_GET_PORT_PROPERTY failure, error %d, on TRIO %d\n",
+ ret, trio_index);
goto get_port_property_failure;
}
context->mmio_base_mac =
iorpc_ioremap(context->fd, 0, HV_TRIO_CONFIG_IOREMAP_SIZE);
if (context->mmio_base_mac == NULL) {
- pr_err("PCI: TRIO config space mapping failure, error %d,"
- " on TRIO %d\n", ret, trio_index);
+ pr_err("PCI: TRIO config space mapping failure, error %d, on TRIO %d\n",
+ ret, trio_index);
ret = -ENOMEM;
goto trio_mmio_mapping_failure;
@@ -622,9 +621,8 @@ static void fixup_read_and_payload_sizes(struct pci_controller *controller)
dev_control.max_read_req_sz,
mac);
if (err < 0) {
- pr_err("PCI: PCIE_CONFIGURE_MAC_MPS_MRS failure, "
- "MAC %d on TRIO %d\n",
- mac, controller->trio_index);
+ pr_err("PCI: PCIE_CONFIGURE_MAC_MPS_MRS failure, MAC %d on TRIO %d\n",
+ mac, controller->trio_index);
}
}
@@ -720,27 +718,24 @@ int __init pcibios_init(void)
reg_offset);
if (!port_status.dl_up) {
if (rc_delay[trio_index][mac]) {
- pr_info("Delaying PCIe RC TRIO init %d sec"
- " on MAC %d on TRIO %d\n",
+ pr_info("Delaying PCIe RC TRIO init %d sec on MAC %d on TRIO %d\n",
rc_delay[trio_index][mac], mac,
trio_index);
msleep(rc_delay[trio_index][mac] * 1000);
}
ret = gxio_trio_force_rc_link_up(trio_context, mac);
if (ret < 0)
- pr_err("PCI: PCIE_FORCE_LINK_UP failure, "
- "MAC %d on TRIO %d\n", mac, trio_index);
+ pr_err("PCI: PCIE_FORCE_LINK_UP failure, MAC %d on TRIO %d\n",
+ mac, trio_index);
}
- pr_info("PCI: Found PCI controller #%d on TRIO %d MAC %d\n", i,
- trio_index, controller->mac);
+ pr_info("PCI: Found PCI controller #%d on TRIO %d MAC %d\n",
+ i, trio_index, controller->mac);
/* Delay the bus probe if needed. */
if (rc_delay[trio_index][mac]) {
- pr_info("Delaying PCIe RC bus enumerating %d sec"
- " on MAC %d on TRIO %d\n",
- rc_delay[trio_index][mac], mac,
- trio_index);
+ pr_info("Delaying PCIe RC bus enumerating %d sec on MAC %d on TRIO %d\n",
+ rc_delay[trio_index][mac], mac, trio_index);
msleep(rc_delay[trio_index][mac] * 1000);
} else {
/*
@@ -758,11 +753,10 @@ int __init pcibios_init(void)
if (pcie_ports[trio_index].ports[mac].removable) {
pr_info("PCI: link is down, MAC %d on TRIO %d\n",
mac, trio_index);
- pr_info("This is expected if no PCIe card"
- " is connected to this link\n");
+ pr_info("This is expected if no PCIe card is connected to this link\n");
} else
pr_err("PCI: link is down, MAC %d on TRIO %d\n",
- mac, trio_index);
+ mac, trio_index);
continue;
}
@@ -829,8 +823,8 @@ int __init pcibios_init(void)
/* Alloc a PIO region for PCI config access per MAC. */
ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
if (ret < 0) {
- pr_err("PCI: PCI CFG PIO alloc failure for mac %d "
- "on TRIO %d, give up\n", mac, trio_index);
+ pr_err("PCI: PCI CFG PIO alloc failure for mac %d on TRIO %d, give up\n",
+ mac, trio_index);
continue;
}
@@ -842,8 +836,8 @@ int __init pcibios_init(void)
trio_context->pio_cfg_index[mac],
mac, 0, HV_TRIO_PIO_FLAG_CONFIG_SPACE);
if (ret < 0) {
- pr_err("PCI: PCI CFG PIO init failure for mac %d "
- "on TRIO %d, give up\n", mac, trio_index);
+ pr_err("PCI: PCI CFG PIO init failure for mac %d on TRIO %d, give up\n",
+ mac, trio_index);
continue;
}
@@ -865,7 +859,7 @@ int __init pcibios_init(void)
(TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT - 1)));
if (trio_context->mmio_base_pio_cfg[mac] == NULL) {
pr_err("PCI: PIO map failure for mac %d on TRIO %d\n",
- mac, trio_index);
+ mac, trio_index);
continue;
}
@@ -925,9 +919,8 @@ int __init pcibios_init(void)
/* Alloc a PIO region for PCI memory access for each RC port. */
ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
if (ret < 0) {
- pr_err("PCI: MEM PIO alloc failure on TRIO %d mac %d, "
- "give up\n", controller->trio_index,
- controller->mac);
+ pr_err("PCI: MEM PIO alloc failure on TRIO %d mac %d, give up\n",
+ controller->trio_index, controller->mac);
continue;
}
@@ -944,9 +937,8 @@ int __init pcibios_init(void)
0,
0);
if (ret < 0) {
- pr_err("PCI: MEM PIO init failure on TRIO %d mac %d, "
- "give up\n", controller->trio_index,
- controller->mac);
+ pr_err("PCI: MEM PIO init failure on TRIO %d mac %d, give up\n",
+ controller->trio_index, controller->mac);
continue;
}
@@ -957,9 +949,8 @@ int __init pcibios_init(void)
*/
ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
if (ret < 0) {
- pr_err("PCI: I/O PIO alloc failure on TRIO %d mac %d, "
- "give up\n", controller->trio_index,
- controller->mac);
+ pr_err("PCI: I/O PIO alloc failure on TRIO %d mac %d, give up\n",
+ controller->trio_index, controller->mac);
continue;
}
@@ -976,9 +967,8 @@ int __init pcibios_init(void)
0,
HV_TRIO_PIO_FLAG_IO_SPACE);
if (ret < 0) {
- pr_err("PCI: I/O PIO init failure on TRIO %d mac %d, "
- "give up\n", controller->trio_index,
- controller->mac);
+ pr_err("PCI: I/O PIO init failure on TRIO %d mac %d, give up\n",
+ controller->trio_index, controller->mac);
continue;
}
@@ -997,10 +987,9 @@ int __init pcibios_init(void)
ret = gxio_trio_alloc_memory_maps(trio_context, 1, 0,
0);
if (ret < 0) {
- pr_err("PCI: Mem-Map alloc failure on TRIO %d "
- "mac %d for MC %d, give up\n",
- controller->trio_index,
- controller->mac, j);
+ pr_err("PCI: Mem-Map alloc failure on TRIO %d mac %d for MC %d, give up\n",
+ controller->trio_index, controller->mac,
+ j);
goto alloc_mem_map_failed;
}
@@ -1030,10 +1019,9 @@ int __init pcibios_init(void)
j,
GXIO_TRIO_ORDER_MODE_UNORDERED);
if (ret < 0) {
- pr_err("PCI: Mem-Map init failure on TRIO %d "
- "mac %d for MC %d, give up\n",
- controller->trio_index,
- controller->mac, j);
+ pr_err("PCI: Mem-Map init failure on TRIO %d mac %d for MC %d, give up\n",
+ controller->trio_index, controller->mac,
+ j);
goto alloc_mem_map_failed;
}
@@ -1510,9 +1498,7 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
* Most PCIe endpoint devices do support 64-bit message addressing.
*/
if (desc->msi_attrib.is_64 == 0) {
- dev_printk(KERN_INFO, &pdev->dev,
- "64-bit MSI message address not supported, "
- "falling back to legacy interrupts.\n");
+ dev_info(&pdev->dev, "64-bit MSI message address not supported, falling back to legacy interrupts\n");
ret = -ENOMEM;
goto is_64_failure;
@@ -1549,11 +1535,8 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
/* SQ regions are out, allocate from map mem regions. */
mem_map = gxio_trio_alloc_memory_maps(trio_context, 1, 0, 0);
if (mem_map < 0) {
- dev_printk(KERN_INFO, &pdev->dev,
- "%s Mem-Map alloc failure. "
- "Failed to initialize MSI interrupts. "
- "Falling back to legacy interrupts.\n",
- desc->msi_attrib.is_msix ? "MSI-X" : "MSI");
+ dev_info(&pdev->dev, "%s Mem-Map alloc failure - failed to initialize MSI interrupts - falling back to legacy interrupts\n",
+ desc->msi_attrib.is_msix ? "MSI-X" : "MSI");
ret = -ENOMEM;
goto msi_mem_map_alloc_failure;
}
@@ -1580,7 +1563,7 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
mem_map, mem_map_base, mem_map_limit,
trio_context->asid);
if (ret < 0) {
- dev_printk(KERN_INFO, &pdev->dev, "HV MSI config failed.\n");
+ dev_info(&pdev->dev, "HV MSI config failed\n");
goto hv_msi_config_failure;
}
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 0050cbc1d9d..48e5773dd0b 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -52,7 +52,7 @@ static int __init idle_setup(char *str)
return -EINVAL;
if (!strcmp(str, "poll")) {
- pr_info("using polling idle threads.\n");
+ pr_info("using polling idle threads\n");
cpu_idle_poll_ctrl(true);
return 0;
} else if (!strcmp(str, "halt")) {
@@ -547,27 +547,25 @@ void show_regs(struct pt_regs *regs)
struct task_struct *tsk = validate_current();
int i;
- pr_err("\n");
if (tsk != &corrupt_current)
show_regs_print_info(KERN_ERR);
#ifdef __tilegx__
for (i = 0; i < 17; i++)
- pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
+ pr_err(" r%-2d: " REGFMT " r%-2d: " REGFMT " r%-2d: " REGFMT "\n",
i, regs->regs[i], i+18, regs->regs[i+18],
i+36, regs->regs[i+36]);
- pr_err(" r17: "REGFMT" r35: "REGFMT" tp : "REGFMT"\n",
+ pr_err(" r17: " REGFMT " r35: " REGFMT " tp : " REGFMT "\n",
regs->regs[17], regs->regs[35], regs->tp);
- pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr);
+ pr_err(" sp : " REGFMT " lr : " REGFMT "\n", regs->sp, regs->lr);
#else
for (i = 0; i < 13; i++)
- pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT
- " r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
+ pr_err(" r%-2d: " REGFMT " r%-2d: " REGFMT " r%-2d: " REGFMT " r%-2d: " REGFMT "\n",
i, regs->regs[i], i+14, regs->regs[i+14],
i+27, regs->regs[i+27], i+40, regs->regs[i+40]);
- pr_err(" r13: "REGFMT" tp : "REGFMT" sp : "REGFMT" lr : "REGFMT"\n",
+ pr_err(" r13: " REGFMT " tp : " REGFMT " sp : " REGFMT " lr : " REGFMT "\n",
regs->regs[13], regs->tp, regs->sp, regs->lr);
#endif
- pr_err(" pc : "REGFMT" ex1: %ld faultnum: %ld\n",
+ pr_err(" pc : " REGFMT " ex1: %ld faultnum: %ld\n",
regs->pc, regs->ex1, regs->faultnum);
dump_stack_regs(regs);
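
The REGFMT edits above only add spaces around the macro inside concatenated string literals; adjacent literals still concatenate to the same format string, but the spaced form avoids the literal-followed-by-identifier sequence that checkpatch flags and reads more clearly. A small sketch, where REGFMT is a stand-in definition rather than the tile one:

#include <linux/printk.h>

#define REGFMT "%016lx"	/* illustrative stand-in */

/* " sp : "REGFMT"\n" and " sp : " REGFMT "\n" expand to the same string;
 * only the spaced form passes checkpatch cleanly. */
static void show_sp(unsigned long sp)
{
	pr_err(" sp : " REGFMT "\n", sp);
}
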
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 7f079bbfdf4..864eea69556 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -130,7 +130,7 @@ static int __init setup_maxmem(char *str)
maxmem_pfn = (maxmem >> HPAGE_SHIFT) << (HPAGE_SHIFT - PAGE_SHIFT);
pr_info("Forcing RAM used to no more than %dMB\n",
- maxmem_pfn >> (20 - PAGE_SHIFT));
+ maxmem_pfn >> (20 - PAGE_SHIFT));
return 0;
}
early_param("maxmem", setup_maxmem);
@@ -149,7 +149,7 @@ static int __init setup_maxnodemem(char *str)
maxnodemem_pfn[node] = (maxnodemem >> HPAGE_SHIFT) <<
(HPAGE_SHIFT - PAGE_SHIFT);
pr_info("Forcing RAM used on node %ld to no more than %dMB\n",
- node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT));
+ node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT));
return 0;
}
early_param("maxnodemem", setup_maxnodemem);
@@ -417,8 +417,7 @@ static void __init setup_memory(void)
range.start = (start_pa + HPAGE_SIZE - 1) & HPAGE_MASK;
range.size -= (range.start - start_pa);
range.size &= HPAGE_MASK;
- pr_err("Range not hugepage-aligned: %#llx..%#llx:"
- " now %#llx-%#llx\n",
+ pr_err("Range not hugepage-aligned: %#llx..%#llx: now %#llx-%#llx\n",
start_pa, start_pa + orig_size,
range.start, range.start + range.size);
}
@@ -437,8 +436,8 @@ static void __init setup_memory(void)
if (PFN_DOWN(range.size) > maxnodemem_pfn[i]) {
int max_size = maxnodemem_pfn[i];
if (max_size > 0) {
- pr_err("Maxnodemem reduced node %d to"
- " %d pages\n", i, max_size);
+ pr_err("Maxnodemem reduced node %d to %d pages\n",
+ i, max_size);
range.size = PFN_PHYS(max_size);
} else {
pr_err("Maxnodemem disabled node %d\n", i);
@@ -490,8 +489,8 @@ static void __init setup_memory(void)
NR_CPUS * (PFN_UP(per_cpu_size) >> PAGE_SHIFT);
if (end < pci_reserve_end_pfn + percpu_pages) {
end = pci_reserve_start_pfn;
- pr_err("PCI mapping region reduced node %d to"
- " %ld pages\n", i, end - start);
+ pr_err("PCI mapping region reduced node %d to %ld pages\n",
+ i, end - start);
}
}
#endif
@@ -555,10 +554,9 @@ static void __init setup_memory(void)
MAXMEM_PFN : mappable_physpages;
highmem_pages = (long) (physpages - lowmem_pages);
- pr_notice("%ldMB HIGHMEM available.\n",
- pages_to_mb(highmem_pages > 0 ? highmem_pages : 0));
- pr_notice("%ldMB LOWMEM available.\n",
- pages_to_mb(lowmem_pages));
+ pr_notice("%ldMB HIGHMEM available\n",
+ pages_to_mb(highmem_pages > 0 ? highmem_pages : 0));
+ pr_notice("%ldMB LOWMEM available\n", pages_to_mb(lowmem_pages));
#else
/* Set max_low_pfn based on what node 0 can directly address. */
max_low_pfn = node_end_pfn[0];
@@ -571,8 +569,8 @@ static void __init setup_memory(void)
max_pfn = MAXMEM_PFN;
node_end_pfn[0] = MAXMEM_PFN;
} else {
- pr_notice("%ldMB memory available.\n",
- pages_to_mb(node_end_pfn[0]));
+ pr_notice("%ldMB memory available\n",
+ pages_to_mb(node_end_pfn[0]));
}
for (i = 1; i < MAX_NUMNODES; ++i) {
node_start_pfn[i] = 0;
@@ -587,8 +585,7 @@ static void __init setup_memory(void)
if (pages)
high_memory = pfn_to_kaddr(node_end_pfn[i]);
}
- pr_notice("%ldMB memory available.\n",
- pages_to_mb(lowmem_pages));
+ pr_notice("%ldMB memory available\n", pages_to_mb(lowmem_pages));
#endif
#endif
}
@@ -1535,8 +1532,7 @@ static void __init pcpu_fc_populate_pte(unsigned long addr)
BUG_ON(pgd_addr_invalid(addr));
if (addr < VMALLOC_START || addr >= VMALLOC_END)
- panic("PCPU addr %#lx outside vmalloc range %#lx..%#lx;"
- " try increasing CONFIG_VMALLOC_RESERVE\n",
+ panic("PCPU addr %#lx outside vmalloc range %#lx..%#lx; try increasing CONFIG_VMALLOC_RESERVE\n",
addr, VMALLOC_START, VMALLOC_END);
pgd = swapper_pg_dir + pgd_index(addr);
@@ -1591,8 +1587,8 @@ void __init setup_per_cpu_areas(void)
lowmem_va = (unsigned long)pfn_to_kaddr(pfn);
ptep = virt_to_kpte(lowmem_va);
if (pte_huge(*ptep)) {
- printk(KERN_DEBUG "early shatter of huge page"
- " at %#lx\n", lowmem_va);
+ printk(KERN_DEBUG "early shatter of huge page at %#lx\n",
+ lowmem_va);
shatter_pmd((pmd_t *)ptep);
ptep = virt_to_kpte(lowmem_va);
BUG_ON(pte_huge(*ptep));
diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c
index 7c2fecc5217..bb0a9ce7ae2 100644
--- a/arch/tile/kernel/signal.c
+++ b/arch/tile/kernel/signal.c
@@ -45,8 +45,7 @@
int restore_sigcontext(struct pt_regs *regs,
struct sigcontext __user *sc)
{
- int err = 0;
- int i;
+ int err;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
@@ -57,9 +56,7 @@ int restore_sigcontext(struct pt_regs *regs,
*/
BUILD_BUG_ON(sizeof(struct sigcontext) != sizeof(struct pt_regs));
BUILD_BUG_ON(sizeof(struct sigcontext) % 8 != 0);
-
- for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
- err |= __get_user(regs->regs[i], &sc->gregs[i]);
+ err = __copy_from_user(regs, sc, sizeof(*regs));
/* Ensure that the PL is always set to USER_PL. */
regs->ex1 = PL_ICS_EX1(USER_PL, EX1_ICS(regs->ex1));
@@ -110,12 +107,7 @@ badframe:
int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs)
{
- int i, err = 0;
-
- for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
- err |= __put_user(regs->regs[i], &sc->gregs[i]);
-
- return err;
+ return __copy_to_user(sc, regs, sizeof(*regs));
}
/*
@@ -345,7 +337,6 @@ static void dump_mem(void __user *address)
int i, j, k;
int found_readable_mem = 0;
- pr_err("\n");
if (!access_ok(VERIFY_READ, address, 1)) {
pr_err("Not dumping at address 0x%lx (kernel address)\n",
(unsigned long)address);
@@ -367,7 +358,7 @@ static void dump_mem(void __user *address)
(unsigned long)address);
found_readable_mem = 1;
}
- j = sprintf(line, REGFMT":", (unsigned long)addr);
+ j = sprintf(line, REGFMT ":", (unsigned long)addr);
for (k = 0; k < bytes_per_line; ++k)
j += sprintf(&line[j], " %02x", buf[k]);
pr_err("%s\n", line);
@@ -411,8 +402,7 @@ void trace_unhandled_signal(const char *type, struct pt_regs *regs,
case SIGFPE:
case SIGSEGV:
case SIGBUS:
- pr_err("User crash: signal %d,"
- " trap %ld, address 0x%lx\n",
+ pr_err("User crash: signal %d, trap %ld, address 0x%lx\n",
sig, regs->faultnum, address);
show_regs(regs);
dump_mem((void __user *)address);
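
The restore_sigcontext()/setup_sigcontext() hunks above replace per-register __get_user()/__put_user() loops with one bulk __copy_from_user()/__copy_to_user() of the whole register file, which is safe only because the BUILD_BUG_ON() checks guarantee struct sigcontext and struct pt_regs have the same size (and, implicitly, layout). A hedged sketch of the same idea with illustrative structure names rather than the tile definitions:

#include <linux/uaccess.h>
#include <linux/bug.h>

struct regs_image { unsigned long gregs[56]; };	/* illustrative */
struct user_frame { unsigned long gregs[56]; };	/* must mirror regs_image */

/* Returns 0 on success, non-zero if any bytes could not be copied. */
static int copy_regs_from_user(struct regs_image *dst,
			       const struct user_frame __user *src)
{
	/* The bulk copy is only valid while the two layouts match. */
	BUILD_BUG_ON(sizeof(struct regs_image) != sizeof(struct user_frame));
	return __copy_from_user(dst, src, sizeof(*dst));
}
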
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c
index 6cb2ce31b5a..862973074bf 100644
--- a/arch/tile/kernel/single_step.c
+++ b/arch/tile/kernel/single_step.c
@@ -222,11 +222,9 @@ static tilepro_bundle_bits rewrite_load_store_unaligned(
}
if (unaligned_printk || unaligned_fixup_count == 0) {
- pr_info("Process %d/%s: PC %#lx: Fixup of"
- " unaligned %s at %#lx.\n",
+ pr_info("Process %d/%s: PC %#lx: Fixup of unaligned %s at %#lx\n",
current->pid, current->comm, regs->pc,
- (mem_op == MEMOP_LOAD ||
- mem_op == MEMOP_LOAD_POSTINCR) ?
+ mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR ?
"load" : "store",
(unsigned long)addr);
if (!unaligned_printk) {
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c
index 0d59a1b60c7..20d52a98e17 100644
--- a/arch/tile/kernel/smpboot.c
+++ b/arch/tile/kernel/smpboot.c
@@ -127,8 +127,7 @@ static __init int reset_init_affinity(void)
{
long rc = sched_setaffinity(current->pid, &init_affinity);
if (rc != 0)
- pr_warning("couldn't reset init affinity (%ld)\n",
- rc);
+ pr_warn("couldn't reset init affinity (%ld)\n", rc);
return 0;
}
late_initcall(reset_init_affinity);
@@ -174,7 +173,7 @@ static void start_secondary(void)
/* Indicate that we're ready to come up. */
/* Must not do this before we're ready to receive messages */
if (cpumask_test_and_set_cpu(cpuid, &cpu_started)) {
- pr_warning("CPU#%d already started!\n", cpuid);
+ pr_warn("CPU#%d already started!\n", cpuid);
for (;;)
local_irq_enable();
}
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index c93977a6211..7ff5afdbd3a 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -387,9 +387,7 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
* then bust_spinlocks() spit out a space in front of us
* and it will mess up our KERN_ERR.
*/
- pr_err("\n");
- pr_err("Starting stack dump of tid %d, pid %d (%s)"
- " on cpu %d at cycle %lld\n",
+ pr_err("Starting stack dump of tid %d, pid %d (%s) on cpu %d at cycle %lld\n",
kbt->task->pid, kbt->task->tgid, kbt->task->comm,
raw_smp_processor_id(), get_cycles());
}
@@ -411,8 +409,7 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
i++, address, namebuf, (unsigned long)(kbt->it.sp));
if (i >= 100) {
- pr_err("Stack dump truncated"
- " (%d frames)\n", i);
+ pr_err("Stack dump truncated (%d frames)\n", i);
break;
}
}
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index b854a1cd007..d412b0856c0 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -98,8 +98,8 @@ void __init calibrate_delay(void)
{
loops_per_jiffy = get_clock_rate() / HZ;
pr_info("Clock rate yields %lu.%02lu BogoMIPS (lpj=%lu)\n",
- loops_per_jiffy/(500000/HZ),
- (loops_per_jiffy/(5000/HZ)) % 100, loops_per_jiffy);
+ loops_per_jiffy / (500000 / HZ),
+ (loops_per_jiffy / (5000 / HZ)) % 100, loops_per_jiffy);
}
/* Called fairly late in init/main.c, but before we go smp. */
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index 86900ccd497..bf841ca517b 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -46,9 +46,9 @@ static int __init setup_unaligned_fixup(char *str)
return 0;
pr_info("Fixups for unaligned data accesses are %s\n",
- unaligned_fixup >= 0 ?
- (unaligned_fixup ? "enabled" : "disabled") :
- "completely disabled");
+ unaligned_fixup >= 0 ?
+ (unaligned_fixup ? "enabled" : "disabled") :
+ "completely disabled");
return 1;
}
__setup("unaligned_fixup=", setup_unaligned_fixup);
@@ -305,8 +305,8 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
case INT_ILL:
if (copy_from_user(&instr, (void __user *)regs->pc,
sizeof(instr))) {
- pr_err("Unreadable instruction for INT_ILL:"
- " %#lx\n", regs->pc);
+ pr_err("Unreadable instruction for INT_ILL: %#lx\n",
+ regs->pc);
do_exit(SIGKILL);
return;
}
diff --git a/arch/tile/kernel/unaligned.c b/arch/tile/kernel/unaligned.c
index c02ea2a45f6..7d9a83be0ac 100644
--- a/arch/tile/kernel/unaligned.c
+++ b/arch/tile/kernel/unaligned.c
@@ -969,8 +969,7 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle,
unaligned_fixup_count++;
if (unaligned_printk) {
- pr_info("%s/%d. Unalign fixup for kernel access "
- "to userspace %lx.",
+ pr_info("%s/%d - Unalign fixup for kernel access to userspace %lx\n",
current->comm, current->pid, regs->regs[ra]);
}
@@ -985,7 +984,7 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle,
.si_addr = (unsigned char __user *)0
};
if (unaligned_printk)
- pr_info("Unalign bundle: unexp @%llx, %llx",
+ pr_info("Unalign bundle: unexp @%llx, %llx\n",
(unsigned long long)regs->pc,
(unsigned long long)bundle);
@@ -1370,8 +1369,7 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle,
frag.bundle = bundle;
if (unaligned_printk) {
- pr_info("%s/%d, Unalign fixup: pc=%lx "
- "bundle=%lx %d %d %d %d %d %d %d %d.",
+ pr_info("%s/%d, Unalign fixup: pc=%lx bundle=%lx %d %d %d %d %d %d %d %d\n",
current->comm, current->pid,
(unsigned long)frag.pc,
(unsigned long)frag.bundle,
@@ -1380,8 +1378,8 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle,
(int)y1_lr, (int)y1_br, (int)x1_add);
for (k = 0; k < n; k += 2)
- pr_info("[%d] %016llx %016llx", k,
- (unsigned long long)frag.insn[k],
+ pr_info("[%d] %016llx %016llx\n",
+ k, (unsigned long long)frag.insn[k],
(unsigned long long)frag.insn[k+1]);
}
@@ -1402,7 +1400,7 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle,
.si_addr = (void __user *)&jit_code_area[idx]
};
- pr_warn("Unalign fixup: pid=%d %s jit_code_area=%llx",
+ pr_warn("Unalign fixup: pid=%d %s jit_code_area=%llx\n",
current->pid, current->comm,
(unsigned long long)&jit_code_area[idx]);
@@ -1485,7 +1483,7 @@ void do_unaligned(struct pt_regs *regs, int vecnum)
/* If exception came from kernel, try fix it up. */
if (fixup_exception(regs)) {
if (unaligned_printk)
- pr_info("Unalign fixup: %d %llx @%llx",
+ pr_info("Unalign fixup: %d %llx @%llx\n",
(int)unaligned_fixup,
(unsigned long long)regs->ex1,
(unsigned long long)regs->pc);
@@ -1519,7 +1517,7 @@ void do_unaligned(struct pt_regs *regs, int vecnum)
};
if (unaligned_printk)
- pr_info("Unalign fixup: %d %llx @%llx",
+ pr_info("Unalign fixup: %d %llx @%llx\n",
(int)unaligned_fixup,
(unsigned long long)regs->ex1,
(unsigned long long)regs->pc);
@@ -1579,14 +1577,14 @@ void do_unaligned(struct pt_regs *regs, int vecnum)
0);
if (IS_ERR((void __force *)user_page)) {
- pr_err("Out of kernel pages trying do_mmap.\n");
+ pr_err("Out of kernel pages trying do_mmap\n");
return;
}
/* Save the address in the thread_info struct */
info->unalign_jit_base = user_page;
if (unaligned_printk)
- pr_info("Unalign bundle: %d:%d, allocate page @%llx",
+ pr_info("Unalign bundle: %d:%d, allocate page @%llx\n",
raw_smp_processor_id(), current->pid,
(unsigned long long)user_page);
}
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 6c0571216a9..565e25a9833 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -169,8 +169,7 @@ static void wait_for_migration(pte_t *pte)
while (pte_migrating(*pte)) {
barrier();
if (++retries > bound)
- panic("Hit migrating PTE (%#llx) and"
- " page PFN %#lx still migrating",
+ panic("Hit migrating PTE (%#llx) and page PFN %#lx still migrating",
pte->val, pte_pfn(*pte));
}
}
@@ -292,11 +291,10 @@ static int handle_page_fault(struct pt_regs *regs,
*/
stack_offset = stack_pointer & (THREAD_SIZE-1);
if (stack_offset < THREAD_SIZE / 8) {
- pr_alert("Potential stack overrun: sp %#lx\n",
- stack_pointer);
+ pr_alert("Potential stack overrun: sp %#lx\n", stack_pointer);
show_regs(regs);
pr_alert("Killing current process %d/%s\n",
- tsk->pid, tsk->comm);
+ tsk->pid, tsk->comm);
do_group_exit(SIGKILL);
}
@@ -421,7 +419,7 @@ good_area:
} else if (write) {
#ifdef TEST_VERIFY_AREA
if (!is_page_fault && regs->cs == KERNEL_CS)
- pr_err("WP fault at "REGFMT"\n", regs->eip);
+ pr_err("WP fault at " REGFMT "\n", regs->eip);
#endif
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
@@ -519,16 +517,15 @@ no_context:
pte_t *pte = lookup_address(address);
if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
- pr_crit("kernel tried to execute"
- " non-executable page - exploit attempt?"
- " (uid: %d)\n", current->uid);
+ pr_crit("kernel tried to execute non-executable page - exploit attempt? (uid: %d)\n",
+ current->uid);
}
#endif
if (address < PAGE_SIZE)
pr_alert("Unable to handle kernel NULL pointer dereference\n");
else
pr_alert("Unable to handle kernel paging request\n");
- pr_alert(" at virtual address "REGFMT", pc "REGFMT"\n",
+ pr_alert(" at virtual address " REGFMT ", pc " REGFMT "\n",
address, regs->pc);
show_regs(regs);
@@ -575,9 +572,10 @@ do_sigbus:
#ifndef __tilegx__
/* We must release ICS before panicking or we won't get anywhere. */
-#define ics_panic(fmt, ...) do { \
- __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); \
- panic(fmt, __VA_ARGS__); \
+#define ics_panic(fmt, ...) \
+do { \
+ __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); \
+ panic(fmt, ##__VA_ARGS__); \
} while (0)
/*
@@ -615,8 +613,7 @@ struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
fault_num != INT_DTLB_ACCESS)) {
unsigned long old_pc = regs->pc;
regs->pc = pc;
- ics_panic("Bad ICS page fault args:"
- " old PC %#lx, fault %d/%d at %#lx\n",
+ ics_panic("Bad ICS page fault args: old PC %#lx, fault %d/%d at %#lx",
old_pc, fault_num, write, address);
}
@@ -669,8 +666,8 @@ struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
#endif
fixup = search_exception_tables(pc);
if (!fixup)
- ics_panic("ICS atomic fault not in table:"
- " PC %#lx, fault %d", pc, fault_num);
+ ics_panic("ICS atomic fault not in table: PC %#lx, fault %d",
+ pc, fault_num);
regs->pc = fixup->fixup;
regs->ex1 = PL_ICS_EX1(KERNEL_PL, 0);
}
@@ -826,8 +823,7 @@ void do_page_fault(struct pt_regs *regs, int fault_num,
set_thread_flag(TIF_ASYNC_TLB);
if (async->fault_num != 0) {
- panic("Second async fault %d;"
- " old fault was %d (%#lx/%ld)",
+ panic("Second async fault %d; old fault was %d (%#lx/%ld)",
fault_num, async->fault_num,
address, write);
}
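
The ics_panic() rewrite above also switches the macro from __VA_ARGS__ to ##__VA_ARGS__ (a GNU C extension the kernel relies on), so the trailing comma is dropped when the macro is invoked with a bare format string and no extra arguments. A minimal sketch of the pattern, with the tile-specific SPR write reduced to a comment:

#include <linux/kernel.h>

#define example_panic(fmt, ...)					\
do {								\
	/* tile would clear SPR_INTERRUPT_CRITICAL_SECTION here */	\
	panic(fmt, ##__VA_ARGS__);				\
} while (0)

/* Both forms now compile:
 *   example_panic("plain message");
 *   example_panic("fault %d at %#lx", fault_num, address);
 */
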
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index 33294fdc402..cd3387370eb 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -152,12 +152,10 @@ void flush_remote(unsigned long cache_pfn, unsigned long cache_control,
cpumask_scnprintf(cache_buf, sizeof(cache_buf), &cache_cpumask_copy);
cpumask_scnprintf(tlb_buf, sizeof(tlb_buf), &tlb_cpumask_copy);
- pr_err("hv_flush_remote(%#llx, %#lx, %p [%s],"
- " %#lx, %#lx, %#lx, %p [%s], %p, %d) = %d\n",
+ pr_err("hv_flush_remote(%#llx, %#lx, %p [%s], %#lx, %#lx, %#lx, %p [%s], %p, %d) = %d\n",
cache_pa, cache_control, cache_cpumask, cache_buf,
(unsigned long)tlb_va, tlb_length, tlb_pgsize,
- tlb_cpumask, tlb_buf,
- asids, asidcount, rc);
+ tlb_cpumask, tlb_buf, asids, asidcount, rc);
panic("Unsafe to continue.");
}
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index e514899e110..3270e001926 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -284,22 +284,21 @@ static __init int __setup_hugepagesz(unsigned long ps)
int level, base_shift;
if ((1UL << log_ps) != ps || (log_ps & 1) != 0) {
- pr_warn("Not enabling %ld byte huge pages;"
- " must be a power of four.\n", ps);
+ pr_warn("Not enabling %ld byte huge pages; must be a power of four\n",
+ ps);
return -EINVAL;
}
if (ps > 64*1024*1024*1024UL) {
- pr_warn("Not enabling %ld MB huge pages;"
- " largest legal value is 64 GB .\n", ps >> 20);
+ pr_warn("Not enabling %ld MB huge pages; largest legal value is 64 GB\n",
+ ps >> 20);
return -EINVAL;
} else if (ps >= PUD_SIZE) {
static long hv_jpage_size;
if (hv_jpage_size == 0)
hv_jpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_JUMBO);
if (hv_jpage_size != PUD_SIZE) {
- pr_warn("Not enabling >= %ld MB huge pages:"
- " hypervisor reports size %ld\n",
+ pr_warn("Not enabling >= %ld MB huge pages: hypervisor reports size %ld\n",
PUD_SIZE >> 20, hv_jpage_size);
return -EINVAL;
}
@@ -320,14 +319,13 @@ static __init int __setup_hugepagesz(unsigned long ps)
int shift_val = log_ps - base_shift;
if (huge_shift[level] != 0) {
int old_shift = base_shift + huge_shift[level];
- pr_warn("Not enabling %ld MB huge pages;"
- " already have size %ld MB.\n",
+ pr_warn("Not enabling %ld MB huge pages; already have size %ld MB\n",
ps >> 20, (1UL << old_shift) >> 20);
return -EINVAL;
}
if (hv_set_pte_super_shift(level, shift_val) != 0) {
- pr_warn("Not enabling %ld MB huge pages;"
- " no hypervisor support.\n", ps >> 20);
+ pr_warn("Not enabling %ld MB huge pages; no hypervisor support\n",
+ ps >> 20);
return -EINVAL;
}
printk(KERN_DEBUG "Enabled %ld MB huge pages\n", ps >> 20);
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index caa270165f8..be240cc4978 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -357,11 +357,11 @@ static int __init setup_ktext(char *str)
cpulist_scnprintf(buf, sizeof(buf), &ktext_mask);
if (cpumask_weight(&ktext_mask) > 1) {
ktext_small = 1;
- pr_info("ktext: using caching neighborhood %s "
- "with small pages\n", buf);
+ pr_info("ktext: using caching neighborhood %s with small pages\n",
+ buf);
} else {
pr_info("ktext: caching on cpu %s with one huge page\n",
- buf);
+ buf);
}
}
@@ -413,19 +413,16 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
int rc, i;
if (ktext_arg_seen && ktext_hash) {
- pr_warning("warning: \"ktext\" boot argument ignored"
- " if \"kcache_hash\" sets up text hash-for-home\n");
+ pr_warn("warning: \"ktext\" boot argument ignored if \"kcache_hash\" sets up text hash-for-home\n");
ktext_small = 0;
}
if (kdata_arg_seen && kdata_hash) {
- pr_warning("warning: \"kdata\" boot argument ignored"
- " if \"kcache_hash\" sets up data hash-for-home\n");
+ pr_warn("warning: \"kdata\" boot argument ignored if \"kcache_hash\" sets up data hash-for-home\n");
}
if (kdata_huge && !hash_default) {
- pr_warning("warning: disabling \"kdata=huge\"; requires"
- " kcache_hash=all or =allbutstack\n");
+ pr_warn("warning: disabling \"kdata=huge\"; requires kcache_hash=all or =allbutstack\n");
kdata_huge = 0;
}
@@ -470,8 +467,8 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
pte[pte_ofs] = pfn_pte(pfn, prot);
} else {
if (kdata_huge)
- printk(KERN_DEBUG "pre-shattered huge"
- " page at %#lx\n", address);
+ printk(KERN_DEBUG "pre-shattered huge page at %#lx\n",
+ address);
for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
pfn++, pte_ofs++, address += PAGE_SIZE) {
pgprot_t prot = init_pgprot(address);
@@ -501,8 +498,8 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
pr_info("ktext: not using unavailable cpus %s\n", buf);
}
if (cpumask_empty(&ktext_mask)) {
- pr_warning("ktext: no valid cpus; caching on %d.\n",
- smp_processor_id());
+ pr_warn("ktext: no valid cpus; caching on %d\n",
+ smp_processor_id());
cpumask_copy(&ktext_mask,
cpumask_of(smp_processor_id()));
}
@@ -798,11 +795,9 @@ void __init mem_init(void)
#ifdef CONFIG_HIGHMEM
/* check that fixmap and pkmap do not overlap */
if (PKMAP_ADDR(LAST_PKMAP-1) >= FIXADDR_START) {
- pr_err("fixmap and kmap areas overlap"
- " - this will crash\n");
+ pr_err("fixmap and kmap areas overlap - this will crash\n");
pr_err("pkstart: %lxh pkend: %lxh fixstart %lxh\n",
- PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP-1),
- FIXADDR_START);
+ PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP-1), FIXADDR_START);
BUG();
}
#endif
@@ -926,8 +921,7 @@ static void free_init_pages(char *what, unsigned long begin, unsigned long end)
unsigned long addr = (unsigned long) begin;
if (kdata_huge && !initfree) {
- pr_warning("Warning: ignoring initfree=0:"
- " incompatible with kdata=huge\n");
+ pr_warn("Warning: ignoring initfree=0: incompatible with kdata=huge\n");
initfree = 1;
}
end = (end + PAGE_SIZE - 1) & PAGE_MASK;
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 5e86eac4bfa..7bf2491a9c1 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -44,9 +44,7 @@ void show_mem(unsigned int filter)
{
struct zone *zone;
- pr_err("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu"
- " free:%lu\n slab:%lu mapped:%lu pagetables:%lu bounce:%lu"
- " pagecache:%lu swap:%lu\n",
+ pr_err("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu free:%lu\n slab:%lu mapped:%lu pagetables:%lu bounce:%lu pagecache:%lu swap:%lu\n",
(global_page_state(NR_ACTIVE_ANON) +
global_page_state(NR_ACTIVE_FILE)),
(global_page_state(NR_INACTIVE_ANON) +
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index d69f1cd87fd..ba397bde794 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -249,10 +249,6 @@ config HAVE_INTEL_TXT
def_bool y
depends on INTEL_IOMMU && ACPI
-config X86_INTEL_MPX
- def_bool y
- depends on CPU_SUP_INTEL
-
config X86_32_SMP
def_bool y
depends on X86_32 && SMP
@@ -887,11 +883,11 @@ config X86_UP_IOAPIC
config X86_LOCAL_APIC
def_bool y
depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC || PCI_MSI
+ select GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
config X86_IO_APIC
- def_bool y
- depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_IOAPIC || PCI_MSI
- select GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
+ def_bool X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_IOAPIC
+ depends on X86_LOCAL_APIC
select IRQ_DOMAIN
config X86_REROUTE_FOR_BROKEN_BOOT_IRQS
@@ -1594,6 +1590,32 @@ config X86_SMAP
If unsure, say Y.
+config X86_INTEL_MPX
+ prompt "Intel MPX (Memory Protection Extensions)"
+ def_bool n
+ depends on CPU_SUP_INTEL
+ ---help---
+ MPX provides hardware features that can be used in
+ conjunction with compiler-instrumented code to check
+ memory references. It is designed to detect buffer
+ overflow or underflow bugs.
+
+ This option enables running applications which are
+ instrumented or otherwise use MPX. It does not use MPX
+ itself inside the kernel or to protect the kernel
+ against bad memory references.
+
+ Enabling this option will make the kernel larger:
+ ~8k of kernel text and 36 bytes of data on a 64-bit
+ defconfig. It adds a long to the 'mm_struct' which
+ will increase the kernel memory overhead of each
+ process and adds some branches to paths used during
+ exec() and munmap().
+
+ For details, see Documentation/x86/intel_mpx.txt
+
+ If unsure, say N.
+
config EFI
bool "EFI runtime service support"
depends on ACPI
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 4615906d83d..9662290e0b2 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -94,30 +94,7 @@ extern void trace_call_function_single_interrupt(void);
#define trace_kvm_posted_intr_ipi kvm_posted_intr_ipi
#endif /* CONFIG_TRACING */
-/* IOAPIC */
-#define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1<<(x)) & io_apic_irqs))
-extern unsigned long io_apic_irqs;
-
-extern void setup_IO_APIC(void);
-extern void disable_IO_APIC(void);
-
-struct io_apic_irq_attr {
- int ioapic;
- int ioapic_pin;
- int trigger;
- int polarity;
-};
-
-static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr,
- int ioapic, int ioapic_pin,
- int trigger, int polarity)
-{
- irq_attr->ioapic = ioapic;
- irq_attr->ioapic_pin = ioapic_pin;
- irq_attr->trigger = trigger;
- irq_attr->polarity = polarity;
-}
-
+#ifdef CONFIG_IRQ_REMAP
/* Intel specific interrupt remapping information */
struct irq_2_iommu {
struct intel_iommu *iommu;
@@ -131,14 +108,12 @@ struct irq_2_irte {
u16 devid; /* Device ID for IRTE table */
u16 index; /* Index into IRTE table*/
};
+#endif /* CONFIG_IRQ_REMAP */
+
+#ifdef CONFIG_X86_LOCAL_APIC
+struct irq_data;
-/*
- * This is performance-critical, we want to do it O(1)
- *
- * Most irqs are mapped 1:1 with pins.
- */
struct irq_cfg {
- struct irq_pin_list *irq_2_pin;
cpumask_var_t domain;
cpumask_var_t old_domain;
u8 vector;
@@ -150,18 +125,39 @@ struct irq_cfg {
struct irq_2_irte irq_2_irte;
};
#endif
+ union {
+#ifdef CONFIG_X86_IO_APIC
+ struct {
+ struct list_head irq_2_pin;
+ };
+#endif
+ };
};
+extern struct irq_cfg *irq_cfg(unsigned int irq);
+extern struct irq_cfg *irqd_cfg(struct irq_data *irq_data);
+extern struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node);
+extern void lock_vector_lock(void);
+extern void unlock_vector_lock(void);
extern int assign_irq_vector(int, struct irq_cfg *, const struct cpumask *);
+extern void clear_irq_vector(int irq, struct irq_cfg *cfg);
+extern void setup_vector_irq(int cpu);
+#ifdef CONFIG_SMP
extern void send_cleanup_vector(struct irq_cfg *);
+extern void irq_complete_move(struct irq_cfg *cfg);
+#else
+static inline void send_cleanup_vector(struct irq_cfg *c) { }
+static inline void irq_complete_move(struct irq_cfg *c) { }
+#endif
-struct irq_data;
-int __ioapic_set_affinity(struct irq_data *, const struct cpumask *,
- unsigned int *dest_id);
-extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin, struct io_apic_irq_attr *irq_attr);
-extern void setup_ioapic_dest(void);
-
-extern void enable_IO_APIC(void);
+extern int apic_retrigger_irq(struct irq_data *data);
+extern void apic_ack_edge(struct irq_data *data);
+extern int apic_set_affinity(struct irq_data *data, const struct cpumask *mask,
+ unsigned int *dest_id);
+#else /* CONFIG_X86_LOCAL_APIC */
+static inline void lock_vector_lock(void) {}
+static inline void unlock_vector_lock(void) {}
+#endif /* CONFIG_X86_LOCAL_APIC */
/* Statistics */
extern atomic_t irq_err_count;
@@ -185,7 +181,8 @@ extern __visible void smp_call_function_single_interrupt(struct pt_regs *);
extern __visible void smp_invalidate_interrupt(struct pt_regs *);
#endif
-extern void (*__initconst interrupt[NR_VECTORS-FIRST_EXTERNAL_VECTOR])(void);
+extern void (*__initconst interrupt[FIRST_SYSTEM_VECTOR
+ - FIRST_EXTERNAL_VECTOR])(void);
#ifdef CONFIG_TRACING
#define trace_interrupt interrupt
#endif
@@ -195,17 +192,6 @@ extern void (*__initconst interrupt[NR_VECTORS-FIRST_EXTERNAL_VECTOR])(void);
typedef int vector_irq_t[NR_VECTORS];
DECLARE_PER_CPU(vector_irq_t, vector_irq);
-extern void setup_vector_irq(int cpu);
-
-#ifdef CONFIG_X86_IO_APIC
-extern void lock_vector_lock(void);
-extern void unlock_vector_lock(void);
-extern void __setup_vector_irq(int cpu);
-#else
-static inline void lock_vector_lock(void) {}
-static inline void unlock_vector_lock(void) {}
-static inline void __setup_vector_irq(int cpu) {}
-#endif
#endif /* !ASSEMBLY_ */
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 1733ab49ac5..bf006cce941 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -132,6 +132,10 @@ extern int noioapicquirk;
/* -1 if "noapic" boot option passed */
extern int noioapicreroute;
+extern unsigned long io_apic_irqs;
+
+#define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1 << (x)) & io_apic_irqs))
+
/*
* If we use the IO-APIC for IRQ routing, disable automatic
* assignment of PCI IRQ's.
@@ -139,18 +143,15 @@ extern int noioapicreroute;
#define io_apic_assign_pci_irqs \
(mp_irq_entries && !skip_ioapic_setup && io_apic_irqs)
-struct io_apic_irq_attr;
struct irq_cfg;
extern void ioapic_insert_resources(void);
+extern int arch_early_ioapic_init(void);
extern int native_setup_ioapic_entry(int, struct IO_APIC_route_entry *,
unsigned int, int,
struct io_apic_irq_attr *);
extern void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg);
-extern void native_compose_msi_msg(struct pci_dev *pdev,
- unsigned int irq, unsigned int dest,
- struct msi_msg *msg, u8 hpet_id);
extern void native_eoi_ioapic_pin(int apic, int pin, int vector);
extern int save_ioapic_entries(void);
@@ -160,6 +161,13 @@ extern int restore_ioapic_entries(void);
extern void setup_ioapic_ids_from_mpc(void);
extern void setup_ioapic_ids_from_mpc_nocheck(void);
+struct io_apic_irq_attr {
+ int ioapic;
+ int ioapic_pin;
+ int trigger;
+ int polarity;
+};
+
enum ioapic_domain_type {
IOAPIC_DOMAIN_INVALID,
IOAPIC_DOMAIN_LEGACY,
@@ -188,8 +196,10 @@ extern int mp_find_ioapic_pin(int ioapic, u32 gsi);
extern u32 mp_pin_to_gsi(int ioapic, int pin);
extern int mp_map_gsi_to_irq(u32 gsi, unsigned int flags);
extern void mp_unmap_irq(int irq);
-extern void __init mp_register_ioapic(int id, u32 address, u32 gsi_base,
- struct ioapic_domain_cfg *cfg);
+extern int mp_register_ioapic(int id, u32 address, u32 gsi_base,
+ struct ioapic_domain_cfg *cfg);
+extern int mp_unregister_ioapic(u32 gsi_base);
+extern int mp_ioapic_registered(u32 gsi_base);
extern int mp_irqdomain_map(struct irq_domain *domain, unsigned int virq,
irq_hw_number_t hwirq);
extern void mp_irqdomain_unmap(struct irq_domain *domain, unsigned int virq);
@@ -227,19 +237,25 @@ static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned
extern void io_apic_eoi(unsigned int apic, unsigned int vector);
-extern bool mp_should_keep_irq(struct device *dev);
-
+extern void setup_IO_APIC(void);
+extern void enable_IO_APIC(void);
+extern void disable_IO_APIC(void);
+extern void setup_ioapic_dest(void);
+extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin);
+extern void print_IO_APICs(void);
#else /* !CONFIG_X86_IO_APIC */
+#define IO_APIC_IRQ(x) 0
#define io_apic_assign_pci_irqs 0
#define setup_ioapic_ids_from_mpc x86_init_noop
static inline void ioapic_insert_resources(void) { }
+static inline int arch_early_ioapic_init(void) { return 0; }
+static inline void print_IO_APICs(void) {}
#define gsi_top (NR_IRQS_LEGACY)
static inline int mp_find_ioapic(u32 gsi) { return 0; }
static inline u32 mp_pin_to_gsi(int ioapic, int pin) { return UINT_MAX; }
static inline int mp_map_gsi_to_irq(u32 gsi, unsigned int flags) { return gsi; }
static inline void mp_unmap_irq(int irq) { }
-static inline bool mp_should_keep_irq(struct device *dev) { return 1; }
static inline int save_ioapic_entries(void)
{
@@ -262,7 +278,6 @@ static inline void disable_ioapic_support(void) { }
#define native_io_apic_print_entries NULL
#define native_ioapic_set_affinity NULL
#define native_setup_ioapic_entry NULL
-#define native_compose_msi_msg NULL
#define native_eoi_ioapic_pin NULL
#endif
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 5702d7e3111..666c89ec4bd 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -126,6 +126,12 @@
#define NR_VECTORS 256
+#ifdef CONFIG_X86_LOCAL_APIC
+#define FIRST_SYSTEM_VECTOR LOCAL_TIMER_VECTOR
+#else
+#define FIRST_SYSTEM_VECTOR NR_VECTORS
+#endif
+
#define FPU_IRQ 13
#define FIRST_VM86_IRQ 3
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 6ed0c30d6a0..d89c6b828c9 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -33,7 +33,7 @@
#define KVM_MAX_VCPUS 255
#define KVM_SOFT_MAX_VCPUS 160
-#define KVM_USER_MEM_SLOTS 125
+#define KVM_USER_MEM_SLOTS 509
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 3
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
@@ -51,6 +51,7 @@
| X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
#define CR3_L_MODE_RESERVED_BITS 0xFFFFFF0000000000ULL
+#define CR3_PCID_INVD (1UL << 63)
#define CR4_RESERVED_BITS \
(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
| X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
@@ -361,6 +362,7 @@ struct kvm_vcpu_arch {
int mp_state;
u64 ia32_misc_enable_msr;
bool tpr_access_reporting;
+ u64 ia32_xss;
/*
* Paging state of the vcpu
@@ -542,7 +544,7 @@ struct kvm_apic_map {
struct rcu_head rcu;
u8 ldr_bits;
/* fields bellow are used to decode ldr values in different modes */
- u32 cid_shift, cid_mask, lid_mask;
+ u32 cid_shift, cid_mask, lid_mask, broadcast;
struct kvm_lapic *phys_map[256];
/* first index is cluster id second is cpu id in a cluster */
struct kvm_lapic *logical_map[16][16];
@@ -602,6 +604,9 @@ struct kvm_arch {
struct kvm_xen_hvm_config xen_hvm_config;
+ /* reads protected by irq_srcu, writes by irq_lock */
+ struct hlist_head mask_notifier_list;
+
/* fields used by HYPER-V emulation */
u64 hv_guest_os_id;
u64 hv_hypercall;
@@ -659,6 +664,16 @@ struct msr_data {
u64 data;
};
+struct kvm_lapic_irq {
+ u32 vector;
+ u32 delivery_mode;
+ u32 dest_mode;
+ u32 level;
+ u32 trig_mode;
+ u32 shorthand;
+ u32 dest_id;
+};
+
struct kvm_x86_ops {
int (*cpu_has_kvm_support)(void); /* __init */
int (*disabled_by_bios)(void); /* __init */
@@ -767,6 +782,7 @@ struct kvm_x86_ops {
enum x86_intercept_stage stage);
void (*handle_external_intr)(struct kvm_vcpu *vcpu);
bool (*mpx_supported)(void);
+ bool (*xsaves_supported)(void);
int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
@@ -818,6 +834,19 @@ int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
const void *val, int bytes);
u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
+struct kvm_irq_mask_notifier {
+ void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
+ int irq;
+ struct hlist_node link;
+};
+
+void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
+ struct kvm_irq_mask_notifier *kimn);
+void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
+ struct kvm_irq_mask_notifier *kimn);
+void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
+ bool mask);
+
extern bool tdp_enabled;
u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);
@@ -863,7 +892,7 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
-void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, unsigned int vector);
+void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
int reason, bool has_error_code, u32 error_code);
@@ -895,6 +924,7 @@ int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
gfn_t gfn, void *data, int offset, int len,
u32 access);
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
+bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr);
static inline int __kvm_irq_line_state(unsigned long *irq_state,
int irq_source_id, int level)
@@ -1066,6 +1096,7 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
void kvm_define_shared_msr(unsigned index, u32 msr);
int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
+unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index 0892ea0e683..4e370a5d811 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -96,12 +96,15 @@ extern void pci_iommu_alloc(void);
#ifdef CONFIG_PCI_MSI
/* implemented in arch/x86/kernel/apic/io_apic. */
struct msi_desc;
+void native_compose_msi_msg(struct pci_dev *pdev, unsigned int irq,
+ unsigned int dest, struct msi_msg *msg, u8 hpet_id);
int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
void native_teardown_msi_irq(unsigned int irq);
void native_restore_msi_irqs(struct pci_dev *dev);
int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,
unsigned int irq_base, unsigned int irq_offset);
#else
+#define native_compose_msi_msg NULL
#define native_setup_msi_irqs NULL
#define native_teardown_msi_irq NULL
#endif
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index fa1195dae42..164e3f8d3c3 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -93,6 +93,8 @@ extern raw_spinlock_t pci_config_lock;
extern int (*pcibios_enable_irq)(struct pci_dev *dev);
extern void (*pcibios_disable_irq)(struct pci_dev *dev);
+extern bool mp_should_keep_irq(struct device *dev);
+
struct pci_raw_ops {
int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
int reg, int len, u32 *val);
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index af447f95e3b..25bcd4a8951 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -452,6 +452,7 @@ static inline void update_page_count(int level, unsigned long pages) { }
extern pte_t *lookup_address(unsigned long address, unsigned int *level);
extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
unsigned int *level);
+extern pmd_t *lookup_pmd_address(unsigned long address);
extern phys_addr_t slow_virt_to_phys(void *__address);
extern int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
unsigned numpages, unsigned long page_flags);
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index bcbfade26d8..45afaee9555 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -69,6 +69,7 @@
#define SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400
#define SECONDARY_EXEC_ENABLE_INVPCID 0x00001000
#define SECONDARY_EXEC_SHADOW_VMCS 0x00004000
+#define SECONDARY_EXEC_XSAVES 0x00100000
#define PIN_BASED_EXT_INTR_MASK 0x00000001
@@ -159,6 +160,8 @@ enum vmcs_field {
EOI_EXIT_BITMAP3_HIGH = 0x00002023,
VMREAD_BITMAP = 0x00002026,
VMWRITE_BITMAP = 0x00002028,
+ XSS_EXIT_BITMAP = 0x0000202C,
+ XSS_EXIT_BITMAP_HIGH = 0x0000202D,
GUEST_PHYSICAL_ADDRESS = 0x00002400,
GUEST_PHYSICAL_ADDRESS_HIGH = 0x00002401,
VMCS_LINK_POINTER = 0x00002800,
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index f58ef6c0613..5eea09915a1 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -41,10 +41,12 @@ typedef struct xpaddr {
extern unsigned long *machine_to_phys_mapping;
extern unsigned long machine_to_phys_nr;
+extern unsigned long *xen_p2m_addr;
+extern unsigned long xen_p2m_size;
+extern unsigned long xen_max_p2m_pfn;
extern unsigned long get_phys_to_machine(unsigned long pfn);
extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
-extern bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern unsigned long set_phys_range_identity(unsigned long pfn_s,
unsigned long pfn_e);
@@ -52,17 +54,52 @@ extern unsigned long set_phys_range_identity(unsigned long pfn_s,
extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count);
-extern int m2p_add_override(unsigned long mfn, struct page *page,
- struct gnttab_map_grant_ref *kmap_op);
extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count);
-extern int m2p_remove_override(struct page *page,
- struct gnttab_map_grant_ref *kmap_op,
- unsigned long mfn);
-extern struct page *m2p_find_override(unsigned long mfn);
extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
+/*
+ * Helper functions to write or read unsigned long values to/from
+ * memory, when the access may fault.
+ */
+static inline int xen_safe_write_ulong(unsigned long *addr, unsigned long val)
+{
+ return __put_user(val, (unsigned long __user *)addr);
+}
+
+static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
+{
+ return __get_user(*val, (unsigned long __user *)addr);
+}
+
+/*
+ * When to use pfn_to_mfn(), __pfn_to_mfn() or get_phys_to_machine():
+ * - pfn_to_mfn() returns either INVALID_P2M_ENTRY or the mfn. No indicator
+ * bits (identity or foreign) are set.
+ * - __pfn_to_mfn() returns the found entry of the p2m table. A possibly set
+ * identity or foreign indicator will be still set. __pfn_to_mfn() is
+ * encapsulating get_phys_to_machine() which is called in special cases only.
+ * - get_phys_to_machine() is to be called by __pfn_to_mfn() only in special
+ * cases needing an extended handling.
+ */
+static inline unsigned long __pfn_to_mfn(unsigned long pfn)
+{
+ unsigned long mfn;
+
+ if (pfn < xen_p2m_size)
+ mfn = xen_p2m_addr[pfn];
+ else if (unlikely(pfn < xen_max_p2m_pfn))
+ return get_phys_to_machine(pfn);
+ else
+ return IDENTITY_FRAME(pfn);
+
+ if (unlikely(mfn == INVALID_P2M_ENTRY))
+ return get_phys_to_machine(pfn);
+
+ return mfn;
+}
+
static inline unsigned long pfn_to_mfn(unsigned long pfn)
{
unsigned long mfn;
@@ -70,7 +107,7 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn)
if (xen_feature(XENFEAT_auto_translated_physmap))
return pfn;
- mfn = get_phys_to_machine(pfn);
+ mfn = __pfn_to_mfn(pfn);
if (mfn != INVALID_P2M_ENTRY)
mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
@@ -83,7 +120,7 @@ static inline int phys_to_machine_mapping_valid(unsigned long pfn)
if (xen_feature(XENFEAT_auto_translated_physmap))
return 1;
- return get_phys_to_machine(pfn) != INVALID_P2M_ENTRY;
+ return __pfn_to_mfn(pfn) != INVALID_P2M_ENTRY;
}
static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
@@ -102,7 +139,7 @@ static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
* In such cases it doesn't matter what we return (we return garbage),
* but we must handle the fault without crashing!
*/
- ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
+ ret = xen_safe_read_ulong(&machine_to_phys_mapping[mfn], &pfn);
if (ret < 0)
return ~0;
@@ -117,7 +154,7 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn)
return mfn;
pfn = mfn_to_pfn_no_overrides(mfn);
- if (get_phys_to_machine(pfn) != mfn) {
+ if (__pfn_to_mfn(pfn) != mfn) {
/*
* If this appears to be a foreign mfn (because the pfn
* doesn't map back to the mfn), then check the local override
@@ -133,8 +170,7 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn)
* entry doesn't map back to the mfn and m2p_override doesn't have a
* valid entry for it.
*/
- if (pfn == ~0 &&
- get_phys_to_machine(mfn) == IDENTITY_FRAME(mfn))
+ if (pfn == ~0 && __pfn_to_mfn(mfn) == IDENTITY_FRAME(mfn))
pfn = mfn;
return pfn;
@@ -180,7 +216,7 @@ static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
return mfn;
pfn = mfn_to_pfn(mfn);
- if (get_phys_to_machine(pfn) != mfn)
+ if (__pfn_to_mfn(pfn) != mfn)
return -1; /* force !pfn_valid() */
return pfn;
}
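
The xen/page.h hunks above add xen_safe_read_ulong()/xen_safe_write_ulong() as thin wrappers around __get_user()/__put_user(), so a lookup in a possibly-unpopulated machine-to-phys entry faults gracefully instead of oopsing, and layer __pfn_to_mfn() as the fast linear-array lookup with get_phys_to_machine() kept for the slow cases. A hedged caller sketch (the helpers and machine_to_phys_mapping come from the hunk; lookup_pfn() itself is illustrative):

#include <asm/xen/page.h>

/* Translate mfn -> pfn, treating a faulting m2p entry as "unknown". */
static unsigned long lookup_pfn(unsigned long mfn)
{
	unsigned long pfn;

	if (xen_safe_read_ulong(&machine_to_phys_mapping[mfn], &pfn) < 0)
		return ~0UL;

	return pfn;
}
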
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index 7e7a79ada65..5fa9770035d 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -16,6 +16,7 @@
#define XSTATE_Hi16_ZMM 0x80
#define XSTATE_FPSSE (XSTATE_FP | XSTATE_SSE)
+#define XSTATE_AVX512 (XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM)
/* Bit 63 of XCR0 is reserved for future expansion */
#define XSTATE_EXTEND_MASK (~(XSTATE_FPSSE | (1ULL << 63)))
diff --git a/arch/x86/include/uapi/asm/ldt.h b/arch/x86/include/uapi/asm/ldt.h
index 46727eb37bf..6e1aaf73852 100644
--- a/arch/x86/include/uapi/asm/ldt.h
+++ b/arch/x86/include/uapi/asm/ldt.h
@@ -28,6 +28,13 @@ struct user_desc {
unsigned int seg_not_present:1;
unsigned int useable:1;
#ifdef __x86_64__
+ /*
+ * Because this bit is not present in 32-bit user code, user
+ * programs can pass uninitialized values here. Therefore, in
+ * any context in which a user_desc comes from a 32-bit program,
+ * the kernel must act as though lm == 0, regardless of the
+ * actual value.
+ */
unsigned int lm:1;
#endif
};
diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
index 990a2fe1588..b813bf9da1e 100644
--- a/arch/x86/include/uapi/asm/vmx.h
+++ b/arch/x86/include/uapi/asm/vmx.h
@@ -72,6 +72,8 @@
#define EXIT_REASON_XSETBV 55
#define EXIT_REASON_APIC_WRITE 56
#define EXIT_REASON_INVPCID 58
+#define EXIT_REASON_XSAVES 63
+#define EXIT_REASON_XRSTORS 64
#define VMX_EXIT_REASONS \
{ EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \
@@ -116,6 +118,8 @@
{ EXIT_REASON_INVALID_STATE, "INVALID_STATE" }, \
{ EXIT_REASON_INVD, "INVD" }, \
{ EXIT_REASON_INVVPID, "INVVPID" }, \
- { EXIT_REASON_INVPCID, "INVPCID" }
+ { EXIT_REASON_INVPCID, "INVPCID" }, \
+ { EXIT_REASON_XSAVES, "XSAVES" }, \
+ { EXIT_REASON_XRSTORS, "XRSTORS" }
#endif /* _UAPIVMX_H */
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index a142e77693e..4433a4be817 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -76,6 +76,19 @@ int acpi_fix_pin2_polarity __initdata;
static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
#endif
+/*
+ * Locks related to IOAPIC hotplug
+ * Hotplug side:
+ * ->device_hotplug_lock
+ * ->acpi_ioapic_lock
+ * ->ioapic_lock
+ * Interrupt mapping side:
+ * ->acpi_ioapic_lock
+ * ->ioapic_mutex
+ * ->ioapic_lock
+ */
+static DEFINE_MUTEX(acpi_ioapic_lock);
+
/* --------------------------------------------------------------------------
Boot-time Configuration
-------------------------------------------------------------------------- */
@@ -395,10 +408,6 @@ static int mp_register_gsi(struct device *dev, u32 gsi, int trigger,
if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC)
return gsi;
- /* Don't set up the ACPI SCI because it's already set up */
- if (acpi_gbl_FADT.sci_interrupt == gsi)
- return mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC);
-
trigger = trigger == ACPI_EDGE_SENSITIVE ? 0 : 1;
polarity = polarity == ACPI_ACTIVE_HIGH ? 0 : 1;
node = dev ? dev_to_node(dev) : NUMA_NO_NODE;
@@ -411,7 +420,8 @@ static int mp_register_gsi(struct device *dev, u32 gsi, int trigger,
if (irq < 0)
return irq;
- if (enable_update_mptable)
+ /* Don't set up the ACPI SCI because it's already set up */
+ if (enable_update_mptable && acpi_gbl_FADT.sci_interrupt != gsi)
mp_config_acpi_gsi(dev, gsi, trigger, polarity);
return irq;
@@ -424,9 +434,6 @@ static void mp_unregister_gsi(u32 gsi)
if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC)
return;
- if (acpi_gbl_FADT.sci_interrupt == gsi)
- return;
-
irq = mp_map_gsi_to_irq(gsi, 0);
if (irq > 0)
mp_unmap_irq(irq);
@@ -609,8 +616,10 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
*irqp = gsi;
} else {
+ mutex_lock(&acpi_ioapic_lock);
irq = mp_map_gsi_to_irq(gsi,
IOAPIC_MAP_ALLOC | IOAPIC_MAP_CHECK);
+ mutex_unlock(&acpi_ioapic_lock);
if (irq < 0)
return -1;
*irqp = irq;
@@ -650,7 +659,9 @@ static int acpi_register_gsi_ioapic(struct device *dev, u32 gsi,
int irq = gsi;
#ifdef CONFIG_X86_IO_APIC
+ mutex_lock(&acpi_ioapic_lock);
irq = mp_register_gsi(dev, gsi, trigger, polarity);
+ mutex_unlock(&acpi_ioapic_lock);
#endif
return irq;
@@ -659,7 +670,9 @@ static int acpi_register_gsi_ioapic(struct device *dev, u32 gsi,
static void acpi_unregister_gsi_ioapic(u32 gsi)
{
#ifdef CONFIG_X86_IO_APIC
+ mutex_lock(&acpi_ioapic_lock);
mp_unregister_gsi(gsi);
+ mutex_unlock(&acpi_ioapic_lock);
#endif
}
@@ -690,6 +703,7 @@ void acpi_unregister_gsi(u32 gsi)
}
EXPORT_SYMBOL_GPL(acpi_unregister_gsi);
+#ifdef CONFIG_X86_LOCAL_APIC
static void __init acpi_set_irq_model_ioapic(void)
{
acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
@@ -697,6 +711,7 @@ static void __init acpi_set_irq_model_ioapic(void)
__acpi_unregister_gsi = acpi_unregister_gsi_ioapic;
acpi_ioapic = 1;
}
+#endif
/*
* ACPI based hotplug support for CPU
@@ -759,20 +774,74 @@ EXPORT_SYMBOL(acpi_unmap_lsapic);
int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
{
- /* TBD */
- return -EINVAL;
-}
+ int ret = -ENOSYS;
+#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
+ int ioapic_id;
+ u64 addr;
+ struct ioapic_domain_cfg cfg = {
+ .type = IOAPIC_DOMAIN_DYNAMIC,
+ .ops = &acpi_irqdomain_ops,
+ };
+
+ ioapic_id = acpi_get_ioapic_id(handle, gsi_base, &addr);
+ if (ioapic_id < 0) {
+ unsigned long long uid;
+ acpi_status status;
+ status = acpi_evaluate_integer(handle, METHOD_NAME__UID,
+ NULL, &uid);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_warn(handle, "failed to get IOAPIC ID.\n");
+ return -EINVAL;
+ }
+ ioapic_id = (int)uid;
+ }
+
+ mutex_lock(&acpi_ioapic_lock);
+ ret = mp_register_ioapic(ioapic_id, phys_addr, gsi_base, &cfg);
+ mutex_unlock(&acpi_ioapic_lock);
+#endif
+
+ return ret;
+}
EXPORT_SYMBOL(acpi_register_ioapic);
int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
{
- /* TBD */
- return -EINVAL;
-}
+ int ret = -ENOSYS;
+#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
+ mutex_lock(&acpi_ioapic_lock);
+ ret = mp_unregister_ioapic(gsi_base);
+ mutex_unlock(&acpi_ioapic_lock);
+#endif
+
+ return ret;
+}
EXPORT_SYMBOL(acpi_unregister_ioapic);
+/**
+ * acpi_ioapic_registered - Check whether IOAPIC associated with @gsi_base
+ * has been registered
+ * @handle: ACPI handle of the IOAPIC device
+ * @gsi_base: GSI base associated with the IOAPIC
+ *
+ * Assume caller holds some type of lock to serialize acpi_ioapic_registered()
+ * with acpi_register_ioapic()/acpi_unregister_ioapic().
+ */
+int acpi_ioapic_registered(acpi_handle handle, u32 gsi_base)
+{
+ int ret = 0;
+
+#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
+ mutex_lock(&acpi_ioapic_lock);
+ ret = mp_ioapic_registered(gsi_base);
+ mutex_unlock(&acpi_ioapic_lock);
+#endif
+
+ return ret;
+}
+
static int __init acpi_parse_sbf(struct acpi_table_header *table)
{
struct acpi_table_boot *sb;
@@ -1185,7 +1254,9 @@ static void __init acpi_process_madt(void)
/*
* Parse MADT IO-APIC entries
*/
+ mutex_lock(&acpi_ioapic_lock);
error = acpi_parse_madt_ioapic_entries();
+ mutex_unlock(&acpi_ioapic_lock);
if (!error) {
acpi_set_irq_model_ioapic();
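
The acpi/boot.c changes above introduce acpi_ioapic_lock so that IOAPIC hotplug (register/unregister) and GSI-to-IRQ mapping serialize on one mutex, in the lock order spelled out in the new comment block. A hedged sketch of the same wrapping pattern (example_ioapic_lock and example_map_gsi() are illustrative; mp_map_gsi_to_irq() is the real helper used in the hunk):

#include <linux/types.h>
#include <linux/mutex.h>
#include <asm/io_apic.h>

static DEFINE_MUTEX(example_ioapic_lock);

static int example_map_gsi(u32 gsi, unsigned int flags)
{
	int irq;

	/* Every path that touches the IOAPIC list takes the same mutex,
	 * whether it arrives from hotplug or from interrupt mapping. */
	mutex_lock(&example_ioapic_lock);
	irq = mp_map_gsi_to_irq(gsi, flags);
	mutex_unlock(&example_ioapic_lock);

	return irq;
}
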
diff --git a/arch/x86/kernel/apic/Makefile b/arch/x86/kernel/apic/Makefile
index dcb5b15401c..8bb12ddc5db 100644
--- a/arch/x86/kernel/apic/Makefile
+++ b/arch/x86/kernel/apic/Makefile
@@ -2,10 +2,12 @@
# Makefile for local APIC drivers and for the IO-APIC code
#
-obj-$(CONFIG_X86_LOCAL_APIC) += apic.o apic_noop.o ipi.o
+obj-$(CONFIG_X86_LOCAL_APIC) += apic.o apic_noop.o ipi.o vector.o
obj-y += hw_nmi.o
obj-$(CONFIG_X86_IO_APIC) += io_apic.o
+obj-$(CONFIG_PCI_MSI) += msi.o
+obj-$(CONFIG_HT_IRQ) += htirq.o
obj-$(CONFIG_SMP) += ipi.o
ifeq ($(CONFIG_X86_64),y)
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index ba6cc041edb..29b5b18afa2 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -196,7 +196,7 @@ static int disable_apic_timer __initdata;
int local_apic_timer_c2_ok;
EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
-int first_system_vector = 0xfe;
+int first_system_vector = FIRST_SYSTEM_VECTOR;
/*
* Debug level, exported for io_apic.c
@@ -1930,7 +1930,7 @@ int __init APIC_init_uniprocessor(void)
/*
* This interrupt should _never_ happen with our APIC/SMP architecture
*/
-static inline void __smp_spurious_interrupt(void)
+static inline void __smp_spurious_interrupt(u8 vector)
{
u32 v;
@@ -1939,30 +1939,32 @@ static inline void __smp_spurious_interrupt(void)
* if it is a vectored one. Just in case...
* Spurious interrupts should not be ACKed.
*/
- v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
- if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
+ v = apic_read(APIC_ISR + ((vector & ~0x1f) >> 1));
+ if (v & (1 << (vector & 0x1f)))
ack_APIC_irq();
inc_irq_stat(irq_spurious_count);
/* see sw-dev-man vol 3, chapter 7.4.13.5 */
- pr_info("spurious APIC interrupt on CPU#%d, "
- "should never happen.\n", smp_processor_id());
+ pr_info("spurious APIC interrupt through vector %02x on CPU#%d, "
+ "should never happen.\n", vector, smp_processor_id());
}
__visible void smp_spurious_interrupt(struct pt_regs *regs)
{
entering_irq();
- __smp_spurious_interrupt();
+ __smp_spurious_interrupt(~regs->orig_ax);
exiting_irq();
}
__visible void smp_trace_spurious_interrupt(struct pt_regs *regs)
{
+ u8 vector = ~regs->orig_ax;
+
entering_irq();
- trace_spurious_apic_entry(SPURIOUS_APIC_VECTOR);
- __smp_spurious_interrupt();
- trace_spurious_apic_exit(SPURIOUS_APIC_VECTOR);
+ trace_spurious_apic_entry(vector);
+ __smp_spurious_interrupt(vector);
+ trace_spurious_apic_exit(vector);
exiting_irq();
}
diff --git a/arch/x86/kernel/apic/htirq.c b/arch/x86/kernel/apic/htirq.c
new file mode 100644
index 00000000000..816f36e979a
--- /dev/null
+++ b/arch/x86/kernel/apic/htirq.c
@@ -0,0 +1,107 @@
+/*
+ * Support Hypertransport IRQ
+ *
+ * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
+ * Moved from arch/x86/kernel/apic/io_apic.c.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/htirq.h>
+#include <asm/hw_irq.h>
+#include <asm/apic.h>
+#include <asm/hypertransport.h>
+
+/*
+ * Hypertransport interrupt support
+ */
+static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
+{
+ struct ht_irq_msg msg;
+
+ fetch_ht_irq_msg(irq, &msg);
+
+ msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
+ msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
+
+ msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
+ msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
+
+ write_ht_irq_msg(irq, &msg);
+}
+
+static int
+ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
+{
+ struct irq_cfg *cfg = irqd_cfg(data);
+ unsigned int dest;
+ int ret;
+
+ ret = apic_set_affinity(data, mask, &dest);
+ if (ret)
+ return ret;
+
+ target_ht_irq(data->irq, dest, cfg->vector);
+ return IRQ_SET_MASK_OK_NOCOPY;
+}
+
+static struct irq_chip ht_irq_chip = {
+ .name = "PCI-HT",
+ .irq_mask = mask_ht_irq,
+ .irq_unmask = unmask_ht_irq,
+ .irq_ack = apic_ack_edge,
+ .irq_set_affinity = ht_set_affinity,
+ .irq_retrigger = apic_retrigger_irq,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
+};
+
+int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
+{
+ struct irq_cfg *cfg;
+ struct ht_irq_msg msg;
+ unsigned dest;
+ int err;
+
+ if (disable_apic)
+ return -ENXIO;
+
+ cfg = irq_cfg(irq);
+ err = assign_irq_vector(irq, cfg, apic->target_cpus());
+ if (err)
+ return err;
+
+ err = apic->cpu_mask_to_apicid_and(cfg->domain,
+ apic->target_cpus(), &dest);
+ if (err)
+ return err;
+
+ msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
+
+ msg.address_lo =
+ HT_IRQ_LOW_BASE |
+ HT_IRQ_LOW_DEST_ID(dest) |
+ HT_IRQ_LOW_VECTOR(cfg->vector) |
+ ((apic->irq_dest_mode == 0) ?
+ HT_IRQ_LOW_DM_PHYSICAL :
+ HT_IRQ_LOW_DM_LOGICAL) |
+ HT_IRQ_LOW_RQEOI_EDGE |
+ ((apic->irq_delivery_mode != dest_LowestPrio) ?
+ HT_IRQ_LOW_MT_FIXED :
+ HT_IRQ_LOW_MT_ARBITRATED) |
+ HT_IRQ_LOW_IRQ_MASKED;
+
+ write_ht_irq_msg(irq, &msg);
+
+ irq_set_chip_and_handler_name(irq, &ht_irq_chip,
+ handle_edge_irq, "edge");
+
+ dev_dbg(&dev->dev, "irq %d for HT\n", irq);
+
+ return 0;
+}
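+
+/*
+ * Note: arch_setup_ht_irq() above composes the HT IRQ message with the
+ * interrupt initially masked (HT_IRQ_LOW_IRQ_MASKED); it is left to be
+ * unmasked later through ht_irq_chip's unmask callback.
+ */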
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 7ffe0a2b870..3f5f60406ab 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -32,15 +32,11 @@
#include <linux/module.h>
#include <linux/syscore_ops.h>
#include <linux/irqdomain.h>
-#include <linux/msi.h>
-#include <linux/htirq.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/jiffies.h> /* time_after() */
#include <linux/slab.h>
#include <linux/bootmem.h>
-#include <linux/dmar.h>
-#include <linux/hpet.h>
#include <asm/idle.h>
#include <asm/io.h>
@@ -52,17 +48,12 @@
#include <asm/dma.h>
#include <asm/timer.h>
#include <asm/i8259.h>
-#include <asm/msidef.h>
-#include <asm/hypertransport.h>
#include <asm/setup.h>
#include <asm/irq_remapping.h>
-#include <asm/hpet.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
-#define __apicdebuginit(type) static type __init
-
#define for_each_ioapic(idx) \
for ((idx) = 0; (idx) < nr_ioapics; (idx)++)
#define for_each_ioapic_reverse(idx) \
@@ -74,7 +65,7 @@
for_each_pin((idx), (pin))
#define for_each_irq_pin(entry, head) \
- for (entry = head; entry; entry = entry->next)
+ list_for_each_entry(entry, &head, list)
/*
* Is the SiS APIC rmw bug present ?
@@ -83,7 +74,6 @@
int sis_apic_bug = -1;
static DEFINE_RAW_SPINLOCK(ioapic_lock);
-static DEFINE_RAW_SPINLOCK(vector_lock);
static DEFINE_MUTEX(ioapic_mutex);
static unsigned int ioapic_dynirq_base;
static int ioapic_initialized;
@@ -112,6 +102,7 @@ static struct ioapic {
struct ioapic_domain_cfg irqdomain_cfg;
struct irq_domain *irqdomain;
struct mp_pin_info *pin_info;
+ struct resource *iomem_res;
} ioapics[MAX_IO_APICS];
#define mpc_ioapic_ver(ioapic_idx) ioapics[ioapic_idx].mp_config.apicver
@@ -205,8 +196,6 @@ static int __init parse_noapic(char *str)
}
early_param("noapic", parse_noapic);
-static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node);
-
/* Will be called in mpparse/acpi/sfi codes for saving IRQ info */
void mp_save_irq(struct mpc_intsrc *m)
{
@@ -228,8 +217,8 @@ void mp_save_irq(struct mpc_intsrc *m)
}
struct irq_pin_list {
+ struct list_head list;
int apic, pin;
- struct irq_pin_list *next;
};
static struct irq_pin_list *alloc_irq_pin_list(int node)
@@ -237,7 +226,26 @@ static struct irq_pin_list *alloc_irq_pin_list(int node)
return kzalloc_node(sizeof(struct irq_pin_list), GFP_KERNEL, node);
}
-int __init arch_early_irq_init(void)
+static void alloc_ioapic_saved_registers(int idx)
+{
+ size_t size;
+
+ if (ioapics[idx].saved_registers)
+ return;
+
+ size = sizeof(struct IO_APIC_route_entry) * ioapics[idx].nr_registers;
+ ioapics[idx].saved_registers = kzalloc(size, GFP_KERNEL);
+ if (!ioapics[idx].saved_registers)
+ pr_err("IOAPIC %d: suspend/resume impossible!\n", idx);
+}
+
+static void free_ioapic_saved_registers(int idx)
+{
+ kfree(ioapics[idx].saved_registers);
+ ioapics[idx].saved_registers = NULL;
+}
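+
+/*
+ * Note: the suspend/resume register buffer is now allocated per IOAPIC via
+ * alloc_ioapic_saved_registers(), so a hot-added IOAPIC (see
+ * mp_register_ioapic()) can get one as well, and it is freed again on
+ * hot-removal via free_ioapic_saved_registers().
+ */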
+
+int __init arch_early_ioapic_init(void)
{
struct irq_cfg *cfg;
int i, node = cpu_to_node(0);
@@ -245,13 +253,8 @@ int __init arch_early_irq_init(void)
if (!nr_legacy_irqs())
io_apic_irqs = ~0UL;
- for_each_ioapic(i) {
- ioapics[i].saved_registers =
- kzalloc(sizeof(struct IO_APIC_route_entry) *
- ioapics[i].nr_registers, GFP_KERNEL);
- if (!ioapics[i].saved_registers)
- pr_err("IOAPIC %d: suspend/resume impossible!\n", i);
- }
+ for_each_ioapic(i)
+ alloc_ioapic_saved_registers(i);
/*
* For legacy IRQ's, start with assigning irq0 to irq15 to
@@ -266,61 +269,6 @@ int __init arch_early_irq_init(void)
return 0;
}
-static inline struct irq_cfg *irq_cfg(unsigned int irq)
-{
- return irq_get_chip_data(irq);
-}
-
-static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
-{
- struct irq_cfg *cfg;
-
- cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node);
- if (!cfg)
- return NULL;
- if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node))
- goto out_cfg;
- if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node))
- goto out_domain;
- return cfg;
-out_domain:
- free_cpumask_var(cfg->domain);
-out_cfg:
- kfree(cfg);
- return NULL;
-}
-
-static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg)
-{
- if (!cfg)
- return;
- irq_set_chip_data(at, NULL);
- free_cpumask_var(cfg->domain);
- free_cpumask_var(cfg->old_domain);
- kfree(cfg);
-}
-
-static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
-{
- int res = irq_alloc_desc_at(at, node);
- struct irq_cfg *cfg;
-
- if (res < 0) {
- if (res != -EEXIST)
- return NULL;
- cfg = irq_cfg(at);
- if (cfg)
- return cfg;
- }
-
- cfg = alloc_irq_cfg(at, node);
- if (cfg)
- irq_set_chip_data(at, cfg);
- else
- irq_free_desc(at);
- return cfg;
-}
-
struct io_apic {
unsigned int index;
unsigned int unused[3];
@@ -445,15 +393,12 @@ static void ioapic_mask_entry(int apic, int pin)
*/
static int __add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
{
- struct irq_pin_list **last, *entry;
+ struct irq_pin_list *entry;
/* don't allow duplicates */
- last = &cfg->irq_2_pin;
- for_each_irq_pin(entry, cfg->irq_2_pin) {
+ for_each_irq_pin(entry, cfg->irq_2_pin)
if (entry->apic == apic && entry->pin == pin)
return 0;
- last = &entry->next;
- }
entry = alloc_irq_pin_list(node);
if (!entry) {
@@ -464,22 +409,19 @@ static int __add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pi
entry->apic = apic;
entry->pin = pin;
- *last = entry;
+ list_add_tail(&entry->list, &cfg->irq_2_pin);
return 0;
}
static void __remove_pin_from_irq(struct irq_cfg *cfg, int apic, int pin)
{
- struct irq_pin_list **last, *entry;
+ struct irq_pin_list *tmp, *entry;
- last = &cfg->irq_2_pin;
- for_each_irq_pin(entry, cfg->irq_2_pin)
+ list_for_each_entry_safe(entry, tmp, &cfg->irq_2_pin, list)
if (entry->apic == apic && entry->pin == pin) {
- *last = entry->next;
+ list_del(&entry->list);
kfree(entry);
return;
- } else {
- last = &entry->next;
}
}
@@ -559,7 +501,7 @@ static void mask_ioapic(struct irq_cfg *cfg)
static void mask_ioapic_irq(struct irq_data *data)
{
- mask_ioapic(data->chip_data);
+ mask_ioapic(irqd_cfg(data));
}
static void __unmask_ioapic(struct irq_cfg *cfg)
@@ -578,7 +520,7 @@ static void unmask_ioapic(struct irq_cfg *cfg)
static void unmask_ioapic_irq(struct irq_data *data)
{
- unmask_ioapic(data->chip_data);
+ unmask_ioapic(irqd_cfg(data));
}
/*
@@ -1164,8 +1106,7 @@ void mp_unmap_irq(int irq)
* Find a specific PCI IRQ entry.
* Not an __init, possibly needed by modules
*/
-int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
- struct io_apic_irq_attr *irq_attr)
+int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
{
int irq, i, best_ioapic = -1, best_idx = -1;
@@ -1219,195 +1160,11 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
return -1;
out:
- irq = pin_2_irq(best_idx, best_ioapic, mp_irqs[best_idx].dstirq,
- IOAPIC_MAP_ALLOC);
- if (irq > 0)
- set_io_apic_irq_attr(irq_attr, best_ioapic,
- mp_irqs[best_idx].dstirq,
- irq_trigger(best_idx),
- irq_polarity(best_idx));
- return irq;
+ return pin_2_irq(best_idx, best_ioapic, mp_irqs[best_idx].dstirq,
+ IOAPIC_MAP_ALLOC);
}
EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
-void lock_vector_lock(void)
-{
- /* Used to the online set of cpus does not change
- * during assign_irq_vector.
- */
- raw_spin_lock(&vector_lock);
-}
-
-void unlock_vector_lock(void)
-{
- raw_spin_unlock(&vector_lock);
-}
-
-static int
-__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
-{
- /*
- * NOTE! The local APIC isn't very good at handling
- * multiple interrupts at the same interrupt level.
- * As the interrupt level is determined by taking the
- * vector number and shifting that right by 4, we
- * want to spread these out a bit so that they don't
- * all fall in the same interrupt level.
- *
- * Also, we've got to be careful not to trash gate
- * 0x80, because int 0x80 is hm, kind of importantish. ;)
- */
- static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
- static int current_offset = VECTOR_OFFSET_START % 16;
- int cpu, err;
- cpumask_var_t tmp_mask;
-
- if (cfg->move_in_progress)
- return -EBUSY;
-
- if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
- return -ENOMEM;
-
- /* Only try and allocate irqs on cpus that are present */
- err = -ENOSPC;
- cpumask_clear(cfg->old_domain);
- cpu = cpumask_first_and(mask, cpu_online_mask);
- while (cpu < nr_cpu_ids) {
- int new_cpu, vector, offset;
-
- apic->vector_allocation_domain(cpu, tmp_mask, mask);
-
- if (cpumask_subset(tmp_mask, cfg->domain)) {
- err = 0;
- if (cpumask_equal(tmp_mask, cfg->domain))
- break;
- /*
- * New cpumask using the vector is a proper subset of
- * the current in use mask. So cleanup the vector
- * allocation for the members that are not used anymore.
- */
- cpumask_andnot(cfg->old_domain, cfg->domain, tmp_mask);
- cfg->move_in_progress =
- cpumask_intersects(cfg->old_domain, cpu_online_mask);
- cpumask_and(cfg->domain, cfg->domain, tmp_mask);
- break;
- }
-
- vector = current_vector;
- offset = current_offset;
-next:
- vector += 16;
- if (vector >= first_system_vector) {
- offset = (offset + 1) % 16;
- vector = FIRST_EXTERNAL_VECTOR + offset;
- }
-
- if (unlikely(current_vector == vector)) {
- cpumask_or(cfg->old_domain, cfg->old_domain, tmp_mask);
- cpumask_andnot(tmp_mask, mask, cfg->old_domain);
- cpu = cpumask_first_and(tmp_mask, cpu_online_mask);
- continue;
- }
-
- if (test_bit(vector, used_vectors))
- goto next;
-
- for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) {
- if (per_cpu(vector_irq, new_cpu)[vector] > VECTOR_UNDEFINED)
- goto next;
- }
- /* Found one! */
- current_vector = vector;
- current_offset = offset;
- if (cfg->vector) {
- cpumask_copy(cfg->old_domain, cfg->domain);
- cfg->move_in_progress =
- cpumask_intersects(cfg->old_domain, cpu_online_mask);
- }
- for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
- per_cpu(vector_irq, new_cpu)[vector] = irq;
- cfg->vector = vector;
- cpumask_copy(cfg->domain, tmp_mask);
- err = 0;
- break;
- }
- free_cpumask_var(tmp_mask);
- return err;
-}
-
-int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
-{
- int err;
- unsigned long flags;
-
- raw_spin_lock_irqsave(&vector_lock, flags);
- err = __assign_irq_vector(irq, cfg, mask);
- raw_spin_unlock_irqrestore(&vector_lock, flags);
- return err;
-}
-
-static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
-{
- int cpu, vector;
-
- BUG_ON(!cfg->vector);
-
- vector = cfg->vector;
- for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
- per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
-
- cfg->vector = 0;
- cpumask_clear(cfg->domain);
-
- if (likely(!cfg->move_in_progress))
- return;
- for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
- for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
- if (per_cpu(vector_irq, cpu)[vector] != irq)
- continue;
- per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
- break;
- }
- }
- cfg->move_in_progress = 0;
-}
-
-void __setup_vector_irq(int cpu)
-{
- /* Initialize vector_irq on a new cpu */
- int irq, vector;
- struct irq_cfg *cfg;
-
- /*
- * vector_lock will make sure that we don't run into irq vector
- * assignments that might be happening on another cpu in parallel,
- * while we setup our initial vector to irq mappings.
- */
- raw_spin_lock(&vector_lock);
- /* Mark the inuse vectors */
- for_each_active_irq(irq) {
- cfg = irq_cfg(irq);
- if (!cfg)
- continue;
-
- if (!cpumask_test_cpu(cpu, cfg->domain))
- continue;
- vector = cfg->vector;
- per_cpu(vector_irq, cpu)[vector] = irq;
- }
- /* Mark the free vectors */
- for (vector = 0; vector < NR_VECTORS; ++vector) {
- irq = per_cpu(vector_irq, cpu)[vector];
- if (irq <= VECTOR_UNDEFINED)
- continue;
-
- cfg = irq_cfg(irq);
- if (!cpumask_test_cpu(cpu, cfg->domain))
- per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
- }
- raw_spin_unlock(&vector_lock);
-}
-
static struct irq_chip ioapic_chip;
#ifdef CONFIG_X86_32
@@ -1496,7 +1253,7 @@ static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg,
&dest)) {
pr_warn("Failed to obtain apicid for ioapic %d, pin %d\n",
mpc_ioapic_id(attr->ioapic), attr->ioapic_pin);
- __clear_irq_vector(irq, cfg);
+ clear_irq_vector(irq, cfg);
return;
}
@@ -1510,7 +1267,7 @@ static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg,
if (x86_io_apic_ops.setup_entry(irq, &entry, dest, cfg->vector, attr)) {
pr_warn("Failed to setup ioapic entry for ioapic %d, pin %d\n",
mpc_ioapic_id(attr->ioapic), attr->ioapic_pin);
- __clear_irq_vector(irq, cfg);
+ clear_irq_vector(irq, cfg);
return;
}
@@ -1641,7 +1398,7 @@ void ioapic_zap_locks(void)
raw_spin_lock_init(&ioapic_lock);
}
-__apicdebuginit(void) print_IO_APIC(int ioapic_idx)
+static void __init print_IO_APIC(int ioapic_idx)
{
union IO_APIC_reg_00 reg_00;
union IO_APIC_reg_01 reg_01;
@@ -1698,7 +1455,7 @@ __apicdebuginit(void) print_IO_APIC(int ioapic_idx)
x86_io_apic_ops.print_entries(ioapic_idx, reg_01.bits.entries);
}
-__apicdebuginit(void) print_IO_APICs(void)
+void __init print_IO_APICs(void)
{
int ioapic_idx;
struct irq_cfg *cfg;
@@ -1731,8 +1488,7 @@ __apicdebuginit(void) print_IO_APICs(void)
cfg = irq_cfg(irq);
if (!cfg)
continue;
- entry = cfg->irq_2_pin;
- if (!entry)
+ if (list_empty(&cfg->irq_2_pin))
continue;
printk(KERN_DEBUG "IRQ%d ", irq);
for_each_irq_pin(entry, cfg->irq_2_pin)
@@ -1743,205 +1499,6 @@ __apicdebuginit(void) print_IO_APICs(void)
printk(KERN_INFO ".................................... done.\n");
}
-__apicdebuginit(void) print_APIC_field(int base)
-{
- int i;
-
- printk(KERN_DEBUG);
-
- for (i = 0; i < 8; i++)
- pr_cont("%08x", apic_read(base + i*0x10));
-
- pr_cont("\n");
-}
-
-__apicdebuginit(void) print_local_APIC(void *dummy)
-{
- unsigned int i, v, ver, maxlvt;
- u64 icr;
-
- printk(KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
- smp_processor_id(), hard_smp_processor_id());
- v = apic_read(APIC_ID);
- printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, read_apic_id());
- v = apic_read(APIC_LVR);
- printk(KERN_INFO "... APIC VERSION: %08x\n", v);
- ver = GET_APIC_VERSION(v);
- maxlvt = lapic_get_maxlvt();
-
- v = apic_read(APIC_TASKPRI);
- printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
-
- if (APIC_INTEGRATED(ver)) { /* !82489DX */
- if (!APIC_XAPIC(ver)) {
- v = apic_read(APIC_ARBPRI);
- printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
- v & APIC_ARBPRI_MASK);
- }
- v = apic_read(APIC_PROCPRI);
- printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
- }
-
- /*
- * Remote read supported only in the 82489DX and local APIC for
- * Pentium processors.
- */
- if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
- v = apic_read(APIC_RRR);
- printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
- }
-
- v = apic_read(APIC_LDR);
- printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
- if (!x2apic_enabled()) {
- v = apic_read(APIC_DFR);
- printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
- }
- v = apic_read(APIC_SPIV);
- printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
-
- printk(KERN_DEBUG "... APIC ISR field:\n");
- print_APIC_field(APIC_ISR);
- printk(KERN_DEBUG "... APIC TMR field:\n");
- print_APIC_field(APIC_TMR);
- printk(KERN_DEBUG "... APIC IRR field:\n");
- print_APIC_field(APIC_IRR);
-
- if (APIC_INTEGRATED(ver)) { /* !82489DX */
- if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
- apic_write(APIC_ESR, 0);
-
- v = apic_read(APIC_ESR);
- printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
- }
-
- icr = apic_icr_read();
- printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
- printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));
-
- v = apic_read(APIC_LVTT);
- printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
-
- if (maxlvt > 3) { /* PC is LVT#4. */
- v = apic_read(APIC_LVTPC);
- printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
- }
- v = apic_read(APIC_LVT0);
- printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
- v = apic_read(APIC_LVT1);
- printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
-
- if (maxlvt > 2) { /* ERR is LVT#3. */
- v = apic_read(APIC_LVTERR);
- printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
- }
-
- v = apic_read(APIC_TMICT);
- printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
- v = apic_read(APIC_TMCCT);
- printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
- v = apic_read(APIC_TDCR);
- printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
-
- if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
- v = apic_read(APIC_EFEAT);
- maxlvt = (v >> 16) & 0xff;
- printk(KERN_DEBUG "... APIC EFEAT: %08x\n", v);
- v = apic_read(APIC_ECTRL);
- printk(KERN_DEBUG "... APIC ECTRL: %08x\n", v);
- for (i = 0; i < maxlvt; i++) {
- v = apic_read(APIC_EILVTn(i));
- printk(KERN_DEBUG "... APIC EILVT%d: %08x\n", i, v);
- }
- }
- pr_cont("\n");
-}
-
-__apicdebuginit(void) print_local_APICs(int maxcpu)
-{
- int cpu;
-
- if (!maxcpu)
- return;
-
- preempt_disable();
- for_each_online_cpu(cpu) {
- if (cpu >= maxcpu)
- break;
- smp_call_function_single(cpu, print_local_APIC, NULL, 1);
- }
- preempt_enable();
-}
-
-__apicdebuginit(void) print_PIC(void)
-{
- unsigned int v;
- unsigned long flags;
-
- if (!nr_legacy_irqs())
- return;
-
- printk(KERN_DEBUG "\nprinting PIC contents\n");
-
- raw_spin_lock_irqsave(&i8259A_lock, flags);
-
- v = inb(0xa1) << 8 | inb(0x21);
- printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
-
- v = inb(0xa0) << 8 | inb(0x20);
- printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
-
- outb(0x0b,0xa0);
- outb(0x0b,0x20);
- v = inb(0xa0) << 8 | inb(0x20);
- outb(0x0a,0xa0);
- outb(0x0a,0x20);
-
- raw_spin_unlock_irqrestore(&i8259A_lock, flags);
-
- printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
-
- v = inb(0x4d1) << 8 | inb(0x4d0);
- printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
-}
-
-static int __initdata show_lapic = 1;
-static __init int setup_show_lapic(char *arg)
-{
- int num = -1;
-
- if (strcmp(arg, "all") == 0) {
- show_lapic = CONFIG_NR_CPUS;
- } else {
- get_option(&arg, &num);
- if (num >= 0)
- show_lapic = num;
- }
-
- return 1;
-}
-__setup("show_lapic=", setup_show_lapic);
-
-__apicdebuginit(int) print_ICs(void)
-{
- if (apic_verbosity == APIC_QUIET)
- return 0;
-
- print_PIC();
-
- /* don't print out if apic is not there */
- if (!cpu_has_apic && !apic_from_smp_config())
- return 0;
-
- print_local_APICs(show_lapic);
- print_IO_APICs();
-
- return 0;
-}
-
-late_initcall(print_ICs);
-
-
/* Where if anywhere is the i8259 connect in external int mode */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
@@ -2244,26 +1801,12 @@ static unsigned int startup_ioapic_irq(struct irq_data *data)
if (legacy_pic->irq_pending(irq))
was_pending = 1;
}
- __unmask_ioapic(data->chip_data);
+ __unmask_ioapic(irqd_cfg(data));
raw_spin_unlock_irqrestore(&ioapic_lock, flags);
return was_pending;
}
-static int ioapic_retrigger_irq(struct irq_data *data)
-{
- struct irq_cfg *cfg = data->chip_data;
- unsigned long flags;
- int cpu;
-
- raw_spin_lock_irqsave(&vector_lock, flags);
- cpu = cpumask_first_and(cfg->domain, cpu_online_mask);
- apic->send_IPI_mask(cpumask_of(cpu), cfg->vector);
- raw_spin_unlock_irqrestore(&vector_lock, flags);
-
- return 1;
-}
-
/*
* Level and edge triggered IO-APIC interrupts need different handling,
* so we use two separate IRQ descriptors. Edge triggered IRQs can be
@@ -2273,113 +1816,6 @@ static int ioapic_retrigger_irq(struct irq_data *data)
* races.
*/
-#ifdef CONFIG_SMP
-void send_cleanup_vector(struct irq_cfg *cfg)
-{
- cpumask_var_t cleanup_mask;
-
- if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
- unsigned int i;
- for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
- apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
- } else {
- cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
- apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
- free_cpumask_var(cleanup_mask);
- }
- cfg->move_in_progress = 0;
-}
-
-asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
-{
- unsigned vector, me;
-
- ack_APIC_irq();
- irq_enter();
- exit_idle();
-
- me = smp_processor_id();
- for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
- int irq;
- unsigned int irr;
- struct irq_desc *desc;
- struct irq_cfg *cfg;
- irq = __this_cpu_read(vector_irq[vector]);
-
- if (irq <= VECTOR_UNDEFINED)
- continue;
-
- desc = irq_to_desc(irq);
- if (!desc)
- continue;
-
- cfg = irq_cfg(irq);
- if (!cfg)
- continue;
-
- raw_spin_lock(&desc->lock);
-
- /*
- * Check if the irq migration is in progress. If so, we
- * haven't received the cleanup request yet for this irq.
- */
- if (cfg->move_in_progress)
- goto unlock;
-
- if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
- goto unlock;
-
- irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
- /*
- * Check if the vector that needs to be cleanedup is
- * registered at the cpu's IRR. If so, then this is not
- * the best time to clean it up. Lets clean it up in the
- * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
- * to myself.
- */
- if (irr & (1 << (vector % 32))) {
- apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
- goto unlock;
- }
- __this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
-unlock:
- raw_spin_unlock(&desc->lock);
- }
-
- irq_exit();
-}
-
-static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
-{
- unsigned me;
-
- if (likely(!cfg->move_in_progress))
- return;
-
- me = smp_processor_id();
-
- if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
- send_cleanup_vector(cfg);
-}
-
-static void irq_complete_move(struct irq_cfg *cfg)
-{
- __irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
-}
-
-void irq_force_complete_move(int irq)
-{
- struct irq_cfg *cfg = irq_cfg(irq);
-
- if (!cfg)
- return;
-
- __irq_complete_move(cfg, cfg->vector);
-}
-#else
-static inline void irq_complete_move(struct irq_cfg *cfg) { }
-#endif
-
static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
{
int apic, pin;
@@ -2400,41 +1836,6 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq
}
}
-/*
- * Either sets data->affinity to a valid value, and returns
- * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and
- * leaves data->affinity untouched.
- */
-int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
- unsigned int *dest_id)
-{
- struct irq_cfg *cfg = data->chip_data;
- unsigned int irq = data->irq;
- int err;
-
- if (!config_enabled(CONFIG_SMP))
- return -EPERM;
-
- if (!cpumask_intersects(mask, cpu_online_mask))
- return -EINVAL;
-
- err = assign_irq_vector(irq, cfg, mask);
- if (err)
- return err;
-
- err = apic->cpu_mask_to_apicid_and(mask, cfg->domain, dest_id);
- if (err) {
- if (assign_irq_vector(irq, cfg, data->affinity))
- pr_err("Failed to recover vector for irq %d\n", irq);
- return err;
- }
-
- cpumask_copy(data->affinity, mask);
-
- return 0;
-}
-
-
int native_ioapic_set_affinity(struct irq_data *data,
const struct cpumask *mask,
bool force)
@@ -2447,24 +1848,17 @@ int native_ioapic_set_affinity(struct irq_data *data,
return -EPERM;
raw_spin_lock_irqsave(&ioapic_lock, flags);
- ret = __ioapic_set_affinity(data, mask, &dest);
+ ret = apic_set_affinity(data, mask, &dest);
if (!ret) {
/* Only the high 8 bits are valid. */
dest = SET_APIC_LOGICAL_ID(dest);
- __target_IO_APIC_irq(irq, dest, data->chip_data);
+ __target_IO_APIC_irq(irq, dest, irqd_cfg(data));
ret = IRQ_SET_MASK_OK_NOCOPY;
}
raw_spin_unlock_irqrestore(&ioapic_lock, flags);
return ret;
}
-static void ack_apic_edge(struct irq_data *data)
-{
- irq_complete_move(data->chip_data);
- irq_move_irq(data);
- ack_APIC_irq();
-}
-
atomic_t irq_mis_count;
#ifdef CONFIG_GENERIC_PENDING_IRQ
@@ -2547,9 +1941,9 @@ static inline void ioapic_irqd_unmask(struct irq_data *data,
}
#endif
-static void ack_apic_level(struct irq_data *data)
+static void ack_ioapic_level(struct irq_data *data)
{
- struct irq_cfg *cfg = data->chip_data;
+ struct irq_cfg *cfg = irqd_cfg(data);
int i, irq = data->irq;
unsigned long v;
bool masked;
@@ -2619,10 +2013,10 @@ static struct irq_chip ioapic_chip __read_mostly = {
.irq_startup = startup_ioapic_irq,
.irq_mask = mask_ioapic_irq,
.irq_unmask = unmask_ioapic_irq,
- .irq_ack = ack_apic_edge,
- .irq_eoi = ack_apic_level,
+ .irq_ack = apic_ack_edge,
+ .irq_eoi = ack_ioapic_level,
.irq_set_affinity = native_ioapic_set_affinity,
- .irq_retrigger = ioapic_retrigger_irq,
+ .irq_retrigger = apic_retrigger_irq,
.flags = IRQCHIP_SKIP_SET_WAKE,
};
@@ -2965,6 +2359,16 @@ static int mp_irqdomain_create(int ioapic)
return 0;
}
+static void ioapic_destroy_irqdomain(int idx)
+{
+ if (ioapics[idx].irqdomain) {
+ irq_domain_remove(ioapics[idx].irqdomain);
+ ioapics[idx].irqdomain = NULL;
+ }
+ kfree(ioapics[idx].pin_info);
+ ioapics[idx].pin_info = NULL;
+}
+
void __init setup_IO_APIC(void)
{
int ioapic;
@@ -3044,399 +2448,6 @@ static int __init ioapic_init_ops(void)
device_initcall(ioapic_init_ops);
-/*
- * Dynamic irq allocate and deallocation. Should be replaced by irq domains!
- */
-int arch_setup_hwirq(unsigned int irq, int node)
-{
- struct irq_cfg *cfg;
- unsigned long flags;
- int ret;
-
- cfg = alloc_irq_cfg(irq, node);
- if (!cfg)
- return -ENOMEM;
-
- raw_spin_lock_irqsave(&vector_lock, flags);
- ret = __assign_irq_vector(irq, cfg, apic->target_cpus());
- raw_spin_unlock_irqrestore(&vector_lock, flags);
-
- if (!ret)
- irq_set_chip_data(irq, cfg);
- else
- free_irq_cfg(irq, cfg);
- return ret;
-}
-
-void arch_teardown_hwirq(unsigned int irq)
-{
- struct irq_cfg *cfg = irq_cfg(irq);
- unsigned long flags;
-
- free_remapped_irq(irq);
- raw_spin_lock_irqsave(&vector_lock, flags);
- __clear_irq_vector(irq, cfg);
- raw_spin_unlock_irqrestore(&vector_lock, flags);
- free_irq_cfg(irq, cfg);
-}
-
-/*
- * MSI message composition
- */
-void native_compose_msi_msg(struct pci_dev *pdev,
- unsigned int irq, unsigned int dest,
- struct msi_msg *msg, u8 hpet_id)
-{
- struct irq_cfg *cfg = irq_cfg(irq);
-
- msg->address_hi = MSI_ADDR_BASE_HI;
-
- if (x2apic_enabled())
- msg->address_hi |= MSI_ADDR_EXT_DEST_ID(dest);
-
- msg->address_lo =
- MSI_ADDR_BASE_LO |
- ((apic->irq_dest_mode == 0) ?
- MSI_ADDR_DEST_MODE_PHYSICAL:
- MSI_ADDR_DEST_MODE_LOGICAL) |
- ((apic->irq_delivery_mode != dest_LowestPrio) ?
- MSI_ADDR_REDIRECTION_CPU:
- MSI_ADDR_REDIRECTION_LOWPRI) |
- MSI_ADDR_DEST_ID(dest);
-
- msg->data =
- MSI_DATA_TRIGGER_EDGE |
- MSI_DATA_LEVEL_ASSERT |
- ((apic->irq_delivery_mode != dest_LowestPrio) ?
- MSI_DATA_DELIVERY_FIXED:
- MSI_DATA_DELIVERY_LOWPRI) |
- MSI_DATA_VECTOR(cfg->vector);
-}
-
-#ifdef CONFIG_PCI_MSI
-static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
- struct msi_msg *msg, u8 hpet_id)
-{
- struct irq_cfg *cfg;
- int err;
- unsigned dest;
-
- if (disable_apic)
- return -ENXIO;
-
- cfg = irq_cfg(irq);
- err = assign_irq_vector(irq, cfg, apic->target_cpus());
- if (err)
- return err;
-
- err = apic->cpu_mask_to_apicid_and(cfg->domain,
- apic->target_cpus(), &dest);
- if (err)
- return err;
-
- x86_msi.compose_msi_msg(pdev, irq, dest, msg, hpet_id);
-
- return 0;
-}
-
-static int
-msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
-{
- struct irq_cfg *cfg = data->chip_data;
- struct msi_msg msg;
- unsigned int dest;
- int ret;
-
- ret = __ioapic_set_affinity(data, mask, &dest);
- if (ret)
- return ret;
-
- __get_cached_msi_msg(data->msi_desc, &msg);
-
- msg.data &= ~MSI_DATA_VECTOR_MASK;
- msg.data |= MSI_DATA_VECTOR(cfg->vector);
- msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
- msg.address_lo |= MSI_ADDR_DEST_ID(dest);
-
- __pci_write_msi_msg(data->msi_desc, &msg);
-
- return IRQ_SET_MASK_OK_NOCOPY;
-}
-
-/*
- * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
- * which implement the MSI or MSI-X Capability Structure.
- */
-static struct irq_chip msi_chip = {
- .name = "PCI-MSI",
- .irq_unmask = pci_msi_unmask_irq,
- .irq_mask = pci_msi_mask_irq,
- .irq_ack = ack_apic_edge,
- .irq_set_affinity = msi_set_affinity,
- .irq_retrigger = ioapic_retrigger_irq,
- .flags = IRQCHIP_SKIP_SET_WAKE,
-};
-
-int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,
- unsigned int irq_base, unsigned int irq_offset)
-{
- struct irq_chip *chip = &msi_chip;
- struct msi_msg msg;
- unsigned int irq = irq_base + irq_offset;
- int ret;
-
- ret = msi_compose_msg(dev, irq, &msg, -1);
- if (ret < 0)
- return ret;
-
- irq_set_msi_desc_off(irq_base, irq_offset, msidesc);
-
- /*
- * MSI-X message is written per-IRQ, the offset is always 0.
- * MSI message denotes a contiguous group of IRQs, written for 0th IRQ.
- */
- if (!irq_offset)
- pci_write_msi_msg(irq, &msg);
-
- setup_remapped_irq(irq, irq_cfg(irq), chip);
-
- irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
-
- dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq);
-
- return 0;
-}
-
-int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
-{
- struct msi_desc *msidesc;
- unsigned int irq;
- int node, ret;
-
- /* Multiple MSI vectors only supported with interrupt remapping */
- if (type == PCI_CAP_ID_MSI && nvec > 1)
- return 1;
-
- node = dev_to_node(&dev->dev);
-
- list_for_each_entry(msidesc, &dev->msi_list, list) {
- irq = irq_alloc_hwirq(node);
- if (!irq)
- return -ENOSPC;
-
- ret = setup_msi_irq(dev, msidesc, irq, 0);
- if (ret < 0) {
- irq_free_hwirq(irq);
- return ret;
- }
-
- }
- return 0;
-}
-
-void native_teardown_msi_irq(unsigned int irq)
-{
- irq_free_hwirq(irq);
-}
-
-#ifdef CONFIG_DMAR_TABLE
-static int
-dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask,
- bool force)
-{
- struct irq_cfg *cfg = data->chip_data;
- unsigned int dest, irq = data->irq;
- struct msi_msg msg;
- int ret;
-
- ret = __ioapic_set_affinity(data, mask, &dest);
- if (ret)
- return ret;
-
- dmar_msi_read(irq, &msg);
-
- msg.data &= ~MSI_DATA_VECTOR_MASK;
- msg.data |= MSI_DATA_VECTOR(cfg->vector);
- msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
- msg.address_lo |= MSI_ADDR_DEST_ID(dest);
- msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest);
-
- dmar_msi_write(irq, &msg);
-
- return IRQ_SET_MASK_OK_NOCOPY;
-}
-
-static struct irq_chip dmar_msi_type = {
- .name = "DMAR_MSI",
- .irq_unmask = dmar_msi_unmask,
- .irq_mask = dmar_msi_mask,
- .irq_ack = ack_apic_edge,
- .irq_set_affinity = dmar_msi_set_affinity,
- .irq_retrigger = ioapic_retrigger_irq,
- .flags = IRQCHIP_SKIP_SET_WAKE,
-};
-
-int arch_setup_dmar_msi(unsigned int irq)
-{
- int ret;
- struct msi_msg msg;
-
- ret = msi_compose_msg(NULL, irq, &msg, -1);
- if (ret < 0)
- return ret;
- dmar_msi_write(irq, &msg);
- irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
- "edge");
- return 0;
-}
-#endif
-
-#ifdef CONFIG_HPET_TIMER
-
-static int hpet_msi_set_affinity(struct irq_data *data,
- const struct cpumask *mask, bool force)
-{
- struct irq_cfg *cfg = data->chip_data;
- struct msi_msg msg;
- unsigned int dest;
- int ret;
-
- ret = __ioapic_set_affinity(data, mask, &dest);
- if (ret)
- return ret;
-
- hpet_msi_read(data->handler_data, &msg);
-
- msg.data &= ~MSI_DATA_VECTOR_MASK;
- msg.data |= MSI_DATA_VECTOR(cfg->vector);
- msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
- msg.address_lo |= MSI_ADDR_DEST_ID(dest);
-
- hpet_msi_write(data->handler_data, &msg);
-
- return IRQ_SET_MASK_OK_NOCOPY;
-}
-
-static struct irq_chip hpet_msi_type = {
- .name = "HPET_MSI",
- .irq_unmask = hpet_msi_unmask,
- .irq_mask = hpet_msi_mask,
- .irq_ack = ack_apic_edge,
- .irq_set_affinity = hpet_msi_set_affinity,
- .irq_retrigger = ioapic_retrigger_irq,
- .flags = IRQCHIP_SKIP_SET_WAKE,
-};
-
-int default_setup_hpet_msi(unsigned int irq, unsigned int id)
-{
- struct irq_chip *chip = &hpet_msi_type;
- struct msi_msg msg;
- int ret;
-
- ret = msi_compose_msg(NULL, irq, &msg, id);
- if (ret < 0)
- return ret;
-
- hpet_msi_write(irq_get_handler_data(irq), &msg);
- irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
- setup_remapped_irq(irq, irq_cfg(irq), chip);
-
- irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
- return 0;
-}
-#endif
-
-#endif /* CONFIG_PCI_MSI */
-/*
- * Hypertransport interrupt support
- */
-#ifdef CONFIG_HT_IRQ
-
-static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
-{
- struct ht_irq_msg msg;
- fetch_ht_irq_msg(irq, &msg);
-
- msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
- msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
-
- msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
- msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
-
- write_ht_irq_msg(irq, &msg);
-}
-
-static int
-ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
-{
- struct irq_cfg *cfg = data->chip_data;
- unsigned int dest;
- int ret;
-
- ret = __ioapic_set_affinity(data, mask, &dest);
- if (ret)
- return ret;
-
- target_ht_irq(data->irq, dest, cfg->vector);
- return IRQ_SET_MASK_OK_NOCOPY;
-}
-
-static struct irq_chip ht_irq_chip = {
- .name = "PCI-HT",
- .irq_mask = mask_ht_irq,
- .irq_unmask = unmask_ht_irq,
- .irq_ack = ack_apic_edge,
- .irq_set_affinity = ht_set_affinity,
- .irq_retrigger = ioapic_retrigger_irq,
- .flags = IRQCHIP_SKIP_SET_WAKE,
-};
-
-int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
-{
- struct irq_cfg *cfg;
- struct ht_irq_msg msg;
- unsigned dest;
- int err;
-
- if (disable_apic)
- return -ENXIO;
-
- cfg = irq_cfg(irq);
- err = assign_irq_vector(irq, cfg, apic->target_cpus());
- if (err)
- return err;
-
- err = apic->cpu_mask_to_apicid_and(cfg->domain,
- apic->target_cpus(), &dest);
- if (err)
- return err;
-
- msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
-
- msg.address_lo =
- HT_IRQ_LOW_BASE |
- HT_IRQ_LOW_DEST_ID(dest) |
- HT_IRQ_LOW_VECTOR(cfg->vector) |
- ((apic->irq_dest_mode == 0) ?
- HT_IRQ_LOW_DM_PHYSICAL :
- HT_IRQ_LOW_DM_LOGICAL) |
- HT_IRQ_LOW_RQEOI_EDGE |
- ((apic->irq_delivery_mode != dest_LowestPrio) ?
- HT_IRQ_LOW_MT_FIXED :
- HT_IRQ_LOW_MT_ARBITRATED) |
- HT_IRQ_LOW_IRQ_MASKED;
-
- write_ht_irq_msg(irq, &msg);
-
- irq_set_chip_and_handler_name(irq, &ht_irq_chip,
- handle_edge_irq, "edge");
-
- dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq);
-
- return 0;
-}
-#endif /* CONFIG_HT_IRQ */
-
static int
io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr)
{
@@ -3451,7 +2462,7 @@ io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr)
return ret;
}
-static int __init io_apic_get_redir_entries(int ioapic)
+static int io_apic_get_redir_entries(int ioapic)
{
union IO_APIC_reg_01 reg_01;
unsigned long flags;
@@ -3476,28 +2487,8 @@ unsigned int arch_dynirq_lower_bound(unsigned int from)
return ioapic_initialized ? ioapic_dynirq_base : gsi_top;
}
-int __init arch_probe_nr_irqs(void)
-{
- int nr;
-
- if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
- nr_irqs = NR_VECTORS * nr_cpu_ids;
-
- nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
-#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
- /*
- * for MSI and HT dyn irq
- */
- nr += gsi_top * 16;
-#endif
- if (nr < nr_irqs)
- nr_irqs = nr;
-
- return 0;
-}
-
#ifdef CONFIG_X86_32
-static int __init io_apic_get_unique_id(int ioapic, int apic_id)
+static int io_apic_get_unique_id(int ioapic, int apic_id)
{
union IO_APIC_reg_00 reg_00;
static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
@@ -3572,30 +2563,63 @@ static int __init io_apic_get_unique_id(int ioapic, int apic_id)
return apic_id;
}
-static u8 __init io_apic_unique_id(u8 id)
+static u8 io_apic_unique_id(int idx, u8 id)
{
if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
!APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
- return io_apic_get_unique_id(nr_ioapics, id);
+ return io_apic_get_unique_id(idx, id);
else
return id;
}
#else
-static u8 __init io_apic_unique_id(u8 id)
+static u8 io_apic_unique_id(int idx, u8 id)
{
- int i;
+ union IO_APIC_reg_00 reg_00;
DECLARE_BITMAP(used, 256);
+ unsigned long flags;
+ u8 new_id;
+ int i;
bitmap_zero(used, 256);
for_each_ioapic(i)
__set_bit(mpc_ioapic_id(i), used);
+
+ /* Hand out the requested id if available */
if (!test_bit(id, used))
return id;
- return find_first_zero_bit(used, 256);
+
+ /*
+ * Read the current id from the ioapic and keep it if
+ * available.
+ */
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
+ reg_00.raw = io_apic_read(idx, 0);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
+ new_id = reg_00.bits.ID;
+ if (!test_bit(new_id, used)) {
+ apic_printk(APIC_VERBOSE, KERN_INFO
+ "IOAPIC[%d]: Using reg apic_id %d instead of %d\n",
+ idx, new_id, id);
+ return new_id;
+ }
+
+ /*
+ * Get the next free id and write it to the ioapic.
+ */
+ new_id = find_first_zero_bit(used, 256);
+ reg_00.bits.ID = new_id;
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
+ io_apic_write(idx, 0, reg_00.raw);
+ reg_00.raw = io_apic_read(idx, 0);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
+ /* Sanity check */
+ BUG_ON(reg_00.bits.ID != new_id);
+
+ return new_id;
}
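+
+/*
+ * Note: on 64-bit the unique id selection now prefers, in order: the
+ * requested id if unused, the id already programmed into the IOAPIC if
+ * unused, and otherwise the first free id, which is written back to the
+ * chip and verified.
+ */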
#endif
-static int __init io_apic_get_version(int ioapic)
+static int io_apic_get_version(int ioapic)
{
union IO_APIC_reg_01 reg_01;
unsigned long flags;
@@ -3702,6 +2726,7 @@ static struct resource * __init ioapic_setup_resources(void)
snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
mem += IOAPIC_RESOURCE_NAME_SIZE;
num++;
+ ioapics[i].iomem_res = res;
}
ioapic_resources = res;
@@ -3799,21 +2824,7 @@ int mp_find_ioapic_pin(int ioapic, u32 gsi)
return gsi - gsi_cfg->gsi_base;
}
-static __init int bad_ioapic(unsigned long address)
-{
- if (nr_ioapics >= MAX_IO_APICS) {
- pr_warn("WARNING: Max # of I/O APICs (%d) exceeded (found %d), skipping\n",
- MAX_IO_APICS, nr_ioapics);
- return 1;
- }
- if (!address) {
- pr_warn("WARNING: Bogus (zero) I/O APIC address found in table, skipping!\n");
- return 1;
- }
- return 0;
-}
-
-static __init int bad_ioapic_register(int idx)
+static int bad_ioapic_register(int idx)
{
union IO_APIC_reg_00 reg_00;
union IO_APIC_reg_01 reg_01;
@@ -3832,32 +2843,61 @@ static __init int bad_ioapic_register(int idx)
return 0;
}
-void __init mp_register_ioapic(int id, u32 address, u32 gsi_base,
- struct ioapic_domain_cfg *cfg)
+static int find_free_ioapic_entry(void)
{
- int idx = 0;
- int entries;
+ int idx;
+
+ for (idx = 0; idx < MAX_IO_APICS; idx++)
+ if (ioapics[idx].nr_registers == 0)
+ return idx;
+
+ return MAX_IO_APICS;
+}
+
+/**
+ * mp_register_ioapic - Register an IOAPIC device
+ * @id: hardware IOAPIC ID
+ * @address: physical address of IOAPIC register area
+ * @gsi_base: base of GSI associated with the IOAPIC
+ * @cfg: configuration information for the IOAPIC
+ */
+int mp_register_ioapic(int id, u32 address, u32 gsi_base,
+ struct ioapic_domain_cfg *cfg)
+{
+ bool hotplug = !!ioapic_initialized;
struct mp_ioapic_gsi *gsi_cfg;
+ int idx, ioapic, entries;
+ u32 gsi_end;
- if (bad_ioapic(address))
- return;
+ if (!address) {
+ pr_warn("Bogus (zero) I/O APIC address found, skipping!\n");
+ return -EINVAL;
+ }
+ for_each_ioapic(ioapic)
+ if (ioapics[ioapic].mp_config.apicaddr == address) {
+ pr_warn("address 0x%x conflicts with IOAPIC%d\n",
+ address, ioapic);
+ return -EEXIST;
+ }
- idx = nr_ioapics;
+ idx = find_free_ioapic_entry();
+ if (idx >= MAX_IO_APICS) {
+ pr_warn("Max # of I/O APICs (%d) exceeded (found %d), skipping\n",
+ MAX_IO_APICS, idx);
+ return -ENOSPC;
+ }
ioapics[idx].mp_config.type = MP_IOAPIC;
ioapics[idx].mp_config.flags = MPC_APIC_USABLE;
ioapics[idx].mp_config.apicaddr = address;
- ioapics[idx].irqdomain = NULL;
- ioapics[idx].irqdomain_cfg = *cfg;
set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
-
if (bad_ioapic_register(idx)) {
clear_fixmap(FIX_IO_APIC_BASE_0 + idx);
- return;
+ return -ENODEV;
}
- ioapics[idx].mp_config.apicid = io_apic_unique_id(id);
+ ioapics[idx].mp_config.apicid = io_apic_unique_id(idx, id);
ioapics[idx].mp_config.apicver = io_apic_get_version(idx);
/*
@@ -3865,24 +2905,112 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base,
* and to prevent reprogramming of IOAPIC pins (PCI GSIs).
*/
entries = io_apic_get_redir_entries(idx);
+ gsi_end = gsi_base + entries - 1;
+ for_each_ioapic(ioapic) {
+ gsi_cfg = mp_ioapic_gsi_routing(ioapic);
+ if ((gsi_base >= gsi_cfg->gsi_base &&
+ gsi_base <= gsi_cfg->gsi_end) ||
+ (gsi_end >= gsi_cfg->gsi_base &&
+ gsi_end <= gsi_cfg->gsi_end)) {
+ pr_warn("GSI range [%u-%u] for new IOAPIC conflicts with GSI[%u-%u]\n",
+ gsi_base, gsi_end,
+ gsi_cfg->gsi_base, gsi_cfg->gsi_end);
+ clear_fixmap(FIX_IO_APIC_BASE_0 + idx);
+ return -ENOSPC;
+ }
+ }
gsi_cfg = mp_ioapic_gsi_routing(idx);
gsi_cfg->gsi_base = gsi_base;
- gsi_cfg->gsi_end = gsi_base + entries - 1;
+ gsi_cfg->gsi_end = gsi_end;
+
+ ioapics[idx].irqdomain = NULL;
+ ioapics[idx].irqdomain_cfg = *cfg;
/*
- * The number of IO-APIC IRQ registers (== #pins):
+ * If mp_register_ioapic() is called during the early boot stage while
+ * walking ACPI/SFI/DT tables, it is too early to create an irqdomain
+ * because the bootmem allocator is still in use, so delay that to
+ * setup_IO_APIC().
*/
- ioapics[idx].nr_registers = entries;
+ if (hotplug) {
+ if (mp_irqdomain_create(idx)) {
+ clear_fixmap(FIX_IO_APIC_BASE_0 + idx);
+ return -ENOMEM;
+ }
+ alloc_ioapic_saved_registers(idx);
+ }
if (gsi_cfg->gsi_end >= gsi_top)
gsi_top = gsi_cfg->gsi_end + 1;
+ if (nr_ioapics <= idx)
+ nr_ioapics = idx + 1;
+
+ /* Set nr_registers to mark entry present */
+ ioapics[idx].nr_registers = entries;
pr_info("IOAPIC[%d]: apic_id %d, version %d, address 0x%x, GSI %d-%d\n",
idx, mpc_ioapic_id(idx),
mpc_ioapic_ver(idx), mpc_ioapic_addr(idx),
gsi_cfg->gsi_base, gsi_cfg->gsi_end);
- nr_ioapics++;
+ return 0;
+}
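+
+/*
+ * Note: unlike the old __init version, mp_register_ioapic() now returns an
+ * error code, rejects duplicate register addresses and overlapping GSI
+ * ranges, and, when called after boot (hotplug), creates the irqdomain and
+ * the saved-register buffer immediately instead of deferring to
+ * setup_IO_APIC().
+ */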
+
+int mp_unregister_ioapic(u32 gsi_base)
+{
+ int ioapic, pin;
+ int found = 0;
+ struct mp_pin_info *pin_info;
+
+ for_each_ioapic(ioapic)
+ if (ioapics[ioapic].gsi_config.gsi_base == gsi_base) {
+ found = 1;
+ break;
+ }
+ if (!found) {
+ pr_warn("can't find IOAPIC for GSI %d\n", gsi_base);
+ return -ENODEV;
+ }
+
+ for_each_pin(ioapic, pin) {
+ pin_info = mp_pin_info(ioapic, pin);
+ if (pin_info->count) {
+ pr_warn("pin%d on IOAPIC%d is still in use.\n",
+ pin, ioapic);
+ return -EBUSY;
+ }
+ }
+
+ /* Mark entry not present */
+ ioapics[ioapic].nr_registers = 0;
+ ioapic_destroy_irqdomain(ioapic);
+ free_ioapic_saved_registers(ioapic);
+ if (ioapics[ioapic].iomem_res)
+ release_resource(ioapics[ioapic].iomem_res);
+ clear_fixmap(FIX_IO_APIC_BASE_0 + ioapic);
+ memset(&ioapics[ioapic], 0, sizeof(ioapics[ioapic]));
+
+ return 0;
+}
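+
+/*
+ * Note: mp_unregister_ioapic() refuses to remove an IOAPIC while any pin is
+ * still in use, then tears down the irqdomain, frees the saved registers,
+ * releases the iomem resource and clears the fixmap before wiping the entry.
+ */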
+
+int mp_ioapic_registered(u32 gsi_base)
+{
+ int ioapic;
+
+ for_each_ioapic(ioapic)
+ if (ioapics[ioapic].gsi_config.gsi_base == gsi_base)
+ return 1;
+
+ return 0;
+}
+
+static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr,
+ int ioapic, int ioapic_pin,
+ int trigger, int polarity)
+{
+ irq_attr->ioapic = ioapic;
+ irq_attr->ioapic_pin = ioapic_pin;
+ irq_attr->trigger = trigger;
+ irq_attr->polarity = polarity;
}
int mp_irqdomain_map(struct irq_domain *domain, unsigned int virq,
@@ -3931,7 +3059,7 @@ void mp_irqdomain_unmap(struct irq_domain *domain, unsigned int virq)
ioapic_mask_entry(ioapic, pin);
__remove_pin_from_irq(cfg, ioapic, pin);
- WARN_ON(cfg->irq_2_pin != NULL);
+ WARN_ON(!list_empty(&cfg->irq_2_pin));
arch_teardown_hwirq(virq);
}
@@ -3964,18 +3092,6 @@ int mp_set_gsi_attr(u32 gsi, int trigger, int polarity, int node)
return ret;
}
-bool mp_should_keep_irq(struct device *dev)
-{
- if (dev->power.is_prepared)
- return true;
-#ifdef CONFIG_PM_RUNTIME
- if (dev->power.runtime_status == RPM_SUSPENDING)
- return true;
-#endif
-
- return false;
-}
-
/* Enable IOAPIC early just for system timer */
void __init pre_init_apic_IRQ0(void)
{
diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c
new file mode 100644
index 00000000000..d6ba2d660dc
--- /dev/null
+++ b/arch/x86/kernel/apic/msi.c
@@ -0,0 +1,286 @@
+/*
+ * Support of MSI, HPET and DMAR interrupts.
+ *
+ * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
+ * Moved from arch/x86/kernel/apic/io_apic.c.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/dmar.h>
+#include <linux/hpet.h>
+#include <linux/msi.h>
+#include <asm/msidef.h>
+#include <asm/hpet.h>
+#include <asm/hw_irq.h>
+#include <asm/apic.h>
+#include <asm/irq_remapping.h>
+
+void native_compose_msi_msg(struct pci_dev *pdev,
+ unsigned int irq, unsigned int dest,
+ struct msi_msg *msg, u8 hpet_id)
+{
+ struct irq_cfg *cfg = irq_cfg(irq);
+
+ msg->address_hi = MSI_ADDR_BASE_HI;
+
+ if (x2apic_enabled())
+ msg->address_hi |= MSI_ADDR_EXT_DEST_ID(dest);
+
+ msg->address_lo =
+ MSI_ADDR_BASE_LO |
+ ((apic->irq_dest_mode == 0) ?
+ MSI_ADDR_DEST_MODE_PHYSICAL :
+ MSI_ADDR_DEST_MODE_LOGICAL) |
+ ((apic->irq_delivery_mode != dest_LowestPrio) ?
+ MSI_ADDR_REDIRECTION_CPU :
+ MSI_ADDR_REDIRECTION_LOWPRI) |
+ MSI_ADDR_DEST_ID(dest);
+
+ msg->data =
+ MSI_DATA_TRIGGER_EDGE |
+ MSI_DATA_LEVEL_ASSERT |
+ ((apic->irq_delivery_mode != dest_LowestPrio) ?
+ MSI_DATA_DELIVERY_FIXED :
+ MSI_DATA_DELIVERY_LOWPRI) |
+ MSI_DATA_VECTOR(cfg->vector);
+}
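+
+/*
+ * Note: the MSI address word encodes the destination APIC ID and the
+ * destination mode (physical vs. logical), while the data word carries the
+ * trigger mode, the delivery mode (fixed vs. lowest priority) and the
+ * allocated vector.
+ */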
+
+static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
+ struct msi_msg *msg, u8 hpet_id)
+{
+ struct irq_cfg *cfg;
+ int err;
+ unsigned dest;
+
+ if (disable_apic)
+ return -ENXIO;
+
+ cfg = irq_cfg(irq);
+ err = assign_irq_vector(irq, cfg, apic->target_cpus());
+ if (err)
+ return err;
+
+ err = apic->cpu_mask_to_apicid_and(cfg->domain,
+ apic->target_cpus(), &dest);
+ if (err)
+ return err;
+
+ x86_msi.compose_msi_msg(pdev, irq, dest, msg, hpet_id);
+
+ return 0;
+}
+
+static int
+msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
+{
+ struct irq_cfg *cfg = irqd_cfg(data);
+ struct msi_msg msg;
+ unsigned int dest;
+ int ret;
+
+ ret = apic_set_affinity(data, mask, &dest);
+ if (ret)
+ return ret;
+
+ __get_cached_msi_msg(data->msi_desc, &msg);
+
+ msg.data &= ~MSI_DATA_VECTOR_MASK;
+ msg.data |= MSI_DATA_VECTOR(cfg->vector);
+ msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
+ msg.address_lo |= MSI_ADDR_DEST_ID(dest);
+
+ __pci_write_msi_msg(data->msi_desc, &msg);
+
+ return IRQ_SET_MASK_OK_NOCOPY;
+}
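+
+/*
+ * Note: retargeting an MSI is a two-step operation: apic_set_affinity()
+ * picks a new vector and destination, then the cached MSI message is
+ * patched with that vector and destination ID and written back to the
+ * device.
+ */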
+
+/*
+ * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
+ * which implement the MSI or MSI-X Capability Structure.
+ */
+static struct irq_chip msi_chip = {
+ .name = "PCI-MSI",
+ .irq_unmask = pci_msi_unmask_irq,
+ .irq_mask = pci_msi_mask_irq,
+ .irq_ack = apic_ack_edge,
+ .irq_set_affinity = msi_set_affinity,
+ .irq_retrigger = apic_retrigger_irq,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
+};
+
+int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,
+ unsigned int irq_base, unsigned int irq_offset)
+{
+ struct irq_chip *chip = &msi_chip;
+ struct msi_msg msg;
+ unsigned int irq = irq_base + irq_offset;
+ int ret;
+
+ ret = msi_compose_msg(dev, irq, &msg, -1);
+ if (ret < 0)
+ return ret;
+
+ irq_set_msi_desc_off(irq_base, irq_offset, msidesc);
+
+ /*
+ * An MSI-X message is written per IRQ, so the offset is always 0.
+ * An MSI message denotes a contiguous group of IRQs and is written
+ * only for the 0th IRQ.
+ */
+ if (!irq_offset)
+ pci_write_msi_msg(irq, &msg);
+
+ setup_remapped_irq(irq, irq_cfg(irq), chip);
+
+ irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
+
+ dev_dbg(&dev->dev, "irq %d for MSI/MSI-X\n", irq);
+
+ return 0;
+}
+
+int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+ struct msi_desc *msidesc;
+ unsigned int irq;
+ int node, ret;
+
+ /* Multiple MSI vectors only supported with interrupt remapping */
+ if (type == PCI_CAP_ID_MSI && nvec > 1)
+ return 1;
+
+ node = dev_to_node(&dev->dev);
+
+ list_for_each_entry(msidesc, &dev->msi_list, list) {
+ irq = irq_alloc_hwirq(node);
+ if (!irq)
+ return -ENOSPC;
+
+ ret = setup_msi_irq(dev, msidesc, irq, 0);
+ if (ret < 0) {
+ irq_free_hwirq(irq);
+ return ret;
+ }
+
+ }
+ return 0;
+}
+
+void native_teardown_msi_irq(unsigned int irq)
+{
+ irq_free_hwirq(irq);
+}
+
+#ifdef CONFIG_DMAR_TABLE
+static int
+dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask,
+ bool force)
+{
+ struct irq_cfg *cfg = irqd_cfg(data);
+ unsigned int dest, irq = data->irq;
+ struct msi_msg msg;
+ int ret;
+
+ ret = apic_set_affinity(data, mask, &dest);
+ if (ret)
+ return ret;
+
+ dmar_msi_read(irq, &msg);
+
+ msg.data &= ~MSI_DATA_VECTOR_MASK;
+ msg.data |= MSI_DATA_VECTOR(cfg->vector);
+ msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
+ msg.address_lo |= MSI_ADDR_DEST_ID(dest);
+ msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest);
+
+ dmar_msi_write(irq, &msg);
+
+ return IRQ_SET_MASK_OK_NOCOPY;
+}
+
+static struct irq_chip dmar_msi_type = {
+ .name = "DMAR_MSI",
+ .irq_unmask = dmar_msi_unmask,
+ .irq_mask = dmar_msi_mask,
+ .irq_ack = apic_ack_edge,
+ .irq_set_affinity = dmar_msi_set_affinity,
+ .irq_retrigger = apic_retrigger_irq,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
+};
+
+int arch_setup_dmar_msi(unsigned int irq)
+{
+ int ret;
+ struct msi_msg msg;
+
+ ret = msi_compose_msg(NULL, irq, &msg, -1);
+ if (ret < 0)
+ return ret;
+ dmar_msi_write(irq, &msg);
+ irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
+ "edge");
+ return 0;
+}
+#endif
+
+/*
+ * MSI message composition
+ */
+#ifdef CONFIG_HPET_TIMER
+
+static int hpet_msi_set_affinity(struct irq_data *data,
+ const struct cpumask *mask, bool force)
+{
+ struct irq_cfg *cfg = irqd_cfg(data);
+ struct msi_msg msg;
+ unsigned int dest;
+ int ret;
+
+ ret = apic_set_affinity(data, mask, &dest);
+ if (ret)
+ return ret;
+
+ hpet_msi_read(data->handler_data, &msg);
+
+ msg.data &= ~MSI_DATA_VECTOR_MASK;
+ msg.data |= MSI_DATA_VECTOR(cfg->vector);
+ msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
+ msg.address_lo |= MSI_ADDR_DEST_ID(dest);
+
+ hpet_msi_write(data->handler_data, &msg);
+
+ return IRQ_SET_MASK_OK_NOCOPY;
+}
+
+static struct irq_chip hpet_msi_type = {
+ .name = "HPET_MSI",
+ .irq_unmask = hpet_msi_unmask,
+ .irq_mask = hpet_msi_mask,
+ .irq_ack = apic_ack_edge,
+ .irq_set_affinity = hpet_msi_set_affinity,
+ .irq_retrigger = apic_retrigger_irq,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
+};
+
+int default_setup_hpet_msi(unsigned int irq, unsigned int id)
+{
+ struct irq_chip *chip = &hpet_msi_type;
+ struct msi_msg msg;
+ int ret;
+
+ ret = msi_compose_msg(NULL, irq, &msg, id);
+ if (ret < 0)
+ return ret;
+
+ hpet_msi_write(irq_get_handler_data(irq), &msg);
+ irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
+ setup_remapped_irq(irq, irq_cfg(irq), chip);
+
+ irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
+ return 0;
+}
+#endif
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
new file mode 100644
index 00000000000..6cedd791458
--- /dev/null
+++ b/arch/x86/kernel/apic/vector.c
@@ -0,0 +1,719 @@
+/*
+ * Local APIC related interfaces to support IOAPIC, MSI, HT_IRQ etc.
+ *
+ * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
+ * Moved from arch/x86/kernel/apic/io_apic.c.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/compiler.h>
+#include <linux/irqdomain.h>
+#include <linux/slab.h>
+#include <asm/hw_irq.h>
+#include <asm/apic.h>
+#include <asm/i8259.h>
+#include <asm/desc.h>
+#include <asm/irq_remapping.h>
+
+static DEFINE_RAW_SPINLOCK(vector_lock);
+
+void lock_vector_lock(void)
+{
+ /* Used to ensure that the online set of cpus does not change
+ * during assign_irq_vector.
+ */
+ raw_spin_lock(&vector_lock);
+}
+
+void unlock_vector_lock(void)
+{
+ raw_spin_unlock(&vector_lock);
+}
+
+struct irq_cfg *irq_cfg(unsigned int irq)
+{
+ return irq_get_chip_data(irq);
+}
+
+struct irq_cfg *irqd_cfg(struct irq_data *irq_data)
+{
+ return irq_data->chip_data;
+}
+
+static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
+{
+ struct irq_cfg *cfg;
+
+ cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node);
+ if (!cfg)
+ return NULL;
+ if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node))
+ goto out_cfg;
+ if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node))
+ goto out_domain;
+#ifdef CONFIG_X86_IO_APIC
+ INIT_LIST_HEAD(&cfg->irq_2_pin);
+#endif
+ return cfg;
+out_domain:
+ free_cpumask_var(cfg->domain);
+out_cfg:
+ kfree(cfg);
+ return NULL;
+}
+
+struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
+{
+ int res = irq_alloc_desc_at(at, node);
+ struct irq_cfg *cfg;
+
+ if (res < 0) {
+ if (res != -EEXIST)
+ return NULL;
+ cfg = irq_cfg(at);
+ if (cfg)
+ return cfg;
+ }
+
+ cfg = alloc_irq_cfg(at, node);
+ if (cfg)
+ irq_set_chip_data(at, cfg);
+ else
+ irq_free_desc(at);
+ return cfg;
+}
+
+static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg)
+{
+ if (!cfg)
+ return;
+ irq_set_chip_data(at, NULL);
+ free_cpumask_var(cfg->domain);
+ free_cpumask_var(cfg->old_domain);
+ kfree(cfg);
+}
+
+static int
+__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
+{
+ /*
+ * NOTE! The local APIC isn't very good at handling
+ * multiple interrupts at the same interrupt level.
+ * As the interrupt level is determined by taking the
+ * vector number and shifting that right by 4, we
+ * want to spread these out a bit so that they don't
+ * all fall in the same interrupt level.
+ *
+ * Also, we've got to be careful not to trash gate
+ * 0x80, because int 0x80 is hm, kind of importantish. ;)
+ */
+ static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
+ static int current_offset = VECTOR_OFFSET_START % 16;
+ int cpu, err;
+ cpumask_var_t tmp_mask;
+
+ if (cfg->move_in_progress)
+ return -EBUSY;
+
+ if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
+ return -ENOMEM;
+
+ /* Only try and allocate irqs on cpus that are present */
+ err = -ENOSPC;
+ cpumask_clear(cfg->old_domain);
+ cpu = cpumask_first_and(mask, cpu_online_mask);
+ while (cpu < nr_cpu_ids) {
+ int new_cpu, vector, offset;
+
+ apic->vector_allocation_domain(cpu, tmp_mask, mask);
+
+ if (cpumask_subset(tmp_mask, cfg->domain)) {
+ err = 0;
+ if (cpumask_equal(tmp_mask, cfg->domain))
+ break;
+ /*
+ * The new cpumask using the vector is a proper subset of
+ * the currently in-use mask, so clean up the vector
+ * allocation for the members that are no longer used.
+ */
+ cpumask_andnot(cfg->old_domain, cfg->domain, tmp_mask);
+ cfg->move_in_progress =
+ cpumask_intersects(cfg->old_domain, cpu_online_mask);
+ cpumask_and(cfg->domain, cfg->domain, tmp_mask);
+ break;
+ }
+
+ vector = current_vector;
+ offset = current_offset;
+next:
+ vector += 16;
+ if (vector >= first_system_vector) {
+ offset = (offset + 1) % 16;
+ vector = FIRST_EXTERNAL_VECTOR + offset;
+ }
+
+ if (unlikely(current_vector == vector)) {
+ cpumask_or(cfg->old_domain, cfg->old_domain, tmp_mask);
+ cpumask_andnot(tmp_mask, mask, cfg->old_domain);
+ cpu = cpumask_first_and(tmp_mask, cpu_online_mask);
+ continue;
+ }
+
+ if (test_bit(vector, used_vectors))
+ goto next;
+
+ for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) {
+ if (per_cpu(vector_irq, new_cpu)[vector] >
+ VECTOR_UNDEFINED)
+ goto next;
+ }
+ /* Found one! */
+ current_vector = vector;
+ current_offset = offset;
+ if (cfg->vector) {
+ cpumask_copy(cfg->old_domain, cfg->domain);
+ cfg->move_in_progress =
+ cpumask_intersects(cfg->old_domain, cpu_online_mask);
+ }
+ for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
+ per_cpu(vector_irq, new_cpu)[vector] = irq;
+ cfg->vector = vector;
+ cpumask_copy(cfg->domain, tmp_mask);
+ err = 0;
+ break;
+ }
+ free_cpumask_var(tmp_mask);
+
+ return err;
+}
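+
+/*
+ * Note: the search above hands out vectors in steps of 16 so that
+ * consecutive allocations land on different interrupt priority levels,
+ * wraps around below first_system_vector, skips vectors reserved in
+ * used_vectors, and only accepts a vector that is free on every cpu in the
+ * tentative destination mask.
+ */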
+
+int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
+{
+ int err;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&vector_lock, flags);
+ err = __assign_irq_vector(irq, cfg, mask);
+ raw_spin_unlock_irqrestore(&vector_lock, flags);
+ return err;
+}
+
+void clear_irq_vector(int irq, struct irq_cfg *cfg)
+{
+ int cpu, vector;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&vector_lock, flags);
+ BUG_ON(!cfg->vector);
+
+ vector = cfg->vector;
+ for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
+ per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
+
+ cfg->vector = 0;
+ cpumask_clear(cfg->domain);
+
+ if (likely(!cfg->move_in_progress)) {
+ raw_spin_unlock_irqrestore(&vector_lock, flags);
+ return;
+ }
+
+ for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
+ for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
+ vector++) {
+ if (per_cpu(vector_irq, cpu)[vector] != irq)
+ continue;
+ per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
+ break;
+ }
+ }
+ cfg->move_in_progress = 0;
+ raw_spin_unlock_irqrestore(&vector_lock, flags);
+}
+
+int __init arch_probe_nr_irqs(void)
+{
+ int nr;
+
+ if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
+ nr_irqs = NR_VECTORS * nr_cpu_ids;
+
+ nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
+#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
+ /*
+ * for MSI and HT dynamic irqs
+ */
+ if (gsi_top <= NR_IRQS_LEGACY)
+ nr += 8 * nr_cpu_ids;
+ else
+ nr += gsi_top * 16;
+#endif
+ if (nr < nr_irqs)
+ nr_irqs = nr;
+
+ return nr_legacy_irqs();
+}
+
+int __init arch_early_irq_init(void)
+{
+ return arch_early_ioapic_init();
+}
+
+static void __setup_vector_irq(int cpu)
+{
+ /* Initialize vector_irq on a new cpu */
+ int irq, vector;
+ struct irq_cfg *cfg;
+
+ /*
+ * vector_lock will make sure that we don't run into irq vector
+ * assignments that might be happening on another cpu in parallel,
+ * while we set up our initial vector-to-irq mappings.
+ */
+ raw_spin_lock(&vector_lock);
+ /* Mark the vectors that are in use */
+ for_each_active_irq(irq) {
+ cfg = irq_cfg(irq);
+ if (!cfg)
+ continue;
+
+ if (!cpumask_test_cpu(cpu, cfg->domain))
+ continue;
+ vector = cfg->vector;
+ per_cpu(vector_irq, cpu)[vector] = irq;
+ }
+ /* Clear vectors that are not in use by this cpu */
+ for (vector = 0; vector < NR_VECTORS; ++vector) {
+ irq = per_cpu(vector_irq, cpu)[vector];
+ if (irq <= VECTOR_UNDEFINED)
+ continue;
+
+ cfg = irq_cfg(irq);
+ if (!cpumask_test_cpu(cpu, cfg->domain))
+ per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
+ }
+ raw_spin_unlock(&vector_lock);
+}
+
+/*
+ * Setup the vector to irq mappings.
+ */
+void setup_vector_irq(int cpu)
+{
+ int irq;
+
+ /*
+ * On most of the platforms, legacy PIC delivers the interrupts on the
+ * boot cpu. But there are certain platforms where PIC interrupts are
+ * delivered to multiple cpus. If the legacy IRQ is handled by the
+ * legacy PIC, then for the new cpu that is coming online, set up the
+ * static legacy vector-to-irq mapping:
+ */
+ for (irq = 0; irq < nr_legacy_irqs(); irq++)
+ per_cpu(vector_irq, cpu)[IRQ0_VECTOR + irq] = irq;
+
+ __setup_vector_irq(cpu);
+}
+
+int apic_retrigger_irq(struct irq_data *data)
+{
+ struct irq_cfg *cfg = irqd_cfg(data);
+ unsigned long flags;
+ int cpu;
+
+ raw_spin_lock_irqsave(&vector_lock, flags);
+ cpu = cpumask_first_and(cfg->domain, cpu_online_mask);
+ apic->send_IPI_mask(cpumask_of(cpu), cfg->vector);
+ raw_spin_unlock_irqrestore(&vector_lock, flags);
+
+ return 1;
+}
+
+void apic_ack_edge(struct irq_data *data)
+{
+ irq_complete_move(irqd_cfg(data));
+ irq_move_irq(data);
+ ack_APIC_irq();
+}
+
+/*
+ * Either sets data->affinity to a valid value and returns the
+ * ->cpu_mask_to_apicid of that in dest_id, or returns an error
+ * and leaves data->affinity untouched.
+ */
+int apic_set_affinity(struct irq_data *data, const struct cpumask *mask,
+ unsigned int *dest_id)
+{
+ struct irq_cfg *cfg = irqd_cfg(data);
+ unsigned int irq = data->irq;
+ int err;
+
+ if (!config_enabled(CONFIG_SMP))
+ return -EPERM;
+
+ if (!cpumask_intersects(mask, cpu_online_mask))
+ return -EINVAL;
+
+ err = assign_irq_vector(irq, cfg, mask);
+ if (err)
+ return err;
+
+ err = apic->cpu_mask_to_apicid_and(mask, cfg->domain, dest_id);
+ if (err) {
+ if (assign_irq_vector(irq, cfg, data->affinity))
+ pr_err("Failed to recover vector for irq %d\n", irq);
+ return err;
+ }
+
+ cpumask_copy(data->affinity, mask);
+
+ return 0;
+}
+
+#ifdef CONFIG_SMP
+void send_cleanup_vector(struct irq_cfg *cfg)
+{
+ cpumask_var_t cleanup_mask;
+
+ if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
+ unsigned int i;
+
+ for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
+ apic->send_IPI_mask(cpumask_of(i),
+ IRQ_MOVE_CLEANUP_VECTOR);
+ } else {
+ cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
+ apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
+ free_cpumask_var(cleanup_mask);
+ }
+ cfg->move_in_progress = 0;
+}
+
+asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
+{
+ unsigned vector, me;
+
+ ack_APIC_irq();
+ irq_enter();
+ exit_idle();
+
+ me = smp_processor_id();
+ for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
+ int irq;
+ unsigned int irr;
+ struct irq_desc *desc;
+ struct irq_cfg *cfg;
+
+ irq = __this_cpu_read(vector_irq[vector]);
+
+ if (irq <= VECTOR_UNDEFINED)
+ continue;
+
+ desc = irq_to_desc(irq);
+ if (!desc)
+ continue;
+
+ cfg = irq_cfg(irq);
+ if (!cfg)
+ continue;
+
+ raw_spin_lock(&desc->lock);
+
+ /*
+ * Check if the irq migration is in progress. If so, we
+ * haven't received the cleanup request yet for this irq.
+ */
+ if (cfg->move_in_progress)
+ goto unlock;
+
+ if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
+ goto unlock;
+
+ irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
+ /*
+ * Check if the vector that needs to be cleaned up is still
+ * registered in this cpu's IRR. If so, this is not the best
+ * time to clean it up; let's clean it up on the next attempt
+ * by sending another IRQ_MOVE_CLEANUP_VECTOR to this cpu.
+ */
+ if (irr & (1 << (vector % 32))) {
+ apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
+ goto unlock;
+ }
+ __this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
+unlock:
+ raw_spin_unlock(&desc->lock);
+ }
+
+ irq_exit();
+}
+
+static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
+{
+ unsigned me;
+
+ if (likely(!cfg->move_in_progress))
+ return;
+
+ me = smp_processor_id();
+
+ if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
+ send_cleanup_vector(cfg);
+}
+
+void irq_complete_move(struct irq_cfg *cfg)
+{
+ __irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
+}
+
+void irq_force_complete_move(int irq)
+{
+ struct irq_cfg *cfg = irq_cfg(irq);
+
+ if (!cfg)
+ return;
+
+ __irq_complete_move(cfg, cfg->vector);
+}
+#endif
+
+/*
+ * Dynamic irq allocation and deallocation. Should be replaced by irq domains!
+ */
+int arch_setup_hwirq(unsigned int irq, int node)
+{
+ struct irq_cfg *cfg;
+ unsigned long flags;
+ int ret;
+
+ cfg = alloc_irq_cfg(irq, node);
+ if (!cfg)
+ return -ENOMEM;
+
+ raw_spin_lock_irqsave(&vector_lock, flags);
+ ret = __assign_irq_vector(irq, cfg, apic->target_cpus());
+ raw_spin_unlock_irqrestore(&vector_lock, flags);
+
+ if (!ret)
+ irq_set_chip_data(irq, cfg);
+ else
+ free_irq_cfg(irq, cfg);
+ return ret;
+}
+
+void arch_teardown_hwirq(unsigned int irq)
+{
+ struct irq_cfg *cfg = irq_cfg(irq);
+
+ free_remapped_irq(irq);
+ clear_irq_vector(irq, cfg);
+ free_irq_cfg(irq, cfg);
+}
+
+static void __init print_APIC_field(int base)
+{
+ int i;
+
+ printk(KERN_DEBUG);
+
+ for (i = 0; i < 8; i++)
+ pr_cont("%08x", apic_read(base + i*0x10));
+
+ pr_cont("\n");
+}
+
+static void __init print_local_APIC(void *dummy)
+{
+ unsigned int i, v, ver, maxlvt;
+ u64 icr;
+
+ pr_debug("printing local APIC contents on CPU#%d/%d:\n",
+ smp_processor_id(), hard_smp_processor_id());
+ v = apic_read(APIC_ID);
+ pr_info("... APIC ID: %08x (%01x)\n", v, read_apic_id());
+ v = apic_read(APIC_LVR);
+ pr_info("... APIC VERSION: %08x\n", v);
+ ver = GET_APIC_VERSION(v);
+ maxlvt = lapic_get_maxlvt();
+
+ v = apic_read(APIC_TASKPRI);
+ pr_debug("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
+
+ /* !82489DX */
+ if (APIC_INTEGRATED(ver)) {
+ if (!APIC_XAPIC(ver)) {
+ v = apic_read(APIC_ARBPRI);
+ pr_debug("... APIC ARBPRI: %08x (%02x)\n",
+ v, v & APIC_ARBPRI_MASK);
+ }
+ v = apic_read(APIC_PROCPRI);
+ pr_debug("... APIC PROCPRI: %08x\n", v);
+ }
+
+ /*
+ * Remote read is supported only by the 82489DX and by the local
+ * APIC of Pentium processors.
+ */
+ if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
+ v = apic_read(APIC_RRR);
+ pr_debug("... APIC RRR: %08x\n", v);
+ }
+
+ v = apic_read(APIC_LDR);
+ pr_debug("... APIC LDR: %08x\n", v);
+ if (!x2apic_enabled()) {
+ v = apic_read(APIC_DFR);
+ pr_debug("... APIC DFR: %08x\n", v);
+ }
+ v = apic_read(APIC_SPIV);
+ pr_debug("... APIC SPIV: %08x\n", v);
+
+ pr_debug("... APIC ISR field:\n");
+ print_APIC_field(APIC_ISR);
+ pr_debug("... APIC TMR field:\n");
+ print_APIC_field(APIC_TMR);
+ pr_debug("... APIC IRR field:\n");
+ print_APIC_field(APIC_IRR);
+
+ /* !82489DX */
+ if (APIC_INTEGRATED(ver)) {
+ /* Due to the Pentium erratum 3AP. */
+ if (maxlvt > 3)
+ apic_write(APIC_ESR, 0);
+
+ v = apic_read(APIC_ESR);
+ pr_debug("... APIC ESR: %08x\n", v);
+ }
+
+ icr = apic_icr_read();
+ pr_debug("... APIC ICR: %08x\n", (u32)icr);
+ pr_debug("... APIC ICR2: %08x\n", (u32)(icr >> 32));
+
+ v = apic_read(APIC_LVTT);
+ pr_debug("... APIC LVTT: %08x\n", v);
+
+ if (maxlvt > 3) {
+ /* PC is LVT#4. */
+ v = apic_read(APIC_LVTPC);
+ pr_debug("... APIC LVTPC: %08x\n", v);
+ }
+ v = apic_read(APIC_LVT0);
+ pr_debug("... APIC LVT0: %08x\n", v);
+ v = apic_read(APIC_LVT1);
+ pr_debug("... APIC LVT1: %08x\n", v);
+
+ if (maxlvt > 2) {
+ /* ERR is LVT#3. */
+ v = apic_read(APIC_LVTERR);
+ pr_debug("... APIC LVTERR: %08x\n", v);
+ }
+
+ v = apic_read(APIC_TMICT);
+ pr_debug("... APIC TMICT: %08x\n", v);
+ v = apic_read(APIC_TMCCT);
+ pr_debug("... APIC TMCCT: %08x\n", v);
+ v = apic_read(APIC_TDCR);
+ pr_debug("... APIC TDCR: %08x\n", v);
+
+ if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
+ v = apic_read(APIC_EFEAT);
+ maxlvt = (v >> 16) & 0xff;
+ pr_debug("... APIC EFEAT: %08x\n", v);
+ v = apic_read(APIC_ECTRL);
+ pr_debug("... APIC ECTRL: %08x\n", v);
+ for (i = 0; i < maxlvt; i++) {
+ v = apic_read(APIC_EILVTn(i));
+ pr_debug("... APIC EILVT%d: %08x\n", i, v);
+ }
+ }
+ pr_cont("\n");
+}
+
+static void __init print_local_APICs(int maxcpu)
+{
+ int cpu;
+
+ if (!maxcpu)
+ return;
+
+ preempt_disable();
+ for_each_online_cpu(cpu) {
+ if (cpu >= maxcpu)
+ break;
+ smp_call_function_single(cpu, print_local_APIC, NULL, 1);
+ }
+ preempt_enable();
+}
+
+static void __init print_PIC(void)
+{
+ unsigned int v;
+ unsigned long flags;
+
+ if (!nr_legacy_irqs())
+ return;
+
+ pr_debug("\nprinting PIC contents\n");
+
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
+
+ v = inb(0xa1) << 8 | inb(0x21);
+ pr_debug("... PIC IMR: %04x\n", v);
+
+ v = inb(0xa0) << 8 | inb(0x20);
+ pr_debug("... PIC IRR: %04x\n", v);
+
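+ /*
+ * OCW3 0x0b makes the next reads return the In-Service Register;
+ * writing 0x0a afterwards switches the PIC back to returning the
+ * Interrupt Request Register.
+ */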
+ outb(0x0b, 0xa0);
+ outb(0x0b, 0x20);
+ v = inb(0xa0) << 8 | inb(0x20);
+ outb(0x0a, 0xa0);
+ outb(0x0a, 0x20);
+
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
+
+ pr_debug("... PIC ISR: %04x\n", v);
+
+ v = inb(0x4d1) << 8 | inb(0x4d0);
+ pr_debug("... PIC ELCR: %04x\n", v);
+}
+
+static int show_lapic __initdata = 1;
+static __init int setup_show_lapic(char *arg)
+{
+ int num = -1;
+
+ if (strcmp(arg, "all") == 0) {
+ show_lapic = CONFIG_NR_CPUS;
+ } else {
+ get_option(&arg, &num);
+ if (num >= 0)
+ show_lapic = num;
+ }
+
+ return 1;
+}
+__setup("show_lapic=", setup_show_lapic);
+
+static int __init print_ICs(void)
+{
+ if (apic_verbosity == APIC_QUIET)
+ return 0;
+
+ print_PIC();
+
+ /* don't print out if apic is not there */
+ if (!cpu_has_apic && !apic_from_smp_config())
+ return 0;
+
+ print_local_APICs(show_lapic);
+ print_IO_APICs();
+
+ return 0;
+}
+
+late_initcall(print_ICs);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 08f3fed2b0f..10b8d3eaaf1 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -276,6 +276,17 @@ static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
return box;
}
+/*
+ * Use the uncore_pmu_event_init() pmu ->event_init callback as a
+ * detection point for uncore events.
+ */
+static int uncore_pmu_event_init(struct perf_event *event);
+
+static bool is_uncore_event(struct perf_event *event)
+{
+ return event->pmu->event_init == uncore_pmu_event_init;
+}
+
static int
uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp)
{
@@ -290,13 +301,18 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, b
return -EINVAL;
n = box->n_events;
- box->event_list[n] = leader;
- n++;
+
+ if (is_uncore_event(leader)) {
+ box->event_list[n] = leader;
+ n++;
+ }
+
if (!dogrp)
return n;
list_for_each_entry(event, &leader->sibling_list, group_entry) {
- if (event->state <= PERF_EVENT_STATE_OFF)
+ if (!is_uncore_event(event) ||
+ event->state <= PERF_EVENT_STATE_OFF)
continue;
if (n >= max_count)
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index f5ab56d1428..aceb2f90c71 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -28,6 +28,7 @@
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
+#include <asm/io_apic.h>
#include <asm/hpet.h>
#include <linux/kdebug.h>
#include <asm/cpu.h>
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 2e1a6853e00..fe9f0b79a18 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -455,6 +455,23 @@ struct intel_stolen_funcs {
u32 (*base)(int num, int slot, int func, size_t size);
};
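+/*
+ * The BDW-style GMS field encodes stolen memory in 32MB units for
+ * values below 0xf0 (e.g. 0x02 -> 64MB) and in 4MB units from 0xf0
+ * upwards (e.g. 0xf0 -> 4MB, 0xf1 -> 8MB).
+ */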
+static size_t __init gen9_stolen_size(int num, int slot, int func)
+{
+ u16 gmch_ctrl;
+
+ gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
+ gmch_ctrl >>= BDW_GMCH_GMS_SHIFT;
+ gmch_ctrl &= BDW_GMCH_GMS_MASK;
+
+ if (gmch_ctrl < 0xf0)
+ return gmch_ctrl << 25; /* 32 MB units */
+ else
+ /* 4MB increments starting at 0xf0 for 4MB */
+ return (gmch_ctrl - 0xf0 + 1) << 22;
+}
+
+typedef size_t (*stolen_size_fn)(int num, int slot, int func);
+
static const struct intel_stolen_funcs i830_stolen_funcs __initconst = {
.base = i830_stolen_base,
.size = i830_stolen_size,
@@ -490,6 +507,11 @@ static const struct intel_stolen_funcs gen8_stolen_funcs __initconst = {
.size = gen8_stolen_size,
};
+static const struct intel_stolen_funcs gen9_stolen_funcs __initconst = {
+ .base = intel_stolen_base,
+ .size = gen9_stolen_size,
+};
+
static const struct intel_stolen_funcs chv_stolen_funcs __initconst = {
.base = intel_stolen_base,
.size = chv_stolen_size,
@@ -523,6 +545,7 @@ static const struct pci_device_id intel_stolen_ids[] __initconst = {
INTEL_BDW_M_IDS(&gen8_stolen_funcs),
INTEL_BDW_D_IDS(&gen8_stolen_funcs),
INTEL_CHV_IDS(&chv_stolen_funcs),
+ INTEL_SKL_IDS(&gen9_stolen_funcs),
};
static void __init intel_graphics_stolen(int num, int slot, int func)
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 1cf7c97ff17..000d4199b03 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -732,10 +732,10 @@ ENTRY(interrupt)
ENTRY(irq_entries_start)
RING0_INT_FRAME
vector=FIRST_EXTERNAL_VECTOR
-.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
+.rept (FIRST_SYSTEM_VECTOR-FIRST_EXTERNAL_VECTOR+6)/7
.balign 32
.rept 7
- .if vector < NR_VECTORS
+ .if vector < FIRST_SYSTEM_VECTOR
.if vector <> FIRST_EXTERNAL_VECTOR
CFI_ADJUST_CFA_OFFSET -4
.endif
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 90878aa38db..9ebaf63ba18 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -740,10 +740,10 @@ ENTRY(interrupt)
ENTRY(irq_entries_start)
INTR_FRAME
vector=FIRST_EXTERNAL_VECTOR
-.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
+.rept (FIRST_SYSTEM_VECTOR-FIRST_EXTERNAL_VECTOR+6)/7
.balign 32
.rept 7
- .if vector < NR_VECTORS
+ .if vector < FIRST_SYSTEM_VECTOR
.if vector <> FIRST_EXTERNAL_VECTOR
CFI_ADJUST_CFA_OFFSET -8
.endif
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 4de73ee7836..70e181ea1ea 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -99,32 +99,9 @@ void __init init_IRQ(void)
x86_init.irqs.intr_init();
}
-/*
- * Setup the vector to irq mappings.
- */
-void setup_vector_irq(int cpu)
-{
-#ifndef CONFIG_X86_IO_APIC
- int irq;
-
- /*
- * On most of the platforms, legacy PIC delivers the interrupts on the
- * boot cpu. But there are certain platforms where PIC interrupts are
- * delivered to multiple cpu's. If the legacy IRQ is handled by the
- * legacy PIC, for the new cpu that is coming online, setup the static
- * legacy vector to irq mapping:
- */
- for (irq = 0; irq < nr_legacy_irqs(); irq++)
- per_cpu(vector_irq, cpu)[IRQ0_VECTOR + irq] = irq;
-#endif
-
- __setup_vector_irq(cpu);
-}
-
static void __init smp_intr_init(void)
{
#ifdef CONFIG_SMP
-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
/*
* The reschedule interrupt is a CPU-to-CPU reschedule-helper
* IPI, driven by wakeup.
@@ -144,7 +121,6 @@ static void __init smp_intr_init(void)
/* IPI used for rebooting/stopping */
alloc_intr_gate(REBOOT_VECTOR, reboot_interrupt);
-#endif
#endif /* CONFIG_SMP */
}
@@ -159,7 +135,7 @@ static void __init apic_intr_init(void)
alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
#endif
-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
+#ifdef CONFIG_X86_LOCAL_APIC
/* self generated IPI for local APIC timer */
alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
@@ -197,10 +173,17 @@ void __init native_init_IRQ(void)
* 'special' SMP interrupts)
*/
i = FIRST_EXTERNAL_VECTOR;
- for_each_clear_bit_from(i, used_vectors, NR_VECTORS) {
+#ifndef CONFIG_X86_LOCAL_APIC
+#define first_system_vector NR_VECTORS
+#endif
+ for_each_clear_bit_from(i, used_vectors, first_system_vector) {
/* IA32_SYSCALL_VECTOR could be used in trap_init already. */
set_intr_gate(i, interrupt[i - FIRST_EXTERNAL_VECTOR]);
}
+#ifdef CONFIG_X86_LOCAL_APIC
+ for_each_clear_bit_from(i, used_vectors, NR_VECTORS)
+ set_intr_gate(i, spurious_interrupt);
+#endif
if (!acpi_ioapic && !of_ioapic && nr_legacy_irqs())
setup_irq(2, &irq2);
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index f6945bef2cd..94f64348430 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -283,7 +283,14 @@ NOKPROBE_SYMBOL(do_async_page_fault);
static void __init paravirt_ops_setup(void)
{
pv_info.name = "KVM";
- pv_info.paravirt_enabled = 1;
+
+ /*
+ * KVM isn't paravirt in the sense of paravirt_enabled. A KVM
+ * guest kernel works like a bare metal kernel with additional
+ * features, and paravirt_enabled is about features that are
+ * missing.
+ */
+ pv_info.paravirt_enabled = 0;
if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
pv_cpu_ops.io_delay = kvm_io_delay;
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index d9156ceecdf..42caaef897c 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -59,13 +59,12 @@ static void kvm_get_wallclock(struct timespec *now)
native_write_msr(msr_kvm_wall_clock, low, high);
- preempt_disable();
- cpu = smp_processor_id();
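+ /* get_cpu() disables preemption and returns this cpu's id */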
+ cpu = get_cpu();
vcpu_time = &hv_clock[cpu].pvti;
pvclock_read_wallclock(&wall_clock, vcpu_time, now);
- preempt_enable();
+ put_cpu();
}
static int kvm_set_wallclock(const struct timespec *now)
@@ -107,11 +106,10 @@ static unsigned long kvm_get_tsc_khz(void)
int cpu;
unsigned long tsc_khz;
- preempt_disable();
- cpu = smp_processor_id();
+ cpu = get_cpu();
src = &hv_clock[cpu].pvti;
tsc_khz = pvclock_tsc_khz(src);
- preempt_enable();
+ put_cpu();
return tsc_khz;
}
@@ -263,7 +261,6 @@ void __init kvmclock_init(void)
#endif
kvm_get_preset_lpj();
clocksource_register_hz(&kvm_clock, NSEC_PER_SEC);
- pv_info.paravirt_enabled = 1;
pv_info.name = "KVM";
if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))
@@ -284,23 +281,22 @@ int __init kvm_setup_vsyscall_timeinfo(void)
size = PAGE_ALIGN(sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS);
- preempt_disable();
- cpu = smp_processor_id();
+ cpu = get_cpu();
vcpu_time = &hv_clock[cpu].pvti;
flags = pvclock_read_flags(vcpu_time);
if (!(flags & PVCLOCK_TSC_STABLE_BIT)) {
- preempt_enable();
+ put_cpu();
return 1;
}
if ((ret = pvclock_init_vsyscall(hv_clock, size))) {
- preempt_enable();
+ put_cpu();
return ret;
}
- preempt_enable();
+ put_cpu();
kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
#endif
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index 72e8e310258..469b23d6acc 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -20,6 +20,7 @@
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/apic.h>
+#include <asm/io_apic.h>
#include <asm/cpufeature.h>
#include <asm/desc.h>
#include <asm/cacheflush.h>
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 485981059a4..415480d3ea8 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -22,6 +22,7 @@
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
+#include <asm/io_apic.h>
#include <asm/debugreg.h>
#include <asm/kexec-bzimage64.h>
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 17962e667a9..bae6c609888 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -12,6 +12,7 @@
#include <acpi/reboot.h>
#include <asm/io.h>
#include <asm/apic.h>
+#include <asm/io_apic.h>
#include <asm/desc.h>
#include <asm/hpet.h>
#include <asm/pgtable.h>
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 7a8f5845e8e..6d7022c683e 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1084,7 +1084,6 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
{
unsigned int i;
- preempt_disable();
smp_cpu_index_default();
/*
@@ -1102,22 +1101,19 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
}
set_cpu_sibling_map(0);
-
if (smp_sanity_check(max_cpus) < 0) {
pr_info("SMP disabled\n");
disable_smp();
- goto out;
+ return;
}
default_setup_apic_routing();
- preempt_disable();
if (read_apic_id() != boot_cpu_physical_apicid) {
panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
read_apic_id(), boot_cpu_physical_apicid);
/* Or can we switch back to PIC here? */
}
- preempt_enable();
connect_bsp_APIC();
@@ -1151,8 +1147,6 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
uv_system_init();
set_mtrr_aps_delayed_init();
-out:
- preempt_enable();
}
void arch_enable_nonboot_cpus_begin(void)
diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
index 3e551eee87b..4e942f31b1a 100644
--- a/arch/x86/kernel/tls.c
+++ b/arch/x86/kernel/tls.c
@@ -55,12 +55,6 @@ static bool tls_desc_okay(const struct user_desc *info)
if (info->seg_not_present)
return false;
-#ifdef CONFIG_X86_64
- /* The L bit makes no sense for data. */
- if (info->lm)
- return false;
-#endif
-
return true;
}
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index a9ae2057989..88900e28802 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -331,7 +331,7 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
break; /* Success, it was handled */
case 1: /* Bound violation. */
info = mpx_generate_siginfo(regs, xsave_buf);
- if (PTR_ERR(info)) {
+ if (IS_ERR(info)) {
/*
* We failed to decode the MPX instruction. Act as if
* the exception was not caused by MPX.
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index 4c540c4719d..0de1fae2bdf 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -738,3 +738,4 @@ void *get_xsave_addr(struct xsave_struct *xsave, int xstate)
return (void *)xsave + xstate_comp_offsets[feature];
}
+EXPORT_SYMBOL_GPL(get_xsave_addr);
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index 25d22b2d650..08f790dfadc 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -7,14 +7,13 @@ CFLAGS_vmx.o := -I.
KVM := ../../../virt/kvm
-kvm-y += $(KVM)/kvm_main.o $(KVM)/ioapic.o \
- $(KVM)/coalesced_mmio.o $(KVM)/irq_comm.o \
+kvm-y += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \
$(KVM)/eventfd.o $(KVM)/irqchip.o $(KVM)/vfio.o
-kvm-$(CONFIG_KVM_DEVICE_ASSIGNMENT) += $(KVM)/assigned-dev.o $(KVM)/iommu.o
kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o
kvm-y += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
- i8254.o cpuid.o pmu.o
+ i8254.o ioapic.o irq_comm.o cpuid.o pmu.o
+kvm-$(CONFIG_KVM_DEVICE_ASSIGNMENT) += assigned-dev.o iommu.o
kvm-intel-y += vmx.o
kvm-amd-y += svm.o
diff --git a/virt/kvm/assigned-dev.c b/arch/x86/kvm/assigned-dev.c
index e05000e200d..6eb5c20ee37 100644
--- a/virt/kvm/assigned-dev.c
+++ b/arch/x86/kvm/assigned-dev.c
@@ -20,6 +20,32 @@
#include <linux/namei.h>
#include <linux/fs.h>
#include "irq.h"
+#include "assigned-dev.h"
+
+struct kvm_assigned_dev_kernel {
+ struct kvm_irq_ack_notifier ack_notifier;
+ struct list_head list;
+ int assigned_dev_id;
+ int host_segnr;
+ int host_busnr;
+ int host_devfn;
+ unsigned int entries_nr;
+ int host_irq;
+ bool host_irq_disabled;
+ bool pci_2_3;
+ struct msix_entry *host_msix_entries;
+ int guest_irq;
+ struct msix_entry *guest_msix_entries;
+ unsigned long irq_requested_type;
+ int irq_source_id;
+ int flags;
+ struct pci_dev *dev;
+ struct kvm *kvm;
+ spinlock_t intx_lock;
+ spinlock_t intx_mask_lock;
+ char irq_name[32];
+ struct pci_saved_state *pci_saved_state;
+};
static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
int assigned_dev_id)
@@ -748,7 +774,7 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
if (r)
goto out_list_del;
}
- r = kvm_assign_device(kvm, match);
+ r = kvm_assign_device(kvm, match->dev);
if (r)
goto out_list_del;
@@ -790,7 +816,7 @@ static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
goto out;
}
- kvm_deassign_device(kvm, match);
+ kvm_deassign_device(kvm, match->dev);
kvm_free_assigned_device(kvm, match);
diff --git a/arch/x86/kvm/assigned-dev.h b/arch/x86/kvm/assigned-dev.h
new file mode 100644
index 00000000000..a428c1a211b
--- /dev/null
+++ b/arch/x86/kvm/assigned-dev.h
@@ -0,0 +1,32 @@
+#ifndef ARCH_X86_KVM_ASSIGNED_DEV_H
+#define ARCH_X86_KVM_ASSIGNED_DEV_H
+
+#include <linux/kvm_host.h>
+
+#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
+int kvm_assign_device(struct kvm *kvm, struct pci_dev *pdev);
+int kvm_deassign_device(struct kvm *kvm, struct pci_dev *pdev);
+
+int kvm_iommu_map_guest(struct kvm *kvm);
+int kvm_iommu_unmap_guest(struct kvm *kvm);
+
+long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
+ unsigned long arg);
+
+void kvm_free_all_assigned_devices(struct kvm *kvm);
+#else
+static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
+{
+ return 0;
+}
+
+static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
+ unsigned long arg)
+{
+ return -ENOTTY;
+}
+
+static inline void kvm_free_all_assigned_devices(struct kvm *kvm) {}
+#endif /* CONFIG_KVM_DEVICE_ASSIGNMENT */
+
+#endif /* ARCH_X86_KVM_ASSIGNED_DEV_H */
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 976e3a57f9e..8a80737ee6e 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -23,7 +23,7 @@
#include "mmu.h"
#include "trace.h"
-static u32 xstate_required_size(u64 xstate_bv)
+static u32 xstate_required_size(u64 xstate_bv, bool compacted)
{
int feature_bit = 0;
u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
@@ -31,9 +31,10 @@ static u32 xstate_required_size(u64 xstate_bv)
xstate_bv &= XSTATE_EXTEND_MASK;
while (xstate_bv) {
if (xstate_bv & 0x1) {
- u32 eax, ebx, ecx, edx;
+ u32 eax, ebx, ecx, edx, offset;
cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
- ret = max(ret, eax + ebx);
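+ /*
+ * CPUID.0xD[n].EAX is the size of state component n and EBX its
+ * offset in the standard format; compacted-format components are
+ * packed back to back, so the running size is used as the offset.
+ */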
+ offset = compacted ? ret : ebx;
+ ret = max(ret, offset + eax);
}
xstate_bv >>= 1;
@@ -53,6 +54,8 @@ u64 kvm_supported_xcr0(void)
return xcr0;
}
+#define F(x) bit(X86_FEATURE_##x)
+
int kvm_update_cpuid(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
@@ -64,13 +67,13 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
/* Update OSXSAVE bit */
if (cpu_has_xsave && best->function == 0x1) {
- best->ecx &= ~(bit(X86_FEATURE_OSXSAVE));
+ best->ecx &= ~F(OSXSAVE);
if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
- best->ecx |= bit(X86_FEATURE_OSXSAVE);
+ best->ecx |= F(OSXSAVE);
}
if (apic) {
- if (best->ecx & bit(X86_FEATURE_TSC_DEADLINE_TIMER))
+ if (best->ecx & F(TSC_DEADLINE_TIMER))
apic->lapic_timer.timer_mode_mask = 3 << 17;
else
apic->lapic_timer.timer_mode_mask = 1 << 17;
@@ -85,9 +88,13 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
(best->eax | ((u64)best->edx << 32)) &
kvm_supported_xcr0();
vcpu->arch.guest_xstate_size = best->ebx =
- xstate_required_size(vcpu->arch.xcr0);
+ xstate_required_size(vcpu->arch.xcr0, false);
}
+ best = kvm_find_cpuid_entry(vcpu, 0xD, 1);
+ if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
+ best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
+
/*
* The existing code assumes virtual address is 48-bit in the canonical
* address checks; exit if it is ever changed.
@@ -122,8 +129,8 @@ static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
break;
}
}
- if (entry && (entry->edx & bit(X86_FEATURE_NX)) && !is_efer_nx()) {
- entry->edx &= ~bit(X86_FEATURE_NX);
+ if (entry && (entry->edx & F(NX)) && !is_efer_nx()) {
+ entry->edx &= ~F(NX);
printk(KERN_INFO "kvm: guest NX capability removed\n");
}
}
@@ -227,8 +234,6 @@ static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
entry->flags = 0;
}
-#define F(x) bit(X86_FEATURE_##x)
-
static int __do_cpuid_ent_emulated(struct kvm_cpuid_entry2 *entry,
u32 func, u32 index, int *nent, int maxnent)
{
@@ -267,6 +272,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;
unsigned f_mpx = kvm_x86_ops->mpx_supported() ? F(MPX) : 0;
+ unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
/* cpuid 1.edx */
const u32 kvm_supported_word0_x86_features =
@@ -317,7 +323,12 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
const u32 kvm_supported_word9_x86_features =
F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) |
- F(ADX) | F(SMAP);
+ F(ADX) | F(SMAP) | F(AVX512F) | F(AVX512PF) | F(AVX512ER) |
+ F(AVX512CD);
+
+ /* cpuid 0xD.1.eax */
+ const u32 kvm_supported_word10_x86_features =
+ F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | f_xsaves;
/* all calls to cpuid_count() should be made on the same cpu */
get_cpu();
@@ -453,16 +464,34 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
u64 supported = kvm_supported_xcr0();
entry->eax &= supported;
+ entry->ebx = xstate_required_size(supported, false);
+ entry->ecx = entry->ebx;
entry->edx &= supported >> 32;
entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ if (!supported)
+ break;
+
for (idx = 1, i = 1; idx < 64; ++idx) {
u64 mask = ((u64)1 << idx);
if (*nent >= maxnent)
goto out;
do_cpuid_1_ent(&entry[i], function, idx);
- if (entry[i].eax == 0 || !(supported & mask))
- continue;
+ if (idx == 1) {
+ entry[i].eax &= kvm_supported_word10_x86_features;
+ entry[i].ebx = 0;
+ if (entry[i].eax & (F(XSAVES)|F(XSAVEC)))
+ entry[i].ebx =
+ xstate_required_size(supported,
+ true);
+ } else {
+ if (entry[i].eax == 0 || !(supported & mask))
+ continue;
+ if (WARN_ON_ONCE(entry[i].ecx & 1))
+ continue;
+ }
+ entry[i].ecx = 0;
+ entry[i].edx = 0;
entry[i].flags |=
KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
++*nent;
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 9f8a2faf504..169b09d76dd 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -123,6 +123,7 @@
#define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
#define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape (5<<15) /* Escape to coprocessor instruction */
+#define InstrDual (6<<15) /* Alternate instruction decoding of mod == 3 */
#define Sse (1<<18) /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM (1<<19)
@@ -166,6 +167,8 @@
#define CheckPerm ((u64)1 << 49) /* Has valid check_perm field */
#define NoBigReal ((u64)1 << 50) /* No big real mode */
#define PrivUD ((u64)1 << 51) /* #UD instead of #GP on CPL > 0 */
+#define NearBranch ((u64)1 << 52) /* Near branches */
+#define No16 ((u64)1 << 53) /* No 16 bit operand */
#define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
@@ -209,6 +212,7 @@ struct opcode {
const struct group_dual *gdual;
const struct gprefix *gprefix;
const struct escape *esc;
+ const struct instr_dual *idual;
void (*fastop)(struct fastop *fake);
} u;
int (*check_perm)(struct x86_emulate_ctxt *ctxt);
@@ -231,6 +235,11 @@ struct escape {
struct opcode high[64];
};
+struct instr_dual {
+ struct opcode mod012;
+ struct opcode mod3;
+};
+
/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
@@ -379,6 +388,15 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
ON64(FOP2E(op##q, rax, cl)) \
FOP_END
+/* 2 operand, src and dest are reversed */
+#define FASTOP2R(op, name) \
+ FOP_START(name) \
+ FOP2E(op##b, dl, al) \
+ FOP2E(op##w, dx, ax) \
+ FOP2E(op##l, edx, eax) \
+ ON64(FOP2E(op##q, rdx, rax)) \
+ FOP_END
+
#define FOP3E(op, dst, src, src2) \
FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET
@@ -477,9 +495,9 @@ address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
}
static inline unsigned long
-register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
+register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
- return address_mask(ctxt, reg);
+ return address_mask(ctxt, reg_read(ctxt, reg));
}
static void masked_increment(ulong *reg, ulong mask, int inc)
@@ -488,7 +506,7 @@ static void masked_increment(ulong *reg, ulong mask, int inc)
}
static inline void
-register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
+register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
ulong mask;
@@ -496,7 +514,7 @@ register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, in
mask = ~0UL;
else
mask = ad_mask(ctxt);
- masked_increment(reg, mask, inc);
+ masked_increment(reg_rmw(ctxt, reg), mask, inc);
}
static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
@@ -564,40 +582,6 @@ static int emulate_nm(struct x86_emulate_ctxt *ctxt)
return emulate_exception(ctxt, NM_VECTOR, 0, false);
}
-static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
- int cs_l)
-{
- switch (ctxt->op_bytes) {
- case 2:
- ctxt->_eip = (u16)dst;
- break;
- case 4:
- ctxt->_eip = (u32)dst;
- break;
-#ifdef CONFIG_X86_64
- case 8:
- if ((cs_l && is_noncanonical_address(dst)) ||
- (!cs_l && (dst >> 32) != 0))
- return emulate_gp(ctxt, 0);
- ctxt->_eip = dst;
- break;
-#endif
- default:
- WARN(1, "unsupported eip assignment size\n");
- }
- return X86EMUL_CONTINUE;
-}
-
-static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
-{
- return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
-}
-
-static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
-{
- return assign_eip_near(ctxt, ctxt->_eip + rel);
-}
-
static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
u16 selector;
@@ -641,25 +625,24 @@ static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
return true;
}
-static int __linearize(struct x86_emulate_ctxt *ctxt,
- struct segmented_address addr,
- unsigned *max_size, unsigned size,
- bool write, bool fetch,
- ulong *linear)
+static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
+ struct segmented_address addr,
+ unsigned *max_size, unsigned size,
+ bool write, bool fetch,
+ enum x86emul_mode mode, ulong *linear)
{
struct desc_struct desc;
bool usable;
ulong la;
u32 lim;
u16 sel;
- unsigned cpl;
la = seg_base(ctxt, addr.seg) + addr.ea;
*max_size = 0;
- switch (ctxt->mode) {
+ switch (mode) {
case X86EMUL_MODE_PROT64:
- if (((signed long)la << 16) >> 16 != la)
- return emulate_gp(ctxt, 0);
+ if (is_noncanonical_address(la))
+ goto bad;
*max_size = min_t(u64, ~0u, (1ull << 48) - la);
if (size > *max_size)
@@ -678,46 +661,20 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
if (!fetch && (desc.type & 8) && !(desc.type & 2))
goto bad;
lim = desc_limit_scaled(&desc);
- if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch &&
- (ctxt->d & NoBigReal)) {
- /* la is between zero and 0xffff */
- if (la > 0xffff)
- goto bad;
- *max_size = 0x10000 - la;
- } else if ((desc.type & 8) || !(desc.type & 4)) {
- /* expand-up segment */
- if (addr.ea > lim)
- goto bad;
- *max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
- } else {
+ if (!(desc.type & 8) && (desc.type & 4)) {
/* expand-down segment */
if (addr.ea <= lim)
goto bad;
lim = desc.d ? 0xffffffff : 0xffff;
- if (addr.ea > lim)
- goto bad;
- *max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
}
+ if (addr.ea > lim)
+ goto bad;
+ *max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
if (size > *max_size)
goto bad;
- cpl = ctxt->ops->cpl(ctxt);
- if (!(desc.type & 8)) {
- /* data segment */
- if (cpl > desc.dpl)
- goto bad;
- } else if ((desc.type & 8) && !(desc.type & 4)) {
- /* nonconforming code segment */
- if (cpl != desc.dpl)
- goto bad;
- } else if ((desc.type & 8) && (desc.type & 4)) {
- /* conforming code segment */
- if (cpl < desc.dpl)
- goto bad;
- }
+ la &= (u32)-1;
break;
}
- if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
- la &= (u32)-1;
if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
return emulate_gp(ctxt, 0);
*linear = la;
@@ -735,9 +692,55 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
ulong *linear)
{
unsigned max_size;
- return __linearize(ctxt, addr, &max_size, size, write, false, linear);
+ return __linearize(ctxt, addr, &max_size, size, write, false,
+ ctxt->mode, linear);
+}
+
+static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
+ enum x86emul_mode mode)
+{
+ ulong linear;
+ int rc;
+ unsigned max_size;
+ struct segmented_address addr = { .seg = VCPU_SREG_CS,
+ .ea = dst };
+
+ if (ctxt->op_bytes != sizeof(unsigned long))
+ addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
+ rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
+ if (rc == X86EMUL_CONTINUE)
+ ctxt->_eip = addr.ea;
+ return rc;
+}
+
+static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
+{
+ return assign_eip(ctxt, dst, ctxt->mode);
}
+static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
+ const struct desc_struct *cs_desc)
+{
+ enum x86emul_mode mode = ctxt->mode;
+
+#ifdef CONFIG_X86_64
+ if (ctxt->mode >= X86EMUL_MODE_PROT32 && cs_desc->l) {
+ u64 efer = 0;
+
+ ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
+ if (efer & EFER_LMA)
+ mode = X86EMUL_MODE_PROT64;
+ }
+#endif
+ if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
+ mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
+ return assign_eip(ctxt, dst, mode);
+}
+
+static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
+{
+ return assign_eip_near(ctxt, ctxt->_eip + rel);
+}
static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
@@ -776,7 +779,8 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
* boundary check itself. Instead, we use max_size to check
* against op_size.
*/
- rc = __linearize(ctxt, addr, &max_size, 0, false, true, &linear);
+ rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
+ &linear);
if (unlikely(rc != X86EMUL_CONTINUE))
return rc;
@@ -911,6 +915,8 @@ FASTOP2W(btc);
FASTOP2(xadd);
+FASTOP2R(cmp, cmp_r);
+
static u8 test_cc(unsigned int condition, unsigned long flags)
{
u8 rc;
@@ -1221,6 +1227,7 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
if (index_reg != 4)
modrm_ea += reg_read(ctxt, index_reg) << scale;
} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
+ modrm_ea += insn_fetch(s32, ctxt);
if (ctxt->mode == X86EMUL_MODE_PROT64)
ctxt->rip_relative = 1;
} else {
@@ -1229,10 +1236,6 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
adjust_modrm_seg(ctxt, base_reg);
}
switch (ctxt->modrm_mod) {
- case 0:
- if (ctxt->modrm_rm == 5)
- modrm_ea += insn_fetch(s32, ctxt);
- break;
case 1:
modrm_ea += insn_fetch(s8, ctxt);
break;
@@ -1284,7 +1287,8 @@ static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
else
sv = (s64)ctxt->src.val & (s64)mask;
- ctxt->dst.addr.mem.ea += (sv >> 3);
+ ctxt->dst.addr.mem.ea = address_mask(ctxt,
+ ctxt->dst.addr.mem.ea + (sv >> 3));
}
/* only subword offset */
@@ -1610,6 +1614,9 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
sizeof(base3), &ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
+ if (is_noncanonical_address(get_desc_base(&seg_desc) |
+ ((u64)base3 << 32)))
+ return emulate_gp(ctxt, 0);
}
load:
ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
@@ -1807,6 +1814,10 @@ static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
int seg = ctxt->src2.val;
ctxt->src.val = get_segment_selector(ctxt, seg);
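+ /*
+ * A push of a segment register with a 32-bit operand size moves
+ * the stack pointer by 4 but stores only 2 bytes: pre-adjust RSP
+ * by 2 and fall through to a 16-bit push.
+ */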
+ if (ctxt->op_bytes == 4) {
+ rsp_increment(ctxt, -2);
+ ctxt->op_bytes = 2;
+ }
return em_push(ctxt);
}
@@ -1850,7 +1861,7 @@ static int em_pusha(struct x86_emulate_ctxt *ctxt)
static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
- ctxt->src.val = (unsigned long)ctxt->eflags;
+ ctxt->src.val = (unsigned long)ctxt->eflags & ~EFLG_VM;
return em_push(ctxt);
}
@@ -2035,7 +2046,7 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
if (rc != X86EMUL_CONTINUE)
return rc;
- rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
+ rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
if (rc != X86EMUL_CONTINUE) {
WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
/* assigning eip failed; restore the old cs */
@@ -2045,31 +2056,22 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
return rc;
}
-static int em_grp45(struct x86_emulate_ctxt *ctxt)
+static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
{
- int rc = X86EMUL_CONTINUE;
+ return assign_eip_near(ctxt, ctxt->src.val);
+}
- switch (ctxt->modrm_reg) {
- case 2: /* call near abs */ {
- long int old_eip;
- old_eip = ctxt->_eip;
- rc = assign_eip_near(ctxt, ctxt->src.val);
- if (rc != X86EMUL_CONTINUE)
- break;
- ctxt->src.val = old_eip;
- rc = em_push(ctxt);
- break;
- }
- case 4: /* jmp abs */
- rc = assign_eip_near(ctxt, ctxt->src.val);
- break;
- case 5: /* jmp far */
- rc = em_jmp_far(ctxt);
- break;
- case 6: /* push */
- rc = em_push(ctxt);
- break;
- }
+static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
+{
+ int rc;
+ long int old_eip;
+
+ old_eip = ctxt->_eip;
+ rc = assign_eip_near(ctxt, ctxt->src.val);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+ ctxt->src.val = old_eip;
+ rc = em_push(ctxt);
return rc;
}
@@ -2128,11 +2130,11 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
/* Outer-privilege level return is not implemented */
if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
return X86EMUL_UNHANDLEABLE;
- rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0, false,
+ rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl, false,
&new_desc);
if (rc != X86EMUL_CONTINUE)
return rc;
- rc = assign_eip_far(ctxt, eip, new_desc.l);
+ rc = assign_eip_far(ctxt, eip, &new_desc);
if (rc != X86EMUL_CONTINUE) {
WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
@@ -2316,6 +2318,7 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt)
ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
ctxt->eflags &= ~msr_data;
+ ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
#endif
} else {
/* legacy mode */
@@ -2349,11 +2352,9 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
&& !vendor_intel(ctxt))
return emulate_ud(ctxt);
- /* XXX sysenter/sysexit have not been tested in 64bit mode.
- * Therefore, we inject an #UD.
- */
+ /* sysenter/sysexit have not been tested in 64bit mode. */
if (ctxt->mode == X86EMUL_MODE_PROT64)
- return emulate_ud(ctxt);
+ return X86EMUL_UNHANDLEABLE;
setup_syscalls_segments(ctxt, &cs, &ss);
@@ -2425,6 +2426,8 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
if ((msr_data & 0xfffc) == 0x0)
return emulate_gp(ctxt, 0);
ss_sel = (u16)(msr_data + 24);
+ rcx = (u32)rcx;
+ rdx = (u32)rdx;
break;
case X86EMUL_MODE_PROT64:
cs_sel = (u16)(msr_data + 32);
@@ -2599,7 +2602,6 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
- /* FIXME: need to provide precise fault address */
return ret;
save_state_to_tss16(ctxt, &tss_seg);
@@ -2607,13 +2609,11 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
- /* FIXME: need to provide precise fault address */
return ret;
ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
- /* FIXME: need to provide precise fault address */
return ret;
if (old_tss_sel != 0xffff) {
@@ -2624,7 +2624,6 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
sizeof tss_seg.prev_task_link,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
- /* FIXME: need to provide precise fault address */
return ret;
}
@@ -2813,7 +2812,8 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
*
* 1. jmp/call/int to task gate: Check against DPL of the task gate
* 2. Exception/IRQ/iret: No check is performed
- * 3. jmp/call to TSS: Check against DPL of the TSS
+ * 3. jmp/call to TSS/task-gate: No check is performed since the
+ * hardware checks it before exiting.
*/
if (reason == TASK_SWITCH_GATE) {
if (idt_index != -1) {
@@ -2830,13 +2830,8 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
return emulate_gp(ctxt, (idt_index << 3) | 0x2);
}
- } else if (reason != TASK_SWITCH_IRET) {
- int dpl = next_tss_desc.dpl;
- if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
- return emulate_gp(ctxt, tss_selector);
}
-
desc_limit = desc_limit_scaled(&next_tss_desc);
if (!next_tss_desc.p ||
((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
@@ -2913,8 +2908,8 @@ static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
{
int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
- register_address_increment(ctxt, reg_rmw(ctxt, reg), df * op->bytes);
- op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, reg));
+ register_address_increment(ctxt, reg, df * op->bytes);
+ op->addr.mem.ea = register_address(ctxt, reg);
}
static int em_das(struct x86_emulate_ctxt *ctxt)
@@ -3025,7 +3020,7 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
if (rc != X86EMUL_CONTINUE)
return X86EMUL_CONTINUE;
- rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
+ rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
if (rc != X86EMUL_CONTINUE)
goto fail;
@@ -3215,6 +3210,8 @@ static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
return emulate_ud(ctxt);
ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
+ if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
+ ctxt->dst.bytes = 2;
return X86EMUL_CONTINUE;
}
@@ -3317,7 +3314,7 @@ static int em_sidt(struct x86_emulate_ctxt *ctxt)
return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
}
-static int em_lgdt(struct x86_emulate_ctxt *ctxt)
+static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
{
struct desc_ptr desc_ptr;
int rc;
@@ -3329,12 +3326,23 @@ static int em_lgdt(struct x86_emulate_ctxt *ctxt)
ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
- ctxt->ops->set_gdt(ctxt, &desc_ptr);
+ if (ctxt->mode == X86EMUL_MODE_PROT64 &&
+ is_noncanonical_address(desc_ptr.address))
+ return emulate_gp(ctxt, 0);
+ if (lgdt)
+ ctxt->ops->set_gdt(ctxt, &desc_ptr);
+ else
+ ctxt->ops->set_idt(ctxt, &desc_ptr);
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
+static int em_lgdt(struct x86_emulate_ctxt *ctxt)
+{
+ return em_lgdt_lidt(ctxt, true);
+}
+
static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
{
int rc;
@@ -3348,20 +3356,7 @@ static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
static int em_lidt(struct x86_emulate_ctxt *ctxt)
{
- struct desc_ptr desc_ptr;
- int rc;
-
- if (ctxt->mode == X86EMUL_MODE_PROT64)
- ctxt->op_bytes = 8;
- rc = read_descriptor(ctxt, ctxt->src.addr.mem,
- &desc_ptr.size, &desc_ptr.address,
- ctxt->op_bytes);
- if (rc != X86EMUL_CONTINUE)
- return rc;
- ctxt->ops->set_idt(ctxt, &desc_ptr);
- /* Disable writeback. */
- ctxt->dst.type = OP_NONE;
- return X86EMUL_CONTINUE;
+ return em_lgdt_lidt(ctxt, false);
}
static int em_smsw(struct x86_emulate_ctxt *ctxt)
@@ -3384,7 +3379,7 @@ static int em_loop(struct x86_emulate_ctxt *ctxt)
{
int rc = X86EMUL_CONTINUE;
- register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
+ register_address_increment(ctxt, VCPU_REGS_RCX, -1);
if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
(ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
rc = jmp_rel(ctxt, ctxt->src.val);
@@ -3554,7 +3549,7 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if (efer & EFER_LMA)
- rsvd = CR3_L_MODE_RESERVED_BITS;
+ rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;
if (new_val & rsvd)
return emulate_gp(ctxt, 0);
@@ -3596,8 +3591,15 @@ static int check_dr_read(struct x86_emulate_ctxt *ctxt)
if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
return emulate_ud(ctxt);
- if (check_dr7_gd(ctxt))
+ if (check_dr7_gd(ctxt)) {
+ ulong dr6;
+
+ ctxt->ops->get_dr(ctxt, 6, &dr6);
+ dr6 &= ~15;
+ dr6 |= DR6_BD | DR6_RTM;
+ ctxt->ops->set_dr(ctxt, 6, dr6);
return emulate_db(ctxt);
+ }
return X86EMUL_CONTINUE;
}
@@ -3684,6 +3686,7 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
+#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
@@ -3780,11 +3783,11 @@ static const struct opcode group4[] = {
static const struct opcode group5[] = {
F(DstMem | SrcNone | Lock, em_inc),
F(DstMem | SrcNone | Lock, em_dec),
- I(SrcMem | Stack, em_grp45),
+ I(SrcMem | NearBranch, em_call_near_abs),
I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
- I(SrcMem | Stack, em_grp45),
- I(SrcMemFAddr | ImplicitOps, em_grp45),
- I(SrcMem | Stack, em_grp45), D(Undefined),
+ I(SrcMem | NearBranch, em_jmp_abs),
+ I(SrcMemFAddr | ImplicitOps, em_jmp_far),
+ I(SrcMem | Stack, em_push), D(Undefined),
};
static const struct opcode group6[] = {
@@ -3845,8 +3848,12 @@ static const struct gprefix pfx_0f_6f_0f_7f = {
I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
};
+static const struct instr_dual instr_dual_0f_2b = {
+ I(0, em_mov), N
+};
+
static const struct gprefix pfx_0f_2b = {
- I(0, em_mov), I(0, em_mov), N, N,
+ ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
};
static const struct gprefix pfx_0f_28_0f_29 = {
@@ -3920,6 +3927,10 @@ static const struct escape escape_dd = { {
N, N, N, N, N, N, N, N,
} };
+static const struct instr_dual instr_dual_0f_c3 = {
+ I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
+};
+
static const struct opcode opcode_table[256] = {
/* 0x00 - 0x07 */
F6ALU(Lock, em_add),
@@ -3964,7 +3975,7 @@ static const struct opcode opcode_table[256] = {
I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
/* 0x70 - 0x7F */
- X16(D(SrcImmByte)),
+ X16(D(SrcImmByte | NearBranch)),
/* 0x80 - 0x87 */
G(ByteOp | DstMem | SrcImm, group1),
G(DstMem | SrcImm, group1),
@@ -3991,20 +4002,20 @@ static const struct opcode opcode_table[256] = {
I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
I2bv(SrcSI | DstDI | Mov | String, em_mov),
- F2bv(SrcSI | DstDI | String | NoWrite, em_cmp),
+ F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r),
/* 0xA8 - 0xAF */
F2bv(DstAcc | SrcImm | NoWrite, em_test),
I2bv(SrcAcc | DstDI | Mov | String, em_mov),
I2bv(SrcSI | DstAcc | Mov | String, em_mov),
- F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp),
+ F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
/* 0xB0 - 0xB7 */
X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
/* 0xB8 - 0xBF */
X8(I(DstReg | SrcImm64 | Mov, em_mov)),
/* 0xC0 - 0xC7 */
G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
- I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
- I(ImplicitOps | Stack, em_ret),
+ I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
+ I(ImplicitOps | NearBranch, em_ret),
I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
G(ByteOp, group11), G(0, group11),
@@ -4024,13 +4035,14 @@ static const struct opcode opcode_table[256] = {
/* 0xD8 - 0xDF */
N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
/* 0xE0 - 0xE7 */
- X3(I(SrcImmByte, em_loop)),
- I(SrcImmByte, em_jcxz),
+ X3(I(SrcImmByte | NearBranch, em_loop)),
+ I(SrcImmByte | NearBranch, em_jcxz),
I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
/* 0xE8 - 0xEF */
- I(SrcImm | Stack, em_call), D(SrcImm | ImplicitOps),
- I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
+ I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
+ I(SrcImmFAddr | No64, em_jmp_far),
+ D(SrcImmByte | ImplicitOps | NearBranch),
I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
/* 0xF0 - 0xF7 */
@@ -4090,7 +4102,7 @@ static const struct opcode twobyte_table[256] = {
N, N, N, N,
N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
/* 0x80 - 0x8F */
- X16(D(SrcImm)),
+ X16(D(SrcImm | NearBranch)),
/* 0x90 - 0x9F */
X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
/* 0xA0 - 0xA7 */
@@ -4121,7 +4133,7 @@ static const struct opcode twobyte_table[256] = {
D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
/* 0xC0 - 0xC7 */
F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
- N, D(DstMem | SrcReg | ModRM | Mov),
+ N, ID(0, &instr_dual_0f_c3),
N, N, N, GD(0, &group9),
/* 0xC8 - 0xCF */
X8(I(DstReg, em_bswap)),
@@ -4134,12 +4146,20 @@ static const struct opcode twobyte_table[256] = {
N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};
+static const struct instr_dual instr_dual_0f_38_f0 = {
+ I(DstReg | SrcMem | Mov, em_movbe), N
+};
+
+static const struct instr_dual instr_dual_0f_38_f1 = {
+ I(DstMem | SrcReg | Mov, em_movbe), N
+};
+
static const struct gprefix three_byte_0f_38_f0 = {
- I(DstReg | SrcMem | Mov, em_movbe), N, N, N
+ ID(0, &instr_dual_0f_38_f0), N, N, N
};
static const struct gprefix three_byte_0f_38_f1 = {
- I(DstMem | SrcReg | Mov, em_movbe), N, N, N
+ ID(0, &instr_dual_0f_38_f1), N, N, N
};
/*
@@ -4152,8 +4172,8 @@ static const struct opcode opcode_map_0f_38[256] = {
/* 0x80 - 0xef */
X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
/* 0xf0 - 0xf1 */
- GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f0),
- GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f1),
+ GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
+ GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
/* 0xf2 - 0xff */
N, N, X4(N), X8(N)
};
@@ -4275,7 +4295,7 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
op->type = OP_MEM;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.mem.ea =
- register_address(ctxt, reg_read(ctxt, VCPU_REGS_RDI));
+ register_address(ctxt, VCPU_REGS_RDI);
op->addr.mem.seg = VCPU_SREG_ES;
op->val = 0;
op->count = 1;
@@ -4329,7 +4349,7 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
op->type = OP_MEM;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.mem.ea =
- register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI));
+ register_address(ctxt, VCPU_REGS_RSI);
op->addr.mem.seg = ctxt->seg_override;
op->val = 0;
op->count = 1;
@@ -4338,7 +4358,7 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
op->type = OP_MEM;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.mem.ea =
- register_address(ctxt,
+ address_mask(ctxt,
reg_read(ctxt, VCPU_REGS_RBX) +
(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
op->addr.mem.seg = ctxt->seg_override;
@@ -4510,8 +4530,7 @@ done_prefixes:
/* vex-prefix instructions are not implemented */
if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
- (mode == X86EMUL_MODE_PROT64 ||
- (mode >= X86EMUL_MODE_PROT16 && (ctxt->modrm & 0x80)))) {
+ (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
ctxt->d = NotImpl;
}
@@ -4549,6 +4568,12 @@ done_prefixes:
else
opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
break;
+ case InstrDual:
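+ /* ModRM.mod == 3 selects the register form; modes 0-2 the memory form */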
+ if ((ctxt->modrm >> 6) == 3)
+ opcode = opcode.u.idual->mod3;
+ else
+ opcode = opcode.u.idual->mod012;
+ break;
default:
return EMULATION_FAILED;
}
@@ -4567,7 +4592,8 @@ done_prefixes:
return EMULATION_FAILED;
if (unlikely(ctxt->d &
- (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm))) {
+ (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
+ No16))) {
/*
* These are copied unconditionally here, and checked unconditionally
* in x86_emulate_insn.
@@ -4578,8 +4604,12 @@ done_prefixes:
if (ctxt->d & NotImpl)
return EMULATION_FAILED;
- if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
- ctxt->op_bytes = 8;
+ if (mode == X86EMUL_MODE_PROT64) {
+ if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
+ ctxt->op_bytes = 8;
+ else if (ctxt->d & NearBranch)
+ ctxt->op_bytes = 8;
+ }
if (ctxt->d & Op3264) {
if (mode == X86EMUL_MODE_PROT64)
@@ -4588,6 +4618,9 @@ done_prefixes:
ctxt->op_bytes = 4;
}
+ if ((ctxt->d & No16) && ctxt->op_bytes == 2)
+ ctxt->op_bytes = 4;
+
if (ctxt->d & Sse)
ctxt->op_bytes = 16;
else if (ctxt->d & Mmx)
@@ -4631,7 +4664,8 @@ done_prefixes:
rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
if (ctxt->rip_relative)
- ctxt->memopp->addr.mem.ea += ctxt->_eip;
+ ctxt->memopp->addr.mem.ea = address_mask(ctxt,
+ ctxt->memopp->addr.mem.ea + ctxt->_eip);
done:
return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
@@ -4775,6 +4809,12 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
goto done;
}
+ /* Instruction can only be executed in protected mode */
+ if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
+ rc = emulate_ud(ctxt);
+ goto done;
+ }
+
/* Privileged instruction can be executed only in CPL=0 */
if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
if (ctxt->d & PrivUD)
@@ -4784,12 +4824,6 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
goto done;
}
- /* Instruction can only be executed in protected mode */
- if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
- rc = emulate_ud(ctxt);
- goto done;
- }
-
/* Do instruction specific permission checks */
if (ctxt->d & CheckPerm) {
rc = ctxt->check_perm(ctxt);
@@ -4974,8 +5008,7 @@ writeback:
count = ctxt->src.count;
else
count = ctxt->dst.count;
- register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX),
- -count);
+ register_address_increment(ctxt, VCPU_REGS_RCX, -count);
if (!string_insn_completed(ctxt)) {
/*
@@ -5053,11 +5086,6 @@ twobyte_insn:
ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
(s16) ctxt->src.val;
break;
- case 0xc3: /* movnti */
- ctxt->dst.bytes = ctxt->op_bytes;
- ctxt->dst.val = (ctxt->op_bytes == 8) ? (u64) ctxt->src.val :
- (u32) ctxt->src.val;
- break;
default:
goto cannot_emulate;
}
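
The emulate.c changes above add an InstrDual table type: one opcode byte maps to two possible entries, chosen by the ModRM mod field (mod == 3 selects the register form, anything else the memory form), which is how 0f c3 (movnti) now rejects register operands. A minimal standalone sketch of that dispatch; every name in it is invented for illustration and is not the emulator's:

/*
 * Illustrative sketch, not the emulator's code: how an InstrDual-style
 * table entry selects a handler from the ModRM mod field, as in the
 * "case InstrDual" hunk above.
 */
#include <stdint.h>
#include <stdio.h>

struct opcode_stub {
	const char *handler;            /* which handler would run */
};

struct instr_dual_stub {
	struct opcode_stub mod012;      /* memory forms (mod == 0, 1, 2) */
	struct opcode_stub mod3;        /* register form (mod == 3) */
};

static struct opcode_stub pick(const struct instr_dual_stub *d, uint8_t modrm)
{
	/* mod is the top two bits of the ModRM byte */
	return (modrm >> 6) == 3 ? d->mod3 : d->mod012;
}

int main(void)
{
	/* 0f c3 (movnti) is valid only with a memory operand */
	struct instr_dual_stub movnti = {
		{ "movnti store to memory" },
		{ "undefined opcode (#UD)" },
	};

	printf("modrm 0x06 -> %s\n", pick(&movnti, 0x06).handler);
	printf("modrm 0xc0 -> %s\n", pick(&movnti, 0xc0).handler);
	return 0;
}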
diff --git a/virt/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index 0ba4057d271..b1947e0f3e1 100644
--- a/virt/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -270,7 +270,6 @@ void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap,
spin_unlock(&ioapic->lock);
}
-#ifdef CONFIG_X86
void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
{
struct kvm_ioapic *ioapic = kvm->arch.vioapic;
@@ -279,12 +278,6 @@ void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
return;
kvm_make_scan_ioapic_request(kvm);
}
-#else
-void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
-{
- return;
-}
-#endif
static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
{
@@ -586,11 +579,6 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
case IOAPIC_REG_WINDOW:
ioapic_write_indirect(ioapic, data);
break;
-#ifdef CONFIG_IA64
- case IOAPIC_REG_EOI:
- __kvm_ioapic_update_eoi(NULL, ioapic, data, IOAPIC_LEVEL_TRIG);
- break;
-#endif
default:
break;
diff --git a/virt/kvm/ioapic.h b/arch/x86/kvm/ioapic.h
index e23b70634f1..3c9195535ff 100644
--- a/virt/kvm/ioapic.h
+++ b/arch/x86/kvm/ioapic.h
@@ -19,7 +19,6 @@ struct kvm_vcpu;
/* Direct registers. */
#define IOAPIC_REG_SELECT 0x00
#define IOAPIC_REG_WINDOW 0x10
-#define IOAPIC_REG_EOI 0x40 /* IA64 IOSAPIC only */
/* Indirect registers. */
#define IOAPIC_REG_APIC_ID 0x00 /* x86 IOAPIC only */
@@ -45,6 +44,23 @@ struct rtc_status {
DECLARE_BITMAP(dest_map, KVM_MAX_VCPUS);
};
+union kvm_ioapic_redirect_entry {
+ u64 bits;
+ struct {
+ u8 vector;
+ u8 delivery_mode:3;
+ u8 dest_mode:1;
+ u8 delivery_status:1;
+ u8 polarity:1;
+ u8 remote_irr:1;
+ u8 trig_mode:1;
+ u8 mask:1;
+ u8 reserve:7;
+ u8 reserved[4];
+ u8 dest_id;
+ } fields;
+};
+
struct kvm_ioapic {
u64 base_address;
u32 ioregsel;
@@ -83,7 +99,7 @@ static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm)
void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu);
int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
- int short_hand, int dest, int dest_mode);
+ int short_hand, unsigned int dest, int dest_mode);
int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector,
int trigger_mode);
@@ -97,7 +113,6 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
struct kvm_lapic_irq *irq, unsigned long *dest_map);
int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
-void kvm_vcpu_request_scan_ioapic(struct kvm *kvm);
void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap,
u32 *tmr);
diff --git a/virt/kvm/iommu.c b/arch/x86/kvm/iommu.c
index c1e6ae989a4..17b73eeac8a 100644
--- a/virt/kvm/iommu.c
+++ b/arch/x86/kvm/iommu.c
@@ -31,6 +31,7 @@
#include <linux/dmar.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
+#include "assigned-dev.h"
static bool allow_unsafe_assigned_interrupts;
module_param_named(allow_unsafe_assigned_interrupts,
@@ -169,10 +170,8 @@ static int kvm_iommu_map_memslots(struct kvm *kvm)
return r;
}
-int kvm_assign_device(struct kvm *kvm,
- struct kvm_assigned_dev_kernel *assigned_dev)
+int kvm_assign_device(struct kvm *kvm, struct pci_dev *pdev)
{
- struct pci_dev *pdev = NULL;
struct iommu_domain *domain = kvm->arch.iommu_domain;
int r;
bool noncoherent;
@@ -181,7 +180,6 @@ int kvm_assign_device(struct kvm *kvm,
if (!domain)
return 0;
- pdev = assigned_dev->dev;
if (pdev == NULL)
return -ENODEV;
@@ -212,17 +210,14 @@ out_unmap:
return r;
}
-int kvm_deassign_device(struct kvm *kvm,
- struct kvm_assigned_dev_kernel *assigned_dev)
+int kvm_deassign_device(struct kvm *kvm, struct pci_dev *pdev)
{
struct iommu_domain *domain = kvm->arch.iommu_domain;
- struct pci_dev *pdev = NULL;
/* check if iommu exists and in use */
if (!domain)
return 0;
- pdev = assigned_dev->dev;
if (pdev == NULL)
return -ENODEV;
diff --git a/virt/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
index 963b8995a9e..72298b3ac02 100644
--- a/virt/kvm/irq_comm.c
+++ b/arch/x86/kvm/irq_comm.c
@@ -26,9 +26,6 @@
#include <trace/events/kvm.h>
#include <asm/msidef.h>
-#ifdef CONFIG_IA64
-#include <asm/iosapic.h>
-#endif
#include "irq.h"
@@ -38,12 +35,8 @@ static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
struct kvm *kvm, int irq_source_id, int level,
bool line_status)
{
-#ifdef CONFIG_X86
struct kvm_pic *pic = pic_irqchip(kvm);
return kvm_pic_set_irq(pic, e->irqchip.pin, irq_source_id, level);
-#else
- return -1;
-#endif
}
static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
@@ -57,12 +50,7 @@ static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
inline static bool kvm_is_dm_lowest_prio(struct kvm_lapic_irq *irq)
{
-#ifdef CONFIG_IA64
- return irq->delivery_mode ==
- (IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT);
-#else
return irq->delivery_mode == APIC_DM_LOWEST;
-#endif
}
int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
@@ -202,9 +190,7 @@ int kvm_request_irq_source_id(struct kvm *kvm)
}
ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
-#ifdef CONFIG_X86
ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID);
-#endif
set_bit(irq_source_id, bitmap);
unlock:
mutex_unlock(&kvm->irq_lock);
@@ -215,9 +201,7 @@ unlock:
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
{
ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
-#ifdef CONFIG_X86
ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID);
-#endif
mutex_lock(&kvm->irq_lock);
if (irq_source_id < 0 ||
@@ -230,9 +214,7 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
goto unlock;
kvm_ioapic_clear_all(kvm->arch.vioapic, irq_source_id);
-#ifdef CONFIG_X86
kvm_pic_clear_all(pic_irqchip(kvm), irq_source_id);
-#endif
unlock:
mutex_unlock(&kvm->irq_lock);
}
@@ -242,7 +224,7 @@ void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
{
mutex_lock(&kvm->irq_lock);
kimn->irq = irq;
- hlist_add_head_rcu(&kimn->link, &kvm->mask_notifier_list);
+ hlist_add_head_rcu(&kimn->link, &kvm->arch.mask_notifier_list);
mutex_unlock(&kvm->irq_lock);
}
@@ -264,7 +246,7 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
idx = srcu_read_lock(&kvm->irq_srcu);
gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
if (gsi != -1)
- hlist_for_each_entry_rcu(kimn, &kvm->mask_notifier_list, link)
+ hlist_for_each_entry_rcu(kimn, &kvm->arch.mask_notifier_list, link)
if (kimn->irq == gsi)
kimn->func(kimn, mask);
srcu_read_unlock(&kvm->irq_srcu, idx);
@@ -322,16 +304,11 @@ out:
.u.irqchip = { .irqchip = KVM_IRQCHIP_IOAPIC, .pin = (irq) } }
#define ROUTING_ENTRY1(irq) IOAPIC_ROUTING_ENTRY(irq)
-#ifdef CONFIG_X86
-# define PIC_ROUTING_ENTRY(irq) \
+#define PIC_ROUTING_ENTRY(irq) \
{ .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP, \
.u.irqchip = { .irqchip = SELECT_PIC(irq), .pin = (irq) % 8 } }
-# define ROUTING_ENTRY2(irq) \
+#define ROUTING_ENTRY2(irq) \
IOAPIC_ROUTING_ENTRY(irq), PIC_ROUTING_ENTRY(irq)
-#else
-# define ROUTING_ENTRY2(irq) \
- IOAPIC_ROUTING_ENTRY(irq)
-#endif
static const struct kvm_irq_routing_entry default_routing[] = {
ROUTING_ENTRY2(0), ROUTING_ENTRY2(1),
@@ -346,20 +323,6 @@ static const struct kvm_irq_routing_entry default_routing[] = {
ROUTING_ENTRY1(18), ROUTING_ENTRY1(19),
ROUTING_ENTRY1(20), ROUTING_ENTRY1(21),
ROUTING_ENTRY1(22), ROUTING_ENTRY1(23),
-#ifdef CONFIG_IA64
- ROUTING_ENTRY1(24), ROUTING_ENTRY1(25),
- ROUTING_ENTRY1(26), ROUTING_ENTRY1(27),
- ROUTING_ENTRY1(28), ROUTING_ENTRY1(29),
- ROUTING_ENTRY1(30), ROUTING_ENTRY1(31),
- ROUTING_ENTRY1(32), ROUTING_ENTRY1(33),
- ROUTING_ENTRY1(34), ROUTING_ENTRY1(35),
- ROUTING_ENTRY1(36), ROUTING_ENTRY1(37),
- ROUTING_ENTRY1(38), ROUTING_ENTRY1(39),
- ROUTING_ENTRY1(40), ROUTING_ENTRY1(41),
- ROUTING_ENTRY1(42), ROUTING_ENTRY1(43),
- ROUTING_ENTRY1(44), ROUTING_ENTRY1(45),
- ROUTING_ENTRY1(46), ROUTING_ENTRY1(47),
-#endif
};
int kvm_setup_default_irq_routing(struct kvm *kvm)
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index b8345dd41b2..4f0c0b95468 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -68,6 +68,9 @@
#define MAX_APIC_VECTOR 256
#define APIC_VECTORS_PER_REG 32
+#define APIC_BROADCAST 0xFF
+#define X2APIC_BROADCAST 0xFFFFFFFFul
+
#define VEC_POS(v) ((v) & (32 - 1))
#define REG_POS(v) (((v) >> 5) << 4)
@@ -129,8 +132,6 @@ static inline int kvm_apic_id(struct kvm_lapic *apic)
return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff;
}
-#define KVM_X2APIC_CID_BITS 0
-
static void recalculate_apic_map(struct kvm *kvm)
{
struct kvm_apic_map *new, *old = NULL;
@@ -149,42 +150,56 @@ static void recalculate_apic_map(struct kvm *kvm)
new->cid_shift = 8;
new->cid_mask = 0;
new->lid_mask = 0xff;
+ new->broadcast = APIC_BROADCAST;
kvm_for_each_vcpu(i, vcpu, kvm) {
struct kvm_lapic *apic = vcpu->arch.apic;
- u16 cid, lid;
- u32 ldr;
if (!kvm_apic_present(vcpu))
continue;
+ if (apic_x2apic_mode(apic)) {
+ new->ldr_bits = 32;
+ new->cid_shift = 16;
+ new->cid_mask = new->lid_mask = 0xffff;
+ new->broadcast = X2APIC_BROADCAST;
+ } else if (kvm_apic_get_reg(apic, APIC_LDR)) {
+ if (kvm_apic_get_reg(apic, APIC_DFR) ==
+ APIC_DFR_CLUSTER) {
+ new->cid_shift = 4;
+ new->cid_mask = 0xf;
+ new->lid_mask = 0xf;
+ } else {
+ new->cid_shift = 8;
+ new->cid_mask = 0;
+ new->lid_mask = 0xff;
+ }
+ }
+
/*
* All APICs have to be configured in the same mode by an OS.
* We take advantage of this while building logical id lookup
- * table. After reset APICs are in xapic/flat mode, so if we
- * find apic with different setting we assume this is the mode
+ * table. After reset APICs are in software disabled mode, so if
+ * we find an apic with a different setting we assume this is the mode
* OS wants all apics to be in; build lookup table accordingly.
*/
- if (apic_x2apic_mode(apic)) {
- new->ldr_bits = 32;
- new->cid_shift = 16;
- new->cid_mask = (1 << KVM_X2APIC_CID_BITS) - 1;
- new->lid_mask = 0xffff;
- } else if (kvm_apic_sw_enabled(apic) &&
- !new->cid_mask /* flat mode */ &&
- kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_CLUSTER) {
- new->cid_shift = 4;
- new->cid_mask = 0xf;
- new->lid_mask = 0xf;
- }
+ if (kvm_apic_sw_enabled(apic))
+ break;
+ }
- new->phys_map[kvm_apic_id(apic)] = apic;
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ struct kvm_lapic *apic = vcpu->arch.apic;
+ u16 cid, lid;
+ u32 ldr, aid;
+ aid = kvm_apic_id(apic);
ldr = kvm_apic_get_reg(apic, APIC_LDR);
cid = apic_cluster_id(new, ldr);
lid = apic_logical_id(new, ldr);
- if (lid)
+ if (aid < ARRAY_SIZE(new->phys_map))
+ new->phys_map[aid] = apic;
+ if (lid && cid < ARRAY_SIZE(new->logical_map))
new->logical_map[cid][ffs(lid) - 1] = apic;
}
out:
@@ -201,11 +216,13 @@ out:
static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
{
- u32 prev = kvm_apic_get_reg(apic, APIC_SPIV);
+ bool enabled = val & APIC_SPIV_APIC_ENABLED;
apic_set_reg(apic, APIC_SPIV, val);
- if ((prev ^ val) & APIC_SPIV_APIC_ENABLED) {
- if (val & APIC_SPIV_APIC_ENABLED) {
+
+ if (enabled != apic->sw_enabled) {
+ apic->sw_enabled = enabled;
+ if (enabled) {
static_key_slow_dec_deferred(&apic_sw_disabled);
recalculate_apic_map(apic->vcpu->kvm);
} else
@@ -237,21 +254,17 @@ static inline int apic_lvt_vector(struct kvm_lapic *apic, int lvt_type)
static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
{
- return ((kvm_apic_get_reg(apic, APIC_LVTT) &
- apic->lapic_timer.timer_mode_mask) == APIC_LVT_TIMER_ONESHOT);
+ return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT;
}
static inline int apic_lvtt_period(struct kvm_lapic *apic)
{
- return ((kvm_apic_get_reg(apic, APIC_LVTT) &
- apic->lapic_timer.timer_mode_mask) == APIC_LVT_TIMER_PERIODIC);
+ return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_PERIODIC;
}
static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
{
- return ((kvm_apic_get_reg(apic, APIC_LVTT) &
- apic->lapic_timer.timer_mode_mask) ==
- APIC_LVT_TIMER_TSCDEADLINE);
+ return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_TSCDEADLINE;
}
static inline int apic_lvt_nmi_mode(u32 lvt_val)
@@ -326,8 +339,12 @@ EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
static inline void apic_set_irr(int vec, struct kvm_lapic *apic)
{
- apic->irr_pending = true;
apic_set_vector(vec, apic->regs + APIC_IRR);
+ /*
+ * irr_pending must be true if any interrupt is pending; set it after
+ * APIC_IRR to avoid race with apic_clear_irr
+ */
+ apic->irr_pending = true;
}
static inline int apic_search_irr(struct kvm_lapic *apic)
@@ -359,13 +376,15 @@ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
vcpu = apic->vcpu;
- apic_clear_vector(vec, apic->regs + APIC_IRR);
- if (unlikely(kvm_apic_vid_enabled(vcpu->kvm)))
+ if (unlikely(kvm_apic_vid_enabled(vcpu->kvm))) {
/* try to update RVI */
+ apic_clear_vector(vec, apic->regs + APIC_IRR);
kvm_make_request(KVM_REQ_EVENT, vcpu);
- else {
- vec = apic_search_irr(apic);
- apic->irr_pending = (vec != -1);
+ } else {
+ apic->irr_pending = false;
+ apic_clear_vector(vec, apic->regs + APIC_IRR);
+ if (apic_search_irr(apic) != -1)
+ apic->irr_pending = true;
}
}
@@ -558,16 +577,25 @@ static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
apic_update_ppr(apic);
}
-int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
+static int kvm_apic_broadcast(struct kvm_lapic *apic, u32 dest)
+{
+ return dest == (apic_x2apic_mode(apic) ?
+ X2APIC_BROADCAST : APIC_BROADCAST);
+}
+
+int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 dest)
{
- return dest == 0xff || kvm_apic_id(apic) == dest;
+ return kvm_apic_id(apic) == dest || kvm_apic_broadcast(apic, dest);
}
-int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
+int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
{
int result = 0;
u32 logical_id;
+ if (kvm_apic_broadcast(apic, mda))
+ return 1;
+
if (apic_x2apic_mode(apic)) {
logical_id = kvm_apic_get_reg(apic, APIC_LDR);
return logical_id & mda;
@@ -595,7 +623,7 @@ int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
}
int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
- int short_hand, int dest, int dest_mode)
+ int short_hand, unsigned int dest, int dest_mode)
{
int result = 0;
struct kvm_lapic *target = vcpu->arch.apic;
@@ -657,15 +685,24 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
if (!map)
goto out;
+ if (irq->dest_id == map->broadcast)
+ goto out;
+
+ ret = true;
+
if (irq->dest_mode == 0) { /* physical mode */
- if (irq->delivery_mode == APIC_DM_LOWEST ||
- irq->dest_id == 0xff)
+ if (irq->dest_id >= ARRAY_SIZE(map->phys_map))
goto out;
- dst = &map->phys_map[irq->dest_id & 0xff];
+
+ dst = &map->phys_map[irq->dest_id];
} else {
u32 mda = irq->dest_id << (32 - map->ldr_bits);
+ u16 cid = apic_cluster_id(map, mda);
+
+ if (cid >= ARRAY_SIZE(map->logical_map))
+ goto out;
- dst = map->logical_map[apic_cluster_id(map, mda)];
+ dst = map->logical_map[cid];
bitmap = apic_logical_id(map, mda);
@@ -691,8 +728,6 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
*r = 0;
*r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
}
-
- ret = true;
out:
rcu_read_unlock();
return ret;
@@ -1034,6 +1069,26 @@ static void update_divide_count(struct kvm_lapic *apic)
apic->divide_count);
}
+static void apic_timer_expired(struct kvm_lapic *apic)
+{
+ struct kvm_vcpu *vcpu = apic->vcpu;
+ wait_queue_head_t *q = &vcpu->wq;
+
+ /*
+ * Note: KVM_REQ_PENDING_TIMER is implicitly checked in
+ * vcpu_enter_guest.
+ */
+ if (atomic_read(&apic->lapic_timer.pending))
+ return;
+
+ atomic_inc(&apic->lapic_timer.pending);
+ /* FIXME: this code should not know anything about vcpus */
+ kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
+
+ if (waitqueue_active(q))
+ wake_up_interruptible(q);
+}
+
static void start_apic_timer(struct kvm_lapic *apic)
{
ktime_t now;
@@ -1096,9 +1151,10 @@ static void start_apic_timer(struct kvm_lapic *apic)
if (likely(tscdeadline > guest_tsc)) {
ns = (tscdeadline - guest_tsc) * 1000000ULL;
do_div(ns, this_tsc_khz);
- }
- hrtimer_start(&apic->lapic_timer.timer,
- ktime_add_ns(now, ns), HRTIMER_MODE_ABS);
+ hrtimer_start(&apic->lapic_timer.timer,
+ ktime_add_ns(now, ns), HRTIMER_MODE_ABS);
+ } else
+ apic_timer_expired(apic);
local_irq_restore(flags);
}
@@ -1203,17 +1259,20 @@ static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
break;
- case APIC_LVTT:
- if ((kvm_apic_get_reg(apic, APIC_LVTT) &
- apic->lapic_timer.timer_mode_mask) !=
- (val & apic->lapic_timer.timer_mode_mask))
+ case APIC_LVTT: {
+ u32 timer_mode = val & apic->lapic_timer.timer_mode_mask;
+
+ if (apic->lapic_timer.timer_mode != timer_mode) {
+ apic->lapic_timer.timer_mode = timer_mode;
hrtimer_cancel(&apic->lapic_timer.timer);
+ }
if (!kvm_apic_sw_enabled(apic))
val |= APIC_LVT_MASKED;
val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
apic_set_reg(apic, APIC_LVTT, val);
break;
+ }
case APIC_TMICT:
if (apic_lvtt_tscdeadline(apic))
@@ -1320,7 +1379,7 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu)
if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
static_key_slow_dec_deferred(&apic_hw_disabled);
- if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_APIC_ENABLED))
+ if (!apic->sw_enabled)
static_key_slow_dec_deferred(&apic_sw_disabled);
if (apic->regs)
@@ -1355,9 +1414,6 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
return;
hrtimer_cancel(&apic->lapic_timer.timer);
- /* Inject here so clearing tscdeadline won't override new value */
- if (apic_has_pending_timer(vcpu))
- kvm_inject_apic_timer_irqs(vcpu);
apic->lapic_timer.tscdeadline = data;
start_apic_timer(apic);
}
@@ -1422,6 +1478,10 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
apic->base_address = apic->vcpu->arch.apic_base &
MSR_IA32_APICBASE_BASE;
+ if ((value & MSR_IA32_APICBASE_ENABLE) &&
+ apic->base_address != APIC_DEFAULT_PHYS_BASE)
+ pr_warn_once("APIC base relocation is unsupported by KVM");
+
/* with FSB delivery interrupt, we can restart APIC functionality */
apic_debug("apic base msr is 0x%016" PRIx64 ", and base address is "
"0x%lx.\n", apic->vcpu->arch.apic_base, apic->base_address);
@@ -1447,6 +1507,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
for (i = 0; i < APIC_LVT_NUM; i++)
apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
+ apic->lapic_timer.timer_mode = 0;
apic_set_reg(apic, APIC_LVT0,
SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
@@ -1538,23 +1599,8 @@ static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
{
struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);
- struct kvm_vcpu *vcpu = apic->vcpu;
- wait_queue_head_t *q = &vcpu->wq;
-
- /*
- * There is a race window between reading and incrementing, but we do
- * not care about potentially losing timer events in the !reinject
- * case anyway. Note: KVM_REQ_PENDING_TIMER is implicitly checked
- * in vcpu_enter_guest.
- */
- if (!atomic_read(&ktimer->pending)) {
- atomic_inc(&ktimer->pending);
- /* FIXME: this code should not know anything about vcpus */
- kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
- }
- if (waitqueue_active(q))
- wake_up_interruptible(q);
+ apic_timer_expired(apic);
if (lapic_is_periodic(apic)) {
hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
@@ -1693,6 +1739,9 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm) ?
1 : count_vectors(apic->regs + APIC_ISR);
apic->highest_isr_cache = -1;
+ if (kvm_x86_ops->hwapic_irr_update)
+ kvm_x86_ops->hwapic_irr_update(vcpu,
+ apic_find_highest_irr(apic));
kvm_x86_ops->hwapic_isr_update(vcpu->kvm, apic_find_highest_isr(apic));
kvm_make_request(KVM_REQ_EVENT, vcpu);
kvm_rtc_eoi_tracking_restore_one(vcpu);
@@ -1837,8 +1886,11 @@ int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
if (!irqchip_in_kernel(vcpu->kvm) || !apic_x2apic_mode(apic))
return 1;
+ if (reg == APIC_ICR2)
+ return 1;
+
/* if this is ICR write vector before command */
- if (msr == 0x830)
+ if (reg == APIC_ICR)
apic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
return apic_reg_write(apic, reg, (u32)data);
}
@@ -1851,9 +1903,15 @@ int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
if (!irqchip_in_kernel(vcpu->kvm) || !apic_x2apic_mode(apic))
return 1;
+ if (reg == APIC_DFR || reg == APIC_ICR2) {
+ apic_debug("KVM_APIC_READ: read x2apic reserved register %x\n",
+ reg);
+ return 1;
+ }
+
if (apic_reg_read(apic, reg, 4, &low))
return 1;
- if (msr == 0x830)
+ if (reg == APIC_ICR)
apic_reg_read(apic, APIC_ICR2, 4, &high);
*data = (((u64)high) << 32) | low;
@@ -1908,7 +1966,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
- unsigned int sipi_vector;
+ u8 sipi_vector;
unsigned long pe;
if (!kvm_vcpu_has_lapic(vcpu) || !apic->pending_events)
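
The lapic.c hunks above add explicit broadcast destination IDs (APIC_BROADCAST for xAPIC, X2APIC_BROADCAST for x2APIC) and route destination matching through kvm_apic_broadcast(). A small self-contained sketch of that test, using only the two constants visible in the patch; the helper name and driver code here are illustrative, not KVM's:

/* Illustrative sketch, not kernel code: the broadcast-ID check that
 * kvm_apic_broadcast() performs in the hunks above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define APIC_BROADCAST    0xFFu         /* xAPIC broadcast destination */
#define X2APIC_BROADCAST  0xFFFFFFFFu   /* x2APIC broadcast destination */

static bool is_broadcast(bool x2apic_mode, uint32_t dest)
{
	return dest == (x2apic_mode ? X2APIC_BROADCAST : APIC_BROADCAST);
}

int main(void)
{
	printf("xapic  dest 0xff       -> broadcast: %d\n", is_broadcast(false, 0xFFu));
	printf("x2apic dest 0xff       -> broadcast: %d\n", is_broadcast(true, 0xFFu));
	printf("x2apic dest 0xffffffff -> broadcast: %d\n", is_broadcast(true, 0xFFFFFFFFu));
	return 0;
}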
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 6a11845fd8b..c674fce53cf 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -11,6 +11,7 @@
struct kvm_timer {
struct hrtimer timer;
s64 period; /* unit: ns */
+ u32 timer_mode;
u32 timer_mode_mask;
u64 tscdeadline;
atomic_t pending; /* accumulated triggered timers */
@@ -22,6 +23,7 @@ struct kvm_lapic {
struct kvm_timer lapic_timer;
u32 divide_count;
struct kvm_vcpu *vcpu;
+ bool sw_enabled;
bool irr_pending;
/* Number of bits set in ISR. */
s16 isr_count;
@@ -55,8 +57,8 @@ void kvm_apic_set_version(struct kvm_vcpu *vcpu);
void kvm_apic_update_tmr(struct kvm_vcpu *vcpu, u32 *tmr);
void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir);
-int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
-int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
+int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 dest);
+int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda);
int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
unsigned long *dest_map);
int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type);
@@ -119,11 +121,11 @@ static inline int kvm_apic_hw_enabled(struct kvm_lapic *apic)
extern struct static_key_deferred apic_sw_disabled;
-static inline int kvm_apic_sw_enabled(struct kvm_lapic *apic)
+static inline bool kvm_apic_sw_enabled(struct kvm_lapic *apic)
{
if (static_key_false(&apic_sw_disabled.key))
- return kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_APIC_ENABLED;
- return APIC_SPIV_APIC_ENABLED;
+ return apic->sw_enabled;
+ return true;
}
static inline bool kvm_apic_present(struct kvm_vcpu *vcpu)
@@ -152,8 +154,6 @@ static inline u16 apic_cluster_id(struct kvm_apic_map *map, u32 ldr)
ldr >>= 32 - map->ldr_bits;
cid = (ldr >> map->cid_shift) & map->cid_mask;
- BUG_ON(cid >= ARRAY_SIZE(map->logical_map));
-
return cid;
}
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 978f402006e..10fbed126b1 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -214,13 +214,12 @@ EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
#define MMIO_GEN_LOW_SHIFT 10
#define MMIO_GEN_LOW_MASK ((1 << MMIO_GEN_LOW_SHIFT) - 2)
#define MMIO_GEN_MASK ((1 << MMIO_GEN_SHIFT) - 1)
-#define MMIO_MAX_GEN ((1 << MMIO_GEN_SHIFT) - 1)
static u64 generation_mmio_spte_mask(unsigned int gen)
{
u64 mask;
- WARN_ON(gen > MMIO_MAX_GEN);
+ WARN_ON(gen & ~MMIO_GEN_MASK);
mask = (gen & MMIO_GEN_LOW_MASK) << MMIO_SPTE_GEN_LOW_SHIFT;
mask |= ((u64)gen >> MMIO_GEN_LOW_SHIFT) << MMIO_SPTE_GEN_HIGH_SHIFT;
@@ -263,13 +262,13 @@ static bool is_mmio_spte(u64 spte)
static gfn_t get_mmio_spte_gfn(u64 spte)
{
- u64 mask = generation_mmio_spte_mask(MMIO_MAX_GEN) | shadow_mmio_mask;
+ u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask;
return (spte & ~mask) >> PAGE_SHIFT;
}
static unsigned get_mmio_spte_access(u64 spte)
{
- u64 mask = generation_mmio_spte_mask(MMIO_MAX_GEN) | shadow_mmio_mask;
+ u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask;
return (spte & ~mask) & ~PAGE_MASK;
}
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 7527cefc5a4..41dd0387ccc 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1056,9 +1056,11 @@ static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool ho
{
struct vcpu_svm *svm = to_svm(vcpu);
- WARN_ON(adjustment < 0);
- if (host)
- adjustment = svm_scale_tsc(vcpu, adjustment);
+ if (host) {
+ if (svm->tsc_ratio != TSC_RATIO_DEFAULT)
+ WARN_ON(adjustment < 0);
+ adjustment = svm_scale_tsc(vcpu, (u64)adjustment);
+ }
svm->vmcb->control.tsc_offset += adjustment;
if (is_guest_mode(vcpu))
@@ -2999,7 +3001,6 @@ static int dr_interception(struct vcpu_svm *svm)
{
int reg, dr;
unsigned long val;
- int err;
if (svm->vcpu.guest_debug == 0) {
/*
@@ -3019,12 +3020,15 @@ static int dr_interception(struct vcpu_svm *svm)
dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
if (dr >= 16) { /* mov to DRn */
+ if (!kvm_require_dr(&svm->vcpu, dr - 16))
+ return 1;
val = kvm_register_read(&svm->vcpu, reg);
kvm_set_dr(&svm->vcpu, dr - 16, val);
} else {
- err = kvm_get_dr(&svm->vcpu, dr, &val);
- if (!err)
- kvm_register_write(&svm->vcpu, reg, val);
+ if (!kvm_require_dr(&svm->vcpu, dr))
+ return 1;
+ kvm_get_dr(&svm->vcpu, dr, &val);
+ kvm_register_write(&svm->vcpu, reg, val);
}
skip_emulated_instruction(&svm->vcpu);
@@ -4123,6 +4127,11 @@ static bool svm_mpx_supported(void)
return false;
}
+static bool svm_xsaves_supported(void)
+{
+ return false;
+}
+
static bool svm_has_wbinvd_exit(void)
{
return true;
@@ -4410,6 +4419,7 @@ static struct kvm_x86_ops svm_x86_ops = {
.rdtscp_supported = svm_rdtscp_supported,
.invpcid_supported = svm_invpcid_supported,
.mpx_supported = svm_mpx_supported,
+ .xsaves_supported = svm_xsaves_supported,
.set_supported_cpuid = svm_set_supported_cpuid,
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 6b06ab8748d..c2a34bb5ad9 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -5,6 +5,7 @@
#include <asm/vmx.h>
#include <asm/svm.h>
#include <asm/clocksource.h>
+#include <asm/pvclock-abi.h>
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm
@@ -877,6 +878,42 @@ TRACE_EVENT(kvm_ple_window,
#define trace_kvm_ple_window_shrink(vcpu_id, new, old) \
trace_kvm_ple_window(false, vcpu_id, new, old)
+TRACE_EVENT(kvm_pvclock_update,
+ TP_PROTO(unsigned int vcpu_id, struct pvclock_vcpu_time_info *pvclock),
+ TP_ARGS(vcpu_id, pvclock),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, vcpu_id )
+ __field( __u32, version )
+ __field( __u64, tsc_timestamp )
+ __field( __u64, system_time )
+ __field( __u32, tsc_to_system_mul )
+ __field( __s8, tsc_shift )
+ __field( __u8, flags )
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu_id;
+ __entry->version = pvclock->version;
+ __entry->tsc_timestamp = pvclock->tsc_timestamp;
+ __entry->system_time = pvclock->system_time;
+ __entry->tsc_to_system_mul = pvclock->tsc_to_system_mul;
+ __entry->tsc_shift = pvclock->tsc_shift;
+ __entry->flags = pvclock->flags;
+ ),
+
+ TP_printk("vcpu_id %u, pvclock { version %u, tsc_timestamp 0x%llx, "
+ "system_time 0x%llx, tsc_to_system_mul 0x%x, tsc_shift %d, "
+ "flags 0x%x }",
+ __entry->vcpu_id,
+ __entry->version,
+ __entry->tsc_timestamp,
+ __entry->system_time,
+ __entry->tsc_to_system_mul,
+ __entry->tsc_shift,
+ __entry->flags)
+);
+
#endif /* _TRACE_KVM_H */
#undef TRACE_INCLUDE_PATH
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 3e556c68351..feb852b0459 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -99,13 +99,15 @@ module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
static bool __read_mostly nested = 0;
module_param(nested, bool, S_IRUGO);
+static u64 __read_mostly host_xss;
+
#define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST (X86_CR0_WP | X86_CR0_NE)
#define KVM_VM_CR0_ALWAYS_ON \
(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
#define KVM_CR4_GUEST_OWNED_BITS \
(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
- | X86_CR4_OSXMMEXCPT)
+ | X86_CR4_OSXMMEXCPT | X86_CR4_TSD)
#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
@@ -214,6 +216,7 @@ struct __packed vmcs12 {
u64 virtual_apic_page_addr;
u64 apic_access_addr;
u64 ept_pointer;
+ u64 xss_exit_bitmap;
u64 guest_physical_address;
u64 vmcs_link_pointer;
u64 guest_ia32_debugctl;
@@ -616,6 +619,7 @@ static const unsigned short vmcs_field_to_offset_table[] = {
FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr),
FIELD64(APIC_ACCESS_ADDR, apic_access_addr),
FIELD64(EPT_POINTER, ept_pointer),
+ FIELD64(XSS_EXIT_BITMAP, xss_exit_bitmap),
FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
@@ -720,12 +724,15 @@ static const unsigned short vmcs_field_to_offset_table[] = {
FIELD(HOST_RSP, host_rsp),
FIELD(HOST_RIP, host_rip),
};
-static const int max_vmcs_field = ARRAY_SIZE(vmcs_field_to_offset_table);
static inline short vmcs_field_to_offset(unsigned long field)
{
- if (field >= max_vmcs_field || vmcs_field_to_offset_table[field] == 0)
- return -1;
+ BUILD_BUG_ON(ARRAY_SIZE(vmcs_field_to_offset_table) > SHRT_MAX);
+
+ if (field >= ARRAY_SIZE(vmcs_field_to_offset_table) ||
+ vmcs_field_to_offset_table[field] == 0)
+ return -ENOENT;
+
return vmcs_field_to_offset_table[field];
}
@@ -758,6 +765,7 @@ static u64 construct_eptp(unsigned long root_hpa);
static void kvm_cpu_vmxon(u64 addr);
static void kvm_cpu_vmxoff(void);
static bool vmx_mpx_supported(void);
+static bool vmx_xsaves_supported(void);
static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
static void vmx_set_segment(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg);
@@ -1098,6 +1106,12 @@ static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
}
+static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
+{
+ return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES) &&
+ vmx_xsaves_supported();
+}
+
static inline bool is_exception(u32 intr_info)
{
return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
@@ -1659,12 +1673,20 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
clear_atomic_switch_msr(vmx, MSR_EFER);
- /* On ept, can't emulate nx, and must switch nx atomically */
- if (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX)) {
+
+ /*
+ * On EPT, we can't emulate NX, so we must switch EFER atomically.
+ * On CPUs that support "load IA32_EFER", always switch EFER
+ * atomically, since it's faster than switching it manually.
+ */
+ if (cpu_has_load_ia32_efer ||
+ (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
guest_efer = vmx->vcpu.arch.efer;
if (!(guest_efer & EFER_LMA))
guest_efer &= ~EFER_LME;
- add_atomic_switch_msr(vmx, MSR_EFER, guest_efer, host_efer);
+ if (guest_efer != host_efer)
+ add_atomic_switch_msr(vmx, MSR_EFER,
+ guest_efer, host_efer);
return false;
}
@@ -2377,12 +2399,13 @@ static __init void nested_vmx_setup_ctls_msrs(void)
nested_vmx_secondary_ctls_low = 0;
nested_vmx_secondary_ctls_high &=
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
- SECONDARY_EXEC_UNRESTRICTED_GUEST |
- SECONDARY_EXEC_WBINVD_EXITING;
+ SECONDARY_EXEC_WBINVD_EXITING |
+ SECONDARY_EXEC_XSAVES;
if (enable_ept) {
/* nested EPT: emulate EPT also to L1 */
- nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_EPT;
+ nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_EPT |
+ SECONDARY_EXEC_UNRESTRICTED_GUEST;
nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT |
VMX_EPT_INVEPT_BIT;
@@ -2558,6 +2581,11 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
if (!nested_vmx_allowed(vcpu))
return 1;
return vmx_get_vmx_msr(vcpu, msr_index, pdata);
+ case MSR_IA32_XSS:
+ if (!vmx_xsaves_supported())
+ return 1;
+ data = vcpu->arch.ia32_xss;
+ break;
case MSR_TSC_AUX:
if (!to_vmx(vcpu)->rdtscp_enabled)
return 1;
@@ -2649,6 +2677,22 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
return 1; /* they are read-only */
+ case MSR_IA32_XSS:
+ if (!vmx_xsaves_supported())
+ return 1;
+ /*
+ * The only supported bit as of Skylake is bit 8, but
+ * it is not supported on KVM.
+ */
+ if (data != 0)
+ return 1;
+ vcpu->arch.ia32_xss = data;
+ if (vcpu->arch.ia32_xss != host_xss)
+ add_atomic_switch_msr(vmx, MSR_IA32_XSS,
+ vcpu->arch.ia32_xss, host_xss);
+ else
+ clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
+ break;
case MSR_TSC_AUX:
if (!vmx->rdtscp_enabled)
return 1;
@@ -2884,7 +2928,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
SECONDARY_EXEC_ENABLE_INVPCID |
SECONDARY_EXEC_APIC_REGISTER_VIRT |
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
- SECONDARY_EXEC_SHADOW_VMCS;
+ SECONDARY_EXEC_SHADOW_VMCS |
+ SECONDARY_EXEC_XSAVES;
if (adjust_vmx_controls(min2, opt2,
MSR_IA32_VMX_PROCBASED_CTLS2,
&_cpu_based_2nd_exec_control) < 0)
@@ -3007,6 +3052,9 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
}
}
+ if (cpu_has_xsaves)
+ rdmsrl(MSR_IA32_XSS, host_xss);
+
return 0;
}
@@ -3110,76 +3158,6 @@ static __init int alloc_kvm_area(void)
return 0;
}
-static __init int hardware_setup(void)
-{
- if (setup_vmcs_config(&vmcs_config) < 0)
- return -EIO;
-
- if (boot_cpu_has(X86_FEATURE_NX))
- kvm_enable_efer_bits(EFER_NX);
-
- if (!cpu_has_vmx_vpid())
- enable_vpid = 0;
- if (!cpu_has_vmx_shadow_vmcs())
- enable_shadow_vmcs = 0;
- if (enable_shadow_vmcs)
- init_vmcs_shadow_fields();
-
- if (!cpu_has_vmx_ept() ||
- !cpu_has_vmx_ept_4levels()) {
- enable_ept = 0;
- enable_unrestricted_guest = 0;
- enable_ept_ad_bits = 0;
- }
-
- if (!cpu_has_vmx_ept_ad_bits())
- enable_ept_ad_bits = 0;
-
- if (!cpu_has_vmx_unrestricted_guest())
- enable_unrestricted_guest = 0;
-
- if (!cpu_has_vmx_flexpriority()) {
- flexpriority_enabled = 0;
-
- /*
- * set_apic_access_page_addr() is used to reload apic access
- * page upon invalidation. No need to do anything if the
- * processor does not have the APIC_ACCESS_ADDR VMCS field.
- */
- kvm_x86_ops->set_apic_access_page_addr = NULL;
- }
-
- if (!cpu_has_vmx_tpr_shadow())
- kvm_x86_ops->update_cr8_intercept = NULL;
-
- if (enable_ept && !cpu_has_vmx_ept_2m_page())
- kvm_disable_largepages();
-
- if (!cpu_has_vmx_ple())
- ple_gap = 0;
-
- if (!cpu_has_vmx_apicv())
- enable_apicv = 0;
-
- if (enable_apicv)
- kvm_x86_ops->update_cr8_intercept = NULL;
- else {
- kvm_x86_ops->hwapic_irr_update = NULL;
- kvm_x86_ops->deliver_posted_interrupt = NULL;
- kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
- }
-
- if (nested)
- nested_vmx_setup_ctls_msrs();
-
- return alloc_kvm_area();
-}
-
-static __exit void hardware_unsetup(void)
-{
- free_kvm_area();
-}
-
static bool emulation_required(struct kvm_vcpu *vcpu)
{
return emulate_invalid_guest_state && !guest_state_valid(vcpu);
@@ -4396,6 +4374,7 @@ static void ept_set_mmio_spte_mask(void)
kvm_mmu_set_mmio_spte_mask((0x3ull << 62) | 0x6ull);
}
+#define VMX_XSS_EXIT_BITMAP 0
/*
* Sets up the vmcs for emulated real mode.
*/
@@ -4505,6 +4484,9 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
set_cr4_guest_host_mask(vmx);
+ if (vmx_xsaves_supported())
+ vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP);
+
return 0;
}
@@ -5163,13 +5145,20 @@ static int handle_cr(struct kvm_vcpu *vcpu)
static int handle_dr(struct kvm_vcpu *vcpu)
{
unsigned long exit_qualification;
- int dr, reg;
+ int dr, dr7, reg;
+
+ exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+ dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
+
+ /* First, if DR does not exist, trigger UD */
+ if (!kvm_require_dr(vcpu, dr))
+ return 1;
/* Do not handle if the CPL > 0, will trigger GP on re-entry */
if (!kvm_require_cpl(vcpu, 0))
return 1;
- dr = vmcs_readl(GUEST_DR7);
- if (dr & DR7_GD) {
+ dr7 = vmcs_readl(GUEST_DR7);
+ if (dr7 & DR7_GD) {
/*
* As the vm-exit takes precedence over the debug trap, we
* need to emulate the latter, either for the host or the
@@ -5177,17 +5166,14 @@ static int handle_dr(struct kvm_vcpu *vcpu)
*/
if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
vcpu->run->debug.arch.dr6 = vcpu->arch.dr6;
- vcpu->run->debug.arch.dr7 = dr;
- vcpu->run->debug.arch.pc =
- vmcs_readl(GUEST_CS_BASE) +
- vmcs_readl(GUEST_RIP);
+ vcpu->run->debug.arch.dr7 = dr7;
+ vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu);
vcpu->run->debug.arch.exception = DB_VECTOR;
vcpu->run->exit_reason = KVM_EXIT_DEBUG;
return 0;
} else {
- vcpu->arch.dr7 &= ~DR7_GD;
+ vcpu->arch.dr6 &= ~15;
vcpu->arch.dr6 |= DR6_BD | DR6_RTM;
- vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
kvm_queue_exception(vcpu, DB_VECTOR);
return 1;
}
@@ -5209,8 +5195,6 @@ static int handle_dr(struct kvm_vcpu *vcpu)
return 1;
}
- exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
- dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
reg = DEBUG_REG_ACCESS_REG(exit_qualification);
if (exit_qualification & TYPE_MOV_FROM_DR) {
unsigned long val;
@@ -5391,6 +5375,20 @@ static int handle_xsetbv(struct kvm_vcpu *vcpu)
return 1;
}
+static int handle_xsaves(struct kvm_vcpu *vcpu)
+{
+ skip_emulated_instruction(vcpu);
+ WARN(1, "this should never happen\n");
+ return 1;
+}
+
+static int handle_xrstors(struct kvm_vcpu *vcpu)
+{
+ skip_emulated_instruction(vcpu);
+ WARN(1, "this should never happen\n");
+ return 1;
+}
+
static int handle_apic_access(struct kvm_vcpu *vcpu)
{
if (likely(fasteoi)) {
@@ -5492,7 +5490,7 @@ static int handle_task_switch(struct kvm_vcpu *vcpu)
}
/* clear all local breakpoint enable flags */
- vmcs_writel(GUEST_DR7, vmcs_readl(GUEST_DR7) & ~0x55);
+ vmcs_writel(GUEST_DR7, vmcs_readl(GUEST_DR7) & ~0x155);
/*
* TODO: What about debug traps on tss switch?
@@ -5539,11 +5537,11 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
trace_kvm_page_fault(gpa, exit_qualification);
/* It is a write fault? */
- error_code = exit_qualification & (1U << 1);
+ error_code = exit_qualification & PFERR_WRITE_MASK;
/* It is a fetch fault? */
- error_code |= (exit_qualification & (1U << 2)) << 2;
+ error_code |= (exit_qualification << 2) & PFERR_FETCH_MASK;
/* ept page table is present? */
- error_code |= (exit_qualification >> 3) & 0x1;
+ error_code |= (exit_qualification >> 3) & PFERR_PRESENT_MASK;
vcpu->arch.exit_qualification = exit_qualification;
@@ -5785,6 +5783,204 @@ static void update_ple_window_actual_max(void)
ple_window_grow, INT_MIN);
}
+static __init int hardware_setup(void)
+{
+ int r = -ENOMEM, i, msr;
+
+ rdmsrl_safe(MSR_EFER, &host_efer);
+
+ for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i)
+ kvm_define_shared_msr(i, vmx_msr_index[i]);
+
+ vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL);
+ if (!vmx_io_bitmap_a)
+ return r;
+
+ vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL);
+ if (!vmx_io_bitmap_b)
+ goto out;
+
+ vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL);
+ if (!vmx_msr_bitmap_legacy)
+ goto out1;
+
+ vmx_msr_bitmap_legacy_x2apic =
+ (unsigned long *)__get_free_page(GFP_KERNEL);
+ if (!vmx_msr_bitmap_legacy_x2apic)
+ goto out2;
+
+ vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL);
+ if (!vmx_msr_bitmap_longmode)
+ goto out3;
+
+ vmx_msr_bitmap_longmode_x2apic =
+ (unsigned long *)__get_free_page(GFP_KERNEL);
+ if (!vmx_msr_bitmap_longmode_x2apic)
+ goto out4;
+ vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
+ if (!vmx_vmread_bitmap)
+ goto out5;
+
+ vmx_vmwrite_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
+ if (!vmx_vmwrite_bitmap)
+ goto out6;
+
+ memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
+ memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
+
+ /*
+ * Allow direct access to the PC debug port (it is often used for I/O
+ * delays, but the vmexits simply slow things down).
+ */
+ memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE);
+ clear_bit(0x80, vmx_io_bitmap_a);
+
+ memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);
+
+ memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
+ memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
+
+ vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
+ vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
+ vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
+ vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
+ vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
+ vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
+ vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
+
+ memcpy(vmx_msr_bitmap_legacy_x2apic,
+ vmx_msr_bitmap_legacy, PAGE_SIZE);
+ memcpy(vmx_msr_bitmap_longmode_x2apic,
+ vmx_msr_bitmap_longmode, PAGE_SIZE);
+
+ if (enable_apicv) {
+ for (msr = 0x800; msr <= 0x8ff; msr++)
+ vmx_disable_intercept_msr_read_x2apic(msr);
+
+ /* According to the SDM, in x2apic mode the whole id reg is used.
+ * But KVM only uses the highest eight bits. Need to
+ * intercept it */
+ vmx_enable_intercept_msr_read_x2apic(0x802);
+ /* TMCCT */
+ vmx_enable_intercept_msr_read_x2apic(0x839);
+ /* TPR */
+ vmx_disable_intercept_msr_write_x2apic(0x808);
+ /* EOI */
+ vmx_disable_intercept_msr_write_x2apic(0x80b);
+ /* SELF-IPI */
+ vmx_disable_intercept_msr_write_x2apic(0x83f);
+ }
+
+ if (enable_ept) {
+ kvm_mmu_set_mask_ptes(0ull,
+ (enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
+ (enable_ept_ad_bits) ? VMX_EPT_DIRTY_BIT : 0ull,
+ 0ull, VMX_EPT_EXECUTABLE_MASK);
+ ept_set_mmio_spte_mask();
+ kvm_enable_tdp();
+ } else
+ kvm_disable_tdp();
+
+ update_ple_window_actual_max();
+
+ if (setup_vmcs_config(&vmcs_config) < 0) {
+ r = -EIO;
+ goto out7;
+ }
+
+ if (boot_cpu_has(X86_FEATURE_NX))
+ kvm_enable_efer_bits(EFER_NX);
+
+ if (!cpu_has_vmx_vpid())
+ enable_vpid = 0;
+ if (!cpu_has_vmx_shadow_vmcs())
+ enable_shadow_vmcs = 0;
+ if (enable_shadow_vmcs)
+ init_vmcs_shadow_fields();
+
+ if (!cpu_has_vmx_ept() ||
+ !cpu_has_vmx_ept_4levels()) {
+ enable_ept = 0;
+ enable_unrestricted_guest = 0;
+ enable_ept_ad_bits = 0;
+ }
+
+ if (!cpu_has_vmx_ept_ad_bits())
+ enable_ept_ad_bits = 0;
+
+ if (!cpu_has_vmx_unrestricted_guest())
+ enable_unrestricted_guest = 0;
+
+ if (!cpu_has_vmx_flexpriority()) {
+ flexpriority_enabled = 0;
+
+ /*
+ * set_apic_access_page_addr() is used to reload apic access
+ * page upon invalidation. No need to do anything if the
+ * processor does not have the APIC_ACCESS_ADDR VMCS field.
+ */
+ kvm_x86_ops->set_apic_access_page_addr = NULL;
+ }
+
+ if (!cpu_has_vmx_tpr_shadow())
+ kvm_x86_ops->update_cr8_intercept = NULL;
+
+ if (enable_ept && !cpu_has_vmx_ept_2m_page())
+ kvm_disable_largepages();
+
+ if (!cpu_has_vmx_ple())
+ ple_gap = 0;
+
+ if (!cpu_has_vmx_apicv())
+ enable_apicv = 0;
+
+ if (enable_apicv)
+ kvm_x86_ops->update_cr8_intercept = NULL;
+ else {
+ kvm_x86_ops->hwapic_irr_update = NULL;
+ kvm_x86_ops->deliver_posted_interrupt = NULL;
+ kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
+ }
+
+ if (nested)
+ nested_vmx_setup_ctls_msrs();
+
+ return alloc_kvm_area();
+
+out7:
+ free_page((unsigned long)vmx_vmwrite_bitmap);
+out6:
+ free_page((unsigned long)vmx_vmread_bitmap);
+out5:
+ free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
+out4:
+ free_page((unsigned long)vmx_msr_bitmap_longmode);
+out3:
+ free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
+out2:
+ free_page((unsigned long)vmx_msr_bitmap_legacy);
+out1:
+ free_page((unsigned long)vmx_io_bitmap_b);
+out:
+ free_page((unsigned long)vmx_io_bitmap_a);
+
+ return r;
+}
+
+static __exit void hardware_unsetup(void)
+{
+ free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
+ free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
+ free_page((unsigned long)vmx_msr_bitmap_legacy);
+ free_page((unsigned long)vmx_msr_bitmap_longmode);
+ free_page((unsigned long)vmx_io_bitmap_b);
+ free_page((unsigned long)vmx_io_bitmap_a);
+ free_page((unsigned long)vmx_vmwrite_bitmap);
+ free_page((unsigned long)vmx_vmread_bitmap);
+
+ free_kvm_area();
+}
+
/*
* Indicate a busy-waiting vcpu in spinlock. We do not enable the PAUSE
* exiting, so only get here on cpu with PAUSE-Loop-Exiting.
@@ -6361,58 +6557,60 @@ static inline int vmcs_field_readonly(unsigned long field)
* some of the bits we return here (e.g., on 32-bit guests, only 32 bits of
* 64-bit fields are to be returned).
*/
-static inline bool vmcs12_read_any(struct kvm_vcpu *vcpu,
- unsigned long field, u64 *ret)
+static inline int vmcs12_read_any(struct kvm_vcpu *vcpu,
+ unsigned long field, u64 *ret)
{
short offset = vmcs_field_to_offset(field);
char *p;
if (offset < 0)
- return 0;
+ return offset;
p = ((char *)(get_vmcs12(vcpu))) + offset;
switch (vmcs_field_type(field)) {
case VMCS_FIELD_TYPE_NATURAL_WIDTH:
*ret = *((natural_width *)p);
- return 1;
+ return 0;
case VMCS_FIELD_TYPE_U16:
*ret = *((u16 *)p);
- return 1;
+ return 0;
case VMCS_FIELD_TYPE_U32:
*ret = *((u32 *)p);
- return 1;
+ return 0;
case VMCS_FIELD_TYPE_U64:
*ret = *((u64 *)p);
- return 1;
+ return 0;
default:
- return 0; /* can never happen. */
+ WARN_ON(1);
+ return -ENOENT;
}
}
-static inline bool vmcs12_write_any(struct kvm_vcpu *vcpu,
- unsigned long field, u64 field_value){
+static inline int vmcs12_write_any(struct kvm_vcpu *vcpu,
+ unsigned long field, u64 field_value){
short offset = vmcs_field_to_offset(field);
char *p = ((char *) get_vmcs12(vcpu)) + offset;
if (offset < 0)
- return false;
+ return offset;
switch (vmcs_field_type(field)) {
case VMCS_FIELD_TYPE_U16:
*(u16 *)p = field_value;
- return true;
+ return 0;
case VMCS_FIELD_TYPE_U32:
*(u32 *)p = field_value;
- return true;
+ return 0;
case VMCS_FIELD_TYPE_U64:
*(u64 *)p = field_value;
- return true;
+ return 0;
case VMCS_FIELD_TYPE_NATURAL_WIDTH:
*(natural_width *)p = field_value;
- return true;
+ return 0;
default:
- return false; /* can never happen. */
+ WARN_ON(1);
+ return -ENOENT;
}
}
@@ -6445,6 +6643,9 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
case VMCS_FIELD_TYPE_NATURAL_WIDTH:
field_value = vmcs_readl(field);
break;
+ default:
+ WARN_ON(1);
+ continue;
}
vmcs12_write_any(&vmx->vcpu, field, field_value);
}
@@ -6490,6 +6691,9 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
case VMCS_FIELD_TYPE_NATURAL_WIDTH:
vmcs_writel(field, (long)field_value);
break;
+ default:
+ WARN_ON(1);
+ break;
}
}
}
@@ -6528,7 +6732,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
/* Decode instruction info and find the field to read */
field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
/* Read the field, zero-extended to a u64 field_value */
- if (!vmcs12_read_any(vcpu, field, &field_value)) {
+ if (vmcs12_read_any(vcpu, field, &field_value) < 0) {
nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
skip_emulated_instruction(vcpu);
return 1;
@@ -6598,7 +6802,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
return 1;
}
- if (!vmcs12_write_any(vcpu, field, field_value)) {
+ if (vmcs12_write_any(vcpu, field, field_value) < 0) {
nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
skip_emulated_instruction(vcpu);
return 1;
@@ -6802,6 +7006,8 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[EXIT_REASON_MONITOR_INSTRUCTION] = handle_monitor,
[EXIT_REASON_INVEPT] = handle_invept,
[EXIT_REASON_INVVPID] = handle_invvpid,
+ [EXIT_REASON_XSAVES] = handle_xsaves,
+ [EXIT_REASON_XRSTORS] = handle_xrstors,
};
static const int kvm_vmx_max_exit_handlers =
@@ -7089,6 +7295,14 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
case EXIT_REASON_XSETBV:
return 1;
+ case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS:
+ /*
+ * This should never happen, since it is not possible to
+ * set XSS to a non-zero value---neither in L1 nor in L2.
+ * If it were, XSS would have to be checked against
+ * the XSS exit bitmap in vmcs12.
+ */
+ return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
default:
return 1;
}
@@ -7277,6 +7491,9 @@ static void vmx_set_rvi(int vector)
u16 status;
u8 old;
+ if (vector == -1)
+ vector = 0;
+
status = vmcs_read16(GUEST_INTR_STATUS);
old = (u8)status & 0xff;
if ((u8)vector != old) {
@@ -7288,22 +7505,23 @@ static void vmx_set_rvi(int vector)
static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
{
+ if (!is_guest_mode(vcpu)) {
+ vmx_set_rvi(max_irr);
+ return;
+ }
+
if (max_irr == -1)
return;
/*
- * If a vmexit is needed, vmx_check_nested_events handles it.
+ * In guest mode. If a vmexit is needed, vmx_check_nested_events
+ * handles it.
*/
- if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
+ if (nested_exit_on_intr(vcpu))
return;
- if (!is_guest_mode(vcpu)) {
- vmx_set_rvi(max_irr);
- return;
- }
-
/*
- * Fall back to pre-APICv interrupt injection since L2
+ * Else, fall back to pre-APICv interrupt injection since L2
* is run without virtual interrupt delivery.
*/
if (!kvm_event_needs_reinjection(vcpu) &&
@@ -7400,6 +7618,12 @@ static bool vmx_mpx_supported(void)
(vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_BNDCFGS);
}
+static bool vmx_xsaves_supported(void)
+{
+ return vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_XSAVES;
+}
+
static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
{
u32 exit_intr_info;
@@ -8135,6 +8359,8 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
+ if (nested_cpu_has_xsaves(vmcs12))
+ vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);
vmcs_write64(VMCS_LINK_POINTER, -1ull);
exec_control = vmcs12->pin_based_vm_exec_control;
@@ -8775,6 +9001,8 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
if (vmx_mpx_supported())
vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
+ if (nested_cpu_has_xsaves(vmcs12))
+ vmcs12->xss_exit_bitmap = vmcs_read64(XSS_EXIT_BITMAP);
/* update exit information fields: */
@@ -9176,6 +9404,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
.check_intercept = vmx_check_intercept,
.handle_external_intr = vmx_handle_external_intr,
.mpx_supported = vmx_mpx_supported,
+ .xsaves_supported = vmx_xsaves_supported,
.check_nested_events = vmx_check_nested_events,
@@ -9184,150 +9413,21 @@ static struct kvm_x86_ops vmx_x86_ops = {
static int __init vmx_init(void)
{
- int r, i, msr;
-
- rdmsrl_safe(MSR_EFER, &host_efer);
-
- for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i)
- kvm_define_shared_msr(i, vmx_msr_index[i]);
-
- vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_io_bitmap_a)
- return -ENOMEM;
-
- r = -ENOMEM;
-
- vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_io_bitmap_b)
- goto out;
-
- vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_legacy)
- goto out1;
-
- vmx_msr_bitmap_legacy_x2apic =
- (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_legacy_x2apic)
- goto out2;
-
- vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_longmode)
- goto out3;
-
- vmx_msr_bitmap_longmode_x2apic =
- (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_longmode_x2apic)
- goto out4;
- vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_vmread_bitmap)
- goto out5;
-
- vmx_vmwrite_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_vmwrite_bitmap)
- goto out6;
-
- memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
- memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
-
- /*
- * Allow direct access to the PC debug port (it is often used for I/O
- * delays, but the vmexits simply slow things down).
- */
- memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE);
- clear_bit(0x80, vmx_io_bitmap_a);
-
- memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);
-
- memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
- memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
-
- set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
-
- r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
- __alignof__(struct vcpu_vmx), THIS_MODULE);
+ int r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
+ __alignof__(struct vcpu_vmx), THIS_MODULE);
if (r)
- goto out7;
+ return r;
#ifdef CONFIG_KEXEC
rcu_assign_pointer(crash_vmclear_loaded_vmcss,
crash_vmclear_local_loaded_vmcss);
#endif
- vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
- vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
- vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
- vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
- vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
- vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
- vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
-
- memcpy(vmx_msr_bitmap_legacy_x2apic,
- vmx_msr_bitmap_legacy, PAGE_SIZE);
- memcpy(vmx_msr_bitmap_longmode_x2apic,
- vmx_msr_bitmap_longmode, PAGE_SIZE);
-
- if (enable_apicv) {
- for (msr = 0x800; msr <= 0x8ff; msr++)
- vmx_disable_intercept_msr_read_x2apic(msr);
-
- /* According SDM, in x2apic mode, the whole id reg is used.
- * But in KVM, it only use the highest eight bits. Need to
- * intercept it */
- vmx_enable_intercept_msr_read_x2apic(0x802);
- /* TMCCT */
- vmx_enable_intercept_msr_read_x2apic(0x839);
- /* TPR */
- vmx_disable_intercept_msr_write_x2apic(0x808);
- /* EOI */
- vmx_disable_intercept_msr_write_x2apic(0x80b);
- /* SELF-IPI */
- vmx_disable_intercept_msr_write_x2apic(0x83f);
- }
-
- if (enable_ept) {
- kvm_mmu_set_mask_ptes(0ull,
- (enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
- (enable_ept_ad_bits) ? VMX_EPT_DIRTY_BIT : 0ull,
- 0ull, VMX_EPT_EXECUTABLE_MASK);
- ept_set_mmio_spte_mask();
- kvm_enable_tdp();
- } else
- kvm_disable_tdp();
-
- update_ple_window_actual_max();
-
return 0;
-
-out7:
- free_page((unsigned long)vmx_vmwrite_bitmap);
-out6:
- free_page((unsigned long)vmx_vmread_bitmap);
-out5:
- free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
-out4:
- free_page((unsigned long)vmx_msr_bitmap_longmode);
-out3:
- free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
-out2:
- free_page((unsigned long)vmx_msr_bitmap_legacy);
-out1:
- free_page((unsigned long)vmx_io_bitmap_b);
-out:
- free_page((unsigned long)vmx_io_bitmap_a);
- return r;
}
static void __exit vmx_exit(void)
{
- free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
- free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
- free_page((unsigned long)vmx_msr_bitmap_legacy);
- free_page((unsigned long)vmx_msr_bitmap_longmode);
- free_page((unsigned long)vmx_io_bitmap_b);
- free_page((unsigned long)vmx_io_bitmap_a);
- free_page((unsigned long)vmx_vmwrite_bitmap);
- free_page((unsigned long)vmx_vmread_bitmap);
-
#ifdef CONFIG_KEXEC
RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
synchronize_rcu();
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0033df32a74..c259814200b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -27,6 +27,7 @@
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"
+#include "assigned-dev.h"
#include <linux/clocksource.h>
#include <linux/interrupt.h>
@@ -353,6 +354,8 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
if (!vcpu->arch.exception.pending) {
queue:
+ if (has_error && !is_protmode(vcpu))
+ has_error = false;
vcpu->arch.exception.pending = true;
vcpu->arch.exception.has_error_code = has_error;
vcpu->arch.exception.nr = nr;
@@ -455,6 +458,16 @@ bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
}
EXPORT_SYMBOL_GPL(kvm_require_cpl);
+bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
+{
+ if ((dr != 4 && dr != 5) || !kvm_read_cr4_bits(vcpu, X86_CR4_DE))
+ return true;
+
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return false;
+}
+EXPORT_SYMBOL_GPL(kvm_require_dr);
+
/*
* This function will be used to read from the physical memory of the currently
* running guest. The difference to kvm_read_guest_page is that this function
@@ -656,6 +669,12 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
if ((!(xcr0 & XSTATE_BNDREGS)) != (!(xcr0 & XSTATE_BNDCSR)))
return 1;
+ if (xcr0 & XSTATE_AVX512) {
+ if (!(xcr0 & XSTATE_YMM))
+ return 1;
+ if ((xcr0 & XSTATE_AVX512) != XSTATE_AVX512)
+ return 1;
+ }
kvm_put_guest_xcr0(vcpu);
vcpu->arch.xcr0 = xcr0;
@@ -732,6 +751,10 @@ EXPORT_SYMBOL_GPL(kvm_set_cr4);
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
+#ifdef CONFIG_X86_64
+ cr3 &= ~CR3_PCID_INVD;
+#endif
+
if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
kvm_mmu_sync_roots(vcpu);
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
@@ -811,8 +834,6 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
vcpu->arch.eff_db[dr] = val;
break;
case 4:
- if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
- return 1; /* #UD */
/* fall through */
case 6:
if (val & 0xffffffff00000000ULL)
@@ -821,8 +842,6 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
kvm_update_dr6(vcpu);
break;
case 5:
- if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
- return 1; /* #UD */
/* fall through */
default: /* 7 */
if (val & 0xffffffff00000000ULL)
@@ -837,27 +856,21 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
- int res;
-
- res = __kvm_set_dr(vcpu, dr, val);
- if (res > 0)
- kvm_queue_exception(vcpu, UD_VECTOR);
- else if (res < 0)
+ if (__kvm_set_dr(vcpu, dr, val)) {
kvm_inject_gp(vcpu, 0);
-
- return res;
+ return 1;
+ }
+ return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_dr);
-static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
+int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
switch (dr) {
case 0 ... 3:
*val = vcpu->arch.db[dr];
break;
case 4:
- if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
- return 1;
/* fall through */
case 6:
if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
@@ -866,23 +879,11 @@ static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
*val = kvm_x86_ops->get_dr6(vcpu);
break;
case 5:
- if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
- return 1;
/* fall through */
default: /* 7 */
*val = vcpu->arch.dr7;
break;
}
-
- return 0;
-}
-
-int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
-{
- if (_kvm_get_dr(vcpu, dr, val)) {
- kvm_queue_exception(vcpu, UD_VECTOR);
- return 1;
- }
return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_dr);
@@ -1237,21 +1238,22 @@ void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
bool vcpus_matched;
- bool do_request = false;
struct kvm_arch *ka = &vcpu->kvm->arch;
struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
atomic_read(&vcpu->kvm->online_vcpus));
- if (vcpus_matched && gtod->clock.vclock_mode == VCLOCK_TSC)
- if (!ka->use_master_clock)
- do_request = 1;
-
- if (!vcpus_matched && ka->use_master_clock)
- do_request = 1;
-
- if (do_request)
+ /*
+	 * Once the masterclock is enabled, always perform the request in
+	 * order to update it.
+	 *
+	 * In order to enable the masterclock, the host clocksource must be TSC
+	 * and the vcpus need to have matched TSCs.  When that happens,
+	 * perform the request to enable the masterclock.
+ */
+ if (ka->use_master_clock ||
+ (gtod->clock.vclock_mode == VCLOCK_TSC && vcpus_matched))
kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc,
@@ -1637,16 +1639,16 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
vcpu->last_guest_tsc = tsc_timestamp;
+ if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
+ &guest_hv_clock, sizeof(guest_hv_clock))))
+ return 0;
+
/*
* The interface expects us to write an even number signaling that the
* update is finished. Since the guest won't see the intermediate
* state, we just increase by 2 at the end.
*/
- vcpu->hv_clock.version += 2;
-
- if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
- &guest_hv_clock, sizeof(guest_hv_clock))))
- return 0;
+ vcpu->hv_clock.version = guest_hv_clock.version + 2;
/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
@@ -1662,6 +1664,8 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
vcpu->hv_clock.flags = pvclock_flags;
+ trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
+
kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
&vcpu->hv_clock,
sizeof(vcpu->hv_clock));
@@ -2140,7 +2144,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_IA32_TSC_ADJUST:
if (guest_cpuid_has_tsc_adjust(vcpu)) {
if (!msr_info->host_initiated) {
- u64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
+ s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true);
}
vcpu->arch.ia32_tsc_adjust_msr = data;
@@ -3106,7 +3110,7 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
unsigned long val;
memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
- _kvm_get_dr(vcpu, 6, &val);
+ kvm_get_dr(vcpu, 6, &val);
dbgregs->dr6 = val;
dbgregs->dr7 = vcpu->arch.dr7;
dbgregs->flags = 0;
@@ -3128,15 +3132,89 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
return 0;
}
+#define XSTATE_COMPACTION_ENABLED (1ULL << 63)
+
+static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
+{
+ struct xsave_struct *xsave = &vcpu->arch.guest_fpu.state->xsave;
+ u64 xstate_bv = xsave->xsave_hdr.xstate_bv;
+ u64 valid;
+
+ /*
+ * Copy legacy XSAVE area, to avoid complications with CPUID
+ * leaves 0 and 1 in the loop below.
+ */
+ memcpy(dest, xsave, XSAVE_HDR_OFFSET);
+
+ /* Set XSTATE_BV */
+ *(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;
+
+ /*
+ * Copy each region from the possibly compacted offset to the
+ * non-compacted offset.
+ */
+ valid = xstate_bv & ~XSTATE_FPSSE;
+ while (valid) {
+ u64 feature = valid & -valid;
+ int index = fls64(feature) - 1;
+ void *src = get_xsave_addr(xsave, feature);
+
+ if (src) {
+ u32 size, offset, ecx, edx;
+ cpuid_count(XSTATE_CPUID, index,
+ &size, &offset, &ecx, &edx);
+ memcpy(dest + offset, src, size);
+ }
+
+ valid -= feature;
+ }
+}
+
+static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
+{
+ struct xsave_struct *xsave = &vcpu->arch.guest_fpu.state->xsave;
+ u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET);
+ u64 valid;
+
+ /*
+ * Copy legacy XSAVE area, to avoid complications with CPUID
+ * leaves 0 and 1 in the loop below.
+ */
+ memcpy(xsave, src, XSAVE_HDR_OFFSET);
+
+ /* Set XSTATE_BV and possibly XCOMP_BV. */
+ xsave->xsave_hdr.xstate_bv = xstate_bv;
+ if (cpu_has_xsaves)
+ xsave->xsave_hdr.xcomp_bv = host_xcr0 | XSTATE_COMPACTION_ENABLED;
+
+ /*
+ * Copy each region from the non-compacted offset to the
+ * possibly compacted offset.
+ */
+ valid = xstate_bv & ~XSTATE_FPSSE;
+ while (valid) {
+ u64 feature = valid & -valid;
+ int index = fls64(feature) - 1;
+ void *dest = get_xsave_addr(xsave, feature);
+
+ if (dest) {
+ u32 size, offset, ecx, edx;
+ cpuid_count(XSTATE_CPUID, index,
+ &size, &offset, &ecx, &edx);
+ memcpy(dest, src + offset, size);
+ } else
+ WARN_ON_ONCE(1);
+
+ valid -= feature;
+ }
+}
+
static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
struct kvm_xsave *guest_xsave)
{
if (cpu_has_xsave) {
- memcpy(guest_xsave->region,
- &vcpu->arch.guest_fpu.state->xsave,
- vcpu->arch.guest_xstate_size);
- *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] &=
- vcpu->arch.guest_supported_xcr0 | XSTATE_FPSSE;
+ memset(guest_xsave, 0, sizeof(struct kvm_xsave));
+ fill_xsave((u8 *) guest_xsave->region, vcpu);
} else {
memcpy(guest_xsave->region,
&vcpu->arch.guest_fpu.state->fxsave,
@@ -3160,8 +3238,7 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
*/
if (xstate_bv & ~kvm_supported_xcr0())
return -EINVAL;
- memcpy(&vcpu->arch.guest_fpu.state->xsave,
- guest_xsave->region, vcpu->arch.guest_xstate_size);
+ load_xsave(vcpu, (u8 *)guest_xsave->region);
} else {
if (xstate_bv & ~XSTATE_FPSSE)
return -EINVAL;
@@ -4004,7 +4081,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
}
default:
- ;
+ r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
}
out:
return r;
@@ -4667,7 +4744,7 @@ static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
{
- return _kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);
+ return kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);
}
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
@@ -5211,21 +5288,17 @@ static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflag
static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
{
- struct kvm_run *kvm_run = vcpu->run;
- unsigned long eip = vcpu->arch.emulate_ctxt.eip;
- u32 dr6 = 0;
-
if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
(vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) {
- dr6 = kvm_vcpu_check_hw_bp(eip, 0,
+ struct kvm_run *kvm_run = vcpu->run;
+ unsigned long eip = kvm_get_linear_rip(vcpu);
+ u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
vcpu->arch.guest_debug_dr7,
vcpu->arch.eff_db);
if (dr6 != 0) {
kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM;
- kvm_run->debug.arch.pc = kvm_rip_read(vcpu) +
- get_segment_base(vcpu, VCPU_SREG_CS);
-
+ kvm_run->debug.arch.pc = eip;
kvm_run->debug.arch.exception = DB_VECTOR;
kvm_run->exit_reason = KVM_EXIT_DEBUG;
*r = EMULATE_USER_EXIT;
@@ -5235,7 +5308,8 @@ static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) &&
!(kvm_get_rflags(vcpu) & X86_EFLAGS_RF)) {
- dr6 = kvm_vcpu_check_hw_bp(eip, 0,
+ unsigned long eip = kvm_get_linear_rip(vcpu);
+ u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
vcpu->arch.dr7,
vcpu->arch.db);
@@ -5365,7 +5439,9 @@ restart:
kvm_rip_write(vcpu, ctxt->eip);
if (r == EMULATE_DONE)
kvm_vcpu_check_singlestep(vcpu, rflags, &r);
- __kvm_set_rflags(vcpu, ctxt->eflags);
+ if (!ctxt->have_exception ||
+ exception_type(ctxt->exception.vector) == EXCPT_TRAP)
+ __kvm_set_rflags(vcpu, ctxt->eflags);
/*
* For STI, interrupts are shadowed; so KVM_REQ_EVENT will
@@ -5965,6 +6041,12 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
__kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) |
X86_EFLAGS_RF);
+ if (vcpu->arch.exception.nr == DB_VECTOR &&
+ (vcpu->arch.dr7 & DR7_GD)) {
+ vcpu->arch.dr7 &= ~DR7_GD;
+ kvm_update_dr7(vcpu);
+ }
+
kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
vcpu->arch.exception.has_error_code,
vcpu->arch.exception.error_code,
@@ -6873,6 +6955,9 @@ int fx_init(struct kvm_vcpu *vcpu)
return err;
fpu_finit(&vcpu->arch.guest_fpu);
+ if (cpu_has_xsaves)
+ vcpu->arch.guest_fpu.state->xsave.xsave_hdr.xcomp_bv =
+ host_xcr0 | XSTATE_COMPACTION_ENABLED;
/*
* Ensure guest xcr0 is valid for loading
@@ -7024,7 +7109,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu)
kvm_x86_ops->vcpu_reset(vcpu);
}
-void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, unsigned int vector)
+void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
{
struct kvm_segment cs;
@@ -7256,6 +7341,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
if (type)
return -EINVAL;
+ INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
@@ -7536,12 +7622,18 @@ int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
return kvm_x86_ops->interrupt_allowed(vcpu);
}
-bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
+unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu)
{
- unsigned long current_rip = kvm_rip_read(vcpu) +
- get_segment_base(vcpu, VCPU_SREG_CS);
+ if (is_64_bit_mode(vcpu))
+ return kvm_rip_read(vcpu);
+ return (u32)(get_segment_base(vcpu, VCPU_SREG_CS) +
+ kvm_rip_read(vcpu));
+}
+EXPORT_SYMBOL_GPL(kvm_get_linear_rip);
- return current_rip == linear_rip;
+bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
+{
+ return kvm_get_linear_rip(vcpu) == linear_rip;
}
EXPORT_SYMBOL_GPL(kvm_is_linear_rip);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 7cb9c45a5fe..cc1d61af614 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -162,7 +162,8 @@ int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
#define KVM_SUPPORTED_XCR0 (XSTATE_FP | XSTATE_SSE | XSTATE_YMM \
- | XSTATE_BNDREGS | XSTATE_BNDCSR)
+ | XSTATE_BNDREGS | XSTATE_BNDCSR \
+ | XSTATE_AVX512)
extern u64 host_xcr0;
extern u64 kvm_supported_xcr0(void);
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index aae94132bc2..c1c1544b848 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -841,7 +841,7 @@ static void __init lguest_init_IRQ(void)
{
unsigned int i;
- for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
+ for (i = FIRST_EXTERNAL_VECTOR; i < FIRST_SYSTEM_VECTOR; i++) {
/* Some systems map "vectors" to interrupts weirdly. Not us! */
__this_cpu_write(vector_irq[i], i - FIRST_EXTERNAL_VECTOR);
if (i != SYSCALL_VECTOR)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index d973e61e450..38dcec403b4 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -844,11 +844,8 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
unsigned int fault)
{
struct task_struct *tsk = current;
- struct mm_struct *mm = tsk->mm;
int code = BUS_ADRERR;
- up_read(&mm->mmap_sem);
-
/* Kernel mode? Handle exceptions or die: */
if (!(error_code & PF_USER)) {
no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
@@ -879,7 +876,6 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
unsigned long address, unsigned int fault)
{
if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
- up_read(&current->mm->mmap_sem);
no_context(regs, error_code, address, 0, 0);
return;
}
@@ -887,14 +883,11 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
if (fault & VM_FAULT_OOM) {
/* Kernel mode? Handle exceptions or die: */
if (!(error_code & PF_USER)) {
- up_read(&current->mm->mmap_sem);
no_context(regs, error_code, address,
SIGSEGV, SEGV_MAPERR);
return;
}
- up_read(&current->mm->mmap_sem);
-
/*
* We ran out of memory, call the OOM killer, and return the
* userspace (which will retry the fault, or kill us if we got
@@ -1062,7 +1055,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
struct vm_area_struct *vma;
struct task_struct *tsk;
struct mm_struct *mm;
- int fault;
+ int fault, major = 0;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
tsk = current;
@@ -1237,47 +1230,50 @@ good_area:
* we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
*/
fault = handle_mm_fault(mm, vma, address, flags);
+ major |= fault & VM_FAULT_MAJOR;
/*
- * If we need to retry but a fatal signal is pending, handle the
- * signal first. We do not need to release the mmap_sem because it
- * would already be released in __lock_page_or_retry in mm/filemap.c.
+	 * If we need to retry, the mmap_sem has already been released,
+	 * and if there is a fatal signal pending, there is no guarantee
+ * that we made any progress. Handle this case first.
*/
- if (unlikely((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)))
+ if (unlikely(fault & VM_FAULT_RETRY)) {
+ /* Retry at most once */
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ flags |= FAULT_FLAG_TRIED;
+ if (!fatal_signal_pending(tsk))
+ goto retry;
+ }
+
+ /* User mode? Just return to handle the fatal exception */
+ if (flags & FAULT_FLAG_USER)
+ return;
+
+ /* Not returning to user mode? Handle exceptions or die: */
+ no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
return;
+ }
+ up_read(&mm->mmap_sem);
if (unlikely(fault & VM_FAULT_ERROR)) {
mm_fault_error(regs, error_code, address, fault);
return;
}
/*
- * Major/minor page fault accounting is only done on the
- * initial attempt. If we go through a retry, it is extremely
- * likely that the page will be found in page cache at that point.
+ * Major/minor page fault accounting. If any of the events
+ * returned VM_FAULT_MAJOR, we account it as a major fault.
*/
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
- if (fault & VM_FAULT_MAJOR) {
- tsk->maj_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
- regs, address);
- } else {
- tsk->min_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
- regs, address);
- }
- if (fault & VM_FAULT_RETRY) {
- /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
- * of starvation. */
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
- flags |= FAULT_FLAG_TRIED;
- goto retry;
- }
+ if (major) {
+ tsk->maj_flt++;
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
+ } else {
+ tsk->min_flt++;
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
}
check_v8086_mode(regs, address, tsk);
-
- up_read(&mm->mmap_sem);
}
NOKPROBE_SYMBOL(__do_page_fault);
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index dfaf2e0f5f8..536ea2fb6e3 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -384,6 +384,26 @@ static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
}
/*
+ * Lookup the PMD entry for a virtual address. Return a pointer to the entry
+ * or NULL if not present.
+ */
+pmd_t *lookup_pmd_address(unsigned long address)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+
+ pgd = pgd_offset_k(address);
+ if (pgd_none(*pgd))
+ return NULL;
+
+ pud = pud_offset(pgd, address);
+ if (pud_none(*pud) || pud_large(*pud) || !pud_present(*pud))
+ return NULL;
+
+ return pmd_offset(pud, address);
+}
+
+/*
* This is necessary because __pa() does not work on some
* kinds of memory, like vmalloc() or the alloc_remap()
* areas on 32-bit NUMA systems. The percpu areas can
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
index b9958c36407..44b9271580b 100644
--- a/arch/x86/pci/intel_mid_pci.c
+++ b/arch/x86/pci/intel_mid_pci.c
@@ -210,6 +210,9 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
{
int polarity;
+ if (dev->irq_managed && dev->irq > 0)
+ return 0;
+
if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER)
polarity = 0; /* active high */
else
@@ -224,13 +227,18 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
if (mp_map_gsi_to_irq(dev->irq, IOAPIC_MAP_ALLOC) < 0)
return -EBUSY;
+ dev->irq_managed = 1;
+
return 0;
}
static void intel_mid_pci_irq_disable(struct pci_dev *dev)
{
- if (!mp_should_keep_irq(&dev->dev) && dev->irq > 0)
+ if (!mp_should_keep_irq(&dev->dev) && dev->irq_managed &&
+ dev->irq > 0) {
mp_unmap_irq(dev->irq);
+ dev->irq_managed = 0;
+ }
}
struct pci_ops intel_mid_pci_ops = {
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index eb500c2592a..5dc6ca5e174 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -1200,11 +1200,12 @@ static int pirq_enable_irq(struct pci_dev *dev)
#ifdef CONFIG_X86_IO_APIC
struct pci_dev *temp_dev;
int irq;
- struct io_apic_irq_attr irq_attr;
+
+ if (dev->irq_managed && dev->irq > 0)
+ return 0;
irq = IO_APIC_get_PCI_irq_vector(dev->bus->number,
- PCI_SLOT(dev->devfn),
- pin - 1, &irq_attr);
+ PCI_SLOT(dev->devfn), pin - 1);
/*
* Busses behind bridges are typically not listed in the MP-table.
* In this case we have to look up the IRQ based on the parent bus,
@@ -1218,7 +1219,7 @@ static int pirq_enable_irq(struct pci_dev *dev)
pin = pci_swizzle_interrupt_pin(dev, pin);
irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number,
PCI_SLOT(bridge->devfn),
- pin - 1, &irq_attr);
+ pin - 1);
if (irq >= 0)
dev_warn(&dev->dev, "using bridge %s "
"INT %c to get IRQ %d\n",
@@ -1228,6 +1229,7 @@ static int pirq_enable_irq(struct pci_dev *dev)
}
dev = temp_dev;
if (irq >= 0) {
+ dev->irq_managed = 1;
dev->irq = irq;
dev_info(&dev->dev, "PCI->APIC IRQ transform: "
"INT %c -> IRQ %d\n", 'A' + pin - 1, irq);
@@ -1254,11 +1256,24 @@ static int pirq_enable_irq(struct pci_dev *dev)
return 0;
}
+bool mp_should_keep_irq(struct device *dev)
+{
+ if (dev->power.is_prepared)
+ return true;
+#ifdef CONFIG_PM
+ if (dev->power.runtime_status == RPM_SUSPENDING)
+ return true;
+#endif
+
+ return false;
+}
+
static void pirq_disable_irq(struct pci_dev *dev)
{
if (io_apic_assign_pci_irqs && !mp_should_keep_irq(&dev->dev) &&
- dev->irq) {
+ dev->irq_managed && dev->irq) {
mp_unmap_irq(dev->irq);
dev->irq = 0;
+ dev->irq_managed = 0;
}
}
diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c
index b233681af4d..0ce67364543 100644
--- a/arch/x86/platform/uv/uv_irq.c
+++ b/arch/x86/platform/uv/uv_irq.c
@@ -131,7 +131,7 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
unsigned long mmr_offset, int limit)
{
const struct cpumask *eligible_cpu = cpumask_of(cpu);
- struct irq_cfg *cfg = irq_get_chip_data(irq);
+ struct irq_cfg *cfg = irq_cfg(irq);
unsigned long mmr_value;
struct uv_IO_APIC_route_entry *entry;
int mmr_pnode, err;
@@ -198,13 +198,13 @@ static int
uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
bool force)
{
- struct irq_cfg *cfg = data->chip_data;
+ struct irq_cfg *cfg = irqd_cfg(data);
unsigned int dest;
unsigned long mmr_value, mmr_offset;
struct uv_IO_APIC_route_entry *entry;
int mmr_pnode;
- if (__ioapic_set_affinity(data, mask, &dest))
+ if (apic_set_affinity(data, mask, &dest))
return -1;
mmr_value = 0;
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 8c8298d7818..5c1f9ace7ae 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -387,7 +387,7 @@ static pteval_t pte_pfn_to_mfn(pteval_t val)
unsigned long mfn;
if (!xen_feature(XENFEAT_auto_translated_physmap))
- mfn = get_phys_to_machine(pfn);
+ mfn = __pfn_to_mfn(pfn);
else
mfn = pfn;
/*
@@ -1113,20 +1113,16 @@ static void __init xen_cleanhighmap(unsigned long vaddr,
* instead of somewhere later and be confusing. */
xen_mc_flush();
}
-static void __init xen_pagetable_p2m_copy(void)
+
+static void __init xen_pagetable_p2m_free(void)
{
unsigned long size;
unsigned long addr;
- unsigned long new_mfn_list;
-
- if (xen_feature(XENFEAT_auto_translated_physmap))
- return;
size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
- new_mfn_list = xen_revector_p2m_tree();
/* No memory or already called. */
- if (!new_mfn_list || new_mfn_list == xen_start_info->mfn_list)
+ if ((unsigned long)xen_p2m_addr == xen_start_info->mfn_list)
return;
/* using __ka address and sticking INVALID_P2M_ENTRY! */
@@ -1144,8 +1140,6 @@ static void __init xen_pagetable_p2m_copy(void)
size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
memblock_free(__pa(xen_start_info->mfn_list), size);
- /* And revector! Bye bye old array */
- xen_start_info->mfn_list = new_mfn_list;
/* At this stage, cleanup_highmap has already cleaned __ka space
* from _brk_limit way up to the max_pfn_mapped (which is the end of
@@ -1169,17 +1163,35 @@ static void __init xen_pagetable_p2m_copy(void)
}
#endif
-static void __init xen_pagetable_init(void)
+static void __init xen_pagetable_p2m_setup(void)
{
- paging_init();
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return;
+
+ xen_vmalloc_p2m_tree();
+
#ifdef CONFIG_X86_64
- xen_pagetable_p2m_copy();
+ xen_pagetable_p2m_free();
#endif
+ /* And revector! Bye bye old array */
+ xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
+}
+
+static void __init xen_pagetable_init(void)
+{
+ paging_init();
+ xen_post_allocator_init();
+
+ xen_pagetable_p2m_setup();
+
/* Allocate and initialize top and mid mfn levels for p2m structure */
xen_build_mfn_list_list();
+ /* Remap memory freed due to conflicts with E820 map */
+ if (!xen_feature(XENFEAT_auto_translated_physmap))
+ xen_remap_memory();
+
xen_setup_shared_info();
- xen_post_allocator_init();
}
static void xen_write_cr2(unsigned long cr2)
{
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index b456b048eca..edbc7a63fd7 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -3,21 +3,22 @@
* guests themselves, but it must also access and update the p2m array
* during suspend/resume when all the pages are reallocated.
*
- * The p2m table is logically a flat array, but we implement it as a
- * three-level tree to allow the address space to be sparse.
+ * The logical flat p2m table is mapped to a linear kernel memory area.
+ * For accesses by Xen a three-level tree linked via mfns only is set up to
+ * allow the address space to be sparse.
*
- * Xen
- * |
- * p2m_top p2m_top_mfn
- * / \ / \
- * p2m_mid p2m_mid p2m_mid_mfn p2m_mid_mfn
- * / \ / \ / /
- * p2m p2m p2m p2m p2m p2m p2m ...
+ * Xen
+ * |
+ * p2m_top_mfn
+ * / \
+ * p2m_mid_mfn p2m_mid_mfn
+ * / /
+ * p2m p2m p2m ...
*
* The p2m_mid_mfn pages are mapped by p2m_top_mfn_p.
*
- * The p2m_top and p2m_top_mfn levels are limited to 1 page, so the
- * maximum representable pseudo-physical address space is:
+ * The p2m_top_mfn level is limited to 1 page, so the maximum representable
+ * pseudo-physical address space is:
* P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE pages
*
* P2M_PER_PAGE depends on the architecture, as a mfn is always
@@ -30,6 +31,9 @@
* leaf entries, or for the top root, or middle one, for which there is a void
* entry, we assume it is "missing". So (for example)
* pfn_to_mfn(0x90909090)=INVALID_P2M_ENTRY.
+ * We have a dedicated page p2m_missing with all entries being
+ * INVALID_P2M_ENTRY. This page may be referenced multiple times in the p2m
+ * list/tree in case there are multiple areas with P2M_PER_PAGE invalid pfns.
*
* We also have the possibility of setting 1-1 mappings on certain regions, so
* that:
@@ -39,122 +43,20 @@
* PCI BARs, or ACPI spaces), we can create mappings easily because we
* get the PFN value to match the MFN.
*
- * For this to work efficiently we have one new page p2m_identity and
- * allocate (via reserved_brk) any other pages we need to cover the sides
- * (1GB or 4MB boundary violations). All entries in p2m_identity are set to
- * INVALID_P2M_ENTRY type (Xen toolstack only recognizes that and MFNs,
- * no other fancy value).
+ * For this to work efficiently we have one new page p2m_identity. All entries
+ * in p2m_identity are set to INVALID_P2M_ENTRY type (Xen toolstack only
+ * recognizes that and MFNs, no other fancy value).
*
* On lookup we spot that the entry points to p2m_identity and return the
* identity value instead of dereferencing and returning INVALID_P2M_ENTRY.
* If the entry points to an allocated page, we just proceed as before and
- * return the PFN. If the PFN has IDENTITY_FRAME_BIT set we unmask that in
+ * return the PFN. If the PFN has IDENTITY_FRAME_BIT set we unmask that in
* appropriate functions (pfn_to_mfn).
*
* The reason for having the IDENTITY_FRAME_BIT instead of just returning the
* PFN is that we could find ourselves where pfn_to_mfn(pfn)==pfn for a
* non-identity pfn. To protect ourselves against we elect to set (and get) the
* IDENTITY_FRAME_BIT on all identity mapped PFNs.
- *
- * This simplistic diagram is used to explain the more subtle piece of code.
- * There is also a digram of the P2M at the end that can help.
- * Imagine your E820 looking as so:
- *
- * 1GB 2GB 4GB
- * /-------------------+---------\/----\ /----------\ /---+-----\
- * | System RAM | Sys RAM ||ACPI| | reserved | | Sys RAM |
- * \-------------------+---------/\----/ \----------/ \---+-----/
- * ^- 1029MB ^- 2001MB
- *
- * [1029MB = 263424 (0x40500), 2001MB = 512256 (0x7D100),
- * 2048MB = 524288 (0x80000)]
- *
- * And dom0_mem=max:3GB,1GB is passed in to the guest, meaning memory past 1GB
- * is actually not present (would have to kick the balloon driver to put it in).
- *
- * When we are told to set the PFNs for identity mapping (see patch: "xen/setup:
- * Set identity mapping for non-RAM E820 and E820 gaps.") we pass in the start
- * of the PFN and the end PFN (263424 and 512256 respectively). The first step
- * is to reserve_brk a top leaf page if the p2m[1] is missing. The top leaf page
- * covers 512^2 of page estate (1GB) and in case the start or end PFN is not
- * aligned on 512^2*PAGE_SIZE (1GB) we reserve_brk new middle and leaf pages as
- * required to split any existing p2m_mid_missing middle pages.
- *
- * With the E820 example above, 263424 is not 1GB aligned so we allocate a
- * reserve_brk page which will cover the PFNs estate from 0x40000 to 0x80000.
- * Each entry in the allocate page is "missing" (points to p2m_missing).
- *
- * Next stage is to determine if we need to do a more granular boundary check
- * on the 4MB (or 2MB depending on architecture) off the start and end pfn's.
- * We check if the start pfn and end pfn violate that boundary check, and if
- * so reserve_brk a (p2m[x][y]) leaf page. This way we have a much finer
- * granularity of setting which PFNs are missing and which ones are identity.
- * In our example 263424 and 512256 both fail the check so we reserve_brk two
- * pages. Populate them with INVALID_P2M_ENTRY (so they both have "missing"
- * values) and assign them to p2m[1][2] and p2m[1][488] respectively.
- *
- * At this point we would at minimum reserve_brk one page, but could be up to
- * three. Each call to set_phys_range_identity has at maximum a three page
- * cost. If we were to query the P2M at this stage, all those entries from
- * start PFN through end PFN (so 1029MB -> 2001MB) would return
- * INVALID_P2M_ENTRY ("missing").
- *
- * The next step is to walk from the start pfn to the end pfn setting
- * the IDENTITY_FRAME_BIT on each PFN. This is done in set_phys_range_identity.
- * If we find that the middle entry is pointing to p2m_missing we can swap it
- * over to p2m_identity - this way covering 4MB (or 2MB) PFN space (and
- * similarly swapping p2m_mid_missing for p2m_mid_identity for larger regions).
- * At this point we do not need to worry about boundary aligment (so no need to
- * reserve_brk a middle page, figure out which PFNs are "missing" and which
- * ones are identity), as that has been done earlier. If we find that the
- * middle leaf is not occupied by p2m_identity or p2m_missing, we dereference
- * that page (which covers 512 PFNs) and set the appropriate PFN with
- * IDENTITY_FRAME_BIT. In our example 263424 and 512256 end up there, and we
- * set from p2m[1][2][256->511] and p2m[1][488][0->256] with
- * IDENTITY_FRAME_BIT set.
- *
- * All other regions that are void (or not filled) either point to p2m_missing
- * (considered missing) or have the default value of INVALID_P2M_ENTRY (also
- * considered missing). In our case, p2m[1][2][0->255] and p2m[1][488][257->511]
- * contain the INVALID_P2M_ENTRY value and are considered "missing."
- *
- * Finally, the region beyond the end of of the E820 (4 GB in this example)
- * is set to be identity (in case there are MMIO regions placed here).
- *
- * This is what the p2m ends up looking (for the E820 above) with this
- * fabulous drawing:
- *
- * p2m /--------------\
- * /-----\ | &mfn_list[0],| /-----------------\
- * | 0 |------>| &mfn_list[1],| /---------------\ | ~0, ~0, .. |
- * |-----| | ..., ~0, ~0 | | ~0, ~0, [x]---+----->| IDENTITY [@256] |
- * | 1 |---\ \--------------/ | [p2m_identity]+\ | IDENTITY [@257] |
- * |-----| \ | [p2m_identity]+\\ | .... |
- * | 2 |--\ \-------------------->| ... | \\ \----------------/
- * |-----| \ \---------------/ \\
- * | 3 |-\ \ \\ p2m_identity [1]
- * |-----| \ \-------------------->/---------------\ /-----------------\
- * | .. |\ | | [p2m_identity]+-->| ~0, ~0, ~0, ... |
- * \-----/ | | | [p2m_identity]+-->| ..., ~0 |
- * | | | .... | \-----------------/
- * | | +-[x], ~0, ~0.. +\
- * | | \---------------/ \
- * | | \-> /---------------\
- * | V p2m_mid_missing p2m_missing | IDENTITY[@0] |
- * | /-----------------\ /------------\ | IDENTITY[@256]|
- * | | [p2m_missing] +---->| ~0, ~0, ...| | ~0, ~0, .... |
- * | | [p2m_missing] +---->| ..., ~0 | \---------------/
- * | | ... | \------------/
- * | \-----------------/
- * |
- * | p2m_mid_identity
- * | /-----------------\
- * \-->| [p2m_identity] +---->[1]
- * | [p2m_identity] +---->[1]
- * | ... |
- * \-----------------/
- *
- * where ~0 is INVALID_P2M_ENTRY. IDENTITY is (PFN | IDENTITY_BIT)
*/
#include <linux/init.h>
@@ -164,9 +66,11 @@
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/bootmem.h>
+#include <linux/slab.h>
#include <asm/cache.h>
#include <asm/setup.h>
+#include <asm/uaccess.h>
#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
@@ -178,31 +82,26 @@
#include "multicalls.h"
#include "xen-ops.h"
+#define PMDS_PER_MID_PAGE (P2M_MID_PER_PAGE / PTRS_PER_PTE)
+
static void __init m2p_override_init(void);
+unsigned long *xen_p2m_addr __read_mostly;
+EXPORT_SYMBOL_GPL(xen_p2m_addr);
+unsigned long xen_p2m_size __read_mostly;
+EXPORT_SYMBOL_GPL(xen_p2m_size);
unsigned long xen_max_p2m_pfn __read_mostly;
+EXPORT_SYMBOL_GPL(xen_max_p2m_pfn);
+
+static DEFINE_SPINLOCK(p2m_update_lock);
static unsigned long *p2m_mid_missing_mfn;
static unsigned long *p2m_top_mfn;
static unsigned long **p2m_top_mfn_p;
-
-/* Placeholders for holes in the address space */
-static RESERVE_BRK_ARRAY(unsigned long, p2m_missing, P2M_PER_PAGE);
-static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_missing, P2M_MID_PER_PAGE);
-
-static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE);
-
-static RESERVE_BRK_ARRAY(unsigned long, p2m_identity, P2M_PER_PAGE);
-static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_identity, P2M_MID_PER_PAGE);
-
-RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
-
-/* For each I/O range remapped we may lose up to two leaf pages for the boundary
- * violations and three mid pages to cover up to 3GB. With
- * early_can_reuse_p2m_middle() most of the leaf pages will be reused by the
- * remapped region.
- */
-RESERVE_BRK(p2m_identity_remap, PAGE_SIZE * 2 * 3 * MAX_REMAP_RANGES);
+static unsigned long *p2m_missing;
+static unsigned long *p2m_identity;
+static pte_t *p2m_missing_pte;
+static pte_t *p2m_identity_pte;
static inline unsigned p2m_top_index(unsigned long pfn)
{
@@ -220,14 +119,6 @@ static inline unsigned p2m_index(unsigned long pfn)
return pfn % P2M_PER_PAGE;
}
-static void p2m_top_init(unsigned long ***top)
-{
- unsigned i;
-
- for (i = 0; i < P2M_TOP_PER_PAGE; i++)
- top[i] = p2m_mid_missing;
-}
-
static void p2m_top_mfn_init(unsigned long *top)
{
unsigned i;
@@ -244,28 +135,43 @@ static void p2m_top_mfn_p_init(unsigned long **top)
top[i] = p2m_mid_missing_mfn;
}
-static void p2m_mid_init(unsigned long **mid, unsigned long *leaf)
+static void p2m_mid_mfn_init(unsigned long *mid, unsigned long *leaf)
{
unsigned i;
for (i = 0; i < P2M_MID_PER_PAGE; i++)
- mid[i] = leaf;
+ mid[i] = virt_to_mfn(leaf);
}
-static void p2m_mid_mfn_init(unsigned long *mid, unsigned long *leaf)
+static void p2m_init(unsigned long *p2m)
{
unsigned i;
- for (i = 0; i < P2M_MID_PER_PAGE; i++)
- mid[i] = virt_to_mfn(leaf);
+ for (i = 0; i < P2M_PER_PAGE; i++)
+ p2m[i] = INVALID_P2M_ENTRY;
}
-static void p2m_init(unsigned long *p2m)
+static void p2m_init_identity(unsigned long *p2m, unsigned long pfn)
{
unsigned i;
- for (i = 0; i < P2M_MID_PER_PAGE; i++)
- p2m[i] = INVALID_P2M_ENTRY;
+ for (i = 0; i < P2M_PER_PAGE; i++)
+ p2m[i] = IDENTITY_FRAME(pfn + i);
+}
+
+static void * __ref alloc_p2m_page(void)
+{
+ if (unlikely(!slab_is_available()))
+ return alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);
+
+ return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
+}
+
+/* Only to be called in case of a race for a page just allocated! */
+static void free_p2m_page(void *p)
+{
+ BUG_ON(!slab_is_available());
+ free_page((unsigned long)p);
}
/*
@@ -280,40 +186,46 @@ static void p2m_init(unsigned long *p2m)
*/
void __ref xen_build_mfn_list_list(void)
{
- unsigned long pfn;
+ unsigned long pfn, mfn;
+ pte_t *ptep;
+ unsigned int level, topidx, mididx;
+ unsigned long *mid_mfn_p;
if (xen_feature(XENFEAT_auto_translated_physmap))
return;
/* Pre-initialize p2m_top_mfn to be completely missing */
if (p2m_top_mfn == NULL) {
- p2m_mid_missing_mfn = alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);
+ p2m_mid_missing_mfn = alloc_p2m_page();
p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing);
- p2m_top_mfn_p = alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);
+ p2m_top_mfn_p = alloc_p2m_page();
p2m_top_mfn_p_init(p2m_top_mfn_p);
- p2m_top_mfn = alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);
+ p2m_top_mfn = alloc_p2m_page();
p2m_top_mfn_init(p2m_top_mfn);
} else {
/* Reinitialise, mfn's all change after migration */
p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing);
}
- for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += P2M_PER_PAGE) {
- unsigned topidx = p2m_top_index(pfn);
- unsigned mididx = p2m_mid_index(pfn);
- unsigned long **mid;
- unsigned long *mid_mfn_p;
+ for (pfn = 0; pfn < xen_max_p2m_pfn && pfn < MAX_P2M_PFN;
+ pfn += P2M_PER_PAGE) {
+ topidx = p2m_top_index(pfn);
+ mididx = p2m_mid_index(pfn);
- mid = p2m_top[topidx];
mid_mfn_p = p2m_top_mfn_p[topidx];
+ ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn),
+ &level);
+ BUG_ON(!ptep || level != PG_LEVEL_4K);
+ mfn = pte_mfn(*ptep);
+ ptep = (pte_t *)((unsigned long)ptep & ~(PAGE_SIZE - 1));
/* Don't bother allocating any mfn mid levels if
* they're just missing, just update the stored mfn,
* since all could have changed over a migrate.
*/
- if (mid == p2m_mid_missing) {
+ if (ptep == p2m_missing_pte || ptep == p2m_identity_pte) {
BUG_ON(mididx);
BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
p2m_top_mfn[topidx] = virt_to_mfn(p2m_mid_missing_mfn);
@@ -322,19 +234,14 @@ void __ref xen_build_mfn_list_list(void)
}
if (mid_mfn_p == p2m_mid_missing_mfn) {
- /*
- * XXX boot-time only! We should never find
- * missing parts of the mfn tree after
- * runtime.
- */
- mid_mfn_p = alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);
+ mid_mfn_p = alloc_p2m_page();
p2m_mid_mfn_init(mid_mfn_p, p2m_missing);
p2m_top_mfn_p[topidx] = mid_mfn_p;
}
p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
- mid_mfn_p[mididx] = virt_to_mfn(mid[mididx]);
+ mid_mfn_p[mididx] = mfn;
}
}
@@ -353,171 +260,235 @@ void xen_setup_mfn_list_list(void)
/* Set up p2m_top to point to the domain-builder provided p2m pages */
void __init xen_build_dynamic_phys_to_machine(void)
{
- unsigned long *mfn_list;
- unsigned long max_pfn;
unsigned long pfn;
if (xen_feature(XENFEAT_auto_translated_physmap))
return;
- mfn_list = (unsigned long *)xen_start_info->mfn_list;
- max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
- xen_max_p2m_pfn = max_pfn;
+ xen_p2m_addr = (unsigned long *)xen_start_info->mfn_list;
+ xen_p2m_size = ALIGN(xen_start_info->nr_pages, P2M_PER_PAGE);
- p2m_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
- p2m_init(p2m_missing);
- p2m_identity = extend_brk(PAGE_SIZE, PAGE_SIZE);
- p2m_init(p2m_identity);
+ for (pfn = xen_start_info->nr_pages; pfn < xen_p2m_size; pfn++)
+ xen_p2m_addr[pfn] = INVALID_P2M_ENTRY;
- p2m_mid_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
- p2m_mid_init(p2m_mid_missing, p2m_missing);
- p2m_mid_identity = extend_brk(PAGE_SIZE, PAGE_SIZE);
- p2m_mid_init(p2m_mid_identity, p2m_identity);
+ xen_max_p2m_pfn = xen_p2m_size;
+}
- p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE);
- p2m_top_init(p2m_top);
+#define P2M_TYPE_IDENTITY 0
+#define P2M_TYPE_MISSING 1
+#define P2M_TYPE_PFN 2
+#define P2M_TYPE_UNKNOWN 3
- /*
- * The domain builder gives us a pre-constructed p2m array in
- * mfn_list for all the pages initially given to us, so we just
- * need to graft that into our tree structure.
- */
- for (pfn = 0; pfn < max_pfn; pfn += P2M_PER_PAGE) {
- unsigned topidx = p2m_top_index(pfn);
- unsigned mididx = p2m_mid_index(pfn);
+static int xen_p2m_elem_type(unsigned long pfn)
+{
+ unsigned long mfn;
- if (p2m_top[topidx] == p2m_mid_missing) {
- unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
- p2m_mid_init(mid, p2m_missing);
+ if (pfn >= xen_p2m_size)
+ return P2M_TYPE_IDENTITY;
- p2m_top[topidx] = mid;
- }
+ mfn = xen_p2m_addr[pfn];
- /*
- * As long as the mfn_list has enough entries to completely
- * fill a p2m page, pointing into the array is ok. But if
- * not the entries beyond the last pfn will be undefined.
- */
- if (unlikely(pfn + P2M_PER_PAGE > max_pfn)) {
- unsigned long p2midx;
+ if (mfn == INVALID_P2M_ENTRY)
+ return P2M_TYPE_MISSING;
- p2midx = max_pfn % P2M_PER_PAGE;
- for ( ; p2midx < P2M_PER_PAGE; p2midx++)
- mfn_list[pfn + p2midx] = INVALID_P2M_ENTRY;
- }
- p2m_top[topidx][mididx] = &mfn_list[pfn];
- }
+ if (mfn & IDENTITY_FRAME_BIT)
+ return P2M_TYPE_IDENTITY;
- m2p_override_init();
+ return P2M_TYPE_PFN;
}
-#ifdef CONFIG_X86_64
-unsigned long __init xen_revector_p2m_tree(void)
+
+static void __init xen_rebuild_p2m_list(unsigned long *p2m)
{
- unsigned long va_start;
- unsigned long va_end;
+ unsigned int i, chunk;
unsigned long pfn;
- unsigned long pfn_free = 0;
- unsigned long *mfn_list = NULL;
- unsigned long size;
-
- va_start = xen_start_info->mfn_list;
- /*We copy in increments of P2M_PER_PAGE * sizeof(unsigned long),
- * so make sure it is rounded up to that */
- size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
- va_end = va_start + size;
-
- /* If we were revectored already, don't do it again. */
- if (va_start <= __START_KERNEL_map && va_start >= __PAGE_OFFSET)
- return 0;
+ unsigned long *mfns;
+ pte_t *ptep;
+ pmd_t *pmdp;
+ int type;
- mfn_list = alloc_bootmem_align(size, PAGE_SIZE);
- if (!mfn_list) {
- pr_warn("Could not allocate space for a new P2M tree!\n");
- return xen_start_info->mfn_list;
- }
- /* Fill it out with INVALID_P2M_ENTRY value */
- memset(mfn_list, 0xFF, size);
+ p2m_missing = alloc_p2m_page();
+ p2m_init(p2m_missing);
+ p2m_identity = alloc_p2m_page();
+ p2m_init(p2m_identity);
- for (pfn = 0; pfn < ALIGN(MAX_DOMAIN_PAGES, P2M_PER_PAGE); pfn += P2M_PER_PAGE) {
- unsigned topidx = p2m_top_index(pfn);
- unsigned mididx;
- unsigned long *mid_p;
+ p2m_missing_pte = alloc_p2m_page();
+ paravirt_alloc_pte(&init_mm, __pa(p2m_missing_pte) >> PAGE_SHIFT);
+ p2m_identity_pte = alloc_p2m_page();
+ paravirt_alloc_pte(&init_mm, __pa(p2m_identity_pte) >> PAGE_SHIFT);
+ for (i = 0; i < PTRS_PER_PTE; i++) {
+ set_pte(p2m_missing_pte + i,
+ pfn_pte(PFN_DOWN(__pa(p2m_missing)), PAGE_KERNEL_RO));
+ set_pte(p2m_identity_pte + i,
+ pfn_pte(PFN_DOWN(__pa(p2m_identity)), PAGE_KERNEL_RO));
+ }
- if (!p2m_top[topidx])
+ for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += chunk) {
+ /*
+ * Try to map missing/identity PMDs or p2m-pages if possible.
+ * We have to respect the structure of the mfn_list_list
+ * which will be built just afterwards.
+ * Chunk size to test is one p2m page if we are in the middle
+ * of a mfn_list_list mid page and the complete mid page area
+ * if we are at index 0 of the mid page. Please note that a
+ * mid page might cover more than one PMD, e.g. on 32 bit PAE
+ * kernels.
+ */
+ chunk = (pfn & (P2M_PER_PAGE * P2M_MID_PER_PAGE - 1)) ?
+ P2M_PER_PAGE : P2M_PER_PAGE * P2M_MID_PER_PAGE;
+
+ type = xen_p2m_elem_type(pfn);
+ i = 0;
+ if (type != P2M_TYPE_PFN)
+ for (i = 1; i < chunk; i++)
+ if (xen_p2m_elem_type(pfn + i) != type)
+ break;
+ if (i < chunk)
+ /* Reset to minimal chunk size. */
+ chunk = P2M_PER_PAGE;
+
+ if (type == P2M_TYPE_PFN || i < chunk) {
+ /* Use initial p2m page contents. */
+#ifdef CONFIG_X86_64
+ mfns = alloc_p2m_page();
+ copy_page(mfns, xen_p2m_addr + pfn);
+#else
+ mfns = xen_p2m_addr + pfn;
+#endif
+ ptep = populate_extra_pte((unsigned long)(p2m + pfn));
+ set_pte(ptep,
+ pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL));
continue;
+ }
- if (p2m_top[topidx] == p2m_mid_missing)
+ if (chunk == P2M_PER_PAGE) {
+ /* Map complete missing or identity p2m-page. */
+ mfns = (type == P2M_TYPE_MISSING) ?
+ p2m_missing : p2m_identity;
+ ptep = populate_extra_pte((unsigned long)(p2m + pfn));
+ set_pte(ptep,
+ pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL_RO));
continue;
+ }
- mididx = p2m_mid_index(pfn);
- mid_p = p2m_top[topidx][mididx];
- if (!mid_p)
- continue;
- if ((mid_p == p2m_missing) || (mid_p == p2m_identity))
- continue;
+ /* Complete missing or identity PMD(s) can be mapped. */
+ ptep = (type == P2M_TYPE_MISSING) ?
+ p2m_missing_pte : p2m_identity_pte;
+ for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
+ pmdp = populate_extra_pmd(
+ (unsigned long)(p2m + pfn + i * PTRS_PER_PTE));
+ set_pmd(pmdp, __pmd(__pa(ptep) | _KERNPG_TABLE));
+ }
+ }
+}
- if ((unsigned long)mid_p == INVALID_P2M_ENTRY)
- continue;
+void __init xen_vmalloc_p2m_tree(void)
+{
+ static struct vm_struct vm;
- /* The old va. Rebase it on mfn_list */
- if (mid_p >= (unsigned long *)va_start && mid_p <= (unsigned long *)va_end) {
- unsigned long *new;
+ vm.flags = VM_ALLOC;
+ vm.size = ALIGN(sizeof(unsigned long) * xen_max_p2m_pfn,
+ PMD_SIZE * PMDS_PER_MID_PAGE);
+ vm_area_register_early(&vm, PMD_SIZE * PMDS_PER_MID_PAGE);
+ pr_notice("p2m virtual area at %p, size is %lx\n", vm.addr, vm.size);
- if (pfn_free > (size / sizeof(unsigned long))) {
- WARN(1, "Only allocated for %ld pages, but we want %ld!\n",
- size / sizeof(unsigned long), pfn_free);
- return 0;
- }
- new = &mfn_list[pfn_free];
+ xen_max_p2m_pfn = vm.size / sizeof(unsigned long);
- copy_page(new, mid_p);
- p2m_top[topidx][mididx] = &mfn_list[pfn_free];
+ xen_rebuild_p2m_list(vm.addr);
- pfn_free += P2M_PER_PAGE;
+ xen_p2m_addr = vm.addr;
+ xen_p2m_size = xen_max_p2m_pfn;
- }
- /* This should be the leafs allocated for identity from _brk. */
- }
- return (unsigned long)mfn_list;
+ xen_inv_extra_mem();
+ m2p_override_init();
}
-#else
-unsigned long __init xen_revector_p2m_tree(void)
-{
- return 0;
-}
-#endif
+
unsigned long get_phys_to_machine(unsigned long pfn)
{
- unsigned topidx, mididx, idx;
+ pte_t *ptep;
+ unsigned int level;
+
+ if (unlikely(pfn >= xen_p2m_size)) {
+ if (pfn < xen_max_p2m_pfn)
+ return xen_chk_extra_mem(pfn);
- if (unlikely(pfn >= MAX_P2M_PFN))
return IDENTITY_FRAME(pfn);
+ }
- topidx = p2m_top_index(pfn);
- mididx = p2m_mid_index(pfn);
- idx = p2m_index(pfn);
+ ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn), &level);
+ BUG_ON(!ptep || level != PG_LEVEL_4K);
/*
* The INVALID_P2M_ENTRY is filled in both p2m_*identity
* and in p2m_*missing, so returning the INVALID_P2M_ENTRY
* would be wrong.
*/
- if (p2m_top[topidx][mididx] == p2m_identity)
+ if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_identity)))
return IDENTITY_FRAME(pfn);
- return p2m_top[topidx][mididx][idx];
+ return xen_p2m_addr[pfn];
}
EXPORT_SYMBOL_GPL(get_phys_to_machine);
-static void *alloc_p2m_page(void)
+/*
+ * Allocate new pmd(s). It is checked whether the old pmd is still in place.
+ * If not, nothing is changed. This is okay as the only reason for allocating
+ * a new pmd is to replace p2m_missing_pte or p2m_identity_pte by an
+ * individual pmd. In case of PAE/x86-32 there are multiple pmds to allocate!
+ */
+static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg)
{
- return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
-}
+ pte_t *ptechk;
+ pte_t *pteret = ptep;
+ pte_t *pte_newpg[PMDS_PER_MID_PAGE];
+ pmd_t *pmdp;
+ unsigned int level;
+ unsigned long flags;
+ unsigned long vaddr;
+ int i;
-static void free_p2m_page(void *p)
-{
- free_page((unsigned long)p);
+ /* Do all allocations first to bail out in error case. */
+ for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
+ pte_newpg[i] = alloc_p2m_page();
+ if (!pte_newpg[i]) {
+ for (i--; i >= 0; i--)
+ free_p2m_page(pte_newpg[i]);
+
+ return NULL;
+ }
+ }
+
+ vaddr = addr & ~(PMD_SIZE * PMDS_PER_MID_PAGE - 1);
+
+ for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
+ copy_page(pte_newpg[i], pte_pg);
+ paravirt_alloc_pte(&init_mm, __pa(pte_newpg[i]) >> PAGE_SHIFT);
+
+ pmdp = lookup_pmd_address(vaddr);
+ BUG_ON(!pmdp);
+
+ spin_lock_irqsave(&p2m_update_lock, flags);
+
+ ptechk = lookup_address(vaddr, &level);
+ if (ptechk == pte_pg) {
+ set_pmd(pmdp,
+ __pmd(__pa(pte_newpg[i]) | _KERNPG_TABLE));
+ if (vaddr == (addr & ~(PMD_SIZE - 1)))
+ pteret = pte_offset_kernel(pmdp, addr);
+ pte_newpg[i] = NULL;
+ }
+
+ spin_unlock_irqrestore(&p2m_update_lock, flags);
+
+ if (pte_newpg[i]) {
+ paravirt_release_pte(__pa(pte_newpg[i]) >> PAGE_SHIFT);
+ free_p2m_page(pte_newpg[i]);
+ }
+
+ vaddr += PMD_SIZE;
+ }
+
+ return pteret;
}
/*
@@ -530,58 +501,62 @@ static void free_p2m_page(void *p)
static bool alloc_p2m(unsigned long pfn)
{
unsigned topidx, mididx;
- unsigned long ***top_p, **mid;
unsigned long *top_mfn_p, *mid_mfn;
- unsigned long *p2m_orig;
+ pte_t *ptep, *pte_pg;
+ unsigned int level;
+ unsigned long flags;
+ unsigned long addr = (unsigned long)(xen_p2m_addr + pfn);
+ unsigned long p2m_pfn;
topidx = p2m_top_index(pfn);
mididx = p2m_mid_index(pfn);
- top_p = &p2m_top[topidx];
- mid = ACCESS_ONCE(*top_p);
+ ptep = lookup_address(addr, &level);
+ BUG_ON(!ptep || level != PG_LEVEL_4K);
+ pte_pg = (pte_t *)((unsigned long)ptep & ~(PAGE_SIZE - 1));
- if (mid == p2m_mid_missing) {
- /* Mid level is missing, allocate a new one */
- mid = alloc_p2m_page();
- if (!mid)
+ if (pte_pg == p2m_missing_pte || pte_pg == p2m_identity_pte) {
+ /* PMD level is missing, allocate a new one */
+ ptep = alloc_p2m_pmd(addr, ptep, pte_pg);
+ if (!ptep)
return false;
-
- p2m_mid_init(mid, p2m_missing);
-
- if (cmpxchg(top_p, p2m_mid_missing, mid) != p2m_mid_missing)
- free_p2m_page(mid);
}
- top_mfn_p = &p2m_top_mfn[topidx];
- mid_mfn = ACCESS_ONCE(p2m_top_mfn_p[topidx]);
+ if (p2m_top_mfn) {
+ top_mfn_p = &p2m_top_mfn[topidx];
+ mid_mfn = ACCESS_ONCE(p2m_top_mfn_p[topidx]);
- BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p);
+ BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p);
- if (mid_mfn == p2m_mid_missing_mfn) {
- /* Separately check the mid mfn level */
- unsigned long missing_mfn;
- unsigned long mid_mfn_mfn;
- unsigned long old_mfn;
+ if (mid_mfn == p2m_mid_missing_mfn) {
+ /* Separately check the mid mfn level */
+ unsigned long missing_mfn;
+ unsigned long mid_mfn_mfn;
+ unsigned long old_mfn;
- mid_mfn = alloc_p2m_page();
- if (!mid_mfn)
- return false;
+ mid_mfn = alloc_p2m_page();
+ if (!mid_mfn)
+ return false;
- p2m_mid_mfn_init(mid_mfn, p2m_missing);
+ p2m_mid_mfn_init(mid_mfn, p2m_missing);
- missing_mfn = virt_to_mfn(p2m_mid_missing_mfn);
- mid_mfn_mfn = virt_to_mfn(mid_mfn);
- old_mfn = cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn);
- if (old_mfn != missing_mfn) {
- free_p2m_page(mid_mfn);
- mid_mfn = mfn_to_virt(old_mfn);
- } else {
- p2m_top_mfn_p[topidx] = mid_mfn;
+ missing_mfn = virt_to_mfn(p2m_mid_missing_mfn);
+ mid_mfn_mfn = virt_to_mfn(mid_mfn);
+ old_mfn = cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn);
+ if (old_mfn != missing_mfn) {
+ free_p2m_page(mid_mfn);
+ mid_mfn = mfn_to_virt(old_mfn);
+ } else {
+ p2m_top_mfn_p[topidx] = mid_mfn;
+ }
}
+ } else {
+ mid_mfn = NULL;
}
- p2m_orig = ACCESS_ONCE(p2m_top[topidx][mididx]);
- if (p2m_orig == p2m_identity || p2m_orig == p2m_missing) {
+ p2m_pfn = pte_pfn(ACCESS_ONCE(*ptep));
+ if (p2m_pfn == PFN_DOWN(__pa(p2m_identity)) ||
+ p2m_pfn == PFN_DOWN(__pa(p2m_missing))) {
/* p2m leaf page is missing */
unsigned long *p2m;
@@ -589,183 +564,36 @@ static bool alloc_p2m(unsigned long pfn)
if (!p2m)
return false;
- p2m_init(p2m);
-
- if (cmpxchg(&mid[mididx], p2m_orig, p2m) != p2m_orig)
- free_p2m_page(p2m);
+ if (p2m_pfn == PFN_DOWN(__pa(p2m_missing)))
+ p2m_init(p2m);
else
- mid_mfn[mididx] = virt_to_mfn(p2m);
- }
-
- return true;
-}
-
-static bool __init early_alloc_p2m(unsigned long pfn, bool check_boundary)
-{
- unsigned topidx, mididx, idx;
- unsigned long *p2m;
-
- topidx = p2m_top_index(pfn);
- mididx = p2m_mid_index(pfn);
- idx = p2m_index(pfn);
-
- /* Pfff.. No boundary cross-over, lets get out. */
- if (!idx && check_boundary)
- return false;
-
- WARN(p2m_top[topidx][mididx] == p2m_identity,
- "P2M[%d][%d] == IDENTITY, should be MISSING (or alloced)!\n",
- topidx, mididx);
-
- /*
- * Could be done by xen_build_dynamic_phys_to_machine..
- */
- if (p2m_top[topidx][mididx] != p2m_missing)
- return false;
-
- /* Boundary cross-over for the edges: */
- p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
-
- p2m_init(p2m);
+ p2m_init_identity(p2m, pfn);
- p2m_top[topidx][mididx] = p2m;
+ spin_lock_irqsave(&p2m_update_lock, flags);
- return true;
-}
-
-static bool __init early_alloc_p2m_middle(unsigned long pfn)
-{
- unsigned topidx = p2m_top_index(pfn);
- unsigned long **mid;
-
- mid = p2m_top[topidx];
- if (mid == p2m_mid_missing) {
- mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
-
- p2m_mid_init(mid, p2m_missing);
-
- p2m_top[topidx] = mid;
- }
- return true;
-}
-
-/*
- * Skim over the P2M tree looking at pages that are either filled with
- * INVALID_P2M_ENTRY or with 1:1 PFNs. If found, re-use that page and
- * replace the P2M leaf with a p2m_missing or p2m_identity.
- * Stick the old page in the new P2M tree location.
- */
-static bool __init early_can_reuse_p2m_middle(unsigned long set_pfn)
-{
- unsigned topidx;
- unsigned mididx;
- unsigned ident_pfns;
- unsigned inv_pfns;
- unsigned long *p2m;
- unsigned idx;
- unsigned long pfn;
-
- /* We only look when this entails a P2M middle layer */
- if (p2m_index(set_pfn))
- return false;
-
- for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) {
- topidx = p2m_top_index(pfn);
-
- if (!p2m_top[topidx])
- continue;
-
- if (p2m_top[topidx] == p2m_mid_missing)
- continue;
-
- mididx = p2m_mid_index(pfn);
- p2m = p2m_top[topidx][mididx];
- if (!p2m)
- continue;
-
- if ((p2m == p2m_missing) || (p2m == p2m_identity))
- continue;
-
- if ((unsigned long)p2m == INVALID_P2M_ENTRY)
- continue;
-
- ident_pfns = 0;
- inv_pfns = 0;
- for (idx = 0; idx < P2M_PER_PAGE; idx++) {
- /* IDENTITY_PFNs are 1:1 */
- if (p2m[idx] == IDENTITY_FRAME(pfn + idx))
- ident_pfns++;
- else if (p2m[idx] == INVALID_P2M_ENTRY)
- inv_pfns++;
- else
- break;
+ if (pte_pfn(*ptep) == p2m_pfn) {
+ set_pte(ptep,
+ pfn_pte(PFN_DOWN(__pa(p2m)), PAGE_KERNEL));
+ if (mid_mfn)
+ mid_mfn[mididx] = virt_to_mfn(p2m);
+ p2m = NULL;
}
- if ((ident_pfns == P2M_PER_PAGE) || (inv_pfns == P2M_PER_PAGE))
- goto found;
- }
- return false;
-found:
- /* Found one, replace old with p2m_identity or p2m_missing */
- p2m_top[topidx][mididx] = (ident_pfns ? p2m_identity : p2m_missing);
-
- /* Reset where we want to stick the old page in. */
- topidx = p2m_top_index(set_pfn);
- mididx = p2m_mid_index(set_pfn);
-
- /* This shouldn't happen */
- if (WARN_ON(p2m_top[topidx] == p2m_mid_missing))
- early_alloc_p2m_middle(set_pfn);
-
- if (WARN_ON(p2m_top[topidx][mididx] != p2m_missing))
- return false;
-
- p2m_init(p2m);
- p2m_top[topidx][mididx] = p2m;
- return true;
-}
-bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-{
- if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
- if (!early_alloc_p2m_middle(pfn))
- return false;
-
- if (early_can_reuse_p2m_middle(pfn))
- return __set_phys_to_machine(pfn, mfn);
-
- if (!early_alloc_p2m(pfn, false /* boundary crossover OK!*/))
- return false;
+ spin_unlock_irqrestore(&p2m_update_lock, flags);
- if (!__set_phys_to_machine(pfn, mfn))
- return false;
+ if (p2m)
+ free_p2m_page(p2m);
}
return true;
}
-static void __init early_split_p2m(unsigned long pfn)
-{
- unsigned long mididx, idx;
-
- mididx = p2m_mid_index(pfn);
- idx = p2m_index(pfn);
-
- /*
- * Allocate new middle and leaf pages if this pfn lies in the
- * middle of one.
- */
- if (mididx || idx)
- early_alloc_p2m_middle(pfn);
- if (idx)
- early_alloc_p2m(pfn, false);
-}
-
unsigned long __init set_phys_range_identity(unsigned long pfn_s,
unsigned long pfn_e)
{
unsigned long pfn;
- if (unlikely(pfn_s >= MAX_P2M_PFN))
+ if (unlikely(pfn_s >= xen_p2m_size))
return 0;
if (unlikely(xen_feature(XENFEAT_auto_translated_physmap)))
@@ -774,101 +602,51 @@ unsigned long __init set_phys_range_identity(unsigned long pfn_s,
if (pfn_s > pfn_e)
return 0;
- if (pfn_e > MAX_P2M_PFN)
- pfn_e = MAX_P2M_PFN;
-
- early_split_p2m(pfn_s);
- early_split_p2m(pfn_e);
-
- for (pfn = pfn_s; pfn < pfn_e;) {
- unsigned topidx = p2m_top_index(pfn);
- unsigned mididx = p2m_mid_index(pfn);
-
- if (!__set_phys_to_machine(pfn, IDENTITY_FRAME(pfn)))
- break;
- pfn++;
-
- /*
- * If the PFN was set to a middle or leaf identity
- * page the remainder must also be identity, so skip
- * ahead to the next middle or leaf entry.
- */
- if (p2m_top[topidx] == p2m_mid_identity)
- pfn = ALIGN(pfn, P2M_MID_PER_PAGE * P2M_PER_PAGE);
- else if (p2m_top[topidx][mididx] == p2m_identity)
- pfn = ALIGN(pfn, P2M_PER_PAGE);
- }
+ if (pfn_e > xen_p2m_size)
+ pfn_e = xen_p2m_size;
- WARN((pfn - pfn_s) != (pfn_e - pfn_s),
- "Identity mapping failed. We are %ld short of 1-1 mappings!\n",
- (pfn_e - pfn_s) - (pfn - pfn_s));
+ for (pfn = pfn_s; pfn < pfn_e; pfn++)
+ xen_p2m_addr[pfn] = IDENTITY_FRAME(pfn);
return pfn - pfn_s;
}
-/* Try to install p2m mapping; fail if intermediate bits missing */
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
- unsigned topidx, mididx, idx;
+ pte_t *ptep;
+ unsigned int level;
/* don't track P2M changes in autotranslate guests */
if (unlikely(xen_feature(XENFEAT_auto_translated_physmap)))
return true;
- if (unlikely(pfn >= MAX_P2M_PFN)) {
+ if (unlikely(pfn >= xen_p2m_size)) {
BUG_ON(mfn != INVALID_P2M_ENTRY);
return true;
}
- topidx = p2m_top_index(pfn);
- mididx = p2m_mid_index(pfn);
- idx = p2m_index(pfn);
-
- /* For sparse holes were the p2m leaf has real PFN along with
- * PCI holes, stick in the PFN as the MFN value.
- *
- * set_phys_range_identity() will have allocated new middle
- * and leaf pages as required so an existing p2m_mid_missing
- * or p2m_missing mean that whole range will be identity so
- * these can be switched to p2m_mid_identity or p2m_identity.
- */
- if (mfn != INVALID_P2M_ENTRY && (mfn & IDENTITY_FRAME_BIT)) {
- if (p2m_top[topidx] == p2m_mid_identity)
- return true;
-
- if (p2m_top[topidx] == p2m_mid_missing) {
- WARN_ON(cmpxchg(&p2m_top[topidx], p2m_mid_missing,
- p2m_mid_identity) != p2m_mid_missing);
- return true;
- }
-
- if (p2m_top[topidx][mididx] == p2m_identity)
- return true;
+ if (likely(!xen_safe_write_ulong(xen_p2m_addr + pfn, mfn)))
+ return true;
- /* Swap over from MISSING to IDENTITY if needed. */
- if (p2m_top[topidx][mididx] == p2m_missing) {
- WARN_ON(cmpxchg(&p2m_top[topidx][mididx], p2m_missing,
- p2m_identity) != p2m_missing);
- return true;
- }
- }
+ ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn), &level);
+ BUG_ON(!ptep || level != PG_LEVEL_4K);
- if (p2m_top[topidx][mididx] == p2m_missing)
+ if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_missing)))
return mfn == INVALID_P2M_ENTRY;
- p2m_top[topidx][mididx][idx] = mfn;
+ if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_identity)))
+ return mfn == IDENTITY_FRAME(pfn);
- return true;
+ return false;
}
bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
- if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
+ if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
if (!alloc_p2m(pfn))
return false;
- if (!__set_phys_to_machine(pfn, mfn))
- return false;
+ return __set_phys_to_machine(pfn, mfn);
}
return true;
@@ -877,15 +655,16 @@ bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
#define M2P_OVERRIDE_HASH_SHIFT 10
#define M2P_OVERRIDE_HASH (1 << M2P_OVERRIDE_HASH_SHIFT)
-static RESERVE_BRK_ARRAY(struct list_head, m2p_overrides, M2P_OVERRIDE_HASH);
+static struct list_head *m2p_overrides;
static DEFINE_SPINLOCK(m2p_override_lock);
static void __init m2p_override_init(void)
{
unsigned i;
- m2p_overrides = extend_brk(sizeof(*m2p_overrides) * M2P_OVERRIDE_HASH,
- sizeof(unsigned long));
+ m2p_overrides = alloc_bootmem_align(
+ sizeof(*m2p_overrides) * M2P_OVERRIDE_HASH,
+ sizeof(unsigned long));
for (i = 0; i < M2P_OVERRIDE_HASH; i++)
INIT_LIST_HEAD(&m2p_overrides[i]);
@@ -896,68 +675,9 @@ static unsigned long mfn_hash(unsigned long mfn)
return hash_long(mfn, M2P_OVERRIDE_HASH_SHIFT);
}
-int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
- struct gnttab_map_grant_ref *kmap_ops,
- struct page **pages, unsigned int count)
-{
- int i, ret = 0;
- bool lazy = false;
- pte_t *pte;
-
- if (xen_feature(XENFEAT_auto_translated_physmap))
- return 0;
-
- if (kmap_ops &&
- !in_interrupt() &&
- paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
- arch_enter_lazy_mmu_mode();
- lazy = true;
- }
-
- for (i = 0; i < count; i++) {
- unsigned long mfn, pfn;
-
- /* Do not add to override if the map failed. */
- if (map_ops[i].status)
- continue;
-
- if (map_ops[i].flags & GNTMAP_contains_pte) {
- pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
- (map_ops[i].host_addr & ~PAGE_MASK));
- mfn = pte_mfn(*pte);
- } else {
- mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
- }
- pfn = page_to_pfn(pages[i]);
-
- WARN_ON(PagePrivate(pages[i]));
- SetPagePrivate(pages[i]);
- set_page_private(pages[i], mfn);
- pages[i]->index = pfn_to_mfn(pfn);
-
- if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) {
- ret = -ENOMEM;
- goto out;
- }
-
- if (kmap_ops) {
- ret = m2p_add_override(mfn, pages[i], &kmap_ops[i]);
- if (ret)
- goto out;
- }
- }
-
-out:
- if (lazy)
- arch_leave_lazy_mmu_mode();
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);
-
/* Add an MFN override for a particular page */
-int m2p_add_override(unsigned long mfn, struct page *page,
- struct gnttab_map_grant_ref *kmap_op)
+static int m2p_add_override(unsigned long mfn, struct page *page,
+ struct gnttab_map_grant_ref *kmap_op)
{
unsigned long flags;
unsigned long pfn;
@@ -970,7 +690,7 @@ int m2p_add_override(unsigned long mfn, struct page *page,
address = (unsigned long)__va(pfn << PAGE_SHIFT);
ptep = lookup_address(address, &level);
if (WARN(ptep == NULL || level != PG_LEVEL_4K,
- "m2p_add_override: pfn %lx not mapped", pfn))
+ "m2p_add_override: pfn %lx not mapped", pfn))
return -EINVAL;
}
@@ -1004,19 +724,19 @@ int m2p_add_override(unsigned long mfn, struct page *page,
* because mfn_to_pfn (that ends up being called by GUPF) will
* return the backend pfn rather than the frontend pfn. */
pfn = mfn_to_pfn_no_overrides(mfn);
- if (get_phys_to_machine(pfn) == mfn)
+ if (__pfn_to_mfn(pfn) == mfn)
set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));
return 0;
}
-EXPORT_SYMBOL_GPL(m2p_add_override);
-int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
- struct gnttab_map_grant_ref *kmap_ops,
- struct page **pages, unsigned int count)
+int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
+ struct gnttab_map_grant_ref *kmap_ops,
+ struct page **pages, unsigned int count)
{
int i, ret = 0;
bool lazy = false;
+ pte_t *pte;
if (xen_feature(XENFEAT_auto_translated_physmap))
return 0;
@@ -1029,35 +749,75 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
}
for (i = 0; i < count; i++) {
- unsigned long mfn = get_phys_to_machine(page_to_pfn(pages[i]));
- unsigned long pfn = page_to_pfn(pages[i]);
+ unsigned long mfn, pfn;
- if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) {
- ret = -EINVAL;
- goto out;
+ /* Do not add to override if the map failed. */
+ if (map_ops[i].status)
+ continue;
+
+ if (map_ops[i].flags & GNTMAP_contains_pte) {
+ pte = (pte_t *)(mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
+ (map_ops[i].host_addr & ~PAGE_MASK));
+ mfn = pte_mfn(*pte);
+ } else {
+ mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
}
+ pfn = page_to_pfn(pages[i]);
- set_page_private(pages[i], INVALID_P2M_ENTRY);
- WARN_ON(!PagePrivate(pages[i]));
- ClearPagePrivate(pages[i]);
- set_phys_to_machine(pfn, pages[i]->index);
+ WARN_ON(PagePrivate(pages[i]));
+ SetPagePrivate(pages[i]);
+ set_page_private(pages[i], mfn);
+ pages[i]->index = pfn_to_mfn(pfn);
- if (kmap_ops)
- ret = m2p_remove_override(pages[i], &kmap_ops[i], mfn);
- if (ret)
+ if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) {
+ ret = -ENOMEM;
goto out;
+ }
+
+ if (kmap_ops) {
+ ret = m2p_add_override(mfn, pages[i], &kmap_ops[i]);
+ if (ret)
+ goto out;
+ }
}
out:
if (lazy)
arch_leave_lazy_mmu_mode();
+
return ret;
}
-EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);
+EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);
-int m2p_remove_override(struct page *page,
- struct gnttab_map_grant_ref *kmap_op,
- unsigned long mfn)
+static struct page *m2p_find_override(unsigned long mfn)
+{
+ unsigned long flags;
+ struct list_head *bucket;
+ struct page *p, *ret;
+
+ if (unlikely(!m2p_overrides))
+ return NULL;
+
+ ret = NULL;
+ bucket = &m2p_overrides[mfn_hash(mfn)];
+
+ spin_lock_irqsave(&m2p_override_lock, flags);
+
+ list_for_each_entry(p, bucket, lru) {
+ if (page_private(p) == mfn) {
+ ret = p;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&m2p_override_lock, flags);
+
+ return ret;
+}
+
+static int m2p_remove_override(struct page *page,
+ struct gnttab_map_grant_ref *kmap_op,
+ unsigned long mfn)
{
unsigned long flags;
unsigned long pfn;
@@ -1072,7 +832,7 @@ int m2p_remove_override(struct page *page,
ptep = lookup_address(address, &level);
if (WARN(ptep == NULL || level != PG_LEVEL_4K,
- "m2p_remove_override: pfn %lx not mapped", pfn))
+ "m2p_remove_override: pfn %lx not mapped", pfn))
return -EINVAL;
}
@@ -1102,9 +862,8 @@ int m2p_remove_override(struct page *page,
* hypercall actually returned an error.
*/
if (kmap_op->handle == GNTST_general_error) {
- printk(KERN_WARNING "m2p_remove_override: "
- "pfn %lx mfn %lx, failed to modify kernel mappings",
- pfn, mfn);
+ pr_warn("m2p_remove_override: pfn %lx mfn %lx, failed to modify kernel mappings",
+ pfn, mfn);
put_balloon_scratch_page();
return -1;
}
@@ -1112,14 +871,14 @@ int m2p_remove_override(struct page *page,
xen_mc_batch();
mcs = __xen_mc_entry(
- sizeof(struct gnttab_unmap_and_replace));
+ sizeof(struct gnttab_unmap_and_replace));
unmap_op = mcs.args;
unmap_op->host_addr = kmap_op->host_addr;
unmap_op->new_addr = scratch_page_address;
unmap_op->handle = kmap_op->handle;
MULTI_grant_table_op(mcs.mc,
- GNTTABOP_unmap_and_replace, unmap_op, 1);
+ GNTTABOP_unmap_and_replace, unmap_op, 1);
mcs = __xen_mc_entry(0);
MULTI_update_va_mapping(mcs.mc, scratch_page_address,
@@ -1145,35 +904,56 @@ int m2p_remove_override(struct page *page,
* pfn again. */
mfn &= ~FOREIGN_FRAME_BIT;
pfn = mfn_to_pfn_no_overrides(mfn);
- if (get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
+ if (__pfn_to_mfn(pfn) == FOREIGN_FRAME(mfn) &&
m2p_find_override(mfn) == NULL)
set_phys_to_machine(pfn, mfn);
return 0;
}
-EXPORT_SYMBOL_GPL(m2p_remove_override);
-struct page *m2p_find_override(unsigned long mfn)
+int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
+ struct gnttab_map_grant_ref *kmap_ops,
+ struct page **pages, unsigned int count)
{
- unsigned long flags;
- struct list_head *bucket = &m2p_overrides[mfn_hash(mfn)];
- struct page *p, *ret;
+ int i, ret = 0;
+ bool lazy = false;
- ret = NULL;
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return 0;
- spin_lock_irqsave(&m2p_override_lock, flags);
+ if (kmap_ops &&
+ !in_interrupt() &&
+ paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
+ arch_enter_lazy_mmu_mode();
+ lazy = true;
+ }
- list_for_each_entry(p, bucket, lru) {
- if (page_private(p) == mfn) {
- ret = p;
- break;
+ for (i = 0; i < count; i++) {
+ unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i]));
+ unsigned long pfn = page_to_pfn(pages[i]);
+
+ if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) {
+ ret = -EINVAL;
+ goto out;
}
- }
- spin_unlock_irqrestore(&m2p_override_lock, flags);
+ set_page_private(pages[i], INVALID_P2M_ENTRY);
+ WARN_ON(!PagePrivate(pages[i]));
+ ClearPagePrivate(pages[i]);
+ set_phys_to_machine(pfn, pages[i]->index);
+
+ if (kmap_ops)
+ ret = m2p_remove_override(pages[i], &kmap_ops[i], mfn);
+ if (ret)
+ goto out;
+ }
+out:
+ if (lazy)
+ arch_leave_lazy_mmu_mode();
return ret;
}
+EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);
unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn)
{
@@ -1192,79 +972,29 @@ EXPORT_SYMBOL_GPL(m2p_find_override_pfn);
#include "debugfs.h"
static int p2m_dump_show(struct seq_file *m, void *v)
{
- static const char * const level_name[] = { "top", "middle",
- "entry", "abnormal", "error"};
-#define TYPE_IDENTITY 0
-#define TYPE_MISSING 1
-#define TYPE_PFN 2
-#define TYPE_UNKNOWN 3
static const char * const type_name[] = {
- [TYPE_IDENTITY] = "identity",
- [TYPE_MISSING] = "missing",
- [TYPE_PFN] = "pfn",
- [TYPE_UNKNOWN] = "abnormal"};
- unsigned long pfn, prev_pfn_type = 0, prev_pfn_level = 0;
- unsigned int uninitialized_var(prev_level);
- unsigned int uninitialized_var(prev_type);
-
- if (!p2m_top)
- return 0;
-
- for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn++) {
- unsigned topidx = p2m_top_index(pfn);
- unsigned mididx = p2m_mid_index(pfn);
- unsigned idx = p2m_index(pfn);
- unsigned lvl, type;
-
- lvl = 4;
- type = TYPE_UNKNOWN;
- if (p2m_top[topidx] == p2m_mid_missing) {
- lvl = 0; type = TYPE_MISSING;
- } else if (p2m_top[topidx] == NULL) {
- lvl = 0; type = TYPE_UNKNOWN;
- } else if (p2m_top[topidx][mididx] == NULL) {
- lvl = 1; type = TYPE_UNKNOWN;
- } else if (p2m_top[topidx][mididx] == p2m_identity) {
- lvl = 1; type = TYPE_IDENTITY;
- } else if (p2m_top[topidx][mididx] == p2m_missing) {
- lvl = 1; type = TYPE_MISSING;
- } else if (p2m_top[topidx][mididx][idx] == 0) {
- lvl = 2; type = TYPE_UNKNOWN;
- } else if (p2m_top[topidx][mididx][idx] == IDENTITY_FRAME(pfn)) {
- lvl = 2; type = TYPE_IDENTITY;
- } else if (p2m_top[topidx][mididx][idx] == INVALID_P2M_ENTRY) {
- lvl = 2; type = TYPE_MISSING;
- } else if (p2m_top[topidx][mididx][idx] == pfn) {
- lvl = 2; type = TYPE_PFN;
- } else if (p2m_top[topidx][mididx][idx] != pfn) {
- lvl = 2; type = TYPE_PFN;
- }
- if (pfn == 0) {
- prev_level = lvl;
- prev_type = type;
- }
- if (pfn == MAX_DOMAIN_PAGES-1) {
- lvl = 3;
- type = TYPE_UNKNOWN;
- }
- if (prev_type != type) {
- seq_printf(m, " [0x%lx->0x%lx] %s\n",
- prev_pfn_type, pfn, type_name[prev_type]);
- prev_pfn_type = pfn;
+ [P2M_TYPE_IDENTITY] = "identity",
+ [P2M_TYPE_MISSING] = "missing",
+ [P2M_TYPE_PFN] = "pfn",
+ [P2M_TYPE_UNKNOWN] = "abnormal"};
+ unsigned long pfn, first_pfn;
+ int type, prev_type;
+
+ prev_type = xen_p2m_elem_type(0);
+ first_pfn = 0;
+
+ for (pfn = 0; pfn < xen_p2m_size; pfn++) {
+ type = xen_p2m_elem_type(pfn);
+ if (type != prev_type) {
+ seq_printf(m, " [0x%lx->0x%lx] %s\n", first_pfn, pfn,
+ type_name[prev_type]);
prev_type = type;
- }
- if (prev_level != lvl) {
- seq_printf(m, " [0x%lx->0x%lx] level %s\n",
- prev_pfn_level, pfn, level_name[prev_level]);
- prev_pfn_level = pfn;
- prev_level = lvl;
+ first_pfn = pfn;
}
}
+ seq_printf(m, " [0x%lx->0x%lx] %s\n", first_pfn, pfn,
+ type_name[prev_type]);
return 0;
-#undef TYPE_IDENTITY
-#undef TYPE_MISSING
-#undef TYPE_PFN
-#undef TYPE_UNKNOWN
}
static int p2m_dump_open(struct inode *inode, struct file *filp)
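For reference, the reworked p2m_dump_show() above collapses consecutive pfns of the same type into a single "[first->last] type" range line. A minimal userspace sketch of that run-length grouping (toy p2m array; classify() is a made-up stand-in for xen_p2m_elem_type(), not kernel code):

#include <stdio.h>

enum { TYPE_IDENTITY, TYPE_MISSING, TYPE_PFN };

static const char * const type_name[] = { "identity", "missing", "pfn" };

/* stand-in for xen_p2m_elem_type(): classify a toy p2m entry */
static int classify(unsigned long pfn, const unsigned long *p2m)
{
	if (p2m[pfn] == ~0UL)
		return TYPE_MISSING;
	if (p2m[pfn] == pfn)
		return TYPE_IDENTITY;
	return TYPE_PFN;
}

int main(void)
{
	unsigned long p2m[] = { 0, 1, 2, ~0UL, ~0UL, 100, 101, 7 };
	unsigned long size = sizeof(p2m) / sizeof(p2m[0]);
	unsigned long pfn, first_pfn = 0;
	int type, prev_type = classify(0, p2m);

	for (pfn = 0; pfn < size; pfn++) {
		type = classify(pfn, p2m);
		if (type != prev_type) {
			printf(" [0x%lx->0x%lx] %s\n", first_pfn, pfn,
			       type_name[prev_type]);
			prev_type = type;
			first_pfn = pfn;
		}
	}
	/* flush the final open range, as the kernel version does */
	printf(" [0x%lx->0x%lx] %s\n", first_pfn, pfn, type_name[prev_type]);
	return 0;
}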
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 29834b3fd87..dfd77dec8e2 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -30,6 +30,7 @@
#include "xen-ops.h"
#include "vdso.h"
#include "p2m.h"
+#include "mmu.h"
/* These are code, but not functions. Defined in entry.S */
extern const char xen_hypervisor_callback[];
@@ -47,8 +48,19 @@ struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;
-/* Buffer used to remap identity mapped pages */
-unsigned long xen_remap_buf[P2M_PER_PAGE] __initdata;
+/*
+ * Buffer used to remap identity mapped pages. We only need the virtual space.
+ * The physical page behind this address is remapped as needed to different
+ * buffer pages.
+ */
+#define REMAP_SIZE (P2M_PER_PAGE - 3)
+static struct {
+ unsigned long next_area_mfn;
+ unsigned long target_pfn;
+ unsigned long size;
+ unsigned long mfns[REMAP_SIZE];
+} xen_remap_buf __initdata __aligned(PAGE_SIZE);
+static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;
/*
* The maximum amount of extra memory compared to the base size. The
@@ -64,7 +76,6 @@ unsigned long xen_remap_buf[P2M_PER_PAGE] __initdata;
static void __init xen_add_extra_mem(u64 start, u64 size)
{
- unsigned long pfn;
int i;
for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
@@ -84,75 +95,76 @@ static void __init xen_add_extra_mem(u64 start, u64 size)
printk(KERN_WARNING "Warning: not enough extra memory regions\n");
memblock_reserve(start, size);
+}
- xen_max_p2m_pfn = PFN_DOWN(start + size);
- for (pfn = PFN_DOWN(start); pfn < xen_max_p2m_pfn; pfn++) {
- unsigned long mfn = pfn_to_mfn(pfn);
-
- if (WARN_ONCE(mfn == pfn, "Trying to over-write 1-1 mapping (pfn: %lx)\n", pfn))
- continue;
- WARN_ONCE(mfn != INVALID_P2M_ENTRY, "Trying to remove %lx which has %lx mfn!\n",
- pfn, mfn);
+static void __init xen_del_extra_mem(u64 start, u64 size)
+{
+ int i;
+ u64 start_r, size_r;
- __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+ for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+ start_r = xen_extra_mem[i].start;
+ size_r = xen_extra_mem[i].size;
+
+ /* Start of region. */
+ if (start_r == start) {
+ BUG_ON(size > size_r);
+ xen_extra_mem[i].start += size;
+ xen_extra_mem[i].size -= size;
+ break;
+ }
+ /* End of region. */
+ if (start_r + size_r == start + size) {
+ BUG_ON(size > size_r);
+ xen_extra_mem[i].size -= size;
+ break;
+ }
+ /* Mid of region. */
+ if (start > start_r && start < start_r + size_r) {
+ BUG_ON(start + size > start_r + size_r);
+ xen_extra_mem[i].size = start - start_r;
+ /* Calling memblock_reserve() again is okay. */
+ xen_add_extra_mem(start + size, start_r + size_r -
+ (start + size));
+ break;
+ }
}
+ memblock_free(start, size);
}
-static unsigned long __init xen_do_chunk(unsigned long start,
- unsigned long end, bool release)
+/*
+ * Called during boot before the p2m list can take entries beyond the
+ * hypervisor supplied p2m list. Entries in extra mem are to be regarded as
+ * invalid.
+ */
+unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
{
- struct xen_memory_reservation reservation = {
- .address_bits = 0,
- .extent_order = 0,
- .domid = DOMID_SELF
- };
- unsigned long len = 0;
- unsigned long pfn;
- int ret;
+ int i;
+ unsigned long addr = PFN_PHYS(pfn);
- for (pfn = start; pfn < end; pfn++) {
- unsigned long frame;
- unsigned long mfn = pfn_to_mfn(pfn);
+ for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+ if (addr >= xen_extra_mem[i].start &&
+ addr < xen_extra_mem[i].start + xen_extra_mem[i].size)
+ return INVALID_P2M_ENTRY;
+ }
- if (release) {
- /* Make sure pfn exists to start with */
- if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
- continue;
- frame = mfn;
- } else {
- if (mfn != INVALID_P2M_ENTRY)
- continue;
- frame = pfn;
- }
- set_xen_guest_handle(reservation.extent_start, &frame);
- reservation.nr_extents = 1;
+ return IDENTITY_FRAME(pfn);
+}
- ret = HYPERVISOR_memory_op(release ? XENMEM_decrease_reservation : XENMEM_populate_physmap,
- &reservation);
- WARN(ret != 1, "Failed to %s pfn %lx err=%d\n",
- release ? "release" : "populate", pfn, ret);
+/*
+ * Mark all pfns of extra mem as invalid in p2m list.
+ */
+void __init xen_inv_extra_mem(void)
+{
+ unsigned long pfn, pfn_s, pfn_e;
+ int i;
- if (ret == 1) {
- if (!early_set_phys_to_machine(pfn, release ? INVALID_P2M_ENTRY : frame)) {
- if (release)
- break;
- set_xen_guest_handle(reservation.extent_start, &frame);
- reservation.nr_extents = 1;
- ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
- &reservation);
- break;
- }
- len++;
- } else
- break;
+ for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+ pfn_s = PFN_DOWN(xen_extra_mem[i].start);
+ pfn_e = PFN_UP(xen_extra_mem[i].start + xen_extra_mem[i].size);
+ for (pfn = pfn_s; pfn < pfn_e; pfn++)
+ set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
}
- if (len)
- printk(KERN_INFO "%s %lx-%lx pfn range: %lu pages %s\n",
- release ? "Freeing" : "Populating",
- start, end, len,
- release ? "freed" : "added");
-
- return len;
}
/*
@@ -198,26 +210,62 @@ static unsigned long __init xen_find_pfn_range(
return done;
}
+static int __init xen_free_mfn(unsigned long mfn)
+{
+ struct xen_memory_reservation reservation = {
+ .address_bits = 0,
+ .extent_order = 0,
+ .domid = DOMID_SELF
+ };
+
+ set_xen_guest_handle(reservation.extent_start, &mfn);
+ reservation.nr_extents = 1;
+
+ return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
+}
+
/*
- * This releases a chunk of memory and then does the identity map. It's used as
+ * This releases a chunk of memory and then does the identity map. It's used
* as a fallback if the remapping fails.
*/
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
unsigned long end_pfn, unsigned long nr_pages, unsigned long *identity,
unsigned long *released)
{
+ unsigned long len = 0;
+ unsigned long pfn, end;
+ int ret;
+
WARN_ON(start_pfn > end_pfn);
+ end = min(end_pfn, nr_pages);
+ for (pfn = start_pfn; pfn < end; pfn++) {
+ unsigned long mfn = pfn_to_mfn(pfn);
+
+ /* Make sure pfn exists to start with */
+ if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
+ continue;
+
+ ret = xen_free_mfn(mfn);
+ WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
+
+ if (ret == 1) {
+ if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
+ break;
+ len++;
+ } else
+ break;
+ }
+
/* Need to release pages first */
- *released += xen_do_chunk(start_pfn, min(end_pfn, nr_pages), true);
+ *released += len;
*identity += set_phys_range_identity(start_pfn, end_pfn);
}
/*
- * Helper function to update both the p2m and m2p tables.
+ * Helper function to update the p2m and m2p tables and kernel mapping.
*/
-static unsigned long __init xen_update_mem_tables(unsigned long pfn,
- unsigned long mfn)
+static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
{
struct mmu_update update = {
.ptr = ((unsigned long long)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
@@ -225,161 +273,88 @@ static unsigned long __init xen_update_mem_tables(unsigned long pfn,
};
/* Update p2m */
- if (!early_set_phys_to_machine(pfn, mfn)) {
+ if (!set_phys_to_machine(pfn, mfn)) {
WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n",
pfn, mfn);
- return false;
+ BUG();
}
/* Update m2p */
if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) {
WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld\n",
mfn, pfn);
- return false;
+ BUG();
}
- return true;
+ /* Update kernel mapping, but not for highmem. */
+ if ((pfn << PAGE_SHIFT) >= __pa(high_memory))
+ return;
+
+ if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
+ mfn_pte(mfn, PAGE_KERNEL), 0)) {
+ WARN(1, "Failed to update kernel mapping for mfn=%ld pfn=%ld\n",
+ mfn, pfn);
+ BUG();
+ }
}
/*
* This function updates the p2m and m2p tables with an identity map from
- * start_pfn to start_pfn+size and remaps the underlying RAM of the original
- * allocation at remap_pfn. It must do so carefully in P2M_PER_PAGE sized blocks
- * to not exhaust the reserved brk space. Doing it in properly aligned blocks
- * ensures we only allocate the minimum required leaf pages in the p2m table. It
- * copies the existing mfns from the p2m table under the 1:1 map, overwrites
- * them with the identity map and then updates the p2m and m2p tables with the
- * remapped memory.
+ * start_pfn to start_pfn+size and prepares remapping the underlying RAM of the
+ * original allocation at remap_pfn. The information needed for remapping is
+ * saved in the memory itself to avoid the need for allocating buffers. The
+ * complete remap information is contained in a list of MFNs each containing
+ * up to REMAP_SIZE MFNs and the start target PFN for doing the remap.
+ * This enables us to preserve the original mfn sequence while doing the
+ * remapping at a time when the memory management is capable of allocating
+ * virtual and physical memory in arbitrary amounts, see 'xen_remap_memory' and
+ * its callers.
*/
-static unsigned long __init xen_do_set_identity_and_remap_chunk(
+static void __init xen_do_set_identity_and_remap_chunk(
unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
{
+ unsigned long buf = (unsigned long)&xen_remap_buf;
+ unsigned long mfn_save, mfn;
unsigned long ident_pfn_iter, remap_pfn_iter;
- unsigned long ident_start_pfn_align, remap_start_pfn_align;
- unsigned long ident_end_pfn_align, remap_end_pfn_align;
- unsigned long ident_boundary_pfn, remap_boundary_pfn;
- unsigned long ident_cnt = 0;
- unsigned long remap_cnt = 0;
+ unsigned long ident_end_pfn = start_pfn + size;
unsigned long left = size;
- unsigned long mod;
- int i;
+ unsigned long ident_cnt = 0;
+ unsigned int i, chunk;
WARN_ON(size == 0);
BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));
- /*
- * Determine the proper alignment to remap memory in P2M_PER_PAGE sized
- * blocks. We need to keep track of both the existing pfn mapping and
- * the new pfn remapping.
- */
- mod = start_pfn % P2M_PER_PAGE;
- ident_start_pfn_align =
- mod ? (start_pfn - mod + P2M_PER_PAGE) : start_pfn;
- mod = remap_pfn % P2M_PER_PAGE;
- remap_start_pfn_align =
- mod ? (remap_pfn - mod + P2M_PER_PAGE) : remap_pfn;
- mod = (start_pfn + size) % P2M_PER_PAGE;
- ident_end_pfn_align = start_pfn + size - mod;
- mod = (remap_pfn + size) % P2M_PER_PAGE;
- remap_end_pfn_align = remap_pfn + size - mod;
-
- /* Iterate over each p2m leaf node in each range */
- for (ident_pfn_iter = ident_start_pfn_align, remap_pfn_iter = remap_start_pfn_align;
- ident_pfn_iter < ident_end_pfn_align && remap_pfn_iter < remap_end_pfn_align;
- ident_pfn_iter += P2M_PER_PAGE, remap_pfn_iter += P2M_PER_PAGE) {
- /* Check we aren't past the end */
- BUG_ON(ident_pfn_iter + P2M_PER_PAGE > start_pfn + size);
- BUG_ON(remap_pfn_iter + P2M_PER_PAGE > remap_pfn + size);
-
- /* Save p2m mappings */
- for (i = 0; i < P2M_PER_PAGE; i++)
- xen_remap_buf[i] = pfn_to_mfn(ident_pfn_iter + i);
-
- /* Set identity map which will free a p2m leaf */
- ident_cnt += set_phys_range_identity(ident_pfn_iter,
- ident_pfn_iter + P2M_PER_PAGE);
+ mfn_save = virt_to_mfn(buf);
-#ifdef DEBUG
- /* Helps verify a p2m leaf has been freed */
- for (i = 0; i < P2M_PER_PAGE; i++) {
- unsigned int pfn = ident_pfn_iter + i;
- BUG_ON(pfn_to_mfn(pfn) != pfn);
- }
-#endif
- /* Now remap memory */
- for (i = 0; i < P2M_PER_PAGE; i++) {
- unsigned long mfn = xen_remap_buf[i];
-
- /* This will use the p2m leaf freed above */
- if (!xen_update_mem_tables(remap_pfn_iter + i, mfn)) {
- WARN(1, "Failed to update mem mapping for pfn=%ld mfn=%ld\n",
- remap_pfn_iter + i, mfn);
- return 0;
- }
-
- remap_cnt++;
- }
-
- left -= P2M_PER_PAGE;
- }
-
- /* Max boundary space possible */
- BUG_ON(left > (P2M_PER_PAGE - 1) * 2);
+ for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
+ ident_pfn_iter < ident_end_pfn;
+ ident_pfn_iter += REMAP_SIZE, remap_pfn_iter += REMAP_SIZE) {
+ chunk = (left < REMAP_SIZE) ? left : REMAP_SIZE;
- /* Now handle the boundary conditions */
- ident_boundary_pfn = start_pfn;
- remap_boundary_pfn = remap_pfn;
- for (i = 0; i < left; i++) {
- unsigned long mfn;
+ /* Map first pfn to xen_remap_buf */
+ mfn = pfn_to_mfn(ident_pfn_iter);
+ set_pte_mfn(buf, mfn, PAGE_KERNEL);
- /* These two checks move from the start to end boundaries */
- if (ident_boundary_pfn == ident_start_pfn_align)
- ident_boundary_pfn = ident_pfn_iter;
- if (remap_boundary_pfn == remap_start_pfn_align)
- remap_boundary_pfn = remap_pfn_iter;
+ /* Save mapping information in page */
+ xen_remap_buf.next_area_mfn = xen_remap_mfn;
+ xen_remap_buf.target_pfn = remap_pfn_iter;
+ xen_remap_buf.size = chunk;
+ for (i = 0; i < chunk; i++)
+ xen_remap_buf.mfns[i] = pfn_to_mfn(ident_pfn_iter + i);
- /* Check we aren't past the end */
- BUG_ON(ident_boundary_pfn >= start_pfn + size);
- BUG_ON(remap_boundary_pfn >= remap_pfn + size);
-
- mfn = pfn_to_mfn(ident_boundary_pfn);
-
- if (!xen_update_mem_tables(remap_boundary_pfn, mfn)) {
- WARN(1, "Failed to update mem mapping for pfn=%ld mfn=%ld\n",
- remap_pfn_iter + i, mfn);
- return 0;
- }
- remap_cnt++;
+ /* Put remap buf into list. */
+ xen_remap_mfn = mfn;
- ident_boundary_pfn++;
- remap_boundary_pfn++;
- }
+ /* Set identity map */
+ ident_cnt += set_phys_range_identity(ident_pfn_iter,
+ ident_pfn_iter + chunk);
- /* Finish up the identity map */
- if (ident_start_pfn_align >= ident_end_pfn_align) {
- /*
- * In this case we have an identity range which does not span an
- * aligned block so everything needs to be identity mapped here.
- * If we didn't check this we might remap too many pages since
- * the align boundaries are not meaningful in this case.
- */
- ident_cnt += set_phys_range_identity(start_pfn,
- start_pfn + size);
- } else {
- /* Remapped above so check each end of the chunk */
- if (start_pfn < ident_start_pfn_align)
- ident_cnt += set_phys_range_identity(start_pfn,
- ident_start_pfn_align);
- if (start_pfn + size > ident_pfn_iter)
- ident_cnt += set_phys_range_identity(ident_pfn_iter,
- start_pfn + size);
+ left -= chunk;
}
- BUG_ON(ident_cnt != size);
- BUG_ON(remap_cnt != size);
-
- return size;
+ /* Restore old xen_remap_buf mapping */
+ set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
}
/*
@@ -396,8 +371,7 @@ static unsigned long __init xen_do_set_identity_and_remap_chunk(
static unsigned long __init xen_set_identity_and_remap_chunk(
const struct e820entry *list, size_t map_size, unsigned long start_pfn,
unsigned long end_pfn, unsigned long nr_pages, unsigned long remap_pfn,
- unsigned long *identity, unsigned long *remapped,
- unsigned long *released)
+ unsigned long *identity, unsigned long *released)
{
unsigned long pfn;
unsigned long i = 0;
@@ -431,19 +405,12 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
if (size > remap_range_size)
size = remap_range_size;
- if (!xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn)) {
- WARN(1, "Failed to remap 1:1 memory cur_pfn=%ld size=%ld remap_pfn=%ld\n",
- cur_pfn, size, remap_pfn);
- xen_set_identity_and_release_chunk(cur_pfn,
- cur_pfn + left, nr_pages, identity, released);
- break;
- }
+ xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn);
/* Update variables to reflect new mappings. */
i += size;
remap_pfn += size;
*identity += size;
- *remapped += size;
}
/*
@@ -458,13 +425,12 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
return remap_pfn;
}
-static unsigned long __init xen_set_identity_and_remap(
+static void __init xen_set_identity_and_remap(
const struct e820entry *list, size_t map_size, unsigned long nr_pages,
unsigned long *released)
{
phys_addr_t start = 0;
unsigned long identity = 0;
- unsigned long remapped = 0;
unsigned long last_pfn = nr_pages;
const struct e820entry *entry;
unsigned long num_released = 0;
@@ -494,8 +460,7 @@ static unsigned long __init xen_set_identity_and_remap(
last_pfn = xen_set_identity_and_remap_chunk(
list, map_size, start_pfn,
end_pfn, nr_pages, last_pfn,
- &identity, &remapped,
- &num_released);
+ &identity, &num_released);
start = end;
}
}
@@ -503,12 +468,63 @@ static unsigned long __init xen_set_identity_and_remap(
*released = num_released;
pr_info("Set %ld page(s) to 1-1 mapping\n", identity);
- pr_info("Remapped %ld page(s), last_pfn=%ld\n", remapped,
- last_pfn);
pr_info("Released %ld page(s)\n", num_released);
+}
+
+/*
+ * Remap the memory prepared in xen_do_set_identity_and_remap_chunk().
+ * The remap information (which mfn is remapped to which pfn) is contained
+ * in the memory to be remapped itself, in a linked list anchored at
+ * xen_remap_mfn.
+ * This scheme allows the different chunks to be remapped in arbitrary order
+ * while the resulting mapping will be independent of the order.
+ */
+void __init xen_remap_memory(void)
+{
+ unsigned long buf = (unsigned long)&xen_remap_buf;
+ unsigned long mfn_save, mfn, pfn;
+ unsigned long remapped = 0;
+ unsigned int i;
+ unsigned long pfn_s = ~0UL;
+ unsigned long len = 0;
+
+ mfn_save = virt_to_mfn(buf);
+
+ while (xen_remap_mfn != INVALID_P2M_ENTRY) {
+ /* Map the remap information */
+ set_pte_mfn(buf, xen_remap_mfn, PAGE_KERNEL);
- return last_pfn;
+ BUG_ON(xen_remap_mfn != xen_remap_buf.mfns[0]);
+
+ pfn = xen_remap_buf.target_pfn;
+ for (i = 0; i < xen_remap_buf.size; i++) {
+ mfn = xen_remap_buf.mfns[i];
+ xen_update_mem_tables(pfn, mfn);
+ remapped++;
+ pfn++;
+ }
+ if (pfn_s == ~0UL || pfn == pfn_s) {
+ pfn_s = xen_remap_buf.target_pfn;
+ len += xen_remap_buf.size;
+ } else if (pfn_s + len == xen_remap_buf.target_pfn) {
+ len += xen_remap_buf.size;
+ } else {
+ xen_del_extra_mem(PFN_PHYS(pfn_s), PFN_PHYS(len));
+ pfn_s = xen_remap_buf.target_pfn;
+ len = xen_remap_buf.size;
+ }
+
+ mfn = xen_remap_mfn;
+ xen_remap_mfn = xen_remap_buf.next_area_mfn;
+ }
+
+ if (pfn_s != ~0UL && len)
+ xen_del_extra_mem(PFN_PHYS(pfn_s), PFN_PHYS(len));
+
+ set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
+
+ pr_info("Remapped %ld page(s)\n", remapped);
}
+
static unsigned long __init xen_get_max_pages(void)
{
unsigned long max_pages = MAX_DOMAIN_PAGES;
@@ -569,7 +585,6 @@ char * __init xen_memory_setup(void)
int rc;
struct xen_memory_map memmap;
unsigned long max_pages;
- unsigned long last_pfn = 0;
unsigned long extra_pages = 0;
int i;
int op;
@@ -616,17 +631,14 @@ char * __init xen_memory_setup(void)
extra_pages += max_pages - max_pfn;
/*
- * Set identity map on non-RAM pages and remap the underlying RAM.
+ * Set identity map on non-RAM pages and prepare remapping the
+ * underlying RAM.
*/
- last_pfn = xen_set_identity_and_remap(map, memmap.nr_entries, max_pfn,
- &xen_released_pages);
+ xen_set_identity_and_remap(map, memmap.nr_entries, max_pfn,
+ &xen_released_pages);
extra_pages += xen_released_pages;
- if (last_pfn > max_pfn) {
- max_pfn = min(MAX_DOMAIN_PAGES, last_pfn);
- mem_end = PFN_PHYS(max_pfn);
- }
/*
* Clamp the amount of extra memory to a EXTRA_MEM_RATIO
* factor the base size. On non-highmem systems, the base
@@ -653,6 +665,7 @@ char * __init xen_memory_setup(void)
size = min(size, (u64)extra_pages * PAGE_SIZE);
extra_pages -= size / PAGE_SIZE;
xen_add_extra_mem(addr, size);
+ xen_max_p2m_pfn = PFN_DOWN(addr + size);
} else
type = E820_UNUSABLE;
}
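For reference, a userspace model (illustration only, not the kernel implementation) of the remap bookkeeping introduced in setup.c above: each saved chunk records its target pfn, its length and up to REMAP_SIZE mfns, plus a link to the next chunk, so xen_remap_memory() can later walk the chain and replay the mappings in any order. In the kernel the link is itself an mfn and the chunk lives in the page being remapped; here plain heap allocations stand in for that.

#include <stdio.h>
#include <stdlib.h>

#define REMAP_SIZE 4	/* tiny on purpose; the kernel uses P2M_PER_PAGE - 3 */

struct remap_chunk {
	struct remap_chunk *next;
	unsigned long target_pfn;
	unsigned long size;
	unsigned long mfns[REMAP_SIZE];
};

static struct remap_chunk *remap_list;

/* record "remap these mfns to pfns starting at target_pfn" */
static void save_chunk(unsigned long target_pfn, const unsigned long *mfns,
		       unsigned long size)
{
	struct remap_chunk *c = malloc(sizeof(*c));
	unsigned long i;

	c->target_pfn = target_pfn;
	c->size = size;
	for (i = 0; i < size; i++)
		c->mfns[i] = mfns[i];
	c->next = remap_list;
	remap_list = c;
}

/* later: walk the chain and apply each saved mapping */
static void replay_chunks(void)
{
	while (remap_list) {
		struct remap_chunk *c = remap_list;
		unsigned long i;

		for (i = 0; i < c->size; i++)
			printf("map pfn %lu -> mfn %lu\n",
			       c->target_pfn + i, c->mfns[i]);
		remap_list = c->next;
		free(c);
	}
}

int main(void)
{
	unsigned long a[] = { 1000, 1001, 1002 };
	unsigned long b[] = { 2000, 2001 };

	save_chunk(16, a, 3);
	save_chunk(32, b, 2);
	replay_chunks();
	return 0;
}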
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 4ab9298c5e1..5686bd9d58c 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -29,11 +29,13 @@ void xen_build_mfn_list_list(void);
void xen_setup_machphys_mapping(void);
void xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
void xen_reserve_top(void);
-extern unsigned long xen_max_p2m_pfn;
void xen_mm_pin_all(void);
void xen_mm_unpin_all(void);
+unsigned long __ref xen_chk_extra_mem(unsigned long pfn);
+void __init xen_inv_extra_mem(void);
+void __init xen_remap_memory(void);
char * __init xen_memory_setup(void);
char * xen_auto_xlated_memory_setup(void);
void __init xen_arch_setup(void);
@@ -46,7 +48,7 @@ void xen_hvm_init_shared_info(void);
void xen_unplug_emulated_devices(void);
void __init xen_build_dynamic_phys_to_machine(void);
-unsigned long __init xen_revector_p2m_tree(void);
+void __init xen_vmalloc_p2m_tree(void);
void xen_init_irq_ops(void);
void xen_setup_timer(int cpu);
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 81f57e8c8f1..e31d4949124 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -98,12 +98,6 @@ config XTENSA_VARIANT_DC233C
help
This variant refers to Tensilica's Diamond 233L Standard core Rev.C (LE).
-config XTENSA_VARIANT_S6000
- bool "s6000 - Stretch software configurable processor"
- select VARIANT_IRQ_SWITCH
- select ARCH_REQUIRE_GPIOLIB
- select XTENSA_CALIBRATE_CCOUNT
-
config XTENSA_VARIANT_CUSTOM
bool "Custom Xtensa processor configuration"
select MAY_HAVE_SMP
@@ -126,7 +120,6 @@ config XTENSA_VARIANT_NAME
default "dc232b" if XTENSA_VARIANT_DC232B
default "dc233c" if XTENSA_VARIANT_DC233C
default "fsf" if XTENSA_VARIANT_FSF
- default "s6000" if XTENSA_VARIANT_S6000
default XTENSA_VARIANT_CUSTOM_NAME if XTENSA_VARIANT_CUSTOM
config XTENSA_VARIANT_MMU
@@ -191,7 +184,6 @@ config HOTPLUG_CPU
config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
bool "Initialize Xtensa MMU inside the Linux kernel code"
- depends on MMU
default y
help
Earlier version initialized the MMU in the exception vector
@@ -311,15 +303,10 @@ config XTENSA_PLATFORM_XT2000
XT2000 is the name of Tensilica's feature-rich emulation platform.
This hardware is capable of running a full Linux distribution.
-config XTENSA_PLATFORM_S6105
- bool "S6105"
- select HAVE_IDE
- select SERIAL_CONSOLE
- select NO_IOPORT_MAP
-
config XTENSA_PLATFORM_XTFPGA
bool "XTFPGA"
select ETHOC if ETHERNET
+ select PLATFORM_WANT_DEFAULT_MEM
select SERIAL_CONSOLE
select XTENSA_CALIBRATE_CCOUNT
help
@@ -406,6 +393,41 @@ source "drivers/pcmcia/Kconfig"
source "drivers/pci/hotplug/Kconfig"
+config PLATFORM_WANT_DEFAULT_MEM
+ def_bool n
+
+config DEFAULT_MEM_START
+ hex "Physical address of the default memory area start"
+ depends on PLATFORM_WANT_DEFAULT_MEM
+ default 0x00000000 if MMU
+ default 0x40000000 if !MMU
+ help
+ This is a fallback start address of the default memory area; it is
+ used when no physical memory size is passed through the DTB or
+ through a boot parameter from the bootloader.
+
+ In noMMU configuration the following parameters are derived from it:
+ - kernel load address;
+ - kernel entry point address;
+ - relocatable vectors base address;
+ - uBoot load address;
+ - TASK_SIZE.
+
+ If unsure, leave the default value here.
+
+config DEFAULT_MEM_SIZE
+ hex "Maximal size of the default memory area"
+ depends on PLATFORM_WANT_DEFAULT_MEM
+ default 0x04000000
+ help
+ This is a fallback size of the default memory area; it is used when
+ no physical memory size is passed through the DTB or through a boot
+ parameter from the bootloader.
+
+ It's also used for TASK_SIZE calculation in noMMU configuration.
+
+ If unsure, leave the default value here.
+
endmenu
menu "Executable file formats"
@@ -414,6 +436,12 @@ source "fs/Kconfig.binfmt"
endmenu
+menu "Power management options"
+
+source "kernel/power/Kconfig"
+
+endmenu
+
source "net/Kconfig"
source "drivers/Kconfig"
diff --git a/arch/xtensa/Kconfig.debug b/arch/xtensa/Kconfig.debug
index af7da74d535..8430af27de0 100644
--- a/arch/xtensa/Kconfig.debug
+++ b/arch/xtensa/Kconfig.debug
@@ -4,7 +4,7 @@ source "lib/Kconfig.debug"
config DEBUG_TLB_SANITY
bool "Debug TLB sanity"
- depends on DEBUG_KERNEL
+ depends on DEBUG_KERNEL && MMU
help
Enable this to turn on TLB sanity check on each entry to userspace.
This check can spot missing TLB invalidation/wrong PTE permissions/
@@ -14,7 +14,7 @@ config DEBUG_TLB_SANITY
config LD_NO_RELAX
bool "Disable linker relaxation"
- default n
+ default y
help
Enable this function to disable link-time optimizations.
The default linker behavior is to combine identical literal
diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile
index 472533064b4..f9e6a068aaf 100644
--- a/arch/xtensa/Makefile
+++ b/arch/xtensa/Makefile
@@ -35,7 +35,6 @@ endif
platform-$(CONFIG_XTENSA_PLATFORM_XT2000) := xt2000
platform-$(CONFIG_XTENSA_PLATFORM_ISS) := iss
-platform-$(CONFIG_XTENSA_PLATFORM_S6105) := s6105
platform-$(CONFIG_XTENSA_PLATFORM_XTFPGA) := xtfpga
PLATFORM = $(platform-y)
diff --git a/arch/xtensa/boot/boot-elf/boot.lds.S b/arch/xtensa/boot/boot-elf/boot.lds.S
index 932b58ef33d..958b33af96b 100644
--- a/arch/xtensa/boot/boot-elf/boot.lds.S
+++ b/arch/xtensa/boot/boot-elf/boot.lds.S
@@ -41,6 +41,7 @@ SECTIONS
__bss_end = .;
}
+#ifdef CONFIG_MMU
/*
* This is a remapped copy of the Reset Vector Code.
* It keeps gdb in sync with the PC after switching
@@ -51,4 +52,5 @@ SECTIONS
{
*(.ResetVector.remapped_text)
}
+#endif
}
diff --git a/arch/xtensa/boot/boot-elf/bootstrap.S b/arch/xtensa/boot/boot-elf/bootstrap.S
index 1388a499753..9341a575069 100644
--- a/arch/xtensa/boot/boot-elf/bootstrap.S
+++ b/arch/xtensa/boot/boot-elf/bootstrap.S
@@ -20,6 +20,7 @@
#include <asm/page.h>
#include <asm/cacheasm.h>
#include <asm/initialize_mmu.h>
+#include <asm/vectors.h>
#include <linux/linkage.h>
.section .ResetVector.text, "ax"
@@ -34,12 +35,7 @@ _ResetVector:
.align 4
RomInitAddr:
-#if defined(CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX) && \
- XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
- .word 0x00003000
-#else
- .word 0xd0003000
-#endif
+ .word LOAD_MEMORY_ADDRESS
RomBootParam:
.word _bootparam
_bootparam:
@@ -79,6 +75,7 @@ reset:
movi a4, 0
jx a0
+#ifdef CONFIG_MMU
.align 4
.section .ResetVector.remapped_text, "x"
@@ -102,3 +99,4 @@ _RemappedSetupMMU:
#endif
.end no-absolute-literals
+#endif
diff --git a/arch/xtensa/boot/boot-uboot/Makefile b/arch/xtensa/boot/boot-uboot/Makefile
index 545759819ef..403fcf23405 100644
--- a/arch/xtensa/boot/boot-uboot/Makefile
+++ b/arch/xtensa/boot/boot-uboot/Makefile
@@ -4,11 +4,15 @@
# for more details.
#
+ifdef CONFIG_MMU
ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
UIMAGE_LOADADDR = 0x00003000
else
UIMAGE_LOADADDR = 0xd0003000
endif
+else
+UIMAGE_LOADADDR = $(shell printf "0x%x" $$(( ${CONFIG_DEFAULT_MEM_START} + 0x3000 )) )
+endif
UIMAGE_COMPRESSION = gzip
$(obj)/../uImage: vmlinux.bin.gz FORCE
diff --git a/arch/xtensa/configs/iss_defconfig b/arch/xtensa/configs/iss_defconfig
index b966baf82ca..e4d193e7a30 100644
--- a/arch/xtensa/configs/iss_defconfig
+++ b/arch/xtensa/configs/iss_defconfig
@@ -143,7 +143,6 @@ CONFIG_MMU=y
#
CONFIG_XTENSA_VARIANT_FSF=y
# CONFIG_XTENSA_VARIANT_DC232B is not set
-# CONFIG_XTENSA_VARIANT_S6000 is not set
# CONFIG_XTENSA_UNALIGNED_USER is not set
# CONFIG_PREEMPT is not set
CONFIG_XTENSA_CALIBRATE_CCOUNT=y
@@ -161,7 +160,6 @@ CONFIG_XTENSA_ISS_NETWORK=y
#
CONFIG_XTENSA_PLATFORM_ISS=y
# CONFIG_XTENSA_PLATFORM_XT2000 is not set
-# CONFIG_XTENSA_PLATFORM_S6105 is not set
# CONFIG_GENERIC_CALIBRATE_DELAY is not set
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="console=ttyS0,38400 eth0=tuntap,,tap0 ip=192.168.168.5:192.168.168.1 root=nfs nfsroot=192.168.168.1:/opt/montavista/pro/devkit/xtensa/linux_be/target"
@@ -759,3 +757,4 @@ CONFIG_GENERIC_FIND_LAST_BIT=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_DMA=y
CONFIG_NLATTR=y
+CONFIG_LD_NO_RELAX=y
diff --git a/arch/xtensa/configs/s6105_defconfig b/arch/xtensa/configs/s6105_defconfig
deleted file mode 100644
index 9471265b8ca..00000000000
--- a/arch/xtensa/configs/s6105_defconfig
+++ /dev/null
@@ -1,615 +0,0 @@
-#
-# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.29-rc7-s6
-# Tue Mar 10 11:09:26 2009
-#
-# CONFIG_FRAME_POINTER is not set
-CONFIG_ZONE_DMA=y
-CONFIG_XTENSA=y
-CONFIG_RWSEM_XCHGADD_ALGORITHM=y
-CONFIG_GENERIC_FIND_NEXT_BIT=y
-CONFIG_GENERIC_HWEIGHT=y
-# CONFIG_ARCH_HAS_ILOG2_U32 is not set
-# CONFIG_ARCH_HAS_ILOG2_U64 is not set
-CONFIG_NO_IOPORT_MAP=y
-CONFIG_HZ=100
-CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
-
-#
-# General setup
-#
-CONFIG_EXPERIMENTAL=y
-CONFIG_BROKEN_ON_SMP=y
-CONFIG_INIT_ENV_ARG_LIMIT=32
-CONFIG_LOCALVERSION=""
-CONFIG_LOCALVERSION_AUTO=y
-CONFIG_SYSVIPC=y
-CONFIG_SYSVIPC_SYSCTL=y
-# CONFIG_POSIX_MQUEUE is not set
-# CONFIG_BSD_PROCESS_ACCT is not set
-# CONFIG_TASKSTATS is not set
-# CONFIG_AUDIT is not set
-
-#
-# RCU Subsystem
-#
-# CONFIG_CLASSIC_RCU is not set
-# CONFIG_TREE_RCU is not set
-CONFIG_PREEMPT_RCU=y
-# CONFIG_RCU_TRACE is not set
-# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=16
-# CONFIG_GROUP_SCHED is not set
-# CONFIG_CGROUPS is not set
-# CONFIG_SYSFS_DEPRECATED_V2 is not set
-# CONFIG_RELAY is not set
-# CONFIG_NAMESPACES is not set
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE=""
-CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-CONFIG_SYSCTL=y
-CONFIG_EXPERT=y
-CONFIG_SYSCTL_SYSCALL=y
-CONFIG_KALLSYMS=y
-# CONFIG_KALLSYMS_ALL is not set
-# CONFIG_KALLSYMS_EXTRA_PASS is not set
-# CONFIG_HOTPLUG is not set
-CONFIG_PRINTK=y
-CONFIG_BUG=y
-CONFIG_ELF_CORE=y
-# CONFIG_COMPAT_BRK is not set
-CONFIG_BASE_FULL=y
-CONFIG_FUTEX=y
-CONFIG_ANON_INODES=y
-CONFIG_EPOLL=y
-CONFIG_SIGNALFD=y
-CONFIG_TIMERFD=y
-CONFIG_EVENTFD=y
-CONFIG_AIO=y
-CONFIG_VM_EVENT_COUNTERS=y
-CONFIG_SLAB=y
-# CONFIG_SLUB is not set
-# CONFIG_SLOB is not set
-# CONFIG_PROFILING is not set
-# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
-CONFIG_SLABINFO=y
-CONFIG_RT_MUTEXES=y
-CONFIG_BASE_SMALL=0
-# CONFIG_MODULES is not set
-CONFIG_BLOCK=y
-# CONFIG_LBD is not set
-# CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_BLK_DEV_INTEGRITY is not set
-
-#
-# IO Schedulers
-#
-CONFIG_IOSCHED_NOOP=y
-# CONFIG_IOSCHED_AS is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-CONFIG_IOSCHED_CFQ=y
-# CONFIG_DEFAULT_AS is not set
-# CONFIG_DEFAULT_DEADLINE is not set
-CONFIG_DEFAULT_CFQ=y
-# CONFIG_DEFAULT_NOOP is not set
-CONFIG_DEFAULT_IOSCHED="cfq"
-# CONFIG_FREEZER is not set
-# CONFIG_MMU is not set
-CONFIG_VARIANT_IRQ_SWITCH=y
-
-#
-# Processor type and features
-#
-# CONFIG_XTENSA_VARIANT_FSF is not set
-# CONFIG_XTENSA_VARIANT_DC232B is not set
-CONFIG_XTENSA_VARIANT_S6000=y
-# CONFIG_XTENSA_UNALIGNED_USER is not set
-CONFIG_PREEMPT=y
-# CONFIG_HIGHMEM is not set
-CONFIG_XTENSA_CALIBRATE_CCOUNT=y
-CONFIG_SERIAL_CONSOLE=y
-# CONFIG_XTENSA_ISS_NETWORK is not set
-
-#
-# Bus options
-#
-# CONFIG_PCI is not set
-# CONFIG_ARCH_SUPPORTS_MSI is not set
-
-#
-# Platform options
-#
-# CONFIG_XTENSA_PLATFORM_ISS is not set
-# CONFIG_XTENSA_PLATFORM_XT2000 is not set
-CONFIG_XTENSA_PLATFORM_S6105=y
-CONFIG_GENERIC_CALIBRATE_DELAY=y
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttyS1,38400 debug bootmem_debug loglevel=7"
-CONFIG_SELECT_MEMORY_MODEL=y
-CONFIG_FLATMEM_MANUAL=y
-# CONFIG_DISCONTIGMEM_MANUAL is not set
-# CONFIG_SPARSEMEM_MANUAL is not set
-CONFIG_FLATMEM=y
-CONFIG_FLAT_NODE_MEM_MAP=y
-CONFIG_PAGEFLAGS_EXTENDED=y
-CONFIG_SPLIT_PTLOCK_CPUS=4
-# CONFIG_PHYS_ADDR_T_64BIT is not set
-CONFIG_ZONE_DMA_FLAG=1
-CONFIG_VIRT_TO_BUS=y
-
-#
-# Executable file formats
-#
-CONFIG_KCORE_ELF=y
-CONFIG_BINFMT_FLAT=y
-# CONFIG_BINFMT_ZFLAT is not set
-# CONFIG_BINFMT_SHARED_FLAT is not set
-# CONFIG_HAVE_AOUT is not set
-# CONFIG_BINFMT_MISC is not set
-CONFIG_NET=y
-
-#
-# Networking options
-#
-CONFIG_COMPAT_NET_DEV_OPS=y
-CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
-CONFIG_UNIX=y
-# CONFIG_NET_KEY is not set
-CONFIG_INET=y
-# CONFIG_IP_MULTICAST is not set
-# CONFIG_IP_ADVANCED_ROUTER is not set
-CONFIG_IP_FIB_HASH=y
-# CONFIG_IP_PNP is not set
-# CONFIG_NET_IPIP is not set
-# CONFIG_NET_IPGRE is not set
-# CONFIG_ARPD is not set
-# CONFIG_SYN_COOKIES is not set
-# CONFIG_INET_AH is not set
-# CONFIG_INET_ESP is not set
-# CONFIG_INET_IPCOMP is not set
-# CONFIG_INET_XFRM_TUNNEL is not set
-# CONFIG_INET_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-# CONFIG_INET_DIAG is not set
-# CONFIG_TCP_CONG_ADVANCED is not set
-CONFIG_TCP_CONG_CUBIC=y
-CONFIG_DEFAULT_TCP_CONG="cubic"
-# CONFIG_TCP_MD5SIG is not set
-# CONFIG_IPV6 is not set
-# CONFIG_NETWORK_SECMARK is not set
-# CONFIG_NETFILTER is not set
-# CONFIG_IP_DCCP is not set
-# CONFIG_IP_SCTP is not set
-# CONFIG_TIPC is not set
-# CONFIG_ATM is not set
-# CONFIG_BRIDGE is not set
-# CONFIG_NET_DSA is not set
-# CONFIG_VLAN_8021Q is not set
-# CONFIG_DECNET is not set
-# CONFIG_LLC2 is not set
-# CONFIG_IPX is not set
-# CONFIG_ATALK is not set
-# CONFIG_X25 is not set
-# CONFIG_LAPB is not set
-# CONFIG_ECONET is not set
-# CONFIG_WAN_ROUTER is not set
-# CONFIG_NET_SCHED is not set
-# CONFIG_DCB is not set
-
-#
-# Network testing
-#
-# CONFIG_NET_PKTGEN is not set
-# CONFIG_HAMRADIO is not set
-# CONFIG_CAN is not set
-# CONFIG_IRDA is not set
-# CONFIG_BT is not set
-# CONFIG_AF_RXRPC is not set
-# CONFIG_PHONET is not set
-# CONFIG_WIRELESS is not set
-# CONFIG_WIMAX is not set
-# CONFIG_RFKILL is not set
-# CONFIG_NET_9P is not set
-
-#
-# Device Drivers
-#
-
-#
-# Generic Driver Options
-#
-CONFIG_STANDALONE=y
-CONFIG_PREVENT_FIRMWARE_BUILD=y
-# CONFIG_DEBUG_DRIVER is not set
-# CONFIG_DEBUG_DEVRES is not set
-# CONFIG_SYS_HYPERVISOR is not set
-# CONFIG_CONNECTOR is not set
-# CONFIG_MTD is not set
-# CONFIG_PARPORT is not set
-CONFIG_BLK_DEV=y
-# CONFIG_BLK_DEV_COW_COMMON is not set
-# CONFIG_BLK_DEV_LOOP is not set
-# CONFIG_BLK_DEV_NBD is not set
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_COUNT=16
-CONFIG_BLK_DEV_RAM_SIZE=4096
-# CONFIG_BLK_DEV_XIP is not set
-# CONFIG_CDROM_PKTCDVD is not set
-# CONFIG_ATA_OVER_ETH is not set
-# CONFIG_BLK_DEV_HD is not set
-# CONFIG_MISC_DEVICES is not set
-CONFIG_HAVE_IDE=y
-# CONFIG_IDE is not set
-
-#
-# SCSI device support
-#
-# CONFIG_RAID_ATTRS is not set
-# CONFIG_SCSI is not set
-# CONFIG_SCSI_DMA is not set
-# CONFIG_SCSI_NETLINK is not set
-# CONFIG_ATA is not set
-# CONFIG_MD is not set
-CONFIG_NETDEVICES=y
-# CONFIG_DUMMY is not set
-# CONFIG_BONDING is not set
-# CONFIG_MACVLAN is not set
-# CONFIG_EQUALIZER is not set
-# CONFIG_TUN is not set
-# CONFIG_VETH is not set
-CONFIG_PHYLIB=y
-
-#
-# MII PHY device drivers
-#
-# CONFIG_MARVELL_PHY is not set
-# CONFIG_DAVICOM_PHY is not set
-# CONFIG_QSEMI_PHY is not set
-# CONFIG_LXT_PHY is not set
-# CONFIG_CICADA_PHY is not set
-# CONFIG_VITESSE_PHY is not set
-CONFIG_SMSC_PHY=y
-# CONFIG_BROADCOM_PHY is not set
-# CONFIG_ICPLUS_PHY is not set
-# CONFIG_REALTEK_PHY is not set
-# CONFIG_NATIONAL_PHY is not set
-# CONFIG_STE10XP is not set
-# CONFIG_LSI_ET1011C_PHY is not set
-# CONFIG_FIXED_PHY is not set
-# CONFIG_MDIO_BITBANG is not set
-# CONFIG_NET_ETHERNET is not set
-CONFIG_NETDEV_1000=y
-CONFIG_S6GMAC=y
-# CONFIG_NETDEV_10000 is not set
-
-#
-# Wireless LAN
-#
-# CONFIG_WLAN_PRE80211 is not set
-# CONFIG_WLAN_80211 is not set
-# CONFIG_IWLWIFI_LEDS is not set
-
-#
-# Enable WiMAX (Networking options) to see the WiMAX drivers
-#
-# CONFIG_WAN is not set
-# CONFIG_PPP is not set
-# CONFIG_SLIP is not set
-# CONFIG_NETCONSOLE is not set
-# CONFIG_NETPOLL is not set
-# CONFIG_NET_POLL_CONTROLLER is not set
-# CONFIG_ISDN is not set
-# CONFIG_PHONE is not set
-
-#
-# Input device support
-#
-# CONFIG_INPUT is not set
-
-#
-# Hardware I/O ports
-#
-# CONFIG_SERIO is not set
-# CONFIG_GAMEPORT is not set
-
-#
-# Character devices
-#
-# CONFIG_VT is not set
-# CONFIG_DEVKMEM is not set
-# CONFIG_SERIAL_NONSTANDARD is not set
-
-#
-# Serial drivers
-#
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=2
-CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-# CONFIG_SERIAL_8250_EXTENDED is not set
-
-#
-# Non-8250 serial port support
-#
-CONFIG_SERIAL_CORE=y
-CONFIG_SERIAL_CORE_CONSOLE=y
-CONFIG_UNIX98_PTYS=y
-# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
-# CONFIG_LEGACY_PTYS is not set
-# CONFIG_IPMI_HANDLER is not set
-# CONFIG_HW_RANDOM is not set
-# CONFIG_R3964 is not set
-# CONFIG_RAW_DRIVER is not set
-# CONFIG_TCG_TPM is not set
-# CONFIG_I2C is not set
-# CONFIG_SPI is not set
-CONFIG_ARCH_REQUIRE_GPIOLIB=y
-CONFIG_GPIOLIB=y
-# CONFIG_DEBUG_GPIO is not set
-# CONFIG_GPIO_SYSFS is not set
-
-#
-# Memory mapped GPIO expanders:
-#
-
-#
-# I2C GPIO expanders:
-#
-
-#
-# PCI GPIO expanders:
-#
-
-#
-# SPI GPIO expanders:
-#
-# CONFIG_W1 is not set
-# CONFIG_POWER_SUPPLY is not set
-# CONFIG_HWMON is not set
-# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
-# CONFIG_WATCHDOG is not set
-CONFIG_SSB_POSSIBLE=y
-
-#
-# Sonics Silicon Backplane
-#
-# CONFIG_SSB is not set
-
-#
-# Multifunction device drivers
-#
-# CONFIG_MFD_CORE is not set
-# CONFIG_MFD_SM501 is not set
-# CONFIG_HTC_PASIC3 is not set
-# CONFIG_MFD_TMIO is not set
-# CONFIG_REGULATOR is not set
-
-#
-# Multimedia devices
-#
-
-#
-# Multimedia core support
-#
-# CONFIG_VIDEO_DEV is not set
-# CONFIG_DVB_CORE is not set
-# CONFIG_VIDEO_MEDIA is not set
-
-#
-# Multimedia drivers
-#
-# CONFIG_DAB is not set
-
-#
-# Graphics support
-#
-# CONFIG_VGASTATE is not set
-# CONFIG_VIDEO_OUTPUT_CONTROL is not set
-# CONFIG_FB is not set
-# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
-
-#
-# Display device support
-#
-# CONFIG_DISPLAY_SUPPORT is not set
-# CONFIG_SOUND is not set
-# CONFIG_USB_SUPPORT is not set
-# CONFIG_MMC is not set
-# CONFIG_MEMSTICK is not set
-# CONFIG_NEW_LEDS is not set
-# CONFIG_ACCESSIBILITY is not set
-CONFIG_RTC_LIB=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_HCTOSYS=y
-CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
-# CONFIG_RTC_DEBUG is not set
-
-#
-# RTC interfaces
-#
-# CONFIG_RTC_INTF_SYSFS is not set
-# CONFIG_RTC_INTF_PROC is not set
-# CONFIG_RTC_INTF_DEV is not set
-# CONFIG_RTC_DRV_TEST is not set
-
-#
-# I2C RTC drivers
-#
-# CONFIG_RTC_DRV_DS1307 is not set
-# CONFIG_RTC_DRV_DS1374 is not set
-# CONFIG_RTC_DRV_DS1672 is not set
-# CONFIG_RTC_DRV_MAX6900 is not set
-# CONFIG_RTC_DRV_RS5C372 is not set
-# CONFIG_RTC_DRV_ISL1208 is not set
-# CONFIG_RTC_DRV_X1205 is not set
-# CONFIG_RTC_DRV_PCF8563 is not set
-# CONFIG_RTC_DRV_PCF8583 is not set
-CONFIG_RTC_DRV_M41T80=y
-# CONFIG_RTC_DRV_M41T80_WDT is not set
-# CONFIG_RTC_DRV_S35390A is not set
-# CONFIG_RTC_DRV_FM3130 is not set
-# CONFIG_RTC_DRV_RX8581 is not set
-
-#
-# SPI RTC drivers
-#
-
-#
-# Platform RTC drivers
-#
-# CONFIG_RTC_DRV_DS1286 is not set
-# CONFIG_RTC_DRV_DS1511 is not set
-# CONFIG_RTC_DRV_DS1553 is not set
-# CONFIG_RTC_DRV_DS1742 is not set
-# CONFIG_RTC_DRV_STK17TA8 is not set
-# CONFIG_RTC_DRV_M48T86 is not set
-# CONFIG_RTC_DRV_M48T35 is not set
-# CONFIG_RTC_DRV_M48T59 is not set
-# CONFIG_RTC_DRV_BQ4802 is not set
-# CONFIG_RTC_DRV_V3020 is not set
-
-#
-# on-CPU RTC drivers
-#
-# CONFIG_DMADEVICES is not set
-# CONFIG_UIO is not set
-# CONFIG_STAGING is not set
-
-#
-# File systems
-#
-# CONFIG_EXT2_FS is not set
-# CONFIG_EXT3_FS is not set
-# CONFIG_EXT4_FS is not set
-# CONFIG_REISERFS_FS is not set
-# CONFIG_JFS_FS is not set
-# CONFIG_FS_POSIX_ACL is not set
-CONFIG_FILE_LOCKING=y
-# CONFIG_XFS_FS is not set
-# CONFIG_OCFS2_FS is not set
-# CONFIG_BTRFS_FS is not set
-# CONFIG_DNOTIFY is not set
-# CONFIG_INOTIFY is not set
-# CONFIG_QUOTA is not set
-# CONFIG_AUTOFS_FS is not set
-# CONFIG_AUTOFS4_FS is not set
-# CONFIG_FUSE_FS is not set
-
-#
-# CD-ROM/DVD Filesystems
-#
-# CONFIG_ISO9660_FS is not set
-# CONFIG_UDF_FS is not set
-
-#
-# DOS/FAT/NT Filesystems
-#
-# CONFIG_MSDOS_FS is not set
-# CONFIG_VFAT_FS is not set
-# CONFIG_NTFS_FS is not set
-
-#
-# Pseudo filesystems
-#
-CONFIG_PROC_FS=y
-CONFIG_PROC_SYSCTL=y
-CONFIG_SYSFS=y
-# CONFIG_TMPFS is not set
-# CONFIG_HUGETLB_PAGE is not set
-# CONFIG_CONFIGFS_FS is not set
-# CONFIG_MISC_FILESYSTEMS is not set
-# CONFIG_NETWORK_FILESYSTEMS is not set
-
-#
-# Partition Types
-#
-# CONFIG_PARTITION_ADVANCED is not set
-CONFIG_MSDOS_PARTITION=y
-# CONFIG_NLS is not set
-# CONFIG_DLM is not set
-
-#
-# Kernel hacking
-#
-CONFIG_PRINTK_TIME=y
-# CONFIG_ENABLE_WARN_DEPRECATED is not set
-# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_FRAME_WARN=1024
-# CONFIG_MAGIC_SYSRQ is not set
-# CONFIG_UNUSED_SYMBOLS is not set
-# CONFIG_DEBUG_FS is not set
-# CONFIG_HEADERS_CHECK is not set
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_SHIRQ=y
-CONFIG_DETECT_SOFTLOCKUP=y
-# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
-CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
-# CONFIG_SCHED_DEBUG is not set
-# CONFIG_SCHEDSTATS is not set
-# CONFIG_TIMER_STATS is not set
-# CONFIG_DEBUG_OBJECTS is not set
-# CONFIG_DEBUG_SLAB is not set
-# CONFIG_DEBUG_RT_MUTEXES is not set
-# CONFIG_RT_MUTEX_TESTER is not set
-CONFIG_DEBUG_SPINLOCK=y
-CONFIG_DEBUG_MUTEXES=y
-CONFIG_DEBUG_SPINLOCK_SLEEP=y
-# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
-# CONFIG_DEBUG_KOBJECT is not set
-# CONFIG_DEBUG_INFO is not set
-# CONFIG_DEBUG_VM is not set
-CONFIG_DEBUG_NOMMU_REGIONS=y
-# CONFIG_DEBUG_MEMORY_INIT is not set
-# CONFIG_DEBUG_LIST is not set
-# CONFIG_DEBUG_SG is not set
-# CONFIG_DEBUG_NOTIFIERS is not set
-# CONFIG_BOOT_PRINTK_DELAY is not set
-# CONFIG_RCU_TORTURE_TEST is not set
-# CONFIG_BACKTRACE_SELF_TEST is not set
-# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
-# CONFIG_FAULT_INJECTION is not set
-# CONFIG_SYSCTL_SYSCALL_CHECK is not set
-
-#
-# Tracers
-#
-# CONFIG_PREEMPT_TRACER is not set
-# CONFIG_SCHED_TRACER is not set
-# CONFIG_CONTEXT_SWITCH_TRACER is not set
-# CONFIG_BOOT_TRACER is not set
-# CONFIG_TRACE_BRANCH_PROFILING is not set
-# CONFIG_DYNAMIC_DEBUG is not set
-# CONFIG_SAMPLES is not set
-
-#
-# Security options
-#
-# CONFIG_KEYS is not set
-# CONFIG_SECURITY is not set
-# CONFIG_SECURITYFS is not set
-# CONFIG_SECURITY_FILE_CAPABILITIES is not set
-# CONFIG_CRYPTO is not set
-
-#
-# Library routines
-#
-CONFIG_GENERIC_FIND_LAST_BIT=y
-# CONFIG_CRC_CCITT is not set
-# CONFIG_CRC16 is not set
-# CONFIG_CRC_T10DIF is not set
-# CONFIG_CRC_ITU_T is not set
-# CONFIG_CRC32 is not set
-# CONFIG_CRC7 is not set
-# CONFIG_LIBCRC32C is not set
-CONFIG_PLIST=y
-CONFIG_HAS_IOMEM=y
-CONFIG_HAS_DMA=y
diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h
index e72aaca7a77..5f67ace97b3 100644
--- a/arch/xtensa/include/asm/cacheflush.h
+++ b/arch/xtensa/include/asm/cacheflush.h
@@ -67,6 +67,8 @@ extern void __invalidate_dcache_page_alias(unsigned long, unsigned long);
#else
static inline void __flush_invalidate_dcache_page_alias(unsigned long virt,
unsigned long phys) { }
+static inline void __invalidate_dcache_page_alias(unsigned long virt,
+ unsigned long phys) { }
#endif
#if defined(CONFIG_MMU) && (ICACHE_WAY_SIZE > PAGE_SIZE)
extern void __invalidate_icache_page_alias(unsigned long, unsigned long);
@@ -84,7 +86,8 @@ static inline void __invalidate_icache_page_alias(unsigned long virt,
* (see also Documentation/cachetlb.txt)
*/
-#if (DCACHE_WAY_SIZE > PAGE_SIZE) || defined(CONFIG_SMP)
+#if defined(CONFIG_MMU) && \
+ ((DCACHE_WAY_SIZE > PAGE_SIZE) || defined(CONFIG_SMP))
#ifdef CONFIG_SMP
void flush_cache_all(void);
@@ -150,7 +153,7 @@ void local_flush_cache_page(struct vm_area_struct *vma,
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
extern void copy_to_user_page(struct vm_area_struct*, struct page*,
unsigned long, void*, const void*, unsigned long);
diff --git a/arch/xtensa/include/asm/highmem.h b/arch/xtensa/include/asm/highmem.h
index 2c7901edffa..01cef6b4082 100644
--- a/arch/xtensa/include/asm/highmem.h
+++ b/arch/xtensa/include/asm/highmem.h
@@ -25,7 +25,7 @@
#define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
-#define kmap_prot PAGE_KERNEL
+#define kmap_prot PAGE_KERNEL_EXEC
#if DCACHE_WAY_SIZE > PAGE_SIZE
#define get_pkmap_color get_pkmap_color
diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h
index 600781edc8a..e256f2270ec 100644
--- a/arch/xtensa/include/asm/initialize_mmu.h
+++ b/arch/xtensa/include/asm/initialize_mmu.h
@@ -26,8 +26,16 @@
#include <asm/pgtable.h>
#include <asm/vectors.h>
+#if XCHAL_HAVE_PTP_MMU
#define CA_BYPASS (_PAGE_CA_BYPASS | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
#define CA_WRITEBACK (_PAGE_CA_WB | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
+#else
+#define CA_WRITEBACK (0x4)
+#endif
+
+#ifndef XCHAL_SPANNING_WAY
+#define XCHAL_SPANNING_WAY 0
+#endif
#ifdef __ASSEMBLY__
@@ -75,7 +83,7 @@
/* Step 1: invalidate mapping at 0x40000000..0x5FFFFFFF. */
- movi a2, 0x40000006
+ movi a2, 0x40000000 | XCHAL_SPANNING_WAY
idtlb a2
iitlb a2
isync
@@ -141,9 +149,6 @@
jx a4
1:
- movi a2, VECBASE_RESET_VADDR
- wsr a2, vecbase
-
/* Step 5: remove temporary mapping. */
idtlb a7
iitlb a7
@@ -156,6 +161,33 @@
#endif /* defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU &&
XCHAL_HAVE_SPANNING_WAY */
+#if !defined(CONFIG_MMU) && XCHAL_HAVE_TLBS
+ /* Enable data and instruction cache in the DEFAULT_MEMORY region
+ * if the processor has DTLB and ITLB.
+ */
+
+ movi a5, PLATFORM_DEFAULT_MEM_START | XCHAL_SPANNING_WAY
+ movi a6, ~_PAGE_ATTRIB_MASK
+ movi a7, CA_WRITEBACK
+ movi a8, 0x20000000
+ movi a9, PLATFORM_DEFAULT_MEM_SIZE
+ j 2f
+1:
+ sub a9, a9, a8
+2:
+ rdtlb1 a3, a5
+ ritlb1 a4, a5
+ and a3, a3, a6
+ and a4, a4, a6
+ or a3, a3, a7
+ or a4, a4, a7
+ wdtlb a3, a5
+ witlb a4, a5
+ add a5, a5, a8
+ bltu a8, a9, 1b
+
+#endif
+
.endm
#endif /*__ASSEMBLY__*/
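The no-MMU branch added above walks the DEFAULT_MEMORY region one spanning-way TLB entry (512 MB) at a time, rewriting the attribute bits of each DTLB/ITLB entry to CA_WRITEBACK. A rough C rendering of that loop, offered only as a reading aid; read_dtlb1()/read_itlb1()/write_dtlb()/write_itlb() are hypothetical stand-ins for the rdtlb1/ritlb1/wdtlb/witlb instructions and are not part of the patch.

/* Hedged sketch, not part of the patch. */
static void __init enable_default_memory_caches(void)
{
	unsigned long addr = PLATFORM_DEFAULT_MEM_START | XCHAL_SPANNING_WAY;
	unsigned long left = PLATFORM_DEFAULT_MEM_SIZE;
	const unsigned long step = 0x20000000;	/* one spanning-way entry */

	for (;;) {
		unsigned long d = (read_dtlb1(addr) & ~_PAGE_ATTRIB_MASK) | CA_WRITEBACK;
		unsigned long i = (read_itlb1(addr) & ~_PAGE_ATTRIB_MASK) | CA_WRITEBACK;

		write_dtlb(d, addr);	/* hypothetical wrapper for wdtlb */
		write_itlb(i, addr);	/* hypothetical wrapper for witlb */
		addr += step;
		if (left <= step)	/* mirrors the "bltu a8, a9, 1b" exit */
			break;
		left -= step;
	}
}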
diff --git a/arch/xtensa/include/asm/mmu_context.h b/arch/xtensa/include/asm/mmu_context.h
index d33c71a8c9e..04c8ebdc451 100644
--- a/arch/xtensa/include/asm/mmu_context.h
+++ b/arch/xtensa/include/asm/mmu_context.h
@@ -50,11 +50,7 @@ DECLARE_PER_CPU(unsigned long, asid_cache);
#define ASID_MASK ((1 << XCHAL_MMU_ASID_BITS) - 1)
#define ASID_INSERT(x) (0x03020001 | (((x) & ASID_MASK) << 8))
-#ifdef CONFIG_MMU
void init_mmu(void);
-#else
-static inline void init_mmu(void) { }
-#endif
static inline void set_rasid_register (unsigned long val)
{
diff --git a/arch/xtensa/include/asm/nommu_context.h b/arch/xtensa/include/asm/nommu_context.h
index 3407cf7989b..22984fd1d84 100644
--- a/arch/xtensa/include/asm/nommu_context.h
+++ b/arch/xtensa/include/asm/nommu_context.h
@@ -1,3 +1,7 @@
+static inline void init_mmu(void)
+{
+}
+
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h
index abe24c6f8b2..ad38500471f 100644
--- a/arch/xtensa/include/asm/page.h
+++ b/arch/xtensa/include/asm/page.h
@@ -20,10 +20,10 @@
* Fixed TLB translations in the processor.
*/
-#define XCHAL_KSEG_CACHED_VADDR 0xd0000000
-#define XCHAL_KSEG_BYPASS_VADDR 0xd8000000
-#define XCHAL_KSEG_PADDR 0x00000000
-#define XCHAL_KSEG_SIZE 0x08000000
+#define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xd0000000)
+#define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xd8000000)
+#define XCHAL_KSEG_PADDR __XTENSA_UL_CONST(0x00000000)
+#define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x08000000)
/*
* PAGE_SHIFT determines the page size
@@ -37,7 +37,7 @@
#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR
#define MAX_MEM_PFN XCHAL_KSEG_SIZE
#else
-#define PAGE_OFFSET 0
+#define PAGE_OFFSET __XTENSA_UL_CONST(0)
#define MAX_MEM_PFN (PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE)
#endif
@@ -145,7 +145,7 @@ extern void copy_page(void *to, void *from);
* some extra work
*/
-#if DCACHE_WAY_SIZE > PAGE_SIZE
+#if defined(CONFIG_MMU) && DCACHE_WAY_SIZE > PAGE_SIZE
extern void clear_page_alias(void *vaddr, unsigned long paddr);
extern void copy_page_alias(void *to, void *from,
unsigned long to_paddr, unsigned long from_paddr);
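The KSEG constants and PAGE_OFFSET are now wrapped in __XTENSA_UL_CONST so that C code sees unsigned long values while assembly sources can keep using the bare numbers. A minimal sketch of how such a wrapper is conventionally defined; the real definition lives in the xtensa types header, not in this hunk.

/* Sketch only, assuming the usual dual C/assembly constant wrapper. */
#ifdef __ASSEMBLY__
# define __XTENSA_UL_CONST(x)	x
#else
# define __XTENSA_UL_CONST(x)	x##UL
#endif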
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index 0383aed5912..872bf0194e6 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -178,6 +178,7 @@
#else /* no mmu */
+# define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
# define PAGE_NONE __pgprot(0)
# define PAGE_SHARED __pgprot(0)
# define PAGE_COPY __pgprot(0)
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index c7211e7e182..876eb380aa2 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -320,7 +320,7 @@ __asm__ __volatile__( \
({ \
long __gu_err, __gu_val; \
__get_user_size(__gu_val,(ptr),(size),__gu_err); \
- (x) = (__typeof__(*(ptr)))__gu_val; \
+ (x) = (__force __typeof__(*(ptr)))__gu_val; \
__gu_err; \
})
@@ -330,7 +330,7 @@ __asm__ __volatile__( \
const __typeof__(*(ptr)) *__gu_addr = (ptr); \
if (access_ok(VERIFY_READ,__gu_addr,size)) \
__get_user_size(__gu_val,__gu_addr,(size),__gu_err); \
- (x) = (__typeof__(*(ptr)))__gu_val; \
+ (x) = (__force __typeof__(*(ptr)))__gu_val; \
__gu_err; \
})
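The added __force does not change generated code; it is a sparse annotation telling the checker that the cast from the raw __gu_val integer back to __typeof__(*(ptr)) intentionally drops address-space or bitwise qualifiers, so __get_user() on annotated types stops producing false warnings. A sketch of how that annotation is typically wired up, assumed from the kernel's compiler headers and not part of this patch:

#ifdef __CHECKER__
# define __force	__attribute__((force))	/* visible to sparse only */
#else
# define __force				/* plain compilers see nothing */
#endif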
diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h
index f74ddfbb92e..a46c53f3611 100644
--- a/arch/xtensa/include/asm/vectors.h
+++ b/arch/xtensa/include/asm/vectors.h
@@ -19,6 +19,7 @@
#define _XTENSA_VECTORS_H
#include <variant/core.h>
+#include <platform/hardware.h>
#define XCHAL_KIO_CACHED_VADDR 0xe0000000
#define XCHAL_KIO_BYPASS_VADDR 0xf0000000
@@ -51,13 +52,13 @@
/* MMU Not being used - Virtual == Physical */
/* VECBASE */
- #define VIRTUAL_MEMORY_ADDRESS 0x00002000
+ #define VIRTUAL_MEMORY_ADDRESS (PLATFORM_DEFAULT_MEM_START + 0x2000)
/* Location of the start of the kernel text, _start */
- #define KERNELOFFSET 0x00003000
+ #define KERNELOFFSET (PLATFORM_DEFAULT_MEM_START + 0x3000)
/* Loaded just above possibly live vectors */
- #define LOAD_MEMORY_ADDRESS 0x00003000
+ #define LOAD_MEMORY_ADDRESS (PLATFORM_DEFAULT_MEM_START + 0x3000)
#endif /* CONFIG_MMU */
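As a worked example, assume a board whose RAM starts at 0x40000000 (the value the removed s6105 platform used): the no-MMU layout now tracks the RAM base instead of being pinned near address zero.

/* Assumed PLATFORM_DEFAULT_MEM_START == 0x40000000:
 *   VIRTUAL_MEMORY_ADDRESS = 0x40002000
 *   KERNELOFFSET           = 0x40003000
 *   LOAD_MEMORY_ADDRESS    = 0x40003000
 * where the old fixed values were 0x00002000 and 0x00003000. */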
diff --git a/arch/xtensa/include/uapi/asm/mman.h b/arch/xtensa/include/uapi/asm/mman.h
index 00eed6786d7..201aec0e044 100644
--- a/arch/xtensa/include/uapi/asm/mman.h
+++ b/arch/xtensa/include/uapi/asm/mman.h
@@ -55,6 +55,12 @@
#define MAP_NONBLOCK 0x20000 /* do not block on IO */
#define MAP_STACK 0x40000 /* give out an address that is best suited for process/thread stacks */
#define MAP_HUGETLB 0x80000 /* create a huge page mapping */
+#ifdef CONFIG_MMAP_ALLOW_UNINITIALIZED
+# define MAP_UNINITIALIZED 0x4000000 /* For anonymous mmap, memory could be
+ * uninitialized */
+#else
+# define MAP_UNINITIALIZED 0x0 /* Don't support this flag */
+#endif
/*
* Flags for msync
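A hedged userspace sketch of the new flag: on a no-MMU kernel built with CONFIG_MMAP_ALLOW_UNINITIALIZED, anonymous mappings may be handed back without zero-filling; elsewhere the flag is defined as 0 and the call behaves as usual. The fallback #define mirrors the header's own "don't support" value and only keeps the example self-contained; the helper name is made up.

#include <sys/mman.h>
#include <stddef.h>

#ifndef MAP_UNINITIALIZED
#define MAP_UNINITIALIZED 0	/* same fallback value the header uses */
#endif

/* Illustrative helper: scratch memory the kernel may return uncleared. */
static void *fast_scratch_buffer(size_t len)
{
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED, -1, 0);
	return p == MAP_FAILED ? NULL : p;
}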
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index aeeb3cc8a41..15a461e2a0e 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -112,6 +112,11 @@ ENTRY(_startup)
movi a0, 0
+#if XCHAL_HAVE_VECBASE
+ movi a2, VECBASE_RESET_VADDR
+ wsr a2, vecbase
+#endif
+
/* Clear debugging registers. */
#if XCHAL_HAVE_DEBUG
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
index 5d3f7a119ed..83cf4968537 100644
--- a/arch/xtensa/kernel/syscall.c
+++ b/arch/xtensa/kernel/syscall.c
@@ -57,6 +57,7 @@ asmlinkage long xtensa_fadvise64_64(int fd, int advice,
return sys_fadvise64_64(fd, offset, len, advice);
}
+#ifdef CONFIG_MMU
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
@@ -93,3 +94,4 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
addr = COLOUR_ALIGN(addr, pgoff);
}
}
+#endif
diff --git a/arch/xtensa/mm/Makefile b/arch/xtensa/mm/Makefile
index f54f78e24d7..e601e2fbe8e 100644
--- a/arch/xtensa/mm/Makefile
+++ b/arch/xtensa/mm/Makefile
@@ -2,6 +2,6 @@
# Makefile for the Linux/Xtensa-specific parts of the memory manager.
#
-obj-y := init.o cache.o misc.o
-obj-$(CONFIG_MMU) += fault.o mmu.o tlb.o
+obj-y := init.o misc.o
+obj-$(CONFIG_MMU) += cache.o fault.o mmu.o tlb.o
obj-$(CONFIG_HIGHMEM) += highmem.o
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 77ed20209ca..9a9a5935bd3 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -239,6 +239,17 @@ void __init bootmem_init(void)
unsigned long bootmap_start, bootmap_size;
int i;
+ /* Reserve all memory below PLATFORM_DEFAULT_MEM_START, as memory
+ * accounting doesn't work for pages below that address.
+ *
+ * If PLATFORM_DEFAULT_MEM_START is zero, reserve the page at address 0:
+ * successful allocations should never return NULL.
+ */
+ if (PLATFORM_DEFAULT_MEM_START)
+ mem_reserve(0, PLATFORM_DEFAULT_MEM_START, 0);
+ else
+ mem_reserve(0, 1, 0);
+
sysmem_dump();
max_low_pfn = max_pfn = 0;
min_low_pfn = ~0;
@@ -332,18 +343,24 @@ void __init mem_init(void)
" pkmap : 0x%08lx - 0x%08lx (%5lu kB)\n"
" fixmap : 0x%08lx - 0x%08lx (%5lu kB)\n"
#endif
+#ifdef CONFIG_MMU
" vmalloc : 0x%08x - 0x%08x (%5u MB)\n"
- " lowmem : 0x%08x - 0x%08lx (%5lu MB)\n",
+#endif
+ " lowmem : 0x%08lx - 0x%08lx (%5lu MB)\n",
#ifdef CONFIG_HIGHMEM
PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE,
(LAST_PKMAP*PAGE_SIZE) >> 10,
FIXADDR_START, FIXADDR_TOP,
(FIXADDR_TOP - FIXADDR_START) >> 10,
#endif
+#ifdef CONFIG_MMU
VMALLOC_START, VMALLOC_END,
(VMALLOC_END - VMALLOC_START) >> 20,
PAGE_OFFSET, PAGE_OFFSET +
(max_low_pfn - min_low_pfn) * PAGE_SIZE,
+#else
+ min_low_pfn * PAGE_SIZE, max_low_pfn * PAGE_SIZE,
+#endif
((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20);
}
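For the new no-MMU arm of the banner above, a short worked example with assumed values (128 MB of RAM at 0x40000000, not taken from the patch):

/* Assumed: 128 MB of RAM at 0x40000000.
 *   min_low_pfn * PAGE_SIZE = 0x40000000
 *   max_low_pfn * PAGE_SIZE = 0x48000000
 *   (max_low_pfn - min_low_pfn) * PAGE_SIZE >> 20 = 0x08000000 >> 20 = 128
 * so the "lowmem" line reports 0x40000000 - 0x48000000 (128 MB). */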
diff --git a/arch/xtensa/platforms/s6105/Makefile b/arch/xtensa/platforms/s6105/Makefile
deleted file mode 100644
index 0be6194bcb7..00000000000
--- a/arch/xtensa/platforms/s6105/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-# Makefile for the Stretch S6105 eval board
-
-obj-y := setup.o device.o
diff --git a/arch/xtensa/platforms/s6105/device.c b/arch/xtensa/platforms/s6105/device.c
deleted file mode 100644
index 4f4fc971042..00000000000
--- a/arch/xtensa/platforms/s6105/device.c
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * s6105 platform devices
- *
- * Copyright (c) 2009 emlix GmbH
- */
-
-#include <linux/kernel.h>
-#include <linux/gpio.h>
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/phy.h>
-#include <linux/platform_device.h>
-#include <linux/serial.h>
-#include <linux/serial_8250.h>
-
-#include <variant/hardware.h>
-#include <variant/dmac.h>
-
-#include <platform/gpio.h>
-
-#define GPIO3_INTNUM 3
-#define UART_INTNUM 4
-#define GMAC_INTNUM 5
-
-static const signed char gpio3_irq_mappings[] = {
- S6_INTC_GPIO(3),
- -1
-};
-
-static const signed char uart_irq_mappings[] = {
- S6_INTC_UART(0),
- S6_INTC_UART(1),
- -1,
-};
-
-static const signed char gmac_irq_mappings[] = {
- S6_INTC_GMAC_STAT,
- S6_INTC_GMAC_ERR,
- S6_INTC_DMA_HOSTTERMCNT(0),
- S6_INTC_DMA_HOSTTERMCNT(1),
- -1
-};
-
-const signed char *platform_irq_mappings[NR_IRQS] = {
- [GPIO3_INTNUM] = gpio3_irq_mappings,
- [UART_INTNUM] = uart_irq_mappings,
- [GMAC_INTNUM] = gmac_irq_mappings,
-};
-
-static struct plat_serial8250_port serial_platform_data[] = {
- {
- .membase = (void *)S6_REG_UART + 0x0000,
- .mapbase = S6_REG_UART + 0x0000,
- .irq = UART_INTNUM,
- .uartclk = S6_SCLK,
- .regshift = 2,
- .iotype = SERIAL_IO_MEM,
- .flags = ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST,
- },
- {
- .membase = (void *)S6_REG_UART + 0x1000,
- .mapbase = S6_REG_UART + 0x1000,
- .irq = UART_INTNUM,
- .uartclk = S6_SCLK,
- .regshift = 2,
- .iotype = SERIAL_IO_MEM,
- .flags = ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST,
- },
- { },
-};
-
-static struct resource s6_gmac_resource[] = {
- {
- .name = "mem",
- .start = (resource_size_t)S6_REG_GMAC,
- .end = (resource_size_t)S6_REG_GMAC + 0x10000 - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "dma",
- .start = (resource_size_t)
- DMA_CHNL(S6_REG_HIFDMA, S6_HIFDMA_GMACTX),
- .end = (resource_size_t)
- DMA_CHNL(S6_REG_HIFDMA, S6_HIFDMA_GMACTX) + 0x100 - 1,
- .flags = IORESOURCE_DMA,
- },
- {
- .name = "dma",
- .start = (resource_size_t)
- DMA_CHNL(S6_REG_HIFDMA, S6_HIFDMA_GMACRX),
- .end = (resource_size_t)
- DMA_CHNL(S6_REG_HIFDMA, S6_HIFDMA_GMACRX) + 0x100 - 1,
- .flags = IORESOURCE_DMA,
- },
- {
- .name = "io",
- .start = (resource_size_t)S6_MEM_GMAC,
- .end = (resource_size_t)S6_MEM_GMAC + 0x2000000 - 1,
- .flags = IORESOURCE_IO,
- },
- {
- .name = "irq",
- .start = (resource_size_t)GMAC_INTNUM,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "irq",
- .start = (resource_size_t)PHY_POLL,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static int __init prepare_phy_irq(int pin)
-{
- int irq;
- if (gpio_request(pin, "s6gmac_phy") < 0)
- goto fail;
- if (gpio_direction_input(pin) < 0)
- goto free;
- irq = gpio_to_irq(pin);
- if (irq < 0)
- goto free;
- if (irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW) < 0)
- goto free;
- return irq;
-free:
- gpio_free(pin);
-fail:
- return PHY_POLL;
-}
-
-static struct platform_device platform_devices[] = {
- {
- .name = "serial8250",
- .id = PLAT8250_DEV_PLATFORM,
- .dev = {
- .platform_data = serial_platform_data,
- },
- },
- {
- .name = "s6gmac",
- .id = 0,
- .resource = s6_gmac_resource,
- .num_resources = ARRAY_SIZE(s6_gmac_resource),
- },
- {
- I2C_BOARD_INFO("m41t62", S6I2C_ADDR_M41T62),
- },
-};
-
-static int __init device_init(void)
-{
- int i;
-
- s6_gmac_resource[5].start = prepare_phy_irq(GPIO_PHY_IRQ);
-
- for (i = 0; i < ARRAY_SIZE(platform_devices); i++)
- platform_device_register(&platform_devices[i]);
- return 0;
-}
-arch_initcall_sync(device_init);
diff --git a/arch/xtensa/platforms/s6105/include/platform/gpio.h b/arch/xtensa/platforms/s6105/include/platform/gpio.h
deleted file mode 100644
index fa11aa4b61e..00000000000
--- a/arch/xtensa/platforms/s6105/include/platform/gpio.h
+++ /dev/null
@@ -1,27 +0,0 @@
-#ifndef __ASM_XTENSA_S6105_GPIO_H
-#define __ASM_XTENSA_S6105_GPIO_H
-
-#define GPIO_BP_TEMP_ALARM 0
-#define GPIO_PB_RESET_IN 1
-#define GPIO_EXP_IRQ 2
-#define GPIO_TRIGGER_IRQ 3
-#define GPIO_RTC_IRQ 4
-#define GPIO_PHY_IRQ 5
-#define GPIO_IMAGER_RESET 6
-#define GPIO_SD_IRQ 7
-#define GPIO_MINI_BOOT_INH 8
-#define GPIO_BOARD_RESET 9
-#define GPIO_EXP_PRESENT 10
-#define GPIO_LED1_NGREEN 12
-#define GPIO_LED1_RED 13
-#define GPIO_LED0_NGREEN 14
-#define GPIO_LED0_NRED 15
-#define GPIO_SPI_CS0 16
-#define GPIO_SPI_CS1 17
-#define GPIO_SPI_CS3 19
-#define GPIO_SPI_CS4 20
-#define GPIO_SD_WP 21
-#define GPIO_BP_RESET 22
-#define GPIO_ALARM_OUT 23
-
-#endif /* __ASM_XTENSA_S6105_GPIO_H */
diff --git a/arch/xtensa/platforms/s6105/include/platform/hardware.h b/arch/xtensa/platforms/s6105/include/platform/hardware.h
deleted file mode 100644
index d628efac708..00000000000
--- a/arch/xtensa/platforms/s6105/include/platform/hardware.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef __XTENSA_S6105_HARDWARE_H
-#define __XTENSA_S6105_HARDWARE_H
-
-#define PLATFORM_DEFAULT_MEM_START 0x40000000
-#define PLATFORM_DEFAULT_MEM_SIZE 0x08000000
-
-#define MAX_DMA_ADDRESS 0
-
-#define KERNELOFFSET (PLATFORM_DEFAULT_MEM_START + 0x1000)
-
-#endif /* __XTENSA_S6105_HARDWARE_H */
diff --git a/arch/xtensa/platforms/s6105/include/platform/serial.h b/arch/xtensa/platforms/s6105/include/platform/serial.h
deleted file mode 100644
index c8a771e5981..00000000000
--- a/arch/xtensa/platforms/s6105/include/platform/serial.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef __ASM_XTENSA_S6105_SERIAL_H
-#define __ASM_XTENSA_S6105_SERIAL_H
-
-#include <variant/hardware.h>
-
-#define BASE_BAUD (S6_SCLK / 16)
-
-#endif /* __ASM_XTENSA_S6105_SERIAL_H */
diff --git a/arch/xtensa/platforms/s6105/setup.c b/arch/xtensa/platforms/s6105/setup.c
deleted file mode 100644
index 86ce730f791..00000000000
--- a/arch/xtensa/platforms/s6105/setup.c
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * s6105 control routines
- *
- * Copyright (c) 2009 emlix GmbH
- */
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-
-#include <asm/bootparam.h>
-
-#include <variant/hardware.h>
-#include <variant/gpio.h>
-
-#include <platform/gpio.h>
-
-void platform_halt(void)
-{
- local_irq_disable();
- while (1)
- ;
-}
-
-void platform_power_off(void)
-{
- platform_halt();
-}
-
-void platform_restart(void)
-{
- platform_halt();
-}
-
-void __init platform_setup(char **cmdline)
-{
- unsigned long reg;
-
- reg = readl(S6_REG_GREG1 + S6_GREG1_PLLSEL);
- reg &= ~(S6_GREG1_PLLSEL_GMAC_MASK << S6_GREG1_PLLSEL_GMAC |
- S6_GREG1_PLLSEL_GMII_MASK << S6_GREG1_PLLSEL_GMII);
- reg |= S6_GREG1_PLLSEL_GMAC_125MHZ << S6_GREG1_PLLSEL_GMAC |
- S6_GREG1_PLLSEL_GMII_125MHZ << S6_GREG1_PLLSEL_GMII;
- writel(reg, S6_REG_GREG1 + S6_GREG1_PLLSEL);
-
- reg = readl(S6_REG_GREG1 + S6_GREG1_CLKGATE);
- reg &= ~(1 << S6_GREG1_BLOCK_SB);
- reg &= ~(1 << S6_GREG1_BLOCK_GMAC);
- writel(reg, S6_REG_GREG1 + S6_GREG1_CLKGATE);
-
- reg = readl(S6_REG_GREG1 + S6_GREG1_BLOCKENA);
- reg |= 1 << S6_GREG1_BLOCK_SB;
- reg |= 1 << S6_GREG1_BLOCK_GMAC;
- writel(reg, S6_REG_GREG1 + S6_GREG1_BLOCKENA);
-
- printk(KERN_NOTICE "S6105 on Stretch S6000 - "
- "Copyright (C) 2009 emlix GmbH <info@emlix.com>\n");
-}
-
-void __init platform_init(bp_tag_t *first)
-{
- s6_gpio_init(0);
- gpio_request(GPIO_LED1_NGREEN, "led1_green");
- gpio_request(GPIO_LED1_RED, "led1_red");
- gpio_direction_output(GPIO_LED1_NGREEN, 1);
-}
-
-void platform_heartbeat(void)
-{
- static unsigned int c;
-
- if (!(++c & 0x4F))
- gpio_direction_output(GPIO_LED1_RED, !(c & 0x10));
-}
diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
index aeb316b7ff8..6edd20bb456 100644
--- a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
+++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
@@ -17,8 +17,8 @@
/* Memory configuration. */
-#define PLATFORM_DEFAULT_MEM_START 0x00000000
-#define PLATFORM_DEFAULT_MEM_SIZE 0x04000000
+#define PLATFORM_DEFAULT_MEM_START CONFIG_DEFAULT_MEM_START
+#define PLATFORM_DEFAULT_MEM_SIZE CONFIG_DEFAULT_MEM_SIZE
/* Interrupt configuration. */
diff --git a/arch/xtensa/variants/s6000/Makefile b/arch/xtensa/variants/s6000/Makefile
deleted file mode 100644
index 3e7ef0a0c49..00000000000
--- a/arch/xtensa/variants/s6000/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-# s6000 Makefile
-
-obj-y += irq.o gpio.o dmac.o
-obj-$(CONFIG_XTENSA_CALIBRATE_CCOUNT) += delay.o
diff --git a/arch/xtensa/variants/s6000/delay.c b/arch/xtensa/variants/s6000/delay.c
deleted file mode 100644
index 39154563ee1..00000000000
--- a/arch/xtensa/variants/s6000/delay.c
+++ /dev/null
@@ -1,25 +0,0 @@
-#include <asm/timex.h>
-#include <asm/io.h>
-#include <variant/hardware.h>
-
-#define LOOPS 10
-void platform_calibrate_ccount(void)
-{
- u32 uninitialized_var(a);
- u32 uninitialized_var(u);
- u32 b;
- u32 tstamp = S6_REG_GREG1 + S6_GREG1_GLOBAL_TIMER;
- int i = LOOPS+1;
- do {
- u32 t = u;
- asm volatile(
- "1: l32i %0, %2, 0 ;"
- " beq %0, %1, 1b ;"
- : "=&a"(u) : "a"(t), "a"(tstamp));
- b = get_ccount();
- if (i == LOOPS)
- a = b;
- } while (--i >= 0);
- b -= a;
- ccount_freq = b * (100000UL / LOOPS);
-}
diff --git a/arch/xtensa/variants/s6000/dmac.c b/arch/xtensa/variants/s6000/dmac.c
deleted file mode 100644
index 340f5bb0b5e..00000000000
--- a/arch/xtensa/variants/s6000/dmac.c
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * Authors: Oskar Schirmer <oskar@scara.com>
- * Daniel Gloeckner <dg@emlix.com>
- * (c) 2008 emlix GmbH http://www.emlix.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/io.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/spinlock.h>
-#include <asm/cacheflush.h>
-#include <variant/dmac.h>
-
-/* DMA engine lookup */
-
-struct s6dmac_ctrl s6dmac_ctrl[S6_DMAC_NB];
-
-
-/* DMA control, per engine */
-
-void s6dmac_put_fifo_cache(u32 dmac, int chan, u32 src, u32 dst, u32 size)
-{
- if (xtensa_need_flush_dma_source(src)) {
- u32 base = src;
- u32 span = size;
- u32 chunk = readl(DMA_CHNL(dmac, chan) + S6_DMA_CMONCHUNK);
- if (chunk && (size > chunk)) {
- s32 skip =
- readl(DMA_CHNL(dmac, chan) + S6_DMA_SRCSKIP);
- u32 gaps = (size+chunk-1)/chunk - 1;
- if (skip >= 0) {
- span += gaps * skip;
- } else if (-skip > chunk) {
- s32 decr = gaps * (chunk + skip);
- base += decr;
- span = chunk - decr;
- } else {
- span = max(span + gaps * skip,
- (chunk + skip) * gaps - skip);
- }
- }
- flush_dcache_unaligned(base, span);
- }
- if (xtensa_need_invalidate_dma_destination(dst)) {
- u32 base = dst;
- u32 span = size;
- u32 chunk = readl(DMA_CHNL(dmac, chan) + S6_DMA_CMONCHUNK);
- if (chunk && (size > chunk)) {
- s32 skip =
- readl(DMA_CHNL(dmac, chan) + S6_DMA_DSTSKIP);
- u32 gaps = (size+chunk-1)/chunk - 1;
- if (skip >= 0) {
- span += gaps * skip;
- } else if (-skip > chunk) {
- s32 decr = gaps * (chunk + skip);
- base += decr;
- span = chunk - decr;
- } else {
- span = max(span + gaps * skip,
- (chunk + skip) * gaps - skip);
- }
- }
- invalidate_dcache_unaligned(base, span);
- }
- s6dmac_put_fifo(dmac, chan, src, dst, size);
-}
-
-void s6dmac_disable_error_irqs(u32 dmac, u32 mask)
-{
- unsigned long flags;
- spinlock_t *spinl = &s6dmac_ctrl[_dmac_addr_index(dmac)].lock;
- spin_lock_irqsave(spinl, flags);
- _s6dmac_disable_error_irqs(dmac, mask);
- spin_unlock_irqrestore(spinl, flags);
-}
-
-u32 s6dmac_int_sources(u32 dmac, u32 channel)
-{
- u32 mask, ret, tmp;
- mask = 1 << channel;
-
- tmp = readl(dmac + S6_DMA_TERMCNTIRQSTAT);
- tmp &= mask;
- writel(tmp, dmac + S6_DMA_TERMCNTIRQCLR);
- ret = tmp >> channel;
-
- tmp = readl(dmac + S6_DMA_PENDCNTIRQSTAT);
- tmp &= mask;
- writel(tmp, dmac + S6_DMA_PENDCNTIRQCLR);
- ret |= (tmp >> channel) << 1;
-
- tmp = readl(dmac + S6_DMA_LOWWMRKIRQSTAT);
- tmp &= mask;
- writel(tmp, dmac + S6_DMA_LOWWMRKIRQCLR);
- ret |= (tmp >> channel) << 2;
-
- tmp = readl(dmac + S6_DMA_INTRAW0);
- tmp &= (mask << S6_DMA_INT0_OVER) | (mask << S6_DMA_INT0_UNDER);
- writel(tmp, dmac + S6_DMA_INTCLEAR0);
-
- if (tmp & (mask << S6_DMA_INT0_UNDER))
- ret |= 1 << 3;
- if (tmp & (mask << S6_DMA_INT0_OVER))
- ret |= 1 << 4;
-
- tmp = readl(dmac + S6_DMA_MASTERERRINFO);
- mask <<= S6_DMA_INT1_CHANNEL;
- if (((tmp >> S6_DMA_MASTERERR_CHAN(0)) & S6_DMA_MASTERERR_CHAN_MASK)
- == channel)
- mask |= 1 << S6_DMA_INT1_MASTER;
- if (((tmp >> S6_DMA_MASTERERR_CHAN(1)) & S6_DMA_MASTERERR_CHAN_MASK)
- == channel)
- mask |= 1 << (S6_DMA_INT1_MASTER + 1);
- if (((tmp >> S6_DMA_MASTERERR_CHAN(2)) & S6_DMA_MASTERERR_CHAN_MASK)
- == channel)
- mask |= 1 << (S6_DMA_INT1_MASTER + 2);
-
- tmp = readl(dmac + S6_DMA_INTRAW1) & mask;
- writel(tmp, dmac + S6_DMA_INTCLEAR1);
- ret |= ((tmp >> channel) & 1) << 5;
- ret |= ((tmp >> S6_DMA_INT1_MASTER) & S6_DMA_INT1_MASTER_MASK) << 6;
-
- return ret;
-}
-
-void s6dmac_release_chan(u32 dmac, int chan)
-{
- if (chan >= 0)
- s6dmac_disable_chan(dmac, chan);
-}
-
-
-/* global init */
-
-static inline void __init dmac_init(u32 dmac, u8 chan_nb)
-{
- s6dmac_ctrl[S6_DMAC_INDEX(dmac)].dmac = dmac;
- spin_lock_init(&s6dmac_ctrl[S6_DMAC_INDEX(dmac)].lock);
- s6dmac_ctrl[S6_DMAC_INDEX(dmac)].chan_nb = chan_nb;
- writel(S6_DMA_INT1_MASTER_MASK << S6_DMA_INT1_MASTER,
- dmac + S6_DMA_INTCLEAR1);
-}
-
-static inline void __init dmac_master(u32 dmac,
- u32 m0start, u32 m0end, u32 m1start, u32 m1end)
-{
- writel(m0start, dmac + S6_DMA_MASTER0START);
- writel(m0end - 1, dmac + S6_DMA_MASTER0END);
- writel(m1start, dmac + S6_DMA_MASTER1START);
- writel(m1end - 1, dmac + S6_DMA_MASTER1END);
-}
-
-static void __init s6_dmac_init(void)
-{
- dmac_init(S6_REG_LMSDMA, S6_LMSDMA_NB);
- dmac_master(S6_REG_LMSDMA,
- S6_MEM_DDR, S6_MEM_PCIE_APER, S6_MEM_EFI, S6_MEM_GMAC);
- dmac_init(S6_REG_NIDMA, S6_NIDMA_NB);
- dmac_init(S6_REG_DPDMA, S6_DPDMA_NB);
- dmac_master(S6_REG_DPDMA,
- S6_MEM_DDR, S6_MEM_PCIE_APER, S6_REG_DP, S6_REG_DPDMA);
- dmac_init(S6_REG_HIFDMA, S6_HIFDMA_NB);
- dmac_master(S6_REG_HIFDMA,
- S6_MEM_GMAC, S6_MEM_PCIE_CFG, S6_MEM_PCIE_APER, S6_MEM_AUX);
-}
-
-arch_initcall(s6_dmac_init);
diff --git a/arch/xtensa/variants/s6000/gpio.c b/arch/xtensa/variants/s6000/gpio.c
deleted file mode 100644
index da9e85c13b0..00000000000
--- a/arch/xtensa/variants/s6000/gpio.c
+++ /dev/null
@@ -1,230 +0,0 @@
-/*
- * s6000 gpio driver
- *
- * Copyright (c) 2009 emlix GmbH
- * Authors: Oskar Schirmer <oskar@scara.com>
- * Johannes Weiner <hannes@cmpxchg.org>
- * Daniel Gloeckner <dg@emlix.com>
- */
-#include <linux/bitops.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-
-#include <variant/hardware.h>
-
-#define IRQ_BASE XTENSA_NR_IRQS
-
-#define S6_GPIO_DATA 0x000
-#define S6_GPIO_IS 0x404
-#define S6_GPIO_IBE 0x408
-#define S6_GPIO_IEV 0x40C
-#define S6_GPIO_IE 0x410
-#define S6_GPIO_RIS 0x414
-#define S6_GPIO_MIS 0x418
-#define S6_GPIO_IC 0x41C
-#define S6_GPIO_AFSEL 0x420
-#define S6_GPIO_DIR 0x800
-#define S6_GPIO_BANK(nr) ((nr) * 0x1000)
-#define S6_GPIO_MASK(nr) (4 << (nr))
-#define S6_GPIO_OFFSET(nr) \
- (S6_GPIO_BANK((nr) >> 3) + S6_GPIO_MASK((nr) & 7))
-
-static int direction_input(struct gpio_chip *chip, unsigned int off)
-{
- writeb(0, S6_REG_GPIO + S6_GPIO_DIR + S6_GPIO_OFFSET(off));
- return 0;
-}
-
-static int get(struct gpio_chip *chip, unsigned int off)
-{
- return readb(S6_REG_GPIO + S6_GPIO_DATA + S6_GPIO_OFFSET(off));
-}
-
-static int direction_output(struct gpio_chip *chip, unsigned int off, int val)
-{
- unsigned rel = S6_GPIO_OFFSET(off);
- writeb(~0, S6_REG_GPIO + S6_GPIO_DIR + rel);
- writeb(val ? ~0 : 0, S6_REG_GPIO + S6_GPIO_DATA + rel);
- return 0;
-}
-
-static void set(struct gpio_chip *chip, unsigned int off, int val)
-{
- writeb(val ? ~0 : 0, S6_REG_GPIO + S6_GPIO_DATA + S6_GPIO_OFFSET(off));
-}
-
-static int to_irq(struct gpio_chip *chip, unsigned offset)
-{
- if (offset < 8)
- return offset + IRQ_BASE;
- return -EINVAL;
-}
-
-static struct gpio_chip gpiochip = {
- .owner = THIS_MODULE,
- .direction_input = direction_input,
- .get = get,
- .direction_output = direction_output,
- .set = set,
- .to_irq = to_irq,
- .base = 0,
- .ngpio = 24,
- .can_sleep = 0, /* no blocking io needed */
- .exported = 0, /* no exporting to userspace */
-};
-
-int s6_gpio_init(u32 afsel)
-{
- writeb(afsel, S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_AFSEL);
- writeb(afsel >> 8, S6_REG_GPIO + S6_GPIO_BANK(1) + S6_GPIO_AFSEL);
- writeb(afsel >> 16, S6_REG_GPIO + S6_GPIO_BANK(2) + S6_GPIO_AFSEL);
- return gpiochip_add(&gpiochip);
-}
-
-static void ack(struct irq_data *d)
-{
- writeb(1 << (d->irq - IRQ_BASE), S6_REG_GPIO + S6_GPIO_IC);
-}
-
-static void mask(struct irq_data *d)
-{
- u8 r = readb(S6_REG_GPIO + S6_GPIO_IE);
- r &= ~(1 << (d->irq - IRQ_BASE));
- writeb(r, S6_REG_GPIO + S6_GPIO_IE);
-}
-
-static void unmask(struct irq_data *d)
-{
- u8 m = readb(S6_REG_GPIO + S6_GPIO_IE);
- m |= 1 << (d->irq - IRQ_BASE);
- writeb(m, S6_REG_GPIO + S6_GPIO_IE);
-}
-
-static int set_type(struct irq_data *d, unsigned int type)
-{
- const u8 m = 1 << (d->irq - IRQ_BASE);
- irq_flow_handler_t handler;
- u8 reg;
-
- if (type == IRQ_TYPE_PROBE) {
- if ((readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_AFSEL) & m)
- || (readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IE) & m)
- || readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_DIR
- + S6_GPIO_MASK(irq - IRQ_BASE)))
- return 0;
- type = IRQ_TYPE_EDGE_BOTH;
- }
-
- reg = readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IS);
- if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) {
- reg |= m;
- handler = handle_level_irq;
- } else {
- reg &= ~m;
- handler = handle_edge_irq;
- }
- writeb(reg, S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IS);
- __irq_set_handler_locked(irq, handler);
-
- reg = readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IEV);
- if (type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_EDGE_RISING))
- reg |= m;
- else
- reg &= ~m;
- writeb(reg, S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IEV);
-
- reg = readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IBE);
- if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
- reg |= m;
- else
- reg &= ~m;
- writeb(reg, S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IBE);
- return 0;
-}
-
-static struct irq_chip gpioirqs = {
- .name = "GPIO",
- .irq_ack = ack,
- .irq_mask = mask,
- .irq_unmask = unmask,
- .irq_set_type = set_type,
-};
-
-static u8 demux_masks[4];
-
-static void demux_irqs(unsigned int irq, struct irq_desc *desc)
-{
- struct irq_chip *chip = irq_desc_get_chip(desc);
- u8 *mask = irq_desc_get_handler_data(desc);
- u8 pending;
- int cirq;
-
- chip->irq_mask(&desc->irq_data);
- chip->irq_ack(&desc->irq_data);
- pending = readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_MIS) & *mask;
- cirq = IRQ_BASE - 1;
- while (pending) {
- int n = ffs(pending);
- cirq += n;
- pending >>= n;
- generic_handle_irq(cirq);
- }
- chip->irq_unmask(&desc->irq_data);
-}
-
-extern const signed char *platform_irq_mappings[XTENSA_NR_IRQS];
-
-void __init variant_init_irq(void)
-{
- int irq, n;
- writeb(0, S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IE);
- for (irq = n = 0; irq < XTENSA_NR_IRQS; irq++) {
- const signed char *mapping = platform_irq_mappings[irq];
- int alone = 1;
- u8 mask;
- if (!mapping)
- continue;
- for(mask = 0; *mapping != -1; mapping++)
- switch (*mapping) {
- case S6_INTC_GPIO(0):
- mask |= 1 << 0;
- break;
- case S6_INTC_GPIO(1):
- mask |= 1 << 1;
- break;
- case S6_INTC_GPIO(2):
- mask |= 1 << 2;
- break;
- case S6_INTC_GPIO(3):
- mask |= 0x1f << 3;
- break;
- default:
- alone = 0;
- }
- if (mask) {
- int cirq, i;
- if (!alone) {
- printk(KERN_ERR "chained irq chips can't share"
- " parent irq %i\n", irq);
- continue;
- }
- demux_masks[n] = mask;
- cirq = IRQ_BASE - 1;
- do {
- i = ffs(mask);
- cirq += i;
- mask >>= i;
- irq_set_chip(cirq, &gpioirqs);
- irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
- } while (mask);
- irq_set_handler_data(irq, demux_masks + n);
- irq_set_chained_handler(irq, demux_irqs);
- if (++n == ARRAY_SIZE(demux_masks))
- break;
- }
- }
-}
diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
deleted file mode 100644
index af007953027..00000000000
--- a/arch/xtensa/variants/s6000/include/variant/core.h
+++ /dev/null
@@ -1,431 +0,0 @@
-/*
- * Xtensa processor core configuration information.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (c) 1999-2008 Tensilica Inc.
- */
-
-#ifndef _XTENSA_CORE_CONFIGURATION_H
-#define _XTENSA_CORE_CONFIGURATION_H
-
-
-/****************************************************************************
- Parameters Useful for Any Code, USER or PRIVILEGED
- ****************************************************************************/
-
-/*
- * Note: Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is
- * configured, and a value of 0 otherwise. These macros are always defined.
- */
-
-
-/*----------------------------------------------------------------------
- ISA
- ----------------------------------------------------------------------*/
-
-#define XCHAL_HAVE_BE 0 /* big-endian byte ordering */
-#define XCHAL_HAVE_WINDOWED 1 /* windowed registers option */
-#define XCHAL_NUM_AREGS 64 /* num of physical addr regs */
-#define XCHAL_NUM_AREGS_LOG2 6 /* log2(XCHAL_NUM_AREGS) */
-#define XCHAL_MAX_INSTRUCTION_SIZE 8 /* max instr bytes (3..8) */
-#define XCHAL_HAVE_DEBUG 1 /* debug option */
-#define XCHAL_HAVE_DENSITY 1 /* 16-bit instructions */
-#define XCHAL_HAVE_LOOPS 1 /* zero-overhead loops */
-#define XCHAL_HAVE_NSA 1 /* NSA/NSAU instructions */
-#define XCHAL_HAVE_MINMAX 1 /* MIN/MAX instructions */
-#define XCHAL_HAVE_SEXT 1 /* SEXT instruction */
-#define XCHAL_HAVE_CLAMPS 1 /* CLAMPS instruction */
-#define XCHAL_HAVE_MUL16 1 /* MUL16S/MUL16U instructions */
-#define XCHAL_HAVE_MUL32 1 /* MULL instruction */
-#define XCHAL_HAVE_MUL32_HIGH 1 /* MULUH/MULSH instructions */
-#define XCHAL_HAVE_DIV32 0 /* QUOS/QUOU/REMS/REMU instructions */
-#define XCHAL_HAVE_L32R 1 /* L32R instruction */
-#define XCHAL_HAVE_ABSOLUTE_LITERALS 1 /* non-PC-rel (extended) L32R */
-#define XCHAL_HAVE_CONST16 0 /* CONST16 instruction */
-#define XCHAL_HAVE_ADDX 1 /* ADDX#/SUBX# instructions */
-#define XCHAL_HAVE_WIDE_BRANCHES 0 /* B*.W18 or B*.W15 instr's */
-#define XCHAL_HAVE_PREDICTED_BRANCHES 0 /* B[EQ/EQZ/NE/NEZ]T instr's */
-#define XCHAL_HAVE_CALL4AND12 1 /* (obsolete option) */
-#define XCHAL_HAVE_ABS 1 /* ABS instruction */
-/*#define XCHAL_HAVE_POPC 0*/ /* POPC instruction */
-/*#define XCHAL_HAVE_CRC 0*/ /* CRC instruction */
-#define XCHAL_HAVE_RELEASE_SYNC 0 /* L32AI/S32RI instructions */
-#define XCHAL_HAVE_S32C1I 0 /* S32C1I instruction */
-#define XCHAL_HAVE_SPECULATION 0 /* speculation */
-#define XCHAL_HAVE_FULL_RESET 0 /* all regs/state reset */
-#define XCHAL_NUM_CONTEXTS 1 /* */
-#define XCHAL_NUM_MISC_REGS 4 /* num of scratch regs (0..4) */
-#define XCHAL_HAVE_TAP_MASTER 0 /* JTAG TAP control instr's */
-#define XCHAL_HAVE_PRID 0 /* processor ID register */
-#define XCHAL_HAVE_THREADPTR 0 /* THREADPTR register */
-#define XCHAL_HAVE_BOOLEANS 1 /* boolean registers */
-#define XCHAL_HAVE_CP 1 /* CPENABLE reg (coprocessor) */
-#define XCHAL_CP_MAXCFG 8 /* max allowed cp id plus one */
-#define XCHAL_HAVE_MAC16 0 /* MAC16 package */
-#define XCHAL_HAVE_VECTORFPU2005 0 /* vector floating-point pkg */
-#define XCHAL_HAVE_FP 1 /* floating point pkg */
-#define XCHAL_HAVE_VECTRA1 0 /* Vectra I pkg */
-#define XCHAL_HAVE_VECTRALX 0 /* Vectra LX pkg */
-#define XCHAL_HAVE_HIFI2 0 /* HiFi2 Audio Engine pkg */
-
-
-/*----------------------------------------------------------------------
- MISC
- ----------------------------------------------------------------------*/
-
-#define XCHAL_NUM_WRITEBUFFER_ENTRIES 8 /* size of write buffer */
-#define XCHAL_INST_FETCH_WIDTH 8 /* instr-fetch width in bytes */
-#define XCHAL_DATA_WIDTH 16 /* data width in bytes */
-/* In T1050, applies to selected core load and store instructions (see ISA): */
-#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */
-#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* unaligned stores cause exc.*/
-
-#define XCHAL_SW_VERSION 701001 /* sw version of this header */
-
-#define XCHAL_CORE_ID "stretch_bali" /* alphanum core name
- (CoreID) set in the Xtensa
- Processor Generator */
-
-#define XCHAL_BUILD_UNIQUE_ID 0x000104B9 /* 22-bit sw build ID */
-
-/*
- * These definitions describe the hardware targeted by this software.
- */
-#define XCHAL_HW_CONFIGID0 0xC2F3F9FE /* ConfigID hi 32 bits*/
-#define XCHAL_HW_CONFIGID1 0x054104B9 /* ConfigID lo 32 bits*/
-#define XCHAL_HW_VERSION_NAME "LX1.0.2" /* full version name */
-#define XCHAL_HW_VERSION_MAJOR 2100 /* major ver# of targeted hw */
-#define XCHAL_HW_VERSION_MINOR 2 /* minor ver# of targeted hw */
-#define XCHAL_HW_VERSION 210002 /* major*100+minor */
-#define XCHAL_HW_REL_LX1 1
-#define XCHAL_HW_REL_LX1_0 1
-#define XCHAL_HW_REL_LX1_0_2 1
-#define XCHAL_HW_CONFIGID_RELIABLE 1
-/* If software targets a *range* of hardware versions, these are the bounds: */
-#define XCHAL_HW_MIN_VERSION_MAJOR 2100 /* major v of earliest tgt hw */
-#define XCHAL_HW_MIN_VERSION_MINOR 2 /* minor v of earliest tgt hw */
-#define XCHAL_HW_MIN_VERSION 210002 /* earliest targeted hw */
-#define XCHAL_HW_MAX_VERSION_MAJOR 2100 /* major v of latest tgt hw */
-#define XCHAL_HW_MAX_VERSION_MINOR 2 /* minor v of latest tgt hw */
-#define XCHAL_HW_MAX_VERSION 210002 /* latest targeted hw */
-
-
-/*----------------------------------------------------------------------
- CACHE
- ----------------------------------------------------------------------*/
-
-#define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
-#define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
-#define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
-
-#define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
-#define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
-
-#define XCHAL_DCACHE_IS_WRITEBACK 1 /* writeback feature */
-
-
-
-
-/****************************************************************************
- Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code
- ****************************************************************************/
-
-
-#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY
-
-/*----------------------------------------------------------------------
- CACHE
- ----------------------------------------------------------------------*/
-
-#define XCHAL_HAVE_PIF 1 /* any outbound PIF present */
-
-/* If present, cache size in bytes == (ways * 2^(linewidth + setwidth)). */
-
-/* Number of cache sets in log2(lines per way): */
-#define XCHAL_ICACHE_SETWIDTH 9
-#define XCHAL_DCACHE_SETWIDTH 10
-
-/* Cache set associativity (number of ways): */
-#define XCHAL_ICACHE_WAYS 4
-#define XCHAL_DCACHE_WAYS 2
-
-/* Cache features: */
-#define XCHAL_ICACHE_LINE_LOCKABLE 1
-#define XCHAL_DCACHE_LINE_LOCKABLE 0
-#define XCHAL_ICACHE_ECC_PARITY 0
-#define XCHAL_DCACHE_ECC_PARITY 0
-
-/* Number of encoded cache attr bits (see <xtensa/hal.h> for decoded bits): */
-#define XCHAL_CA_BITS 4
-
-
-/*----------------------------------------------------------------------
- INTERNAL I/D RAM/ROMs and XLMI
- ----------------------------------------------------------------------*/
-
-#define XCHAL_NUM_INSTROM 0 /* number of core instr. ROMs */
-#define XCHAL_NUM_INSTRAM 0 /* number of core instr. RAMs */
-#define XCHAL_NUM_DATAROM 0 /* number of core data ROMs */
-#define XCHAL_NUM_DATARAM 1 /* number of core data RAMs */
-#define XCHAL_NUM_URAM 0 /* number of core unified RAMs*/
-#define XCHAL_NUM_XLMI 1 /* number of core XLMI ports */
-
-/* Data RAM 0: */
-#define XCHAL_DATARAM0_VADDR 0x3FFF0000
-#define XCHAL_DATARAM0_PADDR 0x3FFF0000
-#define XCHAL_DATARAM0_SIZE 65536
-#define XCHAL_DATARAM0_ECC_PARITY 0
-
-/* XLMI Port 0: */
-#define XCHAL_XLMI0_VADDR 0x37F80000
-#define XCHAL_XLMI0_PADDR 0x37F80000
-#define XCHAL_XLMI0_SIZE 262144
-#define XCHAL_XLMI0_ECC_PARITY 0
-
-
-/*----------------------------------------------------------------------
- INTERRUPTS and TIMERS
- ----------------------------------------------------------------------*/
-
-#define XCHAL_HAVE_INTERRUPTS 1 /* interrupt option */
-#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* med/high-pri. interrupts */
-#define XCHAL_HAVE_NMI 1 /* non-maskable interrupt */
-#define XCHAL_HAVE_CCOUNT 1 /* CCOUNT reg. (timer option) */
-#define XCHAL_NUM_TIMERS 3 /* number of CCOMPAREn regs */
-#define XCHAL_NUM_INTERRUPTS 27 /* number of interrupts */
-#define XCHAL_NUM_INTERRUPTS_LOG2 5 /* ceil(log2(NUM_INTERRUPTS)) */
-#define XCHAL_NUM_EXTINTERRUPTS 20 /* num of external interrupts */
-#define XCHAL_NUM_INTLEVELS 4 /* number of interrupt levels
- (not including level zero) */
-#define XCHAL_EXCM_LEVEL 1 /* level masked by PS.EXCM */
- /* (always 1 in XEA1; levels 2 .. EXCM_LEVEL are "medium priority") */
-
-/* Masks of interrupts at each interrupt level: */
-#define XCHAL_INTLEVEL1_MASK 0x01F07FFF
-#define XCHAL_INTLEVEL2_MASK 0x02018000
-#define XCHAL_INTLEVEL3_MASK 0x04060000
-#define XCHAL_INTLEVEL4_MASK 0x00000000
-#define XCHAL_INTLEVEL5_MASK 0x00080000
-#define XCHAL_INTLEVEL6_MASK 0x00000000
-#define XCHAL_INTLEVEL7_MASK 0x00000000
-
-/* Masks of interrupts at each range 1..n of interrupt levels: */
-#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x01F07FFF
-#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x03F1FFFF
-#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x07F7FFFF
-#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x07F7FFFF
-#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x07FFFFFF
-#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x07FFFFFF
-#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x07FFFFFF
-
-/* Level of each interrupt: */
-#define XCHAL_INT0_LEVEL 1
-#define XCHAL_INT1_LEVEL 1
-#define XCHAL_INT2_LEVEL 1
-#define XCHAL_INT3_LEVEL 1
-#define XCHAL_INT4_LEVEL 1
-#define XCHAL_INT5_LEVEL 1
-#define XCHAL_INT6_LEVEL 1
-#define XCHAL_INT7_LEVEL 1
-#define XCHAL_INT8_LEVEL 1
-#define XCHAL_INT9_LEVEL 1
-#define XCHAL_INT10_LEVEL 1
-#define XCHAL_INT11_LEVEL 1
-#define XCHAL_INT12_LEVEL 1
-#define XCHAL_INT13_LEVEL 1
-#define XCHAL_INT14_LEVEL 1
-#define XCHAL_INT15_LEVEL 2
-#define XCHAL_INT16_LEVEL 2
-#define XCHAL_INT17_LEVEL 3
-#define XCHAL_INT18_LEVEL 3
-#define XCHAL_INT19_LEVEL 5
-#define XCHAL_INT20_LEVEL 1
-#define XCHAL_INT21_LEVEL 1
-#define XCHAL_INT22_LEVEL 1
-#define XCHAL_INT23_LEVEL 1
-#define XCHAL_INT24_LEVEL 1
-#define XCHAL_INT25_LEVEL 2
-#define XCHAL_INT26_LEVEL 3
-#define XCHAL_DEBUGLEVEL 4 /* debug interrupt level */
-#define XCHAL_HAVE_DEBUG_EXTERN_INT 1 /* OCD external db interrupt */
-#define XCHAL_NMILEVEL 5 /* NMI "level" (for use with
- EXCSAVE/EPS/EPC_n, RFI n) */
-
-/* Type of each interrupt: */
-#define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
-#define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
-#define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
-#define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
-#define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
-#define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
-#define XCHAL_INT6_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
-#define XCHAL_INT7_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
-#define XCHAL_INT8_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
-#define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
-#define XCHAL_INT10_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
-#define XCHAL_INT11_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
-#define XCHAL_INT12_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
-#define XCHAL_INT13_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
-#define XCHAL_INT14_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
-#define XCHAL_INT15_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
-#define XCHAL_INT16_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
-#define XCHAL_INT17_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
-#define XCHAL_INT18_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
-#define XCHAL_INT19_TYPE XTHAL_INTTYPE_NMI
-#define XCHAL_INT20_TYPE XTHAL_INTTYPE_SOFTWARE
-#define XCHAL_INT21_TYPE XTHAL_INTTYPE_SOFTWARE
-#define XCHAL_INT22_TYPE XTHAL_INTTYPE_SOFTWARE
-#define XCHAL_INT23_TYPE XTHAL_INTTYPE_SOFTWARE
-#define XCHAL_INT24_TYPE XTHAL_INTTYPE_TIMER
-#define XCHAL_INT25_TYPE XTHAL_INTTYPE_TIMER
-#define XCHAL_INT26_TYPE XTHAL_INTTYPE_TIMER
-
-/* Masks of interrupts for each type of interrupt: */
-#define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xF8000000
-#define XCHAL_INTTYPE_MASK_SOFTWARE 0x00F00000
-#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x00000000
-#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x0007FFFF
-#define XCHAL_INTTYPE_MASK_TIMER 0x07000000
-#define XCHAL_INTTYPE_MASK_NMI 0x00080000
-#define XCHAL_INTTYPE_MASK_WRITE_ERROR 0x00000000
-
-/* Interrupt numbers assigned to specific interrupt sources: */
-#define XCHAL_TIMER0_INTERRUPT 24 /* CCOMPARE0 */
-#define XCHAL_TIMER1_INTERRUPT 25 /* CCOMPARE1 */
-#define XCHAL_TIMER2_INTERRUPT 26 /* CCOMPARE2 */
-#define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED
-#define XCHAL_NMI_INTERRUPT 19 /* non-maskable interrupt */
-
-/* Interrupt numbers for levels at which only one interrupt is configured: */
-#define XCHAL_INTLEVEL5_NUM 19
-/* (There are many interrupts each at level(s) 1, 2, 3.) */
-
-
-/*
- * External interrupt vectors/levels.
- * These macros describe how Xtensa processor interrupt numbers
- * (as numbered internally, eg. in INTERRUPT and INTENABLE registers)
- * map to external BInterrupt<n> pins, for those interrupts
- * configured as external (level-triggered, edge-triggered, or NMI).
- * See the Xtensa processor databook for more details.
- */
-
-/* Core interrupt numbers mapped to each EXTERNAL interrupt number: */
-#define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */
-#define XCHAL_EXTINT1_NUM 1 /* (intlevel 1) */
-#define XCHAL_EXTINT2_NUM 2 /* (intlevel 1) */
-#define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */
-#define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */
-#define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */
-#define XCHAL_EXTINT6_NUM 6 /* (intlevel 1) */
-#define XCHAL_EXTINT7_NUM 7 /* (intlevel 1) */
-#define XCHAL_EXTINT8_NUM 8 /* (intlevel 1) */
-#define XCHAL_EXTINT9_NUM 9 /* (intlevel 1) */
-#define XCHAL_EXTINT10_NUM 10 /* (intlevel 1) */
-#define XCHAL_EXTINT11_NUM 11 /* (intlevel 1) */
-#define XCHAL_EXTINT12_NUM 12 /* (intlevel 1) */
-#define XCHAL_EXTINT13_NUM 13 /* (intlevel 1) */
-#define XCHAL_EXTINT14_NUM 14 /* (intlevel 1) */
-#define XCHAL_EXTINT15_NUM 15 /* (intlevel 2) */
-#define XCHAL_EXTINT16_NUM 16 /* (intlevel 2) */
-#define XCHAL_EXTINT17_NUM 17 /* (intlevel 3) */
-#define XCHAL_EXTINT18_NUM 18 /* (intlevel 3) */
-#define XCHAL_EXTINT19_NUM 19 /* (intlevel 5) */
-
-
-/*----------------------------------------------------------------------
- EXCEPTIONS and VECTORS
- ----------------------------------------------------------------------*/
-
-#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture
- number: 1 == XEA1 (old)
- 2 == XEA2 (new)
- 0 == XEAX (extern) */
-#define XCHAL_HAVE_XEA1 0 /* Exception Architecture 1 */
-#define XCHAL_HAVE_XEA2 1 /* Exception Architecture 2 */
-#define XCHAL_HAVE_XEAX 0 /* External Exception Arch. */
-#define XCHAL_HAVE_EXCEPTIONS 1 /* exception option */
-#define XCHAL_HAVE_MEM_ECC_PARITY 0 /* local memory ECC/parity */
-#define XCHAL_HAVE_VECTOR_SELECT 0 /* relocatable vectors */
-#define XCHAL_HAVE_VECBASE 0 /* relocatable vectors */
-
-#define XCHAL_RESET_VECOFS 0x00000000
-#define XCHAL_RESET_VECTOR_VADDR 0x3FFE03D0
-#define XCHAL_RESET_VECTOR_PADDR 0x3FFE03D0
-#define XCHAL_USER_VECOFS 0x00000000
-#define XCHAL_USER_VECTOR_VADDR 0x40000220
-#define XCHAL_USER_VECTOR_PADDR 0x40000220
-#define XCHAL_KERNEL_VECOFS 0x00000000
-#define XCHAL_KERNEL_VECTOR_VADDR 0x40000200
-#define XCHAL_KERNEL_VECTOR_PADDR 0x40000200
-#define XCHAL_DOUBLEEXC_VECOFS 0x00000000
-#define XCHAL_DOUBLEEXC_VECTOR_VADDR 0x400002A0
-#define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x400002A0
-#define XCHAL_WINDOW_OF4_VECOFS 0x00000000
-#define XCHAL_WINDOW_UF4_VECOFS 0x00000040
-#define XCHAL_WINDOW_OF8_VECOFS 0x00000080
-#define XCHAL_WINDOW_UF8_VECOFS 0x000000C0
-#define XCHAL_WINDOW_OF12_VECOFS 0x00000100
-#define XCHAL_WINDOW_UF12_VECOFS 0x00000140
-#define XCHAL_WINDOW_VECTORS_VADDR 0x40000000
-#define XCHAL_WINDOW_VECTORS_PADDR 0x40000000
-#define XCHAL_INTLEVEL2_VECOFS 0x00000000
-#define XCHAL_INTLEVEL2_VECTOR_VADDR 0x40000240
-#define XCHAL_INTLEVEL2_VECTOR_PADDR 0x40000240
-#define XCHAL_INTLEVEL3_VECOFS 0x00000000
-#define XCHAL_INTLEVEL3_VECTOR_VADDR 0x40000260
-#define XCHAL_INTLEVEL3_VECTOR_PADDR 0x40000260
-#define XCHAL_INTLEVEL4_VECOFS 0x00000000
-#define XCHAL_INTLEVEL4_VECTOR_VADDR 0x40000390
-#define XCHAL_INTLEVEL4_VECTOR_PADDR 0x40000390
-#define XCHAL_DEBUG_VECOFS XCHAL_INTLEVEL4_VECOFS
-#define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL4_VECTOR_VADDR
-#define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL4_VECTOR_PADDR
-#define XCHAL_NMI_VECOFS 0x00000000
-#define XCHAL_NMI_VECTOR_VADDR 0x400003B0
-#define XCHAL_NMI_VECTOR_PADDR 0x400003B0
-#define XCHAL_INTLEVEL5_VECOFS XCHAL_NMI_VECOFS
-#define XCHAL_INTLEVEL5_VECTOR_VADDR XCHAL_NMI_VECTOR_VADDR
-#define XCHAL_INTLEVEL5_VECTOR_PADDR XCHAL_NMI_VECTOR_PADDR
-
-
-/*----------------------------------------------------------------------
- DEBUG
- ----------------------------------------------------------------------*/
-
-#define XCHAL_HAVE_OCD 1 /* OnChipDebug option */
-#define XCHAL_NUM_IBREAK 2 /* number of IBREAKn regs */
-#define XCHAL_NUM_DBREAK 2 /* number of DBREAKn regs */
-#define XCHAL_HAVE_OCD_DIR_ARRAY 1 /* faster OCD option */
-
-
-/*----------------------------------------------------------------------
- MMU
- ----------------------------------------------------------------------*/
-
-/* See core-matmap.h header file for more details. */
-
-#define XCHAL_HAVE_TLBS 1 /* inverse of HAVE_CACHEATTR */
-#define XCHAL_HAVE_SPANNING_WAY 1 /* one way maps I+D 4GB vaddr */
-#define XCHAL_HAVE_IDENTITY_MAP 1 /* vaddr == paddr always */
-#define XCHAL_HAVE_CACHEATTR 0 /* CACHEATTR register present */
-#define XCHAL_HAVE_MIMIC_CACHEATTR 1 /* region protection */
-#define XCHAL_HAVE_XLT_CACHEATTR 0 /* region prot. w/translation */
-#define XCHAL_HAVE_PTP_MMU 0 /* full MMU (with page table
- [autorefill] and protection)
- usable for an MMU-based OS */
-/* If none of the above last 4 are set, it's a custom TLB configuration. */
-
-#define XCHAL_MMU_ASID_BITS 0 /* number of bits in ASIDs */
-#define XCHAL_MMU_RINGS 1 /* number of rings (1..4) */
-#define XCHAL_MMU_RING_BITS 0 /* num of bits in RING field */
-
-#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
-
-
-#endif /* _XTENSA_CORE_CONFIGURATION_H */
-
diff --git a/arch/xtensa/variants/s6000/include/variant/dmac.h b/arch/xtensa/variants/s6000/include/variant/dmac.h
deleted file mode 100644
index 3f88d9fc689..00000000000
--- a/arch/xtensa/variants/s6000/include/variant/dmac.h
+++ /dev/null
@@ -1,387 +0,0 @@
-/*
- * include/asm-xtensa/variant-s6000/dmac.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2006 Tensilica Inc.
- * Copyright (C) 2008 Emlix GmbH <info@emlix.com>
- * Authors: Fabian Godehardt <fg@emlix.com>
- * Oskar Schirmer <oskar@scara.com>
- * Daniel Gloeckner <dg@emlix.com>
- */
-
-#ifndef __ASM_XTENSA_S6000_DMAC_H
-#define __ASM_XTENSA_S6000_DMAC_H
-#include <linux/io.h>
-#include <variant/hardware.h>
-
-/* DMA global */
-
-#define S6_DMA_INTSTAT0 0x000
-#define S6_DMA_INTSTAT1 0x004
-#define S6_DMA_INTENABLE0 0x008
-#define S6_DMA_INTENABLE1 0x00C
-#define S6_DMA_INTRAW0 0x010
-#define S6_DMA_INTRAW1 0x014
-#define S6_DMA_INTCLEAR0 0x018
-#define S6_DMA_INTCLEAR1 0x01C
-#define S6_DMA_INTSET0 0x020
-#define S6_DMA_INTSET1 0x024
-#define S6_DMA_INT0_UNDER 0
-#define S6_DMA_INT0_OVER 16
-#define S6_DMA_INT1_CHANNEL 0
-#define S6_DMA_INT1_MASTER 16
-#define S6_DMA_INT1_MASTER_MASK 7
-#define S6_DMA_TERMCNTIRQSTAT 0x028
-#define S6_DMA_TERMCNTIRQCLR 0x02C
-#define S6_DMA_TERMCNTIRQSET 0x030
-#define S6_DMA_PENDCNTIRQSTAT 0x034
-#define S6_DMA_PENDCNTIRQCLR 0x038
-#define S6_DMA_PENDCNTIRQSET 0x03C
-#define S6_DMA_LOWWMRKIRQSTAT 0x040
-#define S6_DMA_LOWWMRKIRQCLR 0x044
-#define S6_DMA_LOWWMRKIRQSET 0x048
-#define S6_DMA_MASTERERRINFO 0x04C
-#define S6_DMA_MASTERERR_CHAN(n) (4*(n))
-#define S6_DMA_MASTERERR_CHAN_MASK 0xF
-#define S6_DMA_DESCRFIFO0 0x050
-#define S6_DMA_DESCRFIFO1 0x054
-#define S6_DMA_DESCRFIFO2 0x058
-#define S6_DMA_DESCRFIFO2_AUTODISABLE 24
-#define S6_DMA_DESCRFIFO3 0x05C
-#define S6_DMA_MASTER0START 0x060
-#define S6_DMA_MASTER0END 0x064
-#define S6_DMA_MASTER1START 0x068
-#define S6_DMA_MASTER1END 0x06C
-#define S6_DMA_NEXTFREE 0x070
-#define S6_DMA_NEXTFREE_CHAN 0
-#define S6_DMA_NEXTFREE_CHAN_MASK 0x1F
-#define S6_DMA_NEXTFREE_ENA 16
-#define S6_DMA_NEXTFREE_ENA_MASK ((1 << 16) - 1)
-#define S6_DMA_DPORTCTRLGRP(p) ((p) * 4 + 0x074)
-#define S6_DMA_DPORTCTRLGRP_FRAMEREP 0
-#define S6_DMA_DPORTCTRLGRP_NRCHANS 1
-#define S6_DMA_DPORTCTRLGRP_NRCHANS_1 0
-#define S6_DMA_DPORTCTRLGRP_NRCHANS_3 1
-#define S6_DMA_DPORTCTRLGRP_NRCHANS_4 2
-#define S6_DMA_DPORTCTRLGRP_NRCHANS_2 3
-#define S6_DMA_DPORTCTRLGRP_ENA 31
-
-
-/* DMA per channel */
-
-#define DMA_CHNL(dmac, n) ((dmac) + 0x1000 + (n) * 0x100)
-#define DMA_INDEX_CHNL(addr) (((addr) >> 8) & 0xF)
-#define DMA_MASK_DMAC(addr) ((addr) & 0xFFFF0000)
-#define S6_DMA_CHNCTRL 0x000
-#define S6_DMA_CHNCTRL_ENABLE 0
-#define S6_DMA_CHNCTRL_PAUSE 1
-#define S6_DMA_CHNCTRL_PRIO 2
-#define S6_DMA_CHNCTRL_PRIO_MASK 3
-#define S6_DMA_CHNCTRL_PERIPHXFER 4
-#define S6_DMA_CHNCTRL_PERIPHENA 5
-#define S6_DMA_CHNCTRL_SRCINC 6
-#define S6_DMA_CHNCTRL_DSTINC 7
-#define S6_DMA_CHNCTRL_BURSTLOG 8
-#define S6_DMA_CHNCTRL_BURSTLOG_MASK 7
-#define S6_DMA_CHNCTRL_DESCFIFODEPTH 12
-#define S6_DMA_CHNCTRL_DESCFIFODEPTH_MASK 0x1F
-#define S6_DMA_CHNCTRL_DESCFIFOFULL 17
-#define S6_DMA_CHNCTRL_BWCONSEL 18
-#define S6_DMA_CHNCTRL_BWCONENA 19
-#define S6_DMA_CHNCTRL_PENDGCNTSTAT 20
-#define S6_DMA_CHNCTRL_PENDGCNTSTAT_MASK 0x3F
-#define S6_DMA_CHNCTRL_LOWWMARK 26
-#define S6_DMA_CHNCTRL_LOWWMARK_MASK 0xF
-#define S6_DMA_CHNCTRL_TSTAMP 30
-#define S6_DMA_TERMCNTNB 0x004
-#define S6_DMA_TERMCNTNB_MASK 0xFFFF
-#define S6_DMA_TERMCNTTMO 0x008
-#define S6_DMA_TERMCNTSTAT 0x00C
-#define S6_DMA_TERMCNTSTAT_MASK 0xFF
-#define S6_DMA_CMONCHUNK 0x010
-#define S6_DMA_SRCSKIP 0x014
-#define S6_DMA_DSTSKIP 0x018
-#define S6_DMA_CUR_SRC 0x024
-#define S6_DMA_CUR_DST 0x028
-#define S6_DMA_TIMESTAMP 0x030
-
-/* DMA channel lists */
-
-#define S6_DPDMA_CHAN(stream, channel) (4 * (stream) + (channel))
-#define S6_DPDMA_NB 16
-
-#define S6_HIFDMA_GMACTX 0
-#define S6_HIFDMA_GMACRX 1
-#define S6_HIFDMA_I2S0 2
-#define S6_HIFDMA_I2S1 3
-#define S6_HIFDMA_EGIB 4
-#define S6_HIFDMA_PCITX 5
-#define S6_HIFDMA_PCIRX 6
-#define S6_HIFDMA_NB 7
-
-#define S6_NIDMA_NB 4
-
-#define S6_LMSDMA_NB 12
-
-/* controller access */
-
-#define S6_DMAC_NB 4
-#define S6_DMAC_INDEX(dmac) (((unsigned)(dmac) >> 18) % S6_DMAC_NB)
-
-struct s6dmac_ctrl {
- u32 dmac;
- spinlock_t lock;
- u8 chan_nb;
-};
-
-extern struct s6dmac_ctrl s6dmac_ctrl[S6_DMAC_NB];
-
-
-/* DMA control, per channel */
-
-static inline int s6dmac_fifo_full(u32 dmac, int chan)
-{
- return (readl(DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL)
- & (1 << S6_DMA_CHNCTRL_DESCFIFOFULL)) && 1;
-}
-
-static inline int s6dmac_termcnt_irq(u32 dmac, int chan)
-{
- u32 m = 1 << chan;
- int r = (readl(dmac + S6_DMA_TERMCNTIRQSTAT) & m) && 1;
- if (r)
- writel(m, dmac + S6_DMA_TERMCNTIRQCLR);
- return r;
-}
-
-static inline int s6dmac_pendcnt_irq(u32 dmac, int chan)
-{
- u32 m = 1 << chan;
- int r = (readl(dmac + S6_DMA_PENDCNTIRQSTAT) & m) && 1;
- if (r)
- writel(m, dmac + S6_DMA_PENDCNTIRQCLR);
- return r;
-}
-
-static inline int s6dmac_lowwmark_irq(u32 dmac, int chan)
-{
- int r = (readl(dmac + S6_DMA_LOWWMRKIRQSTAT) & (1 << chan)) ? 1 : 0;
- if (r)
- writel(1 << chan, dmac + S6_DMA_LOWWMRKIRQCLR);
- return r;
-}
-
-static inline u32 s6dmac_pending_count(u32 dmac, int chan)
-{
- return (readl(DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL)
- >> S6_DMA_CHNCTRL_PENDGCNTSTAT)
- & S6_DMA_CHNCTRL_PENDGCNTSTAT_MASK;
-}
-
-static inline void s6dmac_set_terminal_count(u32 dmac, int chan, u32 n)
-{
- n &= S6_DMA_TERMCNTNB_MASK;
- n |= readl(DMA_CHNL(dmac, chan) + S6_DMA_TERMCNTNB)
- & ~S6_DMA_TERMCNTNB_MASK;
- writel(n, DMA_CHNL(dmac, chan) + S6_DMA_TERMCNTNB);
-}
-
-static inline u32 s6dmac_get_terminal_count(u32 dmac, int chan)
-{
- return (readl(DMA_CHNL(dmac, chan) + S6_DMA_TERMCNTNB))
- & S6_DMA_TERMCNTNB_MASK;
-}
-
-static inline u32 s6dmac_timestamp(u32 dmac, int chan)
-{
- return readl(DMA_CHNL(dmac, chan) + S6_DMA_TIMESTAMP);
-}
-
-static inline u32 s6dmac_cur_src(u32 dmac, int chan)
-{
- return readl(DMA_CHNL(dmac, chan) + S6_DMA_CUR_SRC);
-}
-
-static inline u32 s6dmac_cur_dst(u32 dmac, int chan)
-{
- return readl(DMA_CHNL(dmac, chan) + S6_DMA_CUR_DST);
-}
-
-static inline void s6dmac_disable_chan(u32 dmac, int chan)
-{
- u32 ctrl;
- writel(readl(DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL)
- & ~(1 << S6_DMA_CHNCTRL_ENABLE),
- DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL);
- do
- ctrl = readl(DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL);
- while (ctrl & (1 << S6_DMA_CHNCTRL_ENABLE));
-}
-
-static inline void s6dmac_set_stride_skip(u32 dmac, int chan,
- int comchunk, /* 0: disable scatter/gather */
- int srcskip, int dstskip)
-{
- writel(comchunk, DMA_CHNL(dmac, chan) + S6_DMA_CMONCHUNK);
- writel(srcskip, DMA_CHNL(dmac, chan) + S6_DMA_SRCSKIP);
- writel(dstskip, DMA_CHNL(dmac, chan) + S6_DMA_DSTSKIP);
-}
-
-static inline void s6dmac_enable_chan(u32 dmac, int chan,
- int prio, /* 0 (highest) .. 3 (lowest) */
- int periphxfer, /* <0: disable p.req.line, 0..1: mode */
- int srcinc, int dstinc, /* 0: dont increment src/dst address */
- int comchunk, /* 0: disable scatter/gather */
- int srcskip, int dstskip,
- int burstsize, /* 4 for I2S, 7 for everything else */
- int bandwidthconserve, /* <0: disable, 0..1: select */
- int lowwmark, /* 0..15 */
- int timestamp, /* 0: disable timestamp */
- int enable) /* 0: disable for now */
-{
- writel(1, DMA_CHNL(dmac, chan) + S6_DMA_TERMCNTNB);
- writel(0, DMA_CHNL(dmac, chan) + S6_DMA_TERMCNTTMO);
- writel(lowwmark << S6_DMA_CHNCTRL_LOWWMARK,
- DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL);
- s6dmac_set_stride_skip(dmac, chan, comchunk, srcskip, dstskip);
- writel(((enable ? 1 : 0) << S6_DMA_CHNCTRL_ENABLE) |
- (prio << S6_DMA_CHNCTRL_PRIO) |
- (((periphxfer > 0) ? 1 : 0) << S6_DMA_CHNCTRL_PERIPHXFER) |
- (((periphxfer < 0) ? 0 : 1) << S6_DMA_CHNCTRL_PERIPHENA) |
- ((srcinc ? 1 : 0) << S6_DMA_CHNCTRL_SRCINC) |
- ((dstinc ? 1 : 0) << S6_DMA_CHNCTRL_DSTINC) |
- (burstsize << S6_DMA_CHNCTRL_BURSTLOG) |
- (((bandwidthconserve > 0) ? 1 : 0) << S6_DMA_CHNCTRL_BWCONSEL) |
- (((bandwidthconserve < 0) ? 0 : 1) << S6_DMA_CHNCTRL_BWCONENA) |
- (lowwmark << S6_DMA_CHNCTRL_LOWWMARK) |
- ((timestamp ? 1 : 0) << S6_DMA_CHNCTRL_TSTAMP),
- DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL);
-}
-
-
-/* DMA control, per engine */
-
-static inline unsigned _dmac_addr_index(u32 dmac)
-{
- unsigned i = S6_DMAC_INDEX(dmac);
- if (s6dmac_ctrl[i].dmac != dmac)
- BUG();
- return i;
-}
-
-static inline void _s6dmac_disable_error_irqs(u32 dmac, u32 mask)
-{
- writel(mask, dmac + S6_DMA_TERMCNTIRQCLR);
- writel(mask, dmac + S6_DMA_PENDCNTIRQCLR);
- writel(mask, dmac + S6_DMA_LOWWMRKIRQCLR);
- writel(readl(dmac + S6_DMA_INTENABLE0)
- & ~((mask << S6_DMA_INT0_UNDER) | (mask << S6_DMA_INT0_OVER)),
- dmac + S6_DMA_INTENABLE0);
- writel(readl(dmac + S6_DMA_INTENABLE1) & ~(mask << S6_DMA_INT1_CHANNEL),
- dmac + S6_DMA_INTENABLE1);
- writel((mask << S6_DMA_INT0_UNDER) | (mask << S6_DMA_INT0_OVER),
- dmac + S6_DMA_INTCLEAR0);
- writel(mask << S6_DMA_INT1_CHANNEL, dmac + S6_DMA_INTCLEAR1);
-}
-
-/*
- * request channel from specified engine
- * with chan<0, accept any channel
- * further parameters see s6dmac_enable_chan
- * returns < 0 upon error, channel nb otherwise
- */
-static inline int s6dmac_request_chan(u32 dmac, int chan,
- int prio,
- int periphxfer,
- int srcinc, int dstinc,
- int comchunk,
- int srcskip, int dstskip,
- int burstsize,
- int bandwidthconserve,
- int lowwmark,
- int timestamp,
- int enable)
-{
- int r = chan;
- unsigned long flags;
- spinlock_t *spinl = &s6dmac_ctrl[_dmac_addr_index(dmac)].lock;
- spin_lock_irqsave(spinl, flags);
- if (r < 0) {
- r = (readl(dmac + S6_DMA_NEXTFREE) >> S6_DMA_NEXTFREE_CHAN)
- & S6_DMA_NEXTFREE_CHAN_MASK;
- }
- if (r >= s6dmac_ctrl[_dmac_addr_index(dmac)].chan_nb) {
- if (chan < 0)
- r = -EBUSY;
- else
- r = -ENXIO;
- } else if (((readl(dmac + S6_DMA_NEXTFREE) >> S6_DMA_NEXTFREE_ENA)
- >> r) & 1) {
- r = -EBUSY;
- } else {
- s6dmac_enable_chan(dmac, r, prio, periphxfer,
- srcinc, dstinc, comchunk, srcskip, dstskip, burstsize,
- bandwidthconserve, lowwmark, timestamp, enable);
- }
- spin_unlock_irqrestore(spinl, flags);
- return r;
-}
-
-static inline void s6dmac_put_fifo(u32 dmac, int chan,
- u32 src, u32 dst, u32 size)
-{
- unsigned long flags;
- spinlock_t *spinl = &s6dmac_ctrl[_dmac_addr_index(dmac)].lock;
- spin_lock_irqsave(spinl, flags);
- writel(src, dmac + S6_DMA_DESCRFIFO0);
- writel(dst, dmac + S6_DMA_DESCRFIFO1);
- writel(size, dmac + S6_DMA_DESCRFIFO2);
- writel(chan, dmac + S6_DMA_DESCRFIFO3);
- spin_unlock_irqrestore(spinl, flags);
-}
-
-static inline u32 s6dmac_channel_enabled(u32 dmac, int chan)
-{
- return readl(DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL) &
- (1 << S6_DMA_CHNCTRL_ENABLE);
-}
-
-/*
- * group 1-4 data port channels
- * with port=0..3, nrch=1-4 channels,
- * frrep=0/1 (dis- or enable frame repeat)
- */
-static inline void s6dmac_dp_setup_group(u32 dmac, int port,
- int nrch, int frrep)
-{
- static const u8 mask[4] = {0, 3, 1, 2};
- BUG_ON(dmac != S6_REG_DPDMA);
- if ((port < 0) || (port > 3) || (nrch < 1) || (nrch > 4))
- return;
- writel((mask[nrch - 1] << S6_DMA_DPORTCTRLGRP_NRCHANS)
- | ((frrep ? 1 : 0) << S6_DMA_DPORTCTRLGRP_FRAMEREP),
- dmac + S6_DMA_DPORTCTRLGRP(port));
-}
-
-static inline void s6dmac_dp_switch_group(u32 dmac, int port, int enable)
-{
- u32 tmp;
- BUG_ON(dmac != S6_REG_DPDMA);
- tmp = readl(dmac + S6_DMA_DPORTCTRLGRP(port));
- if (enable)
- tmp |= (1 << S6_DMA_DPORTCTRLGRP_ENA);
- else
- tmp &= ~(1 << S6_DMA_DPORTCTRLGRP_ENA);
- writel(tmp, dmac + S6_DMA_DPORTCTRLGRP(port));
-}
-
-extern void s6dmac_put_fifo_cache(u32 dmac, int chan,
- u32 src, u32 dst, u32 size);
-extern void s6dmac_disable_error_irqs(u32 dmac, u32 mask);
-extern u32 s6dmac_int_sources(u32 dmac, u32 channel);
-extern void s6dmac_release_chan(u32 dmac, int chan);
-
-#endif /* __ASM_XTENSA_S6000_DMAC_H */
diff --git a/arch/xtensa/variants/s6000/include/variant/gpio.h b/arch/xtensa/variants/s6000/include/variant/gpio.h
deleted file mode 100644
index 8484ab0df46..00000000000
--- a/arch/xtensa/variants/s6000/include/variant/gpio.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _XTENSA_VARIANT_S6000_GPIO_H
-#define _XTENSA_VARIANT_S6000_GPIO_H
-
-extern int s6_gpio_init(u32 afsel);
-
-#endif /* _XTENSA_VARIANT_S6000_GPIO_H */
diff --git a/arch/xtensa/variants/s6000/include/variant/hardware.h b/arch/xtensa/variants/s6000/include/variant/hardware.h
deleted file mode 100644
index 5d9ba098d84..00000000000
--- a/arch/xtensa/variants/s6000/include/variant/hardware.h
+++ /dev/null
@@ -1,259 +0,0 @@
-#ifndef __XTENSA_S6000_HARDWARE_H
-#define __XTENSA_S6000_HARDWARE_H
-
-#define S6_SCLK 1843200
-
-#define S6_MEM_REG 0x20000000
-#define S6_MEM_EFI 0x33F00000
-#define S6_MEM_PCIE_DATARAM1 0x34000000
-#define S6_MEM_XLMI 0x37F80000
-#define S6_MEM_PIF_DATARAM1 0x37FFC000
-#define S6_MEM_GMAC 0x38000000
-#define S6_MEM_I2S 0x3A000000
-#define S6_MEM_EGIB 0x3C000000
-#define S6_MEM_PCIE_CFG 0x3E000000
-#define S6_MEM_PIF_DATARAM 0x3FFE0000
-#define S6_MEM_XLMI_DATARAM 0x3FFF0000
-#define S6_MEM_DDR 0x40000000
-#define S6_MEM_PCIE_APER 0xC0000000
-#define S6_MEM_AUX 0xF0000000
-
-/* Device addresses */
-
-#define S6_REG_SCB S6_MEM_REG
-#define S6_REG_NB (S6_REG_SCB + 0x10000)
-#define S6_REG_LMSDMA (S6_REG_SCB + 0x20000)
-#define S6_REG_NI (S6_REG_SCB + 0x30000)
-#define S6_REG_NIDMA (S6_REG_SCB + 0x40000)
-#define S6_REG_NS (S6_REG_SCB + 0x50000)
-#define S6_REG_DDR (S6_REG_SCB + 0x60000)
-#define S6_REG_GREG1 (S6_REG_SCB + 0x70000)
-#define S6_REG_DP (S6_REG_SCB + 0x80000)
-#define S6_REG_DPDMA (S6_REG_SCB + 0x90000)
-#define S6_REG_EGIB (S6_REG_SCB + 0xA0000)
-#define S6_REG_PCIE (S6_REG_SCB + 0xB0000)
-#define S6_REG_I2S (S6_REG_SCB + 0xC0000)
-#define S6_REG_GMAC (S6_REG_SCB + 0xD0000)
-#define S6_REG_HIFDMA (S6_REG_SCB + 0xE0000)
-#define S6_REG_GREG2 (S6_REG_SCB + 0xF0000)
-
-#define S6_REG_APB S6_REG_SCB
-#define S6_REG_UART (S6_REG_APB + 0x0000)
-#define S6_REG_INTC (S6_REG_APB + 0x2000)
-#define S6_REG_SPI (S6_REG_APB + 0x3000)
-#define S6_REG_I2C (S6_REG_APB + 0x4000)
-#define S6_REG_GPIO (S6_REG_APB + 0x8000)
-
-/* Global register block */
-
-#define S6_GREG1_PLL_LOCKCLEAR 0x000
-#define S6_GREG1_PLL_LOCK_SYS 0
-#define S6_GREG1_PLL_LOCK_IO 1
-#define S6_GREG1_PLL_LOCK_AIM 2
-#define S6_GREG1_PLL_LOCK_DP0 3
-#define S6_GREG1_PLL_LOCK_DP2 4
-#define S6_GREG1_PLL_LOCK_DDR 5
-#define S6_GREG1_PLL_LOCKSTAT 0x004
-#define S6_GREG1_PLL_LOCKSTAT_CURLOCK 0
-#define S6_GREG1_PLL_LOCKSTAT_EVERUNLCK 8
-#define S6_GREG1_PLLSEL 0x010
-#define S6_GREG1_PLLSEL_AIM 0
-#define S6_GREG1_PLLSEL_AIM_DDR2 0
-#define S6_GREG1_PLLSEL_AIM_300MHZ 1
-#define S6_GREG1_PLLSEL_AIM_240MHZ 2
-#define S6_GREG1_PLLSEL_AIM_200MHZ 3
-#define S6_GREG1_PLLSEL_AIM_150MHZ 4
-#define S6_GREG1_PLLSEL_AIM_120MHZ 5
-#define S6_GREG1_PLLSEL_AIM_40MHZ 6
-#define S6_GREG1_PLLSEL_AIM_PLLAIMREF 7
-#define S6_GREG1_PLLSEL_AIM_MASK 7
-#define S6_GREG1_PLLSEL_DDR 8
-#define S6_GREG1_PLLSEL_DDR_HS 0
-#define S6_GREG1_PLLSEL_DDR_333MHZ 1
-#define S6_GREG1_PLLSEL_DDR_250MHZ 2
-#define S6_GREG1_PLLSEL_DDR_200MHZ 3
-#define S6_GREG1_PLLSEL_DDR_167MHZ 4
-#define S6_GREG1_PLLSEL_DDR_100MHZ 5
-#define S6_GREG1_PLLSEL_DDR_33MHZ 6
-#define S6_GREG1_PLLSEL_DDR_PLLIOREF 7
-#define S6_GREG1_PLLSEL_DDR_MASK 7
-#define S6_GREG1_PLLSEL_GMAC 16
-#define S6_GREG1_PLLSEL_GMAC_125MHZ 0
-#define S6_GREG1_PLLSEL_GMAC_25MHZ 1
-#define S6_GREG1_PLLSEL_GMAC_2500KHZ 2
-#define S6_GREG1_PLLSEL_GMAC_EXTERN 3
-#define S6_GREG1_PLLSEL_GMAC_MASK 3
-#define S6_GREG1_PLLSEL_GMII 18
-#define S6_GREG1_PLLSEL_GMII_111MHZ 0
-#define S6_GREG1_PLLSEL_GMII_IOREF 1
-#define S6_GREG1_PLLSEL_GMII_NONE 2
-#define S6_GREG1_PLLSEL_GMII_125MHZ 3
-#define S6_GREG1_PLLSEL_GMII_MASK 3
-#define S6_GREG1_SYSUNLOCKCNT 0x020
-#define S6_GREG1_IOUNLOCKCNT 0x024
-#define S6_GREG1_AIMUNLOCKCNT 0x028
-#define S6_GREG1_DP0UNLOCKCNT 0x02C
-#define S6_GREG1_DP2UNLOCKCNT 0x030
-#define S6_GREG1_DDRUNLOCKCNT 0x034
-#define S6_GREG1_CLKBAL0 0x040
-#define S6_GREG1_CLKBAL0_LSGB 0
-#define S6_GREG1_CLKBAL0_LSPX 8
-#define S6_GREG1_CLKBAL0_MEMDO 16
-#define S6_GREG1_CLKBAL0_HSXT1 24
-#define S6_GREG1_CLKBAL1 0x044
-#define S6_GREG1_CLKBAL1_HSISEF 0
-#define S6_GREG1_CLKBAL1_HSNI 8
-#define S6_GREG1_CLKBAL1_HSNS 16
-#define S6_GREG1_CLKBAL1_HSISEFCFG 24
-#define S6_GREG1_CLKBAL2 0x048
-#define S6_GREG1_CLKBAL2_LSNB 0
-#define S6_GREG1_CLKBAL2_LSSB 8
-#define S6_GREG1_CLKBAL2_LSREST 24
-#define S6_GREG1_CLKBAL3 0x04C
-#define S6_GREG1_CLKBAL3_ISEFXAD 0
-#define S6_GREG1_CLKBAL3_ISEFLMS 8
-#define S6_GREG1_CLKBAL3_ISEFISEF 16
-#define S6_GREG1_CLKBAL3_DDRDD 24
-#define S6_GREG1_CLKBAL4 0x050
-#define S6_GREG1_CLKBAL4_DDRDP 0
-#define S6_GREG1_CLKBAL4_DDRDO 8
-#define S6_GREG1_CLKBAL4_DDRNB 16
-#define S6_GREG1_CLKBAL4_DDRLMS 24
-#define S6_GREG1_BLOCKENA 0x100
-#define S6_GREG1_BLOCK_DDR 0
-#define S6_GREG1_BLOCK_DP 1
-#define S6_GREG1_BLOCK_NSNI 2
-#define S6_GREG1_BLOCK_PCIE 3
-#define S6_GREG1_BLOCK_GMAC 4
-#define S6_GREG1_BLOCK_I2S 5
-#define S6_GREG1_BLOCK_EGIB 6
-#define S6_GREG1_BLOCK_SB 7
-#define S6_GREG1_BLOCK_XT1 8
-#define S6_GREG1_CLKGATE 0x104
-#define S6_GREG1_BGATE_AIMNORTH 9
-#define S6_GREG1_BGATE_AIMEAST 10
-#define S6_GREG1_BGATE_AIMWEST 11
-#define S6_GREG1_BGATE_AIMSOUTH 12
-#define S6_GREG1_CHIPRES 0x108
-#define S6_GREG1_CHIPRES_SOFTRES 0
-#define S6_GREG1_CHIPRES_LOSTLOCK 1
-#define S6_GREG1_RESETCAUSE 0x10C
-#define S6_GREG1_RESETCAUSE_RESETN 0
-#define S6_GREG1_RESETCAUSE_GLOBAL 1
-#define S6_GREG1_RESETCAUSE_WDOGTIMER 2
-#define S6_GREG1_RESETCAUSE_SWCHIP 3
-#define S6_GREG1_RESETCAUSE_PLLSYSLOSS 4
-#define S6_GREG1_RESETCAUSE_PCIE 5
-#define S6_GREG1_RESETCAUSE_CREATEDGLOB 6
-#define S6_GREG1_REFCLOCKCNT 0x110
-#define S6_GREG1_RESETTIMER 0x114
-#define S6_GREG1_NMITIMER 0x118
-#define S6_GREG1_GLOBAL_TIMER 0x11C
-#define S6_GREG1_TIMER0 0x180
-#define S6_GREG1_TIMER1 0x184
-#define S6_GREG1_UARTCLOCKSEL 0x204
-#define S6_GREG1_CHIPVERSPACKG 0x208
-#define S6_GREG1_CHIPVERSPACKG_CHIPVID 0
-#define S6_GREG1_CHIPVERSPACKG_PACKSEL 8
-#define S6_GREG1_ONDIETERMCTRL 0x20C
-#define S6_GREG1_ONDIETERMCTRL_WEST 0
-#define S6_GREG1_ONDIETERMCTRL_NORTH 2
-#define S6_GREG1_ONDIETERMCTRL_EAST 4
-#define S6_GREG1_ONDIETERMCTRL_SOUTH 6
-#define S6_GREG1_ONDIETERMCTRL_NONE 0
-#define S6_GREG1_ONDIETERMCTRL_75OHM 2
-#define S6_GREG1_ONDIETERMCTRL_MASK 3
-#define S6_GREG1_BOOT_CFG0 0x210
-#define S6_GREG1_BOOT_CFG0_AIMSTRONG 1
-#define S6_GREG1_BOOT_CFG0_MINIBOOTDL 2
-#define S6_GREG1_BOOT_CFG0_OCDGPIO8SET 5
-#define S6_GREG1_BOOT_CFG0_OCDGPIOENA 6
-#define S6_GREG1_BOOT_CFG0_DOWNSTREAM 7
-#define S6_GREG1_BOOT_CFG0_PLLSYSDIV 8
-#define S6_GREG1_BOOT_CFG0_PLLSYSDIV_300MHZ 1
-#define S6_GREG1_BOOT_CFG0_PLLSYSDIV_240MHZ 2
-#define S6_GREG1_BOOT_CFG0_PLLSYSDIV_200MHZ 3
-#define S6_GREG1_BOOT_CFG0_PLLSYSDIV_150MHZ 4
-#define S6_GREG1_BOOT_CFG0_PLLSYSDIV_120MHZ 5
-#define S6_GREG1_BOOT_CFG0_PLLSYSDIV_40MHZ 6
-#define S6_GREG1_BOOT_CFG0_PLLSYSDIV_MASK 7
-#define S6_GREG1_BOOT_CFG0_BALHSLMS 12
-#define S6_GREG1_BOOT_CFG0_BALHSNB 18
-#define S6_GREG1_BOOT_CFG0_BALHSXAD 24
-#define S6_GREG1_BOOT_CFG1 0x214
-#define S6_GREG1_BOOT_CFG1_PCIE1LANE 1
-#define S6_GREG1_BOOT_CFG1_MPLLPRESCALE 2
-#define S6_GREG1_BOOT_CFG1_MPLLNCY 4
-#define S6_GREG1_BOOT_CFG1_MPLLNCY5 9
-#define S6_GREG1_BOOT_CFG1_BALHSREST 14
-#define S6_GREG1_BOOT_CFG1_BALHSPSMEMS 20
-#define S6_GREG1_BOOT_CFG1_BALLSGI 26
-#define S6_GREG1_BOOT_CFG2 0x218
-#define S6_GREG1_BOOT_CFG2_PEID 0
-#define S6_GREG1_BOOT_CFG3 0x21C
-#define S6_GREG1_DRAMBUSYHOLDOF 0x220
-#define S6_GREG1_DRAMBUSYHOLDOF_XT0 0
-#define S6_GREG1_DRAMBUSYHOLDOF_XT1 4
-#define S6_GREG1_DRAMBUSYHOLDOF_XT_MASK 7
-#define S6_GREG1_PCIEBAR1SIZE 0x224
-#define S6_GREG1_PCIEBAR2SIZE 0x228
-#define S6_GREG1_PCIEVENDOR 0x22C
-#define S6_GREG1_PCIEDEVICE 0x230
-#define S6_GREG1_PCIEREV 0x234
-#define S6_GREG1_PCIECLASS 0x238
-#define S6_GREG1_XT1DCACHEMISS 0x240
-#define S6_GREG1_XT1ICACHEMISS 0x244
-#define S6_GREG1_HWSEMAPHORE(n) (0x400 + 4 * (n))
-#define S6_GREG1_HWSEMAPHORE_NB 16
-
-/* peripheral interrupt numbers */
-
-#define S6_INTC_GPIO(n) (n) /* 0..3 */
-#define S6_INTC_I2C 4
-#define S6_INTC_SPI 5
-#define S6_INTC_NB_ERR 6
-#define S6_INTC_DMA_LMSERR 7
-#define S6_INTC_DMA_LMSLOWWMRK(n) (8 + (n)) /* 0..11 */
-#define S6_INTC_DMA_LMSPENDCNT(n) (20 + (n)) /* 0..11 */
-#define S6_INTC_DMA_HOSTLOWWMRK(n) (32 + (n)) /* 0..6 */
-#define S6_INTC_DMA_HOSTPENDCNT(n) (39 + (n)) /* 0..6 */
-#define S6_INTC_DMA_HOSTERR 46
-#define S6_INTC_UART(n) (47 + (n)) /* 0..1 */
-#define S6_INTC_XAD 49
-#define S6_INTC_NI_ERR 50
-#define S6_INTC_NI_INFIFOFULL 51
-#define S6_INTC_DMA_NIERR 52
-#define S6_INTC_DMA_NILOWWMRK(n) (53 + (n)) /* 0..3 */
-#define S6_INTC_DMA_NIPENDCNT(n) (57 + (n)) /* 0..3 */
-#define S6_INTC_DDR 61
-#define S6_INTC_NS_ERR 62
-#define S6_INTC_EFI_CFGERR 63
-#define S6_INTC_EFI_ISEFTEST 64
-#define S6_INTC_EFI_WRITEERR 65
-#define S6_INTC_NMI_TIMER 66
-#define S6_INTC_PLLLOCK_SYS 67
-#define S6_INTC_PLLLOCK_IO 68
-#define S6_INTC_PLLLOCK_AIM 69
-#define S6_INTC_PLLLOCK_DP0 70
-#define S6_INTC_PLLLOCK_DP2 71
-#define S6_INTC_I2S_ERR 72
-#define S6_INTC_GMAC_STAT 73
-#define S6_INTC_GMAC_ERR 74
-#define S6_INTC_GIB_ERR 75
-#define S6_INTC_PCIE_ERR 76
-#define S6_INTC_PCIE_MSI(n) (77 + (n)) /* 0..3 */
-#define S6_INTC_PCIE_INTA 81
-#define S6_INTC_PCIE_INTB 82
-#define S6_INTC_PCIE_INTC 83
-#define S6_INTC_PCIE_INTD 84
-#define S6_INTC_SW(n) (85 + (n)) /* 0..9 */
-#define S6_INTC_SW_ENABLE(n) (85 + 256 + (n))
-#define S6_INTC_DMA_DP_ERR 95
-#define S6_INTC_DMA_DPLOWWMRK(n) (96 + (n)) /* 0..3 */
-#define S6_INTC_DMA_DPPENDCNT(n) (100 + (n)) /* 0..3 */
-#define S6_INTC_DMA_DPTERMCNT(n) (104 + (n)) /* 0..3 */
-#define S6_INTC_TIMER0 108
-#define S6_INTC_TIMER1 109
-#define S6_INTC_DMA_HOSTTERMCNT(n) (110 + (n)) /* 0..6 */
-
-#endif /* __XTENSA_S6000_HARDWARE_H */
diff --git a/arch/xtensa/variants/s6000/include/variant/irq.h b/arch/xtensa/variants/s6000/include/variant/irq.h
deleted file mode 100644
index 39ca751a625..00000000000
--- a/arch/xtensa/variants/s6000/include/variant/irq.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef _XTENSA_S6000_IRQ_H
-#define _XTENSA_S6000_IRQ_H
-
-#define VARIANT_NR_IRQS 8 /* GPIO interrupts */
-
-extern void variant_irq_enable(unsigned int irq);
-
-#endif /* __XTENSA_S6000_IRQ_H */
diff --git a/arch/xtensa/variants/s6000/include/variant/tie-asm.h b/arch/xtensa/variants/s6000/include/variant/tie-asm.h
deleted file mode 100644
index f02d0a3a2e2..00000000000
--- a/arch/xtensa/variants/s6000/include/variant/tie-asm.h
+++ /dev/null
@@ -1,304 +0,0 @@
-/*
- * This header file contains assembly-language definitions (assembly
- * macros, etc.) for this specific Xtensa processor's TIE extensions
- * and options. It is customized to this Xtensa processor configuration.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1999-2008 Tensilica Inc.
- */
-
-#ifndef _XTENSA_CORE_TIE_ASM_H
-#define _XTENSA_CORE_TIE_ASM_H
-
-/* Selection parameter values for save-area save/restore macros: */
-/* Option vs. TIE: */
-#define XTHAL_SAS_TIE 0x0001 /* custom extension or coprocessor */
-#define XTHAL_SAS_OPT 0x0002 /* optional (and not a coprocessor) */
-/* Whether used automatically by compiler: */
-#define XTHAL_SAS_NOCC 0x0004 /* not used by compiler w/o special opts/code */
-#define XTHAL_SAS_CC 0x0008 /* used by compiler without special opts/code */
-/* ABI handling across function calls: */
-#define XTHAL_SAS_CALR 0x0010 /* caller-saved */
-#define XTHAL_SAS_CALE 0x0020 /* callee-saved */
-#define XTHAL_SAS_GLOB 0x0040 /* global across function calls (in thread) */
-/* Misc */
-#define XTHAL_SAS_ALL 0xFFFF /* include all default NCP contents */
-
-
-
-/* Macro to save all non-coprocessor (extra) custom TIE and optional state
- * (not including zero-overhead loop registers).
- * Save area ptr (clobbered): ptr (16 byte aligned)
- * Scratch regs (clobbered): at1..at4 (only first XCHAL_NCP_NUM_ATMPS needed)
- */
- .macro xchal_ncp_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL
- xchal_sa_start \continue, \ofs
- .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~\select
- xchal_sa_align \ptr, 0, 1024-4, 4, 4
- rsr \at1, BR // boolean option
- s32i \at1, \ptr, .Lxchal_ofs_ + 0
- .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
- .endif
- .endm // xchal_ncp_store
-
-/* Macro to save all non-coprocessor (extra) custom TIE and optional state
- * (not including zero-overhead loop registers).
- * Save area ptr (clobbered): ptr (16 byte aligned)
- * Scratch regs (clobbered): at1..at4 (only first XCHAL_NCP_NUM_ATMPS needed)
- */
- .macro xchal_ncp_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL
- xchal_sa_start \continue, \ofs
- .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~\select
- xchal_sa_align \ptr, 0, 1024-4, 4, 4
- l32i \at1, \ptr, .Lxchal_ofs_ + 0
- wsr \at1, BR // boolean option
- .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
- .endif
- .endm // xchal_ncp_load
-
-
-
-#define XCHAL_NCP_NUM_ATMPS 1
-
-
-
-/* Macro to save the state of TIE coprocessor FPU.
- * Save area ptr (clobbered): ptr (16 byte aligned)
- * Scratch regs (clobbered): at1..at4 (only first XCHAL_CP0_NUM_ATMPS needed)
- */
-#define xchal_cp_FPU_store xchal_cp0_store
-/* #define xchal_cp_FPU_store_a2 xchal_cp0_store a2 a3 a4 a5 a6 */
- .macro xchal_cp0_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL
- xchal_sa_start \continue, \ofs
- .ifeq (XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~\select
- xchal_sa_align \ptr, 0, 0, 1, 16
- rur232 \at1 // FCR
- s32i \at1, \ptr, 0
- rur233 \at1 // FSR
- s32i \at1, \ptr, 4
- SSI f0, \ptr, 8
- SSI f1, \ptr, 12
- SSI f2, \ptr, 16
- SSI f3, \ptr, 20
- SSI f4, \ptr, 24
- SSI f5, \ptr, 28
- SSI f6, \ptr, 32
- SSI f7, \ptr, 36
- SSI f8, \ptr, 40
- SSI f9, \ptr, 44
- SSI f10, \ptr, 48
- SSI f11, \ptr, 52
- SSI f12, \ptr, 56
- SSI f13, \ptr, 60
- SSI f14, \ptr, 64
- SSI f15, \ptr, 68
- .set .Lxchal_ofs_, .Lxchal_ofs_ + 72
- .endif
- .endm // xchal_cp0_store
-
-/* Macro to restore the state of TIE coprocessor FPU.
- * Save area ptr (clobbered): ptr (16 byte aligned)
- * Scratch regs (clobbered): at1..at4 (only first XCHAL_CP0_NUM_ATMPS needed)
- */
-#define xchal_cp_FPU_load xchal_cp0_load
-/* #define xchal_cp_FPU_load_a2 xchal_cp0_load a2 a3 a4 a5 a6 */
- .macro xchal_cp0_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL
- xchal_sa_start \continue, \ofs
- .ifeq (XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~\select
- xchal_sa_align \ptr, 0, 0, 1, 16
- l32i \at1, \ptr, 0
- wur232 \at1 // FCR
- l32i \at1, \ptr, 4
- wur233 \at1 // FSR
- LSI f0, \ptr, 8
- LSI f1, \ptr, 12
- LSI f2, \ptr, 16
- LSI f3, \ptr, 20
- LSI f4, \ptr, 24
- LSI f5, \ptr, 28
- LSI f6, \ptr, 32
- LSI f7, \ptr, 36
- LSI f8, \ptr, 40
- LSI f9, \ptr, 44
- LSI f10, \ptr, 48
- LSI f11, \ptr, 52
- LSI f12, \ptr, 56
- LSI f13, \ptr, 60
- LSI f14, \ptr, 64
- LSI f15, \ptr, 68
- .set .Lxchal_ofs_, .Lxchal_ofs_ + 72
- .endif
- .endm // xchal_cp0_load
-
-#define XCHAL_CP0_NUM_ATMPS 1
-
-/* Macro to save the state of TIE coprocessor XAD.
- * Save area ptr (clobbered): ptr (16 byte aligned)
- * Scratch regs (clobbered): at1..at4 (only first XCHAL_CP6_NUM_ATMPS needed)
- */
-#define xchal_cp_XAD_store xchal_cp6_store
-/* #define xchal_cp_XAD_store_a2 xchal_cp6_store a2 a3 a4 a5 a6 */
- .macro xchal_cp6_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL
- xchal_sa_start \continue, \ofs
- .ifeq (XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~\select
- xchal_sa_align \ptr, 0, 0, 1, 16
- rur0 \at1 // LDCBHI
- s32i \at1, \ptr, 0
- rur1 \at1 // LDCBLO
- s32i \at1, \ptr, 4
- rur2 \at1 // STCBHI
- s32i \at1, \ptr, 8
- rur3 \at1 // STCBLO
- s32i \at1, \ptr, 12
- rur8 \at1 // LDBRBASE
- s32i \at1, \ptr, 16
- rur9 \at1 // LDBROFF
- s32i \at1, \ptr, 20
- rur10 \at1 // LDBRINC
- s32i \at1, \ptr, 24
- rur11 \at1 // STBRBASE
- s32i \at1, \ptr, 28
- rur12 \at1 // STBROFF
- s32i \at1, \ptr, 32
- rur13 \at1 // STBRINC
- s32i \at1, \ptr, 36
- rur24 \at1 // SCRATCH0
- s32i \at1, \ptr, 40
- rur25 \at1 // SCRATCH1
- s32i \at1, \ptr, 44
- rur26 \at1 // SCRATCH2
- s32i \at1, \ptr, 48
- rur27 \at1 // SCRATCH3
- s32i \at1, \ptr, 52
- WRAS128I wra0, \ptr, 64
- WRAS128I wra1, \ptr, 80
- WRAS128I wra2, \ptr, 96
- WRAS128I wra3, \ptr, 112
- WRAS128I wra4, \ptr, 128
- WRAS128I wra5, \ptr, 144
- WRAS128I wra6, \ptr, 160
- WRAS128I wra7, \ptr, 176
- WRAS128I wra8, \ptr, 192
- WRAS128I wra9, \ptr, 208
- WRAS128I wra10, \ptr, 224
- WRAS128I wra11, \ptr, 240
- WRAS128I wra12, \ptr, 256
- WRAS128I wra13, \ptr, 272
- WRAS128I wra14, \ptr, 288
- WRAS128I wra15, \ptr, 304
- WRBS128I wrb0, \ptr, 320
- WRBS128I wrb1, \ptr, 336
- WRBS128I wrb2, \ptr, 352
- WRBS128I wrb3, \ptr, 368
- WRBS128I wrb4, \ptr, 384
- WRBS128I wrb5, \ptr, 400
- WRBS128I wrb6, \ptr, 416
- WRBS128I wrb7, \ptr, 432
- WRBS128I wrb8, \ptr, 448
- WRBS128I wrb9, \ptr, 464
- WRBS128I wrb10, \ptr, 480
- WRBS128I wrb11, \ptr, 496
- WRBS128I wrb12, \ptr, 512
- WRBS128I wrb13, \ptr, 528
- WRBS128I wrb14, \ptr, 544
- WRBS128I wrb15, \ptr, 560
- .set .Lxchal_ofs_, .Lxchal_ofs_ + 576
- .endif
- .endm // xchal_cp6_store
-
-/* Macro to restore the state of TIE coprocessor XAD.
- * Save area ptr (clobbered): ptr (16 byte aligned)
- * Scratch regs (clobbered): at1..at4 (only first XCHAL_CP6_NUM_ATMPS needed)
- */
-#define xchal_cp_XAD_load xchal_cp6_load
-/* #define xchal_cp_XAD_load_a2 xchal_cp6_load a2 a3 a4 a5 a6 */
- .macro xchal_cp6_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL
- xchal_sa_start \continue, \ofs
- .ifeq (XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~\select
- xchal_sa_align \ptr, 0, 0, 1, 16
- l32i \at1, \ptr, 0
- wur0 \at1 // LDCBHI
- l32i \at1, \ptr, 4
- wur1 \at1 // LDCBLO
- l32i \at1, \ptr, 8
- wur2 \at1 // STCBHI
- l32i \at1, \ptr, 12
- wur3 \at1 // STCBLO
- l32i \at1, \ptr, 16
- wur8 \at1 // LDBRBASE
- l32i \at1, \ptr, 20
- wur9 \at1 // LDBROFF
- l32i \at1, \ptr, 24
- wur10 \at1 // LDBRINC
- l32i \at1, \ptr, 28
- wur11 \at1 // STBRBASE
- l32i \at1, \ptr, 32
- wur12 \at1 // STBROFF
- l32i \at1, \ptr, 36
- wur13 \at1 // STBRINC
- l32i \at1, \ptr, 40
- wur24 \at1 // SCRATCH0
- l32i \at1, \ptr, 44
- wur25 \at1 // SCRATCH1
- l32i \at1, \ptr, 48
- wur26 \at1 // SCRATCH2
- l32i \at1, \ptr, 52
- wur27 \at1 // SCRATCH3
- WRBL128I wrb0, \ptr, 320
- WRBL128I wrb1, \ptr, 336
- WRBL128I wrb2, \ptr, 352
- WRBL128I wrb3, \ptr, 368
- WRBL128I wrb4, \ptr, 384
- WRBL128I wrb5, \ptr, 400
- WRBL128I wrb6, \ptr, 416
- WRBL128I wrb7, \ptr, 432
- WRBL128I wrb8, \ptr, 448
- WRBL128I wrb9, \ptr, 464
- WRBL128I wrb10, \ptr, 480
- WRBL128I wrb11, \ptr, 496
- WRBL128I wrb12, \ptr, 512
- WRBL128I wrb13, \ptr, 528
- WRBL128I wrb14, \ptr, 544
- WRBL128I wrb15, \ptr, 560
- WRAL128I wra0, \ptr, 64
- WRAL128I wra1, \ptr, 80
- WRAL128I wra2, \ptr, 96
- WRAL128I wra3, \ptr, 112
- WRAL128I wra4, \ptr, 128
- WRAL128I wra5, \ptr, 144
- WRAL128I wra6, \ptr, 160
- WRAL128I wra7, \ptr, 176
- WRAL128I wra8, \ptr, 192
- WRAL128I wra9, \ptr, 208
- WRAL128I wra10, \ptr, 224
- WRAL128I wra11, \ptr, 240
- WRAL128I wra12, \ptr, 256
- WRAL128I wra13, \ptr, 272
- WRAL128I wra14, \ptr, 288
- WRAL128I wra15, \ptr, 304
- .set .Lxchal_ofs_, .Lxchal_ofs_ + 576
- .endif
- .endm // xchal_cp6_load
-
-#define XCHAL_CP6_NUM_ATMPS 1
-#define XCHAL_SA_NUM_ATMPS 1
-
- /* Empty macros for unconfigured coprocessors: */
- .macro xchal_cp1_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
- .macro xchal_cp1_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
- .macro xchal_cp2_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
- .macro xchal_cp2_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
- .macro xchal_cp3_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
- .macro xchal_cp3_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
- .macro xchal_cp4_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
- .macro xchal_cp4_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
- .macro xchal_cp5_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
- .macro xchal_cp5_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
- .macro xchal_cp7_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
- .macro xchal_cp7_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
-
-#endif /*_XTENSA_CORE_TIE_ASM_H*/
-
diff --git a/arch/xtensa/variants/s6000/include/variant/tie.h b/arch/xtensa/variants/s6000/include/variant/tie.h
deleted file mode 100644
index be7ea843d5d..00000000000
--- a/arch/xtensa/variants/s6000/include/variant/tie.h
+++ /dev/null
@@ -1,191 +0,0 @@
-/*
- * This header file describes this specific Xtensa processor's TIE extensions
- * that extend basic Xtensa core functionality. It is customized to this
- * Xtensa processor configuration.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1999-2008 Tensilica Inc.
- */
-
-#ifndef _XTENSA_CORE_TIE_H
-#define _XTENSA_CORE_TIE_H
-
-#define XCHAL_CP_NUM 2 /* number of coprocessors */
-#define XCHAL_CP_MAX 7 /* max CP ID + 1 (0 if none) */
-#define XCHAL_CP_MASK 0x41 /* bitmask of all CPs by ID */
-#define XCHAL_CP_PORT_MASK 0x00 /* bitmask of only port CPs */
-
-/* Basic parameters of each coprocessor: */
-#define XCHAL_CP0_NAME "FPU"
-#define XCHAL_CP0_IDENT FPU
-#define XCHAL_CP0_SA_SIZE 72 /* size of state save area */
-#define XCHAL_CP0_SA_ALIGN 4 /* min alignment of save area */
-#define XCHAL_CP_ID_FPU 0 /* coprocessor ID (0..7) */
-#define XCHAL_CP6_NAME "XAD"
-#define XCHAL_CP6_IDENT XAD
-#define XCHAL_CP6_SA_SIZE 576 /* size of state save area */
-#define XCHAL_CP6_SA_ALIGN 16 /* min alignment of save area */
-#define XCHAL_CP_ID_XAD 6 /* coprocessor ID (0..7) */
-
-/* Filler info for unassigned coprocessors, to simplify arrays etc: */
-#define XCHAL_CP1_SA_SIZE 0
-#define XCHAL_CP1_SA_ALIGN 1
-#define XCHAL_CP2_SA_SIZE 0
-#define XCHAL_CP2_SA_ALIGN 1
-#define XCHAL_CP3_SA_SIZE 0
-#define XCHAL_CP3_SA_ALIGN 1
-#define XCHAL_CP4_SA_SIZE 0
-#define XCHAL_CP4_SA_ALIGN 1
-#define XCHAL_CP5_SA_SIZE 0
-#define XCHAL_CP5_SA_ALIGN 1
-#define XCHAL_CP7_SA_SIZE 0
-#define XCHAL_CP7_SA_ALIGN 1
-
-/* Save area for non-coprocessor optional and custom (TIE) state: */
-#define XCHAL_NCP_SA_SIZE 4
-#define XCHAL_NCP_SA_ALIGN 4
-
-/* Total save area for optional and custom state (NCP + CPn): */
-#define XCHAL_TOTAL_SA_SIZE 672 /* with 16-byte align padding */
-#define XCHAL_TOTAL_SA_ALIGN 16 /* actual minimum alignment */
-
-/*
- * Detailed contents of save areas.
- * NOTE: caller must define the XCHAL_SA_REG macro (not defined here)
- * before expanding the XCHAL_xxx_SA_LIST() macros.
- *
- * XCHAL_SA_REG(s,ccused,abikind,kind,opt,name,galign,align,asize,
- * dbnum,base,regnum,bitsz,gapsz,reset,x...)
- *
- * s = passed from XCHAL_*_LIST(s), eg. to select how to expand
- * ccused = set if used by compiler without special options or code
- * abikind = 0 (caller-saved), 1 (callee-saved), or 2 (thread-global)
- * kind = 0 (special reg), 1 (TIE user reg), or 2 (TIE regfile reg)
- * opt = 0 (custom TIE extension or coprocessor), or 1 (optional reg)
- * name = lowercase reg name (no quotes)
- * galign = group byte alignment (power of 2) (galign >= align)
- * align = register byte alignment (power of 2)
- * asize = allocated size in bytes (asize*8 == bitsz + gapsz + padsz)
- * (not including any pad bytes required to galign this or next reg)
- * dbnum = unique target number f/debug (see <xtensa-libdb-macros.h>)
- * base = reg shortname w/o index (or sr=special, ur=TIE user reg)
- * regnum = reg index in regfile, or special/TIE-user reg number
- * bitsz = number of significant bits (regfile width, or ur/sr mask bits)
- * gapsz = intervening bits, if bitsz bits not stored contiguously
- * (padsz = pad bits at end [TIE regfile] or at msbits [ur,sr] of asize)
- * reset = register reset value (or 0 if undefined at reset)
- * x = reserved for future use (0 until then)
- *
- * To filter out certain registers, e.g. to expand only the non-global
- * registers used by the compiler, you can do something like this:
- *
- * #define XCHAL_SA_REG(s,ccused,p...) SELCC##ccused(p)
- * #define SELCC0(p...)
- * #define SELCC1(abikind,p...) SELAK##abikind(p)
- * #define SELAK0(p...) REG(p)
- * #define SELAK1(p...) REG(p)
- * #define SELAK2(p...)
- * #define REG(kind,tie,name,galn,aln,asz,csz,dbnum,base,rnum,bsz,rst,x...) \
- * ...what you want to expand...
- */
-
-#define XCHAL_NCP_SA_NUM 1
-#define XCHAL_NCP_SA_LIST(s) \
- XCHAL_SA_REG(s,0,0,0,1, br, 4, 4, 4,0x0204, sr,4 , 16,0,0,0)
-
-#define XCHAL_CP0_SA_NUM 18
-#define XCHAL_CP0_SA_LIST(s) \
- XCHAL_SA_REG(s,0,0,1,0, fcr, 4, 4, 4,0x03E8, ur,232, 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,1,0, fsr, 4, 4, 4,0x03E9, ur,233, 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, f0, 4, 4, 4,0x0030, f,0 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, f1, 4, 4, 4,0x0031, f,1 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, f2, 4, 4, 4,0x0032, f,2 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, f3, 4, 4, 4,0x0033, f,3 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, f4, 4, 4, 4,0x0034, f,4 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, f5, 4, 4, 4,0x0035, f,5 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, f6, 4, 4, 4,0x0036, f,6 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, f7, 4, 4, 4,0x0037, f,7 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, f8, 4, 4, 4,0x0038, f,8 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, f9, 4, 4, 4,0x0039, f,9 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, f10, 4, 4, 4,0x003A, f,10 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, f11, 4, 4, 4,0x003B, f,11 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, f12, 4, 4, 4,0x003C, f,12 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, f13, 4, 4, 4,0x003D, f,13 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, f14, 4, 4, 4,0x003E, f,14 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, f15, 4, 4, 4,0x003F, f,15 , 32,0,0,0)
-
-#define XCHAL_CP1_SA_NUM 0
-#define XCHAL_CP1_SA_LIST(s) /* empty */
-
-#define XCHAL_CP2_SA_NUM 0
-#define XCHAL_CP2_SA_LIST(s) /* empty */
-
-#define XCHAL_CP3_SA_NUM 0
-#define XCHAL_CP3_SA_LIST(s) /* empty */
-
-#define XCHAL_CP4_SA_NUM 0
-#define XCHAL_CP4_SA_LIST(s) /* empty */
-
-#define XCHAL_CP5_SA_NUM 0
-#define XCHAL_CP5_SA_LIST(s) /* empty */
-
-#define XCHAL_CP6_SA_NUM 46
-#define XCHAL_CP6_SA_LIST(s) \
- XCHAL_SA_REG(s,0,0,1,0, ldcbhi,16, 4, 4,0x0300, ur,0 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,1,0, ldcblo, 4, 4, 4,0x0301, ur,1 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,1,0, stcbhi, 4, 4, 4,0x0302, ur,2 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,1,0, stcblo, 4, 4, 4,0x0303, ur,3 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,1,0, ldbrbase, 4, 4, 4,0x0308, ur,8 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,1,0, ldbroff, 4, 4, 4,0x0309, ur,9 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,1,0, ldbrinc, 4, 4, 4,0x030A, ur,10 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,1,0, stbrbase, 4, 4, 4,0x030B, ur,11 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,1,0, stbroff, 4, 4, 4,0x030C, ur,12 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,1,0, stbrinc, 4, 4, 4,0x030D, ur,13 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,1,0, scratch0, 4, 4, 4,0x0318, ur,24 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,1,0, scratch1, 4, 4, 4,0x0319, ur,25 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,1,0, scratch2, 4, 4, 4,0x031A, ur,26 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,1,0, scratch3, 4, 4, 4,0x031B, ur,27 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wra0,16,16,16,0x1010, wra,0 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wra1,16,16,16,0x1011, wra,1 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wra2,16,16,16,0x1012, wra,2 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wra3,16,16,16,0x1013, wra,3 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wra4,16,16,16,0x1014, wra,4 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wra5,16,16,16,0x1015, wra,5 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wra6,16,16,16,0x1016, wra,6 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wra7,16,16,16,0x1017, wra,7 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wra8,16,16,16,0x1018, wra,8 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wra9,16,16,16,0x1019, wra,9 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wra10,16,16,16,0x101A, wra,10 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wra11,16,16,16,0x101B, wra,11 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wra12,16,16,16,0x101C, wra,12 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wra13,16,16,16,0x101D, wra,13 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wra14,16,16,16,0x101E, wra,14 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wra15,16,16,16,0x101F, wra,15 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb0,16,16,16,0x1020, wrb,0 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb1,16,16,16,0x1021, wrb,1 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb2,16,16,16,0x1022, wrb,2 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb3,16,16,16,0x1023, wrb,3 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb4,16,16,16,0x1024, wrb,4 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb5,16,16,16,0x1025, wrb,5 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb6,16,16,16,0x1026, wrb,6 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb7,16,16,16,0x1027, wrb,7 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb8,16,16,16,0x1028, wrb,8 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb9,16,16,16,0x1029, wrb,9 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb10,16,16,16,0x102A, wrb,10 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb11,16,16,16,0x102B, wrb,11 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb12,16,16,16,0x102C, wrb,12 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb13,16,16,16,0x102D, wrb,13 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb14,16,16,16,0x102E, wrb,14 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb15,16,16,16,0x102F, wrb,15 ,128,0,0,0)
-
-#define XCHAL_CP7_SA_NUM 0
-#define XCHAL_CP7_SA_LIST(s) /* empty */
-
-/* Byte length of instruction from its first nibble (op0 field), per FLIX. */
-#define XCHAL_OP0_FORMAT_LENGTHS 3,3,3,3,3,3,3,3,2,2,2,2,2,2,8,8
-
-#endif /*_XTENSA_CORE_TIE_H*/
-
diff --git a/arch/xtensa/variants/s6000/irq.c b/arch/xtensa/variants/s6000/irq.c
deleted file mode 100644
index 81a241e7907..00000000000
--- a/arch/xtensa/variants/s6000/irq.c
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * s6000 irq crossbar
- *
- * Copyright (c) 2009 emlix GmbH
- * Authors: Johannes Weiner <hannes@cmpxchg.org>
- * Oskar Schirmer <oskar@scara.com>
- */
-#include <linux/io.h>
-#include <asm/irq.h>
-#include <variant/hardware.h>
-
-/* S6_REG_INTC */
-#define INTC_STATUS 0x000
-#define INTC_RAW 0x010
-#define INTC_STATUS_AG 0x100
-#define INTC_CFG(n) (0x200 + 4 * (n))
-
-/*
- * The s6000 has a crossbar that multiplexes interrupt output lines
- * from the peripherals to input lines on the xtensa core.
- *
- * We leave the mapping decisions to the platform as it depends on the
- * actually connected peripherals which distribution makes sense.
- */
-extern const signed char *platform_irq_mappings[NR_IRQS];
-
-static unsigned long scp_to_intc_enable[] = {
-#define TO_INTC_ENABLE(n) (((n) << 1) + 1)
- TO_INTC_ENABLE(0),
- TO_INTC_ENABLE(1),
- TO_INTC_ENABLE(2),
- TO_INTC_ENABLE(3),
- TO_INTC_ENABLE(4),
- TO_INTC_ENABLE(5),
- TO_INTC_ENABLE(6),
- TO_INTC_ENABLE(7),
- TO_INTC_ENABLE(8),
- TO_INTC_ENABLE(9),
- TO_INTC_ENABLE(10),
- TO_INTC_ENABLE(11),
- TO_INTC_ENABLE(12),
- -1,
- -1,
- TO_INTC_ENABLE(13),
- -1,
- TO_INTC_ENABLE(14),
- -1,
- TO_INTC_ENABLE(15),
-#undef TO_INTC_ENABLE
-};
-
-static void irq_set(unsigned int irq, int enable)
-{
- unsigned long en;
- const signed char *m = platform_irq_mappings[irq];
-
- if (!m)
- return;
- en = enable ? scp_to_intc_enable[irq] : 0;
- while (*m >= 0) {
- writel(en, S6_REG_INTC + INTC_CFG(*m));
- m++;
- }
-}
-
-void variant_irq_enable(unsigned int irq)
-{
- irq_set(irq, 1);
-}
-
-void variant_irq_disable(unsigned int irq)
-{
- irq_set(irq, 0);
-}
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index e3d4e4043b4..32e8dbb9ad1 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -248,8 +248,8 @@ static int bt_get(struct blk_mq_alloc_data *data,
if (!(data->gfp & __GFP_WAIT))
return -1;
+ bs = bt_wait_ptr(bt, hctx);
do {
- bs = bt_wait_ptr(bt, hctx);
prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);
tag = __bt_get(hctx, bt, last_tag);
@@ -285,6 +285,8 @@ static int bt_get(struct blk_mq_alloc_data *data,
hctx = data->hctx;
bt = &hctx->tags->bitmap_tags;
}
+ finish_wait(&bs->wait, &wait);
+ bs = bt_wait_ptr(bt, hctx);
} while (1);
finish_wait(&bs->wait, &wait);
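Editor's note: the reordering in this hunk follows the usual prepare_to_wait()/finish_wait() discipline: pick the wait queue once before entering the loop, and whenever the loop may switch to a different hardware context (and therefore a different wait queue), finish waiting on the old queue before selecting the new one, so the task is never left linked on a stale waitqueue. A minimal sketch of that pattern, with hypothetical pick_wq()/condition()/queue_changed() helpers standing in for bt_wait_ptr()/__bt_get() (this is not the blk-mq code itself):

	DEFINE_WAIT(wait);
	wait_queue_head_t *wq = pick_wq();

	do {
		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
		if (condition())
			break;
		io_schedule();
		if (queue_changed()) {
			/* unhook from the old queue before switching */
			finish_wait(wq, &wait);
			wq = pick_wq();
		}
	} while (1);
	finish_wait(wq, &wait);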
diff --git a/drivers/Kconfig b/drivers/Kconfig
index af02a8a8ec4..694d5a70d6c 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -184,4 +184,6 @@ source "drivers/ras/Kconfig"
source "drivers/thunderbolt/Kconfig"
+source "drivers/android/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 628b512b625..67d2334dc41 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -162,3 +162,4 @@ obj-$(CONFIG_MCB) += mcb/
obj-$(CONFIG_RAS) += ras/
obj-$(CONFIG_THUNDERBOLT) += thunderbolt/
obj-$(CONFIG_CORESIGHT) += coresight/
+obj-$(CONFIG_ANDROID) += android/
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 7556e7c4a05..9b693d54c74 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -305,60 +305,6 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
*/
/*
- * Lenovo has a mix of systems OSI(Linux) situations
- * and thus we can not wildcard the vendor.
- *
- * _OSI(Linux) helps sound
- * DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad R61"),
- * DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T61"),
- * T400, T500
- * _OSI(Linux) has Linux specific hooks
- * DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X61"),
- * _OSI(Linux) is a NOP:
- * DMI_MATCH(DMI_PRODUCT_VERSION, "3000 N100"),
- * DMI_MATCH(DMI_PRODUCT_VERSION, "LENOVO3000 V100"),
- */
- {
- .callback = dmi_enable_osi_linux,
- .ident = "Lenovo ThinkPad R61",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad R61"),
- },
- },
- {
- .callback = dmi_enable_osi_linux,
- .ident = "Lenovo ThinkPad T61",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T61"),
- },
- },
- {
- .callback = dmi_enable_osi_linux,
- .ident = "Lenovo ThinkPad X61",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X61"),
- },
- },
- {
- .callback = dmi_enable_osi_linux,
- .ident = "Lenovo ThinkPad T400",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T400"),
- },
- },
- {
- .callback = dmi_enable_osi_linux,
- .ident = "Lenovo ThinkPad T500",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T500"),
- },
- },
- /*
	 * Without this, this EEEpc exports a non-working WMI interface; with
* this it exports a working "good old" eeepc_laptop interface, fixing
* both brightness control, and rfkill not working.
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 897640188ac..c2daa85fc9f 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -680,13 +680,21 @@ static int acpi_device_wakeup(struct acpi_device *adev, u32 target_state,
if (error)
return error;
+ if (adev->wakeup.flags.enabled)
+ return 0;
+
res = acpi_enable_gpe(wakeup->gpe_device, wakeup->gpe_number);
- if (ACPI_FAILURE(res)) {
+ if (ACPI_SUCCESS(res)) {
+ adev->wakeup.flags.enabled = 1;
+ } else {
acpi_disable_wakeup_device_power(adev);
return -EIO;
}
} else {
- acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number);
+ if (adev->wakeup.flags.enabled) {
+ acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number);
+ adev->wakeup.flags.enabled = 0;
+ }
acpi_disable_wakeup_device_power(adev);
}
return 0;
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 5f9b74b9b71..1b5853f384e 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -844,6 +844,8 @@ static int ec_install_handlers(struct acpi_ec *ec)
static void ec_remove_handlers(struct acpi_ec *ec)
{
+ if (!test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags))
+ return;
acpi_disable_gpe(NULL, ec->gpe);
if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index caf9b76b7ef..7a36f02598a 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -325,6 +325,7 @@ static int acpi_fan_probe(struct platform_device *pdev)
struct thermal_cooling_device *cdev;
struct acpi_fan *fan;
struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
+ char *name;
fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL);
if (!fan) {
@@ -346,7 +347,12 @@ static int acpi_fan_probe(struct platform_device *pdev)
}
}
- cdev = thermal_cooling_device_register("Fan", device,
+ if (!strncmp(pdev->name, "PNP0C0B", strlen("PNP0C0B")))
+ name = "Fan";
+ else
+ name = acpi_device_bid(device);
+
+ cdev = thermal_cooling_device_register(name, device,
&fan_cooling_ops);
if (IS_ERR(cdev)) {
result = PTR_ERR(cdev);
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 7cc4e33179f..5277a0ee570 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -413,6 +413,9 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
return 0;
}
+ if (dev->irq_managed && dev->irq > 0)
+ return 0;
+
entry = acpi_pci_irq_lookup(dev, pin);
if (!entry) {
/*
@@ -456,6 +459,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
return rc;
}
dev->irq = rc;
+ dev->irq_managed = 1;
if (link)
snprintf(link_desc, sizeof(link_desc), " -> Link[%s]", link);
@@ -478,7 +482,7 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
u8 pin;
pin = dev->pin;
- if (!pin)
+ if (!pin || !dev->irq_managed || dev->irq <= 0)
return;
/* Keep IOAPIC pin configuration when suspending */
@@ -506,6 +510,9 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
*/
dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin));
- if (gsi >= 0 && dev->irq > 0)
+ if (gsi >= 0) {
acpi_unregister_gsi(gsi);
+ dev->irq = 0;
+ dev->irq_managed = 0;
+ }
}
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index ef58f46c844..342942f90a1 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -125,13 +125,12 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
}
header = (struct acpi_subtable_header *)obj->buffer.pointer;
- if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
+ if (header->type == ACPI_MADT_TYPE_LOCAL_APIC)
map_lapic_id(header, acpi_id, &apic_id);
- } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
+ else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC)
map_lsapic_id(header, type, acpi_id, &apic_id);
- } else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
+ else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC)
map_x2apic_id(header, type, acpi_id, &apic_id);
- }
exit:
kfree(buffer.pointer);
@@ -164,7 +163,7 @@ int acpi_map_cpuid(int apic_id, u32 acpi_id)
* For example,
*
* Scope (_PR)
- * {
+ * {
* Processor (CPU0, 0x00, 0x00000410, 0x06) {}
* Processor (CPU1, 0x01, 0x00000410, 0x06) {}
* Processor (CPU2, 0x02, 0x00000410, 0x06) {}
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 2ba8f02ced3..782a0d15c25 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -200,7 +200,7 @@ bool acpi_dev_resource_address_space(struct acpi_resource *ares,
status = acpi_resource_to_address64(ares, &addr);
if (ACPI_FAILURE(status))
- return true;
+ return false;
res->start = addr.minimum;
res->end = addr.maximum;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 1b1cf558d3d..16914cc3088 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -2214,7 +2214,7 @@ static void acpi_device_dep_initialize(struct acpi_device *adev)
status = acpi_evaluate_reference(adev->handle, "_DEP", NULL,
&dep_devices);
if (ACPI_FAILURE(status)) {
- dev_err(&adev->dev, "Failed to evaluate _DEP.\n");
+ dev_dbg(&adev->dev, "Failed to evaluate _DEP.\n");
return;
}
@@ -2224,7 +2224,7 @@ static void acpi_device_dep_initialize(struct acpi_device *adev)
status = acpi_get_object_info(dep_devices.handles[i], &info);
if (ACPI_FAILURE(status)) {
- dev_err(&adev->dev, "Error reading device info\n");
+ dev_dbg(&adev->dev, "Error reading _DEP device info\n");
continue;
}
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index dd8ff63ee2b..cd49a3982b6 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -346,22 +346,16 @@ acpi_evaluate_reference(acpi_handle handle,
package = buffer.pointer;
if ((buffer.length == 0) || !package) {
- printk(KERN_ERR PREFIX "No return object (len %X ptr %p)\n",
- (unsigned)buffer.length, package);
status = AE_BAD_DATA;
acpi_util_eval_error(handle, pathname, status);
goto end;
}
if (package->type != ACPI_TYPE_PACKAGE) {
- printk(KERN_ERR PREFIX "Expecting a [Package], found type %X\n",
- package->type);
status = AE_BAD_DATA;
acpi_util_eval_error(handle, pathname, status);
goto end;
}
if (!package->package.count) {
- printk(KERN_ERR PREFIX "[Package] has zero elements (%p)\n",
- package);
status = AE_BAD_DATA;
acpi_util_eval_error(handle, pathname, status);
goto end;
@@ -380,17 +374,13 @@ acpi_evaluate_reference(acpi_handle handle,
if (element->type != ACPI_TYPE_LOCAL_REFERENCE) {
status = AE_BAD_DATA;
- printk(KERN_ERR PREFIX
- "Expecting a [Reference] package element, found type %X\n",
- element->type);
acpi_util_eval_error(handle, pathname, status);
break;
}
if (!element->reference.handle) {
- printk(KERN_WARNING PREFIX "Invalid reference in"
- " package %s\n", pathname);
status = AE_NULL_ENTRY;
+ acpi_util_eval_error(handle, pathname, status);
break;
}
/* Get the acpi_handle. */
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 185a57d1372..1eaadff2e19 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -155,6 +155,7 @@ struct acpi_video_bus {
u8 dos_setting;
struct acpi_video_enumerated_device *attached_array;
u8 attached_count;
+ u8 child_count;
struct acpi_video_bus_cap cap;
struct acpi_video_bus_flags flags;
struct list_head video_device_list;
@@ -1159,8 +1160,12 @@ static bool acpi_video_device_in_dod(struct acpi_video_device *device)
struct acpi_video_bus *video = device->video;
int i;
- /* If we have a broken _DOD, no need to test */
- if (!video->attached_count)
+ /*
+ * If we have a broken _DOD or we have more than 8 output devices
+	 * under the graphics controller node that we can't properly deal with
+ * in the operation region code currently, no need to test.
+ */
+ if (!video->attached_count || video->child_count > 8)
return true;
for (i = 0; i < video->attached_count; i++) {
@@ -1413,6 +1418,7 @@ acpi_video_bus_get_devices(struct acpi_video_bus *video,
dev_err(&dev->dev, "Can't attach device\n");
break;
}
+ video->child_count++;
}
return status;
}
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
new file mode 100644
index 00000000000..bdfc6c6f4f5
--- /dev/null
+++ b/drivers/android/Kconfig
@@ -0,0 +1,37 @@
+menu "Android"
+
+config ANDROID
+ bool "Android Drivers"
+ ---help---
+ Enable support for various drivers needed on the Android platform
+
+if ANDROID
+
+config ANDROID_BINDER_IPC
+ bool "Android Binder IPC Driver"
+ depends on MMU
+ default n
+ ---help---
+ Binder is used in Android for both communication between processes,
+ and remote method invocation.
+
+ This means one Android process can call a method/routine in another
+ Android process, using Binder to identify, invoke and pass arguments
+ between said processes.
+
+config ANDROID_BINDER_IPC_32BIT
+ bool
+ depends on !64BIT && ANDROID_BINDER_IPC
+ default y
+ ---help---
+ The Binder API has been changed to support both 32 and 64bit
+ applications in a mixed environment.
+
+ Enable this to support an old 32-bit Android user-space (v4.4 and
+ earlier).
+
+ Note that enabling this will break newer Android user-space.
+
+endif # if ANDROID
+
+endmenu
diff --git a/drivers/android/Makefile b/drivers/android/Makefile
new file mode 100644
index 00000000000..3b7e4b072c5
--- /dev/null
+++ b/drivers/android/Makefile
@@ -0,0 +1,3 @@
+ccflags-y += -I$(src) # needed for trace events
+
+obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o
diff --git a/drivers/staging/android/binder.c b/drivers/android/binder.c
index c69c40d69d5..8c43521d3f1 100644
--- a/drivers/staging/android/binder.c
+++ b/drivers/android/binder.c
@@ -38,7 +38,11 @@
#include <linux/slab.h>
#include <linux/pid_namespace.h>
-#include "binder.h"
+#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
+#define BINDER_IPC_32BIT 1
+#endif
+
+#include <uapi/linux/android/binder.h>
#include "binder_trace.h"
static DEFINE_MUTEX(binder_main_lock);
diff --git a/drivers/staging/android/binder_trace.h b/drivers/android/binder_trace.h
index 7f20f3dc836..7f20f3dc836 100644
--- a/drivers/staging/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index cd4cccbfd2a..a3a13605a9c 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -61,7 +61,7 @@ config ATA_ACPI
config SATA_ZPODD
bool "SATA Zero Power Optical Disc Drive (ZPODD) support"
- depends on ATA_ACPI && PM_RUNTIME
+ depends on ATA_ACPI && PM
default n
help
This option adds support for SATA Zero Power Optical Disc
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 2d195f3a199..d24dd614a0b 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -84,7 +84,11 @@ struct dev_pm_opp {
*
* This is an internal data structure maintaining the link to opps attached to
* a device. This structure is not meant to be shared to users as it is
- * meant for book keeping and private to OPP library
+ * meant for book keeping and private to OPP library.
+ *
+ * Because the opp structures can be used from both rcu and srcu readers, we
+ * need to wait for the grace period of both of them before freeing any
+ * resources. And so we have used kfree_rcu() from within call_srcu() handlers.
*/
struct device_opp {
struct list_head node;
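Editor's note: the added comment describes chaining the two grace periods: the SRCU callback does not free the structure directly but hands it to kfree_rcu(), so the memory is released only after both an SRCU and an RCU grace period have elapsed. A minimal sketch of the idea, using a hypothetical object and SRCU domain rather than the OPP structures:

	struct obj {
		struct rcu_head rcu_head;
		/* payload ... */
	};

	/* hypothetical SRCU domain, set up elsewhere with init_srcu_struct() */
	static struct srcu_struct my_srcu;

	static void obj_free_srcu(struct rcu_head *head)
	{
		struct obj *o = container_of(head, struct obj, rcu_head);

		/* SRCU grace period is over; now also wait out an RCU
		 * grace period before the memory is actually freed */
		kfree_rcu(o, rcu_head);
	}

	static void obj_release(struct obj *o)
	{
		call_srcu(&my_srcu, &o->rcu_head, obj_free_srcu);
	}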
@@ -382,12 +386,34 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
+static struct device_opp *add_device_opp(struct device *dev)
+{
+ struct device_opp *dev_opp;
+
+ /*
+ * Allocate a new device OPP table. In the infrequent case where a new
+ * device is needed to be added, we pay this penalty.
+ */
+ dev_opp = kzalloc(sizeof(*dev_opp), GFP_KERNEL);
+ if (!dev_opp)
+ return NULL;
+
+ dev_opp->dev = dev;
+ srcu_init_notifier_head(&dev_opp->srcu_head);
+ INIT_LIST_HEAD(&dev_opp->opp_list);
+
+ /* Secure the device list modification */
+ list_add_rcu(&dev_opp->node, &dev_opp_list);
+ return dev_opp;
+}
+
static int dev_pm_opp_add_dynamic(struct device *dev, unsigned long freq,
unsigned long u_volt, bool dynamic)
{
struct device_opp *dev_opp = NULL;
struct dev_pm_opp *opp, *new_opp;
struct list_head *head;
+ int ret;
/* allocate new OPP node */
new_opp = kzalloc(sizeof(*new_opp), GFP_KERNEL);
@@ -400,7 +426,6 @@ static int dev_pm_opp_add_dynamic(struct device *dev, unsigned long freq,
mutex_lock(&dev_opp_list_lock);
/* populate the opp table */
- new_opp->dev_opp = dev_opp;
new_opp->rate = freq;
new_opp->u_volt = u_volt;
new_opp->available = true;
@@ -409,27 +434,12 @@ static int dev_pm_opp_add_dynamic(struct device *dev, unsigned long freq,
/* Check for existing list for 'dev' */
dev_opp = find_device_opp(dev);
if (IS_ERR(dev_opp)) {
- /*
- * Allocate a new device OPP table. In the infrequent case
- * where a new device is needed to be added, we pay this
- * penalty.
- */
- dev_opp = kzalloc(sizeof(struct device_opp), GFP_KERNEL);
+ dev_opp = add_device_opp(dev);
if (!dev_opp) {
- mutex_unlock(&dev_opp_list_lock);
- kfree(new_opp);
- dev_warn(dev,
- "%s: Unable to create device OPP structure\n",
- __func__);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto free_opp;
}
- dev_opp->dev = dev;
- srcu_init_notifier_head(&dev_opp->srcu_head);
- INIT_LIST_HEAD(&dev_opp->opp_list);
-
- /* Secure the device list modification */
- list_add_rcu(&dev_opp->node, &dev_opp_list);
head = &dev_opp->opp_list;
goto list_add;
}
@@ -448,18 +458,17 @@ static int dev_pm_opp_add_dynamic(struct device *dev, unsigned long freq,
/* Duplicate OPPs ? */
if (new_opp->rate == opp->rate) {
- int ret = opp->available && new_opp->u_volt == opp->u_volt ?
+ ret = opp->available && new_opp->u_volt == opp->u_volt ?
0 : -EEXIST;
dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
__func__, opp->rate, opp->u_volt, opp->available,
new_opp->rate, new_opp->u_volt, new_opp->available);
- mutex_unlock(&dev_opp_list_lock);
- kfree(new_opp);
- return ret;
+ goto free_opp;
}
list_add:
+ new_opp->dev_opp = dev_opp;
list_add_rcu(&new_opp->node, head);
mutex_unlock(&dev_opp_list_lock);
@@ -469,6 +478,11 @@ list_add:
*/
srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
return 0;
+
+free_opp:
+ mutex_unlock(&dev_opp_list_lock);
+ kfree(new_opp);
+ return ret;
}
/**
@@ -511,10 +525,11 @@ static void kfree_device_rcu(struct rcu_head *head)
{
struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);
- kfree(device_opp);
+ kfree_rcu(device_opp, rcu_head);
}
-void __dev_pm_opp_remove(struct device_opp *dev_opp, struct dev_pm_opp *opp)
+static void __dev_pm_opp_remove(struct device_opp *dev_opp,
+ struct dev_pm_opp *opp)
{
/*
* Notify the changes in the availability of the operable
@@ -592,7 +607,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
static int opp_set_availability(struct device *dev, unsigned long freq,
bool availability_req)
{
- struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
+ struct device_opp *dev_opp;
struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
int r = 0;
@@ -606,12 +621,7 @@ static int opp_set_availability(struct device *dev, unsigned long freq,
mutex_lock(&dev_opp_list_lock);
/* Find the device_opp */
- list_for_each_entry(tmp_dev_opp, &dev_opp_list, node) {
- if (dev == tmp_dev_opp->dev) {
- dev_opp = tmp_dev_opp;
- break;
- }
- }
+ dev_opp = find_device_opp(dev);
if (IS_ERR(dev_opp)) {
r = PTR_ERR(dev_opp);
dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
@@ -768,7 +778,7 @@ EXPORT_SYMBOL_GPL(of_init_opp_table);
*/
void of_free_opp_table(struct device *dev)
{
- struct device_opp *dev_opp = find_device_opp(dev);
+ struct device_opp *dev_opp;
struct dev_pm_opp *opp, *tmp;
/* Check for existing list for 'dev' */
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 27b71a0b72d..3ec85dfce12 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -2370,8 +2370,12 @@ static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request,
opcode = CEPH_OSD_OP_READ;
}
- osd_req_op_extent_init(osd_request, num_ops, opcode, offset, length,
- 0, 0);
+ if (opcode == CEPH_OSD_OP_DELETE)
+ osd_req_op_init(osd_request, num_ops, opcode);
+ else
+ osd_req_op_extent_init(osd_request, num_ops, opcode,
+ offset, length, 0, 0);
+
if (obj_request->type == OBJ_REQUEST_BIO)
osd_req_op_extent_osd_data_bio(osd_request, num_ops,
obj_request->bio_list, length);
@@ -3405,8 +3409,7 @@ err_rq:
if (result)
rbd_warn(rbd_dev, "%s %llx at %llx result %d",
obj_op_name(op_type), length, offset, result);
- if (snapc)
- ceph_put_snap_context(snapc);
+ ceph_put_snap_context(snapc);
blk_end_request_all(rq, result);
}
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index fce75889628..1ee27ac18de 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -87,6 +87,7 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x04CA, 0x3007) },
{ USB_DEVICE(0x04CA, 0x3008) },
{ USB_DEVICE(0x04CA, 0x300b) },
+ { USB_DEVICE(0x04CA, 0x3010) },
{ USB_DEVICE(0x0930, 0x0219) },
{ USB_DEVICE(0x0930, 0x0220) },
{ USB_DEVICE(0x0930, 0x0227) },
@@ -140,6 +141,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 31dd24ac992..19cf2cf22e8 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -167,6 +167,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 9a024f899dd..f3334829e55 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -153,7 +153,6 @@ static struct page *i8xx_alloc_pages(void)
__free_pages(page, 2);
return NULL;
}
- get_page(page);
atomic_inc(&agp_bridge->current_memory_agp);
return page;
}
@@ -164,7 +163,6 @@ static void i8xx_destroy_pages(struct page *page)
return;
set_pages_wb(page, 4);
- put_page(page);
__free_pages(page, 2);
atomic_dec(&agp_bridge->current_memory_agp);
}
@@ -300,7 +298,6 @@ static int intel_gtt_setup_scratch_page(void)
page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
if (page == NULL)
return -ENOMEM;
- get_page(page);
set_pages_uc(page, 1);
if (intel_private.needs_dmar) {
@@ -560,7 +557,6 @@ static void intel_gtt_teardown_scratch_page(void)
set_pages_wb(intel_private.scratch_page, 1);
pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- put_page(intel_private.scratch_page);
__free_page(intel_private.scratch_page);
}
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 1405b393c93..742eefba12c 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -199,7 +199,14 @@ static signed int pid_calc(struct _pid *pid, int32_t busy)
pid->integral += fp_error;
- /* limit the integral term */
+ /*
+ * We limit the integral here so that it will never
+ * get higher than 30. This prevents it from becoming
+ * too large an input over long periods of time and allows
+ * it to get factored out sooner.
+ *
+ * The value of 30 was chosen through experimentation.
+ */
integral_limit = int_tofp(30);
if (pid->integral > integral_limit)
pid->integral = integral_limit;
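
The new comment plus the clamp below it amount to classic anti-windup for the PID integral term, done in the driver's fixed-point representation. As a self-contained sketch of the same idea (names such as fp_t and clamp_integral are hypothetical, and a FRAC_BITS of 8 is assumed to match the driver's helpers):

#include <stdint.h>

typedef int32_t fp_t;                     /* fixed-point value with FRAC_BITS fraction bits */
#define FRAC_BITS 8
#define int_tofp(x) ((fp_t)(x) << FRAC_BITS)

/* Bound the accumulated integral so stale error cannot dominate the output. */
static fp_t clamp_integral(fp_t integral)
{
	const fp_t limit = int_tofp(30);  /* cap chosen experimentally, per the comment above */

	if (integral > limit)
		return limit;
	if (integral < -limit)
		return -limit;
	return integral;
}
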
@@ -616,6 +623,11 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
if (limits.no_turbo || limits.turbo_disabled)
max_perf = cpu->pstate.max_pstate;
+ /*
+ * performance can be limited by user through sysfs, by cpufreq
+ * policy, or by cpu specific default values determined through
+ * experimentation.
+ */
max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
*max = clamp_t(int, max_perf_adj,
cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
@@ -717,11 +729,29 @@ static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
u32 duration_us;
u32 sample_time;
+ /*
+ * core_busy is the ratio of actual performance to max
+ * max_pstate is the max non turbo pstate available
+ * current_pstate was the pstate that was requested during
+ * the last sample period.
+ *
+ * We normalize core_busy, which was our actual percent
+ * performance to what we requested during the last sample
+ * period. The result will be a percentage of busy at a
+ * specified pstate.
+ */
core_busy = cpu->sample.core_pct_busy;
max_pstate = int_tofp(cpu->pstate.max_pstate);
current_pstate = int_tofp(cpu->pstate.current_pstate);
core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
+ /*
+ * Since we have a deferred timer, it will not fire unless
+ * we are in C0. So, determine if the actual elapsed time
+ * is significantly greater (3x) than our sample interval. If it
+ * is, then we were idle for a long enough period of time
+ * to adjust our busyness.
+ */
sample_time = pid_params.sample_rate_ms * USEC_PER_MSEC;
duration_us = (u32) ktime_us_delta(cpu->sample.time,
cpu->last_sample_time);
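
In plainer terms, the two comment blocks above describe busy = core_pct_busy * max_pstate / current_pstate, scaled down when far more wall-clock time than one sample interval has elapsed (the CPU was mostly idle). A rough floating-point sketch of that arithmetic, with hypothetical names and assuming the idle case is handled by a simple sample-time/duration ratio rather than the driver's fixed-point helpers:

#include <stdint.h>

static double scaled_busy(double core_pct_busy, int max_pstate, int current_pstate,
			  uint32_t duration_us, uint32_t sample_time_us)
{
	/* Normalize measured busy% to the P-state that was actually requested. */
	double busy = core_pct_busy * (double)max_pstate / (double)current_pstate;

	/*
	 * The deferred timer only fires in C0; if much more time elapsed than
	 * one sample interval, the CPU spent most of it idle, so scale down.
	 */
	if (duration_us > 3 * sample_time_us)
		busy *= (double)sample_time_us / (double)duration_us;

	return busy;
}
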
@@ -948,6 +978,7 @@ static struct cpufreq_driver intel_pstate_driver = {
static int __initdata no_load;
static int __initdata no_hwp;
+static unsigned int force_load;
static int intel_pstate_msrs_not_valid(void)
{
@@ -1094,7 +1125,8 @@ static bool intel_pstate_platform_pwr_mgmt_exists(void)
case PSS:
return intel_pstate_no_acpi_pss();
case PPC:
- return intel_pstate_has_acpi_ppc();
+ return intel_pstate_has_acpi_ppc() &&
+ (!force_load);
}
}
@@ -1175,6 +1207,8 @@ static int __init intel_pstate_setup(char *str)
no_load = 1;
if (!strcmp(str, "no_hwp"))
no_hwp = 1;
+ if (!strcmp(str, "force"))
+ force_load = 1;
return 0;
}
early_param("intel_pstate", intel_pstate_setup);
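
With the new "force" token, intel_pstate can be made to load even on systems where ACPI _PPC support would otherwise hand power management to the platform; a boot command line might carry, for example:

	intel_pstate=force

The existing no_load and no_hwp tokens are parsed by the same early_param() handler.
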
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index c913906a719..0f6b229afcb 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -1,5 +1,5 @@
/*
- * (C) 2001-2004 Dave Jones. <davej@redhat.com>
+ * (C) 2001-2004 Dave Jones.
* (C) 2002 Padraig Brady. <padraig@antefacto.com>
*
* Licensed under the terms of the GNU GPL License version 2.
@@ -1008,7 +1008,7 @@ MODULE_PARM_DESC(revid_errata, "Ignore CPU Revision ID");
module_param(enable, int, 0644);
MODULE_PARM_DESC(enable, "Enable driver");
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones");
MODULE_DESCRIPTION("Longhaul driver for VIA Cyrix processors.");
MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
index f91027259c3..e6f24b281e3 100644
--- a/drivers/cpufreq/powernow-k6.c
+++ b/drivers/cpufreq/powernow-k6.c
@@ -300,7 +300,7 @@ static void __exit powernow_k6_exit(void)
}
-MODULE_AUTHOR("Arjan van de Ven, Dave Jones <davej@redhat.com>, "
+MODULE_AUTHOR("Arjan van de Ven, Dave Jones, "
"Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION("PowerNow! driver for AMD K6-2+ / K6-3+ processors.");
MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index e61e224475a..37c5742482d 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -1,7 +1,6 @@
/*
* AMD K7 Powernow driver.
* (C) 2003 Dave Jones on behalf of SuSE Labs.
- * (C) 2003-2004 Dave Jones <davej@redhat.com>
*
* Licensed under the terms of the GNU GPL License version 2.
* Based upon datasheets & sample CPUs kindly provided by AMD.
@@ -701,7 +700,7 @@ static void __exit powernow_exit(void)
module_param(acpi_force, int, 0444);
MODULE_PARM_DESC(acpi_force, "Force ACPI to be used.");
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones");
MODULE_DESCRIPTION("Powernow driver for AMD K7 processors.");
MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c
index 1a07b5904ed..e56d632a8b2 100644
--- a/drivers/cpufreq/speedstep-ich.c
+++ b/drivers/cpufreq/speedstep-ich.c
@@ -378,8 +378,7 @@ static void __exit speedstep_exit(void)
}
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>, "
- "Dominik Brodowski <linux@brodo.de>");
+MODULE_AUTHOR("Dave Jones, Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION("Speedstep driver for Intel mobile processors on chipsets "
"with ICH-M southbridges.");
MODULE_LICENSE("GPL");
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index e9248bb9173..aedec095793 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -16,13 +16,10 @@
#include <asm/machdep.h>
#include <asm/firmware.h>
+#include <asm/opal.h>
#include <asm/runlatch.h>
-/* Flags and constants used in PowerNV platform */
-
#define MAX_POWERNV_IDLE_STATES 8
-#define IDLE_USE_INST_NAP 0x00010000 /* Use nap instruction */
-#define IDLE_USE_INST_SLEEP 0x00020000 /* Use sleep instruction */
struct cpuidle_driver powernv_idle_driver = {
.name = "powernv_idle",
@@ -197,7 +194,7 @@ static int powernv_add_idle_states(void)
* target residency to be 10x exit_latency
*/
latency_ns = be32_to_cpu(idle_state_latency[i]);
- if (flags & IDLE_USE_INST_NAP) {
+ if (flags & OPAL_PM_NAP_ENABLED) {
/* Add NAP state */
strcpy(powernv_states[nr_idle_states].name, "Nap");
strcpy(powernv_states[nr_idle_states].desc, "Nap");
@@ -210,7 +207,8 @@ static int powernv_add_idle_states(void)
nr_idle_states++;
}
- if (flags & IDLE_USE_INST_SLEEP) {
+ if (flags & OPAL_PM_SLEEP_ENABLED ||
+ flags & OPAL_PM_SLEEP_ENABLED_ER1) {
/* Add FASTSLEEP state */
strcpy(powernv_states[nr_idle_states].name, "FastSleep");
strcpy(powernv_states[nr_idle_states].desc, "FastSleep");
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 2c6d5e118ac..f9e3aee6a21 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -115,6 +115,9 @@ static int textual_leaf_to_string(const u32 *block, char *buf, size_t size)
*
* The string is taken from a minimal ASCII text descriptor leaf after
* the immediate entry with @key. The string is zero-terminated.
+ * An overlong string is silently truncated such that it and the
+ * zero byte fit into @size.
+ *
* Returns strlen(buf) or a negative error code.
*/
int fw_csr_string(const u32 *directory, int key, char *buf, size_t size)
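
The documented contract is that fw_csr_string() always NUL-terminates, silently truncates to fit @size, and returns strlen(buf) or a negative error. A hypothetical call-site fragment (assuming a unit's config-ROM directory pointer is at hand; CSR_MODEL and the buffer size are chosen only for illustration):

	char model[32];
	int len;

	/* Fetch the textual leaf following the model entry; truncation is silent. */
	len = fw_csr_string(unit->directory, CSR_MODEL, model, sizeof(model));
	if (len < 0)
		pr_warn("no model name in config ROM (%d)\n", len);
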
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index a66a3217f1d..aff9018d065 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -689,8 +689,7 @@ static void ar_context_release(struct ar_context *ctx)
{
unsigned int i;
- if (ctx->buffer)
- vm_unmap_ram(ctx->buffer, AR_BUFFERS + AR_WRAPAROUND_PAGES);
+ vunmap(ctx->buffer);
for (i = 0; i < AR_BUFFERS; i++)
if (ctx->pages[i]) {
@@ -1018,8 +1017,7 @@ static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci,
pages[i] = ctx->pages[i];
for (i = 0; i < AR_WRAPAROUND_PAGES; i++)
pages[AR_BUFFERS + i] = ctx->pages[i];
- ctx->buffer = vm_map_ram(pages, AR_BUFFERS + AR_WRAPAROUND_PAGES,
- -1, PAGE_KERNEL);
+ ctx->buffer = vmap(pages, ARRAY_SIZE(pages), VM_MAP, PAGE_KERNEL);
if (!ctx->buffer)
goto out_of_memory;
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 7aef911fdc7..64ac8f8f509 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -174,6 +174,7 @@ struct sbp2_target {
unsigned int mgt_orb_timeout;
unsigned int max_payload;
+ spinlock_t lock;
int dont_block; /* counter for each logical unit */
int blocked; /* ditto */
};
@@ -270,6 +271,7 @@ struct sbp2_orb {
dma_addr_t request_bus;
int rcode;
void (*callback)(struct sbp2_orb * orb, struct sbp2_status * status);
+ struct sbp2_logical_unit *lu;
struct list_head link;
};
@@ -321,7 +323,6 @@ struct sbp2_command_orb {
u8 command_block[SBP2_MAX_CDB_SIZE];
} request;
struct scsi_cmnd *cmd;
- struct sbp2_logical_unit *lu;
struct sbp2_pointer page_table[SG_ALL] __attribute__((aligned(8)));
dma_addr_t page_table_bus;
@@ -444,7 +445,7 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
}
/* Lookup the orb corresponding to this status write. */
- spin_lock_irqsave(&card->lock, flags);
+ spin_lock_irqsave(&lu->tgt->lock, flags);
list_for_each_entry(orb, &lu->orb_list, link) {
if (STATUS_GET_ORB_HIGH(status) == 0 &&
STATUS_GET_ORB_LOW(status) == orb->request_bus) {
@@ -453,7 +454,7 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
break;
}
}
- spin_unlock_irqrestore(&card->lock, flags);
+ spin_unlock_irqrestore(&lu->tgt->lock, flags);
if (&orb->link != &lu->orb_list) {
orb->callback(orb, &status);
@@ -480,18 +481,18 @@ static void complete_transaction(struct fw_card *card, int rcode,
* been set and only does the cleanup if the transaction
* failed and we didn't already get a status write.
*/
- spin_lock_irqsave(&card->lock, flags);
+ spin_lock_irqsave(&orb->lu->tgt->lock, flags);
if (orb->rcode == -1)
orb->rcode = rcode;
if (orb->rcode != RCODE_COMPLETE) {
list_del(&orb->link);
- spin_unlock_irqrestore(&card->lock, flags);
+ spin_unlock_irqrestore(&orb->lu->tgt->lock, flags);
orb->callback(orb, NULL);
kref_put(&orb->kref, free_orb); /* orb callback reference */
} else {
- spin_unlock_irqrestore(&card->lock, flags);
+ spin_unlock_irqrestore(&orb->lu->tgt->lock, flags);
}
kref_put(&orb->kref, free_orb); /* transaction callback reference */
@@ -507,9 +508,10 @@ static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
orb_pointer.high = 0;
orb_pointer.low = cpu_to_be32(orb->request_bus);
- spin_lock_irqsave(&device->card->lock, flags);
+ orb->lu = lu;
+ spin_lock_irqsave(&lu->tgt->lock, flags);
list_add_tail(&orb->link, &lu->orb_list);
- spin_unlock_irqrestore(&device->card->lock, flags);
+ spin_unlock_irqrestore(&lu->tgt->lock, flags);
kref_get(&orb->kref); /* transaction callback reference */
kref_get(&orb->kref); /* orb callback reference */
@@ -524,13 +526,12 @@ static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
struct fw_device *device = target_parent_device(lu->tgt);
struct sbp2_orb *orb, *next;
struct list_head list;
- unsigned long flags;
int retval = -ENOENT;
INIT_LIST_HEAD(&list);
- spin_lock_irqsave(&device->card->lock, flags);
+ spin_lock_irq(&lu->tgt->lock);
list_splice_init(&lu->orb_list, &list);
- spin_unlock_irqrestore(&device->card->lock, flags);
+ spin_unlock_irq(&lu->tgt->lock);
list_for_each_entry_safe(orb, next, &list, link) {
retval = 0;
@@ -687,16 +688,11 @@ static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu)
&d, 4, complete_agent_reset_write_no_wait, t);
}
-static inline void sbp2_allow_block(struct sbp2_logical_unit *lu)
+static inline void sbp2_allow_block(struct sbp2_target *tgt)
{
- /*
- * We may access dont_block without taking card->lock here:
- * All callers of sbp2_allow_block() and all callers of sbp2_unblock()
- * are currently serialized against each other.
- * And a wrong result in sbp2_conditionally_block()'s access of
- * dont_block is rather harmless, it simply misses its first chance.
- */
- --lu->tgt->dont_block;
+ spin_lock_irq(&tgt->lock);
+ --tgt->dont_block;
+ spin_unlock_irq(&tgt->lock);
}
/*
@@ -705,7 +701,7 @@ static inline void sbp2_allow_block(struct sbp2_logical_unit *lu)
* logical units have been finished (indicated by dont_block == 0).
* - lu->generation is stale.
*
- * Note, scsi_block_requests() must be called while holding card->lock,
+ * Note, scsi_block_requests() must be called while holding tgt->lock,
* otherwise it might foil sbp2_[conditionally_]unblock()'s attempt to
* unblock the target.
*/
@@ -717,20 +713,20 @@ static void sbp2_conditionally_block(struct sbp2_logical_unit *lu)
container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
unsigned long flags;
- spin_lock_irqsave(&card->lock, flags);
+ spin_lock_irqsave(&tgt->lock, flags);
if (!tgt->dont_block && !lu->blocked &&
lu->generation != card->generation) {
lu->blocked = true;
if (++tgt->blocked == 1)
scsi_block_requests(shost);
}
- spin_unlock_irqrestore(&card->lock, flags);
+ spin_unlock_irqrestore(&tgt->lock, flags);
}
/*
* Unblocks lu->tgt as soon as all its logical units can be unblocked.
* Note, it is harmless to run scsi_unblock_requests() outside the
- * card->lock protected section. On the other hand, running it inside
+ * tgt->lock protected section. On the other hand, running it inside
* the section might clash with shost->host_lock.
*/
static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu)
@@ -739,15 +735,14 @@ static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu)
struct fw_card *card = target_parent_device(tgt)->card;
struct Scsi_Host *shost =
container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
- unsigned long flags;
bool unblock = false;
- spin_lock_irqsave(&card->lock, flags);
+ spin_lock_irq(&tgt->lock);
if (lu->blocked && lu->generation == card->generation) {
lu->blocked = false;
unblock = --tgt->blocked == 0;
}
- spin_unlock_irqrestore(&card->lock, flags);
+ spin_unlock_irq(&tgt->lock);
if (unblock)
scsi_unblock_requests(shost);
@@ -756,19 +751,17 @@ static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu)
/*
* Prevents future blocking of tgt and unblocks it.
* Note, it is harmless to run scsi_unblock_requests() outside the
- * card->lock protected section. On the other hand, running it inside
+ * tgt->lock protected section. On the other hand, running it inside
* the section might clash with shost->host_lock.
*/
static void sbp2_unblock(struct sbp2_target *tgt)
{
- struct fw_card *card = target_parent_device(tgt)->card;
struct Scsi_Host *shost =
container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
- unsigned long flags;
- spin_lock_irqsave(&card->lock, flags);
+ spin_lock_irq(&tgt->lock);
++tgt->dont_block;
- spin_unlock_irqrestore(&card->lock, flags);
+ spin_unlock_irq(&tgt->lock);
scsi_unblock_requests(shost);
}
@@ -904,7 +897,7 @@ static void sbp2_login(struct work_struct *work)
/* No error during __scsi_add_device() */
lu->has_sdev = true;
scsi_device_put(sdev);
- sbp2_allow_block(lu);
+ sbp2_allow_block(tgt);
return;
@@ -1163,6 +1156,7 @@ static int sbp2_probe(struct fw_unit *unit, const struct ieee1394_device_id *id)
dev_set_drvdata(&unit->device, tgt);
tgt->unit = unit;
INIT_LIST_HEAD(&tgt->lu_list);
+ spin_lock_init(&tgt->lock);
tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
if (fw_device_enable_phys_dma(device) < 0)
@@ -1359,12 +1353,12 @@ static void complete_command_orb(struct sbp2_orb *base_orb,
{
struct sbp2_command_orb *orb =
container_of(base_orb, struct sbp2_command_orb, base);
- struct fw_device *device = target_parent_device(orb->lu->tgt);
+ struct fw_device *device = target_parent_device(base_orb->lu->tgt);
int result;
if (status != NULL) {
if (STATUS_GET_DEAD(*status))
- sbp2_agent_reset_no_wait(orb->lu);
+ sbp2_agent_reset_no_wait(base_orb->lu);
switch (STATUS_GET_RESPONSE(*status)) {
case SBP2_STATUS_REQUEST_COMPLETE:
@@ -1390,7 +1384,7 @@ static void complete_command_orb(struct sbp2_orb *base_orb,
* or when sending the write (less likely).
*/
result = DID_BUS_BUSY << 16;
- sbp2_conditionally_block(orb->lu);
+ sbp2_conditionally_block(base_orb->lu);
}
dma_unmap_single(device->card->device, orb->base.request_bus,
@@ -1487,7 +1481,6 @@ static int sbp2_scsi_queuecommand(struct Scsi_Host *shost,
/* Initialize rcode to something not RCODE_COMPLETE. */
orb->base.rcode = -1;
kref_init(&orb->base.kref);
- orb->lu = lu;
orb->cmd = cmd;
orb->request.next.high = cpu_to_be32(SBP2_ORB_NULL);
orb->request.misc = cpu_to_be32(
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index e3b4b0f02b3..c3413b6adb1 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -167,6 +167,8 @@ config DRM_SAVAGE
source "drivers/gpu/drm/exynos/Kconfig"
+source "drivers/gpu/drm/rockchip/Kconfig"
+
source "drivers/gpu/drm/vmwgfx/Kconfig"
source "drivers/gpu/drm/gma500/Kconfig"
@@ -200,3 +202,7 @@ source "drivers/gpu/drm/tegra/Kconfig"
source "drivers/gpu/drm/panel/Kconfig"
source "drivers/gpu/drm/sti/Kconfig"
+
+source "drivers/gpu/drm/amd/amdkfd/Kconfig"
+
+source "drivers/gpu/drm/imx/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 9292a761ea6..66e40398b3d 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -14,7 +14,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_info.o drm_debugfs.o drm_encoder_slave.o \
drm_trace_points.o drm_global.o drm_prime.o \
drm_rect.o drm_vma_manager.o drm_flip_work.o \
- drm_modeset_lock.o
+ drm_modeset_lock.o drm_atomic.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
@@ -23,7 +23,7 @@ drm-$(CONFIG_DRM_PANEL) += drm_panel.o
drm-$(CONFIG_OF) += drm_of.o
drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
- drm_plane_helper.o drm_dp_mst_topology.o
+ drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o
drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o
drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
@@ -49,6 +49,7 @@ obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
obj-$(CONFIG_DRM_VIA) +=via/
obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
obj-$(CONFIG_DRM_EXYNOS) +=exynos/
+obj-$(CONFIG_DRM_ROCKCHIP) +=rockchip/
obj-$(CONFIG_DRM_GMA500) += gma500/
obj-$(CONFIG_DRM_UDL) += udl/
obj-$(CONFIG_DRM_AST) += ast/
@@ -62,6 +63,8 @@ obj-$(CONFIG_DRM_BOCHS) += bochs/
obj-$(CONFIG_DRM_MSM) += msm/
obj-$(CONFIG_DRM_TEGRA) += tegra/
obj-$(CONFIG_DRM_STI) += sti/
+obj-$(CONFIG_DRM_IMX) += imx/
obj-y += i2c/
obj-y += panel/
obj-y += bridge/
+obj-$(CONFIG_HSA_AMD) += amd/amdkfd/
diff --git a/drivers/gpu/drm/README.drm b/drivers/gpu/drm/README.drm
deleted file mode 100644
index b5b33272258..00000000000
--- a/drivers/gpu/drm/README.drm
+++ /dev/null
@@ -1,43 +0,0 @@
-************************************************************
-* For the very latest on DRI development, please see: *
-* http://dri.freedesktop.org/ *
-************************************************************
-
-The Direct Rendering Manager (drm) is a device-independent kernel-level
-device driver that provides support for the XFree86 Direct Rendering
-Infrastructure (DRI).
-
-The DRM supports the Direct Rendering Infrastructure (DRI) in four major
-ways:
-
- 1. The DRM provides synchronized access to the graphics hardware via
- the use of an optimized two-tiered lock.
-
- 2. The DRM enforces the DRI security policy for access to the graphics
- hardware by only allowing authenticated X11 clients access to
- restricted regions of memory.
-
- 3. The DRM provides a generic DMA engine, complete with multiple
- queues and the ability to detect the need for an OpenGL context
- switch.
-
- 4. The DRM is extensible via the use of small device-specific modules
- that rely extensively on the API exported by the DRM module.
-
-
-Documentation on the DRI is available from:
- http://dri.freedesktop.org/wiki/Documentation
- http://sourceforge.net/project/showfiles.php?group_id=387
- http://dri.sourceforge.net/doc/
-
-For specific information about kernel-level support, see:
-
- The Direct Rendering Manager, Kernel Support for the Direct Rendering
- Infrastructure
- http://dri.sourceforge.net/doc/drm_low_level.html
-
- Hardware Locking for the Direct Rendering Infrastructure
- http://dri.sourceforge.net/doc/hardware_locking_low_level.html
-
- A Security Analysis of the Direct Rendering Infrastructure
- http://dri.sourceforge.net/doc/security_low_level.html
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
new file mode 100644
index 00000000000..8dfac37ff32
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
@@ -0,0 +1,9 @@
+#
+# Heterogenous system architecture configuration
+#
+
+config HSA_AMD
+ tristate "HSA kernel driver for AMD GPU devices"
+ depends on DRM_RADEON && AMD_IOMMU_V2 && X86_64
+ help
+ Enable this if you want to use HSA features on AMD GPU devices.
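
Because the option is tristate and the Makefile that follows hooks it up via obj-$(CONFIG_HSA_AMD), building it as a module yields amdkfd.ko. On an x86-64 kernel, a plausible config fragment (dependencies spelled out explicitly, for illustration only) would be:

	CONFIG_DRM_RADEON=y
	CONFIG_AMD_IOMMU_V2=y
	CONFIG_HSA_AMD=m
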
diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
new file mode 100644
index 00000000000..be6246de509
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for Heterogenous System Architecture support for AMD GPU devices
+#
+
+ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/
+
+amdkfd-y := kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \
+ kfd_pasid.o kfd_doorbell.o kfd_flat_memory.o \
+ kfd_process.o kfd_queue.o kfd_mqd_manager.o \
+ kfd_kernel_queue.o kfd_packet_manager.o \
+ kfd_process_queue_manager.o kfd_device_queue_manager.o \
+ kfd_interrupt.o
+
+obj-$(CONFIG_HSA_AMD) += amdkfd.o
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_regs.h b/drivers/gpu/drm/amd/amdkfd/cik_regs.h
new file mode 100644
index 00000000000..607fc5ceadb
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/cik_regs.h
@@ -0,0 +1,221 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef CIK_REGS_H
+#define CIK_REGS_H
+
+#define IH_VMID_0_LUT 0x3D40u
+
+#define BIF_DOORBELL_CNTL 0x530Cu
+
+#define SRBM_GFX_CNTL 0xE44
+#define PIPEID(x) ((x) << 0)
+#define MEID(x) ((x) << 2)
+#define VMID(x) ((x) << 4)
+#define QUEUEID(x) ((x) << 8)
+
+#define SQ_CONFIG 0x8C00
+
+#define SH_MEM_BASES 0x8C28
+/* if PTR32, these are the bases for scratch and lds */
+#define PRIVATE_BASE(x) ((x) << 0) /* scratch */
+#define SHARED_BASE(x) ((x) << 16) /* LDS */
+#define SH_MEM_APE1_BASE 0x8C2C
+/* if PTR32, this is the base location of GPUVM */
+#define SH_MEM_APE1_LIMIT 0x8C30
+/* if PTR32, this is the upper limit of GPUVM */
+#define SH_MEM_CONFIG 0x8C34
+#define PTR32 (1 << 0)
+#define PRIVATE_ATC (1 << 1)
+#define ALIGNMENT_MODE(x) ((x) << 2)
+#define SH_MEM_ALIGNMENT_MODE_DWORD 0
+#define SH_MEM_ALIGNMENT_MODE_DWORD_STRICT 1
+#define SH_MEM_ALIGNMENT_MODE_STRICT 2
+#define SH_MEM_ALIGNMENT_MODE_UNALIGNED 3
+#define DEFAULT_MTYPE(x) ((x) << 4)
+#define APE1_MTYPE(x) ((x) << 7)
+
+/* valid for both DEFAULT_MTYPE and APE1_MTYPE */
+#define MTYPE_CACHED 0
+#define MTYPE_NONCACHED 3
+
+
+#define SH_STATIC_MEM_CONFIG 0x9604u
+
+#define TC_CFG_L1_LOAD_POLICY0 0xAC68
+#define TC_CFG_L1_LOAD_POLICY1 0xAC6C
+#define TC_CFG_L1_STORE_POLICY 0xAC70
+#define TC_CFG_L2_LOAD_POLICY0 0xAC74
+#define TC_CFG_L2_LOAD_POLICY1 0xAC78
+#define TC_CFG_L2_STORE_POLICY0 0xAC7C
+#define TC_CFG_L2_STORE_POLICY1 0xAC80
+#define TC_CFG_L2_ATOMIC_POLICY 0xAC84
+#define TC_CFG_L1_VOLATILE 0xAC88
+#define TC_CFG_L2_VOLATILE 0xAC8C
+
+#define CP_PQ_WPTR_POLL_CNTL 0xC20C
+#define WPTR_POLL_EN (1 << 31)
+
+#define CPC_INT_CNTL 0xC2D0
+#define CP_ME1_PIPE0_INT_CNTL 0xC214
+#define CP_ME1_PIPE1_INT_CNTL 0xC218
+#define CP_ME1_PIPE2_INT_CNTL 0xC21C
+#define CP_ME1_PIPE3_INT_CNTL 0xC220
+#define CP_ME2_PIPE0_INT_CNTL 0xC224
+#define CP_ME2_PIPE1_INT_CNTL 0xC228
+#define CP_ME2_PIPE2_INT_CNTL 0xC22C
+#define CP_ME2_PIPE3_INT_CNTL 0xC230
+#define DEQUEUE_REQUEST_INT_ENABLE (1 << 13)
+#define WRM_POLL_TIMEOUT_INT_ENABLE (1 << 17)
+#define PRIV_REG_INT_ENABLE (1 << 23)
+#define TIME_STAMP_INT_ENABLE (1 << 26)
+#define GENERIC2_INT_ENABLE (1 << 29)
+#define GENERIC1_INT_ENABLE (1 << 30)
+#define GENERIC0_INT_ENABLE (1 << 31)
+#define CP_ME1_PIPE0_INT_STATUS 0xC214
+#define CP_ME1_PIPE1_INT_STATUS 0xC218
+#define CP_ME1_PIPE2_INT_STATUS 0xC21C
+#define CP_ME1_PIPE3_INT_STATUS 0xC220
+#define CP_ME2_PIPE0_INT_STATUS 0xC224
+#define CP_ME2_PIPE1_INT_STATUS 0xC228
+#define CP_ME2_PIPE2_INT_STATUS 0xC22C
+#define CP_ME2_PIPE3_INT_STATUS 0xC230
+#define DEQUEUE_REQUEST_INT_STATUS (1 << 13)
+#define WRM_POLL_TIMEOUT_INT_STATUS (1 << 17)
+#define PRIV_REG_INT_STATUS (1 << 23)
+#define TIME_STAMP_INT_STATUS (1 << 26)
+#define GENERIC2_INT_STATUS (1 << 29)
+#define GENERIC1_INT_STATUS (1 << 30)
+#define GENERIC0_INT_STATUS (1 << 31)
+
+#define CP_HPD_EOP_BASE_ADDR 0xC904
+#define CP_HPD_EOP_BASE_ADDR_HI 0xC908
+#define CP_HPD_EOP_VMID 0xC90C
+#define CP_HPD_EOP_CONTROL 0xC910
+#define EOP_SIZE(x) ((x) << 0)
+#define EOP_SIZE_MASK (0x3f << 0)
+#define CP_MQD_BASE_ADDR 0xC914
+#define CP_MQD_BASE_ADDR_HI 0xC918
+#define CP_HQD_ACTIVE 0xC91C
+#define CP_HQD_VMID 0xC920
+
+#define CP_HQD_PERSISTENT_STATE 0xC924u
+#define DEFAULT_CP_HQD_PERSISTENT_STATE (0x33U << 8)
+#define PRELOAD_REQ (1 << 0)
+
+#define CP_HQD_PIPE_PRIORITY 0xC928u
+#define CP_HQD_QUEUE_PRIORITY 0xC92Cu
+#define CP_HQD_QUANTUM 0xC930u
+#define QUANTUM_EN 1U
+#define QUANTUM_SCALE_1MS (1U << 4)
+#define QUANTUM_DURATION(x) ((x) << 8)
+
+#define CP_HQD_PQ_BASE 0xC934
+#define CP_HQD_PQ_BASE_HI 0xC938
+#define CP_HQD_PQ_RPTR 0xC93C
+#define CP_HQD_PQ_RPTR_REPORT_ADDR 0xC940
+#define CP_HQD_PQ_RPTR_REPORT_ADDR_HI 0xC944
+#define CP_HQD_PQ_WPTR_POLL_ADDR 0xC948
+#define CP_HQD_PQ_WPTR_POLL_ADDR_HI 0xC94C
+#define CP_HQD_PQ_DOORBELL_CONTROL 0xC950
+#define DOORBELL_OFFSET(x) ((x) << 2)
+#define DOORBELL_OFFSET_MASK (0x1fffff << 2)
+#define DOORBELL_SOURCE (1 << 28)
+#define DOORBELL_SCHD_HIT (1 << 29)
+#define DOORBELL_EN (1 << 30)
+#define DOORBELL_HIT (1 << 31)
+#define CP_HQD_PQ_WPTR 0xC954
+#define CP_HQD_PQ_CONTROL 0xC958
+#define QUEUE_SIZE(x) ((x) << 0)
+#define QUEUE_SIZE_MASK (0x3f << 0)
+#define RPTR_BLOCK_SIZE(x) ((x) << 8)
+#define RPTR_BLOCK_SIZE_MASK (0x3f << 8)
+#define MIN_AVAIL_SIZE(x) ((x) << 20)
+#define PQ_ATC_EN (1 << 23)
+#define PQ_VOLATILE (1 << 26)
+#define NO_UPDATE_RPTR (1 << 27)
+#define UNORD_DISPATCH (1 << 28)
+#define ROQ_PQ_IB_FLIP (1 << 29)
+#define PRIV_STATE (1 << 30)
+#define KMD_QUEUE (1 << 31)
+
+#define DEFAULT_RPTR_BLOCK_SIZE RPTR_BLOCK_SIZE(5)
+#define DEFAULT_MIN_AVAIL_SIZE MIN_AVAIL_SIZE(3)
+
+#define CP_HQD_IB_BASE_ADDR 0xC95Cu
+#define CP_HQD_IB_BASE_ADDR_HI 0xC960u
+#define CP_HQD_IB_RPTR 0xC964u
+#define CP_HQD_IB_CONTROL 0xC968u
+#define IB_ATC_EN (1U << 23)
+#define DEFAULT_MIN_IB_AVAIL_SIZE (3U << 20)
+
+#define CP_HQD_DEQUEUE_REQUEST 0xC974
+#define DEQUEUE_REQUEST_DRAIN 1
+#define DEQUEUE_REQUEST_RESET 2
+#define DEQUEUE_INT (1U << 8)
+
+#define CP_HQD_SEMA_CMD 0xC97Cu
+#define CP_HQD_MSG_TYPE 0xC980u
+#define CP_HQD_ATOMIC0_PREOP_LO 0xC984u
+#define CP_HQD_ATOMIC0_PREOP_HI 0xC988u
+#define CP_HQD_ATOMIC1_PREOP_LO 0xC98Cu
+#define CP_HQD_ATOMIC1_PREOP_HI 0xC990u
+#define CP_HQD_HQ_SCHEDULER0 0xC994u
+#define CP_HQD_HQ_SCHEDULER1 0xC998u
+
+
+#define CP_MQD_CONTROL 0xC99C
+#define MQD_VMID(x) ((x) << 0)
+#define MQD_VMID_MASK (0xf << 0)
+#define MQD_CONTROL_PRIV_STATE_EN (1U << 8)
+
+#define GRBM_GFX_INDEX 0x30800
+#define INSTANCE_INDEX(x) ((x) << 0)
+#define SH_INDEX(x) ((x) << 8)
+#define SE_INDEX(x) ((x) << 16)
+#define SH_BROADCAST_WRITES (1 << 29)
+#define INSTANCE_BROADCAST_WRITES (1 << 30)
+#define SE_BROADCAST_WRITES (1 << 31)
+
+#define SQC_CACHES 0x30d20
+#define SQC_POLICY 0x8C38u
+#define SQC_VOLATILE 0x8C3Cu
+
+#define CP_PERFMON_CNTL 0x36020
+
+#define ATC_VMID0_PASID_MAPPING 0x339Cu
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS 0x3398u
+#define ATC_VMID_PASID_MAPPING_VALID (1U << 31)
+
+#define ATC_VM_APERTURE0_CNTL 0x3310u
+#define ATS_ACCESS_MODE_NEVER 0
+#define ATS_ACCESS_MODE_ALWAYS 1
+
+#define ATC_VM_APERTURE0_CNTL2 0x3318u
+#define ATC_VM_APERTURE0_HIGH_ADDR 0x3308u
+#define ATC_VM_APERTURE0_LOW_ADDR 0x3300u
+#define ATC_VM_APERTURE1_CNTL 0x3314u
+#define ATC_VM_APERTURE1_CNTL2 0x331Cu
+#define ATC_VM_APERTURE1_HIGH_ADDR 0x330Cu
+#define ATC_VM_APERTURE1_LOW_ADDR 0x3304u
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
new file mode 100644
index 00000000000..4f7b275f2f7
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -0,0 +1,595 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/compat.h>
+#include <uapi/linux/kfd_ioctl.h>
+#include <linux/time.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <uapi/asm-generic/mman-common.h>
+#include <asm/processor.h>
+#include "kfd_priv.h"
+#include "kfd_device_queue_manager.h"
+
+static long kfd_ioctl(struct file *, unsigned int, unsigned long);
+static int kfd_open(struct inode *, struct file *);
+static int kfd_mmap(struct file *, struct vm_area_struct *);
+
+static const char kfd_dev_name[] = "kfd";
+
+static const struct file_operations kfd_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = kfd_ioctl,
+ .compat_ioctl = kfd_ioctl,
+ .open = kfd_open,
+ .mmap = kfd_mmap,
+};
+
+static int kfd_char_dev_major = -1;
+static struct class *kfd_class;
+struct device *kfd_device;
+
+int kfd_chardev_init(void)
+{
+ int err = 0;
+
+ kfd_char_dev_major = register_chrdev(0, kfd_dev_name, &kfd_fops);
+ err = kfd_char_dev_major;
+ if (err < 0)
+ goto err_register_chrdev;
+
+ kfd_class = class_create(THIS_MODULE, kfd_dev_name);
+ err = PTR_ERR(kfd_class);
+ if (IS_ERR(kfd_class))
+ goto err_class_create;
+
+ kfd_device = device_create(kfd_class, NULL,
+ MKDEV(kfd_char_dev_major, 0),
+ NULL, kfd_dev_name);
+ err = PTR_ERR(kfd_device);
+ if (IS_ERR(kfd_device))
+ goto err_device_create;
+
+ return 0;
+
+err_device_create:
+ class_destroy(kfd_class);
+err_class_create:
+ unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
+err_register_chrdev:
+ return err;
+}
+
+void kfd_chardev_exit(void)
+{
+ device_destroy(kfd_class, MKDEV(kfd_char_dev_major, 0));
+ class_destroy(kfd_class);
+ unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
+}
+
+struct device *kfd_chardev(void)
+{
+ return kfd_device;
+}
+
+
+static int kfd_open(struct inode *inode, struct file *filep)
+{
+ struct kfd_process *process;
+ bool is_32bit_user_mode;
+
+ if (iminor(inode) != 0)
+ return -ENODEV;
+
+ is_32bit_user_mode = is_compat_task();
+
+ if (is_32bit_user_mode == true) {
+ dev_warn(kfd_device,
+ "Process %d (32-bit) failed to open /dev/kfd\n"
+ "32-bit processes are not supported by amdkfd\n",
+ current->pid);
+ return -EPERM;
+ }
+
+ process = kfd_create_process(current);
+ if (IS_ERR(process))
+ return PTR_ERR(process);
+
+ process->is_32bit_user_mode = is_32bit_user_mode;
+
+ dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
+ process->pasid, process->is_32bit_user_mode);
+
+ kfd_init_apertures(process);
+
+ return 0;
+}
+
+static long kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
+ void __user *arg)
+{
+ struct kfd_ioctl_get_version_args args;
+ int err = 0;
+
+ args.major_version = KFD_IOCTL_MAJOR_VERSION;
+ args.minor_version = KFD_IOCTL_MINOR_VERSION;
+
+ if (copy_to_user(arg, &args, sizeof(args)))
+ err = -EFAULT;
+
+ return err;
+}
+
+static int set_queue_properties_from_user(struct queue_properties *q_properties,
+ struct kfd_ioctl_create_queue_args *args)
+{
+ if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
+ pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
+ return -EINVAL;
+ }
+
+ if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
+ pr_err("kfd: queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
+ return -EINVAL;
+ }
+
+ if ((args->ring_base_address) &&
+ (!access_ok(VERIFY_WRITE,
+ (const void __user *) args->ring_base_address,
+ sizeof(uint64_t)))) {
+ pr_err("kfd: can't access ring base address\n");
+ return -EFAULT;
+ }
+
+ if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
+ pr_err("kfd: ring size must be a power of 2 or 0\n");
+ return -EINVAL;
+ }
+
+ if (!access_ok(VERIFY_WRITE,
+ (const void __user *) args->read_pointer_address,
+ sizeof(uint32_t))) {
+ pr_err("kfd: can't access read pointer\n");
+ return -EFAULT;
+ }
+
+ if (!access_ok(VERIFY_WRITE,
+ (const void __user *) args->write_pointer_address,
+ sizeof(uint32_t))) {
+ pr_err("kfd: can't access write pointer\n");
+ return -EFAULT;
+ }
+
+ q_properties->is_interop = false;
+ q_properties->queue_percent = args->queue_percentage;
+ q_properties->priority = args->queue_priority;
+ q_properties->queue_address = args->ring_base_address;
+ q_properties->queue_size = args->ring_size;
+ q_properties->read_ptr = (uint32_t *) args->read_pointer_address;
+ q_properties->write_ptr = (uint32_t *) args->write_pointer_address;
+ if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE ||
+ args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
+ q_properties->type = KFD_QUEUE_TYPE_COMPUTE;
+ else
+ return -ENOTSUPP;
+
+ if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
+ q_properties->format = KFD_QUEUE_FORMAT_AQL;
+ else
+ q_properties->format = KFD_QUEUE_FORMAT_PM4;
+
+ pr_debug("Queue Percentage (%d, %d)\n",
+ q_properties->queue_percent, args->queue_percentage);
+
+ pr_debug("Queue Priority (%d, %d)\n",
+ q_properties->priority, args->queue_priority);
+
+ pr_debug("Queue Address (0x%llX, 0x%llX)\n",
+ q_properties->queue_address, args->ring_base_address);
+
+ pr_debug("Queue Size (0x%llX, %u)\n",
+ q_properties->queue_size, args->ring_size);
+
+ pr_debug("Queue r/w Pointers (0x%llX, 0x%llX)\n",
+ (uint64_t) q_properties->read_ptr,
+ (uint64_t) q_properties->write_ptr);
+
+ pr_debug("Queue Format (%d)\n", q_properties->format);
+
+ return 0;
+}
+
+static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
+ void __user *arg)
+{
+ struct kfd_ioctl_create_queue_args args;
+ struct kfd_dev *dev;
+ int err = 0;
+ unsigned int queue_id;
+ struct kfd_process_device *pdd;
+ struct queue_properties q_properties;
+
+ memset(&q_properties, 0, sizeof(struct queue_properties));
+
+ if (copy_from_user(&args, arg, sizeof(args)))
+ return -EFAULT;
+
+ pr_debug("kfd: creating queue ioctl\n");
+
+ err = set_queue_properties_from_user(&q_properties, &args);
+ if (err)
+ return err;
+
+ dev = kfd_device_by_id(args.gpu_id);
+ if (dev == NULL)
+ return -EINVAL;
+
+ mutex_lock(&p->mutex);
+
+ pdd = kfd_bind_process_to_device(dev, p);
+ if (IS_ERR(pdd)) {
+ err = PTR_ERR(pdd);
+ goto err_bind_process;
+ }
+
+ pr_debug("kfd: creating queue for PASID %d on GPU 0x%x\n",
+ p->pasid,
+ dev->id);
+
+ err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, 0,
+ KFD_QUEUE_TYPE_COMPUTE, &queue_id);
+ if (err != 0)
+ goto err_create_queue;
+
+ args.queue_id = queue_id;
+
+ /* Return gpu_id as doorbell offset for mmap usage */
+ args.doorbell_offset = args.gpu_id << PAGE_SHIFT;
+
+ if (copy_to_user(arg, &args, sizeof(args))) {
+ err = -EFAULT;
+ goto err_copy_args_out;
+ }
+
+ mutex_unlock(&p->mutex);
+
+ pr_debug("kfd: queue id %d was created successfully\n", args.queue_id);
+
+ pr_debug("ring buffer address == 0x%016llX\n",
+ args.ring_base_address);
+
+ pr_debug("read ptr address == 0x%016llX\n",
+ args.read_pointer_address);
+
+ pr_debug("write ptr address == 0x%016llX\n",
+ args.write_pointer_address);
+
+ return 0;
+
+err_copy_args_out:
+ pqm_destroy_queue(&p->pqm, queue_id);
+err_create_queue:
+err_bind_process:
+ mutex_unlock(&p->mutex);
+ return err;
+}
+
+static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
+ void __user *arg)
+{
+ int retval;
+ struct kfd_ioctl_destroy_queue_args args;
+
+ if (copy_from_user(&args, arg, sizeof(args)))
+ return -EFAULT;
+
+ pr_debug("kfd: destroying queue id %d for PASID %d\n",
+ args.queue_id,
+ p->pasid);
+
+ mutex_lock(&p->mutex);
+
+ retval = pqm_destroy_queue(&p->pqm, args.queue_id);
+
+ mutex_unlock(&p->mutex);
+ return retval;
+}
+
+static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
+ void __user *arg)
+{
+ int retval;
+ struct kfd_ioctl_update_queue_args args;
+ struct queue_properties properties;
+
+ if (copy_from_user(&args, arg, sizeof(args)))
+ return -EFAULT;
+
+ if (args.queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
+ pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
+ return -EINVAL;
+ }
+
+ if (args.queue_priority > KFD_MAX_QUEUE_PRIORITY) {
+ pr_err("kfd: queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
+ return -EINVAL;
+ }
+
+ if ((args.ring_base_address) &&
+ (!access_ok(VERIFY_WRITE,
+ (const void __user *) args.ring_base_address,
+ sizeof(uint64_t)))) {
+ pr_err("kfd: can't access ring base address\n");
+ return -EFAULT;
+ }
+
+ if (!is_power_of_2(args.ring_size) && (args.ring_size != 0)) {
+ pr_err("kfd: ring size must be a power of 2 or 0\n");
+ return -EINVAL;
+ }
+
+ properties.queue_address = args.ring_base_address;
+ properties.queue_size = args.ring_size;
+ properties.queue_percent = args.queue_percentage;
+ properties.priority = args.queue_priority;
+
+ pr_debug("kfd: updating queue id %d for PASID %d\n",
+ args.queue_id, p->pasid);
+
+ mutex_lock(&p->mutex);
+
+ retval = pqm_update_queue(&p->pqm, args.queue_id, &properties);
+
+ mutex_unlock(&p->mutex);
+
+ return retval;
+}
+
+static long kfd_ioctl_set_memory_policy(struct file *filep,
+ struct kfd_process *p, void __user *arg)
+{
+ struct kfd_ioctl_set_memory_policy_args args;
+ struct kfd_dev *dev;
+ int err = 0;
+ struct kfd_process_device *pdd;
+ enum cache_policy default_policy, alternate_policy;
+
+ if (copy_from_user(&args, arg, sizeof(args)))
+ return -EFAULT;
+
+ if (args.default_policy != KFD_IOC_CACHE_POLICY_COHERENT
+ && args.default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
+ return -EINVAL;
+ }
+
+ if (args.alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
+ && args.alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
+ return -EINVAL;
+ }
+
+ dev = kfd_device_by_id(args.gpu_id);
+ if (dev == NULL)
+ return -EINVAL;
+
+ mutex_lock(&p->mutex);
+
+ pdd = kfd_bind_process_to_device(dev, p);
+ if (IS_ERR(pdd)) {
+ err = PTR_ERR(pdd);
+ goto out;
+ }
+
+ default_policy = (args.default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
+ ? cache_policy_coherent : cache_policy_noncoherent;
+
+ alternate_policy =
+ (args.alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
+ ? cache_policy_coherent : cache_policy_noncoherent;
+
+ if (!dev->dqm->set_cache_memory_policy(dev->dqm,
+ &pdd->qpd,
+ default_policy,
+ alternate_policy,
+ (void __user *)args.alternate_aperture_base,
+ args.alternate_aperture_size))
+ err = -EINVAL;
+
+out:
+ mutex_unlock(&p->mutex);
+
+ return err;
+}
+
+static long kfd_ioctl_get_clock_counters(struct file *filep,
+ struct kfd_process *p, void __user *arg)
+{
+ struct kfd_ioctl_get_clock_counters_args args;
+ struct kfd_dev *dev;
+ struct timespec time;
+
+ if (copy_from_user(&args, arg, sizeof(args)))
+ return -EFAULT;
+
+ dev = kfd_device_by_id(args.gpu_id);
+ if (dev == NULL)
+ return -EINVAL;
+
+ /* Reading GPU clock counter from KGD */
+ args.gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd);
+
+ /* No access to rdtsc. Using raw monotonic time */
+ getrawmonotonic(&time);
+ args.cpu_clock_counter = (uint64_t)timespec_to_ns(&time);
+
+ get_monotonic_boottime(&time);
+ args.system_clock_counter = (uint64_t)timespec_to_ns(&time);
+
+ /* Since the counter is in nano-seconds we use 1GHz frequency */
+ args.system_clock_freq = 1000000000;
+
+ if (copy_to_user(arg, &args, sizeof(args)))
+ return -EFAULT;
+
+ return 0;
+}
+
+
+static int kfd_ioctl_get_process_apertures(struct file *filp,
+ struct kfd_process *p, void __user *arg)
+{
+ struct kfd_ioctl_get_process_apertures_args args;
+ struct kfd_process_device_apertures *pAperture;
+ struct kfd_process_device *pdd;
+
+ dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid);
+
+ if (copy_from_user(&args, arg, sizeof(args)))
+ return -EFAULT;
+
+ args.num_of_nodes = 0;
+
+ mutex_lock(&p->mutex);
+
+ /*if the process-device list isn't empty*/
+ if (kfd_has_process_device_data(p)) {
+ /* Run over all pdd of the process */
+ pdd = kfd_get_first_process_device_data(p);
+ do {
+ pAperture = &args.process_apertures[args.num_of_nodes];
+ pAperture->gpu_id = pdd->dev->id;
+ pAperture->lds_base = pdd->lds_base;
+ pAperture->lds_limit = pdd->lds_limit;
+ pAperture->gpuvm_base = pdd->gpuvm_base;
+ pAperture->gpuvm_limit = pdd->gpuvm_limit;
+ pAperture->scratch_base = pdd->scratch_base;
+ pAperture->scratch_limit = pdd->scratch_limit;
+
+ dev_dbg(kfd_device,
+ "node id %u\n", args.num_of_nodes);
+ dev_dbg(kfd_device,
+ "gpu id %u\n", pdd->dev->id);
+ dev_dbg(kfd_device,
+ "lds_base %llX\n", pdd->lds_base);
+ dev_dbg(kfd_device,
+ "lds_limit %llX\n", pdd->lds_limit);
+ dev_dbg(kfd_device,
+ "gpuvm_base %llX\n", pdd->gpuvm_base);
+ dev_dbg(kfd_device,
+ "gpuvm_limit %llX\n", pdd->gpuvm_limit);
+ dev_dbg(kfd_device,
+ "scratch_base %llX\n", pdd->scratch_base);
+ dev_dbg(kfd_device,
+ "scratch_limit %llX\n", pdd->scratch_limit);
+
+ args.num_of_nodes++;
+ } while ((pdd = kfd_get_next_process_device_data(p, pdd)) != NULL &&
+ (args.num_of_nodes < NUM_OF_SUPPORTED_GPUS));
+ }
+
+ mutex_unlock(&p->mutex);
+
+ if (copy_to_user(arg, &args, sizeof(args)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+{
+ struct kfd_process *process;
+ long err = -EINVAL;
+
+ dev_dbg(kfd_device,
+ "ioctl cmd 0x%x (#%d), arg 0x%lx\n",
+ cmd, _IOC_NR(cmd), arg);
+
+ process = kfd_get_process(current);
+ if (IS_ERR(process))
+ return PTR_ERR(process);
+
+ switch (cmd) {
+ case KFD_IOC_GET_VERSION:
+ err = kfd_ioctl_get_version(filep, process, (void __user *)arg);
+ break;
+ case KFD_IOC_CREATE_QUEUE:
+ err = kfd_ioctl_create_queue(filep, process,
+ (void __user *)arg);
+ break;
+
+ case KFD_IOC_DESTROY_QUEUE:
+ err = kfd_ioctl_destroy_queue(filep, process,
+ (void __user *)arg);
+ break;
+
+ case KFD_IOC_SET_MEMORY_POLICY:
+ err = kfd_ioctl_set_memory_policy(filep, process,
+ (void __user *)arg);
+ break;
+
+ case KFD_IOC_GET_CLOCK_COUNTERS:
+ err = kfd_ioctl_get_clock_counters(filep, process,
+ (void __user *)arg);
+ break;
+
+ case KFD_IOC_GET_PROCESS_APERTURES:
+ err = kfd_ioctl_get_process_apertures(filep, process,
+ (void __user *)arg);
+ break;
+
+ case KFD_IOC_UPDATE_QUEUE:
+ err = kfd_ioctl_update_queue(filep, process,
+ (void __user *)arg);
+ break;
+
+ default:
+ dev_err(kfd_device,
+ "unknown ioctl cmd 0x%x, arg 0x%lx)\n",
+ cmd, arg);
+ err = -EINVAL;
+ break;
+ }
+
+ if (err < 0)
+ dev_err(kfd_device,
+ "ioctl error %ld for ioctl cmd 0x%x (#%d)\n",
+ err, cmd, _IOC_NR(cmd));
+
+ return err;
+}
+
+static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct kfd_process *process;
+
+ process = kfd_get_process(current);
+ if (IS_ERR(process))
+ return PTR_ERR(process);
+
+ return kfd_doorbell_mmap(process, vma);
+}
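
Every command in this file funnels through the single argument pointer and the copy_from_user()/copy_to_user() pair shown above. Assuming the uapi header exports KFD_IOC_GET_VERSION alongside the argument struct used here, a minimal user-space caller of the version query could look roughly like this (illustrative only, not part of the patch):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kfd_ioctl.h>

int main(void)
{
	struct kfd_ioctl_get_version_args args;
	int fd = open("/dev/kfd", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&args, 0, sizeof(args));
	/* The driver fills in KFD_IOCTL_MAJOR/MINOR_VERSION for us. */
	if (ioctl(fd, KFD_IOC_GET_VERSION, &args) == 0)
		printf("amdkfd ioctl interface %u.%u\n",
		       args.major_version, args.minor_version);

	close(fd);
	return 0;
}
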
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
new file mode 100644
index 00000000000..a374fa3d3ee
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
@@ -0,0 +1,294 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef KFD_CRAT_H_INCLUDED
+#define KFD_CRAT_H_INCLUDED
+
+#include <linux/types.h>
+
+#pragma pack(1)
+
+/*
+ * 4CC signature values for the CRAT and CDIT ACPI tables
+ */
+
+#define CRAT_SIGNATURE "CRAT"
+#define CDIT_SIGNATURE "CDIT"
+
+/*
+ * Component Resource Association Table (CRAT)
+ */
+
+#define CRAT_OEMID_LENGTH 6
+#define CRAT_OEMTABLEID_LENGTH 8
+#define CRAT_RESERVED_LENGTH 6
+
+#define CRAT_OEMID_64BIT_MASK ((1ULL << (CRAT_OEMID_LENGTH * 8)) - 1)
+
+struct crat_header {
+ uint32_t signature;
+ uint32_t length;
+ uint8_t revision;
+ uint8_t checksum;
+ uint8_t oem_id[CRAT_OEMID_LENGTH];
+ uint8_t oem_table_id[CRAT_OEMTABLEID_LENGTH];
+ uint32_t oem_revision;
+ uint32_t creator_id;
+ uint32_t creator_revision;
+ uint32_t total_entries;
+ uint16_t num_domains;
+ uint8_t reserved[CRAT_RESERVED_LENGTH];
+};
+
+/*
+ * The header structure is immediately followed by total_entries of the
+ * data definitions
+ */
+
+/*
+ * The currently defined subtype entries in the CRAT
+ */
+#define CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY 0
+#define CRAT_SUBTYPE_MEMORY_AFFINITY 1
+#define CRAT_SUBTYPE_CACHE_AFFINITY 2
+#define CRAT_SUBTYPE_TLB_AFFINITY 3
+#define CRAT_SUBTYPE_CCOMPUTE_AFFINITY 4
+#define CRAT_SUBTYPE_IOLINK_AFFINITY 5
+#define CRAT_SUBTYPE_MAX 6
+
+#define CRAT_SIBLINGMAP_SIZE 32
+
+/*
+ * ComputeUnit Affinity structure and definitions
+ */
+#define CRAT_CU_FLAGS_ENABLED 0x00000001
+#define CRAT_CU_FLAGS_HOT_PLUGGABLE 0x00000002
+#define CRAT_CU_FLAGS_CPU_PRESENT 0x00000004
+#define CRAT_CU_FLAGS_GPU_PRESENT 0x00000008
+#define CRAT_CU_FLAGS_IOMMU_PRESENT 0x00000010
+#define CRAT_CU_FLAGS_RESERVED 0xffffffe0
+
+#define CRAT_COMPUTEUNIT_RESERVED_LENGTH 4
+
+struct crat_subtype_computeunit {
+ uint8_t type;
+ uint8_t length;
+ uint16_t reserved;
+ uint32_t flags;
+ uint32_t proximity_domain;
+ uint32_t processor_id_low;
+ uint16_t num_cpu_cores;
+ uint16_t num_simd_cores;
+ uint16_t max_waves_simd;
+ uint16_t io_count;
+ uint16_t hsa_capability;
+ uint16_t lds_size_in_kb;
+ uint8_t wave_front_size;
+ uint8_t num_banks;
+ uint16_t micro_engine_id;
+ uint8_t num_arrays;
+ uint8_t num_cu_per_array;
+ uint8_t num_simd_per_cu;
+ uint8_t max_slots_scatch_cu;
+ uint8_t reserved2[CRAT_COMPUTEUNIT_RESERVED_LENGTH];
+};
+
+/*
+ * HSA Memory Affinity structure and definitions
+ */
+#define CRAT_MEM_FLAGS_ENABLED 0x00000001
+#define CRAT_MEM_FLAGS_HOT_PLUGGABLE 0x00000002
+#define CRAT_MEM_FLAGS_NON_VOLATILE 0x00000004
+#define CRAT_MEM_FLAGS_RESERVED 0xfffffff8
+
+#define CRAT_MEMORY_RESERVED_LENGTH 8
+
+struct crat_subtype_memory {
+ uint8_t type;
+ uint8_t length;
+ uint16_t reserved;
+ uint32_t flags;
+ uint32_t promixity_domain;
+ uint32_t base_addr_low;
+ uint32_t base_addr_high;
+ uint32_t length_low;
+ uint32_t length_high;
+ uint32_t width;
+ uint8_t reserved2[CRAT_MEMORY_RESERVED_LENGTH];
+};
+
+/*
+ * HSA Cache Affinity structure and definitions
+ */
+#define CRAT_CACHE_FLAGS_ENABLED 0x00000001
+#define CRAT_CACHE_FLAGS_DATA_CACHE 0x00000002
+#define CRAT_CACHE_FLAGS_INST_CACHE 0x00000004
+#define CRAT_CACHE_FLAGS_CPU_CACHE 0x00000008
+#define CRAT_CACHE_FLAGS_SIMD_CACHE 0x00000010
+#define CRAT_CACHE_FLAGS_RESERVED 0xffffffe0
+
+#define CRAT_CACHE_RESERVED_LENGTH 8
+
+struct crat_subtype_cache {
+ uint8_t type;
+ uint8_t length;
+ uint16_t reserved;
+ uint32_t flags;
+ uint32_t processor_id_low;
+ uint8_t sibling_map[CRAT_SIBLINGMAP_SIZE];
+ uint32_t cache_size;
+ uint8_t cache_level;
+ uint8_t lines_per_tag;
+ uint16_t cache_line_size;
+ uint8_t associativity;
+ uint8_t cache_properties;
+ uint16_t cache_latency;
+ uint8_t reserved2[CRAT_CACHE_RESERVED_LENGTH];
+};
+
+/*
+ * HSA TLB Affinity structure and definitions
+ */
+#define CRAT_TLB_FLAGS_ENABLED 0x00000001
+#define CRAT_TLB_FLAGS_DATA_TLB 0x00000002
+#define CRAT_TLB_FLAGS_INST_TLB 0x00000004
+#define CRAT_TLB_FLAGS_CPU_TLB 0x00000008
+#define CRAT_TLB_FLAGS_SIMD_TLB 0x00000010
+#define CRAT_TLB_FLAGS_RESERVED 0xffffffe0
+
+#define CRAT_TLB_RESERVED_LENGTH 4
+
+struct crat_subtype_tlb {
+ uint8_t type;
+ uint8_t length;
+ uint16_t reserved;
+ uint32_t flags;
+ uint32_t processor_id_low;
+ uint8_t sibling_map[CRAT_SIBLINGMAP_SIZE];
+ uint32_t tlb_level;
+ uint8_t data_tlb_associativity_2mb;
+ uint8_t data_tlb_size_2mb;
+ uint8_t instruction_tlb_associativity_2mb;
+ uint8_t instruction_tlb_size_2mb;
+ uint8_t data_tlb_associativity_4k;
+ uint8_t data_tlb_size_4k;
+ uint8_t instruction_tlb_associativity_4k;
+ uint8_t instruction_tlb_size_4k;
+ uint8_t data_tlb_associativity_1gb;
+ uint8_t data_tlb_size_1gb;
+ uint8_t instruction_tlb_associativity_1gb;
+ uint8_t instruction_tlb_size_1gb;
+ uint8_t reserved2[CRAT_TLB_RESERVED_LENGTH];
+};
+
+/*
+ * HSA CCompute/APU Affinity structure and definitions
+ */
+#define CRAT_CCOMPUTE_FLAGS_ENABLED 0x00000001
+#define CRAT_CCOMPUTE_FLAGS_RESERVED 0xfffffffe
+
+#define CRAT_CCOMPUTE_RESERVED_LENGTH 16
+
+struct crat_subtype_ccompute {
+ uint8_t type;
+ uint8_t length;
+ uint16_t reserved;
+ uint32_t flags;
+ uint32_t processor_id_low;
+ uint8_t sibling_map[CRAT_SIBLINGMAP_SIZE];
+ uint32_t apu_size;
+ uint8_t reserved2[CRAT_CCOMPUTE_RESERVED_LENGTH];
+};
+
+/*
+ * HSA IO Link Affinity structure and definitions
+ */
+#define CRAT_IOLINK_FLAGS_ENABLED 0x00000001
+#define CRAT_IOLINK_FLAGS_COHERENCY 0x00000002
+#define CRAT_IOLINK_FLAGS_RESERVED 0xfffffffc
+
+/*
+ * IO interface types
+ */
+#define CRAT_IOLINK_TYPE_UNDEFINED 0
+#define CRAT_IOLINK_TYPE_HYPERTRANSPORT 1
+#define CRAT_IOLINK_TYPE_PCIEXPRESS 2
+#define CRAT_IOLINK_TYPE_OTHER 3
+#define CRAT_IOLINK_TYPE_MAX 255
+
+#define CRAT_IOLINK_RESERVED_LENGTH 24
+
+struct crat_subtype_iolink {
+ uint8_t type;
+ uint8_t length;
+ uint16_t reserved;
+ uint32_t flags;
+ uint32_t proximity_domain_from;
+ uint32_t proximity_domain_to;
+ uint8_t io_interface_type;
+ uint8_t version_major;
+ uint16_t version_minor;
+ uint32_t minimum_latency;
+ uint32_t maximum_latency;
+ uint32_t minimum_bandwidth_mbs;
+ uint32_t maximum_bandwidth_mbs;
+ uint32_t recommended_transfer_size;
+ uint8_t reserved2[CRAT_IOLINK_RESERVED_LENGTH];
+};
+
+/*
+ * HSA generic sub-type header
+ */
+
+#define CRAT_SUBTYPE_FLAGS_ENABLED 0x00000001
+
+struct crat_subtype_generic {
+ uint8_t type;
+ uint8_t length;
+ uint16_t reserved;
+ uint32_t flags;
+};
+
+/*
+ * Component Locality Distance Information Table (CDIT)
+ */
+#define CDIT_OEMID_LENGTH 6
+#define CDIT_OEMTABLEID_LENGTH 8
+
+struct cdit_header {
+ uint32_t signature;
+ uint32_t length;
+ uint8_t revision;
+ uint8_t checksum;
+ uint8_t oem_id[CDIT_OEMID_LENGTH];
+ uint8_t oem_table_id[CDIT_OEMTABLEID_LENGTH];
+ uint32_t oem_revision;
+ uint32_t creator_id;
+ uint32_t creator_revision;
+ uint32_t total_entries;
+ uint16_t num_domains;
+ uint8_t entry[1];
+};
+
+#pragma pack()
+
+#endif /* KFD_CRAT_H_INCLUDED */
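
Each CRAT sub-type entry begins with the generic type/length/flags header, so a consumer can walk the table by hopping sub->length bytes at a time until crat_header.length is exhausted. A minimal sketch of such a walk (illustrative; the function name is hypothetical and only trivial bounds checks are shown):

static void crat_for_each_entry(const struct crat_header *crat)
{
	const uint8_t *p = (const uint8_t *)(crat + 1);
	const uint8_t *end = (const uint8_t *)crat + crat->length;

	while (p + sizeof(struct crat_subtype_generic) <= end) {
		const struct crat_subtype_generic *sub =
			(const struct crat_subtype_generic *)p;

		if (sub->length == 0)
			break;			/* malformed table */

		if (sub->flags & CRAT_SUBTYPE_FLAGS_ENABLED)
			pr_debug("CRAT subtype %d, length %d\n",
				 sub->type, sub->length);

		p += sub->length;
	}
}
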
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
new file mode 100644
index 00000000000..43884ebd430
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -0,0 +1,308 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/amd-iommu.h>
+#include <linux/bsearch.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include "kfd_priv.h"
+#include "kfd_device_queue_manager.h"
+
+#define MQD_SIZE_ALIGNED 768
+
+static const struct kfd_device_info kaveri_device_info = {
+ .max_pasid_bits = 16,
+ .ih_ring_entry_size = 4 * sizeof(uint32_t),
+ .mqd_size_aligned = MQD_SIZE_ALIGNED
+};
+
+struct kfd_deviceid {
+ unsigned short did;
+ const struct kfd_device_info *device_info;
+};
+
+/* Please keep this sorted by increasing device id. */
+static const struct kfd_deviceid supported_devices[] = {
+ { 0x1304, &kaveri_device_info }, /* Kaveri */
+ { 0x1305, &kaveri_device_info }, /* Kaveri */
+ { 0x1306, &kaveri_device_info }, /* Kaveri */
+ { 0x1307, &kaveri_device_info }, /* Kaveri */
+ { 0x1309, &kaveri_device_info }, /* Kaveri */
+ { 0x130A, &kaveri_device_info }, /* Kaveri */
+ { 0x130B, &kaveri_device_info }, /* Kaveri */
+ { 0x130C, &kaveri_device_info }, /* Kaveri */
+ { 0x130D, &kaveri_device_info }, /* Kaveri */
+ { 0x130E, &kaveri_device_info }, /* Kaveri */
+ { 0x130F, &kaveri_device_info }, /* Kaveri */
+ { 0x1310, &kaveri_device_info }, /* Kaveri */
+ { 0x1311, &kaveri_device_info }, /* Kaveri */
+ { 0x1312, &kaveri_device_info }, /* Kaveri */
+ { 0x1313, &kaveri_device_info }, /* Kaveri */
+ { 0x1315, &kaveri_device_info }, /* Kaveri */
+ { 0x1316, &kaveri_device_info }, /* Kaveri */
+ { 0x1317, &kaveri_device_info }, /* Kaveri */
+ { 0x1318, &kaveri_device_info }, /* Kaveri */
+ { 0x131B, &kaveri_device_info }, /* Kaveri */
+ { 0x131C, &kaveri_device_info }, /* Kaveri */
+ { 0x131D, &kaveri_device_info }, /* Kaveri */
+};
+
+static const struct kfd_device_info *lookup_device_info(unsigned short did)
+{
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(supported_devices); i++) {
+ if (supported_devices[i].did == did) {
+ BUG_ON(supported_devices[i].device_info == NULL);
+ return supported_devices[i].device_info;
+ }
+ }
+
+ return NULL;
+}
+
+struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev)
+{
+ struct kfd_dev *kfd;
+
+ const struct kfd_device_info *device_info =
+ lookup_device_info(pdev->device);
+
+ if (!device_info)
+ return NULL;
+
+ kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
+ if (!kfd)
+ return NULL;
+
+ kfd->kgd = kgd;
+ kfd->device_info = device_info;
+ kfd->pdev = pdev;
+ kfd->init_complete = false;
+
+ return kfd;
+}
+
+static bool device_iommu_pasid_init(struct kfd_dev *kfd)
+{
+ const u32 required_iommu_flags = AMD_IOMMU_DEVICE_FLAG_ATS_SUP |
+ AMD_IOMMU_DEVICE_FLAG_PRI_SUP |
+ AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
+
+ struct amd_iommu_device_info iommu_info;
+ unsigned int pasid_limit;
+ int err;
+
+ err = amd_iommu_device_info(kfd->pdev, &iommu_info);
+ if (err < 0) {
+ dev_err(kfd_device,
+ "error getting iommu info. is the iommu enabled?\n");
+ return false;
+ }
+
+ if ((iommu_info.flags & required_iommu_flags) != required_iommu_flags) {
+ dev_err(kfd_device, "error required iommu flags ats(%i), pri(%i), pasid(%i)\n",
+ (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP) != 0,
+ (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) != 0,
+ (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP) != 0);
+ return false;
+ }
+
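+	/*
+	 * The usable PASID range is the smallest of what the device reports
+	 * (max_pasid_bits; 16 bits, i.e. 65536 PASIDs, on Kaveri), what the
+	 * IOMMU supports, and the number of processes the doorbell aperture
+	 * can serve (doorbell_process_limit).
+	 */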
+ pasid_limit = min_t(unsigned int,
+ (unsigned int)1 << kfd->device_info->max_pasid_bits,
+ iommu_info.max_pasids);
+ /*
+	 * The last PASID is used for kernel queue doorbells;
+	 * in the future it might be used for a kernel thread instead.
+ */
+ pasid_limit = min_t(unsigned int,
+ pasid_limit,
+ kfd->doorbell_process_limit - 1);
+
+ err = amd_iommu_init_device(kfd->pdev, pasid_limit);
+ if (err < 0) {
+ dev_err(kfd_device, "error initializing iommu device\n");
+ return false;
+ }
+
+ if (!kfd_set_pasid_limit(pasid_limit)) {
+ dev_err(kfd_device, "error setting pasid limit\n");
+ amd_iommu_free_device(kfd->pdev);
+ return false;
+ }
+
+ return true;
+}
+
+static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, int pasid)
+{
+ struct kfd_dev *dev = kfd_device_by_pci_dev(pdev);
+
+ if (dev)
+ kfd_unbind_process_from_device(dev, pasid);
+}
+
+bool kgd2kfd_device_init(struct kfd_dev *kfd,
+ const struct kgd2kfd_shared_resources *gpu_resources)
+{
+ unsigned int size;
+
+ kfd->shared_resources = *gpu_resources;
+
+ /* calculate max size of mqds needed for queues */
+ size = max_num_of_processes *
+ max_num_of_queues_per_process *
+ kfd->device_info->mqd_size_aligned;
+
+ /* add another 512KB for all other allocations on gart */
+ size += 512 * 1024;
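+	/*
+	 * Illustrative example (module defaults assumed, not shown here):
+	 * with 32 processes, 128 queues per process and 768-byte aligned MQDs
+	 * this comes to 3 MB of MQDs plus the extra 512 KB.
+	 */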
+
+ if (kfd2kgd->init_sa_manager(kfd->kgd, size)) {
+ dev_err(kfd_device,
+ "Error initializing sa manager for device (%x:%x)\n",
+ kfd->pdev->vendor, kfd->pdev->device);
+ goto out;
+ }
+
+ kfd_doorbell_init(kfd);
+
+ if (kfd_topology_add_device(kfd) != 0) {
+ dev_err(kfd_device,
+ "Error adding device (%x:%x) to topology\n",
+ kfd->pdev->vendor, kfd->pdev->device);
+ goto kfd_topology_add_device_error;
+ }
+
+ if (kfd_interrupt_init(kfd)) {
+ dev_err(kfd_device,
+ "Error initializing interrupts for device (%x:%x)\n",
+ kfd->pdev->vendor, kfd->pdev->device);
+ goto kfd_interrupt_error;
+ }
+
+ if (!device_iommu_pasid_init(kfd)) {
+ dev_err(kfd_device,
+ "Error initializing iommuv2 for device (%x:%x)\n",
+ kfd->pdev->vendor, kfd->pdev->device);
+ goto device_iommu_pasid_error;
+ }
+ amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
+ iommu_pasid_shutdown_callback);
+
+ kfd->dqm = device_queue_manager_init(kfd);
+ if (!kfd->dqm) {
+ dev_err(kfd_device,
+ "Error initializing queue manager for device (%x:%x)\n",
+ kfd->pdev->vendor, kfd->pdev->device);
+ goto device_queue_manager_error;
+ }
+
+ if (kfd->dqm->start(kfd->dqm) != 0) {
+ dev_err(kfd_device,
+ "Error starting queuen manager for device (%x:%x)\n",
+ kfd->pdev->vendor, kfd->pdev->device);
+ goto dqm_start_error;
+ }
+
+ kfd->init_complete = true;
+ dev_info(kfd_device, "added device (%x:%x)\n", kfd->pdev->vendor,
+ kfd->pdev->device);
+
+ pr_debug("kfd: Starting kfd with the following scheduling policy %d\n",
+ sched_policy);
+
+ goto out;
+
+dqm_start_error:
+ device_queue_manager_uninit(kfd->dqm);
+device_queue_manager_error:
+ amd_iommu_free_device(kfd->pdev);
+device_iommu_pasid_error:
+ kfd_interrupt_exit(kfd);
+kfd_interrupt_error:
+ kfd_topology_remove_device(kfd);
+kfd_topology_add_device_error:
+ kfd2kgd->fini_sa_manager(kfd->kgd);
+ dev_err(kfd_device,
+ "device (%x:%x) NOT added due to errors\n",
+ kfd->pdev->vendor, kfd->pdev->device);
+out:
+ return kfd->init_complete;
+}
+
+void kgd2kfd_device_exit(struct kfd_dev *kfd)
+{
+ if (kfd->init_complete) {
+ device_queue_manager_uninit(kfd->dqm);
+ amd_iommu_free_device(kfd->pdev);
+ kfd_interrupt_exit(kfd);
+ kfd_topology_remove_device(kfd);
+ }
+
+ kfree(kfd);
+}
+
+void kgd2kfd_suspend(struct kfd_dev *kfd)
+{
+ BUG_ON(kfd == NULL);
+
+ if (kfd->init_complete) {
+ kfd->dqm->stop(kfd->dqm);
+ amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
+ amd_iommu_free_device(kfd->pdev);
+ }
+}
+
+int kgd2kfd_resume(struct kfd_dev *kfd)
+{
+ unsigned int pasid_limit;
+ int err;
+
+ BUG_ON(kfd == NULL);
+
+ pasid_limit = kfd_get_pasid_limit();
+
+ if (kfd->init_complete) {
+ err = amd_iommu_init_device(kfd->pdev, pasid_limit);
+ if (err < 0)
+ return -ENXIO;
+ amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
+ iommu_pasid_shutdown_callback);
+ kfd->dqm->start(kfd->dqm);
+ }
+
+ return 0;
+}
+
+/* This is called directly from KGD at ISR. */
+void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
+{
+ if (kfd->init_complete) {
+ spin_lock(&kfd->interrupt_lock);
+
+ if (kfd->interrupts_active
+ && enqueue_ih_ring_entry(kfd, ih_ring_entry))
+ schedule_work(&kfd->interrupt_work);
+
+ spin_unlock(&kfd->interrupt_lock);
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
new file mode 100644
index 00000000000..924e90c072e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -0,0 +1,1062 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/printk.h>
+#include <linux/bitops.h>
+#include "kfd_priv.h"
+#include "kfd_device_queue_manager.h"
+#include "kfd_mqd_manager.h"
+#include "cik_regs.h"
+#include "kfd_kernel_queue.h"
+#include "../../radeon/cik_reg.h"
+
+/* Size of the per-pipe EOP queue */
+#define CIK_HPD_EOP_BYTES_LOG2 11
+#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)
+
+static bool is_mem_initialized;
+
+static int init_memory(struct device_queue_manager *dqm);
+static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
+ unsigned int pasid, unsigned int vmid);
+
+static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
+ struct queue *q,
+ struct qcm_process_device *qpd);
+static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock);
+static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock);
+
+
+static inline unsigned int get_pipes_num(struct device_queue_manager *dqm)
+{
+ BUG_ON(!dqm || !dqm->dev);
+ return dqm->dev->shared_resources.compute_pipe_count;
+}
+
+static inline unsigned int get_first_pipe(struct device_queue_manager *dqm)
+{
+ BUG_ON(!dqm);
+ return dqm->dev->shared_resources.first_compute_pipe;
+}
+
+static inline unsigned int get_pipes_num_cpsch(void)
+{
+ return PIPE_PER_ME_CP_SCHEDULING;
+}
+
+static inline unsigned int
+get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
+{
+ uint32_t nybble;
+
+ nybble = (pdd->lds_base >> 60) & 0x0E;
+
+ return nybble;
+
+}
+
+static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
+{
+ unsigned int shared_base;
+
+ shared_base = (pdd->lds_base >> 16) & 0xFF;
+
+ return shared_base;
+}
+
+static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble);
+static void init_process_memory(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
+ struct kfd_process_device *pdd;
+ unsigned int temp;
+
+ BUG_ON(!dqm || !qpd);
+
+ pdd = qpd_to_pdd(qpd);
+
+ /* check if sh_mem_config register already configured */
+ if (qpd->sh_mem_config == 0) {
+ qpd->sh_mem_config =
+ ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED) |
+ DEFAULT_MTYPE(MTYPE_NONCACHED) |
+ APE1_MTYPE(MTYPE_NONCACHED);
+ qpd->sh_mem_ape1_limit = 0;
+ qpd->sh_mem_ape1_base = 0;
+ }
+
+ if (qpd->pqm->process->is_32bit_user_mode) {
+ temp = get_sh_mem_bases_32(pdd);
+ qpd->sh_mem_bases = SHARED_BASE(temp);
+ qpd->sh_mem_config |= PTR32;
+ } else {
+ temp = get_sh_mem_bases_nybble_64(pdd);
+ qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
+ }
+
+ pr_debug("kfd: is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
+ qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
+}
+
+static void program_sh_mem_settings(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
+ return kfd2kgd->program_sh_mem_settings(dqm->dev->kgd, qpd->vmid,
+ qpd->sh_mem_config,
+ qpd->sh_mem_ape1_base,
+ qpd->sh_mem_ape1_limit,
+ qpd->sh_mem_bases);
+}
+
+static int allocate_vmid(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ struct queue *q)
+{
+ int bit, allocated_vmid;
+
+ if (dqm->vmid_bitmap == 0)
+ return -ENOMEM;
+
+ bit = find_first_bit((unsigned long *)&dqm->vmid_bitmap, CIK_VMID_NUM);
+ clear_bit(bit, (unsigned long *)&dqm->vmid_bitmap);
+
+	/* Kaveri KFD VMIDs start from VMID 8 */
+ allocated_vmid = bit + KFD_VMID_START_OFFSET;
+ pr_debug("kfd: vmid allocation %d\n", allocated_vmid);
+ qpd->vmid = allocated_vmid;
+ q->properties.vmid = allocated_vmid;
+
+ set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid);
+ program_sh_mem_settings(dqm, qpd);
+
+ return 0;
+}
+
+static void deallocate_vmid(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ struct queue *q)
+{
+ int bit = qpd->vmid - KFD_VMID_START_OFFSET;
+
+ set_bit(bit, (unsigned long *)&dqm->vmid_bitmap);
+ qpd->vmid = 0;
+ q->properties.vmid = 0;
+}
+
+static int create_queue_nocpsch(struct device_queue_manager *dqm,
+ struct queue *q,
+ struct qcm_process_device *qpd,
+ int *allocated_vmid)
+{
+ int retval;
+
+ BUG_ON(!dqm || !q || !qpd || !allocated_vmid);
+
+ pr_debug("kfd: In func %s\n", __func__);
+ print_queue(q);
+
+ mutex_lock(&dqm->lock);
+
+ if (list_empty(&qpd->queues_list)) {
+ retval = allocate_vmid(dqm, qpd, q);
+ if (retval != 0) {
+ mutex_unlock(&dqm->lock);
+ return retval;
+ }
+ }
+ *allocated_vmid = qpd->vmid;
+ q->properties.vmid = qpd->vmid;
+
+ retval = create_compute_queue_nocpsch(dqm, q, qpd);
+
+ if (retval != 0) {
+ if (list_empty(&qpd->queues_list)) {
+ deallocate_vmid(dqm, qpd, q);
+ *allocated_vmid = 0;
+ }
+ mutex_unlock(&dqm->lock);
+ return retval;
+ }
+
+ list_add(&q->list, &qpd->queues_list);
+ dqm->queue_count++;
+
+ mutex_unlock(&dqm->lock);
+ return 0;
+}
+
+static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
+{
+ bool set;
+ int pipe, bit;
+
+ set = false;
+
+ for (pipe = dqm->next_pipe_to_allocate; pipe < get_pipes_num(dqm);
+ pipe = (pipe + 1) % get_pipes_num(dqm)) {
+ if (dqm->allocated_queues[pipe] != 0) {
+ bit = find_first_bit(
+ (unsigned long *)&dqm->allocated_queues[pipe],
+ QUEUES_PER_PIPE);
+
+ clear_bit(bit,
+ (unsigned long *)&dqm->allocated_queues[pipe]);
+ q->pipe = pipe;
+ q->queue = bit;
+ set = true;
+ break;
+ }
+ }
+
+ if (set == false)
+ return -EBUSY;
+
+ pr_debug("kfd: DQM %s hqd slot - pipe (%d) queue(%d)\n",
+ __func__, q->pipe, q->queue);
+ /* horizontal hqd allocation */
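+	/* i.e. round-robin across pipes, so queues are spread evenly */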
+ dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_num(dqm);
+
+ return 0;
+}
+
+static inline void deallocate_hqd(struct device_queue_manager *dqm,
+ struct queue *q)
+{
+ set_bit(q->queue, (unsigned long *)&dqm->allocated_queues[q->pipe]);
+}
+
+static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
+ struct queue *q,
+ struct qcm_process_device *qpd)
+{
+ int retval;
+ struct mqd_manager *mqd;
+
+ BUG_ON(!dqm || !q || !qpd);
+
+ mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
+ if (mqd == NULL)
+ return -ENOMEM;
+
+ retval = allocate_hqd(dqm, q);
+ if (retval != 0)
+ return retval;
+
+ retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
+ &q->gart_mqd_addr, &q->properties);
+ if (retval != 0) {
+ deallocate_hqd(dqm, q);
+ return retval;
+ }
+
+ return 0;
+}
+
+static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ struct queue *q)
+{
+ int retval;
+ struct mqd_manager *mqd;
+
+ BUG_ON(!dqm || !q || !q->mqd || !qpd);
+
+ retval = 0;
+
+ pr_debug("kfd: In Func %s\n", __func__);
+
+ mutex_lock(&dqm->lock);
+ mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
+ if (mqd == NULL) {
+ retval = -ENOMEM;
+ goto out;
+ }
+
+ retval = mqd->destroy_mqd(mqd, q->mqd,
+ KFD_PREEMPT_TYPE_WAVEFRONT,
+ QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
+ q->pipe, q->queue);
+
+ if (retval != 0)
+ goto out;
+
+ deallocate_hqd(dqm, q);
+
+ mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+
+ list_del(&q->list);
+ if (list_empty(&qpd->queues_list))
+ deallocate_vmid(dqm, qpd, q);
+ dqm->queue_count--;
+out:
+ mutex_unlock(&dqm->lock);
+ return retval;
+}
+
+static int update_queue(struct device_queue_manager *dqm, struct queue *q)
+{
+ int retval;
+ struct mqd_manager *mqd;
+
+ BUG_ON(!dqm || !q || !q->mqd);
+
+ mutex_lock(&dqm->lock);
+ mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
+ if (mqd == NULL) {
+ mutex_unlock(&dqm->lock);
+ return -ENOMEM;
+ }
+
+ retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
+ if (q->properties.is_active == true)
+ dqm->queue_count++;
+ else
+ dqm->queue_count--;
+
+ if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
+ retval = execute_queues_cpsch(dqm, false);
+
+ mutex_unlock(&dqm->lock);
+ return retval;
+}
+
+static struct mqd_manager *get_mqd_manager_nocpsch(
+ struct device_queue_manager *dqm, enum KFD_MQD_TYPE type)
+{
+ struct mqd_manager *mqd;
+
+ BUG_ON(!dqm || type >= KFD_MQD_TYPE_MAX);
+
+ pr_debug("kfd: In func %s mqd type %d\n", __func__, type);
+
+ mqd = dqm->mqds[type];
+ if (!mqd) {
+ mqd = mqd_manager_init(type, dqm->dev);
+ if (mqd == NULL)
+ pr_err("kfd: mqd manager is NULL");
+ dqm->mqds[type] = mqd;
+ }
+
+ return mqd;
+}
+
+static int register_process_nocpsch(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
+ struct device_process_node *n;
+
+ BUG_ON(!dqm || !qpd);
+
+ pr_debug("kfd: In func %s\n", __func__);
+
+ n = kzalloc(sizeof(struct device_process_node), GFP_KERNEL);
+ if (!n)
+ return -ENOMEM;
+
+ n->qpd = qpd;
+
+ mutex_lock(&dqm->lock);
+ list_add(&n->list, &dqm->queues);
+
+ init_process_memory(dqm, qpd);
+ dqm->processes_count++;
+
+ mutex_unlock(&dqm->lock);
+
+ return 0;
+}
+
+static int unregister_process_nocpsch(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
+ int retval;
+ struct device_process_node *cur, *next;
+
+ BUG_ON(!dqm || !qpd);
+
+ BUG_ON(!list_empty(&qpd->queues_list));
+
+ pr_debug("kfd: In func %s\n", __func__);
+
+ retval = 0;
+ mutex_lock(&dqm->lock);
+
+ list_for_each_entry_safe(cur, next, &dqm->queues, list) {
+ if (qpd == cur->qpd) {
+ list_del(&cur->list);
+ kfree(cur);
+ dqm->processes_count--;
+ goto out;
+ }
+ }
+ /* qpd not found in dqm list */
+ retval = 1;
+out:
+ mutex_unlock(&dqm->lock);
+ return retval;
+}
+
+static int
+set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
+ unsigned int vmid)
+{
+ uint32_t pasid_mapping;
+
+ pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
+ ATC_VMID_PASID_MAPPING_VALID;
+ return kfd2kgd->set_pasid_vmid_mapping(dqm->dev->kgd, pasid_mapping,
+ vmid);
+}
+
+static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
+{
+ /* In 64-bit mode, we can only control the top 3 bits of the LDS,
+ * scratch and GPUVM apertures.
+ * The hardware fills in the remaining 59 bits according to the
+ * following pattern:
+ * LDS: X0000000'00000000 - X0000001'00000000 (4GB)
+ * Scratch: X0000001'00000000 - X0000002'00000000 (4GB)
+ * GPUVM: Y0010000'00000000 - Y0020000'00000000 (1TB)
+ *
+ * (where X/Y is the configurable nybble with the low-bit 0)
+ *
+ * LDS and scratch will have the same top nybble programmed in the
+ * top 3 bits of SH_MEM_BASES.PRIVATE_BASE.
+ * GPUVM can have a different top nybble programmed in the
+ * top 3 bits of SH_MEM_BASES.SHARED_BASE.
+ * We don't bother to support different top nybbles
+ * for LDS/Scratch and GPUVM.
+ */
+
+ BUG_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
+ top_address_nybble == 0);
+
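+	/*
+	 * Illustrative example: a nybble of 0xE places LDS/scratch and GPUVM
+	 * at 0xE000000000000000, i.e. PRIVATE_BASE(0xE000) | SHARED_BASE(0xE000).
+	 */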
+ return PRIVATE_BASE(top_address_nybble << 12) |
+ SHARED_BASE(top_address_nybble << 12);
+}
+
+static int init_memory(struct device_queue_manager *dqm)
+{
+ int i, retval;
+
+ for (i = 8; i < 16; i++)
+ set_pasid_vmid_mapping(dqm, 0, i);
+
+ retval = kfd2kgd->init_memory(dqm->dev->kgd);
+ if (retval == 0)
+ is_mem_initialized = true;
+ return retval;
+}
+
+
+static int init_pipelines(struct device_queue_manager *dqm,
+ unsigned int pipes_num, unsigned int first_pipe)
+{
+ void *hpdptr;
+ struct mqd_manager *mqd;
+ unsigned int i, err, inx;
+ uint64_t pipe_hpd_addr;
+
+ BUG_ON(!dqm || !dqm->dev);
+
+ pr_debug("kfd: In func %s\n", __func__);
+
+ /*
+ * Allocate memory for the HPDs. This is hardware-owned per-pipe data.
+ * The driver never accesses this memory after zeroing it.
+ * It doesn't even have to be saved/restored on suspend/resume
+ * because it contains no data when there are no active queues.
+ */
+
+ err = kfd2kgd->allocate_mem(dqm->dev->kgd,
+ CIK_HPD_EOP_BYTES * pipes_num,
+ PAGE_SIZE,
+ KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
+ (struct kgd_mem **) &dqm->pipeline_mem);
+
+ if (err) {
+ pr_err("kfd: error allocate vidmem num pipes: %d\n",
+ pipes_num);
+ return -ENOMEM;
+ }
+
+ hpdptr = dqm->pipeline_mem->cpu_ptr;
+ dqm->pipelines_addr = dqm->pipeline_mem->gpu_addr;
+
+ memset(hpdptr, 0, CIK_HPD_EOP_BYTES * pipes_num);
+
+ mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
+ if (mqd == NULL) {
+ kfd2kgd->free_mem(dqm->dev->kgd,
+ (struct kgd_mem *) dqm->pipeline_mem);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < pipes_num; i++) {
+ inx = i + first_pipe;
+ pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES;
+ pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr);
+ /* = log2(bytes/4)-1 */
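+		/* with CIK_HPD_EOP_BYTES_LOG2 == 11 this is 11 - 3 == 8 == log2(2048/4) - 1 */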
+ kfd2kgd->init_pipeline(dqm->dev->kgd, i,
+ CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr);
+ }
+
+ return 0;
+}
+
+
+static int init_scheduler(struct device_queue_manager *dqm)
+{
+ int retval;
+
+ BUG_ON(!dqm);
+
+ pr_debug("kfd: In %s\n", __func__);
+
+ retval = init_pipelines(dqm, get_pipes_num(dqm), KFD_DQM_FIRST_PIPE);
+ if (retval != 0)
+ return retval;
+
+ retval = init_memory(dqm);
+
+ return retval;
+}
+
+static int initialize_nocpsch(struct device_queue_manager *dqm)
+{
+ int i;
+
+ BUG_ON(!dqm);
+
+ pr_debug("kfd: In func %s num of pipes: %d\n",
+ __func__, get_pipes_num(dqm));
+
+ mutex_init(&dqm->lock);
+ INIT_LIST_HEAD(&dqm->queues);
+ dqm->queue_count = dqm->next_pipe_to_allocate = 0;
+ dqm->allocated_queues = kcalloc(get_pipes_num(dqm),
+ sizeof(unsigned int), GFP_KERNEL);
+ if (!dqm->allocated_queues) {
+ mutex_destroy(&dqm->lock);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < get_pipes_num(dqm); i++)
+ dqm->allocated_queues[i] = (1 << QUEUES_PER_PIPE) - 1;
+
+ dqm->vmid_bitmap = (1 << VMID_PER_DEVICE) - 1;
+
+ init_scheduler(dqm);
+ return 0;
+}
+
+static void uninitialize_nocpsch(struct device_queue_manager *dqm)
+{
+ int i;
+
+ BUG_ON(!dqm);
+
+ BUG_ON(dqm->queue_count > 0 || dqm->processes_count > 0);
+
+ kfree(dqm->allocated_queues);
+ for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
+ kfree(dqm->mqds[i]);
+ mutex_destroy(&dqm->lock);
+ kfd2kgd->free_mem(dqm->dev->kgd,
+ (struct kgd_mem *) dqm->pipeline_mem);
+}
+
+static int start_nocpsch(struct device_queue_manager *dqm)
+{
+ return 0;
+}
+
+static int stop_nocpsch(struct device_queue_manager *dqm)
+{
+ return 0;
+}
+
+/*
+ * Device Queue Manager implementation for cp scheduler
+ */
+
+static int set_sched_resources(struct device_queue_manager *dqm)
+{
+ struct scheduling_resources res;
+ unsigned int queue_num, queue_mask;
+
+ BUG_ON(!dqm);
+
+ pr_debug("kfd: In func %s\n", __func__);
+
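+	/*
+	 * e.g. 3 scheduling pipes x 8 queues per pipe give a 24-bit queue
+	 * mask, shifted so it starts at the first compute pipe's queues.
+	 */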
+ queue_num = get_pipes_num_cpsch() * QUEUES_PER_PIPE;
+ queue_mask = (1 << queue_num) - 1;
+ res.vmid_mask = (1 << VMID_PER_DEVICE) - 1;
+ res.vmid_mask <<= KFD_VMID_START_OFFSET;
+ res.queue_mask = queue_mask << (get_first_pipe(dqm) * QUEUES_PER_PIPE);
+ res.gws_mask = res.oac_mask = res.gds_heap_base =
+ res.gds_heap_size = 0;
+
+ pr_debug("kfd: scheduling resources:\n"
+ " vmid mask: 0x%8X\n"
+ " queue mask: 0x%8llX\n",
+ res.vmid_mask, res.queue_mask);
+
+ return pm_send_set_resources(&dqm->packets, &res);
+}
+
+static int initialize_cpsch(struct device_queue_manager *dqm)
+{
+ int retval;
+
+ BUG_ON(!dqm);
+
+ pr_debug("kfd: In func %s num of pipes: %d\n",
+ __func__, get_pipes_num_cpsch());
+
+ mutex_init(&dqm->lock);
+ INIT_LIST_HEAD(&dqm->queues);
+ dqm->queue_count = dqm->processes_count = 0;
+ dqm->active_runlist = false;
+ retval = init_pipelines(dqm, get_pipes_num(dqm), 0);
+ if (retval != 0)
+ goto fail_init_pipelines;
+
+ return 0;
+
+fail_init_pipelines:
+ mutex_destroy(&dqm->lock);
+ return retval;
+}
+
+static int start_cpsch(struct device_queue_manager *dqm)
+{
+ struct device_process_node *node;
+ int retval;
+
+ BUG_ON(!dqm);
+
+ retval = 0;
+
+ retval = pm_init(&dqm->packets, dqm);
+ if (retval != 0)
+ goto fail_packet_manager_init;
+
+ retval = set_sched_resources(dqm);
+ if (retval != 0)
+ goto fail_set_sched_resources;
+
+ pr_debug("kfd: allocating fence memory\n");
+
+ /* allocate fence memory on the gart */
+ retval = kfd2kgd->allocate_mem(dqm->dev->kgd,
+ sizeof(*dqm->fence_addr),
+ 32,
+ KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
+ (struct kgd_mem **) &dqm->fence_mem);
+
+ if (retval != 0)
+ goto fail_allocate_vidmem;
+
+ dqm->fence_addr = dqm->fence_mem->cpu_ptr;
+ dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;
+
+ list_for_each_entry(node, &dqm->queues, list)
+ if (node->qpd->pqm->process && dqm->dev)
+ kfd_bind_process_to_device(dqm->dev,
+ node->qpd->pqm->process);
+
+ execute_queues_cpsch(dqm, true);
+
+ return 0;
+fail_allocate_vidmem:
+fail_set_sched_resources:
+ pm_uninit(&dqm->packets);
+fail_packet_manager_init:
+ return retval;
+}
+
+static int stop_cpsch(struct device_queue_manager *dqm)
+{
+ struct device_process_node *node;
+ struct kfd_process_device *pdd;
+
+ BUG_ON(!dqm);
+
+ destroy_queues_cpsch(dqm, true);
+
+ list_for_each_entry(node, &dqm->queues, list) {
+ pdd = qpd_to_pdd(node->qpd);
+ pdd->bound = false;
+ }
+ kfd2kgd->free_mem(dqm->dev->kgd,
+ (struct kgd_mem *) dqm->fence_mem);
+ pm_uninit(&dqm->packets);
+
+ return 0;
+}
+
+static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
+ struct kernel_queue *kq,
+ struct qcm_process_device *qpd)
+{
+ BUG_ON(!dqm || !kq || !qpd);
+
+ pr_debug("kfd: In func %s\n", __func__);
+
+ mutex_lock(&dqm->lock);
+ list_add(&kq->list, &qpd->priv_queue_list);
+ dqm->queue_count++;
+ qpd->is_debug = true;
+ execute_queues_cpsch(dqm, false);
+ mutex_unlock(&dqm->lock);
+
+ return 0;
+}
+
+static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
+ struct kernel_queue *kq,
+ struct qcm_process_device *qpd)
+{
+ BUG_ON(!dqm || !kq);
+
+ pr_debug("kfd: In %s\n", __func__);
+
+ mutex_lock(&dqm->lock);
+ destroy_queues_cpsch(dqm, false);
+ list_del(&kq->list);
+ dqm->queue_count--;
+ qpd->is_debug = false;
+ execute_queues_cpsch(dqm, false);
+ mutex_unlock(&dqm->lock);
+}
+
+static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
+ struct qcm_process_device *qpd, int *allocate_vmid)
+{
+ int retval;
+ struct mqd_manager *mqd;
+
+ BUG_ON(!dqm || !q || !qpd);
+
+ retval = 0;
+
+ if (allocate_vmid)
+ *allocate_vmid = 0;
+
+ mutex_lock(&dqm->lock);
+
+ mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP);
+ if (mqd == NULL) {
+ mutex_unlock(&dqm->lock);
+ return -ENOMEM;
+ }
+
+ retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
+ &q->gart_mqd_addr, &q->properties);
+ if (retval != 0)
+ goto out;
+
+ list_add(&q->list, &qpd->queues_list);
+ if (q->properties.is_active) {
+ dqm->queue_count++;
+ retval = execute_queues_cpsch(dqm, false);
+ }
+
+out:
+ mutex_unlock(&dqm->lock);
+ return retval;
+}
+
+static int fence_wait_timeout(unsigned int *fence_addr,
+ unsigned int fence_value,
+ unsigned long timeout)
+{
+ BUG_ON(!fence_addr);
+ timeout += jiffies;
+
+ while (*fence_addr != fence_value) {
+ if (time_after(jiffies, timeout)) {
+ pr_err("kfd: qcm fence wait loop timeout expired\n");
+ return -ETIME;
+ }
+ cpu_relax();
+ }
+
+ return 0;
+}
+
+static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock)
+{
+ int retval;
+
+ BUG_ON(!dqm);
+
+ retval = 0;
+
+ if (lock)
+ mutex_lock(&dqm->lock);
+ if (dqm->active_runlist == false)
+ goto out;
+ retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
+ KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES, 0, false, 0);
+ if (retval != 0)
+ goto out;
+
+ *dqm->fence_addr = KFD_FENCE_INIT;
+ pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
+ KFD_FENCE_COMPLETED);
+	/* wait for the CP to write the fence value, bounded by the preemption timeout */
+ fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
+ QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
+ pm_release_ib(&dqm->packets);
+ dqm->active_runlist = false;
+
+out:
+ if (lock)
+ mutex_unlock(&dqm->lock);
+ return retval;
+}
+
+static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock)
+{
+ int retval;
+
+ BUG_ON(!dqm);
+
+ if (lock)
+ mutex_lock(&dqm->lock);
+
+ retval = destroy_queues_cpsch(dqm, false);
+ if (retval != 0) {
+ pr_err("kfd: the cp might be in an unrecoverable state due to an unsuccessful queues preemption");
+ goto out;
+ }
+
+ if (dqm->queue_count <= 0 || dqm->processes_count <= 0) {
+ retval = 0;
+ goto out;
+ }
+
+ if (dqm->active_runlist) {
+ retval = 0;
+ goto out;
+ }
+
+ retval = pm_send_runlist(&dqm->packets, &dqm->queues);
+ if (retval != 0) {
+ pr_err("kfd: failed to execute runlist");
+ goto out;
+ }
+ dqm->active_runlist = true;
+
+out:
+ if (lock)
+ mutex_unlock(&dqm->lock);
+ return retval;
+}
+
+static int destroy_queue_cpsch(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ struct queue *q)
+{
+ int retval;
+ struct mqd_manager *mqd;
+
+ BUG_ON(!dqm || !qpd || !q);
+
+ retval = 0;
+
+ /* remove queue from list to prevent rescheduling after preemption */
+ mutex_lock(&dqm->lock);
+
+ mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP);
+ if (!mqd) {
+ retval = -ENOMEM;
+ goto failed;
+ }
+
+ list_del(&q->list);
+ dqm->queue_count--;
+
+ execute_queues_cpsch(dqm, false);
+
+ mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+
+ mutex_unlock(&dqm->lock);
+
+ return 0;
+
+failed:
+ mutex_unlock(&dqm->lock);
+ return retval;
+}
+
+/*
+ * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
+ * stay in user mode.
+ */
+#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
+/* APE1 limit is inclusive and 64K aligned. */
+#define APE1_LIMIT_ALIGNMENT 0xFFFF
+
+static bool set_cache_memory_policy(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size)
+{
+ uint32_t default_mtype;
+ uint32_t ape1_mtype;
+
+ pr_debug("kfd: In func %s\n", __func__);
+
+ mutex_lock(&dqm->lock);
+
+ if (alternate_aperture_size == 0) {
+ /* base > limit disables APE1 */
+ qpd->sh_mem_ape1_base = 1;
+ qpd->sh_mem_ape1_limit = 0;
+ } else {
+ /*
+ * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
+ * SH_MEM_APE1_BASE[31:0], 0x0000 }
+ * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
+ * SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
+ * Verify that the base and size parameters can be
+ * represented in this format and convert them.
+ * Additionally restrict APE1 to user-mode addresses.
+ */
+
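+		/*
+		 * Illustrative example: base 0x200000000 with size 0x10000 (one
+		 * 64K chunk) passes the checks below and programs
+		 * sh_mem_ape1_base == sh_mem_ape1_limit == 0x20000.
+		 */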
+ uint64_t base = (uintptr_t)alternate_aperture_base;
+ uint64_t limit = base + alternate_aperture_size - 1;
+
+ if (limit <= base)
+ goto out;
+
+ if ((base & APE1_FIXED_BITS_MASK) != 0)
+ goto out;
+
+ if ((limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT)
+ goto out;
+
+ qpd->sh_mem_ape1_base = base >> 16;
+ qpd->sh_mem_ape1_limit = limit >> 16;
+ }
+
+ default_mtype = (default_policy == cache_policy_coherent) ?
+ MTYPE_NONCACHED :
+ MTYPE_CACHED;
+
+ ape1_mtype = (alternate_policy == cache_policy_coherent) ?
+ MTYPE_NONCACHED :
+ MTYPE_CACHED;
+
+ qpd->sh_mem_config = (qpd->sh_mem_config & PTR32)
+ | ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED)
+ | DEFAULT_MTYPE(default_mtype)
+ | APE1_MTYPE(ape1_mtype);
+
+ if ((sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
+ program_sh_mem_settings(dqm, qpd);
+
+ pr_debug("kfd: sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
+ qpd->sh_mem_config, qpd->sh_mem_ape1_base,
+ qpd->sh_mem_ape1_limit);
+
+ mutex_unlock(&dqm->lock);
+ return true;
+
+out:
+ mutex_unlock(&dqm->lock);
+ return false;
+}
+
+struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
+{
+ struct device_queue_manager *dqm;
+
+ BUG_ON(!dev);
+
+ dqm = kzalloc(sizeof(struct device_queue_manager), GFP_KERNEL);
+ if (!dqm)
+ return NULL;
+
+ dqm->dev = dev;
+ switch (sched_policy) {
+ case KFD_SCHED_POLICY_HWS:
+ case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
+ /* initialize dqm for cp scheduling */
+ dqm->create_queue = create_queue_cpsch;
+ dqm->initialize = initialize_cpsch;
+ dqm->start = start_cpsch;
+ dqm->stop = stop_cpsch;
+ dqm->destroy_queue = destroy_queue_cpsch;
+ dqm->update_queue = update_queue;
+ dqm->get_mqd_manager = get_mqd_manager_nocpsch;
+ dqm->register_process = register_process_nocpsch;
+ dqm->unregister_process = unregister_process_nocpsch;
+ dqm->uninitialize = uninitialize_nocpsch;
+ dqm->create_kernel_queue = create_kernel_queue_cpsch;
+ dqm->destroy_kernel_queue = destroy_kernel_queue_cpsch;
+ dqm->set_cache_memory_policy = set_cache_memory_policy;
+ break;
+ case KFD_SCHED_POLICY_NO_HWS:
+ /* initialize dqm for no cp scheduling */
+ dqm->start = start_nocpsch;
+ dqm->stop = stop_nocpsch;
+ dqm->create_queue = create_queue_nocpsch;
+ dqm->destroy_queue = destroy_queue_nocpsch;
+ dqm->update_queue = update_queue;
+ dqm->get_mqd_manager = get_mqd_manager_nocpsch;
+ dqm->register_process = register_process_nocpsch;
+ dqm->unregister_process = unregister_process_nocpsch;
+ dqm->initialize = initialize_nocpsch;
+ dqm->uninitialize = uninitialize_nocpsch;
+ dqm->set_cache_memory_policy = set_cache_memory_policy;
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ if (dqm->initialize(dqm) != 0) {
+ kfree(dqm);
+ return NULL;
+ }
+
+ return dqm;
+}
+
+void device_queue_manager_uninit(struct device_queue_manager *dqm)
+{
+ BUG_ON(!dqm);
+
+ dqm->uninitialize(dqm);
+ kfree(dqm);
+}
+
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
new file mode 100644
index 00000000000..c3f189e8ae3
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef KFD_DEVICE_QUEUE_MANAGER_H_
+#define KFD_DEVICE_QUEUE_MANAGER_H_
+
+#include <linux/rwsem.h>
+#include <linux/list.h>
+#include "kfd_priv.h"
+#include "kfd_mqd_manager.h"
+
+#define QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS (500)
+#define QUEUES_PER_PIPE (8)
+#define PIPE_PER_ME_CP_SCHEDULING (3)
+#define CIK_VMID_NUM (8)
+#define KFD_VMID_START_OFFSET (8)
+#define VMID_PER_DEVICE CIK_VMID_NUM
+#define KFD_DQM_FIRST_PIPE (0)
+
+struct device_process_node {
+ struct qcm_process_device *qpd;
+ struct list_head list;
+};
+
+/**
+ * struct device_queue_manager
+ *
+ * @create_queue: Queue creation routine.
+ *
+ * @destroy_queue: Queue destruction routine.
+ *
+ * @update_queue: Queue update routine.
+ *
+ * @get_mqd_manager: Returns the mqd manager according to the mqd type.
+ *
+ * @execute_queues: Dispatches the queues list to the H/W.
+ *
+ * @register_process: This routine associates a specific process with a device.
+ *
+ * @unregister_process: destroys the association between a process and a device.
+ *
+ * @initialize: Initializes the pipelines and memory module for that device.
+ *
+ * @start: Initializes the resources/modules the device needs for queue
+ * execution. This function is called on device initialization and after the
+ * system wakes up from suspend.
+ *
+ * @stop: This routine stops execution of all the active queues running on the
+ * H/W; it is called on system suspend.
+ *
+ * @uninitialize: Destroys all the device queue manager resources allocated in
+ * initialize routine.
+ *
+ * @create_kernel_queue: Creates kernel queue. Used for debug queue.
+ *
+ * @destroy_kernel_queue: Destroys kernel queue. Used for debug queue.
+ *
+ * @set_cache_memory_policy: Sets memory policy (cached/non-cached) for the
+ * memory apertures.
+ *
+ * This struct is a base class for the kfd queues scheduler at the
+ * device level. The device base class should expose the basic operations
+ * for queue creation and queue destruction. This base class hides the
+ * scheduling mode of the driver and the specific implementation of the
+ * concrete device. This class is the only class in the queues scheduler
+ * that configures the H/W.
+ */
+
+struct device_queue_manager {
+ int (*create_queue)(struct device_queue_manager *dqm,
+ struct queue *q,
+ struct qcm_process_device *qpd,
+ int *allocate_vmid);
+ int (*destroy_queue)(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ struct queue *q);
+ int (*update_queue)(struct device_queue_manager *dqm,
+ struct queue *q);
+
+ struct mqd_manager * (*get_mqd_manager)
+ (struct device_queue_manager *dqm,
+ enum KFD_MQD_TYPE type);
+
+ int (*register_process)(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd);
+ int (*unregister_process)(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd);
+ int (*initialize)(struct device_queue_manager *dqm);
+ int (*start)(struct device_queue_manager *dqm);
+ int (*stop)(struct device_queue_manager *dqm);
+ void (*uninitialize)(struct device_queue_manager *dqm);
+ int (*create_kernel_queue)(struct device_queue_manager *dqm,
+ struct kernel_queue *kq,
+ struct qcm_process_device *qpd);
+ void (*destroy_kernel_queue)(struct device_queue_manager *dqm,
+ struct kernel_queue *kq,
+ struct qcm_process_device *qpd);
+ bool (*set_cache_memory_policy)(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size);
+
+
+ struct mqd_manager *mqds[KFD_MQD_TYPE_MAX];
+ struct packet_manager packets;
+ struct kfd_dev *dev;
+ struct mutex lock;
+ struct list_head queues;
+ unsigned int processes_count;
+ unsigned int queue_count;
+ unsigned int next_pipe_to_allocate;
+ unsigned int *allocated_queues;
+ unsigned int vmid_bitmap;
+ uint64_t pipelines_addr;
+ struct kfd_mem_obj *pipeline_mem;
+ uint64_t fence_gpu_addr;
+ unsigned int *fence_addr;
+ struct kfd_mem_obj *fence_mem;
+ bool active_runlist;
+};
+
+
+
+#endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
new file mode 100644
index 00000000000..b5791a5c7c0
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -0,0 +1,256 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "kfd_priv.h"
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+/*
+ * This extension supports kernel-level doorbell management for
+ * the kernel queues.
+ * The last doorbell page is devoted to kernel queues, which ensures
+ * that no user process can get access to the kernel doorbell page.
+ */
+static DEFINE_MUTEX(doorbell_mutex);
+static unsigned long doorbell_available_index[
+ DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, BITS_PER_LONG)] = { 0 };
+
+#define KERNEL_DOORBELL_PASID 1
+#define KFD_SIZE_OF_DOORBELL_IN_BYTES 4
+
+/*
+ * Each device exposes a doorbell aperture, a PCI MMIO aperture that
+ * receives 32-bit writes that are passed to queues as wptr values.
+ * The doorbells are intended to be written by applications as part
+ * of queueing work on user-mode queues.
+ * We assign doorbells to applications in PAGE_SIZE-sized and aligned chunks.
+ * We map the doorbell address space into user-mode when a process creates
+ * its first queue on each device.
+ * Although the mapping is done by KFD, it is equivalent to an mmap of
+ * the /dev/kfd with the particular device encoded in the mmap offset.
+ * There will be other uses for mmap of /dev/kfd, so only a range of
+ * offsets (KFD_MMAP_DOORBELL_START-END) is used for doorbells.
+ */
+
+/* # of doorbell bytes allocated for each process. */
+static inline size_t doorbell_process_allocation(void)
+{
+ return roundup(KFD_SIZE_OF_DOORBELL_IN_BYTES *
+ KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
+ PAGE_SIZE);
+}
+
+/* Doorbell calculations for device init. */
+void kfd_doorbell_init(struct kfd_dev *kfd)
+{
+ size_t doorbell_start_offset;
+ size_t doorbell_aperture_size;
+ size_t doorbell_process_limit;
+
+ /*
+ * We start with calculations in bytes because the input data might
+ * only be byte-aligned.
+ * Only after we have done the rounding can we assume any alignment.
+ */
+
+ doorbell_start_offset =
+ roundup(kfd->shared_resources.doorbell_start_offset,
+ doorbell_process_allocation());
+
+ doorbell_aperture_size =
+ rounddown(kfd->shared_resources.doorbell_aperture_size,
+ doorbell_process_allocation());
+
+ if (doorbell_aperture_size > doorbell_start_offset)
+ doorbell_process_limit =
+ (doorbell_aperture_size - doorbell_start_offset) /
+ doorbell_process_allocation();
+ else
+ doorbell_process_limit = 0;
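+	/*
+	 * Illustrative example: a 2 MB doorbell aperture with 4 KiB allocated
+	 * per process gives a doorbell_process_limit of roughly 512.
+	 */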
+
+ kfd->doorbell_base = kfd->shared_resources.doorbell_physical_address +
+ doorbell_start_offset;
+
+ kfd->doorbell_id_offset = doorbell_start_offset / sizeof(u32);
+ kfd->doorbell_process_limit = doorbell_process_limit - 1;
+
+ kfd->doorbell_kernel_ptr = ioremap(kfd->doorbell_base,
+ doorbell_process_allocation());
+
+ BUG_ON(!kfd->doorbell_kernel_ptr);
+
+ pr_debug("kfd: doorbell initialization:\n");
+ pr_debug("kfd: doorbell base == 0x%08lX\n",
+ (uintptr_t)kfd->doorbell_base);
+
+ pr_debug("kfd: doorbell_id_offset == 0x%08lX\n",
+ kfd->doorbell_id_offset);
+
+ pr_debug("kfd: doorbell_process_limit == 0x%08lX\n",
+ doorbell_process_limit);
+
+ pr_debug("kfd: doorbell_kernel_offset == 0x%08lX\n",
+ (uintptr_t)kfd->doorbell_base);
+
+ pr_debug("kfd: doorbell aperture size == 0x%08lX\n",
+ kfd->shared_resources.doorbell_aperture_size);
+
+ pr_debug("kfd: doorbell kernel address == 0x%08lX\n",
+ (uintptr_t)kfd->doorbell_kernel_ptr);
+}
+
+int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma)
+{
+ phys_addr_t address;
+ struct kfd_dev *dev;
+
+ /*
+	 * For simplicity we only allow mapping of the entire doorbell
+ * allocation of a single device & process.
+ */
+ if (vma->vm_end - vma->vm_start != doorbell_process_allocation())
+ return -EINVAL;
+
+ /* Find kfd device according to gpu id */
+ dev = kfd_device_by_id(vma->vm_pgoff);
+ if (dev == NULL)
+ return -EINVAL;
+
+ /* Find if pdd exists for combination of process and gpu id */
+ if (!kfd_get_process_device_data(dev, process, 0))
+ return -EINVAL;
+
+ /* Calculate physical address of doorbell */
+ address = kfd_get_process_doorbells(dev, process);
+
+ vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
+ VM_DONTDUMP | VM_PFNMAP;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ pr_debug("kfd: mapping doorbell page in kfd_doorbell_mmap\n"
+ " target user address == 0x%08llX\n"
+ " physical address == 0x%08llX\n"
+ " vm_flags == 0x%04lX\n"
+ " size == 0x%04lX\n",
+ (unsigned long long) vma->vm_start, address, vma->vm_flags,
+ doorbell_process_allocation());
+
+
+ return io_remap_pfn_range(vma,
+ vma->vm_start,
+ address >> PAGE_SHIFT,
+ doorbell_process_allocation(),
+ vma->vm_page_prot);
+}
+
+
+/* get kernel iomem pointer for a doorbell */
+u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
+ unsigned int *doorbell_off)
+{
+ u32 inx;
+
+ BUG_ON(!kfd || !doorbell_off);
+
+ mutex_lock(&doorbell_mutex);
+ inx = find_first_zero_bit(doorbell_available_index,
+ KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
+
+ __set_bit(inx, doorbell_available_index);
+ mutex_unlock(&doorbell_mutex);
+
+ if (inx >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
+ return NULL;
+
+ /*
+	 * Calculate the kernel doorbell offset using the "fake" kernel
+	 * PASID that is allocated for kernel queues only.
+ */
+ *doorbell_off = KERNEL_DOORBELL_PASID * (doorbell_process_allocation() /
+ sizeof(u32)) + inx;
+
+ pr_debug("kfd: get kernel queue doorbell\n"
+ " doorbell offset == 0x%08d\n"
+ " kernel address == 0x%08lX\n",
+ *doorbell_off, (uintptr_t)(kfd->doorbell_kernel_ptr + inx));
+
+ return kfd->doorbell_kernel_ptr + inx;
+}
+
+void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr)
+{
+ unsigned int inx;
+
+ BUG_ON(!kfd || !db_addr);
+
+ inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr);
+
+ mutex_lock(&doorbell_mutex);
+ __clear_bit(inx, doorbell_available_index);
+ mutex_unlock(&doorbell_mutex);
+}
+
+inline void write_kernel_doorbell(u32 __iomem *db, u32 value)
+{
+ if (db) {
+ writel(value, db);
+ pr_debug("writing %d to doorbell address 0x%p\n", value, db);
+ }
+}
+
+/*
+ * queue_ids are in the range [0,MAX_PROCESS_QUEUES) and are mapped 1:1
+ * to doorbells within the process's doorbell page
+ */
+unsigned int kfd_queue_id_to_doorbell(struct kfd_dev *kfd,
+ struct kfd_process *process,
+ unsigned int queue_id)
+{
+ /*
+ * doorbell_id_offset accounts for doorbells taken by KGD.
+ * pasid * doorbell_process_allocation/sizeof(u32) adjusts
+ * to the process's doorbells
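+	 * (e.g. assuming 4 KiB pages and 1024 queue slots per process this is
+	 *  doorbell_id_offset + pasid * 1024 + queue_id)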
+ */
+ return kfd->doorbell_id_offset +
+ process->pasid * (doorbell_process_allocation()/sizeof(u32)) +
+ queue_id;
+}
+
+uint64_t kfd_get_number_elems(struct kfd_dev *kfd)
+{
+ uint64_t num_of_elems = (kfd->shared_resources.doorbell_aperture_size -
+ kfd->shared_resources.doorbell_start_offset) /
+ doorbell_process_allocation() + 1;
+
+ return num_of_elems;
+
+}
+
+phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
+ struct kfd_process *process)
+{
+ return dev->doorbell_base +
+ process->pasid * doorbell_process_allocation();
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
new file mode 100644
index 00000000000..66df4da01c2
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -0,0 +1,356 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/compat.h>
+#include <uapi/linux/kfd_ioctl.h>
+#include <linux/time.h>
+#include "kfd_priv.h"
+#include <linux/mm.h>
+#include <uapi/asm-generic/mman-common.h>
+#include <asm/processor.h>
+
+/*
+ * The primary memory I/O features being added for revisions of gfxip
+ * beyond 7.0 (Kaveri) are:
+ *
+ * Access to ATC/IOMMU mapped memory w/ associated extension of VA to 48b
+ *
+ * “Flat” shader memory access – These are new shader vector memory
+ * operations that do not reference a T#/V# so a “pointer” is what is
+ * sourced from the vector gprs for direct access to memory.
+ * This pointer space has the Shared(LDS) and Private(Scratch) memory
+ * mapped into this pointer space as apertures.
+ * The hardware then determines how to direct the memory request
+ * based on what apertures the request falls in.
+ *
+ * Unaligned support and alignment check
+ *
+ *
+ * System Unified Address - SUA
+ *
+ * The standard usage for GPU virtual addresses are that they are mapped by
+ * a set of page tables we call GPUVM and these page tables are managed by
+ * a combination of vidMM/driver software components. The current virtual
+ * address (VA) range for GPUVM is 40b.
+ *
+ * As of gfxip7.1 and beyond we’re adding the ability for compute memory
+ * clients (CP/RLC, DMA, SHADER(ifetch, scalar, and vector ops)) to access
+ * the same page tables used by host x86 processors and that are managed by
+ * the operating system. This is via a technique and hardware called ATC/IOMMU.
+ * The GPU has the capability of accessing both the GPUVM and ATC address
+ * spaces for a given VMID (process) simultaneously and we call this feature
+ * system unified address (SUA).
+ *
+ * There are three fundamental address modes of operation for a given VMID
+ * (process) on the GPU:
+ *
+ * HSA64 – 64b pointers and the default address space is ATC
+ * HSA32 – 32b pointers and the default address space is ATC
+ * GPUVM – 64b pointers and the default address space is GPUVM (driver
+ * model mode)
+ *
+ *
+ * HSA64 - ATC/IOMMU 64b
+ *
+ * A 64b pointer in the AMD64/IA64 CPU architecture is not fully utilized
+ * by the CPU so an AMD CPU can only access the high area
+ * (VA[63:47] == 0x1FFFF) and low area (VA[63:47 == 0) of the address space
+ * so the actual VA carried to translation is 48b. There is a “hole” in
+ * the middle of the 64b VA space.
+ *
+ * The GPU not only has access to all of the CPU accessible address space via
+ * ATC/IOMMU, but it also has access to the GPUVM address space. The “system
+ * unified address” feature (SUA) is the mapping of GPUVM and ATC address
+ * spaces into a unified pointer space. The method we take for 64b mode is
+ * to map the full 40b GPUVM address space into the hole of the 64b address
+ * space.
+
+ * The GPUVM_Base/GPUVM_Limit defines the aperture in the 64b space where we
+ * direct requests to be translated via GPUVM page tables instead of the
+ * IOMMU path.
+ *
+ *
+ * 64b to 49b Address conversion
+ *
+ * Note that there are still significant portions of unused regions (holes)
+ * in the 64b address space even for the GPU. There are several places in
+ * the pipeline (sw and hw), we wish to compress the 64b virtual address
+ * to a 49b address. This 49b address is constituted of an “ATC” bit
+ * plus a 48b virtual address. This 49b address is what is passed to the
+ * translation hardware. ATC==0 means the 48b address is a GPUVM address
+ * (max of 2^40 – 1) intended to be translated via GPUVM page tables.
+ * ATC==1 means the 48b address is intended to be translated via IOMMU
+ * page tables.
+ *
+ * A 64b pointer is compared to the apertures that are defined (Base/Limit), in
+ * this case the GPUVM aperture (red) is defined and if a pointer falls in this
+ * aperture, we subtract the GPUVM_Base address and set the ATC bit to zero
+ * as part of the 64b to 49b conversion.
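+ * For example, a 64b pointer equal to GPUVM_Base + 0x1000 is carried to the
+ * translation hardware as the 49b address {ATC = 0, VA = 0x1000}.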
+ *
+ * Where this 64b to 49b conversion is done is a function of the usage.
+ * Most GPU memory access is via memory objects where the driver builds
+ * a descriptor which consists of a base address and a memory access by
+ * the GPU usually consists of some kind of an offset or Cartesian coordinate
+ * that references this memory descriptor. This is the case for shader
+ * instructions that reference the T# or V# constants, or for specified
+ * locations of assets (ex. the shader program location). In these cases
+ * the driver is what handles the 64b to 49b conversion and the base
+ * address in the descriptor (ex. V# or T# or shader program location)
+ * is defined as a 48b address w/ an ATC bit. For this usage a given
+ * memory object cannot straddle multiple apertures in the 64b address
+ * space. For example a shader program cannot jump in/out between ATC
+ * and GPUVM space.
+ *
+ * In some cases we wish to pass a 64b pointer to the GPU hardware and
+ * the GPU hw does the 64b to 49b conversion before passing memory
+ * requests to the cache/memory system. This is the case for the
+ * S_LOAD and FLAT_* shader memory instructions where we have 64b pointers
+ * in scalar and vector GPRs respectively.
+ *
+ * In all cases (no matter where the 64b -> 49b conversion is done), the gfxip
+ * hardware sends a 48b address along w/ an ATC bit, to the memory controller
+ * on the memory request interfaces.
+ *
+ * <client>_MC_rdreq_atc // read request ATC bit
+ *
+ * 0 : <client>_MC_rdreq_addr is a GPUVM VA
+ *
+ * 1 : <client>_MC_rdreq_addr is a ATC VA
+ *
+ *
+ * “Spare” aperture (APE1)
+ *
+ * We use the GPUVM aperture to differentiate ATC vs. GPUVM, but we also use
+ * apertures to set the Mtype field for S_LOAD/FLAT_* ops which is input to the
+ * config tables for setting cache policies. The “spare” (APE1) aperture is
+ * motivated by getting a different Mtype from the default.
+ * The default aperture isn’t an actual base/limit aperture; it is just the
+ * address space that doesn’t hit any defined base/limit apertures.
+ * The following diagram is a complete picture of the gfxip7.x SUA apertures.
+ * The APE1 can be placed either below or above
+ * the hole (cannot be in the hole).
+ *
+ *
+ * General Aperture definitions and rules
+ *
+ * An aperture register definition consists of a Base, Limit, Mtype, and
+ * usually an ATC bit indicating which translation tables that aperture uses.
+ * In all cases (for SUA and DUA apertures discussed later), aperture base
+ * and limit definitions are 64KB aligned.
+ *
+ * <ape>_Base[63:0] = { <ape>_Base_register[63:16], 0x0000 }
+ *
+ * <ape>_Limit[63:0] = { <ape>_Limit_register[63:16], 0xFFFF }
+ *
+ * The base and limit are considered inclusive to an aperture so being
+ * inside an aperture means (address >= Base) AND (address <= Limit).
+ *
+ * In no case is a payload that straddles multiple apertures expected to work.
+ * For example a load_dword_x4 that starts in one aperture and ends in another,
+ * does not work. For the vector FLAT_* ops we have detection capability in
+ * the shader for reporting a “memory violation” back to the
+ * SQ block for use in traps.
+ * A memory violation results when an op falls into the hole,
+ * or a payload straddles multiple apertures. The S_LOAD instruction
+ * does not have this detection.
+ *
+ * Apertures cannot overlap.
+ *
+ *
+ *
+ * HSA32 - ATC/IOMMU 32b
+ *
+ * For HSA32 mode, the pointers are interpreted as 32 bits and use a single GPR
+ * instead of two for the S_LOAD and FLAT_* ops. The entire GPUVM space of 40b
+ * will not fit so there is only partial visibility to the GPUVM
+ * space (defined by the aperture) for S_LOAD and FLAT_* ops.
+ * There is no spare (APE1) aperture for HSA32 mode.
+ *
+ *
+ * GPUVM 64b mode (driver model)
+ *
+ * This mode is similar to HSA64; the difference is that the default
+ * aperture is GPUVM (ATC==0) rather than ATC space.
+ * We have gfxip7.x hardware that has FLAT_* and S_LOAD support for
+ * SUA GPUVM mode, but does not support HSA32/HSA64.
+ *
+ *
+ * Device Unified Address - DUA
+ *
+ * Device unified address (DUA) is the name of the feature that maps the
+ * Shared(LDS) memory and Private(Scratch) memory into the overall address
+ * space for use by the new FLAT_* vector memory ops. The Shared and
+ * Private memories are mapped as apertures into the address space,
+ * and the hardware detects when a FLAT_* memory request is to be redirected
+ * to the LDS or Scratch memory when it falls into one of these apertures.
+ * Like the SUA apertures, the Shared/Private apertures are 64KB aligned and
+ * the base/limit is “in” the aperture. For both HSA64 and GPUVM SUA modes,
+ * the Shared/Private apertures are always placed in a limited selection of
+ * options in the hole of the 64b address space. For HSA32 mode, the
+ * Shared/Private apertures can be placed anywhere in the 32b space
+ * except at 0.
+ *
+ *
+ * HSA64 Apertures for FLAT_* vector ops
+ *
+ * For HSA64 SUA mode, the Shared and Private apertures are always placed
+ * in the hole w/ a limited selection of possible locations. The requests
+ * that fall in the private aperture are expanded as a function of the
+ * work-item id (tid) and redirected to the location of the
+ * “hidden private memory”. The hidden private can be placed in either GPUVM
+ * or ATC space. The addresses that fall in the shared aperture are
+ * re-directed to the on-chip LDS memory hardware.
+ *
+ *
+ * HSA32 Apertures for FLAT_* vector ops
+ *
+ * In HSA32 mode, the Private and Shared apertures can be placed anywhere
+ * in the 32b space except at 0 (Private or Shared Base at zero disables
+ * the apertures). If the base address of an aperture is non-zero
+ * (i.e. the aperture exists), its size is always 64KB.
+ *
+ *
+ * GPUVM Apertures for FLAT_* vector ops
+ *
+ * In GPUVM mode, the Shared/Private apertures are specified identically
+ * to HSA64 mode where they are always in the hole at a limited selection
+ * of locations.
+ *
+ *
+ * Aperture Definitions for SUA and DUA
+ *
+ * The interpretation of the aperture register definitions for a given
+ * VMID is a function of the “SUA Mode” which is one of HSA64, HSA32, or
+ * GPUVM64 discussed in previous sections. The mode is first decoded, and
+ * then the remaining register decode is a function of the mode.
+ *
+ *
+ * SUA Mode Decode
+ *
+ * For the S_LOAD and FLAT_* shader operations, the SUA mode is decoded from
+ * the COMPUTE_DISPATCH_INITIATOR:DATA_ATC bit and
+ * the SH_MEM_CONFIG:PTR32 bits.
+ *
+ * COMPUTE_DISPATCH_INITIATOR:DATA_ATC SH_MEM_CONFIG:PTR32 Mode
+ *
+ * 1 0 HSA64
+ *
+ * 1 1 HSA32
+ *
+ * 0 X GPUVM64
+ *
+ * In general the hardware will ignore the PTR32 bit and treat it as
+ * “0” whenever DATA_ATC = “0”, but software should still set PTR32=0
+ * when DATA_ATC=0.
+ *
+ * The DATA_ATC bit is only set for compute dispatches.
+ * All “Draw” dispatches are hardcoded to GPUVM64 mode
+ * for FLAT_* / S_LOAD operations.
+ */
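As a minimal sketch of the aperture rules just described (illustration only, not driver code; all names and values are hypothetical), the inclusive base/limit check and the 64b to 48b reduction for a pointer in the GPUVM aperture could look like this:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct example_aperture {
    	uint64_t base;	/* 64KB aligned: low 16 bits are 0x0000 */
    	uint64_t limit;	/* 64KB aligned: low 16 bits are 0xFFFF */
    };

    /* Base and limit are inclusive, as stated above. */
    static bool example_in_aperture(const struct example_aperture *ape, uint64_t va)
    {
    	return va >= ape->base && va <= ape->limit;
    }

    int main(void)
    {
    	/* Hypothetical GPUVM aperture covering a 40b range. */
    	struct example_aperture gpuvm = {
    		.base  = 0x2001000000000000ULL,
    		.limit = 0x200100FFFFFFFFFFULL,
    	};
    	uint64_t ptr = gpuvm.base + 0x1000;

    	assert(example_in_aperture(&gpuvm, ptr));
    	/* 64b -> 48b GPUVM VA; the ATC bit would be driven to 0. */
    	assert(ptr - gpuvm.base == 0x1000);
    	return 0;
    }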
+
+#define MAKE_GPUVM_APP_BASE(gpu_num) \
+ (((uint64_t)(gpu_num) << 61) + 0x1000000000000L)
+
+#define MAKE_GPUVM_APP_LIMIT(base) \
+ (((uint64_t)(base) & \
+ 0xFFFFFF0000000000UL) | 0xFFFFFFFFFFL)
+
+#define MAKE_SCRATCH_APP_BASE(gpu_num) \
+ (((uint64_t)(gpu_num) << 61) + 0x100000000L)
+
+#define MAKE_SCRATCH_APP_LIMIT(base) \
+ (((uint64_t)base & 0xFFFFFFFF00000000UL) | 0xFFFFFFFF)
+
+#define MAKE_LDS_APP_BASE(gpu_num) \
+ (((uint64_t)(gpu_num) << 61) + 0x0)
+#define MAKE_LDS_APP_LIMIT(base) \
+ (((uint64_t)(base) & 0xFFFFFFFF00000000UL) | 0xFFFFFFFF)
+
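For reference, a stand-alone sanity check (illustration only, not driver code) of the values these macros produce for the first node (gpu_num == 1):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
    	/* Same arithmetic as the macros above, for gpu_num == 1. */
    	uint64_t lds_base     = ((uint64_t)1 << 61) + 0x0;
    	uint64_t scratch_base = ((uint64_t)1 << 61) + 0x100000000ULL;
    	uint64_t gpuvm_base   = ((uint64_t)1 << 61) + 0x1000000000000ULL;

    	assert(lds_base     == 0x2000000000000000ULL);
    	assert(scratch_base == 0x2000000100000000ULL);
    	assert(gpuvm_base   == 0x2001000000000000ULL);

    	/* The limit macros OR in the low bits, e.g. GPUVM spans a 40b range. */
    	assert(((gpuvm_base & 0xFFFFFF0000000000ULL) | 0xFFFFFFFFFFULL)
    			== 0x200100FFFFFFFFFFULL);
    	return 0;
    }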
+int kfd_init_apertures(struct kfd_process *process)
+{
+ uint8_t id = 0;
+ struct kfd_dev *dev;
+ struct kfd_process_device *pdd;
+
+ mutex_lock(&process->mutex);
+
+ /*Iterating over all devices*/
+ while ((dev = kfd_topology_enum_kfd_devices(id)) != NULL &&
+ id < NUM_OF_SUPPORTED_GPUS) {
+
+ pdd = kfd_get_process_device_data(dev, process, 1);
+
+ /*
+ * For 64 bit process aperture will be statically reserved in
+ * the x86_64 non canonical process address space
+ * amdkfd doesn't currently support apertures for 32 bit process
+ */
+ if (process->is_32bit_user_mode) {
+ pdd->lds_base = pdd->lds_limit = 0;
+ pdd->gpuvm_base = pdd->gpuvm_limit = 0;
+ pdd->scratch_base = pdd->scratch_limit = 0;
+ } else {
+			/*
+			 * The node id must not be 0 - the three MSBs of the
+			 * aperture address must not be 0
+			 */
+ pdd->lds_base = MAKE_LDS_APP_BASE(id + 1);
+
+ pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
+
+ pdd->gpuvm_base = MAKE_GPUVM_APP_BASE(id + 1);
+
+ pdd->gpuvm_limit =
+ MAKE_GPUVM_APP_LIMIT(pdd->gpuvm_base);
+
+ pdd->scratch_base = MAKE_SCRATCH_APP_BASE(id + 1);
+
+ pdd->scratch_limit =
+ MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
+ }
+
+ dev_dbg(kfd_device, "node id %u\n", id);
+ dev_dbg(kfd_device, "gpu id %u\n", pdd->dev->id);
+ dev_dbg(kfd_device, "lds_base %llX\n", pdd->lds_base);
+ dev_dbg(kfd_device, "lds_limit %llX\n", pdd->lds_limit);
+ dev_dbg(kfd_device, "gpuvm_base %llX\n", pdd->gpuvm_base);
+ dev_dbg(kfd_device, "gpuvm_limit %llX\n", pdd->gpuvm_limit);
+ dev_dbg(kfd_device, "scratch_base %llX\n", pdd->scratch_base);
+ dev_dbg(kfd_device, "scratch_limit %llX\n", pdd->scratch_limit);
+
+ id++;
+ }
+
+ mutex_unlock(&process->mutex);
+
+ return 0;
+}
+
+
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
new file mode 100644
index 00000000000..5b999095a1f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * KFD Interrupts.
+ *
+ * AMD GPUs deliver interrupts by pushing an interrupt description onto the
+ * interrupt ring and then sending an interrupt. KGD receives the interrupt
+ * in ISR and sends us a pointer to each new entry on the interrupt ring.
+ *
+ * We generally can't process interrupt-signaled events from ISR, so we call
+ * out to each interrupt client module (currently only the scheduler) to ask if
+ * each interrupt is interesting. If they return true, then it requires further
+ * processing so we copy it to an internal interrupt ring and call each
+ * interrupt client again from a work-queue.
+ *
+ * There's no acknowledgment for the interrupts we use. The hardware simply
+ * queues a new interrupt each time without waiting.
+ *
+ * The fixed-size internal queue means that it's possible for us to lose
+ * interrupts because we have no back-pressure to the hardware.
+ */
+
+#include <linux/slab.h>
+#include <linux/device.h>
+#include "kfd_priv.h"
+
+#define KFD_INTERRUPT_RING_SIZE 256
+
+static void interrupt_wq(struct work_struct *);
+
+int kfd_interrupt_init(struct kfd_dev *kfd)
+{
+ void *interrupt_ring = kmalloc_array(KFD_INTERRUPT_RING_SIZE,
+ kfd->device_info->ih_ring_entry_size,
+ GFP_KERNEL);
+ if (!interrupt_ring)
+ return -ENOMEM;
+
+ kfd->interrupt_ring = interrupt_ring;
+ kfd->interrupt_ring_size =
+ KFD_INTERRUPT_RING_SIZE * kfd->device_info->ih_ring_entry_size;
+ atomic_set(&kfd->interrupt_ring_wptr, 0);
+ atomic_set(&kfd->interrupt_ring_rptr, 0);
+
+ spin_lock_init(&kfd->interrupt_lock);
+
+ INIT_WORK(&kfd->interrupt_work, interrupt_wq);
+
+ kfd->interrupts_active = true;
+
+ /*
+ * After this function returns, the interrupt will be enabled. This
+ * barrier ensures that the interrupt running on a different processor
+ * sees all the above writes.
+ */
+ smp_wmb();
+
+ return 0;
+}
+
+void kfd_interrupt_exit(struct kfd_dev *kfd)
+{
+ /*
+ * Stop the interrupt handler from writing to the ring and scheduling
+ * workqueue items. The spinlock ensures that any interrupt running
+ * after we have unlocked sees interrupts_active = false.
+ */
+ unsigned long flags;
+
+ spin_lock_irqsave(&kfd->interrupt_lock, flags);
+ kfd->interrupts_active = false;
+ spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
+
+ /*
+ * Flush_scheduled_work ensures that there are no outstanding
+ * work-queue items that will access interrupt_ring. New work items
+ * can't be created because we stopped interrupt handling above.
+ */
+ flush_scheduled_work();
+
+ kfree(kfd->interrupt_ring);
+}
+
+/*
+ * This assumes that it can't be called concurrently with itself
+ * but only with dequeue_ih_ring_entry.
+ */
+bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry)
+{
+ unsigned int rptr = atomic_read(&kfd->interrupt_ring_rptr);
+ unsigned int wptr = atomic_read(&kfd->interrupt_ring_wptr);
+
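+	/*
+	 * The ring is considered full while one entry of space still remains,
+	 * so wptr can never catch up with rptr and "full" stays
+	 * distinguishable from "empty" (rptr == wptr).
+	 */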
+ if ((rptr - wptr) % kfd->interrupt_ring_size ==
+ kfd->device_info->ih_ring_entry_size) {
+ /* This is very bad, the system is likely to hang. */
+ dev_err_ratelimited(kfd_chardev(),
+ "Interrupt ring overflow, dropping interrupt.\n");
+ return false;
+ }
+
+ memcpy(kfd->interrupt_ring + wptr, ih_ring_entry,
+ kfd->device_info->ih_ring_entry_size);
+
+ wptr = (wptr + kfd->device_info->ih_ring_entry_size) %
+ kfd->interrupt_ring_size;
+ smp_wmb(); /* Ensure memcpy'd data is visible before wptr update. */
+ atomic_set(&kfd->interrupt_ring_wptr, wptr);
+
+ return true;
+}
+
+/*
+ * This assumes that it can't be called concurrently with itself
+ * but only with enqueue_ih_ring_entry.
+ */
+static bool dequeue_ih_ring_entry(struct kfd_dev *kfd, void *ih_ring_entry)
+{
+ /*
+	 * Assume that work queues provide an implicit barrier, i.e. anything
+	 * that happened in the ISR before it queued work is visible.
+ */
+
+ unsigned int wptr = atomic_read(&kfd->interrupt_ring_wptr);
+ unsigned int rptr = atomic_read(&kfd->interrupt_ring_rptr);
+
+ if (rptr == wptr)
+ return false;
+
+ memcpy(ih_ring_entry, kfd->interrupt_ring + rptr,
+ kfd->device_info->ih_ring_entry_size);
+
+ rptr = (rptr + kfd->device_info->ih_ring_entry_size) %
+ kfd->interrupt_ring_size;
+
+ /*
+ * Ensure the rptr write update is not visible until
+ * memcpy has finished reading.
+ */
+ smp_mb();
+ atomic_set(&kfd->interrupt_ring_rptr, rptr);
+
+ return true;
+}
+
+static void interrupt_wq(struct work_struct *work)
+{
+ struct kfd_dev *dev = container_of(work, struct kfd_dev,
+ interrupt_work);
+
+ uint32_t ih_ring_entry[DIV_ROUND_UP(
+ dev->device_info->ih_ring_entry_size,
+ sizeof(uint32_t))];
+
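+	/* No interrupt clients are wired up yet; just drain the internal ring. */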
+ while (dequeue_ih_ring_entry(dev, ih_ring_entry))
+ ;
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
new file mode 100644
index 00000000000..93507141072
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -0,0 +1,353 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+#include "kfd_kernel_queue.h"
+#include "kfd_priv.h"
+#include "kfd_device_queue_manager.h"
+#include "kfd_pm4_headers.h"
+#include "kfd_pm4_opcodes.h"
+
+#define PM4_COUNT_ZERO (((1 << 15) - 1) << 16)
+
+static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
+ enum kfd_queue_type type, unsigned int queue_size)
+{
+ struct queue_properties prop;
+ int retval;
+ union PM4_MES_TYPE_3_HEADER nop;
+
+ BUG_ON(!kq || !dev);
+ BUG_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ);
+
+ pr_debug("kfd: In func %s initializing queue type %d size %d\n",
+			__func__, type, queue_size);
+
+ nop.opcode = IT_NOP;
+ nop.type = PM4_TYPE_3;
+ nop.u32all |= PM4_COUNT_ZERO;
+
+ kq->dev = dev;
+ kq->nop_packet = nop.u32all;
+ switch (type) {
+ case KFD_QUEUE_TYPE_DIQ:
+ case KFD_QUEUE_TYPE_HIQ:
+ kq->mqd = dev->dqm->get_mqd_manager(dev->dqm,
+ KFD_MQD_TYPE_CIK_HIQ);
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ if (kq->mqd == NULL)
+ return false;
+
+ prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off);
+
+ if (prop.doorbell_ptr == NULL)
+ goto err_get_kernel_doorbell;
+
+ retval = kfd2kgd->allocate_mem(dev->kgd,
+ queue_size,
+ PAGE_SIZE,
+ KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
+ (struct kgd_mem **) &kq->pq);
+
+ if (retval != 0)
+ goto err_pq_allocate_vidmem;
+
+ kq->pq_kernel_addr = kq->pq->cpu_ptr;
+ kq->pq_gpu_addr = kq->pq->gpu_addr;
+
+ retval = kfd2kgd->allocate_mem(dev->kgd,
+ sizeof(*kq->rptr_kernel),
+ 32,
+ KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
+ (struct kgd_mem **) &kq->rptr_mem);
+
+ if (retval != 0)
+ goto err_rptr_allocate_vidmem;
+
+ kq->rptr_kernel = kq->rptr_mem->cpu_ptr;
+ kq->rptr_gpu_addr = kq->rptr_mem->gpu_addr;
+
+ retval = kfd2kgd->allocate_mem(dev->kgd,
+ sizeof(*kq->wptr_kernel),
+ 32,
+ KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
+ (struct kgd_mem **) &kq->wptr_mem);
+
+ if (retval != 0)
+ goto err_wptr_allocate_vidmem;
+
+ kq->wptr_kernel = kq->wptr_mem->cpu_ptr;
+ kq->wptr_gpu_addr = kq->wptr_mem->gpu_addr;
+
+ memset(kq->pq_kernel_addr, 0, queue_size);
+ memset(kq->rptr_kernel, 0, sizeof(*kq->rptr_kernel));
+ memset(kq->wptr_kernel, 0, sizeof(*kq->wptr_kernel));
+
+ prop.queue_size = queue_size;
+ prop.is_interop = false;
+ prop.priority = 1;
+ prop.queue_percent = 100;
+ prop.type = type;
+ prop.vmid = 0;
+ prop.queue_address = kq->pq_gpu_addr;
+ prop.read_ptr = (uint32_t *) kq->rptr_gpu_addr;
+ prop.write_ptr = (uint32_t *) kq->wptr_gpu_addr;
+
+ if (init_queue(&kq->queue, prop) != 0)
+ goto err_init_queue;
+
+ kq->queue->device = dev;
+ kq->queue->process = kfd_get_process(current);
+
+ retval = kq->mqd->init_mqd(kq->mqd, &kq->queue->mqd,
+ &kq->queue->mqd_mem_obj,
+ &kq->queue->gart_mqd_addr,
+ &kq->queue->properties);
+ if (retval != 0)
+ goto err_init_mqd;
+
+ /* assign HIQ to HQD */
+ if (type == KFD_QUEUE_TYPE_HIQ) {
+ pr_debug("assigning hiq to hqd\n");
+ kq->queue->pipe = KFD_CIK_HIQ_PIPE;
+ kq->queue->queue = KFD_CIK_HIQ_QUEUE;
+ kq->mqd->load_mqd(kq->mqd, kq->queue->mqd, kq->queue->pipe,
+ kq->queue->queue, NULL);
+ } else {
+ /* allocate fence for DIQ */
+
+ retval = kfd2kgd->allocate_mem(dev->kgd,
+ sizeof(uint32_t),
+ 32,
+ KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
+ (struct kgd_mem **) &kq->fence_mem_obj);
+
+ if (retval != 0)
+ goto err_alloc_fence;
+
+ kq->fence_kernel_address = kq->fence_mem_obj->cpu_ptr;
+ kq->fence_gpu_addr = kq->fence_mem_obj->gpu_addr;
+ }
+
+ print_queue(kq->queue);
+
+ return true;
+err_alloc_fence:
+err_init_mqd:
+ uninit_queue(kq->queue);
+err_init_queue:
+ kfd2kgd->free_mem(dev->kgd, (struct kgd_mem *) kq->wptr_mem);
+err_wptr_allocate_vidmem:
+ kfd2kgd->free_mem(dev->kgd, (struct kgd_mem *) kq->rptr_mem);
+err_rptr_allocate_vidmem:
+ kfd2kgd->free_mem(dev->kgd, (struct kgd_mem *) kq->pq);
+err_pq_allocate_vidmem:
+ pr_err("kfd: error init pq\n");
+ kfd_release_kernel_doorbell(dev, prop.doorbell_ptr);
+err_get_kernel_doorbell:
+	pr_err("kfd: error init doorbell\n");
+ return false;
+
+}
+
+static void uninitialize(struct kernel_queue *kq)
+{
+ BUG_ON(!kq);
+
+ if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ)
+ kq->mqd->destroy_mqd(kq->mqd,
+ NULL,
+ false,
+ QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
+ kq->queue->pipe,
+ kq->queue->queue);
+
+ kfd2kgd->free_mem(kq->dev->kgd, (struct kgd_mem *) kq->rptr_mem);
+ kfd2kgd->free_mem(kq->dev->kgd, (struct kgd_mem *) kq->wptr_mem);
+ kfd2kgd->free_mem(kq->dev->kgd, (struct kgd_mem *) kq->pq);
+ kfd_release_kernel_doorbell(kq->dev,
+ kq->queue->properties.doorbell_ptr);
+ uninit_queue(kq->queue);
+}
+
+static int acquire_packet_buffer(struct kernel_queue *kq,
+ size_t packet_size_in_dwords, unsigned int **buffer_ptr)
+{
+ size_t available_size;
+ size_t queue_size_dwords;
+ uint32_t wptr, rptr;
+ unsigned int *queue_address;
+
+ BUG_ON(!kq || !buffer_ptr);
+
+ rptr = *kq->rptr_kernel;
+ wptr = *kq->wptr_kernel;
+ queue_address = (unsigned int *)kq->pq_kernel_addr;
+ queue_size_dwords = kq->queue->properties.queue_size / sizeof(uint32_t);
+
+ pr_debug("kfd: In func %s\nrptr: %d\nwptr: %d\nqueue_address 0x%p\n",
+ __func__, rptr, wptr, queue_address);
+
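+	/*
+	 * Keep one dword unused so a full queue is never mistaken for an
+	 * empty one (rptr == wptr means empty), hence the "- 1".
+	 */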
+ available_size = (rptr - 1 - wptr + queue_size_dwords) %
+ queue_size_dwords;
+
+ if (packet_size_in_dwords >= queue_size_dwords ||
+ packet_size_in_dwords >= available_size) {
+ /*
+ * make sure calling functions know
+ * acquire_packet_buffer() failed
+ */
+ *buffer_ptr = NULL;
+ return -ENOMEM;
+ }
+
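+	/*
+	 * Packets must be contiguous in the queue, so if this one would run
+	 * past the end of the buffer, pad the tail with NOP packets and wrap
+	 * the write pointer back to offset 0 first.
+	 */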
+ if (wptr + packet_size_in_dwords >= queue_size_dwords) {
+ while (wptr > 0) {
+ queue_address[wptr] = kq->nop_packet;
+ wptr = (wptr + 1) % queue_size_dwords;
+ }
+ }
+
+ *buffer_ptr = &queue_address[wptr];
+ kq->pending_wptr = wptr + packet_size_in_dwords;
+
+ return 0;
+}
+
+static void submit_packet(struct kernel_queue *kq)
+{
+#ifdef DEBUG
+ int i;
+#endif
+
+ BUG_ON(!kq);
+
+#ifdef DEBUG
+ for (i = *kq->wptr_kernel; i < kq->pending_wptr; i++) {
+ pr_debug("0x%2X ", kq->pq_kernel_addr[i]);
+ if (i % 15 == 0)
+ pr_debug("\n");
+ }
+ pr_debug("\n");
+#endif
+
+ *kq->wptr_kernel = kq->pending_wptr;
+ write_kernel_doorbell(kq->queue->properties.doorbell_ptr,
+ kq->pending_wptr);
+}
+
+static int sync_with_hw(struct kernel_queue *kq, unsigned long timeout_ms)
+{
+ unsigned long org_timeout_ms;
+
+ BUG_ON(!kq);
+
+ org_timeout_ms = timeout_ms;
+ timeout_ms += jiffies * 1000 / HZ;
+ while (*kq->wptr_kernel != *kq->rptr_kernel) {
+ if (time_after(jiffies * 1000 / HZ, timeout_ms)) {
+ pr_err("kfd: kernel_queue %s timeout expired %lu\n",
+ __func__, org_timeout_ms);
+ pr_err("kfd: wptr: %d rptr: %d\n",
+ *kq->wptr_kernel, *kq->rptr_kernel);
+ return -ETIME;
+ }
+ schedule();
+ }
+
+ return 0;
+}
+
+static void rollback_packet(struct kernel_queue *kq)
+{
+ BUG_ON(!kq);
+ kq->pending_wptr = *kq->queue->properties.write_ptr;
+}
+
+struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
+ enum kfd_queue_type type)
+{
+ struct kernel_queue *kq;
+
+ BUG_ON(!dev);
+
+ kq = kzalloc(sizeof(struct kernel_queue), GFP_KERNEL);
+ if (!kq)
+ return NULL;
+
+ kq->initialize = initialize;
+ kq->uninitialize = uninitialize;
+ kq->acquire_packet_buffer = acquire_packet_buffer;
+ kq->submit_packet = submit_packet;
+ kq->sync_with_hw = sync_with_hw;
+ kq->rollback_packet = rollback_packet;
+
+ if (kq->initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE) == false) {
+ pr_err("kfd: failed to init kernel queue\n");
+ kfree(kq);
+ return NULL;
+ }
+ return kq;
+}
+
+void kernel_queue_uninit(struct kernel_queue *kq)
+{
+ BUG_ON(!kq);
+
+ kq->uninitialize(kq);
+ kfree(kq);
+}
+
+static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
+{
+ struct kernel_queue *kq;
+ uint32_t *buffer, i;
+ int retval;
+
+ BUG_ON(!dev);
+
+ pr_debug("kfd: starting kernel queue test\n");
+
+ kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ);
+ BUG_ON(!kq);
+
+ retval = kq->acquire_packet_buffer(kq, 5, &buffer);
+ BUG_ON(retval != 0);
+ for (i = 0; i < 5; i++)
+ buffer[i] = kq->nop_packet;
+ kq->submit_packet(kq);
+ kq->sync_with_hw(kq, 1000);
+
+ pr_debug("kfd: ending kernel queue test\n");
+}
+
+
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
new file mode 100644
index 00000000000..dcd2bdb68d4
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef KFD_KERNEL_QUEUE_H_
+#define KFD_KERNEL_QUEUE_H_
+
+#include <linux/list.h>
+#include <linux/types.h>
+#include "kfd_priv.h"
+
+struct kernel_queue {
+ /* interface */
+ bool (*initialize)(struct kernel_queue *kq, struct kfd_dev *dev,
+ enum kfd_queue_type type, unsigned int queue_size);
+ void (*uninitialize)(struct kernel_queue *kq);
+ int (*acquire_packet_buffer)(struct kernel_queue *kq,
+ size_t packet_size_in_dwords,
+ unsigned int **buffer_ptr);
+
+ void (*submit_packet)(struct kernel_queue *kq);
+ int (*sync_with_hw)(struct kernel_queue *kq,
+ unsigned long timeout_ms);
+ void (*rollback_packet)(struct kernel_queue *kq);
+
+ /* data */
+ struct kfd_dev *dev;
+ struct mqd_manager *mqd;
+ struct queue *queue;
+ uint32_t pending_wptr;
+ unsigned int nop_packet;
+
+ struct kfd_mem_obj *rptr_mem;
+ uint32_t *rptr_kernel;
+ uint64_t rptr_gpu_addr;
+ struct kfd_mem_obj *wptr_mem;
+ uint32_t *wptr_kernel;
+ uint64_t wptr_gpu_addr;
+ struct kfd_mem_obj *pq;
+ uint64_t pq_gpu_addr;
+ uint32_t *pq_kernel_addr;
+
+ struct kfd_mem_obj *fence_mem_obj;
+ uint64_t fence_gpu_addr;
+ void *fence_kernel_address;
+
+ struct list_head list;
+};
+
+#endif /* KFD_KERNEL_QUEUE_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
new file mode 100644
index 00000000000..95d5af138e6
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -0,0 +1,159 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include "kfd_priv.h"
+
+#define KFD_DRIVER_AUTHOR "AMD Inc. and others"
+
+#define KFD_DRIVER_DESC "Standalone HSA driver for AMD's GPUs"
+#define KFD_DRIVER_DATE "20141113"
+#define KFD_DRIVER_MAJOR 0
+#define KFD_DRIVER_MINOR 7
+#define KFD_DRIVER_PATCHLEVEL 0
+
+const struct kfd2kgd_calls *kfd2kgd;
+static const struct kgd2kfd_calls kgd2kfd = {
+ .exit = kgd2kfd_exit,
+ .probe = kgd2kfd_probe,
+ .device_init = kgd2kfd_device_init,
+ .device_exit = kgd2kfd_device_exit,
+ .interrupt = kgd2kfd_interrupt,
+ .suspend = kgd2kfd_suspend,
+ .resume = kgd2kfd_resume,
+};
+
+int sched_policy = KFD_SCHED_POLICY_HWS;
+module_param(sched_policy, int, 0444);
+MODULE_PARM_DESC(sched_policy,
+ "Kernel cmdline parameter that defines the amdkfd scheduling policy");
+
+int max_num_of_processes = KFD_MAX_NUM_OF_PROCESSES_DEFAULT;
+module_param(max_num_of_processes, int, 0444);
+MODULE_PARM_DESC(max_num_of_processes,
+ "Kernel cmdline parameter that defines the amdkfd maximum number of supported processes");
+
+int max_num_of_queues_per_process = KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT;
+module_param(max_num_of_queues_per_process, int, 0444);
+MODULE_PARM_DESC(max_num_of_queues_per_process,
+ "Kernel cmdline parameter that defines the amdkfd maximum number of supported queues per process");
+
+bool kgd2kfd_init(unsigned interface_version,
+ const struct kfd2kgd_calls *f2g,
+ const struct kgd2kfd_calls **g2f)
+{
+ /*
+ * Only one interface version is supported,
+ * no kfd/kgd version skew allowed.
+ */
+ if (interface_version != KFD_INTERFACE_VERSION)
+ return false;
+
+ /* Protection against multiple amd kgd loads */
+ if (kfd2kgd)
+ return true;
+
+ kfd2kgd = f2g;
+ *g2f = &kgd2kfd;
+
+ return true;
+}
+EXPORT_SYMBOL(kgd2kfd_init);
+
+void kgd2kfd_exit(void)
+{
+}
+
+static int __init kfd_module_init(void)
+{
+ int err;
+
+ kfd2kgd = NULL;
+
+ /* Verify module parameters */
+ if ((sched_policy < KFD_SCHED_POLICY_HWS) ||
+ (sched_policy > KFD_SCHED_POLICY_NO_HWS)) {
+ pr_err("kfd: sched_policy has invalid value\n");
+ return -1;
+ }
+
+ /* Verify module parameters */
+ if ((max_num_of_processes < 0) ||
+ (max_num_of_processes > KFD_MAX_NUM_OF_PROCESSES)) {
+		pr_err("kfd: max_num_of_processes must be between 0 and KFD_MAX_NUM_OF_PROCESSES\n");
+ return -1;
+ }
+
+ if ((max_num_of_queues_per_process < 0) ||
+ (max_num_of_queues_per_process >
+ KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)) {
+		pr_err("kfd: max_num_of_queues_per_process must be between 0 and KFD_MAX_NUM_OF_QUEUES_PER_PROCESS\n");
+ return -1;
+ }
+
+ err = kfd_pasid_init();
+ if (err < 0)
+ goto err_pasid;
+
+ err = kfd_chardev_init();
+ if (err < 0)
+ goto err_ioctl;
+
+ err = kfd_topology_init();
+ if (err < 0)
+ goto err_topology;
+
+ kfd_process_create_wq();
+
+ dev_info(kfd_device, "Initialized module\n");
+
+ return 0;
+
+err_topology:
+ kfd_chardev_exit();
+err_ioctl:
+ kfd_pasid_exit();
+err_pasid:
+ return err;
+}
+
+static void __exit kfd_module_exit(void)
+{
+ kfd_process_destroy_wq();
+ kfd_topology_shutdown();
+ kfd_chardev_exit();
+ kfd_pasid_exit();
+ dev_info(kfd_device, "Removed module\n");
+}
+
+module_init(kfd_module_init);
+module_exit(kfd_module_exit);
+
+MODULE_AUTHOR(KFD_DRIVER_AUTHOR);
+MODULE_DESCRIPTION(KFD_DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
+MODULE_VERSION(__stringify(KFD_DRIVER_MAJOR) "."
+ __stringify(KFD_DRIVER_MINOR) "."
+ __stringify(KFD_DRIVER_PATCHLEVEL));
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
new file mode 100644
index 00000000000..adc31474e78
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -0,0 +1,346 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include "kfd_priv.h"
+#include "kfd_mqd_manager.h"
+#include "cik_regs.h"
+#include "../../radeon/cik_reg.h"
+
+inline void busy_wait(unsigned long ms)
+{
+ while (time_before(jiffies, ms))
+ cpu_relax();
+}
+
+static inline struct cik_mqd *get_mqd(void *mqd)
+{
+ return (struct cik_mqd *)mqd;
+}
+
+static int init_mqd(struct mqd_manager *mm, void **mqd,
+ struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
+ struct queue_properties *q)
+{
+ uint64_t addr;
+ struct cik_mqd *m;
+ int retval;
+
+ BUG_ON(!mm || !q || !mqd);
+
+ pr_debug("kfd: In func %s\n", __func__);
+
+ retval = kfd2kgd->allocate_mem(mm->dev->kgd,
+ sizeof(struct cik_mqd),
+ 256,
+ KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
+ (struct kgd_mem **) mqd_mem_obj);
+
+ if (retval != 0)
+ return -ENOMEM;
+
+ m = (struct cik_mqd *) (*mqd_mem_obj)->cpu_ptr;
+ addr = (*mqd_mem_obj)->gpu_addr;
+
+ memset(m, 0, ALIGN(sizeof(struct cik_mqd), 256));
+
+ m->header = 0xC0310800;
+ m->compute_pipelinestat_enable = 1;
+ m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
+
+ /*
+ * Make sure to use the last queue state saved on mqd when the cp
+ * reassigns the queue, so when queue is switched on/off (e.g over
+ * subscription or quantum timeout) the context will be consistent
+ */
+ m->cp_hqd_persistent_state =
+ DEFAULT_CP_HQD_PERSISTENT_STATE | PRELOAD_REQ;
+
+ m->cp_mqd_control = MQD_CONTROL_PRIV_STATE_EN;
+ m->cp_mqd_base_addr_lo = lower_32_bits(addr);
+ m->cp_mqd_base_addr_hi = upper_32_bits(addr);
+
+ m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE | IB_ATC_EN;
+ /* Although WinKFD writes this, I suspect it should not be necessary */
+ m->cp_hqd_ib_control = IB_ATC_EN | DEFAULT_MIN_IB_AVAIL_SIZE;
+
+ m->cp_hqd_quantum = QUANTUM_EN | QUANTUM_SCALE_1MS |
+ QUANTUM_DURATION(10);
+
+ /*
+ * Pipe Priority
+ * Identifies the pipe relative priority when this queue is connected
+ * to the pipeline. The pipe priority is against the GFX pipe and HP3D.
+ * In KFD we are using a fixed pipe priority set to CS_MEDIUM.
+ * 0 = CS_LOW (typically below GFX)
+	 * 1 = CS_MEDIUM (typically between HP3D and GFX)
+ * 2 = CS_HIGH (typically above HP3D)
+ */
+ m->cp_hqd_pipe_priority = 1;
+ m->cp_hqd_queue_priority = 15;
+
+ *mqd = m;
+ if (gart_addr != NULL)
+ *gart_addr = addr;
+ retval = mm->update_mqd(mm, m, q);
+
+ return retval;
+}
+
+static void uninit_mqd(struct mqd_manager *mm, void *mqd,
+ struct kfd_mem_obj *mqd_mem_obj)
+{
+ BUG_ON(!mm || !mqd);
+ kfd2kgd->free_mem(mm->dev->kgd, (struct kgd_mem *) mqd_mem_obj);
+}
+
+static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t __user *wptr)
+{
+ return kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id, wptr);
+
+}
+
+static int update_mqd(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q)
+{
+ struct cik_mqd *m;
+
+ BUG_ON(!mm || !q || !mqd);
+
+ pr_debug("kfd: In func %s\n", __func__);
+
+ m = get_mqd(mqd);
+ m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE |
+ DEFAULT_MIN_AVAIL_SIZE | PQ_ATC_EN;
+
+	/*
+	 * The queue size field is log2(queue size in dwords) - 1. One "- 1"
+	 * converts the 1-based result of ffs() to log2, the other applies
+	 * the required "- 1".
+	 */
+ m->cp_hqd_pq_control |= ffs(q->queue_size / sizeof(unsigned int))
+ - 1 - 1;
+ m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
+ m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
+ m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
+ m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
+ m->cp_hqd_pq_doorbell_control = DOORBELL_EN |
+ DOORBELL_OFFSET(q->doorbell_off);
+
+ m->cp_hqd_vmid = q->vmid;
+
+ if (q->format == KFD_QUEUE_FORMAT_AQL) {
+ m->cp_hqd_iq_rptr = AQL_ENABLE;
+ m->cp_hqd_pq_control |= NO_UPDATE_RPTR;
+ }
+
+ m->cp_hqd_active = 0;
+ q->is_active = false;
+ if (q->queue_size > 0 &&
+ q->queue_address != 0 &&
+ q->queue_percent > 0) {
+ m->cp_hqd_active = 1;
+ q->is_active = true;
+ }
+
+ return 0;
+}
+
+static int destroy_mqd(struct mqd_manager *mm, void *mqd,
+ enum kfd_preempt_type type,
+ unsigned int timeout, uint32_t pipe_id,
+ uint32_t queue_id)
+{
+ return kfd2kgd->hqd_destroy(mm->dev->kgd, type, timeout,
+ pipe_id, queue_id);
+}
+
+static bool is_occupied(struct mqd_manager *mm, void *mqd,
+ uint64_t queue_address, uint32_t pipe_id,
+ uint32_t queue_id)
+{
+
+ return kfd2kgd->hqd_is_occupies(mm->dev->kgd, queue_address,
+ pipe_id, queue_id);
+
+}
+
+/*
+ * HIQ MQD Implementation: the concrete MQD implementation for the HIQ.
+ * The HIQ queue in Kaveri uses the same MQD structure as all the user mode
+ * queues, but with different initial values.
+ */
+
+static int init_mqd_hiq(struct mqd_manager *mm, void **mqd,
+ struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
+ struct queue_properties *q)
+{
+ uint64_t addr;
+ struct cik_mqd *m;
+ int retval;
+
+ BUG_ON(!mm || !q || !mqd || !mqd_mem_obj);
+
+ pr_debug("kfd: In func %s\n", __func__);
+
+ retval = kfd2kgd->allocate_mem(mm->dev->kgd,
+ sizeof(struct cik_mqd),
+ 256,
+ KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
+ (struct kgd_mem **) mqd_mem_obj);
+
+ if (retval != 0)
+ return -ENOMEM;
+
+ m = (struct cik_mqd *) (*mqd_mem_obj)->cpu_ptr;
+ addr = (*mqd_mem_obj)->gpu_addr;
+
+ memset(m, 0, ALIGN(sizeof(struct cik_mqd), 256));
+
+ m->header = 0xC0310800;
+ m->compute_pipelinestat_enable = 1;
+ m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
+
+ m->cp_hqd_persistent_state = DEFAULT_CP_HQD_PERSISTENT_STATE |
+ PRELOAD_REQ;
+ m->cp_hqd_quantum = QUANTUM_EN | QUANTUM_SCALE_1MS |
+ QUANTUM_DURATION(10);
+
+ m->cp_mqd_control = MQD_CONTROL_PRIV_STATE_EN;
+ m->cp_mqd_base_addr_lo = lower_32_bits(addr);
+ m->cp_mqd_base_addr_hi = upper_32_bits(addr);
+
+ m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE;
+
+ /*
+ * Pipe Priority
+ * Identifies the pipe relative priority when this queue is connected
+ * to the pipeline. The pipe priority is against the GFX pipe and HP3D.
+ * In KFD we are using a fixed pipe priority set to CS_MEDIUM.
+ * 0 = CS_LOW (typically below GFX)
+	 * 1 = CS_MEDIUM (typically between HP3D and GFX)
+ * 2 = CS_HIGH (typically above HP3D)
+ */
+ m->cp_hqd_pipe_priority = 1;
+ m->cp_hqd_queue_priority = 15;
+
+ *mqd = m;
+ if (gart_addr)
+ *gart_addr = addr;
+ retval = mm->update_mqd(mm, m, q);
+
+ return retval;
+}
+
+static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q)
+{
+ struct cik_mqd *m;
+
+ BUG_ON(!mm || !q || !mqd);
+
+ pr_debug("kfd: In func %s\n", __func__);
+
+ m = get_mqd(mqd);
+ m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE |
+ DEFAULT_MIN_AVAIL_SIZE |
+ PRIV_STATE |
+ KMD_QUEUE;
+
+	/*
+	 * The queue size field is log2(queue size in dwords) - 1; ffs()
+	 * returns a 1-based bit position, hence the extra "- 1".
+	 */
+ m->cp_hqd_pq_control |= ffs(q->queue_size / sizeof(unsigned int))
+ - 1 - 1;
+ m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
+ m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
+ m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
+ m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
+ m->cp_hqd_pq_doorbell_control = DOORBELL_EN |
+ DOORBELL_OFFSET(q->doorbell_off);
+
+ m->cp_hqd_vmid = q->vmid;
+
+ m->cp_hqd_active = 0;
+ q->is_active = false;
+ if (q->queue_size > 0 &&
+ q->queue_address != 0 &&
+ q->queue_percent > 0) {
+ m->cp_hqd_active = 1;
+ q->is_active = true;
+ }
+
+ return 0;
+}
+
+struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
+ struct kfd_dev *dev)
+{
+ struct mqd_manager *mqd;
+
+ BUG_ON(!dev);
+ BUG_ON(type >= KFD_MQD_TYPE_MAX);
+
+ pr_debug("kfd: In func %s\n", __func__);
+
+ mqd = kzalloc(sizeof(struct mqd_manager), GFP_KERNEL);
+ if (!mqd)
+ return NULL;
+
+ mqd->dev = dev;
+
+ switch (type) {
+ case KFD_MQD_TYPE_CIK_CP:
+ case KFD_MQD_TYPE_CIK_COMPUTE:
+ mqd->init_mqd = init_mqd;
+ mqd->uninit_mqd = uninit_mqd;
+ mqd->load_mqd = load_mqd;
+ mqd->update_mqd = update_mqd;
+ mqd->destroy_mqd = destroy_mqd;
+ mqd->is_occupied = is_occupied;
+ break;
+ case KFD_MQD_TYPE_CIK_HIQ:
+ mqd->init_mqd = init_mqd_hiq;
+ mqd->uninit_mqd = uninit_mqd;
+ mqd->load_mqd = load_mqd;
+ mqd->update_mqd = update_mqd_hiq;
+ mqd->destroy_mqd = destroy_mqd;
+ mqd->is_occupied = is_occupied;
+ break;
+ default:
+ kfree(mqd);
+ return NULL;
+ }
+
+ return mqd;
+}
+
+/* SDMA queues should be implemented here when the CP supports them */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
new file mode 100644
index 00000000000..213a71e0b6c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef KFD_MQD_MANAGER_H_
+#define KFD_MQD_MANAGER_H_
+
+#include "kfd_priv.h"
+
+/**
+ * struct mqd_manager
+ *
+ * @init_mqd: Allocates the mqd buffer on local gpu memory and initialize it.
+ *
+ * @load_mqd: Loads the mqd to a concrete hqd slot. Used only for no cp
+ * scheduling mode.
+ *
+ * @update_mqd: Handles a update call for the MQD
+ *
+ * @destroy_mqd: Destroys the HQD slot and by that preempt the relevant queue.
+ * Used only for no cp scheduling.
+ *
+ * @uninit_mqd: Releases the mqd buffer from local gpu memory.
+ *
+ * @is_occupied: Checks if the relevant HQD slot is occupied.
+ *
+ * @mqd_mutex: Mqd manager mutex.
+ *
+ * @dev: The kfd device structure coupled with this module.
+ *
+ * MQD stands for Memory Queue Descriptor, which represents the current queue
+ * state in memory and is used to initialize the HQD (Hardware Queue
+ * Descriptor) state.
+ * This structure is effectively a base class for the different MQD
+ * structures of the various ASICs that should be supported in the future.
+ * The base class also contains all the MQD specific operations.
+ * Note that each queue has an MQD that keeps its state (or context) across
+ * preemptions and reassignments.
+ * Basically there is one instance of the mqd manager class per MQD type per
+ * ASIC. Currently the kfd driver supports only Kaveri, so there is one
+ * instance per KFD_MQD_TYPE for each device.
+ *
+ */
+
+struct mqd_manager {
+ int (*init_mqd)(struct mqd_manager *mm, void **mqd,
+ struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
+ struct queue_properties *q);
+
+ int (*load_mqd)(struct mqd_manager *mm, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t __user *wptr);
+
+ int (*update_mqd)(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q);
+
+ int (*destroy_mqd)(struct mqd_manager *mm, void *mqd,
+ enum kfd_preempt_type type,
+ unsigned int timeout, uint32_t pipe_id,
+ uint32_t queue_id);
+
+ void (*uninit_mqd)(struct mqd_manager *mm, void *mqd,
+ struct kfd_mem_obj *mqd_mem_obj);
+
+ bool (*is_occupied)(struct mqd_manager *mm, void *mqd,
+ uint64_t queue_address, uint32_t pipe_id,
+ uint32_t queue_id);
+
+ struct mutex mqd_mutex;
+ struct kfd_dev *dev;
+};
+
+#endif /* KFD_MQD_MANAGER_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
new file mode 100644
index 00000000000..5ce9233d200
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -0,0 +1,565 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include "kfd_device_queue_manager.h"
+#include "kfd_kernel_queue.h"
+#include "kfd_priv.h"
+#include "kfd_pm4_headers.h"
+#include "kfd_pm4_opcodes.h"
+
+static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
+ unsigned int buffer_size_bytes)
+{
+ unsigned int temp = *wptr + increment_bytes / sizeof(uint32_t);
+
+ BUG_ON((temp * sizeof(uint32_t)) > buffer_size_bytes);
+ *wptr = temp;
+}
+
+static unsigned int build_pm4_header(unsigned int opcode, size_t packet_size)
+{
+ union PM4_MES_TYPE_3_HEADER header;
+
+ header.u32all = 0;
+ header.opcode = opcode;
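+	/* The PM4 count field is the packet body length in dwords minus one;
+	 * subtracting 2 also accounts for the one-dword header itself. */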
+ header.count = packet_size/sizeof(uint32_t) - 2;
+ header.type = PM4_TYPE_3;
+
+ return header.u32all;
+}
+
+static void pm_calc_rlib_size(struct packet_manager *pm,
+ unsigned int *rlib_size,
+ bool *over_subscription)
+{
+ unsigned int process_count, queue_count;
+
+ BUG_ON(!pm || !rlib_size || !over_subscription);
+
+ process_count = pm->dqm->processes_count;
+ queue_count = pm->dqm->queue_count;
+
+ /* check if there is over subscription*/
+ *over_subscription = false;
+ if ((process_count > 1) ||
+ queue_count > PIPE_PER_ME_CP_SCHEDULING * QUEUES_PER_PIPE) {
+ *over_subscription = true;
+ pr_debug("kfd: over subscribed runlist\n");
+ }
+
+ /* calculate run list ib allocation size */
+ *rlib_size = process_count * sizeof(struct pm4_map_process) +
+ queue_count * sizeof(struct pm4_map_queues);
+
+ /*
+ * Increase the allocation size in case we need a chained run list
+ * when over subscription
+ */
+ if (*over_subscription)
+ *rlib_size += sizeof(struct pm4_runlist);
+
+ pr_debug("kfd: runlist ib size %d\n", *rlib_size);
+}
+
+static int pm_allocate_runlist_ib(struct packet_manager *pm,
+ unsigned int **rl_buffer,
+ uint64_t *rl_gpu_buffer,
+ unsigned int *rl_buffer_size,
+ bool *is_over_subscription)
+{
+ int retval;
+
+ BUG_ON(!pm);
+ BUG_ON(pm->allocated == true);
+ BUG_ON(is_over_subscription == NULL);
+
+ pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);
+
+ retval = kfd2kgd->allocate_mem(pm->dqm->dev->kgd,
+ *rl_buffer_size,
+ PAGE_SIZE,
+ KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
+ (struct kgd_mem **) &pm->ib_buffer_obj);
+
+ if (retval != 0) {
+ pr_err("kfd: failed to allocate runlist IB\n");
+ return retval;
+ }
+
+ *(void **)rl_buffer = pm->ib_buffer_obj->cpu_ptr;
+ *rl_gpu_buffer = pm->ib_buffer_obj->gpu_addr;
+
+ memset(*rl_buffer, 0, *rl_buffer_size);
+ pm->allocated = true;
+ return retval;
+}
+
+static int pm_create_runlist(struct packet_manager *pm, uint32_t *buffer,
+ uint64_t ib, size_t ib_size_in_dwords, bool chain)
+{
+ struct pm4_runlist *packet;
+
+ BUG_ON(!pm || !buffer || !ib);
+
+ packet = (struct pm4_runlist *)buffer;
+
+ memset(buffer, 0, sizeof(struct pm4_runlist));
+ packet->header.u32all = build_pm4_header(IT_RUN_LIST,
+ sizeof(struct pm4_runlist));
+
+ packet->bitfields4.ib_size = ib_size_in_dwords;
+ packet->bitfields4.chain = chain ? 1 : 0;
+ packet->bitfields4.offload_polling = 0;
+ packet->bitfields4.valid = 1;
+ packet->ordinal2 = lower_32_bits(ib);
+ packet->bitfields3.ib_base_hi = upper_32_bits(ib);
+
+ return 0;
+}
+
+static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
+ struct qcm_process_device *qpd)
+{
+ struct pm4_map_process *packet;
+ struct queue *cur;
+ uint32_t num_queues;
+
+ BUG_ON(!pm || !buffer || !qpd);
+
+ packet = (struct pm4_map_process *)buffer;
+
+ pr_debug("kfd: In func %s\n", __func__);
+
+ memset(buffer, 0, sizeof(struct pm4_map_process));
+
+ packet->header.u32all = build_pm4_header(IT_MAP_PROCESS,
+ sizeof(struct pm4_map_process));
+ packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
+ packet->bitfields2.process_quantum = 1;
+ packet->bitfields2.pasid = qpd->pqm->process->pasid;
+ packet->bitfields3.page_table_base = qpd->page_table_base;
+ packet->bitfields10.gds_size = qpd->gds_size;
+ packet->bitfields10.num_gws = qpd->num_gws;
+ packet->bitfields10.num_oac = qpd->num_oac;
+ num_queues = 0;
+ list_for_each_entry(cur, &qpd->queues_list, list)
+ num_queues++;
+ packet->bitfields10.num_queues = num_queues;
+
+ packet->sh_mem_config = qpd->sh_mem_config;
+ packet->sh_mem_bases = qpd->sh_mem_bases;
+ packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base;
+ packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit;
+
+ packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
+ packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);
+
+ return 0;
+}
+
+static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
+ struct queue *q)
+{
+ struct pm4_map_queues *packet;
+
+ BUG_ON(!pm || !buffer || !q);
+
+ pr_debug("kfd: In func %s\n", __func__);
+
+ packet = (struct pm4_map_queues *)buffer;
+ memset(buffer, 0, sizeof(struct pm4_map_queues));
+
+ packet->header.u32all = build_pm4_header(IT_MAP_QUEUES,
+ sizeof(struct pm4_map_queues));
+ packet->bitfields2.alloc_format =
+ alloc_format__mes_map_queues__one_per_pipe;
+ packet->bitfields2.num_queues = 1;
+ packet->bitfields2.queue_sel =
+ queue_sel__mes_map_queues__map_to_hws_determined_queue_slots;
+
+ packet->bitfields2.vidmem = (q->properties.is_interop) ?
+ vidmem__mes_map_queues__uses_video_memory :
+ vidmem__mes_map_queues__uses_no_video_memory;
+
+ switch (q->properties.type) {
+ case KFD_QUEUE_TYPE_COMPUTE:
+ case KFD_QUEUE_TYPE_DIQ:
+ packet->bitfields2.engine_sel =
+ engine_sel__mes_map_queues__compute;
+ break;
+ case KFD_QUEUE_TYPE_SDMA:
+ packet->bitfields2.engine_sel =
+ engine_sel__mes_map_queues__sdma0;
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ packet->mes_map_queues_ordinals[0].bitfields3.doorbell_offset =
+ q->properties.doorbell_off;
+
+ packet->mes_map_queues_ordinals[0].mqd_addr_lo =
+ lower_32_bits(q->gart_mqd_addr);
+
+ packet->mes_map_queues_ordinals[0].mqd_addr_hi =
+ upper_32_bits(q->gart_mqd_addr);
+
+ packet->mes_map_queues_ordinals[0].wptr_addr_lo =
+ lower_32_bits((uint64_t)q->properties.write_ptr);
+
+ packet->mes_map_queues_ordinals[0].wptr_addr_hi =
+ upper_32_bits((uint64_t)q->properties.write_ptr);
+
+ return 0;
+}
+
+static int pm_create_runlist_ib(struct packet_manager *pm,
+ struct list_head *queues,
+ uint64_t *rl_gpu_addr,
+ size_t *rl_size_bytes)
+{
+ unsigned int alloc_size_bytes;
+ unsigned int *rl_buffer, rl_wptr, i;
+ int retval, proccesses_mapped;
+ struct device_process_node *cur;
+ struct qcm_process_device *qpd;
+ struct queue *q;
+ struct kernel_queue *kq;
+ bool is_over_subscription;
+
+ BUG_ON(!pm || !queues || !rl_size_bytes || !rl_gpu_addr);
+
+ rl_wptr = retval = proccesses_mapped = 0;
+
+ retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
+ &alloc_size_bytes, &is_over_subscription);
+ if (retval != 0)
+ return retval;
+
+ *rl_size_bytes = alloc_size_bytes;
+
+ pr_debug("kfd: In func %s\n", __func__);
+ pr_debug("kfd: building runlist ib process count: %d queues count %d\n",
+ pm->dqm->processes_count, pm->dqm->queue_count);
+
+ /* build the run list ib packet */
+ list_for_each_entry(cur, queues, list) {
+ qpd = cur->qpd;
+ /* build map process packet */
+ if (proccesses_mapped >= pm->dqm->processes_count) {
+ pr_debug("kfd: not enough space left in runlist IB\n");
+ pm_release_ib(pm);
+ return -ENOMEM;
+ }
+ retval = pm_create_map_process(pm, &rl_buffer[rl_wptr], qpd);
+ if (retval != 0)
+ return retval;
+ proccesses_mapped++;
+ inc_wptr(&rl_wptr, sizeof(struct pm4_map_process),
+ alloc_size_bytes);
+
+ list_for_each_entry(kq, &qpd->priv_queue_list, list) {
+ if (kq->queue->properties.is_active != true)
+ continue;
+ retval = pm_create_map_queue(pm, &rl_buffer[rl_wptr],
+ kq->queue);
+ if (retval != 0)
+ return retval;
+ inc_wptr(&rl_wptr, sizeof(struct pm4_map_queues),
+ alloc_size_bytes);
+ }
+
+ list_for_each_entry(q, &qpd->queues_list, list) {
+ if (q->properties.is_active != true)
+ continue;
+ retval = pm_create_map_queue(pm,
+ &rl_buffer[rl_wptr], q);
+ if (retval != 0)
+ return retval;
+ inc_wptr(&rl_wptr, sizeof(struct pm4_map_queues),
+ alloc_size_bytes);
+ }
+ }
+
+ pr_debug("kfd: finished map process and queues to runlist\n");
+
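+	/*
+	 * When over-subscribed, chain the runlist back to the start of this
+	 * IB so the CP scheduler keeps cycling through all mapped queues.
+	 */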
+ if (is_over_subscription)
+ pm_create_runlist(pm, &rl_buffer[rl_wptr], *rl_gpu_addr,
+ alloc_size_bytes / sizeof(uint32_t), true);
+
+ for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++)
+ pr_debug("0x%2X ", rl_buffer[i]);
+ pr_debug("\n");
+
+ return 0;
+}
+
+int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
+{
+ BUG_ON(!dqm);
+
+ pm->dqm = dqm;
+ mutex_init(&pm->lock);
+ pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
+ if (pm->priv_queue == NULL) {
+ mutex_destroy(&pm->lock);
+ return -ENOMEM;
+ }
+ pm->allocated = false;
+
+ return 0;
+}
+
+void pm_uninit(struct packet_manager *pm)
+{
+ BUG_ON(!pm);
+
+ mutex_destroy(&pm->lock);
+ kernel_queue_uninit(pm->priv_queue);
+}
+
+int pm_send_set_resources(struct packet_manager *pm,
+ struct scheduling_resources *res)
+{
+ struct pm4_set_resources *packet;
+
+ BUG_ON(!pm || !res);
+
+ pr_debug("kfd: In func %s\n", __func__);
+
+ mutex_lock(&pm->lock);
+ pm->priv_queue->acquire_packet_buffer(pm->priv_queue,
+ sizeof(*packet) / sizeof(uint32_t),
+ (unsigned int **)&packet);
+ if (packet == NULL) {
+ mutex_unlock(&pm->lock);
+ pr_err("kfd: failed to allocate buffer on kernel queue\n");
+ return -ENOMEM;
+ }
+
+ memset(packet, 0, sizeof(struct pm4_set_resources));
+ packet->header.u32all = build_pm4_header(IT_SET_RESOURCES,
+ sizeof(struct pm4_set_resources));
+
+ packet->bitfields2.queue_type =
+ queue_type__mes_set_resources__hsa_interface_queue_hiq;
+ packet->bitfields2.vmid_mask = res->vmid_mask;
+ packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY;
+ packet->bitfields7.oac_mask = res->oac_mask;
+ packet->bitfields8.gds_heap_base = res->gds_heap_base;
+ packet->bitfields8.gds_heap_size = res->gds_heap_size;
+
+ packet->gws_mask_lo = lower_32_bits(res->gws_mask);
+ packet->gws_mask_hi = upper_32_bits(res->gws_mask);
+
+ packet->queue_mask_lo = lower_32_bits(res->queue_mask);
+ packet->queue_mask_hi = upper_32_bits(res->queue_mask);
+
+ pm->priv_queue->submit_packet(pm->priv_queue);
+ pm->priv_queue->sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
+
+ mutex_unlock(&pm->lock);
+
+ return 0;
+}
+
+int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
+{
+ uint64_t rl_gpu_ib_addr;
+ uint32_t *rl_buffer;
+ size_t rl_ib_size, packet_size_dwords;
+ int retval;
+
+ BUG_ON(!pm || !dqm_queues);
+
+ retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
+ &rl_ib_size);
+ if (retval != 0)
+ goto fail_create_runlist_ib;
+
+ pr_debug("kfd: runlist IB address: 0x%llX\n", rl_gpu_ib_addr);
+
+ packet_size_dwords = sizeof(struct pm4_runlist) / sizeof(uint32_t);
+ mutex_lock(&pm->lock);
+
+ retval = pm->priv_queue->acquire_packet_buffer(pm->priv_queue,
+ packet_size_dwords, &rl_buffer);
+ if (retval != 0)
+ goto fail_acquire_packet_buffer;
+
+ retval = pm_create_runlist(pm, rl_buffer, rl_gpu_ib_addr,
+ rl_ib_size / sizeof(uint32_t), false);
+ if (retval != 0)
+ goto fail_create_runlist;
+
+ pm->priv_queue->submit_packet(pm->priv_queue);
+ pm->priv_queue->sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
+
+ mutex_unlock(&pm->lock);
+
+ return retval;
+
+fail_create_runlist:
+ pm->priv_queue->rollback_packet(pm->priv_queue);
+fail_acquire_packet_buffer:
+ mutex_unlock(&pm->lock);
+fail_create_runlist_ib:
+ if (pm->allocated == true)
+ pm_release_ib(pm);
+ return retval;
+}
+
+int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
+ uint32_t fence_value)
+{
+ int retval;
+ struct pm4_query_status *packet;
+
+ BUG_ON(!pm || !fence_address);
+
+ mutex_lock(&pm->lock);
+ retval = pm->priv_queue->acquire_packet_buffer(
+ pm->priv_queue,
+ sizeof(struct pm4_query_status) / sizeof(uint32_t),
+ (unsigned int **)&packet);
+ if (retval != 0)
+ goto fail_acquire_packet_buffer;
+
+ packet->header.u32all = build_pm4_header(IT_QUERY_STATUS,
+ sizeof(struct pm4_query_status));
+
+ packet->bitfields2.context_id = 0;
+ packet->bitfields2.interrupt_sel =
+ interrupt_sel__mes_query_status__completion_status;
+ packet->bitfields2.command =
+ command__mes_query_status__fence_only_after_write_ack;
+
+ packet->addr_hi = upper_32_bits((uint64_t)fence_address);
+ packet->addr_lo = lower_32_bits((uint64_t)fence_address);
+ packet->data_hi = upper_32_bits((uint64_t)fence_value);
+ packet->data_lo = lower_32_bits((uint64_t)fence_value);
+
+ pm->priv_queue->submit_packet(pm->priv_queue);
+ pm->priv_queue->sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
+ mutex_unlock(&pm->lock);
+
+ return 0;
+
+fail_acquire_packet_buffer:
+ mutex_unlock(&pm->lock);
+ return retval;
+}
+
+int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
+ enum kfd_preempt_type_filter mode,
+ uint32_t filter_param, bool reset,
+ unsigned int sdma_engine)
+{
+ int retval;
+ uint32_t *buffer;
+ struct pm4_unmap_queues *packet;
+
+ BUG_ON(!pm);
+
+ mutex_lock(&pm->lock);
+ retval = pm->priv_queue->acquire_packet_buffer(
+ pm->priv_queue,
+ sizeof(struct pm4_unmap_queues) / sizeof(uint32_t),
+ &buffer);
+ if (retval != 0)
+ goto err_acquire_packet_buffer;
+
+ packet = (struct pm4_unmap_queues *)buffer;
+ memset(buffer, 0, sizeof(struct pm4_unmap_queues));
+
+ packet->header.u32all = build_pm4_header(IT_UNMAP_QUEUES,
+ sizeof(struct pm4_unmap_queues));
+ switch (type) {
+ case KFD_QUEUE_TYPE_COMPUTE:
+ case KFD_QUEUE_TYPE_DIQ:
+ packet->bitfields2.engine_sel =
+ engine_sel__mes_unmap_queues__compute;
+ break;
+ case KFD_QUEUE_TYPE_SDMA:
+ packet->bitfields2.engine_sel =
+ engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ if (reset)
+ packet->bitfields2.action =
+ action__mes_unmap_queues__reset_queues;
+ else
+ packet->bitfields2.action =
+ action__mes_unmap_queues__preempt_queues;
+
+ switch (mode) {
+ case KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE:
+ packet->bitfields2.queue_sel =
+ queue_sel__mes_unmap_queues__perform_request_on_specified_queues;
+ packet->bitfields2.num_queues = 1;
+ packet->bitfields3b.doorbell_offset0 = filter_param;
+ break;
+ case KFD_PREEMPT_TYPE_FILTER_BY_PASID:
+ packet->bitfields2.queue_sel =
+ queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
+ packet->bitfields3a.pasid = filter_param;
+ break;
+ case KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES:
+ packet->bitfields2.queue_sel =
+ queue_sel__mes_unmap_queues__perform_request_on_all_active_queues;
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ pm->priv_queue->submit_packet(pm->priv_queue);
+ pm->priv_queue->sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
+
+ mutex_unlock(&pm->lock);
+ return 0;
+
+err_acquire_packet_buffer:
+ mutex_unlock(&pm->lock);
+ return retval;
+}
+
+void pm_release_ib(struct packet_manager *pm)
+{
+ BUG_ON(!pm);
+
+ mutex_lock(&pm->lock);
+ if (pm->allocated) {
+ kfd2kgd->free_mem(pm->dqm->dev->kgd,
+ (struct kgd_mem *) pm->ib_buffer_obj);
+ pm->allocated = false;
+ }
+ mutex_unlock(&pm->lock);
+}
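The four packet-manager entry points above (pm_send_set_resources(), pm_send_runlist(), pm_send_query_status() and pm_release_ib()) are the interface the rest of the driver uses to drive the HIQ. In the full driver the caller is the device queue manager, which is not part of this excerpt; the following is only a hedged sketch of that start-up sequence, with fence memory handling and timeouts simplified.

static int example_start_hw_scheduler(struct packet_manager *pm,
				      struct scheduling_resources *res,
				      struct list_head *queues,
				      uint64_t fence_gpu_addr,
				      uint32_t *fence_cpu_ptr)
{
	int ret;

	ret = pm_send_set_resources(pm, res);	/* vmid/GDS/queue masks for the cp */
	if (ret)
		return ret;

	ret = pm_send_runlist(pm, queues);	/* builds and submits the runlist IB */
	if (ret)
		return ret;

	/* ask the cp to write KFD_FENCE_COMPLETED once the runlist is processed */
	ret = pm_send_query_status(pm, fence_gpu_addr, KFD_FENCE_COMPLETED);
	if (ret)
		return ret;

	while (*fence_cpu_ptr != KFD_FENCE_COMPLETED)
		cpu_relax();	/* a real caller would bound this wait with a timeout */

	return 0;
}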
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
new file mode 100644
index 00000000000..71699ad97d7
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/slab.h>
+#include <linux/types.h>
+#include "kfd_priv.h"
+
+static unsigned long *pasid_bitmap;
+static unsigned int pasid_limit;
+static DEFINE_MUTEX(pasid_mutex);
+
+int kfd_pasid_init(void)
+{
+ pasid_limit = max_num_of_processes;
+
+ pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL);
+ if (!pasid_bitmap)
+ return -ENOMEM;
+
+ set_bit(0, pasid_bitmap); /* PASID 0 is reserved. */
+
+ return 0;
+}
+
+void kfd_pasid_exit(void)
+{
+ kfree(pasid_bitmap);
+}
+
+bool kfd_set_pasid_limit(unsigned int new_limit)
+{
+ if (new_limit < pasid_limit) {
+ bool ok;
+
+ mutex_lock(&pasid_mutex);
+
+ /* ensure that no pasids >= new_limit are in-use */
+ ok = (find_next_bit(pasid_bitmap, pasid_limit, new_limit) ==
+ pasid_limit);
+ if (ok)
+ pasid_limit = new_limit;
+
+ mutex_unlock(&pasid_mutex);
+
+ return ok;
+ }
+
+ return true;
+}
+
+inline unsigned int kfd_get_pasid_limit(void)
+{
+ return pasid_limit;
+}
+
+unsigned int kfd_pasid_alloc(void)
+{
+ unsigned int found;
+
+ mutex_lock(&pasid_mutex);
+
+ found = find_first_zero_bit(pasid_bitmap, pasid_limit);
+ if (found == pasid_limit)
+ found = 0;
+ else
+ set_bit(found, pasid_bitmap);
+
+ mutex_unlock(&pasid_mutex);
+
+ return found;
+}
+
+void kfd_pasid_free(unsigned int pasid)
+{
+ BUG_ON(pasid == 0 || pasid >= pasid_limit);
+ clear_bit(pasid, pasid_bitmap);
+}
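kfd_pasid.c implements a minimal bitmap allocator: PASID 0 is reserved, kfd_pasid_alloc() returns 0 when the bitmap is exhausted, and kfd_set_pasid_limit() only allows lowering the limit while no PASID at or above the new limit is in use. As a hedged illustration (the real negotiation lives in the device initialization code, which is not part of this hunk), a device supporting max_pasid_bits PASID bits might clamp the global limit like this:

static int example_clamp_pasid_limit(unsigned int max_pasid_bits)
{
	unsigned int hw_limit = 1U << max_pasid_bits;

	/* refuse if a PASID above the hardware limit is already allocated */
	if (hw_limit < kfd_get_pasid_limit() && !kfd_set_pasid_limit(hw_limit))
		return -EBUSY;

	return 0;
}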
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h
new file mode 100644
index 00000000000..071ad5724bd
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h
@@ -0,0 +1,405 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef KFD_PM4_HEADERS_H_
+#define KFD_PM4_HEADERS_H_
+
+#ifndef PM4_MES_HEADER_DEFINED
+#define PM4_MES_HEADER_DEFINED
+union PM4_MES_TYPE_3_HEADER {
+ struct {
+ uint32_t reserved1:8; /* < reserved */
+ uint32_t opcode:8; /* < IT opcode */
+ uint32_t count:14; /* < number of DWORDs - 1
+ * in the information body.
+ */
+ uint32_t type:2; /* < packet identifier.
+ * It should be 3 for type 3 packets
+ */
+ };
+ uint32_t u32all;
+};
+#endif /* PM4_MES_HEADER_DEFINED */
+
+/* --------------------MES_SET_RESOURCES-------------------- */
+
+#ifndef PM4_MES_SET_RESOURCES_DEFINED
+#define PM4_MES_SET_RESOURCES_DEFINED
+enum set_resources_queue_type_enum {
+ queue_type__mes_set_resources__kernel_interface_queue_kiq = 0,
+ queue_type__mes_set_resources__hsa_interface_queue_hiq = 1,
+ queue_type__mes_set_resources__hsa_debug_interface_queue = 4
+};
+
+struct pm4_set_resources {
+ union {
+ union PM4_MES_TYPE_3_HEADER header; /* header */
+ uint32_t ordinal1;
+ };
+
+ union {
+ struct {
+ uint32_t vmid_mask:16;
+ uint32_t unmap_latency:8;
+ uint32_t reserved1:5;
+ enum set_resources_queue_type_enum queue_type:3;
+ } bitfields2;
+ uint32_t ordinal2;
+ };
+
+ uint32_t queue_mask_lo;
+ uint32_t queue_mask_hi;
+ uint32_t gws_mask_lo;
+ uint32_t gws_mask_hi;
+
+ union {
+ struct {
+ uint32_t oac_mask:16;
+ uint32_t reserved2:16;
+ } bitfields7;
+ uint32_t ordinal7;
+ };
+
+ union {
+ struct {
+ uint32_t gds_heap_base:6;
+ uint32_t reserved3:5;
+ uint32_t gds_heap_size:6;
+ uint32_t reserved4:15;
+ } bitfields8;
+ uint32_t ordinal8;
+ };
+
+};
+#endif
+
+/*--------------------MES_RUN_LIST-------------------- */
+
+#ifndef PM4_MES_RUN_LIST_DEFINED
+#define PM4_MES_RUN_LIST_DEFINED
+
+struct pm4_runlist {
+ union {
+ union PM4_MES_TYPE_3_HEADER header; /* header */
+ uint32_t ordinal1;
+ };
+
+ union {
+ struct {
+ uint32_t reserved1:2;
+ uint32_t ib_base_lo:30;
+ } bitfields2;
+ uint32_t ordinal2;
+ };
+
+ union {
+ struct {
+ uint32_t ib_base_hi:16;
+ uint32_t reserved2:16;
+ } bitfields3;
+ uint32_t ordinal3;
+ };
+
+ union {
+ struct {
+ uint32_t ib_size:20;
+ uint32_t chain:1;
+ uint32_t offload_polling:1;
+ uint32_t reserved3:1;
+ uint32_t valid:1;
+ uint32_t reserved4:8;
+ } bitfields4;
+ uint32_t ordinal4;
+ };
+
+};
+#endif
+
+/*--------------------MES_MAP_PROCESS-------------------- */
+
+#ifndef PM4_MES_MAP_PROCESS_DEFINED
+#define PM4_MES_MAP_PROCESS_DEFINED
+
+struct pm4_map_process {
+ union {
+ union PM4_MES_TYPE_3_HEADER header; /* header */
+ uint32_t ordinal1;
+ };
+
+ union {
+ struct {
+ uint32_t pasid:16;
+ uint32_t reserved1:8;
+ uint32_t diq_enable:1;
+ uint32_t process_quantum:7;
+ } bitfields2;
+ uint32_t ordinal2;
+ };
+
+ union {
+ struct {
+ uint32_t page_table_base:28;
+ uint32_t reserved3:4;
+ } bitfields3;
+ uint32_t ordinal3;
+ };
+
+ uint32_t sh_mem_bases;
+ uint32_t sh_mem_ape1_base;
+ uint32_t sh_mem_ape1_limit;
+ uint32_t sh_mem_config;
+ uint32_t gds_addr_lo;
+ uint32_t gds_addr_hi;
+
+ union {
+ struct {
+ uint32_t num_gws:6;
+ uint32_t reserved4:2;
+ uint32_t num_oac:4;
+ uint32_t reserved5:4;
+ uint32_t gds_size:6;
+ uint32_t num_queues:10;
+ } bitfields10;
+ uint32_t ordinal10;
+ };
+
+};
+#endif
+
+/*--------------------MES_MAP_QUEUES--------------------*/
+
+#ifndef PM4_MES_MAP_QUEUES_DEFINED
+#define PM4_MES_MAP_QUEUES_DEFINED
+enum map_queues_queue_sel_enum {
+ queue_sel__mes_map_queues__map_to_specified_queue_slots = 0,
+ queue_sel__mes_map_queues__map_to_hws_determined_queue_slots = 1,
+ queue_sel__mes_map_queues__enable_process_queues = 2
+};
+
+enum map_queues_vidmem_enum {
+ vidmem__mes_map_queues__uses_no_video_memory = 0,
+ vidmem__mes_map_queues__uses_video_memory = 1
+};
+
+enum map_queues_alloc_format_enum {
+ alloc_format__mes_map_queues__one_per_pipe = 0,
+ alloc_format__mes_map_queues__all_on_one_pipe = 1
+};
+
+enum map_queues_engine_sel_enum {
+ engine_sel__mes_map_queues__compute = 0,
+ engine_sel__mes_map_queues__sdma0 = 2,
+ engine_sel__mes_map_queues__sdma1 = 3
+};
+
+struct pm4_map_queues {
+ union {
+ union PM4_MES_TYPE_3_HEADER header; /* header */
+ uint32_t ordinal1;
+ };
+
+ union {
+ struct {
+ uint32_t reserved1:4;
+ enum map_queues_queue_sel_enum queue_sel:2;
+ uint32_t reserved2:2;
+ uint32_t vmid:4;
+ uint32_t reserved3:4;
+ enum map_queues_vidmem_enum vidmem:2;
+ uint32_t reserved4:6;
+ enum map_queues_alloc_format_enum alloc_format:2;
+ enum map_queues_engine_sel_enum engine_sel:3;
+ uint32_t num_queues:3;
+ } bitfields2;
+ uint32_t ordinal2;
+ };
+
+ struct {
+ union {
+ struct {
+ uint32_t reserved5:2;
+ uint32_t doorbell_offset:21;
+ uint32_t reserved6:3;
+ uint32_t queue:6;
+ } bitfields3;
+ uint32_t ordinal3;
+ };
+
+ uint32_t mqd_addr_lo;
+ uint32_t mqd_addr_hi;
+ uint32_t wptr_addr_lo;
+ uint32_t wptr_addr_hi;
+
+ } mes_map_queues_ordinals[1]; /* 1..N of these ordinal groups */
+
+};
+#endif
+
+/*--------------------MES_QUERY_STATUS--------------------*/
+
+#ifndef PM4_MES_QUERY_STATUS_DEFINED
+#define PM4_MES_QUERY_STATUS_DEFINED
+enum query_status_interrupt_sel_enum {
+ interrupt_sel__mes_query_status__completion_status = 0,
+ interrupt_sel__mes_query_status__process_status = 1,
+ interrupt_sel__mes_query_status__queue_status = 2
+};
+
+enum query_status_command_enum {
+ command__mes_query_status__interrupt_only = 0,
+ command__mes_query_status__fence_only_immediate = 1,
+ command__mes_query_status__fence_only_after_write_ack = 2,
+ command__mes_query_status__fence_wait_for_write_ack_send_interrupt = 3
+};
+
+enum query_status_engine_sel_enum {
+ engine_sel__mes_query_status__compute = 0,
+ engine_sel__mes_query_status__sdma0_queue = 2,
+ engine_sel__mes_query_status__sdma1_queue = 3
+};
+
+struct pm4_query_status {
+ union {
+ union PM4_MES_TYPE_3_HEADER header; /* header */
+ uint32_t ordinal1;
+ };
+
+ union {
+ struct {
+ uint32_t context_id:28;
+ enum query_status_interrupt_sel_enum interrupt_sel:2;
+ enum query_status_command_enum command:2;
+ } bitfields2;
+ uint32_t ordinal2;
+ };
+
+ union {
+ struct {
+ uint32_t pasid:16;
+ uint32_t reserved1:16;
+ } bitfields3a;
+ struct {
+ uint32_t reserved2:2;
+ uint32_t doorbell_offset:21;
+ uint32_t reserved3:3;
+ enum query_status_engine_sel_enum engine_sel:3;
+ uint32_t reserved4:3;
+ } bitfields3b;
+ uint32_t ordinal3;
+ };
+
+ uint32_t addr_lo;
+ uint32_t addr_hi;
+ uint32_t data_lo;
+ uint32_t data_hi;
+};
+#endif
+
+/*--------------------MES_UNMAP_QUEUES--------------------*/
+
+#ifndef PM4_MES_UNMAP_QUEUES_DEFINED
+#define PM4_MES_UNMAP_QUEUES_DEFINED
+enum unmap_queues_action_enum {
+ action__mes_unmap_queues__preempt_queues = 0,
+ action__mes_unmap_queues__reset_queues = 1,
+ action__mes_unmap_queues__disable_process_queues = 2
+};
+
+enum unmap_queues_queue_sel_enum {
+ queue_sel__mes_unmap_queues__perform_request_on_specified_queues = 0,
+ queue_sel__mes_unmap_queues__perform_request_on_pasid_queues = 1,
+ queue_sel__mes_unmap_queues__perform_request_on_all_active_queues = 2
+};
+
+enum unmap_queues_engine_sel_enum {
+ engine_sel__mes_unmap_queues__compute = 0,
+ engine_sel__mes_unmap_queues__sdma0 = 2,
+ engine_sel__mes_unmap_queues__sdma1 = 3
+};
+
+struct pm4_unmap_queues {
+ union {
+ union PM4_MES_TYPE_3_HEADER header; /* header */
+ uint32_t ordinal1;
+ };
+
+ union {
+ struct {
+ enum unmap_queues_action_enum action:2;
+ uint32_t reserved1:2;
+ enum unmap_queues_queue_sel_enum queue_sel:2;
+ uint32_t reserved2:20;
+ enum unmap_queues_engine_sel_enum engine_sel:3;
+ uint32_t num_queues:3;
+ } bitfields2;
+ uint32_t ordinal2;
+ };
+
+ union {
+ struct {
+ uint32_t pasid:16;
+ uint32_t reserved3:16;
+ } bitfields3a;
+ struct {
+ uint32_t reserved4:2;
+ uint32_t doorbell_offset0:21;
+ uint32_t reserved5:9;
+ } bitfields3b;
+ uint32_t ordinal3;
+ };
+
+ union {
+ struct {
+ uint32_t reserved6:2;
+ uint32_t doorbell_offset1:21;
+ uint32_t reserved7:9;
+ } bitfields4;
+ uint32_t ordinal4;
+ };
+
+ union {
+ struct {
+ uint32_t reserved8:2;
+ uint32_t doorbell_offset2:21;
+ uint32_t reserved9:9;
+ } bitfields5;
+ uint32_t ordinal5;
+ };
+
+ union {
+ struct {
+ uint32_t reserved10:2;
+ uint32_t doorbell_offset3:21;
+ uint32_t reserved11:9;
+ } bitfields6;
+ uint32_t ordinal6;
+ };
+
+};
+#endif
+
+enum {
+ CACHE_FLUSH_AND_INV_TS_EVENT = 0x00000014
+};
+
+#endif /* KFD_PM4_HEADERS_H_ */
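Every packet defined in this header begins with the PM4_MES_TYPE_3_HEADER union, whose count field holds the number of body DWORDs minus one. The packet manager code earlier in this patch fills it through build_pm4_header(), whose definition is not part of this excerpt; a sketch of an equivalent helper, with the count arithmetic stated as an assumption, would be:

static uint32_t example_build_pm4_header(unsigned int opcode, size_t packet_size)
{
	union PM4_MES_TYPE_3_HEADER header;

	header.u32all = 0;
	header.opcode = opcode;
	/* assumed: total DWORDs minus the header DWORD, minus one for the body */
	header.count = packet_size / sizeof(uint32_t) - 2;
	header.type = PM4_TYPE_3;	/* PM4_TYPE_3 is defined in kfd_pm4_opcodes.h */

	return header.u32all;
}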
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_opcodes.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_opcodes.h
new file mode 100644
index 00000000000..b72fa3b8c2d
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_opcodes.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+
+#ifndef KFD_PM4_OPCODES_H
+#define KFD_PM4_OPCODES_H
+
+enum it_opcode_type {
+ IT_NOP = 0x10,
+ IT_SET_BASE = 0x11,
+ IT_CLEAR_STATE = 0x12,
+ IT_INDEX_BUFFER_SIZE = 0x13,
+ IT_DISPATCH_DIRECT = 0x15,
+ IT_DISPATCH_INDIRECT = 0x16,
+ IT_ATOMIC_GDS = 0x1D,
+ IT_OCCLUSION_QUERY = 0x1F,
+ IT_SET_PREDICATION = 0x20,
+ IT_REG_RMW = 0x21,
+ IT_COND_EXEC = 0x22,
+ IT_PRED_EXEC = 0x23,
+ IT_DRAW_INDIRECT = 0x24,
+ IT_DRAW_INDEX_INDIRECT = 0x25,
+ IT_INDEX_BASE = 0x26,
+ IT_DRAW_INDEX_2 = 0x27,
+ IT_CONTEXT_CONTROL = 0x28,
+ IT_INDEX_TYPE = 0x2A,
+ IT_DRAW_INDIRECT_MULTI = 0x2C,
+ IT_DRAW_INDEX_AUTO = 0x2D,
+ IT_NUM_INSTANCES = 0x2F,
+ IT_DRAW_INDEX_MULTI_AUTO = 0x30,
+ IT_INDIRECT_BUFFER_CNST = 0x33,
+ IT_STRMOUT_BUFFER_UPDATE = 0x34,
+ IT_DRAW_INDEX_OFFSET_2 = 0x35,
+ IT_DRAW_PREAMBLE = 0x36,
+ IT_WRITE_DATA = 0x37,
+ IT_DRAW_INDEX_INDIRECT_MULTI = 0x38,
+ IT_MEM_SEMAPHORE = 0x39,
+ IT_COPY_DW = 0x3B,
+ IT_WAIT_REG_MEM = 0x3C,
+ IT_INDIRECT_BUFFER = 0x3F,
+ IT_COPY_DATA = 0x40,
+ IT_PFP_SYNC_ME = 0x42,
+ IT_SURFACE_SYNC = 0x43,
+ IT_COND_WRITE = 0x45,
+ IT_EVENT_WRITE = 0x46,
+ IT_EVENT_WRITE_EOP = 0x47,
+ IT_EVENT_WRITE_EOS = 0x48,
+ IT_RELEASE_MEM = 0x49,
+ IT_PREAMBLE_CNTL = 0x4A,
+ IT_DMA_DATA = 0x50,
+ IT_ACQUIRE_MEM = 0x58,
+ IT_REWIND = 0x59,
+ IT_LOAD_UCONFIG_REG = 0x5E,
+ IT_LOAD_SH_REG = 0x5F,
+ IT_LOAD_CONFIG_REG = 0x60,
+ IT_LOAD_CONTEXT_REG = 0x61,
+ IT_SET_CONFIG_REG = 0x68,
+ IT_SET_CONTEXT_REG = 0x69,
+ IT_SET_CONTEXT_REG_INDIRECT = 0x73,
+ IT_SET_SH_REG = 0x76,
+ IT_SET_SH_REG_OFFSET = 0x77,
+ IT_SET_QUEUE_REG = 0x78,
+ IT_SET_UCONFIG_REG = 0x79,
+ IT_SCRATCH_RAM_WRITE = 0x7D,
+ IT_SCRATCH_RAM_READ = 0x7E,
+ IT_LOAD_CONST_RAM = 0x80,
+ IT_WRITE_CONST_RAM = 0x81,
+ IT_DUMP_CONST_RAM = 0x83,
+ IT_INCREMENT_CE_COUNTER = 0x84,
+ IT_INCREMENT_DE_COUNTER = 0x85,
+ IT_WAIT_ON_CE_COUNTER = 0x86,
+ IT_WAIT_ON_DE_COUNTER_DIFF = 0x88,
+ IT_SWITCH_BUFFER = 0x8B,
+ IT_SET_RESOURCES = 0xA0,
+ IT_MAP_PROCESS = 0xA1,
+ IT_MAP_QUEUES = 0xA2,
+ IT_UNMAP_QUEUES = 0xA3,
+ IT_QUERY_STATUS = 0xA4,
+ IT_RUN_LIST = 0xA5,
+};
+
+#define PM4_TYPE_0 0
+#define PM4_TYPE_2 2
+#define PM4_TYPE_3 3
+
+#endif /* KFD_PM4_OPCODES_H */
+
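Combining the opcodes above with the packet structures from kfd_pm4_headers.h, a RUN_LIST packet would presumably be filled along the following lines. This is only a sketch, not the driver's pm_create_runlist(); in particular, dropping the two low address bits into the reserved1:2 field is an assumption based on the bitfield layout.

static void example_fill_runlist(struct pm4_runlist *rl, uint64_t ib_addr,
				 uint32_t ib_size_dwords, bool chain)
{
	memset(rl, 0, sizeof(*rl));
	rl->header.u32all = example_build_pm4_header(IT_RUN_LIST, sizeof(*rl));
	rl->bitfields2.ib_base_lo = lower_32_bits(ib_addr) >> 2;	/* assumed 4-byte alignment */
	rl->bitfields3.ib_base_hi = upper_32_bits(ib_addr);
	rl->bitfields4.ib_size = ib_size_dwords;
	rl->bitfields4.chain = chain ? 1 : 0;
	rl->bitfields4.valid = 1;
}

Here example_build_pm4_header() is the hypothetical helper sketched after kfd_pm4_headers.h, not a symbol from this patch.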
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
new file mode 100644
index 00000000000..f9fb81e3bb0
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -0,0 +1,600 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef KFD_PRIV_H_INCLUDED
+#define KFD_PRIV_H_INCLUDED
+
+#include <linux/hashtable.h>
+#include <linux/mmu_notifier.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/atomic.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/kfd_ioctl.h>
+#include <kgd_kfd_interface.h>
+
+#define KFD_SYSFS_FILE_MODE 0444
+
+/*
+ * When working with the cp scheduler, the HIQ must be assigned to a fixed
+ * hqd slot, either manually or via the radeon driver. These are the fixed
+ * HIQ hqd slot definitions for Kaveri. On Kaveri only the first ME's queues
+ * participate in cp scheduling, so the HIQ slot is set in the second ME.
+ */
+#define KFD_CIK_HIQ_PIPE 4
+#define KFD_CIK_HIQ_QUEUE 0
+
+/* GPU ID hash width in bits */
+#define KFD_GPU_ID_HASH_WIDTH 16
+
+/* Macro for allocating structures */
+#define kfd_alloc_struct(ptr_to_struct) \
+ ((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))
+
+/* Kernel module parameter to specify maximum number of supported processes */
+extern int max_num_of_processes;
+
+#define KFD_MAX_NUM_OF_PROCESSES_DEFAULT 32
+#define KFD_MAX_NUM_OF_PROCESSES 512
+
+/*
+ * Kernel module parameter to specify maximum number of supported queues
+ * per process
+ */
+extern int max_num_of_queues_per_process;
+
+#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT 128
+#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
+
+#define KFD_KERNEL_QUEUE_SIZE 2048
+
+/* Kernel module parameter to specify the scheduling policy */
+extern int sched_policy;
+
+/**
+ * enum kfd_sched_policy
+ *
+ * @KFD_SCHED_POLICY_HWS: H/W scheduling policy, also known as command
+ * processor (cp) scheduling. In this mode the firmware schedules the user
+ * mode queues and the kernel queues, such as the HIQ and DIQ.
+ * The HIQ is a special queue that dispatches the configuration and the list
+ * of currently running user mode queues to the cp.
+ * The DIQ is a debugging queue that dispatches debugging commands to the
+ * firmware.
+ * In this mode over-subscription of user mode queues is enabled.
+ *
+ * @KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION: The same as above, but with the
+ * over-subscription feature disabled.
+ *
+ * @KFD_SCHED_POLICY_NO_HWS: No H/W scheduling. In this mode the driver sets
+ * the command processor registers and the queues "manually". This mode is
+ * used *ONLY* for debugging purposes.
+ *
+ */
+enum kfd_sched_policy {
+ KFD_SCHED_POLICY_HWS = 0,
+ KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION,
+ KFD_SCHED_POLICY_NO_HWS
+};
+
+enum cache_policy {
+ cache_policy_coherent,
+ cache_policy_noncoherent
+};
+
+struct kfd_device_info {
+ unsigned int max_pasid_bits;
+ size_t ih_ring_entry_size;
+ uint16_t mqd_size_aligned;
+};
+
+struct kfd_dev {
+ struct kgd_dev *kgd;
+
+ const struct kfd_device_info *device_info;
+ struct pci_dev *pdev;
+
+ unsigned int id; /* topology stub index */
+
+ phys_addr_t doorbell_base; /* Start of actual doorbells used by
+ * KFD. It is aligned for mapping
+ * into user mode
+ */
+ size_t doorbell_id_offset; /* Doorbell offset (from KFD doorbell
+ * to HW doorbell, GFX reserved some
+ * at the start)
+ */
+ size_t doorbell_process_limit; /* Number of processes we have doorbell
+ * space for.
+ */
+ u32 __iomem *doorbell_kernel_ptr; /* This is a pointer for a doorbells
+ * page used by kernel queue
+ */
+
+ struct kgd2kfd_shared_resources shared_resources;
+
+ void *interrupt_ring;
+ size_t interrupt_ring_size;
+ atomic_t interrupt_ring_rptr;
+ atomic_t interrupt_ring_wptr;
+ struct work_struct interrupt_work;
+ spinlock_t interrupt_lock;
+
+ /* QCM Device instance */
+ struct device_queue_manager *dqm;
+
+ bool init_complete;
+ /*
+ * Interrupts of interest to KFD are copied
+ * from the HW ring into a SW ring.
+ */
+ bool interrupts_active;
+};
+
+/* KGD2KFD callbacks */
+void kgd2kfd_exit(void);
+struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev);
+bool kgd2kfd_device_init(struct kfd_dev *kfd,
+ const struct kgd2kfd_shared_resources *gpu_resources);
+void kgd2kfd_device_exit(struct kfd_dev *kfd);
+
+extern const struct kfd2kgd_calls *kfd2kgd;
+
+struct kfd_mem_obj {
+ void *bo;
+ uint64_t gpu_addr;
+ uint32_t *cpu_ptr;
+};
+
+enum kfd_mempool {
+ KFD_MEMPOOL_SYSTEM_CACHEABLE = 1,
+ KFD_MEMPOOL_SYSTEM_WRITECOMBINE = 2,
+ KFD_MEMPOOL_FRAMEBUFFER = 3,
+};
+
+/* Character device interface */
+int kfd_chardev_init(void);
+void kfd_chardev_exit(void);
+struct device *kfd_chardev(void);
+
+/**
+ * enum kfd_preempt_type_filter
+ *
+ * @KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE: Preempts a single queue.
+ *
+ * @KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES: Preempts all queues in the
+ * running queues list.
+ *
+ * @KFD_PREEMPT_TYPE_FILTER_BY_PASID: Preempts the queues that belong to a
+ * specific process.
+ *
+ */
+enum kfd_preempt_type_filter {
+ KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE,
+ KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES,
+ KFD_PREEMPT_TYPE_FILTER_BY_PASID
+};
+
+enum kfd_preempt_type {
+ KFD_PREEMPT_TYPE_WAVEFRONT,
+ KFD_PREEMPT_TYPE_WAVEFRONT_RESET
+};
+
+/**
+ * enum kfd_queue_type
+ *
+ * @KFD_QUEUE_TYPE_COMPUTE: Regular user mode queue type.
+ *
+ * @KFD_QUEUE_TYPE_SDMA: Sdma user mode queue type.
+ *
+ * @KFD_QUEUE_TYPE_HIQ: HIQ queue type.
+ *
+ * @KFD_QUEUE_TYPE_DIQ: DIQ queue type.
+ */
+enum kfd_queue_type {
+ KFD_QUEUE_TYPE_COMPUTE,
+ KFD_QUEUE_TYPE_SDMA,
+ KFD_QUEUE_TYPE_HIQ,
+ KFD_QUEUE_TYPE_DIQ
+};
+
+enum kfd_queue_format {
+ KFD_QUEUE_FORMAT_PM4,
+ KFD_QUEUE_FORMAT_AQL
+};
+
+/**
+ * struct queue_properties
+ *
+ * @type: The queue type.
+ *
+ * @queue_id: Queue identifier.
+ *
+ * @queue_address: Queue ring buffer address.
+ *
+ * @queue_size: Queue ring buffer size.
+ *
+ * @priority: Defines the queue priority relative to other queues in the
+ * process.
+ * This is just an indication and the HW scheduler may override the priority
+ * as necessary while keeping the relative prioritization.
+ * The priority granularity is from 0 to f, where f is the highest priority.
+ * Currently all queues are initialized with the highest priority.
+ *
+ * @queue_percent: This field is partially implemented; currently a zero in
+ * this field marks the queue as not active.
+ *
+ * @read_ptr: User space address which holds the number of dwords the cp has
+ * read from the ring buffer. This field is updated automatically by the H/W.
+ *
+ * @write_ptr: Defines the number of dwords written to the ring buffer.
+ *
+ * @doorbell_ptr: Notifies the H/W of new packets written to the queue ring
+ * buffer. Its value should mirror write_ptr and user space should update it
+ * after updating write_ptr.
+ *
+ * @doorbell_off: The doorbell offset in the doorbell pci-bar.
+ *
+ * @is_interop: Defines if this is an interop queue. An interop queue can
+ * access both graphics and compute resources.
+ *
+ * @is_active: Defines if the queue is active or not.
+ *
+ * @vmid: If the scheduling mode is no cp scheduling, this field defines the
+ * vmid of the queue.
+ *
+ * This structure represents the queue properties for each queue, whether it
+ * is a user mode or a kernel mode queue.
+ *
+ */
+struct queue_properties {
+ enum kfd_queue_type type;
+ enum kfd_queue_format format;
+ unsigned int queue_id;
+ uint64_t queue_address;
+ uint64_t queue_size;
+ uint32_t priority;
+ uint32_t queue_percent;
+ uint32_t *read_ptr;
+ uint32_t *write_ptr;
+ uint32_t __iomem *doorbell_ptr;
+ uint32_t doorbell_off;
+ bool is_interop;
+ bool is_active;
+ /* Not relevant for user mode queues in cp scheduling */
+ unsigned int vmid;
+};
+
+/**
+ * struct queue
+ *
+ * @list: Queue linked list.
+ *
+ * @mqd: The queue MQD.
+ *
+ * @mqd_mem_obj: The MQD local gpu memory object.
+ *
+ * @gart_mqd_addr: The MQD gart mc address.
+ *
+ * @properties: The queue properties.
+ *
+ * @mec: Used only in no cp scheduling mode and identifies the micro engine
+ * id that the queue should execute on.
+ *
+ * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe id.
+ *
+ * @queue: Used only in no cp scheduling mode and identifies the queue's slot.
+ *
+ * @process: The kfd process that created this queue.
+ *
+ * @device: The kfd device that created this queue.
+ *
+ * This structure represents user mode compute queues.
+ * It contains all the necessary data to handle such queues.
+ *
+ */
+
+struct queue {
+ struct list_head list;
+ void *mqd;
+ struct kfd_mem_obj *mqd_mem_obj;
+ uint64_t gart_mqd_addr;
+ struct queue_properties properties;
+
+ uint32_t mec;
+ uint32_t pipe;
+ uint32_t queue;
+
+ struct kfd_process *process;
+ struct kfd_dev *device;
+};
+
+/*
+ * Please read the kfd_mqd_manager.h description.
+ */
+enum KFD_MQD_TYPE {
+ KFD_MQD_TYPE_CIK_COMPUTE = 0, /* for no cp scheduling */
+ KFD_MQD_TYPE_CIK_HIQ, /* for hiq */
+ KFD_MQD_TYPE_CIK_CP, /* for cp queues and diq */
+ KFD_MQD_TYPE_CIK_SDMA, /* for sdma queues */
+ KFD_MQD_TYPE_MAX
+};
+
+struct scheduling_resources {
+ unsigned int vmid_mask;
+ enum kfd_queue_type type;
+ uint64_t queue_mask;
+ uint64_t gws_mask;
+ uint32_t oac_mask;
+ uint32_t gds_heap_base;
+ uint32_t gds_heap_size;
+};
+
+struct process_queue_manager {
+ /* data */
+ struct kfd_process *process;
+ unsigned int num_concurrent_processes;
+ struct list_head queues;
+ unsigned long *queue_slot_bitmap;
+};
+
+struct qcm_process_device {
+ /* The Device Queue Manager that owns this data */
+ struct device_queue_manager *dqm;
+ struct process_queue_manager *pqm;
+ /* Device Queue Manager lock */
+ struct mutex *lock;
+ /* Queues list */
+ struct list_head queues_list;
+ struct list_head priv_queue_list;
+
+ unsigned int queue_count;
+ unsigned int vmid;
+ bool is_debug;
+ /*
+ * All the memory management data should be here too
+ */
+ uint64_t gds_context_area;
+ uint32_t sh_mem_config;
+ uint32_t sh_mem_bases;
+ uint32_t sh_mem_ape1_base;
+ uint32_t sh_mem_ape1_limit;
+ uint32_t page_table_base;
+ uint32_t gds_size;
+ uint32_t num_gws;
+ uint32_t num_oac;
+};
+
+/* Data that is per-process-per device. */
+struct kfd_process_device {
+ /*
+ * List of all per-device data for a process.
+ * Starts from kfd_process.per_device_data.
+ */
+ struct list_head per_device_list;
+
+ /* The device that owns this data. */
+ struct kfd_dev *dev;
+
+
+ /* per-process-per device QCM data structure */
+ struct qcm_process_device qpd;
+
+ /*Apertures*/
+ uint64_t lds_base;
+ uint64_t lds_limit;
+ uint64_t gpuvm_base;
+ uint64_t gpuvm_limit;
+ uint64_t scratch_base;
+ uint64_t scratch_limit;
+
+ /* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
+ bool bound;
+};
+
+#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
+
+/* Process data */
+struct kfd_process {
+ /*
+ * kfd_process are stored in an mm_struct*->kfd_process*
+ * hash table (kfd_processes in kfd_process.c)
+ */
+ struct hlist_node kfd_processes;
+
+ struct mm_struct *mm;
+
+ struct mutex mutex;
+
+ /*
+ * In any process, the thread that started main() is the lead
+ * thread and outlives the rest.
+ * It is here because amd_iommu_bind_pasid wants a task_struct.
+ */
+ struct task_struct *lead_thread;
+
+ /* We want to receive a notification when the mm_struct is destroyed */
+ struct mmu_notifier mmu_notifier;
+
+ /* Use for delayed freeing of kfd_process structure */
+ struct rcu_head rcu;
+
+ unsigned int pasid;
+
+ /*
+ * List of kfd_process_device structures,
+ * one for each device the process is using.
+ */
+ struct list_head per_device_data;
+
+ struct process_queue_manager pqm;
+
+ /* The process's queues. */
+ size_t queue_array_size;
+
+ /* Size is queue_array_size, up to MAX_PROCESS_QUEUES. */
+ struct kfd_queue **queues;
+
+ unsigned long allocated_queue_bitmap[DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, BITS_PER_LONG)];
+
+ /*Is the user space process 32 bit?*/
+ bool is_32bit_user_mode;
+};
+
+void kfd_process_create_wq(void);
+void kfd_process_destroy_wq(void);
+struct kfd_process *kfd_create_process(const struct task_struct *);
+struct kfd_process *kfd_get_process(const struct task_struct *);
+
+struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
+ struct kfd_process *p);
+void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid);
+struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
+ struct kfd_process *p,
+ int create_pdd);
+
+/* Process device data iterator */
+struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p);
+struct kfd_process_device *kfd_get_next_process_device_data(struct kfd_process *p,
+ struct kfd_process_device *pdd);
+bool kfd_has_process_device_data(struct kfd_process *p);
+
+/* PASIDs */
+int kfd_pasid_init(void);
+void kfd_pasid_exit(void);
+bool kfd_set_pasid_limit(unsigned int new_limit);
+unsigned int kfd_get_pasid_limit(void);
+unsigned int kfd_pasid_alloc(void);
+void kfd_pasid_free(unsigned int pasid);
+
+/* Doorbells */
+void kfd_doorbell_init(struct kfd_dev *kfd);
+int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma);
+u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
+ unsigned int *doorbell_off);
+void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr);
+u32 read_kernel_doorbell(u32 __iomem *db);
+void write_kernel_doorbell(u32 __iomem *db, u32 value);
+unsigned int kfd_queue_id_to_doorbell(struct kfd_dev *kfd,
+ struct kfd_process *process,
+ unsigned int queue_id);
+
+extern struct device *kfd_device;
+
+/* Topology */
+int kfd_topology_init(void);
+void kfd_topology_shutdown(void);
+int kfd_topology_add_device(struct kfd_dev *gpu);
+int kfd_topology_remove_device(struct kfd_dev *gpu);
+struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
+struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
+struct kfd_dev *kfd_topology_enum_kfd_devices(uint8_t idx);
+
+/* Interrupts */
+int kfd_interrupt_init(struct kfd_dev *dev);
+void kfd_interrupt_exit(struct kfd_dev *dev);
+void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
+bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry);
+
+/* Power Management */
+void kgd2kfd_suspend(struct kfd_dev *kfd);
+int kgd2kfd_resume(struct kfd_dev *kfd);
+
+/* amdkfd Apertures */
+int kfd_init_apertures(struct kfd_process *process);
+
+/* Queue Context Management */
+inline uint32_t lower_32(uint64_t x);
+inline uint32_t upper_32(uint64_t x);
+
+int init_queue(struct queue **q, struct queue_properties properties);
+void uninit_queue(struct queue *q);
+void print_queue_properties(struct queue_properties *q);
+void print_queue(struct queue *q);
+
+struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
+ struct kfd_dev *dev);
+struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev);
+void device_queue_manager_uninit(struct device_queue_manager *dqm);
+struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
+ enum kfd_queue_type type);
+void kernel_queue_uninit(struct kernel_queue *kq);
+
+/* Process Queue Manager */
+struct process_queue_node {
+ struct queue *q;
+ struct kernel_queue *kq;
+ struct list_head process_queue_list;
+};
+
+int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p);
+void pqm_uninit(struct process_queue_manager *pqm);
+int pqm_create_queue(struct process_queue_manager *pqm,
+ struct kfd_dev *dev,
+ struct file *f,
+ struct queue_properties *properties,
+ unsigned int flags,
+ enum kfd_queue_type type,
+ unsigned int *qid);
+int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
+int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
+ struct queue_properties *p);
+
+/* Packet Manager */
+
+#define KFD_HIQ_TIMEOUT (500)
+
+#define KFD_FENCE_COMPLETED (100)
+#define KFD_FENCE_INIT (10)
+#define KFD_UNMAP_LATENCY (150)
+
+struct packet_manager {
+ struct device_queue_manager *dqm;
+ struct kernel_queue *priv_queue;
+ struct mutex lock;
+ bool allocated;
+ struct kfd_mem_obj *ib_buffer_obj;
+};
+
+int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
+void pm_uninit(struct packet_manager *pm);
+int pm_send_set_resources(struct packet_manager *pm,
+ struct scheduling_resources *res);
+int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
+int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
+ uint32_t fence_value);
+
+int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
+ enum kfd_preempt_type_filter mode,
+ uint32_t filter_param, bool reset,
+ unsigned int sdma_engine);
+
+void pm_release_ib(struct packet_manager *pm);
+
+uint64_t kfd_get_number_elems(struct kfd_dev *kfd);
+phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
+ struct kfd_process *process);
+
+#endif
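kfd_priv.h also declares a small iterator over the per-device data of a process (kfd_get_first_process_device_data(), kfd_get_next_process_device_data(), kfd_has_process_device_data()). A minimal usage sketch, assuming only what the header above declares:

static void example_for_each_pdd(struct kfd_process *p)
{
	struct kfd_process_device *pdd;

	if (!kfd_has_process_device_data(p))
		return;

	for (pdd = kfd_get_first_process_device_data(p);
	     pdd != NULL;
	     pdd = kfd_get_next_process_device_data(p, pdd))
		pr_debug("kfd: process %d uses device %u\n",
			 p->pasid, pdd->dev->id);
}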
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
new file mode 100644
index 00000000000..b85eb0b830b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -0,0 +1,410 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/mutex.h>
+#include <linux/log2.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/amd-iommu.h>
+#include <linux/notifier.h>
+struct mm_struct;
+
+#include "kfd_priv.h"
+
+/*
+ * Initial size for the array of queues.
+ * The allocated size is doubled each time
+ * it is exceeded up to MAX_PROCESS_QUEUES.
+ */
+#define INITIAL_QUEUE_ARRAY_SIZE 16
+
+/*
+ * List of struct kfd_process (field kfd_process).
+ * Unique/indexed by mm_struct*
+ */
+#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
+static DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
+static DEFINE_MUTEX(kfd_processes_mutex);
+
+DEFINE_STATIC_SRCU(kfd_processes_srcu);
+
+static struct workqueue_struct *kfd_process_wq;
+
+struct kfd_process_release_work {
+ struct work_struct kfd_work;
+ struct kfd_process *p;
+};
+
+static struct kfd_process *find_process(const struct task_struct *thread);
+static struct kfd_process *create_process(const struct task_struct *thread);
+
+void kfd_process_create_wq(void)
+{
+ if (!kfd_process_wq)
+ kfd_process_wq = create_workqueue("kfd_process_wq");
+}
+
+void kfd_process_destroy_wq(void)
+{
+ if (kfd_process_wq) {
+ flush_workqueue(kfd_process_wq);
+ destroy_workqueue(kfd_process_wq);
+ kfd_process_wq = NULL;
+ }
+}
+
+struct kfd_process *kfd_create_process(const struct task_struct *thread)
+{
+ struct kfd_process *process;
+
+ BUG_ON(!kfd_process_wq);
+
+ if (thread->mm == NULL)
+ return ERR_PTR(-EINVAL);
+
+ /* Only the pthreads threading model is supported. */
+ if (thread->group_leader->mm != thread->mm)
+ return ERR_PTR(-EINVAL);
+
+ /* Take mmap_sem because we call __mmu_notifier_register inside */
+ down_write(&thread->mm->mmap_sem);
+
+ /*
+ * Take the kfd processes mutex before starting process creation
+ * so that two threads of the same process cannot create two
+ * kfd_process structures.
+ */
+ mutex_lock(&kfd_processes_mutex);
+
+ /* A prior open of /dev/kfd could have already created the process. */
+ process = find_process(thread);
+ if (process)
+ pr_debug("kfd: process already found\n");
+
+ if (!process)
+ process = create_process(thread);
+
+ mutex_unlock(&kfd_processes_mutex);
+
+ up_write(&thread->mm->mmap_sem);
+
+ return process;
+}
+
+struct kfd_process *kfd_get_process(const struct task_struct *thread)
+{
+ struct kfd_process *process;
+
+ if (thread->mm == NULL)
+ return ERR_PTR(-EINVAL);
+
+ /* Only the pthreads threading model is supported. */
+ if (thread->group_leader->mm != thread->mm)
+ return ERR_PTR(-EINVAL);
+
+ process = find_process(thread);
+
+ return process;
+}
+
+static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
+{
+ struct kfd_process *process;
+
+ hash_for_each_possible_rcu(kfd_processes_table, process,
+ kfd_processes, (uintptr_t)mm)
+ if (process->mm == mm)
+ return process;
+
+ return NULL;
+}
+
+static struct kfd_process *find_process(const struct task_struct *thread)
+{
+ struct kfd_process *p;
+ int idx;
+
+ idx = srcu_read_lock(&kfd_processes_srcu);
+ p = find_process_by_mm(thread->mm);
+ srcu_read_unlock(&kfd_processes_srcu, idx);
+
+ return p;
+}
+
+static void kfd_process_wq_release(struct work_struct *work)
+{
+ struct kfd_process_release_work *my_work;
+ struct kfd_process_device *pdd, *temp;
+ struct kfd_process *p;
+
+ my_work = (struct kfd_process_release_work *) work;
+
+ p = my_work->p;
+
+ mutex_lock(&p->mutex);
+
+ list_for_each_entry_safe(pdd, temp, &p->per_device_data,
+ per_device_list) {
+ amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);
+ list_del(&pdd->per_device_list);
+
+ kfree(pdd);
+ }
+
+ kfd_pasid_free(p->pasid);
+
+ mutex_unlock(&p->mutex);
+
+ mutex_destroy(&p->mutex);
+
+ kfree(p->queues);
+
+ kfree(p);
+
+ kfree((void *)work);
+}
+
+static void kfd_process_destroy_delayed(struct rcu_head *rcu)
+{
+ struct kfd_process_release_work *work;
+ struct kfd_process *p;
+
+ BUG_ON(!kfd_process_wq);
+
+ p = container_of(rcu, struct kfd_process, rcu);
+ BUG_ON(atomic_read(&p->mm->mm_count) <= 0);
+
+ mmdrop(p->mm);
+
+ work = (struct kfd_process_release_work *)
+ kmalloc(sizeof(struct kfd_process_release_work), GFP_ATOMIC);
+
+ if (work) {
+ INIT_WORK((struct work_struct *) work, kfd_process_wq_release);
+ work->p = p;
+ queue_work(kfd_process_wq, (struct work_struct *) work);
+ }
+}
+
+static void kfd_process_notifier_release(struct mmu_notifier *mn,
+ struct mm_struct *mm)
+{
+ struct kfd_process *p;
+
+ /*
+ * The kfd_process structure cannot be freed because the
+ * mmu_notifier srcu is read locked.
+ */
+ p = container_of(mn, struct kfd_process, mmu_notifier);
+ BUG_ON(p->mm != mm);
+
+ mutex_lock(&kfd_processes_mutex);
+ hash_del_rcu(&p->kfd_processes);
+ mutex_unlock(&kfd_processes_mutex);
+ synchronize_srcu(&kfd_processes_srcu);
+
+ mutex_lock(&p->mutex);
+
+ /* In case our notifier is called before IOMMU notifier */
+ pqm_uninit(&p->pqm);
+
+ mutex_unlock(&p->mutex);
+
+ /*
+ * Because we drop mm_count inside kfd_process_destroy_delayed
+ * and because the mmu_notifier_unregister function also drops
+ * mm_count, we need to take an extra count here.
+ */
+ atomic_inc(&p->mm->mm_count);
+ mmu_notifier_unregister_no_release(&p->mmu_notifier, p->mm);
+ mmu_notifier_call_srcu(&p->rcu, &kfd_process_destroy_delayed);
+}
+
+static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
+ .release = kfd_process_notifier_release,
+};
+
+static struct kfd_process *create_process(const struct task_struct *thread)
+{
+ struct kfd_process *process;
+ int err = -ENOMEM;
+
+ process = kzalloc(sizeof(*process), GFP_KERNEL);
+
+ if (!process)
+ goto err_alloc_process;
+
+ process->queues = kmalloc_array(INITIAL_QUEUE_ARRAY_SIZE,
+ sizeof(process->queues[0]), GFP_KERNEL);
+ if (!process->queues)
+ goto err_alloc_queues;
+
+ process->pasid = kfd_pasid_alloc();
+ if (process->pasid == 0)
+ goto err_alloc_pasid;
+
+ mutex_init(&process->mutex);
+
+ process->mm = thread->mm;
+
+ /* register notifier */
+ process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops;
+ err = __mmu_notifier_register(&process->mmu_notifier, process->mm);
+ if (err)
+ goto err_mmu_notifier;
+
+ hash_add_rcu(kfd_processes_table, &process->kfd_processes,
+ (uintptr_t)process->mm);
+
+ process->lead_thread = thread->group_leader;
+
+ process->queue_array_size = INITIAL_QUEUE_ARRAY_SIZE;
+
+ INIT_LIST_HEAD(&process->per_device_data);
+
+ err = pqm_init(&process->pqm, process);
+ if (err != 0)
+ goto err_process_pqm_init;
+
+ return process;
+
+err_process_pqm_init:
+ hash_del_rcu(&process->kfd_processes);
+ synchronize_rcu();
+ mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm);
+err_mmu_notifier:
+ kfd_pasid_free(process->pasid);
+err_alloc_pasid:
+ kfree(process->queues);
+err_alloc_queues:
+ kfree(process);
+err_alloc_process:
+ return ERR_PTR(err);
+}
+
+struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
+ struct kfd_process *p,
+ int create_pdd)
+{
+ struct kfd_process_device *pdd = NULL;
+
+ list_for_each_entry(pdd, &p->per_device_data, per_device_list)
+ if (pdd->dev == dev)
+ return pdd;
+
+ if (create_pdd) {
+ pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
+ if (pdd != NULL) {
+ pdd->dev = dev;
+ INIT_LIST_HEAD(&pdd->qpd.queues_list);
+ INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
+ pdd->qpd.dqm = dev->dqm;
+ list_add(&pdd->per_device_list, &p->per_device_data);
+ }
+ }
+
+ return pdd;
+}
+
+/*
+ * Direct the IOMMU to bind the process (specifically the pasid->mm)
+ * to the device.
+ * Unbinding occurs when the process dies or the device is removed.
+ *
+ * Assumes that the process lock is held.
+ */
+struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
+ struct kfd_process *p)
+{
+ struct kfd_process_device *pdd = kfd_get_process_device_data(dev, p, 1);
+ int err;
+
+ if (pdd == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ if (pdd->bound)
+ return pdd;
+
+ err = amd_iommu_bind_pasid(dev->pdev, p->pasid, p->lead_thread);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ pdd->bound = true;
+
+ return pdd;
+}
+
+void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
+{
+ struct kfd_process *p;
+ struct kfd_process_device *pdd;
+ int idx, i;
+
+ BUG_ON(dev == NULL);
+
+ idx = srcu_read_lock(&kfd_processes_srcu);
+
+ hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes)
+ if (p->pasid == pasid)
+ break;
+
+ srcu_read_unlock(&kfd_processes_srcu, idx);
+
+ BUG_ON(p->pasid != pasid);
+
+ mutex_lock(&p->mutex);
+
+ pqm_uninit(&p->pqm);
+
+ pdd = kfd_get_process_device_data(dev, p, 0);
+
+ /*
+ * Just mark pdd as unbound, because we still need it to call
+ * amd_iommu_unbind_pasid() when the process exits.
+ * We don't call amd_iommu_unbind_pasid() here
+ * because the IOMMU called us.
+ */
+ if (pdd)
+ pdd->bound = false;
+
+ mutex_unlock(&p->mutex);
+}
+
+struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p)
+{
+ return list_first_entry(&p->per_device_data,
+ struct kfd_process_device,
+ per_device_list);
+}
+
+struct kfd_process_device *kfd_get_next_process_device_data(struct kfd_process *p,
+ struct kfd_process_device *pdd)
+{
+ if (list_is_last(&pdd->per_device_list, &p->per_device_data))
+ return NULL;
+ return list_next_entry(pdd, per_device_list);
+}
+
+bool kfd_has_process_device_data(struct kfd_process *p)
+{
+ return !(list_empty(&p->per_device_data));
+}
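The expected call sequence from the ioctl layer (which is not part of this hunk) is: kfd_create_process() on open(), then kfd_get_process() plus kfd_bind_process_to_device() whenever a device is actually used. Since kfd_bind_process_to_device() assumes the process mutex is held, a hedged sketch of that path looks like:

static struct kfd_process_device *example_bind_current(struct kfd_dev *dev)
{
	struct kfd_process *p;
	struct kfd_process_device *pdd;

	p = kfd_get_process(current);
	if (IS_ERR_OR_NULL(p))	/* not created yet, or not a supported threading model */
		return ERR_PTR(-EINVAL);

	mutex_lock(&p->mutex);
	pdd = kfd_bind_process_to_device(dev, p);
	mutex_unlock(&p->mutex);

	return pdd;
}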
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
new file mode 100644
index 00000000000..47526780d73
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -0,0 +1,343 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/list.h>
+#include "kfd_device_queue_manager.h"
+#include "kfd_priv.h"
+#include "kfd_kernel_queue.h"
+
+static inline struct process_queue_node *get_queue_by_qid(
+ struct process_queue_manager *pqm, unsigned int qid)
+{
+ struct process_queue_node *pqn;
+
+ BUG_ON(!pqm);
+
+ list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
+ if (pqn->q && pqn->q->properties.queue_id == qid)
+ return pqn;
+ if (pqn->kq && pqn->kq->queue->properties.queue_id == qid)
+ return pqn;
+ }
+
+ return NULL;
+}
+
+static int find_available_queue_slot(struct process_queue_manager *pqm,
+ unsigned int *qid)
+{
+ unsigned long found;
+
+ BUG_ON(!pqm || !qid);
+
+ pr_debug("kfd: in %s\n", __func__);
+
+ found = find_first_zero_bit(pqm->queue_slot_bitmap,
+ max_num_of_queues_per_process);
+
+ pr_debug("kfd: the new slot id %lu\n", found);
+
+ if (found >= max_num_of_queues_per_process) {
+ pr_info("amdkfd: Can not open more queues for process with pasid %d\n",
+ pqm->process->pasid);
+ return -ENOMEM;
+ }
+
+ set_bit(found, pqm->queue_slot_bitmap);
+ *qid = found;
+
+ return 0;
+}
+
+int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
+{
+ BUG_ON(!pqm);
+
+ INIT_LIST_HEAD(&pqm->queues);
+ pqm->queue_slot_bitmap =
+ kzalloc(DIV_ROUND_UP(max_num_of_queues_per_process,
+ BITS_PER_BYTE), GFP_KERNEL);
+ if (pqm->queue_slot_bitmap == NULL)
+ return -ENOMEM;
+ pqm->process = p;
+
+ return 0;
+}
+
+void pqm_uninit(struct process_queue_manager *pqm)
+{
+ int retval;
+ struct process_queue_node *pqn, *next;
+
+ BUG_ON(!pqm);
+
+ pr_debug("In func %s\n", __func__);
+
+ list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
+ retval = pqm_destroy_queue(
+ pqm,
+ (pqn->q != NULL) ?
+ pqn->q->properties.queue_id :
+ pqn->kq->queue->properties.queue_id);
+
+ if (retval != 0) {
+ pr_err("kfd: failed to destroy queue\n");
+ return;
+ }
+ }
+ kfree(pqm->queue_slot_bitmap);
+ pqm->queue_slot_bitmap = NULL;
+}
+
+static int create_cp_queue(struct process_queue_manager *pqm,
+ struct kfd_dev *dev, struct queue **q,
+ struct queue_properties *q_properties,
+ struct file *f, unsigned int qid)
+{
+ int retval;
+
+ retval = 0;
+
+ /* Doorbell initialized in user space*/
+ q_properties->doorbell_ptr = NULL;
+
+ q_properties->doorbell_off =
+ kfd_queue_id_to_doorbell(dev, pqm->process, qid);
+
+ /* let DQM handle it*/
+ q_properties->vmid = 0;
+ q_properties->queue_id = qid;
+ q_properties->type = KFD_QUEUE_TYPE_COMPUTE;
+
+ retval = init_queue(q, *q_properties);
+ if (retval != 0)
+ goto err_init_queue;
+
+ (*q)->device = dev;
+ (*q)->process = pqm->process;
+
+ pr_debug("kfd: PQM After init queue");
+
+ return retval;
+
+err_init_queue:
+ return retval;
+}
+
+int pqm_create_queue(struct process_queue_manager *pqm,
+ struct kfd_dev *dev,
+ struct file *f,
+ struct queue_properties *properties,
+ unsigned int flags,
+ enum kfd_queue_type type,
+ unsigned int *qid)
+{
+ int retval;
+ struct kfd_process_device *pdd;
+ struct queue_properties q_properties;
+ struct queue *q;
+ struct process_queue_node *pqn;
+ struct kernel_queue *kq;
+
+ BUG_ON(!pqm || !dev || !properties || !qid);
+
+ memset(&q_properties, 0, sizeof(struct queue_properties));
+ memcpy(&q_properties, properties, sizeof(struct queue_properties));
+ q = NULL;
+ kq = NULL;
+
+ pdd = kfd_get_process_device_data(dev, pqm->process, 1);
+ BUG_ON(!pdd);
+
+ retval = find_available_queue_slot(pqm, qid);
+ if (retval != 0)
+ return retval;
+
+ if (list_empty(&pqm->queues)) {
+ pdd->qpd.pqm = pqm;
+ dev->dqm->register_process(dev->dqm, &pdd->qpd);
+ }
+
+ pqn = kzalloc(sizeof(struct process_queue_node), GFP_KERNEL);
+ if (!pqn) {
+ retval = -ENOMEM;
+ goto err_allocate_pqn;
+ }
+
+ switch (type) {
+ case KFD_QUEUE_TYPE_COMPUTE:
+ /* check if there is over subscription */
+ if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
+ ((dev->dqm->processes_count >= VMID_PER_DEVICE) ||
+ (dev->dqm->queue_count >= PIPE_PER_ME_CP_SCHEDULING * QUEUES_PER_PIPE))) {
+ pr_err("kfd: over-subscription is not allowed in radeon_kfd.sched_policy == 1\n");
+ retval = -EPERM;
+ goto err_create_queue;
+ }
+
+ retval = create_cp_queue(pqm, dev, &q, &q_properties, f, *qid);
+ if (retval != 0)
+ goto err_create_queue;
+ pqn->q = q;
+ pqn->kq = NULL;
+ retval = dev->dqm->create_queue(dev->dqm, q, &pdd->qpd,
+ &q->properties.vmid);
+ print_queue(q);
+ break;
+ case KFD_QUEUE_TYPE_DIQ:
+ kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_DIQ);
+ if (kq == NULL) {
+ retval = -ENOMEM;
+ goto err_create_queue;
+ }
+ kq->queue->properties.queue_id = *qid;
+ pqn->kq = kq;
+ pqn->q = NULL;
+ retval = dev->dqm->create_kernel_queue(dev->dqm, kq, &pdd->qpd);
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ if (retval != 0) {
+ pr_err("kfd: error dqm create queue\n");
+ goto err_create_queue;
+ }
+
+ pr_debug("kfd: PQM After DQM create queue\n");
+
+ list_add(&pqn->process_queue_list, &pqm->queues);
+
+ if (q) {
+ *properties = q->properties;
+ pr_debug("kfd: PQM done creating queue\n");
+ print_queue_properties(properties);
+ }
+
+ return retval;
+
+err_create_queue:
+ kfree(pqn);
+err_allocate_pqn:
+ clear_bit(*qid, pqm->queue_slot_bitmap);
+ return retval;
+}
+
+int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
+{
+ struct process_queue_node *pqn;
+ struct kfd_process_device *pdd;
+ struct device_queue_manager *dqm;
+ struct kfd_dev *dev;
+ int retval;
+
+ dqm = NULL;
+
+ BUG_ON(!pqm);
+ retval = 0;
+
+ pr_debug("kfd: In Func %s\n", __func__);
+
+ pqn = get_queue_by_qid(pqm, qid);
+ if (pqn == NULL) {
+ pr_err("kfd: queue id does not match any known queue\n");
+ return -EINVAL;
+ }
+
+ dev = NULL;
+ if (pqn->kq)
+ dev = pqn->kq->dev;
+ if (pqn->q)
+ dev = pqn->q->device;
+ BUG_ON(!dev);
+
+ pdd = kfd_get_process_device_data(dev, pqm->process, 1);
+ BUG_ON(!pdd);
+
+ if (pqn->kq) {
+ /* destroy kernel queue (DIQ) */
+ dqm = pqn->kq->dev->dqm;
+ dqm->destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);
+ kernel_queue_uninit(pqn->kq);
+ }
+
+ if (pqn->q) {
+ dqm = pqn->q->device->dqm;
+ retval = dqm->destroy_queue(dqm, &pdd->qpd, pqn->q);
+ if (retval != 0)
+ return retval;
+
+ uninit_queue(pqn->q);
+ }
+
+ list_del(&pqn->process_queue_list);
+ kfree(pqn);
+ clear_bit(qid, pqm->queue_slot_bitmap);
+
+ if (list_empty(&pqm->queues))
+ dqm->unregister_process(dqm, &pdd->qpd);
+
+ return retval;
+}
+
+int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
+ struct queue_properties *p)
+{
+ int retval;
+ struct process_queue_node *pqn;
+
+ BUG_ON(!pqm);
+
+ pqn = get_queue_by_qid(pqm, qid);
+ BUG_ON(!pqn);
+
+ pqn->q->properties.queue_address = p->queue_address;
+ pqn->q->properties.queue_size = p->queue_size;
+ pqn->q->properties.queue_percent = p->queue_percent;
+ pqn->q->properties.priority = p->priority;
+
+ retval = pqn->q->device->dqm->update_queue(pqn->q->device->dqm, pqn->q);
+ if (retval != 0)
+ return retval;
+
+ return 0;
+}
+
+static __attribute__((unused)) struct kernel_queue *pqm_get_kernel_queue(
+ struct process_queue_manager *pqm,
+ unsigned int qid)
+{
+ struct process_queue_node *pqn;
+
+ BUG_ON(!pqm);
+
+ pqn = get_queue_by_qid(pqm, qid);
+ if (pqn && pqn->kq)
+ return pqn->kq;
+
+ return NULL;
+}
+
+
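A hedged sketch of how a caller (again, the ioctl handler, which is not shown here) might create and later destroy a compute queue through the process queue manager. The property values are placeholders, and the locking plus read/write pointer setup of the real path are omitted.

static int example_create_compute_queue(struct kfd_process *p,
					struct kfd_dev *dev, struct file *f,
					uint64_t ring_gpu_addr,
					uint64_t ring_size_bytes)
{
	struct queue_properties props = {0};
	unsigned int qid;
	int err;

	props.queue_address = ring_gpu_addr;	/* user-allocated ring buffer */
	props.queue_size = ring_size_bytes;
	props.format = KFD_QUEUE_FORMAT_PM4;
	props.priority = 15;			/* highest priority, see kfd_priv.h */
	props.queue_percent = 100;		/* non-zero means the queue is active */

	err = pqm_create_queue(&p->pqm, dev, f, &props, 0,
			       KFD_QUEUE_TYPE_COMPUTE, &qid);
	if (err)
		return err;

	/* ... and on teardown: */
	return pqm_destroy_queue(&p->pqm, qid);
}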
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
new file mode 100644
index 00000000000..9a0c90b0702
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/slab.h>
+#include "kfd_priv.h"
+
+void print_queue_properties(struct queue_properties *q)
+{
+ if (!q)
+ return;
+
+ pr_debug("Printing queue properties:\n");
+ pr_debug("Queue Type: %u\n", q->type);
+ pr_debug("Queue Size: %llu\n", q->queue_size);
+ pr_debug("Queue percent: %u\n", q->queue_percent);
+ pr_debug("Queue Address: 0x%llX\n", q->queue_address);
+ pr_debug("Queue Id: %u\n", q->queue_id);
+ pr_debug("Queue Process Vmid: %u\n", q->vmid);
+ pr_debug("Queue Read Pointer: 0x%p\n", q->read_ptr);
+ pr_debug("Queue Write Pointer: 0x%p\n", q->write_ptr);
+ pr_debug("Queue Doorbell Pointer: 0x%p\n", q->doorbell_ptr);
+ pr_debug("Queue Doorbell Offset: %u\n", q->doorbell_off);
+}
+
+void print_queue(struct queue *q)
+{
+ if (!q)
+ return;
+ pr_debug("Printing queue:\n");
+ pr_debug("Queue Type: %u\n", q->properties.type);
+ pr_debug("Queue Size: %llu\n", q->properties.queue_size);
+ pr_debug("Queue percent: %u\n", q->properties.queue_percent);
+ pr_debug("Queue Address: 0x%llX\n", q->properties.queue_address);
+ pr_debug("Queue Id: %u\n", q->properties.queue_id);
+ pr_debug("Queue Process Vmid: %u\n", q->properties.vmid);
+ pr_debug("Queue Read Pointer: 0x%p\n", q->properties.read_ptr);
+ pr_debug("Queue Write Pointer: 0x%p\n", q->properties.write_ptr);
+ pr_debug("Queue Doorbell Pointer: 0x%p\n", q->properties.doorbell_ptr);
+ pr_debug("Queue Doorbell Offset: %u\n", q->properties.doorbell_off);
+ pr_debug("Queue MQD Address: 0x%p\n", q->mqd);
+ pr_debug("Queue MQD Gart: 0x%llX\n", q->gart_mqd_addr);
+ pr_debug("Queue Process Address: 0x%p\n", q->process);
+ pr_debug("Queue Device Address: 0x%p\n", q->device);
+}
+
+int init_queue(struct queue **q, struct queue_properties properties)
+{
+ struct queue *tmp;
+
+ BUG_ON(!q);
+
+ tmp = kzalloc(sizeof(struct queue), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ memcpy(&tmp->properties, &properties, sizeof(struct queue_properties));
+
+ *q = tmp;
+ return 0;
+}
+
+void uninit_queue(struct queue *q)
+{
+ kfree(q);
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
new file mode 100644
index 00000000000..5733e2859e8
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -0,0 +1,1235 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/errno.h>
+#include <linux/acpi.h>
+#include <linux/hash.h>
+#include <linux/cpufreq.h>
+
+#include "kfd_priv.h"
+#include "kfd_crat.h"
+#include "kfd_topology.h"
+
+static struct list_head topology_device_list;
+static int topology_crat_parsed;
+static struct kfd_system_properties sys_props;
+
+static DECLARE_RWSEM(topology_lock);
+
+struct kfd_dev *kfd_device_by_id(uint32_t gpu_id)
+{
+ struct kfd_topology_device *top_dev;
+ struct kfd_dev *device = NULL;
+
+ down_read(&topology_lock);
+
+ list_for_each_entry(top_dev, &topology_device_list, list)
+ if (top_dev->gpu_id == gpu_id) {
+ device = top_dev->gpu;
+ break;
+ }
+
+ up_read(&topology_lock);
+
+ return device;
+}
+
+struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev)
+{
+ struct kfd_topology_device *top_dev;
+ struct kfd_dev *device = NULL;
+
+ down_read(&topology_lock);
+
+ list_for_each_entry(top_dev, &topology_device_list, list)
+ if (top_dev->gpu->pdev == pdev) {
+ device = top_dev->gpu;
+ break;
+ }
+
+ up_read(&topology_lock);
+
+ return device;
+}
+
+static int kfd_topology_get_crat_acpi(void *crat_image, size_t *size)
+{
+ struct acpi_table_header *crat_table;
+ acpi_status status;
+
+ if (!size)
+ return -EINVAL;
+
+ /*
+ * Fetch the CRAT table from ACPI
+ */
+ status = acpi_get_table(CRAT_SIGNATURE, 0, &crat_table);
+ if (status == AE_NOT_FOUND) {
+ pr_warn("CRAT table not found\n");
+ return -ENODATA;
+ } else if (ACPI_FAILURE(status)) {
+ const char *err = acpi_format_exception(status);
+
+ pr_err("CRAT table error: %s\n", err);
+ return -EINVAL;
+ }
+
+ if (*size >= crat_table->length && crat_image != NULL)
+ memcpy(crat_image, crat_table, crat_table->length);
+
+ *size = crat_table->length;
+
+ return 0;
+}
+
+static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev,
+ struct crat_subtype_computeunit *cu)
+{
+ BUG_ON(!dev);
+ BUG_ON(!cu);
+
+ dev->node_props.cpu_cores_count = cu->num_cpu_cores;
+ dev->node_props.cpu_core_id_base = cu->processor_id_low;
+ if (cu->hsa_capability & CRAT_CU_FLAGS_IOMMU_PRESENT)
+ dev->node_props.capability |= HSA_CAP_ATS_PRESENT;
+
+ pr_info("CU CPU: cores=%d id_base=%d\n", cu->num_cpu_cores,
+ cu->processor_id_low);
+}
+
+static void kfd_populated_cu_info_gpu(struct kfd_topology_device *dev,
+ struct crat_subtype_computeunit *cu)
+{
+ BUG_ON(!dev);
+ BUG_ON(!cu);
+
+ dev->node_props.simd_id_base = cu->processor_id_low;
+ dev->node_props.simd_count = cu->num_simd_cores;
+ dev->node_props.lds_size_in_kb = cu->lds_size_in_kb;
+ dev->node_props.max_waves_per_simd = cu->max_waves_simd;
+ dev->node_props.wave_front_size = cu->wave_front_size;
+ dev->node_props.mem_banks_count = cu->num_banks;
+ dev->node_props.array_count = cu->num_arrays;
+ dev->node_props.cu_per_simd_array = cu->num_cu_per_array;
+ dev->node_props.simd_per_cu = cu->num_simd_per_cu;
+ dev->node_props.max_slots_scratch_cu = cu->max_slots_scatch_cu;
+ if (cu->hsa_capability & CRAT_CU_FLAGS_HOT_PLUGGABLE)
+ dev->node_props.capability |= HSA_CAP_HOT_PLUGGABLE;
+ pr_info("CU GPU: simds=%d id_base=%d\n", cu->num_simd_cores,
+ cu->processor_id_low);
+}
+
+/* kfd_parse_subtype_cu is called when the topology mutex is already acquired */
+static int kfd_parse_subtype_cu(struct crat_subtype_computeunit *cu)
+{
+ struct kfd_topology_device *dev;
+ int i = 0;
+
+ BUG_ON(!cu);
+
+ pr_info("Found CU entry in CRAT table with proximity_domain=%d caps=%x\n",
+ cu->proximity_domain, cu->hsa_capability);
+ list_for_each_entry(dev, &topology_device_list, list) {
+ if (cu->proximity_domain == i) {
+ if (cu->flags & CRAT_CU_FLAGS_CPU_PRESENT)
+ kfd_populated_cu_info_cpu(dev, cu);
+
+ if (cu->flags & CRAT_CU_FLAGS_GPU_PRESENT)
+ kfd_populated_cu_info_gpu(dev, cu);
+ break;
+ }
+ i++;
+ }
+
+ return 0;
+}
+
+/*
+ * kfd_parse_subtype_mem is called when the topology mutex is
+ * already acquired
+ */
+static int kfd_parse_subtype_mem(struct crat_subtype_memory *mem)
+{
+ struct kfd_mem_properties *props;
+ struct kfd_topology_device *dev;
+ int i = 0;
+
+ BUG_ON(!mem);
+
+ pr_info("Found memory entry in CRAT table with proximity_domain=%d\n",
+ mem->promixity_domain);
+ list_for_each_entry(dev, &topology_device_list, list) {
+ if (mem->promixity_domain == i) {
+ props = kfd_alloc_struct(props);
+ if (props == NULL)
+ return -ENOMEM;
+
+ if (dev->node_props.cpu_cores_count == 0)
+ props->heap_type = HSA_MEM_HEAP_TYPE_FB_PRIVATE;
+ else
+ props->heap_type = HSA_MEM_HEAP_TYPE_SYSTEM;
+
+ if (mem->flags & CRAT_MEM_FLAGS_HOT_PLUGGABLE)
+ props->flags |= HSA_MEM_FLAGS_HOT_PLUGGABLE;
+ if (mem->flags & CRAT_MEM_FLAGS_NON_VOLATILE)
+ props->flags |= HSA_MEM_FLAGS_NON_VOLATILE;
+
+ props->size_in_bytes =
+ ((uint64_t)mem->length_high << 32) +
+ mem->length_low;
+ props->width = mem->width;
+
+ dev->mem_bank_count++;
+ list_add_tail(&props->list, &dev->mem_props);
+
+ break;
+ }
+ i++;
+ }
+
+ return 0;
+}
+
+/*
+ * kfd_parse_subtype_cache is called when the topology mutex
+ * is already acquired
+ */
+static int kfd_parse_subtype_cache(struct crat_subtype_cache *cache)
+{
+ struct kfd_cache_properties *props;
+ struct kfd_topology_device *dev;
+ uint32_t id;
+
+ BUG_ON(!cache);
+
+ id = cache->processor_id_low;
+
+ pr_info("Found cache entry in CRAT table with processor_id=%d\n", id);
+ list_for_each_entry(dev, &topology_device_list, list)
+ if (id == dev->node_props.cpu_core_id_base ||
+ id == dev->node_props.simd_id_base) {
+ props = kfd_alloc_struct(props);
+ if (props == NULL)
+ return -ENOMEM;
+
+ props->processor_id_low = id;
+ props->cache_level = cache->cache_level;
+ props->cache_size = cache->cache_size;
+ props->cacheline_size = cache->cache_line_size;
+ props->cachelines_per_tag = cache->lines_per_tag;
+ props->cache_assoc = cache->associativity;
+ props->cache_latency = cache->cache_latency;
+
+ if (cache->flags & CRAT_CACHE_FLAGS_DATA_CACHE)
+ props->cache_type |= HSA_CACHE_TYPE_DATA;
+ if (cache->flags & CRAT_CACHE_FLAGS_INST_CACHE)
+ props->cache_type |= HSA_CACHE_TYPE_INSTRUCTION;
+ if (cache->flags & CRAT_CACHE_FLAGS_CPU_CACHE)
+ props->cache_type |= HSA_CACHE_TYPE_CPU;
+ if (cache->flags & CRAT_CACHE_FLAGS_SIMD_CACHE)
+ props->cache_type |= HSA_CACHE_TYPE_HSACU;
+
+ dev->cache_count++;
+ dev->node_props.caches_count++;
+ list_add_tail(&props->list, &dev->cache_props);
+
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * kfd_parse_subtype_iolink is called when the topology mutex
+ * is already acquired
+ */
+static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink)
+{
+ struct kfd_iolink_properties *props;
+ struct kfd_topology_device *dev;
+ uint32_t i = 0;
+ uint32_t id_from;
+ uint32_t id_to;
+
+ BUG_ON(!iolink);
+
+ id_from = iolink->proximity_domain_from;
+ id_to = iolink->proximity_domain_to;
+
+ pr_info("Found IO link entry in CRAT table with id_from=%d\n", id_from);
+ list_for_each_entry(dev, &topology_device_list, list) {
+ if (id_from == i) {
+ props = kfd_alloc_struct(props);
+ if (props == NULL)
+ return -ENOMEM;
+
+ props->node_from = id_from;
+ props->node_to = id_to;
+ props->ver_maj = iolink->version_major;
+ props->ver_min = iolink->version_minor;
+
+ /*
+ * weight factor (derived from CDIR), currently always 1
+ */
+ props->weight = 1;
+
+ props->min_latency = iolink->minimum_latency;
+ props->max_latency = iolink->maximum_latency;
+ props->min_bandwidth = iolink->minimum_bandwidth_mbs;
+ props->max_bandwidth = iolink->maximum_bandwidth_mbs;
+ props->rec_transfer_size =
+ iolink->recommended_transfer_size;
+
+ dev->io_link_count++;
+ dev->node_props.io_links_count++;
+ list_add_tail(&props->list, &dev->io_link_props);
+
+ break;
+ }
+ i++;
+ }
+
+ return 0;
+}
+
+static int kfd_parse_subtype(struct crat_subtype_generic *sub_type_hdr)
+{
+ struct crat_subtype_computeunit *cu;
+ struct crat_subtype_memory *mem;
+ struct crat_subtype_cache *cache;
+ struct crat_subtype_iolink *iolink;
+ int ret = 0;
+
+ BUG_ON(!sub_type_hdr);
+
+ switch (sub_type_hdr->type) {
+ case CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY:
+ cu = (struct crat_subtype_computeunit *)sub_type_hdr;
+ ret = kfd_parse_subtype_cu(cu);
+ break;
+ case CRAT_SUBTYPE_MEMORY_AFFINITY:
+ mem = (struct crat_subtype_memory *)sub_type_hdr;
+ ret = kfd_parse_subtype_mem(mem);
+ break;
+ case CRAT_SUBTYPE_CACHE_AFFINITY:
+ cache = (struct crat_subtype_cache *)sub_type_hdr;
+ ret = kfd_parse_subtype_cache(cache);
+ break;
+ case CRAT_SUBTYPE_TLB_AFFINITY:
+ /*
+ * For now, nothing to do here
+ */
+ pr_info("Found TLB entry in CRAT table (not processing)\n");
+ break;
+ case CRAT_SUBTYPE_CCOMPUTE_AFFINITY:
+ /*
+ * For now, nothing to do here
+ */
+ pr_info("Found CCOMPUTE entry in CRAT table (not processing)\n");
+ break;
+ case CRAT_SUBTYPE_IOLINK_AFFINITY:
+ iolink = (struct crat_subtype_iolink *)sub_type_hdr;
+ ret = kfd_parse_subtype_iolink(iolink);
+ break;
+ default:
+ pr_warn("Unknown subtype (%d) in CRAT\n",
+ sub_type_hdr->type);
+ }
+
+ return ret;
+}
+
+static void kfd_release_topology_device(struct kfd_topology_device *dev)
+{
+ struct kfd_mem_properties *mem;
+ struct kfd_cache_properties *cache;
+ struct kfd_iolink_properties *iolink;
+
+ BUG_ON(!dev);
+
+ list_del(&dev->list);
+
+ while (dev->mem_props.next != &dev->mem_props) {
+ mem = container_of(dev->mem_props.next,
+ struct kfd_mem_properties, list);
+ list_del(&mem->list);
+ kfree(mem);
+ }
+
+ while (dev->cache_props.next != &dev->cache_props) {
+ cache = container_of(dev->cache_props.next,
+ struct kfd_cache_properties, list);
+ list_del(&cache->list);
+ kfree(cache);
+ }
+
+ while (dev->io_link_props.next != &dev->io_link_props) {
+ iolink = container_of(dev->io_link_props.next,
+ struct kfd_iolink_properties, list);
+ list_del(&iolink->list);
+ kfree(iolink);
+ }
+
+ kfree(dev);
+
+ sys_props.num_devices--;
+}
+
+static void kfd_release_live_view(void)
+{
+ struct kfd_topology_device *dev;
+
+ while (topology_device_list.next != &topology_device_list) {
+ dev = container_of(topology_device_list.next,
+ struct kfd_topology_device, list);
+ kfd_release_topology_device(dev);
+	}
+
+ memset(&sys_props, 0, sizeof(sys_props));
+}
+
+static struct kfd_topology_device *kfd_create_topology_device(void)
+{
+ struct kfd_topology_device *dev;
+
+ dev = kfd_alloc_struct(dev);
+ if (dev == NULL) {
+ pr_err("No memory to allocate a topology device");
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&dev->mem_props);
+ INIT_LIST_HEAD(&dev->cache_props);
+ INIT_LIST_HEAD(&dev->io_link_props);
+
+ list_add_tail(&dev->list, &topology_device_list);
+ sys_props.num_devices++;
+
+ return dev;
+}
+
+static int kfd_parse_crat_table(void *crat_image)
+{
+ struct kfd_topology_device *top_dev;
+ struct crat_subtype_generic *sub_type_hdr;
+ uint16_t node_id;
+ int ret;
+ struct crat_header *crat_table = (struct crat_header *)crat_image;
+ uint16_t num_nodes;
+ uint32_t image_len;
+
+ if (!crat_image)
+ return -EINVAL;
+
+ num_nodes = crat_table->num_domains;
+ image_len = crat_table->length;
+
+ pr_info("Parsing CRAT table with %d nodes\n", num_nodes);
+
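+	/*
+	 * Create one (empty) topology device per proximity domain; the
+	 * subtype entries parsed below fill in their properties.
+	 */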
+ for (node_id = 0; node_id < num_nodes; node_id++) {
+ top_dev = kfd_create_topology_device();
+ if (!top_dev) {
+ kfd_release_live_view();
+ return -ENOMEM;
+ }
+ }
+
+ sys_props.platform_id =
+ (*((uint64_t *)crat_table->oem_id)) & CRAT_OEMID_64BIT_MASK;
+ sys_props.platform_oem = *((uint64_t *)crat_table->oem_table_id);
+ sys_props.platform_rev = crat_table->revision;
+
+ sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);
+ while ((char *)sub_type_hdr + sizeof(struct crat_subtype_generic) <
+ ((char *)crat_image) + image_len) {
+ if (sub_type_hdr->flags & CRAT_SUBTYPE_FLAGS_ENABLED) {
+ ret = kfd_parse_subtype(sub_type_hdr);
+ if (ret != 0) {
+ kfd_release_live_view();
+ return ret;
+ }
+ }
+
+ sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
+ sub_type_hdr->length);
+ }
+
+ sys_props.generation_count++;
+ topology_crat_parsed = 1;
+
+ return 0;
+}
+
+
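+/*
+ * sysfs "show" helpers: each call appends one formatted entry to the
+ * page-sized buffer by re-printing the buffer's current contents followed
+ * by the new "name value" line.
+ */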
+#define sysfs_show_gen_prop(buffer, fmt, ...) \
+ snprintf(buffer, PAGE_SIZE, "%s"fmt, buffer, __VA_ARGS__)
+#define sysfs_show_32bit_prop(buffer, name, value) \
+ sysfs_show_gen_prop(buffer, "%s %u\n", name, value)
+#define sysfs_show_64bit_prop(buffer, name, value) \
+ sysfs_show_gen_prop(buffer, "%s %llu\n", name, value)
+#define sysfs_show_32bit_val(buffer, value) \
+ sysfs_show_gen_prop(buffer, "%u\n", value)
+#define sysfs_show_str_val(buffer, value) \
+ sysfs_show_gen_prop(buffer, "%s\n", value)
+
+static ssize_t sysprops_show(struct kobject *kobj, struct attribute *attr,
+ char *buffer)
+{
+ ssize_t ret;
+
+ /* Making sure that the buffer is an empty string */
+ buffer[0] = 0;
+
+ if (attr == &sys_props.attr_genid) {
+ ret = sysfs_show_32bit_val(buffer, sys_props.generation_count);
+ } else if (attr == &sys_props.attr_props) {
+ sysfs_show_64bit_prop(buffer, "platform_oem",
+ sys_props.platform_oem);
+ sysfs_show_64bit_prop(buffer, "platform_id",
+ sys_props.platform_id);
+ ret = sysfs_show_64bit_prop(buffer, "platform_rev",
+ sys_props.platform_rev);
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct sysfs_ops sysprops_ops = {
+ .show = sysprops_show,
+};
+
+static struct kobj_type sysprops_type = {
+ .sysfs_ops = &sysprops_ops,
+};
+
+static ssize_t iolink_show(struct kobject *kobj, struct attribute *attr,
+ char *buffer)
+{
+ ssize_t ret;
+ struct kfd_iolink_properties *iolink;
+
+ /* Making sure that the buffer is an empty string */
+ buffer[0] = 0;
+
+ iolink = container_of(attr, struct kfd_iolink_properties, attr);
+ sysfs_show_32bit_prop(buffer, "type", iolink->iolink_type);
+ sysfs_show_32bit_prop(buffer, "version_major", iolink->ver_maj);
+ sysfs_show_32bit_prop(buffer, "version_minor", iolink->ver_min);
+ sysfs_show_32bit_prop(buffer, "node_from", iolink->node_from);
+ sysfs_show_32bit_prop(buffer, "node_to", iolink->node_to);
+ sysfs_show_32bit_prop(buffer, "weight", iolink->weight);
+ sysfs_show_32bit_prop(buffer, "min_latency", iolink->min_latency);
+ sysfs_show_32bit_prop(buffer, "max_latency", iolink->max_latency);
+ sysfs_show_32bit_prop(buffer, "min_bandwidth", iolink->min_bandwidth);
+ sysfs_show_32bit_prop(buffer, "max_bandwidth", iolink->max_bandwidth);
+ sysfs_show_32bit_prop(buffer, "recommended_transfer_size",
+ iolink->rec_transfer_size);
+ ret = sysfs_show_32bit_prop(buffer, "flags", iolink->flags);
+
+ return ret;
+}
+
+static const struct sysfs_ops iolink_ops = {
+ .show = iolink_show,
+};
+
+static struct kobj_type iolink_type = {
+ .sysfs_ops = &iolink_ops,
+};
+
+static ssize_t mem_show(struct kobject *kobj, struct attribute *attr,
+ char *buffer)
+{
+ ssize_t ret;
+ struct kfd_mem_properties *mem;
+
+ /* Making sure that the buffer is an empty string */
+ buffer[0] = 0;
+
+ mem = container_of(attr, struct kfd_mem_properties, attr);
+ sysfs_show_32bit_prop(buffer, "heap_type", mem->heap_type);
+ sysfs_show_64bit_prop(buffer, "size_in_bytes", mem->size_in_bytes);
+ sysfs_show_32bit_prop(buffer, "flags", mem->flags);
+ sysfs_show_32bit_prop(buffer, "width", mem->width);
+ ret = sysfs_show_32bit_prop(buffer, "mem_clk_max", mem->mem_clk_max);
+
+ return ret;
+}
+
+static const struct sysfs_ops mem_ops = {
+ .show = mem_show,
+};
+
+static struct kobj_type mem_type = {
+ .sysfs_ops = &mem_ops,
+};
+
+static ssize_t kfd_cache_show(struct kobject *kobj, struct attribute *attr,
+ char *buffer)
+{
+ ssize_t ret;
+ uint32_t i;
+ struct kfd_cache_properties *cache;
+
+ /* Making sure that the buffer is an empty string */
+ buffer[0] = 0;
+
+ cache = container_of(attr, struct kfd_cache_properties, attr);
+ sysfs_show_32bit_prop(buffer, "processor_id_low",
+ cache->processor_id_low);
+ sysfs_show_32bit_prop(buffer, "level", cache->cache_level);
+ sysfs_show_32bit_prop(buffer, "size", cache->cache_size);
+ sysfs_show_32bit_prop(buffer, "cache_line_size", cache->cacheline_size);
+ sysfs_show_32bit_prop(buffer, "cache_lines_per_tag",
+ cache->cachelines_per_tag);
+ sysfs_show_32bit_prop(buffer, "association", cache->cache_assoc);
+ sysfs_show_32bit_prop(buffer, "latency", cache->cache_latency);
+ sysfs_show_32bit_prop(buffer, "type", cache->cache_type);
+ snprintf(buffer, PAGE_SIZE, "%ssibling_map ", buffer);
+ for (i = 0; i < KFD_TOPOLOGY_CPU_SIBLINGS; i++)
+ ret = snprintf(buffer, PAGE_SIZE, "%s%d%s",
+ buffer, cache->sibling_map[i],
+ (i == KFD_TOPOLOGY_CPU_SIBLINGS-1) ?
+ "\n" : ",");
+
+ return ret;
+}
+
+static const struct sysfs_ops cache_ops = {
+ .show = kfd_cache_show,
+};
+
+static struct kobj_type cache_type = {
+ .sysfs_ops = &cache_ops,
+};
+
+static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
+ char *buffer)
+{
+ ssize_t ret;
+ struct kfd_topology_device *dev;
+ char public_name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE];
+ uint32_t i;
+
+ /* Making sure that the buffer is an empty string */
+ buffer[0] = 0;
+
+ if (strcmp(attr->name, "gpu_id") == 0) {
+ dev = container_of(attr, struct kfd_topology_device,
+ attr_gpuid);
+ ret = sysfs_show_32bit_val(buffer, dev->gpu_id);
+ } else if (strcmp(attr->name, "name") == 0) {
+ dev = container_of(attr, struct kfd_topology_device,
+ attr_name);
+ for (i = 0; i < KFD_TOPOLOGY_PUBLIC_NAME_SIZE; i++) {
+ public_name[i] =
+ (char)dev->node_props.marketing_name[i];
+ if (dev->node_props.marketing_name[i] == 0)
+ break;
+ }
+ public_name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE-1] = 0x0;
+ ret = sysfs_show_str_val(buffer, public_name);
+ } else {
+ dev = container_of(attr, struct kfd_topology_device,
+ attr_props);
+ sysfs_show_32bit_prop(buffer, "cpu_cores_count",
+ dev->node_props.cpu_cores_count);
+ sysfs_show_32bit_prop(buffer, "simd_count",
+ dev->node_props.simd_count);
+
+ if (dev->mem_bank_count < dev->node_props.mem_banks_count) {
+ pr_warn("kfd: mem_banks_count truncated from %d to %d\n",
+ dev->node_props.mem_banks_count,
+ dev->mem_bank_count);
+ sysfs_show_32bit_prop(buffer, "mem_banks_count",
+ dev->mem_bank_count);
+ } else {
+ sysfs_show_32bit_prop(buffer, "mem_banks_count",
+ dev->node_props.mem_banks_count);
+ }
+
+ sysfs_show_32bit_prop(buffer, "caches_count",
+ dev->node_props.caches_count);
+ sysfs_show_32bit_prop(buffer, "io_links_count",
+ dev->node_props.io_links_count);
+ sysfs_show_32bit_prop(buffer, "cpu_core_id_base",
+ dev->node_props.cpu_core_id_base);
+ sysfs_show_32bit_prop(buffer, "simd_id_base",
+ dev->node_props.simd_id_base);
+ sysfs_show_32bit_prop(buffer, "capability",
+ dev->node_props.capability);
+ sysfs_show_32bit_prop(buffer, "max_waves_per_simd",
+ dev->node_props.max_waves_per_simd);
+ sysfs_show_32bit_prop(buffer, "lds_size_in_kb",
+ dev->node_props.lds_size_in_kb);
+ sysfs_show_32bit_prop(buffer, "gds_size_in_kb",
+ dev->node_props.gds_size_in_kb);
+ sysfs_show_32bit_prop(buffer, "wave_front_size",
+ dev->node_props.wave_front_size);
+ sysfs_show_32bit_prop(buffer, "array_count",
+ dev->node_props.array_count);
+ sysfs_show_32bit_prop(buffer, "simd_arrays_per_engine",
+ dev->node_props.simd_arrays_per_engine);
+ sysfs_show_32bit_prop(buffer, "cu_per_simd_array",
+ dev->node_props.cu_per_simd_array);
+ sysfs_show_32bit_prop(buffer, "simd_per_cu",
+ dev->node_props.simd_per_cu);
+ sysfs_show_32bit_prop(buffer, "max_slots_scratch_cu",
+ dev->node_props.max_slots_scratch_cu);
+ sysfs_show_32bit_prop(buffer, "engine_id",
+ dev->node_props.engine_id);
+ sysfs_show_32bit_prop(buffer, "vendor_id",
+ dev->node_props.vendor_id);
+ sysfs_show_32bit_prop(buffer, "device_id",
+ dev->node_props.device_id);
+ sysfs_show_32bit_prop(buffer, "location_id",
+ dev->node_props.location_id);
+
+ if (dev->gpu) {
+ sysfs_show_32bit_prop(buffer, "max_engine_clk_fcompute",
+ kfd2kgd->get_max_engine_clock_in_mhz(
+ dev->gpu->kgd));
+ sysfs_show_64bit_prop(buffer, "local_mem_size",
+ kfd2kgd->get_vmem_size(dev->gpu->kgd));
+ }
+
+ ret = sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute",
+ cpufreq_quick_get_max(0)/1000);
+ }
+
+ return ret;
+}
+
+static const struct sysfs_ops node_ops = {
+ .show = node_show,
+};
+
+static struct kobj_type node_type = {
+ .sysfs_ops = &node_ops,
+};
+
+static void kfd_remove_sysfs_file(struct kobject *kobj, struct attribute *attr)
+{
+ sysfs_remove_file(kobj, attr);
+ kobject_del(kobj);
+ kobject_put(kobj);
+}
+
+static void kfd_remove_sysfs_node_entry(struct kfd_topology_device *dev)
+{
+ struct kfd_iolink_properties *iolink;
+ struct kfd_cache_properties *cache;
+ struct kfd_mem_properties *mem;
+
+ BUG_ON(!dev);
+
+ if (dev->kobj_iolink) {
+ list_for_each_entry(iolink, &dev->io_link_props, list)
+ if (iolink->kobj) {
+ kfd_remove_sysfs_file(iolink->kobj,
+ &iolink->attr);
+ iolink->kobj = NULL;
+ }
+ kobject_del(dev->kobj_iolink);
+ kobject_put(dev->kobj_iolink);
+ dev->kobj_iolink = NULL;
+ }
+
+ if (dev->kobj_cache) {
+ list_for_each_entry(cache, &dev->cache_props, list)
+ if (cache->kobj) {
+ kfd_remove_sysfs_file(cache->kobj,
+ &cache->attr);
+ cache->kobj = NULL;
+ }
+ kobject_del(dev->kobj_cache);
+ kobject_put(dev->kobj_cache);
+ dev->kobj_cache = NULL;
+ }
+
+ if (dev->kobj_mem) {
+ list_for_each_entry(mem, &dev->mem_props, list)
+ if (mem->kobj) {
+ kfd_remove_sysfs_file(mem->kobj, &mem->attr);
+ mem->kobj = NULL;
+ }
+ kobject_del(dev->kobj_mem);
+ kobject_put(dev->kobj_mem);
+ dev->kobj_mem = NULL;
+ }
+
+ if (dev->kobj_node) {
+ sysfs_remove_file(dev->kobj_node, &dev->attr_gpuid);
+ sysfs_remove_file(dev->kobj_node, &dev->attr_name);
+ sysfs_remove_file(dev->kobj_node, &dev->attr_props);
+ kobject_del(dev->kobj_node);
+ kobject_put(dev->kobj_node);
+ dev->kobj_node = NULL;
+ }
+}
+
+static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
+ uint32_t id)
+{
+ struct kfd_iolink_properties *iolink;
+ struct kfd_cache_properties *cache;
+ struct kfd_mem_properties *mem;
+ int ret;
+ uint32_t i;
+
+ BUG_ON(!dev);
+
+ /*
+ * Creating the sysfs folders
+ */
+ BUG_ON(dev->kobj_node);
+ dev->kobj_node = kfd_alloc_struct(dev->kobj_node);
+ if (!dev->kobj_node)
+ return -ENOMEM;
+
+ ret = kobject_init_and_add(dev->kobj_node, &node_type,
+ sys_props.kobj_nodes, "%d", id);
+ if (ret < 0)
+ return ret;
+
+ dev->kobj_mem = kobject_create_and_add("mem_banks", dev->kobj_node);
+ if (!dev->kobj_mem)
+ return -ENOMEM;
+
+ dev->kobj_cache = kobject_create_and_add("caches", dev->kobj_node);
+ if (!dev->kobj_cache)
+ return -ENOMEM;
+
+ dev->kobj_iolink = kobject_create_and_add("io_links", dev->kobj_node);
+ if (!dev->kobj_iolink)
+ return -ENOMEM;
+
+ /*
+ * Creating sysfs files for node properties
+ */
+ dev->attr_gpuid.name = "gpu_id";
+ dev->attr_gpuid.mode = KFD_SYSFS_FILE_MODE;
+ sysfs_attr_init(&dev->attr_gpuid);
+ dev->attr_name.name = "name";
+ dev->attr_name.mode = KFD_SYSFS_FILE_MODE;
+ sysfs_attr_init(&dev->attr_name);
+ dev->attr_props.name = "properties";
+ dev->attr_props.mode = KFD_SYSFS_FILE_MODE;
+ sysfs_attr_init(&dev->attr_props);
+ ret = sysfs_create_file(dev->kobj_node, &dev->attr_gpuid);
+ if (ret < 0)
+ return ret;
+ ret = sysfs_create_file(dev->kobj_node, &dev->attr_name);
+ if (ret < 0)
+ return ret;
+ ret = sysfs_create_file(dev->kobj_node, &dev->attr_props);
+ if (ret < 0)
+ return ret;
+
+ i = 0;
+ list_for_each_entry(mem, &dev->mem_props, list) {
+ mem->kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
+ if (!mem->kobj)
+ return -ENOMEM;
+ ret = kobject_init_and_add(mem->kobj, &mem_type,
+ dev->kobj_mem, "%d", i);
+ if (ret < 0)
+ return ret;
+
+ mem->attr.name = "properties";
+ mem->attr.mode = KFD_SYSFS_FILE_MODE;
+ sysfs_attr_init(&mem->attr);
+ ret = sysfs_create_file(mem->kobj, &mem->attr);
+ if (ret < 0)
+ return ret;
+ i++;
+ }
+
+ i = 0;
+ list_for_each_entry(cache, &dev->cache_props, list) {
+ cache->kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
+ if (!cache->kobj)
+ return -ENOMEM;
+ ret = kobject_init_and_add(cache->kobj, &cache_type,
+ dev->kobj_cache, "%d", i);
+ if (ret < 0)
+ return ret;
+
+ cache->attr.name = "properties";
+ cache->attr.mode = KFD_SYSFS_FILE_MODE;
+ sysfs_attr_init(&cache->attr);
+ ret = sysfs_create_file(cache->kobj, &cache->attr);
+ if (ret < 0)
+ return ret;
+ i++;
+ }
+
+ i = 0;
+ list_for_each_entry(iolink, &dev->io_link_props, list) {
+ iolink->kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
+ if (!iolink->kobj)
+ return -ENOMEM;
+ ret = kobject_init_and_add(iolink->kobj, &iolink_type,
+ dev->kobj_iolink, "%d", i);
+ if (ret < 0)
+ return ret;
+
+ iolink->attr.name = "properties";
+ iolink->attr.mode = KFD_SYSFS_FILE_MODE;
+ sysfs_attr_init(&iolink->attr);
+ ret = sysfs_create_file(iolink->kobj, &iolink->attr);
+ if (ret < 0)
+ return ret;
+ i++;
+	}
+
+ return 0;
+}
+
+static int kfd_build_sysfs_node_tree(void)
+{
+ struct kfd_topology_device *dev;
+ int ret;
+ uint32_t i = 0;
+
+ list_for_each_entry(dev, &topology_device_list, list) {
+		ret = kfd_build_sysfs_node_entry(dev, i);
+ if (ret < 0)
+ return ret;
+ i++;
+ }
+
+ return 0;
+}
+
+static void kfd_remove_sysfs_node_tree(void)
+{
+ struct kfd_topology_device *dev;
+
+ list_for_each_entry(dev, &topology_device_list, list)
+ kfd_remove_sysfs_node_entry(dev);
+}
+
+static int kfd_topology_update_sysfs(void)
+{
+ int ret;
+
+ pr_info("Creating topology SYSFS entries\n");
+ if (sys_props.kobj_topology == NULL) {
+ sys_props.kobj_topology =
+ kfd_alloc_struct(sys_props.kobj_topology);
+ if (!sys_props.kobj_topology)
+ return -ENOMEM;
+
+ ret = kobject_init_and_add(sys_props.kobj_topology,
+ &sysprops_type, &kfd_device->kobj,
+ "topology");
+ if (ret < 0)
+ return ret;
+
+ sys_props.kobj_nodes = kobject_create_and_add("nodes",
+ sys_props.kobj_topology);
+ if (!sys_props.kobj_nodes)
+ return -ENOMEM;
+
+ sys_props.attr_genid.name = "generation_id";
+ sys_props.attr_genid.mode = KFD_SYSFS_FILE_MODE;
+ sysfs_attr_init(&sys_props.attr_genid);
+ ret = sysfs_create_file(sys_props.kobj_topology,
+ &sys_props.attr_genid);
+ if (ret < 0)
+ return ret;
+
+ sys_props.attr_props.name = "system_properties";
+ sys_props.attr_props.mode = KFD_SYSFS_FILE_MODE;
+ sysfs_attr_init(&sys_props.attr_props);
+ ret = sysfs_create_file(sys_props.kobj_topology,
+ &sys_props.attr_props);
+ if (ret < 0)
+ return ret;
+ }
+
+ kfd_remove_sysfs_node_tree();
+
+ return kfd_build_sysfs_node_tree();
+}
+
+static void kfd_topology_release_sysfs(void)
+{
+ kfd_remove_sysfs_node_tree();
+ if (sys_props.kobj_topology) {
+ sysfs_remove_file(sys_props.kobj_topology,
+ &sys_props.attr_genid);
+ sysfs_remove_file(sys_props.kobj_topology,
+ &sys_props.attr_props);
+ if (sys_props.kobj_nodes) {
+ kobject_del(sys_props.kobj_nodes);
+ kobject_put(sys_props.kobj_nodes);
+ sys_props.kobj_nodes = NULL;
+ }
+ kobject_del(sys_props.kobj_topology);
+ kobject_put(sys_props.kobj_topology);
+ sys_props.kobj_topology = NULL;
+ }
+}
+
+int kfd_topology_init(void)
+{
+ void *crat_image = NULL;
+ size_t image_size = 0;
+ int ret;
+
+ /*
+ * Initialize the head for the topology device list
+ */
+ INIT_LIST_HEAD(&topology_device_list);
+ init_rwsem(&topology_lock);
+ topology_crat_parsed = 0;
+
+ memset(&sys_props, 0, sizeof(sys_props));
+
+ /*
+ * Get the CRAT image from the ACPI
+ */
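+	/* The first call passes a NULL image and only queries the size. */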
+ ret = kfd_topology_get_crat_acpi(crat_image, &image_size);
+ if (ret == 0 && image_size > 0) {
+ pr_info("Found CRAT image with size=%zd\n", image_size);
+ crat_image = kmalloc(image_size, GFP_KERNEL);
+ if (!crat_image) {
+ ret = -ENOMEM;
+ pr_err("No memory for allocating CRAT image\n");
+ goto err;
+ }
+ ret = kfd_topology_get_crat_acpi(crat_image, &image_size);
+
+ if (ret == 0) {
+ down_write(&topology_lock);
+ ret = kfd_parse_crat_table(crat_image);
+ if (ret == 0)
+ ret = kfd_topology_update_sysfs();
+ up_write(&topology_lock);
+ } else {
+ pr_err("Couldn't get CRAT table size from ACPI\n");
+ }
+ kfree(crat_image);
+ } else if (ret == -ENODATA) {
+ ret = 0;
+ } else {
+ pr_err("Couldn't get CRAT table size from ACPI\n");
+ }
+
+err:
+ pr_info("Finished initializing topology ret=%d\n", ret);
+ return ret;
+}
+
+void kfd_topology_shutdown(void)
+{
+ kfd_topology_release_sysfs();
+ kfd_release_live_view();
+}
+
+static void kfd_debug_print_topology(void)
+{
+ struct kfd_topology_device *dev;
+ uint32_t i = 0;
+
+ pr_info("DEBUG PRINT OF TOPOLOGY:");
+ list_for_each_entry(dev, &topology_device_list, list) {
+ pr_info("Node: %d\n", i);
+ pr_info("\tGPU assigned: %s\n", (dev->gpu ? "yes" : "no"));
+ pr_info("\tCPU count: %d\n", dev->node_props.cpu_cores_count);
+ pr_info("\tSIMD count: %d", dev->node_props.simd_count);
+ i++;
+ }
+}
+
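+/*
+ * Generate a gpu_id by hashing the PCI identity (bus, devfn, device and
+ * subsystem ids) together with the VRAM size, XOR-folding the pieces into
+ * a KFD_GPU_ID_HASH_WIDTH-bit value.
+ */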
+static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
+{
+ uint32_t hashout;
+ uint32_t buf[7];
+ int i;
+
+ if (!gpu)
+ return 0;
+
+ buf[0] = gpu->pdev->devfn;
+ buf[1] = gpu->pdev->subsystem_vendor;
+ buf[2] = gpu->pdev->subsystem_device;
+ buf[3] = gpu->pdev->device;
+ buf[4] = gpu->pdev->bus->number;
+ buf[5] = (uint32_t)(kfd2kgd->get_vmem_size(gpu->kgd) & 0xffffffff);
+ buf[6] = (uint32_t)(kfd2kgd->get_vmem_size(gpu->kgd) >> 32);
+
+ for (i = 0, hashout = 0; i < 7; i++)
+ hashout ^= hash_32(buf[i], KFD_GPU_ID_HASH_WIDTH);
+
+ return hashout;
+}
+
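+/*
+ * Bind a newly probed GPU to the first topology device that reports SIMD
+ * units and has no GPU attached yet.
+ */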
+static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
+{
+ struct kfd_topology_device *dev;
+ struct kfd_topology_device *out_dev = NULL;
+
+ BUG_ON(!gpu);
+
+ list_for_each_entry(dev, &topology_device_list, list)
+ if (dev->gpu == NULL && dev->node_props.simd_count > 0) {
+ dev->gpu = gpu;
+ out_dev = dev;
+ break;
+ }
+
+ return out_dev;
+}
+
+static void kfd_notify_gpu_change(uint32_t gpu_id, int arrival)
+{
+ /*
+ * TODO: Generate an event for thunk about the arrival/removal
+ * of the GPU
+ */
+}
+
+int kfd_topology_add_device(struct kfd_dev *gpu)
+{
+ uint32_t gpu_id;
+ struct kfd_topology_device *dev;
+ int res;
+
+ BUG_ON(!gpu);
+
+ gpu_id = kfd_generate_gpu_id(gpu);
+
+ pr_debug("kfd: Adding new GPU (ID: 0x%x) to topology\n", gpu_id);
+
+ down_write(&topology_lock);
+ /*
+	 * Try to assign the GPU to an existing topology device (generated
+	 * from the CRAT table)
+ */
+ dev = kfd_assign_gpu(gpu);
+ if (!dev) {
+ pr_info("GPU was not found in the current topology. Extending.\n");
+ kfd_debug_print_topology();
+ dev = kfd_create_topology_device();
+ if (!dev) {
+ res = -ENOMEM;
+ goto err;
+ }
+ dev->gpu = gpu;
+
+ /*
+ * TODO: Make a call to retrieve topology information from the
+ * GPU vBIOS
+ */
+
+ /*
+ * Update the SYSFS tree, since we added another topology device
+ */
+ if (kfd_topology_update_sysfs() < 0)
+ kfd_topology_release_sysfs();
+
+ }
+
+ dev->gpu_id = gpu_id;
+ gpu->id = gpu_id;
+ dev->node_props.vendor_id = gpu->pdev->vendor;
+ dev->node_props.device_id = gpu->pdev->device;
+ dev->node_props.location_id = (gpu->pdev->bus->number << 24) +
+ (gpu->pdev->devfn & 0xffffff);
+ /*
+ * TODO: Retrieve max engine clock values from KGD
+ */
+
+ res = 0;
+
+err:
+ up_write(&topology_lock);
+
+ if (res == 0)
+ kfd_notify_gpu_change(gpu_id, 1);
+
+ return res;
+}
+
+int kfd_topology_remove_device(struct kfd_dev *gpu)
+{
+ struct kfd_topology_device *dev;
+ uint32_t gpu_id;
+ int res = -ENODEV;
+
+ BUG_ON(!gpu);
+
+ down_write(&topology_lock);
+
+ list_for_each_entry(dev, &topology_device_list, list)
+ if (dev->gpu == gpu) {
+ gpu_id = dev->gpu_id;
+ kfd_remove_sysfs_node_entry(dev);
+ kfd_release_topology_device(dev);
+ res = 0;
+ if (kfd_topology_update_sysfs() < 0)
+ kfd_topology_release_sysfs();
+ break;
+ }
+
+ up_write(&topology_lock);
+
+ if (res == 0)
+ kfd_notify_gpu_change(gpu_id, 0);
+
+ return res;
+}
+
+/*
+ * When idx is out of bounds, the function will return NULL
+ */
+struct kfd_dev *kfd_topology_enum_kfd_devices(uint8_t idx)
+{
+
+ struct kfd_topology_device *top_dev;
+ struct kfd_dev *device = NULL;
+ uint8_t device_idx = 0;
+
+ down_read(&topology_lock);
+
+ list_for_each_entry(top_dev, &topology_device_list, list) {
+ if (device_idx == idx) {
+ device = top_dev->gpu;
+ break;
+ }
+
+ device_idx++;
+ }
+
+ up_read(&topology_lock);
+
+ return device;
+
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
new file mode 100644
index 00000000000..989624b3cd1
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __KFD_TOPOLOGY_H__
+#define __KFD_TOPOLOGY_H__
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include "kfd_priv.h"
+
+#define KFD_TOPOLOGY_PUBLIC_NAME_SIZE 128
+
+#define HSA_CAP_HOT_PLUGGABLE 0x00000001
+#define HSA_CAP_ATS_PRESENT 0x00000002
+#define HSA_CAP_SHARED_WITH_GRAPHICS 0x00000004
+#define HSA_CAP_QUEUE_SIZE_POW2 0x00000008
+#define HSA_CAP_QUEUE_SIZE_32BIT 0x00000010
+#define HSA_CAP_QUEUE_IDLE_EVENT 0x00000020
+#define HSA_CAP_VA_LIMIT 0x00000040
+#define HSA_CAP_WATCH_POINTS_SUPPORTED 0x00000080
+#define HSA_CAP_WATCH_POINTS_TOTALBITS_MASK 0x00000f00
+#define HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT 8
+#define HSA_CAP_RESERVED 0xfffff000
+
+struct kfd_node_properties {
+ uint32_t cpu_cores_count;
+ uint32_t simd_count;
+ uint32_t mem_banks_count;
+ uint32_t caches_count;
+ uint32_t io_links_count;
+ uint32_t cpu_core_id_base;
+ uint32_t simd_id_base;
+ uint32_t capability;
+ uint32_t max_waves_per_simd;
+ uint32_t lds_size_in_kb;
+ uint32_t gds_size_in_kb;
+ uint32_t wave_front_size;
+ uint32_t array_count;
+ uint32_t simd_arrays_per_engine;
+ uint32_t cu_per_simd_array;
+ uint32_t simd_per_cu;
+ uint32_t max_slots_scratch_cu;
+ uint32_t engine_id;
+ uint32_t vendor_id;
+ uint32_t device_id;
+ uint32_t location_id;
+ uint32_t max_engine_clk_fcompute;
+ uint32_t max_engine_clk_ccompute;
+ uint16_t marketing_name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE];
+};
+
+#define HSA_MEM_HEAP_TYPE_SYSTEM 0
+#define HSA_MEM_HEAP_TYPE_FB_PUBLIC 1
+#define HSA_MEM_HEAP_TYPE_FB_PRIVATE 2
+#define HSA_MEM_HEAP_TYPE_GPU_GDS 3
+#define HSA_MEM_HEAP_TYPE_GPU_LDS 4
+#define HSA_MEM_HEAP_TYPE_GPU_SCRATCH 5
+
+#define HSA_MEM_FLAGS_HOT_PLUGGABLE 0x00000001
+#define HSA_MEM_FLAGS_NON_VOLATILE 0x00000002
+#define HSA_MEM_FLAGS_RESERVED 0xfffffffc
+
+struct kfd_mem_properties {
+ struct list_head list;
+ uint32_t heap_type;
+ uint64_t size_in_bytes;
+ uint32_t flags;
+ uint32_t width;
+ uint32_t mem_clk_max;
+ struct kobject *kobj;
+ struct attribute attr;
+};
+
+#define KFD_TOPOLOGY_CPU_SIBLINGS 256
+
+#define HSA_CACHE_TYPE_DATA 0x00000001
+#define HSA_CACHE_TYPE_INSTRUCTION 0x00000002
+#define HSA_CACHE_TYPE_CPU 0x00000004
+#define HSA_CACHE_TYPE_HSACU 0x00000008
+#define HSA_CACHE_TYPE_RESERVED 0xfffffff0
+
+struct kfd_cache_properties {
+ struct list_head list;
+ uint32_t processor_id_low;
+ uint32_t cache_level;
+ uint32_t cache_size;
+ uint32_t cacheline_size;
+ uint32_t cachelines_per_tag;
+ uint32_t cache_assoc;
+ uint32_t cache_latency;
+ uint32_t cache_type;
+ uint8_t sibling_map[KFD_TOPOLOGY_CPU_SIBLINGS];
+ struct kobject *kobj;
+ struct attribute attr;
+};
+
+struct kfd_iolink_properties {
+ struct list_head list;
+ uint32_t iolink_type;
+ uint32_t ver_maj;
+ uint32_t ver_min;
+ uint32_t node_from;
+ uint32_t node_to;
+ uint32_t weight;
+ uint32_t min_latency;
+ uint32_t max_latency;
+ uint32_t min_bandwidth;
+ uint32_t max_bandwidth;
+ uint32_t rec_transfer_size;
+ uint32_t flags;
+ struct kobject *kobj;
+ struct attribute attr;
+};
+
+struct kfd_topology_device {
+ struct list_head list;
+ uint32_t gpu_id;
+ struct kfd_node_properties node_props;
+ uint32_t mem_bank_count;
+ struct list_head mem_props;
+ uint32_t cache_count;
+ struct list_head cache_props;
+ uint32_t io_link_count;
+ struct list_head io_link_props;
+ struct kfd_dev *gpu;
+ struct kobject *kobj_node;
+ struct kobject *kobj_mem;
+ struct kobject *kobj_cache;
+ struct kobject *kobj_iolink;
+ struct attribute attr_gpuid;
+ struct attribute attr_name;
+ struct attribute attr_props;
+};
+
+struct kfd_system_properties {
+ uint32_t num_devices; /* Number of H-NUMA nodes */
+ uint32_t generation_count;
+ uint64_t platform_oem;
+ uint64_t platform_id;
+ uint64_t platform_rev;
+ struct kobject *kobj_topology;
+ struct kobject *kobj_nodes;
+ struct attribute attr_genid;
+ struct attribute attr_props;
+};
+
+
+
+#endif /* __KFD_TOPOLOGY_H__ */
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
new file mode 100644
index 00000000000..9c729dd8dd5
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * This file defines the private interface between the
+ * AMD kernel graphics drivers and the AMD KFD.
+ */
+
+#ifndef KGD_KFD_INTERFACE_H_INCLUDED
+#define KGD_KFD_INTERFACE_H_INCLUDED
+
+#include <linux/types.h>
+
+struct pci_dev;
+
+#define KFD_INTERFACE_VERSION 1
+
+struct kfd_dev;
+struct kgd_dev;
+
+struct kgd_mem;
+
+enum kgd_memory_pool {
+ KGD_POOL_SYSTEM_CACHEABLE = 1,
+ KGD_POOL_SYSTEM_WRITECOMBINE = 2,
+ KGD_POOL_FRAMEBUFFER = 3,
+};
+
+struct kgd2kfd_shared_resources {
+ /* Bit n == 1 means VMID n is available for KFD. */
+ unsigned int compute_vmid_bitmap;
+
+ /* Compute pipes are counted starting from MEC0/pipe0 as 0. */
+ unsigned int first_compute_pipe;
+
+ /* Number of MEC pipes available for KFD. */
+ unsigned int compute_pipe_count;
+
+ /* Base address of doorbell aperture. */
+ phys_addr_t doorbell_physical_address;
+
+ /* Size in bytes of doorbell aperture. */
+ size_t doorbell_aperture_size;
+
+ /* Number of bytes at start of aperture reserved for KGD. */
+ size_t doorbell_start_offset;
+};
+
+/**
+ * struct kgd2kfd_calls
+ *
+ * @exit: Notifies amdkfd that kgd module is unloaded
+ *
+ * @probe: Notifies amdkfd about a probe done on a device in the kgd driver.
+ *
+ * @device_init: Initialize the newly probed device (if it is a device that
+ * amdkfd supports)
+ *
+ * @device_exit: Notifies amdkfd about a removal of a kgd device
+ *
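+ * @interrupt: Notifies amdkfd about an interrupt received for one of its
+ * devices, passing the raw interrupt handler ring entry
+ *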
+ * @suspend: Notifies amdkfd about a suspend action done to a kgd device
+ *
+ * @resume: Notifies amdkfd about a resume action done to a kgd device
+ *
+ * This structure contains function callback pointers so the kgd driver
+ * will notify amdkfd about certain status changes.
+ *
+ */
+struct kgd2kfd_calls {
+ void (*exit)(void);
+ struct kfd_dev* (*probe)(struct kgd_dev *kgd, struct pci_dev *pdev);
+ bool (*device_init)(struct kfd_dev *kfd,
+ const struct kgd2kfd_shared_resources *gpu_resources);
+ void (*device_exit)(struct kfd_dev *kfd);
+ void (*interrupt)(struct kfd_dev *kfd, const void *ih_ring_entry);
+ void (*suspend)(struct kfd_dev *kfd);
+ int (*resume)(struct kfd_dev *kfd);
+};
+
+/**
+ * struct kfd2kgd_calls
+ *
+ * @init_sa_manager: Initialize an instance of the sa manager, used by
+ * amdkfd for all system memory allocations that are mapped to the GART
+ * address space
+ *
+ * @fini_sa_manager: Releases all memory allocations for amdkfd that are
+ * handled by kgd sa manager
+ *
+ * @allocate_mem: Allocate a buffer from amdkfd's sa manager. The buffer can
+ * be used for mqds, hpds, kernel queue, fence and runlists
+ *
+ * @free_mem: Frees a buffer that was allocated by amdkfd's sa manager
+ *
+ * @get_vmem_size: Retrieves (physical) size of VRAM
+ *
+ * @get_gpu_clock_counter: Retrieves GPU clock counter
+ *
+ * @get_max_engine_clock_in_mhz: Retrieves maximum GPU clock in MHz
+ *
+ * @program_sh_mem_settings: A function that should initialize the memory
+ * properties such as main aperture memory type (cached / non-cached) and
+ * secondary aperture base address, size and memory type.
+ * This function is used only for no cp scheduling mode.
+ *
+ * @set_pasid_vmid_mapping: Exposes a pasid/vmid pair to the H/W. Only used
+ * for no cp scheduling mode.
+ *
+ * @init_memory: Initializes memory apertures to fixed base/limit address
+ * and non-cached memory types.
+ *
+ * @init_pipeline: Initializes the compute pipelines.
+ *
+ * @hqd_load: Loads the mqd structure to a H/W hqd slot. Used only for no cp
+ * scheduling mode.
+ *
+ * @hqd_is_occupies: Checks if a hqd slot is occupied.
+ *
+ * @hqd_destroy: Destructs and preempts the queue assigned to that hqd slot.
+ *
+ * This structure contains function pointers to services that the kgd driver
+ * provides to amdkfd driver.
+ *
+ */
+struct kfd2kgd_calls {
+ /* Memory management. */
+ int (*init_sa_manager)(struct kgd_dev *kgd, unsigned int size);
+ void (*fini_sa_manager)(struct kgd_dev *kgd);
+ int (*allocate_mem)(struct kgd_dev *kgd, size_t size, size_t alignment,
+ enum kgd_memory_pool pool, struct kgd_mem **mem);
+
+ void (*free_mem)(struct kgd_dev *kgd, struct kgd_mem *mem);
+
+ uint64_t (*get_vmem_size)(struct kgd_dev *kgd);
+ uint64_t (*get_gpu_clock_counter)(struct kgd_dev *kgd);
+
+ uint32_t (*get_max_engine_clock_in_mhz)(struct kgd_dev *kgd);
+
+ /* Register access functions */
+ void (*program_sh_mem_settings)(struct kgd_dev *kgd, uint32_t vmid,
+ uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
+ uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);
+
+ int (*set_pasid_vmid_mapping)(struct kgd_dev *kgd, unsigned int pasid,
+ unsigned int vmid);
+
+ int (*init_memory)(struct kgd_dev *kgd);
+ int (*init_pipeline)(struct kgd_dev *kgd, uint32_t pipe_id,
+ uint32_t hpd_size, uint64_t hpd_gpu_addr);
+
+ int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t __user *wptr);
+
+ bool (*hqd_is_occupies)(struct kgd_dev *kgd, uint64_t queue_address,
+ uint32_t pipe_id, uint32_t queue_id);
+
+ int (*hqd_destroy)(struct kgd_dev *kgd, uint32_t reset_type,
+ unsigned int timeout, uint32_t pipe_id,
+ uint32_t queue_id);
+};
+
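+/*
+ * Called by the kgd driver to bind to amdkfd: the caller supplies its
+ * kfd2kgd_calls implementation (f2g) and, on success, receives amdkfd's
+ * kgd2kfd_calls table (g2f) to use for the notifications described above.
+ */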
+bool kgd2kfd_init(unsigned interface_version,
+ const struct kfd2kgd_calls *f2g,
+ const struct kgd2kfd_calls **g2f);
+
+#endif /* KGD_KFD_INTERFACE_H_INCLUDED */
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index e4a1490b42c..e3a7a5078e5 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -12,6 +12,7 @@
#include <linux/platform_device.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_fb.h"
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 9dc0fd5c1ea..b7ee2634e47 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -31,6 +31,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
#include "ast_drv.h"
#include "ast_tables.h"
diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c
index fe95d31cd11..61dbf09dff5 100644
--- a/drivers/gpu/drm/bochs/bochs_fbdev.c
+++ b/drivers/gpu/drm/bochs/bochs_fbdev.c
@@ -9,6 +9,17 @@
/* ---------------------------------------------------------------------- */
+static int bochsfb_mmap(struct fb_info *info,
+ struct vm_area_struct *vma)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct bochs_device *bochs =
+ container_of(fb_helper, struct bochs_device, fb.helper);
+ struct bochs_bo *bo = gem_to_bochs_bo(bochs->fb.gfb.obj);
+
+ return ttm_fbdev_mmap(vma, &bo->bo);
+}
+
static struct fb_ops bochsfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
@@ -19,6 +30,7 @@ static struct fb_ops bochsfb_ops = {
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
+ .fb_mmap = bochsfb_mmap,
};
static int bochsfb_create_object(struct bochs_device *bochs,
@@ -123,11 +135,9 @@ static int bochsfb_create(struct drm_fb_helper *helper,
info->screen_base = bo->kmap.virtual;
info->screen_size = size;
-#if 0
- /* FIXME: get this right for mmap(/dev/fb0) */
- info->fix.smem_start = bochs_bo_mmap_offset(bo);
+ drm_vma_offset_remove(&bo->bo.bdev->vma_manager, &bo->bo.vma_node);
+ info->fix.smem_start = 0;
info->fix.smem_len = size;
-#endif
ret = fb_alloc_cmap(&info->cmap, 256, 0);
if (ret) {
diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c
index dbe619e6aab..460389702d3 100644
--- a/drivers/gpu/drm/bochs/bochs_hw.c
+++ b/drivers/gpu/drm/bochs/bochs_hw.c
@@ -51,11 +51,10 @@ int bochs_hw_init(struct drm_device *dev, uint32_t flags)
{
struct bochs_device *bochs = dev->dev_private;
struct pci_dev *pdev = dev->pdev;
- unsigned long addr, size, mem, ioaddr, iosize;
+ unsigned long addr, size, mem, ioaddr, iosize, qext_size;
u16 id;
- if (/* (ent->driver_data == BOCHS_QEMU_STDVGA) && */
- (pdev->resource[2].flags & IORESOURCE_MEM)) {
+ if (pdev->resource[2].flags & IORESOURCE_MEM) {
/* mmio bar with vga and bochs registers present */
if (pci_request_region(pdev, 2, "bochs-drm") != 0) {
DRM_ERROR("Cannot request mmio region\n");
@@ -116,6 +115,24 @@ int bochs_hw_init(struct drm_device *dev, uint32_t flags)
size / 1024, addr,
bochs->ioports ? "ioports" : "mmio",
ioaddr);
+
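+	/*
+	 * qemu extension registers: offset 0x600 holds their size and
+	 * offset 0x604 reports/selects the framebuffer byte order, probed
+	 * here by writing a magic value and reading it back.
+	 */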
+ if (bochs->mmio && pdev->revision >= 2) {
+ qext_size = readl(bochs->mmio + 0x600);
+ if (qext_size < 4 || qext_size > iosize)
+ goto noext;
+ DRM_DEBUG("Found qemu ext regs, size %ld\n", qext_size);
+ if (qext_size >= 8) {
+#ifdef __BIG_ENDIAN
+ writel(0xbebebebe, bochs->mmio + 0x604);
+#else
+ writel(0x1e1e1e1e, bochs->mmio + 0x604);
+#endif
+ DRM_DEBUG(" qext endian: 0x%x\n",
+ readl(bochs->mmio + 0x604));
+ }
+ }
+
+noext:
return 0;
}
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 6b7efcf363d..85f0f8cf1fb 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -6,6 +6,7 @@
*/
#include "bochs.h"
+#include <drm/drm_plane_helper.h>
static int defx = 1024;
static int defy = 768;
@@ -108,11 +109,32 @@ static void bochs_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
{
}
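+/*
+ * bochs has no vblank interrupt, so a page flip completes synchronously:
+ * the new framebuffer is programmed immediately and any pending event is
+ * sent right away (pipe -1, i.e. without a vblank counter).
+ */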
+static int bochs_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
+{
+ struct bochs_device *bochs =
+ container_of(crtc, struct bochs_device, crtc);
+ struct drm_framebuffer *old_fb = crtc->primary->fb;
+ unsigned long irqflags;
+
+ crtc->primary->fb = fb;
+ bochs_crtc_mode_set_base(crtc, 0, 0, old_fb);
+ if (event) {
+ spin_lock_irqsave(&bochs->dev->event_lock, irqflags);
+ drm_send_vblank_event(bochs->dev, -1, event);
+ spin_unlock_irqrestore(&bochs->dev->event_lock, irqflags);
+ }
+ return 0;
+}
+
/* These provide the minimum set of functions required to handle a CRTC */
static const struct drm_crtc_funcs bochs_crtc_funcs = {
.gamma_set = bochs_crtc_gamma_set,
.set_config = drm_crtc_helper_set_config,
.destroy = drm_crtc_cleanup,
+ .page_flip = bochs_crtc_page_flip,
};
static const struct drm_crtc_helper_funcs bochs_helper_funcs = {
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index d44e69daa23..693a4565c4f 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -210,6 +210,9 @@ int cirrus_framebuffer_init(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj);
+bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height,
+ int bpp, int pitch);
+
/* cirrus_display.c */
int cirrus_modeset_init(struct cirrus_device *cdev);
void cirrus_modeset_fini(struct cirrus_device *cdev);
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index d231b1c317a..502a89eb54b 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -139,6 +139,7 @@ static int cirrusfb_create_object(struct cirrus_fbdev *afbdev,
struct drm_gem_object **gobj_p)
{
struct drm_device *dev = afbdev->helper.dev;
+ struct cirrus_device *cdev = dev->dev_private;
u32 bpp, depth;
u32 size;
struct drm_gem_object *gobj;
@@ -146,8 +147,10 @@ static int cirrusfb_create_object(struct cirrus_fbdev *afbdev,
int ret = 0;
drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
- if (bpp > 24)
+ if (!cirrus_check_framebuffer(cdev, mode_cmd->width, mode_cmd->height,
+ bpp, mode_cmd->pitches[0]))
return -EINVAL;
+
size = mode_cmd->pitches[0] * mode_cmd->height;
ret = cirrus_gem_create(dev, size, true, &gobj);
if (ret)
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 99c1983f99d..4c2d68e9102 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -49,14 +49,16 @@ cirrus_user_framebuffer_create(struct drm_device *dev,
struct drm_file *filp,
struct drm_mode_fb_cmd2 *mode_cmd)
{
+ struct cirrus_device *cdev = dev->dev_private;
struct drm_gem_object *obj;
struct cirrus_framebuffer *cirrus_fb;
int ret;
u32 bpp, depth;
drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
- /* cirrus can't handle > 24bpp framebuffers at all */
- if (bpp > 24)
+
+ if (!cirrus_check_framebuffer(cdev, mode_cmd->width, mode_cmd->height,
+ bpp, mode_cmd->pitches[0]))
return ERR_PTR(-EINVAL);
obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
@@ -96,8 +98,7 @@ static int cirrus_vram_init(struct cirrus_device *cdev)
{
/* BAR 0 is VRAM */
cdev->mc.vram_base = pci_resource_start(cdev->dev->pdev, 0);
- /* We have 4MB of VRAM */
- cdev->mc.vram_size = 4 * 1024 * 1024;
+ cdev->mc.vram_size = pci_resource_len(cdev->dev->pdev, 0);
if (!request_mem_region(cdev->mc.vram_base, cdev->mc.vram_size,
"cirrusdrmfb_vram")) {
@@ -179,17 +180,22 @@ int cirrus_driver_load(struct drm_device *dev, unsigned long flags)
}
r = cirrus_mm_init(cdev);
- if (r)
+ if (r) {
dev_err(&dev->pdev->dev, "fatal err on mm init\n");
+ goto out;
+ }
r = cirrus_modeset_init(cdev);
- if (r)
+ if (r) {
dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
+ goto out;
+ }
dev->mode_config.funcs = (void *)&cirrus_mode_funcs;
+
+ return 0;
out:
- if (r)
- cirrus_driver_unload(dev);
+ cirrus_driver_unload(dev);
return r;
}
@@ -307,3 +313,21 @@ out_unlock:
return ret;
}
+
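+/*
+ * Reject framebuffers the cirrus hardware cannot scan out: more than
+ * 32 bpp, a pitch above the hardware limit, or a size exceeding VRAM.
+ */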
+bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height,
+ int bpp, int pitch)
+{
+ const int max_pitch = 0x1FF << 3; /* (4096 - 1) & ~111b bytes */
+ const int max_size = cdev->mc.vram_size;
+
+ if (bpp > 32)
+ return false;
+
+ if (pitch > max_pitch)
+ return false;
+
+ if (pitch * height > max_size)
+ return false;
+
+ return true;
+}
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index c7c5a9d91fa..99d4a74ffea 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -16,6 +16,7 @@
*/
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
#include <video/cirrus.h>
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
new file mode 100644
index 00000000000..ff5f034cc40
--- /dev/null
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -0,0 +1,657 @@
+/*
+ * Copyright (C) 2014 Red Hat
+ * Copyright (C) 2014 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ */
+
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_plane_helper.h>
+
+static void kfree_state(struct drm_atomic_state *state)
+{
+ kfree(state->connectors);
+ kfree(state->connector_states);
+ kfree(state->crtcs);
+ kfree(state->crtc_states);
+ kfree(state->planes);
+ kfree(state->plane_states);
+ kfree(state);
+}
+
+/**
+ * drm_atomic_state_alloc - allocate atomic state
+ * @dev: DRM device
+ *
+ * This allocates an empty atomic state to track updates.
+ */
+struct drm_atomic_state *
+drm_atomic_state_alloc(struct drm_device *dev)
+{
+ struct drm_atomic_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ state->num_connector = ACCESS_ONCE(dev->mode_config.num_connector);
+
+ state->crtcs = kcalloc(dev->mode_config.num_crtc,
+ sizeof(*state->crtcs), GFP_KERNEL);
+ if (!state->crtcs)
+ goto fail;
+ state->crtc_states = kcalloc(dev->mode_config.num_crtc,
+ sizeof(*state->crtc_states), GFP_KERNEL);
+ if (!state->crtc_states)
+ goto fail;
+ state->planes = kcalloc(dev->mode_config.num_total_plane,
+ sizeof(*state->planes), GFP_KERNEL);
+ if (!state->planes)
+ goto fail;
+ state->plane_states = kcalloc(dev->mode_config.num_total_plane,
+ sizeof(*state->plane_states), GFP_KERNEL);
+ if (!state->plane_states)
+ goto fail;
+ state->connectors = kcalloc(state->num_connector,
+ sizeof(*state->connectors),
+ GFP_KERNEL);
+ if (!state->connectors)
+ goto fail;
+ state->connector_states = kcalloc(state->num_connector,
+ sizeof(*state->connector_states),
+ GFP_KERNEL);
+ if (!state->connector_states)
+ goto fail;
+
+ state->dev = dev;
+
+ DRM_DEBUG_KMS("Allocate atomic state %p\n", state);
+
+ return state;
+fail:
+ kfree_state(state);
+
+ return NULL;
+}
+EXPORT_SYMBOL(drm_atomic_state_alloc);
+
+/**
+ * drm_atomic_state_clear - clear state object
+ * @state: atomic state
+ *
+ * When the w/w mutex algorithm detects a deadlock we need to back off and drop
+ * all locks. So someone else could sneak in and change the current modeset
+ * configuration. Which means that all the state assembled in @state is no
+ * longer an atomic update to the current state, but to some arbitrary earlier
+ * state. Which could break assumptions the driver's ->atomic_check likely
+ * relies on.
+ *
+ * Hence we must clear all cached state and completely start over, using this
+ * function.
+ */
+void drm_atomic_state_clear(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+ int i;
+
+ DRM_DEBUG_KMS("Clearing atomic state %p\n", state);
+
+ for (i = 0; i < state->num_connector; i++) {
+ struct drm_connector *connector = state->connectors[i];
+
+ if (!connector)
+ continue;
+
+ WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
+
+ connector->funcs->atomic_destroy_state(connector,
+ state->connector_states[i]);
+ }
+
+ for (i = 0; i < config->num_crtc; i++) {
+ struct drm_crtc *crtc = state->crtcs[i];
+
+ if (!crtc)
+ continue;
+
+ crtc->funcs->atomic_destroy_state(crtc,
+ state->crtc_states[i]);
+ }
+
+ for (i = 0; i < config->num_total_plane; i++) {
+ struct drm_plane *plane = state->planes[i];
+
+ if (!plane)
+ continue;
+
+ plane->funcs->atomic_destroy_state(plane,
+ state->plane_states[i]);
+ }
+}
+EXPORT_SYMBOL(drm_atomic_state_clear);
+
+/**
+ * drm_atomic_state_free - free all memory for an atomic state
+ * @state: atomic state to deallocate
+ *
+ * This frees all memory associated with an atomic state, including all the
+ * per-object state for planes, crtcs and connectors.
+ */
+void drm_atomic_state_free(struct drm_atomic_state *state)
+{
+ drm_atomic_state_clear(state);
+
+ DRM_DEBUG_KMS("Freeing atomic state %p\n", state);
+
+ kfree_state(state);
+}
+EXPORT_SYMBOL(drm_atomic_state_free);
+
+/**
+ * drm_atomic_get_crtc_state - get crtc state
+ * @state: global atomic state object
+ * @crtc: crtc to get state object for
+ *
+ * This function returns the crtc state for the given crtc, allocating it if
+ * needed. It will also grab the relevant crtc lock to make sure that the state
+ * is consistent.
+ *
+ * Returns:
+ *
+ * Either the allocated state or the error code encoded into the pointer. When
+ * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
+ * entire atomic sequence must be restarted. All other errors are fatal.
+ */
+struct drm_crtc_state *
+drm_atomic_get_crtc_state(struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
+{
+ int ret, index;
+ struct drm_crtc_state *crtc_state;
+
+ index = drm_crtc_index(crtc);
+
+ if (state->crtc_states[index])
+ return state->crtc_states[index];
+
+ ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
+ if (ret)
+ return ERR_PTR(ret);
+
+ crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
+ if (!crtc_state)
+ return ERR_PTR(-ENOMEM);
+
+ state->crtc_states[index] = crtc_state;
+ state->crtcs[index] = crtc;
+ crtc_state->state = state;
+
+ DRM_DEBUG_KMS("Added [CRTC:%d] %p state to %p\n",
+ crtc->base.id, crtc_state, state);
+
+ return crtc_state;
+}
+EXPORT_SYMBOL(drm_atomic_get_crtc_state);
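A minimal sketch (hypothetical "foo" driver, not part of this patch) of how a plane ->atomic_check hook would pull in the crtc state through this function, letting -EDEADLK propagate so the whole sequence can be restarted:

        static int foo_plane_atomic_check(struct drm_plane *plane,
                                          struct drm_plane_state *state)
        {
                struct drm_crtc_state *crtc_state;

                if (!state->crtc)
                        return 0;

                /* Grabs the crtc lock and duplicates its state on first use. */
                crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
                if (IS_ERR(crtc_state))
                        return PTR_ERR(crtc_state);

                /* ... validate the plane against crtc_state->mode here ... */
                return 0;
        }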
+
+/**
+ * drm_atomic_get_plane_state - get plane state
+ * @state: global atomic state object
+ * @plane: plane to get state object for
+ *
+ * This function returns the plane state for the given plane, allocating it if
+ * needed. It will also grab the relevant plane lock to make sure that the state
+ * is consistent.
+ *
+ * Returns:
+ *
+ * Either the allocated state or the error code encoded into the pointer. When
+ * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
+ * entire atomic sequence must be restarted. All other errors are fatal.
+ */
+struct drm_plane_state *
+drm_atomic_get_plane_state(struct drm_atomic_state *state,
+ struct drm_plane *plane)
+{
+ int ret, index;
+ struct drm_plane_state *plane_state;
+
+ index = drm_plane_index(plane);
+
+ if (state->plane_states[index])
+ return state->plane_states[index];
+
+ ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
+ if (ret)
+ return ERR_PTR(ret);
+
+ plane_state = plane->funcs->atomic_duplicate_state(plane);
+ if (!plane_state)
+ return ERR_PTR(-ENOMEM);
+
+ state->plane_states[index] = plane_state;
+ state->planes[index] = plane;
+ plane_state->state = state;
+
+ DRM_DEBUG_KMS("Added [PLANE:%d] %p state to %p\n",
+ plane->base.id, plane_state, state);
+
+ if (plane_state->crtc) {
+ struct drm_crtc_state *crtc_state;
+
+ crtc_state = drm_atomic_get_crtc_state(state,
+ plane_state->crtc);
+ if (IS_ERR(crtc_state))
+ return ERR_CAST(crtc_state);
+ }
+
+ return plane_state;
+}
+EXPORT_SYMBOL(drm_atomic_get_plane_state);
+
+/**
+ * drm_atomic_get_connector_state - get connector state
+ * @state: global atomic state object
+ * @connector: connector to get state object for
+ *
+ * This function returns the connector state for the given connector,
+ * allocating it if needed. It will also grab the relevant connector lock to
+ * make sure that the state is consistent.
+ *
+ * Returns:
+ *
+ * Either the allocated state or the error code encoded into the pointer. When
+ * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
+ * entire atomic sequence must be restarted. All other errors are fatal.
+ */
+struct drm_connector_state *
+drm_atomic_get_connector_state(struct drm_atomic_state *state,
+ struct drm_connector *connector)
+{
+ int ret, index;
+ struct drm_mode_config *config = &connector->dev->mode_config;
+ struct drm_connector_state *connector_state;
+
+ ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
+ if (ret)
+ return ERR_PTR(ret);
+
+ index = drm_connector_index(connector);
+
+ /*
+ * Construction of atomic state updates can race with a connector
+ * hot-add which might overflow. In this case flip the table and just
+ * restart the entire ioctl - no one is fast enough to livelock a cpu
+ * with physical hotplug events anyway.
+ *
+ * Note that we only grab the indexes once we have the right lock to
+ * prevent hotplug/unplugging of connectors. So removal is no problem,
+ * at most the array is a bit too large.
+ */
+ if (index >= state->num_connector) {
+ DRM_DEBUG_KMS("Hot-added connector would overflow state array, restarting\n");
+ return ERR_PTR(-EAGAIN);
+ }
+
+ if (state->connector_states[index])
+ return state->connector_states[index];
+
+ connector_state = connector->funcs->atomic_duplicate_state(connector);
+ if (!connector_state)
+ return ERR_PTR(-ENOMEM);
+
+ state->connector_states[index] = connector_state;
+ state->connectors[index] = connector;
+ connector_state->state = state;
+
+ DRM_DEBUG_KMS("Added [CONNECTOR:%d] %p state to %p\n",
+ connector->base.id, connector_state, state);
+
+ if (connector_state->crtc) {
+ struct drm_crtc_state *crtc_state;
+
+ crtc_state = drm_atomic_get_crtc_state(state,
+ connector_state->crtc);
+ if (IS_ERR(crtc_state))
+ return ERR_CAST(crtc_state);
+ }
+
+ return connector_state;
+}
+EXPORT_SYMBOL(drm_atomic_get_connector_state);
+
+/**
+ * drm_atomic_set_crtc_for_plane - set crtc for plane
+ * @state: the incoming atomic state
+ * @plane: the plane whose incoming state to update
+ * @crtc: crtc to use for the plane
+ *
+ * Changing the assigned crtc for a plane requires us to grab the lock and state
+ * for the new crtc, as needed. This function takes care of all these details
+ * besides updating the pointer in the state object itself.
+ *
+ * Returns:
+ * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
+ * then the w/w mutex code has detected a deadlock and the entire atomic
+ * sequence must be restarted. All other errors are fatal.
+ */
+int
+drm_atomic_set_crtc_for_plane(struct drm_atomic_state *state,
+ struct drm_plane *plane, struct drm_crtc *crtc)
+{
+ struct drm_plane_state *plane_state =
+ drm_atomic_get_plane_state(state, plane);
+ struct drm_crtc_state *crtc_state;
+
+ if (WARN_ON(IS_ERR(plane_state)))
+ return PTR_ERR(plane_state);
+
+ if (plane_state->crtc) {
+ crtc_state = drm_atomic_get_crtc_state(plane_state->state,
+ plane_state->crtc);
+ if (WARN_ON(IS_ERR(crtc_state)))
+ return PTR_ERR(crtc_state);
+
+ crtc_state->plane_mask &= ~(1 << drm_plane_index(plane));
+ }
+
+ plane_state->crtc = crtc;
+
+ if (crtc) {
+ crtc_state = drm_atomic_get_crtc_state(plane_state->state,
+ crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+ crtc_state->plane_mask |= (1 << drm_plane_index(plane));
+ }
+
+ if (crtc)
+ DRM_DEBUG_KMS("Link plane state %p to [CRTC:%d]\n",
+ plane_state, crtc->base.id);
+ else
+ DRM_DEBUG_KMS("Link plane state %p to [NOCRTC]\n", plane_state);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane);
+
+/**
+ * drm_atomic_set_fb_for_plane - set framebuffer for plane
+ * @plane_state: atomic state object for the plane
+ * @fb: fb to use for the plane
+ *
+ * Changing the assigned framebuffer for a plane requires us to grab a reference
+ * to the new fb and drop the reference to the old fb, if there is one. This
+ * function takes care of all these details besides updating the pointer in the
+ * state object itself.
+ */
+void
+drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
+ struct drm_framebuffer *fb)
+{
+ if (plane_state->fb)
+ drm_framebuffer_unreference(plane_state->fb);
+ if (fb)
+ drm_framebuffer_reference(fb);
+ plane_state->fb = fb;
+
+ if (fb)
+ DRM_DEBUG_KMS("Set [FB:%d] for plane state %p\n",
+ fb->base.id, plane_state);
+ else
+ DRM_DEBUG_KMS("Set [NOFB] for plane state %p\n", plane_state);
+}
+EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
+
+/**
+ * drm_atomic_set_crtc_for_connector - set crtc for connector
+ * @conn_state: atomic state object for the connector
+ * @crtc: crtc to use for the connector
+ *
+ * Changing the assigned crtc for a connector requires us to grab the lock and
+ * state for the new crtc, as needed. This function takes care of all these
+ * details besides updating the pointer in the state object itself.
+ *
+ * Returns:
+ * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
+ * then the w/w mutex code has detected a deadlock and the entire atomic
+ * sequence must be restarted. All other errors are fatal.
+ */
+int
+drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
+ struct drm_crtc *crtc)
+{
+ struct drm_crtc_state *crtc_state;
+
+ if (crtc) {
+ crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+ }
+
+ conn_state->crtc = crtc;
+
+ if (crtc)
+ DRM_DEBUG_KMS("Link connector state %p to [CRTC:%d]\n",
+ conn_state, crtc->base.id);
+ else
+ DRM_DEBUG_KMS("Link connector state %p to [NOCRTC]\n",
+ conn_state);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector);
+
+/**
+ * drm_atomic_add_affected_connectors - add connectors for crtc
+ * @state: atomic state
+ * @crtc: DRM crtc
+ *
+ * This function walks the current configuration and adds all connectors
+ * currently using @crtc to the atomic configuration @state. Note that this
+ * function must acquire the connection mutex. This can potentially cause
+ * unneeded serialization if the update is just for the planes on one crtc. Hence
+ * drivers and helpers should only call this when really needed (e.g. when a
+ * full modeset needs to happen due to some change).
+ *
+ * Returns:
+ * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
+ * then the w/w mutex code has detected a deadlock and the entire atomic
+ * sequence must be restarted. All other errors are fatal.
+ */
+int
+drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
+{
+ struct drm_mode_config *config = &state->dev->mode_config;
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ int ret;
+
+ ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
+ if (ret)
+ return ret;
+
+ DRM_DEBUG_KMS("Adding all current connectors for [CRTC:%d] to %p\n",
+ crtc->base.id, state);
+
+ /*
+ * Changed connectors are already in @state, so only need to look at the
+ * current configuration.
+ */
+ list_for_each_entry(connector, &config->connector_list, head) {
+ if (connector->state->crtc != crtc)
+ continue;
+
+ conn_state = drm_atomic_get_connector_state(state, connector);
+ if (IS_ERR(conn_state))
+ return PTR_ERR(conn_state);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_atomic_add_affected_connectors);
+
+/**
+ * drm_atomic_connectors_for_crtc - count number of connected outputs
+ * @state: atomic state
+ * @crtc: DRM crtc
+ *
+ * This function counts all connectors which will be connected to @crtc
+ * according to @state. Useful to recompute the enable state for @crtc.
+ */
+int
+drm_atomic_connectors_for_crtc(struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
+{
+ int i, num_connected_connectors = 0;
+
+ for (i = 0; i < state->num_connector; i++) {
+ struct drm_connector_state *conn_state;
+
+ conn_state = state->connector_states[i];
+
+ if (conn_state && conn_state->crtc == crtc)
+ num_connected_connectors++;
+ }
+
+ DRM_DEBUG_KMS("State %p has %i connectors for [CRTC:%d]\n",
+ state, num_connected_connectors, crtc->base.id);
+
+ return num_connected_connectors;
+}
+EXPORT_SYMBOL(drm_atomic_connectors_for_crtc);
+
+/**
+ * drm_atomic_legacy_backoff - locking backoff for legacy ioctls
+ * @state: atomic state
+ *
+ * This function should be used by legacy entry points which don't understand
+ * -EDEADLK semantics. For simplicity this one will grab all modeset locks after
+ * the slowpath completed.
+ */
+void drm_atomic_legacy_backoff(struct drm_atomic_state *state)
+{
+ int ret;
+
+retry:
+ drm_modeset_backoff(state->acquire_ctx);
+
+ ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex,
+ state->acquire_ctx);
+ if (ret)
+ goto retry;
+ ret = drm_modeset_lock_all_crtcs(state->dev,
+ state->acquire_ctx);
+ if (ret)
+ goto retry;
+}
+EXPORT_SYMBOL(drm_atomic_legacy_backoff);
+
+/**
+ * drm_atomic_check_only - check whether a given config would work
+ * @state: atomic configuration to check
+ *
+ * Note that this function can return -EDEADLK if the driver needed to acquire
+ * more locks but encountered a deadlock. The caller must then do the usual w/w
+ * backoff dance and restart. All other errors are fatal.
+ *
+ * Returns:
+ * 0 on success, negative error code on failure.
+ */
+int drm_atomic_check_only(struct drm_atomic_state *state)
+{
+ struct drm_mode_config *config = &state->dev->mode_config;
+
+ DRM_DEBUG_KMS("checking %p\n", state);
+
+ if (config->funcs->atomic_check)
+ return config->funcs->atomic_check(state->dev, state);
+ else
+ return 0;
+}
+EXPORT_SYMBOL(drm_atomic_check_only);
+
+/**
+ * drm_atomic_commit - commit configuration atomically
+ * @state: atomic configuration to check
+ *
+ * Note that this function can return -EDEADLK if the driver needed to acquire
+ * more locks but encountered a deadlock. The caller must then do the usual w/w
+ * backoff dance and restart. All other errors are fatal.
+ *
+ * Also note that on successful execution ownership of @state is transferred
+ * from the caller of this function to the function itself. The caller must not
+ * free or in any other way access @state. If the function fails then the caller
+ * must clean up @state itself.
+ *
+ * Returns:
+ * 0 on success, negative error code on failure.
+ */
+int drm_atomic_commit(struct drm_atomic_state *state)
+{
+ struct drm_mode_config *config = &state->dev->mode_config;
+ int ret;
+
+ ret = drm_atomic_check_only(state);
+ if (ret)
+ return ret;
+
+ DRM_DEBUG_KMS("committing %p\n", state);
+
+ return config->funcs->atomic_commit(state->dev, state, false);
+}
+EXPORT_SYMBOL(drm_atomic_commit);
+
+/**
+ * drm_atomic_async_commit - atomic&async configuration commit
+ * @state: atomic configuration to check
+ *
+ * Note that this function can return -EDEADLK if the driver needed to acquire
+ * more locks but encountered a deadlock. The caller must then do the usual w/w
+ * backoff dance and restart. All other errors are fatal.
+ *
+ * Also note that on successful execution ownership of @state is transferred
+ * from the caller of this function to the function itself. The caller must not
+ * free or in any other way access @state. If the function fails then the caller
+ * must clean up @state itself.
+ *
+ * Returns:
+ * 0 on success, negative error code on failure.
+ */
+int drm_atomic_async_commit(struct drm_atomic_state *state)
+{
+ struct drm_mode_config *config = &state->dev->mode_config;
+ int ret;
+
+ ret = drm_atomic_check_only(state);
+ if (ret)
+ return ret;
+
+ DRM_DEBUG_KMS("committing %p asynchronously\n", state);
+
+ return config->funcs->atomic_commit(state->dev, state, true);
+}
+EXPORT_SYMBOL(drm_atomic_async_commit);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
new file mode 100644
index 00000000000..4a78a773151
--- /dev/null
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -0,0 +1,1966 @@
+/*
+ * Copyright (C) 2014 Red Hat
+ * Copyright (C) 2014 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic_helper.h>
+#include <linux/fence.h>
+
+/**
+ * DOC: overview
+ *
+ * This helper library provides implementations of check and commit functions on
+ * top of the CRTC modeset helper callbacks and the plane helper callbacks. It
+ * also provides convenience implementations for the atomic state handling
+ * callbacks for drivers which don't need to subclass the drm core structures to
+ * add their own additional internal state.
+ *
+ * This library also provides default implementations for the check callback in
+ * drm_atomic_helper_check and for the commit callback with
+ * drm_atomic_helper_commit. But the individual stages and callbacks are exposed
+ * to allow drivers to mix and match and e.g. use the plane helpers only
+ * together with a driver private modeset implementation.
+ *
+ * This library also provides implementations for all the legacy driver
+ * interfaces on top of the atomic interface. See drm_atomic_helper_set_config,
+ * drm_atomic_helper_update_plane, drm_atomic_helper_disable_plane and the
+ * various functions to implement set_property callbacks. New drivers must not
+ * implement these functions themselves but must use the provided helpers.
+ */
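A minimal sketch of how a driver wires these helpers into its mode_config funcs (hypothetical "foo" driver; foo_user_framebuffer_create is an assumed driver-specific callback):

        static const struct drm_mode_config_funcs foo_mode_config_funcs = {
                .fb_create = foo_user_framebuffer_create, /* driver-specific, assumed */
                .atomic_check = drm_atomic_helper_check,
                .atomic_commit = drm_atomic_helper_commit,
        };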
+static void
+drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
+ struct drm_plane_state *plane_state,
+ struct drm_plane *plane)
+{
+ struct drm_crtc_state *crtc_state;
+
+ if (plane->state->crtc) {
+ crtc_state = state->crtc_states[drm_crtc_index(plane->crtc)];
+
+ if (WARN_ON(!crtc_state))
+ return;
+
+ crtc_state->planes_changed = true;
+ }
+
+ if (plane_state->crtc) {
+ crtc_state =
+ state->crtc_states[drm_crtc_index(plane_state->crtc)];
+
+ if (WARN_ON(!crtc_state))
+ return;
+
+ crtc_state->planes_changed = true;
+ }
+}
+
+static struct drm_crtc *
+get_current_crtc_for_encoder(struct drm_device *dev,
+ struct drm_encoder *encoder)
+{
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_connector *connector;
+
+ WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
+
+ list_for_each_entry(connector, &config->connector_list, head) {
+ if (connector->state->best_encoder != encoder)
+ continue;
+
+ return connector->state->crtc;
+ }
+
+ return NULL;
+}
+
+static int
+steal_encoder(struct drm_atomic_state *state,
+ struct drm_encoder *encoder,
+ struct drm_crtc *encoder_crtc)
+{
+ struct drm_mode_config *config = &state->dev->mode_config;
+ struct drm_crtc_state *crtc_state;
+ struct drm_connector *connector;
+ struct drm_connector_state *connector_state;
+ int ret;
+
+ /*
+ * We can only steal an encoder coming from a connector, which means we
+ * must already hold the connection_mutex.
+ */
+ WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
+
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] in use on [CRTC:%d], stealing it\n",
+ encoder->base.id, encoder->name,
+ encoder_crtc->base.id);
+
+ crtc_state = drm_atomic_get_crtc_state(state, encoder_crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ crtc_state->mode_changed = true;
+
+ list_for_each_entry(connector, &config->connector_list, head) {
+ if (connector->state->best_encoder != encoder)
+ continue;
+
+ DRM_DEBUG_KMS("Stealing encoder from [CONNECTOR:%d:%s]\n",
+ connector->base.id,
+ connector->name);
+
+ connector_state = drm_atomic_get_connector_state(state,
+ connector);
+ if (IS_ERR(connector_state))
+ return PTR_ERR(connector_state);
+
+ ret = drm_atomic_set_crtc_for_connector(connector_state, NULL);
+ if (ret)
+ return ret;
+ connector_state->best_encoder = NULL;
+ }
+
+ return 0;
+}
+
+static int
+update_connector_routing(struct drm_atomic_state *state, int conn_idx)
+{
+ struct drm_connector_helper_funcs *funcs;
+ struct drm_encoder *new_encoder;
+ struct drm_crtc *encoder_crtc;
+ struct drm_connector *connector;
+ struct drm_connector_state *connector_state;
+ struct drm_crtc_state *crtc_state;
+ int idx, ret;
+
+ connector = state->connectors[conn_idx];
+ connector_state = state->connector_states[conn_idx];
+
+ if (!connector)
+ return 0;
+
+ DRM_DEBUG_KMS("Updating routing for [CONNECTOR:%d:%s]\n",
+ connector->base.id,
+ connector->name);
+
+ if (connector->state->crtc != connector_state->crtc) {
+ if (connector->state->crtc) {
+ idx = drm_crtc_index(connector->state->crtc);
+
+ crtc_state = state->crtc_states[idx];
+ crtc_state->mode_changed = true;
+ }
+
+ if (connector_state->crtc) {
+ idx = drm_crtc_index(connector_state->crtc);
+
+ crtc_state = state->crtc_states[idx];
+ crtc_state->mode_changed = true;
+ }
+ }
+
+ if (!connector_state->crtc) {
+ DRM_DEBUG_KMS("Disabling [CONNECTOR:%d:%s]\n",
+ connector->base.id,
+ connector->name);
+
+ connector_state->best_encoder = NULL;
+
+ return 0;
+ }
+
+ funcs = connector->helper_private;
+ new_encoder = funcs->best_encoder(connector);
+
+ if (!new_encoder) {
+ DRM_DEBUG_KMS("No suitable encoder found for [CONNECTOR:%d:%s]\n",
+ connector->base.id,
+ connector->name);
+ return -EINVAL;
+ }
+
+ if (new_encoder == connector_state->best_encoder) {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d]\n",
+ connector->base.id,
+ connector->name,
+ new_encoder->base.id,
+ new_encoder->name,
+ connector_state->crtc->base.id);
+
+ return 0;
+ }
+
+ encoder_crtc = get_current_crtc_for_encoder(state->dev,
+ new_encoder);
+
+ if (encoder_crtc) {
+ ret = steal_encoder(state, new_encoder, encoder_crtc);
+ if (ret) {
+ DRM_DEBUG_KMS("Encoder stealing failed for [CONNECTOR:%d:%s]\n",
+ connector->base.id,
+ connector->name);
+ return ret;
+ }
+ }
+
+ connector_state->best_encoder = new_encoder;
+ idx = drm_crtc_index(connector_state->crtc);
+
+ crtc_state = state->crtc_states[idx];
+ crtc_state->mode_changed = true;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d]\n",
+ connector->base.id,
+ connector->name,
+ new_encoder->base.id,
+ new_encoder->name,
+ connector_state->crtc->base.id);
+
+ return 0;
+}
+
+static int
+mode_fixup(struct drm_atomic_state *state)
+{
+ int ncrtcs = state->dev->mode_config.num_crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_connector_state *conn_state;
+ int i;
+ bool ret;
+
+ for (i = 0; i < ncrtcs; i++) {
+ crtc_state = state->crtc_states[i];
+
+ if (!crtc_state || !crtc_state->mode_changed)
+ continue;
+
+ drm_mode_copy(&crtc_state->adjusted_mode, &crtc_state->mode);
+ }
+
+ for (i = 0; i < state->num_connector; i++) {
+ struct drm_encoder_helper_funcs *funcs;
+ struct drm_encoder *encoder;
+
+ conn_state = state->connector_states[i];
+
+ if (!conn_state)
+ continue;
+
+ WARN_ON(!!conn_state->best_encoder != !!conn_state->crtc);
+
+ if (!conn_state->crtc || !conn_state->best_encoder)
+ continue;
+
+ crtc_state =
+ state->crtc_states[drm_crtc_index(conn_state->crtc)];
+
+ /*
+ * Each encoder has at most one connector (since we always steal
+ * it away), so we won't call ->mode_fixup twice.
+ */
+ encoder = conn_state->best_encoder;
+ funcs = encoder->helper_private;
+
+ if (encoder->bridge && encoder->bridge->funcs->mode_fixup) {
+ ret = encoder->bridge->funcs->mode_fixup(
+ encoder->bridge, &crtc_state->mode,
+ &crtc_state->adjusted_mode);
+ if (!ret) {
+ DRM_DEBUG_KMS("Bridge fixup failed\n");
+ return -EINVAL;
+ }
+ }
+
+
+ ret = funcs->mode_fixup(encoder, &crtc_state->mode,
+ &crtc_state->adjusted_mode);
+ if (!ret) {
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] fixup failed\n",
+ encoder->base.id, encoder->name);
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < ncrtcs; i++) {
+ struct drm_crtc_helper_funcs *funcs;
+ struct drm_crtc *crtc;
+
+ crtc_state = state->crtc_states[i];
+ crtc = state->crtcs[i];
+
+ if (!crtc_state || !crtc_state->mode_changed)
+ continue;
+
+ funcs = crtc->helper_private;
+ ret = funcs->mode_fixup(crtc, &crtc_state->mode,
+ &crtc_state->adjusted_mode);
+ if (!ret) {
+ DRM_DEBUG_KMS("[CRTC:%d] fixup failed\n",
+ crtc->base.id);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int
+drm_atomic_helper_check_modeset(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ int ncrtcs = dev->mode_config.num_crtc;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int i, ret;
+
+ for (i = 0; i < ncrtcs; i++) {
+ crtc = state->crtcs[i];
+ crtc_state = state->crtc_states[i];
+
+ if (!crtc)
+ continue;
+
+ if (!drm_mode_equal(&crtc->state->mode, &crtc_state->mode)) {
+ DRM_DEBUG_KMS("[CRTC:%d] mode changed\n",
+ crtc->base.id);
+ crtc_state->mode_changed = true;
+ }
+
+ if (crtc->state->enable != crtc_state->enable) {
+ DRM_DEBUG_KMS("[CRTC:%d] enable changed\n",
+ crtc->base.id);
+ crtc_state->mode_changed = true;
+ }
+ }
+
+ for (i = 0; i < state->num_connector; i++) {
+ /*
+ * This only sets crtc->mode_changed for routing changes;
+ * drivers must set crtc->mode_changed themselves when connector
+ * properties need to be updated.
+ */
+ ret = update_connector_routing(state, i);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * After all the routing has been prepared we need to add in any
+ * connector which is itself unchanged, but whose crtc changes its
+ * configuration. This must be done before calling mode_fixup in case a
+ * crtc only changed its mode but has the same set of connectors.
+ */
+ for (i = 0; i < ncrtcs; i++) {
+ int num_connectors;
+
+ crtc = state->crtcs[i];
+ crtc_state = state->crtc_states[i];
+
+ if (!crtc || !crtc_state->mode_changed)
+ continue;
+
+ DRM_DEBUG_KMS("[CRTC:%d] needs full modeset, enable: %c\n",
+ crtc->base.id,
+ crtc_state->enable ? 'y' : 'n');
+
+ ret = drm_atomic_add_affected_connectors(state, crtc);
+ if (ret != 0)
+ return ret;
+
+ num_connectors = drm_atomic_connectors_for_crtc(state,
+ crtc);
+
+ if (crtc_state->enable != !!num_connectors) {
+ DRM_DEBUG_KMS("[CRTC:%d] enabled/connectors mismatch\n",
+ crtc->base.id);
+
+ return -EINVAL;
+ }
+ }
+
+ return mode_fixup(state);
+}
+
+/**
+ * drm_atomic_helper_check - validate state object
+ * @dev: DRM device
+ * @state: the driver state object
+ *
+ * Check the state object to see if the requested state is physically possible.
+ * Only crtcs and planes have check callbacks, so for any additional (global)
+ * checking that a driver needs it can simply wrap that around this function.
+ * Drivers without such needs can directly use this as their ->atomic_check()
+ * callback.
+ *
+ * RETURNS
+ * Zero for success or -errno
+ */
+int drm_atomic_helper_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ int nplanes = dev->mode_config.num_total_plane;
+ int ncrtcs = dev->mode_config.num_crtc;
+ int i, ret = 0;
+
+ for (i = 0; i < nplanes; i++) {
+ struct drm_plane_helper_funcs *funcs;
+ struct drm_plane *plane = state->planes[i];
+ struct drm_plane_state *plane_state = state->plane_states[i];
+
+ if (!plane)
+ continue;
+
+ funcs = plane->helper_private;
+
+ drm_atomic_helper_plane_changed(state, plane_state, plane);
+
+ if (!funcs || !funcs->atomic_check)
+ continue;
+
+ ret = funcs->atomic_check(plane, plane_state);
+ if (ret) {
+ DRM_DEBUG_KMS("[PLANE:%d] atomic check failed\n",
+ plane->base.id);
+ return ret;
+ }
+ }
+
+ for (i = 0; i < ncrtcs; i++) {
+ struct drm_crtc_helper_funcs *funcs;
+ struct drm_crtc *crtc = state->crtcs[i];
+
+ if (!crtc)
+ continue;
+
+ funcs = crtc->helper_private;
+
+ if (!funcs || !funcs->atomic_check)
+ continue;
+
+ ret = funcs->atomic_check(crtc, state->crtc_states[i]);
+ if (ret) {
+ DRM_DEBUG_KMS("[CRTC:%d] atomic check failed\n",
+ crtc->base.id);
+ return ret;
+ }
+ }
+
+ ret = drm_atomic_helper_check_modeset(dev, state);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_atomic_helper_check);
+
+static void
+disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
+{
+ int ncrtcs = old_state->dev->mode_config.num_crtc;
+ int i;
+
+ for (i = 0; i < old_state->num_connector; i++) {
+ struct drm_connector_state *old_conn_state;
+ struct drm_connector *connector;
+ struct drm_encoder_helper_funcs *funcs;
+ struct drm_encoder *encoder;
+
+ old_conn_state = old_state->connector_states[i];
+ connector = old_state->connectors[i];
+
+ /* Shut down everything that's in the changeset and currently
+ * still on. So need to check the old, saved state. */
+ if (!old_conn_state || !old_conn_state->crtc)
+ continue;
+
+ encoder = old_conn_state->best_encoder;
+
+ /* We shouldn't get this far if we didn't previously have
+ * an encoder.. but WARN_ON() rather than explode.
+ */
+ if (WARN_ON(!encoder))
+ continue;
+
+ funcs = encoder->helper_private;
+
+ /*
+ * Each encoder has at most one connector (since we always steal
+ * it away), so we won't call the disable hooks twice.
+ */
+ if (encoder->bridge)
+ encoder->bridge->funcs->disable(encoder->bridge);
+
+ /* Right function depends upon target state. */
+ if (connector->state->crtc)
+ funcs->prepare(encoder);
+ else if (funcs->disable)
+ funcs->disable(encoder);
+ else
+ funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+
+ if (encoder->bridge)
+ encoder->bridge->funcs->post_disable(encoder->bridge);
+ }
+
+ for (i = 0; i < ncrtcs; i++) {
+ struct drm_crtc_helper_funcs *funcs;
+ struct drm_crtc *crtc;
+
+ crtc = old_state->crtcs[i];
+
+ /* Shut down everything that needs a full modeset. */
+ if (!crtc || !crtc->state->mode_changed)
+ continue;
+
+ funcs = crtc->helper_private;
+
+ /* Right function depends upon target state. */
+ if (crtc->state->enable)
+ funcs->prepare(crtc);
+ else if (funcs->disable)
+ funcs->disable(crtc);
+ else
+ funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+ }
+}
+
+static void
+set_routing_links(struct drm_device *dev, struct drm_atomic_state *old_state)
+{
+ int ncrtcs = old_state->dev->mode_config.num_crtc;
+ int i;
+
+ /* clear out existing links */
+ for (i = 0; i < old_state->num_connector; i++) {
+ struct drm_connector *connector;
+
+ connector = old_state->connectors[i];
+
+ if (!connector || !connector->encoder)
+ continue;
+
+ WARN_ON(!connector->encoder->crtc);
+
+ connector->encoder->crtc = NULL;
+ connector->encoder = NULL;
+ }
+
+ /* set new links */
+ for (i = 0; i < old_state->num_connector; i++) {
+ struct drm_connector *connector;
+
+ connector = old_state->connectors[i];
+
+ if (!connector || !connector->state->crtc)
+ continue;
+
+ if (WARN_ON(!connector->state->best_encoder))
+ continue;
+
+ connector->encoder = connector->state->best_encoder;
+ connector->encoder->crtc = connector->state->crtc;
+ }
+
+ /* set legacy state in the crtc structure */
+ for (i = 0; i < ncrtcs; i++) {
+ struct drm_crtc *crtc;
+
+ crtc = old_state->crtcs[i];
+
+ if (!crtc)
+ continue;
+
+ crtc->mode = crtc->state->mode;
+ crtc->enabled = crtc->state->enable;
+ crtc->x = crtc->primary->state->src_x >> 16;
+ crtc->y = crtc->primary->state->src_y >> 16;
+ }
+}
+
+static void
+crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
+{
+ int ncrtcs = old_state->dev->mode_config.num_crtc;
+ int i;
+
+ for (i = 0; i < ncrtcs; i++) {
+ struct drm_crtc_helper_funcs *funcs;
+ struct drm_crtc *crtc;
+
+ crtc = old_state->crtcs[i];
+
+ if (!crtc || !crtc->state->mode_changed)
+ continue;
+
+ funcs = crtc->helper_private;
+
+ if (crtc->state->enable)
+ funcs->mode_set_nofb(crtc);
+ }
+
+ for (i = 0; i < old_state->num_connector; i++) {
+ struct drm_connector *connector;
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_encoder_helper_funcs *funcs;
+ struct drm_encoder *encoder;
+ struct drm_display_mode *mode, *adjusted_mode;
+
+ connector = old_state->connectors[i];
+
+ if (!connector || !connector->state->best_encoder)
+ continue;
+
+ encoder = connector->state->best_encoder;
+ funcs = encoder->helper_private;
+ new_crtc_state = connector->state->crtc->state;
+ mode = &new_crtc_state->mode;
+ adjusted_mode = &new_crtc_state->adjusted_mode;
+
+ /*
+ * Each encoder has at most one connector (since we always steal
+ * it away), so we won't call the mode_set hooks twice.
+ */
+ funcs->mode_set(encoder, mode, adjusted_mode);
+
+ if (encoder->bridge && encoder->bridge->funcs->mode_set)
+ encoder->bridge->funcs->mode_set(encoder->bridge,
+ mode, adjusted_mode);
+ }
+}
+
+/**
+ * drm_atomic_helper_commit_pre_planes - modeset commit before plane updates
+ * @dev: DRM device
+ * @state: atomic state
+ *
+ * This function commits the modeset changes that need to be committed before
+ * updating planes. It shuts down all the outputs that need to be shut down and
+ * prepares them (if required) with the new mode.
+ */
+void drm_atomic_helper_commit_pre_planes(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ disable_outputs(dev, state);
+ set_routing_links(dev, state);
+ crtc_set_mode(dev, state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_commit_pre_planes);
+
+/**
+ * drm_atomic_helper_commit_post_planes - modeset commit after plane updates
+ * @dev: DRM device
+ * @old_state: atomic state object with old state structures
+ *
+ * This function commits the modeset changes that need to be committed after
+ * updating planes: It enables all the outputs with the new configuration which
+ * had to be turned off for the update.
+ */
+void drm_atomic_helper_commit_post_planes(struct drm_device *dev,
+ struct drm_atomic_state *old_state)
+{
+ int ncrtcs = old_state->dev->mode_config.num_crtc;
+ int i;
+
+ for (i = 0; i < ncrtcs; i++) {
+ struct drm_crtc_helper_funcs *funcs;
+ struct drm_crtc *crtc;
+
+ crtc = old_state->crtcs[i];
+
+ /* Need to filter out CRTCs where only planes change. */
+ if (!crtc || !crtc->state->mode_changed)
+ continue;
+
+ funcs = crtc->helper_private;
+
+ if (crtc->state->enable)
+ funcs->commit(crtc);
+ }
+
+ for (i = 0; i < old_state->num_connector; i++) {
+ struct drm_connector *connector;
+ struct drm_encoder_helper_funcs *funcs;
+ struct drm_encoder *encoder;
+
+ connector = old_state->connectors[i];
+
+ if (!connector || !connector->state->best_encoder)
+ continue;
+
+ encoder = connector->state->best_encoder;
+ funcs = encoder->helper_private;
+
+ /*
+ * Each encoder has at most one connector (since we always steal
+ * it away), so we won't call the enable hooks twice.
+ */
+ if (encoder->bridge)
+ encoder->bridge->funcs->pre_enable(encoder->bridge);
+
+ funcs->commit(encoder);
+
+ if (encoder->bridge)
+ encoder->bridge->funcs->enable(encoder->bridge);
+ }
+}
+EXPORT_SYMBOL(drm_atomic_helper_commit_post_planes);
+
+static void wait_for_fences(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ int nplanes = dev->mode_config.num_total_plane;
+ int i;
+
+ for (i = 0; i < nplanes; i++) {
+ struct drm_plane *plane = state->planes[i];
+
+ if (!plane || !plane->state->fence)
+ continue;
+
+ WARN_ON(!plane->state->fb);
+
+ fence_wait(plane->state->fence, false);
+ fence_put(plane->state->fence);
+ plane->state->fence = NULL;
+ }
+}
+
+static bool framebuffer_changed(struct drm_device *dev,
+ struct drm_atomic_state *old_state,
+ struct drm_crtc *crtc)
+{
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state;
+ int nplanes = old_state->dev->mode_config.num_total_plane;
+ int i;
+
+ for (i = 0; i < nplanes; i++) {
+ plane = old_state->planes[i];
+ old_plane_state = old_state->plane_states[i];
+
+ if (!plane)
+ continue;
+
+ if (plane->state->crtc != crtc &&
+ old_plane_state->crtc != crtc)
+ continue;
+
+ if (plane->state->fb != old_plane_state->fb)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * drm_atomic_helper_wait_for_vblanks - wait for vblank on crtcs
+ * @dev: DRM device
+ * @old_state: atomic state object with old state structures
+ *
+ * Helper to, after atomic commit, wait for vblanks on all affected
+ * crtcs (i.e. before cleaning up old framebuffers using
+ * drm_atomic_helper_cleanup_planes()). It will only wait on crtcs where the
+ * framebuffers have actually changed to optimize for the legacy cursor and
+ * plane update use-case.
+ */
+void
+drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+ int ncrtcs = old_state->dev->mode_config.num_crtc;
+ int i, ret;
+
+ for (i = 0; i < ncrtcs; i++) {
+ crtc = old_state->crtcs[i];
+ old_crtc_state = old_state->crtc_states[i];
+
+ if (!crtc)
+ continue;
+
+ /* No one cares about the old state, so abuse it for tracking
+ * and store whether we hold a vblank reference (and should do a
+ * vblank wait) in the ->enable boolean. */
+ old_crtc_state->enable = false;
+
+ if (!crtc->state->enable)
+ continue;
+
+ if (!framebuffer_changed(dev, old_state, crtc))
+ continue;
+
+ ret = drm_crtc_vblank_get(crtc);
+ if (ret != 0)
+ continue;
+
+ old_crtc_state->enable = true;
+ old_crtc_state->last_vblank_count = drm_vblank_count(dev, i);
+ }
+
+ for (i = 0; i < ncrtcs; i++) {
+ crtc = old_state->crtcs[i];
+ old_crtc_state = old_state->crtc_states[i];
+
+ if (!crtc || !old_crtc_state->enable)
+ continue;
+
+ ret = wait_event_timeout(dev->vblank[i].queue,
+ old_crtc_state->last_vblank_count !=
+ drm_vblank_count(dev, i),
+ msecs_to_jiffies(50));
+
+ drm_crtc_vblank_put(crtc);
+ }
+}
+EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
+
+/**
+ * drm_atomic_helper_commit - commit validated state object
+ * @dev: DRM device
+ * @state: the driver state object
+ * @async: asynchronous commit
+ *
+ * This function commits a state object pre-validated with drm_atomic_helper_check().
+ * This can still fail when e.g. the framebuffer reservation fails. For
+ * now this doesn't implement asynchronous commits.
+ *
+ * RETURNS
+ * Zero for success or -errno.
+ */
+int drm_atomic_helper_commit(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool async)
+{
+ int ret;
+
+ if (async)
+ return -EBUSY;
+
+ ret = drm_atomic_helper_prepare_planes(dev, state);
+ if (ret)
+ return ret;
+
+ /*
+ * This is the point of no return - everything below never fails except
+ * when the hw goes bonghits. Which means we can commit the new state on
+ * the software side now.
+ */
+
+ drm_atomic_helper_swap_state(dev, state);
+
+ /*
+ * Everything below can be run asynchronously without the need to grab
+ * any modeset locks at all under one condition: it must be guaranteed
+ * that the asynchronous work has either been cancelled (if the driver
+ * supports it, which at least requires that the framebuffers get
+ * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
+ * before the new state gets committed on the software side with
+ * drm_atomic_helper_swap_state().
+ *
+ * This scheme allows new atomic state updates to be prepared and
+ * checked in parallel to the asynchronous completion of the previous
+ * update. Which is important since compositors need to figure out the
+ * composition of the next frame right after having submitted the
+ * current layout.
+ */
+
+ wait_for_fences(dev, state);
+
+ drm_atomic_helper_commit_pre_planes(dev, state);
+
+ drm_atomic_helper_commit_planes(dev, state);
+
+ drm_atomic_helper_commit_post_planes(dev, state);
+
+ drm_atomic_helper_wait_for_vblanks(dev, state);
+
+ drm_atomic_helper_cleanup_planes(dev, state);
+
+ drm_atomic_state_free(state);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_atomic_helper_commit);
+
+/**
+ * DOC: implementing async commit
+ *
+ * For now the atomic helpers don't support async commit directly. If there is
+ * real need it could be added though, using the dma-buf fence infrastructure
+ * for generic synchronization with outstanding rendering.
+ *
+ * For now drivers have to implement async commit themselves, with the following
+ * sequence being the recommended one:
+ *
+ * 1. Run drm_atomic_helper_prepare_planes() first. This is the only function
+ * which commit needs to call which can fail, so we want to run it first and
+ * synchronously.
+ *
+ * 2. Synchronize with any outstanding asynchronous commit worker threads which
+ * might be affected by the new state update. This can be done by either cancelling
+ * or flushing the work items, depending upon whether the driver can deal with
+ * cancelled updates. Note that it is important to ensure that the framebuffer
+ * cleanup is still done when cancelling.
+ *
+ * For sufficient parallelism it is recommended to have a work item per crtc
+ * (for updates which don't touch global state) and a global one. Then we only
+ * need to synchronize with the crtc work items for changed crtcs and the global
+ * work item, which allows nice concurrent updates on disjoint sets of crtcs.
+ *
+ * 3. The software state is updated synchronously with
+ * drm_atomic_helper_swap_state. Doing this under the protection of all modeset
+ * locks means concurrent callers never see inconsistent state. And doing this
+ * while it's guaranteed that no relevant async worker runs means that async
+ * workers do not need to grab any locks. Actually they must not grab locks, for
+ * otherwise the work flushing will deadlock.
+ *
+ * 4. Schedule a work item to do all subsequent steps, using the split-out
+ * commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and
+ * then cleaning up the framebuffers after the old framebuffer is no longer
+ * being displayed.
+ */
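A rough sketch of step 4, assuming a hypothetical per-commit work item (the foo_* names are invented; the usual workqueue infrastructure is assumed):

        struct foo_commit {
                struct work_struct work;
                struct drm_device *dev;
                struct drm_atomic_state *state;
        };

        static void foo_atomic_commit_work(struct work_struct *work)
        {
                struct foo_commit *commit =
                        container_of(work, struct foo_commit, work);

                /* The state pointers were already swapped before this was scheduled. */
                drm_atomic_helper_commit_pre_planes(commit->dev, commit->state);
                drm_atomic_helper_commit_planes(commit->dev, commit->state);
                drm_atomic_helper_commit_post_planes(commit->dev, commit->state);
                drm_atomic_helper_wait_for_vblanks(commit->dev, commit->state);
                drm_atomic_helper_cleanup_planes(commit->dev, commit->state);
                drm_atomic_state_free(commit->state);
                kfree(commit);
        }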
+
+/**
+ * drm_atomic_helper_prepare_planes - prepare plane resources before commit
+ * @dev: DRM device
+ * @state: atomic state object with old state structures
+ *
+ * This function prepares plane state, specifically framebuffers, for the new
+ * configuration. If any failure is encountered this function will call
+ * ->cleanup_fb on any already successfully prepared framebuffer.
+ *
+ * Returns:
+ * 0 on success, negative error code on failure.
+ */
+int drm_atomic_helper_prepare_planes(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ int nplanes = dev->mode_config.num_total_plane;
+ int ret, i;
+
+ for (i = 0; i < nplanes; i++) {
+ struct drm_plane_helper_funcs *funcs;
+ struct drm_plane *plane = state->planes[i];
+ struct drm_framebuffer *fb;
+
+ if (!plane)
+ continue;
+
+ funcs = plane->helper_private;
+
+ fb = state->plane_states[i]->fb;
+
+ if (fb && funcs->prepare_fb) {
+ ret = funcs->prepare_fb(plane, fb);
+ if (ret)
+ goto fail;
+ }
+ }
+
+ return 0;
+
+fail:
+ for (i--; i >= 0; i--) {
+ struct drm_plane_helper_funcs *funcs;
+ struct drm_plane *plane = state->planes[i];
+ struct drm_framebuffer *fb;
+
+ if (!plane)
+ continue;
+
+ funcs = plane->helper_private;
+
+ fb = state->plane_states[i]->fb;
+
+ if (fb && funcs->cleanup_fb)
+ funcs->cleanup_fb(plane, fb);
+
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
+
+/**
+ * drm_atomic_helper_commit_planes - commit plane state
+ * @dev: DRM device
+ * @old_state: atomic state object with old state structures
+ *
+ * This function commits the new plane state using the plane and atomic helper
+ * functions for planes and crtcs. It assumes that the atomic state has already
+ * been pushed into the relevant object state pointers, since this step can no
+ * longer fail.
+ *
+ * It still requires the global state object @old_state to know which planes and
+ * crtcs need to be updated though.
+ */
+void drm_atomic_helper_commit_planes(struct drm_device *dev,
+ struct drm_atomic_state *old_state)
+{
+ int nplanes = dev->mode_config.num_total_plane;
+ int ncrtcs = dev->mode_config.num_crtc;
+ int i;
+
+ for (i = 0; i < ncrtcs; i++) {
+ struct drm_crtc_helper_funcs *funcs;
+ struct drm_crtc *crtc = old_state->crtcs[i];
+
+ if (!crtc)
+ continue;
+
+ funcs = crtc->helper_private;
+
+ if (!funcs || !funcs->atomic_begin)
+ continue;
+
+ funcs->atomic_begin(crtc);
+ }
+
+ for (i = 0; i < nplanes; i++) {
+ struct drm_plane_helper_funcs *funcs;
+ struct drm_plane *plane = old_state->planes[i];
+ struct drm_plane_state *old_plane_state;
+
+ if (!plane)
+ continue;
+
+ funcs = plane->helper_private;
+
+ if (!funcs || !funcs->atomic_update)
+ continue;
+
+ old_plane_state = old_state->plane_states[i];
+
+ funcs->atomic_update(plane, old_plane_state);
+ }
+
+ for (i = 0; i < ncrtcs; i++) {
+ struct drm_crtc_helper_funcs *funcs;
+ struct drm_crtc *crtc = old_state->crtcs[i];
+
+ if (!crtc)
+ continue;
+
+ funcs = crtc->helper_private;
+
+ if (!funcs || !funcs->atomic_flush)
+ continue;
+
+ funcs->atomic_flush(crtc);
+ }
+}
+EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
+
+/**
+ * drm_atomic_helper_cleanup_planes - cleanup plane resources after commit
+ * @dev: DRM device
+ * @old_state: atomic state object with old state structures
+ *
+ * This function cleans up plane state, specifically framebuffers, from the old
+ * configuration. Hence the old configuration must be preserved in @old_state to
+ * be able to call this function.
+ *
+ * This function must also be called on the new state when the atomic update
+ * fails at any point after calling drm_atomic_helper_prepare_planes().
+ */
+void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
+ struct drm_atomic_state *old_state)
+{
+ int nplanes = dev->mode_config.num_total_plane;
+ int i;
+
+ for (i = 0; i < nplanes; i++) {
+ struct drm_plane_helper_funcs *funcs;
+ struct drm_plane *plane = old_state->planes[i];
+ struct drm_framebuffer *old_fb;
+
+ if (!plane)
+ continue;
+
+ funcs = plane->helper_private;
+
+ old_fb = old_state->plane_states[i]->fb;
+
+ if (old_fb && funcs->cleanup_fb)
+ funcs->cleanup_fb(plane, old_fb);
+ }
+}
+EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
+
+/**
+ * drm_atomic_helper_swap_state - store atomic state into current sw state
+ * @dev: DRM device
+ * @state: atomic state
+ *
+ * This function stores the atomic state into the current state pointers in all
+ * driver objects. It should be called after all failing steps have been done
+ * and succeeded, but before the actual hardware state is committed.
+ *
+ * For cleanup and error recovery the current state for all changed objects will
+ * be swapped into @state.
+ *
+ * With that sequence it fits perfectly into the plane prepare/cleanup sequence:
+ *
+ * 1. Call drm_atomic_helper_prepare_planes() with the staged atomic state.
+ *
+ * 2. Do any other steps that might fail.
+ *
+ * 3. Put the staged state into the current state pointers with this function.
+ *
+ * 4. Actually commit the hardware state.
+ *
+ * 5. Call drm_atomic_helper_cleanup_planes with @state, which since step 3
+ * contains the old state. Also do any other cleanup required with that state.
+ */
+void drm_atomic_helper_swap_state(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ int i;
+
+ for (i = 0; i < dev->mode_config.num_connector; i++) {
+ struct drm_connector *connector = state->connectors[i];
+
+ if (!connector)
+ continue;
+
+ connector->state->state = state;
+ swap(state->connector_states[i], connector->state);
+ connector->state->state = NULL;
+ }
+
+ for (i = 0; i < dev->mode_config.num_crtc; i++) {
+ struct drm_crtc *crtc = state->crtcs[i];
+
+ if (!crtc)
+ continue;
+
+ crtc->state->state = state;
+ swap(state->crtc_states[i], crtc->state);
+ crtc->state->state = NULL;
+ }
+
+ for (i = 0; i < dev->mode_config.num_total_plane; i++) {
+ struct drm_plane *plane = state->planes[i];
+
+ if (!plane)
+ continue;
+
+ plane->state->state = state;
+ swap(state->plane_states[i], plane->state);
+ plane->state->state = NULL;
+ }
+}
+EXPORT_SYMBOL(drm_atomic_helper_swap_state);
+
+/**
+ * drm_atomic_helper_update_plane - Helper for primary plane update using atomic
+ * @plane: plane object to update
+ * @crtc: owning CRTC of owning plane
+ * @fb: framebuffer to flip onto plane
+ * @crtc_x: x offset of primary plane on crtc
+ * @crtc_y: y offset of primary plane on crtc
+ * @crtc_w: width of primary plane rectangle on crtc
+ * @crtc_h: height of primary plane rectangle on crtc
+ * @src_x: x offset of @fb for panning
+ * @src_y: y offset of @fb for panning
+ * @src_w: width of source rectangle in @fb
+ * @src_h: height of source rectangle in @fb
+ *
+ * Provides a default plane update handler using the atomic driver interface.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+int drm_atomic_helper_update_plane(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_atomic_state *state;
+ struct drm_plane_state *plane_state;
+ int ret = 0;
+
+ state = drm_atomic_state_alloc(plane->dev);
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
+retry:
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ goto fail;
+ }
+
+ ret = drm_atomic_set_crtc_for_plane(state, plane, crtc);
+ if (ret != 0)
+ goto fail;
+ drm_atomic_set_fb_for_plane(plane_state, fb);
+ plane_state->crtc_x = crtc_x;
+ plane_state->crtc_y = crtc_y;
+ plane_state->crtc_h = crtc_h;
+ plane_state->crtc_w = crtc_w;
+ plane_state->src_x = src_x;
+ plane_state->src_y = src_y;
+ plane_state->src_h = src_h;
+ plane_state->src_w = src_w;
+
+ ret = drm_atomic_commit(state);
+ if (ret != 0)
+ goto fail;
+
+ /* Driver takes ownership of state on successful commit. */
+ return 0;
+fail:
+ if (ret == -EDEADLK)
+ goto backoff;
+
+ drm_atomic_state_free(state);
+
+ return ret;
+backoff:
+ drm_atomic_state_clear(state);
+ drm_atomic_legacy_backoff(state);
+
+ /*
+ * Someone might have exchanged the framebuffer while we dropped locks
+ * in the backoff code. We need to fix up the fb refcount tracking the
+ * core does for us.
+ */
+ plane->old_fb = plane->fb;
+
+ goto retry;
+}
+EXPORT_SYMBOL(drm_atomic_helper_update_plane);
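These legacy entry points slot directly into a driver's plane funcs table; a sketch with assumed names (remaining callbacks elided):

        static const struct drm_plane_funcs foo_plane_funcs = {
                .update_plane = drm_atomic_helper_update_plane,
                .disable_plane = drm_atomic_helper_disable_plane,
                .destroy = drm_plane_cleanup,
                /* plus the driver's atomic state duplicate/destroy callbacks */
        };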
+
+/**
+ * drm_atomic_helper_disable_plane - Helper for primary plane disable using atomic
+ * @plane: plane to disable
+ *
+ * Provides a default plane disable handler using the atomic driver interface.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+int drm_atomic_helper_disable_plane(struct drm_plane *plane)
+{
+ struct drm_atomic_state *state;
+ struct drm_plane_state *plane_state;
+ int ret = 0;
+
+ /*
+ * FIXME: Without plane->crtc set we can't get at the implicit legacy
+ * acquire context. The real fix will be to wire the acquire ctx through
+ * everywhere we need it, but meanwhile prevent chaos by just skipping
+ * this noop. The critical case is the cursor ioctls which a) only grab
+ * crtc/cursor-plane locks (so we need the crtc to get at the right
+ * acquire context) and b) can try to disable the plane multiple times.
+ */
+ if (!plane->crtc)
+ return 0;
+
+ state = drm_atomic_state_alloc(plane->dev);
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = drm_modeset_legacy_acquire_ctx(plane->crtc);
+retry:
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ goto fail;
+ }
+
+ ret = drm_atomic_set_crtc_for_plane(state, plane, NULL);
+ if (ret != 0)
+ goto fail;
+ drm_atomic_set_fb_for_plane(plane_state, NULL);
+ plane_state->crtc_x = 0;
+ plane_state->crtc_y = 0;
+ plane_state->crtc_h = 0;
+ plane_state->crtc_w = 0;
+ plane_state->src_x = 0;
+ plane_state->src_y = 0;
+ plane_state->src_h = 0;
+ plane_state->src_w = 0;
+
+ ret = drm_atomic_commit(state);
+ if (ret != 0)
+ goto fail;
+
+ /* Driver takes ownership of state on successful commit. */
+ return 0;
+fail:
+ if (ret == -EDEADLK)
+ goto backoff;
+
+ drm_atomic_state_free(state);
+
+ return ret;
+backoff:
+ drm_atomic_state_clear(state);
+ drm_atomic_legacy_backoff(state);
+
+ /*
+ * Someone might have exchanged the framebuffer while we dropped locks
+ * in the backoff code. We need to fix up the fb refcount tracking the
+ * core does for us.
+ */
+ plane->old_fb = plane->fb;
+
+ goto retry;
+}
+EXPORT_SYMBOL(drm_atomic_helper_disable_plane);
+
+static int update_output_state(struct drm_atomic_state *state,
+ struct drm_mode_set *set)
+{
+ struct drm_device *dev = set->crtc->dev;
+ struct drm_connector_state *conn_state;
+ int ncrtcs = state->dev->mode_config.num_crtc;
+ int ret, i, j;
+
+ ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
+ state->acquire_ctx);
+ if (ret)
+ return ret;
+
+ /* First grab all affected connector/crtc states. */
+ for (i = 0; i < set->num_connectors; i++) {
+ conn_state = drm_atomic_get_connector_state(state,
+ set->connectors[i]);
+ if (IS_ERR(conn_state))
+ return PTR_ERR(conn_state);
+ }
+
+ for (i = 0; i < ncrtcs; i++) {
+ struct drm_crtc *crtc = state->crtcs[i];
+
+ if (!crtc)
+ continue;
+
+ ret = drm_atomic_add_affected_connectors(state, crtc);
+ if (ret)
+ return ret;
+ }
+
+ /* Then recompute connector->crtc links and crtc enabling state. */
+ for (i = 0; i < state->num_connector; i++) {
+ struct drm_connector *connector;
+
+ connector = state->connectors[i];
+ conn_state = state->connector_states[i];
+
+ if (!connector)
+ continue;
+
+ if (conn_state->crtc == set->crtc) {
+ ret = drm_atomic_set_crtc_for_connector(conn_state,
+ NULL);
+ if (ret)
+ return ret;
+ }
+
+ for (j = 0; j < set->num_connectors; j++) {
+ if (set->connectors[j] == connector) {
+ ret = drm_atomic_set_crtc_for_connector(conn_state,
+ set->crtc);
+ if (ret)
+ return ret;
+ break;
+ }
+ }
+ }
+
+ for (i = 0; i < ncrtcs; i++) {
+ struct drm_crtc *crtc = state->crtcs[i];
+ struct drm_crtc_state *crtc_state = state->crtc_states[i];
+
+ if (!crtc)
+ continue;
+
+ /* Don't update ->enable for the CRTC in the set_config request,
+ * since a mismatch would indicate a bug in the upper layers.
+ * The actual modeset code later on will catch any
+ * inconsistencies here. */
+ if (crtc == set->crtc)
+ continue;
+
+ crtc_state->enable =
+ drm_atomic_connectors_for_crtc(state, crtc);
+ }
+
+ return 0;
+}
+
+/**
+ * drm_atomic_helper_set_config - set a new config from userspace
+ * @set: mode set configuration
+ *
+ * Provides a default crtc set_config handler using the atomic driver interface.
+ *
+ * Returns:
+ * Returns 0 on success, negative errno numbers on failure.
+ */
+int drm_atomic_helper_set_config(struct drm_mode_set *set)
+{
+ struct drm_atomic_state *state;
+ struct drm_crtc *crtc = set->crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_plane_state *primary_state;
+ int ret = 0;
+
+ state = drm_atomic_state_alloc(crtc->dev);
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
+retry:
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto fail;
+ }
+
+ primary_state = drm_atomic_get_plane_state(state, crtc->primary);
+ if (IS_ERR(primary_state)) {
+ ret = PTR_ERR(primary_state);
+ goto fail;
+ }
+
+ if (!set->mode) {
+ WARN_ON(set->fb);
+ WARN_ON(set->num_connectors);
+
+ crtc_state->enable = false;
+
+ ret = drm_atomic_set_crtc_for_plane(state, crtc->primary, NULL);
+ if (ret != 0)
+ goto fail;
+
+ drm_atomic_set_fb_for_plane(primary_state, NULL);
+
+ goto commit;
+ }
+
+ WARN_ON(!set->fb);
+ WARN_ON(!set->num_connectors);
+
+ crtc_state->enable = true;
+ drm_mode_copy(&crtc_state->mode, set->mode);
+
+ ret = drm_atomic_set_crtc_for_plane(state, crtc->primary, crtc);
+ if (ret != 0)
+ goto fail;
+ drm_atomic_set_fb_for_plane(primary_state, set->fb);
+ primary_state->crtc_x = 0;
+ primary_state->crtc_y = 0;
+ primary_state->crtc_h = set->mode->vdisplay;
+ primary_state->crtc_w = set->mode->hdisplay;
+ primary_state->src_x = set->x << 16;
+ primary_state->src_y = set->y << 16;
+ primary_state->src_h = set->mode->vdisplay << 16;
+ primary_state->src_w = set->mode->hdisplay << 16;
+
+commit:
+ ret = update_output_state(state, set);
+ if (ret)
+ goto fail;
+
+ ret = drm_atomic_commit(state);
+ if (ret != 0)
+ goto fail;
+
+ /* Driver takes ownership of state on successful commit. */
+ return 0;
+fail:
+ if (ret == -EDEADLK)
+ goto backoff;
+
+ drm_atomic_state_free(state);
+
+ return ret;
+backoff:
+ drm_atomic_state_clear(state);
+ drm_atomic_legacy_backoff(state);
+
+ /*
+ * Someone might have exchanged the framebuffer while we dropped locks
+ * in the backoff code. We need to fix up the fb refcount tracking the
+ * core does for us.
+ */
+ crtc->primary->old_fb = crtc->primary->fb;
+
+ goto retry;
+}
+EXPORT_SYMBOL(drm_atomic_helper_set_config);
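The CRTC side looks analogous: the legacy set_config (and, via the helper further below, page_flip) entry points can be routed through the atomic path. A hedged sketch using hypothetical foo_ names:

static const struct drm_crtc_funcs foo_crtc_funcs = {
	.set_config		= drm_atomic_helper_set_config,
	.page_flip		= drm_atomic_helper_page_flip,
	.destroy		= drm_crtc_cleanup,
	.reset			= drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state	= drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_crtc_destroy_state,
};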
+
+/**
+ * drm_atomic_helper_crtc_set_property - helper for crtc properties
+ * @crtc: DRM crtc
+ * @property: DRM property
+ * @val: value of property
+ *
+ * Provides a default crtc set_property handler using the atomic driver interface.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+int
+drm_atomic_helper_crtc_set_property(struct drm_crtc *crtc,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_atomic_state *state;
+ struct drm_crtc_state *crtc_state;
+ int ret = 0;
+
+ state = drm_atomic_state_alloc(crtc->dev);
+ if (!state)
+ return -ENOMEM;
+
+ /* ->set_property is always called with all locks held. */
+ state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
+retry:
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto fail;
+ }
+
+ ret = crtc->funcs->atomic_set_property(crtc, crtc_state,
+ property, val);
+ if (ret)
+ goto fail;
+
+ ret = drm_atomic_commit(state);
+ if (ret != 0)
+ goto fail;
+
+ /* Driver takes ownership of state on successful commit. */
+ return 0;
+fail:
+ if (ret == -EDEADLK)
+ goto backoff;
+
+ drm_atomic_state_free(state);
+
+ return ret;
+backoff:
+ drm_atomic_state_clear(state);
+ drm_atomic_legacy_backoff(state);
+
+ goto retry;
+}
+EXPORT_SYMBOL(drm_atomic_helper_crtc_set_property);
+
+/**
+ * drm_atomic_helper_plane_set_property - helper for plane properties
+ * @plane: DRM plane
+ * @property: DRM property
+ * @val: value of property
+ *
+ * Provides a default plane set_property handler using the atomic driver interface.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+int
+drm_atomic_helper_plane_set_property(struct drm_plane *plane,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_atomic_state *state;
+ struct drm_plane_state *plane_state;
+ int ret = 0;
+
+ state = drm_atomic_state_alloc(plane->dev);
+ if (!state)
+ return -ENOMEM;
+
+ /* ->set_property is always called with all locks held. */
+ state->acquire_ctx = plane->dev->mode_config.acquire_ctx;
+retry:
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ goto fail;
+ }
+
+ ret = plane->funcs->atomic_set_property(plane, plane_state,
+ property, val);
+ if (ret)
+ goto fail;
+
+ ret = drm_atomic_commit(state);
+ if (ret != 0)
+ goto fail;
+
+ /* Driver takes ownership of state on successful commit. */
+ return 0;
+fail:
+ if (ret == -EDEADLK)
+ goto backoff;
+
+ drm_atomic_state_free(state);
+
+ return ret;
+backoff:
+ drm_atomic_state_clear(state);
+ drm_atomic_legacy_backoff(state);
+
+ goto retry;
+}
+EXPORT_SYMBOL(drm_atomic_helper_plane_set_property);
+
+/**
+ * drm_atomic_helper_connector_set_property - helper for connector properties
+ * @connector: DRM connector
+ * @property: DRM property
+ * @val: value of property
+ *
+ * Provides a default connector set_property handler using the atomic driver interface.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+int
+drm_atomic_helper_connector_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_atomic_state *state;
+ struct drm_connector_state *connector_state;
+ int ret = 0;
+
+ state = drm_atomic_state_alloc(connector->dev);
+ if (!state)
+ return -ENOMEM;
+
+ /* ->set_property is always called with all locks held. */
+ state->acquire_ctx = connector->dev->mode_config.acquire_ctx;
+retry:
+ connector_state = drm_atomic_get_connector_state(state, connector);
+ if (IS_ERR(connector_state)) {
+ ret = PTR_ERR(connector_state);
+ goto fail;
+ }
+
+ ret = connector->funcs->atomic_set_property(connector, connector_state,
+ property, val);
+ if (ret)
+ goto fail;
+
+ ret = drm_atomic_commit(state);
+ if (ret != 0)
+ goto fail;
+
+ /* Driver takes ownership of state on successful commit. */
+ return 0;
+fail:
+ if (ret == -EDEADLK)
+ goto backoff;
+
+ drm_atomic_state_free(state);
+
+ return ret;
+backoff:
+ drm_atomic_state_clear(state);
+ drm_atomic_legacy_backoff(state);
+
+ goto retry;
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_set_property);
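These three property helpers only forward to the object's ->atomic_set_property callback and then commit the resulting state. As an illustration, a hypothetical connector callback consumed by drm_atomic_helper_connector_set_property() might look like this (the subclassed state and foo_margin_property are assumptions, not part of this patch):

struct foo_connector_state {
	struct drm_connector_state base;
	unsigned int margin;		/* hypothetical driver property */
};

/* Hypothetical, created elsewhere with drm_property_create_range(). */
static struct drm_property *foo_margin_property;

static int foo_connector_atomic_set_property(struct drm_connector *connector,
					     struct drm_connector_state *state,
					     struct drm_property *property,
					     uint64_t val)
{
	struct foo_connector_state *foo_state =
		container_of(state, struct foo_connector_state, base);

	if (property == foo_margin_property) {
		foo_state->margin = val;
		return 0;
	}

	/* Unknown property: report the error back through the helper. */
	return -EINVAL;
}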
+
+/**
+ * drm_atomic_helper_page_flip - execute a legacy page flip
+ * @crtc: DRM crtc
+ * @fb: DRM framebuffer
+ * @event: optional DRM event to signal upon completion
+ * @flags: flip flags for non-vblank sync'ed updates
+ *
+ * Provides a default page flip implementation using the atomic driver interface.
+ *
+ * Note that for now so-called async page flips (i.e. updates which are not
+ * synchronized to vblank) are not supported, since the atomic interfaces have
+ * no provisions for this yet.
+ *
+ * Returns:
+ * Returns 0 on success, negative errno numbers on failure.
+ */
+int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t flags)
+{
+ struct drm_plane *plane = crtc->primary;
+ struct drm_atomic_state *state;
+ struct drm_plane_state *plane_state;
+ struct drm_crtc_state *crtc_state;
+ int ret = 0;
+
+ if (flags & DRM_MODE_PAGE_FLIP_ASYNC)
+ return -EINVAL;
+
+ state = drm_atomic_state_alloc(plane->dev);
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
+retry:
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto fail;
+ }
+ crtc_state->event = event;
+
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ goto fail;
+ }
+
+ ret = drm_atomic_set_crtc_for_plane(state, plane, crtc);
+ if (ret != 0)
+ goto fail;
+ drm_atomic_set_fb_for_plane(plane_state, fb);
+
+ ret = drm_atomic_async_commit(state);
+ if (ret != 0)
+ goto fail;
+
+ /* TODO: ->page_flip is the only driver callback where the core
+ * doesn't update plane->fb. For now patch it up here. */
+ plane->fb = plane->state->fb;
+
+ /* Driver takes ownership of state on successful async commit. */
+ return 0;
+fail:
+ if (ret == -EDEADLK)
+ goto backoff;
+
+ drm_atomic_state_free(state);
+
+ return ret;
+backoff:
+ drm_atomic_state_clear(state);
+ drm_atomic_legacy_backoff(state);
+
+ /*
+ * Someone might have exchanged the framebuffer while we dropped locks
+ * in the backoff code. We need to fix up the fb refcount tracking the
+ * core does for us.
+ */
+ plane->old_fb = plane->fb;
+
+ goto retry;
+}
+EXPORT_SYMBOL(drm_atomic_helper_page_flip);
+
+/**
+ * DOC: atomic state reset and initialization
+ *
+ * Both the drm core and the atomic helpers assume that there is always the full
+ * and correct atomic software state for all connectors, CRTCs and planes
+ * available. This is a bit of a problem on driver load and also after system
+ * suspend. One way to solve this is to have a hardware state read-out
+ * infrastructure which reconstructs the full software state (e.g. the i915
+ * driver).
+ *
+ * The simpler solution is to just reset the software state to everything off,
+ * which is easiest to do by calling drm_mode_config_reset(). To facilitate this
+ * the atomic helpers provide default reset implementations for all hooks.
+ */
+
+/**
+ * drm_atomic_helper_crtc_reset - default ->reset hook for CRTCs
+ * @crtc: drm CRTC
+ *
+ * Resets the atomic state for @crtc by freeing the state pointer (which might
+ * be NULL, e.g. at driver load time) and allocating a new empty state object.
+ */
+void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc)
+{
+ kfree(crtc->state);
+ crtc->state = kzalloc(sizeof(*crtc->state), GFP_KERNEL);
+}
+EXPORT_SYMBOL(drm_atomic_helper_crtc_reset);
+
+/**
+ * drm_atomic_helper_crtc_duplicate_state - default state duplicate hook
+ * @crtc: drm CRTC
+ *
+ * Default CRTC state duplicate hook for drivers which don't have their own
+ * subclassed CRTC state structure.
+ */
+struct drm_crtc_state *
+drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct drm_crtc_state *state;
+
+ if (WARN_ON(!crtc->state))
+ return NULL;
+
+ state = kmemdup(crtc->state, sizeof(*crtc->state), GFP_KERNEL);
+
+ if (state) {
+ state->mode_changed = false;
+ state->planes_changed = false;
+ state->event = NULL;
+ }
+
+ return state;
+}
+EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
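Drivers that subclass drm_crtc_state cannot use this default and need their own hook mirroring it. A hedged sketch, assuming the driver's ->reset hook also allocates the subclassed structure (all foo_ names are hypothetical):

struct foo_crtc_state {
	struct drm_crtc_state base;
	u32 pll_config;			/* hypothetical driver-private state */
};

static struct drm_crtc_state *
foo_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct foo_crtc_state *old, *state;

	if (WARN_ON(!crtc->state))
		return NULL;

	old = container_of(crtc->state, struct foo_crtc_state, base);
	state = kmemdup(old, sizeof(*old), GFP_KERNEL);
	if (!state)
		return NULL;

	/* Clear the per-commit flags, just like the default hook does. */
	state->base.mode_changed = false;
	state->base.planes_changed = false;
	state->base.event = NULL;

	return &state->base;
}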
+
+/**
+ * drm_atomic_helper_crtc_destroy_state - default state destroy hook
+ * @crtc: drm CRTC
+ * @state: CRTC state object to release
+ *
+ * Default CRTC state destroy hook for drivers which don't have their own
+ * subclassed CRTC state structure.
+ */
+void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ kfree(state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_crtc_destroy_state);
+
+/**
+ * drm_atomic_helper_plane_reset - default ->reset hook for planes
+ * @plane: drm plane
+ *
+ * Resets the atomic state for @plane by freeing the state pointer (which might
+ * be NULL, e.g. at driver load time) and allocating a new empty state object.
+ */
+void drm_atomic_helper_plane_reset(struct drm_plane *plane)
+{
+ if (plane->state && plane->state->fb)
+ drm_framebuffer_unreference(plane->state->fb);
+
+ kfree(plane->state);
+ plane->state = kzalloc(sizeof(*plane->state), GFP_KERNEL);
+}
+EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
+
+/**
+ * drm_atomic_helper_plane_duplicate_state - default state duplicate hook
+ * @plane: drm plane
+ *
+ * Default plane state duplicate hook for drivers which don't have their own
+ * subclassed plane state structure.
+ */
+struct drm_plane_state *
+drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct drm_plane_state *state;
+
+ if (WARN_ON(!plane->state))
+ return NULL;
+
+ state = kmemdup(plane->state, sizeof(*plane->state), GFP_KERNEL);
+
+ if (state && state->fb)
+ drm_framebuffer_reference(state->fb);
+
+ return state;
+}
+EXPORT_SYMBOL(drm_atomic_helper_plane_duplicate_state);
+
+/**
+ * drm_atomic_helper_plane_destroy_state - default state destroy hook
+ * @plane: drm plane
+ * @state: plane state object to release
+ *
+ * Default plane state destroy hook for drivers which don't have their own
+ * subclassed plane state structure.
+ */
+void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ if (state->fb)
+ drm_framebuffer_unreference(state->fb);
+
+ kfree(state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);
+
+/**
+ * drm_atomic_helper_connector_reset - default ->reset hook for connectors
+ * @connector: drm connector
+ *
+ * Resets the atomic state for @connector by freeing the state pointer (which
+ * might be NULL, e.g. at driver load time) and allocating a new empty state
+ * object.
+ */
+void drm_atomic_helper_connector_reset(struct drm_connector *connector)
+{
+ kfree(connector->state);
+ connector->state = kzalloc(sizeof(*connector->state), GFP_KERNEL);
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_reset);
+
+/**
+ * drm_atomic_helper_connector_duplicate_state - default state duplicate hook
+ * @connector: drm connector
+ *
+ * Default connector state duplicate hook for drivers which don't have their own
+ * subclassed connector state structure.
+ */
+struct drm_connector_state *
+drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector)
+{
+ if (WARN_ON(!connector->state))
+ return NULL;
+
+ return kmemdup(connector->state, sizeof(*connector->state), GFP_KERNEL);
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
+
+/**
+ * drm_atomic_helper_connector_destroy_state - default state destroy hook
+ * @connector: drm connector
+ * @state: connector state object to release
+ *
+ * Default connector state destroy hook for drivers which don't have their own
+ * subclassed connector state structure.
+ */
+void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
+ struct drm_connector_state *state)
+{
+ kfree(state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
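With the default reset hooks wired into the crtc, plane and connector funcs, a driver load path only needs to call drm_mode_config_reset() once all objects are registered to get the "everything off" software state described above. A hypothetical sketch:

static int foo_drm_load(struct drm_device *dev)
{
	int ret;

	drm_mode_config_init(dev);

	/* Hypothetical: registers CRTCs, planes and connectors whose funcs
	 * use the drm_atomic_helper_*_reset hooks above. */
	ret = foo_modeset_init(dev);
	if (ret)
		return ret;

	/* Allocates a zeroed state object for every CRTC, plane and
	 * connector, i.e. everything off. */
	drm_mode_config_reset(dev);

	return 0;
}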
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index e79c8d3700d..5213da499d3 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -683,7 +683,7 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
drm_modeset_lock_init(&crtc->mutex);
ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
if (ret)
- goto out;
+ return ret;
crtc->base.properties = &crtc->properties;
@@ -697,9 +697,7 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
if (cursor)
cursor->possible_crtcs = 1 << drm_crtc_index(crtc);
- out:
-
- return ret;
+ return 0;
}
EXPORT_SYMBOL(drm_crtc_init_with_planes);
@@ -723,6 +721,12 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
drm_mode_object_put(dev, &crtc->base);
list_del(&crtc->head);
dev->mode_config.num_crtc--;
+
+ WARN_ON(crtc->state && !crtc->funcs->atomic_destroy_state);
+ if (crtc->state && crtc->funcs->atomic_destroy_state)
+ crtc->funcs->atomic_destroy_state(crtc, crtc->state);
+
+ memset(crtc, 0, sizeof(*crtc));
}
EXPORT_SYMBOL(drm_crtc_cleanup);
@@ -766,7 +770,6 @@ static void drm_mode_remove(struct drm_connector *connector,
/**
* drm_connector_get_cmdline_mode - reads the user's cmdline mode
* @connector: connector to quwery
- * @mode: returned mode
*
* The kernel supports per-connector configration of its consoles through
* use of the video= parameter. This function parses that option and
@@ -870,6 +873,8 @@ int drm_connector_init(struct drm_device *dev,
drm_connector_get_cmdline_mode(connector);
+ /* We should add connectors at the end to avoid upsetting the connector
+ * index too much. */
list_add_tail(&connector->head, &dev->mode_config.connector_list);
dev->mode_config.num_connector++;
@@ -905,6 +910,11 @@ void drm_connector_cleanup(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode, *t;
+ if (connector->tile_group) {
+ drm_mode_put_tile_group(dev, connector->tile_group);
+ connector->tile_group = NULL;
+ }
+
list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
drm_mode_remove(connector, mode);
@@ -919,6 +929,13 @@ void drm_connector_cleanup(struct drm_connector *connector)
connector->name = NULL;
list_del(&connector->head);
dev->mode_config.num_connector--;
+
+ WARN_ON(connector->state && !connector->funcs->atomic_destroy_state);
+ if (connector->state && connector->funcs->atomic_destroy_state)
+ connector->funcs->atomic_destroy_state(connector,
+ connector->state);
+
+ memset(connector, 0, sizeof(*connector));
}
EXPORT_SYMBOL(drm_connector_cleanup);
@@ -933,6 +950,9 @@ unsigned int drm_connector_index(struct drm_connector *connector)
{
unsigned int index = 0;
struct drm_connector *tmp;
+ struct drm_mode_config *config = &connector->dev->mode_config;
+
+ WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
list_for_each_entry(tmp, &connector->dev->mode_config.connector_list, head) {
if (tmp == connector)
@@ -1057,6 +1077,8 @@ void drm_bridge_cleanup(struct drm_bridge *bridge)
list_del(&bridge->head);
dev->mode_config.num_bridge--;
drm_modeset_unlock_all(dev);
+
+ memset(bridge, 0, sizeof(*bridge));
}
EXPORT_SYMBOL(drm_bridge_cleanup);
@@ -1123,10 +1145,11 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
drm_modeset_lock_all(dev);
drm_mode_object_put(dev, &encoder->base);
kfree(encoder->name);
- encoder->name = NULL;
list_del(&encoder->head);
dev->mode_config.num_encoder--;
drm_modeset_unlock_all(dev);
+
+ memset(encoder, 0, sizeof(*encoder));
}
EXPORT_SYMBOL(drm_encoder_cleanup);
@@ -1153,11 +1176,11 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
{
int ret;
- drm_modeset_lock_all(dev);
-
ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
if (ret)
- goto out;
+ return ret;
+
+ drm_modeset_lock_init(&plane->mutex);
plane->base.properties = &plane->properties;
plane->dev = dev;
@@ -1167,8 +1190,7 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
if (!plane->format_types) {
DRM_DEBUG_KMS("out of memory when allocating plane\n");
drm_mode_object_put(dev, &plane->base);
- ret = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
@@ -1185,10 +1207,7 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
dev->mode_config.plane_type_property,
plane->type);
- out:
- drm_modeset_unlock_all(dev);
-
- return ret;
+ return 0;
}
EXPORT_SYMBOL(drm_universal_plane_init);
@@ -1246,6 +1265,12 @@ void drm_plane_cleanup(struct drm_plane *plane)
if (plane->type == DRM_PLANE_TYPE_OVERLAY)
dev->mode_config.num_overlay_plane--;
drm_modeset_unlock_all(dev);
+
+ WARN_ON(plane->state && !plane->funcs->atomic_destroy_state);
+ if (plane->state && plane->funcs->atomic_destroy_state)
+ plane->funcs->atomic_destroy_state(plane, plane->state);
+
+ memset(plane, 0, sizeof(*plane));
}
EXPORT_SYMBOL(drm_plane_cleanup);
@@ -1328,6 +1353,11 @@ static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
"PATH", 0);
dev->mode_config.path_property = dev_path;
+ dev->mode_config.tile_property = drm_property_create(dev,
+ DRM_MODE_PROP_BLOB |
+ DRM_MODE_PROP_IMMUTABLE,
+ "TILE", 0);
+
return 0;
}
@@ -1388,12 +1418,13 @@ EXPORT_SYMBOL(drm_mode_create_dvi_i_properties);
* responsible for allocating a list of format names and passing them to
* this routine.
*/
-int drm_mode_create_tv_properties(struct drm_device *dev, int num_modes,
+int drm_mode_create_tv_properties(struct drm_device *dev,
+ unsigned int num_modes,
char *modes[])
{
struct drm_property *tv_selector;
struct drm_property *tv_subconnector;
- int i;
+ unsigned int i;
if (dev->mode_config.tv_select_subconnector_property)
return 0;
@@ -1491,7 +1522,7 @@ EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
* connectors.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_create_aspect_ratio_property(struct drm_device *dev)
{
@@ -1535,6 +1566,30 @@ int drm_mode_create_dirty_info_property(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
+/**
+ * drm_mode_create_suggested_offset_properties - create suggested offset properties
+ * @dev: DRM device
+ *
+ * Create the suggested x/y offset properties for connectors.
+ */
+int drm_mode_create_suggested_offset_properties(struct drm_device *dev)
+{
+ if (dev->mode_config.suggested_x_property && dev->mode_config.suggested_y_property)
+ return 0;
+
+ dev->mode_config.suggested_x_property =
+ drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, "suggested X", 0, 0xffffffff);
+
+ dev->mode_config.suggested_y_property =
+ drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, "suggested Y", 0, 0xffffffff);
+
+ if (dev->mode_config.suggested_x_property == NULL ||
+ dev->mode_config.suggested_y_property == NULL)
+ return -ENOMEM;
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_suggested_offset_properties);
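A driver would then attach these properties to each connector it wants to annotate, typically from its connector init path. A hedged sketch (the initial value of 0 is a placeholder; real offsets would come from platform firmware):

static void foo_attach_suggested_offsets(struct drm_device *dev,
					 struct drm_connector *connector)
{
	drm_mode_create_suggested_offset_properties(dev);
	drm_object_attach_property(&connector->base,
				   dev->mode_config.suggested_x_property, 0);
	drm_object_attach_property(&connector->base,
				   dev->mode_config.suggested_y_property, 0);
}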
+
static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
{
uint32_t total_objects = 0;
@@ -1651,7 +1706,7 @@ static void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
* the caller.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
static int drm_crtc_convert_umode(struct drm_display_mode *out,
const struct drm_mode_modeinfo *in)
@@ -1694,7 +1749,7 @@ static int drm_crtc_convert_umode(struct drm_display_mode *out,
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_getresources(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -1745,7 +1800,9 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
card_res->count_fbs = fb_count;
mutex_unlock(&file_priv->fbs_lock);
- drm_modeset_lock_all(dev);
+ /* mode_config.mutex protects the connector list against e.g. DP MST
+ * connector hot-adding. CRTC/Plane lists are invariant. */
+ mutex_lock(&dev->mode_config.mutex);
if (!drm_is_primary_client(file_priv)) {
mode_group = NULL;
@@ -1865,7 +1922,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
card_res->count_connectors, card_res->count_encoders);
out:
- drm_modeset_unlock_all(dev);
+ mutex_unlock(&dev->mode_config.mutex);
return ret;
}
@@ -1880,26 +1937,22 @@ out:
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_getcrtc(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_crtc *crtc_resp = data;
struct drm_crtc *crtc;
- int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- drm_modeset_lock_all(dev);
-
crtc = drm_crtc_find(dev, crtc_resp->crtc_id);
- if (!crtc) {
- ret = -ENOENT;
- goto out;
- }
+ if (!crtc)
+ return -ENOENT;
+ drm_modeset_lock_crtc(crtc, crtc->primary);
crtc_resp->x = crtc->x;
crtc_resp->y = crtc->y;
crtc_resp->gamma_size = crtc->gamma_size;
@@ -1916,10 +1969,9 @@ int drm_mode_getcrtc(struct drm_device *dev,
} else {
crtc_resp->mode_valid = 0;
}
+ drm_modeset_unlock_crtc(crtc);
-out:
- drm_modeset_unlock_all(dev);
- return ret;
+ return 0;
}
static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
@@ -1935,6 +1987,15 @@ static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
return true;
}
+static struct drm_encoder *drm_connector_get_encoder(struct drm_connector *connector)
+{
+ /* For atomic drivers only state objects are synchronously updated and
+ * protected by modeset locks, so check those first. */
+ if (connector->state)
+ return connector->state->best_encoder;
+ return connector->encoder;
+}
+
/**
* drm_mode_getconnector - get connector configuration
* @dev: drm device for the ioctl
@@ -1946,13 +2007,14 @@ static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_getconnector(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_get_connector *out_resp = data;
struct drm_connector *connector;
+ struct drm_encoder *encoder;
struct drm_display_mode *mode;
int mode_count = 0;
int props_count = 0;
@@ -2008,8 +2070,10 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
out_resp->subpixel = connector->display_info.subpixel_order;
out_resp->connection = connector->status;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- if (connector->encoder)
- out_resp->encoder_id = connector->encoder->base.id;
+
+ encoder = drm_connector_get_encoder(connector);
+ if (encoder)
+ out_resp->encoder_id = encoder->base.id;
else
out_resp->encoder_id = 0;
drm_modeset_unlock(&dev->mode_config.connection_mutex);
@@ -2079,6 +2143,33 @@ out:
return ret;
}
+static struct drm_crtc *drm_encoder_get_crtc(struct drm_encoder *encoder)
+{
+ struct drm_connector *connector;
+ struct drm_device *dev = encoder->dev;
+ bool uses_atomic = false;
+
+ /* For atomic drivers only state objects are synchronously updated and
+ * protected by modeset locks, so check those first. */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (!connector->state)
+ continue;
+
+ uses_atomic = true;
+
+ if (connector->state->best_encoder != encoder)
+ continue;
+
+ return connector->state->crtc;
+ }
+
+ /* Don't return stale data (e.g. pending async disable). */
+ if (uses_atomic)
+ return NULL;
+
+ return encoder->crtc;
+}
+
/**
* drm_mode_getencoder - get encoder configuration
* @dev: drm device for the ioctl
@@ -2090,37 +2181,38 @@ out:
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_getencoder(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_get_encoder *enc_resp = data;
struct drm_encoder *encoder;
- int ret = 0;
+ struct drm_crtc *crtc;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- drm_modeset_lock_all(dev);
encoder = drm_encoder_find(dev, enc_resp->encoder_id);
- if (!encoder) {
- ret = -ENOENT;
- goto out;
- }
+ if (!encoder)
+ return -ENOENT;
- if (encoder->crtc)
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ crtc = drm_encoder_get_crtc(encoder);
+ if (crtc)
+ enc_resp->crtc_id = crtc->base.id;
+ else if (encoder->crtc)
enc_resp->crtc_id = encoder->crtc->base.id;
else
enc_resp->crtc_id = 0;
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
enc_resp->encoder_type = encoder->encoder_type;
enc_resp->encoder_id = encoder->base.id;
enc_resp->possible_crtcs = encoder->possible_crtcs;
enc_resp->possible_clones = encoder->possible_clones;
-out:
- drm_modeset_unlock_all(dev);
- return ret;
+ return 0;
}
/**
@@ -2134,7 +2226,7 @@ out:
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_getplane_res(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -2143,13 +2235,12 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data,
struct drm_mode_config *config;
struct drm_plane *plane;
uint32_t __user *plane_ptr;
- int copied = 0, ret = 0;
+ int copied = 0;
unsigned num_planes;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- drm_modeset_lock_all(dev);
config = &dev->mode_config;
if (file_priv->universal_planes)
@@ -2165,6 +2256,7 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data,
(plane_resp->count_planes >= num_planes)) {
plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr;
+ /* Plane lists are invariant, no locking needed. */
list_for_each_entry(plane, &config->plane_list, head) {
/*
* Unless userspace set the 'universal planes'
@@ -2174,18 +2266,14 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data,
!file_priv->universal_planes)
continue;
- if (put_user(plane->base.id, plane_ptr + copied)) {
- ret = -EFAULT;
- goto out;
- }
+ if (put_user(plane->base.id, plane_ptr + copied))
+ return -EFAULT;
copied++;
}
}
plane_resp->count_planes = num_planes;
-out:
- drm_modeset_unlock_all(dev);
- return ret;
+ return 0;
}
/**
@@ -2199,7 +2287,7 @@ out:
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_getplane(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -2207,18 +2295,15 @@ int drm_mode_getplane(struct drm_device *dev, void *data,
struct drm_mode_get_plane *plane_resp = data;
struct drm_plane *plane;
uint32_t __user *format_ptr;
- int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- drm_modeset_lock_all(dev);
plane = drm_plane_find(dev, plane_resp->plane_id);
- if (!plane) {
- ret = -ENOENT;
- goto out;
- }
+ if (!plane)
+ return -ENOENT;
+ drm_modeset_lock(&plane->mutex, NULL);
if (plane->crtc)
plane_resp->crtc_id = plane->crtc->base.id;
else
@@ -2228,6 +2313,7 @@ int drm_mode_getplane(struct drm_device *dev, void *data,
plane_resp->fb_id = plane->fb->base.id;
else
plane_resp->fb_id = 0;
+ drm_modeset_unlock(&plane->mutex);
plane_resp->plane_id = plane->base.id;
plane_resp->possible_crtcs = plane->possible_crtcs;
@@ -2243,15 +2329,12 @@ int drm_mode_getplane(struct drm_device *dev, void *data,
if (copy_to_user(format_ptr,
plane->format_types,
sizeof(uint32_t) * plane->format_count)) {
- ret = -EFAULT;
- goto out;
+ return -EFAULT;
}
}
plane_resp->count_format_types = plane->format_count;
-out:
- drm_modeset_unlock_all(dev);
- return ret;
+ return 0;
}
/*
@@ -2274,7 +2357,7 @@ static int __setplane_internal(struct drm_plane *plane,
{
int ret = 0;
unsigned int fb_width, fb_height;
- int i;
+ unsigned int i;
/* No fb means shut it down */
if (!fb) {
@@ -2378,13 +2461,12 @@ static int setplane_internal(struct drm_plane *plane,
* valid crtc).
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_setplane(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_set_plane *plane_req = data;
- struct drm_mode_object *obj;
struct drm_plane *plane;
struct drm_crtc *crtc = NULL;
struct drm_framebuffer *fb = NULL;
@@ -2407,14 +2489,12 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
* First, find the plane, crtc, and fb objects. If not available,
* we don't bother to call the driver.
*/
- obj = drm_mode_object_find(dev, plane_req->plane_id,
- DRM_MODE_OBJECT_PLANE);
- if (!obj) {
+ plane = drm_plane_find(dev, plane_req->plane_id);
+ if (!plane) {
DRM_DEBUG_KMS("Unknown plane ID %d\n",
plane_req->plane_id);
return -ENOENT;
}
- plane = obj_to_plane(obj);
if (plane_req->fb_id) {
fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
@@ -2424,14 +2504,12 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
return -ENOENT;
}
- obj = drm_mode_object_find(dev, plane_req->crtc_id,
- DRM_MODE_OBJECT_CRTC);
- if (!obj) {
+ crtc = drm_crtc_find(dev, plane_req->crtc_id);
+ if (!crtc) {
DRM_DEBUG_KMS("Unknown crtc ID %d\n",
plane_req->crtc_id);
return -ENOENT;
}
- crtc = obj_to_crtc(obj);
}
/*
@@ -2453,7 +2531,7 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
* interface. The only thing it adds is correct refcounting dance.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_set_config_internal(struct drm_mode_set *set)
{
@@ -2546,7 +2624,7 @@ EXPORT_SYMBOL(drm_crtc_check_viewport);
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_setcrtc(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -2709,7 +2787,7 @@ out:
* userspace wants to make use of these capabilities.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
static int drm_mode_cursor_universal(struct drm_crtc *crtc,
struct drm_mode_cursor2 *req,
@@ -2810,7 +2888,7 @@ static int drm_mode_cursor_common(struct drm_device *dev,
* If this crtc has a universal cursor plane, call that plane's update
* handler rather than using legacy cursor handlers.
*/
- drm_modeset_lock_crtc(crtc);
+ drm_modeset_lock_crtc(crtc, crtc->cursor);
if (crtc->cursor) {
ret = drm_mode_cursor_universal(crtc, req, file_priv);
goto out;
@@ -2857,7 +2935,7 @@ out:
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_cursor_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
@@ -2884,7 +2962,7 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_cursor2_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
@@ -2943,23 +3021,21 @@ EXPORT_SYMBOL(drm_mode_legacy_fb_format);
* @file_priv: drm file for the ioctl call
*
* Add a new FB to the specified CRTC, given a user request. This is the
- * original addfb ioclt which only supported RGB formats.
+ * original addfb ioctl which only supported RGB formats.
*
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_addfb(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_fb_cmd *or = data;
struct drm_mode_fb_cmd2 r = {};
- struct drm_mode_config *config = &dev->mode_config;
- struct drm_framebuffer *fb;
- int ret = 0;
+ int ret;
- /* Use new struct with format internally */
+ /* convert to new format and call new ioctl */
r.fb_id = or->fb_id;
r.width = or->width;
r.height = or->height;
@@ -2967,28 +3043,13 @@ int drm_mode_addfb(struct drm_device *dev,
r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
r.handles[0] = or->handle;
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
- if ((config->min_width > r.width) || (r.width > config->max_width))
- return -EINVAL;
-
- if ((config->min_height > r.height) || (r.height > config->max_height))
- return -EINVAL;
+ ret = drm_mode_addfb2(dev, &r, file_priv);
+ if (ret)
+ return ret;
- fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r);
- if (IS_ERR(fb)) {
- DRM_DEBUG_KMS("could not create framebuffer\n");
- return PTR_ERR(fb);
- }
+ or->fb_id = r.fb_id;
- mutex_lock(&file_priv->fbs_lock);
- or->fb_id = fb->base.id;
- list_add(&fb->filp_head, &file_priv->fbs);
- DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
- mutex_unlock(&file_priv->fbs_lock);
-
- return ret;
+ return 0;
}
static int format_check(const struct drm_mode_fb_cmd2 *r)
@@ -3080,7 +3141,7 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
num_planes = drm_format_num_planes(r->pixel_format);
if (r->width == 0 || r->width % hsub) {
- DRM_DEBUG_KMS("bad framebuffer width %u\n", r->height);
+ DRM_DEBUG_KMS("bad framebuffer width %u\n", r->width);
return -EINVAL;
}
@@ -3170,7 +3231,7 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_addfb2(struct drm_device *dev,
void *data, struct drm_file *file_priv)
@@ -3198,7 +3259,7 @@ int drm_mode_addfb2(struct drm_device *dev,
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_rmfb(struct drm_device *dev,
void *data, struct drm_file *file_priv)
@@ -3252,7 +3313,7 @@ fail_lookup:
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_getfb(struct drm_device *dev,
void *data, struct drm_file *file_priv)
@@ -3313,7 +3374,7 @@ int drm_mode_getfb(struct drm_device *dev,
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
@@ -3393,7 +3454,7 @@ out_err1:
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
void drm_fb_release(struct drm_file *priv)
{
@@ -3402,7 +3463,7 @@ void drm_fb_release(struct drm_file *priv)
/*
* When the file gets released that means no one else can access the fb
- * list any more, so no need to grab fpriv->fbs_lock. And we need to to
+ * list any more, so no need to grab fpriv->fbs_lock. And we need to
* avoid upsetting lockdep since the universal cursor code adds a
* framebuffer while holding mutex locks.
*
@@ -3435,6 +3496,10 @@ void drm_fb_release(struct drm_file *priv)
* object with drm_object_attach_property. The returned property object must be
* freed with drm_property_destroy.
*
+ * Note that the DRM core keeps a per-device list of properties and that, if
+ * drm_mode_config_cleanup() is called, it will destroy all properties created
+ * by the driver.
+ *
* Returns:
* A pointer to the newly created property on success, NULL on failure.
*/
@@ -3462,7 +3527,7 @@ struct drm_property *drm_property_create(struct drm_device *dev, int flags,
property->flags = flags;
property->num_values = num_values;
- INIT_LIST_HEAD(&property->enum_blob_list);
+ INIT_LIST_HEAD(&property->enum_list);
if (name) {
strncpy(property->name, name, DRM_PROP_NAME_LEN);
@@ -3611,7 +3676,7 @@ static struct drm_property *property_create_range(struct drm_device *dev,
* object with drm_object_attach_property. The returned property object must be
* freed with drm_property_destroy.
*
- * Userspace is allowed to set any interger value in the (min, max) range
+ * Userspace is allowed to set any integer value in the (min, max) range
* inclusive.
*
* Returns:
@@ -3684,8 +3749,8 @@ int drm_property_add_enum(struct drm_property *property, int index,
(value > 63))
return -EINVAL;
- if (!list_empty(&property->enum_blob_list)) {
- list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
+ if (!list_empty(&property->enum_list)) {
+ list_for_each_entry(prop_enum, &property->enum_list, head) {
if (prop_enum->value == value) {
strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
@@ -3703,7 +3768,7 @@ int drm_property_add_enum(struct drm_property *property, int index,
prop_enum->value = value;
property->values[index] = value;
- list_add_tail(&prop_enum->head, &property->enum_blob_list);
+ list_add_tail(&prop_enum->head, &property->enum_list);
return 0;
}
EXPORT_SYMBOL(drm_property_add_enum);
@@ -3720,7 +3785,7 @@ void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
{
struct drm_property_enum *prop_enum, *pt;
- list_for_each_entry_safe(prop_enum, pt, &property->enum_blob_list, head) {
+ list_for_each_entry_safe(prop_enum, pt, &property->enum_list, head) {
list_del(&prop_enum->head);
kfree(prop_enum);
}
@@ -3823,17 +3888,20 @@ int drm_object_property_get_value(struct drm_mode_object *obj,
EXPORT_SYMBOL(drm_object_property_get_value);
/**
- * drm_mode_getproperty_ioctl - get the current value of a connector's property
+ * drm_mode_getproperty_ioctl - get the property metadata
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
*
- * This function retrieves the current value for an connectors's property.
+ * This function retrieves the metadata for a given property, like the different
+ * possible values for an enum property or the limits for a range property.
+ *
+ * Blob properties are special: this ioctl only returns their metadata, the
+ * actual data is retrieved with the dedicated get_blob ioctl.
*
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_getproperty_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
@@ -3841,16 +3909,12 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
struct drm_mode_get_property *out_resp = data;
struct drm_property *property;
int enum_count = 0;
- int blob_count = 0;
int value_count = 0;
int ret = 0, i;
int copied;
struct drm_property_enum *prop_enum;
struct drm_mode_property_enum __user *enum_ptr;
- struct drm_property_blob *prop_blob;
- uint32_t __user *blob_id_ptr;
uint64_t __user *values_ptr;
- uint32_t __user *blob_length_ptr;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
@@ -3864,11 +3928,8 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
- list_for_each_entry(prop_enum, &property->enum_blob_list, head)
+ list_for_each_entry(prop_enum, &property->enum_list, head)
enum_count++;
- } else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) {
- list_for_each_entry(prop_blob, &property->enum_blob_list, head)
- blob_count++;
}
value_count = property->num_values;
@@ -3893,7 +3954,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
copied = 0;
enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
- list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
+ list_for_each_entry(prop_enum, &property->enum_list, head) {
if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
ret = -EFAULT;
@@ -3911,35 +3972,24 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
out_resp->count_enum_blobs = enum_count;
}
- if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) {
- if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
- copied = 0;
- blob_id_ptr = (uint32_t __user *)(unsigned long)out_resp->enum_blob_ptr;
- blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
-
- list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
- if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
- ret = -EFAULT;
- goto done;
- }
-
- if (put_user(prop_blob->length, blob_length_ptr + copied)) {
- ret = -EFAULT;
- goto done;
- }
-
- copied++;
- }
- }
- out_resp->count_enum_blobs = blob_count;
- }
+ /*
+ * NOTE: The idea seems to have been to use this to read all the blob
+ * property values. But nothing ever added them to the corresponding
+ * list, userspace always used the special-purpose get_blob ioctl to
+ * read the value for a blob property. It also doesn't make a lot of
+ * sense to return values here when everything else is just metadata for
+ * the property itself.
+ */
+ if (drm_property_type_is(property, DRM_MODE_PROP_BLOB))
+ out_resp->count_enum_blobs = 0;
done:
drm_modeset_unlock_all(dev);
return ret;
}
-static struct drm_property_blob *drm_property_create_blob(struct drm_device *dev, int length,
- void *data)
+static struct drm_property_blob *
+drm_property_create_blob(struct drm_device *dev, size_t length,
+ const void *data)
{
struct drm_property_blob *blob;
int ret;
@@ -3985,7 +4035,7 @@ static void drm_property_destroy_blob(struct drm_device *dev,
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_getblob_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
@@ -4019,12 +4069,25 @@ done:
return ret;
}
+/**
+ * drm_mode_connector_set_path_property - set path property on connector
+ * @connector: connector to set property on.
+ * @path: path to use for property.
+ *
+ * This creates a property to expose to userspace to specify a
+ * connector path. This is mainly used for DisplayPort MST where
+ * connectors have a topology and we want to allow userspace to give
+ * them more meaningful names.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
int drm_mode_connector_set_path_property(struct drm_connector *connector,
- char *path)
+ const char *path)
{
struct drm_device *dev = connector->dev;
- int ret, size;
- size = strlen(path) + 1;
+ size_t size = strlen(path) + 1;
+ int ret;
connector->path_blob_ptr = drm_property_create_blob(connector->dev,
size, path);
@@ -4039,6 +4102,52 @@ int drm_mode_connector_set_path_property(struct drm_connector *connector,
EXPORT_SYMBOL(drm_mode_connector_set_path_property);
/**
+ * drm_mode_connector_set_tile_property - set tile property on connector
+ * @connector: connector to set property on.
+ *
+ * This looks up the tile information for a connector, and creates a
+ * property for userspace to parse if it exists. The property is of
+ * the form of 8 integers using ':' as a separator.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_connector_set_tile_property(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ int ret, size;
+ char tile[256];
+
+ if (connector->tile_blob_ptr)
+ drm_property_destroy_blob(dev, connector->tile_blob_ptr);
+
+ if (!connector->has_tile) {
+ connector->tile_blob_ptr = NULL;
+ ret = drm_object_property_set_value(&connector->base,
+ dev->mode_config.tile_property, 0);
+ return ret;
+ }
+
+ snprintf(tile, 256, "%d:%d:%d:%d:%d:%d:%d:%d",
+ connector->tile_group->id, connector->tile_is_single_monitor,
+ connector->num_h_tile, connector->num_v_tile,
+ connector->tile_h_loc, connector->tile_v_loc,
+ connector->tile_h_size, connector->tile_v_size);
+ size = strlen(tile) + 1;
+
+ connector->tile_blob_ptr = drm_property_create_blob(connector->dev,
+ size, tile);
+ if (!connector->tile_blob_ptr)
+ return -EINVAL;
+
+ ret = drm_object_property_set_value(&connector->base,
+ dev->mode_config.tile_property,
+ connector->tile_blob_ptr->base.id);
+ return ret;
+}
+EXPORT_SYMBOL(drm_mode_connector_set_tile_property);
+
+/**
* drm_mode_connector_update_edid_property - update the edid property of a connector
* @connector: drm connector
* @edid: new value of the edid property
@@ -4047,13 +4156,14 @@ EXPORT_SYMBOL(drm_mode_connector_set_path_property);
* connector's edid property.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_connector_update_edid_property(struct drm_connector *connector,
- struct edid *edid)
+ const struct edid *edid)
{
struct drm_device *dev = connector->dev;
- int ret, size;
+ size_t size;
+ int ret;
/* ignore requests to set edid when overridden */
if (connector->override_edid)
@@ -4143,7 +4253,7 @@ static bool drm_property_change_is_valid(struct drm_property *property,
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
@@ -4226,7 +4336,7 @@ int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
EXPORT_SYMBOL(drm_mode_plane_set_obj_prop);
/**
- * drm_mode_getproperty_ioctl - get the current value of a object's property
+ * drm_mode_obj_get_properties_ioctl - get the current value of a object's property
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
@@ -4238,7 +4348,7 @@ EXPORT_SYMBOL(drm_mode_plane_set_obj_prop);
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -4310,7 +4420,7 @@ out:
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -4382,7 +4492,7 @@ out:
* possible_clones and possible_crtcs bitmasks.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_connector_attach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder)
@@ -4409,7 +4519,7 @@ EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
* fixed gamma table size.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
int gamma_size)
@@ -4438,7 +4548,7 @@ EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_gamma_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
@@ -4510,7 +4620,7 @@ out:
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_gamma_get_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
@@ -4576,7 +4686,7 @@ out:
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_page_flip_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
@@ -4599,7 +4709,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
if (!crtc)
return -ENOENT;
- drm_modeset_lock_crtc(crtc);
+ drm_modeset_lock_crtc(crtc, crtc->primary);
if (crtc->primary->fb == NULL) {
/* The framebuffer is currently unbound, presumably
* due to a hotplug event, that userspace has not
@@ -4742,7 +4852,7 @@ EXPORT_SYMBOL(drm_mode_config_reset);
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_create_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
@@ -4769,6 +4879,16 @@ int drm_mode_create_dumb_ioctl(struct drm_device *dev,
if (PAGE_ALIGN(size) == 0)
return -EINVAL;
+ /*
+ * handle, pitch and size are output parameters. Zero them out to
+ * prevent drivers from accidentally using uninitialized data. Since
+ * not all existing userspace is clearing these fields properly we
+ * cannot reject IOCTL with garbage in them.
+ */
+ args->handle = 0;
+ args->pitch = 0;
+ args->size = 0;
+
return dev->driver->dumb_create(file_priv, dev, args);
}
@@ -4784,7 +4904,7 @@ int drm_mode_create_dumb_ioctl(struct drm_device *dev,
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
@@ -4811,7 +4931,7 @@ int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
@@ -5097,6 +5217,7 @@ void drm_mode_config_init(struct drm_device *dev)
INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
INIT_LIST_HEAD(&dev->mode_config.plane_list);
idr_init(&dev->mode_config.crtc_idr);
+ idr_init(&dev->mode_config.tile_idr);
drm_modeset_lock_all(dev);
drm_mode_create_standard_connector_properties(dev);
@@ -5184,6 +5305,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
crtc->funcs->destroy(crtc);
}
+ idr_destroy(&dev->mode_config.tile_idr);
idr_destroy(&dev->mode_config.crtc_idr);
drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
}
@@ -5206,3 +5328,100 @@ struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
supported_rotations);
}
EXPORT_SYMBOL(drm_mode_create_rotation_property);
+
+/**
+ * DOC: Tile group
+ *
+ * Tile groups are used to represent tiled monitors with a unique
+ * integer identifier. Tiled monitors using DisplayID v1.3 have
+ * a unique 8-byte handle; we store this in a tile group so that we
+ * have a common identifier for all tiles in a monitor group.
+ */
+static void drm_tile_group_free(struct kref *kref)
+{
+ struct drm_tile_group *tg = container_of(kref, struct drm_tile_group, refcount);
+ struct drm_device *dev = tg->dev;
+ mutex_lock(&dev->mode_config.idr_mutex);
+ idr_remove(&dev->mode_config.tile_idr, tg->id);
+ mutex_unlock(&dev->mode_config.idr_mutex);
+ kfree(tg);
+}
+
+/**
+ * drm_mode_put_tile_group - drop a reference to a tile group.
+ * @dev: DRM device
+ * @tg: tile group to drop reference to.
+ *
+ * drop reference to tile group and free if 0.
+ */
+void drm_mode_put_tile_group(struct drm_device *dev,
+ struct drm_tile_group *tg)
+{
+ kref_put(&tg->refcount, drm_tile_group_free);
+}
+
+/**
+ * drm_mode_get_tile_group - get a reference to an existing tile group
+ * @dev: DRM device
+ * @topology: 8-bytes unique per monitor.
+ *
+ * Use the unique bytes to get a reference to an existing tile group.
+ *
+ * RETURNS:
+ * tile group or NULL if not found.
+ */
+struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
+ char topology[8])
+{
+ struct drm_tile_group *tg;
+ int id;
+ mutex_lock(&dev->mode_config.idr_mutex);
+ idr_for_each_entry(&dev->mode_config.tile_idr, tg, id) {
+ if (!memcmp(tg->group_data, topology, 8)) {
+ if (!kref_get_unless_zero(&tg->refcount))
+ tg = NULL;
+ mutex_unlock(&dev->mode_config.idr_mutex);
+ return tg;
+ }
+ }
+ mutex_unlock(&dev->mode_config.idr_mutex);
+ return NULL;
+}
+
+/**
+ * drm_mode_create_tile_group - create a tile group from a displayid description
+ * @dev: DRM device
+ * @topology: 8-bytes unique per monitor.
+ *
+ * Create a tile group for the unique monitor, and get a unique
+ * identifier for the tile group.
+ *
+ * RETURNS:
+ * new tile group or error.
+ */
+struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
+ char topology[8])
+{
+ struct drm_tile_group *tg;
+ int ret;
+
+ tg = kzalloc(sizeof(*tg), GFP_KERNEL);
+ if (!tg)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&tg->refcount);
+ memcpy(tg->group_data, topology, 8);
+ tg->dev = dev;
+
+ mutex_lock(&dev->mode_config.idr_mutex);
+ ret = idr_alloc(&dev->mode_config.tile_idr, tg, 1, 0, GFP_KERNEL);
+ if (ret >= 0) {
+ tg->id = ret;
+ } else {
+ kfree(tg);
+ tg = ERR_PTR(ret);
+ }
+
+ mutex_unlock(&dev->mode_config.idr_mutex);
+ return tg;
+}
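A connector probe path that has parsed the 8-byte DisplayID topology would typically look for an existing group first and only create one if none matches. A hedged sketch (the foo_ name is hypothetical):

static struct drm_tile_group *
foo_lookup_or_create_tile_group(struct drm_device *dev, char topology[8])
{
	struct drm_tile_group *tg;

	tg = drm_mode_get_tile_group(dev, topology);
	if (tg)
		return tg;

	/* May return an ERR_PTR() on allocation or idr failure. */
	return drm_mode_create_tile_group(dev, topology);
}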
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 6c65a0a28fb..d552708409d 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -34,12 +34,35 @@
#include <linux/moduleparam.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
+/**
+ * DOC: overview
+ *
+ * The CRTC modeset helper library provides a default set_config implementation
+ * in drm_crtc_helper_set_config(). Plus a few other convenience functions using
+ * the same callbacks which drivers can use to e.g. restore the modeset
+ * configuration on resume with drm_helper_resume_force_mode().
+ *
+ * The driver callbacks are mostly compatible with the atomic modeset helpers,
+ * except for the handling of the primary plane: Atomic helpers require that the
+ * primary plane is implemented as a real standalone plane and not directly tied
+ * to the CRTC state. For easier transition this library provides functions to
+ * implement the old semantics required by the CRTC helpers using the new plane
+ * and atomic helper callbacks.
+ *
+ * Drivers are strongly urged to convert to the atomic helpers (by way of first
+ * converting to the plane helpers). New drivers must not use these functions
+ * but need to implement the atomic interface instead, potentially using the
+ * atomic helpers for that.
+ */
MODULE_AUTHOR("David Airlie, Jesse Barnes");
MODULE_DESCRIPTION("DRM KMS helper");
MODULE_LICENSE("GPL and additional rights");
@@ -888,3 +911,112 @@ void drm_helper_resume_force_mode(struct drm_device *dev)
drm_modeset_unlock_all(dev);
}
EXPORT_SYMBOL(drm_helper_resume_force_mode);
+
+/**
+ * drm_helper_crtc_mode_set - mode_set implementation for atomic plane helpers
+ * @crtc: DRM CRTC
+ * @mode: DRM display mode which userspace requested
+ * @adjusted_mode: DRM display mode adjusted by ->mode_fixup callbacks
+ * @x: x offset of the CRTC scanout area on the underlying framebuffer
+ * @y: y offset of the CRTC scanout area on the underlying framebuffer
+ * @old_fb: previous framebuffer
+ *
+ * This function implements a callback usable as the ->mode_set callback
+ * required by the crtc helpers. Besides the atomic plane helper functions for
+ * the primary plane the driver must also provide the ->mode_set_nofb callback
+ * to set up the crtc.
+ *
+ * This is a transitional helper useful for converting drivers to the atomic
+ * interfaces.
+ */
+int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ int ret;
+
+ if (crtc->funcs->atomic_duplicate_state)
+ crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
+ else if (crtc->state)
+ crtc_state = kmemdup(crtc->state, sizeof(*crtc_state),
+ GFP_KERNEL);
+ else
+ crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
+ if (!crtc_state)
+ return -ENOMEM;
+
+ crtc_state->enable = true;
+ crtc_state->planes_changed = true;
+ crtc_state->mode_changed = true;
+ drm_mode_copy(&crtc_state->mode, mode);
+ drm_mode_copy(&crtc_state->adjusted_mode, adjusted_mode);
+
+ if (crtc_funcs->atomic_check) {
+ ret = crtc_funcs->atomic_check(crtc, crtc_state);
+ if (ret) {
+ kfree(crtc_state);
+
+ return ret;
+ }
+ }
+
+ swap(crtc->state, crtc_state);
+
+ crtc_funcs->mode_set_nofb(crtc);
+
+ if (crtc_state) {
+ if (crtc->funcs->atomic_destroy_state)
+ crtc->funcs->atomic_destroy_state(crtc, crtc_state);
+ else
+ kfree(crtc_state);
+ }
+
+ return drm_helper_crtc_mode_set_base(crtc, x, y, old_fb);
+}
+EXPORT_SYMBOL(drm_helper_crtc_mode_set);
+
+/**
+ * drm_helper_crtc_mode_set_base - mode_set_base implementation for atomic plane helpers
+ * @crtc: DRM CRTC
+ * @x: x offset of the CRTC scanout area on the underlying framebuffer
+ * @y: y offset of the CRTC scanout area on the underlying framebuffer
+ * @old_fb: previous framebuffer
+ *
+ * This function implements a callback usable as the ->mode_set_base callback
+ * required by the crtc helpers. The driver must provide the atomic plane helper
+ * functions for the primary plane.
+ *
+ * This is a transitional helper useful for converting drivers to the atomic
+ * interfaces.
+ */
+int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_plane_state *plane_state;
+ struct drm_plane *plane = crtc->primary;
+
+ if (plane->funcs->atomic_duplicate_state)
+ plane_state = plane->funcs->atomic_duplicate_state(plane);
+ else if (plane->state)
+ plane_state = drm_atomic_helper_plane_duplicate_state(plane);
+ else
+ plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
+ if (!plane_state)
+ return -ENOMEM;
+
+ plane_state->crtc = crtc;
+ drm_atomic_set_fb_for_plane(plane_state, crtc->primary->fb);
+ plane_state->crtc_x = 0;
+ plane_state->crtc_y = 0;
+ plane_state->crtc_h = crtc->mode.vdisplay;
+ plane_state->crtc_w = crtc->mode.hdisplay;
+ plane_state->src_x = x << 16;
+ plane_state->src_y = y << 16;
+ plane_state->src_h = crtc->mode.vdisplay << 16;
+ plane_state->src_w = crtc->mode.hdisplay << 16;
+
+ return drm_plane_helper_commit(plane, plane_state, old_fb);
+}
+EXPORT_SYMBOL(drm_helper_crtc_mode_set_base);
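
A hedged sketch of how a driver in transition might wire up these helpers; the foo_* callbacks are placeholders, and the primary plane is assumed to already use the atomic plane helper callbacks:

	static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
		.mode_fixup	= foo_crtc_mode_fixup,
		.mode_set	= drm_helper_crtc_mode_set,
		.mode_set_nofb	= foo_crtc_mode_set_nofb,
		.mode_set_base	= drm_helper_crtc_mode_set_base,
	};
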
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 08e33b8b13a..79968e39c8d 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -39,198 +39,6 @@
* blocks, ...
*/
-/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
-static int
-i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
- uint8_t write_byte, uint8_t *read_byte)
-{
- struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
- int ret;
-
- ret = (*algo_data->aux_ch)(adapter, mode,
- write_byte, read_byte);
- return ret;
-}
-
-/*
- * I2C over AUX CH
- */
-
-/*
- * Send the address. If the I2C link is running, this 'restarts'
- * the connection with the new address, this is used for doing
- * a write followed by a read (as needed for DDC)
- */
-static int
-i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading)
-{
- struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
- int mode = MODE_I2C_START;
- int ret;
-
- if (reading)
- mode |= MODE_I2C_READ;
- else
- mode |= MODE_I2C_WRITE;
- algo_data->address = address;
- algo_data->running = true;
- ret = i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
- return ret;
-}
-
-/*
- * Stop the I2C transaction. This closes out the link, sending
- * a bare address packet with the MOT bit turned off
- */
-static void
-i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading)
-{
- struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
- int mode = MODE_I2C_STOP;
-
- if (reading)
- mode |= MODE_I2C_READ;
- else
- mode |= MODE_I2C_WRITE;
- if (algo_data->running) {
- (void) i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
- algo_data->running = false;
- }
-}
-
-/*
- * Write a single byte to the current I2C address, the
- * the I2C link must be running or this returns -EIO
- */
-static int
-i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte)
-{
- struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
- int ret;
-
- if (!algo_data->running)
- return -EIO;
-
- ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL);
- return ret;
-}
-
-/*
- * Read a single byte from the current I2C address, the
- * I2C link must be running or this returns -EIO
- */
-static int
-i2c_algo_dp_aux_get_byte(struct i2c_adapter *adapter, u8 *byte_ret)
-{
- struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
- int ret;
-
- if (!algo_data->running)
- return -EIO;
-
- ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret);
- return ret;
-}
-
-static int
-i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter,
- struct i2c_msg *msgs,
- int num)
-{
- int ret = 0;
- bool reading = false;
- int m;
- int b;
-
- for (m = 0; m < num; m++) {
- u16 len = msgs[m].len;
- u8 *buf = msgs[m].buf;
- reading = (msgs[m].flags & I2C_M_RD) != 0;
- ret = i2c_algo_dp_aux_address(adapter, msgs[m].addr, reading);
- if (ret < 0)
- break;
- if (reading) {
- for (b = 0; b < len; b++) {
- ret = i2c_algo_dp_aux_get_byte(adapter, &buf[b]);
- if (ret < 0)
- break;
- }
- } else {
- for (b = 0; b < len; b++) {
- ret = i2c_algo_dp_aux_put_byte(adapter, buf[b]);
- if (ret < 0)
- break;
- }
- }
- if (ret < 0)
- break;
- }
- if (ret >= 0)
- ret = num;
- i2c_algo_dp_aux_stop(adapter, reading);
- DRM_DEBUG_KMS("dp_aux_xfer return %d\n", ret);
- return ret;
-}
-
-static u32
-i2c_algo_dp_aux_functionality(struct i2c_adapter *adapter)
-{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
- I2C_FUNC_SMBUS_READ_BLOCK_DATA |
- I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
- I2C_FUNC_10BIT_ADDR;
-}
-
-static const struct i2c_algorithm i2c_dp_aux_algo = {
- .master_xfer = i2c_algo_dp_aux_xfer,
- .functionality = i2c_algo_dp_aux_functionality,
-};
-
-static void
-i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
-{
- (void) i2c_algo_dp_aux_address(adapter, 0, false);
- (void) i2c_algo_dp_aux_stop(adapter, false);
-}
-
-static int
-i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
-{
- adapter->algo = &i2c_dp_aux_algo;
- adapter->retries = 3;
- i2c_dp_aux_reset_bus(adapter);
- return 0;
-}
-
-/**
- * i2c_dp_aux_add_bus() - register an i2c adapter using the aux ch helper
- * @adapter: i2c adapter to register
- *
- * This registers an i2c adapter that uses dp aux channel as it's underlaying
- * transport. The driver needs to fill out the &i2c_algo_dp_aux_data structure
- * and store it in the algo_data member of the @adapter argument. This will be
- * used by the i2c over dp aux algorithm to drive the hardware.
- *
- * RETURNS:
- * 0 on success, -ERRNO on failure.
- *
- * IMPORTANT:
- * This interface is deprecated, please switch to the new dp aux helpers and
- * drm_dp_aux_register().
- */
-int
-i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
-{
- int error;
-
- error = i2c_dp_aux_prepare_bus(adapter);
- if (error)
- return error;
- error = i2c_add_adapter(adapter);
- return error;
-}
-EXPORT_SYMBOL(i2c_dp_aux_add_bus);
-
/* Helpers for DP link training */
static u8 dp_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r)
{
@@ -378,10 +186,11 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
/*
* The specification doesn't give any recommendation on how often to
- * retry native transactions, so retry 7 times like for I2C-over-AUX
- * transactions.
+ * retry native transactions. We used to retry 7 times like for
+ * aux i2c transactions, but for real world devices this wasn't
+ * sufficient, so bump to 32, which makes Dell 4k monitors happier.
*/
- for (retry = 0; retry < 7; retry++) {
+ for (retry = 0; retry < 32; retry++) {
mutex_lock(&aux->hw_mutex);
err = aux->transfer(aux, &msg);
@@ -654,10 +463,12 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
case DP_AUX_I2C_REPLY_NACK:
DRM_DEBUG_KMS("I2C nack\n");
+ aux->i2c_nack_count++;
return -EREMOTEIO;
case DP_AUX_I2C_REPLY_DEFER:
DRM_DEBUG_KMS("I2C defer\n");
+ aux->i2c_defer_count++;
usleep_range(400, 500);
continue;
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 070f913d2db..9a5b68717ec 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -839,6 +839,8 @@ static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
{
+ struct drm_dp_mst_branch *mstb;
+
switch (old_pdt) {
case DP_PEER_DEVICE_DP_LEGACY_CONV:
case DP_PEER_DEVICE_SST_SINK:
@@ -846,8 +848,9 @@ static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
drm_dp_mst_unregister_i2c_bus(&port->aux);
break;
case DP_PEER_DEVICE_MST_BRANCHING:
- drm_dp_put_mst_branch_device(port->mstb);
+ mstb = port->mstb;
port->mstb = NULL;
+ drm_dp_put_mst_branch_device(mstb);
break;
}
}
@@ -858,6 +861,8 @@ static void drm_dp_destroy_port(struct kref *kref)
struct drm_dp_mst_topology_mgr *mgr = port->mgr;
if (!port->input) {
port->vcpi.num_slots = 0;
+
+ kfree(port->cached_edid);
if (port->connector)
(*port->mgr->cbs->destroy_connector)(mgr, port->connector);
drm_dp_port_teardown_pdt(port, port->pdt);
@@ -1011,19 +1016,20 @@ static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
static void build_mst_prop_path(struct drm_dp_mst_port *port,
struct drm_dp_mst_branch *mstb,
- char *proppath)
+ char *proppath,
+ size_t proppath_size)
{
int i;
char temp[8];
- snprintf(proppath, 255, "mst:%d", mstb->mgr->conn_base_id);
+ snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
for (i = 0; i < (mstb->lct - 1); i++) {
int shift = (i % 2) ? 0 : 4;
int port_num = mstb->rad[i / 2] >> shift;
- snprintf(temp, 8, "-%d", port_num);
- strncat(proppath, temp, 255);
+ snprintf(temp, sizeof(temp), "-%d", port_num);
+ strlcat(proppath, temp, proppath_size);
}
- snprintf(temp, 8, "-%d", port->port_num);
- strncat(proppath, temp, 255);
+ snprintf(temp, sizeof(temp), "-%d", port->port_num);
+ strlcat(proppath, temp, proppath_size);
}
static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
@@ -1094,8 +1100,12 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
if (created && !port->input) {
char proppath[255];
- build_mst_prop_path(port, mstb, proppath);
+ build_mst_prop_path(port, mstb, proppath, sizeof(proppath));
port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
+
+ if (port->port_num >= 8) {
+ port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
+ }
}
/* put reference to this port */
@@ -1798,17 +1808,27 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
return 0;
}
-static int drm_dp_get_vc_payload_bw(int dp_link_bw, int dp_link_count)
+static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
+ int dp_link_count,
+ int *out)
{
switch (dp_link_bw) {
+ default:
+ DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
+ dp_link_bw, dp_link_count);
+ return false;
+
case DP_LINK_BW_1_62:
- return 3 * dp_link_count;
+ *out = 3 * dp_link_count;
+ break;
case DP_LINK_BW_2_7:
- return 5 * dp_link_count;
+ *out = 5 * dp_link_count;
+ break;
case DP_LINK_BW_5_4:
- return 10 * dp_link_count;
+ *out = 10 * dp_link_count;
+ break;
}
- BUG();
+ return true;
}
/**
@@ -1840,7 +1860,13 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
goto out_unlock;
}
- mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1], mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
+ if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1],
+ mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK,
+ &mgr->pbn_div)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
mgr->total_pbn = 2560;
mgr->total_slots = DIV_ROUND_UP(mgr->total_pbn, mgr->pbn_div);
mgr->avail_slots = mgr->total_slots;
@@ -2150,7 +2176,8 @@ EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
* This returns the current connection state for a port. It validates the
* port pointer still exists so the caller doesn't require a reference
*/
-enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
+ struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
enum drm_connector_status status = connector_status_disconnected;
@@ -2169,6 +2196,10 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr
case DP_PEER_DEVICE_SST_SINK:
status = connector_status_connected;
+ /* for logical ports - cache the EDID */
+ if (port->port_num >= 8 && !port->cached_edid) {
+ port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
+ }
break;
case DP_PEER_DEVICE_DP_LEGACY_CONV:
if (port->ldps)
@@ -2200,7 +2231,12 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
if (!port)
return NULL;
- edid = drm_get_edid(connector, &port->aux.ddc);
+ if (port->cached_edid)
+ edid = drm_edid_duplicate(port->cached_edid);
+ else
+ edid = drm_get_edid(connector, &port->aux.ddc);
+
+ drm_mode_connector_set_tile_property(connector);
drm_dp_put_port(port);
return edid;
}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index bc3da32d458..4f41377b0b8 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -56,7 +56,7 @@ static struct idr drm_minors_idr;
struct class *drm_class;
static struct dentry *drm_debugfs_root;
-void drm_err(const char *func, const char *format, ...)
+void drm_err(const char *format, ...)
{
struct va_format vaf;
va_list args;
@@ -66,7 +66,8 @@ void drm_err(const char *func, const char *format, ...)
vaf.fmt = format;
vaf.va = &args;
- printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf);
+ printk(KERN_ERR "[" DRM_NAME ":%pf] *ERROR* %pV",
+ __builtin_return_address(0), &vaf);
va_end(args);
}
@@ -534,6 +535,8 @@ static void drm_fs_inode_free(struct inode *inode)
* The initial ref-count of the object is 1. Use drm_dev_ref() and
* drm_dev_unref() to take and drop further ref-counts.
*
+ * Note that for purely virtual devices @parent can be NULL.
+ *
* RETURNS:
* Pointer to new DRM device, or NULL if out of memory.
*/
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 3bf999134bc..53bc7a62890 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -34,6 +34,7 @@
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_edid.h>
+#include <drm/drm_displayid.h>
#define version_greater(edid, maj, min) \
(((edid)->version > (maj)) || \
@@ -1014,6 +1015,27 @@ module_param_named(edid_fixup, edid_fixup, int, 0400);
MODULE_PARM_DESC(edid_fixup,
"Minimum number of valid EDID header bytes (0-8, default 6)");
+static void drm_get_displayid(struct drm_connector *connector,
+ struct edid *edid);
+
+static int drm_edid_block_checksum(const u8 *raw_edid)
+{
+ int i;
+ u8 csum = 0;
+ for (i = 0; i < EDID_LENGTH; i++)
+ csum += raw_edid[i];
+
+ return csum;
+}
+
+static bool drm_edid_is_zero(const u8 *in_edid, int length)
+{
+ if (memchr_inv(in_edid, 0, length))
+ return false;
+
+ return true;
+}
+
/**
* drm_edid_block_valid - Sanity check the EDID block (base or extension)
* @raw_edid: pointer to raw EDID block
@@ -1027,8 +1049,7 @@ MODULE_PARM_DESC(edid_fixup,
*/
bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
{
- int i;
- u8 csum = 0;
+ u8 csum;
struct edid *edid = (struct edid *)raw_edid;
if (WARN_ON(!raw_edid))
@@ -1048,8 +1069,7 @@ bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
}
}
- for (i = 0; i < EDID_LENGTH; i++)
- csum += raw_edid[i];
+ csum = drm_edid_block_checksum(raw_edid);
if (csum) {
if (print_bad_edid) {
DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
@@ -1080,9 +1100,13 @@ bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
bad:
if (print_bad_edid) {
- printk(KERN_ERR "Raw EDID:\n");
- print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1,
+ if (drm_edid_is_zero(raw_edid, EDID_LENGTH)) {
+ printk(KERN_ERR "EDID block is all zeroes\n");
+ } else {
+ printk(KERN_ERR "Raw EDID:\n");
+ print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1,
raw_edid, EDID_LENGTH, false);
+ }
}
return false;
}
@@ -1115,7 +1139,7 @@ EXPORT_SYMBOL(drm_edid_is_valid);
#define DDC_SEGMENT_ADDR 0x30
/**
* drm_do_probe_ddc_edid() - get EDID information via I2C
- * @adapter: I2C device adaptor
+ * @data: I2C device adapter
* @buf: EDID data buffer to be filled
* @block: 128 byte EDID block to start fetching from
* @len: EDID data buffer length to fetch
@@ -1125,9 +1149,9 @@ EXPORT_SYMBOL(drm_edid_is_valid);
* Return: 0 on success or -1 on failure.
*/
static int
-drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
- int block, int len)
+drm_do_probe_ddc_edid(void *data, u8 *buf, unsigned int block, size_t len)
{
+ struct i2c_adapter *adapter = data;
unsigned char start = block * EDID_LENGTH;
unsigned char segment = block >> 1;
unsigned char xfers = segment ? 3 : 2;
@@ -1176,16 +1200,26 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
return ret == xfers ? 0 : -1;
}
-static bool drm_edid_is_zero(u8 *in_edid, int length)
-{
- if (memchr_inv(in_edid, 0, length))
- return false;
-
- return true;
-}
-
-static u8 *
-drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
+/**
+ * drm_do_get_edid - get EDID data using a custom EDID block read function
+ * @connector: connector we're probing
+ * @get_edid_block: EDID block read function
+ * @data: private data passed to the block read function
+ *
+ * When the I2C adapter connected to the DDC bus is hidden behind a device that
+ * exposes a different interface to read EDID blocks this function can be used
+ * to get EDID data using a custom block read function.
+ *
+ * As in the general case the DDC bus is accessible by the kernel at the I2C
+ * level, drivers must make all reasonable efforts to expose it as an I2C
+ * adapter and use drm_get_edid() instead of abusing this function.
+ *
+ * Return: Pointer to valid EDID or NULL if we couldn't find any.
+ */
+struct edid *drm_do_get_edid(struct drm_connector *connector,
+ int (*get_edid_block)(void *data, u8 *buf, unsigned int block,
+ size_t len),
+ void *data)
{
int i, j = 0, valid_extensions = 0;
u8 *block, *new;
@@ -1196,7 +1230,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
/* base block fetch */
for (i = 0; i < 4; i++) {
- if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH))
+ if (get_edid_block(data, block, 0, EDID_LENGTH))
goto out;
if (drm_edid_block_valid(block, 0, print_bad_edid))
break;
@@ -1210,7 +1244,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
/* if there's no extensions, we're done */
if (block[0x7e] == 0)
- return block;
+ return (struct edid *)block;
new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
if (!new)
@@ -1219,7 +1253,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
for (j = 1; j <= block[0x7e]; j++) {
for (i = 0; i < 4; i++) {
- if (drm_do_probe_ddc_edid(adapter,
+ if (get_edid_block(data,
block + (valid_extensions + 1) * EDID_LENGTH,
j, EDID_LENGTH))
goto out;
@@ -1247,7 +1281,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
block = new;
}
- return block;
+ return (struct edid *)block;
carp:
if (print_bad_edid) {
@@ -1260,6 +1294,7 @@ out:
kfree(block);
return NULL;
}
+EXPORT_SYMBOL_GPL(drm_do_get_edid);
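
A hedged sketch of a custom block read callback for drm_do_get_edid(); foo_bridge and foo_bridge_read_edid() are hypothetical, only the callback signature comes from this patch:

	static int foo_get_edid_block(void *data, u8 *buf, unsigned int block,
				      size_t len)
	{
		struct foo_bridge *bridge = data;

		/* fetch one 128-byte EDID block through the bridge's own channel */
		return foo_bridge_read_edid(bridge, block * EDID_LENGTH, buf, len);
	}

	edid = drm_do_get_edid(connector, foo_get_edid_block, bridge);
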
/**
* drm_probe_ddc() - probe DDC presence
@@ -1289,11 +1324,14 @@ EXPORT_SYMBOL(drm_probe_ddc);
struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter)
{
- struct edid *edid = NULL;
+ struct edid *edid;
- if (drm_probe_ddc(adapter))
- edid = (struct edid *)drm_do_get_edid(connector, adapter);
+ if (!drm_probe_ddc(adapter))
+ return NULL;
+ edid = drm_do_get_edid(connector, drm_do_probe_ddc_edid, adapter);
+ if (edid)
+ drm_get_displayid(connector, edid);
return edid;
}
EXPORT_SYMBOL(drm_get_edid);
@@ -2389,7 +2427,7 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
/*
* Search EDID for CEA extension block.
*/
-static u8 *drm_find_cea_extension(struct edid *edid)
+static u8 *drm_find_edid_extension(struct edid *edid, int ext_id)
{
u8 *edid_ext = NULL;
int i;
@@ -2401,7 +2439,7 @@ static u8 *drm_find_cea_extension(struct edid *edid)
/* Find CEA extension */
for (i = 0; i < edid->extensions; i++) {
edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
- if (edid_ext[0] == CEA_EXT)
+ if (edid_ext[0] == ext_id)
break;
}
@@ -2411,6 +2449,16 @@ static u8 *drm_find_cea_extension(struct edid *edid)
return edid_ext;
}
+static u8 *drm_find_cea_extension(struct edid *edid)
+{
+ return drm_find_edid_extension(edid, CEA_EXT);
+}
+
+static u8 *drm_find_displayid_extension(struct edid *edid)
+{
+ return drm_find_edid_extension(edid, DISPLAYID_EXT);
+}
+
/*
* Calculate the alternate clock for the CEA mode
* (60Hz vs. 59.94Hz etc.)
@@ -3128,9 +3176,12 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
}
}
eld[5] |= sad_count << 4;
- eld[2] = (20 + mnl + sad_count * 3 + 3) / 4;
- DRM_DEBUG_KMS("ELD size %d, SAD count %d\n", (int)eld[2], sad_count);
+ eld[DRM_ELD_BASELINE_ELD_LEN] =
+ DIV_ROUND_UP(drm_eld_calc_baseline_block_size(eld), 4);
+
+ DRM_DEBUG_KMS("ELD size %d, SAD count %d\n",
+ drm_eld_size(eld), sad_count);
}
EXPORT_SYMBOL(drm_edid_to_eld);
@@ -3868,3 +3919,123 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
return 0;
}
EXPORT_SYMBOL(drm_hdmi_vendor_infoframe_from_display_mode);
+
+static int drm_parse_display_id(struct drm_connector *connector,
+ u8 *displayid, int length,
+ bool is_edid_extension)
+{
+ /* if this is an EDID extension the first byte will be 0x70 */
+ int idx = 0;
+ struct displayid_hdr *base;
+ struct displayid_block *block;
+ u8 csum = 0;
+ int i;
+
+ if (is_edid_extension)
+ idx = 1;
+
+ base = (struct displayid_hdr *)&displayid[idx];
+
+ DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n",
+ base->rev, base->bytes, base->prod_id, base->ext_count);
+
+ if (base->bytes + 5 > length - idx)
+ return -EINVAL;
+
+ for (i = idx; i <= base->bytes + 5; i++) {
+ csum += displayid[i];
+ }
+ if (csum) {
+ DRM_ERROR("DisplayID checksum invalid, remainder is %d\n", csum);
+ return -EINVAL;
+ }
+
+ block = (struct displayid_block *)&displayid[idx + 4];
+ DRM_DEBUG_KMS("block id %d, rev %d, len %d\n",
+ block->tag, block->rev, block->num_bytes);
+
+ switch (block->tag) {
+ case DATA_BLOCK_TILED_DISPLAY: {
+ struct displayid_tiled_block *tile = (struct displayid_tiled_block *)block;
+
+ u16 w, h;
+ u8 tile_v_loc, tile_h_loc;
+ u8 num_v_tile, num_h_tile;
+ struct drm_tile_group *tg;
+
+ w = tile->tile_size[0] | tile->tile_size[1] << 8;
+ h = tile->tile_size[2] | tile->tile_size[3] << 8;
+
+ num_v_tile = (tile->topo[0] & 0xf) | (tile->topo[2] & 0x30);
+ num_h_tile = (tile->topo[0] >> 4) | ((tile->topo[2] >> 2) & 0x30);
+ tile_v_loc = (tile->topo[1] & 0xf) | ((tile->topo[2] & 0x3) << 4);
+ tile_h_loc = (tile->topo[1] >> 4) | (((tile->topo[2] >> 2) & 0x3) << 4);
+
+ connector->has_tile = true;
+ if (tile->tile_cap & 0x80)
+ connector->tile_is_single_monitor = true;
+
+ connector->num_h_tile = num_h_tile + 1;
+ connector->num_v_tile = num_v_tile + 1;
+ connector->tile_h_loc = tile_h_loc;
+ connector->tile_v_loc = tile_v_loc;
+ connector->tile_h_size = w + 1;
+ connector->tile_v_size = h + 1;
+
+ DRM_DEBUG_KMS("tile cap 0x%x\n", tile->tile_cap);
+ DRM_DEBUG_KMS("tile_size %d x %d\n", w + 1, h + 1);
+ DRM_DEBUG_KMS("topo num tiles %dx%d, location %dx%d\n",
+ num_h_tile + 1, num_v_tile + 1, tile_h_loc, tile_v_loc);
+ DRM_DEBUG_KMS("vend %c%c%c\n", tile->topology_id[0], tile->topology_id[1], tile->topology_id[2]);
+
+ tg = drm_mode_get_tile_group(connector->dev, tile->topology_id);
+ if (!tg) {
+ tg = drm_mode_create_tile_group(connector->dev, tile->topology_id);
+ }
+ if (!tg)
+ return -ENOMEM;
+
+ if (connector->tile_group != tg) {
+ /* if we haven't got a pointer,
+ take the reference, drop ref to old tile group */
+ if (connector->tile_group) {
+ drm_mode_put_tile_group(connector->dev, connector->tile_group);
+ }
+ connector->tile_group = tg;
+ } else
+ /* if same tile group, then release the ref we just took. */
+ drm_mode_put_tile_group(connector->dev, tg);
+ }
+ break;
+ default:
+ printk("unknown displayid tag %d\n", block->tag);
+ break;
+ }
+ return 0;
+}
+
+static void drm_get_displayid(struct drm_connector *connector,
+ struct edid *edid)
+{
+ void *displayid = NULL;
+ int ret;
+ connector->has_tile = false;
+ displayid = drm_find_displayid_extension(edid);
+ if (!displayid) {
+ /* drop reference to any tile group we had */
+ goto out_drop_ref;
+ }
+
+ ret = drm_parse_display_id(connector, displayid, EDID_LENGTH, true);
+ if (ret < 0)
+ goto out_drop_ref;
+ if (!connector->has_tile)
+ goto out_drop_ref;
+ return;
+out_drop_ref:
+ if (connector->tile_group) {
+ drm_mode_put_tile_group(connector->dev, connector->tile_group);
+ connector->tile_group = NULL;
+ }
+ return;
+}
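
A worked example of the tile block decoding above: tile_size bytes 0x7f 0x07 0x37 0x04 give w = 0x77f and h = 0x437, i.e. a 1920x1080 tile after the +1 adjustment, and a 2x1 topology is stored as num_h_tile = 1, num_v_tile = 0 and reported as 2x1 for the same reason.
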
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 0a235fe61c9..732cb6f8e65 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -254,8 +254,7 @@ static void *edid_load(struct drm_connector *connector, const char *name,
name, connector_name);
out:
- if (fw)
- release_firmware(fw);
+ release_firmware(fw);
return edid;
}
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 0c0c39bac23..52ce26d6b4f 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -347,9 +347,18 @@ bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
bool ret;
+ bool do_delayed = false;
+
drm_modeset_lock_all(dev);
ret = restore_fbdev_mode(fb_helper);
+
+ do_delayed = fb_helper->delayed_hotplug;
+ if (do_delayed)
+ fb_helper->delayed_hotplug = false;
drm_modeset_unlock_all(dev);
+
+ if (do_delayed)
+ drm_fb_helper_hotplug_event(fb_helper);
return ret;
}
EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
@@ -888,10 +897,6 @@ int drm_fb_helper_set_par(struct fb_info *info)
drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
- if (fb_helper->delayed_hotplug) {
- fb_helper->delayed_hotplug = false;
- drm_fb_helper_hotplug_event(fb_helper);
- }
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_set_par);
@@ -995,19 +1000,21 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
crtc_count = 0;
for (i = 0; i < fb_helper->crtc_count; i++) {
struct drm_display_mode *desired_mode;
+ int x, y;
desired_mode = fb_helper->crtc_info[i].desired_mode;
-
+ x = fb_helper->crtc_info[i].x;
+ y = fb_helper->crtc_info[i].y;
if (desired_mode) {
if (gamma_size == 0)
gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size;
- if (desired_mode->hdisplay < sizes.fb_width)
- sizes.fb_width = desired_mode->hdisplay;
- if (desired_mode->vdisplay < sizes.fb_height)
- sizes.fb_height = desired_mode->vdisplay;
- if (desired_mode->hdisplay > sizes.surface_width)
- sizes.surface_width = desired_mode->hdisplay;
- if (desired_mode->vdisplay > sizes.surface_height)
- sizes.surface_height = desired_mode->vdisplay;
+ if (desired_mode->hdisplay + x < sizes.fb_width)
+ sizes.fb_width = desired_mode->hdisplay + x;
+ if (desired_mode->vdisplay + y < sizes.fb_height)
+ sizes.fb_height = desired_mode->vdisplay + y;
+ if (desired_mode->hdisplay + x > sizes.surface_width)
+ sizes.surface_width = desired_mode->hdisplay + x;
+ if (desired_mode->vdisplay + y > sizes.surface_height)
+ sizes.surface_height = desired_mode->vdisplay + y;
crtc_count++;
}
}
@@ -1307,6 +1314,7 @@ static void drm_enable_connectors(struct drm_fb_helper *fb_helper,
static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
struct drm_display_mode **modes,
+ struct drm_fb_offset *offsets,
bool *enabled, int width, int height)
{
int count, i, j;
@@ -1378,27 +1386,88 @@ static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
return false;
}
+static int drm_get_tile_offsets(struct drm_fb_helper *fb_helper,
+ struct drm_display_mode **modes,
+ struct drm_fb_offset *offsets,
+ int idx,
+ int h_idx, int v_idx)
+{
+ struct drm_fb_helper_connector *fb_helper_conn;
+ int i;
+ int hoffset = 0, voffset = 0;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ fb_helper_conn = fb_helper->connector_info[i];
+ if (!fb_helper_conn->connector->has_tile)
+ continue;
+
+ if (!modes[i] && (h_idx || v_idx)) {
+ DRM_DEBUG_KMS("no modes for connector tiled %d %d\n", i,
+ fb_helper_conn->connector->base.id);
+ continue;
+ }
+ if (fb_helper_conn->connector->tile_h_loc < h_idx)
+ hoffset += modes[i]->hdisplay;
+
+ if (fb_helper_conn->connector->tile_v_loc < v_idx)
+ voffset += modes[i]->vdisplay;
+ }
+ offsets[idx].x = hoffset;
+ offsets[idx].y = voffset;
+ DRM_DEBUG_KMS("returned %d %d for %d %d\n", hoffset, voffset, h_idx, v_idx);
+ return 0;
+}
+
static bool drm_target_preferred(struct drm_fb_helper *fb_helper,
struct drm_display_mode **modes,
+ struct drm_fb_offset *offsets,
bool *enabled, int width, int height)
{
struct drm_fb_helper_connector *fb_helper_conn;
int i;
-
+ uint64_t conn_configured = 0, mask;
+ int tile_pass = 0;
+ mask = (1 << fb_helper->connector_count) - 1;
+retry:
for (i = 0; i < fb_helper->connector_count; i++) {
fb_helper_conn = fb_helper->connector_info[i];
- if (enabled[i] == false)
+ if (conn_configured & (1 << i))
continue;
+ if (enabled[i] == false) {
+ conn_configured |= (1 << i);
+ continue;
+ }
+
+ /* first pass over all the untiled connectors */
+ if (tile_pass == 0 && fb_helper_conn->connector->has_tile)
+ continue;
+
+ if (tile_pass == 1) {
+ if (fb_helper_conn->connector->tile_h_loc != 0 ||
+ fb_helper_conn->connector->tile_v_loc != 0)
+ continue;
+
+ } else {
+ if (fb_helper_conn->connector->tile_h_loc != tile_pass -1 &&
+ fb_helper_conn->connector->tile_v_loc != tile_pass - 1)
+ /* if this tile_pass doesn't cover any of the tiles - keep going */
+ continue;
+
+ /* find the tile offsets for this pass - need
+ to find all tiles left and above */
+ drm_get_tile_offsets(fb_helper, modes, offsets,
+ i, fb_helper_conn->connector->tile_h_loc, fb_helper_conn->connector->tile_v_loc);
+ }
DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
fb_helper_conn->connector->base.id);
/* got for command line mode first */
modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
if (!modes[i]) {
- DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
- fb_helper_conn->connector->base.id);
+ DRM_DEBUG_KMS("looking for preferred mode on connector %d %d\n",
+ fb_helper_conn->connector->base.id, fb_helper_conn->connector->tile_group ? fb_helper_conn->connector->tile_group->id : 0);
modes[i] = drm_has_preferred_mode(fb_helper_conn, width, height);
}
/* No preferred modes, pick one off the list */
@@ -1408,6 +1477,12 @@ static bool drm_target_preferred(struct drm_fb_helper *fb_helper,
}
DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
"none");
+ conn_configured |= (1 << i);
+ }
+
+ if ((conn_configured & mask) != mask) {
+ tile_pass++;
+ goto retry;
}
return true;
}
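
As an illustration of the retry loop above, consider one untiled DP monitor plus a 2x1 tiled monitor: pass 0 picks a mode for the untiled connector only, pass 1 configures the tile at location (0,0), and pass 2 configures the tile with tile_h_loc == 1, computing its framebuffer offset from the tile to its left via drm_get_tile_offsets().
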
@@ -1497,6 +1572,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
struct drm_device *dev = fb_helper->dev;
struct drm_fb_helper_crtc **crtcs;
struct drm_display_mode **modes;
+ struct drm_fb_offset *offsets;
struct drm_mode_set *modeset;
bool *enabled;
int width, height;
@@ -1511,9 +1587,11 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
modes = kcalloc(dev->mode_config.num_connector,
sizeof(struct drm_display_mode *), GFP_KERNEL);
+ offsets = kcalloc(dev->mode_config.num_connector,
+ sizeof(struct drm_fb_offset), GFP_KERNEL);
enabled = kcalloc(dev->mode_config.num_connector,
sizeof(bool), GFP_KERNEL);
- if (!crtcs || !modes || !enabled) {
+ if (!crtcs || !modes || !enabled || !offsets) {
DRM_ERROR("Memory allocation failed\n");
goto out;
}
@@ -1523,14 +1601,16 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
if (!(fb_helper->funcs->initial_config &&
fb_helper->funcs->initial_config(fb_helper, crtcs, modes,
+ offsets,
enabled, width, height))) {
memset(modes, 0, dev->mode_config.num_connector*sizeof(modes[0]));
memset(crtcs, 0, dev->mode_config.num_connector*sizeof(crtcs[0]));
+ memset(offsets, 0, dev->mode_config.num_connector*sizeof(offsets[0]));
- if (!drm_target_cloned(fb_helper,
- modes, enabled, width, height) &&
- !drm_target_preferred(fb_helper,
- modes, enabled, width, height))
+ if (!drm_target_cloned(fb_helper, modes, offsets,
+ enabled, width, height) &&
+ !drm_target_preferred(fb_helper, modes, offsets,
+ enabled, width, height))
DRM_ERROR("Unable to find initial modes\n");
DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n",
@@ -1550,18 +1630,23 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
for (i = 0; i < fb_helper->connector_count; i++) {
struct drm_display_mode *mode = modes[i];
struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
+ struct drm_fb_offset *offset = &offsets[i];
modeset = &fb_crtc->mode_set;
if (mode && fb_crtc) {
- DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
- mode->name, fb_crtc->mode_set.crtc->base.id);
+ DRM_DEBUG_KMS("desired mode %s set on crtc %d (%d,%d)\n",
+ mode->name, fb_crtc->mode_set.crtc->base.id, offset->x, offset->y);
fb_crtc->desired_mode = mode;
+ fb_crtc->x = offset->x;
+ fb_crtc->y = offset->y;
if (modeset->mode)
drm_mode_destroy(dev, modeset->mode);
modeset->mode = drm_mode_duplicate(dev,
fb_crtc->desired_mode);
modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
modeset->fb = fb_helper->fb;
+ modeset->x = offset->x;
+ modeset->y = offset->y;
}
}
@@ -1570,7 +1655,6 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
modeset = &fb_helper->crtc_info[i].mode_set;
if (modeset->num_connectors == 0) {
BUG_ON(modeset->fb);
- BUG_ON(modeset->num_connectors);
if (modeset->mode)
drm_mode_destroy(dev, modeset->mode);
modeset->mode = NULL;
@@ -1579,6 +1663,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
out:
kfree(crtcs);
kfree(modes);
+ kfree(offsets);
kfree(enabled);
}
diff --git a/drivers/gpu/drm/drm_flip_work.c b/drivers/gpu/drm/drm_flip_work.c
index f9c7fa3d001..43d9b950ef9 100644
--- a/drivers/gpu/drm/drm_flip_work.c
+++ b/drivers/gpu/drm/drm_flip_work.c
@@ -25,6 +25,44 @@
#include "drm_flip_work.h"
/**
+ * drm_flip_work_allocate_task - allocate a flip-work task
+ * @data: data associated to the task
+ * @flags: allocator flags
+ *
+ * Allocate a drm_flip_task object and attach private data to it.
+ */
+struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags)
+{
+ struct drm_flip_task *task;
+
+ task = kzalloc(sizeof(*task), flags);
+ if (task)
+ task->data = data;
+
+ return task;
+}
+EXPORT_SYMBOL(drm_flip_work_allocate_task);
+
+/**
+ * drm_flip_work_queue_task - queue a specific task
+ * @work: the flip-work
+ * @task: the task to handle
+ *
+ * Queues the task, which will later be run (passed back to drm_flip_func_t
+ * func) on a work queue after drm_flip_work_commit() is called.
+ */
+void drm_flip_work_queue_task(struct drm_flip_work *work,
+ struct drm_flip_task *task)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&work->lock, flags);
+ list_add_tail(&task->node, &work->queued);
+ spin_unlock_irqrestore(&work->lock, flags);
+}
+EXPORT_SYMBOL(drm_flip_work_queue_task);
+
+/**
* drm_flip_work_queue - queue work
* @work: the flip-work
* @val: the value to queue
@@ -34,10 +72,14 @@
*/
void drm_flip_work_queue(struct drm_flip_work *work, void *val)
{
- if (kfifo_put(&work->fifo, val)) {
- atomic_inc(&work->pending);
+ struct drm_flip_task *task;
+
+ task = drm_flip_work_allocate_task(val,
+ drm_can_sleep() ? GFP_KERNEL : GFP_ATOMIC);
+ if (task) {
+ drm_flip_work_queue_task(work, task);
} else {
- DRM_ERROR("%s fifo full!\n", work->name);
+ DRM_ERROR("%s could not allocate task!\n", work->name);
work->func(work, val);
}
}
@@ -56,9 +98,12 @@ EXPORT_SYMBOL(drm_flip_work_queue);
void drm_flip_work_commit(struct drm_flip_work *work,
struct workqueue_struct *wq)
{
- uint32_t pending = atomic_read(&work->pending);
- atomic_add(pending, &work->count);
- atomic_sub(pending, &work->pending);
+ unsigned long flags;
+
+ spin_lock_irqsave(&work->lock, flags);
+ list_splice_tail(&work->queued, &work->commited);
+ INIT_LIST_HEAD(&work->queued);
+ spin_unlock_irqrestore(&work->lock, flags);
queue_work(wq, &work->worker);
}
EXPORT_SYMBOL(drm_flip_work_commit);
@@ -66,47 +111,46 @@ EXPORT_SYMBOL(drm_flip_work_commit);
static void flip_worker(struct work_struct *w)
{
struct drm_flip_work *work = container_of(w, struct drm_flip_work, worker);
- uint32_t count = atomic_read(&work->count);
- void *val = NULL;
+ struct list_head tasks;
+ unsigned long flags;
+
+ while (1) {
+ struct drm_flip_task *task, *tmp;
+
+ INIT_LIST_HEAD(&tasks);
+ spin_lock_irqsave(&work->lock, flags);
+ list_splice_tail(&work->commited, &tasks);
+ INIT_LIST_HEAD(&work->commited);
+ spin_unlock_irqrestore(&work->lock, flags);
- atomic_sub(count, &work->count);
+ if (list_empty(&tasks))
+ break;
- while(count--)
- if (!WARN_ON(!kfifo_get(&work->fifo, &val)))
- work->func(work, val);
+ list_for_each_entry_safe(task, tmp, &tasks, node) {
+ work->func(work, task->data);
+ kfree(task);
+ }
+ }
}
/**
* drm_flip_work_init - initialize flip-work
* @work: the flip-work to initialize
- * @size: the max queue depth
* @name: debug name
* @func: the callback work function
*
* Initializes/allocates resources for the flip-work
- *
- * RETURNS:
- * Zero on success, error code on failure.
*/
-int drm_flip_work_init(struct drm_flip_work *work, int size,
+void drm_flip_work_init(struct drm_flip_work *work,
const char *name, drm_flip_func_t func)
{
- int ret;
-
work->name = name;
- atomic_set(&work->count, 0);
- atomic_set(&work->pending, 0);
+ INIT_LIST_HEAD(&work->queued);
+ INIT_LIST_HEAD(&work->commited);
+ spin_lock_init(&work->lock);
work->func = func;
- ret = kfifo_alloc(&work->fifo, size, GFP_KERNEL);
- if (ret) {
- DRM_ERROR("could not allocate %s fifo\n", name);
- return ret;
- }
-
INIT_WORK(&work->worker, flip_worker);
-
- return 0;
}
EXPORT_SYMBOL(drm_flip_work_init);
@@ -118,7 +162,6 @@ EXPORT_SYMBOL(drm_flip_work_init);
*/
void drm_flip_work_cleanup(struct drm_flip_work *work)
{
- WARN_ON(!kfifo_is_empty(&work->fifo));
- kfifo_free(&work->fifo);
+ WARN_ON(!list_empty(&work->queued) || !list_empty(&work->commited));
}
EXPORT_SYMBOL(drm_flip_work_cleanup);
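
A hedged usage sketch of the reworked flip-work API; foo_unref_worker and the foo structure are placeholders for driver code:

	static void foo_unref_worker(struct drm_flip_work *work, void *val)
	{
		drm_framebuffer_unreference(val);
	}

	/* at init time -- no queue depth and no error to handle any more */
	drm_flip_work_init(&foo->unref_work, "fb unref", foo_unref_worker);

	/* from the pageflip path, possibly in atomic context */
	drm_flip_work_queue(&foo->unref_work, old_fb);

	/* once the flip has completed */
	drm_flip_work_commit(&foo->unref_work, system_wq);
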
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index ed7bc68f7e8..0b9514b6cd6 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -515,16 +515,19 @@ ssize_t drm_read(struct file *filp, char __user *buffer,
size_t total;
ssize_t ret;
- ret = wait_event_interruptible(file_priv->event_wait,
- !list_empty(&file_priv->event_list));
- if (ret < 0)
- return ret;
+ if ((filp->f_flags & O_NONBLOCK) == 0) {
+ ret = wait_event_interruptible(file_priv->event_wait,
+ !list_empty(&file_priv->event_list));
+ if (ret < 0)
+ return ret;
+ }
total = 0;
while (drm_dequeue_event(file_priv, total, count, &e)) {
if (copy_to_user(buffer + total,
e->event, e->event->length)) {
total = -EFAULT;
+ e->destroy(e);
break;
}
@@ -532,7 +535,7 @@ ssize_t drm_read(struct file *filp, char __user *buffer,
e->destroy(e);
}
- return total;
+ return total ?: -EAGAIN;
}
EXPORT_SYMBOL(drm_read);
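
A hedged userspace sketch of what the non-blocking path enables (error handling trimmed; fd is assumed to be a DRM device opened with O_NONBLOCK):

	char buf[1024];
	ssize_t n = read(fd, buf, sizeof(buf));
	if (n < 0 && errno == EAGAIN) {
		/* no events pending yet -- poll() and try again later */
	}
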
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index f6ca51259fa..16a16477071 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -188,7 +188,7 @@ drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
}
/**
- * drm_gem_object_free - release resources bound to userspace handles
+ * drm_gem_object_handle_free - release resources bound to userspace handles
* @obj: GEM object to clean up.
*
* Called after the last handle to the object has been closed
@@ -309,7 +309,7 @@ EXPORT_SYMBOL(drm_gem_dumb_destroy);
* drm_gem_handle_create_tail - internal functions to create a handle
* @file_priv: drm file-private structure to register the handle for
* @obj: object to register
- * @handlep: pionter to return the created handle to the caller
+ * @handlep: pointer to return the created handle to the caller
*
* This expects the dev->object_name_lock to be held already and will drop it
* before returning. Used to avoid races in establishing new handles when
@@ -362,7 +362,7 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
}
/**
- * gem_handle_create - create a gem handle for an object
+ * drm_gem_handle_create - create a gem handle for an object
* @file_priv: drm file-private structure to register the handle for
* @obj: object to register
* @handlep: pionter to return the created handle to the caller
@@ -371,10 +371,9 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
* to the object, which includes a regular reference count. Callers
* will likely want to dereference the object afterwards.
*/
-int
-drm_gem_handle_create(struct drm_file *file_priv,
- struct drm_gem_object *obj,
- u32 *handlep)
+int drm_gem_handle_create(struct drm_file *file_priv,
+ struct drm_gem_object *obj,
+ u32 *handlep)
{
mutex_lock(&obj->dev->object_name_lock);
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 0316310e2cc..e419eedf751 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -29,18 +29,31 @@
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_vma_manager.h>
-/*
+/**
+ * DOC: cma helpers
+ *
+ * The Contiguous Memory Allocator reserves a pool of memory at early boot
+ * that is used to service requests for large blocks of contiguous memory.
+ *
+ * The DRM GEM/CMA helpers use this allocator as a means to provide buffer
+ * objects that are physically contiguous in memory. This is useful for
+ * display drivers that are unable to map scattered buffers via an IOMMU.
+ */
+
+/**
* __drm_gem_cma_create - Create a GEM CMA object without allocating memory
- * @drm: The drm device
- * @size: The GEM object size
+ * @drm: DRM device
+ * @size: size of the object to allocate
*
- * This function creates and initializes a GEM CMA object of the given size, but
- * doesn't allocate any memory to back the object.
+ * This function creates and initializes a GEM CMA object of the given size,
+ * but doesn't allocate any memory to back the object.
*
- * Return a struct drm_gem_cma_object* on success or ERR_PTR values on failure.
+ * Returns:
+ * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
+ * error code on failure.
*/
static struct drm_gem_cma_object *
-__drm_gem_cma_create(struct drm_device *drm, unsigned int size)
+__drm_gem_cma_create(struct drm_device *drm, size_t size)
{
struct drm_gem_cma_object *cma_obj;
struct drm_gem_object *gem_obj;
@@ -69,14 +82,21 @@ error:
return ERR_PTR(ret);
}
-/*
+/**
* drm_gem_cma_create - allocate an object with the given size
+ * @drm: DRM device
+ * @size: size of the object to allocate
+ *
+ * This function creates a CMA GEM object and allocates a contiguous chunk of
+ * memory as backing store. The backing memory has the writecombine attribute
+ * set.
*
- * returns a struct drm_gem_cma_object* on success or ERR_PTR values
- * on failure.
+ * Returns:
+ * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
+ * error code on failure.
*/
struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
- unsigned int size)
+ size_t size)
{
struct drm_gem_cma_object *cma_obj;
int ret;
@@ -104,17 +124,26 @@ error:
}
EXPORT_SYMBOL_GPL(drm_gem_cma_create);
-/*
- * drm_gem_cma_create_with_handle - allocate an object with the given
- * size and create a gem handle on it
+/**
+ * drm_gem_cma_create_with_handle - allocate an object with the given size and
+ * return a GEM handle to it
+ * @file_priv: DRM file-private structure to register the handle for
+ * @drm: DRM device
+ * @size: size of the object to allocate
+ * @handle: return location for the GEM handle
+ *
+ * This function creates a CMA GEM object, allocating a physically contiguous
+ * chunk of memory as backing store. The GEM object is then added to the list
+ * of objects associated with the given file and a handle to it is returned.
*
- * returns a struct drm_gem_cma_object* on success or ERR_PTR values
- * on failure.
+ * Returns:
+ * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
+ * error code on failure.
*/
-static struct drm_gem_cma_object *drm_gem_cma_create_with_handle(
- struct drm_file *file_priv,
- struct drm_device *drm, unsigned int size,
- unsigned int *handle)
+static struct drm_gem_cma_object *
+drm_gem_cma_create_with_handle(struct drm_file *file_priv,
+ struct drm_device *drm, size_t size,
+ uint32_t *handle)
{
struct drm_gem_cma_object *cma_obj;
struct drm_gem_object *gem_obj;
@@ -145,16 +174,19 @@ err_handle_create:
return ERR_PTR(ret);
}
-/*
- * drm_gem_cma_free_object - (struct drm_driver)->gem_free_object callback
- * function
+/**
+ * drm_gem_cma_free_object - free resources associated with a CMA GEM object
+ * @gem_obj: GEM object to free
+ *
+ * This function frees the backing memory of the CMA GEM object, cleans up the
+ * GEM object state and frees the memory used to store the object itself.
+ * Drivers using the CMA helpers should set this as their DRM driver's
+ * ->gem_free_object() callback.
*/
void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
{
struct drm_gem_cma_object *cma_obj;
- drm_gem_free_mmap_offset(gem_obj);
-
cma_obj = to_drm_gem_cma_obj(gem_obj);
if (cma_obj->vaddr) {
@@ -170,18 +202,26 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
}
EXPORT_SYMBOL_GPL(drm_gem_cma_free_object);
-/*
- * drm_gem_cma_dumb_create - (struct drm_driver)->dumb_create callback
- * function
+/**
+ * drm_gem_cma_dumb_create_internal - create a dumb buffer object
+ * @file_priv: DRM file-private structure to create the dumb buffer for
+ * @drm: DRM device
+ * @args: IOCTL data
+ *
+ * This aligns the pitch and size arguments to the minimum required. This is
+ * an internal helper that can be wrapped by a driver to account for hardware
+ * with more specific alignment requirements. It should not be used directly
+ * as the ->dumb_create() callback in a DRM driver.
*
- * This aligns the pitch and size arguments to the minimum required. wrap
- * this into your own function if you need bigger alignment.
+ * Returns:
+ * 0 on success or a negative error code on failure.
*/
-int drm_gem_cma_dumb_create(struct drm_file *file_priv,
- struct drm_device *dev, struct drm_mode_create_dumb *args)
+int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args)
{
+ unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
struct drm_gem_cma_object *cma_obj;
- int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
if (args->pitch < min_pitch)
args->pitch = min_pitch;
@@ -189,18 +229,63 @@ int drm_gem_cma_dumb_create(struct drm_file *file_priv,
if (args->size < args->pitch * args->height)
args->size = args->pitch * args->height;
- cma_obj = drm_gem_cma_create_with_handle(file_priv, dev,
- args->size, &args->handle);
+ cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
+ &args->handle);
+ return PTR_ERR_OR_ZERO(cma_obj);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create_internal);
+
+/**
+ * drm_gem_cma_dumb_create - create a dumb buffer object
+ * @file_priv: DRM file-private structure to create the dumb buffer for
+ * @drm: DRM device
+ * @args: IOCTL data
+ *
+ * This function computes the pitch of the dumb buffer and rounds it up to an
+ * integer number of bytes per pixel. Drivers for hardware that doesn't have
+ * any additional restrictions on the pitch can directly use this function as
+ * their ->dumb_create() callback.
+ *
+ * For hardware with additional restrictions, drivers can adjust the fields
+ * set up by userspace and pass the IOCTL data along to the
+ * drm_gem_cma_dumb_create_internal() function.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_cma_dumb_create(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args)
+{
+ struct drm_gem_cma_object *cma_obj;
+
+ args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+ args->size = args->pitch * args->height;
+
+ cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
+ &args->handle);
return PTR_ERR_OR_ZERO(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
-/*
- * drm_gem_cma_dumb_map_offset - (struct drm_driver)->dumb_map_offset callback
- * function
+/**
+ * drm_gem_cma_dumb_map_offset - return the fake mmap offset for a CMA GEM
+ * object
+ * @file_priv: DRM file-private structure containing the GEM object
+ * @drm: DRM device
+ * @handle: GEM object handle
+ * @offset: return location for the fake mmap offset
+ *
+ * This function looks up an object by its handle and returns the fake mmap
+ * offset associated with it. Drivers using the CMA helpers should set this
+ * as their DRM driver's ->dumb_map_offset() callback.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
*/
int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *drm, uint32_t handle, uint64_t *offset)
+ struct drm_device *drm, u32 handle,
+ u64 *offset)
{
struct drm_gem_object *gem_obj;
@@ -208,7 +293,7 @@ int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
gem_obj = drm_gem_object_lookup(drm, file_priv, handle);
if (!gem_obj) {
- dev_err(drm->dev, "failed to lookup gem object\n");
+ dev_err(drm->dev, "failed to lookup GEM object\n");
mutex_unlock(&drm->struct_mutex);
return -EINVAL;
}
@@ -251,8 +336,20 @@ static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object *cma_obj,
return ret;
}
-/*
- * drm_gem_cma_mmap - (struct file_operation)->mmap callback function
+/**
+ * drm_gem_cma_mmap - memory-map a CMA GEM object
+ * @filp: file object
+ * @vma: VMA for the area to be mapped
+ *
+ * This function implements an augmented version of the GEM DRM file mmap
+ * operation for CMA objects: In addition to the usual GEM VMA setup it
+ * immediately faults in the entire object instead of using on-demand
+ * faulting. Drivers which employ the CMA helpers should use this function
+ * as their ->mmap() handler in the DRM device file's file_operations
+ * structure.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
*/
int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
{
@@ -272,7 +369,16 @@ int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
#ifdef CONFIG_DEBUG_FS
-void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m)
+/**
+ * drm_gem_cma_describe - describe a CMA GEM object for debugfs
+ * @cma_obj: CMA GEM object
+ * @m: debugfs file handle
+ *
+ * This function can be used to dump a human-readable representation of the
+ * CMA GEM object into a synthetic file.
+ */
+void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj,
+ struct seq_file *m)
{
struct drm_gem_object *obj = &cma_obj->base;
struct drm_device *dev = obj->dev;
@@ -291,7 +397,18 @@ void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m
EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
#endif
-/* low-level interface prime helpers */
+/**
+ * drm_gem_cma_prime_get_sg_table - provide a scatter/gather table of pinned
+ * pages for a CMA GEM object
+ * @obj: GEM object
+ *
+ * This function exports a scatter/gather table suitable for PRIME usage by
+ * calling the standard DMA mapping API. Drivers using the CMA helpers should
+ * set this as their DRM driver's ->gem_prime_get_sg_table() callback.
+ *
+ * Returns:
+ * A pointer to the scatter/gather table of pinned pages or NULL on failure.
+ */
struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj)
{
struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
@@ -315,6 +432,23 @@ out:
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table);
+/**
+ * drm_gem_cma_prime_import_sg_table - produce a CMA GEM object from another
+ * driver's scatter/gather table of pinned pages
+ * @dev: device to import into
+ * @attach: DMA-BUF attachment
+ * @sgt: scatter/gather table of pinned pages
+ *
+ * This function imports a scatter/gather table exported via DMA-BUF by
+ * another driver. Imported buffers must be physically contiguous in memory
+ * (i.e. the scatter/gather table must contain a single entry). Drivers that
+ * use the CMA helpers should set this as their DRM driver's
+ * ->gem_prime_import_sg_table() callback.
+ *
+ * Returns:
+ * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
+ * error code on failure.
+ */
struct drm_gem_object *
drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
@@ -339,6 +473,18 @@ drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);
+/**
+ * drm_gem_cma_prime_mmap - memory-map an exported CMA GEM object
+ * @obj: GEM object
+ * @vma: VMA for the area to be mapped
+ *
+ * This function maps a buffer imported via DRM PRIME into a userspace
+ * process's address space. Drivers that use the CMA helpers should set this
+ * as their DRM driver's ->gem_prime_mmap() callback.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
struct vm_area_struct *vma)
{
@@ -357,6 +503,20 @@ int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap);
+/**
+ * drm_gem_cma_prime_vmap - map a CMA GEM object into the kernel's virtual
+ * address space
+ * @obj: GEM object
+ *
+ * This function maps a buffer exported via DRM PRIME into the kernel's
+ * virtual address space. Since the CMA buffers are already mapped into the
+ * kernel virtual address space this simply returns the cached virtual
+ * address. Drivers using the CMA helpers should set this as their DRM
+ * driver's ->gem_prime_vmap() callback.
+ *
+ * Returns:
+ * The kernel virtual address of the CMA GEM object's backing store.
+ */
void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj)
{
struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
@@ -365,6 +525,17 @@ void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj)
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap);
+/**
+ * drm_gem_cma_prime_vunmap - unmap a CMA GEM object from the kernel's virtual
+ * address space
+ * @obj: GEM object
+ * @vaddr: kernel virtual address where the CMA GEM object was mapped
+ *
+ * This function removes a buffer exported via DRM PRIME from the kernel's
+ * virtual address space. This is a no-op because CMA buffers cannot be
+ * unmapped from kernel space. Drivers using the CMA helpers should set this
+ * as their DRM driver's ->gem_prime_vunmap() callback.
+ */
void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
/* Nothing to do */
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 5ef03c216a2..f5a5f18efa5 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -166,7 +166,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
/*
- * If the vblank interrupt was already disbled update the count
+ * If the vblank interrupt was already disabled update the count
* and timestamp to maintain the appearance that the counter
* has been ticking all along until this time. This makes the
* count account for the entire time between drm_vblank_on() and
@@ -1029,7 +1029,8 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
{
struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
- BUG_ON(atomic_read(&vblank->refcount) == 0);
+ if (WARN_ON(atomic_read(&vblank->refcount) == 0))
+ return;
if (WARN_ON(crtc >= dev->num_crtcs))
return;
@@ -1190,7 +1191,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_off);
*
* This functions restores the vblank interrupt state captured with
* drm_vblank_off() again. Note that calls to drm_vblank_on() and
- * drm_vblank_off() can be unbalanced and so can also be unconditionaly called
+ * drm_vblank_off() can be unbalanced and so can also be unconditionally called
* in driver load code to reflect the current hardware state of the crtc.
*
* This is the legacy version of drm_crtc_vblank_on().
@@ -1237,7 +1238,7 @@ EXPORT_SYMBOL(drm_vblank_on);
*
* This functions restores the vblank interrupt state captured with
* drm_vblank_off() again. Note that calls to drm_vblank_on() and
- * drm_vblank_off() can be unbalanced and so can also be unconditionaly called
+ * drm_vblank_off() can be unbalanced and so can also be unconditionally called
* in driver load code to reflect the current hardware state of the crtc.
*
* This is the native kms version of drm_vblank_on().
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index eb6dfe52cab..c0644bb865f 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -35,6 +35,16 @@
#include <video/mipi_display.h>
+/**
+ * DOC: dsi helpers
+ *
+ * These functions contain some common logic and helpers to deal with MIPI DSI
+ * peripherals.
+ *
+ * Helpers are provided for a number of standard MIPI DSI commands as well as a
+ * subset of the MIPI DCS command set.
+ */
+
static int mipi_dsi_device_match(struct device *dev, struct device_driver *drv)
{
return of_driver_match_device(dev, drv);
@@ -57,6 +67,29 @@ static struct bus_type mipi_dsi_bus_type = {
.pm = &mipi_dsi_device_pm_ops,
};
+static int of_device_match(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+/**
+ * of_find_mipi_dsi_device_by_node() - find the MIPI DSI device matching a
+ * device tree node
+ * @np: device tree node
+ *
+ * Return: A pointer to the MIPI DSI device corresponding to @np or NULL if no
+ * such device exists (or has not been registered yet).
+ */
+struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&mipi_dsi_bus_type, NULL, np, of_device_match);
+
+ return dev ? to_mipi_dsi_device(dev) : NULL;
+}
+EXPORT_SYMBOL(of_find_mipi_dsi_device_by_node);
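[Illustrative usage sketch; the "panel" phandle property name is an assumption:]

	struct device_node *np;
	struct mipi_dsi_device *dsi;

	np = of_parse_phandle(dev->of_node, "panel", 0);
	if (np) {
		dsi = of_find_mipi_dsi_device_by_node(np);
		of_node_put(np);
		if (!dsi)
			return -EPROBE_DEFER;	/* peripheral not registered yet */
	}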
+
static void mipi_dsi_dev_release(struct device *dev)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
@@ -198,59 +231,351 @@ int mipi_dsi_detach(struct mipi_dsi_device *dsi)
}
EXPORT_SYMBOL(mipi_dsi_detach);
+static ssize_t mipi_dsi_device_transfer(struct mipi_dsi_device *dsi,
+ struct mipi_dsi_msg *msg)
+{
+ const struct mipi_dsi_host_ops *ops = dsi->host->ops;
+
+ if (!ops || !ops->transfer)
+ return -ENOSYS;
+
+ if (dsi->mode_flags & MIPI_DSI_MODE_LPM)
+ msg->flags |= MIPI_DSI_MSG_USE_LPM;
+
+ return ops->transfer(dsi->host, msg);
+}
+
/**
- * mipi_dsi_dcs_write - send DCS write command
- * @dsi: DSI device
- * @data: pointer to the command followed by parameters
- * @len: length of @data
+ * mipi_dsi_packet_format_is_short - check if a packet is of the short format
+ * @type: MIPI DSI data type of the packet
+ *
+ * Return: true if the packet for the given data type is a short packet, false
+ * otherwise.
*/
-ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, const void *data,
- size_t len)
+bool mipi_dsi_packet_format_is_short(u8 type)
+{
+ switch (type) {
+ case MIPI_DSI_V_SYNC_START:
+ case MIPI_DSI_V_SYNC_END:
+ case MIPI_DSI_H_SYNC_START:
+ case MIPI_DSI_H_SYNC_END:
+ case MIPI_DSI_END_OF_TRANSMISSION:
+ case MIPI_DSI_COLOR_MODE_OFF:
+ case MIPI_DSI_COLOR_MODE_ON:
+ case MIPI_DSI_SHUTDOWN_PERIPHERAL:
+ case MIPI_DSI_TURN_ON_PERIPHERAL:
+ case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
+ case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
+ case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
+ case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
+ case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
+ case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
+ case MIPI_DSI_DCS_SHORT_WRITE:
+ case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
+ case MIPI_DSI_DCS_READ:
+ case MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE:
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(mipi_dsi_packet_format_is_short);
+
+/**
+ * mipi_dsi_packet_format_is_long - check if a packet is of the long format
+ * @type: MIPI DSI data type of the packet
+ *
+ * Return: true if the packet for the given data type is a long packet, false
+ * otherwise.
+ */
+bool mipi_dsi_packet_format_is_long(u8 type)
+{
+ switch (type) {
+ case MIPI_DSI_NULL_PACKET:
+ case MIPI_DSI_BLANKING_PACKET:
+ case MIPI_DSI_GENERIC_LONG_WRITE:
+ case MIPI_DSI_DCS_LONG_WRITE:
+ case MIPI_DSI_LOOSELY_PACKED_PIXEL_STREAM_YCBCR20:
+ case MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR24:
+ case MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR16:
+ case MIPI_DSI_PACKED_PIXEL_STREAM_30:
+ case MIPI_DSI_PACKED_PIXEL_STREAM_36:
+ case MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR12:
+ case MIPI_DSI_PACKED_PIXEL_STREAM_16:
+ case MIPI_DSI_PACKED_PIXEL_STREAM_18:
+ case MIPI_DSI_PIXEL_STREAM_3BYTE_18:
+ case MIPI_DSI_PACKED_PIXEL_STREAM_24:
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(mipi_dsi_packet_format_is_long);
+
+/**
+ * mipi_dsi_create_packet - create a packet from a message according to the
+ * DSI protocol
+ * @packet: pointer to a DSI packet structure
+ * @msg: message to translate into a packet
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_create_packet(struct mipi_dsi_packet *packet,
+ const struct mipi_dsi_msg *msg)
+{
+ const u8 *tx;
+
+ if (!packet || !msg)
+ return -EINVAL;
+
+ tx = msg->tx_buf;
+
+ /* do some minimum sanity checking */
+ if (!mipi_dsi_packet_format_is_short(msg->type) &&
+ !mipi_dsi_packet_format_is_long(msg->type))
+ return -EINVAL;
+
+ if (msg->channel > 3)
+ return -EINVAL;
+
+ memset(packet, 0, sizeof(*packet));
+ packet->header[0] = ((msg->channel & 0x3) << 6) | (msg->type & 0x3f);
+
+ /* TODO: compute ECC if hardware support is not available */
+
+ /*
+ * Long write packets contain the word count in header bytes 1 and 2.
+ * The payload follows the header and is word count bytes long.
+ *
+ * Short write packets encode up to two parameters in header bytes 1
+ * and 2.
+ */
+ if (mipi_dsi_packet_format_is_long(msg->type)) {
+ packet->header[1] = (msg->tx_len >> 0) & 0xff;
+ packet->header[2] = (msg->tx_len >> 8) & 0xff;
+
+ packet->payload_length = msg->tx_len;
+ packet->payload = tx;
+ } else {
+ packet->header[1] = (msg->tx_len > 0) ? tx[0] : 0;
+ packet->header[2] = (msg->tx_len > 1) ? tx[1] : 0;
+ }
+
+ packet->size = sizeof(packet->header) + packet->payload_length;
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_create_packet);
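[Rough host-side sketch of how a transfer path might use this helper; the foo_* FIFO accessors are hypothetical and the actual callback prototype is whatever struct mipi_dsi_host_ops defines.]

static ssize_t foo_dsi_transfer_one(struct mipi_dsi_host *host,
				    const struct mipi_dsi_msg *msg)
{
	struct mipi_dsi_packet packet;
	int ret;

	ret = mipi_dsi_create_packet(&packet, msg);
	if (ret < 0)
		return ret;

	/* the 4-byte header always goes out first */
	foo_dsi_write_fifo(host, packet.header, sizeof(packet.header));

	/* long packets are followed by the payload */
	if (packet.payload_length)
		foo_dsi_write_fifo(host, packet.payload, packet.payload_length);

	return msg->tx_len;
}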
+
+/**
+ * mipi_dsi_set_maximum_return_packet_size() - specify the maximum size of the
+ * payload in a long packet transmitted from the peripheral back to the
+ * host processor
+ * @dsi: DSI peripheral device
+ * @value: the maximum size of the payload
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi,
+ u16 value)
+{
+ u8 tx[2] = { value & 0xff, value >> 8 };
+ struct mipi_dsi_msg msg = {
+ .channel = dsi->channel,
+ .type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
+ .tx_len = sizeof(tx),
+ .tx_buf = tx,
+ };
+
+ return mipi_dsi_device_transfer(dsi, &msg);
+}
+EXPORT_SYMBOL(mipi_dsi_set_maximum_return_packet_size);
+
+/**
+ * mipi_dsi_generic_write() - transmit data using a generic write packet
+ * @dsi: DSI peripheral device
+ * @payload: buffer containing the payload
+ * @size: size of payload buffer
+ *
+ * This function will automatically choose the right data type depending on
+ * the payload length.
+ *
+ * Return: The number of bytes transmitted on success or a negative error code
+ * on failure.
+ */
+ssize_t mipi_dsi_generic_write(struct mipi_dsi_device *dsi, const void *payload,
+ size_t size)
+{
+ struct mipi_dsi_msg msg = {
+ .channel = dsi->channel,
+ .tx_buf = payload,
+ .tx_len = size
+ };
+
+ switch (size) {
+ case 0:
+ msg.type = MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM;
+ break;
+
+ case 1:
+ msg.type = MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM;
+ break;
+
+ case 2:
+ msg.type = MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM;
+ break;
+
+ default:
+ msg.type = MIPI_DSI_GENERIC_LONG_WRITE;
+ break;
+ }
+
+ return mipi_dsi_device_transfer(dsi, &msg);
+}
+EXPORT_SYMBOL(mipi_dsi_generic_write);
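[Illustrative usage sketch; the payload bytes are made-up example values:]

	static const u8 magic[2] = { 0xb0, 0x04 };	/* hypothetical vendor command */
	ssize_t err;

	err = mipi_dsi_generic_write(dsi, magic, sizeof(magic));
	if (err < 0)
		return err;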
+
+/**
+ * mipi_dsi_generic_read() - receive data using a generic read packet
+ * @dsi: DSI peripheral device
+ * @params: buffer containing the request parameters
+ * @num_params: number of request parameters
+ * @data: buffer in which to return the received data
+ * @size: size of receive buffer
+ *
+ * This function will automatically choose the right data type depending on
+ * the number of parameters passed in.
+ *
+ * Return: The number of bytes successfully read or a negative error code on
+ * failure.
+ */
+ssize_t mipi_dsi_generic_read(struct mipi_dsi_device *dsi, const void *params,
+ size_t num_params, void *data, size_t size)
+{
+ struct mipi_dsi_msg msg = {
+ .channel = dsi->channel,
+ .tx_len = num_params,
+ .tx_buf = params,
+ .rx_len = size,
+ .rx_buf = data
+ };
+
+ switch (num_params) {
+ case 0:
+ msg.type = MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM;
+ break;
+
+ case 1:
+ msg.type = MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM;
+ break;
+
+ case 2:
+ msg.type = MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return mipi_dsi_device_transfer(dsi, &msg);
+}
+EXPORT_SYMBOL(mipi_dsi_generic_read);
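[Illustrative usage sketch; the request parameter byte is a made-up example:]

	u8 param = 0x0a, status;
	ssize_t err;

	err = mipi_dsi_generic_read(dsi, &param, 1, &status, sizeof(status));
	if (err < 0)
		return err;
	if (err == 0)
		return -ENODATA;	/* nothing was read back */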
+
+/**
+ * mipi_dsi_dcs_write_buffer() - transmit a DCS command with payload
+ * @dsi: DSI peripheral device
+ * @data: buffer containing data to be transmitted
+ * @len: size of transmission buffer
+ *
+ * This function will automatically choose the right data type depending on
+ * the command payload length.
+ *
+ * Return: The number of bytes successfully transmitted or a negative error
+ * code on failure.
+ */
+ssize_t mipi_dsi_dcs_write_buffer(struct mipi_dsi_device *dsi,
+ const void *data, size_t len)
{
- const struct mipi_dsi_host_ops *ops = dsi->host->ops;
struct mipi_dsi_msg msg = {
.channel = dsi->channel,
.tx_buf = data,
.tx_len = len
};
- if (!ops || !ops->transfer)
- return -ENOSYS;
-
switch (len) {
case 0:
return -EINVAL;
+
case 1:
msg.type = MIPI_DSI_DCS_SHORT_WRITE;
break;
+
case 2:
msg.type = MIPI_DSI_DCS_SHORT_WRITE_PARAM;
break;
+
default:
msg.type = MIPI_DSI_DCS_LONG_WRITE;
break;
}
- if (dsi->mode_flags & MIPI_DSI_MODE_LPM)
- msg.flags = MIPI_DSI_MSG_USE_LPM;
+ return mipi_dsi_device_transfer(dsi, &msg);
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_write_buffer);
- return ops->transfer(dsi->host, &msg);
+/**
+ * mipi_dsi_dcs_write() - send DCS write command
+ * @dsi: DSI peripheral device
+ * @cmd: DCS command
+ * @data: buffer containing the command payload
+ * @len: command payload length
+ *
+ * This function will automatically choose the right data type depending on
+ * the command payload length.
+ *
+ * Return: The number of bytes successfully transmitted or a negative error
+ * code on failure.
+ */
+ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, u8 cmd,
+ const void *data, size_t len)
+{
+ ssize_t err;
+ size_t size;
+ u8 *tx;
+
+ if (len > 0) {
+ size = 1 + len;
+
+ tx = kmalloc(size, GFP_KERNEL);
+ if (!tx)
+ return -ENOMEM;
+
+ /* concatenate the DCS command byte and the payload */
+ tx[0] = cmd;
+ memcpy(&tx[1], data, len);
+ } else {
+ tx = &cmd;
+ size = 1;
+ }
+
+ err = mipi_dsi_dcs_write_buffer(dsi, tx, size);
+
+ if (len > 0)
+ kfree(tx);
+
+ return err;
}
EXPORT_SYMBOL(mipi_dsi_dcs_write);
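[Illustrative usage sketch: sending a one-parameter DCS command; the address mode value is panel-specific.]

	u8 address_mode = 0x00;	/* panel-specific value */
	ssize_t err;

	err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_ADDRESS_MODE,
				 &address_mode, sizeof(address_mode));
	if (err < 0)
		return err;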
/**
- * mipi_dsi_dcs_read - send DCS read request command
- * @dsi: DSI device
- * @cmd: DCS read command
- * @data: pointer to read buffer
- * @len: length of @data
+ * mipi_dsi_dcs_read() - send DCS read request command
+ * @dsi: DSI peripheral device
+ * @cmd: DCS command
+ * @data: buffer in which to receive data
+ * @len: size of receive buffer
*
- * Function returns number of read bytes or error code.
+ * Return: The number of bytes read or a negative error code on failure.
*/
ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data,
size_t len)
{
- const struct mipi_dsi_host_ops *ops = dsi->host->ops;
struct mipi_dsi_msg msg = {
.channel = dsi->channel,
.type = MIPI_DSI_DCS_READ,
@@ -260,15 +585,282 @@ ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data,
.rx_len = len
};
- if (!ops || !ops->transfer)
- return -ENOSYS;
+ return mipi_dsi_device_transfer(dsi, &msg);
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_read);
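[Illustrative usage sketch: reading the display identification bytes; whether a given panel actually returns three ID bytes is panel-specific.]

	u8 id[3];
	ssize_t err;

	err = mipi_dsi_dcs_read(dsi, MIPI_DCS_GET_DISPLAY_ID, id, sizeof(id));
	if (err < 0)
		return err;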
- if (dsi->mode_flags & MIPI_DSI_MODE_LPM)
- msg.flags = MIPI_DSI_MSG_USE_LPM;
+/**
+ * mipi_dsi_dcs_nop() - send DCS nop packet
+ * @dsi: DSI peripheral device
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_nop(struct mipi_dsi_device *dsi)
+{
+ ssize_t err;
+
+ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_NOP, NULL, 0);
+ if (err < 0)
+ return err;
- return ops->transfer(dsi->host, &msg);
+ return 0;
}
-EXPORT_SYMBOL(mipi_dsi_dcs_read);
+EXPORT_SYMBOL(mipi_dsi_dcs_nop);
+
+/**
+ * mipi_dsi_dcs_soft_reset() - perform a software reset of the display module
+ * @dsi: DSI peripheral device
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_soft_reset(struct mipi_dsi_device *dsi)
+{
+ ssize_t err;
+
+ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SOFT_RESET, NULL, 0);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_soft_reset);
+
+/**
+ * mipi_dsi_dcs_get_power_mode() - query the display module's current power
+ * mode
+ * @dsi: DSI peripheral device
+ * @mode: return location for the current power mode
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_get_power_mode(struct mipi_dsi_device *dsi, u8 *mode)
+{
+ ssize_t err;
+
+ err = mipi_dsi_dcs_read(dsi, MIPI_DCS_GET_POWER_MODE, mode,
+ sizeof(*mode));
+ if (err <= 0) {
+ if (err == 0)
+ err = -ENODATA;
+
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_get_power_mode);
+
+/**
+ * mipi_dsi_dcs_get_pixel_format() - gets the pixel format for the RGB image
+ * data used by the interface
+ * @dsi: DSI peripheral device
+ * @format: return location for the pixel format
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_get_pixel_format(struct mipi_dsi_device *dsi, u8 *format)
+{
+ ssize_t err;
+
+ err = mipi_dsi_dcs_read(dsi, MIPI_DCS_GET_PIXEL_FORMAT, format,
+ sizeof(*format));
+ if (err <= 0) {
+ if (err == 0)
+ err = -ENODATA;
+
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_get_pixel_format);
+
+/**
+ * mipi_dsi_dcs_enter_sleep_mode() - disable all unnecessary blocks inside the
+ * display module except interface communication
+ * @dsi: DSI peripheral device
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_enter_sleep_mode(struct mipi_dsi_device *dsi)
+{
+ ssize_t err;
+
+ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_ENTER_SLEEP_MODE, NULL, 0);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_enter_sleep_mode);
+
+/**
+ * mipi_dsi_dcs_exit_sleep_mode() - enable all blocks inside the display
+ * module
+ * @dsi: DSI peripheral device
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_exit_sleep_mode(struct mipi_dsi_device *dsi)
+{
+ ssize_t err;
+
+ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_EXIT_SLEEP_MODE, NULL, 0);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_exit_sleep_mode);
+
+/**
+ * mipi_dsi_dcs_set_display_off() - stop displaying the image data on the
+ * display device
+ * @dsi: DSI peripheral device
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_set_display_off(struct mipi_dsi_device *dsi)
+{
+ ssize_t err;
+
+ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_OFF, NULL, 0);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_display_off);
+
+/**
+ * mipi_dsi_dcs_set_display_on() - start displaying the image data on the
+ * display device
+ * @dsi: DSI peripheral device
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_set_display_on(struct mipi_dsi_device *dsi)
+{
+ ssize_t err;
+
+ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_ON, NULL, 0);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_display_on);
+
+/**
+ * mipi_dsi_dcs_set_column_address() - define the column extent of the frame
+ * memory accessed by the host processor
+ * @dsi: DSI peripheral device
+ * @start: first column of frame memory
+ * @end: last column of frame memory
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_set_column_address(struct mipi_dsi_device *dsi, u16 start,
+ u16 end)
+{
+ u8 payload[4] = { start >> 8, start & 0xff, end >> 8, end & 0xff };
+ ssize_t err;
+
+ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_COLUMN_ADDRESS, payload,
+ sizeof(payload));
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_column_address);
+
+/**
+ * mipi_dsi_dcs_set_page_address() - define the page extent of the frame
+ * memory accessed by the host processor
+ * @dsi: DSI peripheral device
+ * @start: first page of frame memory
+ * @end: last page of frame memory
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start,
+ u16 end)
+{
+ u8 payload[4] = { start >> 8, start & 0xff, end >> 8, end & 0xff };
+ ssize_t err;
+
+ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_PAGE_ADDRESS, payload,
+ sizeof(payload));
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_page_address);
+
+/**
+ * mipi_dsi_dcs_set_tear_off() - turn off the display module's Tearing Effect
+ * output signal on the TE signal line
+ * @dsi: DSI peripheral device
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi)
+{
+ ssize_t err;
+
+ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_TEAR_OFF, NULL, 0);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_off);
+
+/**
+ * mipi_dsi_dcs_set_tear_on() - turn on the display module's Tearing Effect
+ * output signal on the TE signal line.
+ * @dsi: DSI peripheral device
+ * @mode: the Tearing Effect Output Line mode
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
+ enum mipi_dsi_dcs_tear_mode mode)
+{
+ u8 value = mode;
+ ssize_t err;
+
+ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_TEAR_ON, &value,
+ sizeof(value));
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_on);
+
+/**
+ * mipi_dsi_dcs_set_pixel_format() - sets the pixel format for the RGB image
+ * data used by the interface
+ * @dsi: DSI peripheral device
+ * @format: pixel format
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_set_pixel_format(struct mipi_dsi_device *dsi, u8 format)
+{
+ ssize_t err;
+
+ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_PIXEL_FORMAT, &format,
+ sizeof(format));
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_pixel_format);
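[Illustrative sketch: a panel driver might combine the DCS helpers above into a power-up sequence along these lines; the 120 ms delay and the tear mode are panel-specific assumptions and foo_ is a placeholder.]

static int foo_panel_enable(struct mipi_dsi_device *dsi)
{
	int ret;

	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
	if (ret < 0)
		return ret;

	/* many panels need roughly 120 ms after sleep-out before further commands */
	msleep(120);

	ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
	if (ret < 0)
		return ret;

	return mipi_dsi_dcs_set_display_on(dsi);
}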
static int mipi_dsi_drv_probe(struct device *dev)
{
@@ -295,12 +887,18 @@ static void mipi_dsi_drv_shutdown(struct device *dev)
}
/**
- * mipi_dsi_driver_register - register a driver for DSI devices
+ * mipi_dsi_driver_register_full() - register a driver for DSI devices
* @drv: DSI driver structure
+ * @owner: owner module
+ *
+ * Return: 0 on success or a negative error code on failure.
*/
-int mipi_dsi_driver_register(struct mipi_dsi_driver *drv)
+int mipi_dsi_driver_register_full(struct mipi_dsi_driver *drv,
+ struct module *owner)
{
drv->driver.bus = &mipi_dsi_bus_type;
+ drv->driver.owner = owner;
+
if (drv->probe)
drv->driver.probe = mipi_dsi_drv_probe;
if (drv->remove)
@@ -310,11 +908,13 @@ int mipi_dsi_driver_register(struct mipi_dsi_driver *drv)
return driver_register(&drv->driver);
}
-EXPORT_SYMBOL(mipi_dsi_driver_register);
+EXPORT_SYMBOL(mipi_dsi_driver_register_full);
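[Illustrative sketch: with the *_register_full() convention, mipi_dsi_driver_register() presumably becomes a wrapper that passes THIS_MODULE; a peripheral driver still registers itself in the usual way. The foo_ names are placeholders.]

static struct mipi_dsi_driver foo_panel_driver = {
	.probe = foo_panel_probe,
	.remove = foo_panel_remove,
	.driver = {
		.name = "foo-panel",
		.of_match_table = foo_panel_of_match,
	},
};
/* or call mipi_dsi_driver_register()/unregister() from module init/exit
 * if the module_mipi_dsi_driver() convenience macro is not available */
module_mipi_dsi_driver(foo_panel_driver);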
/**
- * mipi_dsi_driver_unregister - unregister a driver for DSI devices
+ * mipi_dsi_driver_unregister() - unregister a driver for DSI devices
* @drv: DSI driver structure
*/
void mipi_dsi_driver_unregister(struct mipi_dsi_driver *drv)
{
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index d1b7d200652..6d8b941c820 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -914,7 +914,7 @@ EXPORT_SYMBOL(drm_mode_equal_no_clocks_no_stereo);
*
* This function is a helper which can be used to validate modes against size
* limitations of the DRM device/connector. If a mode is too big its status
- * memeber is updated with the appropriate validation failure code. The list
+ * member is updated with the appropriate validation failure code. The list
* itself is not changed.
*/
void drm_mode_validate_size(struct drm_device *dev,
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index 474e4d12a2d..51cc47d827d 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -157,14 +157,20 @@ void drm_modeset_unlock_all(struct drm_device *dev)
EXPORT_SYMBOL(drm_modeset_unlock_all);
/**
- * drm_modeset_lock_crtc - lock crtc with hidden acquire ctx
- * @crtc: drm crtc
+ * drm_modeset_lock_crtc - lock crtc with hidden acquire ctx for a plane update
+ * @crtc: DRM CRTC
+ * @plane: DRM plane to be updated on @crtc
+ *
+ * This function locks the given crtc and plane (which should be either the
+ * primary or cursor plane) using a hidden acquire context. This is necessary so
+ * that drivers internally using the atomic interfaces can grab further locks
+ * with the lock acquire context.
*
- * This function locks the given crtc using a hidden acquire context. This is
- * necessary so that drivers internally using the atomic interfaces can grab
- * further locks with the lock acquire context.
+ * Note that @plane can be NULL, e.g. when the cursor support hasn't been
+ * converted to universal planes yet.
*/
-void drm_modeset_lock_crtc(struct drm_crtc *crtc)
+void drm_modeset_lock_crtc(struct drm_crtc *crtc,
+ struct drm_plane *plane)
{
struct drm_modeset_acquire_ctx *ctx;
int ret;
@@ -180,6 +186,18 @@ retry:
if (ret)
goto fail;
+ if (plane) {
+ ret = drm_modeset_lock(&plane->mutex, ctx);
+ if (ret)
+ goto fail;
+
+ if (plane->crtc) {
+ ret = drm_modeset_lock(&plane->crtc->mutex, ctx);
+ if (ret)
+ goto fail;
+ }
+ }
+
WARN_ON(crtc->acquire_ctx);
/* now we hold the locks, so now that it is safe, stash the
@@ -437,15 +455,14 @@ void drm_modeset_unlock(struct drm_modeset_lock *lock)
}
EXPORT_SYMBOL(drm_modeset_unlock);
-/* Temporary.. until we have sufficiently fine grained locking, there
- * are a couple scenarios where it is convenient to grab all crtc locks.
- * It is planned to remove this:
- */
+/* In some legacy codepaths it's convenient to just grab all the crtc and plane
+ * related locks. */
int drm_modeset_lock_all_crtcs(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_mode_config *config = &dev->mode_config;
struct drm_crtc *crtc;
+ struct drm_plane *plane;
int ret = 0;
list_for_each_entry(crtc, &config->crtc_list, head) {
@@ -454,6 +471,12 @@ int drm_modeset_lock_all_crtcs(struct drm_device *dev,
return ret;
}
+ list_for_each_entry(plane, &config->plane_list, head) {
+ ret = drm_modeset_lock(&plane->mutex, ctx);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
EXPORT_SYMBOL(drm_modeset_lock_all_crtcs);
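[Illustrative sketch: a caller that wants all CRTC and plane locks with an explicit acquire context typically uses the standard deadlock-backoff loop; foo_ is a placeholder.]

static int foo_lock_everything(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock_all_crtcs(dev, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	/* ... inspect or update CRTC/plane state under the locks ... */

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}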
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 827ec1a3040..18a1ac6ac22 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -27,10 +27,38 @@
#include <drm/drmP.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic_helper.h>
#define SUBPIXEL_MASK 0xffff
+/**
+ * DOC: overview
+ *
+ * This helper library has two parts. The first part has support to implement
+ * primary plane support on top of the normal CRTC configuration interface.
+ * Since the legacy ->set_config interface ties the primary plane together with
+ * the CRTC state this does not allow userspace to disable the primary plane
+ * itself. To avoid too much duplicated code,
+ * drm_plane_helper_check_update() can be used to enforce the same
+ * restrictions that primary planes have always had. The default primary plane
+ * only exposes XRGB8888 and ARGB8888 as valid pixel formats for the attached
+ * framebuffer.
+ *
+ * Drivers are highly recommended to implement proper support for primary
+ * planes, and newly merged drivers must not rely upon these transitional
+ * helpers.
+ *
+ * The second part also implements transitional helpers which allow drivers to
+ * gradually switch to the atomic helper infrastructure for plane updates. Once
+ * that switch is complete, drivers shouldn't use these any longer; instead they
+ * should use the legacy implementations of the plane update and disable hooks
+ * provided by the atomic helpers.
+ *
+ * Again drivers are strongly urged to switch to the new interfaces.
+ */
+
/*
* This is the minimal list of formats that seem to be safe for modeset use
* with all current DRM drivers. Most hardware can actually support more
@@ -127,6 +155,11 @@ int drm_plane_helper_check_update(struct drm_plane *plane,
return -ERANGE;
}
+ if (!fb) {
+ *visible = false;
+ return 0;
+ }
+
*visible = drm_rect_clip_scaled(src, dest, clip, hscale, vscale);
if (!*visible)
/*
@@ -369,3 +402,171 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs);
}
EXPORT_SYMBOL(drm_crtc_init);
+
+int drm_plane_helper_commit(struct drm_plane *plane,
+ struct drm_plane_state *plane_state,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_plane_helper_funcs *plane_funcs;
+ struct drm_crtc *crtc[2];
+ struct drm_crtc_helper_funcs *crtc_funcs[2];
+ int i, ret = 0;
+
+ plane_funcs = plane->helper_private;
+
+ /* Since this is a transitional helper we can't assume that plane->state
+ * is always valid. Hence we need to use plane->crtc instead of
+ * plane->state->crtc as the old crtc. */
+ crtc[0] = plane->crtc;
+ crtc[1] = crtc[0] != plane_state->crtc ? plane_state->crtc : NULL;
+
+ for (i = 0; i < 2; i++)
+ crtc_funcs[i] = crtc[i] ? crtc[i]->helper_private : NULL;
+
+ if (plane_funcs->atomic_check) {
+ ret = plane_funcs->atomic_check(plane, plane_state);
+ if (ret)
+ goto out;
+ }
+
+ if (plane_funcs->prepare_fb && plane_state->fb) {
+ ret = plane_funcs->prepare_fb(plane, plane_state->fb);
+ if (ret)
+ goto out;
+ }
+
+ /* Point of no return, commit sw state. */
+ swap(plane->state, plane_state);
+
+ for (i = 0; i < 2; i++) {
+ if (crtc_funcs[i] && crtc_funcs[i]->atomic_begin)
+ crtc_funcs[i]->atomic_begin(crtc[i]);
+ }
+
+ plane_funcs->atomic_update(plane, plane_state);
+
+ for (i = 0; i < 2; i++) {
+ if (crtc_funcs[i] && crtc_funcs[i]->atomic_flush)
+ crtc_funcs[i]->atomic_flush(crtc[i]);
+ }
+
+ for (i = 0; i < 2; i++) {
+ if (!crtc[i])
+ continue;
+
+ /* There's no other way to figure out whether the crtc is running. */
+ ret = drm_crtc_vblank_get(crtc[i]);
+ if (ret == 0) {
+ drm_crtc_wait_one_vblank(crtc[i]);
+ drm_crtc_vblank_put(crtc[i]);
+ }
+
+ ret = 0;
+ }
+
+ if (plane_funcs->cleanup_fb && old_fb)
+ plane_funcs->cleanup_fb(plane, old_fb);
+out:
+ if (plane_state) {
+ if (plane->funcs->atomic_destroy_state)
+ plane->funcs->atomic_destroy_state(plane, plane_state);
+ else
+ drm_atomic_helper_plane_destroy_state(plane, plane_state);
+ }
+
+ return ret;
+}
+
+/**
+ * drm_plane_helper_update() - Helper for primary plane update
+ * @plane: plane object to update
+ * @crtc: owning CRTC of owning plane
+ * @fb: framebuffer to flip onto plane
+ * @crtc_x: x offset of primary plane on crtc
+ * @crtc_y: y offset of primary plane on crtc
+ * @crtc_w: width of primary plane rectangle on crtc
+ * @crtc_h: height of primary plane rectangle on crtc
+ * @src_x: x offset of @fb for panning
+ * @src_y: y offset of @fb for panning
+ * @src_w: width of source rectangle in @fb
+ * @src_h: height of source rectangle in @fb
+ *
+ * Provides a default plane update handler using the atomic plane update
+ * functions. It is fully left to the driver to check plane constraints and
+ * handle corner-cases like a fully occluded or otherwise invisible plane.
+ *
+ * This is useful for piecewise transitioning of a driver to the atomic helpers.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_plane_state *plane_state;
+
+ if (plane->funcs->atomic_duplicate_state)
+ plane_state = plane->funcs->atomic_duplicate_state(plane);
+ else if (plane->state)
+ plane_state = drm_atomic_helper_plane_duplicate_state(plane);
+ else
+ plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
+ if (!plane_state)
+ return -ENOMEM;
+
+ plane_state->crtc = crtc;
+ drm_atomic_set_fb_for_plane(plane_state, fb);
+ plane_state->crtc_x = crtc_x;
+ plane_state->crtc_y = crtc_y;
+ plane_state->crtc_h = crtc_h;
+ plane_state->crtc_w = crtc_w;
+ plane_state->src_x = src_x;
+ plane_state->src_y = src_y;
+ plane_state->src_h = src_h;
+ plane_state->src_w = src_w;
+
+ return drm_plane_helper_commit(plane, plane_state, plane->fb);
+}
+EXPORT_SYMBOL(drm_plane_helper_update);
+
+/**
+ * drm_plane_helper_disable() - Helper for primary plane disable
+ * @plane: plane to disable
+ *
+ * Provides a default plane disable handler using the atomic plane update
+ * functions. It is fully left to the driver to check plane constraints and
+ * handle corner-cases like a fully occluded or otherwise invisible plane.
+ *
+ * This is useful for piecewise transitioning of a driver to the atomic helpers.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+int drm_plane_helper_disable(struct drm_plane *plane)
+{
+ struct drm_plane_state *plane_state;
+
+ /* crtc helpers love to call the disable hooks for hardware that is
+ * already disabled, so cope with that. */
+ if (!plane->crtc)
+ return 0;
+
+ if (plane->funcs->atomic_duplicate_state)
+ plane_state = plane->funcs->atomic_duplicate_state(plane);
+ else if (plane->state)
+ plane_state = drm_atomic_helper_plane_duplicate_state(plane);
+ else
+ plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
+ if (!plane_state)
+ return -ENOMEM;
+
+ plane_state->crtc = NULL;
+ drm_atomic_set_fb_for_plane(plane_state, NULL);
+
+ return drm_plane_helper_commit(plane, plane_state, plane->fb);
+}
+EXPORT_SYMBOL(drm_plane_helper_disable);
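[Illustrative sketch: a driver partway through the atomic conversion would typically plug these transitional helpers into its plane funcs roughly as follows; foo_ is a placeholder and the atomic state hooks shown assume the driver relies on the default atomic helpers.]

static const struct drm_plane_funcs foo_plane_funcs = {
	.update_plane		= drm_plane_helper_update,
	.disable_plane		= drm_plane_helper_disable,
	.destroy		= drm_plane_cleanup,
	.reset			= drm_atomic_helper_plane_reset,
	.atomic_duplicate_state	= drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
};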
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 78ca3080842..7482b06cd08 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -328,7 +328,7 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
*/
/**
- * drm_gem_prime_export - helper library implemention of the export callback
+ * drm_gem_prime_export - helper library implementation of the export callback
* @dev: drm_device to export from
* @obj: GEM object to export
* @flags: flags like DRM_CLOEXEC
@@ -483,7 +483,7 @@ out_unlock:
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
/**
- * drm_gem_prime_import - helper library implemention of the import callback
+ * drm_gem_prime_import - helper library implementation of the import callback
* @dev: drm_device to import into
* @dma_buf: dma-buf object to import
*
@@ -669,7 +669,7 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
* the driver is responsible for mapping the pages into the
* importers address space for use with dma_buf itself.
*/
-struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages)
+struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
{
struct sg_table *sg = NULL;
int ret;
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 6857e9ad633..7483a47de8e 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -118,7 +118,8 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
mode->status = MODE_UNVERIFIED;
if (connector->force) {
- if (connector->force == DRM_FORCE_ON)
+ if (connector->force == DRM_FORCE_ON ||
+ connector->force == DRM_FORCE_ON_DIGITAL)
connector->status = connector_status_connected;
else
connector->status = connector_status_disconnected;
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index 6adb1e5cfb0..34d46aa7541 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -30,12 +30,17 @@
#include <drm/drm_panel.h>
#include <drm/bridge/ptn3460.h>
-#include "exynos_drm_drv.h"
#include "exynos_dp_core.h"
#define ctx_from_connector(c) container_of(c, struct exynos_dp_device, \
connector)
+static inline struct exynos_dp_device *
+display_to_dp(struct exynos_drm_display *d)
+{
+ return container_of(d, struct exynos_dp_device, display);
+}
+
struct bridge_init {
struct i2c_client *client;
struct device_node *node;
@@ -882,7 +887,7 @@ static void exynos_dp_hotplug(struct work_struct *work)
static void exynos_dp_commit(struct exynos_drm_display *display)
{
- struct exynos_dp_device *dp = display->ctx;
+ struct exynos_dp_device *dp = display_to_dp(display);
int ret;
/* Keep the panel disabled while we configure video */
@@ -1020,7 +1025,7 @@ static int exynos_drm_attach_lcd_bridge(struct drm_device *dev,
static int exynos_dp_create_connector(struct exynos_drm_display *display,
struct drm_encoder *encoder)
{
- struct exynos_dp_device *dp = display->ctx;
+ struct exynos_dp_device *dp = display_to_dp(display);
struct drm_connector *connector = &dp->connector;
int ret;
@@ -1052,33 +1057,19 @@ static int exynos_dp_create_connector(struct exynos_drm_display *display,
static void exynos_dp_phy_init(struct exynos_dp_device *dp)
{
- if (dp->phy) {
+ if (dp->phy)
phy_power_on(dp->phy);
- } else if (dp->phy_addr) {
- u32 reg;
-
- reg = __raw_readl(dp->phy_addr);
- reg |= dp->enable_mask;
- __raw_writel(reg, dp->phy_addr);
- }
}
static void exynos_dp_phy_exit(struct exynos_dp_device *dp)
{
- if (dp->phy) {
+ if (dp->phy)
phy_power_off(dp->phy);
- } else if (dp->phy_addr) {
- u32 reg;
-
- reg = __raw_readl(dp->phy_addr);
- reg &= ~(dp->enable_mask);
- __raw_writel(reg, dp->phy_addr);
- }
}
static void exynos_dp_poweron(struct exynos_drm_display *display)
{
- struct exynos_dp_device *dp = display->ctx;
+ struct exynos_dp_device *dp = display_to_dp(display);
if (dp->dpms_mode == DRM_MODE_DPMS_ON)
return;
@@ -1099,7 +1090,7 @@ static void exynos_dp_poweron(struct exynos_drm_display *display)
static void exynos_dp_poweroff(struct exynos_drm_display *display)
{
- struct exynos_dp_device *dp = display->ctx;
+ struct exynos_dp_device *dp = display_to_dp(display);
if (dp->dpms_mode != DRM_MODE_DPMS_ON)
return;
@@ -1124,7 +1115,7 @@ static void exynos_dp_poweroff(struct exynos_drm_display *display)
static void exynos_dp_dpms(struct exynos_drm_display *display, int mode)
{
- struct exynos_dp_device *dp = display->ctx;
+ struct exynos_dp_device *dp = display_to_dp(display);
switch (mode) {
case DRM_MODE_DPMS_ON:
@@ -1147,11 +1138,6 @@ static struct exynos_drm_display_ops exynos_dp_display_ops = {
.commit = exynos_dp_commit,
};
-static struct exynos_drm_display exynos_dp_display = {
- .type = EXYNOS_DISPLAY_TYPE_LCD,
- .ops = &exynos_dp_display_ops,
-};
-
static struct video_info *exynos_dp_dt_parse_pdata(struct device *dev)
{
struct device_node *dp_node = dev->of_node;
@@ -1210,44 +1196,6 @@ static struct video_info *exynos_dp_dt_parse_pdata(struct device *dev)
return dp_video_config;
}
-static int exynos_dp_dt_parse_phydata(struct exynos_dp_device *dp)
-{
- struct device_node *dp_phy_node = of_node_get(dp->dev->of_node);
- u32 phy_base;
- int ret = 0;
-
- dp_phy_node = of_find_node_by_name(dp_phy_node, "dptx-phy");
- if (!dp_phy_node) {
- dp->phy = devm_phy_get(dp->dev, "dp");
- return PTR_ERR_OR_ZERO(dp->phy);
- }
-
- if (of_property_read_u32(dp_phy_node, "reg", &phy_base)) {
- dev_err(dp->dev, "failed to get reg for dptx-phy\n");
- ret = -EINVAL;
- goto err;
- }
-
- if (of_property_read_u32(dp_phy_node, "samsung,enable-mask",
- &dp->enable_mask)) {
- dev_err(dp->dev, "failed to get enable-mask for dptx-phy\n");
- ret = -EINVAL;
- goto err;
- }
-
- dp->phy_addr = ioremap(phy_base, SZ_4);
- if (!dp->phy_addr) {
- dev_err(dp->dev, "failed to ioremap dp-phy\n");
- ret = -ENOMEM;
- goto err;
- }
-
-err:
- of_node_put(dp_phy_node);
-
- return ret;
-}
-
static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp)
{
int ret;
@@ -1263,10 +1211,10 @@ static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp)
static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
{
+ struct exynos_dp_device *dp = dev_get_drvdata(dev);
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm_dev = data;
struct resource *res;
- struct exynos_dp_device *dp = exynos_dp_display.ctx;
unsigned int irq_flags;
int ret = 0;
@@ -1277,9 +1225,21 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(dp->video_info))
return PTR_ERR(dp->video_info);
- ret = exynos_dp_dt_parse_phydata(dp);
- if (ret)
- return ret;
+ dp->phy = devm_phy_get(dp->dev, "dp");
+ if (IS_ERR(dp->phy)) {
+ dev_err(dp->dev, "no DP phy configured\n");
+ ret = PTR_ERR(dp->phy);
+ if (ret) {
+ /*
+ * The phy itself is not enabled, so we can move forward by
+ * assigning NULL to the phy pointer.
+ */
+ if (ret == -ENOSYS || ret == -ENODEV)
+ dp->phy = NULL;
+ else
+ return ret;
+ }
+ }
if (!dp->panel) {
ret = exynos_dp_dt_parse_panel(dp);
@@ -1346,17 +1306,15 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
dp->drm_dev = drm_dev;
- platform_set_drvdata(pdev, &exynos_dp_display);
-
- return exynos_drm_create_enc_conn(drm_dev, &exynos_dp_display);
+ return exynos_drm_create_enc_conn(drm_dev, &dp->display);
}
static void exynos_dp_unbind(struct device *dev, struct device *master,
void *data)
{
- struct exynos_drm_display *display = dev_get_drvdata(dev);
+ struct exynos_dp_device *dp = dev_get_drvdata(dev);
- exynos_dp_dpms(display, DRM_MODE_DPMS_OFF);
+ exynos_dp_dpms(&dp->display, DRM_MODE_DPMS_OFF);
}
static const struct component_ops exynos_dp_ops = {
@@ -1371,16 +1329,20 @@ static int exynos_dp_probe(struct platform_device *pdev)
struct exynos_dp_device *dp;
int ret;
- ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
- exynos_dp_display.type);
- if (ret)
- return ret;
-
dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device),
GFP_KERNEL);
if (!dp)
return -ENOMEM;
+ dp->display.type = EXYNOS_DISPLAY_TYPE_LCD;
+ dp->display.ops = &exynos_dp_display_ops;
+ platform_set_drvdata(pdev, dp);
+
+ ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
+ dp->display.type);
+ if (ret)
+ return ret;
+
panel_node = of_parse_phandle(dev->of_node, "panel", 0);
if (panel_node) {
dp->panel = of_drm_find_panel(panel_node);
@@ -1389,8 +1351,6 @@ static int exynos_dp_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
- exynos_dp_display.ctx = dp;
-
ret = component_add(&pdev->dev, &exynos_dp_ops);
if (ret)
exynos_drm_component_del(&pdev->dev,
@@ -1410,19 +1370,17 @@ static int exynos_dp_remove(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int exynos_dp_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct exynos_drm_display *display = platform_get_drvdata(pdev);
+ struct exynos_dp_device *dp = dev_get_drvdata(dev);
- exynos_dp_dpms(display, DRM_MODE_DPMS_OFF);
+ exynos_dp_dpms(&dp->display, DRM_MODE_DPMS_OFF);
return 0;
}
static int exynos_dp_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct exynos_drm_display *display = platform_get_drvdata(pdev);
+ struct exynos_dp_device *dp = dev_get_drvdata(dev);
- exynos_dp_dpms(display, DRM_MODE_DPMS_ON);
+ exynos_dp_dpms(&dp->display, DRM_MODE_DPMS_ON);
return 0;
}
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.h b/drivers/gpu/drm/exynos/exynos_dp_core.h
index a1aee6931bd..164f171168e 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.h
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.h
@@ -17,6 +17,8 @@
#include <drm/drm_dp_helper.h>
#include <drm/exynos_drm.h>
+#include "exynos_drm_drv.h"
+
#define DP_TIMEOUT_LOOP_COUNT 100
#define MAX_CR_LOOP 5
#define MAX_EQ_LOOP 5
@@ -145,6 +147,7 @@ struct link_train {
};
struct exynos_dp_device {
+ struct exynos_drm_display display;
struct device *dev;
struct drm_device *drm_dev;
struct drm_connector connector;
@@ -153,8 +156,6 @@ struct exynos_dp_device {
struct clk *clock;
unsigned int irq;
void __iomem *reg_base;
- void __iomem *phy_addr;
- unsigned int enable_mask;
struct video_info *video_info;
struct link_train link_train;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index 690dcddab72..e353d353836 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -15,10 +15,7 @@
#ifndef _EXYNOS_DRM_CRTC_H_
#define _EXYNOS_DRM_CRTC_H_
-struct drm_device;
-struct drm_crtc;
-struct exynos_drm_manager;
-struct exynos_drm_overlay;
+#include "exynos_drm_drv.h"
int exynos_drm_crtc_create(struct exynos_drm_manager *manager);
int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 3dc678ed994..37678cf4425 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -22,6 +22,7 @@
#include "exynos_drm_drv.h"
struct exynos_dpi {
+ struct exynos_drm_display display;
struct device *dev;
struct device_node *panel_node;
@@ -35,6 +36,11 @@ struct exynos_dpi {
#define connector_to_dpi(c) container_of(c, struct exynos_dpi, connector)
+static inline struct exynos_dpi *display_to_dpi(struct exynos_drm_display *d)
+{
+ return container_of(d, struct exynos_dpi, display);
+}
+
static enum drm_connector_status
exynos_dpi_detect(struct drm_connector *connector, bool force)
{
@@ -100,7 +106,7 @@ static struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = {
static int exynos_dpi_create_connector(struct exynos_drm_display *display,
struct drm_encoder *encoder)
{
- struct exynos_dpi *ctx = display->ctx;
+ struct exynos_dpi *ctx = display_to_dpi(display);
struct drm_connector *connector = &ctx->connector;
int ret;
@@ -141,7 +147,7 @@ static void exynos_dpi_poweroff(struct exynos_dpi *ctx)
static void exynos_dpi_dpms(struct exynos_drm_display *display, int mode)
{
- struct exynos_dpi *ctx = display->ctx;
+ struct exynos_dpi *ctx = display_to_dpi(display);
switch (mode) {
case DRM_MODE_DPMS_ON:
@@ -165,11 +171,6 @@ static struct exynos_drm_display_ops exynos_dpi_display_ops = {
.dpms = exynos_dpi_dpms
};
-static struct exynos_drm_display exynos_dpi_display = {
- .type = EXYNOS_DISPLAY_TYPE_LCD,
- .ops = &exynos_dpi_display_ops,
-};
-
/* of_* functions will be removed after merge of of_graph patches */
static struct device_node *
of_get_child_by_name_reg(struct device_node *parent, const char *name, u32 reg)
@@ -299,20 +300,21 @@ struct exynos_drm_display *exynos_dpi_probe(struct device *dev)
struct exynos_dpi *ctx;
int ret;
- ret = exynos_drm_component_add(dev,
- EXYNOS_DEVICE_TYPE_CONNECTOR,
- exynos_dpi_display.type);
- if (ret)
- return ERR_PTR(ret);
-
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
- goto err_del_component;
+ return ERR_PTR(-ENOMEM);
+ ctx->display.type = EXYNOS_DISPLAY_TYPE_LCD;
+ ctx->display.ops = &exynos_dpi_display_ops;
ctx->dev = dev;
- exynos_dpi_display.ctx = ctx;
ctx->dpms_mode = DRM_MODE_DPMS_OFF;
+ ret = exynos_drm_component_add(dev,
+ EXYNOS_DEVICE_TYPE_CONNECTOR,
+ ctx->display.type);
+ if (ret)
+ return ERR_PTR(ret);
+
ret = exynos_dpi_parse_dt(ctx);
if (ret < 0) {
devm_kfree(dev, ctx);
@@ -328,7 +330,7 @@ struct exynos_drm_display *exynos_dpi_probe(struct device *dev)
}
}
- return &exynos_dpi_display;
+ return &ctx->display;
err_del_component:
exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
@@ -336,16 +338,16 @@ err_del_component:
return NULL;
}
-int exynos_dpi_remove(struct device *dev)
+int exynos_dpi_remove(struct exynos_drm_display *display)
{
- struct exynos_dpi *ctx = exynos_dpi_display.ctx;
+ struct exynos_dpi *ctx = display_to_dpi(display);
- exynos_dpi_dpms(&exynos_dpi_display, DRM_MODE_DPMS_OFF);
+ exynos_dpi_dpms(&ctx->display, DRM_MODE_DPMS_OFF);
if (ctx->panel)
drm_panel_detach(ctx->panel);
- exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
+ exynos_drm_component_del(ctx->dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
return 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index e277d4f1281..121470a83d1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -203,8 +203,6 @@ static int exynos_drm_resume(struct drm_device *dev)
}
drm_modeset_unlock_all(dev);
- drm_helper_resume_force_mode(dev);
-
return 0;
}
@@ -475,8 +473,6 @@ void exynos_drm_component_del(struct device *dev,
list_del(&cdev->list);
kfree(cdev);
}
-
- break;
}
mutex_unlock(&drm_component_lock);
@@ -556,182 +552,68 @@ static const struct component_master_ops exynos_drm_ops = {
.unbind = exynos_drm_unbind,
};
-static int exynos_drm_platform_probe(struct platform_device *pdev)
-{
- struct component_match *match;
- int ret;
-
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
- exynos_drm_driver.num_ioctls = ARRAY_SIZE(exynos_ioctls);
-
+static struct platform_driver *const exynos_drm_kms_drivers[] = {
#ifdef CONFIG_DRM_EXYNOS_FIMD
- ret = platform_driver_register(&fimd_driver);
- if (ret < 0)
- return ret;
+ &fimd_driver,
#endif
-
#ifdef CONFIG_DRM_EXYNOS_DP
- ret = platform_driver_register(&dp_driver);
- if (ret < 0)
- goto err_unregister_fimd_drv;
+ &dp_driver,
#endif
-
#ifdef CONFIG_DRM_EXYNOS_DSI
- ret = platform_driver_register(&dsi_driver);
- if (ret < 0)
- goto err_unregister_dp_drv;
+ &dsi_driver,
#endif
-
#ifdef CONFIG_DRM_EXYNOS_HDMI
- ret = platform_driver_register(&mixer_driver);
- if (ret < 0)
- goto err_unregister_dsi_drv;
- ret = platform_driver_register(&hdmi_driver);
- if (ret < 0)
- goto err_unregister_mixer_drv;
+ &mixer_driver,
+ &hdmi_driver,
#endif
+};
- match = exynos_drm_match_add(&pdev->dev);
- if (IS_ERR(match)) {
- ret = PTR_ERR(match);
- goto err_unregister_hdmi_drv;
- }
-
- ret = component_master_add_with_match(&pdev->dev, &exynos_drm_ops,
- match);
- if (ret < 0)
- goto err_unregister_hdmi_drv;
-
+static struct platform_driver *const exynos_drm_non_kms_drivers[] = {
#ifdef CONFIG_DRM_EXYNOS_G2D
- ret = platform_driver_register(&g2d_driver);
- if (ret < 0)
- goto err_del_component_master;
+ &g2d_driver,
#endif
-
#ifdef CONFIG_DRM_EXYNOS_FIMC
- ret = platform_driver_register(&fimc_driver);
- if (ret < 0)
- goto err_unregister_g2d_drv;
+ &fimc_driver,
#endif
-
#ifdef CONFIG_DRM_EXYNOS_ROTATOR
- ret = platform_driver_register(&rotator_driver);
- if (ret < 0)
- goto err_unregister_fimc_drv;
+ &rotator_driver,
#endif
-
#ifdef CONFIG_DRM_EXYNOS_GSC
- ret = platform_driver_register(&gsc_driver);
- if (ret < 0)
- goto err_unregister_rotator_drv;
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_IPP
- ret = platform_driver_register(&ipp_driver);
- if (ret < 0)
- goto err_unregister_gsc_drv;
-
- ret = exynos_platform_device_ipp_register();
- if (ret < 0)
- goto err_unregister_ipp_drv;
+ &gsc_driver,
#endif
-
- return ret;
-
#ifdef CONFIG_DRM_EXYNOS_IPP
-err_unregister_ipp_drv:
- platform_driver_unregister(&ipp_driver);
-err_unregister_gsc_drv:
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_GSC
- platform_driver_unregister(&gsc_driver);
-err_unregister_rotator_drv:
+ &ipp_driver,
#endif
+};
-#ifdef CONFIG_DRM_EXYNOS_ROTATOR
- platform_driver_unregister(&rotator_driver);
-err_unregister_fimc_drv:
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_FIMC
- platform_driver_unregister(&fimc_driver);
-err_unregister_g2d_drv:
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_G2D
- platform_driver_unregister(&g2d_driver);
-err_del_component_master:
-#endif
- component_master_del(&pdev->dev, &exynos_drm_ops);
-
-err_unregister_hdmi_drv:
-#ifdef CONFIG_DRM_EXYNOS_HDMI
- platform_driver_unregister(&hdmi_driver);
-err_unregister_mixer_drv:
- platform_driver_unregister(&mixer_driver);
-err_unregister_dsi_drv:
-#endif
+static int exynos_drm_platform_probe(struct platform_device *pdev)
+{
+ struct component_match *match;
-#ifdef CONFIG_DRM_EXYNOS_DSI
- platform_driver_unregister(&dsi_driver);
-err_unregister_dp_drv:
-#endif
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ exynos_drm_driver.num_ioctls = ARRAY_SIZE(exynos_ioctls);
-#ifdef CONFIG_DRM_EXYNOS_DP
- platform_driver_unregister(&dp_driver);
-err_unregister_fimd_drv:
-#endif
+ match = exynos_drm_match_add(&pdev->dev);
+ if (IS_ERR(match)) {
+ return PTR_ERR(match);
+ }
-#ifdef CONFIG_DRM_EXYNOS_FIMD
- platform_driver_unregister(&fimd_driver);
-#endif
- return ret;
+ return component_master_add_with_match(&pdev->dev, &exynos_drm_ops,
+ match);
}
static int exynos_drm_platform_remove(struct platform_device *pdev)
{
-#ifdef CONFIG_DRM_EXYNOS_IPP
- exynos_platform_device_ipp_unregister();
- platform_driver_unregister(&ipp_driver);
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_GSC
- platform_driver_unregister(&gsc_driver);
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_ROTATOR
- platform_driver_unregister(&rotator_driver);
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_FIMC
- platform_driver_unregister(&fimc_driver);
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_G2D
- platform_driver_unregister(&g2d_driver);
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_HDMI
- platform_driver_unregister(&mixer_driver);
- platform_driver_unregister(&hdmi_driver);
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_FIMD
- platform_driver_unregister(&fimd_driver);
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_DSI
- platform_driver_unregister(&dsi_driver);
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_DP
- platform_driver_unregister(&dp_driver);
-#endif
component_master_del(&pdev->dev, &exynos_drm_ops);
return 0;
}
+static const char * const strings[] = {
+ "samsung,exynos3",
+ "samsung,exynos4",
+ "samsung,exynos5",
+};
+
static struct platform_driver exynos_drm_platform_driver = {
.probe = exynos_drm_platform_probe,
.remove = exynos_drm_platform_remove,
@@ -743,7 +625,25 @@ static struct platform_driver exynos_drm_platform_driver = {
static int exynos_drm_init(void)
{
- int ret;
+ bool is_exynos = false;
+ int ret, i, j;
+
+ /*
+ * Register device object only in case of Exynos SoC.
+ *
+ * The code below temporarily resolves an infinite loop issue incurred
+ * by the Exynos drm driver when using a multi-platform kernel.
+ * It will be replaced with a more generic approach later.
+ */
+ for (i = 0; i < ARRAY_SIZE(strings); i++) {
+ if (of_machine_is_compatible(strings[i])) {
+ is_exynos = true;
+ break;
+ }
+ }
+
+ if (!is_exynos)
+ return -ENODEV;
/*
* Register device object only in case of Exynos SoC.
@@ -762,24 +662,50 @@ static int exynos_drm_init(void)
if (IS_ERR(exynos_drm_pdev))
return PTR_ERR(exynos_drm_pdev);
-#ifdef CONFIG_DRM_EXYNOS_VIDI
ret = exynos_drm_probe_vidi();
if (ret < 0)
goto err_unregister_pd;
+
+ for (i = 0; i < ARRAY_SIZE(exynos_drm_kms_drivers); ++i) {
+ ret = platform_driver_register(exynos_drm_kms_drivers[i]);
+ if (ret < 0)
+ goto err_unregister_kms_drivers;
+ }
+
+ for (j = 0; j < ARRAY_SIZE(exynos_drm_non_kms_drivers); ++j) {
+ ret = platform_driver_register(exynos_drm_non_kms_drivers[j]);
+ if (ret < 0)
+ goto err_unregister_non_kms_drivers;
+ }
+
+#ifdef CONFIG_DRM_EXYNOS_IPP
+ ret = exynos_platform_device_ipp_register();
+ if (ret < 0)
+ goto err_unregister_non_kms_drivers;
#endif
ret = platform_driver_register(&exynos_drm_platform_driver);
if (ret)
- goto err_remove_vidi;
+ goto err_unregister_resources;
return 0;
-err_remove_vidi:
-#ifdef CONFIG_DRM_EXYNOS_VIDI
+err_unregister_resources:
+#ifdef CONFIG_DRM_EXYNOS_IPP
+ exynos_platform_device_ipp_unregister();
+#endif
+
+err_unregister_non_kms_drivers:
+ while (--j >= 0)
+ platform_driver_unregister(exynos_drm_non_kms_drivers[j]);
+
+err_unregister_kms_drivers:
+ while (--i >= 0)
+ platform_driver_unregister(exynos_drm_kms_drivers[i]);
+
exynos_drm_remove_vidi();
err_unregister_pd:
-#endif
platform_device_unregister(exynos_drm_pdev);
return ret;
@@ -787,10 +713,22 @@ err_unregister_pd:
static void exynos_drm_exit(void)
{
+ int i;
+
+#ifdef CONFIG_DRM_EXYNOS_IPP
+ exynos_platform_device_ipp_unregister();
+#endif
+
+ for (i = ARRAY_SIZE(exynos_drm_non_kms_drivers) - 1; i >= 0; --i)
+ platform_driver_unregister(exynos_drm_non_kms_drivers[i]);
+
+ for (i = ARRAY_SIZE(exynos_drm_kms_drivers) - 1; i >= 0; --i)
+ platform_driver_unregister(exynos_drm_kms_drivers[i]);
+
platform_driver_unregister(&exynos_drm_platform_driver);
-#ifdef CONFIG_DRM_EXYNOS_VIDI
+
exynos_drm_remove_vidi();
-#endif
+
platform_device_unregister(exynos_drm_pdev);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index d22e640f59a..2e5063488c5 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -15,6 +15,7 @@
#ifndef _EXYNOS_DRM_DRV_H_
#define _EXYNOS_DRM_DRV_H_
+#include <drm/drmP.h>
#include <linux/module.h>
#define MAX_CRTC 3
@@ -22,24 +23,6 @@
#define MAX_FB_BUFFER 4
#define DEFAULT_ZPOS -1
-#define _wait_for(COND, MS) ({ \
- unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
- int ret__ = 0; \
- while (!(COND)) { \
- if (time_after(jiffies, timeout__)) { \
- ret__ = -ETIMEDOUT; \
- break; \
- } \
- } \
- ret__; \
-})
-
-#define wait_for(COND, MS) _wait_for(COND, MS)
-
-struct drm_device;
-struct exynos_drm_overlay;
-struct drm_connector;
-
/* This enumerates device type. */
enum exynos_drm_device_type {
EXYNOS_DEVICE_TYPE_NONE,
@@ -83,10 +66,10 @@ enum exynos_drm_output_type {
* @dma_addr: array of bus(accessed by dma) address to the memory region
* allocated for a overlay.
* @zpos: order of overlay layer(z position).
- * @default_win: a window to be enabled.
- * @color_key: color key on or off.
* @index_color: if using color key feature then this value would be used
* as index color.
+ * @default_win: a window to be enabled.
+ * @color_key: color key on or off.
* @local_path: in case of lcd type, local path mode on or off.
* @transparency: transparency on or off.
* @activated: activated or not.
@@ -114,19 +97,20 @@ struct exynos_drm_overlay {
uint32_t pixel_format;
dma_addr_t dma_addr[MAX_FB_BUFFER];
int zpos;
-
- bool default_win;
- bool color_key;
unsigned int index_color;
- bool local_path;
- bool transparency;
- bool activated;
+
+ bool default_win:1;
+ bool color_key:1;
+ bool local_path:1;
+ bool transparency:1;
+ bool activated:1;
};
/*
* Exynos DRM Display Structure.
* - this structure is common to analog tv, digital tv and lcd panel.
*
+ * @create_connector: initialize and register a new connector
* @remove: cleans up the display for removal
* @mode_fixup: fix mode data comparing to hw specific display mode.
* @mode_set: convert drm_display_mode to hw specific display mode and
@@ -168,7 +152,6 @@ struct exynos_drm_display {
struct drm_encoder *encoder;
struct drm_connector *connector;
struct exynos_drm_display_ops *ops;
- void *ctx;
};
/*
@@ -227,7 +210,6 @@ struct exynos_drm_manager {
struct drm_crtc *crtc;
int pipe;
struct exynos_drm_manager_ops *ops;
- void *ctx;
};
struct exynos_drm_g2d_private {
@@ -279,8 +261,6 @@ struct exynos_drm_private {
* @dev: pointer to device object for subdrv device driver.
* @drm_dev: pointer to drm_device and this pointer would be set
* when sub driver calls exynos_drm_subdrv_register().
- * @manager: subdrv has its own manager to control a hardware appropriately
- * and we can access a hardware drawing on this manager.
* @probe: this callback would be called by exynos drm driver after
* subdrv is registered to it.
* @remove: this callback is used to release resources created
@@ -312,45 +292,34 @@ int exynos_drm_device_subdrv_remove(struct drm_device *dev);
int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file);
void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);
-/*
- * this function registers exynos drm hdmi platform device. It ensures only one
- * instance of the device is created.
- */
-int exynos_platform_device_hdmi_register(void);
-
-/*
- * this function unregisters exynos drm hdmi platform device if it exists.
- */
-void exynos_platform_device_hdmi_unregister(void);
-
-/*
- * this function registers exynos drm ipp platform device.
- */
+#ifdef CONFIG_DRM_EXYNOS_IPP
int exynos_platform_device_ipp_register(void);
-
-/*
- * this function unregisters exynos drm ipp platform device if it exists.
- */
void exynos_platform_device_ipp_unregister(void);
+#else
+static inline int exynos_platform_device_ipp_register(void) { return 0; }
+static inline void exynos_platform_device_ipp_unregister(void) {}
+#endif
+
#ifdef CONFIG_DRM_EXYNOS_DPI
struct exynos_drm_display * exynos_dpi_probe(struct device *dev);
-int exynos_dpi_remove(struct device *dev);
+int exynos_dpi_remove(struct exynos_drm_display *display);
#else
static inline struct exynos_drm_display *
exynos_dpi_probe(struct device *dev) { return NULL; }
-static inline int exynos_dpi_remove(struct device *dev) { return 0; }
+static inline int exynos_dpi_remove(struct exynos_drm_display *display)
+{
+ return 0;
+}
#endif
-/*
- * this function registers exynos drm vidi platform device/driver.
- */
+#ifdef CONFIG_DRM_EXYNOS_VIDI
int exynos_drm_probe_vidi(void);
-
-/*
- * this function unregister exynos drm vidi platform device/driver.
- */
void exynos_drm_remove_vidi(void);
+#else
+static inline int exynos_drm_probe_vidi(void) { return 0; }
+static inline void exynos_drm_remove_vidi(void) {}
+#endif
/* This function creates a encoder and a connector, and initializes them. */
int exynos_drm_create_enc_conn(struct drm_device *dev,
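The header changes above replace comment-only declarations with conditional stubs: when CONFIG_DRM_EXYNOS_IPP or CONFIG_DRM_EXYNOS_VIDI is disabled, callers get no-op static inlines instead of having to wrap every call site in #ifdef. A minimal sketch of the idiom with a made-up option name:

#ifdef CONFIG_DEMO_FEATURE
int demo_feature_register(void);
void demo_feature_unregister(void);
#else
/* Disabled builds get no-op stubs, so callers need no #ifdef of their own. */
static inline int demo_feature_register(void) { return 0; }
static inline void demo_feature_unregister(void) {}
#endif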
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index acf7e9e39dc..05fe93dc57a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -268,9 +268,9 @@ struct exynos_dsi_driver_data {
};
struct exynos_dsi {
+ struct exynos_drm_display display;
struct mipi_dsi_host dsi_host;
struct drm_connector connector;
- struct drm_encoder *encoder;
struct device_node *panel_node;
struct drm_panel *panel;
struct device *dev;
@@ -304,6 +304,11 @@ struct exynos_dsi {
#define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host)
#define connector_to_dsi(c) container_of(c, struct exynos_dsi, connector)
+static inline struct exynos_dsi *display_to_dsi(struct exynos_drm_display *d)
+{
+ return container_of(d, struct exynos_dsi, display);
+}
+
static struct exynos_dsi_driver_data exynos3_dsi_driver_data = {
.plltmr_reg = 0x50,
.has_freqband = 1,
@@ -316,6 +321,11 @@ static struct exynos_dsi_driver_data exynos4_dsi_driver_data = {
.has_clklane_stop = 1,
};
+static struct exynos_dsi_driver_data exynos4415_dsi_driver_data = {
+ .plltmr_reg = 0x58,
+ .has_clklane_stop = 1,
+};
+
static struct exynos_dsi_driver_data exynos5_dsi_driver_data = {
.plltmr_reg = 0x58,
};
@@ -325,6 +335,8 @@ static struct of_device_id exynos_dsi_of_match[] = {
.data = &exynos3_dsi_driver_data },
{ .compatible = "samsung,exynos4210-mipi-dsi",
.data = &exynos4_dsi_driver_data },
+ { .compatible = "samsung,exynos4415-mipi-dsi",
+ .data = &exynos4415_dsi_driver_data },
{ .compatible = "samsung,exynos5410-mipi-dsi",
.data = &exynos5_dsi_driver_data },
{ }
@@ -1104,7 +1116,7 @@ static irqreturn_t exynos_dsi_irq(int irq, void *dev_id)
static irqreturn_t exynos_dsi_te_irq_handler(int irq, void *dev_id)
{
struct exynos_dsi *dsi = (struct exynos_dsi *)dev_id;
- struct drm_encoder *encoder = dsi->encoder;
+ struct drm_encoder *encoder = dsi->display.encoder;
if (dsi->state & DSIM_STATE_ENABLED)
exynos_drm_crtc_te_handler(encoder->crtc);
@@ -1143,6 +1155,7 @@ static int exynos_dsi_init(struct exynos_dsi *dsi)
static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi)
{
int ret;
+ int te_gpio_irq;
dsi->te_gpio = of_get_named_gpio(dsi->panel_node, "te-gpios", 0);
if (!gpio_is_valid(dsi->te_gpio)) {
@@ -1157,14 +1170,10 @@ static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi)
goto out;
}
- /*
- * This TE GPIO IRQ should not be set to IRQ_NOAUTOEN, because panel
- * calls drm_panel_init() first then calls mipi_dsi_attach() in probe().
- * It means that te_gpio is invalid when exynos_dsi_enable_irq() is
- * called by drm_panel_init() before panel is attached.
- */
- ret = request_threaded_irq(gpio_to_irq(dsi->te_gpio),
- exynos_dsi_te_irq_handler, NULL,
+ te_gpio_irq = gpio_to_irq(dsi->te_gpio);
+
+ irq_set_status_flags(te_gpio_irq, IRQ_NOAUTOEN);
+ ret = request_threaded_irq(te_gpio_irq, exynos_dsi_te_irq_handler, NULL,
IRQF_TRIGGER_RISING, "TE", dsi);
if (ret) {
dev_err(dsi->dev, "request interrupt failed with %d\n", ret);
@@ -1195,9 +1204,6 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
dsi->mode_flags = device->mode_flags;
dsi->panel_node = device->dev.of_node;
- if (dsi->connector.dev)
- drm_helper_hpd_irq_event(dsi->connector.dev);
-
/*
* This is a temporary solution and should be made by more generic way.
*
@@ -1211,6 +1217,9 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
return ret;
}
+ if (dsi->connector.dev)
+ drm_helper_hpd_irq_event(dsi->connector.dev);
+
return 0;
}
@@ -1236,7 +1245,7 @@ static bool exynos_dsi_is_short_dsi_type(u8 type)
}
static ssize_t exynos_dsi_host_transfer(struct mipi_dsi_host *host,
- struct mipi_dsi_msg *msg)
+ const struct mipi_dsi_msg *msg)
{
struct exynos_dsi *dsi = host_to_dsi(host);
struct exynos_dsi_transfer xfer;
@@ -1369,16 +1378,17 @@ static int exynos_dsi_enable(struct exynos_dsi *dsi)
exynos_dsi_set_display_mode(dsi);
exynos_dsi_set_display_enable(dsi, true);
+ dsi->state |= DSIM_STATE_ENABLED;
+
ret = drm_panel_enable(dsi->panel);
if (ret < 0) {
+ dsi->state &= ~DSIM_STATE_ENABLED;
exynos_dsi_set_display_enable(dsi, false);
drm_panel_unprepare(dsi->panel);
exynos_dsi_poweroff(dsi);
return ret;
}
- dsi->state |= DSIM_STATE_ENABLED;
-
return 0;
}
@@ -1397,7 +1407,7 @@ static void exynos_dsi_disable(struct exynos_dsi *dsi)
static void exynos_dsi_dpms(struct exynos_drm_display *display, int mode)
{
- struct exynos_dsi *dsi = display->ctx;
+ struct exynos_dsi *dsi = display_to_dsi(display);
if (dsi->panel) {
switch (mode) {
@@ -1474,7 +1484,7 @@ exynos_dsi_best_encoder(struct drm_connector *connector)
{
struct exynos_dsi *dsi = connector_to_dsi(connector);
- return dsi->encoder;
+ return dsi->display.encoder;
}
static struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = {
@@ -1486,12 +1496,10 @@ static struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = {
static int exynos_dsi_create_connector(struct exynos_drm_display *display,
struct drm_encoder *encoder)
{
- struct exynos_dsi *dsi = display->ctx;
+ struct exynos_dsi *dsi = display_to_dsi(display);
struct drm_connector *connector = &dsi->connector;
int ret;
- dsi->encoder = encoder;
-
connector->polled = DRM_CONNECTOR_POLL_HPD;
ret = drm_connector_init(encoder->dev, connector,
@@ -1512,7 +1520,7 @@ static int exynos_dsi_create_connector(struct exynos_drm_display *display,
static void exynos_dsi_mode_set(struct exynos_drm_display *display,
struct drm_display_mode *mode)
{
- struct exynos_dsi *dsi = display->ctx;
+ struct exynos_dsi *dsi = display_to_dsi(display);
struct videomode *vm = &dsi->vm;
vm->hactive = mode->hdisplay;
@@ -1531,10 +1539,6 @@ static struct exynos_drm_display_ops exynos_dsi_display_ops = {
.dpms = exynos_dsi_dpms
};
-static struct exynos_drm_display exynos_dsi_display = {
- .type = EXYNOS_DISPLAY_TYPE_LCD,
- .ops = &exynos_dsi_display_ops,
-};
MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
/* of_* functions will be removed after merge of of_graph patches */
@@ -1640,28 +1644,28 @@ end:
static int exynos_dsi_bind(struct device *dev, struct device *master,
void *data)
{
+ struct exynos_drm_display *display = dev_get_drvdata(dev);
+ struct exynos_dsi *dsi = display_to_dsi(display);
struct drm_device *drm_dev = data;
- struct exynos_dsi *dsi;
int ret;
- ret = exynos_drm_create_enc_conn(drm_dev, &exynos_dsi_display);
+ ret = exynos_drm_create_enc_conn(drm_dev, display);
if (ret) {
DRM_ERROR("Encoder create [%d] failed with %d\n",
- exynos_dsi_display.type, ret);
+ display->type, ret);
return ret;
}
- dsi = exynos_dsi_display.ctx;
-
return mipi_dsi_host_register(&dsi->dsi_host);
}
static void exynos_dsi_unbind(struct device *dev, struct device *master,
void *data)
{
- struct exynos_dsi *dsi = exynos_dsi_display.ctx;
+ struct exynos_drm_display *display = dev_get_drvdata(dev);
+ struct exynos_dsi *dsi = display_to_dsi(display);
- exynos_dsi_dpms(&exynos_dsi_display, DRM_MODE_DPMS_OFF);
+ exynos_dsi_dpms(display, DRM_MODE_DPMS_OFF);
mipi_dsi_host_unregister(&dsi->dsi_host);
}
@@ -1673,22 +1677,23 @@ static const struct component_ops exynos_dsi_component_ops = {
static int exynos_dsi_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct resource *res;
struct exynos_dsi *dsi;
int ret;
- ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
- exynos_dsi_display.type);
+ dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
+ if (!dsi)
+ return -ENOMEM;
+
+ dsi->display.type = EXYNOS_DISPLAY_TYPE_LCD;
+ dsi->display.ops = &exynos_dsi_display_ops;
+
+ ret = exynos_drm_component_add(dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
+ dsi->display.type);
if (ret)
return ret;
- dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
- if (!dsi) {
- dev_err(&pdev->dev, "failed to allocate dsi object.\n");
- ret = -ENOMEM;
- goto err_del_component;
- }
-
/* To be checked as invalid one */
dsi->te_gpio = -ENOENT;
@@ -1697,9 +1702,9 @@ static int exynos_dsi_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&dsi->transfer_list);
dsi->dsi_host.ops = &exynos_dsi_ops;
- dsi->dsi_host.dev = &pdev->dev;
+ dsi->dsi_host.dev = dev;
- dsi->dev = &pdev->dev;
+ dsi->dev = dev;
dsi->driver_data = exynos_dsi_get_driver_data(pdev);
ret = exynos_dsi_parse_dt(dsi);
@@ -1708,70 +1713,68 @@ static int exynos_dsi_probe(struct platform_device *pdev)
dsi->supplies[0].supply = "vddcore";
dsi->supplies[1].supply = "vddio";
- ret = devm_regulator_bulk_get(&pdev->dev, ARRAY_SIZE(dsi->supplies),
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(dsi->supplies),
dsi->supplies);
if (ret) {
- dev_info(&pdev->dev, "failed to get regulators: %d\n", ret);
+ dev_info(dev, "failed to get regulators: %d\n", ret);
return -EPROBE_DEFER;
}
- dsi->pll_clk = devm_clk_get(&pdev->dev, "pll_clk");
+ dsi->pll_clk = devm_clk_get(dev, "pll_clk");
if (IS_ERR(dsi->pll_clk)) {
- dev_info(&pdev->dev, "failed to get dsi pll input clock\n");
+ dev_info(dev, "failed to get dsi pll input clock\n");
ret = PTR_ERR(dsi->pll_clk);
goto err_del_component;
}
- dsi->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
+ dsi->bus_clk = devm_clk_get(dev, "bus_clk");
if (IS_ERR(dsi->bus_clk)) {
- dev_info(&pdev->dev, "failed to get dsi bus clock\n");
+ dev_info(dev, "failed to get dsi bus clock\n");
ret = PTR_ERR(dsi->bus_clk);
goto err_del_component;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dsi->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ dsi->reg_base = devm_ioremap_resource(dev, res);
if (IS_ERR(dsi->reg_base)) {
- dev_err(&pdev->dev, "failed to remap io region\n");
+ dev_err(dev, "failed to remap io region\n");
ret = PTR_ERR(dsi->reg_base);
goto err_del_component;
}
- dsi->phy = devm_phy_get(&pdev->dev, "dsim");
+ dsi->phy = devm_phy_get(dev, "dsim");
if (IS_ERR(dsi->phy)) {
- dev_info(&pdev->dev, "failed to get dsim phy\n");
+ dev_info(dev, "failed to get dsim phy\n");
ret = PTR_ERR(dsi->phy);
goto err_del_component;
}
dsi->irq = platform_get_irq(pdev, 0);
if (dsi->irq < 0) {
- dev_err(&pdev->dev, "failed to request dsi irq resource\n");
+ dev_err(dev, "failed to request dsi irq resource\n");
ret = dsi->irq;
goto err_del_component;
}
irq_set_status_flags(dsi->irq, IRQ_NOAUTOEN);
- ret = devm_request_threaded_irq(&pdev->dev, dsi->irq, NULL,
+ ret = devm_request_threaded_irq(dev, dsi->irq, NULL,
exynos_dsi_irq, IRQF_ONESHOT,
- dev_name(&pdev->dev), dsi);
+ dev_name(dev), dsi);
if (ret) {
- dev_err(&pdev->dev, "failed to request dsi irq\n");
+ dev_err(dev, "failed to request dsi irq\n");
goto err_del_component;
}
- exynos_dsi_display.ctx = dsi;
-
- platform_set_drvdata(pdev, &exynos_dsi_display);
+ platform_set_drvdata(pdev, &dsi->display);
- ret = component_add(&pdev->dev, &exynos_dsi_component_ops);
+ ret = component_add(dev, &exynos_dsi_component_ops);
if (ret)
goto err_del_component;
return ret;
err_del_component:
- exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
+ exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
return ret;
}
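The DSI conversion above drops the static exynos_dsi_display object and the void *ctx back-pointer: struct exynos_drm_display is now embedded in struct exynos_dsi and the outer context is recovered with container_of(). A rough sketch of that pattern, with hypothetical names standing in for the driver's own:

struct demo_device {				/* hypothetical driver context */
	struct exynos_drm_display display;	/* embedded, not pointed to */
	void __iomem *regs;
};

static inline struct demo_device *
display_to_demo(struct exynos_drm_display *d)
{
	/* container_of() maps the embedded member back to its owner. */
	return container_of(d, struct demo_device, display);
}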
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
index b7a1620a7e7..26305d8dd93 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
@@ -14,8 +14,6 @@
#ifndef _EXYNOS_DRM_ENCODER_H_
#define _EXYNOS_DRM_ENCODER_H_
-struct exynos_drm_manager;
-
void exynos_drm_encoder_setup(struct drm_device *dev);
struct drm_encoder *exynos_drm_encoder_create(struct drm_device *dev,
struct exynos_drm_display *mgr,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 085b066a999..e5810d13bf9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -84,8 +84,6 @@
/* FIMD has totally five hardware windows. */
#define WINDOWS_NR 5
-#define get_fimd_manager(mgr) platform_get_drvdata(to_platform_device(dev))
-
struct fimd_driver_data {
unsigned int timing_base;
unsigned int lcdblk_offset;
@@ -96,6 +94,7 @@ struct fimd_driver_data {
unsigned int has_clksel:1;
unsigned int has_limited_fmt:1;
unsigned int has_vidoutcon:1;
+ unsigned int has_vtsel:1;
};
static struct fimd_driver_data s3c64xx_fimd_driver_data = {
@@ -118,6 +117,17 @@ static struct fimd_driver_data exynos4_fimd_driver_data = {
.lcdblk_vt_shift = 10,
.lcdblk_bypass_shift = 1,
.has_shadowcon = 1,
+ .has_vtsel = 1,
+};
+
+static struct fimd_driver_data exynos4415_fimd_driver_data = {
+ .timing_base = 0x20000,
+ .lcdblk_offset = 0x210,
+ .lcdblk_vt_shift = 10,
+ .lcdblk_bypass_shift = 1,
+ .has_shadowcon = 1,
+ .has_vidoutcon = 1,
+ .has_vtsel = 1,
};
static struct fimd_driver_data exynos5_fimd_driver_data = {
@@ -127,6 +137,7 @@ static struct fimd_driver_data exynos5_fimd_driver_data = {
.lcdblk_bypass_shift = 15,
.has_shadowcon = 1,
.has_vidoutcon = 1,
+ .has_vtsel = 1,
};
struct fimd_win_data {
@@ -146,6 +157,7 @@ struct fimd_win_data {
};
struct fimd_context {
+ struct exynos_drm_manager manager;
struct device *dev;
struct drm_device *drm_dev;
struct clk *bus_clk;
@@ -173,6 +185,11 @@ struct fimd_context {
struct exynos_drm_display *display;
};
+static inline struct fimd_context *mgr_to_fimd(struct exynos_drm_manager *mgr)
+{
+ return container_of(mgr, struct fimd_context, manager);
+}
+
static const struct of_device_id fimd_driver_dt_match[] = {
{ .compatible = "samsung,s3c6400-fimd",
.data = &s3c64xx_fimd_driver_data },
@@ -180,6 +197,8 @@ static const struct of_device_id fimd_driver_dt_match[] = {
.data = &exynos3_fimd_driver_data },
{ .compatible = "samsung,exynos4210-fimd",
.data = &exynos4_fimd_driver_data },
+ { .compatible = "samsung,exynos4415-fimd",
+ .data = &exynos4415_fimd_driver_data },
{ .compatible = "samsung,exynos5250-fimd",
.data = &exynos5_fimd_driver_data },
{},
@@ -197,7 +216,7 @@ static inline struct fimd_driver_data *drm_fimd_get_driver_data(
static void fimd_wait_for_vblank(struct exynos_drm_manager *mgr)
{
- struct fimd_context *ctx = mgr->ctx;
+ struct fimd_context *ctx = mgr_to_fimd(mgr);
if (ctx->suspended)
return;
@@ -214,9 +233,35 @@ static void fimd_wait_for_vblank(struct exynos_drm_manager *mgr)
DRM_DEBUG_KMS("vblank wait timed out.\n");
}
+static void fimd_enable_video_output(struct fimd_context *ctx, int win,
+ bool enable)
+{
+ u32 val = readl(ctx->regs + WINCON(win));
+
+ if (enable)
+ val |= WINCONx_ENWIN;
+ else
+ val &= ~WINCONx_ENWIN;
+
+ writel(val, ctx->regs + WINCON(win));
+}
+
+static void fimd_enable_shadow_channel_path(struct fimd_context *ctx, int win,
+ bool enable)
+{
+ u32 val = readl(ctx->regs + SHADOWCON);
+
+ if (enable)
+ val |= SHADOWCON_CHx_ENABLE(win);
+ else
+ val &= ~SHADOWCON_CHx_ENABLE(win);
+
+ writel(val, ctx->regs + SHADOWCON);
+}
+
static void fimd_clear_channel(struct exynos_drm_manager *mgr)
{
- struct fimd_context *ctx = mgr->ctx;
+ struct fimd_context *ctx = mgr_to_fimd(mgr);
int win, ch_enabled = 0;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -226,16 +271,12 @@ static void fimd_clear_channel(struct exynos_drm_manager *mgr)
u32 val = readl(ctx->regs + WINCON(win));
if (val & WINCONx_ENWIN) {
- /* wincon */
- val &= ~WINCONx_ENWIN;
- writel(val, ctx->regs + WINCON(win));
-
- /* unprotect windows */
- if (ctx->driver_data->has_shadowcon) {
- val = readl(ctx->regs + SHADOWCON);
- val &= ~SHADOWCON_CHx_ENABLE(win);
- writel(val, ctx->regs + SHADOWCON);
- }
+ fimd_enable_video_output(ctx, win, false);
+
+ if (ctx->driver_data->has_shadowcon)
+ fimd_enable_shadow_channel_path(ctx, win,
+ false);
+
ch_enabled = 1;
}
}
@@ -253,7 +294,7 @@ static void fimd_clear_channel(struct exynos_drm_manager *mgr)
static int fimd_mgr_initialize(struct exynos_drm_manager *mgr,
struct drm_device *drm_dev)
{
- struct fimd_context *ctx = mgr->ctx;
+ struct fimd_context *ctx = mgr_to_fimd(mgr);
struct exynos_drm_private *priv;
priv = drm_dev->dev_private;
@@ -275,7 +316,7 @@ static int fimd_mgr_initialize(struct exynos_drm_manager *mgr,
static void fimd_mgr_remove(struct exynos_drm_manager *mgr)
{
- struct fimd_context *ctx = mgr->ctx;
+ struct fimd_context *ctx = mgr_to_fimd(mgr);
/* detach this sub driver from iommu mapping if supported. */
if (is_drm_iommu_supported(ctx->drm_dev))
@@ -315,14 +356,14 @@ static bool fimd_mode_fixup(struct exynos_drm_manager *mgr,
static void fimd_mode_set(struct exynos_drm_manager *mgr,
const struct drm_display_mode *in_mode)
{
- struct fimd_context *ctx = mgr->ctx;
+ struct fimd_context *ctx = mgr_to_fimd(mgr);
drm_mode_copy(&ctx->mode, in_mode);
}
static void fimd_commit(struct exynos_drm_manager *mgr)
{
- struct fimd_context *ctx = mgr->ctx;
+ struct fimd_context *ctx = mgr_to_fimd(mgr);
struct drm_display_mode *mode = &ctx->mode;
struct fimd_driver_data *driver_data = ctx->driver_data;
void *timing_base = ctx->regs + driver_data->timing_base;
@@ -343,7 +384,8 @@ static void fimd_commit(struct exynos_drm_manager *mgr)
writel(0, timing_base + I80IFCONFBx(0));
/* set video type selection to I80 interface */
- if (ctx->sysreg && regmap_update_bits(ctx->sysreg,
+ if (driver_data->has_vtsel && ctx->sysreg &&
+ regmap_update_bits(ctx->sysreg,
driver_data->lcdblk_offset,
0x3 << driver_data->lcdblk_vt_shift,
0x1 << driver_data->lcdblk_vt_shift)) {
@@ -421,7 +463,7 @@ static void fimd_commit(struct exynos_drm_manager *mgr)
static int fimd_enable_vblank(struct exynos_drm_manager *mgr)
{
- struct fimd_context *ctx = mgr->ctx;
+ struct fimd_context *ctx = mgr_to_fimd(mgr);
u32 val;
if (ctx->suspended)
@@ -431,12 +473,19 @@ static int fimd_enable_vblank(struct exynos_drm_manager *mgr)
val = readl(ctx->regs + VIDINTCON0);
val |= VIDINTCON0_INT_ENABLE;
- val |= VIDINTCON0_INT_FRAME;
- val &= ~VIDINTCON0_FRAMESEL0_MASK;
- val |= VIDINTCON0_FRAMESEL0_VSYNC;
- val &= ~VIDINTCON0_FRAMESEL1_MASK;
- val |= VIDINTCON0_FRAMESEL1_NONE;
+ if (ctx->i80_if) {
+ val |= VIDINTCON0_INT_I80IFDONE;
+ val |= VIDINTCON0_INT_SYSMAINCON;
+ val &= ~VIDINTCON0_INT_SYSSUBCON;
+ } else {
+ val |= VIDINTCON0_INT_FRAME;
+
+ val &= ~VIDINTCON0_FRAMESEL0_MASK;
+ val |= VIDINTCON0_FRAMESEL0_VSYNC;
+ val &= ~VIDINTCON0_FRAMESEL1_MASK;
+ val |= VIDINTCON0_FRAMESEL1_NONE;
+ }
writel(val, ctx->regs + VIDINTCON0);
}
@@ -446,7 +495,7 @@ static int fimd_enable_vblank(struct exynos_drm_manager *mgr)
static void fimd_disable_vblank(struct exynos_drm_manager *mgr)
{
- struct fimd_context *ctx = mgr->ctx;
+ struct fimd_context *ctx = mgr_to_fimd(mgr);
u32 val;
if (ctx->suspended)
@@ -455,9 +504,15 @@ static void fimd_disable_vblank(struct exynos_drm_manager *mgr)
if (test_and_clear_bit(0, &ctx->irq_flags)) {
val = readl(ctx->regs + VIDINTCON0);
- val &= ~VIDINTCON0_INT_FRAME;
val &= ~VIDINTCON0_INT_ENABLE;
+ if (ctx->i80_if) {
+ val &= ~VIDINTCON0_INT_I80IFDONE;
+ val &= ~VIDINTCON0_INT_SYSMAINCON;
+ val &= ~VIDINTCON0_INT_SYSSUBCON;
+ } else
+ val &= ~VIDINTCON0_INT_FRAME;
+
writel(val, ctx->regs + VIDINTCON0);
}
}
@@ -465,7 +520,7 @@ static void fimd_disable_vblank(struct exynos_drm_manager *mgr)
static void fimd_win_mode_set(struct exynos_drm_manager *mgr,
struct exynos_drm_overlay *overlay)
{
- struct fimd_context *ctx = mgr->ctx;
+ struct fimd_context *ctx = mgr_to_fimd(mgr);
struct fimd_win_data *win_data;
int win;
unsigned long offset;
@@ -623,7 +678,7 @@ static void fimd_shadow_protect_win(struct fimd_context *ctx,
static void fimd_win_commit(struct exynos_drm_manager *mgr, int zpos)
{
- struct fimd_context *ctx = mgr->ctx;
+ struct fimd_context *ctx = mgr_to_fimd(mgr);
struct fimd_win_data *win_data;
int win = zpos;
unsigned long val, alpha, size;
@@ -730,20 +785,14 @@ static void fimd_win_commit(struct exynos_drm_manager *mgr, int zpos)
if (win != 0)
fimd_win_set_colkey(ctx, win);
- /* wincon */
- val = readl(ctx->regs + WINCON(win));
- val |= WINCONx_ENWIN;
- writel(val, ctx->regs + WINCON(win));
+ fimd_enable_video_output(ctx, win, true);
+
+ if (ctx->driver_data->has_shadowcon)
+ fimd_enable_shadow_channel_path(ctx, win, true);
/* Enable DMA channel and unprotect windows */
fimd_shadow_protect_win(ctx, win, false);
- if (ctx->driver_data->has_shadowcon) {
- val = readl(ctx->regs + SHADOWCON);
- val |= SHADOWCON_CHx_ENABLE(win);
- writel(val, ctx->regs + SHADOWCON);
- }
-
win_data->enabled = true;
if (ctx->i80_if)
@@ -752,10 +801,9 @@ static void fimd_win_commit(struct exynos_drm_manager *mgr, int zpos)
static void fimd_win_disable(struct exynos_drm_manager *mgr, int zpos)
{
- struct fimd_context *ctx = mgr->ctx;
+ struct fimd_context *ctx = mgr_to_fimd(mgr);
struct fimd_win_data *win_data;
int win = zpos;
- u32 val;
if (win == DEFAULT_ZPOS)
win = ctx->default_win;
@@ -774,18 +822,12 @@ static void fimd_win_disable(struct exynos_drm_manager *mgr, int zpos)
/* protect windows */
fimd_shadow_protect_win(ctx, win, true);
- /* wincon */
- val = readl(ctx->regs + WINCON(win));
- val &= ~WINCONx_ENWIN;
- writel(val, ctx->regs + WINCON(win));
+ fimd_enable_video_output(ctx, win, false);
- /* unprotect windows */
- if (ctx->driver_data->has_shadowcon) {
- val = readl(ctx->regs + SHADOWCON);
- val &= ~SHADOWCON_CHx_ENABLE(win);
- writel(val, ctx->regs + SHADOWCON);
- }
+ if (ctx->driver_data->has_shadowcon)
+ fimd_enable_shadow_channel_path(ctx, win, false);
+ /* unprotect windows */
fimd_shadow_protect_win(ctx, win, false);
win_data->enabled = false;
@@ -793,7 +835,7 @@ static void fimd_win_disable(struct exynos_drm_manager *mgr, int zpos)
static void fimd_window_suspend(struct exynos_drm_manager *mgr)
{
- struct fimd_context *ctx = mgr->ctx;
+ struct fimd_context *ctx = mgr_to_fimd(mgr);
struct fimd_win_data *win_data;
int i;
@@ -803,12 +845,11 @@ static void fimd_window_suspend(struct exynos_drm_manager *mgr)
if (win_data->enabled)
fimd_win_disable(mgr, i);
}
- fimd_wait_for_vblank(mgr);
}
static void fimd_window_resume(struct exynos_drm_manager *mgr)
{
- struct fimd_context *ctx = mgr->ctx;
+ struct fimd_context *ctx = mgr_to_fimd(mgr);
struct fimd_win_data *win_data;
int i;
@@ -821,7 +862,7 @@ static void fimd_window_resume(struct exynos_drm_manager *mgr)
static void fimd_apply(struct exynos_drm_manager *mgr)
{
- struct fimd_context *ctx = mgr->ctx;
+ struct fimd_context *ctx = mgr_to_fimd(mgr);
struct fimd_win_data *win_data;
int i;
@@ -838,7 +879,7 @@ static void fimd_apply(struct exynos_drm_manager *mgr)
static int fimd_poweron(struct exynos_drm_manager *mgr)
{
- struct fimd_context *ctx = mgr->ctx;
+ struct fimd_context *ctx = mgr_to_fimd(mgr);
int ret;
if (!ctx->suspended)
@@ -886,7 +927,7 @@ bus_clk_err:
static int fimd_poweroff(struct exynos_drm_manager *mgr)
{
- struct fimd_context *ctx = mgr->ctx;
+ struct fimd_context *ctx = mgr_to_fimd(mgr);
if (ctx->suspended)
return 0;
@@ -928,39 +969,41 @@ static void fimd_dpms(struct exynos_drm_manager *mgr, int mode)
static void fimd_trigger(struct device *dev)
{
- struct exynos_drm_manager *mgr = get_fimd_manager(dev);
- struct fimd_context *ctx = mgr->ctx;
+ struct fimd_context *ctx = dev_get_drvdata(dev);
struct fimd_driver_data *driver_data = ctx->driver_data;
void *timing_base = ctx->regs + driver_data->timing_base;
u32 reg;
- atomic_set(&ctx->triggering, 1);
+ /*
+ * Skips triggering if in triggering state, because multiple triggering
+ * requests can cause panel reset.
+ */
+ if (atomic_read(&ctx->triggering))
+ return;
- reg = readl(ctx->regs + VIDINTCON0);
- reg |= (VIDINTCON0_INT_ENABLE | VIDINTCON0_INT_I80IFDONE |
- VIDINTCON0_INT_SYSMAINCON);
- writel(reg, ctx->regs + VIDINTCON0);
+ /* Enters triggering mode */
+ atomic_set(&ctx->triggering, 1);
reg = readl(timing_base + TRIGCON);
reg |= (TRGMODE_I80_RGB_ENABLE_I80 | SWTRGCMD_I80_RGB_ENABLE);
writel(reg, timing_base + TRIGCON);
+
+ /*
+ * Exits triggering mode if vblank is not enabled yet, because when the
+ * VIDINTCON0 register is not set, it can not exit from triggering mode.
+ */
+ if (!test_bit(0, &ctx->irq_flags))
+ atomic_set(&ctx->triggering, 0);
}
static void fimd_te_handler(struct exynos_drm_manager *mgr)
{
- struct fimd_context *ctx = mgr->ctx;
+ struct fimd_context *ctx = mgr_to_fimd(mgr);
/* Checks the crtc is detached already from encoder */
if (ctx->pipe < 0 || !ctx->drm_dev)
return;
- /*
- * Skips to trigger if in triggering state, because multiple triggering
- * requests can cause panel reset.
- */
- if (atomic_read(&ctx->triggering))
- return;
-
/*
* If there is a page flip request, triggers and handles the page flip
* event so that current fb can be updated into panel GRAM.
@@ -972,10 +1015,10 @@ static void fimd_te_handler(struct exynos_drm_manager *mgr)
if (atomic_read(&ctx->wait_vsync_event)) {
atomic_set(&ctx->wait_vsync_event, 0);
wake_up(&ctx->wait_vsync_queue);
-
- if (!atomic_read(&ctx->triggering))
- drm_handle_vblank(ctx->drm_dev, ctx->pipe);
}
+
+ if (test_bit(0, &ctx->irq_flags))
+ drm_handle_vblank(ctx->drm_dev, ctx->pipe);
}
static struct exynos_drm_manager_ops fimd_manager_ops = {
@@ -992,11 +1035,6 @@ static struct exynos_drm_manager_ops fimd_manager_ops = {
.te_handler = fimd_te_handler,
};
-static struct exynos_drm_manager fimd_manager = {
- .type = EXYNOS_DISPLAY_TYPE_LCD,
- .ops = &fimd_manager_ops,
-};
-
static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
{
struct fimd_context *ctx = (struct fimd_context *)dev_id;
@@ -1013,16 +1051,10 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
goto out;
if (ctx->i80_if) {
- /* unset I80 frame done interrupt */
- val = readl(ctx->regs + VIDINTCON0);
- val &= ~(VIDINTCON0_INT_I80IFDONE | VIDINTCON0_INT_SYSMAINCON);
- writel(val, ctx->regs + VIDINTCON0);
+ exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
- /* exit triggering mode */
+ /* Exits triggering mode */
atomic_set(&ctx->triggering, 0);
-
- drm_handle_vblank(ctx->drm_dev, ctx->pipe);
- exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
} else {
drm_handle_vblank(ctx->drm_dev, ctx->pipe);
exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
@@ -1040,11 +1072,11 @@ out:
static int fimd_bind(struct device *dev, struct device *master, void *data)
{
- struct fimd_context *ctx = fimd_manager.ctx;
+ struct fimd_context *ctx = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
- fimd_mgr_initialize(&fimd_manager, drm_dev);
- exynos_drm_crtc_create(&fimd_manager);
+ fimd_mgr_initialize(&ctx->manager, drm_dev);
+ exynos_drm_crtc_create(&ctx->manager);
if (ctx->display)
exynos_drm_create_enc_conn(drm_dev, ctx->display);
@@ -1055,15 +1087,14 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
static void fimd_unbind(struct device *dev, struct device *master,
void *data)
{
- struct exynos_drm_manager *mgr = dev_get_drvdata(dev);
- struct fimd_context *ctx = fimd_manager.ctx;
+ struct fimd_context *ctx = dev_get_drvdata(dev);
- fimd_dpms(mgr, DRM_MODE_DPMS_OFF);
+ fimd_dpms(&ctx->manager, DRM_MODE_DPMS_OFF);
if (ctx->display)
- exynos_dpi_remove(dev);
+ exynos_dpi_remove(ctx->display);
- fimd_mgr_remove(mgr);
+ fimd_mgr_remove(&ctx->manager);
}
static const struct component_ops fimd_component_ops = {
@@ -1079,21 +1110,20 @@ static int fimd_probe(struct platform_device *pdev)
struct resource *res;
int ret = -EINVAL;
- ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC,
- fimd_manager.type);
- if (ret)
- return ret;
-
- if (!dev->of_node) {
- ret = -ENODEV;
- goto err_del_component;
- }
+ if (!dev->of_node)
+ return -ENODEV;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx) {
- ret = -ENOMEM;
- goto err_del_component;
- }
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->manager.type = EXYNOS_DISPLAY_TYPE_LCD;
+ ctx->manager.ops = &fimd_manager_ops;
+
+ ret = exynos_drm_component_add(dev, EXYNOS_DEVICE_TYPE_CRTC,
+ ctx->manager.type);
+ if (ret)
+ return ret;
ctx->dev = dev;
ctx->suspended = true;
@@ -1182,27 +1212,27 @@ static int fimd_probe(struct platform_device *pdev)
init_waitqueue_head(&ctx->wait_vsync_queue);
atomic_set(&ctx->wait_vsync_event, 0);
- platform_set_drvdata(pdev, &fimd_manager);
-
- fimd_manager.ctx = ctx;
+ platform_set_drvdata(pdev, ctx);
ctx->display = exynos_dpi_probe(dev);
- if (IS_ERR(ctx->display))
- return PTR_ERR(ctx->display);
+ if (IS_ERR(ctx->display)) {
+ ret = PTR_ERR(ctx->display);
+ goto err_del_component;
+ }
- pm_runtime_enable(&pdev->dev);
+ pm_runtime_enable(dev);
- ret = component_add(&pdev->dev, &fimd_component_ops);
+ ret = component_add(dev, &fimd_component_ops);
if (ret)
goto err_disable_pm_runtime;
return ret;
err_disable_pm_runtime:
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_disable(dev);
err_del_component:
- exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
+ exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CRTC);
return ret;
}
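In the FIMD hunks above, fimd_trigger() now owns the "already triggering" check, so a second software trigger cannot reset the panel while one is in flight, and the flag is dropped again if the frame-done interrupt that would normally clear it is not yet enabled. A condensed sketch of that guard, assuming a hypothetical context and register-kick helper:

struct demo_ctx {
	atomic_t triggering;		/* 1 while a manual trigger is in flight */
	unsigned long irq_flags;	/* bit 0: frame-done IRQ enabled */
};

static void demo_trigger(struct demo_ctx *ctx)
{
	/* A trigger already in flight? Issuing another could reset the panel. */
	if (atomic_read(&ctx->triggering))
		return;

	atomic_set(&ctx->triggering, 1);
	demo_hw_kick(ctx);			/* hypothetical register write */

	/* Without the frame-done IRQ enabled nothing will clear the flag. */
	if (!test_bit(0, &ctx->irq_flags))
		atomic_set(&ctx->triggering, 0);
}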
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
index 72376d41c51..35d25889b47 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -40,7 +40,6 @@ static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
#else
-struct dma_iommu_mapping;
static inline int drm_create_iommu_mapping(struct drm_device *drm_dev)
{
return 0;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 00d74b18f7c..d5ad17dfc24 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -426,18 +426,21 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
c_node->start_work = ipp_create_cmd_work();
if (IS_ERR(c_node->start_work)) {
DRM_ERROR("failed to create start work.\n");
+ ret = PTR_ERR(c_node->start_work);
goto err_remove_id;
}
c_node->stop_work = ipp_create_cmd_work();
if (IS_ERR(c_node->stop_work)) {
DRM_ERROR("failed to create stop work.\n");
+ ret = PTR_ERR(c_node->stop_work);
goto err_free_start;
}
c_node->event_work = ipp_create_event_work();
if (IS_ERR(c_node->event_work)) {
DRM_ERROR("failed to create event work.\n");
+ ret = PTR_ERR(c_node->event_work);
goto err_free_stop;
}
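The ipp hunk above fixes error paths that jumped to cleanup without recording why they failed. A minimal sketch of the corrected pattern, assuming a hypothetical allocator that returns ERR_PTR() on failure:

	work = demo_create_work();	/* hypothetical; returns ERR_PTR() on failure */
	if (IS_ERR(work)) {
		ret = PTR_ERR(work);	/* propagate the real error code ... */
		goto err_cleanup;	/* ... instead of a stale ret value */
	}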
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 50faf913e57..45899fb6327 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
+#include <linux/component.h>
#include <drm/exynos_drm.h>
@@ -28,7 +29,6 @@
/* vidi has totally three virtual windows. */
#define WINDOWS_NR 3
-#define get_vidi_mgr(dev) platform_get_drvdata(to_platform_device(dev))
#define ctx_from_connector(c) container_of(c, struct vidi_context, \
connector)
@@ -47,11 +47,13 @@ struct vidi_win_data {
};
struct vidi_context {
+ struct exynos_drm_manager manager;
+ struct exynos_drm_display display;
+ struct platform_device *pdev;
struct drm_device *drm_dev;
struct drm_crtc *crtc;
struct drm_encoder *encoder;
struct drm_connector connector;
- struct exynos_drm_subdrv subdrv;
struct vidi_win_data win_data[WINDOWS_NR];
struct edid *raw_edid;
unsigned int clkdiv;
@@ -66,6 +68,16 @@ struct vidi_context {
int pipe;
};
+static inline struct vidi_context *manager_to_vidi(struct exynos_drm_manager *m)
+{
+ return container_of(m, struct vidi_context, manager);
+}
+
+static inline struct vidi_context *display_to_vidi(struct exynos_drm_display *d)
+{
+ return container_of(d, struct vidi_context, display);
+}
+
static const char fake_edid_info[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x4c, 0x2d, 0x05, 0x05,
0x00, 0x00, 0x00, 0x00, 0x30, 0x12, 0x01, 0x03, 0x80, 0x10, 0x09, 0x78,
@@ -93,7 +105,7 @@ static const char fake_edid_info[] = {
static void vidi_apply(struct exynos_drm_manager *mgr)
{
- struct vidi_context *ctx = mgr->ctx;
+ struct vidi_context *ctx = manager_to_vidi(mgr);
struct exynos_drm_manager_ops *mgr_ops = mgr->ops;
struct vidi_win_data *win_data;
int i;
@@ -110,7 +122,7 @@ static void vidi_apply(struct exynos_drm_manager *mgr)
static void vidi_commit(struct exynos_drm_manager *mgr)
{
- struct vidi_context *ctx = mgr->ctx;
+ struct vidi_context *ctx = manager_to_vidi(mgr);
if (ctx->suspended)
return;
@@ -118,7 +130,7 @@ static void vidi_commit(struct exynos_drm_manager *mgr)
static int vidi_enable_vblank(struct exynos_drm_manager *mgr)
{
- struct vidi_context *ctx = mgr->ctx;
+ struct vidi_context *ctx = manager_to_vidi(mgr);
if (ctx->suspended)
return -EPERM;
@@ -140,7 +152,7 @@ static int vidi_enable_vblank(struct exynos_drm_manager *mgr)
static void vidi_disable_vblank(struct exynos_drm_manager *mgr)
{
- struct vidi_context *ctx = mgr->ctx;
+ struct vidi_context *ctx = manager_to_vidi(mgr);
if (ctx->suspended)
return;
@@ -152,7 +164,7 @@ static void vidi_disable_vblank(struct exynos_drm_manager *mgr)
static void vidi_win_mode_set(struct exynos_drm_manager *mgr,
struct exynos_drm_overlay *overlay)
{
- struct vidi_context *ctx = mgr->ctx;
+ struct vidi_context *ctx = manager_to_vidi(mgr);
struct vidi_win_data *win_data;
int win;
unsigned long offset;
@@ -204,7 +216,7 @@ static void vidi_win_mode_set(struct exynos_drm_manager *mgr,
static void vidi_win_commit(struct exynos_drm_manager *mgr, int zpos)
{
- struct vidi_context *ctx = mgr->ctx;
+ struct vidi_context *ctx = manager_to_vidi(mgr);
struct vidi_win_data *win_data;
int win = zpos;
@@ -229,7 +241,7 @@ static void vidi_win_commit(struct exynos_drm_manager *mgr, int zpos)
static void vidi_win_disable(struct exynos_drm_manager *mgr, int zpos)
{
- struct vidi_context *ctx = mgr->ctx;
+ struct vidi_context *ctx = manager_to_vidi(mgr);
struct vidi_win_data *win_data;
int win = zpos;
@@ -247,7 +259,7 @@ static void vidi_win_disable(struct exynos_drm_manager *mgr, int zpos)
static int vidi_power_on(struct exynos_drm_manager *mgr, bool enable)
{
- struct vidi_context *ctx = mgr->ctx;
+ struct vidi_context *ctx = manager_to_vidi(mgr);
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -271,7 +283,7 @@ static int vidi_power_on(struct exynos_drm_manager *mgr, bool enable)
static void vidi_dpms(struct exynos_drm_manager *mgr, int mode)
{
- struct vidi_context *ctx = mgr->ctx;
+ struct vidi_context *ctx = manager_to_vidi(mgr);
DRM_DEBUG_KMS("%d\n", mode);
@@ -297,7 +309,7 @@ static void vidi_dpms(struct exynos_drm_manager *mgr, int mode)
static int vidi_mgr_initialize(struct exynos_drm_manager *mgr,
struct drm_device *drm_dev)
{
- struct vidi_context *ctx = mgr->ctx;
+ struct vidi_context *ctx = manager_to_vidi(mgr);
struct exynos_drm_private *priv = drm_dev->dev_private;
mgr->drm_dev = ctx->drm_dev = drm_dev;
@@ -316,11 +328,6 @@ static struct exynos_drm_manager_ops vidi_manager_ops = {
.win_disable = vidi_win_disable,
};
-static struct exynos_drm_manager vidi_manager = {
- .type = EXYNOS_DISPLAY_TYPE_VIDI,
- .ops = &vidi_manager_ops,
-};
-
static void vidi_fake_vblank_handler(struct work_struct *work)
{
struct vidi_context *ctx = container_of(work, struct vidi_context,
@@ -349,9 +356,8 @@ static void vidi_fake_vblank_handler(struct work_struct *work)
static int vidi_show_connection(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct vidi_context *ctx = dev_get_drvdata(dev);
int rc;
- struct exynos_drm_manager *mgr = get_vidi_mgr(dev);
- struct vidi_context *ctx = mgr->ctx;
mutex_lock(&ctx->lock);
@@ -366,8 +372,7 @@ static int vidi_store_connection(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
- struct exynos_drm_manager *mgr = get_vidi_mgr(dev);
- struct vidi_context *ctx = mgr->ctx;
+ struct vidi_context *ctx = dev_get_drvdata(dev);
int ret;
ret = kstrtoint(buf, 0, &ctx->connected);
@@ -420,7 +425,7 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
display = exynos_drm_get_display(encoder);
if (display->type == EXYNOS_DISPLAY_TYPE_VIDI) {
- ctx = display->ctx;
+ ctx = display_to_vidi(display);
break;
}
}
@@ -530,7 +535,7 @@ static struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
static int vidi_create_connector(struct exynos_drm_display *display,
struct drm_encoder *encoder)
{
- struct vidi_context *ctx = display->ctx;
+ struct vidi_context *ctx = display_to_vidi(display);
struct drm_connector *connector = &ctx->connector;
int ret;
@@ -556,27 +561,22 @@ static struct exynos_drm_display_ops vidi_display_ops = {
.create_connector = vidi_create_connector,
};
-static struct exynos_drm_display vidi_display = {
- .type = EXYNOS_DISPLAY_TYPE_VIDI,
- .ops = &vidi_display_ops,
-};
-
-static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
+static int vidi_bind(struct device *dev, struct device *master, void *data)
{
- struct exynos_drm_manager *mgr = get_vidi_mgr(dev);
- struct vidi_context *ctx = mgr->ctx;
+ struct vidi_context *ctx = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
struct drm_crtc *crtc = ctx->crtc;
int ret;
- vidi_mgr_initialize(mgr, drm_dev);
+ vidi_mgr_initialize(&ctx->manager, drm_dev);
- ret = exynos_drm_crtc_create(&vidi_manager);
+ ret = exynos_drm_crtc_create(&ctx->manager);
if (ret) {
DRM_ERROR("failed to create crtc.\n");
return ret;
}
- ret = exynos_drm_create_enc_conn(drm_dev, &vidi_display);
+ ret = exynos_drm_create_enc_conn(drm_dev, &ctx->display);
if (ret) {
crtc->funcs->destroy(crtc);
DRM_ERROR("failed to create encoder and connector.\n");
@@ -586,9 +586,18 @@ static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
return 0;
}
+
+static void vidi_unbind(struct device *dev, struct device *master, void *data)
+{
+}
+
+static const struct component_ops vidi_component_ops = {
+ .bind = vidi_bind,
+ .unbind = vidi_unbind,
+};
+
static int vidi_probe(struct platform_device *pdev)
{
- struct exynos_drm_subdrv *subdrv;
struct vidi_context *ctx;
int ret;
@@ -596,40 +605,54 @@ static int vidi_probe(struct platform_device *pdev)
if (!ctx)
return -ENOMEM;
+ ctx->manager.type = EXYNOS_DISPLAY_TYPE_VIDI;
+ ctx->manager.ops = &vidi_manager_ops;
+ ctx->display.type = EXYNOS_DISPLAY_TYPE_VIDI;
+ ctx->display.ops = &vidi_display_ops;
ctx->default_win = 0;
+ ctx->pdev = pdev;
- INIT_WORK(&ctx->work, vidi_fake_vblank_handler);
-
- vidi_manager.ctx = ctx;
- vidi_display.ctx = ctx;
+ ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC,
+ ctx->manager.type);
+ if (ret)
+ return ret;
- mutex_init(&ctx->lock);
+ ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
+ ctx->display.type);
+ if (ret)
+ goto err_del_crtc_component;
- platform_set_drvdata(pdev, &vidi_manager);
+ INIT_WORK(&ctx->work, vidi_fake_vblank_handler);
- subdrv = &ctx->subdrv;
- subdrv->dev = &pdev->dev;
- subdrv->probe = vidi_subdrv_probe;
+ mutex_init(&ctx->lock);
- ret = exynos_drm_subdrv_register(subdrv);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to register drm vidi device\n");
- return ret;
- }
+ platform_set_drvdata(pdev, ctx);
ret = device_create_file(&pdev->dev, &dev_attr_connection);
if (ret < 0) {
- exynos_drm_subdrv_unregister(subdrv);
- DRM_INFO("failed to create connection sysfs.\n");
+ DRM_ERROR("failed to create connection sysfs.\n");
+ goto err_del_conn_component;
}
- return 0;
+ ret = component_add(&pdev->dev, &vidi_component_ops);
+ if (ret)
+ goto err_remove_file;
+
+ return ret;
+
+err_remove_file:
+ device_remove_file(&pdev->dev, &dev_attr_connection);
+err_del_conn_component:
+ exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
+err_del_crtc_component:
+ exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
+
+ return ret;
}
static int vidi_remove(struct platform_device *pdev)
{
- struct exynos_drm_manager *mgr = platform_get_drvdata(pdev);
- struct vidi_context *ctx = mgr->ctx;
+ struct vidi_context *ctx = platform_get_drvdata(pdev);
if (ctx->raw_edid != (struct edid *)fake_edid_info) {
kfree(ctx->raw_edid);
@@ -638,6 +661,10 @@ static int vidi_remove(struct platform_device *pdev)
return -EINVAL;
}
+ component_del(&pdev->dev, &vidi_component_ops);
+ exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
+ exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
+
return 0;
}
@@ -668,12 +695,19 @@ int exynos_drm_probe_vidi(void)
return ret;
}
+static int exynos_drm_remove_vidi_device(struct device *dev, void *data)
+{
+ platform_device_unregister(to_platform_device(dev));
+
+ return 0;
+}
+
void exynos_drm_remove_vidi(void)
{
- struct vidi_context *ctx = vidi_manager.ctx;
- struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
- struct platform_device *pdev = to_platform_device(subdrv->dev);
+ int ret = driver_for_each_device(&vidi_driver.driver, NULL, NULL,
+ exynos_drm_remove_vidi_device);
+ /* silence compiler warning */
+ (void)ret;
platform_driver_unregister(&vidi_driver);
- platform_device_unregister(pdev);
}
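With the static vidi_manager gone, exynos_drm_remove_vidi() above can no longer reach the platform device through a global pointer, so it walks the devices bound to the vidi driver instead. A sketch of that teardown order with a hypothetical driver:

static int demo_remove_one(struct device *dev, void *data)
{
	platform_device_unregister(to_platform_device(dev));
	return 0;
}

static void demo_remove_all(void)
{
	/* Unregister every device the driver created, then the driver itself. */
	int ret = driver_for_each_device(&demo_driver.driver, NULL, NULL,
					 demo_remove_one);

	(void)ret;	/* nothing useful can be done with a failure here */
	platform_driver_unregister(&demo_driver);
}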
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 563a19e62eb..5765a161abd 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -49,7 +49,6 @@
#include <linux/gpio.h>
#include <media/s5p_hdmi.h>
-#define get_hdmi_display(dev) platform_get_drvdata(to_platform_device(dev))
#define ctx_from_connector(c) container_of(c, struct hdmi_context, connector)
#define HOTPLUG_DEBOUNCE_MS 1100
@@ -182,6 +181,7 @@ struct hdmi_conf_regs {
};
struct hdmi_context {
+ struct exynos_drm_display display;
struct device *dev;
struct drm_device *drm_dev;
struct drm_connector connector;
@@ -213,6 +213,11 @@ struct hdmi_context {
enum hdmi_type type;
};
+static inline struct hdmi_context *display_to_hdmi(struct exynos_drm_display *d)
+{
+ return container_of(d, struct hdmi_context, display);
+}
+
struct hdmiphy_config {
int pixel_clock;
u8 conf[32];
@@ -1123,7 +1128,7 @@ static struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
static int hdmi_create_connector(struct exynos_drm_display *display,
struct drm_encoder *encoder)
{
- struct hdmi_context *hdata = display->ctx;
+ struct hdmi_context *hdata = display_to_hdmi(display);
struct drm_connector *connector = &hdata->connector;
int ret;
@@ -2000,7 +2005,7 @@ static void hdmi_v14_mode_set(struct hdmi_context *hdata,
static void hdmi_mode_set(struct exynos_drm_display *display,
struct drm_display_mode *mode)
{
- struct hdmi_context *hdata = display->ctx;
+ struct hdmi_context *hdata = display_to_hdmi(display);
struct drm_display_mode *m = mode;
DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%s\n",
@@ -2019,7 +2024,7 @@ static void hdmi_mode_set(struct exynos_drm_display *display,
static void hdmi_commit(struct exynos_drm_display *display)
{
- struct hdmi_context *hdata = display->ctx;
+ struct hdmi_context *hdata = display_to_hdmi(display);
mutex_lock(&hdata->hdmi_mutex);
if (!hdata->powered) {
@@ -2033,7 +2038,7 @@ static void hdmi_commit(struct exynos_drm_display *display)
static void hdmi_poweron(struct exynos_drm_display *display)
{
- struct hdmi_context *hdata = display->ctx;
+ struct hdmi_context *hdata = display_to_hdmi(display);
struct hdmi_resources *res = &hdata->res;
mutex_lock(&hdata->hdmi_mutex);
@@ -2064,7 +2069,7 @@ static void hdmi_poweron(struct exynos_drm_display *display)
static void hdmi_poweroff(struct exynos_drm_display *display)
{
- struct hdmi_context *hdata = display->ctx;
+ struct hdmi_context *hdata = display_to_hdmi(display);
struct hdmi_resources *res = &hdata->res;
mutex_lock(&hdata->hdmi_mutex);
@@ -2099,7 +2104,7 @@ out:
static void hdmi_dpms(struct exynos_drm_display *display, int mode)
{
- struct hdmi_context *hdata = display->ctx;
+ struct hdmi_context *hdata = display_to_hdmi(display);
struct drm_encoder *encoder = hdata->encoder;
struct drm_crtc *crtc = encoder->crtc;
struct drm_crtc_helper_funcs *funcs = NULL;
@@ -2143,11 +2148,6 @@ static struct exynos_drm_display_ops hdmi_display_ops = {
.commit = hdmi_commit,
};
-static struct exynos_drm_display hdmi_display = {
- .type = EXYNOS_DISPLAY_TYPE_HDMI,
- .ops = &hdmi_display_ops,
-};
-
static void hdmi_hotplug_work_func(struct work_struct *work)
{
struct hdmi_context *hdata;
@@ -2302,12 +2302,11 @@ MODULE_DEVICE_TABLE (of, hdmi_match_types);
static int hdmi_bind(struct device *dev, struct device *master, void *data)
{
struct drm_device *drm_dev = data;
- struct hdmi_context *hdata;
+ struct hdmi_context *hdata = dev_get_drvdata(dev);
- hdata = hdmi_display.ctx;
hdata->drm_dev = drm_dev;
- return exynos_drm_create_enc_conn(drm_dev, &hdmi_display);
+ return exynos_drm_create_enc_conn(drm_dev, &hdata->display);
}
static void hdmi_unbind(struct device *dev, struct device *master, void *data)
@@ -2349,31 +2348,28 @@ static int hdmi_probe(struct platform_device *pdev)
struct resource *res;
int ret;
- ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
- hdmi_display.type);
- if (ret)
- return ret;
-
- if (!dev->of_node) {
- ret = -ENODEV;
- goto err_del_component;
- }
+ if (!dev->of_node)
+ return -ENODEV;
pdata = drm_hdmi_dt_parse_pdata(dev);
- if (!pdata) {
- ret = -EINVAL;
- goto err_del_component;
- }
+ if (!pdata)
+ return -EINVAL;
hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), GFP_KERNEL);
- if (!hdata) {
- ret = -ENOMEM;
- goto err_del_component;
- }
+ if (!hdata)
+ return -ENOMEM;
+
+ hdata->display.type = EXYNOS_DISPLAY_TYPE_HDMI;
+ hdata->display.ops = &hdmi_display_ops;
+
+ ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
+ hdata->display.type);
+ if (ret)
+ return ret;
mutex_init(&hdata->hdmi_mutex);
- platform_set_drvdata(pdev, &hdmi_display);
+ platform_set_drvdata(pdev, hdata);
match = of_match_node(hdmi_match_types, dev->of_node);
if (!match) {
@@ -2485,7 +2481,6 @@ out_get_phy_port:
}
pm_runtime_enable(dev);
- hdmi_display.ctx = hdata;
ret = component_add(&pdev->dev, &hdmi_component_ops);
if (ret)
@@ -2510,7 +2505,7 @@ err_del_component:
static int hdmi_remove(struct platform_device *pdev)
{
- struct hdmi_context *hdata = hdmi_display.ctx;
+ struct hdmi_context *hdata = platform_get_drvdata(pdev);
cancel_delayed_work_sync(&hdata->hotplug_work);
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index a41c84ee3a2..820b76234ef 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -40,8 +40,6 @@
#include "exynos_drm_iommu.h"
#include "exynos_mixer.h"
-#define get_mixer_manager(dev) platform_get_drvdata(to_platform_device(dev))
-
#define MIXER_WIN_NR 3
#define MIXER_DEFAULT_WIN 0
@@ -86,6 +84,7 @@ enum mixer_version_id {
};
struct mixer_context {
+ struct exynos_drm_manager manager;
struct platform_device *pdev;
struct device *dev;
struct drm_device *drm_dev;
@@ -104,6 +103,11 @@ struct mixer_context {
atomic_t wait_vsync_event;
};
+static inline struct mixer_context *mgr_to_mixer(struct exynos_drm_manager *mgr)
+{
+ return container_of(mgr, struct mixer_context, manager);
+}
+
struct mixer_drv_data {
enum mixer_version_id version;
bool is_vp_enabled;
@@ -854,7 +858,7 @@ static int mixer_initialize(struct exynos_drm_manager *mgr,
struct drm_device *drm_dev)
{
int ret;
- struct mixer_context *mixer_ctx = mgr->ctx;
+ struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
struct exynos_drm_private *priv;
priv = drm_dev->dev_private;
@@ -885,7 +889,7 @@ static int mixer_initialize(struct exynos_drm_manager *mgr,
static void mixer_mgr_remove(struct exynos_drm_manager *mgr)
{
- struct mixer_context *mixer_ctx = mgr->ctx;
+ struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
if (is_drm_iommu_supported(mixer_ctx->drm_dev))
drm_iommu_detach_device(mixer_ctx->drm_dev, mixer_ctx->dev);
@@ -893,7 +897,7 @@ static void mixer_mgr_remove(struct exynos_drm_manager *mgr)
static int mixer_enable_vblank(struct exynos_drm_manager *mgr)
{
- struct mixer_context *mixer_ctx = mgr->ctx;
+ struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
struct mixer_resources *res = &mixer_ctx->mixer_res;
if (!mixer_ctx->powered) {
@@ -910,7 +914,7 @@ static int mixer_enable_vblank(struct exynos_drm_manager *mgr)
static void mixer_disable_vblank(struct exynos_drm_manager *mgr)
{
- struct mixer_context *mixer_ctx = mgr->ctx;
+ struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
struct mixer_resources *res = &mixer_ctx->mixer_res;
/* disable vsync interrupt */
@@ -920,7 +924,7 @@ static void mixer_disable_vblank(struct exynos_drm_manager *mgr)
static void mixer_win_mode_set(struct exynos_drm_manager *mgr,
struct exynos_drm_overlay *overlay)
{
- struct mixer_context *mixer_ctx = mgr->ctx;
+ struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
struct hdmi_win_data *win_data;
int win;
@@ -971,7 +975,7 @@ static void mixer_win_mode_set(struct exynos_drm_manager *mgr,
static void mixer_win_commit(struct exynos_drm_manager *mgr, int zpos)
{
- struct mixer_context *mixer_ctx = mgr->ctx;
+ struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
int win = zpos == DEFAULT_ZPOS ? MIXER_DEFAULT_WIN : zpos;
DRM_DEBUG_KMS("win: %d\n", win);
@@ -993,7 +997,7 @@ static void mixer_win_commit(struct exynos_drm_manager *mgr, int zpos)
static void mixer_win_disable(struct exynos_drm_manager *mgr, int zpos)
{
- struct mixer_context *mixer_ctx = mgr->ctx;
+ struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
struct mixer_resources *res = &mixer_ctx->mixer_res;
int win = zpos == DEFAULT_ZPOS ? MIXER_DEFAULT_WIN : zpos;
unsigned long flags;
@@ -1021,7 +1025,7 @@ static void mixer_win_disable(struct exynos_drm_manager *mgr, int zpos)
static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
{
- struct mixer_context *mixer_ctx = mgr->ctx;
+ struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
mutex_lock(&mixer_ctx->mixer_mutex);
if (!mixer_ctx->powered) {
@@ -1048,7 +1052,7 @@ static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
static void mixer_window_suspend(struct exynos_drm_manager *mgr)
{
- struct mixer_context *ctx = mgr->ctx;
+ struct mixer_context *ctx = mgr_to_mixer(mgr);
struct hdmi_win_data *win_data;
int i;
@@ -1062,7 +1066,7 @@ static void mixer_window_suspend(struct exynos_drm_manager *mgr)
static void mixer_window_resume(struct exynos_drm_manager *mgr)
{
- struct mixer_context *ctx = mgr->ctx;
+ struct mixer_context *ctx = mgr_to_mixer(mgr);
struct hdmi_win_data *win_data;
int i;
@@ -1077,7 +1081,7 @@ static void mixer_window_resume(struct exynos_drm_manager *mgr)
static void mixer_poweron(struct exynos_drm_manager *mgr)
{
- struct mixer_context *ctx = mgr->ctx;
+ struct mixer_context *ctx = mgr_to_mixer(mgr);
struct mixer_resources *res = &ctx->mixer_res;
mutex_lock(&ctx->mixer_mutex);
@@ -1111,7 +1115,7 @@ static void mixer_poweron(struct exynos_drm_manager *mgr)
static void mixer_poweroff(struct exynos_drm_manager *mgr)
{
- struct mixer_context *ctx = mgr->ctx;
+ struct mixer_context *ctx = mgr_to_mixer(mgr);
struct mixer_resources *res = &ctx->mixer_res;
mutex_lock(&ctx->mixer_mutex);
@@ -1187,11 +1191,6 @@ static struct exynos_drm_manager_ops mixer_manager_ops = {
.win_disable = mixer_win_disable,
};
-static struct exynos_drm_manager mixer_manager = {
- .type = EXYNOS_DISPLAY_TYPE_HDMI,
- .ops = &mixer_manager_ops,
-};
-
static struct mixer_drv_data exynos5420_mxr_drv_data = {
.version = MXR_VER_128_0_0_184,
.is_vp_enabled = 0,
@@ -1249,48 +1248,17 @@ MODULE_DEVICE_TABLE(of, mixer_match_types);
static int mixer_bind(struct device *dev, struct device *manager, void *data)
{
- struct platform_device *pdev = to_platform_device(dev);
+ struct mixer_context *ctx = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
- struct mixer_context *ctx;
- struct mixer_drv_data *drv;
int ret;
- dev_info(dev, "probe start\n");
-
- ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx) {
- DRM_ERROR("failed to alloc mixer context.\n");
- return -ENOMEM;
- }
-
- mutex_init(&ctx->mixer_mutex);
-
- if (dev->of_node) {
- const struct of_device_id *match;
- match = of_match_node(mixer_match_types, dev->of_node);
- drv = (struct mixer_drv_data *)match->data;
- } else {
- drv = (struct mixer_drv_data *)
- platform_get_device_id(pdev)->driver_data;
- }
-
- ctx->pdev = pdev;
- ctx->dev = dev;
- ctx->vp_enabled = drv->is_vp_enabled;
- ctx->has_sclk = drv->has_sclk;
- ctx->mxr_ver = drv->version;
- init_waitqueue_head(&ctx->wait_vsync_queue);
- atomic_set(&ctx->wait_vsync_event, 0);
-
- mixer_manager.ctx = ctx;
- ret = mixer_initialize(&mixer_manager, drm_dev);
+ ret = mixer_initialize(&ctx->manager, drm_dev);
if (ret)
return ret;
- platform_set_drvdata(pdev, &mixer_manager);
- ret = exynos_drm_crtc_create(&mixer_manager);
+ ret = exynos_drm_crtc_create(&ctx->manager);
if (ret) {
- mixer_mgr_remove(&mixer_manager);
+ mixer_mgr_remove(&ctx->manager);
return ret;
}
@@ -1301,11 +1269,9 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data)
static void mixer_unbind(struct device *dev, struct device *master, void *data)
{
- struct exynos_drm_manager *mgr = dev_get_drvdata(dev);
+ struct mixer_context *ctx = dev_get_drvdata(dev);
- dev_info(dev, "remove successful\n");
-
- mixer_mgr_remove(mgr);
+ mixer_mgr_remove(&ctx->manager);
pm_runtime_disable(dev);
}
@@ -1317,22 +1283,62 @@ static const struct component_ops mixer_component_ops = {
static int mixer_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct mixer_drv_data *drv;
+ struct mixer_context *ctx;
int ret;
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ DRM_ERROR("failed to alloc mixer context.\n");
+ return -ENOMEM;
+ }
+
+ mutex_init(&ctx->mixer_mutex);
+
+ ctx->manager.type = EXYNOS_DISPLAY_TYPE_HDMI;
+ ctx->manager.ops = &mixer_manager_ops;
+
+ if (dev->of_node) {
+ const struct of_device_id *match;
+
+ match = of_match_node(mixer_match_types, dev->of_node);
+ drv = (struct mixer_drv_data *)match->data;
+ } else {
+ drv = (struct mixer_drv_data *)
+ platform_get_device_id(pdev)->driver_data;
+ }
+
+ ctx->pdev = pdev;
+ ctx->dev = dev;
+ ctx->vp_enabled = drv->is_vp_enabled;
+ ctx->has_sclk = drv->has_sclk;
+ ctx->mxr_ver = drv->version;
+ init_waitqueue_head(&ctx->wait_vsync_queue);
+ atomic_set(&ctx->wait_vsync_event, 0);
+
+ platform_set_drvdata(pdev, ctx);
+
ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC,
- mixer_manager.type);
+ ctx->manager.type);
if (ret)
return ret;
ret = component_add(&pdev->dev, &mixer_component_ops);
- if (ret)
+ if (ret) {
exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
+ return ret;
+ }
+
+ pm_runtime_enable(dev);
return ret;
}
static int mixer_remove(struct platform_device *pdev)
{
+ pm_runtime_disable(&pdev->dev);
+
component_del(&pdev->dev, &mixer_component_ops);
exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
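
The hunks above drop the old static mixer_manager and instead embed the manager in struct mixer_context, converting back with mgr_to_mixer(). Its definition is not part of this excerpt; a minimal sketch, assuming the manager is embedded as ctx->manager, would be a plain container_of() wrapper:

static inline struct mixer_context *mgr_to_mixer(struct exynos_drm_manager *mgr)
{
	/* Recover the enclosing mixer_context from its embedded manager. */
	return container_of(mgr, struct mixer_context, manager);
}
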
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
index b1531557637..190e55f2f89 100644
--- a/drivers/gpu/drm/gma500/Makefile
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -39,6 +39,7 @@ gma500_gfx-$(CONFIG_DRM_GMA3600) += cdv_device.o \
gma500_gfx-$(CONFIG_DRM_GMA600) += oaktrail_device.o \
oaktrail_crtc.o \
oaktrail_lvds.o \
+ oaktrail_lvds_i2c.o \
oaktrail_hdmi.o \
oaktrail_hdmi_i2c.o
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 9f158eab517..0fafb8e2483 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -37,6 +37,201 @@
#include "gma_display.h"
#include <drm/drm_dp_helper.h>
+/**
+ * struct i2c_algo_dp_aux_data - driver interface structure for i2c over dp
+ * aux algorithm
+ * @running: set by the algo indicating whether an i2c is ongoing or whether
+ * the i2c bus is quiescent
+ * @address: i2c target address for the currently ongoing transfer
+ * @aux_ch: driver callback to transfer a single byte of the i2c payload
+ */
+struct i2c_algo_dp_aux_data {
+ bool running;
+ u16 address;
+ int (*aux_ch) (struct i2c_adapter *adapter,
+ int mode, uint8_t write_byte,
+ uint8_t *read_byte);
+};
+
+/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
+static int
+i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
+ uint8_t write_byte, uint8_t *read_byte)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int ret;
+
+ ret = (*algo_data->aux_ch)(adapter, mode,
+ write_byte, read_byte);
+ return ret;
+}
+
+/*
+ * I2C over AUX CH
+ */
+
+/*
+ * Send the address. If the I2C link is running, this 'restarts'
+ * the connection with the new address; this is used for doing
+ * a write followed by a read (as needed for DDC)
+ */
+static int
+i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int mode = MODE_I2C_START;
+ int ret;
+
+ if (reading)
+ mode |= MODE_I2C_READ;
+ else
+ mode |= MODE_I2C_WRITE;
+ algo_data->address = address;
+ algo_data->running = true;
+ ret = i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
+ return ret;
+}
+
+/*
+ * Stop the I2C transaction. This closes out the link, sending
+ * a bare address packet with the MOT bit turned off
+ */
+static void
+i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int mode = MODE_I2C_STOP;
+
+ if (reading)
+ mode |= MODE_I2C_READ;
+ else
+ mode |= MODE_I2C_WRITE;
+ if (algo_data->running) {
+ (void) i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
+ algo_data->running = false;
+ }
+}
+
+/*
+ * Write a single byte to the current I2C address; the
+ * I2C link must be running or this returns -EIO
+ */
+static int
+i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int ret;
+
+ if (!algo_data->running)
+ return -EIO;
+
+ ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL);
+ return ret;
+}
+
+/*
+ * Read a single byte from the current I2C address; the
+ * I2C link must be running or this returns -EIO
+ */
+static int
+i2c_algo_dp_aux_get_byte(struct i2c_adapter *adapter, u8 *byte_ret)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int ret;
+
+ if (!algo_data->running)
+ return -EIO;
+
+ ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret);
+ return ret;
+}
+
+static int
+i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs,
+ int num)
+{
+ int ret = 0;
+ bool reading = false;
+ int m;
+ int b;
+
+ for (m = 0; m < num; m++) {
+ u16 len = msgs[m].len;
+ u8 *buf = msgs[m].buf;
+ reading = (msgs[m].flags & I2C_M_RD) != 0;
+ ret = i2c_algo_dp_aux_address(adapter, msgs[m].addr, reading);
+ if (ret < 0)
+ break;
+ if (reading) {
+ for (b = 0; b < len; b++) {
+ ret = i2c_algo_dp_aux_get_byte(adapter, &buf[b]);
+ if (ret < 0)
+ break;
+ }
+ } else {
+ for (b = 0; b < len; b++) {
+ ret = i2c_algo_dp_aux_put_byte(adapter, buf[b]);
+ if (ret < 0)
+ break;
+ }
+ }
+ if (ret < 0)
+ break;
+ }
+ if (ret >= 0)
+ ret = num;
+ i2c_algo_dp_aux_stop(adapter, reading);
+ DRM_DEBUG_KMS("dp_aux_xfer return %d\n", ret);
+ return ret;
+}
+
+static u32
+i2c_algo_dp_aux_functionality(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+ I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
+ I2C_FUNC_10BIT_ADDR;
+}
+
+static const struct i2c_algorithm i2c_dp_aux_algo = {
+ .master_xfer = i2c_algo_dp_aux_xfer,
+ .functionality = i2c_algo_dp_aux_functionality,
+};
+
+static void
+i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
+{
+ (void) i2c_algo_dp_aux_address(adapter, 0, false);
+ (void) i2c_algo_dp_aux_stop(adapter, false);
+}
+
+static int
+i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
+{
+ adapter->algo = &i2c_dp_aux_algo;
+ adapter->retries = 3;
+ i2c_dp_aux_reset_bus(adapter);
+ return 0;
+}
+
+/*
+ * FIXME: This is the old dp aux helper; gma500 is the last driver that needs to
+ * be ported over to the new helper code in drm_dp_helper.c, like i915 or radeon.
+ */
+static int __deprecated
+i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
+{
+ int error;
+
+ error = i2c_dp_aux_prepare_bus(adapter);
+ if (error)
+ return error;
+ error = i2c_add_adapter(adapter);
+ return error;
+}
+
#define _wait_for(COND, MS, W) ({ \
unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
int ret__ = 0; \
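
The block added above reintroduces the old i2c-over-AUX algorithm locally. As a rough usage sketch, a driver fills in struct i2c_algo_dp_aux_data with its per-byte aux_ch callback and registers the adapter through i2c_dp_aux_add_bus(); my_aux_ch, my_adapter and my_register_ddc below are hypothetical stand-ins for the cdv_intel_dp code that actually does this.

static int my_aux_ch(struct i2c_adapter *adapter, int mode,
		     uint8_t write_byte, uint8_t *read_byte)
{
	/* Drive one byte over the DisplayPort AUX channel here;
	 * return a negative errno on failure. */
	return 0;
}

static struct i2c_algo_dp_aux_data my_aux_data = {
	.running = false,
	.address = 0,
	.aux_ch  = my_aux_ch,
};

static struct i2c_adapter my_adapter;

static int my_register_ddc(struct device *dev)
{
	my_adapter.algo_data = &my_aux_data;
	my_adapter.dev.parent = dev;
	strlcpy(my_adapter.name, "DP AUX DDC", sizeof(my_adapter.name));
	/* Installs i2c_dp_aux_algo and registers the adapter with the I2C core. */
	return i2c_dp_aux_add_bus(&my_adapter);
}
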
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
index 87885d8c06e..6b43ae3ffd7 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
@@ -25,6 +25,7 @@
*/
#include <linux/freezer.h>
+#include <video/mipi_display.h>
#include "mdfld_dsi_output.h"
#include "mdfld_dsi_pkg_sender.h"
@@ -32,20 +33,6 @@
#define MDFLD_DSI_READ_MAX_COUNT 5000
-enum data_type {
- DSI_DT_GENERIC_SHORT_WRITE_0 = 0x03,
- DSI_DT_GENERIC_SHORT_WRITE_1 = 0x13,
- DSI_DT_GENERIC_SHORT_WRITE_2 = 0x23,
- DSI_DT_GENERIC_READ_0 = 0x04,
- DSI_DT_GENERIC_READ_1 = 0x14,
- DSI_DT_GENERIC_READ_2 = 0x24,
- DSI_DT_GENERIC_LONG_WRITE = 0x29,
- DSI_DT_DCS_SHORT_WRITE_0 = 0x05,
- DSI_DT_DCS_SHORT_WRITE_1 = 0x15,
- DSI_DT_DCS_READ = 0x06,
- DSI_DT_DCS_LONG_WRITE = 0x39,
-};
-
enum {
MDFLD_DSI_PANEL_MODE_SLEEP = 0x1,
};
@@ -321,9 +308,9 @@ static int send_pkg_prepare(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
u8 cmd;
switch (data_type) {
- case DSI_DT_DCS_SHORT_WRITE_0:
- case DSI_DT_DCS_SHORT_WRITE_1:
- case DSI_DT_DCS_LONG_WRITE:
+ case MIPI_DSI_DCS_SHORT_WRITE:
+ case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
+ case MIPI_DSI_DCS_LONG_WRITE:
cmd = *data;
break;
default:
@@ -334,12 +321,12 @@ static int send_pkg_prepare(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
sender->status = MDFLD_DSI_PKG_SENDER_BUSY;
/*wait for 120 milliseconds in case exit_sleep_mode just be sent*/
- if (unlikely(cmd == DCS_ENTER_SLEEP_MODE)) {
+ if (unlikely(cmd == MIPI_DCS_ENTER_SLEEP_MODE)) {
/*TODO: replace it with msleep later*/
mdelay(120);
}
- if (unlikely(cmd == DCS_EXIT_SLEEP_MODE)) {
+ if (unlikely(cmd == MIPI_DCS_EXIT_SLEEP_MODE)) {
/*TODO: replace it with msleep later*/
mdelay(120);
}
@@ -352,9 +339,9 @@ static int send_pkg_done(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
u8 cmd;
switch (data_type) {
- case DSI_DT_DCS_SHORT_WRITE_0:
- case DSI_DT_DCS_SHORT_WRITE_1:
- case DSI_DT_DCS_LONG_WRITE:
+ case MIPI_DSI_DCS_SHORT_WRITE:
+ case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
+ case MIPI_DSI_DCS_LONG_WRITE:
cmd = *data;
break;
default:
@@ -362,15 +349,15 @@ static int send_pkg_done(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
}
/*update panel status*/
- if (unlikely(cmd == DCS_ENTER_SLEEP_MODE)) {
+ if (unlikely(cmd == MIPI_DCS_ENTER_SLEEP_MODE)) {
sender->panel_mode |= MDFLD_DSI_PANEL_MODE_SLEEP;
/*TODO: replace it with msleep later*/
mdelay(120);
- } else if (unlikely(cmd == DCS_EXIT_SLEEP_MODE)) {
+ } else if (unlikely(cmd == MIPI_DCS_EXIT_SLEEP_MODE)) {
sender->panel_mode &= ~MDFLD_DSI_PANEL_MODE_SLEEP;
/*TODO: replace it with msleep later*/
mdelay(120);
- } else if (unlikely(cmd == DCS_SOFT_RESET)) {
+ } else if (unlikely(cmd == MIPI_DCS_SOFT_RESET)) {
/*TODO: replace it with msleep later*/
mdelay(5);
}
@@ -405,19 +392,19 @@ static int send_pkg(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
}
switch (data_type) {
- case DSI_DT_GENERIC_SHORT_WRITE_0:
- case DSI_DT_GENERIC_SHORT_WRITE_1:
- case DSI_DT_GENERIC_SHORT_WRITE_2:
- case DSI_DT_GENERIC_READ_0:
- case DSI_DT_GENERIC_READ_1:
- case DSI_DT_GENERIC_READ_2:
- case DSI_DT_DCS_SHORT_WRITE_0:
- case DSI_DT_DCS_SHORT_WRITE_1:
- case DSI_DT_DCS_READ:
+ case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
+ case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
+ case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
+ case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
+ case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
+ case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
+ case MIPI_DSI_DCS_SHORT_WRITE:
+ case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
+ case MIPI_DSI_DCS_READ:
ret = send_short_pkg(sender, data_type, data[0], data[1], hs);
break;
- case DSI_DT_GENERIC_LONG_WRITE:
- case DSI_DT_DCS_LONG_WRITE:
+ case MIPI_DSI_GENERIC_LONG_WRITE:
+ case MIPI_DSI_DCS_LONG_WRITE:
ret = send_long_pkg(sender, data_type, data, len, hs);
break;
}
@@ -440,7 +427,7 @@ int mdfld_dsi_send_mcs_long(struct mdfld_dsi_pkg_sender *sender, u8 *data,
}
spin_lock_irqsave(&sender->lock, flags);
- send_pkg(sender, DSI_DT_DCS_LONG_WRITE, data, len, hs);
+ send_pkg(sender, MIPI_DSI_DCS_LONG_WRITE, data, len, hs);
spin_unlock_irqrestore(&sender->lock, flags);
return 0;
@@ -461,10 +448,10 @@ int mdfld_dsi_send_mcs_short(struct mdfld_dsi_pkg_sender *sender, u8 cmd,
data[0] = cmd;
if (param_num) {
- data_type = DSI_DT_DCS_SHORT_WRITE_1;
+ data_type = MIPI_DSI_DCS_SHORT_WRITE_PARAM;
data[1] = param;
} else {
- data_type = DSI_DT_DCS_SHORT_WRITE_0;
+ data_type = MIPI_DSI_DCS_SHORT_WRITE;
data[1] = 0;
}
@@ -489,17 +476,17 @@ int mdfld_dsi_send_gen_short(struct mdfld_dsi_pkg_sender *sender, u8 param0,
switch (param_num) {
case 0:
- data_type = DSI_DT_GENERIC_SHORT_WRITE_0;
+ data_type = MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM;
data[0] = 0;
data[1] = 0;
break;
case 1:
- data_type = DSI_DT_GENERIC_SHORT_WRITE_1;
+ data_type = MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM;
data[0] = param0;
data[1] = 0;
break;
case 2:
- data_type = DSI_DT_GENERIC_SHORT_WRITE_2;
+ data_type = MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM;
data[0] = param0;
data[1] = param1;
break;
@@ -523,7 +510,7 @@ int mdfld_dsi_send_gen_long(struct mdfld_dsi_pkg_sender *sender, u8 *data,
}
spin_lock_irqsave(&sender->lock, flags);
- send_pkg(sender, DSI_DT_GENERIC_LONG_WRITE, data, len, hs);
+ send_pkg(sender, MIPI_DSI_GENERIC_LONG_WRITE, data, len, hs);
spin_unlock_irqrestore(&sender->lock, flags);
return 0;
@@ -594,7 +581,7 @@ int mdfld_dsi_read_mcs(struct mdfld_dsi_pkg_sender *sender, u8 cmd,
return -EINVAL;
}
- return __read_panel_data(sender, DSI_DT_DCS_READ, &cmd, 1,
+ return __read_panel_data(sender, MIPI_DSI_DCS_READ, &cmd, 1,
data, len, hs);
}
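
The hunks above drop the driver-local DSI_DT_* and DCS_* definitions in favour of the generic constants from <video/mipi_display.h>; the numeric values are unchanged, only the names differ. A small sketch of the short-write selection that mdfld_dsi_send_mcs_short() performs, with pick_dcs_short_type() as a hypothetical helper:

#include <linux/types.h>
#include <video/mipi_display.h>

/* DSI_DT_DCS_SHORT_WRITE_0 (0x05) -> MIPI_DSI_DCS_SHORT_WRITE
 * DSI_DT_DCS_SHORT_WRITE_1 (0x15) -> MIPI_DSI_DCS_SHORT_WRITE_PARAM
 * DSI_DT_DCS_LONG_WRITE    (0x39) -> MIPI_DSI_DCS_LONG_WRITE
 */
static u8 pick_dcs_short_type(unsigned int param_num)
{
	return param_num ? MIPI_DSI_DCS_SHORT_WRITE_PARAM	/* 0x15 */
			 : MIPI_DSI_DCS_SHORT_WRITE;		/* 0x05 */
}
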
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h
index 459cd7ea8b8..0478a21c15d 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h
@@ -62,18 +62,6 @@ struct mdfld_dsi_pkg_sender {
u32 mipi_cmd_len_reg;
};
-/* DCS definitions */
-#define DCS_SOFT_RESET 0x01
-#define DCS_ENTER_SLEEP_MODE 0x10
-#define DCS_EXIT_SLEEP_MODE 0x11
-#define DCS_SET_DISPLAY_OFF 0x28
-#define DCS_SET_DISPLAY_ON 0x29
-#define DCS_SET_COLUMN_ADDRESS 0x2a
-#define DCS_SET_PAGE_ADDRESS 0x2b
-#define DCS_WRITE_MEM_START 0x2c
-#define DCS_SET_TEAR_OFF 0x34
-#define DCS_SET_TEAR_ON 0x35
-
extern int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector,
int pipe);
extern void mdfld_dsi_pkg_sender_destroy(struct mdfld_dsi_pkg_sender *sender);
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 0d39da6e8b7..83bbc271bcf 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -359,22 +359,26 @@ void oaktrail_lvds_init(struct drm_device *dev,
* if closed, act like it's not there for now
*/
+ edid = NULL;
mutex_lock(&dev->mode_config.mutex);
i2c_adap = i2c_get_adapter(dev_priv->ops->i2c_bus);
- if (i2c_adap == NULL)
- dev_err(dev->dev, "No ddc adapter available!\n");
+ if (i2c_adap)
+ edid = drm_get_edid(connector, i2c_adap);
+ if (edid == NULL && dev_priv->lpc_gpio_base) {
+ oaktrail_lvds_i2c_init(encoder);
+ if (gma_encoder->ddc_bus != NULL) {
+ i2c_adap = &gma_encoder->ddc_bus->adapter;
+ edid = drm_get_edid(connector, i2c_adap);
+ }
+ }
/*
* Attempt to get the fixed panel mode from DDC. Assume that the
* preferred mode is the right one.
*/
- if (i2c_adap) {
- edid = drm_get_edid(connector, i2c_adap);
- if (edid) {
- drm_mode_connector_update_edid_property(connector,
- edid);
- drm_add_edid_modes(connector, edid);
- kfree(edid);
- }
+ if (edid) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ drm_add_edid_modes(connector, edid);
+ kfree(edid);
list_for_each_entry(scan, &connector->probed_modes, head) {
if (scan->type & DRM_MODE_TYPE_PREFERRED) {
@@ -383,7 +387,8 @@ void oaktrail_lvds_init(struct drm_device *dev,
goto out; /* FIXME: check for quirks */
}
}
- }
+ } else
+ dev_err(dev->dev, "No ddc adapter available!\n");
/*
 * If we didn't get EDID, try getting panel timing
* from configuration data
@@ -411,8 +416,10 @@ failed_find:
mutex_unlock(&dev->mode_config.mutex);
dev_dbg(dev->dev, "No LVDS modes found, disabling.\n");
- if (gma_encoder->ddc_bus)
+ if (gma_encoder->ddc_bus) {
psb_intel_i2c_destroy(gma_encoder->ddc_bus);
+ gma_encoder->ddc_bus = NULL;
+ }
/* failed_ddc: */
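
Taken together, the hunks above change oaktrail_lvds_init() to try the regular DDC adapter first and to fall back to the LPC GPIO bit-banged bus only when that yields no EDID. A condensed sketch of that order, with probe_lvds_edid() as a hypothetical helper and error handling omitted:

static struct edid *probe_lvds_edid(struct drm_connector *connector,
				    struct gma_encoder *gma_encoder,
				    struct drm_psb_private *dev_priv)
{
	struct i2c_adapter *adap;
	struct edid *edid = NULL;

	/* 1) Try the normal DDC adapter first. */
	adap = i2c_get_adapter(dev_priv->ops->i2c_bus);
	if (adap)
		edid = drm_get_edid(connector, adap);

	/* 2) Fall back to the LPC GPIO bit-banged bus, if one was found. */
	if (!edid && dev_priv->lpc_gpio_base) {
		oaktrail_lvds_i2c_init(&gma_encoder->base);
		if (gma_encoder->ddc_bus)
			edid = drm_get_edid(connector,
					    &gma_encoder->ddc_bus->adapter);
	}
	return edid;
}
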
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c b/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c
new file mode 100644
index 00000000000..f913a62eee5
--- /dev/null
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2002-2010, Intel Corporation.
+ * Copyright (c) 2014 ATRON electronic GmbH
+ * Author: Jan Safrata <jan.nikitenko@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+
+#include <drm/drmP.h>
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+
+
+/*
+ * LPC GPIO based I2C bus for LVDS of Atom E6xx
+ */
+
+/*-----------------------------------------------------------------------------
+ * LPC Register Offsets. Used for LVDS GPIO bit banging. Registers are part of
+ * the Atom E6xx [D31:F0]
+ ----------------------------------------------------------------------------*/
+#define RGEN 0x20
+#define RGIO 0x24
+#define RGLVL 0x28
+#define RGTPE 0x2C
+#define RGTNE 0x30
+#define RGGPE 0x34
+#define RGSMI 0x38
+#define RGTS 0x3C
+
+/* The LVDS GPIO clock lines are GPIOSUS[3]
+ * The LVDS GPIO data lines are GPIOSUS[4]
+ */
+#define GPIO_CLOCK 0x08
+#define GPIO_DATA 0x10
+
+#define LPC_READ_REG(chan, r) inl((chan)->reg + (r))
+#define LPC_WRITE_REG(chan, r, val) outl((val), (chan)->reg + (r))
+
+static int get_clock(void *data)
+{
+ struct psb_intel_i2c_chan *chan = data;
+ u32 val, tmp;
+
+ val = LPC_READ_REG(chan, RGIO);
+ val |= GPIO_CLOCK;
+ LPC_WRITE_REG(chan, RGIO, val);
+ tmp = LPC_READ_REG(chan, RGLVL);
+ val = (LPC_READ_REG(chan, RGLVL) & GPIO_CLOCK) ? 1 : 0;
+
+ return val;
+}
+
+static int get_data(void *data)
+{
+ struct psb_intel_i2c_chan *chan = data;
+ u32 val, tmp;
+
+ val = LPC_READ_REG(chan, RGIO);
+ val |= GPIO_DATA;
+ LPC_WRITE_REG(chan, RGIO, val);
+ tmp = LPC_READ_REG(chan, RGLVL);
+ val = (LPC_READ_REG(chan, RGLVL) & GPIO_DATA) ? 1 : 0;
+
+ return val;
+}
+
+static void set_clock(void *data, int state_high)
+{
+ struct psb_intel_i2c_chan *chan = data;
+ u32 val;
+
+ if (state_high) {
+ val = LPC_READ_REG(chan, RGIO);
+ val |= GPIO_CLOCK;
+ LPC_WRITE_REG(chan, RGIO, val);
+ } else {
+ val = LPC_READ_REG(chan, RGIO);
+ val &= ~GPIO_CLOCK;
+ LPC_WRITE_REG(chan, RGIO, val);
+ val = LPC_READ_REG(chan, RGLVL);
+ val &= ~GPIO_CLOCK;
+ LPC_WRITE_REG(chan, RGLVL, val);
+ }
+}
+
+static void set_data(void *data, int state_high)
+{
+ struct psb_intel_i2c_chan *chan = data;
+ u32 val;
+
+ if (state_high) {
+ val = LPC_READ_REG(chan, RGIO);
+ val |= GPIO_DATA;
+ LPC_WRITE_REG(chan, RGIO, val);
+ } else {
+ val = LPC_READ_REG(chan, RGIO);
+ val &= ~GPIO_DATA;
+ LPC_WRITE_REG(chan, RGIO, val);
+ val = LPC_READ_REG(chan, RGLVL);
+ val &= ~GPIO_DATA;
+ LPC_WRITE_REG(chan, RGLVL, val);
+ }
+}
+
+void oaktrail_lvds_i2c_init(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_i2c_chan *chan;
+
+ chan = kzalloc(sizeof(struct psb_intel_i2c_chan), GFP_KERNEL);
+ if (!chan)
+ return;
+
+ chan->drm_dev = dev;
+ chan->reg = dev_priv->lpc_gpio_base;
+ strncpy(chan->adapter.name, "gma500 LPC", I2C_NAME_SIZE - 1);
+ chan->adapter.owner = THIS_MODULE;
+ chan->adapter.algo_data = &chan->algo;
+ chan->adapter.dev.parent = &dev->pdev->dev;
+ chan->algo.setsda = set_data;
+ chan->algo.setscl = set_clock;
+ chan->algo.getsda = get_data;
+ chan->algo.getscl = get_clock;
+ chan->algo.udelay = 100;
+ chan->algo.timeout = usecs_to_jiffies(2200);
+ chan->algo.data = chan;
+
+ i2c_set_adapdata(&chan->adapter, chan);
+
+ set_data(chan, 1);
+ set_clock(chan, 1);
+ udelay(50);
+
+ if (i2c_bit_add_bus(&chan->adapter)) {
+ kfree(chan);
+ return;
+ }
+
+ gma_encoder->ddc_bus = chan;
+}
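
Once oaktrail_lvds_i2c_init() has registered the adapter, i2c-algo-bit turns the four GPIO callbacks above into complete transfers, so the bus behaves like any other i2c_adapter. A minimal usage sketch, with read_edid_header() as a hypothetical helper reading from the usual DDC address 0x50:

static int read_edid_header(struct i2c_adapter *adap, u8 *buf, int len)
{
	u8 start = 0;
	struct i2c_msg msgs[] = {
		{ .addr = 0x50, .flags = 0,        .len = 1,   .buf = &start },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = len, .buf = buf },
	};

	/* i2c-algo-bit drives SCL/SDA through the four GPIO callbacks above. */
	return i2c_transfer(adap, msgs, ARRAY_SIZE(msgs)) == 2 ? 0 : -EIO;
}
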
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 6ec3a905fdd..92e7e579539 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -212,6 +212,8 @@ static int psb_driver_unload(struct drm_device *dev)
}
if (dev_priv->aux_pdev)
pci_dev_put(dev_priv->aux_pdev);
+ if (dev_priv->lpc_pdev)
+ pci_dev_put(dev_priv->lpc_pdev);
/* Destroy VBT data */
psb_intel_destroy_bios(dev);
@@ -280,6 +282,24 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
DRM_DEBUG_KMS("Couldn't find aux pci device");
}
dev_priv->gmbus_reg = dev_priv->aux_reg;
+
+ dev_priv->lpc_pdev = pci_get_bus_and_slot(0, PCI_DEVFN(31, 0));
+ if (dev_priv->lpc_pdev) {
+ pci_read_config_word(dev_priv->lpc_pdev, PSB_LPC_GBA,
+ &dev_priv->lpc_gpio_base);
+ pci_write_config_dword(dev_priv->lpc_pdev, PSB_LPC_GBA,
+ (u32)dev_priv->lpc_gpio_base | (1L<<31));
+ pci_read_config_word(dev_priv->lpc_pdev, PSB_LPC_GBA,
+ &dev_priv->lpc_gpio_base);
+ dev_priv->lpc_gpio_base &= 0xffc0;
+ if (dev_priv->lpc_gpio_base)
+ DRM_DEBUG_KMS("Found LPC GPIO at 0x%04x\n",
+ dev_priv->lpc_gpio_base);
+ else {
+ pci_dev_put(dev_priv->lpc_pdev);
+ dev_priv->lpc_pdev = NULL;
+ }
+ }
} else {
dev_priv->gmbus_reg = dev_priv->vdc_reg;
}
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 55ebe2bd88d..e38057b9186 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -83,6 +83,7 @@ enum {
#define PSB_PGETBL_CTL 0x2020
#define _PSB_PGETBL_ENABLED 0x00000001
#define PSB_SGX_2D_SLAVE_PORT 0x4000
+#define PSB_LPC_GBA 0x44
/* TODO: To get rid of */
#define PSB_TT_PRIV0_LIMIT (256*1024*1024)
@@ -441,6 +442,7 @@ struct psb_ops;
struct drm_psb_private {
struct drm_device *dev;
struct pci_dev *aux_pdev; /* Currently only used by mrst */
+ struct pci_dev *lpc_pdev; /* Currently only used by mrst */
const struct psb_ops *ops;
const struct psb_offset *regmap;
@@ -470,6 +472,7 @@ struct drm_psb_private {
uint8_t __iomem *sgx_reg;
uint8_t __iomem *vdc_reg;
uint8_t __iomem *aux_reg; /* Auxillary vdc pipe regs */
+ uint16_t lpc_gpio_base;
uint32_t gatt_free_offset;
/* Fencing / irq */
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 87b50ba64ed..b21a09451d1 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -21,6 +21,7 @@
#include <linux/i2c.h>
#include <drm/drmP.h>
+#include <drm/drm_plane_helper.h>
#include "framebuffer.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 336bd3aa1a0..860dd2177ca 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -223,6 +223,7 @@ extern void oaktrail_lvds_init(struct drm_device *dev,
extern void oaktrail_wait_for_INTR_PKT_SENT(struct drm_device *dev);
extern void oaktrail_dsi_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev);
+extern void oaktrail_lvds_i2c_init(struct drm_encoder *encoder);
extern void mid_dsi_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev, int dsi_num);
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 0be96fdb5e2..58529cea575 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -1631,57 +1631,8 @@ static int psb_intel_sdvo_get_modes(struct drm_connector *connector)
return !list_empty(&connector->probed_modes);
}
-static void
-psb_intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
-{
- struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
- struct drm_device *dev = connector->dev;
-
- if (psb_intel_sdvo_connector->left)
- drm_property_destroy(dev, psb_intel_sdvo_connector->left);
- if (psb_intel_sdvo_connector->right)
- drm_property_destroy(dev, psb_intel_sdvo_connector->right);
- if (psb_intel_sdvo_connector->top)
- drm_property_destroy(dev, psb_intel_sdvo_connector->top);
- if (psb_intel_sdvo_connector->bottom)
- drm_property_destroy(dev, psb_intel_sdvo_connector->bottom);
- if (psb_intel_sdvo_connector->hpos)
- drm_property_destroy(dev, psb_intel_sdvo_connector->hpos);
- if (psb_intel_sdvo_connector->vpos)
- drm_property_destroy(dev, psb_intel_sdvo_connector->vpos);
- if (psb_intel_sdvo_connector->saturation)
- drm_property_destroy(dev, psb_intel_sdvo_connector->saturation);
- if (psb_intel_sdvo_connector->contrast)
- drm_property_destroy(dev, psb_intel_sdvo_connector->contrast);
- if (psb_intel_sdvo_connector->hue)
- drm_property_destroy(dev, psb_intel_sdvo_connector->hue);
- if (psb_intel_sdvo_connector->sharpness)
- drm_property_destroy(dev, psb_intel_sdvo_connector->sharpness);
- if (psb_intel_sdvo_connector->flicker_filter)
- drm_property_destroy(dev, psb_intel_sdvo_connector->flicker_filter);
- if (psb_intel_sdvo_connector->flicker_filter_2d)
- drm_property_destroy(dev, psb_intel_sdvo_connector->flicker_filter_2d);
- if (psb_intel_sdvo_connector->flicker_filter_adaptive)
- drm_property_destroy(dev, psb_intel_sdvo_connector->flicker_filter_adaptive);
- if (psb_intel_sdvo_connector->tv_luma_filter)
- drm_property_destroy(dev, psb_intel_sdvo_connector->tv_luma_filter);
- if (psb_intel_sdvo_connector->tv_chroma_filter)
- drm_property_destroy(dev, psb_intel_sdvo_connector->tv_chroma_filter);
- if (psb_intel_sdvo_connector->dot_crawl)
- drm_property_destroy(dev, psb_intel_sdvo_connector->dot_crawl);
- if (psb_intel_sdvo_connector->brightness)
- drm_property_destroy(dev, psb_intel_sdvo_connector->brightness);
-}
-
static void psb_intel_sdvo_destroy(struct drm_connector *connector)
{
- struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
-
- if (psb_intel_sdvo_connector->tv_format)
- drm_property_destroy(connector->dev,
- psb_intel_sdvo_connector->tv_format);
-
- psb_intel_sdvo_destroy_enhance_property(connector);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(connector);
diff --git a/drivers/gpu/drm/i2c/Kconfig b/drivers/gpu/drm/i2c/Kconfig
index 4d341db462a..22c7ed63a00 100644
--- a/drivers/gpu/drm/i2c/Kconfig
+++ b/drivers/gpu/drm/i2c/Kconfig
@@ -1,6 +1,12 @@
menu "I2C encoder or helper chips"
depends on DRM && DRM_KMS_HELPER && I2C
+config DRM_I2C_ADV7511
+ tristate "AV7511 encoder"
+ select REGMAP_I2C
+ help
+ Support for the Analog Devices ADV7511(W) and ADV7513 HDMI encoders.
+
config DRM_I2C_CH7006
tristate "Chrontel ch7006 TV encoder"
default m if DRM_NOUVEAU
diff --git a/drivers/gpu/drm/i2c/Makefile b/drivers/gpu/drm/i2c/Makefile
index 43aa33baebe..2c72eb584ab 100644
--- a/drivers/gpu/drm/i2c/Makefile
+++ b/drivers/gpu/drm/i2c/Makefile
@@ -1,5 +1,7 @@
ccflags-y := -Iinclude/drm
+obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511.o
+
ch7006-y := ch7006_drv.o ch7006_mode.o
obj-$(CONFIG_DRM_I2C_CH7006) += ch7006.o
diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c
new file mode 100644
index 00000000000..faf1c0c5ab2
--- /dev/null
+++ b/drivers/gpu/drm/i2c/adv7511.c
@@ -0,0 +1,1010 @@
+/*
+ * Analog Devices ADV7511 HDMI transmitter driver
+ *
+ * Copyright 2012 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_encoder_slave.h>
+
+#include "adv7511.h"
+
+struct adv7511 {
+ struct i2c_client *i2c_main;
+ struct i2c_client *i2c_edid;
+
+ struct regmap *regmap;
+ struct regmap *packet_memory_regmap;
+ enum drm_connector_status status;
+ int dpms_mode;
+
+ unsigned int f_tmds;
+
+ unsigned int current_edid_segment;
+ uint8_t edid_buf[256];
+
+ wait_queue_head_t wq;
+ struct drm_encoder *encoder;
+
+ bool embedded_sync;
+ enum adv7511_sync_polarity vsync_polarity;
+ enum adv7511_sync_polarity hsync_polarity;
+ bool rgb;
+
+ struct edid *edid;
+
+ struct gpio_desc *gpio_pd;
+};
+
+static struct adv7511 *encoder_to_adv7511(struct drm_encoder *encoder)
+{
+ return to_encoder_slave(encoder)->slave_priv;
+}
+
+/* ADI recommended values for proper operation. */
+static const struct reg_default adv7511_fixed_registers[] = {
+ { 0x98, 0x03 },
+ { 0x9a, 0xe0 },
+ { 0x9c, 0x30 },
+ { 0x9d, 0x61 },
+ { 0xa2, 0xa4 },
+ { 0xa3, 0xa4 },
+ { 0xe0, 0xd0 },
+ { 0xf9, 0x00 },
+ { 0x55, 0x02 },
+};
+
+/* -----------------------------------------------------------------------------
+ * Register access
+ */
+
+static const uint8_t adv7511_register_defaults[] = {
+ 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 00 */
+ 0x00, 0x00, 0x01, 0x0e, 0xbc, 0x18, 0x01, 0x13,
+ 0x25, 0x37, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 10 */
+ 0x46, 0x62, 0x04, 0xa8, 0x00, 0x00, 0x1c, 0x84,
+ 0x1c, 0xbf, 0x04, 0xa8, 0x1e, 0x70, 0x02, 0x1e, /* 20 */
+ 0x00, 0x00, 0x04, 0xa8, 0x08, 0x12, 0x1b, 0xac,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 30 */
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0xb0,
+ 0x00, 0x50, 0x90, 0x7e, 0x79, 0x70, 0x00, 0x00, /* 40 */
+ 0x00, 0xa8, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x02, 0x0d, 0x00, 0x00, 0x00, 0x00, /* 50 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 60 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 70 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 80 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 90 */
+ 0x0b, 0x02, 0x00, 0x18, 0x5a, 0x60, 0x00, 0x00,
+ 0x00, 0x00, 0x80, 0x80, 0x08, 0x04, 0x00, 0x00, /* a0 */
+ 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x40, 0x14,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* b0 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* c0 */
+ 0x00, 0x03, 0x00, 0x00, 0x02, 0x00, 0x01, 0x04,
+ 0x30, 0xff, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00, /* d0 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x01,
+ 0x80, 0x75, 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, /* e0 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x75, 0x11, 0x00, /* f0 */
+ 0x00, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static bool adv7511_register_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ADV7511_REG_CHIP_REVISION:
+ case ADV7511_REG_SPDIF_FREQ:
+ case ADV7511_REG_CTS_AUTOMATIC1:
+ case ADV7511_REG_CTS_AUTOMATIC2:
+ case ADV7511_REG_VIC_DETECTED:
+ case ADV7511_REG_VIC_SEND:
+ case ADV7511_REG_AUX_VIC_DETECTED:
+ case ADV7511_REG_STATUS:
+ case ADV7511_REG_GC(1):
+ case ADV7511_REG_INT(0):
+ case ADV7511_REG_INT(1):
+ case ADV7511_REG_PLL_STATUS:
+ case ADV7511_REG_AN(0):
+ case ADV7511_REG_AN(1):
+ case ADV7511_REG_AN(2):
+ case ADV7511_REG_AN(3):
+ case ADV7511_REG_AN(4):
+ case ADV7511_REG_AN(5):
+ case ADV7511_REG_AN(6):
+ case ADV7511_REG_AN(7):
+ case ADV7511_REG_HDCP_STATUS:
+ case ADV7511_REG_BCAPS:
+ case ADV7511_REG_BKSV(0):
+ case ADV7511_REG_BKSV(1):
+ case ADV7511_REG_BKSV(2):
+ case ADV7511_REG_BKSV(3):
+ case ADV7511_REG_BKSV(4):
+ case ADV7511_REG_DDC_STATUS:
+ case ADV7511_REG_BSTATUS(0):
+ case ADV7511_REG_BSTATUS(1):
+ case ADV7511_REG_CHIP_ID_HIGH:
+ case ADV7511_REG_CHIP_ID_LOW:
+ return true;
+ }
+
+ return false;
+}
+
+static const struct regmap_config adv7511_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+ .cache_type = REGCACHE_RBTREE,
+ .reg_defaults_raw = adv7511_register_defaults,
+ .num_reg_defaults_raw = ARRAY_SIZE(adv7511_register_defaults),
+
+ .volatile_reg = adv7511_register_volatile,
+};
+
+/* -----------------------------------------------------------------------------
+ * Hardware configuration
+ */
+
+static void adv7511_set_colormap(struct adv7511 *adv7511, bool enable,
+ const uint16_t *coeff,
+ unsigned int scaling_factor)
+{
+ unsigned int i;
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(1),
+ ADV7511_CSC_UPDATE_MODE, ADV7511_CSC_UPDATE_MODE);
+
+ if (enable) {
+ for (i = 0; i < 12; ++i) {
+ regmap_update_bits(adv7511->regmap,
+ ADV7511_REG_CSC_UPPER(i),
+ 0x1f, coeff[i] >> 8);
+ regmap_write(adv7511->regmap,
+ ADV7511_REG_CSC_LOWER(i),
+ coeff[i] & 0xff);
+ }
+ }
+
+ if (enable)
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(0),
+ 0xe0, 0x80 | (scaling_factor << 5));
+ else
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(0),
+ 0x80, 0x00);
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(1),
+ ADV7511_CSC_UPDATE_MODE, 0);
+}
+
+static int adv7511_packet_enable(struct adv7511 *adv7511, unsigned int packet)
+{
+ if (packet & 0xff)
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE0,
+ packet, 0xff);
+
+ if (packet & 0xff00) {
+ packet >>= 8;
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
+ packet, 0xff);
+ }
+
+ return 0;
+}
+
+static int adv7511_packet_disable(struct adv7511 *adv7511, unsigned int packet)
+{
+ if (packet & 0xff)
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE0,
+ packet, 0x00);
+
+ if (packet & 0xff00) {
+ packet >>= 8;
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
+ packet, 0x00);
+ }
+
+ return 0;
+}
+
+/* Coefficients for adv7511 color space conversion */
+static const uint16_t adv7511_csc_ycbcr_to_rgb[] = {
+ 0x0734, 0x04ad, 0x0000, 0x1c1b,
+ 0x1ddc, 0x04ad, 0x1f24, 0x0135,
+ 0x0000, 0x04ad, 0x087c, 0x1b77,
+};
+
+static void adv7511_set_config_csc(struct adv7511 *adv7511,
+ struct drm_connector *connector,
+ bool rgb)
+{
+ struct adv7511_video_config config;
+ bool output_format_422, output_format_ycbcr;
+ unsigned int mode;
+ uint8_t infoframe[17];
+
+ if (adv7511->edid)
+ config.hdmi_mode = drm_detect_hdmi_monitor(adv7511->edid);
+ else
+ config.hdmi_mode = false;
+
+ hdmi_avi_infoframe_init(&config.avi_infoframe);
+
+ config.avi_infoframe.scan_mode = HDMI_SCAN_MODE_UNDERSCAN;
+
+ if (rgb) {
+ config.csc_enable = false;
+ config.avi_infoframe.colorspace = HDMI_COLORSPACE_RGB;
+ } else {
+ config.csc_scaling_factor = ADV7511_CSC_SCALING_4;
+ config.csc_coefficents = adv7511_csc_ycbcr_to_rgb;
+
+ if ((connector->display_info.color_formats &
+ DRM_COLOR_FORMAT_YCRCB422) &&
+ config.hdmi_mode) {
+ config.csc_enable = false;
+ config.avi_infoframe.colorspace =
+ HDMI_COLORSPACE_YUV422;
+ } else {
+ config.csc_enable = true;
+ config.avi_infoframe.colorspace = HDMI_COLORSPACE_RGB;
+ }
+ }
+
+ if (config.hdmi_mode) {
+ mode = ADV7511_HDMI_CFG_MODE_HDMI;
+
+ switch (config.avi_infoframe.colorspace) {
+ case HDMI_COLORSPACE_YUV444:
+ output_format_422 = false;
+ output_format_ycbcr = true;
+ break;
+ case HDMI_COLORSPACE_YUV422:
+ output_format_422 = true;
+ output_format_ycbcr = true;
+ break;
+ default:
+ output_format_422 = false;
+ output_format_ycbcr = false;
+ break;
+ }
+ } else {
+ mode = ADV7511_HDMI_CFG_MODE_DVI;
+ output_format_422 = false;
+ output_format_ycbcr = false;
+ }
+
+ adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
+
+ adv7511_set_colormap(adv7511, config.csc_enable,
+ config.csc_coefficents,
+ config.csc_scaling_factor);
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_VIDEO_INPUT_CFG1, 0x81,
+ (output_format_422 << 7) | output_format_ycbcr);
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_HDCP_HDMI_CFG,
+ ADV7511_HDMI_CFG_MODE_MASK, mode);
+
+ hdmi_avi_infoframe_pack(&config.avi_infoframe, infoframe,
+ sizeof(infoframe));
+
+ /* The AVI infoframe id is not configurable */
+ regmap_bulk_write(adv7511->regmap, ADV7511_REG_AVI_INFOFRAME_VERSION,
+ infoframe + 1, sizeof(infoframe) - 1);
+
+ adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
+}
+
+static void adv7511_set_link_config(struct adv7511 *adv7511,
+ const struct adv7511_link_config *config)
+{
+ /*
+ * The input style values documented in the datasheet don't match the
+ * hardware register field values :-(
+ */
+ static const unsigned int input_styles[4] = { 0, 2, 1, 3 };
+
+ unsigned int clock_delay;
+ unsigned int color_depth;
+ unsigned int input_id;
+
+ clock_delay = (config->clock_delay + 1200) / 400;
+ color_depth = config->input_color_depth == 8 ? 3
+ : (config->input_color_depth == 10 ? 1 : 2);
+
+ /* TODO Support input ID 6 */
+ if (config->input_colorspace != HDMI_COLORSPACE_YUV422)
+ input_id = config->input_clock == ADV7511_INPUT_CLOCK_DDR
+ ? 5 : 0;
+ else if (config->input_clock == ADV7511_INPUT_CLOCK_DDR)
+ input_id = config->embedded_sync ? 8 : 7;
+ else if (config->input_clock == ADV7511_INPUT_CLOCK_2X)
+ input_id = config->embedded_sync ? 4 : 3;
+ else
+ input_id = config->embedded_sync ? 2 : 1;
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_I2C_FREQ_ID_CFG, 0xf,
+ input_id);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_VIDEO_INPUT_CFG1, 0x7e,
+ (color_depth << 4) |
+ (input_styles[config->input_style] << 2));
+ regmap_write(adv7511->regmap, ADV7511_REG_VIDEO_INPUT_CFG2,
+ config->input_justification << 3);
+ regmap_write(adv7511->regmap, ADV7511_REG_TIMING_GEN_SEQ,
+ config->sync_pulse << 2);
+
+ regmap_write(adv7511->regmap, 0xba, clock_delay << 5);
+
+ adv7511->embedded_sync = config->embedded_sync;
+ adv7511->hsync_polarity = config->hsync_polarity;
+ adv7511->vsync_polarity = config->vsync_polarity;
+ adv7511->rgb = config->input_colorspace == HDMI_COLORSPACE_RGB;
+}
+
+/* -----------------------------------------------------------------------------
+ * Interrupt and hotplug detection
+ */
+
+static bool adv7511_hpd(struct adv7511 *adv7511)
+{
+ unsigned int irq0;
+ int ret;
+
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(0), &irq0);
+ if (ret < 0)
+ return false;
+
+ if (irq0 & ADV7511_INT0_HDP) {
+ regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
+ ADV7511_INT0_HDP);
+ return true;
+ }
+
+ return false;
+}
+
+static irqreturn_t adv7511_irq_handler(int irq, void *devid)
+{
+ struct adv7511 *adv7511 = devid;
+
+ if (adv7511_hpd(adv7511))
+ drm_helper_hpd_irq_event(adv7511->encoder->dev);
+
+ wake_up_all(&adv7511->wq);
+
+ return IRQ_HANDLED;
+}
+
+static unsigned int adv7511_is_interrupt_pending(struct adv7511 *adv7511,
+ unsigned int irq)
+{
+ unsigned int irq0, irq1;
+ unsigned int pending;
+ int ret;
+
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(0), &irq0);
+ if (ret < 0)
+ return 0;
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(1), &irq1);
+ if (ret < 0)
+ return 0;
+
+ pending = (irq1 << 8) | irq0;
+
+ return pending & irq;
+}
+
+static int adv7511_wait_for_interrupt(struct adv7511 *adv7511, int irq,
+ int timeout)
+{
+ unsigned int pending;
+ int ret;
+
+ if (adv7511->i2c_main->irq) {
+ ret = wait_event_interruptible_timeout(adv7511->wq,
+ adv7511_is_interrupt_pending(adv7511, irq),
+ msecs_to_jiffies(timeout));
+ if (ret <= 0)
+ return 0;
+ pending = adv7511_is_interrupt_pending(adv7511, irq);
+ } else {
+ if (timeout < 25)
+ timeout = 25;
+ do {
+ pending = adv7511_is_interrupt_pending(adv7511, irq);
+ if (pending)
+ break;
+ msleep(25);
+ timeout -= 25;
+ } while (timeout >= 25);
+ }
+
+ return pending;
+}
+
+/* -----------------------------------------------------------------------------
+ * EDID retrieval
+ */
+
+static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block,
+ size_t len)
+{
+ struct adv7511 *adv7511 = data;
+ struct i2c_msg xfer[2];
+ uint8_t offset;
+ unsigned int i;
+ int ret;
+
+ if (len > 128)
+ return -EINVAL;
+
+ if (adv7511->current_edid_segment != block / 2) {
+ unsigned int status;
+
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_DDC_STATUS,
+ &status);
+ if (ret < 0)
+ return ret;
+
+ if (status != 2) {
+ regmap_write(adv7511->regmap, ADV7511_REG_EDID_SEGMENT,
+ block);
+ ret = adv7511_wait_for_interrupt(adv7511,
+ ADV7511_INT0_EDID_READY |
+ ADV7511_INT1_DDC_ERROR, 200);
+
+ if (!(ret & ADV7511_INT0_EDID_READY))
+ return -EIO;
+ }
+
+ regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
+ ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR);
+
+ /* Break this apart; more I2C controllers are likely to
+ * support 64-byte transfers than 256-byte transfers
+ */
+
+ xfer[0].addr = adv7511->i2c_edid->addr;
+ xfer[0].flags = 0;
+ xfer[0].len = 1;
+ xfer[0].buf = &offset;
+ xfer[1].addr = adv7511->i2c_edid->addr;
+ xfer[1].flags = I2C_M_RD;
+ xfer[1].len = 64;
+ xfer[1].buf = adv7511->edid_buf;
+
+ offset = 0;
+
+ for (i = 0; i < 4; ++i) {
+ ret = i2c_transfer(adv7511->i2c_edid->adapter, xfer,
+ ARRAY_SIZE(xfer));
+ if (ret < 0)
+ return ret;
+ else if (ret != 2)
+ return -EIO;
+
+ xfer[1].buf += 64;
+ offset += 64;
+ }
+
+ adv7511->current_edid_segment = block / 2;
+ }
+
+ if (block % 2 == 0)
+ memcpy(buf, adv7511->edid_buf, len);
+ else
+ memcpy(buf, adv7511->edid_buf + 128, len);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Encoder operations
+ */
+
+static int adv7511_get_modes(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
+ struct edid *edid;
+ unsigned int count;
+
+ /* Reading the EDID only works if the device is powered */
+ if (adv7511->dpms_mode != DRM_MODE_DPMS_ON) {
+ regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
+ ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+ ADV7511_POWER_POWER_DOWN, 0);
+ adv7511->current_edid_segment = -1;
+ }
+
+ edid = drm_do_get_edid(connector, adv7511_get_edid_block, adv7511);
+
+ if (adv7511->dpms_mode != DRM_MODE_DPMS_ON)
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+ ADV7511_POWER_POWER_DOWN,
+ ADV7511_POWER_POWER_DOWN);
+
+ kfree(adv7511->edid);
+ adv7511->edid = edid;
+ if (!edid)
+ return 0;
+
+ drm_mode_connector_update_edid_property(connector, edid);
+ count = drm_add_edid_modes(connector, edid);
+
+ adv7511_set_config_csc(adv7511, connector, adv7511->rgb);
+
+ return count;
+}
+
+static void adv7511_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ adv7511->current_edid_segment = -1;
+
+ regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
+ ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+ ADV7511_POWER_POWER_DOWN, 0);
+ /*
+ * Per spec it is allowed to pulse the HDP signal to indicate
+ * that the EDID information has changed. Some monitors do this
+ * when they wake up from standby or are enabled. When the HDP
+ * goes low the adv7511 is reset and the outputs are disabled,
+ * which might cause the monitor to go to standby again. To
+ * avoid this we ignore the HDP pin for the first few seconds
+ * after enabling the output.
+ */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
+ ADV7511_REG_POWER2_HDP_SRC_MASK,
+ ADV7511_REG_POWER2_HDP_SRC_NONE);
+ /* Most of the registers are reset during power down or
+ * when HPD is low
+ */
+ regcache_sync(adv7511->regmap);
+ break;
+ default:
+ /* TODO: setup additional power down modes */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+ ADV7511_POWER_POWER_DOWN,
+ ADV7511_POWER_POWER_DOWN);
+ regcache_mark_dirty(adv7511->regmap);
+ break;
+ }
+
+ adv7511->dpms_mode = mode;
+}
+
+static enum drm_connector_status
+adv7511_encoder_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
+ enum drm_connector_status status;
+ unsigned int val;
+ bool hpd;
+ int ret;
+
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_STATUS, &val);
+ if (ret < 0)
+ return connector_status_disconnected;
+
+ if (val & ADV7511_STATUS_HPD)
+ status = connector_status_connected;
+ else
+ status = connector_status_disconnected;
+
+ hpd = adv7511_hpd(adv7511);
+
+ /* The chip resets itself when the cable is disconnected, so in case
+ * there is a pending HPD interrupt and the cable is connected, there was
+ * at least one transition from disconnected to connected and the chip
+ * has to be reinitialized. */
+ if (status == connector_status_connected && hpd &&
+ adv7511->dpms_mode == DRM_MODE_DPMS_ON) {
+ regcache_mark_dirty(adv7511->regmap);
+ adv7511_encoder_dpms(encoder, adv7511->dpms_mode);
+ adv7511_get_modes(encoder, connector);
+ if (adv7511->status == connector_status_connected)
+ status = connector_status_disconnected;
+ } else {
+ /* Re-enable HDP sensing */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
+ ADV7511_REG_POWER2_HDP_SRC_MASK,
+ ADV7511_REG_POWER2_HDP_SRC_BOTH);
+ }
+
+ adv7511->status = status;
+ return status;
+}
+
+static int adv7511_encoder_mode_valid(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ if (mode->clock > 165000)
+ return MODE_CLOCK_HIGH;
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return MODE_NO_INTERLACE;
+
+ return MODE_OK;
+}
+
+static void adv7511_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
+ unsigned int low_refresh_rate;
+ unsigned int hsync_polarity = 0;
+ unsigned int vsync_polarity = 0;
+
+ if (adv7511->embedded_sync) {
+ unsigned int hsync_offset, hsync_len;
+ unsigned int vsync_offset, vsync_len;
+
+ hsync_offset = adj_mode->crtc_hsync_start -
+ adj_mode->crtc_hdisplay;
+ vsync_offset = adj_mode->crtc_vsync_start -
+ adj_mode->crtc_vdisplay;
+ hsync_len = adj_mode->crtc_hsync_end -
+ adj_mode->crtc_hsync_start;
+ vsync_len = adj_mode->crtc_vsync_end -
+ adj_mode->crtc_vsync_start;
+
+ /* The hardware vsync generator has an off-by-one bug */
+ vsync_offset += 1;
+
+ regmap_write(adv7511->regmap, ADV7511_REG_HSYNC_PLACEMENT_MSB,
+ ((hsync_offset >> 10) & 0x7) << 5);
+ regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(0),
+ (hsync_offset >> 2) & 0xff);
+ regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(1),
+ ((hsync_offset & 0x3) << 6) |
+ ((hsync_len >> 4) & 0x3f));
+ regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(2),
+ ((hsync_len & 0xf) << 4) |
+ ((vsync_offset >> 6) & 0xf));
+ regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(3),
+ ((vsync_offset & 0x3f) << 2) |
+ ((vsync_len >> 8) & 0x3));
+ regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(4),
+ vsync_len & 0xff);
+
+ hsync_polarity = !(adj_mode->flags & DRM_MODE_FLAG_PHSYNC);
+ vsync_polarity = !(adj_mode->flags & DRM_MODE_FLAG_PVSYNC);
+ } else {
+ enum adv7511_sync_polarity mode_hsync_polarity;
+ enum adv7511_sync_polarity mode_vsync_polarity;
+
+ /*
+ * If the input signal is always low or always high we want to
+ * invert it or pass it through, depending on the polarity of
+ * the current mode.
+ */
+ if (adj_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ mode_hsync_polarity = ADV7511_SYNC_POLARITY_LOW;
+ else
+ mode_hsync_polarity = ADV7511_SYNC_POLARITY_HIGH;
+
+ if (adj_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ mode_vsync_polarity = ADV7511_SYNC_POLARITY_LOW;
+ else
+ mode_vsync_polarity = ADV7511_SYNC_POLARITY_HIGH;
+
+ if (adv7511->hsync_polarity != mode_hsync_polarity &&
+ adv7511->hsync_polarity !=
+ ADV7511_SYNC_POLARITY_PASSTHROUGH)
+ hsync_polarity = 1;
+
+ if (adv7511->vsync_polarity != mode_vsync_polarity &&
+ adv7511->vsync_polarity !=
+ ADV7511_SYNC_POLARITY_PASSTHROUGH)
+ vsync_polarity = 1;
+ }
+
+ if (mode->vrefresh <= 24000)
+ low_refresh_rate = ADV7511_LOW_REFRESH_RATE_24HZ;
+ else if (mode->vrefresh <= 25000)
+ low_refresh_rate = ADV7511_LOW_REFRESH_RATE_25HZ;
+ else if (mode->vrefresh <= 30000)
+ low_refresh_rate = ADV7511_LOW_REFRESH_RATE_30HZ;
+ else
+ low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE;
+
+ regmap_update_bits(adv7511->regmap, 0xfb,
+ 0x6, low_refresh_rate << 1);
+ regmap_update_bits(adv7511->regmap, 0x17,
+ 0x60, (vsync_polarity << 6) | (hsync_polarity << 5));
+
+ /*
+ * TODO Test first order 4:2:2 to 4:4:4 up conversion method, which is
+ * supposed to give better results.
+ */
+
+ adv7511->f_tmds = mode->clock;
+}
+
+static struct drm_encoder_slave_funcs adv7511_encoder_funcs = {
+ .dpms = adv7511_encoder_dpms,
+ .mode_valid = adv7511_encoder_mode_valid,
+ .mode_set = adv7511_encoder_mode_set,
+ .detect = adv7511_encoder_detect,
+ .get_modes = adv7511_get_modes,
+};
+
+/* -----------------------------------------------------------------------------
+ * Probe & remove
+ */
+
+static int adv7511_parse_dt(struct device_node *np,
+ struct adv7511_link_config *config)
+{
+ const char *str;
+ int ret;
+
+ memset(config, 0, sizeof(*config));
+
+ of_property_read_u32(np, "adi,input-depth", &config->input_color_depth);
+ if (config->input_color_depth != 8 && config->input_color_depth != 10 &&
+ config->input_color_depth != 12)
+ return -EINVAL;
+
+ ret = of_property_read_string(np, "adi,input-colorspace", &str);
+ if (ret < 0)
+ return ret;
+
+ if (!strcmp(str, "rgb"))
+ config->input_colorspace = HDMI_COLORSPACE_RGB;
+ else if (!strcmp(str, "yuv422"))
+ config->input_colorspace = HDMI_COLORSPACE_YUV422;
+ else if (!strcmp(str, "yuv444"))
+ config->input_colorspace = HDMI_COLORSPACE_YUV444;
+ else
+ return -EINVAL;
+
+ ret = of_property_read_string(np, "adi,input-clock", &str);
+ if (ret < 0)
+ return ret;
+
+ if (!strcmp(str, "1x"))
+ config->input_clock = ADV7511_INPUT_CLOCK_1X;
+ else if (!strcmp(str, "2x"))
+ config->input_clock = ADV7511_INPUT_CLOCK_2X;
+ else if (!strcmp(str, "ddr"))
+ config->input_clock = ADV7511_INPUT_CLOCK_DDR;
+ else
+ return -EINVAL;
+
+ if (config->input_colorspace == HDMI_COLORSPACE_YUV422 ||
+ config->input_clock != ADV7511_INPUT_CLOCK_1X) {
+ ret = of_property_read_u32(np, "adi,input-style",
+ &config->input_style);
+ if (ret)
+ return ret;
+
+ if (config->input_style < 1 || config->input_style > 3)
+ return -EINVAL;
+
+ ret = of_property_read_string(np, "adi,input-justification",
+ &str);
+ if (ret < 0)
+ return ret;
+
+ if (!strcmp(str, "left"))
+ config->input_justification =
+ ADV7511_INPUT_JUSTIFICATION_LEFT;
+ else if (!strcmp(str, "evenly"))
+ config->input_justification =
+ ADV7511_INPUT_JUSTIFICATION_EVENLY;
+ else if (!strcmp(str, "right"))
+ config->input_justification =
+ ADV7511_INPUT_JUSTIFICATION_RIGHT;
+ else
+ return -EINVAL;
+
+ } else {
+ config->input_style = 1;
+ config->input_justification = ADV7511_INPUT_JUSTIFICATION_LEFT;
+ }
+
+ of_property_read_u32(np, "adi,clock-delay", &config->clock_delay);
+ if (config->clock_delay < -1200 || config->clock_delay > 1600)
+ return -EINVAL;
+
+ config->embedded_sync = of_property_read_bool(np, "adi,embedded-sync");
+
+ /* Hardcode the sync pulse configurations for now. */
+ config->sync_pulse = ADV7511_INPUT_SYNC_PULSE_NONE;
+ config->vsync_polarity = ADV7511_SYNC_POLARITY_PASSTHROUGH;
+ config->hsync_polarity = ADV7511_SYNC_POLARITY_PASSTHROUGH;
+
+ return 0;
+}
+
+static const int edid_i2c_addr = 0x7e;
+static const int packet_i2c_addr = 0x70;
+static const int cec_i2c_addr = 0x78;
+
+static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
+{
+ struct adv7511_link_config link_config;
+ struct adv7511 *adv7511;
+ struct device *dev = &i2c->dev;
+ unsigned int val;
+ int ret;
+
+ if (!dev->of_node)
+ return -EINVAL;
+
+ adv7511 = devm_kzalloc(dev, sizeof(*adv7511), GFP_KERNEL);
+ if (!adv7511)
+ return -ENOMEM;
+
+ adv7511->dpms_mode = DRM_MODE_DPMS_OFF;
+ adv7511->status = connector_status_disconnected;
+
+ ret = adv7511_parse_dt(dev->of_node, &link_config);
+ if (ret)
+ return ret;
+
+ /*
+ * The power down GPIO is optional. If present, toggle it from active to
+ * inactive to wake up the encoder.
+ */
+ adv7511->gpio_pd = devm_gpiod_get_optional(dev, "pd", GPIOD_OUT_HIGH);
+ if (IS_ERR(adv7511->gpio_pd))
+ return PTR_ERR(adv7511->gpio_pd);
+
+ if (adv7511->gpio_pd) {
+ mdelay(5);
+ gpiod_set_value_cansleep(adv7511->gpio_pd, 0);
+ }
+
+ adv7511->regmap = devm_regmap_init_i2c(i2c, &adv7511_regmap_config);
+ if (IS_ERR(adv7511->regmap))
+ return PTR_ERR(adv7511->regmap);
+
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_CHIP_REVISION, &val);
+ if (ret)
+ return ret;
+ dev_dbg(dev, "Rev. %d\n", val);
+
+ ret = regmap_register_patch(adv7511->regmap, adv7511_fixed_registers,
+ ARRAY_SIZE(adv7511_fixed_registers));
+ if (ret)
+ return ret;
+
+ regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR, edid_i2c_addr);
+ regmap_write(adv7511->regmap, ADV7511_REG_PACKET_I2C_ADDR,
+ packet_i2c_addr);
+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_I2C_ADDR, cec_i2c_addr);
+ adv7511_packet_disable(adv7511, 0xffff);
+
+ adv7511->i2c_main = i2c;
+ adv7511->i2c_edid = i2c_new_dummy(i2c->adapter, edid_i2c_addr >> 1);
+ if (!adv7511->i2c_edid)
+ return -ENOMEM;
+
+ if (i2c->irq) {
+ init_waitqueue_head(&adv7511->wq);
+
+ ret = devm_request_threaded_irq(dev, i2c->irq, NULL,
+ adv7511_irq_handler,
+ IRQF_ONESHOT, dev_name(dev),
+ adv7511);
+ if (ret)
+ goto err_i2c_unregister_device;
+ }
+
+ /* CEC is unused for now */
+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL,
+ ADV7511_CEC_CTRL_POWER_DOWN);
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+ ADV7511_POWER_POWER_DOWN, ADV7511_POWER_POWER_DOWN);
+
+ adv7511->current_edid_segment = -1;
+
+ i2c_set_clientdata(i2c, adv7511);
+
+ adv7511_set_link_config(adv7511, &link_config);
+
+ return 0;
+
+err_i2c_unregister_device:
+ i2c_unregister_device(adv7511->i2c_edid);
+
+ return ret;
+}
+
+static int adv7511_remove(struct i2c_client *i2c)
+{
+ struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
+
+ i2c_unregister_device(adv7511->i2c_edid);
+
+ kfree(adv7511->edid);
+
+ return 0;
+}
+
+static int adv7511_encoder_init(struct i2c_client *i2c, struct drm_device *dev,
+ struct drm_encoder_slave *encoder)
+{
+
+ struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
+
+ encoder->slave_priv = adv7511;
+ encoder->slave_funcs = &adv7511_encoder_funcs;
+
+ adv7511->encoder = &encoder->base;
+
+ return 0;
+}
+
+static const struct i2c_device_id adv7511_i2c_ids[] = {
+ { "adv7511", 0 },
+ { "adv7511w", 0 },
+ { "adv7513", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, adv7511_i2c_ids);
+
+static const struct of_device_id adv7511_of_ids[] = {
+ { .compatible = "adi,adv7511", },
+ { .compatible = "adi,adv7511w", },
+ { .compatible = "adi,adv7513", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, adv7511_of_ids);
+
+static struct drm_i2c_encoder_driver adv7511_driver = {
+ .i2c_driver = {
+ .driver = {
+ .name = "adv7511",
+ .of_match_table = adv7511_of_ids,
+ },
+ .id_table = adv7511_i2c_ids,
+ .probe = adv7511_probe,
+ .remove = adv7511_remove,
+ },
+
+ .encoder_init = adv7511_encoder_init,
+};
+
+static int __init adv7511_init(void)
+{
+ return drm_i2c_encoder_register(THIS_MODULE, &adv7511_driver);
+}
+module_init(adv7511_init);
+
+static void __exit adv7511_exit(void)
+{
+ drm_i2c_encoder_unregister(&adv7511_driver);
+}
+module_exit(adv7511_exit);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("ADV7511 HDMI transmitter driver");
+MODULE_LICENSE("GPL");
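
adv7511_get_modes() above hands EDID retrieval to drm_do_get_edid(), which calls the supplied callback once per 128-byte EDID block and takes care of checksums and extension blocks itself. A minimal sketch of that callback contract, with my_edid_ctx, my_read_block() and my_get_edid() as hypothetical stand-ins for the adv7511 versions:

#include <linux/string.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>

struct my_edid_ctx {
	u8 blocks[2][128];	/* pretend the EDID blocks were already cached */
};

static int my_read_block(void *data, u8 *buf, unsigned int block, size_t len)
{
	struct my_edid_ctx *ctx = data;

	if (block > 1 || len > 128)
		return -EINVAL;
	memcpy(buf, ctx->blocks[block], len);
	return 0;	/* 0 means the block was read successfully */
}

static struct edid *my_get_edid(struct drm_connector *connector,
				struct my_edid_ctx *ctx)
{
	/* drm_do_get_edid() validates checksums and handles extensions. */
	return drm_do_get_edid(connector, my_read_block, ctx);
}
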
diff --git a/drivers/gpu/drm/i2c/adv7511.h b/drivers/gpu/drm/i2c/adv7511.h
new file mode 100644
index 00000000000..6599ed53842
--- /dev/null
+++ b/drivers/gpu/drm/i2c/adv7511.h
@@ -0,0 +1,289 @@
+/*
+ * Analog Devices ADV7511 HDMI transmitter driver
+ *
+ * Copyright 2012 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef __DRM_I2C_ADV7511_H__
+#define __DRM_I2C_ADV7511_H__
+
+#include <linux/hdmi.h>
+
+#define ADV7511_REG_CHIP_REVISION 0x00
+#define ADV7511_REG_N0 0x01
+#define ADV7511_REG_N1 0x02
+#define ADV7511_REG_N2 0x03
+#define ADV7511_REG_SPDIF_FREQ 0x04
+#define ADV7511_REG_CTS_AUTOMATIC1 0x05
+#define ADV7511_REG_CTS_AUTOMATIC2 0x06
+#define ADV7511_REG_CTS_MANUAL0 0x07
+#define ADV7511_REG_CTS_MANUAL1 0x08
+#define ADV7511_REG_CTS_MANUAL2 0x09
+#define ADV7511_REG_AUDIO_SOURCE 0x0a
+#define ADV7511_REG_AUDIO_CONFIG 0x0b
+#define ADV7511_REG_I2S_CONFIG 0x0c
+#define ADV7511_REG_I2S_WIDTH 0x0d
+#define ADV7511_REG_AUDIO_SUB_SRC0 0x0e
+#define ADV7511_REG_AUDIO_SUB_SRC1 0x0f
+#define ADV7511_REG_AUDIO_SUB_SRC2 0x10
+#define ADV7511_REG_AUDIO_SUB_SRC3 0x11
+#define ADV7511_REG_AUDIO_CFG1 0x12
+#define ADV7511_REG_AUDIO_CFG2 0x13
+#define ADV7511_REG_AUDIO_CFG3 0x14
+#define ADV7511_REG_I2C_FREQ_ID_CFG 0x15
+#define ADV7511_REG_VIDEO_INPUT_CFG1 0x16
+#define ADV7511_REG_CSC_UPPER(x) (0x18 + (x) * 2)
+#define ADV7511_REG_CSC_LOWER(x) (0x19 + (x) * 2)
+#define ADV7511_REG_SYNC_DECODER(x) (0x30 + (x))
+#define ADV7511_REG_DE_GENERATOR(x) (0x35 + (x))
+#define ADV7511_REG_PIXEL_REPETITION 0x3b
+#define ADV7511_REG_VIC_MANUAL 0x3c
+#define ADV7511_REG_VIC_SEND 0x3d
+#define ADV7511_REG_VIC_DETECTED 0x3e
+#define ADV7511_REG_AUX_VIC_DETECTED 0x3f
+#define ADV7511_REG_PACKET_ENABLE0 0x40
+#define ADV7511_REG_POWER 0x41
+#define ADV7511_REG_STATUS 0x42
+#define ADV7511_REG_EDID_I2C_ADDR 0x43
+#define ADV7511_REG_PACKET_ENABLE1 0x44
+#define ADV7511_REG_PACKET_I2C_ADDR 0x45
+#define ADV7511_REG_DSD_ENABLE 0x46
+#define ADV7511_REG_VIDEO_INPUT_CFG2 0x48
+#define ADV7511_REG_INFOFRAME_UPDATE 0x4a
+#define ADV7511_REG_GC(x) (0x4b + (x)) /* 0x4b - 0x51 */
+#define ADV7511_REG_AVI_INFOFRAME_VERSION 0x52
+#define ADV7511_REG_AVI_INFOFRAME_LENGTH 0x53
+#define ADV7511_REG_AVI_INFOFRAME_CHECKSUM 0x54
+#define ADV7511_REG_AVI_INFOFRAME(x) (0x55 + (x)) /* 0x55 - 0x6f */
+#define ADV7511_REG_AUDIO_INFOFRAME_VERSION 0x70
+#define ADV7511_REG_AUDIO_INFOFRAME_LENGTH 0x71
+#define ADV7511_REG_AUDIO_INFOFRAME_CHECKSUM 0x72
+#define ADV7511_REG_AUDIO_INFOFRAME(x) (0x73 + (x)) /* 0x73 - 0x7c */
+#define ADV7511_REG_INT_ENABLE(x) (0x94 + (x))
+#define ADV7511_REG_INT(x) (0x96 + (x))
+#define ADV7511_REG_INPUT_CLK_DIV 0x9d
+#define ADV7511_REG_PLL_STATUS 0x9e
+#define ADV7511_REG_HDMI_POWER 0xa1
+#define ADV7511_REG_HDCP_HDMI_CFG 0xaf
+#define ADV7511_REG_AN(x) (0xb0 + (x)) /* 0xb0 - 0xb7 */
+#define ADV7511_REG_HDCP_STATUS 0xb8
+#define ADV7511_REG_BCAPS 0xbe
+#define ADV7511_REG_BKSV(x) (0xc0 + (x)) /* 0xc0 - 0xc3 */
+#define ADV7511_REG_EDID_SEGMENT 0xc4
+#define ADV7511_REG_DDC_STATUS 0xc8
+#define ADV7511_REG_EDID_READ_CTRL 0xc9
+#define ADV7511_REG_BSTATUS(x) (0xca + (x)) /* 0xca - 0xcb */
+#define ADV7511_REG_TIMING_GEN_SEQ 0xd0
+#define ADV7511_REG_POWER2 0xd6
+#define ADV7511_REG_HSYNC_PLACEMENT_MSB 0xfa
+
+#define ADV7511_REG_SYNC_ADJUSTMENT(x) (0xd7 + (x)) /* 0xd7 - 0xdc */
+#define ADV7511_REG_TMDS_CLOCK_INV 0xde
+#define ADV7511_REG_ARC_CTRL 0xdf
+#define ADV7511_REG_CEC_I2C_ADDR 0xe1
+#define ADV7511_REG_CEC_CTRL 0xe2
+#define ADV7511_REG_CHIP_ID_HIGH 0xf5
+#define ADV7511_REG_CHIP_ID_LOW 0xf6
+
+#define ADV7511_CSC_ENABLE BIT(7)
+#define ADV7511_CSC_UPDATE_MODE BIT(5)
+
+#define ADV7511_INT0_HDP BIT(7)
+#define ADV7511_INT0_VSYNC BIT(5)
+#define ADV7511_INT0_AUDIO_FIFO_FULL BIT(4)
+#define ADV7511_INT0_EDID_READY BIT(2)
+#define ADV7511_INT0_HDCP_AUTHENTICATED BIT(1)
+
+#define ADV7511_INT1_DDC_ERROR BIT(7)
+#define ADV7511_INT1_BKSV BIT(6)
+#define ADV7511_INT1_CEC_TX_READY BIT(5)
+#define ADV7511_INT1_CEC_TX_ARBIT_LOST BIT(4)
+#define ADV7511_INT1_CEC_TX_RETRY_TIMEOUT BIT(3)
+#define ADV7511_INT1_CEC_RX_READY3 BIT(2)
+#define ADV7511_INT1_CEC_RX_READY2 BIT(1)
+#define ADV7511_INT1_CEC_RX_READY1 BIT(0)
+
+#define ADV7511_ARC_CTRL_POWER_DOWN BIT(0)
+
+#define ADV7511_CEC_CTRL_POWER_DOWN BIT(0)
+
+#define ADV7511_POWER_POWER_DOWN BIT(6)
+
+#define ADV7511_HDMI_CFG_MODE_MASK 0x2
+#define ADV7511_HDMI_CFG_MODE_DVI 0x0
+#define ADV7511_HDMI_CFG_MODE_HDMI 0x2
+
+#define ADV7511_AUDIO_SELECT_I2C 0x0
+#define ADV7511_AUDIO_SELECT_SPDIF 0x1
+#define ADV7511_AUDIO_SELECT_DSD 0x2
+#define ADV7511_AUDIO_SELECT_HBR 0x3
+#define ADV7511_AUDIO_SELECT_DST 0x4
+
+#define ADV7511_I2S_SAMPLE_LEN_16 0x2
+#define ADV7511_I2S_SAMPLE_LEN_20 0x3
+#define ADV7511_I2S_SAMPLE_LEN_18 0x4
+#define ADV7511_I2S_SAMPLE_LEN_22 0x5
+#define ADV7511_I2S_SAMPLE_LEN_19 0x8
+#define ADV7511_I2S_SAMPLE_LEN_23 0x9
+#define ADV7511_I2S_SAMPLE_LEN_24 0xb
+#define ADV7511_I2S_SAMPLE_LEN_17 0xc
+#define ADV7511_I2S_SAMPLE_LEN_21 0xd
+
+#define ADV7511_SAMPLE_FREQ_44100 0x0
+#define ADV7511_SAMPLE_FREQ_48000 0x2
+#define ADV7511_SAMPLE_FREQ_32000 0x3
+#define ADV7511_SAMPLE_FREQ_88200 0x8
+#define ADV7511_SAMPLE_FREQ_96000 0xa
+#define ADV7511_SAMPLE_FREQ_176400 0xc
+#define ADV7511_SAMPLE_FREQ_192000 0xe
+
+#define ADV7511_STATUS_POWER_DOWN_POLARITY BIT(7)
+#define ADV7511_STATUS_HPD BIT(6)
+#define ADV7511_STATUS_MONITOR_SENSE BIT(5)
+#define ADV7511_STATUS_I2S_32BIT_MODE BIT(3)
+
+#define ADV7511_PACKET_ENABLE_N_CTS BIT(8+6)
+#define ADV7511_PACKET_ENABLE_AUDIO_SAMPLE BIT(8+5)
+#define ADV7511_PACKET_ENABLE_AVI_INFOFRAME BIT(8+4)
+#define ADV7511_PACKET_ENABLE_AUDIO_INFOFRAME BIT(8+3)
+#define ADV7511_PACKET_ENABLE_GC BIT(7)
+#define ADV7511_PACKET_ENABLE_SPD BIT(6)
+#define ADV7511_PACKET_ENABLE_MPEG BIT(5)
+#define ADV7511_PACKET_ENABLE_ACP BIT(4)
+#define ADV7511_PACKET_ENABLE_ISRC BIT(3)
+#define ADV7511_PACKET_ENABLE_GM BIT(2)
+#define ADV7511_PACKET_ENABLE_SPARE2 BIT(1)
+#define ADV7511_PACKET_ENABLE_SPARE1 BIT(0)
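+/*
+ * The packet enable bits above form a single 16-bit value: bits 0-7
+ * presumably land in ADV7511_REG_PACKET_ENABLE0 and the BIT(8 + n)
+ * definitions in ADV7511_REG_PACKET_ENABLE1 (the value shifted right by 8).
+ */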
+
+#define ADV7511_REG_POWER2_HDP_SRC_MASK 0xc0
+#define ADV7511_REG_POWER2_HDP_SRC_BOTH 0x00
+#define ADV7511_REG_POWER2_HDP_SRC_HDP 0x40
+#define ADV7511_REG_POWER2_HDP_SRC_CEC 0x80
+#define ADV7511_REG_POWER2_HDP_SRC_NONE 0xc0
+#define ADV7511_REG_POWER2_TDMS_ENABLE BIT(4)
+#define ADV7511_REG_POWER2_GATE_INPUT_CLK BIT(0)
+
+#define ADV7511_LOW_REFRESH_RATE_NONE 0x0
+#define ADV7511_LOW_REFRESH_RATE_24HZ 0x1
+#define ADV7511_LOW_REFRESH_RATE_25HZ 0x2
+#define ADV7511_LOW_REFRESH_RATE_30HZ 0x3
+
+#define ADV7511_AUDIO_CFG3_LEN_MASK 0x0f
+#define ADV7511_I2C_FREQ_ID_CFG_RATE_MASK 0xf0
+
+#define ADV7511_AUDIO_SOURCE_I2S 0
+#define ADV7511_AUDIO_SOURCE_SPDIF 1
+
+#define ADV7511_I2S_FORMAT_I2S 0
+#define ADV7511_I2S_FORMAT_RIGHT_J 1
+#define ADV7511_I2S_FORMAT_LEFT_J 2
+
+#define ADV7511_PACKET(p, x) ((p) * 0x20 + (x))
+#define ADV7511_PACKET_SDP(x) ADV7511_PACKET(0, x)
+#define ADV7511_PACKET_MPEG(x) ADV7511_PACKET(1, x)
+#define ADV7511_PACKET_ACP(x) ADV7511_PACKET(2, x)
+#define ADV7511_PACKET_ISRC1(x) ADV7511_PACKET(3, x)
+#define ADV7511_PACKET_ISRC2(x) ADV7511_PACKET(4, x)
+#define ADV7511_PACKET_GM(x) ADV7511_PACKET(5, x)
+#define ADV7511_PACKET_SPARE(x) ADV7511_PACKET(6, x)
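+/*
+ * Each packet buffer spans 0x20 bytes, so for example ADV7511_PACKET_ACP(0)
+ * evaluates to 2 * 0x20 + 0 = 0x40 and ADV7511_PACKET_GM(3) to
+ * 5 * 0x20 + 3 = 0xa3; these offsets presumably index the secondary
+ * packet-memory map selected through ADV7511_REG_PACKET_I2C_ADDR.
+ */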
+
+enum adv7511_input_clock {
+ ADV7511_INPUT_CLOCK_1X,
+ ADV7511_INPUT_CLOCK_2X,
+ ADV7511_INPUT_CLOCK_DDR,
+};
+
+enum adv7511_input_justification {
+ ADV7511_INPUT_JUSTIFICATION_EVENLY = 0,
+ ADV7511_INPUT_JUSTIFICATION_RIGHT = 1,
+ ADV7511_INPUT_JUSTIFICATION_LEFT = 2,
+};
+
+enum adv7511_input_sync_pulse {
+ ADV7511_INPUT_SYNC_PULSE_DE = 0,
+ ADV7511_INPUT_SYNC_PULSE_HSYNC = 1,
+ ADV7511_INPUT_SYNC_PULSE_VSYNC = 2,
+ ADV7511_INPUT_SYNC_PULSE_NONE = 3,
+};
+
+/**
+ * enum adv7511_sync_polarity - Polarity for the input sync signals
+ * @ADV7511_SYNC_POLARITY_PASSTHROUGH: Sync polarity matches that of
+ * the currently configured mode.
+ * @ADV7511_SYNC_POLARITY_LOW: Sync polarity is low
+ * @ADV7511_SYNC_POLARITY_HIGH: Sync polarity is high
+ *
+ * If the polarity is set to either LOW or HIGH the driver will configure the
+ * ADV7511 to internally invert the sync signal if required to match the sync
+ * polarity setting for the currently selected output mode.
+ *
+ * If the polarity is set to PASSTHROUGH, the ADV7511 will route the signal
+ * unchanged. This is used when the upstream graphics core already generates
+ * the sync signals with the correct polarity.
+ */
+enum adv7511_sync_polarity {
+ ADV7511_SYNC_POLARITY_PASSTHROUGH,
+ ADV7511_SYNC_POLARITY_LOW,
+ ADV7511_SYNC_POLARITY_HIGH,
+};
+
+/**
+ * struct adv7511_link_config - Describes adv7511 video input configuration
+ * @input_color_depth: Number of bits per color component (8, 10 or 12)
+ * @input_colorspace: The input colorspace (RGB, YUV444, YUV422)
+ * @input_clock: The input video clock style (1x, 2x, DDR)
+ * @input_style: The input component arrangement variant
+ * @input_justification: Video input format bit justification
+ * @clock_delay: Clock delay for the input clock (in ps)
+ * @embedded_sync: Video input uses BT.656-style embedded sync
+ * @sync_pulse: Select the sync pulse
+ * @vsync_polarity: vsync input signal configuration
+ * @hsync_polarity: hsync input signal configuration
+ */
+struct adv7511_link_config {
+ unsigned int input_color_depth;
+ enum hdmi_colorspace input_colorspace;
+ enum adv7511_input_clock input_clock;
+ unsigned int input_style;
+ enum adv7511_input_justification input_justification;
+
+ int clock_delay;
+
+ bool embedded_sync;
+ enum adv7511_input_sync_pulse sync_pulse;
+ enum adv7511_sync_polarity vsync_polarity;
+ enum adv7511_sync_polarity hsync_polarity;
+};
+
+/**
+ * enum adv7511_csc_scaling - Scaling factor for the ADV7511 CSC
+ * @ADV7511_CSC_SCALING_1: CSC results are not scaled
+ * @ADV7511_CSC_SCALING_2: CSC results are scaled by a factor of two
+ * @ADV7511_CSC_SCALING_4: CSC results are scaled by a factor of four
+ */
+enum adv7511_csc_scaling {
+ ADV7511_CSC_SCALING_1 = 0,
+ ADV7511_CSC_SCALING_2 = 1,
+ ADV7511_CSC_SCALING_4 = 2,
+};
+
+/**
+ * struct adv7511_video_config - Describes adv7511 video output configuration
+ * @csc_enable: Whether to enable color space conversion
+ * @csc_scaling_factor: Color space conversion scaling factor
+ * @csc_coefficents: Color space conversion coefficients
+ * @hdmi_mode: Whether to use HDMI or DVI output mode
+ * @avi_infoframe: The AVI infoframe to transmit
+ */
+struct adv7511_video_config {
+ bool csc_enable;
+ enum adv7511_csc_scaling csc_scaling_factor;
+ const uint16_t *csc_coefficents;
+
+ bool hdmi_mode;
+ struct hdmi_avi_infoframe avi_infoframe;
+};
+
+#endif /* __DRM_I2C_ADV7511_H__ */
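For illustration only (this block is not part of the patch), a minimal board-file initializer for the link configuration declared above could look as follows; the constants come from this header except HDMI_COLORSPACE_RGB, which <linux/hdmi.h> provides, and the variable name and field values are arbitrary examples:

static const struct adv7511_link_config example_link_config = {
 .input_color_depth = 8,
 .input_colorspace = HDMI_COLORSPACE_RGB,
 .input_clock = ADV7511_INPUT_CLOCK_1X,
 .input_style = 1,
 .input_justification = ADV7511_INPUT_JUSTIFICATION_EVENLY,
 .clock_delay = 0,
 .embedded_sync = false,
 .sync_pulse = ADV7511_INPUT_SYNC_PULSE_NONE,
 .vsync_polarity = ADV7511_SYNC_POLARITY_PASSTHROUGH,
 .hsync_polarity = ADV7511_SYNC_POLARITY_PASSTHROUGH,
};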
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index c1dd485aeb6..e4083e41a60 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -11,7 +11,9 @@ i915-y := i915_drv.o \
i915_params.o \
i915_suspend.o \
i915_sysfs.o \
- intel_pm.o
+ intel_pm.o \
+ intel_runtime_pm.o
+
i915-$(CONFIG_COMPAT) += i915_ioc32.o
i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
@@ -38,13 +40,18 @@ i915-y += i915_cmd_parser.o \
# autogenerated null render state
i915-y += intel_renderstate_gen6.o \
intel_renderstate_gen7.o \
- intel_renderstate_gen8.o
+ intel_renderstate_gen8.o \
+ intel_renderstate_gen9.o
# modesetting core code
-i915-y += intel_bios.o \
+i915-y += intel_audio.o \
+ intel_bios.o \
intel_display.o \
+ intel_fifo_underrun.o \
+ intel_frontbuffer.o \
intel_modes.o \
intel_overlay.o \
+ intel_psr.o \
intel_sideband.o \
intel_sprite.o
i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 593b657d3e5..22c992a78ac 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -73,7 +73,7 @@
* those commands required by the parser. This generally works because command
* opcode ranges have standard command length encodings. So for commands that
* the parser does not need to check, it can easily skip them. This is
- * implementated via a per-ring length decoding vfunc.
+ * implemented via a per-ring length decoding vfunc.
*
* Unfortunately, there are a number of commands that do not follow the standard
* length encoding for their opcode range, primarily amongst the MI_* commands.
@@ -138,6 +138,11 @@ static const struct drm_i915_cmd_descriptor common_cmds[] = {
.mask = MI_GLOBAL_GTT,
.expected = 0,
}}, ),
+ /*
+ * MI_BATCH_BUFFER_START requires some special handling. It's not
+ * really a 'skip' action but it doesn't seem like it's worth adding
+ * a new action. See i915_parse_cmds().
+ */
CMD( MI_BATCH_BUFFER_START, SMI, !F, 0xFF, S ),
};
@@ -408,6 +413,8 @@ static const u32 gen7_render_regs[] = {
REG64(PS_INVOCATION_COUNT),
REG64(PS_DEPTH_COUNT),
OACONTROL, /* Only allowed for LRI and SRM. See below. */
+ REG64(MI_PREDICATE_SRC0),
+ REG64(MI_PREDICATE_SRC1),
GEN7_3DPRIM_END_OFFSET,
GEN7_3DPRIM_START_VERTEX,
GEN7_3DPRIM_VERTEX_COUNT,
@@ -838,7 +845,7 @@ finish:
* @ring: the ring in question
*
* Only certain platforms require software batch buffer command parsing, and
- * only when enabled via module paramter.
+ * only when enabled via module parameter.
*
* Return: true if the ring requires software command parsing
*/
@@ -847,12 +854,7 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
if (!ring->needs_cmd_parser)
return false;
- /*
- * XXX: VLV is Gen7 and therefore has cmd_tables, but has PPGTT
- * disabled. That will cause all of the parser's PPGTT checks to
- * fail. For now, disable parsing when PPGTT is off.
- */
- if (USES_PPGTT(ring->dev))
+ if (!USES_PPGTT(ring->dev))
return false;
return (i915.enable_cmd_parser == 1);
@@ -888,8 +890,10 @@ static bool check_cmd(const struct intel_engine_cs *ring,
* OACONTROL writes to only MI_LOAD_REGISTER_IMM commands.
*/
if (reg_addr == OACONTROL) {
- if (desc->cmd.value == MI_LOAD_REGISTER_MEM)
+ if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
+ DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
return false;
+ }
if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
*oacontrol_set = (cmd[2] != 0);
@@ -958,7 +962,8 @@ static bool check_cmd(const struct intel_engine_cs *ring,
* Parses the specified batch buffer looking for privilege violations as
* described in the overview.
*
- * Return: non-zero if the parser finds violations or otherwise fails
+ * Return: non-zero if the parser finds violations or otherwise fails; -EACCES
+ * if the batch appears legal but should use hardware parsing
*/
int i915_parse_cmds(struct intel_engine_cs *ring,
struct drm_i915_gem_object *batch_obj,
@@ -1005,6 +1010,16 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
break;
}
+ /*
+ * If the batch buffer contains a chained batch, return an
+ * error that tells the caller to abort and dispatch the
+ * workload as a non-secure batch.
+ */
+ if (desc->cmd.value == MI_BATCH_BUFFER_START) {
+ ret = -EACCES;
+ break;
+ }
+
if (desc->flags & CMD_DESC_FIXED)
length = desc->length.fixed;
else
@@ -1059,6 +1074,8 @@ int i915_cmd_parser_get_version(void)
*
* 1. Initial version. Checks batches and reports violations, but leaves
* hardware parsing enabled (so does not allow new use cases).
+ * 2. Allow access to the MI_PREDICATE_SRC0 and
+ * MI_PREDICATE_SRC1 registers.
*/
- return 1;
+ return 2;
}
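As a sketch only (the execbuffer side is not part of this hunk), a caller honouring the new -EACCES contract might handle the value returned by i915_parse_cmds() roughly like this, where dispatch_flags is a hypothetical local and I915_DISPATCH_SECURE the existing secure-dispatch flag:

 if (ret == -EACCES) {
  /*
   * i915_parse_cmds() flagged a chained batch: rather than
   * rejecting the workload, dispatch it as a normal,
   * non-secure batch so the hardware parser takes over.
   */
  dispatch_flags &= ~I915_DISPATCH_SECURE;
  ret = 0;
 } else if (ret) {
  return ret;
 }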
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 063b44817e0..779a275eb1f 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -116,7 +116,7 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
{
- return obj->has_global_gtt_mapping ? "g" : " ";
+ return i915_gem_obj_to_ggtt(obj) ? "g" : " ";
}
static void
@@ -516,7 +516,6 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long flags;
struct intel_crtc *crtc;
int ret;
@@ -529,7 +528,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
const char plane = plane_name(crtc->plane);
struct intel_unpin_work *work;
- spin_lock_irqsave(&dev->event_lock, flags);
+ spin_lock_irq(&dev->event_lock);
work = crtc->unpin_work;
if (work == NULL) {
seq_printf(m, "No flip due on pipe %c (plane %c)\n",
@@ -575,7 +574,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
seq_printf(m, "MMIO update completed? %d\n", addr == work->gtt_offset);
}
}
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ spin_unlock_irq(&dev->event_lock);
}
mutex_unlock(&dev->struct_mutex);
@@ -717,7 +716,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
}
for_each_pipe(dev_priv, pipe) {
- if (!intel_display_power_enabled(dev_priv,
+ if (!intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe))) {
seq_printf(m, "Pipe %c power disabled\n",
pipe_name(pipe));
@@ -1241,11 +1240,12 @@ static int vlv_drpc_info(struct seq_file *m)
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 rpmodectl1, rcctl1;
+ u32 rpmodectl1, rcctl1, pw_status;
unsigned fw_rendercount = 0, fw_mediacount = 0;
intel_runtime_pm_get(dev_priv);
+ pw_status = I915_READ(VLV_GTLC_PW_STATUS);
rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
rcctl1 = I915_READ(GEN6_RC_CONTROL);
@@ -1264,11 +1264,9 @@ static int vlv_drpc_info(struct seq_file *m)
yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
GEN6_RC_CTL_EI_MODE(1))));
seq_printf(m, "Render Power Well: %s\n",
- (I915_READ(VLV_GTLC_PW_STATUS) &
- VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
+ (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
seq_printf(m, "Media Power Well: %s\n",
- (I915_READ(VLV_GTLC_PW_STATUS) &
- VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
+ (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
seq_printf(m, "Render RC6 residency since boot: %u\n",
I915_READ(VLV_GT_RENDER_RC6));
@@ -1774,6 +1772,50 @@ static int i915_context_status(struct seq_file *m, void *unused)
return 0;
}
+static void i915_dump_lrc_obj(struct seq_file *m,
+ struct intel_engine_cs *ring,
+ struct drm_i915_gem_object *ctx_obj)
+{
+ struct page *page;
+ uint32_t *reg_state;
+ int j;
+ unsigned long ggtt_offset = 0;
+
+ if (ctx_obj == NULL) {
+ seq_printf(m, "Context on %s with no gem object\n",
+ ring->name);
+ return;
+ }
+
+ seq_printf(m, "CONTEXT: %s %u\n", ring->name,
+ intel_execlists_ctx_id(ctx_obj));
+
+ if (!i915_gem_obj_ggtt_bound(ctx_obj))
+ seq_puts(m, "\tNot bound in GGTT\n");
+ else
+ ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj);
+
+ if (i915_gem_object_get_pages(ctx_obj)) {
+ seq_puts(m, "\tFailed to get pages for context object\n");
+ return;
+ }
+
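+ /*
+ * The register state of a logical ring context is assumed to start in
+ * the second page of the context object (page 0 holding the per-context
+ * hardware status page), hence page index 1 here and the "+ 4096" added
+ * to the GGTT offsets printed below.
+ */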
+ page = i915_gem_object_get_page(ctx_obj, 1);
+ if (!WARN_ON(page == NULL)) {
+ reg_state = kmap_atomic(page);
+
+ for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
+ seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ ggtt_offset + 4096 + (j * 4),
+ reg_state[j], reg_state[j + 1],
+ reg_state[j + 2], reg_state[j + 3]);
+ }
+ kunmap_atomic(reg_state);
+ }
+
+ seq_putc(m, '\n');
+}
+
static int i915_dump_lrc(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1794,29 +1836,9 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
list_for_each_entry(ctx, &dev_priv->context_list, link) {
for_each_ring(ring, dev_priv, i) {
- struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
-
- if (ring->default_context == ctx)
- continue;
-
- if (ctx_obj) {
- struct page *page = i915_gem_object_get_page(ctx_obj, 1);
- uint32_t *reg_state = kmap_atomic(page);
- int j;
-
- seq_printf(m, "CONTEXT: %s %u\n", ring->name,
- intel_execlists_ctx_id(ctx_obj));
-
- for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
- seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
- i915_gem_obj_ggtt_offset(ctx_obj) + 4096 + (j * 4),
- reg_state[j], reg_state[j + 1],
- reg_state[j + 2], reg_state[j + 3]);
- }
- kunmap_atomic(reg_state);
-
- seq_putc(m, '\n');
- }
+ if (ring->default_context != ctx)
+ i915_dump_lrc_obj(m, ring,
+ ctx->engine[i].state);
}
}
@@ -1849,6 +1871,8 @@ static int i915_execlists(struct seq_file *m, void *data)
if (ret)
return ret;
+ intel_runtime_pm_get(dev_priv);
+
for_each_ring(ring, dev_priv, ring_id) {
struct intel_ctx_submit_request *head_req = NULL;
int count = 0;
@@ -1900,6 +1924,7 @@ static int i915_execlists(struct seq_file *m, void *data)
seq_putc(m, '\n');
}
+ intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -1973,6 +1998,8 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
if (IS_GEN3(dev) || IS_GEN4(dev)) {
seq_printf(m, "DDC = 0x%08x\n",
I915_READ(DCC));
+ seq_printf(m, "DDC2 = 0x%08x\n",
+ I915_READ(DCC2));
seq_printf(m, "C0DRB3 = 0x%04x\n",
I915_READ16(C0DRB3));
seq_printf(m, "C1DRB3 = 0x%04x\n",
@@ -1986,7 +2013,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
I915_READ(MAD_DIMM_C2));
seq_printf(m, "TILECTL = 0x%08x\n",
I915_READ(TILECTL));
- if (IS_GEN8(dev))
+ if (INTEL_INFO(dev)->gen >= 8)
seq_printf(m, "GAMTARBMODE = 0x%08x\n",
I915_READ(GAMTARBMODE));
else
@@ -1995,6 +2022,10 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
I915_READ(DISP_ARB_CTL));
}
+
+ if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+ seq_puts(m, "L-shaped memory detected\n");
+
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
@@ -2628,14 +2659,15 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
- seq_printf(m, " refcount: %i, active: %i, on: %s\n", pll->refcount,
- pll->active, yesno(pll->on));
+ seq_printf(m, " crtc_mask: 0x%08x, active: %d, on: %s\n",
+ pll->config.crtc_mask, pll->active, yesno(pll->on));
seq_printf(m, " tracked hardware state:\n");
- seq_printf(m, " dpll: 0x%08x\n", pll->hw_state.dpll);
- seq_printf(m, " dpll_md: 0x%08x\n", pll->hw_state.dpll_md);
- seq_printf(m, " fp0: 0x%08x\n", pll->hw_state.fp0);
- seq_printf(m, " fp1: 0x%08x\n", pll->hw_state.fp1);
- seq_printf(m, " wrpll: 0x%08x\n", pll->hw_state.wrpll);
+ seq_printf(m, " dpll: 0x%08x\n", pll->config.hw_state.dpll);
+ seq_printf(m, " dpll_md: 0x%08x\n",
+ pll->config.hw_state.dpll_md);
+ seq_printf(m, " fp0: 0x%08x\n", pll->config.hw_state.fp0);
+ seq_printf(m, " fp1: 0x%08x\n", pll->config.hw_state.fp1);
+ seq_printf(m, " wrpll: 0x%08x\n", pll->config.hw_state.wrpll);
}
drm_modeset_unlock_all(dev);
@@ -2656,18 +2688,18 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
- seq_printf(m, "Workarounds applied: %d\n", dev_priv->num_wa_regs);
- for (i = 0; i < dev_priv->num_wa_regs; ++i) {
- u32 addr, mask;
-
- addr = dev_priv->intel_wa_regs[i].addr;
- mask = dev_priv->intel_wa_regs[i].mask;
- dev_priv->intel_wa_regs[i].value = I915_READ(addr) | mask;
- if (dev_priv->intel_wa_regs[i].addr)
- seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
- dev_priv->intel_wa_regs[i].addr,
- dev_priv->intel_wa_regs[i].value,
- dev_priv->intel_wa_regs[i].mask);
+ seq_printf(m, "Workarounds applied: %d\n", dev_priv->workarounds.count);
+ for (i = 0; i < dev_priv->workarounds.count; ++i) {
+ u32 addr, mask, value, read;
+ bool ok;
+
+ addr = dev_priv->workarounds.reg[i].addr;
+ mask = dev_priv->workarounds.reg[i].mask;
+ value = dev_priv->workarounds.reg[i].value;
+ read = I915_READ(addr);
+ ok = (value & mask) == (read & mask);
+ seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
+ addr, value, mask, read, ok ? "OK" : "FAIL");
}
intel_runtime_pm_put(dev_priv);
@@ -2676,6 +2708,42 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
return 0;
}
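+
+/*
+ * Dump the display data buffer (DDB) allocation kept in
+ * dev_priv->wm.skl_hw.ddb: per pipe, the start, end and size of each
+ * plane's and the cursor's slice of the buffer (Skylake-style watermark
+ * bookkeeping).
+ */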
+static int i915_ddb_info(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct skl_ddb_allocation *ddb;
+ struct skl_ddb_entry *entry;
+ enum pipe pipe;
+ int plane;
+
+ drm_modeset_lock_all(dev);
+
+ ddb = &dev_priv->wm.skl_hw.ddb;
+
+ seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
+
+ for_each_pipe(dev_priv, pipe) {
+ seq_printf(m, "Pipe %c\n", pipe_name(pipe));
+
+ for_each_plane(pipe, plane) {
+ entry = &ddb->plane[pipe][plane];
+ seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1,
+ entry->start, entry->end,
+ skl_ddb_entry_size(entry));
+ }
+
+ entry = &ddb->cursor[pipe];
+ seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
+ entry->end, skl_ddb_entry_size(entry));
+ }
+
+ drm_modeset_unlock_all(dev);
+
+ return 0;
+}
+
struct pipe_crc_info {
const char *name;
struct drm_device *dev;
@@ -2969,6 +3037,8 @@ static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
break;
}
break;
+ default:
+ break;
}
}
drm_modeset_unlock_all(dev);
@@ -3256,6 +3326,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
+ struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
+ pipe));
u32 val = 0; /* shut up gcc */
int ret;
@@ -3266,6 +3338,11 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
if (pipe_crc->source && source)
return -EINVAL;
+ if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) {
+ DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
+ return -EIO;
+ }
+
if (IS_GEN2(dev))
ret = i8xx_pipe_crc_ctl_reg(&source, &val);
else if (INTEL_INFO(dev)->gen < 5)
@@ -3291,6 +3368,14 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
if (!pipe_crc->entries)
return -ENOMEM;
+ /*
+ * When IPS gets enabled, the pipe CRC changes. Since IPS gets
+ * enabled and disabled dynamically based on package C states,
+ * user space can't make reliable use of the CRCs, so let's just
+ * completely disable it.
+ */
+ hsw_disable_ips(crtc);
+
spin_lock_irq(&pipe_crc->lock);
pipe_crc->head = 0;
pipe_crc->tail = 0;
@@ -3329,6 +3414,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
vlv_undo_pipe_scramble_reset(dev, pipe);
else if (IS_HASWELL(dev) && pipe == PIPE_A)
hsw_undo_trans_edp_pipe_A_crc_wa(dev);
+
+ hsw_enable_ips(crtc);
}
return 0;
@@ -3506,7 +3593,7 @@ static const struct file_operations i915_display_crc_ctl_fops = {
.write = display_crc_ctl_write
};
-static void wm_latency_show(struct seq_file *m, const uint16_t wm[5])
+static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
{
struct drm_device *dev = m->private;
int num_levels = ilk_wm_max_level(dev) + 1;
@@ -3517,13 +3604,17 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[5])
for (level = 0; level < num_levels; level++) {
unsigned int latency = wm[level];
- /* WM1+ latency values in 0.5us units */
- if (level > 0)
+ /*
+ * - WM1+ latency values in 0.5us units
+ * - latencies are in us on gen9
+ */
+ if (INTEL_INFO(dev)->gen >= 9)
+ latency *= 10;
+ else if (level > 0)
latency *= 5;
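+ /*
+ * After this scaling, latency is in tenths of a microsecond: a gen9
+ * value of 4 (us) becomes 40 and a WM1+ value of 4 (0.5 us steps)
+ * becomes 20, which the print below shows as "4.0" and "2.0" usec.
+ */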
seq_printf(m, "WM%d %u (%u.%u usec)\n",
- level, wm[level],
- latency / 10, latency % 10);
+ level, wm[level], latency / 10, latency % 10);
}
drm_modeset_unlock_all(dev);
@@ -3532,8 +3623,15 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[5])
static int pri_wm_latency_show(struct seq_file *m, void *data)
{
struct drm_device *dev = m->private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const uint16_t *latencies;
+
+ if (INTEL_INFO(dev)->gen >= 9)
+ latencies = dev_priv->wm.skl_latency;
+ else
+ latencies = to_i915(dev)->wm.pri_latency;
- wm_latency_show(m, to_i915(dev)->wm.pri_latency);
+ wm_latency_show(m, latencies);
return 0;
}
@@ -3541,8 +3639,15 @@ static int pri_wm_latency_show(struct seq_file *m, void *data)
static int spr_wm_latency_show(struct seq_file *m, void *data)
{
struct drm_device *dev = m->private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const uint16_t *latencies;
+
+ if (INTEL_INFO(dev)->gen >= 9)
+ latencies = dev_priv->wm.skl_latency;
+ else
+ latencies = to_i915(dev)->wm.spr_latency;
- wm_latency_show(m, to_i915(dev)->wm.spr_latency);
+ wm_latency_show(m, latencies);
return 0;
}
@@ -3550,8 +3655,15 @@ static int spr_wm_latency_show(struct seq_file *m, void *data)
static int cur_wm_latency_show(struct seq_file *m, void *data)
{
struct drm_device *dev = m->private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const uint16_t *latencies;
+
+ if (INTEL_INFO(dev)->gen >= 9)
+ latencies = dev_priv->wm.skl_latency;
+ else
+ latencies = to_i915(dev)->wm.cur_latency;
- wm_latency_show(m, to_i915(dev)->wm.cur_latency);
+ wm_latency_show(m, latencies);
return 0;
}
@@ -3587,11 +3699,11 @@ static int cur_wm_latency_open(struct inode *inode, struct file *file)
}
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp, uint16_t wm[5])
+ size_t len, loff_t *offp, uint16_t wm[8])
{
struct seq_file *m = file->private_data;
struct drm_device *dev = m->private;
- uint16_t new[5] = { 0 };
+ uint16_t new[8] = { 0 };
int num_levels = ilk_wm_max_level(dev) + 1;
int level;
int ret;
@@ -3605,7 +3717,9 @@ static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
tmp[len] = '\0';
- ret = sscanf(tmp, "%hu %hu %hu %hu %hu", &new[0], &new[1], &new[2], &new[3], &new[4]);
+ ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
+ &new[0], &new[1], &new[2], &new[3],
+ &new[4], &new[5], &new[6], &new[7]);
if (ret != num_levels)
return -EINVAL;
@@ -3625,8 +3739,15 @@ static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
{
struct seq_file *m = file->private_data;
struct drm_device *dev = m->private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint16_t *latencies;
- return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.pri_latency);
+ if (INTEL_INFO(dev)->gen >= 9)
+ latencies = dev_priv->wm.skl_latency;
+ else
+ latencies = to_i915(dev)->wm.pri_latency;
+
+ return wm_latency_write(file, ubuf, len, offp, latencies);
}
static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
@@ -3634,8 +3755,15 @@ static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
{
struct seq_file *m = file->private_data;
struct drm_device *dev = m->private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint16_t *latencies;
- return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.spr_latency);
+ if (INTEL_INFO(dev)->gen >= 9)
+ latencies = dev_priv->wm.skl_latency;
+ else
+ latencies = to_i915(dev)->wm.spr_latency;
+
+ return wm_latency_write(file, ubuf, len, offp, latencies);
}
static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
@@ -3643,8 +3771,15 @@ static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
{
struct seq_file *m = file->private_data;
struct drm_device *dev = m->private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint16_t *latencies;
+
+ if (INTEL_INFO(dev)->gen >= 9)
+ latencies = dev_priv->wm.skl_latency;
+ else
+ latencies = to_i915(dev)->wm.cur_latency;
- return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.cur_latency);
+ return wm_latency_write(file, ubuf, len, offp, latencies);
}
static const struct file_operations i915_pri_wm_latency_fops = {
@@ -4187,6 +4322,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
{"i915_dp_mst_info", i915_dp_mst_info, 0},
{"i915_wa_registers", i915_wa_registers, 0},
+ {"i915_ddb_info", i915_ddb_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 318ade9bb5a..ecee3bcc877 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -50,884 +50,6 @@
#include <linux/pm_runtime.h>
#include <linux/oom.h>
-#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
-
-#define BEGIN_LP_RING(n) \
- intel_ring_begin(LP_RING(dev_priv), (n))
-
-#define OUT_RING(x) \
- intel_ring_emit(LP_RING(dev_priv), x)
-
-#define ADVANCE_LP_RING() \
- __intel_ring_advance(LP_RING(dev_priv))
-
-/**
- * Lock test for when it's just for synchronization of ring access.
- *
- * In that case, we don't need to do it when GEM is initialized as nobody else
- * has access to the ring.
- */
-#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
- if (LP_RING(dev->dev_private)->buffer->obj == NULL) \
- LOCK_TEST_WITH_RETURN(dev, file); \
-} while (0)
-
-static inline u32
-intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
-{
- if (I915_NEED_GFX_HWS(dev_priv->dev))
- return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg);
- else
- return intel_read_status_page(LP_RING(dev_priv), reg);
-}
-
-#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
-#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
-#define I915_BREADCRUMB_INDEX 0x21
-
-void i915_update_dri1_breadcrumb(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv;
-
- /*
- * The dri breadcrumb update races against the drm master disappearing.
- * Instead of trying to fix this (this is by far not the only ums issue)
- * just don't do the update in kms mode.
- */
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return;
-
- if (dev->primary->master) {
- master_priv = dev->primary->master->driver_priv;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_dispatch =
- READ_BREADCRUMB(dev_priv);
- }
-}
-
-static void i915_write_hws_pga(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 addr;
-
- addr = dev_priv->status_page_dmah->busaddr;
- if (INTEL_INFO(dev)->gen >= 4)
- addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
- I915_WRITE(HWS_PGA, addr);
-}
-
-/**
- * Frees the hardware status page, whether it's a physical address or a virtual
- * address set up by the X Server.
- */
-static void i915_free_hws(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = LP_RING(dev_priv);
-
- if (dev_priv->status_page_dmah) {
- drm_pci_free(dev, dev_priv->status_page_dmah);
- dev_priv->status_page_dmah = NULL;
- }
-
- if (ring->status_page.gfx_addr) {
- ring->status_page.gfx_addr = 0;
- iounmap(dev_priv->dri1.gfx_hws_cpu_addr);
- }
-
- /* Need to rewrite hardware status page */
- I915_WRITE(HWS_PGA, 0x1ffff000);
-}
-
-void i915_kernel_lost_context(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv;
- struct intel_engine_cs *ring = LP_RING(dev_priv);
- struct intel_ringbuffer *ringbuf = ring->buffer;
-
- /*
- * We should never lose context on the ring with modesetting
- * as we don't expose it to userspace
- */
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return;
-
- ringbuf->head = I915_READ_HEAD(ring) & HEAD_ADDR;
- ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
- ringbuf->space = ringbuf->head - (ringbuf->tail + I915_RING_FREE_SPACE);
- if (ringbuf->space < 0)
- ringbuf->space += ringbuf->size;
-
- if (!dev->primary->master)
- return;
-
- master_priv = dev->primary->master->driver_priv;
- if (ringbuf->head == ringbuf->tail && master_priv->sarea_priv)
- master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
-}
-
-static int i915_dma_cleanup(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
-
- /* Make sure interrupts are disabled here because the uninstall ioctl
- * may not have been called from userspace and after dev_private
- * is freed, it's too late.
- */
- if (dev->irq_enabled)
- drm_irq_uninstall(dev);
-
- mutex_lock(&dev->struct_mutex);
- for (i = 0; i < I915_NUM_RINGS; i++)
- intel_cleanup_ring_buffer(&dev_priv->ring[i]);
- mutex_unlock(&dev->struct_mutex);
-
- /* Clear the HWS virtual address at teardown */
- if (I915_NEED_GFX_HWS(dev))
- i915_free_hws(dev);
-
- return 0;
-}
-
-static int i915_initialize(struct drm_device *dev, drm_i915_init_t *init)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- int ret;
-
- master_priv->sarea = drm_legacy_getsarea(dev);
- if (master_priv->sarea) {
- master_priv->sarea_priv = (drm_i915_sarea_t *)
- ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
- } else {
- DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
- }
-
- if (init->ring_size != 0) {
- if (LP_RING(dev_priv)->buffer->obj != NULL) {
- i915_dma_cleanup(dev);
- DRM_ERROR("Client tried to initialize ringbuffer in "
- "GEM mode\n");
- return -EINVAL;
- }
-
- ret = intel_render_ring_init_dri(dev,
- init->ring_start,
- init->ring_size);
- if (ret) {
- i915_dma_cleanup(dev);
- return ret;
- }
- }
-
- dev_priv->dri1.cpp = init->cpp;
- dev_priv->dri1.back_offset = init->back_offset;
- dev_priv->dri1.front_offset = init->front_offset;
- dev_priv->dri1.current_page = 0;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->pf_current_page = 0;
-
- /* Allow hardware batchbuffers unless told otherwise.
- */
- dev_priv->dri1.allow_batchbuffer = 1;
-
- return 0;
-}
-
-static int i915_dma_resume(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = LP_RING(dev_priv);
-
- DRM_DEBUG_DRIVER("%s\n", __func__);
-
- if (ring->buffer->virtual_start == NULL) {
- DRM_ERROR("can not ioremap virtual address for"
- " ring buffer\n");
- return -ENOMEM;
- }
-
- /* Program Hardware Status Page */
- if (!ring->status_page.page_addr) {
- DRM_ERROR("Can not find hardware status page\n");
- return -EINVAL;
- }
- DRM_DEBUG_DRIVER("hw status page @ %p\n",
- ring->status_page.page_addr);
- if (ring->status_page.gfx_addr != 0)
- intel_ring_setup_status_page(ring);
- else
- i915_write_hws_pga(dev);
-
- DRM_DEBUG_DRIVER("Enabled hardware status page\n");
-
- return 0;
-}
-
-static int i915_dma_init(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i915_init_t *init = data;
- int retcode = 0;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
- switch (init->func) {
- case I915_INIT_DMA:
- retcode = i915_initialize(dev, init);
- break;
- case I915_CLEANUP_DMA:
- retcode = i915_dma_cleanup(dev);
- break;
- case I915_RESUME_DMA:
- retcode = i915_dma_resume(dev);
- break;
- default:
- retcode = -EINVAL;
- break;
- }
-
- return retcode;
-}
-
-/* Implement basically the same security restrictions as hardware does
- * for MI_BATCH_NON_SECURE. These can be made stricter at any time.
- *
- * Most of the calculations below involve calculating the size of a
- * particular instruction. It's important to get the size right as
- * that tells us where the next instruction to check is. Any illegal
- * instruction detected will be given a size of zero, which is a
- * signal to abort the rest of the buffer.
- */
-static int validate_cmd(int cmd)
-{
- switch (((cmd >> 29) & 0x7)) {
- case 0x0:
- switch ((cmd >> 23) & 0x3f) {
- case 0x0:
- return 1; /* MI_NOOP */
- case 0x4:
- return 1; /* MI_FLUSH */
- default:
- return 0; /* disallow everything else */
- }
- break;
- case 0x1:
- return 0; /* reserved */
- case 0x2:
- return (cmd & 0xff) + 2; /* 2d commands */
- case 0x3:
- if (((cmd >> 24) & 0x1f) <= 0x18)
- return 1;
-
- switch ((cmd >> 24) & 0x1f) {
- case 0x1c:
- return 1;
- case 0x1d:
- switch ((cmd >> 16) & 0xff) {
- case 0x3:
- return (cmd & 0x1f) + 2;
- case 0x4:
- return (cmd & 0xf) + 2;
- default:
- return (cmd & 0xffff) + 2;
- }
- case 0x1e:
- if (cmd & (1 << 23))
- return (cmd & 0xffff) + 1;
- else
- return 1;
- case 0x1f:
- if ((cmd & (1 << 23)) == 0) /* inline vertices */
- return (cmd & 0x1ffff) + 2;
- else if (cmd & (1 << 17)) /* indirect random */
- if ((cmd & 0xffff) == 0)
- return 0; /* unknown length, too hard */
- else
- return (((cmd & 0xffff) + 1) / 2) + 1;
- else
- return 2; /* indirect sequential */
- default:
- return 0;
- }
- default:
- return 0;
- }
-
- return 0;
-}
-
-static int i915_emit_cmds(struct drm_device *dev, int *buffer, int dwords)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int i, ret;
-
- if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->buffer->size - 8)
- return -EINVAL;
-
- for (i = 0; i < dwords;) {
- int sz = validate_cmd(buffer[i]);
-
- if (sz == 0 || i + sz > dwords)
- return -EINVAL;
- i += sz;
- }
-
- ret = BEGIN_LP_RING((dwords+1)&~1);
- if (ret)
- return ret;
-
- for (i = 0; i < dwords; i++)
- OUT_RING(buffer[i]);
- if (dwords & 1)
- OUT_RING(0);
-
- ADVANCE_LP_RING();
-
- return 0;
-}
-
-int
-i915_emit_box(struct drm_device *dev,
- struct drm_clip_rect *box,
- int DR1, int DR4)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
-
- if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
- box->y2 <= 0 || box->x2 <= 0) {
- DRM_ERROR("Bad box %d,%d..%d,%d\n",
- box->x1, box->y1, box->x2, box->y2);
- return -EINVAL;
- }
-
- if (INTEL_INFO(dev)->gen >= 4) {
- ret = BEGIN_LP_RING(4);
- if (ret)
- return ret;
-
- OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
- OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
- OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
- OUT_RING(DR4);
- } else {
- ret = BEGIN_LP_RING(6);
- if (ret)
- return ret;
-
- OUT_RING(GFX_OP_DRAWRECT_INFO);
- OUT_RING(DR1);
- OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
- OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
- OUT_RING(DR4);
- OUT_RING(0);
- }
- ADVANCE_LP_RING();
-
- return 0;
-}
-
-/* XXX: Emitting the counter should really be moved to part of the IRQ
- * emit. For now, do it in both places:
- */
-
-static void i915_emit_breadcrumb(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-
- dev_priv->dri1.counter++;
- if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
- dev_priv->dri1.counter = 0;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
-
- if (BEGIN_LP_RING(4) == 0) {
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->dri1.counter);
- OUT_RING(0);
- ADVANCE_LP_RING();
- }
-}
-
-static int i915_dispatch_cmdbuffer(struct drm_device *dev,
- drm_i915_cmdbuffer_t *cmd,
- struct drm_clip_rect *cliprects,
- void *cmdbuf)
-{
- int nbox = cmd->num_cliprects;
- int i = 0, count, ret;
-
- if (cmd->sz & 0x3) {
- DRM_ERROR("alignment");
- return -EINVAL;
- }
-
- i915_kernel_lost_context(dev);
-
- count = nbox ? nbox : 1;
-
- for (i = 0; i < count; i++) {
- if (i < nbox) {
- ret = i915_emit_box(dev, &cliprects[i],
- cmd->DR1, cmd->DR4);
- if (ret)
- return ret;
- }
-
- ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
- if (ret)
- return ret;
- }
-
- i915_emit_breadcrumb(dev);
- return 0;
-}
-
-static int i915_dispatch_batchbuffer(struct drm_device *dev,
- drm_i915_batchbuffer_t *batch,
- struct drm_clip_rect *cliprects)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int nbox = batch->num_cliprects;
- int i, count, ret;
-
- if ((batch->start | batch->used) & 0x7) {
- DRM_ERROR("alignment");
- return -EINVAL;
- }
-
- i915_kernel_lost_context(dev);
-
- count = nbox ? nbox : 1;
- for (i = 0; i < count; i++) {
- if (i < nbox) {
- ret = i915_emit_box(dev, &cliprects[i],
- batch->DR1, batch->DR4);
- if (ret)
- return ret;
- }
-
- if (!IS_I830(dev) && !IS_845G(dev)) {
- ret = BEGIN_LP_RING(2);
- if (ret)
- return ret;
-
- if (INTEL_INFO(dev)->gen >= 4) {
- OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
- OUT_RING(batch->start);
- } else {
- OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
- OUT_RING(batch->start | MI_BATCH_NON_SECURE);
- }
- } else {
- ret = BEGIN_LP_RING(4);
- if (ret)
- return ret;
-
- OUT_RING(MI_BATCH_BUFFER);
- OUT_RING(batch->start | MI_BATCH_NON_SECURE);
- OUT_RING(batch->start + batch->used - 4);
- OUT_RING(0);
- }
- ADVANCE_LP_RING();
- }
-
-
- if (IS_G4X(dev) || IS_GEN5(dev)) {
- if (BEGIN_LP_RING(2) == 0) {
- OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
- }
- }
-
- i915_emit_breadcrumb(dev);
- return 0;
-}
-
-static int i915_dispatch_flip(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv =
- dev->primary->master->driver_priv;
- int ret;
-
- if (!master_priv->sarea_priv)
- return -EINVAL;
-
- DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
- __func__,
- dev_priv->dri1.current_page,
- master_priv->sarea_priv->pf_current_page);
-
- i915_kernel_lost_context(dev);
-
- ret = BEGIN_LP_RING(10);
- if (ret)
- return ret;
-
- OUT_RING(MI_FLUSH | MI_READ_FLUSH);
- OUT_RING(0);
-
- OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
- OUT_RING(0);
- if (dev_priv->dri1.current_page == 0) {
- OUT_RING(dev_priv->dri1.back_offset);
- dev_priv->dri1.current_page = 1;
- } else {
- OUT_RING(dev_priv->dri1.front_offset);
- dev_priv->dri1.current_page = 0;
- }
- OUT_RING(0);
-
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
- OUT_RING(0);
-
- ADVANCE_LP_RING();
-
- master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;
-
- if (BEGIN_LP_RING(4) == 0) {
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->dri1.counter);
- OUT_RING(0);
- ADVANCE_LP_RING();
- }
-
- master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
- return 0;
-}
-
-static int i915_quiescent(struct drm_device *dev)
-{
- i915_kernel_lost_context(dev);
- return intel_ring_idle(LP_RING(dev->dev_private));
-}
-
-static int i915_flush_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- int ret;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
- RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- mutex_lock(&dev->struct_mutex);
- ret = i915_quiescent(dev);
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-
-static int i915_batchbuffer(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv;
- drm_i915_sarea_t *sarea_priv;
- drm_i915_batchbuffer_t *batch = data;
- int ret;
- struct drm_clip_rect *cliprects = NULL;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
- master_priv = dev->primary->master->driver_priv;
- sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv;
-
- if (!dev_priv->dri1.allow_batchbuffer) {
- DRM_ERROR("Batchbuffer ioctl disabled\n");
- return -EINVAL;
- }
-
- DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
- batch->start, batch->used, batch->num_cliprects);
-
- RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- if (batch->num_cliprects < 0)
- return -EINVAL;
-
- if (batch->num_cliprects) {
- cliprects = kcalloc(batch->num_cliprects,
- sizeof(*cliprects),
- GFP_KERNEL);
- if (cliprects == NULL)
- return -ENOMEM;
-
- ret = copy_from_user(cliprects, batch->cliprects,
- batch->num_cliprects *
- sizeof(struct drm_clip_rect));
- if (ret != 0) {
- ret = -EFAULT;
- goto fail_free;
- }
- }
-
- mutex_lock(&dev->struct_mutex);
- ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
- mutex_unlock(&dev->struct_mutex);
-
- if (sarea_priv)
- sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
-
-fail_free:
- kfree(cliprects);
-
- return ret;
-}
-
-static int i915_cmdbuffer(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv;
- drm_i915_sarea_t *sarea_priv;
- drm_i915_cmdbuffer_t *cmdbuf = data;
- struct drm_clip_rect *cliprects = NULL;
- void *batch_data;
- int ret;
-
- DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
- cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
- master_priv = dev->primary->master->driver_priv;
- sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv;
-
- RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- if (cmdbuf->num_cliprects < 0)
- return -EINVAL;
-
- batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
- if (batch_data == NULL)
- return -ENOMEM;
-
- ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
- if (ret != 0) {
- ret = -EFAULT;
- goto fail_batch_free;
- }
-
- if (cmdbuf->num_cliprects) {
- cliprects = kcalloc(cmdbuf->num_cliprects,
- sizeof(*cliprects), GFP_KERNEL);
- if (cliprects == NULL) {
- ret = -ENOMEM;
- goto fail_batch_free;
- }
-
- ret = copy_from_user(cliprects, cmdbuf->cliprects,
- cmdbuf->num_cliprects *
- sizeof(struct drm_clip_rect));
- if (ret != 0) {
- ret = -EFAULT;
- goto fail_clip_free;
- }
- }
-
- mutex_lock(&dev->struct_mutex);
- ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
- mutex_unlock(&dev->struct_mutex);
- if (ret) {
- DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
- goto fail_clip_free;
- }
-
- if (sarea_priv)
- sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
-
-fail_clip_free:
- kfree(cliprects);
-fail_batch_free:
- kfree(batch_data);
-
- return ret;
-}
-
-static int i915_emit_irq(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-
- i915_kernel_lost_context(dev);
-
- DRM_DEBUG_DRIVER("\n");
-
- dev_priv->dri1.counter++;
- if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
- dev_priv->dri1.counter = 1;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
-
- if (BEGIN_LP_RING(4) == 0) {
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->dri1.counter);
- OUT_RING(MI_USER_INTERRUPT);
- ADVANCE_LP_RING();
- }
-
- return dev_priv->dri1.counter;
-}
-
-static int i915_wait_irq(struct drm_device *dev, int irq_nr)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- int ret = 0;
- struct intel_engine_cs *ring = LP_RING(dev_priv);
-
- DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
- READ_BREADCRUMB(dev_priv));
-
- if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
- return 0;
- }
-
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
-
- if (ring->irq_get(ring)) {
- DRM_WAIT_ON(ret, ring->irq_queue, 3 * HZ,
- READ_BREADCRUMB(dev_priv) >= irq_nr);
- ring->irq_put(ring);
- } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
- ret = -EBUSY;
-
- if (ret == -EBUSY) {
- DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
- READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
- }
-
- return ret;
-}
-
-/* Needs the lock as it touches the ring.
- */
-static int i915_irq_emit(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- drm_i915_irq_emit_t *emit = data;
- int result;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
- if (!dev_priv || !LP_RING(dev_priv)->buffer->virtual_start) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- mutex_lock(&dev->struct_mutex);
- result = i915_emit_irq(dev);
- mutex_unlock(&dev->struct_mutex);
-
- if (copy_to_user(emit->irq_seq, &result, sizeof(int))) {
- DRM_ERROR("copy_to_user\n");
- return -EFAULT;
- }
-
- return 0;
-}
-
-/* Doesn't need the hardware lock.
- */
-static int i915_irq_wait(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- drm_i915_irq_wait_t *irqwait = data;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- return i915_wait_irq(dev, irqwait->irq_seq);
-}
-
-static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- drm_i915_vblank_pipe_t *pipe = data;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
-
- return 0;
-}
-
-/**
- * Schedule buffer swap at given vertical blank.
- */
-static int i915_vblank_swap(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- /* The delayed swap mechanism was fundamentally racy, and has been
- * removed. The model was that the client requested a delayed flip/swap
- * from the kernel, then waited for vblank before continuing to perform
- * rendering. The problem was that the kernel might wake the client
- * up before it dispatched the vblank swap (since the lock has to be
- * held while touching the ringbuffer), in which case the client would
- * clear and start the next frame before the swap occurred, and
- * flicker would occur in addition to likely missing the vblank.
- *
- * In the absence of this ioctl, userland falls back to a correct path
- * of waiting for a vblank, then dispatching the swap on its own.
- * Context switching to userland and back is plenty fast enough for
- * meeting the requirements of vblank swapping.
- */
- return -EINVAL;
-}
-
-static int i915_flip_bufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- int ret;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
- DRM_DEBUG_DRIVER("%s\n", __func__);
-
- RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- mutex_lock(&dev->struct_mutex);
- ret = i915_dispatch_flip(dev);
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
static int i915_getparam(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -936,21 +58,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
drm_i915_getparam_t *param = data;
int value;
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
switch (param->param) {
case I915_PARAM_IRQ_ACTIVE:
- value = dev->pdev->irq ? 1 : 0;
- break;
case I915_PARAM_ALLOW_BATCHBUFFER:
- value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
- break;
case I915_PARAM_LAST_DISPATCH:
- value = READ_BREADCRUMB(dev_priv);
- break;
+ /* Reject all old ums/dri params. */
+ return -ENODEV;
case I915_PARAM_CHIPSET_ID:
value = dev->pdev->device;
break;
@@ -1027,6 +140,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_CMD_PARSER_VERSION:
value = i915_cmd_parser_get_version();
break;
+ case I915_PARAM_HAS_COHERENT_PHYS_GTT:
+ value = 1;
+ break;
default:
DRM_DEBUG("Unknown parameter %d\n", param->param);
return -EINVAL;
@@ -1046,19 +162,13 @@ static int i915_setparam(struct drm_device *dev, void *data,
struct drm_i915_private *dev_priv = dev->dev_private;
drm_i915_setparam_t *param = data;
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
switch (param->param) {
case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
- break;
case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
- break;
case I915_SETPARAM_ALLOW_BATCHBUFFER:
- dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
- break;
+ /* Reject all old ums/dri params. */
+ return -ENODEV;
+
case I915_SETPARAM_NUM_USED_FENCES:
if (param->value > dev_priv->num_fence_regs ||
param->value < 0)
@@ -1075,54 +185,6 @@ static int i915_setparam(struct drm_device *dev, void *data,
return 0;
}
-static int i915_set_status_page(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- drm_i915_hws_addr_t *hws = data;
- struct intel_engine_cs *ring;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
- if (!I915_NEED_GFX_HWS(dev))
- return -EINVAL;
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- WARN(1, "tried to set status page when mode setting active\n");
- return 0;
- }
-
- DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
-
- ring = LP_RING(dev_priv);
- ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
-
- dev_priv->dri1.gfx_hws_cpu_addr =
- ioremap_wc(dev_priv->gtt.mappable_base + hws->addr, 4096);
- if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
- i915_dma_cleanup(dev);
- ring->status_page.gfx_addr = 0;
- DRM_ERROR("can not ioremap virtual address for"
- " G33 hw status page\n");
- return -ENOMEM;
- }
-
- memset_io(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
- I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
-
- DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
- ring->status_page.gfx_addr);
- DRM_DEBUG_DRIVER("load hws at %p\n",
- ring->status_page.page_addr);
- return 0;
-}
-
static int i915_get_bridge_dev(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1275,12 +337,12 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
/* i915 resume handler doesn't set to D0 */
pci_set_power_state(dev->pdev, PCI_D0);
- i915_resume(dev);
+ i915_resume_legacy(dev);
dev->switch_power_state = DRM_SWITCH_POWER_ON;
} else {
pr_err("switched off\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- i915_suspend(dev, pmm);
+ i915_suspend_legacy(dev, pmm);
dev->switch_power_state = DRM_SWITCH_POWER_OFF;
}
}
@@ -1338,14 +400,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
intel_power_domains_init_hw(dev_priv);
- /*
- * We enable some interrupt sources in our postinstall hooks, so mark
- * interrupts as enabled _before_ actually enabling them to avoid
- * special cases in our ordering checks.
- */
- dev_priv->pm._irqs_disabled = false;
-
- ret = drm_irq_install(dev, dev->pdev->irq);
+ ret = intel_irq_install(dev_priv);
if (ret)
goto cleanup_gem_stolen;
@@ -1370,7 +425,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
goto cleanup_gem;
/* Only enable hotplug handling once the fbdev is fully set up. */
- intel_hpd_init(dev);
+ intel_hpd_init(dev_priv);
/*
* Some ports require correctly set-up hpd registers for detection to
@@ -1405,30 +460,6 @@ out:
return ret;
}
-int i915_master_create(struct drm_device *dev, struct drm_master *master)
-{
- struct drm_i915_master_private *master_priv;
-
- master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
- if (!master_priv)
- return -ENOMEM;
-
- master->driver_priv = master_priv;
- return 0;
-}
-
-void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
-{
- struct drm_i915_master_private *master_priv = master->driver_priv;
-
- if (!master_priv)
- return;
-
- kfree(master_priv);
-
- master->driver_priv = NULL;
-}
-
#if IS_ENABLED(CONFIG_FB)
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
@@ -1534,7 +565,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
info = (struct intel_device_info *)&dev_priv->info;
- if (IS_VALLEYVIEW(dev))
+ if (IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen == 9)
for_each_pipe(dev_priv, pipe)
info->num_sprites[pipe] = 2;
else
@@ -1614,7 +645,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
- spin_lock_init(&dev_priv->backlight_lock);
+ mutex_init(&dev_priv->backlight_lock);
spin_lock_init(&dev_priv->uncore.lock);
spin_lock_init(&dev_priv->mm.object_stat_lock);
spin_lock_init(&dev_priv->mmio_flip_lock);
@@ -1742,7 +773,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_freewq;
}
- intel_irq_init(dev);
+ intel_irq_init(dev_priv);
intel_uncore_sanitize(dev);
/* Try to make sure MCHBAR is enabled before poking at it */
@@ -1784,9 +815,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
DRM_ERROR("failed to init modeset\n");
goto out_power_well;
}
- } else {
- /* Start out suspended in ums mode. */
- dev_priv->ums.mm_suspended = 1;
}
i915_setup_sysfs(dev);
@@ -1800,12 +828,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (IS_GEN5(dev))
intel_gpu_ips_init(dev_priv);
- intel_init_runtime_pm(dev_priv);
+ intel_runtime_pm_enable(dev_priv);
return 0;
out_power_well:
- intel_power_domains_remove(dev_priv);
+ intel_power_domains_fini(dev_priv);
drm_vblank_cleanup(dev);
out_gem_unload:
WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
@@ -1848,16 +876,10 @@ int i915_driver_unload(struct drm_device *dev)
return ret;
}
- intel_fini_runtime_pm(dev_priv);
+ intel_power_domains_fini(dev_priv);
intel_gpu_ips_teardown();
- /* The i915.ko module is still not prepared to be loaded when
- * the power well is not enabled, so just enable it in case
- * we're going to unload/reload. */
- intel_display_set_init_power(dev_priv, true);
- intel_power_domains_remove(dev_priv);
-
i915_teardown_sysfs(dev);
WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
@@ -1868,8 +890,12 @@ int i915_driver_unload(struct drm_device *dev)
acpi_video_unregister();
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
intel_fbdev_fini(dev);
+
+ drm_vblank_cleanup(dev);
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_modeset_cleanup(dev);
/*
@@ -1905,13 +931,8 @@ int i915_driver_unload(struct drm_device *dev)
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_stolen(dev);
-
- if (!I915_NEED_GFX_HWS(dev))
- i915_free_hws(dev);
}
- drm_vblank_cleanup(dev);
-
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
@@ -1959,23 +980,8 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file)
*/
void i915_driver_lastclose(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- /* On gen6+ we refuse to init without kms enabled, but then the drm core
- * goes right around and calls lastclose. Check for this and don't clean
- * up anything. */
- if (!dev_priv)
- return;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- intel_fbdev_restore_mode(dev);
- vga_switcheroo_process_delayed_switch();
- return;
- }
-
- i915_gem_lastclose(dev);
-
- i915_dma_cleanup(dev);
+ intel_fbdev_restore_mode(dev);
+ vga_switcheroo_process_delayed_switch();
}
void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
@@ -1999,24 +1005,24 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
}
const struct drm_ioctl_desc i915_ioctls[] = {
- DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
@@ -2025,8 +1031,8 @@ const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 2318b4c7a8f..f990ab4c3ef 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -356,6 +356,19 @@ static const struct intel_device_info intel_cherryview_info = {
CURSOR_OFFSETS,
};
+static const struct intel_device_info intel_skylake_info = {
+ .is_preliminary = 1,
+ .is_skylake = 1,
+ .gen = 9, .num_pipes = 3,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+ .has_llc = 1,
+ .has_ddi = 1,
+ .has_fbc = 1,
+ GEN_DEFAULT_PIPEOFFSETS,
+ IVB_CURSOR_OFFSETS,
+};
+
/*
* Make sure any device matches here are from most specific to most
* general. For example, since the Quanta match is based on the subsystem
@@ -392,7 +405,8 @@ static const struct intel_device_info intel_cherryview_info = {
INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), \
INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \
INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
- INTEL_CHV_IDS(&intel_cherryview_info)
+ INTEL_CHV_IDS(&intel_cherryview_info), \
+ INTEL_SKL_IDS(&intel_skylake_info)
static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_PCI_IDS,
@@ -449,7 +463,7 @@ void intel_detect_pch(struct drm_device *dev)
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
WARN_ON(!IS_HASWELL(dev));
- WARN_ON(IS_ULT(dev));
+ WARN_ON(IS_HSW_ULT(dev));
} else if (IS_BROADWELL(dev)) {
dev_priv->pch_type = PCH_LPT;
dev_priv->pch_id =
@@ -460,7 +474,15 @@ void intel_detect_pch(struct drm_device *dev)
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
WARN_ON(!IS_HASWELL(dev));
- WARN_ON(!IS_ULT(dev));
+ WARN_ON(!IS_HSW_ULT(dev));
+ } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
+ dev_priv->pch_type = PCH_SPT;
+ DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
+ WARN_ON(!IS_SKYLAKE(dev));
+ } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
+ dev_priv->pch_type = PCH_SPT;
+ DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
+ WARN_ON(!IS_SKYLAKE(dev));
} else
continue;
@@ -529,10 +551,10 @@ static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
}
static int intel_suspend_complete(struct drm_i915_private *dev_priv);
-static int intel_resume_prepare(struct drm_i915_private *dev_priv,
- bool rpm_resume);
+static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
+ bool rpm_resume);
-static int i915_drm_freeze(struct drm_device *dev)
+static int i915_drm_suspend(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
@@ -562,6 +584,8 @@ static int i915_drm_freeze(struct drm_device *dev)
return error;
}
+ intel_suspend_gt_powersave(dev);
+
/*
* Disable CRTCs directly since we want to preserve sw state
* for _thaw. Also, power gate the CRTC power wells.
@@ -573,16 +597,12 @@ static int i915_drm_freeze(struct drm_device *dev)
intel_dp_mst_suspend(dev);
- flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
- intel_runtime_pm_disable_interrupts(dev);
+ intel_runtime_pm_disable_interrupts(dev_priv);
intel_hpd_cancel_work(dev_priv);
intel_suspend_encoders(dev_priv);
- intel_suspend_gt_powersave(dev);
-
- intel_modeset_suspend_hw(dev);
+ intel_suspend_hw(dev);
}
i915_gem_suspend_gtt_mappings(dev);
@@ -608,7 +628,26 @@ static int i915_drm_freeze(struct drm_device *dev)
return 0;
}
-int i915_suspend(struct drm_device *dev, pm_message_t state)
+static int i915_drm_suspend_late(struct drm_device *drm_dev)
+{
+ struct drm_i915_private *dev_priv = drm_dev->dev_private;
+ int ret;
+
+ ret = intel_suspend_complete(dev_priv);
+
+ if (ret) {
+ DRM_ERROR("Suspend complete failed: %d\n", ret);
+
+ return ret;
+ }
+
+ pci_disable_device(drm_dev->pdev);
+ pci_set_power_state(drm_dev->pdev, PCI_D3hot);
+
+ return 0;
+}
+
+int i915_suspend_legacy(struct drm_device *dev, pm_message_t state)
{
int error;
@@ -618,48 +657,25 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
return -ENODEV;
}
- if (state.event == PM_EVENT_PRETHAW)
- return 0;
-
+ if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
+ state.event != PM_EVENT_FREEZE))
+ return -EINVAL;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- error = i915_drm_freeze(dev);
+ error = i915_drm_suspend(dev);
if (error)
return error;
- if (state.event == PM_EVENT_SUSPEND) {
- /* Shut down the device */
- pci_disable_device(dev->pdev);
- pci_set_power_state(dev->pdev, PCI_D3hot);
- }
-
- return 0;
+ return i915_drm_suspend_late(dev);
}
-static int i915_drm_thaw_early(struct drm_device *dev)
+static int i915_drm_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
-
- ret = intel_resume_prepare(dev_priv, false);
- if (ret)
- DRM_ERROR("Resume prepare failed: %d,Continuing resume\n", ret);
- intel_uncore_early_sanitize(dev, true);
- intel_uncore_sanitize(dev);
- intel_power_domains_init_hw(dev_priv);
-
- return ret;
-}
-
-static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET) &&
- restore_gtt_mappings) {
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
mutex_lock(&dev->struct_mutex);
i915_gem_restore_gtt_mappings(dev);
mutex_unlock(&dev->struct_mutex);
@@ -680,30 +696,29 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
}
mutex_unlock(&dev->struct_mutex);
- intel_runtime_pm_restore_interrupts(dev);
+ /* We need working interrupts for modeset enabling ... */
+ intel_runtime_pm_enable_interrupts(dev_priv);
intel_modeset_init_hw(dev);
- {
- unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- if (dev_priv->display.hpd_irq_setup)
- dev_priv->display.hpd_irq_setup(dev);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
- }
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (dev_priv->display.hpd_irq_setup)
+ dev_priv->display.hpd_irq_setup(dev);
+ spin_unlock_irq(&dev_priv->irq_lock);
- intel_dp_mst_resume(dev);
drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(dev, true);
drm_modeset_unlock_all(dev);
+ intel_dp_mst_resume(dev);
+
/*
* ... but also need to make sure that hotplug processing
* doesn't cause havoc. Like in the driver load code we don't
* bother with the tiny race here where we might lose hotplug
* notifications.
* */
- intel_hpd_init(dev);
+ intel_hpd_init(dev_priv);
/* Config may have changed between suspend and resume */
drm_helper_hpd_irq_event(dev);
}
@@ -718,21 +733,15 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
intel_opregion_notify_adapter(dev, PCI_D0);
- return 0;
-}
-
-static int i915_drm_thaw(struct drm_device *dev)
-{
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- i915_check_and_clear_faults(dev);
+ drm_kms_helper_poll_enable(dev);
- return __i915_drm_thaw(dev, true);
+ return 0;
}
-static int i915_resume_early(struct drm_device *dev)
+static int i915_drm_resume_early(struct drm_device *dev)
{
- if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
- return 0;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret = 0;
/*
* We have a resume ordering issue with the snd-hda driver also
@@ -748,33 +757,34 @@ static int i915_resume_early(struct drm_device *dev)
pci_set_master(dev->pdev);
- return i915_drm_thaw_early(dev);
+ if (IS_VALLEYVIEW(dev_priv))
+ ret = vlv_resume_prepare(dev_priv, false);
+ if (ret)
+ DRM_ERROR("Resume prepare failed: %d,Continuing resume\n", ret);
+
+ intel_uncore_early_sanitize(dev, true);
+
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ hsw_disable_pc8(dev_priv);
+
+ intel_uncore_sanitize(dev);
+ intel_power_domains_init_hw(dev_priv);
+
+ return ret;
}
-int i915_resume(struct drm_device *dev)
+int i915_resume_legacy(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- /*
- * Platforms with opregion should have sane BIOS, older ones (gen3 and
- * earlier) need to restore the GTT mappings since the BIOS might clear
- * all our scratch PTEs.
- */
- ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ ret = i915_drm_resume_early(dev);
if (ret)
return ret;
- drm_kms_helper_poll_enable(dev);
- return 0;
-}
-
-static int i915_resume_legacy(struct drm_device *dev)
-{
- i915_resume_early(dev);
- i915_resume(dev);
-
- return 0;
+ return i915_drm_resume(dev);
}
/**
@@ -820,6 +830,9 @@ int i915_reset(struct drm_device *dev)
}
}
+ if (i915_stop_ring_allow_warn(dev_priv))
+ pr_notice("drm/i915: Resetting chip after gpu hang\n");
+
if (ret) {
DRM_ERROR("Failed to reset chip: %i\n", ret);
mutex_unlock(&dev->struct_mutex);
@@ -840,10 +853,7 @@ int i915_reset(struct drm_device *dev)
* was running at the time of the reset (i.e. we weren't VT
* switched away).
*/
- if (drm_core_check_feature(dev, DRIVER_MODESET) ||
- !dev_priv->ums.mm_suspended) {
- dev_priv->ums.mm_suspended = 0;
-
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
dev_priv->gpu_error.reload_in_reset = true;
@@ -923,15 +933,13 @@ static int i915_pm_suspend(struct device *dev)
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- return i915_drm_freeze(drm_dev);
+ return i915_drm_suspend(drm_dev);
}
static int i915_pm_suspend_late(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
- struct drm_i915_private *dev_priv = drm_dev->dev_private;
- int ret;
/*
* We have a suspend ordering issue with the snd-hda driver also
@@ -945,16 +953,7 @@ static int i915_pm_suspend_late(struct device *dev)
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- ret = intel_suspend_complete(dev_priv);
-
- if (ret)
- DRM_ERROR("Suspend complete failed: %d\n", ret);
- else {
- pci_disable_device(pdev);
- pci_set_power_state(pdev, PCI_D3hot);
- }
-
- return ret;
+ return i915_drm_suspend_late(drm_dev);
}
static int i915_pm_resume_early(struct device *dev)
@@ -962,61 +961,21 @@ static int i915_pm_resume_early(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
- return i915_resume_early(drm_dev);
-}
-
-static int i915_pm_resume(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
-
- return i915_resume(drm_dev);
-}
-
-static int i915_pm_freeze(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
-
- if (!drm_dev || !drm_dev->dev_private) {
- dev_err(dev, "DRM not initialized, aborting suspend.\n");
- return -ENODEV;
- }
-
- return i915_drm_freeze(drm_dev);
-}
-
-static int i915_pm_freeze_late(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
- struct drm_i915_private *dev_priv = drm_dev->dev_private;
-
- return intel_suspend_complete(dev_priv);
-}
-
-static int i915_pm_thaw_early(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
- return i915_drm_thaw_early(drm_dev);
+ return i915_drm_resume_early(drm_dev);
}
-static int i915_pm_thaw(struct device *dev)
+static int i915_pm_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
- return i915_drm_thaw(drm_dev);
-}
-
-static int i915_pm_poweroff(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
- return i915_drm_freeze(drm_dev);
+ return i915_drm_resume(drm_dev);
}
static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
@@ -1026,25 +985,6 @@ static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
return 0;
}
-static int snb_resume_prepare(struct drm_i915_private *dev_priv,
- bool rpm_resume)
-{
- struct drm_device *dev = dev_priv->dev;
-
- if (rpm_resume)
- intel_init_pch_refclk(dev);
-
- return 0;
-}
-
-static int hsw_resume_prepare(struct drm_i915_private *dev_priv,
- bool rpm_resume)
-{
- hsw_disable_pc8(dev_priv);
-
- return 0;
-}
-
/*
* Save all Gunit registers that may be lost after a D3 and a subsequent
* S0i[R123] transition. The list of registers needing a save/restore is
@@ -1449,18 +1389,13 @@ static int intel_runtime_suspend(struct device *device)
i915_gem_release_all_mmaps(dev_priv);
mutex_unlock(&dev->struct_mutex);
- /*
- * rps.work can't be rearmed here, since we get here only after making
- * sure the GPU is idle and the RPS freq is set to the minimum. See
- * intel_mark_idle().
- */
- cancel_work_sync(&dev_priv->rps.work);
- intel_runtime_pm_disable_interrupts(dev);
+ intel_suspend_gt_powersave(dev);
+ intel_runtime_pm_disable_interrupts(dev_priv);
ret = intel_suspend_complete(dev_priv);
if (ret) {
DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
- intel_runtime_pm_restore_interrupts(dev);
+ intel_runtime_pm_enable_interrupts(dev_priv);
return ret;
}
@@ -1502,7 +1437,7 @@ static int intel_runtime_resume(struct device *device)
struct pci_dev *pdev = to_pci_dev(device);
struct drm_device *dev = pci_get_drvdata(pdev);
struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
+ int ret = 0;
if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
return -ENODEV;
@@ -1512,7 +1447,13 @@ static int intel_runtime_resume(struct device *device)
intel_opregion_notify_adapter(dev, PCI_D0);
dev_priv->pm.suspended = false;
- ret = intel_resume_prepare(dev_priv, true);
+ if (IS_GEN6(dev_priv))
+ intel_init_pch_refclk(dev);
+ else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ hsw_disable_pc8(dev_priv);
+ else if (IS_VALLEYVIEW(dev_priv))
+ ret = vlv_resume_prepare(dev_priv, true);
+
/*
* No point of rolling back things in case of an error, as the best
* we can do is to hope that things will still work (and disable RPM).
@@ -1520,8 +1461,8 @@ static int intel_runtime_resume(struct device *device)
i915_gem_init_swizzling(dev);
gen6_update_ring_freq(dev);
- intel_runtime_pm_restore_interrupts(dev);
- intel_reset_gt_powersave(dev);
+ intel_runtime_pm_enable_interrupts(dev_priv);
+ intel_enable_gt_powersave(dev);
if (ret)
DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
@@ -1550,41 +1491,41 @@ static int intel_suspend_complete(struct drm_i915_private *dev_priv)
return ret;
}
-/*
- * This function implements common functionality of runtime and system
- * resume sequence. Variable rpm_resume used for implementing different
- * code paths.
- */
-static int intel_resume_prepare(struct drm_i915_private *dev_priv,
- bool rpm_resume)
-{
- struct drm_device *dev = dev_priv->dev;
- int ret;
-
- if (IS_GEN6(dev))
- ret = snb_resume_prepare(dev_priv, rpm_resume);
- else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
- ret = hsw_resume_prepare(dev_priv, rpm_resume);
- else if (IS_VALLEYVIEW(dev))
- ret = vlv_resume_prepare(dev_priv, rpm_resume);
- else
- ret = 0;
-
- return ret;
-}
-
static const struct dev_pm_ops i915_pm_ops = {
+ /*
+ * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
+ * PMSG_RESUME]
+ */
.suspend = i915_pm_suspend,
.suspend_late = i915_pm_suspend_late,
.resume_early = i915_pm_resume_early,
.resume = i915_pm_resume,
- .freeze = i915_pm_freeze,
- .freeze_late = i915_pm_freeze_late,
- .thaw_early = i915_pm_thaw_early,
- .thaw = i915_pm_thaw,
- .poweroff = i915_pm_poweroff,
+
+ /*
+ * S4 event handlers
+ * @freeze, @freeze_late : called (1) before creating the
+ * hibernation image [PMSG_FREEZE] and
+ * (2) after rebooting, before restoring
+ * the image [PMSG_QUIESCE]
+ * @thaw, @thaw_early : called (1) after creating the hibernation
+ * image, before writing it [PMSG_THAW]
+ * and (2) after failing to create or
+ * restore the image [PMSG_RECOVER]
+ * @poweroff, @poweroff_late: called after writing the hibernation
+ * image, before rebooting [PMSG_HIBERNATE]
+ * @restore, @restore_early : called after rebooting and restoring the
+ * hibernation image [PMSG_RESTORE]
+ */
+ .freeze = i915_pm_suspend,
+ .freeze_late = i915_pm_suspend_late,
+ .thaw_early = i915_pm_resume_early,
+ .thaw = i915_pm_resume,
+ .poweroff = i915_pm_suspend,
+ .poweroff_late = i915_pm_suspend_late,
.restore_early = i915_pm_resume_early,
.restore = i915_pm_resume,
+
+ /* S0ix (via runtime suspend) event handlers */
.runtime_suspend = intel_runtime_suspend,
.runtime_resume = intel_runtime_resume,
};
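The table above wires every system-sleep phase (S3 suspend, S4 freeze/thaw, poweroff/restore) to the same suspend/resume handler pair. A minimal sketch of the equivalent wiring using the generic helpers from <linux/pm.h> (sketch only; the patch spells the callbacks out so the PMSG_* phases can be documented next to each assignment):

	static const struct dev_pm_ops i915_pm_ops_sketch = {
		/* .suspend/.freeze/.poweroff -> i915_pm_suspend,
		 * .resume/.thaw/.restore     -> i915_pm_resume */
		SET_SYSTEM_SLEEP_PM_OPS(i915_pm_suspend, i915_pm_resume)
		/* the matching _late/_early phases */
		SET_LATE_SYSTEM_SLEEP_PM_OPS(i915_pm_suspend_late, i915_pm_resume_early)
		/* runtime PM, no idle callback */
		SET_RUNTIME_PM_OPS(intel_runtime_suspend, intel_runtime_resume, NULL)
	};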
@@ -1626,12 +1567,10 @@ static struct drm_driver driver = {
.set_busid = drm_pci_set_busid,
/* Used in place of i915_pm_ops for non-DRIVER_MODESET */
- .suspend = i915_suspend,
+ .suspend = i915_suspend_legacy,
.resume = i915_resume_legacy,
.device_is_agp = i915_driver_device_is_agp,
- .master_create = i915_master_create,
- .master_destroy = i915_master_destroy,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = i915_debugfs_init,
.debugfs_cleanup = i915_debugfs_cleanup,
@@ -1645,7 +1584,7 @@ static struct drm_driver driver = {
.gem_prime_import = i915_gem_prime_import,
.dumb_create = i915_gem_dumb_create,
- .dumb_map_offset = i915_gem_mmap_gtt,
+ .dumb_map_offset = i915_gem_dumb_map_offset,
.dumb_destroy = drm_gem_dumb_destroy,
.ioctls = i915_ioctls,
.fops = &i915_driver_fops,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 16a6f6d187a..63bcda5541e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -55,7 +55,10 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20140905"
+#define DRIVER_DATE "20141121"
+
+#undef WARN_ON
+#define WARN_ON(x) WARN(x, "WARN_ON(" #x ")")
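The override stringizes the tested condition, so the warning in dmesg names the failed check rather than a bare file:line. A small illustration of the expansion (the condition is chosen for the example only):

	WARN_ON(pipe >= I915_MAX_PIPES);
	/* the adjacent string literals concatenate, giving: */
	WARN(pipe >= I915_MAX_PIPES, "WARN_ON(pipe >= I915_MAX_PIPES)");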
enum pipe {
INVALID_PIPE = -1,
@@ -76,6 +79,14 @@ enum transcoder {
};
#define transcoder_name(t) ((t) + 'A')
+/*
+ * This is the maximum (across all platforms) number of planes (primary +
+ * sprites) that can be active at the same time on one pipe.
+ *
+ * This value doesn't count the cursor plane.
+ */
+#define I915_MAX_PLANES 3
+
enum plane {
PLANE_A = 0,
PLANE_B,
@@ -202,10 +213,15 @@ enum intel_dpll_id {
/* real shared dpll ids must be >= 0 */
DPLL_ID_PCH_PLL_A = 0,
DPLL_ID_PCH_PLL_B = 1,
+ /* hsw/bdw */
DPLL_ID_WRPLL1 = 0,
DPLL_ID_WRPLL2 = 1,
+ /* skl */
+ DPLL_ID_SKL_DPLL1 = 0,
+ DPLL_ID_SKL_DPLL2 = 1,
+ DPLL_ID_SKL_DPLL3 = 2,
};
-#define I915_NUM_PLLS 2
+#define I915_NUM_PLLS 3
struct intel_dpll_hw_state {
/* i9xx, pch plls */
@@ -216,16 +232,33 @@ struct intel_dpll_hw_state {
/* hsw, bdw */
uint32_t wrpll;
+
+ /* skl */
+ /*
+ * DPLL_CTRL1 has 6 bits for each DPLL. We store those in the lower
+ * part of ctrl1 and they get shifted into position when writing the
+ * register. This allows us to easily compare the state to share
+ * the DPLL.
+ */
+ uint32_t ctrl1;
+ /* HDMI only, 0 when used for DP */
+ uint32_t cfgcr1, cfgcr2;
+};
+
+struct intel_shared_dpll_config {
+ unsigned crtc_mask; /* mask of CRTCs sharing this PLL */
+ struct intel_dpll_hw_state hw_state;
};
struct intel_shared_dpll {
- int refcount; /* count of number of CRTCs sharing this PLL */
+ struct intel_shared_dpll_config config;
+ struct intel_shared_dpll_config *new_config;
+
int active; /* count of number of active CRTCs (i.e. DPMS on) */
bool on; /* is the PLL actually active? Disabled during modeset */
const char *name;
/* should match the index in the dev_priv->shared_dplls array */
enum intel_dpll_id id;
- struct intel_dpll_hw_state hw_state;
/* The mode_set hook is optional and should be used together with the
* intel_prepare_shared_dpll function. */
void (*mode_set)(struct drm_i915_private *dev_priv,
@@ -239,6 +272,11 @@ struct intel_shared_dpll {
struct intel_dpll_hw_state *hw_state);
};
+#define SKL_DPLL0 0
+#define SKL_DPLL1 1
+#define SKL_DPLL2 2
+#define SKL_DPLL3 3
+
/* Used by dp and fdi links */
struct intel_link_m_n {
uint32_t tu;
@@ -267,7 +305,6 @@ void intel_link_compute_m_n(int bpp, int nlanes,
#define DRIVER_PATCHLEVEL 0
#define WATCH_LISTS 0
-#define WATCH_GTT 0
struct opregion_header;
struct opregion_acpi;
@@ -290,12 +327,6 @@ struct intel_opregion {
struct intel_overlay;
struct intel_overlay_error_state;
-struct drm_local_map;
-
-struct drm_i915_master_private {
- struct drm_local_map *sarea;
- struct _drm_i915_sarea *sarea_priv;
-};
#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
@@ -426,6 +457,7 @@ struct drm_i915_error_state {
};
struct intel_connector;
+struct intel_encoder;
struct intel_crtc_config;
struct intel_plane_config;
struct intel_crtc;
@@ -452,7 +484,7 @@ struct drm_i915_display_funcs {
* Returns true on success, false on failure.
*/
bool (*find_dpll)(const struct intel_limit *limit,
- struct drm_crtc *crtc,
+ struct intel_crtc *crtc,
int target, int refclk,
struct dpll *match_clock,
struct dpll *best_clock);
@@ -468,15 +500,14 @@ struct drm_i915_display_funcs {
struct intel_crtc_config *);
void (*get_plane_config)(struct intel_crtc *,
struct intel_plane_config *);
- int (*crtc_mode_set)(struct drm_crtc *crtc,
- int x, int y,
- struct drm_framebuffer *old_fb);
+ int (*crtc_compute_clock)(struct intel_crtc *crtc);
void (*crtc_enable)(struct drm_crtc *crtc);
void (*crtc_disable)(struct drm_crtc *crtc);
void (*off)(struct drm_crtc *crtc);
- void (*write_eld)(struct drm_connector *connector,
- struct drm_crtc *crtc,
- struct drm_display_mode *mode);
+ void (*audio_codec_enable)(struct drm_connector *connector,
+ struct intel_encoder *encoder,
+ struct drm_display_mode *mode);
+ void (*audio_codec_disable)(struct intel_encoder *encoder);
void (*fdi_link_train)(struct drm_crtc *crtc);
void (*init_clock_gating)(struct drm_device *dev);
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
@@ -494,7 +525,7 @@ struct drm_i915_display_funcs {
/* display clock increase/decrease */
/* pll clock increase/decrease */
- int (*setup_backlight)(struct intel_connector *connector);
+ int (*setup_backlight)(struct intel_connector *connector, enum pipe pipe);
uint32_t (*get_backlight)(struct intel_connector *connector);
void (*set_backlight)(struct intel_connector *connector,
uint32_t level);
@@ -533,6 +564,7 @@ struct intel_uncore {
unsigned fw_rendercount;
unsigned fw_mediacount;
+ unsigned fw_blittercount;
struct timer_list force_wake_timer;
};
@@ -551,6 +583,7 @@ struct intel_uncore {
func(is_ivybridge) sep \
func(is_valleyview) sep \
func(is_haswell) sep \
+ func(is_skylake) sep \
func(is_preliminary) sep \
func(has_fbc) sep \
func(has_pipe_cxsr) sep \
@@ -646,6 +679,7 @@ struct intel_context {
struct {
struct drm_i915_gem_object *state;
struct intel_ringbuffer *ringbuf;
+ int unpin_count;
} engine[I915_NUM_RINGS];
struct list_head link;
@@ -663,6 +697,18 @@ struct i915_fbc {
bool false_color;
+ /* Tracks whether the HW is actually enabled, not whether the feature is
+ * possible. */
+ bool enabled;
+
+ /* On gen8 some rings cannot perform the fbc clean operation, so for now
+ * we do it in SW with mmio.
+ * This variable works in the opposite direction of ring->fbc_dirty,
+ * telling the frontbuffer tracking code to perform the cache clean
+ * on the SW side.
+ */
+ bool need_sw_cache_clean;
+
struct intel_fbc_work {
struct delayed_work work;
struct drm_crtc *crtc;
@@ -704,6 +750,7 @@ enum intel_pch {
PCH_IBX, /* Ibexpeak PCH */
PCH_CPT, /* Cougarpoint PCH */
PCH_LPT, /* Lynxpoint PCH */
+ PCH_SPT, /* Sunrisepoint PCH */
PCH_NOP,
};
@@ -717,6 +764,7 @@ enum intel_sbi_destination {
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIPEB_FORCE (1<<4)
+#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
struct intel_fbdev;
struct intel_fbc_work;
@@ -768,7 +816,6 @@ struct i915_suspend_saved_registers {
u32 saveBLC_HIST_CTL;
u32 saveBLC_PWM_CTL;
u32 saveBLC_PWM_CTL2;
- u32 saveBLC_HIST_CTL_B;
u32 saveBLC_CPU_PWM_CTL;
u32 saveBLC_CPU_PWM_CTL2;
u32 saveFPB0;
@@ -877,6 +924,7 @@ struct i915_suspend_saved_registers {
u32 savePIPEB_LINK_N1;
u32 saveMCHBAR_RENDER_STANDBY;
u32 savePCH_PORT_HOTPLUG;
+ u16 saveGCDGMBUS;
};
struct vlv_s0ix_state {
@@ -947,8 +995,12 @@ struct intel_rps_ei {
};
struct intel_gen6_power_mgmt {
- /* work and pm_iir are protected by dev_priv->irq_lock */
+ /*
+ * work, interrupts_enabled and pm_iir are protected by
+ * dev_priv->irq_lock
+ */
struct work_struct work;
+ bool interrupts_enabled;
u32 pm_iir;
/* Frequencies are stored in potentially platform dependent multiples.
@@ -1071,31 +1123,6 @@ struct i915_power_domains {
struct i915_power_well *power_wells;
};
-struct i915_dri1_state {
- unsigned allow_batchbuffer : 1;
- u32 __iomem *gfx_hws_cpu_addr;
-
- unsigned int cpp;
- int back_offset;
- int front_offset;
- int current_page;
- int page_flipping;
-
- uint32_t counter;
-};
-
-struct i915_ums_state {
- /**
- * Flag if the X Server, and thus DRM, is not currently in
- * control of the device.
- *
- * This is set between LeaveVT and EnterVT. It needs to be
- * replaced with a semaphore. It also needs to be
- * transitioned away from for kernel modesetting.
- */
- int mm_suspended;
-};
-
#define MAX_L3_SLICES 2
struct intel_l3_parity {
u32 *remap_info[MAX_L3_SLICES];
@@ -1357,6 +1384,49 @@ struct ilk_wm_values {
enum intel_ddb_partitioning partitioning;
};
+struct skl_ddb_entry {
+ uint16_t start, end; /* in number of blocks, 'end' is exclusive */
+};
+
+static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry)
+{
+ return entry->end - entry->start;
+}
+
+static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
+ const struct skl_ddb_entry *e2)
+{
+ if (e1->start == e2->start && e1->end == e2->end)
+ return true;
+
+ return false;
+}
+
+struct skl_ddb_allocation {
+ struct skl_ddb_entry pipe[I915_MAX_PIPES];
+ struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES];
+ struct skl_ddb_entry cursor[I915_MAX_PIPES];
+};
+
+struct skl_wm_values {
+ bool dirty[I915_MAX_PIPES];
+ struct skl_ddb_allocation ddb;
+ uint32_t wm_linetime[I915_MAX_PIPES];
+ uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8];
+ uint32_t cursor[I915_MAX_PIPES][8];
+ uint32_t plane_trans[I915_MAX_PIPES][I915_MAX_PLANES];
+ uint32_t cursor_trans[I915_MAX_PIPES];
+};
+
+struct skl_wm_level {
+ bool plane_en[I915_MAX_PLANES];
+ bool cursor_en;
+ uint16_t plane_res_b[I915_MAX_PLANES];
+ uint8_t plane_res_l[I915_MAX_PLANES];
+ uint16_t cursor_res_b;
+ uint8_t cursor_res_l;
+};
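The DDB entries above use an exclusive 'end', so skl_ddb_entry_size() is a plain subtraction and start == end denotes an empty allocation. A tiny worked example (block numbers made up for illustration):

	/* a pipe that owns DDB blocks 0..159 */
	struct skl_ddb_entry e = { .start = 0, .end = 160 };
	/* skl_ddb_entry_size(&e) == 160; { .start = 160, .end = 160 } is empty */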
+
/*
* This struct helps tracking the state needed for runtime PM, which puts the
* device in PCI D3 state. Notice that when this happens, nothing on the
@@ -1369,7 +1439,7 @@ struct ilk_wm_values {
*
* Our driver uses the autosuspend delay feature, which means we'll only really
* suspend if we stay with zero refcount for a certain amount of time. The
- * default value is currently very conservative (see intel_init_runtime_pm), but
+ * default value is currently very conservative (see intel_runtime_pm_enable), but
* it can be changed with the standard runtime PM files from sysfs.
*
* The irqs_disabled variable becomes true exactly after we disable the IRQs and
@@ -1382,7 +1452,7 @@ struct ilk_wm_values {
*/
struct i915_runtime_pm {
bool suspended;
- bool _irqs_disabled;
+ bool irqs_enabled;
};
enum intel_pipe_crc_source {
@@ -1426,6 +1496,20 @@ struct i915_frontbuffer_tracking {
unsigned flip_bits;
};
+struct i915_wa_reg {
+ u32 addr;
+ u32 value;
+ /* bitmask representing WA bits */
+ u32 mask;
+};
+
+#define I915_MAX_WA_REGS 16
+
+struct i915_workarounds {
+ struct i915_wa_reg reg[I915_MAX_WA_REGS];
+ u32 count;
+};
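The workarounds table is a fixed array of addr/mask/value triples plus a count, bounded by I915_MAX_WA_REGS. A sketch of how one entry would be recorded (helper name hypothetical, values made up):

	static void wa_add_sketch(struct i915_workarounds *w,
				  u32 addr, u32 mask, u32 value)
	{
		if (WARN_ON(w->count >= I915_MAX_WA_REGS))
			return;
		w->reg[w->count].addr = addr;
		w->reg[w->count].mask = mask;
		w->reg[w->count].value = value;
		w->count++;
	}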
+
struct drm_i915_private {
struct drm_device *dev;
struct kmem_cache *slab;
@@ -1505,11 +1589,13 @@ struct drm_i915_private {
struct intel_opregion opregion;
struct intel_vbt_data vbt;
+ bool preserve_bios_swizzle;
+
/* overlay */
struct intel_overlay *overlay;
/* backlight registers and fields in struct intel_panel */
- spinlock_t backlight_lock;
+ struct mutex backlight_lock;
/* LVDS info */
bool no_aux_handshake;
@@ -1523,6 +1609,7 @@ struct drm_i915_private {
unsigned int fsb_freq, mem_freq, is_ddr3;
unsigned int vlv_cdclk_freq;
+ unsigned int hpll_freq;
/**
* wq - Driver workqueue for GEM.
@@ -1568,19 +1655,7 @@ struct drm_i915_private {
struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
- /*
- * workarounds are currently applied at different places and
- * changes are being done to consolidate them so exact count is
- * not clear at this point, use a max value for now.
- */
-#define I915_MAX_WA_REGS 16
- struct {
- u32 addr;
- u32 value;
- /* bitmask representing WA bits */
- u32 mask;
- } intel_wa_regs[I915_MAX_WA_REGS];
- u32 num_wa_regs;
+ struct i915_workarounds workarounds;
/* Reclocking support */
bool render_reclock_avail;
@@ -1644,9 +1719,25 @@ struct drm_i915_private {
uint16_t spr_latency[5];
/* cursor */
uint16_t cur_latency[5];
+ /*
+ * Raw watermark memory latency values
+ * for SKL for all 8 levels
+ * in 1us units.
+ */
+ uint16_t skl_latency[8];
+
+ /*
+ * The skl_wm_values structure is a bit too big for stack
+ * allocation, so we keep the staging struct where we store
+ * intermediate results here instead.
+ */
+ struct skl_wm_values skl_results;
/* current hardware state */
- struct ilk_wm_values hw;
+ union {
+ struct ilk_wm_values hw;
+ struct skl_wm_values skl_hw;
+ };
} wm;
struct i915_runtime_pm pm;
@@ -1667,12 +1758,6 @@ struct drm_i915_private {
uint32_t bios_vgacntr;
- /* Old dri1 support infrastructure, beware the dragons ya fools entering
- * here! */
- struct i915_dri1_state dri1;
- /* Old ums support infrastructure, same warning applies. */
- struct i915_ums_state ums;
-
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
struct {
int (*do_execbuf)(struct drm_device *dev, struct drm_file *file,
@@ -1830,8 +1915,6 @@ struct drm_i915_gem_object {
unsigned long gt_ro:1;
unsigned int cache_level:3;
- unsigned int has_aliasing_ppgtt_mapping:1;
- unsigned int has_global_gtt_mapping:1;
unsigned int has_dma_mapping:1;
unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
@@ -1864,10 +1947,10 @@ struct drm_i915_gem_object {
unsigned long user_pin_count;
struct drm_file *pin_filp;
- /** for phy allocated objects */
- struct drm_dma_handle *phys_handle;
-
union {
+ /** for phy allocated objects */
+ struct drm_dma_handle *phys_handle;
+
struct i915_gem_userptr {
uintptr_t ptr;
unsigned read_only :1;
@@ -2073,6 +2156,7 @@ struct drm_i915_cmd_table {
#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
#define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
+#define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
(INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
@@ -2080,9 +2164,10 @@ struct drm_i915_cmd_table {
((INTEL_DEVID(dev) & 0xf) == 0x2 || \
(INTEL_DEVID(dev) & 0xf) == 0x6 || \
(INTEL_DEVID(dev) & 0xf) == 0xe))
+#define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \
+ (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
#define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \
(INTEL_DEVID(dev) & 0xFF00) == 0x0A00)
-#define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
(INTEL_DEVID(dev) & 0x00F0) == 0x0020)
/* ULX machines are also considered ULT. */
@@ -2103,6 +2188,7 @@ struct drm_i915_cmd_table {
#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7)
#define IS_GEN8(dev) (INTEL_INFO(dev)->gen == 8)
+#define IS_GEN9(dev) (INTEL_INFO(dev)->gen == 9)
#define RENDER_RING (1<<RCS)
#define BSD_RING (1<<VCS)
@@ -2115,13 +2201,11 @@ struct drm_i915_cmd_table {
#define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
#define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
- to_i915(dev)->ellc_size)
+ __I915__(dev)->ellc_size)
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
#define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 8)
-#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6)
-#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 && !IS_GEN8(dev))
#define USES_PPGTT(dev) (i915.enable_ppgtt)
#define USES_FULL_PPGTT(dev) (i915.enable_ppgtt == 2)
@@ -2154,13 +2238,15 @@ struct drm_i915_cmd_table {
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
#define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
-#define HAS_IPS(dev) (IS_ULT(dev) || IS_BROADWELL(dev))
+#define HAS_IPS(dev) (IS_HSW_ULT(dev) || IS_BROADWELL(dev))
#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
#define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \
IS_BROADWELL(dev) || IS_VALLEYVIEW(dev))
+#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6)
+#define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
@@ -2168,8 +2254,11 @@ struct drm_i915_cmd_table {
#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
+#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
+#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
-#define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type)
+#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
+#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
@@ -2189,8 +2278,8 @@ struct drm_i915_cmd_table {
extern const struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
-extern int i915_suspend(struct drm_device *dev, pm_message_t state);
-extern int i915_resume(struct drm_device *dev);
+extern int i915_suspend_legacy(struct drm_device *dev, pm_message_t state);
+extern int i915_resume_legacy(struct drm_device *dev);
extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
@@ -2227,8 +2316,6 @@ struct i915_params {
extern struct i915_params i915 __read_mostly;
/* i915_dma.c */
-void i915_update_dri1_breadcrumb(struct drm_device *dev);
-extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
@@ -2242,9 +2329,6 @@ extern int i915_driver_device_is_agp(struct drm_device * dev);
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
#endif
-extern int i915_emit_box(struct drm_device *dev,
- struct drm_clip_rect *box,
- int DR1, int DR4);
extern int intel_gpu_reset(struct drm_device *dev);
extern int i915_reset(struct drm_device *dev);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
@@ -2260,10 +2344,10 @@ __printf(3, 4)
void i915_handle_error(struct drm_device *dev, bool wedged,
const char *fmt, ...);
-void gen6_set_pm_mask(struct drm_i915_private *dev_priv, u32 pm_iir,
- int new_delay);
-extern void intel_irq_init(struct drm_device *dev);
-extern void intel_hpd_init(struct drm_device *dev);
+extern void intel_irq_init(struct drm_i915_private *dev_priv);
+extern void intel_hpd_init(struct drm_i915_private *dev_priv);
+int intel_irq_install(struct drm_i915_private *dev_priv);
+void intel_irq_uninstall(struct drm_i915_private *dev_priv);
extern void intel_uncore_sanitize(struct drm_device *dev);
extern void intel_uncore_early_sanitize(struct drm_device *dev,
@@ -2283,10 +2367,19 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
+void
+ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
+void
+ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
+void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
+ uint32_t interrupt_mask,
+ uint32_t enabled_irq_mask);
+#define ibx_enable_display_interrupt(dev_priv, bits) \
+ ibx_display_interrupt_update((dev_priv), (bits), (bits))
+#define ibx_disable_display_interrupt(dev_priv, bits) \
+ ibx_display_interrupt_update((dev_priv), (bits), 0)
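Both wrappers call ibx_display_interrupt_update() with the same bits as the interrupt mask; only the enabled mask differs (bits vs. 0). A usage sketch, where hotplug_bits stands in for whatever mask a caller passes:

	/* enable: mask the bits in and mark them enabled */
	ibx_enable_display_interrupt(dev_priv, hotplug_bits);
	/* disable again: same mask, enabled bits cleared */
	ibx_disable_display_interrupt(dev_priv, hotplug_bits);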
/* i915_gem.c */
-int i915_gem_init_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
@@ -2333,10 +2426,6 @@ int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
int i915_gem_set_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_get_tiling(struct drm_device *dev, void *data,
@@ -2379,7 +2468,6 @@ int __must_check i915_vma_unbind(struct i915_vma *vma);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
-void i915_gem_lastclose(struct drm_device *dev);
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
int *needs_clflush);
@@ -2413,8 +2501,9 @@ void i915_vma_move_to_active(struct i915_vma *vma,
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
- uint32_t handle, uint64_t *offset);
+int i915_gem_dumb_map_offset(struct drm_file *file_priv,
+ struct drm_device *dev, uint32_t handle,
+ uint64_t *offset);
/**
* Returns true if seq1 is later than seq2.
*/
@@ -2486,6 +2575,11 @@ int __i915_add_request(struct intel_engine_cs *ring,
u32 *seqno);
#define i915_add_request(ring, seqno) \
__i915_add_request(ring, NULL, NULL, seqno)
+int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
+ unsigned reset_counter,
+ bool interruptible,
+ s64 *timeout,
+ struct drm_i915_file_private *file_priv);
int __must_check i915_wait_seqno(struct intel_engine_cs *ring,
uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
@@ -2755,7 +2849,6 @@ static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
extern void intel_i2c_reset(struct drm_device *dev);
/* intel_opregion.c */
-struct intel_encoder;
#ifdef CONFIG_ACPI
extern int intel_opregion_setup(struct drm_device *dev);
extern void intel_opregion_init(struct drm_device *dev);
@@ -2793,7 +2886,6 @@ static inline void intel_unregister_dsm_handler(void) { return; }
/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
-extern void intel_modeset_suspend_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
@@ -2804,7 +2896,7 @@ extern void intel_modeset_setup_hw_state(struct drm_device *dev,
extern void i915_redisable_vga(struct drm_device *dev);
extern void i915_redisable_vga_power_on(struct drm_device *dev);
extern bool intel_fbc_enabled(struct drm_device *dev);
-extern void gen8_fbc_sw_flush(struct drm_device *dev, u32 value);
+extern void bdw_fbc_sw_flush(struct drm_device *dev, u32 value);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
@@ -2842,8 +2934,8 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
void assert_force_wake_inactive(struct drm_i915_private *dev_priv);
-int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
-int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
+int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
+int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
@@ -2873,7 +2965,9 @@ int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);
#define FORCEWAKE_RENDER (1 << 0)
#define FORCEWAKE_MEDIA (1 << 1)
-#define FORCEWAKE_ALL (FORCEWAKE_RENDER | FORCEWAKE_MEDIA)
+#define FORCEWAKE_BLITTER (1 << 2)
+#define FORCEWAKE_ALL (FORCEWAKE_RENDER | FORCEWAKE_MEDIA | \
+ FORCEWAKE_BLITTER)
#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
@@ -2939,6 +3033,11 @@ static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}
+static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
+{
+ return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
+}
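Like msecs_to_jiffies_timeout() above it, the helper adds one jiffy so a short but non-zero timeout can never round down to an immediate return; the seqno wait below uses it for its nanosecond timeout. A quick example assuming HZ=100 (10 ms per tick):

	/* nsecs_to_jiffies64(1 * NSEC_PER_MSEC) == 0, but */
	/* nsecs_to_jiffies_timeout(1 * NSEC_PER_MSEC) == 1, i.e. one full tick */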
+
static inline unsigned long
timespec_to_jiffies_timeout(const struct timespec *value)
{
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 28f91df2604..4a9faea626d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -160,33 +160,6 @@ i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
}
int
-i915_gem_init_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_init *args = data;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
- if (args->gtt_start >= args->gtt_end ||
- (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
- return -EINVAL;
-
- /* GEM with user mode setting was never supported on ilk and later. */
- if (INTEL_INFO(dev)->gen >= 5)
- return -ENODEV;
-
- mutex_lock(&dev->struct_mutex);
- i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
- args->gtt_end);
- dev_priv->gtt.mappable_end = args->gtt_end;
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
@@ -208,40 +181,137 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
return 0;
}
-static void i915_gem_object_detach_phys(struct drm_i915_gem_object *obj)
+static int
+i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
- drm_dma_handle_t *phys = obj->phys_handle;
+ struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
+ char *vaddr = obj->phys_handle->vaddr;
+ struct sg_table *st;
+ struct scatterlist *sg;
+ int i;
- if (!phys)
- return;
+ if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
+ return -EINVAL;
- if (obj->madv == I915_MADV_WILLNEED) {
+ for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+ struct page *page;
+ char *src;
+
+ page = shmem_read_mapping_page(mapping, i);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ src = kmap_atomic(page);
+ memcpy(vaddr, src, PAGE_SIZE);
+ drm_clflush_virt_range(vaddr, PAGE_SIZE);
+ kunmap_atomic(src);
+
+ page_cache_release(page);
+ vaddr += PAGE_SIZE;
+ }
+
+ i915_gem_chipset_flush(obj->base.dev);
+
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (st == NULL)
+ return -ENOMEM;
+
+ if (sg_alloc_table(st, 1, GFP_KERNEL)) {
+ kfree(st);
+ return -ENOMEM;
+ }
+
+ sg = st->sgl;
+ sg->offset = 0;
+ sg->length = obj->base.size;
+
+ sg_dma_address(sg) = obj->phys_handle->busaddr;
+ sg_dma_len(sg) = obj->base.size;
+
+ obj->pages = st;
+ obj->has_dma_mapping = true;
+ return 0;
+}
+
+static void
+i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
+{
+ int ret;
+
+ BUG_ON(obj->madv == __I915_MADV_PURGED);
+
+ ret = i915_gem_object_set_to_cpu_domain(obj, true);
+ if (ret) {
+ /* In the event of a disaster, abandon all caches and
+ * hope for the best.
+ */
+ WARN_ON(ret != -EIO);
+ obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ }
+
+ if (obj->madv == I915_MADV_DONTNEED)
+ obj->dirty = 0;
+
+ if (obj->dirty) {
struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
- char *vaddr = phys->vaddr;
+ char *vaddr = obj->phys_handle->vaddr;
int i;
for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
- struct page *page = shmem_read_mapping_page(mapping, i);
- if (!IS_ERR(page)) {
- char *dst = kmap_atomic(page);
- memcpy(dst, vaddr, PAGE_SIZE);
- drm_clflush_virt_range(dst, PAGE_SIZE);
- kunmap_atomic(dst);
-
- set_page_dirty(page);
+ struct page *page;
+ char *dst;
+
+ page = shmem_read_mapping_page(mapping, i);
+ if (IS_ERR(page))
+ continue;
+
+ dst = kmap_atomic(page);
+ drm_clflush_virt_range(vaddr, PAGE_SIZE);
+ memcpy(dst, vaddr, PAGE_SIZE);
+ kunmap_atomic(dst);
+
+ set_page_dirty(page);
+ if (obj->madv == I915_MADV_WILLNEED)
mark_page_accessed(page);
- page_cache_release(page);
- }
+ page_cache_release(page);
vaddr += PAGE_SIZE;
}
- i915_gem_chipset_flush(obj->base.dev);
+ obj->dirty = 0;
}
-#ifdef CONFIG_X86
- set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
-#endif
- drm_pci_free(obj->base.dev, phys);
- obj->phys_handle = NULL;
+ sg_free_table(obj->pages);
+ kfree(obj->pages);
+
+ obj->has_dma_mapping = false;
+}
+
+static void
+i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
+{
+ drm_pci_free(obj->base.dev, obj->phys_handle);
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
+ .get_pages = i915_gem_object_get_pages_phys,
+ .put_pages = i915_gem_object_put_pages_phys,
+ .release = i915_gem_object_release_phys,
+};
+
+static int
+drop_pages(struct drm_i915_gem_object *obj)
+{
+ struct i915_vma *vma, *next;
+ int ret;
+
+ drm_gem_object_reference(&obj->base);
+ list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link)
+ if (i915_vma_unbind(vma))
+ break;
+
+ ret = i915_gem_object_put_pages(obj);
+ drm_gem_object_unreference(&obj->base);
+
+ return ret;
}
int
@@ -249,9 +319,7 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
int align)
{
drm_dma_handle_t *phys;
- struct address_space *mapping;
- char *vaddr;
- int i;
+ int ret;
if (obj->phys_handle) {
if ((unsigned long)obj->phys_handle->vaddr & (align -1))
@@ -266,41 +334,19 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
if (obj->base.filp == NULL)
return -EINVAL;
+ ret = drop_pages(obj);
+ if (ret)
+ return ret;
+
/* create a new object */
phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
if (!phys)
return -ENOMEM;
- vaddr = phys->vaddr;
-#ifdef CONFIG_X86
- set_memory_wc((unsigned long)vaddr, phys->size / PAGE_SIZE);
-#endif
- mapping = file_inode(obj->base.filp)->i_mapping;
- for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
- struct page *page;
- char *src;
-
- page = shmem_read_mapping_page(mapping, i);
- if (IS_ERR(page)) {
-#ifdef CONFIG_X86
- set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
-#endif
- drm_pci_free(obj->base.dev, phys);
- return PTR_ERR(page);
- }
-
- src = kmap_atomic(page);
- memcpy(vaddr, src, PAGE_SIZE);
- kunmap_atomic(src);
-
- mark_page_accessed(page);
- page_cache_release(page);
-
- vaddr += PAGE_SIZE;
- }
-
obj->phys_handle = phys;
- return 0;
+ obj->ops = &i915_gem_phys_ops;
+
+ return i915_gem_object_get_pages(obj);
}
static int
@@ -311,6 +357,14 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
struct drm_device *dev = obj->base.dev;
void *vaddr = obj->phys_handle->vaddr + args->offset;
char __user *user_data = to_user_ptr(args->data_ptr);
+ int ret;
+
+ /* We manually control the domain here and pretend that it
+ * remains coherent i.e. in the GTT domain, like shmem_pwrite.
+ */
+ ret = i915_gem_object_wait_rendering(obj, false);
+ if (ret)
+ return ret;
if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
unsigned long unwritten;
@@ -326,6 +380,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
return -EFAULT;
}
+ drm_clflush_virt_range(vaddr, args->size);
i915_gem_chipset_flush(dev);
return 0;
}
@@ -346,6 +401,7 @@ static int
i915_gem_create(struct drm_file *file,
struct drm_device *dev,
uint64_t size,
+ bool dumb,
uint32_t *handle_p)
{
struct drm_i915_gem_object *obj;
@@ -361,6 +417,7 @@ i915_gem_create(struct drm_file *file,
if (obj == NULL)
return -ENOMEM;
+ obj->base.dumb = dumb;
ret = drm_gem_handle_create(file, &obj->base, &handle);
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(&obj->base);
@@ -380,7 +437,7 @@ i915_gem_dumb_create(struct drm_file *file,
args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
args->size = args->pitch * args->height;
return i915_gem_create(file, dev,
- args->size, &args->handle);
+ args->size, true, &args->handle);
}
/**
@@ -393,7 +450,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_create *args = data;
return i915_gem_create(file, dev,
- args->size, &args->handle);
+ args->size, false, &args->handle);
}
static inline int
@@ -1046,11 +1103,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
* pread/pwrite currently are reading and writing from the CPU
* perspective, requiring manual detiling by the client.
*/
- if (obj->phys_handle) {
- ret = i915_gem_phys_pwrite(obj, args, file);
- goto out;
- }
-
if (obj->tiling_mode == I915_TILING_NONE &&
obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
cpu_write_needs_clflush(obj)) {
@@ -1060,8 +1112,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
* textures). Fallback to the shmem path in that case. */
}
- if (ret == -EFAULT || ret == -ENOSPC)
- ret = i915_gem_shmem_pwrite(dev, obj, args, file);
+ if (ret == -EFAULT || ret == -ENOSPC) {
+ if (obj->phys_handle)
+ ret = i915_gem_phys_pwrite(obj, args, file);
+ else
+ ret = i915_gem_shmem_pwrite(dev, obj, args, file);
+ }
out:
drm_gem_object_unreference(&obj->base);
@@ -1134,7 +1190,7 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv)
}
/**
- * __wait_seqno - wait until execution of seqno has finished
+ * __i915_wait_seqno - wait until execution of seqno has finished
* @ring: the ring expected to report seqno
 * @seqno: target seqno to wait for
* @reset_counter: reset sequence associated with the given seqno
@@ -1151,7 +1207,7 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv)
 * Returns 0 if the seqno was found within the allotted time. Else returns the
* errno with remaining time filled in timeout argument.
*/
-static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
+int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
unsigned reset_counter,
bool interruptible,
s64 *timeout,
@@ -1171,7 +1227,8 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
return 0;
- timeout_expire = timeout ? jiffies + nsecs_to_jiffies((u64)*timeout) : 0;
+ timeout_expire = timeout ?
+ jiffies + nsecs_to_jiffies_timeout((u64)*timeout) : 0;
if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
gen6_rps_boost(dev_priv);
@@ -1247,6 +1304,16 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
s64 tres = *timeout - (now - before);
*timeout = tres < 0 ? 0 : tres;
+
+ /*
+ * Apparently ktime isn't accurate enough and occasionally has a
+ * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
+ * things up to make the test happy. We allow up to 1 jiffy.
+ *
+	 * This is a regression from the timespec->ktime conversion.
+ */
+ if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
+ *timeout = 0;
}
return ret;
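As a rough illustration of the one-jiffy tolerance added above, here is a standalone sketch of the clamp. HZ, ETIME and the helper function are assumptions for illustration only, not the kernel's definitions.

#include <stdio.h>
#include <stdint.h>

#define HZ      250             /* assumed tick rate, illustration only */
#define ETIME   62

/* Stand-in for jiffies_to_usecs(1) * 1000 from the hunk above. */
static int64_t one_jiffy_in_nsecs(void)
{
        return 1000000000LL / HZ;
}

int main(void)
{
        int64_t timeout = 3000000;      /* 3 ms of residue, less than a jiffy */
        int ret = -ETIME;

        /* Treat a sub-jiffy residue as having expired exactly on time. */
        if (ret == -ETIME && timeout < one_jiffy_in_nsecs())
                timeout = 0;

        printf("ret=%d timeout=%lld\n", ret, (long long)timeout);
        return 0;
}
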
@@ -1262,6 +1329,7 @@ i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
bool interruptible = dev_priv->mm.interruptible;
+ unsigned reset_counter;
int ret;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -1275,14 +1343,13 @@ i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
if (ret)
return ret;
- return __wait_seqno(ring, seqno,
- atomic_read(&dev_priv->gpu_error.reset_counter),
- interruptible, NULL, NULL);
+ reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+ return __i915_wait_seqno(ring, seqno, reset_counter, interruptible,
+ NULL, NULL);
}
static int
-i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring)
+i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj)
{
if (!obj->active)
return 0;
@@ -1319,7 +1386,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
if (ret)
return ret;
- return i915_gem_object_wait_rendering__tail(obj, ring);
+ return i915_gem_object_wait_rendering__tail(obj);
}
/* A nonblocking variant of the above wait. This is a highly dangerous routine
@@ -1354,12 +1421,13 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
mutex_unlock(&dev->struct_mutex);
- ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv);
+ ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL,
+ file_priv);
mutex_lock(&dev->struct_mutex);
if (ret)
return ret;
- return i915_gem_object_wait_rendering__tail(obj, ring);
+ return i915_gem_object_wait_rendering__tail(obj);
}
/**
@@ -1466,6 +1534,16 @@ unlock:
*
* While the mapping holds a reference on the contents of the object, it doesn't
* imply a ref on the object itself.
+ *
+ * IMPORTANT:
+ *
+ * DRM driver writers who look at this function as an example for how to do GEM
+ * mmap support should not implement mmap support like this. The modern way to
+ * implement DRM mmap support is with an mmap offset ioctl (like
+ * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
+ * That way debug tooling like valgrind will understand what's going on; hiding
+ * the mmap call in a driver-private ioctl will break that. The i915 driver only
+ * does CPU mmaps this way because we didn't know better.
*/
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
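A hedged userspace-side sketch of the pattern the comment above recommends: ask the driver for a fake mmap offset and then call mmap(2) on the DRM fd. The header path may differ between systems, the handle is assumed to come from a prior GEM create ioctl, and error handling is intentionally minimal.

#include <stdint.h>
#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

static void *map_bo_through_gtt(int drm_fd, uint32_t handle, size_t size)
{
        struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
        void *ptr;

        /* The driver returns a fake offset to be used with mmap(2). */
        if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
                return NULL;

        ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                   drm_fd, arg.offset);
        return ptr == MAP_FAILED ? NULL : ptr;
}
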
@@ -1762,10 +1840,10 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
drm_gem_free_mmap_offset(&obj->base);
}
-int
+static int
i915_gem_mmap_gtt(struct drm_file *file,
struct drm_device *dev,
- uint32_t handle,
+ uint32_t handle, bool dumb,
uint64_t *offset)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1782,6 +1860,13 @@ i915_gem_mmap_gtt(struct drm_file *file,
goto unlock;
}
+ /*
+ * We don't allow dumb mmaps on objects created using another
+ * interface.
+ */
+ WARN_ONCE(dumb && !(obj->base.dumb || obj->base.import_attach),
+ "Illegal dumb map of accelerated buffer.\n");
+
if (obj->base.size > dev_priv->gtt.mappable_end) {
ret = -E2BIG;
goto out;
@@ -1806,6 +1891,15 @@ unlock:
return ret;
}
+int
+i915_gem_dumb_map_offset(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle,
+ uint64_t *offset)
+{
+ return i915_gem_mmap_gtt(file, dev, handle, true, offset);
+}
+
/**
* i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
* @dev: DRM device
@@ -1827,7 +1921,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_mmap_gtt *args = data;
- return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
+ return i915_gem_mmap_gtt(file, dev, args->handle, false, &args->offset);
}
static inline int
@@ -1945,7 +2039,14 @@ unsigned long
i915_gem_shrink(struct drm_i915_private *dev_priv,
long target, unsigned flags)
{
- const bool purgeable_only = flags & I915_SHRINK_PURGEABLE;
+ const struct {
+ struct list_head *list;
+ unsigned int bit;
+ } phases[] = {
+ { &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
+ { &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
+ { NULL, 0 },
+ }, *phase;
unsigned long count = 0;
/*
@@ -1967,48 +2068,30 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
* dev->struct_mutex and so we won't ever be able to observe an
* object on the bound_list with a reference count equals 0.
*/
- if (flags & I915_SHRINK_UNBOUND) {
+ for (phase = phases; phase->list; phase++) {
struct list_head still_in_list;
- INIT_LIST_HEAD(&still_in_list);
- while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
- struct drm_i915_gem_object *obj;
-
- obj = list_first_entry(&dev_priv->mm.unbound_list,
- typeof(*obj), global_list);
- list_move_tail(&obj->global_list, &still_in_list);
-
- if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
- continue;
-
- drm_gem_object_reference(&obj->base);
-
- if (i915_gem_object_put_pages(obj) == 0)
- count += obj->base.size >> PAGE_SHIFT;
-
- drm_gem_object_unreference(&obj->base);
- }
- list_splice(&still_in_list, &dev_priv->mm.unbound_list);
- }
-
- if (flags & I915_SHRINK_BOUND) {
- struct list_head still_in_list;
+ if ((flags & phase->bit) == 0)
+ continue;
INIT_LIST_HEAD(&still_in_list);
- while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
+ while (count < target && !list_empty(phase->list)) {
struct drm_i915_gem_object *obj;
struct i915_vma *vma, *v;
- obj = list_first_entry(&dev_priv->mm.bound_list,
+ obj = list_first_entry(phase->list,
typeof(*obj), global_list);
list_move_tail(&obj->global_list, &still_in_list);
- if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
+ if (flags & I915_SHRINK_PURGEABLE &&
+ !i915_gem_object_is_purgeable(obj))
continue;
drm_gem_object_reference(&obj->base);
- list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
+ /* For the unbound phase, this should be a no-op! */
+ list_for_each_entry_safe(vma, v,
+ &obj->vma_list, vma_link)
if (i915_vma_unbind(vma))
break;
@@ -2017,7 +2100,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
drm_gem_object_unreference(&obj->base);
}
- list_splice(&still_in_list, &dev_priv->mm.bound_list);
+ list_splice(&still_in_list, phase->list);
}
return count;
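A freestanding sketch of the NULL-terminated phase-table pattern used above, where one loop body is shared by several list/flag pairs. The item and phase types and the SHRINK_* flags are invented stand-ins, not the driver's structures.

#include <stdio.h>

struct item { int size; struct item *next; };

struct phase { struct item **list; unsigned int bit; };

#define SHRINK_UNBOUND  (1u << 0)
#define SHRINK_BOUND    (1u << 1)

static unsigned long shrink(struct item **unbound, struct item **bound,
                            unsigned int flags)
{
        const struct phase phases[] = {
                { unbound, SHRINK_UNBOUND },
                { bound,   SHRINK_BOUND },
                { NULL, 0 },
        }, *phase;
        unsigned long count = 0;

        for (phase = phases; phase->list; phase++) {
                if ((flags & phase->bit) == 0)
                        continue;       /* caller did not ask for this phase */

                while (*phase->list) {
                        struct item *it = *phase->list;

                        *phase->list = it->next;        /* "free" the item */
                        count += it->size;
                }
        }
        return count;
}

int main(void)
{
        struct item a = { 4, NULL }, b = { 8, &a };
        struct item *unbound = &b, *bound = NULL;

        printf("freed %lu units\n", shrink(&unbound, &bound, SHRINK_UNBOUND));
        return 0;
}
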
@@ -2122,6 +2205,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_do_bit_17_swizzle(obj);
+ if (obj->tiling_mode != I915_TILING_NONE &&
+ dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+ i915_gem_object_pin_pages(obj);
+
return 0;
err_pages:
@@ -2420,15 +2507,13 @@ int __i915_add_request(struct intel_engine_cs *ring,
ring->outstanding_lazy_seqno = 0;
ring->preallocated_lazy_request = NULL;
- if (!dev_priv->ums.mm_suspended) {
- i915_queue_hangcheck(ring->dev);
+ i915_queue_hangcheck(ring->dev);
- cancel_delayed_work_sync(&dev_priv->mm.idle_work);
- queue_delayed_work(dev_priv->wq,
- &dev_priv->mm.retire_work,
- round_jiffies_up_relative(HZ));
- intel_mark_busy(dev_priv->dev);
- }
+ cancel_delayed_work_sync(&dev_priv->mm.idle_work);
+ queue_delayed_work(dev_priv->wq,
+ &dev_priv->mm.retire_work,
+ round_jiffies_up_relative(HZ));
+ intel_mark_busy(dev_priv->dev);
if (out_seqno)
*out_seqno = request->seqno;
@@ -2495,12 +2580,20 @@ static void i915_set_reset_status(struct drm_i915_private *dev_priv,
static void i915_gem_free_request(struct drm_i915_gem_request *request)
{
+ struct intel_context *ctx = request->ctx;
+
list_del(&request->list);
i915_gem_request_remove_from_client(request);
- if (request->ctx)
- i915_gem_context_unreference(request->ctx);
+ if (ctx) {
+ if (i915.enable_execlists) {
+ struct intel_engine_cs *ring = request->ring;
+ if (ctx != ring->default_context)
+ intel_lr_context_unpin(ring, ctx);
+ }
+ i915_gem_context_unreference(ctx);
+ }
kfree(request);
}
@@ -2555,6 +2648,23 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
}
/*
+	 * Clear up the execlists queue before freeing the requests, as those
+ * are the ones that keep the context and ringbuffer backing objects
+ * pinned in place.
+ */
+ while (!list_empty(&ring->execlist_queue)) {
+ struct intel_ctx_submit_request *submit_req;
+
+ submit_req = list_first_entry(&ring->execlist_queue,
+ struct intel_ctx_submit_request,
+ execlist_link);
+ list_del(&submit_req->execlist_link);
+ intel_runtime_pm_put(dev_priv);
+ i915_gem_context_unreference(submit_req->ctx);
+ kfree(submit_req);
+ }
+
+ /*
* We must free the requests after all the corresponding objects have
* been moved off active lists. Which is the same order as the normal
* retire_requests function does. This is important if object hold
@@ -2571,18 +2681,6 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
i915_gem_free_request(request);
}
- while (!list_empty(&ring->execlist_queue)) {
- struct intel_ctx_submit_request *submit_req;
-
- submit_req = list_first_entry(&ring->execlist_queue,
- struct intel_ctx_submit_request,
- execlist_link);
- list_del(&submit_req->execlist_link);
- intel_runtime_pm_put(dev_priv);
- i915_gem_context_unreference(submit_req->ctx);
- kfree(submit_req);
- }
-
	/* These may not have been flushed before the reset, do so now */
kfree(ring->preallocated_lazy_request);
ring->preallocated_lazy_request = NULL;
@@ -2719,6 +2817,15 @@ i915_gem_retire_requests(struct drm_device *dev)
for_each_ring(ring, dev_priv, i) {
i915_gem_retire_requests_ring(ring);
idle &= list_empty(&ring->request_list);
+ if (i915.enable_execlists) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ring->execlist_lock, flags);
+ idle &= list_empty(&ring->execlist_queue);
+ spin_unlock_irqrestore(&ring->execlist_lock, flags);
+
+ intel_execlists_retire_requests(ring);
+ }
}
if (idle)
@@ -2811,6 +2918,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
u32 seqno = 0;
int ret = 0;
+ if (args->flags != 0)
+ return -EINVAL;
+
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
@@ -2846,8 +2956,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
mutex_unlock(&dev->struct_mutex);
- return __wait_seqno(ring, seqno, reset_counter, true, &args->timeout_ns,
- file->driver_priv);
+ return __i915_wait_seqno(ring, seqno, reset_counter, true,
+ &args->timeout_ns, file->driver_priv);
out:
drm_gem_object_unreference(&obj->base);
@@ -3166,6 +3276,7 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg,
obj->stride, obj->tiling_mode);
switch (INTEL_INFO(dev)->gen) {
+ case 9:
case 8:
case 7:
case 6:
@@ -3384,46 +3495,6 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
return true;
}
-static void i915_gem_verify_gtt(struct drm_device *dev)
-{
-#if WATCH_GTT
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj;
- int err = 0;
-
- list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
- if (obj->gtt_space == NULL) {
- printk(KERN_ERR "object found on GTT list with no space reserved\n");
- err++;
- continue;
- }
-
- if (obj->cache_level != obj->gtt_space->color) {
- printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
- i915_gem_obj_ggtt_offset(obj),
- i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
- obj->cache_level,
- obj->gtt_space->color);
- err++;
- continue;
- }
-
- if (!i915_gem_valid_gtt_space(dev,
- obj->gtt_space,
- obj->cache_level)) {
- printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
- i915_gem_obj_ggtt_offset(obj),
- i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
- obj->cache_level);
- err++;
- continue;
- }
- }
-
- WARN_ON(err);
-#endif
-}
-
/**
* Finds free space in the GTT aperture and binds the object there.
*/
@@ -3514,25 +3585,10 @@ search_free:
list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&vma->mm_list, &vm->inactive_list);
- if (i915_is_ggtt(vm)) {
- bool mappable, fenceable;
-
- fenceable = (vma->node.size == fence_size &&
- (vma->node.start & (fence_alignment - 1)) == 0);
-
- mappable = (vma->node.start + obj->base.size <=
- dev_priv->gtt.mappable_end);
-
- obj->map_and_fenceable = mappable && fenceable;
- }
-
- WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
-
trace_i915_vma_bind(vma, flags);
vma->bind_vma(vma, obj->cache_level,
- flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0);
+ flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
- i915_gem_verify_gtt(dev);
return vma;
err_remove_node:
@@ -3560,7 +3616,7 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj,
* Stolen memory is always coherent with the GPU as it is explicitly
* marked as wc by the system, or the system is cache-coherent.
*/
- if (obj->stolen)
+ if (obj->stolen || obj->phys_handle)
return false;
/* If the GPU is snooping the contents of the CPU cache,
@@ -3739,7 +3795,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
list_for_each_entry(vma, &obj->vma_list, vma_link)
if (drm_mm_node_allocated(&vma->node))
vma->bind_vma(vma, cache_level,
- obj->has_global_gtt_mapping ? GLOBAL_BIND : 0);
+ vma->bound & GLOBAL_BIND);
}
list_for_each_entry(vma, &obj->vma_list, vma_link)
@@ -3769,7 +3825,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
old_write_domain);
}
- i915_gem_verify_gtt(dev);
return 0;
}
@@ -4067,7 +4122,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (seqno == 0)
return 0;
- ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
+ ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
if (ret == 0)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
@@ -4101,6 +4156,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct i915_vma *vma;
+ unsigned bound;
int ret;
if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
@@ -4109,6 +4165,9 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
return -EINVAL;
+ if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
+ return -EINVAL;
+
vma = i915_gem_obj_to_vma(obj, vm);
if (vma) {
if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
@@ -4130,15 +4189,39 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
}
}
+ bound = vma ? vma->bound : 0;
if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
if (IS_ERR(vma))
return PTR_ERR(vma);
}
- if (flags & PIN_GLOBAL && !obj->has_global_gtt_mapping)
+ if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND))
vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
+ if ((bound ^ vma->bound) & GLOBAL_BIND) {
+ bool mappable, fenceable;
+ u32 fence_size, fence_alignment;
+
+ fence_size = i915_gem_get_gtt_size(obj->base.dev,
+ obj->base.size,
+ obj->tiling_mode);
+ fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
+ obj->base.size,
+ obj->tiling_mode,
+ true);
+
+ fenceable = (vma->node.size == fence_size &&
+ (vma->node.start & (fence_alignment - 1)) == 0);
+
+ mappable = (vma->node.start + obj->base.size <=
+ dev_priv->gtt.mappable_end);
+
+ obj->map_and_fenceable = mappable && fenceable;
+ }
+
+ WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
+
vma->pin_count++;
if (flags & PIN_MAPPABLE)
obj->pin_mappable |= true;
@@ -4193,7 +4276,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
int ret;
- if (INTEL_INFO(dev)->gen >= 6)
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
ret = i915_mutex_lock_interruptible(dev);
@@ -4249,6 +4332,9 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
int ret;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
@@ -4326,6 +4412,7 @@ int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_madvise *args = data;
struct drm_i915_gem_object *obj;
int ret;
@@ -4353,6 +4440,15 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
goto out;
}
+ if (obj->pages &&
+ obj->tiling_mode != I915_TILING_NONE &&
+ dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+ if (obj->madv == I915_MADV_WILLNEED)
+ i915_gem_object_unpin_pages(obj);
+ if (args->madv == I915_MADV_WILLNEED)
+ i915_gem_object_pin_pages(obj);
+ }
+
if (obj->madv != __I915_MADV_PURGED)
obj->madv = args->madv;
@@ -4495,8 +4591,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
}
}
- i915_gem_object_detach_phys(obj);
-
/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
* before progressing. */
if (obj->stolen)
@@ -4504,6 +4598,11 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
WARN_ON(obj->frontbuffer_bits);
+ if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
+ dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
+ obj->tiling_mode != I915_TILING_NONE)
+ i915_gem_object_unpin_pages(obj);
+
if (WARN_ON(obj->pages_pin_count))
obj->pages_pin_count = 0;
if (discard_backing_storage(obj))
@@ -4576,9 +4675,6 @@ i915_gem_suspend(struct drm_device *dev)
int ret = 0;
mutex_lock(&dev->struct_mutex);
- if (dev_priv->ums.mm_suspended)
- goto err;
-
ret = i915_gpu_idle(dev);
if (ret)
goto err;
@@ -4589,15 +4685,7 @@ i915_gem_suspend(struct drm_device *dev)
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_gem_evict_everything(dev);
- i915_kernel_lost_context(dev);
i915_gem_stop_ringbuffers(dev);
-
- /* Hack! Don't let anybody do execbuf while we don't control the chip.
- * We need to replace this with a semaphore, or something.
- * And not confound ums.mm_suspended!
- */
- dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
- DRIVER_MODESET);
mutex_unlock(&dev->struct_mutex);
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
@@ -4888,9 +4976,6 @@ int i915_gem_init(struct drm_device *dev)
}
mutex_unlock(&dev->struct_mutex);
- /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- dev_priv->dri1.allow_batchbuffer = 1;
return ret;
}
@@ -4905,74 +4990,6 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
dev_priv->gt.cleanup_ring(ring);
}
-int
-i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return 0;
-
- if (i915_reset_in_progress(&dev_priv->gpu_error)) {
- DRM_ERROR("Reenabling wedged hardware, good luck\n");
- atomic_set(&dev_priv->gpu_error.reset_counter, 0);
- }
-
- mutex_lock(&dev->struct_mutex);
- dev_priv->ums.mm_suspended = 0;
-
- ret = i915_gem_init_hw(dev);
- if (ret != 0) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
-
- BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
-
- ret = drm_irq_install(dev, dev->pdev->irq);
- if (ret)
- goto cleanup_ringbuffer;
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-
-cleanup_ringbuffer:
- i915_gem_cleanup_ringbuffer(dev);
- dev_priv->ums.mm_suspended = 1;
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-
-int
-i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return 0;
-
- mutex_lock(&dev->struct_mutex);
- drm_irq_uninstall(dev);
- mutex_unlock(&dev->struct_mutex);
-
- return i915_gem_suspend(dev);
-}
-
-void
-i915_gem_lastclose(struct drm_device *dev)
-{
- int ret;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return;
-
- ret = i915_gem_suspend(dev);
- if (ret)
- DRM_ERROR("failed to idle hardware: %d\n", ret);
-}
-
static void
init_ring_lists(struct intel_engine_cs *ring)
{
@@ -5119,6 +5136,15 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
return ret;
}
+/**
+ * i915_gem_track_fb - update frontbuffer tracking
+ * @old: current GEM buffer for the frontbuffer slots
+ * @new: new GEM buffer for the frontbuffer slots
+ * @frontbuffer_bits: bitmask of frontbuffer slots
+ *
+ * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
+ * from @old and setting them in @new. Both @old and @new can be NULL.
+ */
void i915_gem_track_fb(struct drm_i915_gem_object *old,
struct drm_i915_gem_object *new,
unsigned frontbuffer_bits)
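A tiny illustration of the bit transfer the kernel-doc above describes: the slots leave the old buffer and land on the new one. The struct here is a stand-in for the GEM object's frontbuffer_bits field, not the driver's type.

#include <stdio.h>

struct fb_obj { unsigned int frontbuffer_bits; };

static void track_fb(struct fb_obj *old, struct fb_obj *new_obj,
                     unsigned int bits)
{
        if (old)
                old->frontbuffer_bits &= ~bits;         /* clear from @old */
        if (new_obj)
                new_obj->frontbuffer_bits |= bits;      /* set in @new */
}

int main(void)
{
        struct fb_obj a = { 0x3 }, b = { 0x0 };

        track_fb(&a, &b, 0x1);
        printf("old=%#x new=%#x\n", a.frontbuffer_bits, b.frontbuffer_bits);
        return 0;
}
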
@@ -5302,7 +5328,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
struct drm_device *dev = dev_priv->dev;
struct drm_i915_gem_object *obj;
unsigned long timeout = msecs_to_jiffies(5000) + 1;
- unsigned long pinned, bound, unbound, freed;
+ unsigned long pinned, bound, unbound, freed_pages;
bool was_interruptible;
bool unlock;
@@ -5319,7 +5345,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
was_interruptible = dev_priv->mm.interruptible;
dev_priv->mm.interruptible = false;
- freed = i915_gem_shrink_all(dev_priv);
+ freed_pages = i915_gem_shrink_all(dev_priv);
dev_priv->mm.interruptible = was_interruptible;
@@ -5350,14 +5376,15 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
if (unlock)
mutex_unlock(&dev->struct_mutex);
- pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
- freed, pinned);
+ if (freed_pages || unbound || bound)
+ pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
+ freed_pages << PAGE_SHIFT, pinned);
if (unbound || bound)
pr_err("%lu and %lu bytes still available in the "
"bound and unbound GPU page lists.\n",
bound, unbound);
- *(unsigned long *)ptr += freed;
+ *(unsigned long *)ptr += freed_pages;
return NOTIFY_DONE;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index a5221d8f158..d17ff435f27 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -88,6 +88,7 @@
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "i915_trace.h"
/* This is a HW constraint. The value below is the largest known requirement
* I've seen in a spec to date, and that was a workaround for a non-shipping
@@ -137,6 +138,8 @@ void i915_gem_context_free(struct kref *ctx_ref)
struct intel_context *ctx = container_of(ctx_ref,
typeof(*ctx), ref);
+ trace_i915_context_free(ctx);
+
if (i915.enable_execlists)
intel_lr_context_free(ctx);
@@ -274,6 +277,8 @@ i915_gem_create_context(struct drm_device *dev,
ctx->ppgtt = ppgtt;
}
+ trace_i915_context_create(ctx);
+
return ctx;
err_unpin:
@@ -522,6 +527,7 @@ static int do_switch(struct intel_engine_cs *ring,
struct intel_context *from = ring->last_context;
u32 hw_flags = 0;
bool uninitialized = false;
+ struct i915_vma *vma;
int ret, i;
if (from != NULL && ring == &dev_priv->ring[RCS]) {
@@ -548,6 +554,7 @@ static int do_switch(struct intel_engine_cs *ring,
from = ring->last_context;
if (to->ppgtt) {
+ trace_switch_mm(ring, to);
ret = to->ppgtt->switch_mm(to->ppgtt, ring);
if (ret)
goto unpin_out;
@@ -571,11 +578,10 @@ static int do_switch(struct intel_engine_cs *ring,
if (ret)
goto unpin_out;
- if (!to->legacy_hw_ctx.rcs_state->has_global_gtt_mapping) {
- struct i915_vma *vma = i915_gem_obj_to_vma(to->legacy_hw_ctx.rcs_state,
- &dev_priv->gtt.base);
- vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level, GLOBAL_BIND);
- }
+ vma = i915_gem_obj_to_ggtt(to->legacy_hw_ctx.rcs_state);
+ if (!(vma->bound & GLOBAL_BIND))
+ vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level,
+ GLOBAL_BIND);
if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
hw_flags |= MI_RESTORE_INHIBIT;
@@ -629,7 +635,7 @@ done:
if (uninitialized) {
if (ring->init_context) {
- ret = ring->init_context(ring);
+ ret = ring->init_context(ring, to);
if (ret)
DRM_ERROR("ring init context: %d\n", ret);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 1a0611bb576..f06027ba3ee 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -121,6 +121,9 @@ eb_lookup_vmas(struct eb_vmas *eb,
goto err;
}
+ WARN_ONCE(obj->base.dumb,
+ "GPU use of dumb buffer is illegal.\n");
+
drm_gem_object_reference(&obj->base);
list_add_tail(&obj->obj_exec_link, &objects);
}
@@ -357,12 +360,9 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
* through the ppgtt for non_secure batchbuffers. */
if (unlikely(IS_GEN6(dev) &&
reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
- !target_i915_obj->has_global_gtt_mapping)) {
- struct i915_vma *vma =
- list_first_entry(&target_i915_obj->vma_list,
- typeof(*vma), vma_link);
- vma->bind_vma(vma, target_i915_obj->cache_level, GLOBAL_BIND);
- }
+ !(target_vma->bound & GLOBAL_BIND)))
+ target_vma->bind_vma(target_vma, target_i915_obj->cache_level,
+ GLOBAL_BIND);
/* Validate that the target is in a valid r/w GPU domain */
if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
@@ -531,7 +531,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
flags = 0;
if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
- flags |= PIN_MAPPABLE;
+ flags |= PIN_GLOBAL | PIN_MAPPABLE;
if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
flags |= PIN_GLOBAL;
if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
@@ -1023,6 +1023,47 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
return 0;
}
+static int
+i915_emit_box(struct intel_engine_cs *ring,
+ struct drm_clip_rect *box,
+ int DR1, int DR4)
+{
+ int ret;
+
+ if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
+ box->y2 <= 0 || box->x2 <= 0) {
+ DRM_ERROR("Bad box %d,%d..%d,%d\n",
+ box->x1, box->y1, box->x2, box->y2);
+ return -EINVAL;
+ }
+
+ if (INTEL_INFO(ring->dev)->gen >= 4) {
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO_I965);
+ intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
+ intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
+ intel_ring_emit(ring, DR4);
+ } else {
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO);
+ intel_ring_emit(ring, DR1);
+ intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
+ intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
+ intel_ring_emit(ring, DR4);
+ intel_ring_emit(ring, 0);
+ }
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+
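A sketch of the coordinate packing used by i915_emit_box above: each corner goes into one dword as (x & 0xffff) | y << 16, with the bottom-right corner stored inclusive (x2 - 1, y2 - 1). The sample coordinates are invented for illustration.

#include <stdio.h>
#include <stdint.h>

static uint32_t pack_corner(uint32_t x, uint32_t y)
{
        return (x & 0xffff) | (y << 16);
}

int main(void)
{
        uint32_t x1 = 0, y1 = 0, x2 = 1920, y2 = 1080;

        printf("top-left     %#010x\n", pack_corner(x1, y1));
        printf("bottom-right %#010x\n", pack_corner(x2 - 1, y2 - 1));
        return 0;
}
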
int
i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
struct intel_engine_cs *ring,
@@ -1151,7 +1192,7 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
exec_len = args->batch_len;
if (cliprects) {
for (i = 0; i < args->num_cliprects; i++) {
- ret = i915_emit_box(dev, &cliprects[i],
+ ret = i915_emit_box(ring, &cliprects[i],
args->DR1, args->DR4);
if (ret)
goto error;
@@ -1300,12 +1341,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret)
goto pre_mutex_err;
- if (dev_priv->ums.mm_suspended) {
- mutex_unlock(&dev->struct_mutex);
- ret = -EBUSY;
- goto pre_mutex_err;
- }
-
ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
if (IS_ERR(ctx)) {
mutex_unlock(&dev->struct_mutex);
@@ -1368,17 +1403,19 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
batch_obj,
args->batch_start_offset,
file->is_master);
- if (ret)
- goto err;
-
- /*
- * XXX: Actually do this when enabling batch copy...
- *
- * Set the DISPATCH_SECURE bit to remove the NON_SECURE bit
- * from MI_BATCH_BUFFER_START commands issued in the
- * dispatch_execbuffer implementations. We specifically don't
- * want that set when the command parser is enabled.
- */
+ if (ret) {
+ if (ret != -EACCES)
+ goto err;
+ } else {
+ /*
+ * XXX: Actually do this when enabling batch copy...
+ *
+ * Set the DISPATCH_SECURE bit to remove the NON_SECURE bit
+ * from MI_BATCH_BUFFER_START commands issued in the
+ * dispatch_execbuffer implementations. We specifically don't
+ * want that set when the command parser is enabled.
+ */
+ }
}
/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 728938f0234..171f6eafdee 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -35,13 +35,26 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);
static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
{
- if (enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
+ bool has_aliasing_ppgtt;
+ bool has_full_ppgtt;
+
+ has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
+ has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
+ if (IS_GEN8(dev))
+ has_full_ppgtt = false; /* XXX why? */
+
+ /*
+ * We don't allow disabling PPGTT for gen9+ as it's a requirement for
+ * execlists, the sole mechanism available to submit work.
+ */
+ if (INTEL_INFO(dev)->gen < 9 &&
+ (enable_ppgtt == 0 || !has_aliasing_ppgtt))
return 0;
if (enable_ppgtt == 1)
return 1;
- if (enable_ppgtt == 2 && HAS_PPGTT(dev))
+ if (enable_ppgtt == 2 && has_full_ppgtt)
return 2;
#ifdef CONFIG_INTEL_IOMMU
@@ -59,7 +72,7 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
return 0;
}
- return HAS_ALIASING_PPGTT(dev) ? 1 : 0;
+ return has_aliasing_ppgtt ? 1 : 0;
}
@@ -156,9 +169,6 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
- /* Mark the page as writeable. Other platforms don't have a
- * setting for read-only/writable, so this matches that behavior.
- */
if (!(flags & PTE_READ_ONLY))
pte |= BYT_PTE_WRITEABLE;
@@ -1092,7 +1102,7 @@ static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
if (INTEL_INFO(dev)->gen < 8)
return gen6_ppgtt_init(ppgtt);
- else if (IS_GEN8(dev))
+ else if (IS_GEN8(dev) || IS_GEN9(dev))
return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
else
BUG();
@@ -1166,6 +1176,8 @@ i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
ppgtt->file_priv = fpriv;
+ trace_i915_ppgtt_create(&ppgtt->base);
+
return ppgtt;
}
@@ -1174,6 +1186,8 @@ void i915_ppgtt_release(struct kref *kref)
struct i915_hw_ppgtt *ppgtt =
container_of(kref, struct i915_hw_ppgtt, ref);
+ trace_i915_ppgtt_release(&ppgtt->base);
+
/* vmas should already be unbound */
WARN_ON(!list_empty(&ppgtt->base.active_list));
WARN_ON(!list_empty(&ppgtt->base.inactive_list));
@@ -1258,7 +1272,7 @@ void i915_check_and_clear_faults(struct drm_device *dev)
fault_reg = I915_READ(RING_FAULT_REG(ring));
if (fault_reg & RING_FAULT_VALID) {
DRM_DEBUG_DRIVER("Unexpected fault\n"
- "\tAddr: 0x%08lx\\n"
+ "\tAddr: 0x%08lx\n"
"\tAddress space: %s\n"
"\tSource ID: %d\n"
"\tType: %d\n",
@@ -1328,7 +1342,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
* Unfortunately above, we've just wiped out the mappings
* without telling our object about it. So we need to fake it.
*/
- obj->has_global_gtt_mapping = 0;
+ vma->bound &= ~GLOBAL_BIND;
vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
}
@@ -1525,7 +1539,7 @@ static void i915_ggtt_bind_vma(struct i915_vma *vma,
BUG_ON(!i915_is_ggtt(vma->vm));
intel_gtt_insert_sg_entries(vma->obj->pages, entry, flags);
- vma->obj->has_global_gtt_mapping = 1;
+ vma->bound = GLOBAL_BIND;
}
static void i915_ggtt_clear_range(struct i915_address_space *vm,
@@ -1544,7 +1558,7 @@ static void i915_ggtt_unbind_vma(struct i915_vma *vma)
const unsigned int size = vma->obj->base.size >> PAGE_SHIFT;
BUG_ON(!i915_is_ggtt(vma->vm));
- vma->obj->has_global_gtt_mapping = 0;
+ vma->bound = 0;
intel_gtt_clear_range(first, size);
}
@@ -1572,24 +1586,24 @@ static void ggtt_bind_vma(struct i915_vma *vma,
* flags. At all other times, the GPU will use the aliasing PPGTT.
*/
if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
- if (!obj->has_global_gtt_mapping ||
+ if (!(vma->bound & GLOBAL_BIND) ||
(cache_level != obj->cache_level)) {
vma->vm->insert_entries(vma->vm, obj->pages,
vma->node.start,
cache_level, flags);
- obj->has_global_gtt_mapping = 1;
+ vma->bound |= GLOBAL_BIND;
}
}
if (dev_priv->mm.aliasing_ppgtt &&
- (!obj->has_aliasing_ppgtt_mapping ||
+ (!(vma->bound & LOCAL_BIND) ||
(cache_level != obj->cache_level))) {
struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
appgtt->base.insert_entries(&appgtt->base,
vma->obj->pages,
vma->node.start,
cache_level, flags);
- vma->obj->has_aliasing_ppgtt_mapping = 1;
+ vma->bound |= LOCAL_BIND;
}
}
@@ -1599,21 +1613,21 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj = vma->obj;
- if (obj->has_global_gtt_mapping) {
+ if (vma->bound & GLOBAL_BIND) {
vma->vm->clear_range(vma->vm,
vma->node.start,
obj->base.size,
true);
- obj->has_global_gtt_mapping = 0;
+ vma->bound &= ~GLOBAL_BIND;
}
- if (obj->has_aliasing_ppgtt_mapping) {
+ if (vma->bound & LOCAL_BIND) {
struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
appgtt->base.clear_range(&appgtt->base,
vma->node.start,
obj->base.size,
true);
- obj->has_aliasing_ppgtt_mapping = 0;
+ vma->bound &= ~LOCAL_BIND;
}
}
@@ -1650,10 +1664,10 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node,
}
}
-int i915_gem_setup_global_gtt(struct drm_device *dev,
- unsigned long start,
- unsigned long mappable_end,
- unsigned long end)
+static int i915_gem_setup_global_gtt(struct drm_device *dev,
+ unsigned long start,
+ unsigned long mappable_end,
+ unsigned long end)
{
/* Let GEM Manage all of the aperture.
*
@@ -1691,7 +1705,7 @@ int i915_gem_setup_global_gtt(struct drm_device *dev,
DRM_DEBUG_KMS("Reservation failed: %i\n", ret);
return ret;
}
- obj->has_global_gtt_mapping = 1;
+ vma->bound |= GLOBAL_BIND;
}
dev_priv->gtt.base.start = start;
@@ -1764,7 +1778,6 @@ static int setup_scratch_page(struct drm_device *dev)
page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
if (page == NULL)
return -ENOMEM;
- get_page(page);
set_pages_uc(page, 1);
#ifdef CONFIG_INTEL_IOMMU
@@ -1789,7 +1802,6 @@ static void teardown_scratch_page(struct drm_device *dev)
set_pages_wb(page, 1);
pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- put_page(page);
__free_page(page);
}
@@ -1859,6 +1871,18 @@ static size_t chv_get_stolen_size(u16 gmch_ctrl)
return (gmch_ctrl - 0x17 + 9) << 22;
}
+static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
+{
+ gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
+ gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;
+
+ if (gen9_gmch_ctl < 0xf0)
+ return gen9_gmch_ctl << 25; /* 32 MB units */
+ else
+		/* 4MB increments starting at 0xf0 (0xf0 -> 4MB) */
+ return (gen9_gmch_ctl - 0xf0 + 1) << 22;
+}
+
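A worked decode of the gen9 stolen-size field introduced above. The helper below takes the already shifted and masked GMS field (the kernel function applies BDW_GMCH_GMS_SHIFT/MASK first), and the sample register values are made up for illustration.

#include <stdio.h>
#include <stdint.h>

static uint64_t gen9_stolen_bytes(uint16_t gms)
{
        if (gms < 0xf0)
                return (uint64_t)gms << 25;             /* 32 MB units */
        return (uint64_t)(gms - 0xf0 + 1) << 22;        /* 4 MB units from 0xf0 */
}

int main(void)
{
        /* Expected: 0x02 -> 64 MB, 0xf0 -> 4 MB, 0xf2 -> 12 MB */
        printf("%llu MB, %llu MB, %llu MB\n",
               (unsigned long long)(gen9_stolen_bytes(0x02) >> 20),
               (unsigned long long)(gen9_stolen_bytes(0xf0) >> 20),
               (unsigned long long)(gen9_stolen_bytes(0xf2) >> 20));
        return 0;
}
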
static int ggtt_probe_common(struct drm_device *dev,
size_t gtt_size)
{
@@ -1934,9 +1958,17 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
* Only the snoop bit has meaning for CHV, the rest is
* ignored.
*
- * Note that the harware enforces snooping for all page
- * table accesses. The snoop bit is actually ignored for
- * PDEs.
+ * The hardware will never snoop for certain types of accesses:
+ * - CPU GTT (GMADR->GGTT->no snoop->memory)
+ * - PPGTT page tables
+ * - some other special cycles
+ *
+ * As with BDW, we also need to consider the following for GT accesses:
+ * "For GGTT, there is NO pat_sel[2:0] from the entry,
+ * so RTL will always use the value corresponding to
+ * pat_sel = 000".
+ * Which means we must set the snoop bit in PAT entry 0
+ * in order to keep the global status page working.
*/
pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
GEN8_PPAT(1, 0) |
@@ -1971,7 +2003,10 @@ static int gen8_gmch_probe(struct drm_device *dev,
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- if (IS_CHERRYVIEW(dev)) {
+ if (INTEL_INFO(dev)->gen >= 9) {
+ *stolen = gen9_get_stolen_size(snb_gmch_ctl);
+ gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
+ } else if (IS_CHERRYVIEW(dev)) {
*stolen = chv_get_stolen_size(snb_gmch_ctl);
gtt_size = chv_get_total_gtt_size(snb_gmch_ctl);
} else {
@@ -2143,6 +2178,7 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
vma->obj = obj;
switch (INTEL_INFO(vm->dev)->gen) {
+ case 9:
case 8:
case 7:
case 6:
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index d5c14af51e9..beaf4bcfdac 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -123,6 +123,12 @@ struct i915_vma {
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
+ /** Flags and address space this VMA is bound to */
+#define GLOBAL_BIND (1<<0)
+#define LOCAL_BIND (1<<1)
+#define PTE_READ_ONLY (1<<2)
+ unsigned int bound : 4;
+
/** This object's place on the active/inactive lists */
struct list_head mm_list;
@@ -155,8 +161,6 @@ struct i915_vma {
* setting the valid PTE entries to a reserved scratch page. */
void (*unbind_vma)(struct i915_vma *vma);
/* Map an object into an address space with the given cache flags. */
-#define GLOBAL_BIND (1<<0)
-#define PTE_READ_ONLY (1<<1)
void (*bind_vma)(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags);
@@ -270,8 +274,6 @@ struct i915_hw_ppgtt {
int i915_gem_gtt_init(struct drm_device *dev);
void i915_gem_init_global_gtt(struct drm_device *dev);
-int i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
- unsigned long mappable_end, unsigned long end);
void i915_global_gtt_cleanup(struct drm_device *dev);
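A small sketch of the per-VMA flag bookkeeping introduced in the header above, where GLOBAL_BIND and LOCAL_BIND now live in vma->bound and are tested, set and cleared with masks. The struct below is a stand-in, not the driver's i915_vma.

#include <stdio.h>

#define GLOBAL_BIND     (1 << 0)
#define LOCAL_BIND      (1 << 1)

struct vma_stub { unsigned int bound : 4; };

int main(void)
{
        struct vma_stub vma = { 0 };

        vma.bound |= GLOBAL_BIND;               /* bound into the global GTT */
        if (!(vma.bound & LOCAL_BIND))
                vma.bound |= LOCAL_BIND;        /* and into the aliasing PPGTT */

        vma.bound &= ~GLOBAL_BIND;              /* unbind from the GGTT only */
        printf("bound=%#x\n", vma.bound);       /* prints 0x2 */
        return 0;
}
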
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index a9a62d75aa5..98dcd94acba 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -38,6 +38,8 @@ render_state_get_rodata(struct drm_device *dev, const int gen)
return &gen7_null_state;
case 8:
return &gen8_null_state;
+ case 9:
+ return &gen9_null_state;
}
return NULL;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 85fda6b803e..a2045848bd1 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -137,7 +137,11 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
r = devm_request_mem_region(dev->dev, base + 1,
dev_priv->gtt.stolen_size - 1,
"Graphics Stolen Memory");
- if (r == NULL) {
+ /*
+ * GEN3 firmware likes to smash pci bridges into the stolen
+ * range. Apparently this works.
+ */
+ if (r == NULL && !IS_GEN3(dev)) {
DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
base, base + (uint32_t)dev_priv->gtt.stolen_size);
base = 0;
@@ -533,7 +537,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
}
}
- obj->has_global_gtt_mapping = 1;
+ vma->bound |= GLOBAL_BIND;
list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&vma->mm_list, &ggtt->inactive_list);
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 2b1eaa29ada..4727a4e2c87 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -102,22 +102,33 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
} else if (INTEL_INFO(dev)->gen >= 6) {
- uint32_t dimm_c0, dimm_c1;
- dimm_c0 = I915_READ(MAD_DIMM_C0);
- dimm_c1 = I915_READ(MAD_DIMM_C1);
- dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
- dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
- /* Enable swizzling when the channels are populated with
- * identically sized dimms. We don't need to check the 3rd
- * channel because no cpu with gpu attached ships in that
- * configuration. Also, swizzling only makes sense for 2
- * channels anyway. */
- if (dimm_c0 == dimm_c1) {
- swizzle_x = I915_BIT_6_SWIZZLE_9_10;
- swizzle_y = I915_BIT_6_SWIZZLE_9;
+ if (dev_priv->preserve_bios_swizzle) {
+ if (I915_READ(DISP_ARB_CTL) &
+ DISP_TILE_SURFACE_SWIZZLING) {
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ } else {
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ }
} else {
- swizzle_x = I915_BIT_6_SWIZZLE_NONE;
- swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ uint32_t dimm_c0, dimm_c1;
+ dimm_c0 = I915_READ(MAD_DIMM_C0);
+ dimm_c1 = I915_READ(MAD_DIMM_C1);
+ dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+ dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+ /* Enable swizzling when the channels are populated
+ * with identically sized dimms. We don't need to check
+ * the 3rd channel because no cpu with gpu attached
+ * ships in that configuration. Also, swizzling only
+ * makes sense for 2 channels anyway. */
+ if (dimm_c0 == dimm_c1) {
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ } else {
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ }
}
} else if (IS_GEN5(dev)) {
/* On Ironlake whatever DRAM config, GPU always do
@@ -167,6 +178,15 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
}
break;
}
+
+ /* check for L-shaped memory aka modified enhanced addressing */
+ if (IS_GEN4(dev)) {
+ uint32_t ddc2 = I915_READ(DCC2);
+
+ if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
+ dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
+ }
+
if (dcc == 0xffffffff) {
DRM_ERROR("Couldn't read from MCHBAR. "
"Disabling tiling.\n");
@@ -369,6 +389,15 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
ret = i915_gem_object_ggtt_unbind(obj);
if (ret == 0) {
+ if (obj->pages &&
+ obj->madv == I915_MADV_WILLNEED &&
+ dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+ if (args->tiling_mode == I915_TILING_NONE)
+ i915_gem_object_unpin_pages(obj);
+ if (obj->tiling_mode == I915_TILING_NONE)
+ i915_gem_object_pin_pages(obj);
+ }
+
obj->fence_dirty =
obj->last_fenced_seqno ||
obj->fence_reg != I915_FENCE_REG_NONE;
@@ -434,6 +463,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
}
/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
+ args->phys_swizzle_mode = args->swizzle_mode;
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 2c87a797213..cdaee6ce05f 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -242,11 +242,15 @@ static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
struct drm_device *dev,
- struct drm_i915_error_ring *ring)
+ struct drm_i915_error_state *error,
+ int ring_idx)
{
+ struct drm_i915_error_ring *ring = &error->ring[ring_idx];
+
if (!ring->valid)
return;
+ err_printf(m, "%s command stream:\n", ring_str(ring_idx));
err_printf(m, " HEAD: 0x%08x\n", ring->head);
err_printf(m, " TAIL: 0x%08x\n", ring->tail);
err_printf(m, " CTL: 0x%08x\n", ring->ctl);
@@ -388,10 +392,8 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
if (INTEL_INFO(dev)->gen == 7)
err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
- for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
- err_printf(m, "%s command stream:\n", ring_str(i));
- i915_ring_error_state(m, dev, &error->ring[i]);
- }
+ for (i = 0; i < ARRAY_SIZE(error->ring); i++)
+ i915_ring_error_state(m, dev, error, i);
for (i = 0; i < error->vm_count; i++) {
err_printf(m, "vm[%d]\n", i);
@@ -565,6 +567,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
struct i915_address_space *vm)
{
struct drm_i915_error_object *dst;
+ struct i915_vma *vma = NULL;
int num_pages;
bool use_ggtt;
int i = 0;
@@ -585,16 +588,17 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
dst->gtt_offset = -1;
reloc_offset = dst->gtt_offset;
+ if (i915_is_ggtt(vm))
+ vma = i915_gem_obj_to_ggtt(src);
use_ggtt = (src->cache_level == I915_CACHE_NONE &&
- i915_is_ggtt(vm) &&
- src->has_global_gtt_mapping &&
- reloc_offset + num_pages * PAGE_SIZE <= dev_priv->gtt.mappable_end);
+ vma && (vma->bound & GLOBAL_BIND) &&
+ reloc_offset + num_pages * PAGE_SIZE <= dev_priv->gtt.mappable_end);
/* Cannot access stolen address directly, try to use the aperture */
if (src->stolen) {
use_ggtt = true;
- if (!src->has_global_gtt_mapping)
+ if (!(vma && vma->bound & GLOBAL_BIND))
goto unwind;
reloc_offset = i915_gem_obj_ggtt_offset(src);
@@ -765,6 +769,7 @@ static void i915_gem_record_fences(struct drm_device *dev,
/* Fences */
switch (INTEL_INFO(dev)->gen) {
+ case 9:
case 8:
case 7:
case 6:
@@ -804,9 +809,8 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
if (!error->semaphore_obj)
error->semaphore_obj =
- i915_error_object_create(dev_priv,
- dev_priv->semaphore_obj,
- &dev_priv->gtt.base);
+ i915_error_ggtt_object_create(dev_priv,
+ dev_priv->semaphore_obj);
for_each_ring(to, dev_priv, i) {
int idx;
@@ -923,6 +927,7 @@ static void i915_record_ring_state(struct drm_device *dev,
ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));
switch (INTEL_INFO(dev)->gen) {
+ case 9:
case 8:
for (i = 0; i < 4; i++) {
ering->vm_info.pdp[i] =
@@ -1238,7 +1243,8 @@ static void i915_error_capture_msg(struct drm_device *dev,
ecode = i915_error_generate_code(dev_priv, error, &ring_id);
len = scnprintf(error->error_msg, sizeof(error->error_msg),
- "GPU HANG: ecode %d:0x%08x", ring_id, ecode);
+ "GPU HANG: ecode %d:%d:0x%08x",
+ INTEL_INFO(dev)->gen, ring_id, ecode);
if (ring_id != -1 && error->ring[ring_id].pid != -1)
len += scnprintf(error->error_msg + len,
@@ -1326,13 +1332,12 @@ void i915_error_state_get(struct drm_device *dev,
struct i915_error_state_file_priv *error_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long flags;
- spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+ spin_lock_irq(&dev_priv->gpu_error.lock);
error_priv->error = dev_priv->gpu_error.first_error;
if (error_priv->error)
kref_get(&error_priv->error->ref);
- spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+ spin_unlock_irq(&dev_priv->gpu_error.lock);
}
@@ -1346,12 +1351,11 @@ void i915_destroy_error_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_error_state *error;
- unsigned long flags;
- spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+ spin_lock_irq(&dev_priv->gpu_error.lock);
error = dev_priv->gpu_error.first_error;
dev_priv->gpu_error.first_error = NULL;
- spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+ spin_unlock_irq(&dev_priv->gpu_error.lock);
if (error)
kref_put(&error->ref, i915_error_state_free);
@@ -1389,6 +1393,7 @@ void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
WARN_ONCE(1, "Unsupported platform\n");
case 7:
case 8:
+ case 9:
instdone[0] = I915_READ(GEN7_INSTDONE_1);
instdone[1] = I915_READ(GEN7_SC_INSTDONE);
instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 2e0613e2625..176de6322e4 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -189,7 +189,6 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
[DRM_I915_ALLOC] = compat_i915_alloc
};
-#ifdef CONFIG_COMPAT
/**
* Called whenever a 32-bit process running under a 64-bit kernel
* performs an ioctl on /dev/dri/card<n>.
@@ -218,4 +217,3 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return ret;
}
-#endif
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index f66392b6e28..981834b0f9b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -37,6 +37,14 @@
#include "i915_trace.h"
#include "intel_drv.h"
+/**
+ * DOC: interrupt handling
+ *
+ * These functions provide the basic support for enabling and disabling
+ * interrupt handling. There's a lot more functionality in i915_irq.c
+ * and related files, but that will be described in separate chapters.
+ */
+
static const u32 hpd_ibx[] = {
[HPD_CRT] = SDE_CRT_HOTPLUG,
[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
@@ -118,20 +126,22 @@ static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
- I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
- POSTING_READ(GEN8_##type##_IER(which)); \
+ I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
+ POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)
#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
- I915_WRITE(type##IMR, (imr_val)); \
I915_WRITE(type##IER, (ier_val)); \
- POSTING_READ(type##IER); \
+ I915_WRITE(type##IMR, (imr_val)); \
+ POSTING_READ(type##IMR); \
} while (0)
+static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
+
/* For display hotplug interrupt */
-static void
+void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
assert_spin_locked(&dev_priv->irq_lock);
@@ -146,7 +156,7 @@ ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
}
}
-static void
+void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
assert_spin_locked(&dev_priv->irq_lock);
@@ -192,71 +202,28 @@ void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
ilk_update_gt_irq(dev_priv, mask, 0);
}
-/**
- * snb_update_pm_irq - update GEN6_PMIMR
- * @dev_priv: driver private
- * @interrupt_mask: mask of interrupt bits to update
- * @enabled_irq_mask: mask of interrupt bits to enable
- */
-static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
- uint32_t interrupt_mask,
- uint32_t enabled_irq_mask)
-{
- uint32_t new_val;
-
- assert_spin_locked(&dev_priv->irq_lock);
-
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
- return;
-
- new_val = dev_priv->pm_irq_mask;
- new_val &= ~interrupt_mask;
- new_val |= (~enabled_irq_mask & interrupt_mask);
-
- if (new_val != dev_priv->pm_irq_mask) {
- dev_priv->pm_irq_mask = new_val;
- I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
- POSTING_READ(GEN6_PMIMR);
- }
-}
-
-void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
{
- snb_update_pm_irq(dev_priv, mask, mask);
+ return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}
-void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
{
- snb_update_pm_irq(dev_priv, mask, 0);
+ return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}
-static bool ivb_can_enable_err_int(struct drm_device *dev)
+static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *crtc;
- enum pipe pipe;
-
- assert_spin_locked(&dev_priv->irq_lock);
-
- for_each_pipe(dev_priv, pipe) {
- crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
-
- if (crtc->cpu_fifo_underrun_disabled)
- return false;
- }
-
- return true;
+ return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}
/**
- * bdw_update_pm_irq - update GT interrupt 2
+ * snb_update_pm_irq - update GEN6_PMIMR
* @dev_priv: driver private
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
- *
- * Copied from the snb function, updated with relevant register offsets
*/
-static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
+static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
uint32_t interrupt_mask,
uint32_t enabled_irq_mask)
{
@@ -264,144 +231,87 @@ static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
assert_spin_locked(&dev_priv->irq_lock);
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
- return;
-
new_val = dev_priv->pm_irq_mask;
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
if (new_val != dev_priv->pm_irq_mask) {
dev_priv->pm_irq_mask = new_val;
- I915_WRITE(GEN8_GT_IMR(2), dev_priv->pm_irq_mask);
- POSTING_READ(GEN8_GT_IMR(2));
+ I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
+ POSTING_READ(gen6_pm_imr(dev_priv));
}
}
-void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
- bdw_update_pm_irq(dev_priv, mask, mask);
-}
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+ return;
-void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
-{
- bdw_update_pm_irq(dev_priv, mask, 0);
+ snb_update_pm_irq(dev_priv, mask, mask);
}
-static bool cpt_can_enable_serr_int(struct drm_device *dev)
+static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
+ uint32_t mask)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- enum pipe pipe;
- struct intel_crtc *crtc;
-
- assert_spin_locked(&dev_priv->irq_lock);
-
- for_each_pipe(dev_priv, pipe) {
- crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
-
- if (crtc->pch_fifo_underrun_disabled)
- return false;
- }
-
- return true;
+ snb_update_pm_irq(dev_priv, mask, 0);
}
-void i9xx_check_fifo_underruns(struct drm_device *dev)
+void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *crtc;
- unsigned long flags;
-
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
-
- for_each_intel_crtc(dev, crtc) {
- u32 reg = PIPESTAT(crtc->pipe);
- u32 pipestat;
-
- if (crtc->cpu_fifo_underrun_disabled)
- continue;
-
- pipestat = I915_READ(reg) & 0xffff0000;
- if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
- continue;
-
- I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
- POSTING_READ(reg);
-
- DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
- }
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+ return;
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ __gen6_disable_pm_irq(dev_priv, mask);
}
-static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
- enum pipe pipe,
- bool enable, bool old)
+void gen6_reset_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 reg = PIPESTAT(pipe);
- u32 pipestat = I915_READ(reg) & 0xffff0000;
-
- assert_spin_locked(&dev_priv->irq_lock);
+ uint32_t reg = gen6_pm_iir(dev_priv);
- if (enable) {
- I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
- POSTING_READ(reg);
- } else {
- if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS)
- DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
- }
+ spin_lock_irq(&dev_priv->irq_lock);
+ I915_WRITE(reg, dev_priv->pm_rps_events);
+ I915_WRITE(reg, dev_priv->pm_rps_events);
+ POSTING_READ(reg);
+ spin_unlock_irq(&dev_priv->irq_lock);
}
-static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
- enum pipe pipe, bool enable)
+void gen6_enable_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
- DE_PIPEB_FIFO_UNDERRUN;
- if (enable)
- ironlake_enable_display_irq(dev_priv, bit);
- else
- ironlake_disable_display_irq(dev_priv, bit);
+ spin_lock_irq(&dev_priv->irq_lock);
+ WARN_ON(dev_priv->rps.pm_iir);
+ WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
+ dev_priv->rps.interrupts_enabled = true;
+ gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ spin_unlock_irq(&dev_priv->irq_lock);
}
-static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
- enum pipe pipe,
- bool enable, bool old)
+void gen6_disable_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (enable) {
- I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
- if (!ivb_can_enable_err_int(dev))
- return;
+ spin_lock_irq(&dev_priv->irq_lock);
+ dev_priv->rps.interrupts_enabled = false;
+ spin_unlock_irq(&dev_priv->irq_lock);
- ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
- } else {
- ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
+ cancel_work_sync(&dev_priv->rps.work);
- if (old &&
- I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
- DRM_ERROR("uncleared fifo underrun on pipe %c\n",
- pipe_name(pipe));
- }
- }
-}
+ spin_lock_irq(&dev_priv->irq_lock);
-static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
- enum pipe pipe, bool enable)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ I915_WRITE(GEN6_PMINTRMSK, INTEL_INFO(dev_priv)->gen >= 8 ?
+ ~GEN8_PMINTR_REDIRECT_TO_NON_DISP : ~0);
- assert_spin_locked(&dev_priv->irq_lock);
+ __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
+ ~dev_priv->pm_rps_events);
+ I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
+ I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
- if (enable)
- dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
- else
- dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
- I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
- POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+ dev_priv->rps.pm_iir = 0;
+
+ spin_unlock_irq(&dev_priv->irq_lock);
}
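The helpers above pair the rps.interrupts_enabled flag with cancel_work_sync() so that neither gen6_rps_irq_handler() nor gen6_pm_rps_work() can re-arm RPS interrupts while they are being torn down. A minimal sketch of that ordering, illustrative only and not part of this patch (the function name is made up; the calls mirror gen6_disable_rps_interrupts() above):

static void example_rps_irq_teardown(struct drm_i915_private *dev_priv)
{
	/* 1) Tell the irq handler and the worker to stop queueing/re-arming. */
	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* 2) Wait for a gen6_pm_rps_work() already in flight; it bails out
	 *    early now that interrupts_enabled is false. */
	cancel_work_sync(&dev_priv->rps.work);

	/* 3) Only then mask and clear the hardware bits; IIR is written
	 *    twice, mirroring gen6_reset_rps_interrupts() above. */
	spin_lock_irq(&dev_priv->irq_lock);
	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}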
/**
@@ -410,9 +320,9 @@ static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
-static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
- uint32_t interrupt_mask,
- uint32_t enabled_irq_mask)
+void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
+ uint32_t interrupt_mask,
+ uint32_t enabled_irq_mask)
{
uint32_t sdeimr = I915_READ(SDEIMR);
sdeimr &= ~interrupt_mask;
@@ -426,160 +336,6 @@ static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
I915_WRITE(SDEIMR, sdeimr);
POSTING_READ(SDEIMR);
}
-#define ibx_enable_display_interrupt(dev_priv, bits) \
- ibx_display_interrupt_update((dev_priv), (bits), (bits))
-#define ibx_disable_display_interrupt(dev_priv, bits) \
- ibx_display_interrupt_update((dev_priv), (bits), 0)
-
-static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
- enum transcoder pch_transcoder,
- bool enable)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
- SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
-
- if (enable)
- ibx_enable_display_interrupt(dev_priv, bit);
- else
- ibx_disable_display_interrupt(dev_priv, bit);
-}
-
-static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
- enum transcoder pch_transcoder,
- bool enable, bool old)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (enable) {
- I915_WRITE(SERR_INT,
- SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
-
- if (!cpt_can_enable_serr_int(dev))
- return;
-
- ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
- } else {
- ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
-
- if (old && I915_READ(SERR_INT) &
- SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
- DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
- transcoder_name(pch_transcoder));
- }
- }
-}
-
-/**
- * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
- * @dev: drm device
- * @pipe: pipe
- * @enable: true if we want to report FIFO underrun errors, false otherwise
- *
- * This function makes us disable or enable CPU fifo underruns for a specific
- * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
- * reporting for one pipe may also disable all the other CPU error interruts for
- * the other pipes, due to the fact that there's just one interrupt mask/enable
- * bit for all the pipes.
- *
- * Returns the previous state of underrun reporting.
- */
-static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
- enum pipe pipe, bool enable)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- bool old;
-
- assert_spin_locked(&dev_priv->irq_lock);
-
- old = !intel_crtc->cpu_fifo_underrun_disabled;
- intel_crtc->cpu_fifo_underrun_disabled = !enable;
-
- if (HAS_GMCH_DISPLAY(dev))
- i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
- else if (IS_GEN5(dev) || IS_GEN6(dev))
- ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
- else if (IS_GEN7(dev))
- ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
- else if (IS_GEN8(dev))
- broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
-
- return old;
-}
-
-bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
- enum pipe pipe, bool enable)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long flags;
- bool ret;
-
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable);
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
- return ret;
-}
-
-static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev,
- enum pipe pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- return !intel_crtc->cpu_fifo_underrun_disabled;
-}
-
-/**
- * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
- * @dev: drm device
- * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
- * @enable: true if we want to report FIFO underrun errors, false otherwise
- *
- * This function makes us disable or enable PCH fifo underruns for a specific
- * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
- * underrun reporting for one transcoder may also disable all the other PCH
- * error interruts for the other transcoders, due to the fact that there's just
- * one interrupt mask/enable bit for all the transcoders.
- *
- * Returns the previous state of underrun reporting.
- */
-bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
- enum transcoder pch_transcoder,
- bool enable)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- unsigned long flags;
- bool old;
-
- /*
- * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
- * has only one pch transcoder A that all pipes can use. To avoid racy
- * pch transcoder -> pipe lookups from interrupt code simply store the
- * underrun statistics in crtc A. Since we never expose this anywhere
- * nor use it outside of the fifo underrun code here using the "wrong"
- * crtc on LPT won't cause issues.
- */
-
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
-
- old = !intel_crtc->pch_fifo_underrun_disabled;
- intel_crtc->pch_fifo_underrun_disabled = !enable;
-
- if (HAS_PCH_IBX(dev))
- ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
- else
- cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable, old);
-
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
- return old;
-}
-
static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
@@ -589,6 +345,7 @@ __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
assert_spin_locked(&dev_priv->irq_lock);
+ WARN_ON(!intel_irqs_enabled(dev_priv));
if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
status_mask & ~PIPESTAT_INT_STATUS_MASK,
@@ -615,6 +372,7 @@ __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
assert_spin_locked(&dev_priv->irq_lock);
+ WARN_ON(!intel_irqs_enabled(dev_priv));
if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
status_mask & ~PIPESTAT_INT_STATUS_MASK,
@@ -694,19 +452,18 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long irqflags;
if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
return;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
if (INTEL_INFO(dev)->gen >= 4)
i915_enable_pipestat(dev_priv, PIPE_A,
PIPE_LEGACY_BLC_EVENT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
}
/**
@@ -1094,18 +851,17 @@ static void i915_digport_work_func(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private, dig_port_work);
- unsigned long irqflags;
u32 long_port_mask, short_port_mask;
struct intel_digital_port *intel_dig_port;
int i, ret;
u32 old_bits = 0;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
long_port_mask = dev_priv->long_hpd_port_mask;
dev_priv->long_hpd_port_mask = 0;
short_port_mask = dev_priv->short_hpd_port_mask;
dev_priv->short_hpd_port_mask = 0;
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
for (i = 0; i < I915_MAX_PORTS; i++) {
bool valid = false;
@@ -1130,9 +886,9 @@ static void i915_digport_work_func(struct work_struct *work)
}
if (old_bits) {
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
dev_priv->hpd_event_bits |= old_bits;
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
schedule_work(&dev_priv->hotplug_work);
}
}
@@ -1151,7 +907,6 @@ static void i915_hotplug_work_func(struct work_struct *work)
struct intel_connector *intel_connector;
struct intel_encoder *intel_encoder;
struct drm_connector *connector;
- unsigned long irqflags;
bool hpd_disabled = false;
bool changed = false;
u32 hpd_event_bits;
@@ -1159,7 +914,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
mutex_lock(&mode_config->mutex);
DRM_DEBUG_KMS("running encoder hotplug functions\n");
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
hpd_event_bits = dev_priv->hpd_event_bits;
dev_priv->hpd_event_bits = 0;
@@ -1193,7 +948,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
}
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
list_for_each_entry(connector, &mode_config->connector_list, head) {
intel_connector = to_intel_connector(connector);
@@ -1260,11 +1015,7 @@ static void notify_ring(struct drm_device *dev,
trace_i915_gem_request_complete(ring);
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- intel_notify_mmio_flip(ring);
-
wake_up_all(&ring->irq_queue);
- i915_queue_hangcheck(dev);
}
static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
@@ -1400,14 +1151,15 @@ static void gen6_pm_rps_work(struct work_struct *work)
int new_delay, adj;
spin_lock_irq(&dev_priv->irq_lock);
+ /* Speed up work cancellation while disabling rps interrupts. */
+ if (!dev_priv->rps.interrupts_enabled) {
+ spin_unlock_irq(&dev_priv->irq_lock);
+ return;
+ }
pm_iir = dev_priv->rps.pm_iir;
dev_priv->rps.pm_iir = 0;
- if (INTEL_INFO(dev_priv->dev)->gen >= 8)
- gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
- else {
- /* Make sure not to corrupt PMIMR state used by ringbuffer */
- gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
- }
+ /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
+ gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
/* Make sure we didn't queue anything we're not going to process. */
@@ -1488,7 +1240,6 @@ static void ivybridge_parity_work(struct work_struct *work)
u32 error_status, row, bank, subbank;
char *parity_event[6];
uint32_t misccpctl;
- unsigned long flags;
uint8_t slice = 0;
/* We must turn off DOP level clock gating to access the L3 registers.
@@ -1547,9 +1298,9 @@ static void ivybridge_parity_work(struct work_struct *work)
out:
WARN_ON(dev_priv->l3_parity.which_slice);
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ spin_lock_irq(&dev_priv->irq_lock);
gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ spin_unlock_irq(&dev_priv->irq_lock);
mutex_unlock(&dev_priv->dev->struct_mutex);
}
@@ -1601,28 +1352,13 @@ static void snb_gt_irq_handler(struct drm_device *dev,
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
GT_BSD_CS_ERROR_INTERRUPT |
- GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
- i915_handle_error(dev, false, "GT error interrupt 0x%08x",
- gt_iir);
- }
+ GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
+ DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
if (gt_iir & GT_PARITY_ERROR(dev))
ivybridge_parity_error_irq_handler(dev, gt_iir);
}
-static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
-{
- if ((pm_iir & dev_priv->pm_rps_events) == 0)
- return;
-
- spin_lock(&dev_priv->irq_lock);
- dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
- gen8_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
- spin_unlock(&dev_priv->irq_lock);
-
- queue_work(dev_priv->wq, &dev_priv->rps.work);
-}
-
static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
struct drm_i915_private *dev_priv,
u32 master_ctl)
@@ -1684,7 +1420,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
I915_WRITE(GEN8_GT_IIR(2),
tmp & dev_priv->pm_rps_events);
ret = IRQ_HANDLED;
- gen8_rps_irq_handler(dev_priv, tmp);
+ gen6_rps_irq_handler(dev_priv, tmp);
} else
DRM_ERROR("The master control interrupt lied (PM)!\n");
}
@@ -1898,7 +1634,7 @@ static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
if (!pipe_crc->entries) {
spin_unlock(&pipe_crc->lock);
- DRM_ERROR("spurious interrupt\n");
+ DRM_DEBUG_KMS("spurious interrupt\n");
return;
}
@@ -1984,24 +1720,30 @@ static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
* the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
+ /* TODO: RPS on GEN9+ is not supported yet. */
+ if (WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9,
+ "GEN9+: unexpected RPS IRQ\n"))
+ return;
+
if (pm_iir & dev_priv->pm_rps_events) {
spin_lock(&dev_priv->irq_lock);
- dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
+ if (dev_priv->rps.interrupts_enabled) {
+ dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
+ queue_work(dev_priv->wq, &dev_priv->rps.work);
+ }
spin_unlock(&dev_priv->irq_lock);
-
- queue_work(dev_priv->wq, &dev_priv->rps.work);
}
+ if (INTEL_INFO(dev_priv)->gen >= 8)
+ return;
+
if (HAS_VEBOX(dev_priv->dev)) {
if (pm_iir & PM_VEBOX_USER_INTERRUPT)
notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
- if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
- i915_handle_error(dev_priv->dev, false,
- "VEBOX CS error interrupt 0x%08x",
- pm_iir);
- }
+ if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
+ DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
}
}
@@ -2031,9 +1773,9 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
* we need to be careful that we only handle what we want to
* handle.
*/
- mask = 0;
- if (__cpu_fifo_underrun_reporting_enabled(dev, pipe))
- mask |= PIPE_FIFO_UNDERRUN_STATUS;
+
+ /* fifo underruns are filtered in the underrun handler. */
+ mask = PIPE_FIFO_UNDERRUN_STATUS;
switch (pipe) {
case PIPE_A:
@@ -2078,9 +1820,8 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev, pipe);
- if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
- intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
- DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
}
if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
@@ -2247,14 +1988,10 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
if (pch_iir & SDE_TRANSA_FIFO_UNDER)
- if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
- false))
- DRM_ERROR("PCH transcoder A FIFO underrun\n");
+ intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
if (pch_iir & SDE_TRANSB_FIFO_UNDER)
- if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
- false))
- DRM_ERROR("PCH transcoder B FIFO underrun\n");
+ intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
}
static void ivb_err_int_handler(struct drm_device *dev)
@@ -2267,12 +2004,8 @@ static void ivb_err_int_handler(struct drm_device *dev)
DRM_ERROR("Poison interrupt\n");
for_each_pipe(dev_priv, pipe) {
- if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
- if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
- false))
- DRM_ERROR("Pipe %c FIFO underrun\n",
- pipe_name(pipe));
- }
+ if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
+ intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
if (IS_IVYBRIDGE(dev))
@@ -2294,19 +2027,13 @@ static void cpt_serr_int_handler(struct drm_device *dev)
DRM_ERROR("PCH poison interrupt\n");
if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
- if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
- false))
- DRM_ERROR("PCH transcoder A FIFO underrun\n");
+ intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
- if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
- false))
- DRM_ERROR("PCH transcoder B FIFO underrun\n");
+ intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
- if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
- false))
- DRM_ERROR("PCH transcoder C FIFO underrun\n");
+ intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
I915_WRITE(SERR_INT, serr_int);
}
@@ -2372,9 +2099,7 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
intel_check_page_flip(dev, pipe);
if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
- if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
- DRM_ERROR("Pipe %c FIFO underrun\n",
- pipe_name(pipe));
+ intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
if (de_iir & DE_PIPE_CRC_DONE(pipe))
i9xx_pipe_crc_irq_handler(dev, pipe);
@@ -2524,6 +2249,11 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
irqreturn_t ret = IRQ_NONE;
uint32_t tmp = 0;
enum pipe pipe;
+ u32 aux_mask = GEN8_AUX_CHANNEL_A;
+
+ if (IS_GEN9(dev))
+ aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
+ GEN9_AUX_CHANNEL_D;
master_ctl = I915_READ(GEN8_MASTER_IRQ);
master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
@@ -2556,7 +2286,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
if (tmp) {
I915_WRITE(GEN8_DE_PORT_IIR, tmp);
ret = IRQ_HANDLED;
- if (tmp & GEN8_AUX_CHANNEL_A)
+
+ if (tmp & aux_mask)
dp_aux_irq_handler(dev);
else
DRM_ERROR("Unexpected DE Port interrupt\n");
@@ -2566,7 +2297,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
}
for_each_pipe(dev_priv, pipe) {
- uint32_t pipe_iir;
+ uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
continue;
@@ -2575,11 +2306,17 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
if (pipe_iir) {
ret = IRQ_HANDLED;
I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
+
if (pipe_iir & GEN8_PIPE_VBLANK &&
intel_pipe_handle_vblank(dev, pipe))
intel_check_page_flip(dev, pipe);
- if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
+ if (IS_GEN9(dev))
+ flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
+ else
+ flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
+
+ if (flip_done) {
intel_prepare_page_flip(dev, pipe);
intel_finish_page_flip_plane(dev, pipe);
}
@@ -2587,18 +2324,20 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
hsw_pipe_crc_irq_handler(dev, pipe);
- if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
- if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
- false))
- DRM_ERROR("Pipe %c FIFO underrun\n",
- pipe_name(pipe));
- }
+ if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
+ intel_cpu_fifo_underrun_irq_handler(dev_priv,
+ pipe);
+
- if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
+ if (IS_GEN9(dev))
+ fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
+ else
+ fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
+
+ if (fault_errors)
DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
pipe_name(pipe),
pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
- }
} else
DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
}
@@ -2697,6 +2436,9 @@ static void i915_error_work_func(struct work_struct *work)
* simulated reset via debugs, so get an RPM reference.
*/
intel_runtime_pm_get(dev_priv);
+
+ intel_prepare_reset(dev);
+
/*
* All state reset _must_ be completed before we update the
* reset counter, for otherwise waiters might miss the reset
@@ -2705,7 +2447,7 @@ static void i915_error_work_func(struct work_struct *work)
*/
ret = i915_reset(dev);
- intel_display_handle_reset(dev);
+ intel_finish_reset(dev);
intel_runtime_pm_put(dev_priv);
@@ -3330,10 +3072,15 @@ static void i915_hangcheck_elapsed(unsigned long data)
void i915_queue_hangcheck(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct timer_list *timer = &dev_priv->gpu_error.hangcheck_timer;
+
if (!i915.enable_hangcheck)
return;
- mod_timer(&dev_priv->gpu_error.hangcheck_timer,
+ /* Don't continually defer the hangcheck, but make sure it is active */
+ if (timer_pending(timer))
+ return;
+ mod_timer(timer,
round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
@@ -3396,10 +3143,22 @@ static void ironlake_irq_reset(struct drm_device *dev)
ibx_irq_reset(dev);
}
+static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
+{
+ enum pipe pipe;
+
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+
+ for_each_pipe(dev_priv, pipe)
+ I915_WRITE(PIPESTAT(pipe), 0xffff);
+
+ GEN5_IRQ_RESET(VLV_);
+}
+
static void valleyview_irq_preinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe;
/* VLV magic */
I915_WRITE(VLV_IMR, 0);
@@ -3407,22 +3166,11 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
- /* and GT */
- I915_WRITE(GTIIR, I915_READ(GTIIR));
- I915_WRITE(GTIIR, I915_READ(GTIIR));
-
gen5_gt_irq_reset(dev);
- I915_WRITE(DPINVGTT, 0xff);
+ I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
- I915_WRITE(PORT_HOTPLUG_EN, 0);
- I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
- for_each_pipe(dev_priv, pipe)
- I915_WRITE(PIPESTAT(pipe), 0xffff);
- I915_WRITE(VLV_IIR, 0xffffffff);
- I915_WRITE(VLV_IMR, 0xffffffff);
- I915_WRITE(VLV_IER, 0x0);
- POSTING_READ(VLV_IER);
+ vlv_display_irq_reset(dev_priv);
}
static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
@@ -3444,8 +3192,8 @@ static void gen8_irq_reset(struct drm_device *dev)
gen8_gt_irq_reset(dev_priv);
for_each_pipe(dev_priv, pipe)
- if (intel_display_power_enabled(dev_priv,
- POWER_DOMAIN_PIPE(pipe)))
+ if (intel_display_power_is_enabled(dev_priv,
+ POWER_DOMAIN_PIPE(pipe)))
GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
GEN5_IRQ_RESET(GEN8_DE_PORT_);
@@ -3457,21 +3205,19 @@ static void gen8_irq_reset(struct drm_device *dev)
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
{
- unsigned long irqflags;
uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
}
static void cherryview_irq_preinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe;
I915_WRITE(GEN8_MASTER_IRQ, 0);
POSTING_READ(GEN8_MASTER_IRQ);
@@ -3480,20 +3226,9 @@ static void cherryview_irq_preinstall(struct drm_device *dev)
GEN5_IRQ_RESET(GEN8_PCU_);
- POSTING_READ(GEN8_PCU_IIR);
-
I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
- I915_WRITE(PORT_HOTPLUG_EN, 0);
- I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
-
- for_each_pipe(dev_priv, pipe)
- I915_WRITE(PIPESTAT(pipe), 0xffff);
-
- I915_WRITE(VLV_IMR, 0xffffffff);
- I915_WRITE(VLV_IER, 0x0);
- I915_WRITE(VLV_IIR, 0xffffffff);
- POSTING_READ(VLV_IIR);
+ vlv_display_irq_reset(dev_priv);
}
static void ibx_hpd_irq_setup(struct drm_device *dev)
@@ -3584,7 +3319,6 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
static int ironlake_irq_postinstall(struct drm_device *dev)
{
- unsigned long irqflags;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 display_mask, extra_mask;
@@ -3623,9 +3357,9 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
* spinlocking not required here for correctness since interrupt
* setup is guaranteed to run in single-threaded context. But we
* need it to make the assert_spin_locked happy. */
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
}
return 0;
@@ -3635,45 +3369,51 @@ static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
{
u32 pipestat_mask;
u32 iir_mask;
+ enum pipe pipe;
pipestat_mask = PIPESTAT_INT_STATUS_MASK |
PIPE_FIFO_UNDERRUN_STATUS;
- I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
- I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
+ for_each_pipe(dev_priv, pipe)
+ I915_WRITE(PIPESTAT(pipe), pipestat_mask);
POSTING_READ(PIPESTAT(PIPE_A));
pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
PIPE_CRC_DONE_INTERRUPT_STATUS;
- i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask |
- PIPE_GMBUS_INTERRUPT_STATUS);
- i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask);
+ i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
+ for_each_pipe(dev_priv, pipe)
+ i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
iir_mask = I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+ if (IS_CHERRYVIEW(dev_priv))
+ iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
dev_priv->irq_mask &= ~iir_mask;
I915_WRITE(VLV_IIR, iir_mask);
I915_WRITE(VLV_IIR, iir_mask);
- I915_WRITE(VLV_IMR, dev_priv->irq_mask);
I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
- POSTING_READ(VLV_IER);
+ I915_WRITE(VLV_IMR, dev_priv->irq_mask);
+ POSTING_READ(VLV_IMR);
}
static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
{
u32 pipestat_mask;
u32 iir_mask;
+ enum pipe pipe;
iir_mask = I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+ if (IS_CHERRYVIEW(dev_priv))
+ iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
dev_priv->irq_mask |= iir_mask;
- I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
I915_WRITE(VLV_IMR, dev_priv->irq_mask);
+ I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
I915_WRITE(VLV_IIR, iir_mask);
I915_WRITE(VLV_IIR, iir_mask);
POSTING_READ(VLV_IIR);
@@ -3681,14 +3421,15 @@ static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
PIPE_CRC_DONE_INTERRUPT_STATUS;
- i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask |
- PIPE_GMBUS_INTERRUPT_STATUS);
- i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask);
+ i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
+ for_each_pipe(dev_priv, pipe)
+ i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
pipestat_mask = PIPESTAT_INT_STATUS_MASK |
PIPE_FIFO_UNDERRUN_STATUS;
- I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
- I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
+
+ for_each_pipe(dev_priv, pipe)
+ I915_WRITE(PIPESTAT(pipe), pipestat_mask);
POSTING_READ(PIPESTAT(PIPE_A));
}
@@ -3701,7 +3442,7 @@ void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
dev_priv->display_irqs_enabled = true;
- if (dev_priv->dev->irq_enabled)
+ if (intel_irqs_enabled(dev_priv))
valleyview_display_irqs_install(dev_priv);
}
@@ -3714,34 +3455,36 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
dev_priv->display_irqs_enabled = false;
- if (dev_priv->dev->irq_enabled)
+ if (intel_irqs_enabled(dev_priv))
valleyview_display_irqs_uninstall(dev_priv);
}
-static int valleyview_irq_postinstall(struct drm_device *dev)
+static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long irqflags;
-
dev_priv->irq_mask = ~0;
I915_WRITE(PORT_HOTPLUG_EN, 0);
POSTING_READ(PORT_HOTPLUG_EN);
- I915_WRITE(VLV_IMR, dev_priv->irq_mask);
- I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
I915_WRITE(VLV_IIR, 0xffffffff);
- POSTING_READ(VLV_IER);
+ I915_WRITE(VLV_IIR, 0xffffffff);
+ I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
+ I915_WRITE(VLV_IMR, dev_priv->irq_mask);
+ POSTING_READ(VLV_IMR);
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
valleyview_display_irqs_install(dev_priv);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
+}
- I915_WRITE(VLV_IIR, 0xffffffff);
- I915_WRITE(VLV_IIR, 0xffffffff);
+static int valleyview_irq_postinstall(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ vlv_display_irq_postinstall(dev_priv);
gen5_gt_irq_postinstall(dev);
@@ -3783,24 +3526,35 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
- uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE |
- GEN8_PIPE_CDCLK_CRC_DONE |
- GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
- uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
- GEN8_PIPE_FIFO_UNDERRUN;
+ uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
+ uint32_t de_pipe_enables;
int pipe;
+ u32 aux_en = GEN8_AUX_CHANNEL_A;
+
+ if (IS_GEN9(dev_priv)) {
+ de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
+ GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
+ aux_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
+ GEN9_AUX_CHANNEL_D;
+ } else
+ de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
+ GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
+
+ de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
+ GEN8_PIPE_FIFO_UNDERRUN;
+
dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
for_each_pipe(dev_priv, pipe)
- if (intel_display_power_enabled(dev_priv,
+ if (intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe)))
GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
dev_priv->de_irq_mask[pipe],
de_pipe_enables);
- GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
+ GEN5_IRQ_INIT(GEN8_DE_PORT_, ~aux_en, aux_en);
}
static int gen8_irq_postinstall(struct drm_device *dev)
@@ -3823,33 +3577,8 @@ static int gen8_irq_postinstall(struct drm_device *dev)
static int cherryview_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
- I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
- u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
- PIPE_CRC_DONE_INTERRUPT_STATUS;
- unsigned long irqflags;
- int pipe;
- /*
- * Leave vblank interrupts masked initially. enable/disable will
- * toggle them based on usage.
- */
- dev_priv->irq_mask = ~enable_mask;
-
- for_each_pipe(dev_priv, pipe)
- I915_WRITE(PIPESTAT(pipe), 0xffff);
-
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
- for_each_pipe(dev_priv, pipe)
- i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-
- I915_WRITE(VLV_IIR, 0xffffffff);
- I915_WRITE(VLV_IMR, dev_priv->irq_mask);
- I915_WRITE(VLV_IER, enable_mask);
+ vlv_display_irq_postinstall(dev_priv);
gen8_gt_irq_postinstall(dev_priv);
@@ -3869,41 +3598,39 @@ static void gen8_irq_uninstall(struct drm_device *dev)
gen8_irq_reset(dev);
}
+static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
+{
+ /* Interrupt setup is already guaranteed to be single-threaded, this is
+ * just to make the assert_spin_locked check happy. */
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (dev_priv->display_irqs_enabled)
+ valleyview_display_irqs_uninstall(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ vlv_display_irq_reset(dev_priv);
+
+ dev_priv->irq_mask = 0;
+}
+
static void valleyview_irq_uninstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long irqflags;
- int pipe;
if (!dev_priv)
return;
I915_WRITE(VLV_MASTER_IER, 0);
- for_each_pipe(dev_priv, pipe)
- I915_WRITE(PIPESTAT(pipe), 0xffff);
+ gen5_gt_irq_reset(dev);
I915_WRITE(HWSTAM, 0xffffffff);
- I915_WRITE(PORT_HOTPLUG_EN, 0);
- I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
-
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- if (dev_priv->display_irqs_enabled)
- valleyview_display_irqs_uninstall(dev_priv);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
- dev_priv->irq_mask = 0;
-
- I915_WRITE(VLV_IIR, 0xffffffff);
- I915_WRITE(VLV_IMR, 0xffffffff);
- I915_WRITE(VLV_IER, 0x0);
- POSTING_READ(VLV_IER);
+ vlv_display_irq_uninstall(dev_priv);
}
static void cherryview_irq_uninstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe;
if (!dev_priv)
return;
@@ -3911,44 +3638,11 @@ static void cherryview_irq_uninstall(struct drm_device *dev)
I915_WRITE(GEN8_MASTER_IRQ, 0);
POSTING_READ(GEN8_MASTER_IRQ);
-#define GEN8_IRQ_FINI_NDX(type, which) \
-do { \
- I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
- I915_WRITE(GEN8_##type##_IER(which), 0); \
- I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
- POSTING_READ(GEN8_##type##_IIR(which)); \
- I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
-} while (0)
-
-#define GEN8_IRQ_FINI(type) \
-do { \
- I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
- I915_WRITE(GEN8_##type##_IER, 0); \
- I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
- POSTING_READ(GEN8_##type##_IIR); \
- I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
-} while (0)
-
- GEN8_IRQ_FINI_NDX(GT, 0);
- GEN8_IRQ_FINI_NDX(GT, 1);
- GEN8_IRQ_FINI_NDX(GT, 2);
- GEN8_IRQ_FINI_NDX(GT, 3);
-
- GEN8_IRQ_FINI(PCU);
-
-#undef GEN8_IRQ_FINI
-#undef GEN8_IRQ_FINI_NDX
-
- I915_WRITE(PORT_HOTPLUG_EN, 0);
- I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ gen8_gt_irq_reset(dev_priv);
- for_each_pipe(dev_priv, pipe)
- I915_WRITE(PIPESTAT(pipe), 0xffff);
+ GEN5_IRQ_RESET(GEN8_PCU_);
- I915_WRITE(VLV_IMR, 0xffffffff);
- I915_WRITE(VLV_IER, 0x0);
- I915_WRITE(VLV_IIR, 0xffffffff);
- POSTING_READ(VLV_IIR);
+ vlv_display_irq_uninstall(dev_priv);
}
static void ironlake_irq_uninstall(struct drm_device *dev)
@@ -3976,7 +3670,6 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
static int i8xx_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long irqflags;
I915_WRITE16(EMR,
~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
@@ -3999,10 +3692,10 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
return 0;
}
@@ -4047,7 +3740,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
struct drm_i915_private *dev_priv = dev->dev_private;
u16 iir, new_iir;
u32 pipe_stats[2];
- unsigned long irqflags;
int pipe;
u16 flip_mask =
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
@@ -4063,11 +3755,9 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
* It doesn't set the bit in iir again, but it still produces
* interrupts (for non-MSI).
*/
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock(&dev_priv->irq_lock);
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- i915_handle_error(dev, false,
- "Command parser error, iir 0x%08x",
- iir);
+ DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
for_each_pipe(dev_priv, pipe) {
int reg = PIPESTAT(pipe);
@@ -4079,13 +3769,11 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
if (pipe_stats[pipe] & 0x8000ffff)
I915_WRITE(reg, pipe_stats[pipe]);
}
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock(&dev_priv->irq_lock);
I915_WRITE16(IIR, iir & ~flip_mask);
new_iir = I915_READ16(IIR); /* Flush posted writes */
- i915_update_dri1_breadcrumb(dev);
-
if (iir & I915_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[RCS]);
@@ -4101,9 +3789,9 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev, pipe);
- if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
- intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
- DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ intel_cpu_fifo_underrun_irq_handler(dev_priv,
+ pipe);
}
iir = new_iir;
@@ -4149,7 +3837,6 @@ static int i915_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 enable_mask;
- unsigned long irqflags;
I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
@@ -4187,10 +3874,10 @@ static int i915_irq_postinstall(struct drm_device *dev)
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
return 0;
}
@@ -4234,7 +3921,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
- unsigned long irqflags;
u32 flip_mask =
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
@@ -4250,11 +3936,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
* It doesn't set the bit in iir again, but it still produces
* interrupts (for non-MSI).
*/
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock(&dev_priv->irq_lock);
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- i915_handle_error(dev, false,
- "Command parser error, iir 0x%08x",
- iir);
+ DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
for_each_pipe(dev_priv, pipe) {
int reg = PIPESTAT(pipe);
@@ -4266,7 +3950,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
irq_received = true;
}
}
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock(&dev_priv->irq_lock);
if (!irq_received)
break;
@@ -4297,9 +3981,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev, pipe);
- if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
- intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
- DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ intel_cpu_fifo_underrun_irq_handler(dev_priv,
+ pipe);
}
if (blc_event || (iir & I915_ASLE_INTERRUPT))
@@ -4324,8 +4008,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
iir = new_iir;
} while (iir & ~flip_mask);
- i915_update_dri1_breadcrumb(dev);
-
return ret;
}
@@ -4372,7 +4054,6 @@ static int i965_irq_postinstall(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 enable_mask;
u32 error_mask;
- unsigned long irqflags;
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
@@ -4393,11 +4074,11 @@ static int i965_irq_postinstall(struct drm_device *dev)
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
/*
* Enable some error detection, note the instruction error mask
@@ -4462,7 +4143,6 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 iir, new_iir;
u32 pipe_stats[I915_MAX_PIPES];
- unsigned long irqflags;
int ret = IRQ_NONE, pipe;
u32 flip_mask =
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
@@ -4479,11 +4159,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
* It doesn't set the bit in iir again, but it still produces
* interrupts (for non-MSI).
*/
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock(&dev_priv->irq_lock);
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- i915_handle_error(dev, false,
- "Command parser error, iir 0x%08x",
- iir);
+ DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
for_each_pipe(dev_priv, pipe) {
int reg = PIPESTAT(pipe);
@@ -4497,7 +4175,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
irq_received = true;
}
}
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock(&dev_priv->irq_lock);
if (!irq_received)
break;
@@ -4527,9 +4205,8 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev, pipe);
- if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
- intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
- DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
}
if (blc_event || (iir & I915_ASLE_INTERRUPT))
@@ -4556,8 +4233,6 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
iir = new_iir;
}
- i915_update_dri1_breadcrumb(dev);
-
return ret;
}
@@ -4584,19 +4259,18 @@ static void i965_irq_uninstall(struct drm_device * dev)
I915_WRITE(IIR, I915_READ(IIR));
}
-static void intel_hpd_irq_reenable(struct work_struct *work)
+static void intel_hpd_irq_reenable_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv),
hotplug_reenable_work.work);
struct drm_device *dev = dev_priv->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
- unsigned long irqflags;
int i;
intel_runtime_pm_get(dev_priv);
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
struct drm_connector *connector;
@@ -4620,14 +4294,21 @@ static void intel_hpd_irq_reenable(struct work_struct *work)
}
if (dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
intel_runtime_pm_put(dev_priv);
}
-void intel_irq_init(struct drm_device *dev)
+/**
+ * intel_irq_init - initializes irq support
+ * @dev_priv: i915 device instance
+ *
+ * This function initializes all the irq support including work items, timers
+ * and all the vtables. It does not setup the interrupt itself though.
+ */
+void intel_irq_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_device *dev = dev_priv->dev;
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
@@ -4636,7 +4317,7 @@ void intel_irq_init(struct drm_device *dev)
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
/* Let's track the enabled rps events */
- if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
/* WaGsvRC0ResidencyMethod:vlv */
dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
else
@@ -4646,17 +4327,14 @@ void intel_irq_init(struct drm_device *dev)
i915_hangcheck_elapsed,
(unsigned long) dev);
INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
- intel_hpd_irq_reenable);
+ intel_hpd_irq_reenable_work);
pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
- /* Haven't installed the IRQ handler yet */
- dev_priv->pm._irqs_disabled = true;
-
- if (IS_GEN2(dev)) {
+ if (IS_GEN2(dev_priv)) {
dev->max_vblank_count = 0;
dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
- } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+ } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
dev->driver->get_vblank_counter = gm45_get_vblank_counter;
} else {
@@ -4669,7 +4347,7 @@ void intel_irq_init(struct drm_device *dev)
* Gen2 doesn't have a hardware frame counter and so depends on
* vblank interrupts to produce sane vblank sequence numbers.
*/
- if (!IS_GEN2(dev))
+ if (!IS_GEN2(dev_priv))
dev->vblank_disable_immediate = true;
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
@@ -4677,7 +4355,7 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
}
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
dev->driver->irq_handler = cherryview_irq_handler;
dev->driver->irq_preinstall = cherryview_irq_preinstall;
dev->driver->irq_postinstall = cherryview_irq_postinstall;
@@ -4685,7 +4363,7 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->enable_vblank = valleyview_enable_vblank;
dev->driver->disable_vblank = valleyview_disable_vblank;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
- } else if (IS_VALLEYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv)) {
dev->driver->irq_handler = valleyview_irq_handler;
dev->driver->irq_preinstall = valleyview_irq_preinstall;
dev->driver->irq_postinstall = valleyview_irq_postinstall;
@@ -4693,7 +4371,7 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->enable_vblank = valleyview_enable_vblank;
dev->driver->disable_vblank = valleyview_disable_vblank;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
- } else if (IS_GEN8(dev)) {
+ } else if (INTEL_INFO(dev_priv)->gen >= 8) {
dev->driver->irq_handler = gen8_irq_handler;
dev->driver->irq_preinstall = gen8_irq_reset;
dev->driver->irq_postinstall = gen8_irq_postinstall;
@@ -4710,12 +4388,12 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->disable_vblank = ironlake_disable_vblank;
dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
} else {
- if (INTEL_INFO(dev)->gen == 2) {
+ if (INTEL_INFO(dev_priv)->gen == 2) {
dev->driver->irq_preinstall = i8xx_irq_preinstall;
dev->driver->irq_postinstall = i8xx_irq_postinstall;
dev->driver->irq_handler = i8xx_irq_handler;
dev->driver->irq_uninstall = i8xx_irq_uninstall;
- } else if (INTEL_INFO(dev)->gen == 3) {
+ } else if (INTEL_INFO(dev_priv)->gen == 3) {
dev->driver->irq_preinstall = i915_irq_preinstall;
dev->driver->irq_postinstall = i915_irq_postinstall;
dev->driver->irq_uninstall = i915_irq_uninstall;
@@ -4733,12 +4411,23 @@ void intel_irq_init(struct drm_device *dev)
}
}
-void intel_hpd_init(struct drm_device *dev)
+/**
+ * intel_hpd_init - initializes and enables hpd support
+ * @dev_priv: i915 device instance
+ *
+ * This function enables the hotplug support. It requires that interrupts have
+ * already been enabled with intel_irq_init_hw(). From this point on hotplug and
+ * poll requests can run concurrently with other code, so locking rules must be
+ * obeyed.
+ *
+ * This is a separate step from interrupt enabling to simplify the locking rules
+ * in the driver load and resume code.
+ */
+void intel_hpd_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_device *dev = dev_priv->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
- unsigned long irqflags;
int i;
for (i = 1; i < HPD_NUM_PINS; i++) {
@@ -4756,27 +4445,72 @@ void intel_hpd_init(struct drm_device *dev)
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked checks happy. */
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
}
-/* Disable interrupts so we can allow runtime PM. */
-void intel_runtime_pm_disable_interrupts(struct drm_device *dev)
+/**
+ * intel_irq_install - enables the hardware interrupt
+ * @dev_priv: i915 device instance
+ *
+ * This function enables the hardware interrupt handling, but leaves the hotplug
+ * handling still disabled. It is called after intel_irq_init().
+ *
+ * In the driver load and resume code we need working interrupts in a few places
+ * but don't want to deal with the hassle of concurrent probe and hotplug
+ * workers. Hence the split into this two-stage approach.
+ */
+int intel_irq_install(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ /*
+ * We enable some interrupt sources in our postinstall hooks, so mark
+ * interrupts as enabled _before_ actually enabling them to avoid
+ * special cases in our ordering checks.
+ */
+ dev_priv->pm.irqs_enabled = true;
- dev->driver->irq_uninstall(dev);
- dev_priv->pm._irqs_disabled = true;
+ return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
}
-/* Restore interrupts so we can recover from runtime PM. */
-void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
+/**
+ * intel_irq_uninstall - finalizes all irq handling
+ * @dev_priv: i915 device instance
+ *
+ * This stops interrupt and hotplug handling and unregisters and frees all
+ * resources acquired in the init functions.
+ */
+void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ drm_irq_uninstall(dev_priv->dev);
+ intel_hpd_cancel_work(dev_priv);
+ dev_priv->pm.irqs_enabled = false;
+}
- dev_priv->pm._irqs_disabled = false;
- dev->driver->irq_preinstall(dev);
- dev->driver->irq_postinstall(dev);
+/**
+ * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
+ * @dev_priv: i915 device instance
+ *
+ * This function is used to disable interrupts at runtime, both in the runtime
+ * pm and the system suspend/resume code.
+ */
+void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
+{
+ dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
+ dev_priv->pm.irqs_enabled = false;
+}
+
+/**
+ * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
+ * @dev_priv: i915 device instance
+ *
+ * This function is used to enable interrupts at runtime, both in the runtime
+ * pm and the system suspend/resume code.
+ */
+void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
+{
+ dev_priv->pm.irqs_enabled = true;
+ dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
+ dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
}
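The kernel-doc added above describes a deliberately two-stage bring-up: intel_irq_init() only sets up work items, timers and vtables, intel_irq_install() then requests the hardware interrupt, and intel_hpd_init() enables hotplug last, once interrupts already work. A minimal sketch of the intended load/unload ordering, illustrative only and not part of this patch (the example_* names are made up, error handling trimmed):

static int example_driver_load(struct drm_i915_private *dev_priv)
{
	int ret;

	intel_irq_init(dev_priv);		/* work items, timers, vtables */

	ret = intel_irq_install(dev_priv);	/* request the hardware irq */
	if (ret)
		return ret;

	intel_hpd_init(dev_priv);		/* hotplug once irqs are live */

	return 0;
}

static void example_driver_unload(struct drm_i915_private *dev_priv)
{
	/* Frees the irq and cancels outstanding hotplug work. */
	intel_irq_uninstall(dev_priv);
}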
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index c01e5f31430..eefdc238f70 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -26,14 +26,25 @@
#define _I915_REG_H_
#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+#define _PLANE(plane, a, b) _PIPE(plane, a, b)
#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
-
#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
#define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \
(pipe) == PIPE_B ? (b) : (c))
-#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
-#define _MASKED_BIT_DISABLE(a) ((a) << 16)
+#define _MASKED_FIELD(mask, value) ({ \
+ if (__builtin_constant_p(mask)) \
+ BUILD_BUG_ON_MSG(((mask) & 0xffff0000), "Incorrect mask"); \
+ if (__builtin_constant_p(value)) \
+ BUILD_BUG_ON_MSG((value) & 0xffff0000, "Incorrect value"); \
+ if (__builtin_constant_p(mask) && __builtin_constant_p(value)) \
+ BUILD_BUG_ON_MSG((value) & ~(mask), \
+ "Incorrect value for mask"); \
+ (mask) << 16 | (value); })
+#define _MASKED_BIT_ENABLE(a) ({ typeof(a) _a = (a); _MASKED_FIELD(_a, _a); })
+#define _MASKED_BIT_DISABLE(a) (_MASKED_FIELD((a), 0))
+
+
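For reference on the masked-register helpers just added: the top 16 bits of the written value select which of the low 16 bits the hardware actually updates, so unrelated bits can be changed without a read-modify-write. Illustrative expansions, not part of this patch (the bit positions are arbitrary examples):

/*
 *   _MASKED_BIT_ENABLE(1 << 3)          == (1 << 19) | (1 << 3)   set bit 3
 *   _MASKED_BIT_DISABLE(1 << 3)         == (1 << 19)              clear bit 3
 *   _MASKED_FIELD(0x7 << 4, 0x5 << 4)   writes 0x5 into bits 6:4
 *
 * Bits whose mask half is zero are left untouched, and the BUILD_BUG_ON_MSG()
 * checks reject compile-time constant masks or values that spill into the
 * upper 16 bits.
 */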
/* PCI config space */
@@ -74,15 +85,17 @@
#define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0)
#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0)
#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0)
+#define GCDGMBUS 0xcc
#define PCI_LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */
/* Graphics reset regs */
-#define I965_GDRST 0xc0 /* PCI config register */
+#define I915_GDRST 0xc0 /* PCI config register */
#define GRDOM_FULL (0<<2)
#define GRDOM_RENDER (1<<2)
#define GRDOM_MEDIA (3<<2)
#define GRDOM_MASK (3<<2)
+#define GRDOM_RESET_STATUS (1<<1)
#define GRDOM_RESET_ENABLE (1<<0)
#define ILK_GDSR 0x2ca4 /* MCHBAR offset */
@@ -248,6 +261,16 @@
#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19)
#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
+/* SKL ones */
+#define MI_DISPLAY_FLIP_SKL_PLANE_1_A (0 << 8)
+#define MI_DISPLAY_FLIP_SKL_PLANE_1_B (1 << 8)
+#define MI_DISPLAY_FLIP_SKL_PLANE_1_C (2 << 8)
+#define MI_DISPLAY_FLIP_SKL_PLANE_2_A (4 << 8)
+#define MI_DISPLAY_FLIP_SKL_PLANE_2_B (5 << 8)
+#define MI_DISPLAY_FLIP_SKL_PLANE_2_C (6 << 8)
+#define MI_DISPLAY_FLIP_SKL_PLANE_3_A (7 << 8)
+#define MI_DISPLAY_FLIP_SKL_PLANE_3_B (8 << 8)
+#define MI_DISPLAY_FLIP_SKL_PLANE_3_C (9 << 8)
#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6, gen7 */
#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
#define MI_SEMAPHORE_UPDATE (1<<21)
@@ -314,6 +337,8 @@
#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
#define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1)
+#define MI_PREDICATE_SRC0 (0x2400)
+#define MI_PREDICATE_SRC1 (0x2408)
#define MI_PREDICATE_RESULT_2 (0x2214)
#define LOWER_SLICE_ENABLED (1<<0)
@@ -564,6 +589,7 @@ enum punit_power_well {
#define PUNIT_REG_GPU_LFM 0xd3
#define PUNIT_REG_GPU_FREQ_REQ 0xd4
#define PUNIT_REG_GPU_FREQ_STS 0xd8
+#define GPLLENABLE (1<<4)
#define GENFREQSTATUS (1<<0)
#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc
#define PUNIT_REG_CZ_TIMESTAMP 0xce
@@ -672,7 +698,7 @@ enum punit_power_well {
* need to be accessed during AUX communication,
*
* Generally the common lane corresponds to the pipe and
- * the spline (PCS/TX) correponds to the port.
+ * the spline (PCS/TX) corresponds to the port.
*
* For dual channel PHY (VLV/CHV):
*
@@ -796,6 +822,8 @@ enum punit_power_well {
#define _VLV_PCS_DW0_CH1 0x8400
#define DPIO_PCS_TX_LANE2_RESET (1<<16)
#define DPIO_PCS_TX_LANE1_RESET (1<<7)
+#define DPIO_LEFT_TXFIFO_RST_MASTER2 (1<<4)
+#define DPIO_RIGHT_TXFIFO_RST_MASTER2 (1<<3)
#define VLV_PCS_DW0(ch) _PORT(ch, _VLV_PCS_DW0_CH0, _VLV_PCS_DW0_CH1)
#define _VLV_PCS01_DW0_CH0 0x200
@@ -836,12 +864,31 @@ enum punit_power_well {
#define _VLV_PCS_DW9_CH0 0x8224
#define _VLV_PCS_DW9_CH1 0x8424
+#define DPIO_PCS_TX2MARGIN_MASK (0x7<<13)
+#define DPIO_PCS_TX2MARGIN_000 (0<<13)
+#define DPIO_PCS_TX2MARGIN_101 (1<<13)
+#define DPIO_PCS_TX1MARGIN_MASK (0x7<<10)
+#define DPIO_PCS_TX1MARGIN_000 (0<<10)
+#define DPIO_PCS_TX1MARGIN_101 (1<<10)
#define VLV_PCS_DW9(ch) _PORT(ch, _VLV_PCS_DW9_CH0, _VLV_PCS_DW9_CH1)
+#define _VLV_PCS01_DW9_CH0 0x224
+#define _VLV_PCS23_DW9_CH0 0x424
+#define _VLV_PCS01_DW9_CH1 0x2624
+#define _VLV_PCS23_DW9_CH1 0x2824
+#define VLV_PCS01_DW9(ch) _PORT(ch, _VLV_PCS01_DW9_CH0, _VLV_PCS01_DW9_CH1)
+#define VLV_PCS23_DW9(ch) _PORT(ch, _VLV_PCS23_DW9_CH0, _VLV_PCS23_DW9_CH1)
+
#define _CHV_PCS_DW10_CH0 0x8228
#define _CHV_PCS_DW10_CH1 0x8428
#define DPIO_PCS_SWING_CALC_TX0_TX2 (1<<30)
#define DPIO_PCS_SWING_CALC_TX1_TX3 (1<<31)
+#define DPIO_PCS_TX2DEEMP_MASK (0xf<<24)
+#define DPIO_PCS_TX2DEEMP_9P5 (0<<24)
+#define DPIO_PCS_TX2DEEMP_6P0 (2<<24)
+#define DPIO_PCS_TX1DEEMP_MASK (0xf<<16)
+#define DPIO_PCS_TX1DEEMP_9P5 (0<<16)
+#define DPIO_PCS_TX1DEEMP_6P0 (2<<16)
#define CHV_PCS_DW10(ch) _PORT(ch, _CHV_PCS_DW10_CH0, _CHV_PCS_DW10_CH1)
#define _VLV_PCS01_DW10_CH0 0x0228
@@ -853,8 +900,18 @@ enum punit_power_well {
#define _VLV_PCS_DW11_CH0 0x822c
#define _VLV_PCS_DW11_CH1 0x842c
+#define DPIO_LANEDESKEW_STRAP_OVRD (1<<3)
+#define DPIO_LEFT_TXFIFO_RST_MASTER (1<<1)
+#define DPIO_RIGHT_TXFIFO_RST_MASTER (1<<0)
#define VLV_PCS_DW11(ch) _PORT(ch, _VLV_PCS_DW11_CH0, _VLV_PCS_DW11_CH1)
+#define _VLV_PCS01_DW11_CH0 0x022c
+#define _VLV_PCS23_DW11_CH0 0x042c
+#define _VLV_PCS01_DW11_CH1 0x262c
+#define _VLV_PCS23_DW11_CH1 0x282c
+#define VLV_PCS01_DW11(ch) _PORT(ch, _VLV_PCS01_DW11_CH0, _VLV_PCS01_DW11_CH1)
+#define VLV_PCS23_DW11(ch) _PORT(ch, _VLV_PCS23_DW11_CH0, _VLV_PCS23_DW11_CH1)
+
#define _VLV_PCS_DW12_CH0 0x8230
#define _VLV_PCS_DW12_CH1 0x8430
#define VLV_PCS_DW12(ch) _PORT(ch, _VLV_PCS_DW12_CH0, _VLV_PCS_DW12_CH1)
@@ -1237,7 +1294,7 @@ enum punit_power_well {
#define GEN6_WIZ_HASHING_8x8 GEN6_WIZ_HASHING(0, 0)
#define GEN6_WIZ_HASHING_8x4 GEN6_WIZ_HASHING(0, 1)
#define GEN6_WIZ_HASHING_16x4 GEN6_WIZ_HASHING(1, 0)
-#define GEN6_WIZ_HASHING_MASK (GEN6_WIZ_HASHING(1, 1) << 16)
+#define GEN6_WIZ_HASHING_MASK GEN6_WIZ_HASHING(1, 1)
#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5)
#define GFX_MODE 0x02520
@@ -1999,6 +2056,8 @@ enum punit_power_well {
#define DCC_ADDRESSING_MODE_MASK (3 << 0)
#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
#define DCC_CHANNEL_XOR_BIT_17 (1 << 9)
+#define DCC2 0x10204
+#define DCC2_MODIFIED_ENHANCED_DISABLE (1 << 20)
/* Pineview MCH register contains DDR3 setting */
#define CSHRDDR3CTL 0x101a8
@@ -2282,7 +2341,6 @@ enum punit_power_well {
#define GEN6_GT_THREAD_STATUS_REG 0x13805c
#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
-#define GEN6_GT_THREAD_STATUS_CORE_MASK_HSW (0x7 | (0x07 << 16))
#define GEN6_GT_PERF_STATUS (MCHBAR_MIRROR_BASE_SNB + 0x5948)
#define GEN6_RP_STATE_LIMITS (MCHBAR_MIRROR_BASE_SNB + 0x5994)
@@ -2506,9 +2564,7 @@ enum punit_power_well {
#define EDP_PSR_AUX_CTL(dev) (EDP_PSR_BASE(dev) + 0x10)
#define EDP_PSR_AUX_DATA1(dev) (EDP_PSR_BASE(dev) + 0x14)
-#define EDP_PSR_DPCD_COMMAND 0x80060000
#define EDP_PSR_AUX_DATA2(dev) (EDP_PSR_BASE(dev) + 0x18)
-#define EDP_PSR_DPCD_NORMAL_OPERATION (1<<24)
#define EDP_PSR_AUX_DATA3(dev) (EDP_PSR_BASE(dev) + 0x1c)
#define EDP_PSR_AUX_DATA4(dev) (EDP_PSR_BASE(dev) + 0x20)
#define EDP_PSR_AUX_DATA5(dev) (EDP_PSR_BASE(dev) + 0x24)
@@ -3645,6 +3701,7 @@ enum punit_power_well {
#define DP_AUX_CH_CTL_PRECHARGE_TEST (1 << 11)
#define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK (0x7ff)
#define DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT 0
+#define DP_AUX_CH_CTL_SYNC_PULSE_SKL(c) ((c) - 1)
/*
* Computing GMCH M and N values for the Display Port link
@@ -4024,17 +4081,18 @@ enum punit_power_well {
#define DSPFW_PLANEA_WM1_HI_MASK (1<<0)
/* drain latency register values*/
+#define DRAIN_LATENCY_PRECISION_16 16
#define DRAIN_LATENCY_PRECISION_32 32
#define DRAIN_LATENCY_PRECISION_64 64
#define VLV_DDL(pipe) (VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe))
-#define DDL_CURSOR_PRECISION_64 (1<<31)
-#define DDL_CURSOR_PRECISION_32 (0<<31)
+#define DDL_CURSOR_PRECISION_HIGH (1<<31)
+#define DDL_CURSOR_PRECISION_LOW (0<<31)
#define DDL_CURSOR_SHIFT 24
-#define DDL_SPRITE_PRECISION_64(sprite) (1<<(15+8*(sprite)))
-#define DDL_SPRITE_PRECISION_32(sprite) (0<<(15+8*(sprite)))
+#define DDL_SPRITE_PRECISION_HIGH(sprite) (1<<(15+8*(sprite)))
+#define DDL_SPRITE_PRECISION_LOW(sprite) (0<<(15+8*(sprite)))
#define DDL_SPRITE_SHIFT(sprite) (8+8*(sprite))
-#define DDL_PLANE_PRECISION_64 (1<<7)
-#define DDL_PLANE_PRECISION_32 (0<<7)
+#define DDL_PLANE_PRECISION_HIGH (1<<7)
+#define DDL_PLANE_PRECISION_LOW (0<<7)
#define DDL_PLANE_SHIFT 0
#define DRAIN_LATENCY_MASK 0x7f
@@ -4071,6 +4129,41 @@ enum punit_power_well {
#define I965_CURSOR_MAX_WM 32
#define I965_CURSOR_DFT_WM 8
+/* Watermark register definitions for SKL */
+#define CUR_WM_A_0 0x70140
+#define CUR_WM_B_0 0x71140
+#define PLANE_WM_1_A_0 0x70240
+#define PLANE_WM_1_B_0 0x71240
+#define PLANE_WM_2_A_0 0x70340
+#define PLANE_WM_2_B_0 0x71340
+#define PLANE_WM_TRANS_1_A_0 0x70268
+#define PLANE_WM_TRANS_1_B_0 0x71268
+#define PLANE_WM_TRANS_2_A_0 0x70368
+#define PLANE_WM_TRANS_2_B_0 0x71368
+#define CUR_WM_TRANS_A_0 0x70168
+#define CUR_WM_TRANS_B_0 0x71168
+#define PLANE_WM_EN (1 << 31)
+#define PLANE_WM_LINES_SHIFT 14
+#define PLANE_WM_LINES_MASK 0x1f
+#define PLANE_WM_BLOCKS_MASK 0x3ff
+
+#define CUR_WM_0(pipe) _PIPE(pipe, CUR_WM_A_0, CUR_WM_B_0)
+#define CUR_WM(pipe, level) (CUR_WM_0(pipe) + ((4) * (level)))
+#define CUR_WM_TRANS(pipe) _PIPE(pipe, CUR_WM_TRANS_A_0, CUR_WM_TRANS_B_0)
+
+#define _PLANE_WM_1(pipe) _PIPE(pipe, PLANE_WM_1_A_0, PLANE_WM_1_B_0)
+#define _PLANE_WM_2(pipe) _PIPE(pipe, PLANE_WM_2_A_0, PLANE_WM_2_B_0)
+#define _PLANE_WM_BASE(pipe, plane) \
+ _PLANE(plane, _PLANE_WM_1(pipe), _PLANE_WM_2(pipe))
+#define PLANE_WM(pipe, plane, level) \
+ (_PLANE_WM_BASE(pipe, plane) + ((4) * (level)))
+#define _PLANE_WM_TRANS_1(pipe) \
+ _PIPE(pipe, PLANE_WM_TRANS_1_A_0, PLANE_WM_TRANS_1_B_0)
+#define _PLANE_WM_TRANS_2(pipe) \
+ _PIPE(pipe, PLANE_WM_TRANS_2_A_0, PLANE_WM_TRANS_2_B_0)
+#define PLANE_WM_TRANS(pipe, plane) \
+ _PLANE(plane, _PLANE_WM_TRANS_1(pipe), _PLANE_WM_TRANS_2(pipe))
+
/* define the Watermark register on Ironlake */
#define WM0_PIPEA_ILK 0x45100
#define WM0_PIPE_PLANE_MASK (0xffff<<16)
@@ -4177,6 +4270,7 @@ enum punit_power_well {
#define MCURSOR_PIPE_A 0x00
#define MCURSOR_PIPE_B (1 << 28)
#define MCURSOR_GAMMA_ENABLE (1 << 26)
+#define CURSOR_ROTATE_180 (1<<15)
#define CURSOR_TRICKLE_FEED_DISABLE (1 << 14)
#define _CURABASE 0x70084
#define _CURAPOS 0x70088
@@ -4240,9 +4334,11 @@ enum punit_power_well {
#define DISPPLANE_NO_LINE_DOUBLE 0
#define DISPPLANE_STEREO_POLARITY_FIRST 0
#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
-#define DISPPLANE_ROTATE_180 (1<<15)
+#define DISPPLANE_ALPHA_PREMULTIPLY (1<<16) /* CHV pipe B */
+#define DISPPLANE_ROTATE_180 (1<<15)
#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */
#define DISPPLANE_TILED (1<<10)
+#define DISPPLANE_MIRROR (1<<8) /* CHV pipe B */
#define _DSPAADDR 0x70184
#define _DSPASTRIDE 0x70188
#define _DSPAPOS 0x7018C /* reserved */
@@ -4263,6 +4359,24 @@ enum punit_power_well {
#define DSPOFFSET(plane) _PIPE2(plane, _DSPAOFFSET)
#define DSPSURFLIVE(plane) _PIPE2(plane, _DSPASURFLIVE)
+/* CHV pipe B blender and primary plane */
+#define _CHV_BLEND_A 0x60a00
+#define CHV_BLEND_LEGACY (0<<30)
+#define CHV_BLEND_ANDROID (1<<30)
+#define CHV_BLEND_MPO (2<<30)
+#define CHV_BLEND_MASK (3<<30)
+#define _CHV_CANVAS_A 0x60a04
+#define _PRIMPOS_A 0x60a08
+#define _PRIMSIZE_A 0x60a0c
+#define _PRIMCNSTALPHA_A 0x60a10
+#define PRIM_CONST_ALPHA_ENABLE (1<<31)
+
+#define CHV_BLEND(pipe) _TRANSCODER2(pipe, _CHV_BLEND_A)
+#define CHV_CANVAS(pipe) _TRANSCODER2(pipe, _CHV_CANVAS_A)
+#define PRIMPOS(plane) _TRANSCODER2(plane, _PRIMPOS_A)
+#define PRIMSIZE(plane) _TRANSCODER2(plane, _PRIMSIZE_A)
+#define PRIMCNSTALPHA(plane) _TRANSCODER2(plane, _PRIMCNSTALPHA_A)
+
/* Display/Sprite base address macros */
#define DISP_BASEADDR_MASK (0xfffff000)
#define I915_LO_DISPBASE(val) (val & ~DISP_BASEADDR_MASK)
@@ -4464,6 +4578,7 @@ enum punit_power_well {
#define SP_FORMAT_RGBA1010102 (9<<26)
#define SP_FORMAT_RGBX8888 (0xe<<26)
#define SP_FORMAT_RGBA8888 (0xf<<26)
+#define SP_ALPHA_PREMULTIPLY (1<<23) /* CHV pipe B */
#define SP_SOURCE_KEY (1<<22)
#define SP_YUV_BYTE_ORDER_MASK (3<<16)
#define SP_YUV_ORDER_YUYV (0<<16)
@@ -4472,6 +4587,7 @@ enum punit_power_well {
#define SP_YUV_ORDER_VYUY (3<<16)
#define SP_ROTATE_180 (1<<15)
#define SP_TILED (1<<10)
+#define SP_MIRROR (1<<8) /* CHV pipe B */
#define _SPALINOFF (VLV_DISPLAY_BASE + 0x72184)
#define _SPASTRIDE (VLV_DISPLAY_BASE + 0x72188)
#define _SPAPOS (VLV_DISPLAY_BASE + 0x7218c)
@@ -4482,6 +4598,7 @@ enum punit_power_well {
#define _SPAKEYMAXVAL (VLV_DISPLAY_BASE + 0x721a0)
#define _SPATILEOFF (VLV_DISPLAY_BASE + 0x721a4)
#define _SPACONSTALPHA (VLV_DISPLAY_BASE + 0x721a8)
+#define SP_CONST_ALPHA_ENABLE (1<<31)
#define _SPAGAMC (VLV_DISPLAY_BASE + 0x721f4)
#define _SPBCNTR (VLV_DISPLAY_BASE + 0x72280)
@@ -4510,6 +4627,195 @@ enum punit_power_well {
#define SPCONSTALPHA(pipe, plane) _PIPE(pipe * 2 + plane, _SPACONSTALPHA, _SPBCONSTALPHA)
#define SPGAMC(pipe, plane) _PIPE(pipe * 2 + plane, _SPAGAMC, _SPBGAMC)
+/*
+ * CHV pipe B sprite CSC
+ *
+ * |cr| |c0 c1 c2| |cr + cr_ioff| |cr_ooff|
+ * |yg| = |c3 c4 c5| x |yg + yg_ioff| + |yg_ooff|
+ * |cb| |c6 c7 c8| |cb + cb_ioff| |cb_ooff|
+ */
+#define SPCSCYGOFF(sprite) (VLV_DISPLAY_BASE + 0x6d900 + (sprite) * 0x1000)
+#define SPCSCCBOFF(sprite) (VLV_DISPLAY_BASE + 0x6d904 + (sprite) * 0x1000)
+#define SPCSCCROFF(sprite) (VLV_DISPLAY_BASE + 0x6d908 + (sprite) * 0x1000)
+#define SPCSC_OOFF(x) (((x) & 0x7ff) << 16) /* s11 */
+#define SPCSC_IOFF(x) (((x) & 0x7ff) << 0) /* s11 */
+
+#define SPCSCC01(sprite) (VLV_DISPLAY_BASE + 0x6d90c + (sprite) * 0x1000)
+#define SPCSCC23(sprite) (VLV_DISPLAY_BASE + 0x6d910 + (sprite) * 0x1000)
+#define SPCSCC45(sprite) (VLV_DISPLAY_BASE + 0x6d914 + (sprite) * 0x1000)
+#define SPCSCC67(sprite) (VLV_DISPLAY_BASE + 0x6d918 + (sprite) * 0x1000)
+#define SPCSCC8(sprite) (VLV_DISPLAY_BASE + 0x6d91c + (sprite) * 0x1000)
+#define SPCSC_C1(x) (((x) & 0x7fff) << 16) /* s3.12 */
+#define SPCSC_C0(x) (((x) & 0x7fff) << 0) /* s3.12 */
+
+#define SPCSCYGICLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d920 + (sprite) * 0x1000)
+#define SPCSCCBICLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d924 + (sprite) * 0x1000)
+#define SPCSCCRICLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d928 + (sprite) * 0x1000)
+#define SPCSC_IMAX(x) (((x) & 0x7ff) << 16) /* s11 */
+#define SPCSC_IMIN(x) (((x) & 0x7ff) << 0) /* s11 */
+
+#define SPCSCYGOCLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d92c + (sprite) * 0x1000)
+#define SPCSCCBOCLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d930 + (sprite) * 0x1000)
+#define SPCSCCROCLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d934 + (sprite) * 0x1000)
+#define SPCSC_OMAX(x) ((x) << 16) /* u10 */
+#define SPCSC_OMIN(x) ((x) << 0) /* u10 */
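/*
 * Purely illustrative sketch (an assumption, not part of the patch): with
 * the s3.12 coefficient format noted above, 1.0 corresponds to 0x1000, so a
 * rough identity matrix for sprite 0 could be programmed as:
 *
 *	I915_WRITE(SPCSCC01(0), SPCSC_C0(0x1000));	(c0 = 1.0)
 *	I915_WRITE(SPCSCC45(0), SPCSC_C0(0x1000));	(c4 = 1.0)
 *	I915_WRITE(SPCSCC8(0),  SPCSC_C0(0x1000));	(c8 = 1.0)
 *
 * leaving the remaining coefficients, offsets and clamps untouched.
 */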
+
+/* Skylake plane registers */
+
+#define _PLANE_CTL_1_A 0x70180
+#define _PLANE_CTL_2_A 0x70280
+#define _PLANE_CTL_3_A 0x70380
+#define PLANE_CTL_ENABLE (1 << 31)
+#define PLANE_CTL_PIPE_GAMMA_ENABLE (1 << 30)
+#define PLANE_CTL_FORMAT_MASK (0xf << 24)
+#define PLANE_CTL_FORMAT_YUV422 ( 0 << 24)
+#define PLANE_CTL_FORMAT_NV12 ( 1 << 24)
+#define PLANE_CTL_FORMAT_XRGB_2101010 ( 2 << 24)
+#define PLANE_CTL_FORMAT_XRGB_8888 ( 4 << 24)
+#define PLANE_CTL_FORMAT_XRGB_16161616F ( 6 << 24)
+#define PLANE_CTL_FORMAT_AYUV ( 8 << 24)
+#define PLANE_CTL_FORMAT_INDEXED ( 12 << 24)
+#define PLANE_CTL_FORMAT_RGB_565 ( 14 << 24)
+#define PLANE_CTL_PIPE_CSC_ENABLE (1 << 23)
+#define PLANE_CTL_KEY_ENABLE_MASK (0x3 << 21)
+#define PLANE_CTL_KEY_ENABLE_SOURCE ( 1 << 21)
+#define PLANE_CTL_KEY_ENABLE_DESTINATION ( 2 << 21)
+#define PLANE_CTL_ORDER_BGRX (0 << 20)
+#define PLANE_CTL_ORDER_RGBX (1 << 20)
+#define PLANE_CTL_YUV422_ORDER_MASK (0x3 << 16)
+#define PLANE_CTL_YUV422_YUYV ( 0 << 16)
+#define PLANE_CTL_YUV422_UYVY ( 1 << 16)
+#define PLANE_CTL_YUV422_YVYU ( 2 << 16)
+#define PLANE_CTL_YUV422_VYUY ( 3 << 16)
+#define PLANE_CTL_DECOMPRESSION_ENABLE (1 << 15)
+#define PLANE_CTL_TRICKLE_FEED_DISABLE (1 << 14)
+#define PLANE_CTL_PLANE_GAMMA_DISABLE (1 << 13)
+#define PLANE_CTL_TILED_MASK (0x7 << 10)
+#define PLANE_CTL_TILED_LINEAR ( 0 << 10)
+#define PLANE_CTL_TILED_X ( 1 << 10)
+#define PLANE_CTL_TILED_Y ( 4 << 10)
+#define PLANE_CTL_TILED_YF ( 5 << 10)
+#define PLANE_CTL_ALPHA_MASK (0x3 << 4)
+#define PLANE_CTL_ALPHA_DISABLE ( 0 << 4)
+#define PLANE_CTL_ALPHA_SW_PREMULTIPLY ( 2 << 4)
+#define PLANE_CTL_ALPHA_HW_PREMULTIPLY ( 3 << 4)
+#define PLANE_CTL_ROTATE_MASK 0x3
+#define PLANE_CTL_ROTATE_0 0x0
+#define PLANE_CTL_ROTATE_180 0x2
+#define _PLANE_STRIDE_1_A 0x70188
+#define _PLANE_STRIDE_2_A 0x70288
+#define _PLANE_STRIDE_3_A 0x70388
+#define _PLANE_POS_1_A 0x7018c
+#define _PLANE_POS_2_A 0x7028c
+#define _PLANE_POS_3_A 0x7038c
+#define _PLANE_SIZE_1_A 0x70190
+#define _PLANE_SIZE_2_A 0x70290
+#define _PLANE_SIZE_3_A 0x70390
+#define _PLANE_SURF_1_A 0x7019c
+#define _PLANE_SURF_2_A 0x7029c
+#define _PLANE_SURF_3_A 0x7039c
+#define _PLANE_OFFSET_1_A 0x701a4
+#define _PLANE_OFFSET_2_A 0x702a4
+#define _PLANE_OFFSET_3_A 0x703a4
+#define _PLANE_KEYVAL_1_A 0x70194
+#define _PLANE_KEYVAL_2_A 0x70294
+#define _PLANE_KEYMSK_1_A 0x70198
+#define _PLANE_KEYMSK_2_A 0x70298
+#define _PLANE_KEYMAX_1_A 0x701a0
+#define _PLANE_KEYMAX_2_A 0x702a0
+#define _PLANE_BUF_CFG_1_A 0x7027c
+#define _PLANE_BUF_CFG_2_A 0x7037c
+
+#define _PLANE_CTL_1_B 0x71180
+#define _PLANE_CTL_2_B 0x71280
+#define _PLANE_CTL_3_B 0x71380
+#define _PLANE_CTL_1(pipe) _PIPE(pipe, _PLANE_CTL_1_A, _PLANE_CTL_1_B)
+#define _PLANE_CTL_2(pipe) _PIPE(pipe, _PLANE_CTL_2_A, _PLANE_CTL_2_B)
+#define _PLANE_CTL_3(pipe) _PIPE(pipe, _PLANE_CTL_3_A, _PLANE_CTL_3_B)
+#define PLANE_CTL(pipe, plane) \
+ _PLANE(plane, _PLANE_CTL_1(pipe), _PLANE_CTL_2(pipe))
+
+#define _PLANE_STRIDE_1_B 0x71188
+#define _PLANE_STRIDE_2_B 0x71288
+#define _PLANE_STRIDE_3_B 0x71388
+#define _PLANE_STRIDE_1(pipe) \
+ _PIPE(pipe, _PLANE_STRIDE_1_A, _PLANE_STRIDE_1_B)
+#define _PLANE_STRIDE_2(pipe) \
+ _PIPE(pipe, _PLANE_STRIDE_2_A, _PLANE_STRIDE_2_B)
+#define _PLANE_STRIDE_3(pipe) \
+ _PIPE(pipe, _PLANE_STRIDE_3_A, _PLANE_STRIDE_3_B)
+#define PLANE_STRIDE(pipe, plane) \
+ _PLANE(plane, _PLANE_STRIDE_1(pipe), _PLANE_STRIDE_2(pipe))
+
+#define _PLANE_POS_1_B 0x7118c
+#define _PLANE_POS_2_B 0x7128c
+#define _PLANE_POS_3_B 0x7138c
+#define _PLANE_POS_1(pipe) _PIPE(pipe, _PLANE_POS_1_A, _PLANE_POS_1_B)
+#define _PLANE_POS_2(pipe) _PIPE(pipe, _PLANE_POS_2_A, _PLANE_POS_2_B)
+#define _PLANE_POS_3(pipe) _PIPE(pipe, _PLANE_POS_3_A, _PLANE_POS_3_B)
+#define PLANE_POS(pipe, plane) \
+ _PLANE(plane, _PLANE_POS_1(pipe), _PLANE_POS_2(pipe))
+
+#define _PLANE_SIZE_1_B 0x71190
+#define _PLANE_SIZE_2_B 0x71290
+#define _PLANE_SIZE_3_B 0x71390
+#define _PLANE_SIZE_1(pipe) _PIPE(pipe, _PLANE_SIZE_1_A, _PLANE_SIZE_1_B)
+#define _PLANE_SIZE_2(pipe) _PIPE(pipe, _PLANE_SIZE_2_A, _PLANE_SIZE_2_B)
+#define _PLANE_SIZE_3(pipe) _PIPE(pipe, _PLANE_SIZE_3_A, _PLANE_SIZE_3_B)
+#define PLANE_SIZE(pipe, plane) \
+ _PLANE(plane, _PLANE_SIZE_1(pipe), _PLANE_SIZE_2(pipe))
+
+#define _PLANE_SURF_1_B 0x7119c
+#define _PLANE_SURF_2_B 0x7129c
+#define _PLANE_SURF_3_B 0x7139c
+#define _PLANE_SURF_1(pipe) _PIPE(pipe, _PLANE_SURF_1_A, _PLANE_SURF_1_B)
+#define _PLANE_SURF_2(pipe) _PIPE(pipe, _PLANE_SURF_2_A, _PLANE_SURF_2_B)
+#define _PLANE_SURF_3(pipe) _PIPE(pipe, _PLANE_SURF_3_A, _PLANE_SURF_3_B)
+#define PLANE_SURF(pipe, plane) \
+ _PLANE(plane, _PLANE_SURF_1(pipe), _PLANE_SURF_2(pipe))
+
+#define _PLANE_OFFSET_1_B 0x711a4
+#define _PLANE_OFFSET_2_B 0x712a4
+#define _PLANE_OFFSET_1(pipe) _PIPE(pipe, _PLANE_OFFSET_1_A, _PLANE_OFFSET_1_B)
+#define _PLANE_OFFSET_2(pipe) _PIPE(pipe, _PLANE_OFFSET_2_A, _PLANE_OFFSET_2_B)
+#define PLANE_OFFSET(pipe, plane) \
+ _PLANE(plane, _PLANE_OFFSET_1(pipe), _PLANE_OFFSET_2(pipe))
+
+#define _PLANE_KEYVAL_1_B 0x71194
+#define _PLANE_KEYVAL_2_B 0x71294
+#define _PLANE_KEYVAL_1(pipe) _PIPE(pipe, _PLANE_KEYVAL_1_A, _PLANE_KEYVAL_1_B)
+#define _PLANE_KEYVAL_2(pipe) _PIPE(pipe, _PLANE_KEYVAL_2_A, _PLANE_KEYVAL_2_B)
+#define PLANE_KEYVAL(pipe, plane) \
+ _PLANE(plane, _PLANE_KEYVAL_1(pipe), _PLANE_KEYVAL_2(pipe))
+
+#define _PLANE_KEYMSK_1_B 0x71198
+#define _PLANE_KEYMSK_2_B 0x71298
+#define _PLANE_KEYMSK_1(pipe) _PIPE(pipe, _PLANE_KEYMSK_1_A, _PLANE_KEYMSK_1_B)
+#define _PLANE_KEYMSK_2(pipe) _PIPE(pipe, _PLANE_KEYMSK_2_A, _PLANE_KEYMSK_2_B)
+#define PLANE_KEYMSK(pipe, plane) \
+ _PLANE(plane, _PLANE_KEYMSK_1(pipe), _PLANE_KEYMSK_2(pipe))
+
+#define _PLANE_KEYMAX_1_B 0x711a0
+#define _PLANE_KEYMAX_2_B 0x712a0
+#define _PLANE_KEYMAX_1(pipe) _PIPE(pipe, _PLANE_KEYMAX_1_A, _PLANE_KEYMAX_1_B)
+#define _PLANE_KEYMAX_2(pipe) _PIPE(pipe, _PLANE_KEYMAX_2_A, _PLANE_KEYMAX_2_B)
+#define PLANE_KEYMAX(pipe, plane) \
+ _PLANE(plane, _PLANE_KEYMAX_1(pipe), _PLANE_KEYMAX_2(pipe))
+
+#define _PLANE_BUF_CFG_1_B 0x7127c
+#define _PLANE_BUF_CFG_2_B 0x7137c
+#define _PLANE_BUF_CFG_1(pipe) \
+ _PIPE(pipe, _PLANE_BUF_CFG_1_A, _PLANE_BUF_CFG_1_B)
+#define _PLANE_BUF_CFG_2(pipe) \
+ _PIPE(pipe, _PLANE_BUF_CFG_2_A, _PLANE_BUF_CFG_2_B)
+#define PLANE_BUF_CFG(pipe, plane) \
+ _PLANE(plane, _PLANE_BUF_CFG_1(pipe), _PLANE_BUF_CFG_2(pipe))
+
+/* SKL new cursor registers */
+#define _CUR_BUF_CFG_A 0x7017c
+#define _CUR_BUF_CFG_B 0x7117c
+#define CUR_BUF_CFG(pipe) _PIPE(pipe, _CUR_BUF_CFG_A, _CUR_BUF_CFG_B)
+
/* VBIOS regs */
#define VGACNTRL 0x71400
# define VGA_DISP_DISABLE (1 << 31)
@@ -4625,6 +4931,18 @@ enum punit_power_well {
#define PF_VSCALE(pipe) _PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE)
#define PF_HSCALE(pipe) _PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE)
+#define _PSA_CTL 0x68180
+#define _PSB_CTL 0x68980
+#define PS_ENABLE (1<<31)
+#define _PSA_WIN_SZ 0x68174
+#define _PSB_WIN_SZ 0x68974
+#define _PSA_WIN_POS 0x68170
+#define _PSB_WIN_POS 0x68970
+
+#define PS_CTL(pipe) _PIPE(pipe, _PSA_CTL, _PSB_CTL)
+#define PS_WIN_SZ(pipe) _PIPE(pipe, _PSA_WIN_SZ, _PSB_WIN_SZ)
+#define PS_WIN_POS(pipe) _PIPE(pipe, _PSA_WIN_POS, _PSB_WIN_POS)
+
/* legacy palette */
#define _LGC_PALETTE_A 0x4a000
#define _LGC_PALETTE_B 0x4a800
@@ -4746,16 +5064,32 @@ enum punit_power_well {
#define GEN8_PIPE_SCAN_LINE_EVENT (1 << 2)
#define GEN8_PIPE_VSYNC (1 << 1)
#define GEN8_PIPE_VBLANK (1 << 0)
+#define GEN9_PIPE_CURSOR_FAULT (1 << 11)
+#define GEN9_PIPE_PLANE3_FAULT (1 << 9)
+#define GEN9_PIPE_PLANE2_FAULT (1 << 8)
+#define GEN9_PIPE_PLANE1_FAULT (1 << 7)
+#define GEN9_PIPE_PLANE3_FLIP_DONE (1 << 5)
+#define GEN9_PIPE_PLANE2_FLIP_DONE (1 << 4)
+#define GEN9_PIPE_PLANE1_FLIP_DONE (1 << 3)
+#define GEN9_PIPE_PLANE_FLIP_DONE(p) (1 << (3 + p))
#define GEN8_DE_PIPE_IRQ_FAULT_ERRORS \
(GEN8_PIPE_CURSOR_FAULT | \
GEN8_PIPE_SPRITE_FAULT | \
GEN8_PIPE_PRIMARY_FAULT)
+#define GEN9_DE_PIPE_IRQ_FAULT_ERRORS \
+ (GEN9_PIPE_CURSOR_FAULT | \
+ GEN9_PIPE_PLANE3_FAULT | \
+ GEN9_PIPE_PLANE2_FAULT | \
+ GEN9_PIPE_PLANE1_FAULT)
#define GEN8_DE_PORT_ISR 0x44440
#define GEN8_DE_PORT_IMR 0x44444
#define GEN8_DE_PORT_IIR 0x44448
#define GEN8_DE_PORT_IER 0x4444c
#define GEN8_PORT_DP_A_HOTPLUG (1 << 3)
+#define GEN9_AUX_CHANNEL_D (1 << 27)
+#define GEN9_AUX_CHANNEL_C (1 << 26)
+#define GEN9_AUX_CHANNEL_B (1 << 25)
#define GEN8_AUX_CHANNEL_A (1 << 0)
#define GEN8_DE_MISC_ISR 0x44460
@@ -4839,6 +5173,8 @@ enum punit_power_well {
/* GEN8 chicken */
#define HDC_CHICKEN0 0x7300
#define HDC_FORCE_NON_COHERENT (1<<4)
+#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1<<11)
+#define HDC_FENCE_DEST_SLM_DISABLE (1<<14)
/* WaCatErrorRejectionIssue */
#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
@@ -5540,6 +5876,12 @@ enum punit_power_well {
#define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5)
#define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7)
#define FORCEWAKE_MT 0xa188 /* multi-threaded */
+#define FORCEWAKE_MEDIA_GEN9 0xa270
+#define FORCEWAKE_RENDER_GEN9 0xa278
+#define FORCEWAKE_BLITTER_GEN9 0xa188
+#define FORCEWAKE_ACK_MEDIA_GEN9 0x0D88
+#define FORCEWAKE_ACK_RENDER_GEN9 0x0D84
+#define FORCEWAKE_ACK_BLITTER_GEN9 0x130044
#define FORCEWAKE_KERNEL 0x1
#define FORCEWAKE_USER 0x2
#define FORCEWAKE_MT_ACK 0x130040
@@ -5711,9 +6053,17 @@ enum punit_power_well {
#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
#define DISPLAY_IPS_CONTROL 0x19
+#define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A
#define GEN6_PCODE_DATA 0x138128
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
+#define GEN6_PCODE_DATA1 0x13812C
+
+#define GEN9_PCODE_READ_MEM_LATENCY 0x6
+#define GEN9_MEM_LATENCY_LEVEL_MASK 0xFF
+#define GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT 8
+#define GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT 16
+#define GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT 24
#define GEN6_GT_CORE_STATUS 0x138060
#define GEN6_CORE_CPD_STATE_MASK (7<<4)
@@ -5751,6 +6101,9 @@ enum punit_power_well {
#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10)
#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
+#define GEN9_HALF_SLICE_CHICKEN5 0xe188
+#define GEN9_DG_MIRROR_FIX_ENABLE (1<<5)
+
#define GEN8_ROW_CHICKEN 0xe4f0
#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8)
#define STALL_DOP_GATING_DISABLE (1<<5)
@@ -5766,57 +6119,58 @@ enum punit_power_well {
#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8)
#define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1)
+/* Audio */
#define G4X_AUD_VID_DID (dev_priv->info.display_mmio_offset + 0x62020)
-#define INTEL_AUDIO_DEVCL 0x808629FB
-#define INTEL_AUDIO_DEVBLC 0x80862801
-#define INTEL_AUDIO_DEVCTG 0x80862802
+#define INTEL_AUDIO_DEVCL 0x808629FB
+#define INTEL_AUDIO_DEVBLC 0x80862801
+#define INTEL_AUDIO_DEVCTG 0x80862802
#define G4X_AUD_CNTL_ST 0x620B4
-#define G4X_ELDV_DEVCL_DEVBLC (1 << 13)
-#define G4X_ELDV_DEVCTG (1 << 14)
-#define G4X_ELD_ADDR (0xf << 5)
-#define G4X_ELD_ACK (1 << 4)
+#define G4X_ELDV_DEVCL_DEVBLC (1 << 13)
+#define G4X_ELDV_DEVCTG (1 << 14)
+#define G4X_ELD_ADDR_MASK (0xf << 5)
+#define G4X_ELD_ACK (1 << 4)
#define G4X_HDMIW_HDMIEDID 0x6210C
-#define IBX_HDMIW_HDMIEDID_A 0xE2050
-#define IBX_HDMIW_HDMIEDID_B 0xE2150
+#define _IBX_HDMIW_HDMIEDID_A 0xE2050
+#define _IBX_HDMIW_HDMIEDID_B 0xE2150
#define IBX_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
- IBX_HDMIW_HDMIEDID_A, \
- IBX_HDMIW_HDMIEDID_B)
-#define IBX_AUD_CNTL_ST_A 0xE20B4
-#define IBX_AUD_CNTL_ST_B 0xE21B4
+ _IBX_HDMIW_HDMIEDID_A, \
+ _IBX_HDMIW_HDMIEDID_B)
+#define _IBX_AUD_CNTL_ST_A 0xE20B4
+#define _IBX_AUD_CNTL_ST_B 0xE21B4
#define IBX_AUD_CNTL_ST(pipe) _PIPE(pipe, \
- IBX_AUD_CNTL_ST_A, \
- IBX_AUD_CNTL_ST_B)
-#define IBX_ELD_BUFFER_SIZE (0x1f << 10)
-#define IBX_ELD_ADDRESS (0x1f << 5)
-#define IBX_ELD_ACK (1 << 4)
+ _IBX_AUD_CNTL_ST_A, \
+ _IBX_AUD_CNTL_ST_B)
+#define IBX_ELD_BUFFER_SIZE_MASK (0x1f << 10)
+#define IBX_ELD_ADDRESS_MASK (0x1f << 5)
+#define IBX_ELD_ACK (1 << 4)
#define IBX_AUD_CNTL_ST2 0xE20C0
-#define IBX_ELD_VALIDB (1 << 0)
-#define IBX_CP_READYB (1 << 1)
+#define IBX_CP_READY(port) ((1 << 1) << (((port) - 1) * 4))
+#define IBX_ELD_VALID(port) ((1 << 0) << (((port) - 1) * 4))
-#define CPT_HDMIW_HDMIEDID_A 0xE5050
-#define CPT_HDMIW_HDMIEDID_B 0xE5150
+#define _CPT_HDMIW_HDMIEDID_A 0xE5050
+#define _CPT_HDMIW_HDMIEDID_B 0xE5150
#define CPT_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
- CPT_HDMIW_HDMIEDID_A, \
- CPT_HDMIW_HDMIEDID_B)
-#define CPT_AUD_CNTL_ST_A 0xE50B4
-#define CPT_AUD_CNTL_ST_B 0xE51B4
+ _CPT_HDMIW_HDMIEDID_A, \
+ _CPT_HDMIW_HDMIEDID_B)
+#define _CPT_AUD_CNTL_ST_A 0xE50B4
+#define _CPT_AUD_CNTL_ST_B 0xE51B4
#define CPT_AUD_CNTL_ST(pipe) _PIPE(pipe, \
- CPT_AUD_CNTL_ST_A, \
- CPT_AUD_CNTL_ST_B)
+ _CPT_AUD_CNTL_ST_A, \
+ _CPT_AUD_CNTL_ST_B)
#define CPT_AUD_CNTRL_ST2 0xE50C0
-#define VLV_HDMIW_HDMIEDID_A (VLV_DISPLAY_BASE + 0x62050)
-#define VLV_HDMIW_HDMIEDID_B (VLV_DISPLAY_BASE + 0x62150)
+#define _VLV_HDMIW_HDMIEDID_A (VLV_DISPLAY_BASE + 0x62050)
+#define _VLV_HDMIW_HDMIEDID_B (VLV_DISPLAY_BASE + 0x62150)
#define VLV_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
- VLV_HDMIW_HDMIEDID_A, \
- VLV_HDMIW_HDMIEDID_B)
-#define VLV_AUD_CNTL_ST_A (VLV_DISPLAY_BASE + 0x620B4)
-#define VLV_AUD_CNTL_ST_B (VLV_DISPLAY_BASE + 0x621B4)
+ _VLV_HDMIW_HDMIEDID_A, \
+ _VLV_HDMIW_HDMIEDID_B)
+#define _VLV_AUD_CNTL_ST_A (VLV_DISPLAY_BASE + 0x620B4)
+#define _VLV_AUD_CNTL_ST_B (VLV_DISPLAY_BASE + 0x621B4)
#define VLV_AUD_CNTL_ST(pipe) _PIPE(pipe, \
- VLV_AUD_CNTL_ST_A, \
- VLV_AUD_CNTL_ST_B)
+ _VLV_AUD_CNTL_ST_A, \
+ _VLV_AUD_CNTL_ST_B)
#define VLV_AUD_CNTL_ST2 (VLV_DISPLAY_BASE + 0x620C0)
/* These are the 4 32-bit write offset registers for each stream
@@ -5825,28 +6179,28 @@ enum punit_power_well {
*/
#define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4)
-#define IBX_AUD_CONFIG_A 0xe2000
-#define IBX_AUD_CONFIG_B 0xe2100
+#define _IBX_AUD_CONFIG_A 0xe2000
+#define _IBX_AUD_CONFIG_B 0xe2100
#define IBX_AUD_CFG(pipe) _PIPE(pipe, \
- IBX_AUD_CONFIG_A, \
- IBX_AUD_CONFIG_B)
-#define CPT_AUD_CONFIG_A 0xe5000
-#define CPT_AUD_CONFIG_B 0xe5100
+ _IBX_AUD_CONFIG_A, \
+ _IBX_AUD_CONFIG_B)
+#define _CPT_AUD_CONFIG_A 0xe5000
+#define _CPT_AUD_CONFIG_B 0xe5100
#define CPT_AUD_CFG(pipe) _PIPE(pipe, \
- CPT_AUD_CONFIG_A, \
- CPT_AUD_CONFIG_B)
-#define VLV_AUD_CONFIG_A (VLV_DISPLAY_BASE + 0x62000)
-#define VLV_AUD_CONFIG_B (VLV_DISPLAY_BASE + 0x62100)
+ _CPT_AUD_CONFIG_A, \
+ _CPT_AUD_CONFIG_B)
+#define _VLV_AUD_CONFIG_A (VLV_DISPLAY_BASE + 0x62000)
+#define _VLV_AUD_CONFIG_B (VLV_DISPLAY_BASE + 0x62100)
#define VLV_AUD_CFG(pipe) _PIPE(pipe, \
- VLV_AUD_CONFIG_A, \
- VLV_AUD_CONFIG_B)
+ _VLV_AUD_CONFIG_A, \
+ _VLV_AUD_CONFIG_B)
#define AUD_CONFIG_N_VALUE_INDEX (1 << 29)
#define AUD_CONFIG_N_PROG_ENABLE (1 << 28)
#define AUD_CONFIG_UPPER_N_SHIFT 20
-#define AUD_CONFIG_UPPER_N_VALUE (0xff << 20)
+#define AUD_CONFIG_UPPER_N_MASK (0xff << 20)
#define AUD_CONFIG_LOWER_N_SHIFT 4
-#define AUD_CONFIG_LOWER_N_VALUE (0xfff << 4)
+#define AUD_CONFIG_LOWER_N_MASK (0xfff << 4)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK (0xf << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 (0 << 16)
@@ -5862,52 +6216,44 @@ enum punit_power_well {
#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
/* HSW Audio */
-#define HSW_AUD_CONFIG_A 0x65000 /* Audio Configuration Transcoder A */
-#define HSW_AUD_CONFIG_B 0x65100 /* Audio Configuration Transcoder B */
-#define HSW_AUD_CFG(pipe) _PIPE(pipe, \
- HSW_AUD_CONFIG_A, \
- HSW_AUD_CONFIG_B)
-
-#define HSW_AUD_MISC_CTRL_A 0x65010 /* Audio Misc Control Convert 1 */
-#define HSW_AUD_MISC_CTRL_B 0x65110 /* Audio Misc Control Convert 2 */
-#define HSW_AUD_MISC_CTRL(pipe) _PIPE(pipe, \
- HSW_AUD_MISC_CTRL_A, \
- HSW_AUD_MISC_CTRL_B)
-
-#define HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4 /* Audio DIP and ELD Control State Transcoder A */
-#define HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4 /* Audio DIP and ELD Control State Transcoder B */
-#define HSW_AUD_DIP_ELD_CTRL(pipe) _PIPE(pipe, \
- HSW_AUD_DIP_ELD_CTRL_ST_A, \
- HSW_AUD_DIP_ELD_CTRL_ST_B)
+#define _HSW_AUD_CONFIG_A 0x65000
+#define _HSW_AUD_CONFIG_B 0x65100
+#define HSW_AUD_CFG(pipe) _PIPE(pipe, \
+ _HSW_AUD_CONFIG_A, \
+ _HSW_AUD_CONFIG_B)
+
+#define _HSW_AUD_MISC_CTRL_A 0x65010
+#define _HSW_AUD_MISC_CTRL_B 0x65110
+#define HSW_AUD_MISC_CTRL(pipe) _PIPE(pipe, \
+ _HSW_AUD_MISC_CTRL_A, \
+ _HSW_AUD_MISC_CTRL_B)
+
+#define _HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4
+#define _HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4
+#define HSW_AUD_DIP_ELD_CTRL(pipe) _PIPE(pipe, \
+ _HSW_AUD_DIP_ELD_CTRL_ST_A, \
+ _HSW_AUD_DIP_ELD_CTRL_ST_B)
/* Audio Digital Converter */
-#define HSW_AUD_DIG_CNVT_1 0x65080 /* Audio Converter 1 */
-#define HSW_AUD_DIG_CNVT_2 0x65180 /* Audio Converter 1 */
-#define AUD_DIG_CNVT(pipe) _PIPE(pipe, \
- HSW_AUD_DIG_CNVT_1, \
- HSW_AUD_DIG_CNVT_2)
-#define DIP_PORT_SEL_MASK 0x3
-
-#define HSW_AUD_EDID_DATA_A 0x65050
-#define HSW_AUD_EDID_DATA_B 0x65150
-#define HSW_AUD_EDID_DATA(pipe) _PIPE(pipe, \
- HSW_AUD_EDID_DATA_A, \
- HSW_AUD_EDID_DATA_B)
-
-#define HSW_AUD_PIPE_CONV_CFG 0x6507c /* Audio pipe and converter configs */
-#define HSW_AUD_PIN_ELD_CP_VLD 0x650c0 /* Audio ELD and CP Ready Status */
-#define AUDIO_INACTIVE_C (1<<11)
-#define AUDIO_INACTIVE_B (1<<7)
-#define AUDIO_INACTIVE_A (1<<3)
-#define AUDIO_OUTPUT_ENABLE_A (1<<2)
-#define AUDIO_OUTPUT_ENABLE_B (1<<6)
-#define AUDIO_OUTPUT_ENABLE_C (1<<10)
-#define AUDIO_ELD_VALID_A (1<<0)
-#define AUDIO_ELD_VALID_B (1<<4)
-#define AUDIO_ELD_VALID_C (1<<8)
-#define AUDIO_CP_READY_A (1<<1)
-#define AUDIO_CP_READY_B (1<<5)
-#define AUDIO_CP_READY_C (1<<9)
+#define _HSW_AUD_DIG_CNVT_1 0x65080
+#define _HSW_AUD_DIG_CNVT_2 0x65180
+#define AUD_DIG_CNVT(pipe) _PIPE(pipe, \
+ _HSW_AUD_DIG_CNVT_1, \
+ _HSW_AUD_DIG_CNVT_2)
+#define DIP_PORT_SEL_MASK 0x3
+
+#define _HSW_AUD_EDID_DATA_A 0x65050
+#define _HSW_AUD_EDID_DATA_B 0x65150
+#define HSW_AUD_EDID_DATA(pipe) _PIPE(pipe, \
+ _HSW_AUD_EDID_DATA_A, \
+ _HSW_AUD_EDID_DATA_B)
+
+#define HSW_AUD_PIPE_CONV_CFG 0x6507c
+#define HSW_AUD_PIN_ELD_CP_VLD 0x650c0
+#define AUDIO_INACTIVE(trans) ((1 << 3) << ((trans) * 4))
+#define AUDIO_OUTPUT_ENABLE(trans) ((1 << 2) << ((trans) * 4))
+#define AUDIO_CP_READY(trans) ((1 << 1) << ((trans) * 4))
+#define AUDIO_ELD_VALID(trans) ((1 << 0) << ((trans) * 4))
/* HSW Power Wells */
#define HSW_PWR_WELL_BIOS 0x45400 /* CTL1 */
@@ -6125,6 +6471,83 @@ enum punit_power_well {
#define LCPLL_CD_SOURCE_FCLK (1<<21)
#define LCPLL_CD_SOURCE_FCLK_DONE (1<<19)
+/*
+ * SKL Clocks
+ */
+
+/* CDCLK_CTL */
+#define CDCLK_CTL 0x46000
+#define CDCLK_FREQ_SEL_MASK (3<<26)
+#define CDCLK_FREQ_450_432 (0<<26)
+#define CDCLK_FREQ_540 (1<<26)
+#define CDCLK_FREQ_337_308 (2<<26)
+#define CDCLK_FREQ_675_617 (3<<26)
+#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
+
+/* LCPLL_CTL */
+#define LCPLL1_CTL 0x46010
+#define LCPLL2_CTL 0x46014
+#define LCPLL_PLL_ENABLE (1<<31)
+
+/* DPLL control1 */
+#define DPLL_CTRL1 0x6C058
+#define DPLL_CTRL1_HDMI_MODE(id) (1<<((id)*6+5))
+#define DPLL_CTRL1_SSC(id) (1<<((id)*6+4))
+#define DPLL_CRTL1_LINK_RATE_MASK(id) (7<<((id)*6+1))
+#define DPLL_CRTL1_LINK_RATE_SHIFT(id) ((id)*6+1)
+#define DPLL_CRTL1_LINK_RATE(linkrate, id) ((linkrate)<<((id)*6+1))
+#define DPLL_CTRL1_OVERRIDE(id) (1<<((id)*6))
+#define DPLL_CRTL1_LINK_RATE_2700 0
+#define DPLL_CRTL1_LINK_RATE_1350 1
+#define DPLL_CRTL1_LINK_RATE_810 2
+#define DPLL_CRTL1_LINK_RATE_1620 3
+#define DPLL_CRTL1_LINK_RATE_1080 4
+#define DPLL_CRTL1_LINK_RATE_2160 5
+
+/* DPLL control2 */
+#define DPLL_CTRL2 0x6C05C
+#define DPLL_CTRL2_DDI_CLK_OFF(port) (1<<(port+15))
+#define DPLL_CTRL2_DDI_CLK_SEL_MASK(port) (3<<((port)*3+1))
+#define DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port) ((port)*3+1)
+#define DPLL_CTRL2_DDI_CLK_SEL(clk, port) (clk<<((port)*3+1))
+#define DPLL_CTRL2_DDI_SEL_OVERRIDE(port) (1<<((port)*3))
+
+/* DPLL Status */
+#define DPLL_STATUS 0x6C060
+#define DPLL_LOCK(id) (1<<((id)*8))
+
+/* DPLL cfg */
+#define DPLL1_CFGCR1 0x6C040
+#define DPLL2_CFGCR1 0x6C048
+#define DPLL3_CFGCR1 0x6C050
+#define DPLL_CFGCR1_FREQ_ENABLE (1<<31)
+#define DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff<<9)
+#define DPLL_CFGCR1_DCO_FRACTION(x) (x<<9)
+#define DPLL_CFGCR1_DCO_INTEGER_MASK (0x1ff)
+
+#define DPLL1_CFGCR2 0x6C044
+#define DPLL2_CFGCR2 0x6C04C
+#define DPLL3_CFGCR2 0x6C054
+#define DPLL_CFGCR2_QDIV_RATIO_MASK (0xff<<8)
+#define DPLL_CFGCR2_QDIV_RATIO(x) (x<<8)
+#define DPLL_CFGCR2_QDIV_MODE(x) (x<<7)
+#define DPLL_CFGCR2_KDIV_MASK (3<<5)
+#define DPLL_CFGCR2_KDIV(x) (x<<5)
+#define DPLL_CFGCR2_KDIV_5 (0<<5)
+#define DPLL_CFGCR2_KDIV_2 (1<<5)
+#define DPLL_CFGCR2_KDIV_3 (2<<5)
+#define DPLL_CFGCR2_KDIV_1 (3<<5)
+#define DPLL_CFGCR2_PDIV_MASK (7<<2)
+#define DPLL_CFGCR2_PDIV(x) (x<<2)
+#define DPLL_CFGCR2_PDIV_1 (0<<2)
+#define DPLL_CFGCR2_PDIV_2 (1<<2)
+#define DPLL_CFGCR2_PDIV_3 (2<<2)
+#define DPLL_CFGCR2_PDIV_7 (4<<2)
+#define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3)
+
+#define GET_CFG_CR1_REG(id) (DPLL1_CFGCR1 + (id - SKL_DPLL1) * 8)
+#define GET_CFG_CR2_REG(id) (DPLL1_CFGCR2 + (id - SKL_DPLL1) * 8)
+
/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
* since on HSW we can't write to it using I915_WRITE. */
#define D_COMP_HSW (MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 043123c77a1..26368822a33 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -203,34 +203,19 @@ static void i915_save_display(struct drm_device *dev)
i915_save_display_reg(dev);
/* LVDS state */
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
- dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
- } else if (IS_VALLEYVIEW(dev)) {
- dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
- dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
-
- dev_priv->regfile.saveBLC_HIST_CTL =
- I915_READ(VLV_BLC_HIST_CTL(PIPE_A));
- dev_priv->regfile.saveBLC_HIST_CTL_B =
- I915_READ(VLV_BLC_HIST_CTL(PIPE_B));
- } else {
- dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
- dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
- dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
- if (IS_MOBILE(dev) && !IS_I830(dev))
- dev_priv->regfile.saveLVDS = I915_READ(LVDS);
- }
-
- if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
- dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
+ else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
+ dev_priv->regfile.saveLVDS = I915_READ(LVDS);
+ /* Panel power sequencer */
if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
- } else {
+ } else if (!IS_VALLEYVIEW(dev)) {
+ dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR);
@@ -259,29 +244,19 @@ static void i915_restore_display(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
mask = ~LVDS_PORT_EN;
+ /* LVDS state */
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS & mask);
else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
I915_WRITE(LVDS, dev_priv->regfile.saveLVDS & mask);
- if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
- I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
-
+ /* Panel power sequencer */
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
- I915_WRITE(RSTDBYCTL,
- dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
- } else if (IS_VALLEYVIEW(dev)) {
- I915_WRITE(VLV_BLC_HIST_CTL(PIPE_A),
- dev_priv->regfile.saveBLC_HIST_CTL);
- I915_WRITE(VLV_BLC_HIST_CTL(PIPE_B),
- dev_priv->regfile.saveBLC_HIST_CTL);
- } else {
- I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
- I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
+ } else if (!IS_VALLEYVIEW(dev)) {
I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
@@ -328,6 +303,10 @@ int i915_save_state(struct drm_device *dev)
}
}
+ if (IS_GEN4(dev))
+ pci_read_config_word(dev->pdev, GCDGMBUS,
+ &dev_priv->regfile.saveGCDGMBUS);
+
/* Cache mode state */
if (INTEL_INFO(dev)->gen < 7)
dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
@@ -356,6 +335,10 @@ int i915_restore_state(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
i915_gem_restore_fences(dev);
+
+ if (IS_GEN4(dev))
+ pci_write_config_word(dev->pdev, GCDGMBUS,
+ dev_priv->regfile.saveGCDGMBUS);
i915_restore_display(dev);
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
@@ -368,6 +351,8 @@ int i915_restore_state(struct drm_device *dev)
I915_WRITE(_FDI_RXA_IMR, dev_priv->regfile.saveFDI_RXA_IMR);
I915_WRITE(_FDI_RXB_IMR, dev_priv->regfile.saveFDI_RXB_IMR);
I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->regfile.savePCH_PORT_HOTPLUG);
+ I915_WRITE(RSTDBYCTL,
+ dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
} else {
I915_WRITE(IER, dev_priv->regfile.saveIER);
I915_WRITE(IMR, dev_priv->regfile.saveIMR);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 503847f18fd..4a5af695307 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -139,8 +139,6 @@ static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
static struct attribute *rc6_attrs[] = {
&dev_attr_rc6_enable.attr,
&dev_attr_rc6_residency_ms.attr,
- &dev_attr_rc6p_residency_ms.attr,
- &dev_attr_rc6pp_residency_ms.attr,
NULL
};
@@ -148,6 +146,17 @@ static struct attribute_group rc6_attr_group = {
.name = power_group_name,
.attrs = rc6_attrs
};
+
+static struct attribute *rc6p_attrs[] = {
+ &dev_attr_rc6p_residency_ms.attr,
+ &dev_attr_rc6pp_residency_ms.attr,
+ NULL
+};
+
+static struct attribute_group rc6p_attr_group = {
+ .name = power_group_name,
+ .attrs = rc6p_attrs
+};
#endif
static int l3_access_valid(struct drm_device *dev, loff_t offset)
@@ -595,12 +604,18 @@ void i915_setup_sysfs(struct drm_device *dev)
int ret;
#ifdef CONFIG_PM
- if (INTEL_INFO(dev)->gen >= 6) {
+ if (HAS_RC6(dev)) {
ret = sysfs_merge_group(&dev->primary->kdev->kobj,
&rc6_attr_group);
if (ret)
DRM_ERROR("RC6 residency sysfs setup failed\n");
}
+ if (HAS_RC6p(dev)) {
+ ret = sysfs_merge_group(&dev->primary->kdev->kobj,
+ &rc6p_attr_group);
+ if (ret)
+ DRM_ERROR("RC6p residency sysfs setup failed\n");
+ }
#endif
if (HAS_L3_DPF(dev)) {
ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
@@ -640,5 +655,6 @@ void i915_teardown_sysfs(struct drm_device *dev)
device_remove_bin_file(dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
+ sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6p_attr_group);
#endif
}
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index f5aa0067755..751d4ad14d6 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -587,6 +587,110 @@ TRACE_EVENT(intel_gpu_freq_change,
TP_printk("new_freq=%u", __entry->freq)
);
+/**
+ * DOC: i915_ppgtt_create and i915_ppgtt_release tracepoints
+ *
+ * With full ppgtt enabled, each process using drm will allocate at least one
+ * translation table. With these traces it is possible to keep track of the
+ * allocation and of the lifetime of the tables; this can be used during
+ * testing/debug to verify that we are not leaking ppgtts.
+ * These traces identify the ppgtt through the vm pointer, which is also printed
+ * by the i915_vma_bind and i915_vma_unbind tracepoints.
+ */
+DECLARE_EVENT_CLASS(i915_ppgtt,
+ TP_PROTO(struct i915_address_space *vm),
+ TP_ARGS(vm),
+
+ TP_STRUCT__entry(
+ __field(struct i915_address_space *, vm)
+ __field(u32, dev)
+ ),
+
+ TP_fast_assign(
+ __entry->vm = vm;
+ __entry->dev = vm->dev->primary->index;
+ ),
+
+ TP_printk("dev=%u, vm=%p", __entry->dev, __entry->vm)
+)
+
+DEFINE_EVENT(i915_ppgtt, i915_ppgtt_create,
+ TP_PROTO(struct i915_address_space *vm),
+ TP_ARGS(vm)
+);
+
+DEFINE_EVENT(i915_ppgtt, i915_ppgtt_release,
+ TP_PROTO(struct i915_address_space *vm),
+ TP_ARGS(vm)
+);
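/*
 * Usage sketch (an assumption, not part of the patch): events declared with
 * DEFINE_EVENT() are emitted from driver code through the generated
 * trace_<name>() helpers, so the ppgtt allocation and teardown paths would
 * call something like:
 *
 *	trace_i915_ppgtt_create(&ppgtt->base);
 *	...
 *	trace_i915_ppgtt_release(&ppgtt->base);
 *
 * The same pattern applies to the i915_context and switch_mm events below.
 */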
+
+/**
+ * DOC: i915_context_create and i915_context_free tracepoints
+ *
+ * These tracepoints are used to track creation and deletion of contexts.
+ * If full ppgtt is enabled, they also print the address of the vm assigned to
+ * the context.
+ */
+DECLARE_EVENT_CLASS(i915_context,
+ TP_PROTO(struct intel_context *ctx),
+ TP_ARGS(ctx),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(struct intel_context *, ctx)
+ __field(struct i915_address_space *, vm)
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
+ __entry->dev = ctx->file_priv->dev_priv->dev->primary->index;
+ ),
+
+ TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
+ __entry->dev, __entry->ctx, __entry->vm)
+)
+
+DEFINE_EVENT(i915_context, i915_context_create,
+ TP_PROTO(struct intel_context *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(i915_context, i915_context_free,
+ TP_PROTO(struct intel_context *ctx),
+ TP_ARGS(ctx)
+);
+
+/**
+ * DOC: switch_mm tracepoint
+ *
+ * This tracepoint allows tracking of the mm switch, which is an important point
+ * in the lifetime of the vm in the legacy submission path. This tracepoint is
+ * called only if full ppgtt is enabled.
+ */
+TRACE_EVENT(switch_mm,
+ TP_PROTO(struct intel_engine_cs *ring, struct intel_context *to),
+
+ TP_ARGS(ring, to),
+
+ TP_STRUCT__entry(
+ __field(u32, ring)
+ __field(struct intel_context *, to)
+ __field(struct i915_address_space *, vm)
+ __field(u32, dev)
+ ),
+
+ TP_fast_assign(
+ __entry->ring = ring->id;
+ __entry->to = to;
+ __entry->vm = to->ppgtt ? &to->ppgtt->base : NULL;
+ __entry->dev = ring->dev->primary->index;
+ ),
+
+ TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
+ __entry->dev, __entry->ring, __entry->to, __entry->vm)
+);
+
#endif /* _I915_TRACE_H_ */
/* This part must be outside protection */
diff --git a/drivers/gpu/drm/i915/i915_ums.c b/drivers/gpu/drm/i915/i915_ums.c
index 480da593e6c..d10fe3e9c49 100644
--- a/drivers/gpu/drm/i915/i915_ums.c
+++ b/drivers/gpu/drm/i915/i915_ums.c
@@ -270,6 +270,12 @@ void i915_save_display_reg(struct drm_device *dev)
}
/* FIXME: regfile.save TV & SDVO state */
+ /* Panel fitter */
+ if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
+ dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
+ }
+
/* Backlight */
if (INTEL_INFO(dev)->gen <= 4)
pci_read_config_byte(dev->pdev, PCI_LBPC,
@@ -284,6 +290,7 @@ void i915_save_display_reg(struct drm_device *dev)
dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
if (INTEL_INFO(dev)->gen >= 4)
dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+ dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
}
return;
@@ -313,6 +320,13 @@ void i915_restore_display_reg(struct drm_device *dev)
if (INTEL_INFO(dev)->gen >= 4)
I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
+ I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
+ }
+
+ /* Panel fitter */
+ if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
+ I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
}
/* Display port ratios (must be done before clock is set) */
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
new file mode 100644
index 00000000000..2c7ed5cb29c
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -0,0 +1,463 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_edid.h>
+#include "intel_drv.h"
+#include "i915_drv.h"
+
+/**
+ * DOC: High Definition Audio over HDMI and Display Port
+ *
+ * The graphics and audio drivers together support High Definition Audio over
+ * HDMI and Display Port. The audio programming sequences are divided into audio
+ * codec and controller enable and disable sequences. The graphics driver
+ * handles the audio codec sequences, while the audio driver handles the audio
+ * controller sequences.
+ *
+ * The disable sequences must be performed before disabling the transcoder or
+ * port. The enable sequences may only be performed after enabling the
+ * transcoder and port, and after link training has completed.
+ *
+ * The codec and controller sequences could be done either in parallel or serially,
+ * but generally the ELDV/PD change in the codec sequence indicates to the audio
+ * driver that the controller sequence should start. Indeed, most of the
+ * co-operation between the graphics and audio drivers is handled via audio
+ * related registers. (The notable exception is the power management, not
+ * covered here.)
+ */
+
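/*
 * Ordering sketch (illustrative, not lifted from the driver): an encoder's
 * modeset hooks are expected to follow the sequencing described above, e.g.
 *
 *	static void example_encoder_enable(struct intel_encoder *encoder)
 *	{
 *		... enable transcoder and port, complete link training ...
 *		intel_audio_codec_enable(encoder);
 *	}
 *
 *	static void example_encoder_disable(struct intel_encoder *encoder)
 *	{
 *		intel_audio_codec_disable(encoder);
 *		... only now disable the port and transcoder ...
 *	}
 *
 * i.e. audio is enabled last and disabled first around the port.
 */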
+static const struct {
+ int clock;
+ u32 config;
+} hdmi_audio_clock[] = {
+ { DIV_ROUND_UP(25200 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
+ { 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
+ { 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
+ { 27000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
+ { 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
+ { 54000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
+ { DIV_ROUND_UP(74250 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
+ { 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
+ { DIV_ROUND_UP(148500 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
+ { 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
+};
+
+/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
+static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
+ if (mode->clock == hdmi_audio_clock[i].clock)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(hdmi_audio_clock)) {
+ DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock);
+ i = 1;
+ }
+
+ DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
+ hdmi_audio_clock[i].clock,
+ hdmi_audio_clock[i].config);
+
+ return hdmi_audio_clock[i].config;
+}
+
+static bool intel_eld_uptodate(struct drm_connector *connector,
+ int reg_eldv, uint32_t bits_eldv,
+ int reg_elda, uint32_t bits_elda,
+ int reg_edid)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ uint8_t *eld = connector->eld;
+ uint32_t tmp;
+ int i;
+
+ tmp = I915_READ(reg_eldv);
+ tmp &= bits_eldv;
+
+ if (!tmp)
+ return false;
+
+ tmp = I915_READ(reg_elda);
+ tmp &= ~bits_elda;
+ I915_WRITE(reg_elda, tmp);
+
+ for (i = 0; i < drm_eld_size(eld) / 4; i++)
+ if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
+ return false;
+
+ return true;
+}
+
+static void g4x_audio_codec_disable(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ uint32_t eldv, tmp;
+
+ DRM_DEBUG_KMS("Disable audio codec\n");
+
+ tmp = I915_READ(G4X_AUD_VID_DID);
+ if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL)
+ eldv = G4X_ELDV_DEVCL_DEVBLC;
+ else
+ eldv = G4X_ELDV_DEVCTG;
+
+ /* Invalidate ELD */
+ tmp = I915_READ(G4X_AUD_CNTL_ST);
+ tmp &= ~eldv;
+ I915_WRITE(G4X_AUD_CNTL_ST, tmp);
+}
+
+static void g4x_audio_codec_enable(struct drm_connector *connector,
+ struct intel_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ uint8_t *eld = connector->eld;
+ uint32_t eldv;
+ uint32_t tmp;
+ int len, i;
+
+ DRM_DEBUG_KMS("Enable audio codec, %u bytes ELD\n", eld[2]);
+
+ tmp = I915_READ(G4X_AUD_VID_DID);
+ if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL)
+ eldv = G4X_ELDV_DEVCL_DEVBLC;
+ else
+ eldv = G4X_ELDV_DEVCTG;
+
+ if (intel_eld_uptodate(connector,
+ G4X_AUD_CNTL_ST, eldv,
+ G4X_AUD_CNTL_ST, G4X_ELD_ADDR_MASK,
+ G4X_HDMIW_HDMIEDID))
+ return;
+
+ tmp = I915_READ(G4X_AUD_CNTL_ST);
+ tmp &= ~(eldv | G4X_ELD_ADDR_MASK);
+ len = (tmp >> 9) & 0x1f; /* ELD buffer size */
+ I915_WRITE(G4X_AUD_CNTL_ST, tmp);
+
+ len = min(drm_eld_size(eld) / 4, len);
+ DRM_DEBUG_DRIVER("ELD size %d\n", len);
+ for (i = 0; i < len; i++)
+ I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
+
+ tmp = I915_READ(G4X_AUD_CNTL_ST);
+ tmp |= eldv;
+ I915_WRITE(G4X_AUD_CNTL_ST, tmp);
+}
+
+static void hsw_audio_codec_disable(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ uint32_t tmp;
+
+ DRM_DEBUG_KMS("Disable audio codec on pipe %c\n", pipe_name(pipe));
+
+ /* Disable timestamps */
+ tmp = I915_READ(HSW_AUD_CFG(pipe));
+ tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+ tmp |= AUD_CONFIG_N_PROG_ENABLE;
+ tmp &= ~AUD_CONFIG_UPPER_N_MASK;
+ tmp &= ~AUD_CONFIG_LOWER_N_MASK;
+ if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+ tmp |= AUD_CONFIG_N_VALUE_INDEX;
+ I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+
+ /* Invalidate ELD */
+ tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+ tmp &= ~AUDIO_ELD_VALID(pipe);
+ tmp &= ~AUDIO_OUTPUT_ENABLE(pipe);
+ I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+}
+
+static void hsw_audio_codec_enable(struct drm_connector *connector,
+ struct intel_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ const uint8_t *eld = connector->eld;
+ uint32_t tmp;
+ int len, i;
+
+ DRM_DEBUG_KMS("Enable audio codec on pipe %c, %u bytes ELD\n",
+ pipe_name(pipe), drm_eld_size(eld));
+
+ /* Enable audio presence detect, invalidate ELD */
+ tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+ tmp |= AUDIO_OUTPUT_ENABLE(pipe);
+ tmp &= ~AUDIO_ELD_VALID(pipe);
+ I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+
+ /*
+ * FIXME: We're supposed to wait for vblank here, but we have vblanks
+ * disabled during the mode set. The proper fix would be to push the
+ * rest of the setup into a vblank work item, queued here, but the
+ * infrastructure is not there yet.
+ */
+
+ /* Reset ELD write address */
+ tmp = I915_READ(HSW_AUD_DIP_ELD_CTRL(pipe));
+ tmp &= ~IBX_ELD_ADDRESS_MASK;
+ I915_WRITE(HSW_AUD_DIP_ELD_CTRL(pipe), tmp);
+
+ /* Up to 84 bytes of hw ELD buffer */
+ len = min(drm_eld_size(eld), 84);
+ for (i = 0; i < len / 4; i++)
+ I915_WRITE(HSW_AUD_EDID_DATA(pipe), *((uint32_t *)eld + i));
+
+ /* ELD valid */
+ tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+ tmp |= AUDIO_ELD_VALID(pipe);
+ I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+
+ /* Enable timestamps */
+ tmp = I915_READ(HSW_AUD_CFG(pipe));
+ tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+ tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
+ tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
+ if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+ tmp |= AUD_CONFIG_N_VALUE_INDEX;
+ else
+ tmp |= audio_config_hdmi_pixel_clock(mode);
+ I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+}
+
+static void ilk_audio_codec_disable(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_digital_port *intel_dig_port =
+ enc_to_dig_port(&encoder->base);
+ enum port port = intel_dig_port->port;
+ enum pipe pipe = intel_crtc->pipe;
+ uint32_t tmp, eldv;
+ int aud_config;
+ int aud_cntrl_st2;
+
+ DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n",
+ port_name(port), pipe_name(pipe));
+
+ if (HAS_PCH_IBX(dev_priv->dev)) {
+ aud_config = IBX_AUD_CFG(pipe);
+ aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
+ } else if (IS_VALLEYVIEW(dev_priv)) {
+ aud_config = VLV_AUD_CFG(pipe);
+ aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
+ } else {
+ aud_config = CPT_AUD_CFG(pipe);
+ aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
+ }
+
+ /* Disable timestamps */
+ tmp = I915_READ(aud_config);
+ tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+ tmp |= AUD_CONFIG_N_PROG_ENABLE;
+ tmp &= ~AUD_CONFIG_UPPER_N_MASK;
+ tmp &= ~AUD_CONFIG_LOWER_N_MASK;
+ if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+ tmp |= AUD_CONFIG_N_VALUE_INDEX;
+ I915_WRITE(aud_config, tmp);
+
+ if (WARN_ON(!port)) {
+ eldv = IBX_ELD_VALID(PORT_B) | IBX_ELD_VALID(PORT_C) |
+ IBX_ELD_VALID(PORT_D);
+ } else {
+ eldv = IBX_ELD_VALID(port);
+ }
+
+ /* Invalidate ELD */
+ tmp = I915_READ(aud_cntrl_st2);
+ tmp &= ~eldv;
+ I915_WRITE(aud_cntrl_st2, tmp);
+}
+
+static void ilk_audio_codec_enable(struct drm_connector *connector,
+ struct intel_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_digital_port *intel_dig_port =
+ enc_to_dig_port(&encoder->base);
+ enum port port = intel_dig_port->port;
+ enum pipe pipe = intel_crtc->pipe;
+ uint8_t *eld = connector->eld;
+ uint32_t eldv;
+ uint32_t tmp;
+ int len, i;
+ int hdmiw_hdmiedid;
+ int aud_config;
+ int aud_cntl_st;
+ int aud_cntrl_st2;
+
+ DRM_DEBUG_KMS("Enable audio codec on port %c, pipe %c, %u bytes ELD\n",
+ port_name(port), pipe_name(pipe), drm_eld_size(eld));
+
+ /*
+ * FIXME: We're supposed to wait for vblank here, but we have vblanks
+ * disabled during the mode set. The proper fix would be to push the
+ * rest of the setup into a vblank work item, queued here, but the
+ * infrastructure is not there yet.
+ */
+
+ if (HAS_PCH_IBX(connector->dev)) {
+ hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
+ aud_config = IBX_AUD_CFG(pipe);
+ aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
+ aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
+ } else if (IS_VALLEYVIEW(connector->dev)) {
+ hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
+ aud_config = VLV_AUD_CFG(pipe);
+ aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
+ aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
+ } else {
+ hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
+ aud_config = CPT_AUD_CFG(pipe);
+ aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
+ aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
+ }
+
+ if (WARN_ON(!port)) {
+ eldv = IBX_ELD_VALID(PORT_B) | IBX_ELD_VALID(PORT_C) |
+ IBX_ELD_VALID(PORT_D);
+ } else {
+ eldv = IBX_ELD_VALID(port);
+ }
+
+ /* Invalidate ELD */
+ tmp = I915_READ(aud_cntrl_st2);
+ tmp &= ~eldv;
+ I915_WRITE(aud_cntrl_st2, tmp);
+
+ /* Reset ELD write address */
+ tmp = I915_READ(aud_cntl_st);
+ tmp &= ~IBX_ELD_ADDRESS_MASK;
+ I915_WRITE(aud_cntl_st, tmp);
+
+ /* Up to 84 bytes of hw ELD buffer */
+ len = min(drm_eld_size(eld), 84);
+ for (i = 0; i < len / 4; i++)
+ I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
+
+ /* ELD valid */
+ tmp = I915_READ(aud_cntrl_st2);
+ tmp |= eldv;
+ I915_WRITE(aud_cntrl_st2, tmp);
+
+ /* Enable timestamps */
+ tmp = I915_READ(aud_config);
+ tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+ tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
+ tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
+ if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+ tmp |= AUD_CONFIG_N_VALUE_INDEX;
+ else
+ tmp |= audio_config_hdmi_pixel_clock(mode);
+ I915_WRITE(aud_config, tmp);
+}
+
+/**
+ * intel_audio_codec_enable - Enable the audio codec for HD audio
+ * @intel_encoder: encoder on which to enable audio
+ *
+ * The enable sequences may only be performed after enabling the transcoder and
+ * port, and after completed link training.
+ */
+void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
+ struct drm_display_mode *mode = &crtc->config.adjusted_mode;
+ struct drm_connector *connector;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ connector = drm_select_eld(encoder, mode);
+ if (!connector)
+ return;
+
+ DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+ connector->base.id,
+ connector->name,
+ connector->encoder->base.id,
+ connector->encoder->name);
+
+ /* ELD Conn_Type */
+ connector->eld[5] &= ~(3 << 2);
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
+ connector->eld[5] |= (1 << 2);
+
+ connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
+
+ if (dev_priv->display.audio_codec_enable)
+ dev_priv->display.audio_codec_enable(connector, intel_encoder, mode);
+}
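/*
 * A minimal standalone illustration of the two ELD bytes patched up by
 * intel_audio_codec_enable() above.  The delay and output type here are
 * assumed example values; per the code above, bits 3:2 of byte 5 carry the
 * connection type (DisplayPort writes 1, HDMI leaves 0) and byte 6 carries
 * the A/V sync delay, apparently in 2 ms units given the division by two.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t eld[84] = { 0 };
	int av_sync_delay_ms = 20;	/* hypothetical sink latency */
	int is_dp = 1;			/* pretend the pipe drives DisplayPort */

	eld[5] &= ~(3 << 2);		/* clear Conn_Type bits 3:2 */
	if (is_dp)
		eld[5] |= 1 << 2;	/* 1 = DisplayPort */

	eld[6] = av_sync_delay_ms / 2;	/* delay in 2 ms units */

	printf("eld[5]=0x%02x eld[6]=%u\n", eld[5], eld[6]);
	return 0;
}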
+
+/**
+ * intel_audio_codec_disable - Disable the audio codec for HD audio
+ * @encoder: encoder on which to disable audio
+ *
+ * The disable sequences must be performed before disabling the transcoder or
+ * port.
+ */
+void intel_audio_codec_disable(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->display.audio_codec_disable)
+ dev_priv->display.audio_codec_disable(encoder);
+}
+
+/**
+ * intel_init_audio - Set up chip specific audio functions
+ * @dev: drm device
+ */
+void intel_init_audio(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_G4X(dev)) {
+ dev_priv->display.audio_codec_enable = g4x_audio_codec_enable;
+ dev_priv->display.audio_codec_disable = g4x_audio_codec_disable;
+ } else if (IS_VALLEYVIEW(dev)) {
+ dev_priv->display.audio_codec_enable = ilk_audio_codec_enable;
+ dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
+ } else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) {
+ dev_priv->display.audio_codec_enable = hsw_audio_codec_enable;
+ dev_priv->display.audio_codec_disable = hsw_audio_codec_disable;
+ } else if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->display.audio_codec_enable = ilk_audio_codec_enable;
+ dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
+ }
+}
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 905999bee2a..7603765c91f 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -46,7 +46,7 @@ struct bdb_header {
u16 version; /**< decimal */
u16 header_size; /**< in bytes */
u16 bdb_size; /**< in bytes */
-};
+} __packed;
/* strictly speaking, this is a "skip" block, but it has interesting info */
struct vbios_data {
@@ -252,7 +252,7 @@ union child_device_config {
/* This one should also be safe to use anywhere, even without version
* checks. */
struct common_child_dev_config common;
-};
+} __packed;
struct bdb_general_definitions {
/* DDC GPIO */
@@ -888,12 +888,12 @@ struct mipi_pps_data {
u16 bl_disable_delay;
u16 panel_off_delay;
u16 panel_power_cycle_delay;
-};
+} __packed;
struct bdb_mipi_config {
struct mipi_config config[MAX_MIPI_CONFIGURATIONS];
struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS];
-};
+} __packed;
/* Block 53 contains MIPI sequences as needed by the panel
* for enabling it. This block can be variable in size and
@@ -902,7 +902,7 @@ struct bdb_mipi_config {
struct bdb_mipi_sequence {
u8 version;
u8 data[0];
-};
+} __packed;
/* MIPI Sequence Block definitions */
enum mipi_seq {
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 9212e6504e0..a9af9a4866d 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -72,7 +72,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
u32 tmp;
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_enabled(dev_priv, power_domain))
+ if (!intel_display_power_is_enabled(dev_priv, power_domain))
return false;
tmp = I915_READ(crt->adpa_reg);
@@ -775,7 +775,7 @@ static void intel_crt_reset(struct drm_connector *connector)
I915_WRITE(crt->adpa_reg, adpa);
POSTING_READ(crt->adpa_reg);
- DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
+ DRM_DEBUG_KMS("crt adpa set to 0x%x\n", adpa);
crt->force_hotplug_required = 1;
}
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index b63d4fa204a..e6b45cd150d 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -95,8 +95,8 @@ static const struct ddi_buf_trans bdw_ddi_translations_dp[] = {
{ 0x00BEFFFF, 0x00140006 },
{ 0x80B2CFFF, 0x001B0002 },
{ 0x00FFFFFF, 0x000E000A },
- { 0x00D75FFF, 0x00180004 },
- { 0x80CB2FFF, 0x001B0002 },
+ { 0x00DB6FFF, 0x00160005 },
+ { 0x80C71FFF, 0x001A0002 },
{ 0x00F7DFFF, 0x00180004 },
{ 0x80D75FFF, 0x001B0002 },
};
@@ -127,6 +127,32 @@ static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = {
{ 0x80FFFFFF, 0x001B0002 }, /* 9: 1000 1000 0 */
};
+static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
+ { 0x00000018, 0x000000a0 },
+ { 0x00004014, 0x00000098 },
+ { 0x00006012, 0x00000088 },
+ { 0x00008010, 0x00000080 },
+ { 0x00000018, 0x00000098 },
+ { 0x00004014, 0x00000088 },
+ { 0x00006012, 0x00000080 },
+ { 0x00000018, 0x00000088 },
+ { 0x00004014, 0x00000080 },
+};
+
+static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
+ /* Idx NT mV T mV db */
+ { 0x00000018, 0x000000a0 }, /* 0: 400 400 0 */
+ { 0x00004014, 0x00000098 }, /* 1: 400 600 3.5 */
+ { 0x00006012, 0x00000088 }, /* 2: 400 800 6 */
+ { 0x00000018, 0x0000003c }, /* 3: 450 450 0 */
+ { 0x00000018, 0x00000098 }, /* 4: 600 600 0 */
+ { 0x00003015, 0x00000088 }, /* 5: 600 800 2.5 */
+ { 0x00005013, 0x00000080 }, /* 6: 600 1000 4.5 */
+ { 0x00000018, 0x00000088 }, /* 7: 800 800 0 */
+ { 0x00000096, 0x00000080 }, /* 8: 800 1000 2 */
+ { 0x00000018, 0x00000080 }, /* 9: 1200 1200 0 */
+};
+
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
@@ -169,7 +195,14 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
const struct ddi_buf_trans *ddi_translations_hdmi;
const struct ddi_buf_trans *ddi_translations;
- if (IS_BROADWELL(dev)) {
+ if (IS_SKYLAKE(dev)) {
+ ddi_translations_fdi = NULL;
+ ddi_translations_dp = skl_ddi_translations_dp;
+ ddi_translations_edp = skl_ddi_translations_dp;
+ ddi_translations_hdmi = skl_ddi_translations_hdmi;
+ n_hdmi_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
+ hdmi_800mV_0dB = 7;
+ } else if (IS_BROADWELL(dev)) {
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
ddi_translations_edp = bdw_ddi_translations_edp;
@@ -208,7 +241,10 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
ddi_translations = ddi_translations_dp;
break;
case PORT_E:
- ddi_translations = ddi_translations_fdi;
+ if (ddi_translations_fdi)
+ ddi_translations = ddi_translations_fdi;
+ else
+ ddi_translations = ddi_translations_dp;
break;
default:
BUG();
@@ -423,6 +459,27 @@ intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
return ret;
}
+static struct intel_encoder *
+intel_ddi_get_crtc_new_encoder(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct intel_encoder *intel_encoder, *ret = NULL;
+ int num_encoders = 0;
+
+ for_each_intel_encoder(dev, intel_encoder) {
+ if (intel_encoder->new_crtc == crtc) {
+ ret = intel_encoder;
+ num_encoders++;
+ }
+ }
+
+ WARN(num_encoders != 1, "%d encoders on crtc for pipe %c\n", num_encoders,
+ pipe_name(crtc->pipe));
+
+ BUG_ON(ret == NULL);
+ return ret;
+}
+
#define LC_FREQ 2700
#define LC_FREQ_2K U64_C(LC_FREQ * 2000)
@@ -613,6 +670,111 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
return (refclk * n * 100) / (p * r);
}
+static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
+ uint32_t dpll)
+{
+ uint32_t cfgcr1_reg, cfgcr2_reg;
+ uint32_t cfgcr1_val, cfgcr2_val;
+ uint32_t p0, p1, p2, dco_freq;
+
+ cfgcr1_reg = GET_CFG_CR1_REG(dpll);
+ cfgcr2_reg = GET_CFG_CR2_REG(dpll);
+
+ cfgcr1_val = I915_READ(cfgcr1_reg);
+ cfgcr2_val = I915_READ(cfgcr2_reg);
+
+ p0 = cfgcr2_val & DPLL_CFGCR2_PDIV_MASK;
+ p2 = cfgcr2_val & DPLL_CFGCR2_KDIV_MASK;
+
+ if (cfgcr2_val & DPLL_CFGCR2_QDIV_MODE(1))
+ p1 = (cfgcr2_val & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
+ else
+ p1 = 1;
+
+
+ switch (p0) {
+ case DPLL_CFGCR2_PDIV_1:
+ p0 = 1;
+ break;
+ case DPLL_CFGCR2_PDIV_2:
+ p0 = 2;
+ break;
+ case DPLL_CFGCR2_PDIV_3:
+ p0 = 3;
+ break;
+ case DPLL_CFGCR2_PDIV_7:
+ p0 = 7;
+ break;
+ }
+
+ switch (p2) {
+ case DPLL_CFGCR2_KDIV_5:
+ p2 = 5;
+ break;
+ case DPLL_CFGCR2_KDIV_2:
+ p2 = 2;
+ break;
+ case DPLL_CFGCR2_KDIV_3:
+ p2 = 3;
+ break;
+ case DPLL_CFGCR2_KDIV_1:
+ p2 = 1;
+ break;
+ }
+
+ dco_freq = (cfgcr1_val & DPLL_CFGCR1_DCO_INTEGER_MASK) * 24 * 1000;
+
+ dco_freq += (((cfgcr1_val & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) * 24 *
+ 1000) / 0x8000;
+
+ return dco_freq / (p0 * p1 * p2 * 5);
+}
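/*
 * Rough worked example of the read-back arithmetic in skl_calc_wrpll_link()
 * above, using assumed register contents: DCO integer 371, DCO fraction 8192
 * (out of 2^15) and dividers P0=3, P1=2, P2=2.  Clock values are in kHz, as
 * in the function above.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t dco_integer = 371, dco_fraction = 8192;	/* assumed values */
	uint32_t p0 = 3, p1 = 2, p2 = 2;			/* assumed dividers */

	uint32_t dco_freq = dco_integer * 24 * 1000;		/* 24 MHz ref, kHz units */
	dco_freq += (dco_fraction * 24 * 1000) / 0x8000;	/* 15-bit fraction */

	/* AFE clock = DCO / (P0 * P1 * P2); pixel clock = AFE / 5 */
	printf("link clock = %u kHz\n", dco_freq / (p0 * p1 * p2 * 5));	/* 148500 */
	return 0;
}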
+
+
+static void skl_ddi_clock_get(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ int link_clock = 0;
+ uint32_t dpll_ctl1, dpll;
+
+ dpll = pipe_config->ddi_pll_sel;
+
+ dpll_ctl1 = I915_READ(DPLL_CTRL1);
+
+ if (dpll_ctl1 & DPLL_CTRL1_HDMI_MODE(dpll)) {
+ link_clock = skl_calc_wrpll_link(dev_priv, dpll);
+ } else {
+ link_clock = dpll_ctl1 & DPLL_CRTL1_LINK_RATE_MASK(dpll);
+ link_clock >>= DPLL_CRTL1_LINK_RATE_SHIFT(dpll);
+
+ switch (link_clock) {
+ case DPLL_CRTL1_LINK_RATE_810:
+ link_clock = 81000;
+ break;
+ case DPLL_CRTL1_LINK_RATE_1350:
+ link_clock = 135000;
+ break;
+ case DPLL_CRTL1_LINK_RATE_2700:
+ link_clock = 270000;
+ break;
+ default:
+ WARN(1, "Unsupported link rate\n");
+ break;
+ }
+ link_clock *= 2;
+ }
+
+ pipe_config->port_clock = link_clock;
+
+ if (pipe_config->has_dp_encoder)
+ pipe_config->adjusted_mode.crtc_clock =
+ intel_dotclock_calculate(pipe_config->port_clock,
+ &pipe_config->dp_m_n);
+ else
+ pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
+}
+
static void hsw_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
@@ -756,7 +918,7 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
WRPLL_DIVIDER_POST(p);
- intel_crtc->config.dpll_hw_state.wrpll = val;
+ intel_crtc->new_config->dpll_hw_state.wrpll = val;
pll = intel_get_shared_dpll(intel_crtc);
if (pll == NULL) {
@@ -765,12 +927,234 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
return false;
}
- intel_crtc->config.ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id);
+ intel_crtc->new_config->ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id);
}
return true;
}
+struct skl_wrpll_params {
+ uint32_t dco_fraction;
+ uint32_t dco_integer;
+ uint32_t qdiv_ratio;
+ uint32_t qdiv_mode;
+ uint32_t kdiv;
+ uint32_t pdiv;
+ uint32_t central_freq;
+};
+
+static void
+skl_ddi_calculate_wrpll(int clock /* in Hz */,
+ struct skl_wrpll_params *wrpll_params)
+{
+ uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
+ uint64_t dco_central_freq[3] = {8400000000ULL,
+ 9000000000ULL,
+ 9600000000ULL};
+ uint32_t min_dco_deviation = 400;
+ uint32_t min_dco_index = 3;
+ uint32_t P0[4] = {1, 2, 3, 7};
+ uint32_t P2[4] = {1, 2, 3, 5};
+ bool found = false;
+ uint32_t candidate_p = 0;
+ uint32_t candidate_p0[3] = {0}, candidate_p1[3] = {0};
+ uint32_t candidate_p2[3] = {0};
+ uint32_t dco_central_freq_deviation[3];
+ uint32_t i, P1, k, dco_count;
+ bool retry_with_odd = false;
+ uint64_t dco_freq;
+
+ /* Determine P0, P1 or P2 */
+ for (dco_count = 0; dco_count < 3; dco_count++) {
+ found = false;
+ candidate_p =
+ div64_u64(dco_central_freq[dco_count], afe_clock);
+ if (retry_with_odd == false)
+ candidate_p = (candidate_p % 2 == 0 ?
+ candidate_p : candidate_p + 1);
+
+ for (P1 = 1; P1 < candidate_p; P1++) {
+ for (i = 0; i < 4; i++) {
+ if (!(P0[i] != 1 || P1 == 1))
+ continue;
+
+ for (k = 0; k < 4; k++) {
+ if (P1 != 1 && P2[k] != 2)
+ continue;
+
+ if (candidate_p == P0[i] * P1 * P2[k]) {
+ /* Found possible P0, P1, P2 */
+ found = true;
+ candidate_p0[dco_count] = P0[i];
+ candidate_p1[dco_count] = P1;
+ candidate_p2[dco_count] = P2[k];
+ goto found;
+ }
+
+ }
+ }
+ }
+
+found:
+ if (found) {
+ dco_central_freq_deviation[dco_count] =
+ div64_u64(10000 *
+ abs_diff((candidate_p * afe_clock),
+ dco_central_freq[dco_count]),
+ dco_central_freq[dco_count]);
+
+ if (dco_central_freq_deviation[dco_count] <
+ min_dco_deviation) {
+ min_dco_deviation =
+ dco_central_freq_deviation[dco_count];
+ min_dco_index = dco_count;
+ }
+ }
+
+ if (min_dco_index > 2 && dco_count == 2) {
+ retry_with_odd = true;
+ dco_count = 0;
+ }
+ }
+
+ if (min_dco_index > 2) {
+ WARN(1, "No valid values found for the given pixel clock\n");
+ } else {
+ wrpll_params->central_freq = dco_central_freq[min_dco_index];
+
+ switch (dco_central_freq[min_dco_index]) {
+ case 9600000000ULL:
+ wrpll_params->central_freq = 0;
+ break;
+ case 9000000000ULL:
+ wrpll_params->central_freq = 1;
+ break;
+ case 8400000000ULL:
+ wrpll_params->central_freq = 3;
+ }
+
+ switch (candidate_p0[min_dco_index]) {
+ case 1:
+ wrpll_params->pdiv = 0;
+ break;
+ case 2:
+ wrpll_params->pdiv = 1;
+ break;
+ case 3:
+ wrpll_params->pdiv = 2;
+ break;
+ case 7:
+ wrpll_params->pdiv = 4;
+ break;
+ default:
+ WARN(1, "Incorrect PDiv\n");
+ }
+
+ switch (candidate_p2[min_dco_index]) {
+ case 5:
+ wrpll_params->kdiv = 0;
+ break;
+ case 2:
+ wrpll_params->kdiv = 1;
+ break;
+ case 3:
+ wrpll_params->kdiv = 2;
+ break;
+ case 1:
+ wrpll_params->kdiv = 3;
+ break;
+ default:
+ WARN(1, "Incorrect KDiv\n");
+ }
+
+ wrpll_params->qdiv_ratio = candidate_p1[min_dco_index];
+ wrpll_params->qdiv_mode =
+ (wrpll_params->qdiv_ratio == 1) ? 0 : 1;
+
+ dco_freq = candidate_p0[min_dco_index] *
+ candidate_p1[min_dco_index] *
+ candidate_p2[min_dco_index] * afe_clock;
+
+ /*
+ * Intermediate values are in Hz.
+ * Divide by MHz to match bspec
+ */
+ wrpll_params->dco_integer = div_u64(dco_freq, (24 * MHz(1)));
+ wrpll_params->dco_fraction =
+ div_u64(((div_u64(dco_freq, 24) -
+ wrpll_params->dco_integer * MHz(1)) * 0x8000), MHz(1));
+
+ }
+}
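/*
 * Rough worked example of the encoding done by skl_ddi_calculate_wrpll()
 * above, assuming a 148.5 MHz pixel clock.  For that clock the search above
 * works out to P0=3, P1=2, P2=2 against the 9 GHz DCO central frequency; the
 * sketch below only reproduces the final DCO integer/fraction split.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t clock = 148500000ULL;			/* assumed pixel clock, Hz */
	uint64_t afe_clock = clock * 5;			/* AFE clock is 5x pixel clock */
	uint64_t p0 = 3, p1 = 2, p2 = 2;		/* dividers for this clock */
	uint64_t dco_freq = p0 * p1 * p2 * afe_clock;	/* 8.91 GHz */

	/* integer part in 24 MHz units, fractional part scaled by 2^15 */
	uint64_t dco_integer = dco_freq / (24 * 1000000ULL);
	uint64_t dco_fraction = ((dco_freq / 24 - dco_integer * 1000000ULL) * 0x8000)
				/ 1000000ULL;

	printf("dco_integer=%llu dco_fraction=%llu\n",	/* 371 and 8192 */
	       (unsigned long long)dco_integer,
	       (unsigned long long)dco_fraction);
	return 0;
}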
+
+
+static bool
+skl_ddi_pll_select(struct intel_crtc *intel_crtc,
+ struct intel_encoder *intel_encoder,
+ int clock)
+{
+ struct intel_shared_dpll *pll;
+ uint32_t ctrl1, cfgcr1, cfgcr2;
+
+ /*
+ * See comment in intel_dpll_hw_state to understand why we always use 0
+ * as the DPLL id in this function.
+ */
+
+ ctrl1 = DPLL_CTRL1_OVERRIDE(0);
+
+ if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
+ struct skl_wrpll_params wrpll_params = { 0, };
+
+ ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
+
+ skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params);
+
+ cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
+ DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
+ wrpll_params.dco_integer;
+
+ cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
+ DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
+ DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
+ DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
+ wrpll_params.central_freq;
+ } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ switch (intel_dp->link_bw) {
+ case DP_LINK_BW_1_62:
+ ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810, 0);
+ break;
+ case DP_LINK_BW_2_7:
+ ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350, 0);
+ break;
+ case DP_LINK_BW_5_4:
+ ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700, 0);
+ break;
+ }
+
+ cfgcr1 = cfgcr2 = 0;
+ } else /* eDP */
+ return true;
+
+ intel_crtc->new_config->dpll_hw_state.ctrl1 = ctrl1;
+ intel_crtc->new_config->dpll_hw_state.cfgcr1 = cfgcr1;
+ intel_crtc->new_config->dpll_hw_state.cfgcr2 = cfgcr2;
+
+ pll = intel_get_shared_dpll(intel_crtc);
+ if (pll == NULL) {
+ DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
+ pipe_name(intel_crtc->pipe));
+ return false;
+ }
+
+ /* shared DPLL id 0 is DPLL 1 */
+ intel_crtc->new_config->ddi_pll_sel = pll->id + 1;
+
+ return true;
+}
/*
* Tries to find a *shared* PLL for the CRTC and store it in
@@ -781,13 +1165,15 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
*/
bool intel_ddi_pll_select(struct intel_crtc *intel_crtc)
{
- struct drm_crtc *crtc = &intel_crtc->base;
- struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
- int clock = intel_crtc->config.port_clock;
-
- intel_put_shared_dpll(intel_crtc);
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct intel_encoder *intel_encoder =
+ intel_ddi_get_crtc_new_encoder(intel_crtc);
+ int clock = intel_crtc->new_config->port_clock;
- return hsw_ddi_pll_select(intel_crtc, intel_encoder, clock);
+ if (IS_SKYLAKE(dev))
+ return skl_ddi_pll_select(intel_crtc, intel_encoder, clock);
+ else
+ return hsw_ddi_pll_select(intel_crtc, intel_encoder, clock);
}
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
@@ -962,7 +1348,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
uint32_t tmp;
power_domain = intel_display_port_power_domain(intel_encoder);
- if (!intel_display_power_enabled(dev_priv, power_domain))
+ if (!intel_display_power_is_enabled(dev_priv, power_domain))
return false;
if (!intel_encoder->get_hw_state(intel_encoder, &pipe))
@@ -1008,7 +1394,7 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
int i;
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_enabled(dev_priv, power_domain))
+ if (!intel_display_power_is_enabled(dev_priv, power_domain))
return false;
tmp = I915_READ(DDI_BUF_CTL(port));
@@ -1079,27 +1465,53 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
- if (crtc->config.has_audio) {
- DRM_DEBUG_DRIVER("Audio on pipe %c on DDI\n",
- pipe_name(crtc->pipe));
-
- /* write eld */
- DRM_DEBUG_DRIVER("DDI audio: write eld information\n");
- intel_write_eld(encoder, &crtc->config.adjusted_mode);
- }
-
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_edp_panel_on(intel_dp);
}
- WARN_ON(crtc->config.ddi_pll_sel == PORT_CLK_SEL_NONE);
- I915_WRITE(PORT_CLK_SEL(port), crtc->config.ddi_pll_sel);
+ if (IS_SKYLAKE(dev)) {
+ uint32_t dpll = crtc->config.ddi_pll_sel;
+ uint32_t val;
+
+ /*
+ * DPLL0 is used for eDP and is the only "private" DPLL (as
+ * opposed to shared) on SKL
+ */
+ if (type == INTEL_OUTPUT_EDP) {
+ WARN_ON(dpll != SKL_DPLL0);
+
+ val = I915_READ(DPLL_CTRL1);
+
+ val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) |
+ DPLL_CTRL1_SSC(dpll) |
+ DPLL_CRTL1_LINK_RATE_MASK(dpll));
+ val |= crtc->config.dpll_hw_state.ctrl1 << (dpll * 6);
+
+ I915_WRITE(DPLL_CTRL1, val);
+ POSTING_READ(DPLL_CTRL1);
+ }
+
+ /* DDI -> PLL mapping */
+ val = I915_READ(DPLL_CTRL2);
+
+ val &= ~(DPLL_CTRL2_DDI_CLK_OFF(port) |
+ DPLL_CTRL2_DDI_CLK_SEL_MASK(port));
+ val |= (DPLL_CTRL2_DDI_CLK_SEL(dpll, port) |
+ DPLL_CTRL2_DDI_SEL_OVERRIDE(port));
+
+ I915_WRITE(DPLL_CTRL2, val);
+
+ } else {
+ WARN_ON(crtc->config.ddi_pll_sel == PORT_CLK_SEL_NONE);
+ I915_WRITE(PORT_CLK_SEL(port), crtc->config.ddi_pll_sel);
+ }
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -1109,7 +1521,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
- if (port != PORT_A)
+ if (port != PORT_A || INTEL_INFO(dev)->gen >= 9)
intel_dp_stop_link_train(intel_dp);
} else if (type == INTEL_OUTPUT_HDMI) {
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
@@ -1123,7 +1535,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
uint32_t val;
@@ -1151,7 +1564,11 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
intel_edp_panel_off(intel_dp);
}
- I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+ if (IS_SKYLAKE(dev))
+ I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) |
+ DPLL_CTRL2_DDI_CLK_OFF(port)));
+ else
+ I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
}
static void intel_enable_ddi(struct intel_encoder *intel_encoder)
@@ -1159,12 +1576,10 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
- uint32_t tmp;
if (type == INTEL_OUTPUT_HDMI) {
struct intel_digital_port *intel_dig_port =
@@ -1180,18 +1595,16 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
} else if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- if (port == PORT_A)
+ if (port == PORT_A && INTEL_INFO(dev)->gen < 9)
intel_dp_stop_link_train(intel_dp);
intel_edp_backlight_on(intel_dp);
- intel_edp_psr_enable(intel_dp);
+ intel_psr_enable(intel_dp);
}
if (intel_crtc->config.has_audio) {
intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
- tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
- tmp |= ((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
- I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+ intel_audio_codec_enable(intel_encoder);
}
}
@@ -1200,30 +1613,71 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
int type = intel_encoder->type;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t tmp;
- /* We can't touch HSW_AUD_PIN_ELD_CP_VLD uncionditionally because this
- * register is part of the power well on Haswell. */
if (intel_crtc->config.has_audio) {
- tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
- tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) <<
- (pipe * 4));
- I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+ intel_audio_codec_disable(intel_encoder);
intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
}
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- intel_edp_psr_disable(intel_dp);
+ intel_psr_disable(intel_dp);
intel_edp_backlight_off(intel_dp);
}
}
+static int skl_get_cdclk_freq(struct drm_i915_private *dev_priv)
+{
+ uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
+ uint32_t cdctl = I915_READ(CDCLK_CTL);
+ uint32_t linkrate;
+
+ if (!(lcpll1 & LCPLL_PLL_ENABLE)) {
+ WARN(1, "LCPLL1 not enabled\n");
+ return 24000; /* 24MHz is the cd freq with NSSC ref */
+ }
+
+ if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
+ return 540000;
+
+ linkrate = (I915_READ(DPLL_CTRL1) &
+ DPLL_CRTL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;
+
+ if (linkrate == DPLL_CRTL1_LINK_RATE_2160 ||
+ linkrate == DPLL_CRTL1_LINK_RATE_1080) {
+ /* vco 8640 */
+ switch (cdctl & CDCLK_FREQ_SEL_MASK) {
+ case CDCLK_FREQ_450_432:
+ return 432000;
+ case CDCLK_FREQ_337_308:
+ return 308570;
+ case CDCLK_FREQ_675_617:
+ return 617140;
+ default:
+ WARN(1, "Unknown cd freq selection\n");
+ }
+ } else {
+ /* vco 8100 */
+ switch (cdctl & CDCLK_FREQ_SEL_MASK) {
+ case CDCLK_FREQ_450_432:
+ return 450000;
+ case CDCLK_FREQ_337_308:
+ return 337500;
+ case CDCLK_FREQ_675_617:
+ return 675000;
+ default:
+ WARN(1, "Unknown cd freq selection\n");
+ }
+ }
+
+ /* error case, behave as if DPLL0 isn't enabled */
+ return 24000;
+}
+
static int bdw_get_cdclk_freq(struct drm_i915_private *dev_priv)
{
uint32_t lcpll = I915_READ(LCPLL_CTL);
@@ -1255,7 +1709,7 @@ static int hsw_get_cdclk_freq(struct drm_i915_private *dev_priv)
return 450000;
else if (freq == LCPLL_CLK_FREQ_450)
return 450000;
- else if (IS_ULT(dev))
+ else if (IS_HSW_ULT(dev))
return 337500;
else
return 540000;
@@ -1265,6 +1719,9 @@ int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
+ if (IS_SKYLAKE(dev))
+ return skl_get_cdclk_freq(dev_priv);
+
if (IS_BROADWELL(dev))
return bdw_get_cdclk_freq(dev_priv);
@@ -1275,7 +1732,7 @@ int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- I915_WRITE(WRPLL_CTL(pll->id), pll->hw_state.wrpll);
+ I915_WRITE(WRPLL_CTL(pll->id), pll->config.hw_state.wrpll);
POSTING_READ(WRPLL_CTL(pll->id));
udelay(20);
}
@@ -1296,7 +1753,7 @@ static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
{
uint32_t val;
- if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
val = I915_READ(WRPLL_CTL(pll->id));
@@ -1326,26 +1783,156 @@ static void hsw_shared_dplls_init(struct drm_i915_private *dev_priv)
}
}
+static const char * const skl_ddi_pll_names[] = {
+ "DPLL 1",
+ "DPLL 2",
+ "DPLL 3",
+};
+
+struct skl_dpll_regs {
+ u32 ctl, cfgcr1, cfgcr2;
+};
+
+/* this array is indexed by the *shared* pll id */
+static const struct skl_dpll_regs skl_dpll_regs[3] = {
+ {
+ /* DPLL 1 */
+ .ctl = LCPLL2_CTL,
+ .cfgcr1 = DPLL1_CFGCR1,
+ .cfgcr2 = DPLL1_CFGCR2,
+ },
+ {
+ /* DPLL 2 */
+ .ctl = WRPLL_CTL1,
+ .cfgcr1 = DPLL2_CFGCR1,
+ .cfgcr2 = DPLL2_CFGCR2,
+ },
+ {
+ /* DPLL 3 */
+ .ctl = WRPLL_CTL2,
+ .cfgcr1 = DPLL3_CFGCR1,
+ .cfgcr2 = DPLL3_CFGCR2,
+ },
+};
+
+static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ uint32_t val;
+ unsigned int dpll;
+ const struct skl_dpll_regs *regs = skl_dpll_regs;
+
+ /* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */
+ dpll = pll->id + 1;
+
+ val = I915_READ(DPLL_CTRL1);
+
+ val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) | DPLL_CTRL1_SSC(dpll) |
+ DPLL_CRTL1_LINK_RATE_MASK(dpll));
+ val |= pll->config.hw_state.ctrl1 << (dpll * 6);
+
+ I915_WRITE(DPLL_CTRL1, val);
+ POSTING_READ(DPLL_CTRL1);
+
+ I915_WRITE(regs[pll->id].cfgcr1, pll->config.hw_state.cfgcr1);
+ I915_WRITE(regs[pll->id].cfgcr2, pll->config.hw_state.cfgcr2);
+ POSTING_READ(regs[pll->id].cfgcr1);
+ POSTING_READ(regs[pll->id].cfgcr2);
+
+ /* the enable bit is always bit 31 */
+ I915_WRITE(regs[pll->id].ctl,
+ I915_READ(regs[pll->id].ctl) | LCPLL_PLL_ENABLE);
+
+ if (wait_for(I915_READ(DPLL_STATUS) & DPLL_LOCK(dpll), 5))
+ DRM_ERROR("DPLL %d not locked\n", dpll);
+}
+
+static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ const struct skl_dpll_regs *regs = skl_dpll_regs;
+
+ /* the enable bit is always bit 31 */
+ I915_WRITE(regs[pll->id].ctl,
+ I915_READ(regs[pll->id].ctl) & ~LCPLL_PLL_ENABLE);
+ POSTING_READ(regs[pll->id].ctl);
+}
+
+static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ uint32_t val;
+ unsigned int dpll;
+ const struct skl_dpll_regs *regs = skl_dpll_regs;
+
+ if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ return false;
+
+ /* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */
+ dpll = pll->id + 1;
+
+ val = I915_READ(regs[pll->id].ctl);
+ if (!(val & LCPLL_PLL_ENABLE))
+ return false;
+
+ val = I915_READ(DPLL_CTRL1);
+ hw_state->ctrl1 = (val >> (dpll * 6)) & 0x3f;
+
+ /* avoid reading back stale values if HDMI mode is not enabled */
+ if (val & DPLL_CTRL1_HDMI_MODE(dpll)) {
+ hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1);
+ hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2);
+ }
+
+ return true;
+}
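/*
 * Small sketch of the per-DPLL field packing used above: DPLL_CTRL1 holds one
 * 6-bit control field per DPLL at bit offset dpll * 6, which is why both the
 * enable and get_hw_state paths shift by (dpll * 6) and mask with 0x3f.  The
 * field value below is an arbitrary placeholder, not a real register setting.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int dpll = 2;			/* e.g. shared pll id 1 -> DPLL 2 */
	uint32_t field = 0x2a;			/* placeholder 6-bit value */
	uint32_t reg = 0;

	reg &= ~(0x3fU << (dpll * 6));		/* clear this DPLL's field */
	reg |= field << (dpll * 6);		/* write it back */

	uint32_t readback = (reg >> (dpll * 6)) & 0x3f;
	printf("reg=0x%08x field=0x%02x\n", reg, readback);
	return 0;
}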
+
+static void skl_shared_dplls_init(struct drm_i915_private *dev_priv)
+{
+ int i;
+
+ dev_priv->num_shared_dpll = 3;
+
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ dev_priv->shared_dplls[i].id = i;
+ dev_priv->shared_dplls[i].name = skl_ddi_pll_names[i];
+ dev_priv->shared_dplls[i].disable = skl_ddi_pll_disable;
+ dev_priv->shared_dplls[i].enable = skl_ddi_pll_enable;
+ dev_priv->shared_dplls[i].get_hw_state =
+ skl_ddi_pll_get_hw_state;
+ }
+}
+
void intel_ddi_pll_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t val = I915_READ(LCPLL_CTL);
- hsw_shared_dplls_init(dev_priv);
-
- /* The LCPLL register should be turned on by the BIOS. For now let's
- * just check its state and print errors in case something is wrong.
- * Don't even try to turn it on.
- */
+ if (IS_SKYLAKE(dev))
+ skl_shared_dplls_init(dev_priv);
+ else
+ hsw_shared_dplls_init(dev_priv);
DRM_DEBUG_KMS("CDCLK running at %dKHz\n",
intel_ddi_get_cdclk_freq(dev_priv));
- if (val & LCPLL_CD_SOURCE_FCLK)
- DRM_ERROR("CDCLK source is not LCPLL\n");
+ if (IS_SKYLAKE(dev)) {
+ if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
+ DRM_ERROR("LCPLL1 is disabled\n");
+ } else {
+ /*
+ * The LCPLL register should be turned on by the BIOS. For now
+ * let's just check its state and print errors in case
+ * something is wrong. Don't even try to turn it on.
+ */
- if (val & LCPLL_PLL_DISABLE)
- DRM_ERROR("LCPLL is disabled\n");
+ if (val & LCPLL_CD_SOURCE_FCLK)
+ DRM_ERROR("CDCLK source is not LCPLL\n");
+
+ if (val & LCPLL_PLL_DISABLE)
+ DRM_ERROR("LCPLL is disabled\n");
+ }
}
void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
@@ -1440,7 +2027,9 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ struct intel_hdmi *intel_hdmi;
u32 temp, flags = 0;
+ struct drm_device *dev = dev_priv->dev;
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (temp & TRANS_DDI_PHSYNC)
@@ -1474,6 +2063,11 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
switch (temp & TRANS_DDI_MODE_SELECT_MASK) {
case TRANS_DDI_MODE_SELECT_HDMI:
pipe_config->has_hdmi_sink = true;
+ intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+
+ if (intel_hdmi->infoframe_enabled(&encoder->base))
+ pipe_config->has_infoframe = true;
+ break;
case TRANS_DDI_MODE_SELECT_DVI:
case TRANS_DDI_MODE_SELECT_FDI:
break;
@@ -1486,9 +2080,9 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
break;
}
- if (intel_display_power_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
+ if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
- if (temp & (AUDIO_OUTPUT_ENABLE_A << (intel_crtc->pipe * 4)))
+ if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
pipe_config->has_audio = true;
}
@@ -1512,7 +2106,10 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
}
- hsw_ddi_clock_get(encoder, pipe_config);
+ if (INTEL_INFO(dev)->gen <= 8)
+ hsw_ddi_clock_get(encoder, pipe_config);
+ else
+ skl_ddi_clock_get(encoder, pipe_config);
}
static void intel_ddi_destroy(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 9cb5c95d589..fb3e3d42919 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -73,8 +73,6 @@ static const uint32_t intel_cursor_formats[] = {
DRM_FORMAT_ARGB8888,
};
-static void intel_increase_pllclock(struct drm_device *dev,
- enum pipe pipe);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
@@ -96,8 +94,10 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void intel_set_pipe_csc(struct drm_crtc *crtc);
-static void vlv_prepare_pll(struct intel_crtc *crtc);
-static void chv_prepare_pll(struct intel_crtc *crtc);
+static void vlv_prepare_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_config *pipe_config);
+static void chv_prepare_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_config *pipe_config);
static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
{
@@ -408,25 +408,43 @@ static void vlv_clock(int refclk, intel_clock_t *clock)
/**
* Returns whether any output on the specified pipe is of the specified type
*/
-static bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
+bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
struct intel_encoder *encoder;
- for_each_encoder_on_crtc(dev, crtc, encoder)
+ for_each_encoder_on_crtc(dev, &crtc->base, encoder)
if (encoder->type == type)
return true;
return false;
}
-static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
+/**
+ * Returns whether any output on the specified pipe will have the specified
+ * type after a staged modeset is complete, i.e., the same as
+ * intel_pipe_has_type() but looking at encoder->new_crtc instead of
+ * encoder->crtc.
+ */
+static bool intel_pipe_will_have_type(struct intel_crtc *crtc, int type)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct intel_encoder *encoder;
+
+ for_each_intel_encoder(dev, encoder)
+ if (encoder->new_crtc == crtc && encoder->type == type)
+ return true;
+
+ return false;
+}
+
+static const intel_limit_t *intel_ironlake_limit(struct intel_crtc *crtc,
int refclk)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
const intel_limit_t *limit;
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
if (intel_is_dual_link_lvds(dev)) {
if (refclk == 100000)
limit = &intel_limits_ironlake_dual_lvds_100m;
@@ -444,20 +462,20 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
return limit;
}
-static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
+static const intel_limit_t *intel_g4x_limit(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
const intel_limit_t *limit;
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
if (intel_is_dual_link_lvds(dev))
limit = &intel_limits_g4x_dual_channel_lvds;
else
limit = &intel_limits_g4x_single_channel_lvds;
- } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
- intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
+ } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI) ||
+ intel_pipe_will_have_type(crtc, INTEL_OUTPUT_ANALOG)) {
limit = &intel_limits_g4x_hdmi;
- } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
+ } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO)) {
limit = &intel_limits_g4x_sdvo;
} else /* The option is for other outputs */
limit = &intel_limits_i9xx_sdvo;
@@ -465,9 +483,9 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
return limit;
}
-static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
+static const intel_limit_t *intel_limit(struct intel_crtc *crtc, int refclk)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
const intel_limit_t *limit;
if (HAS_PCH_SPLIT(dev))
@@ -475,7 +493,7 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
else if (IS_G4X(dev)) {
limit = intel_g4x_limit(crtc);
} else if (IS_PINEVIEW(dev)) {
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_pineview_lvds;
else
limit = &intel_limits_pineview_sdvo;
@@ -484,14 +502,14 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
} else if (IS_VALLEYVIEW(dev)) {
limit = &intel_limits_vlv;
} else if (!IS_GEN2(dev)) {
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i9xx_lvds;
else
limit = &intel_limits_i9xx_sdvo;
} else {
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i8xx_lvds;
- else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO))
+ else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO))
limit = &intel_limits_i8xx_dvo;
else
limit = &intel_limits_i8xx_dac;
@@ -578,15 +596,15 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
}
static bool
-i9xx_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
+i9xx_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
intel_clock_t clock;
int err = target;
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
/*
* For LVDS just rely on its current settings for dual-channel.
* We haven't figured out how to reliably set up different
@@ -639,15 +657,15 @@ i9xx_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
}
static bool
-pnv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
+pnv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
intel_clock_t clock;
int err = target;
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
/*
* For LVDS just rely on its current settings for dual-channel.
* We haven't figured out how to reliably set up different
@@ -698,11 +716,11 @@ pnv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
}
static bool
-g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
+g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
intel_clock_t clock;
int max_n;
bool found;
@@ -710,7 +728,7 @@ g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
int err_most = (target >> 8) + (target >> 9);
found = false;
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
if (intel_is_dual_link_lvds(dev))
clock.p2 = limit->p2.p2_fast;
else
@@ -755,11 +773,11 @@ g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
}
static bool
-vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
+vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
intel_clock_t clock;
unsigned int bestppm = 1000000;
/* min update 19.2 MHz */
@@ -812,11 +830,11 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
}
static bool
-chv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
+chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
intel_clock_t clock;
uint64_t m2;
int found = false;
@@ -889,60 +907,6 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
return intel_crtc->config.cpu_transcoder;
}
-static void g4x_wait_for_vblank(struct drm_device *dev, int pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 frame, frame_reg = PIPE_FRMCOUNT_GM45(pipe);
-
- frame = I915_READ(frame_reg);
-
- if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
- WARN(1, "vblank wait on pipe %c timed out\n",
- pipe_name(pipe));
-}
-
-/**
- * intel_wait_for_vblank - wait for vblank on a given pipe
- * @dev: drm device
- * @pipe: pipe to wait for
- *
- * Wait for vblank to occur on a given pipe. Needed for various bits of
- * mode setting code.
- */
-void intel_wait_for_vblank(struct drm_device *dev, int pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int pipestat_reg = PIPESTAT(pipe);
-
- if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
- g4x_wait_for_vblank(dev, pipe);
- return;
- }
-
- /* Clear existing vblank status. Note this will clear any other
- * sticky status fields as well.
- *
- * This races with i915_driver_irq_handler() with the result
- * that either function could miss a vblank event. Here it is not
- * fatal, as we will either wait upon the next vblank interrupt or
- * timeout. Generally speaking intel_wait_for_vblank() is only
- * called during modeset at which time the GPU should be idle and
- * should *not* be performing page flips and thus not waiting on
- * vblanks...
- * Currently, the result of us stealing a vblank from the irq
- * handler is that a single frame will be skipped during swapbuffers.
- */
- I915_WRITE(pipestat_reg,
- I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
-
- /* Wait for vblank interrupt bit to set */
- if (wait_for(I915_READ(pipestat_reg) &
- PIPE_VBLANK_INTERRUPT_STATUS,
- 50))
- DRM_DEBUG_KMS("vblank wait on pipe %c timed out\n",
- pipe_name(pipe));
-}
-
static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1189,8 +1153,8 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
state_string(state), state_string(cur_state));
}
-static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+void assert_panel_unlocked(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
{
struct drm_device *dev = dev_priv->dev;
int pp_reg;
@@ -1263,7 +1227,7 @@ void assert_pipe(struct drm_i915_private *dev_priv,
(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
state = true;
- if (!intel_display_power_enabled(dev_priv,
+ if (!intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
cur_state = false;
} else {
@@ -1332,7 +1296,14 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
int reg, sprite;
u32 val;
- if (IS_VALLEYVIEW(dev)) {
+ if (INTEL_INFO(dev)->gen >= 9) {
+ for_each_sprite(pipe, sprite) {
+ val = I915_READ(PLANE_CTL(pipe, sprite));
+ WARN(val & PLANE_CTL_ENABLE,
+ "plane %d assertion failure, should be off on pipe %c but is still active\n",
+ sprite, pipe_name(pipe));
+ }
+ } else if (IS_VALLEYVIEW(dev)) {
for_each_sprite(pipe, sprite) {
reg = SPCNTR(pipe, sprite);
val = I915_READ(reg);
@@ -1533,12 +1504,13 @@ static void intel_init_dpio(struct drm_device *dev)
}
}
-static void vlv_enable_pll(struct intel_crtc *crtc)
+static void vlv_enable_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int reg = DPLL(crtc->pipe);
- u32 dpll = crtc->config.dpll_hw_state.dpll;
+ u32 dpll = pipe_config->dpll_hw_state.dpll;
assert_pipe_disabled(dev_priv, crtc->pipe);
@@ -1556,7 +1528,7 @@ static void vlv_enable_pll(struct intel_crtc *crtc)
if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
- I915_WRITE(DPLL_MD(crtc->pipe), crtc->config.dpll_hw_state.dpll_md);
+ I915_WRITE(DPLL_MD(crtc->pipe), pipe_config->dpll_hw_state.dpll_md);
POSTING_READ(DPLL_MD(crtc->pipe));
/* We do this three times for luck */
@@ -1571,7 +1543,8 @@ static void vlv_enable_pll(struct intel_crtc *crtc)
udelay(150); /* wait for warmup */
}
-static void chv_enable_pll(struct intel_crtc *crtc)
+static void chv_enable_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1596,14 +1569,14 @@ static void chv_enable_pll(struct intel_crtc *crtc)
udelay(1);
/* Enable PLL */
- I915_WRITE(DPLL(pipe), crtc->config.dpll_hw_state.dpll);
+ I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
/* Check PLL is locked */
if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
DRM_ERROR("PLL %d failed to lock\n", pipe);
/* not sure when this should be written */
- I915_WRITE(DPLL_MD(pipe), crtc->config.dpll_hw_state.dpll_md);
+ I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
POSTING_READ(DPLL_MD(pipe));
mutex_unlock(&dev_priv->dpio_lock);
@@ -1616,7 +1589,7 @@ static int intel_num_dvo_pipes(struct drm_device *dev)
for_each_intel_crtc(dev, crtc)
count += crtc->active &&
- intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO);
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
return count;
}
@@ -1695,7 +1668,7 @@ static void i9xx_disable_pll(struct intel_crtc *crtc)
/* Disable DVO 2x clock on both PLLs if necessary */
if (IS_I830(dev) &&
- intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO) &&
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
intel_num_dvo_pipes(dev) == 1) {
I915_WRITE(DPLL(PIPE_B),
I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
@@ -1806,7 +1779,7 @@ static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
if (WARN_ON(pll == NULL))
return;
- WARN_ON(!pll->refcount);
+ WARN_ON(!pll->config.crtc_mask);
if (pll->active == 0) {
DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
WARN_ON(pll->on);
@@ -1833,7 +1806,7 @@ static void intel_enable_shared_dpll(struct intel_crtc *crtc)
if (WARN_ON(pll == NULL))
return;
- if (WARN_ON(pll->refcount == 0))
+ if (WARN_ON(pll->config.crtc_mask == 0))
return;
DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n",
@@ -1865,7 +1838,7 @@ static void intel_disable_shared_dpll(struct intel_crtc *crtc)
if (WARN_ON(pll == NULL))
return;
- if (WARN_ON(pll->refcount == 0))
+ if (WARN_ON(pll->config.crtc_mask == 0))
return;
DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
@@ -1933,7 +1906,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
val &= ~TRANS_INTERLACE_MASK;
if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
if (HAS_PCH_IBX(dev_priv->dev) &&
- intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
+ intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
val |= TRANS_LEGACY_INTERLACED_ILK;
else
val |= TRANS_INTERLACED;
@@ -2056,7 +2029,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
* need the check.
*/
if (!HAS_PCH_SPLIT(dev_priv->dev))
- if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DSI))
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
assert_dsi_pll_enabled(dev_priv);
else
assert_pll_enabled(dev_priv, pipe);
@@ -2221,11 +2194,13 @@ static int intel_align_height(struct drm_device *dev, int height, bool tiled)
}
int
-intel_pin_and_fence_fb_obj(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
+intel_pin_and_fence_fb_obj(struct drm_plane *plane,
+ struct drm_framebuffer *fb,
struct intel_engine_cs *pipelined)
{
+ struct drm_device *dev = fb->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
u32 alignment;
int ret;
@@ -2233,7 +2208,9 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
switch (obj->tiling_mode) {
case I915_TILING_NONE:
- if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+ if (INTEL_INFO(dev)->gen >= 9)
+ alignment = 256 * 1024;
+ else if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
alignment = 128 * 1024;
else if (INTEL_INFO(dev)->gen >= 4)
alignment = 4 * 1024;
@@ -2241,8 +2218,12 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
alignment = 64 * 1024;
break;
case I915_TILING_X:
- /* pin() will align the object as required by fence */
- alignment = 0;
+ if (INTEL_INFO(dev)->gen >= 9)
+ alignment = 256 * 1024;
+ else {
+ /* pin() will align the object as required by fence */
+ alignment = 0;
+ }
break;
case I915_TILING_Y:
WARN(1, "Y tiled bo slipped through, driver bug!\n");
@@ -2402,6 +2383,7 @@ static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
struct intel_plane_config *plane_config)
{
struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *c;
struct intel_crtc *i;
struct drm_i915_gem_object *obj;
@@ -2433,6 +2415,9 @@ static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
continue;
if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
+ if (obj->tiling_mode != I915_TILING_NONE)
+ dev_priv->preserve_bios_swizzle = true;
+
drm_framebuffer_reference(c->primary->fb);
intel_crtc->base.primary->fb = c->primary->fb;
obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
@@ -2486,6 +2471,12 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
((intel_crtc->config.pipe_src_h - 1) << 16) |
(intel_crtc->config.pipe_src_w - 1));
I915_WRITE(DSPPOS(plane), 0);
+ } else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
+ I915_WRITE(PRIMSIZE(plane),
+ ((intel_crtc->config.pipe_src_h - 1) << 16) |
+ (intel_crtc->config.pipe_src_w - 1));
+ I915_WRITE(PRIMPOS(plane), 0);
+ I915_WRITE(PRIMCNSTALPHA(plane), 0);
}
switch (fb->pixel_format) {
@@ -2672,6 +2663,92 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
POSTING_READ(reg);
}
+static void skylake_update_primary_plane(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj;
+ int pipe = intel_crtc->pipe;
+ u32 plane_ctl, stride;
+
+ if (!intel_crtc->primary_enabled) {
+ I915_WRITE(PLANE_CTL(pipe, 0), 0);
+ I915_WRITE(PLANE_SURF(pipe, 0), 0);
+ POSTING_READ(PLANE_CTL(pipe, 0));
+ return;
+ }
+
+ plane_ctl = PLANE_CTL_ENABLE |
+ PLANE_CTL_PIPE_GAMMA_ENABLE |
+ PLANE_CTL_PIPE_CSC_ENABLE;
+
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_RGB565:
+ plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ plane_ctl |= PLANE_CTL_ORDER_RGBX;
+ plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ plane_ctl |= PLANE_CTL_ORDER_RGBX;
+ plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
+ break;
+ default:
+ BUG();
+ }
+
+ intel_fb = to_intel_framebuffer(fb);
+ obj = intel_fb->obj;
+
+ /*
+ * The stride is expressed either in 64-byte chunks for linear buffers or
+ * in number of tiles for tiled buffers.
+ */
+ switch (obj->tiling_mode) {
+ case I915_TILING_NONE:
+ stride = fb->pitches[0] >> 6;
+ break;
+ case I915_TILING_X:
+ plane_ctl |= PLANE_CTL_TILED_X;
+ stride = fb->pitches[0] >> 9;
+ break;
+ default:
+ BUG();
+ }
+
+ plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
+ if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180))
+ plane_ctl |= PLANE_CTL_ROTATE_180;
+
+ I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
+
+ DRM_DEBUG_KMS("Writing base %08lX %d,%d,%d,%d pitch=%d\n",
+ i915_gem_obj_ggtt_offset(obj),
+ x, y, fb->width, fb->height,
+ fb->pitches[0]);
+
+ I915_WRITE(PLANE_POS(pipe, 0), 0);
+ I915_WRITE(PLANE_OFFSET(pipe, 0), (y << 16) | x);
+ I915_WRITE(PLANE_SIZE(pipe, 0),
+ (intel_crtc->config.pipe_src_h - 1) << 16 |
+ (intel_crtc->config.pipe_src_w - 1));
+ I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
+ I915_WRITE(PLANE_SURF(pipe, 0), i915_gem_obj_ggtt_offset(obj));
+
+ POSTING_READ(PLANE_SURF(pipe, 0));
+}
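/*
 * Quick illustration of the stride units described above, for an assumed
 * 1920x1080 XRGB8888 framebuffer (4 bytes per pixel, pitch 7680 bytes): the
 * linear case shifts by 6 (64-byte chunks), the X-tiled case shifts by 9,
 * which implies a 512-byte-wide tile row.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t pitch = 1920 * 4;		/* bytes per scanline */

	uint32_t stride_linear = pitch >> 6;	/* 64-byte units -> 120 */
	uint32_t stride_x_tiled = pitch >> 9;	/* 512-byte tiles -> 15 */

	printf("linear=%u x-tiled=%u\n", stride_linear, stride_x_tiled);
	return 0;
}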
+
/* Assume fb object is pinned & idle & fenced and just update base pointers */
static int
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
@@ -2682,32 +2759,16 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
if (dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
- intel_increase_pllclock(dev, to_intel_crtc(crtc)->pipe);
dev_priv->display.update_primary_plane(crtc, fb, x, y);
return 0;
}
-void intel_display_handle_reset(struct drm_device *dev)
+static void intel_complete_page_flips(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
- /*
- * Flips in the rings have been nuked by the reset,
- * so complete all pending flips so that user space
- * will get its events and not get stuck.
- *
- * Also update the base address of all primary
- * planes to the the last fb to make sure we're
- * showing the correct fb after a reset.
- *
- * Need to make two loops over the crtcs so that we
- * don't try to grab a crtc mutex before the
- * pending_flip_queue really got woken up.
- */
-
for_each_crtc(dev, crtc) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum plane plane = intel_crtc->plane;
@@ -2715,6 +2776,12 @@ void intel_display_handle_reset(struct drm_device *dev)
intel_prepare_page_flip(dev, plane);
intel_finish_page_flip_plane(dev, plane);
}
+}
+
+static void intel_update_primary_planes(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
for_each_crtc(dev, crtc) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -2734,6 +2801,79 @@ void intel_display_handle_reset(struct drm_device *dev)
}
}
+void intel_prepare_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc;
+
+ /* no reset support for gen2 */
+ if (IS_GEN2(dev))
+ return;
+
+ /* reset doesn't touch the display */
+ if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+ return;
+
+ drm_modeset_lock_all(dev);
+
+ /*
+ * Disabling the crtcs gracefully seems nicer. Also the
+ * g33 docs say we should at least disable all the planes.
+ */
+ for_each_intel_crtc(dev, crtc) {
+ if (crtc->active)
+ dev_priv->display.crtc_disable(&crtc->base);
+ }
+}
+
+void intel_finish_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ /*
+ * Flips in the rings will be nuked by the reset,
+ * so complete all pending flips so that user space
+ * will get its events and not get stuck.
+ */
+ intel_complete_page_flips(dev);
+
+ /* no reset support for gen2 */
+ if (IS_GEN2(dev))
+ return;
+
+ /* reset doesn't touch the display */
+ if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
+ /*
+ * Flips in the rings have been nuked by the reset,
+ * so update the base address of all primary
+ * planes to the last fb to make sure we're
+ * showing the correct fb after a reset.
+ */
+ intel_update_primary_planes(dev);
+ return;
+ }
+
+ /*
+ * The display has been reset as well,
+ * so need a full re-initialization.
+ */
+ intel_runtime_pm_disable_interrupts(dev_priv);
+ intel_runtime_pm_enable_interrupts(dev_priv);
+
+ intel_modeset_init_hw(dev);
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (dev_priv->display.hpd_irq_setup)
+ dev_priv->display.hpd_irq_setup(dev);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ intel_modeset_setup_hw_state(dev, true);
+
+ intel_hpd_init(dev_priv);
+
+ drm_modeset_unlock_all(dev);
+}
+
static int
intel_finish_fb(struct drm_framebuffer *old_fb)
{
@@ -2762,20 +2902,58 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- unsigned long flags;
bool pending;
if (i915_reset_in_progress(&dev_priv->gpu_error) ||
intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
return false;
- spin_lock_irqsave(&dev->event_lock, flags);
+ spin_lock_irq(&dev->event_lock);
pending = to_intel_crtc(crtc)->unpin_work != NULL;
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ spin_unlock_irq(&dev->event_lock);
return pending;
}
+static void intel_update_pipe_size(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct drm_display_mode *adjusted_mode;
+
+ if (!i915.fastboot)
+ return;
+
+ /*
+ * Update pipe size and adjust fitter if needed: the reason for this is
+ * that in compute_mode_changes we check the native mode (not the pfit
+ * mode) to see if we can flip rather than do a full mode set. In the
+ * fastboot case, we'll flip, but if we don't update the pipesrc and
+ * pfit state, we'll end up with a big fb scanned out into the wrong
+ * sized surface.
+ *
+ * To fix this properly, we need to hoist the checks up into
+ * compute_mode_changes (or above), check the actual pfit state and
+ * whether the platform allows pfit disable with pipe active, and only
+ * then update the pipesrc and pfit state, even on the flip path.
+ */
+
+ adjusted_mode = &crtc->config.adjusted_mode;
+
+ I915_WRITE(PIPESRC(crtc->pipe),
+ ((adjusted_mode->crtc_hdisplay - 1) << 16) |
+ (adjusted_mode->crtc_vdisplay - 1));
+ if (!crtc->config.pch_pfit.enabled &&
+ (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
+ I915_WRITE(PF_CTL(crtc->pipe), 0);
+ I915_WRITE(PF_WIN_POS(crtc->pipe), 0);
+ I915_WRITE(PF_WIN_SZ(crtc->pipe), 0);
+ }
+ crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
+ crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
+}
+
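For reference, the PIPESRC write in intel_update_pipe_size() packs the source size as (width - 1) in bits 31:16 and (height - 1) in bits 15:0. A self-contained sketch of that packing, outside the driver:

	#include <stdint.h>
	#include <stdio.h>

	/* Pack a pipe source size the way the PIPESRC register expects it:
	 * bits 31:16 hold (width - 1), bits 15:0 hold (height - 1). */
	static uint32_t pipesrc_pack(unsigned int hdisplay, unsigned int vdisplay)
	{
		return ((uint32_t)(hdisplay - 1) << 16) | (uint32_t)(vdisplay - 1);
	}

	int main(void)
	{
		/* 1920x1080 -> 0x077f0437 */
		printf("0x%08x\n", pipesrc_pack(1920, 1080));
		return 0;
	}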
static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *fb)
@@ -2785,7 +2963,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
struct drm_framebuffer *old_fb = crtc->primary->fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
int ret;
@@ -2808,9 +2985,9 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
}
mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+ ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, NULL);
if (ret == 0)
- i915_gem_track_fb(old_obj, obj,
+ i915_gem_track_fb(old_obj, intel_fb_obj(fb),
INTEL_FRONTBUFFER_PRIMARY(pipe));
mutex_unlock(&dev->struct_mutex);
if (ret != 0) {
@@ -2818,37 +2995,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return ret;
}
- /*
- * Update pipe size and adjust fitter if needed: the reason for this is
- * that in compute_mode_changes we check the native mode (not the pfit
- * mode) to see if we can flip rather than do a full mode set. In the
- * fastboot case, we'll flip, but if we don't update the pipesrc and
- * pfit state, we'll end up with a big fb scanned out into the wrong
- * sized surface.
- *
- * To fix this properly, we need to hoist the checks up into
- * compute_mode_changes (or above), check the actual pfit state and
- * whether the platform allows pfit disable with pipe active, and only
- * then update the pipesrc and pfit state, even on the flip path.
- */
- if (i915.fastboot) {
- const struct drm_display_mode *adjusted_mode =
- &intel_crtc->config.adjusted_mode;
-
- I915_WRITE(PIPESRC(intel_crtc->pipe),
- ((adjusted_mode->crtc_hdisplay - 1) << 16) |
- (adjusted_mode->crtc_vdisplay - 1));
- if (!intel_crtc->config.pch_pfit.enabled &&
- (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
- intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
- I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
- I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
- I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
- }
- intel_crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
- intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
- }
-
dev_priv->display.update_primary_plane(crtc, fb, x, y);
if (intel_crtc->active)
@@ -3472,14 +3618,13 @@ void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
!intel_crtc_has_pending_flip(crtc),
60*HZ) == 0)) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- unsigned long flags;
- spin_lock_irqsave(&dev->event_lock, flags);
+ spin_lock_irq(&dev->event_lock);
if (intel_crtc->unpin_work) {
WARN_ONCE(1, "Removing stuck page flip\n");
page_flip_completed(intel_crtc);
}
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ spin_unlock_irq(&dev->event_lock);
}
if (crtc->primary->fb) {
@@ -3704,9 +3849,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
intel_fdi_normal_train(crtc);
/* For PCH DP, enable TRANS_DP_CTL */
- if (HAS_PCH_CPT(dev) &&
- (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
- intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
+ if (HAS_PCH_CPT(dev) && intel_crtc->config.has_dp_encoder) {
u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
reg = TRANS_DP_CTL(pipe);
temp = I915_READ(reg);
@@ -3766,12 +3909,13 @@ void intel_put_shared_dpll(struct intel_crtc *crtc)
if (pll == NULL)
return;
- if (pll->refcount == 0) {
- WARN(1, "bad %s refcount\n", pll->name);
+ if (!(pll->config.crtc_mask & (1 << crtc->pipe))) {
+ WARN(1, "bad %s crtc mask\n", pll->name);
return;
}
- if (--pll->refcount == 0) {
+ pll->config.crtc_mask &= ~(1 << crtc->pipe);
+ if (pll->config.crtc_mask == 0) {
WARN_ON(pll->on);
WARN_ON(pll->active);
}
@@ -3782,15 +3926,9 @@ void intel_put_shared_dpll(struct intel_crtc *crtc)
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
- struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
+ struct intel_shared_dpll *pll;
enum intel_dpll_id i;
- if (pll) {
- DRM_DEBUG_KMS("CRTC:%d dropping existing %s\n",
- crtc->base.base.id, pll->name);
- intel_put_shared_dpll(crtc);
- }
-
if (HAS_PCH_IBX(dev_priv->dev)) {
/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
i = (enum intel_dpll_id) crtc->pipe;
@@ -3799,7 +3937,7 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
crtc->base.base.id, pll->name);
- WARN_ON(pll->refcount);
+ WARN_ON(pll->new_config->crtc_mask);
goto found;
}
@@ -3808,15 +3946,16 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
pll = &dev_priv->shared_dplls[i];
/* Only want to check enabled timings first */
- if (pll->refcount == 0)
+ if (pll->new_config->crtc_mask == 0)
continue;
- if (memcmp(&crtc->config.dpll_hw_state, &pll->hw_state,
- sizeof(pll->hw_state)) == 0) {
- DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, ative %d)\n",
- crtc->base.base.id,
- pll->name, pll->refcount, pll->active);
-
+ if (memcmp(&crtc->new_config->dpll_hw_state,
+ &pll->new_config->hw_state,
+ sizeof(pll->new_config->hw_state)) == 0) {
+ DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, ative %d)\n",
+ crtc->base.base.id, pll->name,
+ pll->new_config->crtc_mask,
+ pll->active);
goto found;
}
}
@@ -3824,7 +3963,7 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
/* Ok no matching timings, maybe there's a free one? */
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
pll = &dev_priv->shared_dplls[i];
- if (pll->refcount == 0) {
+ if (pll->new_config->crtc_mask == 0) {
DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
crtc->base.base.id, pll->name);
goto found;
@@ -3834,18 +3973,86 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
return NULL;
found:
- if (pll->refcount == 0)
- pll->hw_state = crtc->config.dpll_hw_state;
+ if (pll->new_config->crtc_mask == 0)
+ pll->new_config->hw_state = crtc->new_config->dpll_hw_state;
- crtc->config.shared_dpll = i;
+ crtc->new_config->shared_dpll = i;
DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
pipe_name(crtc->pipe));
- pll->refcount++;
+ pll->new_config->crtc_mask |= 1 << crtc->pipe;
return pll;
}
+/**
+ * intel_shared_dpll_start_config - start a new PLL staged config
+ * @dev_priv: i915 private structure
+ * @clear_pipes: mask of pipes that will have their PLLs freed
+ *
+ * Starts a new PLL staged config, copying the current config but
+ * releasing the references of pipes specified in clear_pipes.
+ */
+static int intel_shared_dpll_start_config(struct drm_i915_private *dev_priv,
+ unsigned clear_pipes)
+{
+ struct intel_shared_dpll *pll;
+ enum intel_dpll_id i;
+
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ pll = &dev_priv->shared_dplls[i];
+
+ pll->new_config = kmemdup(&pll->config, sizeof pll->config,
+ GFP_KERNEL);
+ if (!pll->new_config)
+ goto cleanup;
+
+ pll->new_config->crtc_mask &= ~clear_pipes;
+ }
+
+ return 0;
+
+cleanup:
+ while (--i >= 0) {
+ pll = &dev_priv->shared_dplls[i];
+ kfree(pll->new_config);
+ pll->new_config = NULL;
+ }
+
+ return -ENOMEM;
+}
+
+static void intel_shared_dpll_commit(struct drm_i915_private *dev_priv)
+{
+ struct intel_shared_dpll *pll;
+ enum intel_dpll_id i;
+
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ pll = &dev_priv->shared_dplls[i];
+
+ WARN_ON(pll->new_config == &pll->config);
+
+ pll->config = *pll->new_config;
+ kfree(pll->new_config);
+ pll->new_config = NULL;
+ }
+}
+
+static void intel_shared_dpll_abort_config(struct drm_i915_private *dev_priv)
+{
+ struct intel_shared_dpll *pll;
+ enum intel_dpll_id i;
+
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ pll = &dev_priv->shared_dplls[i];
+
+ WARN_ON(pll->new_config == &pll->config);
+
+ kfree(pll->new_config);
+ pll->new_config = NULL;
+ }
+}
+
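Together, intel_shared_dpll_start_config(), intel_shared_dpll_commit() and intel_shared_dpll_abort_config() above give the shared-DPLL state a simple transactional shape: stage a copy, let intel_get_shared_dpll() mutate the staged copy, then commit it or throw it away. A sketch of the intended pattern, assuming i915 context; compute_new_crtc_states() is a hypothetical compute step:

	/* Sketch only; compute_new_crtc_states() is hypothetical. */
	static int sketch_stage_plls(struct drm_device *dev, unsigned disable_pipes)
	{
		struct drm_i915_private *dev_priv = to_i915(dev);
		int ret;

		/* Stage a copy of the live PLL config, dropping the pipes
		 * that are about to be reconfigured. */
		ret = intel_shared_dpll_start_config(dev_priv, disable_pipes);
		if (ret)
			return ret;

		ret = compute_new_crtc_states(dev); /* fills pll->new_config */
		if (ret) {
			intel_shared_dpll_abort_config(dev_priv);
			return ret;
		}

		/* Make the staged config the live one. */
		intel_shared_dpll_commit(dev_priv);
		return 0;
	}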
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3860,6 +4067,19 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe)
}
}
+static void skylake_pfit_enable(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = crtc->pipe;
+
+ if (crtc->config.pch_pfit.enabled) {
+ I915_WRITE(PS_CTL(pipe), PS_ENABLE);
+ I915_WRITE(PS_WIN_POS(pipe), crtc->config.pch_pfit.pos);
+ I915_WRITE(PS_WIN_SZ(pipe), crtc->config.pch_pfit.size);
+ }
+}
+
static void ironlake_pfit_enable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
@@ -3983,7 +4203,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
return;
if (!HAS_PCH_SPLIT(dev_priv->dev)) {
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
+ if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI))
assert_dsi_pll_enabled(dev_priv);
else
assert_pll_enabled(dev_priv, pipe);
@@ -4038,10 +4258,6 @@ static void intel_crtc_enable_planes(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- assert_vblank_disabled(crtc);
-
- drm_vblank_on(dev, pipe);
-
intel_enable_primary_hw_plane(crtc->primary, crtc);
intel_enable_planes(crtc);
intel_crtc_update_cursor(crtc, true);
@@ -4087,10 +4303,6 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
* consider this a flip to a NULL plane.
*/
intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
-
- drm_vblank_off(dev, pipe);
-
- assert_vblank_disabled(crtc);
}
static void ironlake_crtc_enable(struct drm_crtc *crtc)
@@ -4123,8 +4335,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
intel_crtc->active = true;
- intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
- intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
@@ -4160,6 +4372,9 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
if (HAS_PCH_CPT(dev))
cpt_verify_modeset(dev, intel_crtc->pipe);
+ assert_vblank_disabled(crtc);
+ drm_crtc_vblank_on(crtc);
+
intel_crtc_enable_planes(crtc);
}
@@ -4235,19 +4450,23 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_crtc->active = true;
- intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
if (intel_crtc->config.has_pch_encoder) {
- intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
+ true);
dev_priv->display.fdi_link_train(crtc);
}
intel_ddi_enable_pipe_clock(intel_crtc);
- ironlake_pfit_enable(intel_crtc);
+ if (IS_SKYLAKE(dev))
+ skylake_pfit_enable(intel_crtc);
+ else
+ ironlake_pfit_enable(intel_crtc);
/*
* On ILK+ LUT must be loaded before the pipe is running but with
@@ -4272,12 +4491,30 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_opregion_notify_encoder(encoder, true);
}
+ assert_vblank_disabled(crtc);
+ drm_crtc_vblank_on(crtc);
+
/* If we change the relative order between pipe/planes enabling, we need
* to change the workaround. */
haswell_mode_set_planes_workaround(intel_crtc);
intel_crtc_enable_planes(crtc);
}
+static void skylake_pfit_disable(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = crtc->pipe;
+
+ /* To avoid upsetting the power well on haswell only disable the pfit if
+ * it's in use. The hw state code will make sure we get this right. */
+ if (crtc->config.pch_pfit.enabled) {
+ I915_WRITE(PS_CTL(pipe), 0);
+ I915_WRITE(PS_WIN_POS(pipe), 0);
+ I915_WRITE(PS_WIN_SZ(pipe), 0);
+ }
+}
+
static void ironlake_pfit_disable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
@@ -4307,11 +4544,14 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
intel_crtc_disable_planes(crtc);
+ drm_crtc_vblank_off(crtc);
+ assert_vblank_disabled(crtc);
+
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->disable(encoder);
if (intel_crtc->config.has_pch_encoder)
- intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
intel_disable_pipe(intel_crtc);
@@ -4368,13 +4608,17 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
intel_crtc_disable_planes(crtc);
+ drm_crtc_vblank_off(crtc);
+ assert_vblank_disabled(crtc);
+
for_each_encoder_on_crtc(dev, crtc, encoder) {
intel_opregion_notify_encoder(encoder, false);
encoder->disable(encoder);
}
if (intel_crtc->config.has_pch_encoder)
- intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
+ false);
intel_disable_pipe(intel_crtc);
if (intel_crtc->config.dp_encoder_is_mst)
@@ -4382,7 +4626,10 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
- ironlake_pfit_disable(intel_crtc);
+ if (IS_SKYLAKE(dev))
+ skylake_pfit_disable(intel_crtc);
+ else
+ ironlake_pfit_disable(intel_crtc);
intel_ddi_disable_pipe_clock(intel_crtc);
@@ -4508,20 +4755,6 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
return mask;
}
-void intel_display_set_init_power(struct drm_i915_private *dev_priv,
- bool enable)
-{
- if (dev_priv->power_domains.init_power_on == enable)
- return;
-
- if (enable)
- intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
- else
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
-
- dev_priv->power_domains.init_power_on = enable;
-}
-
static void modeset_update_crtc_power_domains(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4544,6 +4777,9 @@ static void modeset_update_crtc_power_domains(struct drm_device *dev)
intel_display_power_get(dev_priv, domain);
}
+ if (dev_priv->display.modeset_global_resources)
+ dev_priv->display.modeset_global_resources(dev);
+
for_each_intel_crtc(dev, crtc) {
enum intel_display_power_domain domain;
@@ -4575,7 +4811,7 @@ static void vlv_update_cdclk(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->vlv_cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
- DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz",
+ DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
dev_priv->vlv_cdclk_freq);
/*
@@ -4614,10 +4850,9 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
mutex_unlock(&dev_priv->rps.hw_lock);
if (cdclk == 400000) {
- u32 divider, vco;
+ u32 divider;
- vco = valleyview_get_vco(dev_priv);
- divider = DIV_ROUND_CLOSEST(vco << 1, cdclk) - 1;
+ divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
mutex_lock(&dev_priv->dpio_lock);
/* adjust cdclk divider */
@@ -4696,8 +4931,7 @@ static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
int max_pixclk)
{
- int vco = valleyview_get_vco(dev_priv);
- int freq_320 = (vco << 1) % 320000 != 0 ? 333333 : 320000;
+ int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? 333333 : 320000;
/* FIXME: Punit isn't quite ready yet */
if (IS_CHERRYVIEW(dev_priv->dev))
@@ -4766,18 +5000,30 @@ static void valleyview_modeset_global_resources(struct drm_device *dev)
int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
if (req_cdclk != dev_priv->vlv_cdclk_freq) {
+ /*
+ * FIXME: We can end up here with all power domains off, yet
+ * with a CDCLK frequency other than the minimum. To account
+ * for this take the PIPE-A power domain, which covers the HW
+ * blocks needed for the following programming. This can be
+ * removed once it's guaranteed that we get here either with
+ * the minimum CDCLK set, or the required power domains
+ * enabled.
+ */
+ intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
+
if (IS_CHERRYVIEW(dev))
cherryview_set_cdclk(dev, req_cdclk);
else
valleyview_set_cdclk(dev, req_cdclk);
- }
- modeset_update_crtc_power_domains(dev);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
+ }
}
static void valleyview_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
@@ -4788,13 +5034,13 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
if (intel_crtc->active)
return;
- is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
+ is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
if (!is_dsi) {
if (IS_CHERRYVIEW(dev))
- chv_prepare_pll(intel_crtc);
+ chv_prepare_pll(intel_crtc, &intel_crtc->config);
else
- vlv_prepare_pll(intel_crtc);
+ vlv_prepare_pll(intel_crtc, &intel_crtc->config);
}
if (intel_crtc->config.has_dp_encoder)
@@ -4802,11 +5048,18 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
intel_set_pipe_timings(intel_crtc);
+ if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
+ I915_WRITE(CHV_CANVAS(pipe), 0);
+ }
+
i9xx_set_pipeconf(intel_crtc);
intel_crtc->active = true;
- intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_pll_enable)
@@ -4814,9 +5067,9 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
if (!is_dsi) {
if (IS_CHERRYVIEW(dev))
- chv_enable_pll(intel_crtc);
+ chv_enable_pll(intel_crtc, &intel_crtc->config);
else
- vlv_enable_pll(intel_crtc);
+ vlv_enable_pll(intel_crtc, &intel_crtc->config);
}
for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -4833,10 +5086,13 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
+ assert_vblank_disabled(crtc);
+ drm_crtc_vblank_on(crtc);
+
intel_crtc_enable_planes(crtc);
/* Underruns don't raise interrupts, so check manually. */
- i9xx_check_fifo_underruns(dev);
+ i9xx_check_fifo_underruns(dev_priv);
}
static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
@@ -4851,6 +5107,7 @@ static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
@@ -4872,7 +5129,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
intel_crtc->active = true;
if (!IS_GEN2(dev))
- intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
@@ -4890,6 +5147,9 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
+ assert_vblank_disabled(crtc);
+ drm_crtc_vblank_on(crtc);
+
intel_crtc_enable_planes(crtc);
/*
@@ -4900,10 +5160,10 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
* but leave the pipe running.
*/
if (IS_GEN2(dev))
- intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
/* Underruns don't raise interrupts, so check manually. */
- i9xx_check_fifo_underruns(dev);
+ i9xx_check_fifo_underruns(dev_priv);
}
static void i9xx_pfit_disable(struct intel_crtc *crtc)
@@ -4939,7 +5199,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
* but leave the pipe running.
*/
if (IS_GEN2(dev))
- intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
/*
* Vblank time updates from the shadow to live plane control register
@@ -4953,9 +5213,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
intel_set_memory_cxsr(dev_priv, false);
intel_crtc_disable_planes(crtc);
- for_each_encoder_on_crtc(dev, crtc, encoder)
- encoder->disable(encoder);
-
/*
* On gen2 planes are double buffered but the pipe isn't, so we must
* wait for planes to fully turn off before disabling the pipe.
@@ -4964,6 +5221,12 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
*/
intel_wait_for_vblank(dev, pipe);
+ drm_crtc_vblank_off(crtc);
+ assert_vblank_disabled(crtc);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->disable(encoder);
+
intel_disable_pipe(intel_crtc);
i9xx_pfit_disable(intel_crtc);
@@ -4972,7 +5235,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
if (encoder->post_disable)
encoder->post_disable(encoder);
- if (!intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI)) {
+ if (!intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI)) {
if (IS_CHERRYVIEW(dev))
chv_disable_pll(dev_priv, pipe);
else if (IS_VALLEYVIEW(dev))
@@ -4982,7 +5245,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
}
if (!IS_GEN2(dev))
- intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
intel_crtc->active = false;
intel_update_watermarks(crtc);
@@ -4996,36 +5259,6 @@ static void i9xx_crtc_off(struct drm_crtc *crtc)
{
}
-static void intel_crtc_update_sarea(struct drm_crtc *crtc,
- bool enabled)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_master_private *master_priv;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
-
- if (!dev->primary->master)
- return;
-
- master_priv = dev->primary->master->driver_priv;
- if (!master_priv->sarea_priv)
- return;
-
- switch (pipe) {
- case 0:
- master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
- master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
- break;
- case 1:
- master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
- master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
- break;
- default:
- DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
- break;
- }
-}
-
/* Master function to enable/disable CRTC and corresponding power wells */
void intel_crtc_control(struct drm_crtc *crtc, bool enable)
{
@@ -5069,8 +5302,6 @@ void intel_crtc_update_dpms(struct drm_crtc *crtc)
enable |= intel_encoder->connectors_active;
intel_crtc_control(crtc, enable);
-
- intel_crtc_update_sarea(crtc, enable);
}
static void intel_crtc_disable(struct drm_crtc *crtc)
@@ -5085,7 +5316,6 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
WARN_ON(!crtc->enabled);
dev_priv->display.crtc_disable(crtc);
- intel_crtc_update_sarea(crtc, false);
dev_priv->display.off(crtc);
if (crtc->primary->fb) {
@@ -5324,11 +5554,11 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
/* FIXME should check pixel clock limits on all platforms */
if (INTEL_INFO(dev)->gen < 4) {
- struct drm_i915_private *dev_priv = dev->dev_private;
int clock_limit =
dev_priv->display.get_display_clock_speed(dev);
@@ -5355,7 +5585,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
* - LVDS dual channel mode
* - Double wide pipe
*/
- if ((intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
+ if ((intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
pipe_config->pipe_src_w &= ~1;
@@ -5377,13 +5607,6 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
if (HAS_IPS(dev))
hsw_compute_ips_config(crtc, pipe_config);
- /*
- * XXX: PCH/WRPLL clock sharing is done in ->mode_set, so make sure the
- * old clock survives for now.
- */
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev) || HAS_DDI(dev))
- pipe_config->shared_dpll = crtc->config.shared_dpll;
-
if (pipe_config->has_pch_encoder)
return ironlake_fdi_compute_config(crtc, pipe_config);
@@ -5393,7 +5616,6 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
static int valleyview_get_display_clock_speed(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int vco = valleyview_get_vco(dev_priv);
u32 val;
int divider;
@@ -5401,6 +5623,9 @@ static int valleyview_get_display_clock_speed(struct drm_device *dev)
if (IS_CHERRYVIEW(dev))
return 400000;
+ if (dev_priv->hpll_freq == 0)
+ dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
+
mutex_lock(&dev_priv->dpio_lock);
val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
mutex_unlock(&dev_priv->dpio_lock);
@@ -5411,7 +5636,7 @@ static int valleyview_get_display_clock_speed(struct drm_device *dev)
(divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
"cdclk change in progress\n");
- return DIV_ROUND_CLOSEST(vco << 1, divider + 1);
+ return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1);
}
static int i945_get_display_clock_speed(struct drm_device *dev)
@@ -5543,15 +5768,15 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
-static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
+static int i9xx_get_refclk(struct intel_crtc *crtc, int num_connectors)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int refclk;
if (IS_VALLEYVIEW(dev)) {
refclk = 100000;
- } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
refclk = dev_priv->vbt.lvds_ssc_freq;
DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
@@ -5581,24 +5806,24 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
u32 fp, fp2 = 0;
if (IS_PINEVIEW(dev)) {
- fp = pnv_dpll_compute_fp(&crtc->config.dpll);
+ fp = pnv_dpll_compute_fp(&crtc->new_config->dpll);
if (reduced_clock)
fp2 = pnv_dpll_compute_fp(reduced_clock);
} else {
- fp = i9xx_dpll_compute_fp(&crtc->config.dpll);
+ fp = i9xx_dpll_compute_fp(&crtc->new_config->dpll);
if (reduced_clock)
fp2 = i9xx_dpll_compute_fp(reduced_clock);
}
- crtc->config.dpll_hw_state.fp0 = fp;
+ crtc->new_config->dpll_hw_state.fp0 = fp;
crtc->lowfreq_avail = false;
- if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
+ if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
reduced_clock && i915.powersave) {
- crtc->config.dpll_hw_state.fp1 = fp2;
+ crtc->new_config->dpll_hw_state.fp1 = fp2;
crtc->lowfreq_avail = true;
} else {
- crtc->config.dpll_hw_state.fp1 = fp;
+ crtc->new_config->dpll_hw_state.fp1 = fp;
}
}
@@ -5687,7 +5912,8 @@ void intel_dp_set_m_n(struct intel_crtc *crtc)
&crtc->config.dp_m2_n2);
}
-static void vlv_update_pll(struct intel_crtc *crtc)
+static void vlv_update_pll(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
{
u32 dpll, dpll_md;
@@ -5702,14 +5928,15 @@ static void vlv_update_pll(struct intel_crtc *crtc)
if (crtc->pipe == PIPE_B)
dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
dpll |= DPLL_VCO_ENABLE;
- crtc->config.dpll_hw_state.dpll = dpll;
+ pipe_config->dpll_hw_state.dpll = dpll;
- dpll_md = (crtc->config.pixel_multiplier - 1)
+ dpll_md = (pipe_config->pixel_multiplier - 1)
<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
- crtc->config.dpll_hw_state.dpll_md = dpll_md;
+ pipe_config->dpll_hw_state.dpll_md = dpll_md;
}
-static void vlv_prepare_pll(struct intel_crtc *crtc)
+static void vlv_prepare_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5720,11 +5947,11 @@ static void vlv_prepare_pll(struct intel_crtc *crtc)
mutex_lock(&dev_priv->dpio_lock);
- bestn = crtc->config.dpll.n;
- bestm1 = crtc->config.dpll.m1;
- bestm2 = crtc->config.dpll.m2;
- bestp1 = crtc->config.dpll.p1;
- bestp2 = crtc->config.dpll.p2;
+ bestn = pipe_config->dpll.n;
+ bestm1 = pipe_config->dpll.m1;
+ bestm2 = pipe_config->dpll.m2;
+ bestp1 = pipe_config->dpll.p1;
+ bestp2 = pipe_config->dpll.p2;
/* See eDP HDMI DPIO driver vbios notes doc */
@@ -5761,17 +5988,16 @@ static void vlv_prepare_pll(struct intel_crtc *crtc)
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
/* Set HBR and RBR LPF coefficients */
- if (crtc->config.port_clock == 162000 ||
- intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
- intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
+ if (pipe_config->port_clock == 162000 ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
0x009f0003);
else
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
0x00d0000f);
- if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
- intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
+ if (crtc->config.has_dp_encoder) {
/* Use SSC source */
if (pipe == PIPE_A)
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
@@ -5791,8 +6017,8 @@ static void vlv_prepare_pll(struct intel_crtc *crtc)
coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
- if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
- intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
coreclk |= 0x01000000;
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
@@ -5800,19 +6026,21 @@ static void vlv_prepare_pll(struct intel_crtc *crtc)
mutex_unlock(&dev_priv->dpio_lock);
}
-static void chv_update_pll(struct intel_crtc *crtc)
+static void chv_update_pll(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
{
- crtc->config.dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
+ pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
DPLL_VCO_ENABLE;
if (crtc->pipe != PIPE_A)
- crtc->config.dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+ pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
- crtc->config.dpll_hw_state.dpll_md =
- (crtc->config.pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ pipe_config->dpll_hw_state.dpll_md =
+ (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}
-static void chv_prepare_pll(struct intel_crtc *crtc)
+static void chv_prepare_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5823,18 +6051,18 @@ static void chv_prepare_pll(struct intel_crtc *crtc)
u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
int refclk;
- bestn = crtc->config.dpll.n;
- bestm2_frac = crtc->config.dpll.m2 & 0x3fffff;
- bestm1 = crtc->config.dpll.m1;
- bestm2 = crtc->config.dpll.m2 >> 22;
- bestp1 = crtc->config.dpll.p1;
- bestp2 = crtc->config.dpll.p2;
+ bestn = pipe_config->dpll.n;
+ bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
+ bestm1 = pipe_config->dpll.m1;
+ bestm2 = pipe_config->dpll.m2 >> 22;
+ bestp1 = pipe_config->dpll.p1;
+ bestp2 = pipe_config->dpll.p2;
/*
* Enable Refclk and SSC
*/
I915_WRITE(dpll_reg,
- crtc->config.dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
+ pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
mutex_lock(&dev_priv->dpio_lock);
@@ -5862,7 +6090,7 @@ static void chv_prepare_pll(struct intel_crtc *crtc)
(2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT));
/* Loop filter */
- refclk = i9xx_get_refclk(&crtc->base, 0);
+ refclk = i9xx_get_refclk(crtc, 0);
loopfilter = 5 << DPIO_CHV_PROP_COEFF_SHIFT |
2 << DPIO_CHV_GAIN_CTRL_SHIFT;
if (refclk == 100000)
@@ -5882,6 +6110,53 @@ static void chv_prepare_pll(struct intel_crtc *crtc)
mutex_unlock(&dev_priv->dpio_lock);
}
+/**
+ * vlv_force_pll_on - forcibly enable just the PLL
+ * @dev: DRM device
+ * @pipe: pipe PLL to enable
+ * @dpll: PLL configuration
+ *
+ * Enable the PLL for @pipe using the supplied @dpll config. To be used
+ * in cases where we need the PLL enabled even when @pipe is not going to
+ * be enabled.
+ */
+void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
+ const struct dpll *dpll)
+{
+ struct intel_crtc *crtc =
+ to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
+ struct intel_crtc_config pipe_config = {
+ .pixel_multiplier = 1,
+ .dpll = *dpll,
+ };
+
+ if (IS_CHERRYVIEW(dev)) {
+ chv_update_pll(crtc, &pipe_config);
+ chv_prepare_pll(crtc, &pipe_config);
+ chv_enable_pll(crtc, &pipe_config);
+ } else {
+ vlv_update_pll(crtc, &pipe_config);
+ vlv_prepare_pll(crtc, &pipe_config);
+ vlv_enable_pll(crtc, &pipe_config);
+ }
+}
+
+/**
+ * vlv_force_pll_off - forcibly disable just the PLL
+ * @dev: DRM device
+ * @pipe: pipe PLL to disable
+ *
+ * Disable the PLL for @pipe. Counterpart to vlv_force_pll_on(), to be
+ * used once the forcibly enabled PLL is no longer needed.
+ */
+void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
+{
+ if (IS_CHERRYVIEW(dev))
+ chv_disable_pll(to_i915(dev), pipe);
+ else
+ vlv_disable_pll(to_i915(dev), pipe);
+}
+
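vlv_force_pll_on() and vlv_force_pll_off() let a caller run a VLV/CHV DPLL without enabling the pipe it feeds. A hedged usage sketch, assuming i915 context; the divider values below are placeholders, not a validated clock setting:

	/* Sketch only; the dpll values here are illustrative placeholders. */
	static void sketch_force_pll(struct drm_device *dev)
	{
		const struct dpll clock = {
			.n = 1, .m1 = 2, .m2 = 100, .p1 = 3, .p2 = 2,
		};

		/* Spin up pipe A's PLL without enabling the pipe itself. */
		vlv_force_pll_on(dev, PIPE_A, &clock);

		/* ... whatever needed the PLL clock runs here ... */

		vlv_force_pll_off(dev, PIPE_A);
	}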
static void i9xx_update_pll(struct intel_crtc *crtc,
intel_clock_t *reduced_clock,
int num_connectors)
@@ -5890,29 +6165,29 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpll;
bool is_sdvo;
- struct dpll *clock = &crtc->config.dpll;
+ struct dpll *clock = &crtc->new_config->dpll;
i9xx_update_pll_dividers(crtc, reduced_clock);
- is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) ||
- intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);
+ is_sdvo = intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO) ||
+ intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI);
dpll = DPLL_VGA_MODE_DIS;
- if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS))
+ if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
dpll |= DPLLB_MODE_LVDS;
else
dpll |= DPLLB_MODE_DAC_SERIAL;
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
- dpll |= (crtc->config.pixel_multiplier - 1)
+ dpll |= (crtc->new_config->pixel_multiplier - 1)
<< SDVO_MULTIPLIER_SHIFT_HIRES;
}
if (is_sdvo)
dpll |= DPLL_SDVO_HIGH_SPEED;
- if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT))
+ if (crtc->new_config->has_dp_encoder)
dpll |= DPLL_SDVO_HIGH_SPEED;
/* compute bitmask from p1 value */
@@ -5940,21 +6215,21 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
if (INTEL_INFO(dev)->gen >= 4)
dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
- if (crtc->config.sdvo_tv_clock)
+ if (crtc->new_config->sdvo_tv_clock)
dpll |= PLL_REF_INPUT_TVCLKINBC;
- else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
+ else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2)
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
dpll |= PLL_REF_INPUT_DREFCLK;
dpll |= DPLL_VCO_ENABLE;
- crtc->config.dpll_hw_state.dpll = dpll;
+ crtc->new_config->dpll_hw_state.dpll = dpll;
if (INTEL_INFO(dev)->gen >= 4) {
- u32 dpll_md = (crtc->config.pixel_multiplier - 1)
+ u32 dpll_md = (crtc->new_config->pixel_multiplier - 1)
<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
- crtc->config.dpll_hw_state.dpll_md = dpll_md;
+ crtc->new_config->dpll_hw_state.dpll_md = dpll_md;
}
}
@@ -5965,13 +6240,13 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpll;
- struct dpll *clock = &crtc->config.dpll;
+ struct dpll *clock = &crtc->new_config->dpll;
i9xx_update_pll_dividers(crtc, reduced_clock);
dpll = DPLL_VGA_MODE_DIS;
- if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
} else {
if (clock->p1 == 2)
@@ -5982,17 +6257,17 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
dpll |= PLL_P2_DIVIDE_BY_4;
}
- if (!IS_I830(dev) && intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
+ if (!IS_I830(dev) && intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO))
dpll |= DPLL_DVO_2X_MODE;
- if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
+ if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2)
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
dpll |= PLL_REF_INPUT_DREFCLK;
dpll |= DPLL_VCO_ENABLE;
- crtc->config.dpll_hw_state.dpll = dpll;
+ crtc->new_config->dpll_hw_state.dpll = dpll;
}
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
@@ -6016,7 +6291,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
crtc_vtotal -= 1;
crtc_vblank_end -= 1;
- if (intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO))
+ if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
else
vsyncshift = adjusted_mode->crtc_hsync_start -
@@ -6174,7 +6449,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
if (INTEL_INFO(dev)->gen < 4 ||
- intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO))
+ intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
else
pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
@@ -6188,13 +6463,10 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
POSTING_READ(PIPECONF(intel_crtc->pipe));
}
-static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
- int x, int y,
- struct drm_framebuffer *fb)
+static int i9xx_crtc_compute_clock(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int refclk, num_connectors = 0;
intel_clock_t clock, reduced_clock;
bool ok, has_reduced_clock = false;
@@ -6202,7 +6474,10 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
struct intel_encoder *encoder;
const intel_limit_t *limit;
- for_each_encoder_on_crtc(dev, crtc, encoder) {
+ for_each_intel_encoder(dev, encoder) {
+ if (encoder->new_crtc != crtc)
+ continue;
+
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
@@ -6210,6 +6485,8 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
case INTEL_OUTPUT_DSI:
is_dsi = true;
break;
+ default:
+ break;
}
num_connectors++;
@@ -6218,7 +6495,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
if (is_dsi)
return 0;
- if (!intel_crtc->config.clock_set) {
+ if (!crtc->new_config->clock_set) {
refclk = i9xx_get_refclk(crtc, num_connectors);
/*
@@ -6229,7 +6506,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
*/
limit = intel_limit(crtc, refclk);
ok = dev_priv->display.find_dpll(limit, crtc,
- intel_crtc->config.port_clock,
+ crtc->new_config->port_clock,
refclk, NULL, &clock);
if (!ok) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
@@ -6250,23 +6527,23 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
&reduced_clock);
}
/* Compat-code for transition, will disappear. */
- intel_crtc->config.dpll.n = clock.n;
- intel_crtc->config.dpll.m1 = clock.m1;
- intel_crtc->config.dpll.m2 = clock.m2;
- intel_crtc->config.dpll.p1 = clock.p1;
- intel_crtc->config.dpll.p2 = clock.p2;
+ crtc->new_config->dpll.n = clock.n;
+ crtc->new_config->dpll.m1 = clock.m1;
+ crtc->new_config->dpll.m2 = clock.m2;
+ crtc->new_config->dpll.p1 = clock.p1;
+ crtc->new_config->dpll.p2 = clock.p2;
}
if (IS_GEN2(dev)) {
- i8xx_update_pll(intel_crtc,
+ i8xx_update_pll(crtc,
has_reduced_clock ? &reduced_clock : NULL,
num_connectors);
} else if (IS_CHERRYVIEW(dev)) {
- chv_update_pll(intel_crtc);
+ chv_update_pll(crtc, crtc->new_config);
} else if (IS_VALLEYVIEW(dev)) {
- vlv_update_pll(intel_crtc);
+ vlv_update_pll(crtc, crtc->new_config);
} else {
- i9xx_update_pll(intel_crtc,
+ i9xx_update_pll(crtc,
has_reduced_clock ? &reduced_clock : NULL,
num_connectors);
}
@@ -6432,8 +6709,8 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
- if (!intel_display_power_enabled(dev_priv,
- POWER_DOMAIN_PIPE(crtc->pipe)))
+ if (!intel_display_power_is_enabled(dev_priv,
+ POWER_DOMAIN_PIPE(crtc->pipe)))
return false;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
@@ -6538,6 +6815,8 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
if (enc_to_dig_port(&encoder->base)->port == PORT_A)
has_cpu_edp = true;
break;
+ default:
+ break;
}
}
@@ -6842,6 +7121,8 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
case INTEL_OUTPUT_ANALOG:
has_vga = true;
break;
+ default:
+ break;
}
}
@@ -6870,11 +7151,16 @@ static int ironlake_get_refclk(struct drm_crtc *crtc)
int num_connectors = 0;
bool is_lvds = false;
- for_each_encoder_on_crtc(dev, crtc, encoder) {
+ for_each_intel_encoder(dev, encoder) {
+ if (encoder->new_crtc != to_intel_crtc(crtc))
+ continue;
+
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
+ default:
+ break;
}
num_connectors++;
}
@@ -7019,7 +7305,7 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
- if (IS_BROADWELL(dev)) {
+ if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
val = 0;
switch (intel_crtc->config.pipe_bpp) {
@@ -7054,18 +7340,12 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int refclk;
const intel_limit_t *limit;
bool ret, is_lvds = false;
- for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
- switch (intel_encoder->type) {
- case INTEL_OUTPUT_LVDS:
- is_lvds = true;
- break;
- }
- }
+ is_lvds = intel_pipe_will_have_type(intel_crtc, INTEL_OUTPUT_LVDS);
refclk = ironlake_get_refclk(crtc);
@@ -7074,9 +7354,9 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
* refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
- limit = intel_limit(crtc, refclk);
- ret = dev_priv->display.find_dpll(limit, crtc,
- to_intel_crtc(crtc)->config.port_clock,
+ limit = intel_limit(intel_crtc, refclk);
+ ret = dev_priv->display.find_dpll(limit, intel_crtc,
+ intel_crtc->new_config->port_clock,
refclk, NULL, clock);
if (!ret)
return false;
@@ -7089,7 +7369,7 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
* downclock feature.
*/
*has_reduced_clock =
- dev_priv->display.find_dpll(limit, crtc,
+ dev_priv->display.find_dpll(limit, intel_crtc,
dev_priv->lvds_downclock,
refclk, clock,
reduced_clock);
@@ -7126,7 +7406,10 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
int factor, num_connectors = 0;
bool is_lvds = false, is_sdvo = false;
- for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ for_each_intel_encoder(dev, intel_encoder) {
+ if (intel_encoder->new_crtc != to_intel_crtc(crtc))
+ continue;
+
switch (intel_encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
@@ -7135,6 +7418,8 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
case INTEL_OUTPUT_HDMI:
is_sdvo = true;
break;
+ default:
+ break;
}
num_connectors++;
@@ -7147,10 +7432,10 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
dev_priv->vbt.lvds_ssc_freq == 100000) ||
(HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
factor = 25;
- } else if (intel_crtc->config.sdvo_tv_clock)
+ } else if (intel_crtc->new_config->sdvo_tv_clock)
factor = 20;
- if (ironlake_needs_fb_cb_tune(&intel_crtc->config.dpll, factor))
+ if (ironlake_needs_fb_cb_tune(&intel_crtc->new_config->dpll, factor))
*fp |= FP_CB_TUNE;
if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
@@ -7163,20 +7448,20 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
else
dpll |= DPLLB_MODE_DAC_SERIAL;
- dpll |= (intel_crtc->config.pixel_multiplier - 1)
+ dpll |= (intel_crtc->new_config->pixel_multiplier - 1)
<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
if (is_sdvo)
dpll |= DPLL_SDVO_HIGH_SPEED;
- if (intel_crtc->config.has_dp_encoder)
+ if (intel_crtc->new_config->has_dp_encoder)
dpll |= DPLL_SDVO_HIGH_SPEED;
/* compute bitmask from p1 value */
- dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ dpll |= (1 << (intel_crtc->new_config->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
/* also FPA1 */
- dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+ dpll |= (1 << (intel_crtc->new_config->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
- switch (intel_crtc->config.dpll.p2) {
+ switch (intel_crtc->new_config->dpll.p2) {
case 5:
dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
break;
@@ -7199,78 +7484,64 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
return dpll | DPLL_VCO_ENABLE;
}
-static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
- int x, int y,
- struct drm_framebuffer *fb)
+static int ironlake_crtc_compute_clock(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int num_connectors = 0;
+ struct drm_device *dev = crtc->base.dev;
intel_clock_t clock, reduced_clock;
u32 dpll = 0, fp = 0, fp2 = 0;
bool ok, has_reduced_clock = false;
bool is_lvds = false;
- struct intel_encoder *encoder;
struct intel_shared_dpll *pll;
- for_each_encoder_on_crtc(dev, crtc, encoder) {
- switch (encoder->type) {
- case INTEL_OUTPUT_LVDS:
- is_lvds = true;
- break;
- }
-
- num_connectors++;
- }
+ is_lvds = intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS);
WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
"Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
- ok = ironlake_compute_clocks(crtc, &clock,
+ ok = ironlake_compute_clocks(&crtc->base, &clock,
&has_reduced_clock, &reduced_clock);
- if (!ok && !intel_crtc->config.clock_set) {
+ if (!ok && !crtc->new_config->clock_set) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
/* Compat-code for transition, will disappear. */
- if (!intel_crtc->config.clock_set) {
- intel_crtc->config.dpll.n = clock.n;
- intel_crtc->config.dpll.m1 = clock.m1;
- intel_crtc->config.dpll.m2 = clock.m2;
- intel_crtc->config.dpll.p1 = clock.p1;
- intel_crtc->config.dpll.p2 = clock.p2;
+ if (!crtc->new_config->clock_set) {
+ crtc->new_config->dpll.n = clock.n;
+ crtc->new_config->dpll.m1 = clock.m1;
+ crtc->new_config->dpll.m2 = clock.m2;
+ crtc->new_config->dpll.p1 = clock.p1;
+ crtc->new_config->dpll.p2 = clock.p2;
}
/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
- if (intel_crtc->config.has_pch_encoder) {
- fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll);
+ if (crtc->new_config->has_pch_encoder) {
+ fp = i9xx_dpll_compute_fp(&crtc->new_config->dpll);
if (has_reduced_clock)
fp2 = i9xx_dpll_compute_fp(&reduced_clock);
- dpll = ironlake_compute_dpll(intel_crtc,
+ dpll = ironlake_compute_dpll(crtc,
&fp, &reduced_clock,
has_reduced_clock ? &fp2 : NULL);
- intel_crtc->config.dpll_hw_state.dpll = dpll;
- intel_crtc->config.dpll_hw_state.fp0 = fp;
+ crtc->new_config->dpll_hw_state.dpll = dpll;
+ crtc->new_config->dpll_hw_state.fp0 = fp;
if (has_reduced_clock)
- intel_crtc->config.dpll_hw_state.fp1 = fp2;
+ crtc->new_config->dpll_hw_state.fp1 = fp2;
else
- intel_crtc->config.dpll_hw_state.fp1 = fp;
+ crtc->new_config->dpll_hw_state.fp1 = fp;
- pll = intel_get_shared_dpll(intel_crtc);
+ pll = intel_get_shared_dpll(crtc);
if (pll == NULL) {
DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
- pipe_name(intel_crtc->pipe));
+ pipe_name(crtc->pipe));
return -EINVAL;
}
- } else
- intel_put_shared_dpll(intel_crtc);
+ }
if (is_lvds && has_reduced_clock && i915.powersave)
- intel_crtc->lowfreq_avail = true;
+ crtc->lowfreq_avail = true;
else
- intel_crtc->lowfreq_avail = false;
+ crtc->lowfreq_avail = false;
return 0;
}
@@ -7351,6 +7622,22 @@ static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
&pipe_config->fdi_m_n, NULL);
}
+static void skylake_get_pfit_config(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t tmp;
+
+ tmp = I915_READ(PS_CTL(crtc->pipe));
+
+ if (tmp & PS_ENABLE) {
+ pipe_config->pch_pfit.enabled = true;
+ pipe_config->pch_pfit.pos = I915_READ(PS_WIN_POS(crtc->pipe));
+ pipe_config->pch_pfit.size = I915_READ(PS_WIN_SZ(crtc->pipe));
+ }
+}
+
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
@@ -7442,8 +7729,8 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
- if (!intel_display_power_enabled(dev_priv,
- POWER_DOMAIN_PIPE(crtc->pipe)))
+ if (!intel_display_power_is_enabled(dev_priv,
+ POWER_DOMAIN_PIPE(crtc->pipe)))
return false;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
@@ -7636,7 +7923,6 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
uint32_t val;
- unsigned long irqflags;
val = I915_READ(LCPLL_CTL);
@@ -7656,10 +7942,10 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
* to call special forcewake code that doesn't touch runtime PM and
* doesn't enable the forcewake delayed work.
*/
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ spin_lock_irq(&dev_priv->uncore.lock);
if (dev_priv->uncore.forcewake_count++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ spin_unlock_irq(&dev_priv->uncore.lock);
if (val & LCPLL_POWER_DOWN_ALLOW) {
val &= ~LCPLL_POWER_DOWN_ALLOW;
@@ -7690,10 +7976,10 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
}
/* See the big comment above. */
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ spin_lock_irq(&dev_priv->uncore.lock);
if (--dev_priv->uncore.forcewake_count == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ spin_unlock_irq(&dev_priv->uncore.lock);
}
/*
@@ -7755,28 +8041,36 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
intel_prepare_ddi(dev);
}
-static void snb_modeset_global_resources(struct drm_device *dev)
+static int haswell_crtc_compute_clock(struct intel_crtc *crtc)
{
- modeset_update_crtc_power_domains(dev);
-}
+ if (!intel_ddi_pll_select(crtc))
+ return -EINVAL;
-static void haswell_modeset_global_resources(struct drm_device *dev)
-{
- modeset_update_crtc_power_domains(dev);
+ crtc->lowfreq_avail = false;
+
+ return 0;
}
-static int haswell_crtc_mode_set(struct drm_crtc *crtc,
- int x, int y,
- struct drm_framebuffer *fb)
+static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
+ enum port port,
+ struct intel_crtc_config *pipe_config)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- if (!intel_ddi_pll_select(intel_crtc))
- return -EINVAL;
+ u32 temp;
- intel_crtc->lowfreq_avail = false;
+ temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
+ pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);
- return 0;
+ switch (pipe_config->ddi_pll_sel) {
+ case SKL_DPLL1:
+ pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
+ break;
+ case SKL_DPLL2:
+ pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
+ break;
+ case SKL_DPLL3:
+ pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
+ break;
+ }
}
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
@@ -7808,7 +8102,10 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
- haswell_get_ddi_pll(dev_priv, port, pipe_config);
+ if (IS_SKYLAKE(dev))
+ skylake_get_ddi_pll(dev_priv, port, pipe_config);
+ else
+ haswell_get_ddi_pll(dev_priv, port, pipe_config);
if (pipe_config->shared_dpll >= 0) {
pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
@@ -7822,7 +8119,8 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
* DDI E. So just check whether this pipe is wired to DDI E and whether
* the PCH transcoder is on.
*/
- if ((port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
+ if (INTEL_INFO(dev)->gen < 9 &&
+ (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
pipe_config->has_pch_encoder = true;
tmp = I915_READ(FDI_RX_CTL(PIPE_A));
@@ -7841,7 +8139,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
enum intel_display_power_domain pfit_domain;
uint32_t tmp;
- if (!intel_display_power_enabled(dev_priv,
+ if (!intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(crtc->pipe)))
return false;
@@ -7870,7 +8168,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
pipe_config->cpu_transcoder = TRANSCODER_EDP;
}
- if (!intel_display_power_enabled(dev_priv,
+ if (!intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
return false;
@@ -7883,8 +8181,12 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
intel_get_pipe_timings(crtc, pipe_config);
pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
- if (intel_display_power_enabled(dev_priv, pfit_domain))
- ironlake_get_pfit_config(crtc, pipe_config);
+ if (intel_display_power_is_enabled(dev_priv, pfit_domain)) {
+ if (IS_SKYLAKE(dev))
+ skylake_get_pfit_config(crtc, pipe_config);
+ else
+ ironlake_get_pfit_config(crtc, pipe_config);
+ }
if (IS_HASWELL(dev))
pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
@@ -7900,314 +8202,6 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
return true;
}
-static struct {
- int clock;
- u32 config;
-} hdmi_audio_clock[] = {
- { DIV_ROUND_UP(25200 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
- { 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
- { 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
- { 27000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
- { 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
- { 54000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
- { DIV_ROUND_UP(74250 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
- { 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
- { DIV_ROUND_UP(148500 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
- { 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
-};
-
-/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
-static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
- if (mode->clock == hdmi_audio_clock[i].clock)
- break;
- }
-
- if (i == ARRAY_SIZE(hdmi_audio_clock)) {
- DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock);
- i = 1;
- }
-
- DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
- hdmi_audio_clock[i].clock,
- hdmi_audio_clock[i].config);
-
- return hdmi_audio_clock[i].config;
-}
-
-static bool intel_eld_uptodate(struct drm_connector *connector,
- int reg_eldv, uint32_t bits_eldv,
- int reg_elda, uint32_t bits_elda,
- int reg_edid)
-{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
- uint8_t *eld = connector->eld;
- uint32_t i;
-
- i = I915_READ(reg_eldv);
- i &= bits_eldv;
-
- if (!eld[0])
- return !i;
-
- if (!i)
- return false;
-
- i = I915_READ(reg_elda);
- i &= ~bits_elda;
- I915_WRITE(reg_elda, i);
-
- for (i = 0; i < eld[2]; i++)
- if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
- return false;
-
- return true;
-}
-
-static void g4x_write_eld(struct drm_connector *connector,
- struct drm_crtc *crtc,
- struct drm_display_mode *mode)
-{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
- uint8_t *eld = connector->eld;
- uint32_t eldv;
- uint32_t len;
- uint32_t i;
-
- i = I915_READ(G4X_AUD_VID_DID);
-
- if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
- eldv = G4X_ELDV_DEVCL_DEVBLC;
- else
- eldv = G4X_ELDV_DEVCTG;
-
- if (intel_eld_uptodate(connector,
- G4X_AUD_CNTL_ST, eldv,
- G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
- G4X_HDMIW_HDMIEDID))
- return;
-
- i = I915_READ(G4X_AUD_CNTL_ST);
- i &= ~(eldv | G4X_ELD_ADDR);
- len = (i >> 9) & 0x1f; /* ELD buffer size */
- I915_WRITE(G4X_AUD_CNTL_ST, i);
-
- if (!eld[0])
- return;
-
- len = min_t(uint8_t, eld[2], len);
- DRM_DEBUG_DRIVER("ELD size %d\n", len);
- for (i = 0; i < len; i++)
- I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
-
- i = I915_READ(G4X_AUD_CNTL_ST);
- i |= eldv;
- I915_WRITE(G4X_AUD_CNTL_ST, i);
-}
-
-static void haswell_write_eld(struct drm_connector *connector,
- struct drm_crtc *crtc,
- struct drm_display_mode *mode)
-{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
- uint8_t *eld = connector->eld;
- uint32_t eldv;
- uint32_t i;
- int len;
- int pipe = to_intel_crtc(crtc)->pipe;
- int tmp;
-
- int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe);
- int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe);
- int aud_config = HSW_AUD_CFG(pipe);
- int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;
-
- /* Audio output enable */
- DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
- tmp = I915_READ(aud_cntrl_st2);
- tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
- I915_WRITE(aud_cntrl_st2, tmp);
- POSTING_READ(aud_cntrl_st2);
-
- assert_pipe_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
-
- /* Set ELD valid state */
- tmp = I915_READ(aud_cntrl_st2);
- DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%08x\n", tmp);
- tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
- I915_WRITE(aud_cntrl_st2, tmp);
- tmp = I915_READ(aud_cntrl_st2);
- DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%08x\n", tmp);
-
- /* Enable HDMI mode */
- tmp = I915_READ(aud_config);
- DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%08x\n", tmp);
- /* clear N_programing_enable and N_value_index */
- tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
- I915_WRITE(aud_config, tmp);
-
- DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
-
- eldv = AUDIO_ELD_VALID_A << (pipe * 4);
-
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
- DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
- eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
- I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
- } else {
- I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
- }
-
- if (intel_eld_uptodate(connector,
- aud_cntrl_st2, eldv,
- aud_cntl_st, IBX_ELD_ADDRESS,
- hdmiw_hdmiedid))
- return;
-
- i = I915_READ(aud_cntrl_st2);
- i &= ~eldv;
- I915_WRITE(aud_cntrl_st2, i);
-
- if (!eld[0])
- return;
-
- i = I915_READ(aud_cntl_st);
- i &= ~IBX_ELD_ADDRESS;
- I915_WRITE(aud_cntl_st, i);
- i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */
- DRM_DEBUG_DRIVER("port num:%d\n", i);
-
- len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */
- DRM_DEBUG_DRIVER("ELD size %d\n", len);
- for (i = 0; i < len; i++)
- I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
-
- i = I915_READ(aud_cntrl_st2);
- i |= eldv;
- I915_WRITE(aud_cntrl_st2, i);
-
-}
-
-static void ironlake_write_eld(struct drm_connector *connector,
- struct drm_crtc *crtc,
- struct drm_display_mode *mode)
-{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
- uint8_t *eld = connector->eld;
- uint32_t eldv;
- uint32_t i;
- int len;
- int hdmiw_hdmiedid;
- int aud_config;
- int aud_cntl_st;
- int aud_cntrl_st2;
- int pipe = to_intel_crtc(crtc)->pipe;
-
- if (HAS_PCH_IBX(connector->dev)) {
- hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
- aud_config = IBX_AUD_CFG(pipe);
- aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
- aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
- } else if (IS_VALLEYVIEW(connector->dev)) {
- hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
- aud_config = VLV_AUD_CFG(pipe);
- aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
- aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
- } else {
- hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
- aud_config = CPT_AUD_CFG(pipe);
- aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
- aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
- }
-
- DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
-
- if (IS_VALLEYVIEW(connector->dev)) {
- struct intel_encoder *intel_encoder;
- struct intel_digital_port *intel_dig_port;
-
- intel_encoder = intel_attached_encoder(connector);
- intel_dig_port = enc_to_dig_port(&intel_encoder->base);
- i = intel_dig_port->port;
- } else {
- i = I915_READ(aud_cntl_st);
- i = (i >> 29) & DIP_PORT_SEL_MASK;
- /* DIP_Port_Select, 0x1 = PortB */
- }
-
- if (!i) {
- DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
- /* operate blindly on all ports */
- eldv = IBX_ELD_VALIDB;
- eldv |= IBX_ELD_VALIDB << 4;
- eldv |= IBX_ELD_VALIDB << 8;
- } else {
- DRM_DEBUG_DRIVER("ELD on port %c\n", port_name(i));
- eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
- }
-
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
- DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
- eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
- I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
- } else {
- I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
- }
-
- if (intel_eld_uptodate(connector,
- aud_cntrl_st2, eldv,
- aud_cntl_st, IBX_ELD_ADDRESS,
- hdmiw_hdmiedid))
- return;
-
- i = I915_READ(aud_cntrl_st2);
- i &= ~eldv;
- I915_WRITE(aud_cntrl_st2, i);
-
- if (!eld[0])
- return;
-
- i = I915_READ(aud_cntl_st);
- i &= ~IBX_ELD_ADDRESS;
- I915_WRITE(aud_cntl_st, i);
-
- len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */
- DRM_DEBUG_DRIVER("ELD size %d\n", len);
- for (i = 0; i < len; i++)
- I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
-
- i = I915_READ(aud_cntrl_st2);
- i |= eldv;
- I915_WRITE(aud_cntrl_st2, i);
-}
-
-void intel_write_eld(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- struct drm_crtc *crtc = encoder->crtc;
- struct drm_connector *connector;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- connector = drm_select_eld(encoder, mode);
- if (!connector)
- return;
-
- DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
- connector->base.id,
- connector->name,
- connector->encoder->base.id,
- connector->encoder->name);
-
- connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
-
- if (dev_priv->display.write_eld)
- dev_priv->display.write_eld(connector, crtc, mode);
-}
-
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
{
struct drm_device *dev = crtc->dev;
@@ -8253,8 +8247,10 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
intel_crtc->cursor_cntl = 0;
}
- if (intel_crtc->cursor_base != base)
+ if (intel_crtc->cursor_base != base) {
I915_WRITE(_CURABASE, base);
+ intel_crtc->cursor_base = base;
+ }
if (intel_crtc->cursor_size != size) {
I915_WRITE(CURSIZE, size);
@@ -8294,9 +8290,13 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
return;
}
cntl |= pipe << 28; /* Connect to correct pipe */
+
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ cntl |= CURSOR_PIPE_CSC_ENABLE;
}
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
- cntl |= CURSOR_PIPE_CSC_ENABLE;
+
+ if (to_intel_plane(crtc->cursor)->rotation == BIT(DRM_ROTATE_180))
+ cntl |= CURSOR_ROTATE_180;
if (intel_crtc->cursor_cntl != cntl) {
I915_WRITE(CURCNTR(pipe), cntl);
@@ -8307,6 +8307,8 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
/* and commit changes on next vblank */
I915_WRITE(CURBASE(pipe), base);
POSTING_READ(CURBASE(pipe));
+
+ intel_crtc->cursor_base = base;
}
/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
@@ -8353,11 +8355,17 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
I915_WRITE(CURPOS(pipe), pos);
+ /* ILK+ do this automagically */
+ if (HAS_GMCH_DISPLAY(dev) &&
+ to_intel_plane(crtc->cursor)->rotation == BIT(DRM_ROTATE_180)) {
+ base += (intel_crtc->cursor_height *
+ intel_crtc->cursor_width - 1) * 4;
+ }
+
if (IS_845G(dev) || IS_I865G(dev))
i845_update_cursor(crtc, base);
else
i9xx_update_cursor(crtc, base);
- intel_crtc->cursor_base = base;
}
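The GMCH-only hunk above bumps the cursor base address by (height * width - 1) * 4 when the plane is rotated 180 degrees: with 4 bytes per pixel (a packed ARGB cursor) and scanout walking the buffer backwards, the base register has to point at the last pixel. A standalone sketch of that arithmetic follows; rotated_cursor_base() and the GTT offset are illustrative, only the 4 bytes/pixel assumption comes from the code above.

#include <stdio.h>
#include <stdint.h>

/* byte offset of the last pixel in a packed 32bpp (4 bytes/pixel) cursor image */
static uint32_t rotated_cursor_base(uint32_t base, int width, int height)
{
	return base + (height * width - 1) * 4;
}

int main(void)
{
	uint32_t base = 0x100000;	/* a 64x64 ARGB cursor at a hypothetical offset */

	printf("0 degree base:   0x%08x\n", base);
	printf("180 degree base: 0x%08x\n", rotated_cursor_base(base, 64, 64));
	return 0;
}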
static bool cursor_size_ok(struct drm_device *dev,
@@ -8397,22 +8405,15 @@ static bool cursor_size_ok(struct drm_device *dev,
return true;
}
-/*
- * intel_crtc_cursor_set_obj - Set cursor to specified GEM object
- *
- * Note that the object's reference will be consumed if the update fails. If
- * the update succeeds, the reference of the old object (if any) will be
- * consumed.
- */
static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
struct drm_i915_gem_object *obj,
uint32_t width, uint32_t height)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
- unsigned old_width, stride;
+ unsigned old_width;
uint32_t addr;
int ret;
@@ -8424,30 +8425,11 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
goto finish;
}
- /* Check for which cursor types we support */
- if (!cursor_size_ok(dev, width, height)) {
- DRM_DEBUG("Cursor dimension not supported\n");
- return -EINVAL;
- }
-
- stride = roundup_pow_of_two(width) * 4;
- if (obj->base.size < stride * height) {
- DRM_DEBUG_KMS("buffer is too small\n");
- ret = -ENOMEM;
- goto fail;
- }
-
/* we only need to pin inside GTT if cursor is non-phy */
mutex_lock(&dev->struct_mutex);
if (!INTEL_INFO(dev)->cursor_needs_physical) {
unsigned alignment;
- if (obj->tiling_mode) {
- DRM_DEBUG_KMS("cursor cannot be tiled\n");
- ret = -EINVAL;
- goto fail_locked;
- }
-
/*
* Global gtt pte registers are special registers which actually
* forward writes to a chunk of system memory. Which means that
@@ -8514,17 +8496,15 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
if (old_width != width)
intel_update_watermarks(crtc);
intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
- }
- intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_CURSOR(pipe));
+ intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_CURSOR(pipe));
+ }
return 0;
fail_unpin:
i915_gem_object_unpin_from_display_plane(obj);
fail_locked:
mutex_unlock(&dev->struct_mutex);
-fail:
- drm_gem_object_unreference_unlocked(&obj->base);
return ret;
}
@@ -8559,7 +8539,7 @@ __intel_framebuffer_create(struct drm_device *dev,
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb) {
- drm_gem_object_unreference_unlocked(&obj->base);
+ drm_gem_object_unreference(&obj->base);
return ERR_PTR(-ENOMEM);
}
@@ -8569,7 +8549,7 @@ __intel_framebuffer_create(struct drm_device *dev,
return &intel_fb->base;
err:
- drm_gem_object_unreference_unlocked(&obj->base);
+ drm_gem_object_unreference(&obj->base);
kfree(intel_fb);
return ERR_PTR(ret);
@@ -8702,6 +8682,9 @@ retry:
ret = drm_modeset_lock(&crtc->mutex, ctx);
if (ret)
goto fail_unlock;
+ ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
+ if (ret)
+ goto fail_unlock;
old->dpms_mode = connector->dpms;
old->load_detect_temp = false;
@@ -8739,6 +8722,9 @@ retry:
ret = drm_modeset_lock(&crtc->mutex, ctx);
if (ret)
goto fail_unlock;
+ ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
+ if (ret)
+ goto fail_unlock;
intel_encoder->new_crtc = to_intel_crtc(crtc);
to_intel_connector(connector)->new_encoder = intel_encoder;
@@ -9021,35 +9007,6 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
return mode;
}
-static void intel_increase_pllclock(struct drm_device *dev,
- enum pipe pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int dpll_reg = DPLL(pipe);
- int dpll;
-
- if (!HAS_GMCH_DISPLAY(dev))
- return;
-
- if (!dev_priv->lvds_downclock_avail)
- return;
-
- dpll = I915_READ(dpll_reg);
- if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
- DRM_DEBUG_DRIVER("upclocking LVDS\n");
-
- assert_panel_unlocked(dev_priv, pipe);
-
- dpll &= ~DISPLAY_RATE_SELECT_FPA1;
- I915_WRITE(dpll_reg, dpll);
- intel_wait_for_vblank(dev, pipe);
-
- dpll = I915_READ(dpll_reg);
- if (dpll & DISPLAY_RATE_SELECT_FPA1)
- DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
- }
-}
-
static void intel_decrease_pllclock(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -9125,199 +9082,16 @@ out:
intel_runtime_pm_put(dev_priv);
}
-
-/**
- * intel_mark_fb_busy - mark given planes as busy
- * @dev: DRM device
- * @frontbuffer_bits: bits for the affected planes
- * @ring: optional ring for asynchronous commands
- *
- * This function gets called every time the screen contents change. It can be
- * used to keep e.g. the update rate at the nominal refresh rate with DRRS.
- */
-static void intel_mark_fb_busy(struct drm_device *dev,
- unsigned frontbuffer_bits,
- struct intel_engine_cs *ring)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- enum pipe pipe;
-
- if (!i915.powersave)
- return;
-
- for_each_pipe(dev_priv, pipe) {
- if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
- continue;
-
- intel_increase_pllclock(dev, pipe);
- if (ring && intel_fbc_enabled(dev))
- ring->fbc_dirty = true;
- }
-}
-
-/**
- * intel_fb_obj_invalidate - invalidate frontbuffer object
- * @obj: GEM object to invalidate
- * @ring: set for asynchronous rendering
- *
- * This function gets called every time rendering on the given object starts and
- * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
- * be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed
- * until the rendering completes or a flip on this frontbuffer plane is
- * scheduled.
- */
-void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring)
-{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
- if (!obj->frontbuffer_bits)
- return;
-
- if (ring) {
- mutex_lock(&dev_priv->fb_tracking.lock);
- dev_priv->fb_tracking.busy_bits
- |= obj->frontbuffer_bits;
- dev_priv->fb_tracking.flip_bits
- &= ~obj->frontbuffer_bits;
- mutex_unlock(&dev_priv->fb_tracking.lock);
- }
-
- intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
-
- intel_edp_psr_invalidate(dev, obj->frontbuffer_bits);
-}
-
-/**
- * intel_frontbuffer_flush - flush frontbuffer
- * @dev: DRM device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * This function gets called every time rendering on the given planes has
- * completed and frontbuffer caching can be started again. Flushes will get
- * delayed if they're blocked by some oustanding asynchronous rendering.
- *
- * Can be called without any locks held.
- */
-void intel_frontbuffer_flush(struct drm_device *dev,
- unsigned frontbuffer_bits)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- /* Delay flushing when rings are still busy.*/
- mutex_lock(&dev_priv->fb_tracking.lock);
- frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
- mutex_unlock(&dev_priv->fb_tracking.lock);
-
- intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
-
- intel_edp_psr_flush(dev, frontbuffer_bits);
-
- /*
- * FIXME: Unconditional fbc flushing here is a rather gross hack and
- * needs to be reworked into a proper frontbuffer tracking scheme like
- * psr employs.
- */
- if (IS_BROADWELL(dev))
- gen8_fbc_sw_flush(dev, FBC_REND_CACHE_CLEAN);
-}
-
-/**
- * intel_fb_obj_flush - flush frontbuffer object
- * @obj: GEM object to flush
- * @retire: set when retiring asynchronous rendering
- *
- * This function gets called every time rendering on the given object has
- * completed and frontbuffer caching can be started again. If @retire is true
- * then any delayed flushes will be unblocked.
- */
-void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
- bool retire)
-{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned frontbuffer_bits;
-
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
- if (!obj->frontbuffer_bits)
- return;
-
- frontbuffer_bits = obj->frontbuffer_bits;
-
- if (retire) {
- mutex_lock(&dev_priv->fb_tracking.lock);
- /* Filter out new bits since rendering started. */
- frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
-
- dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
- mutex_unlock(&dev_priv->fb_tracking.lock);
- }
-
- intel_frontbuffer_flush(dev, frontbuffer_bits);
-}
-
-/**
- * intel_frontbuffer_flip_prepare - prepare asnychronous frontbuffer flip
- * @dev: DRM device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * This function gets called after scheduling a flip on @obj. The actual
- * frontbuffer flushing will be delayed until completion is signalled with
- * intel_frontbuffer_flip_complete. If an invalidate happens in between this
- * flush will be cancelled.
- *
- * Can be called without any locks held.
- */
-void intel_frontbuffer_flip_prepare(struct drm_device *dev,
- unsigned frontbuffer_bits)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- mutex_lock(&dev_priv->fb_tracking.lock);
- dev_priv->fb_tracking.flip_bits
- |= frontbuffer_bits;
- mutex_unlock(&dev_priv->fb_tracking.lock);
-}
-
-/**
- * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flush
- * @dev: DRM device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * This function gets called after the flip has been latched and will complete
- * on the next vblank. It will execute the fush if it hasn't been cancalled yet.
- *
- * Can be called without any locks held.
- */
-void intel_frontbuffer_flip_complete(struct drm_device *dev,
- unsigned frontbuffer_bits)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- mutex_lock(&dev_priv->fb_tracking.lock);
- /* Mask any cancelled flips. */
- frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
- dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
- mutex_unlock(&dev_priv->fb_tracking.lock);
-
- intel_frontbuffer_flush(dev, frontbuffer_bits);
-}
-
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct intel_unpin_work *work;
- unsigned long flags;
- spin_lock_irqsave(&dev->event_lock, flags);
+ spin_lock_irq(&dev->event_lock);
work = intel_crtc->unpin_work;
intel_crtc->unpin_work = NULL;
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ spin_unlock_irq(&dev->event_lock);
if (work) {
cancel_work_sync(&work->work);
@@ -9363,6 +9137,10 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
if (intel_crtc == NULL)
return;
+ /*
+ * This is called both by irq handlers and the reset code (to complete
+ * lost pageflips) so needs the full irqsave spinlocks.
+ */
spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work;
@@ -9448,7 +9226,12 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
unsigned long flags;
- /* NB: An MMIO update of the plane base pointer will also
+
+ /*
+ * This is called both by irq handlers and the reset code (to complete
+ * lost pageflips) so needs the full irqsave spinlocks.
+ *
+ * NB: An MMIO update of the plane base pointer will also
* generate a page-flip completion irq, i.e. every modeset
* is also accompanied by a spurious intel_prepare_page_flip().
*/
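The comment above spells out the rule behind the locking changes elsewhere in this patch: paths that only run in process context can take the event lock with spin_lock_irq()/spin_unlock_irq(), while paths reachable from both hard-irq handlers and process context (page-flip completion, reset recovery) keep the irqsave variant so the caller's interrupt state is restored. The following is a schematic kernel-style sketch of the two cases, using a hypothetical example_lock in a throwaway module rather than anything from i915.

#include <linux/module.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

/* only ever called from process context: plain irq disable/enable is enough */
static void process_context_path(void)
{
	spin_lock_irq(&example_lock);
	/* ... touch state shared with an irq handler ... */
	spin_unlock_irq(&example_lock);
}

/* reachable from irq handlers and process context: preserve the caller's state */
static void mixed_context_path(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... same shared state ... */
	spin_unlock_irqrestore(&example_lock, flags);
}

static int __init lock_example_init(void)
{
	process_context_path();
	mixed_context_path();
	return 0;
}
module_init(lock_example_init);
MODULE_LICENSE("GPL");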
@@ -9738,115 +9521,128 @@ static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
struct intel_framebuffer *intel_fb =
to_intel_framebuffer(intel_crtc->base.primary->fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
+ bool atomic_update;
+ u32 start_vbl_count;
u32 dspcntr;
u32 reg;
intel_mark_page_flip_active(intel_crtc);
+ atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
+
reg = DSPCNTR(intel_crtc->plane);
dspcntr = I915_READ(reg);
- if (INTEL_INFO(dev)->gen >= 4) {
- if (obj->tiling_mode != I915_TILING_NONE)
- dspcntr |= DISPPLANE_TILED;
- else
- dspcntr &= ~DISPPLANE_TILED;
- }
+ if (obj->tiling_mode != I915_TILING_NONE)
+ dspcntr |= DISPPLANE_TILED;
+ else
+ dspcntr &= ~DISPPLANE_TILED;
+
I915_WRITE(reg, dspcntr);
I915_WRITE(DSPSURF(intel_crtc->plane),
intel_crtc->unpin_work->gtt_offset);
POSTING_READ(DSPSURF(intel_crtc->plane));
+
+ if (atomic_update)
+ intel_pipe_update_end(intel_crtc, start_vbl_count);
}
-static int intel_postpone_flip(struct drm_i915_gem_object *obj)
+static void intel_mmio_flip_work_func(struct work_struct *work)
{
+ struct intel_crtc *intel_crtc =
+ container_of(work, struct intel_crtc, mmio_flip.work);
struct intel_engine_cs *ring;
- int ret;
+ uint32_t seqno;
- lockdep_assert_held(&obj->base.dev->struct_mutex);
+ seqno = intel_crtc->mmio_flip.seqno;
+ ring = intel_crtc->mmio_flip.ring;
- if (!obj->last_write_seqno)
- return 0;
+ if (seqno)
+ WARN_ON(__i915_wait_seqno(ring, seqno,
+ intel_crtc->reset_counter,
+ false, NULL, NULL) != 0);
- ring = obj->ring;
-
- if (i915_seqno_passed(ring->get_seqno(ring, true),
- obj->last_write_seqno))
- return 0;
-
- ret = i915_gem_check_olr(ring, obj->last_write_seqno);
- if (ret)
- return ret;
-
- if (WARN_ON(!ring->irq_get(ring)))
- return 0;
-
- return 1;
+ intel_do_mmio_flip(intel_crtc);
}
-void intel_notify_mmio_flip(struct intel_engine_cs *ring)
+static int intel_queue_mmio_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring,
+ uint32_t flags)
{
- struct drm_i915_private *dev_priv = to_i915(ring->dev);
- struct intel_crtc *intel_crtc;
- unsigned long irq_flags;
- u32 seqno;
-
- seqno = ring->get_seqno(ring, false);
-
- spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags);
- for_each_intel_crtc(ring->dev, intel_crtc) {
- struct intel_mmio_flip *mmio_flip;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- mmio_flip = &intel_crtc->mmio_flip;
- if (mmio_flip->seqno == 0)
- continue;
+ intel_crtc->mmio_flip.seqno = obj->last_write_seqno;
+ intel_crtc->mmio_flip.ring = obj->ring;
- if (ring->id != mmio_flip->ring_id)
- continue;
+ schedule_work(&intel_crtc->mmio_flip.work);
- if (i915_seqno_passed(seqno, mmio_flip->seqno)) {
- intel_do_mmio_flip(intel_crtc);
- mmio_flip->seqno = 0;
- ring->irq_put(ring);
- }
- }
- spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags);
+ return 0;
}
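The new MMIO-flip path above embeds a work item in the crtc, queues it with schedule_work(), and the worker recovers its crtc with container_of() before waiting on the seqno. That recovery step is plain pointer arithmetic over offsetof(); here is a standalone userspace sketch of it, with made-up fake_work/fake_crtc types standing in for work_struct and intel_crtc.

#include <stdio.h>
#include <stddef.h>

struct fake_work { int pending; };

struct fake_crtc {
	int pipe;
	struct fake_work flip_work;	/* embedded member, like mmio_flip.work */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* what a work handler does: it only receives the member pointer */
static void flip_work_handler(struct fake_work *work)
{
	struct fake_crtc *crtc = container_of(work, struct fake_crtc, flip_work);

	printf("flip work for pipe %d\n", crtc->pipe);
}

int main(void)
{
	struct fake_crtc crtc = { .pipe = 2 };

	flip_work_handler(&crtc.flip_work);
	return 0;
}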
-static int intel_queue_mmio_flip(struct drm_device *dev,
+static int intel_gen9_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring,
uint32_t flags)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- unsigned long irq_flags;
+ uint32_t plane = 0, stride;
int ret;
- if (WARN_ON(intel_crtc->mmio_flip.seqno))
- return -EBUSY;
+ switch (intel_crtc->pipe) {
+ case PIPE_A:
+ plane = MI_DISPLAY_FLIP_SKL_PLANE_1_A;
+ break;
+ case PIPE_B:
+ plane = MI_DISPLAY_FLIP_SKL_PLANE_1_B;
+ break;
+ case PIPE_C:
+ plane = MI_DISPLAY_FLIP_SKL_PLANE_1_C;
+ break;
+ default:
+ WARN_ONCE(1, "unknown plane in flip command\n");
+ return -ENODEV;
+ }
- ret = intel_postpone_flip(obj);
- if (ret < 0)
- return ret;
- if (ret == 0) {
- intel_do_mmio_flip(intel_crtc);
- return 0;
+ switch (obj->tiling_mode) {
+ case I915_TILING_NONE:
+ stride = fb->pitches[0] >> 6;
+ break;
+ case I915_TILING_X:
+ stride = fb->pitches[0] >> 9;
+ break;
+ default:
+ WARN_ONCE(1, "unknown tiling in flip command\n");
+ return -ENODEV;
}
- spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags);
- intel_crtc->mmio_flip.seqno = obj->last_write_seqno;
- intel_crtc->mmio_flip.ring_id = obj->ring->id;
- spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags);
+ ret = intel_ring_begin(ring, 10);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, DERRMR);
+ intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
+ DERRMR_PIPEB_PRI_FLIP_DONE |
+ DERRMR_PIPEC_PRI_FLIP_DONE));
+ intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) |
+ MI_SRM_LRM_GLOBAL_GTT);
+ intel_ring_emit(ring, DERRMR);
+ intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
+ intel_ring_emit(ring, 0);
+
+ intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane);
+ intel_ring_emit(ring, stride << 6 | obj->tiling_mode);
+ intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
+
+ intel_mark_page_flip_active(intel_crtc);
+ __intel_ring_advance(ring);
- /*
- * Double check to catch cases where irq fired before
- * mmio flip data was ready
- */
- intel_notify_mmio_flip(obj->ring);
return 0;
}
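intel_gen9_queue_flip() above programs the stride in hardware units rather than bytes: fb->pitches[0] is shifted right by 6 for linear buffers and by 9 for X-tiled ones, i.e. divided into 64-byte and 512-byte units as the shifts encode. A small arithmetic sketch of that conversion follows; skl_flip_stride() and the example pitch are illustrative, the unit sizes are taken from the shifts above.

#include <stdio.h>
#include <stdint.h>

enum tiling { TILING_NONE, TILING_X };

static uint32_t skl_flip_stride(uint32_t pitch_bytes, enum tiling t)
{
	/* linear: 64-byte units; X-tiled: 512-byte units (matching >>6 and >>9) */
	return t == TILING_NONE ? pitch_bytes >> 6 : pitch_bytes >> 9;
}

int main(void)
{
	uint32_t pitch = 1920 * 4;	/* 1920 pixels, 4 bytes each = 7680 bytes */

	printf("linear stride field:  %u\n", skl_flip_stride(pitch, TILING_NONE));
	printf("X-tiled stride field: %u\n", skl_flip_stride(pitch, TILING_X));
	return 0;
}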
@@ -9905,18 +9701,19 @@ void intel_check_page_flip(struct drm_device *dev, int pipe)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- unsigned long flags;
+
+ WARN_ON(!in_irq());
if (crtc == NULL)
return;
- spin_lock_irqsave(&dev->event_lock, flags);
+ spin_lock(&dev->event_lock);
if (intel_crtc->unpin_work && __intel_pageflip_stall_check(dev, crtc)) {
WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
intel_crtc->unpin_work->flip_queued_vblank, drm_vblank_count(dev, pipe));
page_flip_completed(intel_crtc);
}
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ spin_unlock(&dev->event_lock);
}
static int intel_crtc_page_flip(struct drm_crtc *crtc,
@@ -9932,7 +9729,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
enum pipe pipe = intel_crtc->pipe;
struct intel_unpin_work *work;
struct intel_engine_cs *ring;
- unsigned long flags;
int ret;
/*
@@ -9973,7 +9769,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
goto free_work;
/* We borrow the event spin lock for protecting unpin_work */
- spin_lock_irqsave(&dev->event_lock, flags);
+ spin_lock_irq(&dev->event_lock);
if (intel_crtc->unpin_work) {
/* Before declaring the flip queue wedged, check if
* the hardware completed the operation behind our backs.
@@ -9983,7 +9779,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
page_flip_completed(intel_crtc);
} else {
DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ spin_unlock_irq(&dev->event_lock);
drm_crtc_vblank_put(crtc);
kfree(work);
@@ -9991,7 +9787,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
}
}
intel_crtc->unpin_work = work;
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ spin_unlock_irq(&dev->event_lock);
if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
flush_workqueue(dev_priv->wq);
@@ -10029,7 +9825,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
ring = &dev_priv->ring[RCS];
}
- ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
+ ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, ring);
if (ret)
goto cleanup_pending;
@@ -10078,9 +9874,9 @@ cleanup_pending:
mutex_unlock(&dev->struct_mutex);
cleanup:
- spin_lock_irqsave(&dev->event_lock, flags);
+ spin_lock_irq(&dev->event_lock);
intel_crtc->unpin_work = NULL;
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ spin_unlock_irq(&dev->event_lock);
drm_crtc_vblank_put(crtc);
free_work:
@@ -10091,9 +9887,9 @@ out_hang:
intel_crtc_wait_for_pending_flips(crtc);
ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
if (ret == 0 && event) {
- spin_lock_irqsave(&dev->event_lock, flags);
+ spin_lock_irq(&dev->event_lock);
drm_send_vblank_event(dev, pipe, event);
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ spin_unlock_irq(&dev->event_lock);
}
}
return ret;
@@ -10289,6 +10085,10 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
pipe_config->dp_m2_n2.link_n,
pipe_config->dp_m2_n2.tu);
+ DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
+ pipe_config->has_audio,
+ pipe_config->has_infoframe);
+
DRM_DEBUG_KMS("requested mode:\n");
drm_mode_debug_printmodeline(&pipe_config->requested_mode);
DRM_DEBUG_KMS("adjusted mode:\n");
@@ -10350,6 +10150,48 @@ static bool check_encoder_cloning(struct intel_crtc *crtc)
return true;
}
+static bool check_digital_port_conflicts(struct drm_device *dev)
+{
+ struct intel_connector *connector;
+ unsigned int used_ports = 0;
+
+ /*
+ * Walk the connector list instead of the encoder
+ * list to detect the problem on ddi platforms
+ * where there's just one encoder per digital port.
+ */
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list, base.head) {
+ struct intel_encoder *encoder = connector->new_encoder;
+
+ if (!encoder)
+ continue;
+
+ WARN_ON(!encoder->new_crtc);
+
+ switch (encoder->type) {
+ unsigned int port_mask;
+ case INTEL_OUTPUT_UNKNOWN:
+ if (WARN_ON(!HAS_DDI(dev)))
+ break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ case INTEL_OUTPUT_HDMI:
+ case INTEL_OUTPUT_EDP:
+ port_mask = 1 << enc_to_dig_port(&encoder->base)->port;
+
+ /* the same port mustn't appear more than once */
+ if (used_ports & port_mask)
+ return false;
+
+ used_ports |= port_mask;
+ default:
+ break;
+ }
+ }
+
+ return true;
+}
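check_digital_port_conflicts() relies on one bit per digital port: each connector about to be enabled ORs its port bit into used_ports, and the configuration is rejected as soon as a bit would be set twice. Below is a standalone sketch of the same accumulate-and-test idiom; ports_conflict_free() and the example port numbers are made up and only stand in for the enum port values.

#include <stdio.h>
#include <stdbool.h>

/* returns false as soon as two entries claim the same port */
static bool ports_conflict_free(const int *ports, int n)
{
	unsigned int used_ports = 0;
	int i;

	for (i = 0; i < n; i++) {
		unsigned int port_mask = 1u << ports[i];

		if (used_ports & port_mask)
			return false;	/* same port appears twice */
		used_ports |= port_mask;
	}
	return true;
}

int main(void)
{
	int ok[]  = { 1, 2, 4 };	/* e.g. ports B, C, E */
	int bad[] = { 1, 2, 2 };	/* one port claimed twice */

	printf("ok:  %d\n", ports_conflict_free(ok, 3));
	printf("bad: %d\n", ports_conflict_free(bad, 3));
	return 0;
}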
+
static struct intel_crtc_config *
intel_modeset_pipe_config(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@@ -10366,6 +10208,11 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
return ERR_PTR(-EINVAL);
}
+ if (!check_digital_port_conflicts(dev)) {
+ DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
+ return ERR_PTR(-EINVAL);
+ }
+
pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
if (!pipe_config)
return ERR_PTR(-ENOMEM);
@@ -10571,10 +10418,13 @@ static bool intel_crtc_in_use(struct drm_crtc *crtc)
static void
intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
struct intel_crtc *intel_crtc;
struct drm_connector *connector;
+ intel_shared_dpll_commit(dev_priv);
+
for_each_intel_encoder(dev, intel_encoder) {
if (!intel_encoder->base.crtc)
continue;
@@ -10754,6 +10604,7 @@ intel_pipe_config_compare(struct drm_device *dev,
if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
IS_VALLEYVIEW(dev))
PIPE_CONF_CHECK_I(limited_color_range);
+ PIPE_CONF_CHECK_I(has_infoframe);
PIPE_CONF_CHECK_I(has_audio);
@@ -10810,6 +10661,9 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
+ PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
+ PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
+ PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
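The new has_infoframe and dpll_hw_state.ctrl1/cfgcr1/cfgcr2 lines above reuse the existing PIPE_CONF_CHECK_I/PIPE_CONF_CHECK_X macros, which compare one named field of the expected and found configurations and report the field name on mismatch. A minimal standalone sketch of that macro style follows; fake_pipe_config, CHECK_FIELD and the field values are made up for illustration.

#include <stdio.h>
#include <stdbool.h>

struct fake_pipe_config {
	int has_audio;
	int has_infoframe;
	unsigned int cfgcr1;
};

/* report the mismatching field by name, in the spirit of PIPE_CONF_CHECK_I/_X */
#define CHECK_FIELD(fmt, name) do { \
	if (found->name != expected->name) { \
		fprintf(stderr, "mismatch in " #name " (expected " fmt ", found " fmt ")\n", \
			expected->name, found->name); \
		ok = false; \
	} \
} while (0)

static bool configs_match(const struct fake_pipe_config *found,
			  const struct fake_pipe_config *expected)
{
	bool ok = true;

	CHECK_FIELD("%d", has_audio);
	CHECK_FIELD("%d", has_infoframe);
	CHECK_FIELD("0x%x", cfgcr1);

	return ok;
}

int main(void)
{
	struct fake_pipe_config sw_state = { 1, 1, 0x40 };
	struct fake_pipe_config hw_state = { 1, 0, 0x40 };

	return configs_match(&hw_state, &sw_state) ? 0 : 1;
}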
@@ -10827,6 +10681,56 @@ intel_pipe_config_compare(struct drm_device *dev,
return true;
}
+static void check_wm_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct skl_ddb_allocation hw_ddb, *sw_ddb;
+ struct intel_crtc *intel_crtc;
+ int plane;
+
+ if (INTEL_INFO(dev)->gen < 9)
+ return;
+
+ skl_ddb_get_hw_state(dev_priv, &hw_ddb);
+ sw_ddb = &dev_priv->wm.skl_hw.ddb;
+
+ for_each_intel_crtc(dev, intel_crtc) {
+ struct skl_ddb_entry *hw_entry, *sw_entry;
+ const enum pipe pipe = intel_crtc->pipe;
+
+ if (!intel_crtc->active)
+ continue;
+
+ /* planes */
+ for_each_plane(pipe, plane) {
+ hw_entry = &hw_ddb.plane[pipe][plane];
+ sw_entry = &sw_ddb->plane[pipe][plane];
+
+ if (skl_ddb_entry_equal(hw_entry, sw_entry))
+ continue;
+
+ DRM_ERROR("mismatch in DDB state pipe %c plane %d "
+ "(expected (%u,%u), found (%u,%u))\n",
+ pipe_name(pipe), plane + 1,
+ sw_entry->start, sw_entry->end,
+ hw_entry->start, hw_entry->end);
+ }
+
+ /* cursor */
+ hw_entry = &hw_ddb.cursor[pipe];
+ sw_entry = &sw_ddb->cursor[pipe];
+
+ if (skl_ddb_entry_equal(hw_entry, sw_entry))
+ continue;
+
+ DRM_ERROR("mismatch in DDB state pipe %c cursor "
+ "(expected (%u,%u), found (%u,%u))\n",
+ pipe_name(pipe),
+ sw_entry->start, sw_entry->end,
+ hw_entry->start, hw_entry->end);
+ }
+}
+
static void
check_connector_state(struct drm_device *dev)
{
@@ -10993,9 +10897,9 @@ check_shared_dpll_state(struct drm_device *dev)
active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);
- WARN(pll->active > pll->refcount,
+ WARN(pll->active > hweight32(pll->config.crtc_mask),
"more active pll users than references: %i vs %i\n",
- pll->active, pll->refcount);
+ pll->active, hweight32(pll->config.crtc_mask));
WARN(pll->active && !pll->on,
"pll in active use but not on in sw tracking\n");
WARN(pll->on && !pll->active,
@@ -11013,11 +10917,11 @@ check_shared_dpll_state(struct drm_device *dev)
WARN(pll->active != active_crtcs,
"pll active crtcs mismatch (expected %i, found %i)\n",
pll->active, active_crtcs);
- WARN(pll->refcount != enabled_crtcs,
+ WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs,
"pll enabled crtcs mismatch (expected %i, found %i)\n",
- pll->refcount, enabled_crtcs);
+ hweight32(pll->config.crtc_mask), enabled_crtcs);
- WARN(pll->on && memcmp(&pll->hw_state, &dpll_hw_state,
+ WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state,
sizeof(dpll_hw_state)),
"pll hw state mismatch\n");
}
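With this patch the shared-DPLL code drops the explicit refcount and derives the user count from config.crtc_mask, one bit per CRTC that holds the PLL, so the checks above count users with hweight32(). A standalone sketch of the idea, using GCC/Clang's __builtin_popcount() as a stand-in for the kernel's hweight32():

#include <stdio.h>

int main(void)
{
	/* one bit per CRTC using this PLL: here pipes A and C */
	unsigned int crtc_mask = (1u << 0) | (1u << 2);

	/* hweight32() in the kernel; __builtin_popcount() here */
	printf("PLL users: %d\n", __builtin_popcount(crtc_mask));

	/* releasing pipe C just clears its bit */
	crtc_mask &= ~(1u << 2);
	printf("PLL users after pipe C drops it: %d\n", __builtin_popcount(crtc_mask));
	return 0;
}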
@@ -11026,6 +10930,7 @@ check_shared_dpll_state(struct drm_device *dev)
void
intel_modeset_check_state(struct drm_device *dev)
{
+ check_wm_state(dev);
check_connector_state(dev);
check_encoder_state(dev);
check_crtc_state(dev);
@@ -11076,50 +10981,67 @@ static void update_scanline_offset(struct intel_crtc *crtc)
crtc->scanline_offset = vtotal - 1;
} else if (HAS_DDI(dev) &&
- intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) {
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
crtc->scanline_offset = 2;
} else
crtc->scanline_offset = 1;
}
+static struct intel_crtc_config *
+intel_modeset_compute_config(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_framebuffer *fb,
+ unsigned *modeset_pipes,
+ unsigned *prepare_pipes,
+ unsigned *disable_pipes)
+{
+ struct intel_crtc_config *pipe_config = NULL;
+
+ intel_modeset_affected_pipes(crtc, modeset_pipes,
+ prepare_pipes, disable_pipes);
+
+ if ((*modeset_pipes) == 0)
+ goto out;
+
+ /*
+ * Note this needs changes when we start tracking multiple modes
+ * and crtcs. At that point we'll need to compute the whole config
+ * (i.e. one pipe_config for each crtc) rather than just the one
+ * for this crtc.
+ */
+ pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
+ if (IS_ERR(pipe_config)) {
+ goto out;
+ }
+ intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
+ "[modeset]");
+
+out:
+ return pipe_config;
+}
+
static int __intel_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
- int x, int y, struct drm_framebuffer *fb)
+ int x, int y, struct drm_framebuffer *fb,
+ struct intel_crtc_config *pipe_config,
+ unsigned modeset_pipes,
+ unsigned prepare_pipes,
+ unsigned disable_pipes)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *saved_mode;
- struct intel_crtc_config *pipe_config = NULL;
struct intel_crtc *intel_crtc;
- unsigned disable_pipes, prepare_pipes, modeset_pipes;
int ret = 0;
saved_mode = kmalloc(sizeof(*saved_mode), GFP_KERNEL);
if (!saved_mode)
return -ENOMEM;
- intel_modeset_affected_pipes(crtc, &modeset_pipes,
- &prepare_pipes, &disable_pipes);
-
*saved_mode = crtc->mode;
- /* Hack: Because we don't (yet) support global modeset on multiple
- * crtcs, we don't keep track of the new mode for more than one crtc.
- * Hence simply check whether any bit is set in modeset_pipes in all the
- * pieces of code that are not yet converted to deal with mutliple crtcs
- * changing their mode at the same time. */
- if (modeset_pipes) {
- pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
- if (IS_ERR(pipe_config)) {
- ret = PTR_ERR(pipe_config);
- pipe_config = NULL;
-
- goto out;
- }
- intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
- "[modeset]");
+ if (modeset_pipes)
to_intel_crtc(crtc)->new_config = pipe_config;
- }
/*
* See if the config requires any additional preparation, e.g.
@@ -11135,6 +11057,22 @@ static int __intel_set_mode(struct drm_crtc *crtc,
prepare_pipes &= ~disable_pipes;
}
+ if (dev_priv->display.crtc_compute_clock) {
+ unsigned clear_pipes = modeset_pipes | disable_pipes;
+
+ ret = intel_shared_dpll_start_config(dev_priv, clear_pipes);
+ if (ret)
+ goto done;
+
+ for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
+ ret = dev_priv->display.crtc_compute_clock(intel_crtc);
+ if (ret) {
+ intel_shared_dpll_abort_config(dev_priv);
+ goto done;
+ }
+ }
+ }
+
for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
intel_crtc_disable(&intel_crtc->base);
@@ -11145,6 +11083,10 @@ static int __intel_set_mode(struct drm_crtc *crtc,
/* crtc->mode is already used by the ->mode_set callbacks, hence we need
* to set it here already despite that we pass it down the callchain.
+ *
+ * Note we'll need to fix this up when we start tracking multiple
+ * pipes; here we assume a single modeset_pipe and only track the
+ * single crtc and mode.
*/
if (modeset_pipes) {
crtc->mode = *mode;
@@ -11166,8 +11108,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
* update the the output configuration. */
intel_modeset_update_state(dev, prepare_pipes);
- if (dev_priv->display.modeset_global_resources)
- dev_priv->display.modeset_global_resources(dev);
+ modeset_update_crtc_power_domains(dev);
/* Set up the DPLL and any encoders state that needs to adjust or depend
* on the DPLL.
@@ -11178,9 +11119,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(dev,
- obj,
- NULL);
+ ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, NULL);
if (ret != 0) {
DRM_ERROR("pin & fence failed\n");
mutex_unlock(&dev->struct_mutex);
@@ -11195,11 +11134,6 @@ static int __intel_set_mode(struct drm_crtc *crtc,
crtc->primary->fb = fb;
crtc->x = x;
crtc->y = y;
-
- ret = dev_priv->display.crtc_mode_set(&intel_crtc->base,
- x, y, fb);
- if (ret)
- goto done;
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
@@ -11214,19 +11148,23 @@ done:
if (ret && crtc->enabled)
crtc->mode = *saved_mode;
-out:
kfree(pipe_config);
kfree(saved_mode);
return ret;
}
-static int intel_set_mode(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- int x, int y, struct drm_framebuffer *fb)
+static int intel_set_mode_pipes(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ int x, int y, struct drm_framebuffer *fb,
+ struct intel_crtc_config *pipe_config,
+ unsigned modeset_pipes,
+ unsigned prepare_pipes,
+ unsigned disable_pipes)
{
int ret;
- ret = __intel_set_mode(crtc, mode, x, y, fb);
+ ret = __intel_set_mode(crtc, mode, x, y, fb, pipe_config, modeset_pipes,
+ prepare_pipes, disable_pipes);
if (ret == 0)
intel_modeset_check_state(crtc->dev);
@@ -11234,6 +11172,26 @@ static int intel_set_mode(struct drm_crtc *crtc,
return ret;
}
+static int intel_set_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ int x, int y, struct drm_framebuffer *fb)
+{
+ struct intel_crtc_config *pipe_config;
+ unsigned modeset_pipes, prepare_pipes, disable_pipes;
+
+ pipe_config = intel_modeset_compute_config(crtc, mode, fb,
+ &modeset_pipes,
+ &prepare_pipes,
+ &disable_pipes);
+
+ if (IS_ERR(pipe_config))
+ return PTR_ERR(pipe_config);
+
+ return intel_set_mode_pipes(crtc, mode, x, y, fb, pipe_config,
+ modeset_pipes, prepare_pipes,
+ disable_pipes);
+}
+
void intel_crtc_restore_mode(struct drm_crtc *crtc)
{
intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb);
@@ -11562,6 +11520,8 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
struct drm_device *dev;
struct drm_mode_set save_set;
struct intel_set_config *config;
+ struct intel_crtc_config *pipe_config;
+ unsigned modeset_pipes, prepare_pipes, disable_pipes;
int ret;
BUG_ON(!set);
@@ -11607,9 +11567,38 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
if (ret)
goto fail;
+ pipe_config = intel_modeset_compute_config(set->crtc, set->mode,
+ set->fb,
+ &modeset_pipes,
+ &prepare_pipes,
+ &disable_pipes);
+ if (IS_ERR(pipe_config)) {
+ ret = PTR_ERR(pipe_config);
+ goto fail;
+ } else if (pipe_config) {
+ if (pipe_config->has_audio !=
+ to_intel_crtc(set->crtc)->config.has_audio)
+ config->mode_changed = true;
+
+ /*
+ * Note we have an issue here with infoframes: current code
+ * only updates them on the full mode set path per hw
+ * requirements. So here we should be checking for any
+ * required changes and forcing a mode set.
+ */
+ }
+
+ /* set_mode will free it in the mode_changed case */
+ if (!config->mode_changed)
+ kfree(pipe_config);
+
+ intel_update_pipe_size(to_intel_crtc(set->crtc));
+
if (config->mode_changed) {
- ret = intel_set_mode(set->crtc, set->mode,
- set->x, set->y, set->fb);
+ ret = intel_set_mode_pipes(set->crtc, set->mode,
+ set->x, set->y, set->fb, pipe_config,
+ modeset_pipes, prepare_pipes,
+ disable_pipes);
} else if (config->fb_changed) {
struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
@@ -11679,7 +11668,7 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
{
uint32_t val;
- if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
val = I915_READ(PCH_DPLL(pll->id));
@@ -11693,8 +11682,8 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- I915_WRITE(PCH_FP0(pll->id), pll->hw_state.fp0);
- I915_WRITE(PCH_FP1(pll->id), pll->hw_state.fp1);
+ I915_WRITE(PCH_FP0(pll->id), pll->config.hw_state.fp0);
+ I915_WRITE(PCH_FP1(pll->id), pll->config.hw_state.fp1);
}
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
@@ -11703,7 +11692,7 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
/* PCH refclock must be enabled first */
ibx_assert_pch_refclk_enabled(dev_priv);
- I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
+ I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
/* Wait for the clocks to stabilize. */
POSTING_READ(PCH_DPLL(pll->id));
@@ -11714,7 +11703,7 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
*
* So write it again.
*/
- I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
+ I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
POSTING_READ(PCH_DPLL(pll->id));
udelay(200);
}
@@ -11813,161 +11802,195 @@ disable_unpin:
}
static int
-intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
+intel_check_primary_plane(struct drm_plane *plane,
+ struct intel_plane_state *state)
{
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_rect *dest = &state->dst;
+ struct drm_rect *src = &state->src;
+ const struct drm_rect *clip = &state->clip;
+
+ return drm_plane_helper_check_update(plane, crtc, fb,
+ src, dest, clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true, &state->visible);
+}
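intel_check_primary_plane() above delegates clipping to drm_plane_helper_check_update(), which intersects the destination rectangle with the pipe's clip rectangle and reports whether anything remains visible; the commit step later disables the primary plane when state->visible comes back false. Below is a standalone sketch of just the visibility part of that check (plain rectangle intersection, no scaling); struct rect and clip_rect() are stand-ins, not the DRM helper itself.

#include <stdio.h>
#include <stdbool.h>

struct rect { int x1, y1, x2, y2; };

/* clip dst against clip; returns false if nothing remains visible */
static bool clip_rect(struct rect *dst, const struct rect *clip)
{
	if (dst->x1 < clip->x1) dst->x1 = clip->x1;
	if (dst->y1 < clip->y1) dst->y1 = clip->y1;
	if (dst->x2 > clip->x2) dst->x2 = clip->x2;
	if (dst->y2 > clip->y2) dst->y2 = clip->y2;

	return dst->x1 < dst->x2 && dst->y1 < dst->y2;
}

int main(void)
{
	struct rect clip = { 0, 0, 1920, 1080 };	/* pipe source size */
	struct rect on   = { 100, 100, 900, 600 };
	struct rect off  = { 2000, 0, 2100, 100 };	/* fully off screen */

	printf("on-screen plane visible:  %d\n", clip_rect(&on, &clip));
	printf("off-screen plane visible: %d\n", clip_rect(&off, &clip));
	return 0;
}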
+
+static int
+intel_prepare_primary_plane(struct drm_plane *plane,
+ struct intel_plane_state *state)
+{
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_framebuffer *fb = state->fb;
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
- struct drm_rect dest = {
- /* integer pixels */
- .x1 = crtc_x,
- .y1 = crtc_y,
- .x2 = crtc_x + crtc_w,
- .y2 = crtc_y + crtc_h,
- };
- struct drm_rect src = {
- /* 16.16 fixed point */
- .x1 = src_x,
- .y1 = src_y,
- .x2 = src_x + src_w,
- .y2 = src_y + src_h,
- };
- const struct drm_rect clip = {
- /* integer pixels */
- .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
- .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
- };
- const struct {
- int crtc_x, crtc_y;
- unsigned int crtc_w, crtc_h;
- uint32_t src_x, src_y, src_w, src_h;
- } orig = {
- .crtc_x = crtc_x,
- .crtc_y = crtc_y,
- .crtc_w = crtc_w,
- .crtc_h = crtc_h,
- .src_x = src_x,
- .src_y = src_y,
- .src_w = src_w,
- .src_h = src_h,
- };
- struct intel_plane *intel_plane = to_intel_plane(plane);
- bool visible;
int ret;
- ret = drm_plane_helper_check_update(plane, crtc, fb,
- &src, &dest, &clip,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- false, true, &visible);
+ intel_crtc_wait_for_pending_flips(crtc);
- if (ret)
- return ret;
+ if (intel_crtc_has_pending_flip(crtc)) {
+ DRM_ERROR("pipe is still busy with an old pageflip\n");
+ return -EBUSY;
+ }
- /*
- * If the CRTC isn't enabled, we're just pinning the framebuffer,
- * updating the fb pointer, and returning without touching the
- * hardware. This allows us to later do a drmModeSetCrtc with fb=-1 to
- * turn on the display with all planes setup as desired.
- */
- if (!crtc->enabled) {
+ if (old_obj != obj) {
mutex_lock(&dev->struct_mutex);
-
- /*
- * If we already called setplane while the crtc was disabled,
- * we may have an fb pinned; unpin it.
- */
- if (plane->fb)
- intel_unpin_fb_obj(old_obj);
-
- i915_gem_track_fb(old_obj, obj,
- INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
-
- /* Pin and return without programming hardware */
- ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+ ret = intel_pin_and_fence_fb_obj(plane, fb, NULL);
+ if (ret == 0)
+ i915_gem_track_fb(old_obj, obj,
+ INTEL_FRONTBUFFER_PRIMARY(pipe));
mutex_unlock(&dev->struct_mutex);
-
- return ret;
+ if (ret != 0) {
+ DRM_DEBUG_KMS("pin & fence failed\n");
+ return ret;
+ }
}
- intel_crtc_wait_for_pending_flips(crtc);
+ return 0;
+}
- /*
- * If clipping results in a non-visible primary plane, we'll disable
- * the primary plane. Note that this is a bit different than what
- * happens if userspace explicitly disables the plane by passing fb=0
- * because plane->fb still gets set and pinned.
- */
- if (!visible) {
- mutex_lock(&dev->struct_mutex);
+static void
+intel_commit_primary_plane(struct drm_plane *plane,
+ struct intel_plane_state *state)
+{
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ struct drm_framebuffer *old_fb = plane->fb;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct drm_rect *src = &state->src;
+ crtc->primary->fb = fb;
+ crtc->x = src->x1 >> 16;
+ crtc->y = src->y1 >> 16;
+
+ intel_plane->crtc_x = state->orig_dst.x1;
+ intel_plane->crtc_y = state->orig_dst.y1;
+ intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
+ intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
+ intel_plane->src_x = state->orig_src.x1;
+ intel_plane->src_y = state->orig_src.y1;
+ intel_plane->src_w = drm_rect_width(&state->orig_src);
+ intel_plane->src_h = drm_rect_height(&state->orig_src);
+ intel_plane->obj = obj;
+
+ if (intel_crtc->active) {
/*
- * Try to pin the new fb first so that we can bail out if we
- * fail.
+ * FBC does not work on some platforms for rotated
+ * planes, so disable it when rotation is not 0 and
+ * update it when rotation is set back to 0.
+ *
+ * FIXME: This is redundant with the fbc update done in
+ * the primary plane enable function except that that
+ * one is done too late. We eventually need to unify
+ * this.
*/
- if (plane->fb != fb) {
- ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ if (intel_crtc->primary_enabled &&
+ INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
+ dev_priv->fbc.plane == intel_crtc->plane &&
+ intel_plane->rotation != BIT(DRM_ROTATE_0)) {
+ intel_disable_fbc(dev);
}
- i915_gem_track_fb(old_obj, obj,
- INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
-
- if (intel_crtc->primary_enabled)
- intel_disable_primary_hw_plane(plane, crtc);
+ if (state->visible) {
+ bool was_enabled = intel_crtc->primary_enabled;
+ /* FIXME: kill this fastboot hack */
+ intel_update_pipe_size(intel_crtc);
- if (plane->fb != fb)
- if (plane->fb)
- intel_unpin_fb_obj(old_obj);
+ intel_crtc->primary_enabled = true;
- mutex_unlock(&dev->struct_mutex);
+ dev_priv->display.update_primary_plane(crtc, plane->fb,
+ crtc->x, crtc->y);
- } else {
- if (intel_crtc && intel_crtc->active &&
- intel_crtc->primary_enabled) {
/*
- * FBC does not work on some platforms for rotated
- * planes, so disable it when rotation is not 0 and
- * update it when rotation is set back to 0.
- *
- * FIXME: This is redundant with the fbc update done in
- * the primary plane enable function except that that
- * one is done too late. We eventually need to unify
- * this.
+ * BDW signals flip done immediately if the plane
+ * is disabled, even if the plane enable is already
+ * armed to occur at the next vblank :(
*/
- if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
- dev_priv->fbc.plane == intel_crtc->plane &&
- intel_plane->rotation != BIT(DRM_ROTATE_0)) {
- intel_disable_fbc(dev);
- }
+ if (IS_BROADWELL(dev) && !was_enabled)
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+ } else {
+ /*
+ * If clipping results in a non-visible primary plane,
+ * we'll disable the primary plane. Note that this is
+ * a bit different than what happens if userspace
+ * explicitly disables the plane by passing fb=0
+ * because plane->fb still gets set and pinned.
+ */
+ intel_disable_primary_hw_plane(plane, crtc);
}
- ret = intel_pipe_set_base(crtc, src.x1, src.y1, fb);
- if (ret)
- return ret;
- if (!intel_crtc->primary_enabled)
- intel_enable_primary_hw_plane(plane, crtc);
+ intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
+
+ mutex_lock(&dev->struct_mutex);
+ intel_update_fbc(dev);
+ mutex_unlock(&dev->struct_mutex);
}
- intel_plane->crtc_x = orig.crtc_x;
- intel_plane->crtc_y = orig.crtc_y;
- intel_plane->crtc_w = orig.crtc_w;
- intel_plane->crtc_h = orig.crtc_h;
- intel_plane->src_x = orig.src_x;
- intel_plane->src_y = orig.src_y;
- intel_plane->src_w = orig.src_w;
- intel_plane->src_h = orig.src_h;
- intel_plane->obj = obj;
+ if (old_fb && old_fb != fb) {
+ if (intel_crtc->active)
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+ mutex_lock(&dev->struct_mutex);
+ intel_unpin_fb_obj(old_obj);
+ mutex_unlock(&dev->struct_mutex);
+ }
+}
+
+static int
+intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct intel_plane_state state;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int ret;
+
+ state.crtc = crtc;
+ state.fb = fb;
+
+ /* sample coordinates in 16.16 fixed point */
+ state.src.x1 = src_x;
+ state.src.x2 = src_x + src_w;
+ state.src.y1 = src_y;
+ state.src.y2 = src_y + src_h;
+
+ /* integer pixels */
+ state.dst.x1 = crtc_x;
+ state.dst.x2 = crtc_x + crtc_w;
+ state.dst.y1 = crtc_y;
+ state.dst.y2 = crtc_y + crtc_h;
+
+ state.clip.x1 = 0;
+ state.clip.y1 = 0;
+ state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
+ state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
+
+ state.orig_src = state.src;
+ state.orig_dst = state.dst;
+
+ ret = intel_check_primary_plane(plane, &state);
+ if (ret)
+ return ret;
+
+ ret = intel_prepare_primary_plane(plane, &state);
+ if (ret)
+ return ret;
+
+ intel_commit_primary_plane(plane, &state);
return 0;
}
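The setplane wrapper above builds state->src in 16.16 fixed point (the DRM convention for source coordinates) and state->dst in integer pixels; the commit step recovers crtc->x/y with a >> 16. A small sketch of packing and unpacking such values follows; to_16_16() and from_16_16() are illustrative helpers, not DRM API.

#include <stdio.h>
#include <stdint.h>

static uint32_t to_16_16(uint32_t pixels)  { return pixels << 16; }
static uint32_t from_16_16(uint32_t fixed) { return fixed >> 16; }

int main(void)
{
	uint32_t src_x = to_16_16(128);	/* 128 pixels as a 16.16 value */

	printf("16.16 value:    0x%08x\n", src_x);
	printf("back to pixels: %u\n", from_16_16(src_x));
	return 0;
}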
@@ -12046,51 +12069,92 @@ intel_cursor_plane_disable(struct drm_plane *plane)
}
static int
-intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
+intel_check_cursor_plane(struct drm_plane *plane,
+ struct intel_plane_state *state)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
- struct drm_rect dest = {
- /* integer pixels */
- .x1 = crtc_x,
- .y1 = crtc_y,
- .x2 = crtc_x + crtc_w,
- .y2 = crtc_y + crtc_h,
- };
- struct drm_rect src = {
- /* 16.16 fixed point */
- .x1 = src_x,
- .y1 = src_y,
- .x2 = src_x + src_w,
- .y2 = src_y + src_h,
- };
- const struct drm_rect clip = {
- /* integer pixels */
- .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
- .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
- };
- bool visible;
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_device *dev = crtc->dev;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_rect *dest = &state->dst;
+ struct drm_rect *src = &state->src;
+ const struct drm_rect *clip = &state->clip;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ int crtc_w, crtc_h;
+ unsigned stride;
int ret;
ret = drm_plane_helper_check_update(plane, crtc, fb,
- &src, &dest, &clip,
+ src, dest, clip,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
- true, true, &visible);
+ true, true, &state->visible);
if (ret)
return ret;
- crtc->cursor_x = crtc_x;
- crtc->cursor_y = crtc_y;
+
+ /* if we want to turn off the cursor ignore width and height */
+ if (!obj)
+ return 0;
+
+ /* Check for which cursor types we support */
+ crtc_w = drm_rect_width(&state->orig_dst);
+ crtc_h = drm_rect_height(&state->orig_dst);
+ if (!cursor_size_ok(dev, crtc_w, crtc_h)) {
+ DRM_DEBUG("Cursor dimension not supported\n");
+ return -EINVAL;
+ }
+
+ stride = roundup_pow_of_two(crtc_w) * 4;
+ if (obj->base.size < stride * crtc_h) {
+ DRM_DEBUG_KMS("buffer is too small\n");
+ return -ENOMEM;
+ }
+
+ if (fb == crtc->cursor->fb)
+ return 0;
+
+ /* we only need to pin inside GTT if cursor is non-phy */
+ mutex_lock(&dev->struct_mutex);
+ if (!INTEL_INFO(dev)->cursor_needs_physical && obj->tiling_mode) {
+ DRM_DEBUG_KMS("cursor cannot be tiled\n");
+ ret = -EINVAL;
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
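The size check that used to live in intel_crtc_cursor_set_obj() now runs in intel_check_cursor_plane() above: the cursor stride is the width rounded up to a power of two times 4 bytes per pixel, and the backing object must cover stride * height bytes. A standalone sketch of that arithmetic follows; cursor_min_size() is a made-up helper and roundup_pow_of_two() is open-coded here only for illustration.

#include <stdio.h>
#include <stddef.h>

static size_t roundup_pow_of_two(size_t v)
{
	size_t p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

/* smallest object size that can back a width x height ARGB cursor */
static size_t cursor_min_size(unsigned int width, unsigned int height)
{
	size_t stride = roundup_pow_of_two(width) * 4;	/* 4 bytes per pixel */

	return stride * height;
}

int main(void)
{
	printf("64x64 cursor needs   %zu bytes\n", cursor_min_size(64, 64));
	printf("100x100 cursor needs %zu bytes\n", cursor_min_size(100, 100));
	return 0;
}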
+
+static int
+intel_commit_cursor_plane(struct drm_plane *plane,
+ struct intel_plane_state *state)
+{
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_framebuffer *fb = state->fb;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ int crtc_w, crtc_h;
+
+ crtc->cursor_x = state->orig_dst.x1;
+ crtc->cursor_y = state->orig_dst.y1;
+
+ intel_plane->crtc_x = state->orig_dst.x1;
+ intel_plane->crtc_y = state->orig_dst.y1;
+ intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
+ intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
+ intel_plane->src_x = state->orig_src.x1;
+ intel_plane->src_y = state->orig_src.y1;
+ intel_plane->src_w = drm_rect_width(&state->orig_src);
+ intel_plane->src_h = drm_rect_height(&state->orig_src);
+ intel_plane->obj = obj;
+
if (fb != crtc->cursor->fb) {
+ crtc_w = drm_rect_width(&state->orig_dst);
+ crtc_h = drm_rect_height(&state->orig_dst);
return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h);
} else {
- intel_crtc_update_cursor(crtc, visible);
+ intel_crtc_update_cursor(crtc, state->visible);
intel_frontbuffer_flip(crtc->dev,
INTEL_FRONTBUFFER_CURSOR(intel_crtc->pipe));
@@ -12098,10 +12162,53 @@ intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
return 0;
}
}
+
+static int
+intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_plane_state state;
+ int ret;
+
+ state.crtc = crtc;
+ state.fb = fb;
+
+ /* sample coordinates in 16.16 fixed point */
+ state.src.x1 = src_x;
+ state.src.x2 = src_x + src_w;
+ state.src.y1 = src_y;
+ state.src.y2 = src_y + src_h;
+
+ /* integer pixels */
+ state.dst.x1 = crtc_x;
+ state.dst.x2 = crtc_x + crtc_w;
+ state.dst.y1 = crtc_y;
+ state.dst.y2 = crtc_y + crtc_h;
+
+ state.clip.x1 = 0;
+ state.clip.y1 = 0;
+ state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
+ state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
+
+ state.orig_src = state.src;
+ state.orig_dst = state.dst;
+
+ ret = intel_check_cursor_plane(plane, &state);
+ if (ret)
+ return ret;
+
+ return intel_commit_cursor_plane(plane, &state);
+}
+
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
.update_plane = intel_cursor_plane_update,
.disable_plane = intel_cursor_plane_disable,
.destroy = intel_plane_destroy,
+ .set_property = intel_plane_set_property,
};
static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
@@ -12117,12 +12224,26 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
cursor->max_downscale = 1;
cursor->pipe = pipe;
cursor->plane = pipe;
+ cursor->rotation = BIT(DRM_ROTATE_0);
drm_universal_plane_init(dev, &cursor->base, 0,
&intel_cursor_plane_funcs,
intel_cursor_formats,
ARRAY_SIZE(intel_cursor_formats),
DRM_PLANE_TYPE_CURSOR);
+
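+ /* gen4+ cursors can be rotated by 180 degrees; expose the standard rotation property */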
+ if (INTEL_INFO(dev)->gen >= 4) {
+ if (!dev->mode_config.rotation_property)
+ dev->mode_config.rotation_property =
+ drm_mode_create_rotation_property(dev,
+ BIT(DRM_ROTATE_0) |
+ BIT(DRM_ROTATE_180));
+ if (dev->mode_config.rotation_property)
+ drm_object_attach_property(&cursor->base.base,
+ dev->mode_config.rotation_property,
+ cursor->rotation);
+ }
+
return &cursor->base;
}
@@ -12178,6 +12299,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
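+ /* worker that performs MMIO-based page flips for this crtc */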
+ INIT_WORK(&intel_crtc->mmio_flip.work, intel_mmio_flip_work_func);
+
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
@@ -12198,7 +12321,7 @@ enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
- if (!encoder)
+ if (!encoder || WARN_ON(!encoder->crtc))
return INVALID_PIPE;
return to_intel_crtc(encoder->crtc)->pipe;
@@ -12286,7 +12409,10 @@ static bool intel_crt_present(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (IS_ULT(dev))
+ if (INTEL_INFO(dev)->gen >= 9)
+ return false;
+
+ if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
return false;
if (IS_CHERRYVIEW(dev))
@@ -12430,7 +12556,7 @@ static void intel_setup_outputs(struct drm_device *dev)
if (SUPPORTS_TV(dev))
intel_tv_init(dev);
- intel_edp_psr_init(dev);
+ intel_psr_init(dev);
for_each_intel_encoder(dev, encoder) {
encoder->base.possible_crtcs = encoder->crtc_mask;
@@ -12634,16 +12760,22 @@ static void intel_init_display(struct drm_device *dev)
if (HAS_DDI(dev)) {
dev_priv->display.get_pipe_config = haswell_get_pipe_config;
dev_priv->display.get_plane_config = ironlake_get_plane_config;
- dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
+ dev_priv->display.crtc_compute_clock =
+ haswell_crtc_compute_clock;
dev_priv->display.crtc_enable = haswell_crtc_enable;
dev_priv->display.crtc_disable = haswell_crtc_disable;
dev_priv->display.off = ironlake_crtc_off;
- dev_priv->display.update_primary_plane =
- ironlake_update_primary_plane;
+ if (INTEL_INFO(dev)->gen >= 9)
+ dev_priv->display.update_primary_plane =
+ skylake_update_primary_plane;
+ else
+ dev_priv->display.update_primary_plane =
+ ironlake_update_primary_plane;
} else if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
dev_priv->display.get_plane_config = ironlake_get_plane_config;
- dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
+ dev_priv->display.crtc_compute_clock =
+ ironlake_crtc_compute_clock;
dev_priv->display.crtc_enable = ironlake_crtc_enable;
dev_priv->display.crtc_disable = ironlake_crtc_disable;
dev_priv->display.off = ironlake_crtc_off;
@@ -12652,7 +12784,7 @@ static void intel_init_display(struct drm_device *dev)
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
dev_priv->display.get_plane_config = i9xx_get_plane_config;
- dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
+ dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
dev_priv->display.crtc_enable = valleyview_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
dev_priv->display.off = i9xx_crtc_off;
@@ -12661,7 +12793,7 @@ static void intel_init_display(struct drm_device *dev)
} else {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
dev_priv->display.get_plane_config = i9xx_get_plane_config;
- dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
+ dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
dev_priv->display.crtc_enable = i9xx_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
dev_priv->display.off = i9xx_crtc_off;
@@ -12698,31 +12830,20 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.get_display_clock_speed =
i830_get_display_clock_speed;
- if (IS_G4X(dev)) {
- dev_priv->display.write_eld = g4x_write_eld;
- } else if (IS_GEN5(dev)) {
+ if (IS_GEN5(dev)) {
dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
- dev_priv->display.write_eld = ironlake_write_eld;
} else if (IS_GEN6(dev)) {
dev_priv->display.fdi_link_train = gen6_fdi_link_train;
- dev_priv->display.write_eld = ironlake_write_eld;
- dev_priv->display.modeset_global_resources =
- snb_modeset_global_resources;
} else if (IS_IVYBRIDGE(dev)) {
/* FIXME: detect B0+ stepping and use auto training */
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
- dev_priv->display.write_eld = ironlake_write_eld;
dev_priv->display.modeset_global_resources =
ivb_modeset_global_resources;
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
dev_priv->display.fdi_link_train = hsw_fdi_link_train;
- dev_priv->display.write_eld = haswell_write_eld;
- dev_priv->display.modeset_global_resources =
- haswell_modeset_global_resources;
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.modeset_global_resources =
valleyview_modeset_global_resources;
- dev_priv->display.write_eld = ironlake_write_eld;
}
/* Default just returns -ENODEV to indicate unsupported */
@@ -12749,6 +12870,9 @@ static void intel_init_display(struct drm_device *dev)
case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
dev_priv->display.queue_flip = intel_gen7_queue_flip;
break;
+ case 9:
+ dev_priv->display.queue_flip = intel_gen9_queue_flip;
+ break;
}
intel_panel_init_backlight_funcs(dev);
@@ -12953,11 +13077,6 @@ void intel_modeset_init_hw(struct drm_device *dev)
intel_enable_gt_powersave(dev);
}
-void intel_modeset_suspend_hw(struct drm_device *dev)
-{
- intel_suspend_hw(dev);
-}
-
void intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -12983,6 +13102,7 @@ void intel_modeset_init(struct drm_device *dev)
return;
intel_init_display(dev);
+ intel_init_audio(dev);
if (IS_GEN2(dev)) {
dev->mode_config.max_width = 2048;
@@ -13293,7 +13413,7 @@ void i915_redisable_vga(struct drm_device *dev)
* level, just check if the power well is enabled instead of trying to
* follow the "don't touch the power well if we don't need it" policy
* the rest of the driver uses. */
- if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_VGA))
+ if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_VGA))
return;
i915_redisable_vga_power_on(dev);
@@ -13337,18 +13457,21 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
- pll->on = pll->get_hw_state(dev_priv, pll, &pll->hw_state);
+ pll->on = pll->get_hw_state(dev_priv, pll,
+ &pll->config.hw_state);
pll->active = 0;
+ pll->config.crtc_mask = 0;
for_each_intel_crtc(dev, crtc) {
- if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
+ if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) {
pll->active++;
+ pll->config.crtc_mask |= 1 << crtc->pipe;
+ }
}
- pll->refcount = pll->active;
- DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n",
- pll->name, pll->refcount, pll->on);
+ DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
+ pll->name, pll->config.crtc_mask, pll->on);
- if (pll->refcount)
+ if (pll->config.crtc_mask)
intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
}
@@ -13438,7 +13561,9 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
pll->on = false;
}
- if (HAS_PCH_SPLIT(dev))
+ if (IS_GEN9(dev))
+ skl_wm_get_hw_state(dev);
+ else if (HAS_PCH_SPLIT(dev))
ilk_wm_get_hw_state(dev);
if (force_restore) {
@@ -13452,8 +13577,8 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_crtc *crtc =
dev_priv->pipe_to_crtc_mapping[pipe];
- __intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
- crtc->primary->fb);
+ intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
+ crtc->primary->fb);
}
} else {
intel_modeset_update_staged_output_state(dev);
@@ -13464,6 +13589,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
void intel_modeset_gem_init(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *c;
struct drm_i915_gem_object *obj;
@@ -13471,6 +13597,16 @@ void intel_modeset_gem_init(struct drm_device *dev)
intel_init_gt_powersave(dev);
mutex_unlock(&dev->struct_mutex);
+ /*
+ * There may be no VBT; and if the BIOS enabled SSC we can
+ * just keep using it to avoid unnecessary flicker. But if the
+ * BIOS isn't using it, don't assume it will work even if the VBT
+ * indicates as much.
+ */
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ dev_priv->vbt.lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
+ DREF_SSC1_ENABLE);
+
intel_modeset_init_hw(dev);
intel_setup_overlay(dev);
@@ -13486,7 +13622,9 @@ void intel_modeset_gem_init(struct drm_device *dev)
if (obj == NULL)
continue;
- if (intel_pin_and_fence_fb_obj(dev, obj, NULL)) {
+ if (intel_pin_and_fence_fb_obj(c->primary,
+ c->primary->fb,
+ NULL)) {
DRM_ERROR("failed to pin boot fb on pipe %d\n",
to_intel_crtc(c)->pipe);
drm_framebuffer_unreference(c->primary->fb);
@@ -13494,6 +13632,8 @@ void intel_modeset_gem_init(struct drm_device *dev)
}
}
mutex_unlock(&dev->struct_mutex);
+
+ intel_backlight_register(dev);
}
void intel_connector_unregister(struct intel_connector *intel_connector)
@@ -13509,14 +13649,16 @@ void intel_modeset_cleanup(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
+ intel_disable_gt_powersave(dev);
+
+ intel_backlight_unregister(dev);
+
/*
* Interrupts and polling as the first thing to avoid creating havoc.
- * Too much stuff here (turning of rps, connectors, ...) would
+ * Too much stuff here (turning off connectors, ...) would
* experience fancy races otherwise.
*/
- drm_irq_uninstall(dev);
- intel_hpd_cancel_work(dev_priv);
- dev_priv->pm._irqs_disabled = true;
+ intel_irq_uninstall(dev_priv);
/*
* Due to the hpd irq storm handling the hotplug work can re-arm the
@@ -13530,8 +13672,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_disable_fbc(dev);
- intel_disable_gt_powersave(dev);
-
ironlake_teardown_rc6(dev);
mutex_unlock(&dev->struct_mutex);
@@ -13671,8 +13811,8 @@ intel_display_capture_error_state(struct drm_device *dev)
for_each_pipe(dev_priv, i) {
error->pipe[i].power_domain_on =
- intel_display_power_enabled_unlocked(dev_priv,
- POWER_DOMAIN_PIPE(i));
+ __intel_display_power_is_enabled(dev_priv,
+ POWER_DOMAIN_PIPE(i));
if (!error->pipe[i].power_domain_on)
continue;
@@ -13707,7 +13847,7 @@ intel_display_capture_error_state(struct drm_device *dev)
enum transcoder cpu_transcoder = transcoders[i];
error->transcoder[i].power_domain_on =
- intel_display_power_enabled_unlocked(dev_priv,
+ __intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_TRANSCODER(cpu_transcoder));
if (!error->transcoder[i].power_domain_on)
continue;
@@ -13791,9 +13931,8 @@ void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
for_each_intel_crtc(dev, crtc) {
struct intel_unpin_work *work;
- unsigned long irqflags;
- spin_lock_irqsave(&dev->event_lock, irqflags);
+ spin_lock_irq(&dev->event_lock);
work = crtc->unpin_work;
@@ -13803,6 +13942,6 @@ void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
work->event = NULL;
}
- spin_unlock_irqrestore(&dev->event_lock, irqflags);
+ spin_unlock_irq(&dev->event_lock);
}
}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 4bcd9175732..5cecc20efa7 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -113,6 +113,9 @@ static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
+static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
+static void vlv_steal_power_sequencer(struct drm_device *dev,
+ enum pipe pipe);
int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
@@ -224,8 +227,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static uint32_t
-pack_aux(uint8_t *src, int src_bytes)
+uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
int i;
uint32_t v = 0;
@@ -237,8 +239,7 @@ pack_aux(uint8_t *src, int src_bytes)
return v;
}
-static void
-unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
+void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
int i;
if (dst_bytes > 4)
@@ -283,12 +284,10 @@ intel_hrawclk(struct drm_device *dev)
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
- struct intel_dp *intel_dp,
- struct edp_power_seq *out);
+ struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
- struct intel_dp *intel_dp,
- struct edp_power_seq *out);
+ struct intel_dp *intel_dp);
static void pps_lock(struct intel_dp *intel_dp)
{
@@ -322,6 +321,66 @@ static void pps_unlock(struct intel_dp *intel_dp)
intel_display_power_put(dev_priv, power_domain);
}
+static void
+vlv_power_sequencer_kick(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe = intel_dp->pps_pipe;
+ bool pll_enabled;
+ uint32_t DP;
+
+ if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
+ "skipping pipe %c power seqeuncer kick due to port %c being active\n",
+ pipe_name(pipe), port_name(intel_dig_port->port)))
+ return;
+
+ DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
+ pipe_name(pipe), port_name(intel_dig_port->port));
+
+ /* Preserve the BIOS-computed detected bit. This is
+ * supposed to be read-only.
+ */
+ DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
+ DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
+ DP |= DP_PORT_WIDTH(1);
+ DP |= DP_LINK_TRAIN_PAT_1;
+
+ if (IS_CHERRYVIEW(dev))
+ DP |= DP_PIPE_SELECT_CHV(pipe);
+ else if (pipe == PIPE_B)
+ DP |= DP_PIPEB_SELECT;
+
+ pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
+
+ /*
+ * The DPLL for the pipe must be enabled for this to work.
+ * So enable it temporarily if it's not already enabled.
+ */
+ if (!pll_enabled)
+ vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
+ &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
+
+ /*
+ * Similar magic as in intel_dp_enable_port().
+ * We _must_ do this port enable + disable trick
+ * to make this power sequencer lock onto the port.
+ * Otherwise even the VDD force bit won't work.
+ */
+ I915_WRITE(intel_dp->output_reg, DP);
+ POSTING_READ(intel_dp->output_reg);
+
+ I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
+ POSTING_READ(intel_dp->output_reg);
+
+ I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
+ POSTING_READ(intel_dp->output_reg);
+
+ if (!pll_enabled)
+ vlv_force_pll_off(dev, pipe);
+}
+
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
@@ -330,10 +389,13 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
- struct edp_power_seq power_seq;
+ enum pipe pipe;
lockdep_assert_held(&dev_priv->pps_mutex);
+ /* We should never land here with regular DP ports */
+ WARN_ON(!is_edp(intel_dp));
+
if (intel_dp->pps_pipe != INVALID_PIPE)
return intel_dp->pps_pipe;
@@ -359,18 +421,26 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
* are two power sequencers and up to two eDP ports.
*/
if (WARN_ON(pipes == 0))
- return PIPE_A;
+ pipe = PIPE_A;
+ else
+ pipe = ffs(pipes) - 1;
- intel_dp->pps_pipe = ffs(pipes) - 1;
+ vlv_steal_power_sequencer(dev, pipe);
+ intel_dp->pps_pipe = pipe;
DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
pipe_name(intel_dp->pps_pipe),
port_name(intel_dig_port->port));
/* init power sequencer on this pipe and port */
- intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
- &power_seq);
+ intel_dp_init_panel_power_sequencer(dev, intel_dp);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+
+ /*
+ * Even vdd force doesn't work until we've made
+ * the power sequencer lock in on the port.
+ */
+ vlv_power_sequencer_kick(intel_dp);
return intel_dp->pps_pipe;
}
@@ -425,7 +495,6 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct edp_power_seq power_seq;
enum port port = intel_dig_port->port;
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -453,9 +522,8 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
port_name(port), pipe_name(intel_dp->pps_pipe));
- intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
- &power_seq);
+ intel_dp_init_panel_power_sequencer(dev, intel_dp);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
@@ -550,6 +618,10 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
+ if (IS_VALLEYVIEW(dev) &&
+ intel_dp->pps_pipe == INVALID_PIPE)
+ return false;
+
return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
@@ -560,6 +632,10 @@ static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
+ if (IS_VALLEYVIEW(dev) &&
+ intel_dp->pps_pipe == INVALID_PIPE)
+ return false;
+
return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}
@@ -661,6 +737,16 @@ static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
return index ? 0 : 100;
}
+static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+ /*
+ * SKL doesn't need us to program the AUX clock divider (the hardware
+ * derives the clock from CDCLK automatically). We still implement the
+ * get_aux_clock_divider vfunc to plug into the existing code.
+ */
+ return index ? 0 : 1;
+}
+
static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
bool has_aux_irq,
int send_bytes,
@@ -691,9 +777,24 @@ static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}
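+/* SKL AUX_CTL has no 2x bit-clock divider or precharge fields; only timeout, message size and sync pulse count are programmed */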
+static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
+ bool has_aux_irq,
+ int send_bytes,
+ uint32_t unused)
+{
+ return DP_AUX_CH_CTL_SEND_BUSY |
+ DP_AUX_CH_CTL_DONE |
+ (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_TIME_OUT_1600us |
+ DP_AUX_CH_CTL_RECEIVE_ERROR |
+ (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
+}
+
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
- uint8_t *send, int send_bytes,
+ const uint8_t *send, int send_bytes,
uint8_t *recv, int recv_size)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@@ -760,7 +861,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
/* Load the send data into the aux channel data registers */
for (i = 0; i < send_bytes; i += 4)
I915_WRITE(ch_data + i,
- pack_aux(send + i, send_bytes - i));
+ intel_dp_pack_aux(send + i,
+ send_bytes - i));
/* Send the command and wait for it to complete */
I915_WRITE(ch_ctl, send_ctl);
@@ -814,8 +916,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
recv_bytes = recv_size;
for (i = 0; i < recv_bytes; i += 4)
- unpack_aux(I915_READ(ch_data + i),
- recv + i, recv_bytes - i);
+ intel_dp_unpack_aux(I915_READ(ch_data + i),
+ recv + i, recv_bytes - i);
ret = recv_bytes;
out:
@@ -925,7 +1027,16 @@ intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
BUG();
}
- if (!HAS_DDI(dev))
+ /*
+ * The AUX_CTL register is usually DP_CTL + 0x10.
+ *
+ * On Haswell and Broadwell though:
+ * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
+ * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
+ *
+ * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
+ */
+ if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
intel_dp->aux.name = name;
@@ -963,6 +1074,33 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector)
}
static void
+skl_edp_set_pll_config(struct intel_crtc_config *pipe_config, int link_bw)
+{
+ u32 ctrl1;
+
+ pipe_config->ddi_pll_sel = SKL_DPLL0;
+ pipe_config->dpll_hw_state.cfgcr1 = 0;
+ pipe_config->dpll_hw_state.cfgcr2 = 0;
+
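+ /* map the DP link bandwidth onto DPLL0's link-rate field */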
+ ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
+ switch (link_bw) {
+ case DP_LINK_BW_1_62:
+ ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
+ SKL_DPLL0);
+ break;
+ case DP_LINK_BW_2_7:
+ ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
+ SKL_DPLL0);
+ break;
+ case DP_LINK_BW_5_4:
+ ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
+ SKL_DPLL0);
+ break;
+ }
+ pipe_config->dpll_hw_state.ctrl1 = ctrl1;
+}
+
+static void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_config *pipe_config, int link_bw)
{
switch (link_bw) {
@@ -1139,7 +1277,9 @@ found:
&pipe_config->dp_m2_n2);
}
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_SKYLAKE(dev) && is_edp(intel_dp))
+ skl_edp_set_pll_config(pipe_config, intel_dp->link_bw);
+ else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
else
intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
@@ -1212,12 +1352,8 @@ static void intel_dp_prepare(struct intel_encoder *encoder)
intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
- if (crtc->config.has_audio) {
- DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
- pipe_name(crtc->pipe));
+ if (crtc->config.has_audio)
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
- intel_write_eld(&encoder->base, adjusted_mode);
- }
/* Split out the IBX/CPU vs CPT settings */
@@ -1367,6 +1503,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
if (!is_edp(intel_dp))
return false;
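+ /* cancel any pending vdd-off work so it can't race with this vdd reference */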
+ cancel_delayed_work(&intel_dp->panel_vdd_work);
intel_dp->want_panel_vdd = true;
if (edp_have_panel_vdd(intel_dp))
@@ -1375,7 +1512,8 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
- DRM_DEBUG_KMS("Turning eDP VDD on\n");
+ DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
+ port_name(intel_dig_port->port));
if (!edp_have_panel_power(intel_dp))
wait_panel_power_cycle(intel_dp);
@@ -1394,7 +1532,8 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
* If the panel wasn't on, delay before accessing aux channel
*/
if (!edp_have_panel_power(intel_dp)) {
- DRM_DEBUG_KMS("eDP was not running\n");
+ DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
+ port_name(intel_dig_port->port));
msleep(intel_dp->panel_power_up_delay);
}
@@ -1419,7 +1558,8 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
vdd = edp_panel_vdd_on(intel_dp);
pps_unlock(intel_dp);
- WARN(!vdd, "eDP VDD already requested on\n");
+ WARN(!vdd, "eDP port %c VDD already requested on\n",
+ port_name(dp_to_dig_port(intel_dp)->port));
}
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
@@ -1440,7 +1580,8 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
if (!edp_have_panel_vdd(intel_dp))
return;
- DRM_DEBUG_KMS("Turning eDP VDD off\n");
+ DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
+ port_name(intel_dig_port->port));
pp = ironlake_get_pp_control(intel_dp);
pp &= ~EDP_FORCE_VDD;
@@ -1501,7 +1642,8 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
if (!is_edp(intel_dp))
return;
- WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");
+ WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
+ port_name(dp_to_dig_port(intel_dp)->port));
intel_dp->want_panel_vdd = false;
@@ -1511,40 +1653,25 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
edp_panel_vdd_schedule_off(intel_dp);
}
-/*
- * Must be paired with intel_edp_panel_vdd_on().
- * Nested calls to these functions are not allowed since
- * we drop the lock. Caller must use some higher level
- * locking to prevent nested calls from other threads.
- */
-static void intel_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
-{
- if (!is_edp(intel_dp))
- return;
-
- pps_lock(intel_dp);
- edp_panel_vdd_off(intel_dp, sync);
- pps_unlock(intel_dp);
-}
-
-void intel_edp_panel_on(struct intel_dp *intel_dp)
+static void edp_panel_on(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
u32 pp_ctrl_reg;
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
if (!is_edp(intel_dp))
return;
- DRM_DEBUG_KMS("Turn eDP power on\n");
+ DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
+ port_name(dp_to_dig_port(intel_dp)->port));
- pps_lock(intel_dp);
-
- if (edp_have_panel_power(intel_dp)) {
- DRM_DEBUG_KMS("eDP power already on\n");
- goto out;
- }
+ if (WARN(edp_have_panel_power(intel_dp),
+ "eDP port %c panel power already on\n",
+ port_name(dp_to_dig_port(intel_dp)->port)))
+ return;
wait_panel_power_cycle(intel_dp);
@@ -1572,12 +1699,20 @@ void intel_edp_panel_on(struct intel_dp *intel_dp)
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
}
+}
+
+void intel_edp_panel_on(struct intel_dp *intel_dp)
+{
+ if (!is_edp(intel_dp))
+ return;
- out:
+ pps_lock(intel_dp);
+ edp_panel_on(intel_dp);
pps_unlock(intel_dp);
}
-void intel_edp_panel_off(struct intel_dp *intel_dp)
+
+static void edp_panel_off(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
@@ -1587,14 +1722,16 @@ void intel_edp_panel_off(struct intel_dp *intel_dp)
u32 pp;
u32 pp_ctrl_reg;
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
if (!is_edp(intel_dp))
return;
- DRM_DEBUG_KMS("Turn eDP power off\n");
-
- pps_lock(intel_dp);
+ DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
+ port_name(dp_to_dig_port(intel_dp)->port));
- WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
+ WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
+ port_name(dp_to_dig_port(intel_dp)->port));
pp = ironlake_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
@@ -1615,7 +1752,15 @@ void intel_edp_panel_off(struct intel_dp *intel_dp)
/* We got a reference when we enabled the VDD. */
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_put(dev_priv, power_domain);
+}
+
+void intel_edp_panel_off(struct intel_dp *intel_dp)
+{
+ if (!is_edp(intel_dp))
+ return;
+ pps_lock(intel_dp);
+ edp_panel_off(intel_dp);
pps_unlock(intel_dp);
}
@@ -1819,7 +1964,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
u32 tmp;
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_enabled(dev_priv, power_domain))
+ if (!intel_display_power_is_enabled(dev_priv, power_domain))
return false;
tmp = I915_READ(intel_dp->output_reg);
@@ -1951,368 +2096,14 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
}
}
-static bool is_edp_psr(struct intel_dp *intel_dp)
-{
- return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
-}
-
-static bool intel_edp_is_psr_enabled(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!HAS_PSR(dev))
- return false;
-
- return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
-}
-
-static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
- struct edp_vsc_psr *vsc_psr)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
- u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
- u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
- uint32_t *data = (uint32_t *) vsc_psr;
- unsigned int i;
-
- /* As per BSPec (Pipe Video Data Island Packet), we need to disable
- the video DIP being updated before program video DIP data buffer
- registers for DIP being updated. */
- I915_WRITE(ctl_reg, 0);
- POSTING_READ(ctl_reg);
-
- for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
- if (i < sizeof(struct edp_vsc_psr))
- I915_WRITE(data_reg + i, *data++);
- else
- I915_WRITE(data_reg + i, 0);
- }
-
- I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
- POSTING_READ(ctl_reg);
-}
-
-static void intel_edp_psr_setup(struct intel_dp *intel_dp)
-{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct edp_vsc_psr psr_vsc;
-
- /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
- memset(&psr_vsc, 0, sizeof(psr_vsc));
- psr_vsc.sdp_header.HB0 = 0;
- psr_vsc.sdp_header.HB1 = 0x7;
- psr_vsc.sdp_header.HB2 = 0x2;
- psr_vsc.sdp_header.HB3 = 0x8;
- intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
-
- /* Avoid continuous PSR exit by masking memup and hpd */
- I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
- EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
-}
-
-static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t aux_clock_divider;
- int precharge = 0x3;
- int msg_size = 5; /* Header(4) + Message(1) */
- bool only_standby = false;
-
- aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
-
- if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
- only_standby = true;
-
- /* Enable PSR in sink */
- if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
- DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
- else
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
- DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
-
- /* Setup AUX registers */
- I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND);
- I915_WRITE(EDP_PSR_AUX_DATA2(dev), EDP_PSR_DPCD_NORMAL_OPERATION);
- I915_WRITE(EDP_PSR_AUX_CTL(dev),
- DP_AUX_CH_CTL_TIME_OUT_400us |
- (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
- (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
- (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
-}
-
-static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t max_sleep_time = 0x1f;
- uint32_t idle_frames = 1;
- uint32_t val = 0x0;
- const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
- bool only_standby = false;
-
- if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
- only_standby = true;
-
- if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
- val |= EDP_PSR_LINK_STANDBY;
- val |= EDP_PSR_TP2_TP3_TIME_0us;
- val |= EDP_PSR_TP1_TIME_0us;
- val |= EDP_PSR_SKIP_AUX_EXIT;
- val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
- } else
- val |= EDP_PSR_LINK_DISABLE;
-
- I915_WRITE(EDP_PSR_CTL(dev), val |
- (IS_BROADWELL(dev) ? 0 : link_entry_time) |
- max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
- idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
- EDP_PSR_ENABLE);
-}
-
-static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = dig_port->base.base.crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- lockdep_assert_held(&dev_priv->psr.lock);
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
- WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
-
- dev_priv->psr.source_ok = false;
-
- if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
- DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
- return false;
- }
-
- if (!i915.enable_psr) {
- DRM_DEBUG_KMS("PSR disable by flag\n");
- return false;
- }
-
- /* Below limitations aren't valid for Broadwell */
- if (IS_BROADWELL(dev))
- goto out;
-
- if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
- S3D_ENABLE) {
- DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
- return false;
- }
-
- if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
- DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
- return false;
- }
-
- out:
- dev_priv->psr.source_ok = true;
- return true;
-}
-
-static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
- WARN_ON(dev_priv->psr.active);
- lockdep_assert_held(&dev_priv->psr.lock);
-
- /* Enable PSR on the panel */
- intel_edp_psr_enable_sink(intel_dp);
-
- /* Enable PSR on the host */
- intel_edp_psr_enable_source(intel_dp);
-
- dev_priv->psr.active = true;
-}
-
-void intel_edp_psr_enable(struct intel_dp *intel_dp)
-{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!HAS_PSR(dev)) {
- DRM_DEBUG_KMS("PSR not supported on this platform\n");
- return;
- }
-
- if (!is_edp_psr(intel_dp)) {
- DRM_DEBUG_KMS("PSR not supported by this panel\n");
- return;
- }
-
- mutex_lock(&dev_priv->psr.lock);
- if (dev_priv->psr.enabled) {
- DRM_DEBUG_KMS("PSR already in use\n");
- mutex_unlock(&dev_priv->psr.lock);
- return;
- }
-
- dev_priv->psr.busy_frontbuffer_bits = 0;
-
- /* Setup PSR once */
- intel_edp_psr_setup(intel_dp);
-
- if (intel_edp_psr_match_conditions(intel_dp))
- dev_priv->psr.enabled = intel_dp;
- mutex_unlock(&dev_priv->psr.lock);
-}
-
-void intel_edp_psr_disable(struct intel_dp *intel_dp)
-{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- mutex_lock(&dev_priv->psr.lock);
- if (!dev_priv->psr.enabled) {
- mutex_unlock(&dev_priv->psr.lock);
- return;
- }
-
- if (dev_priv->psr.active) {
- I915_WRITE(EDP_PSR_CTL(dev),
- I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
-
- /* Wait till PSR is idle */
- if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
- EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
- DRM_ERROR("Timed out waiting for PSR Idle State\n");
-
- dev_priv->psr.active = false;
- } else {
- WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
- }
-
- dev_priv->psr.enabled = NULL;
- mutex_unlock(&dev_priv->psr.lock);
-
- cancel_delayed_work_sync(&dev_priv->psr.work);
-}
-
-static void intel_edp_psr_work(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), psr.work.work);
- struct intel_dp *intel_dp = dev_priv->psr.enabled;
-
- mutex_lock(&dev_priv->psr.lock);
- intel_dp = dev_priv->psr.enabled;
-
- if (!intel_dp)
- goto unlock;
-
- /*
- * The delayed work can race with an invalidate hence we need to
- * recheck. Since psr_flush first clears this and then reschedules we
- * won't ever miss a flush when bailing out here.
- */
- if (dev_priv->psr.busy_frontbuffer_bits)
- goto unlock;
-
- intel_edp_psr_do_enable(intel_dp);
-unlock:
- mutex_unlock(&dev_priv->psr.lock);
-}
-
-static void intel_edp_psr_do_exit(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->psr.active) {
- u32 val = I915_READ(EDP_PSR_CTL(dev));
-
- WARN_ON(!(val & EDP_PSR_ENABLE));
-
- I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
-
- dev_priv->psr.active = false;
- }
-
-}
-
-void intel_edp_psr_invalidate(struct drm_device *dev,
- unsigned frontbuffer_bits)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc;
- enum pipe pipe;
-
- mutex_lock(&dev_priv->psr.lock);
- if (!dev_priv->psr.enabled) {
- mutex_unlock(&dev_priv->psr.lock);
- return;
- }
-
- crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
- pipe = to_intel_crtc(crtc)->pipe;
-
- intel_edp_psr_do_exit(dev);
-
- frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
-
- dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
- mutex_unlock(&dev_priv->psr.lock);
-}
-
-void intel_edp_psr_flush(struct drm_device *dev,
- unsigned frontbuffer_bits)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc;
- enum pipe pipe;
-
- mutex_lock(&dev_priv->psr.lock);
- if (!dev_priv->psr.enabled) {
- mutex_unlock(&dev_priv->psr.lock);
- return;
- }
-
- crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
- pipe = to_intel_crtc(crtc)->pipe;
- dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
-
- /*
- * On Haswell sprite plane updates don't result in a psr invalidating
- * signal in the hardware. Which means we need to manually fake this in
- * software for all flushes, not just when we've seen a preceding
- * invalidation through frontbuffer rendering.
- */
- if (IS_HASWELL(dev) &&
- (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
- intel_edp_psr_do_exit(dev);
-
- if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
- schedule_delayed_work(&dev_priv->psr.work,
- msecs_to_jiffies(100));
- mutex_unlock(&dev_priv->psr.lock);
-}
-
-void intel_edp_psr_init(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- INIT_DELAYED_WORK(&dev_priv->psr.work, intel_edp_psr_work);
- mutex_init(&dev_priv->psr.lock);
-}
-
static void intel_disable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_device *dev = encoder->base.dev;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+
+ if (crtc->config.has_audio)
+ intel_audio_codec_disable(encoder);
/* Make sure the panel is off before trying to change the mode. But also
* ensure that we have vdd while we switch off the panel. */
@@ -2467,14 +2258,23 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
- intel_dp->DP |= DP_PORT_EN;
-
/* enable with pattern 1 (as per spec) */
_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
DP_TRAINING_PATTERN_1);
I915_WRITE(intel_dp->output_reg, intel_dp->DP);
POSTING_READ(intel_dp->output_reg);
+
+ /*
+ * Magic for VLV/CHV. We _must_ first set up the register
+ * without actually enabling the port, and then do another
+ * write to enable the port. Otherwise link training will
+ * fail when the power sequencer is freshly used for this port.
+ */
+ intel_dp->DP |= DP_PORT_EN;
+
+ I915_WRITE(intel_dp->output_reg, intel_dp->DP);
+ POSTING_READ(intel_dp->output_reg);
}
static void intel_enable_dp(struct intel_encoder *encoder)
@@ -2482,19 +2282,38 @@ static void intel_enable_dp(struct intel_encoder *encoder)
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
if (WARN_ON(dp_reg & DP_PORT_EN))
return;
+ pps_lock(intel_dp);
+
+ if (IS_VALLEYVIEW(dev))
+ vlv_init_panel_power_sequencer(intel_dp);
+
intel_dp_enable_port(intel_dp);
- intel_edp_panel_vdd_on(intel_dp);
- intel_edp_panel_on(intel_dp);
- intel_edp_panel_vdd_off(intel_dp, true);
+
+ edp_panel_vdd_on(intel_dp);
+ edp_panel_on(intel_dp);
+ edp_panel_vdd_off(intel_dp, true);
+
+ pps_unlock(intel_dp);
+
+ if (IS_VALLEYVIEW(dev))
+ vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));
+
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
+
+ if (crtc->config.has_audio) {
+ DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
+ pipe_name(crtc->pipe));
+ intel_audio_codec_enable(encoder);
+ }
}
static void g4x_enable_dp(struct intel_encoder *encoder)
@@ -2526,6 +2345,32 @@ static void g4x_pre_enable_dp(struct intel_encoder *encoder)
}
}
+static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
+ enum pipe pipe = intel_dp->pps_pipe;
+ int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
+
+ edp_panel_vdd_off_sync(intel_dp);
+
+ /*
+ * VLV seems to get confused when multiple power sequencers
+ * have the same port selected (even if only one has power/vdd
+ * enabled). The failure manifests as vlv_wait_port_ready() failing.
+ * CHV, on the other hand, doesn't seem to mind having the same port
+ * selected in multiple power sequencers, but let's always clear the
+ * port select when logically disconnecting a power sequencer
+ * from a port.
+ */
+ DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
+ pipe_name(pipe), port_name(intel_dig_port->port));
+ I915_WRITE(pp_on_reg, 0);
+ POSTING_READ(pp_on_reg);
+
+ intel_dp->pps_pipe = INVALID_PIPE;
+}
+
static void vlv_steal_power_sequencer(struct drm_device *dev,
enum pipe pipe)
{
@@ -2534,6 +2379,9 @@ static void vlv_steal_power_sequencer(struct drm_device *dev,
lockdep_assert_held(&dev_priv->pps_mutex);
+ if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+ return;
+
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
struct intel_dp *intel_dp;
@@ -2551,10 +2399,12 @@ static void vlv_steal_power_sequencer(struct drm_device *dev,
DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
pipe_name(pipe), port_name(port));
- /* make sure vdd is off before we steal it */
- edp_panel_vdd_off_sync(intel_dp);
+ WARN(encoder->connectors_active,
+ "stealing pipe %c power sequencer from active eDP port %c\n",
+ pipe_name(pipe), port_name(port));
- intel_dp->pps_pipe = INVALID_PIPE;
+ /* make sure vdd is off before we steal it */
+ vlv_detach_power_sequencer(intel_dp);
}
}
@@ -2565,10 +2415,12 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- struct edp_power_seq power_seq;
lockdep_assert_held(&dev_priv->pps_mutex);
+ if (!is_edp(intel_dp))
+ return;
+
if (intel_dp->pps_pipe == crtc->pipe)
return;
@@ -2578,7 +2430,7 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
* we still have control of it.
*/
if (intel_dp->pps_pipe != INVALID_PIPE)
- edp_panel_vdd_off_sync(intel_dp);
+ vlv_detach_power_sequencer(intel_dp);
/*
* We may be stealing the power
@@ -2593,9 +2445,8 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
/* init power sequencer on this pipe and port */
- intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
- &power_seq);
+ intel_dp_init_panel_power_sequencer(dev, intel_dp);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
@@ -2624,15 +2475,7 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->dpio_lock);
- if (is_edp(intel_dp)) {
- pps_lock(intel_dp);
- vlv_init_panel_power_sequencer(intel_dp);
- pps_unlock(intel_dp);
- }
-
intel_enable_dp(encoder);
-
- vlv_wait_port_ready(dev_priv, dport);
}
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
@@ -2680,6 +2523,15 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder)
mutex_lock(&dev_priv->dpio_lock);
+ /* allow hardware to manage TX FIFO reset source */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
+ val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+ val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+
/* Deassert soft data lane reset*/
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
@@ -2715,15 +2567,7 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->dpio_lock);
- if (is_edp(intel_dp)) {
- pps_lock(intel_dp);
- vlv_init_panel_power_sequencer(intel_dp);
- pps_unlock(intel_dp);
- }
-
intel_enable_dp(encoder);
-
- vlv_wait_port_ready(dev_priv, dport);
}
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
@@ -2843,7 +2687,9 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
enum port port = dp_to_dig_port(intel_dp)->port;
- if (IS_VALLEYVIEW(dev))
+ if (INTEL_INFO(dev)->gen >= 9)
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+ else if (IS_VALLEYVIEW(dev))
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
else if (IS_GEN7(dev) && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
@@ -2859,7 +2705,18 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
enum port port = dp_to_dig_port(intel_dp)->port;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ if (INTEL_INFO(dev)->gen >= 9) {
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+ return DP_TRAIN_PRE_EMPH_LEVEL_3;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+ return DP_TRAIN_PRE_EMPH_LEVEL_2;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
+ return DP_TRAIN_PRE_EMPH_LEVEL_1;
+ default:
+ return DP_TRAIN_PRE_EMPH_LEVEL_0;
+ }
+ } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
return DP_TRAIN_PRE_EMPH_LEVEL_3;
@@ -3095,12 +2952,26 @@ static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
/* Clear calc init */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+ val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
+ val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+ val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
+ val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
+ val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
+ val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
+ val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
+ val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
+
/* Program swing deemph */
for (i = 0; i < 4; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
@@ -3341,7 +3212,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
uint32_t signal_levels, mask;
uint8_t train_set = intel_dp->train_set[0];
- if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
signal_levels = intel_hsw_signal_levels(train_set);
mask = DDI_BUF_EMP_MASK;
} else if (IS_CHERRYVIEW(dev)) {
@@ -3605,7 +3476,6 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
/* Try 5 times, then try clock recovery if that fails */
if (tries > 5) {
- intel_dp_link_down(intel_dp);
intel_dp_start_link_train(intel_dp);
intel_dp_set_link_train(intel_dp, &DP,
training_pattern |
@@ -3763,8 +3633,6 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
return;
- intel_edp_panel_vdd_on(intel_dp);
-
if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
@@ -3772,8 +3640,6 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
-
- intel_edp_panel_vdd_off(intel_dp, false);
}
static bool
@@ -3787,7 +3653,6 @@ intel_dp_probe_mst(struct intel_dp *intel_dp)
if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
return false;
- intel_edp_panel_vdd_on(intel_dp);
if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
if (buf[0] & DP_MST_CAP) {
DRM_DEBUG_KMS("Sink is MST capable\n");
@@ -3797,7 +3662,6 @@ intel_dp_probe_mst(struct intel_dp *intel_dp)
intel_dp->is_mst = false;
}
}
- intel_edp_panel_vdd_off(intel_dp, false);
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
return intel_dp->is_mst;
@@ -3809,26 +3673,48 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
struct drm_device *dev = intel_dig_port->base.base.dev;
struct intel_crtc *intel_crtc =
to_intel_crtc(intel_dig_port->base.base.crtc);
- u8 buf[1];
+ u8 buf;
+ int test_crc_count;
+ int attempts = 6;
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, buf) < 0)
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
return -EIO;
- if (!(buf[0] & DP_TEST_CRC_SUPPORTED))
+ if (!(buf & DP_TEST_CRC_SUPPORTED))
return -ENOTTY;
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
+ return -EIO;
+
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
- DP_TEST_SINK_START) < 0)
+ buf | DP_TEST_SINK_START) < 0)
+ return -EIO;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
return -EIO;
+ test_crc_count = buf & DP_TEST_COUNT_MASK;
- /* Wait 2 vblanks to be sure we will have the correct CRC value */
- intel_wait_for_vblank(dev, intel_crtc->pipe);
- intel_wait_for_vblank(dev, intel_crtc->pipe);
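+ /* poll until the sink's CRC count changes, waiting one vblank per attempt */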
+ do {
+ if (drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_TEST_SINK_MISC, &buf) < 0)
+ return -EIO;
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+ } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
+
+ if (attempts == 0) {
+ DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
+ return -ETIMEDOUT;
+ }
if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
return -EIO;
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 0);
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
+ return -EIO;
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
+ buf & ~DP_TEST_SINK_START) < 0)
+ return -EIO;
+
return 0;
}
@@ -4456,9 +4342,52 @@ static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
pps_unlock(intel_dp);
}
+static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum intel_display_power_domain power_domain;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ if (!edp_have_panel_vdd(intel_dp))
+ return;
+
+ /*
+ * The VDD bit needs a power domain reference, so if the bit is
+ * already enabled when we boot or resume, grab this reference and
+ * schedule a vdd off, so we don't hold on to the reference
+ * indefinitely.
+ */
+ DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
+ power_domain = intel_display_port_power_domain(&intel_dig_port->base);
+ intel_display_power_get(dev_priv, power_domain);
+
+ edp_panel_vdd_schedule_off(intel_dp);
+}
+
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
- intel_edp_panel_vdd_sanitize(to_intel_encoder(encoder));
+ struct intel_dp *intel_dp;
+
+ if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
+ return;
+
+ intel_dp = enc_to_intel_dp(encoder);
+
+ pps_lock(intel_dp);
+
+ /*
+ * Read out the current power sequencer assignment,
+ * in case the BIOS did something with it.
+ */
+ if (IS_VALLEYVIEW(encoder->dev))
+ vlv_initial_power_sequencer_setup(intel_dp);
+
+ intel_edp_panel_vdd_sanitize(intel_dp);
+
+ pps_unlock(intel_dp);
}
static const struct drm_connector_funcs intel_dp_connector_funcs = {
@@ -4645,16 +4574,20 @@ static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
- struct intel_dp *intel_dp,
- struct edp_power_seq *out)
+ struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct edp_power_seq cur, vbt, spec, final;
+ struct edp_power_seq cur, vbt, spec,
+ *final = &intel_dp->pps_delays;
u32 pp_on, pp_off, pp_div, pp;
int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
lockdep_assert_held(&dev_priv->pps_mutex);
+ /* already initialized? */
+ if (final->t11_t12 != 0)
+ return;
+
if (HAS_PCH_SPLIT(dev)) {
pp_ctrl_reg = PCH_PP_CONTROL;
pp_on_reg = PCH_PP_ON_DELAYS;
@@ -4716,7 +4649,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
/* Use the max of the register settings and vbt. If both are
* unset, fall back to the spec limits. */
-#define assign_final(field) final.field = (max(cur.field, vbt.field) == 0 ? \
+#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
spec.field : \
max(cur.field, vbt.field))
assign_final(t1_t3);
@@ -4726,7 +4659,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
assign_final(t11_t12);
#undef assign_final
-#define get_delay(field) (DIV_ROUND_UP(final.field, 10))
+#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
intel_dp->panel_power_up_delay = get_delay(t1_t3);
intel_dp->backlight_on_delay = get_delay(t8);
intel_dp->backlight_off_delay = get_delay(t9);
@@ -4740,21 +4673,18 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
-
- if (out)
- *out = final;
}
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
- struct intel_dp *intel_dp,
- struct edp_power_seq *seq)
+ struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp_on, pp_off, pp_div, port_sel = 0;
int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
int pp_on_reg, pp_off_reg, pp_div_reg;
enum port port = dp_to_dig_port(intel_dp)->port;
+ const struct edp_power_seq *seq = &intel_dp->pps_delays;
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -4837,7 +4767,7 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
* hard to tell without seeing the user of this function of this code.
* Check locking and ordering once that lands.
*/
- if (INTEL_INFO(dev)->gen < 8 && intel_edp_is_psr_enabled(dev)) {
+ if (INTEL_INFO(dev)->gen < 8 && intel_psr_is_enabled(dev)) {
DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n");
return;
}
@@ -4940,40 +4870,8 @@ intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
return downclock_mode;
}
-void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder)
-{
- struct drm_device *dev = intel_encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_dp *intel_dp;
- enum intel_display_power_domain power_domain;
-
- if (intel_encoder->type != INTEL_OUTPUT_EDP)
- return;
-
- intel_dp = enc_to_intel_dp(&intel_encoder->base);
-
- pps_lock(intel_dp);
-
- if (!edp_have_panel_vdd(intel_dp))
- goto out;
- /*
- * The VDD bit needs a power domain reference, so if the bit is
- * already enabled when we boot or resume, grab this reference and
- * schedule a vdd off, so we don't hold on to the reference
- * indefinitely.
- */
- DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
- power_domain = intel_display_port_power_domain(intel_encoder);
- intel_display_power_get(dev_priv, power_domain);
-
- edp_panel_vdd_schedule_off(intel_dp);
- out:
- pps_unlock(intel_dp);
-}
-
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
- struct intel_connector *intel_connector,
- struct edp_power_seq *power_seq)
+ struct intel_connector *intel_connector)
{
struct drm_connector *connector = &intel_connector->base;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@@ -4985,18 +4883,19 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
bool has_dpcd;
struct drm_display_mode *scan;
struct edid *edid;
+ enum pipe pipe = INVALID_PIPE;
intel_dp->drrs_state.type = DRRS_NOT_SUPPORTED;
if (!is_edp(intel_dp))
return true;
- intel_edp_panel_vdd_sanitize(intel_encoder);
+ pps_lock(intel_dp);
+ intel_edp_panel_vdd_sanitize(intel_dp);
+ pps_unlock(intel_dp);
/* Cache DPCD and EDID for edp. */
- intel_edp_panel_vdd_on(intel_dp);
has_dpcd = intel_dp_get_dpcd(intel_dp);
- intel_edp_panel_vdd_off(intel_dp, false);
if (has_dpcd) {
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
@@ -5011,7 +4910,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
/* We now know it's not a ghost, init power sequence regs. */
pps_lock(intel_dp);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, power_seq);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
pps_unlock(intel_dp);
mutex_lock(&dev->mode_config.mutex);
@@ -5053,11 +4952,30 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
if (IS_VALLEYVIEW(dev)) {
intel_dp->edp_notifier.notifier_call = edp_notify_handler;
register_reboot_notifier(&intel_dp->edp_notifier);
+
+ /*
+ * Figure out the current pipe for the initial backlight setup.
+ * If the current pipe isn't valid, try the PPS pipe, and if that
+ * fails just assume pipe A.
+ */
+ if (IS_CHERRYVIEW(dev))
+ pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
+ else
+ pipe = PORT_TO_PIPE(intel_dp->DP);
+
+ if (pipe != PIPE_A && pipe != PIPE_B)
+ pipe = intel_dp->pps_pipe;
+
+ if (pipe != PIPE_A && pipe != PIPE_B)
+ pipe = PIPE_A;
+
+ DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
+ pipe_name(pipe));
}
intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
intel_connector->panel.backlight_power = intel_edp_backlight_power;
- intel_panel_setup_backlight(connector);
+ intel_panel_setup_backlight(connector, pipe);
return true;
}
@@ -5072,13 +4990,14 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_dig_port->port;
- struct edp_power_seq power_seq = { 0 };
int type;
intel_dp->pps_pipe = INVALID_PIPE;
/* intel_dp vfuncs */
- if (IS_VALLEYVIEW(dev))
+ if (INTEL_INFO(dev)->gen >= 9)
+ intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
+ else if (IS_VALLEYVIEW(dev))
intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
@@ -5087,7 +5006,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
else
intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
- intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
+ if (INTEL_INFO(dev)->gen >= 9)
+ intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
+ else
+ intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
/* Preserve the current hw state. */
intel_dp->DP = I915_READ(intel_dp->output_reg);
@@ -5106,6 +5028,11 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
if (type == DRM_MODE_CONNECTOR_eDP)
intel_encoder->type = INTEL_OUTPUT_EDP;
+ /* eDP only on port B and/or C on vlv/chv */
+ if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
+ port != PORT_B && port != PORT_C))
+ return false;
+
DRM_DEBUG_KMS("Adding %s connector on port %c\n",
type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
port_name(port));
@@ -5148,13 +5075,11 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
if (is_edp(intel_dp)) {
pps_lock(intel_dp);
- if (IS_VALLEYVIEW(dev)) {
+ intel_dp_init_panel_power_timestamps(intel_dp);
+ if (IS_VALLEYVIEW(dev))
vlv_initial_power_sequencer_setup(intel_dp);
- } else {
- intel_dp_init_panel_power_timestamps(intel_dp);
- intel_dp_init_panel_power_sequencer(dev, intel_dp,
- &power_seq);
- }
+ else
+ intel_dp_init_panel_power_sequencer(dev, intel_dp);
pps_unlock(intel_dp);
}
@@ -5168,7 +5093,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
}
}
- if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) {
+ if (!intel_edp_init_connector(intel_dp, intel_connector)) {
drm_dp_aux_unregister(&intel_dp->aux);
if (is_edp(intel_dp)) {
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index d9a7a7865f6..7f8c6a66680 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -278,20 +278,12 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
}
static enum drm_connector_status
-intel_mst_port_dp_detect(struct drm_connector *connector)
+intel_dp_mst_detect(struct drm_connector *connector, bool force)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
- return drm_dp_mst_detect_port(&intel_dp->mst_mgr, intel_connector->port);
-}
-
-static enum drm_connector_status
-intel_dp_mst_detect(struct drm_connector *connector, bool force)
-{
- enum drm_connector_status status;
- status = intel_mst_port_dp_detect(connector);
- return status;
+ return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr, intel_connector->port);
}
static int
@@ -393,7 +385,7 @@ static void intel_connector_remove_from_fbdev(struct intel_connector *connector)
#endif
}
-static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, char *pathprop)
+static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *pathprop)
{
struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@@ -422,6 +414,8 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
intel_dp_add_properties(intel_dp, connector);
drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
+ drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
+
drm_mode_connector_set_path_property(connector, pathprop);
drm_reinit_primary_mode_group(dev);
mutex_lock(&dev->mode_config.mutex);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index ba715229a54..25fdbb16d4e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -34,6 +34,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_dp_mst_helper.h>
+#include <drm/drm_rect.h>
#define DIV_ROUND_CLOSEST_ULL(ll, d) \
({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
@@ -93,18 +94,20 @@
/* these are outputs from the chip - integrated only
external chips are via DVO or SDVO output */
-#define INTEL_OUTPUT_UNUSED 0
-#define INTEL_OUTPUT_ANALOG 1
-#define INTEL_OUTPUT_DVO 2
-#define INTEL_OUTPUT_SDVO 3
-#define INTEL_OUTPUT_LVDS 4
-#define INTEL_OUTPUT_TVOUT 5
-#define INTEL_OUTPUT_HDMI 6
-#define INTEL_OUTPUT_DISPLAYPORT 7
-#define INTEL_OUTPUT_EDP 8
-#define INTEL_OUTPUT_DSI 9
-#define INTEL_OUTPUT_UNKNOWN 10
-#define INTEL_OUTPUT_DP_MST 11
+enum intel_output_type {
+ INTEL_OUTPUT_UNUSED = 0,
+ INTEL_OUTPUT_ANALOG = 1,
+ INTEL_OUTPUT_DVO = 2,
+ INTEL_OUTPUT_SDVO = 3,
+ INTEL_OUTPUT_LVDS = 4,
+ INTEL_OUTPUT_TVOUT = 5,
+ INTEL_OUTPUT_HDMI = 6,
+ INTEL_OUTPUT_DISPLAYPORT = 7,
+ INTEL_OUTPUT_EDP = 8,
+ INTEL_OUTPUT_DSI = 9,
+ INTEL_OUTPUT_UNKNOWN = 10,
+ INTEL_OUTPUT_DP_MST = 11,
+};
#define INTEL_DVO_CHIP_NONE 0
#define INTEL_DVO_CHIP_LVDS 1
@@ -135,7 +138,7 @@ struct intel_encoder {
*/
struct intel_crtc *new_crtc;
- int type;
+ enum intel_output_type type;
unsigned int cloneable;
bool connectors_active;
void (*hot_plug)(struct intel_encoder *);
@@ -240,6 +243,17 @@ typedef struct dpll {
int p;
} intel_clock_t;
+struct intel_plane_state {
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+ struct drm_rect src;
+ struct drm_rect dst;
+ struct drm_rect clip;
+ struct drm_rect orig_src;
+ struct drm_rect orig_dst;
+ bool visible;
+};
+
struct intel_plane_config {
bool tiled;
int size;
@@ -278,6 +292,9 @@ struct intel_crtc_config {
* between pch encoders and cpu encoders. */
bool has_pch_encoder;
+ /* Are we sending infoframes on the attached port */
+ bool has_infoframe;
+
/* CPU Transcoder for the pipe. Currently this can only differ from the
* pipe on Haswell (where we have a special eDP transcoder). */
enum transcoder cpu_transcoder;
@@ -326,7 +343,10 @@ struct intel_crtc_config {
/* Selected dpll when shared or DPLL_ID_PRIVATE. */
enum intel_dpll_id shared_dpll;
- /* PORT_CLK_SEL for DDI ports. */
+ /*
+ * - PORT_CLK_SEL for DDI ports on HSW/BDW.
+ * - enum skl_dpll on SKL
+ */
uint32_t ddi_pll_sel;
/* Actual register state of the dpll, for shared dpll cross-checking. */
@@ -387,7 +407,14 @@ struct intel_pipe_wm {
struct intel_mmio_flip {
u32 seqno;
- u32 ring_id;
+ struct intel_engine_cs *ring;
+ struct work_struct work;
+};
+
+struct skl_pipe_wm {
+ struct skl_wm_level wm[8];
+ struct skl_wm_level trans_wm;
+ uint32_t linetime;
};
struct intel_crtc {
@@ -437,6 +464,8 @@ struct intel_crtc {
struct {
/* watermarks currently being used */
struct intel_pipe_wm active;
+ /* SKL wm values currently in use */
+ struct skl_pipe_wm skl_active;
} wm;
int scanline_offset;
@@ -529,6 +558,7 @@ struct intel_hdmi {
void (*set_infoframes)(struct drm_encoder *encoder,
bool enable,
struct drm_display_mode *adjusted_mode);
+ bool (*infoframe_enabled)(struct drm_encoder *encoder);
};
struct intel_dp_mst_encoder;
@@ -578,6 +608,7 @@ struct intel_dp {
* this port. Only relevant on VLV/CHV.
*/
enum pipe pps_pipe;
+ struct edp_power_seq pps_delays;
bool use_tps3;
bool can_mst; /* this port supports mst */
@@ -734,32 +765,47 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
return container_of(intel_hdmi, struct intel_digital_port, hdmi);
}
+/*
+ * Returns the number of planes for this pipe, i.e. the number of sprites + 1
+ * (the primary plane). The cursor plane is not counted.
+ */
+static inline unsigned int intel_num_planes(struct intel_crtc *crtc)
+{
+ return INTEL_INFO(crtc->base.dev)->num_sprites[crtc->pipe] + 1;
+}
-/* i915_irq.c */
-bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+/* intel_fifo_underrun.c */
+bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
enum pipe pipe, bool enable);
-bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
+bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
enum transcoder pch_transcoder,
bool enable);
+void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
+void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+ enum transcoder pch_transcoder);
+void i9xx_check_fifo_underruns(struct drm_i915_private *dev_priv);
+
+/* i915_irq.c */
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void intel_runtime_pm_disable_interrupts(struct drm_device *dev);
-void intel_runtime_pm_restore_interrupts(struct drm_device *dev);
+void gen6_reset_rps_interrupts(struct drm_device *dev);
+void gen6_enable_rps_interrupts(struct drm_device *dev);
+void gen6_disable_rps_interrupts(struct drm_device *dev);
+void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
/*
* We only use drm_irq_uninstall() at unload and VT switch, so
* this is the only thing we need to check.
*/
- return !dev_priv->pm._irqs_disabled;
+ return dev_priv->pm.irqs_enabled;
}
int intel_get_crtc_scanline(struct intel_crtc *crtc);
-void i9xx_check_fifo_underruns(struct drm_device *dev);
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv);
/* intel_crt.c */
@@ -792,11 +838,7 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config);
void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
-/* intel_display.c */
-const char *intel_output_name(int output);
-bool intel_has_pending_fb_unpin(struct drm_device *dev);
-int intel_pch_rawclk(struct drm_device *dev);
-void intel_mark_busy(struct drm_device *dev);
+/* intel_frontbuffer.c */
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring);
void intel_frontbuffer_flip_prepare(struct drm_device *dev,
@@ -806,7 +848,7 @@ void intel_frontbuffer_flip_complete(struct drm_device *dev,
void intel_frontbuffer_flush(struct drm_device *dev,
unsigned frontbuffer_bits);
/**
- * intel_frontbuffer_flip - prepare frontbuffer flip
+ * intel_frontbuffer_flip - synchronous frontbuffer flip
* @dev: DRM device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
@@ -824,6 +866,18 @@ void intel_frontbuffer_flip(struct drm_device *dev,
}
void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
+
+
+/* intel_audio.c */
+void intel_init_audio(struct drm_device *dev);
+void intel_audio_codec_enable(struct intel_encoder *encoder);
+void intel_audio_codec_disable(struct intel_encoder *encoder);
+
+/* intel_display.c */
+const char *intel_output_name(int output);
+bool intel_has_pending_fb_unpin(struct drm_device *dev);
+int intel_pch_rawclk(struct drm_device *dev);
+void intel_mark_busy(struct drm_device *dev);
void intel_mark_idle(struct drm_device *dev);
void intel_crtc_restore_mode(struct drm_crtc *crtc);
void intel_crtc_control(struct drm_crtc *crtc, bool enable);
@@ -844,7 +898,12 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file_priv);
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe);
-void intel_wait_for_vblank(struct drm_device *dev, int pipe);
+bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type);
+static inline void
+intel_wait_for_vblank(struct drm_device *dev, int pipe)
+{
+ drm_wait_one_vblank(dev, pipe);
+}
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
struct intel_digital_port *dport);
@@ -854,8 +913,8 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx);
void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old);
-int intel_pin_and_fence_fb_obj(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
+int intel_pin_and_fence_fb_obj(struct drm_plane *plane,
+ struct drm_framebuffer *fb,
struct intel_engine_cs *pipelined);
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
struct drm_framebuffer *
@@ -877,7 +936,13 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc);
void intel_put_shared_dpll(struct intel_crtc *crtc);
+void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
+ const struct dpll *dpll);
+void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe);
+
/* modesetting asserts */
+void assert_panel_unlocked(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
void assert_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state);
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
@@ -889,13 +954,12 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
-void intel_write_eld(struct drm_encoder *encoder,
- struct drm_display_mode *mode);
unsigned long intel_gen4_compute_page_offset(int *x, int *y,
unsigned int tiling_mode,
unsigned int bpp,
unsigned int pitch);
-void intel_display_handle_reset(struct drm_device *dev);
+void intel_prepare_reset(struct drm_device *dev);
+void intel_finish_reset(struct drm_device *dev);
void hsw_enable_pc8(struct drm_i915_private *dev_priv);
void hsw_disable_pc8(struct drm_i915_private *dev_priv);
void intel_dp_get_m_n(struct intel_crtc *crtc,
@@ -908,7 +972,6 @@ ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
bool intel_crtc_active(struct drm_crtc *crtc);
void hsw_enable_ips(struct intel_crtc *crtc);
void hsw_disable_ips(struct intel_crtc *crtc);
-void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
@@ -936,25 +999,18 @@ bool intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
void intel_edp_backlight_on(struct intel_dp *intel_dp);
void intel_edp_backlight_off(struct intel_dp *intel_dp);
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
-void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder);
void intel_edp_panel_on(struct intel_dp *intel_dp);
void intel_edp_panel_off(struct intel_dp *intel_dp);
-void intel_edp_psr_enable(struct intel_dp *intel_dp);
-void intel_edp_psr_disable(struct intel_dp *intel_dp);
void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate);
-void intel_edp_psr_invalidate(struct drm_device *dev,
- unsigned frontbuffer_bits);
-void intel_edp_psr_flush(struct drm_device *dev,
- unsigned frontbuffer_bits);
-void intel_edp_psr_init(struct drm_device *dev);
-
-int intel_dp_handle_hpd_irq(struct intel_digital_port *digport, bool long_hpd);
void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector);
void intel_dp_mst_suspend(struct drm_device *dev);
void intel_dp_mst_resume(struct drm_device *dev);
int intel_dp_max_link_bw(struct intel_dp *intel_dp);
void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv);
+uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
+void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes);
+
/* intel_dp_mst.c */
int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
@@ -1044,7 +1100,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *crtc,
int fitting_mode);
void intel_panel_set_backlight_acpi(struct intel_connector *connector,
u32 level, u32 max);
-int intel_panel_setup_backlight(struct drm_connector *connector);
+int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe);
void intel_panel_enable_backlight(struct intel_connector *connector);
void intel_panel_disable_backlight(struct intel_connector *connector);
void intel_panel_destroy_backlight(struct drm_connector *connector);
@@ -1054,6 +1110,41 @@ extern struct drm_display_mode *intel_find_panel_downclock(
struct drm_device *dev,
struct drm_display_mode *fixed_mode,
struct drm_connector *connector);
+void intel_backlight_register(struct drm_device *dev);
+void intel_backlight_unregister(struct drm_device *dev);
+
+
+/* intel_psr.c */
+bool intel_psr_is_enabled(struct drm_device *dev);
+void intel_psr_enable(struct intel_dp *intel_dp);
+void intel_psr_disable(struct intel_dp *intel_dp);
+void intel_psr_invalidate(struct drm_device *dev,
+ unsigned frontbuffer_bits);
+void intel_psr_flush(struct drm_device *dev,
+ unsigned frontbuffer_bits);
+void intel_psr_init(struct drm_device *dev);
+
+/* intel_runtime_pm.c */
+int intel_power_domains_init(struct drm_i915_private *);
+void intel_power_domains_fini(struct drm_i915_private *);
+void intel_power_domains_init_hw(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
+
+bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+void intel_display_power_get(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+void intel_display_power_put(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
+void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
+
+void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
/* intel_pm.c */
void intel_init_clock_gating(struct drm_device *dev);
@@ -1072,17 +1163,6 @@ bool intel_fbc_enabled(struct drm_device *dev);
void intel_update_fbc(struct drm_device *dev);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
void intel_gpu_ips_teardown(void);
-int intel_power_domains_init(struct drm_i915_private *);
-void intel_power_domains_remove(struct drm_i915_private *);
-bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain);
-bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain);
-void intel_display_power_get(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain);
-void intel_display_power_put(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain);
-void intel_power_domains_init_hw(struct drm_i915_private *dev_priv);
void intel_init_gt_powersave(struct drm_device *dev);
void intel_cleanup_gt_powersave(struct drm_device *dev);
void intel_enable_gt_powersave(struct drm_device *dev);
@@ -1093,14 +1173,10 @@ void ironlake_teardown_rc6(struct drm_device *dev);
void gen6_update_ring_freq(struct drm_device *dev);
void gen6_rps_idle(struct drm_i915_private *dev_priv);
void gen6_rps_boost(struct drm_i915_private *dev_priv);
-void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
-void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
-void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
-void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
-void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
-void intel_init_runtime_pm(struct drm_i915_private *dev_priv);
-void intel_fini_runtime_pm(struct drm_i915_private *dev_priv);
void ilk_wm_get_hw_state(struct drm_device *dev);
+void skl_wm_get_hw_state(struct drm_device *dev);
+void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
+ struct skl_ddb_allocation *ddb /* out */);
/* intel_sdvo.c */
@@ -1120,7 +1196,9 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-
+bool intel_pipe_update_start(struct intel_crtc *crtc,
+ uint32_t *start_vbl_count);
+void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count);
/* intel_tv.c */
void intel_tv_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 5bd9e09ad3c..0b184079de1 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -344,7 +344,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
DRM_DEBUG_KMS("\n");
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_enabled(dev_priv, power_domain))
+ if (!intel_display_power_is_enabled(dev_priv, power_domain))
return false;
/* XXX: this only works for one DSI output */
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 9b584f3fbb9..850cf7d6578 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -119,25 +119,25 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
goto out;
}
- /* Flush everything out, we'll be doing GTT only from now on */
- ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
- if (ret) {
- DRM_ERROR("failed to pin obj: %d\n", ret);
- goto out_unref;
- }
-
fb = __intel_framebuffer_create(dev, &mode_cmd, obj);
if (IS_ERR(fb)) {
ret = PTR_ERR(fb);
- goto out_unpin;
+ goto out_unref;
+ }
+
+ /* Flush everything out, we'll be doing GTT only from now on */
+ ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL);
+ if (ret) {
+ DRM_ERROR("failed to pin obj: %d\n", ret);
+ goto out_fb;
}
ifbdev->fb = to_intel_framebuffer(fb);
return 0;
-out_unpin:
- i915_gem_object_ggtt_unpin(obj);
+out_fb:
+ drm_framebuffer_remove(fb);
out_unref:
drm_gem_object_unreference(&obj->base);
out:
@@ -324,6 +324,7 @@ intel_fb_helper_crtc(struct drm_fb_helper *fb_helper, struct drm_crtc *crtc)
static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_crtc **crtcs,
struct drm_display_mode **modes,
+ struct drm_fb_offset *offsets,
bool *enabled, int width, int height)
{
struct drm_device *dev = fb_helper->dev;
@@ -332,6 +333,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
bool fallback = true;
int num_connectors_enabled = 0;
int num_connectors_detected = 0;
+ uint64_t conn_configured = 0, mask;
+ int pass = 0;
save_enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool),
GFP_KERNEL);
@@ -339,7 +342,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
return false;
memcpy(save_enabled, enabled, dev->mode_config.num_connector);
-
+ mask = (1 << fb_helper->connector_count) - 1;
+retry:
for (i = 0; i < fb_helper->connector_count; i++) {
struct drm_fb_helper_connector *fb_conn;
struct drm_connector *connector;
@@ -349,12 +353,19 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
fb_conn = fb_helper->connector_info[i];
connector = fb_conn->connector;
+ if (conn_configured & (1 << i))
+ continue;
+
+ if (pass == 0 && !connector->has_tile)
+ continue;
+
if (connector->status == connector_status_connected)
num_connectors_detected++;
if (!enabled[i]) {
DRM_DEBUG_KMS("connector %s not enabled, skipping\n",
connector->name);
+ conn_configured |= (1 << i);
continue;
}
@@ -373,6 +384,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n",
connector->name);
enabled[i] = false;
+ conn_configured |= (1 << i);
continue;
}
@@ -400,8 +412,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
/* try for preferred next */
if (!modes[i]) {
- DRM_DEBUG_KMS("looking for preferred mode on connector %s\n",
- connector->name);
+ DRM_DEBUG_KMS("looking for preferred mode on connector %s %d\n",
+ connector->name, connector->has_tile);
modes[i] = drm_has_preferred_mode(fb_conn, width,
height);
}
@@ -444,6 +456,12 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :"");
fallback = false;
+ conn_configured |= (1 << i);
+ }
+
+ if ((conn_configured & mask) != mask) {
+ pass++;
+ goto retry;
}
/*
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
new file mode 100644
index 00000000000..77af512d2d3
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -0,0 +1,381 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+/**
+ * DOC: fifo underrun handling
+ *
+ * The i915 driver checks for display fifo underruns using the interrupt signals
+ * provided by the hardware. This is enabled by default and fairly useful for
+ * debugging display issues, especially watermark settings.
+ *
+ * If an underrun is detected it is logged into dmesg. To avoid flooding the
+ * logs and occupying the cpu, underrun interrupts are disabled after the first
+ * occurrence until the next modeset on a given pipe.
+ *
+ * Note that underrun detection on gmch platforms is a bit uglier since there
+ * is no interrupt (even though the signalling bit is in the PIPESTAT pipe
+ * interrupt register). Also on some other platforms underrun interrupts are
+ * shared, which means that if we detect an underrun we need to disable underrun
+ * reporting on all pipes.
+ *
+ * The code also supports underrun detection on the PCH transcoder.
+ */
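As a minimal sketch of the suppress/re-arm pattern described above (not part of the patch), modeset code could wrap a pipe enable roughly like this, assuming only the intel_set_cpu_fifo_underrun_reporting() declaration added to intel_drv.h in this series; the helper name example_pipe_enable is hypothetical:

#include "i915_drv.h"
#include "intel_drv.h"

/* Hypothetical modeset helper: suppress underrun reporting while the pipe
 * is being brought up, then re-arm it once the pipe is stable. */
static void example_pipe_enable(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	/* Underruns are expected while the pipe is coming up; disable
	 * reporting first to avoid false positives in dmesg. The call
	 * returns the previous reporting state. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/* ... pipe, plane and port programming would go here ... */

	/* Re-arm reporting; the irq handler will log the first real
	 * underrun and then keep reporting disabled until the next
	 * modeset. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
}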
+
+static bool ivb_can_enable_err_int(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc;
+ enum pipe pipe;
+
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ for_each_pipe(dev_priv, pipe) {
+ crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+
+ if (crtc->cpu_fifo_underrun_disabled)
+ return false;
+ }
+
+ return true;
+}
+
+static bool cpt_can_enable_serr_int(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe;
+ struct intel_crtc *crtc;
+
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ for_each_pipe(dev_priv, pipe) {
+ crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+
+ if (crtc->pch_fifo_underrun_disabled)
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * i9xx_check_fifo_underruns - check for fifo underruns
+ * @dev_priv: i915 device instance
+ *
+ * This function checks for fifo underruns on GMCH platforms. This needs to be
+ * done manually on modeset to make sure that we catch all underruns since they
+ * do not generate an interrupt by themselves on these platforms.
+ */
+void i9xx_check_fifo_underruns(struct drm_i915_private *dev_priv)
+{
+ struct intel_crtc *crtc;
+
+ spin_lock_irq(&dev_priv->irq_lock);
+
+ for_each_intel_crtc(dev_priv->dev, crtc) {
+ u32 reg = PIPESTAT(crtc->pipe);
+ u32 pipestat;
+
+ if (crtc->cpu_fifo_underrun_disabled)
+ continue;
+
+ pipestat = I915_READ(reg) & 0xffff0000;
+ if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
+ continue;
+
+ I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
+ POSTING_READ(reg);
+
+ DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
+ }
+
+ spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe,
+ bool enable, bool old)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg = PIPESTAT(pipe);
+ u32 pipestat = I915_READ(reg) & 0xffff0000;
+
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ if (enable) {
+ I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
+ POSTING_READ(reg);
+ } else {
+ if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS)
+ DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+ }
+}
+
+static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
+ DE_PIPEB_FIFO_UNDERRUN;
+
+ if (enable)
+ ironlake_enable_display_irq(dev_priv, bit);
+ else
+ ironlake_disable_display_irq(dev_priv, bit);
+}
+
+static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe,
+ bool enable, bool old)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ if (enable) {
+ I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
+
+ if (!ivb_can_enable_err_int(dev))
+ return;
+
+ ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
+ } else {
+ ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
+
+ if (old &&
+ I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
+ DRM_ERROR("uncleared fifo underrun on pipe %c\n",
+ pipe_name(pipe));
+ }
+ }
+}
+
+static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ if (enable)
+ dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
+ else
+ dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
+ I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
+ POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+}
+
+static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum transcoder pch_transcoder,
+ bool enable)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
+ SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
+
+ if (enable)
+ ibx_enable_display_interrupt(dev_priv, bit);
+ else
+ ibx_disable_display_interrupt(dev_priv, bit);
+}
+
+static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum transcoder pch_transcoder,
+ bool enable, bool old)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (enable) {
+ I915_WRITE(SERR_INT,
+ SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
+
+ if (!cpt_can_enable_serr_int(dev))
+ return;
+
+ ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
+ } else {
+ ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
+
+ if (old && I915_READ(SERR_INT) &
+ SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
+ DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
+ transcoder_name(pch_transcoder));
+ }
+ }
+}
+
+static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ bool old;
+
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ old = !intel_crtc->cpu_fifo_underrun_disabled;
+ intel_crtc->cpu_fifo_underrun_disabled = !enable;
+
+ if (HAS_GMCH_DISPLAY(dev))
+ i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
+ else if (IS_GEN5(dev) || IS_GEN6(dev))
+ ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
+ else if (IS_GEN7(dev))
+ ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
+ else if (IS_GEN8(dev) || IS_GEN9(dev))
+ broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
+
+ return old;
+}
+
+/**
+ * intel_set_cpu_fifo_underrun_reporting - set cpu fifo underrun reporting state
+ * @dev_priv: i915 device instance
+ * @pipe: (CPU) pipe to set state for
+ * @enable: whether underruns should be reported or not
+ *
+ * This function sets the fifo underrun state for @pipe. It is used in the
+ * modeset code to avoid false positives since on many platforms underruns are
+ * expected when disabling or enabling the pipe.
+ *
+ * Notice that on some platforms disabling underrun reports for one pipe
+ * disables it for all pipes due to shared interrupts. Actual reporting is still per-pipe
+ * though.
+ *
+ * Returns the previous state of underrun reporting.
+ */
+bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool enable)
+{
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ ret = __intel_set_cpu_fifo_underrun_reporting(dev_priv->dev, pipe,
+ enable);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+ return ret;
+}
+
+static bool
+__cpu_fifo_underrun_reporting_enabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ return !intel_crtc->cpu_fifo_underrun_disabled;
+}
+
+/**
+ * intel_set_pch_fifo_underrun_reporting - set PCH fifo underrun reporting state
+ * @dev_priv: i915 device instance
+ * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
+ * @enable: whether underruns should be reported or not
+ *
+ * This function makes us disable or enable PCH fifo underruns for a specific
+ * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
+ * underrun reporting for one transcoder may also disable all the other PCH
+ * error interrupts for the other transcoders, due to the fact that there's just
+ * one interrupt mask/enable bit for all the transcoders.
+ *
+ * Returns the previous state of underrun reporting.
+ */
+bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
+ enum transcoder pch_transcoder,
+ bool enable)
+{
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ unsigned long flags;
+ bool old;
+
+ /*
+ * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
+ * has only one pch transcoder A that all pipes can use. To avoid racy
+ * pch transcoder -> pipe lookups from interrupt code simply store the
+ * underrun statistics in crtc A. Since we never expose this anywhere
+ * nor use it outside of the fifo underrun code here, using the "wrong"
+ * crtc on LPT won't cause issues.
+ */
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+
+ old = !intel_crtc->pch_fifo_underrun_disabled;
+ intel_crtc->pch_fifo_underrun_disabled = !enable;
+
+ if (HAS_PCH_IBX(dev_priv->dev))
+ ibx_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder,
+ enable);
+ else
+ cpt_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder,
+ enable, old);
+
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ return old;
+}
+
+/**
+ * intel_cpu_fifo_underrun_irq_handler - handle CPU fifo underrun interrupt
+ * @dev_priv: i915 device instance
+ * @pipe: (CPU) pipe to set state for
+ *
+ * This handles a CPU fifo underrun interrupt, generating an underrun warning
+ * into dmesg if underrun reporting is enabled and then disables the underrun
+ * interrupt to avoid an irq storm.
+ */
+void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ /* GMCH can't disable fifo underruns, filter them. */
+ if (HAS_GMCH_DISPLAY(dev_priv->dev) &&
+ !__cpu_fifo_underrun_reporting_enabled(dev_priv, pipe))
+ return;
+
+ if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false))
+ DRM_ERROR("CPU pipe %c FIFO underrun\n",
+ pipe_name(pipe));
+}
+
+/**
+ * intel_pch_fifo_underrun_irq_handler - handle PCH fifo underrun interrupt
+ * @dev_priv: i915 device instance
+ * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
+ *
+ * This handles a PCH fifo underrun interrupt, generating an underrun warning
+ * into dmesg if underrun reporting is enabled and then disables the underrun
+ * interrupt to avoid an irq storm.
+ */
+void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+ enum transcoder pch_transcoder)
+{
+ if (intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder,
+ false))
+ DRM_ERROR("PCH transcoder %c FIFO underrun\n",
+ transcoder_name(pch_transcoder));
+}
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
new file mode 100644
index 00000000000..79f6d72179c
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -0,0 +1,279 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ */
+
+/**
+ * DOC: frontbuffer tracking
+ *
+ * Many features require us to track changes to the currently active
+ * frontbuffer, especially rendering targeted at the frontbuffer.
+ *
+ * To be able to do so GEM tracks frontbuffers using a bitmask for all possible
+ * frontbuffer slots through i915_gem_track_fb(). The functions in this file are
+ * then called when the contents of the frontbuffer are invalidated, when
+ * frontbuffer rendering has stopped again to flush out all the changes, and when
+ * the frontbuffer is exchanged with a flip. Subsystems interested in
+ * frontbuffer changes (e.g. PSR, FBC, DRRS) should directly put their callbacks
+ * into the relevant places and filter for the frontbuffer slots that they are
+ * interested in.
+ *
+ * At a high level there are two types of powersaving features. The first type
+ * works like a special cache (FBC and PSR) and needs to know when it should
+ * stop caching and when caching can be restarted. This is done by placing
+ * callbacks into the invalidate and flush functions: at invalidate time the
+ * caching must be stopped and at flush time it can be restarted. Such features
+ * may also need to know when the frontbuffer changes (e.g. when the hw doesn't
+ * initiate an invalidate and flush on its own), which can be achieved by placing
+ * callbacks into the flip functions.
+ *
+ * The other type of display power saving feature only cares about busyness
+ * (e.g. DRRS). In that case all three (invalidate, flush and flip) indicate
+ * busyness. There is no direct way to detect idleness. Instead an idle timer
+ * (implemented as delayed work) should be started from the flush and flip
+ * functions and cancelled as soon as busyness is detected.
+ *
+ * Note that there's also an older frontbuffer activity tracking scheme which
+ * just tracks general activity. This is done by the various mark_busy and
+ * mark_idle functions. For display power management features, using these
+ * functions is deprecated and should be avoided.
+ */
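As a rough sketch of the call flow described above (not part of the patch), a synchronous rendering path and a flip path could look like this, assuming only the intel_fb_obj_invalidate(), intel_fb_obj_flush(), intel_frontbuffer_flip_prepare() and intel_frontbuffer_flip_complete() declarations from this series; the example_* helper names are hypothetical:

#include "i915_drv.h"
#include "intel_drv.h"

/* Hypothetical synchronous (CPU) rendering path: invalidate first, then
 * flush once the new contents are on screen. Both object-based calls
 * expect dev->struct_mutex to be held by the caller. */
static void example_cpu_rendering(struct drm_i915_gem_object *obj)
{
	/* ring == NULL: synchronous rendering, so caching features such as
	 * PSR/FBC are told to stop immediately. */
	intel_fb_obj_invalidate(obj, NULL);

	/* ... CPU writes to the frontbuffer object ... */

	/* retire == false: we are not retiring asynchronous ring activity,
	 * just signalling that rendering has finished. */
	intel_fb_obj_flush(obj, false);
}

/* Hypothetical page-flip path: the flush is deferred until the flip has
 * actually been latched by the hardware. */
static void example_flip(struct drm_device *dev, unsigned frontbuffer_bits)
{
	intel_frontbuffer_flip_prepare(dev, frontbuffer_bits);

	/* ... queue the flip and wait for the flip-done interrupt ... */

	intel_frontbuffer_flip_complete(dev, frontbuffer_bits);
}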
+
+#include <drm/drmP.h>
+
+#include "intel_drv.h"
+#include "i915_drv.h"
+
+static void intel_increase_pllclock(struct drm_device *dev,
+ enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int dpll_reg = DPLL(pipe);
+ int dpll;
+
+ if (!HAS_GMCH_DISPLAY(dev))
+ return;
+
+ if (!dev_priv->lvds_downclock_avail)
+ return;
+
+ dpll = I915_READ(dpll_reg);
+ if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
+ DRM_DEBUG_DRIVER("upclocking LVDS\n");
+
+ assert_panel_unlocked(dev_priv, pipe);
+
+ dpll &= ~DISPLAY_RATE_SELECT_FPA1;
+ I915_WRITE(dpll_reg, dpll);
+ intel_wait_for_vblank(dev, pipe);
+
+ dpll = I915_READ(dpll_reg);
+ if (dpll & DISPLAY_RATE_SELECT_FPA1)
+ DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
+ }
+}
+
+/**
+ * intel_mark_fb_busy - mark given planes as busy
+ * @dev: DRM device
+ * @frontbuffer_bits: bits for the affected planes
+ * @ring: optional ring for asynchronous commands
+ *
+ * This function gets called every time the screen contents change. It can be
+ * used to keep e.g. the update rate at the nominal refresh rate with DRRS.
+ */
+static void intel_mark_fb_busy(struct drm_device *dev,
+ unsigned frontbuffer_bits,
+ struct intel_engine_cs *ring)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe;
+
+ if (!i915.powersave)
+ return;
+
+ for_each_pipe(dev_priv, pipe) {
+ if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
+ continue;
+
+ intel_increase_pllclock(dev, pipe);
+ if (ring && intel_fbc_enabled(dev))
+ ring->fbc_dirty = true;
+ }
+}
+
+/**
+ * intel_fb_obj_invalidate - invalidate frontbuffer object
+ * @obj: GEM object to invalidate
+ * @ring: set for asynchronous rendering
+ *
+ * This function gets called every time rendering on the given object starts and
+ * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
+ * be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed
+ * until the rendering completes or a flip on this frontbuffer plane is
+ * scheduled.
+ */
+void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ if (!obj->frontbuffer_bits)
+ return;
+
+ if (ring) {
+ mutex_lock(&dev_priv->fb_tracking.lock);
+ dev_priv->fb_tracking.busy_bits
+ |= obj->frontbuffer_bits;
+ dev_priv->fb_tracking.flip_bits
+ &= ~obj->frontbuffer_bits;
+ mutex_unlock(&dev_priv->fb_tracking.lock);
+ }
+
+ intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
+
+ intel_psr_invalidate(dev, obj->frontbuffer_bits);
+}
+
+/**
+ * intel_frontbuffer_flush - flush frontbuffer
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called every time rendering on the given planes has
+ * completed and frontbuffer caching can be started again. Flushes will get
+ * delayed if they're blocked by some outstanding asynchronous rendering.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flush(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Delay flushing when rings are still busy.*/
+ mutex_lock(&dev_priv->fb_tracking.lock);
+ frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
+ mutex_unlock(&dev_priv->fb_tracking.lock);
+
+ intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
+
+ intel_psr_flush(dev, frontbuffer_bits);
+
+ /*
+ * FIXME: Unconditional fbc flushing here is a rather gross hack and
+ * needs to be reworked into a proper frontbuffer tracking scheme like
+ * psr employs.
+ */
+ if (dev_priv->fbc.need_sw_cache_clean) {
+ dev_priv->fbc.need_sw_cache_clean = false;
+ bdw_fbc_sw_flush(dev, FBC_REND_CACHE_CLEAN);
+ }
+}
+
+/**
+ * intel_fb_obj_flush - flush frontbuffer object
+ * @obj: GEM object to flush
+ * @retire: set when retiring asynchronous rendering
+ *
+ * This function gets called every time rendering on the given object has
+ * completed and frontbuffer caching can be started again. If @retire is true
+ * then any delayed flushes will be unblocked.
+ */
+void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+ bool retire)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned frontbuffer_bits;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ if (!obj->frontbuffer_bits)
+ return;
+
+ frontbuffer_bits = obj->frontbuffer_bits;
+
+ if (retire) {
+ mutex_lock(&dev_priv->fb_tracking.lock);
+ /* Filter out new bits since rendering started. */
+ frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
+
+ dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
+ mutex_unlock(&dev_priv->fb_tracking.lock);
+ }
+
+ intel_frontbuffer_flush(dev, frontbuffer_bits);
+}
+
+/**
+ * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after scheduling a flip on the given planes. The
+ * actual frontbuffer flushing will be delayed until completion is signalled
+ * with intel_frontbuffer_flip_complete. If an invalidate happens in between,
+ * this flush will be cancelled.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flip_prepare(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev_priv->fb_tracking.lock);
+ dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
+ /* Remove stale busy bits due to the old buffer. */
+ dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
+ mutex_unlock(&dev_priv->fb_tracking.lock);
+}
+
+/**
+ * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after the flip has been latched and will complete
+ * on the next vblank. It will execute the flush if it hasn't been cancelled yet.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flip_complete(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev_priv->fb_tracking.lock);
+ /* Mask any cancelled flips. */
+ frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
+ dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
+ mutex_unlock(&dev_priv->fb_tracking.lock);
+
+ intel_frontbuffer_flush(dev, frontbuffer_bits);
+}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 29ec1535992..3abc2000fce 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -166,6 +166,19 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(VIDEO_DIP_CTL);
}
+static bool g4x_infoframe_enabled(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ u32 val = I915_READ(VIDEO_DIP_CTL);
+
+ if (VIDEO_DIP_PORT(intel_dig_port->port) == (val & VIDEO_DIP_PORT_MASK))
+ return val & VIDEO_DIP_ENABLE;
+
+ return false;
+}
+
static void ibx_write_infoframe(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len)
@@ -204,6 +217,17 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(reg);
}
+static bool ibx_infoframe_enabled(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+ u32 val = I915_READ(reg);
+
+ return val & VIDEO_DIP_ENABLE;
+}
+
static void cpt_write_infoframe(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len)
@@ -245,6 +269,17 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(reg);
}
+static bool cpt_infoframe_enabled(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+ u32 val = I915_READ(reg);
+
+ return val & VIDEO_DIP_ENABLE;
+}
+
static void vlv_write_infoframe(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len)
@@ -283,6 +318,17 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(reg);
}
+static bool vlv_infoframe_enabled(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
+ u32 val = I915_READ(reg);
+
+ return val & VIDEO_DIP_ENABLE;
+}
+
static void hsw_write_infoframe(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len)
@@ -320,6 +366,18 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(ctl_reg);
}
+static bool hsw_infoframe_enabled(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
+ u32 val = I915_READ(ctl_reg);
+
+ return val & (VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_SPD_HSW |
+ VIDEO_DIP_ENABLE_VS_HSW);
+}
+
/*
* The data we write to the DIP data buffer registers is 1 byte bigger than the
* HDMI infoframe size because of an ECC/reserved byte at position 3 (starting
@@ -661,14 +719,6 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder)
if (crtc->config.has_hdmi_sink)
hdmi_val |= HDMI_MODE_SELECT_HDMI;
- if (crtc->config.has_audio) {
- WARN_ON(!crtc->config.has_hdmi_sink);
- DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
- pipe_name(crtc->pipe));
- hdmi_val |= SDVO_AUDIO_ENABLE;
- intel_write_eld(&encoder->base, adjusted_mode);
- }
-
if (HAS_PCH_CPT(dev))
hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe);
else if (IS_CHERRYVIEW(dev))
@@ -690,7 +740,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
u32 tmp;
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_enabled(dev_priv, power_domain))
+ if (!intel_display_power_is_enabled(dev_priv, power_domain))
return false;
tmp = I915_READ(intel_hdmi->hdmi_reg);
@@ -732,6 +782,9 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
if (tmp & HDMI_MODE_SELECT_HDMI)
pipe_config->has_hdmi_sink = true;
+ if (intel_hdmi->infoframe_enabled(&encoder->base))
+ pipe_config->has_infoframe = true;
+
if (tmp & SDVO_AUDIO_ENABLE)
pipe_config->has_audio = true;
@@ -791,6 +844,13 @@ static void intel_enable_hdmi(struct intel_encoder *encoder)
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
}
+
+ if (intel_crtc->config.has_audio) {
+ WARN_ON(!intel_crtc->config.has_hdmi_sink);
+ DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
+ pipe_name(intel_crtc->pipe));
+ intel_audio_codec_enable(encoder);
+ }
}
static void vlv_enable_hdmi(struct intel_encoder *encoder)
@@ -802,9 +862,13 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
u32 temp;
u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE;
+ if (crtc->config.has_audio)
+ intel_audio_codec_disable(encoder);
+
temp = I915_READ(intel_hdmi->hdmi_reg);
/* HW workaround for IBX, we need to move the port to transcoder A
@@ -922,6 +986,9 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
pipe_config->has_hdmi_sink = intel_hdmi->has_hdmi_sink;
+ if (pipe_config->has_hdmi_sink)
+ pipe_config->has_infoframe = true;
+
if (intel_hdmi->color_range_auto) {
/* See CEA-861-E - 5.1 Default Encoding Parameters */
if (pipe_config->has_hdmi_sink &&
@@ -1394,10 +1461,13 @@ static void chv_hdmi_post_disable(struct intel_encoder *encoder)
static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct intel_hdmi *intel_hdmi = &dport->hdmi;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
+ struct drm_display_mode *adjusted_mode =
+ &intel_crtc->config.adjusted_mode;
enum dpio_channel ch = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
int data, i;
@@ -1405,6 +1475,15 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
mutex_lock(&dev_priv->dpio_lock);
+ /* allow hardware to manage TX FIFO reset source */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
+ val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+ val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+
/* Deassert soft data lane reset*/
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
@@ -1441,12 +1520,26 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
/* Clear calc init */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+ val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
+ val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+ val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
+ val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
+ val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
+ val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
+ val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
+ val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
+
/* FIXME: Program the support xxx V-dB */
/* Use 800mV-0dB */
for (i = 0; i < 4; i++) {
@@ -1499,6 +1592,10 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->dpio_lock);
+ intel_hdmi->set_infoframes(&encoder->base,
+ intel_crtc->config.has_hdmi_sink,
+ adjusted_mode);
+
intel_enable_hdmi(encoder);
vlv_wait_port_ready(dev_priv, dport);
@@ -1593,18 +1690,23 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
if (IS_VALLEYVIEW(dev)) {
intel_hdmi->write_infoframe = vlv_write_infoframe;
intel_hdmi->set_infoframes = vlv_set_infoframes;
+ intel_hdmi->infoframe_enabled = vlv_infoframe_enabled;
} else if (IS_G4X(dev)) {
intel_hdmi->write_infoframe = g4x_write_infoframe;
intel_hdmi->set_infoframes = g4x_set_infoframes;
+ intel_hdmi->infoframe_enabled = g4x_infoframe_enabled;
} else if (HAS_DDI(dev)) {
intel_hdmi->write_infoframe = hsw_write_infoframe;
intel_hdmi->set_infoframes = hsw_set_infoframes;
+ intel_hdmi->infoframe_enabled = hsw_infoframe_enabled;
} else if (HAS_PCH_IBX(dev)) {
intel_hdmi->write_infoframe = ibx_write_infoframe;
intel_hdmi->set_infoframes = ibx_set_infoframes;
+ intel_hdmi->infoframe_enabled = ibx_infoframe_enabled;
} else {
intel_hdmi->write_infoframe = cpt_write_infoframe;
intel_hdmi->set_infoframes = cpt_set_infoframes;
+ intel_hdmi->infoframe_enabled = cpt_infoframe_enabled;
}
if (HAS_DDI(dev))
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index bafd38b5703..e588376227e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -136,11 +136,10 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)
-#define GEN8_LR_CONTEXT_ALIGN 4096
-
#define RING_EXECLIST_QFULL (1 << 0x2)
#define RING_EXECLIST1_VALID (1 << 0x3)
#define RING_EXECLIST0_VALID (1 << 0x4)
@@ -204,6 +203,9 @@ enum {
};
#define GEN8_CTX_ID_SHIFT 32
+static int intel_lr_context_pin(struct intel_engine_cs *ring,
+ struct intel_context *ctx);
+
/**
* intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
* @dev: DRM device.
@@ -219,6 +221,9 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
{
WARN_ON(i915.enable_ppgtt == -1);
+ if (INTEL_INFO(dev)->gen >= 9)
+ return 1;
+
if (enable_execlists == 0)
return 0;
@@ -275,7 +280,8 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
struct drm_i915_gem_object *ctx_obj0,
struct drm_i915_gem_object *ctx_obj1)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
uint64_t temp = 0;
uint32_t desc[4];
unsigned long flags;
@@ -300,13 +306,18 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
* Instead, we do the runtime_pm_get/put when creating/destroying requests.
*/
spin_lock_irqsave(&dev_priv->uncore.lock, flags);
- if (IS_CHERRYVIEW(dev_priv->dev)) {
+ if (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen >= 9) {
if (dev_priv->uncore.fw_rendercount++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv,
FORCEWAKE_RENDER);
if (dev_priv->uncore.fw_mediacount++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv,
FORCEWAKE_MEDIA);
+ if (INTEL_INFO(dev)->gen >= 9) {
+ if (dev_priv->uncore.fw_blittercount++ == 0)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv,
+ FORCEWAKE_BLITTER);
+ }
} else {
if (dev_priv->uncore.forcewake_count++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv,
@@ -325,13 +336,18 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
/* Release Force Wakeup (see the big comment above). */
spin_lock_irqsave(&dev_priv->uncore.lock, flags);
- if (IS_CHERRYVIEW(dev_priv->dev)) {
+ if (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen >= 9) {
if (--dev_priv->uncore.fw_rendercount == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv,
FORCEWAKE_RENDER);
if (--dev_priv->uncore.fw_mediacount == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv,
FORCEWAKE_MEDIA);
+ if (INTEL_INFO(dev)->gen >= 9) {
+ if (--dev_priv->uncore.fw_blittercount == 0)
+ dev_priv->uncore.funcs.force_wake_put(dev_priv,
+ FORCEWAKE_BLITTER);
+ }
} else {
if (--dev_priv->uncore.forcewake_count == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv,
@@ -341,7 +357,9 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
}
-static int execlists_ctx_write_tail(struct drm_i915_gem_object *ctx_obj, u32 tail)
+static int execlists_update_context(struct drm_i915_gem_object *ctx_obj,
+ struct drm_i915_gem_object *ring_obj,
+ u32 tail)
{
struct page *page;
uint32_t *reg_state;
@@ -350,43 +368,45 @@ static int execlists_ctx_write_tail(struct drm_i915_gem_object *ctx_obj, u32 tai
reg_state = kmap_atomic(page);
reg_state[CTX_RING_TAIL+1] = tail;
+ reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
kunmap_atomic(reg_state);
return 0;
}
-static int execlists_submit_context(struct intel_engine_cs *ring,
- struct intel_context *to0, u32 tail0,
- struct intel_context *to1, u32 tail1)
+static void execlists_submit_contexts(struct intel_engine_cs *ring,
+ struct intel_context *to0, u32 tail0,
+ struct intel_context *to1, u32 tail1)
{
- struct drm_i915_gem_object *ctx_obj0;
+ struct drm_i915_gem_object *ctx_obj0 = to0->engine[ring->id].state;
+ struct intel_ringbuffer *ringbuf0 = to0->engine[ring->id].ringbuf;
struct drm_i915_gem_object *ctx_obj1 = NULL;
+ struct intel_ringbuffer *ringbuf1 = NULL;
- ctx_obj0 = to0->engine[ring->id].state;
BUG_ON(!ctx_obj0);
WARN_ON(!i915_gem_obj_is_pinned(ctx_obj0));
+ WARN_ON(!i915_gem_obj_is_pinned(ringbuf0->obj));
- execlists_ctx_write_tail(ctx_obj0, tail0);
+ execlists_update_context(ctx_obj0, ringbuf0->obj, tail0);
if (to1) {
+ ringbuf1 = to1->engine[ring->id].ringbuf;
ctx_obj1 = to1->engine[ring->id].state;
BUG_ON(!ctx_obj1);
WARN_ON(!i915_gem_obj_is_pinned(ctx_obj1));
+ WARN_ON(!i915_gem_obj_is_pinned(ringbuf1->obj));
- execlists_ctx_write_tail(ctx_obj1, tail1);
+ execlists_update_context(ctx_obj1, ringbuf1->obj, tail1);
}
execlists_elsp_write(ring, ctx_obj0, ctx_obj1);
-
- return 0;
}
static void execlists_context_unqueue(struct intel_engine_cs *ring)
{
struct intel_ctx_submit_request *req0 = NULL, *req1 = NULL;
struct intel_ctx_submit_request *cursor = NULL, *tmp = NULL;
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
assert_spin_locked(&ring->execlist_lock);
@@ -403,7 +423,8 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
* will update tail past first request's workload */
cursor->elsp_submitted = req0->elsp_submitted;
list_del(&req0->execlist_link);
- queue_work(dev_priv->wq, &req0->work);
+ list_add_tail(&req0->execlist_link,
+ &ring->execlist_retired_req_list);
req0 = cursor;
} else {
req1 = cursor;
@@ -413,9 +434,9 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
WARN_ON(req1 && req1->elsp_submitted);
- WARN_ON(execlists_submit_context(ring, req0->ctx, req0->tail,
- req1 ? req1->ctx : NULL,
- req1 ? req1->tail : 0));
+ execlists_submit_contexts(ring, req0->ctx, req0->tail,
+ req1 ? req1->ctx : NULL,
+ req1 ? req1->tail : 0);
req0->elsp_submitted++;
if (req1)
@@ -425,7 +446,6 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
static bool execlists_check_remove_request(struct intel_engine_cs *ring,
u32 request_id)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct intel_ctx_submit_request *head_req;
assert_spin_locked(&ring->execlist_lock);
@@ -443,7 +463,8 @@ static bool execlists_check_remove_request(struct intel_engine_cs *ring,
if (--head_req->elsp_submitted <= 0) {
list_del(&head_req->execlist_link);
- queue_work(dev_priv->wq, &head_req->work);
+ list_add_tail(&head_req->execlist_link,
+ &ring->execlist_retired_req_list);
return true;
}
}
@@ -512,22 +533,6 @@ void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring)
((u32)ring->next_context_status_buffer & 0x07) << 8);
}
-static void execlists_free_request_task(struct work_struct *work)
-{
- struct intel_ctx_submit_request *req =
- container_of(work, struct intel_ctx_submit_request, work);
- struct drm_device *dev = req->ring->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- intel_runtime_pm_put(dev_priv);
-
- mutex_lock(&dev->struct_mutex);
- i915_gem_context_unreference(req->ctx);
- mutex_unlock(&dev->struct_mutex);
-
- kfree(req);
-}
-
static int execlists_context_queue(struct intel_engine_cs *ring,
struct intel_context *to,
u32 tail)
@@ -542,9 +547,12 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
return -ENOMEM;
req->ctx = to;
i915_gem_context_reference(req->ctx);
+
+ if (to != ring->default_context)
+ intel_lr_context_pin(ring, to);
+
req->ring = ring;
req->tail = tail;
- INIT_WORK(&req->work, execlists_free_request_task);
intel_runtime_pm_get(dev_priv);
@@ -563,9 +571,10 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
if (to == tail_req->ctx) {
WARN(tail_req->elsp_submitted != 0,
- "More than 2 already-submitted reqs queued\n");
+ "More than 2 already-submitted reqs queued\n");
list_del(&tail_req->execlist_link);
- queue_work(dev_priv->wq, &tail_req->work);
+ list_add_tail(&tail_req->execlist_link,
+ &ring->execlist_retired_req_list);
}
}
@@ -733,6 +742,36 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
return 0;
}
+void intel_execlists_retire_requests(struct intel_engine_cs *ring)
+{
+ struct intel_ctx_submit_request *req, *tmp;
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ unsigned long flags;
+ struct list_head retired_list;
+
+ WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+ if (list_empty(&ring->execlist_retired_req_list))
+ return;
+
+ INIT_LIST_HEAD(&retired_list);
+ spin_lock_irqsave(&ring->execlist_lock, flags);
+ list_replace_init(&ring->execlist_retired_req_list, &retired_list);
+ spin_unlock_irqrestore(&ring->execlist_lock, flags);
+
+ list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
+ struct intel_context *ctx = req->ctx;
+ struct drm_i915_gem_object *ctx_obj =
+ ctx->engine[ring->id].state;
+
+ if (ctx_obj && (ctx != ring->default_context))
+ intel_lr_context_unpin(ring, ctx);
+ intel_runtime_pm_put(dev_priv);
+ i915_gem_context_unreference(req->ctx);
+ list_del(&req->execlist_link);
+ kfree(req);
+ }
+}
+
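The retire path above detaches the whole execlist_retired_req_list under the spinlock and only then releases each request, so the heavier cleanup (context unpin, runtime-pm put, kfree) never runs with the lock held. A minimal user-space sketch of the same detach-then-process pattern, assuming a plain singly linked list and a pthread mutex in place of the ring's spinlock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct req {
        int id;
        struct req *next;
};

static struct req *retired_head;                /* list protected by the lock */
static pthread_mutex_t retired_lock = PTHREAD_MUTEX_INITIALIZER;

static void queue_retired(struct req *r)
{
        pthread_mutex_lock(&retired_lock);
        r->next = retired_head;                 /* push under the lock */
        retired_head = r;
        pthread_mutex_unlock(&retired_lock);
}

static void retire_all(void)
{
        struct req *list, *r;

        pthread_mutex_lock(&retired_lock);
        list = retired_head;                    /* detach the whole list... */
        retired_head = NULL;
        pthread_mutex_unlock(&retired_lock);    /* ...and drop the lock early */

        while ((r = list) != NULL) {            /* slow cleanup, lock not held */
                list = r->next;
                printf("retiring request %d\n", r->id);
                free(r);
        }
}

int main(void)
{
        for (int i = 0; i < 3; i++) {
                struct req *r = malloc(sizeof(*r));
                r->id = i;
                queue_retired(r);
        }
        retire_all();
        return 0;
}
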
void intel_logical_ring_stop(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -793,9 +832,55 @@ void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf)
execlists_context_queue(ring, ctx, ringbuf->tail);
}
+static int intel_lr_context_pin(struct intel_engine_cs *ring,
+ struct intel_context *ctx)
+{
+ struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
+ struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+ int ret = 0;
+
+ WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+ if (ctx->engine[ring->id].unpin_count++ == 0) {
+ ret = i915_gem_obj_ggtt_pin(ctx_obj,
+ GEN8_LR_CONTEXT_ALIGN, 0);
+ if (ret)
+ goto reset_unpin_count;
+
+ ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
+ if (ret)
+ goto unpin_ctx_obj;
+ }
+
+ return ret;
+
+unpin_ctx_obj:
+ i915_gem_object_ggtt_unpin(ctx_obj);
+reset_unpin_count:
+ ctx->engine[ring->id].unpin_count = 0;
+
+ return ret;
+}
+
+void intel_lr_context_unpin(struct intel_engine_cs *ring,
+ struct intel_context *ctx)
+{
+ struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
+ struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+
+ if (ctx_obj) {
+ WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+ if (--ctx->engine[ring->id].unpin_count == 0) {
+ intel_unpin_ringbuffer_obj(ringbuf);
+ i915_gem_object_ggtt_unpin(ctx_obj);
+ }
+ }
+}
+
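intel_lr_context_pin()/unpin() above only touch the backing objects on the 0->1 and 1->0 transitions of unpin_count, with the error path unwinding in reverse order. A small stand-alone sketch of that first-user-pins / last-user-unpins idea (the resource names here are invented for illustration):

#include <stdio.h>

struct object {
        int pin_count;          /* how many users currently need the backing store */
        int backed;             /* 1 while the (pretend) backing store is held */
};

static int acquire_backing(struct object *obj)
{
        obj->backed = 1;                        /* stand-in for ggtt pin + map */
        printf("backing acquired\n");
        return 0;
}

static void release_backing(struct object *obj)
{
        obj->backed = 0;                        /* stand-in for unmap + unpin */
        printf("backing released\n");
}

static int object_pin(struct object *obj)
{
        if (obj->pin_count++ == 0)              /* only the first user pays */
                return acquire_backing(obj);
        return 0;
}

static void object_unpin(struct object *obj)
{
        if (--obj->pin_count == 0)              /* only the last user releases */
                release_backing(obj);
}

int main(void)
{
        struct object obj = { 0, 0 };

        object_pin(&obj);       /* acquires */
        object_pin(&obj);       /* no-op, already backed */
        object_unpin(&obj);     /* still one user left */
        object_unpin(&obj);     /* releases */
        return 0;
}
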
static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
struct intel_context *ctx)
{
+ int ret;
+
if (ring->outstanding_lazy_seqno)
return 0;
@@ -806,6 +891,14 @@ static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
if (request == NULL)
return -ENOMEM;
+ if (ctx != ring->default_context) {
+ ret = intel_lr_context_pin(ring, ctx);
+ if (ret) {
+ kfree(request);
+ return ret;
+ }
+ }
+
/* Hold a reference to the context this request belongs to
* (we will need it when the time comes to emit/retire the
* request).
@@ -991,6 +1084,44 @@ int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
return 0;
}
+static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
+ struct intel_context *ctx)
+{
+ int ret, i;
+ struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_workarounds *w = &dev_priv->workarounds;
+
+ if (WARN_ON(w->count == 0))
+ return 0;
+
+ ring->gpu_caches_dirty = true;
+ ret = logical_ring_flush_all_caches(ringbuf);
+ if (ret)
+ return ret;
+
+ ret = intel_logical_ring_begin(ringbuf, w->count * 2 + 2);
+ if (ret)
+ return ret;
+
+ intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
+ for (i = 0; i < w->count; i++) {
+ intel_logical_ring_emit(ringbuf, w->reg[i].addr);
+ intel_logical_ring_emit(ringbuf, w->reg[i].value);
+ }
+ intel_logical_ring_emit(ringbuf, MI_NOOP);
+
+ intel_logical_ring_advance(ringbuf);
+
+ ring->gpu_caches_dirty = true;
+ ret = logical_ring_flush_all_caches(ringbuf);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
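intel_logical_ring_workarounds_emit() reserves w->count * 2 + 2 dwords because the packet is one header, one address/value pair per register, and a trailing NOOP. A rough user-space sketch of how such a packet is laid out in a dword buffer (the command encodings below are placeholders, not the real MI opcodes):

#include <stdint.h>
#include <stdio.h>

#define FAKE_LRI(count)  (0x22000000u | ((count) * 2 - 1))  /* placeholder header */
#define FAKE_NOOP        0x00000000u                        /* placeholder noop */

struct reg_wa {
        uint32_t addr;
        uint32_t value;
};

/* Emit header + (addr, value) per workaround + noop; returns dwords written. */
static unsigned int emit_workarounds(uint32_t *buf, const struct reg_wa *wa,
                                     unsigned int count)
{
        unsigned int i, n = 0;

        buf[n++] = FAKE_LRI(count);
        for (i = 0; i < count; i++) {
                buf[n++] = wa[i].addr;
                buf[n++] = wa[i].value;
        }
        buf[n++] = FAKE_NOOP;

        return n;               /* == count * 2 + 2, matching the reservation */
}

int main(void)
{
        const struct reg_wa wa[] = {
                { 0x7004, 0x1 },
                { 0x7300, 0x8 },
        };
        uint32_t buf[16];
        unsigned int n = emit_workarounds(buf, wa, 2);

        for (unsigned int i = 0; i < n; i++)
                printf("dword %u: 0x%08x\n", i, buf[i]);
        return 0;
}
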
static int gen8_init_common_ring(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
@@ -1034,7 +1165,7 @@ static int gen8_init_render_ring(struct intel_engine_cs *ring)
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
- return ret;
+ return init_workarounds_ring(ring);
}
static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
@@ -1063,7 +1194,7 @@ static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
- if (!dev->irq_enabled)
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1214,11 +1345,13 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf)
*/
void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_private *dev_priv;
if (!intel_ring_initialized(ring))
return;
+ dev_priv = ring->dev->dev_private;
+
intel_logical_ring_stop(ring);
WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
ring->preallocated_lazy_request = NULL;
@@ -1248,6 +1381,7 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
init_waitqueue_head(&ring->irq_queue);
INIT_LIST_HEAD(&ring->execlist_queue);
+ INIT_LIST_HEAD(&ring->execlist_retired_req_list);
spin_lock_init(&ring->execlist_lock);
ring->next_context_status_buffer = 0;
@@ -1282,6 +1416,7 @@ static int logical_render_ring_init(struct drm_device *dev)
ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
ring->init = gen8_init_render_ring;
+ ring->init_context = intel_logical_ring_workarounds_emit;
ring->cleanup = intel_fini_pipe_control;
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
@@ -1495,7 +1630,6 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *ring_obj = ringbuf->obj;
struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
struct page *page;
uint32_t *reg_state;
@@ -1541,7 +1675,9 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
reg_state[CTX_RING_TAIL+1] = 0;
reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base);
- reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
+ /* Ring buffer start address is not known until the buffer is pinned.
+ * It is written to the context image in execlists_update_context()
+ */
reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base);
reg_state[CTX_RING_BUFFER_CONTROL+1] =
((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID;
@@ -1617,12 +1753,18 @@ void intel_lr_context_free(struct intel_context *ctx)
for (i = 0; i < I915_NUM_RINGS; i++) {
struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
- struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
if (ctx_obj) {
+ struct intel_ringbuffer *ringbuf =
+ ctx->engine[i].ringbuf;
+ struct intel_engine_cs *ring = ringbuf->ring;
+
+ if (ctx == ring->default_context) {
+ intel_unpin_ringbuffer_obj(ringbuf);
+ i915_gem_object_ggtt_unpin(ctx_obj);
+ }
intel_destroy_ringbuffer_obj(ringbuf);
kfree(ringbuf);
- i915_gem_object_ggtt_unpin(ctx_obj);
drm_gem_object_unreference(&ctx_obj->base);
}
}
@@ -1632,11 +1774,14 @@ static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
{
int ret = 0;
- WARN_ON(INTEL_INFO(ring->dev)->gen != 8);
+ WARN_ON(INTEL_INFO(ring->dev)->gen < 8);
switch (ring->id) {
case RCS:
- ret = GEN8_LR_CONTEXT_RENDER_SIZE;
+ if (INTEL_INFO(ring->dev)->gen >= 9)
+ ret = GEN9_LR_CONTEXT_RENDER_SIZE;
+ else
+ ret = GEN8_LR_CONTEXT_RENDER_SIZE;
break;
case VCS:
case BCS:
@@ -1649,6 +1794,23 @@ static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
return ret;
}
+static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
+ struct drm_i915_gem_object *default_ctx_obj)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+ /* The status page is offset 0 from the default context object
+ * in LRC mode. */
+ ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj);
+ ring->status_page.page_addr =
+ kmap(sg_page(default_ctx_obj->pages->sgl));
+ ring->status_page.obj = default_ctx_obj;
+
+ I915_WRITE(RING_HWS_PGA(ring->mmio_base),
+ (u32)ring->status_page.gfx_addr);
+ POSTING_READ(RING_HWS_PGA(ring->mmio_base));
+}
+
/**
* intel_lr_context_deferred_create() - create the LRC specific bits of a context
* @ctx: LR context to create.
@@ -1660,11 +1822,12 @@ static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
* the creation is a deferred call: it's better to make sure first that we need to use
* a given ring with the context.
*
- * Return: non-zero on eror.
+ * Return: non-zero on error.
*/
int intel_lr_context_deferred_create(struct intel_context *ctx,
struct intel_engine_cs *ring)
{
+ const bool is_global_default_ctx = (ctx == ring->default_context);
struct drm_device *dev = ring->dev;
struct drm_i915_gem_object *ctx_obj;
uint32_t context_size;
@@ -1684,21 +1847,22 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
return ret;
}
- ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
- if (ret) {
- DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n", ret);
- drm_gem_object_unreference(&ctx_obj->base);
- return ret;
+ if (is_global_default_ctx) {
+ ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n",
+ ret);
+ drm_gem_object_unreference(&ctx_obj->base);
+ return ret;
+ }
}
ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
if (!ringbuf) {
DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
ring->name);
- i915_gem_object_ggtt_unpin(ctx_obj);
- drm_gem_object_unreference(&ctx_obj->base);
ret = -ENOMEM;
- return ret;
+ goto error_unpin_ctx;
}
ringbuf->ring = ring;
@@ -1711,46 +1875,51 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
ringbuf->space = ringbuf->size;
ringbuf->last_retired_head = -1;
- /* TODO: For now we put this in the mappable region so that we can reuse
- * the existing ringbuffer code which ioremaps it. When we start
- * creating many contexts, this will no longer work and we must switch
- * to a kmapish interface.
- */
- ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
- if (ret) {
- DRM_DEBUG_DRIVER("Failed to allocate ringbuffer obj %s: %d\n",
+ if (ringbuf->obj == NULL) {
+ ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
+ if (ret) {
+ DRM_DEBUG_DRIVER(
+ "Failed to allocate ringbuffer obj %s: %d\n",
ring->name, ret);
- goto error;
+ goto error_free_rbuf;
+ }
+
+ if (is_global_default_ctx) {
+ ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
+ if (ret) {
+ DRM_ERROR(
+ "Failed to pin and map ringbuffer %s: %d\n",
+ ring->name, ret);
+ goto error_destroy_rbuf;
+ }
+ }
+
}
ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
if (ret) {
DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
- intel_destroy_ringbuffer_obj(ringbuf);
goto error;
}
ctx->engine[ring->id].ringbuf = ringbuf;
ctx->engine[ring->id].state = ctx_obj;
- if (ctx == ring->default_context) {
- /* The status page is offset 0 from the default context object
- * in LRC mode. */
- ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(ctx_obj);
- ring->status_page.page_addr =
- kmap(sg_page(ctx_obj->pages->sgl));
- if (ring->status_page.page_addr == NULL)
- return -ENOMEM;
- ring->status_page.obj = ctx_obj;
- }
+ if (ctx == ring->default_context)
+ lrc_setup_hardware_status_page(ring, ctx_obj);
if (ring->id == RCS && !ctx->rcs_initialized) {
+ if (ring->init_context) {
+ ret = ring->init_context(ring, ctx);
+ if (ret)
+ DRM_ERROR("ring init context: %d\n", ret);
+ }
+
ret = intel_lr_context_render_state_init(ring, ctx);
if (ret) {
DRM_ERROR("Init render state failed: %d\n", ret);
ctx->engine[ring->id].ringbuf = NULL;
ctx->engine[ring->id].state = NULL;
- intel_destroy_ringbuffer_obj(ringbuf);
goto error;
}
ctx->rcs_initialized = true;
@@ -1759,8 +1928,15 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
return 0;
error:
+ if (is_global_default_ctx)
+ intel_unpin_ringbuffer_obj(ringbuf);
+error_destroy_rbuf:
+ intel_destroy_ringbuffer_obj(ringbuf);
+error_free_rbuf:
kfree(ringbuf);
- i915_gem_object_ggtt_unpin(ctx_obj);
+error_unpin_ctx:
+ if (is_global_default_ctx)
+ i915_gem_object_ggtt_unpin(ctx_obj);
drm_gem_object_unreference(&ctx_obj->base);
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 33c3b4bf28c..14b216b9be7 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -24,6 +24,8 @@
#ifndef _INTEL_LRC_H_
#define _INTEL_LRC_H_
+#define GEN8_LR_CONTEXT_ALIGN 4096
+
/* Execlists regs */
#define RING_ELSP(ring) ((ring)->mmio_base+0x230)
#define RING_EXECLIST_STATUS(ring) ((ring)->mmio_base+0x234)
@@ -67,6 +69,8 @@ int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
void intel_lr_context_free(struct intel_context *ctx);
int intel_lr_context_deferred_create(struct intel_context *ctx,
struct intel_engine_cs *ring);
+void intel_lr_context_unpin(struct intel_engine_cs *ring,
+ struct intel_context *ctx);
/* Execlists */
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
@@ -104,11 +108,11 @@ struct intel_ctx_submit_request {
u32 tail;
struct list_head execlist_link;
- struct work_struct work;
int elsp_submitted;
};
void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring);
+void intel_execlists_retire_requests(struct intel_engine_cs *ring);
#endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index c0bbf217244..14654d628ca 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -76,7 +76,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
u32 tmp;
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_enabled(dev_priv, power_domain))
+ if (!intel_display_power_is_enabled(dev_priv, power_domain))
return false;
tmp = I915_READ(lvds_encoder->reg);
@@ -1116,7 +1116,7 @@ out:
drm_connector_register(connector);
intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
- intel_panel_setup_backlight(connector);
+ intel_panel_setup_backlight(connector, INVALID_PIPE);
return;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 41b3be21749..4d63839bd9b 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -521,6 +521,9 @@ static u32 _vlv_get_backlight(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+ return 0;
+
return I915_READ(VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
}
@@ -536,15 +539,17 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 val;
- unsigned long flags;
+ struct intel_panel *panel = &connector->panel;
+ u32 val = 0;
- spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+ mutex_lock(&dev_priv->backlight_lock);
- val = dev_priv->display.get_backlight(connector);
- val = intel_panel_compute_brightness(connector, val);
+ if (panel->backlight.enabled) {
+ val = dev_priv->display.get_backlight(connector);
+ val = intel_panel_compute_brightness(connector, val);
+ }
- spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+ mutex_unlock(&dev_priv->backlight_lock);
DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
return val;
@@ -603,6 +608,9 @@ static void vlv_set_backlight(struct intel_connector *connector, u32 level)
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 tmp;
+ if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+ return;
+
tmp = I915_READ(VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
I915_WRITE(VLV_BLC_PWM_CTL(pipe), tmp | level);
}
@@ -626,14 +634,12 @@ static void intel_panel_set_backlight(struct intel_connector *connector,
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
- enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 hw_level;
- unsigned long flags;
- if (!panel->backlight.present || pipe == INVALID_PIPE)
+ if (!panel->backlight.present)
return;
- spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+ mutex_lock(&dev_priv->backlight_lock);
WARN_ON(panel->backlight.max == 0);
@@ -643,7 +649,7 @@ static void intel_panel_set_backlight(struct intel_connector *connector,
if (panel->backlight.enabled)
intel_panel_actually_set_backlight(connector, hw_level);
- spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+ mutex_unlock(&dev_priv->backlight_lock);
}
/* set backlight brightness to level in range [0..max], assuming hw min is
@@ -657,12 +663,17 @@ void intel_panel_set_backlight_acpi(struct intel_connector *connector,
struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 hw_level;
- unsigned long flags;
+ /*
+ * INVALID_PIPE may occur during driver init because
+ * connection_mutex isn't held across the entire backlight
+ * setup + modeset readout, and the BIOS can issue the
+ * requests at any time.
+ */
if (!panel->backlight.present || pipe == INVALID_PIPE)
return;
- spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+ mutex_lock(&dev_priv->backlight_lock);
WARN_ON(panel->backlight.max == 0);
@@ -678,7 +689,7 @@ void intel_panel_set_backlight_acpi(struct intel_connector *connector,
if (panel->backlight.enabled)
intel_panel_actually_set_backlight(connector, hw_level);
- spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+ mutex_unlock(&dev_priv->backlight_lock);
}
static void pch_disable_backlight(struct intel_connector *connector)
@@ -720,6 +731,9 @@ static void vlv_disable_backlight(struct intel_connector *connector)
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 tmp;
+ if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+ return;
+
intel_panel_actually_set_backlight(connector, 0);
tmp = I915_READ(VLV_BLC_PWM_CTL2(pipe));
@@ -731,10 +745,8 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
- enum pipe pipe = intel_get_pipe_from_connector(connector);
- unsigned long flags;
- if (!panel->backlight.present || pipe == INVALID_PIPE)
+ if (!panel->backlight.present)
return;
/*
@@ -748,14 +760,14 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
return;
}
- spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+ mutex_lock(&dev_priv->backlight_lock);
if (panel->backlight.device)
panel->backlight.device->props.power = FB_BLANK_POWERDOWN;
panel->backlight.enabled = false;
dev_priv->display.disable_backlight(connector);
- spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+ mutex_unlock(&dev_priv->backlight_lock);
}
static void bdw_enable_backlight(struct intel_connector *connector)
@@ -779,8 +791,9 @@ static void bdw_enable_backlight(struct intel_connector *connector)
if (panel->backlight.active_low_pwm)
pch_ctl1 |= BLM_PCH_POLARITY;
- /* BDW always uses the pch pwm controls. */
- pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
+ /* After LPT, override is the default. */
+ if (HAS_PCH_LPT(dev_priv))
+ pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
POSTING_READ(BLC_PWM_PCH_CTL1);
@@ -909,6 +922,9 @@ static void vlv_enable_backlight(struct intel_connector *connector)
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 ctl, ctl2;
+ if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+ return;
+
ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
if (ctl2 & BLM_PWM_ENABLE) {
DRM_DEBUG_KMS("backlight already enabled\n");
@@ -936,14 +952,13 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
- unsigned long flags;
- if (!panel->backlight.present || pipe == INVALID_PIPE)
+ if (!panel->backlight.present)
return;
DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
- spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+ mutex_lock(&dev_priv->backlight_lock);
WARN_ON(panel->backlight.max == 0);
@@ -961,7 +976,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
if (panel->backlight.device)
panel->backlight.device->props.power = FB_BLANK_UNBLANK;
- spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+ mutex_unlock(&dev_priv->backlight_lock);
}
#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
@@ -1030,6 +1045,9 @@ static int intel_backlight_device_register(struct intel_connector *connector)
if (WARN_ON(panel->backlight.device))
return -ENODEV;
+ if (!panel->backlight.present)
+ return 0;
+
WARN_ON(panel->backlight.max == 0);
memset(&props, 0, sizeof(props));
@@ -1065,6 +1083,10 @@ static int intel_backlight_device_register(struct intel_connector *connector)
panel->backlight.device = NULL;
return -ENODEV;
}
+
+ DRM_DEBUG_KMS("Connector %s backlight sysfs interface registered\n",
+ connector->base.name);
+
return 0;
}
@@ -1119,7 +1141,7 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
return scale(min, 0, 255, 0, panel->backlight.max);
}
-static int bdw_setup_backlight(struct intel_connector *connector)
+static int bdw_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1145,7 +1167,7 @@ static int bdw_setup_backlight(struct intel_connector *connector)
return 0;
}
-static int pch_setup_backlight(struct intel_connector *connector)
+static int pch_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1172,7 +1194,7 @@ static int pch_setup_backlight(struct intel_connector *connector)
return 0;
}
-static int i9xx_setup_backlight(struct intel_connector *connector)
+static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1204,7 +1226,7 @@ static int i9xx_setup_backlight(struct intel_connector *connector)
return 0;
}
-static int i965_setup_backlight(struct intel_connector *connector)
+static int i965_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1234,37 +1256,40 @@ static int i965_setup_backlight(struct intel_connector *connector)
return 0;
}
-static int vlv_setup_backlight(struct intel_connector *connector)
+static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
- enum pipe pipe;
+ enum pipe p;
u32 ctl, ctl2, val;
- for_each_pipe(dev_priv, pipe) {
- u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(pipe));
+ for_each_pipe(dev_priv, p) {
+ u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(p));
/* Skip if the modulation freq is already set */
if (cur_val & ~BACKLIGHT_DUTY_CYCLE_MASK)
continue;
cur_val &= BACKLIGHT_DUTY_CYCLE_MASK;
- I915_WRITE(VLV_BLC_PWM_CTL(pipe), (0xf42 << 16) |
+ I915_WRITE(VLV_BLC_PWM_CTL(p), (0xf42 << 16) |
cur_val);
}
- ctl2 = I915_READ(VLV_BLC_PWM_CTL2(PIPE_A));
+ if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+ return -ENODEV;
+
+ ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
- ctl = I915_READ(VLV_BLC_PWM_CTL(PIPE_A));
+ ctl = I915_READ(VLV_BLC_PWM_CTL(pipe));
panel->backlight.max = ctl >> 16;
if (!panel->backlight.max)
return -ENODEV;
panel->backlight.min = get_backlight_min_vbt(connector);
- val = _vlv_get_backlight(dev, PIPE_A);
+ val = _vlv_get_backlight(dev, pipe);
panel->backlight.level = intel_panel_compute_brightness(connector, val);
panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) &&
@@ -1273,13 +1298,12 @@ static int vlv_setup_backlight(struct intel_connector *connector)
return 0;
}
-int intel_panel_setup_backlight(struct drm_connector *connector)
+int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_panel *panel = &intel_connector->panel;
- unsigned long flags;
int ret;
if (!dev_priv->vbt.backlight.present) {
@@ -1292,9 +1316,9 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
}
/* set level and max in panel struct */
- spin_lock_irqsave(&dev_priv->backlight_lock, flags);
- ret = dev_priv->display.setup_backlight(intel_connector);
- spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+ mutex_lock(&dev_priv->backlight_lock);
+ ret = dev_priv->display.setup_backlight(intel_connector, pipe);
+ mutex_unlock(&dev_priv->backlight_lock);
if (ret) {
DRM_DEBUG_KMS("failed to setup backlight for connector %s\n",
@@ -1302,15 +1326,12 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
return ret;
}
- intel_backlight_device_register(intel_connector);
-
panel->backlight.present = true;
- DRM_DEBUG_KMS("backlight initialized, %s, brightness %u/%u, "
- "sysfs interface %sregistered\n",
+ DRM_DEBUG_KMS("Connector %s backlight initialized, %s, brightness %u/%u\n",
+ connector->name,
panel->backlight.enabled ? "enabled" : "disabled",
- panel->backlight.level, panel->backlight.max,
- panel->backlight.device ? "" : "not ");
+ panel->backlight.level, panel->backlight.max);
return 0;
}
@@ -1321,7 +1342,6 @@ void intel_panel_destroy_backlight(struct drm_connector *connector)
struct intel_panel *panel = &intel_connector->panel;
panel->backlight.present = false;
- intel_backlight_device_unregister(intel_connector);
}
/* Set up chip specific backlight functions */
@@ -1329,7 +1349,7 @@ void intel_panel_init_backlight_funcs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (IS_BROADWELL(dev)) {
+ if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9)) {
dev_priv->display.setup_backlight = bdw_setup_backlight;
dev_priv->display.enable_backlight = bdw_enable_backlight;
dev_priv->display.disable_backlight = pch_disable_backlight;
@@ -1384,3 +1404,19 @@ void intel_panel_fini(struct intel_panel *panel)
drm_mode_destroy(intel_connector->base.dev,
panel->downclock_mode);
}
+
+void intel_backlight_register(struct drm_device *dev)
+{
+ struct intel_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, base.head)
+ intel_backlight_device_register(connector);
+}
+
+void intel_backlight_unregister(struct drm_device *dev)
+{
+ struct intel_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, base.head)
+ intel_backlight_device_unregister(connector);
+}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ad2fd605f76..1f4b56e273c 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -30,9 +30,6 @@
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
-#include <linux/vgaarb.h>
-#include <drm/i915_powerwell.h>
-#include <linux/pm_runtime.h>
/**
* RC6 is a special power stage which allows the GPU to enter a very
@@ -66,11 +63,37 @@
* i915.i915_enable_fbc parameter
*/
+static void gen9_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /*
+ * WaDisableSDEUnitClockGating:skl
+ * This seems to be a pre-production w/a.
+ */
+ I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+ GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
+
+ /*
+ * WaDisableDgMirrorFixInHalfSliceChicken5:skl
+ * This is a pre-production w/a.
+ */
+ I915_WRITE(GEN9_HALF_SLICE_CHICKEN5,
+ I915_READ(GEN9_HALF_SLICE_CHICKEN5) &
+ ~GEN9_DG_MIRROR_FIX_ENABLE);
+
+ /* Wa4x4STCOptimizationDisable:skl */
+ I915_WRITE(CACHE_MODE_1,
+ _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
+}
+
static void i8xx_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 fbc_ctl;
+ dev_priv->fbc.enabled = false;
+
/* Disable compression */
fbc_ctl = I915_READ(FBC_CONTROL);
if ((fbc_ctl & FBC_CTL_EN) == 0)
@@ -99,6 +122,8 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc)
int i;
u32 fbc_ctl;
+ dev_priv->fbc.enabled = true;
+
cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
if (fb->pitches[0] < cfb_pitch)
cfb_pitch = fb->pitches[0];
@@ -153,6 +178,8 @@ static void g4x_enable_fbc(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
+ dev_priv->fbc.enabled = true;
+
dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
@@ -173,6 +200,8 @@ static void g4x_disable_fbc(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpfc_ctl;
+ dev_priv->fbc.enabled = false;
+
/* Disable compression */
dpfc_ctl = I915_READ(DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
@@ -224,6 +253,8 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
+ dev_priv->fbc.enabled = true;
+
dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
dev_priv->fbc.threshold++;
@@ -264,6 +295,8 @@ static void ironlake_disable_fbc(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpfc_ctl;
+ dev_priv->fbc.enabled = false;
+
/* Disable compression */
dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
@@ -290,6 +323,8 @@ static void gen7_enable_fbc(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
+ dev_priv->fbc.enabled = true;
+
dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
dev_priv->fbc.threshold++;
@@ -339,19 +374,19 @@ bool intel_fbc_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!dev_priv->display.fbc_enabled)
- return false;
-
- return dev_priv->display.fbc_enabled(dev);
+ return dev_priv->fbc.enabled;
}
-void gen8_fbc_sw_flush(struct drm_device *dev, u32 value)
+void bdw_fbc_sw_flush(struct drm_device *dev, u32 value)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (!IS_GEN8(dev))
return;
+ if (!intel_fbc_enabled(dev))
+ return;
+
I915_WRITE(MSG_FBC_REND_STATE, value);
}
@@ -1310,6 +1345,7 @@ static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
int *prec_mult,
int *drain_latency)
{
+ struct drm_device *dev = crtc->dev;
int entries;
int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
@@ -1320,8 +1356,12 @@ static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
return false;
entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
- *prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_64 :
- DRAIN_LATENCY_PRECISION_32;
+ if (IS_CHERRYVIEW(dev))
+ *prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_32 :
+ DRAIN_LATENCY_PRECISION_16;
+ else
+ *prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_64 :
+ DRAIN_LATENCY_PRECISION_32;
*drain_latency = (64 * (*prec_mult) * 4) / entries;
if (*drain_latency > DRAIN_LATENCY_MASK)
@@ -1340,15 +1380,18 @@ static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
static void vlv_update_drain_latency(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pixel_size;
int drain_latency;
enum pipe pipe = intel_crtc->pipe;
int plane_prec, prec_mult, plane_dl;
+ const int high_precision = IS_CHERRYVIEW(dev) ?
+ DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64;
- plane_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_PLANE_PRECISION_64 |
- DRAIN_LATENCY_MASK | DDL_CURSOR_PRECISION_64 |
+ plane_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_PLANE_PRECISION_HIGH |
+ DRAIN_LATENCY_MASK | DDL_CURSOR_PRECISION_HIGH |
(DRAIN_LATENCY_MASK << DDL_CURSOR_SHIFT));
if (!intel_crtc_active(crtc)) {
@@ -1359,9 +1402,9 @@ static void vlv_update_drain_latency(struct drm_crtc *crtc)
/* Primary plane Drain Latency */
pixel_size = crtc->primary->fb->bits_per_pixel / 8; /* BPP */
if (vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
- plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ?
- DDL_PLANE_PRECISION_64 :
- DDL_PLANE_PRECISION_32;
+ plane_prec = (prec_mult == high_precision) ?
+ DDL_PLANE_PRECISION_HIGH :
+ DDL_PLANE_PRECISION_LOW;
plane_dl |= plane_prec | drain_latency;
}
@@ -1373,9 +1416,9 @@ static void vlv_update_drain_latency(struct drm_crtc *crtc)
/* Program cursor DL only if it is enabled */
if (intel_crtc->cursor_base &&
vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
- plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ?
- DDL_CURSOR_PRECISION_64 :
- DDL_CURSOR_PRECISION_32;
+ plane_prec = (prec_mult == high_precision) ?
+ DDL_CURSOR_PRECISION_HIGH :
+ DDL_CURSOR_PRECISION_LOW;
plane_dl |= plane_prec | (drain_latency << DDL_CURSOR_SHIFT);
}
@@ -1543,15 +1586,17 @@ static void valleyview_update_sprite_wm(struct drm_plane *plane,
int plane_prec;
int sprite_dl;
int prec_mult;
+ const int high_precision = IS_CHERRYVIEW(dev) ?
+ DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64;
- sprite_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_SPRITE_PRECISION_64(sprite) |
+ sprite_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_SPRITE_PRECISION_HIGH(sprite) |
(DRAIN_LATENCY_MASK << DDL_SPRITE_SHIFT(sprite)));
if (enabled && vlv_compute_drain_latency(crtc, pixel_size, &prec_mult,
&drain_latency)) {
- plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ?
- DDL_SPRITE_PRECISION_64(sprite) :
- DDL_SPRITE_PRECISION_32(sprite);
+ plane_prec = (prec_mult == high_precision) ?
+ DDL_SPRITE_PRECISION_HIGH(sprite) :
+ DDL_SPRITE_PRECISION_LOW(sprite);
sprite_dl |= plane_prec |
(drain_latency << DDL_SPRITE_SHIFT(sprite));
}
@@ -1915,6 +1960,14 @@ static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
}
+struct skl_pipe_wm_parameters {
+ bool active;
+ uint32_t pipe_htotal;
+ uint32_t pixel_rate; /* in KHz */
+ struct intel_plane_wm_parameters plane[I915_MAX_PLANES];
+ struct intel_plane_wm_parameters cursor;
+};
+
struct ilk_pipe_wm_parameters {
bool active;
uint32_t pipe_htotal;
@@ -2226,11 +2279,82 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
PIPE_WM_LINETIME_TIME(linetime);
}
-static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5])
+static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ if (IS_GEN9(dev)) {
+ uint32_t val;
+ int ret, i;
+ int level, max_level = ilk_wm_max_level(dev);
+
+ /* read the first set of memory latencies[0:3] */
+ val = 0; /* data0 to be programmed to 0 for first set */
+ mutex_lock(&dev_priv->rps.hw_lock);
+ ret = sandybridge_pcode_read(dev_priv,
+ GEN9_PCODE_READ_MEM_LATENCY,
+ &val);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ if (ret) {
+ DRM_ERROR("SKL Mailbox read error = %d\n", ret);
+ return;
+ }
+
+ wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
+ wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
+ GEN9_MEM_LATENCY_LEVEL_MASK;
+ wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
+ GEN9_MEM_LATENCY_LEVEL_MASK;
+ wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
+ GEN9_MEM_LATENCY_LEVEL_MASK;
+
+ /* read the second set of memory latencies[4:7] */
+ val = 1; /* data0 to be programmed to 1 for second set */
+ mutex_lock(&dev_priv->rps.hw_lock);
+ ret = sandybridge_pcode_read(dev_priv,
+ GEN9_PCODE_READ_MEM_LATENCY,
+ &val);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+ if (ret) {
+ DRM_ERROR("SKL Mailbox read error = %d\n", ret);
+ return;
+ }
+
+ wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
+ wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
+ GEN9_MEM_LATENCY_LEVEL_MASK;
+ wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
+ GEN9_MEM_LATENCY_LEVEL_MASK;
+ wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
+ GEN9_MEM_LATENCY_LEVEL_MASK;
+
+ /*
+ * punit doesn't take into account the read latency so we need
+ * to add 2us to the various latency levels we retrieve from
+ * the punit.
+ * - W0 is a bit special in that it's the only level that
+ * can't be disabled if we want to have display working, so
+ * we always add 2us there.
+ * - For levels >=1, punit returns 0us latency when they are
+ * disabled, so we respect that and don't add 2us then
+ *
+ * Additionally, if a level n (n > 1) has a 0us latency, all
+ * levels m (m >= n) need to be disabled. We make sure to
+ * sanitize the values out of the punit to satisfy this
+ * requirement.
+ */
+ wm[0] += 2;
+ for (level = 1; level <= max_level; level++)
+ if (wm[level] != 0)
+ wm[level] += 2;
+ else {
+ for (i = level + 1; i <= max_level; i++)
+ wm[i] = 0;
+
+ break;
+ }
+ } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
uint64_t sskpd = I915_READ64(MCH_SSKPD);
wm[0] = (sskpd >> 56) & 0xFF;
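The gen9 branch above fixes up the raw punit values: level 0 always gets the extra 2us read latency, higher levels get it only when non-zero, and the first zero level disables every level above it. A stand-alone sketch of that sanitization step on a plain array, with made-up input values:

#include <stdint.h>
#include <stdio.h>

#define READ_LATENCY_US 2

/* +2us on WM0, +2us on non-zero levels, and zero out every level at or
 * above the first disabled (0us) one. */
static void sanitize_latencies(uint16_t *wm, int max_level)
{
        int level, i;

        wm[0] += READ_LATENCY_US;
        for (level = 1; level <= max_level; level++) {
                if (wm[level] != 0) {
                        wm[level] += READ_LATENCY_US;
                } else {
                        for (i = level + 1; i <= max_level; i++)
                                wm[i] = 0;
                        break;
                }
        }
}

int main(void)
{
        /* e.g. raw punit values where level 3 is reported as disabled */
        uint16_t wm[8] = { 4, 8, 16, 0, 32, 0, 0, 0 };

        sanitize_latencies(wm, 7);

        for (int i = 0; i < 8; i++)
                printf("WM%d latency: %u us\n", i, wm[i]);
        /* -> 6, 10, 18, 0, 0, 0, 0, 0 */
        return 0;
}
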
@@ -2278,7 +2402,9 @@ static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
int ilk_wm_max_level(const struct drm_device *dev)
{
/* how many WM levels are we expecting */
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_GEN9(dev))
+ return 7;
+ else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
return 4;
else if (INTEL_INFO(dev)->gen >= 6)
return 3;
@@ -2288,7 +2414,7 @@ int ilk_wm_max_level(const struct drm_device *dev)
static void intel_print_wm_latency(struct drm_device *dev,
const char *name,
- const uint16_t wm[5])
+ const uint16_t wm[8])
{
int level, max_level = ilk_wm_max_level(dev);
@@ -2301,8 +2427,13 @@ static void intel_print_wm_latency(struct drm_device *dev,
continue;
}
- /* WM1+ latency values in 0.5us units */
- if (level > 0)
+ /*
+ * - latencies are in us on gen9.
+ * - before then, WM1+ latency values are in 0.5us units
+ */
+ if (IS_GEN9(dev))
+ latency *= 10;
+ else if (level > 0)
latency *= 5;
DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
@@ -2370,6 +2501,14 @@ static void ilk_setup_wm_latency(struct drm_device *dev)
snb_wm_latency_quirk(dev);
}
+static void skl_setup_wm_latency(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
+ intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
+}
+
static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
struct ilk_pipe_wm_parameters *p)
{
@@ -2860,6 +2999,769 @@ static bool ilk_disable_lp_wm(struct drm_device *dev)
return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}
+/*
+ * On gen9, we need to allocate Display Data Buffer (DDB) portions to the
+ * different active planes.
+ */
+
+#define SKL_DDB_SIZE 896 /* in blocks */
+
+static void
+skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
+ struct drm_crtc *for_crtc,
+ const struct intel_wm_config *config,
+ const struct skl_pipe_wm_parameters *params,
+ struct skl_ddb_entry *alloc /* out */)
+{
+ struct drm_crtc *crtc;
+ unsigned int pipe_size, ddb_size;
+ int nth_active_pipe;
+
+ if (!params->active) {
+ alloc->start = 0;
+ alloc->end = 0;
+ return;
+ }
+
+ ddb_size = SKL_DDB_SIZE;
+
+ ddb_size -= 4; /* 4 blocks for bypass path allocation */
+
+ nth_active_pipe = 0;
+ for_each_crtc(dev, crtc) {
+ if (!intel_crtc_active(crtc))
+ continue;
+
+ if (crtc == for_crtc)
+ break;
+
+ nth_active_pipe++;
+ }
+
+ pipe_size = ddb_size / config->num_pipes_active;
+ alloc->start = nth_active_pipe * ddb_size / config->num_pipes_active;
+ alloc->end = alloc->start + pipe_size;
+}
+
+static unsigned int skl_cursor_allocation(const struct intel_wm_config *config)
+{
+ if (config->num_pipes_active == 1)
+ return 32;
+
+ return 8;
+}
+
+static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
+{
+ entry->start = reg & 0x3ff;
+ entry->end = (reg >> 16) & 0x3ff;
+ if (entry->end)
+ entry->end += 1;
+}
+
+void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
+ struct skl_ddb_allocation *ddb /* out */)
+{
+ struct drm_device *dev = dev_priv->dev;
+ enum pipe pipe;
+ int plane;
+ u32 val;
+
+ for_each_pipe(dev_priv, pipe) {
+ for_each_plane(pipe, plane) {
+ val = I915_READ(PLANE_BUF_CFG(pipe, plane));
+ skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
+ val);
+ }
+
+ val = I915_READ(CUR_BUF_CFG(pipe));
+ skl_ddb_entry_init_from_hw(&ddb->cursor[pipe], val);
+ }
+}
+
+static unsigned int
+skl_plane_relative_data_rate(const struct intel_plane_wm_parameters *p)
+{
+ return p->horiz_pixels * p->vert_pixels * p->bytes_per_pixel;
+}
+
+/*
+ * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
+ * an 8192x4096@32bpp framebuffer:
+ * 3 * 4096 * 8192 * 4 < 2^32
+ */
+static unsigned int
+skl_get_total_relative_data_rate(struct intel_crtc *intel_crtc,
+ const struct skl_pipe_wm_parameters *params)
+{
+ unsigned int total_data_rate = 0;
+ int plane;
+
+ for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
+ const struct intel_plane_wm_parameters *p;
+
+ p = &params->plane[plane];
+ if (!p->enabled)
+ continue;
+
+ total_data_rate += skl_plane_relative_data_rate(p);
+ }
+
+ return total_data_rate;
+}
+
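The worst case quoted in the comment above stays comfortably inside 32 bits: 3 planes * 8192 * 4096 pixels * 4 bytes = 402,653,184, well under 2^32 = 4,294,967,296. A one-line check of that arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t worst = 3ULL * 8192 * 4096 * 4;   /* 3 planes, 8192x4096 @ 32bpp */

        printf("%llu < %llu : %d\n",
               (unsigned long long)worst,
               (unsigned long long)(1ULL << 32),
               worst < (1ULL << 32));              /* prints ... : 1 */
        return 0;
}
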
+static void
+skl_allocate_pipe_ddb(struct drm_crtc *crtc,
+ const struct intel_wm_config *config,
+ const struct skl_pipe_wm_parameters *params,
+ struct skl_ddb_allocation *ddb /* out */)
+{
+ struct drm_device *dev = crtc->dev;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
+ uint16_t alloc_size, start, cursor_blocks;
+ unsigned int total_data_rate;
+ int plane;
+
+ skl_ddb_get_pipe_allocation_limits(dev, crtc, config, params, alloc);
+ alloc_size = skl_ddb_entry_size(alloc);
+ if (alloc_size == 0) {
+ memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
+ memset(&ddb->cursor[pipe], 0, sizeof(ddb->cursor[pipe]));
+ return;
+ }
+
+ cursor_blocks = skl_cursor_allocation(config);
+ ddb->cursor[pipe].start = alloc->end - cursor_blocks;
+ ddb->cursor[pipe].end = alloc->end;
+
+ alloc_size -= cursor_blocks;
+ alloc->end -= cursor_blocks;
+
+ /*
+ * Each active plane gets a portion of the remaining space, in
+ * proportion to the amount of data they need to fetch from memory.
+ *
+ * FIXME: we may not allocate every single block here.
+ */
+ total_data_rate = skl_get_total_relative_data_rate(intel_crtc, params);
+
+ start = alloc->start;
+ for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
+ const struct intel_plane_wm_parameters *p;
+ unsigned int data_rate;
+ uint16_t plane_blocks;
+
+ p = &params->plane[plane];
+ if (!p->enabled)
+ continue;
+
+ data_rate = skl_plane_relative_data_rate(p);
+
+ /*
+ * promote the expression to 64 bits to avoid overflowing, the
+ * result is < available as data_rate / total_data_rate < 1
+ */
+ plane_blocks = div_u64((uint64_t)alloc_size * data_rate,
+ total_data_rate);
+
+ ddb->plane[pipe][plane].start = start;
+ ddb->plane[pipe][plane].end = start + plane_blocks;
+
+ start += plane_blocks;
+ }
+
+}
+
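skl_allocate_pipe_ddb() above carves the cursor reservation off the end of the pipe's DDB span and then splits what is left between the enabled planes in proportion to their data rate, using a 64-bit intermediate so the multiply cannot overflow. A small worked example of that split, assuming a single active pipe (896 - 4 bypass blocks, minus the 32-block cursor reservation, i.e. 860 blocks) and made-up plane data rates:

#include <stdint.h>
#include <stdio.h>

/* Proportional split of alloc_size blocks between planes, mirroring
 * plane_blocks = alloc_size * data_rate / total_data_rate. */
int main(void)
{
        const unsigned int data_rate[] = {
                1920 * 1080 * 4,        /* primary plane, 32bpp */
                1280 * 720 * 2,         /* sprite plane, 16bpp  */
        };
        const unsigned int nplanes = 2;
        const uint16_t alloc_size = 860;        /* pipe span minus cursor blocks */
        unsigned int total = 0, i;
        uint16_t start = 0;

        for (i = 0; i < nplanes; i++)
                total += data_rate[i];

        for (i = 0; i < nplanes; i++) {
                /* 64-bit intermediate; result < alloc_size since rate/total < 1 */
                uint16_t blocks = (uint16_t)(((uint64_t)alloc_size *
                                              data_rate[i]) / total);

                printf("plane %u: blocks [%u, %u)\n", i, start, start + blocks);
                start += blocks;
        }
        return 0;
}
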
+static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_config *config)
+{
+ /* TODO: Take into account the scalers once we support them */
+ return config->adjusted_mode.crtc_clock;
+}
+
+/*
+ * The max latency should be 257 (max the punit can code is 255 and we add 2us
+ * for the read latency) and bytes_per_pixel should always be <= 8, so that
+ * should allow pixel_rate up to ~2 GHz which seems sufficient since max
+ * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
+*/
+static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
+ uint32_t latency)
+{
+ uint32_t wm_intermediate_val, ret;
+
+ if (latency == 0)
+ return UINT_MAX;
+
+ wm_intermediate_val = latency * pixel_rate * bytes_per_pixel;
+ ret = DIV_ROUND_UP(wm_intermediate_val, 1000);
+
+ return ret;
+}
+
+static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
+ uint32_t horiz_pixels, uint8_t bytes_per_pixel,
+ uint32_t latency)
+{
+ uint32_t ret, plane_bytes_per_line, wm_intermediate_val;
+
+ if (latency == 0)
+ return UINT_MAX;
+
+ plane_bytes_per_line = horiz_pixels * bytes_per_pixel;
+ wm_intermediate_val = latency * pixel_rate;
+ ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
+ plane_bytes_per_line;
+
+ return ret;
+}
+
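Method 1 above scales the latency by the pixel rate and bytes per pixel (the /1000 reconciles a us latency with a kHz pixel clock, giving bytes fetched during the latency window), while method 2 first converts the latency into a number of lines and multiplies by the bytes per line. A small numeric check of both formulas with illustrative display timings:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* latency [us] * pixel_rate [kHz] * Bpp / 1000 -> bytes fetched during latency */
static uint32_t wm_method1(uint32_t pixel_rate, uint8_t bpp, uint32_t latency)
{
        if (latency == 0)
                return UINT32_MAX;
        return DIV_ROUND_UP(latency * pixel_rate * bpp, 1000);
}

/* lines elapsed during the latency window, rounded up, times bytes per line */
static uint32_t wm_method2(uint32_t pixel_rate, uint32_t htotal,
                           uint32_t width, uint8_t bpp, uint32_t latency)
{
        uint32_t bytes_per_line = width * bpp;

        if (latency == 0)
                return UINT32_MAX;
        return DIV_ROUND_UP(latency * pixel_rate, htotal * 1000) * bytes_per_line;
}

int main(void)
{
        /* roughly 1920x1080@60: 148500 kHz pixel clock, htotal 2200, 32bpp, 10us */
        uint32_t m1 = wm_method1(148500, 4, 10);
        uint32_t m2 = wm_method2(148500, 2200, 1920, 4, 10);

        printf("method1 = %u bytes, method2 = %u bytes\n", m1, m2);
        return 0;
}
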
+static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb,
+ const struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
+ enum pipe pipe = intel_crtc->pipe;
+
+ if (memcmp(new_ddb->plane[pipe], cur_ddb->plane[pipe],
+ sizeof(new_ddb->plane[pipe])))
+ return true;
+
+ if (memcmp(&new_ddb->cursor[pipe], &cur_ddb->cursor[pipe],
+ sizeof(new_ddb->cursor[pipe])))
+ return true;
+
+ return false;
+}
+
+static void skl_compute_wm_global_parameters(struct drm_device *dev,
+ struct intel_wm_config *config)
+{
+ struct drm_crtc *crtc;
+ struct drm_plane *plane;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ config->num_pipes_active += intel_crtc_active(crtc);
+
+ /* FIXME: I don't think we need those two global parameters on SKL */
+ list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+
+ config->sprites_enabled |= intel_plane->wm.enabled;
+ config->sprites_scaled |= intel_plane->wm.scaled;
+ }
+}
+
+static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
+ struct skl_pipe_wm_parameters *p)
+{
+ struct drm_device *dev = crtc->dev;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ struct drm_plane *plane;
+ int i = 1; /* sprite plane indices start at 1 */
+
+ p->active = intel_crtc_active(crtc);
+ if (p->active) {
+ p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
+ p->pixel_rate = skl_pipe_pixel_rate(&intel_crtc->config);
+
+ /*
+ * For now, assume primary and cursor planes are always enabled.
+ */
+ p->plane[0].enabled = true;
+ p->plane[0].bytes_per_pixel =
+ crtc->primary->fb->bits_per_pixel / 8;
+ p->plane[0].horiz_pixels = intel_crtc->config.pipe_src_w;
+ p->plane[0].vert_pixels = intel_crtc->config.pipe_src_h;
+
+ p->cursor.enabled = true;
+ p->cursor.bytes_per_pixel = 4;
+ p->cursor.horiz_pixels = intel_crtc->cursor_width ?
+ intel_crtc->cursor_width : 64;
+ }
+
+ list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+
+ if (intel_plane->pipe == pipe)
+ p->plane[i++] = intel_plane->wm;
+ }
+}
+
+static bool skl_compute_plane_wm(struct skl_pipe_wm_parameters *p,
+ struct intel_plane_wm_parameters *p_params,
+ uint16_t ddb_allocation,
+ uint32_t mem_value,
+ uint16_t *out_blocks, /* out */
+ uint8_t *out_lines /* out */)
+{
+ uint32_t method1, method2, plane_bytes_per_line, res_blocks, res_lines;
+ uint32_t result_bytes;
+
+ if (mem_value == 0 || !p->active || !p_params->enabled)
+ return false;
+
+ method1 = skl_wm_method1(p->pixel_rate,
+ p_params->bytes_per_pixel,
+ mem_value);
+ method2 = skl_wm_method2(p->pixel_rate,
+ p->pipe_htotal,
+ p_params->horiz_pixels,
+ p_params->bytes_per_pixel,
+ mem_value);
+
+ plane_bytes_per_line = p_params->horiz_pixels *
+ p_params->bytes_per_pixel;
+
+ /* For now, X-tiled and linear only */
+ if (((ddb_allocation * 512) / plane_bytes_per_line) >= 1)
+ result_bytes = min(method1, method2);
+ else
+ result_bytes = method1;
+
+ res_blocks = DIV_ROUND_UP(result_bytes, 512) + 1;
+ res_lines = DIV_ROUND_UP(result_bytes, plane_bytes_per_line);
+
+ if (res_blocks > ddb_allocation || res_lines > 31)
+ return false;
+
+ *out_blocks = res_blocks;
+ *out_lines = res_lines;
+
+ return true;
+}
+
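+/*
+ * A hypothetical walk-through of skl_compute_plane_wm() above: a 1920
+ * pixel wide plane at 4 bytes/pixel gives plane_bytes_per_line = 7680.
+ * With a ddb_allocation of 120 blocks, (120 * 512) / 7680 = 8 >= 1, so
+ * result_bytes = min(method1, method2). If result_bytes came out as 6000,
+ * then res_blocks = DIV_ROUND_UP(6000, 512) + 1 = 13 and
+ * res_lines = DIV_ROUND_UP(6000, 7680) = 1; since 13 <= 120 and 1 <= 31,
+ * the level is reported as valid with *out_blocks = 13, *out_lines = 1.
+ * All numbers here are made up for illustration.
+ */
+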
+static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
+ struct skl_ddb_allocation *ddb,
+ struct skl_pipe_wm_parameters *p,
+ enum pipe pipe,
+ int level,
+ int num_planes,
+ struct skl_wm_level *result)
+{
+ uint16_t latency = dev_priv->wm.skl_latency[level];
+ uint16_t ddb_blocks;
+ int i;
+
+ for (i = 0; i < num_planes; i++) {
+ ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
+
+ result->plane_en[i] = skl_compute_plane_wm(p, &p->plane[i],
+ ddb_blocks,
+ latency,
+ &result->plane_res_b[i],
+ &result->plane_res_l[i]);
+ }
+
+ ddb_blocks = skl_ddb_entry_size(&ddb->cursor[pipe]);
+ result->cursor_en = skl_compute_plane_wm(p, &p->cursor, ddb_blocks,
+ latency, &result->cursor_res_b,
+ &result->cursor_res_l);
+}
+
+static uint32_t
+skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p)
+{
+ if (!intel_crtc_active(crtc))
+ return 0;
+
+ return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate);
+
+}
+
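+/*
+ * The linetime value above is the line time expressed in 0.125 us units,
+ * since 8 * pipe_htotal * 1000 / pixel_rate(kHz) = 8 * line_time(us).
+ * A hypothetical example: pipe_htotal = 2200 and pixel_rate = 148500 kHz
+ * give DIV_ROUND_UP(8 * 2200 * 1000, 148500) = 119, i.e. a ~14.8 us line.
+ */
+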
+static void skl_compute_transition_wm(struct drm_crtc *crtc,
+ struct skl_pipe_wm_parameters *params,
+ struct skl_wm_level *trans_wm /* out */)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int i;
+
+ if (!params->active)
+ return;
+
+ /* Until we know more, just disable transition WMs */
+ for (i = 0; i < intel_num_planes(intel_crtc); i++)
+ trans_wm->plane_en[i] = false;
+ trans_wm->cursor_en = false;
+}
+
+static void skl_compute_pipe_wm(struct drm_crtc *crtc,
+ struct skl_ddb_allocation *ddb,
+ struct skl_pipe_wm_parameters *params,
+ struct skl_pipe_wm *pipe_wm)
+{
+ struct drm_device *dev = crtc->dev;
+ const struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int level, max_level = ilk_wm_max_level(dev);
+
+ for (level = 0; level <= max_level; level++) {
+ skl_compute_wm_level(dev_priv, ddb, params, intel_crtc->pipe,
+ level, intel_num_planes(intel_crtc),
+ &pipe_wm->wm[level]);
+ }
+ pipe_wm->linetime = skl_compute_linetime_wm(crtc, params);
+
+ skl_compute_transition_wm(crtc, params, &pipe_wm->trans_wm);
+}
+
+static void skl_compute_wm_results(struct drm_device *dev,
+ struct skl_pipe_wm_parameters *p,
+ struct skl_pipe_wm *p_wm,
+ struct skl_wm_values *r,
+ struct intel_crtc *intel_crtc)
+{
+ int level, max_level = ilk_wm_max_level(dev);
+ enum pipe pipe = intel_crtc->pipe;
+ uint32_t temp;
+ int i;
+
+ for (level = 0; level <= max_level; level++) {
+ for (i = 0; i < intel_num_planes(intel_crtc); i++) {
+ temp = 0;
+
+ temp |= p_wm->wm[level].plane_res_l[i] <<
+ PLANE_WM_LINES_SHIFT;
+ temp |= p_wm->wm[level].plane_res_b[i];
+ if (p_wm->wm[level].plane_en[i])
+ temp |= PLANE_WM_EN;
+
+ r->plane[pipe][i][level] = temp;
+ }
+
+ temp = 0;
+
+ temp |= p_wm->wm[level].cursor_res_l << PLANE_WM_LINES_SHIFT;
+ temp |= p_wm->wm[level].cursor_res_b;
+
+ if (p_wm->wm[level].cursor_en)
+ temp |= PLANE_WM_EN;
+
+ r->cursor[pipe][level] = temp;
+
+ }
+
+ /* transition WMs */
+ for (i = 0; i < intel_num_planes(intel_crtc); i++) {
+ temp = 0;
+ temp |= p_wm->trans_wm.plane_res_l[i] << PLANE_WM_LINES_SHIFT;
+ temp |= p_wm->trans_wm.plane_res_b[i];
+ if (p_wm->trans_wm.plane_en[i])
+ temp |= PLANE_WM_EN;
+
+ r->plane_trans[pipe][i] = temp;
+ }
+
+ temp = 0;
+ temp |= p_wm->trans_wm.cursor_res_l << PLANE_WM_LINES_SHIFT;
+ temp |= p_wm->trans_wm.cursor_res_b;
+ if (p_wm->trans_wm.cursor_en)
+ temp |= PLANE_WM_EN;
+
+ r->cursor_trans[pipe] = temp;
+
+ r->wm_linetime[pipe] = p_wm->linetime;
+}
+
+static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, uint32_t reg,
+ const struct skl_ddb_entry *entry)
+{
+ if (entry->end)
+ I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
+ else
+ I915_WRITE(reg, 0);
+}
+
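+/*
+ * Example of the encoding above, with hypothetical values: an entry with
+ * start = 0 and end = 160 is written as (159 << 16) | 0 = 0x009f0000,
+ * suggesting the upper register field holds the inclusive end block,
+ * while an entry with end == 0 simply writes 0 to disable the allocation.
+ */
+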
+static void skl_write_wm_values(struct drm_i915_private *dev_priv,
+ const struct skl_wm_values *new)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct intel_crtc *crtc;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+ int i, level, max_level = ilk_wm_max_level(dev);
+ enum pipe pipe = crtc->pipe;
+
+ if (!new->dirty[pipe])
+ continue;
+
+ I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]);
+
+ for (level = 0; level <= max_level; level++) {
+ for (i = 0; i < intel_num_planes(crtc); i++)
+ I915_WRITE(PLANE_WM(pipe, i, level),
+ new->plane[pipe][i][level]);
+ I915_WRITE(CUR_WM(pipe, level),
+ new->cursor[pipe][level]);
+ }
+ for (i = 0; i < intel_num_planes(crtc); i++)
+ I915_WRITE(PLANE_WM_TRANS(pipe, i),
+ new->plane_trans[pipe][i]);
+ I915_WRITE(CUR_WM_TRANS(pipe), new->cursor_trans[pipe]);
+
+ for (i = 0; i < intel_num_planes(crtc); i++)
+ skl_ddb_entry_write(dev_priv,
+ PLANE_BUF_CFG(pipe, i),
+ &new->ddb.plane[pipe][i]);
+
+ skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
+ &new->ddb.cursor[pipe]);
+ }
+}
+
+/*
+ * When setting up a new DDB allocation arrangement, we need to correctly
+ * sequence the times at which the new allocations for the pipes are taken into
+ * account or we'll have pipes fetching from space previously allocated to
+ * another pipe.
+ *
+ * Roughly the sequence looks like:
+ * 1. re-allocate the pipe(s) with the allocation being reduced and not
+ * overlapping with a previously enabled pipe (another way to put it is:
+ * pipes with their new allocation strictly included in their old ones).
+ * 2. re-allocate the other pipes that get their allocation reduced
+ * 3. allocate the pipes having their allocation increased
+ *
+ * Steps 1. and 2. are here to take care of the following case:
+ * - Initially DDB looks like this:
+ * | B | C |
+ * - enable pipe A.
+ * - pipe B has a reduced DDB allocation that overlaps with the old pipe C
+ * allocation
+ * | A | B | C |
+ *
+ * We need to sequence the re-allocation: C, B, A (and not B, C, A).
+ */
+
+static void
+skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass)
+{
+ struct drm_device *dev = dev_priv->dev;
+ int plane;
+
+ DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass);
+
+ for_each_plane(pipe, plane) {
+ I915_WRITE(PLANE_SURF(pipe, plane),
+ I915_READ(PLANE_SURF(pipe, plane)));
+ }
+ I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
+}
+
+static bool
+skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
+ const struct skl_ddb_allocation *new,
+ enum pipe pipe)
+{
+ uint16_t old_size, new_size;
+
+ old_size = skl_ddb_entry_size(&old->pipe[pipe]);
+ new_size = skl_ddb_entry_size(&new->pipe[pipe]);
+
+ return old_size != new_size &&
+ new->pipe[pipe].start >= old->pipe[pipe].start &&
+ new->pipe[pipe].end <= old->pipe[pipe].end;
+}
+
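+/*
+ * A hypothetical example of the test above: an old allocation of
+ * [0, 300) and a new one of [50, 200) have different sizes (300 vs 150)
+ * and the new range lies entirely within the old one, so the function
+ * returns true and the pipe can be flushed in the first pass.
+ */
+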
+static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
+ struct skl_wm_values *new_values)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct skl_ddb_allocation *cur_ddb, *new_ddb;
+ bool reallocated[I915_MAX_PIPES] = {false, false, false};
+ struct intel_crtc *crtc;
+ enum pipe pipe;
+
+ new_ddb = &new_values->ddb;
+ cur_ddb = &dev_priv->wm.skl_hw.ddb;
+
+ /*
+ * First pass: flush the pipes whose new allocation is contained within
+ * their old space.
+ *
+ * We'll wait for the vblank on those pipes to ensure we can safely
+ * re-allocate the freed space without those pipes still fetching from it.
+ */
+ for_each_intel_crtc(dev, crtc) {
+ if (!crtc->active)
+ continue;
+
+ pipe = crtc->pipe;
+
+ if (!skl_ddb_allocation_included(cur_ddb, new_ddb, pipe))
+ continue;
+
+ skl_wm_flush_pipe(dev_priv, pipe, 1);
+ intel_wait_for_vblank(dev, pipe);
+
+ reallocated[pipe] = true;
+ }
+
+
+ /*
+ * Second pass: flush the pipes that are having their allocation
+ * reduced, but overlapping with a previous allocation.
+ *
+ * Here as well we need to wait for the vblank to make sure the freed
+ * space is not used anymore.
+ */
+ for_each_intel_crtc(dev, crtc) {
+ if (!crtc->active)
+ continue;
+
+ pipe = crtc->pipe;
+
+ if (reallocated[pipe])
+ continue;
+
+ if (skl_ddb_entry_size(&new_ddb->pipe[pipe]) <
+ skl_ddb_entry_size(&cur_ddb->pipe[pipe])) {
+ skl_wm_flush_pipe(dev_priv, pipe, 2);
+ intel_wait_for_vblank(dev, pipe);
+ }
+
+ reallocated[pipe] = true;
+ }
+
+ /*
+ * Third pass: flush the pipes that got more space allocated.
+ *
+ * We don't need to actively wait for the update here, next vblank
+ * will just get more DDB space with the correct WM values.
+ */
+ for_each_intel_crtc(dev, crtc) {
+ if (!crtc->active)
+ continue;
+
+ pipe = crtc->pipe;
+
+ /*
+ * At this point, only the pipes that got more space than before are
+ * left to re-allocate.
+ */
+ if (reallocated[pipe])
+ continue;
+
+ skl_wm_flush_pipe(dev_priv, pipe, 3);
+ }
+}
+
+static bool skl_update_pipe_wm(struct drm_crtc *crtc,
+ struct skl_pipe_wm_parameters *params,
+ struct intel_wm_config *config,
+ struct skl_ddb_allocation *ddb, /* out */
+ struct skl_pipe_wm *pipe_wm /* out */)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ skl_compute_wm_pipe_parameters(crtc, params);
+ skl_allocate_pipe_ddb(crtc, config, params, ddb);
+ skl_compute_pipe_wm(crtc, ddb, params, pipe_wm);
+
+ if (!memcmp(&intel_crtc->wm.skl_active, pipe_wm, sizeof(*pipe_wm)))
+ return false;
+
+ intel_crtc->wm.skl_active = *pipe_wm;
+ return true;
+}
+
+static void skl_update_other_pipe_wm(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct intel_wm_config *config,
+ struct skl_wm_values *r)
+{
+ struct intel_crtc *intel_crtc;
+ struct intel_crtc *this_crtc = to_intel_crtc(crtc);
+
+ /*
+ * If the WM update hasn't changed the allocation for this_crtc (the
+ * crtc we are currently computing the new WM values for), other
+ * enabled crtcs will keep the same allocation and we don't need to
+ * recompute anything for them.
+ */
+ if (!skl_ddb_allocation_changed(&r->ddb, this_crtc))
+ return;
+
+ /*
+ * Otherwise, because of this_crtc being freshly enabled/disabled, the
+ * other active pipes need new DDB allocation and WM values.
+ */
+ list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
+ base.head) {
+ struct skl_pipe_wm_parameters params = {};
+ struct skl_pipe_wm pipe_wm = {};
+ bool wm_changed;
+
+ if (this_crtc->pipe == intel_crtc->pipe)
+ continue;
+
+ if (!intel_crtc->active)
+ continue;
+
+ wm_changed = skl_update_pipe_wm(&intel_crtc->base,
+ &params, config,
+ &r->ddb, &pipe_wm);
+
+ /*
+ * If we end up re-computing the other pipe WM values, it's
+ * because it was really needed, so we expect the WM values to
+ * be different.
+ */
+ WARN_ON(!wm_changed);
+
+ skl_compute_wm_results(dev, &params, &pipe_wm, r, intel_crtc);
+ r->dirty[intel_crtc->pipe] = true;
+ }
+}
+
+static void skl_update_wm(struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct skl_pipe_wm_parameters params = {};
+ struct skl_wm_values *results = &dev_priv->wm.skl_results;
+ struct skl_pipe_wm pipe_wm = {};
+ struct intel_wm_config config = {};
+
+ memset(results, 0, sizeof(*results));
+
+ skl_compute_wm_global_parameters(dev, &config);
+
+ if (!skl_update_pipe_wm(crtc, &params, &config,
+ &results->ddb, &pipe_wm))
+ return;
+
+ skl_compute_wm_results(dev, &params, &pipe_wm, results, intel_crtc);
+ results->dirty[intel_crtc->pipe] = true;
+
+ skl_update_other_pipe_wm(dev, crtc, &config, results);
+ skl_write_wm_values(dev_priv, results);
+ skl_flush_wm_values(dev_priv, results);
+
+ /* store the new configuration */
+ dev_priv->wm.skl_hw = *results;
+}
+
+static void
+skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc,
+ uint32_t sprite_width, uint32_t sprite_height,
+ int pixel_size, bool enabled, bool scaled)
+{
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+
+ intel_plane->wm.enabled = enabled;
+ intel_plane->wm.scaled = scaled;
+ intel_plane->wm.horiz_pixels = sprite_width;
+ intel_plane->wm.vert_pixels = sprite_height;
+ intel_plane->wm.bytes_per_pixel = pixel_size;
+
+ skl_update_wm(crtc);
+}
+
static void ilk_update_wm(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -2934,6 +3836,113 @@ ilk_update_sprite_wm(struct drm_plane *plane,
ilk_update_wm(crtc);
}
+static void skl_pipe_wm_active_state(uint32_t val,
+ struct skl_pipe_wm *active,
+ bool is_transwm,
+ bool is_cursor,
+ int i,
+ int level)
+{
+ bool is_enabled = (val & PLANE_WM_EN) != 0;
+
+ if (!is_transwm) {
+ if (!is_cursor) {
+ active->wm[level].plane_en[i] = is_enabled;
+ active->wm[level].plane_res_b[i] =
+ val & PLANE_WM_BLOCKS_MASK;
+ active->wm[level].plane_res_l[i] =
+ (val >> PLANE_WM_LINES_SHIFT) &
+ PLANE_WM_LINES_MASK;
+ } else {
+ active->wm[level].cursor_en = is_enabled;
+ active->wm[level].cursor_res_b =
+ val & PLANE_WM_BLOCKS_MASK;
+ active->wm[level].cursor_res_l =
+ (val >> PLANE_WM_LINES_SHIFT) &
+ PLANE_WM_LINES_MASK;
+ }
+ } else {
+ if (!is_cursor) {
+ active->trans_wm.plane_en[i] = is_enabled;
+ active->trans_wm.plane_res_b[i] =
+ val & PLANE_WM_BLOCKS_MASK;
+ active->trans_wm.plane_res_l[i] =
+ (val >> PLANE_WM_LINES_SHIFT) &
+ PLANE_WM_LINES_MASK;
+ } else {
+ active->trans_wm.cursor_en = is_enabled;
+ active->trans_wm.cursor_res_b =
+ val & PLANE_WM_BLOCKS_MASK;
+ active->trans_wm.cursor_res_l =
+ (val >> PLANE_WM_LINES_SHIFT) &
+ PLANE_WM_LINES_MASK;
+ }
+ }
+}
+
+static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct skl_pipe_wm *active = &intel_crtc->wm.skl_active;
+ enum pipe pipe = intel_crtc->pipe;
+ int level, i, max_level;
+ uint32_t temp;
+
+ max_level = ilk_wm_max_level(dev);
+
+ hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
+
+ for (level = 0; level <= max_level; level++) {
+ for (i = 0; i < intel_num_planes(intel_crtc); i++)
+ hw->plane[pipe][i][level] =
+ I915_READ(PLANE_WM(pipe, i, level));
+ hw->cursor[pipe][level] = I915_READ(CUR_WM(pipe, level));
+ }
+
+ for (i = 0; i < intel_num_planes(intel_crtc); i++)
+ hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i));
+ hw->cursor_trans[pipe] = I915_READ(CUR_WM_TRANS(pipe));
+
+ if (!intel_crtc_active(crtc))
+ return;
+
+ hw->dirty[pipe] = true;
+
+ active->linetime = hw->wm_linetime[pipe];
+
+ for (level = 0; level <= max_level; level++) {
+ for (i = 0; i < intel_num_planes(intel_crtc); i++) {
+ temp = hw->plane[pipe][i][level];
+ skl_pipe_wm_active_state(temp, active, false,
+ false, i, level);
+ }
+ temp = hw->cursor[pipe][level];
+ skl_pipe_wm_active_state(temp, active, false, true, i, level);
+ }
+
+ for (i = 0; i < intel_num_planes(intel_crtc); i++) {
+ temp = hw->plane_trans[pipe][i];
+ skl_pipe_wm_active_state(temp, active, true, false, i, 0);
+ }
+
+ temp = hw->cursor_trans[pipe];
+ skl_pipe_wm_active_state(temp, active, true, true, i, 0);
+}
+
+void skl_wm_get_hw_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
+ struct drm_crtc *crtc;
+
+ skl_ddb_get_hw_state(dev_priv, ddb);
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ skl_pipe_wm_get_hw_state(crtc);
+}
+
static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -3442,7 +4451,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
dev_priv->rps.min_freq_softlimit);
if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
- & GENFREQSTATUS) == 0, 5))
+ & GENFREQSTATUS) == 0, 100))
DRM_ERROR("timed out waiting for Punit\n");
vlv_force_gfx_clock(dev_priv, false);
@@ -3495,14 +4504,8 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
"Odd GPU freq value\n"))
val &= ~1;
- if (val != dev_priv->rps.cur_freq) {
- DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
- dev_priv->rps.cur_freq,
- vlv_gpu_freq(dev_priv, val), val);
-
+ if (val != dev_priv->rps.cur_freq)
vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
- }
I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
@@ -3510,43 +4513,11 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
}
-static void gen8_disable_rps_interrupts(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- I915_WRITE(GEN6_PMINTRMSK, ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
- I915_WRITE(GEN8_GT_IER(2), I915_READ(GEN8_GT_IER(2)) &
- ~dev_priv->pm_rps_events);
- /* Complete PM interrupt masking here doesn't race with the rps work
- * item again unmasking PM interrupts because that is using a different
- * register (GEN8_GT_IMR(2)) to mask PM interrupts. The only risk is in
- * leaving stale bits in GEN8_GT_IIR(2) and GEN8_GT_IMR(2) which
- * gen8_enable_rps will clean up. */
-
- spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->rps.pm_iir = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
-
- I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
-}
-
-static void gen6_disable_rps_interrupts(struct drm_device *dev)
+static void gen9_disable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
- I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) &
- ~dev_priv->pm_rps_events);
- /* Complete PM interrupt masking here doesn't race with the rps work
- * item again unmasking PM interrupts because that is using a different
- * register (PMIMR) to mask PM interrupts. The only risk is in leaving
- * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
-
- spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->rps.pm_iir = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
-
- I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
+ I915_WRITE(GEN6_RC_CONTROL, 0);
}
static void gen6_disable_rps(struct drm_device *dev)
@@ -3555,11 +4526,6 @@ static void gen6_disable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_CONTROL, 0);
I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
-
- if (IS_BROADWELL(dev))
- gen8_disable_rps_interrupts(dev);
- else
- gen6_disable_rps_interrupts(dev);
}
static void cherryview_disable_rps(struct drm_device *dev)
@@ -3567,8 +4533,6 @@ static void cherryview_disable_rps(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(GEN6_RC_CONTROL, 0);
-
- gen8_disable_rps_interrupts(dev);
}
static void valleyview_disable_rps(struct drm_device *dev)
@@ -3582,8 +4546,6 @@ static void valleyview_disable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_CONTROL, 0);
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
-
- gen6_disable_rps_interrupts(dev);
}
static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
@@ -3594,10 +4556,15 @@ static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
else
mode = 0;
}
- DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
- (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
- (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
- (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
+ if (HAS_RC6p(dev))
+ DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
+ (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
+ (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
+ (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
+
+ else
+ DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n",
+ (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
}
static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
@@ -3614,7 +4581,7 @@ static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
if (enable_rc6 >= 0) {
int mask;
- if (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
+ if (HAS_RC6p(dev))
mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
INTEL_RC6pp_ENABLE;
else
@@ -3642,54 +4609,92 @@ int intel_enable_rc6(const struct drm_device *dev)
return i915.enable_rc6;
}
-static void gen8_enable_rps_interrupts(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- spin_lock_irq(&dev_priv->irq_lock);
- WARN_ON(dev_priv->rps.pm_iir);
- gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
- I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
- spin_unlock_irq(&dev_priv->irq_lock);
-}
-
-static void gen6_enable_rps_interrupts(struct drm_device *dev)
+static void gen6_init_rps_frequencies(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t rp_state_cap;
+ u32 ddcc_status = 0;
+ int ret;
- spin_lock_irq(&dev_priv->irq_lock);
- WARN_ON(dev_priv->rps.pm_iir);
- gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
- I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
- spin_unlock_irq(&dev_priv->irq_lock);
-}
-
-static void parse_rp_state_cap(struct drm_i915_private *dev_priv, u32 rp_state_cap)
-{
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
/* All of these values are in units of 50MHz */
dev_priv->rps.cur_freq = 0;
- /* static values from HW: RP0 < RPe < RP1 < RPn (min_freq) */
- dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
+ /* static values from HW: RP0 > RP1 > RPn (min_freq) */
dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
+ dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
- /* XXX: only BYT has a special efficient freq */
- dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
/* hw_max = RP0 until we check for overclocking */
dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
+ dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ ret = sandybridge_pcode_read(dev_priv,
+ HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
+ &ddcc_status);
+ if (0 == ret)
+ dev_priv->rps.efficient_freq =
+ (ddcc_status >> 8) & 0xff;
+ }
+
/* Preserve min/max settings in case of re-init */
if (dev_priv->rps.max_freq_softlimit == 0)
dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
- if (dev_priv->rps.min_freq_softlimit == 0)
- dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
+ if (dev_priv->rps.min_freq_softlimit == 0) {
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ dev_priv->rps.min_freq_softlimit =
+ /* max(RPe, 450 MHz) */
+ max(dev_priv->rps.efficient_freq, (u8) 9);
+ else
+ dev_priv->rps.min_freq_softlimit =
+ dev_priv->rps.min_freq;
+ }
+}
+
+static void gen9_enable_rps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ uint32_t rc6_mask = 0;
+ int unused;
+
+ /* 1a: Software RC state - RC0 */
+ I915_WRITE(GEN6_RC_STATE, 0);
+
+ /* 1b: Get forcewake during program sequence. Although the driver
+ * hasn't enabled a state yet where we need forcewake, BIOS may have. */
+ gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+
+ /* 2a: Disable RC states. */
+ I915_WRITE(GEN6_RC_CONTROL, 0);
+
+ /* 2b: Program RC6 thresholds.*/
+ I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
+ I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+ I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+ for_each_ring(ring, dev_priv, unused)
+ I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+ I915_WRITE(GEN6_RC_SLEEP, 0);
+ I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
+
+ /* 3a: Enable RC6 */
+ if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+ rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
+ DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
+ "on" : "off");
+ I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+ GEN6_RC_CTL_EI_MODE(1) |
+ rc6_mask);
+
+ gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+
}
static void gen8_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
- uint32_t rc6_mask = 0, rp_state_cap;
+ uint32_t rc6_mask = 0;
int unused;
/* 1a: Software RC state - RC0 */
@@ -3702,8 +4707,8 @@ static void gen8_enable_rps(struct drm_device *dev)
/* 2a: Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
- rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- parse_rp_state_cap(dev_priv, rp_state_cap);
+ /* Initialize rps frequencies */
+ gen6_init_rps_frequencies(dev);
/* 2b: Program RC6 thresholds.*/
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
@@ -3761,9 +4766,8 @@ static void gen8_enable_rps(struct drm_device *dev)
/* 6: Ring frequency + overclocking (our driver does this later) */
- gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
-
- gen8_enable_rps_interrupts(dev);
+ dev_priv->rps.power = HIGH_POWER; /* force a reset */
+ gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -3772,7 +4776,6 @@ static void gen6_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
- u32 rp_state_cap;
u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
u32 gtfifodbg;
int rc6_mode;
@@ -3796,9 +4799,8 @@ static void gen6_enable_rps(struct drm_device *dev)
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
- rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-
- parse_rp_state_cap(dev_priv, rp_state_cap);
+ /* Initialize rps frequencies */
+ gen6_init_rps_frequencies(dev);
/* disable the counters and set deterministic thresholds */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -3861,8 +4863,6 @@ static void gen6_enable_rps(struct drm_device *dev)
dev_priv->rps.power = HIGH_POWER; /* force a reset */
gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
- gen6_enable_rps_interrupts(dev);
-
rc6vids = 0;
ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
if (IS_GEN6(dev) && ret) {
@@ -3915,9 +4915,9 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
* to use for memory access. We do this by specifying the IA frequency
* the PCU should use as a reference to determine the ring frequency.
*/
- for (gpu_freq = dev_priv->rps.max_freq_softlimit; gpu_freq >= dev_priv->rps.min_freq_softlimit;
+ for (gpu_freq = dev_priv->rps.max_freq; gpu_freq >= dev_priv->rps.min_freq;
gpu_freq--) {
- int diff = dev_priv->rps.max_freq_softlimit - gpu_freq;
+ int diff = dev_priv->rps.max_freq - gpu_freq;
unsigned int ia_freq = 0, ring_freq = 0;
if (INTEL_INFO(dev)->gen >= 8) {
@@ -4072,12 +5072,15 @@ static void cherryview_setup_pctx(struct drm_device *dev)
pcbr = I915_READ(VLV_PCBR);
if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
+ DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
paddr = (dev_priv->mm.stolen_base +
(gtt->stolen_size - pctx_size));
pctx_paddr = (paddr & (~4095));
I915_WRITE(VLV_PCBR, pctx_paddr);
}
+
+ DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
}
static void valleyview_setup_pctx(struct drm_device *dev)
@@ -4103,6 +5106,8 @@ static void valleyview_setup_pctx(struct drm_device *dev)
goto out;
}
+ DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
+
/*
* From the Gunit register HAS:
* The Gfx driver is expected to program this register and ensure
@@ -4121,6 +5126,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
I915_WRITE(VLV_PCBR, pctx_paddr);
out:
+ DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
dev_priv->vlv_pctx = pctx;
}
@@ -4157,7 +5163,7 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
dev_priv->mem_freq = 1333;
break;
}
- DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
+ DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
@@ -4199,7 +5205,10 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
mutex_lock(&dev_priv->rps.hw_lock);
- val = vlv_punit_read(dev_priv, CCK_FUSE_REG);
+ mutex_lock(&dev_priv->dpio_lock);
+ val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
+ mutex_unlock(&dev_priv->dpio_lock);
+
switch ((val >> 2) & 0x7) {
case 0:
case 1:
@@ -4223,7 +5232,7 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
dev_priv->mem_freq = 1600;
break;
}
- DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
+ DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
@@ -4309,8 +5318,6 @@ static void cherryview_enable_rps(struct drm_device *dev)
/* For now we assume BIOS is allocating and populating the PCBR */
pcbr = I915_READ(VLV_PCBR);
- DRM_DEBUG_DRIVER("PCBR offset : 0x%x\n", pcbr);
-
/* 3: Enable RC6 */
if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
(pcbr >> VLV_PCBR_ADDR_SHIFT))
@@ -4340,7 +5347,10 @@ static void cherryview_enable_rps(struct drm_device *dev)
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
- DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
+ /* RPS code assumes GPLL is used */
+ WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
+
+ DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no");
DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
dev_priv->rps.cur_freq = (val >> 8) & 0xff;
@@ -4354,8 +5364,6 @@ static void cherryview_enable_rps(struct drm_device *dev)
valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
- gen8_enable_rps_interrupts(dev);
-
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -4420,7 +5428,10 @@ static void valleyview_enable_rps(struct drm_device *dev)
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
- DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
+ /* RPS code assumes GPLL is used */
+ WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
+
+ DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no");
DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
dev_priv->rps.cur_freq = (val >> 8) & 0xff;
@@ -4434,8 +5445,6 @@ static void valleyview_enable_rps(struct drm_device *dev)
valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
- gen6_enable_rps_interrupts(dev);
-
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -5194,12 +6203,17 @@ void intel_suspend_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- /* Interrupts should be disabled already to avoid re-arming. */
- WARN_ON(intel_irqs_enabled(dev_priv));
+ if (INTEL_INFO(dev)->gen < 6)
+ return;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
- cancel_work_sync(&dev_priv->rps.work);
+ /*
+ * TODO: disable RPS interrupts on GEN9+ too once RPS support
+ * is added for it.
+ */
+ if (INTEL_INFO(dev)->gen < 9)
+ gen6_disable_rps_interrupts(dev);
/* Force GPU to min freq during suspend */
gen6_rps_idle(dev_priv);
@@ -5209,9 +6223,6 @@ void intel_disable_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- /* Interrupts should be disabled already to avoid re-arming. */
- WARN_ON(intel_irqs_enabled(dev_priv));
-
if (IS_IRONLAKE_M(dev)) {
ironlake_disable_drps(dev);
ironlake_disable_rc6(dev);
@@ -5219,12 +6230,15 @@ void intel_disable_gt_powersave(struct drm_device *dev)
intel_suspend_gt_powersave(dev);
mutex_lock(&dev_priv->rps.hw_lock);
- if (IS_CHERRYVIEW(dev))
+ if (INTEL_INFO(dev)->gen >= 9)
+ gen9_disable_rps(dev);
+ else if (IS_CHERRYVIEW(dev))
cherryview_disable_rps(dev);
else if (IS_VALLEYVIEW(dev))
valleyview_disable_rps(dev);
else
gen6_disable_rps(dev);
+
dev_priv->rps.enabled = false;
mutex_unlock(&dev_priv->rps.hw_lock);
}
@@ -5239,10 +6253,19 @@ static void intel_gen6_powersave_work(struct work_struct *work)
mutex_lock(&dev_priv->rps.hw_lock);
+ /*
+ * TODO: reset/enable RPS interrupts on GEN9+ too, once RPS support is
+ * added for it.
+ */
+ if (INTEL_INFO(dev)->gen < 9)
+ gen6_reset_rps_interrupts(dev);
+
if (IS_CHERRYVIEW(dev)) {
cherryview_enable_rps(dev);
} else if (IS_VALLEYVIEW(dev)) {
valleyview_enable_rps(dev);
+ } else if (INTEL_INFO(dev)->gen >= 9) {
+ gen9_enable_rps(dev);
} else if (IS_BROADWELL(dev)) {
gen8_enable_rps(dev);
__gen6_update_ring_freq(dev);
@@ -5251,6 +6274,10 @@ static void intel_gen6_powersave_work(struct work_struct *work)
__gen6_update_ring_freq(dev);
}
dev_priv->rps.enabled = true;
+
+ if (INTEL_INFO(dev)->gen < 9)
+ gen6_enable_rps_interrupts(dev);
+
mutex_unlock(&dev_priv->rps.hw_lock);
intel_runtime_pm_put(dev_priv);
@@ -5481,7 +6508,7 @@ static void gen6_init_clock_gating(struct drm_device *dev)
* to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
*/
I915_WRITE(GEN6_GT_MODE,
- GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
+ _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
ilk_init_lp_watermarks(dev);
@@ -5609,16 +6636,6 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
- /* FIXME(BDW): Check all the w/a, some might only apply to
- * pre-production hw. */
-
-
- I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
-
- I915_WRITE(_3D_CHICKEN3,
- _MASKED_BIT_ENABLE(_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2)));
-
-
/* WaSwitchSolVfFArbitrationPriority:bdw */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
@@ -5689,7 +6706,7 @@ static void haswell_init_clock_gating(struct drm_device *dev)
* to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
*/
I915_WRITE(GEN7_GT_MODE,
- GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
+ _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
/* WaSwitchSolVfFArbitrationPriority:hsw */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
@@ -5786,7 +6803,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
* to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
*/
I915_WRITE(GEN7_GT_MODE,
- GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
+ _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
snpcr &= ~GEN6_MBC_SNPCR_MASK;
@@ -5899,18 +6916,6 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
/* WaDisableSDEUnitClockGating:chv */
I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
-
- /* WaDisableGunitClockGating:chv (pre-production hw) */
- I915_WRITE(VLV_GUNIT_CLOCK_GATE, I915_READ(VLV_GUNIT_CLOCK_GATE) |
- GINT_DIS);
-
- /* WaDisableFfDopClockGating:chv (pre-production hw) */
- I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
- _MASKED_BIT_ENABLE(GEN8_FF_DOP_CLOCK_GATE_DISABLE));
-
- /* WaDisableDopClockGating:chv (pre-production hw) */
- I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
- GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
}
static void g4x_init_clock_gating(struct drm_device *dev)
@@ -6036,1161 +7041,35 @@ void intel_suspend_hw(struct drm_device *dev)
lpt_suspend_hw(dev);
}
-#define for_each_power_well(i, power_well, domain_mask, power_domains) \
- for (i = 0; \
- i < (power_domains)->power_well_count && \
- ((power_well) = &(power_domains)->power_wells[i]); \
- i++) \
- if ((power_well)->domains & (domain_mask))
-
-#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
- for (i = (power_domains)->power_well_count - 1; \
- i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
- i--) \
- if ((power_well)->domains & (domain_mask))
-
-/**
- * We should only use the power well if we explicitly asked the hardware to
- * enable it, so check if it's enabled and also check if we've requested it to
- * be enabled.
- */
-static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- return I915_READ(HSW_PWR_WELL_DRIVER) ==
- (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
-}
-
-bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
-{
- struct i915_power_domains *power_domains;
- struct i915_power_well *power_well;
- bool is_enabled;
- int i;
-
- if (dev_priv->pm.suspended)
- return false;
-
- power_domains = &dev_priv->power_domains;
-
- is_enabled = true;
-
- for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
- if (power_well->always_on)
- continue;
-
- if (!power_well->hw_enabled) {
- is_enabled = false;
- break;
- }
- }
-
- return is_enabled;
-}
-
-bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
-{
- struct i915_power_domains *power_domains;
- bool ret;
-
- power_domains = &dev_priv->power_domains;
-
- mutex_lock(&power_domains->lock);
- ret = intel_display_power_enabled_unlocked(dev_priv, domain);
- mutex_unlock(&power_domains->lock);
-
- return ret;
-}
-
-/*
- * Starting with Haswell, we have a "Power Down Well" that can be turned off
- * when not needed anymore. We have 4 registers that can request the power well
- * to be enabled, and it will only be disabled if none of the registers is
- * requesting it to be enabled.
- */
-static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
-
- /*
- * After we re-enable the power well, if we touch VGA register 0x3d5
- * we'll get unclaimed register interrupts. This stops after we write
- * anything to the VGA MSR register. The vgacon module uses this
- * register all the time, so if we unbind our driver and, as a
- * consequence, bind vgacon, we'll get stuck in an infinite loop at
- * console_unlock(). So make here we touch the VGA MSR register, making
- * sure vgacon can keep working normally without triggering interrupts
- * and error messages.
- */
- vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
- outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
- vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
-
- if (IS_BROADWELL(dev))
- gen8_irq_power_well_post_enable(dev_priv);
-}
-
-static void hsw_set_power_well(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well, bool enable)
-{
- bool is_enabled, enable_requested;
- uint32_t tmp;
-
- tmp = I915_READ(HSW_PWR_WELL_DRIVER);
- is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
- enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
-
- if (enable) {
- if (!enable_requested)
- I915_WRITE(HSW_PWR_WELL_DRIVER,
- HSW_PWR_WELL_ENABLE_REQUEST);
-
- if (!is_enabled) {
- DRM_DEBUG_KMS("Enabling power well\n");
- if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
- HSW_PWR_WELL_STATE_ENABLED), 20))
- DRM_ERROR("Timeout enabling power well\n");
- }
-
- hsw_power_well_post_enable(dev_priv);
- } else {
- if (enable_requested) {
- I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
- POSTING_READ(HSW_PWR_WELL_DRIVER);
- DRM_DEBUG_KMS("Requesting to disable the power well\n");
- }
- }
-}
-
-static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- hsw_set_power_well(dev_priv, power_well, power_well->count > 0);
-
- /*
- * We're taking over the BIOS, so clear any requests made by it since
- * the driver is in charge now.
- */
- if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
- I915_WRITE(HSW_PWR_WELL_BIOS, 0);
-}
-
-static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- hsw_set_power_well(dev_priv, power_well, true);
-}
-
-static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- hsw_set_power_well(dev_priv, power_well, false);
-}
-
-static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
+static void intel_init_fbc(struct drm_i915_private *dev_priv)
{
-}
-
-static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- return true;
-}
-
-static void vlv_set_power_well(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well, bool enable)
-{
- enum punit_power_well power_well_id = power_well->data;
- u32 mask;
- u32 state;
- u32 ctrl;
-
- mask = PUNIT_PWRGT_MASK(power_well_id);
- state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
- PUNIT_PWRGT_PWR_GATE(power_well_id);
-
- mutex_lock(&dev_priv->rps.hw_lock);
-
-#define COND \
- ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
-
- if (COND)
- goto out;
-
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
- ctrl &= ~mask;
- ctrl |= state;
- vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
-
- if (wait_for(COND, 100))
- DRM_ERROR("timout setting power well state %08x (%08x)\n",
- state,
- vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
-
-#undef COND
-
-out:
- mutex_unlock(&dev_priv->rps.hw_lock);
-}
-
-static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
-}
-
-static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- vlv_set_power_well(dev_priv, power_well, true);
-}
-
-static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- vlv_set_power_well(dev_priv, power_well, false);
-}
-
-static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- int power_well_id = power_well->data;
- bool enabled = false;
- u32 mask;
- u32 state;
- u32 ctrl;
-
- mask = PUNIT_PWRGT_MASK(power_well_id);
- ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
-
- mutex_lock(&dev_priv->rps.hw_lock);
-
- state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
- /*
- * We only ever set the power-on and power-gate states, anything
- * else is unexpected.
- */
- WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
- state != PUNIT_PWRGT_PWR_GATE(power_well_id));
- if (state == ctrl)
- enabled = true;
-
- /*
- * A transient state at this point would mean some unexpected party
- * is poking at the power controls too.
- */
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
- WARN_ON(ctrl != state);
-
- mutex_unlock(&dev_priv->rps.hw_lock);
-
- return enabled;
-}
-
-static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
-
- vlv_set_power_well(dev_priv, power_well, true);
-
- spin_lock_irq(&dev_priv->irq_lock);
- valleyview_enable_display_irqs(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
-
- /*
- * During driver initialization/resume we can avoid restoring the
- * part of the HW/SW state that will be inited anyway explicitly.
- */
- if (dev_priv->power_domains.initializing)
+ if (!HAS_FBC(dev_priv)) {
+ dev_priv->fbc.enabled = false;
return;
-
- intel_hpd_init(dev_priv->dev);
-
- i915_redisable_vga_power_on(dev_priv->dev);
-}
-
-static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
-
- spin_lock_irq(&dev_priv->irq_lock);
- valleyview_disable_display_irqs(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
-
- vlv_set_power_well(dev_priv, power_well, false);
-
- vlv_power_sequencer_reset(dev_priv);
-}
-
-static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
-
- /*
- * Enable the CRI clock source so we can get at the
- * display and the reference clock for VGA
- * hotplug / manual detection.
- */
- I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
- DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
- udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
-
- vlv_set_power_well(dev_priv, power_well, true);
-
- /*
- * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
- * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
- * a. GUnit 0x2110 bit[0] set to 1 (def 0)
- * b. The other bits such as sfr settings / modesel may all
- * be set to 0.
- *
- * This should only be done on init and resume from S3 with
- * both PLLs disabled, or we risk losing DPIO and PLL
- * synchronization.
- */
- I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
-}
-
-static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- enum pipe pipe;
-
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
-
- for_each_pipe(dev_priv, pipe)
- assert_pll_disabled(dev_priv, pipe);
-
- /* Assert common reset */
- I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
-
- vlv_set_power_well(dev_priv, power_well, false);
-}
-
-static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- enum dpio_phy phy;
-
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
- power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
-
- /*
- * Enable the CRI clock source so we can get at the
- * display and the reference clock for VGA
- * hotplug / manual detection.
- */
- if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
- phy = DPIO_PHY0;
- I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
- DPLL_REFA_CLK_ENABLE_VLV);
- I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
- DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
- } else {
- phy = DPIO_PHY1;
- I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
- DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
}
- udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
- vlv_set_power_well(dev_priv, power_well, true);
- /* Poll for phypwrgood signal */
- if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
- DRM_ERROR("Display PHY %d is not power up\n", phy);
-
- I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) |
- PHY_COM_LANE_RESET_DEASSERT(phy));
-}
-
-static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- enum dpio_phy phy;
-
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
- power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
-
- if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
- phy = DPIO_PHY0;
- assert_pll_disabled(dev_priv, PIPE_A);
- assert_pll_disabled(dev_priv, PIPE_B);
+ if (INTEL_INFO(dev_priv)->gen >= 7) {
+ dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
+ dev_priv->display.enable_fbc = gen7_enable_fbc;
+ dev_priv->display.disable_fbc = ironlake_disable_fbc;
+ } else if (INTEL_INFO(dev_priv)->gen >= 5) {
+ dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
+ dev_priv->display.enable_fbc = ironlake_enable_fbc;
+ dev_priv->display.disable_fbc = ironlake_disable_fbc;
+ } else if (IS_GM45(dev_priv)) {
+ dev_priv->display.fbc_enabled = g4x_fbc_enabled;
+ dev_priv->display.enable_fbc = g4x_enable_fbc;
+ dev_priv->display.disable_fbc = g4x_disable_fbc;
} else {
- phy = DPIO_PHY1;
- assert_pll_disabled(dev_priv, PIPE_C);
- }
+ dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
+ dev_priv->display.enable_fbc = i8xx_enable_fbc;
+ dev_priv->display.disable_fbc = i8xx_disable_fbc;
- I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) &
- ~PHY_COM_LANE_RESET_DEASSERT(phy));
-
- vlv_set_power_well(dev_priv, power_well, false);
-}
-
-static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- enum pipe pipe = power_well->data;
- bool enabled;
- u32 state, ctrl;
-
- mutex_lock(&dev_priv->rps.hw_lock);
-
- state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
- /*
- * We only ever set the power-on and power-gate states, anything
- * else is unexpected.
- */
- WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
- enabled = state == DP_SSS_PWR_ON(pipe);
-
- /*
- * A transient state at this point would mean some unexpected party
- * is poking at the power controls too.
- */
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
- WARN_ON(ctrl << 16 != state);
-
- mutex_unlock(&dev_priv->rps.hw_lock);
-
- return enabled;
-}
-
-static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well,
- bool enable)
-{
- enum pipe pipe = power_well->data;
- u32 state;
- u32 ctrl;
-
- state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
-
- mutex_lock(&dev_priv->rps.hw_lock);
-
-#define COND \
- ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
-
- if (COND)
- goto out;
-
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
- ctrl &= ~DP_SSC_MASK(pipe);
- ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
- vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
-
- if (wait_for(COND, 100))
- DRM_ERROR("timout setting power well state %08x (%08x)\n",
- state,
- vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
-
-#undef COND
-
-out:
- mutex_unlock(&dev_priv->rps.hw_lock);
-}
-
-static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
-}
-
-static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- WARN_ON_ONCE(power_well->data != PIPE_A &&
- power_well->data != PIPE_B &&
- power_well->data != PIPE_C);
-
- chv_set_pipe_power_well(dev_priv, power_well, true);
-}
-
-static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- WARN_ON_ONCE(power_well->data != PIPE_A &&
- power_well->data != PIPE_B &&
- power_well->data != PIPE_C);
-
- chv_set_pipe_power_well(dev_priv, power_well, false);
-}
-
-static void check_power_well_state(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
-
- if (power_well->always_on || !i915.disable_power_well) {
- if (!enabled)
- goto mismatch;
-
- return;
- }
-
- if (enabled != (power_well->count > 0))
- goto mismatch;
-
- return;
-
-mismatch:
- WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
- power_well->name, power_well->always_on, enabled,
- power_well->count, i915.disable_power_well);
-}
-
-void intel_display_power_get(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
-{
- struct i915_power_domains *power_domains;
- struct i915_power_well *power_well;
- int i;
-
- intel_runtime_pm_get(dev_priv);
-
- power_domains = &dev_priv->power_domains;
-
- mutex_lock(&power_domains->lock);
-
- for_each_power_well(i, power_well, BIT(domain), power_domains) {
- if (!power_well->count++) {
- DRM_DEBUG_KMS("enabling %s\n", power_well->name);
- power_well->ops->enable(dev_priv, power_well);
- power_well->hw_enabled = true;
- }
-
- check_power_well_state(dev_priv, power_well);
+ /* This value was pulled out of someone's hat */
+ I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
}
- power_domains->domain_use_count[domain]++;
-
- mutex_unlock(&power_domains->lock);
-}
-
-void intel_display_power_put(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
-{
- struct i915_power_domains *power_domains;
- struct i915_power_well *power_well;
- int i;
-
- power_domains = &dev_priv->power_domains;
-
- mutex_lock(&power_domains->lock);
-
- WARN_ON(!power_domains->domain_use_count[domain]);
- power_domains->domain_use_count[domain]--;
-
- for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
- WARN_ON(!power_well->count);
-
- if (!--power_well->count && i915.disable_power_well) {
- DRM_DEBUG_KMS("disabling %s\n", power_well->name);
- power_well->hw_enabled = false;
- power_well->ops->disable(dev_priv, power_well);
- }
-
- check_power_well_state(dev_priv, power_well);
- }
-
- mutex_unlock(&power_domains->lock);
-
- intel_runtime_pm_put(dev_priv);
-}
-
-static struct i915_power_domains *hsw_pwr;
-
-/* Display audio driver power well request */
-int i915_request_power_well(void)
-{
- struct drm_i915_private *dev_priv;
-
- if (!hsw_pwr)
- return -ENODEV;
-
- dev_priv = container_of(hsw_pwr, struct drm_i915_private,
- power_domains);
- intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
- return 0;
-}
-EXPORT_SYMBOL_GPL(i915_request_power_well);
-
-/* Display audio driver power well release */
-int i915_release_power_well(void)
-{
- struct drm_i915_private *dev_priv;
-
- if (!hsw_pwr)
- return -ENODEV;
-
- dev_priv = container_of(hsw_pwr, struct drm_i915_private,
- power_domains);
- intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
- return 0;
-}
-EXPORT_SYMBOL_GPL(i915_release_power_well);
-
-/*
- * Private interface for the audio driver to get CDCLK in kHz.
- *
- * Caller must request power well using i915_request_power_well() prior to
- * making the call.
- */
-int i915_get_cdclk_freq(void)
-{
- struct drm_i915_private *dev_priv;
-
- if (!hsw_pwr)
- return -ENODEV;
-
- dev_priv = container_of(hsw_pwr, struct drm_i915_private,
- power_domains);
-
- return intel_ddi_get_cdclk_freq(dev_priv);
-}
-EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
-
-
-#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
-
-#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PIPE_A) | \
- BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
- BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
- BIT(POWER_DOMAIN_PORT_CRT) | \
- BIT(POWER_DOMAIN_PLLS) | \
- BIT(POWER_DOMAIN_INIT))
-#define HSW_DISPLAY_POWER_DOMAINS ( \
- (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define BDW_ALWAYS_ON_POWER_DOMAINS ( \
- HSW_ALWAYS_ON_POWER_DOMAINS | \
- BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
-#define BDW_DISPLAY_POWER_DOMAINS ( \
- (POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define VLV_ALWAYS_ON_POWER_DOMAINS BIT(POWER_DOMAIN_INIT)
-#define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK
-
-#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
- BIT(POWER_DOMAIN_PORT_CRT) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define CHV_PIPE_A_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PIPE_A) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define CHV_PIPE_B_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PIPE_B) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define CHV_PIPE_C_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PIPE_C) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
- BIT(POWER_DOMAIN_INIT))
-
-static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
- .sync_hw = i9xx_always_on_power_well_noop,
- .enable = i9xx_always_on_power_well_noop,
- .disable = i9xx_always_on_power_well_noop,
- .is_enabled = i9xx_always_on_power_well_enabled,
-};
-
-static const struct i915_power_well_ops chv_pipe_power_well_ops = {
- .sync_hw = chv_pipe_power_well_sync_hw,
- .enable = chv_pipe_power_well_enable,
- .disable = chv_pipe_power_well_disable,
- .is_enabled = chv_pipe_power_well_enabled,
-};
-
-static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
- .sync_hw = vlv_power_well_sync_hw,
- .enable = chv_dpio_cmn_power_well_enable,
- .disable = chv_dpio_cmn_power_well_disable,
- .is_enabled = vlv_power_well_enabled,
-};
-
-static struct i915_power_well i9xx_always_on_power_well[] = {
- {
- .name = "always-on",
- .always_on = 1,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- },
-};
-
-static const struct i915_power_well_ops hsw_power_well_ops = {
- .sync_hw = hsw_power_well_sync_hw,
- .enable = hsw_power_well_enable,
- .disable = hsw_power_well_disable,
- .is_enabled = hsw_power_well_enabled,
-};
-
-static struct i915_power_well hsw_power_wells[] = {
- {
- .name = "always-on",
- .always_on = 1,
- .domains = HSW_ALWAYS_ON_POWER_DOMAINS,
- .ops = &i9xx_always_on_power_well_ops,
- },
- {
- .name = "display",
- .domains = HSW_DISPLAY_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- },
-};
-
-static struct i915_power_well bdw_power_wells[] = {
- {
- .name = "always-on",
- .always_on = 1,
- .domains = BDW_ALWAYS_ON_POWER_DOMAINS,
- .ops = &i9xx_always_on_power_well_ops,
- },
- {
- .name = "display",
- .domains = BDW_DISPLAY_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- },
-};
-
-static const struct i915_power_well_ops vlv_display_power_well_ops = {
- .sync_hw = vlv_power_well_sync_hw,
- .enable = vlv_display_power_well_enable,
- .disable = vlv_display_power_well_disable,
- .is_enabled = vlv_power_well_enabled,
-};
-
-static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
- .sync_hw = vlv_power_well_sync_hw,
- .enable = vlv_dpio_cmn_power_well_enable,
- .disable = vlv_dpio_cmn_power_well_disable,
- .is_enabled = vlv_power_well_enabled,
-};
-
-static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
- .sync_hw = vlv_power_well_sync_hw,
- .enable = vlv_power_well_enable,
- .disable = vlv_power_well_disable,
- .is_enabled = vlv_power_well_enabled,
-};
-
-static struct i915_power_well vlv_power_wells[] = {
- {
- .name = "always-on",
- .always_on = 1,
- .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
- .ops = &i9xx_always_on_power_well_ops,
- },
- {
- .name = "display",
- .domains = VLV_DISPLAY_POWER_DOMAINS,
- .data = PUNIT_POWER_WELL_DISP2D,
- .ops = &vlv_display_power_well_ops,
- },
- {
- .name = "dpio-tx-b-01",
- .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
- },
- {
- .name = "dpio-tx-b-23",
- .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
- },
- {
- .name = "dpio-tx-c-01",
- .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
- },
- {
- .name = "dpio-tx-c-23",
- .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
- },
- {
- .name = "dpio-common",
- .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
- .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
- .ops = &vlv_dpio_cmn_power_well_ops,
- },
-};
-
-static struct i915_power_well chv_power_wells[] = {
- {
- .name = "always-on",
- .always_on = 1,
- .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
- .ops = &i9xx_always_on_power_well_ops,
- },
-#if 0
- {
- .name = "display",
- .domains = VLV_DISPLAY_POWER_DOMAINS,
- .data = PUNIT_POWER_WELL_DISP2D,
- .ops = &vlv_display_power_well_ops,
- },
- {
- .name = "pipe-a",
- .domains = CHV_PIPE_A_POWER_DOMAINS,
- .data = PIPE_A,
- .ops = &chv_pipe_power_well_ops,
- },
- {
- .name = "pipe-b",
- .domains = CHV_PIPE_B_POWER_DOMAINS,
- .data = PIPE_B,
- .ops = &chv_pipe_power_well_ops,
- },
- {
- .name = "pipe-c",
- .domains = CHV_PIPE_C_POWER_DOMAINS,
- .data = PIPE_C,
- .ops = &chv_pipe_power_well_ops,
- },
-#endif
- {
- .name = "dpio-common-bc",
- /*
- * XXX: cmnreset for one PHY seems to disturb the other.
- * As a workaround keep both powered on at the same
- * time for now.
- */
- .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
- .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
- .ops = &chv_dpio_cmn_power_well_ops,
- },
- {
- .name = "dpio-common-d",
- /*
- * XXX: cmnreset for one PHY seems to disturb the other.
- * As a workaround keep both powered on at the same
- * time for now.
- */
- .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
- .data = PUNIT_POWER_WELL_DPIO_CMN_D,
- .ops = &chv_dpio_cmn_power_well_ops,
- },
-#if 0
- {
- .name = "dpio-tx-b-01",
- .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
- },
- {
- .name = "dpio-tx-b-23",
- .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
- },
- {
- .name = "dpio-tx-c-01",
- .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
- },
- {
- .name = "dpio-tx-c-23",
- .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
- },
- {
- .name = "dpio-tx-d-01",
- .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
- CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01,
- },
- {
- .name = "dpio-tx-d-23",
- .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
- CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23,
- },
-#endif
-};
-
-static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
- enum punit_power_well power_well_id)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- struct i915_power_well *power_well;
- int i;
-
- for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
- if (power_well->data == power_well_id)
- return power_well;
- }
-
- return NULL;
-}
-
-#define set_power_wells(power_domains, __power_wells) ({ \
- (power_domains)->power_wells = (__power_wells); \
- (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
-})
-
-int intel_power_domains_init(struct drm_i915_private *dev_priv)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
-
- mutex_init(&power_domains->lock);
-
- /*
- * The enabling order will be from lower to higher indexed wells,
- * the disabling order is reversed.
- */
- if (IS_HASWELL(dev_priv->dev)) {
- set_power_wells(power_domains, hsw_power_wells);
- hsw_pwr = power_domains;
- } else if (IS_BROADWELL(dev_priv->dev)) {
- set_power_wells(power_domains, bdw_power_wells);
- hsw_pwr = power_domains;
- } else if (IS_CHERRYVIEW(dev_priv->dev)) {
- set_power_wells(power_domains, chv_power_wells);
- } else if (IS_VALLEYVIEW(dev_priv->dev)) {
- set_power_wells(power_domains, vlv_power_wells);
- } else {
- set_power_wells(power_domains, i9xx_always_on_power_well);
- }
-
- return 0;
-}
-
-void intel_power_domains_remove(struct drm_i915_private *dev_priv)
-{
- hsw_pwr = NULL;
-}
-
-static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- struct i915_power_well *power_well;
- int i;
-
- mutex_lock(&power_domains->lock);
- for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
- power_well->ops->sync_hw(dev_priv, power_well);
- power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
- power_well);
- }
- mutex_unlock(&power_domains->lock);
-}
-
-static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
-{
- struct i915_power_well *cmn =
- lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
- struct i915_power_well *disp2d =
- lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
-
- /* nothing to do if common lane is already off */
- if (!cmn->ops->is_enabled(dev_priv, cmn))
- return;
-
- /* If the display might be already active skip this */
- if (disp2d->ops->is_enabled(dev_priv, disp2d) &&
- I915_READ(DPIO_CTL) & DPIO_CMNRST)
- return;
-
- DRM_DEBUG_KMS("toggling display PHY side reset\n");
-
- /* cmnlane needs DPLL registers */
- disp2d->ops->enable(dev_priv, disp2d);
-
- /*
- * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
- * Need to assert and de-assert PHY SB reset by gating the
- * common lane power, then un-gating it.
- * Simply ungating isn't enough to reset the PHY enough to get
- * ports and lanes running.
- */
- cmn->ops->disable(dev_priv, cmn);
-}
-
-void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
-
- power_domains->initializing = true;
-
- if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
- mutex_lock(&power_domains->lock);
- vlv_cmnlane_wa(dev_priv);
- mutex_unlock(&power_domains->lock);
- }
-
- /* For now, we need the power well to be always enabled. */
- intel_display_set_init_power(dev_priv, true);
- intel_power_domains_resume(dev_priv);
- power_domains->initializing = false;
-}
-
-void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
-{
- intel_runtime_pm_get(dev_priv);
-}
-
-void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
-{
- intel_runtime_pm_put(dev_priv);
-}
-
-void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
- struct device *device = &dev->pdev->dev;
-
- if (!HAS_RUNTIME_PM(dev))
- return;
-
- pm_runtime_get_sync(device);
- WARN(dev_priv->pm.suspended, "Device still suspended.\n");
-}
-
-void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
- struct device *device = &dev->pdev->dev;
-
- if (!HAS_RUNTIME_PM(dev))
- return;
-
- WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
- pm_runtime_get_noresume(device);
-}
-
-void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
- struct device *device = &dev->pdev->dev;
-
- if (!HAS_RUNTIME_PM(dev))
- return;
-
- pm_runtime_mark_last_busy(device);
- pm_runtime_put_autosuspend(device);
-}
-
-void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
- struct device *device = &dev->pdev->dev;
-
- if (!HAS_RUNTIME_PM(dev))
- return;
-
- pm_runtime_set_active(device);
-
- /*
- * RPM depends on RC6 to save restore the GT HW context, so make RC6 a
- * requirement.
- */
- if (!intel_enable_rc6(dev)) {
- DRM_INFO("RC6 disabled, disabling runtime PM support\n");
- return;
- }
-
- pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
- pm_runtime_mark_last_busy(device);
- pm_runtime_use_autosuspend(device);
-
- pm_runtime_put_autosuspend(device);
-}
-
-void intel_fini_runtime_pm(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
- struct device *device = &dev->pdev->dev;
-
- if (!HAS_RUNTIME_PM(dev))
- return;
-
- if (!intel_enable_rc6(dev))
- return;
-
- /* Make sure we're not suspended first. */
- pm_runtime_get_sync(device);
- pm_runtime_disable(device);
+ dev_priv->fbc.enabled = dev_priv->display.fbc_enabled(dev_priv->dev);
}
/* Set up chip specific power management-related functions */
@@ -7198,28 +7077,7 @@ void intel_init_pm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (HAS_FBC(dev)) {
- if (INTEL_INFO(dev)->gen >= 7) {
- dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
- dev_priv->display.enable_fbc = gen7_enable_fbc;
- dev_priv->display.disable_fbc = ironlake_disable_fbc;
- } else if (INTEL_INFO(dev)->gen >= 5) {
- dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
- dev_priv->display.enable_fbc = ironlake_enable_fbc;
- dev_priv->display.disable_fbc = ironlake_disable_fbc;
- } else if (IS_GM45(dev)) {
- dev_priv->display.fbc_enabled = g4x_fbc_enabled;
- dev_priv->display.enable_fbc = g4x_enable_fbc;
- dev_priv->display.disable_fbc = g4x_disable_fbc;
- } else {
- dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
- dev_priv->display.enable_fbc = i8xx_enable_fbc;
- dev_priv->display.disable_fbc = i8xx_disable_fbc;
-
- /* This value was pulled out of someone's hat */
- I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
- }
- }
+ intel_init_fbc(dev_priv);
/* For cxsr */
if (IS_PINEVIEW(dev))
@@ -7228,7 +7086,13 @@ void intel_init_pm(struct drm_device *dev)
i915_ironlake_get_mem_freq(dev);
/* For FIFO watermark updates */
- if (HAS_PCH_SPLIT(dev)) {
+ if (INTEL_INFO(dev)->gen >= 9) {
+ skl_setup_wm_latency(dev);
+
+ dev_priv->display.init_clock_gating = gen9_init_clock_gating;
+ dev_priv->display.update_wm = skl_update_wm;
+ dev_priv->display.update_sprite_wm = skl_update_sprite_wm;
+ } else if (HAS_PCH_SPLIT(dev)) {
ilk_setup_wm_latency(dev);
if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
@@ -7309,7 +7173,7 @@ void intel_init_pm(struct drm_device *dev)
}
}
-int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
+int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
{
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -7319,6 +7183,7 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
}
I915_WRITE(GEN6_PCODE_DATA, *val);
+ I915_WRITE(GEN6_PCODE_DATA1, 0);
I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
@@ -7333,7 +7198,7 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
return 0;
}
-int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
+int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val)
{
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -7356,99 +7221,66 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
return 0;
}
-static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
+static int vlv_gpu_freq_div(unsigned int czclk_freq)
{
- int div;
-
- /* 4 x czclk */
- switch (dev_priv->mem_freq) {
- case 800:
- div = 10;
- break;
- case 1066:
- div = 12;
- break;
- case 1333:
- div = 16;
- break;
+ switch (czclk_freq) {
+ case 200:
+ return 10;
+ case 267:
+ return 12;
+ case 320:
+ case 333:
+ return 16;
+ case 400:
+ return 20;
default:
return -1;
}
+}
+
+static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
+{
+ int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4);
+
+ div = vlv_gpu_freq_div(czclk_freq);
+ if (div < 0)
+ return div;
- return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
+ return DIV_ROUND_CLOSEST(czclk_freq * (val + 6 - 0xbd), div);
}
static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
- int mul;
+ int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4);
- /* 4 x czclk */
- switch (dev_priv->mem_freq) {
- case 800:
- mul = 10;
- break;
- case 1066:
- mul = 12;
- break;
- case 1333:
- mul = 16;
- break;
- default:
- return -1;
- }
+ mul = vlv_gpu_freq_div(czclk_freq);
+ if (mul < 0)
+ return mul;
- return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
+ return DIV_ROUND_CLOSEST(mul * val, czclk_freq) + 0xbd - 6;
}
static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
- int div, freq;
+ int div, czclk_freq = dev_priv->rps.cz_freq;
- switch (dev_priv->rps.cz_freq) {
- case 200:
- div = 5;
- break;
- case 267:
- div = 6;
- break;
- case 320:
- case 333:
- case 400:
- div = 8;
- break;
- default:
- return -1;
- }
-
- freq = (DIV_ROUND_CLOSEST((dev_priv->rps.cz_freq * val), 2 * div) / 2);
+ div = vlv_gpu_freq_div(czclk_freq) / 2;
+ if (div < 0)
+ return div;
- return freq;
+ return DIV_ROUND_CLOSEST(czclk_freq * val, 2 * div) / 2;
}
static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
- int mul, opcode;
+ int mul, czclk_freq = dev_priv->rps.cz_freq;
- switch (dev_priv->rps.cz_freq) {
- case 200:
- mul = 5;
- break;
- case 267:
- mul = 6;
- break;
- case 320:
- case 333:
- case 400:
- mul = 8;
- break;
- default:
- return -1;
- }
+ mul = vlv_gpu_freq_div(czclk_freq) / 2;
+ if (mul < 0)
+ return mul;
/* CHV needs even values */
- opcode = (DIV_ROUND_CLOSEST((val * 2 * mul), dev_priv->rps.cz_freq) * 2);
-
- return opcode;
+ return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2;
}
int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
@@ -7485,5 +7317,4 @@ void intel_pm_setup(struct drm_device *dev)
intel_gen6_powersave_work);
dev_priv->pm.suspended = false;
- dev_priv->pm._irqs_disabled = false;
}
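The hunk above folds the duplicated mem_freq switch statements in byt_gpu_freq()/byt_freq_opcode() and the cz_freq switches in the CHV helpers into one vlv_gpu_freq_div() table keyed on the CZ clock. A quick worked example of the conversion, a sketch only with a hypothetical opcode value (0xc8) chosen purely for illustration:

/* Sketch only: reuses the vlv_gpu_freq_div() table above, hypothetical input. */
static int example_byt_gpu_freq_mhz(void)
{
	unsigned int mem_freq = 1066;                         /* MHz, example value */
	unsigned int czclk = DIV_ROUND_CLOSEST(mem_freq, 4);  /* 267 */
	int div = 12;                                         /* vlv_gpu_freq_div(267) */
	int val = 0xc8;                                       /* hypothetical opcode */

	/* New formula: DIV_ROUND_CLOSEST(267 * 17, 12) = 378 MHz. */
	/* Old formula: DIV_ROUND_CLOSEST(1066 * 17, 4 * 12) also yields 378 MHz. */
	return DIV_ROUND_CLOSEST(czclk * (val + 6 - 0xbd), div);
}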
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
new file mode 100644
index 00000000000..716b8a961ee
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -0,0 +1,481 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * DOC: Panel Self Refresh (PSR/SRD)
+ *
+ * Since Haswell the Display controller supports Panel Self-Refresh on display
+ * panels which have a remote frame buffer (RFB) implemented according to the
+ * PSR spec in eDP 1.3. PSR allows the display to go to lower standby states
+ * when the system is idle but the display is on, as it eliminates display
+ * refresh requests to DDR memory completely as long as the frame buffer for
+ * that display is unchanged.
+ *
+ * Panel Self Refresh must be supported by both Hardware (source) and
+ * Panel (sink).
+ *
+ * PSR saves power by caching the framebuffer in the panel RFB, which allows us
+ * to power down the link and memory controller. For DSI panels the same idea
+ * is called "manual mode".
+ *
+ * The implementation uses the hardware-based PSR support which automatically
+ * enters/exits self-refresh mode. The hardware takes care of sending the
+ * required DP aux message and could even retrain the link (that part isn't
+ * enabled yet though). The hardware also keeps track of any frontbuffer
+ * changes to know when to exit self-refresh mode again. Unfortunately that
+ * part doesn't work too well, which is why the i915 PSR support uses the
+ * software frontbuffer tracking to make sure it doesn't miss a screen
+ * update. For this integration intel_psr_invalidate() and intel_psr_flush()
+ * get called by the frontbuffer tracking code. Note that because of locking
+ * issues the self-refresh re-enable code is done from a work queue, which
+ * must be correctly synchronized/cancelled when shutting down the pipe.
+ */
+
+#include <drm/drmP.h>
+
+#include "intel_drv.h"
+#include "i915_drv.h"
+
+static bool is_edp_psr(struct intel_dp *intel_dp)
+{
+ return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
+}
+
+bool intel_psr_is_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!HAS_PSR(dev))
+ return false;
+
+ return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
+}
+
+static void intel_psr_write_vsc(struct intel_dp *intel_dp,
+ struct edp_vsc_psr *vsc_psr)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
+ u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
+ u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
+ uint32_t *data = (uint32_t *) vsc_psr;
+ unsigned int i;
+
+ /* As per BSpec (Pipe Video Data Island Packet), we need to disable
+ the video DIP being updated before programming the video DIP data
+ buffer registers for the DIP being updated. */
+ I915_WRITE(ctl_reg, 0);
+ POSTING_READ(ctl_reg);
+
+ for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
+ if (i < sizeof(struct edp_vsc_psr))
+ I915_WRITE(data_reg + i, *data++);
+ else
+ I915_WRITE(data_reg + i, 0);
+ }
+
+ I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
+ POSTING_READ(ctl_reg);
+}
+
+static void intel_psr_setup_vsc(struct intel_dp *intel_dp)
+{
+ struct edp_vsc_psr psr_vsc;
+
+ /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
+ memset(&psr_vsc, 0, sizeof(psr_vsc));
+ psr_vsc.sdp_header.HB0 = 0;
+ psr_vsc.sdp_header.HB1 = 0x7;
+ psr_vsc.sdp_header.HB2 = 0x2;
+ psr_vsc.sdp_header.HB3 = 0x8;
+ intel_psr_write_vsc(intel_dp, &psr_vsc);
+}
+
+static void intel_psr_enable_sink(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t aux_clock_divider;
+ int precharge = 0x3;
+ bool only_standby = false;
+ static const uint8_t aux_msg[] = {
+ [0] = DP_AUX_NATIVE_WRITE << 4,
+ [1] = DP_SET_POWER >> 8,
+ [2] = DP_SET_POWER & 0xff,
+ [3] = 1 - 1,
+ [4] = DP_SET_POWER_D0,
+ };
+ int i;
+
+ BUILD_BUG_ON(sizeof(aux_msg) > 20);
+
+ aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
+
+ if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
+ only_standby = true;
+
+ /* Enable PSR in sink */
+ if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
+ DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
+ else
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
+ DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
+
+ /* Setup AUX registers */
+ for (i = 0; i < sizeof(aux_msg); i += 4)
+ I915_WRITE(EDP_PSR_AUX_DATA1(dev) + i,
+ intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
+
+ I915_WRITE(EDP_PSR_AUX_CTL(dev),
+ DP_AUX_CH_CTL_TIME_OUT_400us |
+ (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
+}
+
+static void intel_psr_enable_source(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t max_sleep_time = 0x1f;
+ uint32_t idle_frames = 1;
+ uint32_t val = 0x0;
+ const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
+ bool only_standby = false;
+
+ if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
+ only_standby = true;
+
+ if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
+ val |= EDP_PSR_LINK_STANDBY;
+ val |= EDP_PSR_TP2_TP3_TIME_0us;
+ val |= EDP_PSR_TP1_TIME_0us;
+ val |= EDP_PSR_SKIP_AUX_EXIT;
+ val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
+ } else
+ val |= EDP_PSR_LINK_DISABLE;
+
+ I915_WRITE(EDP_PSR_CTL(dev), val |
+ (IS_BROADWELL(dev) ? 0 : link_entry_time) |
+ max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
+ idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
+ EDP_PSR_ENABLE);
+}
+
+static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dig_port->base.base.crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ lockdep_assert_held(&dev_priv->psr.lock);
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+
+ dev_priv->psr.source_ok = false;
+
+ if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
+ DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
+ return false;
+ }
+
+ if (!i915.enable_psr) {
+ DRM_DEBUG_KMS("PSR disabled by flag\n");
+ return false;
+ }
+
+ /* Below limitations aren't valid for Broadwell */
+ if (IS_BROADWELL(dev))
+ goto out;
+
+ if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
+ S3D_ENABLE) {
+ DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
+ return false;
+ }
+
+ if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
+ DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
+ return false;
+ }
+
+ out:
+ dev_priv->psr.source_ok = true;
+ return true;
+}
+
+static void intel_psr_do_enable(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
+ WARN_ON(dev_priv->psr.active);
+ lockdep_assert_held(&dev_priv->psr.lock);
+
+ /* Enable/Re-enable PSR on the host */
+ intel_psr_enable_source(intel_dp);
+
+ dev_priv->psr.active = true;
+}
+
+/**
+ * intel_psr_enable - Enable PSR
+ * @intel_dp: Intel DP
+ *
+ * This function can only be called after the pipe is fully trained and enabled.
+ */
+void intel_psr_enable(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!HAS_PSR(dev)) {
+ DRM_DEBUG_KMS("PSR not supported on this platform\n");
+ return;
+ }
+
+ if (!is_edp_psr(intel_dp)) {
+ DRM_DEBUG_KMS("PSR not supported by this panel\n");
+ return;
+ }
+
+ mutex_lock(&dev_priv->psr.lock);
+ if (dev_priv->psr.enabled) {
+ DRM_DEBUG_KMS("PSR already in use\n");
+ goto unlock;
+ }
+
+ if (!intel_psr_match_conditions(intel_dp))
+ goto unlock;
+
+ dev_priv->psr.busy_frontbuffer_bits = 0;
+
+ intel_psr_setup_vsc(intel_dp);
+
+ /* Avoid continuous PSR exit by masking memup and hpd */
+ I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
+ EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
+
+ /* Enable PSR on the panel */
+ intel_psr_enable_sink(intel_dp);
+
+ dev_priv->psr.enabled = intel_dp;
+unlock:
+ mutex_unlock(&dev_priv->psr.lock);
+}
+
+/**
+ * intel_psr_disable - Disable PSR
+ * @intel_dp: Intel DP
+ *
+ * This function needs to be called before disabling the pipe.
+ */
+void intel_psr_disable(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev_priv->psr.lock);
+ if (!dev_priv->psr.enabled) {
+ mutex_unlock(&dev_priv->psr.lock);
+ return;
+ }
+
+ if (dev_priv->psr.active) {
+ I915_WRITE(EDP_PSR_CTL(dev),
+ I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
+
+ /* Wait till PSR is idle */
+ if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
+ EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
+ DRM_ERROR("Timed out waiting for PSR Idle State\n");
+
+ dev_priv->psr.active = false;
+ } else {
+ WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
+ }
+
+ dev_priv->psr.enabled = NULL;
+ mutex_unlock(&dev_priv->psr.lock);
+
+ cancel_delayed_work_sync(&dev_priv->psr.work);
+}
+
+static void intel_psr_work(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv), psr.work.work);
+ struct intel_dp *intel_dp = dev_priv->psr.enabled;
+
+ /* We have to make sure PSR is ready for re-enable,
+ * otherwise it stays disabled until the next full enable/disable cycle.
+ * PSR might take some time to get fully disabled
+ * and be ready for re-enable.
+ */
+ if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) &
+ EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
+ DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
+ return;
+ }
+
+ mutex_lock(&dev_priv->psr.lock);
+ intel_dp = dev_priv->psr.enabled;
+
+ if (!intel_dp)
+ goto unlock;
+
+ /*
+ * The delayed work can race with an invalidate hence we need to
+ * recheck. Since psr_flush first clears this and then reschedules we
+ * won't ever miss a flush when bailing out here.
+ */
+ if (dev_priv->psr.busy_frontbuffer_bits)
+ goto unlock;
+
+ intel_psr_do_enable(intel_dp);
+unlock:
+ mutex_unlock(&dev_priv->psr.lock);
+}
+
+static void intel_psr_exit(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->psr.active) {
+ u32 val = I915_READ(EDP_PSR_CTL(dev));
+
+ WARN_ON(!(val & EDP_PSR_ENABLE));
+
+ I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
+
+ dev_priv->psr.active = false;
+ }
+
+}
+
+/**
+ * intel_psr_invalidate - Invalidate PSR
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * Since the hardware frontbuffer tracking has gaps we need to integrate
+ * with the software frontbuffer tracking. This function gets called every
+ * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
+ * disabled if the frontbuffer mask contains a buffer relevant to PSR.
+ *
+ * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
+ */
+void intel_psr_invalidate(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ enum pipe pipe;
+
+ mutex_lock(&dev_priv->psr.lock);
+ if (!dev_priv->psr.enabled) {
+ mutex_unlock(&dev_priv->psr.lock);
+ return;
+ }
+
+ crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
+ pipe = to_intel_crtc(crtc)->pipe;
+
+ intel_psr_exit(dev);
+
+ frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+
+ dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
+ mutex_unlock(&dev_priv->psr.lock);
+}
+
+/**
+ * intel_psr_flush - Flush PSR
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * Since the hardware frontbuffer tracking has gaps we need to integrate
+ * with the software frontbuffer tracking. This function gets called every
+ * time frontbuffer rendering has completed and flushed out to memory. PSR
+ * can be enabled again if no other frontbuffer relevant to PSR is dirty.
+ *
+ * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
+ */
+void intel_psr_flush(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ enum pipe pipe;
+
+ mutex_lock(&dev_priv->psr.lock);
+ if (!dev_priv->psr.enabled) {
+ mutex_unlock(&dev_priv->psr.lock);
+ return;
+ }
+
+ crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
+ pipe = to_intel_crtc(crtc)->pipe;
+ dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
+
+ /*
+ * On Haswell sprite plane updates don't result in a PSR-invalidating
+ * signal in the hardware, which means we need to manually fake this in
+ * software for all flushes, not just when we've seen a preceding
+ * invalidation through frontbuffer rendering.
+ */
+ if (IS_HASWELL(dev) &&
+ (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
+ intel_psr_exit(dev);
+
+ if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
+ schedule_delayed_work(&dev_priv->psr.work,
+ msecs_to_jiffies(100));
+ mutex_unlock(&dev_priv->psr.lock);
+}
+
+/**
+ * intel_psr_init - Init basic PSR work and mutex.
+ * @dev: DRM device
+ *
+ * This function is called only once at driver load to initialize basic
+ * PSR stuff.
+ */
+void intel_psr_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
+ mutex_init(&dev_priv->psr.lock);
+}
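The DOC comment and the kerneldoc for intel_psr_invalidate() and intel_psr_flush() above describe how the software frontbuffer tracking is meant to drive PSR. A minimal sketch of that calling pattern, with a hypothetical call site (the real hooks live in the frontbuffer tracking code, not in this file):

/* Sketch only: simplified call site, the real plumbing passes per-plane bits. */
static void example_frontbuffer_cpu_write(struct drm_device *dev,
					  unsigned frontbuffer_bits)
{
	/* Rendering is about to dirty the frontbuffer: force PSR to exit. */
	intel_psr_invalidate(dev, frontbuffer_bits);

	/* ... CPU writes to the framebuffer go here ... */

	/* Writes have landed in memory: PSR may be scheduled for re-enable. */
	intel_psr_flush(dev, frontbuffer_bits);
}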
diff --git a/drivers/gpu/drm/i915/intel_renderstate.h b/drivers/gpu/drm/i915/intel_renderstate.h
index 6c792d3a9c9..5bd69852752 100644
--- a/drivers/gpu/drm/i915/intel_renderstate.h
+++ b/drivers/gpu/drm/i915/intel_renderstate.h
@@ -29,6 +29,7 @@
extern const struct intel_renderstate_rodata gen6_null_state;
extern const struct intel_renderstate_rodata gen7_null_state;
extern const struct intel_renderstate_rodata gen8_null_state;
+extern const struct intel_renderstate_rodata gen9_null_state;
#define RO_RENDERSTATE(_g) \
const struct intel_renderstate_rodata gen ## _g ## _null_state = { \
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen8.c b/drivers/gpu/drm/i915/intel_renderstate_gen8.c
index 75ef1b5de45..78011d73fa9 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen8.c
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen8.c
@@ -1,16 +1,134 @@
#include "intel_renderstate.h"
static const u32 gen8_null_state_relocs[] = {
- 0x00000048,
- 0x00000050,
- 0x00000060,
- 0x000003ec,
+ 0x00000798,
+ 0x000007a4,
+ 0x000007ac,
+ 0x000007bc,
-1,
};
static const u32 gen8_null_state_batch[] = {
+ 0x7a000004,
+ 0x01000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
0x69040000,
- 0x61020001,
+ 0x78140000,
+ 0x04000000,
+ 0x7820000a,
+ 0x00000000,
+ 0x00000000,
+ 0x80000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78130002,
+ 0x00000000,
+ 0x00000000,
+ 0x02001808,
+ 0x781f0002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78510009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78100007,
+ 0x00000000,
+ 0x00000000,
+ 0x00010000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781b0007,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000800,
+ 0x00000000,
+ 0x78110008,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781e0003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781d0007,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78120002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78500003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781c0002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x780c0000,
+ 0x00000000,
+ 0x78520003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78300000,
+ 0x08010040,
+ 0x78310000,
+ 0x1e000000,
+ 0x78320000,
+ 0x1e000000,
+ 0x78330000,
+ 0x1e000000,
+ 0x79190002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x791a0002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x791b0002,
+ 0x00000000,
0x00000000,
0x00000000,
0x79120000,
@@ -23,48 +141,435 @@ static const u32 gen8_null_state_batch[] = {
0x00000000,
0x79160000,
0x00000000,
- 0x6101000e,
- 0x00000001,
+ 0x78150009,
0x00000000,
- 0x00000001,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78190009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781a0009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78160009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78170009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78490001,
+ 0x00000000,
+ 0x00000000,
+ 0x784a0000,
+ 0x00000000,
+ 0x784b0000,
+ 0x00000004,
+ 0x79170101,
+ 0x00000000,
+ 0x00000080,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79180006,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79180006,
+ 0x20000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79180006,
+ 0x40000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79180006,
+ 0x60000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x6101000e,
0x00000001, /* reloc */
0x00000000,
+ 0x00000000,
0x00000001, /* reloc */
0x00000000,
+ 0x00000001, /* reloc */
0x00000000,
+ 0x00000001,
0x00000000,
0x00000001, /* reloc */
0x00000000,
- 0xfffff001,
0x00001001,
- 0xfffff001,
0x00001001,
- 0x78230000,
- 0x000006e0,
- 0x78210000,
- 0x00000700,
- 0x78300000,
- 0x08010040,
- 0x78330000,
- 0x08000000,
- 0x78310000,
- 0x08000000,
- 0x78320000,
- 0x08000000,
- 0x78240000,
- 0x00000641,
- 0x780e0000,
- 0x00000601,
+ 0x00000001,
+ 0x00001001,
+ 0x61020001,
+ 0x00000000,
+ 0x00000000,
+ 0x79000002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78050006,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79040002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79040002,
+ 0x40000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79040002,
+ 0x80000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79040002,
+ 0xc0000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79080001,
+ 0x00000000,
+ 0x00000000,
+ 0x790a0001,
+ 0x00000000,
+ 0x00000000,
+ 0x78060003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78070003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78040001,
+ 0x00000000,
+ 0x00000000,
+ 0x79110000,
+ 0x00000000,
0x780d0000,
0x00000000,
- 0x78180000,
- 0x00000001,
- 0x78520003,
+ 0x79060000,
0x00000000,
+ 0x7907001f,
0x00000000,
0x00000000,
0x00000000,
- 0x78190009,
0x00000000,
0x00000000,
0x00000000,
@@ -75,7 +580,6 @@ static const u32 gen8_null_state_batch[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x781b0007,
0x00000000,
0x00000000,
0x00000000,
@@ -84,26 +588,22 @@ static const u32 gen8_null_state_batch[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x78270000,
0x00000000,
- 0x782c0000,
0x00000000,
- 0x781c0002,
0x00000000,
0x00000000,
0x00000000,
- 0x78160009,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
+ 0x7902000f,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
- 0x78110008,
0x00000000,
0x00000000,
0x00000000,
@@ -113,12 +613,10 @@ static const u32 gen8_null_state_batch[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x78290000,
0x00000000,
- 0x782e0000,
0x00000000,
- 0x781a0009,
0x00000000,
+ 0x790c000f,
0x00000000,
0x00000000,
0x00000000,
@@ -128,7 +626,6 @@ static const u32 gen8_null_state_batch[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x781d0007,
0x00000000,
0x00000000,
0x00000000,
@@ -136,153 +633,153 @@ static const u32 gen8_null_state_batch[] = {
0x00000000,
0x00000000,
0x00000000,
+ 0x780a0003,
0x00000000,
- 0x78280000,
0x00000000,
- 0x782d0000,
0x00000000,
- 0x78260000,
0x00000000,
- 0x782b0000,
+ 0x78080083,
+ 0x00004000,
0x00000000,
- 0x78150009,
0x00000000,
0x00000000,
+ 0x04004000,
0x00000000,
0x00000000,
0x00000000,
+ 0x08004000,
0x00000000,
0x00000000,
0x00000000,
+ 0x0c004000,
0x00000000,
0x00000000,
- 0x78100007,
0x00000000,
+ 0x10004000,
0x00000000,
0x00000000,
0x00000000,
+ 0x14004000,
0x00000000,
0x00000000,
0x00000000,
+ 0x18004000,
0x00000000,
- 0x781e0003,
0x00000000,
0x00000000,
+ 0x1c004000,
0x00000000,
0x00000000,
- 0x78120002,
0x00000000,
+ 0x20004000,
0x00000000,
0x00000000,
- 0x781f0002,
- 0x30400820,
0x00000000,
+ 0x24004000,
0x00000000,
- 0x78510009,
0x00000000,
0x00000000,
+ 0x28004000,
0x00000000,
0x00000000,
0x00000000,
+ 0x2c004000,
0x00000000,
0x00000000,
0x00000000,
+ 0x30004000,
0x00000000,
0x00000000,
- 0x78500003,
- 0x00210000,
0x00000000,
+ 0x34004000,
0x00000000,
0x00000000,
- 0x78130002,
0x00000000,
+ 0x38004000,
0x00000000,
0x00000000,
- 0x782a0000,
- 0x00000480,
- 0x782f0000,
- 0x00000540,
- 0x78140000,
- 0x00000800,
- 0x78170009,
0x00000000,
+ 0x3c004000,
0x00000000,
0x00000000,
0x00000000,
+ 0x40004000,
0x00000000,
0x00000000,
0x00000000,
+ 0x44004000,
0x00000000,
0x00000000,
0x00000000,
- 0x7820000a,
- 0x00000580,
+ 0x48004000,
0x00000000,
- 0x08080000,
0x00000000,
0x00000000,
- 0x1f000002,
- 0x00060000,
+ 0x4c004000,
0x00000000,
0x00000000,
0x00000000,
+ 0x50004000,
0x00000000,
- 0x784d0000,
- 0x40000000,
- 0x784f0000,
- 0x80000100,
- 0x780f0000,
- 0x00000740,
- 0x78050006,
0x00000000,
0x00000000,
+ 0x54004000,
0x00000000,
0x00000000,
0x00000000,
+ 0x58004000,
0x00000000,
0x00000000,
- 0x78070003,
0x00000000,
+ 0x5c004000,
0x00000000,
0x00000000,
0x00000000,
- 0x78060003,
+ 0x60004000,
0x00000000,
0x00000000,
0x00000000,
+ 0x64004000,
0x00000000,
- 0x78040001,
0x00000000,
- 0x00000001,
- 0x79000002,
- 0xffffffff,
+ 0x00000000,
+ 0x68004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x6c004000,
+ 0x00000000,
0x00000000,
0x00000000,
- 0x78080003,
- 0x00006000,
- 0x000005e0, /* reloc */
+ 0x70004000,
0x00000000,
0x00000000,
- 0x78090005,
+ 0x00000000,
+ 0x74004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x7c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x80004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78090043,
0x02000000,
0x22220000,
- 0x02f60000,
- 0x11230000,
- 0x02850004,
- 0x11230000,
- 0x784b0000,
- 0x0000000f,
- 0x78490001,
0x00000000,
0x00000000,
- 0x7b000005,
0x00000000,
- 0x00000003,
0x00000000,
- 0x00000001,
0x00000000,
0x00000000,
- 0x05000000, /* cmds end */
0x00000000,
0x00000000,
0x00000000,
@@ -297,8 +794,6 @@ static const u32 gen8_null_state_batch[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x000004c0, /* state start */
- 0x00000500,
0x00000000,
0x00000000,
0x00000000,
@@ -345,46 +840,65 @@ static const u32 gen8_null_state_batch[] = {
0x00000000,
0x00000000,
0x00000000,
+ 0x680b0001,
+ 0x78260000,
+ 0x00000000,
+ 0x78270000,
+ 0x00000000,
+ 0x78280000,
+ 0x00000000,
+ 0x78290000,
+ 0x00000000,
+ 0x782a0000,
+ 0x00000000,
+ 0x780e0000,
+ 0x00000dc1,
+ 0x78240000,
+ 0x00000e01,
+ 0x784f0000,
+ 0x80000100,
+ 0x784d0000,
+ 0x40000000,
+ 0x782b0000,
+ 0x00000000,
+ 0x782c0000,
+ 0x00000000,
+ 0x782d0000,
0x00000000,
+ 0x782e0000,
0x00000000,
+ 0x782f0000,
0x00000000,
- 0x00000092,
+ 0x780f0000,
0x00000000,
+ 0x78230000,
+ 0x00000e60,
+ 0x78210000,
+ 0x00000e80,
+ 0x7b000005,
+ 0x00000004,
+ 0x00000001,
0x00000000,
+ 0x00000001,
0x00000000,
0x00000000,
+ 0x05000000, /* cmds end */
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
+ 0x00000000, /* state start */
+ 0x00000000,
+ 0x3f800000,
+ 0x3f800000,
+ 0x3f800000,
+ 0x3f800000,
+ 0x00000000,
+ 0x00000000,
0x00000000,
0x00000000,
- 0x0060005a,
- 0x21403ae8,
- 0x3a0000c0,
- 0x008d0040,
- 0x0060005a,
- 0x21603ae8,
- 0x3a0000c0,
- 0x008d0080,
- 0x0060005a,
- 0x21803ae8,
- 0x3a0000d0,
- 0x008d0040,
- 0x0060005a,
- 0x21a03ae8,
- 0x3a0000d0,
- 0x008d0080,
- 0x02800031,
- 0x2e0022e8,
- 0x0e000140,
- 0x08840001,
- 0x05800031,
- 0x200022e0,
- 0x0e000e00,
- 0x90031000,
0x00000000,
0x00000000,
0x00000000,
@@ -410,38 +924,6 @@ static const u32 gen8_null_state_batch[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
0x00000000,
0x00000000,
0x00000000,
@@ -449,8 +931,6 @@ static const u32 gen8_null_state_batch[] = {
0x00000000,
0x00000000,
0x00000000,
- 0xf99a130c,
- 0x799a130c,
0x00000000,
0x00000000,
0x00000000,
@@ -466,9 +946,7 @@ static const u32 gen8_null_state_batch[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x3f800000,
0x00000000,
- 0x3f800000,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen9.c b/drivers/gpu/drm/i915/intel_renderstate_gen9.c
new file mode 100644
index 00000000000..87507537380
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen9.c
@@ -0,0 +1,974 @@
+#include "intel_renderstate.h"
+
+static const u32 gen9_null_state_relocs[] = {
+ 0x000007a8,
+ 0x000007b4,
+ 0x000007bc,
+ 0x000007cc,
+ -1,
+};
+
+static const u32 gen9_null_state_batch[] = {
+ 0x7a000004,
+ 0x01000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x69040300,
+ 0x78140000,
+ 0x04000000,
+ 0x7820000a,
+ 0x00000000,
+ 0x00000000,
+ 0x80000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78130002,
+ 0x00000000,
+ 0x00000000,
+ 0x02001808,
+ 0x781f0004,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78510009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78100007,
+ 0x00000000,
+ 0x00000000,
+ 0x00010000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781b0007,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000800,
+ 0x00000000,
+ 0x78110008,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781e0003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781d0009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78120002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78500003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781c0002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x780c0000,
+ 0x00000000,
+ 0x78520003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78300000,
+ 0x08010040,
+ 0x78310000,
+ 0x1e000000,
+ 0x78320000,
+ 0x1e000000,
+ 0x78330000,
+ 0x1e000000,
+ 0x79190002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x791a0002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x791b0002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79120000,
+ 0x00000000,
+ 0x79130000,
+ 0x00000000,
+ 0x79140000,
+ 0x00000000,
+ 0x79150000,
+ 0x00000000,
+ 0x79160000,
+ 0x00000000,
+ 0x78150009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78190009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781a0009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78160009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78170009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78490001,
+ 0x00000000,
+ 0x00000000,
+ 0x784a0000,
+ 0x00000000,
+ 0x784b0000,
+ 0x00000004,
+ 0x79170101,
+ 0x00000000,
+ 0x00000080,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79180006,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79180006,
+ 0x20000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79180006,
+ 0x40000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79180006,
+ 0x60000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x61010011,
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00000000,
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00000001,
+ 0x00000000,
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00001001,
+ 0x00001001,
+ 0x00000001,
+ 0x00001001,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x61020001,
+ 0x00000000,
+ 0x00000000,
+ 0x79000002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78050006,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79040002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79040002,
+ 0x40000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79040002,
+ 0x80000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79040002,
+ 0xc0000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79080001,
+ 0x00000000,
+ 0x00000000,
+ 0x790a0001,
+ 0x00000000,
+ 0x00000000,
+ 0x78060003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78070003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78040001,
+ 0x00000000,
+ 0x00000000,
+ 0x79110000,
+ 0x00000000,
+ 0x780d0000,
+ 0x00000000,
+ 0x79060000,
+ 0x00000000,
+ 0x7907001f,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x7902000f,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x790c000f,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x780a0003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78080083,
+ 0x00004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x04004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x08004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x10004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x14004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x18004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x1c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x20004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x24004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x28004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x2c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x30004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x34004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x38004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x3c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x40004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x44004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x48004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x4c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x50004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x54004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x58004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x5c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x60004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x64004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x68004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x6c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x70004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x74004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x7c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x80004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78090043,
+ 0x02000000,
+ 0x22220000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78550003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x680b0001,
+ 0x780e0000,
+ 0x00000e01,
+ 0x78240000,
+ 0x00000e41,
+ 0x784f0000,
+ 0x80000100,
+ 0x784d0000,
+ 0x40000000,
+ 0x782b0000,
+ 0x00000000,
+ 0x782c0000,
+ 0x00000000,
+ 0x782d0000,
+ 0x00000000,
+ 0x782e0000,
+ 0x00000000,
+ 0x782f0000,
+ 0x00000000,
+ 0x780f0000,
+ 0x00000000,
+ 0x78230000,
+ 0x00000ea0,
+ 0x78210000,
+ 0x00000ec0,
+ 0x78260000,
+ 0x00000000,
+ 0x78270000,
+ 0x00000000,
+ 0x78280000,
+ 0x00000000,
+ 0x78290000,
+ 0x00000000,
+ 0x782a0000,
+ 0x00000000,
+ 0x7b000005,
+ 0x00000004,
+ 0x00000001,
+ 0x00000000,
+ 0x00000001,
+ 0x00000000,
+ 0x00000000,
+ 0x05000000, /* cmds end */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000, /* state start */
+ 0x00000000,
+ 0x3f800000,
+ 0x3f800000,
+ 0x3f800000,
+ 0x3f800000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000, /* state end */
+};
+
+RO_RENDERSTATE(9);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 0a80e419b58..9f445e9a75d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -589,14 +589,10 @@ static int init_ring_common(struct intel_engine_cs *ring)
goto out;
}
- if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
- i915_kernel_lost_context(ring->dev);
- else {
- ringbuf->head = I915_READ_HEAD(ring);
- ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
- ringbuf->space = intel_ring_space(ringbuf);
- ringbuf->last_retired_head = -1;
- }
+ ringbuf->head = I915_READ_HEAD(ring);
+ ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+ ringbuf->space = intel_ring_space(ringbuf);
+ ringbuf->last_retired_head = -1;
memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
@@ -665,76 +661,112 @@ err:
return ret;
}
-static inline void intel_ring_emit_wa(struct intel_engine_cs *ring,
- u32 addr, u32 value)
+static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
+ struct intel_context *ctx)
{
+ int ret, i;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_workarounds *w = &dev_priv->workarounds;
- if (WARN_ON(dev_priv->num_wa_regs >= I915_MAX_WA_REGS))
- return;
+ if (WARN_ON(w->count == 0))
+ return 0;
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit(ring, addr);
- intel_ring_emit(ring, value);
+ ring->gpu_caches_dirty = true;
+ ret = intel_ring_flush_all_caches(ring);
+ if (ret)
+ return ret;
- dev_priv->intel_wa_regs[dev_priv->num_wa_regs].addr = addr;
- dev_priv->intel_wa_regs[dev_priv->num_wa_regs].mask = value & 0xFFFF;
- /* value is updated with the status of remaining bits of this
- * register when it is read from debugfs file
- */
- dev_priv->intel_wa_regs[dev_priv->num_wa_regs].value = value;
- dev_priv->num_wa_regs++;
+ ret = intel_ring_begin(ring, (w->count * 2 + 2));
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
+ for (i = 0; i < w->count; i++) {
+ intel_ring_emit(ring, w->reg[i].addr);
+ intel_ring_emit(ring, w->reg[i].value);
+ }
+ intel_ring_emit(ring, MI_NOOP);
+
+ intel_ring_advance(ring);
- return;
+ ring->gpu_caches_dirty = true;
+ ret = intel_ring_flush_all_caches(ring);
+ if (ret)
+ return ret;
+
+ DRM_DEBUG_DRIVER("Number of Workarounds emitted: %d\n", w->count);
+
+ return 0;
}
+static int wa_add(struct drm_i915_private *dev_priv,
+ const u32 addr, const u32 mask, const u32 val)
+{
+ const u32 idx = dev_priv->workarounds.count;
+
+ if (WARN_ON(idx >= I915_MAX_WA_REGS))
+ return -ENOSPC;
+
+ dev_priv->workarounds.reg[idx].addr = addr;
+ dev_priv->workarounds.reg[idx].value = val;
+ dev_priv->workarounds.reg[idx].mask = mask;
+
+ dev_priv->workarounds.count++;
+
+ return 0;
+}
+
+#define WA_REG(addr, mask, val) { \
+ const int r = wa_add(dev_priv, (addr), (mask), (val)); \
+ if (r) \
+ return r; \
+ }
+
+#define WA_SET_BIT_MASKED(addr, mask) \
+ WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
+
+#define WA_CLR_BIT_MASKED(addr, mask) \
+ WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))
+
+#define WA_SET_FIELD_MASKED(addr, mask, value) \
+ WA_REG(addr, mask, _MASKED_FIELD(mask, value))
+
+#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
+#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))
+
+#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
+
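+/*
+ * Rough usage sketch for the helpers above; SOME_WA_REG is a made-up
+ * placeholder, the real users are the platform functions below. The WA_*
+ * macros only record register writes in dev_priv->workarounds:
+ *
+ *	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
+ *	WA_WRITE(SOME_WA_REG, 0x1);
+ *
+ * Nothing is emitted to the ring at that point. intel_ring_workarounds_emit()
+ * later replays the whole table with a single MI_LOAD_REGISTER_IMM(w->count)
+ * packet each time a context is initialized, so the values are restored
+ * after GPU reset and suspend/resume.
+ */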
static int bdw_init_workarounds(struct intel_engine_cs *ring)
{
- int ret;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- /*
- * workarounds applied in this fn are part of register state context,
- * they need to be re-initialized followed by gpu reset, suspend/resume,
- * module reload.
- */
- dev_priv->num_wa_regs = 0;
- memset(dev_priv->intel_wa_regs, 0, sizeof(dev_priv->intel_wa_regs));
-
- /*
- * update the number of dwords required based on the
- * actual number of workarounds applied
- */
- ret = intel_ring_begin(ring, 18);
- if (ret)
- return ret;
-
/* WaDisablePartialInstShootdown:bdw */
- /* WaDisableThreadStallDopClockGating:bdw */
- /* FIXME: Unclear whether we really need this on production bdw. */
- intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN,
- _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE
- | STALL_DOP_GATING_DISABLE));
+ /* WaDisableThreadStallDopClockGating:bdw (pre-production) */
+ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
+ PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE |
+ STALL_DOP_GATING_DISABLE);
- /* WaDisableDopClockGating:bdw May not be needed for production */
- intel_ring_emit_wa(ring, GEN7_ROW_CHICKEN2,
- _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+ /* WaDisableDopClockGating:bdw */
+ WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
+ DOP_CLOCK_GATING_DISABLE);
- intel_ring_emit_wa(ring, HALF_SLICE_CHICKEN3,
- _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
+ WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
+ GEN8_SAMPLER_POWER_BYPASS_DIS);
/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
* invalidation occurs during a PSD flush.
*/
- intel_ring_emit_wa(ring, HDC_CHICKEN0,
- _MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT));
+ /* WaDisableFenceDestinationToSLM:bdw (GT3 pre-production) */
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FORCE_NON_COHERENT |
+ (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
/* Wa4x4STCOptimizationDisable:bdw */
- intel_ring_emit_wa(ring, CACHE_MODE_1,
- _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
+ WA_SET_BIT_MASKED(CACHE_MODE_1,
+ GEN8_4x4_STC_OPTIMIZATION_DISABLE);
/*
* BSpec recommends 8x4 when MSAA is used,
@@ -744,52 +776,51 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
* disable bit, which we don't touch here, but it's good
* to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
*/
- intel_ring_emit_wa(ring, GEN7_GT_MODE,
- GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
-
- intel_ring_advance(ring);
-
- DRM_DEBUG_DRIVER("Number of Workarounds applied: %d\n",
- dev_priv->num_wa_regs);
+ WA_SET_FIELD_MASKED(GEN7_GT_MODE,
+ GEN6_WIZ_HASHING_MASK,
+ GEN6_WIZ_HASHING_16x4);
return 0;
}
static int chv_init_workarounds(struct intel_engine_cs *ring)
{
- int ret;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- /*
- * workarounds applied in this fn are part of register state context,
- * they need to be re-initialized followed by gpu reset, suspend/resume,
- * module reload.
+ /* WaDisablePartialInstShootdown:chv */
+ /* WaDisableThreadStallDopClockGating:chv */
+ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
+ PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE |
+ STALL_DOP_GATING_DISABLE);
+
+ /* Use Force Non-Coherent whenever executing a 3D context. This is a
+ * workaround for a possible hang in the unlikely event a TLB
+ * invalidation occurs during a PSD flush.
*/
- dev_priv->num_wa_regs = 0;
- memset(dev_priv->intel_wa_regs, 0, sizeof(dev_priv->intel_wa_regs));
+ /* WaForceEnableNonCoherent:chv */
+ /* WaHdcDisableFetchWhenMasked:chv */
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FORCE_NON_COHERENT |
+ HDC_DONOT_FETCH_MEM_WHEN_MASKED);
- ret = intel_ring_begin(ring, 12);
- if (ret)
- return ret;
+ return 0;
+}
- /* WaDisablePartialInstShootdown:chv */
- intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN,
- _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));
+int init_workarounds_ring(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
- /* WaDisableThreadStallDopClockGating:chv */
- intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN,
- _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
+ WARN_ON(ring->id != RCS);
- /* WaDisableDopClockGating:chv (pre-production hw) */
- intel_ring_emit_wa(ring, GEN7_ROW_CHICKEN2,
- _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+ dev_priv->workarounds.count = 0;
- /* WaDisableSamplerPowerBypass:chv (pre-production hw) */
- intel_ring_emit_wa(ring, HALF_SLICE_CHICKEN3,
- _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
+ if (IS_BROADWELL(dev))
+ return bdw_init_workarounds(ring);
- intel_ring_advance(ring);
+ if (IS_CHERRYVIEW(dev))
+ return chv_init_workarounds(ring);
return 0;
}
@@ -812,7 +843,7 @@ static int init_render_ring(struct intel_engine_cs *ring)
*
* WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
*/
- if (INTEL_INFO(dev)->gen >= 6)
+ if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 9)
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
/* Required for the hardware to program scanline values for waiting */
@@ -849,7 +880,7 @@ static int init_render_ring(struct intel_engine_cs *ring)
if (HAS_L3_DPF(dev))
I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
- return ret;
+ return init_workarounds_ring(ring);
}
static void render_ring_cleanup(struct intel_engine_cs *ring)
@@ -1186,7 +1217,7 @@ gen5_ring_get_irq(struct intel_engine_cs *ring)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
- if (!dev->irq_enabled)
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1217,7 +1248,7 @@ i9xx_ring_get_irq(struct intel_engine_cs *ring)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
- if (!dev->irq_enabled)
+ if (!intel_irqs_enabled(dev_priv))
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1254,7 +1285,7 @@ i8xx_ring_get_irq(struct intel_engine_cs *ring)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
- if (!dev->irq_enabled)
+ if (!intel_irqs_enabled(dev_priv))
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1388,8 +1419,8 @@ gen6_ring_get_irq(struct intel_engine_cs *ring)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
- if (!dev->irq_enabled)
- return false;
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+ return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
@@ -1431,7 +1462,7 @@ hsw_vebox_get_irq(struct intel_engine_cs *ring)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
- if (!dev->irq_enabled)
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1451,9 +1482,6 @@ hsw_vebox_put_irq(struct intel_engine_cs *ring)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
- if (!dev->irq_enabled)
- return;
-
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
I915_WRITE_IMR(ring, ~0);
@@ -1469,7 +1497,7 @@ gen8_ring_get_irq(struct intel_engine_cs *ring)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
- if (!dev->irq_enabled)
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1694,13 +1722,42 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
return 0;
}
-void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
{
- if (!ringbuf->obj)
- return;
-
iounmap(ringbuf->virtual_start);
+ ringbuf->virtual_start = NULL;
i915_gem_object_ggtt_unpin(ringbuf->obj);
+}
+
+int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
+ struct intel_ringbuffer *ringbuf)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_gem_object *obj = ringbuf->obj;
+ int ret;
+
+ ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret) {
+ i915_gem_object_ggtt_unpin(obj);
+ return ret;
+ }
+
+ ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
+ i915_gem_obj_ggtt_offset(obj), ringbuf->size);
+ if (ringbuf->virtual_start == NULL) {
+ i915_gem_object_ggtt_unpin(obj);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+{
drm_gem_object_unreference(&ringbuf->obj->base);
ringbuf->obj = NULL;
}
@@ -1708,12 +1765,7 @@ void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
int intel_alloc_ringbuffer_obj(struct drm_device *dev,
struct intel_ringbuffer *ringbuf)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
- int ret;
-
- if (ringbuf->obj)
- return 0;
obj = NULL;
if (!HAS_LLC(dev))
@@ -1726,30 +1778,9 @@ int intel_alloc_ringbuffer_obj(struct drm_device *dev,
/* mark ring buffers as read-only from GPU side by default */
obj->gt_ro = 1;
- ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
- if (ret)
- goto err_unref;
-
- ret = i915_gem_object_set_to_gtt_domain(obj, true);
- if (ret)
- goto err_unpin;
-
- ringbuf->virtual_start =
- ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
- ringbuf->size);
- if (ringbuf->virtual_start == NULL) {
- ret = -EINVAL;
- goto err_unpin;
- }
-
ringbuf->obj = obj;
- return 0;
-err_unpin:
- i915_gem_object_ggtt_unpin(obj);
-err_unref:
- drm_gem_object_unreference(&obj->base);
- return ret;
+ return 0;
}
static int intel_init_ring_buffer(struct drm_device *dev,
@@ -1786,10 +1817,21 @@ static int intel_init_ring_buffer(struct drm_device *dev,
goto error;
}
- ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
- if (ret) {
- DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
- goto error;
+ if (ringbuf->obj == NULL) {
+ ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
+ if (ret) {
+ DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
+ ring->name, ret);
+ goto error;
+ }
+
+ ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
+ if (ret) {
+ DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
+ ring->name, ret);
+ intel_destroy_ringbuffer_obj(ringbuf);
+ goto error;
+ }
}
/* Workaround an erratum on the i830 which causes a hang if
@@ -1818,15 +1860,19 @@ error:
void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
{
- struct drm_i915_private *dev_priv = to_i915(ring->dev);
- struct intel_ringbuffer *ringbuf = ring->buffer;
+ struct drm_i915_private *dev_priv;
+ struct intel_ringbuffer *ringbuf;
if (!intel_ring_initialized(ring))
return;
+ dev_priv = to_i915(ring->dev);
+ ringbuf = ring->buffer;
+
intel_stop_ring_buffer(ring);
WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
+ intel_unpin_ringbuffer_obj(ringbuf);
intel_destroy_ringbuffer_obj(ringbuf);
ring->preallocated_lazy_request = NULL;
ring->outstanding_lazy_seqno = 0;
@@ -1912,13 +1958,6 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
break;
}
- if (!drm_core_check_feature(dev, DRIVER_MODESET) &&
- dev->primary->master) {
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
- }
-
msleep(1);
if (dev_priv->mm.interruptible && signal_pending(current)) {
@@ -2229,6 +2268,7 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
u32 invalidate, u32 flush)
{
struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t cmd;
int ret;
@@ -2259,8 +2299,12 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
}
intel_ring_advance(ring);
- if (IS_GEN7(dev) && !invalidate && flush)
- return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
+ if (!invalidate && flush) {
+ if (IS_GEN7(dev))
+ return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
+ else if (IS_BROADWELL(dev))
+ dev_priv->fbc.need_sw_cache_clean = true;
+ }
return 0;
}
@@ -2293,10 +2337,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
dev_priv->semaphore_obj = obj;
}
}
- if (IS_CHERRYVIEW(dev))
- ring->init_context = chv_init_workarounds;
- else
- ring->init_context = bdw_init_workarounds;
+
+ ring->init_context = intel_ring_workarounds_emit;
ring->add_request = gen6_add_request;
ring->flush = gen8_render_ring_flush;
ring->irq_get = gen8_ring_get_irq;
@@ -2406,91 +2448,6 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
return intel_init_ring_buffer(dev, ring);
}
-int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[RCS];
- struct intel_ringbuffer *ringbuf = ring->buffer;
- int ret;
-
- if (ringbuf == NULL) {
- ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
- if (!ringbuf)
- return -ENOMEM;
- ring->buffer = ringbuf;
- }
-
- ring->name = "render ring";
- ring->id = RCS;
- ring->mmio_base = RENDER_RING_BASE;
-
- if (INTEL_INFO(dev)->gen >= 6) {
- /* non-kms not supported on gen6+ */
- ret = -ENODEV;
- goto err_ringbuf;
- }
-
- /* Note: gem is not supported on gen5/ilk without kms (the corresponding
- * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
- * the special gen5 functions. */
- ring->add_request = i9xx_add_request;
- if (INTEL_INFO(dev)->gen < 4)
- ring->flush = gen2_render_ring_flush;
- else
- ring->flush = gen4_render_ring_flush;
- ring->get_seqno = ring_get_seqno;
- ring->set_seqno = ring_set_seqno;
- if (IS_GEN2(dev)) {
- ring->irq_get = i8xx_ring_get_irq;
- ring->irq_put = i8xx_ring_put_irq;
- } else {
- ring->irq_get = i9xx_ring_get_irq;
- ring->irq_put = i9xx_ring_put_irq;
- }
- ring->irq_enable_mask = I915_USER_INTERRUPT;
- ring->write_tail = ring_write_tail;
- if (INTEL_INFO(dev)->gen >= 4)
- ring->dispatch_execbuffer = i965_dispatch_execbuffer;
- else if (IS_I830(dev) || IS_845G(dev))
- ring->dispatch_execbuffer = i830_dispatch_execbuffer;
- else
- ring->dispatch_execbuffer = i915_dispatch_execbuffer;
- ring->init = init_render_ring;
- ring->cleanup = render_ring_cleanup;
-
- ring->dev = dev;
- INIT_LIST_HEAD(&ring->active_list);
- INIT_LIST_HEAD(&ring->request_list);
-
- ringbuf->size = size;
- ringbuf->effective_size = ringbuf->size;
- if (IS_I830(ring->dev) || IS_845G(ring->dev))
- ringbuf->effective_size -= 2 * CACHELINE_BYTES;
-
- ringbuf->virtual_start = ioremap_wc(start, size);
- if (ringbuf->virtual_start == NULL) {
- DRM_ERROR("can not ioremap virtual address for"
- " ring buffer\n");
- ret = -ENOMEM;
- goto err_ringbuf;
- }
-
- if (!I915_NEED_GFX_HWS(dev)) {
- ret = init_phys_status_page(ring);
- if (ret)
- goto err_vstart;
- }
-
- return 0;
-
-err_vstart:
- iounmap(ringbuf->virtual_start);
-err_ringbuf:
- kfree(ringbuf);
- ring->buffer = NULL;
- return ret;
-}
-
int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 96479c89f4b..fe426cff598 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -148,7 +148,8 @@ struct intel_engine_cs {
int (*init)(struct intel_engine_cs *ring);
- int (*init_context)(struct intel_engine_cs *ring);
+ int (*init_context)(struct intel_engine_cs *ring,
+ struct intel_context *ctx);
void (*write_tail)(struct intel_engine_cs *ring,
u32 value);
@@ -235,6 +236,7 @@ struct intel_engine_cs {
/* Execlists */
spinlock_t execlist_lock;
struct list_head execlist_queue;
+ struct list_head execlist_retired_req_list;
u8 next_context_status_buffer;
u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
int (*emit_request)(struct intel_ringbuffer *ringbuf);
@@ -381,6 +383,9 @@ intel_write_status_page(struct intel_engine_cs *ring,
#define I915_GEM_HWS_SCRATCH_INDEX 0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
+void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
+int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
+ struct intel_ringbuffer *ringbuf);
void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
int intel_alloc_ringbuffer_obj(struct drm_device *dev,
struct intel_ringbuffer *ringbuf);
@@ -424,6 +429,8 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev);
u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
void intel_ring_setup_status_page(struct intel_engine_cs *ring);
+int init_workarounds_ring(struct intel_engine_cs *ring);
+
static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
{
return ringbuf->tail;
@@ -441,7 +448,4 @@ static inline void i915_trace_irq_get(struct intel_engine_cs *ring, u32 seqno)
ring->trace_irq_seqno = seqno;
}
-/* DRI warts */
-int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);
-
#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
new file mode 100644
index 00000000000..f5a78d53e29
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -0,0 +1,1406 @@
+/*
+ * Copyright © 2012-2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eugeni Dodonov <eugeni.dodonov@intel.com>
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ *
+ */
+
+#include <linux/pm_runtime.h>
+#include <linux/vgaarb.h>
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include <drm/i915_powerwell.h>
+
+/**
+ * DOC: runtime pm
+ *
+ * The i915 driver supports dynamic enabling and disabling of entire hardware
+ * blocks at runtime. This is especially important on the display side where
+ * software is supposed to control many power gates manually on recent hardware,
+ * since on the GT side a lot of the power management is done by the hardware.
+ * But even there some manual control at the device level is required.
+ *
+ * Since i915 supports a diverse set of platforms with a unified codebase and
+ * hardware engineers just love to shuffle functionality around between power
+ * domains there's a sizeable amount of indirection required. This file provides
+ * generic functions to the driver for grabbing and releasing references for
+ * abstract power domains. It then maps those to the actual power wells
+ * present for a given platform.
+ */
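+
+/*
+ * A minimal usage sketch for the functions in this file: callers bracket
+ * hardware access with a get/put pair so the power well backing the domain
+ * stays up while the registers are touched. read_pipe_regs() below is a
+ * made-up placeholder, the remaining symbols are real ones from this file:
+ *
+ *	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_B);
+ *	read_pipe_regs(dev_priv, PIPE_B);
+ *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_B);
+ *
+ * Every get must be balanced by a put; a well may only power down again once
+ * the last reference on all of its domains has been dropped.
+ */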
+
+static struct i915_power_domains *hsw_pwr;
+
+#define for_each_power_well(i, power_well, domain_mask, power_domains) \
+ for (i = 0; \
+ i < (power_domains)->power_well_count && \
+ ((power_well) = &(power_domains)->power_wells[i]); \
+ i++) \
+ if ((power_well)->domains & (domain_mask))
+
+#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
+ for (i = (power_domains)->power_well_count - 1; \
+ i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
+ i--) \
+ if ((power_well)->domains & (domain_mask))
+
+/*
+ * We should only use the power well if we explicitly asked the hardware to
+ * enable it, so check if it's enabled and also check if we've requested it to
+ * be enabled.
+ */
+static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ return I915_READ(HSW_PWR_WELL_DRIVER) ==
+ (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
+}
+
+/**
+ * __intel_display_power_is_enabled - unlocked check for a power domain
+ * @dev_priv: i915 device instance
+ * @domain: power domain to check
+ *
+ * This is the unlocked version of intel_display_power_is_enabled() and should
+ * only be used from error capture and recovery code where deadlocks are
+ * possible.
+ *
+ * Returns:
+ * True when the power domain is enabled, false otherwise.
+ */
+bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains;
+ struct i915_power_well *power_well;
+ bool is_enabled;
+ int i;
+
+ if (dev_priv->pm.suspended)
+ return false;
+
+ power_domains = &dev_priv->power_domains;
+
+ is_enabled = true;
+
+ for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
+ if (power_well->always_on)
+ continue;
+
+ if (!power_well->hw_enabled) {
+ is_enabled = false;
+ break;
+ }
+ }
+
+ return is_enabled;
+}
+
+/**
+ * intel_display_power_is_enabled - check for a power domain
+ * @dev_priv: i915 device instance
+ * @domain: power domain to check
+ *
+ * This function can be used to check the hw power domain state. It is mostly
+ * used in hardware state readout functions. Everywhere else code should rely
+ * upon explicit power domain reference counting to ensure that the hardware
+ * block is powered up before accessing it.
+ *
+ * Callers must hold the relevant modesetting locks to ensure that concurrent
+ * threads can't disable the power well while the caller tries to read a few
+ * registers.
+ *
+ * Returns:
+ * True when the power domain is enabled, false otherwise.
+ */
+bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains;
+ bool ret;
+
+ power_domains = &dev_priv->power_domains;
+
+ mutex_lock(&power_domains->lock);
+ ret = __intel_display_power_is_enabled(dev_priv, domain);
+ mutex_unlock(&power_domains->lock);
+
+ return ret;
+}
+
+/**
+ * intel_display_set_init_power - set the initial power domain state
+ * @dev_priv: i915 device instance
+ * @enable: whether to enable or disable the initial power domain state
+ *
+ * For simplicity, our driver load/unload and system suspend/resume code assumes
+ * that all power domains are always enabled. This function controls the state
+ * of this little hack. While the initial power domain state is enabled, runtime
+ * pm is effectively disabled.
+ */
+void intel_display_set_init_power(struct drm_i915_private *dev_priv,
+ bool enable)
+{
+ if (dev_priv->power_domains.init_power_on == enable)
+ return;
+
+ if (enable)
+ intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+ else
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+
+ dev_priv->power_domains.init_power_on = enable;
+}
+
+/*
+ * Starting with Haswell, we have a "Power Down Well" that can be turned off
+ * when not needed anymore. We have 4 registers that can request the power well
+ * to be enabled, and it will only be disabled if none of the registers is
+ * requesting it to be enabled.
+ */
+static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ /*
+ * After we re-enable the power well, if we touch VGA register 0x3d5
+ * we'll get unclaimed register interrupts. This stops after we write
+ * anything to the VGA MSR register. The vgacon module uses this
+ * register all the time, so if we unbind our driver and, as a
+ * consequence, bind vgacon, we'll get stuck in an infinite loop at
+	 * console_unlock(). So we touch the VGA MSR register here, making
+ * sure vgacon can keep working normally without triggering interrupts
+ * and error messages.
+ */
+ vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+ outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
+ vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+
+ if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9))
+ gen8_irq_power_well_post_enable(dev_priv);
+}
+
+static void hsw_set_power_well(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well, bool enable)
+{
+ bool is_enabled, enable_requested;
+ uint32_t tmp;
+
+ tmp = I915_READ(HSW_PWR_WELL_DRIVER);
+ is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
+ enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
+
+ if (enable) {
+ if (!enable_requested)
+ I915_WRITE(HSW_PWR_WELL_DRIVER,
+ HSW_PWR_WELL_ENABLE_REQUEST);
+
+ if (!is_enabled) {
+ DRM_DEBUG_KMS("Enabling power well\n");
+ if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
+ HSW_PWR_WELL_STATE_ENABLED), 20))
+ DRM_ERROR("Timeout enabling power well\n");
+ hsw_power_well_post_enable(dev_priv);
+ }
+
+ } else {
+ if (enable_requested) {
+ I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
+ POSTING_READ(HSW_PWR_WELL_DRIVER);
+ DRM_DEBUG_KMS("Requesting to disable the power well\n");
+ }
+ }
+}
+
+static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ hsw_set_power_well(dev_priv, power_well, power_well->count > 0);
+
+ /*
+ * We're taking over the BIOS, so clear any requests made by it since
+ * the driver is in charge now.
+ */
+ if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
+ I915_WRITE(HSW_PWR_WELL_BIOS, 0);
+}
+
+static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ hsw_set_power_well(dev_priv, power_well, true);
+}
+
+static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ hsw_set_power_well(dev_priv, power_well, false);
+}
+
+static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+}
+
+static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ return true;
+}
+
+static void vlv_set_power_well(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well, bool enable)
+{
+ enum punit_power_well power_well_id = power_well->data;
+ u32 mask;
+ u32 state;
+ u32 ctrl;
+
+ mask = PUNIT_PWRGT_MASK(power_well_id);
+ state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
+ PUNIT_PWRGT_PWR_GATE(power_well_id);
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+#define COND \
+ ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
+
+ if (COND)
+ goto out;
+
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
+ ctrl &= ~mask;
+ ctrl |= state;
+ vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
+
+ if (wait_for(COND, 100))
+		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
+ state,
+ vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
+
+#undef COND
+
+out:
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
+}
+
+static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_set_power_well(dev_priv, power_well, true);
+}
+
+static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_set_power_well(dev_priv, power_well, false);
+}
+
+static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ int power_well_id = power_well->data;
+ bool enabled = false;
+ u32 mask;
+ u32 state;
+ u32 ctrl;
+
+ mask = PUNIT_PWRGT_MASK(power_well_id);
+ ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+ state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
+ /*
+ * We only ever set the power-on and power-gate states, anything
+ * else is unexpected.
+ */
+ WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
+ state != PUNIT_PWRGT_PWR_GATE(power_well_id));
+ if (state == ctrl)
+ enabled = true;
+
+ /*
+ * A transient state at this point would mean some unexpected party
+ * is poking at the power controls too.
+ */
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
+ WARN_ON(ctrl != state);
+
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ return enabled;
+}
+
+static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
+
+ vlv_set_power_well(dev_priv, power_well, true);
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ valleyview_enable_display_irqs(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ /*
+ * During driver initialization/resume we can avoid restoring the
+ * part of the HW/SW state that will be inited anyway explicitly.
+ */
+ if (dev_priv->power_domains.initializing)
+ return;
+
+ intel_hpd_init(dev_priv);
+
+ i915_redisable_vga_power_on(dev_priv->dev);
+}
+
+static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ valleyview_disable_display_irqs(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ vlv_set_power_well(dev_priv, power_well, false);
+
+ vlv_power_sequencer_reset(dev_priv);
+}
+
+static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+
+ /*
+ * Enable the CRI clock source so we can get at the
+ * display and the reference clock for VGA
+ * hotplug / manual detection.
+ */
+ I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
+ DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+ udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
+
+ vlv_set_power_well(dev_priv, power_well, true);
+
+ /*
+ * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
+ * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
+ * a. GUnit 0x2110 bit[0] set to 1 (def 0)
+ * b. The other bits such as sfr settings / modesel may all
+ * be set to 0.
+ *
+ * This should only be done on init and resume from S3 with
+ * both PLLs disabled, or we risk losing DPIO and PLL
+ * synchronization.
+ */
+ I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
+}
+
+static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum pipe pipe;
+
+ WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+
+ for_each_pipe(dev_priv, pipe)
+ assert_pll_disabled(dev_priv, pipe);
+
+ /* Assert common reset */
+ I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
+
+ vlv_set_power_well(dev_priv, power_well, false);
+}
+
+static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum dpio_phy phy;
+
+ WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
+ power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
+
+ /*
+ * Enable the CRI clock source so we can get at the
+ * display and the reference clock for VGA
+ * hotplug / manual detection.
+ */
+ if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+ phy = DPIO_PHY0;
+ I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
+ DPLL_REFA_CLK_ENABLE_VLV);
+ I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
+ DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+ } else {
+ phy = DPIO_PHY1;
+ I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
+ DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+ }
+ udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
+ vlv_set_power_well(dev_priv, power_well, true);
+
+ /* Poll for phypwrgood signal */
+ if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
+		DRM_ERROR("Display PHY %d is not powered up\n", phy);
+
+ I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) |
+ PHY_COM_LANE_RESET_DEASSERT(phy));
+}
+
+static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum dpio_phy phy;
+
+ WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
+ power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
+
+ if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+ phy = DPIO_PHY0;
+ assert_pll_disabled(dev_priv, PIPE_A);
+ assert_pll_disabled(dev_priv, PIPE_B);
+ } else {
+ phy = DPIO_PHY1;
+ assert_pll_disabled(dev_priv, PIPE_C);
+ }
+
+ I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) &
+ ~PHY_COM_LANE_RESET_DEASSERT(phy));
+
+ vlv_set_power_well(dev_priv, power_well, false);
+}
+
+static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum pipe pipe = power_well->data;
+ bool enabled;
+ u32 state, ctrl;
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+ state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
+ /*
+ * We only ever set the power-on and power-gate states, anything
+ * else is unexpected.
+ */
+ WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
+ enabled = state == DP_SSS_PWR_ON(pipe);
+
+ /*
+ * A transient state at this point would mean some unexpected party
+ * is poking at the power controls too.
+ */
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
+ WARN_ON(ctrl << 16 != state);
+
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ return enabled;
+}
+
+static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well,
+ bool enable)
+{
+ enum pipe pipe = power_well->data;
+ u32 state;
+ u32 ctrl;
+
+ state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+#define COND \
+ ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
+
+ if (COND)
+ goto out;
+
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+ ctrl &= ~DP_SSC_MASK(pipe);
+ ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
+ vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
+
+ if (wait_for(COND, 100))
+		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
+ state,
+ vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
+
+#undef COND
+
+out:
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
+}
+
+static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ WARN_ON_ONCE(power_well->data != PIPE_A &&
+ power_well->data != PIPE_B &&
+ power_well->data != PIPE_C);
+
+ chv_set_pipe_power_well(dev_priv, power_well, true);
+
+ if (power_well->data == PIPE_A) {
+ spin_lock_irq(&dev_priv->irq_lock);
+ valleyview_enable_display_irqs(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ /*
+ * During driver initialization/resume we can avoid restoring the
+ * part of the HW/SW state that will be inited anyway explicitly.
+ */
+ if (dev_priv->power_domains.initializing)
+ return;
+
+ intel_hpd_init(dev_priv);
+
+ i915_redisable_vga_power_on(dev_priv->dev);
+ }
+}
+
+static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ WARN_ON_ONCE(power_well->data != PIPE_A &&
+ power_well->data != PIPE_B &&
+ power_well->data != PIPE_C);
+
+ if (power_well->data == PIPE_A) {
+ spin_lock_irq(&dev_priv->irq_lock);
+ valleyview_disable_display_irqs(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+ }
+
+ chv_set_pipe_power_well(dev_priv, power_well, false);
+
+ if (power_well->data == PIPE_A)
+ vlv_power_sequencer_reset(dev_priv);
+}
+
+static void check_power_well_state(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
+
+ if (power_well->always_on || !i915.disable_power_well) {
+ if (!enabled)
+ goto mismatch;
+
+ return;
+ }
+
+ if (enabled != (power_well->count > 0))
+ goto mismatch;
+
+ return;
+
+mismatch:
+ WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
+ power_well->name, power_well->always_on, enabled,
+ power_well->count, i915.disable_power_well);
+}
+
+/**
+ * intel_display_power_get - grab a power domain reference
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ *
+ * This function grabs a power domain reference for @domain and ensures that the
+ * power domain and all its parents are powered up. Therefore users should only
+ * grab a reference to the innermost power domain they need.
+ *
+ * Any power domain reference obtained by this function must have a symmetric
+ * call to intel_display_power_put() to release the reference again.
+ */
+void intel_display_power_get(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains;
+ struct i915_power_well *power_well;
+ int i;
+
+ intel_runtime_pm_get(dev_priv);
+
+ power_domains = &dev_priv->power_domains;
+
+ mutex_lock(&power_domains->lock);
+
+ for_each_power_well(i, power_well, BIT(domain), power_domains) {
+ if (!power_well->count++) {
+ DRM_DEBUG_KMS("enabling %s\n", power_well->name);
+ power_well->ops->enable(dev_priv, power_well);
+ power_well->hw_enabled = true;
+ }
+
+ check_power_well_state(dev_priv, power_well);
+ }
+
+ power_domains->domain_use_count[domain]++;
+
+ mutex_unlock(&power_domains->lock);
+}
+
+/**
+ * intel_display_power_put - release a power domain reference
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ *
+ * This function drops the power domain reference obtained by
+ * intel_display_power_get() and might power down the corresponding hardware
+ * block right away if this is the last reference.
+ */
+void intel_display_power_put(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains;
+ struct i915_power_well *power_well;
+ int i;
+
+ power_domains = &dev_priv->power_domains;
+
+ mutex_lock(&power_domains->lock);
+
+ WARN_ON(!power_domains->domain_use_count[domain]);
+ power_domains->domain_use_count[domain]--;
+
+ for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
+ WARN_ON(!power_well->count);
+
+ if (!--power_well->count && i915.disable_power_well) {
+ DRM_DEBUG_KMS("disabling %s\n", power_well->name);
+ power_well->hw_enabled = false;
+ power_well->ops->disable(dev_priv, power_well);
+ }
+
+ check_power_well_state(dev_priv, power_well);
+ }
+
+ mutex_unlock(&power_domains->lock);
+
+ intel_runtime_pm_put(dev_priv);
+}
+
+#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
+
+#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PIPE_A) | \
+ BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
+ BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_CRT) | \
+ BIT(POWER_DOMAIN_PLLS) | \
+ BIT(POWER_DOMAIN_INIT))
+#define HSW_DISPLAY_POWER_DOMAINS ( \
+ (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define BDW_ALWAYS_ON_POWER_DOMAINS ( \
+ HSW_ALWAYS_ON_POWER_DOMAINS | \
+ BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
+#define BDW_DISPLAY_POWER_DOMAINS ( \
+ (POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) | \
+ BIT(POWER_DOMAIN_INIT))
+
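+/*
+ * Worked example for the masks above: on HSW only pipe A, the eDP
+ * transcoder, the DDI/CRT ports and the PLLs are in the always-on set, so
+ * POWER_DOMAIN_PIPE_B and POWER_DOMAIN_PIPE_C end up in
+ * HSW_DISPLAY_POWER_DOMAINS through the ~HSW_ALWAYS_ON_POWER_DOMAINS
+ * complement. BDW additionally keeps the pipe A panel fitter always on.
+ * POWER_DOMAIN_INIT is or-ed into every set so that grabbing that single
+ * domain powers up every well during driver init and resume.
+ */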
+#define VLV_ALWAYS_ON_POWER_DOMAINS BIT(POWER_DOMAIN_INIT)
+#define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK
+
+#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_CRT) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define CHV_PIPE_A_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PIPE_A) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define CHV_PIPE_B_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PIPE_B) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define CHV_PIPE_C_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PIPE_C) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+
+static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
+ .sync_hw = i9xx_always_on_power_well_noop,
+ .enable = i9xx_always_on_power_well_noop,
+ .disable = i9xx_always_on_power_well_noop,
+ .is_enabled = i9xx_always_on_power_well_enabled,
+};
+
+static const struct i915_power_well_ops chv_pipe_power_well_ops = {
+ .sync_hw = chv_pipe_power_well_sync_hw,
+ .enable = chv_pipe_power_well_enable,
+ .disable = chv_pipe_power_well_disable,
+ .is_enabled = chv_pipe_power_well_enabled,
+};
+
+static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
+ .sync_hw = vlv_power_well_sync_hw,
+ .enable = chv_dpio_cmn_power_well_enable,
+ .disable = chv_dpio_cmn_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
+static struct i915_power_well i9xx_always_on_power_well[] = {
+ {
+ .name = "always-on",
+ .always_on = 1,
+ .domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
+ },
+};
+
+static const struct i915_power_well_ops hsw_power_well_ops = {
+ .sync_hw = hsw_power_well_sync_hw,
+ .enable = hsw_power_well_enable,
+ .disable = hsw_power_well_disable,
+ .is_enabled = hsw_power_well_enabled,
+};
+
+static struct i915_power_well hsw_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = 1,
+ .domains = HSW_ALWAYS_ON_POWER_DOMAINS,
+ .ops = &i9xx_always_on_power_well_ops,
+ },
+ {
+ .name = "display",
+ .domains = HSW_DISPLAY_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ },
+};
+
+static struct i915_power_well bdw_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = 1,
+ .domains = BDW_ALWAYS_ON_POWER_DOMAINS,
+ .ops = &i9xx_always_on_power_well_ops,
+ },
+ {
+ .name = "display",
+ .domains = BDW_DISPLAY_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ },
+};
+
+static const struct i915_power_well_ops vlv_display_power_well_ops = {
+ .sync_hw = vlv_power_well_sync_hw,
+ .enable = vlv_display_power_well_enable,
+ .disable = vlv_display_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
+static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
+ .sync_hw = vlv_power_well_sync_hw,
+ .enable = vlv_dpio_cmn_power_well_enable,
+ .disable = vlv_dpio_cmn_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
+static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
+ .sync_hw = vlv_power_well_sync_hw,
+ .enable = vlv_power_well_enable,
+ .disable = vlv_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
+static struct i915_power_well vlv_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = 1,
+ .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
+ .ops = &i9xx_always_on_power_well_ops,
+ },
+ {
+ .name = "display",
+ .domains = VLV_DISPLAY_POWER_DOMAINS,
+ .data = PUNIT_POWER_WELL_DISP2D,
+ .ops = &vlv_display_power_well_ops,
+ },
+ {
+ .name = "dpio-tx-b-01",
+ .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
+ },
+ {
+ .name = "dpio-tx-b-23",
+ .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
+ },
+ {
+ .name = "dpio-tx-c-01",
+ .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
+ },
+ {
+ .name = "dpio-tx-c-23",
+ .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
+ },
+ {
+ .name = "dpio-common",
+ .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
+ .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
+ .ops = &vlv_dpio_cmn_power_well_ops,
+ },
+};
+
+static struct i915_power_well chv_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = 1,
+ .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
+ .ops = &i9xx_always_on_power_well_ops,
+ },
+#if 0
+ {
+ .name = "display",
+ .domains = VLV_DISPLAY_POWER_DOMAINS,
+ .data = PUNIT_POWER_WELL_DISP2D,
+ .ops = &vlv_display_power_well_ops,
+ },
+#endif
+ {
+ .name = "pipe-a",
+ /*
+ * FIXME: pipe A power well seems to be the new disp2d well.
+ * At least all registers seem to be housed there. Figure
+ * out if this is a temporary situation in pre-production
+ * hardware or a permanent state of affairs.
+ */
+ .domains = CHV_PIPE_A_POWER_DOMAINS | VLV_DISPLAY_POWER_DOMAINS,
+ .data = PIPE_A,
+ .ops = &chv_pipe_power_well_ops,
+ },
+#if 0
+ {
+ .name = "pipe-b",
+ .domains = CHV_PIPE_B_POWER_DOMAINS,
+ .data = PIPE_B,
+ .ops = &chv_pipe_power_well_ops,
+ },
+ {
+ .name = "pipe-c",
+ .domains = CHV_PIPE_C_POWER_DOMAINS,
+ .data = PIPE_C,
+ .ops = &chv_pipe_power_well_ops,
+ },
+#endif
+ {
+ .name = "dpio-common-bc",
+ /*
+ * XXX: cmnreset for one PHY seems to disturb the other.
+ * As a workaround keep both powered on at the same
+ * time for now.
+ */
+ .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
+ .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
+ .ops = &chv_dpio_cmn_power_well_ops,
+ },
+ {
+ .name = "dpio-common-d",
+ /*
+ * XXX: cmnreset for one PHY seems to disturb the other.
+ * As a workaround keep both powered on at the same
+ * time for now.
+ */
+ .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
+ .data = PUNIT_POWER_WELL_DPIO_CMN_D,
+ .ops = &chv_dpio_cmn_power_well_ops,
+ },
+#if 0
+ {
+ .name = "dpio-tx-b-01",
+ .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
+ },
+ {
+ .name = "dpio-tx-b-23",
+ .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
+ },
+ {
+ .name = "dpio-tx-c-01",
+ .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
+ },
+ {
+ .name = "dpio-tx-c-23",
+ .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
+ },
+ {
+ .name = "dpio-tx-d-01",
+ .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
+ CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01,
+ },
+ {
+ .name = "dpio-tx-d-23",
+ .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
+ CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23,
+ },
+#endif
+};
+
+static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
+ enum punit_power_well power_well_id)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *power_well;
+ int i;
+
+ for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
+ if (power_well->data == power_well_id)
+ return power_well;
+ }
+
+ return NULL;
+}
+
+#define set_power_wells(power_domains, __power_wells) ({ \
+ (power_domains)->power_wells = (__power_wells); \
+ (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
+})
+
+/**
+ * intel_power_domains_init - initializes the power domain structures
+ * @dev_priv: i915 device instance
+ *
+ * Initializes the power domain structures for @dev_priv depending upon the
+ * supported platform.
+ */
+int intel_power_domains_init(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+
+ mutex_init(&power_domains->lock);
+
+ /*
+ * The enabling order will be from lower to higher indexed wells,
+ * the disabling order is reversed.
+ */
+ if (IS_HASWELL(dev_priv->dev)) {
+ set_power_wells(power_domains, hsw_power_wells);
+ hsw_pwr = power_domains;
+ } else if (IS_BROADWELL(dev_priv->dev)) {
+ set_power_wells(power_domains, bdw_power_wells);
+ hsw_pwr = power_domains;
+ } else if (IS_CHERRYVIEW(dev_priv->dev)) {
+ set_power_wells(power_domains, chv_power_wells);
+ } else if (IS_VALLEYVIEW(dev_priv->dev)) {
+ set_power_wells(power_domains, vlv_power_wells);
+ } else {
+ set_power_wells(power_domains, i9xx_always_on_power_well);
+ }
+
+ return 0;
+}
+
+static void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct device *device = &dev->pdev->dev;
+
+ if (!HAS_RUNTIME_PM(dev))
+ return;
+
+ if (!intel_enable_rc6(dev))
+ return;
+
+ /* Make sure we're not suspended first. */
+ pm_runtime_get_sync(device);
+ pm_runtime_disable(device);
+}
+
+/**
+ * intel_power_domains_fini - finalizes the power domain structures
+ * @dev_priv: i915 device instance
+ *
+ * Finalizes the power domain structures for @dev_priv depending upon the
+ * supported platform. This function also disables runtime pm and ensures that
+ * the device stays powered up so that the driver can be reloaded.
+ */
+void intel_power_domains_fini(struct drm_i915_private *dev_priv)
+{
+ intel_runtime_pm_disable(dev_priv);
+
+ /* The i915.ko module is still not prepared to be loaded when
+ * the power well is not enabled, so just enable it in case
+ * we're going to unload/reload. */
+ intel_display_set_init_power(dev_priv, true);
+
+ hsw_pwr = NULL;
+}
+
+static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *power_well;
+ int i;
+
+ mutex_lock(&power_domains->lock);
+ for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
+ power_well->ops->sync_hw(dev_priv, power_well);
+ power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
+ power_well);
+ }
+ mutex_unlock(&power_domains->lock);
+}
+
+static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_well *cmn =
+ lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
+ struct i915_power_well *disp2d =
+ lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
+
+ /* If the display might already be active, skip this */
+ if (cmn->ops->is_enabled(dev_priv, cmn) &&
+ disp2d->ops->is_enabled(dev_priv, disp2d) &&
+ I915_READ(DPIO_CTL) & DPIO_CMNRST)
+ return;
+
+ DRM_DEBUG_KMS("toggling display PHY side reset\n");
+
+ /* cmnlane needs DPLL registers */
+ disp2d->ops->enable(dev_priv, disp2d);
+
+ /*
+ * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
+ * Need to assert and de-assert PHY SB reset by gating the
+ * common lane power, then un-gating it.
+ * Simply ungating isn't sufficient to reset the PHY enough to get
+ * ports and lanes running.
+ */
+ cmn->ops->disable(dev_priv, cmn);
+}
+
+/**
+ * intel_power_domains_init_hw - initialize hardware power domain state
+ * @dev_priv: i915 device instance
+ *
+ * This function initializes the hardware power domain state and enables all
+ * power domains using intel_display_set_init_power().
+ */
+void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+
+ power_domains->initializing = true;
+
+ if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
+ mutex_lock(&power_domains->lock);
+ vlv_cmnlane_wa(dev_priv);
+ mutex_unlock(&power_domains->lock);
+ }
+
+ /* For now, we need the power well to be always enabled. */
+ intel_display_set_init_power(dev_priv, true);
+ intel_power_domains_resume(dev_priv);
+ power_domains->initializing = false;
+}
+
+/**
+ * intel_aux_display_runtime_get - grab an auxiliary power domain reference
+ * @dev_priv: i915 device instance
+ *
+ * This function grabs a power domain reference for the auxiliary power domain
+ * (for access to the GMBUS and DP AUX blocks) and ensures that it and all its
+ * parents are powered up. Therefore users should only grab a reference to the
+ * innermost power domain they need.
+ *
+ * Any power domain reference obtained by this function must have a symmetric
+ * call to intel_aux_display_runtime_put() to release the reference again.
+ */
+void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
+{
+ intel_runtime_pm_get(dev_priv);
+}
+
+/**
+ * intel_aux_display_runtime_put - release an auxiliary power domain reference
+ * @dev_priv: i915 device instance
+ *
+ * This function drops the auxiliary power domain reference obtained by
+ * intel_aux_display_runtime_get() and might power down the corresponding
+ * hardware block right away if this is the last reference.
+ */
+void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
+{
+ intel_runtime_pm_put(dev_priv);
+}
+
+/**
+ * intel_runtime_pm_get - grab a runtime pm reference
+ * @dev_priv: i915 device instance
+ *
+ * This function grabs a device-level runtime pm reference (mostly used for GEM
+ * code to ensure the GTT or GT is on) and ensures that it is powered up.
+ *
+ * Any runtime pm reference obtained by this function must have a symmetric
+ * call to intel_runtime_pm_put() to release the reference again.
+ */
+void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct device *device = &dev->pdev->dev;
+
+ if (!HAS_RUNTIME_PM(dev))
+ return;
+
+ pm_runtime_get_sync(device);
+ WARN(dev_priv->pm.suspended, "Device still suspended.\n");
+}
+
+/**
+ * intel_runtime_pm_get_noresume - grab a runtime pm reference
+ * @dev_priv: i915 device instance
+ *
+ * This function grabs a device-level runtime pm reference (mostly used for GEM
+ * code to ensure the GTT or GT is on).
+ *
+ * It will _not_ power up the device but instead only check that it's powered
+ * on. Therefore it is only valid to call this function from contexts where
+ * the device is known to be powered up and where trying to power it up would
+ * result in hilarity and deadlocks. That pretty much means only the system
+ * suspend/resume code where this is used to grab runtime pm references for
+ * delayed setup down in work items.
+ *
+ * Any runtime pm reference obtained by this function must have a symmetric
+ * call to intel_runtime_pm_put() to release the reference again.
+ */
+void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct device *device = &dev->pdev->dev;
+
+ if (!HAS_RUNTIME_PM(dev))
+ return;
+
+ WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
+ pm_runtime_get_noresume(device);
+}
+
+/**
+ * intel_runtime_pm_put - release a runtime pm reference
+ * @dev_priv: i915 device instance
+ *
+ * This function drops the device-level runtime pm reference obtained by
+ * intel_runtime_pm_get() and might power down the corresponding
+ * hardware block right away if this is the last reference.
+ */
+void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct device *device = &dev->pdev->dev;
+
+ if (!HAS_RUNTIME_PM(dev))
+ return;
+
+ pm_runtime_mark_last_busy(device);
+ pm_runtime_put_autosuspend(device);
+}
+
+/**
+ * intel_runtime_pm_enable - enable runtime pm
+ * @dev_priv: i915 device instance
+ *
+ * This function enables runtime pm at the end of the driver load sequence.
+ *
+ * Note that this function does currently not enable runtime pm for the
+ * subordinate display power domains. That is only done on the first modeset
+ * using intel_display_set_init_power().
+ */
+void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct device *device = &dev->pdev->dev;
+
+ if (!HAS_RUNTIME_PM(dev))
+ return;
+
+ pm_runtime_set_active(device);
+
+ /*
+ * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
+ * requirement.
+ */
+ if (!intel_enable_rc6(dev)) {
+ DRM_INFO("RC6 disabled, disabling runtime PM support\n");
+ return;
+ }
+
+ pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
+ pm_runtime_mark_last_busy(device);
+ pm_runtime_use_autosuspend(device);
+
+ pm_runtime_put_autosuspend(device);
+}
+
+/* Display audio driver power well request */
+int i915_request_power_well(void)
+{
+ struct drm_i915_private *dev_priv;
+
+ if (!hsw_pwr)
+ return -ENODEV;
+
+ dev_priv = container_of(hsw_pwr, struct drm_i915_private,
+ power_domains);
+ intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(i915_request_power_well);
+
+/* Display audio driver power well release */
+int i915_release_power_well(void)
+{
+ struct drm_i915_private *dev_priv;
+
+ if (!hsw_pwr)
+ return -ENODEV;
+
+ dev_priv = container_of(hsw_pwr, struct drm_i915_private,
+ power_domains);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(i915_release_power_well);
+
+/*
+ * Private interface for the audio driver to get CDCLK in kHz.
+ *
+ * Caller must request power well using i915_request_power_well() prior to
+ * making the call.
+ */
+int i915_get_cdclk_freq(void)
+{
+ struct drm_i915_private *dev_priv;
+
+ if (!hsw_pwr)
+ return -ENODEV;
+
+ dev_priv = container_of(hsw_pwr, struct drm_i915_private,
+ power_domains);
+
+ return intel_ddi_get_cdclk_freq(dev_priv);
+}
+EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
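/*
 * Editorial sketch, not part of the patch above: the runtime pm helpers and
 * the exported audio interface added in intel_runtime_pm.c are reference
 * counted and must be used in matched pairs, as the kernel-doc comments
 * describe. The two callers below are hypothetical and only illustrate the
 * call order; my_gem_mmio_access() and my_hda_get_cdclk() do not exist in
 * the driver.
 */
static void my_gem_mmio_access(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);	/* resumes the device if suspended */
	/* ... GT/GTT register access is safe here ... */
	intel_runtime_pm_put(dev_priv);	/* device may autosuspend ~10 s later */
}

static int my_hda_get_cdclk(void)
{
	int freq;

	/* The power well must be requested before querying CDCLK. */
	if (i915_request_power_well())
		return -ENODEV;
	freq = i915_get_cdclk_freq();
	i915_release_power_well();

	return freq;
}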
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 9350edd6728..6d7a277458b 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1991,57 +1991,10 @@ static int intel_sdvo_get_modes(struct drm_connector *connector)
return !list_empty(&connector->probed_modes);
}
-static void
-intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
-{
- struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
- struct drm_device *dev = connector->dev;
-
- if (intel_sdvo_connector->left)
- drm_property_destroy(dev, intel_sdvo_connector->left);
- if (intel_sdvo_connector->right)
- drm_property_destroy(dev, intel_sdvo_connector->right);
- if (intel_sdvo_connector->top)
- drm_property_destroy(dev, intel_sdvo_connector->top);
- if (intel_sdvo_connector->bottom)
- drm_property_destroy(dev, intel_sdvo_connector->bottom);
- if (intel_sdvo_connector->hpos)
- drm_property_destroy(dev, intel_sdvo_connector->hpos);
- if (intel_sdvo_connector->vpos)
- drm_property_destroy(dev, intel_sdvo_connector->vpos);
- if (intel_sdvo_connector->saturation)
- drm_property_destroy(dev, intel_sdvo_connector->saturation);
- if (intel_sdvo_connector->contrast)
- drm_property_destroy(dev, intel_sdvo_connector->contrast);
- if (intel_sdvo_connector->hue)
- drm_property_destroy(dev, intel_sdvo_connector->hue);
- if (intel_sdvo_connector->sharpness)
- drm_property_destroy(dev, intel_sdvo_connector->sharpness);
- if (intel_sdvo_connector->flicker_filter)
- drm_property_destroy(dev, intel_sdvo_connector->flicker_filter);
- if (intel_sdvo_connector->flicker_filter_2d)
- drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_2d);
- if (intel_sdvo_connector->flicker_filter_adaptive)
- drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_adaptive);
- if (intel_sdvo_connector->tv_luma_filter)
- drm_property_destroy(dev, intel_sdvo_connector->tv_luma_filter);
- if (intel_sdvo_connector->tv_chroma_filter)
- drm_property_destroy(dev, intel_sdvo_connector->tv_chroma_filter);
- if (intel_sdvo_connector->dot_crawl)
- drm_property_destroy(dev, intel_sdvo_connector->dot_crawl);
- if (intel_sdvo_connector->brightness)
- drm_property_destroy(dev, intel_sdvo_connector->brightness);
-}
-
static void intel_sdvo_destroy(struct drm_connector *connector)
{
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
- if (intel_sdvo_connector->tv_format)
- drm_property_destroy(connector->dev,
- intel_sdvo_connector->tv_format);
-
- intel_sdvo_destroy_enhance_property(connector);
drm_connector_cleanup(connector);
kfree(intel_sdvo_connector);
}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 07a74ef589b..7d9c340f769 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -37,6 +37,20 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
+static bool
+format_is_yuv(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_YVYU:
+ return true;
+ default:
+ return false;
+ }
+}
+
static int usecs_to_scanlines(const struct drm_display_mode *mode, int usecs)
{
/* paranoia */
@@ -46,7 +60,23 @@ static int usecs_to_scanlines(const struct drm_display_mode *mode, int usecs)
return DIV_ROUND_UP(usecs * mode->crtc_clock, 1000 * mode->crtc_htotal);
}
-static bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
+/**
+ * intel_pipe_update_start() - start update of a set of display registers
+ * @crtc: the crtc of which the registers are going to be updated
+ * @start_vbl_count: vblank counter return pointer used for error checking
+ *
+ * Mark the start of an update to pipe registers that should be updated
+ * atomically regarding vblank. If the next vblank will happen within
+ * the next 100 us, this function waits until the vblank passes.
+ *
+ * After a successful call to this function, interrupts will be disabled
+ * until a subsequent call to intel_pipe_update_end(). That is done to
+ * avoid random delays. The value written to @start_vbl_count should be
+ * supplied to intel_pipe_update_end() for error checking.
+ *
+ * Return: true if the call was successful
+ */
+bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
{
struct drm_device *dev = crtc->base.dev;
const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
@@ -56,8 +86,6 @@ static bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl
wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
DEFINE_WAIT(wait);
- WARN_ON(!drm_modeset_is_locked(&crtc->base.mutex));
-
vblank_start = mode->crtc_vblank_start;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
vblank_start = DIV_ROUND_UP(vblank_start, 2);
@@ -112,7 +140,16 @@ static bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl
return true;
}
-static void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count)
+/**
+ * intel_pipe_update_end() - end update of a set of display registers
+ * @crtc: the crtc of which the registers were updated
+ * @start_vbl_count: start vblank counter (used for error checking)
+ *
+ * Mark the end of an update started with intel_pipe_update_start(). This
+ * re-enables interrupts and verifies the update was actually completed
+ * before a vblank using the value of @start_vbl_count.
+ */
+void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count)
{
struct drm_device *dev = crtc->base.dev;
enum pipe pipe = crtc->pipe;
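/*
 * Editorial sketch, not part of the patch: intel_pipe_update_start() and
 * intel_pipe_update_end(), documented in the hunk above, bracket a burst of
 * plane register writes so they land within a single frame. The plane update
 * paths in this file use them roughly as follows; my_vblank_evaded_update()
 * is a hypothetical caller.
 */
static void my_vblank_evaded_update(struct intel_crtc *intel_crtc)
{
	uint32_t start_vbl_count;
	bool atomic_update;

	/* Waits for an imminent vblank (within ~100 us) to pass, then keeps
	 * interrupts disabled until intel_pipe_update_end(). */
	atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);

	/* ... write the SPxxx/PLANE_xxx registers for this pipe here ... */

	if (atomic_update)
		intel_pipe_update_end(intel_crtc, start_vbl_count);
}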
@@ -139,6 +176,226 @@ static void intel_update_primary_plane(struct intel_crtc *crtc)
}
static void
+skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t x, uint32_t y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_device *dev = drm_plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(drm_plane);
+ const int pipe = intel_plane->pipe;
+ const int plane = intel_plane->plane + 1;
+ u32 plane_ctl, stride;
+ int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+
+ plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
+
+ /* Mask out pixel format bits in case we change it */
+ plane_ctl &= ~PLANE_CTL_FORMAT_MASK;
+ plane_ctl &= ~PLANE_CTL_ORDER_RGBX;
+ plane_ctl &= ~PLANE_CTL_YUV422_ORDER_MASK;
+ plane_ctl &= ~PLANE_CTL_TILED_MASK;
+ plane_ctl &= ~PLANE_CTL_ALPHA_MASK;
+ plane_ctl &= ~PLANE_CTL_ROTATE_MASK;
+
+ /* Trickle feed has to be enabled */
+ plane_ctl &= ~PLANE_CTL_TRICKLE_FEED_DISABLE;
+
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_RGB565:
+ plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
+ break;
+ /*
+ * XXX: For ARGB/ABGR formats we default to expecting scanout buffers
+ * to be already pre-multiplied. We need to add a knob (or a different
+ * DRM_FORMAT) for user-space to configure that.
+ */
+ case DRM_FORMAT_ABGR8888:
+ plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 |
+ PLANE_CTL_ORDER_RGBX |
+ PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 |
+ PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+ break;
+ case DRM_FORMAT_YUYV:
+ plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
+ break;
+ case DRM_FORMAT_YVYU:
+ plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
+ break;
+ case DRM_FORMAT_UYVY:
+ plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
+ break;
+ case DRM_FORMAT_VYUY:
+ plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
+ break;
+ default:
+ BUG();
+ }
+
+ switch (obj->tiling_mode) {
+ case I915_TILING_NONE:
+ stride = fb->pitches[0] >> 6;
+ break;
+ case I915_TILING_X:
+ plane_ctl |= PLANE_CTL_TILED_X;
+ stride = fb->pitches[0] >> 9;
+ break;
+ default:
+ BUG();
+ }
+ if (intel_plane->rotation == BIT(DRM_ROTATE_180))
+ plane_ctl |= PLANE_CTL_ROTATE_180;
+
+ plane_ctl |= PLANE_CTL_ENABLE;
+ plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
+
+ intel_update_sprite_watermarks(drm_plane, crtc, src_w, src_h,
+ pixel_size, true,
+ src_w != crtc_w || src_h != crtc_h);
+
+ /* Sizes are 0 based */
+ src_w--;
+ src_h--;
+ crtc_w--;
+ crtc_h--;
+
+ I915_WRITE(PLANE_OFFSET(pipe, plane), (y << 16) | x);
+ I915_WRITE(PLANE_STRIDE(pipe, plane), stride);
+ I915_WRITE(PLANE_POS(pipe, plane), (crtc_y << 16) | crtc_x);
+ I915_WRITE(PLANE_SIZE(pipe, plane), (crtc_h << 16) | crtc_w);
+ I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
+ I915_WRITE(PLANE_SURF(pipe, plane), i915_gem_obj_ggtt_offset(obj));
+ POSTING_READ(PLANE_SURF(pipe, plane));
+}
+
+static void
+skl_disable_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc)
+{
+ struct drm_device *dev = drm_plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(drm_plane);
+ const int pipe = intel_plane->pipe;
+ const int plane = intel_plane->plane + 1;
+
+ I915_WRITE(PLANE_CTL(pipe, plane),
+ I915_READ(PLANE_CTL(pipe, plane)) & ~PLANE_CTL_ENABLE);
+
+ /* Activate double buffered register update */
+ I915_WRITE(PLANE_CTL(pipe, plane), 0);
+ POSTING_READ(PLANE_CTL(pipe, plane));
+
+ intel_update_sprite_watermarks(drm_plane, crtc, 0, 0, 0, false, false);
+}
+
+static int
+skl_update_colorkey(struct drm_plane *drm_plane,
+ struct drm_intel_sprite_colorkey *key)
+{
+ struct drm_device *dev = drm_plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(drm_plane);
+ const int pipe = intel_plane->pipe;
+ const int plane = intel_plane->plane;
+ u32 plane_ctl;
+
+ I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
+ I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value);
+ I915_WRITE(PLANE_KEYMSK(pipe, plane), key->channel_mask);
+
+ plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
+ plane_ctl &= ~PLANE_CTL_KEY_ENABLE_MASK;
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
+ else if (key->flags & I915_SET_COLORKEY_SOURCE)
+ plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
+ I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
+
+ POSTING_READ(PLANE_CTL(pipe, plane));
+
+ return 0;
+}
+
+static void
+skl_get_colorkey(struct drm_plane *drm_plane,
+ struct drm_intel_sprite_colorkey *key)
+{
+ struct drm_device *dev = drm_plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(drm_plane);
+ const int pipe = intel_plane->pipe;
+ const int plane = intel_plane->plane;
+ u32 plane_ctl;
+
+ key->min_value = I915_READ(PLANE_KEYVAL(pipe, plane));
+ key->max_value = I915_READ(PLANE_KEYMAX(pipe, plane));
+ key->channel_mask = I915_READ(PLANE_KEYMSK(pipe, plane));
+
+ plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
+
+ switch (plane_ctl & PLANE_CTL_KEY_ENABLE_MASK) {
+ case PLANE_CTL_KEY_ENABLE_DESTINATION:
+ key->flags = I915_SET_COLORKEY_DESTINATION;
+ break;
+ case PLANE_CTL_KEY_ENABLE_SOURCE:
+ key->flags = I915_SET_COLORKEY_SOURCE;
+ break;
+ default:
+ key->flags = I915_SET_COLORKEY_NONE;
+ }
+}
+
+static void
+chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
+{
+ struct drm_i915_private *dev_priv = intel_plane->base.dev->dev_private;
+ int plane = intel_plane->plane;
+
+ /* RGB data seems to always bypass the CSC */
+ if (!format_is_yuv(format))
+ return;
+
+ /*
+ * BT.601 limited range YCbCr -> full range RGB
+ *
+ * |r| | 6537 4769 0| |cr |
+ * |g| = |-3330 4769 -1605| x |y-64|
+ * |b| | 0 4769 8263| |cb |
+ *
+ * Cb and Cr apparently come in as signed already, so no
+ * need for any offset. For Y we need to remove the offset.
+ */
+ I915_WRITE(SPCSCYGOFF(plane), SPCSC_OOFF(0) | SPCSC_IOFF(-64));
+ I915_WRITE(SPCSCCBOFF(plane), SPCSC_OOFF(0) | SPCSC_IOFF(0));
+ I915_WRITE(SPCSCCROFF(plane), SPCSC_OOFF(0) | SPCSC_IOFF(0));
+
+ I915_WRITE(SPCSCC01(plane), SPCSC_C1(4769) | SPCSC_C0(6537));
+ I915_WRITE(SPCSCC23(plane), SPCSC_C1(-3330) | SPCSC_C0(0));
+ I915_WRITE(SPCSCC45(plane), SPCSC_C1(-1605) | SPCSC_C0(4769));
+ I915_WRITE(SPCSCC67(plane), SPCSC_C1(4769) | SPCSC_C0(0));
+ I915_WRITE(SPCSCC8(plane), SPCSC_C0(8263));
+
+ I915_WRITE(SPCSCYGICLAMP(plane), SPCSC_IMAX(940) | SPCSC_IMIN(64));
+ I915_WRITE(SPCSCCBICLAMP(plane), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
+ I915_WRITE(SPCSCCRICLAMP(plane), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
+
+ I915_WRITE(SPCSCYGOCLAMP(plane), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+ I915_WRITE(SPCSCCBOCLAMP(plane), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+ I915_WRITE(SPCSCCROCLAMP(plane), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+}
+
+static void
vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
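/*
 * Editorial note, not part of the patch: the chv_update_csc() coefficients
 * in the hunk above appear to be BT.601 limited-range YCbCr -> full-range
 * RGB factors expressed in 1/4096 (s4.12 fixed-point) steps. Assuming that
 * scaling, the matrix works out to roughly:
 *
 *   R = 1.164 * (Y - 64) + 1.596 * Cr                  (4769, 6537)
 *   G = 1.164 * (Y - 64) - 0.813 * Cr - 0.392 * Cb     (4769, -3330, -1605)
 *   B = 1.164 * (Y - 64) + 2.017 * Cb                  (4769, 8263)
 *
 * where 4769/4096 ~= 255/219 is the usual luma expansion, and Cb/Cr need no
 * input offset because the hardware already treats them as signed (matching
 * the SPCSC_IOFF(0) writes).
 */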
@@ -249,6 +506,9 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
intel_update_primary_plane(intel_crtc);
+ if (IS_CHERRYVIEW(dev) && pipe == PIPE_B)
+ chv_update_csc(intel_plane, fb->pixel_format);
+
I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
@@ -257,6 +517,8 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
else
I915_WRITE(SPLINOFF(pipe, plane), linear_offset);
+ I915_WRITE(SPCONSTALPHA(pipe, plane), 0);
+
I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
I915_WRITE(SPCNTR(pipe, plane), sprctl);
I915_WRITE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
@@ -821,20 +1083,6 @@ ilk_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
key->flags = I915_SET_COLORKEY_NONE;
}
-static bool
-format_is_yuv(uint32_t format)
-{
- switch (format) {
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- case DRM_FORMAT_YVYU:
- return true;
- default:
- return false;
- }
-}
-
static bool colorkey_enabled(struct intel_plane *intel_plane)
{
struct drm_intel_sprite_colorkey key;
@@ -845,57 +1093,23 @@ static bool colorkey_enabled(struct intel_plane *intel_plane)
}
static int
-intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
+intel_check_sprite_plane(struct drm_plane *plane,
+ struct intel_plane_state *state)
{
- struct drm_device *dev = plane->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(state->crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
- enum pipe pipe = intel_crtc->pipe;
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
- struct drm_i915_gem_object *old_obj = intel_plane->obj;
- int ret;
- bool primary_enabled;
- bool visible;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ int crtc_x, crtc_y;
+ unsigned int crtc_w, crtc_h;
+ uint32_t src_x, src_y, src_w, src_h;
+ struct drm_rect *src = &state->src;
+ struct drm_rect *dst = &state->dst;
+ struct drm_rect *orig_src = &state->orig_src;
+ const struct drm_rect *clip = &state->clip;
int hscale, vscale;
int max_scale, min_scale;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
- struct drm_rect src = {
- /* sample coordinates in 16.16 fixed point */
- .x1 = src_x,
- .x2 = src_x + src_w,
- .y1 = src_y,
- .y2 = src_y + src_h,
- };
- struct drm_rect dst = {
- /* integer pixels */
- .x1 = crtc_x,
- .x2 = crtc_x + crtc_w,
- .y1 = crtc_y,
- .y2 = crtc_y + crtc_h,
- };
- const struct drm_rect clip = {
- .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
- .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
- };
- const struct {
- int crtc_x, crtc_y;
- unsigned int crtc_w, crtc_h;
- uint32_t src_x, src_y, src_w, src_h;
- } orig = {
- .crtc_x = crtc_x,
- .crtc_y = crtc_y,
- .crtc_w = crtc_w,
- .crtc_h = crtc_h,
- .src_x = src_x,
- .src_y = src_y,
- .src_w = src_w,
- .src_h = src_h,
- };
/* Don't modify another pipe's plane */
if (intel_plane->pipe != intel_crtc->pipe) {
@@ -927,55 +1141,55 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
max_scale = intel_plane->max_downscale << 16;
min_scale = intel_plane->can_scale ? 1 : (1 << 16);
- drm_rect_rotate(&src, fb->width << 16, fb->height << 16,
+ drm_rect_rotate(src, fb->width << 16, fb->height << 16,
intel_plane->rotation);
- hscale = drm_rect_calc_hscale_relaxed(&src, &dst, min_scale, max_scale);
+ hscale = drm_rect_calc_hscale_relaxed(src, dst, min_scale, max_scale);
BUG_ON(hscale < 0);
- vscale = drm_rect_calc_vscale_relaxed(&src, &dst, min_scale, max_scale);
+ vscale = drm_rect_calc_vscale_relaxed(src, dst, min_scale, max_scale);
BUG_ON(vscale < 0);
- visible = drm_rect_clip_scaled(&src, &dst, &clip, hscale, vscale);
+ state->visible = drm_rect_clip_scaled(src, dst, clip, hscale, vscale);
- crtc_x = dst.x1;
- crtc_y = dst.y1;
- crtc_w = drm_rect_width(&dst);
- crtc_h = drm_rect_height(&dst);
+ crtc_x = dst->x1;
+ crtc_y = dst->y1;
+ crtc_w = drm_rect_width(dst);
+ crtc_h = drm_rect_height(dst);
- if (visible) {
+ if (state->visible) {
/* check again in case clipping clamped the results */
- hscale = drm_rect_calc_hscale(&src, &dst, min_scale, max_scale);
+ hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
if (hscale < 0) {
DRM_DEBUG_KMS("Horizontal scaling factor out of limits\n");
- drm_rect_debug_print(&src, true);
- drm_rect_debug_print(&dst, false);
+ drm_rect_debug_print(src, true);
+ drm_rect_debug_print(dst, false);
return hscale;
}
- vscale = drm_rect_calc_vscale(&src, &dst, min_scale, max_scale);
+ vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
if (vscale < 0) {
DRM_DEBUG_KMS("Vertical scaling factor out of limits\n");
- drm_rect_debug_print(&src, true);
- drm_rect_debug_print(&dst, false);
+ drm_rect_debug_print(src, true);
+ drm_rect_debug_print(dst, false);
return vscale;
}
/* Make the source viewport size an exact multiple of the scaling factors. */
- drm_rect_adjust_size(&src,
- drm_rect_width(&dst) * hscale - drm_rect_width(&src),
- drm_rect_height(&dst) * vscale - drm_rect_height(&src));
+ drm_rect_adjust_size(src,
+ drm_rect_width(dst) * hscale - drm_rect_width(src),
+ drm_rect_height(dst) * vscale - drm_rect_height(src));
- drm_rect_rotate_inv(&src, fb->width << 16, fb->height << 16,
+ drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16,
intel_plane->rotation);
/* sanity check to make sure the src viewport wasn't enlarged */
- WARN_ON(src.x1 < (int) src_x ||
- src.y1 < (int) src_y ||
- src.x2 > (int) (src_x + src_w) ||
- src.y2 > (int) (src_y + src_h));
+ WARN_ON(src->x1 < (int) orig_src->x1 ||
+ src->y1 < (int) orig_src->y1 ||
+ src->x2 > (int) orig_src->x2 ||
+ src->y2 > (int) orig_src->y2);
/*
* Hardware doesn't handle subpixel coordinates.
@@ -983,10 +1197,10 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
* increase the source viewport size, because that could
* push the downscaling factor out of bounds.
*/
- src_x = src.x1 >> 16;
- src_w = drm_rect_width(&src) >> 16;
- src_y = src.y1 >> 16;
- src_h = drm_rect_height(&src) >> 16;
+ src_x = src->x1 >> 16;
+ src_w = drm_rect_width(src) >> 16;
+ src_y = src->y1 >> 16;
+ src_h = drm_rect_height(src) >> 16;
if (format_is_yuv(fb->pixel_format)) {
src_x &= ~1;
@@ -1000,12 +1214,12 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
crtc_w &= ~1;
if (crtc_w == 0)
- visible = false;
+ state->visible = false;
}
}
/* Check size restrictions when scaling */
- if (visible && (src_w != crtc_w || src_h != crtc_h)) {
+ if (state->visible && (src_w != crtc_w || src_h != crtc_h)) {
unsigned int width_bytes;
WARN_ON(!intel_plane->can_scale);
@@ -1013,12 +1227,13 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
/* FIXME interlacing min height is 6 */
if (crtc_w < 3 || crtc_h < 3)
- visible = false;
+ state->visible = false;
if (src_w < 3 || src_h < 3)
- visible = false;
+ state->visible = false;
- width_bytes = ((src_x * pixel_size) & 63) + src_w * pixel_size;
+ width_bytes = ((src_x * pixel_size) & 63) +
+ src_w * pixel_size;
if (src_w > 2048 || src_h > 2048 ||
width_bytes > 4096 || fb->pitches[0] > 4096) {
@@ -1027,42 +1242,90 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
}
}
- dst.x1 = crtc_x;
- dst.x2 = crtc_x + crtc_w;
- dst.y1 = crtc_y;
- dst.y2 = crtc_y + crtc_h;
+ if (state->visible) {
+ src->x1 = src_x;
+ src->x2 = src_x + src_w;
+ src->y1 = src_y;
+ src->y2 = src_y + src_h;
+ }
- /*
- * If the sprite is completely covering the primary plane,
- * we can disable the primary and save power.
- */
- primary_enabled = !drm_rect_equals(&dst, &clip) || colorkey_enabled(intel_plane);
- WARN_ON(!primary_enabled && !visible && intel_crtc->active);
+ dst->x1 = crtc_x;
+ dst->x2 = crtc_x + crtc_w;
+ dst->y1 = crtc_y;
+ dst->y2 = crtc_y + crtc_h;
- mutex_lock(&dev->struct_mutex);
+ return 0;
+}
- /* Note that this will apply the VT-d workaround for scanouts,
- * which is more restrictive than required for sprites. (The
- * primary plane requires 256KiB alignment with 64 PTE padding,
- * the sprite planes only require 128KiB alignment and 32 PTE padding.
- */
- ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+static int
+intel_prepare_sprite_plane(struct drm_plane *plane,
+ struct intel_plane_state *state)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_crtc *crtc = state->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ enum pipe pipe = intel_crtc->pipe;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct drm_i915_gem_object *old_obj = intel_plane->obj;
+ int ret;
- i915_gem_track_fb(old_obj, obj,
- INTEL_FRONTBUFFER_SPRITE(pipe));
- mutex_unlock(&dev->struct_mutex);
+ if (old_obj != obj) {
+ mutex_lock(&dev->struct_mutex);
- if (ret)
- return ret;
+ /* Note that this will apply the VT-d workaround for scanouts,
+ * which is more restrictive than required for sprites. (The
+ * primary plane requires 256KiB alignment with 64 PTE padding,
+ * the sprite planes only require 128KiB alignment and 32 PTE
+ * padding.
+ */
+ ret = intel_pin_and_fence_fb_obj(plane, fb, NULL);
+ if (ret == 0)
+ i915_gem_track_fb(old_obj, obj,
+ INTEL_FRONTBUFFER_SPRITE(pipe));
+ mutex_unlock(&dev->struct_mutex);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
- intel_plane->crtc_x = orig.crtc_x;
- intel_plane->crtc_y = orig.crtc_y;
- intel_plane->crtc_w = orig.crtc_w;
- intel_plane->crtc_h = orig.crtc_h;
- intel_plane->src_x = orig.src_x;
- intel_plane->src_y = orig.src_y;
- intel_plane->src_w = orig.src_w;
- intel_plane->src_h = orig.src_h;
+static void
+intel_commit_sprite_plane(struct drm_plane *plane,
+ struct intel_plane_state *state)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_crtc *crtc = state->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ enum pipe pipe = intel_crtc->pipe;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct drm_i915_gem_object *old_obj = intel_plane->obj;
+ int crtc_x, crtc_y;
+ unsigned int crtc_w, crtc_h;
+ uint32_t src_x, src_y, src_w, src_h;
+ struct drm_rect *dst = &state->dst;
+ const struct drm_rect *clip = &state->clip;
+ bool primary_enabled;
+
+ /*
+ * If the sprite is completely covering the primary plane,
+ * we can disable the primary and save power.
+ */
+ primary_enabled = !drm_rect_equals(dst, clip) || colorkey_enabled(intel_plane);
+ WARN_ON(!primary_enabled && !state->visible && intel_crtc->active);
+
+ intel_plane->crtc_x = state->orig_dst.x1;
+ intel_plane->crtc_y = state->orig_dst.y1;
+ intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
+ intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
+ intel_plane->src_x = state->orig_src.x1;
+ intel_plane->src_y = state->orig_src.y1;
+ intel_plane->src_w = drm_rect_width(&state->orig_src);
+ intel_plane->src_h = drm_rect_height(&state->orig_src);
intel_plane->obj = obj;
if (intel_crtc->active) {
@@ -1076,12 +1339,22 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (primary_was_enabled && !primary_enabled)
intel_pre_disable_primary(crtc);
- if (visible)
+ if (state->visible) {
+ crtc_x = state->dst.x1;
+ crtc_y = state->dst.y1;
+ crtc_w = drm_rect_width(&state->dst);
+ crtc_h = drm_rect_height(&state->dst);
+ src_x = state->src.x1;
+ src_y = state->src.y1;
+ src_w = drm_rect_width(&state->src);
+ src_h = drm_rect_height(&state->src);
intel_plane->update_plane(plane, crtc, fb, obj,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h);
- else
+ } else {
intel_plane->disable_plane(plane, crtc);
+ }
+
intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_SPRITE(pipe));
@@ -1090,21 +1363,65 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
}
/* Unpin old obj after new one is active to avoid ugliness */
- if (old_obj) {
+ if (old_obj && old_obj != obj) {
+
/*
* It's fairly common to simply update the position of
* an existing object. In that case, we don't need to
* wait for vblank to avoid ugliness, we only need to
* do the pin & ref bookkeeping.
*/
- if (old_obj != obj && intel_crtc->active)
+ if (intel_crtc->active)
intel_wait_for_vblank(dev, intel_crtc->pipe);
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(old_obj);
mutex_unlock(&dev->struct_mutex);
}
+}
+
+static int
+intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct intel_plane_state state;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int ret;
+ state.crtc = crtc;
+ state.fb = fb;
+
+ /* sample coordinates in 16.16 fixed point */
+ state.src.x1 = src_x;
+ state.src.x2 = src_x + src_w;
+ state.src.y1 = src_y;
+ state.src.y2 = src_y + src_h;
+
+ /* integer pixels */
+ state.dst.x1 = crtc_x;
+ state.dst.x2 = crtc_x + crtc_w;
+ state.dst.y1 = crtc_y;
+ state.dst.y2 = crtc_y + crtc_h;
+
+ state.clip.x1 = 0;
+ state.clip.y1 = 0;
+ state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
+ state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
+ state.orig_src = state.src;
+ state.orig_dst = state.dst;
+
+ ret = intel_check_sprite_plane(plane, &state);
+ if (ret)
+ return ret;
+
+ ret = intel_prepare_sprite_plane(plane, &state);
+ if (ret)
+ return ret;
+
+ intel_commit_sprite_plane(plane, &state);
return 0;
}
@@ -1305,6 +1622,18 @@ static uint32_t vlv_plane_formats[] = {
DRM_FORMAT_VYUY,
};
+static uint32_t skl_plane_formats[] = {
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+};
+
int
intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
{
@@ -1368,7 +1697,21 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
num_plane_formats = ARRAY_SIZE(snb_plane_formats);
}
break;
-
+ case 9:
+ /*
+ * FIXME: Skylake planes can be scaled (with some restrictions),
+ * but this is for another time.
+ */
+ intel_plane->can_scale = false;
+ intel_plane->max_downscale = 1;
+ intel_plane->update_plane = skl_update_plane;
+ intel_plane->disable_plane = skl_disable_plane;
+ intel_plane->update_colorkey = skl_update_colorkey;
+ intel_plane->get_colorkey = skl_get_colorkey;
+
+ plane_formats = skl_plane_formats;
+ num_plane_formats = ARRAY_SIZE(skl_plane_formats);
+ break;
default:
kfree(intel_plane);
return -ENODEV;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index c14341ca3ef..6f5f59b880f 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1182,18 +1182,17 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long irqflags;
u32 tv_ctl, save_tv_ctl;
u32 tv_dac, save_tv_dac;
int type;
/* Disable TV interrupts around load detect or we'll recurse */
if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
i915_disable_pipestat(dev_priv, 0,
PIPE_HOTPLUG_INTERRUPT_STATUS |
PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
}
save_tv_dac = tv_dac = I915_READ(TV_DAC);
@@ -1266,11 +1265,11 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
/* Restore interrupt config */
if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
i915_enable_pipestat(dev_priv, 0,
PIPE_HOTPLUG_INTERRUPT_STATUS |
PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
}
return type;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 918b7616396..46de8d75b4b 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -43,23 +43,17 @@
static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
- WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
- "Device suspended\n");
+ WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
+ "Device suspended\n");
}
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
- u32 gt_thread_status_mask;
-
- if (IS_HASWELL(dev_priv->dev))
- gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
- else
- gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
-
/* w/a for a sporadic read returning 0 by waiting for the GT
* thread to wake up.
*/
- if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
+ if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
+ GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
DRM_ERROR("GT thread status wait timed out\n");
}
@@ -120,8 +114,7 @@ static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
/* WaRsForcewakeWaitTC0:ivb,hsw */
- if (INTEL_INFO(dev_priv->dev)->gen < 8)
- __gen6_gt_wait_for_thread_c0(dev_priv);
+ __gen6_gt_wait_for_thread_c0(dev_priv);
}
static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
@@ -229,10 +222,6 @@ static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out: waiting for media to ack.\n");
}
-
- /* WaRsForcewakeWaitTC0:vlv */
- if (!IS_CHERRYVIEW(dev_priv->dev))
- __gen6_gt_wait_for_thread_c0(dev_priv);
}
static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
@@ -299,6 +288,154 @@ static void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
+static void __gen9_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
+{
+ __raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
+ _MASKED_BIT_DISABLE(0xffff));
+
+ __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
+ _MASKED_BIT_DISABLE(0xffff));
+
+ __raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
+ _MASKED_BIT_DISABLE(0xffff));
+}
+
+static void
+__gen9_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
+{
+ /* Check for Render Engine */
+ if (FORCEWAKE_RENDER & fw_engine) {
+ if (wait_for_atomic((__raw_i915_read32(dev_priv,
+ FORCEWAKE_ACK_RENDER_GEN9) &
+ FORCEWAKE_KERNEL) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");
+
+ __raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
+ _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+
+ if (wait_for_atomic((__raw_i915_read32(dev_priv,
+ FORCEWAKE_ACK_RENDER_GEN9) &
+ FORCEWAKE_KERNEL),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out: waiting for Render to ack.\n");
+ }
+
+ /* Check for Media Engine */
+ if (FORCEWAKE_MEDIA & fw_engine) {
+ if (wait_for_atomic((__raw_i915_read32(dev_priv,
+ FORCEWAKE_ACK_MEDIA_GEN9) &
+ FORCEWAKE_KERNEL) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");
+
+ __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
+ _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+
+ if (wait_for_atomic((__raw_i915_read32(dev_priv,
+ FORCEWAKE_ACK_MEDIA_GEN9) &
+ FORCEWAKE_KERNEL),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out: waiting for Media to ack.\n");
+ }
+
+ /* Check for Blitter Engine */
+ if (FORCEWAKE_BLITTER & fw_engine) {
+ if (wait_for_atomic((__raw_i915_read32(dev_priv,
+ FORCEWAKE_ACK_BLITTER_GEN9) &
+ FORCEWAKE_KERNEL) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out: Blitter forcewake old ack to clear.\n");
+
+ __raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
+ _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+
+ if (wait_for_atomic((__raw_i915_read32(dev_priv,
+ FORCEWAKE_ACK_BLITTER_GEN9) &
+ FORCEWAKE_KERNEL),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out: waiting for Blitter to ack.\n");
+ }
+}
+
+static void
+__gen9_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
+{
+ /* Check for Render Engine */
+ if (FORCEWAKE_RENDER & fw_engine)
+ __raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
+ _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+
+ /* Check for Media Engine */
+ if (FORCEWAKE_MEDIA & fw_engine)
+ __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
+ _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+
+ /* Check for Blitter Engine */
+ if (FORCEWAKE_BLITTER & fw_engine)
+ __raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
+ _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+}
+
+static void
+gen9_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ if (FORCEWAKE_RENDER & fw_engine) {
+ if (dev_priv->uncore.fw_rendercount++ == 0)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv,
+ FORCEWAKE_RENDER);
+ }
+
+ if (FORCEWAKE_MEDIA & fw_engine) {
+ if (dev_priv->uncore.fw_mediacount++ == 0)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv,
+ FORCEWAKE_MEDIA);
+ }
+
+ if (FORCEWAKE_BLITTER & fw_engine) {
+ if (dev_priv->uncore.fw_blittercount++ == 0)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv,
+ FORCEWAKE_BLITTER);
+ }
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void
+gen9_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ if (FORCEWAKE_RENDER & fw_engine) {
+ WARN_ON(dev_priv->uncore.fw_rendercount == 0);
+ if (--dev_priv->uncore.fw_rendercount == 0)
+ dev_priv->uncore.funcs.force_wake_put(dev_priv,
+ FORCEWAKE_RENDER);
+ }
+
+ if (FORCEWAKE_MEDIA & fw_engine) {
+ WARN_ON(dev_priv->uncore.fw_mediacount == 0);
+ if (--dev_priv->uncore.fw_mediacount == 0)
+ dev_priv->uncore.funcs.force_wake_put(dev_priv,
+ FORCEWAKE_MEDIA);
+ }
+
+ if (FORCEWAKE_BLITTER & fw_engine) {
+ WARN_ON(dev_priv->uncore.fw_blittercount == 0);
+ if (--dev_priv->uncore.fw_blittercount == 0)
+ dev_priv->uncore.funcs.force_wake_put(dev_priv,
+ FORCEWAKE_BLITTER);
+ }
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
static void gen6_force_wake_timer(unsigned long arg)
{
struct drm_i915_private *dev_priv = (void *)arg;
@@ -337,6 +474,9 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
__gen7_gt_force_wake_mt_reset(dev_priv);
+ if (IS_GEN9(dev))
+ __gen9_gt_force_wake_mt_reset(dev_priv);
+
if (restore) { /* If reset with a user forcewake, try to restore */
unsigned fw = 0;
@@ -346,6 +486,15 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
if (dev_priv->uncore.fw_mediacount)
fw |= FORCEWAKE_MEDIA;
+ } else if (IS_GEN9(dev)) {
+ if (dev_priv->uncore.fw_rendercount)
+ fw |= FORCEWAKE_RENDER;
+
+ if (dev_priv->uncore.fw_mediacount)
+ fw |= FORCEWAKE_MEDIA;
+
+ if (dev_priv->uncore.fw_blittercount)
+ fw |= FORCEWAKE_BLITTER;
} else {
if (dev_priv->uncore.forcewake_count)
fw = FORCEWAKE_ALL;
@@ -363,7 +512,8 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
+static void __intel_uncore_early_sanitize(struct drm_device *dev,
+ bool restore_forcewake)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -389,6 +539,12 @@ void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
intel_uncore_forcewake_reset(dev, restore_forcewake);
}
+void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
+{
+ __intel_uncore_early_sanitize(dev, restore_forcewake);
+ i915_check_and_clear_faults(dev);
+}
+
void intel_uncore_sanitize(struct drm_device *dev)
{
/* BIOS often leaves RC6 enabled, but disable it for hw init */
@@ -410,6 +566,10 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
intel_runtime_pm_get(dev_priv);
+ /* Redirect to Gen9 specific routine */
+ if (IS_GEN9(dev_priv->dev))
+ return gen9_force_wake_get(dev_priv, fw_engine);
+
/* Redirect to VLV specific routine */
if (IS_VALLEYVIEW(dev_priv->dev))
return vlv_force_wake_get(dev_priv, fw_engine);
@@ -431,6 +591,12 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
if (!dev_priv->uncore.funcs.force_wake_put)
return;
+ /* Redirect to Gen9 specific routine */
+ if (IS_GEN9(dev_priv->dev)) {
+ gen9_force_wake_put(dev_priv, fw_engine);
+ goto out;
+ }
+
/* Redirect to VLV specific routine */
if (IS_VALLEYVIEW(dev_priv->dev)) {
vlv_force_wake_put(dev_priv, fw_engine);
@@ -504,6 +670,38 @@ void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
REG_RANGE((reg), 0x14000, 0x14400) || \
REG_RANGE((reg), 0x22000, 0x24000))
+#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
+ REG_RANGE((reg), 0xB00, 0x2000)
+
+#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
+ (REG_RANGE((reg), 0x2000, 0x2700) || \
+ REG_RANGE((reg), 0x3000, 0x4000) || \
+ REG_RANGE((reg), 0x5200, 0x8000) || \
+ REG_RANGE((reg), 0x8140, 0x8160) || \
+ REG_RANGE((reg), 0x8300, 0x8500) || \
+ REG_RANGE((reg), 0x8C00, 0x8D00) || \
+ REG_RANGE((reg), 0xB000, 0xB480) || \
+ REG_RANGE((reg), 0xE000, 0xE900) || \
+ REG_RANGE((reg), 0x24400, 0x24800))
+
+#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
+ (REG_RANGE((reg), 0x8130, 0x8140) || \
+ REG_RANGE((reg), 0x8800, 0x8A00) || \
+ REG_RANGE((reg), 0xD000, 0xD800) || \
+ REG_RANGE((reg), 0x12000, 0x14000) || \
+ REG_RANGE((reg), 0x1A000, 0x1EA00) || \
+ REG_RANGE((reg), 0x30000, 0x40000))
+
+#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
+ REG_RANGE((reg), 0x9400, 0x9800)
+
+#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
+ ((reg) < 0x40000 && \
+ !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
+ !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
+ !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
+ !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
+
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
@@ -634,6 +832,45 @@ chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
REG_READ_FOOTER; \
}
+#define SKL_NEEDS_FORCE_WAKE(dev_priv, reg) \
+ ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
+
+#define __gen9_read(x) \
+static u##x \
+gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+ REG_READ_HEADER(x); \
+ if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ } else { \
+ unsigned fwengine = 0; \
+ if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_rendercount == 0) \
+ fwengine = FORCEWAKE_RENDER; \
+ } else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_mediacount == 0) \
+ fwengine = FORCEWAKE_MEDIA; \
+ } else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_rendercount == 0) \
+ fwengine |= FORCEWAKE_RENDER; \
+ if (dev_priv->uncore.fw_mediacount == 0) \
+ fwengine |= FORCEWAKE_MEDIA; \
+ } else { \
+ if (dev_priv->uncore.fw_blittercount == 0) \
+ fwengine = FORCEWAKE_BLITTER; \
+ } \
+ if (fwengine) \
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ if (fwengine) \
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
+ } \
+ REG_READ_FOOTER; \
+}
+
+__gen9_read(8)
+__gen9_read(16)
+__gen9_read(32)
+__gen9_read(64)
__chv_read(8)
__chv_read(16)
__chv_read(32)
@@ -655,6 +892,7 @@ __gen4_read(16)
__gen4_read(32)
__gen4_read(64)
+#undef __gen9_read
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
@@ -792,6 +1030,69 @@ chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace)
REG_WRITE_FOOTER; \
}
+static const u32 gen9_shadowed_regs[] = {
+ RING_TAIL(RENDER_RING_BASE),
+ RING_TAIL(GEN6_BSD_RING_BASE),
+ RING_TAIL(VEBOX_RING_BASE),
+ RING_TAIL(BLT_RING_BASE),
+ FORCEWAKE_BLITTER_GEN9,
+ FORCEWAKE_RENDER_GEN9,
+ FORCEWAKE_MEDIA_GEN9,
+ GEN6_RPNSWREQ,
+ GEN6_RC_VIDEO_FREQ,
+ /* TODO: Other registers are not yet used */
+};
+
+static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
+ if (reg == gen9_shadowed_regs[i])
+ return true;
+
+ return false;
+}
+
+#define __gen9_write(x) \
+static void \
+gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
+ bool trace) { \
+ REG_WRITE_HEADER; \
+ if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)) || \
+ is_gen9_shadowed(dev_priv, reg)) { \
+ __raw_i915_write##x(dev_priv, reg, val); \
+ } else { \
+ unsigned fwengine = 0; \
+ if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_rendercount == 0) \
+ fwengine = FORCEWAKE_RENDER; \
+ } else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_mediacount == 0) \
+ fwengine = FORCEWAKE_MEDIA; \
+ } else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_rendercount == 0) \
+ fwengine |= FORCEWAKE_RENDER; \
+ if (dev_priv->uncore.fw_mediacount == 0) \
+ fwengine |= FORCEWAKE_MEDIA; \
+ } else { \
+ if (dev_priv->uncore.fw_blittercount == 0) \
+ fwengine = FORCEWAKE_BLITTER; \
+ } \
+ if (fwengine) \
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, \
+ fwengine); \
+ __raw_i915_write##x(dev_priv, reg, val); \
+ if (fwengine) \
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, \
+ fwengine); \
+ } \
+ REG_WRITE_FOOTER; \
+}
+
+__gen9_write(8)
+__gen9_write(16)
+__gen9_write(32)
+__gen9_write(64)
__chv_write(8)
__chv_write(16)
__chv_write(32)
@@ -817,6 +1118,7 @@ __gen4_write(16)
__gen4_write(32)
__gen4_write(64)
+#undef __gen9_write
#undef __chv_write
#undef __gen8_write
#undef __hsw_write
@@ -826,6 +1128,22 @@ __gen4_write(64)
#undef REG_WRITE_FOOTER
#undef REG_WRITE_HEADER
+#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
+do { \
+ dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
+ dev_priv->uncore.funcs.mmio_writew = x##_write16; \
+ dev_priv->uncore.funcs.mmio_writel = x##_write32; \
+ dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
+} while (0)
+
+#define ASSIGN_READ_MMIO_VFUNCS(x) \
+do { \
+ dev_priv->uncore.funcs.mmio_readb = x##_read8; \
+ dev_priv->uncore.funcs.mmio_readw = x##_read16; \
+ dev_priv->uncore.funcs.mmio_readl = x##_read32; \
+ dev_priv->uncore.funcs.mmio_readq = x##_read64; \
+} while (0)
+
void intel_uncore_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -833,9 +1151,12 @@ void intel_uncore_init(struct drm_device *dev)
setup_timer(&dev_priv->uncore.force_wake_timer,
gen6_force_wake_timer, (unsigned long)dev_priv);
- intel_uncore_early_sanitize(dev, false);
+ __intel_uncore_early_sanitize(dev, false);
- if (IS_VALLEYVIEW(dev)) {
+ if (IS_GEN9(dev)) {
+ dev_priv->uncore.funcs.force_wake_get = __gen9_force_wake_get;
+ dev_priv->uncore.funcs.force_wake_put = __gen9_force_wake_put;
+ } else if (IS_VALLEYVIEW(dev)) {
dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
@@ -881,77 +1202,52 @@ void intel_uncore_init(struct drm_device *dev)
switch (INTEL_INFO(dev)->gen) {
default:
+ WARN_ON(1);
+ return;
+ case 9:
+ ASSIGN_WRITE_MMIO_VFUNCS(gen9);
+ ASSIGN_READ_MMIO_VFUNCS(gen9);
+ break;
+ case 8:
if (IS_CHERRYVIEW(dev)) {
- dev_priv->uncore.funcs.mmio_writeb = chv_write8;
- dev_priv->uncore.funcs.mmio_writew = chv_write16;
- dev_priv->uncore.funcs.mmio_writel = chv_write32;
- dev_priv->uncore.funcs.mmio_writeq = chv_write64;
- dev_priv->uncore.funcs.mmio_readb = chv_read8;
- dev_priv->uncore.funcs.mmio_readw = chv_read16;
- dev_priv->uncore.funcs.mmio_readl = chv_read32;
- dev_priv->uncore.funcs.mmio_readq = chv_read64;
+ ASSIGN_WRITE_MMIO_VFUNCS(chv);
+ ASSIGN_READ_MMIO_VFUNCS(chv);
} else {
- dev_priv->uncore.funcs.mmio_writeb = gen8_write8;
- dev_priv->uncore.funcs.mmio_writew = gen8_write16;
- dev_priv->uncore.funcs.mmio_writel = gen8_write32;
- dev_priv->uncore.funcs.mmio_writeq = gen8_write64;
- dev_priv->uncore.funcs.mmio_readb = gen6_read8;
- dev_priv->uncore.funcs.mmio_readw = gen6_read16;
- dev_priv->uncore.funcs.mmio_readl = gen6_read32;
- dev_priv->uncore.funcs.mmio_readq = gen6_read64;
+ ASSIGN_WRITE_MMIO_VFUNCS(gen8);
+ ASSIGN_READ_MMIO_VFUNCS(gen6);
}
break;
case 7:
case 6:
if (IS_HASWELL(dev)) {
- dev_priv->uncore.funcs.mmio_writeb = hsw_write8;
- dev_priv->uncore.funcs.mmio_writew = hsw_write16;
- dev_priv->uncore.funcs.mmio_writel = hsw_write32;
- dev_priv->uncore.funcs.mmio_writeq = hsw_write64;
+ ASSIGN_WRITE_MMIO_VFUNCS(hsw);
} else {
- dev_priv->uncore.funcs.mmio_writeb = gen6_write8;
- dev_priv->uncore.funcs.mmio_writew = gen6_write16;
- dev_priv->uncore.funcs.mmio_writel = gen6_write32;
- dev_priv->uncore.funcs.mmio_writeq = gen6_write64;
+ ASSIGN_WRITE_MMIO_VFUNCS(gen6);
}
if (IS_VALLEYVIEW(dev)) {
- dev_priv->uncore.funcs.mmio_readb = vlv_read8;
- dev_priv->uncore.funcs.mmio_readw = vlv_read16;
- dev_priv->uncore.funcs.mmio_readl = vlv_read32;
- dev_priv->uncore.funcs.mmio_readq = vlv_read64;
+ ASSIGN_READ_MMIO_VFUNCS(vlv);
} else {
- dev_priv->uncore.funcs.mmio_readb = gen6_read8;
- dev_priv->uncore.funcs.mmio_readw = gen6_read16;
- dev_priv->uncore.funcs.mmio_readl = gen6_read32;
- dev_priv->uncore.funcs.mmio_readq = gen6_read64;
+ ASSIGN_READ_MMIO_VFUNCS(gen6);
}
break;
case 5:
- dev_priv->uncore.funcs.mmio_writeb = gen5_write8;
- dev_priv->uncore.funcs.mmio_writew = gen5_write16;
- dev_priv->uncore.funcs.mmio_writel = gen5_write32;
- dev_priv->uncore.funcs.mmio_writeq = gen5_write64;
- dev_priv->uncore.funcs.mmio_readb = gen5_read8;
- dev_priv->uncore.funcs.mmio_readw = gen5_read16;
- dev_priv->uncore.funcs.mmio_readl = gen5_read32;
- dev_priv->uncore.funcs.mmio_readq = gen5_read64;
+ ASSIGN_WRITE_MMIO_VFUNCS(gen5);
+ ASSIGN_READ_MMIO_VFUNCS(gen5);
break;
case 4:
case 3:
case 2:
- dev_priv->uncore.funcs.mmio_writeb = gen4_write8;
- dev_priv->uncore.funcs.mmio_writew = gen4_write16;
- dev_priv->uncore.funcs.mmio_writel = gen4_write32;
- dev_priv->uncore.funcs.mmio_writeq = gen4_write64;
- dev_priv->uncore.funcs.mmio_readb = gen4_read8;
- dev_priv->uncore.funcs.mmio_readw = gen4_read16;
- dev_priv->uncore.funcs.mmio_readl = gen4_read32;
- dev_priv->uncore.funcs.mmio_readq = gen4_read64;
+ ASSIGN_WRITE_MMIO_VFUNCS(gen4);
+ ASSIGN_READ_MMIO_VFUNCS(gen4);
break;
}
+
+ i915_check_and_clear_faults(dev);
}
+#undef ASSIGN_WRITE_MMIO_VFUNCS
+#undef ASSIGN_READ_MMIO_VFUNCS
void intel_uncore_fini(struct drm_device *dev)
{
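Note: ASSIGN_WRITE_MMIO_VFUNCS/ASSIGN_READ_MMIO_VFUNCS above rely on token pasting so that a single prefix argument (gen9, chv, gen6, ...) names the whole family of accessors. A small self-contained sketch of the same pattern, using made-up fake_write8/fake_write32 functions and an io_funcs struct that stand in for the driver's uncore.funcs:

#include <stdio.h>
#include <stdint.h>

struct io_funcs {
	void (*writeb)(uint32_t reg, uint8_t val);
	void (*writel)(uint32_t reg, uint32_t val);
};

static void fake_write8(uint32_t reg, uint8_t val)   { printf("w8  %#x <- %#x\n", (unsigned)reg, (unsigned)val); }
static void fake_write32(uint32_t reg, uint32_t val) { printf("w32 %#x <- %#x\n", (unsigned)reg, (unsigned)val); }

/* x##_write8 pastes the prefix onto the accessor name, just as
 * ASSIGN_WRITE_MMIO_VFUNCS(gen9) expands to gen9_write8 and friends. */
#define ASSIGN_WRITE_FUNCS(f, x) \
do { \
	(f)->writeb = x##_write8; \
	(f)->writel = x##_write32; \
} while (0)

int main(void)
{
	struct io_funcs funcs;
	ASSIGN_WRITE_FUNCS(&funcs, fake);  /* binds fake_write8 / fake_write32 */
	funcs.writel(0x2030, 0xdeadbeef);
	return 0;
}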
@@ -968,7 +1264,7 @@ static const struct register_whitelist {
/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
uint32_t gen_bitmask;
} whitelist[] = {
- { RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 8) },
+ { RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) },
};
int i915_reg_read_ioctl(struct drm_device *dev,
@@ -1053,41 +1349,34 @@ int i915_get_reset_stats_ioctl(struct drm_device *dev,
return 0;
}
-static int i965_reset_complete(struct drm_device *dev)
+static int i915_reset_complete(struct drm_device *dev)
{
u8 gdrst;
- pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
- return (gdrst & GRDOM_RESET_ENABLE) == 0;
+ pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
+ return (gdrst & GRDOM_RESET_STATUS) == 0;
}
-static int i965_do_reset(struct drm_device *dev)
+static int i915_do_reset(struct drm_device *dev)
{
- int ret;
-
- /* FIXME: i965g/gm need a display save/restore for gpu reset. */
- return -ENODEV;
+ /* assert reset for at least 20 usec */
+ pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
+ udelay(20);
+ pci_write_config_byte(dev->pdev, I915_GDRST, 0);
- /*
- * Set the domains we want to reset (GRDOM/bits 2 and 3) as
- * well as the reset bit (GR/bit 0). Setting the GR bit
- * triggers the reset; when done, the hardware will clear it.
- */
- pci_write_config_byte(dev->pdev, I965_GDRST,
- GRDOM_RENDER | GRDOM_RESET_ENABLE);
- ret = wait_for(i965_reset_complete(dev), 500);
- if (ret)
- return ret;
-
- pci_write_config_byte(dev->pdev, I965_GDRST,
- GRDOM_MEDIA | GRDOM_RESET_ENABLE);
-
- ret = wait_for(i965_reset_complete(dev), 500);
- if (ret)
- return ret;
+ return wait_for(i915_reset_complete(dev), 500);
+}
- pci_write_config_byte(dev->pdev, I965_GDRST, 0);
+static int g4x_reset_complete(struct drm_device *dev)
+{
+ u8 gdrst;
+ pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
+ return (gdrst & GRDOM_RESET_ENABLE) == 0;
+}
- return 0;
+static int g33_do_reset(struct drm_device *dev)
+{
+ pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
+ return wait_for(g4x_reset_complete(dev), 500);
}
static int g4x_do_reset(struct drm_device *dev)
@@ -1095,9 +1384,9 @@ static int g4x_do_reset(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- pci_write_config_byte(dev->pdev, I965_GDRST,
+ pci_write_config_byte(dev->pdev, I915_GDRST,
GRDOM_RENDER | GRDOM_RESET_ENABLE);
- ret = wait_for(i965_reset_complete(dev), 500);
+ ret = wait_for(g4x_reset_complete(dev), 500);
if (ret)
return ret;
@@ -1105,9 +1394,9 @@ static int g4x_do_reset(struct drm_device *dev)
I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
POSTING_READ(VDECCLK_GATE_D);
- pci_write_config_byte(dev->pdev, I965_GDRST,
+ pci_write_config_byte(dev->pdev, I915_GDRST,
GRDOM_MEDIA | GRDOM_RESET_ENABLE);
- ret = wait_for(i965_reset_complete(dev), 500);
+ ret = wait_for(g4x_reset_complete(dev), 500);
if (ret)
return ret;
@@ -1115,7 +1404,7 @@ static int g4x_do_reset(struct drm_device *dev)
I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
POSTING_READ(VDECCLK_GATE_D);
- pci_write_config_byte(dev->pdev, I965_GDRST, 0);
+ pci_write_config_byte(dev->pdev, I915_GDRST, 0);
return 0;
}
@@ -1173,8 +1462,10 @@ int intel_gpu_reset(struct drm_device *dev)
return ironlake_do_reset(dev);
else if (IS_G4X(dev))
return g4x_do_reset(dev);
- else if (IS_GEN4(dev))
- return i965_do_reset(dev);
+ else if (IS_G33(dev))
+ return g33_do_reset(dev);
+ else if (INTEL_INFO(dev)->gen >= 3)
+ return i915_do_reset(dev);
else
return -ENODEV;
}
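Note: the reworked reset paths above all drive reset through the GDRST byte in PCI config space: assert GRDOM_RESET_ENABLE, hold it (at least 20 us in i915_do_reset()), release it, then poll until the status bit clears with a bounded wait. A hedged user-space sketch of that assert/poll shape follows; read_gdrst()/write_gdrst() are stand-ins for pci_read_config_byte()/pci_write_config_byte(), and the bit values are assumed for illustration only.

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

static uint8_t read_gdrst(void)          { return 0; }    /* stand-in for pci_read_config_byte() */
static void    write_gdrst(uint8_t val)  { (void)val; }   /* stand-in for pci_write_config_byte() */

#define GRDOM_RESET_ENABLE 0x1   /* bit values assumed for illustration */
#define GRDOM_RESET_STATUS 0x2

/* Poll roughly once a millisecond until the status bit clears, up to
 * timeout_ms, mirroring wait_for(i915_reset_complete(dev), 500). */
static bool wait_for_reset(unsigned timeout_ms)
{
	for (unsigned waited = 0; waited <= timeout_ms; waited++) {
		if ((read_gdrst() & GRDOM_RESET_STATUS) == 0)
			return true;
		usleep(1000);
	}
	return false;
}

static bool do_reset_sketch(void)
{
	write_gdrst(GRDOM_RESET_ENABLE);  /* assert reset... */
	usleep(20);                       /* ...hold it at least 20 us... */
	write_gdrst(0);                   /* ...then release, as in i915_do_reset() */
	return wait_for_reset(500);
}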
diff --git a/drivers/staging/imx-drm/Kconfig b/drivers/gpu/drm/imx/Kconfig
index 82fb758a29b..ab31848e92c 100644
--- a/drivers/staging/imx-drm/Kconfig
+++ b/drivers/gpu/drm/imx/Kconfig
@@ -6,6 +6,7 @@ config DRM_IMX
select DRM_GEM_CMA_HELPER
select DRM_KMS_CMA_HELPER
depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM)
+ depends on IMX_IPUV3_CORE
help
enable i.MX graphics support
@@ -40,11 +41,11 @@ config DRM_IMX_LDB
found on i.MX53 and i.MX6 processors.
config DRM_IMX_IPUV3
- tristate "DRM Support for i.MX IPUv3"
+ tristate
depends on DRM_IMX
depends on IMX_IPUV3_CORE
- help
- Choose this if you have a i.MX5 or i.MX6 processor.
+ default y if DRM_IMX=y
+ default m if DRM_IMX=m
config DRM_IMX_HDMI
tristate "Freescale i.MX DRM HDMI"
diff --git a/drivers/staging/imx-drm/Makefile b/drivers/gpu/drm/imx/Makefile
index 582c438d8cb..582c438d8cb 100644
--- a/drivers/staging/imx-drm/Makefile
+++ b/drivers/gpu/drm/imx/Makefile
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index ad6173500bf..b250130debc 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -24,13 +24,12 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_plane_helper.h>
#include "imx-drm.h"
#define MAX_CRTC 4
-struct imx_drm_crtc;
-
struct imx_drm_component {
struct device_node *of_node;
struct list_head list;
@@ -632,7 +631,8 @@ static int imx_drm_platform_probe(struct platform_device *pdev)
continue;
}
- component_match_add(&pdev->dev, &match, compare_of, remote);
+ component_match_add(&pdev->dev, &match, compare_of,
+ remote);
of_node_put(remote);
}
of_node_put(port);
diff --git a/drivers/staging/imx-drm/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index 7453ae00c41..7453ae00c41 100644
--- a/drivers/staging/imx-drm/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
diff --git a/drivers/staging/imx-drm/imx-hdmi.c b/drivers/gpu/drm/imx/imx-hdmi.c
index ddc53e03953..ddc53e03953 100644
--- a/drivers/staging/imx-drm/imx-hdmi.c
+++ b/drivers/gpu/drm/imx/imx-hdmi.c
diff --git a/drivers/staging/imx-drm/imx-hdmi.h b/drivers/gpu/drm/imx/imx-hdmi.h
index 39b677689db..39b677689db 100644
--- a/drivers/staging/imx-drm/imx-hdmi.h
+++ b/drivers/gpu/drm/imx/imx-hdmi.h
diff --git a/drivers/staging/imx-drm/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 2638dc1671d..c60460043e2 100644
--- a/drivers/staging/imx-drm/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -11,11 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
*/
#include <linux/module.h>
diff --git a/drivers/staging/imx-drm/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index 64b54d7f996..a729f4f7074 100644
--- a/drivers/staging/imx-drm/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -11,11 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
*/
#include <linux/clk.h>
@@ -665,7 +660,8 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
ret = regmap_read(tve->regmap, TVE_COM_CONF_REG, &val);
if (ret < 0) {
- dev_err(dev, "failed to read configuration register: %d\n", ret);
+ dev_err(dev, "failed to read configuration register: %d\n",
+ ret);
return ret;
}
if (val != 0x00100000) {
diff --git a/drivers/staging/imx-drm/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 11e84a25177..ebee59cb96d 100644
--- a/drivers/staging/imx-drm/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -11,11 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
*/
#include <linux/component.h>
#include <linux/module.h>
diff --git a/drivers/staging/imx-drm/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 944962b692b..6987e16fe99 100644
--- a/drivers/staging/imx-drm/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -64,6 +64,7 @@ int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb,
{
struct drm_gem_cma_object *cma_obj;
unsigned long eba;
+ int active;
cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
if (!cma_obj) {
@@ -74,12 +75,17 @@ int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb,
dev_dbg(ipu_plane->base.dev->dev, "phys = %pad, x = %d, y = %d",
&cma_obj->paddr, x, y);
- ipu_cpmem_set_stride(ipu_plane->ipu_ch, fb->pitches[0]);
-
eba = cma_obj->paddr + fb->offsets[0] +
fb->pitches[0] * y + (fb->bits_per_pixel >> 3) * x;
- ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 0, eba);
- ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 1, eba);
+
+ if (ipu_plane->enabled) {
+ active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch);
+ ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba);
+ ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active);
+ } else {
+ ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 0, eba);
+ ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 1, eba);
+ }
/* cache offsets for subsequent pageflips */
ipu_plane->x = x;
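Note: with double buffering enabled, ipu_plane_set_base() above writes the new framebuffer address into whichever of the two IDMAC buffer slots is currently idle and then selects that slot, so a pageflip never touches the buffer being scanned out; when the plane is not yet enabled, both slots get the same address. A small sketch of that select-the-other-buffer idea, where flip_to(), slot_addr[] and current_buf are modeling stand-ins for the ipu_cpmem_set_buffer()/ipu_idmac_* helpers and hardware state:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static int      current_buf;     /* which slot the DMA is scanning out */
static uint32_t slot_addr[2];

static void flip_to(uint32_t eba, bool enabled)
{
	if (enabled) {
		int active = current_buf;
		slot_addr[!active] = eba;   /* program the idle slot... */
		current_buf = !active;      /* ...and make it the next one scanned out
		                             * (the real hardware switches on select) */
	} else {
		/* channel not running yet: both slots start at the same address */
		slot_addr[0] = eba;
		slot_addr[1] = eba;
	}
	printf("slot0=%#x slot1=%#x active=%d\n",
	       (unsigned)slot_addr[0], (unsigned)slot_addr[1], current_buf);
}

int main(void)
{
	flip_to(0x10000000, false);  /* initial modeset: both slots point at fb0 */
	flip_to(0x20000000, true);   /* pageflip while enabled: only the idle slot changes */
	return 0;
}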
@@ -137,6 +143,18 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
if (crtc_h < 2)
return -EINVAL;
+ /*
+ * since we cannot touch active IDMAC channels, we do not support
+ * resizing the enabled plane or changing its format
+ */
+ if (ipu_plane->enabled) {
+ if (src_w != ipu_plane->w || src_h != ipu_plane->h ||
+ fb->pixel_format != ipu_plane->base.fb->pixel_format)
+ return -EINVAL;
+
+ return ipu_plane_set_base(ipu_plane, fb, src_x, src_y);
+ }
+
switch (ipu_plane->dp_flow) {
case IPU_DP_FLOW_SYNC_BG:
ret = ipu_dp_setup_channel(ipu_plane->dp,
@@ -148,14 +166,22 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
ret);
return ret;
}
- ipu_dp_set_global_alpha(ipu_plane->dp, 1, 0, 1);
+ ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
break;
case IPU_DP_FLOW_SYNC_FG:
ipu_dp_setup_channel(ipu_plane->dp,
ipu_drm_fourcc_to_colorspace(fb->pixel_format),
IPUV3_COLORSPACE_UNKNOWN);
ipu_dp_set_window_pos(ipu_plane->dp, crtc_x, crtc_y);
- break;
+ /* Enable local alpha on partial plane */
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ ipu_dp_set_global_alpha(ipu_plane->dp, false, 0, false);
+ break;
+ default:
+ break;
+ }
}
ret = ipu_dmfc_init_channel(ipu_plane->dmfc, crtc_w);
@@ -181,11 +207,16 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
return ret;
}
ipu_cpmem_set_high_priority(ipu_plane->ipu_ch);
+ ipu_idmac_set_double_buffer(ipu_plane->ipu_ch, 1);
+ ipu_cpmem_set_stride(ipu_plane->ipu_ch, fb->pitches[0]);
ret = ipu_plane_set_base(ipu_plane, fb, src_x, src_y);
if (ret < 0)
return ret;
+ ipu_plane->w = src_w;
+ ipu_plane->h = src_h;
+
return 0;
}
diff --git a/drivers/staging/imx-drm/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3-plane.h
index c0aae5bcb5d..af125fb40ef 100644
--- a/drivers/staging/imx-drm/ipuv3-plane.h
+++ b/drivers/gpu/drm/imx/ipuv3-plane.h
@@ -26,6 +26,8 @@ struct ipu_plane {
int x;
int y;
+ int w;
+ int h;
bool enabled;
};
diff --git a/drivers/staging/imx-drm/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 8a76a5c1c34..796c3c1c170 100644
--- a/drivers/staging/imx-drm/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -11,11 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
*/
#include <linux/component.h>
@@ -128,6 +123,10 @@ static void imx_pd_encoder_prepare(struct drm_encoder *encoder)
static void imx_pd_encoder_commit(struct drm_encoder *encoder)
{
+ struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
+
+ drm_panel_prepare(imxpd->panel);
+ drm_panel_enable(imxpd->panel);
}
static void imx_pd_encoder_mode_set(struct drm_encoder *encoder,
@@ -138,6 +137,10 @@ static void imx_pd_encoder_mode_set(struct drm_encoder *encoder,
static void imx_pd_encoder_disable(struct drm_encoder *encoder)
{
+ struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
+
+ drm_panel_disable(imxpd->panel);
+ drm_panel_unprepare(imxpd->panel);
}
static struct drm_connector_funcs imx_pd_connector_funcs = {
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 83485ab81ce..9872ba9abf1 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -15,6 +15,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
#include "mgag200_drv.h"
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 9d907c526c9..5b2a1ff95d3 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -3,6 +3,7 @@ config DRM_MSM
tristate "MSM DRM"
depends on DRM
depends on ARCH_QCOM || (ARM && COMPILE_TEST)
+ select REGULATOR
select DRM_KMS_HELPER
select DRM_PANEL
select SHMEM
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 6283dcb96af..143d988f8ad 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -7,6 +7,7 @@ msm-y := \
adreno/adreno_device.o \
adreno/adreno_gpu.o \
adreno/a3xx_gpu.o \
+ adreno/a4xx_gpu.o \
hdmi/hdmi.o \
hdmi/hdmi_audio.o \
hdmi/hdmi_bridge.o \
@@ -24,12 +25,15 @@ msm-y := \
mdp/mdp4/mdp4_irq.o \
mdp/mdp4/mdp4_kms.o \
mdp/mdp4/mdp4_plane.o \
+ mdp/mdp5/mdp5_cfg.o \
+ mdp/mdp5/mdp5_ctl.o \
mdp/mdp5/mdp5_crtc.o \
mdp/mdp5/mdp5_encoder.o \
mdp/mdp5/mdp5_irq.o \
mdp/mdp5/mdp5_kms.o \
mdp/mdp5/mdp5_plane.o \
mdp/mdp5/mdp5_smp.o \
+ msm_atomic.o \
msm_drv.o \
msm_fb.o \
msm_gem.o \
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index a3104598c27..22882cc0a57 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -11,10 +11,10 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14960 bytes, from 2014-07-27 17:22:13)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-08-01 12:22:48)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 41068 bytes, from 2014-08-01 12:22:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15053 bytes, from 2014-11-09 15:45:47)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 63169 bytes, from 2014-11-13 22:44:18)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 49097 bytes, from 2014-11-14 15:38:00)
Copyright (C) 2013-2014 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -926,11 +926,11 @@ static inline uint32_t A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE(enum pc_di_index_size
#define A2XX_VGT_DRAW_INITIATOR_NOT_EOP 0x00001000
#define A2XX_VGT_DRAW_INITIATOR_SMALL_INDEX 0x00002000
#define A2XX_VGT_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE 0x00004000
-#define A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK 0xffff0000
-#define A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT 16
-static inline uint32_t A2XX_VGT_DRAW_INITIATOR_NUM_INDICES(uint32_t val)
+#define A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK 0xff000000
+#define A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT 24
+static inline uint32_t A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES(uint32_t val)
{
- return ((val) << A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT) & A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK;
+ return ((val) << A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT) & A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK;
}
#define REG_A2XX_VGT_IMMED_DATA 0x000021fd
@@ -1243,13 +1243,13 @@ static inline uint32_t A2XX_CLEAR_COLOR_ALPHA(uint32_t val)
#define A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT 0
static inline uint32_t A2XX_PA_SU_POINT_SIZE_HEIGHT(float val)
{
- return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT) & A2XX_PA_SU_POINT_SIZE_HEIGHT__MASK;
+ return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT) & A2XX_PA_SU_POINT_SIZE_HEIGHT__MASK;
}
#define A2XX_PA_SU_POINT_SIZE_WIDTH__MASK 0xffff0000
#define A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT 16
static inline uint32_t A2XX_PA_SU_POINT_SIZE_WIDTH(float val)
{
- return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT) & A2XX_PA_SU_POINT_SIZE_WIDTH__MASK;
+ return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT) & A2XX_PA_SU_POINT_SIZE_WIDTH__MASK;
}
#define REG_A2XX_PA_SU_POINT_MINMAX 0x00002281
@@ -1257,13 +1257,13 @@ static inline uint32_t A2XX_PA_SU_POINT_SIZE_WIDTH(float val)
#define A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT 0
static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MIN(float val)
{
- return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MIN__MASK;
+ return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MIN__MASK;
}
#define A2XX_PA_SU_POINT_MINMAX_MAX__MASK 0xffff0000
#define A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT 16
static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MAX(float val)
{
- return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MAX__MASK;
+ return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MAX__MASK;
}
#define REG_A2XX_PA_SU_LINE_CNTL 0x00002282
@@ -1271,7 +1271,7 @@ static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MAX(float val)
#define A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT 0
static inline uint32_t A2XX_PA_SU_LINE_CNTL_WIDTH(float val)
{
- return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT) & A2XX_PA_SU_LINE_CNTL_WIDTH__MASK;
+ return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT) & A2XX_PA_SU_LINE_CNTL_WIDTH__MASK;
}
#define REG_A2XX_PA_SC_LINE_STIPPLE 0x00002283
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index 82d015279b4..109e9a263da 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -11,10 +11,10 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14960 bytes, from 2014-07-27 17:22:13)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-08-01 12:22:48)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 41068 bytes, from 2014-08-01 12:22:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15053 bytes, from 2014-11-09 15:45:47)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 63169 bytes, from 2014-11-13 22:44:18)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 49097 bytes, from 2014-11-14 15:38:00)
Copyright (C) 2013-2014 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -86,6 +86,14 @@ enum a3xx_vtx_fmt {
VFMT_NORM_USHORT_16_16 = 29,
VFMT_NORM_USHORT_16_16_16 = 30,
VFMT_NORM_USHORT_16_16_16_16 = 31,
+ VFMT_UINT_32 = 32,
+ VFMT_UINT_32_32 = 33,
+ VFMT_UINT_32_32_32 = 34,
+ VFMT_UINT_32_32_32_32 = 35,
+ VFMT_INT_32 = 36,
+ VFMT_INT_32_32 = 37,
+ VFMT_INT_32_32_32 = 38,
+ VFMT_INT_32_32_32_32 = 39,
VFMT_UBYTE_8 = 40,
VFMT_UBYTE_8_8 = 41,
VFMT_UBYTE_8_8_8 = 42,
@@ -112,7 +120,9 @@ enum a3xx_tex_fmt {
TFMT_NORM_USHORT_565 = 4,
TFMT_NORM_USHORT_5551 = 6,
TFMT_NORM_USHORT_4444 = 7,
+ TFMT_NORM_USHORT_Z16 = 9,
TFMT_NORM_UINT_X8Z24 = 10,
+ TFMT_FLOAT_Z32 = 11,
TFMT_NORM_UINT_NV12_UV_TILED = 17,
TFMT_NORM_UINT_NV12_Y_TILED = 19,
TFMT_NORM_UINT_NV12_UV = 21,
@@ -121,18 +131,38 @@ enum a3xx_tex_fmt {
TFMT_NORM_UINT_I420_U = 26,
TFMT_NORM_UINT_I420_V = 27,
TFMT_NORM_UINT_2_10_10_10 = 41,
+ TFMT_FLOAT_9_9_9_E5 = 42,
+ TFMT_FLOAT_10_11_11 = 43,
TFMT_NORM_UINT_A8 = 44,
TFMT_NORM_UINT_L8_A8 = 47,
TFMT_NORM_UINT_8 = 48,
TFMT_NORM_UINT_8_8 = 49,
TFMT_NORM_UINT_8_8_8 = 50,
TFMT_NORM_UINT_8_8_8_8 = 51,
+ TFMT_NORM_SINT_8_8 = 53,
+ TFMT_NORM_SINT_8_8_8_8 = 55,
+ TFMT_UINT_8_8 = 57,
+ TFMT_UINT_8_8_8_8 = 59,
+ TFMT_SINT_8_8 = 61,
+ TFMT_SINT_8_8_8_8 = 63,
TFMT_FLOAT_16 = 64,
TFMT_FLOAT_16_16 = 65,
TFMT_FLOAT_16_16_16_16 = 67,
+ TFMT_UINT_16 = 68,
+ TFMT_UINT_16_16 = 69,
+ TFMT_UINT_16_16_16_16 = 71,
+ TFMT_SINT_16 = 72,
+ TFMT_SINT_16_16 = 73,
+ TFMT_SINT_16_16_16_16 = 75,
TFMT_FLOAT_32 = 84,
TFMT_FLOAT_32_32 = 85,
TFMT_FLOAT_32_32_32_32 = 87,
+ TFMT_UINT_32 = 88,
+ TFMT_UINT_32_32 = 89,
+ TFMT_UINT_32_32_32_32 = 91,
+ TFMT_SINT_32 = 92,
+ TFMT_SINT_32_32 = 93,
+ TFMT_SINT_32_32_32_32 = 95,
};
enum a3xx_tex_fetchsize {
@@ -145,19 +175,34 @@ enum a3xx_tex_fetchsize {
};
enum a3xx_color_fmt {
+ RB_R5G6B5_UNORM = 0,
+ RB_R5G5B5A1_UNORM = 1,
+ RB_R4G4B4A4_UNORM = 3,
RB_R8G8B8_UNORM = 4,
RB_R8G8B8A8_UNORM = 8,
- RB_Z16_UNORM = 12,
+ RB_R8G8B8A8_UINT = 10,
+ RB_R8G8B8A8_SINT = 11,
+ RB_R8G8_UNORM = 12,
+ RB_R8_UINT = 14,
+ RB_R8_SINT = 15,
+ RB_R10G10B10A2_UNORM = 16,
RB_A8_UNORM = 20,
+ RB_R8_UNORM = 21,
RB_R16G16B16A16_FLOAT = 27,
+ RB_R11G11B10_FLOAT = 28,
+ RB_R16_SINT = 40,
+ RB_R16G16_SINT = 41,
+ RB_R16G16B16A16_SINT = 43,
+ RB_R16_UINT = 44,
+ RB_R16G16_UINT = 45,
+ RB_R16G16B16A16_UINT = 47,
RB_R32G32B32A32_FLOAT = 51,
-};
-
-enum a3xx_color_swap {
- WZYX = 0,
- WXYZ = 1,
- ZYXW = 2,
- XYZW = 3,
+ RB_R32_SINT = 52,
+ RB_R32G32_SINT = 53,
+ RB_R32G32B32A32_SINT = 55,
+ RB_R32_UINT = 56,
+ RB_R32G32_UINT = 57,
+ RB_R32G32B32A32_UINT = 59,
};
enum a3xx_sp_perfcounter_select {
@@ -194,6 +239,11 @@ enum a3xx_rb_blend_opcode {
BLEND_MAX_DST_SRC = 4,
};
+enum a3xx_intp_mode {
+ SMOOTH = 0,
+ FLAT = 1,
+};
+
enum a3xx_tex_filter {
A3XX_TEX_NEAREST = 0,
A3XX_TEX_LINEAR = 1,
@@ -536,6 +586,10 @@ enum a3xx_tex_type {
#define REG_A3XX_CP_MEQ_DATA 0x000001db
+#define REG_A3XX_CP_WFI_PEND_CTR 0x000001f5
+
+#define REG_A3XX_RBBM_PM_OVERRIDE2 0x0000039d
+
#define REG_A3XX_CP_PERFCOUNTER_SELECT 0x00000445
#define REG_A3XX_CP_HW_FAULT 0x0000045c
@@ -550,6 +604,12 @@ static inline uint32_t REG_A3XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000460
#define REG_A3XX_CP_AHB_FAULT 0x0000054d
+#define REG_A3XX_SQ_GPR_MANAGEMENT 0x00000d00
+
+#define REG_A3XX_SQ_INST_STORE_MANAGMENT 0x00000d02
+
+#define REG_A3XX_TP0_CHICKEN 0x00000e1e
+
#define REG_A3XX_SP_GLOBAL_MEM_SIZE 0x00000e22
#define REG_A3XX_SP_GLOBAL_MEM_ADDR 0x00000e23
@@ -632,13 +692,13 @@ static inline uint32_t A3XX_GRAS_CL_VPORT_ZSCALE(float val)
#define A3XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0
static inline uint32_t A3XX_GRAS_SU_POINT_MINMAX_MIN(float val)
{
- return ((((uint32_t)(val * 8.0))) << A3XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
+ return ((((uint32_t)(val * 16.0))) << A3XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
}
#define A3XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000
#define A3XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16
static inline uint32_t A3XX_GRAS_SU_POINT_MINMAX_MAX(float val)
{
- return ((((uint32_t)(val * 8.0))) << A3XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
+ return ((((uint32_t)(val * 16.0))) << A3XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
}
#define REG_A3XX_GRAS_SU_POINT_SIZE 0x00002069
@@ -646,7 +706,7 @@ static inline uint32_t A3XX_GRAS_SU_POINT_MINMAX_MAX(float val)
#define A3XX_GRAS_SU_POINT_SIZE__SHIFT 0
static inline uint32_t A3XX_GRAS_SU_POINT_SIZE(float val)
{
- return ((((uint32_t)(val * 8.0))) << A3XX_GRAS_SU_POINT_SIZE__SHIFT) & A3XX_GRAS_SU_POINT_SIZE__MASK;
+ return ((((int32_t)(val * 16.0))) << A3XX_GRAS_SU_POINT_SIZE__SHIFT) & A3XX_GRAS_SU_POINT_SIZE__MASK;
}
#define REG_A3XX_GRAS_SU_POLY_OFFSET_SCALE 0x0000206c
@@ -654,7 +714,7 @@ static inline uint32_t A3XX_GRAS_SU_POINT_SIZE(float val)
#define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT 0
static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL(float val)
{
- return ((((uint32_t)(val * 28.0))) << A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK;
+ return ((((int32_t)(val * 16384.0))) << A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK;
}
#define REG_A3XX_GRAS_SU_POLY_OFFSET_OFFSET 0x0000206d
@@ -662,7 +722,7 @@ static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL(float val)
#define A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
{
- return ((((uint32_t)(val * 28.0))) << A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
+ return ((((int32_t)(val * 16384.0))) << A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
}
#define REG_A3XX_GRAS_SU_MODE_CONTROL 0x00002070
@@ -673,7 +733,7 @@ static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT 3
static inline uint32_t A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(float val)
{
- return ((((uint32_t)(val * 4.0))) << A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
+ return ((((int32_t)(val * 4.0))) << A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
}
#define A3XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800
@@ -863,6 +923,7 @@ static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
{
return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
}
+#define A3XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00004000
#define A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK 0xfffe0000
#define A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT 17
static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val)
@@ -1001,6 +1062,7 @@ static inline uint32_t A3XX_RB_COPY_CONTROL_FASTCLEAR(uint32_t val)
{
return ((val) << A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT) & A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK;
}
+#define A3XX_RB_COPY_CONTROL_UNK12 0x00001000
#define A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xffffc000
#define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 14
static inline uint32_t A3XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
@@ -1079,7 +1141,7 @@ static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
#define REG_A3XX_RB_DEPTH_CLEAR 0x00002101
#define REG_A3XX_RB_DEPTH_INFO 0x00002102
-#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000001
+#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000003
#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT 0
static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_format val)
{
@@ -1265,6 +1327,7 @@ static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_
{
return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK;
}
+#define A3XX_PC_PRIM_VTX_CNTL_PRIMITIVE_RESTART 0x00100000
#define A3XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000
#define A3XX_PC_PRIM_VTX_CNTL_PSIZE 0x04000000
@@ -1281,7 +1344,12 @@ static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize
#define A3XX_HLSQ_CONTROL_0_REG_SPSHADERRESTART 0x00000200
#define A3XX_HLSQ_CONTROL_0_REG_RESERVED2 0x00000400
#define A3XX_HLSQ_CONTROL_0_REG_CHUNKDISABLE 0x04000000
-#define A3XX_HLSQ_CONTROL_0_REG_CONSTSWITCHMODE 0x08000000
+#define A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK 0x08000000
+#define A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT 27
+static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_CONSTMODE(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK;
+}
#define A3XX_HLSQ_CONTROL_0_REG_LAZYUPDATEDISABLE 0x10000000
#define A3XX_HLSQ_CONTROL_0_REG_SPCONSTFULLUPDATE 0x20000000
#define A3XX_HLSQ_CONTROL_0_REG_TPFULLUPDATE 0x40000000
@@ -1484,6 +1552,8 @@ static inline uint32_t A3XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
#define REG_A3XX_VFD_INDEX_OFFSET 0x00002245
+#define REG_A3XX_VFD_INDEX_OFFSET 0x00002245
+
static inline uint32_t REG_A3XX_VFD_FETCH(uint32_t i0) { return 0x00002246 + 0x2*i0; }
static inline uint32_t REG_A3XX_VFD_FETCH_INSTR_0(uint32_t i0) { return 0x00002246 + 0x2*i0; }
@@ -1537,6 +1607,7 @@ static inline uint32_t A3XX_VFD_DECODE_INSTR_REGID(uint32_t val)
{
return ((val) << A3XX_VFD_DECODE_INSTR_REGID__SHIFT) & A3XX_VFD_DECODE_INSTR_REGID__MASK;
}
+#define A3XX_VFD_DECODE_INSTR_INT 0x00100000
#define A3XX_VFD_DECODE_INSTR_SWAP__MASK 0x00c00000
#define A3XX_VFD_DECODE_INSTR_SWAP__SHIFT 22
static inline uint32_t A3XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
@@ -1604,6 +1675,102 @@ static inline uint32_t A3XX_VPC_PACK_NUMNONPOSVSVAR(uint32_t val)
static inline uint32_t REG_A3XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00002282 + 0x1*i0; }
static inline uint32_t REG_A3XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00002282 + 0x1*i0; }
+#define A3XX_VPC_VARYING_INTERP_MODE_C0__MASK 0x00000003
+#define A3XX_VPC_VARYING_INTERP_MODE_C0__SHIFT 0
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C0(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C0__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C0__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C1__MASK 0x0000000c
+#define A3XX_VPC_VARYING_INTERP_MODE_C1__SHIFT 2
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C1(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C1__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C1__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C2__MASK 0x00000030
+#define A3XX_VPC_VARYING_INTERP_MODE_C2__SHIFT 4
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C2(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C2__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C2__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C3__MASK 0x000000c0
+#define A3XX_VPC_VARYING_INTERP_MODE_C3__SHIFT 6
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C3(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C3__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C3__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C4__MASK 0x00000300
+#define A3XX_VPC_VARYING_INTERP_MODE_C4__SHIFT 8
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C4(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C4__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C4__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C5__MASK 0x00000c00
+#define A3XX_VPC_VARYING_INTERP_MODE_C5__SHIFT 10
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C5(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C5__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C5__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C6__MASK 0x00003000
+#define A3XX_VPC_VARYING_INTERP_MODE_C6__SHIFT 12
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C6(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C6__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C6__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C7__MASK 0x0000c000
+#define A3XX_VPC_VARYING_INTERP_MODE_C7__SHIFT 14
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C7(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C7__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C7__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C8__MASK 0x00030000
+#define A3XX_VPC_VARYING_INTERP_MODE_C8__SHIFT 16
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C8(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C8__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C8__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C9__MASK 0x000c0000
+#define A3XX_VPC_VARYING_INTERP_MODE_C9__SHIFT 18
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C9(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C9__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C9__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CA__MASK 0x00300000
+#define A3XX_VPC_VARYING_INTERP_MODE_CA__SHIFT 20
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CA(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CA__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CA__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CB__MASK 0x00c00000
+#define A3XX_VPC_VARYING_INTERP_MODE_CB__SHIFT 22
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CB(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CB__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CB__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CC__MASK 0x03000000
+#define A3XX_VPC_VARYING_INTERP_MODE_CC__SHIFT 24
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CC(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CC__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CC__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CD__MASK 0x0c000000
+#define A3XX_VPC_VARYING_INTERP_MODE_CD__SHIFT 26
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CD(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CD__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CD__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CE__MASK 0x30000000
+#define A3XX_VPC_VARYING_INTERP_MODE_CE__SHIFT 28
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CE(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CE__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CE__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CF__MASK 0xc0000000
+#define A3XX_VPC_VARYING_INTERP_MODE_CF__SHIFT 30
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CF(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CF__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CF__MASK;
+}
static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x00002286 + 0x1*i0; }
@@ -1928,6 +2095,8 @@ static inline uint32_t A3XX_SP_FS_MRT_REG_REGID(uint32_t val)
return ((val) << A3XX_SP_FS_MRT_REG_REGID__SHIFT) & A3XX_SP_FS_MRT_REG_REGID__MASK;
}
#define A3XX_SP_FS_MRT_REG_HALF_PRECISION 0x00000100
+#define A3XX_SP_FS_MRT_REG_SINT 0x00000400
+#define A3XX_SP_FS_MRT_REG_UINT 0x00000800
static inline uint32_t REG_A3XX_SP_FS_IMAGE_OUTPUT(uint32_t i0) { return 0x000022f4 + 0x1*i0; }
@@ -1947,6 +2116,8 @@ static inline uint32_t A3XX_SP_FS_LENGTH_REG_SHADERLENGTH(uint32_t val)
return ((val) << A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__SHIFT) & A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__MASK;
}
+#define REG_A3XX_PA_SC_AA_CONFIG 0x00002301
+
#define REG_A3XX_TPL1_TP_VS_TEX_OFFSET 0x00002340
#define A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__MASK 0x000000ff
#define A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__SHIFT 0
@@ -2297,11 +2468,11 @@ static inline uint32_t A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE(enum pc_di_index_size
#define A3XX_VGT_DRAW_INITIATOR_NOT_EOP 0x00001000
#define A3XX_VGT_DRAW_INITIATOR_SMALL_INDEX 0x00002000
#define A3XX_VGT_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE 0x00004000
-#define A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK 0xffff0000
-#define A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT 16
-static inline uint32_t A3XX_VGT_DRAW_INITIATOR_NUM_INDICES(uint32_t val)
+#define A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK 0xff000000
+#define A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT 24
+static inline uint32_t A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES(uint32_t val)
{
- return ((val) << A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT) & A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK;
+ return ((val) << A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT) & A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK;
}
#define REG_A3XX_VGT_IMMED_DATA 0x000021fd
@@ -2347,17 +2518,23 @@ static inline uint32_t A3XX_TEX_SAMP_0_COMPARE_FUNC(enum adreno_compare_func val
#define A3XX_TEX_SAMP_0_UNNORM_COORDS 0x80000000
#define REG_A3XX_TEX_SAMP_1 0x00000001
+#define A3XX_TEX_SAMP_1_LOD_BIAS__MASK 0x000007ff
+#define A3XX_TEX_SAMP_1_LOD_BIAS__SHIFT 0
+static inline uint32_t A3XX_TEX_SAMP_1_LOD_BIAS(float val)
+{
+ return ((((int32_t)(val * 64.0))) << A3XX_TEX_SAMP_1_LOD_BIAS__SHIFT) & A3XX_TEX_SAMP_1_LOD_BIAS__MASK;
+}
#define A3XX_TEX_SAMP_1_MAX_LOD__MASK 0x003ff000
#define A3XX_TEX_SAMP_1_MAX_LOD__SHIFT 12
static inline uint32_t A3XX_TEX_SAMP_1_MAX_LOD(float val)
{
- return ((((uint32_t)(val * 12.0))) << A3XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A3XX_TEX_SAMP_1_MAX_LOD__MASK;
+ return ((((uint32_t)(val * 64.0))) << A3XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A3XX_TEX_SAMP_1_MAX_LOD__MASK;
}
#define A3XX_TEX_SAMP_1_MIN_LOD__MASK 0xffc00000
#define A3XX_TEX_SAMP_1_MIN_LOD__SHIFT 22
static inline uint32_t A3XX_TEX_SAMP_1_MIN_LOD(float val)
{
- return ((((uint32_t)(val * 12.0))) << A3XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A3XX_TEX_SAMP_1_MIN_LOD__MASK;
+ return ((((uint32_t)(val * 64.0))) << A3XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A3XX_TEX_SAMP_1_MIN_LOD__MASK;
}
#define REG_A3XX_TEX_CONST_0 0x00000000
@@ -2448,6 +2625,24 @@ static inline uint32_t A3XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val)
}
#define REG_A3XX_TEX_CONST_3 0x00000003
+#define A3XX_TEX_CONST_3_LAYERSZ1__MASK 0x0000000f
+#define A3XX_TEX_CONST_3_LAYERSZ1__SHIFT 0
+static inline uint32_t A3XX_TEX_CONST_3_LAYERSZ1(uint32_t val)
+{
+ return ((val >> 12) << A3XX_TEX_CONST_3_LAYERSZ1__SHIFT) & A3XX_TEX_CONST_3_LAYERSZ1__MASK;
+}
+#define A3XX_TEX_CONST_3_DEPTH__MASK 0x0ffe0000
+#define A3XX_TEX_CONST_3_DEPTH__SHIFT 17
+static inline uint32_t A3XX_TEX_CONST_3_DEPTH(uint32_t val)
+{
+ return ((val) << A3XX_TEX_CONST_3_DEPTH__SHIFT) & A3XX_TEX_CONST_3_DEPTH__MASK;
+}
+#define A3XX_TEX_CONST_3_LAYERSZ2__MASK 0xf0000000
+#define A3XX_TEX_CONST_3_LAYERSZ2__SHIFT 28
+static inline uint32_t A3XX_TEX_CONST_3_LAYERSZ2(uint32_t val)
+{
+ return ((val >> 12) << A3XX_TEX_CONST_3_LAYERSZ2__SHIFT) & A3XX_TEX_CONST_3_LAYERSZ2__MASK;
+}
#endif /* A3XX_XML */
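Note: the generated adreno headers above all follow one encoding convention: every register field gets a __MASK and __SHIFT define plus an inline packer that shifts the value into place and masks it, and fixed-point fields (the val * 16.0 / val * 64.0 helpers) scale a float before packing. A compact sketch of that convention, using a made-up EXAMPLE_FIELD that is not a real adreno register:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical 10-bit field at bit 12 -- illustration only. */
#define EXAMPLE_FIELD__MASK  0x003ff000
#define EXAMPLE_FIELD__SHIFT 12

static inline uint32_t EXAMPLE_FIELD(uint32_t val)
{
	return (val << EXAMPLE_FIELD__SHIFT) & EXAMPLE_FIELD__MASK;
}

/* Fixed-point variant: scaling by 16.0 yields 4 fractional bits, the
 * same shape as A3XX_GRAS_SU_POINT_SIZE() with its val * 16.0 above. */
static inline uint32_t EXAMPLE_FIXED(float val)
{
	return ((uint32_t)(val * 16.0f) << EXAMPLE_FIELD__SHIFT) & EXAMPLE_FIELD__MASK;
}

int main(void)
{
	/* Fields for one register word are OR'd together. */
	uint32_t reg = EXAMPLE_FIELD(3) | 0x1;  /* low bit as an unrelated enable flag */
	printf("%#x %#x\n", (unsigned)reg, (unsigned)EXAMPLE_FIXED(2.5f));
	return 0;
}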
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 218c5b06039..b66c53bdc03 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -2,6 +2,8 @@
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
@@ -406,6 +408,94 @@ static void a3xx_dump(struct msm_gpu *gpu)
gpu_read(gpu, REG_A3XX_RBBM_STATUS));
adreno_dump(gpu);
}
+/* Register offset defines for A3XX */
+static const unsigned int a3xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_DEBUG, REG_AXXX_CP_DEBUG),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_WADDR, REG_AXXX_CP_ME_RAM_WADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_DATA, REG_AXXX_CP_ME_RAM_DATA),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_DATA,
+ REG_A3XX_CP_PFP_UCODE_DATA),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_ADDR,
+ REG_A3XX_CP_PFP_UCODE_ADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_WFI_PEND_CTR, REG_A3XX_CP_WFI_PEND_CTR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_AXXX_CP_RB_RPTR_ADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_AXXX_CP_RB_RPTR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_CTRL, REG_A3XX_CP_PROTECT_CTRL),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_CNTL, REG_AXXX_CP_ME_CNTL),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BASE, REG_AXXX_CP_IB1_BASE),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BUFSZ, REG_AXXX_CP_IB1_BUFSZ),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BASE, REG_AXXX_CP_IB2_BASE),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BUFSZ, REG_AXXX_CP_IB2_BUFSZ),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_TIMESTAMP, REG_AXXX_CP_SCRATCH_REG0),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_RADDR, REG_AXXX_CP_ME_RAM_RADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_ADDR, REG_AXXX_SCRATCH_ADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_UMSK, REG_AXXX_SCRATCH_UMSK),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_ADDR, REG_A3XX_CP_ROQ_ADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_DATA, REG_A3XX_CP_ROQ_DATA),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_ADDR, REG_A3XX_CP_MERCIU_ADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA, REG_A3XX_CP_MERCIU_DATA),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA2, REG_A3XX_CP_MERCIU_DATA2),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_ADDR, REG_A3XX_CP_MEQ_ADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_DATA, REG_A3XX_CP_MEQ_DATA),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_HW_FAULT, REG_A3XX_CP_HW_FAULT),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_STATUS,
+ REG_A3XX_CP_PROTECT_STATUS),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_STATUS, REG_A3XX_RBBM_STATUS),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_CTL,
+ REG_A3XX_RBBM_PERFCTR_CTL),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
+ REG_A3XX_RBBM_PERFCTR_LOAD_CMD0),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
+ REG_A3XX_RBBM_PERFCTR_LOAD_CMD1),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
+ REG_A3XX_RBBM_PERFCTR_PWR_1_LO),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_MASK, REG_A3XX_RBBM_INT_0_MASK),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_STATUS,
+ REG_A3XX_RBBM_INT_0_STATUS),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ERROR_STATUS,
+ REG_A3XX_RBBM_AHB_ERROR_STATUS),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_CMD, REG_A3XX_RBBM_AHB_CMD),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_CLEAR_CMD,
+ REG_A3XX_RBBM_INT_CLEAR_CMD),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_CLOCK_CTL, REG_A3XX_RBBM_CLOCK_CTL),
+ REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_SEL,
+ REG_A3XX_VPC_VPC_DEBUG_RAM_SEL),
+ REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_READ,
+ REG_A3XX_VPC_VPC_DEBUG_RAM_READ),
+ REG_ADRENO_DEFINE(REG_ADRENO_VSC_SIZE_ADDRESS,
+ REG_A3XX_VSC_SIZE_ADDRESS),
+ REG_ADRENO_DEFINE(REG_ADRENO_VFD_CONTROL_0, REG_A3XX_VFD_CONTROL_0),
+ REG_ADRENO_DEFINE(REG_ADRENO_VFD_INDEX_MAX, REG_A3XX_VFD_INDEX_MAX),
+ REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
+ REG_A3XX_SP_VS_PVT_MEM_ADDR_REG),
+ REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
+ REG_A3XX_SP_FS_PVT_MEM_ADDR_REG),
+ REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_OBJ_START_REG,
+ REG_A3XX_SP_VS_OBJ_START_REG),
+ REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_OBJ_START_REG,
+ REG_A3XX_SP_FS_OBJ_START_REG),
+ REG_ADRENO_DEFINE(REG_ADRENO_PA_SC_AA_CONFIG, REG_A3XX_PA_SC_AA_CONFIG),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PM_OVERRIDE2,
+ REG_A3XX_RBBM_PM_OVERRIDE2),
+ REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_REG2, REG_AXXX_CP_SCRATCH_REG2),
+ REG_ADRENO_DEFINE(REG_ADRENO_SQ_GPR_MANAGEMENT,
+ REG_A3XX_SQ_GPR_MANAGEMENT),
+ REG_ADRENO_DEFINE(REG_ADRENO_SQ_INST_STORE_MANAGMENT,
+ REG_A3XX_SQ_INST_STORE_MANAGMENT),
+ REG_ADRENO_DEFINE(REG_ADRENO_TP0_CHICKEN, REG_A3XX_TP0_CHICKEN),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_RBBM_CTL, REG_A3XX_RBBM_RBBM_CTL),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_SW_RESET_CMD,
+ REG_A3XX_RBBM_SW_RESET_CMD),
+ REG_ADRENO_DEFINE(REG_ADRENO_UCHE_INVALIDATE0,
+ REG_A3XX_UCHE_CACHE_INVALIDATE0_REG),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
+ REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_LO),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
+ REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_HI),
+};
static const struct adreno_gpu_funcs funcs = {
.base = {
@@ -463,6 +553,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
adreno_gpu->registers = a3xx_registers;
+ adreno_gpu->reg_offsets = a3xx_register_offsets;
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
if (ret)
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
new file mode 100644
index 00000000000..5a24c416d2d
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
@@ -0,0 +1,2144 @@
+#ifndef A4XX_XML
+#define A4XX_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15053 bytes, from 2014-11-09 15:45:47)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 63169 bytes, from 2014-11-13 22:44:18)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 49097 bytes, from 2014-11-14 15:38:00)
+
+Copyright (C) 2013-2014 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum a4xx_color_fmt {
+ RB4_A8_UNORM = 1,
+ RB4_R5G6R5_UNORM = 14,
+ RB4_Z16_UNORM = 15,
+ RB4_R8G8B8_UNORM = 25,
+ RB4_R8G8B8A8_UNORM = 26,
+};
+
+enum a4xx_tile_mode {
+ TILE4_LINEAR = 0,
+ TILE4_3 = 3,
+};
+
+enum a4xx_rb_blend_opcode {
+ BLEND_DST_PLUS_SRC = 0,
+ BLEND_SRC_MINUS_DST = 1,
+ BLEND_DST_MINUS_SRC = 2,
+ BLEND_MIN_DST_SRC = 3,
+ BLEND_MAX_DST_SRC = 4,
+};
+
+enum a4xx_vtx_fmt {
+ VFMT4_FLOAT_32 = 1,
+ VFMT4_FLOAT_32_32 = 2,
+ VFMT4_FLOAT_32_32_32 = 3,
+ VFMT4_FLOAT_32_32_32_32 = 4,
+ VFMT4_FLOAT_16 = 5,
+ VFMT4_FLOAT_16_16 = 6,
+ VFMT4_FLOAT_16_16_16 = 7,
+ VFMT4_FLOAT_16_16_16_16 = 8,
+ VFMT4_FIXED_32 = 9,
+ VFMT4_FIXED_32_32 = 10,
+ VFMT4_FIXED_32_32_32 = 11,
+ VFMT4_FIXED_32_32_32_32 = 12,
+ VFMT4_SHORT_16 = 16,
+ VFMT4_SHORT_16_16 = 17,
+ VFMT4_SHORT_16_16_16 = 18,
+ VFMT4_SHORT_16_16_16_16 = 19,
+ VFMT4_USHORT_16 = 20,
+ VFMT4_USHORT_16_16 = 21,
+ VFMT4_USHORT_16_16_16 = 22,
+ VFMT4_USHORT_16_16_16_16 = 23,
+ VFMT4_NORM_SHORT_16 = 24,
+ VFMT4_NORM_SHORT_16_16 = 25,
+ VFMT4_NORM_SHORT_16_16_16 = 26,
+ VFMT4_NORM_SHORT_16_16_16_16 = 27,
+ VFMT4_NORM_USHORT_16 = 28,
+ VFMT4_NORM_USHORT_16_16 = 29,
+ VFMT4_NORM_USHORT_16_16_16 = 30,
+ VFMT4_NORM_USHORT_16_16_16_16 = 31,
+ VFMT4_UBYTE_8 = 40,
+ VFMT4_UBYTE_8_8 = 41,
+ VFMT4_UBYTE_8_8_8 = 42,
+ VFMT4_UBYTE_8_8_8_8 = 43,
+ VFMT4_NORM_UBYTE_8 = 44,
+ VFMT4_NORM_UBYTE_8_8 = 45,
+ VFMT4_NORM_UBYTE_8_8_8 = 46,
+ VFMT4_NORM_UBYTE_8_8_8_8 = 47,
+ VFMT4_BYTE_8 = 48,
+ VFMT4_BYTE_8_8 = 49,
+ VFMT4_BYTE_8_8_8 = 50,
+ VFMT4_BYTE_8_8_8_8 = 51,
+ VFMT4_NORM_BYTE_8 = 52,
+ VFMT4_NORM_BYTE_8_8 = 53,
+ VFMT4_NORM_BYTE_8_8_8 = 54,
+ VFMT4_NORM_BYTE_8_8_8_8 = 55,
+ VFMT4_UINT_10_10_10_2 = 60,
+ VFMT4_NORM_UINT_10_10_10_2 = 61,
+ VFMT4_INT_10_10_10_2 = 62,
+ VFMT4_NORM_INT_10_10_10_2 = 63,
+};
+
+enum a4xx_tex_fmt {
+ TFMT4_NORM_USHORT_565 = 11,
+ TFMT4_NORM_USHORT_5551 = 10,
+ TFMT4_NORM_USHORT_4444 = 8,
+ TFMT4_NORM_UINT_X8Z24 = 71,
+ TFMT4_NORM_UINT_2_10_10_10 = 33,
+ TFMT4_NORM_UINT_A8 = 3,
+ TFMT4_NORM_UINT_L8_A8 = 13,
+ TFMT4_NORM_UINT_8 = 4,
+ TFMT4_NORM_UINT_8_8_8_8 = 28,
+ TFMT4_FLOAT_16 = 20,
+ TFMT4_FLOAT_16_16 = 40,
+ TFMT4_FLOAT_16_16_16_16 = 53,
+ TFMT4_FLOAT_32 = 43,
+ TFMT4_FLOAT_32_32 = 56,
+ TFMT4_FLOAT_32_32_32_32 = 63,
+};
+
+enum a4xx_depth_format {
+ DEPTH4_NONE = 0,
+ DEPTH4_16 = 1,
+ DEPTH4_24_8 = 2,
+};
+
+enum a4xx_tex_filter {
+ A4XX_TEX_NEAREST = 0,
+ A4XX_TEX_LINEAR = 1,
+};
+
+enum a4xx_tex_clamp {
+ A4XX_TEX_REPEAT = 0,
+ A4XX_TEX_CLAMP_TO_EDGE = 1,
+ A4XX_TEX_MIRROR_REPEAT = 2,
+ A4XX_TEX_CLAMP_NONE = 3,
+};
+
+enum a4xx_tex_swiz {
+ A4XX_TEX_X = 0,
+ A4XX_TEX_Y = 1,
+ A4XX_TEX_Z = 2,
+ A4XX_TEX_W = 3,
+ A4XX_TEX_ZERO = 4,
+ A4XX_TEX_ONE = 5,
+};
+
+enum a4xx_tex_type {
+ A4XX_TEX_1D = 0,
+ A4XX_TEX_2D = 1,
+ A4XX_TEX_CUBE = 2,
+ A4XX_TEX_3D = 3,
+};
+
+#define A4XX_CGC_HLSQ_EARLY_CYC__MASK 0x00700000
+#define A4XX_CGC_HLSQ_EARLY_CYC__SHIFT 20
+static inline uint32_t A4XX_CGC_HLSQ_EARLY_CYC(uint32_t val)
+{
+ return ((val) << A4XX_CGC_HLSQ_EARLY_CYC__SHIFT) & A4XX_CGC_HLSQ_EARLY_CYC__MASK;
+}
+#define A4XX_INT0_RBBM_GPU_IDLE 0x00000001
+#define A4XX_INT0_RBBM_AHB_ERROR 0x00000002
+#define A4XX_INT0_RBBM_REG_TIMEOUT 0x00000004
+#define A4XX_INT0_RBBM_ME_MS_TIMEOUT 0x00000008
+#define A4XX_INT0_RBBM_PFP_MS_TIMEOUT 0x00000010
+#define A4XX_INT0_RBBM_ATB_BUS_OVERFLOW 0x00000020
+#define A4XX_INT0_VFD_ERROR 0x00000040
+#define A4XX_INT0_CP_SW_INT 0x00000080
+#define A4XX_INT0_CP_T0_PACKET_IN_IB 0x00000100
+#define A4XX_INT0_CP_OPCODE_ERROR 0x00000200
+#define A4XX_INT0_CP_RESERVED_BIT_ERROR 0x00000400
+#define A4XX_INT0_CP_HW_FAULT 0x00000800
+#define A4XX_INT0_CP_DMA 0x00001000
+#define A4XX_INT0_CP_IB2_INT 0x00002000
+#define A4XX_INT0_CP_IB1_INT 0x00004000
+#define A4XX_INT0_CP_RB_INT 0x00008000
+#define A4XX_INT0_CP_REG_PROTECT_FAULT 0x00010000
+#define A4XX_INT0_CP_RB_DONE_TS 0x00020000
+#define A4XX_INT0_CP_VS_DONE_TS 0x00040000
+#define A4XX_INT0_CP_PS_DONE_TS 0x00080000
+#define A4XX_INT0_CACHE_FLUSH_TS 0x00100000
+#define A4XX_INT0_CP_AHB_ERROR_HALT 0x00200000
+#define A4XX_INT0_MISC_HANG_DETECT 0x01000000
+#define A4XX_INT0_UCHE_OOB_ACCESS 0x02000000
+#define REG_A4XX_RB_GMEM_BASE_ADDR 0x00000cc0
+
+#define REG_A4XX_RB_PERFCTR_RB_SEL_0 0x00000cc7
+
+#define REG_A4XX_RB_PERFCTR_RB_SEL_1 0x00000cc8
+
+#define REG_A4XX_RB_PERFCTR_RB_SEL_2 0x00000cc9
+
+#define REG_A4XX_RB_PERFCTR_RB_SEL_3 0x00000cca
+
+#define REG_A4XX_RB_PERFCTR_RB_SEL_4 0x00000ccb
+
+#define REG_A4XX_RB_PERFCTR_RB_SEL_5 0x00000ccc
+
+#define REG_A4XX_RB_PERFCTR_RB_SEL_6 0x00000ccd
+
+#define REG_A4XX_RB_PERFCTR_RB_SEL_7 0x00000cce
+
+#define REG_A4XX_RB_PERFCTR_CCU_SEL_3 0x00000cd2
+
+#define REG_A4XX_RB_FRAME_BUFFER_DIMENSION 0x00000ce0
+#define A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK 0x00003fff
+#define A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT 0
+static inline uint32_t A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH(uint32_t val)
+{
+ return ((val) << A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT) & A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK;
+}
+#define A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK 0x3fff0000
+#define A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT 16
+static inline uint32_t A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT(uint32_t val)
+{
+ return ((val) << A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT) & A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK;
+}
+
+#define REG_A4XX_RB_CLEAR_COLOR_DW0 0x000020cc
+
+#define REG_A4XX_RB_CLEAR_COLOR_DW1 0x000020cd
+
+#define REG_A4XX_RB_CLEAR_COLOR_DW2 0x000020ce
+
+#define REG_A4XX_RB_CLEAR_COLOR_DW3 0x000020cf
+
+#define REG_A4XX_RB_MODE_CONTROL 0x000020a0
+#define A4XX_RB_MODE_CONTROL_WIDTH__MASK 0x0000003f
+#define A4XX_RB_MODE_CONTROL_WIDTH__SHIFT 0
+static inline uint32_t A4XX_RB_MODE_CONTROL_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A4XX_RB_MODE_CONTROL_WIDTH__SHIFT) & A4XX_RB_MODE_CONTROL_WIDTH__MASK;
+}
+#define A4XX_RB_MODE_CONTROL_HEIGHT__MASK 0x00003f00
+#define A4XX_RB_MODE_CONTROL_HEIGHT__SHIFT 8
+static inline uint32_t A4XX_RB_MODE_CONTROL_HEIGHT(uint32_t val)
+{
+ return ((val >> 5) << A4XX_RB_MODE_CONTROL_HEIGHT__SHIFT) & A4XX_RB_MODE_CONTROL_HEIGHT__MASK;
+}
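+
+/* Note: some packing helpers, like the WIDTH/HEIGHT ones above and several
+ * PITCH/BASE helpers below, right-shift their argument before packing it.
+ * The hardware field apparently stores the value in coarser aligned units
+ * (here presumably multiples of 32), so callers pass the raw width/height
+ * and the helper drops the low bits.
+ */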
+
+#define REG_A4XX_RB_RENDER_CONTROL 0x000020a1
+#define A4XX_RB_RENDER_CONTROL_BINNING_PASS 0x00000001
+#define A4XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE 0x00000020
+
+#define REG_A4XX_RB_MSAA_CONTROL 0x000020a2
+#define A4XX_RB_MSAA_CONTROL_DISABLE 0x00001000
+#define A4XX_RB_MSAA_CONTROL_SAMPLES__MASK 0x0000e000
+#define A4XX_RB_MSAA_CONTROL_SAMPLES__SHIFT 13
+static inline uint32_t A4XX_RB_MSAA_CONTROL_SAMPLES(uint32_t val)
+{
+ return ((val) << A4XX_RB_MSAA_CONTROL_SAMPLES__SHIFT) & A4XX_RB_MSAA_CONTROL_SAMPLES__MASK;
+}
+
+#define REG_A4XX_RB_MSAA_CONTROL2 0x000020a3
+#define A4XX_RB_MSAA_CONTROL2_MSAA_SAMPLES__MASK 0x00000380
+#define A4XX_RB_MSAA_CONTROL2_MSAA_SAMPLES__SHIFT 7
+static inline uint32_t A4XX_RB_MSAA_CONTROL2_MSAA_SAMPLES(uint32_t val)
+{
+ return ((val) << A4XX_RB_MSAA_CONTROL2_MSAA_SAMPLES__SHIFT) & A4XX_RB_MSAA_CONTROL2_MSAA_SAMPLES__MASK;
+}
+#define A4XX_RB_MSAA_CONTROL2_VARYING 0x00001000
+
+static inline uint32_t REG_A4XX_RB_MRT(uint32_t i0) { return 0x000020a4 + 0x5*i0; }
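+/* Helpers that take an index (i0), like REG_A4XX_RB_MRT() here, address one
+ * instance of a register array: base offset plus a fixed per-instance stride.
+ * For example REG_A4XX_RB_MRT_CONTROL(2) below resolves to
+ * 0x20a4 + 0x5*2 = 0x20ae, the control register for render target 2.
+ */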
+
+static inline uint32_t REG_A4XX_RB_MRT_CONTROL(uint32_t i0) { return 0x000020a4 + 0x5*i0; }
+#define A4XX_RB_MRT_CONTROL_READ_DEST_ENABLE 0x00000008
+#define A4XX_RB_MRT_CONTROL_BLEND 0x00000010
+#define A4XX_RB_MRT_CONTROL_BLEND2 0x00000020
+#define A4XX_RB_MRT_CONTROL_FASTCLEAR 0x00000400
+#define A4XX_RB_MRT_CONTROL_B11 0x00000800
+#define A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x0f000000
+#define A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 24
+static inline uint32_t A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
+{
+ return ((val) << A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
+}
+
+static inline uint32_t REG_A4XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x000020a5 + 0x5*i0; }
+#define A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x0000003f
+#define A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a4xx_color_fmt val)
+{
+ return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
+}
+#define A4XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK 0x00000600
+#define A4XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT 9
+static inline uint32_t A4XX_RB_MRT_BUF_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A4XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT) & A4XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK;
+}
+#define A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00001800
+#define A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 11
+static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
+}
+#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK 0x007fc000
+#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT 14
+static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val)
+{
+ return ((val >> 4) << A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK;
+}
+
+static inline uint32_t REG_A4XX_RB_MRT_BASE(uint32_t i0) { return 0x000020a6 + 0x5*i0; }
+
+static inline uint32_t REG_A4XX_RB_MRT_CONTROL3(uint32_t i0) { return 0x000020a7 + 0x5*i0; }
+#define A4XX_RB_MRT_CONTROL3_STRIDE__MASK 0x0001fff8
+#define A4XX_RB_MRT_CONTROL3_STRIDE__SHIFT 3
+static inline uint32_t A4XX_RB_MRT_CONTROL3_STRIDE(uint32_t val)
+{
+ return ((val) << A4XX_RB_MRT_CONTROL3_STRIDE__SHIFT) & A4XX_RB_MRT_CONTROL3_STRIDE__MASK;
+}
+
+static inline uint32_t REG_A4XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x000020a8 + 0x5*i0; }
+#define A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f
+#define A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK;
+}
+#define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
+#define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a4xx_rb_blend_opcode val)
+{
+ return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
+}
+#define A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00
+#define A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK;
+}
+#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000
+#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK;
+}
+#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
+#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a4xx_rb_blend_opcode val)
+{
+ return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
+}
+#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000
+#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
+}
+
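+/* Usage sketch (illustrative only, not taken from the driver): a blend
+ * configuration word for render target i would typically be composed by
+ * OR-ing the field helpers above and written at
+ * REG_A4XX_RB_MRT_BLEND_CONTROL(i), e.g. with placeholder factor/opcode
+ * enum values:
+ *
+ *   uint32_t blend =
+ *       A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(src_rgb) |
+ *       A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(op_rgb) |
+ *       A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(dst_rgb) |
+ *       A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(src_a) |
+ *       A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(op_a) |
+ *       A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(dst_a);
+ */
+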
+#define REG_A4XX_RB_ALPHA_CONTROL 0x000020f8
+#define A4XX_RB_ALPHA_CONTROL_ALPHA_TEST 0x00000100
+#define A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK 0x00000e00
+#define A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT 9
+static inline uint32_t A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK;
+}
+
+#define REG_A4XX_RB_FS_OUTPUT 0x000020f9
+#define A4XX_RB_FS_OUTPUT_ENABLE_COLOR_PIPE 0x00000001
+#define A4XX_RB_FS_OUTPUT_FAST_CLEAR 0x00000100
+#define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK 0xffff0000
+#define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT 16
+static inline uint32_t A4XX_RB_FS_OUTPUT_SAMPLE_MASK(uint32_t val)
+{
+ return ((val) << A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT) & A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK;
+}
+
+#define REG_A4XX_RB_RENDER_CONTROL3 0x000020fb
+#define A4XX_RB_RENDER_CONTROL3_COMPONENT_ENABLE__MASK 0x0000001f
+#define A4XX_RB_RENDER_CONTROL3_COMPONENT_ENABLE__SHIFT 0
+static inline uint32_t A4XX_RB_RENDER_CONTROL3_COMPONENT_ENABLE(uint32_t val)
+{
+ return ((val) << A4XX_RB_RENDER_CONTROL3_COMPONENT_ENABLE__SHIFT) & A4XX_RB_RENDER_CONTROL3_COMPONENT_ENABLE__MASK;
+}
+
+#define REG_A4XX_RB_COPY_CONTROL 0x000020fc
+#define A4XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK 0x00000003
+#define A4XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT 0
+static inline uint32_t A4XX_RB_COPY_CONTROL_MSAA_RESOLVE(enum a3xx_msaa_samples val)
+{
+ return ((val) << A4XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT) & A4XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK;
+}
+#define A4XX_RB_COPY_CONTROL_MODE__MASK 0x00000070
+#define A4XX_RB_COPY_CONTROL_MODE__SHIFT 4
+static inline uint32_t A4XX_RB_COPY_CONTROL_MODE(enum adreno_rb_copy_control_mode val)
+{
+ return ((val) << A4XX_RB_COPY_CONTROL_MODE__SHIFT) & A4XX_RB_COPY_CONTROL_MODE__MASK;
+}
+#define A4XX_RB_COPY_CONTROL_FASTCLEAR__MASK 0x00000f00
+#define A4XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT 8
+static inline uint32_t A4XX_RB_COPY_CONTROL_FASTCLEAR(uint32_t val)
+{
+ return ((val) << A4XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT) & A4XX_RB_COPY_CONTROL_FASTCLEAR__MASK;
+}
+#define A4XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xffffc000
+#define A4XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 14
+static inline uint32_t A4XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
+{
+ return ((val >> 14) << A4XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT) & A4XX_RB_COPY_CONTROL_GMEM_BASE__MASK;
+}
+
+#define REG_A4XX_RB_COPY_DEST_BASE 0x000020fd
+#define A4XX_RB_COPY_DEST_BASE_BASE__MASK 0xfffffff0
+#define A4XX_RB_COPY_DEST_BASE_BASE__SHIFT 4
+static inline uint32_t A4XX_RB_COPY_DEST_BASE_BASE(uint32_t val)
+{
+ return ((val >> 4) << A4XX_RB_COPY_DEST_BASE_BASE__SHIFT) & A4XX_RB_COPY_DEST_BASE_BASE__MASK;
+}
+
+#define REG_A4XX_RB_COPY_DEST_PITCH 0x000020fe
+#define A4XX_RB_COPY_DEST_PITCH_PITCH__MASK 0xffffffff
+#define A4XX_RB_COPY_DEST_PITCH_PITCH__SHIFT 0
+static inline uint32_t A4XX_RB_COPY_DEST_PITCH_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A4XX_RB_COPY_DEST_PITCH_PITCH__SHIFT) & A4XX_RB_COPY_DEST_PITCH_PITCH__MASK;
+}
+
+#define REG_A4XX_RB_COPY_DEST_INFO 0x000020ff
+#define A4XX_RB_COPY_DEST_INFO_FORMAT__MASK 0x000000fc
+#define A4XX_RB_COPY_DEST_INFO_FORMAT__SHIFT 2
+static inline uint32_t A4XX_RB_COPY_DEST_INFO_FORMAT(enum a4xx_color_fmt val)
+{
+ return ((val) << A4XX_RB_COPY_DEST_INFO_FORMAT__SHIFT) & A4XX_RB_COPY_DEST_INFO_FORMAT__MASK;
+}
+#define A4XX_RB_COPY_DEST_INFO_SWAP__MASK 0x00000300
+#define A4XX_RB_COPY_DEST_INFO_SWAP__SHIFT 8
+static inline uint32_t A4XX_RB_COPY_DEST_INFO_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A4XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A4XX_RB_COPY_DEST_INFO_SWAP__MASK;
+}
+#define A4XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK 0x00000c00
+#define A4XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT 10
+static inline uint32_t A4XX_RB_COPY_DEST_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A4XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT) & A4XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK;
+}
+#define A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK 0x0003c000
+#define A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT 14
+static inline uint32_t A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE(uint32_t val)
+{
+ return ((val) << A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT) & A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK;
+}
+#define A4XX_RB_COPY_DEST_INFO_ENDIAN__MASK 0x001c0000
+#define A4XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT 18
+static inline uint32_t A4XX_RB_COPY_DEST_INFO_ENDIAN(enum adreno_rb_surface_endian val)
+{
+ return ((val) << A4XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT) & A4XX_RB_COPY_DEST_INFO_ENDIAN__MASK;
+}
+#define A4XX_RB_COPY_DEST_INFO_TILE__MASK 0x03000000
+#define A4XX_RB_COPY_DEST_INFO_TILE__SHIFT 24
+static inline uint32_t A4XX_RB_COPY_DEST_INFO_TILE(enum a4xx_tile_mode val)
+{
+ return ((val) << A4XX_RB_COPY_DEST_INFO_TILE__SHIFT) & A4XX_RB_COPY_DEST_INFO_TILE__MASK;
+}
+
+#define REG_A4XX_RB_FS_OUTPUT_REG 0x00002100
+#define A4XX_RB_FS_OUTPUT_REG_COLOR_PIPE_ENABLE 0x00000001
+#define A4XX_RB_FS_OUTPUT_REG_FRAG_WRITES_Z 0x00000020
+
+#define REG_A4XX_RB_DEPTH_CONTROL 0x00002101
+#define A4XX_RB_DEPTH_CONTROL_FRAG_WRITES_Z 0x00000001
+#define A4XX_RB_DEPTH_CONTROL_Z_ENABLE 0x00000002
+#define A4XX_RB_DEPTH_CONTROL_Z_WRITE_ENABLE 0x00000004
+#define A4XX_RB_DEPTH_CONTROL_ZFUNC__MASK 0x00000070
+#define A4XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT 4
+static inline uint32_t A4XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
+{
+ return ((val) << A4XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A4XX_RB_DEPTH_CONTROL_ZFUNC__MASK;
+}
+#define A4XX_RB_DEPTH_CONTROL_BF_ENABLE 0x00000080
+#define A4XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE 0x00010000
+#define A4XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x80000000
+
+#define REG_A4XX_RB_DEPTH_CLEAR 0x00002102
+
+#define REG_A4XX_RB_DEPTH_INFO 0x00002103
+#define A4XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000003
+#define A4XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A4XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum a4xx_depth_format val)
+{
+ return ((val) << A4XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT) & A4XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK;
+}
+#define A4XX_RB_DEPTH_INFO_DEPTH_BASE__MASK 0xfffff000
+#define A4XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 12
+static inline uint32_t A4XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
+{
+ return ((val >> 12) << A4XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A4XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
+}
+
+#define REG_A4XX_RB_DEPTH_PITCH 0x00002104
+#define A4XX_RB_DEPTH_PITCH__MASK 0xffffffff
+#define A4XX_RB_DEPTH_PITCH__SHIFT 0
+static inline uint32_t A4XX_RB_DEPTH_PITCH(uint32_t val)
+{
+ return ((val >> 4) << A4XX_RB_DEPTH_PITCH__SHIFT) & A4XX_RB_DEPTH_PITCH__MASK;
+}
+
+#define REG_A4XX_RB_DEPTH_PITCH2 0x00002105
+#define A4XX_RB_DEPTH_PITCH2__MASK 0xffffffff
+#define A4XX_RB_DEPTH_PITCH2__SHIFT 0
+static inline uint32_t A4XX_RB_DEPTH_PITCH2(uint32_t val)
+{
+ return ((val >> 4) << A4XX_RB_DEPTH_PITCH2__SHIFT) & A4XX_RB_DEPTH_PITCH2__MASK;
+}
+
+#define REG_A4XX_RB_STENCIL_CONTROL 0x00002106
+#define A4XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
+#define A4XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002
+#define A4XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004
+#define A4XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700
+#define A4XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8
+static inline uint32_t A4XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A4XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A4XX_RB_STENCIL_CONTROL_FUNC__MASK;
+}
+#define A4XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800
+#define A4XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11
+static inline uint32_t A4XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A4XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A4XX_RB_STENCIL_CONTROL_FAIL__MASK;
+}
+#define A4XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000
+#define A4XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14
+static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val)
+{
+ return ((val) << A4XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A4XX_RB_STENCIL_CONTROL_ZPASS__MASK;
+}
+#define A4XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000
+#define A4XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17
+static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A4XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A4XX_RB_STENCIL_CONTROL_ZFAIL__MASK;
+}
+#define A4XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000
+#define A4XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20
+static inline uint32_t A4XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val)
+{
+ return ((val) << A4XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A4XX_RB_STENCIL_CONTROL_FUNC_BF__MASK;
+}
+#define A4XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000
+#define A4XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23
+static inline uint32_t A4XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A4XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A4XX_RB_STENCIL_CONTROL_FAIL_BF__MASK;
+}
+#define A4XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000
+#define A4XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26
+static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A4XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A4XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK;
+}
+#define A4XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000
+#define A4XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29
+static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A4XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A4XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
+}
+
+#define REG_A4XX_RB_STENCIL_CONTROL2 0x00002107
+#define A4XX_RB_STENCIL_CONTROL2_STENCIL_BUFFER 0x00000001
+
+#define REG_A4XX_RB_STENCILREFMASK 0x0000210b
+#define A4XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
+#define A4XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
+static inline uint32_t A4XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
+{
+ return ((val) << A4XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A4XX_RB_STENCILREFMASK_STENCILREF__MASK;
+}
+#define A4XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00
+#define A4XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8
+static inline uint32_t A4XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
+{
+ return ((val) << A4XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A4XX_RB_STENCILREFMASK_STENCILMASK__MASK;
+}
+#define A4XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000
+#define A4XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A4XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A4XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A4XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A4XX_RB_STENCILREFMASK_BF 0x0000210c
+#define A4XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff
+#define A4XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0
+static inline uint32_t A4XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val)
+{
+ return ((val) << A4XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A4XX_RB_STENCILREFMASK_BF_STENCILREF__MASK;
+}
+#define A4XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00
+#define A4XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8
+static inline uint32_t A4XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val)
+{
+ return ((val) << A4XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A4XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK;
+}
+#define A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000
+#define A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A4XX_RB_BIN_OFFSET 0x0000210d
+#define A4XX_RB_BIN_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
+#define A4XX_RB_BIN_OFFSET_X__MASK 0x00007fff
+#define A4XX_RB_BIN_OFFSET_X__SHIFT 0
+static inline uint32_t A4XX_RB_BIN_OFFSET_X(uint32_t val)
+{
+ return ((val) << A4XX_RB_BIN_OFFSET_X__SHIFT) & A4XX_RB_BIN_OFFSET_X__MASK;
+}
+#define A4XX_RB_BIN_OFFSET_Y__MASK 0x7fff0000
+#define A4XX_RB_BIN_OFFSET_Y__SHIFT 16
+static inline uint32_t A4XX_RB_BIN_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A4XX_RB_BIN_OFFSET_Y__SHIFT) & A4XX_RB_BIN_OFFSET_Y__MASK;
+}
+
+#define REG_A4XX_RB_VPORT_Z_CLAMP_MAX_15 0x0000213f
+
+#define REG_A4XX_RBBM_HW_VERSION 0x00000000
+
+#define REG_A4XX_RBBM_HW_CONFIGURATION 0x00000002
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_TP(uint32_t i0) { return 0x00000004 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_TP_REG(uint32_t i0) { return 0x00000004 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_TP(uint32_t i0) { return 0x00000008 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_TP_REG(uint32_t i0) { return 0x00000008 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_TP(uint32_t i0) { return 0x0000000c + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_TP_REG(uint32_t i0) { return 0x0000000c + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_TP(uint32_t i0) { return 0x00000010 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_TP_REG(uint32_t i0) { return 0x00000010 + 0x1*i0; }
+
+#define REG_A4XX_RBBM_CLOCK_CTL_UCHE 0x00000014
+
+#define REG_A4XX_RBBM_CLOCK_CTL2_UCHE 0x00000015
+
+#define REG_A4XX_RBBM_CLOCK_CTL3_UCHE 0x00000016
+
+#define REG_A4XX_RBBM_CLOCK_CTL4_UCHE 0x00000017
+
+#define REG_A4XX_RBBM_CLOCK_HYST_UCHE 0x00000018
+
+#define REG_A4XX_RBBM_CLOCK_DELAY_UCHE 0x00000019
+
+#define REG_A4XX_RBBM_CLOCK_MODE_GPC 0x0000001a
+
+#define REG_A4XX_RBBM_CLOCK_DELAY_GPC 0x0000001b
+
+#define REG_A4XX_RBBM_CLOCK_HYST_GPC 0x0000001c
+
+#define REG_A4XX_RBBM_CLOCK_CTL_TSE_RAS_RBBM 0x0000001d
+
+#define REG_A4XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM 0x0000001e
+
+#define REG_A4XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM 0x0000001f
+
+#define REG_A4XX_RBBM_CLOCK_CTL 0x00000020
+
+#define REG_A4XX_RBBM_SP_HYST_CNT 0x00000021
+
+#define REG_A4XX_RBBM_SW_RESET_CMD 0x00000022
+
+#define REG_A4XX_RBBM_AHB_CTL0 0x00000023
+
+#define REG_A4XX_RBBM_AHB_CTL1 0x00000024
+
+#define REG_A4XX_RBBM_AHB_CMD 0x00000025
+
+#define REG_A4XX_RBBM_RB_SUB_BLOCK_SEL_CTL 0x00000026
+
+#define REG_A4XX_RBBM_RAM_ACC_63_32 0x00000028
+
+#define REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL 0x0000002b
+
+#define REG_A4XX_RBBM_INTERFACE_HANG_INT_CTL 0x0000002f
+
+#define REG_A4XX_RBBM_INTERFACE_HANG_MASK_CTL4 0x00000034
+
+#define REG_A4XX_RBBM_INT_CLEAR_CMD 0x00000036
+
+#define REG_A4XX_RBBM_INT_0_MASK 0x00000037
+
+#define REG_A4XX_RBBM_RBBM_CTL 0x0000003e
+
+#define REG_A4XX_RBBM_AHB_DEBUG_CTL 0x0000003f
+
+#define REG_A4XX_RBBM_VBIF_DEBUG_CTL 0x00000041
+
+#define REG_A4XX_RBBM_CLOCK_CTL2 0x00000042
+
+#define REG_A4XX_RBBM_BLOCK_SW_RESET_CMD 0x00000045
+
+#define REG_A4XX_RBBM_RESET_CYCLES 0x00000047
+
+#define REG_A4XX_RBBM_EXT_TRACE_BUS_CTL 0x00000049
+
+#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_A 0x0000004a
+
+#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_B 0x0000004b
+
+#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_C 0x0000004c
+
+#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_D 0x0000004d
+
+#define REG_A4XX_RBBM_PERFCTR_CP_0_LO 0x0000009c
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_SP(uint32_t i0) { return 0x00000068 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_SP_REG(uint32_t i0) { return 0x00000068 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_SP(uint32_t i0) { return 0x0000006c + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_SP_REG(uint32_t i0) { return 0x0000006c + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_SP(uint32_t i0) { return 0x00000070 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_SP_REG(uint32_t i0) { return 0x00000070 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_SP(uint32_t i0) { return 0x00000074 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_SP_REG(uint32_t i0) { return 0x00000074 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_RB(uint32_t i0) { return 0x00000078 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_RB_REG(uint32_t i0) { return 0x00000078 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_RB(uint32_t i0) { return 0x0000007c + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_RB_REG(uint32_t i0) { return 0x0000007c + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU(uint32_t i0) { return 0x00000082 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU_REG(uint32_t i0) { return 0x00000082 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU(uint32_t i0) { return 0x00000086 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU_REG(uint32_t i0) { return 0x00000086 + 0x1*i0; }
+
+#define REG_A4XX_RBBM_CLOCK_HYST_COM_DCOM 0x00000080
+
+#define REG_A4XX_RBBM_CLOCK_CTL_COM_DCOM 0x00000081
+
+#define REG_A4XX_RBBM_CLOCK_CTL_HLSQ 0x0000008a
+
+#define REG_A4XX_RBBM_CLOCK_HYST_HLSQ 0x0000008b
+
+#define REG_A4XX_RBBM_CLOCK_DELAY_HLSQ 0x0000008c
+
+#define REG_A4XX_RBBM_CLOCK_DELAY_COM_DCOM 0x0000008d
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(uint32_t i0) { return 0x0000008e + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0) { return 0x0000008e + 0x1*i0; }
+
+#define REG_A4XX_RBBM_PERFCTR_PWR_1_LO 0x00000168
+
+#define REG_A4XX_RBBM_PERFCTR_CTL 0x00000170
+
+#define REG_A4XX_RBBM_PERFCTR_LOAD_CMD0 0x00000171
+
+#define REG_A4XX_RBBM_PERFCTR_LOAD_CMD1 0x00000172
+
+#define REG_A4XX_RBBM_PERFCTR_LOAD_CMD2 0x00000173
+
+#define REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000174
+
+#define REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x00000175
+
+#define REG_A4XX_RBBM_GPU_BUSY_MASKED 0x0000017a
+
+#define REG_A4XX_RBBM_INT_0_STATUS 0x0000017d
+
+#define REG_A4XX_RBBM_CLOCK_STATUS 0x00000182
+
+#define REG_A4XX_RBBM_AHB_STATUS 0x00000189
+
+#define REG_A4XX_RBBM_AHB_ME_SPLIT_STATUS 0x0000018c
+
+#define REG_A4XX_RBBM_AHB_PFP_SPLIT_STATUS 0x0000018d
+
+#define REG_A4XX_RBBM_AHB_ERROR_STATUS 0x0000018f
+
+#define REG_A4XX_RBBM_STATUS 0x00000191
+#define A4XX_RBBM_STATUS_HI_BUSY 0x00000001
+#define A4XX_RBBM_STATUS_CP_ME_BUSY 0x00000002
+#define A4XX_RBBM_STATUS_CP_PFP_BUSY 0x00000004
+#define A4XX_RBBM_STATUS_CP_NRT_BUSY 0x00004000
+#define A4XX_RBBM_STATUS_VBIF_BUSY 0x00008000
+#define A4XX_RBBM_STATUS_TSE_BUSY 0x00010000
+#define A4XX_RBBM_STATUS_RAS_BUSY 0x00020000
+#define A4XX_RBBM_STATUS_RB_BUSY 0x00040000
+#define A4XX_RBBM_STATUS_PC_DCALL_BUSY 0x00080000
+#define A4XX_RBBM_STATUS_PC_VSD_BUSY 0x00100000
+#define A4XX_RBBM_STATUS_VFD_BUSY 0x00200000
+#define A4XX_RBBM_STATUS_VPC_BUSY 0x00400000
+#define A4XX_RBBM_STATUS_UCHE_BUSY 0x00800000
+#define A4XX_RBBM_STATUS_SP_BUSY 0x01000000
+#define A4XX_RBBM_STATUS_TPL1_BUSY 0x02000000
+#define A4XX_RBBM_STATUS_MARB_BUSY 0x04000000
+#define A4XX_RBBM_STATUS_VSC_BUSY 0x08000000
+#define A4XX_RBBM_STATUS_ARB_BUSY 0x10000000
+#define A4XX_RBBM_STATUS_HLSQ_BUSY 0x20000000
+#define A4XX_RBBM_STATUS_GPU_BUSY_NOHC 0x40000000
+#define A4XX_RBBM_STATUS_GPU_BUSY 0x80000000
+
+#define REG_A4XX_RBBM_INTERFACE_RRDY_STATUS5 0x0000019f
+
+#define REG_A4XX_CP_SCRATCH_UMASK 0x00000228
+
+#define REG_A4XX_CP_SCRATCH_ADDR 0x00000229
+
+#define REG_A4XX_CP_RB_BASE 0x00000200
+
+#define REG_A4XX_CP_RB_CNTL 0x00000201
+
+#define REG_A4XX_CP_RB_WPTR 0x00000205
+
+#define REG_A4XX_CP_RB_RPTR_ADDR 0x00000203
+
+#define REG_A4XX_CP_RB_RPTR 0x00000204
+
+#define REG_A4XX_CP_IB1_BASE 0x00000206
+
+#define REG_A4XX_CP_IB1_BUFSZ 0x00000207
+
+#define REG_A4XX_CP_IB2_BASE 0x00000208
+
+#define REG_A4XX_CP_IB2_BUFSZ 0x00000209
+
+#define REG_A4XX_CP_ME_RB_DONE_DATA 0x00000217
+
+#define REG_A4XX_CP_QUEUE_THRESH2 0x00000219
+
+#define REG_A4XX_CP_MERCIU_SIZE 0x0000021b
+
+#define REG_A4XX_CP_ROQ_ADDR 0x0000021c
+
+#define REG_A4XX_CP_ROQ_DATA 0x0000021d
+
+#define REG_A4XX_CP_MEQ_ADDR 0x0000021e
+
+#define REG_A4XX_CP_MEQ_DATA 0x0000021f
+
+#define REG_A4XX_CP_MERCIU_ADDR 0x00000220
+
+#define REG_A4XX_CP_MERCIU_DATA 0x00000221
+
+#define REG_A4XX_CP_MERCIU_DATA2 0x00000222
+
+#define REG_A4XX_CP_PFP_UCODE_ADDR 0x00000223
+
+#define REG_A4XX_CP_PFP_UCODE_DATA 0x00000224
+
+#define REG_A4XX_CP_ME_RAM_WADDR 0x00000225
+
+#define REG_A4XX_CP_ME_RAM_RADDR 0x00000226
+
+#define REG_A4XX_CP_ME_RAM_DATA 0x00000227
+
+#define REG_A4XX_CP_PREEMPT 0x0000022a
+
+#define REG_A4XX_CP_CNTL 0x0000022c
+
+#define REG_A4XX_CP_ME_CNTL 0x0000022d
+
+#define REG_A4XX_CP_DEBUG 0x0000022e
+
+#define REG_A4XX_CP_DEBUG_ECO_CONTROL 0x00000231
+
+#define REG_A4XX_CP_DRAW_STATE_ADDR 0x00000232
+
+#define REG_A4XX_CP_PROTECT_REG_0 0x00000240
+
+static inline uint32_t REG_A4XX_CP_PROTECT(uint32_t i0) { return 0x00000240 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000240 + 0x1*i0; }
+
+#define REG_A4XX_CP_PROTECT_CTRL 0x00000250
+
+#define REG_A4XX_CP_ST_BASE 0x000004c0
+
+#define REG_A4XX_CP_STQ_AVAIL 0x000004ce
+
+#define REG_A4XX_CP_MERCIU_STAT 0x000004d0
+
+#define REG_A4XX_CP_WFI_PEND_CTR 0x000004d2
+
+#define REG_A4XX_CP_HW_FAULT 0x000004d8
+
+#define REG_A4XX_CP_PROTECT_STATUS 0x000004da
+
+#define REG_A4XX_CP_EVENTS_IN_FLIGHT 0x000004dd
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_0 0x00000500
+
+#define REG_A4XX_CP_PERFCOMBINER_SELECT 0x0000050b
+
+static inline uint32_t REG_A4XX_CP_SCRATCH(uint32_t i0) { return 0x00000578 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000578 + 0x1*i0; }
+
+#define REG_A4XX_SP_VS_STATUS 0x00000ec0
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_11 0x00000ecf
+
+#define REG_A4XX_SP_SP_CTRL_REG 0x000022c0
+#define A4XX_SP_SP_CTRL_REG_BINNING_PASS 0x00080000
+
+#define REG_A4XX_SP_INSTR_CACHE_CTRL 0x000022c1
+
+#define REG_A4XX_SP_VS_CTRL_REG0 0x000022c4
+#define A4XX_SP_VS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A4XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A4XX_SP_VS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+ return ((val) << A4XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT) & A4XX_SP_VS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A4XX_SP_VS_CTRL_REG0_VARYING 0x00000002
+#define A4XX_SP_VS_CTRL_REG0_CACHEINVALID 0x00000004
+#define A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
+#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
+#define A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
+static inline uint32_t A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK;
+}
+#define A4XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A4XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A4XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A4XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A4XX_SP_VS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A4XX_SP_VS_CTRL_REG0_SUPERTHREADMODE 0x00200000
+#define A4XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00400000
+
+#define REG_A4XX_SP_VS_CTRL_REG1 0x000022c5
+#define A4XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_SP_VS_CTRL_REG1_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT) & A4XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK;
+}
+#define A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK 0x7f000000
+#define A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT 24
+static inline uint32_t A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK;
+}
+
+#define REG_A4XX_SP_VS_PARAM_REG 0x000022c6
+#define A4XX_SP_VS_PARAM_REG_POSREGID__MASK 0x000000ff
+#define A4XX_SP_VS_PARAM_REG_POSREGID__SHIFT 0
+static inline uint32_t A4XX_SP_VS_PARAM_REG_POSREGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_PARAM_REG_POSREGID__SHIFT) & A4XX_SP_VS_PARAM_REG_POSREGID__MASK;
+}
+#define A4XX_SP_VS_PARAM_REG_PSIZEREGID__MASK 0x0000ff00
+#define A4XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT 8
+static inline uint32_t A4XX_SP_VS_PARAM_REG_PSIZEREGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT) & A4XX_SP_VS_PARAM_REG_PSIZEREGID__MASK;
+}
+#define A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK 0xfff00000
+#define A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT 20
+static inline uint32_t A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT) & A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_VS_OUT(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_VS_OUT_REG(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
+#define A4XX_SP_VS_OUT_REG_A_REGID__MASK 0x000001ff
+#define A4XX_SP_VS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A4XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A4XX_SP_VS_OUT_REG_A_REGID__MASK;
+}
+#define A4XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00001e00
+#define A4XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 9
+static inline uint32_t A4XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A4XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A4XX_SP_VS_OUT_REG_B_REGID__MASK 0x01ff0000
+#define A4XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A4XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A4XX_SP_VS_OUT_REG_B_REGID__MASK;
+}
+#define A4XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x1e000000
+#define A4XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 25
+static inline uint32_t A4XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A4XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_VS_VPC_DST(uint32_t i0) { return 0x000022d8 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x000022d8 + 0x1*i0; }
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A4XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A4XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A4XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A4XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A4XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A4XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A4XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A4XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A4XX_SP_VS_OBJ_OFFSET_REG 0x000022e0
+#define A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
+#define A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
+static inline uint32_t A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
+#define A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
+static inline uint32_t A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A4XX_SP_VS_OBJ_START 0x000022e1
+
+#define REG_A4XX_SP_VS_PVT_MEM_PARAM 0x000022e2
+
+#define REG_A4XX_SP_VS_PVT_MEM_ADDR 0x000022e3
+
+#define REG_A4XX_SP_VS_LENGTH_REG 0x000022e5
+
+#define REG_A4XX_SP_FS_CTRL_REG0 0x000022e8
+#define A4XX_SP_FS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A4XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A4XX_SP_FS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+ return ((val) << A4XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT) & A4XX_SP_FS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A4XX_SP_FS_CTRL_REG0_VARYING 0x00000002
+#define A4XX_SP_FS_CTRL_REG0_CACHEINVALID 0x00000004
+#define A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
+#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
+#define A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
+static inline uint32_t A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK;
+}
+#define A4XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A4XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A4XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A4XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A4XX_SP_FS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A4XX_SP_FS_CTRL_REG0_SUPERTHREADMODE 0x00200000
+#define A4XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00400000
+
+#define REG_A4XX_SP_FS_CTRL_REG1 0x000022e9
+#define A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_SP_FS_CTRL_REG1_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT) & A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK;
+}
+#define A4XX_SP_FS_CTRL_REG1_VARYING 0x00100000
+
+#define REG_A4XX_SP_FS_OBJ_OFFSET_REG 0x000022ea
+#define A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
+#define A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
+static inline uint32_t A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
+#define A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
+static inline uint32_t A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A4XX_SP_FS_OBJ_START 0x000022eb
+
+#define REG_A4XX_SP_FS_PVT_MEM_PARAM 0x000022ec
+
+#define REG_A4XX_SP_FS_PVT_MEM_ADDR 0x000022ed
+
+#define REG_A4XX_SP_FS_LENGTH_REG 0x000022ef
+
+#define REG_A4XX_SP_FS_OUTPUT_REG 0x000022f0
+#define A4XX_SP_FS_OUTPUT_REG_DEPTH_ENABLE 0x00000080
+#define A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK 0x0000ff00
+#define A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT 8
+static inline uint32_t A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT) & A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_FS_MRT(uint32_t i0) { return 0x000022f1 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_FS_MRT_REG(uint32_t i0) { return 0x000022f1 + 0x1*i0; }
+#define A4XX_SP_FS_MRT_REG_REGID__MASK 0x000000ff
+#define A4XX_SP_FS_MRT_REG_REGID__SHIFT 0
+static inline uint32_t A4XX_SP_FS_MRT_REG_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_MRT_REG_REGID__SHIFT) & A4XX_SP_FS_MRT_REG_REGID__MASK;
+}
+#define A4XX_SP_FS_MRT_REG_HALF_PRECISION 0x00000100
+#define A4XX_SP_FS_MRT_REG_MRTFORMAT__MASK 0x0003f000
+#define A4XX_SP_FS_MRT_REG_MRTFORMAT__SHIFT 12
+static inline uint32_t A4XX_SP_FS_MRT_REG_MRTFORMAT(enum a4xx_color_fmt val)
+{
+ return ((val) << A4XX_SP_FS_MRT_REG_MRTFORMAT__SHIFT) & A4XX_SP_FS_MRT_REG_MRTFORMAT__MASK;
+}
+
+#define REG_A4XX_SP_HS_OBJ_OFFSET_REG 0x0000230d
+#define A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
+#define A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
+static inline uint32_t A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
+#define A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
+static inline uint32_t A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A4XX_SP_DS_OBJ_OFFSET_REG 0x00002334
+#define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
+#define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
+static inline uint32_t A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
+#define A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
+static inline uint32_t A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A4XX_SP_GS_OBJ_OFFSET_REG 0x0000235b
+#define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
+#define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
+static inline uint32_t A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
+#define A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
+static inline uint32_t A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A4XX_SP_GS_LENGTH_REG 0x00002360
+
+#define REG_A4XX_VPC_DEBUG_RAM_SEL 0x00000e60
+
+#define REG_A4XX_VPC_DEBUG_RAM_READ 0x00000e61
+
+#define REG_A4XX_VPC_DEBUG_ECO_CONTROL 0x00000e64
+
+#define REG_A4XX_VPC_PERFCTR_VPC_SEL_3 0x00000e68
+
+#define REG_A4XX_VPC_ATTR 0x00002140
+#define A4XX_VPC_ATTR_TOTALATTR__MASK 0x000001ff
+#define A4XX_VPC_ATTR_TOTALATTR__SHIFT 0
+static inline uint32_t A4XX_VPC_ATTR_TOTALATTR(uint32_t val)
+{
+ return ((val) << A4XX_VPC_ATTR_TOTALATTR__SHIFT) & A4XX_VPC_ATTR_TOTALATTR__MASK;
+}
+#define A4XX_VPC_ATTR_PSIZE 0x00000200
+#define A4XX_VPC_ATTR_THRDASSIGN__MASK 0x00003000
+#define A4XX_VPC_ATTR_THRDASSIGN__SHIFT 12
+static inline uint32_t A4XX_VPC_ATTR_THRDASSIGN(uint32_t val)
+{
+ return ((val) << A4XX_VPC_ATTR_THRDASSIGN__SHIFT) & A4XX_VPC_ATTR_THRDASSIGN__MASK;
+}
+#define A4XX_VPC_ATTR_ENABLE 0x02000000
+
+#define REG_A4XX_VPC_PACK 0x00002141
+#define A4XX_VPC_PACK_NUMBYPASSVAR__MASK 0x000000ff
+#define A4XX_VPC_PACK_NUMBYPASSVAR__SHIFT 0
+static inline uint32_t A4XX_VPC_PACK_NUMBYPASSVAR(uint32_t val)
+{
+ return ((val) << A4XX_VPC_PACK_NUMBYPASSVAR__SHIFT) & A4XX_VPC_PACK_NUMBYPASSVAR__MASK;
+}
+#define A4XX_VPC_PACK_NUMFPNONPOSVAR__MASK 0x0000ff00
+#define A4XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT 8
+static inline uint32_t A4XX_VPC_PACK_NUMFPNONPOSVAR(uint32_t val)
+{
+ return ((val) << A4XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT) & A4XX_VPC_PACK_NUMFPNONPOSVAR__MASK;
+}
+#define A4XX_VPC_PACK_NUMNONPOSVSVAR__MASK 0x00ff0000
+#define A4XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT 16
+static inline uint32_t A4XX_VPC_PACK_NUMNONPOSVSVAR(uint32_t val)
+{
+ return ((val) << A4XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT) & A4XX_VPC_PACK_NUMNONPOSVSVAR__MASK;
+}
+
+static inline uint32_t REG_A4XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00002142 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00002142 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x0000214a + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x0000214a + 0x1*i0; }
+
+#define REG_A4XX_VPC_SO_FLUSH_WADDR_3 0x0000216e
+
+#define REG_A4XX_VSC_BIN_SIZE 0x00000c00
+#define A4XX_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
+#define A4XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A4XX_VSC_BIN_SIZE_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A4XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A4XX_VSC_BIN_SIZE_WIDTH__MASK;
+}
+#define A4XX_VSC_BIN_SIZE_HEIGHT__MASK 0x000003e0
+#define A4XX_VSC_BIN_SIZE_HEIGHT__SHIFT 5
+static inline uint32_t A4XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val >> 5) << A4XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A4XX_VSC_BIN_SIZE_HEIGHT__MASK;
+}
+
+#define REG_A4XX_VSC_SIZE_ADDRESS 0x00000c01
+
+#define REG_A4XX_VSC_SIZE_ADDRESS2 0x00000c02
+
+#define REG_A4XX_VSC_DEBUG_ECO_CONTROL 0x00000c03
+
+static inline uint32_t REG_A4XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c08 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VSC_PIPE_CONFIG_REG(uint32_t i0) { return 0x00000c08 + 0x1*i0; }
+#define A4XX_VSC_PIPE_CONFIG_REG_X__MASK 0x000003ff
+#define A4XX_VSC_PIPE_CONFIG_REG_X__SHIFT 0
+static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_X(uint32_t val)
+{
+ return ((val) << A4XX_VSC_PIPE_CONFIG_REG_X__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_X__MASK;
+}
+#define A4XX_VSC_PIPE_CONFIG_REG_Y__MASK 0x000ffc00
+#define A4XX_VSC_PIPE_CONFIG_REG_Y__SHIFT 10
+static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_Y(uint32_t val)
+{
+ return ((val) << A4XX_VSC_PIPE_CONFIG_REG_Y__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_Y__MASK;
+}
+#define A4XX_VSC_PIPE_CONFIG_REG_W__MASK 0x00f00000
+#define A4XX_VSC_PIPE_CONFIG_REG_W__SHIFT 20
+static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_W(uint32_t val)
+{
+ return ((val) << A4XX_VSC_PIPE_CONFIG_REG_W__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_W__MASK;
+}
+#define A4XX_VSC_PIPE_CONFIG_REG_H__MASK 0x0f000000
+#define A4XX_VSC_PIPE_CONFIG_REG_H__SHIFT 24
+static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_H(uint32_t val)
+{
+ return ((val) << A4XX_VSC_PIPE_CONFIG_REG_H__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_H__MASK;
+}
+
+static inline uint32_t REG_A4XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00000c10 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VSC_PIPE_DATA_ADDRESS_REG(uint32_t i0) { return 0x00000c10 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c18 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VSC_PIPE_DATA_LENGTH_REG(uint32_t i0) { return 0x00000c18 + 0x1*i0; }
+
+#define REG_A4XX_VSC_PIPE_PARTIAL_POSN_1 0x00000c41
+
+#define REG_A4XX_VSC_PERFCTR_VSC_SEL_0 0x00000c50
+
+#define REG_A4XX_VSC_PERFCTR_VSC_SEL_1 0x00000c51
+
+#define REG_A4XX_VFD_DEBUG_CONTROL 0x00000e40
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_7 0x00000e4a
+
+#define REG_A4XX_VFD_CONTROL_0 0x00002200
+#define A4XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK 0x000000ff
+#define A4XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT 0
+static inline uint32_t A4XX_VFD_CONTROL_0_TOTALATTRTOVS(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT) & A4XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK;
+}
+#define A4XX_VFD_CONTROL_0_BYPASSATTROVS__MASK 0x0001fe00
+#define A4XX_VFD_CONTROL_0_BYPASSATTROVS__SHIFT 9
+static inline uint32_t A4XX_VFD_CONTROL_0_BYPASSATTROVS(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_0_BYPASSATTROVS__SHIFT) & A4XX_VFD_CONTROL_0_BYPASSATTROVS__MASK;
+}
+#define A4XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK 0x03f00000
+#define A4XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT 20
+static inline uint32_t A4XX_VFD_CONTROL_0_STRMDECINSTRCNT(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT) & A4XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK;
+}
+#define A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK 0xfc000000
+#define A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT 26
+static inline uint32_t A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT) & A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK;
+}
+
+#define REG_A4XX_VFD_CONTROL_1 0x00002201
+#define A4XX_VFD_CONTROL_1_MAXSTORAGE__MASK 0x0000ffff
+#define A4XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT 0
+static inline uint32_t A4XX_VFD_CONTROL_1_MAXSTORAGE(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT) & A4XX_VFD_CONTROL_1_MAXSTORAGE__MASK;
+}
+#define A4XX_VFD_CONTROL_1_REGID4VTX__MASK 0x00ff0000
+#define A4XX_VFD_CONTROL_1_REGID4VTX__SHIFT 16
+static inline uint32_t A4XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A4XX_VFD_CONTROL_1_REGID4VTX__MASK;
+}
+#define A4XX_VFD_CONTROL_1_REGID4INST__MASK 0xff000000
+#define A4XX_VFD_CONTROL_1_REGID4INST__SHIFT 24
+static inline uint32_t A4XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A4XX_VFD_CONTROL_1_REGID4INST__MASK;
+}
+
+#define REG_A4XX_VFD_CONTROL_2 0x00002202
+
+#define REG_A4XX_VFD_CONTROL_3 0x00002203
+
+#define REG_A4XX_VFD_CONTROL_4 0x00002204
+
+#define REG_A4XX_VFD_INDEX_OFFSET 0x00002208
+
+static inline uint32_t REG_A4XX_VFD_FETCH(uint32_t i0) { return 0x0000220a + 0x4*i0; }
+
+static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_0(uint32_t i0) { return 0x0000220a + 0x4*i0; }
+#define A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK 0x0000007f
+#define A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT 0
+static inline uint32_t A4XX_VFD_FETCH_INSTR_0_FETCHSIZE(uint32_t val)
+{
+ return ((val) << A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT) & A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK;
+}
+#define A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK 0x0001ff80
+#define A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT 7
+static inline uint32_t A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE(uint32_t val)
+{
+ return ((val) << A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT) & A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK;
+}
+#define A4XX_VFD_FETCH_INSTR_0_SWITCHNEXT 0x00080000
+#define A4XX_VFD_FETCH_INSTR_0_STEPRATE__MASK 0xff000000
+#define A4XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT 24
+static inline uint32_t A4XX_VFD_FETCH_INSTR_0_STEPRATE(uint32_t val)
+{
+ return ((val) << A4XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT) & A4XX_VFD_FETCH_INSTR_0_STEPRATE__MASK;
+}
+
+static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_1(uint32_t i0) { return 0x0000220b + 0x4*i0; }
+
+static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_2(uint32_t i0) { return 0x0000220c + 0x4*i0; }
+#define A4XX_VFD_FETCH_INSTR_2_SIZE__MASK 0xfffffff0
+#define A4XX_VFD_FETCH_INSTR_2_SIZE__SHIFT 4
+static inline uint32_t A4XX_VFD_FETCH_INSTR_2_SIZE(uint32_t val)
+{
+ return ((val >> 4) << A4XX_VFD_FETCH_INSTR_2_SIZE__SHIFT) & A4XX_VFD_FETCH_INSTR_2_SIZE__MASK;
+}
+
+static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_3(uint32_t i0) { return 0x0000220d + 0x4*i0; }
+
+static inline uint32_t REG_A4XX_VFD_DECODE(uint32_t i0) { return 0x0000228a + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x0000228a + 0x1*i0; }
+#define A4XX_VFD_DECODE_INSTR_WRITEMASK__MASK 0x0000000f
+#define A4XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT 0
+static inline uint32_t A4XX_VFD_DECODE_INSTR_WRITEMASK(uint32_t val)
+{
+ return ((val) << A4XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT) & A4XX_VFD_DECODE_INSTR_WRITEMASK__MASK;
+}
+#define A4XX_VFD_DECODE_INSTR_CONSTFILL 0x00000010
+#define A4XX_VFD_DECODE_INSTR_FORMAT__MASK 0x00000fc0
+#define A4XX_VFD_DECODE_INSTR_FORMAT__SHIFT 6
+static inline uint32_t A4XX_VFD_DECODE_INSTR_FORMAT(enum a4xx_vtx_fmt val)
+{
+ return ((val) << A4XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A4XX_VFD_DECODE_INSTR_FORMAT__MASK;
+}
+#define A4XX_VFD_DECODE_INSTR_REGID__MASK 0x000ff000
+#define A4XX_VFD_DECODE_INSTR_REGID__SHIFT 12
+static inline uint32_t A4XX_VFD_DECODE_INSTR_REGID(uint32_t val)
+{
+ return ((val) << A4XX_VFD_DECODE_INSTR_REGID__SHIFT) & A4XX_VFD_DECODE_INSTR_REGID__MASK;
+}
+#define A4XX_VFD_DECODE_INSTR_SWAP__MASK 0x00c00000
+#define A4XX_VFD_DECODE_INSTR_SWAP__SHIFT 22
+static inline uint32_t A4XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A4XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A4XX_VFD_DECODE_INSTR_SWAP__MASK;
+}
+#define A4XX_VFD_DECODE_INSTR_SHIFTCNT__MASK 0x1f000000
+#define A4XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT 24
+static inline uint32_t A4XX_VFD_DECODE_INSTR_SHIFTCNT(uint32_t val)
+{
+ return ((val) << A4XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT) & A4XX_VFD_DECODE_INSTR_SHIFTCNT__MASK;
+}
+#define A4XX_VFD_DECODE_INSTR_LASTCOMPVALID 0x20000000
+#define A4XX_VFD_DECODE_INSTR_SWITCHNEXT 0x40000000
+
+#define REG_A4XX_TPL1_DEBUG_ECO_CONTROL 0x00000f00
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_7 0x00000f0b
+
+#define REG_A4XX_TPL1_TP_TEX_OFFSET 0x00002380
+
+#define REG_A4XX_TPL1_TP_CS_TEXMEMOBJ_BASE_ADDR 0x000023a6
+
+#define REG_A4XX_GRAS_TSE_STATUS 0x00000c80
+
+#define REG_A4XX_GRAS_DEBUG_ECO_CONTROL 0x00000c81
+
+#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_0 0x00000c88
+
+#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_3 0x00000c8b
+
+#define REG_A4XX_GRAS_CL_CLIP_CNTL 0x00002000
+
+#define REG_A4XX_GRAS_CLEAR_CNTL 0x00002003
+#define A4XX_GRAS_CLEAR_CNTL_NOT_FASTCLEAR 0x00000001
+
+#define REG_A4XX_GRAS_CL_GB_CLIP_ADJ 0x00002004
+#define A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK 0x000003ff
+#define A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT 0
+static inline uint32_t A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT) & A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK;
+}
+#define A4XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK 0x000ffc00
+#define A4XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT 10
+static inline uint32_t A4XX_GRAS_CL_GB_CLIP_ADJ_VERT(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT) & A4XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK;
+}
+
+#define REG_A4XX_GRAS_CL_VPORT_XOFFSET_0 0x00002008
+#define A4XX_GRAS_CL_VPORT_XOFFSET_0__MASK 0xffffffff
+#define A4XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT 0
+static inline uint32_t A4XX_GRAS_CL_VPORT_XOFFSET_0(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT) & A4XX_GRAS_CL_VPORT_XOFFSET_0__MASK;
+}
+
+#define REG_A4XX_GRAS_CL_VPORT_XSCALE_0 0x00002009
+#define A4XX_GRAS_CL_VPORT_XSCALE_0__MASK 0xffffffff
+#define A4XX_GRAS_CL_VPORT_XSCALE_0__SHIFT 0
+static inline uint32_t A4XX_GRAS_CL_VPORT_XSCALE_0(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_CL_VPORT_XSCALE_0__SHIFT) & A4XX_GRAS_CL_VPORT_XSCALE_0__MASK;
+}
+
+#define REG_A4XX_GRAS_CL_VPORT_YOFFSET_0 0x0000200a
+#define A4XX_GRAS_CL_VPORT_YOFFSET_0__MASK 0xffffffff
+#define A4XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT 0
+static inline uint32_t A4XX_GRAS_CL_VPORT_YOFFSET_0(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT) & A4XX_GRAS_CL_VPORT_YOFFSET_0__MASK;
+}
+
+#define REG_A4XX_GRAS_CL_VPORT_YSCALE_0 0x0000200b
+#define A4XX_GRAS_CL_VPORT_YSCALE_0__MASK 0xffffffff
+#define A4XX_GRAS_CL_VPORT_YSCALE_0__SHIFT 0
+static inline uint32_t A4XX_GRAS_CL_VPORT_YSCALE_0(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_CL_VPORT_YSCALE_0__SHIFT) & A4XX_GRAS_CL_VPORT_YSCALE_0__MASK;
+}
+
+#define REG_A4XX_GRAS_CL_VPORT_ZOFFSET_0 0x0000200c
+#define A4XX_GRAS_CL_VPORT_ZOFFSET_0__MASK 0xffffffff
+#define A4XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT 0
+static inline uint32_t A4XX_GRAS_CL_VPORT_ZOFFSET_0(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT) & A4XX_GRAS_CL_VPORT_ZOFFSET_0__MASK;
+}
+
+#define REG_A4XX_GRAS_CL_VPORT_ZSCALE_0 0x0000200d
+#define A4XX_GRAS_CL_VPORT_ZSCALE_0__MASK 0xffffffff
+#define A4XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT 0
+static inline uint32_t A4XX_GRAS_CL_VPORT_ZSCALE_0(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT) & A4XX_GRAS_CL_VPORT_ZSCALE_0__MASK;
+}
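+
+/* The viewport helpers above take a float and pack its raw IEEE-754 bit
+ * pattern via fui(), which is assumed to be a float-to-uint32 bit cast
+ * provided elsewhere in the driver headers.  The point min/max/size helpers
+ * that follow instead multiply by 16.0 before truncating, which looks like a
+ * fixed-point encoding with four fractional bits.
+ */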
+
+#define REG_A4XX_GRAS_SU_POINT_MINMAX 0x00002070
+#define A4XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
+#define A4XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0
+static inline uint32_t A4XX_GRAS_SU_POINT_MINMAX_MIN(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A4XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A4XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
+}
+#define A4XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000
+#define A4XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16
+static inline uint32_t A4XX_GRAS_SU_POINT_MINMAX_MAX(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A4XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A4XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
+}
+
+#define REG_A4XX_GRAS_SU_POINT_SIZE 0x00002071
+#define A4XX_GRAS_SU_POINT_SIZE__MASK 0xffffffff
+#define A4XX_GRAS_SU_POINT_SIZE__SHIFT 0
+static inline uint32_t A4XX_GRAS_SU_POINT_SIZE(float val)
+{
+ return ((((int32_t)(val * 16.0))) << A4XX_GRAS_SU_POINT_SIZE__SHIFT) & A4XX_GRAS_SU_POINT_SIZE__MASK;
+}
+
+#define REG_A4XX_GRAS_ALPHA_CONTROL 0x00002073
+#define A4XX_GRAS_ALPHA_CONTROL_ALPHA_TEST_ENABLE 0x00000004
+
+#define REG_A4XX_GRAS_SU_POLY_OFFSET_SCALE 0x00002074
+#define A4XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff
+#define A4XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT 0
+static inline uint32_t A4XX_GRAS_SU_POLY_OFFSET_SCALE(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT) & A4XX_GRAS_SU_POLY_OFFSET_SCALE__MASK;
+}
+
+#define REG_A4XX_GRAS_SU_POLY_OFFSET_OFFSET 0x00002075
+#define A4XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff
+#define A4XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A4XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A4XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A4XX_GRAS_SC_EXTENT_WINDOW_TL 0x0000209f
+
+#define REG_A4XX_GRAS_SC_SCREEN_SCISSOR_TL 0x0000207c
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK 0x00007fff
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK;
+}
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A4XX_GRAS_SC_SCREEN_SCISSOR_BR 0x0000207d
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK 0x00007fff
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK;
+}
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A4XX_GRAS_SC_WINDOW_SCISSOR_BR 0x0000209c
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
+}
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A4XX_GRAS_SC_WINDOW_SCISSOR_TL 0x0000209d
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
+}
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A4XX_GRAS_DEPTH_CONTROL 0x00002077
+#define A4XX_GRAS_DEPTH_CONTROL_FORMAT__MASK 0x00000003
+#define A4XX_GRAS_DEPTH_CONTROL_FORMAT__SHIFT 0
+static inline uint32_t A4XX_GRAS_DEPTH_CONTROL_FORMAT(enum a4xx_depth_format val)
+{
+ return ((val) << A4XX_GRAS_DEPTH_CONTROL_FORMAT__SHIFT) & A4XX_GRAS_DEPTH_CONTROL_FORMAT__MASK;
+}
+
+#define REG_A4XX_GRAS_SU_MODE_CONTROL 0x00002078
+#define A4XX_GRAS_SU_MODE_CONTROL_CULL_FRONT 0x00000001
+#define A4XX_GRAS_SU_MODE_CONTROL_CULL_BACK 0x00000002
+#define A4XX_GRAS_SU_MODE_CONTROL_FRONT_CW 0x00000004
+#define A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK 0x000007f8
+#define A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT 3
+static inline uint32_t A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(float val)
+{
+ return ((((int32_t)(val * 4.0))) << A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
+}
+#define A4XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800
+#define A4XX_GRAS_SU_MODE_CONTROL_RENDERING_PASS 0x00100000
+
+#define REG_A4XX_GRAS_SC_CONTROL 0x0000207b
+#define A4XX_GRAS_SC_CONTROL_RENDER_MODE__MASK 0x0000000c
+#define A4XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT 2
+static inline uint32_t A4XX_GRAS_SC_CONTROL_RENDER_MODE(enum a3xx_render_mode val)
+{
+ return ((val) << A4XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT) & A4XX_GRAS_SC_CONTROL_RENDER_MODE__MASK;
+}
+#define A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK 0x00000380
+#define A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT 7
+static inline uint32_t A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT) & A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK;
+}
+#define A4XX_GRAS_SC_CONTROL_MSAA_DISABLE 0x00000800
+#define A4XX_GRAS_SC_CONTROL_RASTER_MODE__MASK 0x0000f000
+#define A4XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT 12
+static inline uint32_t A4XX_GRAS_SC_CONTROL_RASTER_MODE(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT) & A4XX_GRAS_SC_CONTROL_RASTER_MODE__MASK;
+}
+
+#define REG_A4XX_UCHE_CACHE_MODE_CONTROL 0x00000e80
+
+#define REG_A4XX_UCHE_TRAP_BASE_LO 0x00000e83
+
+#define REG_A4XX_UCHE_TRAP_BASE_HI 0x00000e84
+
+#define REG_A4XX_UCHE_CACHE_STATUS 0x00000e88
+
+#define REG_A4XX_UCHE_INVALIDATE0 0x00000e8a
+
+#define REG_A4XX_UCHE_INVALIDATE1 0x00000e8b
+
+#define REG_A4XX_UCHE_CACHE_WAYS_VFD 0x00000e8c
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_7 0x00000e95
+
+#define REG_A4XX_HLSQ_TIMEOUT_THRESHOLD 0x00000e00
+
+#define REG_A4XX_HLSQ_DEBUG_ECO_CONTROL 0x00000e04
+
+#define REG_A4XX_HLSQ_PERF_PIPE_MASK 0x00000e0e
+
+#define REG_A4XX_HLSQ_CONTROL_0_REG 0x000023c0
+#define A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000010
+#define A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT 4
+static inline uint32_t A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT) & A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK;
+}
+#define A4XX_HLSQ_CONTROL_0_REG_FSSUPERTHREADENABLE 0x00000040
+#define A4XX_HLSQ_CONTROL_0_REG_SPSHADERRESTART 0x00000200
+#define A4XX_HLSQ_CONTROL_0_REG_RESERVED2 0x00000400
+#define A4XX_HLSQ_CONTROL_0_REG_CHUNKDISABLE 0x04000000
+#define A4XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK 0x08000000
+#define A4XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT 27
+static inline uint32_t A4XX_HLSQ_CONTROL_0_REG_CONSTMODE(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT) & A4XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK;
+}
+#define A4XX_HLSQ_CONTROL_0_REG_LAZYUPDATEDISABLE 0x10000000
+#define A4XX_HLSQ_CONTROL_0_REG_SPCONSTFULLUPDATE 0x20000000
+#define A4XX_HLSQ_CONTROL_0_REG_TPFULLUPDATE 0x40000000
+#define A4XX_HLSQ_CONTROL_0_REG_SINGLECONTEXT 0x80000000
+
+#define REG_A4XX_HLSQ_CONTROL_1_REG 0x000023c1
+#define A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK 0x00000040
+#define A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT 6
+static inline uint32_t A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT) & A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK;
+}
+#define A4XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE 0x00000100
+#define A4XX_HLSQ_CONTROL_1_REG_RESERVED1 0x00000200
+#define A4XX_HLSQ_CONTROL_1_REG_ZWCOORD 0x02000000
+
+#define REG_A4XX_HLSQ_CONTROL_2_REG 0x000023c2
+#define A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK 0xfc000000
+#define A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT 26
+static inline uint32_t A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT) & A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK;
+}
+
+#define REG_A4XX_HLSQ_CONTROL_3_REG 0x000023c3
+#define A4XX_HLSQ_CONTROL_3_REG_REGID__MASK 0x000000ff
+#define A4XX_HLSQ_CONTROL_3_REG_REGID__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CONTROL_3_REG_REGID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_3_REG_REGID__SHIFT) & A4XX_HLSQ_CONTROL_3_REG_REGID__MASK;
+}
+
+#define REG_A4XX_HLSQ_VS_CONTROL_REG 0x000023c5
+#define A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x0000ff00
+#define A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
+static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
+#define A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
+static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+#define A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A4XX_HLSQ_FS_CONTROL_REG 0x000023c6
+#define A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x0000ff00
+#define A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
+static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
+#define A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
+static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+#define A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A4XX_HLSQ_HS_CONTROL_REG 0x000023c7
+#define A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x0000ff00
+#define A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
+static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
+#define A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
+static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+#define A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A4XX_HLSQ_DS_CONTROL_REG 0x000023c8
+#define A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x0000ff00
+#define A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
+static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
+#define A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
+static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+#define A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A4XX_HLSQ_GS_CONTROL_REG 0x000023c9
+#define A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x0000ff00
+#define A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
+static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
+#define A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
+static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+#define A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A4XX_HLSQ_UPDATE_CONTROL 0x000023db
+
+#define REG_A4XX_PC_BINNING_COMMAND 0x00000d00
+#define A4XX_PC_BINNING_COMMAND_BINNING_ENABLE 0x00000001
+
+#define REG_A4XX_PC_DRAWCALL_SETUP_OVERRIDE 0x00000d0c
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_0 0x00000d10
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_7 0x00000d17
+
+#define REG_A4XX_PC_BIN_BASE 0x000021c0
+
+#define REG_A4XX_PC_PRIM_VTX_CNTL 0x000021c4
+#define A4XX_PC_PRIM_VTX_CNTL_VAROUT 0x00000001
+#define A4XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000
+#define A4XX_PC_PRIM_VTX_CNTL_PSIZE 0x04000000
+
+#define REG_A4XX_UNKNOWN_21C5 0x000021c5
+
+#define REG_A4XX_PC_RESTART_INDEX 0x000021c6
+
+#define REG_A4XX_PC_GS_PARAM 0x000021e5
+
+#define REG_A4XX_PC_HS_PARAM 0x000021e7
+
+#define REG_A4XX_VBIF_VERSION 0x00003000
+
+#define REG_A4XX_VBIF_CLKON 0x00003001
+#define A4XX_VBIF_CLKON_FORCE_ON_TESTBUS 0x00000001
+
+#define REG_A4XX_VBIF_ABIT_SORT 0x0000301c
+
+#define REG_A4XX_VBIF_ABIT_SORT_CONF 0x0000301d
+
+#define REG_A4XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
+
+#define REG_A4XX_VBIF_IN_RD_LIM_CONF0 0x0000302c
+
+#define REG_A4XX_VBIF_IN_RD_LIM_CONF1 0x0000302d
+
+#define REG_A4XX_VBIF_IN_WR_LIM_CONF0 0x00003030
+
+#define REG_A4XX_VBIF_IN_WR_LIM_CONF1 0x00003031
+
+#define REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB 0x00003049
+
+#define REG_A4XX_UNKNOWN_0CC5 0x00000cc5
+
+#define REG_A4XX_UNKNOWN_0CC6 0x00000cc6
+
+#define REG_A4XX_UNKNOWN_0D01 0x00000d01
+
+#define REG_A4XX_UNKNOWN_0E05 0x00000e05
+
+#define REG_A4XX_UNKNOWN_0E42 0x00000e42
+
+#define REG_A4XX_UNKNOWN_0EC2 0x00000ec2
+
+#define REG_A4XX_UNKNOWN_0EC3 0x00000ec3
+
+#define REG_A4XX_UNKNOWN_0F03 0x00000f03
+
+#define REG_A4XX_UNKNOWN_2001 0x00002001
+
+#define REG_A4XX_UNKNOWN_209B 0x0000209b
+
+#define REG_A4XX_UNKNOWN_20EF 0x000020ef
+
+#define REG_A4XX_UNKNOWN_20F0 0x000020f0
+
+#define REG_A4XX_UNKNOWN_20F1 0x000020f1
+
+#define REG_A4XX_UNKNOWN_20F2 0x000020f2
+
+#define REG_A4XX_UNKNOWN_20F3 0x000020f3
+
+#define REG_A4XX_UNKNOWN_20F4 0x000020f4
+
+#define REG_A4XX_UNKNOWN_20F5 0x000020f5
+
+#define REG_A4XX_UNKNOWN_20F6 0x000020f6
+
+#define REG_A4XX_UNKNOWN_20F7 0x000020f7
+
+#define REG_A4XX_UNKNOWN_2152 0x00002152
+
+#define REG_A4XX_UNKNOWN_2153 0x00002153
+
+#define REG_A4XX_UNKNOWN_2154 0x00002154
+
+#define REG_A4XX_UNKNOWN_2155 0x00002155
+
+#define REG_A4XX_UNKNOWN_2156 0x00002156
+
+#define REG_A4XX_UNKNOWN_2157 0x00002157
+
+#define REG_A4XX_UNKNOWN_21C3 0x000021c3
+
+#define REG_A4XX_UNKNOWN_21E6 0x000021e6
+
+#define REG_A4XX_UNKNOWN_2209 0x00002209
+
+#define REG_A4XX_UNKNOWN_22D7 0x000022d7
+
+#define REG_A4XX_UNKNOWN_2381 0x00002381
+
+#define REG_A4XX_UNKNOWN_23A0 0x000023a0
+
+#define REG_A4XX_TEX_SAMP_0 0x00000000
+#define A4XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006
+#define A4XX_TEX_SAMP_0_XY_MAG__SHIFT 1
+static inline uint32_t A4XX_TEX_SAMP_0_XY_MAG(enum a4xx_tex_filter val)
+{
+ return ((val) << A4XX_TEX_SAMP_0_XY_MAG__SHIFT) & A4XX_TEX_SAMP_0_XY_MAG__MASK;
+}
+#define A4XX_TEX_SAMP_0_XY_MIN__MASK 0x00000018
+#define A4XX_TEX_SAMP_0_XY_MIN__SHIFT 3
+static inline uint32_t A4XX_TEX_SAMP_0_XY_MIN(enum a4xx_tex_filter val)
+{
+ return ((val) << A4XX_TEX_SAMP_0_XY_MIN__SHIFT) & A4XX_TEX_SAMP_0_XY_MIN__MASK;
+}
+#define A4XX_TEX_SAMP_0_WRAP_S__MASK 0x000000e0
+#define A4XX_TEX_SAMP_0_WRAP_S__SHIFT 5
+static inline uint32_t A4XX_TEX_SAMP_0_WRAP_S(enum a4xx_tex_clamp val)
+{
+ return ((val) << A4XX_TEX_SAMP_0_WRAP_S__SHIFT) & A4XX_TEX_SAMP_0_WRAP_S__MASK;
+}
+#define A4XX_TEX_SAMP_0_WRAP_T__MASK 0x00000700
+#define A4XX_TEX_SAMP_0_WRAP_T__SHIFT 8
+static inline uint32_t A4XX_TEX_SAMP_0_WRAP_T(enum a4xx_tex_clamp val)
+{
+ return ((val) << A4XX_TEX_SAMP_0_WRAP_T__SHIFT) & A4XX_TEX_SAMP_0_WRAP_T__MASK;
+}
+#define A4XX_TEX_SAMP_0_WRAP_R__MASK 0x00003800
+#define A4XX_TEX_SAMP_0_WRAP_R__SHIFT 11
+static inline uint32_t A4XX_TEX_SAMP_0_WRAP_R(enum a4xx_tex_clamp val)
+{
+ return ((val) << A4XX_TEX_SAMP_0_WRAP_R__SHIFT) & A4XX_TEX_SAMP_0_WRAP_R__MASK;
+}
+
+#define REG_A4XX_TEX_SAMP_1 0x00000001
+#define A4XX_TEX_SAMP_1_COMPARE_FUNC__MASK 0x0000000e
+#define A4XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT 1
+static inline uint32_t A4XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A4XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A4XX_TEX_SAMP_1_COMPARE_FUNC__MASK;
+}
+#define A4XX_TEX_SAMP_1_MAX_LOD__MASK 0x000fff00
+#define A4XX_TEX_SAMP_1_MAX_LOD__SHIFT 8
+static inline uint32_t A4XX_TEX_SAMP_1_MAX_LOD(float val)
+{
+ return ((((uint32_t)(val * 64.0))) << A4XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A4XX_TEX_SAMP_1_MAX_LOD__MASK;
+}
+#define A4XX_TEX_SAMP_1_MIN_LOD__MASK 0xfff00000
+#define A4XX_TEX_SAMP_1_MIN_LOD__SHIFT 20
+static inline uint32_t A4XX_TEX_SAMP_1_MIN_LOD(float val)
+{
+ return ((((uint32_t)(val * 64.0))) << A4XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A4XX_TEX_SAMP_1_MIN_LOD__MASK;
+}
+
+#define REG_A4XX_TEX_CONST_0 0x00000000
+#define A4XX_TEX_CONST_0_TILED 0x00000001
+#define A4XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
+#define A4XX_TEX_CONST_0_SWIZ_X__SHIFT 4
+static inline uint32_t A4XX_TEX_CONST_0_SWIZ_X(enum a4xx_tex_swiz val)
+{
+ return ((val) << A4XX_TEX_CONST_0_SWIZ_X__SHIFT) & A4XX_TEX_CONST_0_SWIZ_X__MASK;
+}
+#define A4XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380
+#define A4XX_TEX_CONST_0_SWIZ_Y__SHIFT 7
+static inline uint32_t A4XX_TEX_CONST_0_SWIZ_Y(enum a4xx_tex_swiz val)
+{
+ return ((val) << A4XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A4XX_TEX_CONST_0_SWIZ_Y__MASK;
+}
+#define A4XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00
+#define A4XX_TEX_CONST_0_SWIZ_Z__SHIFT 10
+static inline uint32_t A4XX_TEX_CONST_0_SWIZ_Z(enum a4xx_tex_swiz val)
+{
+ return ((val) << A4XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A4XX_TEX_CONST_0_SWIZ_Z__MASK;
+}
+#define A4XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000
+#define A4XX_TEX_CONST_0_SWIZ_W__SHIFT 13
+static inline uint32_t A4XX_TEX_CONST_0_SWIZ_W(enum a4xx_tex_swiz val)
+{
+ return ((val) << A4XX_TEX_CONST_0_SWIZ_W__SHIFT) & A4XX_TEX_CONST_0_SWIZ_W__MASK;
+}
+#define A4XX_TEX_CONST_0_FMT__MASK 0x1fc00000
+#define A4XX_TEX_CONST_0_FMT__SHIFT 22
+static inline uint32_t A4XX_TEX_CONST_0_FMT(enum a4xx_tex_fmt val)
+{
+ return ((val) << A4XX_TEX_CONST_0_FMT__SHIFT) & A4XX_TEX_CONST_0_FMT__MASK;
+}
+#define A4XX_TEX_CONST_0_TYPE__MASK 0x60000000
+#define A4XX_TEX_CONST_0_TYPE__SHIFT 29
+static inline uint32_t A4XX_TEX_CONST_0_TYPE(enum a4xx_tex_type val)
+{
+ return ((val) << A4XX_TEX_CONST_0_TYPE__SHIFT) & A4XX_TEX_CONST_0_TYPE__MASK;
+}
+
+#define REG_A4XX_TEX_CONST_1 0x00000001
+#define A4XX_TEX_CONST_1_HEIGHT__MASK 0x00007fff
+#define A4XX_TEX_CONST_1_HEIGHT__SHIFT 0
+static inline uint32_t A4XX_TEX_CONST_1_HEIGHT(uint32_t val)
+{
+ return ((val) << A4XX_TEX_CONST_1_HEIGHT__SHIFT) & A4XX_TEX_CONST_1_HEIGHT__MASK;
+}
+#define A4XX_TEX_CONST_1_WIDTH__MASK 0x1fff8000
+#define A4XX_TEX_CONST_1_WIDTH__SHIFT 15
+static inline uint32_t A4XX_TEX_CONST_1_WIDTH(uint32_t val)
+{
+ return ((val) << A4XX_TEX_CONST_1_WIDTH__SHIFT) & A4XX_TEX_CONST_1_WIDTH__MASK;
+}
+
+#define REG_A4XX_TEX_CONST_2 0x00000002
+#define A4XX_TEX_CONST_2_PITCH__MASK 0x3ffffe00
+#define A4XX_TEX_CONST_2_PITCH__SHIFT 9
+static inline uint32_t A4XX_TEX_CONST_2_PITCH(uint32_t val)
+{
+ return ((val) << A4XX_TEX_CONST_2_PITCH__SHIFT) & A4XX_TEX_CONST_2_PITCH__MASK;
+}
+#define A4XX_TEX_CONST_2_SWAP__MASK 0xc0000000
+#define A4XX_TEX_CONST_2_SWAP__SHIFT 30
+static inline uint32_t A4XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A4XX_TEX_CONST_2_SWAP__SHIFT) & A4XX_TEX_CONST_2_SWAP__MASK;
+}
+
+#define REG_A4XX_TEX_CONST_3 0x00000003
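+/* LAYERSZ appears to be stored in units of 4KB blocks, hence the ">> 12" in the helper below. */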
+#define A4XX_TEX_CONST_3_LAYERSZ__MASK 0x0000000f
+#define A4XX_TEX_CONST_3_LAYERSZ__SHIFT 0
+static inline uint32_t A4XX_TEX_CONST_3_LAYERSZ(uint32_t val)
+{
+ return ((val >> 12) << A4XX_TEX_CONST_3_LAYERSZ__SHIFT) & A4XX_TEX_CONST_3_LAYERSZ__MASK;
+}
+
+#define REG_A4XX_TEX_CONST_4 0x00000004
+#define A4XX_TEX_CONST_4_BASE__MASK 0xffffffff
+#define A4XX_TEX_CONST_4_BASE__SHIFT 0
+static inline uint32_t A4XX_TEX_CONST_4_BASE(uint32_t val)
+{
+ return ((val) << A4XX_TEX_CONST_4_BASE__SHIFT) & A4XX_TEX_CONST_4_BASE__MASK;
+}
+
+#define REG_A4XX_TEX_CONST_5 0x00000005
+
+#define REG_A4XX_TEX_CONST_6 0x00000006
+
+#define REG_A4XX_TEX_CONST_7 0x00000007
+
+
+#endif /* A4XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
new file mode 100644
index 00000000000..91221836c5a
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -0,0 +1,604 @@
+/* Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include "a4xx_gpu.h"
+#ifdef CONFIG_MSM_OCMEM
+# include <soc/qcom/ocmem.h>
+#endif
+
+#define A4XX_INT0_MASK \
+ (A4XX_INT0_RBBM_AHB_ERROR | \
+ A4XX_INT0_RBBM_ATB_BUS_OVERFLOW | \
+ A4XX_INT0_CP_T0_PACKET_IN_IB | \
+ A4XX_INT0_CP_OPCODE_ERROR | \
+ A4XX_INT0_CP_RESERVED_BIT_ERROR | \
+ A4XX_INT0_CP_HW_FAULT | \
+ A4XX_INT0_CP_IB1_INT | \
+ A4XX_INT0_CP_IB2_INT | \
+ A4XX_INT0_CP_RB_INT | \
+ A4XX_INT0_CP_REG_PROTECT_FAULT | \
+ A4XX_INT0_CP_AHB_ERROR_HALT | \
+ A4XX_INT0_UCHE_OOB_ACCESS)
+
+extern bool hang_debug;
+static void a4xx_dump(struct msm_gpu *gpu);
+
+/*
+ * a4xx_enable_hwcg() - Program the clock control registers
+ * @gpu: Pointer to the msm_gpu structure
+ */
+static void a4xx_enable_hwcg(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ unsigned int i;
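+ /* The clock-control values below are opaque defaults (presumably matching the vendor driver); they are not broken out into bitfields here. */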
+ for (i = 0; i < 4; i++)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_TP(i), 0x02222202);
+ for (i = 0; i < 4; i++)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_TP(i), 0x00002222);
+ for (i = 0; i < 4; i++)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_TP(i), 0x0E739CE7);
+ for (i = 0; i < 4; i++)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_TP(i), 0x00111111);
+ for (i = 0; i < 4; i++)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_SP(i), 0x22222222);
+ for (i = 0; i < 4; i++)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_SP(i), 0x00222222);
+ for (i = 0; i < 4; i++)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_SP(i), 0x00000104);
+ for (i = 0; i < 4; i++)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_SP(i), 0x00000081);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_UCHE, 0x22222222);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_UCHE, 0x02222222);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL3_UCHE, 0x00000000);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL4_UCHE, 0x00000000);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_UCHE, 0x00004444);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_UCHE, 0x00001112);
+ for (i = 0; i < 4; i++)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_RB(i), 0x22222222);
+
+ /* Disable L1 clocking in A420 due to CCU issues with it */
+ for (i = 0; i < 4; i++) {
+ if (adreno_is_a420(adreno_gpu)) {
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_RB(i),
+ 0x00002020);
+ } else {
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_RB(i),
+ 0x00022020);
+ }
+ }
+
+ for (i = 0; i < 4; i++) {
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU(i),
+ 0x00000922);
+ }
+
+ for (i = 0; i < 4; i++) {
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU(i),
+ 0x00000000);
+ }
+
+ for (i = 0; i < 4; i++) {
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(i),
+ 0x00000001);
+ }
+
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_MODE_GPC, 0x02222222);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_GPC, 0x04100104);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_GPC, 0x00022222);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_COM_DCOM, 0x00000022);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_COM_DCOM, 0x0000010F);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_COM_DCOM, 0x00000022);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_TSE_RAS_RBBM, 0x00222222);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00004104);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00000222);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_HLSQ, 0x00000000);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, 0x00020000);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0xAAAAAAAA);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2, 0);
+}
+
+static void a4xx_me_init(struct msm_gpu *gpu)
+{
+ struct msm_ringbuffer *ring = gpu->rb;
+
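+ /* CP_ME_INIT: hand the micro engine its default state (17 opaque init dwords). */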
+ OUT_PKT3(ring, CP_ME_INIT, 17);
+ OUT_RING(ring, 0x000003f7);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000080);
+ OUT_RING(ring, 0x00000100);
+ OUT_RING(ring, 0x00000180);
+ OUT_RING(ring, 0x00006600);
+ OUT_RING(ring, 0x00000150);
+ OUT_RING(ring, 0x0000014e);
+ OUT_RING(ring, 0x00000154);
+ OUT_RING(ring, 0x00000001);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+
+ gpu->funcs->flush(gpu);
+ gpu->funcs->idle(gpu);
+}
+
+static int a4xx_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a4xx_gpu *a4xx_gpu = to_a4xx_gpu(adreno_gpu);
+ uint32_t *ptr, len;
+ int i, ret;
+
+ if (adreno_is_a4xx(adreno_gpu)) {
+ gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT, 0x0001001F);
+ gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT_CONF, 0x000000A4);
+ gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001);
+ gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
+ gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF1, 0x00000018);
+ gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
+ gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018);
+ gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
+ } else {
+ BUG();
+ }
+
+ /* Make all blocks contribute to the GPU BUSY perf counter */
+ gpu_write(gpu, REG_A4XX_RBBM_GPU_BUSY_MASKED, 0xffffffff);
+
+ /* Tune the hysteresis counters for SP and CP idle detection */
+ gpu_write(gpu, REG_A4XX_RBBM_SP_HYST_CNT, 0x10);
+ gpu_write(gpu, REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);
+
+ /* Enable the RBBM error reporting bits */
+ gpu_write(gpu, REG_A4XX_RBBM_AHB_CTL0, 0x00000001);
+
+ /* Enable AHB error reporting */
+ gpu_write(gpu, REG_A4XX_RBBM_AHB_CTL1, 0xa6ffffff);
+
+ /* Enable power counters */
+ gpu_write(gpu, REG_A4XX_RBBM_RBBM_CTL, 0x00000030);
+
+ /*
+ * Turn on hang detection - this spews a lot of useful information
+ * into the RBBM registers on a hang:
+ */
+ gpu_write(gpu, REG_A4XX_RBBM_INTERFACE_HANG_INT_CTL,
+ (1 << 30) | 0xFFFF);
+
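+ /* Point the RB at GMEM (OCMEM); the base address appears to be programmed in 16KB units, hence the ">> 14". */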
+ gpu_write(gpu, REG_A4XX_RB_GMEM_BASE_ADDR,
+ (unsigned int)(a4xx_gpu->ocmem_base >> 14));
+
+ /* Turn on performance counters: */
+ gpu_write(gpu, REG_A4XX_RBBM_PERFCTR_CTL, 0x01);
+
+ /* Disable L2 bypass to avoid UCHE out of bounds errors */
+ gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_LO, 0xffff0000);
+ gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_HI, 0xffff0000);
+
+ gpu_write(gpu, REG_A4XX_CP_DEBUG, (1 << 25) |
+ (adreno_is_a420(adreno_gpu) ? (1 << 29) : 0));
+
+ a4xx_enable_hwcg(gpu);
+
+ /*
+ * For A420 set RBBM_CLOCK_DELAY_HLSQ.CGC_HLSQ_TP_EARLY_CYC >= 2
+ * due to timing issue with HLSQ_TP_CLK_EN
+ */
+ if (adreno_is_a420(adreno_gpu)) {
+ unsigned int val;
+ val = gpu_read(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ);
+ val &= ~A4XX_CGC_HLSQ_EARLY_CYC__MASK;
+ val |= 2 << A4XX_CGC_HLSQ_EARLY_CYC__SHIFT;
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, val);
+ }
+
+ ret = adreno_hw_init(gpu);
+ if (ret)
+ return ret;
+
+ /* setup access protection: */
+ gpu_write(gpu, REG_A4XX_CP_PROTECT_CTRL, 0x00000007);
+
+ /* RBBM registers */
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(0), 0x62000010);
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(1), 0x63000020);
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(2), 0x64000040);
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(3), 0x65000080);
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(4), 0x66000100);
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(5), 0x64000200);
+
+ /* CP registers */
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(6), 0x67000800);
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(7), 0x64001600);
+
+
+ /* RB registers */
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(8), 0x60003300);
+
+ /* HLSQ registers */
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(9), 0x60003800);
+
+ /* VPC registers */
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(10), 0x61003980);
+
+ /* SMMU registers */
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(11), 0x6e010000);
+
+ gpu_write(gpu, REG_A4XX_RBBM_INT_0_MASK, A4XX_INT0_MASK);
+
+ ret = adreno_hw_init(gpu);
+ if (ret)
+ return ret;
+
+ /* Load PM4: */
+ ptr = (uint32_t *)(adreno_gpu->pm4->data);
+ len = adreno_gpu->pm4->size / 4;
+ DBG("loading PM4 ucode version: %u", ptr[0]);
+ gpu_write(gpu, REG_A4XX_CP_ME_RAM_WADDR, 0);
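+ /* word 0 of the firmware image is the version (logged above), so the upload starts at index 1 */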
+ for (i = 1; i < len; i++)
+ gpu_write(gpu, REG_A4XX_CP_ME_RAM_DATA, ptr[i]);
+
+ /* Load PFP: */
+ ptr = (uint32_t *)(adreno_gpu->pfp->data);
+ len = adreno_gpu->pfp->size / 4;
+ DBG("loading PFP ucode version: %u", ptr[0]);
+
+ gpu_write(gpu, REG_A4XX_CP_PFP_UCODE_ADDR, 0);
+ for (i = 1; i < len; i++)
+ gpu_write(gpu, REG_A4XX_CP_PFP_UCODE_DATA, ptr[i]);
+
+ /* clear ME_HALT to start micro engine */
+ gpu_write(gpu, REG_A4XX_CP_ME_CNTL, 0);
+
+ a4xx_me_init(gpu);
+ return 0;
+}
+
+static void a4xx_recover(struct msm_gpu *gpu)
+{
+ /* dump registers before resetting gpu, if enabled: */
+ if (hang_debug)
+ a4xx_dump(gpu);
+
+ gpu_write(gpu, REG_A4XX_RBBM_SW_RESET_CMD, 1);
+ gpu_read(gpu, REG_A4XX_RBBM_SW_RESET_CMD);
+ gpu_write(gpu, REG_A4XX_RBBM_SW_RESET_CMD, 0);
+ adreno_recover(gpu);
+}
+
+static void a4xx_destroy(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a4xx_gpu *a4xx_gpu = to_a4xx_gpu(adreno_gpu);
+
+ DBG("%s", gpu->name);
+
+ adreno_gpu_cleanup(adreno_gpu);
+
+#ifdef CONFIG_MSM_OCMEM
+ if (a4xx_gpu->ocmem_base)
+ ocmem_free(OCMEM_GRAPHICS, a4xx_gpu->ocmem_hdl);
+#endif
+
+ kfree(a4xx_gpu);
+}
+
+static void a4xx_idle(struct msm_gpu *gpu)
+{
+ /* wait for ringbuffer to drain: */
+ adreno_idle(gpu);
+
+ /* then wait for GPU to finish: */
+ if (spin_until(!(gpu_read(gpu, REG_A4XX_RBBM_STATUS) &
+ A4XX_RBBM_STATUS_GPU_BUSY)))
+ DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
+
+ /* TODO maybe we need to reset GPU here to recover from hang? */
+}
+
+static irqreturn_t a4xx_irq(struct msm_gpu *gpu)
+{
+ uint32_t status;
+
+ status = gpu_read(gpu, REG_A4XX_RBBM_INT_0_STATUS);
+ DBG("%s: Int status %08x", gpu->name, status);
+
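+ /* write the observed status back to ack the interrupts */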
+ gpu_write(gpu, REG_A4XX_RBBM_INT_CLEAR_CMD, status);
+
+ msm_gpu_retire(gpu);
+
+ return IRQ_HANDLED;
+}
+
+static const unsigned int a4xx_registers[] = {
+ /* RBBM */
+ 0x0000, 0x0002, 0x0004, 0x0021, 0x0023, 0x0024, 0x0026, 0x0026,
+ 0x0028, 0x002B, 0x002E, 0x0034, 0x0037, 0x0044, 0x0047, 0x0066,
+ 0x0068, 0x0095, 0x009C, 0x0170, 0x0174, 0x01AF,
+ /* CP */
+ 0x0200, 0x0233, 0x0240, 0x0250, 0x04C0, 0x04DD, 0x0500, 0x050B,
+ 0x0578, 0x058F,
+ /* VSC */
+ 0x0C00, 0x0C03, 0x0C08, 0x0C41, 0x0C50, 0x0C51,
+ /* GRAS */
+ 0x0C80, 0x0C81, 0x0C88, 0x0C8F,
+ /* RB */
+ 0x0CC0, 0x0CC0, 0x0CC4, 0x0CD2,
+ /* PC */
+ 0x0D00, 0x0D0C, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
+ /* VFD */
+ 0x0E40, 0x0E4A,
+ /* VPC */
+ 0x0E60, 0x0E61, 0x0E63, 0x0E68,
+ /* UCHE */
+ 0x0E80, 0x0E84, 0x0E88, 0x0E95,
+ /* VMIDMT */
+ 0x1000, 0x1000, 0x1002, 0x1002, 0x1004, 0x1004, 0x1008, 0x100A,
+ 0x100C, 0x100D, 0x100F, 0x1010, 0x1012, 0x1016, 0x1024, 0x1024,
+ 0x1027, 0x1027, 0x1100, 0x1100, 0x1102, 0x1102, 0x1104, 0x1104,
+ 0x1110, 0x1110, 0x1112, 0x1116, 0x1124, 0x1124, 0x1300, 0x1300,
+ 0x1380, 0x1380,
+ /* GRAS CTX 0 */
+ 0x2000, 0x2004, 0x2008, 0x2067, 0x2070, 0x2078, 0x207B, 0x216E,
+ /* PC CTX 0 */
+ 0x21C0, 0x21C6, 0x21D0, 0x21D0, 0x21D9, 0x21D9, 0x21E5, 0x21E7,
+ /* VFD CTX 0 */
+ 0x2200, 0x2204, 0x2208, 0x22A9,
+ /* GRAS CTX 1 */
+ 0x2400, 0x2404, 0x2408, 0x2467, 0x2470, 0x2478, 0x247B, 0x256E,
+ /* PC CTX 1 */
+ 0x25C0, 0x25C6, 0x25D0, 0x25D0, 0x25D9, 0x25D9, 0x25E5, 0x25E7,
+ /* VFD CTX 1 */
+ 0x2600, 0x2604, 0x2608, 0x26A9,
+ /* XPU */
+ 0x2C00, 0x2C01, 0x2C10, 0x2C10, 0x2C12, 0x2C16, 0x2C1D, 0x2C20,
+ 0x2C28, 0x2C28, 0x2C30, 0x2C30, 0x2C32, 0x2C36, 0x2C40, 0x2C40,
+ 0x2C50, 0x2C50, 0x2C52, 0x2C56, 0x2C80, 0x2C80, 0x2C94, 0x2C95,
+ /* VBIF */
+ 0x3000, 0x3007, 0x300C, 0x3014, 0x3018, 0x301D, 0x3020, 0x3022,
+ 0x3024, 0x3026, 0x3028, 0x302A, 0x302C, 0x302D, 0x3030, 0x3031,
+ 0x3034, 0x3036, 0x3038, 0x3038, 0x303C, 0x303D, 0x3040, 0x3040,
+ 0x3049, 0x3049, 0x3058, 0x3058, 0x305B, 0x3061, 0x3064, 0x3068,
+ 0x306C, 0x306D, 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094,
+ 0x3098, 0x3098, 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8,
+ 0x30D0, 0x30D0, 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100,
+ 0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
+ 0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x330C, 0x330C,
+ 0x3310, 0x3310, 0x3400, 0x3401, 0x3410, 0x3410, 0x3412, 0x3416,
+ 0x341D, 0x3420, 0x3428, 0x3428, 0x3430, 0x3430, 0x3432, 0x3436,
+ 0x3440, 0x3440, 0x3450, 0x3450, 0x3452, 0x3456, 0x3480, 0x3480,
+ 0x3494, 0x3495, 0x4000, 0x4000, 0x4002, 0x4002, 0x4004, 0x4004,
+ 0x4008, 0x400A, 0x400C, 0x400D, 0x400F, 0x4012, 0x4014, 0x4016,
+ 0x401D, 0x401D, 0x4020, 0x4027, 0x4060, 0x4062, 0x4200, 0x4200,
+ 0x4300, 0x4300, 0x4400, 0x4400, 0x4500, 0x4500, 0x4800, 0x4802,
+ 0x480F, 0x480F, 0x4811, 0x4811, 0x4813, 0x4813, 0x4815, 0x4816,
+ 0x482B, 0x482B, 0x4857, 0x4857, 0x4883, 0x4883, 0x48AF, 0x48AF,
+ 0x48C5, 0x48C5, 0x48E5, 0x48E5, 0x4905, 0x4905, 0x4925, 0x4925,
+ 0x4945, 0x4945, 0x4950, 0x4950, 0x495B, 0x495B, 0x4980, 0x498E,
+ 0x4B00, 0x4B00, 0x4C00, 0x4C00, 0x4D00, 0x4D00, 0x4E00, 0x4E00,
+ 0x4E80, 0x4E80, 0x4F00, 0x4F00, 0x4F08, 0x4F08, 0x4F10, 0x4F10,
+ 0x4F18, 0x4F18, 0x4F20, 0x4F20, 0x4F30, 0x4F30, 0x4F60, 0x4F60,
+ 0x4F80, 0x4F81, 0x4F88, 0x4F89, 0x4FEE, 0x4FEE, 0x4FF3, 0x4FF3,
+ 0x6000, 0x6001, 0x6008, 0x600F, 0x6014, 0x6016, 0x6018, 0x601B,
+ 0x61FD, 0x61FD, 0x623C, 0x623C, 0x6380, 0x6380, 0x63A0, 0x63A0,
+ 0x63C0, 0x63C1, 0x63C8, 0x63C9, 0x63D0, 0x63D4, 0x63D6, 0x63D6,
+ 0x63EE, 0x63EE, 0x6400, 0x6401, 0x6408, 0x640F, 0x6414, 0x6416,
+ 0x6418, 0x641B, 0x65FD, 0x65FD, 0x663C, 0x663C, 0x6780, 0x6780,
+ 0x67A0, 0x67A0, 0x67C0, 0x67C1, 0x67C8, 0x67C9, 0x67D0, 0x67D4,
+ 0x67D6, 0x67D6, 0x67EE, 0x67EE, 0x6800, 0x6801, 0x6808, 0x680F,
+ 0x6814, 0x6816, 0x6818, 0x681B, 0x69FD, 0x69FD, 0x6A3C, 0x6A3C,
+ 0x6B80, 0x6B80, 0x6BA0, 0x6BA0, 0x6BC0, 0x6BC1, 0x6BC8, 0x6BC9,
+ 0x6BD0, 0x6BD4, 0x6BD6, 0x6BD6, 0x6BEE, 0x6BEE,
+ ~0 /* sentinel */
+};
+
+#ifdef CONFIG_DEBUG_FS
+static void a4xx_show(struct msm_gpu *gpu, struct seq_file *m)
+{
+ gpu->funcs->pm_resume(gpu);
+
+ seq_printf(m, "status: %08x\n",
+ gpu_read(gpu, REG_A4XX_RBBM_STATUS));
+ gpu->funcs->pm_suspend(gpu);
+
+ adreno_show(gpu, m);
+}
+#endif
+
+/* Register offset defines for A4XX, in order of enum adreno_regs */
+static const unsigned int a4xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_DEBUG, REG_A4XX_CP_DEBUG),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_WADDR, REG_A4XX_CP_ME_RAM_WADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_DATA, REG_A4XX_CP_ME_RAM_DATA),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_DATA,
+ REG_A4XX_CP_PFP_UCODE_DATA),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_ADDR,
+ REG_A4XX_CP_PFP_UCODE_ADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_WFI_PEND_CTR, REG_A4XX_CP_WFI_PEND_CTR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A4XX_CP_RB_BASE),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A4XX_CP_RB_RPTR_ADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A4XX_CP_RB_RPTR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A4XX_CP_RB_WPTR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_CTRL, REG_A4XX_CP_PROTECT_CTRL),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_CNTL, REG_A4XX_CP_ME_CNTL),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A4XX_CP_RB_CNTL),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BASE, REG_A4XX_CP_IB1_BASE),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BUFSZ, REG_A4XX_CP_IB1_BUFSZ),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BASE, REG_A4XX_CP_IB2_BASE),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BUFSZ, REG_A4XX_CP_IB2_BUFSZ),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_TIMESTAMP, REG_AXXX_CP_SCRATCH_REG0),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_RADDR, REG_A4XX_CP_ME_RAM_RADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_ADDR, REG_A4XX_CP_ROQ_ADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_DATA, REG_A4XX_CP_ROQ_DATA),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_ADDR, REG_A4XX_CP_MERCIU_ADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA, REG_A4XX_CP_MERCIU_DATA),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA2, REG_A4XX_CP_MERCIU_DATA2),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_ADDR, REG_A4XX_CP_MEQ_ADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_DATA, REG_A4XX_CP_MEQ_DATA),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_HW_FAULT, REG_A4XX_CP_HW_FAULT),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_STATUS,
+ REG_A4XX_CP_PROTECT_STATUS),
+ REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_ADDR, REG_A4XX_CP_SCRATCH_ADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_UMSK, REG_A4XX_CP_SCRATCH_UMASK),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_STATUS, REG_A4XX_RBBM_STATUS),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_CTL,
+ REG_A4XX_RBBM_PERFCTR_CTL),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
+ REG_A4XX_RBBM_PERFCTR_LOAD_CMD0),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
+ REG_A4XX_RBBM_PERFCTR_LOAD_CMD1),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD2,
+ REG_A4XX_RBBM_PERFCTR_LOAD_CMD2),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
+ REG_A4XX_RBBM_PERFCTR_PWR_1_LO),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_MASK, REG_A4XX_RBBM_INT_0_MASK),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_STATUS,
+ REG_A4XX_RBBM_INT_0_STATUS),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ERROR_STATUS,
+ REG_A4XX_RBBM_AHB_ERROR_STATUS),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_CMD, REG_A4XX_RBBM_AHB_CMD),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_CLOCK_CTL, REG_A4XX_RBBM_CLOCK_CTL),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ME_SPLIT_STATUS,
+ REG_A4XX_RBBM_AHB_ME_SPLIT_STATUS),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_PFP_SPLIT_STATUS,
+ REG_A4XX_RBBM_AHB_PFP_SPLIT_STATUS),
+ REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_SEL,
+ REG_A4XX_VPC_DEBUG_RAM_SEL),
+ REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_READ,
+ REG_A4XX_VPC_DEBUG_RAM_READ),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_CLEAR_CMD,
+ REG_A4XX_RBBM_INT_CLEAR_CMD),
+ REG_ADRENO_DEFINE(REG_ADRENO_VSC_SIZE_ADDRESS,
+ REG_A4XX_VSC_SIZE_ADDRESS),
+ REG_ADRENO_DEFINE(REG_ADRENO_VFD_CONTROL_0, REG_A4XX_VFD_CONTROL_0),
+ REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
+ REG_A4XX_SP_VS_PVT_MEM_ADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
+ REG_A4XX_SP_FS_PVT_MEM_ADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_OBJ_START_REG,
+ REG_A4XX_SP_VS_OBJ_START),
+ REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_OBJ_START_REG,
+ REG_A4XX_SP_FS_OBJ_START),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_RBBM_CTL, REG_A4XX_RBBM_RBBM_CTL),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_SW_RESET_CMD,
+ REG_A4XX_RBBM_SW_RESET_CMD),
+ REG_ADRENO_DEFINE(REG_ADRENO_UCHE_INVALIDATE0,
+ REG_A4XX_UCHE_INVALIDATE0),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
+ REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_LO),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
+ REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_HI),
+};
+
+static void a4xx_dump(struct msm_gpu *gpu)
+{
+ printk("status: %08x\n",
+ gpu_read(gpu, REG_A4XX_RBBM_STATUS));
+ adreno_dump(gpu);
+}
+
+static const struct adreno_gpu_funcs funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .hw_init = a4xx_hw_init,
+ .pm_suspend = msm_gpu_pm_suspend,
+ .pm_resume = msm_gpu_pm_resume,
+ .recover = a4xx_recover,
+ .last_fence = adreno_last_fence,
+ .submit = adreno_submit,
+ .flush = adreno_flush,
+ .idle = a4xx_idle,
+ .irq = a4xx_irq,
+ .destroy = a4xx_destroy,
+#ifdef CONFIG_DEBUG_FS
+ .show = a4xx_show,
+#endif
+ },
+};
+
+struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
+{
+ struct a4xx_gpu *a4xx_gpu = NULL;
+ struct adreno_gpu *adreno_gpu;
+ struct msm_gpu *gpu;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ int ret;
+
+ if (!pdev) {
+ dev_err(dev->dev, "no a4xx device\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ a4xx_gpu = kzalloc(sizeof(*a4xx_gpu), GFP_KERNEL);
+ if (!a4xx_gpu) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ adreno_gpu = &a4xx_gpu->base;
+ gpu = &adreno_gpu->base;
+
+ a4xx_gpu->pdev = pdev;
+
+ gpu->perfcntrs = NULL;
+ gpu->num_perfcntrs = 0;
+
+ adreno_gpu->registers = a4xx_registers;
+ adreno_gpu->reg_offsets = a4xx_register_offsets;
+
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
+ if (ret)
+ goto fail;
+
+ /* if needed, allocate gmem: */
+ if (adreno_is_a4xx(adreno_gpu)) {
+#ifdef CONFIG_MSM_OCMEM
+ /* TODO this is different/missing upstream: */
+ struct ocmem_buf *ocmem_hdl =
+ ocmem_allocate(OCMEM_GRAPHICS, adreno_gpu->gmem);
+
+ a4xx_gpu->ocmem_hdl = ocmem_hdl;
+ a4xx_gpu->ocmem_base = ocmem_hdl->addr;
+ adreno_gpu->gmem = ocmem_hdl->len;
+ DBG("using %dK of OCMEM at 0x%08x", adreno_gpu->gmem / 1024,
+ a4xx_gpu->ocmem_base);
+#endif
+ }
+
+ if (!gpu->mmu) {
+ /* TODO we think it is possible to configure the GPU to
+ * restrict access to VRAM carveout. But the required
+ * registers are unknown. For now just bail out and
+ * limp along with just modesetting. If it turns out
+ * to not be possible to restrict access, then we must
+ * implement a cmdstream validator.
+ */
+ dev_err(dev->dev, "No memory protection without IOMMU\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ return gpu;
+
+fail:
+ if (a4xx_gpu)
+ a4xx_destroy(&a4xx_gpu->base.base);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.h b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h
new file mode 100644
index 00000000000..01247204ac9
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h
@@ -0,0 +1,34 @@
+/* Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __A4XX_GPU_H__
+#define __A4XX_GPU_H__
+
+#include "adreno_gpu.h"
+
+/* arrg, somehow fb.h is getting pulled in: */
+#undef ROP_COPY
+#undef ROP_XOR
+
+#include "a4xx.xml.h"
+
+struct a4xx_gpu {
+ struct adreno_gpu base;
+ struct platform_device *pdev;
+
+ /* if OCMEM is used for GMEM: */
+ uint32_t ocmem_base;
+ void *ocmem_hdl;
+};
+#define to_a4xx_gpu(x) container_of(x, struct a4xx_gpu, base)
+
+#endif /* __A4XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index cc341bc62b5..a4b33af9338 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -11,10 +11,10 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14960 bytes, from 2014-07-27 17:22:13)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-08-01 12:22:48)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 41068 bytes, from 2014-08-01 12:22:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15053 bytes, from 2014-11-09 15:45:47)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 63169 bytes, from 2014-11-13 22:44:18)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 49097 bytes, from 2014-11-14 15:38:00)
Copyright (C) 2013-2014 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -105,6 +105,7 @@ enum adreno_rb_dither_mode {
enum adreno_rb_depth_format {
DEPTHX_16 = 0,
DEPTHX_24_8 = 1,
+ DEPTHX_32 = 2,
};
enum adreno_rb_copy_control_mode {
@@ -132,6 +133,7 @@ enum a3xx_threadmode {
};
enum a3xx_instrbuffermode {
+ CACHE = 0,
BUFFER = 1,
};
@@ -140,6 +142,13 @@ enum a3xx_threadsize {
FOUR_QUADS = 1,
};
+enum a3xx_color_swap {
+ WZYX = 0,
+ WXYZ = 1,
+ ZYXW = 2,
+ XYZW = 3,
+};
+
#define REG_AXXX_CP_RB_BASE 0x000001c0
#define REG_AXXX_CP_RB_CNTL 0x000001c1
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 7ab85af3a7d..be83dee83d0 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -2,6 +2,8 @@
* Copyright (C) 2013-2014 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
@@ -28,6 +30,7 @@ MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!
module_param_named(hang_debug, hang_debug, bool, 0600);
struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
+struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
static const struct adreno_info gpulist[] = {
{
@@ -54,6 +57,14 @@ static const struct adreno_info gpulist[] = {
.pfpfw = "a330_pfp.fw",
.gmem = SZ_1M,
.init = a3xx_gpu_init,
+ }, {
+ .rev = ADRENO_REV(4, 2, 0, ANY_ID),
+ .revn = 420,
+ .name = "A420",
+ .pm4fw = "a420_pm4.fw",
+ .pfpfw = "a420_pfp.fw",
+ .gmem = (SZ_1M + SZ_512K),
+ .init = a4xx_gpu_init,
},
};
@@ -61,6 +72,8 @@ MODULE_FIRMWARE("a300_pm4.fw");
MODULE_FIRMWARE("a300_pfp.fw");
MODULE_FIRMWARE("a330_pm4.fw");
MODULE_FIRMWARE("a330_pfp.fw");
+MODULE_FIRMWARE("a420_pm4.fw");
+MODULE_FIRMWARE("a420_pfp.fw");
static inline bool _rev_match(uint8_t entry, uint8_t id)
{
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 6afa29167fe..aa873048308 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -2,6 +2,8 @@
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
@@ -63,19 +65,21 @@ int adreno_hw_init(struct msm_gpu *gpu)
}
/* Setup REG_CP_RB_CNTL: */
- gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
+ adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
/* size is log2(quad-words): */
AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)));
/* Setup ringbuffer address: */
- gpu_write(gpu, REG_AXXX_CP_RB_BASE, gpu->rb_iova);
- gpu_write(gpu, REG_AXXX_CP_RB_RPTR_ADDR, rbmemptr(adreno_gpu, rptr));
+ adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_BASE, gpu->rb_iova);
+ adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
+ rbmemptr(adreno_gpu, rptr));
/* Setup scratch/timestamp: */
- gpu_write(gpu, REG_AXXX_SCRATCH_ADDR, rbmemptr(adreno_gpu, fence));
+ adreno_gpu_write(adreno_gpu, REG_ADRENO_SCRATCH_ADDR,
+ rbmemptr(adreno_gpu, fence));
- gpu_write(gpu, REG_AXXX_SCRATCH_UMSK, 0x1);
+ adreno_gpu_write(adreno_gpu, REG_ADRENO_SCRATCH_UMSK, 0x1);
return 0;
}
@@ -151,7 +155,7 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
OUT_RING(ring, submit->fence);
- if (adreno_is_a3xx(adreno_gpu)) {
+ if (adreno_is_a3xx(adreno_gpu) || adreno_is_a4xx(adreno_gpu)) {
/* Flush HLSQ lazy updates to make sure there is nothing
* pending for indirect loads after the timestamp has
* passed:
@@ -188,12 +192,13 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
void adreno_flush(struct msm_gpu *gpu)
{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
uint32_t wptr = get_wptr(gpu->rb);
/* ensure writes to ringbuffer have hit system memory: */
mb();
- gpu_write(gpu, REG_AXXX_CP_RB_WPTR, wptr);
+ adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr);
}
void adreno_idle(struct msm_gpu *gpu)
@@ -319,6 +324,12 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);
+ ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
+ adreno_gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
+ RB_SIZE);
+ if (ret)
+ return ret;
+
ret = request_firmware(&adreno_gpu->pm4, adreno_gpu->info->pm4fw, drm->dev);
if (ret) {
dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
@@ -333,12 +344,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
return ret;
}
- ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
- adreno_gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
- RB_SIZE);
- if (ret)
- return ret;
-
mmu = gpu->mmu;
if (mmu) {
ret = mmu->funcs->attach(mmu, iommu_ports,
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 52f05157975..a0cc30977e6 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -2,6 +2,8 @@
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
@@ -25,6 +27,81 @@
#include "adreno_common.xml.h"
#include "adreno_pm4.xml.h"
+#define REG_ADRENO_DEFINE(_offset, _reg) [_offset] = (_reg) + 1
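+/* Offsets are stored off-by-one so that a zero entry means "not implemented on this GPU". */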
+/**
+ * adreno_regs: List of registers that are used across all
+ * 3D devices. Each device type has a different offset value for the same
+ * register, so an array of register offsets is declared for every device
+ * and is indexed by the enumeration values defined in this enum.
+ */
+enum adreno_regs {
+ REG_ADRENO_CP_DEBUG,
+ REG_ADRENO_CP_ME_RAM_WADDR,
+ REG_ADRENO_CP_ME_RAM_DATA,
+ REG_ADRENO_CP_PFP_UCODE_DATA,
+ REG_ADRENO_CP_PFP_UCODE_ADDR,
+ REG_ADRENO_CP_WFI_PEND_CTR,
+ REG_ADRENO_CP_RB_BASE,
+ REG_ADRENO_CP_RB_RPTR_ADDR,
+ REG_ADRENO_CP_RB_RPTR,
+ REG_ADRENO_CP_RB_WPTR,
+ REG_ADRENO_CP_PROTECT_CTRL,
+ REG_ADRENO_CP_ME_CNTL,
+ REG_ADRENO_CP_RB_CNTL,
+ REG_ADRENO_CP_IB1_BASE,
+ REG_ADRENO_CP_IB1_BUFSZ,
+ REG_ADRENO_CP_IB2_BASE,
+ REG_ADRENO_CP_IB2_BUFSZ,
+ REG_ADRENO_CP_TIMESTAMP,
+ REG_ADRENO_CP_ME_RAM_RADDR,
+ REG_ADRENO_CP_ROQ_ADDR,
+ REG_ADRENO_CP_ROQ_DATA,
+ REG_ADRENO_CP_MERCIU_ADDR,
+ REG_ADRENO_CP_MERCIU_DATA,
+ REG_ADRENO_CP_MERCIU_DATA2,
+ REG_ADRENO_CP_MEQ_ADDR,
+ REG_ADRENO_CP_MEQ_DATA,
+ REG_ADRENO_CP_HW_FAULT,
+ REG_ADRENO_CP_PROTECT_STATUS,
+ REG_ADRENO_SCRATCH_ADDR,
+ REG_ADRENO_SCRATCH_UMSK,
+ REG_ADRENO_SCRATCH_REG2,
+ REG_ADRENO_RBBM_STATUS,
+ REG_ADRENO_RBBM_PERFCTR_CTL,
+ REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
+ REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
+ REG_ADRENO_RBBM_PERFCTR_LOAD_CMD2,
+ REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
+ REG_ADRENO_RBBM_INT_0_MASK,
+ REG_ADRENO_RBBM_INT_0_STATUS,
+ REG_ADRENO_RBBM_AHB_ERROR_STATUS,
+ REG_ADRENO_RBBM_PM_OVERRIDE2,
+ REG_ADRENO_RBBM_AHB_CMD,
+ REG_ADRENO_RBBM_INT_CLEAR_CMD,
+ REG_ADRENO_RBBM_SW_RESET_CMD,
+ REG_ADRENO_RBBM_CLOCK_CTL,
+ REG_ADRENO_RBBM_AHB_ME_SPLIT_STATUS,
+ REG_ADRENO_RBBM_AHB_PFP_SPLIT_STATUS,
+ REG_ADRENO_VPC_DEBUG_RAM_SEL,
+ REG_ADRENO_VPC_DEBUG_RAM_READ,
+ REG_ADRENO_VSC_SIZE_ADDRESS,
+ REG_ADRENO_VFD_CONTROL_0,
+ REG_ADRENO_VFD_INDEX_MAX,
+ REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
+ REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
+ REG_ADRENO_SP_VS_OBJ_START_REG,
+ REG_ADRENO_SP_FS_OBJ_START_REG,
+ REG_ADRENO_PA_SC_AA_CONFIG,
+ REG_ADRENO_SQ_GPR_MANAGEMENT,
+ REG_ADRENO_SQ_INST_STORE_MANAGMENT,
+ REG_ADRENO_TP0_CHICKEN,
+ REG_ADRENO_RBBM_RBBM_CTL,
+ REG_ADRENO_UCHE_INVALIDATE0,
+ REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
+ REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
+ REG_ADRENO_REGISTER_MAX,
+};
+
struct adreno_rev {
uint8_t core;
uint8_t major;
@@ -76,6 +153,13 @@ struct adreno_gpu {
struct adreno_rbmemptrs *memptrs;
struct drm_gem_object *memptrs_bo;
uint32_t memptrs_iova;
+
+ /*
+ * Register offsets are different between some GPUs.
+ * GPU specific offsets will be exported by GPU specific
+ * code (a3xx_gpu.c) and stored in this common location.
+ */
+ const unsigned int *reg_offsets;
};
#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
@@ -128,6 +212,16 @@ static inline bool adreno_is_a330v2(struct adreno_gpu *gpu)
return adreno_is_a330(gpu) && (gpu->rev.patchid > 0);
}
+static inline bool adreno_is_a4xx(struct adreno_gpu *gpu)
+{
+ return (gpu->revn >= 400) && (gpu->revn < 500);
+}
+
+static inline int adreno_is_a420(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 420;
+}
+
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
int adreno_hw_init(struct msm_gpu *gpu);
uint32_t adreno_last_fence(struct msm_gpu *gpu);
@@ -171,5 +265,37 @@ OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
}
+/*
+ * adreno_reg_check() - Checks the validity of a register enum
+ * @gpu: Pointer to struct adreno_gpu
+ * @offset_name: The register enum that is checked
+ */
+static inline bool adreno_reg_check(struct adreno_gpu *gpu,
+ enum adreno_regs offset_name)
+{
+ if (offset_name >= REG_ADRENO_REGISTER_MAX ||
+ !gpu->reg_offsets[offset_name]) {
+ BUG();
+ }
+ return true;
+}
+
+static inline u32 adreno_gpu_read(struct adreno_gpu *gpu,
+ enum adreno_regs offset_name)
+{
+ u32 reg = gpu->reg_offsets[offset_name];
+ u32 val = 0;
+ if (adreno_reg_check(gpu, offset_name))
+ val = gpu_read(&gpu->base, reg - 1);
+ return val;
+}
+
+static inline void adreno_gpu_write(struct adreno_gpu *gpu,
+ enum adreno_regs offset_name, u32 data)
+{
+ u32 reg = gpu->reg_offsets[offset_name];
+ if (adreno_reg_check(gpu, offset_name))
+ gpu_write(&gpu->base, reg - 1, data);
+}
#endif /* __ADRENO_GPU_H__ */
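The header above encodes per-GPU register offsets with a +1 bias: REG_ADRENO_DEFINE() stores offset+1 in the table, a zero entry therefore means "this GPU has no such register", and adreno_gpu_read()/adreno_gpu_write() subtract the bias again after adreno_reg_check() has validated the entry. A minimal sketch of how a GPU-specific file is expected to use this scheme (the table contents, numeric offsets, and the demo_* names below are made up for illustration, not the actual a3xx/a4xx values):

	/* hypothetical per-GPU offset table; the real ones live in a3xx_gpu.c etc. */
	static const unsigned int demo_register_offsets[REG_ADRENO_REGISTER_MAX] = {
		/* stored as (offset + 1), so an entry left at 0 means "absent": */
		REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, 0x01c0),
		REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, 0x01c5),
		/* anything not listed stays 0 and trips adreno_reg_check() */
	};

	static void demo_use(struct adreno_gpu *adreno_gpu)
	{
		/* GPU-specific init publishes its table once: */
		adreno_gpu->reg_offsets = demo_register_offsets;

		/* callers index by enum; the helpers un-bias (reg - 1) on access: */
		adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, 0);
		(void)adreno_gpu_read(adreno_gpu, REG_ADRENO_CP_RB_BASE);
	}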
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index 6ef43f66c30..6a75cee94d8 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -11,10 +11,10 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14960 bytes, from 2014-07-27 17:22:13)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-08-01 12:22:48)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 41068 bytes, from 2014-08-01 12:22:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15053 bytes, from 2014-11-09 15:45:47)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 63169 bytes, from 2014-11-13 22:44:18)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 49097 bytes, from 2014-11-14 15:38:00)
Copyright (C) 2013-2014 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -157,6 +157,7 @@ enum adreno_pm4_type3_packets {
CP_IM_STORE = 44,
CP_SET_DRAW_INIT_FLAGS = 75,
CP_SET_PROTECTED_MODE = 95,
+ CP_BOOTSTRAP_UCODE = 111,
CP_LOAD_STATE = 48,
CP_COND_INDIRECT_BUFFER_PFE = 58,
CP_COND_INDIRECT_BUFFER_PFD = 50,
@@ -278,11 +279,11 @@ static inline uint32_t CP_DRAW_INDX_1_INDEX_SIZE(enum pc_di_index_size val)
#define CP_DRAW_INDX_1_NOT_EOP 0x00001000
#define CP_DRAW_INDX_1_SMALL_INDEX 0x00002000
#define CP_DRAW_INDX_1_PRE_DRAW_INITIATOR_ENABLE 0x00004000
-#define CP_DRAW_INDX_1_NUM_INDICES__MASK 0xffff0000
-#define CP_DRAW_INDX_1_NUM_INDICES__SHIFT 16
-static inline uint32_t CP_DRAW_INDX_1_NUM_INDICES(uint32_t val)
+#define CP_DRAW_INDX_1_NUM_INSTANCES__MASK 0xff000000
+#define CP_DRAW_INDX_1_NUM_INSTANCES__SHIFT 24
+static inline uint32_t CP_DRAW_INDX_1_NUM_INSTANCES(uint32_t val)
{
- return ((val) << CP_DRAW_INDX_1_NUM_INDICES__SHIFT) & CP_DRAW_INDX_1_NUM_INDICES__MASK;
+ return ((val) << CP_DRAW_INDX_1_NUM_INSTANCES__SHIFT) & CP_DRAW_INDX_1_NUM_INSTANCES__MASK;
}
#define REG_CP_DRAW_INDX_2 0x00000002
@@ -293,20 +294,20 @@ static inline uint32_t CP_DRAW_INDX_2_NUM_INDICES(uint32_t val)
return ((val) << CP_DRAW_INDX_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_2_NUM_INDICES__MASK;
}
-#define REG_CP_DRAW_INDX_2 0x00000002
-#define CP_DRAW_INDX_2_INDX_BASE__MASK 0xffffffff
-#define CP_DRAW_INDX_2_INDX_BASE__SHIFT 0
-static inline uint32_t CP_DRAW_INDX_2_INDX_BASE(uint32_t val)
+#define REG_CP_DRAW_INDX_3 0x00000003
+#define CP_DRAW_INDX_3_INDX_BASE__MASK 0xffffffff
+#define CP_DRAW_INDX_3_INDX_BASE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_3_INDX_BASE(uint32_t val)
{
- return ((val) << CP_DRAW_INDX_2_INDX_BASE__SHIFT) & CP_DRAW_INDX_2_INDX_BASE__MASK;
+ return ((val) << CP_DRAW_INDX_3_INDX_BASE__SHIFT) & CP_DRAW_INDX_3_INDX_BASE__MASK;
}
-#define REG_CP_DRAW_INDX_2 0x00000002
-#define CP_DRAW_INDX_2_INDX_SIZE__MASK 0xffffffff
-#define CP_DRAW_INDX_2_INDX_SIZE__SHIFT 0
-static inline uint32_t CP_DRAW_INDX_2_INDX_SIZE(uint32_t val)
+#define REG_CP_DRAW_INDX_4 0x00000004
+#define CP_DRAW_INDX_4_INDX_SIZE__MASK 0xffffffff
+#define CP_DRAW_INDX_4_INDX_SIZE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_4_INDX_SIZE(uint32_t val)
{
- return ((val) << CP_DRAW_INDX_2_INDX_SIZE__SHIFT) & CP_DRAW_INDX_2_INDX_SIZE__MASK;
+ return ((val) << CP_DRAW_INDX_4_INDX_SIZE__SHIFT) & CP_DRAW_INDX_4_INDX_SIZE__MASK;
}
#define REG_CP_DRAW_INDX_2_0 0x00000000
@@ -345,11 +346,11 @@ static inline uint32_t CP_DRAW_INDX_2_1_INDEX_SIZE(enum pc_di_index_size val)
#define CP_DRAW_INDX_2_1_NOT_EOP 0x00001000
#define CP_DRAW_INDX_2_1_SMALL_INDEX 0x00002000
#define CP_DRAW_INDX_2_1_PRE_DRAW_INITIATOR_ENABLE 0x00004000
-#define CP_DRAW_INDX_2_1_NUM_INDICES__MASK 0xffff0000
-#define CP_DRAW_INDX_2_1_NUM_INDICES__SHIFT 16
-static inline uint32_t CP_DRAW_INDX_2_1_NUM_INDICES(uint32_t val)
+#define CP_DRAW_INDX_2_1_NUM_INSTANCES__MASK 0xff000000
+#define CP_DRAW_INDX_2_1_NUM_INSTANCES__SHIFT 24
+static inline uint32_t CP_DRAW_INDX_2_1_NUM_INSTANCES(uint32_t val)
{
- return ((val) << CP_DRAW_INDX_2_1_NUM_INDICES__SHIFT) & CP_DRAW_INDX_2_1_NUM_INDICES__MASK;
+ return ((val) << CP_DRAW_INDX_2_1_NUM_INSTANCES__SHIFT) & CP_DRAW_INDX_2_1_NUM_INSTANCES__MASK;
}
#define REG_CP_DRAW_INDX_2_2 0x00000002
@@ -388,11 +389,11 @@ static inline uint32_t CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(enum pc_di_index_size va
#define CP_DRAW_INDX_OFFSET_0_NOT_EOP 0x00001000
#define CP_DRAW_INDX_OFFSET_0_SMALL_INDEX 0x00002000
#define CP_DRAW_INDX_OFFSET_0_PRE_DRAW_INITIATOR_ENABLE 0x00004000
-#define CP_DRAW_INDX_OFFSET_0_NUM_INDICES__MASK 0xffff0000
-#define CP_DRAW_INDX_OFFSET_0_NUM_INDICES__SHIFT 16
-static inline uint32_t CP_DRAW_INDX_OFFSET_0_NUM_INDICES(uint32_t val)
+#define CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES__MASK 0xffff0000
+#define CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES__SHIFT 16
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES(uint32_t val)
{
- return ((val) << CP_DRAW_INDX_OFFSET_0_NUM_INDICES__SHIFT) & CP_DRAW_INDX_OFFSET_0_NUM_INDICES__MASK;
+ return ((val) << CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES__SHIFT) & CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES__MASK;
}
#define REG_CP_DRAW_INDX_OFFSET_1 0x00000001
@@ -405,20 +406,22 @@ static inline uint32_t CP_DRAW_INDX_OFFSET_2_NUM_INDICES(uint32_t val)
return ((val) << CP_DRAW_INDX_OFFSET_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_OFFSET_2_NUM_INDICES__MASK;
}
-#define REG_CP_DRAW_INDX_OFFSET_2 0x00000002
-#define CP_DRAW_INDX_OFFSET_2_INDX_BASE__MASK 0xffffffff
-#define CP_DRAW_INDX_OFFSET_2_INDX_BASE__SHIFT 0
-static inline uint32_t CP_DRAW_INDX_OFFSET_2_INDX_BASE(uint32_t val)
+#define REG_CP_DRAW_INDX_OFFSET_3 0x00000003
+
+#define REG_CP_DRAW_INDX_OFFSET_4 0x00000004
+#define CP_DRAW_INDX_OFFSET_4_INDX_BASE__MASK 0xffffffff
+#define CP_DRAW_INDX_OFFSET_4_INDX_BASE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_4_INDX_BASE(uint32_t val)
{
- return ((val) << CP_DRAW_INDX_OFFSET_2_INDX_BASE__SHIFT) & CP_DRAW_INDX_OFFSET_2_INDX_BASE__MASK;
+ return ((val) << CP_DRAW_INDX_OFFSET_4_INDX_BASE__SHIFT) & CP_DRAW_INDX_OFFSET_4_INDX_BASE__MASK;
}
-#define REG_CP_DRAW_INDX_OFFSET_2 0x00000002
-#define CP_DRAW_INDX_OFFSET_2_INDX_SIZE__MASK 0xffffffff
-#define CP_DRAW_INDX_OFFSET_2_INDX_SIZE__SHIFT 0
-static inline uint32_t CP_DRAW_INDX_OFFSET_2_INDX_SIZE(uint32_t val)
+#define REG_CP_DRAW_INDX_OFFSET_5 0x00000005
+#define CP_DRAW_INDX_OFFSET_5_INDX_SIZE__MASK 0xffffffff
+#define CP_DRAW_INDX_OFFSET_5_INDX_SIZE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_5_INDX_SIZE(uint32_t val)
{
- return ((val) << CP_DRAW_INDX_OFFSET_2_INDX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_2_INDX_SIZE__MASK;
+ return ((val) << CP_DRAW_INDX_OFFSET_5_INDX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_5_INDX_SIZE__MASK;
}
#define REG_CP_SET_DRAW_STATE_0 0x00000000
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index e965898dfda..448438b759b 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -10,12 +10,12 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20457 bytes, from 2014-08-01 12:22:48)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2014-07-17 15:34:33)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-07-17 15:34:33)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-08-01 12:23:53)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
index f2bdda95720..c102a7f074a 100644
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -10,12 +10,12 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20457 bytes, from 2014-08-01 12:22:48)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2014-07-17 15:34:33)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-07-17 15:34:33)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-08-01 12:23:53)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
index e5b071ffd86..a900134bdf3 100644
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -10,12 +10,12 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20457 bytes, from 2014-08-01 12:22:48)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2014-07-17 15:34:33)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-07-17 15:34:33)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-08-01 12:23:53)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 9d00dcba695..062c6872537 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -15,6 +15,7 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/of_irq.h>
#include "hdmi.h"
void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
@@ -39,7 +40,7 @@ void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
power_on ? "Enable" : "Disable", ctrl);
}
-irqreturn_t hdmi_irq(int irq, void *dev_id)
+static irqreturn_t hdmi_irq(int irq, void *dev_id)
{
struct hdmi *hdmi = dev_id;
@@ -54,9 +55,8 @@ irqreturn_t hdmi_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-void hdmi_destroy(struct kref *kref)
+static void hdmi_destroy(struct hdmi *hdmi)
{
- struct hdmi *hdmi = container_of(kref, struct hdmi, refcount);
struct hdmi_phy *phy = hdmi->phy;
if (phy)
@@ -68,37 +68,24 @@ void hdmi_destroy(struct kref *kref)
platform_set_drvdata(hdmi->pdev, NULL);
}
-/* initialize connector */
-struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
+/* construct hdmi at bind/probe time, grab all the resources. If
+ * we need to -EPROBE_DEFER, we want that to happen here rather than
+ * later at modeset_init() time
+ */
+static struct hdmi *hdmi_init(struct platform_device *pdev)
{
+ struct hdmi_platform_config *config = pdev->dev.platform_data;
struct hdmi *hdmi = NULL;
- struct msm_drm_private *priv = dev->dev_private;
- struct platform_device *pdev = priv->hdmi_pdev;
- struct hdmi_platform_config *config;
int i, ret;
- if (!pdev) {
- dev_err(dev->dev, "no hdmi device\n");
- ret = -ENXIO;
- goto fail;
- }
-
- config = pdev->dev.platform_data;
-
- hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
+ hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
if (!hdmi) {
ret = -ENOMEM;
goto fail;
}
- kref_init(&hdmi->refcount);
-
- hdmi->dev = dev;
hdmi->pdev = pdev;
hdmi->config = config;
- hdmi->encoder = encoder;
-
- hdmi_audio_infoframe_init(&hdmi->audio.infoframe);
/* not sure about which phy maps to which msm.. probably I miss some */
if (config->phy_init)
@@ -108,7 +95,7 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
if (IS_ERR(hdmi->phy)) {
ret = PTR_ERR(hdmi->phy);
- dev_err(dev->dev, "failed to load phy: %d\n", ret);
+ dev_err(&pdev->dev, "failed to load phy: %d\n", ret);
hdmi->phy = NULL;
goto fail;
}
@@ -127,7 +114,7 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
config->hpd_reg_names[i]);
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
- dev_err(dev->dev, "failed to get hpd regulator: %s (%d)\n",
+ dev_err(&pdev->dev, "failed to get hpd regulator: %s (%d)\n",
config->hpd_reg_names[i], ret);
goto fail;
}
@@ -143,7 +130,7 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
config->pwr_reg_names[i]);
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
- dev_err(dev->dev, "failed to get pwr regulator: %s (%d)\n",
+ dev_err(&pdev->dev, "failed to get pwr regulator: %s (%d)\n",
config->pwr_reg_names[i], ret);
goto fail;
}
@@ -158,7 +145,7 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
clk = devm_clk_get(&pdev->dev, config->hpd_clk_names[i]);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
- dev_err(dev->dev, "failed to get hpd clk: %s (%d)\n",
+ dev_err(&pdev->dev, "failed to get hpd clk: %s (%d)\n",
config->hpd_clk_names[i], ret);
goto fail;
}
@@ -173,7 +160,7 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
clk = devm_clk_get(&pdev->dev, config->pwr_clk_names[i]);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
- dev_err(dev->dev, "failed to get pwr clk: %s (%d)\n",
+ dev_err(&pdev->dev, "failed to get pwr clk: %s (%d)\n",
config->pwr_clk_names[i], ret);
goto fail;
}
@@ -184,11 +171,40 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
hdmi->i2c = hdmi_i2c_init(hdmi);
if (IS_ERR(hdmi->i2c)) {
ret = PTR_ERR(hdmi->i2c);
- dev_err(dev->dev, "failed to get i2c: %d\n", ret);
+ dev_err(&pdev->dev, "failed to get i2c: %d\n", ret);
hdmi->i2c = NULL;
goto fail;
}
+ return hdmi;
+
+fail:
+ if (hdmi)
+ hdmi_destroy(hdmi);
+
+ return ERR_PTR(ret);
+}
+
+/* Second part of initialization: the drm/kms level modeset_init,
+ * which constructs/initializes the mode objects, etc. It is called
+ * from the master driver (not the hdmi sub-device's probe/bind!)
+ *
+ * Any resource (regulator/clk/etc) which could be missing at boot
+ * should be handled in hdmi_init() so that the failure happens in
+ * the hdmi sub-device's probe.
+ */
+int hdmi_modeset_init(struct hdmi *hdmi,
+ struct drm_device *dev, struct drm_encoder *encoder)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = hdmi->pdev;
+ int ret;
+
+ hdmi->dev = dev;
+ hdmi->encoder = encoder;
+
+ hdmi_audio_infoframe_init(&hdmi->audio.infoframe);
+
hdmi->bridge = hdmi_bridge_init(hdmi);
if (IS_ERR(hdmi->bridge)) {
ret = PTR_ERR(hdmi->bridge);
@@ -205,22 +221,20 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
goto fail;
}
- if (!config->shared_irq) {
- hdmi->irq = platform_get_irq(pdev, 0);
- if (hdmi->irq < 0) {
- ret = hdmi->irq;
- dev_err(dev->dev, "failed to get irq: %d\n", ret);
- goto fail;
- }
+ hdmi->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ if (!hdmi->irq) {
+ ret = -EINVAL;
+ dev_err(dev->dev, "failed to get irq: %d\n", ret);
+ goto fail;
+ }
- ret = devm_request_threaded_irq(&pdev->dev, hdmi->irq,
- NULL, hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
- "hdmi_isr", hdmi);
- if (ret < 0) {
- dev_err(dev->dev, "failed to request IRQ%u: %d\n",
- hdmi->irq, ret);
- goto fail;
- }
+ ret = devm_request_irq(&pdev->dev, hdmi->irq,
+ hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "hdmi_isr", hdmi);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to request IRQ%u: %d\n",
+ hdmi->irq, ret);
+ goto fail;
}
encoder->bridge = hdmi->bridge;
@@ -230,19 +244,20 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
platform_set_drvdata(pdev, hdmi);
- return hdmi;
+ return 0;
fail:
- if (hdmi) {
- /* bridge/connector are normally destroyed by drm: */
- if (hdmi->bridge)
- hdmi->bridge->funcs->destroy(hdmi->bridge);
- if (hdmi->connector)
- hdmi->connector->funcs->destroy(hdmi->connector);
- hdmi_destroy(&hdmi->refcount);
+ /* bridge/connector are normally destroyed by drm: */
+ if (hdmi->bridge) {
+ hdmi->bridge->funcs->destroy(hdmi->bridge);
+ hdmi->bridge = NULL;
+ }
+ if (hdmi->connector) {
+ hdmi->connector->funcs->destroy(hdmi->connector);
+ hdmi->connector = NULL;
}
- return ERR_PTR(ret);
+ return ret;
}
/*
@@ -251,13 +266,6 @@ fail:
#include <linux/of_gpio.h>
-static void set_hdmi_pdev(struct drm_device *dev,
- struct platform_device *pdev)
-{
- struct msm_drm_private *priv = dev->dev_private;
- priv->hdmi_pdev = pdev;
-}
-
#ifdef CONFIG_OF
static int get_gpio(struct device *dev, struct device_node *of_node, const char *name)
{
@@ -278,7 +286,10 @@ static int get_gpio(struct device *dev, struct device_node *of_node, const char
static int hdmi_bind(struct device *dev, struct device *master, void *data)
{
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct msm_drm_private *priv = drm->dev_private;
static struct hdmi_platform_config config = {};
+ struct hdmi *hdmi;
#ifdef CONFIG_OF
struct device_node *of_node = dev->of_node;
@@ -298,7 +309,6 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
config.pwr_clk_names = pwr_clk_names;
config.pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names);
- config.shared_irq = true;
} else if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8960")) {
static const char *hpd_clk_names[] = {"core_clk", "master_iface_clk", "slave_iface_clk"};
static const char *hpd_reg_names[] = {"core-vdda", "hdmi-mux"};
@@ -369,14 +379,22 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
}
#endif
dev->platform_data = &config;
- set_hdmi_pdev(dev_get_drvdata(master), to_platform_device(dev));
+ hdmi = hdmi_init(to_platform_device(dev));
+ if (IS_ERR(hdmi))
+ return PTR_ERR(hdmi);
+ priv->hdmi = hdmi;
return 0;
}
static void hdmi_unbind(struct device *dev, struct device *master,
void *data)
{
- set_hdmi_pdev(dev_get_drvdata(master), NULL);
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct msm_drm_private *priv = drm->dev_private;
+ if (priv->hdmi) {
+ hdmi_destroy(priv->hdmi);
+ priv->hdmi = NULL;
+ }
}
static const struct component_ops hdmi_ops = {
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index b981995410b..43e654f751b 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -38,8 +38,6 @@ struct hdmi_audio {
};
struct hdmi {
- struct kref refcount;
-
struct drm_device *dev;
struct platform_device *pdev;
@@ -97,13 +95,9 @@ struct hdmi_platform_config {
/* gpio's: */
int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, mux_en_gpio, mux_sel_gpio;
int mux_lpm_gpio;
-
- /* older devices had their own irq, mdp5+ it is shared w/ mdp: */
- bool shared_irq;
};
void hdmi_set_mode(struct hdmi *hdmi, bool power_on);
-void hdmi_destroy(struct kref *kref);
static inline void hdmi_write(struct hdmi *hdmi, u32 reg, u32 data)
{
@@ -115,17 +109,6 @@ static inline u32 hdmi_read(struct hdmi *hdmi, u32 reg)
return msm_readl(hdmi->mmio + reg);
}
-static inline struct hdmi * hdmi_reference(struct hdmi *hdmi)
-{
- kref_get(&hdmi->refcount);
- return hdmi;
-}
-
-static inline void hdmi_unreference(struct hdmi *hdmi)
-{
- kref_put(&hdmi->refcount, hdmi_destroy);
-}
-
/*
* The phy appears to be different, for example between 8960 and 8x60,
* so split the phy related functions out and load the correct one at
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index 76fd0cfc655..5b0844befba 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -10,12 +10,12 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20457 bytes, from 2014-08-01 12:22:48)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2014-07-17 15:34:33)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-07-17 15:34:33)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-08-01 12:23:53)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index f6cf745c249..6902ad6da71 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -26,7 +26,6 @@ struct hdmi_bridge {
static void hdmi_bridge_destroy(struct drm_bridge *bridge)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
- hdmi_unreference(hdmi_bridge->hdmi);
drm_bridge_cleanup(bridge);
kfree(hdmi_bridge);
}
@@ -218,7 +217,7 @@ struct drm_bridge *hdmi_bridge_init(struct hdmi *hdmi)
goto fail;
}
- hdmi_bridge->hdmi = hdmi_reference(hdmi);
+ hdmi_bridge->hdmi = hdmi;
bridge = &hdmi_bridge->base;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index 4aca2a3c667..fbebb0405d7 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -330,8 +330,6 @@ static void hdmi_connector_destroy(struct drm_connector *connector)
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
- hdmi_unreference(hdmi_connector->hdmi);
-
kfree(hdmi_connector);
}
@@ -401,6 +399,9 @@ static const struct drm_connector_funcs hdmi_connector_funcs = {
.detect = hdmi_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = hdmi_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
@@ -422,7 +423,7 @@ struct drm_connector *hdmi_connector_init(struct hdmi *hdmi)
goto fail;
}
- hdmi_connector->hdmi = hdmi_reference(hdmi);
+ hdmi_connector->hdmi = hdmi;
INIT_WORK(&hdmi_connector->hpd_work, hotplug_work);
connector = &hdmi_connector->base;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
index f408b69486a..eeed006eed1 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
@@ -510,7 +510,7 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
#ifdef CONFIG_COMMON_CLK
phy_8960->pll_hw.init = &pll_init;
- phy_8960->pll = devm_clk_register(hdmi->dev->dev, &phy_8960->pll_hw);
+ phy_8960->pll = devm_clk_register(&hdmi->pdev->dev, &phy_8960->pll_hw);
if (IS_ERR(phy_8960->pll)) {
ret = PTR_ERR(phy_8960->pll);
phy_8960->pll = NULL;
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
index d53c29327df..29bd796797d 100644
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -10,12 +10,12 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20457 bytes, from 2014-08-01 12:22:48)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2014-07-17 15:34:33)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-07-17 15:34:33)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-08-01 12:23:53)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
index 03c0bd9cd5b..a4a7f8c7122 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
@@ -10,12 +10,12 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20457 bytes, from 2014-08-01 12:22:48)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2014-07-17 15:34:33)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-07-17 15:34:33)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-08-01 12:23:53)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 7d00f7fb577..a7672e100d8 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -25,8 +25,6 @@
struct mdp4_crtc {
struct drm_crtc base;
char name[8];
- struct drm_plane *plane;
- struct drm_plane *planes[8];
int id;
int ovlp;
enum mdp4_dma dma;
@@ -52,25 +50,11 @@ struct mdp4_crtc {
/* if there is a pending flip, these will be non-null: */
struct drm_pending_vblank_event *event;
- struct msm_fence_cb pageflip_cb;
#define PENDING_CURSOR 0x1
#define PENDING_FLIP 0x2
atomic_t pending;
- /* the fb that we logically (from PoV of KMS API) hold a ref
- * to. Which we may not yet be scanning out (we may still
- * be scanning out previous in case of page_flip while waiting
- * for gpu rendering to complete:
- */
- struct drm_framebuffer *fb;
-
- /* the fb that we currently hold a scanout ref to: */
- struct drm_framebuffer *scanout_fb;
-
- /* for unref'ing framebuffers after scanout completes: */
- struct drm_flip_work unref_fb_work;
-
/* for unref'ing cursor bo's after scanout completes: */
struct drm_flip_work unref_cursor_work;
@@ -97,15 +81,14 @@ static void crtc_flush(struct drm_crtc *crtc)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct mdp4_kms *mdp4_kms = get_kms(crtc);
- uint32_t i, flush = 0;
+ struct drm_plane *plane;
+ uint32_t flush = 0;
- for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
- struct drm_plane *plane = mdp4_crtc->planes[i];
- if (plane) {
- enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
- flush |= pipe2flush(pipe_id);
- }
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
+ flush |= pipe2flush(pipe_id);
}
+
flush |= ovlp2flush(mdp4_crtc->ovlp);
DBG("%s: flush=%08x", mdp4_crtc->name, flush);
@@ -113,47 +96,6 @@ static void crtc_flush(struct drm_crtc *crtc)
mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
}
-static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
-{
- struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
- struct drm_framebuffer *old_fb = mdp4_crtc->fb;
-
- /* grab reference to incoming scanout fb: */
- drm_framebuffer_reference(new_fb);
- mdp4_crtc->base.primary->fb = new_fb;
- mdp4_crtc->fb = new_fb;
-
- if (old_fb)
- drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);
-}
-
-/* unlike update_fb(), take a ref to the new scanout fb *before* updating
- * plane, then call this. Needed to ensure we don't unref the buffer that
- * is actually still being scanned out.
- *
- * Note that this whole thing goes away with atomic.. since we can defer
- * calling into driver until rendering is done.
- */
-static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
-{
- struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-
- /* flush updates, to make sure hw is updated to new scanout fb,
- * so that we can safely queue unref to current fb (ie. next
- * vblank we know hw is done w/ previous scanout_fb).
- */
- crtc_flush(crtc);
-
- if (mdp4_crtc->scanout_fb)
- drm_flip_work_queue(&mdp4_crtc->unref_fb_work,
- mdp4_crtc->scanout_fb);
-
- mdp4_crtc->scanout_fb = fb;
-
- /* enable vblank to complete flip: */
- request_pending(crtc, PENDING_FLIP);
-}
-
/* if file!=NULL, this is preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
@@ -171,38 +113,13 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
*/
if (!file || (event->base.file_priv == file)) {
mdp4_crtc->event = NULL;
+ DBG("%s: send event: %p", mdp4_crtc->name, event);
drm_send_vblank_event(dev, mdp4_crtc->id, event);
}
}
spin_unlock_irqrestore(&dev->event_lock, flags);
}
-static void pageflip_cb(struct msm_fence_cb *cb)
-{
- struct mdp4_crtc *mdp4_crtc =
- container_of(cb, struct mdp4_crtc, pageflip_cb);
- struct drm_crtc *crtc = &mdp4_crtc->base;
- struct drm_framebuffer *fb = crtc->primary->fb;
-
- if (!fb)
- return;
-
- drm_framebuffer_reference(fb);
- mdp4_plane_set_scanout(mdp4_crtc->plane, fb);
- update_scanout(crtc, fb);
-}
-
-static void unref_fb_worker(struct drm_flip_work *work, void *val)
-{
- struct mdp4_crtc *mdp4_crtc =
- container_of(work, struct mdp4_crtc, unref_fb_work);
- struct drm_device *dev = mdp4_crtc->base.dev;
-
- mutex_lock(&dev->mode_config.mutex);
- drm_framebuffer_unreference(val);
- mutex_unlock(&dev->mode_config.mutex);
-}
-
static void unref_cursor_worker(struct drm_flip_work *work, void *val)
{
struct mdp4_crtc *mdp4_crtc =
@@ -218,7 +135,6 @@ static void mdp4_crtc_destroy(struct drm_crtc *crtc)
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
drm_crtc_cleanup(crtc);
- drm_flip_work_cleanup(&mdp4_crtc->unref_fb_work);
drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);
kfree(mdp4_crtc);
@@ -251,57 +167,70 @@ static bool mdp4_crtc_mode_fixup(struct drm_crtc *crtc,
return true;
}
-static void blend_setup(struct drm_crtc *crtc)
+/* statically (for now) map planes to mixer stage (z-order): */
+static const int idxs[] = {
+ [VG1] = 1,
+ [VG2] = 2,
+ [RGB1] = 0,
+ [RGB2] = 0,
+ [RGB3] = 0,
+ [VG3] = 3,
+ [VG4] = 4,
+
+};
+
+/* set up the mixer config, for which we need to consider all CRTCs and
+ * the planes attached to them
+ *
+ * TODO may possibly need some extra locking here
+ */
+static void setup_mixer(struct mdp4_kms *mdp4_kms)
{
- struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
- struct mdp4_kms *mdp4_kms = get_kms(crtc);
- int i, ovlp = mdp4_crtc->ovlp;
+ struct drm_mode_config *config = &mdp4_kms->dev->mode_config;
+ struct drm_crtc *crtc;
uint32_t mixer_cfg = 0;
static const enum mdp_mixer_stage_id stages[] = {
STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3,
};
- /* statically (for now) map planes to mixer stage (z-order): */
- static const int idxs[] = {
- [VG1] = 1,
- [VG2] = 2,
- [RGB1] = 0,
- [RGB2] = 0,
- [RGB3] = 0,
- [VG3] = 3,
- [VG4] = 4,
- };
- bool alpha[4]= { false, false, false, false };
+ list_for_each_entry(crtc, &config->crtc_list, head) {
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct drm_plane *plane;
- /* Don't rely on value read back from hw, but instead use our
- * own shadowed value. Possibly disable/reenable looses the
- * previous value and goes back to power-on default?
- */
- mixer_cfg = mdp4_kms->mixer_cfg;
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
+ int idx = idxs[pipe_id];
+ mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer,
+ pipe_id, stages[idx]);
+ }
+ }
+
+ mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
+}
+
+static void blend_setup(struct drm_crtc *crtc)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ struct drm_plane *plane;
+ int i, ovlp = mdp4_crtc->ovlp;
+ bool alpha[4] = { false, false, false, false };
mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0);
- for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
- struct drm_plane *plane = mdp4_crtc->planes[i];
- if (plane) {
- enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
- int idx = idxs[pipe_id];
- if (idx > 0) {
- const struct mdp_format *format =
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
+ int idx = idxs[pipe_id];
+ if (idx > 0) {
+ const struct mdp_format *format =
to_mdp_format(msm_framebuffer_format(plane->fb));
- alpha[idx-1] = format->alpha_enable;
- }
- mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer,
- pipe_id, stages[idx]);
+ alpha[idx-1] = format->alpha_enable;
}
}
- /* this shouldn't happen.. and seems to cause underflow: */
- WARN_ON(!mixer_cfg);
-
for (i = 0; i < 4; i++) {
uint32_t op;
@@ -324,22 +253,21 @@ static void blend_setup(struct drm_crtc *crtc)
mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
}
- mdp4_kms->mixer_cfg = mixer_cfg;
- mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
+ setup_mixer(mdp4_kms);
}
-static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
- int x, int y,
- struct drm_framebuffer *old_fb)
+static void mdp4_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct mdp4_kms *mdp4_kms = get_kms(crtc);
enum mdp4_dma dma = mdp4_crtc->dma;
- int ret, ovlp = mdp4_crtc->ovlp;
+ int ovlp = mdp4_crtc->ovlp;
+ struct drm_display_mode *mode;
+
+ if (WARN_ON(!crtc->state))
+ return;
- mode = adjusted_mode;
+ mode = &crtc->state->adjusted_mode;
DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
mdp4_crtc->name, mode->base.id, mode->name,
@@ -350,28 +278,13 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
mode->vsync_end, mode->vtotal,
mode->type, mode->flags);
- /* grab extra ref for update_scanout() */
- drm_framebuffer_reference(crtc->primary->fb);
-
- ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->primary->fb,
- 0, 0, mode->hdisplay, mode->vdisplay,
- x << 16, y << 16,
- mode->hdisplay << 16, mode->vdisplay << 16);
- if (ret) {
- drm_framebuffer_unreference(crtc->primary->fb);
- dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
- mdp4_crtc->name, ret);
- return ret;
- }
-
mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));
/* take data from pipe: */
mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0);
- mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma),
- crtc->primary->fb->pitches[0]);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma), 0);
mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma),
MDP4_DMA_DST_SIZE_WIDTH(0) |
MDP4_DMA_DST_SIZE_HEIGHT(0));
@@ -380,8 +293,7 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
mdp4_write(mdp4_kms, REG_MDP4_OVLP_SIZE(ovlp),
MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) |
MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay));
- mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp),
- crtc->primary->fb->pitches[0]);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp), 0);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);
@@ -390,11 +302,6 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
}
-
- update_fb(crtc, crtc->primary->fb);
- update_scanout(crtc, crtc->primary->fb);
-
- return 0;
}
static void mdp4_crtc_prepare(struct drm_crtc *crtc)
@@ -416,60 +323,51 @@ static void mdp4_crtc_commit(struct drm_crtc *crtc)
drm_crtc_vblank_put(crtc);
}
-static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
+static void mdp4_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+static int mdp4_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
- struct drm_plane *plane = mdp4_crtc->plane;
- struct drm_display_mode *mode = &crtc->mode;
- int ret;
+ struct drm_device *dev = crtc->dev;
- /* grab extra ref for update_scanout() */
- drm_framebuffer_reference(crtc->primary->fb);
+ DBG("%s: check", mdp4_crtc->name);
- ret = mdp4_plane_mode_set(plane, crtc, crtc->primary->fb,
- 0, 0, mode->hdisplay, mode->vdisplay,
- x << 16, y << 16,
- mode->hdisplay << 16, mode->vdisplay << 16);
- if (ret) {
- drm_framebuffer_unreference(crtc->primary->fb);
- return ret;
+ if (mdp4_crtc->event) {
+ dev_err(dev->dev, "already pending flip!\n");
+ return -EBUSY;
}
- update_fb(crtc, crtc->primary->fb);
- update_scanout(crtc, crtc->primary->fb);
+ // TODO anything else to check?
return 0;
}
-static void mdp4_crtc_load_lut(struct drm_crtc *crtc)
+static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc)
{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ DBG("%s: begin", mdp4_crtc->name);
}
-static int mdp4_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *new_fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags)
+static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct drm_gem_object *obj;
unsigned long flags;
- if (mdp4_crtc->event) {
- dev_err(dev->dev, "already pending flip!\n");
- return -EBUSY;
- }
+ DBG("%s: flush", mdp4_crtc->name);
- obj = msm_framebuffer_bo(new_fb, 0);
+ WARN_ON(mdp4_crtc->event);
spin_lock_irqsave(&dev->event_lock, flags);
- mdp4_crtc->event = event;
+ mdp4_crtc->event = crtc->state->event;
spin_unlock_irqrestore(&dev->event_lock, flags);
- update_fb(crtc, new_fb);
-
- return msm_gem_queue_inactive_cb(obj, &mdp4_crtc->pageflip_cb);
+ blend_setup(crtc);
+ crtc_flush(crtc);
+ request_pending(crtc, PENDING_FLIP);
}
static int mdp4_crtc_set_property(struct drm_crtc *crtc,
@@ -607,22 +505,29 @@ static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
}
static const struct drm_crtc_funcs mdp4_crtc_funcs = {
- .set_config = drm_crtc_helper_set_config,
+ .set_config = drm_atomic_helper_set_config,
.destroy = mdp4_crtc_destroy,
- .page_flip = mdp4_crtc_page_flip,
+ .page_flip = drm_atomic_helper_page_flip,
.set_property = mdp4_crtc_set_property,
.cursor_set = mdp4_crtc_cursor_set,
.cursor_move = mdp4_crtc_cursor_move,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
.dpms = mdp4_crtc_dpms,
.mode_fixup = mdp4_crtc_mode_fixup,
- .mode_set = mdp4_crtc_mode_set,
+ .mode_set_nofb = mdp4_crtc_mode_set_nofb,
+ .mode_set = drm_helper_crtc_mode_set,
+ .mode_set_base = drm_helper_crtc_mode_set_base,
.prepare = mdp4_crtc_prepare,
.commit = mdp4_crtc_commit,
- .mode_set_base = mdp4_crtc_mode_set_base,
.load_lut = mdp4_crtc_load_lut,
+ .atomic_check = mdp4_crtc_atomic_check,
+ .atomic_begin = mdp4_crtc_atomic_begin,
+ .atomic_flush = mdp4_crtc_atomic_flush,
};
static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
@@ -638,7 +543,6 @@ static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
if (pending & PENDING_FLIP) {
complete_flip(crtc, NULL);
- drm_flip_work_commit(&mdp4_crtc->unref_fb_work, priv->wq);
}
if (pending & PENDING_CURSOR) {
@@ -663,7 +567,8 @@ uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
{
- DBG("cancel: %p", file);
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ DBG("%s: cancel: %p", mdp4_crtc->name, file);
complete_flip(crtc, file);
}
@@ -717,35 +622,6 @@ void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer)
mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel);
}
-static void set_attach(struct drm_crtc *crtc, enum mdp4_pipe pipe_id,
- struct drm_plane *plane)
-{
- struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-
- BUG_ON(pipe_id >= ARRAY_SIZE(mdp4_crtc->planes));
-
- if (mdp4_crtc->planes[pipe_id] == plane)
- return;
-
- mdp4_crtc->planes[pipe_id] = plane;
- blend_setup(crtc);
- if (mdp4_crtc->enabled && (plane != mdp4_crtc->plane))
- crtc_flush(crtc);
-}
-
-void mdp4_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
-{
- set_attach(crtc, mdp4_plane_pipe(plane), plane);
-}
-
-void mdp4_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
-{
- /* don't actually detatch our primary plane: */
- if (to_mdp4_crtc(crtc)->plane == plane)
- return;
- set_attach(crtc, mdp4_plane_pipe(plane), NULL);
-}
-
static const char *dma_names[] = {
"DMA_P", "DMA_S", "DMA_E",
};
@@ -757,17 +633,13 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
{
struct drm_crtc *crtc = NULL;
struct mdp4_crtc *mdp4_crtc;
- int ret;
mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL);
- if (!mdp4_crtc) {
- ret = -ENOMEM;
- goto fail;
- }
+ if (!mdp4_crtc)
+ return ERR_PTR(-ENOMEM);
crtc = &mdp4_crtc->base;
- mdp4_crtc->plane = plane;
mdp4_crtc->id = id;
mdp4_crtc->ovlp = ovlp_id;
@@ -784,26 +656,14 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
spin_lock_init(&mdp4_crtc->cursor.lock);
- ret = drm_flip_work_init(&mdp4_crtc->unref_fb_work, 16,
- "unref fb", unref_fb_worker);
- if (ret)
- goto fail;
-
- ret = drm_flip_work_init(&mdp4_crtc->unref_cursor_work, 64,
+ drm_flip_work_init(&mdp4_crtc->unref_cursor_work,
"unref cursor", unref_cursor_worker);
- INIT_FENCE_CB(&mdp4_crtc->pageflip_cb, pageflip_cb);
-
drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs);
drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
+ plane->crtc = crtc;
- mdp4_plane_install_properties(mdp4_crtc->plane, &crtc->base);
+ mdp4_plane_install_properties(plane, &crtc->base);
return crtc;
-
-fail:
- if (crtc)
- mdp4_crtc_destroy(crtc);
-
- return ERR_PTR(ret);
}
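The mdp4 crtc/plane changes above drop the driver's own update_fb()/update_scanout()/page_flip() machinery in favour of the atomic plane and CRTC helpers. A rough sketch of the per-CRTC call order the new hooks rely on during an atomic commit (simplified; the exact sequencing lives in the drm atomic helpers and the msm commit code, not here):

	/*
	 * check phase:   mdp4_plane_atomic_check(), mdp4_crtc_atomic_check()
	 *                  - the crtc check returns -EBUSY while a flip is pending
	 * commit phase:  mdp4_plane_prepare_fb()      pin/map the incoming fb
	 *                mdp4_crtc_atomic_begin()
	 *                mdp4_plane_atomic_update()   program pipe SRC/STRIDE/BASE
	 *                mdp4_crtc_atomic_flush()     blend_setup() + crtc_flush(),
	 *                                             stash crtc->state->event
	 * vblank irq:    complete_flip() sends the stashed event; the old fb is
	 *                released afterwards via mdp4_plane_cleanup_fb()
	 */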
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 79d804e61cc..a62109e4ae0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -228,7 +228,6 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
struct drm_encoder *encoder;
struct drm_connector *connector;
struct drm_panel *panel;
- struct hdmi *hdmi;
int ret;
/* construct non-private planes: */
@@ -326,11 +325,13 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
priv->crtcs[priv->num_crtcs++] = crtc;
priv->encoders[priv->num_encoders++] = encoder;
- hdmi = hdmi_init(dev, encoder);
- if (IS_ERR(hdmi)) {
- ret = PTR_ERR(hdmi);
- dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
- goto fail;
+ if (priv->hdmi) {
+ /* Construct bridge/connector for HDMI: */
+ ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
+ if (ret) {
+ dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
+ goto fail;
+ }
}
return 0;
@@ -381,6 +382,10 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
if (IS_ERR(mdp4_kms->dsi_pll_vddio))
mdp4_kms->dsi_pll_vddio = NULL;
+ /* NOTE: the driver for this regulator is still missing upstream.. use
+ * _get_exclusive() and ignore the error if it does not exist
+ * (and hope that the bootloader left it on for us)
+ */
mdp4_kms->vdd = devm_regulator_get_exclusive(&pdev->dev, "vdd");
if (IS_ERR(mdp4_kms->vdd))
mdp4_kms->vdd = NULL;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index 9ff6e7ccfe9..cbd77bc626d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -32,13 +32,6 @@ struct mdp4_kms {
int rev;
- /* Shadow value for MDP4_LAYERMIXER_IN_CFG.. since setup for all
- * crtcs/encoders is in one shared register, we need to update it
- * via read/modify/write. But to avoid getting confused by power-
- * on-default values after resume, use this shadow value instead:
- */
- uint32_t mixer_cfg;
-
/* mapper-id used to request GEM buffer mapped for scanout: */
int id;
@@ -194,14 +187,6 @@ uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats,
void mdp4_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj);
-void mdp4_plane_set_scanout(struct drm_plane *plane,
- struct drm_framebuffer *fb);
-int mdp4_plane_mode_set(struct drm_plane *plane,
- struct drm_crtc *crtc, struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h);
enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane);
struct drm_plane *mdp4_plane_init(struct drm_device *dev,
enum mdp4_pipe pipe_id, bool private_plane);
@@ -210,8 +195,6 @@ uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc);
void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config);
void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer);
-void mdp4_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane);
-void mdp4_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane);
struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
struct drm_plane *plane, int id, int ovlp_id,
enum mdp4_dma dma_id);
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
index 310034688c1..4ddc28e1275 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
@@ -98,6 +98,9 @@ static const struct drm_connector_funcs mdp4_lvds_connector_funcs = {
.detect = mdp4_lvds_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = mdp4_lvds_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs mdp4_lvds_connector_helper_funcs = {
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 66f33dba1eb..1e5ebe83647 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -31,47 +31,26 @@ struct mdp4_plane {
};
#define to_mdp4_plane(x) container_of(x, struct mdp4_plane, base)
-static struct mdp4_kms *get_kms(struct drm_plane *plane)
-{
- struct msm_drm_private *priv = plane->dev->dev_private;
- return to_mdp4_kms(to_mdp_kms(priv->kms));
-}
-
-static int mdp4_plane_update(struct drm_plane *plane,
+static void mdp4_plane_set_scanout(struct drm_plane *plane,
+ struct drm_framebuffer *fb);
+static int mdp4_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
-{
- struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
-
- mdp4_plane->enabled = true;
-
- if (plane->fb)
- drm_framebuffer_unreference(plane->fb);
-
- drm_framebuffer_reference(fb);
-
- return mdp4_plane_mode_set(plane, crtc, fb,
- crtc_x, crtc_y, crtc_w, crtc_h,
- src_x, src_y, src_w, src_h);
-}
+ uint32_t src_w, uint32_t src_h);
-static int mdp4_plane_disable(struct drm_plane *plane)
+static struct mdp4_kms *get_kms(struct drm_plane *plane)
{
- struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
- DBG("%s: disable", mdp4_plane->name);
- if (plane->crtc)
- mdp4_crtc_detach(plane->crtc, plane);
- return 0;
+ struct msm_drm_private *priv = plane->dev->dev_private;
+ return to_mdp4_kms(to_mdp_kms(priv->kms));
}
static void mdp4_plane_destroy(struct drm_plane *plane)
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
- mdp4_plane_disable(plane);
+ drm_plane_helper_disable(plane);
drm_plane_cleanup(plane);
kfree(mdp4_plane);
@@ -92,19 +71,75 @@ int mdp4_plane_set_property(struct drm_plane *plane,
}
static const struct drm_plane_funcs mdp4_plane_funcs = {
- .update_plane = mdp4_plane_update,
- .disable_plane = mdp4_plane_disable,
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
.destroy = mdp4_plane_destroy,
.set_property = mdp4_plane_set_property,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
-void mdp4_plane_set_scanout(struct drm_plane *plane,
+static int mdp4_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
+{
+ struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+ struct mdp4_kms *mdp4_kms = get_kms(plane);
+
+ DBG("%s: prepare: FB[%u]", mdp4_plane->name, fb->base.id);
+ return msm_framebuffer_prepare(fb, mdp4_kms->id);
+}
+
+static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
+{
+ struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+ struct mdp4_kms *mdp4_kms = get_kms(plane);
+
+ DBG("%s: cleanup: FB[%u]", mdp4_plane->name, fb->base.id);
+ msm_framebuffer_cleanup(fb, mdp4_kms->id);
+}
+
+
+static int mdp4_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ return 0;
+}
+
+static void mdp4_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct drm_plane_state *state = plane->state;
+ int ret;
+
+ ret = mdp4_plane_mode_set(plane,
+ state->crtc, state->fb,
+ state->crtc_x, state->crtc_y,
+ state->crtc_w, state->crtc_h,
+ state->src_x, state->src_y,
+ state->src_w, state->src_h);
+ /* atomic_check should have ensured that this doesn't fail */
+ WARN_ON(ret < 0);
+}
+
+static const struct drm_plane_helper_funcs mdp4_plane_helper_funcs = {
+ .prepare_fb = mdp4_plane_prepare_fb,
+ .cleanup_fb = mdp4_plane_cleanup_fb,
+ .atomic_check = mdp4_plane_atomic_check,
+ .atomic_update = mdp4_plane_atomic_update,
+};
+
+static void mdp4_plane_set_scanout(struct drm_plane *plane,
struct drm_framebuffer *fb)
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
struct mdp4_kms *mdp4_kms = get_kms(plane);
enum mdp4_pipe pipe = mdp4_plane->pipe;
- uint32_t iova;
+ uint32_t iova = msm_framebuffer_iova(fb, mdp4_kms->id, 0);
+
+ DBG("%s: set_scanout: %08x (%u)", mdp4_plane->name,
+ iova, fb->pitches[0]);
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe),
MDP4_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
@@ -114,7 +149,6 @@ void mdp4_plane_set_scanout(struct drm_plane *plane,
MDP4_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
- msm_gem_get_iova(msm_framebuffer_bo(fb, 0), mdp4_kms->id, &iova);
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe), iova);
plane->fb = fb;
@@ -122,7 +156,7 @@ void mdp4_plane_set_scanout(struct drm_plane *plane,
#define MDP4_VG_PHASE_STEP_DEFAULT 0x20000000
-int mdp4_plane_mode_set(struct drm_plane *plane,
+static int mdp4_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
@@ -137,6 +171,11 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
+ if (!(crtc && fb)) {
+ DBG("%s: disabled!", mdp4_plane->name);
+ return 0;
+ }
+
/* src values are in Q16 fixed point, convert to integer: */
src_x = src_x >> 16;
src_y = src_y >> 16;
@@ -197,9 +236,6 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEX_STEP(pipe), phasex_step);
mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEY_STEP(pipe), phasey_step);
- /* TODO detach from old crtc (if we had more than one) */
- mdp4_crtc_attach(crtc, plane);
-
return 0;
}
@@ -239,9 +275,12 @@ struct drm_plane *mdp4_plane_init(struct drm_device *dev,
ARRAY_SIZE(mdp4_plane->formats));
type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
- drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
- mdp4_plane->formats, mdp4_plane->nformats,
- type);
+ ret = drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
+ mdp4_plane->formats, mdp4_plane->nformats, type);
+ if (ret)
+ goto fail;
+
+ drm_plane_helper_add(plane, &mdp4_plane_helper_funcs);
mdp4_plane_install_properties(plane, &plane->base);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index 67f4f896ba8..e87ef5512cb 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -10,14 +10,14 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
Copyright (C) 2013-2014 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
new file mode 100644
index 00000000000..b0a44310cf2
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mdp5_kms.h"
+#include "mdp5_cfg.h"
+
+struct mdp5_cfg_handler {
+ int revision;
+ struct mdp5_cfg config;
+};
+
+/* mdp5_cfg must be exposed (used in mdp5.xml.h) */
+const struct mdp5_cfg_hw *mdp5_cfg = NULL;
+
+const struct mdp5_cfg_hw msm8x74_config = {
+ .name = "msm8x74",
+ .smp = {
+ .mmb_count = 22,
+ .mmb_size = 4096,
+ },
+ .ctl = {
+ .count = 5,
+ .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+ },
+ .pipe_vig = {
+ .count = 3,
+ .base = { 0x01200, 0x01600, 0x01a00 },
+ },
+ .pipe_rgb = {
+ .count = 3,
+ .base = { 0x01e00, 0x02200, 0x02600 },
+ },
+ .pipe_dma = {
+ .count = 2,
+ .base = { 0x02a00, 0x02e00 },
+ },
+ .lm = {
+ .count = 5,
+ .base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
+ .nb_stages = 5,
+ },
+ .dspp = {
+ .count = 3,
+ .base = { 0x04600, 0x04a00, 0x04e00 },
+ },
+ .ad = {
+ .count = 2,
+ .base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */
+ },
+ .intf = {
+ .count = 4,
+ .base = { 0x12500, 0x12700, 0x12900, 0x12b00 },
+ },
+ .max_clk = 200000000,
+};
+
+const struct mdp5_cfg_hw apq8084_config = {
+ .name = "apq8084",
+ .smp = {
+ .mmb_count = 44,
+ .mmb_size = 8192,
+ .reserved_state[0] = GENMASK(7, 0), /* first 8 MMBs */
+ .reserved[CID_RGB0] = 2,
+ .reserved[CID_RGB1] = 2,
+ .reserved[CID_RGB2] = 2,
+ .reserved[CID_RGB3] = 2,
+ },
+ .ctl = {
+ .count = 5,
+ .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+ },
+ .pipe_vig = {
+ .count = 4,
+ .base = { 0x01200, 0x01600, 0x01a00, 0x01e00 },
+ },
+ .pipe_rgb = {
+ .count = 4,
+ .base = { 0x02200, 0x02600, 0x02a00, 0x02e00 },
+ },
+ .pipe_dma = {
+ .count = 2,
+ .base = { 0x03200, 0x03600 },
+ },
+ .lm = {
+ .count = 6,
+ .base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 },
+ .nb_stages = 5,
+ },
+ .dspp = {
+ .count = 4,
+ .base = { 0x05200, 0x05600, 0x05a00, 0x05e00 },
+
+ },
+ .ad = {
+ .count = 3,
+ .base = { 0x13500, 0x13700, 0x13900 },
+ },
+ .intf = {
+ .count = 5,
+ .base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 },
+ },
+ .max_clk = 320000000,
+};
+
+static const struct mdp5_cfg_handler cfg_handlers[] = {
+ { .revision = 0, .config = { .hw = &msm8x74_config } },
+ { .revision = 2, .config = { .hw = &msm8x74_config } },
+ { .revision = 3, .config = { .hw = &apq8084_config } },
+};
+
+
+static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev);
+
+const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_handler)
+{
+ return cfg_handler->config.hw;
+}
+
+struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_handler)
+{
+ return &cfg_handler->config;
+}
+
+int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_handler)
+{
+ return cfg_handler->revision;
+}
+
+void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_handler)
+{
+ kfree(cfg_handler);
+}
+
+struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
+ uint32_t major, uint32_t minor)
+{
+ struct drm_device *dev = mdp5_kms->dev;
+ struct platform_device *pdev = dev->platformdev;
+ struct mdp5_cfg_handler *cfg_handler;
+ struct mdp5_cfg_platform *pconfig;
+ int i, ret = 0;
+
+ cfg_handler = kzalloc(sizeof(*cfg_handler), GFP_KERNEL);
+ if (unlikely(!cfg_handler)) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ if (major != 1) {
+ dev_err(dev->dev, "unexpected MDP major version: v%d.%d\n",
+ major, minor);
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ /* the hw can only be accessed after the global mdp5_cfg pointer is initialized: */
+ for (i = 0; i < ARRAY_SIZE(cfg_handlers); i++) {
+ if (cfg_handlers[i].revision != minor)
+ continue;
+ mdp5_cfg = cfg_handlers[i].config.hw;
+
+ break;
+ }
+ if (unlikely(!mdp5_cfg)) {
+ dev_err(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
+ major, minor);
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ cfg_handler->revision = minor;
+ cfg_handler->config.hw = mdp5_cfg;
+
+ pconfig = mdp5_get_config(pdev);
+ memcpy(&cfg_handler->config.platform, pconfig, sizeof(*pconfig));
+
+ DBG("MDP5: %s hw config selected", mdp5_cfg->name);
+
+ return cfg_handler;
+
+fail:
+ if (cfg_handler)
+ mdp5_cfg_destroy(cfg_handler);
+
+ return NULL;
+}
+
+static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev)
+{
+ static struct mdp5_cfg_platform config = {};
+#ifdef CONFIG_OF
+ /* TODO */
+#endif
+ config.iommu = iommu_domain_alloc(&platform_bus_type);
+
+ return &config;
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
new file mode 100644
index 00000000000..dba4d52ccee
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDP5_CFG_H__
+#define __MDP5_CFG_H__
+
+#include "msm_drv.h"
+
+/*
+ * mdp5_cfg
+ *
+ * This module configures the dynamic offsets used by mdp5.xml.h
+ * (initialized in mdp5_cfg.c)
+ */
+extern const struct mdp5_cfg_hw *mdp5_cfg;
+
+#define MAX_CTL 8
+#define MAX_BASES 8
+#define MAX_SMP_BLOCKS 44
+#define MAX_CLIENTS 32
+
+typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS);
+
+#define MDP5_SUB_BLOCK_DEFINITION \
+ int count; \
+ uint32_t base[MAX_BASES]
+
+struct mdp5_sub_block {
+ MDP5_SUB_BLOCK_DEFINITION;
+};
+
+struct mdp5_lm_block {
+ MDP5_SUB_BLOCK_DEFINITION;
+ uint32_t nb_stages; /* number of stages per blender */
+};
+
+struct mdp5_smp_block {
+ int mmb_count; /* number of SMP MMBs */
+ int mmb_size; /* MMB: size in bytes */
+ mdp5_smp_state_t reserved_state;/* SMP MMBs statically allocated */
+ int reserved[MAX_CLIENTS]; /* # of MMBs allocated per client */
+};
+
+struct mdp5_cfg_hw {
+ char *name;
+
+ struct mdp5_smp_block smp;
+ struct mdp5_sub_block ctl;
+ struct mdp5_sub_block pipe_vig;
+ struct mdp5_sub_block pipe_rgb;
+ struct mdp5_sub_block pipe_dma;
+ struct mdp5_lm_block lm;
+ struct mdp5_sub_block dspp;
+ struct mdp5_sub_block ad;
+ struct mdp5_sub_block intf;
+
+ uint32_t max_clk;
+};
+
+/* platform config data (ie. from DT, or pdata) */
+struct mdp5_cfg_platform {
+ struct iommu_domain *iommu;
+};
+
+struct mdp5_cfg {
+ const struct mdp5_cfg_hw *hw;
+ struct mdp5_cfg_platform platform;
+};
+
+struct mdp5_kms;
+struct mdp5_cfg_handler;
+
+const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_hnd);
+struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_hnd);
+int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_hnd);
+
+struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
+ uint32_t major, uint32_t minor);
+void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_hnd);
+
+#endif /* __MDP5_CFG_H__ */
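A minimal sketch of how this config module is meant to be consumed (illustrative only; the real call sites are in mdp5_kms.c further down this patch, and example_cfg_usage() is a hypothetical name):

static int example_cfg_usage(struct mdp5_kms *mdp5_kms,
		uint32_t major, uint32_t minor)
{
	struct mdp5_cfg_handler *cfg_handler;
	const struct mdp5_cfg_hw *hw_cfg;

	/* pick the hw config matching the probed MDP revision: */
	cfg_handler = mdp5_cfg_init(mdp5_kms, major, minor);
	if (!cfg_handler)
		return -ENXIO;	/* mdp5_cfg_init() returns NULL on failure as written */

	/* query per-SoC block counts instead of hard-coding them: */
	hw_cfg = mdp5_cfg_get_hw_config(cfg_handler);
	DBG("have %d CTLs and %d layer mixers", hw_cfg->ctl.count, hw_cfg->lm.count);

	mdp5_cfg_destroy(cfg_handler);
	return 0;
}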
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index ebe2e60f3ab..0e9a2e3a82d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -17,43 +18,35 @@
#include "mdp5_kms.h"
+#include <linux/sort.h>
#include <drm/drm_mode.h>
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_flip_work.h"
+#define SSPP_MAX (SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */
+
struct mdp5_crtc {
struct drm_crtc base;
char name[8];
- struct drm_plane *plane;
- struct drm_plane *planes[8];
int id;
bool enabled;
- /* which mixer/encoder we route output to: */
- int mixer;
+ /* layer mixer used for this CRTC (+ its lock): */
+#define GET_LM_ID(crtc_id) ((crtc_id == 3) ? 5 : crtc_id)
+ int lm;
+ spinlock_t lm_lock; /* protect REG_MDP5_LM_* registers */
+
+ /* CTL used for this CRTC: */
+ struct mdp5_ctl *ctl;
/* if there is a pending flip, these will be non-null: */
struct drm_pending_vblank_event *event;
- struct msm_fence_cb pageflip_cb;
#define PENDING_CURSOR 0x1
#define PENDING_FLIP 0x2
atomic_t pending;
- /* the fb that we logically (from PoV of KMS API) hold a ref
- * to. Which we may not yet be scanning out (we may still
- * be scanning out previous in case of page_flip while waiting
- * for gpu rendering to complete:
- */
- struct drm_framebuffer *fb;
-
- /* the fb that we currently hold a scanout ref to: */
- struct drm_framebuffer *scanout_fb;
-
- /* for unref'ing framebuffers after scanout completes: */
- struct drm_flip_work unref_fb_work;
-
struct mdp_irq vblank;
struct mdp_irq err;
};
@@ -73,67 +66,38 @@ static void request_pending(struct drm_crtc *crtc, uint32_t pending)
mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
}
-static void crtc_flush(struct drm_crtc *crtc)
-{
- struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
- struct mdp5_kms *mdp5_kms = get_kms(crtc);
- int id = mdp5_crtc->id;
- uint32_t i, flush = 0;
-
- for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) {
- struct drm_plane *plane = mdp5_crtc->planes[i];
- if (plane) {
- enum mdp5_pipe pipe = mdp5_plane_pipe(plane);
- flush |= pipe2flush(pipe);
- }
- }
- flush |= mixer2flush(mdp5_crtc->id);
- flush |= MDP5_CTL_FLUSH_CTL;
-
- DBG("%s: flush=%08x", mdp5_crtc->name, flush);
-
- mdp5_write(mdp5_kms, REG_MDP5_CTL_FLUSH(id), flush);
-}
+#define mdp5_lm_get_flush(lm) mdp_ctl_flush_mask_lm(lm)
-static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
+static void crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
- struct drm_framebuffer *old_fb = mdp5_crtc->fb;
-
- /* grab reference to incoming scanout fb: */
- drm_framebuffer_reference(new_fb);
- mdp5_crtc->base.primary->fb = new_fb;
- mdp5_crtc->fb = new_fb;
- if (old_fb)
- drm_flip_work_queue(&mdp5_crtc->unref_fb_work, old_fb);
+ DBG("%s: flush=%08x", mdp5_crtc->name, flush_mask);
+ mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask);
}
-/* unlike update_fb(), take a ref to the new scanout fb *before* updating
- * plane, then call this. Needed to ensure we don't unref the buffer that
- * is actually still being scanned out.
- *
- * Note that this whole thing goes away with atomic.. since we can defer
- * calling into driver until rendering is done.
+/*
+ * flush updates, to make sure hw is updated to new scanout fb,
+ * so that we can safely queue unref to current fb (ie. next
+ * vblank we know hw is done w/ previous scanout_fb).
*/
-static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
+static void crtc_flush_all(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct drm_plane *plane;
+ uint32_t flush_mask = 0;
- /* flush updates, to make sure hw is updated to new scanout fb,
- * so that we can safely queue unref to current fb (ie. next
- * vblank we know hw is done w/ previous scanout_fb).
- */
- crtc_flush(crtc);
-
- if (mdp5_crtc->scanout_fb)
- drm_flip_work_queue(&mdp5_crtc->unref_fb_work,
- mdp5_crtc->scanout_fb);
+ /* we could have already released CTL in the disable path: */
+ if (!mdp5_crtc->ctl)
+ return;
- mdp5_crtc->scanout_fb = fb;
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ flush_mask |= mdp5_plane_get_flush(plane);
+ }
+ flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
+ flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);
- /* enable vblank to complete flip: */
- request_pending(crtc, PENDING_FLIP);
+ crtc_flush(crtc, flush_mask);
}
/* if file!=NULL, this is preclose potential cancel-flip path */
@@ -142,7 +106,8 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_pending_vblank_event *event;
- unsigned long flags, i;
+ struct drm_plane *plane;
+ unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
event = mdp5_crtc->event;
@@ -153,50 +118,22 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
*/
if (!file || (event->base.file_priv == file)) {
mdp5_crtc->event = NULL;
+ DBG("%s: send event: %p", mdp5_crtc->name, event);
drm_send_vblank_event(dev, mdp5_crtc->id, event);
}
}
spin_unlock_irqrestore(&dev->event_lock, flags);
- for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) {
- struct drm_plane *plane = mdp5_crtc->planes[i];
- if (plane)
- mdp5_plane_complete_flip(plane);
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ mdp5_plane_complete_flip(plane);
}
}
-static void pageflip_cb(struct msm_fence_cb *cb)
-{
- struct mdp5_crtc *mdp5_crtc =
- container_of(cb, struct mdp5_crtc, pageflip_cb);
- struct drm_crtc *crtc = &mdp5_crtc->base;
- struct drm_framebuffer *fb = mdp5_crtc->fb;
-
- if (!fb)
- return;
-
- drm_framebuffer_reference(fb);
- mdp5_plane_set_scanout(mdp5_crtc->plane, fb);
- update_scanout(crtc, fb);
-}
-
-static void unref_fb_worker(struct drm_flip_work *work, void *val)
-{
- struct mdp5_crtc *mdp5_crtc =
- container_of(work, struct mdp5_crtc, unref_fb_work);
- struct drm_device *dev = mdp5_crtc->base.dev;
-
- mutex_lock(&dev->mode_config.mutex);
- drm_framebuffer_unreference(val);
- mutex_unlock(&dev->mode_config.mutex);
-}
-
static void mdp5_crtc_destroy(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
drm_crtc_cleanup(crtc);
- drm_flip_work_cleanup(&mdp5_crtc->unref_fb_work);
kfree(mdp5_crtc);
}
@@ -214,6 +151,8 @@ static void mdp5_crtc_dpms(struct drm_crtc *crtc, int mode)
mdp5_enable(mdp5_kms);
mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
} else {
+ /* set STAGE_UNUSED for all layers */
+ mdp5_ctl_blend(mdp5_crtc->ctl, mdp5_crtc->lm, 0x00000000);
mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
mdp5_disable(mdp5_kms);
}
@@ -228,54 +167,78 @@ static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
return true;
}
+/*
+ * blend_setup() - blend all the planes of a CRTC
+ *
+ * When border is enabled, the border color will ALWAYS be the base layer.
+ * Therefore, the first plane (private RGB pipe) will start at STAGE0.
+ * If disabled, the first plane starts at STAGE_BASE.
+ *
+ * Note:
+ * Border is not enabled here because the private plane is exactly
+ * the CRTC resolution.
+ */
static void blend_setup(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
- int id = mdp5_crtc->id;
+ struct drm_plane *plane;
+ const struct mdp5_cfg_hw *hw_cfg;
+ uint32_t lm = mdp5_crtc->lm, blend_cfg = 0;
+ unsigned long flags;
+#define blender(stage) ((stage) - STAGE_BASE)
- /*
- * Hard-coded setup for now until I figure out how the
- * layer-mixer works
- */
+ hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
- /* LM[id]: */
- mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(id),
- MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA);
- mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(id, 0),
- MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
- MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL) |
- MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA);
- mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(id, 0), 0xff);
- mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(id, 0), 0x00);
-
- /* NOTE: seems that LM[n] and CTL[m], we do not need n==m.. but
- * we want to be setting CTL[m].LAYER[n]. Not sure what the
- * point of having CTL[m].LAYER[o] (for o!=n).. maybe that is
- * used when chaining up mixers for high resolution displays?
- */
+ spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
+
+ /* ctl could be released already when we are shutting down: */
+ if (!mdp5_crtc->ctl)
+ goto out;
- /* CTL[id]: */
- mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 0),
- MDP5_CTL_LAYER_REG_RGB0(STAGE0) |
- MDP5_CTL_LAYER_REG_BORDER_COLOR);
- mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 1), 0);
- mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 2), 0);
- mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 3), 0);
- mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 4), 0);
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ enum mdp_mixer_stage_id stage =
+ to_mdp5_plane_state(plane->state)->stage;
+
+ /*
+ * Note: this cannot happen with the current implementation, but
+ * the condition will need to be checked once the z property is added
+ */
+ BUG_ON(stage > hw_cfg->lm.nb_stages);
+
+ /* LM */
+ mdp5_write(mdp5_kms,
+ REG_MDP5_LM_BLEND_OP_MODE(lm, blender(stage)),
+ MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
+ MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST));
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
+ blender(stage)), 0xff);
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
+ blender(stage)), 0x00);
+ /* CTL */
+ blend_cfg |= mdp_ctl_blend_mask(mdp5_plane_pipe(plane), stage);
+ DBG("%s: blending pipe %s on stage=%d", mdp5_crtc->name,
+ pipe2name(mdp5_plane_pipe(plane)), stage);
+ }
+
+ DBG("%s: lm%d: blend config = 0x%08x", mdp5_crtc->name, lm, blend_cfg);
+ mdp5_ctl_blend(mdp5_crtc->ctl, lm, blend_cfg);
+
+out:
+ spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}
-static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
- int x, int y,
- struct drm_framebuffer *old_fb)
+static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
- int ret;
+ unsigned long flags;
+ struct drm_display_mode *mode;
- mode = adjusted_mode;
+ if (WARN_ON(!crtc->state))
+ return;
+
+ mode = &crtc->state->adjusted_mode;
DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
mdp5_crtc->name, mode->base.id, mode->name,
@@ -286,28 +249,11 @@ static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
mode->vsync_end, mode->vtotal,
mode->type, mode->flags);
- /* grab extra ref for update_scanout() */
- drm_framebuffer_reference(crtc->primary->fb);
-
- ret = mdp5_plane_mode_set(mdp5_crtc->plane, crtc, crtc->primary->fb,
- 0, 0, mode->hdisplay, mode->vdisplay,
- x << 16, y << 16,
- mode->hdisplay << 16, mode->vdisplay << 16);
- if (ret) {
- drm_framebuffer_unreference(crtc->primary->fb);
- dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
- mdp5_crtc->name, ret);
- return ret;
- }
-
- mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->id),
+ spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
+ mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->lm),
MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
-
- update_fb(crtc, crtc->primary->fb);
- update_scanout(crtc, crtc->primary->fb);
-
- return 0;
+ spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}
static void mdp5_crtc_prepare(struct drm_crtc *crtc)
@@ -321,66 +267,119 @@ static void mdp5_crtc_prepare(struct drm_crtc *crtc)
static void mdp5_crtc_commit(struct drm_crtc *crtc)
{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ DBG("%s", mdp5_crtc->name);
mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
- crtc_flush(crtc);
+ crtc_flush_all(crtc);
/* drop the ref to mdp clk's that we got in prepare: */
mdp5_disable(get_kms(crtc));
}
-static int mdp5_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
+static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+struct plane_state {
+ struct drm_plane *plane;
+ struct mdp5_plane_state *state;
+};
+
+static int pstate_cmp(const void *a, const void *b)
+{
+ struct plane_state *pa = (struct plane_state *)a;
+ struct plane_state *pb = (struct plane_state *)b;
+ return pa->state->zpos - pb->state->zpos;
+}
+
+static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
- struct drm_plane *plane = mdp5_crtc->plane;
- struct drm_display_mode *mode = &crtc->mode;
- int ret;
-
- /* grab extra ref for update_scanout() */
- drm_framebuffer_reference(crtc->primary->fb);
-
- ret = mdp5_plane_mode_set(plane, crtc, crtc->primary->fb,
- 0, 0, mode->hdisplay, mode->vdisplay,
- x << 16, y << 16,
- mode->hdisplay << 16, mode->vdisplay << 16);
- if (ret) {
- drm_framebuffer_unreference(crtc->primary->fb);
- return ret;
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ struct drm_plane *plane;
+ struct drm_device *dev = crtc->dev;
+ struct plane_state pstates[STAGE3 + 1];
+ int cnt = 0, i;
+
+ DBG("%s: check", mdp5_crtc->name);
+
+ if (mdp5_crtc->event) {
+ dev_err(dev->dev, "already pending flip!\n");
+ return -EBUSY;
}
- update_fb(crtc, crtc->primary->fb);
- update_scanout(crtc, crtc->primary->fb);
+ /* request a free CTL, if none is already allocated for this CRTC */
+ if (state->enable && !mdp5_crtc->ctl) {
+ mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
+ if (WARN_ON(!mdp5_crtc->ctl))
+ return -EINVAL;
+ }
+
+ /* verify that there are not too many planes attached to crtc
+ * and that we don't have conflicting mixer stages:
+ */
+ drm_atomic_crtc_state_for_each_plane(plane, state) {
+ struct drm_plane_state *pstate;
+
+ if (cnt >= ARRAY_SIZE(pstates)) {
+ dev_err(dev->dev, "too many planes!\n");
+ return -EINVAL;
+ }
+
+ pstate = state->state->plane_states[drm_plane_index(plane)];
+
+ /* plane might not have changed, in which case take
+ * current state:
+ */
+ if (!pstate)
+ pstate = plane->state;
+
+ pstates[cnt].plane = plane;
+ pstates[cnt].state = to_mdp5_plane_state(pstate);
+
+ cnt++;
+ }
+
+ sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
+
+ for (i = 0; i < cnt; i++) {
+ pstates[i].state->stage = STAGE_BASE + i;
+ DBG("%s: assign pipe %s on stage=%d", mdp5_crtc->name,
+ pipe2name(mdp5_plane_pipe(pstates[i].plane)),
+ pstates[i].state->stage);
+ }
return 0;
}
-static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
+static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc)
{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ DBG("%s: begin", mdp5_crtc->name);
}
-static int mdp5_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *new_fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags)
+static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct drm_gem_object *obj;
unsigned long flags;
- if (mdp5_crtc->event) {
- dev_err(dev->dev, "already pending flip!\n");
- return -EBUSY;
- }
+ DBG("%s: flush", mdp5_crtc->name);
- obj = msm_framebuffer_bo(new_fb, 0);
+ WARN_ON(mdp5_crtc->event);
spin_lock_irqsave(&dev->event_lock, flags);
- mdp5_crtc->event = event;
+ mdp5_crtc->event = crtc->state->event;
spin_unlock_irqrestore(&dev->event_lock, flags);
- update_fb(crtc, new_fb);
+ blend_setup(crtc);
+ crtc_flush_all(crtc);
+ request_pending(crtc, PENDING_FLIP);
- return msm_gem_queue_inactive_cb(obj, &mdp5_crtc->pageflip_cb);
+ if (mdp5_crtc->ctl && !crtc->state->enable) {
+ mdp5_ctl_release(mdp5_crtc->ctl);
+ mdp5_crtc->ctl = NULL;
+ }
}
static int mdp5_crtc_set_property(struct drm_crtc *crtc,
@@ -391,27 +390,33 @@ static int mdp5_crtc_set_property(struct drm_crtc *crtc,
}
static const struct drm_crtc_funcs mdp5_crtc_funcs = {
- .set_config = drm_crtc_helper_set_config,
+ .set_config = drm_atomic_helper_set_config,
.destroy = mdp5_crtc_destroy,
- .page_flip = mdp5_crtc_page_flip,
+ .page_flip = drm_atomic_helper_page_flip,
.set_property = mdp5_crtc_set_property,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
.dpms = mdp5_crtc_dpms,
.mode_fixup = mdp5_crtc_mode_fixup,
- .mode_set = mdp5_crtc_mode_set,
+ .mode_set_nofb = mdp5_crtc_mode_set_nofb,
+ .mode_set = drm_helper_crtc_mode_set,
+ .mode_set_base = drm_helper_crtc_mode_set_base,
.prepare = mdp5_crtc_prepare,
.commit = mdp5_crtc_commit,
- .mode_set_base = mdp5_crtc_mode_set_base,
.load_lut = mdp5_crtc_load_lut,
+ .atomic_check = mdp5_crtc_atomic_check,
+ .atomic_begin = mdp5_crtc_atomic_begin,
+ .atomic_flush = mdp5_crtc_atomic_flush,
};
static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
struct drm_crtc *crtc = &mdp5_crtc->base;
- struct msm_drm_private *priv = crtc->dev->dev_private;
unsigned pending;
mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);
@@ -420,16 +425,14 @@ static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
if (pending & PENDING_FLIP) {
complete_flip(crtc, NULL);
- drm_flip_work_commit(&mdp5_crtc->unref_fb_work, priv->wq);
}
}
static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);
- struct drm_crtc *crtc = &mdp5_crtc->base;
+
DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
- crtc_flush(crtc);
}
uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
@@ -450,10 +453,9 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
- static const enum mdp5_intfnum intfnum[] = {
- INTF0, INTF1, INTF2, INTF3,
- };
+ uint32_t flush_mask = 0;
uint32_t intf_sel;
+ unsigned long flags;
/* now that we know what irq's we want: */
mdp5_crtc->err.irqmask = intf2err(intf);
@@ -463,6 +465,7 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
if (!mdp5_kms)
return;
+ spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
switch (intf) {
@@ -487,45 +490,25 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
break;
}
- blend_setup(crtc);
+ mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
+ spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
DBG("%s: intf_sel=%08x", mdp5_crtc->name, intf_sel);
+ mdp5_ctl_set_intf(mdp5_crtc->ctl, intf);
+ flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
+ flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);
- mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
- mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(mdp5_crtc->id),
- MDP5_CTL_OP_MODE(MODE_NONE) |
- MDP5_CTL_OP_INTF_NUM(intfnum[intf]));
-
- crtc_flush(crtc);
+ crtc_flush(crtc, flush_mask);
}
-static void set_attach(struct drm_crtc *crtc, enum mdp5_pipe pipe_id,
- struct drm_plane *plane)
+int mdp5_crtc_get_lm(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
- BUG_ON(pipe_id >= ARRAY_SIZE(mdp5_crtc->planes));
+ if (WARN_ON(!crtc))
+ return -EINVAL;
- if (mdp5_crtc->planes[pipe_id] == plane)
- return;
-
- mdp5_crtc->planes[pipe_id] = plane;
- blend_setup(crtc);
- if (mdp5_crtc->enabled && (plane != mdp5_crtc->plane))
- crtc_flush(crtc);
-}
-
-void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
-{
- set_attach(crtc, mdp5_plane_pipe(plane), plane);
-}
-
-void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
-{
- /* don't actually detatch our primary plane: */
- if (to_mdp5_crtc(crtc)->plane == plane)
- return;
- set_attach(crtc, mdp5_plane_pipe(plane), NULL);
+ return mdp5_crtc->lm;
}
/* initialize crtc */
@@ -534,18 +517,17 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
{
struct drm_crtc *crtc = NULL;
struct mdp5_crtc *mdp5_crtc;
- int ret;
mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
- if (!mdp5_crtc) {
- ret = -ENOMEM;
- goto fail;
- }
+ if (!mdp5_crtc)
+ return ERR_PTR(-ENOMEM);
crtc = &mdp5_crtc->base;
- mdp5_crtc->plane = plane;
mdp5_crtc->id = id;
+ mdp5_crtc->lm = GET_LM_ID(id);
+
+ spin_lock_init(&mdp5_crtc->lm_lock);
mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
mdp5_crtc->err.irq = mdp5_crtc_err_irq;
@@ -553,23 +535,11 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d",
pipe2name(mdp5_plane_pipe(plane)), id);
- ret = drm_flip_work_init(&mdp5_crtc->unref_fb_work, 16,
- "unref fb", unref_fb_worker);
- if (ret)
- goto fail;
-
- INIT_FENCE_CB(&mdp5_crtc->pageflip_cb, pageflip_cb);
-
drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs);
drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
+ plane->crtc = crtc;
- mdp5_plane_install_properties(mdp5_crtc->plane, &crtc->base);
+ mdp5_plane_install_properties(plane, &crtc->base);
return crtc;
-
-fail:
- if (crtc)
- mdp5_crtc_destroy(crtc);
-
- return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
new file mode 100644
index 00000000000..dea4505ac96
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
@@ -0,0 +1,322 @@
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mdp5_kms.h"
+#include "mdp5_ctl.h"
+
+/*
+ * CTL - MDP Control Pool Manager
+ *
+ * Controls are shared between all CRTCs.
+ *
+ * They are intended to be used for data path configuration.
+ * The top level register programming describes the complete data path for
+ * a specific data path ID - REG_MDP5_CTL_*(<id>, ...)
+ *
+ * Hardware capabilities determine the number of concurrent data paths
+ *
+ * In certain use cases (high-resolution dual pipe), one single CTL can be
+ * shared across multiple CRTCs.
+ *
+ * Because the number of CTLs can be less than the number of CRTCs,
+ * CTLs are allocated dynamically from the pool: a CRTC gets a free CTL
+ * only when one is requested for it (see mdp5_crtc_atomic_check()).
+ */
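A minimal usage sketch of the pool (illustrative only, built from the prototypes this patch adds in mdp5_ctl.h; the real callers are the CRTC paths, and example_ctl_lifecycle() is a hypothetical name):

static void example_ctl_lifecycle(struct mdp5_ctl_manager *ctlm,
		struct drm_crtc *crtc, u32 lm)
{
	struct mdp5_ctl *ctl;
	u32 blend_cfg, flush_mask;

	/* grab a free CTL from the pool and tie it to this CRTC: */
	ctl = mdp5_ctlm_request(ctlm, crtc);
	if (!ctl)
		return;

	/* stage a pipe on the layer mixer via the CTL's LAYER register: */
	blend_cfg = mdp_ctl_blend_mask(SSPP_RGB0, STAGE_BASE);
	mdp5_ctl_blend(ctl, lm, blend_cfg);

	/* flush the CTL + LM blocks so the new configuration takes effect: */
	flush_mask = mdp5_ctl_get_flush(ctl) | mdp_ctl_flush_mask_lm(lm);
	mdp5_ctl_commit(ctl, flush_mask);

	/* hand the CTL back to the pool (e.g. when the CRTC is disabled): */
	mdp5_ctl_release(ctl);
}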
+
+struct mdp5_ctl {
+ struct mdp5_ctl_manager *ctlm;
+
+ u32 id;
+
+ /* whether this CTL has been allocated or not: */
+ bool busy;
+
+ /* memory output connection (@see mdp5_ctl_mode): */
+ u32 mode;
+
+ /* REG_MDP5_CTL_*(<id>) registers access info + lock: */
+ spinlock_t hw_lock;
+ u32 reg_offset;
+
+ /* flush mask used to commit CTL registers */
+ u32 flush_mask;
+
+ bool cursor_on;
+
+ struct drm_crtc *crtc;
+};
+
+struct mdp5_ctl_manager {
+ struct drm_device *dev;
+
+ /* number of CTL / Layer Mixers in this hw config: */
+ u32 nlm;
+ u32 nctl;
+
+ /* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
+ spinlock_t pool_lock;
+ struct mdp5_ctl ctls[MAX_CTL];
+};
+
+static inline
+struct mdp5_kms *get_kms(struct mdp5_ctl_manager *ctl_mgr)
+{
+ struct msm_drm_private *priv = ctl_mgr->dev->dev_private;
+
+ return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+static inline
+void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
+
+ (void)ctl->reg_offset; /* TODO use this instead of mdp5_write */
+ mdp5_write(mdp5_kms, reg, data);
+}
+
+static inline
+u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
+
+ (void)ctl->reg_offset; /* TODO use this instead of mdp5_write */
+ return mdp5_read(mdp5_kms, reg);
+}
+
+
+int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, enum mdp5_intf intf)
+{
+ unsigned long flags;
+ static const enum mdp5_intfnum intfnum[] = {
+ INTF0, INTF1, INTF2, INTF3,
+ };
+
+ spin_lock_irqsave(&ctl->hw_lock, flags);
+ ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id),
+ MDP5_CTL_OP_MODE(ctl->mode) |
+ MDP5_CTL_OP_INTF_NUM(intfnum[intf]));
+ spin_unlock_irqrestore(&ctl->hw_lock, flags);
+
+ return 0;
+}
+
+int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable)
+{
+ struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+ unsigned long flags;
+ u32 blend_cfg;
+ int lm;
+
+ lm = mdp5_crtc_get_lm(ctl->crtc);
+ if (unlikely(WARN_ON(lm < 0))) {
+ dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
+ ctl->id, lm);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&ctl->hw_lock, flags);
+
+ blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm));
+
+ if (enable)
+ blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
+ else
+ blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
+
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
+
+ spin_unlock_irqrestore(&ctl->hw_lock, flags);
+
+ ctl->cursor_on = enable;
+
+ return 0;
+}
+
+
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg)
+{
+ unsigned long flags;
+
+ if (ctl->cursor_on)
+ blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
+ else
+ blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
+
+ spin_lock_irqsave(&ctl->hw_lock, flags);
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
+ spin_unlock_irqrestore(&ctl->hw_lock, flags);
+
+ return 0;
+}
+
+int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
+{
+ struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+ unsigned long flags;
+
+ if (flush_mask & MDP5_CTL_FLUSH_CURSOR_DUMMY) {
+ int lm = mdp5_crtc_get_lm(ctl->crtc);
+
+ if (unlikely(WARN_ON(lm < 0))) {
+ dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
+ ctl->id, lm);
+ return -EINVAL;
+ }
+
+ /* for current targets, cursor bit is the same as LM bit */
+ flush_mask |= mdp_ctl_flush_mask_lm(lm);
+ }
+
+ spin_lock_irqsave(&ctl->hw_lock, flags);
+ ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask);
+ spin_unlock_irqrestore(&ctl->hw_lock, flags);
+
+ return 0;
+}
+
+u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl)
+{
+ return ctl->flush_mask;
+}
+
+void mdp5_ctl_release(struct mdp5_ctl *ctl)
+{
+ struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+ unsigned long flags;
+
+ if (unlikely(WARN_ON(ctl->id >= MAX_CTL) || !ctl->busy)) {
+ dev_err(ctl_mgr->dev->dev, "CTL %d in bad state (%d)",
+ ctl->id, ctl->busy);
+ return;
+ }
+
+ spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
+ ctl->busy = false;
+ spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
+
+ DBG("CTL %d released", ctl->id);
+}
+
+/*
+ * mdp5_ctlm_request() - CTL dynamic allocation
+ *
+ * Note: the current implementation assumes at most one CRTC per CTL
+ *
+ * @return first free CTL
+ */
+struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
+ struct drm_crtc *crtc)
+{
+ struct mdp5_ctl *ctl = NULL;
+ unsigned long flags;
+ int c;
+
+ spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
+
+ for (c = 0; c < ctl_mgr->nctl; c++)
+ if (!ctl_mgr->ctls[c].busy)
+ break;
+
+ if (unlikely(c >= ctl_mgr->nctl)) {
+ dev_err(ctl_mgr->dev->dev, "No more CTL available!");
+ goto unlock;
+ }
+
+ ctl = &ctl_mgr->ctls[c];
+
+ ctl->crtc = crtc;
+ ctl->busy = true;
+ DBG("CTL %d allocated", ctl->id);
+
+unlock:
+ spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
+ return ctl;
+}
+
+void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr)
+{
+ unsigned long flags;
+ int c;
+
+ for (c = 0; c < ctl_mgr->nctl; c++) {
+ struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
+
+ spin_lock_irqsave(&ctl->hw_lock, flags);
+ ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0);
+ spin_unlock_irqrestore(&ctl->hw_lock, flags);
+ }
+}
+
+void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
+{
+ kfree(ctl_mgr);
+}
+
+struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
+ void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg)
+{
+ struct mdp5_ctl_manager *ctl_mgr;
+ const struct mdp5_sub_block *ctl_cfg = &hw_cfg->ctl;
+ unsigned long flags;
+ int c, ret;
+
+ ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
+ if (!ctl_mgr) {
+ dev_err(dev->dev, "failed to allocate CTL manager\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ if (unlikely(WARN_ON(ctl_cfg->count > MAX_CTL))) {
+ dev_err(dev->dev, "Increase static pool size to at least %d\n",
+ ctl_cfg->count);
+ ret = -ENOSPC;
+ goto fail;
+ }
+
+ /* initialize the CTL manager: */
+ ctl_mgr->dev = dev;
+ ctl_mgr->nlm = hw_cfg->lm.count;
+ ctl_mgr->nctl = ctl_cfg->count;
+ spin_lock_init(&ctl_mgr->pool_lock);
+
+ /* initialize each CTL of the pool: */
+ spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
+ for (c = 0; c < ctl_mgr->nctl; c++) {
+ struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
+
+ if (WARN_ON(!ctl_cfg->base[c])) {
+ dev_err(dev->dev, "CTL_%d: base is null!\n", c);
+ ret = -EINVAL;
+ goto fail;
+ }
+ ctl->ctlm = ctl_mgr;
+ ctl->id = c;
+ ctl->mode = MODE_NONE;
+ ctl->reg_offset = ctl_cfg->base[c];
+ ctl->flush_mask = MDP5_CTL_FLUSH_CTL;
+ ctl->busy = false;
+ spin_lock_init(&ctl->hw_lock);
+ }
+ spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
+ DBG("Pool of %d CTLs created.", ctl_mgr->nctl);
+
+ return ctl_mgr;
+
+fail:
+ if (ctl_mgr)
+ mdp5_ctlm_destroy(ctl_mgr);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
new file mode 100644
index 00000000000..1018519b6af
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDP5_CTL_H__
+#define __MDP5_CTL_H__
+
+#include "msm_drv.h"
+
+/*
+ * CTL Manager prototypes:
+ * mdp5_ctlm_init() returns a ctlm (CTL Manager) handler,
+ * which is then used to call the other mdp5_ctlm_*(ctlm, ...) functions.
+ */
+struct mdp5_ctl_manager;
+struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
+ void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg);
+void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctlm);
+void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm);
+
+/*
+ * CTL prototypes:
+ * mdp5_ctl_request(ctlm, ...) returns a ctl (CTL resource) handler,
+ * which is then used to call the other mdp5_ctl_*(ctl, ...) functions.
+ */
+struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, struct drm_crtc *crtc);
+
+int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, enum mdp5_intf intf);
+
+int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable);
+
+/* @blend_cfg: see LM blender config definition below */
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg);
+
+/* @flush_mask: see CTL flush masks definitions below */
+int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask);
+u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl);
+
+void mdp5_ctl_release(struct mdp5_ctl *ctl);
+
+/*
+ * blend_cfg (LM blender config):
+ *
+ * The function below allows the caller of mdp5_ctl_blend() to specify how pipes
+ * are being blended according to their stage (z-order), through @blend_cfg arg.
+ */
+static inline u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
+ enum mdp_mixer_stage_id stage)
+{
+ switch (pipe) {
+ case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage);
+ case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage);
+ case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage);
+ case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage);
+ case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage);
+ case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage);
+ case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage);
+ case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage);
+ case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage);
+ case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage);
+ default: return 0;
+ }
+}
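For example, the per-pipe masks are simply OR'ed together, one per stage (illustrative fragment, assuming ctl and lm from the surrounding CRTC code; blend_setup() in mdp5_crtc.c builds the mask the same way for every attached plane):

	/* RGB0 at the base stage plus VIG0 one stage above it: */
	u32 blend_cfg = mdp_ctl_blend_mask(SSPP_RGB0, STAGE_BASE) |
			mdp_ctl_blend_mask(SSPP_VIG0, STAGE_BASE + 1);
	mdp5_ctl_blend(ctl, lm, blend_cfg);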
+
+/*
+ * flush_mask (CTL flush masks):
+ *
+ * The following functions allow each DRM entity to get and store
+ * their own flush mask.
+ * Once stored, these masks will then be accessed through each DRM's
+ * interface and used by the caller of mdp5_ctl_commit() to specify
+ * which block(s) need to be flushed through @flush_mask parameter.
+ */
+
+#define MDP5_CTL_FLUSH_CURSOR_DUMMY 0x80000000
+
+static inline u32 mdp_ctl_flush_mask_cursor(int cursor_id)
+{
+ /* TODO: use id once multiple cursor support is present */
+ (void)cursor_id;
+
+ return MDP5_CTL_FLUSH_CURSOR_DUMMY;
+}
+
+static inline u32 mdp_ctl_flush_mask_lm(int lm)
+{
+ switch (lm) {
+ case 0: return MDP5_CTL_FLUSH_LM0;
+ case 1: return MDP5_CTL_FLUSH_LM1;
+ case 2: return MDP5_CTL_FLUSH_LM2;
+ case 5: return MDP5_CTL_FLUSH_LM5;
+ default: return 0;
+ }
+}
+
+static inline u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
+{
+ switch (pipe) {
+ case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
+ case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
+ case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
+ case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
+ case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
+ case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
+ case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
+ case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
+ case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
+ case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
+ default: return 0;
+ }
+}
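For example, a CRTC-wide flush combines the masks of every block it touches (illustrative fragment, assuming ctl and lm are in scope; this roughly mirrors crtc_flush_all() in mdp5_crtc.c, where the pipe bit actually comes from mdp5_plane_get_flush()):

	u32 flush_mask = mdp_ctl_flush_mask_pipe(SSPP_RGB0) |
			 mdp_ctl_flush_mask_lm(lm) |
			 mdp5_ctl_get_flush(ctl);
	mdp5_ctl_commit(ctl, flush_mask);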
+
+#endif /* __MDP5_CTL_H__ */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index edec7bfaa95..0254bfdeb92 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -24,6 +24,7 @@ struct mdp5_encoder {
struct drm_encoder base;
int intf;
enum mdp5_intf intf_id;
+ spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */
bool enabled;
uint32_t bsc;
};
@@ -115,6 +116,7 @@ static void mdp5_encoder_dpms(struct drm_encoder *encoder, int mode)
struct mdp5_kms *mdp5_kms = get_kms(encoder);
int intf = mdp5_encoder->intf;
bool enabled = (mode == DRM_MODE_DPMS_ON);
+ unsigned long flags;
DBG("mode=%d", mode);
@@ -123,9 +125,24 @@ static void mdp5_encoder_dpms(struct drm_encoder *encoder, int mode)
if (enabled) {
bs_set(mdp5_encoder, 1);
+ spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1);
+ spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
} else {
+ spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 0);
+ spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
+
+ /*
+ * Wait for a vsync so we know the ENABLE=0 latched before
+ * the (connector) source of the vsyncs gets disabled,
+ * otherwise we end up in a funny state if we re-enable
+ * before the disable latches, with the result that some of
+ * the settings for the new modeset (like the new scanout
+ * buffer) don't latch properly.
+ */
+ mdp_irq_wait(&mdp5_kms->base, intf2vblank(intf));
+
bs_set(mdp5_encoder, 0);
}
@@ -150,6 +167,7 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
uint32_t display_v_start, display_v_end;
uint32_t hsync_start_x, hsync_end_x;
uint32_t format;
+ unsigned long flags;
mode = adjusted_mode;
@@ -180,6 +198,8 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew;
display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1;
+ spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
+
mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_CTL(intf),
MDP5_INTF_HSYNC_CTL_PULSEW(mode->hsync_end - mode->hsync_start) |
MDP5_INTF_HSYNC_CTL_PERIOD(mode->htotal));
@@ -201,6 +221,8 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VEND_F0(intf), 0);
mdp5_write(mdp5_kms, REG_MDP5_INTF_PANEL_FORMAT(intf), format);
mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(intf), 0x3); /* frame+line? */
+
+ spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
}
static void mdp5_encoder_prepare(struct drm_encoder *encoder)
@@ -242,6 +264,8 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf,
mdp5_encoder->intf_id = intf_id;
encoder = &mdp5_encoder->base;
+ spin_lock_init(&mdp5_encoder->intf_lock);
+
drm_encoder_init(dev, encoder, &mdp5_encoder_funcs,
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index f2b985bc2ad..70ac81edd40 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -15,6 +15,8 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
#include "msm_drv.h"
#include "mdp5_kms.h"
@@ -88,11 +90,17 @@ irqreturn_t mdp5_irq(struct msm_kms *kms)
VERB("intr=%08x", intr);
- if (intr & MDP5_HW_INTR_STATUS_INTR_MDP)
+ if (intr & MDP5_HW_INTR_STATUS_INTR_MDP) {
mdp5_irq_mdp(mdp_kms);
+ intr &= ~MDP5_HW_INTR_STATUS_INTR_MDP;
+ }
- if (intr & MDP5_HW_INTR_STATUS_INTR_HDMI)
- hdmi_irq(0, mdp5_kms->hdmi);
+ while (intr) {
+ irq_hw_number_t hwirq = fls(intr) - 1;
+ generic_handle_irq(irq_find_mapping(
+ mdp5_kms->irqcontroller.domain, hwirq));
+ intr &= ~(1 << hwirq);
+ }
return IRQ_HANDLED;
}
@@ -109,3 +117,82 @@ void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
mdp_update_vblank_mask(to_mdp_kms(kms),
mdp5_crtc_vblank(crtc), false);
}
+
+/*
+ * interrupt-controller implementation, so sub-blocks (hdmi/eDP/dsi/etc)
+ * can register to get their irqs delivered
+ */
+
+#define VALID_IRQS (MDP5_HW_INTR_STATUS_INTR_DSI0 | \
+ MDP5_HW_INTR_STATUS_INTR_DSI1 | \
+ MDP5_HW_INTR_STATUS_INTR_HDMI | \
+ MDP5_HW_INTR_STATUS_INTR_EDP)
+
+static void mdp5_hw_mask_irq(struct irq_data *irqd)
+{
+ struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd);
+ smp_mb__before_atomic();
+ clear_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask);
+ smp_mb__after_atomic();
+}
+
+static void mdp5_hw_unmask_irq(struct irq_data *irqd)
+{
+ struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd);
+ smp_mb__before_atomic();
+ set_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask);
+ smp_mb__after_atomic();
+}
+
+static struct irq_chip mdp5_hw_irq_chip = {
+ .name = "mdp5",
+ .irq_mask = mdp5_hw_mask_irq,
+ .irq_unmask = mdp5_hw_unmask_irq,
+};
+
+static int mdp5_hw_irqdomain_map(struct irq_domain *d,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ struct mdp5_kms *mdp5_kms = d->host_data;
+
+ if (!(VALID_IRQS & (1 << hwirq)))
+ return -EPERM;
+
+ irq_set_chip_and_handler(irq, &mdp5_hw_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, mdp5_kms);
+ set_irq_flags(irq, IRQF_VALID);
+
+ return 0;
+}
+
+static struct irq_domain_ops mdp5_hw_irqdomain_ops = {
+ .map = mdp5_hw_irqdomain_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+
+int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms)
+{
+ struct device *dev = mdp5_kms->dev->dev;
+ struct irq_domain *d;
+
+ d = irq_domain_add_linear(dev->of_node, 32,
+ &mdp5_hw_irqdomain_ops, mdp5_kms);
+ if (!d) {
+ dev_err(dev, "mdp5 irq domain add failed\n");
+ return -ENXIO;
+ }
+
+ mdp5_kms->irqcontroller.enabled_mask = 0;
+ mdp5_kms->irqcontroller.domain = d;
+
+ return 0;
+}
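A hypothetical client of this domain (e.g. the HDMI sub-block) could resolve its Linux irq as sketched below; the actual hookup is not shown in this hunk, so treat the fragment as an assumption:

	/* hwirq is the bit position of the block's status bit in HW_INTR_STATUS: */
	unsigned int irq = irq_create_mapping(mdp5_kms->irqcontroller.domain,
			ffs(MDP5_HW_INTR_STATUS_INTR_HDMI) - 1);
	/* 'irq' would then be handed to the sub-block's interrupt handler setup */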
+
+void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms)
+{
+ if (mdp5_kms->irqcontroller.domain) {
+ irq_domain_remove(mdp5_kms->irqcontroller.domain);
+ mdp5_kms->irqcontroller.domain = NULL;
+ }
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 31a2c6331a1..a11f1b80c48 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -24,145 +25,11 @@ static const char *iommu_ports[] = {
"mdp_0",
};
-static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev);
-
-const struct mdp5_config *mdp5_cfg;
-
-static const struct mdp5_config msm8x74_config = {
- .name = "msm8x74",
- .ctl = {
- .count = 5,
- .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
- },
- .pipe_vig = {
- .count = 3,
- .base = { 0x01200, 0x01600, 0x01a00 },
- },
- .pipe_rgb = {
- .count = 3,
- .base = { 0x01e00, 0x02200, 0x02600 },
- },
- .pipe_dma = {
- .count = 2,
- .base = { 0x02a00, 0x02e00 },
- },
- .lm = {
- .count = 5,
- .base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
- },
- .dspp = {
- .count = 3,
- .base = { 0x04600, 0x04a00, 0x04e00 },
- },
- .ad = {
- .count = 2,
- .base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */
- },
- .intf = {
- .count = 4,
- .base = { 0x12500, 0x12700, 0x12900, 0x12b00 },
- },
-};
-
-static const struct mdp5_config apq8084_config = {
- .name = "apq8084",
- .ctl = {
- .count = 5,
- .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
- },
- .pipe_vig = {
- .count = 4,
- .base = { 0x01200, 0x01600, 0x01a00, 0x01e00 },
- },
- .pipe_rgb = {
- .count = 4,
- .base = { 0x02200, 0x02600, 0x02a00, 0x02e00 },
- },
- .pipe_dma = {
- .count = 2,
- .base = { 0x03200, 0x03600 },
- },
- .lm = {
- .count = 6,
- .base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 },
- },
- .dspp = {
- .count = 4,
- .base = { 0x05200, 0x05600, 0x05a00, 0x05e00 },
-
- },
- .ad = {
- .count = 3,
- .base = { 0x13500, 0x13700, 0x13900 },
- },
- .intf = {
- .count = 5,
- .base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 },
- },
-};
-
-struct mdp5_config_entry {
- int revision;
- const struct mdp5_config *config;
-};
-
-static const struct mdp5_config_entry mdp5_configs[] = {
- { .revision = 0, .config = &msm8x74_config },
- { .revision = 2, .config = &msm8x74_config },
- { .revision = 3, .config = &apq8084_config },
-};
-
-static int mdp5_select_hw_cfg(struct msm_kms *kms)
-{
- struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
- struct drm_device *dev = mdp5_kms->dev;
- uint32_t version, major, minor;
- int i, ret = 0;
-
- mdp5_enable(mdp5_kms);
- version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION);
- mdp5_disable(mdp5_kms);
-
- major = FIELD(version, MDP5_MDP_VERSION_MAJOR);
- minor = FIELD(version, MDP5_MDP_VERSION_MINOR);
-
- DBG("found MDP5 version v%d.%d", major, minor);
-
- if (major != 1) {
- dev_err(dev->dev, "unexpected MDP major version: v%d.%d\n",
- major, minor);
- ret = -ENXIO;
- goto out;
- }
-
- mdp5_kms->rev = minor;
-
- /* only after mdp5_cfg global pointer's init can we access the hw */
- for (i = 0; i < ARRAY_SIZE(mdp5_configs); i++) {
- if (mdp5_configs[i].revision != minor)
- continue;
- mdp5_kms->hw_cfg = mdp5_cfg = mdp5_configs[i].config;
- break;
- }
- if (unlikely(!mdp5_kms->hw_cfg)) {
- dev_err(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
- major, minor);
- ret = -ENXIO;
- goto out;
- }
-
- DBG("MDP5: %s config selected", mdp5_kms->hw_cfg->name);
-
- return 0;
-out:
- return ret;
-}
-
static int mdp5_hw_init(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
struct drm_device *dev = mdp5_kms->dev;
- int i;
+ unsigned long flags;
pm_runtime_get_sync(dev->dev);
@@ -190,10 +57,11 @@ static int mdp5_hw_init(struct msm_kms *kms)
* care.
*/
+ spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
+ spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
- for (i = 0; i < mdp5_kms->hw_cfg->ctl.count; i++)
- mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(i), 0);
+ mdp5_ctlm_hw_reset(mdp5_kms->ctlm);
pm_runtime_put_sync(dev->dev);
@@ -221,10 +89,20 @@ static void mdp5_destroy(struct msm_kms *kms)
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
struct msm_mmu *mmu = mdp5_kms->mmu;
+ mdp5_irq_domain_fini(mdp5_kms);
+
if (mmu) {
mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
mmu->funcs->destroy(mmu);
}
+
+ if (mdp5_kms->ctlm)
+ mdp5_ctlm_destroy(mdp5_kms->ctlm);
+ if (mdp5_kms->smp)
+ mdp5_smp_destroy(mdp5_kms->smp);
+ if (mdp5_kms->cfg)
+ mdp5_cfg_destroy(mdp5_kms->cfg);
+
kfree(mdp5_kms);
}
@@ -274,17 +152,31 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
static const enum mdp5_pipe crtcs[] = {
SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
};
+ static const enum mdp5_pipe pub_planes[] = {
+ SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3,
+ };
struct drm_device *dev = mdp5_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
struct drm_encoder *encoder;
+ const struct mdp5_cfg_hw *hw_cfg;
int i, ret;
- /* construct CRTCs: */
- for (i = 0; i < mdp5_kms->hw_cfg->pipe_rgb.count; i++) {
+ hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+
+ /* register our interrupt-controller for hdmi/eDP/dsi/etc
+ * to use for irqs routed through mdp:
+ */
+ ret = mdp5_irq_domain_init(mdp5_kms);
+ if (ret)
+ goto fail;
+
+ /* construct CRTCs and their private planes: */
+ for (i = 0; i < hw_cfg->pipe_rgb.count; i++) {
struct drm_plane *plane;
struct drm_crtc *crtc;
- plane = mdp5_plane_init(dev, crtcs[i], true);
+ plane = mdp5_plane_init(dev, crtcs[i], true,
+ hw_cfg->pipe_rgb.base[i]);
if (IS_ERR(plane)) {
ret = PTR_ERR(plane);
dev_err(dev->dev, "failed to construct plane for %s (%d)\n",
@@ -302,6 +194,20 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
priv->crtcs[priv->num_crtcs++] = crtc;
}
+ /* Construct public planes: */
+ for (i = 0; i < hw_cfg->pipe_vig.count; i++) {
+ struct drm_plane *plane;
+
+ plane = mdp5_plane_init(dev, pub_planes[i], false,
+ hw_cfg->pipe_vig.base[i]);
+ if (IS_ERR(plane)) {
+ ret = PTR_ERR(plane);
+ dev_err(dev->dev, "failed to construct %s plane: %d\n",
+ pipe2name(pub_planes[i]), ret);
+ goto fail;
+ }
+ }
+
/* Construct encoder for HDMI: */
encoder = mdp5_encoder_init(dev, 3, INTF_HDMI);
if (IS_ERR(encoder)) {
@@ -324,11 +230,12 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
priv->encoders[priv->num_encoders++] = encoder;
/* Construct bridge/connector for HDMI: */
- mdp5_kms->hdmi = hdmi_init(dev, encoder);
- if (IS_ERR(mdp5_kms->hdmi)) {
- ret = PTR_ERR(mdp5_kms->hdmi);
- dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
- goto fail;
+ if (priv->hdmi) {
+ ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
+ if (ret) {
+ dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
+ goto fail;
+ }
}
return 0;
@@ -337,6 +244,21 @@ fail:
return ret;
}
+static void read_hw_revision(struct mdp5_kms *mdp5_kms,
+ uint32_t *major, uint32_t *minor)
+{
+ uint32_t version;
+
+ mdp5_enable(mdp5_kms);
+ version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION);
+ mdp5_disable(mdp5_kms);
+
+ *major = FIELD(version, MDP5_MDP_VERSION_MAJOR);
+ *minor = FIELD(version, MDP5_MDP_VERSION_MINOR);
+
+ DBG("MDP5 version v%d.%d", *major, *minor);
+}
+
static int get_clk(struct platform_device *pdev, struct clk **clkp,
const char *name)
{
@@ -353,10 +275,11 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
struct msm_kms *mdp5_kms_init(struct drm_device *dev)
{
struct platform_device *pdev = dev->platformdev;
- struct mdp5_platform_config *config = mdp5_get_config(pdev);
+ struct mdp5_cfg *config;
struct mdp5_kms *mdp5_kms;
struct msm_kms *kms = NULL;
struct msm_mmu *mmu;
+ uint32_t major, minor;
int i, ret;
mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL);
@@ -366,12 +289,13 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
goto fail;
}
+ spin_lock_init(&mdp5_kms->resource_lock);
+
mdp_kms_init(&mdp5_kms->base, &kms_funcs);
kms = &mdp5_kms->base.base;
mdp5_kms->dev = dev;
- mdp5_kms->smp_blk_cnt = config->smp_blk_cnt;
mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
if (IS_ERR(mdp5_kms->mmio)) {
@@ -416,24 +340,52 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
if (ret)
goto fail;
- ret = clk_set_rate(mdp5_kms->src_clk, config->max_clk);
+ /* we need to set a default rate before enabling. Set a safe
+ * rate first, then figure out hw revision, and then set a
+ * more optimal rate:
+ */
+ clk_set_rate(mdp5_kms->src_clk, 200000000);
+
+ read_hw_revision(mdp5_kms, &major, &minor);
- ret = mdp5_select_hw_cfg(kms);
- if (ret)
+ mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor);
+ if (IS_ERR(mdp5_kms->cfg)) {
+ ret = PTR_ERR(mdp5_kms->cfg);
+ mdp5_kms->cfg = NULL;
goto fail;
+ }
+
+ config = mdp5_cfg_get_config(mdp5_kms->cfg);
+
+ /* TODO: compute core clock rate at runtime */
+ clk_set_rate(mdp5_kms->src_clk, config->hw->max_clk);
+
+ mdp5_kms->smp = mdp5_smp_init(mdp5_kms->dev, &config->hw->smp);
+ if (IS_ERR(mdp5_kms->smp)) {
+ ret = PTR_ERR(mdp5_kms->smp);
+ mdp5_kms->smp = NULL;
+ goto fail;
+ }
+
+ mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, config->hw);
+ if (IS_ERR(mdp5_kms->ctlm)) {
+ ret = PTR_ERR(mdp5_kms->ctlm);
+ mdp5_kms->ctlm = NULL;
+ goto fail;
+ }
/* make sure things are off before attaching iommu (bootloader could
* have left things on, in which case we'll start getting faults if
* we don't disable):
*/
mdp5_enable(mdp5_kms);
- for (i = 0; i < mdp5_kms->hw_cfg->intf.count; i++)
+ for (i = 0; i < config->hw->intf.count; i++)
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
mdp5_disable(mdp5_kms);
mdelay(16);
- if (config->iommu) {
- mmu = msm_iommu_new(&pdev->dev, config->iommu);
+ if (config->platform.iommu) {
+ mmu = msm_iommu_new(&pdev->dev, config->platform.iommu);
if (IS_ERR(mmu)) {
ret = PTR_ERR(mmu);
dev_err(dev->dev, "failed to init iommu: %d\n", ret);
@@ -474,18 +426,3 @@ fail:
mdp5_destroy(kms);
return ERR_PTR(ret);
}
-
-static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev)
-{
- static struct mdp5_platform_config config = {};
-#ifdef CONFIG_OF
- /* TODO */
-#endif
- config.iommu = iommu_domain_alloc(&platform_bus_type);
- /* TODO hard-coded in downstream mdss, but should it be? */
- config.max_clk = 200000000;
- /* TODO get from DT: */
- config.smp_blk_cnt = 22;
-
- return &config;
-}
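For reference, read_hw_revision() above is just a masked read of the MDP version register. Below is a minimal standalone sketch of that FIELD() decode; the mask and shift values are illustrative placeholders, the real ones come from the generated mdp5.xml.h register headers.

#include <stdint.h>
#include <stdio.h>

/* assumed field layout, for illustration only */
#define VERSION_MAJOR__MASK	0xf0000000
#define VERSION_MAJOR__SHIFT	28
#define VERSION_MINOR__MASK	0x0fff0000
#define VERSION_MINOR__SHIFT	16

/* same pattern as the driver's FIELD() helper */
#define FIELD(val, name)	(((val) & name##__MASK) >> name##__SHIFT)

int main(void)
{
	uint32_t version = 0x10030000;	/* pretend readback of REG_MDP5_MDP_VERSION */

	printf("MDP5 version v%u.%u\n",
	       (unsigned)FIELD(version, VERSION_MAJOR),
	       (unsigned)FIELD(version, VERSION_MINOR));
	return 0;
}

With the assumed layout this prints "MDP5 version v1.3"; the minor number is then used to pick the per-SoC config in mdp5_cfg_init().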
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 5bf340dd0f0..dd69c77c0d6 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -21,25 +21,9 @@
#include "msm_drv.h"
#include "msm_kms.h"
#include "mdp/mdp_kms.h"
-/* dynamic offsets used by mdp5.xml.h (initialized in mdp5_kms.c) */
-#define MDP5_MAX_BASES 8
-struct mdp5_sub_block {
- int count;
- uint32_t base[MDP5_MAX_BASES];
-};
-struct mdp5_config {
- char *name;
- struct mdp5_sub_block ctl;
- struct mdp5_sub_block pipe_vig;
- struct mdp5_sub_block pipe_rgb;
- struct mdp5_sub_block pipe_dma;
- struct mdp5_sub_block lm;
- struct mdp5_sub_block dspp;
- struct mdp5_sub_block ad;
- struct mdp5_sub_block intf;
-};
-extern const struct mdp5_config *mdp5_cfg;
+#include "mdp5_cfg.h" /* must be included before mdp5.xml.h */
#include "mdp5.xml.h"
+#include "mdp5_ctl.h"
#include "mdp5_smp.h"
struct mdp5_kms {
@@ -47,17 +31,14 @@ struct mdp5_kms {
struct drm_device *dev;
- int rev;
- const struct mdp5_config *hw_cfg;
+ struct mdp5_cfg_handler *cfg;
/* mapper-id used to request GEM buffer mapped for scanout: */
int id;
struct msm_mmu *mmu;
- /* for tracking smp allocation amongst pipes: */
- mdp5_smp_state_t smp_state;
- struct mdp5_client_smp_state smp_client_state[CID_MAX];
- int smp_blk_cnt;
+ struct mdp5_smp *smp;
+ struct mdp5_ctl_manager *ctlm;
/* io/register spaces: */
void __iomem *mmio, *vbif;
@@ -71,18 +52,47 @@ struct mdp5_kms {
struct clk *lut_clk;
struct clk *vsync_clk;
- struct hdmi *hdmi;
+ /*
+ * lock to protect access to global resources: ie., following register:
+ * - REG_MDP5_DISP_INTF_SEL
+ */
+ spinlock_t resource_lock;
struct mdp_irq error_handler;
+
+ struct {
+ volatile unsigned long enabled_mask;
+ struct irq_domain *domain;
+ } irqcontroller;
};
#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
-/* platform config data (ie. from DT, or pdata) */
-struct mdp5_platform_config {
- struct iommu_domain *iommu;
- uint32_t max_clk;
- int smp_blk_cnt;
+struct mdp5_plane_state {
+ struct drm_plane_state base;
+
+ /* "virtual" zpos.. we calculate actual mixer-stage at runtime
+ * by sorting the attached planes by zpos and then assigning
+ * mixer stage lowest to highest. Private planes get default
+ * zpos of zero, and public planes a unique value that is
+ * greater than zero. This way, things work out if a naive
+ * userspace assigns planes to a crtc without setting zpos.
+ */
+ int zpos;
+
+ /* the actual mixer stage, calculated in crtc->atomic_check()
+ * NOTE: this should move to mdp5_crtc_state, when that exists
+ */
+ enum mdp_mixer_stage_id stage;
+
+ /* some additional transactional status to help us know in the
+ * apply path whether we need to update SMP allocation, and
+ * whether current update is still pending:
+ */
+ bool mode_changed : 1;
+ bool pending : 1;
};
+#define to_mdp5_plane_state(x) \
+ container_of(x, struct mdp5_plane_state, base)
static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data)
{
@@ -107,23 +117,6 @@ static inline const char *pipe2name(enum mdp5_pipe pipe)
return names[pipe];
}
-static inline uint32_t pipe2flush(enum mdp5_pipe pipe)
-{
- switch (pipe) {
- case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
- case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
- case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
- case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
- case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
- case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
- case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
- case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
- case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
- case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
- default: return 0;
- }
-}
-
static inline int pipe2nclients(enum mdp5_pipe pipe)
{
switch (pipe) {
@@ -137,34 +130,6 @@ static inline int pipe2nclients(enum mdp5_pipe pipe)
}
}
-static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane)
-{
- WARN_ON(plane >= pipe2nclients(pipe));
- switch (pipe) {
- case SSPP_VIG0: return CID_VIG0_Y + plane;
- case SSPP_VIG1: return CID_VIG1_Y + plane;
- case SSPP_VIG2: return CID_VIG2_Y + plane;
- case SSPP_RGB0: return CID_RGB0;
- case SSPP_RGB1: return CID_RGB1;
- case SSPP_RGB2: return CID_RGB2;
- case SSPP_DMA0: return CID_DMA0_Y + plane;
- case SSPP_DMA1: return CID_DMA1_Y + plane;
- case SSPP_VIG3: return CID_VIG3_Y + plane;
- case SSPP_RGB3: return CID_RGB3;
- default: return CID_UNUSED;
- }
-}
-
-static inline uint32_t mixer2flush(int lm)
-{
- switch (lm) {
- case 0: return MDP5_CTL_FLUSH_LM0;
- case 1: return MDP5_CTL_FLUSH_LM1;
- case 2: return MDP5_CTL_FLUSH_LM2;
- default: return 0;
- }
-}
-
static inline uint32_t intf2err(int intf)
{
switch (intf) {
@@ -197,6 +162,8 @@ void mdp5_irq_uninstall(struct msm_kms *kms);
irqreturn_t mdp5_irq(struct msm_kms *kms);
int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms);
+void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);
static inline
uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats,
@@ -210,26 +177,18 @@ uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats,
void mdp5_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj);
-void mdp5_plane_set_scanout(struct drm_plane *plane,
- struct drm_framebuffer *fb);
-int mdp5_plane_mode_set(struct drm_plane *plane,
- struct drm_crtc *crtc, struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h);
+uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
void mdp5_plane_complete_flip(struct drm_plane *plane);
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
struct drm_plane *mdp5_plane_init(struct drm_device *dev,
- enum mdp5_pipe pipe, bool private_plane);
+ enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset);
uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
+int mdp5_crtc_get_lm(struct drm_crtc *crtc);
void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
enum mdp5_intf intf_id);
-void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane);
-void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane);
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
struct drm_plane *plane, int id);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index f3daec4412a..26e5fdea659 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -17,6 +18,7 @@
#include "mdp5_kms.h"
+#define MAX_PLANE 4
struct mdp5_plane {
struct drm_plane base;
@@ -24,6 +26,11 @@ struct mdp5_plane {
enum mdp5_pipe pipe;
+ spinlock_t pipe_lock; /* protect REG_MDP5_PIPE_* registers */
+ uint32_t reg_offset;
+
+ uint32_t flush_mask; /* used to commit pipe registers */
+
uint32_t nformats;
uint32_t formats[32];
@@ -31,31 +38,24 @@ struct mdp5_plane {
};
#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)
+static int mdp5_plane_mode_set(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
+static void set_scanout_locked(struct drm_plane *plane,
+ struct drm_framebuffer *fb);
+
static struct mdp5_kms *get_kms(struct drm_plane *plane)
{
struct msm_drm_private *priv = plane->dev->dev_private;
return to_mdp5_kms(to_mdp_kms(priv->kms));
}
-static int mdp5_plane_update(struct drm_plane *plane,
- struct drm_crtc *crtc, struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
+static bool plane_enabled(struct drm_plane_state *state)
{
- struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
-
- mdp5_plane->enabled = true;
-
- if (plane->fb)
- drm_framebuffer_unreference(plane->fb);
-
- drm_framebuffer_reference(fb);
-
- return mdp5_plane_mode_set(plane, crtc, fb,
- crtc_x, crtc_y, crtc_w, crtc_h,
- src_x, src_y, src_w, src_h);
+ return state->fb && state->crtc;
}
static int mdp5_plane_disable(struct drm_plane *plane)
@@ -63,21 +63,13 @@ static int mdp5_plane_disable(struct drm_plane *plane)
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct mdp5_kms *mdp5_kms = get_kms(plane);
enum mdp5_pipe pipe = mdp5_plane->pipe;
- int i;
DBG("%s: disable", mdp5_plane->name);
- /* update our SMP request to zero (release all our blks): */
- for (i = 0; i < pipe2nclients(pipe); i++)
- mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), 0);
-
- /* TODO detaching now will cause us not to get the last
- * vblank and mdp5_smp_commit().. so other planes will
- * still see smp blocks previously allocated to us as
- * in-use..
- */
- if (plane->crtc)
- mdp5_crtc_detach(plane->crtc, plane);
+ if (mdp5_kms) {
+ /* Release the memory we requested earlier from the SMP: */
+ mdp5_smp_release(mdp5_kms->smp, pipe);
+ }
return 0;
}
@@ -85,11 +77,8 @@ static int mdp5_plane_disable(struct drm_plane *plane)
static void mdp5_plane_destroy(struct drm_plane *plane)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
- struct msm_drm_private *priv = plane->dev->dev_private;
-
- if (priv->kms)
- mdp5_plane_disable(plane);
+ drm_plane_helper_disable(plane);
drm_plane_cleanup(plane);
kfree(mdp5_plane);
@@ -109,109 +98,186 @@ int mdp5_plane_set_property(struct drm_plane *plane,
return -EINVAL;
}
+static void mdp5_plane_reset(struct drm_plane *plane)
+{
+ struct mdp5_plane_state *mdp5_state;
+
+ if (plane->state && plane->state->fb)
+ drm_framebuffer_unreference(plane->state->fb);
+
+ kfree(to_mdp5_plane_state(plane->state));
+ mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
+
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
+ mdp5_state->zpos = 0;
+ } else {
+ mdp5_state->zpos = 1 + drm_plane_index(plane);
+ }
+
+ plane->state = &mdp5_state->base;
+}
+
+static struct drm_plane_state *
+mdp5_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct mdp5_plane_state *mdp5_state;
+
+ if (WARN_ON(!plane->state))
+ return NULL;
+
+ mdp5_state = kmemdup(to_mdp5_plane_state(plane->state),
+ sizeof(*mdp5_state), GFP_KERNEL);
+
+ if (mdp5_state && mdp5_state->base.fb)
+ drm_framebuffer_reference(mdp5_state->base.fb);
+
+ mdp5_state->mode_changed = false;
+ mdp5_state->pending = false;
+
+ return &mdp5_state->base;
+}
+
+static void mdp5_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ if (state->fb)
+ drm_framebuffer_unreference(state->fb);
+
+ kfree(to_mdp5_plane_state(state));
+}
+
static const struct drm_plane_funcs mdp5_plane_funcs = {
- .update_plane = mdp5_plane_update,
- .disable_plane = mdp5_plane_disable,
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
.destroy = mdp5_plane_destroy,
.set_property = mdp5_plane_set_property,
+ .reset = mdp5_plane_reset,
+ .atomic_duplicate_state = mdp5_plane_duplicate_state,
+ .atomic_destroy_state = mdp5_plane_destroy_state,
};
-void mdp5_plane_set_scanout(struct drm_plane *plane,
+static int mdp5_plane_prepare_fb(struct drm_plane *plane,
struct drm_framebuffer *fb)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct mdp5_kms *mdp5_kms = get_kms(plane);
- enum mdp5_pipe pipe = mdp5_plane->pipe;
- uint32_t nplanes = drm_format_num_planes(fb->pixel_format);
- uint32_t iova[4];
- int i;
-
- for (i = 0; i < nplanes; i++) {
- struct drm_gem_object *bo = msm_framebuffer_bo(fb, i);
- msm_gem_get_iova(bo, mdp5_kms->id, &iova[i]);
- }
- for (; i < 4; i++)
- iova[i] = 0;
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
- MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
- MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
-
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe),
- MDP5_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
- MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
-
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe), iova[0]);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe), iova[1]);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe), iova[2]);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe), iova[3]);
-
- plane->fb = fb;
+ DBG("%s: prepare: FB[%u]", mdp5_plane->name, fb->base.id);
+ return msm_framebuffer_prepare(fb, mdp5_kms->id);
}
-/* NOTE: looks like if horizontal decimation is used (if we supported that)
- * then the width used to calculate SMP block requirements is the post-
- * decimated width. Ie. SMP buffering sits downstream of decimation (which
- * presumably happens during the dma from scanout buffer).
- */
-static int request_smp_blocks(struct drm_plane *plane, uint32_t format,
- uint32_t nplanes, uint32_t width)
+static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
{
- struct drm_device *dev = plane->dev;
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct mdp5_kms *mdp5_kms = get_kms(plane);
- enum mdp5_pipe pipe = mdp5_plane->pipe;
- int i, hsub, nlines, nblks, ret;
- hsub = drm_format_horz_chroma_subsampling(format);
+ DBG("%s: cleanup: FB[%u]", mdp5_plane->name, fb->base.id);
+ msm_framebuffer_cleanup(fb, mdp5_kms->id);
+}
- /* different if BWC (compressed framebuffer?) enabled: */
- nlines = 2;
+static int mdp5_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+ struct drm_plane_state *old_state = plane->state;
- for (i = 0, nblks = 0; i < nplanes; i++) {
- int n, fetch_stride, cpp;
+ DBG("%s: check (%d -> %d)", mdp5_plane->name,
+ plane_enabled(old_state), plane_enabled(state));
- cpp = drm_format_plane_cpp(format, i);
- fetch_stride = width * cpp / (i ? hsub : 1);
+ if (plane_enabled(state) && plane_enabled(old_state)) {
+ /* we cannot change SMP block configuration during scanout: */
+ bool full_modeset = false;
+ if (state->fb->pixel_format != old_state->fb->pixel_format) {
+ DBG("%s: pixel_format change!", mdp5_plane->name);
+ full_modeset = true;
+ }
+ if (state->src_w != old_state->src_w) {
+ DBG("%s: src_w change!", mdp5_plane->name);
+ full_modeset = true;
+ }
+ if (to_mdp5_plane_state(old_state)->pending) {
+ DBG("%s: still pending!", mdp5_plane->name);
+ full_modeset = true;
+ }
+ if (full_modeset) {
+ struct drm_crtc_state *crtc_state =
+ drm_atomic_get_crtc_state(state->state, state->crtc);
+ crtc_state->mode_changed = true;
+ to_mdp5_plane_state(state)->mode_changed = true;
+ }
+ } else {
+ to_mdp5_plane_state(state)->mode_changed = true;
+ }
- n = DIV_ROUND_UP(fetch_stride * nlines, SMP_BLK_SIZE);
+ return 0;
+}
- /* for hw rev v1.00 */
- if (mdp5_kms->rev == 0)
- n = roundup_pow_of_two(n);
+static void mdp5_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+ struct drm_plane_state *state = plane->state;
- DBG("%s[%d]: request %d SMP blocks", mdp5_plane->name, i, n);
- ret = mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), n);
- if (ret) {
- dev_err(dev->dev, "Could not allocate %d SMP blocks: %d\n",
- n, ret);
- return ret;
- }
+ DBG("%s: update", mdp5_plane->name);
- nblks += n;
+ if (!plane_enabled(state)) {
+ to_mdp5_plane_state(state)->pending = true;
+ mdp5_plane_disable(plane);
+ } else if (to_mdp5_plane_state(state)->mode_changed) {
+ int ret;
+ to_mdp5_plane_state(state)->pending = true;
+ ret = mdp5_plane_mode_set(plane,
+ state->crtc, state->fb,
+ state->crtc_x, state->crtc_y,
+ state->crtc_w, state->crtc_h,
+ state->src_x, state->src_y,
+ state->src_w, state->src_h);
+ /* atomic_check should have ensured that this doesn't fail */
+ WARN_ON(ret < 0);
+ } else {
+ unsigned long flags;
+ spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
+ set_scanout_locked(plane, state->fb);
+ spin_unlock_irqrestore(&mdp5_plane->pipe_lock, flags);
}
-
- /* in success case, return total # of blocks allocated: */
- return nblks;
}
-static void set_fifo_thresholds(struct drm_plane *plane, int nblks)
+static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {
+ .prepare_fb = mdp5_plane_prepare_fb,
+ .cleanup_fb = mdp5_plane_cleanup_fb,
+ .atomic_check = mdp5_plane_atomic_check,
+ .atomic_update = mdp5_plane_atomic_update,
+};
+
+static void set_scanout_locked(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct mdp5_kms *mdp5_kms = get_kms(plane);
enum mdp5_pipe pipe = mdp5_plane->pipe;
- uint32_t val;
- /* 1/4 of SMP pool that is being fetched */
- val = (nblks * SMP_ENTRIES_PER_BLK) / 4;
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
+ MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
+ MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe),
+ MDP5_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
+ MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe),
+ msm_framebuffer_iova(fb, mdp5_kms->id, 0));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe),
+ msm_framebuffer_iova(fb, mdp5_kms->id, 1));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
+ msm_framebuffer_iova(fb, mdp5_kms->id, 2));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
+ msm_framebuffer_iova(fb, mdp5_kms->id, 3));
+ plane->fb = fb;
}
-int mdp5_plane_mode_set(struct drm_plane *plane,
+static int mdp5_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
@@ -225,7 +291,8 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
uint32_t nplanes, config = 0;
uint32_t phasex_step = 0, phasey_step = 0;
uint32_t hdecm = 0, vdecm = 0;
- int i, nblks;
+ unsigned long flags;
+ int ret;
nplanes = drm_format_num_planes(fb->pixel_format);
@@ -243,12 +310,11 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
fb->base.id, src_x, src_y, src_w, src_h,
crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
- /*
- * Calculate and request required # of smp blocks:
- */
- nblks = request_smp_blocks(plane, fb->pixel_format, nplanes, src_w);
- if (nblks < 0)
- return nblks;
+ /* Request some memory from the SMP: */
+ ret = mdp5_smp_request(mdp5_kms->smp,
+ mdp5_plane->pipe, fb->pixel_format, src_w);
+ if (ret)
+ return ret;
/*
* Currently we update the hw for allocations/requests immediately,
@@ -256,8 +322,7 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
* would move into atomic->check_plane_state(), while updating the
* hw would remain here:
*/
- for (i = 0; i < pipe2nclients(pipe); i++)
- mdp5_smp_configure(mdp5_kms, pipe2client(pipe, i));
+ mdp5_smp_configure(mdp5_kms->smp, pipe);
if (src_w != crtc_w) {
config |= MDP5_PIPE_SCALE_CONFIG_SCALEX_EN;
@@ -269,6 +334,8 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
/* TODO calc phasey_step, vdecm */
}
+ spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
+
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_w) |
MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_h));
@@ -289,8 +356,6 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
MDP5_PIPE_OUT_XY_X(crtc_x) |
MDP5_PIPE_OUT_XY_Y(crtc_y));
- mdp5_plane_set_scanout(plane, fb);
-
format = to_mdp_format(msm_framebuffer_format(fb));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe),
@@ -330,22 +395,24 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(SCALE_FILTER_NEAREST) |
MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(SCALE_FILTER_NEAREST));
- set_fifo_thresholds(plane, nblks);
+ set_scanout_locked(plane, fb);
- /* TODO detach from old crtc (if we had more than one) */
- mdp5_crtc_attach(crtc, plane);
+ spin_unlock_irqrestore(&mdp5_plane->pipe_lock, flags);
- return 0;
+ return ret;
}
void mdp5_plane_complete_flip(struct drm_plane *plane)
{
struct mdp5_kms *mdp5_kms = get_kms(plane);
- enum mdp5_pipe pipe = to_mdp5_plane(plane)->pipe;
- int i;
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+ enum mdp5_pipe pipe = mdp5_plane->pipe;
+
+ DBG("%s: complete flip", mdp5_plane->name);
- for (i = 0; i < pipe2nclients(pipe); i++)
- mdp5_smp_commit(mdp5_kms, pipe2client(pipe, i));
+ mdp5_smp_commit(mdp5_kms->smp, pipe);
+
+ to_mdp5_plane_state(plane->state)->pending = false;
}
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
@@ -354,9 +421,16 @@ enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
return mdp5_plane->pipe;
}
+uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
+{
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+
+ return mdp5_plane->flush_mask;
+}
+
/* initialize plane */
struct drm_plane *mdp5_plane_init(struct drm_device *dev,
- enum mdp5_pipe pipe, bool private_plane)
+ enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset)
{
struct drm_plane *plane = NULL;
struct mdp5_plane *mdp5_plane;
@@ -377,10 +451,18 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
mdp5_plane->nformats = mdp5_get_formats(pipe, mdp5_plane->formats,
ARRAY_SIZE(mdp5_plane->formats));
+ mdp5_plane->flush_mask = mdp_ctl_flush_mask_pipe(pipe);
+ mdp5_plane->reg_offset = reg_offset;
+ spin_lock_init(&mdp5_plane->pipe_lock);
+
type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
- drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
+ ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
mdp5_plane->formats, mdp5_plane->nformats,
type);
+ if (ret)
+ goto fail;
+
+ drm_plane_helper_add(plane, &mdp5_plane_helper_funcs);
mdp5_plane_install_properties(plane, &plane->base);
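mdp5_plane_atomic_check() above escalates a plane update to a full modeset whenever the SMP footprint could change while the pipe is live. A tiny standalone model of that decision, with invented field names, just to show which differences between old and new state trigger the escalation:

#include <stdio.h>
#include <stdbool.h>

/* only the fields the check cares about */
struct toy_plane_state {
	unsigned int pixel_format;	/* fourcc */
	unsigned int src_w;		/* 16.16 fixed point in the real code */
	bool pending;			/* previous flip not yet completed */
};

static bool needs_full_modeset(const struct toy_plane_state *old,
			       const struct toy_plane_state *new)
{
	return old->pixel_format != new->pixel_format ||
	       old->src_w != new->src_w ||
	       old->pending;
}

int main(void)
{
	struct toy_plane_state old = { 0x34325258 /* XR24 */, 1920 << 16, false };
	struct toy_plane_state new = { 0x34325258 /* XR24 */, 1280 << 16, false };

	printf("full modeset needed: %d\n", needs_full_modeset(&old, &new));
	return 0;
}

When the predicate fires, the real code marks both crtc_state->mode_changed and the plane's own mode_changed flag so the update goes through the heavyweight path.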
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
index 2d0236b963a..bf551885e01 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -29,8 +30,11 @@
* Based on the size of the attached scanout buffer, a certain # of
* blocks must be allocated to that client out of the shared pool.
*
- * For each block, it can be either free, or pending/in-use by a
- * client. The updates happen in three steps:
+ * In some hw, some blocks are statically allocated for certain pipes
+ * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0).
+ *
+ * For each block that can be dynamically allocated, it can be either
+ * free, or pending/in-use by a client. The updates happen in three steps:
*
* 1) mdp5_smp_request():
* When plane scanout is setup, calculate required number of
@@ -61,21 +65,68 @@
* inuse and pending state of all clients..
*/
-static DEFINE_SPINLOCK(smp_lock);
+struct mdp5_smp {
+ struct drm_device *dev;
+
+ int blk_cnt;
+ int blk_size;
+
+ spinlock_t state_lock;
+ mdp5_smp_state_t state; /* to track smp allocation amongst pipes: */
+
+ struct mdp5_client_smp_state client_state[CID_MAX];
+};
+static inline
+struct mdp5_kms *get_kms(struct mdp5_smp *smp)
+{
+ struct msm_drm_private *priv = smp->dev->dev_private;
+
+ return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane)
+{
+ WARN_ON(plane >= pipe2nclients(pipe));
+ switch (pipe) {
+ case SSPP_VIG0: return CID_VIG0_Y + plane;
+ case SSPP_VIG1: return CID_VIG1_Y + plane;
+ case SSPP_VIG2: return CID_VIG2_Y + plane;
+ case SSPP_RGB0: return CID_RGB0;
+ case SSPP_RGB1: return CID_RGB1;
+ case SSPP_RGB2: return CID_RGB2;
+ case SSPP_DMA0: return CID_DMA0_Y + plane;
+ case SSPP_DMA1: return CID_DMA1_Y + plane;
+ case SSPP_VIG3: return CID_VIG3_Y + plane;
+ case SSPP_RGB3: return CID_RGB3;
+ default: return CID_UNUSED;
+ }
+}
/* step #1: update # of blocks pending for the client: */
-int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
+static int smp_request_block(struct mdp5_smp *smp,
enum mdp5_client_id cid, int nblks)
{
- struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
- int i, ret, avail, cur_nblks, cnt = mdp5_kms->smp_blk_cnt;
+ struct mdp5_kms *mdp5_kms = get_kms(smp);
+ const struct mdp5_cfg_hw *hw_cfg;
+ struct mdp5_client_smp_state *ps = &smp->client_state[cid];
+ int i, ret, avail, cur_nblks, cnt = smp->blk_cnt;
+ int reserved;
unsigned long flags;
- spin_lock_irqsave(&smp_lock, flags);
+ hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+ reserved = hw_cfg->smp.reserved[cid];
+
+ spin_lock_irqsave(&smp->state_lock, flags);
- avail = cnt - bitmap_weight(mdp5_kms->smp_state, cnt);
+ nblks -= reserved;
+ if (reserved)
+ DBG("%d MMBs allocated (%d reserved)", nblks, reserved);
+
+ avail = cnt - bitmap_weight(smp->state, cnt);
if (nblks > avail) {
+ dev_err(mdp5_kms->dev->dev, "out of blks (req=%d > avail=%d)\n",
+ nblks, avail);
ret = -ENOSPC;
goto fail;
}
@@ -84,9 +135,9 @@ int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
if (nblks > cur_nblks) {
/* grow the existing pending reservation: */
for (i = cur_nblks; i < nblks; i++) {
- int blk = find_first_zero_bit(mdp5_kms->smp_state, cnt);
+ int blk = find_first_zero_bit(smp->state, cnt);
set_bit(blk, ps->pending);
- set_bit(blk, mdp5_kms->smp_state);
+ set_bit(blk, smp->state);
}
} else {
/* shrink the existing pending reservation: */
@@ -98,15 +149,88 @@ int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
}
fail:
- spin_unlock_irqrestore(&smp_lock, flags);
+ spin_unlock_irqrestore(&smp->state_lock, flags);
+ return 0;
+}
+
+static void set_fifo_thresholds(struct mdp5_smp *smp,
+ enum mdp5_pipe pipe, int nblks)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(smp);
+ u32 smp_entries_per_blk = smp->blk_size / (128 / BITS_PER_BYTE);
+ u32 val;
+
+ /* 1/4 of SMP pool that is being fetched */
+ val = (nblks * smp_entries_per_blk) / 4;
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3);
+}
+
+/*
+ * NOTE: looks like if horizontal decimation is used (if we supported that)
+ * then the width used to calculate SMP block requirements is the post-
+ * decimated width. Ie. SMP buffering sits downstream of decimation (which
+ * presumably happens during the dma from scanout buffer).
+ */
+int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(smp);
+ struct drm_device *dev = mdp5_kms->dev;
+ int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
+ int i, hsub, nplanes, nlines, nblks, ret;
+
+ nplanes = drm_format_num_planes(fmt);
+ hsub = drm_format_horz_chroma_subsampling(fmt);
+
+ /* different if BWC (compressed framebuffer?) enabled: */
+ nlines = 2;
+
+ for (i = 0, nblks = 0; i < nplanes; i++) {
+ int n, fetch_stride, cpp;
+
+ cpp = drm_format_plane_cpp(fmt, i);
+ fetch_stride = width * cpp / (i ? hsub : 1);
+
+ n = DIV_ROUND_UP(fetch_stride * nlines, smp->blk_size);
+
+ /* for hw rev v1.00 */
+ if (rev == 0)
+ n = roundup_pow_of_two(n);
+
+ DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
+ ret = smp_request_block(smp, pipe2client(pipe, i), n);
+ if (ret) {
+ dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
+ n, ret);
+ return ret;
+ }
+
+ nblks += n;
+ }
+
+ set_fifo_thresholds(smp, pipe, nblks);
+
return 0;
}
-static void update_smp_state(struct mdp5_kms *mdp5_kms,
+/* Release SMP blocks for all clients of the pipe */
+void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
+{
+ int i, nblks;
+
+ for (i = 0, nblks = 0; i < pipe2nclients(pipe); i++)
+ smp_request_block(smp, pipe2client(pipe, i), 0);
+ set_fifo_thresholds(smp, pipe, 0);
+}
+
+static void update_smp_state(struct mdp5_smp *smp,
enum mdp5_client_id cid, mdp5_smp_state_t *assigned)
{
- int cnt = mdp5_kms->smp_blk_cnt;
- uint32_t blk, val;
+ struct mdp5_kms *mdp5_kms = get_kms(smp);
+ int cnt = smp->blk_cnt;
+ u32 blk, val;
for_each_set_bit(blk, *assigned, cnt) {
int idx = blk / 3;
@@ -135,39 +259,80 @@ static void update_smp_state(struct mdp5_kms *mdp5_kms,
}
/* step #2: configure hw for union(pending, inuse): */
-void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid)
+void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
- struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
- int cnt = mdp5_kms->smp_blk_cnt;
+ int cnt = smp->blk_cnt;
mdp5_smp_state_t assigned;
+ int i;
- bitmap_or(assigned, ps->inuse, ps->pending, cnt);
- update_smp_state(mdp5_kms, cid, &assigned);
+ for (i = 0; i < pipe2nclients(pipe); i++) {
+ enum mdp5_client_id cid = pipe2client(pipe, i);
+ struct mdp5_client_smp_state *ps = &smp->client_state[cid];
+
+ bitmap_or(assigned, ps->inuse, ps->pending, cnt);
+ update_smp_state(smp, cid, &assigned);
+ }
}
/* step #3: after vblank, copy pending -> inuse: */
-void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid)
+void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
- struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
- int cnt = mdp5_kms->smp_blk_cnt;
+ int cnt = smp->blk_cnt;
mdp5_smp_state_t released;
+ int i;
+
+ for (i = 0; i < pipe2nclients(pipe); i++) {
+ enum mdp5_client_id cid = pipe2client(pipe, i);
+ struct mdp5_client_smp_state *ps = &smp->client_state[cid];
+
+ /*
+ * Figure out if there are any blocks we were previously
+ * using, which can be released and made available to other
+ * clients:
+ */
+ if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&smp->state_lock, flags);
+ /* clear released blocks: */
+ bitmap_andnot(smp->state, smp->state, released, cnt);
+ spin_unlock_irqrestore(&smp->state_lock, flags);
- /*
- * Figure out if there are any blocks we where previously
- * using, which can be released and made available to other
- * clients:
- */
- if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
- unsigned long flags;
-
- spin_lock_irqsave(&smp_lock, flags);
- /* clear released blocks: */
- bitmap_andnot(mdp5_kms->smp_state, mdp5_kms->smp_state,
- released, cnt);
- spin_unlock_irqrestore(&smp_lock, flags);
-
- update_smp_state(mdp5_kms, CID_UNUSED, &released);
+ update_smp_state(smp, CID_UNUSED, &released);
+ }
+
+ bitmap_copy(ps->inuse, ps->pending, cnt);
}
+}
+
+void mdp5_smp_destroy(struct mdp5_smp *smp)
+{
+ kfree(smp);
+}
+
+struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg)
+{
+ struct mdp5_smp *smp = NULL;
+ int ret;
+
+ smp = kzalloc(sizeof(*smp), GFP_KERNEL);
+ if (unlikely(!smp)) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ smp->dev = dev;
+ smp->blk_cnt = cfg->mmb_count;
+ smp->blk_size = cfg->mmb_size;
+
+ /* statically tied MMBs cannot be re-allocated: */
+ bitmap_copy(smp->state, cfg->reserved_state, smp->blk_cnt);
+ spin_lock_init(&smp->state_lock);
+
+ return smp;
+fail:
+ if (smp)
+ mdp5_smp_destroy(smp);
- bitmap_copy(ps->inuse, ps->pending, cnt);
+ return ERR_PTR(ret);
}
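To make the arithmetic in mdp5_smp_request() and set_fifo_thresholds() above concrete, here is a standalone sketch with assumed inputs (a single-plane XRGB8888 scanline 1920 pixels wide and 4096-byte MMBs, as in the msm8x74 configuration); the real values come from drm_format_plane_cpp() and the per-SoC mdp5_cfg tables:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int width = 1920, cpp = 4, nlines = 2, blk_size = 4096;

	int fetch_stride = width * cpp;				/* 7680 bytes per line */
	int nblks = DIV_ROUND_UP(fetch_stride * nlines, blk_size);	/* 15360/4096 -> 4 MMBs */
	/* (hw rev v1.00 would additionally round nblks up to a power of two) */

	/* each MMB holds blk_size / 16 entries; the watermarks step through
	 * 1/4, 2/4 and 3/4 of the fetched pool, as in set_fifo_thresholds()
	 */
	int entries_per_blk = blk_size / 16;			/* 256 */
	int val = (nblks * entries_per_blk) / 4;		/* 256 */

	printf("request %d MMBs, WM0/1/2 = %d/%d/%d\n",
	       nblks, val, val * 2, val * 3);
	return 0;
}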
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
index 0ab739e1a1d..e47179f6358 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -20,22 +21,26 @@
#include "msm_drv.h"
-#define MAX_SMP_BLOCKS 22
-#define SMP_BLK_SIZE 4096
-#define SMP_ENTRIES_PER_BLK (SMP_BLK_SIZE / 16)
-
-typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS);
-
struct mdp5_client_smp_state {
mdp5_smp_state_t inuse;
mdp5_smp_state_t pending;
};
struct mdp5_kms;
+struct mdp5_smp;
+
+/*
+ * SMP module prototypes:
+ * mdp5_smp_init() returns a SMP @handler,
+ * which is then used to call the other mdp5_smp_*(handler, ...) functions.
+ */
-int mdp5_smp_request(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid, int nblks);
-void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid);
-void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid);
+struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg);
+void mdp5_smp_destroy(struct mdp5_smp *smp);
+int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width);
+void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe);
+void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe);
+void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe);
#endif /* __MDP5_SMP_H__ */
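The inuse/pending pair in mdp5_client_smp_state above drives the three-step update described at the top of mdp5_smp.c. A toy, self-contained model of one request/configure/commit cycle (bitmaps shrunk to a single word, block count invented):

#include <stdio.h>

int main(void)
{
	unsigned int pool = 0;		/* global MMB allocation state */
	unsigned int inuse = 0;		/* blocks the hw is scanning from */
	unsigned int pending = 0;	/* blocks reserved for the next frame */
	int i, want = 4;

	/* step 1, request: grow the pending reservation from the free pool */
	for (i = 0; want > 0 && i < 22; i++) {
		if (pool & (1u << i))
			continue;
		pool |= 1u << i;
		pending |= 1u << i;
		want--;
	}
	printf("request:   pending=%#x inuse=%#x\n", pending, inuse);

	/* step 2, configure: hw gets union(pending, inuse) so both the old
	 * and the new frame can fetch until the flip completes
	 */
	printf("configure: program hw with %#x\n", pending | inuse);

	/* step 3, commit (after vblank): blocks that were inuse but are no
	 * longer pending go back to the pool, then pending becomes inuse
	 */
	pool &= ~(inuse & ~pending);
	inuse = pending;
	printf("commit:    pending=%#x inuse=%#x pool=%#x\n", pending, inuse, pool);
	return 0;
}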
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
new file mode 100644
index 00000000000..f0de412e13d
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2014 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "msm_gem.h"
+
+struct msm_commit {
+ struct drm_atomic_state *state;
+ uint32_t fence;
+ struct msm_fence_cb fence_cb;
+};
+
+static void fence_cb(struct msm_fence_cb *cb);
+
+static struct msm_commit *new_commit(struct drm_atomic_state *state)
+{
+ struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
+
+ if (!c)
+ return NULL;
+
+ c->state = state;
+ /* TODO we might need a way to indicate to run the cb on a
+ * different wq so wait_for_vblanks() doesn't block retiring
+ * bo's..
+ */
+ INIT_FENCE_CB(&c->fence_cb, fence_cb);
+
+ return c;
+}
+
+/* The (potentially) asynchronous part of the commit. At this point
+ * nothing can fail short of armageddon.
+ */
+static void complete_commit(struct msm_commit *c)
+{
+ struct drm_atomic_state *state = c->state;
+ struct drm_device *dev = state->dev;
+
+ drm_atomic_helper_commit_pre_planes(dev, state);
+
+ drm_atomic_helper_commit_planes(dev, state);
+
+ drm_atomic_helper_commit_post_planes(dev, state);
+
+ drm_atomic_helper_wait_for_vblanks(dev, state);
+
+ drm_atomic_helper_cleanup_planes(dev, state);
+
+ drm_atomic_state_free(state);
+
+ kfree(c);
+}
+
+static void fence_cb(struct msm_fence_cb *cb)
+{
+ struct msm_commit *c =
+ container_of(cb, struct msm_commit, fence_cb);
+ complete_commit(c);
+}
+
+static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb)
+{
+ struct drm_gem_object *obj = msm_framebuffer_bo(fb, 0);
+ c->fence = max(c->fence, msm_gem_fence(to_msm_bo(obj), MSM_PREP_READ));
+}
+
+
+/**
+ * msm_atomic_commit - commit validated state object
+ * @dev: DRM device
+ * @state: the driver state object
+ * @async: asynchronous commit
+ *
+ * This function commits a state object that has been pre-validated with
+ * drm_atomic_helper_check(). It can still fail when e.g. the framebuffer
+ * reservation fails. Asynchronous commits are completed from the fence
+ * callback once the GPU is done with the new framebuffers.
+ *
+ * RETURNS
+ * Zero for success or -errno.
+ */
+int msm_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *state, bool async)
+{
+ struct msm_commit *c;
+ int nplanes = dev->mode_config.num_total_plane;
+ int i, ret;
+
+ ret = drm_atomic_helper_prepare_planes(dev, state);
+ if (ret)
+ return ret;
+
+ c = new_commit(state);
+
+ /*
+ * Figure out what fence to wait for:
+ */
+ for (i = 0; i < nplanes; i++) {
+ struct drm_plane *plane = state->planes[i];
+ struct drm_plane_state *new_state = state->plane_states[i];
+
+ if (!plane)
+ continue;
+
+ if ((plane->state->fb != new_state->fb) && new_state->fb)
+ add_fb(c, new_state->fb);
+ }
+
+ /*
+ * This is the point of no return - everything below never fails except
+ * when the hw goes bonghits. Which means we can commit the new state on
+ * the software side now.
+ */
+
+ drm_atomic_helper_swap_state(dev, state);
+
+ /*
+ * Everything below can be run asynchronously without the need to grab
+ * any modeset locks at all under one condition: It must be guaranteed
+ * that the asynchronous work has either been cancelled (if the driver
+ * supports it, which at least requires that the framebuffers get
+ * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
+ * before the new state gets committed on the software side with
+ * drm_atomic_helper_swap_state().
+ *
+ * This scheme allows new atomic state updates to be prepared and
+ * checked in parallel to the asynchronous completion of the previous
+ * update. Which is important since compositors need to figure out the
+ * composition of the next frame right after having submitted the
+ * current layout.
+ */
+
+ if (async) {
+ msm_queue_fence_cb(dev, &c->fence_cb, c->fence);
+ return 0;
+ }
+
+ ret = msm_wait_fence_interruptable(dev, c->fence, NULL);
+ if (ret) {
+ WARN_ON(ret); // TODO unswap state back? or??
+ kfree(c);
+ return ret;
+ }
+
+ complete_commit(c);
+
+ return 0;
+}
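msm_atomic_commit() above collects, for every framebuffer being newly attached, the write fence of its backing bo and then either waits for the largest one or queues a callback on it. A small standalone model of that bookkeeping, with invented fence numbers:

#include <stdio.h>

/* toy bo: seqno of the last write against the buffer */
struct toy_bo { unsigned int write_fence; };

static unsigned int umax(unsigned int a, unsigned int b) { return a > b ? a : b; }

int main(void)
{
	struct toy_bo fb_bos[] = { { 102 }, { 99 }, { 107 } };	/* fbs changing this commit */
	unsigned int fence = 0, completed = 104;
	unsigned int i, n = sizeof(fb_bos) / sizeof(fb_bos[0]);

	/* cf. add_fb(): scanout only reads, so it waits on the last writer */
	for (i = 0; i < n; i++)
		fence = umax(fence, fb_bos[i].write_fence);

	/* cf. msm_queue_fence_cb(): run now if already retired, else defer */
	if (fence > completed)
		printf("defer commit until fence %u retires\n", fence);
	else
		printf("fence %u already retired, complete commit now\n", fence);
	return 0;
}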
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 42e1c48eef2..c795217e1bf 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -29,6 +29,8 @@ static void msm_fb_output_poll_changed(struct drm_device *dev)
static const struct drm_mode_config_funcs mode_config_funcs = {
.fb_create = msm_framebuffer_create,
.output_poll_changed = msm_fb_output_poll_changed,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = msm_atomic_commit,
};
int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu)
@@ -294,6 +296,8 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
goto fail;
}
+ drm_mode_config_reset(dev);
+
#ifdef CONFIG_DRM_MSM_FBDEV
priv->fbdev = msm_fbdev_init(dev);
#endif
@@ -619,6 +623,26 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
return ret;
}
+int msm_queue_fence_cb(struct drm_device *dev,
+ struct msm_fence_cb *cb, uint32_t fence)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ int ret = 0;
+
+ mutex_lock(&dev->struct_mutex);
+ if (!list_empty(&cb->work.entry)) {
+ ret = -EINVAL;
+ } else if (fence > priv->completed_fence) {
+ cb->fence = fence;
+ list_add_tail(&cb->work.entry, &priv->fence_cbs);
+ } else {
+ queue_work(priv->wq, &cb->work);
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
/* called from workqueue */
void msm_update_fence(struct drm_device *dev, uint32_t fence)
{
@@ -832,6 +856,7 @@ static struct drm_driver msm_driver = {
.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
.gem_prime_vmap = msm_gem_prime_vmap,
.gem_prime_vunmap = msm_gem_prime_vunmap,
+ .gem_prime_mmap = msm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = msm_debugfs_init,
.debugfs_cleanup = msm_debugfs_cleanup,
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 67f9d0a2332..13630381843 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -32,15 +32,6 @@
#include <linux/types.h>
#include <asm/sizes.h>
-
-#if defined(CONFIG_COMPILE_TEST) && !defined(CONFIG_ARCH_QCOM)
-/* stubs we need for compile-test: */
-static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
-{
- return NULL;
-}
-#endif
-
#ifndef CONFIG_OF
#include <mach/board.h>
#include <mach/socinfo.h>
@@ -48,7 +39,10 @@ static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
#endif
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/msm_drm.h>
#include <drm/drm_gem.h>
@@ -75,7 +69,12 @@ struct msm_drm_private {
struct msm_kms *kms;
/* subordinate devices, if present: */
- struct platform_device *hdmi_pdev, *gpu_pdev;
+ struct platform_device *gpu_pdev;
+
+ /* possibly this should be in the kms component, but it is
+ * shared by both mdp4 and mdp5..
+ */
+ struct hdmi *hdmi;
/* when we have more than one 'msm_gpu' these need to be an array: */
struct msm_gpu *gpu;
@@ -145,21 +144,29 @@ void __msm_fence_worker(struct work_struct *work);
(_cb)->func = _func; \
} while (0)
+int msm_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *state, bool async);
+
int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
struct timespec *timeout);
+int msm_queue_fence_cb(struct drm_device *dev,
+ struct msm_fence_cb *cb, uint32_t fence);
void msm_update_fence(struct drm_device *dev, uint32_t fence);
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file);
+int msm_gem_mmap_obj(struct drm_gem_object *obj,
+ struct vm_area_struct *vma);
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
uint32_t *iova);
int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova);
+uint32_t msm_gem_iova(struct drm_gem_object *obj, int id);
struct page **msm_gem_get_pages(struct drm_gem_object *obj);
void msm_gem_put_pages(struct drm_gem_object *obj);
void msm_gem_put_iova(struct drm_gem_object *obj, int id);
@@ -170,6 +177,7 @@ int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
void *msm_gem_prime_vmap(struct drm_gem_object *obj);
void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
int msm_gem_prime_pin(struct drm_gem_object *obj);
@@ -192,6 +200,9 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
uint32_t size, struct sg_table *sgt);
+int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id);
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id);
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane);
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
@@ -202,8 +213,8 @@ struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
struct hdmi;
-struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder);
-irqreturn_t hdmi_irq(int irq, void *dev_id);
+int hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
+ struct drm_encoder *encoder);
void __init hdmi_register(void);
void __exit hdmi_unregister(void);
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 81bafdf19ab..84dec161d83 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -24,7 +24,7 @@
struct msm_framebuffer {
struct drm_framebuffer base;
const struct msm_format *format;
- struct drm_gem_object *planes[2];
+ struct drm_gem_object *planes[3];
};
#define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base)
@@ -87,6 +87,44 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
}
#endif
+/* prepare/pin all the fb's bo's for scanout. Note that it is not valid
+ * to prepare an fb for multiple different initiator 'id's. But that
+ * should be fine, since only the scanout (mdpN) side of things needs
+ * this, the gpu doesn't care about fb's.
+ */
+int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id)
+{
+ struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ int ret, i, n = drm_format_num_planes(fb->pixel_format);
+ uint32_t iova;
+
+ for (i = 0; i < n; i++) {
+ ret = msm_gem_get_iova(msm_fb->planes[i], id, &iova);
+ DBG("FB[%u]: iova[%d]: %08x (%d)", fb->base.id, i, iova, ret);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id)
+{
+ struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ int i, n = drm_format_num_planes(fb->pixel_format);
+
+ for (i = 0; i < n; i++)
+ msm_gem_put_iova(msm_fb->planes[i], id);
+}
+
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane)
+{
+ struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ if (!msm_fb->planes[plane])
+ return 0;
+ return msm_gem_iova(msm_fb->planes[plane], id);
+}
+
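msm_framebuffer_prepare()/iova()/cleanup() above pin every plane's bo for scanout, hand back the pinned addresses, and drop the pins again. A toy model of that per-plane pairing (plane count and addresses invented), only to show how the calls balance:

#include <stdio.h>

/* toy pin/unpin bookkeeping: one pin count and one address per fb plane */
struct toy_fb { int nplanes; int pins[3]; unsigned int iova[3]; };

static void toy_prepare(struct toy_fb *fb)
{
	int i;
	for (i = 0; i < fb->nplanes; i++) {
		fb->pins[i]++;					/* cf. msm_gem_get_iova() */
		fb->iova[i] = 0x10000000u + 0x100000u * i;	/* pretend mapping */
	}
}

static void toy_cleanup(struct toy_fb *fb)
{
	int i;
	for (i = 0; i < fb->nplanes; i++)
		fb->pins[i]--;					/* cf. msm_gem_put_iova() */
}

int main(void)
{
	struct toy_fb fb = { .nplanes = 3 };		/* e.g. a 3-plane YUV fb */
	int i;

	toy_prepare(&fb);				/* at plane->prepare_fb time */
	for (i = 0; i < fb.nplanes; i++)		/* while programming scanout */
		printf("plane %d iova %#x\n", i, fb.iova[i]);
	toy_cleanup(&fb);				/* at plane->cleanup_fb time */
	return 0;
}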
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
@@ -166,6 +204,11 @@ struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
msm_fb->format = format;
+ if (n > ARRAY_SIZE(msm_fb->planes)) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
for (i = 0; i < n; i++) {
unsigned int width = mode_cmd->width / (i ? hsub : 1);
unsigned int height = mode_cmd->height / (i ? vsub : 1);
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index ab5bfd2d0eb..94d55e526b4 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -93,9 +93,6 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
uint32_t paddr;
int ret, size;
- sizes->surface_bpp = 32;
- sizes->surface_depth = 24;
-
DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
sizes->surface_height, sizes->surface_bpp,
sizes->fb_width, sizes->fb_height);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 4b1b82adabd..4a6f0e49d5b 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -309,6 +309,7 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
return ret;
}
+/* get iova, taking a reference. Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -328,6 +329,16 @@ int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
return ret;
}
+/* get iova without taking a reference, used in places where you have
+ * already done a 'msm_gem_get_iova()'.
+ */
+uint32_t msm_gem_iova(struct drm_gem_object *obj, int id)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ WARN_ON(!msm_obj->domain[id].iova);
+ return msm_obj->domain[id].iova;
+}
+
void msm_gem_put_iova(struct drm_gem_object *obj, int id)
{
// XXX TODO ..
@@ -397,23 +408,10 @@ void *msm_gem_vaddr(struct drm_gem_object *obj)
int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
struct msm_fence_cb *cb)
{
- struct drm_device *dev = obj->dev;
- struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- int ret = 0;
-
- mutex_lock(&dev->struct_mutex);
- if (!list_empty(&cb->work.entry)) {
- ret = -EINVAL;
- } else if (is_active(msm_obj)) {
- cb->fence = max(msm_obj->read_fence, msm_obj->write_fence);
- list_add_tail(&cb->work.entry, &priv->fence_cbs);
- } else {
- queue_work(priv->wq, &cb->work);
- }
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
+ uint32_t fence = msm_gem_fence(msm_obj,
+ MSM_PREP_READ | MSM_PREP_WRITE);
+ return msm_queue_fence_cb(obj->dev, cb, fence);
}
void msm_gem_move_to_active(struct drm_gem_object *obj,
@@ -452,12 +450,8 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
int ret = 0;
if (is_active(msm_obj)) {
- uint32_t fence = 0;
+ uint32_t fence = msm_gem_fence(msm_obj, op);
- if (op & MSM_PREP_READ)
- fence = msm_obj->write_fence;
- if (op & MSM_PREP_WRITE)
- fence = max(fence, msm_obj->read_fence);
if (op & MSM_PREP_NOSYNC)
timeout = NULL;
@@ -525,13 +519,11 @@ void msm_gem_free_object(struct drm_gem_object *obj)
for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
struct msm_mmu *mmu = priv->mmus[id];
if (mmu && msm_obj->domain[id].iova) {
- uint32_t offset = (uint32_t)mmap_offset(obj);
+ uint32_t offset = msm_obj->domain[id].iova;
mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
}
}
- drm_gem_free_mmap_offset(obj);
-
if (obj->import_attach) {
if (msm_obj->vaddr)
dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index bfb052688f8..8fbbd0594c4 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -70,6 +70,19 @@ static inline bool is_active(struct msm_gem_object *msm_obj)
return msm_obj->gpu != NULL;
}
+static inline uint32_t msm_gem_fence(struct msm_gem_object *msm_obj,
+ uint32_t op)
+{
+ uint32_t fence = 0;
+
+ if (op & MSM_PREP_READ)
+ fence = msm_obj->write_fence;
+ if (op & MSM_PREP_WRITE)
+ fence = max(fence, msm_obj->read_fence);
+
+ return fence;
+}
+
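msm_gem_fence() above picks which fence an operation must wait for: a read only has to wait for the last writer, while a write also has to wait for outstanding readers. A tiny standalone illustration of the same rule with invented fence numbers:

#include <stdio.h>

#define PREP_READ	0x01
#define PREP_WRITE	0x02

/* toy bo: seqnos of the last read and last write against the buffer */
struct toy_bo { unsigned int read_fence, write_fence; };

static unsigned int toy_fence(const struct toy_bo *bo, unsigned int op)
{
	unsigned int fence = 0;

	if (op & PREP_READ)		/* readers wait for the last writer */
		fence = bo->write_fence;
	if (op & PREP_WRITE)		/* writers wait for outstanding readers */
		fence = fence > bo->read_fence ? fence : bo->read_fence;

	return fence;
}

int main(void)
{
	struct toy_bo bo = { .read_fence = 110, .write_fence = 104 };

	printf("scanout (read): wait for %u\n", toy_fence(&bo, PREP_READ));
	printf("render (write): wait for %u\n", toy_fence(&bo, PREP_WRITE));
	return 0;
}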
#define MAX_CMDS 4
/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index ad772fe3611..dd7a7ab603e 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -37,6 +37,19 @@ void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
/* TODO msm_gem_vunmap() */
}
+int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ int ret;
+
+ mutex_lock(&obj->dev->struct_mutex);
+ ret = drm_gem_mmap_obj(obj, obj->size, vma);
+ mutex_unlock(&obj->dev->struct_mutex);
+ if (ret < 0)
+ return ret;
+
+ return msm_gem_mmap_obj(vma->vm_private_data, vma);
+}
+
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg)
{
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 12c24c8abf7..6461e3565af 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -41,17 +41,28 @@ nouveau-y += core/subdev/bios/extdev.o
nouveau-y += core/subdev/bios/fan.o
nouveau-y += core/subdev/bios/gpio.o
nouveau-y += core/subdev/bios/i2c.o
+nouveau-y += core/subdev/bios/image.o
nouveau-y += core/subdev/bios/init.o
nouveau-y += core/subdev/bios/mxm.o
+nouveau-y += core/subdev/bios/npde.o
+nouveau-y += core/subdev/bios/pcir.o
nouveau-y += core/subdev/bios/perf.o
nouveau-y += core/subdev/bios/pll.o
+nouveau-y += core/subdev/bios/pmu.o
nouveau-y += core/subdev/bios/ramcfg.o
nouveau-y += core/subdev/bios/rammap.o
+nouveau-y += core/subdev/bios/shadow.o
+nouveau-y += core/subdev/bios/shadowacpi.o
+nouveau-y += core/subdev/bios/shadowof.o
+nouveau-y += core/subdev/bios/shadowpci.o
+nouveau-y += core/subdev/bios/shadowramin.o
+nouveau-y += core/subdev/bios/shadowrom.o
nouveau-y += core/subdev/bios/timing.o
nouveau-y += core/subdev/bios/therm.o
nouveau-y += core/subdev/bios/vmap.o
nouveau-y += core/subdev/bios/volt.o
nouveau-y += core/subdev/bios/xpio.o
+nouveau-y += core/subdev/bios/M0203.o
nouveau-y += core/subdev/bios/M0205.o
nouveau-y += core/subdev/bios/M0209.o
nouveau-y += core/subdev/bios/P0260.o
@@ -86,6 +97,7 @@ nouveau-y += core/subdev/devinit/nva3.o
nouveau-y += core/subdev/devinit/nvaf.o
nouveau-y += core/subdev/devinit/nvc0.o
nouveau-y += core/subdev/devinit/gm107.o
+nouveau-y += core/subdev/devinit/gm204.o
nouveau-y += core/subdev/fb/base.o
nouveau-y += core/subdev/fb/nv04.o
nouveau-y += core/subdev/fb/nv10.o
@@ -129,6 +141,7 @@ nouveau-y += core/subdev/fb/ramgk20a.o
nouveau-y += core/subdev/fb/ramgm107.o
nouveau-y += core/subdev/fb/sddr2.o
nouveau-y += core/subdev/fb/sddr3.o
+nouveau-y += core/subdev/fb/gddr3.o
nouveau-y += core/subdev/fb/gddr5.o
nouveau-y += core/subdev/fuse/base.o
nouveau-y += core/subdev/fuse/g80.o
@@ -147,6 +160,7 @@ nouveau-y += core/subdev/i2c/bit.o
nouveau-y += core/subdev/i2c/pad.o
nouveau-y += core/subdev/i2c/padnv04.o
nouveau-y += core/subdev/i2c/padnv94.o
+nouveau-y += core/subdev/i2c/padgm204.o
nouveau-y += core/subdev/i2c/nv04.o
nouveau-y += core/subdev/i2c/nv4e.o
nouveau-y += core/subdev/i2c/nv50.o
@@ -154,6 +168,7 @@ nouveau-y += core/subdev/i2c/nv94.o
nouveau-y += core/subdev/i2c/nvd0.o
nouveau-y += core/subdev/i2c/gf117.o
nouveau-y += core/subdev/i2c/nve0.o
+nouveau-y += core/subdev/i2c/gm204.o
nouveau-y += core/subdev/ibus/nvc0.o
nouveau-y += core/subdev/ibus/nve0.o
nouveau-y += core/subdev/ibus/gk20a.o
@@ -211,6 +226,7 @@ nouveau-y += core/subdev/vm/nvc0.o
nouveau-y += core/subdev/volt/base.o
nouveau-y += core/subdev/volt/gpio.o
nouveau-y += core/subdev/volt/nv40.o
+nouveau-y += core/subdev/volt/gk20a.o
nouveau-y += core/engine/falcon.o
nouveau-y += core/engine/xtensa.o
@@ -254,6 +270,7 @@ nouveau-y += core/engine/disp/nvd0.o
nouveau-y += core/engine/disp/nve0.o
nouveau-y += core/engine/disp/nvf0.o
nouveau-y += core/engine/disp/gm107.o
+nouveau-y += core/engine/disp/gm204.o
nouveau-y += core/engine/disp/dacnv50.o
nouveau-y += core/engine/disp/dport.o
nouveau-y += core/engine/disp/hdanva3.o
@@ -266,6 +283,7 @@ nouveau-y += core/engine/disp/piornv50.o
nouveau-y += core/engine/disp/sornv50.o
nouveau-y += core/engine/disp/sornv94.o
nouveau-y += core/engine/disp/sornvd0.o
+nouveau-y += core/engine/disp/sorgm204.o
nouveau-y += core/engine/disp/vga.o
nouveau-y += core/engine/fifo/base.o
nouveau-y += core/engine/fifo/nv04.o
diff --git a/drivers/gpu/drm/nouveau/core/core/handle.c b/drivers/gpu/drm/nouveau/core/core/handle.c
index a490b805d7e..13f816cb08b 100644
--- a/drivers/gpu/drm/nouveau/core/core/handle.c
+++ b/drivers/gpu/drm/nouveau/core/core/handle.c
@@ -222,116 +222,3 @@ nouveau_handle_put(struct nouveau_handle *handle)
if (handle)
nouveau_namedb_put(handle);
}
-
-int
-nouveau_handle_new(struct nouveau_object *client, u32 _parent, u32 _handle,
- u16 _oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nouveau_object *parent = NULL;
- struct nouveau_object *engctx = NULL;
- struct nouveau_object *object = NULL;
- struct nouveau_object *engine;
- struct nouveau_oclass *oclass;
- struct nouveau_handle *handle;
- int ret;
-
- /* lookup parent object and ensure it *is* a parent */
- parent = nouveau_handle_ref(client, _parent);
- if (!parent) {
- nv_error(client, "parent 0x%08x not found\n", _parent);
- return -ENOENT;
- }
-
- if (!nv_iclass(parent, NV_PARENT_CLASS)) {
- nv_error(parent, "cannot have children\n");
- ret = -EINVAL;
- goto fail_class;
- }
-
- /* check that parent supports the requested subclass */
- ret = nouveau_parent_sclass(parent, _oclass, &engine, &oclass);
- if (ret) {
- nv_debug(parent, "illegal class 0x%04x\n", _oclass);
- goto fail_class;
- }
-
- /* make sure engine init has been completed *before* any objects
- * it controls are created - the constructors may depend on
- * state calculated at init (ie. default context construction)
- */
- if (engine) {
- ret = nouveau_object_inc(engine);
- if (ret)
- goto fail_class;
- }
-
- /* if engine requires it, create a context object to insert
- * between the parent and its children (eg. PGRAPH context)
- */
- if (engine && nv_engine(engine)->cclass) {
- ret = nouveau_object_ctor(parent, engine,
- nv_engine(engine)->cclass,
- data, size, &engctx);
- if (ret)
- goto fail_engctx;
- } else {
- nouveau_object_ref(parent, &engctx);
- }
-
- /* finally, create new object and bind it to its handle */
- ret = nouveau_object_ctor(engctx, engine, oclass, data, size, &object);
- *pobject = object;
- if (ret)
- goto fail_ctor;
-
- ret = nouveau_object_inc(object);
- if (ret)
- goto fail_init;
-
- ret = nouveau_handle_create(parent, _parent, _handle, object, &handle);
- if (ret)
- goto fail_handle;
-
- ret = nouveau_handle_init(handle);
- if (ret)
- nouveau_handle_destroy(handle);
-
-fail_handle:
- nouveau_object_dec(object, false);
-fail_init:
- nouveau_object_ref(NULL, &object);
-fail_ctor:
- nouveau_object_ref(NULL, &engctx);
-fail_engctx:
- if (engine)
- nouveau_object_dec(engine, false);
-fail_class:
- nouveau_object_ref(NULL, &parent);
- return ret;
-}
-
-int
-nouveau_handle_del(struct nouveau_object *client, u32 _parent, u32 _handle)
-{
- struct nouveau_object *parent = NULL;
- struct nouveau_object *namedb = NULL;
- struct nouveau_handle *handle = NULL;
-
- parent = nouveau_handle_ref(client, _parent);
- if (!parent)
- return -ENOENT;
-
- namedb = nv_pclass(parent, NV_NAMEDB_CLASS);
- if (namedb) {
- handle = nouveau_namedb_get(nv_namedb(namedb), _handle);
- if (handle) {
- nouveau_namedb_put(handle);
- nouveau_handle_fini(handle, false);
- nouveau_handle_destroy(handle);
- }
- }
-
- nouveau_object_ref(NULL, &parent);
- return handle ? 0 : -EINVAL;
-}
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/base.c b/drivers/gpu/drm/nouveau/core/engine/device/base.c
index 0ef5a571318..137e0b0faea 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/base.c
@@ -29,6 +29,7 @@
#include <nvif/unpack.h>
#include <nvif/class.h>
+#include <subdev/bios.h>
#include <subdev/fb.h>
#include <subdev/instmem.h>
@@ -138,7 +139,7 @@ nouveau_devobj_info(struct nouveau_object *object, void *data, u32 size)
}
args->v0.chipset = device->chipset;
- args->v0.revision = device->chipset >= 0x10 ? nv_rd32(device, 0) : 0x00;
+ args->v0.revision = device->chiprev;
if (pfb) args->v0.ram_size = args->v0.ram_user = pfb->ram->size;
else args->v0.ram_size = args->v0.ram_user = 0;
if (imem) args->v0.ram_user = args->v0.ram_user - imem->reserved;
@@ -222,6 +223,7 @@ static const u64 disable_map[] = {
[NVDEV_SUBDEV_VOLT] = NV_DEVICE_V0_DISABLE_CORE,
[NVDEV_SUBDEV_THERM] = NV_DEVICE_V0_DISABLE_CORE,
[NVDEV_SUBDEV_PWR] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_SUBDEV_FUSE] = NV_DEVICE_V0_DISABLE_CORE,
[NVDEV_ENGINE_DMAOBJ] = NV_DEVICE_V0_DISABLE_CORE,
[NVDEV_ENGINE_PERFMON] = NV_DEVICE_V0_DISABLE_CORE,
[NVDEV_ENGINE_FIFO] = NV_DEVICE_V0_DISABLE_FIFO,
@@ -235,6 +237,7 @@ static const u64 disable_map[] = {
[NVDEV_ENGINE_PPP] = NV_DEVICE_V0_DISABLE_PPP,
[NVDEV_ENGINE_COPY0] = NV_DEVICE_V0_DISABLE_COPY0,
[NVDEV_ENGINE_COPY1] = NV_DEVICE_V0_DISABLE_COPY1,
+ [NVDEV_ENGINE_COPY2] = NV_DEVICE_V0_DISABLE_COPY1,
[NVDEV_ENGINE_VIC] = NV_DEVICE_V0_DISABLE_VIC,
[NVDEV_ENGINE_VENC] = NV_DEVICE_V0_DISABLE_VENC,
[NVDEV_ENGINE_DISP] = NV_DEVICE_V0_DISABLE_DISP,
@@ -352,12 +355,14 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
/* determine chipset and derive architecture from it */
if ((boot0 & 0x1f000000) > 0) {
device->chipset = (boot0 & 0x1ff00000) >> 20;
+ device->chiprev = (boot0 & 0x000000ff);
switch (device->chipset & 0x1f0) {
case 0x010: {
if (0x461 & (1 << (device->chipset & 0xf)))
device->card_type = NV_10;
else
device->card_type = NV_11;
+ device->chiprev = 0x00;
break;
}
case 0x020: device->card_type = NV_20; break;
@@ -373,7 +378,8 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
case 0x0e0:
case 0x0f0:
case 0x100: device->card_type = NV_E0; break;
- case 0x110: device->card_type = GM100; break;
+ case 0x110:
+ case 0x120: device->card_type = GM100; break;
default:
break;
}
@@ -427,6 +433,10 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
}
nv_debug(device, "crystal freq: %dKHz\n", device->crystal);
+ } else
+ if ( (args->v0.disable & NV_DEVICE_V0_DISABLE_IDENTIFY)) {
+ device->cname = "NULL";
+ device->oclass[NVDEV_SUBDEV_VBIOS] = &nouveau_bios_oclass;
}
if (!(args->v0.disable & NV_DEVICE_V0_DISABLE_MMIO) &&
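(The base.c hunk above stops re-reading the revision from MMIO when answering the info query and instead caches it during construction: the chipset is taken from bits 20..28 of the boot0 register, the revision from its low byte, which the hunk zeroes again for the 0x01x chipset family. A small self-contained sketch of that decode follows; the register value is made up purely for illustration.)

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t boot0 = 0x12470aa1;	/* hypothetical boot0 register value */
	uint32_t chipset = (boot0 & 0x1ff00000) >> 20;	/* bits 20..28 */
	uint32_t chiprev = boot0 & 0x000000ff;		/* low byte */

	printf("chipset 0x%03x, revision 0x%02x\n", chipset, chiprev);
	return 0;
}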
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/gm100.c b/drivers/gpu/drm/nouveau/core/engine/device/gm100.c
index 6295668e29a..4e74a3376de 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/gm100.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/gm100.c
@@ -98,6 +98,49 @@ gm100_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
#endif
break;
+ case 0x124:
+ device->cname = "GM204";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = gm204_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_FUSE ] = &gm107_fuse_oclass;
+#if 0
+ /* looks to be some non-trivial changes */
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
+ /* priv ring says no to 0x10eb14 writes */
+ device->oclass[NVDEV_SUBDEV_THERM ] = &gm107_therm_oclass;
+#endif
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = gm204_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = gm107_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gm107_ltc_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nv108_pwr_oclass;
+#if 0
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+#endif
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
+#if 0
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv108_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = gm107_graph_oclass;
+#endif
+ device->oclass[NVDEV_ENGINE_DISP ] = gm204_disp_oclass;
+#if 0
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &gm204_copy0_oclass;
+ device->oclass[NVDEV_ENGINE_COPY1 ] = &gm204_copy1_oclass;
+ device->oclass[NVDEV_ENGINE_COPY2 ] = &gm204_copy2_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
+#endif
+ break;
default:
nv_fatal(device, "unknown Maxwell chipset\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
index b1b2e484ecf..674da1f095b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
@@ -179,6 +179,7 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_GR ] = gk20a_graph_oclass;
device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = &nve0_perfmon_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &gk20a_volt_oclass;
break;
case 0xf0:
device->cname = "GK110";
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
index 39890221b91..16db08dfba6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
@@ -28,7 +28,7 @@
#include <subdev/bios/init.h>
#include <subdev/i2c.h>
-#include <engine/disp.h>
+#include "nv50.h"
#include <nvif/class.h>
@@ -326,7 +326,7 @@ void
nouveau_dp_train(struct work_struct *w)
{
struct nvkm_output_dp *outp = container_of(w, typeof(*outp), lt.work);
- struct nouveau_disp *disp = nouveau_disp(outp);
+ struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
const struct dp_rates *cfg = nouveau_dp_rates;
struct dp_state _dp = {
.outp = outp,
@@ -334,8 +334,11 @@ nouveau_dp_train(struct work_struct *w)
u32 datarate = 0;
int ret;
+ if (!outp->base.info.location && priv->sor.magic)
+ priv->sor.magic(&outp->base);
+
/* bring capabilities within encoder limits */
- if (nv_mclass(disp) < GF110_DISP)
+ if (nv_mclass(priv) < GF110_DISP)
outp->dpcd[2] &= ~DPCD_RC02_TPS3_SUPPORTED;
if ((outp->dpcd[2] & 0x1f) > outp->base.info.dpconf.link_nr) {
outp->dpcd[2] &= ~DPCD_RC02_MAX_LANE_COUNT;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c
index b3df3fe2dc0..e2ad0543fb3 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c
@@ -35,8 +35,8 @@
static struct nouveau_oclass
gm107_disp_sclass[] = {
- { GM107_DISP_CORE_CHANNEL_DMA, &nvd0_disp_mast_ofuncs.base },
- { GK110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_sync_ofuncs.base },
+ { GM107_DISP_CORE_CHANNEL_DMA, &nvd0_disp_core_ofuncs.base },
+ { GK110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_base_ofuncs.base },
{ GK104_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base },
{ GK104_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base },
{ GK104_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base },
@@ -44,8 +44,8 @@ gm107_disp_sclass[] = {
};
static struct nouveau_oclass
-gm107_disp_base_oclass[] = {
- { GM107_DISP, &nvd0_disp_base_ofuncs },
+gm107_disp_main_oclass[] = {
+ { GM107_DISP, &nvd0_disp_main_ofuncs },
{}
};
@@ -72,7 +72,7 @@ gm107_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- nv_engine(priv)->sclass = gm107_disp_base_oclass;
+ nv_engine(priv)->sclass = gm107_disp_main_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nvd0_disp_intr;
INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
@@ -99,9 +99,9 @@ gm107_disp_oclass = &(struct nv50_disp_impl) {
},
.base.vblank = &nvd0_disp_vblank_func,
.base.outp = nvd0_disp_outp_sclass,
- .mthd.core = &nve0_disp_mast_mthd_chan,
- .mthd.base = &nvd0_disp_sync_mthd_chan,
+ .mthd.core = &nve0_disp_core_mthd_chan,
+ .mthd.base = &nvd0_disp_base_mthd_chan,
.mthd.ovly = &nve0_disp_ovly_mthd_chan,
.mthd.prev = -0x020000,
- .head.scanoutpos = nvd0_disp_base_scanoutpos,
+ .head.scanoutpos = nvd0_disp_main_scanoutpos,
}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/gm204.c b/drivers/gpu/drm/nouveau/core/engine/disp/gm204.c
new file mode 100644
index 00000000000..672ded79b2a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/gm204.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <nvif/class.h>
+
+#include "nv50.h"
+
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
+static struct nouveau_oclass
+gm204_disp_sclass[] = {
+ { GM204_DISP_CORE_CHANNEL_DMA, &nvd0_disp_core_ofuncs.base },
+ { GK110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_base_ofuncs.base },
+ { GK104_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base },
+ { GK104_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base },
+ { GK104_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base },
+ {}
+};
+
+static struct nouveau_oclass
+gm204_disp_main_oclass[] = {
+ { GM204_DISP, &nvd0_disp_main_ofuncs },
+ {}
+};
+
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
+static int
+gm204_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv;
+ int heads = nv_rd32(parent, 0x022448);
+ int ret;
+
+ ret = nouveau_disp_create(parent, engine, oclass, heads,
+ "PDISP", "display", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ ret = nvkm_event_init(&nvd0_disp_chan_uevent, 1, 17, &priv->uevent);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = gm204_disp_main_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
+ nv_subdev(priv)->intr = nvd0_disp_intr;
+ INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
+ priv->sclass = gm204_disp_sclass;
+ priv->head.nr = heads;
+ priv->dac.nr = 3;
+ priv->sor.nr = 4;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
+ priv->sor.hda_eld = nvd0_hda_eld;
+ priv->sor.hdmi = nvd0_hdmi_ctrl;
+ priv->sor.magic = gm204_sor_magic;
+ return 0;
+}
+
+struct nouveau_oclass *
+gm204_disp_outp_sclass[] = {
+ &gm204_sor_dp_impl.base.base,
+ NULL
+};
+
+struct nouveau_oclass *
+gm204_disp_oclass = &(struct nv50_disp_impl) {
+ .base.base.handle = NV_ENGINE(DISP, 0x07),
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = gm204_disp_ctor,
+ .dtor = _nouveau_disp_dtor,
+ .init = _nouveau_disp_init,
+ .fini = _nouveau_disp_fini,
+ },
+ .base.vblank = &nvd0_disp_vblank_func,
+ .base.outp = gm204_disp_outp_sclass,
+ .mthd.core = &nve0_disp_core_mthd_chan,
+ .mthd.base = &nvd0_disp_base_mthd_chan,
+ .mthd.ovly = &nve0_disp_ovly_mthd_chan,
+ .mthd.prev = -0x020000,
+ .head.scanoutpos = nvd0_disp_main_scanoutpos,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 2df3a937037..44a8290aaea 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -88,12 +88,14 @@ nv50_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
{
struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
nv_mask(priv, 0x610028, 0x00000001 << index, 0x00000000 << index);
+ nv_wr32(priv, 0x610020, 0x00000001 << index);
}
static void
nv50_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
{
struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
+ nv_wr32(priv, 0x610020, 0x00000001 << index);
nv_mask(priv, 0x610028, 0x00000001 << index, 0x00000001 << index);
}
@@ -374,7 +376,7 @@ nv50_disp_mthd_chan(struct nv50_disp_priv *priv, int debug, int head,
}
const struct nv50_disp_mthd_list
-nv50_disp_mast_mthd_base = {
+nv50_disp_core_mthd_base = {
.mthd = 0x0000,
.addr = 0x000000,
.data = {
@@ -387,7 +389,7 @@ nv50_disp_mast_mthd_base = {
};
static const struct nv50_disp_mthd_list
-nv50_disp_mast_mthd_dac = {
+nv50_disp_core_mthd_dac = {
.mthd = 0x0080,
.addr = 0x000008,
.data = {
@@ -399,7 +401,7 @@ nv50_disp_mast_mthd_dac = {
};
const struct nv50_disp_mthd_list
-nv50_disp_mast_mthd_sor = {
+nv50_disp_core_mthd_sor = {
.mthd = 0x0040,
.addr = 0x000008,
.data = {
@@ -409,7 +411,7 @@ nv50_disp_mast_mthd_sor = {
};
const struct nv50_disp_mthd_list
-nv50_disp_mast_mthd_pior = {
+nv50_disp_core_mthd_pior = {
.mthd = 0x0040,
.addr = 0x000008,
.data = {
@@ -419,7 +421,7 @@ nv50_disp_mast_mthd_pior = {
};
static const struct nv50_disp_mthd_list
-nv50_disp_mast_mthd_head = {
+nv50_disp_core_mthd_head = {
.mthd = 0x0400,
.addr = 0x000540,
.data = {
@@ -466,21 +468,21 @@ nv50_disp_mast_mthd_head = {
};
static const struct nv50_disp_mthd_chan
-nv50_disp_mast_mthd_chan = {
+nv50_disp_core_mthd_chan = {
.name = "Core",
.addr = 0x000000,
.data = {
- { "Global", 1, &nv50_disp_mast_mthd_base },
- { "DAC", 3, &nv50_disp_mast_mthd_dac },
- { "SOR", 2, &nv50_disp_mast_mthd_sor },
- { "PIOR", 3, &nv50_disp_mast_mthd_pior },
- { "HEAD", 2, &nv50_disp_mast_mthd_head },
+ { "Global", 1, &nv50_disp_core_mthd_base },
+ { "DAC", 3, &nv50_disp_core_mthd_dac },
+ { "SOR", 2, &nv50_disp_core_mthd_sor },
+ { "PIOR", 3, &nv50_disp_core_mthd_pior },
+ { "HEAD", 2, &nv50_disp_core_mthd_head },
{}
}
};
int
-nv50_disp_mast_ctor(struct nouveau_object *parent,
+nv50_disp_core_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
@@ -509,7 +511,7 @@ nv50_disp_mast_ctor(struct nouveau_object *parent,
}
static int
-nv50_disp_mast_init(struct nouveau_object *object)
+nv50_disp_core_init(struct nouveau_object *object)
{
struct nv50_disp_priv *priv = (void *)object->engine;
struct nv50_disp_dmac *mast = (void *)object;
@@ -546,7 +548,7 @@ nv50_disp_mast_init(struct nouveau_object *object)
}
static int
-nv50_disp_mast_fini(struct nouveau_object *object, bool suspend)
+nv50_disp_core_fini(struct nouveau_object *object, bool suspend)
{
struct nv50_disp_priv *priv = (void *)object->engine;
struct nv50_disp_dmac *mast = (void *)object;
@@ -567,11 +569,11 @@ nv50_disp_mast_fini(struct nouveau_object *object, bool suspend)
}
struct nv50_disp_chan_impl
-nv50_disp_mast_ofuncs = {
- .base.ctor = nv50_disp_mast_ctor,
+nv50_disp_core_ofuncs = {
+ .base.ctor = nv50_disp_core_ctor,
.base.dtor = nv50_disp_dmac_dtor,
- .base.init = nv50_disp_mast_init,
- .base.fini = nv50_disp_mast_fini,
+ .base.init = nv50_disp_core_init,
+ .base.fini = nv50_disp_core_fini,
.base.map = nv50_disp_chan_map,
.base.ntfy = nv50_disp_chan_ntfy,
.base.rd32 = nv50_disp_chan_rd32,
@@ -586,7 +588,7 @@ nv50_disp_mast_ofuncs = {
******************************************************************************/
static const struct nv50_disp_mthd_list
-nv50_disp_sync_mthd_base = {
+nv50_disp_base_mthd_base = {
.mthd = 0x0000,
.addr = 0x000000,
.data = {
@@ -611,7 +613,7 @@ nv50_disp_sync_mthd_base = {
};
const struct nv50_disp_mthd_list
-nv50_disp_sync_mthd_image = {
+nv50_disp_base_mthd_image = {
.mthd = 0x0400,
.addr = 0x000000,
.data = {
@@ -625,18 +627,18 @@ nv50_disp_sync_mthd_image = {
};
static const struct nv50_disp_mthd_chan
-nv50_disp_sync_mthd_chan = {
+nv50_disp_base_mthd_chan = {
.name = "Base",
.addr = 0x000540,
.data = {
- { "Global", 1, &nv50_disp_sync_mthd_base },
- { "Image", 2, &nv50_disp_sync_mthd_image },
+ { "Global", 1, &nv50_disp_base_mthd_base },
+ { "Image", 2, &nv50_disp_base_mthd_image },
{}
}
};
int
-nv50_disp_sync_ctor(struct nouveau_object *parent,
+nv50_disp_base_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
@@ -669,8 +671,8 @@ nv50_disp_sync_ctor(struct nouveau_object *parent,
}
struct nv50_disp_chan_impl
-nv50_disp_sync_ofuncs = {
- .base.ctor = nv50_disp_sync_ctor,
+nv50_disp_base_ofuncs = {
+ .base.ctor = nv50_disp_base_ctor,
.base.dtor = nv50_disp_dmac_dtor,
.base.init = nv50_disp_dmac_init,
.base.fini = nv50_disp_dmac_fini,
@@ -942,7 +944,7 @@ nv50_disp_curs_ofuncs = {
******************************************************************************/
int
-nv50_disp_base_scanoutpos(NV50_DISP_MTHD_V0)
+nv50_disp_main_scanoutpos(NV50_DISP_MTHD_V0)
{
const u32 blanke = nv_rd32(priv, 0x610aec + (head * 0x540));
const u32 blanks = nv_rd32(priv, 0x610af4 + (head * 0x540));
@@ -974,7 +976,7 @@ nv50_disp_base_scanoutpos(NV50_DISP_MTHD_V0)
}
int
-nv50_disp_base_mthd(struct nouveau_object *object, u32 mthd,
+nv50_disp_main_mthd(struct nouveau_object *object, u32 mthd,
void *data, u32 size)
{
const struct nv50_disp_impl *impl = (void *)nv_oclass(object->engine);
@@ -1098,7 +1100,7 @@ nv50_disp_base_mthd(struct nouveau_object *object, u32 mthd,
}
int
-nv50_disp_base_ctor(struct nouveau_object *parent,
+nv50_disp_main_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
@@ -1118,7 +1120,7 @@ nv50_disp_base_ctor(struct nouveau_object *parent,
}
void
-nv50_disp_base_dtor(struct nouveau_object *object)
+nv50_disp_main_dtor(struct nouveau_object *object)
{
struct nv50_disp_base *base = (void *)object;
nouveau_ramht_ref(NULL, &base->ramht);
@@ -1126,7 +1128,7 @@ nv50_disp_base_dtor(struct nouveau_object *object)
}
static int
-nv50_disp_base_init(struct nouveau_object *object)
+nv50_disp_main_init(struct nouveau_object *object)
{
struct nv50_disp_priv *priv = (void *)object->engine;
struct nv50_disp_base *base = (void *)object;
@@ -1194,7 +1196,7 @@ nv50_disp_base_init(struct nouveau_object *object)
}
static int
-nv50_disp_base_fini(struct nouveau_object *object, bool suspend)
+nv50_disp_main_fini(struct nouveau_object *object, bool suspend)
{
struct nv50_disp_priv *priv = (void *)object->engine;
struct nv50_disp_base *base = (void *)object;
@@ -1207,25 +1209,25 @@ nv50_disp_base_fini(struct nouveau_object *object, bool suspend)
}
struct nouveau_ofuncs
-nv50_disp_base_ofuncs = {
- .ctor = nv50_disp_base_ctor,
- .dtor = nv50_disp_base_dtor,
- .init = nv50_disp_base_init,
- .fini = nv50_disp_base_fini,
- .mthd = nv50_disp_base_mthd,
+nv50_disp_main_ofuncs = {
+ .ctor = nv50_disp_main_ctor,
+ .dtor = nv50_disp_main_dtor,
+ .init = nv50_disp_main_init,
+ .fini = nv50_disp_main_fini,
+ .mthd = nv50_disp_main_mthd,
.ntfy = nouveau_disp_ntfy,
};
static struct nouveau_oclass
-nv50_disp_base_oclass[] = {
- { NV50_DISP, &nv50_disp_base_ofuncs },
+nv50_disp_main_oclass[] = {
+ { NV50_DISP, &nv50_disp_main_ofuncs },
{}
};
static struct nouveau_oclass
nv50_disp_sclass[] = {
- { NV50_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base },
- { NV50_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base },
+ { NV50_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
+ { NV50_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
{ NV50_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
{ NV50_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
{ NV50_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
@@ -1974,7 +1976,7 @@ nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- nv_engine(priv)->sclass = nv50_disp_base_oclass;
+ nv_engine(priv)->sclass = nv50_disp_main_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nv50_disp_intr;
INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
@@ -2007,9 +2009,9 @@ nv50_disp_oclass = &(struct nv50_disp_impl) {
},
.base.vblank = &nv50_disp_vblank_func,
.base.outp = nv50_disp_outp_sclass,
- .mthd.core = &nv50_disp_mast_mthd_chan,
- .mthd.base = &nv50_disp_sync_mthd_chan,
+ .mthd.core = &nv50_disp_core_mthd_chan,
+ .mthd.base = &nv50_disp_base_mthd_chan,
.mthd.ovly = &nv50_disp_ovly_mthd_chan,
.mthd.prev = 0x000004,
- .head.scanoutpos = nv50_disp_base_scanoutpos,
+ .head.scanoutpos = nv50_disp_main_scanoutpos,
}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
index 5279feefec0..7f08078ee92 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
@@ -42,6 +42,7 @@ struct nv50_disp_priv {
int (*hda_eld)(NV50_DISP_MTHD_V1);
int (*hdmi)(NV50_DISP_MTHD_V1);
u32 lvdsconf;
+ void (*magic)(struct nvkm_output *);
} sor;
struct {
int nr;
@@ -63,10 +64,10 @@ struct nv50_disp_impl {
} head;
};
-int nv50_disp_base_scanoutpos(NV50_DISP_MTHD_V0);
-int nv50_disp_base_mthd(struct nouveau_object *, u32, void *, u32);
+int nv50_disp_main_scanoutpos(NV50_DISP_MTHD_V0);
+int nv50_disp_main_mthd(struct nouveau_object *, u32, void *, u32);
-int nvd0_disp_base_scanoutpos(NV50_DISP_MTHD_V0);
+int nvd0_disp_main_scanoutpos(NV50_DISP_MTHD_V0);
int nv50_dac_power(NV50_DISP_MTHD_V1);
int nv50_dac_sense(NV50_DISP_MTHD_V1);
@@ -169,18 +170,18 @@ struct nv50_disp_mthd_chan {
} data[];
};
-extern struct nv50_disp_chan_impl nv50_disp_mast_ofuncs;
-int nv50_disp_mast_ctor(struct nouveau_object *, struct nouveau_object *,
+extern struct nv50_disp_chan_impl nv50_disp_core_ofuncs;
+int nv50_disp_core_ctor(struct nouveau_object *, struct nouveau_object *,
struct nouveau_oclass *, void *, u32,
struct nouveau_object **);
-extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_base;
-extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_sor;
-extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_pior;
-extern struct nv50_disp_chan_impl nv50_disp_sync_ofuncs;
-int nv50_disp_sync_ctor(struct nouveau_object *, struct nouveau_object *,
+extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_base;
+extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_sor;
+extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_pior;
+extern struct nv50_disp_chan_impl nv50_disp_base_ofuncs;
+int nv50_disp_base_ctor(struct nouveau_object *, struct nouveau_object *,
struct nouveau_oclass *, void *, u32,
struct nouveau_object **);
-extern const struct nv50_disp_mthd_list nv50_disp_sync_mthd_image;
+extern const struct nv50_disp_mthd_list nv50_disp_base_mthd_image;
extern struct nv50_disp_chan_impl nv50_disp_ovly_ofuncs;
int nv50_disp_ovly_ctor(struct nouveau_object *, struct nouveau_object *,
struct nouveau_oclass *, void *, u32,
@@ -194,12 +195,12 @@ extern struct nv50_disp_chan_impl nv50_disp_curs_ofuncs;
int nv50_disp_curs_ctor(struct nouveau_object *, struct nouveau_object *,
struct nouveau_oclass *, void *, u32,
struct nouveau_object **);
-extern struct nouveau_ofuncs nv50_disp_base_ofuncs;
-int nv50_disp_base_ctor(struct nouveau_object *, struct nouveau_object *,
+extern struct nouveau_ofuncs nv50_disp_main_ofuncs;
+int nv50_disp_main_ctor(struct nouveau_object *, struct nouveau_object *,
struct nouveau_oclass *, void *, u32,
struct nouveau_object **);
-void nv50_disp_base_dtor(struct nouveau_object *);
-extern struct nouveau_omthds nv50_disp_base_omthds[];
+void nv50_disp_main_dtor(struct nouveau_object *);
+extern struct nouveau_omthds nv50_disp_main_omthds[];
extern struct nouveau_oclass nv50_disp_cclass;
void nv50_disp_mthd_chan(struct nv50_disp_priv *, int debug, int head,
const struct nv50_disp_mthd_chan *);
@@ -207,31 +208,31 @@ void nv50_disp_intr_supervisor(struct work_struct *);
void nv50_disp_intr(struct nouveau_subdev *);
extern const struct nvkm_event_func nv50_disp_vblank_func;
-extern const struct nv50_disp_mthd_chan nv84_disp_mast_mthd_chan;
-extern const struct nv50_disp_mthd_list nv84_disp_mast_mthd_dac;
-extern const struct nv50_disp_mthd_list nv84_disp_mast_mthd_head;
-extern const struct nv50_disp_mthd_chan nv84_disp_sync_mthd_chan;
+extern const struct nv50_disp_mthd_chan nv84_disp_core_mthd_chan;
+extern const struct nv50_disp_mthd_list nv84_disp_core_mthd_dac;
+extern const struct nv50_disp_mthd_list nv84_disp_core_mthd_head;
+extern const struct nv50_disp_mthd_chan nv84_disp_base_mthd_chan;
extern const struct nv50_disp_mthd_chan nv84_disp_ovly_mthd_chan;
-extern const struct nv50_disp_mthd_chan nv94_disp_mast_mthd_chan;
+extern const struct nv50_disp_mthd_chan nv94_disp_core_mthd_chan;
-extern struct nv50_disp_chan_impl nvd0_disp_mast_ofuncs;
-extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_base;
-extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_dac;
-extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_sor;
-extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_pior;
-extern struct nv50_disp_chan_impl nvd0_disp_sync_ofuncs;
+extern struct nv50_disp_chan_impl nvd0_disp_core_ofuncs;
+extern const struct nv50_disp_mthd_list nvd0_disp_core_mthd_base;
+extern const struct nv50_disp_mthd_list nvd0_disp_core_mthd_dac;
+extern const struct nv50_disp_mthd_list nvd0_disp_core_mthd_sor;
+extern const struct nv50_disp_mthd_list nvd0_disp_core_mthd_pior;
+extern struct nv50_disp_chan_impl nvd0_disp_base_ofuncs;
extern struct nv50_disp_chan_impl nvd0_disp_ovly_ofuncs;
-extern const struct nv50_disp_mthd_chan nvd0_disp_sync_mthd_chan;
+extern const struct nv50_disp_mthd_chan nvd0_disp_base_mthd_chan;
extern struct nv50_disp_chan_impl nvd0_disp_oimm_ofuncs;
extern struct nv50_disp_chan_impl nvd0_disp_curs_ofuncs;
-extern struct nouveau_ofuncs nvd0_disp_base_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_main_ofuncs;
extern struct nouveau_oclass nvd0_disp_cclass;
void nvd0_disp_intr_supervisor(struct work_struct *);
void nvd0_disp_intr(struct nouveau_subdev *);
extern const struct nvkm_event_func nvd0_disp_vblank_func;
-extern const struct nv50_disp_mthd_chan nve0_disp_mast_mthd_chan;
+extern const struct nv50_disp_mthd_chan nve0_disp_core_mthd_chan;
extern const struct nv50_disp_mthd_chan nve0_disp_ovly_mthd_chan;
extern struct nvkm_output_dp_impl nv50_pior_dp_impl;
@@ -242,6 +243,10 @@ int nv94_sor_dp_lnk_pwr(struct nvkm_output_dp *, int);
extern struct nouveau_oclass *nv94_disp_outp_sclass[];
extern struct nvkm_output_dp_impl nvd0_sor_dp_impl;
+int nvd0_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool);
extern struct nouveau_oclass *nvd0_disp_outp_sclass[];
+void gm204_sor_magic(struct nvkm_output *outp);
+extern struct nvkm_output_dp_impl gm204_sor_dp_impl;
+
#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
index d36284715b2..13eff5e4ee5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
@@ -34,7 +34,7 @@
******************************************************************************/
const struct nv50_disp_mthd_list
-nv84_disp_mast_mthd_dac = {
+nv84_disp_core_mthd_dac = {
.mthd = 0x0080,
.addr = 0x000008,
.data = {
@@ -46,7 +46,7 @@ nv84_disp_mast_mthd_dac = {
};
const struct nv50_disp_mthd_list
-nv84_disp_mast_mthd_head = {
+nv84_disp_core_mthd_head = {
.mthd = 0x0400,
.addr = 0x000540,
.data = {
@@ -98,15 +98,15 @@ nv84_disp_mast_mthd_head = {
};
const struct nv50_disp_mthd_chan
-nv84_disp_mast_mthd_chan = {
+nv84_disp_core_mthd_chan = {
.name = "Core",
.addr = 0x000000,
.data = {
- { "Global", 1, &nv50_disp_mast_mthd_base },
- { "DAC", 3, &nv84_disp_mast_mthd_dac },
- { "SOR", 2, &nv50_disp_mast_mthd_sor },
- { "PIOR", 3, &nv50_disp_mast_mthd_pior },
- { "HEAD", 2, &nv84_disp_mast_mthd_head },
+ { "Global", 1, &nv50_disp_core_mthd_base },
+ { "DAC", 3, &nv84_disp_core_mthd_dac },
+ { "SOR", 2, &nv50_disp_core_mthd_sor },
+ { "PIOR", 3, &nv50_disp_core_mthd_pior },
+ { "HEAD", 2, &nv84_disp_core_mthd_head },
{}
}
};
@@ -116,7 +116,7 @@ nv84_disp_mast_mthd_chan = {
******************************************************************************/
static const struct nv50_disp_mthd_list
-nv84_disp_sync_mthd_base = {
+nv84_disp_base_mthd_base = {
.mthd = 0x0000,
.addr = 0x000000,
.data = {
@@ -146,12 +146,12 @@ nv84_disp_sync_mthd_base = {
};
const struct nv50_disp_mthd_chan
-nv84_disp_sync_mthd_chan = {
+nv84_disp_base_mthd_chan = {
.name = "Base",
.addr = 0x000540,
.data = {
- { "Global", 1, &nv84_disp_sync_mthd_base },
- { "Image", 2, &nv50_disp_sync_mthd_image },
+ { "Global", 1, &nv84_disp_base_mthd_base },
+ { "Image", 2, &nv50_disp_base_mthd_image },
{}
}
};
@@ -204,8 +204,8 @@ nv84_disp_ovly_mthd_chan = {
static struct nouveau_oclass
nv84_disp_sclass[] = {
- { G82_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base },
- { G82_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base },
+ { G82_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
+ { G82_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
{ G82_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
{ G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
{ G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
@@ -213,8 +213,8 @@ nv84_disp_sclass[] = {
};
static struct nouveau_oclass
-nv84_disp_base_oclass[] = {
- { G82_DISP, &nv50_disp_base_ofuncs },
+nv84_disp_main_oclass[] = {
+ { G82_DISP, &nv50_disp_main_ofuncs },
{}
};
@@ -240,7 +240,7 @@ nv84_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- nv_engine(priv)->sclass = nv84_disp_base_oclass;
+ nv_engine(priv)->sclass = nv84_disp_main_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nv50_disp_intr;
INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
@@ -268,9 +268,9 @@ nv84_disp_oclass = &(struct nv50_disp_impl) {
},
.base.vblank = &nv50_disp_vblank_func,
.base.outp = nv50_disp_outp_sclass,
- .mthd.core = &nv84_disp_mast_mthd_chan,
- .mthd.base = &nv84_disp_sync_mthd_chan,
+ .mthd.core = &nv84_disp_core_mthd_chan,
+ .mthd.base = &nv84_disp_base_mthd_chan,
.mthd.ovly = &nv84_disp_ovly_mthd_chan,
.mthd.prev = 0x000004,
- .head.scanoutpos = nv50_disp_base_scanoutpos,
+ .head.scanoutpos = nv50_disp_main_scanoutpos,
}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
index a117064002b..2bb7ac5cd0e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
@@ -34,7 +34,7 @@
******************************************************************************/
const struct nv50_disp_mthd_list
-nv94_disp_mast_mthd_sor = {
+nv94_disp_core_mthd_sor = {
.mthd = 0x0040,
.addr = 0x000008,
.data = {
@@ -44,15 +44,15 @@ nv94_disp_mast_mthd_sor = {
};
const struct nv50_disp_mthd_chan
-nv94_disp_mast_mthd_chan = {
+nv94_disp_core_mthd_chan = {
.name = "Core",
.addr = 0x000000,
.data = {
- { "Global", 1, &nv50_disp_mast_mthd_base },
- { "DAC", 3, &nv84_disp_mast_mthd_dac },
- { "SOR", 4, &nv94_disp_mast_mthd_sor },
- { "PIOR", 3, &nv50_disp_mast_mthd_pior },
- { "HEAD", 2, &nv84_disp_mast_mthd_head },
+ { "Global", 1, &nv50_disp_core_mthd_base },
+ { "DAC", 3, &nv84_disp_core_mthd_dac },
+ { "SOR", 4, &nv94_disp_core_mthd_sor },
+ { "PIOR", 3, &nv50_disp_core_mthd_pior },
+ { "HEAD", 2, &nv84_disp_core_mthd_head },
{}
}
};
@@ -63,8 +63,8 @@ nv94_disp_mast_mthd_chan = {
static struct nouveau_oclass
nv94_disp_sclass[] = {
- { GT206_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base },
- { GT200_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base },
+ { GT206_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
+ { GT200_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
{ GT200_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
{ G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
{ G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
@@ -72,8 +72,8 @@ nv94_disp_sclass[] = {
};
static struct nouveau_oclass
-nv94_disp_base_oclass[] = {
- { GT206_DISP, &nv50_disp_base_ofuncs },
+nv94_disp_main_oclass[] = {
+ { GT206_DISP, &nv50_disp_main_ofuncs },
{}
};
@@ -99,7 +99,7 @@ nv94_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- nv_engine(priv)->sclass = nv94_disp_base_oclass;
+ nv_engine(priv)->sclass = nv94_disp_main_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nv50_disp_intr;
INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
@@ -134,9 +134,9 @@ nv94_disp_oclass = &(struct nv50_disp_impl) {
},
.base.vblank = &nv50_disp_vblank_func,
.base.outp = nv94_disp_outp_sclass,
- .mthd.core = &nv94_disp_mast_mthd_chan,
- .mthd.base = &nv84_disp_sync_mthd_chan,
+ .mthd.core = &nv94_disp_core_mthd_chan,
+ .mthd.base = &nv84_disp_base_mthd_chan,
.mthd.ovly = &nv84_disp_ovly_mthd_chan,
.mthd.prev = 0x000004,
- .head.scanoutpos = nv50_disp_base_scanoutpos,
+ .head.scanoutpos = nv50_disp_main_scanoutpos,
}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
index c67e68aadd4..b32456c9494 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
@@ -80,8 +80,8 @@ nva0_disp_ovly_mthd_chan = {
static struct nouveau_oclass
nva0_disp_sclass[] = {
- { GT200_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base },
- { GT200_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base },
+ { GT200_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
+ { GT200_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
{ GT200_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
{ G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
{ G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
@@ -89,8 +89,8 @@ nva0_disp_sclass[] = {
};
static struct nouveau_oclass
-nva0_disp_base_oclass[] = {
- { GT200_DISP, &nv50_disp_base_ofuncs },
+nva0_disp_main_oclass[] = {
+ { GT200_DISP, &nv50_disp_main_ofuncs },
{}
};
@@ -116,7 +116,7 @@ nva0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- nv_engine(priv)->sclass = nva0_disp_base_oclass;
+ nv_engine(priv)->sclass = nva0_disp_main_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nv50_disp_intr;
INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
@@ -144,9 +144,9 @@ nva0_disp_oclass = &(struct nv50_disp_impl) {
},
.base.vblank = &nv50_disp_vblank_func,
.base.outp = nv50_disp_outp_sclass,
- .mthd.core = &nv84_disp_mast_mthd_chan,
- .mthd.base = &nv84_disp_sync_mthd_chan,
+ .mthd.core = &nv84_disp_core_mthd_chan,
+ .mthd.base = &nv84_disp_base_mthd_chan,
.mthd.ovly = &nva0_disp_ovly_mthd_chan,
.mthd.prev = 0x000004,
- .head.scanoutpos = nv50_disp_base_scanoutpos,
+ .head.scanoutpos = nv50_disp_main_scanoutpos,
}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
index 22969f355aa..951d79f9b78 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
@@ -35,8 +35,8 @@
static struct nouveau_oclass
nva3_disp_sclass[] = {
- { GT214_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base },
- { GT214_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base },
+ { GT214_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
+ { GT214_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
{ GT214_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
{ GT214_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
{ GT214_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
@@ -44,8 +44,8 @@ nva3_disp_sclass[] = {
};
static struct nouveau_oclass
-nva3_disp_base_oclass[] = {
- { GT214_DISP, &nv50_disp_base_ofuncs },
+nva3_disp_main_oclass[] = {
+ { GT214_DISP, &nv50_disp_main_ofuncs },
{}
};
@@ -71,7 +71,7 @@ nva3_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- nv_engine(priv)->sclass = nva3_disp_base_oclass;
+ nv_engine(priv)->sclass = nva3_disp_main_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nv50_disp_intr;
INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
@@ -100,9 +100,9 @@ nva3_disp_oclass = &(struct nv50_disp_impl) {
},
.base.vblank = &nv50_disp_vblank_func,
.base.outp = nv94_disp_outp_sclass,
- .mthd.core = &nv94_disp_mast_mthd_chan,
- .mthd.base = &nv84_disp_sync_mthd_chan,
+ .mthd.core = &nv94_disp_core_mthd_chan,
+ .mthd.base = &nv84_disp_base_mthd_chan,
.mthd.ovly = &nv84_disp_ovly_mthd_chan,
.mthd.prev = 0x000004,
- .head.scanoutpos = nv50_disp_base_scanoutpos,
+ .head.scanoutpos = nv50_disp_main_scanoutpos,
}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index 747e64bb9c0..181a2d57e35 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -51,12 +51,14 @@ nvd0_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
{
struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
nv_mask(priv, 0x610090, 0x00000001 << index, 0x00000000 << index);
+ nv_wr32(priv, 0x61008c, 0x00000001 << index);
}
static void
nvd0_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
{
struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
+ nv_wr32(priv, 0x61008c, 0x00000001 << index);
nv_mask(priv, 0x610090, 0x00000001 << index, 0x00000001 << index);
}
@@ -151,7 +153,7 @@ nvd0_disp_dmac_fini(struct nouveau_object *object, bool suspend)
******************************************************************************/
const struct nv50_disp_mthd_list
-nvd0_disp_mast_mthd_base = {
+nvd0_disp_core_mthd_base = {
.mthd = 0x0000,
.addr = 0x000000,
.data = {
@@ -164,7 +166,7 @@ nvd0_disp_mast_mthd_base = {
};
const struct nv50_disp_mthd_list
-nvd0_disp_mast_mthd_dac = {
+nvd0_disp_core_mthd_dac = {
.mthd = 0x0020,
.addr = 0x000020,
.data = {
@@ -177,7 +179,7 @@ nvd0_disp_mast_mthd_dac = {
};
const struct nv50_disp_mthd_list
-nvd0_disp_mast_mthd_sor = {
+nvd0_disp_core_mthd_sor = {
.mthd = 0x0020,
.addr = 0x000020,
.data = {
@@ -190,7 +192,7 @@ nvd0_disp_mast_mthd_sor = {
};
const struct nv50_disp_mthd_list
-nvd0_disp_mast_mthd_pior = {
+nvd0_disp_core_mthd_pior = {
.mthd = 0x0020,
.addr = 0x000020,
.data = {
@@ -203,7 +205,7 @@ nvd0_disp_mast_mthd_pior = {
};
static const struct nv50_disp_mthd_list
-nvd0_disp_mast_mthd_head = {
+nvd0_disp_core_mthd_head = {
.mthd = 0x0300,
.addr = 0x000300,
.data = {
@@ -277,21 +279,21 @@ nvd0_disp_mast_mthd_head = {
};
static const struct nv50_disp_mthd_chan
-nvd0_disp_mast_mthd_chan = {
+nvd0_disp_core_mthd_chan = {
.name = "Core",
.addr = 0x000000,
.data = {
- { "Global", 1, &nvd0_disp_mast_mthd_base },
- { "DAC", 3, &nvd0_disp_mast_mthd_dac },
- { "SOR", 8, &nvd0_disp_mast_mthd_sor },
- { "PIOR", 4, &nvd0_disp_mast_mthd_pior },
- { "HEAD", 4, &nvd0_disp_mast_mthd_head },
+ { "Global", 1, &nvd0_disp_core_mthd_base },
+ { "DAC", 3, &nvd0_disp_core_mthd_dac },
+ { "SOR", 8, &nvd0_disp_core_mthd_sor },
+ { "PIOR", 4, &nvd0_disp_core_mthd_pior },
+ { "HEAD", 4, &nvd0_disp_core_mthd_head },
{}
}
};
static int
-nvd0_disp_mast_init(struct nouveau_object *object)
+nvd0_disp_core_init(struct nouveau_object *object)
{
struct nv50_disp_priv *priv = (void *)object->engine;
struct nv50_disp_dmac *mast = (void *)object;
@@ -322,7 +324,7 @@ nvd0_disp_mast_init(struct nouveau_object *object)
}
static int
-nvd0_disp_mast_fini(struct nouveau_object *object, bool suspend)
+nvd0_disp_core_fini(struct nouveau_object *object, bool suspend)
{
struct nv50_disp_priv *priv = (void *)object->engine;
struct nv50_disp_dmac *mast = (void *)object;
@@ -344,11 +346,11 @@ nvd0_disp_mast_fini(struct nouveau_object *object, bool suspend)
}
struct nv50_disp_chan_impl
-nvd0_disp_mast_ofuncs = {
- .base.ctor = nv50_disp_mast_ctor,
+nvd0_disp_core_ofuncs = {
+ .base.ctor = nv50_disp_core_ctor,
.base.dtor = nv50_disp_dmac_dtor,
- .base.init = nvd0_disp_mast_init,
- .base.fini = nvd0_disp_mast_fini,
+ .base.init = nvd0_disp_core_init,
+ .base.fini = nvd0_disp_core_fini,
.base.ntfy = nv50_disp_chan_ntfy,
.base.map = nv50_disp_chan_map,
.base.rd32 = nv50_disp_chan_rd32,
@@ -363,7 +365,7 @@ nvd0_disp_mast_ofuncs = {
******************************************************************************/
static const struct nv50_disp_mthd_list
-nvd0_disp_sync_mthd_base = {
+nvd0_disp_base_mthd_base = {
.mthd = 0x0000,
.addr = 0x000000,
.data = {
@@ -413,7 +415,7 @@ nvd0_disp_sync_mthd_base = {
};
static const struct nv50_disp_mthd_list
-nvd0_disp_sync_mthd_image = {
+nvd0_disp_base_mthd_image = {
.mthd = 0x0400,
.addr = 0x000400,
.data = {
@@ -427,19 +429,19 @@ nvd0_disp_sync_mthd_image = {
};
const struct nv50_disp_mthd_chan
-nvd0_disp_sync_mthd_chan = {
+nvd0_disp_base_mthd_chan = {
.name = "Base",
.addr = 0x001000,
.data = {
- { "Global", 1, &nvd0_disp_sync_mthd_base },
- { "Image", 2, &nvd0_disp_sync_mthd_image },
+ { "Global", 1, &nvd0_disp_base_mthd_base },
+ { "Image", 2, &nvd0_disp_base_mthd_image },
{}
}
};
struct nv50_disp_chan_impl
-nvd0_disp_sync_ofuncs = {
- .base.ctor = nv50_disp_sync_ctor,
+nvd0_disp_base_ofuncs = {
+ .base.ctor = nv50_disp_base_ctor,
.base.dtor = nv50_disp_dmac_dtor,
.base.init = nvd0_disp_dmac_init,
.base.fini = nvd0_disp_dmac_fini,
@@ -624,7 +626,7 @@ nvd0_disp_curs_ofuncs = {
******************************************************************************/
int
-nvd0_disp_base_scanoutpos(NV50_DISP_MTHD_V0)
+nvd0_disp_main_scanoutpos(NV50_DISP_MTHD_V0)
{
const u32 total = nv_rd32(priv, 0x640414 + (head * 0x300));
const u32 blanke = nv_rd32(priv, 0x64041c + (head * 0x300));
@@ -656,7 +658,7 @@ nvd0_disp_base_scanoutpos(NV50_DISP_MTHD_V0)
}
static int
-nvd0_disp_base_init(struct nouveau_object *object)
+nvd0_disp_main_init(struct nouveau_object *object)
{
struct nv50_disp_priv *priv = (void *)object->engine;
struct nv50_disp_base *base = (void *)object;
@@ -725,7 +727,7 @@ nvd0_disp_base_init(struct nouveau_object *object)
}
static int
-nvd0_disp_base_fini(struct nouveau_object *object, bool suspend)
+nvd0_disp_main_fini(struct nouveau_object *object, bool suspend)
{
struct nv50_disp_priv *priv = (void *)object->engine;
struct nv50_disp_base *base = (void *)object;
@@ -737,25 +739,25 @@ nvd0_disp_base_fini(struct nouveau_object *object, bool suspend)
}
struct nouveau_ofuncs
-nvd0_disp_base_ofuncs = {
- .ctor = nv50_disp_base_ctor,
- .dtor = nv50_disp_base_dtor,
- .init = nvd0_disp_base_init,
- .fini = nvd0_disp_base_fini,
- .mthd = nv50_disp_base_mthd,
+nvd0_disp_main_ofuncs = {
+ .ctor = nv50_disp_main_ctor,
+ .dtor = nv50_disp_main_dtor,
+ .init = nvd0_disp_main_init,
+ .fini = nvd0_disp_main_fini,
+ .mthd = nv50_disp_main_mthd,
.ntfy = nouveau_disp_ntfy,
};
static struct nouveau_oclass
-nvd0_disp_base_oclass[] = {
- { GF110_DISP, &nvd0_disp_base_ofuncs },
+nvd0_disp_main_oclass[] = {
+ { GF110_DISP, &nvd0_disp_main_ofuncs },
{}
};
static struct nouveau_oclass
nvd0_disp_sclass[] = {
- { GF110_DISP_CORE_CHANNEL_DMA, &nvd0_disp_mast_ofuncs.base },
- { GF110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_sync_ofuncs.base },
+ { GF110_DISP_CORE_CHANNEL_DMA, &nvd0_disp_core_ofuncs.base },
+ { GF110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_base_ofuncs.base },
{ GF110_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base },
{ GF110_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base },
{ GF110_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base },
@@ -1055,6 +1057,9 @@ nvd0_disp_intr_unk2_2(struct nv50_disp_priv *priv, int head)
if (nvkm_output_dp_train(outp, pclk, true))
ERR("link not trained before attach\n");
+ } else {
+ if (priv->sor.magic)
+ priv->sor.magic(outp);
}
exec_clkcmp(priv, head, 0, pclk, &conf);
@@ -1063,10 +1068,18 @@ nvd0_disp_intr_unk2_2(struct nv50_disp_priv *priv, int head)
addr = 0x612280 + (ffs(outp->info.or) - 1) * 0x800;
data = 0x00000000;
} else {
- if (outp->info.type == DCB_OUTPUT_DP)
- nvd0_disp_intr_unk2_2_tu(priv, head, &outp->info);
addr = 0x612300 + (ffs(outp->info.or) - 1) * 0x800;
data = (conf & 0x0100) ? 0x00000101 : 0x00000000;
+ switch (outp->info.type) {
+ case DCB_OUTPUT_TMDS:
+ nv_mask(priv, addr, 0x007c0000, 0x00280000);
+ break;
+ case DCB_OUTPUT_DP:
+ nvd0_disp_intr_unk2_2_tu(priv, head, &outp->info);
+ break;
+ default:
+ break;
+ }
}
nv_mask(priv, addr, 0x00000707, data);
@@ -1259,7 +1272,7 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- nv_engine(priv)->sclass = nvd0_disp_base_oclass;
+ nv_engine(priv)->sclass = nvd0_disp_main_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nvd0_disp_intr;
INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
@@ -1292,9 +1305,9 @@ nvd0_disp_oclass = &(struct nv50_disp_impl) {
},
.base.vblank = &nvd0_disp_vblank_func,
.base.outp = nvd0_disp_outp_sclass,
- .mthd.core = &nvd0_disp_mast_mthd_chan,
- .mthd.base = &nvd0_disp_sync_mthd_chan,
+ .mthd.core = &nvd0_disp_core_mthd_chan,
+ .mthd.base = &nvd0_disp_base_mthd_chan,
.mthd.ovly = &nvd0_disp_ovly_mthd_chan,
.mthd.prev = -0x020000,
- .head.scanoutpos = nvd0_disp_base_scanoutpos,
+ .head.scanoutpos = nvd0_disp_main_scanoutpos,
}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
index db144b2cf06..55debec7e68 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
@@ -34,7 +34,7 @@
******************************************************************************/
static const struct nv50_disp_mthd_list
-nve0_disp_mast_mthd_head = {
+nve0_disp_core_mthd_head = {
.mthd = 0x0300,
.addr = 0x000300,
.data = {
@@ -113,15 +113,15 @@ nve0_disp_mast_mthd_head = {
};
const struct nv50_disp_mthd_chan
-nve0_disp_mast_mthd_chan = {
+nve0_disp_core_mthd_chan = {
.name = "Core",
.addr = 0x000000,
.data = {
- { "Global", 1, &nvd0_disp_mast_mthd_base },
- { "DAC", 3, &nvd0_disp_mast_mthd_dac },
- { "SOR", 8, &nvd0_disp_mast_mthd_sor },
- { "PIOR", 4, &nvd0_disp_mast_mthd_pior },
- { "HEAD", 4, &nve0_disp_mast_mthd_head },
+ { "Global", 1, &nvd0_disp_core_mthd_base },
+ { "DAC", 3, &nvd0_disp_core_mthd_dac },
+ { "SOR", 8, &nvd0_disp_core_mthd_sor },
+ { "PIOR", 4, &nvd0_disp_core_mthd_pior },
+ { "HEAD", 4, &nve0_disp_core_mthd_head },
{}
}
};
@@ -200,8 +200,8 @@ nve0_disp_ovly_mthd_chan = {
static struct nouveau_oclass
nve0_disp_sclass[] = {
- { GK104_DISP_CORE_CHANNEL_DMA, &nvd0_disp_mast_ofuncs.base },
- { GK104_DISP_BASE_CHANNEL_DMA, &nvd0_disp_sync_ofuncs.base },
+ { GK104_DISP_CORE_CHANNEL_DMA, &nvd0_disp_core_ofuncs.base },
+ { GK104_DISP_BASE_CHANNEL_DMA, &nvd0_disp_base_ofuncs.base },
{ GK104_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base },
{ GK104_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base },
{ GK104_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base },
@@ -209,8 +209,8 @@ nve0_disp_sclass[] = {
};
static struct nouveau_oclass
-nve0_disp_base_oclass[] = {
- { GK104_DISP, &nvd0_disp_base_ofuncs },
+nve0_disp_main_oclass[] = {
+ { GK104_DISP, &nvd0_disp_main_ofuncs },
{}
};
@@ -237,7 +237,7 @@ nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- nv_engine(priv)->sclass = nve0_disp_base_oclass;
+ nv_engine(priv)->sclass = nve0_disp_main_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nvd0_disp_intr;
INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
@@ -264,9 +264,9 @@ nve0_disp_oclass = &(struct nv50_disp_impl) {
},
.base.vblank = &nvd0_disp_vblank_func,
.base.outp = nvd0_disp_outp_sclass,
- .mthd.core = &nve0_disp_mast_mthd_chan,
- .mthd.base = &nvd0_disp_sync_mthd_chan,
+ .mthd.core = &nve0_disp_core_mthd_chan,
+ .mthd.base = &nvd0_disp_base_mthd_chan,
.mthd.ovly = &nve0_disp_ovly_mthd_chan,
.mthd.prev = -0x020000,
- .head.scanoutpos = nvd0_disp_base_scanoutpos,
+ .head.scanoutpos = nvd0_disp_main_scanoutpos,
}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
index 402d7d67d80..3e7e2d28744 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
@@ -35,8 +35,8 @@
static struct nouveau_oclass
nvf0_disp_sclass[] = {
- { GK110_DISP_CORE_CHANNEL_DMA, &nvd0_disp_mast_ofuncs.base },
- { GK110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_sync_ofuncs.base },
+ { GK110_DISP_CORE_CHANNEL_DMA, &nvd0_disp_core_ofuncs.base },
+ { GK110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_base_ofuncs.base },
{ GK104_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base },
{ GK104_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base },
{ GK104_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base },
@@ -44,8 +44,8 @@ nvf0_disp_sclass[] = {
};
static struct nouveau_oclass
-nvf0_disp_base_oclass[] = {
- { GK110_DISP, &nvd0_disp_base_ofuncs },
+nvf0_disp_main_oclass[] = {
+ { GK110_DISP, &nvd0_disp_main_ofuncs },
{}
};
@@ -72,7 +72,7 @@ nvf0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- nv_engine(priv)->sclass = nvf0_disp_base_oclass;
+ nv_engine(priv)->sclass = nvf0_disp_main_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nvd0_disp_intr;
INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
@@ -99,9 +99,9 @@ nvf0_disp_oclass = &(struct nv50_disp_impl) {
},
.base.vblank = &nvd0_disp_vblank_func,
.base.outp = nvd0_disp_outp_sclass,
- .mthd.core = &nve0_disp_mast_mthd_chan,
- .mthd.base = &nvd0_disp_sync_mthd_chan,
+ .mthd.core = &nve0_disp_core_mthd_chan,
+ .mthd.base = &nvd0_disp_base_mthd_chan,
.mthd.ovly = &nve0_disp_ovly_mthd_chan,
.mthd.prev = -0x020000,
- .head.scanoutpos = nvd0_disp_base_scanoutpos,
+ .head.scanoutpos = nvd0_disp_main_scanoutpos,
}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outp.c b/drivers/gpu/drm/nouveau/core/engine/disp/outp.c
index a5ff00a9ced..bbd9b6fdc90 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/outp.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/outp.c
@@ -85,7 +85,10 @@ nvkm_output_create_(struct nouveau_object *parent,
dcbE->sorconf.link : 0, dcbE->connector, dcbE->i2c_index,
dcbE->bus, dcbE->heads);
- outp->port = i2c->find(i2c, outp->info.i2c_index);
+ if (outp->info.type != DCB_OUTPUT_DP)
+ outp->port = i2c->find(i2c, NV_I2C_PORT(outp->info.i2c_index));
+ else
+ outp->port = i2c->find(i2c, NV_I2C_AUX(outp->info.i2c_index));
outp->edid = outp->port;
data = nvbios_connEp(bios, outp->info.connector, &ver, &hdr, &connE);
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sorgm204.c b/drivers/gpu/drm/nouveau/core/engine/disp/sorgm204.c
new file mode 100644
index 00000000000..0b4fad39e9a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sorgm204.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/dp.h>
+#include <subdev/bios/init.h>
+#include <subdev/timer.h>
+
+#include "nv50.h"
+
+static inline u32
+gm204_sor_soff(struct nvkm_output_dp *outp)
+{
+ return (ffs(outp->base.info.or) - 1) * 0x800;
+}
+
+static inline u32
+gm204_sor_loff(struct nvkm_output_dp *outp)
+{
+ return gm204_sor_soff(outp) + !(outp->base.info.sorconf.link & 1) * 0x80;
+}
+
+void
+gm204_sor_magic(struct nvkm_output *outp)
+{
+ struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
+ const u32 soff = outp->or * 0x100;
+ const u32 data = outp->or + 1;
+ if (outp->info.sorconf.link & 1)
+ nv_mask(priv, 0x612308 + soff, 0x0000001f, 0x00000000 | data);
+ if (outp->info.sorconf.link & 2)
+ nv_mask(priv, 0x612388 + soff, 0x0000001f, 0x00000010 | data);
+}
+
+static inline u32
+gm204_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
+{
+ return lane * 0x08;
+}
+
+static int
+gm204_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
+{
+ struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
+ const u32 soff = gm204_sor_soff(outp);
+ const u32 data = 0x01010101 * pattern;
+ if (outp->base.info.sorconf.link & 1)
+ nv_mask(priv, 0x61c110 + soff, 0x0f0f0f0f, data);
+ else
+ nv_mask(priv, 0x61c12c + soff, 0x0f0f0f0f, data);
+ return 0;
+}
+
+static int
+gm204_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
+{
+ struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
+ const u32 soff = gm204_sor_soff(outp);
+ const u32 loff = gm204_sor_loff(outp);
+ u32 mask = 0, i;
+
+ for (i = 0; i < nr; i++)
+ mask |= 1 << (gm204_sor_dp_lane_map(priv, i) >> 3);
+
+ nv_mask(priv, 0x61c130 + loff, 0x0000000f, mask);
+ nv_mask(priv, 0x61c034 + soff, 0x80000000, 0x80000000);
+ nv_wait(priv, 0x61c034 + soff, 0x80000000, 0x00000000);
+ return 0;
+}
+
+static int
+gm204_sor_dp_drv_ctl(struct nvkm_output_dp *outp, int ln, int vs, int pe, int pc)
+{
+ struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ const u32 shift = gm204_sor_dp_lane_map(priv, ln);
+ const u32 loff = gm204_sor_loff(outp);
+ u32 addr, data[4];
+ u8 ver, hdr, cnt, len;
+ struct nvbios_dpout info;
+ struct nvbios_dpcfg ocfg;
+
+ addr = nvbios_dpout_match(bios, outp->base.info.hasht,
+ outp->base.info.hashm,
+ &ver, &hdr, &cnt, &len, &info);
+ if (!addr)
+ return -ENODEV;
+
+ addr = nvbios_dpcfg_match(bios, addr, pc, vs, pe,
+ &ver, &hdr, &cnt, &len, &ocfg);
+ if (!addr)
+ return -EINVAL;
+
+ data[0] = nv_rd32(priv, 0x61c118 + loff) & ~(0x000000ff << shift);
+ data[1] = nv_rd32(priv, 0x61c120 + loff) & ~(0x000000ff << shift);
+ data[2] = nv_rd32(priv, 0x61c130 + loff);
+ if ((data[2] & 0x0000ff00) < (ocfg.tx_pu << 8) || ln == 0)
+ data[2] = (data[2] & ~0x0000ff00) | (ocfg.tx_pu << 8);
+ nv_wr32(priv, 0x61c118 + loff, data[0] | (ocfg.dc << shift));
+ nv_wr32(priv, 0x61c120 + loff, data[1] | (ocfg.pe << shift));
+ nv_wr32(priv, 0x61c130 + loff, data[2] | (ocfg.tx_pu << 8));
+ data[3] = nv_rd32(priv, 0x61c13c + loff) & ~(0x000000ff << shift);
+ nv_wr32(priv, 0x61c13c + loff, data[3] | (ocfg.pc << shift));
+ return 0;
+}
+
+struct nvkm_output_dp_impl
+gm204_sor_dp_impl = {
+ .base.base.handle = DCB_OUTPUT_DP,
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nvkm_output_dp_ctor,
+ .dtor = _nvkm_output_dp_dtor,
+ .init = _nvkm_output_dp_init,
+ .fini = _nvkm_output_dp_fini,
+ },
+ .pattern = gm204_sor_dp_pattern,
+ .lnk_pwr = gm204_sor_dp_lnk_pwr,
+ .lnk_ctl = nvd0_sor_dp_lnk_ctl,
+ .drv_ctl = gm204_sor_dp_drv_ctl,
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
index 7b7bbc3e459..fdab2939070 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
@@ -60,7 +60,7 @@ nvd0_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
return 0;
}
-static int
+int
nvd0_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
{
struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
index 3fc4f0b0eac..19f5f652296 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
@@ -51,6 +51,7 @@ nvd0_dmaobj_bind(struct nouveau_dmaobj *dmaobj,
case GK104_DISP_CORE_CHANNEL_DMA:
case GK110_DISP_CORE_CHANNEL_DMA:
case GM107_DISP_CORE_CHANNEL_DMA:
+ case GM204_DISP_CORE_CHANNEL_DMA:
case GF110_DISP_BASE_CHANNEL_DMA:
case GK104_DISP_BASE_CHANNEL_DMA:
case GK110_DISP_BASE_CHANNEL_DMA:
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index f8734eb74ea..6a8db7c80bd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -792,7 +792,7 @@ nve0_fifo_intr_fault(struct nve0_fifo_priv *priv, int unit)
nouveau_engctx_put(engctx);
}
-static const struct nouveau_bitfield nve0_fifo_pbdma_intr[] = {
+static const struct nouveau_bitfield nve0_fifo_pbdma_intr_0[] = {
{ 0x00000001, "MEMREQ" },
{ 0x00000002, "MEMACK_TIMEOUT" },
{ 0x00000004, "MEMACK_EXTRA" },
@@ -827,9 +827,10 @@ static const struct nouveau_bitfield nve0_fifo_pbdma_intr[] = {
};
static void
-nve0_fifo_intr_pbdma(struct nve0_fifo_priv *priv, int unit)
+nve0_fifo_intr_pbdma_0(struct nve0_fifo_priv *priv, int unit)
{
- u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000));
+ u32 mask = nv_rd32(priv, 0x04010c + (unit * 0x2000));
+ u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000)) & mask;
u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000));
u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0xfff;
@@ -840,11 +841,12 @@ nve0_fifo_intr_pbdma(struct nve0_fifo_priv *priv, int unit)
if (stat & 0x00800000) {
if (!nve0_fifo_swmthd(priv, chid, mthd, data))
show &= ~0x00800000;
+ nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
}
if (show) {
nv_error(priv, "PBDMA%d:", unit);
- nouveau_bitfield_print(nve0_fifo_pbdma_intr, show);
+ nouveau_bitfield_print(nve0_fifo_pbdma_intr_0, show);
pr_cont("\n");
nv_error(priv,
"PBDMA%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
@@ -853,10 +855,37 @@ nve0_fifo_intr_pbdma(struct nve0_fifo_priv *priv, int unit)
subc, mthd, data);
}
- nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
nv_wr32(priv, 0x040108 + (unit * 0x2000), stat);
}
+static const struct nouveau_bitfield nve0_fifo_pbdma_intr_1[] = {
+ { 0x00000001, "HCE_RE_ILLEGAL_OP" },
+ { 0x00000002, "HCE_RE_ALIGNB" },
+ { 0x00000004, "HCE_PRIV" },
+ { 0x00000008, "HCE_ILLEGAL_MTHD" },
+ { 0x00000010, "HCE_ILLEGAL_CLASS" },
+ {}
+};
+
+static void
+nve0_fifo_intr_pbdma_1(struct nve0_fifo_priv *priv, int unit)
+{
+ u32 mask = nv_rd32(priv, 0x04014c + (unit * 0x2000));
+ u32 stat = nv_rd32(priv, 0x040148 + (unit * 0x2000)) & mask;
+ u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0xfff;
+
+ if (stat) {
+ nv_error(priv, "PBDMA%d:", unit);
+ nouveau_bitfield_print(nve0_fifo_pbdma_intr_1, stat);
+ pr_cont("\n");
+ nv_error(priv, "PBDMA%d: ch %d %08x %08x\n", unit, chid,
+ nv_rd32(priv, 0x040150 + (unit * 0x2000)),
+ nv_rd32(priv, 0x040154 + (unit * 0x2000)));
+ }
+
+ nv_wr32(priv, 0x040148 + (unit * 0x2000), stat);
+}
+
static void
nve0_fifo_intr_runlist(struct nve0_fifo_priv *priv)
{
@@ -939,7 +968,8 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
u32 mask = nv_rd32(priv, 0x0025a0);
while (mask) {
u32 unit = __ffs(mask);
- nve0_fifo_intr_pbdma(priv, unit);
+ nve0_fifo_intr_pbdma_0(priv, unit);
+ nve0_fifo_intr_pbdma_1(priv, unit);
nv_wr32(priv, 0x0025a0, (1 << unit));
mask &= ~(1 << unit);
}
@@ -1022,6 +1052,12 @@ nve0_fifo_init(struct nouveau_object *object)
nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
}
+ /* PBDMA[n].HCE */
+ for (i = 0; i < priv->spoon_nr; i++) {
+ nv_wr32(priv, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
+ nv_wr32(priv, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
+ }
+
nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
nv_wr32(priv, 0x002100, 0xffffffff);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index 30fd1dc64f9..17251e4b9e8 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -1557,7 +1557,7 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nvc0_graph_ctor_fw(priv, "fuc409d", &priv->fuc409d) ||
nvc0_graph_ctor_fw(priv, "fuc41ac", &priv->fuc41ac) ||
nvc0_graph_ctor_fw(priv, "fuc41ad", &priv->fuc41ad))
- return -EINVAL;
+ return -ENODEV;
priv->firmware = true;
}
diff --git a/drivers/gpu/drm/nouveau/core/include/core/device.h b/drivers/gpu/drm/nouveau/core/include/core/device.h
index 1d9d893929b..2ec2e50d367 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/device.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/device.h
@@ -16,6 +16,7 @@ enum nv_subdev_type {
* to during POST.
*/
NVDEV_SUBDEV_DEVINIT,
+ NVDEV_SUBDEV_IBUS,
NVDEV_SUBDEV_GPIO,
NVDEV_SUBDEV_I2C,
NVDEV_SUBDEV_DEVINIT_LAST = NVDEV_SUBDEV_I2C,
@@ -31,7 +32,6 @@ enum nv_subdev_type {
NVDEV_SUBDEV_TIMER,
NVDEV_SUBDEV_FB,
NVDEV_SUBDEV_LTC,
- NVDEV_SUBDEV_IBUS,
NVDEV_SUBDEV_INSTMEM,
NVDEV_SUBDEV_VM,
NVDEV_SUBDEV_BAR,
@@ -92,6 +92,7 @@ struct nouveau_device {
GM100 = 0x110,
} card_type;
u32 chipset;
+ u8 chiprev;
u32 crystal;
struct nouveau_oclass *oclass[NVDEV_SUBDEV_NR];
@@ -158,6 +159,12 @@ nv_device_is_pci(struct nouveau_device *device)
return device->pdev != NULL;
}
+static inline bool
+nv_device_is_cpu_coherent(struct nouveau_device *device)
+{
+ return (!IS_ENABLED(CONFIG_ARM) && nv_device_is_pci(device));
+}
+
static inline struct device *
nv_device_base(struct nouveau_device *device)
{
diff --git a/drivers/gpu/drm/nouveau/core/include/core/handle.h b/drivers/gpu/drm/nouveau/core/include/core/handle.h
index ceb67d77087..d22a59138a9 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/handle.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/handle.h
@@ -23,11 +23,6 @@ void nouveau_handle_destroy(struct nouveau_handle *);
int nouveau_handle_init(struct nouveau_handle *);
int nouveau_handle_fini(struct nouveau_handle *, bool suspend);
-int nouveau_handle_new(struct nouveau_object *, u32 parent, u32 handle,
- u16 oclass, void *data, u32 size,
- struct nouveau_object **);
-int nouveau_handle_del(struct nouveau_object *, u32 parent, u32 handle);
-
struct nouveau_object *
nouveau_handle_ref(struct nouveau_object *, u32 name);
diff --git a/drivers/gpu/drm/nouveau/core/include/core/object.h b/drivers/gpu/drm/nouveau/core/include/core/object.h
index d7039482d6f..2e2afa502c9 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/object.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/object.h
@@ -203,21 +203,4 @@ nv_memcmp(void *obj, u32 addr, const char *str, u32 len)
return 0;
}
-#include <core/handle.h>
-
-static inline int
-nouveau_object_new(struct nouveau_object *client, u32 parent, u32 handle,
- u16 oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- return nouveau_handle_new(client, parent, handle, oclass,
- data, size, pobject);
-}
-
-static inline int
-nouveau_object_del(struct nouveau_object *client, u32 parent, u32 handle)
-{
- return nouveau_handle_del(client, parent, handle);
-}
-
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/disp.h b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
index 7a64f347b38..fc307f1317f 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
@@ -31,5 +31,6 @@ extern struct nouveau_oclass *nvd0_disp_oclass;
extern struct nouveau_oclass *nve0_disp_oclass;
extern struct nouveau_oclass *nvf0_disp_oclass;
extern struct nouveau_oclass *gm107_disp_oclass;
+extern struct nouveau_oclass *gm204_disp_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/M0203.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/M0203.h
new file mode 100644
index 00000000000..1f84d3612dd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/M0203.h
@@ -0,0 +1,31 @@
+#ifndef __NVBIOS_M0203_H__
+#define __NVBIOS_M0203_H__
+
+struct nvbios_M0203T {
+#define M0203T_TYPE_RAMCFG 0x00
+ u8 type;
+ u16 pointer;
+};
+
+u32 nvbios_M0203Te(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u32 nvbios_M0203Tp(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_M0203T *);
+
+struct nvbios_M0203E {
+#define M0203E_TYPE_DDR2 0x0
+#define M0203E_TYPE_DDR3 0x1
+#define M0203E_TYPE_GDDR3 0x2
+#define M0203E_TYPE_GDDR5 0x3
+#define M0203E_TYPE_SKIP 0xf
+ u8 type;
+ u8 strap;
+ u8 group;
+};
+
+u32 nvbios_M0203Ee(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr);
+u32 nvbios_M0203Ep(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr,
+ struct nvbios_M0203E *);
+u32 nvbios_M0203Em(struct nouveau_bios *, u8 ramcfg, u8 *ver, u8 *hdr,
+ struct nvbios_M0203E *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h
index 10b57a19a7d..c9bb112895a 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h
@@ -4,11 +4,14 @@
struct nouveau_bios;
enum dcb_i2c_type {
- DCB_I2C_NV04_BIT = 0,
- DCB_I2C_NV4E_BIT = 4,
- DCB_I2C_NVIO_BIT = 5,
- DCB_I2C_NVIO_AUX = 6,
- DCB_I2C_UNUSED = 0xff
+ /* matches bios type field prior to ccb 4.1 */
+ DCB_I2C_NV04_BIT = 0x00,
+ DCB_I2C_NV4E_BIT = 0x04,
+ DCB_I2C_NVIO_BIT = 0x05,
+ DCB_I2C_NVIO_AUX = 0x06,
+ /* made up - mostly */
+ DCB_I2C_PMGR = 0x80,
+ DCB_I2C_UNUSED = 0xff
};
struct dcb_i2c_entry {
@@ -16,6 +19,7 @@ struct dcb_i2c_entry {
u8 drive;
u8 sense;
u8 share;
+ u8 auxch;
};
u16 dcb_i2c_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/image.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/image.h
new file mode 100644
index 00000000000..3348b458084
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/image.h
@@ -0,0 +1,13 @@
+#ifndef __NVBIOS_IMAGE_H__
+#define __NVBIOS_IMAGE_H__
+
+struct nvbios_image {
+ u32 base;
+ u32 size;
+ u8 type;
+ bool last;
+};
+
+bool nvbios_image(struct nouveau_bios *, int, struct nvbios_image *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/npde.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/npde.h
new file mode 100644
index 00000000000..b18413d951e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/npde.h
@@ -0,0 +1,12 @@
+#ifndef __NVBIOS_NPDE_H__
+#define __NVBIOS_NPDE_H__
+
+struct nvbios_npdeT {
+ u32 image_size;
+ bool last;
+};
+
+u32 nvbios_npdeTe(struct nouveau_bios *, u32);
+u32 nvbios_npdeTp(struct nouveau_bios *, u32, struct nvbios_npdeT *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/pcir.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/pcir.h
new file mode 100644
index 00000000000..3d634a06dca
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/pcir.h
@@ -0,0 +1,18 @@
+#ifndef __NVBIOS_PCIR_H__
+#define __NVBIOS_PCIR_H__
+
+struct nvbios_pcirT {
+ u16 vendor_id;
+ u16 device_id;
+ u8 class_code[3];
+ u32 image_size;
+ u16 image_rev;
+ u8 image_type;
+ bool last;
+};
+
+u32 nvbios_pcirTe(struct nouveau_bios *, u32, u8 *ver, u16 *hdr);
+u32 nvbios_pcirTp(struct nouveau_bios *, u32, u8 *ver, u16 *hdr,
+ struct nvbios_pcirT *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/pmu.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/pmu.h
new file mode 100644
index 00000000000..9de593deaea
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/pmu.h
@@ -0,0 +1,37 @@
+#ifndef __NVBIOS_PMU_H__
+#define __NVBIOS_PMU_H__
+
+struct nvbios_pmuT {
+};
+
+u32 nvbios_pmuTe(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u32 nvbios_pmuTp(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_pmuT *);
+
+struct nvbios_pmuE {
+ u8 type;
+ u32 data;
+};
+
+u32 nvbios_pmuEe(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr);
+u32 nvbios_pmuEp(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr,
+ struct nvbios_pmuE *);
+
+struct nvbios_pmuR {
+ u32 boot_addr_pmu;
+ u32 boot_addr;
+ u32 boot_size;
+ u32 code_addr_pmu;
+ u32 code_addr;
+ u32 code_size;
+ u32 init_addr_pmu;
+
+ u32 data_addr_pmu;
+ u32 data_addr;
+ u32 data_size;
+ u32 args_addr_pmu;
+};
+
+bool nvbios_pmuRm(struct nouveau_bios *, u8 type, struct nvbios_pmuR *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h
index a685bbd0456..4a0e0ceb41b 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h
@@ -43,8 +43,9 @@ struct nvbios_ramcfg {
unsigned ramcfg_10_02_08:1;
unsigned ramcfg_10_02_10:1;
unsigned ramcfg_10_02_20:1;
- unsigned ramcfg_10_02_40:1;
+ unsigned ramcfg_10_DLLoff:1;
unsigned ramcfg_10_03_0f:4;
+ unsigned ramcfg_10_04_01:1;
unsigned ramcfg_10_05:8;
unsigned ramcfg_10_06:8;
unsigned ramcfg_10_07:8;
@@ -95,9 +96,29 @@ struct nvbios_ramcfg {
union {
struct {
unsigned timing_10_WR:8;
+ unsigned timing_10_WTR:8;
unsigned timing_10_CL:8;
+ unsigned timing_10_RC:8;
+ /*empty: 4 */
+ unsigned timing_10_RFC:8; /* Byte 5 */
+ /*empty: 6 */
+ unsigned timing_10_RAS:8; /* Byte 7 */
+ /*empty: 8 */
+ unsigned timing_10_RP:8; /* Byte 9 */
+ unsigned timing_10_RCDRD:8;
+ unsigned timing_10_RCDWR:8;
+ unsigned timing_10_RRD:8;
+ unsigned timing_10_13:8;
unsigned timing_10_ODT:3;
+ /* empty: 15 */
+ unsigned timing_10_16:8;
+ /* empty: 17 */
+ unsigned timing_10_18:8;
unsigned timing_10_CWL:8;
+ unsigned timing_10_20:8;
+ unsigned timing_10_21:8;
+ /* empty: 22, 23 */
+ unsigned timing_10_24:8;
};
struct {
unsigned timing_20_2e_03:2;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h b/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h
index e292271a84e..e007a9d4468 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h
@@ -30,5 +30,6 @@ extern struct nouveau_oclass *nva3_devinit_oclass;
extern struct nouveau_oclass *nvaf_devinit_oclass;
extern struct nouveau_oclass *nvc0_devinit_oclass;
extern struct nouveau_oclass *gm107_devinit_oclass;
+extern struct nouveau_oclass *gm204_devinit_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
index 1b937c2c25a..d94ccacb40b 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
@@ -8,6 +8,8 @@
#include <subdev/bios/i2c.h>
#define NV_I2C_PORT(n) (0x00 + (n))
+#define NV_I2C_AUX(n) (0x10 + (n))
+#define NV_I2C_EXT(n) (0x20 + (n))
#define NV_I2C_DEFAULT(n) (0x80 + (n))
#define NV_I2C_TYPE_DCBI2C(n) (0x0000 | (n))
@@ -89,6 +91,7 @@ extern struct nouveau_oclass *nv94_i2c_oclass;
extern struct nouveau_oclass *nvd0_i2c_oclass;
extern struct nouveau_oclass *gf117_i2c_oclass;
extern struct nouveau_oclass *nve0_i2c_oclass;
+extern struct nouveau_oclass *gm204_i2c_oclass;
static inline int
nv_rdi2cr(struct nouveau_i2c_port *port, u8 addr, u8 reg)
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h b/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h
index bf3d1f61133..f2427bf5aee 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h
@@ -48,6 +48,8 @@ void nouveau_memx_wait(struct nouveau_memx *,
u32 addr, u32 mask, u32 data, u32 nsec);
void nouveau_memx_nsec(struct nouveau_memx *, u32 nsec);
void nouveau_memx_wait_vblank(struct nouveau_memx *);
+void nouveau_memx_train(struct nouveau_memx *);
+int nouveau_memx_train_result(struct nouveau_pwr *, u32 *, int);
void nouveau_memx_block(struct nouveau_memx *);
void nouveau_memx_unblock(struct nouveau_memx *);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/volt.h b/drivers/gpu/drm/nouveau/core/include/subdev/volt.h
index 820b62ffd75..67db5e58880 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/volt.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/volt.h
@@ -52,6 +52,7 @@ int _nouveau_volt_init(struct nouveau_object *);
#define _nouveau_volt_fini _nouveau_subdev_fini
extern struct nouveau_oclass nv40_volt_oclass;
+extern struct nouveau_oclass gk20a_volt_oclass;
int nouveau_voltgpio_init(struct nouveau_volt *);
int nouveau_voltgpio_get(struct nouveau_volt *);
diff --git a/drivers/gpu/drm/nouveau/core/os.h b/drivers/gpu/drm/nouveau/core/os.h
index ccfa21d72dd..bdd05ee7ec7 100644
--- a/drivers/gpu/drm/nouveau/core/os.h
+++ b/drivers/gpu/drm/nouveau/core/os.h
@@ -23,6 +23,7 @@
#include <linux/pm_runtime.h>
#include <linux/power_supply.h>
#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
#include <asm/unaligned.h>
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/M0203.c b/drivers/gpu/drm/nouveau/core/subdev/bios/M0203.c
new file mode 100644
index 00000000000..28906b16d4e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/M0203.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/M0203.h>
+
+u32
+nvbios_M0203Te(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+ struct bit_entry bit_M;
+ u32 data = 0x00000000;
+
+ if (!bit_entry(bios, 'M', &bit_M)) {
+ if (bit_M.version == 2 && bit_M.length > 0x04)
+ data = nv_ro16(bios, bit_M.offset + 0x03);
+ if (data) {
+ *ver = nv_ro08(bios, data + 0x00);
+ switch (*ver) {
+ case 0x10:
+ *hdr = nv_ro08(bios, data + 0x01);
+ *len = nv_ro08(bios, data + 0x02);
+ *cnt = nv_ro08(bios, data + 0x03);
+ return data;
+ default:
+ break;
+ }
+ }
+ }
+
+ return 0x00000000;
+}
+
+u32
+nvbios_M0203Tp(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_M0203T *info)
+{
+ u32 data = nvbios_M0203Te(bios, ver, hdr, cnt, len);
+ memset(info, 0x00, sizeof(*info));
+ switch (!!data * *ver) {
+ case 0x10:
+ info->type = nv_ro08(bios, data + 0x04);
+ info->pointer = nv_ro16(bios, data + 0x05);
+ break;
+ default:
+ break;
+ }
+ return data;
+}
+
+u32
+nvbios_M0203Ee(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr)
+{
+ u8 cnt, len;
+ u32 data = nvbios_M0203Te(bios, ver, hdr, &cnt, &len);
+ if (data && idx < cnt) {
+ data = data + *hdr + idx * len;
+ *hdr = len;
+ return data;
+ }
+ return 0x00000000;
+}
+
+u32
+nvbios_M0203Ep(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr,
+ struct nvbios_M0203E *info)
+{
+ u32 data = nvbios_M0203Ee(bios, idx, ver, hdr);
+ memset(info, 0x00, sizeof(*info));
+ switch (!!data * *ver) {
+ case 0x10:
+ info->type = (nv_ro08(bios, data + 0x00) & 0x0f) >> 0;
+ info->strap = (nv_ro08(bios, data + 0x00) & 0xf0) >> 4;
+ info->group = (nv_ro08(bios, data + 0x01) & 0x0f) >> 0;
+ return data;
+ default:
+ break;
+ }
+ return 0x00000000;
+}
+
+u32
+nvbios_M0203Em(struct nouveau_bios *bios, u8 ramcfg, u8 *ver, u8 *hdr,
+ struct nvbios_M0203E *info)
+{
+ struct nvbios_M0203T M0203T;
+ u8 cnt, len, idx = 0xff;
+ u32 data;
+
+ if (!nvbios_M0203Tp(bios, ver, hdr, &cnt, &len, &M0203T)) {
+ nv_warn(bios, "M0203T not found\n");
+ return 0x00000000;
+ }
+
+ while ((data = nvbios_M0203Ep(bios, ++idx, ver, hdr, info))) {
+ switch (M0203T.type) {
+ case M0203T_TYPE_RAMCFG:
+ if (info->strap != ramcfg)
+ continue;
+ return data;
+ default:
+ nv_warn(bios, "M0203T type %02x\n", M0203T.type);
+ return 0x00000000;
+ }
+ }
+
+ return data;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
index d45704a2c2d..7df3a273553 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -31,6 +31,8 @@
#include <subdev/bios/bmp.h>
#include <subdev/bios/bit.h>
+#include "priv.h"
+
u8
nvbios_checksum(const u8 *data, int size)
{
@@ -56,362 +58,21 @@ nvbios_findstr(const u8 *data, int size, const char *str, int len)
return 0;
}
-#if defined(__powerpc__)
-static void
-nouveau_bios_shadow_of(struct nouveau_bios *bios)
+int
+nvbios_extend(struct nouveau_bios *bios, u32 length)
{
- struct pci_dev *pdev = nv_device(bios)->pdev;
- struct device_node *dn;
- const u32 *data;
- int size;
-
- dn = pci_device_to_OF_node(pdev);
- if (!dn) {
- nv_info(bios, "Unable to get the OF node\n");
- return;
- }
-
- data = of_get_property(dn, "NVDA,BMP", &size);
- if (data && size) {
- bios->size = size;
- bios->data = kmalloc(bios->size, GFP_KERNEL);
- if (bios->data)
- memcpy(bios->data, data, size);
- }
-}
-#endif
-
-static void
-nouveau_bios_shadow_pramin(struct nouveau_bios *bios)
-{
- struct nouveau_device *device = nv_device(bios);
- u64 addr = 0;
- u32 bar0 = 0;
- int i;
-
- if (device->card_type >= NV_50) {
- if (device->card_type >= NV_C0 && device->card_type < GM100) {
- if (nv_rd32(bios, 0x022500) & 0x00000001)
- return;
- } else
- if (device->card_type >= GM100) {
- if (nv_rd32(bios, 0x021c04) & 0x00000001)
- return;
- }
-
- addr = nv_rd32(bios, 0x619f04);
- if (!(addr & 0x00000008)) {
- nv_debug(bios, "... not enabled\n");
- return;
+ if (bios->size < length) {
+ u8 *prev = bios->data;
+ if (!(bios->data = kmalloc(length, GFP_KERNEL))) {
+ bios->data = prev;
+ return -ENOMEM;
}
- if ( (addr & 0x00000003) != 1) {
- nv_debug(bios, "... not in vram\n");
- return;
- }
-
- addr = (addr & 0xffffff00) << 8;
- if (!addr) {
- addr = (u64)nv_rd32(bios, 0x001700) << 16;
- addr += 0xf0000;
- }
-
- bar0 = nv_mask(bios, 0x001700, 0xffffffff, addr >> 16);
- }
-
- /* bail if no rom signature */
- if (nv_rd08(bios, 0x700000) != 0x55 ||
- nv_rd08(bios, 0x700001) != 0xaa)
- goto out;
-
- bios->size = nv_rd08(bios, 0x700002) * 512;
- if (!bios->size)
- goto out;
-
- bios->data = kmalloc(bios->size, GFP_KERNEL);
- if (bios->data) {
- for (i = 0; i < bios->size; i++)
- nv_wo08(bios, i, nv_rd08(bios, 0x700000 + i));
- }
-
-out:
- if (device->card_type >= NV_50)
- nv_wr32(bios, 0x001700, bar0);
-}
-
-static void
-nouveau_bios_shadow_prom(struct nouveau_bios *bios)
-{
- struct nouveau_device *device = nv_device(bios);
- u32 pcireg, access;
- u16 pcir;
- int i;
-
- /* there is no prom on nv4x IGP's */
- if (device->card_type == NV_40 && device->chipset >= 0x4c)
- return;
-
- /* enable access to rom */
- if (device->card_type >= NV_50)
- pcireg = 0x088050;
- else
- pcireg = 0x001850;
- access = nv_mask(bios, pcireg, 0x00000001, 0x00000000);
-
- /* WARNING: PROM accesses should always be 32-bits aligned. Other
- * accesses work on most chipset but do not on Kepler chipsets
- */
-
- /* bail if no rom signature, with a workaround for a PROM reading
- * issue on some chipsets. the first read after a period of
- * inactivity returns the wrong result, so retry the first header
- * byte a few times before giving up as a workaround
- */
- i = 16;
- do {
- u32 data = le32_to_cpu(nv_rd32(bios, 0x300000)) & 0xffff;
- if (data == 0xaa55)
- break;
- } while (i--);
-
- if (!i)
- goto out;
-
- /* read entire bios image to system memory */
- bios->size = (le32_to_cpu(nv_rd32(bios, 0x300000)) >> 16) & 0xff;
- bios->size = bios->size * 512;
- if (!bios->size)
- goto out;
-
- bios->data = kmalloc(bios->size, GFP_KERNEL);
- if (!bios->data)
- goto out;
-
- for (i = 0; i < bios->size; i += 4)
- ((u32 *)bios->data)[i/4] = nv_rd32(bios, 0x300000 + i);
-
- /* check the PCI record header */
- pcir = nv_ro16(bios, 0x0018);
- if (bios->data[pcir + 0] != 'P' ||
- bios->data[pcir + 1] != 'C' ||
- bios->data[pcir + 2] != 'I' ||
- bios->data[pcir + 3] != 'R') {
- bios->size = 0;
- kfree(bios->data);
- }
-
-out:
- /* disable access to rom */
- nv_wr32(bios, pcireg, access);
-}
-
-#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
-int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
-bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
-#else
-static inline bool
-nouveau_acpi_rom_supported(struct pci_dev *pdev) {
- return false;
-}
-
-static inline int
-nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) {
- return -EINVAL;
-}
-#endif
-
-static void
-nouveau_bios_shadow_acpi(struct nouveau_bios *bios)
-{
- struct pci_dev *pdev = nv_device(bios)->pdev;
- int ret, cnt, i;
-
- if (!nouveau_acpi_rom_supported(pdev)) {
- bios->data = NULL;
- return;
- }
-
- bios->size = 0;
- bios->data = kmalloc(4096, GFP_KERNEL);
- if (bios->data) {
- if (nouveau_acpi_get_bios_chunk(bios->data, 0, 4096) == 4096)
- bios->size = bios->data[2] * 512;
- kfree(bios->data);
+ memcpy(bios->data, prev, bios->size);
+ bios->size = length;
+ kfree(prev);
+ return 1;
}
-
- if (!bios->size)
- return;
-
- bios->data = kmalloc(bios->size, GFP_KERNEL);
- if (bios->data) {
- /* disobey the acpi spec - much faster on at least w530 ... */
- ret = nouveau_acpi_get_bios_chunk(bios->data, 0, bios->size);
- if (ret != bios->size ||
- nvbios_checksum(bios->data, bios->size)) {
- /* ... that didn't work, ok, i'll be good now */
- for (i = 0; i < bios->size; i += cnt) {
- cnt = min((bios->size - i), (u32)4096);
- ret = nouveau_acpi_get_bios_chunk(bios->data, i, cnt);
- if (ret != cnt)
- break;
- }
- }
- }
-}
-
-static void
-nouveau_bios_shadow_pci(struct nouveau_bios *bios)
-{
- struct pci_dev *pdev = nv_device(bios)->pdev;
- size_t size;
-
- if (!pci_enable_rom(pdev)) {
- void __iomem *rom = pci_map_rom(pdev, &size);
- if (rom && size) {
- bios->data = kmalloc(size, GFP_KERNEL);
- if (bios->data) {
- memcpy_fromio(bios->data, rom, size);
- bios->size = size;
- }
- }
- if (rom)
- pci_unmap_rom(pdev, rom);
-
- pci_disable_rom(pdev);
- }
-}
-
-static void
-nouveau_bios_shadow_platform(struct nouveau_bios *bios)
-{
- struct pci_dev *pdev = nv_device(bios)->pdev;
- size_t size;
-
- void __iomem *rom = pci_platform_rom(pdev, &size);
- if (rom && size) {
- bios->data = kmalloc(size, GFP_KERNEL);
- if (bios->data) {
- memcpy_fromio(bios->data, rom, size);
- bios->size = size;
- }
- }
-}
-
-static int
-nouveau_bios_score(struct nouveau_bios *bios, const bool writeable)
-{
- if (bios->size < 3 || !bios->data || bios->data[0] != 0x55 ||
- bios->data[1] != 0xAA) {
- nv_info(bios, "... signature not found\n");
- return 0;
- }
-
- if (nvbios_checksum(bios->data,
- min_t(u32, bios->data[2] * 512, bios->size))) {
- nv_info(bios, "... checksum invalid\n");
- /* if a ro image is somewhat bad, it's probably all rubbish */
- return writeable ? 2 : 1;
- }
-
- nv_info(bios, "... appears to be valid\n");
- return 3;
-}
-
-struct methods {
- const char desc[16];
- void (*shadow)(struct nouveau_bios *);
- const bool rw;
- int score;
- u32 size;
- u8 *data;
-};
-
-static int
-nouveau_bios_shadow(struct nouveau_bios *bios)
-{
- struct methods shadow_methods[] = {
-#if defined(__powerpc__)
- { "OpenFirmware", nouveau_bios_shadow_of, true, 0, 0, NULL },
-#endif
- { "PRAMIN", nouveau_bios_shadow_pramin, true, 0, 0, NULL },
- { "PROM", nouveau_bios_shadow_prom, false, 0, 0, NULL },
- { "ACPI", nouveau_bios_shadow_acpi, true, 0, 0, NULL },
- { "PCIROM", nouveau_bios_shadow_pci, true, 0, 0, NULL },
- { "PLATFORM", nouveau_bios_shadow_platform, true, 0, 0, NULL },
- {}
- };
- struct methods *mthd, *best;
- const struct firmware *fw;
- const char *optarg;
- int optlen, ret;
- char *source;
-
- optarg = nouveau_stropt(nv_device(bios)->cfgopt, "NvBios", &optlen);
- source = optarg ? kstrndup(optarg, optlen, GFP_KERNEL) : NULL;
- if (source) {
- /* try to match one of the built-in methods */
- mthd = shadow_methods;
- do {
- if (strcasecmp(source, mthd->desc))
- continue;
- nv_info(bios, "source: %s\n", mthd->desc);
-
- mthd->shadow(bios);
- mthd->score = nouveau_bios_score(bios, mthd->rw);
- if (mthd->score) {
- kfree(source);
- return 0;
- }
- } while ((++mthd)->shadow);
-
- /* attempt to load firmware image */
- ret = request_firmware(&fw, source, &nv_device(bios)->pdev->dev);
- if (ret == 0) {
- bios->size = fw->size;
- bios->data = kmemdup(fw->data, fw->size, GFP_KERNEL);
- release_firmware(fw);
-
- nv_info(bios, "image: %s\n", source);
- if (nouveau_bios_score(bios, 1)) {
- kfree(source);
- return 0;
- }
-
- kfree(bios->data);
- bios->data = NULL;
- }
-
- nv_error(bios, "source \'%s\' invalid\n", source);
- kfree(source);
- }
-
- mthd = shadow_methods;
- do {
- nv_info(bios, "checking %s for image...\n", mthd->desc);
- mthd->shadow(bios);
- mthd->score = nouveau_bios_score(bios, mthd->rw);
- mthd->size = bios->size;
- mthd->data = bios->data;
- bios->data = NULL;
- } while (mthd->score != 3 && (++mthd)->shadow);
-
- mthd = shadow_methods;
- best = mthd;
- do {
- if (mthd->score > best->score) {
- kfree(best->data);
- best = mthd;
- }
- } while ((++mthd)->shadow);
-
- if (best->score) {
- nv_info(bios, "using image from %s\n", best->desc);
- bios->size = best->size;
- bios->data = best->data;
- return 0;
- }
-
- nv_error(bios, "unable to locate usable image\n");
- return -EINVAL;
+ return 0;
}
static u8
@@ -472,7 +133,7 @@ nouveau_bios_ctor(struct nouveau_object *parent,
if (ret)
return ret;
- ret = nouveau_bios_shadow(bios);
+ ret = nvbios_shadow(bios);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
index bd8d348385b..96099aff8b4 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
@@ -42,7 +42,7 @@ dcb_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
*ver = nv_ro08(bios, dcb);
- if (*ver >= 0x41) {
+ if (*ver >= 0x42) {
nv_warn(bios, "DCB version 0x%02x unknown\n", *ver);
return 0x0000;
} else
@@ -157,17 +157,20 @@ dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len,
break;
}
- switch (conf & 0x0f000000) {
- case 0x0f000000:
- outp->dpconf.link_nr = 4;
- break;
- case 0x03000000:
- outp->dpconf.link_nr = 2;
- break;
- case 0x01000000:
- default:
- outp->dpconf.link_nr = 1;
- break;
+ outp->dpconf.link_nr = (conf & 0x0f000000) >> 24;
+ if (*ver < 0x41) {
+ switch (outp->dpconf.link_nr) {
+ case 0x0f:
+ outp->dpconf.link_nr = 4;
+ break;
+ case 0x03:
+ outp->dpconf.link_nr = 2;
+ break;
+ case 0x01:
+ default:
+ outp->dpconf.link_nr = 1;
+ break;
+ }
}
/* fall-through... */
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c b/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c
index 7f16e52d9be..51f35559969 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c
@@ -40,6 +40,7 @@ nvbios_disp_table(struct nouveau_bios *bios,
switch (*ver) {
case 0x20:
case 0x21:
+ case 0x22:
*hdr = nv_ro08(bios, data + 0x01);
*len = nv_ro08(bios, data + 0x02);
*cnt = nv_ro08(bios, data + 0x03);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
index f309dd65725..cef53f81f12 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
@@ -41,6 +41,7 @@ nvbios_dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
case 0x21:
case 0x30:
case 0x40:
+ case 0x41:
*hdr = nv_ro08(bios, data + 0x01);
*len = nv_ro08(bios, data + 0x02);
*cnt = nv_ro08(bios, data + 0x03);
@@ -70,6 +71,7 @@ nvbios_dpout_entry(struct nouveau_bios *bios, u8 idx,
*cnt = nv_ro08(bios, outp + 0x04);
break;
case 0x40:
+ case 0x41:
*hdr = nv_ro08(bios, data + 0x04);
*cnt = 0;
*len = 0;
@@ -108,6 +110,7 @@ nvbios_dpout_parse(struct nouveau_bios *bios, u8 idx,
info->script[4] = nv_ro16(bios, data + 0x10);
break;
case 0x40:
+ case 0x41:
info->flags = nv_ro08(bios, data + 0x04);
info->script[0] = nv_ro16(bios, data + 0x05);
info->script[1] = nv_ro16(bios, data + 0x07);
@@ -172,10 +175,11 @@ nvbios_dpcfg_parse(struct nouveau_bios *bios, u16 outp, u8 idx,
break;
case 0x30:
case 0x40:
+ case 0x41:
info->pc = nv_ro08(bios, data + 0x00);
info->dc = nv_ro08(bios, data + 0x01);
info->pe = nv_ro08(bios, data + 0x02);
- info->tx_pu = nv_ro08(bios, data + 0x03);
+ info->tx_pu = nv_ro08(bios, data + 0x03) & 0x0f;
break;
default:
data = 0x0000;
@@ -194,6 +198,10 @@ nvbios_dpcfg_match(struct nouveau_bios *bios, u16 outp, u8 pc, u8 vs, u8 pe,
u16 data;
if (*ver >= 0x30) {
+ /*XXX: there's a second set of these on at least 4.1, that
+ * i've witnessed nvidia using instead of the first
+ * on gm204. figure out what/why
+ */
const u8 vsoff[] = { 0, 4, 7, 9 };
idx = (pc * 10) + vsoff[vs] + pe;
} else {
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c b/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c
index b2a676e5358..49285d4f7ca 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c
@@ -90,7 +90,7 @@ nvbios_extdev_find(struct nouveau_bios *bios, enum nvbios_extdev_type type,
u16 entry;
i = 0;
- while (!(entry = nvbios_extdev_entry(bios, i++, &ver, &len))) {
+ while ((entry = nvbios_extdev_entry(bios, i++, &ver, &len))) {
extdev_parse_entry(bios, entry, func);
if (func->type == type)
return 0;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c b/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c
index cfb9288c6d2..282320ba926 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c
@@ -39,6 +39,11 @@ dcb_i2c_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
i2c = nv_ro16(bios, dcb + 4);
}
+ if (i2c && *ver >= 0x42) {
+ nv_warn(bios, "ccb %02x not supported\n", *ver);
+ return 0x0000;
+ }
+
if (i2c && *ver >= 0x30) {
*ver = nv_ro08(bios, i2c + 0);
*hdr = nv_ro08(bios, i2c + 1);
@@ -70,14 +75,25 @@ dcb_i2c_parse(struct nouveau_bios *bios, u8 idx, struct dcb_i2c_entry *info)
u8 ver, len;
u16 ent = dcb_i2c_entry(bios, idx, &ver, &len);
if (ent) {
- info->type = nv_ro08(bios, ent + 3);
- info->share = DCB_I2C_UNUSED;
- if (ver < 0x30) {
- info->type &= 0x07;
+ if (ver >= 0x41) {
+ if (!(nv_ro32(bios, ent) & 0x80000000))
+ info->type = DCB_I2C_UNUSED;
+ else
+ info->type = DCB_I2C_PMGR;
+ } else
+ if (ver >= 0x30) {
+ info->type = nv_ro08(bios, ent + 0x03);
+ } else {
+ info->type = nv_ro08(bios, ent + 0x03) & 0x07;
if (info->type == 0x07)
info->type = DCB_I2C_UNUSED;
}
+ info->drive = DCB_I2C_UNUSED;
+ info->sense = DCB_I2C_UNUSED;
+ info->share = DCB_I2C_UNUSED;
+ info->auxch = DCB_I2C_UNUSED;
+
switch (info->type) {
case DCB_I2C_NV04_BIT:
info->drive = nv_ro08(bios, ent + 0);
@@ -87,12 +103,23 @@ dcb_i2c_parse(struct nouveau_bios *bios, u8 idx, struct dcb_i2c_entry *info)
info->drive = nv_ro08(bios, ent + 1);
return 0;
case DCB_I2C_NVIO_BIT:
- case DCB_I2C_NVIO_AUX:
info->drive = nv_ro08(bios, ent + 0) & 0x0f;
- if (nv_ro08(bios, ent + 1) & 0x01) {
- info->share = nv_ro08(bios, ent + 1) >> 1;
- info->share &= 0x0f;
- }
+ if (nv_ro08(bios, ent + 1) & 0x01)
+ info->share = nv_ro08(bios, ent + 1) >> 1;
+ return 0;
+ case DCB_I2C_NVIO_AUX:
+ info->auxch = nv_ro08(bios, ent + 0) & 0x0f;
+ if (nv_ro08(bios, ent + 1) & 0x01)
+ info->share = info->auxch;
+ return 0;
+ case DCB_I2C_PMGR:
+ info->drive = (nv_ro16(bios, ent + 0) & 0x01f) >> 0;
+ if (info->drive == 0x1f)
+ info->drive = DCB_I2C_UNUSED;
+ info->auxch = (nv_ro16(bios, ent + 0) & 0x3e0) >> 5;
+ if (info->auxch == 0x1f)
+ info->auxch = DCB_I2C_UNUSED;
+ info->share = info->auxch;
return 0;
case DCB_I2C_UNUSED:
return 0;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/image.c b/drivers/gpu/drm/nouveau/core/subdev/bios/image.c
new file mode 100644
index 00000000000..373f9a564ac
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/image.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/image.h>
+#include <subdev/bios/pcir.h>
+#include <subdev/bios/npde.h>
+
+static bool
+nvbios_imagen(struct nouveau_bios *bios, struct nvbios_image *image)
+{
+ struct nvbios_pcirT pcir;
+ struct nvbios_npdeT npde;
+ u8 ver;
+ u16 hdr;
+ u32 data;
+
+ switch ((data = nv_ro16(bios, image->base + 0x00))) {
+ case 0xaa55:
+ case 0xbb77:
+ case 0x4e56: /* NV */
+ break;
+ default:
+ nv_debug(bios, "%08x: ROM signature (%04x) unknown\n",
+ image->base, data);
+ return false;
+ }
+
+ if (!(data = nvbios_pcirTp(bios, image->base, &ver, &hdr, &pcir)))
+ return false;
+ image->size = pcir.image_size;
+ image->type = pcir.image_type;
+ image->last = pcir.last;
+
+ if (image->type != 0x70) {
+ if (!(data = nvbios_npdeTp(bios, image->base, &npde)))
+ return true;
+ image->size = npde.image_size;
+ image->last = npde.last;
+ } else {
+ image->last = true;
+ }
+
+ return true;
+}
+
+bool
+nvbios_image(struct nouveau_bios *bios, int idx, struct nvbios_image *image)
+{
+ memset(image, 0x00, sizeof(*image));
+ do {
+ image->base += image->size;
+ if (image->last || !nvbios_imagen(bios, image))
+ return false;
+ } while(idx--);
+ return true;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 626380f9e4c..c6579ef32cd 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -255,6 +255,8 @@ init_i2c(struct nvbios_init *init, int index)
}
index = init->outp->i2c_index;
+ if (init->outp->type == DCB_OUTPUT_DP)
+ index += NV_I2C_AUX(0);
}
return i2c->find(i2c, index);
@@ -278,7 +280,7 @@ init_wri2cr(struct nvbios_init *init, u8 index, u8 addr, u8 reg, u8 val)
return -ENODEV;
}
-static int
+static u8
init_rdauxr(struct nvbios_init *init, u32 addr)
{
struct nouveau_i2c_port *port = init_i2c(init, -2);
@@ -286,20 +288,24 @@ init_rdauxr(struct nvbios_init *init, u32 addr)
if (port && init_exec(init)) {
int ret = nv_rdaux(port, addr, &data, 1);
- if (ret)
- return ret;
- return data;
+ if (ret == 0)
+ return data;
+ trace("auxch read failed with %d\n", ret);
}
- return -ENODEV;
+ return 0x00;
}
static int
init_wrauxr(struct nvbios_init *init, u32 addr, u8 data)
{
struct nouveau_i2c_port *port = init_i2c(init, -2);
- if (port && init_exec(init))
- return nv_wraux(port, addr, &data, 1);
+ if (port && init_exec(init)) {
+ int ret = nv_wraux(port, addr, &data, 1);
+ if (ret)
+ trace("auxch write failed with %d\n", ret);
+ return ret;
+ }
return -ENODEV;
}
@@ -838,6 +844,40 @@ init_io_or(struct nvbios_init *init)
}
/**
+ * INIT_ANDN_REG - opcode 0x47
+ *
+ */
+static void
+init_andn_reg(struct nvbios_init *init)
+{
+ struct nouveau_bios *bios = init->bios;
+ u32 reg = nv_ro32(bios, init->offset + 1);
+ u32 mask = nv_ro32(bios, init->offset + 5);
+
+ trace("ANDN_REG\tR[0x%06x] &= ~0x%08x\n", reg, mask);
+ init->offset += 9;
+
+ init_mask(init, reg, mask, 0);
+}
+
+/**
+ * INIT_OR_REG - opcode 0x48
+ *
+ */
+static void
+init_or_reg(struct nvbios_init *init)
+{
+ struct nouveau_bios *bios = init->bios;
+ u32 reg = nv_ro32(bios, init->offset + 1);
+ u32 mask = nv_ro32(bios, init->offset + 5);
+
+ trace("OR_REG\tR[0x%06x] |= 0x%08x\n", reg, mask);
+ init->offset += 9;
+
+ init_mask(init, reg, 0, mask);
+}
+
+/**
* INIT_INDEX_ADDRESS_LATCHED - opcode 0x49
*
*/
@@ -2068,6 +2108,8 @@ static struct nvbios_init_opcode {
[0x3a] = { init_dp_condition },
[0x3b] = { init_io_mask_or },
[0x3c] = { init_io_or },
+ [0x47] = { init_andn_reg },
+ [0x48] = { init_or_reg },
[0x49] = { init_idx_addr_latched },
[0x4a] = { init_io_restrict_pll2 },
[0x4b] = { init_pll2 },
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/npde.c b/drivers/gpu/drm/nouveau/core/subdev/bios/npde.c
new file mode 100644
index 00000000000..d694716a166
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/npde.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/npde.h>
+#include <subdev/bios/pcir.h>
+
+u32
+nvbios_npdeTe(struct nouveau_bios *bios, u32 base)
+{
+ struct nvbios_pcirT pcir;
+ u8 ver; u16 hdr;
+ u32 data = nvbios_pcirTp(bios, base, &ver, &hdr, &pcir);
+ if (data = (data + hdr + 0x0f) & ~0x0f, data) {
+ switch (nv_ro32(bios, data + 0x00)) {
+ case 0x4544504e: /* NPDE */
+ break;
+ default:
+ nv_debug(bios, "%08x: NPDE signature (%08x) unknown\n",
+ data, nv_ro32(bios, data + 0x00));
+ data = 0;
+ break;
+ }
+ }
+ return data;
+}
+
+u32
+nvbios_npdeTp(struct nouveau_bios *bios, u32 base, struct nvbios_npdeT *info)
+{
+ u32 data = nvbios_npdeTe(bios, base);
+ memset(info, 0x00, sizeof(*info));
+ if (data) {
+ info->image_size = nv_ro16(bios, data + 0x08) * 512;
+ info->last = nv_ro08(bios, data + 0x0a) & 0x80;
+ }
+ return data;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/pcir.c b/drivers/gpu/drm/nouveau/core/subdev/bios/pcir.c
new file mode 100644
index 00000000000..91dae26bc50
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/pcir.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/pcir.h>
+
+u32
+nvbios_pcirTe(struct nouveau_bios *bios, u32 base, u8 *ver, u16 *hdr)
+{
+ u32 data = nv_ro16(bios, base + 0x18);
+ if (data) {
+ data += base;
+ switch (nv_ro32(bios, data + 0x00)) {
+ case 0x52494350: /* PCIR */
+ case 0x53494752: /* RGIS */
+ case 0x5344504e: /* NPDS */
+ *hdr = nv_ro16(bios, data + 0x0a);
+ *ver = nv_ro08(bios, data + 0x0c);
+ break;
+ default:
+ nv_debug(bios, "%08x: PCIR signature (%08x) unknown\n",
+ data, nv_ro32(bios, data + 0x00));
+ data = 0;
+ break;
+ }
+ }
+ return data;
+}
+
+u32
+nvbios_pcirTp(struct nouveau_bios *bios, u32 base, u8 *ver, u16 *hdr,
+ struct nvbios_pcirT *info)
+{
+ u32 data = nvbios_pcirTe(bios, base, ver, hdr);
+ memset(info, 0x00, sizeof(*info));
+ if (data) {
+ info->vendor_id = nv_ro16(bios, data + 0x04);
+ info->device_id = nv_ro16(bios, data + 0x06);
+ info->class_code[0] = nv_ro08(bios, data + 0x0d);
+ info->class_code[1] = nv_ro08(bios, data + 0x0e);
+ info->class_code[2] = nv_ro08(bios, data + 0x0f);
+ info->image_size = nv_ro16(bios, data + 0x10) * 512;
+ info->image_rev = nv_ro16(bios, data + 0x12);
+ info->image_type = nv_ro08(bios, data + 0x14);
+ info->last = nv_ro08(bios, data + 0x15) & 0x80;
+ }
+ return data;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/pmu.c b/drivers/gpu/drm/nouveau/core/subdev/bios/pmu.c
new file mode 100644
index 00000000000..66c56ba07d1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/pmu.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/image.h>
+#include <subdev/bios/pmu.h>
+
+static u32
+weirdo_pointer(struct nouveau_bios *bios, u32 data)
+{
+ struct nvbios_image image;
+ int idx = 0;
+ if (nvbios_image(bios, idx++, &image)) {
+ data -= image.size;
+ while (nvbios_image(bios, idx++, &image)) {
+ if (image.type == 0xe0)
+ return image.base + data;
+ }
+ }
+ return 0;
+}
+
+u32
+nvbios_pmuTe(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+ struct bit_entry bit_p;
+ u32 data = 0;
+
+ if (!bit_entry(bios, 'p', &bit_p)) {
+ if (bit_p.version == 2 && bit_p.length >= 4)
+ data = nv_ro32(bios, bit_p.offset + 0x00);
+ if ((data = weirdo_pointer(bios, data))) {
+ *ver = nv_ro08(bios, data + 0x00); /* maybe? */
+ *hdr = nv_ro08(bios, data + 0x01);
+ *len = nv_ro08(bios, data + 0x02);
+ *cnt = nv_ro08(bios, data + 0x03);
+ }
+ }
+
+ return data;
+}
+
+u32
+nvbios_pmuTp(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_pmuT *info)
+{
+ u32 data = nvbios_pmuTe(bios, ver, hdr, cnt, len);
+ memset(info, 0x00, sizeof(*info));
+ switch (!!data * *ver) {
+ default:
+ break;
+ }
+ return data;
+}
+
+u32
+nvbios_pmuEe(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr)
+{
+ u8 cnt, len;
+ u32 data = nvbios_pmuTe(bios, ver, hdr, &cnt, &len);
+ if (data && idx < cnt) {
+ data = data + *hdr + (idx * len);
+ *hdr = len;
+ return data;
+ }
+ return 0;
+}
+
+u32
+nvbios_pmuEp(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr,
+ struct nvbios_pmuE *info)
+{
+ u32 data = nvbios_pmuEe(bios, idx, ver, hdr);
+ memset(info, 0x00, sizeof(*info));
+ switch (!!data * *ver) {
+ default:
+ info->type = nv_ro08(bios, data + 0x00);
+ info->data = nv_ro32(bios, data + 0x02);
+ break;
+ }
+ return data;
+}
+
+bool
+nvbios_pmuRm(struct nouveau_bios *bios, u8 type, struct nvbios_pmuR *info)
+{
+ struct nvbios_pmuE pmuE;
+ u8 ver, hdr, idx = 0;
+ u32 data;
+ memset(info, 0x00, sizeof(*info));
+ while ((data = nvbios_pmuEp(bios, idx++, &ver, &hdr, &pmuE))) {
+ if ( pmuE.type == type &&
+ (data = weirdo_pointer(bios, pmuE.data))) {
+ info->init_addr_pmu = nv_ro32(bios, data + 0x08);
+ info->args_addr_pmu = nv_ro32(bios, data + 0x0c);
+ info->boot_addr = data + 0x30;
+ info->boot_addr_pmu = nv_ro32(bios, data + 0x10) +
+ nv_ro32(bios, data + 0x18);
+ info->boot_size = nv_ro32(bios, data + 0x1c) -
+ nv_ro32(bios, data + 0x18);
+ info->code_addr = info->boot_addr + info->boot_size;
+ info->code_addr_pmu = info->boot_addr_pmu +
+ info->boot_size;
+ info->code_size = nv_ro32(bios, data + 0x20);
+ info->data_addr = data + 0x30 +
+ nv_ro32(bios, data + 0x24);
+ info->data_addr_pmu = nv_ro32(bios, data + 0x28);
+ info->data_size = nv_ro32(bios, data + 0x2c);
+ return true;
+ }
+ }
+ return false;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/priv.h b/drivers/gpu/drm/nouveau/core/subdev/bios/priv.h
new file mode 100644
index 00000000000..187d225bd1e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/priv.h
@@ -0,0 +1,25 @@
+#ifndef __NVKM_BIOS_PRIV_H__
+#define __NVKM_BIOS_PRIV_H__
+
+#include <subdev/bios.h>
+
+struct nvbios_source {
+ const char *name;
+ void *(*init)(struct nouveau_bios *, const char *);
+ void (*fini)(void *);
+ u32 (*read)(void *, u32 offset, u32 length, struct nouveau_bios *);
+ bool rw;
+};
+
+int nvbios_extend(struct nouveau_bios *, u32 length);
+int nvbios_shadow(struct nouveau_bios *);
+
+extern const struct nvbios_source nvbios_rom;
+extern const struct nvbios_source nvbios_ramin;
+extern const struct nvbios_source nvbios_acpi_fast;
+extern const struct nvbios_source nvbios_acpi_slow;
+extern const struct nvbios_source nvbios_pcirom;
+extern const struct nvbios_source nvbios_platform;
+extern const struct nvbios_source nvbios_of;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c b/drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c
index 6c401f70ab9..1623c8dfe79 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c
@@ -25,6 +25,7 @@
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/ramcfg.h>
+#include <subdev/bios/M0203.h>
static u8
nvbios_ramcfg_strap(struct nouveau_subdev *subdev)
@@ -54,12 +55,22 @@ nvbios_ramcfg_index(struct nouveau_subdev *subdev)
u8 strap = nvbios_ramcfg_strap(subdev);
u32 xlat = 0x00000000;
struct bit_entry bit_M;
+ struct nvbios_M0203E M0203E;
+ u8 ver, hdr;
if (!bit_entry(bios, 'M', &bit_M)) {
if (bit_M.version == 1 && bit_M.length >= 5)
xlat = nv_ro16(bios, bit_M.offset + 3);
- if (bit_M.version == 2 && bit_M.length >= 3)
+ if (bit_M.version == 2 && bit_M.length >= 3) {
+ /*XXX: is M ever shorter than this?
+ * if not - what is xlat used for now?
+ * also - sigh..
+ */
+ if (bit_M.length >= 7 &&
+ nvbios_M0203Em(bios, strap, &ver, &hdr, &M0203E))
+ return M0203E.group;
xlat = nv_ro16(bios, bit_M.offset + 1);
+ }
}
if (xlat)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c b/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c
index 585e69331cc..c5685228c32 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c
@@ -162,8 +162,9 @@ nvbios_rammapSp(struct nouveau_bios *bios, u32 data,
p->ramcfg_10_02_08 = (nv_ro08(bios, data + 0x02) & 0x08) >> 3;
p->ramcfg_10_02_10 = (nv_ro08(bios, data + 0x02) & 0x10) >> 4;
p->ramcfg_10_02_20 = (nv_ro08(bios, data + 0x02) & 0x20) >> 5;
- p->ramcfg_10_02_40 = (nv_ro08(bios, data + 0x02) & 0x40) >> 6;
+ p->ramcfg_10_DLLoff = (nv_ro08(bios, data + 0x02) & 0x40) >> 6;
p->ramcfg_10_03_0f = (nv_ro08(bios, data + 0x03) & 0x0f) >> 0;
+ p->ramcfg_10_04_01 = (nv_ro08(bios, data + 0x04) & 0x01) >> 0;
p->ramcfg_10_05 = (nv_ro08(bios, data + 0x05) & 0xff) >> 0;
p->ramcfg_10_06 = (nv_ro08(bios, data + 0x06) & 0xff) >> 0;
p->ramcfg_10_07 = (nv_ro08(bios, data + 0x07) & 0xff) >> 0;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/core/subdev/bios/shadow.c
new file mode 100644
index 00000000000..bb9e0018d93
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/shadow.c
@@ -0,0 +1,270 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "priv.h"
+#include <core/option.h>
+#include <subdev/bios/image.h>
+
+struct shadow {
+ struct nouveau_oclass base;
+ u32 skip;
+ const struct nvbios_source *func;
+ void *data;
+ u32 size;
+ int score;
+};
+
+static bool
+shadow_fetch(struct nouveau_bios *bios, u32 upto)
+{
+ struct shadow *mthd = (void *)nv_object(bios)->oclass;
+ const u32 limit = (upto + 3) & ~3;
+ const u32 start = bios->size;
+ void *data = mthd->data;
+ if (nvbios_extend(bios, limit) > 0) {
+ u32 read = mthd->func->read(data, start, limit - start, bios);
+ bios->size = start + read;
+ }
+ return bios->size >= limit;
+}
+
+static u8
+shadow_rd08(struct nouveau_object *object, u64 addr)
+{
+ struct nouveau_bios *bios = (void *)object;
+ if (shadow_fetch(bios, addr + 1))
+ return bios->data[addr];
+ return 0x00;
+}
+
+static u16
+shadow_rd16(struct nouveau_object *object, u64 addr)
+{
+ struct nouveau_bios *bios = (void *)object;
+ if (shadow_fetch(bios, addr + 2))
+ return get_unaligned_le16(&bios->data[addr]);
+ return 0x0000;
+}
+
+static u32
+shadow_rd32(struct nouveau_object *object, u64 addr)
+{
+ struct nouveau_bios *bios = (void *)object;
+ if (shadow_fetch(bios, addr + 4))
+ return get_unaligned_le32(&bios->data[addr]);
+ return 0x00000000;
+}
+
+static struct nouveau_oclass
+shadow_class = {
+ .handle = NV_SUBDEV(VBIOS, 0x00),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .rd08 = shadow_rd08,
+ .rd16 = shadow_rd16,
+ .rd32 = shadow_rd32,
+ },
+};
+
+static int
+shadow_image(struct nouveau_bios *bios, int idx, struct shadow *mthd)
+{
+ struct nvbios_image image;
+ int score = 1;
+
+ if (!nvbios_image(bios, idx, &image)) {
+ nv_debug(bios, "image %d invalid\n", idx);
+ return 0;
+ }
+ nv_debug(bios, "%08x: type %02x, %d bytes\n",
+ image.base, image.type, image.size);
+
+ if (!shadow_fetch(bios, image.size)) {
+ nv_debug(bios, "%08x: fetch failed\n", image.base);
+ return 0;
+ }
+
+ switch (image.type) {
+ case 0x00:
+ if (nvbios_checksum(&bios->data[image.base], image.size)) {
+ nv_debug(bios, "%08x: checksum failed\n", image.base);
+ if (mthd->func->rw)
+ score += 1;
+ score += 1;
+ } else {
+ score += 3;
+ }
+ break;
+ default:
+ score += 3;
+ break;
+ }
+
+ if (!image.last)
+ score += shadow_image(bios, idx + 1, mthd);
+ return score;
+}
+
+static int
+shadow_score(struct nouveau_bios *bios, struct shadow *mthd)
+{
+ struct nouveau_oclass *oclass = nv_object(bios)->oclass;
+ int score;
+ nv_object(bios)->oclass = &mthd->base;
+ score = shadow_image(bios, 0, mthd);
+ nv_object(bios)->oclass = oclass;
+ return score;
+
+}
+
+static int
+shadow_method(struct nouveau_bios *bios, struct shadow *mthd, const char *name)
+{
+ const struct nvbios_source *func = mthd->func;
+ if (func->name) {
+ nv_debug(bios, "trying %s...\n", name ? name : func->name);
+ if (func->init) {
+ mthd->data = func->init(bios, name);
+ if (IS_ERR(mthd->data)) {
+ mthd->data = NULL;
+ return 0;
+ }
+ }
+ mthd->score = shadow_score(bios, mthd);
+ if (func->fini)
+ func->fini(mthd->data);
+ nv_debug(bios, "scored %d\n", mthd->score);
+ mthd->data = bios->data;
+ mthd->size = bios->size;
+ bios->data = NULL;
+ bios->size = 0;
+ }
+ return mthd->score;
+}
+
+static u32
+shadow_fw_read(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
+{
+ const struct firmware *fw = data;
+ if (offset + length <= fw->size) {
+ memcpy(bios->data + offset, fw->data + offset, length);
+ return length;
+ }
+ return 0;
+}
+
+static void *
+shadow_fw_init(struct nouveau_bios *bios, const char *name)
+{
+ struct device *dev = &nv_device(bios)->pdev->dev;
+ const struct firmware *fw;
+ int ret = request_firmware(&fw, name, dev);
+ if (ret)
+ return ERR_PTR(-ENOENT);
+ return (void *)fw;
+}
+
+static const struct nvbios_source
+shadow_fw = {
+ .name = "firmware",
+ .init = shadow_fw_init,
+ .fini = (void(*)(void *))release_firmware,
+ .read = shadow_fw_read,
+ .rw = false,
+};
+
+int
+nvbios_shadow(struct nouveau_bios *bios)
+{
+ struct shadow mthds[] = {
+ { shadow_class, 0, &nvbios_of },
+ { shadow_class, 0, &nvbios_ramin },
+ { shadow_class, 0, &nvbios_rom },
+ { shadow_class, 0, &nvbios_acpi_fast },
+ { shadow_class, 4, &nvbios_acpi_slow },
+ { shadow_class, 1, &nvbios_pcirom },
+ { shadow_class, 1, &nvbios_platform },
+ { shadow_class }
+ }, *mthd = mthds, *best = NULL;
+ const char *optarg;
+ char *source;
+ int optlen;
+
+ /* handle user-specified bios source */
+ optarg = nouveau_stropt(nv_device(bios)->cfgopt, "NvBios", &optlen);
+ source = optarg ? kstrndup(optarg, optlen, GFP_KERNEL) : NULL;
+ if (source) {
+ /* try to match one of the built-in methods */
+ for (mthd = mthds; mthd->func; mthd++) {
+ if (mthd->func->name &&
+ !strcasecmp(source, mthd->func->name)) {
+ best = mthd;
+ if (shadow_method(bios, mthd, NULL))
+ break;
+ }
+ }
+
+ /* otherwise, attempt to load as firmware */
+ if (!best && (best = mthd)) {
+ mthd->func = &shadow_fw;
+ shadow_method(bios, mthd, source);
+ mthd->func = NULL;
+ }
+
+ if (!best->score) {
+ nv_error(bios, "%s invalid\n", source);
+ kfree(source);
+ source = NULL;
+ }
+ }
+
+ /* scan all potential bios sources, looking for best image */
+ if (!best || !best->score) {
+ for (mthd = mthds, best = mthd; mthd->func; mthd++) {
+ if (!mthd->skip || best->score < mthd->skip) {
+ if (shadow_method(bios, mthd, NULL)) {
+ if (mthd->score > best->score)
+ best = mthd;
+ }
+ }
+ }
+ }
+
+ /* cleanup the ones we didn't use */
+ for (mthd = mthds; mthd->func; mthd++) {
+ if (mthd != best)
+ kfree(mthd->data);
+ }
+
+ if (!best->score) {
+ nv_fatal(bios, "unable to locate usable image\n");
+ return -EINVAL;
+ }
+
+ nv_info(bios, "using image from %s\n", best->func ?
+ best->func->name : source);
+ bios->data = best->data;
+ bios->size = best->size;
+ kfree(source);
+ return 0;
+}
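
(Illustrative sketch, not part of the patch: the source-selection loop of nvbios_shadow() above, reduced to plain C. The source names come from the mthds[] table, but the scores below are made up; "skip" mirrors mthds[].skip, meaning a slower method is only attempted while the best score so far is below that threshold.)

#include <stdio.h>

struct source {
	const char *name;
	int skip;	/* only attempted while the best score so far is below this */
	int score;	/* made-up result of scoring this source's image */
};

int main(void)
{
	struct source srcs[] = {
		{ "OpenFirmware", 0, 0 },
		{ "PRAMIN",       0, 4 },
		{ "PROM",         0, 1 },
		{ "ACPI",         4, 0 },
		{ "PCIROM",       1, 0 },
	};
	struct source *best = &srcs[0], *s;
	unsigned i;

	for (i = 0; i < sizeof(srcs) / sizeof(srcs[0]); i++) {
		s = &srcs[i];
		if (s->skip && best->score >= s->skip) {
			printf("%-12s skipped\n", s->name);
			continue;
		}
		printf("%-12s scored %d\n", s->name, s->score);
		if (s->score > best->score)
			best = s;
	}
	printf("using image from %s\n", best->name);
	return 0;
}
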
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowacpi.c b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowacpi.c
new file mode 100644
index 00000000000..bc130c12ec0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowacpi.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "priv.h"
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
+int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
+bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
+#else
+static inline bool
+nouveau_acpi_rom_supported(struct pci_dev *pdev)
+{
+ return false;
+}
+
+static inline int
+nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len)
+{
+ return -EINVAL;
+}
+#endif
+
+/* This version of the shadow function disobeys the ACPI spec and tries
+ * to fetch in units of more than 4KiB at a time. This is a LOT faster
+ * on some systems, such as Lenovo W530.
+ */
+static u32
+acpi_read_fast(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
+{
+ u32 limit = (offset + length + 0xfff) & ~0xfff;
+ u32 start = offset & ~0x00000fff;
+ u32 fetch = limit - start;
+
+ if (nvbios_extend(bios, limit) > 0) {
+ int ret = nouveau_acpi_get_bios_chunk(bios->data, start, fetch);
+ if (ret == fetch)
+ return fetch;
+ }
+
+ return 0;
+}
+
+/* Other systems, such as the one in fdo#55948, will report a success
+ * but only return 4KiB of data. The common bios fetching logic will
+ * detect an invalid image, and fall back to this version of the read
+ * function.
+ */
+static u32
+acpi_read_slow(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
+{
+ u32 limit = (offset + length + 0xfff) & ~0xfff;
+ u32 start = offset & ~0xfff;
+ u32 fetch = 0;
+
+ if (nvbios_extend(bios, limit) > 0) {
+ while (start + fetch < limit) {
+ int ret = nouveau_acpi_get_bios_chunk(bios->data,
+ start + fetch,
+ 0x1000);
+ if (ret != 0x1000)
+ break;
+ fetch += 0x1000;
+ }
+ }
+
+ return fetch;
+}
+
+static void *
+acpi_init(struct nouveau_bios *bios, const char *name)
+{
+ if (!nouveau_acpi_rom_supported(nv_device(bios)->pdev))
+ return ERR_PTR(-ENODEV);
+ return NULL;
+}
+
+const struct nvbios_source
+nvbios_acpi_fast = {
+ .name = "ACPI",
+ .init = acpi_init,
+ .read = acpi_read_fast,
+ .rw = false,
+};
+
+const struct nvbios_source
+nvbios_acpi_slow = {
+ .name = "ACPI",
+ .init = acpi_init,
+ .read = acpi_read_slow,
+ .rw = false,
+};
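
(Illustrative sketch, not part of the patch: the 4KiB-at-a-time fallback performed by acpi_read_slow() above. get_bios_chunk() is a hypothetical stand-in for nouveau_acpi_get_bios_chunk() that, like the systems described in the comment, never returns more than 4KiB per call.)

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CHUNK	0x1000
#define IMGSZ	(20 * 1024)

static uint8_t image[IMGSZ];	/* pretend VBIOS image */
static uint8_t shadow[IMGSZ];	/* shadow copy being built up */

/* stand-in for nouveau_acpi_get_bios_chunk(): caps every call at 4KiB */
static int get_bios_chunk(uint8_t *dst, uint32_t offset, uint32_t len)
{
	if (offset >= IMGSZ)
		return 0;
	if (len > CHUNK)
		len = CHUNK;
	if (offset + len > IMGSZ)
		len = IMGSZ - offset;
	memcpy(dst + offset, image + offset, len);
	return (int)len;
}

/* mirrors acpi_read_slow(): page-align the request, then fetch 4KiB at a time */
static uint32_t read_slow(uint8_t *dst, uint32_t offset, uint32_t length)
{
	uint32_t limit = (offset + length + 0xfff) & ~0xfffu;
	uint32_t start = offset & ~0xfffu;
	uint32_t fetch = 0;

	while (start + fetch < limit) {
		int ret = get_bios_chunk(dst, start + fetch, CHUNK);
		if (ret != CHUNK)
			break;
		fetch += CHUNK;
	}
	return fetch;
}

int main(void)
{
	memset(image, 0xa5, sizeof(image));
	printf("fetched %u bytes\n", (unsigned)read_slow(shadow, 0x100, 0x3000));
	return 0;
}
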
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowof.c b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowof.c
new file mode 100644
index 00000000000..3abe487a602
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowof.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "priv.h"
+
+#if defined(__powerpc__)
+struct priv {
+ const void __iomem *data;
+ int size;
+};
+
+static u32
+of_read(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
+{
+ struct priv *priv = data;
+ if (offset + length <= priv->size) {
+ memcpy_fromio(bios->data + offset, priv->data + offset, length);
+ return length;
+ }
+ return 0;
+}
+
+static void *
+of_init(struct nouveau_bios *bios, const char *name)
+{
+ struct pci_dev *pdev = nv_device(bios)->pdev;
+ struct device_node *dn;
+ struct priv *priv;
+ if (!(dn = pci_device_to_OF_node(pdev)))
+ return ERR_PTR(-ENODEV);
+ if (!(priv = kzalloc(sizeof(*priv), GFP_KERNEL)))
+ return ERR_PTR(-ENOMEM);
+ if ((priv->data = of_get_property(dn, "NVDA,BMP", &priv->size)))
+ return priv;
+ kfree(priv);
+ return ERR_PTR(-EINVAL);
+}
+
+const struct nvbios_source
+nvbios_of = {
+ .name = "OpenFirmware",
+ .init = of_init,
+ .fini = (void(*)(void *))kfree,
+ .read = of_read,
+ .rw = false,
+};
+#else
+const struct nvbios_source
+nvbios_of = {
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowpci.c b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowpci.c
new file mode 100644
index 00000000000..1d0389c0abe
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowpci.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "priv.h"
+
+struct priv {
+ struct pci_dev *pdev;
+ void __iomem *rom;
+ size_t size;
+};
+
+static u32
+pcirom_read(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
+{
+ struct priv *priv = data;
+ if (offset + length <= priv->size) {
+ memcpy_fromio(bios->data + offset, priv->rom + offset, length);
+ return length;
+ }
+ return 0;
+}
+
+static void
+pcirom_fini(void *data)
+{
+ struct priv *priv = data;
+ pci_unmap_rom(priv->pdev, priv->rom);
+ pci_disable_rom(priv->pdev);
+ kfree(priv);
+}
+
+static void *
+pcirom_init(struct nouveau_bios *bios, const char *name)
+{
+ struct pci_dev *pdev = nv_device(bios)->pdev;
+ struct priv *priv = NULL;
+ int ret;
+
+ if (!(ret = pci_enable_rom(pdev))) {
+ if (ret = -ENOMEM,
+ (priv = kmalloc(sizeof(*priv), GFP_KERNEL))) {
+ if (ret = -EFAULT,
+ (priv->rom = pci_map_rom(pdev, &priv->size))) {
+ priv->pdev = pdev;
+ return priv;
+ }
+ kfree(priv);
+ }
+ pci_disable_rom(pdev);
+ }
+
+ return ERR_PTR(ret);
+}
+
+const struct nvbios_source
+nvbios_pcirom = {
+ .name = "PCIROM",
+ .init = pcirom_init,
+ .fini = pcirom_fini,
+ .read = pcirom_read,
+ .rw = true,
+};
+
+static void *
+platform_init(struct nouveau_bios *bios, const char *name)
+{
+ struct pci_dev *pdev = nv_device(bios)->pdev;
+ struct priv *priv;
+ int ret = -ENOMEM;
+
+ if ((priv = kmalloc(sizeof(*priv), GFP_KERNEL))) {
+ if (ret = -ENODEV,
+ (priv->rom = pci_platform_rom(pdev, &priv->size)))
+ return priv;
+ kfree(priv);
+ }
+
+ return ERR_PTR(ret);
+}
+
+const struct nvbios_source
+nvbios_platform = {
+ .name = "PLATFORM",
+ .init = platform_init,
+ .fini = (void(*)(void *))kfree,
+ .read = pcirom_read,
+ .rw = true,
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c
new file mode 100644
index 00000000000..5e58bba0dd5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "priv.h"
+
+struct priv {
+ struct nouveau_bios *bios;
+ u32 bar0;
+};
+
+static u32
+pramin_read(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
+{
+ u32 i;
+ if (offset + length <= 0x00100000) {
+ for (i = offset; i < offset + length; i += 4)
+ *(u32 *)&bios->data[i] = nv_rd32(bios, 0x700000 + i);
+ return length;
+ }
+ return 0;
+}
+
+static void
+pramin_fini(void *data)
+{
+ struct priv *priv = data;
+ nv_wr32(priv->bios, 0x001700, priv->bar0);
+ kfree(priv);
+}
+
+static void *
+pramin_init(struct nouveau_bios *bios, const char *name)
+{
+ struct priv *priv = NULL;
+ u64 addr = 0;
+
+ /* PRAMIN always potentially available prior to nv50 */
+ if (nv_device(bios)->card_type < NV_50)
+ return NULL;
+
+ /* we can't get the bios image pointer without PDISP */
+ if (nv_device(bios)->card_type >= GM100)
+ addr = nv_rd32(bios, 0x021c04);
+ else
+ if (nv_device(bios)->card_type >= NV_C0)
+ addr = nv_rd32(bios, 0x022500);
+ if (addr & 0x00000001) {
+ nv_debug(bios, "... display disabled\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ /* check that the window is enabled and in vram, particularly
+ * important as we don't want to be touching vram on an
+ * uninitialised board
+ */
+ addr = nv_rd32(bios, 0x619f04);
+ if (!(addr & 0x00000008)) {
+ nv_debug(bios, "... not enabled\n");
+ return ERR_PTR(-ENODEV);
+ }
+ if ( (addr & 0x00000003) != 1) {
+ nv_debug(bios, "... not in vram\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ /* some alternate method inherited from xf86-video-nv... */
+ addr = (addr & 0xffffff00) << 8;
+ if (!addr) {
+ addr = (u64)nv_rd32(bios, 0x001700) << 16;
+ addr += 0xf0000;
+ }
+
+ /* modify bar0 PRAMIN window to cover the bios image */
+ if (!(priv = kmalloc(sizeof(*priv), GFP_KERNEL))) {
+ nv_error(bios, "... out of memory\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ priv->bios = bios;
+ priv->bar0 = nv_rd32(bios, 0x001700);
+ nv_wr32(bios, 0x001700, addr >> 16);
+ return priv;
+}
+
+const struct nvbios_source
+nvbios_ramin = {
+ .name = "PRAMIN",
+ .init = pramin_init,
+ .fini = pramin_fini,
+ .read = pramin_read,
+ .rw = true,
+};
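
(Illustrative sketch, not part of the patch: the movable BAR0 PRAMIN window used by pramin_init()/pramin_read() above, modelled in plain C. As the code suggests, register 0x001700 selects the window base in 64KiB units and offsets 0x700000..0x7fffff then map into that region of vram; vram[], wr32() and rd32() are stand-ins for the real MMIO accessors.)

#include <stdint.h>
#include <stdio.h>

#define VRAM_WORDS (512 << 10)		/* pretend 2MiB of vram */
static uint32_t vram[VRAM_WORDS];
static uint32_t reg_1700;		/* window base, in 64KiB units */

static void wr32(uint32_t reg, uint32_t val)
{
	if (reg == 0x001700)
		reg_1700 = val;
}

static uint32_t rd32(uint32_t reg)
{
	if (reg >= 0x700000 && reg < 0x800000) {
		uint64_t addr = ((uint64_t)reg_1700 << 16) + (reg - 0x700000);
		return vram[addr / 4];
	}
	return 0;
}

/* read one 32-bit word of vram through the movable window */
static uint32_t pramin_rd32(uint64_t addr)
{
	uint64_t base = addr & ~0xffffULL;	/* 64KiB-aligned window base */
	wr32(0x001700, (uint32_t)(base >> 16));
	return rd32(0x700000 + (uint32_t)(addr - base));
}

int main(void)
{
	vram[0x123454 / 4] = 0xdeadbeef;
	printf("%08x\n", (unsigned)pramin_rd32(0x123454));
	return 0;
}
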
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowrom.c b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowrom.c
new file mode 100644
index 00000000000..b7992bc3ffa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowrom.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "priv.h"
+
+static u32
+prom_read(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
+{
+ u32 i;
+ if (offset + length <= 0x00100000) {
+ for (i = offset; i < offset + length; i += 4)
+ *(u32 *)&bios->data[i] = nv_rd32(bios, 0x300000 + i);
+ return length;
+ }
+ return 0;
+}
+
+static void
+prom_fini(void *data)
+{
+ struct nouveau_bios *bios = data;
+ if (nv_device(bios)->card_type < NV_50)
+ nv_mask(bios, 0x001850, 0x00000001, 0x00000001);
+ else
+ nv_mask(bios, 0x088050, 0x00000001, 0x00000001);
+}
+
+static void *
+prom_init(struct nouveau_bios *bios, const char *name)
+{
+ if (nv_device(bios)->card_type < NV_50) {
+ if (nv_device(bios)->card_type == NV_40 &&
+ nv_device(bios)->chipset >= 0x4c)
+ return ERR_PTR(-ENODEV);
+ nv_mask(bios, 0x001850, 0x00000001, 0x00000000);
+ } else {
+ nv_mask(bios, 0x088050, 0x00000001, 0x00000000);
+ }
+ return bios;
+}
+
+const struct nvbios_source
+nvbios_rom = {
+ .name = "PROM",
+ .init = prom_init,
+ .fini = prom_fini,
+ .read = prom_read,
+ .rw = false,
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c b/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c
index 46d955eb51e..8521eca1ed9 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c
@@ -93,10 +93,44 @@ nvbios_timingEp(struct nouveau_bios *bios, int idx,
p->timing_hdr = *hdr;
switch (!!data * *ver) {
case 0x10:
- p->timing_10_WR = nv_ro08(bios, data + 0x00);
- p->timing_10_CL = nv_ro08(bios, data + 0x02);
- p->timing_10_ODT = nv_ro08(bios, data + 0x0e) & 0x07;
- p->timing_10_CWL = nv_ro08(bios, data + 0x13);
+ p->timing_10_WR = nv_ro08(bios, data + 0x00);
+ p->timing_10_WTR = nv_ro08(bios, data + 0x01);
+ p->timing_10_CL = nv_ro08(bios, data + 0x02);
+ p->timing_10_RC = nv_ro08(bios, data + 0x03);
+ p->timing_10_RFC = nv_ro08(bios, data + 0x05);
+ p->timing_10_RAS = nv_ro08(bios, data + 0x07);
+ p->timing_10_RP = nv_ro08(bios, data + 0x09);
+ p->timing_10_RCDRD = nv_ro08(bios, data + 0x0a);
+ p->timing_10_RCDWR = nv_ro08(bios, data + 0x0b);
+ p->timing_10_RRD = nv_ro08(bios, data + 0x0c);
+ p->timing_10_13 = nv_ro08(bios, data + 0x0d);
+ p->timing_10_ODT = nv_ro08(bios, data + 0x0e) & 0x07;
+
+ p->timing_10_24 = 0xff;
+ p->timing_10_21 = 0;
+ p->timing_10_20 = 0;
+ p->timing_10_CWL = 0;
+ p->timing_10_18 = 0;
+ p->timing_10_16 = 0;
+
+ switch (min_t(u8, *hdr, 25)) {
+ case 25:
+ p->timing_10_24 = nv_ro08(bios, data + 0x18);
+ case 24:
+ case 23:
+ case 22:
+ p->timing_10_21 = nv_ro08(bios, data + 0x15);
+ case 21:
+ p->timing_10_20 = nv_ro08(bios, data + 0x14);
+ case 20:
+ p->timing_10_CWL = nv_ro08(bios, data + 0x13);
+ case 19:
+ p->timing_10_18 = nv_ro08(bios, data + 0x12);
+ case 18:
+ case 17:
+ p->timing_10_16 = nv_ro08(bios, data + 0x10);
+ }
+
break;
case 0x20:
p->timing[0] = nv_ro32(bios, data + 0x00);
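
(Illustrative sketch, not part of the patch: the header-length-gated parse used by the version 0x10 case above. Newer fields only exist when the entry is long enough, so the switch on min(header, N) deliberately falls through from larger sizes to smaller ones. The entry[] bytes and the field names below are made up.)

#include <stdint.h>
#include <stdio.h>

struct timing {
	uint8_t wr, cl, odt, cwl, b18;
};

/* parse one version-0x10 entry; hdr is the entry size reported by the table */
static void parse(const uint8_t *e, uint8_t hdr, struct timing *t)
{
	t->wr  = e[0x00];
	t->cl  = e[0x02];
	t->odt = e[0x0e] & 0x07;

	/* fields past the original 16-byte entry default to zero */
	t->cwl = 0;
	t->b18 = 0;

	switch (hdr > 20 ? 20 : hdr) {
	case 20:
		t->cwl = e[0x13];
		/* fall through */
	case 19:
		t->b18 = e[0x12];
		break;
	default:
		break;
	}
}

int main(void)
{
	uint8_t entry[0x14] = {
		[0x00] = 12, [0x02] = 9, [0x0e] = 0x03, [0x12] = 5, [0x13] = 7,
	};
	struct timing t;

	parse(entry, 0x14, &t);		/* 20-byte entry: CWL and byte 0x12 present */
	printf("WR=%d CL=%d ODT=%d CWL=%d b18=%d\n", t.wr, t.cl, t.odt, t.cwl, t.b18);

	parse(entry, 0x10, &t);		/* 16-byte entry: the new fields stay zero */
	printf("WR=%d CL=%d ODT=%d CWL=%d b18=%d\n", t.wr, t.cl, t.odt, t.cwl, t.b18);
	return 0;
}
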
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/gk20a.c b/drivers/gpu/drm/nouveau/core/subdev/clock/gk20a.c
index 425a8d5e912..fb4fad374bd 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/gk20a.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/gk20a.c
@@ -109,7 +109,7 @@ struct gk20a_clk_pllg_params {
};
static const struct gk20a_clk_pllg_params gk20a_pllg_params = {
- .min_vco = 1000, .max_vco = 1700,
+ .min_vco = 1000, .max_vco = 2064,
.min_u = 12, .max_u = 38,
.min_m = 1, .max_m = 255,
.min_n = 8, .max_n = 255,
@@ -470,76 +470,91 @@ gk20a_pstates[] = {
{
.base = {
.domain[nv_clk_src_gpc] = 72000,
+ .voltage = 0,
},
},
{
.base = {
.domain[nv_clk_src_gpc] = 108000,
+ .voltage = 1,
},
},
{
.base = {
.domain[nv_clk_src_gpc] = 180000,
+ .voltage = 2,
},
},
{
.base = {
.domain[nv_clk_src_gpc] = 252000,
+ .voltage = 3,
},
},
{
.base = {
.domain[nv_clk_src_gpc] = 324000,
+ .voltage = 4,
},
},
{
.base = {
.domain[nv_clk_src_gpc] = 396000,
+ .voltage = 5,
},
},
{
.base = {
.domain[nv_clk_src_gpc] = 468000,
+ .voltage = 6,
},
},
{
.base = {
.domain[nv_clk_src_gpc] = 540000,
+ .voltage = 7,
},
},
{
.base = {
.domain[nv_clk_src_gpc] = 612000,
+ .voltage = 8,
},
},
{
.base = {
.domain[nv_clk_src_gpc] = 648000,
+ .voltage = 9,
},
},
{
.base = {
.domain[nv_clk_src_gpc] = 684000,
+ .voltage = 10,
},
},
{
.base = {
.domain[nv_clk_src_gpc] = 708000,
+ .voltage = 11,
},
},
{
.base = {
.domain[nv_clk_src_gpc] = 756000,
+ .voltage = 12,
},
},
{
.base = {
.domain[nv_clk_src_gpc] = 804000,
+ .voltage = 13,
},
},
{
.base = {
.domain[nv_clk_src_gpc] = 852000,
+ .voltage = 14,
},
},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
index 094551d8ad9..07ad0124767 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
@@ -510,7 +510,7 @@ nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
int ret;
ret = nouveau_clock_create(parent, engine, oclass, nva3_domain, NULL, 0,
- false, &priv);
+ true, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c
index 239acfe876c..0e45cee8246 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c
@@ -24,8 +24,6 @@
#include <core/option.h>
-#include <subdev/bios.h>
-#include <subdev/bios/init.h>
#include <subdev/vga.h>
#include "priv.h"
@@ -56,7 +54,7 @@ _nouveau_devinit_init(struct nouveau_object *object)
if (ret)
return ret;
- ret = nvbios_init(&devinit->base, devinit->post);
+ ret = impl->post(&devinit->base, devinit->post);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/gm107.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/gm107.c
index c69bc7f54e3..4ba43d6a1ec 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/gm107.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/gm107.c
@@ -24,7 +24,7 @@
#include "nv50.h"
-static u64
+u64
gm107_devinit_disable(struct nouveau_devinit *devinit)
{
struct nv50_devinit_priv *priv = (void *)devinit;
@@ -53,4 +53,5 @@ gm107_devinit_oclass = &(struct nouveau_devinit_impl) {
},
.pll_set = nvc0_devinit_pll_set,
.disable = gm107_devinit_disable,
+ .post = nvbios_init,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/gm204.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/gm204.c
new file mode 100644
index 00000000000..e44a86662a2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/gm204.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/pmu.h>
+
+#include "nv50.h"
+
+static void
+pmu_code(struct nv50_devinit_priv *priv, u32 pmu, u32 img, u32 len, bool sec)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ int i;
+
+ nv_wr32(priv, 0x10a180, 0x01000000 | (sec ? 0x10000000 : 0) | pmu);
+ for (i = 0; i < len; i += 4) {
+ if ((i & 0xff) == 0)
+ nv_wr32(priv, 0x10a188, (pmu + i) >> 8);
+ nv_wr32(priv, 0x10a184, nv_ro32(bios, img + i));
+ }
+
+ while (i & 0xff) {
+ nv_wr32(priv, 0x10a184, 0x00000000);
+ i += 4;
+ }
+}
+
+static void
+pmu_data(struct nv50_devinit_priv *priv, u32 pmu, u32 img, u32 len)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ int i;
+
+ nv_wr32(priv, 0x10a1c0, 0x01000000 | pmu);
+ for (i = 0; i < len; i += 4)
+ nv_wr32(priv, 0x10a1c4, nv_ro32(bios, img + i));
+}
+
+static u32
+pmu_args(struct nv50_devinit_priv *priv, u32 argp, u32 argi)
+{
+ nv_wr32(priv, 0x10a1c0, argp);
+ nv_wr32(priv, 0x10a1c0, nv_rd32(priv, 0x10a1c4) + argi);
+ return nv_rd32(priv, 0x10a1c4);
+}
+
+static void
+pmu_exec(struct nv50_devinit_priv *priv, u32 init_addr)
+{
+ nv_wr32(priv, 0x10a104, init_addr);
+ nv_wr32(priv, 0x10a10c, 0x00000000);
+ nv_wr32(priv, 0x10a100, 0x00000002);
+}
+
+static int
+pmu_load(struct nv50_devinit_priv *priv, u8 type, bool post,
+ u32 *init_addr_pmu, u32 *args_addr_pmu)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvbios_pmuR pmu;
+
+ if (!nvbios_pmuRm(bios, type, &pmu)) {
+ nv_error(priv, "VBIOS PMU fuc %02x not found\n", type);
+ return -EINVAL;
+ }
+
+ if (!post)
+ return 0;
+
+ pmu_code(priv, pmu.boot_addr_pmu, pmu.boot_addr, pmu.boot_size, false);
+ pmu_code(priv, pmu.code_addr_pmu, pmu.code_addr, pmu.code_size, true);
+ pmu_data(priv, pmu.data_addr_pmu, pmu.data_addr, pmu.data_size);
+
+ if (init_addr_pmu) {
+ *init_addr_pmu = pmu.init_addr_pmu;
+ *args_addr_pmu = pmu.args_addr_pmu;
+ return 0;
+ }
+
+ return pmu_exec(priv, pmu.init_addr_pmu), 0;
+}
+
+static int
+gm204_devinit_post(struct nouveau_subdev *subdev, bool post)
+{
+ struct nv50_devinit_priv *priv = (void *)nouveau_devinit(subdev);
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct bit_entry bit_I;
+ u32 init, args;
+ int ret;
+
+ if (bit_entry(bios, 'I', &bit_I) || bit_I.version != 1 ||
+ bit_I.length < 0x1c) {
+ nv_error(priv, "VBIOS PMU init data not found\n");
+ return -EINVAL;
+ }
+
+ /* reset PMU and load init table parser ucode */
+ if (post) {
+ nv_mask(priv, 0x000200, 0x00002000, 0x00000000);
+ nv_mask(priv, 0x000200, 0x00002000, 0x00002000);
+ nv_rd32(priv, 0x000200);
+ while (nv_rd32(priv, 0x10a10c) & 0x00000006) {
+ }
+ }
+
+ ret = pmu_load(priv, 0x04, post, &init, &args);
+ if (ret)
+ return ret;
+
+ /* upload first chunk of init data */
+ if (post) {
+ u32 pmu = pmu_args(priv, args + 0x08, 0x08);
+ u32 img = nv_ro16(bios, bit_I.offset + 0x14);
+ u32 len = nv_ro16(bios, bit_I.offset + 0x16);
+ pmu_data(priv, pmu, img, len);
+ }
+
+ /* upload second chunk of init data */
+ if (post) {
+ u32 pmu = pmu_args(priv, args + 0x08, 0x10);
+ u32 img = nv_ro16(bios, bit_I.offset + 0x18);
+ u32 len = nv_ro16(bios, bit_I.offset + 0x1a);
+ pmu_data(priv, pmu, img, len);
+ }
+
+ /* execute init tables */
+ if (post) {
+ nv_wr32(priv, 0x10a040, 0x00005000);
+ pmu_exec(priv, init);
+ while (!(nv_rd32(priv, 0x10a040) & 0x00002000)) {
+ }
+ }
+
+ /* load and execute some other ucode image (bios therm?) */
+ return pmu_load(priv, 0x01, post, NULL, NULL);
+}
+
+struct nouveau_oclass *
+gm204_devinit_oclass = &(struct nouveau_devinit_impl) {
+ .base.handle = NV_SUBDEV(DEVINIT, 0x07),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_devinit_ctor,
+ .dtor = _nouveau_devinit_dtor,
+ .init = nv50_devinit_init,
+ .fini = _nouveau_devinit_fini,
+ },
+ .pll_set = nvc0_devinit_pll_set,
+ .disable = gm107_devinit_disable,
+ .post = gm204_devinit_post,
+}.base;
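
(Illustrative sketch, not part of the patch: the code-upload loop of pmu_code() above, reduced to plain C. wr32() is a hypothetical stand-in for nv_wr32() that just logs the writes, which makes the per-256-byte block tagging and the zero padding of the final block visible.)

#include <stdint.h>
#include <stdio.h>

/* stand-in for nv_wr32() that just logs the access */
static void wr32(uint32_t reg, uint32_t val)
{
	printf("wr32 %06x <- %08x\n", (unsigned)reg, (unsigned)val);
}

/* upload len bytes of ucode to PMU code memory starting at pmu_base */
static void pmu_code_upload(uint32_t pmu_base, const uint32_t *img, uint32_t len)
{
	uint32_t i;

	wr32(0x10a180, 0x01000000 | pmu_base);		/* auto-incrementing port */
	for (i = 0; i < len; i += 4) {
		if ((i & 0xff) == 0)			/* starting a new 256-byte block */
			wr32(0x10a188, (pmu_base + i) >> 8);
		wr32(0x10a184, img[i / 4]);
	}
	while (i & 0xff) {				/* pad the final block with zeros */
		wr32(0x10a184, 0x00000000);
		i += 4;
	}
}

int main(void)
{
	const uint32_t img[3] = { 0x11111111, 0x22222222, 0x33333333 };

	pmu_code_upload(0x0400, img, sizeof(img));
	return 0;
}
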
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
index 052ad690b46..65651c50f6e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
@@ -464,4 +464,5 @@ nv04_devinit_oclass = &(struct nouveau_devinit_impl) {
},
.meminit = nv04_devinit_meminit,
.pll_set = nv04_devinit_pll_set,
+ .post = nvbios_init,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c
index 4a19c10e517..a2007a3efc4 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c
@@ -136,4 +136,5 @@ nv05_devinit_oclass = &(struct nouveau_devinit_impl) {
},
.meminit = nv05_devinit_meminit,
.pll_set = nv04_devinit_pll_set,
+ .post = nvbios_init,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
index 3b8d657da27..178b46f79b5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
@@ -107,4 +107,5 @@ nv10_devinit_oclass = &(struct nouveau_devinit_impl) {
},
.meminit = nv10_devinit_meminit,
.pll_set = nv04_devinit_pll_set,
+ .post = nvbios_init,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c
index 526d0c6faac..995dd97af3e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c
@@ -34,4 +34,5 @@ nv1a_devinit_oclass = &(struct nouveau_devinit_impl) {
.fini = nv04_devinit_fini,
},
.pll_set = nv04_devinit_pll_set,
+ .post = nvbios_init,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c
index 04bc9732644..915089fb46f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c
@@ -71,4 +71,5 @@ nv20_devinit_oclass = &(struct nouveau_devinit_impl) {
},
.meminit = nv20_devinit_meminit,
.pll_set = nv04_devinit_pll_set,
+ .post = nvbios_init,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
index b46c62a1d5d..968334d1dca 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
@@ -26,6 +26,7 @@
#include <subdev/bios/dcb.h>
#include <subdev/bios/disp.h>
#include <subdev/bios/init.h>
+#include <subdev/ibus.h>
#include <subdev/vga.h>
#include "nv50.h"
@@ -91,6 +92,7 @@ int
nv50_devinit_init(struct nouveau_object *object)
{
struct nouveau_bios *bios = nouveau_bios(object);
+ struct nouveau_ibus *ibus = nouveau_ibus(object);
struct nv50_devinit_priv *priv = (void *)object;
struct nvbios_outp info;
struct dcb_output outp;
@@ -105,6 +107,13 @@ nv50_devinit_init(struct nouveau_object *object)
}
}
+ /* some boards appear to require certain priv register timeouts
+ * to be bumped before running devinit scripts. not a clue why
+ * the vbios engineers didn't make the scripts just work...
+ */
+ if (priv->base.post && ibus)
+ nv_ofuncs(ibus)->init(nv_object(ibus));
+
ret = nouveau_devinit_init(&priv->base);
if (ret)
return ret;
@@ -160,4 +169,5 @@ nv50_devinit_oclass = &(struct nouveau_devinit_impl) {
},
.pll_set = nv50_devinit_pll_set,
.disable = nv50_devinit_disable,
+ .post = nvbios_init,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h
index 51d5076333e..f412bb7f780 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h
@@ -18,4 +18,6 @@ int nva3_devinit_pll_set(struct nouveau_devinit *, u32, u32);
int nvc0_devinit_pll_set(struct nouveau_devinit *, u32, u32);
+u64 gm107_devinit_disable(struct nouveau_devinit *);
+
#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv84.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv84.c
index 787422505d8..a7c80ded77c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv84.c
@@ -60,4 +60,5 @@ nv84_devinit_oclass = &(struct nouveau_devinit_impl) {
},
.pll_set = nv50_devinit_pll_set,
.disable = nv84_devinit_disable,
+ .post = nvbios_init,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv98.c
index 2b0e963fc6f..a773253a17f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv98.c
@@ -59,4 +59,5 @@ nv98_devinit_oclass = &(struct nouveau_devinit_impl) {
},
.pll_set = nv50_devinit_pll_set,
.disable = nv98_devinit_disable,
+ .post = nvbios_init,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c
index 006cf348bda..b9cd9e53f76 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c
@@ -142,4 +142,5 @@ nva3_devinit_oclass = &(struct nouveau_devinit_impl) {
.pll_set = nva3_devinit_pll_set,
.disable = nva3_devinit_disable,
.mmio = nva3_devinit_mmio,
+ .post = nvbios_init,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nvaf.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nvaf.c
index 4fc68d27eff..3729846a8e5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nvaf.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nvaf.c
@@ -60,4 +60,5 @@ nvaf_devinit_oclass = &(struct nouveau_devinit_impl) {
},
.pll_set = nva3_devinit_pll_set,
.disable = nvaf_devinit_disable,
+ .post = nvbios_init,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c
index 30c765747ee..80bd7f5eda3 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c
@@ -115,4 +115,5 @@ nvc0_devinit_oclass = &(struct nouveau_devinit_impl) {
},
.pll_set = nvc0_devinit_pll_set,
.disable = nvc0_devinit_disable,
+ .post = nvbios_init,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h b/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h
index f0e8683ad84..cbcd5185247 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h
@@ -3,6 +3,7 @@
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
+#include <subdev/bios/init.h>
#include <subdev/clock/pll.h>
#include <subdev/devinit.h>
@@ -12,6 +13,7 @@ struct nouveau_devinit_impl {
int (*pll_set)(struct nouveau_devinit *, u32 type, u32 freq);
u64 (*disable)(struct nouveau_devinit *);
u32 (*mmio)(struct nouveau_devinit *, u32);
+ int (*post)(struct nouveau_subdev *, bool);
};
#define nouveau_devinit_create(p,e,o,d) \
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
index f009d8a39d9..c866148c440 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
@@ -23,37 +23,30 @@
*/
#include <subdev/bios.h>
-#include <subdev/bios/bit.h>
+#include <subdev/bios/M0203.h>
#include "priv.h"
int
nouveau_fb_bios_memtype(struct nouveau_bios *bios)
{
- struct bit_entry M;
- u8 ramcfg;
-
- ramcfg = (nv_rd32(bios, 0x101000) & 0x0000003c) >> 2;
- if (!bit_entry(bios, 'M', &M) && M.version == 2 && M.length >= 5) {
- u16 table = nv_ro16(bios, M.offset + 3);
- u8 version = nv_ro08(bios, table + 0);
- u8 header = nv_ro08(bios, table + 1);
- u8 record = nv_ro08(bios, table + 2);
- u8 entries = nv_ro08(bios, table + 3);
- if (table && version == 0x10 && ramcfg < entries) {
- u16 entry = table + header + (ramcfg * record);
- switch (nv_ro08(bios, entry) & 0x0f) {
- case 0: return NV_MEM_TYPE_DDR2;
- case 1: return NV_MEM_TYPE_DDR3;
- case 2: return NV_MEM_TYPE_GDDR3;
- case 3: return NV_MEM_TYPE_GDDR5;
- default:
- break;
- }
-
+ const u8 ramcfg = (nv_rd32(bios, 0x101000) & 0x0000003c) >> 2;
+ struct nvbios_M0203E M0203E;
+ u8 ver, hdr;
+
+ if (nvbios_M0203Em(bios, ramcfg, &ver, &hdr, &M0203E)) {
+ switch (M0203E.type) {
+ case M0203E_TYPE_DDR2 : return NV_MEM_TYPE_DDR2;
+ case M0203E_TYPE_DDR3 : return NV_MEM_TYPE_DDR3;
+ case M0203E_TYPE_GDDR3: return NV_MEM_TYPE_GDDR3;
+ case M0203E_TYPE_GDDR5: return NV_MEM_TYPE_GDDR5;
+ default:
+ nv_warn(bios, "M0203E type %02x\n", M0203E.type);
+ return NV_MEM_TYPE_UNKNOWN;
}
}
+ nv_warn(bios, "M0203E not matched!\n");
return NV_MEM_TYPE_UNKNOWN;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/gddr3.c b/drivers/gpu/drm/nouveau/core/subdev/fb/gddr3.c
new file mode 100644
index 00000000000..d85a25d027e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/gddr3.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ * Roy Spliet <rspliet@eclipso.eu>
+ */
+
+#include <subdev/bios.h>
+#include "priv.h"
+
+struct ramxlat {
+ int id;
+ u8 enc;
+};
+
+static inline int
+ramxlat(const struct ramxlat *xlat, int id)
+{
+ while (xlat->id >= 0) {
+ if (xlat->id == id)
+ return xlat->enc;
+ xlat++;
+ }
+ return -EINVAL;
+}
+
+static const struct ramxlat
+ramgddr3_cl_lo[] = {
+ { 7, 7 }, { 8, 0 }, { 9, 1 }, { 10, 2 }, { 11, 3 },
+ /* the below are mentioned in some, but not all, gddr3 docs */
+ { 12, 4 }, { 13, 5 }, { 14, 6 },
+ /* XXX: Per Samsung docs, are these used? They overlap with Qimonda */
+ /* { 4, 4 }, { 5, 5 }, { 6, 6 }, { 12, 8 }, { 13, 9 }, { 14, 10 },
+ * { 15, 11 }, */
+ { -1 }
+};
+
+static const struct ramxlat
+ramgddr3_cl_hi[] = {
+ { 10, 2 }, { 11, 3 }, { 12, 4 }, { 13, 5 }, { 14, 6 }, { 15, 7 },
+ { 16, 0 }, { 17, 1 },
+ { -1 }
+};
+
+static const struct ramxlat
+ramgddr3_wr_lo[] = {
+ { 5, 2 }, { 7, 4 }, { 8, 5 }, { 9, 6 }, { 10, 7 },
+ { 11, 0 },
+ /* the below are mentioned in some, but not all, gddr3 docs */
+ { 4, 1 }, { 6, 3 }, { 12, 1 }, { 13 , 2 },
+ { -1 }
+};
+
+int
+nouveau_gddr3_calc(struct nouveau_ram *ram)
+{
+ int CL, WR, CWL, DLL = 0, ODT = 0, hi;
+
+ switch (ram->next->bios.timing_ver) {
+ case 0x10:
+ CWL = ram->next->bios.timing_10_CWL;
+ CL = ram->next->bios.timing_10_CL;
+ WR = ram->next->bios.timing_10_WR;
+ DLL = !ram->next->bios.ramcfg_10_DLLoff;
+ ODT = ram->next->bios.timing_10_ODT;
+ break;
+ case 0x20:
+ CWL = (ram->next->bios.timing[1] & 0x00000f80) >> 7;
+ CL = (ram->next->bios.timing[1] & 0x0000001f) >> 0;
+ WR = (ram->next->bios.timing[2] & 0x007f0000) >> 16;
+ /* XXX: Get these values from the VBIOS instead */
+ DLL = !(ram->mr[1] & 0x1);
+ ODT = (ram->mr[1] & 0x004) >> 2 |
+ (ram->mr[1] & 0x040) >> 5 |
+ (ram->mr[1] & 0x200) >> 7;
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ hi = ram->mr[2] & 0x1;
+ CL = ramxlat(hi ? ramgddr3_cl_hi : ramgddr3_cl_lo, CL);
+ WR = ramxlat(ramgddr3_wr_lo, WR);
+ if (CL < 0 || CWL < 1 || CWL > 7 || WR < 0)
+ return -EINVAL;
+
+ ram->mr[0] &= ~0xf74;
+ ram->mr[0] |= (CWL & 0x07) << 9;
+ ram->mr[0] |= (CL & 0x07) << 4;
+ ram->mr[0] |= (CL & 0x08) >> 1;
+
+ ram->mr[1] &= ~0x3fc;
+ ram->mr[1] |= (ODT & 0x03) << 2;
+ ram->mr[1] |= (ODT & 0x03) << 8;
+ ram->mr[1] |= (WR & 0x03) << 4;
+ ram->mr[1] |= (WR & 0x04) << 5;
+ ram->mr[1] |= !DLL << 6;
+ return 0;
+}
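
(Illustrative sketch, not part of the patch: the ramxlat() lookup used by nouveau_gddr3_calc() above to translate a timing value into its mode-register encoding. The table below copies the low-speed CL table from the patch; the xlat() walk and main() are just a stand-in driver.)

#include <stdio.h>

struct ramxlat {
	int id;			/* timing value from the vbios */
	unsigned char enc;	/* encoding to program into the mode register */
};

/* GDDR3 CAS-latency encodings, low-speed table, copied from the patch */
static const struct ramxlat gddr3_cl_lo[] = {
	{ 7, 7 }, { 8, 0 }, { 9, 1 }, { 10, 2 }, { 11, 3 },
	{ 12, 4 }, { 13, 5 }, { 14, 6 },
	{ -1 }			/* sentinel terminates the walk */
};

/* same walk as ramxlat() above; the kernel version returns -EINVAL on a miss */
static int xlat(const struct ramxlat *t, int id)
{
	for (; t->id >= 0; t++) {
		if (t->id == id)
			return t->enc;
	}
	return -1;
}

int main(void)
{
	printf("CL=9  -> enc %d\n", xlat(gddr3_cl_lo, 9));
	printf("CL=15 -> enc %d\n", xlat(gddr3_cl_lo, 15));
	return 0;
}
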
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
index 60322e906dd..283863f7aa9 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
@@ -37,6 +37,7 @@ extern struct nouveau_oclass gm107_ram_oclass;
int nouveau_sddr2_calc(struct nouveau_ram *ram);
int nouveau_sddr3_calc(struct nouveau_ram *ram);
+int nouveau_gddr3_calc(struct nouveau_ram *ram);
int nouveau_gddr5_calc(struct nouveau_ram *ram, bool nuts);
#define nouveau_fb_create(p,e,c,d) \
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h b/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
index d1fbbe4b00a..0ac7256443b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
@@ -141,6 +141,20 @@ ramfuc_wait_vblank(struct ramfuc *ram)
}
static inline void
+ramfuc_train(struct ramfuc *ram)
+{
+ nouveau_memx_train(ram->memx);
+}
+
+static inline int
+ramfuc_train_result(struct nouveau_fb *pfb, u32 *result, u32 rsize)
+{
+ struct nouveau_pwr *ppwr = nouveau_pwr(pfb);
+
+ return nouveau_memx_train_result(ppwr, result, rsize);
+}
+
+static inline void
ramfuc_block(struct ramfuc *ram)
{
nouveau_memx_block(ram->memx);
@@ -162,6 +176,8 @@ ramfuc_unblock(struct ramfuc *ram)
#define ram_wait(s,r,m,d,n) ramfuc_wait(&(s)->base, (r), (m), (d), (n))
#define ram_nsec(s,n) ramfuc_nsec(&(s)->base, (n))
#define ram_wait_vblank(s) ramfuc_wait_vblank(&(s)->base)
+#define ram_train(s) ramfuc_train(&(s)->base)
+#define ram_train_result(s,r,l) ramfuc_train_result((s), (r), (l))
#define ram_block(s) ramfuc_block(&(s)->base)
#define ram_unblock(s) ramfuc_unblock(&(s)->base)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
index 3601deca0bd..3b38a538845 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
@@ -20,86 +20,512 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
+ * Roy Spliet <rspliet@eclipso.eu>
*/
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/pll.h>
#include <subdev/bios/rammap.h>
+#include <subdev/bios/M0205.h>
#include <subdev/bios/timing.h>
#include <subdev/clock/nva3.h>
#include <subdev/clock/pll.h>
+#include <subdev/gpio.h>
+
+#include <subdev/timer.h>
+
+#include <engine/fifo.h>
+
#include <core/option.h>
#include "ramfuc.h"
#include "nv50.h"
+/* XXX: Remove when memx gains GPIO support */
+extern int nv50_gpio_location(int line, u32 *reg, u32 *shift);
+
struct nva3_ramfuc {
struct ramfuc base;
+ struct ramfuc_reg r_0x001610;
+ struct ramfuc_reg r_0x001700;
+ struct ramfuc_reg r_0x002504;
struct ramfuc_reg r_0x004000;
struct ramfuc_reg r_0x004004;
struct ramfuc_reg r_0x004018;
struct ramfuc_reg r_0x004128;
struct ramfuc_reg r_0x004168;
+ struct ramfuc_reg r_0x100080;
struct ramfuc_reg r_0x100200;
struct ramfuc_reg r_0x100210;
struct ramfuc_reg r_0x100220[9];
+ struct ramfuc_reg r_0x100264;
struct ramfuc_reg r_0x1002d0;
struct ramfuc_reg r_0x1002d4;
struct ramfuc_reg r_0x1002dc;
struct ramfuc_reg r_0x10053c;
struct ramfuc_reg r_0x1005a0;
struct ramfuc_reg r_0x1005a4;
+ struct ramfuc_reg r_0x100700;
struct ramfuc_reg r_0x100714;
struct ramfuc_reg r_0x100718;
struct ramfuc_reg r_0x10071c;
+ struct ramfuc_reg r_0x100720;
struct ramfuc_reg r_0x100760;
struct ramfuc_reg r_0x1007a0;
struct ramfuc_reg r_0x1007e0;
+ struct ramfuc_reg r_0x100da0;
struct ramfuc_reg r_0x10f804;
struct ramfuc_reg r_0x1110e0;
struct ramfuc_reg r_0x111100;
struct ramfuc_reg r_0x111104;
+ struct ramfuc_reg r_0x1111e0;
+ struct ramfuc_reg r_0x111400;
struct ramfuc_reg r_0x611200;
struct ramfuc_reg r_mr[4];
+ struct ramfuc_reg r_gpioFBVREF;
+};
+
+struct nva3_ltrain {
+ enum {
+ NVA3_TRAIN_UNKNOWN,
+ NVA3_TRAIN_UNSUPPORTED,
+ NVA3_TRAIN_ONCE,
+ NVA3_TRAIN_EXEC,
+ NVA3_TRAIN_DONE
+ } state;
+ u32 r_100720;
+ u32 r_1111e0;
+ u32 r_111400;
+ struct nouveau_mem *mem;
};
struct nva3_ram {
struct nouveau_ram base;
struct nva3_ramfuc fuc;
+ struct nva3_ltrain ltrain;
};
+void
+nva3_link_train_calc(u32 *vals, struct nva3_ltrain *train)
+{
+ int i, lo, hi;
+ u8 median[8], bins[4] = {0, 0, 0, 0}, bin = 0, qty = 0;
+
+ for (i = 0; i < 8; i++) {
+ for (lo = 0; lo < 0x40; lo++) {
+ if (!(vals[lo] & 0x80000000))
+ continue;
+ if (vals[lo] & (0x101 << i))
+ break;
+ }
+
+ if (lo == 0x40)
+ return;
+
+ for (hi = lo + 1; hi < 0x40; hi++) {
+ if (!(vals[lo] & 0x80000000))
+ continue;
+ if (!(vals[hi] & (0x101 << i))) {
+ hi--;
+ break;
+ }
+ }
+
+ median[i] = ((hi - lo) >> 1) + lo;
+ bins[(median[i] & 0xf0) >> 4]++;
+ median[i] += 0x30;
+ }
+
+ /* Find the best value for 0x1111e0 */
+ for (i = 0; i < 4; i++) {
+ if (bins[i] > qty) {
+ bin = i + 3;
+ qty = bins[i];
+ }
+ }
+
+ train->r_100720 = 0;
+ for (i = 0; i < 8; i++) {
+ median[i] = max(median[i], (u8) (bin << 4));
+ median[i] = min(median[i], (u8) ((bin << 4) | 0xf));
+
+ train->r_100720 |= ((median[i] & 0x0f) << (i << 2));
+ }
+
+ train->r_1111e0 = 0x02000000 | (bin * 0x101);
+ train->r_111400 = 0x0;
+}
+
+/*
+ * Link training for (at least) DDR3
+ */
+int
+nva3_link_train(struct nouveau_fb *pfb)
+{
+ struct nouveau_bios *bios = nouveau_bios(pfb);
+ struct nva3_ram *ram = (void *)pfb->ram;
+ struct nouveau_clock *clk = nouveau_clock(pfb);
+ struct nva3_ltrain *train = &ram->ltrain;
+ struct nouveau_device *device = nv_device(pfb);
+ struct nva3_ramfuc *fuc = &ram->fuc;
+ u32 *result, r1700;
+ int ret, i;
+ struct nvbios_M0205T M0205T = { 0 };
+ u8 ver, hdr, cnt, len, snr, ssz;
+ unsigned int clk_current;
+ unsigned long flags;
+ unsigned long *f = &flags;
+
+ if (nouveau_boolopt(device->cfgopt, "NvMemExec", true) != true)
+ return -ENOSYS;
+
+ /* XXX: Multiple partitions? */
+ result = kmalloc(64 * sizeof(u32), GFP_KERNEL);
+ if (!result)
+ return -ENOMEM;
+
+ train->state = NVA3_TRAIN_EXEC;
+
+ /* Clock speeds for training and back */
+ nvbios_M0205Tp(bios, &ver, &hdr, &cnt, &len, &snr, &ssz, &M0205T);
+ if (M0205T.freq == 0)
+ return -ENOENT;
+
+ clk_current = clk->read(clk, nv_clk_src_mem);
+
+ ret = nva3_clock_pre(clk, f);
+ if (ret)
+ goto out;
+
+ /* First: clock up/down */
+ ret = ram->base.calc(pfb, (u32) M0205T.freq * 1000);
+ if (ret)
+ goto out;
+
+ /* Do this *after* calc, eliminates write in script */
+ nv_wr32(pfb, 0x111400, 0x00000000);
+ /* XXX: Magic writes that improve train reliability? */
+ nv_mask(pfb, 0x100674, 0x0000ffff, 0x00000000);
+ nv_mask(pfb, 0x1005e4, 0x0000ffff, 0x00000000);
+ nv_mask(pfb, 0x100b0c, 0x000000ff, 0x00000000);
+ nv_wr32(pfb, 0x100c04, 0x00000400);
+
+ /* Now the training script */
+ r1700 = ram_rd32(fuc, 0x001700);
+
+ ram_mask(fuc, 0x100200, 0x00000800, 0x00000000);
+ ram_wr32(fuc, 0x611200, 0x3300);
+ ram_wait_vblank(fuc);
+ ram_wait(fuc, 0x611200, 0x00000003, 0x00000000, 500000);
+ ram_mask(fuc, 0x001610, 0x00000083, 0x00000003);
+ ram_mask(fuc, 0x100080, 0x00000020, 0x00000000);
+ ram_mask(fuc, 0x10f804, 0x80000000, 0x00000000);
+ ram_wr32(fuc, 0x001700, 0x00000000);
+
+ ram_train(fuc);
+
+ /* Reset */
+ ram_mask(fuc, 0x10f804, 0x80000000, 0x80000000);
+ ram_wr32(fuc, 0x10053c, 0x0);
+ ram_wr32(fuc, 0x100720, train->r_100720);
+ ram_wr32(fuc, 0x1111e0, train->r_1111e0);
+ ram_wr32(fuc, 0x111400, train->r_111400);
+ ram_nuke(fuc, 0x100080);
+ ram_mask(fuc, 0x100080, 0x00000020, 0x00000020);
+ ram_nsec(fuc, 1000);
+
+ ram_wr32(fuc, 0x001700, r1700);
+ ram_mask(fuc, 0x001610, 0x00000083, 0x00000080);
+ ram_wr32(fuc, 0x611200, 0x3330);
+ ram_mask(fuc, 0x100200, 0x00000800, 0x00000800);
+
+ ram_exec(fuc, true);
+
+ ram->base.calc(pfb, clk_current);
+ ram_exec(fuc, true);
+
+ /* Post-processing, avoids flicker */
+ nv_mask(pfb, 0x616308, 0x10, 0x10);
+ nv_mask(pfb, 0x616b08, 0x10, 0x10);
+
+ nva3_clock_post(clk, f);
+
+ ram_train_result(pfb, result, 64);
+ for (i = 0; i < 64; i++)
+ nv_debug(pfb, "Train: %08x", result[i]);
+ nva3_link_train_calc(result, train);
+
+ nv_debug(pfb, "Train: %08x %08x %08x", train->r_100720,
+ train->r_1111e0, train->r_111400);
+
+ kfree(result);
+
+ train->state = NVA3_TRAIN_DONE;
+
+ return ret;
+
+out:
+ if (ret == -EBUSY)
+ f = NULL;
+
+ train->state = NVA3_TRAIN_UNSUPPORTED;
+
+ nva3_clock_post(clk, f);
+ return ret;
+}
+
+int
+nva3_link_train_init(struct nouveau_fb *pfb)
+{
+ static const u32 pattern[16] = {
+ 0xaaaaaaaa, 0xcccccccc, 0xdddddddd, 0xeeeeeeee,
+ 0x00000000, 0x11111111, 0x44444444, 0xdddddddd,
+ 0x33333333, 0x55555555, 0x77777777, 0x66666666,
+ 0x99999999, 0x88888888, 0xeeeeeeee, 0xbbbbbbbb,
+ };
+ struct nouveau_bios *bios = nouveau_bios(pfb);
+ struct nva3_ram *ram = (void *)pfb->ram;
+ struct nva3_ltrain *train = &ram->ltrain;
+ struct nouveau_mem *mem;
+ struct nvbios_M0205E M0205E;
+ u8 ver, hdr, cnt, len;
+ u32 r001700;
+ int ret, i = 0;
+
+ train->state = NVA3_TRAIN_UNSUPPORTED;
+
+ /* We support type "5"
+ * XXX: training pattern table appears to be unused for this routine */
+ if (!nvbios_M0205Ep(bios, i, &ver, &hdr, &cnt, &len, &M0205E))
+ return -ENOENT;
+
+ if (M0205E.type != 5)
+ return 0;
+
+ train->state = NVA3_TRAIN_ONCE;
+
+ ret = pfb->ram->get(pfb, 0x8000, 0x10000, 0, 0x800, &ram->ltrain.mem);
+ if (ret)
+ return ret;
+
+ mem = ram->ltrain.mem;
+
+ nv_wr32(pfb, 0x100538, 0x10000000 | (mem->offset >> 16));
+ nv_wr32(pfb, 0x1005a8, 0x0000ffff);
+ nv_mask(pfb, 0x10f800, 0x00000001, 0x00000001);
+
+ for (i = 0; i < 0x30; i++) {
+ nv_wr32(pfb, 0x10f8c0, (i << 8) | i);
+ nv_wr32(pfb, 0x10f900, pattern[i % 16]);
+ }
+
+ for (i = 0; i < 0x30; i++) {
+ nv_wr32(pfb, 0x10f8e0, (i << 8) | i);
+ nv_wr32(pfb, 0x10f920, pattern[i % 16]);
+ }
+
+ /* And upload the pattern */
+ r001700 = nv_rd32(pfb, 0x1700);
+ nv_wr32(pfb, 0x1700, mem->offset >> 16);
+ for (i = 0; i < 16; i++)
+ nv_wr32(pfb, 0x700000 + (i << 2), pattern[i]);
+ for (i = 0; i < 16; i++)
+ nv_wr32(pfb, 0x700100 + (i << 2), pattern[i]);
+ nv_wr32(pfb, 0x1700, r001700);
+
+ train->r_100720 = nv_rd32(pfb, 0x100720);
+ train->r_1111e0 = nv_rd32(pfb, 0x1111e0);
+ train->r_111400 = nv_rd32(pfb, 0x111400);
+
+ return 0;
+}
+
+void
+nva3_link_train_fini(struct nouveau_fb *pfb)
+{
+ struct nva3_ram *ram = (void *)pfb->ram;
+
+ if (ram->ltrain.mem)
+ pfb->ram->put(pfb, &ram->ltrain.mem);
+}
+
+/*
+ * RAM reclocking
+ */
+#define T(t) cfg->timing_10_##t
+static int
+nva3_ram_timing_calc(struct nouveau_fb *pfb, u32 *timing)
+{
+ struct nva3_ram *ram = (void *)pfb->ram;
+ struct nvbios_ramcfg *cfg = &ram->base.target.bios;
+ int tUNK_base, tUNK_40_0, prevCL;
+ u32 cur2, cur3, cur7, cur8;
+
+ cur2 = nv_rd32(pfb, 0x100228);
+ cur3 = nv_rd32(pfb, 0x10022c);
+ cur7 = nv_rd32(pfb, 0x10023c);
+ cur8 = nv_rd32(pfb, 0x100240);
+
+
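+ /* If the timing entry leaves CWL unset, derive it here; the multiply makes
+ * this switch a no-op whenever T(CWL) is already non-zero. */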
+ switch ((!T(CWL)) * ram->base.type) {
+ case NV_MEM_TYPE_DDR2:
+ T(CWL) = T(CL) - 1;
+ break;
+ case NV_MEM_TYPE_GDDR3:
+ T(CWL) = ((cur2 & 0xff000000) >> 24) + 1;
+ break;
+ }
+
+ prevCL = (cur3 & 0x000000ff) + 1;
+ tUNK_base = ((cur7 & 0x00ff0000) >> 16) - prevCL;
+
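+ /* timing[0..8] end up in the 0x100220..0x100240 register block (see below) */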
+ timing[0] = (T(RP) << 24 | T(RAS) << 16 | T(RFC) << 8 | T(RC));
+ timing[1] = (T(WR) + 1 + T(CWL)) << 24 |
+ max_t(u8,T(18), 1) << 16 |
+ (T(WTR) + 1 + T(CWL)) << 8 |
+ (5 + T(CL) - T(CWL));
+ timing[2] = (T(CWL) - 1) << 24 |
+ (T(RRD) << 16) |
+ (T(RCDWR) << 8) |
+ T(RCDRD);
+ timing[3] = (cur3 & 0x00ff0000) |
+ (0x30 + T(CL)) << 24 |
+ (0xb + T(CL)) << 8 |
+ (T(CL) - 1);
+ timing[4] = T(20) << 24 |
+ T(21) << 16 |
+ T(13) << 8 |
+ T(13);
+ timing[5] = T(RFC) << 24 |
+ max_t(u8,T(RCDRD), T(RCDWR)) << 16 |
+ max_t(u8, (T(CWL) + 6), (T(CL) + 2)) << 8 |
+ T(RP);
+ timing[6] = (0x5a + T(CL)) << 16 |
+ max_t(u8, 1, (6 - T(CL) + T(CWL))) << 8 |
+ (0x50 + T(CL) - T(CWL));
+ timing[7] = (cur7 & 0xff000000) |
+ ((tUNK_base + T(CL)) << 16) |
+ 0x202;
+ timing[8] = cur8 & 0xffffff00;
+
+ switch (ram->base.type) {
+ case NV_MEM_TYPE_DDR2:
+ case NV_MEM_TYPE_GDDR3:
+ tUNK_40_0 = prevCL - (cur8 & 0xff);
+ if (tUNK_40_0 > 0)
+ timing[8] |= T(CL);
+ break;
+ default:
+ break;
+ }
+
+ nv_debug(pfb, "Entry: 220: %08x %08x %08x %08x\n",
+ timing[0], timing[1], timing[2], timing[3]);
+ nv_debug(pfb, " 230: %08x %08x %08x %08x\n",
+ timing[4], timing[5], timing[6], timing[7]);
+ nv_debug(pfb, " 240: %08x\n", timing[8]);
+ return 0;
+}
+#undef T
+
+static void
+nouveau_sddr2_dll_reset(struct nva3_ramfuc *fuc)
+{
+ ram_mask(fuc, mr[0], 0x100, 0x100);
+ ram_nsec(fuc, 1000);
+ ram_mask(fuc, mr[0], 0x100, 0x000);
+ ram_nsec(fuc, 1000);
+}
+
+static void
+nouveau_sddr3_dll_disable(struct nva3_ramfuc *fuc, u32 *mr)
+{
+ u32 mr1_old = ram_rd32(fuc, mr[1]);
+
+ if (!(mr1_old & 0x1)) {
+ ram_wr32(fuc, 0x1002d4, 0x00000001);
+ ram_wr32(fuc, mr[1], mr[1]);
+ ram_nsec(fuc, 1000);
+ }
+}
+
+static void
+nouveau_gddr3_dll_disable(struct nva3_ramfuc *fuc, u32 *mr)
+{
+ u32 mr1_old = ram_rd32(fuc, mr[1]);
+
+ if (!(mr1_old & 0x40)) {
+ ram_wr32(fuc, mr[1], mr[1]);
+ ram_nsec(fuc, 1000);
+ }
+}
+
+static void
+nva3_ram_lock_pll(struct nva3_ramfuc *fuc, struct nva3_clock_info *mclk)
+{
+ ram_wr32(fuc, 0x004004, mclk->pll);
+ ram_mask(fuc, 0x004000, 0x00000001, 0x00000001);
+ ram_mask(fuc, 0x004000, 0x00000010, 0x00000000);
+ ram_wait(fuc, 0x004000, 0x00020000, 0x00020000, 64000);
+ ram_mask(fuc, 0x004000, 0x00000010, 0x00000010);
+}
+
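+/* Switch the FBVREF GPIO (DCB tag 0x2e); the readback of the GPIO register
+ * is used to account for the line's apparent polarity. */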
+static void
+nva3_ram_fbvref(struct nva3_ramfuc *fuc, u32 val)
+{
+ struct nouveau_gpio *gpio = nouveau_gpio(fuc->base.pfb);
+ struct dcb_gpio_func func;
+ u32 reg, sh, gpio_val;
+ int ret;
+
+ if (gpio->get(gpio, 0, 0x2e, DCB_GPIO_UNUSED) != val) {
+ ret = gpio->find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
+ if (ret)
+ return;
+
+ nv50_gpio_location(func.line, &reg, &sh);
+ gpio_val = ram_rd32(fuc, gpioFBVREF);
+ if (gpio_val & (8 << sh))
+ val = !val;
+
+ ram_mask(fuc, gpioFBVREF, (0x3 << sh), ((val | 0x2) << sh));
+ ram_nsec(fuc, 20000);
+ }
+}
+
static int
nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
{
struct nouveau_bios *bios = nouveau_bios(pfb);
struct nva3_ram *ram = (void *)pfb->ram;
struct nva3_ramfuc *fuc = &ram->fuc;
+ struct nva3_ltrain *train = &ram->ltrain;
struct nva3_clock_info mclk;
struct nouveau_ram_data *next;
u8 ver, hdr, cnt, len, strap;
u32 data;
- u32 r004018, r100760, ctrl;
+ u32 r004018, r100760, r100da0, r111100, ctrl;
u32 unk714, unk718, unk71c;
int ret, i;
+ u32 timing[9];
+ bool pll2pll;
next = &ram->base.target;
next->freq = freq;
ram->base.next = next;
+ if (ram->ltrain.state == NVA3_TRAIN_ONCE)
+ nva3_link_train(pfb);
+
/* lookup memory config data relevant to the target frequency */
i = 0;
- while ((data = nvbios_rammapEp(bios, i++, &ver, &hdr, &cnt, &len,
- &next->bios))) {
- if (freq / 1000 >= next->bios.rammap_min &&
- freq / 1000 <= next->bios.rammap_max)
- break;
- }
-
- if (!data || ver != 0x10 || hdr < 0x0e) {
+ data = nvbios_rammapEm(bios, freq / 1000, &ver, &hdr, &cnt, &len,
+ &next->bios);
+ if (!data || ver != 0x10 || hdr < 0x05) {
nv_error(pfb, "invalid/missing rammap entry\n");
return -EINVAL;
}
@@ -113,7 +539,7 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
data = nvbios_rammapSp(bios, data, ver, hdr, cnt, len, strap,
&ver, &hdr, &next->bios);
- if (!data || ver != 0x10 || hdr < 0x0e) {
+ if (!data || ver != 0x10 || hdr < 0x09) {
nv_error(pfb, "invalid/missing ramcfg entry\n");
return -EINVAL;
}
@@ -123,7 +549,7 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
data = nvbios_timingEp(bios, next->bios.ramcfg_timing,
&ver, &hdr, &cnt, &len,
&next->bios);
- if (!data || ver != 0x10 || hdr < 0x19) {
+ if (!data || ver != 0x10 || hdr < 0x17) {
nv_error(pfb, "invalid/missing timing entry\n");
return -EINVAL;
}
@@ -135,53 +561,99 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
return ret;
}
+ nva3_ram_timing_calc(pfb, timing);
+
ret = ram_init(fuc, pfb);
if (ret)
return ret;
+ /* Determine ram-specific MR values */
+ ram->base.mr[0] = ram_rd32(fuc, mr[0]);
+ ram->base.mr[1] = ram_rd32(fuc, mr[1]);
+ ram->base.mr[2] = ram_rd32(fuc, mr[2]);
+
+ switch (ram->base.type) {
+ case NV_MEM_TYPE_DDR2:
+ ret = nouveau_sddr2_calc(&ram->base);
+ break;
+ case NV_MEM_TYPE_DDR3:
+ ret = nouveau_sddr3_calc(&ram->base);
+ break;
+ case NV_MEM_TYPE_GDDR3:
+ ret = nouveau_gddr3_calc(&ram->base);
+ break;
+ default:
+ ret = -ENOSYS;
+ break;
+ }
+
+ if (ret)
+ return ret;
+
/* XXX: where the fuck does 750MHz come from? */
if (freq <= 750000) {
r004018 = 0x10000000;
r100760 = 0x22222222;
+ r100da0 = 0x00000010;
} else {
r004018 = 0x00000000;
r100760 = 0x00000000;
+ r100da0 = 0x00000000;
}
+ if (!next->bios.ramcfg_10_DLLoff)
+ r004018 |= 0x00004000;
+
+ /* pll2pll requires switching to a safe clock first */
ctrl = ram_rd32(fuc, 0x004000);
- if (ctrl & 0x00000008) {
- if (mclk.pll) {
- ram_mask(fuc, 0x004128, 0x00000101, 0x00000101);
- ram_wr32(fuc, 0x004004, mclk.pll);
- ram_wr32(fuc, 0x004000, (ctrl |= 0x00000001));
- ram_wr32(fuc, 0x004000, (ctrl &= 0xffffffef));
- ram_wait(fuc, 0x004000, 0x00020000, 0x00020000, 64000);
- ram_wr32(fuc, 0x004000, (ctrl |= 0x00000010));
- ram_wr32(fuc, 0x004018, 0x00005000 | r004018);
- ram_wr32(fuc, 0x004000, (ctrl |= 0x00000004));
- }
- } else {
- u32 ssel = 0x00000101;
- if (mclk.clk)
- ssel |= mclk.clk;
- else
- ssel |= 0x00080000; /* 324MHz, shouldn't matter... */
- ram_mask(fuc, 0x004168, 0x003f3141, ctrl);
- }
+ pll2pll = (!(ctrl & 0x00000008)) && mclk.pll;
+ /* Pre-reclock setup; NVIDIA does this outside the script */
if (next->bios.ramcfg_10_02_10) {
ram_mask(fuc, 0x111104, 0x00000600, 0x00000000);
} else {
ram_mask(fuc, 0x111100, 0x40000000, 0x40000000);
ram_mask(fuc, 0x111104, 0x00000180, 0x00000000);
}
+ /* Always disable this bit during reclock */
+ ram_mask(fuc, 0x100200, 0x00000800, 0x00000000);
+
+ /* If switching from non-pll to pll, lock before disabling FB */
+ if (mclk.pll && !pll2pll) {
+ ram_mask(fuc, 0x004128, 0x003f3141, mclk.clk | 0x00000101);
+ nva3_ram_lock_pll(fuc, &mclk);
+ }
+
+ /* Start by disabling some CRTCs and PFIFO? */
+ ram_wait_vblank(fuc);
+ ram_wr32(fuc, 0x611200, 0x3300);
+ ram_mask(fuc, 0x002504, 0x1, 0x1);
+ ram_nsec(fuc, 10000);
+ ram_wait(fuc, 0x002504, 0x10, 0x10, 20000); /* XXX: or longer? */
+ ram_block(fuc);
+ ram_nsec(fuc, 2000);
+
+ if (!next->bios.ramcfg_10_02_10) {
+ if (ram->base.type == NV_MEM_TYPE_GDDR3)
+ ram_mask(fuc, 0x111100, 0x04020000, 0x00020000);
+ else
+ ram_mask(fuc, 0x111100, 0x04020000, 0x04020000);
+ }
+
+ /* If we're disabling the DLL, do it now */
+ switch (next->bios.ramcfg_10_DLLoff * ram->base.type) {
+ case NV_MEM_TYPE_DDR3:
+ nouveau_sddr3_dll_disable(fuc, ram->base.mr);
+ break;
+ case NV_MEM_TYPE_GDDR3:
+ nouveau_gddr3_dll_disable(fuc, ram->base.mr);
+ break;
+ }
- if (!next->bios.rammap_10_04_02)
- ram_mask(fuc, 0x100200, 0x00000800, 0x00000000);
- ram_wr32(fuc, 0x611200, 0x00003300);
- if (!next->bios.ramcfg_10_02_10)
- ram_wr32(fuc, 0x111100, 0x4c020000); /*XXX*/
+ if (fuc->r_gpioFBVREF.addr && next->bios.timing_10_ODT)
+ nva3_ram_fbvref(fuc, 0);
+ /* Brace RAM for impact */
ram_wr32(fuc, 0x1002d4, 0x00000001);
ram_wr32(fuc, 0x1002d0, 0x00000001);
ram_wr32(fuc, 0x1002d0, 0x00000001);
@@ -189,24 +661,38 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
ram_wr32(fuc, 0x1002dc, 0x00000001);
ram_nsec(fuc, 2000);
- ctrl = ram_rd32(fuc, 0x004000);
- if (!(ctrl & 0x00000008) && mclk.pll) {
- ram_wr32(fuc, 0x004000, (ctrl |= 0x00000008));
+ if (nv_device(pfb)->chipset == 0xa3 && freq <= 500000)
+ ram_mask(fuc, 0x100700, 0x00000006, 0x00000006);
+
+ /* Fiddle with clocks */
+ /* There are four scenarios:
+ * pll->pll: first switch to a 324MHz clock, set up new PLL, switch
+ * clk->pll: Set up new PLL, switch
+ * pll->clk: Set up clock, switch
+ * clk->clk: Overwrite ctrl and other bits, switch */
+
+ /* Switch to regular clock - 324MHz */
+ if (pll2pll) {
+ ram_mask(fuc, 0x004000, 0x00000004, 0x00000004);
+ ram_mask(fuc, 0x004168, 0x003f3141, 0x00083101);
+ ram_mask(fuc, 0x004000, 0x00000008, 0x00000008);
ram_mask(fuc, 0x1110e0, 0x00088000, 0x00088000);
ram_wr32(fuc, 0x004018, 0x00001000);
- ram_wr32(fuc, 0x004000, (ctrl &= ~0x00000001));
- ram_wr32(fuc, 0x004004, mclk.pll);
- ram_wr32(fuc, 0x004000, (ctrl |= 0x00000001));
- udelay(64);
- ram_wr32(fuc, 0x004018, 0x00005000 | r004018);
- udelay(20);
- } else
- if (!mclk.pll) {
- ram_mask(fuc, 0x004168, 0x003f3040, mclk.clk);
- ram_wr32(fuc, 0x004000, (ctrl |= 0x00000008));
+ nva3_ram_lock_pll(fuc, &mclk);
+ }
+
+ if (mclk.pll) {
+ ram_mask(fuc, 0x004000, 0x00000105, 0x00000105);
+ ram_wr32(fuc, 0x004018, 0x00001000 | r004018);
+ ram_wr32(fuc, 0x100da0, r100da0);
+ } else {
+ ram_mask(fuc, 0x004168, 0x003f3141, mclk.clk | 0x00000101);
+ ram_mask(fuc, 0x004000, 0x00000108, 0x00000008);
ram_mask(fuc, 0x1110e0, 0x00088000, 0x00088000);
- ram_wr32(fuc, 0x004018, 0x0000d000 | r004018);
+ ram_wr32(fuc, 0x004018, 0x00009000 | r004018);
+ ram_wr32(fuc, 0x100da0, r100da0);
}
+ ram_nsec(fuc, 20000);
if (next->bios.rammap_10_04_08) {
ram_wr32(fuc, 0x1005a0, next->bios.ramcfg_10_06 << 16 |
@@ -220,6 +706,12 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
0x80000000);
ram_mask(fuc, 0x10053c, 0x00001000, 0x00000000);
} else {
+ if (train->state == NVA3_TRAIN_DONE) {
+ ram_wr32(fuc, 0x100080, 0x1020);
+ ram_mask(fuc, 0x111400, 0xffffffff, train->r_111400);
+ ram_mask(fuc, 0x1111e0, 0xffffffff, train->r_1111e0);
+ ram_mask(fuc, 0x100720, 0xffffffff, train->r_100720);
+ }
ram_mask(fuc, 0x10053c, 0x00001000, 0x00001000);
ram_mask(fuc, 0x10f804, 0x80000000, 0x00000000);
ram_mask(fuc, 0x100760, 0x22222222, r100760);
@@ -227,65 +719,131 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
ram_mask(fuc, 0x1007e0, 0x22222222, r100760);
}
+ if (nv_device(pfb)->chipset == 0xa3 && freq > 500000) {
+ ram_mask(fuc, 0x100700, 0x00000006, 0x00000000);
+ }
+
+ /* Final switch */
if (mclk.pll) {
ram_mask(fuc, 0x1110e0, 0x00088000, 0x00011000);
- ram_wr32(fuc, 0x004000, (ctrl &= ~0x00000008));
+ ram_mask(fuc, 0x004000, 0x00000008, 0x00000000);
}
- /*XXX: LEAVE */
ram_wr32(fuc, 0x1002dc, 0x00000000);
ram_wr32(fuc, 0x1002d4, 0x00000001);
ram_wr32(fuc, 0x100210, 0x80000000);
- ram_nsec(fuc, 1000);
- ram_nsec(fuc, 1000);
+ ram_nsec(fuc, 2000);
- ram_mask(fuc, mr[2], 0x00000000, 0x00000000);
- ram_nsec(fuc, 1000);
- ram_nuke(fuc, mr[0]);
- ram_mask(fuc, mr[0], 0x00000000, 0x00000000);
- ram_nsec(fuc, 1000);
+ /* Set RAM MR parameters and timings */
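+ /* Only rewrite the mode registers that changed, MR2 down to MR0 */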
+ for (i = 2; i >= 0; i--) {
+ if (ram_rd32(fuc, mr[i]) != ram->base.mr[i]) {
+ ram_wr32(fuc, mr[i], ram->base.mr[i]);
+ ram_nsec(fuc, 1000);
+ }
+ }
- ram_mask(fuc, 0x100220[3], 0x00000000, 0x00000000);
- ram_mask(fuc, 0x100220[1], 0x00000000, 0x00000000);
- ram_mask(fuc, 0x100220[6], 0x00000000, 0x00000000);
- ram_mask(fuc, 0x100220[7], 0x00000000, 0x00000000);
- ram_mask(fuc, 0x100220[2], 0x00000000, 0x00000000);
- ram_mask(fuc, 0x100220[4], 0x00000000, 0x00000000);
- ram_mask(fuc, 0x100220[5], 0x00000000, 0x00000000);
- ram_mask(fuc, 0x100220[0], 0x00000000, 0x00000000);
- ram_mask(fuc, 0x100220[8], 0x00000000, 0x00000000);
+ ram_wr32(fuc, 0x100220[3], timing[3]);
+ ram_wr32(fuc, 0x100220[1], timing[1]);
+ ram_wr32(fuc, 0x100220[6], timing[6]);
+ ram_wr32(fuc, 0x100220[7], timing[7]);
+ ram_wr32(fuc, 0x100220[2], timing[2]);
+ ram_wr32(fuc, 0x100220[4], timing[4]);
+ ram_wr32(fuc, 0x100220[5], timing[5]);
+ ram_wr32(fuc, 0x100220[0], timing[0]);
+ ram_wr32(fuc, 0x100220[8], timing[8]);
+ /* Misc */
ram_mask(fuc, 0x100200, 0x00001000, !next->bios.ramcfg_10_02_08 << 12);
- unk714 = ram_rd32(fuc, 0x100714) & ~0xf0000010;
- unk718 = ram_rd32(fuc, 0x100718) & ~0x00000100;
- unk71c = ram_rd32(fuc, 0x10071c) & ~0x00000100;
+ /* XXX: A lot of "chipset"/"ram type" specific stuff...? */
+ unk714 = ram_rd32(fuc, 0x100714) & ~0xf0000130;
+ unk718 = ram_rd32(fuc, 0x100718) & ~0x00000100;
+ unk71c = ram_rd32(fuc, 0x10071c) & ~0x00000100;
+ r111100 = ram_rd32(fuc, 0x111100) & ~0x3a800000;
+
+ if (next->bios.ramcfg_10_02_04) {
+ switch (ram->base.type) {
+ case NV_MEM_TYPE_DDR3:
+ if (nv_device(pfb)->chipset != 0xa8)
+ r111100 |= 0x00000004;
+ /* no break */
+ case NV_MEM_TYPE_DDR2:
+ r111100 |= 0x08000000;
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (ram->base.type) {
+ case NV_MEM_TYPE_DDR2:
+ r111100 |= 0x1a800000;
+ unk714 |= 0x00000010;
+ break;
+ case NV_MEM_TYPE_DDR3:
+ if (nv_device(pfb)->chipset == 0xa8) {
+ r111100 |= 0x08000000;
+ } else {
+ r111100 &= ~0x00000004;
+ r111100 |= 0x12800000;
+ }
+ unk714 |= 0x00000010;
+ break;
+ case NV_MEM_TYPE_GDDR3:
+ r111100 |= 0x30000000;
+ unk714 |= 0x00000020;
+ break;
+ default:
+ break;
+ }
+ }
+
+ unk714 |= (next->bios.ramcfg_10_04_01) << 8;
+
if (next->bios.ramcfg_10_02_20)
unk714 |= 0xf0000000;
- if (!next->bios.ramcfg_10_02_04)
- unk714 |= 0x00000010;
- ram_wr32(fuc, 0x100714, unk714);
-
+ if (next->bios.ramcfg_10_02_02)
+ unk718 |= 0x00000100;
if (next->bios.ramcfg_10_02_01)
unk71c |= 0x00000100;
- ram_wr32(fuc, 0x10071c, unk71c);
+ if (next->bios.timing_10_24 != 0xff) {
+ unk718 &= ~0xf0000000;
+ unk718 |= next->bios.timing_10_24 << 28;
+ }
+ if (next->bios.ramcfg_10_02_10)
+ r111100 &= ~0x04020000;
- if (next->bios.ramcfg_10_02_02)
- unk718 |= 0x00000100;
- ram_wr32(fuc, 0x100718, unk718);
+ ram_mask(fuc, 0x100714, 0xffffffff, unk714);
+ ram_mask(fuc, 0x10071c, 0xffffffff, unk71c);
+ ram_mask(fuc, 0x100718, 0xffffffff, unk718);
+ ram_mask(fuc, 0x111100, 0xffffffff, r111100);
- if (next->bios.ramcfg_10_02_10)
- ram_wr32(fuc, 0x111100, 0x48000000); /*XXX*/
+ if (fuc->r_gpioFBVREF.addr && !next->bios.timing_10_ODT)
+ nva3_ram_fbvref(fuc, 1);
- ram_mask(fuc, mr[0], 0x100, 0x100);
- ram_nsec(fuc, 1000);
- ram_mask(fuc, mr[0], 0x100, 0x000);
- ram_nsec(fuc, 1000);
+ /* Reset DLL */
+ if (!next->bios.ramcfg_10_DLLoff)
+ nouveau_sddr2_dll_reset(fuc);
- ram_nsec(fuc, 2000);
- ram_nsec(fuc, 12000);
+ if (ram->base.type == NV_MEM_TYPE_GDDR3) {
+ ram_nsec(fuc, 31000);
+ } else {
+ ram_nsec(fuc, 14000);
+ }
+
+ if (ram->base.type == NV_MEM_TYPE_DDR3) {
+ ram_wr32(fuc, 0x100264, 0x1);
+ ram_nsec(fuc, 2000);
+ }
- ram_wr32(fuc, 0x611200, 0x00003330);
+ ram_nuke(fuc, 0x100700);
+ ram_mask(fuc, 0x100700, 0x01000000, 0x01000000);
+ ram_mask(fuc, 0x100700, 0x01000000, 0x00000000);
+
+ /* Re-enable FB */
+ ram_unblock(fuc);
+ ram_wr32(fuc, 0x611200, 0x3330);
+
+ /* Post fiddlings */
if (next->bios.rammap_10_04_02)
ram_mask(fuc, 0x100200, 0x00000800, 0x00000800);
if (next->bios.ramcfg_10_02_10) {
@@ -313,7 +871,22 @@ nva3_ram_prog(struct nouveau_fb *pfb)
struct nouveau_device *device = nv_device(pfb);
struct nva3_ram *ram = (void *)pfb->ram;
struct nva3_ramfuc *fuc = &ram->fuc;
- ram_exec(fuc, nouveau_boolopt(device->cfgopt, "NvMemExec", true));
+ bool exec = nouveau_boolopt(device->cfgopt, "NvMemExec", true);
+
+ if (exec) {
+ nv_mask(pfb, 0x001534, 0x2, 0x2);
+
+ ram_exec(fuc, true);
+
+ /* Post-processing, avoids flicker */
+ nv_mask(pfb, 0x002504, 0x1, 0x0);
+ nv_mask(pfb, 0x001534, 0x2, 0x0);
+
+ nv_mask(pfb, 0x616308, 0x10, 0x10);
+ nv_mask(pfb, 0x616b08, 0x10, 0x10);
+ } else {
+ ram_exec(fuc, false);
+ }
return 0;
}
@@ -330,38 +903,24 @@ nva3_ram_init(struct nouveau_object *object)
{
struct nouveau_fb *pfb = (void *)object->parent;
struct nva3_ram *ram = (void *)object;
- int ret, i;
+ int ret;
ret = nouveau_ram_init(&ram->base);
if (ret)
return ret;
- /* prepare for ddr link training, and load training patterns */
- switch (ram->base.type) {
- case NV_MEM_TYPE_DDR3: {
- if (nv_device(pfb)->chipset == 0xa8) {
- static const u32 pattern[16] = {
- 0xaaaaaaaa, 0xcccccccc, 0xdddddddd, 0xeeeeeeee,
- 0x00000000, 0x11111111, 0x44444444, 0xdddddddd,
- 0x33333333, 0x55555555, 0x77777777, 0x66666666,
- 0x99999999, 0x88888888, 0xeeeeeeee, 0xbbbbbbbb,
- };
-
- nv_wr32(pfb, 0x100538, 0x10001ff6); /*XXX*/
- nv_wr32(pfb, 0x1005a8, 0x0000ffff);
- nv_mask(pfb, 0x10f800, 0x00000001, 0x00000001);
- for (i = 0; i < 0x30; i++) {
- nv_wr32(pfb, 0x10f8c0, (i << 8) | i);
- nv_wr32(pfb, 0x10f8e0, (i << 8) | i);
- nv_wr32(pfb, 0x10f900, pattern[i % 16]);
- nv_wr32(pfb, 0x10f920, pattern[i % 16]);
- }
- }
- }
- break;
- default:
- break;
- }
+ nva3_link_train_init(pfb);
+
+ return 0;
+}
+
+static int
+nva3_ram_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nouveau_fb *pfb = (void *)object->parent;
+
+ if (!suspend)
+ nva3_link_train_fini(pfb);
return 0;
}
@@ -371,8 +930,12 @@ nva3_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 datasize,
struct nouveau_object **pobject)
{
+ struct nouveau_fb *pfb = nouveau_fb(parent);
+ struct nouveau_gpio *gpio = nouveau_gpio(pfb);
+ struct dcb_gpio_func func;
struct nva3_ram *ram;
int ret, i;
+ u32 reg, shift;
ret = nv50_ram_create(parent, engine, oclass, &ram);
*pobject = nv_object(ram);
@@ -380,7 +943,9 @@ nva3_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
switch (ram->base.type) {
+ case NV_MEM_TYPE_DDR2:
case NV_MEM_TYPE_DDR3:
+ case NV_MEM_TYPE_GDDR3:
ram->base.calc = nva3_ram_calc;
ram->base.prog = nva3_ram_prog;
ram->base.tidy = nva3_ram_tidy;
@@ -390,31 +955,41 @@ nva3_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
+ ram->fuc.r_0x001610 = ramfuc_reg(0x001610);
+ ram->fuc.r_0x001700 = ramfuc_reg(0x001700);
+ ram->fuc.r_0x002504 = ramfuc_reg(0x002504);
ram->fuc.r_0x004000 = ramfuc_reg(0x004000);
ram->fuc.r_0x004004 = ramfuc_reg(0x004004);
ram->fuc.r_0x004018 = ramfuc_reg(0x004018);
ram->fuc.r_0x004128 = ramfuc_reg(0x004128);
ram->fuc.r_0x004168 = ramfuc_reg(0x004168);
+ ram->fuc.r_0x100080 = ramfuc_reg(0x100080);
ram->fuc.r_0x100200 = ramfuc_reg(0x100200);
ram->fuc.r_0x100210 = ramfuc_reg(0x100210);
for (i = 0; i < 9; i++)
ram->fuc.r_0x100220[i] = ramfuc_reg(0x100220 + (i * 4));
+ ram->fuc.r_0x100264 = ramfuc_reg(0x100264);
ram->fuc.r_0x1002d0 = ramfuc_reg(0x1002d0);
ram->fuc.r_0x1002d4 = ramfuc_reg(0x1002d4);
ram->fuc.r_0x1002dc = ramfuc_reg(0x1002dc);
ram->fuc.r_0x10053c = ramfuc_reg(0x10053c);
ram->fuc.r_0x1005a0 = ramfuc_reg(0x1005a0);
ram->fuc.r_0x1005a4 = ramfuc_reg(0x1005a4);
+ ram->fuc.r_0x100700 = ramfuc_reg(0x100700);
ram->fuc.r_0x100714 = ramfuc_reg(0x100714);
ram->fuc.r_0x100718 = ramfuc_reg(0x100718);
ram->fuc.r_0x10071c = ramfuc_reg(0x10071c);
+ ram->fuc.r_0x100720 = ramfuc_reg(0x100720);
ram->fuc.r_0x100760 = ramfuc_stride(0x100760, 4, ram->base.part_mask);
ram->fuc.r_0x1007a0 = ramfuc_stride(0x1007a0, 4, ram->base.part_mask);
ram->fuc.r_0x1007e0 = ramfuc_stride(0x1007e0, 4, ram->base.part_mask);
+ ram->fuc.r_0x100da0 = ramfuc_stride(0x100da0, 4, ram->base.part_mask);
ram->fuc.r_0x10f804 = ramfuc_reg(0x10f804);
ram->fuc.r_0x1110e0 = ramfuc_stride(0x1110e0, 4, ram->base.part_mask);
ram->fuc.r_0x111100 = ramfuc_reg(0x111100);
ram->fuc.r_0x111104 = ramfuc_reg(0x111104);
+ ram->fuc.r_0x1111e0 = ramfuc_reg(0x1111e0);
+ ram->fuc.r_0x111400 = ramfuc_reg(0x111400);
ram->fuc.r_0x611200 = ramfuc_reg(0x611200);
if (ram->base.ranks > 1) {
@@ -429,6 +1004,12 @@ nva3_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
ram->fuc.r_mr[3] = ramfuc_reg(0x1002e4);
}
+ ret = gpio->find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
+ if (ret == 0) {
+ nv50_gpio_location(func.line, &reg, &shift);
+ ram->fuc.r_gpioFBVREF = ramfuc_reg(reg);
+ }
+
return 0;
}
@@ -438,6 +1019,6 @@ nva3_ram_oclass = {
.ctor = nva3_ram_ctor,
.dtor = _nouveau_ram_dtor,
.init = nva3_ram_init,
- .fini = _nouveau_ram_fini,
+ .fini = nva3_ram_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/sddr2.c b/drivers/gpu/drm/nouveau/core/subdev/fb/sddr2.c
index bb1eb8f3e63..252575f3aa2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/sddr2.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/sddr2.c
@@ -66,7 +66,7 @@ nouveau_sddr2_calc(struct nouveau_ram *ram)
case 0x10:
CL = ram->next->bios.timing_10_CL;
WR = ram->next->bios.timing_10_WR;
- DLL = !ram->next->bios.ramcfg_10_02_40;
+ DLL = !ram->next->bios.ramcfg_10_DLLoff;
ODT = ram->next->bios.timing_10_ODT & 3;
break;
case 0x20:
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c b/drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c
index 83949b11833..a2dca4869e5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c
@@ -80,7 +80,7 @@ nouveau_sddr3_calc(struct nouveau_ram *ram)
CWL = ram->next->bios.timing_10_CWL;
CL = ram->next->bios.timing_10_CL;
WR = ram->next->bios.timing_10_WR;
- DLL = !ram->next->bios.ramcfg_10_02_40;
+ DLL = !ram->next->bios.ramcfg_10_DLLoff;
ODT = ram->next->bios.timing_10_ODT;
break;
case 0x20:
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
index 1864fa98e6b..2e30d5a62d6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
@@ -54,7 +54,7 @@ nv50_gpio_reset(struct nouveau_gpio *gpio, u8 match)
}
}
-static int
+int
nv50_gpio_location(int line, u32 *reg, u32 *shift)
{
const u32 nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index 2b1bf545e48..0dc605db7ec 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -473,18 +473,56 @@ nouveau_i2c_extdev_sclass[] = {
nouveau_anx9805_sclass,
};
+static void
+nouveau_i2c_create_port(struct nouveau_i2c *i2c, int index, u8 type,
+ struct dcb_i2c_entry *info)
+{
+ const struct nouveau_i2c_impl *impl = (void *)nv_oclass(i2c);
+ struct nouveau_oclass *oclass;
+ struct nouveau_object *parent;
+ struct nouveau_object *object;
+ int ret, pad;
+
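+ /* Ports sharing a pad with another port use the shared-pad class keyed
+ * by the share index; all others get an exclusive pad keyed by their
+ * own drive/auxch index. */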
+ if (info->share != DCB_I2C_UNUSED) {
+ pad = info->share;
+ oclass = impl->pad_s;
+ } else {
+ if (type != DCB_I2C_NVIO_AUX)
+ pad = 0x100 + info->drive;
+ else
+ pad = 0x100 + info->auxch;
+ oclass = impl->pad_x;
+ }
+
+ ret = nouveau_object_ctor(NULL, nv_object(i2c), oclass, NULL, pad,
+ &parent);
+ if (ret < 0)
+ return;
+
+ oclass = impl->sclass;
+ do {
+ ret = -EINVAL;
+ if (oclass->handle == type) {
+ ret = nouveau_object_ctor(parent, nv_object(i2c),
+ oclass, info, index,
+ &object);
+ }
+ } while (ret && (++oclass)->handle);
+
+ nouveau_object_ref(NULL, &parent);
+}
+
int
nouveau_i2c_create_(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass,
int length, void **pobject)
{
- const struct nouveau_i2c_impl *impl = (void *)oclass;
struct nouveau_bios *bios = nouveau_bios(parent);
struct nouveau_i2c *i2c;
struct nouveau_object *object;
struct dcb_i2c_entry info;
- int ret, i, j, index = -1, pad;
+ int ret, i, j, index = -1;
struct dcb_output outp;
u8 ver, hdr;
u32 data;
@@ -507,43 +545,40 @@ nouveau_i2c_create_(struct nouveau_object *parent,
INIT_LIST_HEAD(&i2c->ports);
while (!dcb_i2c_parse(bios, ++index, &info)) {
- if (info.type == DCB_I2C_UNUSED)
+ switch (info.type) {
+ case DCB_I2C_NV04_BIT:
+ case DCB_I2C_NV4E_BIT:
+ case DCB_I2C_NVIO_BIT:
+ nouveau_i2c_create_port(i2c, NV_I2C_PORT(index),
+ info.type, &info);
+ break;
+ case DCB_I2C_NVIO_AUX:
+ nouveau_i2c_create_port(i2c, NV_I2C_AUX(index),
+ info.type, &info);
+ break;
+ case DCB_I2C_PMGR:
+ if (info.drive != DCB_I2C_UNUSED) {
+ nouveau_i2c_create_port(i2c, NV_I2C_PORT(index),
+ DCB_I2C_NVIO_BIT,
+ &info);
+ }
+ if (info.auxch != DCB_I2C_UNUSED) {
+ nouveau_i2c_create_port(i2c, NV_I2C_AUX(index),
+ DCB_I2C_NVIO_AUX,
+ &info);
+ }
+ break;
+ case DCB_I2C_UNUSED:
+ default:
continue;
-
- if (info.share != DCB_I2C_UNUSED) {
- if (info.type == DCB_I2C_NVIO_AUX)
- pad = info.drive;
- else
- pad = info.share;
- oclass = impl->pad_s;
- } else {
- pad = 0x100 + info.drive;
- oclass = impl->pad_x;
}
-
- ret = nouveau_object_ctor(NULL, *pobject, oclass,
- NULL, pad, &parent);
- if (ret < 0)
- continue;
-
- oclass = impl->sclass;
- do {
- ret = -EINVAL;
- if (oclass->handle == info.type) {
- ret = nouveau_object_ctor(parent, *pobject,
- oclass, &info,
- index, &object);
- }
- } while (ret && (++oclass)->handle);
-
- nouveau_object_ref(NULL, &parent);
}
/* in addition to the busses specified in the i2c table, there
* may be ddc/aux channels hiding behind external tmds/dp/etc
* transmitters.
*/
- index = ((index + 0x0f) / 0x10) * 0x10;
+ index = NV_I2C_EXT(0);
i = -1;
while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &outp))) {
if (!outp.location || !outp.extdev)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/gm204.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/gm204.c
new file mode 100644
index 00000000000..06a2b87ccbf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/gm204.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+#define AUX_DBG(fmt, args...) nv_debug(aux, "AUXCH(%d): " fmt, ch, ##args)
+#define AUX_ERR(fmt, args...) nv_error(aux, "AUXCH(%d): " fmt, ch, ##args)
+
+static void
+auxch_fini(struct nouveau_i2c *aux, int ch)
+{
+ nv_mask(aux, 0x00d954 + (ch * 0x50), 0x00310000, 0x00000000);
+}
+
+static int
+auxch_init(struct nouveau_i2c *aux, int ch)
+{
+ const u32 unksel = 1; /* nfi which to use, or if it matters.. */
+ const u32 ureq = unksel ? 0x00100000 : 0x00200000;
+ const u32 urep = unksel ? 0x01000000 : 0x02000000;
+ u32 ctrl, timeout;
+
+ /* wait up to 1ms for any previous transaction to be done... */
+ timeout = 1000;
+ do {
+ ctrl = nv_rd32(aux, 0x00d954 + (ch * 0x50));
+ udelay(1);
+ if (!timeout--) {
+ AUX_ERR("begin idle timeout 0x%08x\n", ctrl);
+ return -EBUSY;
+ }
+ } while (ctrl & 0x03010000);
+
+ /* set some magic, and wait up to 1ms for it to appear */
+ nv_mask(aux, 0x00d954 + (ch * 0x50), 0x00300000, ureq);
+ timeout = 1000;
+ do {
+ ctrl = nv_rd32(aux, 0x00d954 + (ch * 0x50));
+ udelay(1);
+ if (!timeout--) {
+ AUX_ERR("magic wait 0x%08x\n", ctrl);
+ auxch_fini(aux, ch);
+ return -EBUSY;
+ }
+ } while ((ctrl & 0x03000000) != urep);
+
+ return 0;
+}
+
+int
+gm204_aux(struct nouveau_i2c_port *base, bool retry,
+ u8 type, u32 addr, u8 *data, u8 size)
+{
+ struct nouveau_i2c *aux = nouveau_i2c(base);
+ struct nv50_i2c_port *port = (void *)base;
+ u32 ctrl, stat, timeout, retries;
+ u32 xbuf[4] = {};
+ int ch = port->addr;
+ int ret, i;
+
+ AUX_DBG("%d: 0x%08x %d\n", type, addr, size);
+
+ ret = auxch_init(aux, ch);
+ if (ret)
+ goto out;
+
+ stat = nv_rd32(aux, 0x00d958 + (ch * 0x50));
+ if (!(stat & 0x10000000)) {
+ AUX_DBG("sink not detected\n");
+ ret = -ENXIO;
+ goto out;
+ }
+
+ if (!(type & 1)) {
+ memcpy(xbuf, data, size);
+ for (i = 0; i < 16; i += 4) {
+ AUX_DBG("wr 0x%08x\n", xbuf[i / 4]);
+ nv_wr32(aux, 0x00d930 + (ch * 0x50) + i, xbuf[i / 4]);
+ }
+ }
+
+ ctrl = nv_rd32(aux, 0x00d954 + (ch * 0x50));
+ ctrl &= ~0x0001f0ff;
+ ctrl |= type << 12;
+ ctrl |= size - 1;
+ nv_wr32(aux, 0x00d950 + (ch * 0x50), addr);
+
+ /* (maybe) retry transaction a number of times on failure... */
+ for (retries = 0; !ret && retries < 32; retries++) {
+ /* reset, and delay a while if this is a retry */
+ nv_wr32(aux, 0x00d954 + (ch * 0x50), 0x80000000 | ctrl);
+ nv_wr32(aux, 0x00d954 + (ch * 0x50), 0x00000000 | ctrl);
+ if (retries)
+ udelay(400);
+
+ /* transaction request, wait up to 1ms for it to complete */
+ nv_wr32(aux, 0x00d954 + (ch * 0x50), 0x00010000 | ctrl);
+
+ timeout = 1000;
+ do {
+ ctrl = nv_rd32(aux, 0x00d954 + (ch * 0x50));
+ udelay(1);
+ if (!timeout--) {
+ AUX_ERR("tx req timeout 0x%08x\n", ctrl);
+ ret = -EIO;
+ goto out;
+ }
+ } while (ctrl & 0x00010000);
+ ret = 1;
+
+ /* read status, and check if transaction completed ok */
+ stat = nv_mask(aux, 0x00d958 + (ch * 0x50), 0, 0);
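+ /* these two status codes appear to ask for another attempt; ret stays 0
+ * so the loop repeats when retries are allowed */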
+ if ((stat & 0x000f0000) == 0x00080000 ||
+ (stat & 0x000f0000) == 0x00020000)
+ ret = retry ? 0 : 1;
+ if ((stat & 0x00000100))
+ ret = -ETIMEDOUT;
+ if ((stat & 0x00000e00))
+ ret = -EIO;
+
+ AUX_DBG("%02d 0x%08x 0x%08x\n", retries, ctrl, stat);
+ }
+
+ if (type & 1) {
+ for (i = 0; i < 16; i += 4) {
+ xbuf[i / 4] = nv_rd32(aux, 0x00d940 + (ch * 0x50) + i);
+ AUX_DBG("rd 0x%08x\n", xbuf[i / 4]);
+ }
+ memcpy(data, xbuf, size);
+ }
+
+out:
+ auxch_fini(aux, ch);
+ return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
+}
+
+static const struct nouveau_i2c_func
+gm204_aux_func = {
+ .aux = gm204_aux,
+};
+
+int
+gm204_aux_port_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 index,
+ struct nouveau_object **pobject)
+{
+ struct dcb_i2c_entry *info = data;
+ struct nv50_i2c_port *port;
+ int ret;
+
+ ret = nouveau_i2c_port_create(parent, engine, oclass, index,
+ &nouveau_i2c_aux_algo, &gm204_aux_func,
+ &port);
+ *pobject = nv_object(port);
+ if (ret)
+ return ret;
+
+ port->base.aux = info->auxch;
+ port->addr = info->auxch;
+ return 0;
+}
+
+struct nouveau_oclass
+gm204_i2c_sclass[] = {
+ { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvd0_i2c_port_ctor,
+ .dtor = _nouveau_i2c_port_dtor,
+ .init = nv50_i2c_port_init,
+ .fini = _nouveau_i2c_port_fini,
+ },
+ },
+ { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = gm204_aux_port_ctor,
+ .dtor = _nouveau_i2c_port_dtor,
+ .init = _nouveau_i2c_port_init,
+ .fini = _nouveau_i2c_port_fini,
+ },
+ },
+ {}
+};
+
+struct nouveau_oclass *
+gm204_i2c_oclass = &(struct nouveau_i2c_impl) {
+ .base.handle = NV_SUBDEV(I2C, 0x24),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_i2c_ctor,
+ .dtor = _nouveau_i2c_dtor,
+ .init = _nouveau_i2c_init,
+ .fini = _nouveau_i2c_fini,
+ },
+ .sclass = gm204_i2c_sclass,
+ .pad_x = &nv04_i2c_pad_oclass,
+ .pad_s = &gm204_i2c_pad_oclass,
+ .aux = 8,
+ .aux_stat = nve0_aux_stat,
+ .aux_mask = nve0_aux_mask,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h
index 5d2a77421c7..9ef965692fb 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h
@@ -10,8 +10,6 @@ struct nv50_i2c_priv {
struct nv50_i2c_port {
struct nouveau_i2c_port base;
u32 addr;
- u32 ctrl;
- u32 data;
u32 state;
};
@@ -29,4 +27,8 @@ int nv94_aux_port_ctor(struct nouveau_object *, struct nouveau_object *,
void nv94_i2c_acquire(struct nouveau_i2c_port *);
void nv94_i2c_release(struct nouveau_i2c_port *);
+int nvd0_i2c_port_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+
#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c
index f59c3a25546..e383ee81f4d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c
@@ -214,10 +214,6 @@ nv94_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
port->state = 7;
port->addr = nv50_i2c_addr[info->drive];
- if (info->share != DCB_I2C_UNUSED) {
- port->ctrl = 0x00e500 + (info->share * 0x50);
- port->data = 0x0000e001;
- }
return 0;
}
@@ -242,13 +238,8 @@ nv94_aux_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- port->base.aux = info->drive;
- port->addr = info->drive;
- if (info->share != DCB_I2C_UNUSED) {
- port->ctrl = 0x00e500 + (info->drive * 0x50);
- port->data = 0x00002002;
- }
-
+ port->base.aux = info->auxch;
+ port->addr = info->auxch;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c
index 364ddb1c5f0..fd99380502e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c
@@ -48,7 +48,7 @@ nvd0_i2c_func = {
.sense_sda = nvd0_i2c_sense_sda,
};
-static int
+int
nvd0_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 index,
struct nouveau_object **pobject)
@@ -66,10 +66,6 @@ nvd0_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
port->state = 0x00000007;
port->addr = 0x00d014 + (info->drive * 0x20);
- if (info->share != DCB_I2C_UNUSED) {
- port->ctrl = 0x00e500 + (info->share * 0x50);
- port->data = 0x0000e001;
- }
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nve0.c
index cae77e1ad8d..25fe5c2d110 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nve0.c
@@ -24,7 +24,7 @@
#include "nv50.h"
-static void
+void
nve0_aux_stat(struct nouveau_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx)
{
u32 intr = nv_rd32(i2c, 0x00dc60);
@@ -38,7 +38,7 @@ nve0_aux_stat(struct nouveau_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx)
nv_wr32(i2c, 0x00dc60, intr);
}
-static void
+void
nve0_aux_mask(struct nouveau_i2c *i2c, u32 type, u32 mask, u32 data)
{
u32 temp = nv_rd32(i2c, 0x00dc68), i;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/padgm204.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/padgm204.c
new file mode 100644
index 00000000000..f0e6fbbaa8c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/padgm204.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "pad.h"
+
+struct gm204_i2c_pad {
+ struct nvkm_i2c_pad base;
+ int addr;
+};
+
+static int
+gm204_i2c_pad_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nouveau_i2c *i2c = (void *)object->engine;
+ struct gm204_i2c_pad *pad = (void *)object;
+ nv_mask(i2c, 0x00d97c + pad->addr, 0x00000001, 0x00000001);
+ return nvkm_i2c_pad_fini(&pad->base, suspend);
+}
+
+static int
+gm204_i2c_pad_init(struct nouveau_object *object)
+{
+ struct nouveau_i2c *i2c = (void *)object->engine;
+ struct gm204_i2c_pad *pad = (void *)object;
+
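+ /* XXX: presumably routes the pad for AUX or bit-banged I2C depending on
+ * the type of the port about to use it */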
+ switch (nv_oclass(pad->base.next)->handle) {
+ case NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX):
+ nv_mask(i2c, 0x00d970 + pad->addr, 0x0000c003, 0x00000002);
+ break;
+ case NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT):
+ default:
+ nv_mask(i2c, 0x00d970 + pad->addr, 0x0000c003, 0x0000c001);
+ break;
+ }
+
+ nv_mask(i2c, 0x00d97c + pad->addr, 0x00000001, 0x00000000);
+ return nvkm_i2c_pad_init(&pad->base);
+}
+
+static int
+gm204_i2c_pad_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 index,
+ struct nouveau_object **pobject)
+{
+ struct gm204_i2c_pad *pad;
+ int ret;
+
+ ret = nvkm_i2c_pad_create(parent, engine, oclass, index, &pad);
+ *pobject = nv_object(pad);
+ if (ret)
+ return ret;
+
+ pad->addr = index * 0x50;
+ return 0;
+}
+
+struct nouveau_oclass
+gm204_i2c_pad_oclass = {
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = gm204_i2c_pad_ctor,
+ .dtor = _nvkm_i2c_pad_dtor,
+ .init = gm204_i2c_pad_init,
+ .fini = gm204_i2c_pad_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/priv.h b/drivers/gpu/drm/nouveau/core/subdev/i2c/priv.h
index 780090b6425..4fe7ae3fde4 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/priv.h
@@ -5,6 +5,7 @@
extern struct nouveau_oclass nv04_i2c_pad_oclass;
extern struct nouveau_oclass nv94_i2c_pad_oclass;
+extern struct nouveau_oclass gm204_i2c_pad_oclass;
#define nouveau_i2c_port_create(p,e,o,i,a,f,d) \
nouveau_i2c_port_create_((p), (e), (o), (i), (a), (f), \
@@ -82,4 +83,7 @@ struct nouveau_i2c_impl {
void nv94_aux_stat(struct nouveau_i2c *, u32 *, u32 *, u32 *, u32 *);
void nv94_aux_mask(struct nouveau_i2c *, u32, u32, u32);
+void nve0_aux_stat(struct nouveau_i2c *, u32 *, u32 *, u32 *, u32 *);
+void nve0_aux_mask(struct nouveau_i2c *, u32, u32, u32);
+
#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc
index e89789a53b8..ec03f9a4290 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc
@@ -50,6 +50,7 @@ handler(WR32 , 0x0000, 0x0002, #memx_func_wr32)
handler(WAIT , 0x0004, 0x0000, #memx_func_wait)
handler(DELAY , 0x0001, 0x0000, #memx_func_delay)
handler(VBLANK, 0x0001, 0x0000, #memx_func_wait_vblank)
+handler(TRAIN , 0x0000, 0x0000, #memx_func_train)
memx_func_tail:
.equ #memx_func_size #memx_func_next - #memx_func_head
@@ -63,6 +64,10 @@ memx_ts_end:
memx_data_head:
.skip 0x0800
memx_data_tail:
+
+memx_train_head:
+.skip 0x0100
+memx_train_tail:
#endif
/******************************************************************************
@@ -260,6 +265,101 @@ memx_func_delay:
// description
//
// $r15 - current (memx)
+// $r4 - packet length
+// $r3 - opcode description
+// $r0 - zero
+memx_func_train:
+#if NVKM_PPWR_CHIPSET == GT215
+// $r5 - outer loop counter
+// $r6 - inner loop counter
+// $r7 - entry counter (#memx_train_head + $r7)
+ movw $r5 0x3
+ movw $r7 0x0
+
+// Read random memory to wake up... things
+ imm32($r9, 0x700000)
+ nv_rd32($r8,$r9)
+ movw $r14 0x2710
+ call(nsec)
+
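+// Sweep: the outer loop steps $r5 from 3 to 6 (a value derived from it is
+// written to 0x1111e0), the inner loop runs 16 steps (written to 0x100720);
+// each step ANDs four reads of 0x100560 and stores one word at
+// #memx_train_head, filling the 0x100-byte result buffer.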
+ memx_func_train_loop_outer:
+ mulu $r8 $r5 0x101
+ sethi $r8 0x02000000
+ imm32($r9, 0x1111e0)
+ nv_wr32($r9, $r8)
+ push $r5
+
+ movw $r6 0x0
+ memx_func_train_loop_inner:
+ movw $r8 0x1111
+ mulu $r9 $r6 $r8
+ shl b32 $r8 $r9 0x10
+ or $r8 $r9
+ imm32($r9, 0x100720)
+ nv_wr32($r9, $r8)
+
+ imm32($r9, 0x100080)
+ nv_rd32($r8, $r9)
+ or $r8 $r8 0x20
+ nv_wr32($r9, $r8)
+
+ imm32($r9, 0x10053c)
+ imm32($r8, 0x80003002)
+ nv_wr32($r9, $r8)
+
+ imm32($r14, 0x100560)
+ imm32($r13, 0x80000000)
+ add b32 $r12 $r13 0
+ imm32($r11, 0x001e8480)
+ call(wait)
+
+ // $r5 - inner inner loop counter
+ // $r9 - result
+ movw $r5 0
+ imm32($r9, 0x8300ffff)
+ memx_func_train_loop_4x:
+ imm32($r10, 0x100080)
+ nv_rd32($r8, $r10)
+ imm32($r11, 0xffffffdf)
+ and $r8 $r11
+ nv_wr32($r10, $r8)
+
+ imm32($r10, 0x10053c)
+ imm32($r8, 0x80003002)
+ nv_wr32($r10, $r8)
+
+ imm32($r14, 0x100560)
+ imm32($r13, 0x80000000)
+ mov b32 $r12 $r13
+ imm32($r11, 0x00002710)
+ call(wait)
+
+ nv_rd32($r13, $r14)
+ and $r9 $r9 $r13
+
+ add b32 $r5 1
+ cmp b16 $r5 0x4
+ bra l #memx_func_train_loop_4x
+
+ add b32 $r10 $r7 #memx_train_head
+ st b32 D[$r10 + 0] $r9
+ add b32 $r6 1
+ add b32 $r7 4
+
+ cmp b16 $r6 0x10
+ bra l #memx_func_train_loop_inner
+
+ pop $r5
+ add b32 $r5 1
+ cmp b16 $r5 7
+ bra l #memx_func_train_loop_outer
+
+#endif
+ ret
+
+// description
+//
+// $r15 - current (memx)
// $r14 - sender process name
// $r13 - message (exec)
// $r12 - head of script
@@ -307,8 +407,19 @@ memx_exec:
// $r11 - data1
// $r0 - zero
memx_info:
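+ // $r12 (data0): 1 selects the link-training result buffer, anything else the regular data buffer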
+ cmp b16 $r12 0x1
+ bra e #memx_info_train
+
+ memx_info_data:
mov $r12 #memx_data_head
mov $r11 #memx_data_tail - #memx_data_head
+ bra #memx_info_send
+
+ memx_info_train:
+ mov $r12 #memx_train_head
+ mov $r11 #memx_train_tail - #memx_train_head
+
+ memx_info_send:
call(send)
ret
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
index 4d278a96b2b..713e11e2953 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
@@ -46,8 +46,8 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x584d454d,
- 0x0000061c,
- 0x0000060e,
+ 0x0000062d,
+ 0x0000061f,
0x00000000,
0x00000000,
0x00000000,
@@ -68,8 +68,8 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x46524550,
- 0x00000620,
- 0x0000061e,
+ 0x00000631,
+ 0x0000062f,
0x00000000,
0x00000000,
0x00000000,
@@ -90,8 +90,8 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x5f433249,
- 0x00000a24,
- 0x000008cb,
+ 0x00000a35,
+ 0x000008dc,
0x00000000,
0x00000000,
0x00000000,
@@ -112,8 +112,8 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x54534554,
- 0x00000a45,
- 0x00000a26,
+ 0x00000a56,
+ 0x00000a37,
0x00000000,
0x00000000,
0x00000000,
@@ -134,8 +134,8 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x454c4449,
- 0x00000a50,
- 0x00000a4e,
+ 0x00000a61,
+ 0x00000a5f,
0x00000000,
0x00000000,
0x00000000,
@@ -246,13 +246,15 @@ uint32_t nv108_pwr_data[] = {
0x00010006,
0x00000000,
0x0000057b,
-/* 0x03b8: memx_func_tail */
-/* 0x03b8: memx_ts_start */
+ 0x00000007,
0x00000000,
-/* 0x03bc: memx_ts_end */
+ 0x000005c3,
+/* 0x03c4: memx_func_tail */
+/* 0x03c4: memx_ts_start */
0x00000000,
-/* 0x03c0: memx_data_head */
+/* 0x03c8: memx_ts_end */
0x00000000,
+/* 0x03cc: memx_data_head */
0x00000000,
0x00000000,
0x00000000,
@@ -764,8 +766,75 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
-/* 0x0bc0: memx_data_tail */
-/* 0x0bc0: i2c_scl_map */
+ 0x00000000,
+/* 0x0bcc: memx_data_tail */
+/* 0x0bcc: memx_train_head */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0ccc: memx_train_tail */
+/* 0x0ccc: i2c_scl_map */
0x00000400,
0x00000800,
0x00001000,
@@ -776,7 +845,7 @@ uint32_t nv108_pwr_data[] = {
0x00020000,
0x00040000,
0x00080000,
-/* 0x0be8: i2c_sda_map */
+/* 0x0cf4: i2c_sda_map */
0x00100000,
0x00200000,
0x00400000,
@@ -844,9 +913,6 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
};
uint32_t nv108_pwr_code[] = {
@@ -1215,10 +1281,10 @@ uint32_t nv108_pwr_code[] = {
0xf40464f0,
0x2c06f70b,
0xb50066cf,
- 0x00f8ee06,
+ 0x00f8f106,
/* 0x0500: memx_func_leave */
0x66cf2c06,
- 0xef06b500,
+ 0xf206b500,
0xe4400406,
0x0006f607,
/* 0x0512: memx_func_leave_wait */
@@ -1270,370 +1336,374 @@ uint32_t nv108_pwr_code[] = {
0x9800f800,
0x10b6001e,
0x005d7e04,
-/* 0x05c3: memx_exec */
- 0xf900f800,
- 0xb2d0f9e0,
-/* 0x05cb: memx_exec_next */
- 0x98b2b2c1,
- 0x10b60013,
- 0xf034e704,
- 0xe033e701,
- 0x0132b601,
- 0x980c30f0,
- 0x55f9de35,
- 0x1ef412a6,
- 0xee0b98e5,
- 0xbbef0c98,
- 0xc44b02cb,
- 0x00bbcf07,
- 0xe0fcd0fc,
- 0x0002c27e,
-/* 0x0602: memx_info */
- 0xc04c00f8,
+/* 0x05c3: memx_func_train */
+ 0xf800f800,
+/* 0x05c5: memx_exec */
+ 0xf9e0f900,
+ 0xb2c1b2d0,
+/* 0x05cd: memx_exec_next */
+ 0x001398b2,
+ 0xe70410b6,
+ 0xe701f034,
+ 0xb601e033,
+ 0x30f00132,
+ 0xde35980c,
+ 0x12a655f9,
+ 0x98e51ef4,
+ 0x0c98f10b,
+ 0x02cbbbf2,
+ 0xcf07c44b,
+ 0xd0fc00bb,
+ 0xc27ee0fc,
+ 0x00f80002,
+/* 0x0604: memx_info */
+ 0xf401c670,
+/* 0x060a: memx_info_data */
+ 0xcc4c0c0b,
0x08004b03,
- 0x0002c27e,
-/* 0x060e: memx_recv */
- 0xd6b000f8,
- 0xb20bf401,
- 0xf400d6b0,
- 0x00f8eb0b,
-/* 0x061c: memx_init */
-/* 0x061e: perf_recv */
- 0x00f800f8,
-/* 0x0620: perf_init */
-/* 0x0622: i2c_drive_scl */
- 0x36b000f8,
- 0x0d0bf400,
- 0xf607e040,
- 0x04bd0001,
-/* 0x0632: i2c_drive_scl_lo */
- 0xe44000f8,
- 0x0001f607,
- 0x00f804bd,
-/* 0x063c: i2c_drive_sda */
- 0xf40036b0,
- 0xe0400d0b,
- 0x0002f607,
- 0x00f804bd,
-/* 0x064c: i2c_drive_sda_lo */
- 0xf607e440,
- 0x04bd0002,
-/* 0x0656: i2c_sense_scl */
- 0x32f400f8,
- 0x07c44301,
- 0xfd0033cf,
- 0x0bf40431,
- 0x0131f406,
-/* 0x0668: i2c_sense_scl_done */
-/* 0x066a: i2c_sense_sda */
- 0x32f400f8,
- 0x07c44301,
- 0xfd0033cf,
- 0x0bf40432,
- 0x0131f406,
-/* 0x067c: i2c_sense_sda_done */
-/* 0x067e: i2c_raise_scl */
- 0x40f900f8,
- 0x03089844,
- 0x06227e01,
-/* 0x0689: i2c_raise_scl_wait */
- 0x03e84e00,
- 0x00005d7e,
- 0x0006567e,
- 0xb60901f4,
- 0x1bf40142,
-/* 0x069d: i2c_raise_scl_done */
- 0xf840fcef,
-/* 0x06a1: i2c_start */
- 0x06567e00,
- 0x0d11f400,
- 0x00066a7e,
- 0xf40611f4,
-/* 0x06b2: i2c_start_rep */
- 0x00032e0e,
- 0x0006227e,
- 0x3c7e0103,
- 0x76bb0006,
- 0x0465b600,
- 0x659450f9,
- 0x0256bb04,
- 0x75fd50bd,
- 0x7e50fc04,
- 0xb600067e,
- 0x11f40464,
-/* 0x06dd: i2c_start_send */
- 0x7e00031d,
- 0x4e00063c,
- 0x5d7e1388,
- 0x00030000,
- 0x0006227e,
- 0x7e13884e,
-/* 0x06f7: i2c_start_out */
- 0xf800005d,
-/* 0x06f9: i2c_stop */
- 0x7e000300,
- 0x03000622,
- 0x063c7e00,
- 0x03e84e00,
- 0x00005d7e,
- 0x227e0103,
- 0x884e0006,
- 0x005d7e13,
+/* 0x0613: memx_info_train */
+ 0x4c090ef4,
+ 0x004b0bcc,
+/* 0x0619: memx_info_send */
+ 0x02c27e01,
+/* 0x061f: memx_recv */
+ 0xb000f800,
+ 0x0bf401d6,
+ 0x00d6b0a3,
+ 0xf8dc0bf4,
+/* 0x062d: memx_init */
+/* 0x062f: perf_recv */
+ 0xf800f800,
+/* 0x0631: perf_init */
+/* 0x0633: i2c_drive_scl */
+ 0xb000f800,
+ 0x0bf40036,
+ 0x07e0400d,
+ 0xbd0001f6,
+/* 0x0643: i2c_drive_scl_lo */
+ 0x4000f804,
+ 0x01f607e4,
+ 0xf804bd00,
+/* 0x064d: i2c_drive_sda */
+ 0x0036b000,
+ 0x400d0bf4,
+ 0x02f607e0,
+ 0xf804bd00,
+/* 0x065d: i2c_drive_sda_lo */
+ 0x07e44000,
+ 0xbd0002f6,
+/* 0x0667: i2c_sense_scl */
+ 0xf400f804,
+ 0xc4430132,
+ 0x0033cf07,
+ 0xf40431fd,
+ 0x31f4060b,
+/* 0x0679: i2c_sense_scl_done */
+/* 0x067b: i2c_sense_sda */
+ 0xf400f801,
+ 0xc4430132,
+ 0x0033cf07,
+ 0xf40432fd,
+ 0x31f4060b,
+/* 0x068d: i2c_sense_sda_done */
+/* 0x068f: i2c_raise_scl */
+ 0xf900f801,
+ 0x08984440,
+ 0x337e0103,
+/* 0x069a: i2c_raise_scl_wait */
+ 0xe84e0006,
+ 0x005d7e03,
+ 0x06677e00,
+ 0x0901f400,
+ 0xf40142b6,
+/* 0x06ae: i2c_raise_scl_done */
+ 0x40fcef1b,
+/* 0x06b2: i2c_start */
+ 0x677e00f8,
+ 0x11f40006,
+ 0x067b7e0d,
+ 0x0611f400,
+/* 0x06c3: i2c_start_rep */
+ 0x032e0ef4,
+ 0x06337e00,
0x7e010300,
- 0x4e00063c,
- 0x5d7e1388,
- 0x00f80000,
-/* 0x0728: i2c_bitw */
- 0x00063c7e,
- 0x7e03e84e,
- 0xbb00005d,
+ 0xbb00064d,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x00067e7e,
+ 0x00068f7e,
0xf40464b6,
- 0x884e1711,
- 0x005d7e13,
- 0x7e000300,
- 0x4e000622,
- 0x5d7e1388,
-/* 0x0766: i2c_bitw_out */
- 0x00f80000,
-/* 0x0768: i2c_bitr */
- 0x3c7e0103,
+/* 0x06ee: i2c_start_send */
+ 0x00031d11,
+ 0x00064d7e,
+ 0x7e13884e,
+ 0x0300005d,
+ 0x06337e00,
+ 0x13884e00,
+ 0x00005d7e,
+/* 0x0708: i2c_start_out */
+/* 0x070a: i2c_stop */
+ 0x000300f8,
+ 0x0006337e,
+ 0x4d7e0003,
0xe84e0006,
0x005d7e03,
- 0x0076bb00,
- 0xf90465b6,
- 0x04659450,
- 0xbd0256bb,
- 0x0475fd50,
- 0x7e7e50fc,
- 0x64b60006,
- 0x1a11f404,
- 0x00066a7e,
- 0x227e0003,
- 0x884e0006,
- 0x005d7e13,
- 0x013cf000,
-/* 0x07ab: i2c_bitr_done */
- 0xf80131f4,
-/* 0x07ad: i2c_get_byte */
- 0x04000500,
-/* 0x07b1: i2c_get_byte_next */
- 0x0154b608,
+ 0x7e010300,
+ 0x4e000633,
+ 0x5d7e1388,
+ 0x01030000,
+ 0x00064d7e,
+ 0x7e13884e,
+ 0xf800005d,
+/* 0x0739: i2c_bitw */
+ 0x064d7e00,
+ 0x03e84e00,
+ 0x00005d7e,
0xb60076bb,
0x50f90465,
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0x07687e50,
+ 0x068f7e50,
0x0464b600,
- 0xfd2a11f4,
- 0x42b60553,
- 0xd81bf401,
- 0x76bb0103,
+ 0x4e1711f4,
+ 0x5d7e1388,
+ 0x00030000,
+ 0x0006337e,
+ 0x7e13884e,
+/* 0x0777: i2c_bitw_out */
+ 0xf800005d,
+/* 0x0779: i2c_bitr */
+ 0x7e010300,
+ 0x4e00064d,
+ 0x5d7e03e8,
+ 0x76bb0000,
0x0465b600,
0x659450f9,
0x0256bb04,
0x75fd50bd,
0x7e50fc04,
- 0xb6000728,
-/* 0x07fa: i2c_get_byte_done */
- 0x00f80464,
-/* 0x07fc: i2c_put_byte */
-/* 0x07fe: i2c_put_byte_next */
- 0x42b60804,
- 0x3854ff01,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0x07287e50,
- 0x0464b600,
- 0xb03411f4,
- 0x1bf40046,
- 0x0076bbd8,
+ 0xb600068f,
+ 0x11f40464,
+ 0x067b7e1a,
+ 0x7e000300,
+ 0x4e000633,
+ 0x5d7e1388,
+ 0x3cf00000,
+ 0x0131f401,
+/* 0x07bc: i2c_bitr_done */
+/* 0x07be: i2c_get_byte */
+ 0x000500f8,
+/* 0x07c2: i2c_get_byte_next */
+ 0x54b60804,
+ 0x0076bb01,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x797e50fc,
+ 0x64b60007,
+ 0x2a11f404,
+ 0xb60553fd,
+ 0x1bf40142,
+ 0xbb0103d8,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x0007397e,
+/* 0x080b: i2c_get_byte_done */
+ 0xf80464b6,
+/* 0x080d: i2c_put_byte */
+/* 0x080f: i2c_put_byte_next */
+ 0xb6080400,
+ 0x54ff0142,
+ 0x0076bb38,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
- 0x687e50fc,
+ 0x397e50fc,
0x64b60007,
- 0x0f11f404,
- 0xb00076bb,
- 0x1bf40136,
- 0x0132f406,
-/* 0x0854: i2c_put_byte_done */
-/* 0x0856: i2c_addr */
- 0x76bb00f8,
+ 0x3411f404,
+ 0xf40046b0,
+ 0x76bbd81b,
0x0465b600,
0x659450f9,
0x0256bb04,
0x75fd50bd,
0x7e50fc04,
- 0xb60006a1,
+ 0xb6000779,
0x11f40464,
- 0x2ec3e729,
- 0x0134b601,
- 0xbb0553fd,
+ 0x0076bb0f,
+ 0xf40136b0,
+ 0x32f4061b,
+/* 0x0865: i2c_put_byte_done */
+/* 0x0867: i2c_addr */
+ 0xbb00f801,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x0007fc7e,
-/* 0x089b: i2c_addr_done */
- 0xf80464b6,
-/* 0x089d: i2c_acquire_addr */
- 0xf8cec700,
- 0xb705e4b6,
- 0xf8d014e0,
-/* 0x08a9: i2c_acquire */
- 0x089d7e00,
- 0x00047e00,
- 0x03d9f000,
- 0x00002e7e,
-/* 0x08ba: i2c_release */
- 0x9d7e00f8,
+ 0x0006b27e,
+ 0xf40464b6,
+ 0xc3e72911,
+ 0x34b6012e,
+ 0x0553fd01,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x080d7e50,
+ 0x0464b600,
+/* 0x08ac: i2c_addr_done */
+/* 0x08ae: i2c_acquire_addr */
+ 0xcec700f8,
+ 0x05e4b6f8,
+ 0xd014e0b7,
+/* 0x08ba: i2c_acquire */
+ 0xae7e00f8,
0x047e0008,
- 0xdaf00000,
+ 0xd9f00000,
0x002e7e03,
-/* 0x08cb: i2c_recv */
- 0xf400f800,
- 0xc1c70132,
- 0x0214b6f8,
- 0xf52816b0,
- 0xb801371f,
- 0x000be813,
- 0xb8003298,
- 0x000bc013,
- 0xf4003198,
- 0xd0f90231,
- 0xd0f9e0f9,
- 0x000067f1,
- 0x100063f1,
- 0xbb016792,
+/* 0x08cb: i2c_release */
+ 0x7e00f800,
+ 0x7e0008ae,
+ 0xf0000004,
+ 0x2e7e03da,
+ 0x00f80000,
+/* 0x08dc: i2c_recv */
+ 0xc70132f4,
+ 0x14b6f8c1,
+ 0x2816b002,
+ 0x01371ff5,
+ 0x0cf413b8,
+ 0x00329800,
+ 0x0ccc13b8,
+ 0x00319800,
+ 0xf90231f4,
+ 0xf9e0f9d0,
+ 0x0067f1d0,
+ 0x0063f100,
+ 0x01679210,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x08ba7e50,
+ 0x0464b600,
+ 0xd6b0d0fc,
+ 0xb01bf500,
+ 0xbb000500,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x0008a97e,
- 0xfc0464b6,
- 0x00d6b0d0,
- 0x00b01bf5,
- 0x76bb0005,
+ 0x0008677e,
+ 0xf50464b6,
+ 0xc700cc11,
+ 0x76bbe0c5,
0x0465b600,
0x659450f9,
0x0256bb04,
0x75fd50bd,
0x7e50fc04,
- 0xb6000856,
+ 0xb600080d,
0x11f50464,
- 0xc5c700cc,
- 0x0076bbe0,
- 0xf90465b6,
- 0x04659450,
- 0xbd0256bb,
- 0x0475fd50,
- 0xfc7e50fc,
- 0x64b60007,
- 0xa911f504,
- 0xbb010500,
- 0x65b60076,
- 0x9450f904,
- 0x56bb0465,
- 0xfd50bd02,
- 0x50fc0475,
- 0x0008567e,
- 0xf50464b6,
- 0xbb008711,
- 0x65b60076,
- 0x9450f904,
- 0x56bb0465,
- 0xfd50bd02,
- 0x50fc0475,
- 0x0007ad7e,
- 0xf40464b6,
- 0x5bcb6711,
- 0x0076bbe0,
- 0xf90465b6,
- 0x04659450,
- 0xbd0256bb,
- 0x0475fd50,
- 0xf97e50fc,
- 0x64b60006,
- 0xbd5bb204,
- 0x410ef474,
-/* 0x09d0: i2c_recv_not_rd08 */
- 0xf401d6b0,
- 0x00053b1b,
- 0x0008567e,
- 0xc73211f4,
- 0xfc7ee0c5,
- 0x11f40007,
- 0x7e000528,
- 0xf4000856,
- 0xb5c71f11,
- 0x07fc7ee0,
- 0x1511f400,
- 0x0006f97e,
- 0xc5c774bd,
- 0x091bf408,
- 0xf40232f4,
-/* 0x0a0e: i2c_recv_not_wr08 */
-/* 0x0a0e: i2c_recv_done */
- 0xcec7030e,
- 0x08ba7ef8,
- 0xfce0fc00,
- 0x0912f4d0,
- 0xc27e7cb2,
-/* 0x0a22: i2c_recv_exit */
- 0x00f80002,
-/* 0x0a24: i2c_init */
-/* 0x0a26: test_recv */
- 0x584100f8,
- 0x0011cf04,
- 0x400110b6,
- 0x01f60458,
- 0xf104bd00,
- 0xf1d900e7,
- 0x7e134fe3,
- 0xf8000201,
-/* 0x0a45: test_init */
- 0x08004e00,
- 0x0002017e,
-/* 0x0a4e: idle_recv */
- 0x00f800f8,
-/* 0x0a50: idle */
- 0x410031f4,
- 0x11cf0454,
+ 0x010500a9,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x08677e50,
+ 0x0464b600,
+ 0x008711f5,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x07be7e50,
+ 0x0464b600,
+ 0xcb6711f4,
+ 0x76bbe05b,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0x7e50fc04,
+ 0xb600070a,
+ 0x5bb20464,
+ 0x0ef474bd,
+/* 0x09e1: i2c_recv_not_rd08 */
+ 0x01d6b041,
+ 0x053b1bf4,
+ 0x08677e00,
+ 0x3211f400,
+ 0x7ee0c5c7,
+ 0xf400080d,
+ 0x00052811,
+ 0x0008677e,
+ 0xc71f11f4,
+ 0x0d7ee0b5,
+ 0x11f40008,
+ 0x070a7e15,
+ 0xc774bd00,
+ 0x1bf408c5,
+ 0x0232f409,
+/* 0x0a1f: i2c_recv_not_wr08 */
+/* 0x0a1f: i2c_recv_done */
+ 0xc7030ef4,
+ 0xcb7ef8ce,
+ 0xe0fc0008,
+ 0x12f4d0fc,
+ 0x7e7cb209,
+/* 0x0a33: i2c_recv_exit */
+ 0xf80002c2,
+/* 0x0a35: i2c_init */
+/* 0x0a37: test_recv */
+ 0x4100f800,
+ 0x11cf0458,
0x0110b600,
- 0xf6045440,
+ 0xf6045840,
0x04bd0001,
-/* 0x0a64: idle_loop */
- 0x32f45801,
-/* 0x0a69: idle_proc */
-/* 0x0a69: idle_proc_exec */
- 0xb210f902,
- 0x02cb7e1e,
- 0xf410fc00,
- 0x31f40911,
- 0xf00ef402,
-/* 0x0a7c: idle_proc_next */
- 0xa65810b6,
- 0xe81bf41f,
- 0xf4e002f4,
- 0x0ef40028,
- 0x000000c6,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0xd900e7f1,
+ 0x134fe3f1,
+ 0x0002017e,
+/* 0x0a56: test_init */
+ 0x004e00f8,
+ 0x02017e08,
+/* 0x0a5f: idle_recv */
+ 0xf800f800,
+/* 0x0a61: idle */
+ 0x0031f400,
+ 0xcf045441,
+ 0x10b60011,
+ 0x04544001,
+ 0xbd0001f6,
+/* 0x0a75: idle_loop */
+ 0xf4580104,
+/* 0x0a7a: idle_proc */
+/* 0x0a7a: idle_proc_exec */
+ 0x10f90232,
+ 0xcb7e1eb2,
+ 0x10fc0002,
+ 0xf40911f4,
+ 0x0ef40231,
+/* 0x0a8d: idle_proc_next */
+ 0x5810b6f0,
+ 0x1bf41fa6,
+ 0xe002f4e8,
+ 0xf40028f4,
+ 0x0000c60e,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
index 64e97baabc3..d1f9b6cb66d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
@@ -46,8 +46,8 @@ uint32_t nva3_pwr_data[] = {
0x00000000,
0x00000000,
0x584d454d,
- 0x000006e0,
- 0x000006d2,
+ 0x00000842,
+ 0x00000834,
0x00000000,
0x00000000,
0x00000000,
@@ -68,8 +68,8 @@ uint32_t nva3_pwr_data[] = {
0x00000000,
0x00000000,
0x46524550,
- 0x000006e4,
- 0x000006e2,
+ 0x00000846,
+ 0x00000844,
0x00000000,
0x00000000,
0x00000000,
@@ -90,8 +90,8 @@ uint32_t nva3_pwr_data[] = {
0x00000000,
0x00000000,
0x5f433249,
- 0x00000b14,
- 0x000009b7,
+ 0x00000c76,
+ 0x00000b19,
0x00000000,
0x00000000,
0x00000000,
@@ -112,8 +112,8 @@ uint32_t nva3_pwr_data[] = {
0x00000000,
0x00000000,
0x54534554,
- 0x00000b3d,
- 0x00000b16,
+ 0x00000c9f,
+ 0x00000c78,
0x00000000,
0x00000000,
0x00000000,
@@ -134,8 +134,8 @@ uint32_t nva3_pwr_data[] = {
0x00000000,
0x00000000,
0x454c4449,
- 0x00000b49,
- 0x00000b47,
+ 0x00000cab,
+ 0x00000ca9,
0x00000000,
0x00000000,
0x00000000,
@@ -246,13 +246,15 @@ uint32_t nva3_pwr_data[] = {
0x00010006,
0x00000000,
0x000005f8,
-/* 0x03b8: memx_func_tail */
-/* 0x03b8: memx_ts_start */
+ 0x00000007,
0x00000000,
-/* 0x03bc: memx_ts_end */
+ 0x0000067e,
+/* 0x03c4: memx_func_tail */
+/* 0x03c4: memx_ts_start */
0x00000000,
-/* 0x03c0: memx_data_head */
+/* 0x03c8: memx_ts_end */
0x00000000,
+/* 0x03cc: memx_data_head */
0x00000000,
0x00000000,
0x00000000,
@@ -764,8 +766,75 @@ uint32_t nva3_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
-/* 0x0bc0: memx_data_tail */
-/* 0x0bc0: i2c_scl_map */
+ 0x00000000,
+/* 0x0bcc: memx_data_tail */
+/* 0x0bcc: memx_train_head */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0ccc: memx_train_tail */
+/* 0x0ccc: i2c_scl_map */
0x00001000,
0x00004000,
0x00010000,
@@ -776,7 +845,7 @@ uint32_t nva3_pwr_data[] = {
0x01000000,
0x04000000,
0x10000000,
-/* 0x0be8: i2c_sda_map */
+/* 0x0cf4: i2c_sda_map */
0x00002000,
0x00008000,
0x00020000,
@@ -787,7 +856,7 @@ uint32_t nva3_pwr_data[] = {
0x02000000,
0x08000000,
0x20000000,
-/* 0x0c10: i2c_ctrl */
+/* 0x0d1c: i2c_ctrl */
0x0000e138,
0x0000e150,
0x0000e168,
@@ -845,9 +914,6 @@ uint32_t nva3_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
};
uint32_t nva3_pwr_code[] = {
@@ -1258,11 +1324,11 @@ uint32_t nva3_pwr_code[] = {
0x67f0f30b,
0x0664b62c,
0x800066cf,
- 0x00f8ee06,
+ 0x00f8f106,
/* 0x05a8: memx_func_leave */
0xb62c67f0,
0x66cf0664,
- 0xef068000,
+ 0xf2068000,
0xf10467f0,
0xb607e407,
0x06d00604,
@@ -1323,408 +1389,479 @@ uint32_t nva3_pwr_code[] = {
0x9800f8a4,
0x10b6001e,
0x7f21f404,
-/* 0x067e: memx_exec */
- 0xe0f900f8,
- 0xc1b9d0f9,
- 0x02b2b902,
-/* 0x0688: memx_exec_next */
- 0xb6001398,
- 0x34e70410,
- 0x33e701f0,
- 0x32b601e0,
- 0x0c30f001,
- 0xf9de3598,
- 0x0612b855,
- 0x98e41ef4,
- 0x0c98ee0b,
- 0x02cbbbef,
- 0x07c4b7f1,
- 0xcf06b4b6,
- 0xd0fc00bb,
- 0x21f5e0fc,
+/* 0x067e: memx_func_train */
+ 0x57f100f8,
+ 0x77f10003,
+ 0x97f10000,
+ 0x93f00000,
+ 0x029eb970,
+ 0xb90421f4,
+ 0xe7f102d8,
+ 0x21f42710,
+/* 0x069d: memx_func_train_loop_outer */
+ 0x0158e07f,
+ 0x0083f101,
+ 0xe097f102,
+ 0x1193f011,
+ 0x80f990f9,
+ 0xe0fcd0fc,
+ 0xf93f21f4,
+ 0x0067f150,
+/* 0x06bd: memx_func_train_loop_inner */
+ 0x1187f100,
+ 0x9068ff11,
+ 0xfd109894,
+ 0x97f10589,
+ 0x93f00720,
+ 0xf990f910,
+ 0xfcd0fc80,
+ 0x3f21f4e0,
+ 0x008097f1,
+ 0xb91093f0,
+ 0x21f4029e,
+ 0x02d8b904,
+ 0xf92088c5,
+ 0xfc80f990,
+ 0xf4e0fcd0,
+ 0x97f13f21,
+ 0x93f0053c,
+ 0x0287f110,
+ 0x0083f130,
+ 0xf990f980,
+ 0xfcd0fc80,
+ 0x3f21f4e0,
+ 0x0560e7f1,
+ 0xf110e3f0,
+ 0xf10000d7,
+ 0x908000d3,
+ 0xb7f100dc,
+ 0xb3f08480,
+ 0xa421f41e,
+ 0x000057f1,
+ 0xffff97f1,
+ 0x830093f1,
+/* 0x073c: memx_func_train_loop_4x */
+ 0x0080a7f1,
+ 0xb910a3f0,
+ 0x21f402ae,
+ 0x02d8b904,
+ 0xffdfb7f1,
+ 0xffffb3f1,
+ 0xf9048bfd,
+ 0xfc80f9a0,
+ 0xf4e0fcd0,
+ 0xa7f13f21,
+ 0xa3f0053c,
+ 0x0287f110,
+ 0x0083f130,
+ 0xf9a0f980,
+ 0xfcd0fc80,
+ 0x3f21f4e0,
+ 0x0560e7f1,
+ 0xf110e3f0,
+ 0xf10000d7,
+ 0xb98000d3,
+ 0xb7f102dc,
+ 0xb3f02710,
+ 0xa421f400,
+ 0xf402eeb9,
+ 0xddb90421,
+ 0x949dff02,
+ 0x700150b6,
+ 0x1ef40456,
+ 0xcc7aa092,
+ 0x00a9800b,
+ 0xb60160b6,
+ 0x66700470,
+ 0x001ef510,
+ 0xb650fcff,
+ 0x56700150,
+ 0xd41ef507,
+/* 0x07cf: memx_exec */
+ 0xf900f8fe,
+ 0xb9d0f9e0,
+ 0xb2b902c1,
+/* 0x07d9: memx_exec_next */
+ 0x00139802,
+ 0xe70410b6,
+ 0xe701f034,
+ 0xb601e033,
+ 0x30f00132,
+ 0xde35980c,
+ 0x12b855f9,
+ 0xe41ef406,
+ 0x98f10b98,
+ 0xcbbbf20c,
+ 0xc4b7f102,
+ 0x06b4b607,
+ 0xfc00bbcf,
+ 0xf5e0fcd0,
+ 0xf8034221,
+/* 0x0815: memx_info */
+ 0x01c67000,
+/* 0x081b: memx_info_data */
+ 0xf10e0bf4,
+ 0xf103ccc7,
+ 0xf40800b7,
+/* 0x0826: memx_info_train */
+ 0xc7f10b0e,
+ 0xb7f10bcc,
+/* 0x082e: memx_info_send */
+ 0x21f50100,
0x00f80342,
-/* 0x06c4: memx_info */
- 0x03c0c7f1,
- 0x0800b7f1,
- 0x034221f5,
-/* 0x06d2: memx_recv */
- 0xd6b000f8,
- 0xa90bf401,
- 0xf400d6b0,
- 0x00f8e90b,
-/* 0x06e0: memx_init */
-/* 0x06e2: perf_recv */
+/* 0x0834: memx_recv */
+ 0xf401d6b0,
+ 0xd6b0980b,
+ 0xd80bf400,
+/* 0x0842: memx_init */
+ 0x00f800f8,
+/* 0x0844: perf_recv */
+/* 0x0846: perf_init */
0x00f800f8,
-/* 0x06e4: perf_init */
-/* 0x06e6: i2c_drive_scl */
+/* 0x0848: i2c_drive_scl */
+ 0xf40036b0,
+ 0x07f1110b,
+ 0x04b607e0,
+ 0x0001d006,
+ 0x00f804bd,
+/* 0x085c: i2c_drive_scl_lo */
+ 0x07e407f1,
+ 0xd00604b6,
+ 0x04bd0001,
+/* 0x086a: i2c_drive_sda */
0x36b000f8,
0x110bf400,
0x07e007f1,
0xd00604b6,
- 0x04bd0001,
-/* 0x06fa: i2c_drive_scl_lo */
+ 0x04bd0002,
+/* 0x087e: i2c_drive_sda_lo */
0x07f100f8,
0x04b607e4,
- 0x0001d006,
- 0x00f804bd,
-/* 0x0708: i2c_drive_sda */
- 0xf40036b0,
- 0x07f1110b,
- 0x04b607e0,
0x0002d006,
0x00f804bd,
-/* 0x071c: i2c_drive_sda_lo */
- 0x07e407f1,
- 0xd00604b6,
- 0x04bd0002,
-/* 0x072a: i2c_sense_scl */
- 0x32f400f8,
- 0xc437f101,
- 0x0634b607,
- 0xfd0033cf,
- 0x0bf40431,
- 0x0131f406,
-/* 0x0740: i2c_sense_scl_done */
-/* 0x0742: i2c_sense_sda */
- 0x32f400f8,
- 0xc437f101,
- 0x0634b607,
- 0xfd0033cf,
- 0x0bf40432,
- 0x0131f406,
-/* 0x0758: i2c_sense_sda_done */
-/* 0x075a: i2c_raise_scl */
- 0x40f900f8,
- 0x089847f1,
- 0xf50137f0,
-/* 0x0767: i2c_raise_scl_wait */
- 0xf106e621,
- 0xf403e8e7,
- 0x21f57f21,
- 0x01f4072a,
- 0x0142b609,
-/* 0x077b: i2c_raise_scl_done */
- 0xfcef1bf4,
-/* 0x077f: i2c_start */
- 0xf500f840,
- 0xf4072a21,
- 0x21f50d11,
- 0x11f40742,
- 0x300ef406,
-/* 0x0790: i2c_start_rep */
- 0xf50037f0,
- 0xf006e621,
- 0x21f50137,
- 0x76bb0708,
- 0x0465b600,
- 0x659450f9,
- 0x0256bb04,
- 0x75fd50bd,
- 0xf550fc04,
- 0xb6075a21,
- 0x11f40464,
-/* 0x07bd: i2c_start_send */
- 0x0037f01f,
- 0x070821f5,
- 0x1388e7f1,
- 0xf07f21f4,
- 0x21f50037,
- 0xe7f106e6,
- 0x21f41388,
-/* 0x07d9: i2c_start_out */
-/* 0x07db: i2c_stop */
- 0xf000f87f,
- 0x21f50037,
- 0x37f006e6,
- 0x0821f500,
- 0xe8e7f107,
+/* 0x088c: i2c_sense_scl */
+ 0xf10132f4,
+ 0xb607c437,
+ 0x33cf0634,
+ 0x0431fd00,
+ 0xf4060bf4,
+/* 0x08a2: i2c_sense_scl_done */
+ 0x00f80131,
+/* 0x08a4: i2c_sense_sda */
+ 0xf10132f4,
+ 0xb607c437,
+ 0x33cf0634,
+ 0x0432fd00,
+ 0xf4060bf4,
+/* 0x08ba: i2c_sense_sda_done */
+ 0x00f80131,
+/* 0x08bc: i2c_raise_scl */
+ 0x47f140f9,
+ 0x37f00898,
+ 0x4821f501,
+/* 0x08c9: i2c_raise_scl_wait */
+ 0xe8e7f108,
0x7f21f403,
- 0xf50137f0,
- 0xf106e621,
- 0xf41388e7,
- 0x37f07f21,
- 0x0821f501,
- 0x88e7f107,
- 0x7f21f413,
-/* 0x080e: i2c_bitw */
- 0x21f500f8,
- 0xe7f10708,
- 0x21f403e8,
- 0x0076bb7f,
- 0xf90465b6,
- 0x04659450,
- 0xbd0256bb,
- 0x0475fd50,
- 0x21f550fc,
- 0x64b6075a,
- 0x1811f404,
- 0x1388e7f1,
- 0xf07f21f4,
+ 0x088c21f5,
+ 0xb60901f4,
+ 0x1bf40142,
+/* 0x08dd: i2c_raise_scl_done */
+ 0xf840fcef,
+/* 0x08e1: i2c_start */
+ 0x8c21f500,
+ 0x0d11f408,
+ 0x08a421f5,
+ 0xf40611f4,
+/* 0x08f2: i2c_start_rep */
+ 0x37f0300e,
+ 0x4821f500,
+ 0x0137f008,
+ 0x086a21f5,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0xbc21f550,
+ 0x0464b608,
+/* 0x091f: i2c_start_send */
+ 0xf01f11f4,
0x21f50037,
- 0xe7f106e6,
+ 0xe7f1086a,
0x21f41388,
-/* 0x084d: i2c_bitw_out */
-/* 0x084f: i2c_bitr */
- 0xf000f87f,
- 0x21f50137,
- 0xe7f10708,
- 0x21f403e8,
- 0x0076bb7f,
- 0xf90465b6,
- 0x04659450,
- 0xbd0256bb,
- 0x0475fd50,
- 0x21f550fc,
- 0x64b6075a,
- 0x1b11f404,
- 0x074221f5,
+ 0x0037f07f,
+ 0x084821f5,
+ 0x1388e7f1,
+/* 0x093b: i2c_start_out */
+ 0xf87f21f4,
+/* 0x093d: i2c_stop */
+ 0x0037f000,
+ 0x084821f5,
0xf50037f0,
- 0xf106e621,
+ 0xf1086a21,
+ 0xf403e8e7,
+ 0x37f07f21,
+ 0x4821f501,
+ 0x88e7f108,
+ 0x7f21f413,
+ 0xf50137f0,
+ 0xf1086a21,
0xf41388e7,
- 0x3cf07f21,
- 0x0131f401,
-/* 0x0894: i2c_bitr_done */
-/* 0x0896: i2c_get_byte */
- 0x57f000f8,
- 0x0847f000,
-/* 0x089c: i2c_get_byte_next */
- 0xbb0154b6,
+ 0x00f87f21,
+/* 0x0970: i2c_bitw */
+ 0x086a21f5,
+ 0x03e8e7f1,
+ 0xbb7f21f4,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x084f21f5,
+ 0x08bc21f5,
0xf40464b6,
- 0x53fd2b11,
- 0x0142b605,
- 0xf0d81bf4,
- 0x76bb0137,
- 0x0465b600,
- 0x659450f9,
- 0x0256bb04,
- 0x75fd50bd,
- 0xf550fc04,
- 0xb6080e21,
-/* 0x08e6: i2c_get_byte_done */
- 0x00f80464,
-/* 0x08e8: i2c_put_byte */
-/* 0x08eb: i2c_put_byte_next */
- 0xb60847f0,
- 0x54ff0142,
- 0x0076bb38,
+ 0xe7f11811,
+ 0x21f41388,
+ 0x0037f07f,
+ 0x084821f5,
+ 0x1388e7f1,
+/* 0x09af: i2c_bitw_out */
+ 0xf87f21f4,
+/* 0x09b1: i2c_bitr */
+ 0x0137f000,
+ 0x086a21f5,
+ 0x03e8e7f1,
+ 0xbb7f21f4,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x08bc21f5,
+ 0xf40464b6,
+ 0x21f51b11,
+ 0x37f008a4,
+ 0x4821f500,
+ 0x88e7f108,
+ 0x7f21f413,
+ 0xf4013cf0,
+/* 0x09f6: i2c_bitr_done */
+ 0x00f80131,
+/* 0x09f8: i2c_get_byte */
+ 0xf00057f0,
+/* 0x09fe: i2c_get_byte_next */
+ 0x54b60847,
+ 0x0076bb01,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b6080e,
- 0x3411f404,
- 0xf40046b0,
- 0x76bbd81b,
- 0x0465b600,
- 0x659450f9,
- 0x0256bb04,
- 0x75fd50bd,
- 0xf550fc04,
- 0xb6084f21,
- 0x11f40464,
- 0x0076bb0f,
- 0xf40136b0,
- 0x32f4061b,
-/* 0x0941: i2c_put_byte_done */
-/* 0x0943: i2c_addr */
- 0xbb00f801,
+ 0x64b609b1,
+ 0x2b11f404,
+ 0xb60553fd,
+ 0x1bf40142,
+ 0x0137f0d8,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x7021f550,
+ 0x0464b609,
+/* 0x0a48: i2c_get_byte_done */
+/* 0x0a4a: i2c_put_byte */
+ 0x47f000f8,
+/* 0x0a4d: i2c_put_byte_next */
+ 0x0142b608,
+ 0xbb3854ff,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x077f21f5,
+ 0x097021f5,
0xf40464b6,
- 0xc3e72911,
- 0x34b6012e,
- 0x0553fd01,
+ 0x46b03411,
+ 0xd81bf400,
0xb60076bb,
0x50f90465,
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0xe821f550,
- 0x0464b608,
-/* 0x0988: i2c_addr_done */
-/* 0x098a: i2c_acquire_addr */
- 0xcec700f8,
- 0x02e4b6f8,
- 0x0c10e0b7,
- 0xf800ee98,
-/* 0x0999: i2c_acquire */
- 0x8a21f500,
- 0x0421f409,
- 0xf403d9f0,
- 0x00f83f21,
-/* 0x09a8: i2c_release */
- 0x098a21f5,
- 0xf00421f4,
- 0x21f403da,
-/* 0x09b7: i2c_recv */
- 0xf400f83f,
- 0xc1c70132,
- 0x0214b6f8,
- 0xf52816b0,
- 0xa0013a1f,
- 0x980be813,
- 0x13a00032,
- 0x31980bc0,
- 0x0231f400,
- 0xe0f9d0f9,
- 0x67f1d0f9,
- 0x63f10000,
- 0x67921000,
- 0x0076bb01,
- 0xf90465b6,
- 0x04659450,
- 0xbd0256bb,
- 0x0475fd50,
- 0x21f550fc,
- 0x64b60999,
- 0xb0d0fc04,
- 0x1bf500d6,
- 0x57f000b3,
+ 0xb121f550,
+ 0x0464b609,
+ 0xbb0f11f4,
+ 0x36b00076,
+ 0x061bf401,
+/* 0x0aa3: i2c_put_byte_done */
+ 0xf80132f4,
+/* 0x0aa5: i2c_addr */
0x0076bb00,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b60943,
- 0xd011f504,
- 0xe0c5c700,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0xe821f550,
- 0x0464b608,
- 0x00ad11f5,
- 0xbb0157f0,
+ 0x64b608e1,
+ 0x2911f404,
+ 0x012ec3e7,
+ 0xfd0134b6,
+ 0x76bb0553,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0xf550fc04,
+ 0xb60a4a21,
+/* 0x0aea: i2c_addr_done */
+ 0x00f80464,
+/* 0x0aec: i2c_acquire_addr */
+ 0xb6f8cec7,
+ 0xe0b702e4,
+ 0xee980d1c,
+/* 0x0afb: i2c_acquire */
+ 0xf500f800,
+ 0xf40aec21,
+ 0xd9f00421,
+ 0x3f21f403,
+/* 0x0b0a: i2c_release */
+ 0x21f500f8,
+ 0x21f40aec,
+ 0x03daf004,
+ 0xf83f21f4,
+/* 0x0b19: i2c_recv */
+ 0x0132f400,
+ 0xb6f8c1c7,
+ 0x16b00214,
+ 0x3a1ff528,
+ 0xf413a001,
+ 0x0032980c,
+ 0x0ccc13a0,
+ 0xf4003198,
+ 0xd0f90231,
+ 0xd0f9e0f9,
+ 0x000067f1,
+ 0x100063f1,
+ 0xbb016792,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x094321f5,
- 0xf50464b6,
- 0xbb008a11,
+ 0x0afb21f5,
+ 0xfc0464b6,
+ 0x00d6b0d0,
+ 0x00b31bf5,
+ 0xbb0057f0,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x089621f5,
- 0xf40464b6,
- 0x5bcb6a11,
- 0x0076bbe0,
+ 0x0aa521f5,
+ 0xf50464b6,
+ 0xc700d011,
+ 0x76bbe0c5,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0xf550fc04,
+ 0xb60a4a21,
+ 0x11f50464,
+ 0x57f000ad,
+ 0x0076bb01,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b607db,
- 0x025bb904,
- 0x0ef474bd,
-/* 0x0abd: i2c_recv_not_rd08 */
- 0x01d6b043,
- 0xf03d1bf4,
- 0x21f50057,
- 0x11f40943,
- 0xe0c5c733,
- 0x08e821f5,
- 0xf02911f4,
- 0x21f50057,
- 0x11f40943,
- 0xe0b5c71f,
- 0x08e821f5,
- 0xf51511f4,
- 0xbd07db21,
- 0x08c5c774,
- 0xf4091bf4,
- 0x0ef40232,
-/* 0x0afd: i2c_recv_not_wr08 */
-/* 0x0afd: i2c_recv_done */
- 0xf8cec703,
- 0x09a821f5,
- 0xd0fce0fc,
- 0xb90a12f4,
- 0x21f5027c,
-/* 0x0b12: i2c_recv_exit */
- 0x00f80342,
-/* 0x0b14: i2c_init */
-/* 0x0b16: test_recv */
- 0x17f100f8,
- 0x14b605d8,
- 0x0011cf06,
- 0xf10110b6,
- 0xb605d807,
- 0x01d00604,
- 0xf104bd00,
- 0xf1d900e7,
- 0xf5134fe3,
- 0xf8026221,
-/* 0x0b3d: test_init */
- 0x00e7f100,
- 0x6221f508,
-/* 0x0b47: idle_recv */
- 0xf800f802,
-/* 0x0b49: idle */
- 0x0031f400,
- 0x05d417f1,
+ 0x64b60aa5,
+ 0x8a11f504,
+ 0x0076bb00,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b609f8,
+ 0x6a11f404,
+ 0xbbe05bcb,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x093d21f5,
+ 0xb90464b6,
+ 0x74bd025b,
+/* 0x0c1f: i2c_recv_not_rd08 */
+ 0xb0430ef4,
+ 0x1bf401d6,
+ 0x0057f03d,
+ 0x0aa521f5,
+ 0xc73311f4,
+ 0x21f5e0c5,
+ 0x11f40a4a,
+ 0x0057f029,
+ 0x0aa521f5,
+ 0xc71f11f4,
+ 0x21f5e0b5,
+ 0x11f40a4a,
+ 0x3d21f515,
+ 0xc774bd09,
+ 0x1bf408c5,
+ 0x0232f409,
+/* 0x0c5f: i2c_recv_not_wr08 */
+/* 0x0c5f: i2c_recv_done */
+ 0xc7030ef4,
+ 0x21f5f8ce,
+ 0xe0fc0b0a,
+ 0x12f4d0fc,
+ 0x027cb90a,
+ 0x034221f5,
+/* 0x0c74: i2c_recv_exit */
+/* 0x0c76: i2c_init */
+ 0x00f800f8,
+/* 0x0c78: test_recv */
+ 0x05d817f1,
0xcf0614b6,
0x10b60011,
- 0xd407f101,
+ 0xd807f101,
0x0604b605,
0xbd0001d0,
-/* 0x0b65: idle_loop */
- 0x5817f004,
-/* 0x0b6b: idle_proc */
-/* 0x0b6b: idle_proc_exec */
- 0xf90232f4,
- 0x021eb910,
- 0x034b21f5,
- 0x11f410fc,
- 0x0231f409,
-/* 0x0b7f: idle_proc_next */
- 0xb6ef0ef4,
- 0x1fb85810,
- 0xe61bf406,
- 0xf4dd02f4,
- 0x0ef40028,
- 0x000000bb,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0x00e7f104,
+ 0x4fe3f1d9,
+ 0x6221f513,
+/* 0x0c9f: test_init */
+ 0xf100f802,
+ 0xf50800e7,
+ 0xf8026221,
+/* 0x0ca9: idle_recv */
+/* 0x0cab: idle */
+ 0xf400f800,
+ 0x17f10031,
+ 0x14b605d4,
+ 0x0011cf06,
+ 0xf10110b6,
+ 0xb605d407,
+ 0x01d00604,
+/* 0x0cc7: idle_loop */
+ 0xf004bd00,
+ 0x32f45817,
+/* 0x0ccd: idle_proc */
+/* 0x0ccd: idle_proc_exec */
+ 0xb910f902,
+ 0x21f5021e,
+ 0x10fc034b,
+ 0xf40911f4,
+ 0x0ef40231,
+/* 0x0ce1: idle_proc_next */
+ 0x5810b6ef,
+ 0xf4061fb8,
+ 0x02f4e61b,
+ 0x0028f4dd,
+ 0x00bb0ef4,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
index ca30fa4011b..90221d973f8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
@@ -46,8 +46,8 @@ uint32_t nvc0_pwr_data[] = {
0x00000000,
0x00000000,
0x584d454d,
- 0x0000074b,
- 0x0000073d,
+ 0x0000075e,
+ 0x00000750,
0x00000000,
0x00000000,
0x00000000,
@@ -68,8 +68,8 @@ uint32_t nvc0_pwr_data[] = {
0x00000000,
0x00000000,
0x46524550,
- 0x0000074f,
- 0x0000074d,
+ 0x00000762,
+ 0x00000760,
0x00000000,
0x00000000,
0x00000000,
@@ -90,8 +90,8 @@ uint32_t nvc0_pwr_data[] = {
0x00000000,
0x00000000,
0x5f433249,
- 0x00000b7f,
- 0x00000a22,
+ 0x00000b92,
+ 0x00000a35,
0x00000000,
0x00000000,
0x00000000,
@@ -112,8 +112,8 @@ uint32_t nvc0_pwr_data[] = {
0x00000000,
0x00000000,
0x54534554,
- 0x00000ba8,
- 0x00000b81,
+ 0x00000bbb,
+ 0x00000b94,
0x00000000,
0x00000000,
0x00000000,
@@ -134,8 +134,8 @@ uint32_t nvc0_pwr_data[] = {
0x00000000,
0x00000000,
0x454c4449,
- 0x00000bb4,
- 0x00000bb2,
+ 0x00000bc7,
+ 0x00000bc5,
0x00000000,
0x00000000,
0x00000000,
@@ -246,13 +246,15 @@ uint32_t nvc0_pwr_data[] = {
0x00010006,
0x00000000,
0x00000663,
-/* 0x03b8: memx_func_tail */
-/* 0x03b8: memx_ts_start */
+ 0x00000007,
0x00000000,
-/* 0x03bc: memx_ts_end */
+ 0x000006e9,
+/* 0x03c4: memx_func_tail */
+/* 0x03c4: memx_ts_start */
0x00000000,
-/* 0x03c0: memx_data_head */
+/* 0x03c8: memx_ts_end */
0x00000000,
+/* 0x03cc: memx_data_head */
0x00000000,
0x00000000,
0x00000000,
@@ -764,8 +766,75 @@ uint32_t nvc0_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
-/* 0x0bc0: memx_data_tail */
-/* 0x0bc0: i2c_scl_map */
+ 0x00000000,
+/* 0x0bcc: memx_data_tail */
+/* 0x0bcc: memx_train_head */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0ccc: memx_train_tail */
+/* 0x0ccc: i2c_scl_map */
0x00001000,
0x00004000,
0x00010000,
@@ -776,7 +845,7 @@ uint32_t nvc0_pwr_data[] = {
0x01000000,
0x04000000,
0x10000000,
-/* 0x0be8: i2c_sda_map */
+/* 0x0cf4: i2c_sda_map */
0x00002000,
0x00008000,
0x00020000,
@@ -787,7 +856,7 @@ uint32_t nvc0_pwr_data[] = {
0x02000000,
0x08000000,
0x20000000,
-/* 0x0c10: i2c_ctrl */
+/* 0x0d1c: i2c_ctrl */
0x0000e138,
0x0000e150,
0x0000e168,
@@ -845,9 +914,6 @@ uint32_t nvc0_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
};
uint32_t nvc0_pwr_code[] = {
@@ -1272,10 +1338,10 @@ uint32_t nvc0_pwr_code[] = {
0xcf0664b6,
0x06800066,
/* 0x05db: memx_func_leave */
- 0xf000f8ee,
+ 0xf000f8f1,
0x64b62c67,
0x0066cf06,
- 0xf0ef0680,
+ 0xf0f20680,
0x07f10467,
0x04b607e4,
0x0006d006,
@@ -1350,382 +1416,450 @@ uint32_t nvc0_pwr_code[] = {
0x1e9800f8,
0x0410b600,
0xf87f21f4,
-/* 0x06e9: memx_exec */
- 0xf9e0f900,
- 0x02c1b9d0,
-/* 0x06f3: memx_exec_next */
- 0x9802b2b9,
- 0x10b60013,
- 0xf034e704,
- 0xe033e701,
- 0x0132b601,
- 0x980c30f0,
- 0x55f9de35,
- 0xf40612b8,
- 0x0b98e41e,
- 0xef0c98ee,
- 0xf102cbbb,
- 0xb607c4b7,
- 0xbbcf06b4,
- 0xfcd0fc00,
- 0x4221f5e0,
-/* 0x072f: memx_info */
- 0xf100f803,
- 0xf103c0c7,
- 0xf50800b7,
+/* 0x06e9: memx_func_train */
+/* 0x06eb: memx_exec */
+ 0xf900f800,
+ 0xb9d0f9e0,
+ 0xb2b902c1,
+/* 0x06f5: memx_exec_next */
+ 0x00139802,
+ 0xe70410b6,
+ 0xe701f034,
+ 0xb601e033,
+ 0x30f00132,
+ 0xde35980c,
+ 0x12b855f9,
+ 0xe41ef406,
+ 0x98f10b98,
+ 0xcbbbf20c,
+ 0xc4b7f102,
+ 0x06b4b607,
+ 0xfc00bbcf,
+ 0xf5e0fcd0,
0xf8034221,
-/* 0x073d: memx_recv */
- 0x01d6b000,
- 0xb0a90bf4,
- 0x0bf400d6,
-/* 0x074b: memx_init */
- 0xf800f8e9,
-/* 0x074d: perf_recv */
-/* 0x074f: perf_init */
- 0xf800f800,
-/* 0x0751: i2c_drive_scl */
- 0x0036b000,
- 0xf1110bf4,
- 0xb607e007,
- 0x01d00604,
- 0xf804bd00,
-/* 0x0765: i2c_drive_scl_lo */
- 0xe407f100,
- 0x0604b607,
- 0xbd0001d0,
-/* 0x0773: i2c_drive_sda */
- 0xb000f804,
- 0x0bf40036,
- 0xe007f111,
- 0x0604b607,
- 0xbd0002d0,
-/* 0x0787: i2c_drive_sda_lo */
- 0xf100f804,
- 0xb607e407,
- 0x02d00604,
- 0xf804bd00,
-/* 0x0795: i2c_sense_scl */
- 0x0132f400,
- 0x07c437f1,
- 0xcf0634b6,
- 0x31fd0033,
- 0x060bf404,
-/* 0x07ab: i2c_sense_scl_done */
- 0xf80131f4,
-/* 0x07ad: i2c_sense_sda */
- 0x0132f400,
- 0x07c437f1,
- 0xcf0634b6,
- 0x32fd0033,
- 0x060bf404,
-/* 0x07c3: i2c_sense_sda_done */
- 0xf80131f4,
-/* 0x07c5: i2c_raise_scl */
- 0xf140f900,
- 0xf0089847,
- 0x21f50137,
-/* 0x07d2: i2c_raise_scl_wait */
- 0xe7f10751,
- 0x21f403e8,
- 0x9521f57f,
- 0x0901f407,
- 0xf40142b6,
-/* 0x07e6: i2c_raise_scl_done */
- 0x40fcef1b,
-/* 0x07ea: i2c_start */
- 0x21f500f8,
- 0x11f40795,
- 0xad21f50d,
- 0x0611f407,
-/* 0x07fb: i2c_start_rep */
- 0xf0300ef4,
- 0x21f50037,
- 0x37f00751,
- 0x7321f501,
- 0x0076bb07,
- 0xf90465b6,
- 0x04659450,
- 0xbd0256bb,
- 0x0475fd50,
- 0x21f550fc,
- 0x64b607c5,
- 0x1f11f404,
-/* 0x0828: i2c_start_send */
- 0xf50037f0,
- 0xf1077321,
- 0xf41388e7,
- 0x37f07f21,
- 0x5121f500,
- 0x88e7f107,
- 0x7f21f413,
-/* 0x0844: i2c_start_out */
-/* 0x0846: i2c_stop */
- 0x37f000f8,
- 0x5121f500,
- 0x0037f007,
- 0x077321f5,
- 0x03e8e7f1,
- 0xf07f21f4,
- 0x21f50137,
- 0xe7f10751,
- 0x21f41388,
- 0x0137f07f,
- 0x077321f5,
- 0x1388e7f1,
- 0xf87f21f4,
-/* 0x0879: i2c_bitw */
- 0x7321f500,
+/* 0x0731: memx_info */
+ 0x01c67000,
+/* 0x0737: memx_info_data */
+ 0xf10e0bf4,
+ 0xf103ccc7,
+ 0xf40800b7,
+/* 0x0742: memx_info_train */
+ 0xc7f10b0e,
+ 0xb7f10bcc,
+/* 0x074a: memx_info_send */
+ 0x21f50100,
+ 0x00f80342,
+/* 0x0750: memx_recv */
+ 0xf401d6b0,
+ 0xd6b0980b,
+ 0xd80bf400,
+/* 0x075e: memx_init */
+ 0x00f800f8,
+/* 0x0760: perf_recv */
+/* 0x0762: perf_init */
+ 0x00f800f8,
+/* 0x0764: i2c_drive_scl */
+ 0xf40036b0,
+ 0x07f1110b,
+ 0x04b607e0,
+ 0x0001d006,
+ 0x00f804bd,
+/* 0x0778: i2c_drive_scl_lo */
+ 0x07e407f1,
+ 0xd00604b6,
+ 0x04bd0001,
+/* 0x0786: i2c_drive_sda */
+ 0x36b000f8,
+ 0x110bf400,
+ 0x07e007f1,
+ 0xd00604b6,
+ 0x04bd0002,
+/* 0x079a: i2c_drive_sda_lo */
+ 0x07f100f8,
+ 0x04b607e4,
+ 0x0002d006,
+ 0x00f804bd,
+/* 0x07a8: i2c_sense_scl */
+ 0xf10132f4,
+ 0xb607c437,
+ 0x33cf0634,
+ 0x0431fd00,
+ 0xf4060bf4,
+/* 0x07be: i2c_sense_scl_done */
+ 0x00f80131,
+/* 0x07c0: i2c_sense_sda */
+ 0xf10132f4,
+ 0xb607c437,
+ 0x33cf0634,
+ 0x0432fd00,
+ 0xf4060bf4,
+/* 0x07d6: i2c_sense_sda_done */
+ 0x00f80131,
+/* 0x07d8: i2c_raise_scl */
+ 0x47f140f9,
+ 0x37f00898,
+ 0x6421f501,
+/* 0x07e5: i2c_raise_scl_wait */
0xe8e7f107,
0x7f21f403,
+ 0x07a821f5,
+ 0xb60901f4,
+ 0x1bf40142,
+/* 0x07f9: i2c_raise_scl_done */
+ 0xf840fcef,
+/* 0x07fd: i2c_start */
+ 0xa821f500,
+ 0x0d11f407,
+ 0x07c021f5,
+ 0xf40611f4,
+/* 0x080e: i2c_start_rep */
+ 0x37f0300e,
+ 0x6421f500,
+ 0x0137f007,
+ 0x078621f5,
0xb60076bb,
0x50f90465,
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0xc521f550,
+ 0xd821f550,
0x0464b607,
- 0xf11811f4,
- 0xf41388e7,
+/* 0x083b: i2c_start_send */
+ 0xf01f11f4,
+ 0x21f50037,
+ 0xe7f10786,
+ 0x21f41388,
+ 0x0037f07f,
+ 0x076421f5,
+ 0x1388e7f1,
+/* 0x0857: i2c_start_out */
+ 0xf87f21f4,
+/* 0x0859: i2c_stop */
+ 0x0037f000,
+ 0x076421f5,
+ 0xf50037f0,
+ 0xf1078621,
+ 0xf403e8e7,
0x37f07f21,
- 0x5121f500,
+ 0x6421f501,
0x88e7f107,
0x7f21f413,
-/* 0x08b8: i2c_bitw_out */
-/* 0x08ba: i2c_bitr */
- 0x37f000f8,
- 0x7321f501,
- 0xe8e7f107,
- 0x7f21f403,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0xc521f550,
- 0x0464b607,
- 0xf51b11f4,
- 0xf007ad21,
- 0x21f50037,
- 0xe7f10751,
+ 0xf50137f0,
+ 0xf1078621,
+ 0xf41388e7,
+ 0x00f87f21,
+/* 0x088c: i2c_bitw */
+ 0x078621f5,
+ 0x03e8e7f1,
+ 0xbb7f21f4,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x07d821f5,
+ 0xf40464b6,
+ 0xe7f11811,
0x21f41388,
- 0x013cf07f,
-/* 0x08ff: i2c_bitr_done */
- 0xf80131f4,
-/* 0x0901: i2c_get_byte */
- 0x0057f000,
-/* 0x0907: i2c_get_byte_next */
- 0xb60847f0,
- 0x76bb0154,
- 0x0465b600,
- 0x659450f9,
- 0x0256bb04,
- 0x75fd50bd,
- 0xf550fc04,
- 0xb608ba21,
- 0x11f40464,
- 0x0553fd2b,
- 0xf40142b6,
- 0x37f0d81b,
+ 0x0037f07f,
+ 0x076421f5,
+ 0x1388e7f1,
+/* 0x08cb: i2c_bitw_out */
+ 0xf87f21f4,
+/* 0x08cd: i2c_bitr */
+ 0x0137f000,
+ 0x078621f5,
+ 0x03e8e7f1,
+ 0xbb7f21f4,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x07d821f5,
+ 0xf40464b6,
+ 0x21f51b11,
+ 0x37f007c0,
+ 0x6421f500,
+ 0x88e7f107,
+ 0x7f21f413,
+ 0xf4013cf0,
+/* 0x0912: i2c_bitr_done */
+ 0x00f80131,
+/* 0x0914: i2c_get_byte */
+ 0xf00057f0,
+/* 0x091a: i2c_get_byte_next */
+ 0x54b60847,
0x0076bb01,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b60879,
-/* 0x0951: i2c_get_byte_done */
-/* 0x0953: i2c_put_byte */
- 0xf000f804,
-/* 0x0956: i2c_put_byte_next */
- 0x42b60847,
- 0x3854ff01,
+ 0x64b608cd,
+ 0x2b11f404,
+ 0xb60553fd,
+ 0x1bf40142,
+ 0x0137f0d8,
0xb60076bb,
0x50f90465,
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0x7921f550,
+ 0x8c21f550,
0x0464b608,
- 0xb03411f4,
- 0x1bf40046,
- 0x0076bbd8,
+/* 0x0964: i2c_get_byte_done */
+/* 0x0966: i2c_put_byte */
+ 0x47f000f8,
+/* 0x0969: i2c_put_byte_next */
+ 0x0142b608,
+ 0xbb3854ff,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x088c21f5,
+ 0xf40464b6,
+ 0x46b03411,
+ 0xd81bf400,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0xcd21f550,
+ 0x0464b608,
+ 0xbb0f11f4,
+ 0x36b00076,
+ 0x061bf401,
+/* 0x09bf: i2c_put_byte_done */
+ 0xf80132f4,
+/* 0x09c1: i2c_addr */
+ 0x0076bb00,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b608ba,
- 0x0f11f404,
- 0xb00076bb,
- 0x1bf40136,
- 0x0132f406,
-/* 0x09ac: i2c_put_byte_done */
-/* 0x09ae: i2c_addr */
- 0x76bb00f8,
+ 0x64b607fd,
+ 0x2911f404,
+ 0x012ec3e7,
+ 0xfd0134b6,
+ 0x76bb0553,
0x0465b600,
0x659450f9,
0x0256bb04,
0x75fd50bd,
0xf550fc04,
- 0xb607ea21,
- 0x11f40464,
- 0x2ec3e729,
- 0x0134b601,
- 0xbb0553fd,
+ 0xb6096621,
+/* 0x0a06: i2c_addr_done */
+ 0x00f80464,
+/* 0x0a08: i2c_acquire_addr */
+ 0xb6f8cec7,
+ 0xe0b702e4,
+ 0xee980d1c,
+/* 0x0a17: i2c_acquire */
+ 0xf500f800,
+ 0xf40a0821,
+ 0xd9f00421,
+ 0x3f21f403,
+/* 0x0a26: i2c_release */
+ 0x21f500f8,
+ 0x21f40a08,
+ 0x03daf004,
+ 0xf83f21f4,
+/* 0x0a35: i2c_recv */
+ 0x0132f400,
+ 0xb6f8c1c7,
+ 0x16b00214,
+ 0x3a1ff528,
+ 0xf413a001,
+ 0x0032980c,
+ 0x0ccc13a0,
+ 0xf4003198,
+ 0xd0f90231,
+ 0xd0f9e0f9,
+ 0x000067f1,
+ 0x100063f1,
+ 0xbb016792,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x095321f5,
-/* 0x09f3: i2c_addr_done */
- 0xf80464b6,
-/* 0x09f5: i2c_acquire_addr */
- 0xf8cec700,
- 0xb702e4b6,
- 0x980c10e0,
- 0x00f800ee,
-/* 0x0a04: i2c_acquire */
- 0x09f521f5,
- 0xf00421f4,
- 0x21f403d9,
-/* 0x0a13: i2c_release */
- 0xf500f83f,
- 0xf409f521,
- 0xdaf00421,
- 0x3f21f403,
-/* 0x0a22: i2c_recv */
- 0x32f400f8,
- 0xf8c1c701,
- 0xb00214b6,
- 0x1ff52816,
- 0x13a0013a,
- 0x32980be8,
- 0xc013a000,
- 0x0031980b,
- 0xf90231f4,
- 0xf9e0f9d0,
- 0x0067f1d0,
- 0x0063f100,
- 0x01679210,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0x0421f550,
- 0x0464b60a,
- 0xd6b0d0fc,
- 0xb31bf500,
- 0x0057f000,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0xae21f550,
- 0x0464b609,
- 0x00d011f5,
- 0xbbe0c5c7,
+ 0x0a1721f5,
+ 0xfc0464b6,
+ 0x00d6b0d0,
+ 0x00b31bf5,
+ 0xbb0057f0,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x095321f5,
+ 0x09c121f5,
0xf50464b6,
- 0xf000ad11,
- 0x76bb0157,
+ 0xc700d011,
+ 0x76bbe0c5,
0x0465b600,
0x659450f9,
0x0256bb04,
0x75fd50bd,
0xf550fc04,
- 0xb609ae21,
+ 0xb6096621,
0x11f50464,
- 0x76bb008a,
- 0x0465b600,
- 0x659450f9,
- 0x0256bb04,
- 0x75fd50bd,
- 0xf550fc04,
- 0xb6090121,
- 0x11f40464,
- 0xe05bcb6a,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0x4621f550,
- 0x0464b608,
- 0xbd025bb9,
- 0x430ef474,
-/* 0x0b28: i2c_recv_not_rd08 */
- 0xf401d6b0,
- 0x57f03d1b,
- 0xae21f500,
- 0x3311f409,
- 0xf5e0c5c7,
- 0xf4095321,
- 0x57f02911,
- 0xae21f500,
- 0x1f11f409,
- 0xf5e0b5c7,
- 0xf4095321,
- 0x21f51511,
- 0x74bd0846,
- 0xf408c5c7,
- 0x32f4091b,
- 0x030ef402,
-/* 0x0b68: i2c_recv_not_wr08 */
-/* 0x0b68: i2c_recv_done */
- 0xf5f8cec7,
- 0xfc0a1321,
- 0xf4d0fce0,
- 0x7cb90a12,
- 0x4221f502,
-/* 0x0b7d: i2c_recv_exit */
-/* 0x0b7f: i2c_init */
- 0xf800f803,
-/* 0x0b81: test_recv */
- 0xd817f100,
- 0x0614b605,
- 0xb60011cf,
- 0x07f10110,
- 0x04b605d8,
- 0x0001d006,
- 0xe7f104bd,
- 0xe3f1d900,
- 0x21f5134f,
- 0x00f80262,
-/* 0x0ba8: test_init */
- 0x0800e7f1,
- 0x026221f5,
-/* 0x0bb2: idle_recv */
+ 0x57f000ad,
+ 0x0076bb01,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b609c1,
+ 0x8a11f504,
+ 0x0076bb00,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b60914,
+ 0x6a11f404,
+ 0xbbe05bcb,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x085921f5,
+ 0xb90464b6,
+ 0x74bd025b,
+/* 0x0b3b: i2c_recv_not_rd08 */
+ 0xb0430ef4,
+ 0x1bf401d6,
+ 0x0057f03d,
+ 0x09c121f5,
+ 0xc73311f4,
+ 0x21f5e0c5,
+ 0x11f40966,
+ 0x0057f029,
+ 0x09c121f5,
+ 0xc71f11f4,
+ 0x21f5e0b5,
+ 0x11f40966,
+ 0x5921f515,
+ 0xc774bd08,
+ 0x1bf408c5,
+ 0x0232f409,
+/* 0x0b7b: i2c_recv_not_wr08 */
+/* 0x0b7b: i2c_recv_done */
+ 0xc7030ef4,
+ 0x21f5f8ce,
+ 0xe0fc0a26,
+ 0x12f4d0fc,
+ 0x027cb90a,
+ 0x034221f5,
+/* 0x0b90: i2c_recv_exit */
+/* 0x0b92: i2c_init */
0x00f800f8,
-/* 0x0bb4: idle */
- 0xf10031f4,
- 0xb605d417,
- 0x11cf0614,
- 0x0110b600,
- 0x05d407f1,
- 0xd00604b6,
- 0x04bd0001,
-/* 0x0bd0: idle_loop */
- 0xf45817f0,
-/* 0x0bd6: idle_proc */
-/* 0x0bd6: idle_proc_exec */
- 0x10f90232,
- 0xf5021eb9,
- 0xfc034b21,
- 0x0911f410,
- 0xf40231f4,
-/* 0x0bea: idle_proc_next */
- 0x10b6ef0e,
- 0x061fb858,
- 0xf4e61bf4,
- 0x28f4dd02,
- 0xbb0ef400,
+/* 0x0b94: test_recv */
+ 0x05d817f1,
+ 0xcf0614b6,
+ 0x10b60011,
+ 0xd807f101,
+ 0x0604b605,
+ 0xbd0001d0,
+ 0x00e7f104,
+ 0x4fe3f1d9,
+ 0x6221f513,
+/* 0x0bbb: test_init */
+ 0xf100f802,
+ 0xf50800e7,
+ 0xf8026221,
+/* 0x0bc5: idle_recv */
+/* 0x0bc7: idle */
+ 0xf400f800,
+ 0x17f10031,
+ 0x14b605d4,
+ 0x0011cf06,
+ 0xf10110b6,
+ 0xb605d407,
+ 0x01d00604,
+/* 0x0be3: idle_loop */
+ 0xf004bd00,
+ 0x32f45817,
+/* 0x0be9: idle_proc */
+/* 0x0be9: idle_proc_exec */
+ 0xb910f902,
+ 0x21f5021e,
+ 0x10fc034b,
+ 0xf40911f4,
+ 0x0ef40231,
+/* 0x0bfd: idle_proc_next */
+ 0x5810b6ef,
+ 0xf4061fb8,
+ 0x02f4e61b,
+ 0x0028f4dd,
+ 0x00bb0ef4,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
0x00000000,
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
index 12d86f72ad1..7e16aab44d8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
@@ -46,8 +46,8 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
0x00000000,
0x584d454d,
- 0x00000678,
- 0x0000066a,
+ 0x0000068b,
+ 0x0000067d,
0x00000000,
0x00000000,
0x00000000,
@@ -68,8 +68,8 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
0x00000000,
0x46524550,
- 0x0000067c,
- 0x0000067a,
+ 0x0000068f,
+ 0x0000068d,
0x00000000,
0x00000000,
0x00000000,
@@ -90,8 +90,8 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
0x00000000,
0x5f433249,
- 0x00000a97,
- 0x0000093a,
+ 0x00000aaa,
+ 0x0000094d,
0x00000000,
0x00000000,
0x00000000,
@@ -112,8 +112,8 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
0x00000000,
0x54534554,
- 0x00000aba,
- 0x00000a99,
+ 0x00000acd,
+ 0x00000aac,
0x00000000,
0x00000000,
0x00000000,
@@ -134,8 +134,8 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
0x00000000,
0x454c4449,
- 0x00000ac6,
- 0x00000ac4,
+ 0x00000ad9,
+ 0x00000ad7,
0x00000000,
0x00000000,
0x00000000,
@@ -246,13 +246,15 @@ uint32_t nvd0_pwr_data[] = {
0x00010006,
0x00000000,
0x000005d3,
-/* 0x03b8: memx_func_tail */
-/* 0x03b8: memx_ts_start */
+ 0x00000007,
0x00000000,
-/* 0x03bc: memx_ts_end */
+ 0x00000619,
+/* 0x03c4: memx_func_tail */
+/* 0x03c4: memx_ts_start */
0x00000000,
-/* 0x03c0: memx_data_head */
+/* 0x03c8: memx_ts_end */
0x00000000,
+/* 0x03cc: memx_data_head */
0x00000000,
0x00000000,
0x00000000,
@@ -764,8 +766,75 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
-/* 0x0bc0: memx_data_tail */
-/* 0x0bc0: i2c_scl_map */
+ 0x00000000,
+/* 0x0bcc: memx_data_tail */
+/* 0x0bcc: memx_train_head */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0ccc: memx_train_tail */
+/* 0x0ccc: i2c_scl_map */
0x00000400,
0x00000800,
0x00001000,
@@ -776,7 +845,7 @@ uint32_t nvd0_pwr_data[] = {
0x00020000,
0x00040000,
0x00080000,
-/* 0x0be8: i2c_sda_map */
+/* 0x0cf4: i2c_sda_map */
0x00100000,
0x00200000,
0x00400000,
@@ -844,9 +913,6 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
};
uint32_t nvd0_pwr_code[] = {
@@ -1236,11 +1302,11 @@ uint32_t nvd0_pwr_code[] = {
0x0bf40464,
0x2c67f0f6,
0x800066cf,
- 0x00f8ee06,
+ 0x00f8f106,
/* 0x0554: memx_func_leave */
0xcf2c67f0,
0x06800066,
- 0x0467f0ef,
+ 0x0467f0f2,
0x07e407f1,
0xbd0006d0,
/* 0x0569: memx_func_leave_wait */
@@ -1292,379 +1358,383 @@ uint32_t nvd0_pwr_code[] = {
0x1e9800f8,
0x0410b600,
0xf86721f4,
-/* 0x0619: memx_exec */
- 0xf9e0f900,
- 0x02c1b9d0,
-/* 0x0623: memx_exec_next */
- 0x9802b2b9,
- 0x10b60013,
- 0xf034e704,
- 0xe033e701,
- 0x0132b601,
- 0x980c30f0,
- 0x55f9de35,
- 0xf40612b8,
- 0x0b98e41e,
- 0xef0c98ee,
- 0xf102cbbb,
- 0xcf07c4b7,
- 0xd0fc00bb,
- 0x21f5e0fc,
- 0x00f802f1,
-/* 0x065c: memx_info */
- 0x03c0c7f1,
- 0x0800b7f1,
+/* 0x0619: memx_func_train */
+/* 0x061b: memx_exec */
+ 0xf900f800,
+ 0xb9d0f9e0,
+ 0xb2b902c1,
+/* 0x0625: memx_exec_next */
+ 0x00139802,
+ 0xe70410b6,
+ 0xe701f034,
+ 0xb601e033,
+ 0x30f00132,
+ 0xde35980c,
+ 0x12b855f9,
+ 0xe41ef406,
+ 0x98f10b98,
+ 0xcbbbf20c,
+ 0xc4b7f102,
+ 0x00bbcf07,
+ 0xe0fcd0fc,
0x02f121f5,
-/* 0x066a: memx_recv */
- 0xd6b000f8,
- 0xac0bf401,
- 0xf400d6b0,
- 0x00f8e90b,
-/* 0x0678: memx_init */
-/* 0x067a: perf_recv */
- 0x00f800f8,
-/* 0x067c: perf_init */
-/* 0x067e: i2c_drive_scl */
- 0x36b000f8,
- 0x0e0bf400,
- 0x07e007f1,
- 0xbd0001d0,
-/* 0x068f: i2c_drive_scl_lo */
- 0xf100f804,
- 0xd007e407,
+/* 0x065e: memx_info */
+ 0xc67000f8,
+ 0x0e0bf401,
+/* 0x0664: memx_info_data */
+ 0x03ccc7f1,
+ 0x0800b7f1,
+/* 0x066f: memx_info_train */
+ 0xf10b0ef4,
+ 0xf10bccc7,
+/* 0x0677: memx_info_send */
+ 0xf50100b7,
+ 0xf802f121,
+/* 0x067d: memx_recv */
+ 0x01d6b000,
+ 0xb09b0bf4,
+ 0x0bf400d6,
+/* 0x068b: memx_init */
+ 0xf800f8d8,
+/* 0x068d: perf_recv */
+/* 0x068f: perf_init */
+ 0xf800f800,
+/* 0x0691: i2c_drive_scl */
+ 0x0036b000,
+ 0xf10e0bf4,
+ 0xd007e007,
0x04bd0001,
-/* 0x069a: i2c_drive_sda */
- 0x36b000f8,
- 0x0e0bf400,
- 0x07e007f1,
- 0xbd0002d0,
-/* 0x06ab: i2c_drive_sda_lo */
- 0xf100f804,
- 0xd007e407,
+/* 0x06a2: i2c_drive_scl_lo */
+ 0x07f100f8,
+ 0x01d007e4,
+ 0xf804bd00,
+/* 0x06ad: i2c_drive_sda */
+ 0x0036b000,
+ 0xf10e0bf4,
+ 0xd007e007,
0x04bd0002,
-/* 0x06b6: i2c_sense_scl */
+/* 0x06be: i2c_drive_sda_lo */
+ 0x07f100f8,
+ 0x02d007e4,
+ 0xf804bd00,
+/* 0x06c9: i2c_sense_scl */
+ 0x0132f400,
+ 0x07c437f1,
+ 0xfd0033cf,
+ 0x0bf40431,
+ 0x0131f406,
+/* 0x06dc: i2c_sense_scl_done */
+/* 0x06de: i2c_sense_sda */
0x32f400f8,
0xc437f101,
0x0033cf07,
- 0xf40431fd,
+ 0xf40432fd,
0x31f4060b,
-/* 0x06c9: i2c_sense_scl_done */
-/* 0x06cb: i2c_sense_sda */
- 0xf400f801,
- 0x37f10132,
- 0x33cf07c4,
- 0x0432fd00,
- 0xf4060bf4,
-/* 0x06de: i2c_sense_sda_done */
- 0x00f80131,
-/* 0x06e0: i2c_raise_scl */
- 0x47f140f9,
- 0x37f00898,
- 0x7e21f501,
-/* 0x06ed: i2c_raise_scl_wait */
- 0xe8e7f106,
- 0x6721f403,
- 0x06b621f5,
- 0xb60901f4,
- 0x1bf40142,
-/* 0x0701: i2c_raise_scl_done */
- 0xf840fcef,
-/* 0x0705: i2c_start */
- 0xb621f500,
- 0x0d11f406,
- 0x06cb21f5,
- 0xf40611f4,
-/* 0x0716: i2c_start_rep */
- 0x37f0300e,
- 0x7e21f500,
- 0x0137f006,
- 0x069a21f5,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0xe021f550,
- 0x0464b606,
-/* 0x0743: i2c_start_send */
- 0xf01f11f4,
- 0x21f50037,
- 0xe7f1069a,
- 0x21f41388,
- 0x0037f067,
- 0x067e21f5,
- 0x1388e7f1,
-/* 0x075f: i2c_start_out */
- 0xf86721f4,
-/* 0x0761: i2c_stop */
- 0x0037f000,
- 0x067e21f5,
- 0xf50037f0,
- 0xf1069a21,
- 0xf403e8e7,
- 0x37f06721,
- 0x7e21f501,
- 0x88e7f106,
- 0x6721f413,
- 0xf50137f0,
- 0xf1069a21,
- 0xf41388e7,
- 0x00f86721,
-/* 0x0794: i2c_bitw */
- 0x069a21f5,
+/* 0x06f1: i2c_sense_sda_done */
+/* 0x06f3: i2c_raise_scl */
+ 0xf900f801,
+ 0x9847f140,
+ 0x0137f008,
+ 0x069121f5,
+/* 0x0700: i2c_raise_scl_wait */
0x03e8e7f1,
- 0xbb6721f4,
- 0x65b60076,
- 0x9450f904,
- 0x56bb0465,
- 0xfd50bd02,
- 0x50fc0475,
- 0x06e021f5,
- 0xf40464b6,
- 0xe7f11811,
- 0x21f41388,
- 0x0037f067,
- 0x067e21f5,
- 0x1388e7f1,
-/* 0x07d3: i2c_bitw_out */
- 0xf86721f4,
-/* 0x07d5: i2c_bitr */
- 0x0137f000,
- 0x069a21f5,
- 0x03e8e7f1,
- 0xbb6721f4,
+ 0xf56721f4,
+ 0xf406c921,
+ 0x42b60901,
+ 0xef1bf401,
+/* 0x0714: i2c_raise_scl_done */
+ 0x00f840fc,
+/* 0x0718: i2c_start */
+ 0x06c921f5,
+ 0xf50d11f4,
+ 0xf406de21,
+ 0x0ef40611,
+/* 0x0729: i2c_start_rep */
+ 0x0037f030,
+ 0x069121f5,
+ 0xf50137f0,
+ 0xbb06ad21,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x06e021f5,
+ 0x06f321f5,
0xf40464b6,
- 0x21f51b11,
- 0x37f006cb,
- 0x7e21f500,
+/* 0x0756: i2c_start_send */
+ 0x37f01f11,
+ 0xad21f500,
0x88e7f106,
0x6721f413,
- 0xf4013cf0,
-/* 0x081a: i2c_bitr_done */
- 0x00f80131,
-/* 0x081c: i2c_get_byte */
- 0xf00057f0,
-/* 0x0822: i2c_get_byte_next */
- 0x54b60847,
- 0x0076bb01,
- 0xf90465b6,
- 0x04659450,
- 0xbd0256bb,
- 0x0475fd50,
- 0x21f550fc,
- 0x64b607d5,
- 0x2b11f404,
- 0xb60553fd,
- 0x1bf40142,
- 0x0137f0d8,
+ 0xf50037f0,
+ 0xf1069121,
+ 0xf41388e7,
+/* 0x0772: i2c_start_out */
+ 0x00f86721,
+/* 0x0774: i2c_stop */
+ 0xf50037f0,
+ 0xf0069121,
+ 0x21f50037,
+ 0xe7f106ad,
+ 0x21f403e8,
+ 0x0137f067,
+ 0x069121f5,
+ 0x1388e7f1,
+ 0xf06721f4,
+ 0x21f50137,
+ 0xe7f106ad,
+ 0x21f41388,
+/* 0x07a7: i2c_bitw */
+ 0xf500f867,
+ 0xf106ad21,
+ 0xf403e8e7,
+ 0x76bb6721,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0xf550fc04,
+ 0xb606f321,
+ 0x11f40464,
+ 0x88e7f118,
+ 0x6721f413,
+ 0xf50037f0,
+ 0xf1069121,
+ 0xf41388e7,
+/* 0x07e6: i2c_bitw_out */
+ 0x00f86721,
+/* 0x07e8: i2c_bitr */
+ 0xf50137f0,
+ 0xf106ad21,
+ 0xf403e8e7,
+ 0x76bb6721,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0xf550fc04,
+ 0xb606f321,
+ 0x11f40464,
+ 0xde21f51b,
+ 0x0037f006,
+ 0x069121f5,
+ 0x1388e7f1,
+ 0xf06721f4,
+ 0x31f4013c,
+/* 0x082d: i2c_bitr_done */
+/* 0x082f: i2c_get_byte */
+ 0xf000f801,
+ 0x47f00057,
+/* 0x0835: i2c_get_byte_next */
+ 0x0154b608,
0xb60076bb,
0x50f90465,
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0x9421f550,
+ 0xe821f550,
0x0464b607,
-/* 0x086c: i2c_get_byte_done */
-/* 0x086e: i2c_put_byte */
- 0x47f000f8,
-/* 0x0871: i2c_put_byte_next */
- 0x0142b608,
- 0xbb3854ff,
+ 0xfd2b11f4,
+ 0x42b60553,
+ 0xd81bf401,
+ 0xbb0137f0,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x07a721f5,
+/* 0x087f: i2c_get_byte_done */
+ 0xf80464b6,
+/* 0x0881: i2c_put_byte */
+ 0x0847f000,
+/* 0x0884: i2c_put_byte_next */
+ 0xff0142b6,
+ 0x76bb3854,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0xf550fc04,
+ 0xb607a721,
+ 0x11f40464,
+ 0x0046b034,
+ 0xbbd81bf4,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x079421f5,
+ 0x07e821f5,
0xf40464b6,
- 0x46b03411,
- 0xd81bf400,
+ 0x76bb0f11,
+ 0x0136b000,
+ 0xf4061bf4,
+/* 0x08da: i2c_put_byte_done */
+ 0x00f80132,
+/* 0x08dc: i2c_addr */
0xb60076bb,
0x50f90465,
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0xd521f550,
+ 0x1821f550,
0x0464b607,
- 0xbb0f11f4,
- 0x36b00076,
- 0x061bf401,
-/* 0x08c7: i2c_put_byte_done */
- 0xf80132f4,
-/* 0x08c9: i2c_addr */
- 0x0076bb00,
+ 0xe72911f4,
+ 0xb6012ec3,
+ 0x53fd0134,
+ 0x0076bb05,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b60705,
- 0x2911f404,
- 0x012ec3e7,
- 0xfd0134b6,
- 0x76bb0553,
- 0x0465b600,
- 0x659450f9,
- 0x0256bb04,
- 0x75fd50bd,
- 0xf550fc04,
- 0xb6086e21,
-/* 0x090e: i2c_addr_done */
- 0x00f80464,
-/* 0x0910: i2c_acquire_addr */
- 0xb6f8cec7,
- 0xe0b705e4,
- 0x00f8d014,
-/* 0x091c: i2c_acquire */
- 0x091021f5,
- 0xf00421f4,
- 0x21f403d9,
-/* 0x092b: i2c_release */
- 0xf500f833,
- 0xf4091021,
- 0xdaf00421,
+ 0x64b60881,
+/* 0x0921: i2c_addr_done */
+/* 0x0923: i2c_acquire_addr */
+ 0xc700f804,
+ 0xe4b6f8ce,
+ 0x14e0b705,
+/* 0x092f: i2c_acquire */
+ 0xf500f8d0,
+ 0xf4092321,
+ 0xd9f00421,
0x3321f403,
-/* 0x093a: i2c_recv */
- 0x32f400f8,
- 0xf8c1c701,
- 0xb00214b6,
- 0x1ff52816,
- 0x13a0013a,
- 0x32980be8,
- 0xc013a000,
- 0x0031980b,
- 0xf90231f4,
- 0xf9e0f9d0,
- 0x0067f1d0,
- 0x0063f100,
- 0x01679210,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0x1c21f550,
- 0x0464b609,
- 0xd6b0d0fc,
- 0xb31bf500,
- 0x0057f000,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0xc921f550,
- 0x0464b608,
- 0x00d011f5,
- 0xbbe0c5c7,
+/* 0x093e: i2c_release */
+ 0x21f500f8,
+ 0x21f40923,
+ 0x03daf004,
+ 0xf83321f4,
+/* 0x094d: i2c_recv */
+ 0x0132f400,
+ 0xb6f8c1c7,
+ 0x16b00214,
+ 0x3a1ff528,
+ 0xf413a001,
+ 0x0032980c,
+ 0x0ccc13a0,
+ 0xf4003198,
+ 0xd0f90231,
+ 0xd0f9e0f9,
+ 0x000067f1,
+ 0x100063f1,
+ 0xbb016792,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x086e21f5,
+ 0x092f21f5,
+ 0xfc0464b6,
+ 0x00d6b0d0,
+ 0x00b31bf5,
+ 0xbb0057f0,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x08dc21f5,
0xf50464b6,
- 0xf000ad11,
- 0x76bb0157,
+ 0xc700d011,
+ 0x76bbe0c5,
0x0465b600,
0x659450f9,
0x0256bb04,
0x75fd50bd,
0xf550fc04,
- 0xb608c921,
+ 0xb6088121,
0x11f50464,
- 0x76bb008a,
- 0x0465b600,
- 0x659450f9,
- 0x0256bb04,
- 0x75fd50bd,
- 0xf550fc04,
- 0xb6081c21,
- 0x11f40464,
- 0xe05bcb6a,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0x6121f550,
- 0x0464b607,
- 0xbd025bb9,
- 0x430ef474,
-/* 0x0a40: i2c_recv_not_rd08 */
- 0xf401d6b0,
- 0x57f03d1b,
- 0xc921f500,
- 0x3311f408,
- 0xf5e0c5c7,
- 0xf4086e21,
- 0x57f02911,
- 0xc921f500,
- 0x1f11f408,
- 0xf5e0b5c7,
- 0xf4086e21,
- 0x21f51511,
- 0x74bd0761,
- 0xf408c5c7,
- 0x32f4091b,
- 0x030ef402,
-/* 0x0a80: i2c_recv_not_wr08 */
-/* 0x0a80: i2c_recv_done */
- 0xf5f8cec7,
- 0xfc092b21,
- 0xf4d0fce0,
- 0x7cb90a12,
- 0xf121f502,
-/* 0x0a95: i2c_recv_exit */
-/* 0x0a97: i2c_init */
+ 0x57f000ad,
+ 0x0076bb01,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b608dc,
+ 0x8a11f504,
+ 0x0076bb00,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b6082f,
+ 0x6a11f404,
+ 0xbbe05bcb,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x077421f5,
+ 0xb90464b6,
+ 0x74bd025b,
+/* 0x0a53: i2c_recv_not_rd08 */
+ 0xb0430ef4,
+ 0x1bf401d6,
+ 0x0057f03d,
+ 0x08dc21f5,
+ 0xc73311f4,
+ 0x21f5e0c5,
+ 0x11f40881,
+ 0x0057f029,
+ 0x08dc21f5,
+ 0xc71f11f4,
+ 0x21f5e0b5,
+ 0x11f40881,
+ 0x7421f515,
+ 0xc774bd07,
+ 0x1bf408c5,
+ 0x0232f409,
+/* 0x0a93: i2c_recv_not_wr08 */
+/* 0x0a93: i2c_recv_done */
+ 0xc7030ef4,
+ 0x21f5f8ce,
+ 0xe0fc093e,
+ 0x12f4d0fc,
+ 0x027cb90a,
+ 0x02f121f5,
+/* 0x0aa8: i2c_recv_exit */
+/* 0x0aaa: i2c_init */
+ 0x00f800f8,
+/* 0x0aac: test_recv */
+ 0x05d817f1,
+ 0xb60011cf,
+ 0x07f10110,
+ 0x01d005d8,
+ 0xf104bd00,
+ 0xf1d900e7,
+ 0xf5134fe3,
+ 0xf8022321,
+/* 0x0acd: test_init */
+ 0x00e7f100,
+ 0x2321f508,
+/* 0x0ad7: idle_recv */
0xf800f802,
-/* 0x0a99: test_recv */
- 0xd817f100,
- 0x0011cf05,
- 0xf10110b6,
- 0xd005d807,
- 0x04bd0001,
- 0xd900e7f1,
- 0x134fe3f1,
- 0x022321f5,
-/* 0x0aba: test_init */
- 0xe7f100f8,
- 0x21f50800,
- 0x00f80223,
-/* 0x0ac4: idle_recv */
-/* 0x0ac6: idle */
- 0x31f400f8,
- 0xd417f100,
- 0x0011cf05,
- 0xf10110b6,
- 0xd005d407,
- 0x04bd0001,
-/* 0x0adc: idle_loop */
- 0xf45817f0,
-/* 0x0ae2: idle_proc */
-/* 0x0ae2: idle_proc_exec */
- 0x10f90232,
- 0xf5021eb9,
- 0xfc02fa21,
- 0x0911f410,
- 0xf40231f4,
-/* 0x0af6: idle_proc_next */
- 0x10b6ef0e,
- 0x061fb858,
- 0xf4e61bf4,
- 0x28f4dd02,
- 0xc10ef400,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+/* 0x0ad9: idle */
+ 0x0031f400,
+ 0x05d417f1,
+ 0xb60011cf,
+ 0x07f10110,
+ 0x01d005d4,
+/* 0x0aef: idle_loop */
+ 0xf004bd00,
+ 0x32f45817,
+/* 0x0af5: idle_proc */
+/* 0x0af5: idle_proc_exec */
+ 0xb910f902,
+ 0x21f5021e,
+ 0x10fc02fa,
+ 0xf40911f4,
+ 0x0ef40231,
+/* 0x0b09: idle_proc_next */
+ 0x5810b6ef,
+ 0xf4061fb8,
+ 0x02f4e61b,
+ 0x0028f4dd,
+ 0x00c10ef4,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h
index 522e3079f82..c8b06cb77e7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h
@@ -18,6 +18,10 @@
#define MEMX_MSG_INFO 0
#define MEMX_MSG_EXEC 1
+/* MEMX: info types */
+#define MEMX_INFO_DATA 0
+#define MEMX_INFO_TRAIN 1
+
/* MEMX: script opcode definitions */
#define MEMX_ENTER 1
#define MEMX_LEAVE 2
@@ -25,6 +29,7 @@
#define MEMX_WAIT 4
#define MEMX_DELAY 5
#define MEMX_VBLANK 6
+#define MEMX_TRAIN 7
/* I2C_: message identifiers */
#define I2C__MSG_RD08 0
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c
index 65eaa2546ca..7a9299d7159 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c
@@ -47,7 +47,8 @@ nouveau_memx_init(struct nouveau_pwr *ppwr, struct nouveau_memx **pmemx)
u32 reply[2];
int ret;
- ret = ppwr->message(ppwr, reply, PROC_MEMX, MEMX_MSG_INFO, 0, 0);
+ ret = ppwr->message(ppwr, reply, PROC_MEMX, MEMX_MSG_INFO,
+ MEMX_INFO_DATA, 0);
if (ret)
return ret;
@@ -106,7 +107,7 @@ nouveau_memx_wait(struct nouveau_memx *memx,
{
nv_debug(memx->ppwr, "R[%06x] & 0x%08x == 0x%08x, %d us\n",
addr, mask, data, nsec);
- memx_cmd(memx, MEMX_WAIT, 4, (u32[]){ addr, ~mask, data, nsec });
+ memx_cmd(memx, MEMX_WAIT, 4, (u32[]){ addr, mask, data, nsec });
memx_out(memx); /* fuc can't handle multiple */
}
@@ -152,6 +153,38 @@ nouveau_memx_wait_vblank(struct nouveau_memx *memx)
}
void
+nouveau_memx_train(struct nouveau_memx *memx)
+{
+ nv_debug(memx->ppwr, " MEM TRAIN\n");
+ memx_cmd(memx, MEMX_TRAIN, 0, NULL);
+}
+
+int
+nouveau_memx_train_result(struct nouveau_pwr *ppwr, u32 *res, int rsize)
+{
+ u32 reply[2], base, size, i;
+ int ret;
+
+ ret = ppwr->message(ppwr, reply, PROC_MEMX, MEMX_MSG_INFO,
+ MEMX_INFO_TRAIN, 0);
+ if (ret)
+ return ret;
+
+ base = reply[0];
+ size = reply[1] >> 2;
+ if (size > rsize)
+ return -ENOMEM;
+
+ /* read the packet */
+ nv_wr32(ppwr, 0x10a1c0, 0x02000000 | base);
+
+ for (i = 0; i < size; i++)
+ res[i] = nv_rd32(ppwr, 0x10a1c4);
+
+ return 0;
+}
+
+void
nouveau_memx_block(struct nouveau_memx *memx)
{
nv_debug(memx->ppwr, " HOST BLOCKED\n");
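
As context for the MEMX training interface added above: a minimal sketch of how a caller might read back the training packet through the new nouveau_memx_train_result() helper. The wrapper function, the 16-word buffer and the error handling are assumptions for illustration only; they are not part of this patch.

/* Illustrative sketch, not part of the patch: fetch the training data the
 * PMU stored at memx_train_head. */
static int example_read_train_results(struct nouveau_pwr *ppwr)
{
	u32 results[16];	/* assumed large enough for the packet */
	int ret;

	ret = nouveau_memx_train_result(ppwr, results, ARRAY_SIZE(results));
	if (ret)
		return ret;	/* -ENOMEM if the packet exceeds 16 words */

	/* results[] now holds the raw words read back through 0x10a1c4 */
	return 0;
}
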
diff --git a/drivers/gpu/drm/nouveau/core/subdev/volt/base.c b/drivers/gpu/drm/nouveau/core/subdev/volt/base.c
index 32794a99910..26ccd8df193 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/volt/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/volt/base.c
@@ -101,6 +101,41 @@ nouveau_volt_set_id(struct nouveau_volt *volt, u8 id, int condition)
return ret;
}
+static void nouveau_volt_parse_bios(struct nouveau_bios *bios,
+ struct nouveau_volt *volt)
+{
+ struct nvbios_volt_entry ivid;
+ struct nvbios_volt info;
+ u8 ver, hdr, cnt, len;
+ u16 data;
+ int i;
+
+ data = nvbios_volt_parse(bios, &ver, &hdr, &cnt, &len, &info);
+ if (data && info.vidmask && info.base && info.step) {
+ for (i = 0; i < info.vidmask + 1; i++) {
+ if (info.base >= info.min &&
+ info.base <= info.max) {
+ volt->vid[volt->vid_nr].uv = info.base;
+ volt->vid[volt->vid_nr].vid = i;
+ volt->vid_nr++;
+ }
+ info.base += info.step;
+ }
+ volt->vid_mask = info.vidmask;
+ } else if (data && info.vidmask) {
+ for (i = 0; i < cnt; i++) {
+ data = nvbios_volt_entry_parse(bios, i, &ver, &hdr,
+ &ivid);
+ if (data) {
+ volt->vid[volt->vid_nr].uv = ivid.voltage;
+ volt->vid[volt->vid_nr].vid = ivid.vid;
+ volt->vid_nr++;
+ }
+ }
+ volt->vid_mask = info.vidmask;
+ }
+}
+
int
_nouveau_volt_init(struct nouveau_object *object)
{
@@ -136,10 +171,6 @@ nouveau_volt_create_(struct nouveau_object *parent,
{
struct nouveau_bios *bios = nouveau_bios(parent);
struct nouveau_volt *volt;
- struct nvbios_volt_entry ivid;
- struct nvbios_volt info;
- u8 ver, hdr, cnt, len;
- u16 data;
int ret, i;
ret = nouveau_subdev_create_(parent, engine, oclass, 0, "VOLT",
@@ -152,31 +183,9 @@ nouveau_volt_create_(struct nouveau_object *parent,
volt->set = nouveau_volt_set;
volt->set_id = nouveau_volt_set_id;
- data = nvbios_volt_parse(bios, &ver, &hdr, &cnt, &len, &info);
- if (data && info.vidmask && info.base && info.step) {
- for (i = 0; i < info.vidmask + 1; i++) {
- if (info.base >= info.min &&
- info.base <= info.max) {
- volt->vid[volt->vid_nr].uv = info.base;
- volt->vid[volt->vid_nr].vid = i;
- volt->vid_nr++;
- }
- info.base += info.step;
- }
- volt->vid_mask = info.vidmask;
- } else
- if (data && info.vidmask) {
- for (i = 0; i < cnt; i++) {
- data = nvbios_volt_entry_parse(bios, i, &ver, &hdr,
- &ivid);
- if (data) {
- volt->vid[volt->vid_nr].uv = ivid.voltage;
- volt->vid[volt->vid_nr].vid = ivid.vid;
- volt->vid_nr++;
- }
- }
- volt->vid_mask = info.vidmask;
- }
+ /* Assuming the non-bios device should build the voltage table later */
+ if (bios)
+ nouveau_volt_parse_bios(bios, volt);
if (volt->vid_nr) {
for (i = 0; i < volt->vid_nr; i++) {
diff --git a/drivers/gpu/drm/nouveau/core/subdev/volt/gk20a.c b/drivers/gpu/drm/nouveau/core/subdev/volt/gk20a.c
new file mode 100644
index 00000000000..717368ef31a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/volt/gk20a.c
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifdef __KERNEL__
+#include <nouveau_platform.h>
+#endif
+#include <subdev/volt.h>
+
+struct cvb_coef {
+ int c0;
+ int c1;
+ int c2;
+ int c3;
+ int c4;
+ int c5;
+};
+
+struct gk20a_volt_priv {
+ struct nouveau_volt base;
+ struct regulator *vdd;
+};
+
+const struct cvb_coef gk20a_cvb_coef[] = {
+ /* MHz, c0, c1, c2, c3, c4, c5 */
+ /* 72 */ { 1209886, -36468, 515, 417, -13123, 203},
+ /* 108 */ { 1130804, -27659, 296, 298, -10834, 221},
+ /* 180 */ { 1162871, -27110, 247, 238, -10681, 268},
+ /* 252 */ { 1220458, -28654, 247, 179, -10376, 298},
+ /* 324 */ { 1280953, -30204, 247, 119, -9766, 304},
+ /* 396 */ { 1344547, -31777, 247, 119, -8545, 292},
+ /* 468 */ { 1420168, -34227, 269, 60, -7172, 256},
+ /* 540 */ { 1490757, -35955, 274, 60, -5188, 197},
+ /* 612 */ { 1599112, -42583, 398, 0, -1831, 119},
+ /* 648 */ { 1366986, -16459, -274, 0, -3204, 72},
+ /* 684 */ { 1391884, -17078, -274, -60, -1526, 30},
+ /* 708 */ { 1415522, -17497, -274, -60, -458, 0},
+ /* 756 */ { 1464061, -18331, -274, -119, 1831, -72},
+ /* 804 */ { 1524225, -20064, -254, -119, 4272, -155},
+ /* 852 */ { 1608418, -21643, -269, 0, 763, -48},
+};
+
+/**
+ * cvb_mv = ((c2 * speedo / s_scale + c1) * speedo / s_scale + c0)
+ */
+static inline int
+gk20a_volt_get_cvb_voltage(int speedo, int s_scale,
+ const struct cvb_coef *coef)
+{
+ int mv;
+
+ mv = DIV_ROUND_CLOSEST(coef->c2 * speedo, s_scale);
+ mv = DIV_ROUND_CLOSEST((mv + coef->c1) * speedo, s_scale) + coef->c0;
+ return mv;
+}
+
+/**
+ * cvb_t_mv =
+ * ((c2 * speedo / s_scale + c1) * speedo / s_scale + c0) +
+ * ((c3 * speedo / s_scale + c4 + c5 * T / t_scale) * T / t_scale)
+ */
+static inline int
+gk20a_volt_get_cvb_t_voltage(int speedo, int temp, int s_scale, int t_scale,
+ const struct cvb_coef *coef)
+{
+ int cvb_mv, mv;
+
+ cvb_mv = gk20a_volt_get_cvb_voltage(speedo, s_scale, coef);
+
+ mv = DIV_ROUND_CLOSEST(coef->c3 * speedo, s_scale) + coef->c4 +
+ DIV_ROUND_CLOSEST(coef->c5 * temp, t_scale);
+ mv = DIV_ROUND_CLOSEST(mv * temp, t_scale) + cvb_mv;
+ return mv;
+}
+
+static int
+gk20a_volt_calc_voltage(const struct cvb_coef *coef, int speedo)
+{
+ int mv;
+
+ mv = gk20a_volt_get_cvb_t_voltage(speedo, -10, 100, 10, coef);
+ mv = DIV_ROUND_UP(mv, 1000);
+
+ return mv * 1000;
+}
+
+static int
+gk20a_volt_vid_get(struct nouveau_volt *volt)
+{
+ struct gk20a_volt_priv *priv = (void *)volt;
+ int i, uv;
+
+ uv = regulator_get_voltage(priv->vdd);
+
+ for (i = 0; i < volt->vid_nr; i++)
+ if (volt->vid[i].uv >= uv)
+ return i;
+
+ return -EINVAL;
+}
+
+static int
+gk20a_volt_vid_set(struct nouveau_volt *volt, u8 vid)
+{
+ struct gk20a_volt_priv *priv = (void *)volt;
+
+ nv_debug(volt, "set voltage as %duv\n", volt->vid[vid].uv);
+ return regulator_set_voltage(priv->vdd, volt->vid[vid].uv, 1200000);
+}
+
+static int
+gk20a_volt_set_id(struct nouveau_volt *volt, u8 id, int condition)
+{
+ struct gk20a_volt_priv *priv = (void *)volt;
+ int prev_uv = regulator_get_voltage(priv->vdd);
+ int target_uv = volt->vid[id].uv;
+ int ret;
+
+ nv_debug(volt, "prev=%d, target=%d, condition=%d\n",
+ prev_uv, target_uv, condition);
+ if (!condition ||
+ (condition < 0 && target_uv < prev_uv) ||
+ (condition > 0 && target_uv > prev_uv)) {
+ ret = gk20a_volt_vid_set(volt, volt->vid[id].vid);
+ } else {
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int
+gk20a_volt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct gk20a_volt_priv *priv;
+ struct nouveau_volt *volt;
+ struct nouveau_platform_device *plat;
+ int i, ret, uv;
+
+ ret = nouveau_volt_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ volt = &priv->base;
+
+ plat = nv_device_to_platform(nv_device(parent));
+
+ uv = regulator_get_voltage(plat->gpu->vdd);
+ nv_info(priv, "The default voltage is %duV\n", uv);
+
+ priv->vdd = plat->gpu->vdd;
+ priv->base.vid_get = gk20a_volt_vid_get;
+ priv->base.vid_set = gk20a_volt_vid_set;
+ priv->base.set_id = gk20a_volt_set_id;
+
+ volt->vid_nr = ARRAY_SIZE(gk20a_cvb_coef);
+ nv_debug(priv, "%s - vid_nr = %d\n", __func__, volt->vid_nr);
+ for (i = 0; i < volt->vid_nr; i++) {
+ volt->vid[i].vid = i;
+ volt->vid[i].uv = gk20a_volt_calc_voltage(&gk20a_cvb_coef[i],
+ plat->gpu_speedo);
+ nv_debug(priv, "%2d: vid=%d, uv=%d\n", i, volt->vid[i].vid,
+ volt->vid[i].uv);
+ }
+
+ return 0;
+}
+
+struct nouveau_oclass
+gk20a_volt_oclass = {
+ .handle = NV_SUBDEV(VOLT, 0xea),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = gk20a_volt_ctor,
+ .dtor = _nouveau_volt_dtor,
+ .init = _nouveau_volt_init,
+ .fini = _nouveau_volt_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index fca6a1f9c20..38402ade683 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -26,6 +26,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
#include "nouveau_drm.h"
#include "nouveau_reg.h"
@@ -613,7 +614,7 @@ nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
int ret;
- ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
+ ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM, false);
if (ret == 0) {
if (disp->image[nv_crtc->index])
nouveau_bo_unpin(disp->image[nv_crtc->index]);
@@ -1129,7 +1130,7 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
0, 0x0000, NULL, NULL, &nv_crtc->cursor.nvbo);
if (!ret) {
- ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
+ ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM, false);
if (!ret) {
ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index 1e9056a8df9..9f2498571d0 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -126,7 +126,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
return -ERANGE;
}
- ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM);
+ ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM, false);
if (ret)
return ret;
@@ -373,7 +373,7 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (crtc_w < src_w || crtc_h < src_h)
return -ERANGE;
- ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM);
+ ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM, false);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index a24faa5e2a2..d39a1500006 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -308,7 +308,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
ret = nouveau_gem_new(dev, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
0, 0, &chan->ntfy);
if (ret == 0)
- ret = nouveau_bo_pin(chan->ntfy, TTM_PL_FLAG_TT);
+ ret = nouveau_bo_pin(chan->ntfy, TTM_PL_FLAG_TT, false);
if (ret)
goto done;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index dae2c96deef..7df6acc8bb3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -1258,7 +1258,7 @@ olddcb_table(struct drm_device *dev)
return NULL;
}
- if (dcb[0] >= 0x41) {
+ if (dcb[0] >= 0x42) {
NV_WARN(drm, "DCB version 0x%02x unknown\n", dcb[0]);
return NULL;
} else
@@ -1481,18 +1481,22 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
entry->dpconf.link_bw = 540000;
break;
}
- switch ((conf & 0x0f000000) >> 24) {
- case 0xf:
- entry->dpconf.link_nr = 4;
- break;
- case 0x3:
- entry->dpconf.link_nr = 2;
- break;
- default:
- entry->dpconf.link_nr = 1;
- break;
+ entry->dpconf.link_nr = (conf & 0x0f000000) >> 24;
+ if (dcb->version < 0x41) {
+ switch (entry->dpconf.link_nr) {
+ case 0xf:
+ entry->dpconf.link_nr = 4;
+ break;
+ case 0x3:
+ entry->dpconf.link_nr = 2;
+ break;
+ default:
+ entry->dpconf.link_nr = 1;
+ break;
+ }
}
link = entry->dpconf.sor.link;
+ entry->i2c_index += NV_I2C_AUX(0);
break;
case DCB_OUTPUT_TMDS:
if (dcb->version >= 0x40) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 3d474ac03f8..21ec561edc9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -214,6 +214,9 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
nvbo->tile_flags = tile_flags;
nvbo->bo.bdev = &drm->ttm.bdev;
+ if (!nv_device_is_cpu_coherent(nvkm_device(&drm->device)))
+ nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
+
nvbo->page_shift = 12;
if (drm->client.vm) {
if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
@@ -291,8 +294,9 @@ void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
struct ttm_placement *pl = &nvbo->placement;
- uint32_t flags = TTM_PL_MASK_CACHING |
- (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);
+ uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
+ TTM_PL_MASK_CACHING) |
+ (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);
pl->placement = nvbo->placements;
set_placement_list(nvbo->placements, &pl->num_placement,
@@ -306,42 +310,75 @@ nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
}
int
-nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
+nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_buffer_object *bo = &nvbo->bo;
+ bool force = false, evict = false;
int ret;
ret = ttm_bo_reserve(bo, false, false, false, NULL);
if (ret)
- goto out;
+ return ret;
- if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
- NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
- 1 << bo->mem.mem_type, memtype);
- ret = -EINVAL;
- goto out;
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
+ memtype == TTM_PL_FLAG_VRAM && contig) {
+ if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
+ if (bo->mem.mem_type == TTM_PL_VRAM) {
+ struct nouveau_mem *mem = bo->mem.mm_node;
+ if (!list_is_singular(&mem->regions))
+ evict = true;
+ }
+ nvbo->tile_flags &= ~NOUVEAU_GEM_TILE_NONCONTIG;
+ force = true;
+ }
}
- if (nvbo->pin_refcnt++)
+ if (nvbo->pin_refcnt) {
+ if (!(memtype & (1 << bo->mem.mem_type)) || evict) {
+ NV_ERROR(drm, "bo %p pinned elsewhere: "
+ "0x%08x vs 0x%08x\n", bo,
+ 1 << bo->mem.mem_type, memtype);
+ ret = -EBUSY;
+ }
+ nvbo->pin_refcnt++;
goto out;
+ }
+ if (evict) {
+ nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0);
+ ret = nouveau_bo_validate(nvbo, false, false);
+ if (ret)
+ goto out;
+ }
+
+ nvbo->pin_refcnt++;
nouveau_bo_placement_set(nvbo, memtype, 0);
+ /* drop pin_refcnt temporarily, so we don't trip the assertion
+ * in nouveau_bo_move() that makes sure we're not trying to
+ * move a pinned buffer
+ */
+ nvbo->pin_refcnt--;
ret = nouveau_bo_validate(nvbo, false, false);
- if (ret == 0) {
- switch (bo->mem.mem_type) {
- case TTM_PL_VRAM:
- drm->gem.vram_available -= bo->mem.size;
- break;
- case TTM_PL_TT:
- drm->gem.gart_available -= bo->mem.size;
- break;
- default:
- break;
- }
+ if (ret)
+ goto out;
+ nvbo->pin_refcnt++;
+
+ switch (bo->mem.mem_type) {
+ case TTM_PL_VRAM:
+ drm->gem.vram_available -= bo->mem.size;
+ break;
+ case TTM_PL_TT:
+ drm->gem.gart_available -= bo->mem.size;
+ break;
+ default:
+ break;
}
+
out:
+ if (force && ret)
+ nvbo->tile_flags |= NOUVEAU_GEM_TILE_NONCONTIG;
ttm_bo_unreserve(bo);
return ret;
}
@@ -392,7 +429,14 @@ nouveau_bo_map(struct nouveau_bo *nvbo)
if (ret)
return ret;
- ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
+ /*
+ * TTM buffers allocated using the DMA API already have a mapping, let's
+ * use it instead.
+ */
+ if (!nvbo->force_coherent)
+ ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
+ &nvbo->kmap);
+
ttm_bo_unreserve(&nvbo->bo);
return ret;
}
@@ -400,10 +444,57 @@ nouveau_bo_map(struct nouveau_bo *nvbo)
void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
- if (nvbo)
+ if (!nvbo)
+ return;
+
+ /*
+ * TTM buffers allocated using the DMA API already had a coherent
+ * mapping which we used, no need to unmap.
+ */
+ if (!nvbo->force_coherent)
ttm_bo_kunmap(&nvbo->kmap);
}
+void
+nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
+{
+ struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
+ struct nouveau_device *device = nvkm_device(&drm->device);
+ struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
+ int i;
+
+ if (!ttm_dma)
+ return;
+
+ /* Don't waste time looping if the object is coherent */
+ if (nvbo->force_coherent)
+ return;
+
+ for (i = 0; i < ttm_dma->ttm.num_pages; i++)
+ dma_sync_single_for_device(nv_device_base(device),
+ ttm_dma->dma_address[i], PAGE_SIZE, DMA_TO_DEVICE);
+}
+
+void
+nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
+{
+ struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
+ struct nouveau_device *device = nvkm_device(&drm->device);
+ struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
+ int i;
+
+ if (!ttm_dma)
+ return;
+
+ /* Don't waste time looping if the object is coherent */
+ if (nvbo->force_coherent)
+ return;
+
+ for (i = 0; i < ttm_dma->ttm.num_pages; i++)
+ dma_sync_single_for_cpu(nv_device_base(device),
+ ttm_dma->dma_address[i], PAGE_SIZE, DMA_FROM_DEVICE);
+}
+
int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
bool no_wait_gpu)
@@ -415,15 +506,41 @@ nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
if (ret)
return ret;
+ nouveau_bo_sync_for_device(nvbo);
+
return 0;
}
+static inline void *
+_nouveau_bo_mem_index(struct nouveau_bo *nvbo, unsigned index, void *mem, u8 sz)
+{
+ struct ttm_dma_tt *dma_tt;
+ u8 *m = mem;
+
+ index *= sz;
+
+ if (m) {
+ /* kmap'd address, return the corresponding offset */
+ m += index;
+ } else {
+ /* DMA-API mapping, lookup the right address */
+ dma_tt = (struct ttm_dma_tt *)nvbo->bo.ttm;
+ m = dma_tt->cpu_address[index / PAGE_SIZE];
+ m += index % PAGE_SIZE;
+ }
+
+ return m;
+}
+#define nouveau_bo_mem_index(o, i, m) _nouveau_bo_mem_index(o, i, m, sizeof(*m))
+
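+/*
+ * Note (informational): buffers marked force_coherent skip ttm_bo_kmap(),
+ * so ttm_kmap_obj_virtual() returns NULL for them and the rd/wr helpers
+ * below fall back to the per-page cpu_address[] array set up by the DMA API.
+ */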
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
bool is_iomem;
u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
- mem = &mem[index];
+
+ mem = nouveau_bo_mem_index(nvbo, index, mem);
+
if (is_iomem)
return ioread16_native((void __force __iomem *)mem);
else
@@ -435,7 +552,9 @@ nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
bool is_iomem;
u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
- mem = &mem[index];
+
+ mem = nouveau_bo_mem_index(nvbo, index, mem);
+
if (is_iomem)
iowrite16_native(val, (void __force __iomem *)mem);
else
@@ -447,7 +566,9 @@ nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
bool is_iomem;
u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
- mem = &mem[index];
+
+ mem = nouveau_bo_mem_index(nvbo, index, mem);
+
if (is_iomem)
return ioread32_native((void __force __iomem *)mem);
else
@@ -459,7 +580,9 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
bool is_iomem;
u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
- mem = &mem[index];
+
+ mem = nouveau_bo_mem_index(nvbo, index, mem);
+
if (is_iomem)
iowrite32_native(val, (void __force __iomem *)mem);
else
@@ -1184,6 +1307,9 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
struct nouveau_drm_tile *new_tile = NULL;
int ret = 0;
+ if (nvbo->pin_refcnt)
+ NV_WARN(drm, "Moving pinned object %p!\n", nvbo);
+
if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
if (ret)
@@ -1376,6 +1502,14 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
dev = drm->dev;
pdev = nv_device_base(device);
+ /*
+ * Objects matching this condition have been marked as force_coherent,
+ * so use the DMA API for them.
+ */
+ if (!nv_device_is_cpu_coherent(device) &&
+ ttm->caching_state == tt_uncached)
+ return ttm_dma_populate(ttm_dma, dev->dev);
+
#if __OS_HAS_AGP
if (drm->agp.stat == ENABLED) {
return ttm_agp_tt_populate(ttm);
@@ -1433,6 +1567,14 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
dev = drm->dev;
pdev = nv_device_base(device);
+ /*
+ * Objects matching this condition have been marked as force_coherent,
+ * so use the DMA API for them.
+ */
+ if (!nv_device_is_cpu_coherent(device) &&
+ ttm->caching_state == tt_uncached)
+ ttm_dma_unpopulate(ttm_dma, dev->dev);
+
#if __OS_HAS_AGP
if (drm->agp.stat == ENABLED) {
ttm_agp_tt_unpopulate(ttm);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 22d2c764d80..072222efeeb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -13,6 +13,7 @@ struct nouveau_bo {
u32 valid_domains;
struct ttm_place placements[3];
struct ttm_place busy_placements[3];
+ bool force_coherent;
struct ttm_bo_kmap_obj kmap;
struct list_head head;
@@ -72,7 +73,7 @@ int nouveau_bo_new(struct drm_device *, int size, int align, u32 flags,
u32 tile_mode, u32 tile_flags, struct sg_table *sg,
struct reservation_object *robj,
struct nouveau_bo **);
-int nouveau_bo_pin(struct nouveau_bo *, u32 flags);
+int nouveau_bo_pin(struct nouveau_bo *, u32 flags, bool contig);
int nouveau_bo_unpin(struct nouveau_bo *);
int nouveau_bo_map(struct nouveau_bo *);
void nouveau_bo_unmap(struct nouveau_bo *);
@@ -84,6 +85,8 @@ void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val);
void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *, bool exclusive);
int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
bool no_wait_gpu);
+void nouveau_bo_sync_for_device(struct nouveau_bo *nvbo);
+void nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo);
struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index fd3dbd59d73..aff9099aae6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -102,14 +102,14 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
chan->drm = drm;
/* allocate memory for dma push buffer */
- target = TTM_PL_FLAG_TT;
+ target = TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED;
if (nouveau_vram_pushbuf)
target = TTM_PL_FLAG_VRAM;
ret = nouveau_bo_new(drm->dev, size, 0, target, 0, 0, NULL, NULL,
&chan->push.buffer);
if (ret == 0) {
- ret = nouveau_bo_pin(chan->push.buffer, target);
+ ret = nouveau_bo_pin(chan->push.buffer, target, false);
if (ret == 0)
ret = nouveau_bo_map(chan->push.buffer);
}
@@ -285,7 +285,6 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
struct nouveau_software_chan *swch;
struct nv_dma_v0 args = {};
int ret, i;
- bool save;
nvif_object_map(chan->object);
@@ -387,11 +386,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
}
/* initialise synchronisation */
- save = cli->base.super;
- cli->base.super = true; /* hack until fencenv50 fixed */
- ret = nouveau_fence(chan->drm)->context_new(chan);
- cli->base.super = save;
- return ret;
+ return nouveau_fence(chan->drm)->context_new(chan);
}
int
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index a88e6927f57..5d93902a91a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -479,6 +479,7 @@ nouveau_display_create(struct drm_device *dev)
if (nouveau_modeset != 2 && drm->vbios.dcb.entries) {
static const u16 oclass[] = {
+ GM204_DISP,
GM107_DISP,
GK110_DISP,
GK104_DISP,
@@ -568,9 +569,10 @@ nouveau_display_suspend(struct drm_device *dev, bool runtime)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
- nouveau_bo_unmap(nv_crtc->cursor.nvbo);
- nouveau_bo_unpin(nv_crtc->cursor.nvbo);
+ if (nv_crtc->cursor.nvbo) {
+ nouveau_bo_unmap(nv_crtc->cursor.nvbo);
+ nouveau_bo_unpin(nv_crtc->cursor.nvbo);
+ }
}
return 0;
@@ -591,15 +593,17 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
if (!nouveau_fb || !nouveau_fb->nvbo)
continue;
- ret = nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM);
+ ret = nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM, true);
if (ret)
NV_ERROR(drm, "Could not pin framebuffer\n");
}
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ if (!nv_crtc->cursor.nvbo)
+ continue;
- ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
+ ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM, true);
if (!ret)
ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
if (ret)
@@ -630,9 +634,10 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- u32 offset = nv_crtc->cursor.nvbo->bo.offset;
- nv_crtc->cursor.set_offset(nv_crtc, offset);
+ if (!nv_crtc->cursor.nvbo)
+ continue;
+ nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset);
nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
nv_crtc->cursor_saved_y);
}
@@ -710,7 +715,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
return -ENOMEM;
if (new_bo != old_bo) {
- ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
+ ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM, true);
if (ret)
goto fail_free;
}
@@ -871,6 +876,7 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
if (ret)
return ret;
+ bo->gem.dumb = true;
ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle);
drm_gem_object_unreference_unlocked(&bo->gem);
return ret;
@@ -886,6 +892,14 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv,
gem = drm_gem_object_lookup(dev, file_priv, handle);
if (gem) {
struct nouveau_bo *bo = nouveau_gem_object(gem);
+
+ /*
+ * We don't allow dumb mmaps on objects created using another
+ * interface.
+ */
+ WARN_ONCE(!(gem->dumb || gem->import_attach),
+ "Illegal dumb map of accelerated buffer.\n");
+
*poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
drm_gem_object_unreference_unlocked(gem);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 62b97c4eef8..65910e3aed0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -613,26 +613,6 @@ fail_display:
return ret;
}
-int nouveau_pmops_suspend(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
- int ret;
-
- if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
- drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
- return 0;
-
- ret = nouveau_do_suspend(drm_dev, false);
- if (ret)
- return ret;
-
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, PCI_D3hot);
- return 0;
-}
-
static int
nouveau_do_resume(struct drm_device *dev, bool runtime)
{
@@ -667,7 +647,29 @@ nouveau_do_resume(struct drm_device *dev, bool runtime)
return 0;
}
-int nouveau_pmops_resume(struct device *dev)
+int
+nouveau_pmops_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ int ret;
+
+ if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
+ drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
+ return 0;
+
+ ret = nouveau_do_suspend(drm_dev, false);
+ if (ret)
+ return ret;
+
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+ return 0;
+}
+
+int
+nouveau_pmops_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
@@ -687,20 +689,122 @@ int nouveau_pmops_resume(struct device *dev)
return nouveau_do_resume(drm_dev, false);
}
-static int nouveau_pmops_freeze(struct device *dev)
+static int
+nouveau_pmops_freeze(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
return nouveau_do_suspend(drm_dev, false);
}
-static int nouveau_pmops_thaw(struct device *dev)
+static int
+nouveau_pmops_thaw(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
return nouveau_do_resume(drm_dev, false);
}
+static int
+nouveau_pmops_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ int ret;
+
+ if (nouveau_runtime_pm == 0) {
+ pm_runtime_forbid(dev);
+ return -EBUSY;
+ }
+
+ /* are we optimus enabled? */
+ if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
+ DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
+ pm_runtime_forbid(dev);
+ return -EBUSY;
+ }
+
+ nv_debug_level(SILENT);
+ drm_kms_helper_poll_disable(drm_dev);
+ vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
+ nouveau_switcheroo_optimus_dsm();
+ ret = nouveau_do_suspend(drm_dev, true);
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_ignore_hotplug(pdev);
+ pci_set_power_state(pdev, PCI_D3cold);
+ drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
+ return ret;
+}
+
+static int
+nouveau_pmops_runtime_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct nvif_device *device = &nouveau_drm(drm_dev)->device;
+ int ret;
+
+ if (nouveau_runtime_pm == 0)
+ return -EINVAL;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+ pci_set_master(pdev);
+
+ ret = nouveau_do_resume(drm_dev, true);
+ drm_kms_helper_poll_enable(drm_dev);
+ /* do magic */
+ nvif_mask(device, 0x88488, (1 << 25), (1 << 25));
+ vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
+ drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
+ nv_debug_level(NORMAL);
+ return ret;
+}
+
+static int
+nouveau_pmops_runtime_idle(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct nouveau_drm *drm = nouveau_drm(drm_dev);
+ struct drm_crtc *crtc;
+
+ if (nouveau_runtime_pm == 0) {
+ pm_runtime_forbid(dev);
+ return -EBUSY;
+ }
+
+ /* are we optimus enabled? */
+ if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
+ DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
+ pm_runtime_forbid(dev);
+ return -EBUSY;
+ }
+
+ /* if we have a hdmi audio device - make sure it has a driver loaded */
+ if (drm->hdmi_device) {
+ if (!drm->hdmi_device->driver) {
+ DRM_DEBUG_DRIVER("failing to power off - no HDMI audio driver loaded\n");
+ pm_runtime_mark_last_busy(dev);
+ return -EBUSY;
+ }
+ }
+
+ list_for_each_entry(crtc, &drm->dev->mode_config.crtc_list, head) {
+ if (crtc->enabled) {
+ DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
+ return -EBUSY;
+ }
+ }
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_autosuspend(dev);
+ /* we don't want the main rpm_idle to call suspend - we want to autosuspend */
+ return 1;
+}
static int
nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
@@ -907,104 +1011,6 @@ nouveau_drm_pci_table[] = {
{}
};
-static int nouveau_pmops_runtime_suspend(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
- int ret;
-
- if (nouveau_runtime_pm == 0) {
- pm_runtime_forbid(dev);
- return -EBUSY;
- }
-
- /* are we optimus enabled? */
- if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
- DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
- pm_runtime_forbid(dev);
- return -EBUSY;
- }
-
- nv_debug_level(SILENT);
- drm_kms_helper_poll_disable(drm_dev);
- vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
- nouveau_switcheroo_optimus_dsm();
- ret = nouveau_do_suspend(drm_dev, true);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_ignore_hotplug(pdev);
- pci_set_power_state(pdev, PCI_D3cold);
- drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
- return ret;
-}
-
-static int nouveau_pmops_runtime_resume(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
- struct nvif_device *device = &nouveau_drm(drm_dev)->device;
- int ret;
-
- if (nouveau_runtime_pm == 0)
- return -EINVAL;
-
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- ret = pci_enable_device(pdev);
- if (ret)
- return ret;
- pci_set_master(pdev);
-
- ret = nouveau_do_resume(drm_dev, true);
- drm_kms_helper_poll_enable(drm_dev);
- /* do magic */
- nvif_mask(device, 0x88488, (1 << 25), (1 << 25));
- vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
- drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
- nv_debug_level(NORMAL);
- return ret;
-}
-
-static int nouveau_pmops_runtime_idle(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
- struct nouveau_drm *drm = nouveau_drm(drm_dev);
- struct drm_crtc *crtc;
-
- if (nouveau_runtime_pm == 0) {
- pm_runtime_forbid(dev);
- return -EBUSY;
- }
-
- /* are we optimus enabled? */
- if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
- DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
- pm_runtime_forbid(dev);
- return -EBUSY;
- }
-
- /* if we have a hdmi audio device - make sure it has a driver loaded */
- if (drm->hdmi_device) {
- if (!drm->hdmi_device->driver) {
- DRM_DEBUG_DRIVER("failing to power off - no HDMI audio driver loaded\n");
- pm_runtime_mark_last_busy(dev);
- return -EBUSY;
- }
- }
-
- list_for_each_entry(crtc, &drm->dev->mode_config.crtc_list, head) {
- if (crtc->enabled) {
- DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
- return -EBUSY;
- }
- }
- pm_runtime_mark_last_busy(dev);
- pm_runtime_autosuspend(dev);
- /* we don't want the main rpm_idle to call suspend - we want to autosuspend */
- return 1;
-}
-
static void nouveau_display_options(void)
{
DRM_DEBUG_DRIVER("Loading Nouveau with parameters:\n");
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 593ef8a2a06..3ed12a8cfc9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -341,7 +341,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
goto out;
}
- ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
+ ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
if (ret) {
NV_ERROR(drm, "failed to pin fb: %d\n", ret);
goto out_unref;
@@ -498,6 +498,23 @@ nouveau_fbcon_set_suspend_work(struct work_struct *work)
console_unlock();
}
+void
+nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ if (drm->fbcon) {
+ if (state == FBINFO_STATE_RUNNING) {
+ schedule_work(&drm->fbcon->work);
+ return;
+ }
+ flush_work(&drm->fbcon->work);
+ console_lock();
+ fb_set_suspend(drm->fbcon->helper.fbdev, state);
+ nouveau_fbcon_accel_save_disable(dev);
+ console_unlock();
+ }
+}
+
int
nouveau_fbcon_init(struct drm_device *dev)
{
@@ -557,20 +574,3 @@ nouveau_fbcon_fini(struct drm_device *dev)
kfree(drm->fbcon);
drm->fbcon = NULL;
}
-
-void
-nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- if (drm->fbcon) {
- if (state == FBINFO_STATE_RUNNING) {
- schedule_work(&drm->fbcon->work);
- return;
- }
- flush_work(&drm->fbcon->work);
- console_lock();
- fb_set_suspend(drm->fbcon->helper.fbdev, state);
- nouveau_fbcon_accel_save_disable(dev);
- console_unlock();
- }
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 36951ee4b15..28d51a22a4b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -444,6 +444,9 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
list_for_each_entry(nvbo, list, entry) {
struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
+ WARN_ONCE(nvbo->gem.dumb,
+ "GPU use of dumb buffer is illegal.\n");
+
ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
b->write_domains,
b->valid_domains);
@@ -867,6 +870,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
else
ret = lret;
}
+ nouveau_bo_sync_for_cpu(nvbo);
drm_gem_object_unreference_unlocked(gem);
return ret;
@@ -876,6 +880,17 @@ int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct drm_nouveau_gem_cpu_fini *req = data;
+ struct drm_gem_object *gem;
+ struct nouveau_bo *nvbo;
+
+ gem = drm_gem_object_lookup(dev, file_priv, req->handle);
+ if (!gem)
+ return -ENOENT;
+ nvbo = nouveau_gem_object(gem);
+
+ nouveau_bo_sync_for_device(nvbo);
+ drm_gem_object_unreference_unlocked(gem);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 246a824c16c..b307bbedd4c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -27,6 +27,7 @@
#include <linux/of.h>
#include <linux/reset.h>
#include <linux/regulator/consumer.h>
+#include <soc/tegra/fuse.h>
#include <soc/tegra/pmc.h>
#include "nouveau_drm.h"
@@ -128,6 +129,7 @@ static int nouveau_platform_probe(struct platform_device *pdev)
}
device->gpu = gpu;
+ device->gpu_speedo = tegra_sku_info.gpu_speedo_value;
err = drm_dev_register(drm, 0);
if (err < 0)
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.h b/drivers/gpu/drm/nouveau/nouveau_platform.h
index 91f66504900..58c28b5653d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.h
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.h
@@ -41,6 +41,8 @@ struct nouveau_platform_device {
struct nouveau_device device;
struct nouveau_platform_gpu *gpu;
+
+ int gpu_speedo;
};
#define nv_device_to_platform(d) \
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 228226ab27f..dd32ad6db53 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -93,7 +93,7 @@ int nouveau_gem_prime_pin(struct drm_gem_object *obj)
int ret;
/* pin buffer into GTT */
- ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT);
+ ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT, false);
if (ret)
return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 40b461c7d5c..57860cfa1de 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -131,7 +131,7 @@ nv17_fence_create(struct nouveau_drm *drm)
ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
0, 0x0000, NULL, NULL, &priv->bo);
if (!ret) {
- ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
+ ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false);
if (!ret) {
ret = nouveau_bo_map(priv->bo);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index eb8b36714fa..490b90866ba 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -26,6 +26,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
#include <drm/drm_dp_helper.h>
#include <nvif/class.h>
@@ -65,15 +66,29 @@ static int
nv50_chan_create(struct nvif_object *disp, const u32 *oclass, u8 head,
void *data, u32 size, struct nv50_chan *chan)
{
+ const u32 handle = (oclass[0] << 16) | head;
+ u32 sclass[8];
+ int ret, i;
+
+ ret = nvif_object_sclass(disp, sclass, ARRAY_SIZE(sclass));
+ WARN_ON(ret > ARRAY_SIZE(sclass));
+ if (ret < 0)
+ return ret;
+
while (oclass[0]) {
- int ret = nvif_object_init(disp, NULL, (oclass[0] << 16) | head,
- oclass[0], data, size,
- &chan->user);
- if (oclass++, ret == 0) {
- nvif_object_map(&chan->user);
- return ret;
+ for (i = 0; i < ARRAY_SIZE(sclass); i++) {
+ if (sclass[i] == oclass[0]) {
+ ret = nvif_object_init(disp, NULL, handle,
+ oclass[0], data, size,
+ &chan->user);
+ if (ret == 0)
+ nvif_object_map(&chan->user);
+ return ret;
+ }
}
+ oclass++;
}
+
return -ENOSYS;
}
@@ -110,6 +125,7 @@ nv50_pioc_create(struct nvif_object *disp, const u32 *oclass, u8 head,
struct nv50_curs {
struct nv50_pioc base;
+ struct nouveau_bo *image;
};
static int
@@ -265,6 +281,7 @@ nv50_core_create(struct nvif_object *disp, u64 syncbuf, struct nv50_mast *core)
.pushbuf = 0xb0007d00,
};
static const u32 oclass[] = {
+ GM204_DISP_CORE_CHANNEL_DMA,
GM107_DISP_CORE_CHANNEL_DMA,
GK110_DISP_CORE_CHANNEL_DMA,
GK104_DISP_CORE_CHANNEL_DMA,
@@ -424,8 +441,21 @@ evo_kick(u32 *push, void *evoc)
mutex_unlock(&dmac->lock);
}
+#if 1
#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
#define evo_data(p,d) *((p)++) = (d)
+#else
+#define evo_mthd(p,m,s) do { \
+ const u32 _m = (m), _s = (s); \
+ printk(KERN_ERR "%04x %d %s\n", _m, _s, __func__); \
+ *((p)++) = ((_s << 18) | _m); \
+} while(0)
+#define evo_data(p,d) do { \
+ const u32 _d = (d); \
+ printk(KERN_ERR "\t%08x\n", _d); \
+ *((p)++) = _d; \
+} while(0)
+#endif
static bool
evo_sync_wait(void *data)
@@ -887,23 +917,24 @@ static void
nv50_crtc_cursor_show(struct nouveau_crtc *nv_crtc)
{
struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+ struct nv50_curs *curs = nv50_curs(&nv_crtc->base);
u32 *push = evo_wait(mast, 16);
if (push) {
if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
evo_data(push, 0x85000000);
- evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
+ evo_data(push, curs->image->bo.offset >> 8);
} else
if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
evo_data(push, 0x85000000);
- evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
+ evo_data(push, curs->image->bo.offset >> 8);
evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1);
evo_data(push, mast->base.vram.handle);
} else {
evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
evo_data(push, 0x85000000);
- evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
+ evo_data(push, curs->image->bo.offset >> 8);
evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
evo_data(push, mast->base.vram.handle);
}
@@ -940,8 +971,9 @@ static void
nv50_crtc_cursor_show_hide(struct nouveau_crtc *nv_crtc, bool show, bool update)
{
struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+ struct nv50_curs *curs = nv50_curs(&nv_crtc->base);
- if (show)
+ if (show && curs->image)
nv50_crtc_cursor_show(nv_crtc);
else
nv50_crtc_cursor_hide(nv_crtc);
@@ -1041,7 +1073,7 @@ nv50_crtc_commit(struct drm_crtc *crtc)
evo_kick(push, mast);
}
- nv50_crtc_cursor_show_hide(nv_crtc, nv_crtc->cursor.visible, true);
+ nv50_crtc_cursor_show_hide(nv_crtc, true, true);
nv50_display_flip_next(crtc, crtc->primary->fb, NULL, 1);
}
@@ -1060,7 +1092,7 @@ nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
struct nv50_head *head = nv50_head(crtc);
int ret;
- ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
+ ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM, true);
if (ret == 0) {
if (head->image)
nouveau_bo_unpin(head->image);
@@ -1241,13 +1273,13 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t handle, uint32_t width, uint32_t height)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nv50_curs *curs = nv50_curs(crtc);
struct drm_device *dev = crtc->dev;
- struct drm_gem_object *gem;
- struct nouveau_bo *nvbo;
- bool visible = (handle != 0);
- int i, ret = 0;
+ struct drm_gem_object *gem = NULL;
+ struct nouveau_bo *nvbo = NULL;
+ int ret = 0;
- if (visible) {
+ if (handle) {
if (width != 64 || height != 64)
return -EINVAL;
@@ -1256,23 +1288,17 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
return -ENOENT;
nvbo = nouveau_gem_object(gem);
- ret = nouveau_bo_map(nvbo);
- if (ret == 0) {
- for (i = 0; i < 64 * 64; i++) {
- u32 v = nouveau_bo_rd32(nvbo, i);
- nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, v);
- }
- nouveau_bo_unmap(nvbo);
- }
-
- drm_gem_object_unreference_unlocked(gem);
+ ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, true);
}
- if (visible != nv_crtc->cursor.visible) {
- nv50_crtc_cursor_show_hide(nv_crtc, visible, true);
- nv_crtc->cursor.visible = visible;
+ if (ret == 0) {
+ if (curs->image)
+ nouveau_bo_unpin(curs->image);
+ nouveau_bo_ref(nvbo, &curs->image);
}
+ drm_gem_object_unreference_unlocked(gem);
+ nv50_crtc_cursor_show_hide(nv_crtc, true, true);
return ret;
}
@@ -1327,10 +1353,10 @@ nv50_crtc_destroy(struct drm_crtc *crtc)
nouveau_bo_unpin(head->image);
nouveau_bo_ref(NULL, &head->image);
- nouveau_bo_unmap(nv_crtc->cursor.nvbo);
- if (nv_crtc->cursor.nvbo)
- nouveau_bo_unpin(nv_crtc->cursor.nvbo);
- nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+ /*XXX: ditto */
+ if (head->curs.image)
+ nouveau_bo_unpin(head->curs.image);
+ nouveau_bo_ref(NULL, &head->curs.image);
nouveau_bo_unmap(nv_crtc->lut.nvbo);
if (nv_crtc->lut.nvbo)
@@ -1362,16 +1388,6 @@ static const struct drm_crtc_funcs nv50_crtc_func = {
.page_flip = nouveau_crtc_page_flip,
};
-static void
-nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
-{
-}
-
-static void
-nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
-{
-}
-
static int
nv50_crtc_create(struct drm_device *dev, int index)
{
@@ -1390,8 +1406,6 @@ nv50_crtc_create(struct drm_device *dev, int index)
head->base.set_color_vibrance = nv50_crtc_set_color_vibrance;
head->base.color_vibrance = 50;
head->base.vibrant_hue = 0;
- head->base.cursor.set_offset = nv50_cursor_set_offset;
- head->base.cursor.set_pos = nv50_cursor_set_pos;
for (i = 0; i < 256; i++) {
head->base.lut.r[i] = i << 8;
head->base.lut.g[i] = i << 8;
@@ -1406,7 +1420,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
0, 0x0000, NULL, NULL, &head->base.lut.nvbo);
if (!ret) {
- ret = nouveau_bo_pin(head->base.lut.nvbo, TTM_PL_FLAG_VRAM);
+ ret = nouveau_bo_pin(head->base.lut.nvbo, TTM_PL_FLAG_VRAM, true);
if (!ret) {
ret = nouveau_bo_map(head->base.lut.nvbo);
if (ret)
@@ -1426,22 +1440,6 @@ nv50_crtc_create(struct drm_device *dev, int index)
if (ret)
goto out;
- ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, NULL, NULL, &head->base.cursor.nvbo);
- if (!ret) {
- ret = nouveau_bo_pin(head->base.cursor.nvbo, TTM_PL_FLAG_VRAM);
- if (!ret) {
- ret = nouveau_bo_map(head->base.cursor.nvbo);
- if (ret)
- nouveau_bo_unpin(head->base.lut.nvbo);
- }
- if (ret)
- nouveau_bo_ref(NULL, &head->base.cursor.nvbo);
- }
-
- if (ret)
- goto out;
-
/* allocate page flip / sync resources */
ret = nv50_base_create(disp->disp, index, disp->sync->bo.offset,
&head->sync);
@@ -1701,7 +1699,8 @@ nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
memcpy(args.data, nv_connector->base.eld, sizeof(args.data));
- nvif_mthd(disp->disp, 0, &args, sizeof(args.base) + args.data[2] * 4);
+ nvif_mthd(disp->disp, 0, &args,
+ sizeof(args.base) + drm_eld_size(args.data));
}
static void
@@ -2373,11 +2372,6 @@ nv50_fb_ctor(struct drm_framebuffer *fb)
u8 kind = nouveau_bo_tile_layout(nvbo) >> 8;
u8 tile = nvbo->tile_mode;
- if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
- NV_ERROR(drm, "framebuffer requires contiguous bo\n");
- return -EINVAL;
- }
-
if (drm->device.info.chipset >= 0xc0)
tile >>= 4; /* yep.. */
@@ -2491,7 +2485,7 @@ nv50_display_create(struct drm_device *dev)
ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
0, 0x0000, NULL, NULL, &disp->sync);
if (!ret) {
- ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM);
+ ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM, true);
if (!ret) {
ret = nouveau_bo_map(disp->sync);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index 22d242b3796..a82d9ea7c6f 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -102,7 +102,7 @@ nv50_fence_create(struct nouveau_drm *drm)
ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
0, 0x0000, NULL, NULL, &priv->bo);
if (!ret) {
- ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
+ ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false);
if (!ret) {
ret = nouveau_bo_map(priv->bo);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index d6c6c87c3f0..cb5b88938d4 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -234,7 +234,7 @@ nv84_fence_create(struct nouveau_drm *drm)
ret = nouveau_bo_new(drm->dev, 16 * priv->base.contexts, 0,
TTM_PL_FLAG_VRAM, 0, 0, NULL, NULL, &priv->bo);
if (ret == 0) {
- ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
+ ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false);
if (ret == 0) {
ret = nouveau_bo_map(priv->bo);
if (ret)
@@ -246,10 +246,10 @@ nv84_fence_create(struct nouveau_drm *drm)
if (ret == 0)
ret = nouveau_bo_new(drm->dev, 16 * priv->base.contexts, 0,
- TTM_PL_FLAG_TT, 0, 0, NULL, NULL,
- &priv->bo_gart);
+ TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED, 0,
+ 0, NULL, NULL, &priv->bo_gart);
if (ret == 0) {
- ret = nouveau_bo_pin(priv->bo_gart, TTM_PL_FLAG_TT);
+ ret = nouveau_bo_pin(priv->bo_gart, TTM_PL_FLAG_TT, false);
if (ret == 0) {
ret = nouveau_bo_map(priv->bo_gart);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvif/class.h b/drivers/gpu/drm/nouveau/nvif/class.h
index e5a27df0672..4e308eacb27 100644
--- a/drivers/gpu/drm/nouveau/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/nvif/class.h
@@ -35,6 +35,7 @@
#define GK104_DISP 0x00009170
#define GK110_DISP 0x00009270
#define GM107_DISP 0x00009470
+#define GM204_DISP 0x00009570
#define NV50_DISP_CURSOR 0x0000507a
#define G82_DISP_CURSOR 0x0000827a
@@ -65,6 +66,7 @@
#define GK104_DISP_CORE_CHANNEL_DMA 0x0000917d
#define GK110_DISP_CORE_CHANNEL_DMA 0x0000927d
#define GM107_DISP_CORE_CHANNEL_DMA 0x0000947d
+#define GM204_DISP_CORE_CHANNEL_DMA 0x0000957d
#define NV50_DISP_OVERLAY_CHANNEL_DMA 0x0000507e
#define G82_DISP_OVERLAY_CHANNEL_DMA 0x0000827e
@@ -131,6 +133,7 @@ struct nv_device_v0 {
#define NV_DEVICE_V0_DISABLE_COPY1 0x0000010000000000ULL
#define NV_DEVICE_V0_DISABLE_VIC 0x0000020000000000ULL
#define NV_DEVICE_V0_DISABLE_VENC 0x0000040000000000ULL
+#define NV_DEVICE_V0_DISABLE_COPY2 0x0000080000000000ULL
__u64 disable; /* disable particular subsystems */
__u64 debug0; /* as above, but *internal* ids, and *NOT* ABI */
};
diff --git a/drivers/gpu/drm/nouveau/nvif/client.c b/drivers/gpu/drm/nouveau/nvif/client.c
index 3c4df1fc26d..3f7ac5bc8e0 100644
--- a/drivers/gpu/drm/nouveau/nvif/client.c
+++ b/drivers/gpu/drm/nouveau/nvif/client.c
@@ -62,6 +62,7 @@ nvif_drivers[] = {
#else
&nvif_driver_drm,
&nvif_driver_lib,
+ &nvif_driver_null,
#endif
NULL
};
diff --git a/drivers/gpu/drm/nouveau/nvif/driver.h b/drivers/gpu/drm/nouveau/nvif/driver.h
index ac4bdb3ea50..8bd39e69229 100644
--- a/drivers/gpu/drm/nouveau/nvif/driver.h
+++ b/drivers/gpu/drm/nouveau/nvif/driver.h
@@ -17,5 +17,6 @@ struct nvif_driver {
extern const struct nvif_driver nvif_driver_nvkm;
extern const struct nvif_driver nvif_driver_drm;
extern const struct nvif_driver nvif_driver_lib;
+extern const struct nvif_driver nvif_driver_null;
#endif
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 2d28dc337cf..b0566a1ca28 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -20,6 +20,7 @@
#include "omap_drv.h"
#include <drm/drm_mode.h>
+#include <drm/drm_plane_helper.h>
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index e4849413ee8..aeb91ed653c 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -612,8 +612,7 @@ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
{
union omap_gem_size gsize;
- /* in case someone tries to feed us a completely bogus stride: */
- args->pitch = align_pitch(args->pitch, args->width, args->bpp);
+ args->pitch = align_pitch(0, args->width, args->bpp);
args->size = PAGE_ALIGN(args->pitch * args->height);
gsize = (union omap_gem_size){
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 891a4dc608a..ee8e2b3a117 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -388,20 +388,15 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
struct drm_plane *plane = NULL;
struct omap_plane *omap_plane;
struct omap_overlay_info *info;
- int ret;
DBG("%s: priv=%d", plane_names[id], private_plane);
omap_plane = kzalloc(sizeof(*omap_plane), GFP_KERNEL);
if (!omap_plane)
- goto fail;
+ return NULL;
- ret = drm_flip_work_init(&omap_plane->unpin_work, 16,
+ drm_flip_work_init(&omap_plane->unpin_work,
"unpin", unpin_worker);
- if (ret) {
- dev_err(dev->dev, "could not allocate unpin FIFO\n");
- goto fail;
- }
omap_plane->nformats = omap_framebuffer_get_formats(
omap_plane->formats, ARRAY_SIZE(omap_plane->formats),
@@ -443,10 +438,4 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
omap_plane->info.zorder = id;
return plane;
-
-fail:
- if (plane)
- omap_plane_destroy(plane);
-
- return NULL;
}
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index bee9f72b3a9..024e98ef8e4 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -27,4 +27,17 @@ config DRM_PANEL_S6E8AA0
select DRM_MIPI_DSI
select VIDEOMODE_HELPERS
+config DRM_PANEL_SHARP_LQ101R1SX01
+ tristate "Sharp LQ101R1SX01 panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ help
+ Say Y here if you want to enable support for Sharp LQ101R1SX01
+ TFT-LCD modules. The panel has a 2560x1600 resolution and uses
+ 24 bit RGB per pixel. It provides a dual MIPI DSI interface to
+ the host and has a built-in LED backlight.
+
+ To compile this driver as a module, choose M here: the module
+ will be called panel-sharp-lq101r1sx01.
+
endmenu
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 8b929212fad..4b2a0430804 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
obj-$(CONFIG_DRM_PANEL_LD9040) += panel-ld9040.o
obj-$(CONFIG_DRM_PANEL_S6E8AA0) += panel-s6e8aa0.o
+obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o
diff --git a/drivers/gpu/drm/panel/panel-ld9040.c b/drivers/gpu/drm/panel/panel-ld9040.c
index 42ac67b21e9..08cf2c588c3 100644
--- a/drivers/gpu/drm/panel/panel-ld9040.c
+++ b/drivers/gpu/drm/panel/panel-ld9040.c
@@ -145,7 +145,7 @@ static void ld9040_dcs_write(struct ld9040 *ctx, const u8 *data, size_t len)
if (ctx->error < 0 || len == 0)
return;
- dev_dbg(ctx->dev, "writing dcs seq: %*ph\n", len, data);
+ dev_dbg(ctx->dev, "writing dcs seq: %*ph\n", (int)len, data);
ret = ld9040_spi_write_word(ctx, *data);
while (!ret && --len) {
@@ -154,8 +154,8 @@ static void ld9040_dcs_write(struct ld9040 *ctx, const u8 *data, size_t len)
}
if (ret) {
- dev_err(ctx->dev, "error %d writing dcs seq: %*ph\n", ret, len,
- data);
+ dev_err(ctx->dev, "error %d writing dcs seq: %*ph\n", ret,
+ (int)len, data);
ctx->error = ret;
}
@@ -336,17 +336,12 @@ static int ld9040_probe(struct spi_device *spi)
if (ret < 0)
return ret;
- ctx->reset_gpio = devm_gpiod_get(dev, "reset");
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ctx->reset_gpio)) {
dev_err(dev, "cannot get reset-gpios %ld\n",
PTR_ERR(ctx->reset_gpio));
return PTR_ERR(ctx->reset_gpio);
}
- ret = gpiod_direction_output(ctx->reset_gpio, 1);
- if (ret < 0) {
- dev_err(dev, "cannot configure reset-gpios %d\n", ret);
- return ret;
- }
spi->bits_per_word = 9;
ret = spi_setup(spi);
diff --git a/drivers/gpu/drm/panel/panel-s6e8aa0.c b/drivers/gpu/drm/panel/panel-s6e8aa0.c
index b5217fe37f0..144b2733e3d 100644
--- a/drivers/gpu/drm/panel/panel-s6e8aa0.c
+++ b/drivers/gpu/drm/panel/panel-s6e8aa0.c
@@ -141,10 +141,10 @@ static void s6e8aa0_dcs_write(struct s6e8aa0 *ctx, const void *data, size_t len)
if (ctx->error < 0)
return;
- ret = mipi_dsi_dcs_write(dsi, data, len);
+ ret = mipi_dsi_dcs_write_buffer(dsi, data, len);
if (ret < 0) {
- dev_err(ctx->dev, "error %zd writing dcs seq: %*ph\n", ret, len,
- data);
+ dev_err(ctx->dev, "error %zd writing dcs seq: %*ph\n", ret,
+ (int)len, data);
ctx->error = ret;
}
}
@@ -800,27 +800,15 @@ static void s6e8aa0_panel_init(struct s6e8aa0 *ctx)
}
static void s6e8aa0_set_maximum_return_packet_size(struct s6e8aa0 *ctx,
- int size)
+ u16 size)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
- const struct mipi_dsi_host_ops *ops = dsi->host->ops;
- u8 buf[] = {size, 0};
- struct mipi_dsi_msg msg = {
- .channel = dsi->channel,
- .type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
- .tx_len = sizeof(buf),
- .tx_buf = buf
- };
int ret;
if (ctx->error < 0)
return;
- if (!ops || !ops->transfer)
- ret = -EIO;
- else
- ret = ops->transfer(dsi->host, &msg);
-
+ ret = mipi_dsi_set_maximum_return_packet_size(dsi, size);
if (ret < 0) {
dev_err(ctx->dev,
"error %d setting maximum return packet size to %d\n",
@@ -1019,17 +1007,12 @@ static int s6e8aa0_probe(struct mipi_dsi_device *dsi)
return ret;
}
- ctx->reset_gpio = devm_gpiod_get(dev, "reset");
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ctx->reset_gpio)) {
dev_err(dev, "cannot get reset-gpios %ld\n",
PTR_ERR(ctx->reset_gpio));
return PTR_ERR(ctx->reset_gpio);
}
- ret = gpiod_direction_output(ctx->reset_gpio, 1);
- if (ret < 0) {
- dev_err(dev, "cannot configure reset-gpios %d\n", ret);
- return ret;
- }
ctx->brightness = GAMMA_LEVEL_NUM - 1;
@@ -1069,7 +1052,6 @@ static struct mipi_dsi_driver s6e8aa0_driver = {
.remove = s6e8aa0_remove,
.driver = {
.name = "panel_s6e8aa0",
- .owner = THIS_MODULE,
.of_match_table = s6e8aa0_of_match,
},
};
diff --git a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
new file mode 100644
index 00000000000..9d81759d82f
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
@@ -0,0 +1,464 @@
+/*
+ * Copyright (C) 2014 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/backlight.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+#include <linux/host1x.h>
+
+struct sharp_panel {
+ struct drm_panel base;
+ /* the datasheet refers to them as DSI-LINK1 and DSI-LINK2 */
+ struct mipi_dsi_device *link1;
+ struct mipi_dsi_device *link2;
+
+ struct backlight_device *backlight;
+ struct regulator *supply;
+
+ bool prepared;
+ bool enabled;
+
+ const struct drm_display_mode *mode;
+};
+
+static inline struct sharp_panel *to_sharp_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct sharp_panel, base);
+}
+
+static int sharp_panel_write(struct sharp_panel *sharp, u16 offset, u8 value)
+{
+ u8 payload[3] = { offset >> 8, offset & 0xff, value };
+ struct mipi_dsi_device *dsi = sharp->link1;
+ ssize_t err;
+
+ err = mipi_dsi_generic_write(dsi, payload, sizeof(payload));
+ if (err < 0) {
+ dev_err(&dsi->dev, "failed to write %02x to %04x: %zd\n",
+ value, offset, err);
+ return err;
+ }
+
+ err = mipi_dsi_dcs_nop(dsi);
+ if (err < 0) {
+ dev_err(&dsi->dev, "failed to send DCS nop: %zd\n", err);
+ return err;
+ }
+
+ usleep_range(10, 20);
+
+ return 0;
+}
+
+static __maybe_unused int sharp_panel_read(struct sharp_panel *sharp,
+ u16 offset, u8 *value)
+{
+ ssize_t err;
+
+ cpu_to_be16s(&offset);
+
+ err = mipi_dsi_generic_read(sharp->link1, &offset, sizeof(offset),
+ value, sizeof(*value));
+ if (err < 0)
+ dev_err(&sharp->link1->dev, "failed to read from %04x: %zd\n",
+ offset, err);
+
+ return err;
+}
+
+static int sharp_panel_disable(struct drm_panel *panel)
+{
+ struct sharp_panel *sharp = to_sharp_panel(panel);
+
+ if (!sharp->enabled)
+ return 0;
+
+ if (sharp->backlight) {
+ sharp->backlight->props.power = FB_BLANK_POWERDOWN;
+ backlight_update_status(sharp->backlight);
+ }
+
+ sharp->enabled = false;
+
+ return 0;
+}
+
+static int sharp_panel_unprepare(struct drm_panel *panel)
+{
+ struct sharp_panel *sharp = to_sharp_panel(panel);
+ int err;
+
+ if (!sharp->prepared)
+ return 0;
+
+ err = mipi_dsi_dcs_set_display_off(sharp->link1);
+ if (err < 0)
+ dev_err(panel->dev, "failed to set display off: %d\n", err);
+
+ err = mipi_dsi_dcs_enter_sleep_mode(sharp->link1);
+ if (err < 0)
+ dev_err(panel->dev, "failed to enter sleep mode: %d\n", err);
+
+ msleep(120);
+
+ regulator_disable(sharp->supply);
+
+ sharp->prepared = false;
+
+ return 0;
+}
+
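+/*
+ * Note (informational): with the 2560x1600 default mode this programs the
+ * left link for columns 0..1279 and the right link for columns 1280..2559,
+ * while both links scan out the full 0..1599 page range.
+ */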
+static int sharp_setup_symmetrical_split(struct mipi_dsi_device *left,
+ struct mipi_dsi_device *right,
+ const struct drm_display_mode *mode)
+{
+ int err;
+
+ err = mipi_dsi_dcs_set_column_address(left, 0, mode->hdisplay / 2 - 1);
+ if (err < 0) {
+ dev_err(&left->dev, "failed to set column address: %d\n", err);
+ return err;
+ }
+
+ err = mipi_dsi_dcs_set_page_address(left, 0, mode->vdisplay - 1);
+ if (err < 0) {
+ dev_err(&left->dev, "failed to set page address: %d\n", err);
+ return err;
+ }
+
+ err = mipi_dsi_dcs_set_column_address(right, mode->hdisplay / 2,
+ mode->hdisplay - 1);
+ if (err < 0) {
+ dev_err(&right->dev, "failed to set column address: %d\n", err);
+ return err;
+ }
+
+ err = mipi_dsi_dcs_set_page_address(right, 0, mode->vdisplay - 1);
+ if (err < 0) {
+ dev_err(&right->dev, "failed to set page address: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int sharp_panel_prepare(struct drm_panel *panel)
+{
+ struct sharp_panel *sharp = to_sharp_panel(panel);
+ u8 format = MIPI_DCS_PIXEL_FMT_24BIT;
+ int err;
+
+ if (sharp->prepared)
+ return 0;
+
+ err = regulator_enable(sharp->supply);
+ if (err < 0)
+ return err;
+
+ usleep_range(10000, 20000);
+
+ err = mipi_dsi_dcs_soft_reset(sharp->link1);
+ if (err < 0) {
+ dev_err(panel->dev, "soft reset failed: %d\n", err);
+ goto poweroff;
+ }
+
+ msleep(120);
+
+ err = mipi_dsi_dcs_exit_sleep_mode(sharp->link1);
+ if (err < 0) {
+ dev_err(panel->dev, "failed to exit sleep mode: %d\n", err);
+ goto poweroff;
+ }
+
+ /*
+ * The MIPI DCS specification mandates this delay only between the
+ * exit_sleep_mode and enter_sleep_mode commands, so it isn't strictly
+ * necessary here.
+ */
+ /*
+ msleep(120);
+ */
+
+ /* set left-right mode */
+ err = sharp_panel_write(sharp, 0x1000, 0x2a);
+ if (err < 0) {
+ dev_err(panel->dev, "failed to set left-right mode: %d\n", err);
+ goto poweroff;
+ }
+
+ /* enable command mode */
+ err = sharp_panel_write(sharp, 0x1001, 0x01);
+ if (err < 0) {
+ dev_err(panel->dev, "failed to enable command mode: %d\n", err);
+ goto poweroff;
+ }
+
+ err = mipi_dsi_dcs_set_pixel_format(sharp->link1, format);
+ if (err < 0) {
+ dev_err(panel->dev, "failed to set pixel format: %d\n", err);
+ goto poweroff;
+ }
+
+ /*
+ * TODO: The device supports both left-right and even-odd split
+ * configurations, but this driver currently supports only the left-
+ * right split. To support a different mode a mechanism needs to be
+ * put in place to communicate the configuration back to the DSI host
+ * controller.
+ */
+ err = sharp_setup_symmetrical_split(sharp->link1, sharp->link2,
+ sharp->mode);
+ if (err < 0) {
+ dev_err(panel->dev, "failed to set up symmetrical split: %d\n",
+ err);
+ goto poweroff;
+ }
+
+ err = mipi_dsi_dcs_set_display_on(sharp->link1);
+ if (err < 0) {
+ dev_err(panel->dev, "failed to set display on: %d\n", err);
+ goto poweroff;
+ }
+
+ sharp->prepared = true;
+
+ return 0;
+
+poweroff:
+ regulator_disable(sharp->supply);
+ return err;
+}
+
+static int sharp_panel_enable(struct drm_panel *panel)
+{
+ struct sharp_panel *sharp = to_sharp_panel(panel);
+
+ if (sharp->enabled)
+ return 0;
+
+ if (sharp->backlight) {
+ sharp->backlight->props.power = FB_BLANK_UNBLANK;
+ backlight_update_status(sharp->backlight);
+ }
+
+ sharp->enabled = true;
+
+ return 0;
+}
+
+static const struct drm_display_mode default_mode = {
+ .clock = 278000,
+ .hdisplay = 2560,
+ .hsync_start = 2560 + 128,
+ .hsync_end = 2560 + 128 + 64,
+ .htotal = 2560 + 128 + 64 + 64,
+ .vdisplay = 1600,
+ .vsync_start = 1600 + 4,
+ .vsync_end = 1600 + 4 + 8,
+ .vtotal = 1600 + 4 + 8 + 32,
+ .vrefresh = 60,
+};
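/*
 * Illustrative sketch, not part of the patch: the 60 Hz vrefresh above
 * follows from the pixel clock and the blanking totals.  .clock is in kHz,
 * so the refresh rate is 278000000 / (2816 * 1644), roughly 60 Hz; the DRM
 * core's drm_mode_vrefresh() performs essentially this calculation.  The
 * helper name below is hypothetical.
 */
static unsigned int sharp_mode_refresh_example(const struct drm_display_mode *mode)
{
	/* For default_mode: (278000 * 1000) / (2816 * 1644) = 60 (integer division). */
	return (mode->clock * 1000) / (mode->htotal * mode->vtotal);
}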
+
+static int sharp_panel_get_modes(struct drm_panel *panel)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(panel->drm, &default_mode);
+ if (!mode) {
+ dev_err(panel->drm->dev, "failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ drm_mode_probed_add(panel->connector, mode);
+
+ panel->connector->display_info.width_mm = 217;
+ panel->connector->display_info.height_mm = 136;
+
+ return 1;
+}
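/*
 * Illustrative sketch, not part of the patch: the physical size reported
 * above works out to roughly 300 DPI, e.g. 2560 px over 217 mm is
 * 2560 * 25.4 / 217 ~= 299 dots per inch (and ~298 on the vertical axis).
 * The helper name below is hypothetical.
 */
static unsigned int sharp_panel_horizontal_dpi_example(void)
{
	return (2560 * 254) / (217 * 10);	/* ~= 299 */
}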
+
+static const struct drm_panel_funcs sharp_panel_funcs = {
+ .disable = sharp_panel_disable,
+ .unprepare = sharp_panel_unprepare,
+ .prepare = sharp_panel_prepare,
+ .enable = sharp_panel_enable,
+ .get_modes = sharp_panel_get_modes,
+};
+
+static const struct of_device_id sharp_of_match[] = {
+ { .compatible = "sharp,lq101r1sx01", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sharp_of_match);
+
+static int sharp_panel_add(struct sharp_panel *sharp)
+{
+ struct device_node *np;
+ int err;
+
+ sharp->mode = &default_mode;
+
+ sharp->supply = devm_regulator_get(&sharp->link1->dev, "power");
+ if (IS_ERR(sharp->supply))
+ return PTR_ERR(sharp->supply);
+
+ np = of_parse_phandle(sharp->link1->dev.of_node, "backlight", 0);
+ if (np) {
+ sharp->backlight = of_find_backlight_by_node(np);
+ of_node_put(np);
+
+ if (!sharp->backlight)
+ return -EPROBE_DEFER;
+ }
+
+ drm_panel_init(&sharp->base);
+ sharp->base.funcs = &sharp_panel_funcs;
+ sharp->base.dev = &sharp->link1->dev;
+
+ err = drm_panel_add(&sharp->base);
+ if (err < 0)
+ goto put_backlight;
+
+ return 0;
+
+put_backlight:
+ if (sharp->backlight)
+ put_device(&sharp->backlight->dev);
+
+ return err;
+}
+
+static void sharp_panel_del(struct sharp_panel *sharp)
+{
+ if (sharp->base.dev)
+ drm_panel_remove(&sharp->base);
+
+ if (sharp->backlight)
+ put_device(&sharp->backlight->dev);
+
+ if (sharp->link2)
+ put_device(&sharp->link2->dev);
+}
+
+static int sharp_panel_probe(struct mipi_dsi_device *dsi)
+{
+ struct mipi_dsi_device *secondary = NULL;
+ struct sharp_panel *sharp;
+ struct device_node *np;
+ int err;
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_LPM;
+
+ /* Find the DSI-LINK2 interface; only the DSI-LINK1 node has a "link2" phandle */
+ np = of_parse_phandle(dsi->dev.of_node, "link2", 0);
+ if (np) {
+ secondary = of_find_mipi_dsi_device_by_node(np);
+ of_node_put(np);
+
+ if (!secondary)
+ return -EPROBE_DEFER;
+ }
+
+ /* register a panel for only the DSI-LINK1 interface */
+ if (secondary) {
+ sharp = devm_kzalloc(&dsi->dev, sizeof(*sharp), GFP_KERNEL);
+ if (!sharp) {
+ put_device(&secondary->dev);
+ return -ENOMEM;
+ }
+
+ mipi_dsi_set_drvdata(dsi, sharp);
+
+ sharp->link2 = secondary;
+ sharp->link1 = dsi;
+
+ err = sharp_panel_add(sharp);
+ if (err < 0) {
+ put_device(&secondary->dev);
+ return err;
+ }
+ }
+
+ err = mipi_dsi_attach(dsi);
+ if (err < 0) {
+ if (secondary)
+ sharp_panel_del(sharp);
+
+ return err;
+ }
+
+ return 0;
+}
+
+static int sharp_panel_remove(struct mipi_dsi_device *dsi)
+{
+ struct sharp_panel *sharp = mipi_dsi_get_drvdata(dsi);
+ int err;
+
+ /* only detach from host for the DSI-LINK2 interface */
+ if (!sharp) {
+ mipi_dsi_detach(dsi);
+ return 0;
+ }
+
+ err = sharp_panel_disable(&sharp->base);
+ if (err < 0)
+ dev_err(&dsi->dev, "failed to disable panel: %d\n", err);
+
+ err = mipi_dsi_detach(dsi);
+ if (err < 0)
+ dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
+
+ drm_panel_detach(&sharp->base);
+ sharp_panel_del(sharp);
+
+ return 0;
+}
+
+static void sharp_panel_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct sharp_panel *sharp = mipi_dsi_get_drvdata(dsi);
+
+ /* nothing to do for DSI-LINK2 */
+ if (!sharp)
+ return;
+
+ sharp_panel_disable(&sharp->base);
+}
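/*
 * Illustrative note, not part of the patch: driver data is assigned only in
 * the DSI-LINK1 probe path (the device whose node carries a "link2"
 * phandle), so the NULL check in remove() and shutdown() above is what
 * identifies the secondary DSI-LINK2 interface.  A minimal sketch of that
 * convention (hypothetical helper name, illustration only):
 */
static bool sharp_panel_is_link1_example(struct mipi_dsi_device *dsi)
{
	/* non-NULL drvdata means this device owns the panel, i.e. DSI-LINK1 */
	return mipi_dsi_get_drvdata(dsi) != NULL;
}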
+
+static struct mipi_dsi_driver sharp_panel_driver = {
+ .driver = {
+ .name = "panel-sharp-lq101r1sx01",
+ .of_match_table = sharp_of_match,
+ },
+ .probe = sharp_panel_probe,
+ .remove = sharp_panel_remove,
+ .shutdown = sharp_panel_shutdown,
+};
+module_mipi_dsi_driver(sharp_panel_driver);
+
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_DESCRIPTION("Sharp LQ101R1SX01 panel driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 12bc8a0ab1c..e95385bf835 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -247,21 +247,14 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
if (IS_ERR(panel->supply))
return PTR_ERR(panel->supply);
- panel->enable_gpio = devm_gpiod_get_optional(dev, "enable");
+ panel->enable_gpio = devm_gpiod_get_optional(dev, "enable",
+ GPIOD_OUT_LOW);
if (IS_ERR(panel->enable_gpio)) {
err = PTR_ERR(panel->enable_gpio);
dev_err(dev, "failed to request GPIO: %d\n", err);
return err;
}
- if (panel->enable_gpio) {
- err = gpiod_direction_output(panel->enable_gpio, 0);
- if (err < 0) {
- dev_err(dev, "failed to setup GPIO: %d\n", err);
- return err;
- }
- }
-
backlight = of_parse_phandle(dev->of_node, "backlight", 0);
if (backlight) {
panel->backlight = of_find_backlight_by_node(backlight);
@@ -376,6 +369,29 @@ static const struct panel_desc auo_b101xtn01 = {
},
};
+static const struct drm_display_mode auo_b116xw03_mode = {
+ .clock = 70589,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 40,
+ .hsync_end = 1366 + 40 + 40,
+ .htotal = 1366 + 40 + 40 + 32,
+ .vdisplay = 768,
+ .vsync_start = 768 + 10,
+ .vsync_end = 768 + 10 + 12,
+ .vtotal = 768 + 10 + 12 + 6,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc auo_b116xw03 = {
+ .modes = &auo_b116xw03_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 256,
+ .height = 144,
+ },
+};
+
static const struct drm_display_mode auo_b133xtn01_mode = {
.clock = 69500,
.hdisplay = 1366,
@@ -415,6 +431,7 @@ static const struct drm_display_mode auo_b133htn01_mode = {
static const struct panel_desc auo_b133htn01 = {
.modes = &auo_b133htn01_mode,
.num_modes = 1,
+ .bpc = 6,
.size = {
.width = 293,
.height = 165,
@@ -536,22 +553,92 @@ static const struct drm_display_mode foxlink_fl500wvr00_a0t_mode = {
static const struct panel_desc foxlink_fl500wvr00_a0t = {
.modes = &foxlink_fl500wvr00_a0t_mode,
.num_modes = 1,
+ .bpc = 8,
.size = {
.width = 108,
.height = 65,
},
};
-static const struct drm_display_mode innolux_n116bge_mode = {
+static const struct drm_display_mode hannstar_hsd070pww1_mode = {
+ .clock = 71100,
+ .hdisplay = 1280,
+ .hsync_start = 1280 + 1,
+ .hsync_end = 1280 + 1 + 158,
+ .htotal = 1280 + 1 + 158 + 1,
+ .vdisplay = 800,
+ .vsync_start = 800 + 1,
+ .vsync_end = 800 + 1 + 21,
+ .vtotal = 800 + 1 + 21 + 1,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc hannstar_hsd070pww1 = {
+ .modes = &hannstar_hsd070pww1_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 151,
+ .height = 94,
+ },
+};
+
+static const struct drm_display_mode hitachi_tx23d38vm0caa_mode = {
+ .clock = 33333,
+ .hdisplay = 800,
+ .hsync_start = 800 + 85,
+ .hsync_end = 800 + 85 + 86,
+ .htotal = 800 + 85 + 86 + 85,
+ .vdisplay = 480,
+ .vsync_start = 480 + 16,
+ .vsync_end = 480 + 16 + 13,
+ .vtotal = 480 + 16 + 13 + 16,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc hitachi_tx23d38vm0caa = {
+ .modes = &hitachi_tx23d38vm0caa_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 195,
+ .height = 117,
+ },
+};
+
+static const struct drm_display_mode innolux_g121i1_l01_mode = {
.clock = 71000,
+ .hdisplay = 1280,
+ .hsync_start = 1280 + 64,
+ .hsync_end = 1280 + 64 + 32,
+ .htotal = 1280 + 64 + 32 + 64,
+ .vdisplay = 800,
+ .vsync_start = 800 + 9,
+ .vsync_end = 800 + 9 + 6,
+ .vtotal = 800 + 9 + 6 + 9,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc innolux_g121i1_l01 = {
+ .modes = &innolux_g121i1_l01_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 261,
+ .height = 163,
+ },
+};
+
+static const struct drm_display_mode innolux_n116bge_mode = {
+ .clock = 76420,
.hdisplay = 1366,
- .hsync_start = 1366 + 64,
- .hsync_end = 1366 + 64 + 6,
- .htotal = 1366 + 64 + 6 + 64,
+ .hsync_start = 1366 + 136,
+ .hsync_end = 1366 + 136 + 30,
+ .htotal = 1366 + 136 + 30 + 60,
.vdisplay = 768,
.vsync_start = 768 + 8,
- .vsync_end = 768 + 8 + 4,
- .vtotal = 768 + 8 + 4 + 8,
+ .vsync_end = 768 + 8 + 12,
+ .vtotal = 768 + 8 + 12 + 12,
.vrefresh = 60,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};
@@ -643,6 +730,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "auo,b101xtn01",
.data = &auo_b101xtn01,
}, {
+ .compatible = "auo,b116xw03",
+ .data = &auo_b116xw03,
+ }, {
.compatible = "auo,b133htn01",
.data = &auo_b133htn01,
}, {
@@ -667,6 +757,15 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "foxlink,fl500wvr00-a0t",
.data = &foxlink_fl500wvr00_a0t,
}, {
+ .compatible = "hannstar,hsd070pww1",
+ .data = &hannstar_hsd070pww1,
+ }, {
+ .compatible = "hit,tx23d38vm0caa",
+ .data = &hitachi_tx23d38vm0caa,
+ }, {
+ .compatible = "innolux,g121i1-l01",
+ .data = &innolux_g121i1_l01,
+ }, {
.compatible = "innolux,n116bge",
.data = &innolux_n116bge,
}, {
@@ -740,6 +839,7 @@ static const struct panel_desc_dsi lg_ld070wx3_sl01 = {
.desc = {
.modes = &lg_ld070wx3_sl01_mode,
.num_modes = 1,
+ .bpc = 8,
.size = {
.width = 94,
.height = 151,
@@ -767,6 +867,7 @@ static const struct panel_desc_dsi lg_lh500wx1_sd03 = {
.desc = {
.modes = &lg_lh500wx1_sd03_mode,
.num_modes = 1,
+ .bpc = 8,
.size = {
.width = 62,
.height = 110,
@@ -794,6 +895,7 @@ static const struct panel_desc_dsi panasonic_vvx10f004b00 = {
.desc = {
.modes = &panasonic_vvx10f004b00_mode,
.num_modes = 1,
+ .bpc = 8,
.size = {
.width = 217,
.height = 136,
@@ -863,7 +965,6 @@ static void panel_simple_dsi_shutdown(struct mipi_dsi_device *dsi)
static struct mipi_dsi_driver panel_simple_dsi_driver = {
.driver = {
.name = "panel-simple-dsi",
- .owner = THIS_MODULE,
.of_match_table = dsi_of_match,
},
.probe = panel_simple_dsi_probe,
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 0d139626685..4a0a8b29b0a 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -29,6 +29,7 @@
#include "qxl_drv.h"
#include "qxl_object.h"
#include "drm_crtc_helper.h"
+#include <drm/drm_plane_helper.h>
static bool qxl_head_enabled(struct qxl_head *head)
{
@@ -100,14 +101,37 @@ static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
return 0;
}
+static void qxl_update_offset_props(struct qxl_device *qdev)
+{
+ struct drm_device *dev = qdev->ddev;
+ struct drm_connector *connector;
+ struct qxl_output *output;
+ struct qxl_head *head;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ output = drm_connector_to_qxl_output(connector);
+
+ head = &qdev->client_monitors_config->heads[output->index];
+
+ drm_object_property_set_value(&connector->base,
+ dev->mode_config.suggested_x_property, head->x);
+ drm_object_property_set_value(&connector->base,
+ dev->mode_config.suggested_y_property, head->y);
+ }
+}
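/*
 * Illustrative sketch, not part of the patch: "suggested X" and "suggested Y"
 * are the immutable connector properties created by
 * drm_mode_create_suggested_offset_properties(), which let the guest driver
 * suggest where each output should be placed.  A userspace client could read
 * them with libdrm roughly as follows (hypothetical helper; needs <stdio.h>,
 * <string.h>, <xf86drm.h> and <xf86drmMode.h>; error handling omitted):
 *
 *	static void read_suggested_offsets(int fd, uint32_t connector_id)
 *	{
 *		drmModeObjectPropertiesPtr props =
 *			drmModeObjectGetProperties(fd, connector_id,
 *						   DRM_MODE_OBJECT_CONNECTOR);
 *		uint32_t i;
 *
 *		for (i = 0; i < props->count_props; i++) {
 *			drmModePropertyPtr prop =
 *				drmModeGetProperty(fd, props->props[i]);
 *
 *			if (!strcmp(prop->name, "suggested X") ||
 *			    !strcmp(prop->name, "suggested Y"))
 *				printf("%s = %llu\n", prop->name,
 *				       (unsigned long long)props->prop_values[i]);
 *			drmModeFreeProperty(prop);
 *		}
 *		drmModeFreeObjectProperties(props);
 *	}
 */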
+
void qxl_display_read_client_monitors_config(struct qxl_device *qdev)
{
+ struct drm_device *dev = qdev->ddev;
while (qxl_display_copy_rom_client_monitors_config(qdev)) {
qxl_io_log(qdev, "failed crc check for client_monitors_config,"
" retrying\n");
}
+ drm_modeset_lock_all(dev);
+ qxl_update_offset_props(qdev);
+ drm_modeset_unlock_all(dev);
if (!drm_helper_hpd_irq_event(qdev->ddev)) {
/* notify that the monitor configuration changed, to
adjust at the arbitrary resolution */
@@ -568,7 +592,6 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct qxl_device *qdev = dev->dev_private;
- struct qxl_mode *m = (void *)mode->private;
struct qxl_framebuffer *qfb;
struct qxl_bo *bo, *old_bo = NULL;
struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
@@ -586,12 +609,6 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
}
qfb = to_qxl_framebuffer(crtc->primary->fb);
bo = gem_to_qxl_bo(qfb->obj);
- if (!m)
- /* and do we care? */
- DRM_DEBUG("%dx%d: not a native mode\n", x, y);
- else
- DRM_DEBUG("%dx%d: qxl id %d\n",
- mode->hdisplay, mode->vdisplay, m->id);
DRM_DEBUG("+%d+%d (%d,%d) => (%d,%d)\n",
x, y,
mode->hdisplay, mode->vdisplay,
@@ -951,6 +968,10 @@ static int qdev_output_init(struct drm_device *dev, int num_output)
drm_object_attach_property(&connector->base,
qdev->hotplug_mode_update_property, 0);
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.suggested_x_property, 0);
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.suggested_y_property, 0);
drm_connector_register(connector);
return 0;
}
@@ -1064,6 +1085,7 @@ int qxl_modeset_init(struct qxl_device *qdev)
qdev->ddev->mode_config.fb_base = qdev->vram_base;
+ drm_mode_create_suggested_offset_properties(qdev->ddev);
qxl_mode_create_hotplug_mode_update_property(qdev);
for (i = 0 ; i < qxl_num_crtc; ++i) {
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 446e71ca36c..d9b25684ac9 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -264,7 +264,8 @@ int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
if (list_is_singular(&release->bos))
return 0;
- ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos, !no_intr);
+ ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
+ !no_intr, NULL);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
index 575e986f82a..8fd2d9f58f7 100644
--- a/drivers/gpu/drm/r128/r128_state.c
+++ b/drivers/gpu/drm/r128/r128_state.c
@@ -905,7 +905,7 @@ static int r128_cce_dispatch_write_span(struct drm_device *dev,
if (IS_ERR(buffer))
return PTR_ERR(buffer);
- mask_size = depth->n * sizeof(u8);
+ mask_size = depth->n;
if (depth->mask) {
mask = memdup_user(depth->mask, mask_size);
if (IS_ERR(mask)) {
@@ -1010,7 +1010,7 @@ static int r128_cce_dispatch_write_pixels(struct drm_device *dev,
}
if (depth->mask) {
- mask_size = depth->n * sizeof(u8);
+ mask_size = depth->n;
mask = memdup_user(depth->mask, mask_size);
if (IS_ERR(mask)) {
kfree(x);
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index d01b8799142..12bc21219a0 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -80,7 +80,8 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \
rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \
trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \
- ci_dpm.o dce6_afmt.o radeon_vm.o radeon_ucode.o radeon_ib.o radeon_mn.o
+ ci_dpm.o dce6_afmt.o radeon_vm.o radeon_ucode.o radeon_ib.o radeon_mn.o \
+ radeon_sync.o
# add async DMA block
radeon-y += \
@@ -104,6 +105,7 @@ radeon-y += \
radeon_vce.o \
vce_v1_0.o \
vce_v2_0.o \
+ radeon_kfd.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 30d242b2507..d59ec491dbb 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -2039,6 +2039,7 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
atombios_crtc_set_base(crtc, x, y, old_fb);
atombios_overscan_setup(crtc, mode, adjusted_mode);
atombios_scaler_setup(crtc);
+ radeon_cursor_reset(crtc);
/* update the hw version fpr dpm */
radeon_crtc->hw_mode = *adjusted_mode;
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 11a55e9dad7..f373a81ba3d 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -46,15 +46,15 @@
static const struct ci_pt_defaults defaults_hawaii_xt =
{
1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
- { 0x84, 0x0, 0x0, 0x7F, 0x0, 0x0, 0x5A, 0x60, 0x51, 0x8E, 0x79, 0x6B, 0x5F, 0x90, 0x79 },
- { 0x1EA, 0x1EA, 0x1EA, 0x224, 0x224, 0x224, 0x24F, 0x24F, 0x24F, 0x28E, 0x28E, 0x28E, 0x2BC, 0x2BC, 0x2BC }
+ { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
+ { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};
static const struct ci_pt_defaults defaults_hawaii_pro =
{
1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
- { 0x93, 0x0, 0x0, 0x97, 0x0, 0x0, 0x6B, 0x60, 0x51, 0x95, 0x79, 0x6B, 0x5F, 0x90, 0x79 },
- { 0x1EA, 0x1EA, 0x1EA, 0x224, 0x224, 0x224, 0x24F, 0x24F, 0x24F, 0x28E, 0x28E, 0x28E, 0x2BC, 0x2BC, 0x2BC }
+ { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
+ { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};
static const struct ci_pt_defaults defaults_bonaire_xt =
@@ -184,6 +184,9 @@ static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
u32 target_tdp);
static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);
+static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
+ PPSMC_Msg msg, u32 parameter);
+
static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
{
struct ci_power_info *pi = rdev->pm.dpm.priv;
@@ -249,7 +252,10 @@ static void ci_initialize_powertune_defaults(struct radeon_device *rdev)
if (pi->caps_power_containment) {
pi->caps_cac = true;
- pi->enable_bapm_feature = true;
+ if (rdev->family == CHIP_HAWAII)
+ pi->enable_bapm_feature = false;
+ else
+ pi->enable_bapm_feature = true;
pi->enable_tdc_limit_feature = true;
pi->enable_pkg_pwr_tracking_feature = true;
}
@@ -352,6 +358,21 @@ static int ci_populate_dw8(struct radeon_device *rdev)
return 0;
}
+static int ci_populate_fuzzy_fan(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ if ((rdev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
+ (rdev->pm.dpm.fan.fan_output_sensitivity == 0))
+ rdev->pm.dpm.fan.fan_output_sensitivity =
+ rdev->pm.dpm.fan.default_fan_output_sensitivity;
+
+ pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
+ cpu_to_be16(rdev->pm.dpm.fan.fan_output_sensitivity);
+
+ return 0;
+}
+
static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev)
{
struct ci_power_info *pi = ci_get_pi(rdev);
@@ -477,6 +498,9 @@ static int ci_populate_pm_base(struct radeon_device *rdev)
ret = ci_populate_dw8(rdev);
if (ret)
return ret;
+ ret = ci_populate_fuzzy_fan(rdev);
+ if (ret)
+ return ret;
ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev);
if (ret)
return ret;
@@ -690,6 +714,25 @@ static int ci_enable_smc_cac(struct radeon_device *rdev, bool enable)
return ret;
}
+static int ci_enable_thermal_based_sclk_dpm(struct radeon_device *rdev,
+ bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ PPSMC_Result smc_result = PPSMC_Result_OK;
+
+ if (pi->thermal_sclk_dpm_enabled) {
+ if (enable)
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_ENABLE_THERMAL_DPM);
+ else
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DISABLE_THERMAL_DPM);
+ }
+
+ if (smc_result == PPSMC_Result_OK)
+ return 0;
+ else
+ return -EINVAL;
+}
+
static int ci_power_control_set_level(struct radeon_device *rdev)
{
struct ci_power_info *pi = ci_get_pi(rdev);
@@ -700,13 +743,11 @@ static int ci_power_control_set_level(struct radeon_device *rdev)
int ret = 0;
bool adjust_polarity = false; /* ??? */
- if (pi->caps_power_containment &&
- (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)) {
+ if (pi->caps_power_containment) {
adjust_percent = adjust_polarity ?
rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment);
target_tdp = ((100 + adjust_percent) *
(s32)cac_tdp_table->configurable_tdp) / 100;
- target_tdp *= 256;
ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp);
}
@@ -814,7 +855,7 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
}
}
-static int ci_set_thermal_temperature_range(struct radeon_device *rdev,
+static int ci_thermal_set_temperature_range(struct radeon_device *rdev,
int min_temp, int max_temp)
{
int low_temp = 0 * 1000;
@@ -850,6 +891,350 @@ static int ci_set_thermal_temperature_range(struct radeon_device *rdev,
return 0;
}
+static int ci_thermal_enable_alert(struct radeon_device *rdev,
+ bool enable)
+{
+ u32 thermal_int = RREG32_SMC(CG_THERMAL_INT);
+ PPSMC_Result result;
+
+ if (enable) {
+ thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
+ WREG32_SMC(CG_THERMAL_INT, thermal_int);
+ rdev->irq.dpm_thermal = false;
+ result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Enable);
+ if (result != PPSMC_Result_OK) {
+ DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
+ return -EINVAL;
+ }
+ } else {
+ thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
+ WREG32_SMC(CG_THERMAL_INT, thermal_int);
+ rdev->irq.dpm_thermal = true;
+ result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Disable);
+ if (result != PPSMC_Result_OK) {
+ DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static void ci_fan_ctrl_set_static_mode(struct radeon_device *rdev, u32 mode)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 tmp;
+
+ if (pi->fan_ctrl_is_in_default_mode) {
+ tmp = (RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
+ pi->fan_ctrl_default_mode = tmp;
+ tmp = (RREG32_SMC(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
+ pi->t_min = tmp;
+ pi->fan_ctrl_is_in_default_mode = false;
+ }
+
+ tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
+ tmp |= TMIN(0);
+ WREG32_SMC(CG_FDO_CTRL2, tmp);
+
+ tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
+ tmp |= FDO_PWM_MODE(mode);
+ WREG32_SMC(CG_FDO_CTRL2, tmp);
+}
+
+static int ci_thermal_setup_fan_table(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+ u32 duty100;
+ u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+ u16 fdo_min, slope1, slope2;
+ u32 reference_clock, tmp;
+ int ret;
+ u64 tmp64;
+
+ if (!pi->fan_table_start) {
+ rdev->pm.dpm.fan.ucode_fan_control = false;
+ return 0;
+ }
+
+ duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+
+ if (duty100 == 0) {
+ rdev->pm.dpm.fan.ucode_fan_control = false;
+ return 0;
+ }
+
+ tmp64 = (u64)rdev->pm.dpm.fan.pwm_min * duty100;
+ do_div(tmp64, 10000);
+ fdo_min = (u16)tmp64;
+
+ t_diff1 = rdev->pm.dpm.fan.t_med - rdev->pm.dpm.fan.t_min;
+ t_diff2 = rdev->pm.dpm.fan.t_high - rdev->pm.dpm.fan.t_med;
+
+ pwm_diff1 = rdev->pm.dpm.fan.pwm_med - rdev->pm.dpm.fan.pwm_min;
+ pwm_diff2 = rdev->pm.dpm.fan.pwm_high - rdev->pm.dpm.fan.pwm_med;
+
+ slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+ slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
+ fan_table.TempMin = cpu_to_be16((50 + rdev->pm.dpm.fan.t_min) / 100);
+ fan_table.TempMed = cpu_to_be16((50 + rdev->pm.dpm.fan.t_med) / 100);
+ fan_table.TempMax = cpu_to_be16((50 + rdev->pm.dpm.fan.t_max) / 100);
+
+ fan_table.Slope1 = cpu_to_be16(slope1);
+ fan_table.Slope2 = cpu_to_be16(slope2);
+
+ fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+ fan_table.HystDown = cpu_to_be16(rdev->pm.dpm.fan.t_hyst);
+
+ fan_table.HystUp = cpu_to_be16(1);
+
+ fan_table.HystSlope = cpu_to_be16(1);
+
+ fan_table.TempRespLim = cpu_to_be16(5);
+
+ reference_clock = radeon_get_xclk(rdev);
+
+ fan_table.RefreshPeriod = cpu_to_be32((rdev->pm.dpm.fan.cycle_delay *
+ reference_clock) / 1600);
+
+ fan_table.FdoMax = cpu_to_be16((u16)duty100);
+
+ tmp = (RREG32_SMC(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
+ fan_table.TempSrc = (uint8_t)tmp;
+
+ ret = ci_copy_bytes_to_smc(rdev,
+ pi->fan_table_start,
+ (u8 *)(&fan_table),
+ sizeof(fan_table),
+ pi->sram_end);
+
+ if (ret) {
+ DRM_ERROR("Failed to load fan table to the SMC.");
+ rdev->pm.dpm.fan.ucode_fan_control = false;
+ }
+
+ return 0;
+}
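/*
 * Illustrative sketch, not part of the patch: the Slope1/Slope2 terms above
 * are fixed-point "PWM change per temperature change" factors.  With assumed
 * (hypothetical) fan-table values duty100 = 255, pwm_diff = 3000 and
 * t_diff = 1500, the expression used above evaluates to
 * (50 + (16 * 255 * 3000) / 1500) / 100 = (50 + 8160) / 100 = 82.
 * The helper name below is hypothetical.
 */
static u16 ci_fan_slope_example(u32 duty100, u32 pwm_diff, u32 t_diff)
{
	/* same formula as in ci_thermal_setup_fan_table() */
	return (u16)((50 + ((16 * duty100 * pwm_diff) / t_diff)) / 100);
}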
+
+static int ci_fan_ctrl_start_smc_fan_control(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ PPSMC_Result ret;
+
+ if (pi->caps_od_fuzzy_fan_control_support) {
+ ret = ci_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_StartFanControl,
+ FAN_CONTROL_FUZZY);
+ if (ret != PPSMC_Result_OK)
+ return -EINVAL;
+ ret = ci_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_SetFanPwmMax,
+ rdev->pm.dpm.fan.default_max_fan_pwm);
+ if (ret != PPSMC_Result_OK)
+ return -EINVAL;
+ } else {
+ ret = ci_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_StartFanControl,
+ FAN_CONTROL_TABLE);
+ if (ret != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#if 0
+static int ci_fan_ctrl_stop_smc_fan_control(struct radeon_device *rdev)
+{
+ PPSMC_Result ret;
+
+ ret = ci_send_msg_to_smc(rdev, PPSMC_StopFanControl);
+ if (ret == PPSMC_Result_OK)
+ return 0;
+ else
+ return -EINVAL;
+}
+
+static int ci_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
+ u32 *speed)
+{
+ u32 duty, duty100;
+ u64 tmp64;
+
+ if (rdev->pm.no_fan)
+ return -ENOENT;
+
+ duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+ duty = (RREG32_SMC(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;
+
+ if (duty100 == 0)
+ return -EINVAL;
+
+ tmp64 = (u64)duty * 100;
+ do_div(tmp64, duty100);
+ *speed = (u32)tmp64;
+
+ if (*speed > 100)
+ *speed = 100;
+
+ return 0;
+}
+
+static int ci_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
+ u32 speed)
+{
+ u32 tmp;
+ u32 duty, duty100;
+ u64 tmp64;
+
+ if (rdev->pm.no_fan)
+ return -ENOENT;
+
+ if (speed > 100)
+ return -EINVAL;
+
+ if (rdev->pm.dpm.fan.ucode_fan_control)
+ ci_fan_ctrl_stop_smc_fan_control(rdev);
+
+ duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+
+ if (duty100 == 0)
+ return -EINVAL;
+
+ tmp64 = (u64)speed * duty100;
+ do_div(tmp64, 100);
+ duty = (u32)tmp64;
+
+ tmp = RREG32_SMC(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
+ tmp |= FDO_STATIC_DUTY(duty);
+ WREG32_SMC(CG_FDO_CTRL0, tmp);
+
+ ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
+
+ return 0;
+}
+
+static int ci_fan_ctrl_get_fan_speed_rpm(struct radeon_device *rdev,
+ u32 *speed)
+{
+ u32 tach_period;
+ u32 xclk = radeon_get_xclk(rdev);
+
+ if (rdev->pm.no_fan)
+ return -ENOENT;
+
+ if (rdev->pm.fan_pulses_per_revolution == 0)
+ return -ENOENT;
+
+ tach_period = (RREG32_SMC(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
+ if (tach_period == 0)
+ return -ENOENT;
+
+ *speed = 60 * xclk * 10000 / tach_period;
+
+ return 0;
+}
+
+static int ci_fan_ctrl_set_fan_speed_rpm(struct radeon_device *rdev,
+ u32 speed)
+{
+ u32 tach_period, tmp;
+ u32 xclk = radeon_get_xclk(rdev);
+
+ if (rdev->pm.no_fan)
+ return -ENOENT;
+
+ if (rdev->pm.fan_pulses_per_revolution == 0)
+ return -ENOENT;
+
+ if ((speed < rdev->pm.fan_min_rpm) ||
+ (speed > rdev->pm.fan_max_rpm))
+ return -EINVAL;
+
+ if (rdev->pm.dpm.fan.ucode_fan_control)
+ ci_fan_ctrl_stop_smc_fan_control(rdev);
+
+ tach_period = 60 * xclk * 10000 / (8 * speed);
+ tmp = RREG32_SMC(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
+ tmp |= TARGET_PERIOD(tach_period);
+ WREG32_SMC(CG_TACH_CTRL, tmp);
+
+ ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC_RPM);
+
+ return 0;
+}
+#endif
+
+static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 tmp;
+
+ if (!pi->fan_ctrl_is_in_default_mode) {
+ tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
+ tmp |= FDO_PWM_MODE(pi->fan_ctrl_default_mode);
+ WREG32_SMC(CG_FDO_CTRL2, tmp);
+
+ tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
+ tmp |= TMIN(pi->t_min);
+ WREG32_SMC(CG_FDO_CTRL2, tmp);
+ pi->fan_ctrl_is_in_default_mode = true;
+ }
+}
+
+static void ci_thermal_start_smc_fan_control(struct radeon_device *rdev)
+{
+ if (rdev->pm.dpm.fan.ucode_fan_control) {
+ ci_fan_ctrl_start_smc_fan_control(rdev);
+ ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
+ }
+}
+
+static void ci_thermal_initialize(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ if (rdev->pm.fan_pulses_per_revolution) {
+ tmp = RREG32_SMC(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
+ tmp |= EDGE_PER_REV(rdev->pm.fan_pulses_per_revolution - 1);
+ WREG32_SMC(CG_TACH_CTRL, tmp);
+ }
+
+ tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
+ tmp |= TACH_PWM_RESP_RATE(0x28);
+ WREG32_SMC(CG_FDO_CTRL2, tmp);
+}
+
+static int ci_thermal_start_thermal_controller(struct radeon_device *rdev)
+{
+ int ret;
+
+ ci_thermal_initialize(rdev);
+ ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+ if (ret)
+ return ret;
+ ret = ci_thermal_enable_alert(rdev, true);
+ if (ret)
+ return ret;
+ if (rdev->pm.dpm.fan.ucode_fan_control) {
+ ret = ci_thermal_setup_fan_table(rdev);
+ if (ret)
+ return ret;
+ ci_thermal_start_smc_fan_control(rdev);
+ }
+
+ return 0;
+}
+
+static void ci_thermal_stop_thermal_controller(struct radeon_device *rdev)
+{
+ if (!rdev->pm.no_fan)
+ ci_fan_ctrl_set_default_mode(rdev);
+}
+
#if 0
static int ci_read_smc_soft_register(struct radeon_device *rdev,
u16 reg_offset, u32 *value)
@@ -1253,7 +1638,7 @@ static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n)
if (!pi->sclk_dpm_key_disabled) {
PPSMC_Result smc_result =
- ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, n);
+ ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
if (smc_result != PPSMC_Result_OK)
return -EINVAL;
}
@@ -1267,7 +1652,7 @@ static int ci_dpm_force_state_mclk(struct radeon_device *rdev, u32 n)
if (!pi->mclk_dpm_key_disabled) {
PPSMC_Result smc_result =
- ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_ForceState, n);
+ ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
if (smc_result != PPSMC_Result_OK)
return -EINVAL;
}
@@ -2042,6 +2427,33 @@ static int ci_force_switch_to_arb_f0(struct radeon_device *rdev)
return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
}
+static void ci_register_patching_mc_arb(struct radeon_device *rdev,
+ const u32 engine_clock,
+ const u32 memory_clock,
+ u32 *dram_timimg2)
+{
+ bool patch;
+ u32 tmp, tmp2;
+
+ tmp = RREG32(MC_SEQ_MISC0);
+ patch = ((tmp & 0x0000f00) == 0x300) ? true : false;
+
+ if (patch &&
+ ((rdev->pdev->device == 0x67B0) ||
+ (rdev->pdev->device == 0x67B1))) {
+ if ((memory_clock > 100000) && (memory_clock <= 125000)) {
+ tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
+ *dram_timing2 &= ~0x00ff0000;
+ *dram_timing2 |= tmp2 << 16;
+ } else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
+ tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
+ *dram_timing2 &= ~0x00ff0000;
+ *dram_timing2 |= tmp2 << 16;
+ }
+ }
+}
+
+
static int ci_populate_memory_timing_parameters(struct radeon_device *rdev,
u32 sclk,
u32 mclk,
@@ -2057,6 +2469,8 @@ static int ci_populate_memory_timing_parameters(struct radeon_device *rdev,
dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
+ ci_register_patching_mc_arb(rdev, sclk, mclk, &dram_timing2);
+
arb_regs->McArbDramTiming = cpu_to_be32(dram_timing);
arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
arb_regs->McArbBurstTime = (u8)burst_time;
@@ -2351,10 +2765,10 @@ static int ci_calculate_mclk_params(struct radeon_device *rdev,
u32 tmp;
u32 reference_clock = rdev->clock.mpll.reference_freq;
- if (pi->mem_gddr5)
- freq_nom = memory_clock * 4;
+ if (mpll_param.qdr == 1)
+ freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
else
- freq_nom = memory_clock * 2;
+ freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);
tmp = (freq_nom / reference_clock);
tmp = tmp * tmp;
@@ -2434,7 +2848,6 @@ static int ci_populate_single_memory_level(struct radeon_device *rdev,
&memory_level->MinVddcPhases);
memory_level->EnabledForThrottle = 1;
- memory_level->EnabledForActivity = 1;
memory_level->UpH = 0;
memory_level->DownH = 100;
memory_level->VoltageDownH = 0;
@@ -2767,7 +3180,6 @@ static int ci_populate_single_graphic_level(struct radeon_device *rdev,
graphic_level->CcPwrDynRm = 0;
graphic_level->CcPwrDynRm1 = 0;
- graphic_level->EnabledForActivity = 1;
graphic_level->EnabledForThrottle = 1;
graphic_level->UpH = 0;
graphic_level->DownH = 0;
@@ -2816,10 +3228,13 @@ static int ci_populate_all_graphic_levels(struct radeon_device *rdev)
&pi->smc_state_table.GraphicsLevel[i]);
if (ret)
return ret;
+ if (i > 1)
+ pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
if (i == (dpm_table->sclk_table.count - 1))
pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
PPSMC_DISPLAY_WATERMARK_HIGH;
}
+ pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
@@ -2863,6 +3278,16 @@ static int ci_populate_all_memory_levels(struct radeon_device *rdev)
return ret;
}
+ pi->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
+
+ if ((dpm_table->mclk_table.count >= 2) &&
+ ((rdev->pdev->device == 0x67B0) || (rdev->pdev->device == 0x67B1))) {
+ pi->smc_state_table.MemoryLevel[1].MinVddc =
+ pi->smc_state_table.MemoryLevel[0].MinVddc;
+ pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
+ pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
+ }
+
pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
@@ -2919,9 +3344,14 @@ static int ci_setup_default_pcie_tables(struct radeon_device *rdev)
&pi->dpm_table.pcie_speed_table,
SMU7_MAX_LEVELS_LINK);
- ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
- pi->pcie_gen_powersaving.min,
- pi->pcie_lane_powersaving.min);
+ if (rdev->family == CHIP_BONAIRE)
+ ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
+ pi->pcie_gen_powersaving.min,
+ pi->pcie_lane_powersaving.max);
+ else
+ ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
+ pi->pcie_gen_powersaving.min,
+ pi->pcie_lane_powersaving.min);
ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
pi->pcie_gen_performance.min,
pi->pcie_lane_performance.min);
@@ -2988,19 +3418,21 @@ static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
allowed_sclk_vddc_table->entries[i].clk)) {
pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
allowed_sclk_vddc_table->entries[i].clk;
- pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled = true;
+ pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
+ (i == 0) ? true : false;
pi->dpm_table.sclk_table.count++;
}
}
pi->dpm_table.mclk_table.count = 0;
for (i = 0; i < allowed_mclk_table->count; i++) {
- if ((i==0) ||
+ if ((i == 0) ||
(pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
allowed_mclk_table->entries[i].clk)) {
pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
allowed_mclk_table->entries[i].clk;
- pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled = true;
+ pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
+ (i == 0) ? true : false;
pi->dpm_table.mclk_table.count++;
}
}
@@ -3166,7 +3598,7 @@ static int ci_init_smc_table(struct radeon_device *rdev)
table->VddcVddciDelta = 4000;
table->PhaseResponseTime = 0;
table->MemoryThermThrottleEnable = 1;
- table->PCIeBootLinkLevel = 0;
+ table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
table->PCIeGenInterval = 1;
if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
table->SVI2Enable = 1;
@@ -3320,6 +3752,8 @@ static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
struct ci_power_info *pi = ci_get_pi(rdev);
PPSMC_Result result;
+ ci_apply_disp_minimum_voltage_request(rdev);
+
if (!pi->sclk_dpm_key_disabled) {
if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
result = ci_send_msg_to_smc_with_parameter(rdev,
@@ -3339,7 +3773,7 @@ static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
return -EINVAL;
}
}
-
+#if 0
if (!pi->pcie_dpm_key_disabled) {
if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
result = ci_send_msg_to_smc_with_parameter(rdev,
@@ -3349,9 +3783,7 @@ static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
return -EINVAL;
}
}
-
- ci_apply_disp_minimum_voltage_request(rdev);
-
+#endif
return 0;
}
@@ -3377,7 +3809,7 @@ static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev,
pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
} else {
/* XXX check display min clock requirements */
- if (0 != CISLAND_MINIMUM_ENGINE_CLOCK)
+ if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
}
@@ -3707,62 +4139,61 @@ int ci_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level)
{
struct ci_power_info *pi = ci_get_pi(rdev);
- PPSMC_Result smc_result;
u32 tmp, levels, i;
int ret;
if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
- if ((!pi->sclk_dpm_key_disabled) &&
- pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
+ if ((!pi->pcie_dpm_key_disabled) &&
+ pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
levels = 0;
- tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
+ tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
while (tmp >>= 1)
levels++;
if (levels) {
- ret = ci_dpm_force_state_sclk(rdev, levels);
+ ret = ci_dpm_force_state_pcie(rdev, level);
if (ret)
return ret;
for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
- CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
+ tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
+ CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
if (tmp == levels)
break;
udelay(1);
}
}
}
- if ((!pi->mclk_dpm_key_disabled) &&
- pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
+ if ((!pi->sclk_dpm_key_disabled) &&
+ pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
levels = 0;
- tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
+ tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
while (tmp >>= 1)
levels++;
if (levels) {
- ret = ci_dpm_force_state_mclk(rdev, levels);
+ ret = ci_dpm_force_state_sclk(rdev, levels);
if (ret)
return ret;
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
- CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
+ CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
if (tmp == levels)
break;
udelay(1);
}
}
}
- if ((!pi->pcie_dpm_key_disabled) &&
- pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
+ if ((!pi->mclk_dpm_key_disabled) &&
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
levels = 0;
- tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
+ tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
while (tmp >>= 1)
levels++;
if (levels) {
- ret = ci_dpm_force_state_pcie(rdev, level);
+ ret = ci_dpm_force_state_mclk(rdev, levels);
if (ret)
return ret;
for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
- CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
+ tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
+ CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
if (tmp == levels)
break;
udelay(1);
@@ -3816,21 +4247,17 @@ int ci_dpm_force_performance_level(struct radeon_device *rdev,
}
}
} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
- if (!pi->sclk_dpm_key_disabled) {
- smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel);
- if (smc_result != PPSMC_Result_OK)
- return -EINVAL;
- }
- if (!pi->mclk_dpm_key_disabled) {
- smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_NoForcedLevel);
- if (smc_result != PPSMC_Result_OK)
- return -EINVAL;
- }
if (!pi->pcie_dpm_key_disabled) {
- smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_UnForceLevel);
+ PPSMC_Result smc_result;
+
+ smc_result = ci_send_msg_to_smc(rdev,
+ PPSMC_MSG_PCIeDPM_UnForceLevel);
if (smc_result != PPSMC_Result_OK)
return -EINVAL;
}
+ ret = ci_upload_dpm_level_enable_mask(rdev);
+ if (ret)
+ return ret;
}
rdev->pm.dpm.forced_level = level;
@@ -4036,6 +4463,96 @@ static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
return 0;
}
+static int ci_register_patching_mc_seq(struct radeon_device *rdev,
+ struct ci_mc_reg_table *table)
+{
+ u8 i, k;
+ u32 tmp;
+ bool patch;
+
+ tmp = RREG32(MC_SEQ_MISC0);
+ patch = ((tmp & 0x0000f00) == 0x300) ? true : false;
+
+ if (patch &&
+ ((rdev->pdev->device == 0x67B0) ||
+ (rdev->pdev->device == 0x67B1))) {
+ for (i = 0; i < table->last; i++) {
+ if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+ switch(table->mc_reg_address[i].s1 >> 2) {
+ case MC_SEQ_MISC1:
+ for (k = 0; k < table->num_entries; k++) {
+ if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
+ (table->mc_reg_table_entry[k].mclk_max == 137500))
+ table->mc_reg_table_entry[k].mc_data[i] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
+ 0x00000007;
+ }
+ break;
+ case MC_SEQ_WR_CTL_D0:
+ for (k = 0; k < table->num_entries; k++) {
+ if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
+ (table->mc_reg_table_entry[k].mclk_max == 137500))
+ table->mc_reg_table_entry[k].mc_data[i] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
+ 0x0000D0DD;
+ }
+ break;
+ case MC_SEQ_WR_CTL_D1:
+ for (k = 0; k < table->num_entries; k++) {
+ if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
+ (table->mc_reg_table_entry[k].mclk_max == 137500))
+ table->mc_reg_table_entry[k].mc_data[i] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
+ 0x0000D0DD;
+ }
+ break;
+ case MC_SEQ_WR_CTL_2:
+ for (k = 0; k < table->num_entries; k++) {
+ if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
+ (table->mc_reg_table_entry[k].mclk_max == 137500))
+ table->mc_reg_table_entry[k].mc_data[i] = 0;
+ }
+ break;
+ case MC_SEQ_CAS_TIMING:
+ for (k = 0; k < table->num_entries; k++) {
+ if (table->mc_reg_table_entry[k].mclk_max == 125000)
+ table->mc_reg_table_entry[k].mc_data[i] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
+ 0x000C0140;
+ else if (table->mc_reg_table_entry[k].mclk_max == 137500)
+ table->mc_reg_table_entry[k].mc_data[i] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
+ 0x000C0150;
+ }
+ break;
+ case MC_SEQ_MISC_TIMING:
+ for (k = 0; k < table->num_entries; k++) {
+ if (table->mc_reg_table_entry[k].mclk_max == 125000)
+ table->mc_reg_table_entry[k].mc_data[i] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
+ 0x00000030;
+ else if (table->mc_reg_table_entry[k].mclk_max == 137500)
+ table->mc_reg_table_entry[k].mc_data[i] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
+ 0x00000035;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ WREG32(MC_SEQ_IO_DEBUG_INDEX, 3);
+ tmp = RREG32(MC_SEQ_IO_DEBUG_DATA);
+ tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
+ WREG32(MC_SEQ_IO_DEBUG_INDEX, 3);
+ WREG32(MC_SEQ_IO_DEBUG_DATA, tmp);
+ }
+
+ return 0;
+}
+
static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
{
struct ci_power_info *pi = ci_get_pi(rdev);
@@ -4079,6 +4596,10 @@ static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
ci_set_s0_mc_reg_index(ci_table);
+ ret = ci_register_patching_mc_seq(rdev, ci_table);
+ if (ret)
+ goto init_mc_done;
+
ret = ci_set_mc_special_registers(rdev, ci_table);
if (ret)
goto init_mc_done;
@@ -4675,36 +5196,51 @@ int ci_dpm_enable(struct radeon_device *rdev)
return ret;
}
+ ret = ci_power_control_set_level(rdev);
+ if (ret) {
+ DRM_ERROR("ci_power_control_set_level failed\n");
+ return ret;
+ }
+
ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+ ret = ci_enable_thermal_based_sclk_dpm(rdev, true);
+ if (ret) {
+ DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n");
+ return ret;
+ }
+
+ ci_thermal_start_thermal_controller(rdev);
+
ci_update_current_ps(rdev, boot_ps);
return 0;
}
-int ci_dpm_late_enable(struct radeon_device *rdev)
+static int ci_set_temperature_range(struct radeon_device *rdev)
{
int ret;
- if (rdev->irq.installed &&
- r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
-#if 0
- PPSMC_Result result;
-#endif
- ret = ci_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
- if (ret) {
- DRM_ERROR("ci_set_thermal_temperature_range failed\n");
- return ret;
- }
- rdev->irq.dpm_thermal = true;
- radeon_irq_set(rdev);
-#if 0
- result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
+ ret = ci_thermal_enable_alert(rdev, false);
+ if (ret)
+ return ret;
+ ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+ if (ret)
+ return ret;
+ ret = ci_thermal_enable_alert(rdev, true);
+ if (ret)
+ return ret;
- if (result != PPSMC_Result_OK)
- DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
-#endif
- }
+ return ret;
+}
+
+int ci_dpm_late_enable(struct radeon_device *rdev)
+{
+ int ret;
+
+ ret = ci_set_temperature_range(rdev);
+ if (ret)
+ return ret;
ci_dpm_powergate_uvd(rdev, true);
@@ -4721,6 +5257,8 @@ void ci_dpm_disable(struct radeon_device *rdev)
if (!ci_is_smc_running(rdev))
return;
+ ci_thermal_stop_thermal_controller(rdev);
+
if (pi->thermal_protection)
ci_enable_thermal_protection(rdev, false);
ci_enable_power_containment(rdev, false);
@@ -4729,12 +5267,13 @@ void ci_dpm_disable(struct radeon_device *rdev)
ci_enable_spread_spectrum(rdev, false);
ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
ci_stop_dpm(rdev);
- ci_enable_ds_master_switch(rdev, true);
+ ci_enable_ds_master_switch(rdev, false);
ci_enable_ulv(rdev, false);
ci_clear_vc(rdev);
ci_reset_to_default(rdev);
ci_dpm_stop_smc(rdev);
ci_force_switch_to_arb_f0(rdev);
+ ci_enable_thermal_based_sclk_dpm(rdev, false);
ci_update_current_ps(rdev, boot_ps);
}
@@ -4804,11 +5343,6 @@ int ci_dpm_set_power_state(struct radeon_device *rdev)
return 0;
}
-int ci_dpm_power_control_set_level(struct radeon_device *rdev)
-{
- return ci_power_control_set_level(rdev);
-}
-
void ci_dpm_reset_asic(struct radeon_device *rdev)
{
ci_set_boot_state(rdev);
@@ -5068,6 +5602,8 @@ void ci_dpm_fini(struct radeon_device *rdev)
int ci_dpm_init(struct radeon_device *rdev)
{
int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
+ SMU7_Discrete_DpmTable *dpm_table;
+ struct radeon_gpio_rec gpio;
u16 data_offset, size;
u8 frev, crev;
struct ci_power_info *pi;
@@ -5137,6 +5673,7 @@ int ci_dpm_init(struct radeon_device *rdev)
pi->sclk_dpm_key_disabled = 0;
pi->mclk_dpm_key_disabled = 0;
pi->pcie_dpm_key_disabled = 0;
+ pi->thermal_sclk_dpm_enabled = 0;
/* mclk dpm is unstable on some R7 260X cards with the old mc ucode */
if ((rdev->pdev->device == 0x6658) &&
@@ -5201,6 +5738,55 @@ int ci_dpm_init(struct radeon_device *rdev)
pi->uvd_enabled = false;
+ dpm_table = &pi->smc_state_table;
+
+ gpio = radeon_atombios_lookup_gpio(rdev, VDDC_VRHOT_GPIO_PINID);
+ if (gpio.valid) {
+ dpm_table->VRHotGpio = gpio.shift;
+ rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
+ } else {
+ dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
+ rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
+ }
+
+ gpio = radeon_atombios_lookup_gpio(rdev, PP_AC_DC_SWITCH_GPIO_PINID);
+ if (gpio.valid) {
+ dpm_table->AcDcGpio = gpio.shift;
+ rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
+ } else {
+ dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
+ rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
+ }
+
+ gpio = radeon_atombios_lookup_gpio(rdev, VDDC_PCC_GPIO_PINID);
+ if (gpio.valid) {
+ u32 tmp = RREG32_SMC(CNB_PWRMGT_CNTL);
+
+ switch (gpio.shift) {
+ case 0:
+ tmp &= ~GNB_SLOW_MODE_MASK;
+ tmp |= GNB_SLOW_MODE(1);
+ break;
+ case 1:
+ tmp &= ~GNB_SLOW_MODE_MASK;
+ tmp |= GNB_SLOW_MODE(2);
+ break;
+ case 2:
+ tmp |= GNB_SLOW;
+ break;
+ case 3:
+ tmp |= FORCE_NB_PS1;
+ break;
+ case 4:
+ tmp |= DPM_ENABLED;
+ break;
+ default:
+ DRM_ERROR("Invalid PCC GPIO: %u!\n", gpio.shift);
+ break;
+ }
+ WREG32_SMC(CNB_PWRMGT_CNTL, tmp);
+ }
+
pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
@@ -5262,6 +5848,8 @@ int ci_dpm_init(struct radeon_device *rdev)
rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+ pi->fan_ctrl_is_in_default_mode = true;
+
return 0;
}
diff --git a/drivers/gpu/drm/radeon/ci_dpm.h b/drivers/gpu/drm/radeon/ci_dpm.h
index 93bbed977ff..84e3d3bcf9f 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.h
+++ b/drivers/gpu/drm/radeon/ci_dpm.h
@@ -33,6 +33,8 @@
#define CISLANDS_MAX_HARDWARE_POWERLEVELS 2
+#define CISLANDS_UNUSED_GPIO_PIN 0x7F
+
struct ci_pl {
u32 mclk;
u32 sclk;
@@ -237,6 +239,7 @@ struct ci_power_info {
u32 sclk_dpm_key_disabled;
u32 mclk_dpm_key_disabled;
u32 pcie_dpm_key_disabled;
+ u32 thermal_sclk_dpm_enabled;
struct ci_pcie_perf_range pcie_gen_performance;
struct ci_pcie_perf_range pcie_lane_performance;
struct ci_pcie_perf_range pcie_gen_powersaving;
@@ -264,6 +267,7 @@ struct ci_power_info {
bool caps_automatic_dc_transition;
bool caps_sclk_throttle_low_notification;
bool caps_dynamic_ac_timing;
+ bool caps_od_fuzzy_fan_control_support;
/* flags */
bool thermal_protection;
bool pcie_performance_request;
@@ -285,6 +289,10 @@ struct ci_power_info {
struct ci_ps current_ps;
struct radeon_ps requested_rps;
struct ci_ps requested_ps;
+ /* fan control */
+ bool fan_ctrl_is_in_default_mode;
+ u32 t_min;
+ u32 fan_ctrl_default_mode;
};
#define CISLANDS_VOLTAGE_CONTROL_NONE 0x0
diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c
index b630edc2fd0..e78bcad7a43 100644
--- a/drivers/gpu/drm/radeon/ci_smc.c
+++ b/drivers/gpu/drm/radeon/ci_smc.c
@@ -129,7 +129,7 @@ void ci_reset_smc(struct radeon_device *rdev)
int ci_program_jump_on_start(struct radeon_device *rdev)
{
- static u8 data[] = { 0xE0, 0x00, 0x80, 0x40 };
+ static const u8 data[] = { 0xE0, 0x00, 0x80, 0x40 };
return ci_copy_bytes_to_smc(rdev, 0x0, data, 4, sizeof(data)+1);
}
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 89c01fa6dd8..6dcde3798b4 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -32,6 +32,7 @@
#include "cik_blit_shaders.h"
#include "radeon_ucode.h"
#include "clearstate_ci.h"
+#include "radeon_kfd.h"
MODULE_FIRMWARE("radeon/BONAIRE_pfp.bin");
MODULE_FIRMWARE("radeon/BONAIRE_me.bin");
@@ -1563,6 +1564,8 @@ static const u32 godavari_golden_registers[] =
static void cik_init_golden_registers(struct radeon_device *rdev)
{
+ /* Some of the registers might be dependent on GRBM_GFX_INDEX */
+ mutex_lock(&rdev->grbm_idx_mutex);
switch (rdev->family) {
case CHIP_BONAIRE:
radeon_program_register_sequence(rdev,
@@ -1637,6 +1640,7 @@ static void cik_init_golden_registers(struct radeon_device *rdev)
default:
break;
}
+ mutex_unlock(&rdev->grbm_idx_mutex);
}
/**
@@ -1806,7 +1810,7 @@ int ci_mc_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data = NULL;
const __le32 *new_fw_data = NULL;
- u32 running, blackout = 0;
+ u32 running, blackout = 0, tmp;
u32 *io_mc_regs = NULL;
const __le32 *new_io_mc_regs = NULL;
int i, regs_size, ucode_size;
@@ -1866,6 +1870,15 @@ int ci_mc_load_microcode(struct radeon_device *rdev)
WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
}
}
+
+ tmp = RREG32(MC_SEQ_MISC0);
+ if ((rdev->pdev->device == 0x6649) && ((tmp & 0xff00) == 0x5600)) {
+ WREG32(MC_SEQ_IO_DEBUG_INDEX, 5);
+ WREG32(MC_SEQ_IO_DEBUG_DATA, 0x00000023);
+ WREG32(MC_SEQ_IO_DEBUG_INDEX, 9);
+ WREG32(MC_SEQ_IO_DEBUG_DATA, 0x000001f0);
+ }
+
/* load the MC ucode */
for (i = 0; i < ucode_size; i++) {
if (rdev->new_fw)
@@ -3419,6 +3432,7 @@ static void cik_setup_rb(struct radeon_device *rdev,
u32 disabled_rbs = 0;
u32 enabled_rbs = 0;
+ mutex_lock(&rdev->grbm_idx_mutex);
for (i = 0; i < se_num; i++) {
for (j = 0; j < sh_per_se; j++) {
cik_select_se_sh(rdev, i, j);
@@ -3430,6 +3444,7 @@ static void cik_setup_rb(struct radeon_device *rdev,
}
}
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+ mutex_unlock(&rdev->grbm_idx_mutex);
mask = 1;
for (i = 0; i < max_rb_num_per_se * se_num; i++) {
@@ -3440,6 +3455,7 @@ static void cik_setup_rb(struct radeon_device *rdev,
rdev->config.cik.backend_enable_mask = enabled_rbs;
+ mutex_lock(&rdev->grbm_idx_mutex);
for (i = 0; i < se_num; i++) {
cik_select_se_sh(rdev, i, 0xffffffff);
data = 0;
@@ -3467,6 +3483,7 @@ static void cik_setup_rb(struct radeon_device *rdev,
WREG32(PA_SC_RASTER_CONFIG, data);
}
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+ mutex_unlock(&rdev->grbm_idx_mutex);
}
/**
@@ -3684,6 +3701,12 @@ static void cik_gpu_init(struct radeon_device *rdev)
/* set HW defaults for 3D engine */
WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
+ mutex_lock(&rdev->grbm_idx_mutex);
+ /*
+ * making sure that the following register writes will be broadcasted
+ * to all the shaders
+ */
+ cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
WREG32(SX_DEBUG_1, 0x20);
WREG32(TA_CNTL_AUX, 0x00010000);
@@ -3739,6 +3762,7 @@ static void cik_gpu_init(struct radeon_device *rdev)
WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
WREG32(PA_SC_ENHANCE, ENABLE_PA_SC_OUT_OF_ORDER);
+ mutex_unlock(&rdev->grbm_idx_mutex);
udelay(50);
}
@@ -3970,31 +3994,27 @@ struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct reservation_object *resv)
{
- struct radeon_semaphore *sem = NULL;
struct radeon_fence *fence;
+ struct radeon_sync sync;
int ring_index = rdev->asic->copy.blit_ring_index;
struct radeon_ring *ring = &rdev->ring[ring_index];
u32 size_in_bytes, cur_size_in_bytes, control;
int i, num_loops;
int r = 0;
- r = radeon_semaphore_create(rdev, &sem);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- return ERR_PTR(r);
- }
+ radeon_sync_create(&sync);
size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
r = radeon_ring_lock(rdev, ring, num_loops * 7 + 18);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
- radeon_semaphore_free(rdev, &sem, NULL);
+ radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
}
- radeon_semaphore_sync_resv(rdev, sem, resv, false);
- radeon_semaphore_sync_rings(rdev, sem, ring->idx);
+ radeon_sync_resv(rdev, &sync, resv, false);
+ radeon_sync_rings(rdev, &sync, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_bytes = size_in_bytes;
@@ -4018,12 +4038,12 @@ struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
r = radeon_fence_emit(rdev, &fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
- radeon_semaphore_free(rdev, &sem, NULL);
+ radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
}
radeon_ring_unlock_commit(rdev, ring, false);
- radeon_semaphore_free(rdev, &sem, fence);
+ radeon_sync_free(rdev, &sync, fence);
return fence;
}
@@ -4046,6 +4066,7 @@ struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
+ unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
u32 header, control = INDIRECT_BUFFER_VALID;
if (ib->is_const_ib) {
@@ -4074,8 +4095,7 @@ void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
}
- control |= ib->length_dw |
- (ib->vm ? (ib->vm->id << 24) : 0);
+ control |= ib->length_dw | (vm_id << 24);
radeon_ring_write(ring, header);
radeon_ring_write(ring,
@@ -4675,12 +4695,11 @@ static int cik_mec_init(struct radeon_device *rdev)
/*
* KV: 2 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 64 Queues total
* CI/KB: 1 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 32 Queues total
+ * Nonetheless, we assign only 1 pipe because all other pipes will
+ * be handled by KFD
*/
- if (rdev->family == CHIP_KAVERI)
- rdev->mec.num_mec = 2;
- else
- rdev->mec.num_mec = 1;
- rdev->mec.num_pipe = 4;
+ rdev->mec.num_mec = 1;
+ rdev->mec.num_pipe = 1;
rdev->mec.num_queue = rdev->mec.num_mec * rdev->mec.num_pipe * 8;
if (rdev->mec.hpd_eop_obj == NULL) {
@@ -4822,28 +4841,24 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
/* init the pipes */
mutex_lock(&rdev->srbm_mutex);
- for (i = 0; i < (rdev->mec.num_pipe * rdev->mec.num_mec); i++) {
- int me = (i < 4) ? 1 : 2;
- int pipe = (i < 4) ? i : (i - 4);
- eop_gpu_addr = rdev->mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE * 2);
+ eop_gpu_addr = rdev->mec.hpd_eop_gpu_addr;
- cik_srbm_select(rdev, me, pipe, 0, 0);
+ cik_srbm_select(rdev, 0, 0, 0, 0);
- /* write the EOP addr */
- WREG32(CP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8);
- WREG32(CP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8);
+ /* write the EOP addr */
+ WREG32(CP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8);
+ WREG32(CP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8);
- /* set the VMID assigned */
- WREG32(CP_HPD_EOP_VMID, 0);
+ /* set the VMID assigned */
+ WREG32(CP_HPD_EOP_VMID, 0);
+
+ /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
+ tmp = RREG32(CP_HPD_EOP_CONTROL);
+ tmp &= ~EOP_SIZE_MASK;
+ tmp |= order_base_2(MEC_HPD_SIZE / 8);
+ WREG32(CP_HPD_EOP_CONTROL, tmp);
- /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
- tmp = RREG32(CP_HPD_EOP_CONTROL);
- tmp &= ~EOP_SIZE_MASK;
- tmp |= order_base_2(MEC_HPD_SIZE / 8);
- WREG32(CP_HPD_EOP_CONTROL, tmp);
- }
- cik_srbm_select(rdev, 0, 0, 0, 0);
mutex_unlock(&rdev->srbm_mutex);
/* init the queues. Just two for now. */
@@ -5897,8 +5912,13 @@ int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
*/
int cik_vm_init(struct radeon_device *rdev)
{
- /* number of VMs */
- rdev->vm_manager.nvm = 16;
+ /*
+ * number of VMs
+ * VMID 0 is reserved for System
+ * radeon graphics/compute will use VMIDs 1-7
+ * amdkfd will use VMIDs 8-15
+ */
+ rdev->vm_manager.nvm = RADEON_NUM_OF_VMIDS;
/* base offset of vram pages */
if (rdev->flags & RADEON_IS_IGP) {
u64 tmp = RREG32(MC_VM_FB_OFFSET);
@@ -5958,26 +5978,23 @@ static void cik_vm_decode_fault(struct radeon_device *rdev,
* Update the page table base and flush the VM TLB
* using the CP (CIK).
*/
-void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+void cik_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+ unsigned vm_id, uint64_t pd_addr)
{
- struct radeon_ring *ring = &rdev->ring[ridx];
- int usepfp = (ridx == RADEON_RING_TYPE_GFX_INDEX);
-
- if (vm == NULL)
- return;
+ int usepfp = (ring->idx == RADEON_RING_TYPE_GFX_INDEX);
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
WRITE_DATA_DST_SEL(0)));
- if (vm->id < 8) {
+ if (vm_id < 8) {
radeon_ring_write(ring,
- (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
+ (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
} else {
radeon_ring_write(ring,
- (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
+ (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2);
}
radeon_ring_write(ring, 0);
- radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+ radeon_ring_write(ring, pd_addr >> 12);
/* update SH_MEM_* regs */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
@@ -5985,7 +6002,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
WRITE_DATA_DST_SEL(0)));
radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
radeon_ring_write(ring, 0);
- radeon_ring_write(ring, VMID(vm->id));
+ radeon_ring_write(ring, VMID(vm_id));
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 6));
radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
@@ -6006,7 +6023,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
radeon_ring_write(ring, VMID(0));
/* HDP flush */
- cik_hdp_flush_cp_ring_emit(rdev, ridx);
+ cik_hdp_flush_cp_ring_emit(rdev, ring->idx);
/* bits 0-15 are the VM contexts0-15 */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
@@ -6014,7 +6031,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
WRITE_DATA_DST_SEL(0)));
radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 1 << vm->id);
+ radeon_ring_write(ring, 1 << vm_id);
/* compute doesn't have PFP */
if (usepfp) {
@@ -6059,6 +6076,7 @@ static void cik_wait_for_rlc_serdes(struct radeon_device *rdev)
u32 i, j, k;
u32 mask;
+ mutex_lock(&rdev->grbm_idx_mutex);
for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
cik_select_se_sh(rdev, i, j);
@@ -6070,6 +6088,7 @@ static void cik_wait_for_rlc_serdes(struct radeon_device *rdev)
}
}
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+ mutex_unlock(&rdev->grbm_idx_mutex);
mask = SE_MASTER_BUSY_MASK | GC_MASTER_BUSY | TC0_MASTER_BUSY | TC1_MASTER_BUSY;
for (k = 0; k < rdev->usec_timeout; k++) {
@@ -6204,10 +6223,12 @@ static int cik_rlc_resume(struct radeon_device *rdev)
WREG32(RLC_LB_CNTR_INIT, 0);
WREG32(RLC_LB_CNTR_MAX, 0x00008000);
+ mutex_lock(&rdev->grbm_idx_mutex);
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff);
WREG32(RLC_LB_PARAMS, 0x00600408);
WREG32(RLC_LB_CNTL, 0x80000004);
+ mutex_unlock(&rdev->grbm_idx_mutex);
WREG32(RLC_MC_CNTL, 0);
WREG32(RLC_UCODE_CNTL, 0);
@@ -6274,11 +6295,13 @@ static void cik_enable_cgcg(struct radeon_device *rdev, bool enable)
tmp = cik_halt_rlc(rdev);
+ mutex_lock(&rdev->grbm_idx_mutex);
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
tmp2 = BPM_ADDR_MASK | CGCG_OVERRIDE_0 | CGLS_ENABLE;
WREG32(RLC_SERDES_WR_CTRL, tmp2);
+ mutex_unlock(&rdev->grbm_idx_mutex);
cik_update_rlc(rdev, tmp);
@@ -6314,17 +6337,20 @@ static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
}
orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
+ data |= 0x00000001;
data &= 0xfffffffd;
if (orig != data)
WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
tmp = cik_halt_rlc(rdev);
+ mutex_lock(&rdev->grbm_idx_mutex);
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
data = BPM_ADDR_MASK | MGCG_OVERRIDE_0;
WREG32(RLC_SERDES_WR_CTRL, data);
+ mutex_unlock(&rdev->grbm_idx_mutex);
cik_update_rlc(rdev, tmp);
@@ -6345,7 +6371,7 @@ static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
}
} else {
orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
- data |= 0x00000002;
+ data |= 0x00000003;
if (orig != data)
WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
@@ -6368,11 +6394,13 @@ static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
tmp = cik_halt_rlc(rdev);
+ mutex_lock(&rdev->grbm_idx_mutex);
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
data = BPM_ADDR_MASK | MGCG_OVERRIDE_1;
WREG32(RLC_SERDES_WR_CTRL, data);
+ mutex_unlock(&rdev->grbm_idx_mutex);
cik_update_rlc(rdev, tmp);
}
@@ -6801,10 +6829,12 @@ static u32 cik_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh)
u32 mask = 0, tmp, tmp1;
int i;
+ mutex_lock(&rdev->grbm_idx_mutex);
cik_select_se_sh(rdev, se, sh);
tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+ mutex_unlock(&rdev->grbm_idx_mutex);
tmp &= 0xffff0000;
@@ -7288,8 +7318,7 @@ static int cik_irq_init(struct radeon_device *rdev)
int cik_irq_set(struct radeon_device *rdev)
{
u32 cp_int_cntl;
- u32 cp_m1p0, cp_m1p1, cp_m1p2, cp_m1p3;
- u32 cp_m2p0, cp_m2p1, cp_m2p2, cp_m2p3;
+ u32 cp_m1p0;
u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
u32 grbm_int_cntl = 0;
@@ -7323,13 +7352,6 @@ int cik_irq_set(struct radeon_device *rdev)
dma_cntl1 = RREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
cp_m1p0 = RREG32(CP_ME1_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
- cp_m1p1 = RREG32(CP_ME1_PIPE1_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
- cp_m1p2 = RREG32(CP_ME1_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
- cp_m1p3 = RREG32(CP_ME1_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
- cp_m2p0 = RREG32(CP_ME2_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
- cp_m2p1 = RREG32(CP_ME2_PIPE1_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
- cp_m2p2 = RREG32(CP_ME2_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
- cp_m2p3 = RREG32(CP_ME2_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
if (rdev->flags & RADEON_IS_IGP)
thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) &
@@ -7351,33 +7373,6 @@ int cik_irq_set(struct radeon_device *rdev)
case 0:
cp_m1p0 |= TIME_STAMP_INT_ENABLE;
break;
- case 1:
- cp_m1p1 |= TIME_STAMP_INT_ENABLE;
- break;
- case 2:
- cp_m1p2 |= TIME_STAMP_INT_ENABLE;
- break;
- case 3:
- cp_m1p2 |= TIME_STAMP_INT_ENABLE;
- break;
- default:
- DRM_DEBUG("si_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe);
- break;
- }
- } else if (ring->me == 2) {
- switch (ring->pipe) {
- case 0:
- cp_m2p0 |= TIME_STAMP_INT_ENABLE;
- break;
- case 1:
- cp_m2p1 |= TIME_STAMP_INT_ENABLE;
- break;
- case 2:
- cp_m2p2 |= TIME_STAMP_INT_ENABLE;
- break;
- case 3:
- cp_m2p2 |= TIME_STAMP_INT_ENABLE;
- break;
default:
DRM_DEBUG("si_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe);
break;
@@ -7394,33 +7389,6 @@ int cik_irq_set(struct radeon_device *rdev)
case 0:
cp_m1p0 |= TIME_STAMP_INT_ENABLE;
break;
- case 1:
- cp_m1p1 |= TIME_STAMP_INT_ENABLE;
- break;
- case 2:
- cp_m1p2 |= TIME_STAMP_INT_ENABLE;
- break;
- case 3:
- cp_m1p2 |= TIME_STAMP_INT_ENABLE;
- break;
- default:
- DRM_DEBUG("si_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe);
- break;
- }
- } else if (ring->me == 2) {
- switch (ring->pipe) {
- case 0:
- cp_m2p0 |= TIME_STAMP_INT_ENABLE;
- break;
- case 1:
- cp_m2p1 |= TIME_STAMP_INT_ENABLE;
- break;
- case 2:
- cp_m2p2 |= TIME_STAMP_INT_ENABLE;
- break;
- case 3:
- cp_m2p2 |= TIME_STAMP_INT_ENABLE;
- break;
default:
DRM_DEBUG("si_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe);
break;
@@ -7509,13 +7477,6 @@ int cik_irq_set(struct radeon_device *rdev)
WREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET, dma_cntl1);
WREG32(CP_ME1_PIPE0_INT_CNTL, cp_m1p0);
- WREG32(CP_ME1_PIPE1_INT_CNTL, cp_m1p1);
- WREG32(CP_ME1_PIPE2_INT_CNTL, cp_m1p2);
- WREG32(CP_ME1_PIPE3_INT_CNTL, cp_m1p3);
- WREG32(CP_ME2_PIPE0_INT_CNTL, cp_m2p0);
- WREG32(CP_ME2_PIPE1_INT_CNTL, cp_m2p1);
- WREG32(CP_ME2_PIPE2_INT_CNTL, cp_m2p2);
- WREG32(CP_ME2_PIPE3_INT_CNTL, cp_m2p3);
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
@@ -7832,6 +7793,10 @@ restart_ih:
while (rptr != wptr) {
/* wptr/rptr are in bytes! */
ring_index = rptr / 4;
+
+ radeon_kfd_interrupt(rdev,
+ (const void *) &rdev->ih.ring[ring_index]);
+
src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;
@@ -8521,6 +8486,10 @@ static int cik_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_kfd_resume(rdev);
+ if (r)
+ return r;
+
return 0;
}
@@ -8569,6 +8538,7 @@ int cik_resume(struct radeon_device *rdev)
*/
int cik_suspend(struct radeon_device *rdev)
{
+ radeon_kfd_suspend(rdev);
radeon_pm_suspend(rdev);
dce6_audio_fini(rdev);
radeon_vm_manager_fini(rdev);
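The cik.c changes also switch the vm_flush hook from taking a radeon_vm pointer to taking an explicit ring, VMID and page-directory address. A hypothetical caller, only to illustrate the new prototype (the NULL/"needs flush" checks now live in the caller rather than in the asic hook):

static void example_flush(struct radeon_device *rdev, struct radeon_vm *vm,
                          int ring_idx)
{
        struct radeon_ring *ring = &rdev->ring[ring_idx];
        struct radeon_vm_id *vm_id = &vm->ids[ring_idx];

        if (!vm_id->pd_gpu_addr)
                return;
        cik_vm_flush(rdev, ring, vm_id->id, vm_id->pd_gpu_addr);
}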
diff --git a/drivers/gpu/drm/radeon/cik_reg.h b/drivers/gpu/drm/radeon/cik_reg.h
index ca1bb613358..79c45e8a536 100644
--- a/drivers/gpu/drm/radeon/cik_reg.h
+++ b/drivers/gpu/drm/radeon/cik_reg.h
@@ -147,4 +147,140 @@
#define CIK_LB_DESKTOP_HEIGHT 0x6b0c
+#define CP_HQD_IQ_RPTR 0xC970u
+#define AQL_ENABLE (1U << 0)
+
+#define IDLE (1 << 2)
+
+struct cik_mqd {
+ uint32_t header;
+ uint32_t compute_dispatch_initiator;
+ uint32_t compute_dim_x;
+ uint32_t compute_dim_y;
+ uint32_t compute_dim_z;
+ uint32_t compute_start_x;
+ uint32_t compute_start_y;
+ uint32_t compute_start_z;
+ uint32_t compute_num_thread_x;
+ uint32_t compute_num_thread_y;
+ uint32_t compute_num_thread_z;
+ uint32_t compute_pipelinestat_enable;
+ uint32_t compute_perfcount_enable;
+ uint32_t compute_pgm_lo;
+ uint32_t compute_pgm_hi;
+ uint32_t compute_tba_lo;
+ uint32_t compute_tba_hi;
+ uint32_t compute_tma_lo;
+ uint32_t compute_tma_hi;
+ uint32_t compute_pgm_rsrc1;
+ uint32_t compute_pgm_rsrc2;
+ uint32_t compute_vmid;
+ uint32_t compute_resource_limits;
+ uint32_t compute_static_thread_mgmt_se0;
+ uint32_t compute_static_thread_mgmt_se1;
+ uint32_t compute_tmpring_size;
+ uint32_t compute_static_thread_mgmt_se2;
+ uint32_t compute_static_thread_mgmt_se3;
+ uint32_t compute_restart_x;
+ uint32_t compute_restart_y;
+ uint32_t compute_restart_z;
+ uint32_t compute_thread_trace_enable;
+ uint32_t compute_misc_reserved;
+ uint32_t compute_user_data_0;
+ uint32_t compute_user_data_1;
+ uint32_t compute_user_data_2;
+ uint32_t compute_user_data_3;
+ uint32_t compute_user_data_4;
+ uint32_t compute_user_data_5;
+ uint32_t compute_user_data_6;
+ uint32_t compute_user_data_7;
+ uint32_t compute_user_data_8;
+ uint32_t compute_user_data_9;
+ uint32_t compute_user_data_10;
+ uint32_t compute_user_data_11;
+ uint32_t compute_user_data_12;
+ uint32_t compute_user_data_13;
+ uint32_t compute_user_data_14;
+ uint32_t compute_user_data_15;
+ uint32_t cp_compute_csinvoc_count_lo;
+ uint32_t cp_compute_csinvoc_count_hi;
+ uint32_t cp_mqd_base_addr_lo;
+ uint32_t cp_mqd_base_addr_hi;
+ uint32_t cp_hqd_active;
+ uint32_t cp_hqd_vmid;
+ uint32_t cp_hqd_persistent_state;
+ uint32_t cp_hqd_pipe_priority;
+ uint32_t cp_hqd_queue_priority;
+ uint32_t cp_hqd_quantum;
+ uint32_t cp_hqd_pq_base_lo;
+ uint32_t cp_hqd_pq_base_hi;
+ uint32_t cp_hqd_pq_rptr;
+ uint32_t cp_hqd_pq_rptr_report_addr_lo;
+ uint32_t cp_hqd_pq_rptr_report_addr_hi;
+ uint32_t cp_hqd_pq_wptr_poll_addr_lo;
+ uint32_t cp_hqd_pq_wptr_poll_addr_hi;
+ uint32_t cp_hqd_pq_doorbell_control;
+ uint32_t cp_hqd_pq_wptr;
+ uint32_t cp_hqd_pq_control;
+ uint32_t cp_hqd_ib_base_addr_lo;
+ uint32_t cp_hqd_ib_base_addr_hi;
+ uint32_t cp_hqd_ib_rptr;
+ uint32_t cp_hqd_ib_control;
+ uint32_t cp_hqd_iq_timer;
+ uint32_t cp_hqd_iq_rptr;
+ uint32_t cp_hqd_dequeue_request;
+ uint32_t cp_hqd_dma_offload;
+ uint32_t cp_hqd_sema_cmd;
+ uint32_t cp_hqd_msg_type;
+ uint32_t cp_hqd_atomic0_preop_lo;
+ uint32_t cp_hqd_atomic0_preop_hi;
+ uint32_t cp_hqd_atomic1_preop_lo;
+ uint32_t cp_hqd_atomic1_preop_hi;
+ uint32_t cp_hqd_hq_status0;
+ uint32_t cp_hqd_hq_control0;
+ uint32_t cp_mqd_control;
+ uint32_t cp_mqd_query_time_lo;
+ uint32_t cp_mqd_query_time_hi;
+ uint32_t cp_mqd_connect_start_time_lo;
+ uint32_t cp_mqd_connect_start_time_hi;
+ uint32_t cp_mqd_connect_end_time_lo;
+ uint32_t cp_mqd_connect_end_time_hi;
+ uint32_t cp_mqd_connect_end_wf_count;
+ uint32_t cp_mqd_connect_end_pq_rptr;
+ uint32_t cp_mqd_connect_end_pq_wptr;
+ uint32_t cp_mqd_connect_end_ib_rptr;
+ uint32_t reserved_96;
+ uint32_t reserved_97;
+ uint32_t reserved_98;
+ uint32_t reserved_99;
+ uint32_t iqtimer_pkt_header;
+ uint32_t iqtimer_pkt_dw0;
+ uint32_t iqtimer_pkt_dw1;
+ uint32_t iqtimer_pkt_dw2;
+ uint32_t iqtimer_pkt_dw3;
+ uint32_t iqtimer_pkt_dw4;
+ uint32_t iqtimer_pkt_dw5;
+ uint32_t iqtimer_pkt_dw6;
+ uint32_t reserved_108;
+ uint32_t reserved_109;
+ uint32_t reserved_110;
+ uint32_t reserved_111;
+ uint32_t queue_doorbell_id0;
+ uint32_t queue_doorbell_id1;
+ uint32_t queue_doorbell_id2;
+ uint32_t queue_doorbell_id3;
+ uint32_t queue_doorbell_id4;
+ uint32_t queue_doorbell_id5;
+ uint32_t queue_doorbell_id6;
+ uint32_t queue_doorbell_id7;
+ uint32_t queue_doorbell_id8;
+ uint32_t queue_doorbell_id9;
+ uint32_t queue_doorbell_id10;
+ uint32_t queue_doorbell_id11;
+ uint32_t queue_doorbell_id12;
+ uint32_t queue_doorbell_id13;
+ uint32_t queue_doorbell_id14;
+ uint32_t queue_doorbell_id15;
+};
+
#endif
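struct cik_mqd mirrors the CP_MQD/CP_HQD register file one dword per field. A minimal sketch (assumed helper, not the amdkfd code) of filling the fields that point the CP at the MQD itself, given a GPU virtual address obtained elsewhere:

static void example_init_mqd(struct cik_mqd *m, uint64_t mqd_gpu_addr, uint32_t vmid)
{
        memset(m, 0, sizeof(*m));
        /* 64-bit MQD address split across the lo/hi dwords */
        m->cp_mqd_base_addr_lo = lower_32_bits(mqd_gpu_addr);
        m->cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);
        m->cp_hqd_vmid         = vmid;
        m->cp_hqd_active       = 1;
}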
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index d748963af08..dde5c7e29eb 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -134,7 +134,7 @@ void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
- u32 extra_bits = (ib->vm ? ib->vm->id : 0) & 0xf;
+ u32 extra_bits = (ib->vm ? ib->vm->ids[ib->ring].id : 0) & 0xf;
if (rdev->wb.enabled) {
u32 next_rptr = ring->wptr + 5;
@@ -541,31 +541,27 @@ struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct reservation_object *resv)
{
- struct radeon_semaphore *sem = NULL;
struct radeon_fence *fence;
+ struct radeon_sync sync;
int ring_index = rdev->asic->copy.dma_ring_index;
struct radeon_ring *ring = &rdev->ring[ring_index];
u32 size_in_bytes, cur_size_in_bytes;
int i, num_loops;
int r = 0;
- r = radeon_semaphore_create(rdev, &sem);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- return ERR_PTR(r);
- }
+ radeon_sync_create(&sync);
size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
r = radeon_ring_lock(rdev, ring, num_loops * 7 + 14);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
- radeon_semaphore_free(rdev, &sem, NULL);
+ radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
}
- radeon_semaphore_sync_resv(rdev, sem, resv, false);
- radeon_semaphore_sync_rings(rdev, sem, ring->idx);
+ radeon_sync_resv(rdev, &sync, resv, false);
+ radeon_sync_rings(rdev, &sync, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_bytes = size_in_bytes;
@@ -586,12 +582,12 @@ struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
r = radeon_fence_emit(rdev, &fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
- radeon_semaphore_free(rdev, &sem, NULL);
+ radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
}
radeon_ring_unlock_commit(rdev, ring, false);
- radeon_semaphore_free(rdev, &sem, fence);
+ radeon_sync_free(rdev, &sync, fence);
return fence;
}
@@ -904,25 +900,21 @@ void cik_sdma_vm_pad_ib(struct radeon_ib *ib)
* Update the page table base and flush the VM TLB
* using sDMA (CIK).
*/
-void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+ unsigned vm_id, uint64_t pd_addr)
{
- struct radeon_ring *ring = &rdev->ring[ridx];
-
- if (vm == NULL)
- return;
-
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
- if (vm->id < 8) {
- radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
+ if (vm_id < 8) {
+ radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
} else {
- radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
+ radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2);
}
- radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+ radeon_ring_write(ring, pd_addr >> 12);
/* update SH_MEM_* regs */
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
- radeon_ring_write(ring, VMID(vm->id));
+ radeon_ring_write(ring, VMID(vm_id));
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
radeon_ring_write(ring, SH_MEM_BASES >> 2);
@@ -945,11 +937,11 @@ void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm
radeon_ring_write(ring, VMID(0));
/* flush HDP */
- cik_sdma_hdp_flush_ring_emit(rdev, ridx);
+ cik_sdma_hdp_flush_ring_emit(rdev, ring->idx);
/* flush TLB */
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
- radeon_ring_write(ring, 1 << vm->id);
+ radeon_ring_write(ring, 1 << vm_id);
}
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index 0c6e1b55d96..ba85986febe 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -30,6 +30,8 @@
#define CIK_RB_BITMAP_WIDTH_PER_SH 2
#define HAWAII_RB_BITMAP_WIDTH_PER_SH 4
+#define RADEON_NUM_OF_VMIDS 8
+
/* DIDT IND registers */
#define DIDT_SQ_CTRL0 0x0
# define DIDT_CTRL_EN (1 << 0)
@@ -184,7 +186,10 @@
#define DIG_THERM_DPM(x) ((x) << 14)
#define DIG_THERM_DPM_MASK 0x003FC000
#define DIG_THERM_DPM_SHIFT 14
-
+#define CG_THERMAL_STATUS 0xC0300008
+#define FDO_PWM_DUTY(x) ((x) << 9)
+#define FDO_PWM_DUTY_MASK (0xff << 9)
+#define FDO_PWM_DUTY_SHIFT 9
#define CG_THERMAL_INT 0xC030000C
#define CI_DIG_THERM_INTH(x) ((x) << 8)
#define CI_DIG_THERM_INTH_MASK 0x0000FF00
@@ -194,7 +199,10 @@
#define CI_DIG_THERM_INTL_SHIFT 16
#define THERM_INT_MASK_HIGH (1 << 24)
#define THERM_INT_MASK_LOW (1 << 25)
-
+#define CG_MULT_THERMAL_CTRL 0xC0300010
+#define TEMP_SEL(x) ((x) << 20)
+#define TEMP_SEL_MASK (0xff << 20)
+#define TEMP_SEL_SHIFT 20
#define CG_MULT_THERMAL_STATUS 0xC0300014
#define ASIC_MAX_TEMP(x) ((x) << 0)
#define ASIC_MAX_TEMP_MASK 0x000001ff
@@ -203,6 +211,36 @@
#define CTF_TEMP_MASK 0x0003fe00
#define CTF_TEMP_SHIFT 9
+#define CG_FDO_CTRL0 0xC0300064
+#define FDO_STATIC_DUTY(x) ((x) << 0)
+#define FDO_STATIC_DUTY_MASK 0x000000FF
+#define FDO_STATIC_DUTY_SHIFT 0
+#define CG_FDO_CTRL1 0xC0300068
+#define FMAX_DUTY100(x) ((x) << 0)
+#define FMAX_DUTY100_MASK 0x000000FF
+#define FMAX_DUTY100_SHIFT 0
+#define CG_FDO_CTRL2 0xC030006C
+#define TMIN(x) ((x) << 0)
+#define TMIN_MASK 0x000000FF
+#define TMIN_SHIFT 0
+#define FDO_PWM_MODE(x) ((x) << 11)
+#define FDO_PWM_MODE_MASK (7 << 11)
+#define FDO_PWM_MODE_SHIFT 11
+#define TACH_PWM_RESP_RATE(x) ((x) << 25)
+#define TACH_PWM_RESP_RATE_MASK (0x7f << 25)
+#define TACH_PWM_RESP_RATE_SHIFT 25
+#define CG_TACH_CTRL 0xC0300070
+# define EDGE_PER_REV(x) ((x) << 0)
+# define EDGE_PER_REV_MASK (0x7 << 0)
+# define EDGE_PER_REV_SHIFT 0
+# define TARGET_PERIOD(x) ((x) << 3)
+# define TARGET_PERIOD_MASK 0xfffffff8
+# define TARGET_PERIOD_SHIFT 3
+#define CG_TACH_STATUS 0xC0300074
+# define TACH_PERIOD(x) ((x) << 0)
+# define TACH_PERIOD_MASK 0xffffffff
+# define TACH_PERIOD_SHIFT 0
+
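An illustrative use of the fan registers added above (a sketch under the assumption that dpm code reads them through RREG32_SMC, as it does for CG_THERMAL_INT_CTRL elsewhere in this patch): the current fan speed in percent is the duty cycle from CG_THERMAL_STATUS scaled by the 100% duty value reported in CG_FDO_CTRL1.

static u32 example_fan_speed_percent(struct radeon_device *rdev)
{
        u32 duty = (RREG32_SMC(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK)
                   >> FDO_PWM_DUTY_SHIFT;
        u32 duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK)
                      >> FMAX_DUTY100_SHIFT;

        if (duty100 == 0)
                return 0;
        return min(duty * 100 / duty100, 100u);
}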
#define CG_ECLK_CNTL 0xC05000AC
# define ECLK_DIVIDER_MASK 0x7f
# define ECLK_DIR_CNTL_EN (1 << 8)
@@ -1137,6 +1175,9 @@
#define SH_MEM_ALIGNMENT_MODE_UNALIGNED 3
#define DEFAULT_MTYPE(x) ((x) << 4)
#define APE1_MTYPE(x) ((x) << 7)
+/* valid for both DEFAULT_MTYPE and APE1_MTYPE */
+#define MTYPE_CACHED 0
+#define MTYPE_NONCACHED 3
#define SX_DEBUG_1 0x9060
@@ -1447,6 +1488,16 @@
#define CP_HQD_ACTIVE 0xC91C
#define CP_HQD_VMID 0xC920
+#define CP_HQD_PERSISTENT_STATE 0xC924u
+#define DEFAULT_CP_HQD_PERSISTENT_STATE (0x33U << 8)
+
+#define CP_HQD_PIPE_PRIORITY 0xC928u
+#define CP_HQD_QUEUE_PRIORITY 0xC92Cu
+#define CP_HQD_QUANTUM 0xC930u
+#define QUANTUM_EN 1U
+#define QUANTUM_SCALE_1MS (1U << 4)
+#define QUANTUM_DURATION(x) ((x) << 8)
+
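+/*
+ * Example (assumed values, for illustration only) of composing a
+ * CP_HQD_QUANTUM value from the macros above: enable quantum-based
+ * scheduling with a 10 ms time slice.
+ *
+ *     u32 quantum = QUANTUM_EN | QUANTUM_SCALE_1MS | QUANTUM_DURATION(10);
+ */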
#define CP_HQD_PQ_BASE 0xC934
#define CP_HQD_PQ_BASE_HI 0xC938
#define CP_HQD_PQ_RPTR 0xC93C
@@ -1474,12 +1525,32 @@
#define PRIV_STATE (1 << 30)
#define KMD_QUEUE (1 << 31)
-#define CP_HQD_DEQUEUE_REQUEST 0xC974
+#define CP_HQD_IB_BASE_ADDR 0xC95Cu
+#define CP_HQD_IB_BASE_ADDR_HI 0xC960u
+#define CP_HQD_IB_RPTR 0xC964u
+#define CP_HQD_IB_CONTROL 0xC968u
+#define IB_ATC_EN (1U << 23)
+#define DEFAULT_MIN_IB_AVAIL_SIZE (3U << 20)
+
+#define CP_HQD_DEQUEUE_REQUEST 0xC974
+#define DEQUEUE_REQUEST_DRAIN 1
+#define DEQUEUE_REQUEST_RESET 2
#define CP_MQD_CONTROL 0xC99C
#define MQD_VMID(x) ((x) << 0)
#define MQD_VMID_MASK (0xf << 0)
+#define CP_HQD_SEMA_CMD 0xC97Cu
+#define CP_HQD_MSG_TYPE 0xC980u
+#define CP_HQD_ATOMIC0_PREOP_LO 0xC984u
+#define CP_HQD_ATOMIC0_PREOP_HI 0xC988u
+#define CP_HQD_ATOMIC1_PREOP_LO 0xC98Cu
+#define CP_HQD_ATOMIC1_PREOP_HI 0xC990u
+#define CP_HQD_HQ_SCHEDULER0 0xC994u
+#define CP_HQD_HQ_SCHEDULER1 0xC998u
+
+#define SH_STATIC_MEM_CONFIG 0x9604u
+
#define DB_RENDER_CONTROL 0x28000
#define PA_SC_RASTER_CONFIG 0x28350
@@ -2069,4 +2140,20 @@
#define VCE_CMD_IB_AUTO 0x00000005
#define VCE_CMD_SEMAPHORE 0x00000006
+#define ATC_VMID0_PASID_MAPPING 0x339Cu
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS 0x3398u
+#define ATC_VMID_PASID_MAPPING_VALID (1U << 31)
+
+#define ATC_VM_APERTURE0_CNTL 0x3310u
+#define ATS_ACCESS_MODE_NEVER 0
+#define ATS_ACCESS_MODE_ALWAYS 1
+
+#define ATC_VM_APERTURE0_CNTL2 0x3318u
+#define ATC_VM_APERTURE0_HIGH_ADDR 0x3308u
+#define ATC_VM_APERTURE0_LOW_ADDR 0x3300u
+#define ATC_VM_APERTURE1_CNTL 0x3314u
+#define ATC_VM_APERTURE1_CNTL2 0x331Cu
+#define ATC_VM_APERTURE1_HIGH_ADDR 0x330Cu
+#define ATC_VM_APERTURE1_LOW_ADDR 0x3304u
+
#endif
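The new ATC defines are what lets a process address space (PASID) be bound to one of the VMIDs reserved for amdkfd. A sketch of that binding, hedged as an illustration rather than the amdkfd implementation (register helpers and busy-wait policy are assumptions):

static void example_map_pasid(struct radeon_device *rdev,
                              unsigned vmid, unsigned pasid)
{
        u32 mapping = pasid | ATC_VMID_PASID_MAPPING_VALID;

        /* one 32-bit mapping register per VMID, starting at VMID 0 */
        WREG32(ATC_VMID0_PASID_MAPPING + vmid * 4, mapping);

        /* wait for the ATC to acknowledge the update for this VMID */
        while (!(RREG32(ATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1 << vmid)))
                cpu_relax();
}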
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 5c8b358f9fb..924b1b7ab45 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -35,7 +35,7 @@
#define MIN(a,b) (((a)<(b))?(a):(b))
int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc);
+ struct radeon_bo_list **cs_reloc);
struct evergreen_cs_track {
u32 group_size;
u32 nbanks;
@@ -1094,7 +1094,7 @@ static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
u32 last_reg;
u32 m, i, tmp, *ib;
int r;
@@ -1792,7 +1792,7 @@ static bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
static int evergreen_packet3_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt)
{
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
struct evergreen_cs_track *track;
volatile u32 *ib;
unsigned idx;
@@ -2661,7 +2661,7 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
p->track = NULL;
return r;
}
- } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+ } while (p->idx < p->chunk_ib->length_dw);
#if 0
for (r = 0; r < p->ib.length_dw; r++) {
printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
@@ -2684,8 +2684,8 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
**/
int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
{
- struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
- struct radeon_cs_reloc *src_reloc, *dst_reloc, *dst2_reloc;
+ struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
+ struct radeon_bo_list *src_reloc, *dst_reloc, *dst2_reloc;
u32 header, cmd, count, sub_cmd;
volatile u32 *ib = p->ib.ptr;
u32 idx;
@@ -3100,7 +3100,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
return -EINVAL;
}
- } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+ } while (p->idx < p->chunk_ib->length_dw);
#if 0
for (r = 0; r < p->ib->length_dw; r++) {
printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c
index 66bcfadeedd..96535aa8659 100644
--- a/drivers/gpu/drm/radeon/evergreen_dma.c
+++ b/drivers/gpu/drm/radeon/evergreen_dma.c
@@ -110,31 +110,27 @@ struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct reservation_object *resv)
{
- struct radeon_semaphore *sem = NULL;
struct radeon_fence *fence;
+ struct radeon_sync sync;
int ring_index = rdev->asic->copy.dma_ring_index;
struct radeon_ring *ring = &rdev->ring[ring_index];
u32 size_in_dw, cur_size_in_dw;
int i, num_loops;
int r = 0;
- r = radeon_semaphore_create(rdev, &sem);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- return ERR_PTR(r);
- }
+ radeon_sync_create(&sync);
size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
- radeon_semaphore_free(rdev, &sem, NULL);
+ radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
}
- radeon_semaphore_sync_resv(rdev, sem, resv, false);
- radeon_semaphore_sync_rings(rdev, sem, ring->idx);
+ radeon_sync_resv(rdev, &sync, resv, false);
+ radeon_sync_rings(rdev, &sync, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_dw = size_in_dw;
@@ -153,12 +149,12 @@ struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
r = radeon_fence_emit(rdev, &fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
- radeon_semaphore_free(rdev, &sem, NULL);
+ radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
}
radeon_ring_unlock_commit(rdev, ring, false);
- radeon_semaphore_free(rdev, &sem, fence);
+ radeon_sync_free(rdev, &sync, fence);
return fence;
}
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 3faee58946d..360de9f1f49 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1373,6 +1373,7 @@ void cayman_fence_ring_emit(struct radeon_device *rdev,
void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
+ unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
PACKET3_SH_ACTION_ENA;
@@ -1395,15 +1396,14 @@ void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
#endif
(ib->gpu_addr & 0xFFFFFFFC));
radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
- radeon_ring_write(ring, ib->length_dw |
- (ib->vm ? (ib->vm->id << 24) : 0));
+ radeon_ring_write(ring, ib->length_dw | (vm_id << 24));
/* flush read cache over gart for this vmid */
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
radeon_ring_write(ring, 0xFFFFFFFF);
radeon_ring_write(ring, 0);
- radeon_ring_write(ring, ((ib->vm ? ib->vm->id : 0) << 24) | 10); /* poll interval */
+ radeon_ring_write(ring, (vm_id << 24) | 10); /* poll interval */
}
static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
@@ -2502,15 +2502,11 @@ void cayman_vm_decode_fault(struct radeon_device *rdev,
* Update the page table base and flush the VM TLB
* using the CP (cayman-si).
*/
-void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+ unsigned vm_id, uint64_t pd_addr)
{
- struct radeon_ring *ring = &rdev->ring[ridx];
-
- if (vm == NULL)
- return;
-
- radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0));
- radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+ radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2), 0));
+ radeon_ring_write(ring, pd_addr >> 12);
/* flush hdp cache */
radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
@@ -2518,7 +2514,7 @@ void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
/* bits 0-7 are the VM contexts0-7 */
radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
- radeon_ring_write(ring, 1 << vm->id);
+ radeon_ring_write(ring, 1 << vm_id);
/* sync PFP to ME, otherwise we might get invalid PFP reads */
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
index f26f0a9fb52..50f88611ff6 100644
--- a/drivers/gpu/drm/radeon/ni_dma.c
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -123,6 +123,7 @@ void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
+ unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
if (rdev->wb.enabled) {
u32 next_rptr = ring->wptr + 4;
@@ -140,7 +141,7 @@ void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
*/
while ((ring->wptr & 7) != 5)
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
- radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0));
+ radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vm_id, 0));
radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
@@ -446,16 +447,12 @@ void cayman_dma_vm_pad_ib(struct radeon_ib *ib)
ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
}
-void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+void cayman_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+ unsigned vm_id, uint64_t pd_addr)
{
- struct radeon_ring *ring = &rdev->ring[ridx];
-
- if (vm == NULL)
- return;
-
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
- radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
- radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+ radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2));
+ radeon_ring_write(ring, pd_addr >> 12);
/* flush hdp cache */
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
@@ -465,6 +462,6 @@ void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm
/* bits 0-7 are the VM contexts0-7 */
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
- radeon_ring_write(ring, 1 << vm->id);
+ radeon_ring_write(ring, 1 << vm_id);
}
diff --git a/drivers/gpu/drm/radeon/ppsmc.h b/drivers/gpu/drm/radeon/ppsmc.h
index 5670b829128..7e5724a12f8 100644
--- a/drivers/gpu/drm/radeon/ppsmc.h
+++ b/drivers/gpu/drm/radeon/ppsmc.h
@@ -56,6 +56,14 @@
#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20
#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40
+#define FDO_MODE_HARDWARE 0
+#define FDO_MODE_PIECE_WISE_LINEAR 1
+
+enum FAN_CONTROL {
+ FAN_CONTROL_FUZZY,
+ FAN_CONTROL_TABLE
+};
+
#define PPSMC_Result_OK ((uint8_t)0x01)
#define PPSMC_Result_Failed ((uint8_t)0xFF)
@@ -79,6 +87,8 @@ typedef uint8_t PPSMC_Result;
#define PPSMC_MSG_DisableCac ((uint8_t)0x54)
#define PPSMC_TDPClampingActive ((uint8_t)0x59)
#define PPSMC_TDPClampingInactive ((uint8_t)0x5A)
+#define PPSMC_StartFanControl ((uint8_t)0x5B)
+#define PPSMC_StopFanControl ((uint8_t)0x5C)
#define PPSMC_MSG_NoDisplay ((uint8_t)0x5D)
#define PPSMC_MSG_HasDisplay ((uint8_t)0x5E)
#define PPSMC_MSG_UVDPowerOFF ((uint8_t)0x60)
@@ -106,6 +116,7 @@ typedef uint8_t PPSMC_Result;
#define PPSMC_MSG_SAMUDPM_SetEnabledMask ((uint16_t) 0x130)
#define PPSMC_MSG_MCLKDPM_ForceState ((uint16_t) 0x131)
#define PPSMC_MSG_MCLKDPM_NoForcedLevel ((uint16_t) 0x132)
+#define PPSMC_MSG_Thermal_Cntl_Disable ((uint16_t) 0x133)
#define PPSMC_MSG_Voltage_Cntl_Disable ((uint16_t) 0x135)
#define PPSMC_MSG_PCIeDPM_Enable ((uint16_t) 0x136)
#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d)
@@ -149,6 +160,10 @@ typedef uint8_t PPSMC_Result;
#define PPSMC_MSG_MASTER_DeepSleep_ON ((uint16_t) 0x18F)
#define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190)
#define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191)
+#define PPSMC_MSG_SetFanPwmMax ((uint16_t) 0x19A)
+
+#define PPSMC_MSG_ENABLE_THERMAL_DPM ((uint16_t) 0x19C)
+#define PPSMC_MSG_DISABLE_THERMAL_DPM ((uint16_t) 0x19D)
#define PPSMC_MSG_API_GetSclkFrequency ((uint16_t) 0x200)
#define PPSMC_MSG_API_GetMclkFrequency ((uint16_t) 0x201)
@@ -157,10 +172,11 @@ typedef uint8_t PPSMC_Result;
#define PPSMC_MSG_DPM_Config ((uint32_t) 0x102)
#define PPSMC_MSG_DPM_ForceState ((uint32_t) 0x104)
#define PPSMC_MSG_PG_SIMD_Config ((uint32_t) 0x108)
-#define PPSMC_MSG_DPM_N_LevelsDisabled ((uint32_t) 0x112)
+#define PPSMC_MSG_Thermal_Cntl_Enable ((uint32_t) 0x10a)
#define PPSMC_MSG_Voltage_Cntl_Enable ((uint32_t) 0x109)
#define PPSMC_MSG_VCEPowerOFF ((uint32_t) 0x10e)
#define PPSMC_MSG_VCEPowerON ((uint32_t) 0x10f)
+#define PPSMC_MSG_DPM_N_LevelsDisabled ((uint32_t) 0x112)
#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint32_t) 0x11d)
#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint32_t) 0x11e)
#define PPSMC_MSG_EnableBAPM ((uint32_t) 0x120)
diff --git a/drivers/gpu/drm/radeon/pptable.h b/drivers/gpu/drm/radeon/pptable.h
index 2d532996c69..4c2eec49dad 100644
--- a/drivers/gpu/drm/radeon/pptable.h
+++ b/drivers/gpu/drm/radeon/pptable.h
@@ -96,6 +96,14 @@ typedef struct _ATOM_PPLIB_FANTABLE2
USHORT usTMax; // The max temperature
} ATOM_PPLIB_FANTABLE2;
+typedef struct _ATOM_PPLIB_FANTABLE3
+{
+ ATOM_PPLIB_FANTABLE2 basicTable2;
+ UCHAR ucFanControlMode;
+ USHORT usFanPWMMax;
+ USHORT usFanOutputSensitivity;
+} ATOM_PPLIB_FANTABLE3;
+
typedef struct _ATOM_PPLIB_EXTENDEDHEADER
{
USHORT usSize;
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index b53b31a7b76..74f06d54059 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1254,7 +1254,7 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
int r;
u32 tile_flags = 0;
u32 tmp;
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
u32 value;
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
@@ -1293,7 +1293,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
int idx)
{
unsigned c, i;
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
struct r100_cs_track *track;
int r = 0;
volatile uint32_t *ib;
@@ -1542,7 +1542,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx, unsigned reg)
{
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
struct r100_cs_track *track;
volatile uint32_t *ib;
uint32_t tmp;
@@ -1901,7 +1901,7 @@ int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
static int r100_packet3_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt)
{
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
struct r100_cs_track *track;
unsigned idx;
volatile uint32_t *ib;
@@ -2061,7 +2061,7 @@ int r100_cs_parse(struct radeon_cs_parser *p)
}
if (r)
return r;
- } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+ } while (p->idx < p->chunk_ib->length_dw);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index 732d4938aab..c70e6d5bcd1 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -146,7 +146,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx, unsigned reg)
{
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
struct r100_cs_track *track;
volatile uint32_t *ib;
uint32_t tmp;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 1bc4704034c..064ad5569cc 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -598,7 +598,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx, unsigned reg)
{
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
struct r100_cs_track *track;
volatile uint32_t *ib;
uint32_t tmp, tile_flags = 0;
@@ -1142,7 +1142,7 @@ fail:
static int r300_packet3_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt)
{
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
struct r100_cs_track *track;
volatile uint32_t *ib;
unsigned idx;
@@ -1283,7 +1283,7 @@ int r300_cs_parse(struct radeon_cs_parser *p)
if (r) {
return r;
}
- } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+ } while (p->idx < p->chunk_ib->length_dw);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 56b02927cd3..ef5d6066fa5 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2889,31 +2889,27 @@ struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct reservation_object *resv)
{
- struct radeon_semaphore *sem = NULL;
struct radeon_fence *fence;
+ struct radeon_sync sync;
int ring_index = rdev->asic->copy.blit_ring_index;
struct radeon_ring *ring = &rdev->ring[ring_index];
u32 size_in_bytes, cur_size_in_bytes, tmp;
int i, num_loops;
int r = 0;
- r = radeon_semaphore_create(rdev, &sem);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- return ERR_PTR(r);
- }
+ radeon_sync_create(&sync);
size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
- radeon_semaphore_free(rdev, &sem, NULL);
+ radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
}
- radeon_semaphore_sync_resv(rdev, sem, resv, false);
- radeon_semaphore_sync_rings(rdev, sem, ring->idx);
+ radeon_sync_resv(rdev, &sync, resv, false);
+ radeon_sync_rings(rdev, &sync, ring->idx);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
@@ -2942,12 +2938,12 @@ struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
r = radeon_fence_emit(rdev, &fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
- radeon_semaphore_free(rdev, &sem, NULL);
+ radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
}
radeon_ring_unlock_commit(rdev, ring, false);
- radeon_semaphore_free(rdev, &sem, fence);
+ radeon_sync_free(rdev, &sync, fence);
return fence;
}
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index c47537a1ddb..acc1f99c84d 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -969,7 +969,7 @@ static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
struct r600_cs_track *track = (struct r600_cs_track *)p->track;
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
u32 m, i, tmp, *ib;
int r;
@@ -1626,7 +1626,7 @@ static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
static int r600_packet3_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt)
{
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
struct r600_cs_track *track;
volatile u32 *ib;
unsigned idx;
@@ -2316,7 +2316,7 @@ int r600_cs_parse(struct radeon_cs_parser *p)
p->track = NULL;
return r;
}
- } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+ } while (p->idx < p->chunk_ib->length_dw);
#if 0
for (r = 0; r < p->ib.length_dw; r++) {
printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
@@ -2351,10 +2351,10 @@ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
{
- if (p->chunk_relocs_idx == -1) {
+ if (p->chunk_relocs == NULL) {
return 0;
}
- p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
+ p->relocs = kzalloc(sizeof(struct radeon_bo_list), GFP_KERNEL);
if (p->relocs == NULL) {
return -ENOMEM;
}
@@ -2398,7 +2398,7 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
/* Copy the packet into the IB, the parser will read from the
* input memory (cached) and write to the IB (which can be
* uncached). */
- ib_chunk = &parser.chunks[parser.chunk_ib_idx];
+ ib_chunk = parser.chunk_ib;
parser.ib.length_dw = ib_chunk->length_dw;
*l = parser.ib.length_dw;
if (copy_from_user(ib, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) {
@@ -2435,24 +2435,24 @@ void r600_cs_legacy_init(void)
* GPU offset using the provided start.
**/
int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc)
+ struct radeon_bo_list **cs_reloc)
{
struct radeon_cs_chunk *relocs_chunk;
unsigned idx;
*cs_reloc = NULL;
- if (p->chunk_relocs_idx == -1) {
+ if (p->chunk_relocs == NULL) {
DRM_ERROR("No relocation chunk !\n");
return -EINVAL;
}
- relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+ relocs_chunk = p->chunk_relocs;
idx = p->dma_reloc_idx;
if (idx >= p->nrelocs) {
DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
idx, p->nrelocs);
return -EINVAL;
}
- *cs_reloc = p->relocs_ptr[idx];
+ *cs_reloc = &p->relocs[idx];
p->dma_reloc_idx++;
return 0;
}
@@ -2472,8 +2472,8 @@ int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
**/
int r600_dma_cs_parse(struct radeon_cs_parser *p)
{
- struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
- struct radeon_cs_reloc *src_reloc, *dst_reloc;
+ struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
+ struct radeon_bo_list *src_reloc, *dst_reloc;
u32 header, cmd, count, tiled;
volatile u32 *ib = p->ib.ptr;
u32 idx, idx_value;
@@ -2619,7 +2619,7 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
return -EINVAL;
}
- } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+ } while (p->idx < p->chunk_ib->length_dw);
#if 0
for (r = 0; r < p->ib->length_dw; r++) {
printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
index cf0df45d455..d2dd29ab24f 100644
--- a/drivers/gpu/drm/radeon/r600_dma.c
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -441,31 +441,27 @@ struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct reservation_object *resv)
{
- struct radeon_semaphore *sem = NULL;
struct radeon_fence *fence;
+ struct radeon_sync sync;
int ring_index = rdev->asic->copy.dma_ring_index;
struct radeon_ring *ring = &rdev->ring[ring_index];
u32 size_in_dw, cur_size_in_dw;
int i, num_loops;
int r = 0;
- r = radeon_semaphore_create(rdev, &sem);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- return ERR_PTR(r);
- }
+ radeon_sync_create(&sync);
size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
- radeon_semaphore_free(rdev, &sem, NULL);
+ radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
}
- radeon_semaphore_sync_resv(rdev, sem, resv, false);
- radeon_semaphore_sync_rings(rdev, sem, ring->idx);
+ radeon_sync_resv(rdev, &sync, resv, false);
+ radeon_sync_rings(rdev, &sync, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_dw = size_in_dw;
@@ -484,12 +480,12 @@ struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
r = radeon_fence_emit(rdev, &fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
- radeon_semaphore_free(rdev, &sem, NULL);
+ radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
}
radeon_ring_unlock_commit(rdev, ring, false);
- radeon_semaphore_free(rdev, &sem, fence);
+ radeon_sync_free(rdev, &sync, fence);
return fence;
}
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index b5c73df8e20..843b65f46ec 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -811,6 +811,7 @@ union power_info {
union fan_info {
struct _ATOM_PPLIB_FANTABLE fan;
struct _ATOM_PPLIB_FANTABLE2 fan2;
+ struct _ATOM_PPLIB_FANTABLE3 fan3;
};
static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependency_table *radeon_table,
@@ -900,6 +901,14 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
else
rdev->pm.dpm.fan.t_max = 10900;
rdev->pm.dpm.fan.cycle_delay = 100000;
+ if (fan_info->fan.ucFanTableFormat >= 3) {
+ rdev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
+ rdev->pm.dpm.fan.default_max_fan_pwm =
+ le16_to_cpu(fan_info->fan3.usFanPWMMax);
+ rdev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
+ rdev->pm.dpm.fan.fan_output_sensitivity =
+ le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
+ }
rdev->pm.dpm.fan.ucode_fan_control = true;
}
}
diff --git a/drivers/gpu/drm/radeon/r600_dpm.h b/drivers/gpu/drm/radeon/r600_dpm.h
index 46b9d2a0301..bd499d749bc 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.h
+++ b/drivers/gpu/drm/radeon/r600_dpm.h
@@ -96,6 +96,9 @@
#define R600_TEMP_RANGE_MIN (90 * 1000)
#define R600_TEMP_RANGE_MAX (120 * 1000)
+#define FDO_PWM_MODE_STATIC 1
+#define FDO_PWM_MODE_STATIC_RPM 5
+
enum r600_power_level {
R600_POWER_LEVEL_LOW = 0,
R600_POWER_LEVEL_MEDIUM = 1,
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index a9717b3fbf1..54529b837af 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -150,9 +150,6 @@ extern int radeon_backlight;
/* number of hw syncs before falling back on blocking */
#define RADEON_NUM_SYNCS 4
-/* number of hw syncs before falling back on blocking */
-#define RADEON_NUM_SYNCS 4
-
/* hardcode those limit for now */
#define RADEON_VA_IB_OFFSET (1 << 20)
#define RADEON_VA_RESERVED_SIZE (8 << 20)
@@ -363,14 +360,15 @@ struct radeon_fence_driver {
};
struct radeon_fence {
- struct fence base;
+ struct fence base;
- struct radeon_device *rdev;
- uint64_t seq;
+ struct radeon_device *rdev;
+ uint64_t seq;
/* RB, DMA, etc. */
- unsigned ring;
+ unsigned ring;
+ bool is_vm_update;
- wait_queue_t fence_wake;
+ wait_queue_t fence_wake;
};
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
@@ -452,12 +450,22 @@ struct radeon_mman {
#endif
};
+struct radeon_bo_list {
+ struct radeon_bo *robj;
+ struct ttm_validate_buffer tv;
+ uint64_t gpu_offset;
+ unsigned prefered_domains; /* sic: field name kept as spelled in the kernel */
+ unsigned allowed_domains;
+ uint32_t tiling_flags;
+};
+
/* bo virtual address in a specific vm */
struct radeon_bo_va {
/* protected by bo being reserved */
struct list_head bo_list;
uint32_t flags;
uint64_t addr;
+ struct radeon_fence *last_pt_update;
unsigned ref_count;
/* protected by vm mutex */
@@ -474,7 +482,7 @@ struct radeon_bo {
struct list_head list;
/* Protected by tbo.reserved */
u32 initial_domain;
- struct ttm_place placements[3];
+ struct ttm_place placements[4];
struct ttm_placement placement;
struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
@@ -576,10 +584,9 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
* Semaphores.
*/
struct radeon_semaphore {
- struct radeon_sa_bo *sa_bo;
- signed waiters;
- uint64_t gpu_addr;
- struct radeon_fence *sync_to[RADEON_NUM_RINGS];
+ struct radeon_sa_bo *sa_bo;
+ signed waiters;
+ uint64_t gpu_addr;
};
int radeon_semaphore_create(struct radeon_device *rdev,
@@ -588,20 +595,33 @@ bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore);
bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore);
-void radeon_semaphore_sync_fence(struct radeon_semaphore *semaphore,
- struct radeon_fence *fence);
-int radeon_semaphore_sync_resv(struct radeon_device *rdev,
- struct radeon_semaphore *semaphore,
- struct reservation_object *resv,
- bool shared);
-int radeon_semaphore_sync_rings(struct radeon_device *rdev,
- struct radeon_semaphore *semaphore,
- int waiting_ring);
void radeon_semaphore_free(struct radeon_device *rdev,
struct radeon_semaphore **semaphore,
struct radeon_fence *fence);
/*
+ * Synchronization
+ */
+struct radeon_sync {
+ struct radeon_semaphore *semaphores[RADEON_NUM_SYNCS];
+ struct radeon_fence *sync_to[RADEON_NUM_RINGS];
+ struct radeon_fence *last_vm_update;
+};
+
+void radeon_sync_create(struct radeon_sync *sync);
+void radeon_sync_fence(struct radeon_sync *sync,
+ struct radeon_fence *fence);
+int radeon_sync_resv(struct radeon_device *rdev,
+ struct radeon_sync *sync,
+ struct reservation_object *resv,
+ bool shared);
+int radeon_sync_rings(struct radeon_device *rdev,
+ struct radeon_sync *sync,
+ int waiting_ring);
+void radeon_sync_free(struct radeon_device *rdev, struct radeon_sync *sync,
+ struct radeon_fence *fence);
+
+/*
* GART structures, functions & helpers
*/
struct radeon_mc;
@@ -701,6 +721,10 @@ struct radeon_doorbell {
int radeon_doorbell_get(struct radeon_device *rdev, u32 *page);
void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell);
+void radeon_doorbell_get_kfd_info(struct radeon_device *rdev,
+ phys_addr_t *aperture_base,
+ size_t *aperture_size,
+ size_t *start_offset);
/*
* IRQS.
@@ -814,7 +838,7 @@ struct radeon_ib {
struct radeon_fence *fence;
struct radeon_vm *vm;
bool is_const_ib;
- struct radeon_semaphore *semaphore;
+ struct radeon_sync sync;
};
struct radeon_ring {
@@ -891,33 +915,40 @@ struct radeon_vm_pt {
uint64_t addr;
};
+struct radeon_vm_id {
+ unsigned id;
+ uint64_t pd_gpu_addr;
+ /* last flushed PD/PT update */
+ struct radeon_fence *flushed_updates;
+ /* last use of vmid */
+ struct radeon_fence *last_id_use;
+};
+
struct radeon_vm {
- struct rb_root va;
- unsigned id;
+ struct mutex mutex;
+
+ struct rb_root va;
+
+ /* protecting invalidated and freed */
+ spinlock_t status_lock;
/* BOs moved, but not yet updated in the PT */
- struct list_head invalidated;
+ struct list_head invalidated;
/* BOs freed, but not yet updated in the PT */
- struct list_head freed;
+ struct list_head freed;
/* contains the page directory */
- struct radeon_bo *page_directory;
- uint64_t pd_gpu_addr;
- unsigned max_pde_used;
+ struct radeon_bo *page_directory;
+ unsigned max_pde_used;
/* array of page tables, one for each page directory entry */
- struct radeon_vm_pt *page_tables;
+ struct radeon_vm_pt *page_tables;
- struct radeon_bo_va *ib_bo_va;
+ struct radeon_bo_va *ib_bo_va;
- struct mutex mutex;
- /* last fence for cs using this vm */
- struct radeon_fence *fence;
- /* last flush or NULL if we still need to flush */
- struct radeon_fence *last_flush;
- /* last use of vmid */
- struct radeon_fence *last_id_use;
+ /* for id and flush management per ring */
+ struct radeon_vm_id ids[RADEON_NUM_RINGS];
};
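With struct radeon_vm_id the VMID and page-directory state are now tracked per ring instead of per VM. A minimal sketch of how an IB execute hook derives the VMID, which is exactly the expression this patch adds to the cik/cayman ring_ib_execute functions:

static unsigned example_ib_vm_id(struct radeon_ib *ib)
{
        /* VMID 0 (the system VM) is used when the IB has no VM attached */
        return ib->vm ? ib->vm->ids[ib->ring].id : 0;
}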
struct radeon_vm_manager {
@@ -1025,19 +1056,7 @@ void cayman_dma_fini(struct radeon_device *rdev);
/*
* CS.
*/
-struct radeon_cs_reloc {
- struct drm_gem_object *gobj;
- struct radeon_bo *robj;
- struct ttm_validate_buffer tv;
- uint64_t gpu_offset;
- unsigned prefered_domains;
- unsigned allowed_domains;
- uint32_t tiling_flags;
- uint32_t handle;
-};
-
struct radeon_cs_chunk {
- uint32_t chunk_id;
uint32_t length_dw;
uint32_t *kdata;
void __user *user_ptr;
@@ -1055,16 +1074,15 @@ struct radeon_cs_parser {
unsigned idx;
/* relocations */
unsigned nrelocs;
- struct radeon_cs_reloc *relocs;
- struct radeon_cs_reloc **relocs_ptr;
- struct radeon_cs_reloc *vm_bos;
+ struct radeon_bo_list *relocs;
+ struct radeon_bo_list *vm_bos;
struct list_head validated;
unsigned dma_reloc_idx;
/* indices of various chunks */
- int chunk_ib_idx;
- int chunk_relocs_idx;
- int chunk_flags_idx;
- int chunk_const_ib_idx;
+ struct radeon_cs_chunk *chunk_ib;
+ struct radeon_cs_chunk *chunk_relocs;
+ struct radeon_cs_chunk *chunk_flags;
+ struct radeon_cs_chunk *chunk_const_ib;
struct radeon_ib ib;
struct radeon_ib const_ib;
void *track;
@@ -1078,7 +1096,7 @@ struct radeon_cs_parser {
static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
- struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
+ struct radeon_cs_chunk *ibc = p->chunk_ib;
if (ibc->kdata)
return ibc->kdata[idx];
@@ -1490,6 +1508,10 @@ struct radeon_dpm_fan {
u8 t_hyst;
u32 cycle_delay;
u16 t_max;
+ u8 control_mode;
+ u16 default_max_fan_pwm;
+ u16 default_fan_output_sensitivity;
+ u16 fan_output_sensitivity;
bool ucode_fan_control;
};
@@ -1623,6 +1645,11 @@ struct radeon_pm {
/* internal thermal controller on rv6xx+ */
enum radeon_int_thermal_type int_thermal_type;
struct device *int_hwmon_dev;
+ /* fan control parameters */
+ bool no_fan;
+ u8 fan_pulses_per_revolution;
+ u8 fan_min_rpm;
+ u8 fan_max_rpm;
/* dpm */
bool dpm_enabled;
struct radeon_dpm dpm;
@@ -1785,7 +1812,8 @@ struct radeon_asic_ring {
void (*hdp_flush)(struct radeon_device *rdev, struct radeon_ring *ring);
bool (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
struct radeon_semaphore *semaphore, bool emit_wait);
- void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+ void (*vm_flush)(struct radeon_device *rdev, struct radeon_ring *ring,
+ unsigned vm_id, uint64_t pd_addr);
/* testing functions */
int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
@@ -2388,6 +2416,8 @@ struct radeon_device {
struct radeon_atcs atcs;
/* srbm instance registers */
struct mutex srbm_mutex;
+ /* GRBM index mutex. Protects concurrent access to the GRBM index */
+ struct mutex grbm_idx_mutex;
/* clock, powergating flags */
u32 cg_flags;
u32 pg_flags;
@@ -2400,6 +2430,10 @@ struct radeon_device {
u64 vram_pin_size;
u64 gart_pin_size;
+ /* amdkfd interface */
+ struct kfd_dev *kfd;
+ struct radeon_sa_manager kfd_bo;
+
struct mutex mn_lock;
DECLARE_HASHTABLE(mn_hash, 7);
};
@@ -2831,7 +2865,7 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_execute((rdev), (ib))
#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_parse((rdev), (ib))
#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)]->is_lockup((rdev), (cp))
-#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)]->vm_flush((rdev), (r), (vm))
+#define radeon_ring_vm_flush(rdev, r, vm_id, pd_addr) (rdev)->asic->ring[(r)->idx]->vm_flush((rdev), (r), (vm_id), (pd_addr))
#define radeon_ring_get_rptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_rptr((rdev), (r))
#define radeon_ring_get_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_wptr((rdev), (r))
#define radeon_ring_set_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->set_wptr((rdev), (r))
@@ -2940,14 +2974,14 @@ int radeon_vm_manager_init(struct radeon_device *rdev);
void radeon_vm_manager_fini(struct radeon_device *rdev);
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
-struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
+struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
struct radeon_vm *vm,
struct list_head *head);
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
struct radeon_vm *vm, int ring);
void radeon_vm_flush(struct radeon_device *rdev,
struct radeon_vm *vm,
- int ring);
+ int ring, struct radeon_fence *fence);
void radeon_vm_fence(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_fence *fence);
@@ -3054,7 +3088,7 @@ bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p);
void radeon_cs_dump_packet(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt);
int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc,
+ struct radeon_bo_list **cs_reloc,
int nomm);
int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
uint32_t *vline_start_end,
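
The struct radeon_cs_parser hunk above drops the integer chunk indices (chunk_ib_idx, chunk_relocs_idx, ...) in favour of direct struct radeon_cs_chunk pointers, so "chunk absent" becomes a NULL pointer rather than -1 and accessors like radeon_get_ib_value() lose one level of indirection. A small standalone sketch of the two lookup styles, using cut-down hypothetical types rather than the real kernel structures:

#include <stdint.h>
#include <stdio.h>

struct cs_chunk { uint32_t length_dw; };

struct cs_parser_old {
	struct cs_chunk chunks[4];
	int chunk_ib_idx;              /* -1 means "no IB chunk" */
};

struct cs_parser_new {
	struct cs_chunk chunks[4];
	struct cs_chunk *chunk_ib;     /* NULL means "no IB chunk" */
};

static uint32_t ib_len_old(struct cs_parser_old *p)
{
	if (p->chunk_ib_idx == -1)
		return 0;
	return p->chunks[p->chunk_ib_idx].length_dw;   /* extra indirection */
}

static uint32_t ib_len_new(struct cs_parser_new *p)
{
	if (p->chunk_ib == NULL)
		return 0;
	return p->chunk_ib->length_dw;                 /* direct dereference */
}

int main(void)
{
	struct cs_parser_old o = { .chunks = { {16} }, .chunk_ib_idx = 0 };
	struct cs_parser_new n = { .chunks = { {16} } };

	n.chunk_ib = &n.chunks[0];
	printf("old: %u dw, new: %u dw\n", ib_len_old(&o), ib_len_new(&n));
	return 0;
}
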
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index d8ace5b28a5..2a45d548d5e 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -599,7 +599,8 @@ int cayman_asic_reset(struct radeon_device *rdev);
void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int cayman_vm_init(struct radeon_device *rdev);
void cayman_vm_fini(struct radeon_device *rdev);
-void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+ unsigned vm_id, uint64_t pd_addr);
uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags);
int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
@@ -624,7 +625,8 @@ void cayman_dma_vm_set_pages(struct radeon_device *rdev,
uint32_t incr, uint32_t flags);
void cayman_dma_vm_pad_ib(struct radeon_ib *ib);
-void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+void cayman_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+ unsigned vm_id, uint64_t pd_addr);
u32 cayman_gfx_get_rptr(struct radeon_device *rdev,
struct radeon_ring *ring);
@@ -699,7 +701,8 @@ int si_irq_set(struct radeon_device *rdev);
int si_irq_process(struct radeon_device *rdev);
int si_vm_init(struct radeon_device *rdev);
void si_vm_fini(struct radeon_device *rdev);
-void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+void si_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+ unsigned vm_id, uint64_t pd_addr);
int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
@@ -721,7 +724,8 @@ void si_dma_vm_set_pages(struct radeon_device *rdev,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
-void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+ unsigned vm_id, uint64_t pd_addr);
u32 si_get_xclk(struct radeon_device *rdev);
uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
@@ -793,7 +797,8 @@ int cik_irq_set(struct radeon_device *rdev);
int cik_irq_process(struct radeon_device *rdev);
int cik_vm_init(struct radeon_device *rdev);
void cik_vm_fini(struct radeon_device *rdev);
-void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+void cik_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+ unsigned vm_id, uint64_t pd_addr);
void cik_sdma_vm_copy_pages(struct radeon_device *rdev,
struct radeon_ib *ib,
@@ -811,7 +816,8 @@ void cik_sdma_vm_set_pages(struct radeon_device *rdev,
uint32_t incr, uint32_t flags);
void cik_sdma_vm_pad_ib(struct radeon_ib *ib);
-void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+ unsigned vm_id, uint64_t pd_addr);
int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
u32 cik_gfx_get_rptr(struct radeon_device *rdev,
struct radeon_ring *ring);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index df69b92ba16..dbc94f30029 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -196,8 +196,8 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
}
}
-static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
- u8 id)
+struct radeon_gpio_rec radeon_atombios_lookup_gpio(struct radeon_device *rdev,
+ u8 id)
{
struct atom_context *ctx = rdev->mode_info.atom_context;
struct radeon_gpio_rec gpio;
@@ -221,6 +221,7 @@ static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
if (id == pin->ucGPIO_ID) {
gpio.id = pin->ucGPIO_ID;
gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex) * 4;
+ gpio.shift = pin->ucGpioPinBitShift;
gpio.mask = (1 << pin->ucGpioPinBitShift);
gpio.valid = true;
break;
@@ -801,7 +802,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
hpd_record =
(ATOM_HPD_INT_RECORD *)
record;
- gpio = radeon_lookup_gpio(rdev,
+ gpio = radeon_atombios_lookup_gpio(rdev,
hpd_record->ucHPDIntGPIOID);
hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
hpd.plugged_state = hpd_record->ucPlugged_PinState;
@@ -2128,7 +2129,7 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
rdev->pm.power_state[state_index].clock_info[0].voltage.type =
VOLTAGE_GPIO;
rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
- radeon_lookup_gpio(rdev,
+ radeon_atombios_lookup_gpio(rdev,
power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex);
if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
@@ -2164,7 +2165,7 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
rdev->pm.power_state[state_index].clock_info[0].voltage.type =
VOLTAGE_GPIO;
rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
- radeon_lookup_gpio(rdev,
+ radeon_atombios_lookup_gpio(rdev,
power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex);
if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
@@ -2200,7 +2201,7 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
rdev->pm.power_state[state_index].clock_info[0].voltage.type =
VOLTAGE_GPIO;
rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
- radeon_lookup_gpio(rdev,
+ radeon_atombios_lookup_gpio(rdev,
power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex);
if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
@@ -2248,6 +2249,14 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
/* add the i2c bus for thermal/fan chip */
if (controller->ucType > 0) {
+ if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
+ rdev->pm.no_fan = true;
+ rdev->pm.fan_pulses_per_revolution =
+ controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
+ if (rdev->pm.fan_pulses_per_revolution) {
+ rdev->pm.fan_min_rpm = controller->ucFanMinRPM;
+ rdev->pm.fan_max_rpm = controller->ucFanMaxRPM;
+ }
if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
DRM_INFO("Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
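
The thermal-controller hunk above records the board's fan capabilities while parsing the ATOM power-play table: a no-fan flag, the tachometer pulses per revolution, and, when a tach is present, the minimum and maximum RPM. A standalone sketch of the same bit extraction; the mask values are stand-ins for the ATOM_PP_FANPARAMETERS_* constants rather than verified definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the ATOM_PP_FANPARAMETERS_* constants. */
#define FANPARAMS_TACH_PULSES_MASK 0x0f
#define FANPARAMS_NOFAN            0x80

int main(void)
{
	uint8_t fan_parameters = 0x02;   /* example: fan present, 2 pulses/rev */
	bool no_fan = fan_parameters & FANPARAMS_NOFAN;
	unsigned pulses = fan_parameters & FANPARAMS_TACH_PULSES_MASK;

	if (!no_fan && pulses)
		printf("tach reports %u pulses per revolution\n", pulses);
	return 0;
}
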
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 6f377de099f..c830863bc98 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -77,22 +77,18 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
struct drm_device *ddev = p->rdev->ddev;
struct radeon_cs_chunk *chunk;
struct radeon_cs_buckets buckets;
- unsigned i, j;
- bool duplicate, need_mmap_lock = false;
+ unsigned i;
+ bool need_mmap_lock = false;
int r;
- if (p->chunk_relocs_idx == -1) {
+ if (p->chunk_relocs == NULL) {
return 0;
}
- chunk = &p->chunks[p->chunk_relocs_idx];
+ chunk = p->chunk_relocs;
p->dma_reloc_idx = 0;
 	/* FIXME: we assume that each reloc uses 4 dwords */
p->nrelocs = chunk->length_dw / 4;
- p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
- if (p->relocs_ptr == NULL) {
- return -ENOMEM;
- }
- p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
+ p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_bo_list), GFP_KERNEL);
if (p->relocs == NULL) {
return -ENOMEM;
}
@@ -101,31 +97,17 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
for (i = 0; i < p->nrelocs; i++) {
struct drm_radeon_cs_reloc *r;
+ struct drm_gem_object *gobj;
unsigned priority;
- duplicate = false;
r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
- for (j = 0; j < i; j++) {
- if (r->handle == p->relocs[j].handle) {
- p->relocs_ptr[i] = &p->relocs[j];
- duplicate = true;
- break;
- }
- }
- if (duplicate) {
- p->relocs[i].handle = 0;
- continue;
- }
-
- p->relocs[i].gobj = drm_gem_object_lookup(ddev, p->filp,
- r->handle);
- if (p->relocs[i].gobj == NULL) {
+ gobj = drm_gem_object_lookup(ddev, p->filp, r->handle);
+ if (gobj == NULL) {
DRM_ERROR("gem object lookup failed 0x%x\n",
r->handle);
return -ENOENT;
}
- p->relocs_ptr[i] = &p->relocs[i];
- p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
+ p->relocs[i].robj = gem_to_radeon_bo(gobj);
/* The userspace buffer priorities are from 0 to 15. A higher
* number means the buffer is more important.
@@ -184,7 +166,6 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
p->relocs[i].tv.shared = !r->write_domain;
- p->relocs[i].handle = r->handle;
radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
priority);
@@ -251,15 +232,15 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
int r;
list_for_each_entry(reloc, &p->validated, tv.head) {
struct reservation_object *resv;
resv = reloc->robj->tbo.resv;
- r = radeon_semaphore_sync_resv(p->rdev, p->ib.semaphore, resv,
- reloc->tv.shared);
+ r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
+ reloc->tv.shared);
if (r)
return r;
}
@@ -282,13 +263,11 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
INIT_LIST_HEAD(&p->validated);
p->idx = 0;
p->ib.sa_bo = NULL;
- p->ib.semaphore = NULL;
p->const_ib.sa_bo = NULL;
- p->const_ib.semaphore = NULL;
- p->chunk_ib_idx = -1;
- p->chunk_relocs_idx = -1;
- p->chunk_flags_idx = -1;
- p->chunk_const_ib_idx = -1;
+ p->chunk_ib = NULL;
+ p->chunk_relocs = NULL;
+ p->chunk_flags = NULL;
+ p->chunk_const_ib = NULL;
p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
if (p->chunks_array == NULL) {
return -ENOMEM;
@@ -315,24 +294,23 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
return -EFAULT;
}
p->chunks[i].length_dw = user_chunk.length_dw;
- p->chunks[i].chunk_id = user_chunk.chunk_id;
- if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
- p->chunk_relocs_idx = i;
+ if (user_chunk.chunk_id == RADEON_CHUNK_ID_RELOCS) {
+ p->chunk_relocs = &p->chunks[i];
}
- if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
- p->chunk_ib_idx = i;
+ if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
+ p->chunk_ib = &p->chunks[i];
/* zero length IB isn't useful */
if (p->chunks[i].length_dw == 0)
return -EINVAL;
}
- if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) {
- p->chunk_const_ib_idx = i;
+ if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB) {
+ p->chunk_const_ib = &p->chunks[i];
/* zero length CONST IB isn't useful */
if (p->chunks[i].length_dw == 0)
return -EINVAL;
}
- if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
- p->chunk_flags_idx = i;
+ if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
+ p->chunk_flags = &p->chunks[i];
/* zero length flags aren't useful */
if (p->chunks[i].length_dw == 0)
return -EINVAL;
@@ -341,10 +319,10 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
size = p->chunks[i].length_dw;
cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
p->chunks[i].user_ptr = cdata;
- if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB)
+ if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB)
continue;
- if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
+ if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
continue;
}
@@ -357,7 +335,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
return -EFAULT;
}
- if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
+ if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
p->cs_flags = p->chunks[i].kdata[0];
if (p->chunks[i].length_dw > 1)
ring = p->chunks[i].kdata[1];
@@ -398,8 +376,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
static int cmp_size_smaller_first(void *priv, struct list_head *a,
struct list_head *b)
{
- struct radeon_cs_reloc *la = list_entry(a, struct radeon_cs_reloc, tv.head);
- struct radeon_cs_reloc *lb = list_entry(b, struct radeon_cs_reloc, tv.head);
+ struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, tv.head);
+ struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);
/* Sort A before B if A is smaller. */
return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
@@ -440,13 +418,15 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
if (parser->relocs != NULL) {
for (i = 0; i < parser->nrelocs; i++) {
- if (parser->relocs[i].gobj)
- drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
+ struct radeon_bo *bo = parser->relocs[i].robj;
+ if (bo == NULL)
+ continue;
+
+ drm_gem_object_unreference_unlocked(&bo->gem_base);
}
}
kfree(parser->track);
kfree(parser->relocs);
- kfree(parser->relocs_ptr);
drm_free_large(parser->vm_bos);
for (i = 0; i < parser->nchunks; i++)
drm_free_large(parser->chunks[i].kdata);
@@ -461,7 +441,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
{
int r;
- if (parser->chunk_ib_idx == -1)
+ if (parser->chunk_ib == NULL)
return 0;
if (parser->cs_flags & RADEON_CS_USE_VM)
@@ -521,10 +501,6 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
for (i = 0; i < p->nrelocs; i++) {
struct radeon_bo *bo;
- /* ignore duplicates */
- if (p->relocs_ptr[i] != &p->relocs[i])
- continue;
-
bo = p->relocs[i].robj;
bo_va = radeon_vm_bo_find(vm, bo);
if (bo_va == NULL) {
@@ -535,6 +511,8 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
if (r)
return r;
+
+ radeon_sync_fence(&p->ib.sync, bo_va->last_pt_update);
}
return radeon_vm_clear_invalids(rdev, vm);
@@ -547,7 +525,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
struct radeon_vm *vm = &fpriv->vm;
int r;
- if (parser->chunk_ib_idx == -1)
+ if (parser->chunk_ib == NULL)
return 0;
if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
return 0;
@@ -579,10 +557,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
DRM_ERROR("Failed to sync rings: %i\n", r);
goto out;
}
- radeon_semaphore_sync_fence(parser->ib.semaphore, vm->fence);
if ((rdev->family >= CHIP_TAHITI) &&
- (parser->chunk_const_ib_idx != -1)) {
+ (parser->chunk_const_ib != NULL)) {
r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
} else {
r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
@@ -609,7 +586,7 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser
struct radeon_vm *vm = NULL;
int r;
- if (parser->chunk_ib_idx == -1)
+ if (parser->chunk_ib == NULL)
return 0;
if (parser->cs_flags & RADEON_CS_USE_VM) {
@@ -617,8 +594,8 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser
vm = &fpriv->vm;
if ((rdev->family >= CHIP_TAHITI) &&
- (parser->chunk_const_ib_idx != -1)) {
- ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
+ (parser->chunk_const_ib != NULL)) {
+ ib_chunk = parser->chunk_const_ib;
if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
return -EINVAL;
@@ -637,13 +614,13 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser
return -EFAULT;
}
- ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+ ib_chunk = parser->chunk_ib;
if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
return -EINVAL;
}
}
- ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+ ib_chunk = parser->chunk_ib;
r = radeon_ib_get(rdev, parser->ring, &parser->ib,
vm, ib_chunk->length_dw * 4);
@@ -735,7 +712,7 @@ int radeon_cs_packet_parse(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx)
{
- struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+ struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
struct radeon_device *rdev = p->rdev;
uint32_t header;
@@ -829,7 +806,7 @@ void radeon_cs_dump_packet(struct radeon_cs_parser *p,
* GPU offset using the provided start.
**/
int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc,
+ struct radeon_bo_list **cs_reloc,
int nomm)
{
struct radeon_cs_chunk *relocs_chunk;
@@ -837,12 +814,12 @@ int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
unsigned idx;
int r;
- if (p->chunk_relocs_idx == -1) {
+ if (p->chunk_relocs == NULL) {
DRM_ERROR("No relocation chunk !\n");
return -EINVAL;
}
*cs_reloc = NULL;
- relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+ relocs_chunk = p->chunk_relocs;
r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
if (r)
return r;
@@ -868,6 +845,6 @@ int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
(u64)relocs_chunk->kdata[idx + 3] << 32;
(*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
} else
- *cs_reloc = p->relocs_ptr[(idx / 4)];
+ *cs_reloc = &p->relocs[(idx / 4)];
return 0;
}
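
In radeon_cs_packet_next_reloc() above, a relocation is still found by dividing the packet's dword index by four, since each relocation occupies four dwords in the relocation chunk; the change is only that the result now points straight into the radeon_bo_list array instead of going through the removed relocs_ptr table. A standalone sketch of that index mapping with a hypothetical cut-down entry type:

#include <stdint.h>
#include <stdio.h>

struct bo_list_entry { uint32_t handle; };

int main(void)
{
	struct bo_list_entry relocs[3] = { {10}, {11}, {12} };
	unsigned dword_idx = 8;              /* offset into the relocation chunk */
	struct bo_list_entry *e = &relocs[dword_idx / 4];  /* 4 dwords per reloc */

	printf("dword %u -> handle %u\n", dword_idx, e->handle);  /* handle 12 */
	return 0;
}
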
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 9630e8d95fb..45e54060ee9 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -117,106 +117,7 @@ static void radeon_show_cursor(struct drm_crtc *crtc)
}
}
-static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
- uint64_t gpu_addr)
-{
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- struct radeon_device *rdev = crtc->dev->dev_private;
-
- if (ASIC_IS_DCE4(rdev)) {
- WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
- upper_32_bits(gpu_addr));
- WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
- gpu_addr & 0xffffffff);
- } else if (ASIC_IS_AVIVO(rdev)) {
- if (rdev->family >= CHIP_RV770) {
- if (radeon_crtc->crtc_id)
- WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
- else
- WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
- }
- WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
- gpu_addr & 0xffffffff);
- } else {
- radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
- /* offset is from DISP(2)_BASE_ADDRESS */
- WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
- }
-}
-
-int radeon_crtc_cursor_set(struct drm_crtc *crtc,
- struct drm_file *file_priv,
- uint32_t handle,
- uint32_t width,
- uint32_t height)
-{
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- struct radeon_device *rdev = crtc->dev->dev_private;
- struct drm_gem_object *obj;
- struct radeon_bo *robj;
- uint64_t gpu_addr;
- int ret;
-
- if (!handle) {
- /* turn off cursor */
- radeon_hide_cursor(crtc);
- obj = NULL;
- goto unpin;
- }
-
- if ((width > radeon_crtc->max_cursor_width) ||
- (height > radeon_crtc->max_cursor_height)) {
- DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
- return -EINVAL;
- }
-
- obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
- if (!obj) {
- DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id);
- return -ENOENT;
- }
-
- robj = gem_to_radeon_bo(obj);
- ret = radeon_bo_reserve(robj, false);
- if (unlikely(ret != 0))
- goto fail;
- /* Only 27 bit offset for legacy cursor */
- ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
- ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
- &gpu_addr);
- radeon_bo_unreserve(robj);
- if (ret)
- goto fail;
-
- radeon_crtc->cursor_width = width;
- radeon_crtc->cursor_height = height;
-
- radeon_lock_cursor(crtc, true);
- radeon_set_cursor(crtc, obj, gpu_addr);
- radeon_show_cursor(crtc);
- radeon_lock_cursor(crtc, false);
-
-unpin:
- if (radeon_crtc->cursor_bo) {
- robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
- ret = radeon_bo_reserve(robj, false);
- if (likely(ret == 0)) {
- radeon_bo_unpin(robj);
- radeon_bo_unreserve(robj);
- }
- drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
- }
-
- radeon_crtc->cursor_bo = obj;
- return 0;
-fail:
- drm_gem_object_unreference_unlocked(obj);
-
- return ret;
-}
-
-int radeon_crtc_cursor_move(struct drm_crtc *crtc,
- int x, int y)
+static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_device *rdev = crtc->dev->dev_private;
@@ -281,7 +182,6 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
}
}
- radeon_lock_cursor(crtc, true);
if (ASIC_IS_DCE4(rdev)) {
WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
@@ -308,7 +208,173 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset +
(yorigin * 256)));
}
+
+ radeon_crtc->cursor_x = x;
+ radeon_crtc->cursor_y = y;
+
+ return 0;
+}
+
+int radeon_crtc_cursor_move(struct drm_crtc *crtc,
+ int x, int y)
+{
+ int ret;
+
+ radeon_lock_cursor(crtc, true);
+ ret = radeon_cursor_move_locked(crtc, x, y);
radeon_lock_cursor(crtc, false);
+ return ret;
+}
+
+static int radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct radeon_device *rdev = crtc->dev->dev_private;
+ struct radeon_bo *robj = gem_to_radeon_bo(obj);
+ uint64_t gpu_addr;
+ int ret;
+
+ ret = radeon_bo_reserve(robj, false);
+ if (unlikely(ret != 0))
+ goto fail;
+ /* Only 27 bit offset for legacy cursor */
+ ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
+ ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
+ &gpu_addr);
+ radeon_bo_unreserve(robj);
+ if (ret)
+ goto fail;
+
+ if (ASIC_IS_DCE4(rdev)) {
+ WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+ upper_32_bits(gpu_addr));
+ WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ gpu_addr & 0xffffffff);
+ } else if (ASIC_IS_AVIVO(rdev)) {
+ if (rdev->family >= CHIP_RV770) {
+ if (radeon_crtc->crtc_id)
+ WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
+ else
+ WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
+ }
+ WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ gpu_addr & 0xffffffff);
+ } else {
+ radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
+ /* offset is from DISP(2)_BASE_ADDRESS */
+ WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
+ }
+
return 0;
+
+fail:
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ret;
+}
+
+int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t width,
+ uint32_t height,
+ int32_t hot_x,
+ int32_t hot_y)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_gem_object *obj;
+ int ret;
+
+ if (!handle) {
+ /* turn off cursor */
+ radeon_hide_cursor(crtc);
+ obj = NULL;
+ goto unpin;
+ }
+
+ if ((width > radeon_crtc->max_cursor_width) ||
+ (height > radeon_crtc->max_cursor_height)) {
+ DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
+ return -EINVAL;
+ }
+
+ obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+ if (!obj) {
+ DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id);
+ return -ENOENT;
+ }
+
+ radeon_crtc->cursor_width = width;
+ radeon_crtc->cursor_height = height;
+
+ radeon_lock_cursor(crtc, true);
+
+ if (hot_x != radeon_crtc->cursor_hot_x ||
+ hot_y != radeon_crtc->cursor_hot_y) {
+ int x, y;
+
+ x = radeon_crtc->cursor_x + radeon_crtc->cursor_hot_x - hot_x;
+ y = radeon_crtc->cursor_y + radeon_crtc->cursor_hot_y - hot_y;
+
+ radeon_cursor_move_locked(crtc, x, y);
+
+ radeon_crtc->cursor_hot_x = hot_x;
+ radeon_crtc->cursor_hot_y = hot_y;
+ }
+
+ ret = radeon_set_cursor(crtc, obj);
+
+ if (ret)
+ DRM_ERROR("radeon_set_cursor returned %d, not changing cursor\n",
+ ret);
+ else
+ radeon_show_cursor(crtc);
+
+ radeon_lock_cursor(crtc, false);
+
+unpin:
+ if (radeon_crtc->cursor_bo) {
+ struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
+ ret = radeon_bo_reserve(robj, false);
+ if (likely(ret == 0)) {
+ radeon_bo_unpin(robj);
+ radeon_bo_unreserve(robj);
+ }
+ if (radeon_crtc->cursor_bo != obj)
+ drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
+ }
+
+ radeon_crtc->cursor_bo = obj;
+ return 0;
+}
+
+/**
+ * radeon_cursor_reset - Re-set the current cursor, if any.
+ *
+ * @crtc: drm crtc
+ *
+ * If the CRTC passed in currently has a cursor assigned, this function
+ * makes sure it's visible.
+ */
+void radeon_cursor_reset(struct drm_crtc *crtc)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ int ret;
+
+ if (radeon_crtc->cursor_bo) {
+ radeon_lock_cursor(crtc, true);
+
+ radeon_cursor_move_locked(crtc, radeon_crtc->cursor_x,
+ radeon_crtc->cursor_y);
+
+ ret = radeon_set_cursor(crtc, radeon_crtc->cursor_bo);
+ if (ret)
+ DRM_ERROR("radeon_set_cursor returned %d, not showing "
+ "cursor\n", ret);
+ else
+ radeon_show_cursor(crtc);
+
+ radeon_lock_cursor(crtc, false);
+ }
}
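
radeon_crtc_cursor_set2() above gains hotspot handling: when userspace supplies a new hot_x/hot_y, the stored cursor position is shifted by the old hotspot minus the new one, so the point under the hotspot stays at the same screen location. A standalone sketch of that adjustment with example values:

#include <stdio.h>

int main(void)
{
	/* Current cursor state (hypothetical example values). */
	int cursor_x = 100, cursor_y = 80;
	int old_hot_x = 0, old_hot_y = 0;
	int new_hot_x = 4, new_hot_y = 6;

	/* Same adjustment as radeon_crtc_cursor_set2(): keep the hotspot's
	 * screen position fixed when the hotspot inside the image moves. */
	cursor_x += old_hot_x - new_hot_x;
	cursor_y += old_hot_y - new_hot_y;

	printf("new cursor position: %d,%d\n", cursor_x, cursor_y);  /* 96,74 */
	return 0;
}
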
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 995a8b1770d..0ec65168f33 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -377,6 +377,37 @@ void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
__clear_bit(doorbell, rdev->doorbell.used);
}
+/**
+ * radeon_doorbell_get_kfd_info - Report doorbell configuration required to
+ * set up KFD
+ *
+ * @rdev: radeon_device pointer
+ * @aperture_base: output returning doorbell aperture base physical address
+ * @aperture_size: output returning doorbell aperture size in bytes
+ * @start_offset: output returning # of doorbell bytes reserved for radeon.
+ *
+ * Radeon and the KFD share the doorbell aperture. Radeon sets it up,
+ * takes doorbells required for its own rings and reports the setup to KFD.
+ * Radeon reserved doorbells are at the start of the doorbell aperture.
+ */
+void radeon_doorbell_get_kfd_info(struct radeon_device *rdev,
+ phys_addr_t *aperture_base,
+ size_t *aperture_size,
+ size_t *start_offset)
+{
+ /* The first num_doorbells are used by radeon.
+ * KFD takes whatever's left in the aperture. */
+ if (rdev->doorbell.size > rdev->doorbell.num_doorbells * sizeof(u32)) {
+ *aperture_base = rdev->doorbell.base;
+ *aperture_size = rdev->doorbell.size;
+ *start_offset = rdev->doorbell.num_doorbells * sizeof(u32);
+ } else {
+ *aperture_base = 0;
+ *aperture_size = 0;
+ *start_offset = 0;
+ }
+}
+
/*
* radeon_wb_*()
 * Writeback is the method by which the GPU updates special pages
@@ -1273,6 +1304,7 @@ int radeon_device_init(struct radeon_device *rdev,
mutex_init(&rdev->pm.mutex);
mutex_init(&rdev->gpu_clock_mutex);
mutex_init(&rdev->srbm_mutex);
+ mutex_init(&rdev->grbm_idx_mutex);
init_rwsem(&rdev->pm.mclk_lock);
init_rwsem(&rdev->exclusive_lock);
init_waitqueue_head(&rdev->irq.vblank_queue);
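
radeon_doorbell_get_kfd_info() above splits the doorbell aperture: radeon keeps the first num_doorbells 32-bit slots for its own rings and reports the remainder (base, size, start offset) to KFD, or all zeroes if nothing is left. A standalone sketch of the same partition arithmetic with made-up example values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical example values for the doorbell BAR. */
	uint64_t base = 0xd0000000;          /* aperture base (physical) */
	uint64_t size = 8 * 1024 * 1024;     /* aperture size in bytes */
	uint32_t num_doorbells = 1024;       /* doorbells reserved by radeon */

	uint64_t kfd_offset = (uint64_t)num_doorbells * sizeof(uint32_t);

	if (size > kfd_offset)
		printf("KFD doorbells: base 0x%llx + 0x%llx, %llu bytes left\n",
		       (unsigned long long)base,
		       (unsigned long long)kfd_offset,
		       (unsigned long long)(size - kfd_offset));
	else
		printf("no room left for KFD doorbells\n");
	return 0;
}
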
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 00ead8c2758..102116902a0 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -32,6 +32,7 @@
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
#include <drm/drm_edid.h>
#include <linux/gcd.h>
@@ -634,7 +635,7 @@ radeon_crtc_set_config(struct drm_mode_set *set)
return ret;
}
static const struct drm_crtc_funcs radeon_crtc_funcs = {
- .cursor_set = radeon_crtc_cursor_set,
+ .cursor_set2 = radeon_crtc_cursor_set2,
.cursor_move = radeon_crtc_cursor_move,
.gamma_set = radeon_crtc_gamma_set,
.set_config = radeon_crtc_set_config,
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index dcffa30ee2d..4f50fb0e3d9 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -41,6 +41,8 @@
#include <drm/drm_gem.h>
#include "drm_crtc_helper.h"
+#include "radeon_kfd.h"
+
/*
* KMS wrapper.
* - 2.0.0 - initial interface
@@ -654,12 +656,15 @@ static int __init radeon_init(void)
#endif
}
+ radeon_kfd_init();
+
/* let modprobe override vga console setting */
return drm_pci_init(driver, pdriver);
}
static void __exit radeon_exit(void)
{
+ radeon_kfd_fini();
drm_pci_exit(driver, pdriver);
radeon_unregister_atpx_handler();
}
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 0ea1db83d57..29b9220ec39 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -48,10 +48,40 @@ struct radeon_fbdev {
struct radeon_device *rdev;
};
+/**
+ * radeon_fb_helper_set_par - Hide cursor on CRTCs used by fbdev.
+ *
+ * @info: fbdev info
+ *
+ * This function hides the cursor on all CRTCs used by fbdev.
+ */
+static int radeon_fb_helper_set_par(struct fb_info *info)
+{
+ int ret;
+
+ ret = drm_fb_helper_set_par(info);
+
+ /* XXX: with universal plane support fbdev will automatically disable
+ * all non-primary planes (including the cursor)
+ */
+ if (ret == 0) {
+ struct drm_fb_helper *fb_helper = info->par;
+ int i;
+
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ struct drm_crtc *crtc = fb_helper->crtc_info[i].mode_set.crtc;
+
+ radeon_crtc_cursor_set2(crtc, NULL, 0, 0, 0, 0, 0);
+ }
+ }
+
+ return ret;
+}
+
static struct fb_ops radeonfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
+ .fb_set_par = radeon_fb_helper_set_par,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 99516702528..d13d1b5a859 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -140,6 +140,7 @@ int radeon_fence_emit(struct radeon_device *rdev,
(*fence)->rdev = rdev;
(*fence)->seq = seq;
(*fence)->ring = ring;
+ (*fence)->is_vm_update = false;
fence_init(&(*fence)->base, &radeon_fence_ops,
&rdev->fence_queue.lock, rdev->fence_context + ring, seq);
radeon_fence_ring_emit(rdev, ring, *fence);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index c194497aa58..fe48f229043 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -394,9 +394,10 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
return r;
}
-int radeon_mode_dumb_mmap(struct drm_file *filp,
- struct drm_device *dev,
- uint32_t handle, uint64_t *offset_p)
+static int radeon_mode_mmap(struct drm_file *filp,
+ struct drm_device *dev,
+ uint32_t handle, bool dumb,
+ uint64_t *offset_p)
{
struct drm_gem_object *gobj;
struct radeon_bo *robj;
@@ -405,6 +406,14 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
if (gobj == NULL) {
return -ENOENT;
}
+
+ /*
+ * We don't allow dumb mmaps on objects created using another
+ * interface.
+ */
+ WARN_ONCE(dumb && !(gobj->dumb || gobj->import_attach),
+ "Illegal dumb map of GPU buffer.\n");
+
robj = gem_to_radeon_bo(gobj);
if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
drm_gem_object_unreference_unlocked(gobj);
@@ -415,12 +424,20 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
return 0;
}
+int radeon_mode_dumb_mmap(struct drm_file *filp,
+ struct drm_device *dev,
+ uint32_t handle, uint64_t *offset_p)
+{
+ return radeon_mode_mmap(filp, dev, handle, true, offset_p);
+}
+
int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct drm_radeon_gem_mmap *args = data;
- return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
+ return radeon_mode_mmap(filp, dev, args->handle, false,
+ &args->addr_ptr);
}
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
@@ -518,6 +535,68 @@ out:
return r;
}
+/**
+ * radeon_gem_va_update_vm - update the bo_va in its VM
+ *
+ * @rdev: radeon_device pointer
+ * @bo_va: bo_va to update
+ *
+ * Update the bo_va directly after setting its address. Errors are not
+ * vital here, so they are not reported back to userspace.
+ */
+static void radeon_gem_va_update_vm(struct radeon_device *rdev,
+ struct radeon_bo_va *bo_va)
+{
+ struct ttm_validate_buffer tv, *entry;
+ struct radeon_bo_list *vm_bos;
+ struct ww_acquire_ctx ticket;
+ struct list_head list;
+ unsigned domain;
+ int r;
+
+ INIT_LIST_HEAD(&list);
+
+ tv.bo = &bo_va->bo->tbo;
+ tv.shared = true;
+ list_add(&tv.head, &list);
+
+ vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
+ if (!vm_bos)
+ return;
+
+ r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+ if (r)
+ goto error_free;
+
+ list_for_each_entry(entry, &list, head) {
+ domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
+ /* if anything is swapped out don't swap it in here,
+ just abort and wait for the next CS */
+ if (domain == RADEON_GEM_DOMAIN_CPU)
+ goto error_unreserve;
+ }
+
+ mutex_lock(&bo_va->vm->mutex);
+ r = radeon_vm_clear_freed(rdev, bo_va->vm);
+ if (r)
+ goto error_unlock;
+
+ if (bo_va->it.start)
+ r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);
+
+error_unlock:
+ mutex_unlock(&bo_va->vm->mutex);
+
+error_unreserve:
+ ttm_eu_backoff_reservation(&ticket, &list);
+
+error_free:
+ drm_free_large(vm_bos);
+
+ if (r)
+ DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
+}
+
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
@@ -601,6 +680,7 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
if (bo_va->it.start) {
args->operation = RADEON_VA_RESULT_VA_EXIST;
args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
+ radeon_bo_unreserve(rbo);
goto out;
}
r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
@@ -611,12 +691,13 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
default:
break;
}
+ if (!r)
+ radeon_gem_va_update_vm(rdev, bo_va);
args->operation = RADEON_VA_RESULT_OK;
if (r) {
args->operation = RADEON_VA_RESULT_ERROR;
}
out:
- radeon_bo_unreserve(rbo);
drm_gem_object_unreference_unlocked(gobj);
return r;
}
@@ -682,6 +763,7 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
return -ENOMEM;
r = drm_gem_handle_create(file_priv, gobj, &handle);
+ gobj->dumb = true;
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(gobj);
if (r) {
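
The radeon_mode_mmap() rework above warns once when a dumb mmap is requested on an object that was neither created through the dumb interface nor imported. A standalone sketch of that check with a hypothetical cut-down object type (the real object carries a dumb flag and an import_attach pointer):

#include <stdbool.h>
#include <stdio.h>

struct gem_object { bool dumb; bool imported; };

static bool dumb_map_allowed(const struct gem_object *obj, bool dumb_request)
{
	/* Mirrors the WARN_ONCE condition: a dumb mmap is only legitimate on
	 * objects created through the dumb interface or imported ones. */
	return !dumb_request || obj->dumb || obj->imported;
}

int main(void)
{
	struct gem_object render_bo = { .dumb = false, .imported = false };
	struct gem_object dumb_bo   = { .dumb = true,  .imported = false };

	printf("dumb map of render BO allowed: %d\n",
	       dumb_map_allowed(&render_bo, true));   /* 0 */
	printf("dumb map of dumb BO allowed:   %d\n",
	       dumb_map_allowed(&dumb_bo, true));     /* 1 */
	return 0;
}
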
diff --git a/drivers/gpu/drm/radeon/radeon_ib.c b/drivers/gpu/drm/radeon/radeon_ib.c
index 3f39fcca4d0..c39ce1f0570 100644
--- a/drivers/gpu/drm/radeon/radeon_ib.c
+++ b/drivers/gpu/drm/radeon/radeon_ib.c
@@ -64,10 +64,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
return r;
}
- r = radeon_semaphore_create(rdev, &ib->semaphore);
- if (r) {
- return r;
- }
+ radeon_sync_create(&ib->sync);
ib->ring = ring;
ib->fence = NULL;
@@ -96,7 +93,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
*/
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
- radeon_semaphore_free(rdev, &ib->semaphore, ib->fence);
+ radeon_sync_free(rdev, &ib->sync, ib->fence);
radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
radeon_fence_unref(&ib->fence);
}
@@ -145,11 +142,11 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
if (ib->vm) {
struct radeon_fence *vm_id_fence;
vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring);
- radeon_semaphore_sync_fence(ib->semaphore, vm_id_fence);
+ radeon_sync_fence(&ib->sync, vm_id_fence);
}
/* sync with other rings */
- r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring);
+ r = radeon_sync_rings(rdev, &ib->sync, ib->ring);
if (r) {
dev_err(rdev->dev, "failed to sync rings (%d)\n", r);
radeon_ring_unlock_undo(rdev, ring);
@@ -157,11 +154,12 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
}
if (ib->vm)
- radeon_vm_flush(rdev, ib->vm, ib->ring);
+ radeon_vm_flush(rdev, ib->vm, ib->ring,
+ ib->sync.last_vm_update);
if (const_ib) {
radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
- radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
+ radeon_sync_free(rdev, &const_ib->sync, NULL);
}
radeon_ring_ib_execute(rdev, ib->ring, ib);
r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
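
The IB path above replaces the per-IB semaphore with a radeon_sync object: fences are collected with radeon_sync_fence()/radeon_sync_resv(), radeon_sync_rings() makes the ring wait on them, and sync.last_vm_update is what radeon_vm_flush() now receives. A toy model of such a fence accumulator, with hypothetical types that only mimic the last_vm_update bookkeeping and are not the kernel API:

#include <stdio.h>

struct fence { unsigned seq; };

struct sync {
	struct fence *last_vm_update;  /* newest VM page-table fence seen */
};

static void sync_fence(struct sync *s, struct fence *f, int is_vm_update)
{
	if (!f)
		return;
	if (is_vm_update &&
	    (!s->last_vm_update || f->seq > s->last_vm_update->seq))
		s->last_vm_update = f;
}

int main(void)
{
	struct sync s = { 0 };
	struct fence pt1 = { .seq = 5 }, pt2 = { .seq = 9 };

	sync_fence(&s, &pt1, 1);
	sync_fence(&s, &pt2, 1);
	printf("flush waits for VM fence seq %u\n", s.last_vm_update->seq);
	return 0;
}
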
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
new file mode 100644
index 00000000000..065d02068ec
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_kfd.c
@@ -0,0 +1,563 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/fdtable.h>
+#include <linux/uaccess.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "cikd.h"
+#include "cik_reg.h"
+#include "radeon_kfd.h"
+
+#define CIK_PIPE_PER_MEC (4)
+
+struct kgd_mem {
+ struct radeon_sa_bo *sa_bo;
+ uint64_t gpu_addr;
+ void *ptr;
+};
+
+static int init_sa_manager(struct kgd_dev *kgd, unsigned int size);
+static void fini_sa_manager(struct kgd_dev *kgd);
+
+static int allocate_mem(struct kgd_dev *kgd, size_t size, size_t alignment,
+ enum kgd_memory_pool pool, struct kgd_mem **mem);
+
+static void free_mem(struct kgd_dev *kgd, struct kgd_mem *mem);
+
+static uint64_t get_vmem_size(struct kgd_dev *kgd);
+static uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);
+
+static uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
+
+/*
+ * Register access functions
+ */
+
+static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+ uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
+ uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);
+
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+ unsigned int vmid);
+
+static int kgd_init_memory(struct kgd_dev *kgd);
+
+static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
+ uint32_t hpd_size, uint64_t hpd_gpu_addr);
+
+static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t __user *wptr);
+
+static bool kgd_hqd_is_occupies(struct kgd_dev *kgd, uint64_t queue_address,
+ uint32_t pipe_id, uint32_t queue_id);
+
+static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+ unsigned int timeout, uint32_t pipe_id,
+ uint32_t queue_id);
+
+static const struct kfd2kgd_calls kfd2kgd = {
+ .init_sa_manager = init_sa_manager,
+ .fini_sa_manager = fini_sa_manager,
+ .allocate_mem = allocate_mem,
+ .free_mem = free_mem,
+ .get_vmem_size = get_vmem_size,
+ .get_gpu_clock_counter = get_gpu_clock_counter,
+ .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
+ .program_sh_mem_settings = kgd_program_sh_mem_settings,
+ .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+ .init_memory = kgd_init_memory,
+ .init_pipeline = kgd_init_pipeline,
+ .hqd_load = kgd_hqd_load,
+ .hqd_is_occupies = kgd_hqd_is_occupies,
+ .hqd_destroy = kgd_hqd_destroy,
+};
+
+static const struct kgd2kfd_calls *kgd2kfd;
+
+bool radeon_kfd_init(void)
+{
+ bool (*kgd2kfd_init_p)(unsigned, const struct kfd2kgd_calls*,
+ const struct kgd2kfd_calls**);
+
+ kgd2kfd_init_p = symbol_request(kgd2kfd_init);
+
+ if (kgd2kfd_init_p == NULL)
+ return false;
+
+ if (!kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kfd2kgd, &kgd2kfd)) {
+ symbol_put(kgd2kfd_init);
+ kgd2kfd = NULL;
+
+ return false;
+ }
+
+ return true;
+}
+
+void radeon_kfd_fini(void)
+{
+ if (kgd2kfd) {
+ kgd2kfd->exit();
+ symbol_put(kgd2kfd_init);
+ }
+}
+
+void radeon_kfd_device_probe(struct radeon_device *rdev)
+{
+ if (kgd2kfd)
+ rdev->kfd = kgd2kfd->probe((struct kgd_dev *)rdev, rdev->pdev);
+}
+
+void radeon_kfd_device_init(struct radeon_device *rdev)
+{
+ if (rdev->kfd) {
+ struct kgd2kfd_shared_resources gpu_resources = {
+ .compute_vmid_bitmap = 0xFF00,
+
+ .first_compute_pipe = 1,
+ .compute_pipe_count = 8 - 1,
+ };
+
+ radeon_doorbell_get_kfd_info(rdev,
+ &gpu_resources.doorbell_physical_address,
+ &gpu_resources.doorbell_aperture_size,
+ &gpu_resources.doorbell_start_offset);
+
+ kgd2kfd->device_init(rdev->kfd, &gpu_resources);
+ }
+}
+
+void radeon_kfd_device_fini(struct radeon_device *rdev)
+{
+ if (rdev->kfd) {
+ kgd2kfd->device_exit(rdev->kfd);
+ rdev->kfd = NULL;
+ }
+}
+
+void radeon_kfd_interrupt(struct radeon_device *rdev, const void *ih_ring_entry)
+{
+ if (rdev->kfd)
+ kgd2kfd->interrupt(rdev->kfd, ih_ring_entry);
+}
+
+void radeon_kfd_suspend(struct radeon_device *rdev)
+{
+ if (rdev->kfd)
+ kgd2kfd->suspend(rdev->kfd);
+}
+
+int radeon_kfd_resume(struct radeon_device *rdev)
+{
+ int r = 0;
+
+ if (rdev->kfd)
+ r = kgd2kfd->resume(rdev->kfd);
+
+ return r;
+}
+
+static u32 pool_to_domain(enum kgd_memory_pool p)
+{
+ switch (p) {
+ case KGD_POOL_FRAMEBUFFER: return RADEON_GEM_DOMAIN_VRAM;
+ default: return RADEON_GEM_DOMAIN_GTT;
+ }
+}
+
+static int init_sa_manager(struct kgd_dev *kgd, unsigned int size)
+{
+ struct radeon_device *rdev = (struct radeon_device *)kgd;
+ int r;
+
+ BUG_ON(kgd == NULL);
+
+ r = radeon_sa_bo_manager_init(rdev, &rdev->kfd_bo,
+ size,
+ RADEON_GPU_PAGE_SIZE,
+ RADEON_GEM_DOMAIN_GTT,
+ RADEON_GEM_GTT_WC);
+
+ if (r)
+ return r;
+
+ r = radeon_sa_bo_manager_start(rdev, &rdev->kfd_bo);
+ if (r)
+ radeon_sa_bo_manager_fini(rdev, &rdev->kfd_bo);
+
+ return r;
+}
+
+static void fini_sa_manager(struct kgd_dev *kgd)
+{
+ struct radeon_device *rdev = (struct radeon_device *)kgd;
+
+ BUG_ON(kgd == NULL);
+
+ radeon_sa_bo_manager_suspend(rdev, &rdev->kfd_bo);
+ radeon_sa_bo_manager_fini(rdev, &rdev->kfd_bo);
+}
+
+static int allocate_mem(struct kgd_dev *kgd, size_t size, size_t alignment,
+ enum kgd_memory_pool pool, struct kgd_mem **mem)
+{
+ struct radeon_device *rdev = (struct radeon_device *)kgd;
+ u32 domain;
+ int r;
+
+ BUG_ON(kgd == NULL);
+
+ domain = pool_to_domain(pool);
+ if (domain != RADEON_GEM_DOMAIN_GTT) {
+ dev_err(rdev->dev,
+ "Only allowed to allocate gart memory for kfd\n");
+ return -EINVAL;
+ }
+
+ *mem = kmalloc(sizeof(struct kgd_mem), GFP_KERNEL);
+ if ((*mem) == NULL)
+ return -ENOMEM;
+
+ r = radeon_sa_bo_new(rdev, &rdev->kfd_bo, &(*mem)->sa_bo, size,
+ alignment);
+ if (r) {
+ dev_err(rdev->dev, "failed to get memory for kfd (%d)\n", r);
+ return r;
+ }
+
+ (*mem)->ptr = radeon_sa_bo_cpu_addr((*mem)->sa_bo);
+ (*mem)->gpu_addr = radeon_sa_bo_gpu_addr((*mem)->sa_bo);
+
+ return 0;
+}
+
+static void free_mem(struct kgd_dev *kgd, struct kgd_mem *mem)
+{
+ struct radeon_device *rdev = (struct radeon_device *)kgd;
+
+ BUG_ON(kgd == NULL);
+
+ radeon_sa_bo_free(rdev, &mem->sa_bo, NULL);
+ kfree(mem);
+}
+
+static uint64_t get_vmem_size(struct kgd_dev *kgd)
+{
+ struct radeon_device *rdev = (struct radeon_device *)kgd;
+
+ BUG_ON(kgd == NULL);
+
+ return rdev->mc.real_vram_size;
+}
+
+static uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
+{
+ struct radeon_device *rdev = (struct radeon_device *)kgd;
+
+ return rdev->asic->get_gpu_clock_counter(rdev);
+}
+
+static uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
+{
+ struct radeon_device *rdev = (struct radeon_device *)kgd;
+
+	/* The sclk is in quanta of 10 kHz */
+ return rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk / 100;
+}
+
+static inline struct radeon_device *get_radeon_device(struct kgd_dev *kgd)
+{
+ return (struct radeon_device *)kgd;
+}
+
+static void write_register(struct kgd_dev *kgd, uint32_t offset, uint32_t value)
+{
+ struct radeon_device *rdev = get_radeon_device(kgd);
+
+ writel(value, (void __iomem *)(rdev->rmmio + offset));
+}
+
+static uint32_t read_register(struct kgd_dev *kgd, uint32_t offset)
+{
+ struct radeon_device *rdev = get_radeon_device(kgd);
+
+ return readl((void __iomem *)(rdev->rmmio + offset));
+}
+
+static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
+ uint32_t queue, uint32_t vmid)
+{
+ struct radeon_device *rdev = get_radeon_device(kgd);
+ uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
+
+ mutex_lock(&rdev->srbm_mutex);
+ write_register(kgd, SRBM_GFX_CNTL, value);
+}
+
+static void unlock_srbm(struct kgd_dev *kgd)
+{
+ struct radeon_device *rdev = get_radeon_device(kgd);
+
+ write_register(kgd, SRBM_GFX_CNTL, 0);
+ mutex_unlock(&rdev->srbm_mutex);
+}
+
+static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
+ uint32_t queue_id)
+{
+ uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
+ uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
+
+ lock_srbm(kgd, mec, pipe, queue_id, 0);
+}
+
+static void release_queue(struct kgd_dev *kgd)
+{
+ unlock_srbm(kgd);
+}
+
+static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+ uint32_t sh_mem_config,
+ uint32_t sh_mem_ape1_base,
+ uint32_t sh_mem_ape1_limit,
+ uint32_t sh_mem_bases)
+{
+ lock_srbm(kgd, 0, 0, 0, vmid);
+
+ write_register(kgd, SH_MEM_CONFIG, sh_mem_config);
+ write_register(kgd, SH_MEM_APE1_BASE, sh_mem_ape1_base);
+ write_register(kgd, SH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
+ write_register(kgd, SH_MEM_BASES, sh_mem_bases);
+
+ unlock_srbm(kgd);
+}
+
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+ unsigned int vmid)
+{
+ /*
+ * We have to assume that there is no outstanding mapping.
+ * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0
+ * because a mapping is in progress or because a mapping finished and
+ * the SW cleared it.
+ * So the protocol is to always wait & clear.
+ */
+ uint32_t pasid_mapping = (pasid == 0) ? 0 :
+ (uint32_t)pasid | ATC_VMID_PASID_MAPPING_VALID;
+
+ write_register(kgd, ATC_VMID0_PASID_MAPPING + vmid*sizeof(uint32_t),
+ pasid_mapping);
+
+ while (!(read_register(kgd, ATC_VMID_PASID_MAPPING_UPDATE_STATUS) &
+ (1U << vmid)))
+ cpu_relax();
+ write_register(kgd, ATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);
+
+ return 0;
+}
+
+static int kgd_init_memory(struct kgd_dev *kgd)
+{
+ /*
+ * Configure apertures:
+ * LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB)
+ * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB)
+ * GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (1TB)
+ */
+ int i;
+ uint32_t sh_mem_bases = PRIVATE_BASE(0x6000) | SHARED_BASE(0x6000);
+
+ for (i = 8; i < 16; i++) {
+ uint32_t sh_mem_config;
+
+ lock_srbm(kgd, 0, 0, 0, i);
+
+ sh_mem_config = ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED);
+ sh_mem_config |= DEFAULT_MTYPE(MTYPE_NONCACHED);
+
+ write_register(kgd, SH_MEM_CONFIG, sh_mem_config);
+
+ write_register(kgd, SH_MEM_BASES, sh_mem_bases);
+
+ /* Scratch aperture is not supported for now. */
+ write_register(kgd, SH_STATIC_MEM_CONFIG, 0);
+
+ /* APE1 disabled for now. */
+ write_register(kgd, SH_MEM_APE1_BASE, 1);
+ write_register(kgd, SH_MEM_APE1_LIMIT, 0);
+
+ unlock_srbm(kgd);
+ }
+
+ return 0;
+}
+
+static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
+ uint32_t hpd_size, uint64_t hpd_gpu_addr)
+{
+ uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
+ uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
+
+ lock_srbm(kgd, mec, pipe, 0, 0);
+ write_register(kgd, CP_HPD_EOP_BASE_ADDR,
+ lower_32_bits(hpd_gpu_addr >> 8));
+ write_register(kgd, CP_HPD_EOP_BASE_ADDR_HI,
+ upper_32_bits(hpd_gpu_addr >> 8));
+ write_register(kgd, CP_HPD_EOP_VMID, 0);
+ write_register(kgd, CP_HPD_EOP_CONTROL, hpd_size);
+ unlock_srbm(kgd);
+
+ return 0;
+}
+
+static inline struct cik_mqd *get_mqd(void *mqd)
+{
+ return (struct cik_mqd *)mqd;
+}
+
+static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t __user *wptr)
+{
+ uint32_t wptr_shadow, is_wptr_shadow_valid;
+ struct cik_mqd *m;
+
+ m = get_mqd(mqd);
+
+ is_wptr_shadow_valid = !get_user(wptr_shadow, wptr);
+
+ acquire_queue(kgd, pipe_id, queue_id);
+ write_register(kgd, CP_MQD_BASE_ADDR, m->cp_mqd_base_addr_lo);
+ write_register(kgd, CP_MQD_BASE_ADDR_HI, m->cp_mqd_base_addr_hi);
+ write_register(kgd, CP_MQD_CONTROL, m->cp_mqd_control);
+
+ write_register(kgd, CP_HQD_PQ_BASE, m->cp_hqd_pq_base_lo);
+ write_register(kgd, CP_HQD_PQ_BASE_HI, m->cp_hqd_pq_base_hi);
+ write_register(kgd, CP_HQD_PQ_CONTROL, m->cp_hqd_pq_control);
+
+ write_register(kgd, CP_HQD_IB_CONTROL, m->cp_hqd_ib_control);
+ write_register(kgd, CP_HQD_IB_BASE_ADDR, m->cp_hqd_ib_base_addr_lo);
+ write_register(kgd, CP_HQD_IB_BASE_ADDR_HI, m->cp_hqd_ib_base_addr_hi);
+
+ write_register(kgd, CP_HQD_IB_RPTR, m->cp_hqd_ib_rptr);
+
+ write_register(kgd, CP_HQD_PERSISTENT_STATE,
+ m->cp_hqd_persistent_state);
+ write_register(kgd, CP_HQD_SEMA_CMD, m->cp_hqd_sema_cmd);
+ write_register(kgd, CP_HQD_MSG_TYPE, m->cp_hqd_msg_type);
+
+ write_register(kgd, CP_HQD_ATOMIC0_PREOP_LO,
+ m->cp_hqd_atomic0_preop_lo);
+
+ write_register(kgd, CP_HQD_ATOMIC0_PREOP_HI,
+ m->cp_hqd_atomic0_preop_hi);
+
+ write_register(kgd, CP_HQD_ATOMIC1_PREOP_LO,
+ m->cp_hqd_atomic1_preop_lo);
+
+ write_register(kgd, CP_HQD_ATOMIC1_PREOP_HI,
+ m->cp_hqd_atomic1_preop_hi);
+
+ write_register(kgd, CP_HQD_PQ_RPTR_REPORT_ADDR,
+ m->cp_hqd_pq_rptr_report_addr_lo);
+
+ write_register(kgd, CP_HQD_PQ_RPTR_REPORT_ADDR_HI,
+ m->cp_hqd_pq_rptr_report_addr_hi);
+
+ write_register(kgd, CP_HQD_PQ_RPTR, m->cp_hqd_pq_rptr);
+
+ write_register(kgd, CP_HQD_PQ_WPTR_POLL_ADDR,
+ m->cp_hqd_pq_wptr_poll_addr_lo);
+
+ write_register(kgd, CP_HQD_PQ_WPTR_POLL_ADDR_HI,
+ m->cp_hqd_pq_wptr_poll_addr_hi);
+
+ write_register(kgd, CP_HQD_PQ_DOORBELL_CONTROL,
+ m->cp_hqd_pq_doorbell_control);
+
+ write_register(kgd, CP_HQD_VMID, m->cp_hqd_vmid);
+
+ write_register(kgd, CP_HQD_QUANTUM, m->cp_hqd_quantum);
+
+ write_register(kgd, CP_HQD_PIPE_PRIORITY, m->cp_hqd_pipe_priority);
+ write_register(kgd, CP_HQD_QUEUE_PRIORITY, m->cp_hqd_queue_priority);
+
+ write_register(kgd, CP_HQD_IQ_RPTR, m->cp_hqd_iq_rptr);
+
+ if (is_wptr_shadow_valid)
+ write_register(kgd, CP_HQD_PQ_WPTR, wptr_shadow);
+
+ write_register(kgd, CP_HQD_ACTIVE, m->cp_hqd_active);
+ release_queue(kgd);
+
+ return 0;
+}
+
+static bool kgd_hqd_is_occupies(struct kgd_dev *kgd, uint64_t queue_address,
+ uint32_t pipe_id, uint32_t queue_id)
+{
+ uint32_t act;
+ bool retval = false;
+ uint32_t low, high;
+
+ acquire_queue(kgd, pipe_id, queue_id);
+ act = read_register(kgd, CP_HQD_ACTIVE);
+ if (act) {
+ low = lower_32_bits(queue_address >> 8);
+ high = upper_32_bits(queue_address >> 8);
+
+ if (low == read_register(kgd, CP_HQD_PQ_BASE) &&
+ high == read_register(kgd, CP_HQD_PQ_BASE_HI))
+ retval = true;
+ }
+ release_queue(kgd);
+ return retval;
+}
+
+static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+ unsigned int timeout, uint32_t pipe_id,
+ uint32_t queue_id)
+{
+ uint32_t temp;
+
+ acquire_queue(kgd, pipe_id, queue_id);
+ write_register(kgd, CP_HQD_PQ_DOORBELL_CONTROL, 0);
+
+ write_register(kgd, CP_HQD_DEQUEUE_REQUEST, reset_type);
+
+ while (true) {
+ temp = read_register(kgd, CP_HQD_ACTIVE);
+ if (temp & 0x1)
+ break;
+ if (timeout == 0) {
+ pr_err("kfd: cp queue preemption time out (%dms)\n",
+ temp);
+ return -ETIME;
+ }
+ msleep(20);
+ timeout -= 20;
+ }
+
+ release_queue(kgd);
+ return 0;
+}
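
acquire_queue() and kgd_init_pipeline() above map a zero-based KFD pipe_id onto a MEC number and a pipe within that MEC: the id is first shifted by one (presumably because the first pipe stays with radeon's own compute rings) and then split using CIK's four pipes per MEC. A standalone sketch printing that mapping for the first eight ids:

#include <stdint.h>
#include <stdio.h>

#define CIK_PIPE_PER_MEC 4

int main(void)
{
	for (uint32_t id = 0; id < 8; id++) {
		uint32_t shifted = id + 1;   /* same effect as the ++pipe_id */
		uint32_t mec  = shifted / CIK_PIPE_PER_MEC + 1;
		uint32_t pipe = shifted % CIK_PIPE_PER_MEC;

		printf("kfd pipe_id %u -> MEC %u, pipe %u\n", id, mec, pipe);
	}
	return 0;
}
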
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.h b/drivers/gpu/drm/radeon/radeon_kfd.h
new file mode 100644
index 00000000000..f90e161ca50
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_kfd.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * radeon_kfd.h defines the private interface between the
+ * AMD kernel graphics drivers and the AMD KFD.
+ */
+
+#ifndef RADEON_KFD_H_INCLUDED
+#define RADEON_KFD_H_INCLUDED
+
+#include <linux/types.h>
+#include "../amd/include/kgd_kfd_interface.h"
+
+struct radeon_device;
+
+bool radeon_kfd_init(void);
+void radeon_kfd_fini(void);
+
+void radeon_kfd_suspend(struct radeon_device *rdev);
+int radeon_kfd_resume(struct radeon_device *rdev);
+void radeon_kfd_interrupt(struct radeon_device *rdev,
+ const void *ih_ring_entry);
+void radeon_kfd_device_probe(struct radeon_device *rdev);
+void radeon_kfd_device_init(struct radeon_device *rdev);
+void radeon_kfd_device_fini(struct radeon_device *rdev);
+
+#endif /* RADEON_KFD_H_INCLUDED */
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 03586763ee8..3cf9c1fa647 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -34,6 +34,8 @@
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include "radeon_kfd.h"
+
#if defined(CONFIG_VGA_SWITCHEROO)
bool radeon_has_atpx(void);
#else
@@ -63,6 +65,8 @@ int radeon_driver_unload_kms(struct drm_device *dev)
pm_runtime_get_sync(dev->dev);
+ radeon_kfd_device_fini(rdev);
+
radeon_acpi_fini(rdev);
radeon_modeset_fini(rdev);
@@ -142,6 +146,9 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
"Error during ACPI methods call\n");
}
+ radeon_kfd_device_probe(rdev);
+ radeon_kfd_device_init(rdev);
+
if (radeon_is_px(dev)) {
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
@@ -621,8 +628,6 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
RADEON_VA_IB_OFFSET,
RADEON_VM_PAGE_READABLE |
RADEON_VM_PAGE_SNOOPED);
-
- radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
if (r) {
radeon_vm_fini(rdev, vm);
kfree(fpriv);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index cafb1ccf2ec..678b4386540 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -1054,6 +1054,7 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
DRM_ERROR("Mode need scaling but only first crtc can do that.\n");
}
}
+ radeon_cursor_reset(crtc);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 04db2fdd869..390db897f32 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -321,6 +321,10 @@ struct radeon_crtc {
uint32_t crtc_offset;
struct drm_gem_object *cursor_bo;
uint64_t cursor_addr;
+ int cursor_x;
+ int cursor_y;
+ int cursor_hot_x;
+ int cursor_hot_y;
int cursor_width;
int cursor_height;
int max_cursor_width;
@@ -462,6 +466,7 @@ struct radeon_gpio_rec {
u8 id;
u32 reg;
u32 mask;
+ u32 shift;
};
struct radeon_hpd {
@@ -748,6 +753,8 @@ extern bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
extern bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
struct radeon_atom_ss *ss,
int id, u32 clock);
+extern struct radeon_gpio_rec radeon_atombios_lookup_gpio(struct radeon_device *rdev,
+ u8 id);
extern void radeon_compute_pll_legacy(struct radeon_pll *pll,
uint64_t freq,
@@ -802,13 +809,16 @@ extern int radeon_crtc_set_base_atomic(struct drm_crtc *crtc,
extern int radeon_crtc_do_set_base(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y, int atomic);
-extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
- struct drm_file *file_priv,
- uint32_t handle,
- uint32_t width,
- uint32_t height);
+extern int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t width,
+ uint32_t height,
+ int32_t hot_x,
+ int32_t hot_y);
extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
int x, int y);
+extern void radeon_cursor_reset(struct drm_crtc *crtc);
extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
unsigned int flags,
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 4c0d786d5c7..7d68223eb46 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -99,22 +99,39 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
rbo->placement.placement = rbo->placements;
rbo->placement.busy_placement = rbo->placements;
- if (domain & RADEON_GEM_DOMAIN_VRAM)
+ if (domain & RADEON_GEM_DOMAIN_VRAM) {
+ /* Try placing BOs which don't need CPU access outside of the
+ * CPU accessible part of VRAM
+ */
+ if ((rbo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
+ rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size) {
+ rbo->placements[c].fpfn =
+ rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
+ rbo->placements[c++].flags = TTM_PL_FLAG_WC |
+ TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_VRAM;
+ }
+
+ rbo->placements[c].fpfn = 0;
rbo->placements[c++].flags = TTM_PL_FLAG_WC |
TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_VRAM;
+ }
if (domain & RADEON_GEM_DOMAIN_GTT) {
if (rbo->flags & RADEON_GEM_GTT_UC) {
+ rbo->placements[c].fpfn = 0;
rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_TT;
} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
(rbo->rdev->flags & RADEON_IS_AGP)) {
+ rbo->placements[c].fpfn = 0;
rbo->placements[c++].flags = TTM_PL_FLAG_WC |
TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_TT;
} else {
+ rbo->placements[c].fpfn = 0;
rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
TTM_PL_FLAG_TT;
}
@@ -122,30 +139,35 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
if (domain & RADEON_GEM_DOMAIN_CPU) {
if (rbo->flags & RADEON_GEM_GTT_UC) {
+ rbo->placements[c].fpfn = 0;
rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_SYSTEM;
} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
rbo->rdev->flags & RADEON_IS_AGP) {
+ rbo->placements[c].fpfn = 0;
rbo->placements[c++].flags = TTM_PL_FLAG_WC |
TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_SYSTEM;
} else {
+ rbo->placements[c].fpfn = 0;
rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
TTM_PL_FLAG_SYSTEM;
}
}
- if (!c)
+ if (!c) {
+ rbo->placements[c].fpfn = 0;
rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
TTM_PL_FLAG_SYSTEM;
+ }
rbo->placement.num_placement = c;
rbo->placement.num_busy_placement = c;
for (i = 0; i < c; ++i) {
- rbo->placements[i].fpfn = 0;
if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
- (rbo->placements[i].flags & TTM_PL_FLAG_VRAM))
+ (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
+ !rbo->placements[i].fpfn)
rbo->placements[i].lpfn =
rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
else
@@ -157,9 +179,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
* improve fragmentation quality.
* 512kb was measured as the most optimal number.
*/
- if (!((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
- (rbo->placements[i].flags & TTM_PL_FLAG_VRAM)) &&
- rbo->tbo.mem.size > 512 * 1024) {
+ if (rbo->tbo.mem.size > 512 * 1024) {
for (i = 0; i < c; i++) {
rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN;
}
@@ -489,25 +509,29 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
struct ww_acquire_ctx *ticket,
struct list_head *head, int ring)
{
- struct radeon_cs_reloc *lobj;
- struct radeon_bo *bo;
+ struct radeon_bo_list *lobj;
+ struct list_head duplicates;
int r;
u64 bytes_moved = 0, initial_bytes_moved;
u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
- r = ttm_eu_reserve_buffers(ticket, head, true);
+ INIT_LIST_HEAD(&duplicates);
+ r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
if (unlikely(r != 0)) {
return r;
}
list_for_each_entry(lobj, head, tv.head) {
- bo = lobj->robj;
+ struct radeon_bo *bo = lobj->robj;
if (!bo->pin_count) {
u32 domain = lobj->prefered_domains;
u32 allowed = lobj->allowed_domains;
u32 current_domain =
radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
+ WARN_ONCE(bo->gem_base.dumb,
+ "GPU use of dumb buffer is illegal.\n");
+
/* Check if this buffer will be moved and don't move it
* if we have moved too many buffers for this IB already.
*
@@ -546,6 +570,12 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
lobj->gpu_offset = radeon_bo_gpu_offset(bo);
lobj->tiling_flags = bo->tiling_flags;
}
+
+ list_for_each_entry(lobj, &duplicates, tv.head) {
+ lobj->gpu_offset = radeon_bo_gpu_offset(lobj->robj);
+ lobj->tiling_flags = lobj->robj->tiling_flags;
+ }
+
return 0;
}
@@ -750,8 +780,8 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
struct radeon_device *rdev;
struct radeon_bo *rbo;
- unsigned long offset, size;
- int r;
+ unsigned long offset, size, lpfn;
+ int i, r;
if (!radeon_ttm_bo_is_radeon_bo(bo))
return 0;
@@ -768,7 +798,13 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
/* hurrah the memory is not visible ! */
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
- rbo->placements[0].lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+ lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+ for (i = 0; i < rbo->placement.num_placement; i++) {
+ /* Force into visible VRAM */
+ if ((rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
+ (!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn))
+ rbo->placements[i].lpfn = lpfn;
+ }
r = ttm_bo_validate(bo, &rbo->placement, false, false);
if (unlikely(r == -ENOMEM)) {
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
@@ -799,3 +835,22 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
ttm_bo_unreserve(&bo->tbo);
return r;
}
+
+/**
+ * radeon_bo_fence - add fence to buffer object
+ *
+ * @bo: buffer object in question
+ * @fence: fence to add
+ * @shared: true if fence should be added shared
+ *
+ */
+void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
+ bool shared)
+{
+ struct reservation_object *resv = bo->tbo.resv;
+
+ if (shared)
+ reservation_object_add_shared_fence(resv, &fence->base);
+ else
+ reservation_object_add_excl_fence(resv, &fence->base);
+}
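
Note: the radeon_object.c changes above make the fpfn (first page frame) explicit for every placement, so that buffers flagged RADEON_GEM_NO_CPU_ACCESS can be restricted to the part of VRAM beyond the CPU-visible window when visible_vram_size < real_vram_size. Below is a small self-contained model of that range logic, using invented sizes and plain structs rather than the real TTM types.

/* Simplified model of the fpfn/lpfn placement ranges used above.
 * Illustration only; the real code uses struct ttm_place inside TTM.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct place { uint32_t fpfn, lpfn; };  /* first/last page frame, 0 = no limit */

int main(void)
{
        uint64_t visible_vram = 256ull << 20;   /* example: 256 MiB CPU-visible */
        uint64_t real_vram    = 1024ull << 20;  /* example: 1 GiB total VRAM */
        struct place no_cpu_access, fallback;

        /* BOs the CPU never maps may start beyond the visible window */
        no_cpu_access.fpfn = visible_vram >> PAGE_SHIFT;
        no_cpu_access.lpfn = 0;

        /* the ordinary VRAM placement still starts at page 0 */
        fallback.fpfn = 0;
        fallback.lpfn = 0;

        printf("no-CPU-access VRAM starts at pfn %u (of %llu total pfns)\n",
               (unsigned)no_cpu_access.fpfn,
               (unsigned long long)(real_vram >> PAGE_SHIFT));
        printf("fallback placement starts at pfn %u\n", (unsigned)fallback.fpfn);
        return 0;
}
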
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 1b8ec791715..3b0b377f76c 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -155,6 +155,8 @@ extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem);
extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
+extern void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
+ bool shared);
/*
* sub allocation
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 6deb08f045b..e6ad54cdfa6 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -34,15 +34,14 @@
int radeon_semaphore_create(struct radeon_device *rdev,
struct radeon_semaphore **semaphore)
{
- uint64_t *cpu_addr;
- int i, r;
+ int r;
*semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
if (*semaphore == NULL) {
return -ENOMEM;
}
- r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &(*semaphore)->sa_bo,
- 8 * RADEON_NUM_SYNCS, 8);
+ r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo,
+ &(*semaphore)->sa_bo, 8, 8);
if (r) {
kfree(*semaphore);
*semaphore = NULL;
@@ -51,12 +50,7 @@ int radeon_semaphore_create(struct radeon_device *rdev,
(*semaphore)->waiters = 0;
(*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);
- cpu_addr = radeon_sa_bo_cpu_addr((*semaphore)->sa_bo);
- for (i = 0; i < RADEON_NUM_SYNCS; ++i)
- cpu_addr[i] = 0;
-
- for (i = 0; i < RADEON_NUM_RINGS; ++i)
- (*semaphore)->sync_to[i] = NULL;
+ *((uint64_t *)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
return 0;
}
@@ -95,146 +89,6 @@ bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ridx,
return false;
}
-/**
- * radeon_semaphore_sync_fence - use the semaphore to sync to a fence
- *
- * @semaphore: semaphore object to add fence to
- * @fence: fence to sync to
- *
- * Sync to the fence using this semaphore object
- */
-void radeon_semaphore_sync_fence(struct radeon_semaphore *semaphore,
- struct radeon_fence *fence)
-{
- struct radeon_fence *other;
-
- if (!fence)
- return;
-
- other = semaphore->sync_to[fence->ring];
- semaphore->sync_to[fence->ring] = radeon_fence_later(fence, other);
-}
-
-/**
- * radeon_semaphore_sync_to - use the semaphore to sync to a reservation object
- *
- * @sema: semaphore object to add fence from reservation object to
- * @resv: reservation object with embedded fence
- * @shared: true if we should onyl sync to the exclusive fence
- *
- * Sync to the fence using this semaphore object
- */
-int radeon_semaphore_sync_resv(struct radeon_device *rdev,
- struct radeon_semaphore *sema,
- struct reservation_object *resv,
- bool shared)
-{
- struct reservation_object_list *flist;
- struct fence *f;
- struct radeon_fence *fence;
- unsigned i;
- int r = 0;
-
- /* always sync to the exclusive fence */
- f = reservation_object_get_excl(resv);
- fence = f ? to_radeon_fence(f) : NULL;
- if (fence && fence->rdev == rdev)
- radeon_semaphore_sync_fence(sema, fence);
- else if (f)
- r = fence_wait(f, true);
-
- flist = reservation_object_get_list(resv);
- if (shared || !flist || r)
- return r;
-
- for (i = 0; i < flist->shared_count; ++i) {
- f = rcu_dereference_protected(flist->shared[i],
- reservation_object_held(resv));
- fence = to_radeon_fence(f);
- if (fence && fence->rdev == rdev)
- radeon_semaphore_sync_fence(sema, fence);
- else
- r = fence_wait(f, true);
-
- if (r)
- break;
- }
- return r;
-}
-
-/**
- * radeon_semaphore_sync_rings - sync ring to all registered fences
- *
- * @rdev: radeon_device pointer
- * @semaphore: semaphore object to use for sync
- * @ring: ring that needs sync
- *
- * Ensure that all registered fences are signaled before letting
- * the ring continue. The caller must hold the ring lock.
- */
-int radeon_semaphore_sync_rings(struct radeon_device *rdev,
- struct radeon_semaphore *semaphore,
- int ring)
-{
- unsigned count = 0;
- int i, r;
-
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- struct radeon_fence *fence = semaphore->sync_to[i];
-
- /* check if we really need to sync */
- if (!radeon_fence_need_sync(fence, ring))
- continue;
-
- /* prevent GPU deadlocks */
- if (!rdev->ring[i].ready) {
- dev_err(rdev->dev, "Syncing to a disabled ring!");
- return -EINVAL;
- }
-
- if (++count > RADEON_NUM_SYNCS) {
- /* not enough room, wait manually */
- r = radeon_fence_wait(fence, false);
- if (r)
- return r;
- continue;
- }
-
- /* allocate enough space for sync command */
- r = radeon_ring_alloc(rdev, &rdev->ring[i], 16);
- if (r) {
- return r;
- }
-
- /* emit the signal semaphore */
- if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) {
- /* signaling wasn't successful wait manually */
- radeon_ring_undo(&rdev->ring[i]);
- r = radeon_fence_wait(fence, false);
- if (r)
- return r;
- continue;
- }
-
- /* we assume caller has already allocated space on waiters ring */
- if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) {
- /* waiting wasn't successful wait manually */
- radeon_ring_undo(&rdev->ring[i]);
- r = radeon_fence_wait(fence, false);
- if (r)
- return r;
- continue;
- }
-
- radeon_ring_commit(rdev, &rdev->ring[i], false);
- radeon_fence_note_sync(fence, ring);
-
- semaphore->gpu_addr += 8;
- }
-
- return 0;
-}
-
void radeon_semaphore_free(struct radeon_device *rdev,
struct radeon_semaphore **semaphore,
struct radeon_fence *fence)
diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
new file mode 100644
index 00000000000..02ac8a1de4f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_sync.c
@@ -0,0 +1,220 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ * Christian König <christian.koenig@amd.com>
+ */
+
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_trace.h"
+
+/**
+ * radeon_sync_create - zero init sync object
+ *
+ * @sync: sync object to initialize
+ *
+ * Just clear the sync object for now.
+ */
+void radeon_sync_create(struct radeon_sync *sync)
+{
+ unsigned i;
+
+ for (i = 0; i < RADEON_NUM_SYNCS; ++i)
+ sync->semaphores[i] = NULL;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i)
+ sync->sync_to[i] = NULL;
+
+ sync->last_vm_update = NULL;
+}
+
+/**
+ * radeon_sync_fence - use the semaphore to sync to a fence
+ *
+ * @sync: sync object to add fence to
+ * @fence: fence to sync to
+ *
+ * Sync to the fence using the semaphore objects
+ */
+void radeon_sync_fence(struct radeon_sync *sync,
+ struct radeon_fence *fence)
+{
+ struct radeon_fence *other;
+
+ if (!fence)
+ return;
+
+ other = sync->sync_to[fence->ring];
+ sync->sync_to[fence->ring] = radeon_fence_later(fence, other);
+
+ if (fence->is_vm_update) {
+ other = sync->last_vm_update;
+ sync->last_vm_update = radeon_fence_later(fence, other);
+ }
+}
+
+/**
+ * radeon_sync_resv - use the semaphores to sync to a reservation object
+ *
+ * @sync: sync object to add fences from reservation object to
+ * @resv: reservation object with embedded fence
+ * @shared: true if we should only sync to the exclusive fence
+ *
+ * Sync to the fence using the semaphore objects
+ */
+int radeon_sync_resv(struct radeon_device *rdev,
+ struct radeon_sync *sync,
+ struct reservation_object *resv,
+ bool shared)
+{
+ struct reservation_object_list *flist;
+ struct fence *f;
+ struct radeon_fence *fence;
+ unsigned i;
+ int r = 0;
+
+ /* always sync to the exclusive fence */
+ f = reservation_object_get_excl(resv);
+ fence = f ? to_radeon_fence(f) : NULL;
+ if (fence && fence->rdev == rdev)
+ radeon_sync_fence(sync, fence);
+ else if (f)
+ r = fence_wait(f, true);
+
+ flist = reservation_object_get_list(resv);
+ if (shared || !flist || r)
+ return r;
+
+ for (i = 0; i < flist->shared_count; ++i) {
+ f = rcu_dereference_protected(flist->shared[i],
+ reservation_object_held(resv));
+ fence = to_radeon_fence(f);
+ if (fence && fence->rdev == rdev)
+ radeon_sync_fence(sync, fence);
+ else
+ r = fence_wait(f, true);
+
+ if (r)
+ break;
+ }
+ return r;
+}
+
+/**
+ * radeon_sync_rings - sync ring to all registered fences
+ *
+ * @rdev: radeon_device pointer
+ * @sync: sync object to use
+ * @ring: ring that needs sync
+ *
+ * Ensure that all registered fences are signaled before letting
+ * the ring continue. The caller must hold the ring lock.
+ */
+int radeon_sync_rings(struct radeon_device *rdev,
+ struct radeon_sync *sync,
+ int ring)
+{
+ unsigned count = 0;
+ int i, r;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ struct radeon_fence *fence = sync->sync_to[i];
+ struct radeon_semaphore *semaphore;
+
+ /* check if we really need to sync */
+ if (!radeon_fence_need_sync(fence, ring))
+ continue;
+
+ /* prevent GPU deadlocks */
+ if (!rdev->ring[i].ready) {
+ dev_err(rdev->dev, "Syncing to a disabled ring!");
+ return -EINVAL;
+ }
+
+ if (count >= RADEON_NUM_SYNCS) {
+ /* not enough room, wait manually */
+ r = radeon_fence_wait(fence, false);
+ if (r)
+ return r;
+ continue;
+ }
+ r = radeon_semaphore_create(rdev, &semaphore);
+ if (r)
+ return r;
+
+ sync->semaphores[count++] = semaphore;
+
+ /* allocate enough space for sync command */
+ r = radeon_ring_alloc(rdev, &rdev->ring[i], 16);
+ if (r)
+ return r;
+
+ /* emit the signal semaphore */
+ if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) {
+ /* signaling wasn't successful, wait manually */
+ radeon_ring_undo(&rdev->ring[i]);
+ r = radeon_fence_wait(fence, false);
+ if (r)
+ return r;
+ continue;
+ }
+
+ /* we assume caller has already allocated space on waiters ring */
+ if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) {
+ /* waiting wasn't successful, wait manually */
+ radeon_ring_undo(&rdev->ring[i]);
+ r = radeon_fence_wait(fence, false);
+ if (r)
+ return r;
+ continue;
+ }
+
+ radeon_ring_commit(rdev, &rdev->ring[i], false);
+ radeon_fence_note_sync(fence, ring);
+ }
+
+ return 0;
+}
+
+/**
+ * radeon_sync_free - free the sync object
+ *
+ * @rdev: radeon_device pointer
+ * @sync: sync object to use
+ * @fence: fence to use for the free
+ *
+ * Free the sync object by freeing all semaphores in it.
+ */
+void radeon_sync_free(struct radeon_device *rdev,
+ struct radeon_sync *sync,
+ struct radeon_fence *fence)
+{
+ unsigned i;
+
+ for (i = 0; i < RADEON_NUM_SYNCS; ++i)
+ radeon_semaphore_free(rdev, &sync->semaphores[i], fence);
+}
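
Note: the new radeon_sync.c replaces the fence bookkeeping that used to live inside each semaphore with a radeon_sync container that creates one semaphore per ring it actually has to wait on. A minimal usage sketch follows, mirroring the pattern the DMA copy functions adopt later in this patch; signatures are taken from the file above and error handling is trimmed.

/* Sketch only: mirrors the pattern rv770_copy_dma()/si_copy_dma() switch to
 * later in this patch.
 */
#include "radeon.h"

struct radeon_fence *example_sync_then_submit(struct radeon_device *rdev,
                                              struct radeon_ring *ring,
                                              struct reservation_object *resv)
{
        struct radeon_fence *fence;
        struct radeon_sync sync;
        int r;

        radeon_sync_create(&sync);                   /* zero-init, no allocation yet */

        r = radeon_ring_lock(rdev, ring, 64);
        if (r) {
                radeon_sync_free(rdev, &sync, NULL);
                return ERR_PTR(r);
        }

        radeon_sync_resv(rdev, &sync, resv, false);  /* gather fences to wait on */
        radeon_sync_rings(rdev, &sync, ring->idx);   /* emit the semaphore waits */

        /* ... emit the actual command packets here ... */

        r = radeon_fence_emit(rdev, &fence, ring->idx);
        if (r) {
                radeon_ring_unlock_undo(rdev, ring);
                radeon_sync_free(rdev, &sync, NULL);
                return ERR_PTR(r);
        }

        radeon_ring_unlock_commit(rdev, ring, false);
        radeon_sync_free(rdev, &sync, fence);        /* semaphores freed once fence signals */
        return fence;
}
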
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index 9db74a96ef6..ce075cb08cb 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -38,7 +38,7 @@ TRACE_EVENT(radeon_cs,
TP_fast_assign(
__entry->ring = p->ring;
- __entry->dw = p->chunks[p->chunk_ib_idx].length_dw;
+ __entry->dw = p->chunk_ib->length_dw;
__entry->fences = radeon_fence_count_emitted(
p->rdev, p->ring);
),
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 8624979afb6..d02aa1d0f58 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -196,9 +196,32 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
rbo = container_of(bo, struct radeon_bo, tbo);
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
- if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false)
+ if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
- else
+ else if (rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size &&
+ bo->mem.start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) {
+ unsigned fpfn = rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
+ int i;
+
+ /* Try evicting to the CPU inaccessible part of VRAM
+ * first, but only set GTT as busy placement, so this
+ * BO will be evicted to GTT rather than causing other
+ * BOs to be evicted from VRAM
+ */
+ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM |
+ RADEON_GEM_DOMAIN_GTT);
+ rbo->placement.num_busy_placement = 0;
+ for (i = 0; i < rbo->placement.num_placement; i++) {
+ if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) {
+ if (rbo->placements[0].fpfn < fpfn)
+ rbo->placements[0].fpfn = fpfn;
+ } else {
+ rbo->placement.busy_placement =
+ &rbo->placements[i];
+ rbo->placement.num_busy_placement = 1;
+ }
+ }
+ } else
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
break;
case TTM_PL_TT:
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 11b66246925..c10b2aec645 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -488,12 +488,12 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
unsigned buf_sizes[], bool *has_msg_cmd)
{
struct radeon_cs_chunk *relocs_chunk;
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
unsigned idx, cmd, offset;
uint64_t start, end;
int r;
- relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+ relocs_chunk = p->chunk_relocs;
offset = radeon_get_ib_value(p, data0);
idx = radeon_get_ib_value(p, data1);
if (idx >= relocs_chunk->length_dw) {
@@ -502,7 +502,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
return -EINVAL;
}
- reloc = p->relocs_ptr[(idx / 4)];
+ reloc = &p->relocs[(idx / 4)];
start = reloc->gpu_offset;
end = start + radeon_bo_size(reloc->robj);
start += offset;
@@ -610,13 +610,13 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
[0x00000003] = 2048,
};
- if (p->chunks[p->chunk_ib_idx].length_dw % 16) {
+ if (p->chunk_ib->length_dw % 16) {
DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
- p->chunks[p->chunk_ib_idx].length_dw);
+ p->chunk_ib->length_dw);
return -EINVAL;
}
- if (p->chunk_relocs_idx == -1) {
+ if (p->chunk_relocs == NULL) {
DRM_ERROR("No relocation chunk !\n");
return -EINVAL;
}
@@ -640,7 +640,7 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
DRM_ERROR("Unknown packet type %d !\n", pkt.type);
return -EINVAL;
}
- } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+ } while (p->idx < p->chunk_ib->length_dw);
if (!has_msg_cmd) {
DRM_ERROR("UVD-IBs need a msg command!\n");
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index 9e85757d559..976fe432f4e 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -453,11 +453,11 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
unsigned size)
{
struct radeon_cs_chunk *relocs_chunk;
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
uint64_t start, end, offset;
unsigned idx;
- relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+ relocs_chunk = p->chunk_relocs;
offset = radeon_get_ib_value(p, lo);
idx = radeon_get_ib_value(p, hi);
@@ -467,7 +467,7 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
return -EINVAL;
}
- reloc = p->relocs_ptr[(idx / 4)];
+ reloc = &p->relocs[(idx / 4)];
start = reloc->gpu_offset;
end = start + radeon_bo_size(reloc->robj);
start += offset;
@@ -534,7 +534,7 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
uint32_t *size = &tmp;
int i, r;
- while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) {
+ while (p->idx < p->chunk_ib->length_dw) {
uint32_t len = radeon_get_ib_value(p, p->idx);
uint32_t cmd = radeon_get_ib_value(p, p->idx + 1);
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index dfde266529e..cde48c42b30 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -125,41 +125,37 @@ void radeon_vm_manager_fini(struct radeon_device *rdev)
* Add the page directory to the list of BOs to
* validate for command submission (cayman+).
*/
-struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
+struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
struct radeon_vm *vm,
struct list_head *head)
{
- struct radeon_cs_reloc *list;
+ struct radeon_bo_list *list;
unsigned i, idx;
list = drm_malloc_ab(vm->max_pde_used + 2,
- sizeof(struct radeon_cs_reloc));
+ sizeof(struct radeon_bo_list));
if (!list)
return NULL;
/* add the vm page table to the list */
- list[0].gobj = NULL;
list[0].robj = vm->page_directory;
list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
list[0].tv.bo = &vm->page_directory->tbo;
- list[0].tv.shared = false;
+ list[0].tv.shared = true;
list[0].tiling_flags = 0;
- list[0].handle = 0;
list_add(&list[0].tv.head, head);
for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
if (!vm->page_tables[i].bo)
continue;
- list[idx].gobj = NULL;
list[idx].robj = vm->page_tables[i].bo;
list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
list[idx].tv.bo = &list[idx].robj->tbo;
- list[idx].tv.shared = false;
+ list[idx].tv.shared = true;
list[idx].tiling_flags = 0;
- list[idx].handle = 0;
list_add(&list[idx++].tv.head, head);
}
@@ -182,15 +178,18 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
struct radeon_vm *vm, int ring)
{
struct radeon_fence *best[RADEON_NUM_RINGS] = {};
+ struct radeon_vm_id *vm_id = &vm->ids[ring];
+
unsigned choices[2] = {};
unsigned i;
/* check if the id is still valid */
- if (vm->last_id_use && vm->last_id_use == rdev->vm_manager.active[vm->id])
+ if (vm_id->id && vm_id->last_id_use &&
+ vm_id->last_id_use == rdev->vm_manager.active[vm_id->id])
return NULL;
/* we definately need to flush */
- radeon_fence_unref(&vm->last_flush);
+ vm_id->pd_gpu_addr = ~0ll;
/* skip over VMID 0, since it is the system VM */
for (i = 1; i < rdev->vm_manager.nvm; ++i) {
@@ -198,8 +197,8 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
if (fence == NULL) {
/* found a free one */
- vm->id = i;
- trace_radeon_vm_grab_id(vm->id, ring);
+ vm_id->id = i;
+ trace_radeon_vm_grab_id(i, ring);
return NULL;
}
@@ -211,8 +210,8 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
for (i = 0; i < 2; ++i) {
if (choices[i]) {
- vm->id = choices[i];
- trace_radeon_vm_grab_id(vm->id, ring);
+ vm_id->id = choices[i];
+ trace_radeon_vm_grab_id(choices[i], ring);
return rdev->vm_manager.active[choices[i]];
}
}
@@ -228,6 +227,7 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
* @rdev: radeon_device pointer
* @vm: vm we want to flush
* @ring: ring to use for flush
+ * @updates: last vm update that is waited for
*
* Flush the vm (cayman+).
*
@@ -235,15 +235,21 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
*/
void radeon_vm_flush(struct radeon_device *rdev,
struct radeon_vm *vm,
- int ring)
+ int ring, struct radeon_fence *updates)
{
uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);
+ struct radeon_vm_id *vm_id = &vm->ids[ring];
+
+ if (pd_addr != vm_id->pd_gpu_addr || !vm_id->flushed_updates ||
+ radeon_fence_is_earlier(vm_id->flushed_updates, updates)) {
+
+ trace_radeon_vm_flush(pd_addr, ring, vm->ids[ring].id);
+ radeon_fence_unref(&vm_id->flushed_updates);
+ vm_id->flushed_updates = radeon_fence_ref(updates);
+ vm_id->pd_gpu_addr = pd_addr;
+ radeon_ring_vm_flush(rdev, &rdev->ring[ring],
+ vm_id->id, vm_id->pd_gpu_addr);
- /* if we can't remember our last VM flush then flush now! */
- if (!vm->last_flush || pd_addr != vm->pd_gpu_addr) {
- trace_radeon_vm_flush(pd_addr, ring, vm->id);
- vm->pd_gpu_addr = pd_addr;
- radeon_ring_vm_flush(rdev, ring, vm);
}
}
@@ -263,18 +269,13 @@ void radeon_vm_fence(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_fence *fence)
{
- radeon_fence_unref(&vm->fence);
- vm->fence = radeon_fence_ref(fence);
-
- radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
- rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);
+ unsigned vm_id = vm->ids[fence->ring].id;
- radeon_fence_unref(&vm->last_id_use);
- vm->last_id_use = radeon_fence_ref(fence);
+ radeon_fence_unref(&rdev->vm_manager.active[vm_id]);
+ rdev->vm_manager.active[vm_id] = radeon_fence_ref(fence);
- /* we just flushed the VM, remember that */
- if (!vm->last_flush)
- vm->last_flush = radeon_fence_ref(fence);
+ radeon_fence_unref(&vm->ids[fence->ring].last_id_use);
+ vm->ids[fence->ring].last_id_use = radeon_fence_ref(fence);
}
/**
@@ -387,35 +388,25 @@ static void radeon_vm_set_pages(struct radeon_device *rdev,
static int radeon_vm_clear_bo(struct radeon_device *rdev,
struct radeon_bo *bo)
{
- struct ttm_validate_buffer tv;
- struct ww_acquire_ctx ticket;
- struct list_head head;
struct radeon_ib ib;
unsigned entries;
uint64_t addr;
int r;
- memset(&tv, 0, sizeof(tv));
- tv.bo = &bo->tbo;
- tv.shared = false;
-
- INIT_LIST_HEAD(&head);
- list_add(&tv.head, &head);
-
- r = ttm_eu_reserve_buffers(&ticket, &head, true);
- if (r)
+ r = radeon_bo_reserve(bo, false);
+ if (r)
return r;
- r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
- if (r)
- goto error;
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+ if (r)
+ goto error_unreserve;
addr = radeon_bo_gpu_offset(bo);
entries = radeon_bo_size(bo) / 8;
r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, 256);
if (r)
- goto error;
+ goto error_unreserve;
ib.length_dw = 0;
@@ -425,15 +416,16 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r)
- goto error;
+ goto error_free;
- ttm_eu_fence_buffer_objects(&ticket, &head, &ib.fence->base);
- radeon_ib_free(rdev, &ib);
+ ib.fence->is_vm_update = true;
+ radeon_bo_fence(bo, ib.fence, false);
- return 0;
+error_free:
+ radeon_ib_free(rdev, &ib);
-error:
- ttm_eu_backoff_reservation(&ticket, &head);
+error_unreserve:
+ radeon_bo_unreserve(bo);
return r;
}
@@ -449,7 +441,7 @@ error:
* Validate and set the offset requested within the vm address space.
* Returns 0 for success, error for failure.
*
- * Object has to be reserved!
+ * Object has to be reserved and gets unreserved by this function!
*/
int radeon_vm_bo_set_addr(struct radeon_device *rdev,
struct radeon_bo_va *bo_va,
@@ -495,7 +487,9 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
tmp->vm = vm;
tmp->addr = bo_va->addr;
tmp->bo = radeon_bo_ref(bo_va->bo);
+ spin_lock(&vm->status_lock);
list_add(&tmp->vm_status, &vm->freed);
+ spin_unlock(&vm->status_lock);
}
interval_tree_remove(&bo_va->it, &vm->va);
@@ -575,7 +569,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
}
mutex_unlock(&vm->mutex);
- return radeon_bo_reserve(bo_va->bo, false);
+ return 0;
}
/**
@@ -699,17 +693,15 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
if (ib.length_dw != 0) {
radeon_asic_vm_pad_ib(rdev, &ib);
- radeon_semaphore_sync_resv(rdev, ib.semaphore, pd->tbo.resv, false);
- radeon_semaphore_sync_fence(ib.semaphore, vm->last_id_use);
+ radeon_sync_resv(rdev, &ib.sync, pd->tbo.resv, true);
WARN_ON(ib.length_dw > ndw);
r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) {
radeon_ib_free(rdev, &ib);
return r;
}
- radeon_fence_unref(&vm->fence);
- vm->fence = radeon_fence_ref(ib.fence);
- radeon_fence_unref(&vm->last_flush);
+ ib.fence->is_vm_update = true;
+ radeon_bo_fence(pd, ib.fence, false);
}
radeon_ib_free(rdev, &ib);
@@ -808,11 +800,11 @@ static void radeon_vm_frag_ptes(struct radeon_device *rdev,
*
* Global and local mutex must be locked!
*/
-static void radeon_vm_update_ptes(struct radeon_device *rdev,
- struct radeon_vm *vm,
- struct radeon_ib *ib,
- uint64_t start, uint64_t end,
- uint64_t dst, uint32_t flags)
+static int radeon_vm_update_ptes(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_ib *ib,
+ uint64_t start, uint64_t end,
+ uint64_t dst, uint32_t flags)
{
uint64_t mask = RADEON_VM_PTE_COUNT - 1;
uint64_t last_pte = ~0, last_dst = ~0;
@@ -825,8 +817,12 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
unsigned nptes;
uint64_t pte;
+ int r;
- radeon_semaphore_sync_resv(rdev, ib->semaphore, pt->tbo.resv, false);
+ radeon_sync_resv(rdev, &ib->sync, pt->tbo.resv, true);
+ r = reservation_object_reserve_shared(pt->tbo.resv);
+ if (r)
+ return r;
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
@@ -860,6 +856,33 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
last_pte + 8 * count,
last_dst, flags);
}
+
+ return 0;
+}
+
+/**
+ * radeon_vm_fence_pts - fence page tables after an update
+ *
+ * @vm: requested vm
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ * @fence: fence to use
+ *
+ * Fence the page tables in the range @start - @end (cayman+).
+ *
+ * Global and local mutex must be locked!
+ */
+static void radeon_vm_fence_pts(struct radeon_vm *vm,
+ uint64_t start, uint64_t end,
+ struct radeon_fence *fence)
+{
+ unsigned i;
+
+ start >>= radeon_vm_block_size;
+ end >>= radeon_vm_block_size;
+
+ for (i = start; i <= end; ++i)
+ radeon_bo_fence(vm->page_tables[i].bo, fence, true);
}
/**
@@ -892,7 +915,9 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
return -EINVAL;
}
+ spin_lock(&vm->status_lock);
list_del_init(&bo_va->vm_status);
+ spin_unlock(&vm->status_lock);
bo_va->flags &= ~RADEON_VM_PAGE_VALID;
bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
@@ -961,23 +986,34 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
return r;
ib.length_dw = 0;
- radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start,
- bo_va->it.last + 1, addr,
- radeon_vm_page_flags(bo_va->flags));
+ if (!(bo_va->flags & RADEON_VM_PAGE_VALID)) {
+ unsigned i;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i)
+ radeon_sync_fence(&ib.sync, vm->ids[i].last_id_use);
+ }
+
+ r = radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start,
+ bo_va->it.last + 1, addr,
+ radeon_vm_page_flags(bo_va->flags));
+ if (r) {
+ radeon_ib_free(rdev, &ib);
+ return r;
+ }
radeon_asic_vm_pad_ib(rdev, &ib);
WARN_ON(ib.length_dw > ndw);
- radeon_semaphore_sync_fence(ib.semaphore, vm->fence);
r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) {
radeon_ib_free(rdev, &ib);
return r;
}
- radeon_fence_unref(&vm->fence);
- vm->fence = radeon_fence_ref(ib.fence);
+ ib.fence->is_vm_update = true;
+ radeon_vm_fence_pts(vm, bo_va->it.start, bo_va->it.last + 1, ib.fence);
+ radeon_fence_unref(&bo_va->last_pt_update);
+ bo_va->last_pt_update = radeon_fence_ref(ib.fence);
radeon_ib_free(rdev, &ib);
- radeon_fence_unref(&vm->last_flush);
return 0;
}
@@ -996,16 +1032,25 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
int radeon_vm_clear_freed(struct radeon_device *rdev,
struct radeon_vm *vm)
{
- struct radeon_bo_va *bo_va, *tmp;
+ struct radeon_bo_va *bo_va;
int r;
- list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
+ spin_lock(&vm->status_lock);
+ while (!list_empty(&vm->freed)) {
+ bo_va = list_first_entry(&vm->freed,
+ struct radeon_bo_va, vm_status);
+ spin_unlock(&vm->status_lock);
+
r = radeon_vm_bo_update(rdev, bo_va, NULL);
radeon_bo_unref(&bo_va->bo);
+ radeon_fence_unref(&bo_va->last_pt_update);
kfree(bo_va);
if (r)
return r;
+
+ spin_lock(&vm->status_lock);
}
+ spin_unlock(&vm->status_lock);
return 0;
}
@@ -1024,14 +1069,23 @@ int radeon_vm_clear_freed(struct radeon_device *rdev,
int radeon_vm_clear_invalids(struct radeon_device *rdev,
struct radeon_vm *vm)
{
- struct radeon_bo_va *bo_va, *tmp;
+ struct radeon_bo_va *bo_va;
int r;
- list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, vm_status) {
+ spin_lock(&vm->status_lock);
+ while (!list_empty(&vm->invalidated)) {
+ bo_va = list_first_entry(&vm->invalidated,
+ struct radeon_bo_va, vm_status);
+ spin_unlock(&vm->status_lock);
+
r = radeon_vm_bo_update(rdev, bo_va, NULL);
if (r)
return r;
+
+ spin_lock(&vm->status_lock);
}
+ spin_unlock(&vm->status_lock);
+
return 0;
}
@@ -1054,14 +1108,17 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev,
mutex_lock(&vm->mutex);
interval_tree_remove(&bo_va->it, &vm->va);
+ spin_lock(&vm->status_lock);
list_del(&bo_va->vm_status);
if (bo_va->addr) {
bo_va->bo = radeon_bo_ref(bo_va->bo);
list_add(&bo_va->vm_status, &vm->freed);
} else {
+ radeon_fence_unref(&bo_va->last_pt_update);
kfree(bo_va);
}
+ spin_unlock(&vm->status_lock);
mutex_unlock(&vm->mutex);
}
@@ -1082,10 +1139,10 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
list_for_each_entry(bo_va, &bo->va, bo_list) {
if (bo_va->addr) {
- mutex_lock(&bo_va->vm->mutex);
+ spin_lock(&bo_va->vm->status_lock);
list_del(&bo_va->vm_status);
list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
- mutex_unlock(&bo_va->vm->mutex);
+ spin_unlock(&bo_va->vm->status_lock);
}
}
}
@@ -1103,15 +1160,17 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
const unsigned align = min(RADEON_VM_PTB_ALIGN_SIZE,
RADEON_VM_PTE_COUNT * 8);
unsigned pd_size, pd_entries, pts_size;
- int r;
+ int i, r;
- vm->id = 0;
vm->ib_bo_va = NULL;
- vm->fence = NULL;
- vm->last_flush = NULL;
- vm->last_id_use = NULL;
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ vm->ids[i].id = 0;
+ vm->ids[i].flushed_updates = NULL;
+ vm->ids[i].last_id_use = NULL;
+ }
mutex_init(&vm->mutex);
vm->va = RB_ROOT;
+ spin_lock_init(&vm->status_lock);
INIT_LIST_HEAD(&vm->invalidated);
INIT_LIST_HEAD(&vm->freed);
@@ -1165,11 +1224,13 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
if (!r) {
list_del_init(&bo_va->bo_list);
radeon_bo_unreserve(bo_va->bo);
+ radeon_fence_unref(&bo_va->last_pt_update);
kfree(bo_va);
}
}
list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
radeon_bo_unref(&bo_va->bo);
+ radeon_fence_unref(&bo_va->last_pt_update);
kfree(bo_va);
}
@@ -1179,9 +1240,10 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
radeon_bo_unref(&vm->page_directory);
- radeon_fence_unref(&vm->fence);
- radeon_fence_unref(&vm->last_flush);
- radeon_fence_unref(&vm->last_id_use);
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ radeon_fence_unref(&vm->ids[i].flushed_updates);
+ radeon_fence_unref(&vm->ids[i].last_id_use);
+ }
mutex_destroy(&vm->mutex);
}
diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c
index 7f34bad2e72..acff6e09cc4 100644
--- a/drivers/gpu/drm/radeon/rv770_dma.c
+++ b/drivers/gpu/drm/radeon/rv770_dma.c
@@ -44,31 +44,27 @@ struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct reservation_object *resv)
{
- struct radeon_semaphore *sem = NULL;
struct radeon_fence *fence;
+ struct radeon_sync sync;
int ring_index = rdev->asic->copy.dma_ring_index;
struct radeon_ring *ring = &rdev->ring[ring_index];
u32 size_in_dw, cur_size_in_dw;
int i, num_loops;
int r = 0;
- r = radeon_semaphore_create(rdev, &sem);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- return ERR_PTR(r);
- }
+ radeon_sync_create(&sync);
size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);
r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
- radeon_semaphore_free(rdev, &sem, NULL);
+ radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
}
- radeon_semaphore_sync_resv(rdev, sem, resv, false);
- radeon_semaphore_sync_rings(rdev, sem, ring->idx);
+ radeon_sync_resv(rdev, &sync, resv, false);
+ radeon_sync_rings(rdev, &sync, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_dw = size_in_dw;
@@ -87,12 +83,12 @@ struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev,
r = radeon_fence_emit(rdev, &fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
- radeon_semaphore_free(rdev, &sem, NULL);
+ radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
}
radeon_ring_unlock_commit(rdev, ring, false);
- radeon_semaphore_free(rdev, &sem, fence);
+ radeon_sync_free(rdev, &sync, fence);
return fence;
}
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 7d5083dc4ac..60df444bd07 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -3365,6 +3365,7 @@ void si_fence_ring_emit(struct radeon_device *rdev,
void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
+ unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
u32 header;
if (ib->is_const_ib) {
@@ -3400,14 +3401,13 @@ void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
#endif
(ib->gpu_addr & 0xFFFFFFFC));
radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
- radeon_ring_write(ring, ib->length_dw |
- (ib->vm ? (ib->vm->id << 24) : 0));
+ radeon_ring_write(ring, ib->length_dw | (vm_id << 24));
if (!ib->is_const_ib) {
/* flush read cache over gart for this vmid */
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
+ radeon_ring_write(ring, vm_id);
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
PACKET3_TC_ACTION_ENA |
@@ -5023,27 +5023,23 @@ static void si_vm_decode_fault(struct radeon_device *rdev,
block, mc_id);
}
-void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+void si_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+ unsigned vm_id, uint64_t pd_addr)
{
- struct radeon_ring *ring = &rdev->ring[ridx];
-
- if (vm == NULL)
- return;
-
/* write new base address */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
WRITE_DATA_DST_SEL(0)));
- if (vm->id < 8) {
+ if (vm_id < 8) {
radeon_ring_write(ring,
- (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
+ (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
} else {
radeon_ring_write(ring,
- (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
+ (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2);
}
radeon_ring_write(ring, 0);
- radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+ radeon_ring_write(ring, pd_addr >> 12);
/* flush hdp cache */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
@@ -5059,7 +5055,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
WRITE_DATA_DST_SEL(0)));
radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 1 << vm->id);
+ radeon_ring_write(ring, 1 << vm_id);
/* sync PFP to ME, otherwise we might get invalid PFP reads */
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index b58f12b762d..f5cc777e1c5 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -185,20 +185,17 @@ void si_dma_vm_set_pages(struct radeon_device *rdev,
}
}
-void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
-{
- struct radeon_ring *ring = &rdev->ring[ridx];
-
- if (vm == NULL)
- return;
+void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+ unsigned vm_id, uint64_t pd_addr)
+{
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
- if (vm->id < 8) {
- radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
+ if (vm_id < 8) {
+ radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2));
} else {
- radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2));
+ radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2));
}
- radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+ radeon_ring_write(ring, pd_addr >> 12);
/* flush hdp cache */
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
@@ -208,7 +205,7 @@ void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
/* bits 0-7 are the VM contexts0-7 */
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
- radeon_ring_write(ring, 1 << vm->id);
+ radeon_ring_write(ring, 1 << vm_id);
}
/**
@@ -229,31 +226,27 @@ struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct reservation_object *resv)
{
- struct radeon_semaphore *sem = NULL;
struct radeon_fence *fence;
+ struct radeon_sync sync;
int ring_index = rdev->asic->copy.dma_ring_index;
struct radeon_ring *ring = &rdev->ring[ring_index];
u32 size_in_bytes, cur_size_in_bytes;
int i, num_loops;
int r = 0;
- r = radeon_semaphore_create(rdev, &sem);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- return ERR_PTR(r);
- }
+ radeon_sync_create(&sync);
size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
- radeon_semaphore_free(rdev, &sem, NULL);
+ radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
}
- radeon_semaphore_sync_resv(rdev, sem, resv, false);
- radeon_semaphore_sync_rings(rdev, sem, ring->idx);
+ radeon_sync_resv(rdev, &sync, resv, false);
+ radeon_sync_rings(rdev, &sync, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_bytes = size_in_bytes;
@@ -272,12 +265,12 @@ struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
r = radeon_fence_emit(rdev, &fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
- radeon_semaphore_free(rdev, &sem, NULL);
+ radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
}
radeon_ring_unlock_commit(rdev, ring, false);
- radeon_semaphore_free(rdev, &sem, fence);
+ radeon_sync_free(rdev, &sync, fence);
return fence;
}
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 676e6c2ba90..32e354b8b0a 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -3398,6 +3398,15 @@ static int si_process_firmware_header(struct radeon_device *rdev)
ret = si_read_smc_sram_dword(rdev,
SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+ SISLANDS_SMC_FIRMWARE_HEADER_fanTable,
+ &tmp, si_pi->sram_end);
+ if (ret)
+ return ret;
+
+ si_pi->fan_table_start = tmp;
+
+ ret = si_read_smc_sram_dword(rdev,
+ SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable,
&tmp, si_pi->sram_end);
if (ret)
@@ -5817,8 +5826,33 @@ void si_dpm_setup_asic(struct radeon_device *rdev)
si_enable_acpi_power_management(rdev);
}
-static int si_set_thermal_temperature_range(struct radeon_device *rdev,
- int min_temp, int max_temp)
+static int si_thermal_enable_alert(struct radeon_device *rdev,
+ bool enable)
+{
+ u32 thermal_int = RREG32(CG_THERMAL_INT);
+
+ if (enable) {
+ PPSMC_Result result;
+
+ thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
+ WREG32(CG_THERMAL_INT, thermal_int);
+ rdev->irq.dpm_thermal = false;
+ result = si_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
+ if (result != PPSMC_Result_OK) {
+ DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
+ return -EINVAL;
+ }
+ } else {
+ thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
+ WREG32(CG_THERMAL_INT, thermal_int);
+ rdev->irq.dpm_thermal = true;
+ }
+
+ return 0;
+}
+
+static int si_thermal_set_temperature_range(struct radeon_device *rdev,
+ int min_temp, int max_temp)
{
int low_temp = 0 * 1000;
int high_temp = 255 * 1000;
@@ -5842,6 +5876,309 @@ static int si_set_thermal_temperature_range(struct radeon_device *rdev,
return 0;
}
+static void si_fan_ctrl_set_static_mode(struct radeon_device *rdev, u32 mode)
+{
+ struct si_power_info *si_pi = si_get_pi(rdev);
+ u32 tmp;
+
+ if (si_pi->fan_ctrl_is_in_default_mode) {
+ tmp = (RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
+ si_pi->fan_ctrl_default_mode = tmp;
+ tmp = (RREG32(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
+ si_pi->t_min = tmp;
+ si_pi->fan_ctrl_is_in_default_mode = false;
+ }
+
+ tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
+ tmp |= TMIN(0);
+ WREG32(CG_FDO_CTRL2, tmp);
+
+ tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
+ tmp |= FDO_PWM_MODE(mode);
+ WREG32(CG_FDO_CTRL2, tmp);
+}
+
+static int si_thermal_setup_fan_table(struct radeon_device *rdev)
+{
+ struct si_power_info *si_pi = si_get_pi(rdev);
+ PP_SIslands_FanTable fan_table = { FDO_MODE_HARDWARE };
+ u32 duty100;
+ u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+ u16 fdo_min, slope1, slope2;
+ u32 reference_clock, tmp;
+ int ret;
+ u64 tmp64;
+
+ if (!si_pi->fan_table_start) {
+ rdev->pm.dpm.fan.ucode_fan_control = false;
+ return 0;
+ }
+
+ duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+
+ if (duty100 == 0) {
+ rdev->pm.dpm.fan.ucode_fan_control = false;
+ return 0;
+ }
+
+ tmp64 = (u64)rdev->pm.dpm.fan.pwm_min * duty100;
+ do_div(tmp64, 10000);
+ fdo_min = (u16)tmp64;
+
+ t_diff1 = rdev->pm.dpm.fan.t_med - rdev->pm.dpm.fan.t_min;
+ t_diff2 = rdev->pm.dpm.fan.t_high - rdev->pm.dpm.fan.t_med;
+
+ pwm_diff1 = rdev->pm.dpm.fan.pwm_med - rdev->pm.dpm.fan.pwm_min;
+ pwm_diff2 = rdev->pm.dpm.fan.pwm_high - rdev->pm.dpm.fan.pwm_med;
+
+ slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+ slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
+ fan_table.slope1 = cpu_to_be16(slope1);
+ fan_table.slope2 = cpu_to_be16(slope2);
+
+ fan_table.fdo_min = cpu_to_be16(fdo_min);
+
+ fan_table.hys_down = cpu_to_be16(rdev->pm.dpm.fan.t_hyst);
+
+ fan_table.hys_up = cpu_to_be16(1);
+
+ fan_table.hys_slope = cpu_to_be16(1);
+
+ fan_table.temp_resp_lim = cpu_to_be16(5);
+
+ reference_clock = radeon_get_xclk(rdev);
+
+ fan_table.refresh_period = cpu_to_be32((rdev->pm.dpm.fan.cycle_delay *
+ reference_clock) / 1600);
+
+ fan_table.fdo_max = cpu_to_be16((u16)duty100);
+
+ tmp = (RREG32(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
+ fan_table.temp_src = (uint8_t)tmp;
+
+ ret = si_copy_bytes_to_smc(rdev,
+ si_pi->fan_table_start,
+ (u8 *)(&fan_table),
+ sizeof(fan_table),
+ si_pi->sram_end);
+
+ if (ret) {
+ DRM_ERROR("Failed to load fan table to the SMC.");
+ rdev->pm.dpm.fan.ucode_fan_control = false;
+ }
+
+ return 0;
+}
+
+static int si_fan_ctrl_start_smc_fan_control(struct radeon_device *rdev)
+{
+ PPSMC_Result ret;
+
+ ret = si_send_msg_to_smc(rdev, PPSMC_StartFanControl);
+ if (ret == PPSMC_Result_OK)
+ return 0;
+ else
+ return -EINVAL;
+}
+
+static int si_fan_ctrl_stop_smc_fan_control(struct radeon_device *rdev)
+{
+ PPSMC_Result ret;
+
+ ret = si_send_msg_to_smc(rdev, PPSMC_StopFanControl);
+ if (ret == PPSMC_Result_OK)
+ return 0;
+ else
+ return -EINVAL;
+}
+
+#if 0
+static int si_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
+ u32 *speed)
+{
+ u32 duty, duty100;
+ u64 tmp64;
+
+ if (rdev->pm.no_fan)
+ return -ENOENT;
+
+ duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+ duty = (RREG32(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;
+
+ if (duty100 == 0)
+ return -EINVAL;
+
+ tmp64 = (u64)duty * 100;
+ do_div(tmp64, duty100);
+ *speed = (u32)tmp64;
+
+ if (*speed > 100)
+ *speed = 100;
+
+ return 0;
+}
+
+static int si_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
+ u32 speed)
+{
+ u32 tmp;
+ u32 duty, duty100;
+ u64 tmp64;
+
+ if (rdev->pm.no_fan)
+ return -ENOENT;
+
+ if (speed > 100)
+ return -EINVAL;
+
+ if (rdev->pm.dpm.fan.ucode_fan_control)
+ si_fan_ctrl_stop_smc_fan_control(rdev);
+
+ duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+
+ if (duty100 == 0)
+ return -EINVAL;
+
+ tmp64 = (u64)speed * duty100;
+ do_div(tmp64, 100);
+ duty = (u32)tmp64;
+
+ tmp = RREG32(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
+ tmp |= FDO_STATIC_DUTY(duty);
+ WREG32(CG_FDO_CTRL0, tmp);
+
+ si_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
+
+ return 0;
+}
+
+static int si_fan_ctrl_get_fan_speed_rpm(struct radeon_device *rdev,
+ u32 *speed)
+{
+ u32 tach_period;
+ u32 xclk = radeon_get_xclk(rdev);
+
+ if (rdev->pm.no_fan)
+ return -ENOENT;
+
+ if (rdev->pm.fan_pulses_per_revolution == 0)
+ return -ENOENT;
+
+ tach_period = (RREG32(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
+ if (tach_period == 0)
+ return -ENOENT;
+
+ *speed = 60 * xclk * 10000 / tach_period;
+
+ return 0;
+}
+
+static int si_fan_ctrl_set_fan_speed_rpm(struct radeon_device *rdev,
+ u32 speed)
+{
+ u32 tach_period, tmp;
+ u32 xclk = radeon_get_xclk(rdev);
+
+ if (rdev->pm.no_fan)
+ return -ENOENT;
+
+ if (rdev->pm.fan_pulses_per_revolution == 0)
+ return -ENOENT;
+
+ if ((speed < rdev->pm.fan_min_rpm) ||
+ (speed > rdev->pm.fan_max_rpm))
+ return -EINVAL;
+
+ if (rdev->pm.dpm.fan.ucode_fan_control)
+ si_fan_ctrl_stop_smc_fan_control(rdev);
+
+ tach_period = 60 * xclk * 10000 / (8 * speed);
+ tmp = RREG32(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
+ tmp |= TARGET_PERIOD(tach_period);
+ WREG32(CG_TACH_CTRL, tmp);
+
+ si_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC_RPM);
+
+ return 0;
+}
+#endif
+
+static void si_fan_ctrl_set_default_mode(struct radeon_device *rdev)
+{
+ struct si_power_info *si_pi = si_get_pi(rdev);
+ u32 tmp;
+
+ if (!si_pi->fan_ctrl_is_in_default_mode) {
+ tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
+ tmp |= FDO_PWM_MODE(si_pi->fan_ctrl_default_mode);
+ WREG32(CG_FDO_CTRL2, tmp);
+
+ tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
+ tmp |= TMIN(si_pi->t_min);
+ WREG32(CG_FDO_CTRL2, tmp);
+ si_pi->fan_ctrl_is_in_default_mode = true;
+ }
+}
+
+static void si_thermal_start_smc_fan_control(struct radeon_device *rdev)
+{
+ if (rdev->pm.dpm.fan.ucode_fan_control) {
+ si_fan_ctrl_start_smc_fan_control(rdev);
+ si_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
+ }
+}
+
+static void si_thermal_initialize(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ if (rdev->pm.fan_pulses_per_revolution) {
+ tmp = RREG32(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
+ tmp |= EDGE_PER_REV(rdev->pm.fan_pulses_per_revolution - 1);
+ WREG32(CG_TACH_CTRL, tmp);
+ }
+
+ tmp = RREG32(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
+ tmp |= TACH_PWM_RESP_RATE(0x28);
+ WREG32(CG_FDO_CTRL2, tmp);
+}
+
+static int si_thermal_start_thermal_controller(struct radeon_device *rdev)
+{
+ int ret;
+
+ si_thermal_initialize(rdev);
+ ret = si_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+ if (ret)
+ return ret;
+ ret = si_thermal_enable_alert(rdev, true);
+ if (ret)
+ return ret;
+ if (rdev->pm.dpm.fan.ucode_fan_control) {
+ ret = si_halt_smc(rdev);
+ if (ret)
+ return ret;
+ ret = si_thermal_setup_fan_table(rdev);
+ if (ret)
+ return ret;
+ ret = si_resume_smc(rdev);
+ if (ret)
+ return ret;
+ si_thermal_start_smc_fan_control(rdev);
+ }
+
+ return 0;
+}
+
+static void si_thermal_stop_thermal_controller(struct radeon_device *rdev)
+{
+ if (!rdev->pm.no_fan) {
+ si_fan_ctrl_set_default_mode(rdev);
+ si_fan_ctrl_stop_smc_fan_control(rdev);
+ }
+}
+
int si_dpm_enable(struct radeon_device *rdev)
{
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
@@ -5954,31 +6291,39 @@ int si_dpm_enable(struct radeon_device *rdev)
si_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+ si_thermal_start_thermal_controller(rdev);
+
ni_update_current_ps(rdev, boot_ps);
return 0;
}
-int si_dpm_late_enable(struct radeon_device *rdev)
+static int si_set_temperature_range(struct radeon_device *rdev)
{
int ret;
- if (rdev->irq.installed &&
- r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
- PPSMC_Result result;
+ ret = si_thermal_enable_alert(rdev, false);
+ if (ret)
+ return ret;
+ ret = si_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+ if (ret)
+ return ret;
+ ret = si_thermal_enable_alert(rdev, true);
+ if (ret)
+ return ret;
- ret = si_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
- if (ret)
- return ret;
- rdev->irq.dpm_thermal = true;
- radeon_irq_set(rdev);
- result = si_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
+ return ret;
+}
- if (result != PPSMC_Result_OK)
- DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
- }
+int si_dpm_late_enable(struct radeon_device *rdev)
+{
+ int ret;
- return 0;
+ ret = si_set_temperature_range(rdev);
+ if (ret)
+ return ret;
+
+ return ret;
}
void si_dpm_disable(struct radeon_device *rdev)
@@ -5988,6 +6333,7 @@ void si_dpm_disable(struct radeon_device *rdev)
if (!si_is_smc_running(rdev))
return;
+ si_thermal_stop_thermal_controller(rdev);
si_disable_ulv(rdev);
si_clear_vc(rdev);
if (pi->thermal_protection)
@@ -6526,6 +6872,9 @@ int si_dpm_init(struct radeon_device *rdev)
rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+ si_pi->fan_ctrl_is_in_default_mode = true;
+ rdev->pm.dpm.fan.ucode_fan_control = false;
+
return 0;
}
diff --git a/drivers/gpu/drm/radeon/si_dpm.h b/drivers/gpu/drm/radeon/si_dpm.h
index 8b5c06a0832..d16bb1b5f10 100644
--- a/drivers/gpu/drm/radeon/si_dpm.h
+++ b/drivers/gpu/drm/radeon/si_dpm.h
@@ -182,6 +182,7 @@ struct si_power_info {
u32 dte_table_start;
u32 spll_table_start;
u32 papm_cfg_table_start;
+ u32 fan_table_start;
/* CAC stuff */
const struct si_cac_config_reg *cac_weights;
const struct si_cac_config_reg *lcac_config;
@@ -197,6 +198,10 @@ struct si_power_info {
/* SVI2 */
u8 svd_gpio_id;
u8 svc_gpio_id;
+ /* fan control */
+ bool fan_ctrl_is_in_default_mode;
+ u32 t_min;
+ u32 fan_ctrl_default_mode;
};
#define SISLANDS_INITIAL_STATE_ARB_INDEX 0
diff --git a/drivers/gpu/drm/radeon/si_smc.c b/drivers/gpu/drm/radeon/si_smc.c
index 73dbc79c959..e5bb92f1677 100644
--- a/drivers/gpu/drm/radeon/si_smc.c
+++ b/drivers/gpu/drm/radeon/si_smc.c
@@ -135,7 +135,7 @@ void si_reset_smc(struct radeon_device *rdev)
int si_program_jump_on_start(struct radeon_device *rdev)
{
- static u8 data[] = { 0x0E, 0x00, 0x40, 0x40 };
+ static const u8 data[] = { 0x0E, 0x00, 0x40, 0x40 };
return si_copy_bytes_to_smc(rdev, 0x0, data, 4, sizeof(data)+1);
}
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 6635da9ec98..4069be89e58 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -180,7 +180,10 @@
#define DIG_THERM_DPM(x) ((x) << 14)
#define DIG_THERM_DPM_MASK 0x003FC000
#define DIG_THERM_DPM_SHIFT 14
-
+#define CG_THERMAL_STATUS 0x704
+#define FDO_PWM_DUTY(x) ((x) << 9)
+#define FDO_PWM_DUTY_MASK (0xff << 9)
+#define FDO_PWM_DUTY_SHIFT 9
#define CG_THERMAL_INT 0x708
#define DIG_THERM_INTH(x) ((x) << 8)
#define DIG_THERM_INTH_MASK 0x0000FF00
@@ -191,6 +194,10 @@
#define THERM_INT_MASK_HIGH (1 << 24)
#define THERM_INT_MASK_LOW (1 << 25)
+#define CG_MULT_THERMAL_CTRL 0x710
+#define TEMP_SEL(x) ((x) << 20)
+#define TEMP_SEL_MASK (0xff << 20)
+#define TEMP_SEL_SHIFT 20
#define CG_MULT_THERMAL_STATUS 0x714
#define ASIC_MAX_TEMP(x) ((x) << 0)
#define ASIC_MAX_TEMP_MASK 0x000001ff
@@ -199,6 +206,37 @@
#define CTF_TEMP_MASK 0x0003fe00
#define CTF_TEMP_SHIFT 9
+#define CG_FDO_CTRL0 0x754
+#define FDO_STATIC_DUTY(x) ((x) << 0)
+#define FDO_STATIC_DUTY_MASK 0x000000FF
+#define FDO_STATIC_DUTY_SHIFT 0
+#define CG_FDO_CTRL1 0x758
+#define FMAX_DUTY100(x) ((x) << 0)
+#define FMAX_DUTY100_MASK 0x000000FF
+#define FMAX_DUTY100_SHIFT 0
+#define CG_FDO_CTRL2 0x75C
+#define TMIN(x) ((x) << 0)
+#define TMIN_MASK 0x000000FF
+#define TMIN_SHIFT 0
+#define FDO_PWM_MODE(x) ((x) << 11)
+#define FDO_PWM_MODE_MASK (7 << 11)
+#define FDO_PWM_MODE_SHIFT 11
+#define TACH_PWM_RESP_RATE(x) ((x) << 25)
+#define TACH_PWM_RESP_RATE_MASK (0x7f << 25)
+#define TACH_PWM_RESP_RATE_SHIFT 25
+
+#define CG_TACH_CTRL 0x770
+# define EDGE_PER_REV(x) ((x) << 0)
+# define EDGE_PER_REV_MASK (0x7 << 0)
+# define EDGE_PER_REV_SHIFT 0
+# define TARGET_PERIOD(x) ((x) << 3)
+# define TARGET_PERIOD_MASK 0xfffffff8
+# define TARGET_PERIOD_SHIFT 3
+#define CG_TACH_STATUS 0x774
+# define TACH_PERIOD(x) ((x) << 0)
+# define TACH_PERIOD_MASK 0xffffffff
+# define TACH_PERIOD_SHIFT 0
+
#define GENERAL_PWRMGT 0x780
# define GLOBAL_PWRMGT_EN (1 << 0)
# define STATIC_PM_EN (1 << 1)
diff --git a/drivers/gpu/drm/radeon/sislands_smc.h b/drivers/gpu/drm/radeon/sislands_smc.h
index 623a0b1e2d9..3c779838d9a 100644
--- a/drivers/gpu/drm/radeon/sislands_smc.h
+++ b/drivers/gpu/drm/radeon/sislands_smc.h
@@ -245,6 +245,31 @@ typedef struct SISLANDS_SMC_STATETABLE SISLANDS_SMC_STATETABLE;
#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd 0x11c
#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc 0x120
+struct PP_SIslands_FanTable
+{
+ uint8_t fdo_mode;
+ uint8_t padding;
+ int16_t temp_min;
+ int16_t temp_med;
+ int16_t temp_max;
+ int16_t slope1;
+ int16_t slope2;
+ int16_t fdo_min;
+ int16_t hys_up;
+ int16_t hys_down;
+ int16_t hys_slope;
+ int16_t temp_resp_lim;
+ int16_t temp_curr;
+ int16_t slope_curr;
+ int16_t pwm_curr;
+ uint32_t refresh_period;
+ int16_t fdo_max;
+ uint8_t temp_src;
+ int8_t padding2;
+};
+
+typedef struct PP_SIslands_FanTable PP_SIslands_FanTable;
+
#define SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16
#define SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES 32
diff --git a/drivers/gpu/drm/radeon/smu7_discrete.h b/drivers/gpu/drm/radeon/smu7_discrete.h
index 82f70c90a9e..0b0b404ff09 100644
--- a/drivers/gpu/drm/radeon/smu7_discrete.h
+++ b/drivers/gpu/drm/radeon/smu7_discrete.h
@@ -431,6 +431,31 @@ struct SMU7_Discrete_MCRegisters
typedef struct SMU7_Discrete_MCRegisters SMU7_Discrete_MCRegisters;
+struct SMU7_Discrete_FanTable
+{
+ uint16_t FdoMode;
+ int16_t TempMin;
+ int16_t TempMed;
+ int16_t TempMax;
+ int16_t Slope1;
+ int16_t Slope2;
+ int16_t FdoMin;
+ int16_t HystUp;
+ int16_t HystDown;
+ int16_t HystSlope;
+ int16_t TempRespLim;
+ int16_t TempCurr;
+ int16_t SlopeCurr;
+ int16_t PwmCurr;
+ uint32_t RefreshPeriod;
+ int16_t FdoMax;
+ uint8_t TempSrc;
+ int8_t Padding;
+};
+
+typedef struct SMU7_Discrete_FanTable SMU7_Discrete_FanTable;
+
+
struct SMU7_Discrete_PmFuses {
// dw0-dw1
uint8_t BapmVddCVidHiSidd[8];
@@ -462,7 +487,10 @@ struct SMU7_Discrete_PmFuses {
uint8_t BapmVddCVidHiSidd2[8];
// dw11-dw12
- uint32_t Reserved6[2];
+ int16_t FuzzyFan_ErrorSetDelta;
+ int16_t FuzzyFan_ErrorRateSetDelta;
+ int16_t FuzzyFan_PwmSetDelta;
+ uint16_t CalcMeasPowerBlend;
// dw13-dw16
uint8_t GnbLPML[16];
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index c96f6089f8b..2324a526de6 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -11,10 +11,17 @@ config DRM_RCAR_DU
Choose this option if you have an R-Car chipset.
If M is selected the module will be called rcar-du-drm.
+config DRM_RCAR_HDMI
+ bool "R-Car DU HDMI Encoder Support"
+ depends on DRM_RCAR_DU
+ depends on OF
+ help
+ Enable support for external HDMI encoders.
+
config DRM_RCAR_LVDS
bool "R-Car DU LVDS Encoder Support"
depends on DRM_RCAR_DU
depends on ARCH_R8A7790 || ARCH_R8A7791 || COMPILE_TEST
help
- Enable support the R-Car Display Unit embedded LVDS encoders
- (currently only on R8A7790).
+ Enable support for the R-Car Display Unit embedded LVDS encoders
+ (currently only on R8A7790 and R8A7791).
diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile
index 12b8d447783..05de1c4097a 100644
--- a/drivers/gpu/drm/rcar-du/Makefile
+++ b/drivers/gpu/drm/rcar-du/Makefile
@@ -7,6 +7,8 @@ rcar-du-drm-y := rcar_du_crtc.o \
rcar_du_plane.o \
rcar_du_vgacon.o
+rcar-du-drm-$(CONFIG_DRM_RCAR_HDMI) += rcar_du_hdmicon.o \
+ rcar_du_hdmienc.o
rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS) += rcar_du_lvdsenc.o
obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 148b5058918..23cc910951f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -19,6 +19,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
#include "rcar_du_crtc.h"
#include "rcar_du_drv.h"
@@ -585,7 +586,7 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index)
if (irq < 0) {
dev_err(rcdu->dev, "no IRQ for CRTC %u\n", index);
- return ret;
+ return irq;
}
ret = devm_request_irq(rcdu->dev, irq, rcar_du_crtc_irq, irqflags,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
index e97ae502dec..984e6083699 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
@@ -15,7 +15,6 @@
#define __RCAR_DU_CRTC_H__
#include <linux/mutex.h>
-#include <linux/platform_data/rcar-du.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
@@ -41,6 +40,15 @@ struct rcar_du_crtc {
#define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc)
+enum rcar_du_output {
+ RCAR_DU_OUTPUT_DPAD0,
+ RCAR_DU_OUTPUT_DPAD1,
+ RCAR_DU_OUTPUT_LVDS0,
+ RCAR_DU_OUTPUT_LVDS1,
+ RCAR_DU_OUTPUT_TCON,
+ RCAR_DU_OUTPUT_MAX,
+};
+
int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index);
void rcar_du_crtc_enable_vblank(struct rcar_du_crtc *rcrtc, bool enable);
void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index e419aade220..7bfa09cf18d 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -146,12 +146,11 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags)
{
struct platform_device *pdev = dev->platformdev;
struct device_node *np = pdev->dev.of_node;
- struct rcar_du_platform_data *pdata = pdev->dev.platform_data;
struct rcar_du_device *rcdu;
struct resource *mem;
int ret;
- if (pdata == NULL && np == NULL) {
+ if (np == NULL) {
dev_err(dev->dev, "no platform data\n");
return -ENODEV;
}
@@ -163,7 +162,6 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags)
}
rcdu->dev = &pdev->dev;
- rcdu->pdata = pdata;
rcdu->info = np ? of_match_device(rcar_du_of_table, rcdu->dev)->data
: (void *)platform_get_device_id(pdev)->driver_data;
rcdu->ddev = dev;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index 8e494633c3b..0a724669f02 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -15,7 +15,6 @@
#define __RCAR_DU_DRV_H__
#include <linux/kernel.h>
-#include <linux/platform_data/rcar-du.h>
#include "rcar_du_crtc.h"
#include "rcar_du_group.h"
@@ -67,7 +66,6 @@ struct rcar_du_device_info {
struct rcar_du_device {
struct device *dev;
- const struct rcar_du_platform_data *pdata;
const struct rcar_du_device_info *info;
void __iomem *mmio;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index 7c0ec95915e..34a122a3966 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -19,6 +19,8 @@
#include "rcar_du_drv.h"
#include "rcar_du_encoder.h"
+#include "rcar_du_hdmicon.h"
+#include "rcar_du_hdmienc.h"
#include "rcar_du_kms.h"
#include "rcar_du_lvdscon.h"
#include "rcar_du_lvdsenc.h"
@@ -33,7 +35,7 @@ rcar_du_connector_best_encoder(struct drm_connector *connector)
{
struct rcar_du_connector *rcon = to_rcar_connector(connector);
- return &rcon->encoder->encoder;
+ return rcar_encoder_to_drm_encoder(rcon->encoder);
}
/* -----------------------------------------------------------------------------
@@ -142,10 +144,11 @@ static const struct drm_encoder_funcs encoder_funcs = {
int rcar_du_encoder_init(struct rcar_du_device *rcdu,
enum rcar_du_encoder_type type,
enum rcar_du_output output,
- const struct rcar_du_encoder_data *data,
- struct device_node *np)
+ struct device_node *enc_node,
+ struct device_node *con_node)
{
struct rcar_du_encoder *renc;
+ struct drm_encoder *encoder;
unsigned int encoder_type;
int ret;
@@ -154,6 +157,7 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
return -ENOMEM;
renc->output = output;
+ encoder = rcar_encoder_to_drm_encoder(renc);
switch (output) {
case RCAR_DU_OUTPUT_LVDS0:
@@ -175,6 +179,9 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
case RCAR_DU_ENCODER_LVDS:
encoder_type = DRM_MODE_ENCODER_LVDS;
break;
+ case RCAR_DU_ENCODER_HDMI:
+ encoder_type = DRM_MODE_ENCODER_TMDS;
+ break;
case RCAR_DU_ENCODER_NONE:
default:
/* No external encoder, use the internal encoder type. */
@@ -182,23 +189,35 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
break;
}
- ret = drm_encoder_init(rcdu->ddev, &renc->encoder, &encoder_funcs,
- encoder_type);
- if (ret < 0)
- return ret;
+ if (type == RCAR_DU_ENCODER_HDMI) {
+ if (renc->lvds) {
+ dev_err(rcdu->dev,
+ "Chaining LVDS and HDMI encoders not supported\n");
+ return -EINVAL;
+ }
- drm_encoder_helper_add(&renc->encoder, &encoder_helper_funcs);
+ ret = rcar_du_hdmienc_init(rcdu, renc, enc_node);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs,
+ encoder_type);
+ if (ret < 0)
+ return ret;
- switch (encoder_type) {
- case DRM_MODE_ENCODER_LVDS: {
- const struct rcar_du_panel_data *pdata =
- data ? &data->connector.lvds.panel : NULL;
- return rcar_du_lvds_connector_init(rcdu, renc, pdata, np);
+ drm_encoder_helper_add(encoder, &encoder_helper_funcs);
}
+ switch (encoder_type) {
+ case DRM_MODE_ENCODER_LVDS:
+ return rcar_du_lvds_connector_init(rcdu, renc, con_node);
+
case DRM_MODE_ENCODER_DAC:
return rcar_du_vga_connector_init(rcdu, renc);
+ case DRM_MODE_ENCODER_TMDS:
+ return rcar_du_hdmi_connector_init(rcdu, renc);
+
default:
return -EINVAL;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
index bd624135ef1..719b6f2a031 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
@@ -14,21 +14,32 @@
#ifndef __RCAR_DU_ENCODER_H__
#define __RCAR_DU_ENCODER_H__
-#include <linux/platform_data/rcar-du.h>
-
#include <drm/drm_crtc.h>
+#include <drm/drm_encoder_slave.h>
struct rcar_du_device;
+struct rcar_du_hdmienc;
struct rcar_du_lvdsenc;
+enum rcar_du_encoder_type {
+ RCAR_DU_ENCODER_UNUSED = 0,
+ RCAR_DU_ENCODER_NONE,
+ RCAR_DU_ENCODER_VGA,
+ RCAR_DU_ENCODER_LVDS,
+ RCAR_DU_ENCODER_HDMI,
+};
+
struct rcar_du_encoder {
- struct drm_encoder encoder;
+ struct drm_encoder_slave slave;
enum rcar_du_output output;
+ struct rcar_du_hdmienc *hdmi;
struct rcar_du_lvdsenc *lvds;
};
#define to_rcar_encoder(e) \
- container_of(e, struct rcar_du_encoder, encoder)
+ container_of(e, struct rcar_du_encoder, slave.base)
+
+#define rcar_encoder_to_drm_encoder(e) (&(e)->slave.base)
struct rcar_du_connector {
struct drm_connector connector;
@@ -44,7 +55,7 @@ rcar_du_connector_best_encoder(struct drm_connector *connector);
int rcar_du_encoder_init(struct rcar_du_device *rcdu,
enum rcar_du_encoder_type type,
enum rcar_du_output output,
- const struct rcar_du_encoder_data *data,
- struct device_node *np);
+ struct device_node *enc_node,
+ struct device_node *con_node);
#endif /* __RCAR_DU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
new file mode 100644
index 00000000000..4d7d4dd46d2
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
@@ -0,0 +1,121 @@
+/*
+ * R-Car Display Unit HDMI Connector
+ *
+ * Copyright (C) 2014 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_encoder_slave.h>
+
+#include "rcar_du_drv.h"
+#include "rcar_du_encoder.h"
+#include "rcar_du_hdmicon.h"
+#include "rcar_du_kms.h"
+
+#define to_slave_funcs(e) (to_rcar_encoder(e)->slave.slave_funcs)
+
+static int rcar_du_hdmi_connector_get_modes(struct drm_connector *connector)
+{
+ struct rcar_du_connector *con = to_rcar_connector(connector);
+ struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder);
+ struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+
+ if (sfuncs->get_modes == NULL)
+ return 0;
+
+ return sfuncs->get_modes(encoder, connector);
+}
+
+static int rcar_du_hdmi_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct rcar_du_connector *con = to_rcar_connector(connector);
+ struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder);
+ struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+
+ if (sfuncs->mode_valid == NULL)
+ return MODE_OK;
+
+ return sfuncs->mode_valid(encoder, mode);
+}
+
+static const struct drm_connector_helper_funcs connector_helper_funcs = {
+ .get_modes = rcar_du_hdmi_connector_get_modes,
+ .mode_valid = rcar_du_hdmi_connector_mode_valid,
+ .best_encoder = rcar_du_connector_best_encoder,
+};
+
+static void rcar_du_hdmi_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
+static enum drm_connector_status
+rcar_du_hdmi_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct rcar_du_connector *con = to_rcar_connector(connector);
+ struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder);
+ struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+
+ if (sfuncs->detect == NULL)
+ return connector_status_unknown;
+
+ return sfuncs->detect(encoder, connector);
+}
+
+static const struct drm_connector_funcs connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = rcar_du_hdmi_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = rcar_du_hdmi_connector_destroy,
+};
+
+int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu,
+ struct rcar_du_encoder *renc)
+{
+ struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc);
+ struct rcar_du_connector *rcon;
+ struct drm_connector *connector;
+ int ret;
+
+ rcon = devm_kzalloc(rcdu->dev, sizeof(*rcon), GFP_KERNEL);
+ if (rcon == NULL)
+ return -ENOMEM;
+
+ connector = &rcon->connector;
+ connector->display_info.width_mm = 0;
+ connector->display_info.height_mm = 0;
+
+ ret = drm_connector_init(rcdu->ddev, connector, &connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+ if (ret < 0)
+ return ret;
+
+ drm_connector_helper_add(connector, &connector_helper_funcs);
+ ret = drm_connector_register(connector);
+ if (ret < 0)
+ return ret;
+
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ drm_object_property_set_value(&connector->base,
+ rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
+
+ ret = drm_mode_connector_attach_encoder(connector, encoder);
+ if (ret < 0)
+ return ret;
+
+ connector->encoder = encoder;
+ rcon->encoder = renc;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.h b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.h
new file mode 100644
index 00000000000..87daa949227
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.h
@@ -0,0 +1,31 @@
+/*
+ * R-Car Display Unit HDMI Connector
+ *
+ * Copyright (C) 2014 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __RCAR_DU_HDMICON_H__
+#define __RCAR_DU_HDMICON_H__
+
+struct rcar_du_device;
+struct rcar_du_encoder;
+
+#if IS_ENABLED(CONFIG_DRM_RCAR_HDMI)
+int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu,
+ struct rcar_du_encoder *renc);
+#else
+static inline int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu,
+ struct rcar_du_encoder *renc)
+{
+ return -ENOSYS;
+}
+#endif
+
+#endif /* __RCAR_DU_HDMICON_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
new file mode 100644
index 00000000000..359bc999a9c
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
@@ -0,0 +1,151 @@
+/*
+ * R-Car Display Unit HDMI Encoder
+ *
+ * Copyright (C) 2014 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/slab.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_encoder_slave.h>
+
+#include "rcar_du_drv.h"
+#include "rcar_du_encoder.h"
+#include "rcar_du_hdmienc.h"
+
+struct rcar_du_hdmienc {
+ struct rcar_du_encoder *renc;
+ struct device *dev;
+ int dpms;
+};
+
+#define to_rcar_hdmienc(e) (to_rcar_encoder(e)->hdmi)
+#define to_slave_funcs(e) (to_rcar_encoder(e)->slave.slave_funcs)
+
+static void rcar_du_hdmienc_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
+ struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+
+ if (hdmienc->dpms == mode)
+ return;
+
+ if (sfuncs->dpms)
+ sfuncs->dpms(encoder, mode);
+
+ hdmienc->dpms = mode;
+}
+
+static bool rcar_du_hdmienc_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+
+ if (sfuncs->mode_fixup == NULL)
+ return true;
+
+ return sfuncs->mode_fixup(encoder, mode, adjusted_mode);
+}
+
+static void rcar_du_hdmienc_mode_prepare(struct drm_encoder *encoder)
+{
+ rcar_du_hdmienc_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void rcar_du_hdmienc_mode_commit(struct drm_encoder *encoder)
+{
+ rcar_du_hdmienc_dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static void rcar_du_hdmienc_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
+ struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+
+ if (sfuncs->mode_set)
+ sfuncs->mode_set(encoder, mode, adjusted_mode);
+
+ rcar_du_crtc_route_output(encoder->crtc, hdmienc->renc->output);
+}
+
+static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
+ .dpms = rcar_du_hdmienc_dpms,
+ .mode_fixup = rcar_du_hdmienc_mode_fixup,
+ .prepare = rcar_du_hdmienc_mode_prepare,
+ .commit = rcar_du_hdmienc_mode_commit,
+ .mode_set = rcar_du_hdmienc_mode_set,
+};
+
+static void rcar_du_hdmienc_cleanup(struct drm_encoder *encoder)
+{
+ struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
+
+ rcar_du_hdmienc_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+ drm_encoder_cleanup(encoder);
+ put_device(hdmienc->dev);
+}
+
+static const struct drm_encoder_funcs encoder_funcs = {
+ .destroy = rcar_du_hdmienc_cleanup,
+};
+
+int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
+ struct rcar_du_encoder *renc, struct device_node *np)
+{
+ struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc);
+ struct drm_i2c_encoder_driver *driver;
+ struct i2c_client *i2c_slave;
+ struct rcar_du_hdmienc *hdmienc;
+ int ret;
+
+ hdmienc = devm_kzalloc(rcdu->dev, sizeof(*hdmienc), GFP_KERNEL);
+ if (hdmienc == NULL)
+ return -ENOMEM;
+
+ /* Locate the slave I2C device and driver. */
+ i2c_slave = of_find_i2c_device_by_node(np);
+ if (!i2c_slave || !i2c_get_clientdata(i2c_slave))
+ return -EPROBE_DEFER;
+
+ hdmienc->dev = &i2c_slave->dev;
+
+ if (hdmienc->dev->driver == NULL) {
+ ret = -EPROBE_DEFER;
+ goto error;
+ }
+
+ /* Initialize the slave encoder. */
+ driver = to_drm_i2c_encoder_driver(to_i2c_driver(hdmienc->dev->driver));
+ ret = driver->encoder_init(i2c_slave, rcdu->ddev, &renc->slave);
+ if (ret < 0)
+ goto error;
+
+ ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ if (ret < 0)
+ goto error;
+
+ drm_encoder_helper_add(encoder, &encoder_helper_funcs);
+
+ renc->hdmi = hdmienc;
+ hdmienc->renc = renc;
+
+ return 0;
+
+error:
+ put_device(hdmienc->dev);
+ return ret;
+}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.h b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.h
new file mode 100644
index 00000000000..2ff0128ac8e
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.h
@@ -0,0 +1,35 @@
+/*
+ * R-Car Display Unit HDMI Encoder
+ *
+ * Copyright (C) 2014 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __RCAR_DU_HDMIENC_H__
+#define __RCAR_DU_HDMIENC_H__
+
+#include <linux/module.h>
+
+struct device_node;
+struct rcar_du_device;
+struct rcar_du_encoder;
+
+#if IS_ENABLED(CONFIG_DRM_RCAR_HDMI)
+int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
+ struct rcar_du_encoder *renc, struct device_node *np);
+#else
+static inline int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
+ struct rcar_du_encoder *renc,
+ struct device_node *np)
+{
+ return -ENOSYS;
+}
+#endif
+
+#endif /* __RCAR_DU_HDMIENC_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 6c24ad7d03e..0c5ee616b5a 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -126,9 +126,9 @@ int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
else
align = 16 * args->bpp / 8;
- args->pitch = roundup(max(args->pitch, min_pitch), align);
+ args->pitch = roundup(min_pitch, align);
- return drm_gem_cma_dumb_create(file, dev, args);
+ return drm_gem_cma_dumb_create_internal(file, dev, args);
}
static struct drm_framebuffer *
@@ -190,49 +190,16 @@ static const struct drm_mode_config_funcs rcar_du_mode_config_funcs = {
.output_poll_changed = rcar_du_output_poll_changed,
};
-static int rcar_du_encoders_init_pdata(struct rcar_du_device *rcdu)
-{
- unsigned int num_encoders = 0;
- unsigned int i;
- int ret;
-
- for (i = 0; i < rcdu->pdata->num_encoders; ++i) {
- const struct rcar_du_encoder_data *pdata =
- &rcdu->pdata->encoders[i];
- const struct rcar_du_output_routing *route =
- &rcdu->info->routes[pdata->output];
-
- if (pdata->type == RCAR_DU_ENCODER_UNUSED)
- continue;
-
- if (pdata->output >= RCAR_DU_OUTPUT_MAX ||
- route->possible_crtcs == 0) {
- dev_warn(rcdu->dev,
- "encoder %u references unexisting output %u, skipping\n",
- i, pdata->output);
- continue;
- }
-
- ret = rcar_du_encoder_init(rcdu, pdata->type, pdata->output,
- pdata, NULL);
- if (ret < 0)
- return ret;
-
- num_encoders++;
- }
-
- return num_encoders;
-}
-
-static int rcar_du_encoders_init_dt_one(struct rcar_du_device *rcdu,
- enum rcar_du_output output,
- struct of_endpoint *ep)
+static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
+ enum rcar_du_output output,
+ struct of_endpoint *ep)
{
static const struct {
const char *compatible;
enum rcar_du_encoder_type type;
} encoders[] = {
{ "adi,adv7123", RCAR_DU_ENCODER_VGA },
+ { "adi,adv7511w", RCAR_DU_ENCODER_HDMI },
{ "thine,thc63lvdm83d", RCAR_DU_ENCODER_LVDS },
};
@@ -323,14 +290,14 @@ static int rcar_du_encoders_init_dt_one(struct rcar_du_device *rcdu,
connector = entity;
}
- ret = rcar_du_encoder_init(rcdu, enc_type, output, NULL, connector);
+ ret = rcar_du_encoder_init(rcdu, enc_type, output, encoder, connector);
of_node_put(encoder);
of_node_put(connector);
return ret < 0 ? ret : 1;
}
-static int rcar_du_encoders_init_dt(struct rcar_du_device *rcdu)
+static int rcar_du_encoders_init(struct rcar_du_device *rcdu)
{
struct device_node *np = rcdu->dev->of_node;
struct device_node *prev = NULL;
@@ -377,7 +344,7 @@ static int rcar_du_encoders_init_dt(struct rcar_du_device *rcdu)
}
/* Process the output pipeline. */
- ret = rcar_du_encoders_init_dt_one(rcdu, output, &ep);
+ ret = rcar_du_encoders_init_one(rcdu, output, &ep);
if (ret < 0) {
of_node_put(ep_node);
return ret;
@@ -442,11 +409,7 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
if (ret < 0)
return ret;
- if (rcdu->pdata)
- ret = rcar_du_encoders_init_pdata(rcdu);
- else
- ret = rcar_du_encoders_init_dt(rcdu);
-
+ ret = rcar_du_encoders_init(rcdu);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
index 115eed20db1..6d9811c052c 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
@@ -27,7 +27,11 @@
struct rcar_du_lvds_connector {
struct rcar_du_connector connector;
- struct rcar_du_panel_data panel;
+ struct {
+ unsigned int width_mm; /* Panel width in mm */
+ unsigned int height_mm; /* Panel height in mm */
+ struct videomode mode;
+ } panel;
};
#define to_rcar_lvds_connector(c) \
@@ -78,31 +82,26 @@ static const struct drm_connector_funcs connector_funcs = {
int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
struct rcar_du_encoder *renc,
- const struct rcar_du_panel_data *panel,
/* TODO const */ struct device_node *np)
{
+ struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc);
struct rcar_du_lvds_connector *lvdscon;
struct drm_connector *connector;
+ struct display_timing timing;
int ret;
lvdscon = devm_kzalloc(rcdu->dev, sizeof(*lvdscon), GFP_KERNEL);
if (lvdscon == NULL)
return -ENOMEM;
- if (panel) {
- lvdscon->panel = *panel;
- } else {
- struct display_timing timing;
-
- ret = of_get_display_timing(np, "panel-timing", &timing);
- if (ret < 0)
- return ret;
+ ret = of_get_display_timing(np, "panel-timing", &timing);
+ if (ret < 0)
+ return ret;
- videomode_from_timing(&timing, &lvdscon->panel.mode);
+ videomode_from_timing(&timing, &lvdscon->panel.mode);
- of_property_read_u32(np, "width-mm", &lvdscon->panel.width_mm);
- of_property_read_u32(np, "height-mm", &lvdscon->panel.height_mm);
- }
+ of_property_read_u32(np, "width-mm", &lvdscon->panel.width_mm);
+ of_property_read_u32(np, "height-mm", &lvdscon->panel.height_mm);
connector = &lvdscon->connector.connector;
connector->display_info.width_mm = lvdscon->panel.width_mm;
@@ -122,11 +121,11 @@ int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
drm_object_property_set_value(&connector->base,
rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
- ret = drm_mode_connector_attach_encoder(connector, &renc->encoder);
+ ret = drm_mode_connector_attach_encoder(connector, encoder);
if (ret < 0)
return ret;
- connector->encoder = &renc->encoder;
+ connector->encoder = encoder;
lvdscon->connector.encoder = renc;
return 0;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h
index d11424d537f..d4881ee0be7 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h
@@ -16,11 +16,9 @@
struct rcar_du_device;
struct rcar_du_encoder;
-struct rcar_du_panel_data;
int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
struct rcar_du_encoder *renc,
- const struct rcar_du_panel_data *panel,
struct device_node *np);
#endif /* __RCAR_DU_LVDSCON_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
index 3303a55cec7..f65aabda079 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
@@ -16,7 +16,6 @@
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/platform_data/rcar-du.h>
struct rcar_drm_crtc;
struct rcar_du_lvdsenc;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
index 564a723ede0..752747a5e92 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
@@ -52,6 +52,7 @@ static const struct drm_connector_funcs connector_funcs = {
int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
struct rcar_du_encoder *renc)
{
+ struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc);
struct rcar_du_connector *rcon;
struct drm_connector *connector;
int ret;
@@ -78,11 +79,11 @@ int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
drm_object_property_set_value(&connector->base,
rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
- ret = drm_mode_connector_attach_encoder(connector, &renc->encoder);
+ ret = drm_mode_connector_attach_encoder(connector, encoder);
if (ret < 0)
return ret;
- connector->encoder = &renc->encoder;
+ connector->encoder = encoder;
rcon->encoder = renc;
return 0;
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
new file mode 100644
index 00000000000..ca9f085efa9
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -0,0 +1,17 @@
+config DRM_ROCKCHIP
+ tristate "DRM Support for Rockchip"
+ depends on DRM && ROCKCHIP_IOMMU
+ select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
+ select DRM_PANEL
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
+ select VIDEOMODE_HELPERS
+ help
+ Choose this option if you have a Rockchip SoC.
+ This driver provides kernel mode setting and buffer
+ management to userspace. This driver does not provide
+ 2D or 3D acceleration; acceleration is performed by other
+ IP found on the SoC.
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
new file mode 100644
index 00000000000..2cb0672f57e
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o rockchip_drm_fbdev.o \
+ rockchip_drm_gem.o
+
+obj-$(CONFIG_DRM_ROCKCHIP) += rockchipdrm.o rockchip_drm_vop.o
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
new file mode 100644
index 00000000000..a798c7c71f9
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -0,0 +1,551 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author:Mark Yao <mark.yao@rock-chips.com>
+ *
+ * based on exynos_drm_drv.c
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/dma-iommu.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include <linux/of_graph.h>
+#include <linux/component.h>
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_fb.h"
+#include "rockchip_drm_fbdev.h"
+#include "rockchip_drm_gem.h"
+
+#define DRIVER_NAME "rockchip"
+#define DRIVER_DESC "RockChip Soc DRM"
+#define DRIVER_DATE "20140818"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+/*
+ * Attach a (component) device to the shared drm dma mapping from master drm
+ * device. This is used by the VOPs to map GEM buffers to a common DMA
+ * mapping.
+ */
+int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
+ struct device *dev)
+{
+ struct dma_iommu_mapping *mapping = drm_dev->dev->archdata.mapping;
+ int ret;
+
+ ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+
+ return arm_iommu_attach_device(dev, mapping);
+}
+EXPORT_SYMBOL_GPL(rockchip_drm_dma_attach_device);
+
+void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
+ struct device *dev)
+{
+ arm_iommu_detach_device(dev);
+}
+EXPORT_SYMBOL_GPL(rockchip_drm_dma_detach_device);
+
+int rockchip_register_crtc_funcs(struct drm_device *dev,
+ const struct rockchip_crtc_funcs *crtc_funcs,
+ int pipe)
+{
+ struct rockchip_drm_private *priv = dev->dev_private;
+
+ if (pipe >= ROCKCHIP_MAX_CRTC)
+ return -EINVAL;
+
+ priv->crtc_funcs[pipe] = crtc_funcs;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rockchip_register_crtc_funcs);
+
+void rockchip_unregister_crtc_funcs(struct drm_device *dev, int pipe)
+{
+ struct rockchip_drm_private *priv = dev->dev_private;
+
+ if (pipe >= ROCKCHIP_MAX_CRTC)
+ return;
+
+ priv->crtc_funcs[pipe] = NULL;
+}
+EXPORT_SYMBOL_GPL(rockchip_unregister_crtc_funcs);
+
+static struct drm_crtc *rockchip_crtc_from_pipe(struct drm_device *drm,
+ int pipe)
+{
+ struct drm_crtc *crtc;
+ int i = 0;
+
+ list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
+ if (i++ == pipe)
+ return crtc;
+
+ return NULL;
+}
+
+static int rockchip_drm_crtc_enable_vblank(struct drm_device *dev, int pipe)
+{
+ struct rockchip_drm_private *priv = dev->dev_private;
+ struct drm_crtc *crtc = rockchip_crtc_from_pipe(dev, pipe);
+
+ if (crtc && priv->crtc_funcs[pipe] &&
+ priv->crtc_funcs[pipe]->enable_vblank)
+ return priv->crtc_funcs[pipe]->enable_vblank(crtc);
+
+ return 0;
+}
+
+static void rockchip_drm_crtc_disable_vblank(struct drm_device *dev, int pipe)
+{
+ struct rockchip_drm_private *priv = dev->dev_private;
+ struct drm_crtc *crtc = rockchip_crtc_from_pipe(dev, pipe);
+
+ if (crtc && priv->crtc_funcs[pipe] &&
+ priv->crtc_funcs[pipe]->disable_vblank)
+ priv->crtc_funcs[pipe]->disable_vblank(crtc);
+}
+
+static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags)
+{
+ struct rockchip_drm_private *private;
+ struct dma_iommu_mapping *mapping;
+ struct device *dev = drm_dev->dev;
+ int ret;
+
+ private = devm_kzalloc(drm_dev->dev, sizeof(*private), GFP_KERNEL);
+ if (!private)
+ return -ENOMEM;
+
+ drm_dev->dev_private = private;
+
+ drm_mode_config_init(drm_dev);
+
+ rockchip_drm_mode_config_init(drm_dev);
+
+ dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
+ GFP_KERNEL);
+ if (!dev->dma_parms) {
+ ret = -ENOMEM;
+ goto err_config_cleanup;
+ }
+
+ /* TODO(djkurtz): fetch the mapping start/size from somewhere */
+ mapping = arm_iommu_create_mapping(&platform_bus_type, 0x00000000,
+ SZ_2G);
+ if (IS_ERR(mapping)) {
+ ret = PTR_ERR(mapping);
+ goto err_config_cleanup;
+ }
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto err_release_mapping;
+
+ dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+
+ ret = arm_iommu_attach_device(dev, mapping);
+ if (ret)
+ goto err_release_mapping;
+
+ /* Try to bind all sub drivers. */
+ ret = component_bind_all(dev, drm_dev);
+ if (ret)
+ goto err_detach_device;
+
+ /* init kms poll for handling hpd */
+ drm_kms_helper_poll_init(drm_dev);
+
+ /*
+ * enable drm irq mode.
+ * - with irq_enabled = true, we can use the vblank feature.
+ */
+ drm_dev->irq_enabled = true;
+
+ ret = drm_vblank_init(drm_dev, ROCKCHIP_MAX_CRTC);
+ if (ret)
+ goto err_kms_helper_poll_fini;
+
+ /*
+ * With vblank_disable_allowed = true, the vblank interrupt is disabled
+ * by the DRM timer once the current process gives up ownership of the
+ * vblank event (i.e. after drm_vblank_put() is called).
+ */
+ drm_dev->vblank_disable_allowed = true;
+
+ ret = rockchip_drm_fbdev_init(drm_dev);
+ if (ret)
+ goto err_vblank_cleanup;
+
+ return 0;
+err_vblank_cleanup:
+ drm_vblank_cleanup(drm_dev);
+err_kms_helper_poll_fini:
+ drm_kms_helper_poll_fini(drm_dev);
+ component_unbind_all(dev, drm_dev);
+err_detach_device:
+ arm_iommu_detach_device(dev);
+err_release_mapping:
+ arm_iommu_release_mapping(dev->archdata.mapping);
+err_config_cleanup:
+ drm_mode_config_cleanup(drm_dev);
+ drm_dev->dev_private = NULL;
+ return ret;
+}
+
+static int rockchip_drm_unload(struct drm_device *drm_dev)
+{
+ struct device *dev = drm_dev->dev;
+
+ rockchip_drm_fbdev_fini(drm_dev);
+ drm_vblank_cleanup(drm_dev);
+ drm_kms_helper_poll_fini(drm_dev);
+ component_unbind_all(dev, drm_dev);
+ arm_iommu_detach_device(dev);
+ arm_iommu_release_mapping(dev->archdata.mapping);
+ drm_mode_config_cleanup(drm_dev);
+ drm_dev->dev_private = NULL;
+
+ return 0;
+}
+
+void rockchip_drm_lastclose(struct drm_device *dev)
+{
+ struct rockchip_drm_private *priv = dev->dev_private;
+
+ drm_fb_helper_restore_fbdev_mode_unlocked(&priv->fbdev_helper);
+}
+
+static const struct file_operations rockchip_drm_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .mmap = rockchip_gem_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .release = drm_release,
+};
+
+const struct vm_operations_struct rockchip_drm_vm_ops = {
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static struct drm_driver rockchip_drm_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
+ .load = rockchip_drm_load,
+ .unload = rockchip_drm_unload,
+ .lastclose = rockchip_drm_lastclose,
+ .get_vblank_counter = drm_vblank_count,
+ .enable_vblank = rockchip_drm_crtc_enable_vblank,
+ .disable_vblank = rockchip_drm_crtc_disable_vblank,
+ .gem_vm_ops = &rockchip_drm_vm_ops,
+ .gem_free_object = rockchip_gem_free_object,
+ .dumb_create = rockchip_gem_dumb_create,
+ .dumb_map_offset = rockchip_gem_dumb_map_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_get_sg_table = rockchip_gem_prime_get_sg_table,
+ .gem_prime_vmap = rockchip_gem_prime_vmap,
+ .gem_prime_vunmap = rockchip_gem_prime_vunmap,
+ .gem_prime_mmap = rockchip_gem_mmap_buf,
+ .fops = &rockchip_drm_driver_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int rockchip_drm_sys_suspend(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct drm_connector *connector;
+
+ if (!drm)
+ return 0;
+
+ drm_modeset_lock_all(drm);
+ list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
+ int old_dpms = connector->dpms;
+
+ if (connector->funcs->dpms)
+ connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF);
+
+ /* Set the old mode back to the connector for resume */
+ connector->dpms = old_dpms;
+ }
+ drm_modeset_unlock_all(drm);
+
+ return 0;
+}
+
+static int rockchip_drm_sys_resume(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct drm_connector *connector;
+ enum drm_connector_status status;
+ bool changed = false;
+
+ if (!drm)
+ return 0;
+
+ drm_modeset_lock_all(drm);
+ list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
+ int desired_mode = connector->dpms;
+
+ /*
+ * At suspend time the DPMS state was saved in connector->dpms and is
+ * read back as desired_mode above; until it is restored below, the
+ * connector's current DPMS status must be treated as DRM_MODE_DPMS_OFF.
+ */
+ connector->dpms = DRM_MODE_DPMS_OFF;
+
+ /*
+ * If the connector has been disconnected during suspend,
+ * disconnect it from the encoder and leave it off. We'll notify
+ * userspace at the end.
+ */
+ if (desired_mode == DRM_MODE_DPMS_ON) {
+ status = connector->funcs->detect(connector, true);
+ if (status == connector_status_disconnected) {
+ connector->encoder = NULL;
+ connector->status = status;
+ changed = true;
+ continue;
+ }
+ }
+ if (connector->funcs->dpms)
+ connector->funcs->dpms(connector, desired_mode);
+ }
+ drm_modeset_unlock_all(drm);
+
+ drm_helper_resume_force_mode(drm);
+
+ if (changed)
+ drm_kms_helper_hotplug_event(drm);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops rockchip_drm_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(rockchip_drm_sys_suspend,
+ rockchip_drm_sys_resume)
+};
+
+/*
+ * @node: device tree node containing encoder input ports
+ * @encoder: drm_encoder
+ */
+int rockchip_drm_encoder_get_mux_id(struct device_node *node,
+ struct drm_encoder *encoder)
+{
+ struct device_node *ep = NULL;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct of_endpoint endpoint;
+ struct device_node *port;
+ int ret;
+
+ if (!node || !crtc)
+ return -EINVAL;
+
+ do {
+ ep = of_graph_get_next_endpoint(node, ep);
+ if (!ep)
+ break;
+
+ port = of_graph_get_remote_port(ep);
+ of_node_put(port);
+ if (port == crtc->port) {
+ ret = of_graph_parse_endpoint(ep, &endpoint);
+ return ret ?: endpoint.id;
+ }
+ } while (ep);
+
+ return -EINVAL;
+}
+
+static int compare_of(struct device *dev, void *data)
+{
+ struct device_node *np = data;
+
+ return dev->of_node == np;
+}
+
+static void rockchip_add_endpoints(struct device *dev,
+ struct component_match **match,
+ struct device_node *port)
+{
+ struct device_node *ep, *remote;
+
+ for_each_child_of_node(port, ep) {
+ remote = of_graph_get_remote_port_parent(ep);
+ if (!remote || !of_device_is_available(remote)) {
+ of_node_put(remote);
+ continue;
+ } else if (!of_device_is_available(remote->parent)) {
+ dev_warn(dev, "parent device of %s is not available\n",
+ remote->full_name);
+ of_node_put(remote);
+ continue;
+ }
+
+ component_match_add(dev, match, compare_of, remote);
+ of_node_put(remote);
+ }
+}
+
+static int rockchip_drm_bind(struct device *dev)
+{
+ struct drm_device *drm;
+ int ret;
+
+ drm = drm_dev_alloc(&rockchip_drm_driver, dev);
+ if (!drm)
+ return -ENOMEM;
+
+ ret = drm_dev_set_unique(drm, "%s", dev_name(dev));
+ if (ret)
+ goto err_free;
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ goto err_free;
+
+ dev_set_drvdata(dev, drm);
+
+ return 0;
+
+err_free:
+ drm_dev_unref(drm);
+ return ret;
+}
+
+static void rockchip_drm_unbind(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+
+ drm_dev_unregister(drm);
+ drm_dev_unref(drm);
+ dev_set_drvdata(dev, NULL);
+}
+
+static const struct component_master_ops rockchip_drm_ops = {
+ .bind = rockchip_drm_bind,
+ .unbind = rockchip_drm_unbind,
+};
+
+static int rockchip_drm_platform_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct component_match *match = NULL;
+ struct device_node *np = dev->of_node;
+ struct device_node *port;
+ int i;
+
+ if (!np)
+ return -ENODEV;
+ /*
+ * Bind the crtc ports first, so that
+ * drm_of_find_possible_crtcs called from encoder .bind callbacks
+ * works as expected.
+ */
+ for (i = 0;; i++) {
+ port = of_parse_phandle(np, "ports", i);
+ if (!port)
+ break;
+
+ if (!of_device_is_available(port->parent)) {
+ of_node_put(port);
+ continue;
+ }
+
+ component_match_add(dev, &match, compare_of, port->parent);
+ of_node_put(port);
+ }
+
+ if (i == 0) {
+ dev_err(dev, "missing 'ports' property\n");
+ return -ENODEV;
+ }
+
+ if (!match) {
+ dev_err(dev, "No available vop found for display-subsystem.\n");
+ return -ENODEV;
+ }
+ /*
+ * For each bound crtc, bind the encoders attached to its
+ * remote endpoint.
+ */
+ for (i = 0;; i++) {
+ port = of_parse_phandle(np, "ports", i);
+ if (!port)
+ break;
+
+ if (!of_device_is_available(port->parent)) {
+ of_node_put(port);
+ continue;
+ }
+
+ rockchip_add_endpoints(dev, &match, port);
+ of_node_put(port);
+ }
+
+ return component_master_add_with_match(dev, &rockchip_drm_ops, match);
+}
+
+static int rockchip_drm_platform_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &rockchip_drm_ops);
+
+ return 0;
+}
+
+static const struct of_device_id rockchip_drm_dt_ids[] = {
+ { .compatible = "rockchip,display-subsystem", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids);
+
+static struct platform_driver rockchip_drm_platform_driver = {
+ .probe = rockchip_drm_platform_probe,
+ .remove = rockchip_drm_platform_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "rockchip-drm",
+ .of_match_table = rockchip_drm_dt_ids,
+ .pm = &rockchip_drm_pm_ops,
+ },
+};
+
+module_platform_driver(rockchip_drm_platform_driver);
+
+MODULE_AUTHOR("Mark Yao <mark.yao@rock-chips.com>");
+MODULE_DESCRIPTION("ROCKCHIP DRM Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
new file mode 100644
index 00000000000..dc4e5f03ac7
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author:Mark Yao <mark.yao@rock-chips.com>
+ *
+ * based on exynos_drm_drv.h
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ROCKCHIP_DRM_DRV_H
+#define _ROCKCHIP_DRM_DRV_H
+
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem.h>
+
+#include <linux/module.h>
+#include <linux/component.h>
+
+#define ROCKCHIP_MAX_FB_BUFFER 3
+#define ROCKCHIP_MAX_CONNECTOR 2
+#define ROCKCHIP_MAX_CRTC 2
+
+struct drm_device;
+struct drm_connector;
+
+/*
+ * Rockchip drm private crtc funcs.
+ * @enable_vblank: enable crtc vblank irq.
+ * @disable_vblank: disable crtc vblank irq.
+ */
+struct rockchip_crtc_funcs {
+ int (*enable_vblank)(struct drm_crtc *crtc);
+ void (*disable_vblank)(struct drm_crtc *crtc);
+};
+
+/*
+ * Rockchip drm private structure.
+ *
+ * @fbdev_helper: fbdev helper state.
+ * @fbdev_bo: GEM object backing the fbdev framebuffer.
+ * @crtc_funcs: per-pipe CRTC vblank callbacks.
+ */
+struct rockchip_drm_private {
+ struct drm_fb_helper fbdev_helper;
+ struct drm_gem_object *fbdev_bo;
+ const struct rockchip_crtc_funcs *crtc_funcs[ROCKCHIP_MAX_CRTC];
+};
+
+int rockchip_register_crtc_funcs(struct drm_device *dev,
+ const struct rockchip_crtc_funcs *crtc_funcs,
+ int pipe);
+void rockchip_unregister_crtc_funcs(struct drm_device *dev, int pipe);
+int rockchip_drm_encoder_get_mux_id(struct device_node *node,
+ struct drm_encoder *encoder);
+int rockchip_drm_crtc_mode_config(struct drm_crtc *crtc, int connector_type,
+ int out_mode);
+int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
+ struct device *dev);
+void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
+ struct device *dev);
+
+#endif /* _ROCKCHIP_DRM_DRV_H */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
new file mode 100644
index 00000000000..77d52893d40
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author:Mark Yao <mark.yao@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <drm/drm.h>
+#include <drm/drmP.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_gem.h"
+
+#define to_rockchip_fb(x) container_of(x, struct rockchip_drm_fb, fb)
+
+struct rockchip_drm_fb {
+ struct drm_framebuffer fb;
+ struct drm_gem_object *obj[ROCKCHIP_MAX_FB_BUFFER];
+};
+
+struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb,
+ unsigned int plane)
+{
+ struct rockchip_drm_fb *rk_fb = to_rockchip_fb(fb);
+
+ if (plane >= ROCKCHIP_MAX_FB_BUFFER)
+ return NULL;
+
+ return rk_fb->obj[plane];
+}
+EXPORT_SYMBOL_GPL(rockchip_fb_get_gem_obj);
+
+static void rockchip_drm_fb_destroy(struct drm_framebuffer *fb)
+{
+ struct rockchip_drm_fb *rockchip_fb = to_rockchip_fb(fb);
+ struct drm_gem_object *obj;
+ int i;
+
+ for (i = 0; i < ROCKCHIP_MAX_FB_BUFFER; i++) {
+ obj = rockchip_fb->obj[i];
+ if (obj)
+ drm_gem_object_unreference_unlocked(obj);
+ }
+
+ drm_framebuffer_cleanup(fb);
+ kfree(rockchip_fb);
+}
+
+static int rockchip_drm_fb_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ struct rockchip_drm_fb *rockchip_fb = to_rockchip_fb(fb);
+
+ return drm_gem_handle_create(file_priv,
+ rockchip_fb->obj[0], handle);
+}
+
+static struct drm_framebuffer_funcs rockchip_drm_fb_funcs = {
+ .destroy = rockchip_drm_fb_destroy,
+ .create_handle = rockchip_drm_fb_create_handle,
+};
+
+static struct rockchip_drm_fb *
+rockchip_fb_alloc(struct drm_device *dev, struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object **obj, unsigned int num_planes)
+{
+ struct rockchip_drm_fb *rockchip_fb;
+ int ret;
+ int i;
+
+ rockchip_fb = kzalloc(sizeof(*rockchip_fb), GFP_KERNEL);
+ if (!rockchip_fb)
+ return ERR_PTR(-ENOMEM);
+
+ drm_helper_mode_fill_fb_struct(&rockchip_fb->fb, mode_cmd);
+
+ for (i = 0; i < num_planes; i++)
+ rockchip_fb->obj[i] = obj[i];
+
+ ret = drm_framebuffer_init(dev, &rockchip_fb->fb,
+ &rockchip_drm_fb_funcs);
+ if (ret) {
+ dev_err(dev->dev, "Failed to initialize framebuffer: %d\n",
+ ret);
+ kfree(rockchip_fb);
+ return ERR_PTR(ret);
+ }
+
+ return rockchip_fb;
+}
+
+static struct drm_framebuffer *
+rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct rockchip_drm_fb *rockchip_fb;
+ struct drm_gem_object *objs[ROCKCHIP_MAX_FB_BUFFER];
+ struct drm_gem_object *obj;
+ unsigned int hsub;
+ unsigned int vsub;
+ int num_planes;
+ int ret;
+ int i;
+
+ hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
+ vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
+ num_planes = min(drm_format_num_planes(mode_cmd->pixel_format),
+ ROCKCHIP_MAX_FB_BUFFER);
+
+ for (i = 0; i < num_planes; i++) {
+ unsigned int width = mode_cmd->width / (i ? hsub : 1);
+ unsigned int height = mode_cmd->height / (i ? vsub : 1);
+ unsigned int min_size;
+
+ obj = drm_gem_object_lookup(dev, file_priv,
+ mode_cmd->handles[i]);
+ if (!obj) {
+ dev_err(dev->dev, "Failed to lookup GEM object\n");
+ ret = -ENXIO;
+ goto err_gem_object_unreference;
+ }
+
+ min_size = (height - 1) * mode_cmd->pitches[i] +
+ mode_cmd->offsets[i] +
+ width * drm_format_plane_cpp(mode_cmd->pixel_format, i);
+
+ if (obj->size < min_size) {
+ drm_gem_object_unreference_unlocked(obj);
+ ret = -EINVAL;
+ goto err_gem_object_unreference;
+ }
+ objs[i] = obj;
+ }
+
+ rockchip_fb = rockchip_fb_alloc(dev, mode_cmd, objs, i);
+ if (IS_ERR(rockchip_fb)) {
+ ret = PTR_ERR(rockchip_fb);
+ goto err_gem_object_unreference;
+ }
+
+ return &rockchip_fb->fb;
+
+err_gem_object_unreference:
+ for (i--; i >= 0; i--)
+ drm_gem_object_unreference_unlocked(objs[i]);
+ return ERR_PTR(ret);
+}
+
+static void rockchip_drm_output_poll_changed(struct drm_device *dev)
+{
+ struct rockchip_drm_private *private = dev->dev_private;
+ struct drm_fb_helper *fb_helper = &private->fbdev_helper;
+
+ drm_fb_helper_hotplug_event(fb_helper);
+}
+
+static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {
+ .fb_create = rockchip_user_fb_create,
+ .output_poll_changed = rockchip_drm_output_poll_changed,
+};
+
+struct drm_framebuffer *
+rockchip_drm_framebuffer_init(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ struct rockchip_drm_fb *rockchip_fb;
+
+ rockchip_fb = rockchip_fb_alloc(dev, mode_cmd, &obj, 1);
+ if (IS_ERR(rockchip_fb))
+ return ERR_CAST(rockchip_fb);
+
+ return &rockchip_fb->fb;
+}
+
+void rockchip_drm_mode_config_init(struct drm_device *dev)
+{
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+
+ /*
+ * Set the default maximum width and height (4096x4096). These values
+ * are used to check the framebuffer size limit in drm_mode_addfb().
+ */
+ dev->mode_config.max_width = 4096;
+ dev->mode_config.max_height = 4096;
+
+ dev->mode_config.funcs = &rockchip_drm_mode_config_funcs;
+}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.h b/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
new file mode 100644
index 00000000000..09574d48226
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author:Mark Yao <mark.yao@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ROCKCHIP_DRM_FB_H
+#define _ROCKCHIP_DRM_FB_H
+
+struct drm_framebuffer *
+rockchip_drm_framebuffer_init(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
+void rockchip_drm_framebuffer_fini(struct drm_framebuffer *fb);
+
+void rockchip_drm_mode_config_init(struct drm_device *dev);
+
+struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb,
+ unsigned int plane);
+#endif /* _ROCKCHIP_DRM_FB_H */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
new file mode 100644
index 00000000000..a5d889a8716
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author:Mark Yao <mark.yao@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drm.h>
+#include <drm/drmP.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_gem.h"
+#include "rockchip_drm_fb.h"
+
+#define PREFERRED_BPP 32
+#define to_drm_private(x) \
+ container_of(x, struct rockchip_drm_private, fbdev_helper)
+
+static int rockchip_fbdev_mmap(struct fb_info *info,
+ struct vm_area_struct *vma)
+{
+ struct drm_fb_helper *helper = info->par;
+ struct rockchip_drm_private *private = to_drm_private(helper);
+
+ return rockchip_gem_mmap_buf(private->fbdev_bo, vma);
+}
+
+static struct fb_ops rockchip_drm_fbdev_ops = {
+ .owner = THIS_MODULE,
+ .fb_mmap = rockchip_fbdev_mmap,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct rockchip_drm_private *private = to_drm_private(helper);
+ struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+ struct drm_device *dev = helper->dev;
+ struct rockchip_gem_object *rk_obj;
+ struct drm_framebuffer *fb;
+ unsigned int bytes_per_pixel;
+ unsigned long offset;
+ struct fb_info *fbi;
+ size_t size;
+ int ret;
+
+ bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.pitches[0] = sizes->surface_width * bytes_per_pixel;
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+
+ rk_obj = rockchip_gem_create_object(dev, size);
+ if (IS_ERR(rk_obj))
+ return -ENOMEM;
+
+ private->fbdev_bo = &rk_obj->base;
+
+ fbi = framebuffer_alloc(0, dev->dev);
+ if (!fbi) {
+ dev_err(dev->dev, "Failed to allocate framebuffer info.\n");
+ ret = -ENOMEM;
+ goto err_rockchip_gem_free_object;
+ }
+
+ helper->fb = rockchip_drm_framebuffer_init(dev, &mode_cmd,
+ private->fbdev_bo);
+ if (IS_ERR(helper->fb)) {
+ dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
+ ret = PTR_ERR(helper->fb);
+ goto err_framebuffer_release;
+ }
+
+ helper->fbdev = fbi;
+
+ fbi->par = helper;
+ fbi->flags = FBINFO_FLAG_DEFAULT;
+ fbi->fbops = &rockchip_drm_fbdev_ops;
+
+ ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+ if (ret) {
+ dev_err(dev->dev, "Failed to allocate color map.\n");
+ goto err_drm_framebuffer_unref;
+ }
+
+ fb = helper->fb;
+ drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
+
+ offset = fbi->var.xoffset * bytes_per_pixel;
+ offset += fbi->var.yoffset * fb->pitches[0];
+
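+ /* point the fbdev framebuffer at the kernel mapping of the GEM buffer */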
+ dev->mode_config.fb_base = 0;
+ fbi->screen_base = rk_obj->kvaddr + offset;
+ fbi->screen_size = rk_obj->base.size;
+ fbi->fix.smem_len = rk_obj->base.size;
+
+ DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%p offset=%ld size=%d\n",
+ fb->width, fb->height, fb->depth, rk_obj->kvaddr,
+ offset, size);
+ return 0;
+
+err_drm_framebuffer_unref:
+ drm_framebuffer_unreference(helper->fb);
+err_framebuffer_release:
+ framebuffer_release(fbi);
+err_rockchip_gem_free_object:
+ rockchip_gem_free_object(&rk_obj->base);
+ return ret;
+}
+
+static const struct drm_fb_helper_funcs rockchip_drm_fb_helper_funcs = {
+ .fb_probe = rockchip_drm_fbdev_create,
+};
+
+int rockchip_drm_fbdev_init(struct drm_device *dev)
+{
+ struct rockchip_drm_private *private = dev->dev_private;
+ struct drm_fb_helper *helper;
+ unsigned int num_crtc;
+ int ret;
+
+ if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
+ return -EINVAL;
+
+ num_crtc = dev->mode_config.num_crtc;
+
+ helper = &private->fbdev_helper;
+
+ drm_fb_helper_prepare(dev, helper, &rockchip_drm_fb_helper_funcs);
+
+ ret = drm_fb_helper_init(dev, helper, num_crtc, ROCKCHIP_MAX_CONNECTOR);
+ if (ret < 0) {
+ dev_err(dev->dev, "Failed to initialize drm fb helper - %d.\n",
+ ret);
+ return ret;
+ }
+
+ ret = drm_fb_helper_single_add_all_connectors(helper);
+ if (ret < 0) {
+ dev_err(dev->dev, "Failed to add connectors - %d.\n", ret);
+ goto err_drm_fb_helper_fini;
+ }
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
+ ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
+ if (ret < 0) {
+ dev_err(dev->dev, "Failed to set initial hw config - %d.\n",
+ ret);
+ goto err_drm_fb_helper_fini;
+ }
+
+ return 0;
+
+err_drm_fb_helper_fini:
+ drm_fb_helper_fini(helper);
+ return ret;
+}
+
+void rockchip_drm_fbdev_fini(struct drm_device *dev)
+{
+ struct rockchip_drm_private *private = dev->dev_private;
+ struct drm_fb_helper *helper;
+
+ helper = &private->fbdev_helper;
+
+ if (helper->fbdev) {
+ struct fb_info *info;
+ int ret;
+
+ info = helper->fbdev;
+ ret = unregister_framebuffer(info);
+ if (ret < 0)
+ DRM_DEBUG_KMS("failed unregister_framebuffer() - %d\n",
+ ret);
+
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+
+ framebuffer_release(info);
+ }
+
+ if (helper->fb)
+ drm_framebuffer_unreference(helper->fb);
+
+ drm_fb_helper_fini(helper);
+}
diff --git a/drivers/staging/android/binder.h b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h
index eb0834656df..50432e9b5b3 100644
--- a/drivers/staging/android/binder.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h
@@ -1,10 +1,6 @@
/*
- * Copyright (C) 2008 Google, Inc.
- *
- * Based on, but no longer compatible with, the original
- * OpenBinder.org binder driver interface, which is:
- *
- * Copyright (c) 2005 Palmsource, Inc.
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author:Mark Yao <mark.yao@rock-chips.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -14,17 +10,12 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
*/
-#ifndef _LINUX_BINDER_H
-#define _LINUX_BINDER_H
-
-#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
-#define BINDER_IPC_32BIT 1
-#endif
-
-#include "uapi/binder.h"
+#ifndef _ROCKCHIP_DRM_FBDEV_H
+#define _ROCKCHIP_DRM_FBDEV_H
-#endif /* _LINUX_BINDER_H */
+int rockchip_drm_fbdev_init(struct drm_device *dev);
+void rockchip_drm_fbdev_fini(struct drm_device *dev);
+#endif /* _ROCKCHIP_DRM_FBDEV_H */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
new file mode 100644
index 00000000000..bc98a227dc7
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -0,0 +1,294 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author:Mark Yao <mark.yao@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drm.h>
+#include <drm/drmP.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_vma_manager.h>
+
+#include <linux/dma-attrs.h>
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_gem.h"
+
+static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj)
+{
+ struct drm_gem_object *obj = &rk_obj->base;
+ struct drm_device *drm = obj->dev;
+
+ init_dma_attrs(&rk_obj->dma_attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &rk_obj->dma_attrs);
+
+ /* TODO(djkurtz): Use DMA_ATTR_NO_KERNEL_MAPPING except for fbdev */
+ rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
+ &rk_obj->dma_addr, GFP_KERNEL,
+ &rk_obj->dma_attrs);
+ if (IS_ERR(rk_obj->kvaddr)) {
+ int ret = PTR_ERR(rk_obj->kvaddr);
+
+ DRM_ERROR("failed to allocate %#x byte dma buffer, %d",
+ obj->size, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
+{
+ struct drm_gem_object *obj = &rk_obj->base;
+ struct drm_device *drm = obj->dev;
+
+ dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_addr,
+ &rk_obj->dma_attrs);
+}
+
+int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
+ struct vm_area_struct *vma)
+{
+ struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
+ struct drm_device *drm = obj->dev;
+ unsigned long vm_size;
+
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ vm_size = vma->vm_end - vma->vm_start;
+
+ if (vm_size > obj->size)
+ return -EINVAL;
+
+ return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
+ obj->size, &rk_obj->dma_attrs);
+}
+
+/* drm driver mmap file operations */
+int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->minor->dev;
+ struct drm_gem_object *obj;
+ struct drm_vma_offset_node *node;
+ int ret;
+
+ if (drm_device_is_unplugged(dev))
+ return -ENODEV;
+
+ mutex_lock(&dev->struct_mutex);
+
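+ /* translate the fake mmap offset back into its GEM object */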
+ node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
+ vma->vm_pgoff,
+ vma_pages(vma));
+ if (!node) {
+ mutex_unlock(&dev->struct_mutex);
+ DRM_ERROR("failed to find vma node.\n");
+ return -EINVAL;
+ } else if (!drm_vma_node_is_allowed(node, filp)) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EACCES;
+ }
+
+ obj = container_of(node, struct drm_gem_object, vma_node);
+ ret = rockchip_gem_mmap_buf(obj, vma);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+struct rockchip_gem_object *
+ rockchip_gem_create_object(struct drm_device *drm, unsigned int size)
+{
+ struct rockchip_gem_object *rk_obj;
+ struct drm_gem_object *obj;
+ int ret;
+
+ size = round_up(size, PAGE_SIZE);
+
+ rk_obj = kzalloc(sizeof(*rk_obj), GFP_KERNEL);
+ if (!rk_obj)
+ return ERR_PTR(-ENOMEM);
+
+ obj = &rk_obj->base;
+
+ drm_gem_private_object_init(drm, obj, size);
+
+ ret = rockchip_gem_alloc_buf(rk_obj);
+ if (ret)
+ goto err_free_rk_obj;
+
+ return rk_obj;
+
+err_free_rk_obj:
+ kfree(rk_obj);
+ return ERR_PTR(ret);
+}
+
+/*
+ * rockchip_gem_free_object - (struct drm_driver)->gem_free_object callback
+ * function
+ */
+void rockchip_gem_free_object(struct drm_gem_object *obj)
+{
+ struct rockchip_gem_object *rk_obj;
+
+ drm_gem_free_mmap_offset(obj);
+
+ rk_obj = to_rockchip_obj(obj);
+
+ rockchip_gem_free_buf(rk_obj);
+
+ kfree(rk_obj);
+}
+
+/*
+ * rockchip_gem_create_with_handle - allocate an object with the given
+ * size and create a gem handle on it
+ *
+ * returns a struct rockchip_gem_object* on success or ERR_PTR values
+ * on failure.
+ */
+static struct rockchip_gem_object *
+rockchip_gem_create_with_handle(struct drm_file *file_priv,
+ struct drm_device *drm, unsigned int size,
+ unsigned int *handle)
+{
+ struct rockchip_gem_object *rk_obj;
+ struct drm_gem_object *obj;
+ int ret;
+
+ rk_obj = rockchip_gem_create_object(drm, size);
+ if (IS_ERR(rk_obj))
+ return ERR_CAST(rk_obj);
+
+ obj = &rk_obj->base;
+
+ /*
+ * Allocate an id in the idr table where the object is registered;
+ * the handle returned to userspace is this id.
+ */
+ ret = drm_gem_handle_create(file_priv, obj, handle);
+ if (ret)
+ goto err_handle_create;
+
+ /* drop reference from allocate - handle holds it now. */
+ drm_gem_object_unreference_unlocked(obj);
+
+ return rk_obj;
+
+err_handle_create:
+ rockchip_gem_free_object(obj);
+
+ return ERR_PTR(ret);
+}
+
+int rockchip_gem_dumb_map_offset(struct drm_file *file_priv,
+ struct drm_device *dev, uint32_t handle,
+ uint64_t *offset)
+{
+ struct drm_gem_object *obj;
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+
+ obj = drm_gem_object_lookup(dev, file_priv, handle);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object.\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto out;
+
+ *offset = drm_vma_node_offset_addr(&obj->vma_node);
+ DRM_DEBUG_KMS("offset = 0x%llx\n", *offset);
+
+out:
+ drm_gem_object_unreference(obj);
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+/*
+ * rockchip_gem_dumb_create - (struct drm_driver)->dumb_create callback
+ * function
+ *
+ * This aligns the pitch and size arguments to the minimum required. Wrap
+ * this in your own function if you need stricter alignment.
+ */
+int rockchip_gem_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct rockchip_gem_object *rk_obj;
+ int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+
+ /*
+ * align to 64 bytes since Mali requires it.
+ */
+ min_pitch = ALIGN(min_pitch, 64);
+
+ if (args->pitch < min_pitch)
+ args->pitch = min_pitch;
+
+ if (args->size < args->pitch * args->height)
+ args->size = args->pitch * args->height;
+
+ rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
+ &args->handle);
+
+ return PTR_ERR_OR_ZERO(rk_obj);
+}
+
+/*
+ * Allocate a sg_table for this GEM object.
+ * Note: Both the table's contents, and the sg_table itself must be freed by
+ * the caller.
+ * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
+ */
+struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+ struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
+ struct drm_device *drm = obj->dev;
+ struct sg_table *sgt;
+ int ret;
+
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return ERR_PTR(-ENOMEM);
+
+ ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr,
+ rk_obj->dma_addr, obj->size,
+ &rk_obj->dma_attrs);
+ if (ret) {
+ DRM_ERROR("failed to allocate sgt, %d\n", ret);
+ kfree(sgt);
+ return ERR_PTR(ret);
+ }
+
+ return sgt;
+}
+
+void *rockchip_gem_prime_vmap(struct drm_gem_object *obj)
+{
+ struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
+
+ return rk_obj->kvaddr;
+}
+
+void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+ /* Nothing to do */
+}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
new file mode 100644
index 00000000000..67bcebe9000
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author:Mark Yao <mark.yao@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ROCKCHIP_DRM_GEM_H
+#define _ROCKCHIP_DRM_GEM_H
+
+#define to_rockchip_obj(x) container_of(x, struct rockchip_gem_object, base)
+
+struct rockchip_gem_object {
+ struct drm_gem_object base;
+ unsigned int flags;
+
+ void *kvaddr;
+ dma_addr_t dma_addr;
+ struct dma_attrs dma_attrs;
+};
+
+struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj);
+struct drm_gem_object *
+rockchip_gem_prime_import_sg_table(struct drm_device *dev, size_t size,
+ struct sg_table *sgt);
+void *rockchip_gem_prime_vmap(struct drm_gem_object *obj);
+void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+
+/* drm driver mmap file operations */
+int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/* mmap a gem object to userspace. */
+int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
+ struct vm_area_struct *vma);
+
+struct rockchip_gem_object *
+ rockchip_gem_create_object(struct drm_device *drm, unsigned int size);
+
+void rockchip_gem_free_object(struct drm_gem_object *obj);
+
+int rockchip_gem_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int rockchip_gem_dumb_map_offset(struct drm_file *file_priv,
+ struct drm_device *dev, uint32_t handle,
+ uint64_t *offset);
+#endif /* _ROCKCHIP_DRM_GEM_H */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
new file mode 100644
index 00000000000..e7ca25b3fb3
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -0,0 +1,1455 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author:Mark Yao <mark.yao@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drm.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/component.h>
+
+#include <linux/reset.h>
+#include <linux/delay.h>
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_gem.h"
+#include "rockchip_drm_fb.h"
+#include "rockchip_drm_vop.h"
+
+#define VOP_REG(off, _mask, s) \
+ {.offset = off, \
+ .mask = _mask, \
+ .shift = s,}
+
+#define __REG_SET_RELAXED(x, off, mask, shift, v) \
+ vop_mask_write_relaxed(x, off, (mask) << shift, (v) << shift)
+#define __REG_SET_NORMAL(x, off, mask, shift, v) \
+ vop_mask_write(x, off, (mask) << shift, (v) << shift)
+
+#define REG_SET(x, base, reg, v, mode) \
+ __REG_SET_##mode(x, base + reg.offset, reg.mask, reg.shift, v)
+
+#define VOP_WIN_SET(x, win, name, v) \
+ REG_SET(x, win->base, win->phy->name, v, RELAXED)
+#define VOP_CTRL_SET(x, name, v) \
+ REG_SET(x, 0, (x)->data->ctrl->name, v, NORMAL)
+
+#define VOP_WIN_GET(x, win, name) \
+ vop_read_reg(x, win->base, &win->phy->name)
+
+#define VOP_WIN_GET_YRGBADDR(vop, win) \
+ vop_readl(vop, win->base + win->phy->yrgb_mst.offset)
+
+#define to_vop(x) container_of(x, struct vop, crtc)
+#define to_vop_win(x) container_of(x, struct vop_win, base)
+
+struct vop_win_state {
+ struct list_head head;
+ struct drm_framebuffer *fb;
+ dma_addr_t yrgb_mst;
+ struct drm_pending_vblank_event *event;
+};
+
+struct vop_win {
+ struct drm_plane base;
+ const struct vop_win_data *data;
+ struct vop *vop;
+
+ struct list_head pending;
+ struct vop_win_state *active;
+};
+
+struct vop {
+ struct drm_crtc crtc;
+ struct device *dev;
+ struct drm_device *drm_dev;
+ unsigned int dpms;
+
+ int connector_type;
+ int connector_out_mode;
+
+ /* protects the vsync work state (pending fb updates) below */
+ struct mutex vsync_mutex;
+ bool vsync_work_pending;
+
+ const struct vop_data *data;
+
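+ /* shadow copy of the registers, used for read-modify-write updates */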
+ uint32_t *regsbak;
+ void __iomem *regs;
+
+ /* length of the mapped vop register region */
+ uint32_t len;
+
+ /* only one task at a time is allowed to configure the registers */
+ spinlock_t reg_lock;
+ /* lock vop irq reg */
+ spinlock_t irq_lock;
+
+ unsigned int irq;
+
+ /* vop AHB clk */
+ struct clk *hclk;
+ /* vop dclk */
+ struct clk *dclk;
+ /* vop shared memory (AXI) clk */
+ struct clk *aclk;
+
+ /* vop dclk reset */
+ struct reset_control *dclk_rst;
+
+ int pipe;
+
+ struct vop_win win[];
+};
+
+enum vop_data_format {
+ VOP_FMT_ARGB8888 = 0,
+ VOP_FMT_RGB888,
+ VOP_FMT_RGB565,
+ VOP_FMT_YUV420SP = 4,
+ VOP_FMT_YUV422SP,
+ VOP_FMT_YUV444SP,
+};
+
+struct vop_reg_data {
+ uint32_t offset;
+ uint32_t value;
+};
+
+struct vop_reg {
+ uint32_t offset;
+ uint32_t shift;
+ uint32_t mask;
+};
+
+struct vop_ctrl {
+ struct vop_reg standby;
+ struct vop_reg data_blank;
+ struct vop_reg gate_en;
+ struct vop_reg mmu_en;
+ struct vop_reg rgb_en;
+ struct vop_reg edp_en;
+ struct vop_reg hdmi_en;
+ struct vop_reg mipi_en;
+ struct vop_reg out_mode;
+ struct vop_reg dither_down;
+ struct vop_reg dither_up;
+ struct vop_reg pin_pol;
+
+ struct vop_reg htotal_pw;
+ struct vop_reg hact_st_end;
+ struct vop_reg vtotal_pw;
+ struct vop_reg vact_st_end;
+ struct vop_reg hpost_st_end;
+ struct vop_reg vpost_st_end;
+};
+
+struct vop_win_phy {
+ const uint32_t *data_formats;
+ uint32_t nformats;
+
+ struct vop_reg enable;
+ struct vop_reg format;
+ struct vop_reg act_info;
+ struct vop_reg dsp_info;
+ struct vop_reg dsp_st;
+ struct vop_reg yrgb_mst;
+ struct vop_reg uv_mst;
+ struct vop_reg yrgb_vir;
+ struct vop_reg uv_vir;
+
+ struct vop_reg dst_alpha_ctl;
+ struct vop_reg src_alpha_ctl;
+};
+
+struct vop_win_data {
+ uint32_t base;
+ const struct vop_win_phy *phy;
+ enum drm_plane_type type;
+};
+
+struct vop_data {
+ const struct vop_reg_data *init_table;
+ unsigned int table_size;
+ const struct vop_ctrl *ctrl;
+ const struct vop_win_data *win;
+ unsigned int win_size;
+};
+
+static const uint32_t formats_01[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV16,
+ DRM_FORMAT_NV24,
+};
+
+static const uint32_t formats_234[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGB565,
+};
+
+static const struct vop_win_phy win01_data = {
+ .data_formats = formats_01,
+ .nformats = ARRAY_SIZE(formats_01),
+ .enable = VOP_REG(WIN0_CTRL0, 0x1, 0),
+ .format = VOP_REG(WIN0_CTRL0, 0x7, 1),
+ .act_info = VOP_REG(WIN0_ACT_INFO, 0x1fff1fff, 0),
+ .dsp_info = VOP_REG(WIN0_DSP_INFO, 0x0fff0fff, 0),
+ .dsp_st = VOP_REG(WIN0_DSP_ST, 0x1fff1fff, 0),
+ .yrgb_mst = VOP_REG(WIN0_YRGB_MST, 0xffffffff, 0),
+ .uv_mst = VOP_REG(WIN0_CBR_MST, 0xffffffff, 0),
+ .yrgb_vir = VOP_REG(WIN0_VIR, 0x3fff, 0),
+ .uv_vir = VOP_REG(WIN0_VIR, 0x3fff, 16),
+ .src_alpha_ctl = VOP_REG(WIN0_SRC_ALPHA_CTRL, 0xff, 0),
+ .dst_alpha_ctl = VOP_REG(WIN0_DST_ALPHA_CTRL, 0xff, 0),
+};
+
+static const struct vop_win_phy win23_data = {
+ .data_formats = formats_234,
+ .nformats = ARRAY_SIZE(formats_234),
+ .enable = VOP_REG(WIN2_CTRL0, 0x1, 0),
+ .format = VOP_REG(WIN2_CTRL0, 0x7, 1),
+ .dsp_info = VOP_REG(WIN2_DSP_INFO0, 0x0fff0fff, 0),
+ .dsp_st = VOP_REG(WIN2_DSP_ST0, 0x1fff1fff, 0),
+ .yrgb_mst = VOP_REG(WIN2_MST0, 0xffffffff, 0),
+ .yrgb_vir = VOP_REG(WIN2_VIR0_1, 0x1fff, 0),
+ .src_alpha_ctl = VOP_REG(WIN2_SRC_ALPHA_CTRL, 0xff, 0),
+ .dst_alpha_ctl = VOP_REG(WIN2_DST_ALPHA_CTRL, 0xff, 0),
+};
+
+static const struct vop_win_phy cursor_data = {
+ .data_formats = formats_234,
+ .nformats = ARRAY_SIZE(formats_234),
+ .enable = VOP_REG(HWC_CTRL0, 0x1, 0),
+ .format = VOP_REG(HWC_CTRL0, 0x7, 1),
+ .dsp_st = VOP_REG(HWC_DSP_ST, 0x1fff1fff, 0),
+ .yrgb_mst = VOP_REG(HWC_MST, 0xffffffff, 0),
+};
+
+static const struct vop_ctrl ctrl_data = {
+ .standby = VOP_REG(SYS_CTRL, 0x1, 22),
+ .gate_en = VOP_REG(SYS_CTRL, 0x1, 23),
+ .mmu_en = VOP_REG(SYS_CTRL, 0x1, 20),
+ .rgb_en = VOP_REG(SYS_CTRL, 0x1, 12),
+ .hdmi_en = VOP_REG(SYS_CTRL, 0x1, 13),
+ .edp_en = VOP_REG(SYS_CTRL, 0x1, 14),
+ .mipi_en = VOP_REG(SYS_CTRL, 0x1, 15),
+ .dither_down = VOP_REG(DSP_CTRL1, 0xf, 1),
+ .dither_up = VOP_REG(DSP_CTRL1, 0x1, 6),
+ .data_blank = VOP_REG(DSP_CTRL0, 0x1, 19),
+ .out_mode = VOP_REG(DSP_CTRL0, 0xf, 0),
+ .pin_pol = VOP_REG(DSP_CTRL0, 0xf, 4),
+ .htotal_pw = VOP_REG(DSP_HTOTAL_HS_END, 0x1fff1fff, 0),
+ .hact_st_end = VOP_REG(DSP_HACT_ST_END, 0x1fff1fff, 0),
+ .vtotal_pw = VOP_REG(DSP_VTOTAL_VS_END, 0x1fff1fff, 0),
+ .vact_st_end = VOP_REG(DSP_VACT_ST_END, 0x1fff1fff, 0),
+ .hpost_st_end = VOP_REG(POST_DSP_HACT_INFO, 0x1fff1fff, 0),
+ .vpost_st_end = VOP_REG(POST_DSP_VACT_INFO, 0x1fff1fff, 0),
+};
+
+static const struct vop_reg_data vop_init_reg_table[] = {
+ {SYS_CTRL, 0x00c00000},
+ {DSP_CTRL0, 0x00000000},
+ {WIN0_CTRL0, 0x00000080},
+ {WIN1_CTRL0, 0x00000080},
+};
+
+/*
+ * Note: rk3288 has a dedicated 'cursor' window; however, that window requires
+ * special support to get alpha blending working. For now, just use overlay
+ * window 1 for the drm cursor.
+ */
+static const struct vop_win_data rk3288_vop_win_data[] = {
+ { .base = 0x00, .phy = &win01_data, .type = DRM_PLANE_TYPE_PRIMARY },
+ { .base = 0x40, .phy = &win01_data, .type = DRM_PLANE_TYPE_CURSOR },
+ { .base = 0x00, .phy = &win23_data, .type = DRM_PLANE_TYPE_OVERLAY },
+ { .base = 0x50, .phy = &win23_data, .type = DRM_PLANE_TYPE_OVERLAY },
+ { .base = 0x00, .phy = &cursor_data, .type = DRM_PLANE_TYPE_OVERLAY },
+};
+
+static const struct vop_data rk3288_vop = {
+ .init_table = vop_init_reg_table,
+ .table_size = ARRAY_SIZE(vop_init_reg_table),
+ .ctrl = &ctrl_data,
+ .win = rk3288_vop_win_data,
+ .win_size = ARRAY_SIZE(rk3288_vop_win_data),
+};
+
+static const struct of_device_id vop_driver_dt_match[] = {
+ { .compatible = "rockchip,rk3288-vop",
+ .data = &rk3288_vop },
+ {},
+};
+
+static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v)
+{
+ writel(v, vop->regs + offset);
+ vop->regsbak[offset >> 2] = v;
+}
+
+static inline uint32_t vop_readl(struct vop *vop, uint32_t offset)
+{
+ return readl(vop->regs + offset);
+}
+
+static inline uint32_t vop_read_reg(struct vop *vop, uint32_t base,
+ const struct vop_reg *reg)
+{
+ return (vop_readl(vop, base + reg->offset) >> reg->shift) & reg->mask;
+}
+
+static inline void vop_cfg_done(struct vop *vop)
+{
+ writel(0x01, vop->regs + REG_CFG_DONE);
+}
+
+static inline void vop_mask_write(struct vop *vop, uint32_t offset,
+ uint32_t mask, uint32_t v)
+{
+ if (mask) {
+ uint32_t cached_val = vop->regsbak[offset >> 2];
+
+ cached_val = (cached_val & ~mask) | v;
+ writel(cached_val, vop->regs + offset);
+ vop->regsbak[offset >> 2] = cached_val;
+ }
+}
+
+static inline void vop_mask_write_relaxed(struct vop *vop, uint32_t offset,
+ uint32_t mask, uint32_t v)
+{
+ if (mask) {
+ uint32_t cached_val = vop->regsbak[offset >> 2];
+
+ cached_val = (cached_val & ~mask) | v;
+ writel_relaxed(cached_val, vop->regs + offset);
+ vop->regsbak[offset >> 2] = cached_val;
+ }
+}
+
+static enum vop_data_format vop_convert_format(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ return VOP_FMT_ARGB8888;
+ case DRM_FORMAT_RGB888:
+ return VOP_FMT_RGB888;
+ case DRM_FORMAT_RGB565:
+ return VOP_FMT_RGB565;
+ case DRM_FORMAT_NV12:
+ return VOP_FMT_YUV420SP;
+ case DRM_FORMAT_NV16:
+ return VOP_FMT_YUV422SP;
+ case DRM_FORMAT_NV24:
+ return VOP_FMT_YUV444SP;
+ default:
+ DRM_ERROR("unsupport format[%08x]\n", format);
+ return -EINVAL;
+ }
+}
+
+static bool is_alpha_support(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_ARGB8888:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void vop_enable(struct drm_crtc *crtc)
+{
+ struct vop *vop = to_vop(crtc);
+ int ret;
+
+ ret = clk_enable(vop->hclk);
+ if (ret < 0) {
+ dev_err(vop->dev, "failed to enable hclk - %d\n", ret);
+ return;
+ }
+
+ ret = clk_enable(vop->dclk);
+ if (ret < 0) {
+ dev_err(vop->dev, "failed to enable dclk - %d\n", ret);
+ goto err_disable_hclk;
+ }
+
+ ret = clk_enable(vop->aclk);
+ if (ret < 0) {
+ dev_err(vop->dev, "failed to enable aclk - %d\n", ret);
+ goto err_disable_dclk;
+ }
+
+ /*
+ * Slave iommu shares power, irq and clock with vop. It was associated
+ * automatically with this master device via common driver code.
+ * Now that we have enabled the clock we attach it to the shared drm
+ * mapping.
+ */
+ ret = rockchip_drm_dma_attach_device(vop->drm_dev, vop->dev);
+ if (ret) {
+ dev_err(vop->dev, "failed to attach dma mapping, %d\n", ret);
+ goto err_disable_aclk;
+ }
+
+ spin_lock(&vop->reg_lock);
+
+ VOP_CTRL_SET(vop, standby, 0);
+
+ spin_unlock(&vop->reg_lock);
+
+ enable_irq(vop->irq);
+
+ drm_vblank_on(vop->drm_dev, vop->pipe);
+
+ return;
+
+err_disable_aclk:
+ clk_disable(vop->aclk);
+err_disable_dclk:
+ clk_disable(vop->dclk);
+err_disable_hclk:
+ clk_disable(vop->hclk);
+}
+
+static void vop_disable(struct drm_crtc *crtc)
+{
+ struct vop *vop = to_vop(crtc);
+
+ drm_vblank_off(crtc->dev, vop->pipe);
+
+ disable_irq(vop->irq);
+
+ /*
+ * TODO: Since standby doesn't take effect until the next vblank,
+ * when we turn off dclk below, the vop is probably still active.
+ */
+ spin_lock(&vop->reg_lock);
+
+ VOP_CTRL_SET(vop, standby, 1);
+
+ spin_unlock(&vop->reg_lock);
+ /*
+ * Disable dclk to stop frame scanout, so that we can safely detach
+ * the iommu.
+ */
+ clk_disable(vop->dclk);
+
+ rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev);
+
+ clk_disable(vop->aclk);
+ clk_disable(vop->hclk);
+}
+
+/*
+ * Caller must hold vsync_mutex.
+ */
+static struct drm_framebuffer *vop_win_last_pending_fb(struct vop_win *vop_win)
+{
+ struct vop_win_state *last;
+ struct vop_win_state *active = vop_win->active;
+
+ if (list_empty(&vop_win->pending))
+ return active ? active->fb : NULL;
+
+ last = list_last_entry(&vop_win->pending, struct vop_win_state, head);
+ return last ? last->fb : NULL;
+}
+
+/*
+ * Caller must hold vsync_mutex.
+ */
+static int vop_win_queue_fb(struct vop_win *vop_win,
+ struct drm_framebuffer *fb, dma_addr_t yrgb_mst,
+ struct drm_pending_vblank_event *event)
+{
+ struct vop_win_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ state->fb = fb;
+ state->yrgb_mst = yrgb_mst;
+ state->event = event;
+
+ list_add_tail(&state->head, &vop_win->pending);
+
+ return 0;
+}
+
+static int vop_update_plane_event(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x,
+ int crtc_y, unsigned int crtc_w,
+ unsigned int crtc_h, uint32_t src_x,
+ uint32_t src_y, uint32_t src_w,
+ uint32_t src_h,
+ struct drm_pending_vblank_event *event)
+{
+ struct vop_win *vop_win = to_vop_win(plane);
+ const struct vop_win_data *win = vop_win->data;
+ struct vop *vop = to_vop(crtc);
+ struct drm_gem_object *obj;
+ struct rockchip_gem_object *rk_obj;
+ unsigned long offset;
+ unsigned int actual_w;
+ unsigned int actual_h;
+ unsigned int dsp_stx;
+ unsigned int dsp_sty;
+ unsigned int y_vir_stride;
+ dma_addr_t yrgb_mst;
+ enum vop_data_format format;
+ uint32_t val;
+ bool is_alpha;
+ bool visible;
+ int ret;
+ struct drm_rect dest = {
+ .x1 = crtc_x,
+ .y1 = crtc_y,
+ .x2 = crtc_x + crtc_w,
+ .y2 = crtc_y + crtc_h,
+ };
+ struct drm_rect src = {
+ /* 16.16 fixed point */
+ .x1 = src_x,
+ .y1 = src_y,
+ .x2 = src_x + src_w,
+ .y2 = src_y + src_h,
+ };
+ const struct drm_rect clip = {
+ .x2 = crtc->mode.hdisplay,
+ .y2 = crtc->mode.vdisplay,
+ };
+ bool can_position = plane->type != DRM_PLANE_TYPE_PRIMARY;
+
+ ret = drm_plane_helper_check_update(plane, crtc, fb,
+ &src, &dest, &clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ can_position, false, &visible);
+ if (ret)
+ return ret;
+
+ if (!visible)
+ return 0;
+
+ is_alpha = is_alpha_support(fb->pixel_format);
+ format = vop_convert_format(fb->pixel_format);
+ if (format < 0)
+ return format;
+
+ obj = rockchip_fb_get_gem_obj(fb, 0);
+ if (!obj) {
+ DRM_ERROR("fail to get rockchip gem object from framebuffer\n");
+ return -EINVAL;
+ }
+
+ rk_obj = to_rockchip_obj(obj);
+
+ actual_w = (src.x2 - src.x1) >> 16;
+ actual_h = (src.y2 - src.y1) >> 16;
+ crtc_x = max(0, crtc_x);
+ crtc_y = max(0, crtc_y);
+
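+ /* display start position, counted from the start of hsync/vsync */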
+ dsp_stx = crtc_x + crtc->mode.htotal - crtc->mode.hsync_start;
+ dsp_sty = crtc_y + crtc->mode.vtotal - crtc->mode.vsync_start;
+
+ offset = (src.x1 >> 16) * (fb->bits_per_pixel >> 3);
+ offset += (src.y1 >> 16) * fb->pitches[0];
+ yrgb_mst = rk_obj->dma_addr + offset;
+
+ y_vir_stride = fb->pitches[0] / (fb->bits_per_pixel >> 3);
+
+ /*
+ * If this plane update changes the plane's framebuffer (or, more
+ * precisely, if this update uses a different framebuffer than the
+ * last update), enqueue it so we can track when it completes.
+ *
+ * Only once we discover that this update has completed can we
+ * unreference any previous framebuffers.
+ */
+ mutex_lock(&vop->vsync_mutex);
+ if (fb != vop_win_last_pending_fb(vop_win)) {
+ ret = drm_vblank_get(plane->dev, vop->pipe);
+ if (ret) {
+ DRM_ERROR("failed to get vblank, %d\n", ret);
+ mutex_unlock(&vop->vsync_mutex);
+ return ret;
+ }
+
+ drm_framebuffer_reference(fb);
+
+ ret = vop_win_queue_fb(vop_win, fb, yrgb_mst, event);
+ if (ret) {
+ drm_vblank_put(plane->dev, vop->pipe);
+ mutex_unlock(&vop->vsync_mutex);
+ return ret;
+ }
+
+ vop->vsync_work_pending = true;
+ }
+ mutex_unlock(&vop->vsync_mutex);
+
+ spin_lock(&vop->reg_lock);
+
+ VOP_WIN_SET(vop, win, format, format);
+ VOP_WIN_SET(vop, win, yrgb_vir, y_vir_stride);
+ VOP_WIN_SET(vop, win, yrgb_mst, yrgb_mst);
+ val = (actual_h - 1) << 16;
+ val |= (actual_w - 1) & 0xffff;
+ VOP_WIN_SET(vop, win, act_info, val);
+ VOP_WIN_SET(vop, win, dsp_info, val);
+ val = (dsp_sty - 1) << 16;
+ val |= (dsp_stx - 1) & 0xffff;
+ VOP_WIN_SET(vop, win, dsp_st, val);
+
+ if (is_alpha) {
+ VOP_WIN_SET(vop, win, dst_alpha_ctl,
+ DST_FACTOR_M0(ALPHA_SRC_INVERSE));
+ val = SRC_ALPHA_EN(1) | SRC_COLOR_M0(ALPHA_SRC_PRE_MUL) |
+ SRC_ALPHA_M0(ALPHA_STRAIGHT) |
+ SRC_BLEND_M0(ALPHA_PER_PIX) |
+ SRC_ALPHA_CAL_M0(ALPHA_NO_SATURATION) |
+ SRC_FACTOR_M0(ALPHA_ONE);
+ VOP_WIN_SET(vop, win, src_alpha_ctl, val);
+ } else {
+ VOP_WIN_SET(vop, win, src_alpha_ctl, SRC_ALPHA_EN(0));
+ }
+
+ VOP_WIN_SET(vop, win, enable, 1);
+
+ vop_cfg_done(vop);
+ spin_unlock(&vop->reg_lock);
+
+ return 0;
+}
+
+static int vop_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y, uint32_t src_w,
+ uint32_t src_h)
+{
+ return vop_update_plane_event(plane, crtc, fb, crtc_x, crtc_y, crtc_w,
+ crtc_h, src_x, src_y, src_w, src_h,
+ NULL);
+}
+
+static int vop_update_primary_plane(struct drm_crtc *crtc,
+ struct drm_pending_vblank_event *event)
+{
+ unsigned int crtc_w, crtc_h;
+
+ crtc_w = crtc->primary->fb->width - crtc->x;
+ crtc_h = crtc->primary->fb->height - crtc->y;
+
+ return vop_update_plane_event(crtc->primary, crtc, crtc->primary->fb,
+ 0, 0, crtc_w, crtc_h, crtc->x << 16,
+ crtc->y << 16, crtc_w << 16,
+ crtc_h << 16, event);
+}
+
+static int vop_disable_plane(struct drm_plane *plane)
+{
+ struct vop_win *vop_win = to_vop_win(plane);
+ const struct vop_win_data *win = vop_win->data;
+ struct vop *vop;
+ int ret;
+
+ if (!plane->crtc)
+ return 0;
+
+ vop = to_vop(plane->crtc);
+
+ ret = drm_vblank_get(plane->dev, vop->pipe);
+ if (ret) {
+ DRM_ERROR("failed to get vblank, %d\n", ret);
+ return ret;
+ }
+
+ mutex_lock(&vop->vsync_mutex);
+
+ ret = vop_win_queue_fb(vop_win, NULL, 0, NULL);
+ if (ret) {
+ drm_vblank_put(plane->dev, vop->pipe);
+ mutex_unlock(&vop->vsync_mutex);
+ return ret;
+ }
+
+ vop->vsync_work_pending = true;
+ mutex_unlock(&vop->vsync_mutex);
+
+ spin_lock(&vop->reg_lock);
+ VOP_WIN_SET(vop, win, enable, 0);
+ vop_cfg_done(vop);
+ spin_unlock(&vop->reg_lock);
+
+ return 0;
+}
+
+static void vop_plane_destroy(struct drm_plane *plane)
+{
+ vop_disable_plane(plane);
+ drm_plane_cleanup(plane);
+}
+
+static const struct drm_plane_funcs vop_plane_funcs = {
+ .update_plane = vop_update_plane,
+ .disable_plane = vop_disable_plane,
+ .destroy = vop_plane_destroy,
+};
+
+int rockchip_drm_crtc_mode_config(struct drm_crtc *crtc,
+ int connector_type,
+ int out_mode)
+{
+ struct vop *vop = to_vop(crtc);
+
+ vop->connector_type = connector_type;
+ vop->connector_out_mode = out_mode;
+
+ return 0;
+}
+
+static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct vop *vop = to_vop(crtc);
+ unsigned long flags;
+
+ if (vop->dpms != DRM_MODE_DPMS_ON)
+ return -EPERM;
+
+ spin_lock_irqsave(&vop->irq_lock, flags);
+
+ vop_mask_write(vop, INTR_CTRL0, FS_INTR_MASK, FS_INTR_EN(1));
+
+ spin_unlock_irqrestore(&vop->irq_lock, flags);
+
+ return 0;
+}
+
+static void vop_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct vop *vop = to_vop(crtc);
+ unsigned long flags;
+
+ if (vop->dpms != DRM_MODE_DPMS_ON)
+ return;
+ spin_lock_irqsave(&vop->irq_lock, flags);
+ vop_mask_write(vop, INTR_CTRL0, FS_INTR_MASK, FS_INTR_EN(0));
+ spin_unlock_irqrestore(&vop->irq_lock, flags);
+}
+
+static const struct rockchip_crtc_funcs private_crtc_funcs = {
+ .enable_vblank = vop_crtc_enable_vblank,
+ .disable_vblank = vop_crtc_disable_vblank,
+};
+
+static void vop_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct vop *vop = to_vop(crtc);
+
+ DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode);
+
+ if (vop->dpms == mode) {
+ DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n");
+ return;
+ }
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ vop_enable(crtc);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ vop_disable(crtc);
+ break;
+ default:
+ DRM_DEBUG_KMS("unspecified mode %d\n", mode);
+ break;
+ }
+
+ vop->dpms = mode;
+}
+
+static void vop_crtc_prepare(struct drm_crtc *crtc)
+{
+ vop_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ if (adjusted_mode->htotal == 0 || adjusted_mode->vtotal == 0)
+ return false;
+
+ return true;
+}
+
+static int vop_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ int ret;
+
+ crtc->x = x;
+ crtc->y = y;
+
+ ret = vop_update_primary_plane(crtc, NULL);
+ if (ret < 0) {
+ DRM_ERROR("fail to update plane\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vop_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y, struct drm_framebuffer *fb)
+{
+ struct vop *vop = to_vop(crtc);
+ u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
+ u16 hdisplay = adjusted_mode->hdisplay;
+ u16 htotal = adjusted_mode->htotal;
+ u16 hact_st = adjusted_mode->htotal - adjusted_mode->hsync_start;
+ u16 hact_end = hact_st + hdisplay;
+ u16 vdisplay = adjusted_mode->vdisplay;
+ u16 vtotal = adjusted_mode->vtotal;
+ u16 vsync_len = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
+ u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start;
+ u16 vact_end = vact_st + vdisplay;
+ int ret;
+ uint32_t val;
+
+ /*
+ * Disable dclk to stop frame scanout, so that the mode can be
+ * configured and the iommu enabled safely.
+ */
+ clk_disable(vop->dclk);
+
+ switch (vop->connector_type) {
+ case DRM_MODE_CONNECTOR_LVDS:
+ VOP_CTRL_SET(vop, rgb_en, 1);
+ break;
+ case DRM_MODE_CONNECTOR_eDP:
+ VOP_CTRL_SET(vop, edp_en, 1);
+ break;
+ case DRM_MODE_CONNECTOR_HDMIA:
+ VOP_CTRL_SET(vop, hdmi_en, 1);
+ break;
+ default:
+ DRM_ERROR("unsupport connector_type[%d]\n",
+ vop->connector_type);
+ return -EINVAL;
+ }
+ VOP_CTRL_SET(vop, out_mode, vop->connector_out_mode);
+
+ val = 0x8;
+ val |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0;
+ val |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? (1 << 1) : 0;
+ VOP_CTRL_SET(vop, pin_pol, val);
+
+ VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len);
+ val = hact_st << 16;
+ val |= hact_end;
+ VOP_CTRL_SET(vop, hact_st_end, val);
+ VOP_CTRL_SET(vop, hpost_st_end, val);
+
+ VOP_CTRL_SET(vop, vtotal_pw, (vtotal << 16) | vsync_len);
+ val = vact_st << 16;
+ val |= vact_end;
+ VOP_CTRL_SET(vop, vact_st_end, val);
+ VOP_CTRL_SET(vop, vpost_st_end, val);
+
+ ret = vop_crtc_mode_set_base(crtc, x, y, fb);
+ if (ret)
+ return ret;
+
+ /*
+ * Reset dclk so that all of the mode configuration takes effect and
+ * the clock runs with the correct frame timing.
+ */
+ reset_control_assert(vop->dclk_rst);
+ usleep_range(10, 20);
+ reset_control_deassert(vop->dclk_rst);
+
+ clk_set_rate(vop->dclk, adjusted_mode->clock * 1000);
+ ret = clk_enable(vop->dclk);
+ if (ret < 0) {
+ dev_err(vop->dev, "failed to enable dclk - %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void vop_crtc_commit(struct drm_crtc *crtc)
+{
+}
+
+static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
+ .dpms = vop_crtc_dpms,
+ .prepare = vop_crtc_prepare,
+ .mode_fixup = vop_crtc_mode_fixup,
+ .mode_set = vop_crtc_mode_set,
+ .mode_set_base = vop_crtc_mode_set_base,
+ .commit = vop_crtc_commit,
+};
+
+static int vop_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
+{
+ struct vop *vop = to_vop(crtc);
+ struct drm_framebuffer *old_fb = crtc->primary->fb;
+ int ret;
+
+ /* when the page flip is requested, crtc's dpms should be on */
+ if (vop->dpms > DRM_MODE_DPMS_ON) {
+ DRM_DEBUG("failed page flip request at dpms[%d].\n", vop->dpms);
+ return 0;
+ }
+
+ crtc->primary->fb = fb;
+
+ ret = vop_update_primary_plane(crtc, event);
+ if (ret)
+ crtc->primary->fb = old_fb;
+
+ return ret;
+}
+
+static void vop_win_state_complete(struct vop_win *vop_win,
+ struct vop_win_state *state)
+{
+ struct vop *vop = vop_win->vop;
+ struct drm_crtc *crtc = &vop->crtc;
+ struct drm_device *drm = crtc->dev;
+ unsigned long flags;
+
+ if (state->event) {
+ spin_lock_irqsave(&drm->event_lock, flags);
+ drm_send_vblank_event(drm, -1, state->event);
+ spin_unlock_irqrestore(&drm->event_lock, flags);
+ }
+
+ list_del(&state->head);
+ drm_vblank_put(crtc->dev, vop->pipe);
+}
+
+static void vop_crtc_destroy(struct drm_crtc *crtc)
+{
+ drm_crtc_cleanup(crtc);
+}
+
+static const struct drm_crtc_funcs vop_crtc_funcs = {
+ .set_config = drm_crtc_helper_set_config,
+ .page_flip = vop_crtc_page_flip,
+ .destroy = vop_crtc_destroy,
+};
+
+static bool vop_win_state_is_active(struct vop_win *vop_win,
+ struct vop_win_state *state)
+{
+ bool active = false;
+
+ if (state->fb) {
+ dma_addr_t yrgb_mst;
+
+ /* check yrgb_mst to tell if pending_fb is now front */
+ yrgb_mst = VOP_WIN_GET_YRGBADDR(vop_win->vop, vop_win->data);
+
+ active = (yrgb_mst == state->yrgb_mst);
+ } else {
+ bool enabled;
+
+ /* if enable bit is clear, plane is now disabled */
+ enabled = VOP_WIN_GET(vop_win->vop, vop_win->data, enable);
+
+ active = (enabled == 0);
+ }
+
+ return active;
+}
+
+static void vop_win_state_destroy(struct vop_win_state *state)
+{
+ struct drm_framebuffer *fb = state->fb;
+
+ if (fb)
+ drm_framebuffer_unreference(fb);
+
+ kfree(state);
+}
+
+static void vop_win_update_state(struct vop_win *vop_win)
+{
+ struct vop_win_state *state, *n, *new_active = NULL;
+
+ /* Check if any pending states are now active */
+ list_for_each_entry(state, &vop_win->pending, head)
+ if (vop_win_state_is_active(vop_win, state)) {
+ new_active = state;
+ break;
+ }
+
+ if (!new_active)
+ return;
+
+ /*
+ * Destroy any 'skipped' pending states - states that were queued
+ * before the newly active state.
+ */
+ list_for_each_entry_safe(state, n, &vop_win->pending, head) {
+ if (state == new_active)
+ break;
+ vop_win_state_complete(vop_win, state);
+ vop_win_state_destroy(state);
+ }
+
+ vop_win_state_complete(vop_win, new_active);
+
+ if (vop_win->active)
+ vop_win_state_destroy(vop_win->active);
+ vop_win->active = new_active;
+}
+
+static bool vop_win_has_pending_state(struct vop_win *vop_win)
+{
+ return !list_empty(&vop_win->pending);
+}
+
+static irqreturn_t vop_isr_thread(int irq, void *data)
+{
+ struct vop *vop = data;
+ const struct vop_data *vop_data = vop->data;
+ unsigned int i;
+
+ mutex_lock(&vop->vsync_mutex);
+
+ if (!vop->vsync_work_pending)
+ goto done;
+
+ vop->vsync_work_pending = false;
+
+ for (i = 0; i < vop_data->win_size; i++) {
+ struct vop_win *vop_win = &vop->win[i];
+
+ vop_win_update_state(vop_win);
+ if (vop_win_has_pending_state(vop_win))
+ vop->vsync_work_pending = true;
+ }
+
+done:
+ mutex_unlock(&vop->vsync_mutex);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t vop_isr(int irq, void *data)
+{
+ struct vop *vop = data;
+ uint32_t intr0_reg, active_irqs;
+ unsigned long flags;
+
+ /*
+ * INTR_CTRL0 register has interrupt status, enable and clear bits, we
+ * must hold irq_lock to avoid a race with enable/disable_vblank().
+ */
+ spin_lock_irqsave(&vop->irq_lock, flags);
+ intr0_reg = vop_readl(vop, INTR_CTRL0);
+ active_irqs = intr0_reg & INTR_MASK;
+ /* Clear all active interrupt sources */
+ if (active_irqs)
+ vop_writel(vop, INTR_CTRL0,
+ intr0_reg | (active_irqs << INTR_CLR_SHIFT));
+ spin_unlock_irqrestore(&vop->irq_lock, flags);
+
+ /* This is expected for vop iommu irqs, since the irq is shared */
+ if (!active_irqs)
+ return IRQ_NONE;
+
+ /* Only Frame Start Interrupt is enabled; other irqs are spurious. */
+ if (!(active_irqs & FS_INTR)) {
+ DRM_ERROR("Unknown VOP IRQs: %#02x\n", active_irqs);
+ return IRQ_NONE;
+ }
+
+ drm_handle_vblank(vop->drm_dev, vop->pipe);
+
+ return (vop->vsync_work_pending) ? IRQ_WAKE_THREAD : IRQ_HANDLED;
+}
+
+static int vop_create_crtc(struct vop *vop)
+{
+ const struct vop_data *vop_data = vop->data;
+ struct device *dev = vop->dev;
+ struct drm_device *drm_dev = vop->drm_dev;
+ struct drm_plane *primary = NULL, *cursor = NULL, *plane;
+ struct drm_crtc *crtc = &vop->crtc;
+ struct device_node *port;
+ int ret;
+ int i;
+
+ /*
+ * Create drm_plane for primary and cursor planes first, since we need
+ * to pass them to drm_crtc_init_with_planes, which sets the
+ * "possible_crtcs" to the newly initialized crtc.
+ */
+ for (i = 0; i < vop_data->win_size; i++) {
+ struct vop_win *vop_win = &vop->win[i];
+ const struct vop_win_data *win_data = vop_win->data;
+
+ if (win_data->type != DRM_PLANE_TYPE_PRIMARY &&
+ win_data->type != DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
+ 0, &vop_plane_funcs,
+ win_data->phy->data_formats,
+ win_data->phy->nformats,
+ win_data->type);
+ if (ret) {
+ DRM_ERROR("failed to initialize plane\n");
+ goto err_cleanup_planes;
+ }
+
+ plane = &vop_win->base;
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+ primary = plane;
+ else if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ cursor = plane;
+ }
+
+ ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
+ &vop_crtc_funcs);
+ if (ret)
+ return ret;
+
+ drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs);
+
+ /*
+ * Create drm_planes for overlay windows with possible_crtcs restricted
+ * to the newly created crtc.
+ */
+ for (i = 0; i < vop_data->win_size; i++) {
+ struct vop_win *vop_win = &vop->win[i];
+ const struct vop_win_data *win_data = vop_win->data;
+ unsigned long possible_crtcs = 1 << drm_crtc_index(crtc);
+
+ if (win_data->type != DRM_PLANE_TYPE_OVERLAY)
+ continue;
+
+ ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
+ possible_crtcs,
+ &vop_plane_funcs,
+ win_data->phy->data_formats,
+ win_data->phy->nformats,
+ win_data->type);
+ if (ret) {
+ DRM_ERROR("failed to initialize overlay plane\n");
+ goto err_cleanup_crtc;
+ }
+ }
+
+ port = of_get_child_by_name(dev->of_node, "port");
+ if (!port) {
+ DRM_ERROR("no port node found in %s\n",
+ dev->of_node->full_name);
+ ret = -ENOENT;
+ goto err_cleanup_crtc;
+ }
+
+ crtc->port = port;
+ vop->pipe = drm_crtc_index(crtc);
+ rockchip_register_crtc_funcs(drm_dev, &private_crtc_funcs, vop->pipe);
+
+ return 0;
+
+err_cleanup_crtc:
+ drm_crtc_cleanup(crtc);
+err_cleanup_planes:
+ list_for_each_entry(plane, &drm_dev->mode_config.plane_list, head)
+ drm_plane_cleanup(plane);
+ return ret;
+}
+
+static void vop_destroy_crtc(struct vop *vop)
+{
+ struct drm_crtc *crtc = &vop->crtc;
+
+ rockchip_unregister_crtc_funcs(vop->drm_dev, vop->pipe);
+ of_node_put(crtc->port);
+ drm_crtc_cleanup(crtc);
+}
+
+static int vop_initial(struct vop *vop)
+{
+ const struct vop_data *vop_data = vop->data;
+ const struct vop_reg_data *init_table = vop_data->init_table;
+ struct reset_control *ahb_rst;
+ int i, ret;
+
+ vop->hclk = devm_clk_get(vop->dev, "hclk_vop");
+ if (IS_ERR(vop->hclk)) {
+ dev_err(vop->dev, "failed to get hclk source\n");
+ return PTR_ERR(vop->hclk);
+ }
+ vop->aclk = devm_clk_get(vop->dev, "aclk_vop");
+ if (IS_ERR(vop->aclk)) {
+ dev_err(vop->dev, "failed to get aclk source\n");
+ return PTR_ERR(vop->aclk);
+ }
+ vop->dclk = devm_clk_get(vop->dev, "dclk_vop");
+ if (IS_ERR(vop->dclk)) {
+ dev_err(vop->dev, "failed to get dclk source\n");
+ return PTR_ERR(vop->dclk);
+ }
+
+ ret = clk_prepare(vop->hclk);
+ if (ret < 0) {
+ dev_err(vop->dev, "failed to prepare hclk\n");
+ return ret;
+ }
+
+ ret = clk_prepare(vop->dclk);
+ if (ret < 0) {
+ dev_err(vop->dev, "failed to prepare dclk\n");
+ goto err_unprepare_hclk;
+ }
+
+ ret = clk_prepare(vop->aclk);
+ if (ret < 0) {
+ dev_err(vop->dev, "failed to prepare aclk\n");
+ goto err_unprepare_dclk;
+ }
+
+ /*
+ * Enable hclk, so that we can configure the vop registers.
+ */
+ ret = clk_enable(vop->hclk);
+ if (ret < 0) {
+ dev_err(vop->dev, "failed to prepare aclk\n");
+ goto err_unprepare_aclk;
+ }
+ /*
+ * Perform the ahb reset so that all vop registers return to their
+ * default values.
+ */
+ ahb_rst = devm_reset_control_get(vop->dev, "ahb");
+ if (IS_ERR(ahb_rst)) {
+ dev_err(vop->dev, "failed to get ahb reset\n");
+ ret = PTR_ERR(ahb_rst);
+ goto err_disable_hclk;
+ }
+ reset_control_assert(ahb_rst);
+ usleep_range(10, 20);
+ reset_control_deassert(ahb_rst);
+
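+ /* snapshot the register space to seed the shadow copy */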
+ memcpy(vop->regsbak, vop->regs, vop->len);
+
+ for (i = 0; i < vop_data->table_size; i++)
+ vop_writel(vop, init_table[i].offset, init_table[i].value);
+
+ for (i = 0; i < vop_data->win_size; i++) {
+ const struct vop_win_data *win = &vop_data->win[i];
+
+ VOP_WIN_SET(vop, win, enable, 0);
+ }
+
+ vop_cfg_done(vop);
+
+ /*
+ * Perform the dclk reset so that all of the above configuration
+ * takes effect.
+ */
+ vop->dclk_rst = devm_reset_control_get(vop->dev, "dclk");
+ if (IS_ERR(vop->dclk_rst)) {
+ dev_err(vop->dev, "failed to get dclk reset\n");
+ ret = PTR_ERR(vop->dclk_rst);
+ goto err_unprepare_aclk;
+ }
+ reset_control_assert(vop->dclk_rst);
+ usleep_range(10, 20);
+ reset_control_deassert(vop->dclk_rst);
+
+ clk_disable(vop->hclk);
+
+ vop->dpms = DRM_MODE_DPMS_OFF;
+
+ return 0;
+
+err_disable_hclk:
+ clk_disable(vop->hclk);
+err_unprepare_aclk:
+ clk_unprepare(vop->aclk);
+err_unprepare_dclk:
+ clk_unprepare(vop->dclk);
+err_unprepare_hclk:
+ clk_unprepare(vop->hclk);
+ return ret;
+}
+
+/*
+ * Initialize the vop->win array elements.
+ */
+static void vop_win_init(struct vop *vop)
+{
+ const struct vop_data *vop_data = vop->data;
+ unsigned int i;
+
+ for (i = 0; i < vop_data->win_size; i++) {
+ struct vop_win *vop_win = &vop->win[i];
+ const struct vop_win_data *win_data = &vop_data->win[i];
+
+ vop_win->data = win_data;
+ vop_win->vop = vop;
+ INIT_LIST_HEAD(&vop_win->pending);
+ }
+}
+
+static int vop_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ const struct of_device_id *of_id;
+ const struct vop_data *vop_data;
+ struct drm_device *drm_dev = data;
+ struct vop *vop;
+ struct resource *res;
+ size_t alloc_size;
+ int ret;
+
+ of_id = of_match_device(vop_driver_dt_match, dev);
+ vop_data = of_id->data;
+ if (!vop_data)
+ return -ENODEV;
+
+ /* Allocate vop struct and its vop_win array */
+ alloc_size = sizeof(*vop) + sizeof(*vop->win) * vop_data->win_size;
+ vop = devm_kzalloc(dev, alloc_size, GFP_KERNEL);
+ if (!vop)
+ return -ENOMEM;
+
+ vop->dev = dev;
+ vop->data = vop_data;
+ vop->drm_dev = drm_dev;
+ dev_set_drvdata(dev, vop);
+
+ vop_win_init(vop);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ vop->len = resource_size(res);
+ vop->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(vop->regs))
+ return PTR_ERR(vop->regs);
+
+ vop->regsbak = devm_kzalloc(dev, vop->len, GFP_KERNEL);
+ if (!vop->regsbak)
+ return -ENOMEM;
+
+ ret = vop_initial(vop);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "cannot initial vop dev - err %d\n", ret);
+ return ret;
+ }
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(dev, "cannot find irq for vop\n");
+ return ret;
+ }
+ vop->irq = (unsigned int)ret;
+
+ spin_lock_init(&vop->reg_lock);
+ spin_lock_init(&vop->irq_lock);
+
+ mutex_init(&vop->vsync_mutex);
+
+ ret = devm_request_threaded_irq(dev, vop->irq, vop_isr, vop_isr_thread,
+ IRQF_SHARED, dev_name(dev), vop);
+ if (ret)
+ return ret;
+
+ /* IRQ is initially disabled; it gets enabled in vop_enable() */
+ disable_irq(vop->irq);
+
+ ret = vop_create_crtc(vop);
+ if (ret)
+ return ret;
+
+ pm_runtime_enable(&pdev->dev);
+ return 0;
+}
+
+static void vop_unbind(struct device *dev, struct device *master, void *data)
+{
+ struct vop *vop = dev_get_drvdata(dev);
+
+ pm_runtime_disable(dev);
+ vop_destroy_crtc(vop);
+}
+
+static const struct component_ops vop_component_ops = {
+ .bind = vop_bind,
+ .unbind = vop_unbind,
+};
+
+static int vop_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ if (!dev->of_node) {
+ dev_err(dev, "can't find vop devices\n");
+ return -ENODEV;
+ }
+
+ return component_add(dev, &vop_component_ops);
+}
+
+static int vop_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &vop_component_ops);
+
+ return 0;
+}
+
+struct platform_driver vop_platform_driver = {
+ .probe = vop_probe,
+ .remove = vop_remove,
+ .driver = {
+ .name = "rockchip-vop",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(vop_driver_dt_match),
+ },
+};
+
+module_platform_driver(vop_platform_driver);
+
+MODULE_AUTHOR("Mark Yao <mark.yao@rock-chips.com>");
+MODULE_DESCRIPTION("ROCKCHIP VOP Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
new file mode 100644
index 00000000000..63e9b3a084c
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author:Mark Yao <mark.yao@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ROCKCHIP_DRM_VOP_H
+#define _ROCKCHIP_DRM_VOP_H
+
+/* register definition */
+#define REG_CFG_DONE 0x0000
+#define VERSION_INFO 0x0004
+#define SYS_CTRL 0x0008
+#define SYS_CTRL1 0x000c
+#define DSP_CTRL0 0x0010
+#define DSP_CTRL1 0x0014
+#define DSP_BG 0x0018
+#define MCU_CTRL 0x001c
+#define INTR_CTRL0 0x0020
+#define INTR_CTRL1 0x0024
+#define WIN0_CTRL0 0x0030
+#define WIN0_CTRL1 0x0034
+#define WIN0_COLOR_KEY 0x0038
+#define WIN0_VIR 0x003c
+#define WIN0_YRGB_MST 0x0040
+#define WIN0_CBR_MST 0x0044
+#define WIN0_ACT_INFO 0x0048
+#define WIN0_DSP_INFO 0x004c
+#define WIN0_DSP_ST 0x0050
+#define WIN0_SCL_FACTOR_YRGB 0x0054
+#define WIN0_SCL_FACTOR_CBR 0x0058
+#define WIN0_SCL_OFFSET 0x005c
+#define WIN0_SRC_ALPHA_CTRL 0x0060
+#define WIN0_DST_ALPHA_CTRL 0x0064
+#define WIN0_FADING_CTRL 0x0068
+/* win1 register */
+#define WIN1_CTRL0 0x0070
+#define WIN1_CTRL1 0x0074
+#define WIN1_COLOR_KEY 0x0078
+#define WIN1_VIR 0x007c
+#define WIN1_YRGB_MST 0x0080
+#define WIN1_CBR_MST 0x0084
+#define WIN1_ACT_INFO 0x0088
+#define WIN1_DSP_INFO 0x008c
+#define WIN1_DSP_ST 0x0090
+#define WIN1_SCL_FACTOR_YRGB 0x0094
+#define WIN1_SCL_FACTOR_CBR 0x0098
+#define WIN1_SCL_OFFSET 0x009c
+#define WIN1_SRC_ALPHA_CTRL 0x00a0
+#define WIN1_DST_ALPHA_CTRL 0x00a4
+#define WIN1_FADING_CTRL 0x00a8
+/* win2 register */
+#define WIN2_CTRL0 0x00b0
+#define WIN2_CTRL1 0x00b4
+#define WIN2_VIR0_1 0x00b8
+#define WIN2_VIR2_3 0x00bc
+#define WIN2_MST0 0x00c0
+#define WIN2_DSP_INFO0 0x00c4
+#define WIN2_DSP_ST0 0x00c8
+#define WIN2_COLOR_KEY 0x00cc
+#define WIN2_MST1 0x00d0
+#define WIN2_DSP_INFO1 0x00d4
+#define WIN2_DSP_ST1 0x00d8
+#define WIN2_SRC_ALPHA_CTRL 0x00dc
+#define WIN2_MST2 0x00e0
+#define WIN2_DSP_INFO2 0x00e4
+#define WIN2_DSP_ST2 0x00e8
+#define WIN2_DST_ALPHA_CTRL 0x00ec
+#define WIN2_MST3 0x00f0
+#define WIN2_DSP_INFO3 0x00f4
+#define WIN2_DSP_ST3 0x00f8
+#define WIN2_FADING_CTRL 0x00fc
+/* win3 register */
+#define WIN3_CTRL0 0x0100
+#define WIN3_CTRL1 0x0104
+#define WIN3_VIR0_1 0x0108
+#define WIN3_VIR2_3 0x010c
+#define WIN3_MST0 0x0110
+#define WIN3_DSP_INFO0 0x0114
+#define WIN3_DSP_ST0 0x0118
+#define WIN3_COLOR_KEY 0x011c
+#define WIN3_MST1 0x0120
+#define WIN3_DSP_INFO1 0x0124
+#define WIN3_DSP_ST1 0x0128
+#define WIN3_SRC_ALPHA_CTRL 0x012c
+#define WIN3_MST2 0x0130
+#define WIN3_DSP_INFO2 0x0134
+#define WIN3_DSP_ST2 0x0138
+#define WIN3_DST_ALPHA_CTRL 0x013c
+#define WIN3_MST3 0x0140
+#define WIN3_DSP_INFO3 0x0144
+#define WIN3_DSP_ST3 0x0148
+#define WIN3_FADING_CTRL 0x014c
+/* hwc register */
+#define HWC_CTRL0 0x0150
+#define HWC_CTRL1 0x0154
+#define HWC_MST 0x0158
+#define HWC_DSP_ST 0x015c
+#define HWC_SRC_ALPHA_CTRL 0x0160
+#define HWC_DST_ALPHA_CTRL 0x0164
+#define HWC_FADING_CTRL 0x0168
+/* post process register */
+#define POST_DSP_HACT_INFO 0x0170
+#define POST_DSP_VACT_INFO 0x0174
+#define POST_SCL_FACTOR_YRGB 0x0178
+#define POST_SCL_CTRL 0x0180
+#define POST_DSP_VACT_INFO_F1 0x0184
+#define DSP_HTOTAL_HS_END 0x0188
+#define DSP_HACT_ST_END 0x018c
+#define DSP_VTOTAL_VS_END 0x0190
+#define DSP_VACT_ST_END 0x0194
+#define DSP_VS_ST_END_F1 0x0198
+#define DSP_VACT_ST_END_F1 0x019c
+/* register definition end */
+
+/* interrupt define */
+#define DSP_HOLD_VALID_INTR (1 << 0)
+#define FS_INTR (1 << 1)
+#define LINE_FLAG_INTR (1 << 2)
+#define BUS_ERROR_INTR (1 << 3)
+
+#define INTR_MASK (DSP_HOLD_VALID_INTR | FS_INTR | \
+ LINE_FLAG_INTR | BUS_ERROR_INTR)
+
+#define DSP_HOLD_VALID_INTR_EN(x) ((x) << 4)
+#define FS_INTR_EN(x) ((x) << 5)
+#define LINE_FLAG_INTR_EN(x) ((x) << 6)
+#define BUS_ERROR_INTR_EN(x) ((x) << 7)
+#define DSP_HOLD_VALID_INTR_MASK (1 << 4)
+#define FS_INTR_MASK (1 << 5)
+#define LINE_FLAG_INTR_MASK (1 << 6)
+#define BUS_ERROR_INTR_MASK (1 << 7)
+
+#define INTR_CLR_SHIFT 8
+#define DSP_HOLD_VALID_INTR_CLR (1 << (INTR_CLR_SHIFT + 0))
+#define FS_INTR_CLR (1 << (INTR_CLR_SHIFT + 1))
+#define LINE_FLAG_INTR_CLR (1 << (INTR_CLR_SHIFT + 2))
+#define BUS_ERROR_INTR_CLR (1 << (INTR_CLR_SHIFT + 3))
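+/* i.e. bits 0-3 carry the raw interrupt status, bits 4-7 the enable bits
+ * and bits 8-11 the write-one-to-clear bits (assumed from the shifts above) */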
+
+#define DSP_LINE_NUM(x) (((x) & 0x1fff) << 12)
+#define DSP_LINE_NUM_MASK (0x1fff << 12)
+
+/* src alpha ctrl define */
+#define SRC_FADING_VALUE(x) (((x) & 0xff) << 24)
+#define SRC_GLOBAL_ALPHA(x) (((x) & 0xff) << 16)
+#define SRC_FACTOR_M0(x) (((x) & 0x7) << 6)
+#define SRC_ALPHA_CAL_M0(x) (((x) & 0x1) << 5)
+#define SRC_BLEND_M0(x) (((x) & 0x3) << 3)
+#define SRC_ALPHA_M0(x) (((x) & 0x1) << 2)
+#define SRC_COLOR_M0(x) (((x) & 0x1) << 1)
+#define SRC_ALPHA_EN(x) (((x) & 0x1) << 0)
+/* dst alpha ctrl define */
+#define DST_FACTOR_M0(x) (((x) & 0x7) << 6)
+
+/*
+ * display output interface supported by rockchip lcdc
+ */
+#define ROCKCHIP_OUT_MODE_P888 0
+#define ROCKCHIP_OUT_MODE_P666 1
+#define ROCKCHIP_OUT_MODE_P565 2
+/* for use with a special output interface */
+#define ROCKCHIP_OUT_MODE_AAAA 15
+
+enum alpha_mode {
+ ALPHA_STRAIGHT,
+ ALPHA_INVERSE,
+};
+
+enum global_blend_mode {
+ ALPHA_GLOBAL,
+ ALPHA_PER_PIX,
+ ALPHA_PER_PIX_GLOBAL,
+};
+
+enum alpha_cal_mode {
+ ALPHA_SATURATION,
+ ALPHA_NO_SATURATION,
+};
+
+enum color_mode {
+ ALPHA_SRC_PRE_MUL,
+ ALPHA_SRC_NO_PRE_MUL,
+};
+
+enum factor_mode {
+ ALPHA_ZERO,
+ ALPHA_ONE,
+ ALPHA_SRC,
+ ALPHA_SRC_INVERSE,
+ ALPHA_SRC_GLOBAL,
+};
+
+#endif /* _ROCKCHIP_DRM_VOP_H */
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 0ddce4d046d..859ccb65860 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -19,6 +19,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
#include <video/sh_mobile_meram.h>
diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig
index ae8850f3e63..d6d6b705b8c 100644
--- a/drivers/gpu/drm/sti/Kconfig
+++ b/drivers/gpu/drm/sti/Kconfig
@@ -5,6 +5,7 @@ config DRM_STI
select DRM_KMS_HELPER
select DRM_GEM_CMA_HELPER
select DRM_KMS_CMA_HELPER
+ select FW_LOADER_USER_HELPER_FALLBACK
help
Choose this option to enable DRM on STM stiH41x chipset
diff --git a/drivers/gpu/drm/sti/Makefile b/drivers/gpu/drm/sti/Makefile
index 04ac2ceef27..6ba9d27c1b9 100644
--- a/drivers/gpu/drm/sti/Makefile
+++ b/drivers/gpu/drm/sti/Makefile
@@ -3,6 +3,7 @@ sticompositor-y := \
sti_mixer.o \
sti_gdp.o \
sti_vid.o \
+ sti_cursor.o \
sti_compositor.o \
sti_drm_crtc.o \
sti_drm_plane.o
@@ -18,4 +19,5 @@ obj-$(CONFIG_DRM_STI) = \
sti_hda.o \
sti_tvout.o \
sticompositor.o \
- sti_drm_drv.o
\ No newline at end of file
+ sti_hqvdp.o \
+ sti_drm_drv.o
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
index 9e31dfe154e..43215d3020f 100644
--- a/drivers/gpu/drm/sti/sti_compositor.c
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -24,14 +24,16 @@
* stiH407 compositor properties
*/
struct sti_compositor_data stih407_compositor_data = {
- .nb_subdev = 6,
+ .nb_subdev = 8,
.subdev_desc = {
+ {STI_CURSOR_SUBDEV, (int)STI_CURSOR, 0x000},
{STI_GPD_SUBDEV, (int)STI_GDP_0, 0x100},
{STI_GPD_SUBDEV, (int)STI_GDP_1, 0x200},
{STI_GPD_SUBDEV, (int)STI_GDP_2, 0x300},
{STI_GPD_SUBDEV, (int)STI_GDP_3, 0x400},
{STI_VID_SUBDEV, (int)STI_VID_0, 0x700},
- {STI_MIXER_MAIN_SUBDEV, STI_MIXER_MAIN, 0xC00}
+ {STI_MIXER_MAIN_SUBDEV, STI_MIXER_MAIN, 0xC00},
+ {STI_MIXER_AUX_SUBDEV, STI_MIXER_AUX, 0xD00},
},
};
@@ -67,11 +69,11 @@ static int sti_compositor_init_subdev(struct sti_compositor *compo,
break;
case STI_GPD_SUBDEV:
case STI_VID_SUBDEV:
+ case STI_CURSOR_SUBDEV:
compo->layer[layer_id++] =
sti_layer_create(compo->dev, desc[i].id,
compo->regs + desc[i].offset);
break;
- /* case STI_CURSOR_SUBDEV : TODO */
default:
DRM_ERROR("Unknow subdev compoment type\n");
return 1;
@@ -102,33 +104,35 @@ static int sti_compositor_bind(struct device *dev, struct device *master,
enum sti_layer_type type = desc & STI_LAYER_TYPE_MASK;
enum drm_plane_type plane_type = DRM_PLANE_TYPE_OVERLAY;
- if (compo->mixer[crtc])
+ if (crtc < compo->nb_mixers)
plane_type = DRM_PLANE_TYPE_PRIMARY;
switch (type) {
case STI_CUR:
cursor = sti_drm_plane_init(drm_dev,
compo->layer[i],
- (1 << crtc) - 1,
- DRM_PLANE_TYPE_CURSOR);
+ 1, DRM_PLANE_TYPE_CURSOR);
break;
case STI_GDP:
case STI_VID:
primary = sti_drm_plane_init(drm_dev,
compo->layer[i],
- (1 << crtc) - 1, plane_type);
+ (1 << compo->nb_mixers) - 1,
+ plane_type);
plane++;
break;
case STI_BCK:
+ case STI_VDP:
break;
}
/* The first planes are reserved for primary planes*/
- if (compo->mixer[crtc]) {
+ if (crtc < compo->nb_mixers && primary) {
sti_drm_crtc_init(drm_dev, compo->mixer[crtc],
primary, cursor);
crtc++;
cursor = NULL;
+ primary = NULL;
}
}
}
diff --git a/drivers/gpu/drm/sti/sti_compositor.h b/drivers/gpu/drm/sti/sti_compositor.h
index 3ea19db72e0..019eb44c62c 100644
--- a/drivers/gpu/drm/sti/sti_compositor.h
+++ b/drivers/gpu/drm/sti/sti_compositor.h
@@ -64,7 +64,6 @@ struct sti_compositor_data {
* @layer: array of layers
* @nb_mixers: number of mixers for this compositor
* @nb_layers: number of layers (GDP,VID,...) for this compositor
- * @enable: true if compositor is enable else false
* @vtg_vblank_nb: callback for VTG VSYNC notification
*/
struct sti_compositor {
@@ -83,7 +82,6 @@ struct sti_compositor {
struct sti_layer *layer[STI_MAX_LAYER];
int nb_mixers;
int nb_layers;
- bool enable;
struct notifier_block vtg_vblank_nb;
};
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
new file mode 100644
index 00000000000..010eaee60bf
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -0,0 +1,242 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Vincent Abriou <vincent.abriou@st.com>
+ * Fabien Dessenne <fabien.dessenne@st.com>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+#include <drm/drmP.h>
+
+#include "sti_cursor.h"
+#include "sti_layer.h"
+#include "sti_vtg.h"
+
+/* Registers */
+#define CUR_CTL 0x00
+#define CUR_VPO 0x0C
+#define CUR_PML 0x14
+#define CUR_PMP 0x18
+#define CUR_SIZE 0x1C
+#define CUR_CML 0x20
+#define CUR_AWS 0x28
+#define CUR_AWE 0x2C
+
+#define CUR_CTL_CLUT_UPDATE BIT(1)
+
+#define STI_CURS_MIN_SIZE 1
+#define STI_CURS_MAX_SIZE 128
+
+/*
+ * pixmap dma buffer structure
+ *
+ * @paddr: physical address
+ * @size: buffer size
+ * @base: virtual address
+ */
+struct dma_pixmap {
+ dma_addr_t paddr;
+ size_t size;
+ void *base;
+};
+
+/**
+ * STI Cursor structure
+ *
+ * @layer: layer structure
+ * @width: cursor width
+ * @height: cursor height
+ * @clut: color look up table
+ * @clut_paddr: color look up table physical address
+ * @pixmap: pixmap dma buffer (clut8-format cursor)
+ */
+struct sti_cursor {
+ struct sti_layer layer;
+ unsigned int width;
+ unsigned int height;
+ unsigned short *clut;
+ dma_addr_t clut_paddr;
+ struct dma_pixmap pixmap;
+};
+
+static const uint32_t cursor_supported_formats[] = {
+ DRM_FORMAT_ARGB8888,
+};
+
+#define to_sti_cursor(x) container_of(x, struct sti_cursor, layer)
+
+static const uint32_t *sti_cursor_get_formats(struct sti_layer *layer)
+{
+ return cursor_supported_formats;
+}
+
+static unsigned int sti_cursor_get_nb_formats(struct sti_layer *layer)
+{
+ return ARRAY_SIZE(cursor_supported_formats);
+}
+
+static void sti_cursor_argb8888_to_clut8(struct sti_layer *layer)
+{
+ struct sti_cursor *cursor = to_sti_cursor(layer);
+ u32 *src = layer->vaddr;
+ u8 *dst = cursor->pixmap.base;
+ unsigned int i, j;
+ u32 a, r, g, b;
+
+ for (i = 0; i < cursor->height; i++) {
+ for (j = 0; j < cursor->width; j++) {
+ /* Pick the 2 higher bits of each component */
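+ /* e.g. an assumed opaque mid-grey pixel 0xFF808080 gives
+ * a = 3, r = g = b = 2, i.e. CLUT index 0xEA */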
+ a = (*src >> 30) & 3;
+ r = (*src >> 22) & 3;
+ g = (*src >> 14) & 3;
+ b = (*src >> 6) & 3;
+ *dst = a << 6 | r << 4 | g << 2 | b;
+ src++;
+ dst++;
+ }
+ }
+}
+
+static int sti_cursor_prepare_layer(struct sti_layer *layer, bool first_prepare)
+{
+ struct sti_cursor *cursor = to_sti_cursor(layer);
+ struct drm_display_mode *mode = layer->mode;
+ u32 y, x;
+ u32 val;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ dev_dbg(layer->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
+
+ if (layer->src_w < STI_CURS_MIN_SIZE ||
+ layer->src_h < STI_CURS_MIN_SIZE ||
+ layer->src_w > STI_CURS_MAX_SIZE ||
+ layer->src_h > STI_CURS_MAX_SIZE) {
+ DRM_ERROR("Invalid cursor size (%dx%d)\n",
+ layer->src_w, layer->src_h);
+ return -EINVAL;
+ }
+
+ /* If the cursor size has changed, re-allocate the pixmap */
+ if (!cursor->pixmap.base ||
+ (cursor->width != layer->src_w) ||
+ (cursor->height != layer->src_h)) {
+ cursor->width = layer->src_w;
+ cursor->height = layer->src_h;
+
+ if (cursor->pixmap.base)
+ dma_free_writecombine(layer->dev,
+ cursor->pixmap.size,
+ cursor->pixmap.base,
+ cursor->pixmap.paddr);
+
+ cursor->pixmap.size = cursor->width * cursor->height;
+
+ cursor->pixmap.base = dma_alloc_writecombine(layer->dev,
+ cursor->pixmap.size,
+ &cursor->pixmap.paddr,
+ GFP_KERNEL | GFP_DMA);
+ if (!cursor->pixmap.base) {
+ DRM_ERROR("Failed to allocate memory for pixmap\n");
+ return -ENOMEM;
+ }
+ }
+
+ /* Convert ARGB8888 to CLUT8 */
+ sti_cursor_argb8888_to_clut8(layer);
+
+ /* AWS and AWE depend on the mode */
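+ /* (assumed: AWS/AWE set the active window start/end, here the full
+ * display area, expressed in VTG line/pixel coordinates) */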
+ y = sti_vtg_get_line_number(*mode, 0);
+ x = sti_vtg_get_pixel_number(*mode, 0);
+ val = y << 16 | x;
+ writel(val, layer->regs + CUR_AWS);
+ y = sti_vtg_get_line_number(*mode, mode->vdisplay - 1);
+ x = sti_vtg_get_pixel_number(*mode, mode->hdisplay - 1);
+ val = y << 16 | x;
+ writel(val, layer->regs + CUR_AWE);
+
+ if (first_prepare) {
+ /* Set and fetch CLUT */
+ writel(cursor->clut_paddr, layer->regs + CUR_CML);
+ writel(CUR_CTL_CLUT_UPDATE, layer->regs + CUR_CTL);
+ }
+
+ return 0;
+}
+
+static int sti_cursor_commit_layer(struct sti_layer *layer)
+{
+ struct sti_cursor *cursor = to_sti_cursor(layer);
+ struct drm_display_mode *mode = layer->mode;
+ u32 ydo, xdo;
+
+ dev_dbg(layer->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
+
+ /* Set memory location, size, and position */
+ writel(cursor->pixmap.paddr, layer->regs + CUR_PML);
+ writel(cursor->width, layer->regs + CUR_PMP);
+ writel(cursor->height << 16 | cursor->width, layer->regs + CUR_SIZE);
+
+ ydo = sti_vtg_get_line_number(*mode, layer->dst_y);
+ xdo = sti_vtg_get_pixel_number(*mode, layer->dst_x);
+ writel((ydo << 16) | xdo, layer->regs + CUR_VPO);
+
+ return 0;
+}
+
+static int sti_cursor_disable_layer(struct sti_layer *layer)
+{
+ return 0;
+}
+
+static void sti_cursor_init(struct sti_layer *layer)
+{
+ struct sti_cursor *cursor = to_sti_cursor(layer);
+ unsigned short *base = cursor->clut;
+ unsigned int a, r, g, b;
+
+ /* Assign CLUT values, ARGB444 format */
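+ /* each 2-bit component (0..3) is expanded to 4 bits by multiplying
+ * by 5: 0 -> 0x0, 1 -> 0x5, 2 -> 0xA, 3 -> 0xF */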
+ for (a = 0; a < 4; a++)
+ for (r = 0; r < 4; r++)
+ for (g = 0; g < 4; g++)
+ for (b = 0; b < 4; b++)
+ *base++ = (a * 5) << 12 |
+ (r * 5) << 8 |
+ (g * 5) << 4 |
+ (b * 5);
+}
+
+static const struct sti_layer_funcs cursor_ops = {
+ .get_formats = sti_cursor_get_formats,
+ .get_nb_formats = sti_cursor_get_nb_formats,
+ .init = sti_cursor_init,
+ .prepare = sti_cursor_prepare_layer,
+ .commit = sti_cursor_commit_layer,
+ .disable = sti_cursor_disable_layer,
+};
+
+struct sti_layer *sti_cursor_create(struct device *dev)
+{
+ struct sti_cursor *cursor;
+
+ cursor = devm_kzalloc(dev, sizeof(*cursor), GFP_KERNEL);
+ if (!cursor) {
+ DRM_ERROR("Failed to allocate memory for cursor\n");
+ return NULL;
+ }
+
+ /* Allocate clut buffer */
+ cursor->clut = dma_alloc_writecombine(dev,
+ 0x100 * sizeof(unsigned short),
+ &cursor->clut_paddr,
+ GFP_KERNEL | GFP_DMA);
+
+ if (!cursor->clut) {
+ DRM_ERROR("Failed to allocate memory for cursor clut\n");
+ devm_kfree(dev, cursor);
+ return NULL;
+ }
+
+ cursor->layer.ops = &cursor_ops;
+
+ return (struct sti_layer *)cursor;
+}
diff --git a/drivers/gpu/drm/sti/sti_cursor.h b/drivers/gpu/drm/sti/sti_cursor.h
new file mode 100644
index 00000000000..3c9827404f2
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_cursor.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2013
+ * Authors: Vincent Abriou <vincent.abriou@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_CURSOR_H_
+#define _STI_CURSOR_H_
+
+struct sti_layer *sti_cursor_create(struct device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_drm_crtc.c b/drivers/gpu/drm/sti/sti_drm_crtc.c
index d2ae0c0e13b..4c651c200f2 100644
--- a/drivers/gpu/drm/sti/sti_drm_crtc.c
+++ b/drivers/gpu/drm/sti/sti_drm_crtc.c
@@ -10,6 +10,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
#include "sti_compositor.h"
#include "sti_drm_drv.h"
@@ -27,7 +28,7 @@ static void sti_drm_crtc_prepare(struct drm_crtc *crtc)
struct device *dev = mixer->dev;
struct sti_compositor *compo = dev_get_drvdata(dev);
- compo->enable = true;
+ mixer->enabled = true;
/* Prepare and enable the compo IP clock */
if (mixer->id == STI_MIXER_MAIN) {
@@ -37,6 +38,8 @@ static void sti_drm_crtc_prepare(struct drm_crtc *crtc)
if (clk_prepare_enable(compo->clk_compo_aux))
DRM_INFO("Failed to prepare/enable compo_aux clk\n");
}
+
+ sti_mixer_clear_all_layers(mixer);
}
static void sti_drm_crtc_commit(struct drm_crtc *crtc)
@@ -61,6 +64,8 @@ static void sti_drm_crtc_commit(struct drm_crtc *crtc)
/* Enable layer on mixer */
if (sti_mixer_set_layer_status(mixer, layer, true))
DRM_ERROR("Can not enable layer at mixer\n");
+
+ drm_crtc_vblank_on(crtc);
}
static bool sti_drm_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -143,7 +148,8 @@ sti_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
w = crtc->primary->fb->width - x;
h = crtc->primary->fb->height - y;
- return sti_layer_prepare(layer, crtc->primary->fb, &crtc->mode,
+ return sti_layer_prepare(layer, crtc,
+ crtc->primary->fb, &crtc->mode,
mixer->id, 0, 0, w, h, x, y, w, h);
}
@@ -170,7 +176,8 @@ static int sti_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
w = crtc->primary->fb->width - crtc->x;
h = crtc->primary->fb->height - crtc->y;
- ret = sti_layer_prepare(layer, crtc->primary->fb, &crtc->mode,
+ ret = sti_layer_prepare(layer, crtc,
+ crtc->primary->fb, &crtc->mode,
mixer->id, 0, 0, w, h,
crtc->x, crtc->y, w, h);
if (ret) {
@@ -195,7 +202,7 @@ static void sti_drm_crtc_disable(struct drm_crtc *crtc)
struct sti_compositor *compo = dev_get_drvdata(dev);
struct sti_layer *layer;
- if (!compo->enable)
+ if (!mixer->enabled)
return;
DRM_DEBUG_KMS("CRTC:%d (%s)\n", crtc->base.id, sti_mixer_to_str(mixer));
@@ -221,7 +228,7 @@ static void sti_drm_crtc_disable(struct drm_crtc *crtc)
/* Then disable layer itself */
sti_layer_disable(layer);
- drm_vblank_off(crtc->dev, mixer->id);
+ drm_crtc_vblank_off(crtc);
/* Disable pixel clock and compo IP clocks */
if (mixer->id == STI_MIXER_MAIN) {
@@ -232,7 +239,7 @@ static void sti_drm_crtc_disable(struct drm_crtc *crtc)
clk_disable_unprepare(compo->clk_compo_aux);
}
- compo->enable = false;
+ mixer->enabled = false;
}
static struct drm_crtc_helper_funcs sti_crtc_helper_funcs = {
@@ -363,7 +370,6 @@ void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
struct sti_drm_private *priv = dev->dev_private;
struct sti_compositor *compo = priv->compo;
struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb;
- unsigned long flags;
DRM_DEBUG_DRIVER("\n");
@@ -372,13 +378,10 @@ void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
/* free the resources of the pending requests */
- spin_lock_irqsave(&dev->event_lock, flags);
if (compo->mixer[crtc]->pending_event) {
drm_vblank_put(dev, crtc);
compo->mixer[crtc]->pending_event = NULL;
}
- spin_unlock_irqrestore(&dev->event_lock, flags);
-
}
EXPORT_SYMBOL(sti_drm_crtc_disable_vblank);
@@ -398,6 +401,7 @@ bool sti_drm_crtc_is_main(struct drm_crtc *crtc)
return false;
}
+EXPORT_SYMBOL(sti_drm_crtc_is_main);
int sti_drm_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
struct drm_plane *primary, struct drm_plane *cursor)
diff --git a/drivers/gpu/drm/sti/sti_drm_drv.c b/drivers/gpu/drm/sti/sti_drm_drv.c
index 8e64220e879..5239fa12172 100644
--- a/drivers/gpu/drm/sti/sti_drm_drv.c
+++ b/drivers/gpu/drm/sti/sti_drm_drv.c
@@ -67,8 +67,12 @@ static int sti_drm_load(struct drm_device *dev, unsigned long flags)
sti_drm_mode_config_init(dev);
ret = component_bind_all(dev->dev, dev);
- if (ret)
+ if (ret) {
+ drm_kms_helper_poll_fini(dev);
+ drm_mode_config_cleanup(dev);
+ kfree(private);
return ret;
+ }
drm_helper_disable_unused_functions(dev);
diff --git a/drivers/gpu/drm/sti/sti_drm_plane.c b/drivers/gpu/drm/sti/sti_drm_plane.c
index f4118d4cac2..bb6a29339e1 100644
--- a/drivers/gpu/drm/sti/sti_drm_plane.c
+++ b/drivers/gpu/drm/sti/sti_drm_plane.c
@@ -45,7 +45,8 @@ sti_drm_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
}
/* src_x are in 16.16 format. */
- res = sti_layer_prepare(layer, fb, &crtc->mode, mixer->id,
+ res = sti_layer_prepare(layer, crtc, fb,
+ &crtc->mode, mixer->id,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x >> 16, src_y >> 16,
src_w >> 16, src_h >> 16);
@@ -193,3 +194,4 @@ struct drm_plane *sti_drm_plane_init(struct drm_device *dev,
return &layer->plane;
}
+EXPORT_SYMBOL(sti_drm_plane_init);
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index 4e30b74559f..32448d1d1e8 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -73,7 +73,9 @@ struct sti_gdp_node {
struct sti_gdp_node_list {
struct sti_gdp_node *top_field;
+ dma_addr_t top_field_paddr;
struct sti_gdp_node *btm_field;
+ dma_addr_t btm_field_paddr;
};
/**
@@ -81,6 +83,8 @@ struct sti_gdp_node_list {
*
* @layer: layer structure
* @clk_pix: pixel clock for the current gdp
+ * @clk_main_parent: gdp parent clock if main path used
+ * @clk_aux_parent: gdp parent clock if aux path used
* @vtg_field_nb: callback for VTG FIELD (top or bottom) notification
* @is_curr_top: true if the current node processed is the top field
* @node_list: array of node list
@@ -88,6 +92,8 @@ struct sti_gdp_node_list {
struct sti_gdp {
struct sti_layer layer;
struct clk *clk_pix;
+ struct clk *clk_main_parent;
+ struct clk *clk_aux_parent;
struct notifier_block vtg_field_nb;
bool is_curr_top;
struct sti_gdp_node_list node_list[GDP_NODE_NB_BANK];
@@ -168,7 +174,6 @@ static int sti_gdp_get_alpharange(int format)
static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer)
{
int hw_nvn;
- void *virt_nvn;
struct sti_gdp *gdp = to_sti_gdp(layer);
unsigned int i;
@@ -176,11 +181,9 @@ static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer)
if (!hw_nvn)
goto end;
- virt_nvn = dma_to_virt(layer->dev, (dma_addr_t) hw_nvn);
-
for (i = 0; i < GDP_NODE_NB_BANK; i++)
- if ((virt_nvn != gdp->node_list[i].btm_field) &&
- (virt_nvn != gdp->node_list[i].top_field))
+ if ((hw_nvn != gdp->node_list[i].btm_field_paddr) &&
+ (hw_nvn != gdp->node_list[i].top_field_paddr))
return &gdp->node_list[i];
/* in hazardious cases restart with the first node */
@@ -204,7 +207,6 @@ static
struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer)
{
int hw_nvn;
- void *virt_nvn;
struct sti_gdp *gdp = to_sti_gdp(layer);
unsigned int i;
@@ -212,11 +214,9 @@ struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer)
if (!hw_nvn)
goto end;
- virt_nvn = dma_to_virt(layer->dev, (dma_addr_t) hw_nvn);
-
for (i = 0; i < GDP_NODE_NB_BANK; i++)
- if ((virt_nvn == gdp->node_list[i].btm_field) ||
- (virt_nvn == gdp->node_list[i].top_field))
+ if ((hw_nvn == gdp->node_list[i].btm_field_paddr) ||
+ (hw_nvn == gdp->node_list[i].top_field_paddr))
return &gdp->node_list[i];
end:
@@ -292,8 +292,8 @@ static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
/* Same content and chained together */
memcpy(btm_field, top_field, sizeof(*btm_field));
- top_field->gam_gdp_nvn = virt_to_dma(dev, btm_field);
- btm_field->gam_gdp_nvn = virt_to_dma(dev, top_field);
+ top_field->gam_gdp_nvn = list->btm_field_paddr;
+ btm_field->gam_gdp_nvn = list->top_field_paddr;
/* Interlaced mode */
if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -311,6 +311,17 @@ static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
/* Set and enable gdp clock */
if (gdp->clk_pix) {
+ struct clk *clkp;
+ /* The gdp pixel clock parent depends on which mixer
+ * (main or aux path) is used. */
+ if (layer->mixer_id == STI_MIXER_MAIN)
+ clkp = gdp->clk_main_parent;
+ else
+ clkp = gdp->clk_aux_parent;
+
+ if (clkp)
+ clk_set_parent(gdp->clk_pix, clkp);
+
res = clk_set_rate(gdp->clk_pix, rate);
if (res < 0) {
DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
@@ -349,8 +360,8 @@ static int sti_gdp_commit_layer(struct sti_layer *layer)
struct sti_gdp_node *updated_top_node = updated_list->top_field;
struct sti_gdp_node *updated_btm_node = updated_list->btm_field;
struct sti_gdp *gdp = to_sti_gdp(layer);
- u32 dma_updated_top = virt_to_dma(layer->dev, updated_top_node);
- u32 dma_updated_btm = virt_to_dma(layer->dev, updated_btm_node);
+ u32 dma_updated_top = updated_list->top_field_paddr;
+ u32 dma_updated_btm = updated_list->btm_field_paddr;
struct sti_gdp_node_list *curr_list = sti_gdp_get_current_nodes(layer);
dev_dbg(layer->dev, "%s %s top/btm_node:0x%p/0x%p\n", __func__,
@@ -461,16 +472,16 @@ static void sti_gdp_init(struct sti_layer *layer)
{
struct sti_gdp *gdp = to_sti_gdp(layer);
struct device_node *np = layer->dev->of_node;
- dma_addr_t dma;
+ dma_addr_t dma_addr;
void *base;
unsigned int i, size;
/* Allocate all the nodes within a single memory page */
size = sizeof(struct sti_gdp_node) *
GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK;
-
base = dma_alloc_writecombine(layer->dev,
- size, &dma, GFP_KERNEL | GFP_DMA);
+ size, &dma_addr, GFP_KERNEL | GFP_DMA);
+
if (!base) {
DRM_ERROR("Failed to allocate memory for GDP node\n");
return;
@@ -478,21 +489,26 @@ static void sti_gdp_init(struct sti_layer *layer)
memset(base, 0, size);
for (i = 0; i < GDP_NODE_NB_BANK; i++) {
- if (virt_to_dma(layer->dev, base) & 0xF) {
+ if (dma_addr & 0xF) {
DRM_ERROR("Mem alignment failed\n");
return;
}
gdp->node_list[i].top_field = base;
+ gdp->node_list[i].top_field_paddr = dma_addr;
+
DRM_DEBUG_DRIVER("node[%d].top_field=%p\n", i, base);
base += sizeof(struct sti_gdp_node);
+ dma_addr += sizeof(struct sti_gdp_node);
- if (virt_to_dma(layer->dev, base) & 0xF) {
+ if (dma_addr & 0xF) {
DRM_ERROR("Mem alignment failed\n");
return;
}
gdp->node_list[i].btm_field = base;
+ gdp->node_list[i].btm_field_paddr = dma_addr;
DRM_DEBUG_DRIVER("node[%d].btm_field=%p\n", i, base);
base += sizeof(struct sti_gdp_node);
+ dma_addr += sizeof(struct sti_gdp_node);
}
if (of_device_is_compatible(np, "st,stih407-compositor")) {
@@ -520,6 +536,14 @@ static void sti_gdp_init(struct sti_layer *layer)
gdp->clk_pix = devm_clk_get(layer->dev, clk_name);
if (IS_ERR(gdp->clk_pix))
DRM_ERROR("Cannot get %s clock\n", clk_name);
+
+ gdp->clk_main_parent = devm_clk_get(layer->dev, "main_parent");
+ if (IS_ERR(gdp->clk_main_parent))
+ DRM_ERROR("Cannot get main_parent clock\n");
+
+ gdp->clk_aux_parent = devm_clk_get(layer->dev, "aux_parent");
+ if (IS_ERR(gdp->clk_aux_parent))
+ DRM_ERROR("Cannot get aux_parent clock\n");
}
}
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index b22968c08d1..d032e024b0b 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -130,8 +130,7 @@ static irqreturn_t hdmi_irq_thread(int irq, void *arg)
/* Hot plug/unplug IRQ */
if (hdmi->irq_status & HDMI_INT_HOT_PLUG) {
- /* read gpio to get the status */
- hdmi->hpd = gpio_get_value(hdmi->hpd_gpio);
+ hdmi->hpd = readl(hdmi->regs + HDMI_STA) & HDMI_STA_HOT_PLUG;
if (hdmi->drm_dev)
drm_helper_hpd_irq_event(hdmi->drm_dev);
}
@@ -273,31 +272,32 @@ static int hdmi_avi_infoframe_config(struct sti_hdmi *hdmi)
hdmi_write(hdmi, val, HDMI_SW_DI_CFG);
/* Infoframe header */
- val = buffer[0x0];
- val |= buffer[0x1] << 8;
- val |= buffer[0x2] << 16;
+ val = buffer[0];
+ val |= buffer[1] << 8;
+ val |= buffer[2] << 16;
hdmi_write(hdmi, val, HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_AVI));
/* Infoframe packet bytes */
- val = frame[0x0];
- val |= frame[0x1] << 8;
- val |= frame[0x2] << 16;
- val |= frame[0x3] << 24;
+ val = buffer[3];
+ val |= *(frame++) << 8;
+ val |= *(frame++) << 16;
+ val |= *(frame++) << 24;
hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_AVI));
- val = frame[0x4];
- val |= frame[0x5] << 8;
- val |= frame[0x6] << 16;
- val |= frame[0x7] << 24;
+ val = *(frame++);
+ val |= *(frame++) << 8;
+ val |= *(frame++) << 16;
+ val |= *(frame++) << 24;
hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD1(HDMI_IFRAME_SLOT_AVI));
- val = frame[0x8];
- val |= frame[0x9] << 8;
- val |= frame[0xA] << 16;
- val |= frame[0xB] << 24;
+ val = *(frame++);
+ val |= *(frame++) << 8;
+ val |= *(frame++) << 16;
+ val |= *(frame++) << 24;
hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD2(HDMI_IFRAME_SLOT_AVI));
- val = frame[0xC];
+ val = *(frame++);
+ val |= *(frame) << 8;
hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD3(HDMI_IFRAME_SLOT_AVI));
/* Enable transmission slot for AVI infoframe
@@ -480,17 +480,15 @@ static const struct drm_bridge_funcs sti_hdmi_bridge_funcs = {
static int sti_hdmi_connector_get_modes(struct drm_connector *connector)
{
- struct i2c_adapter *i2c_adap;
+ struct sti_hdmi_connector *hdmi_connector
+ = to_sti_hdmi_connector(connector);
+ struct sti_hdmi *hdmi = hdmi_connector->hdmi;
struct edid *edid;
int count;
DRM_DEBUG_DRIVER("\n");
- i2c_adap = i2c_get_adapter(1);
- if (!i2c_adap)
- goto fail;
-
- edid = drm_get_edid(connector, i2c_adap);
+ edid = drm_get_edid(connector, hdmi->ddc_adapt);
if (!edid)
goto fail;
@@ -603,29 +601,38 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
struct sti_hdmi_connector *connector;
struct drm_connector *drm_connector;
struct drm_bridge *bridge;
- struct i2c_adapter *i2c_adap;
+ struct device_node *ddc;
int err;
- i2c_adap = i2c_get_adapter(1);
- if (!i2c_adap)
- return -EPROBE_DEFER;
+ ddc = of_parse_phandle(dev->of_node, "ddc", 0);
+ if (ddc) {
+ hdmi->ddc_adapt = of_find_i2c_adapter_by_node(ddc);
+ if (!hdmi->ddc_adapt) {
+ err = -EPROBE_DEFER;
+ of_node_put(ddc);
+ return err;
+ }
+
+ of_node_put(ddc);
+ }
/* Set the drm device handle */
hdmi->drm_dev = drm_dev;
encoder = sti_hdmi_find_encoder(drm_dev);
if (!encoder)
- return -ENOMEM;
+ goto err_adapt;
connector = devm_kzalloc(dev, sizeof(*connector), GFP_KERNEL);
if (!connector)
- return -ENOMEM;
+ goto err_adapt;
+
connector->hdmi = hdmi;
bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
if (!bridge)
- return -ENOMEM;
+ goto err_adapt;
bridge->driver_private = hdmi;
drm_bridge_init(drm_dev, bridge, &sti_hdmi_bridge_funcs);
@@ -662,6 +669,8 @@ err_sysfs:
err_connector:
drm_bridge_cleanup(bridge);
drm_connector_cleanup(drm_connector);
+err_adapt:
+ if (hdmi->ddc_adapt)
+ put_device(&hdmi->ddc_adapt->dev);
return -EINVAL;
}
@@ -757,13 +766,7 @@ static int sti_hdmi_probe(struct platform_device *pdev)
return PTR_ERR(hdmi->clk_audio);
}
- hdmi->hpd_gpio = of_get_named_gpio(np, "hdmi,hpd-gpio", 0);
- if (hdmi->hpd_gpio < 0) {
- DRM_ERROR("Failed to get hdmi hpd-gpio\n");
- return -EIO;
- }
-
- hdmi->hpd = gpio_get_value(hdmi->hpd_gpio);
+ hdmi->hpd = readl(hdmi->regs + HDMI_STA) & HDMI_STA_HOT_PLUG;
init_waitqueue_head(&hdmi->wait_event);
@@ -788,6 +791,11 @@ static int sti_hdmi_probe(struct platform_device *pdev)
static int sti_hdmi_remove(struct platform_device *pdev)
{
+ struct sti_hdmi *hdmi = dev_get_drvdata(&pdev->dev);
+
+ if (hdmi->ddc_adapt)
+ put_device(&hdmi->ddc_adapt->dev);
+
component_del(&pdev->dev, &sti_hdmi_ops);
return 0;
}
diff --git a/drivers/gpu/drm/sti/sti_hdmi.h b/drivers/gpu/drm/sti/sti_hdmi.h
index 61bec6557ce..3d22390e1f3 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.h
+++ b/drivers/gpu/drm/sti/sti_hdmi.h
@@ -14,6 +14,9 @@
#define HDMI_STA 0x0010
#define HDMI_STA_DLL_LCK BIT(5)
+#define HDMI_STA_HOT_PLUG_SHIFT 4
+#define HDMI_STA_HOT_PLUG (1 << HDMI_STA_HOT_PLUG_SHIFT)
+
struct sti_hdmi;
struct hdmi_phy_ops {
@@ -37,7 +40,6 @@ struct hdmi_phy_ops {
* @irq_status: interrupt status register
* @phy_ops: phy start/stop operations
* @enabled: true if hdmi is enabled else false
- * @hpd_gpio: hdmi hot plug detect gpio number
* @hpd: hot plug detect status
* @wait_event: wait event
* @event_received: wait event status
@@ -57,11 +59,11 @@ struct sti_hdmi {
u32 irq_status;
struct hdmi_phy_ops *phy_ops;
bool enabled;
- int hpd_gpio;
bool hpd;
wait_queue_head_t wait_event;
bool event_received;
struct reset_control *reset;
+ struct i2c_adapter *ddc_adapt;
};
u32 hdmi_read(struct sti_hdmi *hdmi, int offset);
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
new file mode 100644
index 00000000000..f3db05dab0a
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -0,0 +1,1073 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include <drm/drmP.h>
+
+#include "sti_drm_plane.h"
+#include "sti_hqvdp.h"
+#include "sti_hqvdp_lut.h"
+#include "sti_layer.h"
+#include "sti_vtg.h"
+
+/* Firmware name */
+#define HQVDP_FMW_NAME "hqvdp-stih407.bin"
+
+/* Regs address */
+#define HQVDP_DMEM 0x00000000 /* 0x00000000 */
+#define HQVDP_PMEM 0x00040000 /* 0x00040000 */
+#define HQVDP_RD_PLUG 0x000E0000 /* 0x000E0000 */
+#define HQVDP_RD_PLUG_CONTROL (HQVDP_RD_PLUG + 0x1000) /* 0x000E1000 */
+#define HQVDP_RD_PLUG_PAGE_SIZE (HQVDP_RD_PLUG + 0x1004) /* 0x000E1004 */
+#define HQVDP_RD_PLUG_MIN_OPC (HQVDP_RD_PLUG + 0x1008) /* 0x000E1008 */
+#define HQVDP_RD_PLUG_MAX_OPC (HQVDP_RD_PLUG + 0x100C) /* 0x000E100C */
+#define HQVDP_RD_PLUG_MAX_CHK (HQVDP_RD_PLUG + 0x1010) /* 0x000E1010 */
+#define HQVDP_RD_PLUG_MAX_MSG (HQVDP_RD_PLUG + 0x1014) /* 0x000E1014 */
+#define HQVDP_RD_PLUG_MIN_SPACE (HQVDP_RD_PLUG + 0x1018) /* 0x000E1018 */
+#define HQVDP_WR_PLUG 0x000E2000 /* 0x000E2000 */
+#define HQVDP_WR_PLUG_CONTROL (HQVDP_WR_PLUG + 0x1000) /* 0x000E3000 */
+#define HQVDP_WR_PLUG_PAGE_SIZE (HQVDP_WR_PLUG + 0x1004) /* 0x000E3004 */
+#define HQVDP_WR_PLUG_MIN_OPC (HQVDP_WR_PLUG + 0x1008) /* 0x000E3008 */
+#define HQVDP_WR_PLUG_MAX_OPC (HQVDP_WR_PLUG + 0x100C) /* 0x000E300C */
+#define HQVDP_WR_PLUG_MAX_CHK (HQVDP_WR_PLUG + 0x1010) /* 0x000E3010 */
+#define HQVDP_WR_PLUG_MAX_MSG (HQVDP_WR_PLUG + 0x1014) /* 0x000E3014 */
+#define HQVDP_WR_PLUG_MIN_SPACE (HQVDP_WR_PLUG + 0x1018) /* 0x000E3018 */
+#define HQVDP_MBX 0x000E4000 /* 0x000E4000 */
+#define HQVDP_MBX_IRQ_TO_XP70 (HQVDP_MBX + 0x0000) /* 0x000E4000 */
+#define HQVDP_MBX_INFO_HOST (HQVDP_MBX + 0x0004) /* 0x000E4004 */
+#define HQVDP_MBX_IRQ_TO_HOST (HQVDP_MBX + 0x0008) /* 0x000E4008 */
+#define HQVDP_MBX_INFO_XP70 (HQVDP_MBX + 0x000C) /* 0x000E400C */
+#define HQVDP_MBX_SW_RESET_CTRL (HQVDP_MBX + 0x0010) /* 0x000E4010 */
+#define HQVDP_MBX_STARTUP_CTRL1 (HQVDP_MBX + 0x0014) /* 0x000E4014 */
+#define HQVDP_MBX_STARTUP_CTRL2 (HQVDP_MBX + 0x0018) /* 0x000E4018 */
+#define HQVDP_MBX_GP_STATUS (HQVDP_MBX + 0x001C) /* 0x000E401C */
+#define HQVDP_MBX_NEXT_CMD (HQVDP_MBX + 0x0020) /* 0x000E4020 */
+#define HQVDP_MBX_CURRENT_CMD (HQVDP_MBX + 0x0024) /* 0x000E4024 */
+#define HQVDP_MBX_SOFT_VSYNC (HQVDP_MBX + 0x0028) /* 0x000E4028 */
+
+/* Plugs config */
+#define PLUG_CONTROL_ENABLE 0x00000001
+#define PLUG_PAGE_SIZE_256 0x00000002
+#define PLUG_MIN_OPC_8 0x00000003
+#define PLUG_MAX_OPC_64 0x00000006
+#define PLUG_MAX_CHK_2X 0x00000001
+#define PLUG_MAX_MSG_1X 0x00000000
+#define PLUG_MIN_SPACE_1 0x00000000
+
+/* SW reset CTRL */
+#define SW_RESET_CTRL_FULL BIT(0)
+#define SW_RESET_CTRL_CORE BIT(1)
+
+/* Startup ctrl 1 */
+#define STARTUP_CTRL1_RST_DONE BIT(0)
+#define STARTUP_CTRL1_AUTH_IDLE BIT(2)
+
+/* Startup ctrl 2 */
+#define STARTUP_CTRL2_FETCH_EN BIT(1)
+
+/* Info xP70 */
+#define INFO_XP70_FW_READY BIT(15)
+#define INFO_XP70_FW_PROCESSING BIT(14)
+#define INFO_XP70_FW_INITQUEUES BIT(13)
+
+/* SOFT_VSYNC */
+#define SOFT_VSYNC_HW 0x00000000
+#define SOFT_VSYNC_SW_CMD 0x00000001
+#define SOFT_VSYNC_SW_CTRL_IRQ 0x00000003
+
+/* Reset & boot poll config */
+#define POLL_MAX_ATTEMPT 50
+#define POLL_DELAY_MS 20
+
+#define SCALE_FACTOR 8192
+#define SCALE_MAX_FOR_LEG_LUT_F 4096
+#define SCALE_MAX_FOR_LEG_LUT_E 4915
+#define SCALE_MAX_FOR_LEG_LUT_D 6654
+#define SCALE_MAX_FOR_LEG_LUT_C 8192
+
+enum sti_hvsrc_orient {
+ HVSRC_HORI,
+ HVSRC_VERT
+};
+
+/* Command structures */
+struct sti_hqvdp_top {
+ u32 config;
+ u32 mem_format;
+ u32 current_luma;
+ u32 current_enh_luma;
+ u32 current_right_luma;
+ u32 current_enh_right_luma;
+ u32 current_chroma;
+ u32 current_enh_chroma;
+ u32 current_right_chroma;
+ u32 current_enh_right_chroma;
+ u32 output_luma;
+ u32 output_chroma;
+ u32 luma_src_pitch;
+ u32 luma_enh_src_pitch;
+ u32 luma_right_src_pitch;
+ u32 luma_enh_right_src_pitch;
+ u32 chroma_src_pitch;
+ u32 chroma_enh_src_pitch;
+ u32 chroma_right_src_pitch;
+ u32 chroma_enh_right_src_pitch;
+ u32 luma_processed_pitch;
+ u32 chroma_processed_pitch;
+ u32 input_frame_size;
+ u32 input_viewport_ori;
+ u32 input_viewport_ori_right;
+ u32 input_viewport_size;
+ u32 left_view_border_width;
+ u32 right_view_border_width;
+ u32 left_view_3d_offset_width;
+ u32 right_view_3d_offset_width;
+ u32 side_stripe_color;
+ u32 crc_reset_ctrl;
+};
+
+/* Configs for interlaced: no IT, no pass-through, 3 fields */
+#define TOP_CONFIG_INTER_BTM 0x00000000
+#define TOP_CONFIG_INTER_TOP 0x00000002
+
+/* Config for progressive: no IT, no pass-through, 3 fields */
+#define TOP_CONFIG_PROGRESSIVE 0x00000001
+
+/* Default MemFormat: in=420_raster_dual out=444_raster;opaque Mem2Tv mode */
+#define TOP_MEM_FORMAT_DFLT 0x00018060
+
+/* Min/Max size */
+#define MAX_WIDTH 0x1FFF
+#define MAX_HEIGHT 0x0FFF
+#define MIN_WIDTH 0x0030
+#define MIN_HEIGHT 0x0010
+
+struct sti_hqvdp_vc1re {
+ u32 ctrl_prv_csdi;
+ u32 ctrl_cur_csdi;
+ u32 ctrl_nxt_csdi;
+ u32 ctrl_cur_fmd;
+ u32 ctrl_nxt_fmd;
+};
+
+struct sti_hqvdp_fmd {
+ u32 config;
+ u32 viewport_ori;
+ u32 viewport_size;
+ u32 next_next_luma;
+ u32 next_next_right_luma;
+ u32 next_next_next_luma;
+ u32 next_next_next_right_luma;
+ u32 threshold_scd;
+ u32 threshold_rfd;
+ u32 threshold_move;
+ u32 threshold_cfd;
+};
+
+struct sti_hqvdp_csdi {
+ u32 config;
+ u32 config2;
+ u32 dcdi_config;
+ u32 prev_luma;
+ u32 prev_enh_luma;
+ u32 prev_right_luma;
+ u32 prev_enh_right_luma;
+ u32 next_luma;
+ u32 next_enh_luma;
+ u32 next_right_luma;
+ u32 next_enh_right_luma;
+ u32 prev_chroma;
+ u32 prev_enh_chroma;
+ u32 prev_right_chroma;
+ u32 prev_enh_right_chroma;
+ u32 next_chroma;
+ u32 next_enh_chroma;
+ u32 next_right_chroma;
+ u32 next_enh_right_chroma;
+ u32 prev_motion;
+ u32 prev_right_motion;
+ u32 cur_motion;
+ u32 cur_right_motion;
+ u32 next_motion;
+ u32 next_right_motion;
+};
+
+/* Config for progressive: by pass */
+#define CSDI_CONFIG_PROG 0x00000000
+/* Config for directional deinterlacing without motion */
+#define CSDI_CONFIG_INTER_DIR 0x00000016
+/* Additional configs for fader, blender, motion,... deinterlace algorithms */
+#define CSDI_CONFIG2_DFLT 0x000001B3
+#define CSDI_DCDI_CONFIG_DFLT 0x00203803
+
+struct sti_hqvdp_hvsrc {
+ u32 hor_panoramic_ctrl;
+ u32 output_picture_size;
+ u32 init_horizontal;
+ u32 init_vertical;
+ u32 param_ctrl;
+ u32 yh_coef[NB_COEF];
+ u32 ch_coef[NB_COEF];
+ u32 yv_coef[NB_COEF];
+ u32 cv_coef[NB_COEF];
+ u32 hori_shift;
+ u32 vert_shift;
+};
+
+/* Default ParamCtrl: all controls enabled */
+#define HVSRC_PARAM_CTRL_DFLT 0xFFFFFFFF
+
+struct sti_hqvdp_iqi {
+ u32 config;
+ u32 demo_wind_size;
+ u32 pk_config;
+ u32 coeff0_coeff1;
+ u32 coeff2_coeff3;
+ u32 coeff4;
+ u32 pk_lut;
+ u32 pk_gain;
+ u32 pk_coring_level;
+ u32 cti_config;
+ u32 le_config;
+ u32 le_lut[64];
+ u32 con_bri;
+ u32 sat_gain;
+ u32 pxf_conf;
+ u32 default_color;
+};
+
+/* Default Config : IQI bypassed */
+#define IQI_CONFIG_DFLT 0x00000001
+/* Default Contrast & Brightness gain = 256 */
+#define IQI_CON_BRI_DFLT 0x00000100
+/* Default Saturation gain = 256 */
+#define IQI_SAT_GAIN_DFLT 0x00000100
+/* Default PxfConf : P2I bypassed */
+#define IQI_PXF_CONF_DFLT 0x00000001
+
+struct sti_hqvdp_top_status {
+ u32 processing_time;
+ u32 input_y_crc;
+ u32 input_uv_crc;
+};
+
+struct sti_hqvdp_fmd_status {
+ u32 fmd_repeat_move_status;
+ u32 fmd_scene_count_status;
+ u32 cfd_sum;
+ u32 field_sum;
+ u32 next_y_fmd_crc;
+ u32 next_next_y_fmd_crc;
+ u32 next_next_next_y_fmd_crc;
+};
+
+struct sti_hqvdp_csdi_status {
+ u32 prev_y_csdi_crc;
+ u32 cur_y_csdi_crc;
+ u32 next_y_csdi_crc;
+ u32 prev_uv_csdi_crc;
+ u32 cur_uv_csdi_crc;
+ u32 next_uv_csdi_crc;
+ u32 y_csdi_crc;
+ u32 uv_csdi_crc;
+ u32 uv_cup_crc;
+ u32 mot_csdi_crc;
+ u32 mot_cur_csdi_crc;
+ u32 mot_prev_csdi_crc;
+};
+
+struct sti_hqvdp_hvsrc_status {
+ u32 y_hvsrc_crc;
+ u32 u_hvsrc_crc;
+ u32 v_hvsrc_crc;
+};
+
+struct sti_hqvdp_iqi_status {
+ u32 pxf_it_status;
+ u32 y_iqi_crc;
+ u32 u_iqi_crc;
+ u32 v_iqi_crc;
+};
+
+/* Main commands. We use 2 commands: one being processed by the firmware and
+ * one ready to be fetched upon the next Vsync */
+#define NB_VDP_CMD 2
+
+struct sti_hqvdp_cmd {
+ struct sti_hqvdp_top top;
+ struct sti_hqvdp_vc1re vc1re;
+ struct sti_hqvdp_fmd fmd;
+ struct sti_hqvdp_csdi csdi;
+ struct sti_hqvdp_hvsrc hvsrc;
+ struct sti_hqvdp_iqi iqi;
+ struct sti_hqvdp_top_status top_status;
+ struct sti_hqvdp_fmd_status fmd_status;
+ struct sti_hqvdp_csdi_status csdi_status;
+ struct sti_hqvdp_hvsrc_status hvsrc_status;
+ struct sti_hqvdp_iqi_status iqi_status;
+};
+
+/*
+ * STI HQVDP structure
+ *
+ * @dev: driver device
+ * @drm_dev: the drm device
+ * @regs: registers
+ * @layer: layer structure for hqvdp itself
+ * @vid_plane: VID plug used as link with compositor IP
+ * @clk: IP clock
+ * @clk_pix_main: pix main clock
+ * @reset: reset control
+ * @vtg_nb: notifier to handle VTG Vsync
+ * @btm_field_pending: is there any bottom field (interlaced frame) to display
+ * @curr_field_count: number of field updates
+ * @last_field_count: number of field updates since last fps measure
+ * @hqvdp_cmd: buffer of commands
+ * @hqvdp_cmd_paddr: physical address of hqvdp_cmd
+ * @vtg: vtg for main data path
+ */
+struct sti_hqvdp {
+ struct device *dev;
+ struct drm_device *drm_dev;
+ void __iomem *regs;
+ struct sti_layer layer;
+ struct drm_plane *vid_plane;
+ struct clk *clk;
+ struct clk *clk_pix_main;
+ struct reset_control *reset;
+ struct notifier_block vtg_nb;
+ bool btm_field_pending;
+ unsigned int curr_field_count;
+ unsigned int last_field_count;
+ void *hqvdp_cmd;
+ dma_addr_t hqvdp_cmd_paddr;
+ struct sti_vtg *vtg;
+};
+
+#define to_sti_hqvdp(x) container_of(x, struct sti_hqvdp, layer)
+
+static const uint32_t hqvdp_supported_formats[] = {
+ DRM_FORMAT_NV12,
+};
+
+static const uint32_t *sti_hqvdp_get_formats(struct sti_layer *layer)
+{
+ return hqvdp_supported_formats;
+}
+
+static unsigned int sti_hqvdp_get_nb_formats(struct sti_layer *layer)
+{
+ return ARRAY_SIZE(hqvdp_supported_formats);
+}
+
+/**
+ * sti_hqvdp_get_free_cmd
+ * @hqvdp: hqvdp structure
+ *
+ * Look for a hqvdp_cmd that is not being used (or about to be used) by the FW.
+ *
+ * RETURNS:
+ * the offset of the command to be used.
+ * -1 in error cases
+ */
+static int sti_hqvdp_get_free_cmd(struct sti_hqvdp *hqvdp)
+{
+ int curr_cmd, next_cmd;
+ dma_addr_t cmd = hqvdp->hqvdp_cmd_paddr;
+ int i;
+
+ curr_cmd = readl(hqvdp->regs + HQVDP_MBX_CURRENT_CMD);
+ next_cmd = readl(hqvdp->regs + HQVDP_MBX_NEXT_CMD);
+
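+ /* a command slot is free when the firmware is neither processing it
+ * (CURRENT_CMD) nor has it queued for the next vsync (NEXT_CMD) */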
+ for (i = 0; i < NB_VDP_CMD; i++) {
+ if ((cmd != curr_cmd) && (cmd != next_cmd))
+ return i * sizeof(struct sti_hqvdp_cmd);
+ cmd += sizeof(struct sti_hqvdp_cmd);
+ }
+
+ return -1;
+}
+
+/**
+ * sti_hqvdp_get_curr_cmd
+ * @hqvdp: hqvdp structure
+ *
+ * Look for the hqvdp_cmd that is being used by the FW.
+ *
+ * RETURNS:
+ * the offset of the command to be used.
+ * -1 in error cases
+ */
+static int sti_hqvdp_get_curr_cmd(struct sti_hqvdp *hqvdp)
+{
+ int curr_cmd;
+ dma_addr_t cmd = hqvdp->hqvdp_cmd_paddr;
+ unsigned int i;
+
+ curr_cmd = readl(hqvdp->regs + HQVDP_MBX_CURRENT_CMD);
+
+ for (i = 0; i < NB_VDP_CMD; i++) {
+ if (cmd == curr_cmd)
+ return i * sizeof(struct sti_hqvdp_cmd);
+
+ cmd += sizeof(struct sti_hqvdp_cmd);
+ }
+
+ return -1;
+}
+
+/**
+ * sti_hqvdp_update_hvsrc
+ * @orient: horizontal or vertical
+ * @scale: scaling/zoom factor
+ * @hvsrc: the structure containing the LUT coef
+ *
+ * Update the Y and C Lut coef, as well as the shift param
+ *
+ * RETURNS:
+ * None.
+ */
+static void sti_hqvdp_update_hvsrc(enum sti_hvsrc_orient orient, int scale,
+ struct sti_hqvdp_hvsrc *hvsrc)
+{
+ const int *coef_c, *coef_y;
+ int shift_c, shift_y;
+
+ /* Get the appropriate coef tables */
+ if (scale < SCALE_MAX_FOR_LEG_LUT_F) {
+ coef_y = coef_lut_f_y_legacy;
+ coef_c = coef_lut_f_c_legacy;
+ shift_y = SHIFT_LUT_F_Y_LEGACY;
+ shift_c = SHIFT_LUT_F_C_LEGACY;
+ } else if (scale < SCALE_MAX_FOR_LEG_LUT_E) {
+ coef_y = coef_lut_e_y_legacy;
+ coef_c = coef_lut_e_c_legacy;
+ shift_y = SHIFT_LUT_E_Y_LEGACY;
+ shift_c = SHIFT_LUT_E_C_LEGACY;
+ } else if (scale < SCALE_MAX_FOR_LEG_LUT_D) {
+ coef_y = coef_lut_d_y_legacy;
+ coef_c = coef_lut_d_c_legacy;
+ shift_y = SHIFT_LUT_D_Y_LEGACY;
+ shift_c = SHIFT_LUT_D_C_LEGACY;
+ } else if (scale < SCALE_MAX_FOR_LEG_LUT_C) {
+ coef_y = coef_lut_c_y_legacy;
+ coef_c = coef_lut_c_c_legacy;
+ shift_y = SHIFT_LUT_C_Y_LEGACY;
+ shift_c = SHIFT_LUT_C_C_LEGACY;
+ } else if (scale == SCALE_MAX_FOR_LEG_LUT_C) {
+ coef_y = coef_c = coef_lut_b;
+ shift_y = shift_c = SHIFT_LUT_B;
+ } else {
+ coef_y = coef_c = coef_lut_a_legacy;
+ shift_y = shift_c = SHIFT_LUT_A_LEGACY;
+ }
+
+ if (orient == HVSRC_HORI) {
+ hvsrc->hori_shift = (shift_c << 16) | shift_y;
+ memcpy(hvsrc->yh_coef, coef_y, sizeof(hvsrc->yh_coef));
+ memcpy(hvsrc->ch_coef, coef_c, sizeof(hvsrc->ch_coef));
+ } else {
+ hvsrc->vert_shift = (shift_c << 16) | shift_y;
+ memcpy(hvsrc->yv_coef, coef_y, sizeof(hvsrc->yv_coef));
+ memcpy(hvsrc->cv_coef, coef_c, sizeof(hvsrc->cv_coef));
+ }
+}
+
+/**
+ * sti_hqvdp_check_hw_scaling
+ * @layer: hqvdp layer
+ *
+ * Check if the HW is able to perform the scaling request
+ * The firmware scaling limitation is "CEIL(1/Zy) <= FLOOR(LFW)" where:
+ * Zy = OutputHeight / InputHeight
+ * LFW = (Tx * IPClock) / (MaxNbCycles * Cp)
+ * Tx : Total video mode horizontal resolution
+ * IPClock : HQVDP IP clock (Mhz)
+ * MaxNbCycles: max(InputWidth, OutputWidth)
+ * Cp: Video mode pixel clock (Mhz)
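+ * Example (assumed figures): a 1920x1080 mode with Tx = 2200, Cp = 148.5MHz
+ * and IPClock = 400MHz, downscaled to 960x540, gives CEIL(1/Zy) = 2 and
+ * FLOOR(LFW) = FLOOR((2200 * 400) / (1920 * 148.5)) = 3, so scaling is allowed.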
+ *
+ * RETURNS:
+ * True if the HW can scale.
+ */
+static bool sti_hqvdp_check_hw_scaling(struct sti_layer *layer)
+{
+ struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
+ unsigned long lfw;
+ unsigned int inv_zy;
+
+ lfw = layer->mode->htotal * (clk_get_rate(hqvdp->clk) / 1000000);
+ lfw /= max(layer->src_w, layer->dst_w) * layer->mode->clock / 1000;
+
+ inv_zy = DIV_ROUND_UP(layer->src_h, layer->dst_h);
+
+ return inv_zy <= lfw;
+}
+
+/**
+ * sti_hqvdp_prepare_layer
+ * @layer: hqvdp layer
+ * @first_prepare: true if it is the first time this function is called
+ *
+ * Prepares a command for the firmware
+ *
+ * RETURNS:
+ * 0 on success.
+ */
+static int sti_hqvdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
+{
+ struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
+ struct sti_hqvdp_cmd *cmd;
+ int scale_h, scale_v;
+ int cmd_offset;
+
+ dev_dbg(hqvdp->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
+
+ /* prepare and commit VID plane */
+ hqvdp->vid_plane->funcs->update_plane(hqvdp->vid_plane,
+ layer->crtc, layer->fb,
+ layer->dst_x, layer->dst_y,
+ layer->dst_w, layer->dst_h,
+ layer->src_x, layer->src_y,
+ layer->src_w, layer->src_h);
+
+ cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
+ if (cmd_offset == -1) {
+ DRM_ERROR("No available hqvdp_cmd now\n");
+ return -EBUSY;
+ }
+ cmd = hqvdp->hqvdp_cmd + cmd_offset;
+
+ if (!sti_hqvdp_check_hw_scaling(layer)) {
+ DRM_ERROR("Scaling beyond HW capabilities\n");
+ return -EINVAL;
+ }
+
+ /* Static parameters, defaulting to progressive mode */
+ cmd->top.config = TOP_CONFIG_PROGRESSIVE;
+ cmd->top.mem_format = TOP_MEM_FORMAT_DFLT;
+ cmd->hvsrc.param_ctrl = HVSRC_PARAM_CTRL_DFLT;
+ cmd->csdi.config = CSDI_CONFIG_PROG;
+
+ /* VC1RE, FMD bypassed : keep everything set to 0
+ * IQI/P2I bypassed */
+ cmd->iqi.config = IQI_CONFIG_DFLT;
+ cmd->iqi.con_bri = IQI_CON_BRI_DFLT;
+ cmd->iqi.sat_gain = IQI_SAT_GAIN_DFLT;
+ cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT;
+
+ /* Buffer planes address */
+ cmd->top.current_luma = (u32) layer->paddr + layer->offsets[0];
+ cmd->top.current_chroma = (u32) layer->paddr + layer->offsets[1];
+
+ /* Pitches */
+ cmd->top.luma_processed_pitch = cmd->top.luma_src_pitch =
+ layer->pitches[0];
+ cmd->top.chroma_processed_pitch = cmd->top.chroma_src_pitch =
+ layer->pitches[1];
+
+ /* Input / output size
+ * Align to upper even value */
+ layer->dst_w = ALIGN(layer->dst_w, 2);
+ layer->dst_h = ALIGN(layer->dst_h, 2);
+
+ if ((layer->src_w > MAX_WIDTH) || (layer->src_w < MIN_WIDTH) ||
+ (layer->src_h > MAX_HEIGHT) || (layer->src_h < MIN_HEIGHT) ||
+ (layer->dst_w > MAX_WIDTH) || (layer->dst_w < MIN_WIDTH) ||
+ (layer->dst_h > MAX_HEIGHT) || (layer->dst_h < MIN_HEIGHT)) {
+ DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n",
+ layer->src_w, layer->src_h,
+ layer->dst_w, layer->dst_h);
+ return -EINVAL;
+ }
+ cmd->top.input_viewport_size = cmd->top.input_frame_size =
+ layer->src_h << 16 | layer->src_w;
+ cmd->hvsrc.output_picture_size = layer->dst_h << 16 | layer->dst_w;
+ cmd->top.input_viewport_ori = layer->src_y << 16 | layer->src_x;
+
+ /* Handle interlaced */
+ if (layer->fb->flags & DRM_MODE_FB_INTERLACED) {
+ /* Top field to display */
+ cmd->top.config = TOP_CONFIG_INTER_TOP;
+
+ /* Update pitches and vert size */
+ cmd->top.input_frame_size = (layer->src_h / 2) << 16 |
+ layer->src_w;
+ cmd->top.luma_processed_pitch *= 2;
+ cmd->top.luma_src_pitch *= 2;
+ cmd->top.chroma_processed_pitch *= 2;
+ cmd->top.chroma_src_pitch *= 2;
+
+ /* Enable directional deinterlacing processing */
+ cmd->csdi.config = CSDI_CONFIG_INTER_DIR;
+ cmd->csdi.config2 = CSDI_CONFIG2_DFLT;
+ cmd->csdi.dcdi_config = CSDI_DCDI_CONFIG_DFLT;
+ }
+
+ /* Update hvsrc lut coef */
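+ /* scale is expressed in 1/SCALE_FACTOR units: e.g. a 2x downscale
+ * gives 4096 (legacy E LUT), a 1:1 copy gives 8192 (LUT B) */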
+ scale_h = SCALE_FACTOR * layer->dst_w / layer->src_w;
+ sti_hqvdp_update_hvsrc(HVSRC_HORI, scale_h, &cmd->hvsrc);
+
+ scale_v = SCALE_FACTOR * layer->dst_h / layer->src_h;
+ sti_hqvdp_update_hvsrc(HVSRC_VERT, scale_v, &cmd->hvsrc);
+
+ if (first_prepare) {
+ /* Prevent VTG shutdown */
+ if (clk_prepare_enable(hqvdp->clk_pix_main)) {
+ DRM_ERROR("Failed to prepare/enable pix main clk\n");
+ return -ENXIO;
+ }
+
+ /* Register VTG Vsync callback to handle bottom fields */
+ if ((layer->fb->flags & DRM_MODE_FB_INTERLACED) &&
+ sti_vtg_register_client(hqvdp->vtg,
+ &hqvdp->vtg_nb, layer->mixer_id)) {
+ DRM_ERROR("Cannot register VTG notifier\n");
+ return -ENXIO;
+ }
+ }
+
+ return 0;
+}
+
+static int sti_hqvdp_commit_layer(struct sti_layer *layer)
+{
+ struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
+ int cmd_offset;
+
+ dev_dbg(hqvdp->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
+
+ cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
+ if (cmd_offset == -1) {
+ DRM_ERROR("No available hqvdp_cmd now\n");
+ return -EBUSY;
+ }
+
+ writel(hqvdp->hqvdp_cmd_paddr + cmd_offset,
+ hqvdp->regs + HQVDP_MBX_NEXT_CMD);
+
+ hqvdp->curr_field_count++;
+
+ /* Interlaced : get ready to display the bottom field at next Vsync */
+ if (layer->fb->flags & DRM_MODE_FB_INTERLACED)
+ hqvdp->btm_field_pending = true;
+
+ dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
+ __func__, hqvdp->hqvdp_cmd_paddr + cmd_offset);
+
+ return 0;
+}
+
+static int sti_hqvdp_disable_layer(struct sti_layer *layer)
+{
+ struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
+ int i;
+
+ DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer));
+
+ /* Unregister VTG Vsync callback */
+ if ((layer->fb->flags & DRM_MODE_FB_INTERLACED) &&
+ sti_vtg_unregister_client(hqvdp->vtg, &hqvdp->vtg_nb))
+ DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
+
+ /* Set next cmd to NULL */
+ writel(0, hqvdp->regs + HQVDP_MBX_NEXT_CMD);
+
+ for (i = 0; i < POLL_MAX_ATTEMPT; i++) {
+ if (readl(hqvdp->regs + HQVDP_MBX_INFO_XP70)
+ & INFO_XP70_FW_READY)
+ break;
+ msleep(POLL_DELAY_MS);
+ }
+
+ /* VTG can stop now */
+ clk_disable_unprepare(hqvdp->clk_pix_main);
+
+ if (i == POLL_MAX_ATTEMPT) {
+ DRM_ERROR("XP70 could not revert to idle\n");
+ return -ENXIO;
+ }
+
+ /* disable VID plane */
+ hqvdp->vid_plane->funcs->disable_plane(hqvdp->vid_plane);
+
+ return 0;
+}
+
+/**
+ * sti_vdp_vtg_cb
+ * @nb: notifier block
+ * @evt: event message
+ * @data: private data
+ *
+ * Handle VTG Vsync event, display pending bottom field
+ *
+ * RETURNS:
+ * 0 on success.
+ */
+int sti_hqvdp_vtg_cb(struct notifier_block *nb, unsigned long evt, void *data)
+{
+ struct sti_hqvdp *hqvdp = container_of(nb, struct sti_hqvdp, vtg_nb);
+ int btm_cmd_offset, top_cmd_offset;
+ struct sti_hqvdp_cmd *btm_cmd, *top_cmd;
+
+ if ((evt != VTG_TOP_FIELD_EVENT) && (evt != VTG_BOTTOM_FIELD_EVENT)) {
+ DRM_DEBUG_DRIVER("Unknown event\n");
+ return 0;
+ }
+
+ if (hqvdp->btm_field_pending) {
+ /* Create the btm field command from the current one */
+ btm_cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
+ top_cmd_offset = sti_hqvdp_get_curr_cmd(hqvdp);
+ if ((btm_cmd_offset == -1) || (top_cmd_offset == -1)) {
+ DRM_ERROR("Cannot get cmds, skip btm field\n");
+ return -EBUSY;
+ }
+
+ btm_cmd = hqvdp->hqvdp_cmd + btm_cmd_offset;
+ top_cmd = hqvdp->hqvdp_cmd + top_cmd_offset;
+
+ memcpy(btm_cmd, top_cmd, sizeof(*btm_cmd));
+
+ btm_cmd->top.config = TOP_CONFIG_INTER_BTM;
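+ /* the bottom field starts one frame line lower; as the pitches were
+ * doubled for interlace, that offset is half the source pitch */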
+ btm_cmd->top.current_luma +=
+ btm_cmd->top.luma_src_pitch / 2;
+ btm_cmd->top.current_chroma +=
+ btm_cmd->top.chroma_src_pitch / 2;
+
+ /* Post the command to mailbox */
+ writel(hqvdp->hqvdp_cmd_paddr + btm_cmd_offset,
+ hqvdp->regs + HQVDP_MBX_NEXT_CMD);
+
+ hqvdp->curr_field_count++;
+ hqvdp->btm_field_pending = false;
+
+ dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
+ __func__, hqvdp->hqvdp_cmd_paddr);
+ }
+
+ return 0;
+}
+
+static struct drm_plane *sti_hqvdp_find_vid(struct drm_device *dev, int id)
+{
+ struct drm_plane *plane;
+
+ list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+ struct sti_layer *layer = to_sti_layer(plane);
+
+ if (layer->desc == id)
+ return plane;
+ }
+
+ return NULL;
+}
+
+static void sti_hqvd_init(struct sti_layer *layer)
+{
+ struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
+ int size;
+
+ /* find the plane matching with vid 0 */
+ hqvdp->vid_plane = sti_hqvdp_find_vid(hqvdp->drm_dev, STI_VID_0);
+ if (!hqvdp->vid_plane) {
+ DRM_ERROR("Cannot find Main video layer\n");
+ return;
+ }
+
+ hqvdp->vtg_nb.notifier_call = sti_hqvdp_vtg_cb;
+
+ /* Allocate memory for the VDP commands */
+ size = NB_VDP_CMD * sizeof(struct sti_hqvdp_cmd);
+ hqvdp->hqvdp_cmd = dma_alloc_writecombine(hqvdp->dev, size,
+ &hqvdp->hqvdp_cmd_paddr,
+ GFP_KERNEL | GFP_DMA);
+ if (!hqvdp->hqvdp_cmd) {
+ DRM_ERROR("Failed to allocate memory for VDP cmd\n");
+ return;
+ }
+
+ memset(hqvdp->hqvdp_cmd, 0, size);
+}
+
+static const struct sti_layer_funcs hqvdp_ops = {
+ .get_formats = sti_hqvdp_get_formats,
+ .get_nb_formats = sti_hqvdp_get_nb_formats,
+ .init = sti_hqvd_init,
+ .prepare = sti_hqvdp_prepare_layer,
+ .commit = sti_hqvdp_commit_layer,
+ .disable = sti_hqvdp_disable_layer,
+};
+
+struct sti_layer *sti_hqvdp_create(struct device *dev)
+{
+ struct sti_hqvdp *hqvdp = dev_get_drvdata(dev);
+
+ hqvdp->layer.ops = &hqvdp_ops;
+
+ return &hqvdp->layer;
+}
+EXPORT_SYMBOL(sti_hqvdp_create);
+
+static void sti_hqvdp_init_plugs(struct sti_hqvdp *hqvdp)
+{
+ /* Configure Plugs (same for RD & WR) */
+ writel(PLUG_PAGE_SIZE_256, hqvdp->regs + HQVDP_RD_PLUG_PAGE_SIZE);
+ writel(PLUG_MIN_OPC_8, hqvdp->regs + HQVDP_RD_PLUG_MIN_OPC);
+ writel(PLUG_MAX_OPC_64, hqvdp->regs + HQVDP_RD_PLUG_MAX_OPC);
+ writel(PLUG_MAX_CHK_2X, hqvdp->regs + HQVDP_RD_PLUG_MAX_CHK);
+ writel(PLUG_MAX_MSG_1X, hqvdp->regs + HQVDP_RD_PLUG_MAX_MSG);
+ writel(PLUG_MIN_SPACE_1, hqvdp->regs + HQVDP_RD_PLUG_MIN_SPACE);
+ writel(PLUG_CONTROL_ENABLE, hqvdp->regs + HQVDP_RD_PLUG_CONTROL);
+
+ writel(PLUG_PAGE_SIZE_256, hqvdp->regs + HQVDP_WR_PLUG_PAGE_SIZE);
+ writel(PLUG_MIN_OPC_8, hqvdp->regs + HQVDP_WR_PLUG_MIN_OPC);
+ writel(PLUG_MAX_OPC_64, hqvdp->regs + HQVDP_WR_PLUG_MAX_OPC);
+ writel(PLUG_MAX_CHK_2X, hqvdp->regs + HQVDP_WR_PLUG_MAX_CHK);
+ writel(PLUG_MAX_MSG_1X, hqvdp->regs + HQVDP_WR_PLUG_MAX_MSG);
+ writel(PLUG_MIN_SPACE_1, hqvdp->regs + HQVDP_WR_PLUG_MIN_SPACE);
+ writel(PLUG_CONTROL_ENABLE, hqvdp->regs + HQVDP_WR_PLUG_CONTROL);
+}
+
+/**
+ * sti_hqvdp_start_xp70
+ * @firmware: firmware found
+ * @ctxt: hqvdp structure
+ *
+ * Run the xP70 initialization sequence
+ */
+static void sti_hqvdp_start_xp70(const struct firmware *firmware, void *ctxt)
+{
+ struct sti_hqvdp *hqvdp = ctxt;
+ u32 *fw_rd_plug, *fw_wr_plug, *fw_pmem, *fw_dmem;
+ u8 *data;
+ int i;
+ struct fw_header {
+ int rd_size;
+ int wr_size;
+ int pmem_size;
+ int dmem_size;
+ } *header;
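+ /* the firmware blob is expected to be laid out as:
+ * header | rd plug ucode | wr plug ucode | PMEM | DMEM (sizes per header) */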
+
+ DRM_DEBUG_DRIVER("\n");
+ /* Check firmware parts */
+ if (!firmware) {
+ DRM_ERROR("Firmware not available\n");
+ return;
+ }
+
+ header = (struct fw_header *) firmware->data;
+ if (firmware->size < sizeof(*header)) {
+ DRM_ERROR("Invalid firmware size (%d)\n", firmware->size);
+ goto out;
+ }
+ if ((sizeof(*header) + header->rd_size + header->wr_size +
+ header->pmem_size + header->dmem_size) != firmware->size) {
+ DRM_ERROR("Invalid fmw structure (%d+%d+%d+%d+%d != %d)\n",
+ sizeof(*header), header->rd_size, header->wr_size,
+ header->pmem_size, header->dmem_size,
+ firmware->size);
+ goto out;
+ }
+
+ data = (u8 *) firmware->data;
+ data += sizeof(*header);
+ fw_rd_plug = (void *) data;
+ data += header->rd_size;
+ fw_wr_plug = (void *) data;
+ data += header->wr_size;
+ fw_pmem = (void *) data;
+ data += header->pmem_size;
+ fw_dmem = (void *) data;
+
+ /* Enable clock */
+ if (clk_prepare_enable(hqvdp->clk))
+ DRM_ERROR("Failed to prepare/enable HQVDP clk\n");
+
+ /* Reset */
+ writel(SW_RESET_CTRL_FULL, hqvdp->regs + HQVDP_MBX_SW_RESET_CTRL);
+
+ for (i = 0; i < POLL_MAX_ATTEMPT; i++) {
+ if (readl(hqvdp->regs + HQVDP_MBX_STARTUP_CTRL1)
+ & STARTUP_CTRL1_RST_DONE)
+ break;
+ msleep(POLL_DELAY_MS);
+ }
+ if (i == POLL_MAX_ATTEMPT) {
+ DRM_ERROR("Could not reset\n");
+ goto out;
+ }
+
+ /* Init Read & Write plugs */
+ for (i = 0; i < header->rd_size / 4; i++)
+ writel(fw_rd_plug[i], hqvdp->regs + HQVDP_RD_PLUG + i * 4);
+ for (i = 0; i < header->wr_size / 4; i++)
+ writel(fw_wr_plug[i], hqvdp->regs + HQVDP_WR_PLUG + i * 4);
+
+ sti_hqvdp_init_plugs(hqvdp);
+
+ /* Authorize Idle Mode */
+ writel(STARTUP_CTRL1_AUTH_IDLE, hqvdp->regs + HQVDP_MBX_STARTUP_CTRL1);
+
+ /* Prevent VTG interruption during the boot */
+ writel(SOFT_VSYNC_SW_CTRL_IRQ, hqvdp->regs + HQVDP_MBX_SOFT_VSYNC);
+ writel(0, hqvdp->regs + HQVDP_MBX_NEXT_CMD);
+
+ /* Download PMEM & DMEM */
+ for (i = 0; i < header->pmem_size / 4; i++)
+ writel(fw_pmem[i], hqvdp->regs + HQVDP_PMEM + i * 4);
+ for (i = 0; i < header->dmem_size / 4; i++)
+ writel(fw_dmem[i], hqvdp->regs + HQVDP_DMEM + i * 4);
+
+ /* Enable fetch */
+ writel(STARTUP_CTRL2_FETCH_EN, hqvdp->regs + HQVDP_MBX_STARTUP_CTRL2);
+
+ /* Wait for the end of boot */
+ for (i = 0; i < POLL_MAX_ATTEMPT; i++) {
+ if (readl(hqvdp->regs + HQVDP_MBX_INFO_XP70)
+ & INFO_XP70_FW_READY)
+ break;
+ msleep(POLL_DELAY_MS);
+ }
+ if (i == POLL_MAX_ATTEMPT) {
+ DRM_ERROR("Could not boot\n");
+ goto out;
+ }
+
+ /* Launch Vsync */
+ writel(SOFT_VSYNC_HW, hqvdp->regs + HQVDP_MBX_SOFT_VSYNC);
+
+ DRM_INFO("HQVDP XP70 started\n");
+out:
+ release_firmware(firmware);
+}
+
+int sti_hqvdp_bind(struct device *dev, struct device *master, void *data)
+{
+ struct sti_hqvdp *hqvdp = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ struct sti_layer *layer;
+ int err;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ hqvdp->drm_dev = drm_dev;
+
+ /* Request the firmware; sti_hqvdp_start_xp70() will boot the xP70 once it is loaded */
+ err = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+ HQVDP_FMW_NAME, hqvdp->dev,
+ GFP_KERNEL, hqvdp, sti_hqvdp_start_xp70);
+ if (err) {
+ DRM_ERROR("Can't get HQVDP firmware\n");
+ return err;
+ }
+
+ layer = sti_layer_create(hqvdp->dev, STI_HQVDP_0, hqvdp->regs);
+ if (!layer) {
+ DRM_ERROR("Can't create HQVDP plane\n");
+ return -ENOMEM;
+ }
+
+ sti_drm_plane_init(drm_dev, layer, 1, DRM_PLANE_TYPE_OVERLAY);
+
+ return 0;
+}
+
+static void sti_hqvdp_unbind(struct device *dev,
+ struct device *master, void *data)
+{
+ /* do nothing */
+}
+
+static const struct component_ops sti_hqvdp_ops = {
+ .bind = sti_hqvdp_bind,
+ .unbind = sti_hqvdp_unbind,
+};
+
+static int sti_hqvdp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *vtg_np;
+ struct sti_hqvdp *hqvdp;
+ struct resource *res;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ hqvdp = devm_kzalloc(dev, sizeof(*hqvdp), GFP_KERNEL);
+ if (!hqvdp) {
+ DRM_ERROR("Failed to allocate HQVDP context\n");
+ return -ENOMEM;
+ }
+
+ hqvdp->dev = dev;
+
+ /* Get Memory resources */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ DRM_ERROR("Get memory resource failed\n");
+ return -ENXIO;
+ }
+ hqvdp->regs = devm_ioremap(dev, res->start, resource_size(res));
+ if (hqvdp->regs == NULL) {
+ DRM_ERROR("Register mapping failed\n");
+ return -ENXIO;
+ }
+
+ /* Get clock resources */
+ hqvdp->clk = devm_clk_get(dev, "hqvdp");
+ hqvdp->clk_pix_main = devm_clk_get(dev, "pix_main");
+ if (IS_ERR(hqvdp->clk) || IS_ERR(hqvdp->clk_pix_main)) {
+ DRM_ERROR("Cannot get clocks\n");
+ return -ENXIO;
+ }
+
+ /* Get reset resources */
+ hqvdp->reset = devm_reset_control_get(dev, "hqvdp");
+ if (!IS_ERR(hqvdp->reset))
+ reset_control_deassert(hqvdp->reset);
+
+ vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 0);
+ if (vtg_np)
+ hqvdp->vtg = of_vtg_find(vtg_np);
+
+ platform_set_drvdata(pdev, hqvdp);
+
+ return component_add(&pdev->dev, &sti_hqvdp_ops);
+}
+
+static int sti_hqvdp_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &sti_hqvdp_ops);
+ return 0;
+}
+
+static struct of_device_id hqvdp_of_match[] = {
+ { .compatible = "st,stih407-hqvdp", },
+ { /* end node */ }
+};
+MODULE_DEVICE_TABLE(of, hqvdp_of_match);
+
+struct platform_driver sti_hqvdp_driver = {
+ .driver = {
+ .name = "sti-hqvdp",
+ .owner = THIS_MODULE,
+ .of_match_table = hqvdp_of_match,
+ },
+ .probe = sti_hqvdp_probe,
+ .remove = sti_hqvdp_remove,
+};
+
+module_platform_driver(sti_hqvdp_driver);
+
+MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.h b/drivers/gpu/drm/sti/sti_hqvdp.h
new file mode 100644
index 00000000000..cd5ecd0a6de
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_hqvdp.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_HQVDP_H_
+#define _STI_HQVDP_H_
+
+struct sti_layer *sti_hqvdp_create(struct device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_hqvdp_lut.h b/drivers/gpu/drm/sti/sti_hqvdp_lut.h
new file mode 100644
index 00000000000..619af7f4384
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_hqvdp_lut.h
@@ -0,0 +1,373 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_HQVDP_LUT_H_
+#define _STI_HQVDP_LUT_H_
+
+#define NB_COEF 128
+
+#define SHIFT_LUT_A_LEGACY 8
+#define SHIFT_LUT_B 8
+#define SHIFT_LUT_C_Y_LEGACY 8
+#define SHIFT_LUT_C_C_LEGACY 8
+#define SHIFT_LUT_D_Y_LEGACY 8
+#define SHIFT_LUT_D_C_LEGACY 8
+#define SHIFT_LUT_E_Y_LEGACY 8
+#define SHIFT_LUT_E_C_LEGACY 8
+#define SHIFT_LUT_F_Y_LEGACY 8
+#define SHIFT_LUT_F_C_LEGACY 8
+
+static const u32 coef_lut_a_legacy[NB_COEF] = {
+ 0x0000ffff, 0x00010000, 0x000100ff, 0x00000000,
+ 0x00000000, 0x00050000, 0xfffc00ff, 0x00000000,
+ 0x00000000, 0x00090000, 0xfff900fe, 0x00000000,
+ 0x00000000, 0x0010ffff, 0xfff600fb, 0x00000000,
+ 0x00000000, 0x0017fffe, 0xfff400f7, 0x00000000,
+ 0x00000000, 0x001ffffd, 0xfff200f2, 0x00000000,
+ 0x00000000, 0x0027fffc, 0xfff100ec, 0x00000000,
+ 0x00000000, 0x0030fffb, 0xfff000e5, 0x00000000,
+ 0x00000000, 0x003afffa, 0xffee00de, 0x00000000,
+ 0x00000000, 0x0044fff9, 0xffed00d6, 0x00000000,
+ 0x00000000, 0x004efff8, 0xffed00cd, 0x00000000,
+ 0x00000000, 0x0059fff6, 0xffed00c4, 0x00000000,
+ 0x00000000, 0x0064fff5, 0xffed00ba, 0x00000000,
+ 0x00000000, 0x006ffff3, 0xffee00b0, 0x00000000,
+ 0x00000000, 0x007afff2, 0xffee00a6, 0x00000000,
+ 0x00000000, 0x0085fff1, 0xffef009b, 0x00000000,
+ 0x00000000, 0x0090fff0, 0xfff00090, 0x00000000,
+ 0x00000000, 0x009bffef, 0xfff10085, 0x00000000,
+ 0x00000000, 0x00a6ffee, 0xfff2007a, 0x00000000,
+ 0x00000000, 0x00b0ffee, 0xfff3006f, 0x00000000,
+ 0x00000000, 0x00baffed, 0xfff50064, 0x00000000,
+ 0x00000000, 0x00c4ffed, 0xfff60059, 0x00000000,
+ 0x00000000, 0x00cdffed, 0xfff8004e, 0x00000000,
+ 0x00000000, 0x00d6ffed, 0xfff90044, 0x00000000,
+ 0x00000000, 0x00deffee, 0xfffa003a, 0x00000000,
+ 0x00000000, 0x00e5fff0, 0xfffb0030, 0x00000000,
+ 0x00000000, 0x00ecfff1, 0xfffc0027, 0x00000000,
+ 0x00000000, 0x00f2fff2, 0xfffd001f, 0x00000000,
+ 0x00000000, 0x00f7fff4, 0xfffe0017, 0x00000000,
+ 0x00000000, 0x00fbfff6, 0xffff0010, 0x00000000,
+ 0x00000000, 0x00fefff9, 0x00000009, 0x00000000,
+ 0x00000000, 0x00fffffc, 0x00000005, 0x00000000
+};
+
+static const u32 coef_lut_b[NB_COEF] = {
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000
+};
+
+static const u32 coef_lut_c_y_legacy[NB_COEF] = {
+ 0x00060004, 0x0038ffe1, 0x003800be, 0x0006ffe1,
+ 0x00050005, 0x0042ffe1, 0x003800b3, 0x0007ffe1,
+ 0x00040006, 0x0046ffe1, 0x003300b2, 0x0008ffe2,
+ 0x00030007, 0x004cffe1, 0x002e00b1, 0x0008ffe2,
+ 0x00020006, 0x0051ffe2, 0x002900b0, 0x0009ffe3,
+ 0x00010008, 0x0056ffe2, 0x002400ae, 0x0009ffe4,
+ 0xffff0008, 0x005cffe3, 0x001f00ad, 0x000affe4,
+ 0xfffe0008, 0x0062ffe4, 0x001a00ab, 0x000affe5,
+ 0xfffd000a, 0x0066ffe5, 0x001500a8, 0x000bffe6,
+ 0xfffc0009, 0x006bffe7, 0x001100a5, 0x000bffe8,
+ 0xfffa000a, 0x0070ffe8, 0x000d00a3, 0x000bffe9,
+ 0xfff9000b, 0x0076ffea, 0x0008009f, 0x000bffea,
+ 0xfff7000b, 0x007affec, 0x0005009b, 0x000cffec,
+ 0xfff6000b, 0x007effef, 0x00010098, 0x000cffed,
+ 0xfff4000b, 0x0084fff1, 0xfffd0095, 0x000cffee,
+ 0xfff3000b, 0x0088fff4, 0xfffa0090, 0x000cfff0,
+ 0xfff1000b, 0x008dfff7, 0xfff7008d, 0x000bfff1,
+ 0xfff0000c, 0x0090fffa, 0xfff40088, 0x000bfff3,
+ 0xffee000c, 0x0095fffd, 0xfff10084, 0x000bfff4,
+ 0xffed000c, 0x00980001, 0xffef007e, 0x000bfff6,
+ 0xffec000c, 0x009b0005, 0xffec007a, 0x000bfff7,
+ 0xffea000b, 0x009f0008, 0xffea0076, 0x000bfff9,
+ 0xffe9000b, 0x00a3000d, 0xffe80070, 0x000afffa,
+ 0xffe8000b, 0x00a50011, 0xffe7006b, 0x0009fffc,
+ 0xffe6000b, 0x00a80015, 0xffe50066, 0x000afffd,
+ 0xffe5000a, 0x00ab001a, 0xffe40062, 0x0008fffe,
+ 0xffe4000a, 0x00ad001f, 0xffe3005c, 0x0008ffff,
+ 0xffe40009, 0x00ae0024, 0xffe20056, 0x00080001,
+ 0xffe30009, 0x00b00029, 0xffe20051, 0x00060002,
+ 0xffe20008, 0x00b1002e, 0xffe1004c, 0x00070003,
+ 0xffe20008, 0x00b20033, 0xffe10046, 0x00060004,
+ 0xffe10007, 0x00b30038, 0xffe10042, 0x00050005
+};
+
+static const u32 coef_lut_c_c_legacy[NB_COEF] = {
+ 0x0001fff3, 0x003afffb, 0x003a00a1, 0x0001fffb,
+ 0x0001fff5, 0x0041fffb, 0x0038009a, 0x0001fffb,
+ 0x0001fff5, 0x0046fffb, 0x00340099, 0x0001fffb,
+ 0x0001fff7, 0x0049fffb, 0x00300098, 0x0001fffb,
+ 0x0001fff9, 0x004cfffb, 0x002d0096, 0x0001fffb,
+ 0x0001fffa, 0x004ffffc, 0x00290095, 0x0001fffb,
+ 0x0001fff9, 0x0054fffd, 0x00250093, 0x0001fffc,
+ 0x0001fffa, 0x0058fffd, 0x00220092, 0x0000fffc,
+ 0x0001fffb, 0x005bfffe, 0x001f0090, 0x0000fffc,
+ 0x0001fffd, 0x005effff, 0x001c008c, 0x0000fffd,
+ 0x0001fffd, 0x00620000, 0x0019008a, 0x0000fffd,
+ 0x0001fffe, 0x00660001, 0x00160088, 0xfffffffd,
+ 0x0000fffe, 0x006a0003, 0x00130085, 0xfffffffe,
+ 0x0000fffe, 0x006e0004, 0x00100083, 0xfffffffe,
+ 0x0000fffe, 0x00710006, 0x000e007f, 0xffffffff,
+ 0x0000fffe, 0x00750008, 0x000c007c, 0xfffeffff,
+ 0xfffffffe, 0x0079000a, 0x000a0079, 0xfffeffff,
+ 0xfffffffe, 0x007c000c, 0x00080075, 0xfffe0000,
+ 0xffffffff, 0x007f000e, 0x00060071, 0xfffe0000,
+ 0xfffeffff, 0x00830010, 0x0004006e, 0xfffe0000,
+ 0xfffeffff, 0x00850013, 0x0003006a, 0xfffe0000,
+ 0xfffdffff, 0x00880016, 0x00010066, 0xfffe0001,
+ 0xfffd0000, 0x008a0019, 0x00000062, 0xfffd0001,
+ 0xfffd0000, 0x008c001c, 0xffff005e, 0xfffd0001,
+ 0xfffc0000, 0x0090001f, 0xfffe005b, 0xfffb0001,
+ 0xfffc0000, 0x00920022, 0xfffd0058, 0xfffa0001,
+ 0xfffc0001, 0x00930025, 0xfffd0054, 0xfff90001,
+ 0xfffb0001, 0x00950029, 0xfffc004f, 0xfffa0001,
+ 0xfffb0001, 0x0096002d, 0xfffb004c, 0xfff90001,
+ 0xfffb0001, 0x00980030, 0xfffb0049, 0xfff70001,
+ 0xfffb0001, 0x00990034, 0xfffb0046, 0xfff50001,
+ 0xfffb0001, 0x009a0038, 0xfffb0041, 0xfff50001
+};
+
+static const u32 coef_lut_d_y_legacy[NB_COEF] = {
+ 0xfff80009, 0x0046ffec, 0x004600a3, 0xfff8ffec,
+ 0xfff70009, 0x004effed, 0x0044009d, 0xfff9ffeb,
+ 0xfff6000a, 0x0052ffee, 0x003f009d, 0xfffaffea,
+ 0xfff50009, 0x0057ffef, 0x003b009d, 0xfffbffe9,
+ 0xfff50008, 0x005bfff0, 0x0037009c, 0xfffcffe9,
+ 0xfff40008, 0x005ffff2, 0x0033009b, 0xfffcffe9,
+ 0xfff30007, 0x0064fff3, 0x002f009b, 0xfffdffe8,
+ 0xfff20007, 0x0068fff5, 0x002b0099, 0xfffeffe8,
+ 0xfff10008, 0x006bfff7, 0x00270097, 0xffffffe8,
+ 0xfff00007, 0x006ffff9, 0x00230097, 0xffffffe8,
+ 0xffef0006, 0x0073fffb, 0x00200095, 0x0000ffe8,
+ 0xffee0005, 0x0077fffe, 0x001c0093, 0x0000ffe9,
+ 0xffee0005, 0x007a0000, 0x00180091, 0x0001ffe9,
+ 0xffed0005, 0x007d0003, 0x0015008e, 0x0002ffe9,
+ 0xffec0005, 0x00800006, 0x0012008b, 0x0002ffea,
+ 0xffeb0004, 0x00840008, 0x000e008a, 0x0003ffea,
+ 0xffeb0003, 0x0087000b, 0x000b0087, 0x0003ffeb,
+ 0xffea0003, 0x008a000e, 0x00080084, 0x0004ffeb,
+ 0xffea0002, 0x008b0012, 0x00060080, 0x0005ffec,
+ 0xffe90002, 0x008e0015, 0x0003007d, 0x0005ffed,
+ 0xffe90001, 0x00910018, 0x0000007a, 0x0005ffee,
+ 0xffe90000, 0x0093001c, 0xfffe0077, 0x0005ffee,
+ 0xffe80000, 0x00950020, 0xfffb0073, 0x0006ffef,
+ 0xffe8ffff, 0x00970023, 0xfff9006f, 0x0007fff0,
+ 0xffe8ffff, 0x00970027, 0xfff7006b, 0x0008fff1,
+ 0xffe8fffe, 0x0099002b, 0xfff50068, 0x0007fff2,
+ 0xffe8fffd, 0x009b002f, 0xfff30064, 0x0007fff3,
+ 0xffe9fffc, 0x009b0033, 0xfff2005f, 0x0008fff4,
+ 0xffe9fffc, 0x009c0037, 0xfff0005b, 0x0008fff5,
+ 0xffe9fffb, 0x009d003b, 0xffef0057, 0x0009fff5,
+ 0xffeafffa, 0x009d003f, 0xffee0052, 0x000afff6,
+ 0xffebfff9, 0x009d0044, 0xffed004e, 0x0009fff7
+};
+
+static const u32 coef_lut_d_c_legacy[NB_COEF] = {
+ 0xfffeffff, 0x003fffff, 0x003f0089, 0xfffeffff,
+ 0xfffe0000, 0x00460000, 0x0042007d, 0xfffffffe,
+ 0xfffe0000, 0x00490001, 0x003f007d, 0xfffffffd,
+ 0xfffd0001, 0x004b0002, 0x003c007d, 0x0000fffc,
+ 0xfffd0001, 0x004e0003, 0x0039007c, 0x0000fffc,
+ 0xfffc0001, 0x00510005, 0x0036007c, 0x0000fffb,
+ 0xfffc0001, 0x00540006, 0x0033007b, 0x0001fffa,
+ 0xfffc0003, 0x00550008, 0x00310078, 0x0001fffa,
+ 0xfffb0003, 0x00580009, 0x002e0078, 0x0001fffa,
+ 0xfffb0002, 0x005b000b, 0x002b0077, 0x0002fff9,
+ 0xfffa0003, 0x005e000d, 0x00280075, 0x0002fff9,
+ 0xfffa0002, 0x0060000f, 0x00260074, 0x0002fff9,
+ 0xfffa0004, 0x00610011, 0x00230072, 0x0002fff9,
+ 0xfffa0004, 0x00640013, 0x00200070, 0x0002fff9,
+ 0xfff90004, 0x00660015, 0x001e006e, 0x0003fff9,
+ 0xfff90004, 0x00680017, 0x001c006c, 0x0003fff9,
+ 0xfff90003, 0x006b0019, 0x0019006b, 0x0003fff9,
+ 0xfff90003, 0x006c001c, 0x00170068, 0x0004fff9,
+ 0xfff90003, 0x006e001e, 0x00150066, 0x0004fff9,
+ 0xfff90002, 0x00700020, 0x00130064, 0x0004fffa,
+ 0xfff90002, 0x00720023, 0x00110061, 0x0004fffa,
+ 0xfff90002, 0x00740026, 0x000f0060, 0x0002fffa,
+ 0xfff90002, 0x00750028, 0x000d005e, 0x0003fffa,
+ 0xfff90002, 0x0077002b, 0x000b005b, 0x0002fffb,
+ 0xfffa0001, 0x0078002e, 0x00090058, 0x0003fffb,
+ 0xfffa0001, 0x00780031, 0x00080055, 0x0003fffc,
+ 0xfffa0001, 0x007b0033, 0x00060054, 0x0001fffc,
+ 0xfffb0000, 0x007c0036, 0x00050051, 0x0001fffc,
+ 0xfffc0000, 0x007c0039, 0x0003004e, 0x0001fffd,
+ 0xfffc0000, 0x007d003c, 0x0002004b, 0x0001fffd,
+ 0xfffdffff, 0x007d003f, 0x00010049, 0x0000fffe,
+ 0xfffeffff, 0x007d0042, 0x00000046, 0x0000fffe
+};
+
+static const u32 coef_lut_e_y_legacy[NB_COEF] = {
+ 0xfff10001, 0x00490004, 0x00490083, 0xfff10004,
+ 0xfff10000, 0x00500006, 0x004b007b, 0xfff10002,
+ 0xfff10000, 0x00530007, 0x0048007b, 0xfff10001,
+ 0xfff10000, 0x00550009, 0x0046007a, 0xfff10000,
+ 0xfff1fffe, 0x0058000b, 0x0043007b, 0xfff2fffe,
+ 0xfff1ffff, 0x005a000d, 0x0040007a, 0xfff2fffd,
+ 0xfff1fffd, 0x005d000f, 0x003e007a, 0xfff2fffc,
+ 0xfff1fffd, 0x005f0011, 0x003b0079, 0xfff3fffb,
+ 0xfff1fffc, 0x00610013, 0x00390079, 0xfff3fffa,
+ 0xfff1fffb, 0x00640015, 0x00360079, 0xfff3fff9,
+ 0xfff1fffa, 0x00660017, 0x00340078, 0xfff4fff8,
+ 0xfff1fffb, 0x00680019, 0x00310077, 0xfff4fff7,
+ 0xfff2fff9, 0x006a001b, 0x002f0076, 0xfff5fff6,
+ 0xfff2fff9, 0x006c001e, 0x002c0075, 0xfff5fff5,
+ 0xfff2fff9, 0x006d0020, 0x002a0073, 0xfff6fff5,
+ 0xfff3fff7, 0x00700022, 0x00270073, 0xfff6fff4,
+ 0xfff3fff7, 0x00710025, 0x00250071, 0xfff7fff3,
+ 0xfff4fff6, 0x00730027, 0x00220070, 0xfff7fff3,
+ 0xfff5fff6, 0x0073002a, 0x0020006d, 0xfff9fff2,
+ 0xfff5fff5, 0x0075002c, 0x001e006c, 0xfff9fff2,
+ 0xfff6fff5, 0x0076002f, 0x001b006a, 0xfff9fff2,
+ 0xfff7fff4, 0x00770031, 0x00190068, 0xfffbfff1,
+ 0xfff8fff4, 0x00780034, 0x00170066, 0xfffafff1,
+ 0xfff9fff3, 0x00790036, 0x00150064, 0xfffbfff1,
+ 0xfffafff3, 0x00790039, 0x00130061, 0xfffcfff1,
+ 0xfffbfff3, 0x0079003b, 0x0011005f, 0xfffdfff1,
+ 0xfffcfff2, 0x007a003e, 0x000f005d, 0xfffdfff1,
+ 0xfffdfff2, 0x007a0040, 0x000d005a, 0xfffffff1,
+ 0xfffefff2, 0x007b0043, 0x000b0058, 0xfffefff1,
+ 0x0000fff1, 0x007a0046, 0x00090055, 0x0000fff1,
+ 0x0001fff1, 0x007b0048, 0x00070053, 0x0000fff1,
+ 0x0002fff1, 0x007b004b, 0x00060050, 0x0000fff1
+};
+
+static const u32 coef_lut_e_c_legacy[NB_COEF] = {
+ 0xfffa0001, 0x003f0010, 0x003f006d, 0xfffa0010,
+ 0xfffb0002, 0x00440011, 0x00440062, 0xfffa000e,
+ 0xfffb0001, 0x00460013, 0x00420062, 0xfffa000d,
+ 0xfffb0000, 0x00480014, 0x00410062, 0xfffa000c,
+ 0xfffb0001, 0x00490015, 0x003f0061, 0xfffb000b,
+ 0xfffb0000, 0x004b0017, 0x003d0061, 0xfffb000a,
+ 0xfffb0000, 0x004d0018, 0x003b0062, 0xfffb0008,
+ 0xfffcffff, 0x004f001a, 0x00390061, 0xfffb0007,
+ 0xfffc0000, 0x004f001c, 0x00380060, 0xfffb0006,
+ 0xfffcffff, 0x0052001d, 0x00360060, 0xfffb0005,
+ 0xfffdfffe, 0x0053001f, 0x00340060, 0xfffb0004,
+ 0xfffdfffe, 0x00540021, 0x0032005e, 0xfffc0004,
+ 0xfffeffff, 0x00550022, 0x0030005d, 0xfffc0003,
+ 0xfffeffff, 0x00560024, 0x002f005c, 0xfffc0002,
+ 0xfffffffd, 0x00580026, 0x002d005c, 0xfffc0001,
+ 0xfffffffd, 0x005a0027, 0x002b005c, 0xfffc0000,
+ 0x0000fffd, 0x005a0029, 0x0029005a, 0xfffd0000,
+ 0x0000fffc, 0x005c002b, 0x0027005a, 0xfffdffff,
+ 0x0001fffc, 0x005c002d, 0x00260058, 0xfffdffff,
+ 0x0002fffc, 0x005c002f, 0x00240056, 0xfffffffe,
+ 0x0003fffc, 0x005d0030, 0x00220055, 0xfffffffe,
+ 0x0004fffc, 0x005e0032, 0x00210054, 0xfffefffd,
+ 0x0004fffb, 0x00600034, 0x001f0053, 0xfffefffd,
+ 0x0005fffb, 0x00600036, 0x001d0052, 0xfffffffc,
+ 0x0006fffb, 0x00600038, 0x001c004f, 0x0000fffc,
+ 0x0007fffb, 0x00610039, 0x001a004f, 0xfffffffc,
+ 0x0008fffb, 0x0062003b, 0x0018004d, 0x0000fffb,
+ 0x000afffb, 0x0061003d, 0x0017004b, 0x0000fffb,
+ 0x000bfffb, 0x0061003f, 0x00150049, 0x0001fffb,
+ 0x000cfffa, 0x00620041, 0x00140048, 0x0000fffb,
+ 0x000dfffa, 0x00620042, 0x00130046, 0x0001fffb,
+ 0x000efffa, 0x00620044, 0x00110044, 0x0002fffb
+};
+
+static const u32 coef_lut_f_y_legacy[NB_COEF] = {
+ 0xfff6fff0, 0x00490012, 0x0049006e, 0xfff60012,
+ 0xfff7fff1, 0x004e0013, 0x00490068, 0xfff60010,
+ 0xfff7fff2, 0x004f0015, 0x00470067, 0xfff6000f,
+ 0xfff7fff5, 0x004f0017, 0x00450065, 0xfff6000e,
+ 0xfff8fff5, 0x00500018, 0x00440065, 0xfff6000c,
+ 0xfff8fff6, 0x0051001a, 0x00420064, 0xfff6000b,
+ 0xfff8fff6, 0x0052001c, 0x00400064, 0xfff6000a,
+ 0xfff9fff6, 0x0054001d, 0x003e0064, 0xfff60008,
+ 0xfff9fff8, 0x0054001f, 0x003c0063, 0xfff60007,
+ 0xfffafff8, 0x00550021, 0x003a0062, 0xfff60006,
+ 0xfffbfff7, 0x00560022, 0x00390062, 0xfff60005,
+ 0xfffbfff8, 0x00570024, 0x00370061, 0xfff60004,
+ 0xfffcfff8, 0x00580026, 0x00350060, 0xfff60003,
+ 0xfffdfff8, 0x00590028, 0x0033005f, 0xfff60002,
+ 0xfffdfff7, 0x005b002a, 0x0031005f, 0xfff60001,
+ 0xfffefff7, 0x005c002c, 0x002f005e, 0xfff60000,
+ 0xfffffff6, 0x005e002d, 0x002d005e, 0xfff6ffff,
+ 0x0000fff6, 0x005e002f, 0x002c005c, 0xfff7fffe,
+ 0x0001fff6, 0x005f0031, 0x002a005b, 0xfff7fffd,
+ 0x0002fff6, 0x005f0033, 0x00280059, 0xfff8fffd,
+ 0x0003fff6, 0x00600035, 0x00260058, 0xfff8fffc,
+ 0x0004fff6, 0x00610037, 0x00240057, 0xfff8fffb,
+ 0x0005fff6, 0x00620039, 0x00220056, 0xfff7fffb,
+ 0x0006fff6, 0x0062003a, 0x00210055, 0xfff8fffa,
+ 0x0007fff6, 0x0063003c, 0x001f0054, 0xfff8fff9,
+ 0x0008fff6, 0x0064003e, 0x001d0054, 0xfff6fff9,
+ 0x000afff6, 0x00640040, 0x001c0052, 0xfff6fff8,
+ 0x000bfff6, 0x00640042, 0x001a0051, 0xfff6fff8,
+ 0x000cfff6, 0x00650044, 0x00180050, 0xfff5fff8,
+ 0x000efff6, 0x00650045, 0x0017004f, 0xfff5fff7,
+ 0x000ffff6, 0x00670047, 0x0015004f, 0xfff2fff7,
+ 0x0010fff6, 0x00680049, 0x0013004e, 0xfff1fff7
+};
+
+static const u32 coef_lut_f_c_legacy[NB_COEF] = {
+ 0x0000fffb, 0x003a001a, 0x003a005d, 0x0000001a,
+ 0x0001fffb, 0x003f001b, 0x00400051, 0x00000019,
+ 0x0001fffc, 0x0040001c, 0x003f0051, 0x00000017,
+ 0x0002fffb, 0x0042001d, 0x003e0051, 0xffff0016,
+ 0x0002fffb, 0x0043001e, 0x003d0051, 0xffff0015,
+ 0x0003fffc, 0x00430020, 0x003b0050, 0xffff0014,
+ 0x0003fffb, 0x00450021, 0x003a0051, 0xfffe0013,
+ 0x0004fffc, 0x00450022, 0x00390050, 0xfffe0012,
+ 0x0005fffc, 0x00460023, 0x0038004f, 0xfffe0011,
+ 0x0005fffb, 0x00480025, 0x00360050, 0xfffd0010,
+ 0x0006fffc, 0x00480026, 0x0035004f, 0xfffd000f,
+ 0x0006fffc, 0x00490027, 0x0034004f, 0xfffd000e,
+ 0x0007fffd, 0x00490028, 0x0033004e, 0xfffd000d,
+ 0x0008fffc, 0x004a002a, 0x0031004d, 0xfffd000d,
+ 0x0009fffd, 0x004a002b, 0x0030004d, 0xfffc000c,
+ 0x0009fffc, 0x004c002c, 0x002f004d, 0xfffc000b,
+ 0x000afffc, 0x004c002e, 0x002e004c, 0xfffc000a,
+ 0x000bfffc, 0x004d002f, 0x002c004c, 0xfffc0009,
+ 0x000cfffc, 0x004d0030, 0x002b004a, 0xfffd0009,
+ 0x000dfffd, 0x004d0031, 0x002a004a, 0xfffc0008,
+ 0x000dfffd, 0x004e0033, 0x00280049, 0xfffd0007,
+ 0x000efffd, 0x004f0034, 0x00270049, 0xfffc0006,
+ 0x000ffffd, 0x004f0035, 0x00260048, 0xfffc0006,
+ 0x0010fffd, 0x00500036, 0x00250048, 0xfffb0005,
+ 0x0011fffe, 0x004f0038, 0x00230046, 0xfffc0005,
+ 0x0012fffe, 0x00500039, 0x00220045, 0xfffc0004,
+ 0x0013fffe, 0x0051003a, 0x00210045, 0xfffb0003,
+ 0x0014ffff, 0x0050003b, 0x00200043, 0xfffc0003,
+ 0x0015ffff, 0x0051003d, 0x001e0043, 0xfffb0002,
+ 0x0016ffff, 0x0051003e, 0x001d0042, 0xfffb0002,
+ 0x00170000, 0x0051003f, 0x001c0040, 0xfffc0001,
+ 0x00190000, 0x00510040, 0x001b003f, 0xfffb0001
+};
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_layer.c b/drivers/gpu/drm/sti/sti_layer.c
index 06a587c4f1b..899104f9d4b 100644
--- a/drivers/gpu/drm/sti/sti_layer.c
+++ b/drivers/gpu/drm/sti/sti_layer.c
@@ -11,7 +11,9 @@
#include <drm/drm_fb_cma_helper.h>
#include "sti_compositor.h"
+#include "sti_cursor.h"
#include "sti_gdp.h"
+#include "sti_hqvdp.h"
#include "sti_layer.h"
#include "sti_vid.h"
@@ -32,10 +34,13 @@ const char *sti_layer_to_str(struct sti_layer *layer)
return "VID1";
case STI_CURSOR:
return "CURSOR";
+ case STI_HQVDP_0:
+ return "HQVDP0";
default:
return "<UNKNOWN LAYER>";
}
}
+EXPORT_SYMBOL(sti_layer_to_str);
struct sti_layer *sti_layer_create(struct device *dev, int desc,
void __iomem *baseaddr)
@@ -50,6 +55,12 @@ struct sti_layer *sti_layer_create(struct device *dev, int desc,
case STI_VID:
layer = sti_vid_create(dev);
break;
+ case STI_CUR:
+ layer = sti_cursor_create(dev);
+ break;
+ case STI_VDP:
+ layer = sti_hqvdp_create(dev);
+ break;
}
if (!layer) {
@@ -67,8 +78,11 @@ struct sti_layer *sti_layer_create(struct device *dev, int desc,
return layer;
}
+EXPORT_SYMBOL(sti_layer_create);
-int sti_layer_prepare(struct sti_layer *layer, struct drm_framebuffer *fb,
+int sti_layer_prepare(struct sti_layer *layer,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
struct drm_display_mode *mode, int mixer_id,
int dest_x, int dest_y, int dest_w, int dest_h,
int src_x, int src_y, int src_w, int src_h)
@@ -88,6 +102,7 @@ int sti_layer_prepare(struct sti_layer *layer, struct drm_framebuffer *fb,
return 1;
}
+ layer->crtc = crtc;
layer->fb = fb;
layer->mode = mode;
layer->mixer_id = mixer_id;
@@ -100,6 +115,7 @@ int sti_layer_prepare(struct sti_layer *layer, struct drm_framebuffer *fb,
layer->src_w = src_w;
layer->src_h = src_h;
layer->format = fb->pixel_format;
+ layer->vaddr = cma_obj->vaddr;
layer->paddr = cma_obj->paddr;
for (i = 0; i < 4; i++) {
layer->pitches[i] = fb->pitches[i];
diff --git a/drivers/gpu/drm/sti/sti_layer.h b/drivers/gpu/drm/sti/sti_layer.h
index 198c3774cc1..ceff497f557 100644
--- a/drivers/gpu/drm/sti/sti_layer.h
+++ b/drivers/gpu/drm/sti/sti_layer.h
@@ -22,7 +22,8 @@ enum sti_layer_type {
STI_GDP = 1 << STI_LAYER_TYPE_SHIFT,
STI_VID = 2 << STI_LAYER_TYPE_SHIFT,
STI_CUR = 3 << STI_LAYER_TYPE_SHIFT,
- STI_BCK = 4 << STI_LAYER_TYPE_SHIFT
+ STI_BCK = 4 << STI_LAYER_TYPE_SHIFT,
+ STI_VDP = 5 << STI_LAYER_TYPE_SHIFT
};
enum sti_layer_id_of_type {
@@ -39,6 +40,7 @@ enum sti_layer_desc {
STI_GDP_3 = STI_GDP | STI_ID_3,
STI_VID_0 = STI_VID | STI_ID_0,
STI_VID_1 = STI_VID | STI_ID_1,
+ STI_HQVDP_0 = STI_VDP | STI_ID_0,
STI_CURSOR = STI_CUR,
STI_BACK = STI_BCK
};
@@ -67,6 +69,7 @@ struct sti_layer_funcs {
*
* @plane: drm plane it is bound to (if any)
* @fb: drm fb it is bound to
+ * @crtc: crtc it is bound to
* @mode: display mode
* @desc: layer type & id
* @device: driver device
@@ -82,11 +85,13 @@ struct sti_layer_funcs {
* @format: format
* @pitches: pitch of 'planes' (eg: Y, U, V)
* @offsets: offset of 'planes'
+ * @vaddr: virtual address of the input buffer
* @paddr: physical address of the input buffer
*/
struct sti_layer {
struct drm_plane plane;
struct drm_framebuffer *fb;
+ struct drm_crtc *crtc;
struct drm_display_mode *mode;
enum sti_layer_desc desc;
struct device *dev;
@@ -102,12 +107,15 @@ struct sti_layer {
uint32_t format;
unsigned int pitches[4];
unsigned int offsets[4];
+ void *vaddr;
dma_addr_t paddr;
};
struct sti_layer *sti_layer_create(struct device *dev, int desc,
void __iomem *baseaddr);
-int sti_layer_prepare(struct sti_layer *layer, struct drm_framebuffer *fb,
+int sti_layer_prepare(struct sti_layer *layer,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
struct drm_display_mode *mode,
int mixer_id,
int dest_x, int dest_y,
diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c
index 79f369db9fb..13a4b84deab 100644
--- a/drivers/gpu/drm/sti/sti_mixer.c
+++ b/drivers/gpu/drm/sti/sti_mixer.c
@@ -45,6 +45,7 @@ static const u32 mixerColorSpaceMatIdentity[] = {
#define GAM_CTL_GDP1_MASK BIT(4)
#define GAM_CTL_GDP2_MASK BIT(5)
#define GAM_CTL_GDP3_MASK BIT(6)
+#define GAM_CTL_CURSOR_MASK BIT(9)
const char *sti_mixer_to_str(struct sti_mixer *mixer)
{
@@ -122,11 +123,15 @@ int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer)
layer_id = GAM_DEPTH_GDP3_ID;
break;
case STI_VID_0:
+ case STI_HQVDP_0:
layer_id = GAM_DEPTH_VID0_ID;
break;
case STI_VID_1:
layer_id = GAM_DEPTH_VID1_ID;
break;
+ case STI_CURSOR:
+ /* no need to set depth for cursor */
+ return 0;
default:
DRM_ERROR("Unknown layer %d\n", layer->desc);
return 1;
@@ -185,9 +190,12 @@ static u32 sti_mixer_get_layer_mask(struct sti_layer *layer)
case STI_GDP_3:
return GAM_CTL_GDP3_MASK;
case STI_VID_0:
+ case STI_HQVDP_0:
return GAM_CTL_VID0_MASK;
case STI_VID_1:
return GAM_CTL_VID1_MASK;
+ case STI_CURSOR:
+ return GAM_CTL_CURSOR_MASK;
default:
return 0;
}
@@ -215,6 +223,15 @@ int sti_mixer_set_layer_status(struct sti_mixer *mixer,
return 0;
}
+void sti_mixer_clear_all_layers(struct sti_mixer *mixer)
+{
+ u32 val;
+
+ DRM_DEBUG_DRIVER("%s clear all layer\n", sti_mixer_to_str(mixer));
+ val = sti_mixer_reg_read(mixer, GAM_MIXER_CTL) & 0xFFFF0000;
+ sti_mixer_reg_write(mixer, GAM_MIXER_CTL, val);
+}
+
void sti_mixer_set_matrix(struct sti_mixer *mixer)
{
unsigned int i;
diff --git a/drivers/gpu/drm/sti/sti_mixer.h b/drivers/gpu/drm/sti/sti_mixer.h
index 874372102e5..b9728218290 100644
--- a/drivers/gpu/drm/sti/sti_mixer.h
+++ b/drivers/gpu/drm/sti/sti_mixer.h
@@ -23,6 +23,7 @@
* @id: id of the mixer
* @drm_crtc: crtc object link to the mixer
* @pending_event: set if a flip event is pending on crtc
+ * @enabled: true if the mixer is currently enabled
*/
struct sti_mixer {
struct device *dev;
@@ -30,6 +31,7 @@ struct sti_mixer {
int id;
struct drm_crtc drm_crtc;
struct drm_pending_vblank_event *pending_event;
+ bool enabled;
};
const char *sti_mixer_to_str(struct sti_mixer *mixer);
@@ -39,6 +41,7 @@ struct sti_mixer *sti_mixer_create(struct device *dev, int id,
int sti_mixer_set_layer_status(struct sti_mixer *mixer,
struct sti_layer *layer, bool status);
+void sti_mixer_clear_all_layers(struct sti_mixer *mixer);
int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer);
int sti_mixer_active_video_area(struct sti_mixer *mixer,
struct drm_display_mode *mode);
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index b8afe490356..cb924aa2b32 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -16,6 +16,8 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include "sti_drm_crtc.h"
+
/* glue registers */
#define TVO_CSC_MAIN_M0 0x000
#define TVO_CSC_MAIN_M1 0x004
@@ -96,7 +98,7 @@
#define TVO_SYNC_HD_DCS_SHIFT 8
-#define ENCODER_MAIN_CRTC_MASK BIT(0)
+#define ENCODER_CRTC_MASK (BIT(0) | BIT(1))
/* enum listing the supported output data format */
enum sti_tvout_video_out_type {
@@ -149,14 +151,15 @@ static void tvout_write(struct sti_tvout *tvout, u32 val, int offset)
* Set the clipping mode of a VIP
*
* @tvout: tvout structure
+ * @reg: register to set
* @cr_r:
* @y_g:
* @cb_b:
*/
-static void tvout_vip_set_color_order(struct sti_tvout *tvout,
+static void tvout_vip_set_color_order(struct sti_tvout *tvout, int reg,
u32 cr_r, u32 y_g, u32 cb_b)
{
- u32 val = tvout_read(tvout, TVO_VIP_HDMI);
+ u32 val = tvout_read(tvout, reg);
val &= ~(TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_R_SHIFT);
val &= ~(TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_G_SHIFT);
@@ -165,52 +168,58 @@ static void tvout_vip_set_color_order(struct sti_tvout *tvout,
val |= y_g << TVO_VIP_REORDER_G_SHIFT;
val |= cb_b << TVO_VIP_REORDER_B_SHIFT;
- tvout_write(tvout, val, TVO_VIP_HDMI);
+ tvout_write(tvout, val, reg);
}
/**
* Set the clipping mode of a VIP
*
* @tvout: tvout structure
+ * @reg: register to set
* @range: clipping range
*/
-static void tvout_vip_set_clip_mode(struct sti_tvout *tvout, u32 range)
+static void tvout_vip_set_clip_mode(struct sti_tvout *tvout, int reg, u32 range)
{
- u32 val = tvout_read(tvout, TVO_VIP_HDMI);
+ u32 val = tvout_read(tvout, reg);
val &= ~(TVO_VIP_CLIP_MASK << TVO_VIP_CLIP_SHIFT);
val |= range << TVO_VIP_CLIP_SHIFT;
- tvout_write(tvout, val, TVO_VIP_HDMI);
+ tvout_write(tvout, val, reg);
}
/**
* Set the rounded value of a VIP
*
* @tvout: tvout structure
+ * @reg: register to set
* @rnd: rounded val per component
*/
-static void tvout_vip_set_rnd(struct sti_tvout *tvout, u32 rnd)
+static void tvout_vip_set_rnd(struct sti_tvout *tvout, int reg, u32 rnd)
{
- u32 val = tvout_read(tvout, TVO_VIP_HDMI);
+ u32 val = tvout_read(tvout, reg);
val &= ~(TVO_VIP_RND_MASK << TVO_VIP_RND_SHIFT);
val |= rnd << TVO_VIP_RND_SHIFT;
- tvout_write(tvout, val, TVO_VIP_HDMI);
+ tvout_write(tvout, val, reg);
}
/**
* Select the VIP input
*
* @tvout: tvout structure
+ * @reg: register to set
+ * @main_path: true for the main path, false for the auxiliary path
+ * @sel_input_logic_inverted: true if the input selection logic must be inverted
* @sel_input: selected_input (main/aux + conv)
*/
static void tvout_vip_set_sel_input(struct sti_tvout *tvout,
+ int reg,
bool main_path,
bool sel_input_logic_inverted,
enum sti_tvout_video_out_type video_out)
{
u32 sel_input;
- u32 val = tvout_read(tvout, TVO_VIP_HDMI);
+ u32 val = tvout_read(tvout, reg);
if (main_path)
sel_input = TVO_VIP_SEL_INPUT_MAIN;
@@ -232,22 +241,24 @@ static void tvout_vip_set_sel_input(struct sti_tvout *tvout,
val &= ~TVO_VIP_SEL_INPUT_MASK;
val |= sel_input;
- tvout_write(tvout, val, TVO_VIP_HDMI);
+ tvout_write(tvout, val, reg);
}
/**
* Select the input video signed or unsigned
*
* @tvout: tvout structure
+ * @reg: register to set
* @in_vid_signed: used video input format
*/
-static void tvout_vip_set_in_vid_fmt(struct sti_tvout *tvout, u32 in_vid_fmt)
+static void tvout_vip_set_in_vid_fmt(struct sti_tvout *tvout,
+ int reg, u32 in_vid_fmt)
{
- u32 val = tvout_read(tvout, TVO_VIP_HDMI);
+ u32 val = tvout_read(tvout, reg);
val &= ~TVO_IN_FMT_SIGNED;
val |= in_vid_fmt;
- tvout_write(tvout, val, TVO_MAIN_IN_VID_FORMAT);
+ tvout_write(tvout, val, reg);
}
/**
@@ -261,6 +272,7 @@ static void tvout_hdmi_start(struct sti_tvout *tvout, bool main_path)
{
struct device_node *node = tvout->dev->of_node;
bool sel_input_logic_inverted = false;
+ u32 tvo_in_vid_format;
dev_dbg(tvout->dev, "%s\n", __func__);
@@ -268,33 +280,36 @@ static void tvout_hdmi_start(struct sti_tvout *tvout, bool main_path)
DRM_DEBUG_DRIVER("main vip for hdmi\n");
/* select the input sync for hdmi = VTG set 1 */
tvout_write(tvout, TVO_SYNC_MAIN_VTG_SET_1, TVO_HDMI_SYNC_SEL);
+ tvo_in_vid_format = TVO_MAIN_IN_VID_FORMAT;
} else {
DRM_DEBUG_DRIVER("aux vip for hdmi\n");
/* select the input sync for hdmi = VTG set 1 */
tvout_write(tvout, TVO_SYNC_AUX_VTG_SET_1, TVO_HDMI_SYNC_SEL);
+ tvo_in_vid_format = TVO_AUX_IN_VID_FORMAT;
}
/* set color channel order */
- tvout_vip_set_color_order(tvout,
+ tvout_vip_set_color_order(tvout, TVO_VIP_HDMI,
TVO_VIP_REORDER_CR_R_SEL,
TVO_VIP_REORDER_Y_G_SEL,
TVO_VIP_REORDER_CB_B_SEL);
/* set clipping mode (Limited range RGB/Y) */
- tvout_vip_set_clip_mode(tvout, TVO_VIP_CLIP_LIMITED_RANGE_RGB_Y);
+ tvout_vip_set_clip_mode(tvout, TVO_VIP_HDMI,
+ TVO_VIP_CLIP_LIMITED_RANGE_RGB_Y);
/* set round mode (rounded to 8-bit per component) */
- tvout_vip_set_rnd(tvout, TVO_VIP_RND_8BIT_ROUNDED);
+ tvout_vip_set_rnd(tvout, TVO_VIP_HDMI, TVO_VIP_RND_8BIT_ROUNDED);
if (of_device_is_compatible(node, "st,stih407-tvout")) {
/* set input video format */
- tvout_vip_set_in_vid_fmt(tvout->regs + TVO_MAIN_IN_VID_FORMAT,
- TVO_IN_FMT_SIGNED);
+ tvout_vip_set_in_vid_fmt(tvout, tvo_in_vid_format,
+ TVO_IN_FMT_SIGNED);
sel_input_logic_inverted = true;
}
/* input selection */
- tvout_vip_set_sel_input(tvout, main_path,
+ tvout_vip_set_sel_input(tvout, TVO_VIP_HDMI, main_path,
sel_input_logic_inverted, STI_TVOUT_VIDEO_OUT_RGB);
}
@@ -309,48 +324,47 @@ static void tvout_hda_start(struct sti_tvout *tvout, bool main_path)
{
struct device_node *node = tvout->dev->of_node;
bool sel_input_logic_inverted = false;
+ u32 tvo_in_vid_format;
+ int val;
dev_dbg(tvout->dev, "%s\n", __func__);
- if (!main_path) {
- DRM_ERROR("HD Analog on aux not implemented\n");
- return;
+ if (main_path) {
+ val = TVO_SYNC_MAIN_VTG_SET_2 << TVO_SYNC_HD_DCS_SHIFT;
+ val |= TVO_SYNC_MAIN_VTG_SET_3;
+ tvout_write(tvout, val, TVO_HD_SYNC_SEL);
+ tvo_in_vid_format = TVO_MAIN_IN_VID_FORMAT;
+ } else {
+ val = TVO_SYNC_AUX_VTG_SET_2 << TVO_SYNC_HD_DCS_SHIFT;
+ val |= TVO_SYNC_AUX_VTG_SET_3;
+ tvout_write(tvout, val, TVO_HD_SYNC_SEL);
+ tvo_in_vid_format = TVO_AUX_IN_VID_FORMAT;
}
- DRM_DEBUG_DRIVER("main vip for HDF\n");
-
/* set color channel order */
- tvout_vip_set_color_order(tvout->regs + TVO_VIP_HDF,
+ tvout_vip_set_color_order(tvout, TVO_VIP_HDF,
TVO_VIP_REORDER_CR_R_SEL,
TVO_VIP_REORDER_Y_G_SEL,
TVO_VIP_REORDER_CB_B_SEL);
- /* set clipping mode (Limited range RGB/Y) */
- tvout_vip_set_clip_mode(tvout->regs + TVO_VIP_HDF,
- TVO_VIP_CLIP_LIMITED_RANGE_CB_CR);
+ /* set clipping mode (EAV/SAV clipping) */
+ tvout_vip_set_clip_mode(tvout, TVO_VIP_HDF, TVO_VIP_CLIP_EAV_SAV);
/* set round mode (rounded to 10-bit per component) */
- tvout_vip_set_rnd(tvout->regs + TVO_VIP_HDF, TVO_VIP_RND_10BIT_ROUNDED);
+ tvout_vip_set_rnd(tvout, TVO_VIP_HDF, TVO_VIP_RND_10BIT_ROUNDED);
if (of_device_is_compatible(node, "st,stih407-tvout")) {
/* set input video format */
- tvout_vip_set_in_vid_fmt(tvout, TVO_IN_FMT_SIGNED);
+ tvout_vip_set_in_vid_fmt(tvout,
+ tvo_in_vid_format, TVO_IN_FMT_SIGNED);
sel_input_logic_inverted = true;
}
/* Input selection */
- tvout_vip_set_sel_input(tvout->regs + TVO_VIP_HDF,
- main_path,
+ tvout_vip_set_sel_input(tvout, TVO_VIP_HDF, main_path,
sel_input_logic_inverted,
STI_TVOUT_VIDEO_OUT_YUV);
- /* select the input sync for HD analog = VTG set 3
- * and HD DCS = VTG set 2 */
- tvout_write(tvout,
- (TVO_SYNC_MAIN_VTG_SET_2 << TVO_SYNC_HD_DCS_SHIFT)
- | TVO_SYNC_MAIN_VTG_SET_3,
- TVO_HD_SYNC_SEL);
-
/* power up HD DAC */
tvout_write(tvout, 0, TVO_HD_DAC_CFG_OFF);
}
@@ -392,7 +406,7 @@ static void sti_hda_encoder_commit(struct drm_encoder *encoder)
{
struct sti_tvout *tvout = to_sti_tvout(encoder);
- tvout_hda_start(tvout, true);
+ tvout_hda_start(tvout, sti_drm_crtc_is_main(encoder->crtc));
}
static void sti_hda_encoder_disable(struct drm_encoder *encoder)
@@ -429,7 +443,7 @@ static struct drm_encoder *sti_tvout_create_hda_encoder(struct drm_device *dev,
drm_encoder = (struct drm_encoder *) encoder;
- drm_encoder->possible_crtcs = ENCODER_MAIN_CRTC_MASK;
+ drm_encoder->possible_crtcs = ENCODER_CRTC_MASK;
drm_encoder->possible_clones = 1 << 0;
drm_encoder_init(dev, drm_encoder,
@@ -444,7 +458,7 @@ static void sti_hdmi_encoder_commit(struct drm_encoder *encoder)
{
struct sti_tvout *tvout = to_sti_tvout(encoder);
- tvout_hdmi_start(tvout, true);
+ tvout_hdmi_start(tvout, sti_drm_crtc_is_main(encoder->crtc));
}
static void sti_hdmi_encoder_disable(struct drm_encoder *encoder)
@@ -478,7 +492,7 @@ static struct drm_encoder *sti_tvout_create_hdmi_encoder(struct drm_device *dev,
drm_encoder = (struct drm_encoder *) encoder;
- drm_encoder->possible_crtcs = ENCODER_MAIN_CRTC_MASK;
+ drm_encoder->possible_crtcs = ENCODER_CRTC_MASK;
drm_encoder->possible_clones = 1 << 1;
drm_encoder_init(dev, drm_encoder,
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index 740d6e347a6..9564f2568e2 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -51,10 +51,19 @@
#define VTG_TOP_V_HD_3 0x010C
#define VTG_BOT_V_HD_3 0x0110
+#define VTG_H_HD_4 0x0120
+#define VTG_TOP_V_VD_4 0x0124
+#define VTG_BOT_V_VD_4 0x0128
+#define VTG_TOP_V_HD_4 0x012c
+#define VTG_BOT_V_HD_4 0x0130
+
#define VTG_IRQ_BOTTOM BIT(0)
#define VTG_IRQ_TOP BIT(1)
#define VTG_IRQ_MASK (VTG_IRQ_TOP | VTG_IRQ_BOTTOM)
+/* Delay introduced by the HDMI in nb of pixels */
+#define HDMI_DELAY (6)
+
/* delay introduced by the Arbitrary Waveform Generator in nb of pixels */
#define AWG_DELAY_HD (-9)
#define AWG_DELAY_ED (-8)
@@ -133,10 +142,10 @@ static void vtg_set_mode(struct sti_vtg *vtg,
writel(tmp, vtg->regs + VTG_VID_TFS);
writel(tmp, vtg->regs + VTG_VID_BFS);
- /* prepare VTG set 1 and 2 for HDMI and VTG set 3 for HD DAC */
- tmp = (mode->hsync_end - mode->hsync_start) << 16;
+ /* prepare VTG set 1 for HDMI */
+ tmp = (mode->hsync_end - mode->hsync_start + HDMI_DELAY) << 16;
+ tmp |= HDMI_DELAY;
writel(tmp, vtg->regs + VTG_H_HD_1);
- writel(tmp, vtg->regs + VTG_H_HD_2);
tmp = (mode->vsync_end - mode->vsync_start + 1) << 16;
tmp |= 1;
@@ -146,6 +155,11 @@ static void vtg_set_mode(struct sti_vtg *vtg,
writel(0, vtg->regs + VTG_BOT_V_HD_1);
/* prepare VTG set 2 for for HD DCS */
+ tmp = (mode->hsync_end - mode->hsync_start) << 16;
+ writel(tmp, vtg->regs + VTG_H_HD_2);
+
+ tmp = (mode->vsync_end - mode->vsync_start + 1) << 16;
+ tmp |= 1;
writel(tmp, vtg->regs + VTG_TOP_V_VD_2);
writel(tmp, vtg->regs + VTG_BOT_V_VD_2);
writel(0, vtg->regs + VTG_TOP_V_HD_2);
@@ -166,6 +180,17 @@ static void vtg_set_mode(struct sti_vtg *vtg,
writel(tmp, vtg->regs + VTG_TOP_V_HD_3);
writel(tmp, vtg->regs + VTG_BOT_V_HD_3);
+ /* Prepare VTG set 4 for DVO */
+ tmp = (mode->hsync_end - mode->hsync_start) << 16;
+ writel(tmp, vtg->regs + VTG_H_HD_4);
+
+ tmp = (mode->vsync_end - mode->vsync_start + 1) << 16;
+ tmp |= 1;
+ writel(tmp, vtg->regs + VTG_TOP_V_VD_4);
+ writel(tmp, vtg->regs + VTG_BOT_V_VD_4);
+ writel(0, vtg->regs + VTG_TOP_V_HD_4);
+ writel(0, vtg->regs + VTG_BOT_V_HD_4);
+
/* mode */
writel(type, vtg->regs + VTG_MODE);
}
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index 354ddb29231..74d9d621453 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -1,6 +1,7 @@
config DRM_TEGRA
tristate "NVIDIA Tegra DRM"
depends on ARCH_TEGRA || (ARM && COMPILE_TEST)
+ depends on COMMON_CLK
depends on DRM
depends on RESET_CONTROLLER
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 054a79f143a..3367960286a 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -9,17 +9,23 @@
#include <linux/clk.h>
#include <linux/debugfs.h>
+#include <linux/iommu.h>
#include <linux/reset.h>
+#include <soc/tegra/pmc.h>
+
#include "dc.h"
#include "drm.h"
#include "gem.h"
+#include <drm/drm_plane_helper.h>
+
struct tegra_dc_soc_info {
bool supports_interlacing;
bool supports_cursor;
bool supports_block_linear;
unsigned int pitch_align;
+ bool has_powergate;
};
struct tegra_plane {
@@ -32,6 +38,26 @@ static inline struct tegra_plane *to_tegra_plane(struct drm_plane *plane)
return container_of(plane, struct tegra_plane, base);
}
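+/*
+ * DC_CMD_STATE_CONTROL pairs each ACT_REQ bit with an UPDATE bit eight bit
+ * positions higher: writing value << 8 latches the new register state and
+ * writing value then requests its activation.
+ */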
+static void tegra_dc_window_commit(struct tegra_dc *dc, unsigned int index)
+{
+ u32 value = WIN_A_ACT_REQ << index;
+
+ tegra_dc_writel(dc, value << 8, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
+}
+
+static void tegra_dc_cursor_commit(struct tegra_dc *dc)
+{
+ tegra_dc_writel(dc, CURSOR_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, CURSOR_ACT_REQ, DC_CMD_STATE_CONTROL);
+}
+
+static void tegra_dc_commit(struct tegra_dc *dc)
+{
+ tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+}
+
static unsigned int tegra_dc_format(uint32_t format, uint32_t *swap)
{
/* assume no swapping of fetched data */
@@ -303,17 +329,260 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
break;
}
- tegra_dc_writel(dc, WIN_A_UPDATE << index, DC_CMD_STATE_CONTROL);
- tegra_dc_writel(dc, WIN_A_ACT_REQ << index, DC_CMD_STATE_CONTROL);
+ tegra_dc_window_commit(dc, index);
+
+ return 0;
+}
+
+static int tegra_window_plane_disable(struct drm_plane *plane)
+{
+ struct tegra_dc *dc = to_tegra_dc(plane->crtc);
+ struct tegra_plane *p = to_tegra_plane(plane);
+ u32 value;
+
+ if (!plane->crtc)
+ return 0;
+
+ value = WINDOW_A_SELECT << p->index;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
+
+ value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);
+ value &= ~WIN_ENABLE;
+ tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
+
+ tegra_dc_window_commit(dc, p->index);
+
+ return 0;
+}
+
+static void tegra_plane_destroy(struct drm_plane *plane)
+{
+ struct tegra_plane *p = to_tegra_plane(plane);
+
+ drm_plane_cleanup(plane);
+ kfree(p);
+}
+
+static const u32 tegra_primary_plane_formats[] = {
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB565,
+};
+
+static int tegra_primary_plane_update(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x,
+ int crtc_y, unsigned int crtc_w,
+ unsigned int crtc_h, uint32_t src_x,
+ uint32_t src_y, uint32_t src_w,
+ uint32_t src_h)
+{
+ struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
+ struct tegra_plane *p = to_tegra_plane(plane);
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ struct tegra_dc_window window;
+ int err;
+
+ memset(&window, 0, sizeof(window));
+ window.src.x = src_x >> 16;
+ window.src.y = src_y >> 16;
+ window.src.w = src_w >> 16;
+ window.src.h = src_h >> 16;
+ window.dst.x = crtc_x;
+ window.dst.y = crtc_y;
+ window.dst.w = crtc_w;
+ window.dst.h = crtc_h;
+ window.format = tegra_dc_format(fb->pixel_format, &window.swap);
+ window.bits_per_pixel = fb->bits_per_pixel;
+ window.bottom_up = tegra_fb_is_bottom_up(fb);
+
+ err = tegra_fb_get_tiling(fb, &window.tiling);
+ if (err < 0)
+ return err;
+
+ window.base[0] = bo->paddr + fb->offsets[0];
+ window.stride[0] = fb->pitches[0];
+
+ err = tegra_dc_setup_window(dc, p->index, &window);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static void tegra_primary_plane_destroy(struct drm_plane *plane)
+{
+ tegra_window_plane_disable(plane);
+ tegra_plane_destroy(plane);
+}
+
+static const struct drm_plane_funcs tegra_primary_plane_funcs = {
+ .update_plane = tegra_primary_plane_update,
+ .disable_plane = tegra_window_plane_disable,
+ .destroy = tegra_primary_plane_destroy,
+};
+
+static struct drm_plane *tegra_dc_primary_plane_create(struct drm_device *drm,
+ struct tegra_dc *dc)
+{
+ struct tegra_plane *plane;
+ unsigned int num_formats;
+ const u32 *formats;
+ int err;
+
+ plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+ if (!plane)
+ return ERR_PTR(-ENOMEM);
+
+ num_formats = ARRAY_SIZE(tegra_primary_plane_formats);
+ formats = tegra_primary_plane_formats;
+
+ err = drm_universal_plane_init(drm, &plane->base, 1 << dc->pipe,
+ &tegra_primary_plane_funcs, formats,
+ num_formats, DRM_PLANE_TYPE_PRIMARY);
+ if (err < 0) {
+ kfree(plane);
+ return ERR_PTR(err);
+ }
+
+ return &plane->base;
+}
+
+static const u32 tegra_cursor_plane_formats[] = {
+ DRM_FORMAT_RGBA8888,
+};
+
+static int tegra_cursor_plane_update(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x,
+ int crtc_y, unsigned int crtc_w,
+ unsigned int crtc_h, uint32_t src_x,
+ uint32_t src_y, uint32_t src_w,
+ uint32_t src_h)
+{
+ struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ u32 value = CURSOR_CLIP_DISPLAY;
+
+ /* scaling not supported for cursor */
+ if ((src_w >> 16 != crtc_w) || (src_h >> 16 != crtc_h))
+ return -EINVAL;
+
+ /* only square cursors supported */
+ if (src_w != src_h)
+ return -EINVAL;
+
+ switch (crtc_w) {
+ case 32:
+ value |= CURSOR_SIZE_32x32;
+ break;
+
+ case 64:
+ value |= CURSOR_SIZE_64x64;
+ break;
+
+ case 128:
+ value |= CURSOR_SIZE_128x128;
+ break;
+
+ case 256:
+ value |= CURSOR_SIZE_256x256;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
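+ /* program the 1 KiB-aligned cursor buffer address (paddr bits [31:10]) into the low 22 bits */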
+ value |= (bo->paddr >> 10) & 0x3fffff;
+ tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR);
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ value = (bo->paddr >> 32) & 0x3;
+ tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR_HI);
+#endif
+
+ /* enable cursor and set blend mode */
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value |= CURSOR_ENABLE;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+ value = tegra_dc_readl(dc, DC_DISP_BLEND_CURSOR_CONTROL);
+ value &= ~CURSOR_DST_BLEND_MASK;
+ value &= ~CURSOR_SRC_BLEND_MASK;
+ value |= CURSOR_MODE_NORMAL;
+ value |= CURSOR_DST_BLEND_NEG_K1_TIMES_SRC;
+ value |= CURSOR_SRC_BLEND_K1_TIMES_SRC;
+ value |= CURSOR_ALPHA;
+ tegra_dc_writel(dc, value, DC_DISP_BLEND_CURSOR_CONTROL);
+
+ /* position the cursor */
+ value = (crtc_y & 0x3fff) << 16 | (crtc_x & 0x3fff);
+ tegra_dc_writel(dc, value, DC_DISP_CURSOR_POSITION);
+
+ /* apply changes */
+ tegra_dc_cursor_commit(dc);
+ tegra_dc_commit(dc);
+
+ return 0;
+}
+
+static int tegra_cursor_plane_disable(struct drm_plane *plane)
+{
+ struct tegra_dc *dc = to_tegra_dc(plane->crtc);
+ u32 value;
+
+ if (!plane->crtc)
+ return 0;
+
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value &= ~CURSOR_ENABLE;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+ tegra_dc_cursor_commit(dc);
+ tegra_dc_commit(dc);
return 0;
}
-static int tegra_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int crtc_x,
- int crtc_y, unsigned int crtc_w,
- unsigned int crtc_h, uint32_t src_x,
- uint32_t src_y, uint32_t src_w, uint32_t src_h)
+static const struct drm_plane_funcs tegra_cursor_plane_funcs = {
+ .update_plane = tegra_cursor_plane_update,
+ .disable_plane = tegra_cursor_plane_disable,
+ .destroy = tegra_plane_destroy,
+};
+
+static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
+ struct tegra_dc *dc)
+{
+ struct tegra_plane *plane;
+ unsigned int num_formats;
+ const u32 *formats;
+ int err;
+
+ plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+ if (!plane)
+ return ERR_PTR(-ENOMEM);
+
+ num_formats = ARRAY_SIZE(tegra_cursor_plane_formats);
+ formats = tegra_cursor_plane_formats;
+
+ err = drm_universal_plane_init(drm, &plane->base, 1 << dc->pipe,
+ &tegra_cursor_plane_funcs, formats,
+ num_formats, DRM_PLANE_TYPE_CURSOR);
+ if (err < 0) {
+ kfree(plane);
+ return ERR_PTR(err);
+ }
+
+ return &plane->base;
+}
+
+static int tegra_overlay_plane_update(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x,
+ int crtc_y, unsigned int crtc_w,
+ unsigned int crtc_h, uint32_t src_x,
+ uint32_t src_y, uint32_t src_w,
+ uint32_t src_h)
{
struct tegra_plane *p = to_tegra_plane(plane);
struct tegra_dc *dc = to_tegra_dc(crtc);
@@ -359,44 +628,19 @@ static int tegra_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
return tegra_dc_setup_window(dc, p->index, &window);
}
-static int tegra_plane_disable(struct drm_plane *plane)
+static void tegra_overlay_plane_destroy(struct drm_plane *plane)
{
- struct tegra_dc *dc = to_tegra_dc(plane->crtc);
- struct tegra_plane *p = to_tegra_plane(plane);
- unsigned long value;
-
- if (!plane->crtc)
- return 0;
-
- value = WINDOW_A_SELECT << p->index;
- tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
-
- value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);
- value &= ~WIN_ENABLE;
- tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
-
- tegra_dc_writel(dc, WIN_A_UPDATE << p->index, DC_CMD_STATE_CONTROL);
- tegra_dc_writel(dc, WIN_A_ACT_REQ << p->index, DC_CMD_STATE_CONTROL);
-
- return 0;
-}
-
-static void tegra_plane_destroy(struct drm_plane *plane)
-{
- struct tegra_plane *p = to_tegra_plane(plane);
-
- tegra_plane_disable(plane);
- drm_plane_cleanup(plane);
- kfree(p);
+ tegra_window_plane_disable(plane);
+ tegra_plane_destroy(plane);
}
-static const struct drm_plane_funcs tegra_plane_funcs = {
- .update_plane = tegra_plane_update,
- .disable_plane = tegra_plane_disable,
- .destroy = tegra_plane_destroy,
+static const struct drm_plane_funcs tegra_overlay_plane_funcs = {
+ .update_plane = tegra_overlay_plane_update,
+ .disable_plane = tegra_window_plane_disable,
+ .destroy = tegra_overlay_plane_destroy,
};
-static const uint32_t plane_formats[] = {
+static const uint32_t tegra_overlay_plane_formats[] = {
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_RGB565,
@@ -406,27 +650,44 @@ static const uint32_t plane_formats[] = {
DRM_FORMAT_YUV422,
};
-static int tegra_dc_add_planes(struct drm_device *drm, struct tegra_dc *dc)
+static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm,
+ struct tegra_dc *dc,
+ unsigned int index)
{
- unsigned int i;
- int err = 0;
+ struct tegra_plane *plane;
+ unsigned int num_formats;
+ const u32 *formats;
+ int err;
- for (i = 0; i < 2; i++) {
- struct tegra_plane *plane;
+ plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+ if (!plane)
+ return ERR_PTR(-ENOMEM);
- plane = kzalloc(sizeof(*plane), GFP_KERNEL);
- if (!plane)
- return -ENOMEM;
+ plane->index = index;
- plane->index = 1 + i;
+ num_formats = ARRAY_SIZE(tegra_overlay_plane_formats);
+ formats = tegra_overlay_plane_formats;
- err = drm_plane_init(drm, &plane->base, 1 << dc->pipe,
- &tegra_plane_funcs, plane_formats,
- ARRAY_SIZE(plane_formats), false);
- if (err < 0) {
- kfree(plane);
- return err;
- }
+ err = drm_universal_plane_init(drm, &plane->base, 1 << dc->pipe,
+ &tegra_overlay_plane_funcs, formats,
+ num_formats, DRM_PLANE_TYPE_OVERLAY);
+ if (err < 0) {
+ kfree(plane);
+ return ERR_PTR(err);
+ }
+
+ return &plane->base;
+}
+
+static int tegra_dc_add_planes(struct drm_device *drm, struct tegra_dc *dc)
+{
+ struct drm_plane *plane;
+ unsigned int i;
+
+ for (i = 0; i < 2; i++) {
+ plane = tegra_dc_overlay_plane_create(drm, dc, 1 + i);
+ if (IS_ERR(plane))
+ return PTR_ERR(plane);
}
return 0;
@@ -513,10 +774,8 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET);
tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET);
- value = GENERAL_UPDATE | WIN_A_UPDATE;
- tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
-
value = GENERAL_ACT_REQ | WIN_A_ACT_REQ;
+ tegra_dc_writel(dc, value << 8, DC_CMD_STATE_CONTROL);
tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
return 0;
@@ -548,109 +807,6 @@ void tegra_dc_disable_vblank(struct tegra_dc *dc)
spin_unlock_irqrestore(&dc->lock, flags);
}
-static int tegra_dc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file,
- uint32_t handle, uint32_t width,
- uint32_t height, int32_t hot_x, int32_t hot_y)
-{
- unsigned long value = CURSOR_CLIP_DISPLAY;
- struct tegra_dc *dc = to_tegra_dc(crtc);
- struct drm_gem_object *gem;
- struct tegra_bo *bo = NULL;
-
- if (!dc->soc->supports_cursor)
- return -ENXIO;
-
- if (width != height)
- return -EINVAL;
-
- switch (width) {
- case 32:
- value |= CURSOR_SIZE_32x32;
- break;
-
- case 64:
- value |= CURSOR_SIZE_64x64;
- break;
-
- case 128:
- value |= CURSOR_SIZE_128x128;
-
- case 256:
- value |= CURSOR_SIZE_256x256;
- break;
-
- default:
- return -EINVAL;
- }
-
- if (handle) {
- gem = drm_gem_object_lookup(crtc->dev, file, handle);
- if (!gem)
- return -ENOENT;
-
- bo = to_tegra_bo(gem);
- }
-
- if (bo) {
- unsigned long addr = (bo->paddr & 0xfffffc00) >> 10;
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- unsigned long high = (bo->paddr & 0xfffffffc) >> 32;
-#endif
-
- tegra_dc_writel(dc, value | addr, DC_DISP_CURSOR_START_ADDR);
-
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- tegra_dc_writel(dc, high, DC_DISP_CURSOR_START_ADDR_HI);
-#endif
-
- value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
- value |= CURSOR_ENABLE;
- tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
-
- value = tegra_dc_readl(dc, DC_DISP_BLEND_CURSOR_CONTROL);
- value &= ~CURSOR_DST_BLEND_MASK;
- value &= ~CURSOR_SRC_BLEND_MASK;
- value |= CURSOR_MODE_NORMAL;
- value |= CURSOR_DST_BLEND_NEG_K1_TIMES_SRC;
- value |= CURSOR_SRC_BLEND_K1_TIMES_SRC;
- value |= CURSOR_ALPHA;
- tegra_dc_writel(dc, value, DC_DISP_BLEND_CURSOR_CONTROL);
- } else {
- value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
- value &= ~CURSOR_ENABLE;
- tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
- }
-
- tegra_dc_writel(dc, CURSOR_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
- tegra_dc_writel(dc, CURSOR_ACT_REQ, DC_CMD_STATE_CONTROL);
-
- tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
- tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
-
- return 0;
-}
-
-static int tegra_dc_cursor_move(struct drm_crtc *crtc, int x, int y)
-{
- struct tegra_dc *dc = to_tegra_dc(crtc);
- unsigned long value;
-
- if (!dc->soc->supports_cursor)
- return -ENXIO;
-
- value = ((y & 0x3fff) << 16) | (x & 0x3fff);
- tegra_dc_writel(dc, value, DC_DISP_CURSOR_POSITION);
-
- tegra_dc_writel(dc, CURSOR_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
- tegra_dc_writel(dc, CURSOR_ACT_REQ, DC_CMD_STATE_CONTROL);
-
- /* XXX: only required on generations earlier than Tegra124? */
- tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
- tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
-
- return 0;
-}
-
static void tegra_dc_finish_page_flip(struct tegra_dc *dc)
{
struct drm_device *drm = dc->base.dev;
@@ -727,8 +883,6 @@ static void tegra_dc_destroy(struct drm_crtc *crtc)
}
static const struct drm_crtc_funcs tegra_crtc_funcs = {
- .cursor_set2 = tegra_dc_cursor_set2,
- .cursor_move = tegra_dc_cursor_move,
.page_flip = tegra_dc_page_flip,
.set_config = drm_crtc_helper_set_config,
.destroy = tegra_dc_destroy,
@@ -736,12 +890,13 @@ static const struct drm_crtc_funcs tegra_crtc_funcs = {
static void tegra_crtc_disable(struct drm_crtc *crtc)
{
+ struct tegra_dc *dc = to_tegra_dc(crtc);
struct drm_device *drm = crtc->dev;
struct drm_plane *plane;
drm_for_each_legacy_plane(plane, &drm->mode_config.plane_list) {
if (plane->crtc == crtc) {
- tegra_plane_disable(plane);
+ tegra_window_plane_disable(plane);
plane->crtc = NULL;
if (plane->fb) {
@@ -752,6 +907,7 @@ static void tegra_crtc_disable(struct drm_crtc *crtc)
}
drm_crtc_vblank_off(crtc);
+ tegra_dc_commit(dc);
}
static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -934,15 +1090,9 @@ static void tegra_crtc_prepare(struct drm_crtc *crtc)
static void tegra_crtc_commit(struct drm_crtc *crtc)
{
struct tegra_dc *dc = to_tegra_dc(crtc);
- unsigned long value;
-
- value = GENERAL_UPDATE | WIN_A_UPDATE;
- tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
-
- value = GENERAL_ACT_REQ | WIN_A_ACT_REQ;
- tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
drm_crtc_vblank_on(crtc);
+ tegra_dc_commit(dc);
}
static void tegra_crtc_load_lut(struct drm_crtc *crtc)
@@ -996,7 +1146,7 @@ static int tegra_dc_show_regs(struct seq_file *s, void *data)
struct tegra_dc *dc = node->info_ent->data;
#define DUMP_REG(name) \
- seq_printf(s, "%-40s %#05x %08lx\n", #name, name, \
+ seq_printf(s, "%-40s %#05x %08x\n", #name, name, \
tegra_dc_readl(dc, name))
DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT);
@@ -1284,9 +1434,40 @@ static int tegra_dc_init(struct host1x_client *client)
struct drm_device *drm = dev_get_drvdata(client->parent);
struct tegra_dc *dc = host1x_client_to_dc(client);
struct tegra_drm *tegra = drm->dev_private;
+ struct drm_plane *primary = NULL;
+ struct drm_plane *cursor = NULL;
int err;
- drm_crtc_init(drm, &dc->base, &tegra_crtc_funcs);
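+ /* if an IOMMU domain is in use, attach the display controller so it can access IOVA-mapped framebuffers */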
+ if (tegra->domain) {
+ err = iommu_attach_device(tegra->domain, dc->dev);
+ if (err < 0) {
+ dev_err(dc->dev, "failed to attach to domain: %d\n",
+ err);
+ return err;
+ }
+
+ dc->domain = tegra->domain;
+ }
+
+ primary = tegra_dc_primary_plane_create(drm, dc);
+ if (IS_ERR(primary)) {
+ err = PTR_ERR(primary);
+ goto cleanup;
+ }
+
+ if (dc->soc->supports_cursor) {
+ cursor = tegra_dc_cursor_plane_create(drm, dc);
+ if (IS_ERR(cursor)) {
+ err = PTR_ERR(cursor);
+ goto cleanup;
+ }
+ }
+
+ err = drm_crtc_init_with_planes(drm, &dc->base, primary, cursor,
+ &tegra_crtc_funcs);
+ if (err < 0)
+ goto cleanup;
+
drm_mode_crtc_set_gamma_size(&dc->base, 256);
drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs);
@@ -1300,12 +1481,12 @@ static int tegra_dc_init(struct host1x_client *client)
err = tegra_dc_rgb_init(drm, dc);
if (err < 0 && err != -ENODEV) {
dev_err(dc->dev, "failed to initialize RGB output: %d\n", err);
- return err;
+ goto cleanup;
}
err = tegra_dc_add_planes(drm, dc);
if (err < 0)
- return err;
+ goto cleanup;
if (IS_ENABLED(CONFIG_DEBUG_FS)) {
err = tegra_dc_debugfs_init(dc, drm->primary);
@@ -1318,10 +1499,24 @@ static int tegra_dc_init(struct host1x_client *client)
if (err < 0) {
dev_err(dc->dev, "failed to request IRQ#%u: %d\n", dc->irq,
err);
- return err;
+ goto cleanup;
}
return 0;
+
+cleanup:
+ if (cursor)
+ drm_plane_cleanup(cursor);
+
+ if (primary)
+ drm_plane_cleanup(primary);
+
+ if (tegra->domain) {
+ iommu_detach_device(tegra->domain, dc->dev);
+ dc->domain = NULL;
+ }
+
+ return err;
}
static int tegra_dc_exit(struct host1x_client *client)
@@ -1343,6 +1538,11 @@ static int tegra_dc_exit(struct host1x_client *client)
return err;
}
+ if (dc->domain) {
+ iommu_detach_device(dc->domain, dc->dev);
+ dc->domain = NULL;
+ }
+
return 0;
}
@@ -1356,6 +1556,7 @@ static const struct tegra_dc_soc_info tegra20_dc_soc_info = {
.supports_cursor = false,
.supports_block_linear = false,
.pitch_align = 8,
+ .has_powergate = false,
};
static const struct tegra_dc_soc_info tegra30_dc_soc_info = {
@@ -1363,6 +1564,7 @@ static const struct tegra_dc_soc_info tegra30_dc_soc_info = {
.supports_cursor = false,
.supports_block_linear = false,
.pitch_align = 8,
+ .has_powergate = false,
};
static const struct tegra_dc_soc_info tegra114_dc_soc_info = {
@@ -1370,6 +1572,7 @@ static const struct tegra_dc_soc_info tegra114_dc_soc_info = {
.supports_cursor = false,
.supports_block_linear = false,
.pitch_align = 64,
+ .has_powergate = true,
};
static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
@@ -1377,6 +1580,7 @@ static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
.supports_cursor = true,
.supports_block_linear = true,
.pitch_align = 64,
+ .has_powergate = true,
};
static const struct of_device_id tegra_dc_of_match[] = {
@@ -1384,6 +1588,9 @@ static const struct of_device_id tegra_dc_of_match[] = {
.compatible = "nvidia,tegra124-dc",
.data = &tegra124_dc_soc_info,
}, {
+ .compatible = "nvidia,tegra114-dc",
+ .data = &tegra114_dc_soc_info,
+ }, {
.compatible = "nvidia,tegra30-dc",
.data = &tegra30_dc_soc_info,
}, {
@@ -1466,9 +1673,34 @@ static int tegra_dc_probe(struct platform_device *pdev)
return PTR_ERR(dc->rst);
}
- err = clk_prepare_enable(dc->clk);
- if (err < 0)
- return err;
+ if (dc->soc->has_powergate) {
+ if (dc->pipe == 0)
+ dc->powergate = TEGRA_POWERGATE_DIS;
+ else
+ dc->powergate = TEGRA_POWERGATE_DISB;
+
+ err = tegra_powergate_sequence_power_up(dc->powergate, dc->clk,
+ dc->rst);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to power partition: %d\n",
+ err);
+ return err;
+ }
+ } else {
+ err = clk_prepare_enable(dc->clk);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to enable clock: %d\n",
+ err);
+ return err;
+ }
+
+ err = reset_control_deassert(dc->rst);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to deassert reset: %d\n",
+ err);
+ return err;
+ }
+ }
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dc->regs = devm_ioremap_resource(&pdev->dev, regs);
@@ -1522,6 +1754,10 @@ static int tegra_dc_remove(struct platform_device *pdev)
}
reset_control_assert(dc->rst);
+
+ if (dc->soc->has_powergate)
+ tegra_powergate_power_off(dc->powergate);
+
clk_disable_unprepare(dc->clk);
return 0;
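/*
 * Illustrative sketch, not part of the patch above: the probe path now
 * chooses between the display power partition and a plain clock/reset
 * bring-up depending on the SoC.  The helper below is hypothetical
 * (example_dc_power_up), but tegra_powergate_sequence_power_up(),
 * clk_prepare_enable() and reset_control_deassert() are the kernel APIs
 * the patch uses, and TEGRA_POWERGATE_DIS/DISB are the partitions it
 * selects per pipe.
 */
static int example_dc_power_up(struct tegra_dc *dc)
{
	int err;

	if (dc->soc->has_powergate) {
		/* partition DIS drives pipe 0, DISB drives pipe 1 */
		dc->powergate = (dc->pipe == 0) ? TEGRA_POWERGATE_DIS
						: TEGRA_POWERGATE_DISB;
		return tegra_powergate_sequence_power_up(dc->powergate,
							 dc->clk, dc->rst);
	}

	/* older SoCs: enable the module clock and release reset by hand */
	err = clk_prepare_enable(dc->clk);
	if (err < 0)
		return err;

	err = reset_control_deassert(dc->rst);
	if (err < 0)
		clk_disable_unprepare(dc->clk);

	return err;
}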
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 59736bb810c..e549afeece1 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -8,6 +8,7 @@
*/
#include <linux/host1x.h>
+#include <linux/iommu.h>
#include "drm.h"
#include "gem.h"
@@ -33,6 +34,17 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
if (!tegra)
return -ENOMEM;
+ if (iommu_present(&platform_bus_type)) {
+ tegra->domain = iommu_domain_alloc(&platform_bus_type);
+ if (IS_ERR(tegra->domain)) {
+ err = PTR_ERR(tegra->domain);
+ goto free;
+ }
+
+ DRM_DEBUG("IOMMU context initialized\n");
+ drm_mm_init(&tegra->mm, 0, SZ_2G);
+ }
+
mutex_init(&tegra->clients_lock);
INIT_LIST_HEAD(&tegra->clients);
drm->dev_private = tegra;
@@ -42,13 +54,13 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
err = tegra_drm_fb_prepare(drm);
if (err < 0)
- return err;
+ goto config;
drm_kms_helper_poll_init(drm);
err = host1x_device_init(device);
if (err < 0)
- return err;
+ goto fbdev;
/*
* We don't use the drm_irq_install() helpers provided by the DRM
@@ -59,18 +71,37 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
err = drm_vblank_init(drm, drm->mode_config.num_crtc);
if (err < 0)
- return err;
+ goto device;
err = tegra_drm_fb_init(drm);
if (err < 0)
- return err;
+ goto vblank;
return 0;
+
+vblank:
+ drm_vblank_cleanup(drm);
+device:
+ host1x_device_exit(device);
+fbdev:
+ drm_kms_helper_poll_fini(drm);
+ tegra_drm_fb_free(drm);
+config:
+ drm_mode_config_cleanup(drm);
+
+ if (tegra->domain) {
+ iommu_domain_free(tegra->domain);
+ drm_mm_takedown(&tegra->mm);
+ }
+free:
+ kfree(tegra);
+ return err;
}
static int tegra_drm_unload(struct drm_device *drm)
{
struct host1x_device *device = to_host1x_device(drm->dev);
+ struct tegra_drm *tegra = drm->dev_private;
int err;
drm_kms_helper_poll_fini(drm);
@@ -82,6 +113,13 @@ static int tegra_drm_unload(struct drm_device *drm)
if (err < 0)
return err;
+ if (tegra->domain) {
+ iommu_domain_free(tegra->domain);
+ drm_mm_takedown(&tegra->mm);
+ }
+
+ kfree(tegra);
+
return 0;
}
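/*
 * Illustrative sketch, not part of the patch above: at driver load an IOMMU
 * domain and a drm_mm-based IOVA allocator are only set up when an IOMMU is
 * actually available on the platform bus.  A condensed, hypothetical helper
 * (example_iommu_setup); needs <linux/iommu.h>, <drm/drm_mm.h> and
 * <linux/sizes.h>.
 */
static int example_iommu_setup(struct tegra_drm *tegra)
{
	if (!iommu_present(&platform_bus_type))
		return 0; /* no IOMMU: fall back to contiguous allocations */

	tegra->domain = iommu_domain_alloc(&platform_bus_type);
	if (!tegra->domain)
		return -ENOMEM;

	/* hand out I/O virtual addresses from the first 2 GiB */
	drm_mm_init(&tegra->mm, 0, SZ_2G);

	return 0;
}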
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index e89c70fa82d..3a3b2e7b5b3 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -39,6 +39,9 @@ struct tegra_fbdev {
struct tegra_drm {
struct drm_device *drm;
+ struct iommu_domain *domain;
+ struct drm_mm mm;
+
struct mutex clients_lock;
struct list_head clients;
@@ -101,6 +104,7 @@ struct tegra_dc {
spinlock_t lock;
struct drm_crtc base;
+ int powergate;
int pipe;
struct clk *clk;
@@ -120,6 +124,8 @@ struct tegra_dc {
struct drm_pending_vblank_event *event;
const struct tegra_dc_soc_info *soc;
+
+ struct iommu_domain *domain;
};
static inline struct tegra_dc *
@@ -133,16 +139,15 @@ static inline struct tegra_dc *to_tegra_dc(struct drm_crtc *crtc)
return crtc ? container_of(crtc, struct tegra_dc, base) : NULL;
}
-static inline void tegra_dc_writel(struct tegra_dc *dc, unsigned long value,
- unsigned long reg)
+static inline void tegra_dc_writel(struct tegra_dc *dc, u32 value,
+ unsigned long offset)
{
- writel(value, dc->regs + (reg << 2));
+ writel(value, dc->regs + (offset << 2));
}
-static inline unsigned long tegra_dc_readl(struct tegra_dc *dc,
- unsigned long reg)
+static inline u32 tegra_dc_readl(struct tegra_dc *dc, unsigned long offset)
{
- return readl(dc->regs + (reg << 2));
+ return readl(dc->regs + (offset << 2));
}
struct tegra_dc_window {
@@ -287,6 +292,7 @@ bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer);
int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
struct tegra_bo_tiling *tiling);
int tegra_drm_fb_prepare(struct drm_device *drm);
+void tegra_drm_fb_free(struct drm_device *drm);
int tegra_drm_fb_init(struct drm_device *drm);
void tegra_drm_fb_exit(struct drm_device *drm);
#ifdef CONFIG_DRM_TEGRA_FBDEV
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index f7874458926..33f67fd601c 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -11,6 +11,7 @@
#include <linux/host1x.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
@@ -26,9 +27,6 @@
#include "dsi.h"
#include "mipi-phy.h"
-#define DSI_VIDEO_FIFO_DEPTH (1920 / 4)
-#define DSI_HOST_FIFO_DEPTH 64
-
struct tegra_dsi {
struct host1x_client client;
struct tegra_output output;
@@ -54,6 +52,13 @@ struct tegra_dsi {
struct regulator *vdd;
bool enabled;
+
+ unsigned int video_fifo_depth;
+ unsigned int host_fifo_depth;
+
+ /* for ganged-mode support */
+ struct tegra_dsi *master;
+ struct tegra_dsi *slave;
};
static inline struct tegra_dsi *
@@ -318,6 +323,21 @@ static const u32 pkt_seq_video_non_burst_sync_events[NUM_PKT_SEQ] = {
[11] = PKT_ID0(MIPI_DSI_BLANKING_PACKET) | PKT_LEN0(4),
};
+static const u32 pkt_seq_command_mode[NUM_PKT_SEQ] = {
+ [ 0] = 0,
+ [ 1] = 0,
+ [ 2] = 0,
+ [ 3] = 0,
+ [ 4] = 0,
+ [ 5] = 0,
+ [ 6] = PKT_ID0(MIPI_DSI_DCS_LONG_WRITE) | PKT_LEN0(3) | PKT_LP,
+ [ 7] = 0,
+ [ 8] = 0,
+ [ 9] = 0,
+ [10] = PKT_ID0(MIPI_DSI_DCS_LONG_WRITE) | PKT_LEN0(5) | PKT_LP,
+ [11] = 0,
+};
+
static int tegra_dsi_set_phy_timing(struct tegra_dsi *dsi)
{
struct mipi_dphy_timing timing;
@@ -329,7 +349,7 @@ static int tegra_dsi_set_phy_timing(struct tegra_dsi *dsi)
if (rate < 0)
return rate;
- period = DIV_ROUND_CLOSEST(1000000000UL, rate * 2);
+ period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, rate * 2);
err = mipi_dphy_timing_get_default(&timing, period);
if (err < 0)
@@ -369,6 +389,9 @@ static int tegra_dsi_set_phy_timing(struct tegra_dsi *dsi)
DSI_TIMING_FIELD(timing.tago, period, 1);
tegra_dsi_writel(dsi, value, DSI_BTA_TIMING);
+ if (dsi->slave)
+ return tegra_dsi_set_phy_timing(dsi->slave);
+
return 0;
}
@@ -426,26 +449,59 @@ static int tegra_dsi_get_format(enum mipi_dsi_pixel_format format,
return 0;
}
-static int tegra_output_dsi_enable(struct tegra_output *output)
+static void tegra_dsi_ganged_enable(struct tegra_dsi *dsi, unsigned int start,
+ unsigned int size)
+{
+ u32 value;
+
+ tegra_dsi_writel(dsi, start, DSI_GANGED_MODE_START);
+ tegra_dsi_writel(dsi, size << 16 | size, DSI_GANGED_MODE_SIZE);
+
+ value = DSI_GANGED_MODE_CONTROL_ENABLE;
+ tegra_dsi_writel(dsi, value, DSI_GANGED_MODE_CONTROL);
+}
+
+static void tegra_dsi_enable(struct tegra_dsi *dsi)
+{
+ u32 value;
+
+ value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
+ value |= DSI_POWER_CONTROL_ENABLE;
+ tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
+
+ if (dsi->slave)
+ tegra_dsi_enable(dsi->slave);
+}
+
+static unsigned int tegra_dsi_get_lanes(struct tegra_dsi *dsi)
+{
+ if (dsi->master)
+ return dsi->master->lanes + dsi->lanes;
+
+ if (dsi->slave)
+ return dsi->lanes + dsi->slave->lanes;
+
+ return dsi->lanes;
+}
+
+static int tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe,
+ const struct drm_display_mode *mode)
{
- struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
- struct drm_display_mode *mode = &dc->base.mode;
unsigned int hact, hsw, hbp, hfp, i, mul, div;
- struct tegra_dsi *dsi = to_dsi(output);
enum tegra_dsi_format format;
- unsigned long value;
const u32 *pkt_seq;
+ u32 value;
int err;
- if (dsi->enabled)
- return 0;
-
if (dsi->flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) {
DRM_DEBUG_KMS("Non-burst video mode with sync pulses\n");
pkt_seq = pkt_seq_video_non_burst_sync_pulses;
- } else {
+ } else if (dsi->flags & MIPI_DSI_MODE_VIDEO) {
DRM_DEBUG_KMS("Non-burst video mode with sync events\n");
pkt_seq = pkt_seq_video_non_burst_sync_events;
+ } else {
+ DRM_DEBUG_KMS("Command mode\n");
+ pkt_seq = pkt_seq_command_mode;
}
err = tegra_dsi_get_muldiv(dsi->format, &mul, &div);
@@ -456,61 +512,136 @@ static int tegra_output_dsi_enable(struct tegra_output *output)
if (err < 0)
return err;
- err = clk_enable(dsi->clk);
- if (err < 0)
- return err;
-
- reset_control_deassert(dsi->rst);
-
value = DSI_CONTROL_CHANNEL(0) | DSI_CONTROL_FORMAT(format) |
DSI_CONTROL_LANES(dsi->lanes - 1) |
- DSI_CONTROL_SOURCE(dc->pipe);
+ DSI_CONTROL_SOURCE(pipe);
tegra_dsi_writel(dsi, value, DSI_CONTROL);
- tegra_dsi_writel(dsi, DSI_VIDEO_FIFO_DEPTH, DSI_MAX_THRESHOLD);
+ tegra_dsi_writel(dsi, dsi->video_fifo_depth, DSI_MAX_THRESHOLD);
- value = DSI_HOST_CONTROL_HS | DSI_HOST_CONTROL_CS |
- DSI_HOST_CONTROL_ECC;
+ value = DSI_HOST_CONTROL_HS;
tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);
value = tegra_dsi_readl(dsi, DSI_CONTROL);
+
if (dsi->flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)
value |= DSI_CONTROL_HS_CLK_CTRL;
+
value &= ~DSI_CONTROL_TX_TRIG(3);
- value &= ~DSI_CONTROL_DCS_ENABLE;
+
+ /* enable DCS commands for command mode */
+ if (dsi->flags & MIPI_DSI_MODE_VIDEO)
+ value &= ~DSI_CONTROL_DCS_ENABLE;
+ else
+ value |= DSI_CONTROL_DCS_ENABLE;
+
value |= DSI_CONTROL_VIDEO_ENABLE;
value &= ~DSI_CONTROL_HOST_ENABLE;
tegra_dsi_writel(dsi, value, DSI_CONTROL);
- err = tegra_dsi_set_phy_timing(dsi);
- if (err < 0)
- return err;
-
for (i = 0; i < NUM_PKT_SEQ; i++)
tegra_dsi_writel(dsi, pkt_seq[i], DSI_PKT_SEQ_0_LO + i);
- /* horizontal active pixels */
- hact = mode->hdisplay * mul / div;
+ if (dsi->flags & MIPI_DSI_MODE_VIDEO) {
+ /* horizontal active pixels */
+ hact = mode->hdisplay * mul / div;
- /* horizontal sync width */
- hsw = (mode->hsync_end - mode->hsync_start) * mul / div;
- hsw -= 10;
+ /* horizontal sync width */
+ hsw = (mode->hsync_end - mode->hsync_start) * mul / div;
+ hsw -= 10;
- /* horizontal back porch */
- hbp = (mode->htotal - mode->hsync_end) * mul / div;
- hbp -= 14;
+ /* horizontal back porch */
+ hbp = (mode->htotal - mode->hsync_end) * mul / div;
+ hbp -= 14;
- /* horizontal front porch */
- hfp = (mode->hsync_start - mode->hdisplay) * mul / div;
- hfp -= 8;
+ /* horizontal front porch */
+ hfp = (mode->hsync_start - mode->hdisplay) * mul / div;
+ hfp -= 8;
- tegra_dsi_writel(dsi, hsw << 16 | 0, DSI_PKT_LEN_0_1);
- tegra_dsi_writel(dsi, hact << 16 | hbp, DSI_PKT_LEN_2_3);
- tegra_dsi_writel(dsi, hfp, DSI_PKT_LEN_4_5);
- tegra_dsi_writel(dsi, 0x0f0f << 16, DSI_PKT_LEN_6_7);
+ tegra_dsi_writel(dsi, hsw << 16 | 0, DSI_PKT_LEN_0_1);
+ tegra_dsi_writel(dsi, hact << 16 | hbp, DSI_PKT_LEN_2_3);
+ tegra_dsi_writel(dsi, hfp, DSI_PKT_LEN_4_5);
+ tegra_dsi_writel(dsi, 0x0f0f << 16, DSI_PKT_LEN_6_7);
- /* set SOL delay */
- tegra_dsi_writel(dsi, 8 * mul / div, DSI_SOL_DELAY);
+ /* set SOL delay (for non-burst mode only) */
+ tegra_dsi_writel(dsi, 8 * mul / div, DSI_SOL_DELAY);
+
+ /* TODO: implement ganged mode */
+ } else {
+ u16 bytes;
+
+ if (dsi->master || dsi->slave) {
+ /*
+ * For ganged mode, assume symmetric left-right mode.
+ */
+ bytes = 1 + (mode->hdisplay / 2) * mul / div;
+ } else {
+ /* 1 byte (DCS command) + pixel data */
+ bytes = 1 + mode->hdisplay * mul / div;
+ }
+
+ tegra_dsi_writel(dsi, 0, DSI_PKT_LEN_0_1);
+ tegra_dsi_writel(dsi, bytes << 16, DSI_PKT_LEN_2_3);
+ tegra_dsi_writel(dsi, bytes << 16, DSI_PKT_LEN_4_5);
+ tegra_dsi_writel(dsi, 0, DSI_PKT_LEN_6_7);
+
+ value = MIPI_DCS_WRITE_MEMORY_START << 8 |
+ MIPI_DCS_WRITE_MEMORY_CONTINUE;
+ tegra_dsi_writel(dsi, value, DSI_DCS_CMDS);
+
+ /* set SOL delay */
+ if (dsi->master || dsi->slave) {
+ unsigned int lanes = tegra_dsi_get_lanes(dsi);
+ unsigned long delay, bclk, bclk_ganged;
+
+ /* SOL to valid, valid to FIFO and FIFO write delay */
+ delay = 4 + 4 + 2;
+ delay = DIV_ROUND_UP(delay * mul, div * lanes);
+ /* FIFO read delay */
+ delay = delay + 6;
+
+ bclk = DIV_ROUND_UP(mode->htotal * mul, div * lanes);
+ bclk_ganged = DIV_ROUND_UP(bclk * lanes / 2, lanes);
+ value = bclk - bclk_ganged + delay + 20;
+ } else {
+ /* TODO: revisit for non-ganged mode */
+ value = 8 * mul / div;
+ }
+
+ tegra_dsi_writel(dsi, value, DSI_SOL_DELAY);
+ }
+
+ if (dsi->slave) {
+ err = tegra_dsi_configure(dsi->slave, pipe, mode);
+ if (err < 0)
+ return err;
+
+ /*
+ * TODO: Support modes other than symmetrical left-right
+ * split.
+ */
+ tegra_dsi_ganged_enable(dsi, 0, mode->hdisplay / 2);
+ tegra_dsi_ganged_enable(dsi->slave, mode->hdisplay / 2,
+ mode->hdisplay / 2);
+ }
+
+ return 0;
+}
+
+static int tegra_output_dsi_enable(struct tegra_output *output)
+{
+ struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+ const struct drm_display_mode *mode = &dc->base.mode;
+ struct tegra_dsi *dsi = to_dsi(output);
+ u32 value;
+ int err;
+
+ if (dsi->enabled)
+ return 0;
+
+ err = tegra_dsi_configure(dsi, dc->pipe, mode);
+ if (err < 0)
+ return err;
/* enable display controller */
value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
@@ -531,28 +662,79 @@ static int tegra_output_dsi_enable(struct tegra_output *output)
tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
/* enable DSI controller */
- value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
- value |= DSI_POWER_CONTROL_ENABLE;
- tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
+ tegra_dsi_enable(dsi);
dsi->enabled = true;
return 0;
}
+static int tegra_dsi_wait_idle(struct tegra_dsi *dsi, unsigned long timeout)
+{
+ u32 value;
+
+ timeout = jiffies + msecs_to_jiffies(timeout);
+
+ while (time_before(jiffies, timeout)) {
+ value = tegra_dsi_readl(dsi, DSI_STATUS);
+ if (value & DSI_STATUS_IDLE)
+ return 0;
+
+ usleep_range(1000, 2000);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static void tegra_dsi_video_disable(struct tegra_dsi *dsi)
+{
+ u32 value;
+
+ value = tegra_dsi_readl(dsi, DSI_CONTROL);
+ value &= ~DSI_CONTROL_VIDEO_ENABLE;
+ tegra_dsi_writel(dsi, value, DSI_CONTROL);
+
+ if (dsi->slave)
+ tegra_dsi_video_disable(dsi->slave);
+}
+
+static void tegra_dsi_ganged_disable(struct tegra_dsi *dsi)
+{
+ tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_START);
+ tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_SIZE);
+ tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_CONTROL);
+}
+
+static void tegra_dsi_disable(struct tegra_dsi *dsi)
+{
+ u32 value;
+
+ if (dsi->slave) {
+ tegra_dsi_ganged_disable(dsi->slave);
+ tegra_dsi_ganged_disable(dsi);
+ }
+
+ value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
+ value &= ~DSI_POWER_CONTROL_ENABLE;
+ tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
+
+ if (dsi->slave)
+ tegra_dsi_disable(dsi->slave);
+
+ usleep_range(5000, 10000);
+}
+
static int tegra_output_dsi_disable(struct tegra_output *output)
{
struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
struct tegra_dsi *dsi = to_dsi(output);
unsigned long value;
+ int err;
if (!dsi->enabled)
return 0;
- /* disable DSI controller */
- value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
- value &= ~DSI_POWER_CONTROL_ENABLE;
- tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
+ tegra_dsi_video_disable(dsi);
/*
* The following accesses registers of the display controller, so make
@@ -576,39 +758,68 @@ static int tegra_output_dsi_disable(struct tegra_output *output)
tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
}
- clk_disable(dsi->clk);
+ err = tegra_dsi_wait_idle(dsi, 100);
+ if (err < 0)
+ dev_dbg(dsi->dev, "failed to idle DSI: %d\n", err);
+
+ tegra_dsi_disable(dsi);
dsi->enabled = false;
return 0;
}
+static void tegra_dsi_set_timeout(struct tegra_dsi *dsi, unsigned long bclk,
+ unsigned int vrefresh)
+{
+ unsigned int timeout;
+ u32 value;
+
+ /* one frame high-speed transmission timeout */
+ timeout = (bclk / vrefresh) / 512;
+ value = DSI_TIMEOUT_LRX(0x2000) | DSI_TIMEOUT_HTX(timeout);
+ tegra_dsi_writel(dsi, value, DSI_TIMEOUT_0);
+
+ /* 2 ms peripheral timeout for panel */
+ timeout = 2 * bclk / 512 * 1000;
+ value = DSI_TIMEOUT_PR(timeout) | DSI_TIMEOUT_TA(0x2000);
+ tegra_dsi_writel(dsi, value, DSI_TIMEOUT_1);
+
+ value = DSI_TALLY_TA(0) | DSI_TALLY_LRX(0) | DSI_TALLY_HTX(0);
+ tegra_dsi_writel(dsi, value, DSI_TO_TALLY);
+
+ if (dsi->slave)
+ tegra_dsi_set_timeout(dsi->slave, bclk, vrefresh);
+}
+
static int tegra_output_dsi_setup_clock(struct tegra_output *output,
struct clk *clk, unsigned long pclk,
unsigned int *divp)
{
struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
struct drm_display_mode *mode = &dc->base.mode;
- unsigned int timeout, mul, div, vrefresh;
struct tegra_dsi *dsi = to_dsi(output);
- unsigned long bclk, plld, value;
+ unsigned int mul, div, vrefresh, lanes;
+ unsigned long bclk, plld;
int err;
+ lanes = tegra_dsi_get_lanes(dsi);
+
err = tegra_dsi_get_muldiv(dsi->format, &mul, &div);
if (err < 0)
return err;
- DRM_DEBUG_KMS("mul: %u, div: %u, lanes: %u\n", mul, div, dsi->lanes);
+ DRM_DEBUG_KMS("mul: %u, div: %u, lanes: %u\n", mul, div, lanes);
vrefresh = drm_mode_vrefresh(mode);
DRM_DEBUG_KMS("vrefresh: %u\n", vrefresh);
/* compute byte clock */
- bclk = (pclk * mul) / (div * dsi->lanes);
+ bclk = (pclk * mul) / (div * lanes);
/*
* Compute bit clock and round up to the next MHz.
*/
- plld = DIV_ROUND_UP(bclk * 8, 1000000) * 1000000;
+ plld = DIV_ROUND_UP(bclk * 8, USEC_PER_SEC) * USEC_PER_SEC;
/*
* We divide the frequency by two here, but we make up for that by
@@ -640,25 +851,17 @@ static int tegra_output_dsi_setup_clock(struct tegra_output *output,
* not working properly otherwise. Perhaps the PLLs cannot generate
* frequencies sufficiently high.
*/
- *divp = ((8 * mul) / (div * dsi->lanes)) - 2;
+ *divp = ((8 * mul) / (div * lanes)) - 2;
/*
* XXX: Move the below somewhere else so that we don't need to have
* access to the vrefresh in this function?
*/
+ tegra_dsi_set_timeout(dsi, bclk, vrefresh);
- /* one frame high-speed transmission timeout */
- timeout = (bclk / vrefresh) / 512;
- value = DSI_TIMEOUT_LRX(0x2000) | DSI_TIMEOUT_HTX(timeout);
- tegra_dsi_writel(dsi, value, DSI_TIMEOUT_0);
-
- /* 2 ms peripheral timeout for panel */
- timeout = 2 * bclk / 512 * 1000;
- value = DSI_TIMEOUT_PR(timeout) | DSI_TIMEOUT_TA(0x2000);
- tegra_dsi_writel(dsi, value, DSI_TIMEOUT_1);
-
- value = DSI_TALLY_TA(0) | DSI_TALLY_LRX(0) | DSI_TALLY_HTX(0);
- tegra_dsi_writel(dsi, value, DSI_TO_TALLY);
+ err = tegra_dsi_set_phy_timing(dsi);
+ if (err < 0)
+ return err;
return 0;
}
@@ -695,7 +898,7 @@ static int tegra_dsi_pad_enable(struct tegra_dsi *dsi)
static int tegra_dsi_pad_calibrate(struct tegra_dsi *dsi)
{
- unsigned long value;
+ u32 value;
tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_0);
tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_1);
@@ -720,14 +923,17 @@ static int tegra_dsi_init(struct host1x_client *client)
struct tegra_dsi *dsi = host1x_client_to_dsi(client);
int err;
- dsi->output.type = TEGRA_OUTPUT_DSI;
- dsi->output.dev = client->dev;
- dsi->output.ops = &dsi_ops;
-
- err = tegra_output_init(drm, &dsi->output);
- if (err < 0) {
- dev_err(client->dev, "output setup failed: %d\n", err);
- return err;
+ /* Gangsters must not register their own outputs. */
+ if (!dsi->master) {
+ dsi->output.type = TEGRA_OUTPUT_DSI;
+ dsi->output.dev = client->dev;
+ dsi->output.ops = &dsi_ops;
+
+ err = tegra_output_init(drm, &dsi->output);
+ if (err < 0) {
+ dev_err(client->dev, "output setup failed: %d\n", err);
+ return err;
+ }
}
if (IS_ENABLED(CONFIG_DEBUG_FS)) {
@@ -736,12 +942,6 @@ static int tegra_dsi_init(struct host1x_client *client)
dev_err(dsi->dev, "debugfs setup failed: %d\n", err);
}
- err = tegra_dsi_pad_calibrate(dsi);
- if (err < 0) {
- dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
- return err;
- }
-
return 0;
}
@@ -756,16 +956,20 @@ static int tegra_dsi_exit(struct host1x_client *client)
dev_err(dsi->dev, "debugfs cleanup failed: %d\n", err);
}
- err = tegra_output_disable(&dsi->output);
- if (err < 0) {
- dev_err(client->dev, "output failed to disable: %d\n", err);
- return err;
- }
-
- err = tegra_output_exit(&dsi->output);
- if (err < 0) {
- dev_err(client->dev, "output cleanup failed: %d\n", err);
- return err;
+ if (!dsi->master) {
+ err = tegra_output_disable(&dsi->output);
+ if (err < 0) {
+ dev_err(client->dev, "output failed to disable: %d\n",
+ err);
+ return err;
+ }
+
+ err = tegra_output_exit(&dsi->output);
+ if (err < 0) {
+ dev_err(client->dev, "output cleanup failed: %d\n",
+ err);
+ return err;
+ }
}
return 0;
@@ -792,20 +996,324 @@ static int tegra_dsi_setup_clocks(struct tegra_dsi *dsi)
return 0;
}
+static const char * const error_report[16] = {
+ "SoT Error",
+ "SoT Sync Error",
+ "EoT Sync Error",
+ "Escape Mode Entry Command Error",
+ "Low-Power Transmit Sync Error",
+ "Peripheral Timeout Error",
+ "False Control Error",
+ "Contention Detected",
+ "ECC Error, single-bit",
+ "ECC Error, multi-bit",
+ "Checksum Error",
+ "DSI Data Type Not Recognized",
+ "DSI VC ID Invalid",
+ "Invalid Transmission Length",
+ "Reserved",
+ "DSI Protocol Violation",
+};
+
+static ssize_t tegra_dsi_read_response(struct tegra_dsi *dsi,
+ const struct mipi_dsi_msg *msg,
+ size_t count)
+{
+ u8 *rx = msg->rx_buf;
+ unsigned int i, j, k;
+ size_t size = 0;
+ u16 errors;
+ u32 value;
+
+ /* read and parse packet header */
+ value = tegra_dsi_readl(dsi, DSI_RD_DATA);
+
+ switch (value & 0x3f) {
+ case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
+ errors = (value >> 8) & 0xffff;
+ dev_dbg(dsi->dev, "Acknowledge and error report: %04x\n",
+ errors);
+ for (i = 0; i < ARRAY_SIZE(error_report); i++)
+ if (errors & BIT(i))
+ dev_dbg(dsi->dev, " %2u: %s\n", i,
+ error_report[i]);
+ break;
+
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
+ rx[0] = (value >> 8) & 0xff;
+ size = 1;
+ break;
+
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
+ rx[0] = (value >> 8) & 0xff;
+ rx[1] = (value >> 16) & 0xff;
+ size = 2;
+ break;
+
+ case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
+ size = ((value >> 8) & 0xff00) | ((value >> 8) & 0xff);
+ break;
+
+ case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
+ size = ((value >> 8) & 0xff00) | ((value >> 8) & 0xff);
+ break;
+
+ default:
+ dev_err(dsi->dev, "unhandled response type: %02x\n",
+ value & 0x3f);
+ return -EPROTO;
+ }
+
+ size = min(size, msg->rx_len);
+
+ if (msg->rx_buf && size > 0) {
+ for (i = 0, j = 0; i < count - 1; i++, j += 4) {
+ u8 *rx = msg->rx_buf + j;
+
+ value = tegra_dsi_readl(dsi, DSI_RD_DATA);
+
+ for (k = 0; k < 4 && (j + k) < msg->rx_len; k++)
+ rx[j + k] = (value >> (k << 3)) & 0xff;
+ }
+ }
+
+ return size;
+}
+
+static int tegra_dsi_transmit(struct tegra_dsi *dsi, unsigned long timeout)
+{
+ tegra_dsi_writel(dsi, DSI_TRIGGER_HOST, DSI_TRIGGER);
+
+ timeout = jiffies + msecs_to_jiffies(timeout);
+
+ while (time_before(jiffies, timeout)) {
+ u32 value = tegra_dsi_readl(dsi, DSI_TRIGGER);
+ if ((value & DSI_TRIGGER_HOST) == 0)
+ return 0;
+
+ usleep_range(1000, 2000);
+ }
+
+ DRM_DEBUG_KMS("timeout waiting for transmission to complete\n");
+ return -ETIMEDOUT;
+}
+
+static int tegra_dsi_wait_for_response(struct tegra_dsi *dsi,
+ unsigned long timeout)
+{
+ timeout = jiffies + msecs_to_jiffies(250);
+
+ while (time_before(jiffies, timeout)) {
+ u32 value = tegra_dsi_readl(dsi, DSI_STATUS);
+ u8 count = value & 0x1f;
+
+ if (count > 0)
+ return count;
+
+ usleep_range(1000, 2000);
+ }
+
+ DRM_DEBUG_KMS("peripheral returned no data\n");
+ return -ETIMEDOUT;
+}
+
+static void tegra_dsi_writesl(struct tegra_dsi *dsi, unsigned long offset,
+ const void *buffer, size_t size)
+{
+ const u8 *buf = buffer;
+ size_t i, j;
+ u32 value;
+
+ for (j = 0; j < size; j += 4) {
+ value = 0;
+
+ for (i = 0; i < 4 && j + i < size; i++)
+ value |= buf[j + i] << (i << 3);
+
+ tegra_dsi_writel(dsi, value, DSI_WR_DATA);
+ }
+}
+
+static ssize_t tegra_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct tegra_dsi *dsi = host_to_tegra(host);
+ struct mipi_dsi_packet packet;
+ const u8 *header;
+ size_t count;
+ ssize_t err;
+ u32 value;
+
+ err = mipi_dsi_create_packet(&packet, msg);
+ if (err < 0)
+ return err;
+
+ header = packet.header;
+
+ /* maximum FIFO depth is 1920 words */
+ if (packet.size > dsi->video_fifo_depth * 4)
+ return -ENOSPC;
+
+ /* reset underflow/overflow flags */
+ value = tegra_dsi_readl(dsi, DSI_STATUS);
+ if (value & (DSI_STATUS_UNDERFLOW | DSI_STATUS_OVERFLOW)) {
+ value = DSI_HOST_CONTROL_FIFO_RESET;
+ tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);
+ usleep_range(10, 20);
+ }
+
+ value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
+ value |= DSI_POWER_CONTROL_ENABLE;
+ tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
+
+ usleep_range(5000, 10000);
+
+ value = DSI_HOST_CONTROL_CRC_RESET | DSI_HOST_CONTROL_TX_TRIG_HOST |
+ DSI_HOST_CONTROL_CS | DSI_HOST_CONTROL_ECC;
+
+ if ((msg->flags & MIPI_DSI_MSG_USE_LPM) == 0)
+ value |= DSI_HOST_CONTROL_HS;
+
+ /*
+ * The host FIFO has a maximum of 64 words, so larger transmissions
+ * need to use the video FIFO.
+ */
+ if (packet.size > dsi->host_fifo_depth * 4)
+ value |= DSI_HOST_CONTROL_FIFO_SEL;
+
+ tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);
+
+ /*
+ * For reads and messages with explicitly requested ACK, generate a
+ * BTA sequence after the transmission of the packet.
+ */
+ if ((msg->flags & MIPI_DSI_MSG_REQ_ACK) ||
+ (msg->rx_buf && msg->rx_len > 0)) {
+ value = tegra_dsi_readl(dsi, DSI_HOST_CONTROL);
+ value |= DSI_HOST_CONTROL_PKT_BTA;
+ tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);
+ }
+
+ value = DSI_CONTROL_LANES(0) | DSI_CONTROL_HOST_ENABLE;
+ tegra_dsi_writel(dsi, value, DSI_CONTROL);
+
+ /* write packet header, ECC is generated by hardware */
+ value = header[2] << 16 | header[1] << 8 | header[0];
+ tegra_dsi_writel(dsi, value, DSI_WR_DATA);
+
+ /* write payload (if any) */
+ if (packet.payload_length > 0)
+ tegra_dsi_writesl(dsi, DSI_WR_DATA, packet.payload,
+ packet.payload_length);
+
+ err = tegra_dsi_transmit(dsi, 250);
+ if (err < 0)
+ return err;
+
+ if ((msg->flags & MIPI_DSI_MSG_REQ_ACK) ||
+ (msg->rx_buf && msg->rx_len > 0)) {
+ err = tegra_dsi_wait_for_response(dsi, 250);
+ if (err < 0)
+ return err;
+
+ count = err;
+
+ value = tegra_dsi_readl(dsi, DSI_RD_DATA);
+ switch (value) {
+ case 0x84:
+ /*
+ dev_dbg(dsi->dev, "ACK\n");
+ */
+ break;
+
+ case 0x87:
+ /*
+ dev_dbg(dsi->dev, "ESCAPE\n");
+ */
+ break;
+
+ default:
+ dev_err(dsi->dev, "unknown status: %08x\n", value);
+ break;
+ }
+
+ if (count > 1) {
+ err = tegra_dsi_read_response(dsi, msg, count);
+ if (err < 0)
+ dev_err(dsi->dev,
+ "failed to parse response: %zd\n",
+ err);
+ else {
+ /*
+ * For read commands, return the number of
+ * bytes returned by the peripheral.
+ */
+ count = err;
+ }
+ }
+ } else {
+ /*
+ * For write commands, we have transmitted the 4-byte header
+ * plus the variable-length payload.
+ */
+ count = 4 + packet.payload_length;
+ }
+
+ return count;
+}
+
+static int tegra_dsi_ganged_setup(struct tegra_dsi *dsi)
+{
+ struct clk *parent;
+ int err;
+
+ /* make sure both DSI controllers share the same PLL */
+ parent = clk_get_parent(dsi->slave->clk);
+ if (!parent)
+ return -EINVAL;
+
+ err = clk_set_parent(parent, dsi->clk_parent);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
static int tegra_dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct tegra_dsi *dsi = host_to_tegra(host);
- struct tegra_output *output = &dsi->output;
dsi->flags = device->mode_flags;
dsi->format = device->format;
dsi->lanes = device->lanes;
- output->panel = of_drm_find_panel(device->dev.of_node);
- if (output->panel) {
- if (output->connector.dev)
+ if (dsi->slave) {
+ int err;
+
+ dev_dbg(dsi->dev, "attaching dual-channel device %s\n",
+ dev_name(&device->dev));
+
+ err = tegra_dsi_ganged_setup(dsi);
+ if (err < 0) {
+ dev_err(dsi->dev, "failed to set up ganged mode: %d\n",
+ err);
+ return err;
+ }
+ }
+
+ /*
+ * Slaves don't have a panel associated with them, so they provide
+ * merely the second channel.
+ */
+ if (!dsi->master) {
+ struct tegra_output *output = &dsi->output;
+
+ output->panel = of_drm_find_panel(device->dev.of_node);
+ if (output->panel && output->connector.dev) {
+ drm_panel_attach(output->panel, &output->connector);
drm_helper_hpd_irq_event(output->connector.dev);
+ }
}
return 0;
@@ -818,10 +1326,10 @@ static int tegra_dsi_host_detach(struct mipi_dsi_host *host,
struct tegra_output *output = &dsi->output;
if (output->panel && &device->dev == output->panel->dev) {
+ output->panel = NULL;
+
if (output->connector.dev)
drm_helper_hpd_irq_event(output->connector.dev);
-
- output->panel = NULL;
}
return 0;
@@ -830,8 +1338,29 @@ static int tegra_dsi_host_detach(struct mipi_dsi_host *host,
static const struct mipi_dsi_host_ops tegra_dsi_host_ops = {
.attach = tegra_dsi_host_attach,
.detach = tegra_dsi_host_detach,
+ .transfer = tegra_dsi_host_transfer,
};
+static int tegra_dsi_ganged_probe(struct tegra_dsi *dsi)
+{
+ struct device_node *np;
+
+ np = of_parse_phandle(dsi->dev->of_node, "nvidia,ganged-mode", 0);
+ if (np) {
+ struct platform_device *gangster = of_find_device_by_node(np);
+
+ dsi->slave = platform_get_drvdata(gangster);
+ of_node_put(np);
+
+ if (!dsi->slave)
+ return -EPROBE_DEFER;
+
+ dsi->slave->master = dsi;
+ }
+
+ return 0;
+}
+
static int tegra_dsi_probe(struct platform_device *pdev)
{
struct tegra_dsi *dsi;
@@ -843,11 +1372,19 @@ static int tegra_dsi_probe(struct platform_device *pdev)
return -ENOMEM;
dsi->output.dev = dsi->dev = &pdev->dev;
+ dsi->video_fifo_depth = 1920;
+ dsi->host_fifo_depth = 64;
+
+ err = tegra_dsi_ganged_probe(dsi);
+ if (err < 0)
+ return err;
err = tegra_output_probe(&dsi->output);
if (err < 0)
return err;
+ dsi->output.connector.polled = DRM_CONNECTOR_POLL_HPD;
+
/*
* Assume these values by default. When a DSI peripheral driver
* attaches to the DSI host, the parameters will be taken from
@@ -861,68 +1398,83 @@ static int tegra_dsi_probe(struct platform_device *pdev)
if (IS_ERR(dsi->rst))
return PTR_ERR(dsi->rst);
+ err = reset_control_deassert(dsi->rst);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to bring DSI out of reset: %d\n",
+ err);
+ return err;
+ }
+
dsi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(dsi->clk)) {
dev_err(&pdev->dev, "cannot get DSI clock\n");
- return PTR_ERR(dsi->clk);
+ err = PTR_ERR(dsi->clk);
+ goto reset;
}
err = clk_prepare_enable(dsi->clk);
if (err < 0) {
dev_err(&pdev->dev, "cannot enable DSI clock\n");
- return err;
+ goto reset;
}
dsi->clk_lp = devm_clk_get(&pdev->dev, "lp");
if (IS_ERR(dsi->clk_lp)) {
dev_err(&pdev->dev, "cannot get low-power clock\n");
- return PTR_ERR(dsi->clk_lp);
+ err = PTR_ERR(dsi->clk_lp);
+ goto disable_clk;
}
err = clk_prepare_enable(dsi->clk_lp);
if (err < 0) {
dev_err(&pdev->dev, "cannot enable low-power clock\n");
- return err;
+ goto disable_clk;
}
dsi->clk_parent = devm_clk_get(&pdev->dev, "parent");
if (IS_ERR(dsi->clk_parent)) {
dev_err(&pdev->dev, "cannot get parent clock\n");
- return PTR_ERR(dsi->clk_parent);
- }
-
- err = clk_prepare_enable(dsi->clk_parent);
- if (err < 0) {
- dev_err(&pdev->dev, "cannot enable parent clock\n");
- return err;
+ err = PTR_ERR(dsi->clk_parent);
+ goto disable_clk_lp;
}
dsi->vdd = devm_regulator_get(&pdev->dev, "avdd-dsi-csi");
if (IS_ERR(dsi->vdd)) {
dev_err(&pdev->dev, "cannot get VDD supply\n");
- return PTR_ERR(dsi->vdd);
+ err = PTR_ERR(dsi->vdd);
+ goto disable_clk_lp;
}
err = regulator_enable(dsi->vdd);
if (err < 0) {
dev_err(&pdev->dev, "cannot enable VDD supply\n");
- return err;
+ goto disable_clk_lp;
}
err = tegra_dsi_setup_clocks(dsi);
if (err < 0) {
dev_err(&pdev->dev, "cannot setup clocks\n");
- return err;
+ goto disable_vdd;
}
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dsi->regs = devm_ioremap_resource(&pdev->dev, regs);
- if (IS_ERR(dsi->regs))
- return PTR_ERR(dsi->regs);
+ if (IS_ERR(dsi->regs)) {
+ err = PTR_ERR(dsi->regs);
+ goto disable_vdd;
+ }
dsi->mipi = tegra_mipi_request(&pdev->dev);
- if (IS_ERR(dsi->mipi))
- return PTR_ERR(dsi->mipi);
+ if (IS_ERR(dsi->mipi)) {
+ err = PTR_ERR(dsi->mipi);
+ goto disable_vdd;
+ }
+
+ err = tegra_dsi_pad_calibrate(dsi);
+ if (err < 0) {
+ dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
+ goto mipi_free;
+ }
dsi->host.ops = &tegra_dsi_host_ops;
dsi->host.dev = &pdev->dev;
@@ -930,7 +1482,7 @@ static int tegra_dsi_probe(struct platform_device *pdev)
err = mipi_dsi_host_register(&dsi->host);
if (err < 0) {
dev_err(&pdev->dev, "failed to register DSI host: %d\n", err);
- return err;
+ goto mipi_free;
}
INIT_LIST_HEAD(&dsi->client.list);
@@ -941,12 +1493,26 @@ static int tegra_dsi_probe(struct platform_device *pdev)
if (err < 0) {
dev_err(&pdev->dev, "failed to register host1x client: %d\n",
err);
- return err;
+ goto unregister;
}
platform_set_drvdata(pdev, dsi);
return 0;
+
+unregister:
+ mipi_dsi_host_unregister(&dsi->host);
+mipi_free:
+ tegra_mipi_free(dsi->mipi);
+disable_vdd:
+ regulator_disable(dsi->vdd);
+disable_clk_lp:
+ clk_disable_unprepare(dsi->clk_lp);
+disable_clk:
+ clk_disable_unprepare(dsi->clk);
+reset:
+ reset_control_assert(dsi->rst);
+ return err;
}
static int tegra_dsi_remove(struct platform_device *pdev)
@@ -965,7 +1531,6 @@ static int tegra_dsi_remove(struct platform_device *pdev)
tegra_mipi_free(dsi->mipi);
regulator_disable(dsi->vdd);
- clk_disable_unprepare(dsi->clk_parent);
clk_disable_unprepare(dsi->clk_lp);
clk_disable_unprepare(dsi->clk);
reset_control_assert(dsi->rst);
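/*
 * Illustrative sketch, not part of the patch above: several of the new DSI
 * helpers (tegra_dsi_wait_idle(), tegra_dsi_transmit(),
 * tegra_dsi_wait_for_response()) share the same sleeping poll-with-deadline
 * pattern.  Its generic shape, as a hypothetical helper:
 */
static int example_dsi_poll_idle(struct tegra_dsi *dsi,
				 unsigned long timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	while (time_before(jiffies, deadline)) {
		if (tegra_dsi_readl(dsi, DSI_STATUS) & DSI_STATUS_IDLE)
			return 0;

		/* sleep rather than busy-wait; millisecond granularity is enough */
		usleep_range(1000, 2000);
	}

	return -ETIMEDOUT;
}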
diff --git a/drivers/gpu/drm/tegra/dsi.h b/drivers/gpu/drm/tegra/dsi.h
index 5ce610d08d7..bad1006a515 100644
--- a/drivers/gpu/drm/tegra/dsi.h
+++ b/drivers/gpu/drm/tegra/dsi.h
@@ -21,9 +21,16 @@
#define DSI_INT_STATUS 0x0d
#define DSI_INT_MASK 0x0e
#define DSI_HOST_CONTROL 0x0f
+#define DSI_HOST_CONTROL_FIFO_RESET (1 << 21)
+#define DSI_HOST_CONTROL_CRC_RESET (1 << 20)
+#define DSI_HOST_CONTROL_TX_TRIG_SOL (0 << 12)
+#define DSI_HOST_CONTROL_TX_TRIG_FIFO (1 << 12)
+#define DSI_HOST_CONTROL_TX_TRIG_HOST (2 << 12)
#define DSI_HOST_CONTROL_RAW (1 << 6)
#define DSI_HOST_CONTROL_HS (1 << 5)
-#define DSI_HOST_CONTROL_BTA (1 << 2)
+#define DSI_HOST_CONTROL_FIFO_SEL (1 << 4)
+#define DSI_HOST_CONTROL_IMM_BTA (1 << 3)
+#define DSI_HOST_CONTROL_PKT_BTA (1 << 2)
#define DSI_HOST_CONTROL_CS (1 << 1)
#define DSI_HOST_CONTROL_ECC (1 << 0)
#define DSI_CONTROL 0x10
@@ -39,9 +46,13 @@
#define DSI_SOL_DELAY 0x11
#define DSI_MAX_THRESHOLD 0x12
#define DSI_TRIGGER 0x13
+#define DSI_TRIGGER_HOST (1 << 1)
+#define DSI_TRIGGER_VIDEO (1 << 0)
#define DSI_TX_CRC 0x14
#define DSI_STATUS 0x15
#define DSI_STATUS_IDLE (1 << 10)
+#define DSI_STATUS_UNDERFLOW (1 << 9)
+#define DSI_STATUS_OVERFLOW (1 << 8)
#define DSI_INIT_SEQ_CONTROL 0x1a
#define DSI_INIT_SEQ_DATA_0 0x1b
#define DSI_INIT_SEQ_DATA_1 0x1c
@@ -104,6 +115,7 @@
#define DSI_PAD_CONTROL_3 0x51
#define DSI_PAD_CONTROL_4 0x52
#define DSI_GANGED_MODE_CONTROL 0x53
+#define DSI_GANGED_MODE_CONTROL_ENABLE (1 << 0)
#define DSI_GANGED_MODE_START 0x54
#define DSI_GANGED_MODE_SIZE 0x55
#define DSI_RAW_DATA_BYTE_COUNT 0x56
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 3513d12d5aa..e9c715d8926 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -65,8 +65,12 @@ static void tegra_fb_destroy(struct drm_framebuffer *framebuffer)
for (i = 0; i < fb->num_planes; i++) {
struct tegra_bo *bo = fb->planes[i];
- if (bo)
+ if (bo) {
+ if (bo->pages && bo->vaddr)
+ vunmap(bo->vaddr);
+
drm_gem_object_unreference_unlocked(&bo->gem);
+ }
}
drm_framebuffer_cleanup(framebuffer);
@@ -223,14 +227,16 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
info = framebuffer_alloc(0, drm->dev);
if (!info) {
dev_err(drm->dev, "failed to allocate framebuffer info\n");
- tegra_bo_free_object(&bo->gem);
+ drm_gem_object_unreference_unlocked(&bo->gem);
return -ENOMEM;
}
fbdev->fb = tegra_fb_alloc(drm, &cmd, &bo, 1);
if (IS_ERR(fbdev->fb)) {
- dev_err(drm->dev, "failed to allocate DRM framebuffer\n");
err = PTR_ERR(fbdev->fb);
+ dev_err(drm->dev, "failed to allocate DRM framebuffer: %d\n",
+ err);
+ drm_gem_object_unreference_unlocked(&bo->gem);
goto release;
}
@@ -254,6 +260,16 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
offset = info->var.xoffset * bytes_per_pixel +
info->var.yoffset * fb->pitches[0];
+ if (bo->pages) {
+ bo->vaddr = vmap(bo->pages, bo->num_pages, VM_MAP,
+ pgprot_writecombine(PAGE_KERNEL));
+ if (!bo->vaddr) {
+ dev_err(drm->dev, "failed to vmap() framebuffer\n");
+ err = -ENOMEM;
+ goto destroy;
+ }
+ }
+
drm->mode_config.fb_base = (resource_size_t)bo->paddr;
info->screen_base = (void __iomem *)bo->vaddr + offset;
info->screen_size = size;
@@ -289,6 +305,11 @@ static struct tegra_fbdev *tegra_fbdev_create(struct drm_device *drm)
return fbdev;
}
+static void tegra_fbdev_free(struct tegra_fbdev *fbdev)
+{
+ kfree(fbdev);
+}
+
static int tegra_fbdev_init(struct tegra_fbdev *fbdev,
unsigned int preferred_bpp,
unsigned int num_crtc,
@@ -299,19 +320,21 @@ static int tegra_fbdev_init(struct tegra_fbdev *fbdev,
err = drm_fb_helper_init(drm, &fbdev->base, num_crtc, max_connectors);
if (err < 0) {
- dev_err(drm->dev, "failed to initialize DRM FB helper\n");
+ dev_err(drm->dev, "failed to initialize DRM FB helper: %d\n",
+ err);
return err;
}
err = drm_fb_helper_single_add_all_connectors(&fbdev->base);
if (err < 0) {
- dev_err(drm->dev, "failed to add connectors\n");
+ dev_err(drm->dev, "failed to add connectors: %d\n", err);
goto fini;
}
err = drm_fb_helper_initial_config(&fbdev->base, preferred_bpp);
if (err < 0) {
- dev_err(drm->dev, "failed to set initial configuration\n");
+ dev_err(drm->dev, "failed to set initial configuration: %d\n",
+ err);
goto fini;
}
@@ -322,7 +345,7 @@ fini:
return err;
}
-static void tegra_fbdev_free(struct tegra_fbdev *fbdev)
+static void tegra_fbdev_exit(struct tegra_fbdev *fbdev)
{
struct fb_info *info = fbdev->base.fbdev;
@@ -341,11 +364,11 @@ static void tegra_fbdev_free(struct tegra_fbdev *fbdev)
if (fbdev->fb) {
drm_framebuffer_unregister_private(&fbdev->fb->base);
- tegra_fb_destroy(&fbdev->fb->base);
+ drm_framebuffer_remove(&fbdev->fb->base);
}
drm_fb_helper_fini(&fbdev->base);
- kfree(fbdev);
+ tegra_fbdev_free(fbdev);
}
void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev)
@@ -393,6 +416,15 @@ int tegra_drm_fb_prepare(struct drm_device *drm)
return 0;
}
+void tegra_drm_fb_free(struct drm_device *drm)
+{
+#ifdef CONFIG_DRM_TEGRA_FBDEV
+ struct tegra_drm *tegra = drm->dev_private;
+
+ tegra_fbdev_free(tegra->fbdev);
+#endif
+}
+
int tegra_drm_fb_init(struct drm_device *drm)
{
#ifdef CONFIG_DRM_TEGRA_FBDEV
@@ -413,6 +445,6 @@ void tegra_drm_fb_exit(struct drm_device *drm)
#ifdef CONFIG_DRM_TEGRA_FBDEV
struct tegra_drm *tegra = drm->dev_private;
- tegra_fbdev_free(tegra->fbdev);
+ tegra_fbdev_exit(tegra->fbdev);
#endif
}
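/*
 * Illustrative sketch, not part of the patch above: when a framebuffer BO is
 * backed by individual pages (the IOMMU case), the fbdev console needs a
 * kernel virtual mapping, which the patch obtains with vmap().  Minimal,
 * hypothetical form (needs <linux/vmalloc.h>):
 */
static void *example_map_fbdev_bo(struct tegra_bo *bo)
{
	if (!bo->pages)
		return bo->vaddr; /* contiguous allocation is already mapped */

	/* write-combined mapping of the page array for CPU rendering */
	return vmap(bo->pages, bo->num_pages, VM_MAP,
		    pgprot_writecombine(PAGE_KERNEL));
}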
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index ce023fa3e8a..da32086cbea 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -14,6 +14,7 @@
*/
#include <linux/dma-buf.h>
+#include <linux/iommu.h>
#include <drm/tegra_drm.h>
#include "drm.h"
@@ -91,13 +92,90 @@ static const struct host1x_bo_ops tegra_bo_ops = {
.kunmap = tegra_bo_kunmap,
};
-static void tegra_bo_destroy(struct drm_device *drm, struct tegra_bo *bo)
+/*
+ * A generic iommu_map_sg() function is being reviewed and will hopefully be
+ * merged soon. At that point this function can be dropped in favour of the
+ * one provided by the IOMMU API.
+ */
+static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents,
+ int prot)
{
- dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
+ struct scatterlist *s;
+ size_t offset = 0;
+ unsigned int i;
+ int err;
+
+ for_each_sg(sg, s, nents, i) {
+ phys_addr_t phys = page_to_phys(sg_page(s));
+ size_t length = s->offset + s->length;
+
+ err = iommu_map(domain, iova + offset, phys, length, prot);
+ if (err < 0) {
+ iommu_unmap(domain, iova, offset);
+ return err;
+ }
+
+ offset += length;
+ }
+
+ return offset;
}
-struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
- unsigned long flags)
+static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
+{
+ int prot = IOMMU_READ | IOMMU_WRITE;
+ ssize_t err;
+
+ if (bo->mm)
+ return -EBUSY;
+
+ bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
+ if (!bo->mm)
+ return -ENOMEM;
+
+ err = drm_mm_insert_node_generic(&tegra->mm, bo->mm, bo->gem.size,
+ PAGE_SIZE, 0, 0, 0);
+ if (err < 0) {
+ dev_err(tegra->drm->dev, "out of I/O virtual memory: %zd\n",
+ err);
+ goto free;
+ }
+
+ bo->paddr = bo->mm->start;
+
+ err = __iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
+ bo->sgt->nents, prot);
+ if (err < 0) {
+ dev_err(tegra->drm->dev, "failed to map buffer: %zd\n", err);
+ goto remove;
+ }
+
+ bo->size = err;
+
+ return 0;
+
+remove:
+ drm_mm_remove_node(bo->mm);
+free:
+ kfree(bo->mm);
+ return err;
+}
+
+static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
+{
+ if (!bo->mm)
+ return 0;
+
+ iommu_unmap(tegra->domain, bo->paddr, bo->size);
+ drm_mm_remove_node(bo->mm);
+ kfree(bo->mm);
+
+ return 0;
+}
+
+static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
+ size_t size)
{
struct tegra_bo *bo;
int err;
@@ -109,22 +187,96 @@ struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
host1x_bo_init(&bo->base, &tegra_bo_ops);
size = round_up(size, PAGE_SIZE);
- bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
- GFP_KERNEL | __GFP_NOWARN);
- if (!bo->vaddr) {
- dev_err(drm->dev, "failed to allocate buffer with size %u\n",
- size);
- err = -ENOMEM;
- goto err_dma;
- }
-
err = drm_gem_object_init(drm, &bo->gem, size);
- if (err)
- goto err_init;
+ if (err < 0)
+ goto free;
err = drm_gem_create_mmap_offset(&bo->gem);
- if (err)
- goto err_mmap;
+ if (err < 0)
+ goto release;
+
+ return bo;
+
+release:
+ drm_gem_object_release(&bo->gem);
+free:
+ kfree(bo);
+ return ERR_PTR(err);
+}
+
+static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
+{
+ if (bo->pages) {
+ drm_gem_put_pages(&bo->gem, bo->pages, true, true);
+ sg_free_table(bo->sgt);
+ kfree(bo->sgt);
+ } else if (bo->vaddr) {
+ dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr,
+ bo->paddr);
+ }
+}
+
+static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo,
+ size_t size)
+{
+ bo->pages = drm_gem_get_pages(&bo->gem);
+ if (IS_ERR(bo->pages))
+ return PTR_ERR(bo->pages);
+
+ bo->num_pages = size >> PAGE_SHIFT;
+
+ bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
+ if (IS_ERR(bo->sgt)) {
+ drm_gem_put_pages(&bo->gem, bo->pages, false, false);
+ return PTR_ERR(bo->sgt);
+ }
+
+ return 0;
+}
+
+static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo,
+ size_t size)
+{
+ struct tegra_drm *tegra = drm->dev_private;
+ int err;
+
+ if (tegra->domain) {
+ err = tegra_bo_get_pages(drm, bo, size);
+ if (err < 0)
+ return err;
+
+ err = tegra_bo_iommu_map(tegra, bo);
+ if (err < 0) {
+ tegra_bo_free(drm, bo);
+ return err;
+ }
+ } else {
+ bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!bo->vaddr) {
+ dev_err(drm->dev,
+ "failed to allocate buffer of size %zu\n",
+ size);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
+ unsigned long flags)
+{
+ struct tegra_bo *bo;
+ int err;
+
+ bo = tegra_bo_alloc_object(drm, size);
+ if (IS_ERR(bo))
+ return bo;
+
+ err = tegra_bo_alloc(drm, bo, size);
+ if (err < 0)
+ goto release;
if (flags & DRM_TEGRA_GEM_CREATE_TILED)
bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;
@@ -134,69 +286,52 @@ struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
return bo;
-err_mmap:
+release:
drm_gem_object_release(&bo->gem);
-err_init:
- tegra_bo_destroy(drm, bo);
-err_dma:
kfree(bo);
-
return ERR_PTR(err);
}
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
struct drm_device *drm,
- unsigned int size,
+ size_t size,
unsigned long flags,
- unsigned int *handle)
+ u32 *handle)
{
struct tegra_bo *bo;
- int ret;
+ int err;
bo = tegra_bo_create(drm, size, flags);
if (IS_ERR(bo))
return bo;
- ret = drm_gem_handle_create(file, &bo->gem, handle);
- if (ret)
- goto err;
+ err = drm_gem_handle_create(file, &bo->gem, handle);
+ if (err) {
+ tegra_bo_free_object(&bo->gem);
+ return ERR_PTR(err);
+ }
drm_gem_object_unreference_unlocked(&bo->gem);
return bo;
-
-err:
- tegra_bo_free_object(&bo->gem);
- return ERR_PTR(ret);
}
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
struct dma_buf *buf)
{
+ struct tegra_drm *tegra = drm->dev_private;
struct dma_buf_attachment *attach;
struct tegra_bo *bo;
- ssize_t size;
int err;
- bo = kzalloc(sizeof(*bo), GFP_KERNEL);
- if (!bo)
- return ERR_PTR(-ENOMEM);
-
- host1x_bo_init(&bo->base, &tegra_bo_ops);
- size = round_up(buf->size, PAGE_SIZE);
-
- err = drm_gem_object_init(drm, &bo->gem, size);
- if (err < 0)
- goto free;
-
- err = drm_gem_create_mmap_offset(&bo->gem);
- if (err < 0)
- goto release;
+ bo = tegra_bo_alloc_object(drm, buf->size);
+ if (IS_ERR(bo))
+ return bo;
attach = dma_buf_attach(buf, drm->dev);
if (IS_ERR(attach)) {
err = PTR_ERR(attach);
- goto free_mmap;
+ goto free;
}
get_dma_buf(buf);
@@ -212,12 +347,19 @@ static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
goto detach;
}
- if (bo->sgt->nents > 1) {
- err = -EINVAL;
- goto detach;
+ if (tegra->domain) {
+ err = tegra_bo_iommu_map(tegra, bo);
+ if (err < 0)
+ goto detach;
+ } else {
+ if (bo->sgt->nents > 1) {
+ err = -EINVAL;
+ goto detach;
+ }
+
+ bo->paddr = sg_dma_address(bo->sgt->sgl);
}
- bo->paddr = sg_dma_address(bo->sgt->sgl);
bo->gem.import_attach = attach;
return bo;
@@ -228,47 +370,41 @@ detach:
dma_buf_detach(buf, attach);
dma_buf_put(buf);
-free_mmap:
- drm_gem_free_mmap_offset(&bo->gem);
-release:
- drm_gem_object_release(&bo->gem);
free:
+ drm_gem_object_release(&bo->gem);
kfree(bo);
-
return ERR_PTR(err);
}
void tegra_bo_free_object(struct drm_gem_object *gem)
{
+ struct tegra_drm *tegra = gem->dev->dev_private;
struct tegra_bo *bo = to_tegra_bo(gem);
+ if (tegra->domain)
+ tegra_bo_iommu_unmap(tegra, bo);
+
if (gem->import_attach) {
dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
DMA_TO_DEVICE);
drm_prime_gem_destroy(gem, NULL);
} else {
- tegra_bo_destroy(gem->dev, bo);
+ tegra_bo_free(gem->dev, bo);
}
- drm_gem_free_mmap_offset(gem);
drm_gem_object_release(gem);
-
kfree(bo);
}
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
struct drm_mode_create_dumb *args)
{
- int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+ unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
struct tegra_drm *tegra = drm->dev_private;
struct tegra_bo *bo;
- min_pitch = round_up(min_pitch, tegra->pitch_align);
- if (args->pitch < min_pitch)
- args->pitch = min_pitch;
-
- if (args->size < args->pitch * args->height)
- args->size = args->pitch * args->height;
+ args->pitch = round_up(min_pitch, tegra->pitch_align);
+ args->size = args->pitch * args->height;
bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
&args->handle);
@@ -279,7 +415,7 @@ int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
}
int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
- uint32_t handle, uint64_t *offset)
+ u32 handle, u64 *offset)
{
struct drm_gem_object *gem;
struct tegra_bo *bo;
@@ -304,7 +440,38 @@ int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
return 0;
}
+static int tegra_bo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct drm_gem_object *gem = vma->vm_private_data;
+ struct tegra_bo *bo = to_tegra_bo(gem);
+ struct page *page;
+ pgoff_t offset;
+ int err;
+
+ if (!bo->pages)
+ return VM_FAULT_SIGBUS;
+
+ offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> PAGE_SHIFT;
+ page = bo->pages[offset];
+
+ err = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
+ switch (err) {
+ case -EAGAIN:
+ case 0:
+ case -ERESTARTSYS:
+ case -EINTR:
+ case -EBUSY:
+ return VM_FAULT_NOPAGE;
+
+ case -ENOMEM:
+ return VM_FAULT_OOM;
+ }
+
+ return VM_FAULT_SIGBUS;
+}
+
const struct vm_operations_struct tegra_bo_vm_ops = {
+ .fault = tegra_bo_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
@@ -322,12 +489,30 @@ int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
gem = vma->vm_private_data;
bo = to_tegra_bo(gem);
- ret = remap_pfn_range(vma, vma->vm_start, bo->paddr >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start, vma->vm_page_prot);
- if (ret)
- drm_gem_vm_close(vma);
+ if (!bo->pages) {
+ unsigned long vm_pgoff = vma->vm_pgoff;
+
+ vma->vm_flags &= ~VM_PFNMAP;
+ vma->vm_pgoff = 0;
+
+ ret = dma_mmap_writecombine(gem->dev->dev, vma, bo->vaddr,
+ bo->paddr, gem->size);
+ if (ret) {
+ drm_gem_vm_close(vma);
+ return ret;
+ }
+
+ vma->vm_pgoff = vm_pgoff;
+ } else {
+ pgprot_t prot = vm_get_page_prot(vma->vm_flags);
+
+ vma->vm_flags |= VM_MIXEDMAP;
+ vma->vm_flags &= ~VM_PFNMAP;
- return ret;
+ vma->vm_page_prot = pgprot_writecombine(prot);
+ }
+
+ return 0;
}
static struct sg_table *
@@ -342,21 +527,44 @@ tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
if (!sgt)
return NULL;
- if (sg_alloc_table(sgt, 1, GFP_KERNEL)) {
- kfree(sgt);
- return NULL;
- }
+ if (bo->pages) {
+ struct scatterlist *sg;
+ unsigned int i;
- sg_dma_address(sgt->sgl) = bo->paddr;
- sg_dma_len(sgt->sgl) = gem->size;
+ if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
+ goto free;
+
+ for_each_sg(sgt->sgl, sg, bo->num_pages, i)
+ sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);
+
+ if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
+ goto free;
+ } else {
+ if (sg_alloc_table(sgt, 1, GFP_KERNEL))
+ goto free;
+
+ sg_dma_address(sgt->sgl) = bo->paddr;
+ sg_dma_len(sgt->sgl) = gem->size;
+ }
return sgt;
+
+free:
+ sg_free_table(sgt);
+ kfree(sgt);
+ return NULL;
}
static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
struct sg_table *sgt,
enum dma_data_direction dir)
{
+ struct drm_gem_object *gem = attach->dmabuf->priv;
+ struct tegra_bo *bo = to_tegra_bo(gem);
+
+ if (bo->pages)
+ dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+
sg_free_table(sgt);
kfree(sgt);
}
diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h
index 6538b56780c..6c5f12ac008 100644
--- a/drivers/gpu/drm/tegra/gem.h
+++ b/drivers/gpu/drm/tegra/gem.h
@@ -38,6 +38,12 @@ struct tegra_bo {
dma_addr_t paddr;
void *vaddr;
+ struct drm_mm_node *mm;
+ unsigned long num_pages;
+ struct page **pages;
+ /* size of IOMMU mapping */
+ size_t size;
+
struct tegra_bo_tiling tiling;
};
@@ -46,18 +52,18 @@ static inline struct tegra_bo *to_tegra_bo(struct drm_gem_object *gem)
return container_of(gem, struct tegra_bo, gem);
}
-struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
+struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
unsigned long flags);
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
struct drm_device *drm,
- unsigned int size,
+ size_t size,
unsigned long flags,
- unsigned int *handle);
+ u32 *handle);
void tegra_bo_free_object(struct drm_gem_object *gem);
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
struct drm_mode_create_dumb *args);
int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
- uint32_t handle, uint64_t *offset);
+ u32 handle, u64 *offset);
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma);
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index 0c67d7eebc9..6a5c7b81fbc 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -157,22 +157,18 @@ static bool tegra_encoder_mode_fixup(struct drm_encoder *encoder,
static void tegra_encoder_prepare(struct drm_encoder *encoder)
{
+ tegra_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
}
static void tegra_encoder_commit(struct drm_encoder *encoder)
{
+ tegra_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
}
static void tegra_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted)
{
- struct tegra_output *output = encoder_to_output(encoder);
- int err;
-
- err = tegra_output_enable(output);
- if (err < 0)
- dev_err(encoder->dev->dev, "tegra_output_enable(): %d\n", err);
}
static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
@@ -187,7 +183,8 @@ static irqreturn_t hpd_irq(int irq, void *data)
{
struct tegra_output *output = data;
- drm_helper_hpd_irq_event(output->connector.dev);
+ if (output->connector.dev)
+ drm_helper_hpd_irq_event(output->connector.dev);
return IRQ_HANDLED;
}
@@ -259,6 +256,13 @@ int tegra_output_probe(struct tegra_output *output)
}
output->connector.polled = DRM_CONNECTOR_POLL_HPD;
+
+ /*
+ * Disable the interrupt until the connector has been
+ * initialized to avoid a race in the hotplug interrupt
+ * handler.
+ */
+ disable_irq(output->hpd_irq);
}
return 0;
@@ -324,10 +328,27 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
output->encoder.possible_crtcs = 0x3;
+ /*
+ * The connector is now registered and ready to receive hotplug events
+ * so the hotplug interrupt can be enabled.
+ */
+ if (gpio_is_valid(output->hpd_gpio))
+ enable_irq(output->hpd_irq);
+
return 0;
}
int tegra_output_exit(struct tegra_output *output)
{
+ /*
+ * The connector is going away, so the interrupt must be disabled to
+ * prevent the hotplug interrupt handler from potentially crashing.
+ */
+ if (gpio_is_valid(output->hpd_gpio))
+ disable_irq(output->hpd_irq);
+
+ if (output->panel)
+ drm_panel_detach(output->panel);
+
return 0;
}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index d642d4a0213..c73588483be 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -16,6 +16,7 @@
*/
#include "drm_flip_work.h"
+#include <drm/drm_plane_helper.h>
#include "tilcdc_drv.h"
#include "tilcdc_regs.h"
@@ -664,12 +665,8 @@ struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev)
tilcdc_crtc->dpms = DRM_MODE_DPMS_OFF;
init_waitqueue_head(&tilcdc_crtc->frame_done_wq);
- ret = drm_flip_work_init(&tilcdc_crtc->unref_work, 16,
+ drm_flip_work_init(&tilcdc_crtc->unref_work,
"unref", unref_worker);
- if (ret) {
- dev_err(dev->dev, "could not allocate unref FIFO\n");
- goto fail;
- }
ret = drm_crtc_init(dev, crtc, &tilcdc_crtc_funcs);
if (ret < 0)
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index f8546824d17..095fca91525 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -58,8 +58,7 @@ static struct drm_framebuffer *tilcdc_fb_create(struct drm_device *dev,
static void tilcdc_fb_output_poll_changed(struct drm_device *dev)
{
struct tilcdc_drm_private *priv = dev->dev_private;
- if (priv->fbdev)
- drm_fbdev_cma_hotplug_event(priv->fbdev);
+ drm_fbdev_cma_hotplug_event(priv->fbdev);
}
static const struct drm_mode_config_funcs mode_config_funcs = {
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index 964387fc5c8..aa0bd054d3e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -55,6 +55,7 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
struct drm_mm *mm = &rman->mm;
struct drm_mm_node *node = NULL;
+ enum drm_mm_search_flags sflags = DRM_MM_SEARCH_BEST;
enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
unsigned long lpfn;
int ret;
@@ -67,15 +68,16 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
if (!node)
return -ENOMEM;
- if (place->flags & TTM_PL_FLAG_TOPDOWN)
+ if (place->flags & TTM_PL_FLAG_TOPDOWN) {
+ sflags = DRM_MM_SEARCH_BELOW;
aflags = DRM_MM_CREATE_TOP;
+ }
spin_lock(&rman->lock);
ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages,
mem->page_alignment, 0,
place->fpfn, lpfn,
- DRM_MM_SEARCH_BEST,
- aflags);
+ sflags, aflags);
spin_unlock(&rman->lock);
if (unlikely(ret)) {
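This hunk pairs the two halves of top-down placement: DRM_MM_SEARCH_BELOW makes the range search walk free holes starting from the high end of the address space, while DRM_MM_CREATE_TOP makes the allocator carve the node out of the top end of whichever hole it finds. Roughly:

	enum drm_mm_search_flags sflags = DRM_MM_SEARCH_BEST;
	enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;

	if (place->flags & TTM_PL_FLAG_TOPDOWN) {
		sflags = DRM_MM_SEARCH_BELOW;	/* scan holes from high addresses down */
		aflags = DRM_MM_CREATE_TOP;	/* place the node at the top of the hole */
	}

	ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages,
						  mem->page_alignment, 0,
						  place->fpfn, lpfn, sflags, aflags);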
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 8ce508e7620..3820ae97a03 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -93,7 +93,8 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
*/
int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
- struct list_head *list, bool intr)
+ struct list_head *list, bool intr,
+ struct list_head *dups)
{
struct ttm_bo_global *glob;
struct ttm_validate_buffer *entry;
@@ -117,6 +118,13 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
__ttm_bo_unreserve(bo);
ret = -EBUSY;
+
+ } else if (ret == -EALREADY && dups) {
+ struct ttm_validate_buffer *safe = entry;
+ entry = list_prev_entry(entry, head);
+ list_del(&safe->head);
+ list_add(&safe->head, dups);
+ continue;
}
if (!ret) {
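With shared reservation objects, two entries on the validation list can point at the same ww_mutex, and the second reservation then returns -EALREADY. Instead of failing, the entry is parked on the caller-supplied dups list; stepping the cursor back one node first keeps list_for_each_entry()'s notion of "next" valid after the move. The pattern in isolation, where is_duplicate() stands in for the -EALREADY check:

	list_for_each_entry(entry, list, head) {
		if (is_duplicate(entry)) {
			struct ttm_validate_buffer *safe = entry;

			/* step back before moving, so the loop still advances correctly */
			entry = list_prev_entry(entry, head);
			list_del(&safe->head);
			list_add(&safe->head, dups);
			continue;
		}
		/* ... reserve and validate the buffer ... */
	}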
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 09874d69518..025c429050c 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -297,11 +297,12 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
*
* @pool: to free the pages from
* @free_all: If set to true will free all pages in pool
- * @gfp: GFP flags.
+ * @use_static: Safe to use static buffer
**/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
- gfp_t gfp)
+ bool use_static)
{
+ static struct page *static_buf[NUM_PAGES_TO_ALLOC];
unsigned long irq_flags;
struct page *p;
struct page **pages_to_free;
@@ -311,7 +312,11 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
if (NUM_PAGES_TO_ALLOC < nr_free)
npages_to_free = NUM_PAGES_TO_ALLOC;
- pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
+ if (use_static)
+ pages_to_free = static_buf;
+ else
+ pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
+ GFP_KERNEL);
if (!pages_to_free) {
pr_err("Failed to allocate memory for pool free operation\n");
return 0;
@@ -374,7 +379,8 @@ restart:
if (freed_pages)
ttm_pages_put(pages_to_free, freed_pages);
out:
- kfree(pages_to_free);
+ if (pages_to_free != static_buf)
+ kfree(pages_to_free);
return nr_free;
}
@@ -383,8 +389,6 @@ out:
*
* XXX: (dchinner) Deadlock warning!
*
- * We need to pass sc->gfp_mask to ttm_page_pool_free().
- *
* This code is crying out for a shrinker per pool....
*/
static unsigned long
@@ -407,8 +411,8 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
if (shrink_pages == 0)
break;
pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
- shrink_pages = ttm_page_pool_free(pool, nr_free,
- sc->gfp_mask);
+ /* OK to use static buffer since global mutex is held. */
+ shrink_pages = ttm_page_pool_free(pool, nr_free, true);
freed += nr_free - shrink_pages;
}
mutex_unlock(&lock);
@@ -710,7 +714,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
}
spin_unlock_irqrestore(&pool->lock, irq_flags);
if (npages)
- ttm_page_pool_free(pool, npages, GFP_KERNEL);
+ ttm_page_pool_free(pool, npages, false);
}
/*
@@ -849,9 +853,9 @@ void ttm_page_alloc_fini(void)
pr_info("Finalizing pool allocator\n");
ttm_pool_mm_shrink_fini(_manager);
+ /* OK to use static buffer since global mutex is no longer used. */
for (i = 0; i < NUM_POOLS; ++i)
- ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES,
- GFP_KERNEL);
+ ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true);
kobject_put(&_manager->kobj);
_manager = NULL;
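The gfp_t parameter is dropped because the shrinker must not allocate memory with the reclaim context's flags (the deadlock the XXX comment warns about). Callers that already hold the pool-manager mutex reuse a static scratch array instead, and everyone else allocates with plain GFP_KERNEL. The shape of that fallback, with illustrative names:

	static int pool_free_pages(struct pool *pool, unsigned int nr_free,
				   bool use_static)
	{
		static struct page *static_buf[NUM_PAGES_TO_ALLOC];
		struct page **pages_to_free;

		if (use_static)		/* caller holds the global pool mutex */
			pages_to_free = static_buf;
		else
			pages_to_free = kmalloc_array(NUM_PAGES_TO_ALLOC,
						      sizeof(*pages_to_free),
						      GFP_KERNEL);
		if (!pages_to_free)
			return 0;

		/* ... collect up to NUM_PAGES_TO_ALLOC pages and release them ... */

		if (pages_to_free != static_buf)
			kfree(pages_to_free);
		return nr_free;
	}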
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index c96db433f8a..01e1d27eb07 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -411,11 +411,12 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
*
* @pool: to free the pages from
* @nr_free: If set to true will free all pages in pool
- * @gfp: GFP flags.
+ * @use_static: Safe to use static buffer
**/
static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
- gfp_t gfp)
+ bool use_static)
{
+ static struct page *static_buf[NUM_PAGES_TO_ALLOC];
unsigned long irq_flags;
struct dma_page *dma_p, *tmp;
struct page **pages_to_free;
@@ -432,7 +433,11 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
npages_to_free, nr_free);
}
#endif
- pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
+ if (use_static)
+ pages_to_free = static_buf;
+ else
+ pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
+ GFP_KERNEL);
if (!pages_to_free) {
pr_err("%s: Failed to allocate memory for pool free operation\n",
@@ -502,7 +507,8 @@ restart:
if (freed_pages)
ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
out:
- kfree(pages_to_free);
+ if (pages_to_free != static_buf)
+ kfree(pages_to_free);
return nr_free;
}
@@ -531,7 +537,8 @@ static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
if (pool->type != type)
continue;
/* Takes a spinlock.. */
- ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, GFP_KERNEL);
+ /* OK to use static buffer since global mutex is held. */
+ ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true);
WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
/* This code path is called after _all_ references to the
* struct device has been dropped - so nobody should be
@@ -986,7 +993,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
/* shrink pool if necessary (only on !is_cached pools)*/
if (npages)
- ttm_dma_page_pool_free(pool, npages, GFP_KERNEL);
+ ttm_dma_page_pool_free(pool, npages, false);
ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
@@ -996,8 +1003,6 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
*
* XXX: (dchinner) Deadlock warning!
*
- * We need to pass sc->gfp_mask to ttm_dma_page_pool_free().
- *
* I'm getting sadder as I hear more pathetical whimpers about needing per-pool
* shrinkers
*/
@@ -1030,8 +1035,8 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
if (++idx < pool_offset)
continue;
nr_free = shrink_pages;
- shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free,
- sc->gfp_mask);
+ /* OK to use static buffer since global mutex is held. */
+ shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
freed += nr_free - shrink_pages;
pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
diff --git a/drivers/gpu/drm/udl/Makefile b/drivers/gpu/drm/udl/Makefile
index 05c7481bfd4..195bcac0b6c 100644
--- a/drivers/gpu/drm/udl/Makefile
+++ b/drivers/gpu/drm/udl/Makefile
@@ -1,6 +1,6 @@
ccflags-y := -Iinclude/drm
-udl-y := udl_drv.o udl_modeset.o udl_connector.o udl_encoder.o udl_main.o udl_fb.o udl_transfer.o udl_gem.o
+udl-y := udl_drv.o udl_modeset.o udl_connector.o udl_encoder.o udl_main.o udl_fb.o udl_transfer.o udl_gem.o udl_dmabuf.o
obj-$(CONFIG_DRM_UDL) := udl.o
diff --git a/drivers/gpu/drm/udl/udl_dmabuf.c b/drivers/gpu/drm/udl/udl_dmabuf.c
new file mode 100644
index 00000000000..ac8a66b4dfc
--- /dev/null
+++ b/drivers/gpu/drm/udl/udl_dmabuf.c
@@ -0,0 +1,276 @@
+/*
+ * udl_dmabuf.c
+ *
+ * Copyright (c) 2014 The Chromium OS Authors
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <drm/drmP.h>
+#include "udl_drv.h"
+#include <linux/shmem_fs.h>
+#include <linux/dma-buf.h>
+
+struct udl_drm_dmabuf_attachment {
+ struct sg_table sgt;
+ enum dma_data_direction dir;
+ bool is_mapped;
+};
+
+static int udl_attach_dma_buf(struct dma_buf *dmabuf,
+ struct device *dev,
+ struct dma_buf_attachment *attach)
+{
+ struct udl_drm_dmabuf_attachment *udl_attach;
+
+ DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
+ attach->dmabuf->size);
+
+ udl_attach = kzalloc(sizeof(*udl_attach), GFP_KERNEL);
+ if (!udl_attach)
+ return -ENOMEM;
+
+ udl_attach->dir = DMA_NONE;
+ attach->priv = udl_attach;
+
+ return 0;
+}
+
+static void udl_detach_dma_buf(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attach)
+{
+ struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
+ struct sg_table *sgt;
+
+ if (!udl_attach)
+ return;
+
+ DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
+ attach->dmabuf->size);
+
+ sgt = &udl_attach->sgt;
+
+ if (udl_attach->dir != DMA_NONE)
+ dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
+ udl_attach->dir);
+
+ sg_free_table(sgt);
+ kfree(udl_attach);
+ attach->priv = NULL;
+}
+
+static struct sg_table *udl_map_dma_buf(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir)
+{
+ struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
+ struct udl_gem_object *obj = to_udl_bo(attach->dmabuf->priv);
+ struct drm_device *dev = obj->base.dev;
+ struct scatterlist *rd, *wr;
+ struct sg_table *sgt = NULL;
+ unsigned int i;
+ int page_count;
+ int nents, ret;
+
+ DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir=%d\n", dev_name(attach->dev),
+ attach->dmabuf->size, dir);
+
+ /* just return current sgt if already requested. */
+ if (udl_attach->dir == dir && udl_attach->is_mapped)
+ return &udl_attach->sgt;
+
+ if (!obj->pages) {
+ ret = udl_gem_get_pages(obj);
+ if (ret) {
+ DRM_ERROR("failed to map pages.\n");
+ return ERR_PTR(ret);
+ }
+ }
+
+ page_count = obj->base.size / PAGE_SIZE;
+ obj->sg = drm_prime_pages_to_sg(obj->pages, page_count);
+ if (IS_ERR(obj->sg)) {
+ DRM_ERROR("failed to allocate sgt.\n");
+ return ERR_CAST(obj->sg);
+ }
+
+ sgt = &udl_attach->sgt;
+
+ ret = sg_alloc_table(sgt, obj->sg->orig_nents, GFP_KERNEL);
+ if (ret) {
+ DRM_ERROR("failed to alloc sgt.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ mutex_lock(&dev->struct_mutex);
+
+ rd = obj->sg->sgl;
+ wr = sgt->sgl;
+ for (i = 0; i < sgt->orig_nents; ++i) {
+ sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+ rd = sg_next(rd);
+ wr = sg_next(wr);
+ }
+
+ if (dir != DMA_NONE) {
+ nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
+ if (!nents) {
+ DRM_ERROR("failed to map sgl with iommu.\n");
+ sg_free_table(sgt);
+ sgt = ERR_PTR(-EIO);
+ goto err_unlock;
+ }
+ }
+
+ udl_attach->is_mapped = true;
+ udl_attach->dir = dir;
+ attach->priv = udl_attach;
+
+err_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return sgt;
+}
+
+static void udl_unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ /* Nothing to do. */
+ DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir:%d\n", dev_name(attach->dev),
+ attach->dmabuf->size, dir);
+}
+
+static void *udl_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
+{
+ /* TODO */
+
+ return NULL;
+}
+
+static void *udl_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
+ unsigned long page_num)
+{
+ /* TODO */
+
+ return NULL;
+}
+
+static void udl_dmabuf_kunmap(struct dma_buf *dma_buf,
+ unsigned long page_num, void *addr)
+{
+ /* TODO */
+}
+
+static void udl_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
+ unsigned long page_num,
+ void *addr)
+{
+ /* TODO */
+}
+
+static int udl_dmabuf_mmap(struct dma_buf *dma_buf,
+ struct vm_area_struct *vma)
+{
+ /* TODO */
+
+ return -EINVAL;
+}
+
+static struct dma_buf_ops udl_dmabuf_ops = {
+ .attach = udl_attach_dma_buf,
+ .detach = udl_detach_dma_buf,
+ .map_dma_buf = udl_map_dma_buf,
+ .unmap_dma_buf = udl_unmap_dma_buf,
+ .kmap = udl_dmabuf_kmap,
+ .kmap_atomic = udl_dmabuf_kmap_atomic,
+ .kunmap = udl_dmabuf_kunmap,
+ .kunmap_atomic = udl_dmabuf_kunmap_atomic,
+ .mmap = udl_dmabuf_mmap,
+ .release = drm_gem_dmabuf_release,
+};
+
+struct dma_buf *udl_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj, int flags)
+{
+ return dma_buf_export(obj, &udl_dmabuf_ops, obj->size, flags, NULL);
+}
+
+static int udl_prime_create(struct drm_device *dev,
+ size_t size,
+ struct sg_table *sg,
+ struct udl_gem_object **obj_p)
+{
+ struct udl_gem_object *obj;
+ int npages;
+
+ npages = size / PAGE_SIZE;
+
+ *obj_p = NULL;
+ obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
+ if (!obj)
+ return -ENOMEM;
+
+ obj->sg = sg;
+ obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+ if (obj->pages == NULL) {
+ DRM_ERROR("obj pages is NULL %d\n", npages);
+ return -ENOMEM;
+ }
+
+ drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
+
+ *obj_p = obj;
+ return 0;
+}
+
+struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct dma_buf_attachment *attach;
+ struct sg_table *sg;
+ struct udl_gem_object *uobj;
+ int ret;
+
+ /* need to attach */
+ get_device(dev->dev);
+ attach = dma_buf_attach(dma_buf, dev->dev);
+ if (IS_ERR(attach)) {
+ put_device(dev->dev);
+ return ERR_CAST(attach);
+ }
+
+ get_dma_buf(dma_buf);
+
+ sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sg)) {
+ ret = PTR_ERR(sg);
+ goto fail_detach;
+ }
+
+ ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
+ if (ret)
+ goto fail_unmap;
+
+ uobj->base.import_attach = attach;
+ uobj->flags = UDL_BO_WC;
+
+ return &uobj->base;
+
+fail_unmap:
+ dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+ dma_buf_put(dma_buf);
+ put_device(dev->dev);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 8607e9e513d..d5728ec8525 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -51,7 +51,9 @@ static struct drm_driver driver = {
.dumb_destroy = drm_gem_dumb_destroy,
.fops = &udl_driver_fops,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = udl_gem_prime_export,
.gem_prime_import = udl_gem_prime_import,
.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index c7490a2489a..80adbac82bd 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -25,6 +25,9 @@
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 1
+#define UDL_BO_CACHEABLE (1 << 0)
+#define UDL_BO_WC (1 << 1)
+
struct udl_device;
struct urb_node {
@@ -69,6 +72,7 @@ struct udl_gem_object {
struct page **pages;
void *vmapping;
struct sg_table *sg;
+ unsigned int flags;
};
#define to_udl_bo(x) container_of(x, struct udl_gem_object, base)
@@ -120,9 +124,13 @@ int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev,
void udl_gem_free_object(struct drm_gem_object *gem_obj);
struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
size_t size);
+struct dma_buf *udl_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj, int flags);
struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf);
+int udl_gem_get_pages(struct udl_gem_object *obj);
+void udl_gem_put_pages(struct udl_gem_object *obj);
int udl_gem_vmap(struct udl_gem_object *obj);
void udl_gem_vunmap(struct udl_gem_object *obj);
int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 8044f5fb7c4..2a0a784ab6e 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -25,6 +25,7 @@ struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
return NULL;
}
+ obj->flags = UDL_BO_CACHEABLE;
return obj;
}
@@ -56,6 +57,23 @@ udl_gem_create(struct drm_file *file,
return 0;
}
+static void update_vm_cache_attr(struct udl_gem_object *obj,
+ struct vm_area_struct *vma)
+{
+ DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);
+
+ /* non-cacheable as default. */
+ if (obj->flags & UDL_BO_CACHEABLE) {
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ } else if (obj->flags & UDL_BO_WC) {
+ vma->vm_page_prot =
+ pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ } else {
+ vma->vm_page_prot =
+ pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+ }
+}
+
int udl_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
@@ -77,6 +95,8 @@ int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_flags |= VM_MIXEDMAP;
+ update_vm_cache_attr(to_udl_bo(vma->vm_private_data), vma);
+
return ret;
}
@@ -107,7 +127,7 @@ int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
}
-static int udl_gem_get_pages(struct udl_gem_object *obj)
+int udl_gem_get_pages(struct udl_gem_object *obj)
{
struct page **pages;
@@ -123,7 +143,7 @@ static int udl_gem_get_pages(struct udl_gem_object *obj)
return 0;
}
-static void udl_gem_put_pages(struct udl_gem_object *obj)
+void udl_gem_put_pages(struct udl_gem_object *obj)
{
if (obj->base.import_attach) {
drm_free_large(obj->pages);
@@ -164,8 +184,7 @@ void udl_gem_vunmap(struct udl_gem_object *obj)
return;
}
- if (obj->vmapping)
- vunmap(obj->vmapping);
+ vunmap(obj->vmapping);
udl_gem_put_pages(obj);
}
@@ -220,73 +239,3 @@ unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
-
-static int udl_prime_create(struct drm_device *dev,
- size_t size,
- struct sg_table *sg,
- struct udl_gem_object **obj_p)
-{
- struct udl_gem_object *obj;
- int npages;
-
- npages = size / PAGE_SIZE;
-
- *obj_p = NULL;
- obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
- if (!obj)
- return -ENOMEM;
-
- obj->sg = sg;
- obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
- if (obj->pages == NULL) {
- DRM_ERROR("obj pages is NULL %d\n", npages);
- return -ENOMEM;
- }
-
- drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
-
- *obj_p = obj;
- return 0;
-}
-
-struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
-{
- struct dma_buf_attachment *attach;
- struct sg_table *sg;
- struct udl_gem_object *uobj;
- int ret;
-
- /* need to attach */
- get_device(dev->dev);
- attach = dma_buf_attach(dma_buf, dev->dev);
- if (IS_ERR(attach)) {
- put_device(dev->dev);
- return ERR_CAST(attach);
- }
-
- get_dma_buf(dma_buf);
-
- sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
- if (IS_ERR(sg)) {
- ret = PTR_ERR(sg);
- goto fail_detach;
- }
-
- ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
- if (ret) {
- goto fail_unmap;
- }
-
- uobj->base.import_attach = attach;
-
- return &uobj->base;
-
-fail_unmap:
- dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
-fail_detach:
- dma_buf_detach(dma_buf, attach);
- dma_buf_put(dma_buf);
- put_device(dev->dev);
- return ERR_PTR(ret);
-}
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index dc145d320b2..1701f1dfb23 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -14,6 +14,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
#include "udl_drv.h"
/*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 25f3c250fd9..7b5d22110f2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -889,8 +889,7 @@ static int vmw_driver_unload(struct drm_device *dev)
if (dev_priv->ctx.res_ht_initialized)
drm_ht_remove(&dev_priv->ctx.res_ht);
- if (dev_priv->ctx.cmd_bounce)
- vfree(dev_priv->ctx.cmd_bounce);
+ vfree(dev_priv->ctx.cmd_bounce);
if (dev_priv->enable_fb) {
vmw_fb_close(dev_priv);
vmw_kms_restore_vga(dev_priv);
@@ -1063,8 +1062,12 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
vmaster = vmw_master_check(dev, file_priv, flags);
if (unlikely(IS_ERR(vmaster))) {
- DRM_INFO("IOCTL ERROR %d\n", nr);
- return PTR_ERR(vmaster);
+ ret = PTR_ERR(vmaster);
+
+ if (ret != -ERESTARTSYS)
+ DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n",
+ nr, ret);
+ return ret;
}
ret = ioctl_func(filp, cmd, arg);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 596cd6dafd3..33176d05db3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -2487,7 +2487,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
if (unlikely(ret != 0))
goto out_err_nores;
- ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes, true);
+ ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
+ true, NULL);
if (unlikely(ret != 0))
goto out_err;
@@ -2677,7 +2678,8 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
query_val.shared = false;
list_add_tail(&query_val.head, &validate_list);
- ret = ttm_eu_reserve_buffers(&ticket, &validate_list, false);
+ ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
+ false, NULL);
if (unlikely(ret != 0)) {
vmw_execbuf_unpin_panic(dev_priv);
goto out_no_reserve;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 197164fd780..b7594cb758a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -545,35 +545,19 @@ void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
- struct vmw_fence_manager *fman = fman_from_fence(fence);
-
fence_free(&fence->base);
-
- /*
- * Free kernel space accounting.
- */
- ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
- fman->fence_size);
}
int vmw_fence_create(struct vmw_fence_manager *fman,
uint32_t seqno,
struct vmw_fence_obj **p_fence)
{
- struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
struct vmw_fence_obj *fence;
int ret;
- ret = ttm_mem_global_alloc(mem_glob, fman->fence_size,
- false, false);
- if (unlikely(ret != 0))
- return ret;
-
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
- if (unlikely(fence == NULL)) {
- ret = -ENOMEM;
- goto out_no_object;
- }
+ if (unlikely(fence == NULL))
+ return -ENOMEM;
ret = vmw_fence_obj_init(fman, fence, seqno,
vmw_fence_destroy);
@@ -585,8 +569,6 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
out_err_init:
kfree(fence);
-out_no_object:
- ttm_mem_global_free(mem_glob, fman->fence_size);
return ret;
}
@@ -1105,6 +1087,8 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
if (ret != 0)
goto out_no_queue;
+ return 0;
+
out_no_queue:
event->base.destroy(&event->base);
out_no_event:
@@ -1180,17 +1164,10 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
BUG_ON(fence == NULL);
- if (arg->flags & DRM_VMW_FE_FLAG_REQ_TIME)
- ret = vmw_event_fence_action_create(file_priv, fence,
- arg->flags,
- arg->user_data,
- true);
- else
- ret = vmw_event_fence_action_create(file_priv, fence,
- arg->flags,
- arg->user_data,
- true);
-
+ ret = vmw_event_fence_action_create(file_priv, fence,
+ arg->flags,
+ arg->user_data,
+ true);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Failed to attach event to fence.\n");
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 941a7bc0b79..3725b521d93 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -252,7 +252,7 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
ret = 0;
out:
drm_modeset_unlock_all(dev_priv->dev);
- drm_modeset_lock_crtc(crtc);
+ drm_modeset_lock_crtc(crtc, crtc->cursor);
return ret;
}
@@ -281,7 +281,7 @@ int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
du->cursor_y + du->hotspot_y);
drm_modeset_unlock_all(dev_priv->dev);
- drm_modeset_lock_crtc(crtc);
+ drm_modeset_lock_crtc(crtc, crtc->cursor);
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 15e185ae4c9..5c289f748ab 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -26,6 +26,7 @@
**************************************************************************/
#include "vmwgfx_kms.h"
+#include <drm/drm_plane_helper.h>
#define vmw_crtc_to_ldu(x) \
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 026de7cea0f..210ef15b1d0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1222,7 +1222,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
val_buf->bo = ttm_bo_reference(&res->backup->base);
val_buf->shared = false;
list_add_tail(&val_buf->head, &val_list);
- ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible);
+ ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible, NULL);
if (unlikely(ret != 0))
goto out_no_reserve;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index b295463a60b..7dc591d04d9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -26,6 +26,7 @@
**************************************************************************/
#include "vmwgfx_kms.h"
+#include <drm/drm_plane_helper.h>
#define vmw_crtc_to_sou(x) \
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 8719fb3cccc..6a4584a43aa 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -198,7 +198,7 @@ static int vmw_gb_shader_bind(struct vmw_resource *res,
cmd->header.size = sizeof(cmd->body);
cmd->body.shid = res->id;
cmd->body.mobid = bo->mem.start;
- cmd->body.offsetInBytes = 0;
+ cmd->body.offsetInBytes = res->backup_offset;
res->backup_dirty = false;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c
index 3995255b16c..5a8c8d55317 100644
--- a/drivers/gpu/host1x/cdma.c
+++ b/drivers/gpu/host1x/cdma.c
@@ -97,7 +97,7 @@ fail:
static void host1x_pushbuffer_push(struct push_buffer *pb, u32 op1, u32 op2)
{
u32 pos = pb->pos;
- u32 *p = (u32 *)((u32)pb->mapped + pos);
+ u32 *p = (u32 *)((void *)pb->mapped + pos);
WARN_ON(pos == pb->fence);
*(p++) = op1;
*(p++) = op2;
diff --git a/drivers/gpu/host1x/cdma.h b/drivers/gpu/host1x/cdma.h
index 313c4b78434..470087af8fe 100644
--- a/drivers/gpu/host1x/cdma.h
+++ b/drivers/gpu/host1x/cdma.h
@@ -42,7 +42,7 @@ struct host1x_job;
*/
struct push_buffer {
- u32 *mapped; /* mapped pushbuffer memory */
+ void *mapped; /* mapped pushbuffer memory */
dma_addr_t phys; /* physical address of pushbuffer */
u32 fence; /* index we've written */
u32 pos; /* index to write to */
diff --git a/drivers/gpu/host1x/hw/cdma_hw.c b/drivers/gpu/host1x/hw/cdma_hw.c
index 6b09b71940c..305ea8f3382 100644
--- a/drivers/gpu/host1x/hw/cdma_hw.c
+++ b/drivers/gpu/host1x/hw/cdma_hw.c
@@ -26,11 +26,11 @@
#include "../debug.h"
/*
- * Put the restart at the end of pushbuffer memor
+ * Put the restart at the end of pushbuffer memory
*/
static void push_buffer_init(struct push_buffer *pb)
{
- *(pb->mapped + (pb->size_bytes >> 2)) = host1x_opcode_restart(0);
+ *(u32 *)(pb->mapped + pb->size_bytes) = host1x_opcode_restart(0);
}
/*
@@ -51,11 +51,11 @@ static void cdma_timeout_cpu_incr(struct host1x_cdma *cdma, u32 getptr,
/* NOP all the PB slots */
while (nr_slots--) {
- u32 *p = (u32 *)((u32)pb->mapped + getptr);
+ u32 *p = (u32 *)(pb->mapped + getptr);
*(p++) = HOST1X_OPCODE_NOP;
*(p++) = HOST1X_OPCODE_NOP;
- dev_dbg(host1x->dev, "%s: NOP at %#llx\n", __func__,
- (u64)pb->phys + getptr);
+ dev_dbg(host1x->dev, "%s: NOP at %pad+%#x\n", __func__,
+ &pb->phys, getptr);
getptr = (getptr + 8) & (pb->size_bytes - 1);
}
wmb();
diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c
index 4608257ab65..946c332c390 100644
--- a/drivers/gpu/host1x/hw/channel_hw.c
+++ b/drivers/gpu/host1x/hw/channel_hw.c
@@ -32,6 +32,7 @@
static void trace_write_gather(struct host1x_cdma *cdma, struct host1x_bo *bo,
u32 offset, u32 words)
{
+ struct device *dev = cdma_to_channel(cdma)->dev;
void *mem = NULL;
if (host1x_debug_trace_cmdbuf)
@@ -44,11 +45,14 @@ static void trace_write_gather(struct host1x_cdma *cdma, struct host1x_bo *bo,
* of how much you can output to ftrace at once.
*/
for (i = 0; i < words; i += TRACE_MAX_LENGTH) {
- trace_host1x_cdma_push_gather(
- dev_name(cdma_to_channel(cdma)->dev),
- (u32)bo, min(words - i, TRACE_MAX_LENGTH),
- offset + i * sizeof(u32), mem);
+ u32 num_words = min(words - i, TRACE_MAX_LENGTH);
+ offset += i * sizeof(u32);
+
+ trace_host1x_cdma_push_gather(dev_name(dev), bo,
+ num_words, offset,
+ mem);
}
+
host1x_bo_munmap(bo, mem);
}
}
diff --git a/drivers/gpu/host1x/hw/debug_hw.c b/drivers/gpu/host1x/hw/debug_hw.c
index f72c873eff8..791de9351ee 100644
--- a/drivers/gpu/host1x/hw/debug_hw.c
+++ b/drivers/gpu/host1x/hw/debug_hw.c
@@ -163,8 +163,8 @@ static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma)
continue;
}
- host1x_debug_output(o, " GATHER at %#llx+%04x, %d words\n",
- (u64)g->base, g->offset, g->words);
+ host1x_debug_output(o, " GATHER at %pad+%#x, %d words\n",
+ &g->base, g->offset, g->words);
show_gather(o, g->base + g->offset, g->words, cdma,
g->base, mapped);
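The host1x hunks above (cdma.c, cdma_hw.c, debug_hw.c) share one theme: casting pointers and dma_addr_t values to u32 truncates them on 64-bit Tegra, so the pushbuffer mapping becomes a void * and DMA addresses are printed with the %pad specifier, which takes a pointer to the dma_addr_t and prints it at its native width. A hedged example of the same idiom:

	dma_addr_t base = g->base;	/* illustrative */

	dev_dbg(dev, "GATHER at %pad+%#x, %d words\n", &base, g->offset, g->words);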
diff --git a/drivers/gpu/host1x/job.h b/drivers/gpu/host1x/job.h
index 33a697d6dce..8b3c15df066 100644
--- a/drivers/gpu/host1x/job.h
+++ b/drivers/gpu/host1x/job.h
@@ -23,7 +23,7 @@ struct host1x_job_gather {
u32 words;
dma_addr_t base;
struct host1x_bo *bo;
- int offset;
+ u32 offset;
bool handled;
};
diff --git a/drivers/gpu/host1x/mipi.c b/drivers/gpu/host1x/mipi.c
index 9882ea12202..fbc6ee6ca33 100644
--- a/drivers/gpu/host1x/mipi.c
+++ b/drivers/gpu/host1x/mipi.c
@@ -49,35 +49,47 @@
#define MIPI_CAL_CONFIG_DSIC 0x10
#define MIPI_CAL_CONFIG_DSID 0x11
+#define MIPI_CAL_CONFIG_DSIAB_CLK 0x19
+#define MIPI_CAL_CONFIG_DSICD_CLK 0x1a
+#define MIPI_CAL_CONFIG_CSIAB_CLK 0x1b
+#define MIPI_CAL_CONFIG_CSICD_CLK 0x1c
+#define MIPI_CAL_CONFIG_CSIE_CLK 0x1d
+
+/* for data and clock lanes */
#define MIPI_CAL_CONFIG_SELECT (1 << 21)
+
+/* for data lanes */
#define MIPI_CAL_CONFIG_HSPDOS(x) (((x) & 0x1f) << 16)
#define MIPI_CAL_CONFIG_HSPUOS(x) (((x) & 0x1f) << 8)
#define MIPI_CAL_CONFIG_TERMOS(x) (((x) & 0x1f) << 0)
+/* for clock lanes */
+#define MIPI_CAL_CONFIG_HSCLKPDOSD(x) (((x) & 0x1f) << 8)
+#define MIPI_CAL_CONFIG_HSCLKPUOSD(x) (((x) & 0x1f) << 0)
+
#define MIPI_CAL_BIAS_PAD_CFG0 0x16
#define MIPI_CAL_BIAS_PAD_PDVCLAMP (1 << 1)
#define MIPI_CAL_BIAS_PAD_E_VCLAMP_REF (1 << 0)
#define MIPI_CAL_BIAS_PAD_CFG1 0x17
+#define MIPI_CAL_BIAS_PAD_DRV_DN_REF(x) (((x) & 0x7) << 16)
#define MIPI_CAL_BIAS_PAD_CFG2 0x18
#define MIPI_CAL_BIAS_PAD_PDVREG (1 << 1)
-static const struct module {
- unsigned long reg;
-} modules[] = {
- { .reg = MIPI_CAL_CONFIG_CSIA },
- { .reg = MIPI_CAL_CONFIG_CSIB },
- { .reg = MIPI_CAL_CONFIG_CSIC },
- { .reg = MIPI_CAL_CONFIG_CSID },
- { .reg = MIPI_CAL_CONFIG_CSIE },
- { .reg = MIPI_CAL_CONFIG_DSIA },
- { .reg = MIPI_CAL_CONFIG_DSIB },
- { .reg = MIPI_CAL_CONFIG_DSIC },
- { .reg = MIPI_CAL_CONFIG_DSID },
+struct tegra_mipi_pad {
+ unsigned long data;
+ unsigned long clk;
+};
+
+struct tegra_mipi_soc {
+ bool has_clk_lane;
+ const struct tegra_mipi_pad *pads;
+ unsigned int num_pads;
};
struct tegra_mipi {
+ const struct tegra_mipi_soc *soc;
void __iomem *regs;
struct mutex lock;
struct clk *clk;
@@ -90,16 +102,16 @@ struct tegra_mipi_device {
unsigned long pads;
};
-static inline unsigned long tegra_mipi_readl(struct tegra_mipi *mipi,
- unsigned long reg)
+static inline u32 tegra_mipi_readl(struct tegra_mipi *mipi,
+ unsigned long offset)
{
- return readl(mipi->regs + (reg << 2));
+ return readl(mipi->regs + (offset << 2));
}
-static inline void tegra_mipi_writel(struct tegra_mipi *mipi,
- unsigned long value, unsigned long reg)
+static inline void tegra_mipi_writel(struct tegra_mipi *mipi, u32 value,
+ unsigned long offset)
{
- writel(value, mipi->regs + (reg << 2));
+ writel(value, mipi->regs + (offset << 2));
}
struct tegra_mipi_device *tegra_mipi_request(struct device *device)
@@ -117,36 +129,35 @@ struct tegra_mipi_device *tegra_mipi_request(struct device *device)
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev) {
- of_node_put(args.np);
err = -ENOMEM;
goto out;
}
dev->pdev = of_find_device_by_node(args.np);
if (!dev->pdev) {
- of_node_put(args.np);
err = -ENODEV;
goto free;
}
- of_node_put(args.np);
-
dev->mipi = platform_get_drvdata(dev->pdev);
if (!dev->mipi) {
err = -EPROBE_DEFER;
- goto pdev_put;
+ goto put;
}
+ of_node_put(args.np);
+
dev->pads = args.args[0];
dev->device = device;
return dev;
-pdev_put:
+put:
platform_device_put(dev->pdev);
free:
kfree(dev);
out:
+ of_node_put(args.np);
return ERR_PTR(err);
}
EXPORT_SYMBOL(tegra_mipi_request);
@@ -161,7 +172,7 @@ EXPORT_SYMBOL(tegra_mipi_free);
static int tegra_mipi_wait(struct tegra_mipi *mipi)
{
unsigned long timeout = jiffies + msecs_to_jiffies(250);
- unsigned long value;
+ u32 value;
while (time_before(jiffies, timeout)) {
value = tegra_mipi_readl(mipi, MIPI_CAL_STATUS);
@@ -177,8 +188,9 @@ static int tegra_mipi_wait(struct tegra_mipi *mipi)
int tegra_mipi_calibrate(struct tegra_mipi_device *device)
{
- unsigned long value;
+ const struct tegra_mipi_soc *soc = device->mipi->soc;
unsigned int i;
+ u32 value;
int err;
err = clk_enable(device->mipi->clk);
@@ -192,23 +204,35 @@ int tegra_mipi_calibrate(struct tegra_mipi_device *device)
value |= MIPI_CAL_BIAS_PAD_E_VCLAMP_REF;
tegra_mipi_writel(device->mipi, value, MIPI_CAL_BIAS_PAD_CFG0);
+ tegra_mipi_writel(device->mipi, MIPI_CAL_BIAS_PAD_DRV_DN_REF(2),
+ MIPI_CAL_BIAS_PAD_CFG1);
+
value = tegra_mipi_readl(device->mipi, MIPI_CAL_BIAS_PAD_CFG2);
value &= ~MIPI_CAL_BIAS_PAD_PDVREG;
tegra_mipi_writel(device->mipi, value, MIPI_CAL_BIAS_PAD_CFG2);
- for (i = 0; i < ARRAY_SIZE(modules); i++) {
- if (device->pads & BIT(i))
- value = MIPI_CAL_CONFIG_SELECT |
- MIPI_CAL_CONFIG_HSPDOS(0) |
- MIPI_CAL_CONFIG_HSPUOS(4) |
- MIPI_CAL_CONFIG_TERMOS(5);
- else
- value = 0;
+ for (i = 0; i < soc->num_pads; i++) {
+ u32 clk = 0, data = 0;
+
+ if (device->pads & BIT(i)) {
+ data = MIPI_CAL_CONFIG_SELECT |
+ MIPI_CAL_CONFIG_HSPDOS(0) |
+ MIPI_CAL_CONFIG_HSPUOS(4) |
+ MIPI_CAL_CONFIG_TERMOS(5);
+ clk = MIPI_CAL_CONFIG_SELECT |
+ MIPI_CAL_CONFIG_HSCLKPDOSD(0) |
+ MIPI_CAL_CONFIG_HSCLKPUOSD(4);
+ }
- tegra_mipi_writel(device->mipi, value, modules[i].reg);
+ tegra_mipi_writel(device->mipi, data, soc->pads[i].data);
+
+ if (soc->has_clk_lane)
+ tegra_mipi_writel(device->mipi, clk, soc->pads[i].clk);
}
- tegra_mipi_writel(device->mipi, MIPI_CAL_CTRL_START, MIPI_CAL_CTRL);
+ value = tegra_mipi_readl(device->mipi, MIPI_CAL_CTRL);
+ value |= MIPI_CAL_CTRL_START;
+ tegra_mipi_writel(device->mipi, value, MIPI_CAL_CTRL);
err = tegra_mipi_wait(device->mipi);
@@ -219,16 +243,63 @@ int tegra_mipi_calibrate(struct tegra_mipi_device *device)
}
EXPORT_SYMBOL(tegra_mipi_calibrate);
+static const struct tegra_mipi_pad tegra114_mipi_pads[] = {
+ { .data = MIPI_CAL_CONFIG_CSIA },
+ { .data = MIPI_CAL_CONFIG_CSIB },
+ { .data = MIPI_CAL_CONFIG_CSIC },
+ { .data = MIPI_CAL_CONFIG_CSID },
+ { .data = MIPI_CAL_CONFIG_CSIE },
+ { .data = MIPI_CAL_CONFIG_DSIA },
+ { .data = MIPI_CAL_CONFIG_DSIB },
+ { .data = MIPI_CAL_CONFIG_DSIC },
+ { .data = MIPI_CAL_CONFIG_DSID },
+};
+
+static const struct tegra_mipi_soc tegra114_mipi_soc = {
+ .has_clk_lane = false,
+ .pads = tegra114_mipi_pads,
+ .num_pads = ARRAY_SIZE(tegra114_mipi_pads),
+};
+
+static const struct tegra_mipi_pad tegra124_mipi_pads[] = {
+ { .data = MIPI_CAL_CONFIG_CSIA, .clk = MIPI_CAL_CONFIG_CSIAB_CLK },
+ { .data = MIPI_CAL_CONFIG_CSIB, .clk = MIPI_CAL_CONFIG_CSIAB_CLK },
+ { .data = MIPI_CAL_CONFIG_CSIC, .clk = MIPI_CAL_CONFIG_CSICD_CLK },
+ { .data = MIPI_CAL_CONFIG_CSID, .clk = MIPI_CAL_CONFIG_CSICD_CLK },
+ { .data = MIPI_CAL_CONFIG_CSIE, .clk = MIPI_CAL_CONFIG_CSIE_CLK },
+ { .data = MIPI_CAL_CONFIG_DSIA, .clk = MIPI_CAL_CONFIG_DSIAB_CLK },
+ { .data = MIPI_CAL_CONFIG_DSIB, .clk = MIPI_CAL_CONFIG_DSIAB_CLK },
+};
+
+static const struct tegra_mipi_soc tegra124_mipi_soc = {
+ .has_clk_lane = true,
+ .pads = tegra124_mipi_pads,
+ .num_pads = ARRAY_SIZE(tegra124_mipi_pads),
+};
+
+static struct of_device_id tegra_mipi_of_match[] = {
+ { .compatible = "nvidia,tegra114-mipi", .data = &tegra114_mipi_soc },
+ { .compatible = "nvidia,tegra124-mipi", .data = &tegra124_mipi_soc },
+ { },
+};
+
static int tegra_mipi_probe(struct platform_device *pdev)
{
+ const struct of_device_id *match;
struct tegra_mipi *mipi;
struct resource *res;
int err;
+ match = of_match_node(tegra_mipi_of_match, pdev->dev.of_node);
+ if (!match)
+ return -ENODEV;
+
mipi = devm_kzalloc(&pdev->dev, sizeof(*mipi), GFP_KERNEL);
if (!mipi)
return -ENOMEM;
+ mipi->soc = match->data;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mipi->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(mipi->regs))
@@ -260,11 +331,6 @@ static int tegra_mipi_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id tegra_mipi_of_match[] = {
- { .compatible = "nvidia,tegra114-mipi", },
- { },
-};
-
struct platform_driver tegra_mipi_driver = {
.driver = {
.name = "tegra-mipi",
diff --git a/drivers/hsi/clients/nokia-modem.c b/drivers/hsi/clients/nokia-modem.c
index 363b780dace..f0c21458962 100644
--- a/drivers/hsi/clients/nokia-modem.c
+++ b/drivers/hsi/clients/nokia-modem.c
@@ -29,7 +29,7 @@
#include <linux/of_gpio.h>
#include <linux/hsi/ssi_protocol.h>
-static unsigned int pm;
+static unsigned int pm = 1;
module_param(pm, int, 0400);
MODULE_PARM_DESC(pm,
"Enable power management (0=disabled, 1=userland based [default])");
@@ -164,9 +164,9 @@ static int nokia_modem_probe(struct device *dev)
dev_set_drvdata(dev, modem);
irq = irq_of_parse_and_map(np, 0);
- if (irq < 0) {
+ if (!irq) {
dev_err(dev, "Invalid rst_ind interrupt (%d)\n", irq);
- return irq;
+ return -EINVAL;
}
modem->nokia_modem_rst_ind_irq = irq;
pflags = irq_get_trigger_type(irq);
@@ -174,7 +174,7 @@ static int nokia_modem_probe(struct device *dev)
tasklet_init(&modem->nokia_modem_rst_ind_tasklet,
do_nokia_modem_rst_ind_tasklet, (unsigned long)modem);
err = devm_request_irq(dev, irq, nokia_modem_rst_ind_isr,
- IRQF_DISABLED | pflags, "modem_rst_ind", modem);
+ pflags, "modem_rst_ind", modem);
if (err < 0) {
dev_err(dev, "Request rst_ind irq(%d) failed (flags %d)\n",
irq, pflags);
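Two small fixes here: irq_of_parse_and_map() returns an unsigned virq and 0 on failure, so the old `irq < 0` test could never trigger, and IRQF_DISABLED has long been a no-op, so it is dropped from the request flags. The corrected return-value check in isolation:

	irq = irq_of_parse_and_map(np, 0);
	if (!irq) {		/* 0 means "no mapping", never a negative errno */
		dev_err(dev, "Invalid rst_ind interrupt (%d)\n", irq);
		return -EINVAL;
	}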
diff --git a/drivers/hsi/controllers/omap_ssi_port.c b/drivers/hsi/controllers/omap_ssi_port.c
index 1314ab80164..1f8652b3de0 100644
--- a/drivers/hsi/controllers/omap_ssi_port.c
+++ b/drivers/hsi/controllers/omap_ssi_port.c
@@ -1118,8 +1118,7 @@ static int __init ssi_port_probe(struct platform_device *pd)
dev_dbg(&pd->dev, "init ssi port...\n");
if (!try_module_get(ssi->owner)) {
- dev_err(&pd->dev, "could not increment parent module refcount (err=%d)\n",
- err);
+ dev_err(&pd->dev, "could not increment parent module refcount\n");
return -ENODEV;
}
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 6753fd940c7..fe41d5ae7cb 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -177,6 +177,10 @@ static struct attribute *lm75_attrs[] = {
};
ATTRIBUTE_GROUPS(lm75);
+static const struct thermal_zone_of_device_ops lm75_of_thermal_ops = {
+ .get_temp = lm75_read_temp,
+};
+
/*-----------------------------------------------------------------------*/
/* device probe and removal */
@@ -296,10 +300,9 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
if (IS_ERR(data->hwmon_dev))
return PTR_ERR(data->hwmon_dev);
- data->tz = thermal_zone_of_sensor_register(data->hwmon_dev,
- 0,
+ data->tz = thermal_zone_of_sensor_register(data->hwmon_dev, 0,
data->hwmon_dev,
- lm75_read_temp, NULL);
+ &lm75_of_thermal_ops);
if (IS_ERR(data->tz))
data->tz = NULL;
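This and the two hwmon drivers below (ntc_thermistor, tmp102) follow the same of-thermal API change: thermal_zone_of_sensor_register() now takes a const struct thermal_zone_of_device_ops rather than individual get_temp/get_trend callbacks. A minimal sketch of the new shape, with made-up driver names:

	static int mydrv_read_temp(void *drvdata, long *temp)
	{
		*temp = 25000;		/* millidegrees Celsius */
		return 0;
	}

	static const struct thermal_zone_of_device_ops mydrv_thermal_ops = {
		.get_temp = mydrv_read_temp,
	};

	/* in probe(): */
	tz = thermal_zone_of_sensor_register(hwmon_dev, 0, drvdata, &mydrv_thermal_ops);
	if (IS_ERR(tz))
		tz = NULL;		/* thermal integration is optional */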
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index fd9a945fe8d..112e4d45e4a 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -486,6 +486,10 @@ static const struct attribute_group ntc_attr_group = {
.attrs = ntc_attributes,
};
+static const struct thermal_zone_of_device_ops ntc_of_thermal_ops = {
+ .get_temp = ntc_read_temp,
+};
+
static int ntc_thermistor_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id =
@@ -579,7 +583,7 @@ static int ntc_thermistor_probe(struct platform_device *pdev)
pdev_id->name);
data->tz = thermal_zone_of_sensor_register(data->dev, 0, data->dev,
- ntc_read_temp, NULL);
+ &ntc_of_thermal_ops);
if (IS_ERR(data->tz)) {
dev_dbg(&pdev->dev, "Failed to register to thermal fw.\n");
data->tz = NULL;
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index 51719956cc0..ba9f478f64e 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -158,6 +158,10 @@ ATTRIBUTE_GROUPS(tmp102);
#define TMP102_CONFIG (TMP102_CONF_TM | TMP102_CONF_EM | TMP102_CONF_CR1)
#define TMP102_CONFIG_RD_ONLY (TMP102_CONF_R0 | TMP102_CONF_R1 | TMP102_CONF_AL)
+static const struct thermal_zone_of_device_ops tmp102_of_thermal_ops = {
+ .get_temp = tmp102_read_temp,
+};
+
static int tmp102_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -215,7 +219,7 @@ static int tmp102_probe(struct i2c_client *client,
}
tmp102->hwmon_dev = hwmon_dev;
tmp102->tz = thermal_zone_of_sensor_register(hwmon_dev, 0, hwmon_dev,
- tmp102_read_temp, NULL);
+ &tmp102_of_thermal_ops);
if (IS_ERR(tmp102->tz))
tmp102->tz = NULL;
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index c1351d9fb35..91a488c7cc4 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -1072,4 +1072,15 @@ config SCx200_ACB
This support is also available as a module. If so, the module
will be called scx200_acb.
+config I2C_OPAL
+ tristate "IBM OPAL I2C driver"
+ depends on PPC_POWERNV
+ default y
+ help
+ This exposes the PowerNV platform i2c busses to the linux i2c layer,
+ the driver is based on the OPAL interfaces.
+
+ This driver can also be built as a module. If so, the module will be
+ called as i2c-opal.
+
endmenu
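For reference, a hedged example of selecting the new driver in a PowerNV kernel configuration; symbols other than I2C_OPAL are assumed from the `depends on` line above, not taken from the patch:

	CONFIG_PPC_POWERNV=y
	CONFIG_I2C=y
	CONFIG_I2C_OPAL=m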
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 5e6c8223719..56388f658d2 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -102,6 +102,7 @@ obj-$(CONFIG_I2C_ACORN) += i2c-acorn.o
obj-$(CONFIG_I2C_BCM_KONA) += i2c-bcm-kona.o
obj-$(CONFIG_I2C_CROS_EC_TUNNEL) += i2c-cros-ec-tunnel.o
obj-$(CONFIG_I2C_ELEKTOR) += i2c-elektor.o
+obj-$(CONFIG_I2C_OPAL) += i2c-opal.o
obj-$(CONFIG_I2C_PCA_ISA) += i2c-pca-isa.o
obj-$(CONFIG_I2C_SIBYTE) += i2c-sibyte.o
obj-$(CONFIG_SCx200_ACB) += scx200_acb.o
diff --git a/drivers/i2c/busses/i2c-opal.c b/drivers/i2c/busses/i2c-opal.c
new file mode 100644
index 00000000000..16f90b1a750
--- /dev/null
+++ b/drivers/i2c/busses/i2c-opal.c
@@ -0,0 +1,294 @@
+/*
+ * IBM OPAL I2C driver
+ * Copyright (C) 2014 IBM
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ */
+
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <asm/firmware.h>
+#include <asm/opal.h>
+
+static int i2c_opal_translate_error(int rc)
+{
+ switch (rc) {
+ case OPAL_NO_MEM:
+ return -ENOMEM;
+ case OPAL_PARAMETER:
+ return -EINVAL;
+ case OPAL_I2C_ARBT_LOST:
+ return -EAGAIN;
+ case OPAL_I2C_TIMEOUT:
+ return -ETIMEDOUT;
+ case OPAL_I2C_NACK_RCVD:
+ return -ENXIO;
+ case OPAL_I2C_STOP_ERR:
+ return -EBUSY;
+ default:
+ return -EIO;
+ }
+}
+
+static int i2c_opal_send_request(u32 bus_id, struct opal_i2c_request *req)
+{
+ struct opal_msg msg;
+ int token, rc;
+
+ token = opal_async_get_token_interruptible();
+ if (token < 0) {
+ if (token != -ERESTARTSYS)
+ pr_err("Failed to get the async token\n");
+
+ return token;
+ }
+
+ rc = opal_i2c_request(token, bus_id, req);
+ if (rc != OPAL_ASYNC_COMPLETION) {
+ rc = i2c_opal_translate_error(rc);
+ goto exit;
+ }
+
+ rc = opal_async_wait_response(token, &msg);
+ if (rc)
+ goto exit;
+
+ rc = be64_to_cpu(msg.params[1]);
+ if (rc != OPAL_SUCCESS) {
+ rc = i2c_opal_translate_error(rc);
+ goto exit;
+ }
+
+exit:
+ opal_async_release_token(token);
+ return rc;
+}
+
+static int i2c_opal_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num)
+{
+ unsigned long opal_id = (unsigned long)adap->algo_data;
+ struct opal_i2c_request req;
+ int rc, i;
+
+ /* We only support fairly simple combinations here of one
+ * or two messages
+ */
+ memset(&req, 0, sizeof(req));
+ switch(num) {
+ case 0:
+ return 0;
+ case 1:
+ req.type = (msgs[0].flags & I2C_M_RD) ?
+ OPAL_I2C_RAW_READ : OPAL_I2C_RAW_WRITE;
+ req.addr = cpu_to_be16(msgs[0].addr);
+ req.size = cpu_to_be32(msgs[0].len);
+ req.buffer_ra = cpu_to_be64(__pa(msgs[0].buf));
+ break;
+ case 2:
+ /* For two messages, we basically support only simple
+ * smbus transactions of a write plus a read. We might
+ * want to allow also two writes but we'd have to bounce
+ * the data into a single buffer.
+ */
+ if ((msgs[0].flags & I2C_M_RD) || !(msgs[1].flags & I2C_M_RD))
+ return -EOPNOTSUPP;
+ if (msgs[0].len > 4)
+ return -EOPNOTSUPP;
+ if (msgs[0].addr != msgs[1].addr)
+ return -EOPNOTSUPP;
+ req.type = OPAL_I2C_SM_READ;
+ req.addr = cpu_to_be16(msgs[0].addr);
+ req.subaddr_sz = msgs[0].len;
+ for (i = 0; i < msgs[0].len; i++)
+ req.subaddr = (req.subaddr << 8) | msgs[0].buf[i];
+ req.subaddr = cpu_to_be32(req.subaddr);
+ req.size = cpu_to_be32(msgs[1].len);
+ req.buffer_ra = cpu_to_be64(__pa(msgs[1].buf));
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ rc = i2c_opal_send_request(opal_id, &req);
+ if (rc)
+ return rc;
+
+ return num;
+}
+
+static int i2c_opal_smbus_xfer(struct i2c_adapter *adap, u16 addr,
+ unsigned short flags, char read_write,
+ u8 command, int size, union i2c_smbus_data *data)
+{
+ unsigned long opal_id = (unsigned long)adap->algo_data;
+ struct opal_i2c_request req;
+ u8 local[2];
+ int rc;
+
+ memset(&req, 0, sizeof(req));
+
+ req.addr = cpu_to_be16(addr);
+ switch (size) {
+ case I2C_SMBUS_BYTE:
+ req.buffer_ra = cpu_to_be64(__pa(&data->byte));
+ req.size = cpu_to_be32(1);
+ /* Fall through */
+ case I2C_SMBUS_QUICK:
+ req.type = (read_write == I2C_SMBUS_READ) ?
+ OPAL_I2C_RAW_READ : OPAL_I2C_RAW_WRITE;
+ break;
+ case I2C_SMBUS_BYTE_DATA:
+ req.buffer_ra = cpu_to_be64(__pa(&data->byte));
+ req.size = cpu_to_be32(1);
+ req.subaddr = cpu_to_be32(command);
+ req.subaddr_sz = 1;
+ req.type = (read_write == I2C_SMBUS_READ) ?
+ OPAL_I2C_SM_READ : OPAL_I2C_SM_WRITE;
+ break;
+ case I2C_SMBUS_WORD_DATA:
+ if (!read_write) {
+ local[0] = data->word & 0xff;
+ local[1] = (data->word >> 8) & 0xff;
+ }
+ req.buffer_ra = cpu_to_be64(__pa(local));
+ req.size = cpu_to_be32(2);
+ req.subaddr = cpu_to_be32(command);
+ req.subaddr_sz = 1;
+ req.type = (read_write == I2C_SMBUS_READ) ?
+ OPAL_I2C_SM_READ : OPAL_I2C_SM_WRITE;
+ break;
+ case I2C_SMBUS_I2C_BLOCK_DATA:
+ req.buffer_ra = cpu_to_be64(__pa(&data->block[1]));
+ req.size = cpu_to_be32(data->block[0]);
+ req.subaddr = cpu_to_be32(command);
+ req.subaddr_sz = 1;
+ req.type = (read_write == I2C_SMBUS_READ) ?
+ OPAL_I2C_SM_READ : OPAL_I2C_SM_WRITE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rc = i2c_opal_send_request(opal_id, &req);
+ if (!rc && read_write && size == I2C_SMBUS_WORD_DATA) {
+ data->word = ((u16)local[1]) << 8;
+ data->word |= local[0];
+ }
+
+ return rc;
+}
+
+static u32 i2c_opal_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
+ I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
+ I2C_FUNC_SMBUS_I2C_BLOCK;
+}
+
+static const struct i2c_algorithm i2c_opal_algo = {
+ .master_xfer = i2c_opal_master_xfer,
+ .smbus_xfer = i2c_opal_smbus_xfer,
+ .functionality = i2c_opal_func,
+};
+
+static int i2c_opal_probe(struct platform_device *pdev)
+{
+ struct i2c_adapter *adapter;
+ const char *pname;
+ u32 opal_id;
+ int rc;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ rc = of_property_read_u32(pdev->dev.of_node, "ibm,opal-id", &opal_id);
+ if (rc) {
+ dev_err(&pdev->dev, "Missing ibm,opal-id property !\n");
+ return -EIO;
+ }
+
+ adapter = devm_kzalloc(&pdev->dev, sizeof(*adapter), GFP_KERNEL);
+ if (!adapter)
+ return -ENOMEM;
+
+ adapter->algo = &i2c_opal_algo;
+ adapter->algo_data = (void *)(unsigned long)opal_id;
+ adapter->dev.parent = &pdev->dev;
+ adapter->dev.of_node = of_node_get(pdev->dev.of_node);
+ pname = of_get_property(pdev->dev.of_node, "ibm,port-name", NULL);
+ if (pname)
+ strlcpy(adapter->name, pname, sizeof(adapter->name));
+ else
+ strlcpy(adapter->name, "opal", sizeof(adapter->name));
+
+ platform_set_drvdata(pdev, adapter);
+ rc = i2c_add_adapter(adapter);
+ if (rc)
+ dev_err(&pdev->dev, "Failed to register the i2c adapter\n");
+
+ return rc;
+}
+
+static int i2c_opal_remove(struct platform_device *pdev)
+{
+ struct i2c_adapter *adapter = platform_get_drvdata(pdev);
+
+ i2c_del_adapter(adapter);
+
+ return 0;
+}
+
+static const struct of_device_id i2c_opal_of_match[] = {
+ {
+ .compatible = "ibm,opal-i2c",
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, i2c_opal_of_match);
+
+static struct platform_driver i2c_opal_driver = {
+ .probe = i2c_opal_probe,
+ .remove = i2c_opal_remove,
+ .driver = {
+ .name = "i2c-opal",
+ .of_match_table = i2c_opal_of_match,
+ },
+};
+
+static int __init i2c_opal_init(void)
+{
+ if (!firmware_has_feature(FW_FEATURE_OPAL))
+ return -ENODEV;
+
+ return platform_driver_register(&i2c_opal_driver);
+}
+module_init(i2c_opal_init);
+
+static void __exit i2c_opal_exit(void)
+{
+ return platform_driver_unregister(&i2c_opal_driver);
+}
+module_exit(i2c_opal_exit);
+
+MODULE_AUTHOR("Neelesh Gupta <neelegup@linux.vnet.ibm.com>");
+MODULE_DESCRIPTION("IBM OPAL I2C driver");
+MODULE_LICENSE("GPL");
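Once probed, the adapter registered above behaves like any other Linux I2C bus, so it can be exercised from userspace through i2c-dev. A small usage sketch; the bus number and slave address are made up:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/i2c-dev.h>

	int main(void)
	{
		unsigned char reg = 0x00, val;
		int fd = open("/dev/i2c-1", O_RDWR);		/* assumed bus number */

		if (fd < 0 || ioctl(fd, I2C_SLAVE, 0x50) < 0)	/* assumed 7-bit address */
			return 1;

		/* write the register index, then read one byte back */
		if (write(fd, &reg, 1) != 1 || read(fd, &val, 1) != 1)
			return 1;

		printf("reg 0x%02x = 0x%02x\n", reg, val);
		close(fd);
		return 0;
	}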
diff --git a/drivers/iio/accel/st_accel.h b/drivers/iio/accel/st_accel.h
index c3877630b2e..fa964603430 100644
--- a/drivers/iio/accel/st_accel.h
+++ b/drivers/iio/accel/st_accel.h
@@ -33,8 +33,7 @@ static const struct st_sensors_platform_data default_accel_pdata = {
.drdy_int_pin = 1,
};
-int st_accel_common_probe(struct iio_dev *indio_dev,
- struct st_sensors_platform_data *pdata);
+int st_accel_common_probe(struct iio_dev *indio_dev);
void st_accel_common_remove(struct iio_dev *indio_dev);
#ifdef CONFIG_IIO_BUFFER
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 087864854c6..53f32629283 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -161,7 +161,7 @@ static const struct iio_chan_spec st_accel_16bit_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(3)
};
-static const struct st_sensors st_accel_sensors[] = {
+static const struct st_sensor_settings st_accel_sensors_settings[] = {
{
.wai = ST_ACCEL_1_WAI_EXP,
.sensors_supported = {
@@ -457,8 +457,7 @@ static const struct iio_trigger_ops st_accel_trigger_ops = {
#define ST_ACCEL_TRIGGER_OPS NULL
#endif
-int st_accel_common_probe(struct iio_dev *indio_dev,
- struct st_sensors_platform_data *plat_data)
+int st_accel_common_probe(struct iio_dev *indio_dev)
{
struct st_sensor_data *adata = iio_priv(indio_dev);
int irq = adata->get_irq_data_ready(indio_dev);
@@ -470,24 +469,25 @@ int st_accel_common_probe(struct iio_dev *indio_dev,
st_sensors_power_enable(indio_dev);
err = st_sensors_check_device_support(indio_dev,
- ARRAY_SIZE(st_accel_sensors), st_accel_sensors);
+ ARRAY_SIZE(st_accel_sensors_settings),
+ st_accel_sensors_settings);
if (err < 0)
return err;
adata->num_data_channels = ST_ACCEL_NUMBER_DATA_CHANNELS;
- adata->multiread_bit = adata->sensor->multi_read_bit;
- indio_dev->channels = adata->sensor->ch;
+ adata->multiread_bit = adata->sensor_settings->multi_read_bit;
+ indio_dev->channels = adata->sensor_settings->ch;
indio_dev->num_channels = ST_SENSORS_NUMBER_ALL_CHANNELS;
adata->current_fullscale = (struct st_sensor_fullscale_avl *)
- &adata->sensor->fs.fs_avl[0];
- adata->odr = adata->sensor->odr.odr_avl[0].hz;
+ &adata->sensor_settings->fs.fs_avl[0];
+ adata->odr = adata->sensor_settings->odr.odr_avl[0].hz;
- if (!plat_data)
- plat_data =
+ if (!adata->dev->platform_data)
+ adata->dev->platform_data =
(struct st_sensors_platform_data *)&default_accel_pdata;
- err = st_sensors_init_sensor(indio_dev, plat_data);
+ err = st_sensors_init_sensor(indio_dev, adata->dev->platform_data);
if (err < 0)
return err;
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index 7164aeff3ab..c7246bdd30b 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -79,12 +79,11 @@ static int st_accel_i2c_probe(struct i2c_client *client,
return -ENOMEM;
adata = iio_priv(indio_dev);
- adata->dev = &client->dev;
st_sensors_of_i2c_probe(client, st_accel_of_match);
st_sensors_i2c_configure(indio_dev, client, adata);
- err = st_accel_common_probe(indio_dev, client->dev.platform_data);
+ err = st_accel_common_probe(indio_dev);
if (err < 0)
return err;
diff --git a/drivers/iio/accel/st_accel_spi.c b/drivers/iio/accel/st_accel_spi.c
index 195639646e3..12ec29389e4 100644
--- a/drivers/iio/accel/st_accel_spi.c
+++ b/drivers/iio/accel/st_accel_spi.c
@@ -29,11 +29,10 @@ static int st_accel_spi_probe(struct spi_device *spi)
return -ENOMEM;
adata = iio_priv(indio_dev);
- adata->dev = &spi->dev;
st_sensors_spi_configure(indio_dev, spi, adata);
- err = st_accel_common_probe(indio_dev, spi->dev.platform_data);
+ err = st_accel_common_probe(indio_dev);
if (err < 0)
return err;
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index bc4e787096e..0f79e472576 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -214,6 +214,20 @@ config NAU7802
To compile this driver as a module, choose M here: the
module will be called nau7802.
+config QCOM_SPMI_IADC
+ tristate "Qualcomm SPMI PMIC current ADC"
+ depends on SPMI
+ select REGMAP_SPMI
+ help
+	  This is the IIO current ADC driver for the Qualcomm QPNP IADC chip.
+
+	  The driver supports single-mode operation to read from one of two
+	  channels (external or internal). The hardware has additional
+	  channels that are used internally for gain and offset calibration.
+
+ To compile this driver as a module, choose M here: the module will
+ be called qcom-spmi-iadc.
+
config ROCKCHIP_SARADC
tristate "Rockchip SARADC driver"
depends on ARCH_ROCKCHIP || (ARM && COMPILE_TEST)
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index f30093f5b67..701fdb7c96a 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_MCP320X) += mcp320x.o
obj-$(CONFIG_MCP3422) += mcp3422.o
obj-$(CONFIG_MEN_Z188_ADC) += men_z188_adc.o
obj-$(CONFIG_NAU7802) += nau7802.o
+obj-$(CONFIG_QCOM_SPMI_IADC) += qcom-spmi-iadc.o
obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o
obj-$(CONFIG_TI_ADC081C) += ti-adc081c.o
obj-$(CONFIG_TI_ADC128S052) += ti-adc128s052.o
diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c
index 43620fd4c66..3a2dbb3b492 100644
--- a/drivers/iio/adc/exynos_adc.c
+++ b/drivers/iio/adc/exynos_adc.c
@@ -39,6 +39,8 @@
#include <linux/iio/iio.h>
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
/* S3C/EXYNOS4412/5250 ADC_V1 registers definitions */
#define ADC_V1_CON(x) ((x) + 0x00)
@@ -90,11 +92,14 @@
#define EXYNOS_ADC_TIMEOUT (msecs_to_jiffies(100))
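+/*
+ * Offsets of the ADC_PHY control register inside the PMU system controller
+ * (syscon); the driver writes 1 or 0 here through the regmap to switch the
+ * ADC PHY block on or off.
+ */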
+#define EXYNOS_ADCV1_PHY_OFFSET 0x0718
+#define EXYNOS_ADCV2_PHY_OFFSET 0x0720
+
struct exynos_adc {
struct exynos_adc_data *data;
struct device *dev;
void __iomem *regs;
- void __iomem *enable_reg;
+ struct regmap *pmu_map;
struct clk *clk;
struct clk *sclk;
unsigned int irq;
@@ -110,6 +115,7 @@ struct exynos_adc_data {
int num_channels;
bool needs_sclk;
bool needs_adc_phy;
+ int phy_offset;
u32 mask;
void (*init_hw)(struct exynos_adc *info);
@@ -183,7 +189,7 @@ static void exynos_adc_v1_init_hw(struct exynos_adc *info)
u32 con1;
if (info->data->needs_adc_phy)
- writel(1, info->enable_reg);
+ regmap_write(info->pmu_map, info->data->phy_offset, 1);
/* set default prescaler values and Enable prescaler */
con1 = ADC_V1_CON_PRSCLV(49) | ADC_V1_CON_PRSCEN;
@@ -198,7 +204,7 @@ static void exynos_adc_v1_exit_hw(struct exynos_adc *info)
u32 con;
if (info->data->needs_adc_phy)
- writel(0, info->enable_reg);
+ regmap_write(info->pmu_map, info->data->phy_offset, 0);
con = readl(ADC_V1_CON(info->regs));
con |= ADC_V1_CON_STANDBY;
@@ -225,6 +231,7 @@ static const struct exynos_adc_data exynos_adc_v1_data = {
.num_channels = MAX_ADC_V1_CHANNELS,
.mask = ADC_DATX_MASK, /* 12 bit ADC resolution */
.needs_adc_phy = true,
+ .phy_offset = EXYNOS_ADCV1_PHY_OFFSET,
.init_hw = exynos_adc_v1_init_hw,
.exit_hw = exynos_adc_v1_exit_hw,
@@ -314,7 +321,7 @@ static void exynos_adc_v2_init_hw(struct exynos_adc *info)
u32 con1, con2;
if (info->data->needs_adc_phy)
- writel(1, info->enable_reg);
+ regmap_write(info->pmu_map, info->data->phy_offset, 1);
con1 = ADC_V2_CON1_SOFT_RESET;
writel(con1, ADC_V2_CON1(info->regs));
@@ -332,7 +339,7 @@ static void exynos_adc_v2_exit_hw(struct exynos_adc *info)
u32 con;
if (info->data->needs_adc_phy)
- writel(0, info->enable_reg);
+ regmap_write(info->pmu_map, info->data->phy_offset, 0);
con = readl(ADC_V2_CON1(info->regs));
con &= ~ADC_CON_EN_START;
@@ -362,6 +369,7 @@ static const struct exynos_adc_data exynos_adc_v2_data = {
.num_channels = MAX_ADC_V2_CHANNELS,
.mask = ADC_DATX_MASK, /* 12 bit ADC resolution */
.needs_adc_phy = true,
+ .phy_offset = EXYNOS_ADCV2_PHY_OFFSET,
.init_hw = exynos_adc_v2_init_hw,
.exit_hw = exynos_adc_v2_exit_hw,
@@ -374,6 +382,7 @@ static const struct exynos_adc_data exynos3250_adc_data = {
.mask = ADC_DATX_MASK, /* 12 bit ADC resolution */
.needs_sclk = true,
.needs_adc_phy = true,
+ .phy_offset = EXYNOS_ADCV1_PHY_OFFSET,
.init_hw = exynos_adc_v2_init_hw,
.exit_hw = exynos_adc_v2_exit_hw,
@@ -381,6 +390,35 @@ static const struct exynos_adc_data exynos3250_adc_data = {
.start_conv = exynos_adc_v2_start_conv,
};
+static void exynos_adc_exynos7_init_hw(struct exynos_adc *info)
+{
+ u32 con1, con2;
+
+ if (info->data->needs_adc_phy)
+ regmap_write(info->pmu_map, info->data->phy_offset, 1);
+
+ con1 = ADC_V2_CON1_SOFT_RESET;
+ writel(con1, ADC_V2_CON1(info->regs));
+
+ con2 = readl(ADC_V2_CON2(info->regs));
+ con2 &= ~ADC_V2_CON2_C_TIME(7);
+ con2 |= ADC_V2_CON2_C_TIME(0);
+ writel(con2, ADC_V2_CON2(info->regs));
+
+ /* Enable interrupts */
+ writel(1, ADC_V2_INT_EN(info->regs));
+}
+
+static const struct exynos_adc_data exynos7_adc_data = {
+ .num_channels = MAX_ADC_V1_CHANNELS,
+ .mask = ADC_DATX_MASK, /* 12 bit ADC resolution */
+
+ .init_hw = exynos_adc_exynos7_init_hw,
+ .exit_hw = exynos_adc_v2_exit_hw,
+ .clear_irq = exynos_adc_v2_clear_irq,
+ .start_conv = exynos_adc_v2_start_conv,
+};
+
static const struct of_device_id exynos_adc_match[] = {
{
.compatible = "samsung,s3c2410-adc",
@@ -406,6 +444,9 @@ static const struct of_device_id exynos_adc_match[] = {
}, {
.compatible = "samsung,exynos3250-adc",
.data = &exynos3250_adc_data,
+ }, {
+ .compatible = "samsung,exynos7-adc",
+ .data = &exynos7_adc_data,
},
{},
};
@@ -558,10 +599,13 @@ static int exynos_adc_probe(struct platform_device *pdev)
if (info->data->needs_adc_phy) {
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- info->enable_reg = devm_ioremap_resource(&pdev->dev, mem);
- if (IS_ERR(info->enable_reg))
- return PTR_ERR(info->enable_reg);
+ info->pmu_map = syscon_regmap_lookup_by_phandle(
+ pdev->dev.of_node,
+ "samsung,syscon-phandle");
+ if (IS_ERR(info->pmu_map)) {
+ dev_err(&pdev->dev, "syscon regmap lookup failed.\n");
+ return PTR_ERR(info->pmu_map);
+ }
}
irq = platform_get_irq(pdev, 0);
diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c
index 28a086e4877..efbfd12a4bf 100644
--- a/drivers/iio/adc/mcp320x.c
+++ b/drivers/iio/adc/mcp320x.c
@@ -1,9 +1,30 @@
/*
* Copyright (C) 2013 Oskar Andero <oskar.andero@gmail.com>
+ * Copyright (C) 2014 Rose Technology
+ * Allan Bendorff Jensen <abj@rosetechnology.dk>
+ * Soren Andersen <san@rosetechnology.dk>
+ *
+ * Driver for the following ADC chips from Microchip Technology:
+ * 10 bit converter
+ * MCP3001
+ * MCP3002
+ * MCP3004
+ * MCP3008
+ * ------------
+ * 12 bit converter
+ * MCP3201
+ * MCP3202
+ * MCP3204
+ * MCP3208
+ * ------------
*
- * Driver for Microchip Technology's MCP3204 and MCP3208 ADC chips.
* Datasheet can be found here:
- * http://ww1.microchip.com/downloads/en/devicedoc/21298c.pdf
+ * http://ww1.microchip.com/downloads/en/DeviceDoc/21293C.pdf mcp3001
+ * http://ww1.microchip.com/downloads/en/DeviceDoc/21294E.pdf mcp3002
+ * http://ww1.microchip.com/downloads/en/DeviceDoc/21295d.pdf mcp3004/08
+ * http://ww1.microchip.com/downloads/en/DeviceDoc/21290D.pdf mcp3201
+ * http://ww1.microchip.com/downloads/en/DeviceDoc/21034D.pdf mcp3202
+ * http://ww1.microchip.com/downloads/en/DeviceDoc/21298c.pdf mcp3204/08
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -11,19 +32,29 @@
*/
#include <linux/err.h>
+#include <linux/delay.h>
#include <linux/spi/spi.h>
#include <linux/module.h>
#include <linux/iio/iio.h>
#include <linux/regulator/consumer.h>
-#define MCP_SINGLE_ENDED (1 << 3)
-#define MCP_START_BIT (1 << 4)
-
enum {
+ mcp3001,
+ mcp3002,
+ mcp3004,
+ mcp3008,
+ mcp3201,
+ mcp3202,
mcp3204,
mcp3208,
};
+struct mcp320x_chip_info {
+ const struct iio_chan_spec *channels;
+ unsigned int num_channels;
+ unsigned int resolution;
+};
+
struct mcp320x {
struct spi_device *spi;
struct spi_message msg;
@@ -34,19 +65,69 @@ struct mcp320x {
struct regulator *reg;
struct mutex lock;
+ const struct mcp320x_chip_info *chip_info;
};
-static int mcp320x_adc_conversion(struct mcp320x *adc, u8 msg)
+static int mcp320x_channel_to_tx_data(int device_index,
+ const unsigned int channel, bool differential)
+{
+ int start_bit = 1;
+
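+	/*
+	 * Configuration word implied by the shifts below: the single-channel
+	 * parts (mcp3001/mcp3201) need no channel selection, the two-channel
+	 * parts place the start bit at bit 4, the single-ended flag at bit 3
+	 * and the channel at bit 2, and the four/eight-channel parts place
+	 * the start bit at bit 6, the single-ended flag at bit 5 and the
+	 * channel number in bits 4:2.
+	 */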
+ switch (device_index) {
+ case mcp3001:
+ case mcp3201:
+ return 0;
+ case mcp3002:
+ case mcp3202:
+ return ((start_bit << 4) | (!differential << 3) |
+ (channel << 2));
+ case mcp3004:
+ case mcp3204:
+ case mcp3008:
+ case mcp3208:
+ return ((start_bit << 6) | (!differential << 5) |
+ (channel << 2));
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mcp320x_adc_conversion(struct mcp320x *adc, u8 channel,
+ bool differential, int device_index)
{
int ret;
- adc->tx_buf = msg;
- ret = spi_sync(adc->spi, &adc->msg);
- if (ret < 0)
- return ret;
+ adc->rx_buf[0] = 0;
+ adc->rx_buf[1] = 0;
+ adc->tx_buf = mcp320x_channel_to_tx_data(device_index,
+ channel, differential);
+
+ if (device_index != mcp3001 && device_index != mcp3201) {
+ ret = spi_sync(adc->spi, &adc->msg);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = spi_read(adc->spi, &adc->rx_buf, sizeof(adc->rx_buf));
+ if (ret < 0)
+ return ret;
+ }
- return ((adc->rx_buf[0] & 0x3f) << 6) |
- (adc->rx_buf[1] >> 2);
+ switch (device_index) {
+ case mcp3001:
+ return (adc->rx_buf[0] << 5 | adc->rx_buf[1] >> 3);
+ case mcp3002:
+ case mcp3004:
+ case mcp3008:
+ return (adc->rx_buf[0] << 2 | adc->rx_buf[1] >> 6);
+ case mcp3201:
+ return (adc->rx_buf[0] << 7 | adc->rx_buf[1] >> 1);
+ case mcp3202:
+ case mcp3204:
+ case mcp3208:
+ return (adc->rx_buf[0] << 4 | adc->rx_buf[1] >> 4);
+ default:
+ return -EINVAL;
+ }
}
static int mcp320x_read_raw(struct iio_dev *indio_dev,
@@ -55,18 +136,17 @@ static int mcp320x_read_raw(struct iio_dev *indio_dev,
{
struct mcp320x *adc = iio_priv(indio_dev);
int ret = -EINVAL;
+ int device_index = 0;
mutex_lock(&adc->lock);
+ device_index = spi_get_device_id(adc->spi)->driver_data;
+
switch (mask) {
case IIO_CHAN_INFO_RAW:
- if (channel->differential)
- ret = mcp320x_adc_conversion(adc,
- MCP_START_BIT | channel->address);
- else
- ret = mcp320x_adc_conversion(adc,
- MCP_START_BIT | MCP_SINGLE_ENDED |
- channel->address);
+ ret = mcp320x_adc_conversion(adc, channel->address,
+ channel->differential, device_index);
+
if (ret < 0)
goto out;
@@ -75,18 +155,15 @@ static int mcp320x_read_raw(struct iio_dev *indio_dev,
break;
case IIO_CHAN_INFO_SCALE:
- /* Digital output code = (4096 * Vin) / Vref */
ret = regulator_get_voltage(adc->reg);
if (ret < 0)
goto out;
+ /* convert regulator output voltage to mV */
*val = ret / 1000;
- *val2 = 12;
+ *val2 = adc->chip_info->resolution;
ret = IIO_VAL_FRACTIONAL_LOG2;
break;
-
- default:
- break;
}
out:
@@ -117,6 +194,16 @@ out:
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) \
}
+static const struct iio_chan_spec mcp3201_channels[] = {
+ MCP320X_VOLTAGE_CHANNEL_DIFF(0),
+};
+
+static const struct iio_chan_spec mcp3202_channels[] = {
+ MCP320X_VOLTAGE_CHANNEL(0),
+ MCP320X_VOLTAGE_CHANNEL(1),
+ MCP320X_VOLTAGE_CHANNEL_DIFF(0),
+};
+
static const struct iio_chan_spec mcp3204_channels[] = {
MCP320X_VOLTAGE_CHANNEL(0),
MCP320X_VOLTAGE_CHANNEL(1),
@@ -146,19 +233,46 @@ static const struct iio_info mcp320x_info = {
.driver_module = THIS_MODULE,
};
-struct mcp3208_chip_info {
- const struct iio_chan_spec *channels;
- unsigned int num_channels;
-};
-
-static const struct mcp3208_chip_info mcp3208_chip_infos[] = {
+static const struct mcp320x_chip_info mcp320x_chip_infos[] = {
+ [mcp3001] = {
+ .channels = mcp3201_channels,
+ .num_channels = ARRAY_SIZE(mcp3201_channels),
+ .resolution = 10
+ },
+ [mcp3002] = {
+ .channels = mcp3202_channels,
+ .num_channels = ARRAY_SIZE(mcp3202_channels),
+ .resolution = 10
+ },
+ [mcp3004] = {
+ .channels = mcp3204_channels,
+ .num_channels = ARRAY_SIZE(mcp3204_channels),
+ .resolution = 10
+ },
+ [mcp3008] = {
+ .channels = mcp3208_channels,
+ .num_channels = ARRAY_SIZE(mcp3208_channels),
+ .resolution = 10
+ },
+ [mcp3201] = {
+ .channels = mcp3201_channels,
+ .num_channels = ARRAY_SIZE(mcp3201_channels),
+ .resolution = 12
+ },
+ [mcp3202] = {
+ .channels = mcp3202_channels,
+ .num_channels = ARRAY_SIZE(mcp3202_channels),
+ .resolution = 12
+ },
[mcp3204] = {
.channels = mcp3204_channels,
- .num_channels = ARRAY_SIZE(mcp3204_channels)
+ .num_channels = ARRAY_SIZE(mcp3204_channels),
+ .resolution = 12
},
[mcp3208] = {
.channels = mcp3208_channels,
- .num_channels = ARRAY_SIZE(mcp3208_channels)
+ .num_channels = ARRAY_SIZE(mcp3208_channels),
+ .resolution = 12
},
};
@@ -166,7 +280,7 @@ static int mcp320x_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
struct mcp320x *adc;
- const struct mcp3208_chip_info *chip_info;
+ const struct mcp320x_chip_info *chip_info;
int ret;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*adc));
@@ -181,7 +295,7 @@ static int mcp320x_probe(struct spi_device *spi)
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &mcp320x_info;
- chip_info = &mcp3208_chip_infos[spi_get_device_id(spi)->driver_data];
+ chip_info = &mcp320x_chip_infos[spi_get_device_id(spi)->driver_data];
indio_dev->channels = chip_info->channels;
indio_dev->num_channels = chip_info->num_channels;
@@ -226,7 +340,45 @@ static int mcp320x_remove(struct spi_device *spi)
return 0;
}
+#if defined(CONFIG_OF)
+static const struct of_device_id mcp320x_dt_ids[] = {
+ {
+ .compatible = "mcp3001",
+ .data = &mcp320x_chip_infos[mcp3001],
+ }, {
+ .compatible = "mcp3002",
+ .data = &mcp320x_chip_infos[mcp3002],
+ }, {
+ .compatible = "mcp3004",
+ .data = &mcp320x_chip_infos[mcp3004],
+ }, {
+ .compatible = "mcp3008",
+ .data = &mcp320x_chip_infos[mcp3008],
+ }, {
+ .compatible = "mcp3201",
+ .data = &mcp320x_chip_infos[mcp3201],
+ }, {
+ .compatible = "mcp3202",
+ .data = &mcp320x_chip_infos[mcp3202],
+ }, {
+ .compatible = "mcp3204",
+ .data = &mcp320x_chip_infos[mcp3204],
+ }, {
+ .compatible = "mcp3208",
+ .data = &mcp320x_chip_infos[mcp3208],
+ }, {
+ }
+};
+MODULE_DEVICE_TABLE(of, mcp320x_dt_ids);
+#endif
+
static const struct spi_device_id mcp320x_id[] = {
+ { "mcp3001", mcp3001 },
+ { "mcp3002", mcp3002 },
+ { "mcp3004", mcp3004 },
+ { "mcp3008", mcp3008 },
+ { "mcp3201", mcp3201 },
+ { "mcp3202", mcp3202 },
{ "mcp3204", mcp3204 },
{ "mcp3208", mcp3208 },
{ }
@@ -245,5 +397,5 @@ static struct spi_driver mcp320x_driver = {
module_spi_driver(mcp320x_driver);
MODULE_AUTHOR("Oskar Andero <oskar.andero@gmail.com>");
-MODULE_DESCRIPTION("Microchip Technology MCP3204/08");
+MODULE_DESCRIPTION("Microchip Technology MCP3x01/02/04/08");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/qcom-spmi-iadc.c b/drivers/iio/adc/qcom-spmi-iadc.c
new file mode 100644
index 00000000000..b9666f2f5e5
--- /dev/null
+++ b/drivers/iio/adc/qcom-spmi-iadc.c
@@ -0,0 +1,595 @@
+/*
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/iio/iio.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+/* IADC register and bit definition */
+#define IADC_REVISION2 0x1
+#define IADC_REVISION2_SUPPORTED_IADC 1
+
+#define IADC_PERPH_TYPE 0x4
+#define IADC_PERPH_TYPE_ADC 8
+
+#define IADC_PERPH_SUBTYPE 0x5
+#define IADC_PERPH_SUBTYPE_IADC 3
+
+#define IADC_STATUS1 0x8
+#define IADC_STATUS1_OP_MODE 4
+#define IADC_STATUS1_REQ_STS BIT(1)
+#define IADC_STATUS1_EOC BIT(0)
+#define IADC_STATUS1_REQ_STS_EOC_MASK 0x3
+
+#define IADC_MODE_CTL 0x40
+#define IADC_OP_MODE_SHIFT 3
+#define IADC_OP_MODE_NORMAL 0
+#define IADC_TRIM_EN BIT(0)
+
+#define IADC_EN_CTL1 0x46
+#define IADC_EN_CTL1_SET BIT(7)
+
+#define IADC_CH_SEL_CTL 0x48
+
+#define IADC_DIG_PARAM 0x50
+#define IADC_DIG_DEC_RATIO_SEL_SHIFT 2
+
+#define IADC_HW_SETTLE_DELAY 0x51
+
+#define IADC_CONV_REQ 0x52
+#define IADC_CONV_REQ_SET BIT(7)
+
+#define IADC_FAST_AVG_CTL 0x5a
+#define IADC_FAST_AVG_EN 0x5b
+#define IADC_FAST_AVG_EN_SET BIT(7)
+
+#define IADC_PERH_RESET_CTL3 0xda
+#define IADC_FOLLOW_WARM_RB BIT(2)
+
+#define IADC_DATA 0x60 /* 16 bits */
+
+#define IADC_SEC_ACCESS 0xd0
+#define IADC_SEC_ACCESS_DATA 0xa5
+
+#define IADC_NOMINAL_RSENSE 0xf4
+#define IADC_NOMINAL_RSENSE_SIGN_MASK BIT(7)
+
+#define IADC_REF_GAIN_MICRO_VOLTS 17857
+
+#define IADC_INT_RSENSE_DEVIATION 15625 /* nano Ohms per bit */
+
+#define IADC_INT_RSENSE_IDEAL_VALUE 10000 /* micro Ohms */
+#define IADC_INT_RSENSE_DEFAULT_VALUE 7800 /* micro Ohms */
+#define IADC_INT_RSENSE_DEFAULT_GF 9000 /* micro Ohms */
+#define IADC_INT_RSENSE_DEFAULT_SMIC 9700 /* micro Ohms */
+
+#define IADC_CONV_TIME_MIN_US 2000
+#define IADC_CONV_TIME_MAX_US 2100
+
+#define IADC_DEF_PRESCALING 0 /* 1:1 */
+#define IADC_DEF_DECIMATION 0 /* 512 */
+#define IADC_DEF_HW_SETTLE_TIME 0 /* 0 us */
+#define IADC_DEF_AVG_SAMPLES 0 /* 1 sample */
+
+/* IADC channel list */
+#define IADC_INT_RSENSE 0
+#define IADC_EXT_RSENSE 1
+#define IADC_GAIN_17P857MV 3
+#define IADC_EXT_OFFSET_CSP_CSN 5
+#define IADC_INT_OFFSET_CSP2_CSN2 6
+
+/**
+ * struct iadc_chip - IADC Current ADC device structure.
+ * @regmap: regmap for register read/write.
+ * @dev: This device pointer.
+ * @base: base offset for the ADC peripheral.
+ * @rsense: Values of the internal and external sense resister in micro Ohms.
+ * @poll_eoc: Poll for end of conversion instead of waiting for IRQ.
+ * @offset: Raw offset values for the internal and external channels.
+ * @gain: Raw gain of the channels.
+ * @lock: ADC lock for access to the peripheral.
+ * @complete: ADC notification after end of conversion interrupt is received.
+ */
+struct iadc_chip {
+ struct regmap *regmap;
+ struct device *dev;
+ u16 base;
+ bool poll_eoc;
+ u32 rsense[2];
+ u16 offset[2];
+ u16 gain;
+ struct mutex lock;
+ struct completion complete;
+};
+
+static int iadc_read(struct iadc_chip *iadc, u16 offset, u8 *data)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(iadc->regmap, iadc->base + offset, &val);
+ if (ret < 0)
+ return ret;
+
+ *data = val;
+ return 0;
+}
+
+static int iadc_write(struct iadc_chip *iadc, u16 offset, u8 data)
+{
+ return regmap_write(iadc->regmap, iadc->base + offset, data);
+}
+
+static int iadc_reset(struct iadc_chip *iadc)
+{
+ u8 data;
+ int ret;
+
+ ret = iadc_write(iadc, IADC_SEC_ACCESS, IADC_SEC_ACCESS_DATA);
+ if (ret < 0)
+ return ret;
+
+ ret = iadc_read(iadc, IADC_PERH_RESET_CTL3, &data);
+ if (ret < 0)
+ return ret;
+
+ ret = iadc_write(iadc, IADC_SEC_ACCESS, IADC_SEC_ACCESS_DATA);
+ if (ret < 0)
+ return ret;
+
+ data |= IADC_FOLLOW_WARM_RB;
+
+ return iadc_write(iadc, IADC_PERH_RESET_CTL3, data);
+}
+
+static int iadc_set_state(struct iadc_chip *iadc, bool state)
+{
+ return iadc_write(iadc, IADC_EN_CTL1, state ? IADC_EN_CTL1_SET : 0);
+}
+
+static void iadc_status_show(struct iadc_chip *iadc)
+{
+ u8 mode, sta1, chan, dig, en, req;
+ int ret;
+
+ ret = iadc_read(iadc, IADC_MODE_CTL, &mode);
+ if (ret < 0)
+ return;
+
+ ret = iadc_read(iadc, IADC_DIG_PARAM, &dig);
+ if (ret < 0)
+ return;
+
+ ret = iadc_read(iadc, IADC_CH_SEL_CTL, &chan);
+ if (ret < 0)
+ return;
+
+ ret = iadc_read(iadc, IADC_CONV_REQ, &req);
+ if (ret < 0)
+ return;
+
+ ret = iadc_read(iadc, IADC_STATUS1, &sta1);
+ if (ret < 0)
+ return;
+
+ ret = iadc_read(iadc, IADC_EN_CTL1, &en);
+ if (ret < 0)
+ return;
+
+ dev_err(iadc->dev,
+ "mode:%02x en:%02x chan:%02x dig:%02x req:%02x sta1:%02x\n",
+ mode, en, chan, dig, req, sta1);
+}
+
+static int iadc_configure(struct iadc_chip *iadc, int channel)
+{
+ u8 decim, mode;
+ int ret;
+
+ /* Mode selection */
+ mode = (IADC_OP_MODE_NORMAL << IADC_OP_MODE_SHIFT) | IADC_TRIM_EN;
+ ret = iadc_write(iadc, IADC_MODE_CTL, mode);
+ if (ret < 0)
+ return ret;
+
+ /* Channel selection */
+ ret = iadc_write(iadc, IADC_CH_SEL_CTL, channel);
+ if (ret < 0)
+ return ret;
+
+ /* Digital parameter setup */
+ decim = IADC_DEF_DECIMATION << IADC_DIG_DEC_RATIO_SEL_SHIFT;
+ ret = iadc_write(iadc, IADC_DIG_PARAM, decim);
+ if (ret < 0)
+ return ret;
+
+ /* HW settle time delay */
+ ret = iadc_write(iadc, IADC_HW_SETTLE_DELAY, IADC_DEF_HW_SETTLE_TIME);
+ if (ret < 0)
+ return ret;
+
+ ret = iadc_write(iadc, IADC_FAST_AVG_CTL, IADC_DEF_AVG_SAMPLES);
+ if (ret < 0)
+ return ret;
+
+ if (IADC_DEF_AVG_SAMPLES)
+ ret = iadc_write(iadc, IADC_FAST_AVG_EN, IADC_FAST_AVG_EN_SET);
+ else
+ ret = iadc_write(iadc, IADC_FAST_AVG_EN, 0);
+
+ if (ret < 0)
+ return ret;
+
+ if (!iadc->poll_eoc)
+ reinit_completion(&iadc->complete);
+
+ ret = iadc_set_state(iadc, true);
+ if (ret < 0)
+ return ret;
+
+ /* Request conversion */
+ return iadc_write(iadc, IADC_CONV_REQ, IADC_CONV_REQ_SET);
+}
+
+static int iadc_poll_wait_eoc(struct iadc_chip *iadc, unsigned int interval_us)
+{
+ unsigned int count, retry;
+ int ret;
+ u8 sta1;
+
+ retry = interval_us / IADC_CONV_TIME_MIN_US;
+
+ for (count = 0; count < retry; count++) {
+ ret = iadc_read(iadc, IADC_STATUS1, &sta1);
+ if (ret < 0)
+ return ret;
+
+ sta1 &= IADC_STATUS1_REQ_STS_EOC_MASK;
+ if (sta1 == IADC_STATUS1_EOC)
+ return 0;
+
+ usleep_range(IADC_CONV_TIME_MIN_US, IADC_CONV_TIME_MAX_US);
+ }
+
+ iadc_status_show(iadc);
+
+ return -ETIMEDOUT;
+}
+
+static int iadc_read_result(struct iadc_chip *iadc, u16 *data)
+{
+ return regmap_bulk_read(iadc->regmap, iadc->base + IADC_DATA, data, 2);
+}
+
+static int iadc_do_conversion(struct iadc_chip *iadc, int chan, u16 *data)
+{
+ unsigned int wait;
+ int ret;
+
+ ret = iadc_configure(iadc, chan);
+ if (ret < 0)
+ goto exit;
+
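+	/* Budget roughly twice the nominal conversion time, scaled by 2^avg_samples. */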
+ wait = BIT(IADC_DEF_AVG_SAMPLES) * IADC_CONV_TIME_MIN_US * 2;
+
+ if (iadc->poll_eoc) {
+ ret = iadc_poll_wait_eoc(iadc, wait);
+ } else {
+ ret = wait_for_completion_timeout(&iadc->complete, wait);
+ if (!ret)
+ ret = -ETIMEDOUT;
+ else
+ /* double check conversion status */
+ ret = iadc_poll_wait_eoc(iadc, IADC_CONV_TIME_MIN_US);
+ }
+
+ if (!ret)
+ ret = iadc_read_result(iadc, data);
+exit:
+ iadc_set_state(iadc, false);
+ if (ret < 0)
+ dev_err(iadc->dev, "conversion failed\n");
+
+ return ret;
+}
+
+static int iadc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct iadc_chip *iadc = iio_priv(indio_dev);
+ s32 isense_ua, vsense_uv;
+ u16 adc_raw, vsense_raw;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&iadc->lock);
+ ret = iadc_do_conversion(iadc, chan->channel, &adc_raw);
+ mutex_unlock(&iadc->lock);
+ if (ret < 0)
+ return ret;
+
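+		/*
+		 * Remove the calibrated offset from the raw code, scale it to
+		 * a sense voltage using the span measured on the 17.857 mV
+		 * gain channel, then divide by the sense resistor to get the
+		 * current reported to user space.
+		 */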
+ vsense_raw = adc_raw - iadc->offset[chan->channel];
+
+ vsense_uv = vsense_raw * IADC_REF_GAIN_MICRO_VOLTS;
+ vsense_uv /= (s32)iadc->gain - iadc->offset[chan->channel];
+
+ isense_ua = vsense_uv / iadc->rsense[chan->channel];
+
+ dev_dbg(iadc->dev, "off %d gain %d adc %d %duV I %duA\n",
+ iadc->offset[chan->channel], iadc->gain,
+ adc_raw, vsense_uv, isense_ua);
+
+ *val = isense_ua;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ *val2 = 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info iadc_info = {
+ .read_raw = iadc_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static irqreturn_t iadc_isr(int irq, void *dev_id)
+{
+ struct iadc_chip *iadc = dev_id;
+
+ complete(&iadc->complete);
+
+ return IRQ_HANDLED;
+}
+
+static int iadc_update_offset(struct iadc_chip *iadc)
+{
+ int ret;
+
+ ret = iadc_do_conversion(iadc, IADC_GAIN_17P857MV, &iadc->gain);
+ if (ret < 0)
+ return ret;
+
+ ret = iadc_do_conversion(iadc, IADC_INT_OFFSET_CSP2_CSN2,
+ &iadc->offset[IADC_INT_RSENSE]);
+ if (ret < 0)
+ return ret;
+
+ if (iadc->gain == iadc->offset[IADC_INT_RSENSE]) {
+ dev_err(iadc->dev, "error: internal offset == gain %d\n",
+ iadc->gain);
+ return -EINVAL;
+ }
+
+ ret = iadc_do_conversion(iadc, IADC_EXT_OFFSET_CSP_CSN,
+ &iadc->offset[IADC_EXT_RSENSE]);
+ if (ret < 0)
+ return ret;
+
+ if (iadc->gain == iadc->offset[IADC_EXT_RSENSE]) {
+ dev_err(iadc->dev, "error: external offset == gain %d\n",
+ iadc->gain);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int iadc_version_check(struct iadc_chip *iadc)
+{
+ u8 val;
+ int ret;
+
+ ret = iadc_read(iadc, IADC_PERPH_TYPE, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val < IADC_PERPH_TYPE_ADC) {
+ dev_err(iadc->dev, "%d is not ADC\n", val);
+ return -EINVAL;
+ }
+
+ ret = iadc_read(iadc, IADC_PERPH_SUBTYPE, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val < IADC_PERPH_SUBTYPE_IADC) {
+ dev_err(iadc->dev, "%d is not IADC\n", val);
+ return -EINVAL;
+ }
+
+ ret = iadc_read(iadc, IADC_REVISION2, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val < IADC_REVISION2_SUPPORTED_IADC) {
+ dev_err(iadc->dev, "revision %d not supported\n", val);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int iadc_rsense_read(struct iadc_chip *iadc, struct device_node *node)
+{
+ int ret, sign, int_sense;
+ u8 deviation;
+
+ ret = of_property_read_u32(node, "qcom,external-resistor-micro-ohms",
+ &iadc->rsense[IADC_EXT_RSENSE]);
+ if (ret < 0)
+ iadc->rsense[IADC_EXT_RSENSE] = IADC_INT_RSENSE_IDEAL_VALUE;
+
+ if (!iadc->rsense[IADC_EXT_RSENSE]) {
+ dev_err(iadc->dev, "external resistor can't be zero Ohms");
+ return -EINVAL;
+ }
+
+ ret = iadc_read(iadc, IADC_NOMINAL_RSENSE, &deviation);
+ if (ret < 0)
+ return ret;
+
+ /*
+	 * The stored deviation value is an offset from 10 milli Ohms; bit 7 is
+	 * the sign and the remaining bits have an LSB of 15625 nano Ohms.
+ */
+ sign = (deviation & IADC_NOMINAL_RSENSE_SIGN_MASK) ? -1 : 1;
+
+ deviation &= ~IADC_NOMINAL_RSENSE_SIGN_MASK;
+
+	/* Scale it to nano Ohms */
+ int_sense = IADC_INT_RSENSE_IDEAL_VALUE * 1000;
+ int_sense += sign * deviation * IADC_INT_RSENSE_DEVIATION;
+ int_sense /= 1000; /* micro Ohms */
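+	/*
+	 * Example: a deviation byte of 0x10 (sign bit clear, 16 LSBs) gives
+	 * 10,000,000 nOhm + 16 * 15625 nOhm = 10,250,000 nOhm, i.e. an
+	 * internal sense resistance of 10250 micro Ohms.
+	 */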
+
+ iadc->rsense[IADC_INT_RSENSE] = int_sense;
+ return 0;
+}
+
+static const struct iio_chan_spec iadc_channels[] = {
+ {
+ .type = IIO_CURRENT,
+ .datasheet_name = "INTERNAL_RSENSE",
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .indexed = 1,
+ },
+ {
+ .type = IIO_CURRENT,
+ .datasheet_name = "EXTERNAL_RSENSE",
+ .channel = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .indexed = 1,
+ },
+};
+
+static int iadc_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct iio_dev *indio_dev;
+ struct iadc_chip *iadc;
+ int ret, irq_eoc;
+ u32 res;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*iadc));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ iadc = iio_priv(indio_dev);
+ iadc->dev = dev;
+
+ iadc->regmap = dev_get_regmap(dev->parent, NULL);
+ if (!iadc->regmap)
+ return -ENODEV;
+
+ init_completion(&iadc->complete);
+ mutex_init(&iadc->lock);
+
+ ret = of_property_read_u32(node, "reg", &res);
+ if (ret < 0)
+ return -ENODEV;
+
+ iadc->base = res;
+
+ ret = iadc_version_check(iadc);
+ if (ret < 0)
+ return -ENODEV;
+
+ ret = iadc_rsense_read(iadc, node);
+ if (ret < 0)
+ return -ENODEV;
+
+ dev_dbg(iadc->dev, "sense resistors %d and %d micro Ohm\n",
+ iadc->rsense[IADC_INT_RSENSE],
+ iadc->rsense[IADC_EXT_RSENSE]);
+
+ irq_eoc = platform_get_irq(pdev, 0);
+ if (irq_eoc == -EPROBE_DEFER)
+ return irq_eoc;
+
+ if (irq_eoc < 0)
+ iadc->poll_eoc = true;
+
+ ret = iadc_reset(iadc);
+ if (ret < 0) {
+ dev_err(dev, "reset failed\n");
+ return ret;
+ }
+
+ if (!iadc->poll_eoc) {
+ ret = devm_request_irq(dev, irq_eoc, iadc_isr, 0,
+ "spmi-iadc", iadc);
+ if (!ret)
+ enable_irq_wake(irq_eoc);
+ else
+ return ret;
+ } else {
+ device_init_wakeup(iadc->dev, 1);
+ }
+
+ ret = iadc_update_offset(iadc);
+ if (ret < 0) {
+ dev_err(dev, "failed offset calibration\n");
+ return ret;
+ }
+
+ indio_dev->dev.parent = dev;
+ indio_dev->dev.of_node = node;
+ indio_dev->name = pdev->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &iadc_info;
+ indio_dev->channels = iadc_channels;
+ indio_dev->num_channels = ARRAY_SIZE(iadc_channels);
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct of_device_id iadc_match_table[] = {
+ { .compatible = "qcom,spmi-iadc" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, iadc_match_table);
+
+static struct platform_driver iadc_driver = {
+ .driver = {
+ .name = "qcom-spmi-iadc",
+ .of_match_table = iadc_match_table,
+ },
+ .probe = iadc_probe,
+};
+
+module_platform_driver(iadc_driver);
+
+MODULE_ALIAS("platform:qcom-spmi-iadc");
+MODULE_DESCRIPTION("Qualcomm SPMI PMIC current ADC driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Ivan T. Ivanov <iivanov@mm-sol.com>");
diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
index 63a9797775c..8d4e019ea4c 100644
--- a/drivers/iio/adc/rockchip_saradc.c
+++ b/drivers/iio/adc/rockchip_saradc.c
@@ -18,13 +18,13 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/regulator/consumer.h>
#include <linux/iio/iio.h>
#define SARADC_DATA 0x00
-#define SARADC_DATA_MASK 0x3ff
#define SARADC_STAS 0x04
#define SARADC_STAS_BUSY BIT(0)
@@ -38,15 +38,22 @@
#define SARADC_DLY_PU_SOC 0x0c
#define SARADC_DLY_PU_SOC_MASK 0x3f
-#define SARADC_BITS 10
#define SARADC_TIMEOUT msecs_to_jiffies(100)
+struct rockchip_saradc_data {
+ int num_bits;
+ const struct iio_chan_spec *channels;
+ int num_channels;
+ unsigned long clk_rate;
+};
+
struct rockchip_saradc {
void __iomem *regs;
struct clk *pclk;
struct clk *clk;
struct completion completion;
struct regulator *vref;
+ const struct rockchip_saradc_data *data;
u16 last_val;
};
@@ -90,7 +97,7 @@ static int rockchip_saradc_read_raw(struct iio_dev *indio_dev,
}
*val = ret / 1000;
- *val2 = SARADC_BITS;
+ *val2 = info->data->num_bits;
return IIO_VAL_FRACTIONAL_LOG2;
default:
return -EINVAL;
@@ -103,7 +110,7 @@ static irqreturn_t rockchip_saradc_isr(int irq, void *dev_id)
/* Read value */
info->last_val = readl_relaxed(info->regs + SARADC_DATA);
- info->last_val &= SARADC_DATA_MASK;
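+	/* Mask the result down to the resolution of this SARADC variant. */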
+ info->last_val &= GENMASK(info->data->num_bits - 1, 0);
/* Clear irq & power down adc */
writel_relaxed(0, info->regs + SARADC_CTRL);
@@ -133,12 +140,44 @@ static const struct iio_chan_spec rockchip_saradc_iio_channels[] = {
ADC_CHANNEL(2, "adc2"),
};
+static const struct rockchip_saradc_data saradc_data = {
+ .num_bits = 10,
+ .channels = rockchip_saradc_iio_channels,
+ .num_channels = ARRAY_SIZE(rockchip_saradc_iio_channels),
+ .clk_rate = 1000000,
+};
+
+static const struct iio_chan_spec rockchip_rk3066_tsadc_iio_channels[] = {
+ ADC_CHANNEL(0, "adc0"),
+ ADC_CHANNEL(1, "adc1"),
+};
+
+static const struct rockchip_saradc_data rk3066_tsadc_data = {
+ .num_bits = 12,
+ .channels = rockchip_rk3066_tsadc_iio_channels,
+ .num_channels = ARRAY_SIZE(rockchip_rk3066_tsadc_iio_channels),
+ .clk_rate = 50000,
+};
+
+static const struct of_device_id rockchip_saradc_match[] = {
+ {
+ .compatible = "rockchip,saradc",
+ .data = &saradc_data,
+ }, {
+ .compatible = "rockchip,rk3066-tsadc",
+ .data = &rk3066_tsadc_data,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rockchip_saradc_match);
+
static int rockchip_saradc_probe(struct platform_device *pdev)
{
struct rockchip_saradc *info = NULL;
struct device_node *np = pdev->dev.of_node;
struct iio_dev *indio_dev = NULL;
struct resource *mem;
+ const struct of_device_id *match;
int ret;
int irq;
@@ -152,6 +191,9 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
}
info = iio_priv(indio_dev);
+ match = of_match_device(rockchip_saradc_match, &pdev->dev);
+ info->data = match->data;
+
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
info->regs = devm_ioremap_resource(&pdev->dev, mem);
if (IS_ERR(info->regs))
@@ -192,10 +234,10 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
}
/*
- * Use a default of 1MHz for the converter clock.
+ * Use a default value for the converter clock.
* This may become user-configurable in the future.
*/
- ret = clk_set_rate(info->clk, 1000000);
+ ret = clk_set_rate(info->clk, info->data->clk_rate);
if (ret < 0) {
dev_err(&pdev->dev, "failed to set adc clk rate, %d\n", ret);
return ret;
@@ -227,8 +269,8 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
indio_dev->info = &rockchip_saradc_iio_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = rockchip_saradc_iio_channels;
- indio_dev->num_channels = ARRAY_SIZE(rockchip_saradc_iio_channels);
+ indio_dev->channels = info->data->channels;
+ indio_dev->num_channels = info->data->num_channels;
ret = iio_device_register(indio_dev);
if (ret)
@@ -296,12 +338,6 @@ static int rockchip_saradc_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(rockchip_saradc_pm_ops,
rockchip_saradc_suspend, rockchip_saradc_resume);
-static const struct of_device_id rockchip_saradc_match[] = {
- { .compatible = "rockchip,saradc" },
- {},
-};
-MODULE_DEVICE_TABLE(of, rockchip_saradc_match);
-
static struct platform_driver rockchip_saradc_driver = {
.probe = rockchip_saradc_probe,
.remove = rockchip_saradc_remove,
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index 4a10ae97dbf..8ec353c01d9 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -91,7 +91,7 @@
#define VF610_ADC_CAL 0x80
/* Other field define */
-#define VF610_ADC_ADCHC(x) ((x) & 0xF)
+#define VF610_ADC_ADCHC(x) ((x) & 0x1F)
#define VF610_ADC_AIEN (0x1 << 7)
#define VF610_ADC_CONV_DISABLE 0x1F
#define VF610_ADC_HS_COCO0 0x1
@@ -153,6 +153,12 @@ struct vf610_adc {
BIT(IIO_CHAN_INFO_SAMP_FREQ), \
}
+#define VF610_ADC_TEMPERATURE_CHAN(_idx, _chan_type) { \
+ .type = (_chan_type), \
+ .channel = (_idx), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), \
+}
+
static const struct iio_chan_spec vf610_adc_iio_channels[] = {
VF610_ADC_CHAN(0, IIO_VOLTAGE),
VF610_ADC_CHAN(1, IIO_VOLTAGE),
@@ -170,6 +176,7 @@ static const struct iio_chan_spec vf610_adc_iio_channels[] = {
VF610_ADC_CHAN(13, IIO_VOLTAGE),
VF610_ADC_CHAN(14, IIO_VOLTAGE),
VF610_ADC_CHAN(15, IIO_VOLTAGE),
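+	/* Channel 26 is routed to the SoC's internal temperature sensor. */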
+ VF610_ADC_TEMPERATURE_CHAN(26, IIO_TEMP),
/* sentinel */
};
@@ -451,6 +458,7 @@ static int vf610_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ case IIO_CHAN_INFO_PROCESSED:
mutex_lock(&indio_dev->mlock);
reinit_completion(&info->completion);
@@ -468,7 +476,23 @@ static int vf610_read_raw(struct iio_dev *indio_dev,
return ret;
}
- *val = info->value;
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ *val = info->value;
+ break;
+ case IIO_TEMP:
+ /*
+			 * Calculate the temperature in degrees Celsius times
+			 * 1000, using a sensor slope of 1.84 mV/°C and a
+			 * sensor voltage of 696 mV at 25 °C.
+ */
+ *val = 25000 - ((int)info->value - 864) * 1000000 / 1840;
+ break;
+ default:
+ mutex_unlock(&indio_dev->mlock);
+ return -EINVAL;
+ }
+
mutex_unlock(&indio_dev->mlock);
return IIO_VAL_INT;
@@ -569,9 +593,9 @@ static int vf610_adc_probe(struct platform_device *pdev)
return PTR_ERR(info->regs);
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
+ if (irq < 0) {
dev_err(&pdev->dev, "no irq resource?\n");
- return -EINVAL;
+ return irq;
}
ret = devm_request_irq(info->dev, irq,
@@ -586,8 +610,7 @@ static int vf610_adc_probe(struct platform_device *pdev)
if (IS_ERR(info->clk)) {
dev_err(&pdev->dev, "failed getting clock, err = %ld\n",
PTR_ERR(info->clk));
- ret = PTR_ERR(info->clk);
- return ret;
+ return PTR_ERR(info->clk);
}
info->vref = devm_regulator_get(&pdev->dev, "vref");
@@ -681,17 +704,19 @@ static int vf610_adc_resume(struct device *dev)
ret = clk_prepare_enable(info->clk);
if (ret)
- return ret;
+ goto disable_reg;
vf610_adc_hw_init(info);
return 0;
+
+disable_reg:
+ regulator_disable(info->vref);
+ return ret;
}
#endif
-static SIMPLE_DEV_PM_OPS(vf610_adc_pm_ops,
- vf610_adc_suspend,
- vf610_adc_resume);
+static SIMPLE_DEV_PM_OPS(vf610_adc_pm_ops, vf610_adc_suspend, vf610_adc_resume);
static struct platform_driver vf610_adc_driver = {
.probe = vf610_adc_probe,
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 24cfe4e044f..edd13d2b412 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -44,18 +44,18 @@ st_sensors_write_data_with_mask_error:
return err;
}
-static int st_sensors_match_odr(struct st_sensors *sensor,
+static int st_sensors_match_odr(struct st_sensor_settings *sensor_settings,
unsigned int odr, struct st_sensor_odr_avl *odr_out)
{
int i, ret = -EINVAL;
for (i = 0; i < ST_SENSORS_ODR_LIST_MAX; i++) {
- if (sensor->odr.odr_avl[i].hz == 0)
+ if (sensor_settings->odr.odr_avl[i].hz == 0)
goto st_sensors_match_odr_error;
- if (sensor->odr.odr_avl[i].hz == odr) {
- odr_out->hz = sensor->odr.odr_avl[i].hz;
- odr_out->value = sensor->odr.odr_avl[i].value;
+ if (sensor_settings->odr.odr_avl[i].hz == odr) {
+ odr_out->hz = sensor_settings->odr.odr_avl[i].hz;
+ odr_out->value = sensor_settings->odr.odr_avl[i].value;
ret = 0;
break;
}
@@ -71,23 +71,26 @@ int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr)
struct st_sensor_odr_avl odr_out = {0, 0};
struct st_sensor_data *sdata = iio_priv(indio_dev);
- err = st_sensors_match_odr(sdata->sensor, odr, &odr_out);
+ err = st_sensors_match_odr(sdata->sensor_settings, odr, &odr_out);
if (err < 0)
goto st_sensors_match_odr_error;
- if ((sdata->sensor->odr.addr == sdata->sensor->pw.addr) &&
- (sdata->sensor->odr.mask == sdata->sensor->pw.mask)) {
+ if ((sdata->sensor_settings->odr.addr ==
+ sdata->sensor_settings->pw.addr) &&
+ (sdata->sensor_settings->odr.mask ==
+ sdata->sensor_settings->pw.mask)) {
if (sdata->enabled == true) {
err = st_sensors_write_data_with_mask(indio_dev,
- sdata->sensor->odr.addr,
- sdata->sensor->odr.mask,
+ sdata->sensor_settings->odr.addr,
+ sdata->sensor_settings->odr.mask,
odr_out.value);
} else {
err = 0;
}
} else {
err = st_sensors_write_data_with_mask(indio_dev,
- sdata->sensor->odr.addr, sdata->sensor->odr.mask,
+ sdata->sensor_settings->odr.addr,
+ sdata->sensor_settings->odr.mask,
odr_out.value);
}
if (err >= 0)
@@ -98,16 +101,16 @@ st_sensors_match_odr_error:
}
EXPORT_SYMBOL(st_sensors_set_odr);
-static int st_sensors_match_fs(struct st_sensors *sensor,
+static int st_sensors_match_fs(struct st_sensor_settings *sensor_settings,
unsigned int fs, int *index_fs_avl)
{
int i, ret = -EINVAL;
for (i = 0; i < ST_SENSORS_FULLSCALE_AVL_MAX; i++) {
- if (sensor->fs.fs_avl[i].num == 0)
+ if (sensor_settings->fs.fs_avl[i].num == 0)
goto st_sensors_match_odr_error;
- if (sensor->fs.fs_avl[i].num == fs) {
+ if (sensor_settings->fs.fs_avl[i].num == fs) {
*index_fs_avl = i;
ret = 0;
break;
@@ -118,25 +121,24 @@ st_sensors_match_odr_error:
return ret;
}
-static int st_sensors_set_fullscale(struct iio_dev *indio_dev,
- unsigned int fs)
+static int st_sensors_set_fullscale(struct iio_dev *indio_dev, unsigned int fs)
{
int err, i = 0;
struct st_sensor_data *sdata = iio_priv(indio_dev);
- err = st_sensors_match_fs(sdata->sensor, fs, &i);
+ err = st_sensors_match_fs(sdata->sensor_settings, fs, &i);
if (err < 0)
goto st_accel_set_fullscale_error;
err = st_sensors_write_data_with_mask(indio_dev,
- sdata->sensor->fs.addr,
- sdata->sensor->fs.mask,
- sdata->sensor->fs.fs_avl[i].value);
+ sdata->sensor_settings->fs.addr,
+ sdata->sensor_settings->fs.mask,
+ sdata->sensor_settings->fs.fs_avl[i].value);
if (err < 0)
goto st_accel_set_fullscale_error;
sdata->current_fullscale = (struct st_sensor_fullscale_avl *)
- &sdata->sensor->fs.fs_avl[i];
+ &sdata->sensor_settings->fs.fs_avl[i];
return err;
st_accel_set_fullscale_error:
@@ -153,10 +155,12 @@ int st_sensors_set_enable(struct iio_dev *indio_dev, bool enable)
struct st_sensor_data *sdata = iio_priv(indio_dev);
if (enable) {
- tmp_value = sdata->sensor->pw.value_on;
- if ((sdata->sensor->odr.addr == sdata->sensor->pw.addr) &&
- (sdata->sensor->odr.mask == sdata->sensor->pw.mask)) {
- err = st_sensors_match_odr(sdata->sensor,
+ tmp_value = sdata->sensor_settings->pw.value_on;
+ if ((sdata->sensor_settings->odr.addr ==
+ sdata->sensor_settings->pw.addr) &&
+ (sdata->sensor_settings->odr.mask ==
+ sdata->sensor_settings->pw.mask)) {
+ err = st_sensors_match_odr(sdata->sensor_settings,
sdata->odr, &odr_out);
if (err < 0)
goto set_enable_error;
@@ -164,8 +168,8 @@ int st_sensors_set_enable(struct iio_dev *indio_dev, bool enable)
found = true;
}
err = st_sensors_write_data_with_mask(indio_dev,
- sdata->sensor->pw.addr,
- sdata->sensor->pw.mask, tmp_value);
+ sdata->sensor_settings->pw.addr,
+ sdata->sensor_settings->pw.mask, tmp_value);
if (err < 0)
goto set_enable_error;
@@ -175,9 +179,9 @@ int st_sensors_set_enable(struct iio_dev *indio_dev, bool enable)
sdata->odr = odr_out.hz;
} else {
err = st_sensors_write_data_with_mask(indio_dev,
- sdata->sensor->pw.addr,
- sdata->sensor->pw.mask,
- sdata->sensor->pw.value_off);
+ sdata->sensor_settings->pw.addr,
+ sdata->sensor_settings->pw.mask,
+ sdata->sensor_settings->pw.value_off);
if (err < 0)
goto set_enable_error;
@@ -194,8 +198,9 @@ int st_sensors_set_axis_enable(struct iio_dev *indio_dev, u8 axis_enable)
struct st_sensor_data *sdata = iio_priv(indio_dev);
return st_sensors_write_data_with_mask(indio_dev,
- sdata->sensor->enable_axis.addr,
- sdata->sensor->enable_axis.mask, axis_enable);
+ sdata->sensor_settings->enable_axis.addr,
+ sdata->sensor_settings->enable_axis.mask,
+ axis_enable);
}
EXPORT_SYMBOL(st_sensors_set_axis_enable);
@@ -236,13 +241,13 @@ void st_sensors_power_disable(struct iio_dev *indio_dev)
EXPORT_SYMBOL(st_sensors_power_disable);
static int st_sensors_set_drdy_int_pin(struct iio_dev *indio_dev,
- struct st_sensors_platform_data *pdata)
+ struct st_sensors_platform_data *pdata)
{
struct st_sensor_data *sdata = iio_priv(indio_dev);
switch (pdata->drdy_int_pin) {
case 1:
- if (sdata->sensor->drdy_irq.mask_int1 == 0) {
+ if (sdata->sensor_settings->drdy_irq.mask_int1 == 0) {
dev_err(&indio_dev->dev,
"DRDY on INT1 not available.\n");
return -EINVAL;
@@ -250,7 +255,7 @@ static int st_sensors_set_drdy_int_pin(struct iio_dev *indio_dev,
sdata->drdy_int_pin = 1;
break;
case 2:
- if (sdata->sensor->drdy_irq.mask_int2 == 0) {
+ if (sdata->sensor_settings->drdy_irq.mask_int2 == 0) {
dev_err(&indio_dev->dev,
"DRDY on INT2 not available.\n");
return -EINVAL;
@@ -318,7 +323,7 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
if (sdata->current_fullscale) {
err = st_sensors_set_fullscale(indio_dev,
- sdata->current_fullscale->num);
+ sdata->current_fullscale->num);
if (err < 0)
return err;
} else
@@ -330,7 +335,8 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
/* set BDU */
err = st_sensors_write_data_with_mask(indio_dev,
- sdata->sensor->bdu.addr, sdata->sensor->bdu.mask, true);
+ sdata->sensor_settings->bdu.addr,
+ sdata->sensor_settings->bdu.mask, true);
if (err < 0)
return err;
@@ -346,26 +352,28 @@ int st_sensors_set_dataready_irq(struct iio_dev *indio_dev, bool enable)
u8 drdy_mask;
struct st_sensor_data *sdata = iio_priv(indio_dev);
- if (!sdata->sensor->drdy_irq.addr)
+ if (!sdata->sensor_settings->drdy_irq.addr)
return 0;
/* Enable/Disable the interrupt generator 1. */
- if (sdata->sensor->drdy_irq.ig1.en_addr > 0) {
+ if (sdata->sensor_settings->drdy_irq.ig1.en_addr > 0) {
err = st_sensors_write_data_with_mask(indio_dev,
- sdata->sensor->drdy_irq.ig1.en_addr,
- sdata->sensor->drdy_irq.ig1.en_mask, (int)enable);
+ sdata->sensor_settings->drdy_irq.ig1.en_addr,
+ sdata->sensor_settings->drdy_irq.ig1.en_mask,
+ (int)enable);
if (err < 0)
goto st_accel_set_dataready_irq_error;
}
if (sdata->drdy_int_pin == 1)
- drdy_mask = sdata->sensor->drdy_irq.mask_int1;
+ drdy_mask = sdata->sensor_settings->drdy_irq.mask_int1;
else
- drdy_mask = sdata->sensor->drdy_irq.mask_int2;
+ drdy_mask = sdata->sensor_settings->drdy_irq.mask_int2;
/* Enable/Disable the interrupt generator for data ready. */
err = st_sensors_write_data_with_mask(indio_dev,
- sdata->sensor->drdy_irq.addr, drdy_mask, (int)enable);
+ sdata->sensor_settings->drdy_irq.addr,
+ drdy_mask, (int)enable);
st_accel_set_dataready_irq_error:
return err;
@@ -378,8 +386,8 @@ int st_sensors_set_fullscale_by_gain(struct iio_dev *indio_dev, int scale)
struct st_sensor_data *sdata = iio_priv(indio_dev);
for (i = 0; i < ST_SENSORS_FULLSCALE_AVL_MAX; i++) {
- if ((sdata->sensor->fs.fs_avl[i].gain == scale) &&
- (sdata->sensor->fs.fs_avl[i].gain != 0)) {
+ if ((sdata->sensor_settings->fs.fs_avl[i].gain == scale) &&
+ (sdata->sensor_settings->fs.fs_avl[i].gain != 0)) {
err = 0;
break;
}
@@ -388,7 +396,7 @@ int st_sensors_set_fullscale_by_gain(struct iio_dev *indio_dev, int scale)
goto st_sensors_match_scale_error;
err = st_sensors_set_fullscale(indio_dev,
- sdata->sensor->fs.fs_avl[i].num);
+ sdata->sensor_settings->fs.fs_avl[i].num);
st_sensors_match_scale_error:
return err;
@@ -439,7 +447,7 @@ int st_sensors_read_info_raw(struct iio_dev *indio_dev,
if (err < 0)
goto out;
- msleep((sdata->sensor->bootime * 1000) / sdata->odr);
+ msleep((sdata->sensor_settings->bootime * 1000) / sdata->odr);
err = st_sensors_read_axis_data(indio_dev, ch, val);
if (err < 0)
goto out;
@@ -456,7 +464,8 @@ out:
EXPORT_SYMBOL(st_sensors_read_info_raw);
int st_sensors_check_device_support(struct iio_dev *indio_dev,
- int num_sensors_list, const struct st_sensors *sensors)
+ int num_sensors_list,
+ const struct st_sensor_settings *sensor_settings)
{
u8 wai;
int i, n, err;
@@ -470,23 +479,24 @@ int st_sensors_check_device_support(struct iio_dev *indio_dev,
}
for (i = 0; i < num_sensors_list; i++) {
- if (sensors[i].wai == wai)
+ if (sensor_settings[i].wai == wai)
break;
}
if (i == num_sensors_list)
goto device_not_supported;
- for (n = 0; n < ARRAY_SIZE(sensors[i].sensors_supported); n++) {
+ for (n = 0; n < ARRAY_SIZE(sensor_settings[i].sensors_supported); n++) {
if (strcmp(indio_dev->name,
- &sensors[i].sensors_supported[n][0]) == 0)
+ &sensor_settings[i].sensors_supported[n][0]) == 0)
break;
}
- if (n == ARRAY_SIZE(sensors[i].sensors_supported)) {
+ if (n == ARRAY_SIZE(sensor_settings[i].sensors_supported)) {
dev_err(&indio_dev->dev, "device name and WhoAmI mismatch.\n");
goto sensor_name_mismatch;
}
- sdata->sensor = (struct st_sensors *)&sensors[i];
+ sdata->sensor_settings =
+ (struct st_sensor_settings *)&sensor_settings[i];
return i;
@@ -508,11 +518,11 @@ ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev,
mutex_lock(&indio_dev->mlock);
for (i = 0; i < ST_SENSORS_ODR_LIST_MAX; i++) {
- if (sdata->sensor->odr.odr_avl[i].hz == 0)
+ if (sdata->sensor_settings->odr.odr_avl[i].hz == 0)
break;
len += scnprintf(buf + len, PAGE_SIZE - len, "%d ",
- sdata->sensor->odr.odr_avl[i].hz);
+ sdata->sensor_settings->odr.odr_avl[i].hz);
}
mutex_unlock(&indio_dev->mlock);
buf[len - 1] = '\n';
@@ -530,11 +540,11 @@ ssize_t st_sensors_sysfs_scale_avail(struct device *dev,
mutex_lock(&indio_dev->mlock);
for (i = 0; i < ST_SENSORS_FULLSCALE_AVL_MAX; i++) {
- if (sdata->sensor->fs.fs_avl[i].num == 0)
+ if (sdata->sensor_settings->fs.fs_avl[i].num == 0)
break;
len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ",
- sdata->sensor->fs.fs_avl[i].gain);
+ sdata->sensor_settings->fs.fs_avl[i].gain);
}
mutex_unlock(&indio_dev->mlock);
buf[len - 1] = '\n';
diff --git a/drivers/iio/common/st_sensors/st_sensors_i2c.c b/drivers/iio/common/st_sensors/st_sensors_i2c.c
index bb6f3085f57..98cfee296d4 100644
--- a/drivers/iio/common/st_sensors/st_sensors_i2c.c
+++ b/drivers/iio/common/st_sensors/st_sensors_i2c.c
@@ -72,6 +72,7 @@ void st_sensors_i2c_configure(struct iio_dev *indio_dev,
indio_dev->dev.parent = &client->dev;
indio_dev->name = client->name;
+ sdata->dev = &client->dev;
sdata->tf = &st_sensors_tf_i2c;
sdata->get_irq_data_ready = st_sensors_i2c_get_irq;
}
diff --git a/drivers/iio/common/st_sensors/st_sensors_spi.c b/drivers/iio/common/st_sensors/st_sensors_spi.c
index 251baf6abc2..78a6a1ab3ec 100644
--- a/drivers/iio/common/st_sensors/st_sensors_spi.c
+++ b/drivers/iio/common/st_sensors/st_sensors_spi.c
@@ -111,6 +111,7 @@ void st_sensors_spi_configure(struct iio_dev *indio_dev,
indio_dev->dev.parent = &spi->dev;
indio_dev->name = spi->modalias;
+ sdata->dev = &spi->dev;
sdata->tf = &st_sensors_tf_spi;
sdata->get_irq_data_ready = st_sensors_spi_get_irq;
}
diff --git a/drivers/iio/gyro/st_gyro.h b/drivers/iio/gyro/st_gyro.h
index c197360c450..5353d6328c5 100644
--- a/drivers/iio/gyro/st_gyro.h
+++ b/drivers/iio/gyro/st_gyro.h
@@ -30,8 +30,7 @@ static const struct st_sensors_platform_data gyro_pdata = {
.drdy_int_pin = 2,
};
-int st_gyro_common_probe(struct iio_dev *indio_dev,
- struct st_sensors_platform_data *pdata);
+int st_gyro_common_probe(struct iio_dev *indio_dev);
void st_gyro_common_remove(struct iio_dev *indio_dev);
#ifdef CONFIG_IIO_BUFFER
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index f156fc6c5c6..f07a2336f7d 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -103,7 +103,7 @@ static const struct iio_chan_spec st_gyro_16bit_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(3)
};
-static const struct st_sensors st_gyro_sensors[] = {
+static const struct st_sensor_settings st_gyro_sensors_settings[] = {
{
.wai = ST_GYRO_1_WAI_EXP,
.sensors_supported = {
@@ -309,8 +309,7 @@ static const struct iio_trigger_ops st_gyro_trigger_ops = {
#define ST_GYRO_TRIGGER_OPS NULL
#endif
-int st_gyro_common_probe(struct iio_dev *indio_dev,
- struct st_sensors_platform_data *pdata)
+int st_gyro_common_probe(struct iio_dev *indio_dev)
{
struct st_sensor_data *gdata = iio_priv(indio_dev);
int irq = gdata->get_irq_data_ready(indio_dev);
@@ -322,20 +321,22 @@ int st_gyro_common_probe(struct iio_dev *indio_dev,
st_sensors_power_enable(indio_dev);
err = st_sensors_check_device_support(indio_dev,
- ARRAY_SIZE(st_gyro_sensors), st_gyro_sensors);
+ ARRAY_SIZE(st_gyro_sensors_settings),
+ st_gyro_sensors_settings);
if (err < 0)
return err;
gdata->num_data_channels = ST_GYRO_NUMBER_DATA_CHANNELS;
- gdata->multiread_bit = gdata->sensor->multi_read_bit;
- indio_dev->channels = gdata->sensor->ch;
+ gdata->multiread_bit = gdata->sensor_settings->multi_read_bit;
+ indio_dev->channels = gdata->sensor_settings->ch;
indio_dev->num_channels = ST_SENSORS_NUMBER_ALL_CHANNELS;
gdata->current_fullscale = (struct st_sensor_fullscale_avl *)
- &gdata->sensor->fs.fs_avl[0];
- gdata->odr = gdata->sensor->odr.odr_avl[0].hz;
+ &gdata->sensor_settings->fs.fs_avl[0];
+ gdata->odr = gdata->sensor_settings->odr.odr_avl[0].hz;
- err = st_sensors_init_sensor(indio_dev, pdata);
+ err = st_sensors_init_sensor(indio_dev,
+ (struct st_sensors_platform_data *)&gyro_pdata);
if (err < 0)
return err;
diff --git a/drivers/iio/gyro/st_gyro_i2c.c b/drivers/iio/gyro/st_gyro_i2c.c
index 8fa0ad2ef4e..64480b16c68 100644
--- a/drivers/iio/gyro/st_gyro_i2c.c
+++ b/drivers/iio/gyro/st_gyro_i2c.c
@@ -67,13 +67,11 @@ static int st_gyro_i2c_probe(struct i2c_client *client,
return -ENOMEM;
gdata = iio_priv(indio_dev);
- gdata->dev = &client->dev;
st_sensors_of_i2c_probe(client, st_gyro_of_match);
st_sensors_i2c_configure(indio_dev, client, gdata);
- err = st_gyro_common_probe(indio_dev,
- (struct st_sensors_platform_data *)&gyro_pdata);
+ err = st_gyro_common_probe(indio_dev);
if (err < 0)
return err;
diff --git a/drivers/iio/gyro/st_gyro_spi.c b/drivers/iio/gyro/st_gyro_spi.c
index b4ad3be2668..e59bead6bc3 100644
--- a/drivers/iio/gyro/st_gyro_spi.c
+++ b/drivers/iio/gyro/st_gyro_spi.c
@@ -29,12 +29,10 @@ static int st_gyro_spi_probe(struct spi_device *spi)
return -ENOMEM;
gdata = iio_priv(indio_dev);
- gdata->dev = &spi->dev;
st_sensors_spi_configure(indio_dev, spi, gdata);
- err = st_gyro_common_probe(indio_dev,
- (struct st_sensors_platform_data *)&gyro_pdata);
+ err = st_gyro_common_probe(indio_dev);
if (err < 0)
return err;
diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
index e116bd8dd0e..4813b793b9f 100644
--- a/drivers/iio/humidity/Kconfig
+++ b/drivers/iio/humidity/Kconfig
@@ -22,4 +22,14 @@ config SI7005
To compile this driver as a module, choose M here: the module
will be called si7005.
+config SI7020
+ tristate "Si7013/20/21 Relative Humidity and Temperature Sensors"
+ depends on I2C
+ help
+ Say yes here to build support for the Silicon Labs Si7013/20/21
+ Relative Humidity and Temperature Sensors.
+
+ To compile this driver as a module, choose M here: the module
+ will be called si7020.
+
endmenu
diff --git a/drivers/iio/humidity/Makefile b/drivers/iio/humidity/Makefile
index e3f3d942e64..86e2d26e9f4 100644
--- a/drivers/iio/humidity/Makefile
+++ b/drivers/iio/humidity/Makefile
@@ -4,3 +4,4 @@
obj-$(CONFIG_DHT11) += dht11.o
obj-$(CONFIG_SI7005) += si7005.o
+obj-$(CONFIG_SI7020) += si7020.o
diff --git a/drivers/iio/humidity/si7020.c b/drivers/iio/humidity/si7020.c
new file mode 100644
index 00000000000..b54164677b8
--- /dev/null
+++ b/drivers/iio/humidity/si7020.c
@@ -0,0 +1,161 @@
+/*
+ * si7020.c - Silicon Labs Si7013/20/21 Relative Humidity and Temp Sensors
+ * Copyright (c) 2013,2014 Uplogix, Inc.
+ * David Barksdale <dbarksdale@uplogix.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * The Silicon Labs Si7013/20/21 Relative Humidity and Temperature Sensors
+ * are i2c devices which have an identical programming interface for
+ * measuring relative humidity and temperature. The Si7013 has an additional
+ * temperature input which this driver does not support.
+ *
+ * Data Sheets:
+ * Si7013: http://www.silabs.com/Support%20Documents/TechnicalDocs/Si7013.pdf
+ * Si7020: http://www.silabs.com/Support%20Documents/TechnicalDocs/Si7020.pdf
+ * Si7021: http://www.silabs.com/Support%20Documents/TechnicalDocs/Si7021.pdf
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+/* Measure Relative Humidity, Hold Master Mode */
+#define SI7020CMD_RH_HOLD 0xE5
+/* Measure Temperature, Hold Master Mode */
+#define SI7020CMD_TEMP_HOLD 0xE3
+/* Software Reset */
+#define SI7020CMD_RESET 0xFE
+
+static int si7020_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct i2c_client *client = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = i2c_smbus_read_word_data(client,
+ chan->type == IIO_TEMP ?
+ SI7020CMD_TEMP_HOLD :
+ SI7020CMD_RH_HOLD);
+ if (ret < 0)
+ return ret;
+ *val = ret >> 2;
+ if (chan->type == IIO_HUMIDITYRELATIVE)
+ *val &= GENMASK(11, 0);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ if (chan->type == IIO_TEMP)
+ *val = 175720; /* = 175.72 * 1000 */
+ else
+ *val = 125 * 1000;
+ *val2 = 65536 >> 2;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_CHAN_INFO_OFFSET:
+ /*
+ * Since iio_convert_raw_to_processed_unlocked assumes offset
+ * is an integer we have to round these values and lose
+ * accuracy.
+ * Relative humidity will be 0.0032959% too high and
+ * temperature will be 0.00277344 degrees too high.
+ * This is no big deal because it's within the accuracy of the
+ * sensor.
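+		 * For example, a temperature reading with a raw value of 7168
+		 * is reported as (7168 - 4368) * 175720 / 16384, roughly
+		 * 30030 milli-degrees Celsius (about 30.0 °C), matching the
+		 * datasheet formula T = 175.72 * code / 65536 - 46.85 with
+		 * code = raw << 2.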
+ */
+ if (chan->type == IIO_TEMP)
+ *val = -4368; /* = -46.85 * (65536 >> 2) / 175.72 */
+ else
+ *val = -786; /* = -6 * (65536 >> 2) / 125 */
+ return IIO_VAL_INT;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_chan_spec si7020_channels[] = {
+ {
+ .type = IIO_HUMIDITYRELATIVE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET),
+ },
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET),
+ }
+};
+
+static const struct iio_info si7020_info = {
+ .read_raw = si7020_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int si7020_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct i2c_client **data;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_WRITE_BYTE |
+ I2C_FUNC_SMBUS_READ_WORD_DATA))
+ return -ENODEV;
+
+ /* Reset device, loads default settings. */
+ ret = i2c_smbus_write_byte(client, SI7020CMD_RESET);
+ if (ret < 0)
+ return ret;
+ /* Wait the maximum power-up time after software reset. */
+ msleep(15);
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*client));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ *data = client;
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->name = dev_name(&client->dev);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &si7020_info;
+ indio_dev->channels = si7020_channels;
+ indio_dev->num_channels = ARRAY_SIZE(si7020_channels);
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct i2c_device_id si7020_id[] = {
+ { "si7020", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, si7020_id);
+
+static struct i2c_driver si7020_driver = {
+ .driver.name = "si7020",
+ .probe = si7020_probe,
+ .id_table = si7020_id,
+};
+
+module_i2c_driver(si7020_driver);
+MODULE_DESCRIPTION("Silicon Labs Si7013/20/21 Relative Humidity and Temperature Sensors");
+MODULE_AUTHOR("David Barksdale <dbarksdale@uplogix.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index f0846108d00..866fe904cba 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -100,6 +100,28 @@ static int iio_dev_node_match(struct device *dev, void *data)
return dev->of_node == data && dev->type == &iio_device_type;
}
+/**
+ * __of_iio_simple_xlate - translate iiospec to the IIO channel index
+ * @indio_dev: pointer to the iio_dev structure
+ * @iiospec: IIO specifier as found in the device tree
+ *
+ * This is a simple translation function, suitable for the common case of
+ * 1:1 mapped channels in IIO chips. It performs only one sanity check:
+ * that the IIO index is less than num_channels (as specified in the
+ * iio_dev).
+ */
+static int __of_iio_simple_xlate(struct iio_dev *indio_dev,
+ const struct of_phandle_args *iiospec)
+{
+ if (!iiospec->args_count)
+ return 0;
+
+ if (iiospec->args[0] >= indio_dev->num_channels)
+ return -EINVAL;
+
+ return iiospec->args[0];
+}
+
static int __of_iio_channel_get(struct iio_channel *channel,
struct device_node *np, int index)
{
@@ -122,18 +144,19 @@ static int __of_iio_channel_get(struct iio_channel *channel,
indio_dev = dev_to_iio_dev(idev);
channel->indio_dev = indio_dev;
- index = iiospec.args_count ? iiospec.args[0] : 0;
- if (index >= indio_dev->num_channels) {
- err = -EINVAL;
+ if (indio_dev->info->of_xlate)
+ index = indio_dev->info->of_xlate(indio_dev, &iiospec);
+ else
+ index = __of_iio_simple_xlate(indio_dev, &iiospec);
+ if (index < 0)
goto err_put;
- }
channel->channel = &indio_dev->channels[index];
return 0;
err_put:
iio_device_put(indio_dev);
- return err;
+ return index;
}
static struct iio_channel *of_iio_channel_get(struct device_node *np, int index)
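With the of_xlate hook now consulted before the default translation, a provider with a non-trivial specifier can supply its own mapping. A hypothetical driver-side sketch (the names and the two-cell encoding are illustrative only, assuming eight channels per bank); the driver would then point .of_xlate in its struct iio_info at this function.

/* Hypothetical: translate a two-cell specifier <bank channel> into a
 * flat index, returning -EINVAL on a malformed specifier, exactly as
 * __of_iio_simple_xlate does for the one-cell case.
 */
static int foo_adc_of_xlate(struct iio_dev *indio_dev,
                            const struct of_phandle_args *iiospec)
{
        unsigned int idx;

        if (iiospec->args_count != 2)
                return -EINVAL;

        idx = iiospec->args[0] * 8 + iiospec->args[1];
        if (idx >= indio_dev->num_channels)
                return -EINVAL;

        return idx;
}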
diff --git a/drivers/iio/magnetometer/st_magn.h b/drivers/iio/magnetometer/st_magn.h
index 694e33e0fb7..7e81d00ef0c 100644
--- a/drivers/iio/magnetometer/st_magn.h
+++ b/drivers/iio/magnetometer/st_magn.h
@@ -18,8 +18,7 @@
#define LSM303DLM_MAGN_DEV_NAME "lsm303dlm_magn"
#define LIS3MDL_MAGN_DEV_NAME "lis3mdl"
-int st_magn_common_probe(struct iio_dev *indio_dev,
- struct st_sensors_platform_data *pdata);
+int st_magn_common_probe(struct iio_dev *indio_dev);
void st_magn_common_remove(struct iio_dev *indio_dev);
#ifdef CONFIG_IIO_BUFFER
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index 68cae86dbd2..8ade473f99f 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -149,7 +149,7 @@ static const struct iio_chan_spec st_magn_2_16bit_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(3)
};
-static const struct st_sensors st_magn_sensors[] = {
+static const struct st_sensor_settings st_magn_sensors_settings[] = {
{
.wai = ST_MAGN_1_WAI_EXP,
.sensors_supported = {
@@ -361,8 +361,7 @@ static const struct iio_info magn_info = {
.write_raw = &st_magn_write_raw,
};
-int st_magn_common_probe(struct iio_dev *indio_dev,
- struct st_sensors_platform_data *pdata)
+int st_magn_common_probe(struct iio_dev *indio_dev)
{
struct st_sensor_data *mdata = iio_priv(indio_dev);
int irq = mdata->get_irq_data_ready(indio_dev);
@@ -374,20 +373,21 @@ int st_magn_common_probe(struct iio_dev *indio_dev,
st_sensors_power_enable(indio_dev);
err = st_sensors_check_device_support(indio_dev,
- ARRAY_SIZE(st_magn_sensors), st_magn_sensors);
+ ARRAY_SIZE(st_magn_sensors_settings),
+ st_magn_sensors_settings);
if (err < 0)
return err;
mdata->num_data_channels = ST_MAGN_NUMBER_DATA_CHANNELS;
- mdata->multiread_bit = mdata->sensor->multi_read_bit;
- indio_dev->channels = mdata->sensor->ch;
+ mdata->multiread_bit = mdata->sensor_settings->multi_read_bit;
+ indio_dev->channels = mdata->sensor_settings->ch;
indio_dev->num_channels = ST_SENSORS_NUMBER_ALL_CHANNELS;
mdata->current_fullscale = (struct st_sensor_fullscale_avl *)
- &mdata->sensor->fs.fs_avl[0];
- mdata->odr = mdata->sensor->odr.odr_avl[0].hz;
+ &mdata->sensor_settings->fs.fs_avl[0];
+ mdata->odr = mdata->sensor_settings->odr.odr_avl[0].hz;
- err = st_sensors_init_sensor(indio_dev, pdata);
+ err = st_sensors_init_sensor(indio_dev, NULL);
if (err < 0)
return err;
diff --git a/drivers/iio/magnetometer/st_magn_i2c.c b/drivers/iio/magnetometer/st_magn_i2c.c
index 68925005844..92e5c15452a 100644
--- a/drivers/iio/magnetometer/st_magn_i2c.c
+++ b/drivers/iio/magnetometer/st_magn_i2c.c
@@ -51,12 +51,11 @@ static int st_magn_i2c_probe(struct i2c_client *client,
return -ENOMEM;
mdata = iio_priv(indio_dev);
- mdata->dev = &client->dev;
st_sensors_of_i2c_probe(client, st_magn_of_match);
st_sensors_i2c_configure(indio_dev, client, mdata);
- err = st_magn_common_probe(indio_dev, NULL);
+ err = st_magn_common_probe(indio_dev);
if (err < 0)
return err;
diff --git a/drivers/iio/magnetometer/st_magn_spi.c b/drivers/iio/magnetometer/st_magn_spi.c
index a6143ea51df..7adacf16014 100644
--- a/drivers/iio/magnetometer/st_magn_spi.c
+++ b/drivers/iio/magnetometer/st_magn_spi.c
@@ -29,11 +29,10 @@ static int st_magn_spi_probe(struct spi_device *spi)
return -ENOMEM;
mdata = iio_priv(indio_dev);
- mdata->dev = &spi->dev;
st_sensors_spi_configure(indio_dev, spi, mdata);
- err = st_magn_common_probe(indio_dev, NULL);
+ err = st_magn_common_probe(indio_dev);
if (err < 0)
return err;
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index 15afbc91952..a3be5379207 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -5,6 +5,17 @@
menu "Pressure sensors"
+config BMP280
+ tristate "Bosch Sensortec BMP280 pressure sensor driver"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say yes here to build support for the Bosch Sensortec BMP280
+ pressure and temperature sensor.
+
+ To compile this driver as a module, choose M here: the module
+ will be called bmp280.
+
config HID_SENSOR_PRESS
depends on HID_SENSOR_HUB
select IIO_BUFFER
diff --git a/drivers/iio/pressure/Makefile b/drivers/iio/pressure/Makefile
index 90a37e85cf2..88011f2ae00 100644
--- a/drivers/iio/pressure/Makefile
+++ b/drivers/iio/pressure/Makefile
@@ -3,6 +3,7 @@
#
# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_BMP280) += bmp280.o
obj-$(CONFIG_HID_SENSOR_PRESS) += hid-sensor-press.o
obj-$(CONFIG_MPL115) += mpl115.o
obj-$(CONFIG_MPL3115) += mpl3115.o
diff --git a/drivers/iio/pressure/bmp280.c b/drivers/iio/pressure/bmp280.c
new file mode 100644
index 00000000000..75038dacfff
--- /dev/null
+++ b/drivers/iio/pressure/bmp280.c
@@ -0,0 +1,455 @@
+/*
+ * Copyright (c) 2014 Intel Corporation
+ *
+ * Driver for Bosch Sensortec BMP280 digital pressure sensor.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#define pr_fmt(fmt) "bmp280: " fmt
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/acpi.h>
+#include <linux/regmap.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define BMP280_REG_TEMP_XLSB 0xFC
+#define BMP280_REG_TEMP_LSB 0xFB
+#define BMP280_REG_TEMP_MSB 0xFA
+#define BMP280_REG_PRESS_XLSB 0xF9
+#define BMP280_REG_PRESS_LSB 0xF8
+#define BMP280_REG_PRESS_MSB 0xF7
+
+#define BMP280_REG_CONFIG 0xF5
+#define BMP280_REG_CTRL_MEAS 0xF4
+#define BMP280_REG_STATUS 0xF3
+#define BMP280_REG_RESET 0xE0
+#define BMP280_REG_ID 0xD0
+
+#define BMP280_REG_COMP_TEMP_START 0x88
+#define BMP280_COMP_TEMP_REG_COUNT 6
+
+#define BMP280_REG_COMP_PRESS_START 0x8E
+#define BMP280_COMP_PRESS_REG_COUNT 18
+
+#define BMP280_FILTER_MASK (BIT(4) | BIT(3) | BIT(2))
+#define BMP280_FILTER_OFF 0
+#define BMP280_FILTER_2X BIT(2)
+#define BMP280_FILTER_4X BIT(3)
+#define BMP280_FILTER_8X (BIT(3) | BIT(2))
+#define BMP280_FILTER_16X BIT(4)
+
+#define BMP280_OSRS_TEMP_MASK (BIT(7) | BIT(6) | BIT(5))
+#define BMP280_OSRS_TEMP_SKIP 0
+#define BMP280_OSRS_TEMP_1X BIT(5)
+#define BMP280_OSRS_TEMP_2X BIT(6)
+#define BMP280_OSRS_TEMP_4X (BIT(6) | BIT(5))
+#define BMP280_OSRS_TEMP_8X BIT(7)
+#define BMP280_OSRS_TEMP_16X (BIT(7) | BIT(5))
+
+#define BMP280_OSRS_PRESS_MASK (BIT(4) | BIT(3) | BIT(2))
+#define BMP280_OSRS_PRESS_SKIP 0
+#define BMP280_OSRS_PRESS_1X BIT(2)
+#define BMP280_OSRS_PRESS_2X BIT(3)
+#define BMP280_OSRS_PRESS_4X (BIT(3) | BIT(2))
+#define BMP280_OSRS_PRESS_8X BIT(4)
+#define BMP280_OSRS_PRESS_16X (BIT(4) | BIT(2))
+
+#define BMP280_MODE_MASK (BIT(1) | BIT(0))
+#define BMP280_MODE_SLEEP 0
+#define BMP280_MODE_FORCED BIT(0)
+#define BMP280_MODE_NORMAL (BIT(1) | BIT(0))
+
+#define BMP280_CHIP_ID 0x58
+#define BMP280_SOFT_RESET_VAL 0xB6
+
+struct bmp280_data {
+ struct i2c_client *client;
+ struct mutex lock;
+ struct regmap *regmap;
+
+ /*
+ * Carryover value from temperature conversion, used in pressure
+ * calculation.
+ */
+ s32 t_fine;
+};
+
+/* Compensation parameters. */
+struct bmp280_comp_temp {
+ u16 dig_t1;
+ s16 dig_t2, dig_t3;
+};
+
+struct bmp280_comp_press {
+ u16 dig_p1;
+ s16 dig_p2, dig_p3, dig_p4, dig_p5, dig_p6, dig_p7, dig_p8, dig_p9;
+};
+
+static const struct iio_chan_spec bmp280_channels[] = {
+ {
+ .type = IIO_PRESSURE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ },
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ },
+};
+
+static bool bmp280_is_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case BMP280_REG_CONFIG:
+ case BMP280_REG_CTRL_MEAS:
+ case BMP280_REG_RESET:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool bmp280_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case BMP280_REG_TEMP_XLSB:
+ case BMP280_REG_TEMP_LSB:
+ case BMP280_REG_TEMP_MSB:
+ case BMP280_REG_PRESS_XLSB:
+ case BMP280_REG_PRESS_LSB:
+ case BMP280_REG_PRESS_MSB:
+ case BMP280_REG_STATUS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config bmp280_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = BMP280_REG_TEMP_XLSB,
+ .cache_type = REGCACHE_RBTREE,
+
+ .writeable_reg = bmp280_is_writeable_reg,
+ .volatile_reg = bmp280_is_volatile_reg,
+};
+
+static int bmp280_read_compensation_temp(struct bmp280_data *data,
+ struct bmp280_comp_temp *comp)
+{
+ int ret;
+ __le16 buf[BMP280_COMP_TEMP_REG_COUNT / 2];
+
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_TEMP_START,
+ buf, BMP280_COMP_TEMP_REG_COUNT);
+ if (ret < 0) {
+ dev_err(&data->client->dev,
+ "failed to read temperature calibration parameters\n");
+ return ret;
+ }
+
+ comp->dig_t1 = (u16) le16_to_cpu(buf[0]);
+ comp->dig_t2 = (s16) le16_to_cpu(buf[1]);
+ comp->dig_t3 = (s16) le16_to_cpu(buf[2]);
+
+ return 0;
+}
+
+static int bmp280_read_compensation_press(struct bmp280_data *data,
+ struct bmp280_comp_press *comp)
+{
+ int ret;
+ __le16 buf[BMP280_COMP_PRESS_REG_COUNT / 2];
+
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_PRESS_START,
+ buf, BMP280_COMP_PRESS_REG_COUNT);
+ if (ret < 0) {
+ dev_err(&data->client->dev,
+ "failed to read pressure calibration parameters\n");
+ return ret;
+ }
+
+ comp->dig_p1 = (u16) le16_to_cpu(buf[0]);
+ comp->dig_p2 = (s16) le16_to_cpu(buf[1]);
+ comp->dig_p3 = (s16) le16_to_cpu(buf[2]);
+ comp->dig_p4 = (s16) le16_to_cpu(buf[3]);
+ comp->dig_p5 = (s16) le16_to_cpu(buf[4]);
+ comp->dig_p6 = (s16) le16_to_cpu(buf[5]);
+ comp->dig_p7 = (s16) le16_to_cpu(buf[6]);
+ comp->dig_p8 = (s16) le16_to_cpu(buf[7]);
+ comp->dig_p9 = (s16) le16_to_cpu(buf[8]);
+
+ return 0;
+}
+
+/*
+ * Returns temperature in DegC, resolution is 0.01 DegC. Output value of
+ * "5123" equals 51.23 DegC. t_fine carries fine temperature as global
+ * value.
+ *
+ * Taken from datasheet, Section 3.11.3, "Compensation formula".
+ */
+static s32 bmp280_compensate_temp(struct bmp280_data *data,
+ struct bmp280_comp_temp *comp,
+ s32 adc_temp)
+{
+ s32 var1, var2, t;
+
+ var1 = (((adc_temp >> 3) - ((s32) comp->dig_t1 << 1)) *
+ ((s32) comp->dig_t2)) >> 11;
+ var2 = (((((adc_temp >> 4) - ((s32) comp->dig_t1)) *
+ ((adc_temp >> 4) - ((s32) comp->dig_t1))) >> 12) *
+ ((s32) comp->dig_t3)) >> 14;
+
+ data->t_fine = var1 + var2;
+ t = (data->t_fine * 5 + 128) >> 8;
+
+ return t;
+}
+
+/*
+ * Returns pressure in Pa as unsigned 32 bit integer in Q24.8 format (24
+ * integer bits and 8 fractional bits). Output value of "24674867"
+ * represents 24674867/256 = 96386.2 Pa = 963.862 hPa
+ *
+ * Taken from datasheet, Section 3.11.3, "Compensation formula".
+ */
+static u32 bmp280_compensate_press(struct bmp280_data *data,
+ struct bmp280_comp_press *comp,
+ s32 adc_press)
+{
+ s64 var1, var2, p;
+
+ var1 = ((s64) data->t_fine) - 128000;
+ var2 = var1 * var1 * (s64) comp->dig_p6;
+ var2 = var2 + ((var1 * (s64) comp->dig_p5) << 17);
+ var2 = var2 + (((s64) comp->dig_p4) << 35);
+ var1 = ((var1 * var1 * (s64) comp->dig_p3) >> 8) +
+ ((var1 * (s64) comp->dig_p2) << 12);
+ var1 = (((((s64) 1) << 47) + var1)) * ((s64) comp->dig_p1) >> 33;
+
+ if (var1 == 0)
+ return 0;
+
+ p = ((((s64) 1048576 - adc_press) << 31) - var2) * 3125;
+ p = div64_s64(p, var1);
+ var1 = (((s64) comp->dig_p9) * (p >> 13) * (p >> 13)) >> 25;
+ var2 = (((s64) comp->dig_p8) * p) >> 19;
+ p = ((p + var1 + var2) >> 8) + (((s64) comp->dig_p7) << 4);
+
+ return (u32) p;
+}
+
+static int bmp280_read_temp(struct bmp280_data *data,
+ int *val)
+{
+ int ret;
+ __be32 tmp = 0;
+ s32 adc_temp, comp_temp;
+ struct bmp280_comp_temp comp;
+
+ ret = bmp280_read_compensation_temp(data, &comp);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_TEMP_MSB,
+ (u8 *) &tmp, 3);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "failed to read temperature\n");
+ return ret;
+ }
+
+ adc_temp = be32_to_cpu(tmp) >> 12;
+ comp_temp = bmp280_compensate_temp(data, &comp, adc_temp);
+
+ /*
+ * val might be NULL if we're called by the read_press routine,
+ * which only cares about the carried-over t_fine value.
+ */
+ if (val) {
+ *val = comp_temp * 10;
+ return IIO_VAL_INT;
+ }
+
+ return 0;
+}
+
+static int bmp280_read_press(struct bmp280_data *data,
+ int *val, int *val2)
+{
+ int ret;
+ __be32 tmp = 0;
+ s32 adc_press;
+ u32 comp_press;
+ struct bmp280_comp_press comp;
+
+ ret = bmp280_read_compensation_press(data, &comp);
+ if (ret < 0)
+ return ret;
+
+ /* Read and compensate temperature so we get a reading of t_fine. */
+ ret = bmp280_read_temp(data, NULL);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_PRESS_MSB,
+ (u8 *) &tmp, 3);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "failed to read pressure\n");
+ return ret;
+ }
+
+ adc_press = be32_to_cpu(tmp) >> 12;
+ comp_press = bmp280_compensate_press(data, &comp, adc_press);
+
+ *val = comp_press;
+ *val2 = 256000;
+
+ return IIO_VAL_FRACTIONAL;
+}
+
+static int bmp280_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ int ret;
+ struct bmp280_data *data = iio_priv(indio_dev);
+
+ mutex_lock(&data->lock);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_PROCESSED:
+ switch (chan->type) {
+ case IIO_PRESSURE:
+ ret = bmp280_read_press(data, val, val2);
+ break;
+ case IIO_TEMP:
+ ret = bmp280_read_temp(data, val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static const struct iio_info bmp280_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &bmp280_read_raw,
+};
+
+static int bmp280_chip_init(struct bmp280_data *data)
+{
+ int ret;
+
+ ret = regmap_update_bits(data->regmap, BMP280_REG_CTRL_MEAS,
+ BMP280_OSRS_TEMP_MASK |
+ BMP280_OSRS_PRESS_MASK |
+ BMP280_MODE_MASK,
+ BMP280_OSRS_TEMP_2X |
+ BMP280_OSRS_PRESS_16X |
+ BMP280_MODE_NORMAL);
+ if (ret < 0) {
+ dev_err(&data->client->dev,
+ "failed to write config register\n");
+ return ret;
+ }
+
+ ret = regmap_update_bits(data->regmap, BMP280_REG_CONFIG,
+ BMP280_FILTER_MASK,
+ BMP280_FILTER_4X);
+ if (ret < 0) {
+ dev_err(&data->client->dev,
+ "failed to write config register\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int bmp280_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct iio_dev *indio_dev;
+ struct bmp280_data *data;
+ unsigned int chip_id;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, indio_dev);
+ data = iio_priv(indio_dev);
+ mutex_init(&data->lock);
+ data->client = client;
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->name = id->name;
+ indio_dev->channels = bmp280_channels;
+ indio_dev->num_channels = ARRAY_SIZE(bmp280_channels);
+ indio_dev->info = &bmp280_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ data->regmap = devm_regmap_init_i2c(client, &bmp280_regmap_config);
+ if (IS_ERR(data->regmap)) {
+ dev_err(&client->dev, "failed to allocate register map\n");
+ return PTR_ERR(data->regmap);
+ }
+
+ ret = regmap_read(data->regmap, BMP280_REG_ID, &chip_id);
+ if (ret < 0)
+ return ret;
+ if (chip_id != BMP280_CHIP_ID) {
+ dev_err(&client->dev, "bad chip id. expected %x got %x\n",
+ BMP280_CHIP_ID, chip_id);
+ return -EINVAL;
+ }
+
+ ret = bmp280_chip_init(data);
+ if (ret < 0)
+ return ret;
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct acpi_device_id bmp280_acpi_match[] = {
+ {"BMP0280", 0},
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, bmp280_acpi_match);
+
+static const struct i2c_device_id bmp280_id[] = {
+ {"bmp280", 0},
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, bmp280_id);
+
+static struct i2c_driver bmp280_driver = {
+ .driver = {
+ .name = "bmp280",
+ .acpi_match_table = ACPI_PTR(bmp280_acpi_match),
+ },
+ .probe = bmp280_probe,
+ .id_table = bmp280_id,
+};
+module_i2c_driver(bmp280_driver);
+
+MODULE_AUTHOR("Vlad Dogaru <vlad.dogaru@intel.com>");
+MODULE_DESCRIPTION("Driver for Bosch Sensortec BMP280 pressure and temperature sensor");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/pressure/st_pressure.h b/drivers/iio/pressure/st_pressure.h
index 242943c0c4e..f5f41490060 100644
--- a/drivers/iio/pressure/st_pressure.h
+++ b/drivers/iio/pressure/st_pressure.h
@@ -26,8 +26,7 @@ static const struct st_sensors_platform_data default_press_pdata = {
.drdy_int_pin = 1,
};
-int st_press_common_probe(struct iio_dev *indio_dev,
- struct st_sensors_platform_data *pdata);
+int st_press_common_probe(struct iio_dev *indio_dev);
void st_press_common_remove(struct iio_dev *indio_dev);
#ifdef CONFIG_IIO_BUFFER
diff --git a/drivers/iio/pressure/st_pressure_buffer.c b/drivers/iio/pressure/st_pressure_buffer.c
index b37b1c9ac93..2ff53f22235 100644
--- a/drivers/iio/pressure/st_pressure_buffer.c
+++ b/drivers/iio/pressure/st_pressure_buffer.c
@@ -38,10 +38,10 @@ static int st_press_buffer_preenable(struct iio_dev *indio_dev)
static int st_press_buffer_postenable(struct iio_dev *indio_dev)
{
int err;
- struct st_sensor_data *pdata = iio_priv(indio_dev);
+ struct st_sensor_data *press_data = iio_priv(indio_dev);
- pdata->buffer_data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
- if (pdata->buffer_data == NULL) {
+ press_data->buffer_data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
+ if (press_data->buffer_data == NULL) {
err = -ENOMEM;
goto allocate_memory_error;
}
@@ -53,7 +53,7 @@ static int st_press_buffer_postenable(struct iio_dev *indio_dev)
return err;
st_press_buffer_postenable_error:
- kfree(pdata->buffer_data);
+ kfree(press_data->buffer_data);
allocate_memory_error:
return err;
}
@@ -61,7 +61,7 @@ allocate_memory_error:
static int st_press_buffer_predisable(struct iio_dev *indio_dev)
{
int err;
- struct st_sensor_data *pdata = iio_priv(indio_dev);
+ struct st_sensor_data *press_data = iio_priv(indio_dev);
err = iio_triggered_buffer_predisable(indio_dev);
if (err < 0)
@@ -70,7 +70,7 @@ static int st_press_buffer_predisable(struct iio_dev *indio_dev)
err = st_sensors_set_enable(indio_dev, false);
st_press_buffer_predisable_error:
- kfree(pdata->buffer_data);
+ kfree(press_data->buffer_data);
return err;
}
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index 473d914ef47..97baf40d424 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -175,7 +175,7 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(1)
};
-static const struct st_sensors st_press_sensors[] = {
+static const struct st_sensor_settings st_press_sensors_settings[] = {
{
.wai = ST_PRESS_LPS331AP_WAI_EXP,
.sensors_supported = {
@@ -333,7 +333,7 @@ static int st_press_read_raw(struct iio_dev *indio_dev,
int *val2, long mask)
{
int err;
- struct st_sensor_data *pdata = iio_priv(indio_dev);
+ struct st_sensor_data *press_data = iio_priv(indio_dev);
switch (mask) {
case IIO_CHAN_INFO_RAW:
@@ -347,10 +347,10 @@ static int st_press_read_raw(struct iio_dev *indio_dev,
switch (ch->type) {
case IIO_PRESSURE:
- *val2 = pdata->current_fullscale->gain;
+ *val2 = press_data->current_fullscale->gain;
break;
case IIO_TEMP:
- *val2 = pdata->current_fullscale->gain2;
+ *val2 = press_data->current_fullscale->gain2;
break;
default:
err = -EINVAL;
@@ -371,7 +371,7 @@ static int st_press_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_FRACTIONAL;
case IIO_CHAN_INFO_SAMP_FREQ:
- *val = pdata->odr;
+ *val = press_data->odr;
return IIO_VAL_INT;
default:
return -EINVAL;
@@ -409,11 +409,10 @@ static const struct iio_trigger_ops st_press_trigger_ops = {
#define ST_PRESS_TRIGGER_OPS NULL
#endif
-int st_press_common_probe(struct iio_dev *indio_dev,
- struct st_sensors_platform_data *plat_data)
+int st_press_common_probe(struct iio_dev *indio_dev)
{
- struct st_sensor_data *pdata = iio_priv(indio_dev);
- int irq = pdata->get_irq_data_ready(indio_dev);
+ struct st_sensor_data *press_data = iio_priv(indio_dev);
+ int irq = press_data->get_irq_data_ready(indio_dev);
int err;
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -422,28 +421,30 @@ int st_press_common_probe(struct iio_dev *indio_dev,
st_sensors_power_enable(indio_dev);
err = st_sensors_check_device_support(indio_dev,
- ARRAY_SIZE(st_press_sensors),
- st_press_sensors);
+ ARRAY_SIZE(st_press_sensors_settings),
+ st_press_sensors_settings);
if (err < 0)
return err;
- pdata->num_data_channels = ST_PRESS_NUMBER_DATA_CHANNELS;
- pdata->multiread_bit = pdata->sensor->multi_read_bit;
- indio_dev->channels = pdata->sensor->ch;
- indio_dev->num_channels = pdata->sensor->num_ch;
+ press_data->num_data_channels = ST_PRESS_NUMBER_DATA_CHANNELS;
+ press_data->multiread_bit = press_data->sensor_settings->multi_read_bit;
+ indio_dev->channels = press_data->sensor_settings->ch;
+ indio_dev->num_channels = press_data->sensor_settings->num_ch;
- if (pdata->sensor->fs.addr != 0)
- pdata->current_fullscale = (struct st_sensor_fullscale_avl *)
- &pdata->sensor->fs.fs_avl[0];
+ if (press_data->sensor_settings->fs.addr != 0)
+ press_data->current_fullscale =
+ (struct st_sensor_fullscale_avl *)
+ &press_data->sensor_settings->fs.fs_avl[0];
- pdata->odr = pdata->sensor->odr.odr_avl[0].hz;
+ press_data->odr = press_data->sensor_settings->odr.odr_avl[0].hz;
/* Some devices don't support a data ready pin. */
- if (!plat_data && pdata->sensor->drdy_irq.addr)
- plat_data =
+ if (!press_data->dev->platform_data &&
+ press_data->sensor_settings->drdy_irq.addr)
+ press_data->dev->platform_data =
(struct st_sensors_platform_data *)&default_press_pdata;
- err = st_sensors_init_sensor(indio_dev, plat_data);
+ err = st_sensors_init_sensor(indio_dev, press_data->dev->platform_data);
if (err < 0)
return err;
@@ -479,12 +480,12 @@ EXPORT_SYMBOL(st_press_common_probe);
void st_press_common_remove(struct iio_dev *indio_dev)
{
- struct st_sensor_data *pdata = iio_priv(indio_dev);
+ struct st_sensor_data *press_data = iio_priv(indio_dev);
st_sensors_power_disable(indio_dev);
iio_device_unregister(indio_dev);
- if (pdata->get_irq_data_ready(indio_dev) > 0)
+ if (press_data->get_irq_data_ready(indio_dev) > 0)
st_sensors_deallocate_trigger(indio_dev);
st_press_deallocate_ring(indio_dev);
diff --git a/drivers/iio/pressure/st_pressure_i2c.c b/drivers/iio/pressure/st_pressure_i2c.c
index acaf165260b..137788bba4a 100644
--- a/drivers/iio/pressure/st_pressure_i2c.c
+++ b/drivers/iio/pressure/st_pressure_i2c.c
@@ -43,20 +43,19 @@ static int st_press_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct iio_dev *indio_dev;
- struct st_sensor_data *pdata;
+ struct st_sensor_data *press_data;
int err;
- indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*pdata));
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*press_data));
if (!indio_dev)
return -ENOMEM;
- pdata = iio_priv(indio_dev);
- pdata->dev = &client->dev;
+ press_data = iio_priv(indio_dev);
st_sensors_of_i2c_probe(client, st_press_of_match);
- st_sensors_i2c_configure(indio_dev, client, pdata);
+ st_sensors_i2c_configure(indio_dev, client, press_data);
- err = st_press_common_probe(indio_dev, client->dev.platform_data);
+ err = st_press_common_probe(indio_dev);
if (err < 0)
return err;
diff --git a/drivers/iio/pressure/st_pressure_spi.c b/drivers/iio/pressure/st_pressure_spi.c
index f45d430ec52..1ffa6d4d349 100644
--- a/drivers/iio/pressure/st_pressure_spi.c
+++ b/drivers/iio/pressure/st_pressure_spi.c
@@ -21,19 +21,18 @@
static int st_press_spi_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
- struct st_sensor_data *pdata;
+ struct st_sensor_data *press_data;
int err;
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*pdata));
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*press_data));
if (indio_dev == NULL)
return -ENOMEM;
- pdata = iio_priv(indio_dev);
- pdata->dev = &spi->dev;
+ press_data = iio_priv(indio_dev);
- st_sensors_spi_configure(indio_dev, spi, pdata);
+ st_sensors_spi_configure(indio_dev, spi, press_data);
- err = st_press_common_probe(indio_dev, spi->dev.platform_data);
+ err = st_press_common_probe(indio_dev);
if (err < 0)
return err;
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index 8349cc0fdf6..466aa431466 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -95,7 +95,7 @@ static int as3935_read(struct as3935_state *st, unsigned int reg, int *val)
*val = ret;
return 0;
-};
+}
static int as3935_write(struct as3935_state *st,
unsigned int reg,
@@ -107,7 +107,7 @@ static int as3935_write(struct as3935_state *st,
buf[1] = val;
return spi_write(st->spi, buf, 2);
-};
+}
static ssize_t as3935_sensor_sensitivity_show(struct device *dev,
struct device_attribute *attr,
@@ -122,7 +122,7 @@ static ssize_t as3935_sensor_sensitivity_show(struct device *dev,
val = (val & AS3935_AFE_MASK) >> 1;
return sprintf(buf, "%d\n", val);
-};
+}
static ssize_t as3935_sensor_sensitivity_store(struct device *dev,
struct device_attribute *attr,
@@ -142,7 +142,7 @@ static ssize_t as3935_sensor_sensitivity_store(struct device *dev,
as3935_write(st, AS3935_AFE_GAIN, val << 1);
return len;
-};
+}
static IIO_DEVICE_ATTR(sensor_sensitivity, S_IRUGO | S_IWUSR,
as3935_sensor_sensitivity_show, as3935_sensor_sensitivity_store, 0);
@@ -214,7 +214,7 @@ err_read:
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
-};
+}
static const struct iio_trigger_ops iio_interrupt_trigger_ops = {
.owner = THIS_MODULE,
@@ -238,7 +238,7 @@ static void as3935_event_work(struct work_struct *work)
dev_warn(&st->spi->dev, "noise level is too high");
break;
}
-};
+}
static irqreturn_t as3935_interrupt_handler(int irq, void *private)
{
@@ -417,7 +417,7 @@ unregister_trigger:
iio_trigger_unregister(st->trig);
return ret;
-};
+}
static int as3935_remove(struct spi_device *spi)
{
@@ -429,7 +429,7 @@ static int as3935_remove(struct spi_device *spi)
iio_trigger_unregister(st->trig);
return 0;
-};
+}
static const struct spi_device_id as3935_id[] = {
{"as3935", 0},
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 77089399359..b899531498e 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -38,6 +38,17 @@ config INFINIBAND_USER_MEM
depends on INFINIBAND_USER_ACCESS != n
default y
+config INFINIBAND_ON_DEMAND_PAGING
+ bool "InfiniBand on-demand paging support"
+ depends on INFINIBAND_USER_MEM
+ select MMU_NOTIFIER
+ default y
+ ---help---
+ On demand paging support for the InfiniBand subsystem.
+ Together with driver support, this allows registration of
+ memory regions without pinning their pages, fetching the
+ pages on demand instead.
+
config INFINIBAND_ADDR_TRANS
bool
depends on INFINIBAND
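For context, a consumer of this option asks for an on-demand region at registration time instead of a pinned one. A hedged userspace sketch, assuming a libibverbs build that exposes IBV_ACCESS_ON_DEMAND (names here are illustrative, not part of this patch):

#include <infiniband/verbs.h>

/* Hypothetical: register without pinning when ODP is available,
 * otherwise fall back to a conventional pinned registration. */
static struct ibv_mr *reg_odp_or_pinned(struct ibv_pd *pd, void *addr,
                                        size_t len)
{
        struct ibv_mr *mr;

        mr = ibv_reg_mr(pd, addr, len,
                        IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_ON_DEMAND);
        if (!mr)
                mr = ibv_reg_mr(pd, addr, len, IBV_ACCESS_LOCAL_WRITE);
        return mr;
}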
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index ffd0af6734a..acf73676444 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \
ib_core-y := packer.o ud_header.o verbs.o sysfs.o \
device.o fmr_pool.o cache.o netlink.o
ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
+ib_core-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o umem_rbtree.o
ib_mad-y := mad.o smi.o agent.o mad_rmpp.o
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 8172d37f9ad..f80da50d84a 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -176,8 +176,8 @@ static void set_timeout(unsigned long time)
unsigned long delay;
delay = time - jiffies;
- if ((long)delay <= 0)
- delay = 1;
+ if ((long)delay < 0)
+ delay = 0;
mod_delayed_work(addr_wq, &work, delay);
}
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index d2360a8ef0b..fa17b552ff7 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -525,17 +525,22 @@ static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
if (status)
process_join_error(group, status);
else {
+ int mgids_changed, is_mgid0;
ib_find_pkey(group->port->dev->device, group->port->port_num,
be16_to_cpu(rec->pkey), &pkey_index);
spin_lock_irq(&group->port->lock);
- group->rec = *rec;
if (group->state == MCAST_BUSY &&
group->pkey_index == MCAST_INVALID_PKEY_INDEX)
group->pkey_index = pkey_index;
- if (!memcmp(&mgid0, &group->rec.mgid, sizeof mgid0)) {
+ mgids_changed = memcmp(&rec->mgid, &group->rec.mgid,
+ sizeof(group->rec.mgid));
+ group->rec = *rec;
+ if (mgids_changed) {
rb_erase(&group->node, &group->port->table);
- mcast_insert(group->port, group, 1);
+ is_mgid0 = !memcmp(&mgid0, &group->rec.mgid,
+ sizeof(mgid0));
+ mcast_insert(group->port, group, is_mgid0);
}
spin_unlock_irq(&group->port->lock);
}
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index df0c4f605a2..aec7a6aa295 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -39,6 +39,7 @@
#include <linux/hugetlb.h>
#include <linux/dma-attrs.h>
#include <linux/slab.h>
+#include <rdma/ib_umem_odp.h>
#include "uverbs.h"
@@ -69,6 +70,10 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
/**
* ib_umem_get - Pin and DMA map userspace memory.
+ *
+ * If access flags indicate ODP memory, avoid pinning. Instead, store
+ * the mm for future page fault handling in conjunction with MMU notifiers.
+ *
* @context: userspace context to pin memory for
* @addr: userspace virtual address to start at
* @size: length of region to pin
@@ -103,17 +108,30 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
umem->context = context;
umem->length = size;
- umem->offset = addr & ~PAGE_MASK;
+ umem->address = addr;
umem->page_size = PAGE_SIZE;
umem->pid = get_task_pid(current, PIDTYPE_PID);
/*
- * We ask for writable memory if any access flags other than
- * "remote read" are set. "Local write" and "remote write"
+ * We ask for writable memory if any of the following
+ * access flags are set. "Local write" and "remote write"
* obviously require write access. "Remote atomic" can do
* things like fetch and add, which will modify memory, and
* "MW bind" can change permissions by binding a window.
*/
- umem->writable = !!(access & ~IB_ACCESS_REMOTE_READ);
+ umem->writable = !!(access &
+ (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
+
+ if (access & IB_ACCESS_ON_DEMAND) {
+ ret = ib_umem_odp_get(context, umem);
+ if (ret) {
+ kfree(umem);
+ return ERR_PTR(ret);
+ }
+ return umem;
+ }
+
+ umem->odp_data = NULL;
/* We assume the memory is from hugetlb until proved otherwise */
umem->hugetlb = 1;
@@ -132,7 +150,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
if (!vma_list)
umem->hugetlb = 0;
- npages = PAGE_ALIGN(size + umem->offset) >> PAGE_SHIFT;
+ npages = ib_umem_num_pages(umem);
down_write(&current->mm->mmap_sem);
@@ -235,6 +253,11 @@ void ib_umem_release(struct ib_umem *umem)
struct task_struct *task;
unsigned long diff;
+ if (umem->odp_data) {
+ ib_umem_odp_release(umem);
+ return;
+ }
+
__ib_umem_release(umem->context->device, umem, 1);
task = get_pid_task(umem->pid, PIDTYPE_PID);
@@ -246,7 +269,7 @@ void ib_umem_release(struct ib_umem *umem)
if (!mm)
goto out;
- diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;
+ diff = ib_umem_num_pages(umem);
/*
* We may be called with the mm's mmap_sem already held. This
@@ -283,6 +306,9 @@ int ib_umem_page_count(struct ib_umem *umem)
int n;
struct scatterlist *sg;
+ if (umem->odp_data)
+ return ib_umem_num_pages(umem);
+
shift = ilog2(umem->page_size);
n = 0;
@@ -292,3 +318,37 @@ int ib_umem_page_count(struct ib_umem *umem)
return n;
}
EXPORT_SYMBOL(ib_umem_page_count);
+
+/*
+ * Copy from the given ib_umem's pages to the given buffer.
+ *
+ * umem - the umem to copy from
+ * offset - offset to start copying from
+ * dst - destination buffer
+ * length - buffer length
+ *
+ * Returns 0 on success, or an error code.
+ */
+int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
+ size_t length)
+{
+ size_t end = offset + length;
+ int ret;
+
+ if (offset > umem->length || length > umem->length - offset) {
+ pr_err("ib_umem_copy_from not in range. offset: %zd umem length: %zd end: %zd\n",
+ offset, umem->length, end);
+ return -EINVAL;
+ }
+
+ ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->nmap, dst, length,
+ offset + ib_umem_offset(umem));
+
+ if (ret < 0)
+ return ret;
+ else if (ret != length)
+ return -EINVAL;
+ else
+ return 0;
+}
+EXPORT_SYMBOL(ib_umem_copy_from);
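A hypothetical caller sketch for the new helper (the buffer name and 64-byte length are illustrative): copy the first bytes of a registered region into kernel memory without touching the mapping.

/* Hypothetical, not part of the patch: peek at the start of an MR. */
static int peek_umem_header(struct ib_umem *umem)
{
        u8 hdr[64];

        return ib_umem_copy_from(hdr, umem, 0, sizeof(hdr));
}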
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
new file mode 100644
index 00000000000..6095872549e
--- /dev/null
+++ b/drivers/infiniband/core/umem_odp.c
@@ -0,0 +1,668 @@
+/*
+ * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/pid.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/vmalloc.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_umem_odp.h>
+
+static void ib_umem_notifier_start_account(struct ib_umem *item)
+{
+ mutex_lock(&item->odp_data->umem_mutex);
+
+ /* Only update private counters for this umem if it has them.
+ * Otherwise skip it. All page faults will be delayed for this umem. */
+ if (item->odp_data->mn_counters_active) {
+ int notifiers_count = item->odp_data->notifiers_count++;
+
+ if (notifiers_count == 0)
+ /* Initialize the completion object for waiting on
+ * notifiers. Since notifier_count is zero, no one
+ * should be waiting right now. */
+ reinit_completion(&item->odp_data->notifier_completion);
+ }
+ mutex_unlock(&item->odp_data->umem_mutex);
+}
+
+static void ib_umem_notifier_end_account(struct ib_umem *item)
+{
+ mutex_lock(&item->odp_data->umem_mutex);
+
+ /* Only update private counters for this umem if it has them.
+ * Otherwise skip it. All page faults will be delayed for this umem. */
+ if (item->odp_data->mn_counters_active) {
+ /*
+ * This sequence increase will notify the QP page fault that
+ * the page that is going to be mapped in the spte could have
+ * been freed.
+ */
+ ++item->odp_data->notifiers_seq;
+ if (--item->odp_data->notifiers_count == 0)
+ complete_all(&item->odp_data->notifier_completion);
+ }
+ mutex_unlock(&item->odp_data->umem_mutex);
+}
+
+/* Account for a new mmu notifier in an ib_ucontext. */
+static void ib_ucontext_notifier_start_account(struct ib_ucontext *context)
+{
+ atomic_inc(&context->notifier_count);
+}
+
+/* Account for a terminating mmu notifier in an ib_ucontext.
+ *
+ * Must be called with the ib_ucontext->umem_rwsem semaphore unlocked, since
+ * the function takes the semaphore itself. */
+static void ib_ucontext_notifier_end_account(struct ib_ucontext *context)
+{
+ int zero_notifiers = atomic_dec_and_test(&context->notifier_count);
+
+ if (zero_notifiers &&
+ !list_empty(&context->no_private_counters)) {
+ /* No currently running mmu notifiers. Now is the chance to
+ * add private accounting to all previously added umems. */
+ struct ib_umem_odp *odp_data, *next;
+
+ /* Prevent concurrent mmu notifiers from working on the
+ * no_private_counters list. */
+ down_write(&context->umem_rwsem);
+
+ /* Read the notifier_count again, with the umem_rwsem
+ * semaphore taken for write. */
+ if (!atomic_read(&context->notifier_count)) {
+ list_for_each_entry_safe(odp_data, next,
+ &context->no_private_counters,
+ no_private_counters) {
+ mutex_lock(&odp_data->umem_mutex);
+ odp_data->mn_counters_active = true;
+ list_del(&odp_data->no_private_counters);
+ complete_all(&odp_data->notifier_completion);
+ mutex_unlock(&odp_data->umem_mutex);
+ }
+ }
+
+ up_write(&context->umem_rwsem);
+ }
+}
+
+static int ib_umem_notifier_release_trampoline(struct ib_umem *item, u64 start,
+ u64 end, void *cookie) {
+ /*
+ * Increase the number of notifiers running, to
+ * prevent any further fault handling on this MR.
+ */
+ ib_umem_notifier_start_account(item);
+ item->odp_data->dying = 1;
+ /* Make sure the fact that the umem is dying is visible before we release
+ * all pending page faults. */
+ smp_wmb();
+ complete_all(&item->odp_data->notifier_completion);
+ item->context->invalidate_range(item, ib_umem_start(item),
+ ib_umem_end(item));
+ return 0;
+}
+
+static void ib_umem_notifier_release(struct mmu_notifier *mn,
+ struct mm_struct *mm)
+{
+ struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
+
+ if (!context->invalidate_range)
+ return;
+
+ ib_ucontext_notifier_start_account(context);
+ down_read(&context->umem_rwsem);
+ rbt_ib_umem_for_each_in_range(&context->umem_tree, 0,
+ ULLONG_MAX,
+ ib_umem_notifier_release_trampoline,
+ NULL);
+ up_read(&context->umem_rwsem);
+}
+
+static int invalidate_page_trampoline(struct ib_umem *item, u64 start,
+ u64 end, void *cookie)
+{
+ ib_umem_notifier_start_account(item);
+ item->context->invalidate_range(item, start, start + PAGE_SIZE);
+ ib_umem_notifier_end_account(item);
+ return 0;
+}
+
+static void ib_umem_notifier_invalidate_page(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long address)
+{
+ struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
+
+ if (!context->invalidate_range)
+ return;
+
+ ib_ucontext_notifier_start_account(context);
+ down_read(&context->umem_rwsem);
+ rbt_ib_umem_for_each_in_range(&context->umem_tree, address,
+ address + PAGE_SIZE,
+ invalidate_page_trampoline, NULL);
+ up_read(&context->umem_rwsem);
+ ib_ucontext_notifier_end_account(context);
+}
+
+static int invalidate_range_start_trampoline(struct ib_umem *item, u64 start,
+ u64 end, void *cookie)
+{
+ ib_umem_notifier_start_account(item);
+ item->context->invalidate_range(item, start, end);
+ return 0;
+}
+
+static void ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
+
+ if (!context->invalidate_range)
+ return;
+
+ ib_ucontext_notifier_start_account(context);
+ down_read(&context->umem_rwsem);
+ rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
+ end,
+ invalidate_range_start_trampoline, NULL);
+ up_read(&context->umem_rwsem);
+}
+
+static int invalidate_range_end_trampoline(struct ib_umem *item, u64 start,
+ u64 end, void *cookie)
+{
+ ib_umem_notifier_end_account(item);
+ return 0;
+}
+
+static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
+
+ if (!context->invalidate_range)
+ return;
+
+ down_read(&context->umem_rwsem);
+ rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
+ end,
+ invalidate_range_end_trampoline, NULL);
+ up_read(&context->umem_rwsem);
+ ib_ucontext_notifier_end_account(context);
+}
+
+static struct mmu_notifier_ops ib_umem_notifiers = {
+ .release = ib_umem_notifier_release,
+ .invalidate_page = ib_umem_notifier_invalidate_page,
+ .invalidate_range_start = ib_umem_notifier_invalidate_range_start,
+ .invalidate_range_end = ib_umem_notifier_invalidate_range_end,
+};
+
+int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem)
+{
+ int ret_val;
+ struct pid *our_pid;
+ struct mm_struct *mm = get_task_mm(current);
+
+ if (!mm)
+ return -EINVAL;
+
+ /* Prevent creating ODP MRs in child processes */
+ rcu_read_lock();
+ our_pid = get_task_pid(current->group_leader, PIDTYPE_PID);
+ rcu_read_unlock();
+ put_pid(our_pid);
+ if (context->tgid != our_pid) {
+ ret_val = -EINVAL;
+ goto out_mm;
+ }
+
+ umem->hugetlb = 0;
+ umem->odp_data = kzalloc(sizeof(*umem->odp_data), GFP_KERNEL);
+ if (!umem->odp_data) {
+ ret_val = -ENOMEM;
+ goto out_mm;
+ }
+ umem->odp_data->umem = umem;
+
+ mutex_init(&umem->odp_data->umem_mutex);
+
+ init_completion(&umem->odp_data->notifier_completion);
+
+ umem->odp_data->page_list = vzalloc(ib_umem_num_pages(umem) *
+ sizeof(*umem->odp_data->page_list));
+ if (!umem->odp_data->page_list) {
+ ret_val = -ENOMEM;
+ goto out_odp_data;
+ }
+
+ umem->odp_data->dma_list = vzalloc(ib_umem_num_pages(umem) *
+ sizeof(*umem->odp_data->dma_list));
+ if (!umem->odp_data->dma_list) {
+ ret_val = -ENOMEM;
+ goto out_page_list;
+ }
+
+ /*
+ * When using MMU notifiers, we will get a
+ * notification before the "current" task (and MM) is
+ * destroyed. We use the umem_rwsem semaphore to synchronize.
+ */
+ down_write(&context->umem_rwsem);
+ context->odp_mrs_count++;
+ if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
+ rbt_ib_umem_insert(&umem->odp_data->interval_tree,
+ &context->umem_tree);
+ if (likely(!atomic_read(&context->notifier_count)))
+ umem->odp_data->mn_counters_active = true;
+ else
+ list_add(&umem->odp_data->no_private_counters,
+ &context->no_private_counters);
+ downgrade_write(&context->umem_rwsem);
+
+ if (context->odp_mrs_count == 1) {
+ /*
+ * Note that at this point, no MMU notifier is running
+ * for this context!
+ */
+ atomic_set(&context->notifier_count, 0);
+ INIT_HLIST_NODE(&context->mn.hlist);
+ context->mn.ops = &ib_umem_notifiers;
+ /*
+ * Lock-dep detects a false positive for mmap_sem vs.
+ * umem_rwsem, due to not grasping downgrade_write correctly.
+ */
+ lockdep_off();
+ ret_val = mmu_notifier_register(&context->mn, mm);
+ lockdep_on();
+ if (ret_val) {
+ pr_err("Failed to register mmu_notifier %d\n", ret_val);
+ ret_val = -EBUSY;
+ goto out_mutex;
+ }
+ }
+
+ up_read(&context->umem_rwsem);
+
+ /*
+ * Note that doing an mmput can cause a notifier for the relevant mm.
+ * If the notifier is called while we hold the umem_rwsem, this will
+ * cause a deadlock. Therefore, we release the reference only after we
+ * released the semaphore.
+ */
+ mmput(mm);
+ return 0;
+
+out_mutex:
+ up_read(&context->umem_rwsem);
+ vfree(umem->odp_data->dma_list);
+out_page_list:
+ vfree(umem->odp_data->page_list);
+out_odp_data:
+ kfree(umem->odp_data);
+out_mm:
+ mmput(mm);
+ return ret_val;
+}
+
+void ib_umem_odp_release(struct ib_umem *umem)
+{
+ struct ib_ucontext *context = umem->context;
+
+ /*
+ * Ensure that no more pages are mapped in the umem.
+ *
+ * It is the driver's responsibility to ensure, before calling us,
+ * that the hardware will not attempt to access the MR any more.
+ */
+ ib_umem_odp_unmap_dma_pages(umem, ib_umem_start(umem),
+ ib_umem_end(umem));
+
+ down_write(&context->umem_rwsem);
+ if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
+ rbt_ib_umem_remove(&umem->odp_data->interval_tree,
+ &context->umem_tree);
+ context->odp_mrs_count--;
+ if (!umem->odp_data->mn_counters_active) {
+ list_del(&umem->odp_data->no_private_counters);
+ complete_all(&umem->odp_data->notifier_completion);
+ }
+
+ /*
+ * Downgrade the lock to a read lock. This ensures that the notifiers
+ * (which take the lock for reading) will be able to finish, and we
+ * will eventually be able to obtain the mmu notifiers SRCU. Note
+ * that since we are doing it atomically, no other user could register
+ * and unregister while we do the check.
+ */
+ downgrade_write(&context->umem_rwsem);
+ if (!context->odp_mrs_count) {
+ struct task_struct *owning_process = NULL;
+ struct mm_struct *owning_mm = NULL;
+
+ owning_process = get_pid_task(context->tgid,
+ PIDTYPE_PID);
+ if (owning_process == NULL)
+ /*
+ * The process is already dead; the notifiers were
+ * already removed.
+ */
+ goto out;
+
+ owning_mm = get_task_mm(owning_process);
+ if (owning_mm == NULL)
+ /*
+ * The process' mm is already dead; the notifiers were
+ * already removed.
+ */
+ goto out_put_task;
+ mmu_notifier_unregister(&context->mn, owning_mm);
+
+ mmput(owning_mm);
+
+out_put_task:
+ put_task_struct(owning_process);
+ }
+out:
+ up_read(&context->umem_rwsem);
+
+ vfree(umem->odp_data->dma_list);
+ vfree(umem->odp_data->page_list);
+ kfree(umem->odp_data);
+ kfree(umem);
+}
+
+/*
+ * Map for DMA and insert a single page into the on-demand paging page tables.
+ *
+ * @umem: the umem to insert the page to.
+ * @page_index: index in the umem to add the page to.
+ * @page: the page struct to map and add.
+ * @access_mask: access permissions needed for this page.
+ * @current_seq: sequence number for synchronization with invalidations.
+ * the sequence number is taken from
+ * umem->odp_data->notifiers_seq.
+ *
+ * The function returns -EFAULT if the DMA mapping operation fails. It returns
+ * -EAGAIN if a concurrent invalidation prevents us from updating the page.
+ *
+ * The page is released via put_page even if the operation failed. For
+ * on-demand pinning, the page is released whenever it isn't stored in the
+ * umem.
+ */
+static int ib_umem_odp_map_dma_single_page(
+ struct ib_umem *umem,
+ int page_index,
+ u64 base_virt_addr,
+ struct page *page,
+ u64 access_mask,
+ unsigned long current_seq)
+{
+ struct ib_device *dev = umem->context->device;
+ dma_addr_t dma_addr;
+ int stored_page = 0;
+ int remove_existing_mapping = 0;
+ int ret = 0;
+
+ mutex_lock(&umem->odp_data->umem_mutex);
+ /*
+ * Note: we avoid writing if seq is different from the initial seq, to
+ * handle the case of a racing notifier. This check also allows us to bail
+ * early if we have a notifier running in parallel with us.
+ */
+ if (ib_umem_mmu_notifier_retry(umem, current_seq)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+ if (!(umem->odp_data->dma_list[page_index])) {
+ dma_addr = ib_dma_map_page(dev,
+ page,
+ 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (ib_dma_mapping_error(dev, dma_addr)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ umem->odp_data->dma_list[page_index] = dma_addr | access_mask;
+ umem->odp_data->page_list[page_index] = page;
+ stored_page = 1;
+ } else if (umem->odp_data->page_list[page_index] == page) {
+ umem->odp_data->dma_list[page_index] |= access_mask;
+ } else {
+ pr_err("error: got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
+ umem->odp_data->page_list[page_index], page);
+ /* Better remove the mapping now, to prevent any further
+ * damage. */
+ remove_existing_mapping = 1;
+ }
+
+out:
+ mutex_unlock(&umem->odp_data->umem_mutex);
+
+ /* On Demand Paging - avoid pinning the page */
+ if (umem->context->invalidate_range || !stored_page)
+ put_page(page);
+
+ if (remove_existing_mapping && umem->context->invalidate_range) {
+ invalidate_page_trampoline(
+ umem,
+ base_virt_addr + (page_index * PAGE_SIZE),
+ base_virt_addr + ((page_index+1)*PAGE_SIZE),
+ NULL);
+ ret = -EAGAIN;
+ }
+
+ return ret;
+}
+
+/**
+ * ib_umem_odp_map_dma_pages - Pin and DMA map userspace memory in an ODP MR.
+ *
+ * Pins the range of pages passed in the argument, and maps them to
+ * DMA addresses. The DMA addresses of the mapped pages are updated in
+ * umem->odp_data->dma_list.
+ *
+ * Returns the number of pages mapped on success, or a negative error code
+ * on failure.
+ * An -EAGAIN error code is returned when a concurrent mmu notifier prevents
+ * the function from completing its task.
+ *
+ * @umem: the umem to map and pin
+ * @user_virt: the address from which we need to map.
+ * @bcnt: the minimal number of bytes to pin and map. The mapping might be
+ * bigger due to alignment, and may also be smaller in case of an error
+ * pinning or mapping a page. The actual number of pages mapped is returned
+ * in the return value.
+ * the return value.
+ * @access_mask: bit mask of the requested access permissions for the given
+ * range.
+ * @current_seq: the MMU notifiers sequence value for synchronization with
+ * invalidations. The sequence number is read from
+ * umem->odp_data->notifiers_seq before calling this function.
+ */
+int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
+ u64 access_mask, unsigned long current_seq)
+{
+ struct task_struct *owning_process = NULL;
+ struct mm_struct *owning_mm = NULL;
+ struct page **local_page_list = NULL;
+ u64 off;
+ int j, k, ret = 0, start_idx, npages = 0;
+ u64 base_virt_addr;
+
+ if (access_mask == 0)
+ return -EINVAL;
+
+ if (user_virt < ib_umem_start(umem) ||
+ user_virt + bcnt > ib_umem_end(umem))
+ return -EFAULT;
+
+ local_page_list = (struct page **)__get_free_page(GFP_KERNEL);
+ if (!local_page_list)
+ return -ENOMEM;
+
+ off = user_virt & (~PAGE_MASK);
+ user_virt = user_virt & PAGE_MASK;
+ base_virt_addr = user_virt;
+ bcnt += off; /* Charge for the first page offset as well. */
+
+ owning_process = get_pid_task(umem->context->tgid, PIDTYPE_PID);
+ if (owning_process == NULL) {
+ ret = -EINVAL;
+ goto out_no_task;
+ }
+
+ owning_mm = get_task_mm(owning_process);
+ if (owning_mm == NULL) {
+ ret = -EINVAL;
+ goto out_put_task;
+ }
+
+ start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT;
+ k = start_idx;
+
+ while (bcnt > 0) {
+ const size_t gup_num_pages =
+ min_t(size_t, ALIGN(bcnt, PAGE_SIZE) / PAGE_SIZE,
+ PAGE_SIZE / sizeof(struct page *));
+
+ down_read(&owning_mm->mmap_sem);
+ /*
+ * Note: this might result in redundant page getting. We can
+ * avoid this by checking dma_list for 0 before calling
+ * get_user_pages. However, this makes the code much more
+ * complex (and doesn't gain us much performance in most use
+ * cases).
+ */
+ npages = get_user_pages(owning_process, owning_mm, user_virt,
+ gup_num_pages,
+ access_mask & ODP_WRITE_ALLOWED_BIT, 0,
+ local_page_list, NULL);
+ up_read(&owning_mm->mmap_sem);
+
+ if (npages < 0)
+ break;
+
+ bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
+ user_virt += npages << PAGE_SHIFT;
+ for (j = 0; j < npages; ++j) {
+ ret = ib_umem_odp_map_dma_single_page(
+ umem, k, base_virt_addr, local_page_list[j],
+ access_mask, current_seq);
+ if (ret < 0)
+ break;
+ k++;
+ }
+
+ if (ret < 0) {
+ /* Release left over pages when handling errors. */
+ for (++j; j < npages; ++j)
+ put_page(local_page_list[j]);
+ break;
+ }
+ }
+
+ if (ret >= 0) {
+ if (npages < 0 && k == start_idx)
+ ret = npages;
+ else
+ ret = k - start_idx;
+ }
+
+ mmput(owning_mm);
+out_put_task:
+ put_task_struct(owning_process);
+out_no_task:
+ free_page((unsigned long)local_page_list);
+ return ret;
+}
+EXPORT_SYMBOL(ib_umem_odp_map_dma_pages);
+
+void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
+ u64 bound)
+{
+ int idx;
+ u64 addr;
+ struct ib_device *dev = umem->context->device;
+
+ virt = max_t(u64, virt, ib_umem_start(umem));
+ bound = min_t(u64, bound, ib_umem_end(umem));
+ /* Note that during the run of this function, the
+ * notifiers_count of the MR is > 0, preventing any racing
+ * faults from completing. We might be racing with other
+ * invalidations, so we must make sure we free each page only
+ * once. */
+ for (addr = virt; addr < bound; addr += (u64)umem->page_size) {
+ idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
+ mutex_lock(&umem->odp_data->umem_mutex);
+ if (umem->odp_data->page_list[idx]) {
+ struct page *page = umem->odp_data->page_list[idx];
+ struct page *head_page = compound_head(page);
+ dma_addr_t dma = umem->odp_data->dma_list[idx];
+ dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK;
+
+ WARN_ON(!dma_addr);
+
+ ib_dma_unmap_page(dev, dma_addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma & ODP_WRITE_ALLOWED_BIT)
+ /*
+ * set_page_dirty prefers being called with
+ * the page lock. However, MMU notifiers are
+ * called sometimes with and sometimes without
+ * the lock. We rely on the umem_mutex instead
+ * to prevent other mmu notifiers from
+ * continuing and allowing the page mapping to
+ * be removed.
+ */
+ set_page_dirty(head_page);
+ /* on demand pinning support */
+ if (!umem->context->invalidate_range)
+ put_page(page);
+ umem->odp_data->page_list[idx] = NULL;
+ umem->odp_data->dma_list[idx] = 0;
+ }
+ mutex_unlock(&umem->odp_data->umem_mutex);
+ }
+}
+EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
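A driver consuming these helpers would typically snapshot the notifier sequence, map, and retry on -EAGAIN, mirroring the ib_umem_mmu_notifier_retry() check above. A hedged driver-side sketch (function name is hypothetical; the plain read of notifiers_seq is a simplification, a real driver would add the appropriate barrier and a retry limit):

/* Hypothetical page-fault path: map the faulting range and retry if a
 * concurrent invalidation raced with us. */
static int fault_in_range(struct ib_umem *umem, u64 va, u64 len, u64 access)
{
        int npages;

        do {
                unsigned long seq = umem->odp_data->notifiers_seq;

                npages = ib_umem_odp_map_dma_pages(umem, va, len,
                                                   access, seq);
        } while (npages == -EAGAIN);

        return npages;
}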
diff --git a/drivers/infiniband/core/umem_rbtree.c b/drivers/infiniband/core/umem_rbtree.c
new file mode 100644
index 00000000000..727d788448f
--- /dev/null
+++ b/drivers/infiniband/core/umem_rbtree.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interval_tree_generic.h>
+#include <linux/sched.h>
+#include <linux/gfp.h>
+#include <rdma/ib_umem_odp.h>
+
+/*
+ * The ib_umem list keeps track of memory regions for which the HW
+ * device requested to receive notifications when the related memory
+ * mapping is changed.
+ *
+ * ib_umem_lock protects the list.
+ */
+
+static inline u64 node_start(struct umem_odp_node *n)
+{
+ struct ib_umem_odp *umem_odp =
+ container_of(n, struct ib_umem_odp, interval_tree);
+
+ return ib_umem_start(umem_odp->umem);
+}
+
+/* Note that the representation of the intervals in the interval tree
+ * considers the ending point as contained in the interval, while the
+ * function ib_umem_end returns the first address which is not contained
+ * in the umem.
+ */
+static inline u64 node_last(struct umem_odp_node *n)
+{
+ struct ib_umem_odp *umem_odp =
+ container_of(n, struct ib_umem_odp, interval_tree);
+
+ return ib_umem_end(umem_odp->umem) - 1;
+}
+
+INTERVAL_TREE_DEFINE(struct umem_odp_node, rb, u64, __subtree_last,
+ node_start, node_last, , rbt_ib_umem)
+
+/* @last is not a part of the interval. See comment for function
+ * node_last.
+ */
+int rbt_ib_umem_for_each_in_range(struct rb_root *root,
+ u64 start, u64 last,
+ umem_call_back cb,
+ void *cookie)
+{
+ int ret_val = 0;
+ struct umem_odp_node *node;
+ struct ib_umem_odp *umem;
+
+ if (unlikely(start == last))
+ return ret_val;
+
+ for (node = rbt_ib_umem_iter_first(root, start, last - 1); node;
+ node = rbt_ib_umem_iter_next(node, start, last - 1)) {
+ umem = container_of(node, struct ib_umem_odp, interval_tree);
+ ret_val = cb(umem->umem, start, last, cookie) || ret_val;
+ }
+
+ return ret_val;
+}
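The iterator above bridges two conventions: a umem covering the half-open range [start, end) is stored in the interval tree as the closed range [start, end - 1], so callers pass last - 1. A small stand-alone sketch of the overlap test this implies:

#include <stdint.h>
#include <stdio.h>

/* Closed-interval overlap test, as used by the interval tree. */
static int overlaps(uint64_t node_start, uint64_t node_last,
		    uint64_t q_start, uint64_t q_last)
{
	return node_start <= q_last && q_start <= node_last;
}

int main(void)
{
	uint64_t umem_start = 0x1000, umem_end = 0x3000;	/* [start, end) */
	uint64_t node_last  = umem_end - 1;			/* stored closed */

	/* A half-open query [0x3000, 0x4000) starts exactly at umem_end and
	 * must not match; converting it to closed form (last - 1) keeps the
	 * two conventions consistent. */
	printf("overlap: %d\n", overlaps(umem_start, node_last, 0x3000, 0x4000 - 1));
	return 0;
}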
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 643c08a025a..b716b081564 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -258,5 +258,6 @@ IB_UVERBS_DECLARE_CMD(close_xrcd);
IB_UVERBS_DECLARE_EX_CMD(create_flow);
IB_UVERBS_DECLARE_EX_CMD(destroy_flow);
+IB_UVERBS_DECLARE_EX_CMD(query_device);
#endif /* UVERBS_H */
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 5ba2a86aab6..532d8eba8b0 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -36,6 +36,7 @@
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
+#include <linux/sched.h>
#include <asm/uaccess.h>
@@ -288,6 +289,9 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
struct ib_uverbs_get_context_resp resp;
struct ib_udata udata;
struct ib_device *ibdev = file->device->ib_dev;
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ struct ib_device_attr dev_attr;
+#endif
struct ib_ucontext *ucontext;
struct file *filp;
int ret;
@@ -325,8 +329,25 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
INIT_LIST_HEAD(&ucontext->ah_list);
INIT_LIST_HEAD(&ucontext->xrcd_list);
INIT_LIST_HEAD(&ucontext->rule_list);
+ rcu_read_lock();
+ ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
+ rcu_read_unlock();
ucontext->closing = 0;
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ ucontext->umem_tree = RB_ROOT;
+ init_rwsem(&ucontext->umem_rwsem);
+ ucontext->odp_mrs_count = 0;
+ INIT_LIST_HEAD(&ucontext->no_private_counters);
+
+ ret = ib_query_device(ibdev, &dev_attr);
+ if (ret)
+ goto err_free;
+ if (!(dev_attr.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
+ ucontext->invalidate_range = NULL;
+
+#endif
+
resp.num_comp_vectors = file->device->num_comp_vectors;
ret = get_unused_fd_flags(O_CLOEXEC);
@@ -371,6 +392,7 @@ err_fd:
put_unused_fd(resp.async_fd);
err_free:
+ put_pid(ucontext->tgid);
ibdev->dealloc_ucontext(ucontext);
err:
@@ -378,6 +400,52 @@ err:
return ret;
}
+static void copy_query_dev_fields(struct ib_uverbs_file *file,
+ struct ib_uverbs_query_device_resp *resp,
+ struct ib_device_attr *attr)
+{
+ resp->fw_ver = attr->fw_ver;
+ resp->node_guid = file->device->ib_dev->node_guid;
+ resp->sys_image_guid = attr->sys_image_guid;
+ resp->max_mr_size = attr->max_mr_size;
+ resp->page_size_cap = attr->page_size_cap;
+ resp->vendor_id = attr->vendor_id;
+ resp->vendor_part_id = attr->vendor_part_id;
+ resp->hw_ver = attr->hw_ver;
+ resp->max_qp = attr->max_qp;
+ resp->max_qp_wr = attr->max_qp_wr;
+ resp->device_cap_flags = attr->device_cap_flags;
+ resp->max_sge = attr->max_sge;
+ resp->max_sge_rd = attr->max_sge_rd;
+ resp->max_cq = attr->max_cq;
+ resp->max_cqe = attr->max_cqe;
+ resp->max_mr = attr->max_mr;
+ resp->max_pd = attr->max_pd;
+ resp->max_qp_rd_atom = attr->max_qp_rd_atom;
+ resp->max_ee_rd_atom = attr->max_ee_rd_atom;
+ resp->max_res_rd_atom = attr->max_res_rd_atom;
+ resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
+ resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
+ resp->atomic_cap = attr->atomic_cap;
+ resp->max_ee = attr->max_ee;
+ resp->max_rdd = attr->max_rdd;
+ resp->max_mw = attr->max_mw;
+ resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
+ resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
+ resp->max_mcast_grp = attr->max_mcast_grp;
+ resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
+ resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
+ resp->max_ah = attr->max_ah;
+ resp->max_fmr = attr->max_fmr;
+ resp->max_map_per_fmr = attr->max_map_per_fmr;
+ resp->max_srq = attr->max_srq;
+ resp->max_srq_wr = attr->max_srq_wr;
+ resp->max_srq_sge = attr->max_srq_sge;
+ resp->max_pkeys = attr->max_pkeys;
+ resp->local_ca_ack_delay = attr->local_ca_ack_delay;
+ resp->phys_port_cnt = file->device->ib_dev->phys_port_cnt;
+}
+
ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
const char __user *buf,
int in_len, int out_len)
@@ -398,47 +466,7 @@ ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
return ret;
memset(&resp, 0, sizeof resp);
-
- resp.fw_ver = attr.fw_ver;
- resp.node_guid = file->device->ib_dev->node_guid;
- resp.sys_image_guid = attr.sys_image_guid;
- resp.max_mr_size = attr.max_mr_size;
- resp.page_size_cap = attr.page_size_cap;
- resp.vendor_id = attr.vendor_id;
- resp.vendor_part_id = attr.vendor_part_id;
- resp.hw_ver = attr.hw_ver;
- resp.max_qp = attr.max_qp;
- resp.max_qp_wr = attr.max_qp_wr;
- resp.device_cap_flags = attr.device_cap_flags;
- resp.max_sge = attr.max_sge;
- resp.max_sge_rd = attr.max_sge_rd;
- resp.max_cq = attr.max_cq;
- resp.max_cqe = attr.max_cqe;
- resp.max_mr = attr.max_mr;
- resp.max_pd = attr.max_pd;
- resp.max_qp_rd_atom = attr.max_qp_rd_atom;
- resp.max_ee_rd_atom = attr.max_ee_rd_atom;
- resp.max_res_rd_atom = attr.max_res_rd_atom;
- resp.max_qp_init_rd_atom = attr.max_qp_init_rd_atom;
- resp.max_ee_init_rd_atom = attr.max_ee_init_rd_atom;
- resp.atomic_cap = attr.atomic_cap;
- resp.max_ee = attr.max_ee;
- resp.max_rdd = attr.max_rdd;
- resp.max_mw = attr.max_mw;
- resp.max_raw_ipv6_qp = attr.max_raw_ipv6_qp;
- resp.max_raw_ethy_qp = attr.max_raw_ethy_qp;
- resp.max_mcast_grp = attr.max_mcast_grp;
- resp.max_mcast_qp_attach = attr.max_mcast_qp_attach;
- resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
- resp.max_ah = attr.max_ah;
- resp.max_fmr = attr.max_fmr;
- resp.max_map_per_fmr = attr.max_map_per_fmr;
- resp.max_srq = attr.max_srq;
- resp.max_srq_wr = attr.max_srq_wr;
- resp.max_srq_sge = attr.max_srq_sge;
- resp.max_pkeys = attr.max_pkeys;
- resp.local_ca_ack_delay = attr.local_ca_ack_delay;
- resp.phys_port_cnt = file->device->ib_dev->phys_port_cnt;
+ copy_query_dev_fields(file, &resp, &attr);
if (copy_to_user((void __user *) (unsigned long) cmd.response,
&resp, sizeof resp))
@@ -947,6 +975,18 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
goto err_free;
}
+ if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
+ struct ib_device_attr attr;
+
+ ret = ib_query_device(pd->device, &attr);
+ if (ret || !(attr.device_cap_flags &
+ IB_DEVICE_ON_DEMAND_PAGING)) {
+ pr_debug("ODP support not available\n");
+ ret = -EINVAL;
+ goto err_put;
+ }
+ }
+
mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
cmd.access_flags, &udata);
if (IS_ERR(mr)) {
@@ -3253,3 +3293,52 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
return ret ? ret : in_len;
}
+
+int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
+ struct ib_udata *ucore,
+ struct ib_udata *uhw)
+{
+ struct ib_uverbs_ex_query_device_resp resp;
+ struct ib_uverbs_ex_query_device cmd;
+ struct ib_device_attr attr;
+ struct ib_device *device;
+ int err;
+
+ device = file->device->ib_dev;
+ if (ucore->inlen < sizeof(cmd))
+ return -EINVAL;
+
+ err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
+ if (err)
+ return err;
+
+ if (cmd.reserved)
+ return -EINVAL;
+
+ err = device->query_device(device, &attr);
+ if (err)
+ return err;
+
+ memset(&resp, 0, sizeof(resp));
+ copy_query_dev_fields(file, &resp.base, &attr);
+ resp.comp_mask = 0;
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ if (cmd.comp_mask & IB_USER_VERBS_EX_QUERY_DEVICE_ODP) {
+ resp.odp_caps.general_caps = attr.odp_caps.general_caps;
+ resp.odp_caps.per_transport_caps.rc_odp_caps =
+ attr.odp_caps.per_transport_caps.rc_odp_caps;
+ resp.odp_caps.per_transport_caps.uc_odp_caps =
+ attr.odp_caps.per_transport_caps.uc_odp_caps;
+ resp.odp_caps.per_transport_caps.ud_odp_caps =
+ attr.odp_caps.per_transport_caps.ud_odp_caps;
+ resp.comp_mask |= IB_USER_VERBS_EX_QUERY_DEVICE_ODP;
+ }
+#endif
+
+ err = ib_copy_to_udata(ucore, &resp, sizeof(resp));
+ if (err)
+ return err;
+
+ return 0;
+}
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 71ab83fde47..e6c23b9eab3 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -122,7 +122,8 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
struct ib_udata *ucore,
struct ib_udata *uhw) = {
[IB_USER_VERBS_EX_CMD_CREATE_FLOW] = ib_uverbs_ex_create_flow,
- [IB_USER_VERBS_EX_CMD_DESTROY_FLOW] = ib_uverbs_ex_destroy_flow
+ [IB_USER_VERBS_EX_CMD_DESTROY_FLOW] = ib_uverbs_ex_destroy_flow,
+ [IB_USER_VERBS_EX_CMD_QUERY_DEVICE] = ib_uverbs_ex_query_device
};
static void ib_uverbs_add_one(struct ib_device *device);
@@ -296,6 +297,8 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
kfree(uobj);
}
+ put_pid(context->tgid);
+
return context->device->dealloc_ucontext(context);
}
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index c2b89cc5dbc..f93eb8da7b5 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -879,7 +879,8 @@ int ib_resolve_eth_l2_attrs(struct ib_qp *qp,
if (rdma_link_local_addr((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw)) {
rdma_get_ll_mac((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw, qp_attr->ah_attr.dmac);
rdma_get_ll_mac((struct in6_addr *)sgid.raw, qp_attr->smac);
- qp_attr->vlan_id = rdma_get_vlan_id(&sgid);
+ if (!(*qp_attr_mask & IB_QP_VID))
+ qp_attr->vlan_id = rdma_get_vlan_id(&sgid);
} else {
ret = rdma_addr_find_dmac_by_grh(&sgid, &qp_attr->ah_attr.grh.dgid,
qp_attr->ah_attr.dmac, &qp_attr->vlan_id);
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index 2d5cbf4363e..bdf3507810c 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -476,7 +476,7 @@ static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
c2mr->umem->page_size,
i,
length,
- c2mr->umem->offset,
+ ib_umem_offset(c2mr->umem),
&kva,
c2_convert_access(acc),
c2mr);
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 4b8c6116c05..9edc200b311 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1640,7 +1640,8 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
__state_set(&ep->com, MPA_REQ_RCVD);
/* drive upcall */
- mutex_lock(&ep->parent_ep->com.mutex);
+ mutex_lock_nested(&ep->parent_ep->com.mutex,
+ SINGLE_DEPTH_NESTING);
if (ep->parent_ep->com.state != DEAD) {
if (connect_request_upcall(ep))
abort_connection(ep, skb, GFP_KERNEL);
@@ -3126,6 +3127,8 @@ static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
err = c4iw_wait_for_reply(&ep->com.dev->rdev,
&ep->com.wr_wait,
0, 0, __func__);
+ else if (err > 0)
+ err = net_xmit_errno(err);
if (err)
pr_err("cxgb4_create_server6/filter failed err %d stid %d laddr %pI6 lport %d\n",
err, ep->stid,
@@ -3159,6 +3162,8 @@ static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
err = c4iw_wait_for_reply(&ep->com.dev->rdev,
&ep->com.wr_wait,
0, 0, __func__);
+ else if (err > 0)
+ err = net_xmit_errno(err);
}
if (err)
pr_err("cxgb4_create_server/filter failed err %d stid %d laddr %pI4 lport %d\n"
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 72f1f052e88..eb5df4e6270 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -670,7 +670,7 @@ static int ep_open(struct inode *inode, struct file *file)
idr_for_each(&epd->devp->stid_idr, count_idrs, &count);
spin_unlock_irq(&epd->devp->lock);
- epd->bufsize = count * 160;
+ epd->bufsize = count * 240;
epd->buf = vmalloc(epd->bufsize);
if (!epd->buf) {
ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 0744455cd88..cb43c2299ac 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -50,6 +50,13 @@ static int inline_threshold = C4IW_INLINE_THRESHOLD;
module_param(inline_threshold, int, 0644);
MODULE_PARM_DESC(inline_threshold, "inline vs dsgl threshold (default=128)");
+static int mr_exceeds_hw_limits(struct c4iw_dev *dev, u64 length)
+{
+ return (is_t4(dev->rdev.lldi.adapter_type) ||
+ is_t5(dev->rdev.lldi.adapter_type)) &&
+ length >= 8*1024*1024*1024ULL;
+}
+
static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
u32 len, dma_addr_t data, int wait)
{
@@ -369,9 +376,11 @@ static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
int ret;
ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
- FW_RI_STAG_NSMR, mhp->attr.perms,
+ FW_RI_STAG_NSMR, mhp->attr.len ?
+ mhp->attr.perms : 0,
mhp->attr.mw_bind_enable, mhp->attr.zbva,
- mhp->attr.va_fbo, mhp->attr.len, shift - 12,
+ mhp->attr.va_fbo, mhp->attr.len ?
+ mhp->attr.len : -1, shift - 12,
mhp->attr.pbl_size, mhp->attr.pbl_addr);
if (ret)
return ret;
@@ -536,6 +545,11 @@ int c4iw_reregister_phys_mem(struct ib_mr *mr, int mr_rereg_mask,
return ret;
}
+ if (mr_exceeds_hw_limits(rhp, total_size)) {
+ kfree(page_list);
+ return -EINVAL;
+ }
+
ret = reregister_mem(rhp, php, &mh, shift, npages);
kfree(page_list);
if (ret)
@@ -596,6 +610,12 @@ struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
if (ret)
goto err;
+ if (mr_exceeds_hw_limits(rhp, total_size)) {
+ kfree(page_list);
+ ret = -EINVAL;
+ goto err;
+ }
+
ret = alloc_pbl(mhp, npages);
if (ret) {
kfree(page_list);
@@ -699,6 +719,10 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
php = to_c4iw_pd(pd);
rhp = php->rhp;
+
+ if (mr_exceeds_hw_limits(rhp, length))
+ return ERR_PTR(-EINVAL);
+
mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
if (!mhp)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 2ed3ece2b2e..bb85d479e66 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1538,9 +1538,9 @@ err:
set_state(qhp, C4IW_QP_STATE_ERROR);
free = 1;
abort = 1;
- wake_up(&qhp->wait);
BUG_ON(!ep);
flush_qp(qhp);
+ wake_up(&qhp->wait);
out:
mutex_unlock(&qhp->mutex);
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index 3488e8c9fcb..f914b30999f 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -399,7 +399,7 @@ reg_user_mr_fallback:
pginfo.num_kpages = num_kpages;
pginfo.num_hwpages = num_hwpages;
pginfo.u.usr.region = e_mr->umem;
- pginfo.next_hwpage = e_mr->umem->offset / hwpage_size;
+ pginfo.next_hwpage = ib_umem_offset(e_mr->umem) / hwpage_size;
pginfo.u.usr.next_sg = pginfo.u.usr.region->sg_head.sgl;
ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags,
e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c
index 5e61e9bff69..c7278f6a821 100644
--- a/drivers/infiniband/hw/ipath/ipath_mr.c
+++ b/drivers/infiniband/hw/ipath/ipath_mr.c
@@ -214,7 +214,7 @@ struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mr->mr.user_base = start;
mr->mr.iova = virt_addr;
mr->mr.length = length;
- mr->mr.offset = umem->offset;
+ mr->mr.offset = ib_umem_offset(umem);
mr->mr.access_flags = mr_access_flags;
mr->mr.max_segs = n;
mr->umem = umem;
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 8f9325cfc85..c36ccbd9a64 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -223,7 +223,6 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
if (flags & IB_MR_REREG_TRANS) {
int shift;
- int err;
int n;
mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile
index 4ea0135af48..27a70159e2e 100644
--- a/drivers/infiniband/hw/mlx5/Makefile
+++ b/drivers/infiniband/hw/mlx5/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_MLX5_INFINIBAND) += mlx5_ib.o
mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq.o mr.o ah.o mad.o
+mlx5_ib-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += odp.o
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 1ba6c42e4df..8a87404e9c7 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -244,6 +244,12 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->max_mcast_grp;
props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ if (dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG)
+ props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
+ props->odp_caps = dev->odp_caps;
+#endif
+
out:
kfree(in_mad);
kfree(out_mad);
@@ -568,6 +574,10 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
goto out_count;
}
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
+#endif
+
INIT_LIST_HEAD(&context->db_page_list);
mutex_init(&context->db_page_mutex);
@@ -858,7 +868,7 @@ static ssize_t show_reg_pages(struct device *device,
struct mlx5_ib_dev *dev =
container_of(device, struct mlx5_ib_dev, ib_dev.dev);
- return sprintf(buf, "%d\n", dev->mdev->priv.reg_pages);
+ return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
}
static ssize_t show_hca(struct device *device, struct device_attribute *attr,
@@ -1321,6 +1331,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
(1ull << IB_USER_VERBS_CMD_OPEN_QP);
+ dev->ib_dev.uverbs_ex_cmd_mask =
+ (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE);
dev->ib_dev.query_device = mlx5_ib_query_device;
dev->ib_dev.query_port = mlx5_ib_query_port;
@@ -1366,6 +1378,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->ib_dev.free_fast_reg_page_list = mlx5_ib_free_fast_reg_page_list;
dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
+ mlx5_ib_internal_query_odp_caps(dev);
+
if (mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_XRC) {
dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
@@ -1379,16 +1393,19 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
goto err_eqs;
mutex_init(&dev->cap_mask_mutex);
- spin_lock_init(&dev->mr_lock);
err = create_dev_resources(&dev->devr);
if (err)
goto err_eqs;
- err = ib_register_device(&dev->ib_dev, NULL);
+ err = mlx5_ib_odp_init_one(dev);
if (err)
goto err_rsrc;
+ err = ib_register_device(&dev->ib_dev, NULL);
+ if (err)
+ goto err_odp;
+
err = create_umr_res(dev);
if (err)
goto err_dev;
@@ -1410,6 +1427,9 @@ err_umrc:
err_dev:
ib_unregister_device(&dev->ib_dev);
+err_odp:
+ mlx5_ib_odp_remove_one(dev);
+
err_rsrc:
destroy_dev_resources(&dev->devr);
@@ -1425,8 +1445,10 @@ err_dealloc:
static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
{
struct mlx5_ib_dev *dev = context;
+
ib_unregister_device(&dev->ib_dev);
destroy_umrc_res(dev);
+ mlx5_ib_odp_remove_one(dev);
destroy_dev_resources(&dev->devr);
free_comp_eqs(dev);
ib_dealloc_device(&dev->ib_dev);
@@ -1440,15 +1462,30 @@ static struct mlx5_interface mlx5_ib_interface = {
static int __init mlx5_ib_init(void)
{
+ int err;
+
if (deprecated_prof_sel != 2)
pr_warn("prof_sel is deprecated for mlx5_ib, set it for mlx5_core\n");
- return mlx5_register_interface(&mlx5_ib_interface);
+ err = mlx5_ib_odp_init();
+ if (err)
+ return err;
+
+ err = mlx5_register_interface(&mlx5_ib_interface);
+ if (err)
+ goto clean_odp;
+
+ return err;
+
+clean_odp:
+ mlx5_ib_odp_cleanup();
+ return err;
}
static void __exit mlx5_ib_cleanup(void)
{
mlx5_unregister_interface(&mlx5_ib_interface);
+ mlx5_ib_odp_cleanup();
}
module_init(mlx5_ib_init);
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index dae07eae950..b56e4c5593e 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -32,6 +32,7 @@
#include <linux/module.h>
#include <rdma/ib_umem.h>
+#include <rdma/ib_umem_odp.h>
#include "mlx5_ib.h"
/* @umem: umem object to scan
@@ -57,6 +58,17 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
int entry;
unsigned long page_shift = ilog2(umem->page_size);
+ /* With ODP we must always match OS page size. */
+ if (umem->odp_data) {
+ *count = ib_umem_page_count(umem);
+ *shift = PAGE_SHIFT;
+ *ncont = *count;
+ if (order)
+ *order = ilog2(roundup_pow_of_two(*count));
+
+ return;
+ }
+
addr = addr >> page_shift;
tmp = (unsigned long)addr;
m = find_first_bit(&tmp, sizeof(tmp));
@@ -108,8 +120,36 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
*count = i;
}
-void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
- int page_shift, __be64 *pas, int umr)
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+static u64 umem_dma_to_mtt(dma_addr_t umem_dma)
+{
+ u64 mtt_entry = umem_dma & ODP_DMA_ADDR_MASK;
+
+ if (umem_dma & ODP_READ_ALLOWED_BIT)
+ mtt_entry |= MLX5_IB_MTT_READ;
+ if (umem_dma & ODP_WRITE_ALLOWED_BIT)
+ mtt_entry |= MLX5_IB_MTT_WRITE;
+
+ return mtt_entry;
+}
+#endif
+
+/*
+ * Populate the given array with bus addresses from the umem.
+ *
+ * dev - mlx5_ib device
+ * umem - umem to use to fill the pages
+ * page_shift - determines the page size used in the resulting array
+ * offset - offset into the umem to start from,
+ * only implemented for ODP umems
+ * num_pages - total number of pages to fill
+ * pas - bus addresses array to fill
+ * access_flags - access flags to set on all present pages.
+ *                use enum mlx5_ib_mtt_access_flags for this.
+ */
+void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
+ int page_shift, size_t offset, size_t num_pages,
+ __be64 *pas, int access_flags)
{
unsigned long umem_page_shift = ilog2(umem->page_size);
int shift = page_shift - umem_page_shift;
@@ -120,6 +160,21 @@ void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
int len;
struct scatterlist *sg;
int entry;
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ const bool odp = umem->odp_data != NULL;
+
+ if (odp) {
+ WARN_ON(shift != 0);
+ WARN_ON(access_flags != (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE));
+
+ for (i = 0; i < num_pages; ++i) {
+ dma_addr_t pa = umem->odp_data->dma_list[offset + i];
+
+ pas[i] = cpu_to_be64(umem_dma_to_mtt(pa));
+ }
+ return;
+ }
+#endif
i = 0;
for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
@@ -128,8 +183,7 @@ void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
for (k = 0; k < len; k++) {
if (!(i & mask)) {
cur = base + (k << umem_page_shift);
- if (umr)
- cur |= 3;
+ cur |= access_flags;
pas[i >> shift] = cpu_to_be64(cur);
mlx5_ib_dbg(dev, "pas[%d] 0x%llx\n",
@@ -142,6 +196,13 @@ void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
}
}
+void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
+ int page_shift, __be64 *pas, int access_flags)
+{
+ return __mlx5_ib_populate_pas(dev, umem, page_shift, 0,
+ ib_umem_num_pages(umem), pas,
+ access_flags);
+}
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset)
{
u64 page_size;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 386780f0d1e..83f22fe297c 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -111,6 +111,8 @@ struct mlx5_ib_pd {
*/
#define MLX5_IB_SEND_UMR_UNREG IB_SEND_RESERVED_START
+#define MLX5_IB_SEND_UMR_FAIL_IF_FREE (IB_SEND_RESERVED_START << 1)
+#define MLX5_IB_SEND_UMR_UPDATE_MTT (IB_SEND_RESERVED_START << 2)
#define MLX5_IB_QPT_REG_UMR IB_QPT_RESERVED1
#define MLX5_IB_WR_UMR IB_WR_RESERVED1
@@ -147,6 +149,29 @@ enum {
MLX5_QP_EMPTY
};
+/*
+ * Connect-IB can trigger up to four concurrent pagefaults
+ * per QP.
+ */
+enum mlx5_ib_pagefault_context {
+ MLX5_IB_PAGEFAULT_RESPONDER_READ,
+ MLX5_IB_PAGEFAULT_REQUESTOR_READ,
+ MLX5_IB_PAGEFAULT_RESPONDER_WRITE,
+ MLX5_IB_PAGEFAULT_REQUESTOR_WRITE,
+ MLX5_IB_PAGEFAULT_CONTEXTS
+};
+
+static inline enum mlx5_ib_pagefault_context
+ mlx5_ib_get_pagefault_context(struct mlx5_pagefault *pagefault)
+{
+ return pagefault->flags & (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE);
+}
+
+struct mlx5_ib_pfault {
+ struct work_struct work;
+ struct mlx5_pagefault mpfault;
+};
+
struct mlx5_ib_qp {
struct ib_qp ibqp;
struct mlx5_core_qp mqp;
@@ -192,6 +217,21 @@ struct mlx5_ib_qp {
/* Store signature errors */
bool signature_en;
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ /*
+ * A flag that is true for QP's that are in a state that doesn't
+ * allow page faults, and shouldn't schedule any more faults.
+ */
+ int disable_page_faults;
+ /*
+ * The disable_page_faults_lock protects a QP's disable_page_faults
+ * field, allowing for a thread to atomically check whether the QP
+ * allows page faults, and if so schedule a page fault.
+ */
+ spinlock_t disable_page_faults_lock;
+ struct mlx5_ib_pfault pagefaults[MLX5_IB_PAGEFAULT_CONTEXTS];
+#endif
};
struct mlx5_ib_cq_buf {
@@ -206,6 +246,19 @@ enum mlx5_ib_qp_flags {
MLX5_IB_QP_SIGNATURE_HANDLING = 1 << 1,
};
+struct mlx5_umr_wr {
+ union {
+ u64 virt_addr;
+ u64 offset;
+ } target;
+ struct ib_pd *pd;
+ unsigned int page_shift;
+ unsigned int npages;
+ u32 length;
+ int access_flags;
+ u32 mkey;
+};
+
struct mlx5_shared_mr_info {
int mr_id;
struct ib_umem *umem;
@@ -253,6 +306,13 @@ struct mlx5_ib_xrcd {
u32 xrcdn;
};
+enum mlx5_ib_mtt_access_flags {
+ MLX5_IB_MTT_READ = (1 << 0),
+ MLX5_IB_MTT_WRITE = (1 << 1),
+};
+
+#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
+
struct mlx5_ib_mr {
struct ib_mr ibmr;
struct mlx5_core_mr mmr;
@@ -261,12 +321,11 @@ struct mlx5_ib_mr {
struct list_head list;
int order;
int umred;
- __be64 *pas;
- dma_addr_t dma;
int npages;
struct mlx5_ib_dev *dev;
struct mlx5_create_mkey_mbox_out out;
struct mlx5_core_sig_ctx *sig;
+ int live;
};
struct mlx5_ib_fast_reg_page_list {
@@ -372,11 +431,18 @@ struct mlx5_ib_dev {
struct umr_common umrc;
/* sync used page count stats
*/
- spinlock_t mr_lock;
struct mlx5_ib_resources devr;
struct mlx5_mr_cache cache;
struct timer_list delay_timer;
int fill_delay;
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ struct ib_odp_caps odp_caps;
+ /*
+ * Sleepable RCU that prevents destruction of MRs while they are still
+ * being used by a page fault handler.
+ */
+ struct srcu_struct mr_srcu;
+#endif
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -490,6 +556,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
struct ib_recv_wr **bad_wr);
void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
+int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
+ void *buffer, u32 length);
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
int vector, struct ib_ucontext *context,
struct ib_udata *udata);
@@ -502,6 +570,8 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata);
+int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index,
+ int npages, int zap);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
int mlx5_ib_destroy_mr(struct ib_mr *ibmr);
struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd,
@@ -533,8 +603,11 @@ int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
int *ncont, int *order);
+void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
+ int page_shift, size_t offset, size_t num_pages,
+ __be64 *pas, int access_flags);
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
- int page_shift, __be64 *pas, int umr);
+ int page_shift, __be64 *pas, int access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
@@ -544,6 +617,38 @@ void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
struct ib_mr_status *mr_status);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+extern struct workqueue_struct *mlx5_ib_page_fault_wq;
+
+int mlx5_ib_internal_query_odp_caps(struct mlx5_ib_dev *dev);
+void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
+ struct mlx5_ib_pfault *pfault);
+void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp);
+int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
+void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev);
+int __init mlx5_ib_odp_init(void);
+void mlx5_ib_odp_cleanup(void);
+void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp);
+void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp);
+void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
+ unsigned long end);
+
+#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
+static inline int mlx5_ib_internal_query_odp_caps(struct mlx5_ib_dev *dev)
+{
+ return 0;
+}
+
+static inline void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp) {}
+static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
+static inline void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev) {}
+static inline int mlx5_ib_odp_init(void) { return 0; }
+static inline void mlx5_ib_odp_cleanup(void) {}
+static inline void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp) {}
+static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp) {}
+
+#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
+
static inline void init_query_mad(struct ib_smp *mad)
{
mad->base_version = 1;
@@ -561,4 +666,7 @@ static inline u8 convert_access(int acc)
MLX5_PERM_LOCAL_READ;
}
+#define MLX5_MAX_UMR_SHIFT 16
+#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)
+
#endif /* MLX5_IB_H */
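The MLX5_IB_MTT_READ/WRITE flags introduced above make the magic constant that mlx5_ib_populate_pas() used for UMR registrations explicit: MLX5_IB_MTT_PRESENT is exactly the old literal 3. A trivial stand-alone check using the values from the enum in this header:

#include <stdio.h>

/* Values taken from enum mlx5_ib_mtt_access_flags added above. */
enum {
	MTT_READ    = 1 << 0,
	MTT_WRITE   = 1 << 1,
	MTT_PRESENT = MTT_READ | MTT_WRITE,
};

int main(void)
{
	unsigned long long cur = 0x7f1234560000ULL;	/* example page address */

	/* Old code: cur |= 3;  new code: cur |= access_flags, where the
	 * registration path passes MLX5_IB_MTT_PRESENT. */
	printf("0x%llx (present mask = %d)\n", cur | MTT_PRESENT, MTT_PRESENT);
	return 0;
}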
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 5a80dd99376..32a28bd50b2 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -37,21 +37,34 @@
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
+#include <rdma/ib_umem_odp.h>
+#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"
enum {
MAX_PENDING_REG_MR = 8,
};
-enum {
- MLX5_UMR_ALIGN = 2048
-};
+#define MLX5_UMR_ALIGN 2048
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+static __be64 mlx5_ib_update_mtt_emergency_buffer[
+ MLX5_UMR_MTT_MIN_CHUNK_SIZE/sizeof(__be64)]
+ __aligned(MLX5_UMR_ALIGN);
+static DEFINE_MUTEX(mlx5_ib_update_mtt_emergency_buffer_mutex);
+#endif
+
+static int clean_mr(struct mlx5_ib_mr *mr);
-static __be64 *mr_align(__be64 *ptr, int align)
+static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
- unsigned long mask = align - 1;
+ int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
- return (__be64 *)(((unsigned long)ptr + mask) & ~mask);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ /* Wait until all page fault handlers using the mr complete. */
+ synchronize_srcu(&dev->mr_srcu);
+#endif
+
+ return err;
}
static int order2idx(struct mlx5_ib_dev *dev, int order)
@@ -146,7 +159,7 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
mr->order = ent->order;
mr->umred = 1;
mr->dev = dev;
- in->seg.status = 1 << 6;
+ in->seg.status = MLX5_MKEY_STATUS_FREE;
in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
in->seg.flags = MLX5_ACCESS_MODE_MTT | MLX5_PERM_UMR_EN;
@@ -191,7 +204,7 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
ent->cur--;
ent->size--;
spin_unlock_irq(&ent->lock);
- err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
+ err = destroy_mkey(dev, mr);
if (err)
mlx5_ib_warn(dev, "failed destroy mkey\n");
else
@@ -482,7 +495,7 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
ent->cur--;
ent->size--;
spin_unlock_irq(&ent->lock);
- err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
+ err = destroy_mkey(dev, mr);
if (err)
mlx5_ib_warn(dev, "failed destroy mkey\n");
else
@@ -668,7 +681,7 @@ static int get_octo_len(u64 addr, u64 len, int page_size)
static int use_umr(int order)
{
- return order <= 17;
+ return order <= MLX5_MAX_UMR_SHIFT;
}
static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
@@ -678,6 +691,7 @@ static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct ib_mr *mr = dev->umrc.mr;
+ struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;
sg->addr = dma;
sg->length = ALIGN(sizeof(u64) * n, 64);
@@ -692,21 +706,24 @@ static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
wr->num_sge = 0;
wr->opcode = MLX5_IB_WR_UMR;
- wr->wr.fast_reg.page_list_len = n;
- wr->wr.fast_reg.page_shift = page_shift;
- wr->wr.fast_reg.rkey = key;
- wr->wr.fast_reg.iova_start = virt_addr;
- wr->wr.fast_reg.length = len;
- wr->wr.fast_reg.access_flags = access_flags;
- wr->wr.fast_reg.page_list = (struct ib_fast_reg_page_list *)pd;
+
+ umrwr->npages = n;
+ umrwr->page_shift = page_shift;
+ umrwr->mkey = key;
+ umrwr->target.virt_addr = virt_addr;
+ umrwr->length = len;
+ umrwr->access_flags = access_flags;
+ umrwr->pd = pd;
}
static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
struct ib_send_wr *wr, u32 key)
{
- wr->send_flags = MLX5_IB_SEND_UMR_UNREG;
+ struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;
+
+ wr->send_flags = MLX5_IB_SEND_UMR_UNREG | MLX5_IB_SEND_UMR_FAIL_IF_FREE;
wr->opcode = MLX5_IB_WR_UMR;
- wr->wr.fast_reg.rkey = key;
+ umrwr->mkey = key;
}
void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context)
@@ -742,7 +759,10 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
struct ib_send_wr wr, *bad;
struct mlx5_ib_mr *mr;
struct ib_sge sg;
- int size = sizeof(u64) * npages;
+ int size;
+ __be64 *mr_pas;
+ __be64 *pas;
+ dma_addr_t dma;
int err = 0;
int i;
@@ -761,25 +781,31 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
if (!mr)
return ERR_PTR(-EAGAIN);
- mr->pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
- if (!mr->pas) {
+ /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
+ * To avoid copying garbage after the pas array, we allocate
+ * a little more. */
+ size = ALIGN(sizeof(u64) * npages, MLX5_UMR_MTT_ALIGNMENT);
+ mr_pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
+ if (!mr_pas) {
err = -ENOMEM;
goto free_mr;
}
- mlx5_ib_populate_pas(dev, umem, page_shift,
- mr_align(mr->pas, MLX5_UMR_ALIGN), 1);
+ pas = PTR_ALIGN(mr_pas, MLX5_UMR_ALIGN);
+ mlx5_ib_populate_pas(dev, umem, page_shift, pas, MLX5_IB_MTT_PRESENT);
+ /* Clear padding after the actual pages. */
+ memset(pas + npages, 0, size - npages * sizeof(u64));
- mr->dma = dma_map_single(ddev, mr_align(mr->pas, MLX5_UMR_ALIGN), size,
- DMA_TO_DEVICE);
- if (dma_mapping_error(ddev, mr->dma)) {
+ dma = dma_map_single(ddev, pas, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(ddev, dma)) {
err = -ENOMEM;
goto free_pas;
}
memset(&wr, 0, sizeof(wr));
wr.wr_id = (u64)(unsigned long)&umr_context;
- prep_umr_reg_wqe(pd, &wr, &sg, mr->dma, npages, mr->mmr.key, page_shift, virt_addr, len, access_flags);
+ prep_umr_reg_wqe(pd, &wr, &sg, dma, npages, mr->mmr.key, page_shift,
+ virt_addr, len, access_flags);
mlx5_ib_init_umr_context(&umr_context);
down(&umrc->sem);
@@ -799,12 +825,14 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
mr->mmr.size = len;
mr->mmr.pd = to_mpd(pd)->pdn;
+ mr->live = 1;
+
unmap_dma:
up(&umrc->sem);
- dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
+ dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);
free_pas:
- kfree(mr->pas);
+ kfree(mr_pas);
free_mr:
if (err) {
@@ -815,6 +843,128 @@ free_mr:
return mr;
}
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
+ int zap)
+{
+ struct mlx5_ib_dev *dev = mr->dev;
+ struct device *ddev = dev->ib_dev.dma_device;
+ struct umr_common *umrc = &dev->umrc;
+ struct mlx5_ib_umr_context umr_context;
+ struct ib_umem *umem = mr->umem;
+ int size;
+ __be64 *pas;
+ dma_addr_t dma;
+ struct ib_send_wr wr, *bad;
+ struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr.wr.fast_reg;
+ struct ib_sge sg;
+ int err = 0;
+ const int page_index_alignment = MLX5_UMR_MTT_ALIGNMENT / sizeof(u64);
+ const int page_index_mask = page_index_alignment - 1;
+ size_t pages_mapped = 0;
+ size_t pages_to_map = 0;
+ size_t pages_iter = 0;
+ int use_emergency_buf = 0;
+
+ /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
+ * so we need to align the offset and length accordingly */
+ if (start_page_index & page_index_mask) {
+ npages += start_page_index & page_index_mask;
+ start_page_index &= ~page_index_mask;
+ }
+
+ pages_to_map = ALIGN(npages, page_index_alignment);
+
+ if (start_page_index + pages_to_map > MLX5_MAX_UMR_PAGES)
+ return -EINVAL;
+
+ size = sizeof(u64) * pages_to_map;
+ size = min_t(int, PAGE_SIZE, size);
+ /* We allocate with GFP_ATOMIC to avoid recursion into page-reclaim
+ * code, when we are called from an invalidation. The pas buffer must
+ * be 2k-aligned for Connect-IB. */
+ pas = (__be64 *)get_zeroed_page(GFP_ATOMIC);
+ if (!pas) {
+ mlx5_ib_warn(dev, "unable to allocate memory during MTT update, falling back to slower chunked mechanism.\n");
+ pas = mlx5_ib_update_mtt_emergency_buffer;
+ size = MLX5_UMR_MTT_MIN_CHUNK_SIZE;
+ use_emergency_buf = 1;
+ mutex_lock(&mlx5_ib_update_mtt_emergency_buffer_mutex);
+ memset(pas, 0, size);
+ }
+ pages_iter = size / sizeof(u64);
+ dma = dma_map_single(ddev, pas, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(ddev, dma)) {
+ mlx5_ib_err(dev, "unable to map DMA during MTT update.\n");
+ err = -ENOMEM;
+ goto free_pas;
+ }
+
+ for (pages_mapped = 0;
+ pages_mapped < pages_to_map && !err;
+ pages_mapped += pages_iter, start_page_index += pages_iter) {
+ dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);
+
+ npages = min_t(size_t,
+ pages_iter,
+ ib_umem_num_pages(umem) - start_page_index);
+
+ if (!zap) {
+ __mlx5_ib_populate_pas(dev, umem, PAGE_SHIFT,
+ start_page_index, npages, pas,
+ MLX5_IB_MTT_PRESENT);
+ /* Clear padding after the pages brought from the
+ * umem. */
+ memset(pas + npages, 0, size - npages * sizeof(u64));
+ }
+
+ dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);
+
+ memset(&wr, 0, sizeof(wr));
+ wr.wr_id = (u64)(unsigned long)&umr_context;
+
+ sg.addr = dma;
+ sg.length = ALIGN(npages * sizeof(u64),
+ MLX5_UMR_MTT_ALIGNMENT);
+ sg.lkey = dev->umrc.mr->lkey;
+
+ wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE |
+ MLX5_IB_SEND_UMR_UPDATE_MTT;
+ wr.sg_list = &sg;
+ wr.num_sge = 1;
+ wr.opcode = MLX5_IB_WR_UMR;
+ umrwr->npages = sg.length / sizeof(u64);
+ umrwr->page_shift = PAGE_SHIFT;
+ umrwr->mkey = mr->mmr.key;
+ umrwr->target.offset = start_page_index;
+
+ mlx5_ib_init_umr_context(&umr_context);
+ down(&umrc->sem);
+ err = ib_post_send(umrc->qp, &wr, &bad);
+ if (err) {
+ mlx5_ib_err(dev, "UMR post send failed, err %d\n", err);
+ } else {
+ wait_for_completion(&umr_context.done);
+ if (umr_context.status != IB_WC_SUCCESS) {
+ mlx5_ib_err(dev, "UMR completion failed, code %d\n",
+ umr_context.status);
+ err = -EFAULT;
+ }
+ }
+ up(&umrc->sem);
+ }
+ dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);
+
+free_pas:
+ if (!use_emergency_buf)
+ free_page((unsigned long)pas);
+ else
+ mutex_unlock(&mlx5_ib_update_mtt_emergency_buffer_mutex);
+
+ return err;
+}
+#endif
+
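mlx5_ib_update_mtt() above must post MTT updates in device-aligned units, so it rounds start_page_index down and npages up to a multiple of MLX5_UMR_MTT_ALIGNMENT / sizeof(u64). A stand-alone sketch of that arithmetic, assuming a 64-byte alignment unit (eight 8-byte MTT entries) purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define UMR_MTT_ALIGNMENT 64	/* bytes; assumed value for illustration */

int main(void)
{
	const uint64_t align = UMR_MTT_ALIGNMENT / sizeof(uint64_t);	/* 8 entries */
	const uint64_t mask  = align - 1;
	uint64_t start_page_index = 13;		/* first page to update */
	uint64_t npages = 3;			/* pages requested */

	if (start_page_index & mask) {			/* round the start down ... */
		npages += start_page_index & mask;	/* ... and grow the count */
		start_page_index &= ~mask;
	}
	npages = (npages + mask) & ~mask;		/* ALIGN(npages, align) */

	printf("start=%llu npages=%llu\n",
	       (unsigned long long)start_page_index,
	       (unsigned long long)npages);		/* -> start=8 npages=8 */
	return 0;
}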
static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
u64 length, struct ib_umem *umem,
int npages, int page_shift,
@@ -825,6 +975,8 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
struct mlx5_ib_mr *mr;
int inlen;
int err;
+ bool pg_cap = !!(dev->mdev->caps.gen.flags &
+ MLX5_DEV_CAP_FLAG_ON_DMND_PG);
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
@@ -836,8 +988,12 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
err = -ENOMEM;
goto err_1;
}
- mlx5_ib_populate_pas(dev, umem, page_shift, in->pas, 0);
+ mlx5_ib_populate_pas(dev, umem, page_shift, in->pas,
+ pg_cap ? MLX5_IB_MTT_PRESENT : 0);
+ /* The MLX5_MKEY_INBOX_PG_ACCESS bit allows setting the access flags
+ * in the page list submitted with the command. */
+ in->flags = pg_cap ? cpu_to_be32(MLX5_MKEY_INBOX_PG_ACCESS) : 0;
in->seg.flags = convert_access(access_flags) |
MLX5_ACCESS_MODE_MTT;
in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
@@ -856,6 +1012,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
goto err_2;
}
mr->umem = umem;
+ mr->live = 1;
kvfree(in);
mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmr.key);
@@ -910,6 +1067,10 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mlx5_ib_dbg(dev, "cache empty for order %d", order);
mr = NULL;
}
+ } else if (access_flags & IB_ACCESS_ON_DEMAND) {
+ err = -EINVAL;
+ pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB");
+ goto error;
}
if (!mr)
@@ -925,16 +1086,51 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mr->umem = umem;
mr->npages = npages;
- spin_lock(&dev->mr_lock);
- dev->mdev->priv.reg_pages += npages;
- spin_unlock(&dev->mr_lock);
+ atomic_add(npages, &dev->mdev->priv.reg_pages);
mr->ibmr.lkey = mr->mmr.key;
mr->ibmr.rkey = mr->mmr.key;
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ if (umem->odp_data) {
+ /*
+ * This barrier prevents the compiler from moving the
+ * setting of umem->odp_data->private to point to our
+ * MR, before reg_umr has finished, to ensure that the MR
+ * initialization has finished before starting to
+ * handle invalidations.
+ */
+ smp_wmb();
+ mr->umem->odp_data->private = mr;
+ /*
+ * Make sure we will see the new
+ * umem->odp_data->private value in the invalidation
+ * routines, before we can get page faults on the
+ * MR. Page faults can happen once we put the MR in
+ * the tree, below this line. Without the barrier,
+ * there can be a fault handling and an invalidation
+ * before umem->odp_data->private == mr is visible to
+ * the invalidation handler.
+ */
+ smp_wmb();
+ }
+#endif
+
return &mr->ibmr;
error:
+ /*
+ * Destroy the umem *before* destroying the MR, to ensure we
+ * will not have any in-flight notifiers when destroying the
+ * MR.
+ *
+ * As the MR is completely invalid to begin with, and this
+ * error path is only taken if we can't push the mr entry into
+ * the pagefault tree, this is safe.
+ */
+
ib_umem_release(umem);
+ /* Kill the MR, and return an error code. */
+ clean_mr(mr);
return ERR_PTR(err);
}
@@ -971,17 +1167,14 @@ error:
return err;
}
-int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
+static int clean_mr(struct mlx5_ib_mr *mr)
{
- struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
- struct mlx5_ib_mr *mr = to_mmr(ibmr);
- struct ib_umem *umem = mr->umem;
- int npages = mr->npages;
+ struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
int umred = mr->umred;
int err;
if (!umred) {
- err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
+ err = destroy_mkey(dev, mr);
if (err) {
mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
mr->mmr.key, err);
@@ -996,15 +1189,47 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
free_cached_mr(dev, mr);
}
- if (umem) {
+ if (!umred)
+ kfree(mr);
+
+ return 0;
+}
+
+int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ int npages = mr->npages;
+ struct ib_umem *umem = mr->umem;
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ if (umem && umem->odp_data) {
+ /* Prevent new page faults from succeeding */
+ mr->live = 0;
+ /* Wait for all running page-fault handlers to finish. */
+ synchronize_srcu(&dev->mr_srcu);
+ /* Destroy all page mappings */
+ mlx5_ib_invalidate_range(umem, ib_umem_start(umem),
+ ib_umem_end(umem));
+ /*
+ * We kill the umem before the MR for ODP,
+ * so that there will not be any invalidations in
+ * flight looking at the *mr struct.
+ */
ib_umem_release(umem);
- spin_lock(&dev->mr_lock);
- dev->mdev->priv.reg_pages -= npages;
- spin_unlock(&dev->mr_lock);
+ atomic_sub(npages, &dev->mdev->priv.reg_pages);
+
+ /* Avoid double-freeing the umem. */
+ umem = NULL;
}
+#endif
- if (!umred)
- kfree(mr);
+ clean_mr(mr);
+
+ if (umem) {
+ ib_umem_release(umem);
+ atomic_sub(npages, &dev->mdev->priv.reg_pages);
+ }
return 0;
}
@@ -1028,7 +1253,7 @@ struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd,
goto err_free;
}
- in->seg.status = 1 << 6; /* free */
+ in->seg.status = MLX5_MKEY_STATUS_FREE;
in->seg.xlt_oct_size = cpu_to_be32(ndescs);
in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
@@ -1113,7 +1338,7 @@ int mlx5_ib_destroy_mr(struct ib_mr *ibmr)
kfree(mr->sig);
}
- err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
+ err = destroy_mkey(dev, mr);
if (err) {
mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
mr->mmr.key, err);
@@ -1143,7 +1368,7 @@ struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd,
goto err_free;
}
- in->seg.status = 1 << 6; /* free */
+ in->seg.status = MLX5_MKEY_STATUS_FREE;
in->seg.xlt_oct_size = cpu_to_be32((max_page_list_len + 1) / 2);
in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
in->seg.flags = MLX5_PERM_UMR_EN | MLX5_ACCESS_MODE_MTT;
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
new file mode 100644
index 00000000000..a2c541c4809
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -0,0 +1,798 @@
+/*
+ * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/ib_umem.h>
+#include <rdma/ib_umem_odp.h>
+
+#include "mlx5_ib.h"
+
+#define MAX_PREFETCH_LEN (4*1024*1024U)
+
+/* Timeout in ms to wait for an active mmu notifier to complete when handling
+ * a pagefault. */
+#define MMU_NOTIFIER_TIMEOUT 1000
+
+struct workqueue_struct *mlx5_ib_page_fault_wq;
+
+void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
+ unsigned long end)
+{
+ struct mlx5_ib_mr *mr;
+ const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT / sizeof(u64)) - 1;
+ u64 idx = 0, blk_start_idx = 0;
+ int in_block = 0;
+ u64 addr;
+
+ if (!umem || !umem->odp_data) {
+ pr_err("invalidation called on NULL umem or non-ODP umem\n");
+ return;
+ }
+
+ mr = umem->odp_data->private;
+
+ if (!mr || !mr->ibmr.pd)
+ return;
+
+ start = max_t(u64, ib_umem_start(umem), start);
+ end = min_t(u64, ib_umem_end(umem), end);
+
+ /*
+ * Iteration one - zap the HW's MTTs. The notifiers_count ensures that
+ * while we are doing the invalidation, no page fault will attempt to
+ * overwrite the same MTTs. Concurrent invalidations might race us,
+ * but they will write 0s as well, so no difference in the end result.
+ */
+
+ for (addr = start; addr < end; addr += (u64)umem->page_size) {
+ idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
+ /*
+ * Strive to write the MTTs in chunks, but avoid overwriting
+ * non-existing MTTs. The heuristic here can be improved to
+ * estimate the cost of another UMR vs. the cost of a bigger
+ * UMR.
+ */
+ if (umem->odp_data->dma_list[idx] &
+ (ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT)) {
+ if (!in_block) {
+ blk_start_idx = idx;
+ in_block = 1;
+ }
+ } else {
+ u64 umr_offset = idx & umr_block_mask;
+
+ if (in_block && umr_offset == 0) {
+ mlx5_ib_update_mtt(mr, blk_start_idx,
+ idx - blk_start_idx, 1);
+ in_block = 0;
+ }
+ }
+ }
+ if (in_block)
+ mlx5_ib_update_mtt(mr, blk_start_idx, idx - blk_start_idx + 1,
+ 1);
+
+ /*
+ * We are now sure that the device will not access the
+ * memory. We can safely unmap it, and mark it as dirty if
+ * needed.
+ */
+
+ ib_umem_odp_unmap_dma_pages(umem, start, end);
+}
+
+#define COPY_ODP_BIT_MLX_TO_IB(reg, ib_caps, field_name, bit_name) do { \
+ if (be32_to_cpu(reg.field_name) & MLX5_ODP_SUPPORT_##bit_name) \
+ ib_caps->field_name |= IB_ODP_SUPPORT_##bit_name; \
+} while (0)
+
+int mlx5_ib_internal_query_odp_caps(struct mlx5_ib_dev *dev)
+{
+ int err;
+ struct mlx5_odp_caps hw_caps;
+ struct ib_odp_caps *caps = &dev->odp_caps;
+
+ memset(caps, 0, sizeof(*caps));
+
+ if (!(dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG))
+ return 0;
+
+ err = mlx5_query_odp_caps(dev->mdev, &hw_caps);
+ if (err)
+ goto out;
+
+ caps->general_caps = IB_ODP_SUPPORT;
+ COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.ud_odp_caps,
+ SEND);
+ COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.rc_odp_caps,
+ SEND);
+ COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.rc_odp_caps,
+ RECV);
+ COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.rc_odp_caps,
+ WRITE);
+ COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.rc_odp_caps,
+ READ);
+
+out:
+ return err;
+}
+
+static struct mlx5_ib_mr *mlx5_ib_odp_find_mr_lkey(struct mlx5_ib_dev *dev,
+ u32 key)
+{
+ u32 base_key = mlx5_base_mkey(key);
+ struct mlx5_core_mr *mmr = __mlx5_mr_lookup(dev->mdev, base_key);
+ struct mlx5_ib_mr *mr = container_of(mmr, struct mlx5_ib_mr, mmr);
+
+ if (!mmr || mmr->key != key || !mr->live)
+ return NULL;
+
+ return container_of(mmr, struct mlx5_ib_mr, mmr);
+}
+
+static void mlx5_ib_page_fault_resume(struct mlx5_ib_qp *qp,
+ struct mlx5_ib_pfault *pfault,
+ int error) {
+ struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
+ int ret = mlx5_core_page_fault_resume(dev->mdev, qp->mqp.qpn,
+ pfault->mpfault.flags,
+ error);
+ if (ret)
+ pr_err("Failed to resolve the page fault on QP 0x%x\n",
+ qp->mqp.qpn);
+}
+
+/*
+ * Handle a single data segment in a page-fault WQE.
+ *
+ * Returns number of pages retrieved on success. The caller will continue to
+ * the next data segment.
+ * Can return the following error codes:
+ * -EAGAIN to designate a temporary error. The caller will abort handling the
+ * page fault and resolve it.
+ * -EFAULT when there's an error mapping the requested pages. The caller will
+ * abort the page fault handling and possibly move the QP to an error state.
+ * On other errors the QP should also be closed with an error.
+ */
+static int pagefault_single_data_segment(struct mlx5_ib_qp *qp,
+ struct mlx5_ib_pfault *pfault,
+ u32 key, u64 io_virt, size_t bcnt,
+ u32 *bytes_mapped)
+{
+ struct mlx5_ib_dev *mib_dev = to_mdev(qp->ibqp.pd->device);
+ int srcu_key;
+ unsigned int current_seq;
+ u64 start_idx;
+ int npages = 0, ret = 0;
+ struct mlx5_ib_mr *mr;
+ u64 access_mask = ODP_READ_ALLOWED_BIT;
+
+ srcu_key = srcu_read_lock(&mib_dev->mr_srcu);
+ mr = mlx5_ib_odp_find_mr_lkey(mib_dev, key);
+ /*
+ * If we didn't find the MR, it means the MR was closed while we were
+ * handling the ODP event. In this case we return -EFAULT so that the
+ * QP will be closed.
+ */
+ if (!mr || !mr->ibmr.pd) {
+ pr_err("Failed to find relevant mr for lkey=0x%06x, probably the MR was destroyed\n",
+ key);
+ ret = -EFAULT;
+ goto srcu_unlock;
+ }
+ if (!mr->umem->odp_data) {
+ pr_debug("skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
+ key);
+ if (bytes_mapped)
+ *bytes_mapped +=
+ (bcnt - pfault->mpfault.bytes_committed);
+ goto srcu_unlock;
+ }
+ if (mr->ibmr.pd != qp->ibqp.pd) {
+ pr_err("Page-fault with different PDs for QP and MR.\n");
+ ret = -EFAULT;
+ goto srcu_unlock;
+ }
+
+ current_seq = ACCESS_ONCE(mr->umem->odp_data->notifiers_seq);
+ /*
+ * Ensure the sequence number is valid for some time before we call
+ * gup.
+ */
+ smp_rmb();
+
+ /*
+ * Avoid branches - this code will perform correctly
+ * in all iterations (in iteration 2 and above,
+ * bytes_committed == 0).
+ */
+ io_virt += pfault->mpfault.bytes_committed;
+ bcnt -= pfault->mpfault.bytes_committed;
+
+ start_idx = (io_virt - (mr->mmr.iova & PAGE_MASK)) >> PAGE_SHIFT;
+
+ if (mr->umem->writable)
+ access_mask |= ODP_WRITE_ALLOWED_BIT;
+ npages = ib_umem_odp_map_dma_pages(mr->umem, io_virt, bcnt,
+ access_mask, current_seq);
+ if (npages < 0) {
+ ret = npages;
+ goto srcu_unlock;
+ }
+
+ if (npages > 0) {
+ mutex_lock(&mr->umem->odp_data->umem_mutex);
+ if (!ib_umem_mmu_notifier_retry(mr->umem, current_seq)) {
+ /*
+ * No need to check whether the MTTs really belong to
+ * this MR, since ib_umem_odp_map_dma_pages already
+ * checks this.
+ */
+ ret = mlx5_ib_update_mtt(mr, start_idx, npages, 0);
+ } else {
+ ret = -EAGAIN;
+ }
+ mutex_unlock(&mr->umem->odp_data->umem_mutex);
+ if (ret < 0) {
+ if (ret != -EAGAIN)
+ pr_err("Failed to update mkey page tables\n");
+ goto srcu_unlock;
+ }
+
+ if (bytes_mapped) {
+ u32 new_mappings = npages * PAGE_SIZE -
+ (io_virt - round_down(io_virt, PAGE_SIZE));
+ *bytes_mapped += min_t(u32, new_mappings, bcnt);
+ }
+ }
+
+srcu_unlock:
+ if (ret == -EAGAIN) {
+ if (!mr->umem->odp_data->dying) {
+ struct ib_umem_odp *odp_data = mr->umem->odp_data;
+ unsigned long timeout =
+ msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);
+
+ if (!wait_for_completion_timeout(
+ &odp_data->notifier_completion,
+ timeout)) {
+ pr_warn("timeout waiting for mmu notifier completion\n");
+ }
+ } else {
+ /* The MR is being killed, kill the QP as well. */
+ ret = -EFAULT;
+ }
+ }
+ srcu_read_unlock(&mib_dev->mr_srcu, srcu_key);
+ pfault->mpfault.bytes_committed = 0;
+ return ret ? ret : npages;
+}
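The function above uses the usual notifier-sequence pattern: snapshot notifiers_seq before faulting pages in, then commit the MTTs only if ib_umem_mmu_notifier_retry() confirms no invalidation raced in between, otherwise return -EAGAIN and wait. A simplified stand-alone sketch of that shape (the real helper also checks that no invalidation is currently in progress):

#include <stdio.h>

static unsigned int notifiers_seq;	/* bumped by each completed invalidation */

/* In the driver this check runs under umem_mutex before updating the MTTs. */
static int mmu_notifier_retry(unsigned int snapshot)
{
	return snapshot != notifiers_seq;
}

int main(void)
{
	unsigned int seq = notifiers_seq;	/* 1. snapshot before faulting pages in */

	/* 2. ... get_user_pages and DMA mapping would happen here ... */
	notifiers_seq++;			/* simulate a racing invalidation */

	/* 3. only hand the pages to the HW if nothing was invalidated meanwhile */
	if (mmu_notifier_retry(seq))
		printf("raced with invalidation, caller retries (-EAGAIN)\n");
	else
		printf("committing MTT update\n");
	return 0;
}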
+
+/**
+ * Parse a series of data segments for page fault handling.
+ *
+ * @qp the QP on which the fault occurred.
+ * @pfault contains page fault information.
+ * @wqe points at the first data segment in the WQE.
+ * @wqe_end points after the end of the WQE.
+ * @bytes_mapped receives the number of bytes that the function was able to
+ * map. This allows the caller to decide intelligently whether
+ * enough memory was mapped to resolve the page fault
+ * successfully (e.g. enough for the next MTU, or the entire
+ * WQE).
+ * @total_wqe_bytes receives the total data size of this WQE in bytes (minus
+ * the committed bytes).
+ *
+ * Returns the number of pages loaded if positive, zero for an empty WQE, or a
+ * negative error code.
+ */
+static int pagefault_data_segments(struct mlx5_ib_qp *qp,
+ struct mlx5_ib_pfault *pfault, void *wqe,
+ void *wqe_end, u32 *bytes_mapped,
+ u32 *total_wqe_bytes, int receive_queue)
+{
+ int ret = 0, npages = 0;
+ u64 io_virt;
+ u32 key;
+ u32 byte_count;
+ size_t bcnt;
+ int inline_segment;
+
+ /* Skip SRQ next-WQE segment. */
+ if (receive_queue && qp->ibqp.srq)
+ wqe += sizeof(struct mlx5_wqe_srq_next_seg);
+
+ if (bytes_mapped)
+ *bytes_mapped = 0;
+ if (total_wqe_bytes)
+ *total_wqe_bytes = 0;
+
+ while (wqe < wqe_end) {
+ struct mlx5_wqe_data_seg *dseg = wqe;
+
+ io_virt = be64_to_cpu(dseg->addr);
+ key = be32_to_cpu(dseg->lkey);
+ byte_count = be32_to_cpu(dseg->byte_count);
+ inline_segment = !!(byte_count & MLX5_INLINE_SEG);
+ bcnt = byte_count & ~MLX5_INLINE_SEG;
+
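+ /* Inline segments embed their data in the WQE, padded to 16 bytes. */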
+ if (inline_segment) {
+ bcnt = bcnt & MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK;
+ wqe += ALIGN(sizeof(struct mlx5_wqe_inline_seg) + bcnt,
+ 16);
+ } else {
+ wqe += sizeof(*dseg);
+ }
+
+ /* A zero segment with an invalid lkey ends a receive WQE's SG list. */
+ if (receive_queue && bcnt == 0 && key == MLX5_INVALID_LKEY &&
+ io_virt == 0)
+ break;
+
+ if (!inline_segment && total_wqe_bytes) {
+ *total_wqe_bytes += bcnt - min_t(size_t, bcnt,
+ pfault->mpfault.bytes_committed);
+ }
+
+ /* A zero length data segment designates a length of 2GB. */
+ if (bcnt == 0)
+ bcnt = 1U << 31;
+
+ if (inline_segment || bcnt <= pfault->mpfault.bytes_committed) {
+ pfault->mpfault.bytes_committed -=
+ min_t(size_t, bcnt,
+ pfault->mpfault.bytes_committed);
+ continue;
+ }
+
+ ret = pagefault_single_data_segment(qp, pfault, key, io_virt,
+ bcnt, bytes_mapped);
+ if (ret < 0)
+ break;
+ npages += ret;
+ }
+
+ return ret < 0 ? ret : npages;
+}
+
+/*
+ * Parses an initiator WQE. Advances the wqe pointer to point at the
+ * scatter-gather list, and sets wqe_end to the end of the WQE.
+ */
+static int mlx5_ib_mr_initiator_pfault_handler(
+ struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault,
+ void **wqe, void **wqe_end, int wqe_length)
+{
+ struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
+ struct mlx5_wqe_ctrl_seg *ctrl = *wqe;
+ u16 wqe_index = pfault->mpfault.wqe.wqe_index;
+ unsigned ds, opcode;
+#if defined(DEBUG)
+ u32 ctrl_wqe_index, ctrl_qpn;
+#endif
+
+ ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
+ if (ds * MLX5_WQE_DS_UNITS > wqe_length) {
+ mlx5_ib_err(dev, "Unable to read the complete WQE. ds = 0x%x, ret = 0x%x\n",
+ ds, wqe_length);
+ return -EFAULT;
+ }
+
+ if (ds == 0) {
+ mlx5_ib_err(dev, "Got WQE with zero DS. wqe_index=%x, qpn=%x\n",
+ wqe_index, qp->mqp.qpn);
+ return -EFAULT;
+ }
+
+#if defined(DEBUG)
+ ctrl_wqe_index = (be32_to_cpu(ctrl->opmod_idx_opcode) &
+ MLX5_WQE_CTRL_WQE_INDEX_MASK) >>
+ MLX5_WQE_CTRL_WQE_INDEX_SHIFT;
+ if (wqe_index != ctrl_wqe_index) {
+ mlx5_ib_err(dev, "Got WQE with invalid wqe_index. wqe_index=0x%x, qpn=0x%x ctrl->wqe_index=0x%x\n",
+ wqe_index, qp->mqp.qpn,
+ ctrl_wqe_index);
+ return -EFAULT;
+ }
+
+ ctrl_qpn = (be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_QPN_MASK) >>
+ MLX5_WQE_CTRL_QPN_SHIFT;
+ if (qp->mqp.qpn != ctrl_qpn) {
+ mlx5_ib_err(dev, "Got WQE with incorrect QP number. wqe_index=0x%x, qpn=0x%x ctrl->qpn=0x%x\n",
+ wqe_index, qp->mqp.qpn,
+ ctrl_qpn);
+ return -EFAULT;
+ }
+#endif /* DEBUG */
+
+ *wqe_end = *wqe + ds * MLX5_WQE_DS_UNITS;
+ *wqe += sizeof(*ctrl);
+
+ opcode = be32_to_cpu(ctrl->opmod_idx_opcode) &
+ MLX5_WQE_CTRL_OPCODE_MASK;
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_RC:
+ switch (opcode) {
+ case MLX5_OPCODE_SEND:
+ case MLX5_OPCODE_SEND_IMM:
+ case MLX5_OPCODE_SEND_INVAL:
+ if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
+ IB_ODP_SUPPORT_SEND))
+ goto invalid_transport_or_opcode;
+ break;
+ case MLX5_OPCODE_RDMA_WRITE:
+ case MLX5_OPCODE_RDMA_WRITE_IMM:
+ if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
+ IB_ODP_SUPPORT_WRITE))
+ goto invalid_transport_or_opcode;
+ *wqe += sizeof(struct mlx5_wqe_raddr_seg);
+ break;
+ case MLX5_OPCODE_RDMA_READ:
+ if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
+ IB_ODP_SUPPORT_READ))
+ goto invalid_transport_or_opcode;
+ *wqe += sizeof(struct mlx5_wqe_raddr_seg);
+ break;
+ default:
+ goto invalid_transport_or_opcode;
+ }
+ break;
+ case IB_QPT_UD:
+ switch (opcode) {
+ case MLX5_OPCODE_SEND:
+ case MLX5_OPCODE_SEND_IMM:
+ if (!(dev->odp_caps.per_transport_caps.ud_odp_caps &
+ IB_ODP_SUPPORT_SEND))
+ goto invalid_transport_or_opcode;
+ *wqe += sizeof(struct mlx5_wqe_datagram_seg);
+ break;
+ default:
+ goto invalid_transport_or_opcode;
+ }
+ break;
+ default:
+invalid_transport_or_opcode:
+ mlx5_ib_err(dev, "ODP fault on QP of an unsupported opcode or transport. transport: 0x%x opcode: 0x%x.\n",
+ qp->ibqp.qp_type, opcode);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/*
+ * Parses a responder WQE. Advances the wqe pointer to point at the
+ * scatter-gather list, and sets wqe_end to the end of the WQE.
+ */
+static int mlx5_ib_mr_responder_pfault_handler(
+ struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault,
+ void **wqe, void **wqe_end, int wqe_length)
+{
+ struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
+ struct mlx5_ib_wq *wq = &qp->rq;
+ int wqe_size = 1 << wq->wqe_shift;
+
+ if (qp->ibqp.srq) {
+ mlx5_ib_err(dev, "ODP fault on SRQ is not supported\n");
+ return -EFAULT;
+ }
+
+ if (qp->wq_sig) {
+ mlx5_ib_err(dev, "ODP fault with WQE signatures is not supported\n");
+ return -EFAULT;
+ }
+
+ if (wqe_size > wqe_length) {
+ mlx5_ib_err(dev, "Couldn't read all of the receive WQE's content\n");
+ return -EFAULT;
+ }
+
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_RC:
+ if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
+ IB_ODP_SUPPORT_RECV))
+ goto invalid_transport_or_opcode;
+ break;
+ default:
+invalid_transport_or_opcode:
+ mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport. transport: 0x%x\n",
+ qp->ibqp.qp_type);
+ return -EFAULT;
+ }
+
+ *wqe_end = *wqe + wqe_size;
+
+ return 0;
+}
+
+static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_qp *qp,
+ struct mlx5_ib_pfault *pfault)
+{
+ struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
+ int ret;
+ void *wqe, *wqe_end;
+ u32 bytes_mapped, total_wqe_bytes;
+ char *buffer = NULL;
+ int resume_with_error = 0;
+ u16 wqe_index = pfault->mpfault.wqe.wqe_index;
+ int requestor = pfault->mpfault.flags & MLX5_PFAULT_REQUESTOR;
+
+ buffer = (char *)__get_free_page(GFP_KERNEL);
+ if (!buffer) {
+ mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n");
+ resume_with_error = 1;
+ goto resolve_page_fault;
+ }
+
+ ret = mlx5_ib_read_user_wqe(qp, requestor, wqe_index, buffer,
+ PAGE_SIZE);
+ if (ret < 0) {
+ mlx5_ib_err(dev, "Failed reading a WQE following page fault, error=%x, wqe_index=%x, qpn=%x\n",
+ -ret, wqe_index, qp->mqp.qpn);
+ resume_with_error = 1;
+ goto resolve_page_fault;
+ }
+
+ wqe = buffer;
+ if (requestor)
+ ret = mlx5_ib_mr_initiator_pfault_handler(qp, pfault, &wqe,
+ &wqe_end, ret);
+ else
+ ret = mlx5_ib_mr_responder_pfault_handler(qp, pfault, &wqe,
+ &wqe_end, ret);
+ if (ret < 0) {
+ resume_with_error = 1;
+ goto resolve_page_fault;
+ }
+
+ if (wqe >= wqe_end) {
+ mlx5_ib_err(dev, "ODP fault on invalid WQE.\n");
+ resume_with_error = 1;
+ goto resolve_page_fault;
+ }
+
+ ret = pagefault_data_segments(qp, pfault, wqe, wqe_end, &bytes_mapped,
+ &total_wqe_bytes, !requestor);
+ if (ret == -EAGAIN) {
+ goto resolve_page_fault;
+ } else if (ret < 0 || total_wqe_bytes > bytes_mapped) {
+ mlx5_ib_err(dev, "Error getting user pages for page fault. Error: 0x%x\n",
+ -ret);
+ resume_with_error = 1;
+ goto resolve_page_fault;
+ }
+
+resolve_page_fault:
+ mlx5_ib_page_fault_resume(qp, pfault, resume_with_error);
+ mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, flags: 0x%x\n",
+ qp->mqp.qpn, resume_with_error, pfault->mpfault.flags);
+
+ free_page((unsigned long)buffer);
+}
+
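+/* Number of pages spanned by the range [address, address + length). */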
+static int pages_in_range(u64 address, u32 length)
+{
+ return (ALIGN(address + length, PAGE_SIZE) -
+ (address & PAGE_MASK)) >> PAGE_SHIFT;
+}
+
+static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_qp *qp,
+ struct mlx5_ib_pfault *pfault)
+{
+ struct mlx5_pagefault *mpfault = &pfault->mpfault;
+ u64 address;
+ u32 length;
+ u32 prefetch_len = mpfault->bytes_committed;
+ int prefetch_activated = 0;
+ u32 rkey = mpfault->rdma.r_key;
+ int ret;
+
+ /* The RDMA responder flow handles the page fault in two parts.
+ * First it brings in the necessary pages for the current packet
+ * (and uses the pfault context), and then (after resuming the QP)
+ * prefetches more pages. The second operation cannot use the pfault
+ * context and therefore uses the dummy_pfault context allocated on
+ * the stack. */
+ struct mlx5_ib_pfault dummy_pfault = {};
+
+ dummy_pfault.mpfault.bytes_committed = 0;
+
+ mpfault->rdma.rdma_va += mpfault->bytes_committed;
+ mpfault->rdma.rdma_op_len -= min(mpfault->bytes_committed,
+ mpfault->rdma.rdma_op_len);
+ mpfault->bytes_committed = 0;
+
+ address = mpfault->rdma.rdma_va;
+ length = mpfault->rdma.rdma_op_len;
+
+ /* For some operations, the hardware cannot tell the exact message
+ * length, and in those cases it reports zero. Use prefetch
+ * logic. */
+ if (length == 0) {
+ prefetch_activated = 1;
+ length = mpfault->rdma.packet_size;
+ prefetch_len = min(MAX_PREFETCH_LEN, prefetch_len);
+ }
+
+ ret = pagefault_single_data_segment(qp, pfault, rkey, address, length,
+ NULL);
+ if (ret == -EAGAIN) {
+ /* We're racing with an invalidation, don't prefetch */
+ prefetch_activated = 0;
+ } else if (ret < 0 || pages_in_range(address, length) > ret) {
+ mlx5_ib_page_fault_resume(qp, pfault, 1);
+ return;
+ }
+
+ mlx5_ib_page_fault_resume(qp, pfault, 0);
+
+ /* At this point, there might be a new pagefault already arriving in
+ * the EQ, so switch to the dummy pagefault for the rest of the
+ * processing. We're still OK with the objects being alive, as the
+ * work-queue is being fenced. */
+
+ if (prefetch_activated) {
+ ret = pagefault_single_data_segment(qp, &dummy_pfault, rkey,
+ address,
+ prefetch_len,
+ NULL);
+ if (ret < 0) {
+ pr_warn("Prefetch failed (ret = %d, prefetch_activated = %d) for QPN %d, address: 0x%.16llx, length = 0x%.16x\n",
+ ret, prefetch_activated,
+ qp->ibqp.qp_num, address, prefetch_len);
+ }
+ }
+}
+
+void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
+ struct mlx5_ib_pfault *pfault)
+{
+ u8 event_subtype = pfault->mpfault.event_subtype;
+
+ switch (event_subtype) {
+ case MLX5_PFAULT_SUBTYPE_WQE:
+ mlx5_ib_mr_wqe_pfault_handler(qp, pfault);
+ break;
+ case MLX5_PFAULT_SUBTYPE_RDMA:
+ mlx5_ib_mr_rdma_pfault_handler(qp, pfault);
+ break;
+ default:
+ pr_warn("Invalid page fault event subtype: 0x%x\n",
+ event_subtype);
+ mlx5_ib_page_fault_resume(qp, pfault, 1);
+ break;
+ }
+}
+
+static void mlx5_ib_qp_pfault_action(struct work_struct *work)
+{
+ struct mlx5_ib_pfault *pfault = container_of(work,
+ struct mlx5_ib_pfault,
+ work);
+ enum mlx5_ib_pagefault_context context =
+ mlx5_ib_get_pagefault_context(&pfault->mpfault);
+ struct mlx5_ib_qp *qp = container_of(pfault, struct mlx5_ib_qp,
+ pagefaults[context]);
+ mlx5_ib_mr_pfault_handler(qp, pfault);
+}
+
+void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->disable_page_faults_lock, flags);
+ qp->disable_page_faults = 1;
+ spin_unlock_irqrestore(&qp->disable_page_faults_lock, flags);
+
+ /*
+ * Note that at this point, we are guaranteed that no more
+ * work queue elements will be posted to the work queue with
+ * the QP we are closing.
+ */
+ flush_workqueue(mlx5_ib_page_fault_wq);
+}
+
+void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->disable_page_faults_lock, flags);
+ qp->disable_page_faults = 0;
+ spin_unlock_irqrestore(&qp->disable_page_faults_lock, flags);
+}
+
+static void mlx5_ib_pfault_handler(struct mlx5_core_qp *qp,
+ struct mlx5_pagefault *pfault)
+{
+ /*
+ * Note that we will only get one fault event per QP per context
+ * (responder/initiator, read/write), until we resolve the page fault
+ * with the mlx5_ib_page_fault_resume command. Since this function is
+ * called from within the work element, there is no risk of missing
+ * events.
+ */
+ struct mlx5_ib_qp *mibqp = to_mibqp(qp);
+ enum mlx5_ib_pagefault_context context =
+ mlx5_ib_get_pagefault_context(pfault);
+ struct mlx5_ib_pfault *qp_pfault = &mibqp->pagefaults[context];
+
+ qp_pfault->mpfault = *pfault;
+
+ /* No need to stop interrupts here since we are in an interrupt */
+ spin_lock(&mibqp->disable_page_faults_lock);
+ if (!mibqp->disable_page_faults)
+ queue_work(mlx5_ib_page_fault_wq, &qp_pfault->work);
+ spin_unlock(&mibqp->disable_page_faults_lock);
+}
+
+void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp)
+{
+ int i;
+
+ qp->disable_page_faults = 1;
+ spin_lock_init(&qp->disable_page_faults_lock);
+
+ qp->mqp.pfault_handler = mlx5_ib_pfault_handler;
+
+ for (i = 0; i < MLX5_IB_PAGEFAULT_CONTEXTS; ++i)
+ INIT_WORK(&qp->pagefaults[i].work, mlx5_ib_qp_pfault_action);
+}
+
+int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev)
+{
+ int ret;
+
+ ret = init_srcu_struct(&ibdev->mr_srcu);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev)
+{
+ cleanup_srcu_struct(&ibdev->mr_srcu);
+}
+
+int __init mlx5_ib_odp_init(void)
+{
+ mlx5_ib_page_fault_wq =
+ create_singlethread_workqueue("mlx5_ib_page_faults");
+ if (!mlx5_ib_page_fault_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void mlx5_ib_odp_cleanup(void)
+{
+ destroy_workqueue(mlx5_ib_page_fault_wq);
+}
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 1cae1c7132b..be0cd358b08 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -70,15 +70,6 @@ static const u32 mlx5_ib_opcode[] = {
[MLX5_IB_WR_UMR] = MLX5_OPCODE_UMR,
};
-struct umr_wr {
- u64 virt_addr;
- struct ib_pd *pd;
- unsigned int page_shift;
- unsigned int npages;
- u32 length;
- int access_flags;
- u32 mkey;
-};
static int is_qp0(enum ib_qp_type qp_type)
{
@@ -110,6 +101,77 @@ void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n)
return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
}
+/**
+ * mlx5_ib_read_user_wqe() - Copy a user-space WQE to kernel space.
+ *
+ * @qp: QP to copy from.
+ * @send: copy from the send queue when non-zero, use the receive queue
+ * otherwise.
+ * @wqe_index: index to start copying from. For send work queues, the
+ * wqe_index is in units of MLX5_SEND_WQE_BB.
+ * For receive work queues, it is the index of the
+ * work queue element in the queue.
+ * @buffer: destination buffer.
+ * @length: maximum number of bytes to copy.
+ *
+ * Copies at least a single WQE, but may copy more data.
+ *
+ * Return: the number of bytes copied, or an error code.
+ */
+int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
+ void *buffer, u32 length)
+{
+ struct ib_device *ibdev = qp->ibqp.device;
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq;
+ size_t offset;
+ size_t wq_end;
+ struct ib_umem *umem = qp->umem;
+ u32 first_copy_length;
+ int wqe_length;
+ int ret;
+
+ if (wq->wqe_cnt == 0) {
+ mlx5_ib_dbg(dev, "mlx5_ib_read_user_wqe for a QP with wqe_cnt == 0. qp_type: 0x%x\n",
+ qp->ibqp.qp_type);
+ return -EINVAL;
+ }
+
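+ /* Byte offset of the requested WQE; the index wraps at wqe_cnt. */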
+ offset = wq->offset + ((wqe_index % wq->wqe_cnt) << wq->wqe_shift);
+ wq_end = wq->offset + (wq->wqe_cnt << wq->wqe_shift);
+
+ if (send && length < sizeof(struct mlx5_wqe_ctrl_seg))
+ return -EINVAL;
+
+ if (offset > umem->length ||
+ (send && offset + sizeof(struct mlx5_wqe_ctrl_seg) > umem->length))
+ return -EINVAL;
+
+ first_copy_length = min_t(u32, offset + length, wq_end) - offset;
+ ret = ib_umem_copy_from(buffer, umem, offset, first_copy_length);
+ if (ret)
+ return ret;
+
+ if (send) {
+ struct mlx5_wqe_ctrl_seg *ctrl = buffer;
+ int ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
+
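+ /* The ctrl segment's DS field gives the send WQE size in 16-byte units. */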
+ wqe_length = ds * MLX5_WQE_DS_UNITS;
+ } else {
+ wqe_length = 1 << wq->wqe_shift;
+ }
+
+ if (wqe_length <= first_copy_length)
+ return first_copy_length;
+
+ ret = ib_umem_copy_from(buffer + first_copy_length, umem, wq->offset,
+ wqe_length - first_copy_length);
+ if (ret)
+ return ret;
+
+ return wqe_length;
+}
+
static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
{
struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
@@ -814,6 +876,8 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
int inlen = sizeof(*in);
int err;
+ mlx5_ib_odp_create_qp(qp);
+
gen = &dev->mdev->caps.gen;
mutex_init(&qp->mutex);
spin_lock_init(&qp->sq.lock);
@@ -1098,11 +1162,13 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
in = kzalloc(sizeof(*in), GFP_KERNEL);
if (!in)
return;
- if (qp->state != IB_QPS_RESET)
+ if (qp->state != IB_QPS_RESET) {
+ mlx5_ib_qp_disable_pagefaults(qp);
if (mlx5_core_qp_modify(dev->mdev, to_mlx5_state(qp->state),
MLX5_QP_STATE_RST, in, sizeof(*in), &qp->mqp))
mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n",
qp->mqp.qpn);
+ }
get_cqs(qp, &send_cq, &recv_cq);
@@ -1650,6 +1716,15 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
if (mlx5_st < 0)
goto out;
+ /* If moving to a reset or error state, we must disable page faults on
+ * this QP and flush all current page faults. Otherwise a stale page
+ * fault may attempt to work on this QP after it is reset and moved
+ * again to RTS, and may cause the driver and the device to get out of
+ * sync. */
+ if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
+ (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR))
+ mlx5_ib_qp_disable_pagefaults(qp);
+
optpar = ib_mask_to_mlx5_opt(attr_mask);
optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
in->optparam = cpu_to_be32(optpar);
@@ -1659,6 +1734,9 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
if (err)
goto out;
+ if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
+ mlx5_ib_qp_enable_pagefaults(qp);
+
qp->state = new_state;
if (attr_mask & IB_QP_ACCESS_FLAGS)
@@ -1848,37 +1926,70 @@ static void set_frwr_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
umr->mkey_mask = frwr_mkey_mask();
}
+static __be64 get_umr_reg_mr_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_LEN |
+ MLX5_MKEY_MASK_PAGE_SIZE |
+ MLX5_MKEY_MASK_START_ADDR |
+ MLX5_MKEY_MASK_PD |
+ MLX5_MKEY_MASK_LR |
+ MLX5_MKEY_MASK_LW |
+ MLX5_MKEY_MASK_KEY |
+ MLX5_MKEY_MASK_RR |
+ MLX5_MKEY_MASK_RW |
+ MLX5_MKEY_MASK_A |
+ MLX5_MKEY_MASK_FREE;
+
+ return cpu_to_be64(result);
+}
+
+static __be64 get_umr_unreg_mr_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_FREE;
+
+ return cpu_to_be64(result);
+}
+
+static __be64 get_umr_update_mtt_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_FREE;
+
+ return cpu_to_be64(result);
+}
+
static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
struct ib_send_wr *wr)
{
- struct umr_wr *umrwr = (struct umr_wr *)&wr->wr.fast_reg;
- u64 mask;
+ struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;
memset(umr, 0, sizeof(*umr));
+ if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
+ umr->flags = MLX5_UMR_CHECK_FREE; /* fail if free */
+ else
+ umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */
+
if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) {
- umr->flags = 1 << 5; /* fail if not free */
umr->klm_octowords = get_klm_octo(umrwr->npages);
- mask = MLX5_MKEY_MASK_LEN |
- MLX5_MKEY_MASK_PAGE_SIZE |
- MLX5_MKEY_MASK_START_ADDR |
- MLX5_MKEY_MASK_PD |
- MLX5_MKEY_MASK_LR |
- MLX5_MKEY_MASK_LW |
- MLX5_MKEY_MASK_KEY |
- MLX5_MKEY_MASK_RR |
- MLX5_MKEY_MASK_RW |
- MLX5_MKEY_MASK_A |
- MLX5_MKEY_MASK_FREE;
- umr->mkey_mask = cpu_to_be64(mask);
+ if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT) {
+ umr->mkey_mask = get_umr_update_mtt_mask();
+ umr->bsf_octowords = get_klm_octo(umrwr->target.offset);
+ umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
+ } else {
+ umr->mkey_mask = get_umr_reg_mr_mask();
+ }
} else {
- umr->flags = 2 << 5; /* fail if free */
- mask = MLX5_MKEY_MASK_FREE;
- umr->mkey_mask = cpu_to_be64(mask);
+ umr->mkey_mask = get_umr_unreg_mr_mask();
}
if (!wr->num_sge)
- umr->flags |= (1 << 7); /* inline */
+ umr->flags |= MLX5_UMR_INLINE;
}
static u8 get_umr_flags(int acc)
@@ -1895,7 +2006,7 @@ static void set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr,
{
memset(seg, 0, sizeof(*seg));
if (li) {
- seg->status = 1 << 6;
+ seg->status = MLX5_MKEY_STATUS_FREE;
return;
}
@@ -1912,19 +2023,23 @@ static void set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr,
static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr)
{
+ struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;
+
memset(seg, 0, sizeof(*seg));
if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) {
- seg->status = 1 << 6;
+ seg->status = MLX5_MKEY_STATUS_FREE;
return;
}
- seg->flags = convert_access(wr->wr.fast_reg.access_flags);
- seg->flags_pd = cpu_to_be32(to_mpd((struct ib_pd *)wr->wr.fast_reg.page_list)->pdn);
- seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
- seg->len = cpu_to_be64(wr->wr.fast_reg.length);
- seg->log2_page_size = wr->wr.fast_reg.page_shift;
+ seg->flags = convert_access(umrwr->access_flags);
+ if (!(wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT)) {
+ seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
+ seg->start_addr = cpu_to_be64(umrwr->target.virt_addr);
+ }
+ seg->len = cpu_to_be64(umrwr->length);
+ seg->log2_page_size = umrwr->page_shift;
seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
- mlx5_mkey_variant(wr->wr.fast_reg.rkey));
+ mlx5_mkey_variant(umrwr->mkey));
}
static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg,
@@ -2927,6 +3042,14 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
int mlx5_state;
int err = 0;
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ /*
+ * Wait for any outstanding page faults, in case the user frees memory
+ * based upon this query's result.
+ */
+ flush_workqueue(mlx5_ib_page_fault_wq);
+#endif
+
mutex_lock(&qp->mutex);
outb = kzalloc(sizeof(*outb), GFP_KERNEL);
if (!outb) {
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index fef067c959f..c0d0296e7a0 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -2341,9 +2341,9 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
nes_debug(NES_DBG_MR, "User base = 0x%lX, Virt base = 0x%lX, length = %u,"
" offset = %u, page size = %u.\n",
(unsigned long int)start, (unsigned long int)virt, (u32)length,
- region->offset, region->page_size);
+ ib_umem_offset(region), region->page_size);
- skip_pages = ((u32)region->offset) >> 12;
+ skip_pages = ((u32)ib_umem_offset(region)) >> 12;
if (ib_copy_from_udata(&req, udata, sizeof(req))) {
ib_umem_release(region);
@@ -2408,7 +2408,7 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
region_length -= skip_pages << 12;
for (page_index = skip_pages; page_index < chunk_pages; page_index++) {
skip_pages = 0;
- if ((page_count != 0) && (page_count<<12)-(region->offset&(4096-1)) >= region->length)
+ if ((page_count != 0) && (page_count << 12) - (ib_umem_offset(region) & (4096 - 1)) >= region->length)
goto enough_pages;
if ((page_count&0x01FF) == 0) {
if (page_count >= 1024 * 512) {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index ac02ce4e804..f3cc8c9e65a 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -96,7 +96,6 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
union ib_gid sgid;
- u8 zmac[ETH_ALEN];
if (!(attr->ah_flags & IB_AH_GRH))
return ERR_PTR(-EINVAL);
@@ -118,9 +117,7 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
goto av_conf_err;
}
- memset(&zmac, 0, ETH_ALEN);
- if (pd->uctx &&
- memcmp(attr->dmac, &zmac, ETH_ALEN)) {
+ if (pd->uctx) {
status = rdma_addr_find_dmac_by_grh(&sgid, &attr->grh.dgid,
attr->dmac, &attr->vlan_id);
if (status) {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 4c68305ee78..fb8d8c4dfbb 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -805,7 +805,7 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
goto umem_err;
mr->hwmr.pbe_size = mr->umem->page_size;
- mr->hwmr.fbo = mr->umem->offset;
+ mr->hwmr.fbo = ib_umem_offset(mr->umem);
mr->hwmr.va = usr_addr;
mr->hwmr.len = len;
mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
@@ -1410,6 +1410,8 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
mutex_unlock(&dev->dev_lock);
if (status)
goto mbx_err;
+ if (qp->qp_type == IB_QPT_UD)
+ qp_attr->qkey = params.qkey;
qp_attr->qp_state = get_ibqp_state(IB_QPS_INIT);
qp_attr->cur_qp_state = get_ibqp_state(IB_QPS_INIT);
qp_attr->path_mtu =
diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c
index 9bbb55347cc..a77fb4fb14e 100644
--- a/drivers/infiniband/hw/qib/qib_mr.c
+++ b/drivers/infiniband/hw/qib/qib_mr.c
@@ -258,7 +258,7 @@ struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mr->mr.user_base = start;
mr->mr.iova = virt_addr;
mr->mr.length = length;
- mr->mr.offset = umem->offset;
+ mr->mr.offset = ib_umem_offset(umem);
mr->mr.access_flags = mr_access_flags;
mr->umem = umem;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index d7562beb542..8ba80a6d3a4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -98,9 +98,15 @@ enum {
IPOIB_MCAST_FLAG_FOUND = 0, /* used in set_multicast_list */
IPOIB_MCAST_FLAG_SENDONLY = 1,
- IPOIB_MCAST_FLAG_BUSY = 2, /* joining or already joined */
+ /*
+ * For IPOIB_MCAST_FLAG_BUSY:
+ * When set, a join is in flight and mcast->mc is unreliable.
+ * When clear and mcast->mc IS_ERR_OR_NULL, the join needs to be
+ * restarted or hasn't started yet.
+ * When clear and mcast->mc is a valid pointer, the join was successful.
+ */
+ IPOIB_MCAST_FLAG_BUSY = 2,
IPOIB_MCAST_FLAG_ATTACHED = 3,
- IPOIB_MCAST_JOIN_STARTED = 4,
MAX_SEND_CQE = 16,
IPOIB_CM_COPYBREAK = 256,
@@ -317,6 +323,7 @@ struct ipoib_dev_priv {
struct list_head multicast_list;
struct rb_root multicast_tree;
+ struct workqueue_struct *wq;
struct delayed_work mcast_task;
struct work_struct carrier_on_task;
struct work_struct flush_light;
@@ -477,10 +484,10 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work);
void ipoib_pkey_event(struct work_struct *work);
void ipoib_ib_dev_cleanup(struct net_device *dev);
-int ipoib_ib_dev_open(struct net_device *dev, int flush);
+int ipoib_ib_dev_open(struct net_device *dev);
int ipoib_ib_dev_up(struct net_device *dev);
-int ipoib_ib_dev_down(struct net_device *dev, int flush);
-int ipoib_ib_dev_stop(struct net_device *dev, int flush);
+int ipoib_ib_dev_down(struct net_device *dev);
+int ipoib_ib_dev_stop(struct net_device *dev);
void ipoib_pkey_dev_check_presence(struct net_device *dev);
int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
@@ -492,7 +499,7 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb);
void ipoib_mcast_restart_task(struct work_struct *work);
int ipoib_mcast_start_thread(struct net_device *dev);
-int ipoib_mcast_stop_thread(struct net_device *dev, int flush);
+int ipoib_mcast_stop_thread(struct net_device *dev);
void ipoib_mcast_dev_down(struct net_device *dev);
void ipoib_mcast_dev_flush(struct net_device *dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 933efcea0d0..56959adb6c7 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -474,7 +474,7 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
}
spin_lock_irq(&priv->lock);
- queue_delayed_work(ipoib_workqueue,
+ queue_delayed_work(priv->wq,
&priv->cm.stale_task, IPOIB_CM_RX_DELAY);
/* Add this entry to passive ids list head, but do not re-add it
* if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
@@ -576,7 +576,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
spin_lock_irqsave(&priv->lock, flags);
list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
ipoib_cm_start_rx_drain(priv);
- queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
+ queue_work(priv->wq, &priv->cm.rx_reap_task);
spin_unlock_irqrestore(&priv->lock, flags);
} else
ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
@@ -603,7 +603,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
spin_lock_irqsave(&priv->lock, flags);
list_move(&p->list, &priv->cm.rx_reap_list);
spin_unlock_irqrestore(&priv->lock, flags);
- queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
+ queue_work(priv->wq, &priv->cm.rx_reap_task);
}
return;
}
@@ -827,7 +827,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
list_move(&tx->list, &priv->cm.reap_list);
- queue_work(ipoib_workqueue, &priv->cm.reap_task);
+ queue_work(priv->wq, &priv->cm.reap_task);
}
clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);
@@ -1255,7 +1255,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
list_move(&tx->list, &priv->cm.reap_list);
- queue_work(ipoib_workqueue, &priv->cm.reap_task);
+ queue_work(priv->wq, &priv->cm.reap_task);
}
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1284,7 +1284,7 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path
tx->dev = dev;
list_add(&tx->list, &priv->cm.start_list);
set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
- queue_work(ipoib_workqueue, &priv->cm.start_task);
+ queue_work(priv->wq, &priv->cm.start_task);
return tx;
}
@@ -1295,7 +1295,7 @@ void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
spin_lock_irqsave(&priv->lock, flags);
list_move(&tx->list, &priv->cm.reap_list);
- queue_work(ipoib_workqueue, &priv->cm.reap_task);
+ queue_work(priv->wq, &priv->cm.reap_task);
ipoib_dbg(priv, "Reap connection for gid %pI6\n",
tx->neigh->daddr + 4);
tx->neigh = NULL;
@@ -1417,7 +1417,7 @@ void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
skb_queue_tail(&priv->cm.skb_queue, skb);
if (e)
- queue_work(ipoib_workqueue, &priv->cm.skb_task);
+ queue_work(priv->wq, &priv->cm.skb_task);
}
static void ipoib_cm_rx_reap(struct work_struct *work)
@@ -1450,7 +1450,7 @@ static void ipoib_cm_stale_task(struct work_struct *work)
}
if (!list_empty(&priv->cm.passive_ids))
- queue_delayed_work(ipoib_workqueue,
+ queue_delayed_work(priv->wq,
&priv->cm.stale_task, IPOIB_CM_RX_DELAY);
spin_unlock_irq(&priv->lock);
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 72626c34817..fe65abb5150 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -655,7 +655,7 @@ void ipoib_reap_ah(struct work_struct *work)
__ipoib_reap_ah(dev);
if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
- queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
+ queue_delayed_work(priv->wq, &priv->ah_reap_task,
round_jiffies_relative(HZ));
}
@@ -664,7 +664,7 @@ static void ipoib_ib_tx_timer_func(unsigned long ctx)
drain_tx_cq((struct net_device *)ctx);
}
-int ipoib_ib_dev_open(struct net_device *dev, int flush)
+int ipoib_ib_dev_open(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
int ret;
@@ -696,7 +696,7 @@ int ipoib_ib_dev_open(struct net_device *dev, int flush)
}
clear_bit(IPOIB_STOP_REAPER, &priv->flags);
- queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
+ queue_delayed_work(priv->wq, &priv->ah_reap_task,
round_jiffies_relative(HZ));
if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
@@ -706,7 +706,7 @@ int ipoib_ib_dev_open(struct net_device *dev, int flush)
dev_stop:
if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
napi_enable(&priv->napi);
- ipoib_ib_dev_stop(dev, flush);
+ ipoib_ib_dev_stop(dev);
return -1;
}
@@ -738,7 +738,7 @@ int ipoib_ib_dev_up(struct net_device *dev)
return ipoib_mcast_start_thread(dev);
}
-int ipoib_ib_dev_down(struct net_device *dev, int flush)
+int ipoib_ib_dev_down(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -747,7 +747,7 @@ int ipoib_ib_dev_down(struct net_device *dev, int flush)
clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
netif_carrier_off(dev);
- ipoib_mcast_stop_thread(dev, flush);
+ ipoib_mcast_stop_thread(dev);
ipoib_mcast_dev_flush(dev);
ipoib_flush_paths(dev);
@@ -807,7 +807,7 @@ void ipoib_drain_cq(struct net_device *dev)
local_bh_enable();
}
-int ipoib_ib_dev_stop(struct net_device *dev, int flush)
+int ipoib_ib_dev_stop(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ib_qp_attr qp_attr;
@@ -880,8 +880,7 @@ timeout:
/* Wait for all AHs to be reaped */
set_bit(IPOIB_STOP_REAPER, &priv->flags);
cancel_delayed_work(&priv->ah_reap_task);
- if (flush)
- flush_workqueue(ipoib_workqueue);
+ flush_workqueue(priv->wq);
begin = jiffies;
@@ -918,7 +917,7 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
(unsigned long) dev);
if (dev->flags & IFF_UP) {
- if (ipoib_ib_dev_open(dev, 1)) {
+ if (ipoib_ib_dev_open(dev)) {
ipoib_transport_dev_cleanup(dev);
return -ENODEV;
}
@@ -1040,12 +1039,12 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
}
if (level >= IPOIB_FLUSH_NORMAL)
- ipoib_ib_dev_down(dev, 0);
+ ipoib_ib_dev_down(dev);
if (level == IPOIB_FLUSH_HEAVY) {
if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
- ipoib_ib_dev_stop(dev, 0);
- if (ipoib_ib_dev_open(dev, 0) != 0)
+ ipoib_ib_dev_stop(dev);
+ if (ipoib_ib_dev_open(dev) != 0)
return;
if (netif_queue_stopped(dev))
netif_start_queue(dev);
@@ -1097,7 +1096,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
*/
ipoib_flush_paths(dev);
- ipoib_mcast_stop_thread(dev, 1);
+ ipoib_mcast_stop_thread(dev);
ipoib_mcast_dev_flush(dev);
ipoib_transport_dev_cleanup(dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 58b5aa3b6f2..6bad17d4d58 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -108,7 +108,7 @@ int ipoib_open(struct net_device *dev)
set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
- if (ipoib_ib_dev_open(dev, 1)) {
+ if (ipoib_ib_dev_open(dev)) {
if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
return 0;
goto err_disable;
@@ -139,7 +139,7 @@ int ipoib_open(struct net_device *dev)
return 0;
err_stop:
- ipoib_ib_dev_stop(dev, 1);
+ ipoib_ib_dev_stop(dev);
err_disable:
clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
@@ -157,8 +157,8 @@ static int ipoib_stop(struct net_device *dev)
netif_stop_queue(dev);
- ipoib_ib_dev_down(dev, 1);
- ipoib_ib_dev_stop(dev, 0);
+ ipoib_ib_dev_down(dev);
+ ipoib_ib_dev_stop(dev);
if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
struct ipoib_dev_priv *cpriv;
@@ -839,7 +839,7 @@ static void ipoib_set_mcast_list(struct net_device *dev)
return;
}
- queue_work(ipoib_workqueue, &priv->restart_task);
+ queue_work(priv->wq, &priv->restart_task);
}
static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
@@ -954,7 +954,7 @@ static void ipoib_reap_neigh(struct work_struct *work)
__ipoib_reap_neigh(priv);
if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
- queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task,
+ queue_delayed_work(priv->wq, &priv->neigh_reap_task,
arp_tbl.gc_interval);
}
@@ -1133,7 +1133,7 @@ static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
/* start garbage collection */
clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
- queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task,
+ queue_delayed_work(priv->wq, &priv->neigh_reap_task,
arp_tbl.gc_interval);
return 0;
@@ -1262,15 +1262,13 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
- if (ipoib_neigh_hash_init(priv) < 0)
- goto out;
/* Allocate RX/TX "rings" to hold queued skbs */
priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
GFP_KERNEL);
if (!priv->rx_ring) {
printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
ca->name, ipoib_recvq_size);
- goto out_neigh_hash_cleanup;
+ goto out;
}
priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
@@ -1285,16 +1283,24 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
if (ipoib_ib_dev_init(dev, ca, port))
goto out_tx_ring_cleanup;
+ /*
+ * Must be after ipoib_ib_dev_init so we can allocate a per-device
+ * wq there and use it here.
+ */
+ if (ipoib_neigh_hash_init(priv) < 0)
+ goto out_dev_uninit;
+
return 0;
+out_dev_uninit:
+ ipoib_ib_dev_cleanup(dev);
+
out_tx_ring_cleanup:
vfree(priv->tx_ring);
out_rx_ring_cleanup:
kfree(priv->rx_ring);
-out_neigh_hash_cleanup:
- ipoib_neigh_hash_uninit(dev);
out:
return -ENOMEM;
}
@@ -1317,6 +1323,12 @@ void ipoib_dev_cleanup(struct net_device *dev)
}
unregister_netdevice_many(&head);
+ /*
+ * Must be before ipoib_ib_dev_cleanup or we delete an in-use
+ * workqueue.
+ */
+ ipoib_neigh_hash_uninit(dev);
+
ipoib_ib_dev_cleanup(dev);
kfree(priv->rx_ring);
@@ -1324,8 +1336,6 @@ void ipoib_dev_cleanup(struct net_device *dev)
priv->rx_ring = NULL;
priv->tx_ring = NULL;
-
- ipoib_neigh_hash_uninit(dev);
}
static const struct header_ops ipoib_header_ops = {
@@ -1636,7 +1646,7 @@ register_failed:
/* Stop GC if started before flush */
set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
cancel_delayed_work(&priv->neigh_reap_task);
- flush_workqueue(ipoib_workqueue);
+ flush_workqueue(priv->wq);
event_failed:
ipoib_dev_cleanup(priv->dev);
@@ -1707,7 +1717,7 @@ static void ipoib_remove_one(struct ib_device *device)
/* Stop GC */
set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
cancel_delayed_work(&priv->neigh_reap_task);
- flush_workqueue(ipoib_workqueue);
+ flush_workqueue(priv->wq);
unregister_netdev(priv->dev);
free_netdev(priv->dev);
@@ -1748,8 +1758,13 @@ static int __init ipoib_init_module(void)
* unregister_netdev() and linkwatch_event take the rtnl lock,
* so flush_scheduled_work() can deadlock during device
* removal.
+ *
+ * In addition, bringing one device up and another down at the
+ * same time can deadlock a single workqueue, so we have this
+ * global fallback workqueue, but we also attempt to open a
+ * per-device workqueue each time we bring an interface up.
*/
- ipoib_workqueue = create_singlethread_workqueue("ipoib");
+ ipoib_workqueue = create_singlethread_workqueue("ipoib_flush");
if (!ipoib_workqueue) {
ret = -ENOMEM;
goto err_fs;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index ffb83b5f7e8..bc50dd0d0e4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -190,12 +190,6 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
spin_unlock_irq(&priv->lock);
priv->tx_wr.wr.ud.remote_qkey = priv->qkey;
set_qkey = 1;
-
- if (!ipoib_cm_admin_enabled(dev)) {
- rtnl_lock();
- dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu));
- rtnl_unlock();
- }
}
if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
@@ -277,16 +271,27 @@ ipoib_mcast_sendonly_join_complete(int status,
struct ipoib_mcast *mcast = multicast->context;
struct net_device *dev = mcast->dev;
+ /*
+ * We have to take the mutex to force mcast_sendonly_join to
+ * return from ib_sa_join_multicast and set mcast->mc to a
+ * valid value. Otherwise we would be racing with ourselves in
+ * that we might fail here, but get a valid return from
+ * ib_sa_join_multicast after we had cleared mcast->mc here,
+ * resulting in mis-matched joins and leaves and a deadlock.
+ */
+ mutex_lock(&mcast_mutex);
+
/* We trap for port events ourselves. */
if (status == -ENETRESET)
- return 0;
+ goto out;
if (!status)
status = ipoib_mcast_join_finish(mcast, &multicast->rec);
if (status) {
if (mcast->logcount++ < 20)
- ipoib_dbg_mcast(netdev_priv(dev), "multicast join failed for %pI6, status %d\n",
+ ipoib_dbg_mcast(netdev_priv(dev), "sendonly multicast "
+ "join failed for %pI6, status %d\n",
mcast->mcmember.mgid.raw, status);
/* Flush out any queued packets */
@@ -296,11 +301,15 @@ ipoib_mcast_sendonly_join_complete(int status,
dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
}
netif_tx_unlock_bh(dev);
-
- /* Clear the busy flag so we try again */
- status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY,
- &mcast->flags);
}
+out:
+ clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+ if (status)
+ mcast->mc = NULL;
+ complete(&mcast->done);
+ if (status == -ENETRESET)
+ status = 0;
+ mutex_unlock(&mcast_mutex);
return status;
}
@@ -318,12 +327,14 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
int ret = 0;
if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
- ipoib_dbg_mcast(priv, "device shutting down, no multicast joins\n");
+ ipoib_dbg_mcast(priv, "device shutting down, no sendonly "
+ "multicast joins\n");
return -ENODEV;
}
- if (test_and_set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
- ipoib_dbg_mcast(priv, "multicast entry busy, skipping\n");
+ if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
+ ipoib_dbg_mcast(priv, "multicast entry busy, skipping "
+ "sendonly join\n");
return -EBUSY;
}
@@ -331,6 +342,9 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
rec.port_gid = priv->local_gid;
rec.pkey = cpu_to_be16(priv->pkey);
+ mutex_lock(&mcast_mutex);
+ init_completion(&mcast->done);
+ set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca,
priv->port, &rec,
IB_SA_MCMEMBER_REC_MGID |
@@ -343,12 +357,14 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
if (IS_ERR(mcast->mc)) {
ret = PTR_ERR(mcast->mc);
clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
- ipoib_warn(priv, "ib_sa_join_multicast failed (ret = %d)\n",
- ret);
+ complete(&mcast->done);
+ ipoib_warn(priv, "ib_sa_join_multicast for sendonly join "
+ "failed (ret = %d)\n", ret);
} else {
- ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting join\n",
- mcast->mcmember.mgid.raw);
+ ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting "
+ "sendonly join\n", mcast->mcmember.mgid.raw);
}
+ mutex_unlock(&mcast_mutex);
return ret;
}
@@ -359,18 +375,29 @@ void ipoib_mcast_carrier_on_task(struct work_struct *work)
carrier_on_task);
struct ib_port_attr attr;
- /*
- * Take rtnl_lock to avoid racing with ipoib_stop() and
- * turning the carrier back on while a device is being
- * removed.
- */
if (ib_query_port(priv->ca, priv->port, &attr) ||
attr.state != IB_PORT_ACTIVE) {
ipoib_dbg(priv, "Keeping carrier off until IB port is active\n");
return;
}
- rtnl_lock();
+ /*
+ * Take rtnl_lock to avoid racing with ipoib_stop() and
+ * turning the carrier back on while a device is being
+ * removed. However, ipoib_stop() will attempt to flush
+ * the workqueue while holding the rtnl lock, so loop
+ * on trylock until either we get the lock or we see
+ * FLAG_ADMIN_UP go away as that signals that we are bailing
+ * and can safely ignore the carrier on work.
+ */
+ while (!rtnl_trylock()) {
+ if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
+ return;
+ else
+ msleep(20);
+ }
+ if (!ipoib_cm_admin_enabled(priv->dev))
+ dev_set_mtu(priv->dev, min(priv->mcast_mtu, priv->admin_mtu));
netif_carrier_on(priv->dev);
rtnl_unlock();
}
@@ -385,60 +412,63 @@ static int ipoib_mcast_join_complete(int status,
ipoib_dbg_mcast(priv, "join completion for %pI6 (status %d)\n",
mcast->mcmember.mgid.raw, status);
+ /*
+ * We have to take the mutex to force mcast_join to
+ * return from ib_sa_join_multicast and set mcast->mc to a
+ * valid value. Otherwise we would be racing with ourselves in
+ * that we might fail here, but get a valid return from
+ * ib_sa_join_multicast after we had cleared mcast->mc here,
+ * resulting in mis-matched joins and leaves and a deadlock.
+ */
+ mutex_lock(&mcast_mutex);
+
/* We trap for port events ourselves. */
- if (status == -ENETRESET) {
- status = 0;
+ if (status == -ENETRESET)
goto out;
- }
if (!status)
status = ipoib_mcast_join_finish(mcast, &multicast->rec);
if (!status) {
mcast->backoff = 1;
- mutex_lock(&mcast_mutex);
if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_delayed_work(ipoib_workqueue,
- &priv->mcast_task, 0);
- mutex_unlock(&mcast_mutex);
+ queue_delayed_work(priv->wq, &priv->mcast_task, 0);
/*
- * Defer carrier on work to ipoib_workqueue to avoid a
+ * Defer carrier on work to priv->wq to avoid a
* deadlock on rtnl_lock here.
*/
if (mcast == priv->broadcast)
- queue_work(ipoib_workqueue, &priv->carrier_on_task);
-
- status = 0;
- goto out;
- }
-
- if (mcast->logcount++ < 20) {
- if (status == -ETIMEDOUT || status == -EAGAIN) {
- ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n",
- mcast->mcmember.mgid.raw, status);
- } else {
- ipoib_warn(priv, "multicast join failed for %pI6, status %d\n",
- mcast->mcmember.mgid.raw, status);
+ queue_work(priv->wq, &priv->carrier_on_task);
+ } else {
+ if (mcast->logcount++ < 20) {
+ if (status == -ETIMEDOUT || status == -EAGAIN) {
+ ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n",
+ mcast->mcmember.mgid.raw, status);
+ } else {
+ ipoib_warn(priv, "multicast join failed for %pI6, status %d\n",
+ mcast->mcmember.mgid.raw, status);
+ }
}
- }
-
- mcast->backoff *= 2;
- if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
- mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
- /* Clear the busy flag so we try again */
- status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
-
- mutex_lock(&mcast_mutex);
+ mcast->backoff *= 2;
+ if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
+ mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
+ }
+out:
spin_lock_irq(&priv->lock);
- if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
+ clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+ if (status)
+ mcast->mc = NULL;
+ complete(&mcast->done);
+ if (status == -ENETRESET)
+ status = 0;
+ if (status && test_bit(IPOIB_MCAST_RUN, &priv->flags))
+ queue_delayed_work(priv->wq, &priv->mcast_task,
mcast->backoff * HZ);
spin_unlock_irq(&priv->lock);
mutex_unlock(&mcast_mutex);
-out:
- complete(&mcast->done);
+
return status;
}
@@ -487,10 +517,9 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
rec.hop_limit = priv->broadcast->mcmember.hop_limit;
}
- set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+ mutex_lock(&mcast_mutex);
init_completion(&mcast->done);
- set_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags);
-
+ set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
&rec, comp_mask, GFP_KERNEL,
ipoib_mcast_join_complete, mcast);
@@ -504,13 +533,11 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
- mutex_lock(&mcast_mutex);
if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_delayed_work(ipoib_workqueue,
- &priv->mcast_task,
+ queue_delayed_work(priv->wq, &priv->mcast_task,
mcast->backoff * HZ);
- mutex_unlock(&mcast_mutex);
}
+ mutex_unlock(&mcast_mutex);
}
void ipoib_mcast_join_task(struct work_struct *work)
@@ -547,8 +574,8 @@ void ipoib_mcast_join_task(struct work_struct *work)
ipoib_warn(priv, "failed to allocate broadcast group\n");
mutex_lock(&mcast_mutex);
if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_delayed_work(ipoib_workqueue,
- &priv->mcast_task, HZ);
+ queue_delayed_work(priv->wq, &priv->mcast_task,
+ HZ);
mutex_unlock(&mcast_mutex);
return;
}
@@ -563,7 +590,8 @@ void ipoib_mcast_join_task(struct work_struct *work)
}
if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
- if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags))
+ if (IS_ERR_OR_NULL(priv->broadcast->mc) &&
+ !test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags))
ipoib_mcast_join(dev, priv->broadcast, 0);
return;
}
@@ -571,23 +599,33 @@ void ipoib_mcast_join_task(struct work_struct *work)
while (1) {
struct ipoib_mcast *mcast = NULL;
+ /*
+ * We need the mutex so our flags are consistent, and the
+ * priv->lock so we don't race with list removals in either
+ * mcast_dev_flush or mcast_restart_task.
+ */
+ mutex_lock(&mcast_mutex);
spin_lock_irq(&priv->lock);
list_for_each_entry(mcast, &priv->multicast_list, list) {
- if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)
- && !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)
- && !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
+ if (IS_ERR_OR_NULL(mcast->mc) &&
+ !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags) &&
+ !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
/* Found the next unjoined group */
break;
}
}
spin_unlock_irq(&priv->lock);
+ mutex_unlock(&mcast_mutex);
if (&mcast->list == &priv->multicast_list) {
/* All done */
break;
}
- ipoib_mcast_join(dev, mcast, 1);
+ if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
+ ipoib_mcast_sendonly_join(mcast);
+ else
+ ipoib_mcast_join(dev, mcast, 1);
return;
}
@@ -604,13 +642,13 @@ int ipoib_mcast_start_thread(struct net_device *dev)
mutex_lock(&mcast_mutex);
if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
+ queue_delayed_work(priv->wq, &priv->mcast_task, 0);
mutex_unlock(&mcast_mutex);
return 0;
}
-int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
+int ipoib_mcast_stop_thread(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -621,8 +659,7 @@ int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
cancel_delayed_work(&priv->mcast_task);
mutex_unlock(&mcast_mutex);
- if (flush)
- flush_workqueue(ipoib_workqueue);
+ flush_workqueue(priv->wq);
return 0;
}
@@ -633,6 +670,9 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
int ret = 0;
if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
+ ipoib_warn(priv, "ipoib_mcast_leave on an in-flight join\n");
+
+ if (!IS_ERR_OR_NULL(mcast->mc))
ib_sa_free_multicast(mcast->mc);
if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
@@ -685,6 +725,8 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
memcpy(mcast->mcmember.mgid.raw, mgid, sizeof (union ib_gid));
__ipoib_mcast_add(dev, mcast);
list_add_tail(&mcast->list, &priv->multicast_list);
+ if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
+ queue_delayed_work(priv->wq, &priv->mcast_task, 0);
}
if (!mcast->ah) {
@@ -698,8 +740,6 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
ipoib_dbg_mcast(priv, "no address vector, "
"but multicast join already started\n");
- else if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
- ipoib_mcast_sendonly_join(mcast);
/*
* If lookup completes between here and out:, don't
@@ -759,9 +799,12 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
spin_unlock_irqrestore(&priv->lock, flags);
- /* seperate between the wait to the leave*/
+ /*
+ * Make sure the in-flight joins have finished before we attempt
+ * to leave.
+ */
list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
- if (test_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags))
+ if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
wait_for_completion(&mcast->done);
list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
@@ -794,8 +837,6 @@ void ipoib_mcast_restart_task(struct work_struct *work)
ipoib_dbg_mcast(priv, "restarting multicast task\n");
- ipoib_mcast_stop_thread(dev, 0);
-
local_irq_save(flags);
netif_addr_lock(dev);
spin_lock(&priv->lock);
@@ -880,14 +921,38 @@ void ipoib_mcast_restart_task(struct work_struct *work)
netif_addr_unlock(dev);
local_irq_restore(flags);
- /* We have to cancel outside of the spinlock */
+ /*
+ * Make sure the in-flight joins have finished before we attempt
+ * to leave.
+ */
+ list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
+ if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
+ wait_for_completion(&mcast->done);
+
+ /*
+ * We have to cancel outside of the spinlock, but we have to
+ * take the rtnl lock or else we race with the removal of
+ * entries from the remove list in mcast_dev_flush as part
+ * of ipoib_stop(). We detect the drop of the ADMIN_UP flag
+ * to signal that we have hit this particular race, and we
+ * return since we know we don't need to do anything else
+ * anyway.
+ */
+ while (!rtnl_trylock()) {
+ if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
+ return;
+ else
+ msleep(20);
+ }
list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
ipoib_mcast_leave(mcast->dev, mcast);
ipoib_mcast_free(mcast);
}
-
- if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
- ipoib_mcast_start_thread(dev);
+ /*
+ * Restart our join task if needed
+ */
+ ipoib_mcast_start_thread(dev);
+ rtnl_unlock();
}
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index c56d5d44c53..b72a753eb41 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -145,10 +145,20 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
int ret, size;
int i;
+ /*
+ * The various IPoIB tasks assume they will never race against
+ * themselves, so always use a single-threaded workqueue.
+ */
+ priv->wq = create_singlethread_workqueue("ipoib_wq");
+ if (!priv->wq) {
+ printk(KERN_WARNING "ipoib: failed to allocate device WQ\n");
+ return -ENODEV;
+ }
+
priv->pd = ib_alloc_pd(priv->ca);
if (IS_ERR(priv->pd)) {
printk(KERN_WARNING "%s: failed to allocate PD\n", ca->name);
- return -ENODEV;
+ goto out_free_wq;
}
priv->mr = ib_get_dma_mr(priv->pd, IB_ACCESS_LOCAL_WRITE);
@@ -242,6 +252,10 @@ out_free_mr:
out_free_pd:
ib_dealloc_pd(priv->pd);
+
+out_free_wq:
+ destroy_workqueue(priv->wq);
+ priv->wq = NULL;
return -ENODEV;
}
@@ -270,6 +284,12 @@ void ipoib_transport_dev_cleanup(struct net_device *dev)
if (ib_dealloc_pd(priv->pd))
ipoib_warn(priv, "ib_dealloc_pd failed\n");
+
+ if (priv->wq) {
+ flush_workqueue(priv->wq);
+ destroy_workqueue(priv->wq);
+ priv->wq = NULL;
+ }
}
void ipoib_event(struct ib_event_handler *handler,
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 20ca6a61947..6a594aac229 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -97,7 +97,7 @@ module_param_named(pi_enable, iser_pi_enable, bool, 0644);
MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)");
module_param_named(pi_guard, iser_pi_guard, int, 0644);
-MODULE_PARM_DESC(pi_guard, "T10-PI guard_type, 0:CRC|1:IP_CSUM (default:IP_CSUM)");
+MODULE_PARM_DESC(pi_guard, "T10-PI guard_type [deprecated]");
static struct workqueue_struct *release_wq;
struct iser_global ig;
@@ -164,18 +164,42 @@ iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
return 0;
}
-int iser_initialize_task_headers(struct iscsi_task *task,
- struct iser_tx_desc *tx_desc)
+/**
+ * iser_initialize_task_headers() - Initialize task headers
+ * @task: iscsi task
+ * @tx_desc: iser tx descriptor
+ *
+ * Notes:
+ * This routine may race with the iser teardown flow during SCSI
+ * error handling TMFs. So for TMFs we should acquire the
+ * state mutex to avoid dereferencing an IB device which
+ * may have already been terminated.
+ */
+int
+iser_initialize_task_headers(struct iscsi_task *task,
+ struct iser_tx_desc *tx_desc)
{
- struct iser_conn *iser_conn = task->conn->dd_data;
+ struct iser_conn *iser_conn = task->conn->dd_data;
struct iser_device *device = iser_conn->ib_conn.device;
struct iscsi_iser_task *iser_task = task->dd_data;
u64 dma_addr;
+ const bool mgmt_task = !task->sc && !in_interrupt();
+ int ret = 0;
+
+ if (unlikely(mgmt_task))
+ mutex_lock(&iser_conn->state_mutex);
+
+ if (unlikely(iser_conn->state != ISER_CONN_UP)) {
+ ret = -ENODEV;
+ goto out;
+ }
dma_addr = ib_dma_map_single(device->ib_device, (void *)tx_desc,
ISER_HEADERS_LEN, DMA_TO_DEVICE);
- if (ib_dma_mapping_error(device->ib_device, dma_addr))
- return -ENOMEM;
+ if (ib_dma_mapping_error(device->ib_device, dma_addr)) {
+ ret = -ENOMEM;
+ goto out;
+ }
tx_desc->dma_addr = dma_addr;
tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
@@ -183,7 +207,11 @@ int iser_initialize_task_headers(struct iscsi_task *task,
tx_desc->tx_sg[0].lkey = device->mr->lkey;
iser_task->iser_conn = iser_conn;
- return 0;
+out:
+ if (unlikely(mgmt_task))
+ mutex_unlock(&iser_conn->state_mutex);
+
+ return ret;
}
/**
@@ -199,9 +227,14 @@ static int
iscsi_iser_task_init(struct iscsi_task *task)
{
struct iscsi_iser_task *iser_task = task->dd_data;
+ int ret;
- if (iser_initialize_task_headers(task, &iser_task->desc))
- return -ENOMEM;
+ ret = iser_initialize_task_headers(task, &iser_task->desc);
+ if (ret) {
+ iser_err("Failed to init task %p, err = %d\n",
+ iser_task, ret);
+ return ret;
+ }
/* mgmt task */
if (!task->sc)
@@ -508,8 +541,8 @@ iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
*/
if (iser_conn) {
mutex_lock(&iser_conn->state_mutex);
- iscsi_conn_stop(cls_conn, flag);
iser_conn_terminate(iser_conn);
+ iscsi_conn_stop(cls_conn, flag);
/* unbind */
iser_conn->iscsi_conn = NULL;
@@ -541,12 +574,13 @@ iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
static inline unsigned int
iser_dif_prot_caps(int prot_caps)
{
- return ((prot_caps & IB_PROT_T10DIF_TYPE_1) ? SHOST_DIF_TYPE1_PROTECTION |
- SHOST_DIX_TYPE1_PROTECTION : 0) |
- ((prot_caps & IB_PROT_T10DIF_TYPE_2) ? SHOST_DIF_TYPE2_PROTECTION |
- SHOST_DIX_TYPE2_PROTECTION : 0) |
- ((prot_caps & IB_PROT_T10DIF_TYPE_3) ? SHOST_DIF_TYPE3_PROTECTION |
- SHOST_DIX_TYPE3_PROTECTION : 0);
+ return ((prot_caps & IB_PROT_T10DIF_TYPE_1) ?
+ SHOST_DIF_TYPE1_PROTECTION | SHOST_DIX_TYPE0_PROTECTION |
+ SHOST_DIX_TYPE1_PROTECTION : 0) |
+ ((prot_caps & IB_PROT_T10DIF_TYPE_2) ?
+ SHOST_DIF_TYPE2_PROTECTION | SHOST_DIX_TYPE2_PROTECTION : 0) |
+ ((prot_caps & IB_PROT_T10DIF_TYPE_3) ?
+ SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE3_PROTECTION : 0);
}
/**
@@ -569,6 +603,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
struct Scsi_Host *shost;
struct iser_conn *iser_conn = NULL;
struct ib_conn *ib_conn;
+ u16 max_cmds;
shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0);
if (!shost)
@@ -586,26 +621,41 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
*/
if (ep) {
iser_conn = ep->dd_data;
+ max_cmds = iser_conn->max_cmds;
+
+ mutex_lock(&iser_conn->state_mutex);
+ if (iser_conn->state != ISER_CONN_UP) {
+ iser_err("iser conn %p already started teardown\n",
+ iser_conn);
+ mutex_unlock(&iser_conn->state_mutex);
+ goto free_host;
+ }
+
ib_conn = &iser_conn->ib_conn;
if (ib_conn->pi_support) {
u32 sig_caps = ib_conn->device->dev_attr.sig_prot_cap;
scsi_host_set_prot(shost, iser_dif_prot_caps(sig_caps));
- if (iser_pi_guard)
- scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP);
- else
- scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
+ scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP |
+ SHOST_DIX_GUARD_CRC);
}
- }
- if (iscsi_host_add(shost, ep ?
- ib_conn->device->ib_device->dma_device : NULL))
- goto free_host;
+ if (iscsi_host_add(shost,
+ ib_conn->device->ib_device->dma_device)) {
+ mutex_unlock(&iser_conn->state_mutex);
+ goto free_host;
+ }
+ mutex_unlock(&iser_conn->state_mutex);
+ } else {
+ max_cmds = ISER_DEF_XMIT_CMDS_MAX;
+ if (iscsi_host_add(shost, NULL))
+ goto free_host;
+ }
- if (cmds_max > ISER_DEF_XMIT_CMDS_MAX) {
+ if (cmds_max > max_cmds) {
iser_info("cmds_max changed from %u to %u\n",
- cmds_max, ISER_DEF_XMIT_CMDS_MAX);
- cmds_max = ISER_DEF_XMIT_CMDS_MAX;
+ cmds_max, max_cmds);
+ cmds_max = max_cmds;
}
cls_session = iscsi_session_setup(&iscsi_iser_transport, shost,
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index cd4174ca9a7..5ce26817e7e 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -69,34 +69,31 @@
#define DRV_NAME "iser"
#define PFX DRV_NAME ": "
-#define DRV_VER "1.4.8"
+#define DRV_VER "1.5"
#define iser_dbg(fmt, arg...) \
do { \
- if (iser_debug_level > 2) \
+ if (unlikely(iser_debug_level > 2)) \
printk(KERN_DEBUG PFX "%s: " fmt,\
__func__ , ## arg); \
} while (0)
#define iser_warn(fmt, arg...) \
do { \
- if (iser_debug_level > 0) \
+ if (unlikely(iser_debug_level > 0)) \
pr_warn(PFX "%s: " fmt, \
__func__ , ## arg); \
} while (0)
#define iser_info(fmt, arg...) \
do { \
- if (iser_debug_level > 1) \
+ if (unlikely(iser_debug_level > 1)) \
pr_info(PFX "%s: " fmt, \
__func__ , ## arg); \
} while (0)
-#define iser_err(fmt, arg...) \
- do { \
- printk(KERN_ERR PFX "%s: " fmt, \
- __func__ , ## arg); \
- } while (0)
+#define iser_err(fmt, arg...) \
+ pr_err(PFX "%s: " fmt, __func__ , ## arg)
#define SHIFT_4K 12
#define SIZE_4K (1ULL << SHIFT_4K)
@@ -144,6 +141,11 @@
ISER_MAX_TX_MISC_PDUS + \
ISER_MAX_RX_MISC_PDUS)
+#define ISER_GET_MAX_XMIT_CMDS(send_wr) ((send_wr \
+ - ISER_MAX_TX_MISC_PDUS \
+ - ISER_MAX_RX_MISC_PDUS) / \
+ (1 + ISER_INFLIGHT_DATAOUTS))
+
#define ISER_WC_BATCH_COUNT 16
#define ISER_SIGNAL_CMD_COUNT 32
@@ -247,7 +249,6 @@ struct iscsi_endpoint;
* @va: MR start address (buffer va)
* @len: MR length
* @mem_h: pointer to registration context (FMR/Fastreg)
- * @is_mr: indicates weather we registered the buffer
*/
struct iser_mem_reg {
u32 lkey;
@@ -255,7 +256,6 @@ struct iser_mem_reg {
u64 va;
u64 len;
void *mem_h;
- int is_mr;
};
/**
@@ -323,8 +323,6 @@ struct iser_rx_desc {
char pad[ISER_RX_PAD_SIZE];
} __attribute__((packed));
-#define ISER_MAX_CQ 4
-
struct iser_conn;
struct ib_conn;
struct iscsi_iser_task;
@@ -375,7 +373,7 @@ struct iser_device {
struct list_head ig_list;
int refcount;
int comps_used;
- struct iser_comp comps[ISER_MAX_CQ];
+ struct iser_comp *comps;
int (*iser_alloc_rdma_reg_res)(struct ib_conn *ib_conn,
unsigned cmds_max);
void (*iser_free_rdma_reg_res)(struct ib_conn *ib_conn);
@@ -432,6 +430,7 @@ struct fast_reg_descriptor {
* @cma_id: rdma_cm connection maneger handle
* @qp: Connection Queue-pair
* @post_recv_buf_count: post receive counter
+ * @sig_count: send work request signal count
* @rx_wr: receive work request for batch posts
* @device: reference to iser device
* @comp: iser completion context
@@ -452,6 +451,7 @@ struct ib_conn {
struct rdma_cm_id *cma_id;
struct ib_qp *qp;
int post_recv_buf_count;
+ u8 sig_count;
struct ib_recv_wr rx_wr[ISER_MIN_POSTED_RX];
struct iser_device *device;
struct iser_comp *comp;
@@ -482,6 +482,7 @@ struct ib_conn {
* to max number of post recvs
* @qp_max_recv_dtos_mask: (qp_max_recv_dtos - 1)
* @min_posted_rx: (qp_max_recv_dtos >> 2)
+ * @max_cmds: maximum cmds allowed for this connection
* @name: connection peer portal
 * @release_work:    deferred work for release job
 * @state_mutex:     protects iser connection state
@@ -507,6 +508,7 @@ struct iser_conn {
unsigned qp_max_recv_dtos;
unsigned qp_max_recv_dtos_mask;
unsigned min_posted_rx;
+ u16 max_cmds;
char name[ISER_OBJECT_NAME_SIZE];
struct work_struct release_work;
struct mutex state_mutex;
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 5a489ea6373..3821633f106 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -369,7 +369,7 @@ static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
return 0;
}
-static inline bool iser_signal_comp(int sig_count)
+static inline bool iser_signal_comp(u8 sig_count)
{
return ((sig_count % ISER_SIGNAL_CMD_COUNT) == 0);
}
@@ -388,7 +388,7 @@ int iser_send_command(struct iscsi_conn *conn,
struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
struct scsi_cmnd *sc = task->sc;
struct iser_tx_desc *tx_desc = &iser_task->desc;
- static unsigned sig_count;
+ u8 sig_count = ++iser_conn->ib_conn.sig_count;
edtl = ntohl(hdr->data_length);
@@ -435,7 +435,7 @@ int iser_send_command(struct iscsi_conn *conn,
iser_task->status = ISER_TASK_STATUS_STARTED;
err = iser_post_send(&iser_conn->ib_conn, tx_desc,
- iser_signal_comp(++sig_count));
+ iser_signal_comp(sig_count));
if (!err)
return 0;
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 6c5ce357fba..abce9339333 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -73,7 +73,6 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
if (cmd_dir == ISER_DIR_OUT) {
/* copy the unaligned sg the buffer which is used for RDMA */
- int i;
char *p, *from;
sgl = (struct scatterlist *)data->buf;
@@ -409,7 +408,6 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
regd_buf->reg.rkey = device->mr->rkey;
regd_buf->reg.len = ib_sg_dma_len(ibdev, &sg[0]);
regd_buf->reg.va = ib_sg_dma_address(ibdev, &sg[0]);
- regd_buf->reg.is_mr = 0;
iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X "
"va: 0x%08lX sz: %ld]\n",
@@ -440,13 +438,13 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
return 0;
}
-static inline void
+static void
iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs,
struct ib_sig_domain *domain)
{
domain->sig_type = IB_SIG_TYPE_T10_DIF;
- domain->sig.dif.pi_interval = sc->device->sector_size;
- domain->sig.dif.ref_tag = scsi_get_lba(sc) & 0xffffffff;
+ domain->sig.dif.pi_interval = scsi_prot_interval(sc);
+ domain->sig.dif.ref_tag = scsi_prot_ref_tag(sc);
/*
* At the moment we hard code those, but in the future
* we will take them from sc.
@@ -454,8 +452,7 @@ iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs,
domain->sig.dif.apptag_check_mask = 0xffff;
domain->sig.dif.app_escape = true;
domain->sig.dif.ref_escape = true;
- if (scsi_get_prot_type(sc) == SCSI_PROT_DIF_TYPE1 ||
- scsi_get_prot_type(sc) == SCSI_PROT_DIF_TYPE2)
+ if (sc->prot_flags & SCSI_PROT_REF_INCREMENT)
domain->sig.dif.ref_remap = true;
};
@@ -473,26 +470,16 @@ iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs)
case SCSI_PROT_WRITE_STRIP:
sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
- /*
- * At the moment we use this modparam to tell what is
- * the memory bg_type, in the future we will take it
- * from sc.
- */
- sig_attrs->mem.sig.dif.bg_type = iser_pi_guard ? IB_T10DIF_CSUM :
- IB_T10DIF_CRC;
+ sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
+ IB_T10DIF_CSUM : IB_T10DIF_CRC;
break;
case SCSI_PROT_READ_PASS:
case SCSI_PROT_WRITE_PASS:
iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
- /*
- * At the moment we use this modparam to tell what is
- * the memory bg_type, in the future we will take it
- * from sc.
- */
- sig_attrs->mem.sig.dif.bg_type = iser_pi_guard ? IB_T10DIF_CSUM :
- IB_T10DIF_CRC;
+ sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
+ IB_T10DIF_CSUM : IB_T10DIF_CRC;
break;
default:
iser_err("Unsupported PI operation %d\n",
@@ -503,26 +490,28 @@ iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs)
return 0;
}
-static int
+static inline void
iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
{
- switch (scsi_get_prot_type(sc)) {
- case SCSI_PROT_DIF_TYPE0:
- break;
- case SCSI_PROT_DIF_TYPE1:
- case SCSI_PROT_DIF_TYPE2:
- *mask = ISER_CHECK_GUARD | ISER_CHECK_REFTAG;
- break;
- case SCSI_PROT_DIF_TYPE3:
- *mask = ISER_CHECK_GUARD;
- break;
- default:
- iser_err("Unsupported protection type %d\n",
- scsi_get_prot_type(sc));
- return -EINVAL;
- }
+ *mask = 0;
+ if (sc->prot_flags & SCSI_PROT_REF_CHECK)
+ *mask |= ISER_CHECK_REFTAG;
+ if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
+ *mask |= ISER_CHECK_GUARD;
+}
- return 0;
+static void
+iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
+{
+ u32 rkey;
+
+ memset(inv_wr, 0, sizeof(*inv_wr));
+ inv_wr->opcode = IB_WR_LOCAL_INV;
+ inv_wr->wr_id = ISER_FASTREG_LI_WRID;
+ inv_wr->ex.invalidate_rkey = mr->rkey;
+
+ rkey = ib_inc_rkey(mr->rkey);
+ ib_update_fast_reg_key(mr, rkey);
}
static int
@@ -536,26 +525,17 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
struct ib_send_wr *bad_wr, *wr = NULL;
struct ib_sig_attrs sig_attrs;
int ret;
- u32 key;
memset(&sig_attrs, 0, sizeof(sig_attrs));
ret = iser_set_sig_attrs(iser_task->sc, &sig_attrs);
if (ret)
goto err;
- ret = iser_set_prot_checks(iser_task->sc, &sig_attrs.check_mask);
- if (ret)
- goto err;
+ iser_set_prot_checks(iser_task->sc, &sig_attrs.check_mask);
if (!(desc->reg_indicators & ISER_SIG_KEY_VALID)) {
- memset(&inv_wr, 0, sizeof(inv_wr));
- inv_wr.opcode = IB_WR_LOCAL_INV;
- inv_wr.wr_id = ISER_FASTREG_LI_WRID;
- inv_wr.ex.invalidate_rkey = pi_ctx->sig_mr->rkey;
+ iser_inv_rkey(&inv_wr, pi_ctx->sig_mr);
wr = &inv_wr;
- /* Bump the key */
- key = (u8)(pi_ctx->sig_mr->rkey & 0x000000FF);
- ib_update_fast_reg_key(pi_ctx->sig_mr, ++key);
}
memset(&sig_wr, 0, sizeof(sig_wr));
@@ -585,12 +565,7 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
sig_sge->lkey = pi_ctx->sig_mr->lkey;
sig_sge->addr = 0;
- sig_sge->length = data_sge->length + prot_sge->length;
- if (scsi_get_prot_op(iser_task->sc) == SCSI_PROT_WRITE_INSERT ||
- scsi_get_prot_op(iser_task->sc) == SCSI_PROT_READ_STRIP) {
- sig_sge->length += (data_sge->length /
- iser_task->sc->device->sector_size) * 8;
- }
+ sig_sge->length = scsi_transfer_length(iser_task->sc);
iser_dbg("sig_sge: addr: 0x%llx length: %u lkey: 0x%x\n",
sig_sge->addr, sig_sge->length,
@@ -613,7 +588,6 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
struct ib_fast_reg_page_list *frpl;
struct ib_send_wr fastreg_wr, inv_wr;
struct ib_send_wr *bad_wr, *wr = NULL;
- u8 key;
int ret, offset, size, plen;
/* if there a single dma entry, dma mr suffices */
@@ -645,14 +619,8 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
}
if (!(desc->reg_indicators & ind)) {
- memset(&inv_wr, 0, sizeof(inv_wr));
- inv_wr.wr_id = ISER_FASTREG_LI_WRID;
- inv_wr.opcode = IB_WR_LOCAL_INV;
- inv_wr.ex.invalidate_rkey = mr->rkey;
+ iser_inv_rkey(&inv_wr, mr);
wr = &inv_wr;
- /* Bump the key */
- key = (u8)(mr->rkey & 0x000000FF);
- ib_update_fast_reg_key(mr, ++key);
}
/* Prepare FASTREG WR */
@@ -770,15 +738,11 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
regd_buf->reg.rkey = desc->pi_ctx->sig_mr->rkey;
regd_buf->reg.va = sig_sge.addr;
regd_buf->reg.len = sig_sge.length;
- regd_buf->reg.is_mr = 1;
} else {
- if (desc) {
+ if (desc)
regd_buf->reg.rkey = desc->data_mr->rkey;
- regd_buf->reg.is_mr = 1;
- } else {
+ else
regd_buf->reg.rkey = device->mr->rkey;
- regd_buf->reg.is_mr = 0;
- }
regd_buf->reg.lkey = data_sge.lkey;
regd_buf->reg.va = data_sge.addr;
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 67225bb82bb..695a2704bd4 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -76,7 +76,7 @@ static void iser_event_handler(struct ib_event_handler *handler,
static int iser_create_device_ib_res(struct iser_device *device)
{
struct ib_device_attr *dev_attr = &device->dev_attr;
- int ret, i;
+ int ret, i, max_cqe;
ret = ib_query_device(device->ib_device, dev_attr);
if (ret) {
@@ -104,11 +104,19 @@ static int iser_create_device_ib_res(struct iser_device *device)
return -1;
}
- device->comps_used = min(ISER_MAX_CQ,
+ device->comps_used = min_t(int, num_online_cpus(),
device->ib_device->num_comp_vectors);
- iser_info("using %d CQs, device %s supports %d vectors\n",
+
+ device->comps = kcalloc(device->comps_used, sizeof(*device->comps),
+ GFP_KERNEL);
+ if (!device->comps)
+ goto comps_err;
+
+ max_cqe = min(ISER_MAX_CQ_LEN, dev_attr->max_cqe);
+
+ iser_info("using %d CQs, device %s supports %d vectors max_cqe %d\n",
device->comps_used, device->ib_device->name,
- device->ib_device->num_comp_vectors);
+ device->ib_device->num_comp_vectors, max_cqe);
device->pd = ib_alloc_pd(device->ib_device);
if (IS_ERR(device->pd))
@@ -122,7 +130,7 @@ static int iser_create_device_ib_res(struct iser_device *device)
iser_cq_callback,
iser_cq_event_callback,
(void *)comp,
- ISER_MAX_CQ_LEN, i);
+ max_cqe, i);
if (IS_ERR(comp->cq)) {
comp->cq = NULL;
goto cq_err;
@@ -162,6 +170,8 @@ cq_err:
}
ib_dealloc_pd(device->pd);
pd_err:
+ kfree(device->comps);
+comps_err:
iser_err("failed to allocate an IB resource\n");
return -1;
}
@@ -187,6 +197,9 @@ static void iser_free_device_ib_res(struct iser_device *device)
(void)ib_dereg_mr(device->mr);
(void)ib_dealloc_pd(device->pd);
+ kfree(device->comps);
+ device->comps = NULL;
+
device->mr = NULL;
device->pd = NULL;
}
@@ -425,7 +438,10 @@ void iser_free_fastreg_pool(struct ib_conn *ib_conn)
*/
static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
{
+ struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn,
+ ib_conn);
struct iser_device *device;
+ struct ib_device_attr *dev_attr;
struct ib_qp_init_attr init_attr;
int ret = -ENOMEM;
int index, min_index = 0;
@@ -433,6 +449,7 @@ static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
BUG_ON(ib_conn->device == NULL);
device = ib_conn->device;
+ dev_attr = &device->dev_attr;
memset(&init_attr, 0, sizeof init_attr);
@@ -460,8 +477,20 @@ static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
if (ib_conn->pi_support) {
init_attr.cap.max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS + 1;
init_attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
+ iser_conn->max_cmds =
+ ISER_GET_MAX_XMIT_CMDS(ISER_QP_SIG_MAX_REQ_DTOS);
} else {
- init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS + 1;
+ if (dev_attr->max_qp_wr > ISER_QP_MAX_REQ_DTOS) {
+ init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS + 1;
+ iser_conn->max_cmds =
+ ISER_GET_MAX_XMIT_CMDS(ISER_QP_MAX_REQ_DTOS);
+ } else {
+ init_attr.cap.max_send_wr = dev_attr->max_qp_wr;
+ iser_conn->max_cmds =
+ ISER_GET_MAX_XMIT_CMDS(dev_attr->max_qp_wr);
+ iser_dbg("device %s supports max_send_wr %d\n",
+ device->ib_device->name, dev_attr->max_qp_wr);
+ }
}
ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
@@ -475,7 +504,11 @@ static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
return ret;
out_err:
+ mutex_lock(&ig.connlist_mutex);
+ ib_conn->comp->active_qps--;
+ mutex_unlock(&ig.connlist_mutex);
iser_err("unable to alloc mem or create resource, err %d\n", ret);
+
return ret;
}
@@ -610,9 +643,11 @@ void iser_conn_release(struct iser_conn *iser_conn)
mutex_unlock(&ig.connlist_mutex);
mutex_lock(&iser_conn->state_mutex);
- if (iser_conn->state != ISER_CONN_DOWN)
+ if (iser_conn->state != ISER_CONN_DOWN) {
iser_warn("iser conn %p state %d, expected state down.\n",
iser_conn, iser_conn->state);
+ iser_conn->state = ISER_CONN_DOWN;
+ }
/*
* In case we never got to bind stage, we still need to
* release IB resources (which is safe to call more than once).
@@ -662,8 +697,10 @@ int iser_conn_terminate(struct iser_conn *iser_conn)
/* post an indication that all flush errors were consumed */
err = ib_post_send(ib_conn->qp, &ib_conn->beacon, &bad_wr);
- if (err)
+ if (err) {
iser_err("conn %p failed to post beacon", ib_conn);
+ return 1;
+ }
wait_for_completion(&ib_conn->flush_comp);
}
@@ -846,20 +883,21 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
break;
case RDMA_CM_EVENT_DISCONNECTED:
case RDMA_CM_EVENT_ADDR_CHANGE:
- iser_disconnected_handler(cma_id);
+ case RDMA_CM_EVENT_TIMEWAIT_EXIT:
+ iser_cleanup_handler(cma_id, false);
break;
case RDMA_CM_EVENT_DEVICE_REMOVAL:
/*
* we *must* destroy the device as we cannot rely
* on iscsid to be around to initiate error handling.
- * also implicitly destroy the cma_id.
+ * also if we are not in state DOWN implicitly destroy
+ * the cma_id.
*/
iser_cleanup_handler(cma_id, true);
- iser_conn->ib_conn.cma_id = NULL;
- ret = 1;
- break;
- case RDMA_CM_EVENT_TIMEWAIT_EXIT:
- iser_cleanup_handler(cma_id, false);
+ if (iser_conn->state != ISER_CONN_DOWN) {
+ iser_conn->ib_conn.cma_id = NULL;
+ ret = 1;
+ }
break;
default:
iser_err("Unexpected RDMA CM event (%d)\n", event->event);
@@ -981,7 +1019,6 @@ int iser_reg_page_vec(struct ib_conn *ib_conn,
mem_reg->rkey = mem->fmr->rkey;
mem_reg->len = page_vec->length * SIZE_4K;
mem_reg->va = io_addr;
- mem_reg->is_mr = 1;
mem_reg->mem_h = (void *)mem;
mem_reg->va += page_vec->offset;
@@ -1008,7 +1045,7 @@ void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
int ret;
- if (!reg->is_mr)
+ if (!reg->mem_h)
return;
iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n",reg->mem_h);
@@ -1028,11 +1065,10 @@ void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
struct ib_conn *ib_conn = &iser_conn->ib_conn;
struct fast_reg_descriptor *desc = reg->mem_h;
- if (!reg->is_mr)
+ if (!desc)
return;
reg->mem_h = NULL;
- reg->is_mr = 0;
spin_lock_bh(&ib_conn->lock);
list_add_tail(&desc->list, &ib_conn->fastreg.pool);
spin_unlock_bh(&ib_conn->lock);
@@ -1049,7 +1085,7 @@ int iser_post_recvl(struct iser_conn *iser_conn)
sge.length = ISER_RX_LOGIN_SIZE;
sge.lkey = ib_conn->device->mr->lkey;
- rx_wr.wr_id = (unsigned long)iser_conn->login_resp_buf;
+ rx_wr.wr_id = (uintptr_t)iser_conn->login_resp_buf;
rx_wr.sg_list = &sge;
rx_wr.num_sge = 1;
rx_wr.next = NULL;
@@ -1073,7 +1109,7 @@ int iser_post_recvm(struct iser_conn *iser_conn, int count)
for (rx_wr = ib_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
rx_desc = &iser_conn->rx_descs[my_rx_head];
- rx_wr->wr_id = (unsigned long)rx_desc;
+ rx_wr->wr_id = (uintptr_t)rx_desc;
rx_wr->sg_list = &rx_desc->rx_sg;
rx_wr->num_sge = 1;
rx_wr->next = rx_wr + 1;
@@ -1110,7 +1146,7 @@ int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
DMA_TO_DEVICE);
send_wr.next = NULL;
- send_wr.wr_id = (unsigned long)tx_desc;
+ send_wr.wr_id = (uintptr_t)tx_desc;
send_wr.sg_list = tx_desc->tx_sg;
send_wr.num_sge = tx_desc->num_sge;
send_wr.opcode = IB_WR_SEND;
@@ -1160,6 +1196,7 @@ static void
iser_handle_comp_error(struct ib_conn *ib_conn,
struct ib_wc *wc)
{
+ void *wr_id = (void *)(uintptr_t)wc->wr_id;
struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn,
ib_conn);
@@ -1168,8 +1205,8 @@ iser_handle_comp_error(struct ib_conn *ib_conn,
iscsi_conn_failure(iser_conn->iscsi_conn,
ISCSI_ERR_CONN_FAILED);
- if (is_iser_tx_desc(iser_conn, (void *)wc->wr_id)) {
- struct iser_tx_desc *desc = (struct iser_tx_desc *)wc->wr_id;
+ if (is_iser_tx_desc(iser_conn, wr_id)) {
+ struct iser_tx_desc *desc = wr_id;
if (desc->type == ISCSI_TX_DATAOUT)
kmem_cache_free(ig.desc_cache, desc);
@@ -1193,14 +1230,14 @@ static void iser_handle_wc(struct ib_wc *wc)
struct iser_rx_desc *rx_desc;
ib_conn = wc->qp->qp_context;
- if (wc->status == IB_WC_SUCCESS) {
+ if (likely(wc->status == IB_WC_SUCCESS)) {
if (wc->opcode == IB_WC_RECV) {
- rx_desc = (struct iser_rx_desc *)wc->wr_id;
+ rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id;
iser_rcv_completion(rx_desc, wc->byte_len,
ib_conn);
} else
if (wc->opcode == IB_WC_SEND) {
- tx_desc = (struct iser_tx_desc *)wc->wr_id;
+ tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
iser_snd_completion(tx_desc, ib_conn);
} else {
iser_err("Unknown wc opcode %d\n", wc->opcode);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 10641b7816f..dafb3c531f9 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -22,7 +22,6 @@
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
-#include <linux/llist.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
@@ -36,11 +35,17 @@
#define ISERT_MAX_CONN 8
#define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN (ISERT_QP_MAX_REQ_DTOS * ISERT_MAX_CONN)
+#define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
+ ISERT_MAX_CONN)
+
+int isert_debug_level = 0;
+module_param_named(debug_level, isert_debug_level, int, 0644);
+MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");
static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
-static struct workqueue_struct *isert_rx_wq;
static struct workqueue_struct *isert_comp_wq;
+static struct workqueue_struct *isert_release_wq;
static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
@@ -54,19 +59,32 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
struct isert_rdma_wr *wr);
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
+static int
+isert_rdma_post_recvl(struct isert_conn *isert_conn);
+static int
+isert_rdma_accept(struct isert_conn *isert_conn);
+struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
+
+static inline bool
+isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
+{
+ return (conn->pi_support &&
+ cmd->prot_op != TARGET_PROT_NORMAL);
+}
+
static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
struct isert_conn *isert_conn = (struct isert_conn *)context;
- pr_err("isert_qp_event_callback event: %d\n", e->event);
+ isert_err("conn %p event: %d\n", isert_conn, e->event);
switch (e->event) {
case IB_EVENT_COMM_EST:
rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
break;
case IB_EVENT_QP_LAST_WQE_REACHED:
- pr_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n");
+ isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n");
break;
default:
break;
@@ -80,39 +98,41 @@ isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
ret = ib_query_device(ib_dev, devattr);
if (ret) {
- pr_err("ib_query_device() failed: %d\n", ret);
+ isert_err("ib_query_device() failed: %d\n", ret);
return ret;
}
- pr_debug("devattr->max_sge: %d\n", devattr->max_sge);
- pr_debug("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);
+ isert_dbg("devattr->max_sge: %d\n", devattr->max_sge);
+ isert_dbg("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);
return 0;
}
static int
-isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id,
- u8 protection)
+isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
{
struct isert_device *device = isert_conn->conn_device;
struct ib_qp_init_attr attr;
- int ret, index, min_index = 0;
+ struct isert_comp *comp;
+ int ret, i, min = 0;
mutex_lock(&device_list_mutex);
- for (index = 0; index < device->cqs_used; index++)
- if (device->cq_active_qps[index] <
- device->cq_active_qps[min_index])
- min_index = index;
- device->cq_active_qps[min_index]++;
- pr_debug("isert_conn_setup_qp: Using min_index: %d\n", min_index);
+ for (i = 0; i < device->comps_used; i++)
+ if (device->comps[i].active_qps <
+ device->comps[min].active_qps)
+ min = i;
+ comp = &device->comps[min];
+ comp->active_qps++;
+ isert_info("conn %p, using comp %p min_index: %d\n",
+ isert_conn, comp, min);
mutex_unlock(&device_list_mutex);
memset(&attr, 0, sizeof(struct ib_qp_init_attr));
attr.event_handler = isert_qp_event_callback;
attr.qp_context = isert_conn;
- attr.send_cq = device->dev_tx_cq[min_index];
- attr.recv_cq = device->dev_rx_cq[min_index];
+ attr.send_cq = comp->cq;
+ attr.recv_cq = comp->cq;
attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
- attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
+ attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
/*
* FIXME: Use devattr.max_sge - 2 for max_send_sge as
* work-around for RDMA_READs with ConnectX-2.
@@ -126,29 +146,29 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id,
attr.cap.max_recv_sge = 1;
attr.sq_sig_type = IB_SIGNAL_REQ_WR;
attr.qp_type = IB_QPT_RC;
- if (protection)
+ if (device->pi_capable)
attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
- pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
- cma_id->device);
- pr_debug("isert_conn_setup_qp conn_pd->device: %p\n",
- isert_conn->conn_pd->device);
-
ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
if (ret) {
- pr_err("rdma_create_qp failed for cma_id %d\n", ret);
- return ret;
+ isert_err("rdma_create_qp failed for cma_id %d\n", ret);
+ goto err;
}
isert_conn->conn_qp = cma_id->qp;
- pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");
return 0;
+err:
+ mutex_lock(&device_list_mutex);
+ comp->active_qps--;
+ mutex_unlock(&device_list_mutex);
+
+ return ret;
}
static void
isert_cq_event_callback(struct ib_event *e, void *context)
{
- pr_debug("isert_cq_event_callback event: %d\n", e->event);
+ isert_dbg("event: %d\n", e->event);
}
static int
@@ -182,6 +202,7 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
}
isert_conn->conn_rx_desc_head = 0;
+
return 0;
dma_map_fail:
@@ -193,6 +214,8 @@ dma_map_fail:
kfree(isert_conn->conn_rx_descs);
isert_conn->conn_rx_descs = NULL;
fail:
+ isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);
+
return -ENOMEM;
}
@@ -216,27 +239,23 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn)
isert_conn->conn_rx_descs = NULL;
}
-static void isert_cq_tx_work(struct work_struct *);
-static void isert_cq_tx_callback(struct ib_cq *, void *);
-static void isert_cq_rx_work(struct work_struct *);
-static void isert_cq_rx_callback(struct ib_cq *, void *);
+static void isert_cq_work(struct work_struct *);
+static void isert_cq_callback(struct ib_cq *, void *);
static int
isert_create_device_ib_res(struct isert_device *device)
{
struct ib_device *ib_dev = device->ib_device;
- struct isert_cq_desc *cq_desc;
struct ib_device_attr *dev_attr;
- int ret = 0, i, j;
- int max_rx_cqe, max_tx_cqe;
+ int ret = 0, i;
+ int max_cqe;
dev_attr = &device->dev_attr;
ret = isert_query_device(ib_dev, dev_attr);
if (ret)
return ret;
- max_rx_cqe = min(ISER_MAX_RX_CQ_LEN, dev_attr->max_cqe);
- max_tx_cqe = min(ISER_MAX_TX_CQ_LEN, dev_attr->max_cqe);
+ max_cqe = min(ISER_MAX_CQ_LEN, dev_attr->max_cqe);
	/* assign function handlers */
if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
@@ -254,55 +273,38 @@ isert_create_device_ib_res(struct isert_device *device)
device->pi_capable = dev_attr->device_cap_flags &
IB_DEVICE_SIGNATURE_HANDOVER ? true : false;
- device->cqs_used = min_t(int, num_online_cpus(),
- device->ib_device->num_comp_vectors);
- device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
- pr_debug("Using %d CQs, device %s supports %d vectors support "
- "Fast registration %d pi_capable %d\n",
- device->cqs_used, device->ib_device->name,
- device->ib_device->num_comp_vectors, device->use_fastreg,
- device->pi_capable);
- device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
- device->cqs_used, GFP_KERNEL);
- if (!device->cq_desc) {
- pr_err("Unable to allocate device->cq_desc\n");
+ device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(),
+ device->ib_device->num_comp_vectors));
+ isert_info("Using %d CQs, %s supports %d vectors support "
+ "Fast registration %d pi_capable %d\n",
+ device->comps_used, device->ib_device->name,
+ device->ib_device->num_comp_vectors, device->use_fastreg,
+ device->pi_capable);
+
+ device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp),
+ GFP_KERNEL);
+ if (!device->comps) {
+ isert_err("Unable to allocate completion contexts\n");
return -ENOMEM;
}
- cq_desc = device->cq_desc;
-
- for (i = 0; i < device->cqs_used; i++) {
- cq_desc[i].device = device;
- cq_desc[i].cq_index = i;
-
- INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work);
- device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
- isert_cq_rx_callback,
- isert_cq_event_callback,
- (void *)&cq_desc[i],
- max_rx_cqe, i);
- if (IS_ERR(device->dev_rx_cq[i])) {
- ret = PTR_ERR(device->dev_rx_cq[i]);
- device->dev_rx_cq[i] = NULL;
- goto out_cq;
- }
- INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work);
- device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
- isert_cq_tx_callback,
- isert_cq_event_callback,
- (void *)&cq_desc[i],
- max_tx_cqe, i);
- if (IS_ERR(device->dev_tx_cq[i])) {
- ret = PTR_ERR(device->dev_tx_cq[i]);
- device->dev_tx_cq[i] = NULL;
- goto out_cq;
- }
+ for (i = 0; i < device->comps_used; i++) {
+ struct isert_comp *comp = &device->comps[i];
- ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
- if (ret)
+ comp->device = device;
+ INIT_WORK(&comp->work, isert_cq_work);
+ comp->cq = ib_create_cq(device->ib_device,
+ isert_cq_callback,
+ isert_cq_event_callback,
+ (void *)comp,
+ max_cqe, i);
+ if (IS_ERR(comp->cq)) {
+ ret = PTR_ERR(comp->cq);
+ comp->cq = NULL;
goto out_cq;
+ }
- ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
+ ret = ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
if (ret)
goto out_cq;
}
@@ -310,19 +312,15 @@ isert_create_device_ib_res(struct isert_device *device)
return 0;
out_cq:
- for (j = 0; j < i; j++) {
- cq_desc = &device->cq_desc[j];
+ for (i = 0; i < device->comps_used; i++) {
+ struct isert_comp *comp = &device->comps[i];
- if (device->dev_rx_cq[j]) {
- cancel_work_sync(&cq_desc->cq_rx_work);
- ib_destroy_cq(device->dev_rx_cq[j]);
- }
- if (device->dev_tx_cq[j]) {
- cancel_work_sync(&cq_desc->cq_tx_work);
- ib_destroy_cq(device->dev_tx_cq[j]);
+ if (comp->cq) {
+ cancel_work_sync(&comp->work);
+ ib_destroy_cq(comp->cq);
}
}
- kfree(device->cq_desc);
+ kfree(device->comps);
return ret;
}
@@ -330,21 +328,18 @@ out_cq:
static void
isert_free_device_ib_res(struct isert_device *device)
{
- struct isert_cq_desc *cq_desc;
int i;
- for (i = 0; i < device->cqs_used; i++) {
- cq_desc = &device->cq_desc[i];
+ isert_info("device %p\n", device);
- cancel_work_sync(&cq_desc->cq_rx_work);
- cancel_work_sync(&cq_desc->cq_tx_work);
- ib_destroy_cq(device->dev_rx_cq[i]);
- ib_destroy_cq(device->dev_tx_cq[i]);
- device->dev_rx_cq[i] = NULL;
- device->dev_tx_cq[i] = NULL;
- }
+ for (i = 0; i < device->comps_used; i++) {
+ struct isert_comp *comp = &device->comps[i];
- kfree(device->cq_desc);
+ cancel_work_sync(&comp->work);
+ ib_destroy_cq(comp->cq);
+ comp->cq = NULL;
+ }
+ kfree(device->comps);
}
static void
@@ -352,6 +347,7 @@ isert_device_try_release(struct isert_device *device)
{
mutex_lock(&device_list_mutex);
device->refcount--;
+ isert_info("device %p refcount %d\n", device, device->refcount);
if (!device->refcount) {
isert_free_device_ib_res(device);
list_del(&device->dev_node);
@@ -370,6 +366,8 @@ isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
list_for_each_entry(device, &device_list, dev_node) {
if (device->ib_device->node_guid == cma_id->device->node_guid) {
device->refcount++;
+ isert_info("Found iser device %p refcount %d\n",
+ device, device->refcount);
mutex_unlock(&device_list_mutex);
return device;
}
@@ -393,6 +391,8 @@ isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
device->refcount++;
list_add_tail(&device->dev_node, &device_list);
+ isert_info("Created a new iser device %p refcount %d\n",
+ device, device->refcount);
mutex_unlock(&device_list_mutex);
return device;
@@ -407,7 +407,7 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
if (list_empty(&isert_conn->conn_fr_pool))
return;
- pr_debug("Freeing conn %p fastreg pool", isert_conn);
+ isert_info("Freeing conn %p fastreg pool", isert_conn);
list_for_each_entry_safe(fr_desc, tmp,
&isert_conn->conn_fr_pool, list) {
@@ -425,87 +425,97 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
}
if (i < isert_conn->conn_fr_pool_size)
- pr_warn("Pool still has %d regions registered\n",
+ isert_warn("Pool still has %d regions registered\n",
isert_conn->conn_fr_pool_size - i);
}
static int
+isert_create_pi_ctx(struct fast_reg_descriptor *desc,
+ struct ib_device *device,
+ struct ib_pd *pd)
+{
+ struct ib_mr_init_attr mr_init_attr;
+ struct pi_context *pi_ctx;
+ int ret;
+
+ pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
+ if (!pi_ctx) {
+ isert_err("Failed to allocate pi context\n");
+ return -ENOMEM;
+ }
+
+ pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(device,
+ ISCSI_ISER_SG_TABLESIZE);
+ if (IS_ERR(pi_ctx->prot_frpl)) {
+ isert_err("Failed to allocate prot frpl err=%ld\n",
+ PTR_ERR(pi_ctx->prot_frpl));
+ ret = PTR_ERR(pi_ctx->prot_frpl);
+ goto err_pi_ctx;
+ }
+
+ pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
+ if (IS_ERR(pi_ctx->prot_mr)) {
+ isert_err("Failed to allocate prot frmr err=%ld\n",
+ PTR_ERR(pi_ctx->prot_mr));
+ ret = PTR_ERR(pi_ctx->prot_mr);
+ goto err_prot_frpl;
+ }
+ desc->ind |= ISERT_PROT_KEY_VALID;
+
+ memset(&mr_init_attr, 0, sizeof(mr_init_attr));
+ mr_init_attr.max_reg_descriptors = 2;
+ mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
+ pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
+ if (IS_ERR(pi_ctx->sig_mr)) {
+ isert_err("Failed to allocate signature enabled mr err=%ld\n",
+ PTR_ERR(pi_ctx->sig_mr));
+ ret = PTR_ERR(pi_ctx->sig_mr);
+ goto err_prot_mr;
+ }
+
+ desc->pi_ctx = pi_ctx;
+ desc->ind |= ISERT_SIG_KEY_VALID;
+ desc->ind &= ~ISERT_PROTECTED;
+
+ return 0;
+
+err_prot_mr:
+ ib_dereg_mr(desc->pi_ctx->prot_mr);
+err_prot_frpl:
+ ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
+err_pi_ctx:
+ kfree(desc->pi_ctx);
+
+ return ret;
+}
+
+static int
isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
- struct fast_reg_descriptor *fr_desc, u8 protection)
+ struct fast_reg_descriptor *fr_desc)
{
int ret;
fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
ISCSI_ISER_SG_TABLESIZE);
if (IS_ERR(fr_desc->data_frpl)) {
- pr_err("Failed to allocate data frpl err=%ld\n",
- PTR_ERR(fr_desc->data_frpl));
+ isert_err("Failed to allocate data frpl err=%ld\n",
+ PTR_ERR(fr_desc->data_frpl));
return PTR_ERR(fr_desc->data_frpl);
}
fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
if (IS_ERR(fr_desc->data_mr)) {
- pr_err("Failed to allocate data frmr err=%ld\n",
- PTR_ERR(fr_desc->data_mr));
+ isert_err("Failed to allocate data frmr err=%ld\n",
+ PTR_ERR(fr_desc->data_mr));
ret = PTR_ERR(fr_desc->data_mr);
goto err_data_frpl;
}
- pr_debug("Create fr_desc %p page_list %p\n",
- fr_desc, fr_desc->data_frpl->page_list);
fr_desc->ind |= ISERT_DATA_KEY_VALID;
- if (protection) {
- struct ib_mr_init_attr mr_init_attr = {0};
- struct pi_context *pi_ctx;
-
- fr_desc->pi_ctx = kzalloc(sizeof(*fr_desc->pi_ctx), GFP_KERNEL);
- if (!fr_desc->pi_ctx) {
- pr_err("Failed to allocate pi context\n");
- ret = -ENOMEM;
- goto err_data_mr;
- }
- pi_ctx = fr_desc->pi_ctx;
-
- pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device,
- ISCSI_ISER_SG_TABLESIZE);
- if (IS_ERR(pi_ctx->prot_frpl)) {
- pr_err("Failed to allocate prot frpl err=%ld\n",
- PTR_ERR(pi_ctx->prot_frpl));
- ret = PTR_ERR(pi_ctx->prot_frpl);
- goto err_pi_ctx;
- }
-
- pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
- if (IS_ERR(pi_ctx->prot_mr)) {
- pr_err("Failed to allocate prot frmr err=%ld\n",
- PTR_ERR(pi_ctx->prot_mr));
- ret = PTR_ERR(pi_ctx->prot_mr);
- goto err_prot_frpl;
- }
- fr_desc->ind |= ISERT_PROT_KEY_VALID;
-
- mr_init_attr.max_reg_descriptors = 2;
- mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
- pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
- if (IS_ERR(pi_ctx->sig_mr)) {
- pr_err("Failed to allocate signature enabled mr err=%ld\n",
- PTR_ERR(pi_ctx->sig_mr));
- ret = PTR_ERR(pi_ctx->sig_mr);
- goto err_prot_mr;
- }
- fr_desc->ind |= ISERT_SIG_KEY_VALID;
- }
- fr_desc->ind &= ~ISERT_PROTECTED;
+ isert_dbg("Created fr_desc %p\n", fr_desc);
return 0;
-err_prot_mr:
- ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
-err_prot_frpl:
- ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
-err_pi_ctx:
- kfree(fr_desc->pi_ctx);
-err_data_mr:
- ib_dereg_mr(fr_desc->data_mr);
+
err_data_frpl:
ib_free_fast_reg_page_list(fr_desc->data_frpl);
@@ -513,7 +523,7 @@ err_data_frpl:
}
static int
-isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support)
+isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
{
struct fast_reg_descriptor *fr_desc;
struct isert_device *device = isert_conn->conn_device;
@@ -531,16 +541,15 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support)
for (i = 0; i < tag_num; i++) {
fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
if (!fr_desc) {
- pr_err("Failed to allocate fast_reg descriptor\n");
+ isert_err("Failed to allocate fast_reg descriptor\n");
ret = -ENOMEM;
goto err;
}
ret = isert_create_fr_desc(device->ib_device,
- isert_conn->conn_pd, fr_desc,
- pi_support);
+ isert_conn->conn_pd, fr_desc);
if (ret) {
- pr_err("Failed to create fastreg descriptor err=%d\n",
+ isert_err("Failed to create fastreg descriptor err=%d\n",
ret);
kfree(fr_desc);
goto err;
@@ -550,7 +559,7 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support)
isert_conn->conn_fr_pool_size++;
}
- pr_debug("Creating conn %p fastreg pool size=%d",
+ isert_dbg("Creating conn %p fastreg pool size=%d",
isert_conn, isert_conn->conn_fr_pool_size);
return 0;
@@ -563,47 +572,45 @@ err:
static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
- struct iscsi_np *np = cma_id->context;
- struct isert_np *isert_np = np->np_context;
+ struct isert_np *isert_np = cma_id->context;
+ struct iscsi_np *np = isert_np->np;
struct isert_conn *isert_conn;
struct isert_device *device;
struct ib_device *ib_dev = cma_id->device;
int ret = 0;
- u8 pi_support;
spin_lock_bh(&np->np_thread_lock);
if (!np->enabled) {
spin_unlock_bh(&np->np_thread_lock);
- pr_debug("iscsi_np is not enabled, reject connect request\n");
+ isert_dbg("iscsi_np is not enabled, reject connect request\n");
return rdma_reject(cma_id, NULL, 0);
}
spin_unlock_bh(&np->np_thread_lock);
- pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
+ isert_dbg("cma_id: %p, portal: %p\n",
cma_id, cma_id->context);
isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
if (!isert_conn) {
- pr_err("Unable to allocate isert_conn\n");
+ isert_err("Unable to allocate isert_conn\n");
return -ENOMEM;
}
isert_conn->state = ISER_CONN_INIT;
INIT_LIST_HEAD(&isert_conn->conn_accept_node);
init_completion(&isert_conn->conn_login_comp);
+ init_completion(&isert_conn->login_req_comp);
init_completion(&isert_conn->conn_wait);
- init_completion(&isert_conn->conn_wait_comp_err);
kref_init(&isert_conn->conn_kref);
mutex_init(&isert_conn->conn_mutex);
spin_lock_init(&isert_conn->conn_lock);
INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
- cma_id->context = isert_conn;
isert_conn->conn_cm_id = cma_id;
isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
ISER_RX_LOGIN_SIZE, GFP_KERNEL);
if (!isert_conn->login_buf) {
- pr_err("Unable to allocate isert_conn->login_buf\n");
+ isert_err("Unable to allocate isert_conn->login_buf\n");
ret = -ENOMEM;
goto out;
}
@@ -611,7 +618,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
isert_conn->login_req_buf = isert_conn->login_buf;
isert_conn->login_rsp_buf = isert_conn->login_buf +
ISCSI_DEF_MAX_RECV_SEG_LEN;
- pr_debug("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
+ isert_dbg("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
isert_conn->login_buf, isert_conn->login_req_buf,
isert_conn->login_rsp_buf);
@@ -621,7 +628,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
if (ret) {
- pr_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
+ isert_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
ret);
isert_conn->login_req_dma = 0;
goto out_login_buf;
@@ -633,7 +640,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
if (ret) {
- pr_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
+ isert_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
ret);
isert_conn->login_rsp_dma = 0;
goto out_req_dma_map;
@@ -649,13 +656,13 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
isert_conn->initiator_depth = min_t(u8,
event->param.conn.initiator_depth,
device->dev_attr.max_qp_init_rd_atom);
- pr_debug("Using initiator_depth: %u\n", isert_conn->initiator_depth);
+ isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth);
isert_conn->conn_device = device;
isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device);
if (IS_ERR(isert_conn->conn_pd)) {
ret = PTR_ERR(isert_conn->conn_pd);
- pr_err("ib_alloc_pd failed for conn %p: ret=%d\n",
+ isert_err("ib_alloc_pd failed for conn %p: ret=%d\n",
isert_conn, ret);
goto out_pd;
}
@@ -664,20 +671,20 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(isert_conn->conn_mr)) {
ret = PTR_ERR(isert_conn->conn_mr);
- pr_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
+ isert_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
isert_conn, ret);
goto out_mr;
}
- pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi;
- if (pi_support && !device->pi_capable) {
- pr_err("Protection information requested but not supported, "
- "rejecting connect request\n");
- ret = rdma_reject(cma_id, NULL, 0);
- goto out_mr;
- }
+ ret = isert_conn_setup_qp(isert_conn, cma_id);
+ if (ret)
+ goto out_conn_dev;
- ret = isert_conn_setup_qp(isert_conn, cma_id, pi_support);
+ ret = isert_rdma_post_recvl(isert_conn);
+ if (ret)
+ goto out_conn_dev;
+
+ ret = isert_rdma_accept(isert_conn);
if (ret)
goto out_conn_dev;
@@ -685,7 +692,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
mutex_unlock(&isert_np->np_accept_mutex);
- pr_debug("isert_connect_request() up np_sem np: %p\n", np);
+ isert_info("np %p: Allow accept_np to continue\n", np);
up(&isert_np->np_sem);
return 0;
@@ -705,6 +712,7 @@ out_login_buf:
kfree(isert_conn->login_buf);
out:
kfree(isert_conn);
+ rdma_reject(cma_id, NULL, 0);
return ret;
}
@@ -713,24 +721,25 @@ isert_connect_release(struct isert_conn *isert_conn)
{
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
struct isert_device *device = isert_conn->conn_device;
- int cq_index;
- pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
+ isert_dbg("conn %p\n", isert_conn);
if (device && device->use_fastreg)
isert_conn_free_fastreg_pool(isert_conn);
+ isert_free_rx_descriptors(isert_conn);
+ rdma_destroy_id(isert_conn->conn_cm_id);
+
if (isert_conn->conn_qp) {
- cq_index = ((struct isert_cq_desc *)
- isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
- pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
- isert_conn->conn_device->cq_active_qps[cq_index]--;
+ struct isert_comp *comp = isert_conn->conn_qp->recv_cq->cq_context;
- rdma_destroy_qp(isert_conn->conn_cm_id);
- }
+ isert_dbg("dec completion context %p active_qps\n", comp);
+ mutex_lock(&device_list_mutex);
+ comp->active_qps--;
+ mutex_unlock(&device_list_mutex);
- isert_free_rx_descriptors(isert_conn);
- rdma_destroy_id(isert_conn->conn_cm_id);
+ ib_destroy_qp(isert_conn->conn_qp);
+ }
ib_dereg_mr(isert_conn->conn_mr);
ib_dealloc_pd(isert_conn->conn_pd);
@@ -747,16 +756,24 @@ isert_connect_release(struct isert_conn *isert_conn)
if (device)
isert_device_try_release(device);
-
- pr_debug("Leaving isert_connect_release >>>>>>>>>>>>\n");
}
static void
isert_connected_handler(struct rdma_cm_id *cma_id)
{
- struct isert_conn *isert_conn = cma_id->context;
+ struct isert_conn *isert_conn = cma_id->qp->qp_context;
- kref_get(&isert_conn->conn_kref);
+ isert_info("conn %p\n", isert_conn);
+
+ if (!kref_get_unless_zero(&isert_conn->conn_kref)) {
+ isert_warn("conn %p connect_release is running\n", isert_conn);
+ return;
+ }
+
+ mutex_lock(&isert_conn->conn_mutex);
+ if (isert_conn->state != ISER_CONN_FULL_FEATURE)
+ isert_conn->state = ISER_CONN_UP;
+ mutex_unlock(&isert_conn->conn_mutex);
}
static void
@@ -765,8 +782,8 @@ isert_release_conn_kref(struct kref *kref)
struct isert_conn *isert_conn = container_of(kref,
struct isert_conn, conn_kref);
- pr_debug("Calling isert_connect_release for final kref %s/%d\n",
- current->comm, current->pid);
+ isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm,
+ current->pid);
isert_connect_release(isert_conn);
}
@@ -777,75 +794,111 @@ isert_put_conn(struct isert_conn *isert_conn)
kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
}
+/**
+ * isert_conn_terminate() - Initiate connection termination
+ * @isert_conn: isert connection struct
+ *
+ * Notes:
+ * In case the connection state is FULL_FEATURE, move state
+ * to TERMINATING and start teardown sequence (rdma_disconnect).
+ * In case the connection state is UP, complete flush as well.
+ *
+ * This routine must be called with conn_mutex held. Thus it is
+ * safe to call multiple times.
+ */
static void
-isert_disconnect_work(struct work_struct *work)
+isert_conn_terminate(struct isert_conn *isert_conn)
{
- struct isert_conn *isert_conn = container_of(work,
- struct isert_conn, conn_logout_work);
+ int err;
- pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
- mutex_lock(&isert_conn->conn_mutex);
- if (isert_conn->state == ISER_CONN_UP)
+ switch (isert_conn->state) {
+ case ISER_CONN_TERMINATING:
+ break;
+ case ISER_CONN_UP:
+ case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
+ isert_info("Terminating conn %p state %d\n",
+ isert_conn, isert_conn->state);
isert_conn->state = ISER_CONN_TERMINATING;
-
- if (isert_conn->post_recv_buf_count == 0 &&
- atomic_read(&isert_conn->post_send_buf_count) == 0) {
- mutex_unlock(&isert_conn->conn_mutex);
- goto wake_up;
- }
- if (!isert_conn->conn_cm_id) {
- mutex_unlock(&isert_conn->conn_mutex);
- isert_put_conn(isert_conn);
- return;
+ err = rdma_disconnect(isert_conn->conn_cm_id);
+ if (err)
+ isert_warn("Failed rdma_disconnect isert_conn %p\n",
+ isert_conn);
+ break;
+ default:
+ isert_warn("conn %p teminating in state %d\n",
+ isert_conn, isert_conn->state);
}
+}
- if (isert_conn->disconnect) {
- /* Send DREQ/DREP towards our initiator */
- rdma_disconnect(isert_conn->conn_cm_id);
- }
+static int
+isert_np_cma_handler(struct isert_np *isert_np,
+ enum rdma_cm_event_type event)
+{
+ isert_dbg("isert np %p, handling event %d\n", isert_np, event);
- mutex_unlock(&isert_conn->conn_mutex);
+ switch (event) {
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ isert_np->np_cm_id = NULL;
+ break;
+ case RDMA_CM_EVENT_ADDR_CHANGE:
+ isert_np->np_cm_id = isert_setup_id(isert_np);
+ if (IS_ERR(isert_np->np_cm_id)) {
+ isert_err("isert np %p setup id failed: %ld\n",
+ isert_np, PTR_ERR(isert_np->np_cm_id));
+ isert_np->np_cm_id = NULL;
+ }
+ break;
+ default:
+ isert_err("isert np %p Unexpected event %d\n",
+ isert_np, event);
+ }
-wake_up:
- complete(&isert_conn->conn_wait);
+ return -1;
}
static int
-isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
+isert_disconnected_handler(struct rdma_cm_id *cma_id,
+ enum rdma_cm_event_type event)
{
+ struct isert_np *isert_np = cma_id->context;
struct isert_conn *isert_conn;
- if (!cma_id->qp) {
- struct isert_np *isert_np = cma_id->context;
+ if (isert_np->np_cm_id == cma_id)
+ return isert_np_cma_handler(cma_id->context, event);
- isert_np->np_cm_id = NULL;
- return -1;
- }
+ isert_conn = cma_id->qp->qp_context;
- isert_conn = (struct isert_conn *)cma_id->context;
+ mutex_lock(&isert_conn->conn_mutex);
+ isert_conn_terminate(isert_conn);
+ mutex_unlock(&isert_conn->conn_mutex);
- isert_conn->disconnect = disconnect;
- INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
- schedule_work(&isert_conn->conn_logout_work);
+ isert_info("conn %p completing conn_wait\n", isert_conn);
+ complete(&isert_conn->conn_wait);
return 0;
}
+static void
+isert_connect_error(struct rdma_cm_id *cma_id)
+{
+ struct isert_conn *isert_conn = cma_id->qp->qp_context;
+
+ isert_put_conn(isert_conn);
+}
+
static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
int ret = 0;
- bool disconnect = false;
- pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
- event->event, event->status, cma_id->context, cma_id);
+ isert_info("event %d status %d id %p np %p\n", event->event,
+ event->status, cma_id, cma_id->context);
switch (event->event) {
case RDMA_CM_EVENT_CONNECT_REQUEST:
ret = isert_connect_request(cma_id, event);
if (ret)
- pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
- event->event, ret);
+ isert_err("failed handle connect request %d\n", ret);
break;
case RDMA_CM_EVENT_ESTABLISHED:
isert_connected_handler(cma_id);
@@ -853,13 +906,16 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */
case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */
case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
- disconnect = true;
case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */
- ret = isert_disconnected_handler(cma_id, disconnect);
+ ret = isert_disconnected_handler(cma_id, event->event);
break;
+ case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */
+ case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */
case RDMA_CM_EVENT_CONNECT_ERROR:
+ isert_connect_error(cma_id);
+ break;
default:
- pr_err("Unhandled RDMA CMA event: %d\n", event->event);
+ isert_err("Unhandled RDMA CMA event: %d\n", event->event);
break;
}
@@ -876,7 +932,7 @@ isert_post_recv(struct isert_conn *isert_conn, u32 count)
for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
rx_desc = &isert_conn->conn_rx_descs[rx_head];
- rx_wr->wr_id = (unsigned long)rx_desc;
+ rx_wr->wr_id = (uintptr_t)rx_desc;
rx_wr->sg_list = &rx_desc->rx_sg;
rx_wr->num_sge = 1;
rx_wr->next = rx_wr + 1;
@@ -890,10 +946,10 @@ isert_post_recv(struct isert_conn *isert_conn, u32 count)
ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
&rx_wr_failed);
if (ret) {
- pr_err("ib_post_recv() failed with ret: %d\n", ret);
+ isert_err("ib_post_recv() failed with ret: %d\n", ret);
isert_conn->post_recv_buf_count -= count;
} else {
- pr_debug("isert_post_recv(): Posted %d RX buffers\n", count);
+ isert_dbg("isert_post_recv(): Posted %d RX buffers\n", count);
isert_conn->conn_rx_desc_head = rx_head;
}
return ret;
@@ -910,19 +966,15 @@ isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
ISER_HEADERS_LEN, DMA_TO_DEVICE);
send_wr.next = NULL;
- send_wr.wr_id = (unsigned long)tx_desc;
+ send_wr.wr_id = (uintptr_t)tx_desc;
send_wr.sg_list = tx_desc->tx_sg;
send_wr.num_sge = tx_desc->num_sge;
send_wr.opcode = IB_WR_SEND;
send_wr.send_flags = IB_SEND_SIGNALED;
- atomic_inc(&isert_conn->post_send_buf_count);
-
ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
- if (ret) {
- pr_err("ib_post_send() failed, ret: %d\n", ret);
- atomic_dec(&isert_conn->post_send_buf_count);
- }
+ if (ret)
+ isert_err("ib_post_send() failed, ret: %d\n", ret);
return ret;
}
@@ -945,7 +997,7 @@ isert_create_send_desc(struct isert_conn *isert_conn,
if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) {
tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
- pr_debug("tx_desc %p lkey mismatch, fixing\n", tx_desc);
+ isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc);
}
}
@@ -959,7 +1011,7 @@ isert_init_tx_hdrs(struct isert_conn *isert_conn,
dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
ISER_HEADERS_LEN, DMA_TO_DEVICE);
if (ib_dma_mapping_error(ib_dev, dma_addr)) {
- pr_err("ib_dma_mapping_error() failed\n");
+ isert_err("ib_dma_mapping_error() failed\n");
return -ENOMEM;
}
@@ -968,40 +1020,24 @@ isert_init_tx_hdrs(struct isert_conn *isert_conn,
tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
- pr_debug("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u"
- " lkey: 0x%08x\n", tx_desc->tx_sg[0].addr,
- tx_desc->tx_sg[0].length, tx_desc->tx_sg[0].lkey);
+ isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n",
+ tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length,
+ tx_desc->tx_sg[0].lkey);
return 0;
}
static void
isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
- struct ib_send_wr *send_wr, bool coalesce)
+ struct ib_send_wr *send_wr)
{
struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;
isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
- send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
+ send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
send_wr->opcode = IB_WR_SEND;
send_wr->sg_list = &tx_desc->tx_sg[0];
send_wr->num_sge = isert_cmd->tx_desc.num_sge;
- /*
- * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED
- * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls.
- */
- mutex_lock(&isert_conn->conn_mutex);
- if (coalesce && isert_conn->state == ISER_CONN_UP &&
- ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) {
- tx_desc->llnode_active = true;
- llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist);
- mutex_unlock(&isert_conn->conn_mutex);
- return;
- }
- isert_conn->conn_comp_batch = 0;
- tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist);
- mutex_unlock(&isert_conn->conn_mutex);
-
send_wr->send_flags = IB_SEND_SIGNALED;
}
@@ -1017,22 +1053,21 @@ isert_rdma_post_recvl(struct isert_conn *isert_conn)
sge.length = ISER_RX_LOGIN_SIZE;
sge.lkey = isert_conn->conn_mr->lkey;
- pr_debug("Setup sge: addr: %llx length: %d 0x%08x\n",
+ isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
sge.addr, sge.length, sge.lkey);
memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
- rx_wr.wr_id = (unsigned long)isert_conn->login_req_buf;
+ rx_wr.wr_id = (uintptr_t)isert_conn->login_req_buf;
rx_wr.sg_list = &sge;
rx_wr.num_sge = 1;
isert_conn->post_recv_buf_count++;
ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail);
if (ret) {
- pr_err("ib_post_recv() failed: %d\n", ret);
+ isert_err("ib_post_recv() failed: %d\n", ret);
isert_conn->post_recv_buf_count--;
}
- pr_debug("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n");
return ret;
}
@@ -1072,13 +1107,9 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
if (login->login_complete) {
if (!conn->sess->sess_ops->SessionType &&
isert_conn->conn_device->use_fastreg) {
- /* Normal Session and fastreg is used */
- u8 pi_support = login->np->tpg_np->tpg->tpg_attrib.t10_pi;
-
- ret = isert_conn_create_fastreg_pool(isert_conn,
- pi_support);
+ ret = isert_conn_create_fastreg_pool(isert_conn);
if (ret) {
- pr_err("Conn: %p failed to create"
+ isert_err("Conn: %p failed to create"
" fastreg pool\n", isert_conn);
return ret;
}
@@ -1092,7 +1123,10 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
if (ret)
return ret;
- isert_conn->state = ISER_CONN_UP;
+ /* Now we are in FULL_FEATURE phase */
+ mutex_lock(&isert_conn->conn_mutex);
+ isert_conn->state = ISER_CONN_FULL_FEATURE;
+ mutex_unlock(&isert_conn->conn_mutex);
goto post_send;
}
@@ -1109,18 +1143,17 @@ post_send:
}
static void
-isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
- struct isert_conn *isert_conn)
+isert_rx_login_req(struct isert_conn *isert_conn)
{
+ struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
+ int rx_buflen = isert_conn->login_req_len;
struct iscsi_conn *conn = isert_conn->conn;
struct iscsi_login *login = conn->conn_login;
int size;
- if (!login) {
- pr_err("conn->conn_login is NULL\n");
- dump_stack();
- return;
- }
+ isert_info("conn %p\n", isert_conn);
+
+ WARN_ON_ONCE(!login);
if (login->first_request) {
struct iscsi_login_req *login_req =
@@ -1146,8 +1179,9 @@ isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);
size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
- pr_debug("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n",
- size, rx_buflen, MAX_KEY_VALUE_PAIRS);
+ isert_dbg("Using login payload size: %d, rx_buflen: %d "
+ "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen,
+ MAX_KEY_VALUE_PAIRS);
memcpy(login->req_buf, &rx_desc->data[0], size);
if (login->first_request) {
@@ -1166,7 +1200,7 @@ static struct iscsi_cmd
cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
if (!cmd) {
- pr_err("Unable to allocate iscsi_cmd + isert_cmd\n");
+ isert_err("Unable to allocate iscsi_cmd + isert_cmd\n");
return NULL;
}
isert_cmd = iscsit_priv_cmd(cmd);
@@ -1209,8 +1243,8 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn,
sg = &cmd->se_cmd.t_data_sg[0];
sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
- pr_debug("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
- sg, sg_nents, &rx_desc->data[0], imm_data_len);
+ isert_dbg("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
+ sg, sg_nents, &rx_desc->data[0], imm_data_len);
sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);
@@ -1254,13 +1288,15 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
* FIXME: Unexpected unsolicited_data out
*/
if (!cmd->unsolicited_data) {
- pr_err("Received unexpected solicited data payload\n");
+ isert_err("Received unexpected solicited data payload\n");
dump_stack();
return -1;
}
- pr_debug("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n",
- unsol_data_len, cmd->write_data_done, cmd->se_cmd.data_length);
+ isert_dbg("Unsolicited DataOut unsol_data_len: %u, "
+ "write_data_done: %u, data_length: %u\n",
+ unsol_data_len, cmd->write_data_done,
+ cmd->se_cmd.data_length);
sg_off = cmd->write_data_done / PAGE_SIZE;
sg_start = &cmd->se_cmd.t_data_sg[sg_off];
@@ -1270,12 +1306,13 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
* FIXME: Non page-aligned unsolicited_data out
*/
if (page_off) {
- pr_err("Received unexpected non-page aligned data payload\n");
+ isert_err("unexpected non-page aligned data payload\n");
dump_stack();
return -1;
}
- pr_debug("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n",
- sg_start, sg_off, sg_nents, &rx_desc->data[0], unsol_data_len);
+ isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u "
+ "sg_nents: %u from %p %u\n", sg_start, sg_off,
+ sg_nents, &rx_desc->data[0], unsol_data_len);
sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
unsol_data_len);
@@ -1322,8 +1359,8 @@ isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd
text_in = kzalloc(payload_length, GFP_KERNEL);
if (!text_in) {
- pr_err("Unable to allocate text_in of payload_length: %u\n",
- payload_length);
+ isert_err("Unable to allocate text_in of payload_length: %u\n",
+ payload_length);
return -ENOMEM;
}
cmd->text_in_ptr = text_in;
@@ -1348,8 +1385,8 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
if (sess->sess_ops->SessionType &&
(!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
- pr_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
- " ignoring\n", opcode);
+ isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
+ " ignoring\n", opcode);
return 0;
}
@@ -1395,10 +1432,6 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
break;
ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
- if (ret > 0)
- wait_for_completion_timeout(&conn->conn_logout_comp,
- SECONDS_FOR_LOGOUT_COMP *
- HZ);
break;
case ISCSI_OP_TEXT:
cmd = isert_allocate_cmd(conn);
@@ -1410,7 +1443,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
rx_desc, (struct iscsi_text *)hdr);
break;
default:
- pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
+ isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
dump_stack();
break;
}
@@ -1431,23 +1464,23 @@ isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
if (iser_hdr->flags & ISER_RSV) {
read_stag = be32_to_cpu(iser_hdr->read_stag);
read_va = be64_to_cpu(iser_hdr->read_va);
- pr_debug("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n",
- read_stag, (unsigned long long)read_va);
+ isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n",
+ read_stag, (unsigned long long)read_va);
}
if (iser_hdr->flags & ISER_WSV) {
write_stag = be32_to_cpu(iser_hdr->write_stag);
write_va = be64_to_cpu(iser_hdr->write_va);
- pr_debug("ISER_WSV: write__stag: 0x%08x write_va: 0x%16llx\n",
- write_stag, (unsigned long long)write_va);
+ isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n",
+ write_stag, (unsigned long long)write_va);
}
- pr_debug("ISER ISCSI_CTRL PDU\n");
+ isert_dbg("ISER ISCSI_CTRL PDU\n");
break;
case ISER_HELLO:
- pr_err("iSER Hello message\n");
+ isert_err("iSER Hello message\n");
break;
default:
- pr_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
+ isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
break;
}
@@ -1457,7 +1490,7 @@ isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
static void
isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
- unsigned long xfer_len)
+ u32 xfer_len)
{
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
struct iscsi_hdr *hdr;
@@ -1467,34 +1500,43 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
if ((char *)desc == isert_conn->login_req_buf) {
rx_dma = isert_conn->login_req_dma;
rx_buflen = ISER_RX_LOGIN_SIZE;
- pr_debug("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
+ isert_dbg("login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
rx_dma, rx_buflen);
} else {
rx_dma = desc->dma_addr;
rx_buflen = ISER_RX_PAYLOAD_SIZE;
- pr_debug("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
+ isert_dbg("req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
rx_dma, rx_buflen);
}
ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);
hdr = &desc->iscsi_header;
- pr_debug("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
+ isert_dbg("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
hdr->opcode, hdr->itt, hdr->flags,
(int)(xfer_len - ISER_HEADERS_LEN));
- if ((char *)desc == isert_conn->login_req_buf)
- isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN,
- isert_conn);
- else
+ if ((char *)desc == isert_conn->login_req_buf) {
+ isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
+ if (isert_conn->conn) {
+ struct iscsi_login *login = isert_conn->conn->conn_login;
+
+ if (login && !login->first_request)
+ isert_rx_login_req(isert_conn);
+ }
+ mutex_lock(&isert_conn->conn_mutex);
+ complete(&isert_conn->login_req_comp);
+ mutex_unlock(&isert_conn->conn_mutex);
+ } else {
isert_rx_do_work(desc, isert_conn);
+ }
ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
DMA_FROM_DEVICE);
isert_conn->post_recv_buf_count--;
- pr_debug("iSERT: Decremented post_recv_buf_count: %d\n",
- isert_conn->post_recv_buf_count);
+ isert_dbg("Decremented post_recv_buf_count: %d\n",
+ isert_conn->post_recv_buf_count);
if ((char *)desc == isert_conn->login_req_buf)
return;
@@ -1505,7 +1547,7 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
ISERT_MIN_POSTED_RX);
err = isert_post_recv(isert_conn, count);
if (err) {
- pr_err("isert_post_recv() count: %d failed, %d\n",
+ isert_err("isert_post_recv() count: %d failed, %d\n",
count, err);
}
}
@@ -1534,12 +1576,12 @@ isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
data->dma_dir);
if (unlikely(!data->dma_nents)) {
- pr_err("Cmd: unable to dma map SGs %p\n", sg);
+ isert_err("Cmd: unable to dma map SGs %p\n", sg);
return -EINVAL;
}
- pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
- isert_cmd, data->dma_nents, data->sg, data->nents, data->len);
+ isert_dbg("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
+ isert_cmd, data->dma_nents, data->sg, data->nents, data->len);
return 0;
}
@@ -1560,21 +1602,21 @@ isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
- pr_debug("isert_unmap_cmd: %p\n", isert_cmd);
+ isert_dbg("Cmd %p\n", isert_cmd);
if (wr->data.sg) {
- pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd);
+ isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
isert_unmap_data_buf(isert_conn, &wr->data);
}
if (wr->send_wr) {
- pr_debug("isert_unmap_cmd: %p free send_wr\n", isert_cmd);
+ isert_dbg("Cmd %p free send_wr\n", isert_cmd);
kfree(wr->send_wr);
wr->send_wr = NULL;
}
if (wr->ib_sge) {
- pr_debug("isert_unmap_cmd: %p free ib_sge\n", isert_cmd);
+ isert_dbg("Cmd %p free ib_sge\n", isert_cmd);
kfree(wr->ib_sge);
wr->ib_sge = NULL;
}
@@ -1586,11 +1628,10 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
LIST_HEAD(unmap_list);
- pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd);
+ isert_dbg("Cmd %p\n", isert_cmd);
if (wr->fr_desc) {
- pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n",
- isert_cmd, wr->fr_desc);
+ isert_dbg("Cmd %p free fr_desc %p\n", isert_cmd, wr->fr_desc);
if (wr->fr_desc->ind & ISERT_PROTECTED) {
isert_unmap_data_buf(isert_conn, &wr->prot);
wr->fr_desc->ind &= ~ISERT_PROTECTED;
@@ -1602,7 +1643,7 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
}
if (wr->data.sg) {
- pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd);
+ isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
isert_unmap_data_buf(isert_conn, &wr->data);
}
@@ -1618,7 +1659,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
struct iscsi_conn *conn = isert_conn->conn;
struct isert_device *device = isert_conn->conn_device;
- pr_debug("Entering isert_put_cmd: %p\n", isert_cmd);
+ isert_dbg("Cmd %p\n", isert_cmd);
switch (cmd->iscsi_opcode) {
case ISCSI_OP_SCSI_CMD:
@@ -1668,7 +1709,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
* associated cmd->se_cmd needs to be released.
*/
if (cmd->se_cmd.se_tfo != NULL) {
- pr_debug("Calling transport_generic_free_cmd from"
+ isert_dbg("Calling transport_generic_free_cmd from"
" isert_put_cmd for 0x%02x\n",
cmd->iscsi_opcode);
transport_generic_free_cmd(&cmd->se_cmd, 0);
@@ -1687,7 +1728,7 @@ static void
isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
{
if (tx_desc->dma_addr != 0) {
- pr_debug("Calling ib_dma_unmap_single for tx_desc->dma_addr\n");
+ isert_dbg("unmap single for tx_desc->dma_addr\n");
ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
ISER_HEADERS_LEN, DMA_TO_DEVICE);
tx_desc->dma_addr = 0;
@@ -1699,7 +1740,7 @@ isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
struct ib_device *ib_dev, bool comp_err)
{
if (isert_cmd->pdu_buf_dma != 0) {
- pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n");
+ isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n");
ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
isert_cmd->pdu_buf_dma = 0;
@@ -1717,7 +1758,7 @@ isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
if (ret) {
- pr_err("ib_check_mr_status failed, ret %d\n", ret);
+ isert_err("ib_check_mr_status failed, ret %d\n", ret);
goto fail_mr_status;
}
@@ -1740,12 +1781,12 @@ isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
do_div(sec_offset_err, block_size);
se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;
- pr_err("isert: PI error found type %d at sector 0x%llx "
- "expected 0x%x vs actual 0x%x\n",
- mr_status.sig_err.err_type,
- (unsigned long long)se_cmd->bad_sector,
- mr_status.sig_err.expected,
- mr_status.sig_err.actual);
+ isert_err("PI error found type %d at sector 0x%llx "
+ "expected 0x%x vs actual 0x%x\n",
+ mr_status.sig_err.err_type,
+ (unsigned long long)se_cmd->bad_sector,
+ mr_status.sig_err.expected,
+ mr_status.sig_err.actual);
ret = 1;
}
@@ -1801,7 +1842,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
cmd->write_data_done = wr->data.len;
wr->send_wr_num = 0;
- pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
+ isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
spin_lock_bh(&cmd->istate_lock);
cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
@@ -1823,36 +1864,22 @@ isert_do_control_comp(struct work_struct *work)
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
+ isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state);
+
switch (cmd->i_state) {
case ISTATE_SEND_TASKMGTRSP:
- pr_debug("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n");
-
- atomic_dec(&isert_conn->post_send_buf_count);
iscsit_tmr_post_handler(cmd, cmd->conn);
-
- cmd->i_state = ISTATE_SENT_STATUS;
- isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
- break;
- case ISTATE_SEND_REJECT:
- pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
- atomic_dec(&isert_conn->post_send_buf_count);
-
+ case ISTATE_SEND_REJECT: /* FALLTHRU */
+ case ISTATE_SEND_TEXTRSP: /* FALLTHRU */
cmd->i_state = ISTATE_SENT_STATUS;
- isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
+ isert_completion_put(&isert_cmd->tx_desc, isert_cmd,
+ ib_dev, false);
break;
case ISTATE_SEND_LOGOUTRSP:
- pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
-
- atomic_dec(&isert_conn->post_send_buf_count);
iscsit_logout_post_handler(cmd, cmd->conn);
break;
- case ISTATE_SEND_TEXTRSP:
- atomic_dec(&isert_conn->post_send_buf_count);
- cmd->i_state = ISTATE_SENT_STATUS;
- isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
- break;
default:
- pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
+ isert_err("Unknown i_state %d\n", cmd->i_state);
dump_stack();
break;
}
@@ -1865,7 +1892,6 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
struct ib_device *ib_dev)
{
struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
- struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
@@ -1878,267 +1904,151 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
return;
}
- /**
- * If send_wr_num is 0 this means that we got
- * RDMA completion and we cleared it and we should
- * simply decrement the response post. else the
- * response is incorporated in send_wr_num, just
- * sub it.
- **/
- if (wr->send_wr_num)
- atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
- else
- atomic_dec(&isert_conn->post_send_buf_count);
-
cmd->i_state = ISTATE_SENT_STATUS;
isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
}
static void
-__isert_send_completion(struct iser_tx_desc *tx_desc,
- struct isert_conn *isert_conn)
+isert_send_completion(struct iser_tx_desc *tx_desc,
+ struct isert_conn *isert_conn)
{
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
struct isert_rdma_wr *wr;
if (!isert_cmd) {
- atomic_dec(&isert_conn->post_send_buf_count);
isert_unmap_tx_desc(tx_desc, ib_dev);
return;
}
wr = &isert_cmd->rdma_wr;
+ isert_dbg("Cmd %p iser_ib_op %d\n", isert_cmd, wr->iser_ib_op);
+
switch (wr->iser_ib_op) {
case ISER_IB_RECV:
- pr_err("isert_send_completion: Got ISER_IB_RECV\n");
+ isert_err("Got ISER_IB_RECV\n");
dump_stack();
break;
case ISER_IB_SEND:
- pr_debug("isert_send_completion: Got ISER_IB_SEND\n");
isert_response_completion(tx_desc, isert_cmd,
isert_conn, ib_dev);
break;
case ISER_IB_RDMA_WRITE:
- pr_debug("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
- atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
isert_completion_rdma_write(tx_desc, isert_cmd);
break;
case ISER_IB_RDMA_READ:
- pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
-
- atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
isert_completion_rdma_read(tx_desc, isert_cmd);
break;
default:
- pr_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op);
+ isert_err("Unknown wr->iser_ib_op: 0x%x\n", wr->iser_ib_op);
dump_stack();
break;
}
}
-static void
-isert_send_completion(struct iser_tx_desc *tx_desc,
- struct isert_conn *isert_conn)
-{
- struct llist_node *llnode = tx_desc->comp_llnode_batch;
- struct iser_tx_desc *t;
- /*
- * Drain coalesced completion llist starting from comp_llnode_batch
- * setup in isert_init_send_wr(), and then complete trailing tx_desc.
- */
- while (llnode) {
- t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
- llnode = llist_next(llnode);
- __isert_send_completion(t, isert_conn);
- }
- __isert_send_completion(tx_desc, isert_conn);
-}
-
-static void
-isert_cq_drain_comp_llist(struct isert_conn *isert_conn, struct ib_device *ib_dev)
+/**
+ * is_isert_tx_desc() - Indicate if the completion wr_id
+ * is a TX descriptor or not.
+ * @isert_conn: iser connection
+ * @wr_id: completion WR identifier
+ *
+ * Since we cannot rely on the wc opcode in FLUSH errors,
+ * we work around it by checking if the wr_id address
+ * falls in the iser connection rx_descs buffer. If so,
+ * it is an RX descriptor; otherwise it is a TX.
+ */
+static inline bool
+is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id)
{
- struct llist_node *llnode;
- struct isert_rdma_wr *wr;
- struct iser_tx_desc *t;
+ void *start = isert_conn->conn_rx_descs;
+ int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->conn_rx_descs);
- mutex_lock(&isert_conn->conn_mutex);
- llnode = llist_del_all(&isert_conn->conn_comp_llist);
- isert_conn->conn_comp_batch = 0;
- mutex_unlock(&isert_conn->conn_mutex);
-
- while (llnode) {
- t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
- llnode = llist_next(llnode);
- wr = &t->isert_cmd->rdma_wr;
-
- /**
- * If send_wr_num is 0 this means that we got
- * RDMA completion and we cleared it and we should
- * simply decrement the response post. else the
- * response is incorporated in send_wr_num, just
- * sub it.
- **/
- if (wr->send_wr_num)
- atomic_sub(wr->send_wr_num,
- &isert_conn->post_send_buf_count);
- else
- atomic_dec(&isert_conn->post_send_buf_count);
+ if (wr_id >= start && wr_id < start + len)
+ return false;
- isert_completion_put(t, t->isert_cmd, ib_dev, true);
- }
+ return true;
}
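
/*
 * Note on the check above: it relies on conn_rx_descs being one contiguous
 * array of ISERT_QP_MAX_RECV_DTOS descriptors, so a plain pointer-range test
 * is enough to tell RX from TX completions. Fast registration work requests
 * tagged with ISER_FASTREG_LI_WRID never reach this helper, since they are
 * filtered out in isert_handle_wc() before isert_cq_comp_err() is invoked.
 */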
static void
-isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
+isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
- struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
- struct llist_node *llnode = tx_desc->comp_llnode_batch;
- struct isert_rdma_wr *wr;
- struct iser_tx_desc *t;
-
- while (llnode) {
- t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
- llnode = llist_next(llnode);
- wr = &t->isert_cmd->rdma_wr;
+ if (wc->wr_id == ISER_BEACON_WRID) {
+ isert_info("conn %p completing conn_wait_comp_err\n",
+ isert_conn);
+ complete(&isert_conn->conn_wait_comp_err);
+ } else if (is_isert_tx_desc(isert_conn, (void *)(uintptr_t)wc->wr_id)) {
+ struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct isert_cmd *isert_cmd;
+ struct iser_tx_desc *desc;
- /**
- * If send_wr_num is 0 this means that we got
- * RDMA completion and we cleared it and we should
- * simply decrement the response post. else the
- * response is incorporated in send_wr_num, just
- * sub it.
- **/
- if (wr->send_wr_num)
- atomic_sub(wr->send_wr_num,
- &isert_conn->post_send_buf_count);
+ desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
+ isert_cmd = desc->isert_cmd;
+ if (!isert_cmd)
+ isert_unmap_tx_desc(desc, ib_dev);
else
- atomic_dec(&isert_conn->post_send_buf_count);
-
- isert_completion_put(t, t->isert_cmd, ib_dev, true);
- }
- tx_desc->comp_llnode_batch = NULL;
-
- if (!isert_cmd)
- isert_unmap_tx_desc(tx_desc, ib_dev);
- else
- isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
-}
-
-static void
-isert_cq_rx_comp_err(struct isert_conn *isert_conn)
-{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
- struct iscsi_conn *conn = isert_conn->conn;
-
- if (isert_conn->post_recv_buf_count)
- return;
-
- isert_cq_drain_comp_llist(isert_conn, ib_dev);
-
- if (conn->sess) {
- target_sess_cmd_list_set_waiting(conn->sess->se_sess);
- target_wait_for_sess_cmds(conn->sess->se_sess);
+ isert_completion_put(desc, isert_cmd, ib_dev, true);
+ } else {
+ isert_conn->post_recv_buf_count--;
+ if (!isert_conn->post_recv_buf_count)
+ iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
}
-
- while (atomic_read(&isert_conn->post_send_buf_count))
- msleep(3000);
-
- mutex_lock(&isert_conn->conn_mutex);
- isert_conn->state = ISER_CONN_DOWN;
- mutex_unlock(&isert_conn->conn_mutex);
-
- iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
-
- complete(&isert_conn->conn_wait_comp_err);
}
static void
-isert_cq_tx_work(struct work_struct *work)
+isert_handle_wc(struct ib_wc *wc)
{
- struct isert_cq_desc *cq_desc = container_of(work,
- struct isert_cq_desc, cq_tx_work);
- struct isert_device *device = cq_desc->device;
- int cq_index = cq_desc->cq_index;
- struct ib_cq *tx_cq = device->dev_tx_cq[cq_index];
struct isert_conn *isert_conn;
struct iser_tx_desc *tx_desc;
- struct ib_wc wc;
-
- while (ib_poll_cq(tx_cq, 1, &wc) == 1) {
- tx_desc = (struct iser_tx_desc *)(unsigned long)wc.wr_id;
- isert_conn = wc.qp->qp_context;
+ struct iser_rx_desc *rx_desc;
- if (wc.status == IB_WC_SUCCESS) {
- isert_send_completion(tx_desc, isert_conn);
+ isert_conn = wc->qp->qp_context;
+ if (likely(wc->status == IB_WC_SUCCESS)) {
+ if (wc->opcode == IB_WC_RECV) {
+ rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id;
+ isert_rx_completion(rx_desc, isert_conn, wc->byte_len);
} else {
- pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
- pr_debug("TX wc.status: 0x%08x\n", wc.status);
- pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);
-
- if (wc.wr_id != ISER_FASTREG_LI_WRID) {
- if (tx_desc->llnode_active)
- continue;
-
- atomic_dec(&isert_conn->post_send_buf_count);
- isert_cq_tx_comp_err(tx_desc, isert_conn);
- }
+ tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
+ isert_send_completion(tx_desc, isert_conn);
}
- }
-
- ib_req_notify_cq(tx_cq, IB_CQ_NEXT_COMP);
-}
-
-static void
-isert_cq_tx_callback(struct ib_cq *cq, void *context)
-{
- struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
+ } else {
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ isert_err("wr id %llx status %d vend_err %x\n",
+ wc->wr_id, wc->status, wc->vendor_err);
+ else
+ isert_dbg("flush error: wr id %llx\n", wc->wr_id);
- queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
+ if (wc->wr_id != ISER_FASTREG_LI_WRID)
+ isert_cq_comp_err(isert_conn, wc);
+ }
}
static void
-isert_cq_rx_work(struct work_struct *work)
+isert_cq_work(struct work_struct *work)
{
- struct isert_cq_desc *cq_desc = container_of(work,
- struct isert_cq_desc, cq_rx_work);
- struct isert_device *device = cq_desc->device;
- int cq_index = cq_desc->cq_index;
- struct ib_cq *rx_cq = device->dev_rx_cq[cq_index];
- struct isert_conn *isert_conn;
- struct iser_rx_desc *rx_desc;
- struct ib_wc wc;
- unsigned long xfer_len;
+ enum { isert_poll_budget = 65536 };
+ struct isert_comp *comp = container_of(work, struct isert_comp,
+ work);
+ struct ib_wc *const wcs = comp->wcs;
+ int i, n, completed = 0;
- while (ib_poll_cq(rx_cq, 1, &wc) == 1) {
- rx_desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id;
- isert_conn = wc.qp->qp_context;
+ while ((n = ib_poll_cq(comp->cq, ARRAY_SIZE(comp->wcs), wcs)) > 0) {
+ for (i = 0; i < n; i++)
+ isert_handle_wc(&wcs[i]);
- if (wc.status == IB_WC_SUCCESS) {
- xfer_len = (unsigned long)wc.byte_len;
- isert_rx_completion(rx_desc, isert_conn, xfer_len);
- } else {
- pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
- if (wc.status != IB_WC_WR_FLUSH_ERR) {
- pr_debug("RX wc.status: 0x%08x\n", wc.status);
- pr_debug("RX wc.vendor_err: 0x%08x\n",
- wc.vendor_err);
- }
- isert_conn->post_recv_buf_count--;
- isert_cq_rx_comp_err(isert_conn);
- }
+ completed += n;
+ if (completed >= isert_poll_budget)
+ break;
}
- ib_req_notify_cq(rx_cq, IB_CQ_NEXT_COMP);
+ ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
}
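
/*
 * The work function above drains the per-context CQ in batches of
 * ARRAY_SIZE(comp->wcs) entries and gives up after isert_poll_budget
 * completions, so one work item cannot monopolize the workqueue.
 * ib_req_notify_cq() then re-arms the CQ so that new completions trigger
 * isert_cq_callback() and queue the work again.
 */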
static void
-isert_cq_rx_callback(struct ib_cq *cq, void *context)
+isert_cq_callback(struct ib_cq *cq, void *context)
{
- struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
+ struct isert_comp *comp = context;
- queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
+ queue_work(isert_comp_wq, &comp->work);
}
static int
@@ -2147,13 +2057,10 @@ isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
struct ib_send_wr *wr_failed;
int ret;
- atomic_inc(&isert_conn->post_send_buf_count);
-
ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
&wr_failed);
if (ret) {
- pr_err("ib_post_send failed with %d\n", ret);
- atomic_dec(&isert_conn->post_send_buf_count);
+ isert_err("ib_post_send failed with %d\n", ret);
return ret;
}
return ret;
@@ -2200,9 +2107,9 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
isert_cmd->tx_desc.num_sge = 2;
}
- isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
+ isert_init_send_wr(isert_conn, isert_cmd, send_wr);
- pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+ isert_dbg("Posting SCSI Response\n");
return isert_post_response(isert_conn, isert_cmd);
}
@@ -2231,8 +2138,16 @@ isert_get_sup_prot_ops(struct iscsi_conn *conn)
struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
struct isert_device *device = isert_conn->conn_device;
- if (device->pi_capable)
- return TARGET_PROT_ALL;
+ if (conn->tpg->tpg_attrib.t10_pi) {
+ if (device->pi_capable) {
+ isert_info("conn %p PI offload enabled\n", isert_conn);
+ isert_conn->pi_support = true;
+ return TARGET_PROT_ALL;
+ }
+ }
+
+ isert_info("conn %p PI offload disabled\n", isert_conn);
+ isert_conn->pi_support = false;
return TARGET_PROT_NORMAL;
}
@@ -2250,9 +2165,9 @@ isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
&isert_cmd->tx_desc.iscsi_header,
nopout_response);
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
- isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
+ isert_init_send_wr(isert_conn, isert_cmd, send_wr);
- pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+ isert_dbg("conn %p Posting NOPIN Response\n", isert_conn);
return isert_post_response(isert_conn, isert_cmd);
}
@@ -2268,9 +2183,9 @@ isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
&isert_cmd->tx_desc.iscsi_header);
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
- isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
+ isert_init_send_wr(isert_conn, isert_cmd, send_wr);
- pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+ isert_dbg("conn %p Posting Logout Response\n", isert_conn);
return isert_post_response(isert_conn, isert_cmd);
}
@@ -2286,9 +2201,9 @@ isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
&isert_cmd->tx_desc.iscsi_header);
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
- isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
+ isert_init_send_wr(isert_conn, isert_cmd, send_wr);
- pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+ isert_dbg("conn %p Posting Task Management Response\n", isert_conn);
return isert_post_response(isert_conn, isert_cmd);
}
@@ -2318,9 +2233,9 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
tx_dsg->lkey = isert_conn->conn_mr->lkey;
isert_cmd->tx_desc.num_sge = 2;
- isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
+ isert_init_send_wr(isert_conn, isert_cmd, send_wr);
- pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+ isert_dbg("conn %p Posting Reject\n", isert_conn);
return isert_post_response(isert_conn, isert_cmd);
}
@@ -2358,9 +2273,9 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
tx_dsg->lkey = isert_conn->conn_mr->lkey;
isert_cmd->tx_desc.num_sge = 2;
}
- isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
+ isert_init_send_wr(isert_conn, isert_cmd, send_wr);
- pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+	isert_dbg("conn %p Posting Text Response\n", isert_conn);
return isert_post_response(isert_conn, isert_cmd);
}
@@ -2383,30 +2298,31 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
send_wr->sg_list = ib_sge;
send_wr->num_sge = sg_nents;
- send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
+ send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
/*
* Perform mapping of TCM scatterlist memory ib_sge dma_addr.
*/
for_each_sg(sg_start, tmp_sg, sg_nents, i) {
- pr_debug("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n",
- (unsigned long long)tmp_sg->dma_address,
- tmp_sg->length, page_off);
+ isert_dbg("RDMA from SGL dma_addr: 0x%llx dma_len: %u, "
+ "page_off: %u\n",
+ (unsigned long long)tmp_sg->dma_address,
+ tmp_sg->length, page_off);
ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
ib_sge->length = min_t(u32, data_left,
ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
ib_sge->lkey = isert_conn->conn_mr->lkey;
- pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n",
- ib_sge->addr, ib_sge->length, ib_sge->lkey);
+ isert_dbg("RDMA ib_sge: addr: 0x%llx length: %u lkey: %x\n",
+ ib_sge->addr, ib_sge->length, ib_sge->lkey);
page_off = 0;
data_left -= ib_sge->length;
ib_sge++;
- pr_debug("Incrementing ib_sge pointer to %p\n", ib_sge);
+ isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge);
}
- pr_debug("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
- send_wr->sg_list, send_wr->num_sge);
+ isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
+ send_wr->sg_list, send_wr->num_sge);
return sg_nents;
}
@@ -2438,7 +2354,7 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
if (!ib_sge) {
- pr_warn("Unable to allocate ib_sge\n");
+ isert_warn("Unable to allocate ib_sge\n");
ret = -ENOMEM;
goto unmap_cmd;
}
@@ -2448,7 +2364,7 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
GFP_KERNEL);
if (!wr->send_wr) {
- pr_debug("Unable to allocate wr->send_wr\n");
+ isert_dbg("Unable to allocate wr->send_wr\n");
ret = -ENOMEM;
goto unmap_cmd;
}
@@ -2512,9 +2428,9 @@ isert_map_fr_pagelist(struct ib_device *ib_dev,
chunk_start = start_addr;
end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg);
- pr_debug("SGL[%d] dma_addr: 0x%16llx len: %u\n",
- i, (unsigned long long)tmp_sg->dma_address,
- tmp_sg->length);
+ isert_dbg("SGL[%d] dma_addr: 0x%llx len: %u\n",
+ i, (unsigned long long)tmp_sg->dma_address,
+ tmp_sg->length);
if ((end_addr & ~PAGE_MASK) && i < last_ent) {
new_chunk = 0;
@@ -2525,8 +2441,8 @@ isert_map_fr_pagelist(struct ib_device *ib_dev,
page = chunk_start & PAGE_MASK;
do {
fr_pl[n_pages++] = page;
- pr_debug("Mapped page_list[%d] page_addr: 0x%16llx\n",
- n_pages - 1, page);
+ isert_dbg("Mapped page_list[%d] page_addr: 0x%llx\n",
+ n_pages - 1, page);
page += PAGE_SIZE;
} while (page < end_addr);
}
@@ -2534,6 +2450,21 @@ isert_map_fr_pagelist(struct ib_device *ib_dev,
return n_pages;
}
+static inline void
+isert_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
+{
+ u32 rkey;
+
+ memset(inv_wr, 0, sizeof(*inv_wr));
+ inv_wr->wr_id = ISER_FASTREG_LI_WRID;
+ inv_wr->opcode = IB_WR_LOCAL_INV;
+ inv_wr->ex.invalidate_rkey = mr->rkey;
+
+ /* Bump the key */
+ rkey = ib_inc_rkey(mr->rkey);
+ ib_update_fast_reg_key(mr, rkey);
+}
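
/*
 * isert_inv_rkey() centralizes what the two registration paths below used
 * to open-code: build an IB_WR_LOCAL_INV work request for the stale rkey
 * and advance its 8-bit key portion via ib_inc_rkey(), so the next fast
 * registration goes out with a fresh key value.
 */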
+
static int
isert_fast_reg_mr(struct isert_conn *isert_conn,
struct fast_reg_descriptor *fr_desc,
@@ -2548,15 +2479,13 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
struct ib_send_wr *bad_wr, *wr = NULL;
int ret, pagelist_len;
u32 page_off;
- u8 key;
if (mem->dma_nents == 1) {
sge->lkey = isert_conn->conn_mr->lkey;
sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
- pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n",
- __func__, __LINE__, sge->addr, sge->length,
- sge->lkey);
+ isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
+ sge->addr, sge->length, sge->lkey);
return 0;
}
@@ -2572,21 +2501,15 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
page_off = mem->offset % PAGE_SIZE;
- pr_debug("Use fr_desc %p sg_nents %d offset %u\n",
- fr_desc, mem->nents, mem->offset);
+ isert_dbg("Use fr_desc %p sg_nents %d offset %u\n",
+ fr_desc, mem->nents, mem->offset);
pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents,
&frpl->page_list[0]);
- if (!(fr_desc->ind & ISERT_DATA_KEY_VALID)) {
- memset(&inv_wr, 0, sizeof(inv_wr));
- inv_wr.wr_id = ISER_FASTREG_LI_WRID;
- inv_wr.opcode = IB_WR_LOCAL_INV;
- inv_wr.ex.invalidate_rkey = mr->rkey;
+ if (!(fr_desc->ind & ind)) {
+ isert_inv_rkey(&inv_wr, mr);
wr = &inv_wr;
- /* Bump the key */
- key = (u8)(mr->rkey & 0x000000FF);
- ib_update_fast_reg_key(mr, ++key);
}
/* Prepare FASTREG WR */
@@ -2608,7 +2531,7 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
if (ret) {
- pr_err("fast registration failed, ret:%d\n", ret);
+ isert_err("fast registration failed, ret:%d\n", ret);
return ret;
}
fr_desc->ind &= ~ind;
@@ -2617,9 +2540,8 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
sge->addr = frpl->page_list[0] + page_off;
sge->length = mem->len;
- pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n",
- __func__, __LINE__, sge->addr, sge->length,
- sge->lkey);
+ isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
+ sge->addr, sge->length, sge->lkey);
return ret;
}
@@ -2665,7 +2587,7 @@ isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
break;
default:
- pr_err("Unsupported PI operation %d\n", se_cmd->prot_op);
+ isert_err("Unsupported PI operation %d\n", se_cmd->prot_op);
return -EINVAL;
}
@@ -2681,17 +2603,16 @@ isert_set_prot_checks(u8 prot_checks)
}
static int
-isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd,
- struct fast_reg_descriptor *fr_desc,
- struct ib_sge *data_sge, struct ib_sge *prot_sge,
- struct ib_sge *sig_sge)
+isert_reg_sig_mr(struct isert_conn *isert_conn,
+ struct se_cmd *se_cmd,
+ struct isert_rdma_wr *rdma_wr,
+ struct fast_reg_descriptor *fr_desc)
{
struct ib_send_wr sig_wr, inv_wr;
struct ib_send_wr *bad_wr, *wr = NULL;
struct pi_context *pi_ctx = fr_desc->pi_ctx;
struct ib_sig_attrs sig_attrs;
int ret;
- u32 key;
memset(&sig_attrs, 0, sizeof(sig_attrs));
ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
@@ -2701,26 +2622,20 @@ isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd,
sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);
if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
- memset(&inv_wr, 0, sizeof(inv_wr));
- inv_wr.opcode = IB_WR_LOCAL_INV;
- inv_wr.wr_id = ISER_FASTREG_LI_WRID;
- inv_wr.ex.invalidate_rkey = pi_ctx->sig_mr->rkey;
+ isert_inv_rkey(&inv_wr, pi_ctx->sig_mr);
wr = &inv_wr;
- /* Bump the key */
- key = (u8)(pi_ctx->sig_mr->rkey & 0x000000FF);
- ib_update_fast_reg_key(pi_ctx->sig_mr, ++key);
}
memset(&sig_wr, 0, sizeof(sig_wr));
sig_wr.opcode = IB_WR_REG_SIG_MR;
sig_wr.wr_id = ISER_FASTREG_LI_WRID;
- sig_wr.sg_list = data_sge;
+ sig_wr.sg_list = &rdma_wr->ib_sg[DATA];
sig_wr.num_sge = 1;
sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE;
sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
if (se_cmd->t_prot_sg)
- sig_wr.wr.sig_handover.prot = prot_sge;
+ sig_wr.wr.sig_handover.prot = &rdma_wr->ib_sg[PROT];
if (!wr)
wr = &sig_wr;
@@ -2729,39 +2644,98 @@ isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd,
ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
if (ret) {
- pr_err("fast registration failed, ret:%d\n", ret);
+ isert_err("fast registration failed, ret:%d\n", ret);
goto err;
}
fr_desc->ind &= ~ISERT_SIG_KEY_VALID;
- sig_sge->lkey = pi_ctx->sig_mr->lkey;
- sig_sge->addr = 0;
- sig_sge->length = se_cmd->data_length;
+ rdma_wr->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey;
+ rdma_wr->ib_sg[SIG].addr = 0;
+ rdma_wr->ib_sg[SIG].length = se_cmd->data_length;
if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
/*
* We have protection guards on the wire
 		 * so we need to set a larger transfer
*/
- sig_sge->length += se_cmd->prot_length;
+ rdma_wr->ib_sg[SIG].length += se_cmd->prot_length;
- pr_debug("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
- sig_sge->addr, sig_sge->length,
- sig_sge->lkey);
+ isert_dbg("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
+ rdma_wr->ib_sg[SIG].addr, rdma_wr->ib_sg[SIG].length,
+ rdma_wr->ib_sg[SIG].lkey);
err:
return ret;
}
static int
+isert_handle_prot_cmd(struct isert_conn *isert_conn,
+ struct isert_cmd *isert_cmd,
+ struct isert_rdma_wr *wr)
+{
+ struct isert_device *device = isert_conn->conn_device;
+ struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
+ int ret;
+
+ if (!wr->fr_desc->pi_ctx) {
+ ret = isert_create_pi_ctx(wr->fr_desc,
+ device->ib_device,
+ isert_conn->conn_pd);
+ if (ret) {
+ isert_err("conn %p failed to allocate pi_ctx\n",
+ isert_conn);
+ return ret;
+ }
+ }
+
+ if (se_cmd->t_prot_sg) {
+ ret = isert_map_data_buf(isert_conn, isert_cmd,
+ se_cmd->t_prot_sg,
+ se_cmd->t_prot_nents,
+ se_cmd->prot_length,
+ 0, wr->iser_ib_op, &wr->prot);
+ if (ret) {
+ isert_err("conn %p failed to map protection buffer\n",
+ isert_conn);
+ return ret;
+ }
+
+ memset(&wr->ib_sg[PROT], 0, sizeof(wr->ib_sg[PROT]));
+ ret = isert_fast_reg_mr(isert_conn, wr->fr_desc, &wr->prot,
+ ISERT_PROT_KEY_VALID, &wr->ib_sg[PROT]);
+ if (ret) {
+ isert_err("conn %p failed to fast reg mr\n",
+ isert_conn);
+ goto unmap_prot_cmd;
+ }
+ }
+
+ ret = isert_reg_sig_mr(isert_conn, se_cmd, wr, wr->fr_desc);
+ if (ret) {
+ isert_err("conn %p failed to fast reg mr\n",
+ isert_conn);
+ goto unmap_prot_cmd;
+ }
+ wr->fr_desc->ind |= ISERT_PROTECTED;
+
+ return 0;
+
+unmap_prot_cmd:
+ if (se_cmd->t_prot_sg)
+ isert_unmap_data_buf(isert_conn, &wr->prot);
+
+ return ret;
+}
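
/*
 * isert_handle_prot_cmd() is the T10-PI leg of the fastreg path: it lazily
 * allocates the per-descriptor pi_ctx, maps and fast-registers the
 * protection scatterlist into ib_sg[PROT] when one is present, and then
 * calls isert_reg_sig_mr() to produce the combined signature handle in
 * ib_sg[SIG] that the RDMA work request ends up using.
 */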
+
+static int
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
struct isert_rdma_wr *wr)
{
struct se_cmd *se_cmd = &cmd->se_cmd;
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
struct isert_conn *isert_conn = conn->context;
- struct ib_sge data_sge;
- struct ib_send_wr *send_wr;
struct fast_reg_descriptor *fr_desc = NULL;
+ struct ib_send_wr *send_wr;
+ struct ib_sge *ib_sg;
u32 offset;
int ret = 0;
unsigned long flags;
@@ -2775,8 +2749,7 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
if (ret)
return ret;
- if (wr->data.dma_nents != 1 ||
- se_cmd->prot_op != TARGET_PROT_NORMAL) {
+ if (wr->data.dma_nents != 1 || isert_prot_cmd(isert_conn, se_cmd)) {
spin_lock_irqsave(&isert_conn->conn_lock, flags);
fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
struct fast_reg_descriptor, list);
@@ -2786,38 +2759,21 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
}
ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data,
- ISERT_DATA_KEY_VALID, &data_sge);
+ ISERT_DATA_KEY_VALID, &wr->ib_sg[DATA]);
if (ret)
goto unmap_cmd;
- if (se_cmd->prot_op != TARGET_PROT_NORMAL) {
- struct ib_sge prot_sge, sig_sge;
-
- if (se_cmd->t_prot_sg) {
- ret = isert_map_data_buf(isert_conn, isert_cmd,
- se_cmd->t_prot_sg,
- se_cmd->t_prot_nents,
- se_cmd->prot_length,
- 0, wr->iser_ib_op, &wr->prot);
- if (ret)
- goto unmap_cmd;
-
- ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->prot,
- ISERT_PROT_KEY_VALID, &prot_sge);
- if (ret)
- goto unmap_prot_cmd;
- }
-
- ret = isert_reg_sig_mr(isert_conn, se_cmd, fr_desc,
- &data_sge, &prot_sge, &sig_sge);
+ if (isert_prot_cmd(isert_conn, se_cmd)) {
+ ret = isert_handle_prot_cmd(isert_conn, isert_cmd, wr);
if (ret)
- goto unmap_prot_cmd;
+ goto unmap_cmd;
- fr_desc->ind |= ISERT_PROTECTED;
- memcpy(&wr->s_ib_sge, &sig_sge, sizeof(sig_sge));
- } else
- memcpy(&wr->s_ib_sge, &data_sge, sizeof(data_sge));
+ ib_sg = &wr->ib_sg[SIG];
+ } else {
+ ib_sg = &wr->ib_sg[DATA];
+ }
+ memcpy(&wr->s_ib_sge, ib_sg, sizeof(*ib_sg));
wr->ib_sge = &wr->s_ib_sge;
wr->send_wr_num = 1;
memset(&wr->s_send_wr, 0, sizeof(*send_wr));
@@ -2827,12 +2783,12 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
send_wr = &isert_cmd->rdma_wr.s_send_wr;
send_wr->sg_list = &wr->s_ib_sge;
send_wr->num_sge = 1;
- send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
+ send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
send_wr->opcode = IB_WR_RDMA_WRITE;
send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
send_wr->wr.rdma.rkey = isert_cmd->read_stag;
- send_wr->send_flags = se_cmd->prot_op == TARGET_PROT_NORMAL ?
+ send_wr->send_flags = !isert_prot_cmd(isert_conn, se_cmd) ?
0 : IB_SEND_SIGNALED;
} else {
send_wr->opcode = IB_WR_RDMA_READ;
@@ -2842,9 +2798,7 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
}
return 0;
-unmap_prot_cmd:
- if (se_cmd->t_prot_sg)
- isert_unmap_data_buf(isert_conn, &wr->prot);
+
unmap_cmd:
if (fr_desc) {
spin_lock_irqsave(&isert_conn->conn_lock, flags);
@@ -2867,16 +2821,17 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
struct ib_send_wr *wr_failed;
int rc;
- pr_debug("Cmd: %p RDMA_WRITE data_length: %u\n",
+ isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
isert_cmd, se_cmd->data_length);
+
wr->iser_ib_op = ISER_IB_RDMA_WRITE;
rc = device->reg_rdma_mem(conn, cmd, wr);
if (rc) {
- pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
+ isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
return rc;
}
- if (se_cmd->prot_op == TARGET_PROT_NORMAL) {
+ if (!isert_prot_cmd(isert_conn, se_cmd)) {
/*
* Build isert_conn->tx_desc for iSCSI response PDU and attach
*/
@@ -2886,24 +2841,20 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
&isert_cmd->tx_desc.iscsi_header);
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
isert_init_send_wr(isert_conn, isert_cmd,
- &isert_cmd->tx_desc.send_wr, false);
+ &isert_cmd->tx_desc.send_wr);
isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
wr->send_wr_num += 1;
}
- atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);
-
rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
- if (rc) {
- pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
- atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
- }
+ if (rc)
+ isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
- if (se_cmd->prot_op == TARGET_PROT_NORMAL)
- pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
+ if (!isert_prot_cmd(isert_conn, se_cmd))
+ isert_dbg("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
"READ\n", isert_cmd);
else
- pr_debug("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
+ isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
isert_cmd);
return 1;
@@ -2920,23 +2871,20 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
struct ib_send_wr *wr_failed;
int rc;
- pr_debug("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
+ isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
isert_cmd, se_cmd->data_length, cmd->write_data_done);
wr->iser_ib_op = ISER_IB_RDMA_READ;
rc = device->reg_rdma_mem(conn, cmd, wr);
if (rc) {
- pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
+ isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
return rc;
}
- atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);
-
rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
- if (rc) {
- pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
- atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
- }
- pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
+ if (rc)
+ isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
+
+ isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
isert_cmd);
return 0;
@@ -2952,7 +2900,7 @@ isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
ret = isert_put_nopin(cmd, conn, false);
break;
default:
- pr_err("Unknown immediate state: 0x%02x\n", state);
+ isert_err("Unknown immediate state: 0x%02x\n", state);
ret = -EINVAL;
break;
}
@@ -2963,15 +2911,14 @@ isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
static int
isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
+ struct isert_conn *isert_conn = conn->context;
int ret;
switch (state) {
case ISTATE_SEND_LOGOUTRSP:
ret = isert_put_logout_rsp(cmd, conn);
- if (!ret) {
- pr_debug("Returning iSER Logout -EAGAIN\n");
- ret = -EAGAIN;
- }
+ if (!ret)
+ isert_conn->logout_posted = true;
break;
case ISTATE_SEND_NOPIN:
ret = isert_put_nopin(cmd, conn, true);
@@ -2993,7 +2940,7 @@ isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
ret = isert_put_response(conn, cmd);
break;
default:
- pr_err("Unknown response state: 0x%02x\n", state);
+ isert_err("Unknown response state: 0x%02x\n", state);
ret = -EINVAL;
break;
}
@@ -3001,27 +2948,64 @@ isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
return ret;
}
+struct rdma_cm_id *
+isert_setup_id(struct isert_np *isert_np)
+{
+ struct iscsi_np *np = isert_np->np;
+ struct rdma_cm_id *id;
+ struct sockaddr *sa;
+ int ret;
+
+ sa = (struct sockaddr *)&np->np_sockaddr;
+ isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);
+
+ id = rdma_create_id(isert_cma_handler, isert_np,
+ RDMA_PS_TCP, IB_QPT_RC);
+ if (IS_ERR(id)) {
+ isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
+ ret = PTR_ERR(id);
+ goto out;
+ }
+ isert_dbg("id %p context %p\n", id, id->context);
+
+ ret = rdma_bind_addr(id, sa);
+ if (ret) {
+ isert_err("rdma_bind_addr() failed: %d\n", ret);
+ goto out_id;
+ }
+
+ ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG);
+ if (ret) {
+ isert_err("rdma_listen() failed: %d\n", ret);
+ goto out_id;
+ }
+
+ return id;
+out_id:
+ rdma_destroy_id(id);
+out:
+ return ERR_PTR(ret);
+}
+
static int
isert_setup_np(struct iscsi_np *np,
struct __kernel_sockaddr_storage *ksockaddr)
{
struct isert_np *isert_np;
struct rdma_cm_id *isert_lid;
- struct sockaddr *sa;
int ret;
isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
if (!isert_np) {
- pr_err("Unable to allocate struct isert_np\n");
+ isert_err("Unable to allocate struct isert_np\n");
return -ENOMEM;
}
sema_init(&isert_np->np_sem, 0);
mutex_init(&isert_np->np_accept_mutex);
INIT_LIST_HEAD(&isert_np->np_accept_list);
init_completion(&isert_np->np_login_comp);
+ isert_np->np = np;
- sa = (struct sockaddr *)ksockaddr;
- pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa);
/*
* Setup the np->np_sockaddr from the passed sockaddr setup
* in iscsi_target_configfs.c code..
@@ -3029,37 +3013,20 @@ isert_setup_np(struct iscsi_np *np,
memcpy(&np->np_sockaddr, ksockaddr,
sizeof(struct __kernel_sockaddr_storage));
- isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP,
- IB_QPT_RC);
+ isert_lid = isert_setup_id(isert_np);
if (IS_ERR(isert_lid)) {
- pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n",
- PTR_ERR(isert_lid));
ret = PTR_ERR(isert_lid);
goto out;
}
- ret = rdma_bind_addr(isert_lid, sa);
- if (ret) {
- pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret);
- goto out_lid;
- }
-
- ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG);
- if (ret) {
- pr_err("rdma_listen() for isert_lid failed: %d\n", ret);
- goto out_lid;
- }
-
isert_np->np_cm_id = isert_lid;
np->np_context = isert_np;
- pr_debug("Setup isert_lid->context: %p\n", isert_lid->context);
return 0;
-out_lid:
- rdma_destroy_id(isert_lid);
out:
kfree(isert_np);
+
return ret;
}
@@ -3075,16 +3042,12 @@ isert_rdma_accept(struct isert_conn *isert_conn)
cp.retry_count = 7;
cp.rnr_retry_count = 7;
- pr_debug("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n");
-
ret = rdma_accept(cm_id, &cp);
if (ret) {
- pr_err("rdma_accept() failed with: %d\n", ret);
+ isert_err("rdma_accept() failed with: %d\n", ret);
return ret;
}
- pr_debug("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n");
-
return 0;
}
@@ -3094,7 +3057,15 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
int ret;
- pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);
+ isert_info("before login_req comp conn: %p\n", isert_conn);
+ ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
+ if (ret) {
+		isert_err("isert_conn %p interrupted before login request arrived\n",
+ isert_conn);
+ return ret;
+ }
+ reinit_completion(&isert_conn->login_req_comp);
+
/*
* For login requests after the first PDU, isert_rx_login_req() will
* kick schedule_delayed_work(&conn->login_work) as the packet is
@@ -3104,11 +3075,15 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
if (!login->first_request)
return 0;
+ isert_rx_login_req(isert_conn);
+
+ isert_info("before conn_login_comp conn: %p\n", conn);
ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
if (ret)
return ret;
- pr_debug("isert_get_login_rx processing login->req: %p\n", login->req);
+ isert_info("processing login->req: %p\n", login->req);
+
return 0;
}
@@ -3161,7 +3136,7 @@ accept_wait:
spin_lock_bh(&np->np_thread_lock);
if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
spin_unlock_bh(&np->np_thread_lock);
- pr_debug("np_thread_state %d for isert_accept_np\n",
+ isert_dbg("np_thread_state %d for isert_accept_np\n",
np->np_thread_state);
/**
* No point in stalling here when np_thread
@@ -3186,17 +3161,10 @@ accept_wait:
isert_conn->conn = conn;
max_accept = 0;
- ret = isert_rdma_post_recvl(isert_conn);
- if (ret)
- return ret;
-
- ret = isert_rdma_accept(isert_conn);
- if (ret)
- return ret;
-
isert_set_conn_info(np, conn, isert_conn);
- pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn);
+ isert_dbg("Processing isert_conn: %p\n", isert_conn);
+
return 0;
}
@@ -3204,25 +3172,103 @@ static void
isert_free_np(struct iscsi_np *np)
{
struct isert_np *isert_np = (struct isert_np *)np->np_context;
+ struct isert_conn *isert_conn, *n;
if (isert_np->np_cm_id)
rdma_destroy_id(isert_np->np_cm_id);
+ /*
+	 * FIXME: At this point we don't have a good way to ensure
+	 * that there are no hanging connections that completed
+	 * RDMA establishment but didn't start the iscsi login
+	 * process. So work around this by cleaning up whatever
+	 * piled up in np_accept_list.
+ */
+ mutex_lock(&isert_np->np_accept_mutex);
+ if (!list_empty(&isert_np->np_accept_list)) {
+ isert_info("Still have isert connections, cleaning up...\n");
+ list_for_each_entry_safe(isert_conn, n,
+ &isert_np->np_accept_list,
+ conn_accept_node) {
+ isert_info("cleaning isert_conn %p state (%d)\n",
+ isert_conn, isert_conn->state);
+ isert_connect_release(isert_conn);
+ }
+ }
+ mutex_unlock(&isert_np->np_accept_mutex);
+
np->np_context = NULL;
kfree(isert_np);
}
+static void isert_release_work(struct work_struct *work)
+{
+ struct isert_conn *isert_conn = container_of(work,
+ struct isert_conn,
+ release_work);
+
+ isert_info("Starting release conn %p\n", isert_conn);
+
+ wait_for_completion(&isert_conn->conn_wait);
+
+ mutex_lock(&isert_conn->conn_mutex);
+ isert_conn->state = ISER_CONN_DOWN;
+ mutex_unlock(&isert_conn->conn_mutex);
+
+ isert_info("Destroying conn %p\n", isert_conn);
+ isert_put_conn(isert_conn);
+}
+
+static void
+isert_wait4logout(struct isert_conn *isert_conn)
+{
+ struct iscsi_conn *conn = isert_conn->conn;
+
+ isert_info("conn %p\n", isert_conn);
+
+ if (isert_conn->logout_posted) {
+ isert_info("conn %p wait for conn_logout_comp\n", isert_conn);
+ wait_for_completion_timeout(&conn->conn_logout_comp,
+ SECONDS_FOR_LOGOUT_COMP * HZ);
+ }
+}
+
+static void
+isert_wait4cmds(struct iscsi_conn *conn)
+{
+ isert_info("iscsi_conn %p\n", conn);
+
+ if (conn->sess) {
+ target_sess_cmd_list_set_waiting(conn->sess->se_sess);
+ target_wait_for_sess_cmds(conn->sess->se_sess);
+ }
+}
+
+static void
+isert_wait4flush(struct isert_conn *isert_conn)
+{
+ struct ib_recv_wr *bad_wr;
+
+ isert_info("conn %p\n", isert_conn);
+
+ init_completion(&isert_conn->conn_wait_comp_err);
+ isert_conn->beacon.wr_id = ISER_BEACON_WRID;
+ /* post an indication that all flush errors were consumed */
+ if (ib_post_recv(isert_conn->conn_qp, &isert_conn->beacon, &bad_wr)) {
+ isert_err("conn %p failed to post beacon", isert_conn);
+ return;
+ }
+
+ wait_for_completion(&isert_conn->conn_wait_comp_err);
+}
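
/*
 * The beacon above assumes the QP has already been moved toward the error
 * state by isert_conn_terminate(), so this extra receive work request
 * completes with a flush error. isert_cq_comp_err() recognizes its
 * ISER_BEACON_WRID and completes conn_wait_comp_err, at which point the
 * work requests posted before the beacon have been flushed through the CQ.
 */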
+
static void isert_wait_conn(struct iscsi_conn *conn)
{
struct isert_conn *isert_conn = conn->context;
- pr_debug("isert_wait_conn: Starting \n");
+ isert_info("Starting conn %p\n", isert_conn);
mutex_lock(&isert_conn->conn_mutex);
- if (isert_conn->conn_cm_id && !isert_conn->disconnect) {
- pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
- rdma_disconnect(isert_conn->conn_cm_id);
- }
/*
* Only wait for conn_wait_comp_err if the isert_conn made it
* into full feature phase..
@@ -3231,14 +3277,15 @@ static void isert_wait_conn(struct iscsi_conn *conn)
mutex_unlock(&isert_conn->conn_mutex);
return;
}
- if (isert_conn->state == ISER_CONN_UP)
- isert_conn->state = ISER_CONN_TERMINATING;
+ isert_conn_terminate(isert_conn);
mutex_unlock(&isert_conn->conn_mutex);
- wait_for_completion(&isert_conn->conn_wait_comp_err);
+ isert_wait4cmds(conn);
+ isert_wait4flush(isert_conn);
+ isert_wait4logout(isert_conn);
- wait_for_completion(&isert_conn->conn_wait);
- isert_put_conn(isert_conn);
+ INIT_WORK(&isert_conn->release_work, isert_release_work);
+ queue_work(isert_release_wq, &isert_conn->release_work);
}
static void isert_free_conn(struct iscsi_conn *conn)
@@ -3273,35 +3320,39 @@ static int __init isert_init(void)
{
int ret;
- isert_rx_wq = alloc_workqueue("isert_rx_wq", 0, 0);
- if (!isert_rx_wq) {
- pr_err("Unable to allocate isert_rx_wq\n");
+ isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
+ if (!isert_comp_wq) {
+ isert_err("Unable to allocate isert_comp_wq\n");
+ ret = -ENOMEM;
return -ENOMEM;
}
- isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
- if (!isert_comp_wq) {
- pr_err("Unable to allocate isert_comp_wq\n");
+ isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
+ WQ_UNBOUND_MAX_ACTIVE);
+ if (!isert_release_wq) {
+ isert_err("Unable to allocate isert_release_wq\n");
ret = -ENOMEM;
- goto destroy_rx_wq;
+ goto destroy_comp_wq;
}
iscsit_register_transport(&iser_target_transport);
- pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
+ isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n");
+
return 0;
-destroy_rx_wq:
- destroy_workqueue(isert_rx_wq);
+destroy_comp_wq:
+ destroy_workqueue(isert_comp_wq);
+
return ret;
}
static void __exit isert_exit(void)
{
flush_scheduled_work();
+ destroy_workqueue(isert_release_wq);
destroy_workqueue(isert_comp_wq);
- destroy_workqueue(isert_rx_wq);
iscsit_unregister_transport(&iser_target_transport);
- pr_debug("iSER_TARGET[0] - Released iser_target_transport\n");
+ isert_info("iSER_TARGET[0] - Released iser_target_transport\n");
}
MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 04f51f7bf61..8dc8415d152 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -4,9 +4,37 @@
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
+#define DRV_NAME "isert"
+#define PFX DRV_NAME ": "
+
+#define isert_dbg(fmt, arg...) \
+ do { \
+ if (unlikely(isert_debug_level > 2)) \
+ printk(KERN_DEBUG PFX "%s: " fmt,\
+ __func__ , ## arg); \
+ } while (0)
+
+#define isert_warn(fmt, arg...) \
+ do { \
+ if (unlikely(isert_debug_level > 0)) \
+ pr_warn(PFX "%s: " fmt, \
+ __func__ , ## arg); \
+ } while (0)
+
+#define isert_info(fmt, arg...) \
+ do { \
+ if (unlikely(isert_debug_level > 1)) \
+ pr_info(PFX "%s: " fmt, \
+ __func__ , ## arg); \
+ } while (0)
+
+#define isert_err(fmt, arg...) \
+ pr_err(PFX "%s: " fmt, __func__ , ## arg)
+
#define ISERT_RDMA_LISTEN_BACKLOG 10
#define ISCSI_ISER_SG_TABLESIZE 256
#define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL
+#define ISER_BEACON_WRID 0xfffffffffffffffeULL
enum isert_desc_type {
ISCSI_TX_CONTROL,
@@ -23,6 +51,7 @@ enum iser_ib_op_code {
enum iser_conn_state {
ISER_CONN_INIT,
ISER_CONN_UP,
+ ISER_CONN_FULL_FEATURE,
ISER_CONN_TERMINATING,
ISER_CONN_DOWN,
};
@@ -44,9 +73,6 @@ struct iser_tx_desc {
struct ib_sge tx_sg[2];
int num_sge;
struct isert_cmd *isert_cmd;
- struct llist_node *comp_llnode_batch;
- struct llist_node comp_llnode;
- bool llnode_active;
struct ib_send_wr send_wr;
} __packed;
@@ -81,6 +107,12 @@ struct isert_data_buf {
enum dma_data_direction dma_dir;
};
+enum {
+ DATA = 0,
+ PROT = 1,
+ SIG = 2,
+};
+
struct isert_rdma_wr {
struct list_head wr_list;
struct isert_cmd *isert_cmd;
@@ -90,6 +122,7 @@ struct isert_rdma_wr {
int send_wr_num;
struct ib_send_wr *send_wr;
struct ib_send_wr s_send_wr;
+ struct ib_sge ib_sg[3];
struct isert_data_buf data;
struct isert_data_buf prot;
struct fast_reg_descriptor *fr_desc;
@@ -117,14 +150,15 @@ struct isert_device;
struct isert_conn {
enum iser_conn_state state;
int post_recv_buf_count;
- atomic_t post_send_buf_count;
u32 responder_resources;
u32 initiator_depth;
+ bool pi_support;
u32 max_sge;
char *login_buf;
char *login_req_buf;
char *login_rsp_buf;
u64 login_req_dma;
+ int login_req_len;
u64 login_rsp_dma;
unsigned int conn_rx_desc_head;
struct iser_rx_desc *conn_rx_descs;
@@ -132,13 +166,13 @@ struct isert_conn {
struct iscsi_conn *conn;
struct list_head conn_accept_node;
struct completion conn_login_comp;
+ struct completion login_req_comp;
struct iser_tx_desc conn_login_tx_desc;
struct rdma_cm_id *conn_cm_id;
struct ib_pd *conn_pd;
struct ib_mr *conn_mr;
struct ib_qp *conn_qp;
struct isert_device *conn_device;
- struct work_struct conn_logout_work;
struct mutex conn_mutex;
struct completion conn_wait;
struct completion conn_wait_comp_err;
@@ -147,31 +181,38 @@ struct isert_conn {
int conn_fr_pool_size;
/* lock to protect fastreg pool */
spinlock_t conn_lock;
-#define ISERT_COMP_BATCH_COUNT 8
- int conn_comp_batch;
- struct llist_head conn_comp_llist;
- bool disconnect;
+ struct work_struct release_work;
+ struct ib_recv_wr beacon;
+ bool logout_posted;
};
#define ISERT_MAX_CQ 64
-struct isert_cq_desc {
- struct isert_device *device;
- int cq_index;
- struct work_struct cq_rx_work;
- struct work_struct cq_tx_work;
+/**
+ * struct isert_comp - iSER completion context
+ *
+ * @device: pointer to device handle
+ * @cq: completion queue
+ * @wcs: work completion array
+ * @active_qps: Number of active QPs attached
+ * to completion context
+ * @work: completion work handle
+ */
+struct isert_comp {
+ struct isert_device *device;
+ struct ib_cq *cq;
+ struct ib_wc wcs[16];
+ int active_qps;
+ struct work_struct work;
};
struct isert_device {
int use_fastreg;
bool pi_capable;
- int cqs_used;
int refcount;
- int cq_active_qps[ISERT_MAX_CQ];
struct ib_device *ib_device;
- struct ib_cq *dev_rx_cq[ISERT_MAX_CQ];
- struct ib_cq *dev_tx_cq[ISERT_MAX_CQ];
- struct isert_cq_desc *cq_desc;
+ struct isert_comp *comps;
+ int comps_used;
struct list_head dev_node;
struct ib_device_attr dev_attr;
int (*reg_rdma_mem)(struct iscsi_conn *conn,
@@ -182,6 +223,7 @@ struct isert_device {
};
struct isert_np {
+ struct iscsi_np *np;
struct semaphore np_sem;
struct rdma_cm_id *np_cm_id;
struct mutex np_accept_mutex;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index a1640364144..0747c0595a9 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -2928,7 +2928,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
return -ENOMEM;
sep_opt = options;
- while ((p = strsep(&sep_opt, ",")) != NULL) {
+ while ((p = strsep(&sep_opt, ",\n")) != NULL) {
if (!*p)
continue;
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index e29c04e2aff..e853a213468 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -527,14 +527,14 @@ EXPORT_SYMBOL(gameport_set_phys);
*/
static void gameport_init_port(struct gameport *gameport)
{
- static atomic_t gameport_no = ATOMIC_INIT(0);
+ static atomic_t gameport_no = ATOMIC_INIT(-1);
__module_get(THIS_MODULE);
mutex_init(&gameport->drv_mutex);
device_initialize(&gameport->dev);
dev_set_name(&gameport->dev, "gameport%lu",
- (unsigned long)atomic_inc_return(&gameport_no) - 1);
+ (unsigned long)atomic_inc_return(&gameport_no));
gameport->dev.bus = &gameport_bus;
gameport->dev.release = gameport_release_port;
if (gameport->parent)
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 0f175f55782..04217c2e345 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1775,7 +1775,7 @@ EXPORT_SYMBOL_GPL(input_class);
*/
struct input_dev *input_allocate_device(void)
{
- static atomic_t input_no = ATOMIC_INIT(0);
+ static atomic_t input_no = ATOMIC_INIT(-1);
struct input_dev *dev;
dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
@@ -1790,7 +1790,7 @@ struct input_dev *input_allocate_device(void)
INIT_LIST_HEAD(&dev->node);
dev_set_name(&dev->dev, "input%lu",
- (unsigned long) atomic_inc_return(&input_no) - 1);
+ (unsigned long)atomic_inc_return(&input_no));
__module_get(THIS_MODULE);
}
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index fc55f0d15b7..3aa2f3f3da5 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -886,8 +886,8 @@ static void xpad_led_set(struct led_classdev *led_cdev,
static int xpad_led_probe(struct usb_xpad *xpad)
{
- static atomic_t led_seq = ATOMIC_INIT(0);
- long led_no;
+ static atomic_t led_seq = ATOMIC_INIT(-1);
+ unsigned long led_no;
struct xpad_led *led;
struct led_classdev *led_cdev;
int error;
@@ -899,9 +899,9 @@ static int xpad_led_probe(struct usb_xpad *xpad)
if (!led)
return -ENOMEM;
- led_no = (long)atomic_inc_return(&led_seq) - 1;
+ led_no = atomic_inc_return(&led_seq);
- snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
+ snprintf(led->name, sizeof(led->name), "xpad%lu", led_no);
led->xpad = xpad;
led_cdev = &led->led_cdev;
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index a3958c63d7d..96ee26c555e 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -665,14 +665,14 @@ config KEYBOARD_CROS_EC
To compile this driver as a module, choose M here: the
module will be called cros_ec_keyb.
-config KEYBOARD_CAP1106
- tristate "Microchip CAP1106 touch sensor"
+config KEYBOARD_CAP11XX
+ tristate "Microchip CAP11XX based touch sensors"
depends on OF && I2C
select REGMAP_I2C
help
- Say Y here to enable the CAP1106 touch sensor driver.
+ Say Y here to enable the CAP11XX touch sensor driver.
To compile this driver as a module, choose M here: the
- module will be called cap1106.
+ module will be called cap11xx.
endif
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 0a3345634d7..febafa527eb 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -11,7 +11,7 @@ obj-$(CONFIG_KEYBOARD_AMIGA) += amikbd.o
obj-$(CONFIG_KEYBOARD_ATARI) += atakbd.o
obj-$(CONFIG_KEYBOARD_ATKBD) += atkbd.o
obj-$(CONFIG_KEYBOARD_BFIN) += bf54x-keys.o
-obj-$(CONFIG_KEYBOARD_CAP1106) += cap1106.o
+obj-$(CONFIG_KEYBOARD_CAP11XX) += cap11xx.o
obj-$(CONFIG_KEYBOARD_CLPS711X) += clps711x-keypad.o
obj-$(CONFIG_KEYBOARD_CROS_EC) += cros_ec_keyb.o
obj-$(CONFIG_KEYBOARD_DAVINCI) += davinci_keyscan.o
diff --git a/drivers/input/keyboard/amikbd.c b/drivers/input/keyboard/amikbd.c
index d3b8c58fcfd..e04a3b4e55d 100644
--- a/drivers/input/keyboard/amikbd.c
+++ b/drivers/input/keyboard/amikbd.c
@@ -45,6 +45,7 @@ MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("Amiga keyboard driver");
MODULE_LICENSE("GPL");
+#ifdef CONFIG_HW_CONSOLE
static unsigned char amikbd_keycode[0x78] __initdata = {
[0] = KEY_GRAVE,
[1] = KEY_1,
@@ -144,6 +145,32 @@ static unsigned char amikbd_keycode[0x78] __initdata = {
[103] = KEY_RIGHTMETA
};
+static void __init amikbd_init_console_keymaps(void)
+{
+ /* We can spare 512 bytes on the stack for temp_map in the init path. */
+ unsigned short temp_map[NR_KEYS];
+ int i, j;
+
+ for (i = 0; i < MAX_NR_KEYMAPS; i++) {
+ if (!key_maps[i])
+ continue;
+ memset(temp_map, 0, sizeof(temp_map));
+ for (j = 0; j < 0x78; j++) {
+ if (!amikbd_keycode[j])
+ continue;
+ temp_map[j] = key_maps[i][amikbd_keycode[j]];
+ }
+ for (j = 0; j < NR_KEYS; j++) {
+ if (!temp_map[j])
+ temp_map[j] = 0xf200;
+ }
+ memcpy(key_maps[i], temp_map, sizeof(temp_map));
+ }
+}
+#else /* !CONFIG_HW_CONSOLE */
+static inline void amikbd_init_console_keymaps(void) {}
+#endif /* !CONFIG_HW_CONSOLE */
+
static const char *amikbd_messages[8] = {
[0] = KERN_ALERT "amikbd: Ctrl-Amiga-Amiga reset warning!!\n",
[1] = KERN_WARNING "amikbd: keyboard lost sync\n",
@@ -186,7 +213,7 @@ static irqreturn_t amikbd_interrupt(int irq, void *data)
static int __init amikbd_probe(struct platform_device *pdev)
{
struct input_dev *dev;
- int i, j, err;
+ int i, err;
dev = input_allocate_device();
if (!dev) {
@@ -207,22 +234,8 @@ static int __init amikbd_probe(struct platform_device *pdev)
for (i = 0; i < 0x78; i++)
set_bit(i, dev->keybit);
- for (i = 0; i < MAX_NR_KEYMAPS; i++) {
- static u_short temp_map[NR_KEYS] __initdata;
- if (!key_maps[i])
- continue;
- memset(temp_map, 0, sizeof(temp_map));
- for (j = 0; j < 0x78; j++) {
- if (!amikbd_keycode[j])
- continue;
- temp_map[j] = key_maps[i][amikbd_keycode[j]];
- }
- for (j = 0; j < NR_KEYS; j++) {
- if (!temp_map[j])
- temp_map[j] = 0xf200;
- }
- memcpy(key_maps[i], temp_map, sizeof(temp_map));
- }
+ amikbd_init_console_keymaps();
+
ciaa.cra &= ~0x41; /* serial data in, turn off TA */
err = request_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt, 0, "amikbd",
dev);
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 6f5d7956913..e27a25892db 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -456,8 +456,9 @@ static irqreturn_t atkbd_interrupt(struct serio *serio, unsigned char data,
keycode = atkbd->keycode[code];
- if (keycode != ATKBD_KEY_NULL)
- input_event(dev, EV_MSC, MSC_SCAN, code);
+ if (!(atkbd->release && test_bit(code, atkbd->force_release_mask)))
+ if (keycode != ATKBD_KEY_NULL)
+ input_event(dev, EV_MSC, MSC_SCAN, code);
switch (keycode) {
case ATKBD_KEY_NULL:
@@ -511,6 +512,7 @@ static irqreturn_t atkbd_interrupt(struct serio *serio, unsigned char data,
input_sync(dev);
if (value && test_bit(code, atkbd->force_release_mask)) {
+ input_event(dev, EV_MSC, MSC_SCAN, code);
input_report_key(dev, keycode, 0);
input_sync(dev);
}
diff --git a/drivers/input/keyboard/cap1106.c b/drivers/input/keyboard/cap1106.c
deleted file mode 100644
index d70b65a14ce..00000000000
--- a/drivers/input/keyboard/cap1106.c
+++ /dev/null
@@ -1,341 +0,0 @@
-/*
- * Input driver for Microchip CAP1106, 6 channel capacitive touch sensor
- *
- * http://www.microchip.com/wwwproducts/Devices.aspx?product=CAP1106
- *
- * (c) 2014 Daniel Mack <linux@zonque.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/input.h>
-#include <linux/of_irq.h>
-#include <linux/regmap.h>
-#include <linux/i2c.h>
-#include <linux/gpio/consumer.h>
-
-#define CAP1106_REG_MAIN_CONTROL 0x00
-#define CAP1106_REG_MAIN_CONTROL_GAIN_SHIFT (6)
-#define CAP1106_REG_MAIN_CONTROL_GAIN_MASK (0xc0)
-#define CAP1106_REG_MAIN_CONTROL_DLSEEP BIT(4)
-#define CAP1106_REG_GENERAL_STATUS 0x02
-#define CAP1106_REG_SENSOR_INPUT 0x03
-#define CAP1106_REG_NOISE_FLAG_STATUS 0x0a
-#define CAP1106_REG_SENOR_DELTA(X) (0x10 + (X))
-#define CAP1106_REG_SENSITIVITY_CONTROL 0x1f
-#define CAP1106_REG_CONFIG 0x20
-#define CAP1106_REG_SENSOR_ENABLE 0x21
-#define CAP1106_REG_SENSOR_CONFIG 0x22
-#define CAP1106_REG_SENSOR_CONFIG2 0x23
-#define CAP1106_REG_SAMPLING_CONFIG 0x24
-#define CAP1106_REG_CALIBRATION 0x26
-#define CAP1106_REG_INT_ENABLE 0x27
-#define CAP1106_REG_REPEAT_RATE 0x28
-#define CAP1106_REG_MT_CONFIG 0x2a
-#define CAP1106_REG_MT_PATTERN_CONFIG 0x2b
-#define CAP1106_REG_MT_PATTERN 0x2d
-#define CAP1106_REG_RECALIB_CONFIG 0x2f
-#define CAP1106_REG_SENSOR_THRESH(X) (0x30 + (X))
-#define CAP1106_REG_SENSOR_NOISE_THRESH 0x38
-#define CAP1106_REG_STANDBY_CHANNEL 0x40
-#define CAP1106_REG_STANDBY_CONFIG 0x41
-#define CAP1106_REG_STANDBY_SENSITIVITY 0x42
-#define CAP1106_REG_STANDBY_THRESH 0x43
-#define CAP1106_REG_CONFIG2 0x44
-#define CAP1106_REG_SENSOR_BASE_CNT(X) (0x50 + (X))
-#define CAP1106_REG_SENSOR_CALIB (0xb1 + (X))
-#define CAP1106_REG_SENSOR_CALIB_LSB1 0xb9
-#define CAP1106_REG_SENSOR_CALIB_LSB2 0xba
-#define CAP1106_REG_PRODUCT_ID 0xfd
-#define CAP1106_REG_MANUFACTURER_ID 0xfe
-#define CAP1106_REG_REVISION 0xff
-
-#define CAP1106_NUM_CHN 6
-#define CAP1106_PRODUCT_ID 0x55
-#define CAP1106_MANUFACTURER_ID 0x5d
-
-struct cap1106_priv {
- struct regmap *regmap;
- struct input_dev *idev;
-
- /* config */
- unsigned short keycodes[CAP1106_NUM_CHN];
-};
-
-static const struct reg_default cap1106_reg_defaults[] = {
- { CAP1106_REG_MAIN_CONTROL, 0x00 },
- { CAP1106_REG_GENERAL_STATUS, 0x00 },
- { CAP1106_REG_SENSOR_INPUT, 0x00 },
- { CAP1106_REG_NOISE_FLAG_STATUS, 0x00 },
- { CAP1106_REG_SENSITIVITY_CONTROL, 0x2f },
- { CAP1106_REG_CONFIG, 0x20 },
- { CAP1106_REG_SENSOR_ENABLE, 0x3f },
- { CAP1106_REG_SENSOR_CONFIG, 0xa4 },
- { CAP1106_REG_SENSOR_CONFIG2, 0x07 },
- { CAP1106_REG_SAMPLING_CONFIG, 0x39 },
- { CAP1106_REG_CALIBRATION, 0x00 },
- { CAP1106_REG_INT_ENABLE, 0x3f },
- { CAP1106_REG_REPEAT_RATE, 0x3f },
- { CAP1106_REG_MT_CONFIG, 0x80 },
- { CAP1106_REG_MT_PATTERN_CONFIG, 0x00 },
- { CAP1106_REG_MT_PATTERN, 0x3f },
- { CAP1106_REG_RECALIB_CONFIG, 0x8a },
- { CAP1106_REG_SENSOR_THRESH(0), 0x40 },
- { CAP1106_REG_SENSOR_THRESH(1), 0x40 },
- { CAP1106_REG_SENSOR_THRESH(2), 0x40 },
- { CAP1106_REG_SENSOR_THRESH(3), 0x40 },
- { CAP1106_REG_SENSOR_THRESH(4), 0x40 },
- { CAP1106_REG_SENSOR_THRESH(5), 0x40 },
- { CAP1106_REG_SENSOR_NOISE_THRESH, 0x01 },
- { CAP1106_REG_STANDBY_CHANNEL, 0x00 },
- { CAP1106_REG_STANDBY_CONFIG, 0x39 },
- { CAP1106_REG_STANDBY_SENSITIVITY, 0x02 },
- { CAP1106_REG_STANDBY_THRESH, 0x40 },
- { CAP1106_REG_CONFIG2, 0x40 },
- { CAP1106_REG_SENSOR_CALIB_LSB1, 0x00 },
- { CAP1106_REG_SENSOR_CALIB_LSB2, 0x00 },
-};
-
-static bool cap1106_volatile_reg(struct device *dev, unsigned int reg)
-{
- switch (reg) {
- case CAP1106_REG_MAIN_CONTROL:
- case CAP1106_REG_SENSOR_INPUT:
- case CAP1106_REG_SENOR_DELTA(0):
- case CAP1106_REG_SENOR_DELTA(1):
- case CAP1106_REG_SENOR_DELTA(2):
- case CAP1106_REG_SENOR_DELTA(3):
- case CAP1106_REG_SENOR_DELTA(4):
- case CAP1106_REG_SENOR_DELTA(5):
- case CAP1106_REG_PRODUCT_ID:
- case CAP1106_REG_MANUFACTURER_ID:
- case CAP1106_REG_REVISION:
- return true;
- }
-
- return false;
-}
-
-static const struct regmap_config cap1106_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-
- .max_register = CAP1106_REG_REVISION,
- .reg_defaults = cap1106_reg_defaults,
-
- .num_reg_defaults = ARRAY_SIZE(cap1106_reg_defaults),
- .cache_type = REGCACHE_RBTREE,
- .volatile_reg = cap1106_volatile_reg,
-};
-
-static irqreturn_t cap1106_thread_func(int irq_num, void *data)
-{
- struct cap1106_priv *priv = data;
- unsigned int status;
- int ret, i;
-
- /*
- * Deassert interrupt. This needs to be done before reading the status
- * registers, which will not carry valid values otherwise.
- */
- ret = regmap_update_bits(priv->regmap, CAP1106_REG_MAIN_CONTROL, 1, 0);
- if (ret < 0)
- goto out;
-
- ret = regmap_read(priv->regmap, CAP1106_REG_SENSOR_INPUT, &status);
- if (ret < 0)
- goto out;
-
- for (i = 0; i < CAP1106_NUM_CHN; i++)
- input_report_key(priv->idev, priv->keycodes[i],
- status & (1 << i));
-
- input_sync(priv->idev);
-
-out:
- return IRQ_HANDLED;
-}
-
-static int cap1106_set_sleep(struct cap1106_priv *priv, bool sleep)
-{
- return regmap_update_bits(priv->regmap, CAP1106_REG_MAIN_CONTROL,
- CAP1106_REG_MAIN_CONTROL_DLSEEP,
- sleep ? CAP1106_REG_MAIN_CONTROL_DLSEEP : 0);
-}
-
-static int cap1106_input_open(struct input_dev *idev)
-{
- struct cap1106_priv *priv = input_get_drvdata(idev);
-
- return cap1106_set_sleep(priv, false);
-}
-
-static void cap1106_input_close(struct input_dev *idev)
-{
- struct cap1106_priv *priv = input_get_drvdata(idev);
-
- cap1106_set_sleep(priv, true);
-}
-
-static int cap1106_i2c_probe(struct i2c_client *i2c_client,
- const struct i2c_device_id *id)
-{
- struct device *dev = &i2c_client->dev;
- struct cap1106_priv *priv;
- struct device_node *node;
- int i, error, irq, gain = 0;
- unsigned int val, rev;
- u32 gain32, keycodes[CAP1106_NUM_CHN];
-
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->regmap = devm_regmap_init_i2c(i2c_client, &cap1106_regmap_config);
- if (IS_ERR(priv->regmap))
- return PTR_ERR(priv->regmap);
-
- error = regmap_read(priv->regmap, CAP1106_REG_PRODUCT_ID, &val);
- if (error)
- return error;
-
- if (val != CAP1106_PRODUCT_ID) {
- dev_err(dev, "Product ID: Got 0x%02x, expected 0x%02x\n",
- val, CAP1106_PRODUCT_ID);
- return -ENODEV;
- }
-
- error = regmap_read(priv->regmap, CAP1106_REG_MANUFACTURER_ID, &val);
- if (error)
- return error;
-
- if (val != CAP1106_MANUFACTURER_ID) {
- dev_err(dev, "Manufacturer ID: Got 0x%02x, expected 0x%02x\n",
- val, CAP1106_MANUFACTURER_ID);
- return -ENODEV;
- }
-
- error = regmap_read(priv->regmap, CAP1106_REG_REVISION, &rev);
- if (error < 0)
- return error;
-
- dev_info(dev, "CAP1106 detected, revision 0x%02x\n", rev);
- i2c_set_clientdata(i2c_client, priv);
- node = dev->of_node;
-
- if (!of_property_read_u32(node, "microchip,sensor-gain", &gain32)) {
- if (is_power_of_2(gain32) && gain32 <= 8)
- gain = ilog2(gain32);
- else
- dev_err(dev, "Invalid sensor-gain value %d\n", gain32);
- }
-
- BUILD_BUG_ON(ARRAY_SIZE(keycodes) != ARRAY_SIZE(priv->keycodes));
-
- /* Provide some useful defaults */
- for (i = 0; i < ARRAY_SIZE(keycodes); i++)
- keycodes[i] = KEY_A + i;
-
- of_property_read_u32_array(node, "linux,keycodes",
- keycodes, ARRAY_SIZE(keycodes));
-
- for (i = 0; i < ARRAY_SIZE(keycodes); i++)
- priv->keycodes[i] = keycodes[i];
-
- error = regmap_update_bits(priv->regmap, CAP1106_REG_MAIN_CONTROL,
- CAP1106_REG_MAIN_CONTROL_GAIN_MASK,
- gain << CAP1106_REG_MAIN_CONTROL_GAIN_SHIFT);
- if (error)
- return error;
-
- /* Disable autorepeat. The Linux input system has its own handling. */
- error = regmap_write(priv->regmap, CAP1106_REG_REPEAT_RATE, 0);
- if (error)
- return error;
-
- priv->idev = devm_input_allocate_device(dev);
- if (!priv->idev)
- return -ENOMEM;
-
- priv->idev->name = "CAP1106 capacitive touch sensor";
- priv->idev->id.bustype = BUS_I2C;
- priv->idev->evbit[0] = BIT_MASK(EV_KEY);
-
- if (of_property_read_bool(node, "autorepeat"))
- __set_bit(EV_REP, priv->idev->evbit);
-
- for (i = 0; i < CAP1106_NUM_CHN; i++)
- __set_bit(priv->keycodes[i], priv->idev->keybit);
-
- __clear_bit(KEY_RESERVED, priv->idev->keybit);
-
- priv->idev->keycode = priv->keycodes;
- priv->idev->keycodesize = sizeof(priv->keycodes[0]);
- priv->idev->keycodemax = ARRAY_SIZE(priv->keycodes);
-
- priv->idev->id.vendor = CAP1106_MANUFACTURER_ID;
- priv->idev->id.product = CAP1106_PRODUCT_ID;
- priv->idev->id.version = rev;
-
- priv->idev->open = cap1106_input_open;
- priv->idev->close = cap1106_input_close;
-
- input_set_drvdata(priv->idev, priv);
-
- /*
- * Put the device in deep sleep mode for now.
- * ->open() will bring it back once the it is actually needed.
- */
- cap1106_set_sleep(priv, true);
-
- error = input_register_device(priv->idev);
- if (error)
- return error;
-
- irq = irq_of_parse_and_map(node, 0);
- if (!irq) {
- dev_err(dev, "Unable to parse or map IRQ\n");
- return -ENXIO;
- }
-
- error = devm_request_threaded_irq(dev, irq, NULL, cap1106_thread_func,
- IRQF_ONESHOT, dev_name(dev), priv);
- if (error)
- return error;
-
- return 0;
-}
-
-static const struct of_device_id cap1106_dt_ids[] = {
- { .compatible = "microchip,cap1106", },
- {}
-};
-MODULE_DEVICE_TABLE(of, cap1106_dt_ids);
-
-static const struct i2c_device_id cap1106_i2c_ids[] = {
- { "cap1106", 0 },
- {}
-};
-MODULE_DEVICE_TABLE(i2c, cap1106_i2c_ids);
-
-static struct i2c_driver cap1106_i2c_driver = {
- .driver = {
- .name = "cap1106",
- .owner = THIS_MODULE,
- .of_match_table = cap1106_dt_ids,
- },
- .id_table = cap1106_i2c_ids,
- .probe = cap1106_i2c_probe,
-};
-
-module_i2c_driver(cap1106_i2c_driver);
-
-MODULE_ALIAS("platform:cap1106");
-MODULE_DESCRIPTION("Microchip CAP1106 driver");
-MODULE_AUTHOR("Daniel Mack <linux@zonque.org>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/keyboard/cap11xx.c b/drivers/input/keyboard/cap11xx.c
new file mode 100644
index 00000000000..4f59f0bab28
--- /dev/null
+++ b/drivers/input/keyboard/cap11xx.c
@@ -0,0 +1,376 @@
+/*
+ * Input driver for Microchip CAP11xx based capacitive touch sensors
+ *
+ * (c) 2014 Daniel Mack <linux@zonque.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+#include <linux/i2c.h>
+#include <linux/gpio/consumer.h>
+
+#define CAP11XX_REG_MAIN_CONTROL 0x00
+#define CAP11XX_REG_MAIN_CONTROL_GAIN_SHIFT (6)
+#define CAP11XX_REG_MAIN_CONTROL_GAIN_MASK (0xc0)
+#define CAP11XX_REG_MAIN_CONTROL_DLSEEP BIT(4)
+#define CAP11XX_REG_GENERAL_STATUS 0x02
+#define CAP11XX_REG_SENSOR_INPUT 0x03
+#define CAP11XX_REG_NOISE_FLAG_STATUS 0x0a
+#define CAP11XX_REG_SENOR_DELTA(X) (0x10 + (X))
+#define CAP11XX_REG_SENSITIVITY_CONTROL 0x1f
+#define CAP11XX_REG_CONFIG 0x20
+#define CAP11XX_REG_SENSOR_ENABLE 0x21
+#define CAP11XX_REG_SENSOR_CONFIG 0x22
+#define CAP11XX_REG_SENSOR_CONFIG2 0x23
+#define CAP11XX_REG_SAMPLING_CONFIG 0x24
+#define CAP11XX_REG_CALIBRATION 0x26
+#define CAP11XX_REG_INT_ENABLE 0x27
+#define CAP11XX_REG_REPEAT_RATE 0x28
+#define CAP11XX_REG_MT_CONFIG 0x2a
+#define CAP11XX_REG_MT_PATTERN_CONFIG 0x2b
+#define CAP11XX_REG_MT_PATTERN 0x2d
+#define CAP11XX_REG_RECALIB_CONFIG 0x2f
+#define CAP11XX_REG_SENSOR_THRESH(X) (0x30 + (X))
+#define CAP11XX_REG_SENSOR_NOISE_THRESH 0x38
+#define CAP11XX_REG_STANDBY_CHANNEL 0x40
+#define CAP11XX_REG_STANDBY_CONFIG 0x41
+#define CAP11XX_REG_STANDBY_SENSITIVITY 0x42
+#define CAP11XX_REG_STANDBY_THRESH 0x43
+#define CAP11XX_REG_CONFIG2 0x44
+#define CAP11XX_REG_CONFIG2_ALT_POL BIT(6)
+#define CAP11XX_REG_SENSOR_BASE_CNT(X) (0x50 + (X))
+#define CAP11XX_REG_SENSOR_CALIB (0xb1 + (X))
+#define CAP11XX_REG_SENSOR_CALIB_LSB1 0xb9
+#define CAP11XX_REG_SENSOR_CALIB_LSB2 0xba
+#define CAP11XX_REG_PRODUCT_ID 0xfd
+#define CAP11XX_REG_MANUFACTURER_ID 0xfe
+#define CAP11XX_REG_REVISION 0xff
+
+#define CAP11XX_MANUFACTURER_ID 0x5d
+
+struct cap11xx_priv {
+ struct regmap *regmap;
+ struct input_dev *idev;
+
+ /* config */
+ u32 keycodes[];
+};
+
+struct cap11xx_hw_model {
+ u8 product_id;
+ unsigned int num_channels;
+};
+
+enum {
+ CAP1106,
+ CAP1126,
+ CAP1188,
+};
+
+static const struct cap11xx_hw_model cap11xx_devices[] = {
+ [CAP1106] = { .product_id = 0x55, .num_channels = 6 },
+ [CAP1126] = { .product_id = 0x53, .num_channels = 6 },
+ [CAP1188] = { .product_id = 0x50, .num_channels = 8 },
+};
+
+static const struct reg_default cap11xx_reg_defaults[] = {
+ { CAP11XX_REG_MAIN_CONTROL, 0x00 },
+ { CAP11XX_REG_GENERAL_STATUS, 0x00 },
+ { CAP11XX_REG_SENSOR_INPUT, 0x00 },
+ { CAP11XX_REG_NOISE_FLAG_STATUS, 0x00 },
+ { CAP11XX_REG_SENSITIVITY_CONTROL, 0x2f },
+ { CAP11XX_REG_CONFIG, 0x20 },
+ { CAP11XX_REG_SENSOR_ENABLE, 0x3f },
+ { CAP11XX_REG_SENSOR_CONFIG, 0xa4 },
+ { CAP11XX_REG_SENSOR_CONFIG2, 0x07 },
+ { CAP11XX_REG_SAMPLING_CONFIG, 0x39 },
+ { CAP11XX_REG_CALIBRATION, 0x00 },
+ { CAP11XX_REG_INT_ENABLE, 0x3f },
+ { CAP11XX_REG_REPEAT_RATE, 0x3f },
+ { CAP11XX_REG_MT_CONFIG, 0x80 },
+ { CAP11XX_REG_MT_PATTERN_CONFIG, 0x00 },
+ { CAP11XX_REG_MT_PATTERN, 0x3f },
+ { CAP11XX_REG_RECALIB_CONFIG, 0x8a },
+ { CAP11XX_REG_SENSOR_THRESH(0), 0x40 },
+ { CAP11XX_REG_SENSOR_THRESH(1), 0x40 },
+ { CAP11XX_REG_SENSOR_THRESH(2), 0x40 },
+ { CAP11XX_REG_SENSOR_THRESH(3), 0x40 },
+ { CAP11XX_REG_SENSOR_THRESH(4), 0x40 },
+ { CAP11XX_REG_SENSOR_THRESH(5), 0x40 },
+ { CAP11XX_REG_SENSOR_NOISE_THRESH, 0x01 },
+ { CAP11XX_REG_STANDBY_CHANNEL, 0x00 },
+ { CAP11XX_REG_STANDBY_CONFIG, 0x39 },
+ { CAP11XX_REG_STANDBY_SENSITIVITY, 0x02 },
+ { CAP11XX_REG_STANDBY_THRESH, 0x40 },
+ { CAP11XX_REG_CONFIG2, 0x40 },
+ { CAP11XX_REG_SENSOR_CALIB_LSB1, 0x00 },
+ { CAP11XX_REG_SENSOR_CALIB_LSB2, 0x00 },
+};
+
+static bool cap11xx_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case CAP11XX_REG_MAIN_CONTROL:
+ case CAP11XX_REG_SENSOR_INPUT:
+ case CAP11XX_REG_SENOR_DELTA(0):
+ case CAP11XX_REG_SENOR_DELTA(1):
+ case CAP11XX_REG_SENOR_DELTA(2):
+ case CAP11XX_REG_SENOR_DELTA(3):
+ case CAP11XX_REG_SENOR_DELTA(4):
+ case CAP11XX_REG_SENOR_DELTA(5):
+ case CAP11XX_REG_PRODUCT_ID:
+ case CAP11XX_REG_MANUFACTURER_ID:
+ case CAP11XX_REG_REVISION:
+ return true;
+ }
+
+ return false;
+}
+
+static const struct regmap_config cap11xx_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = CAP11XX_REG_REVISION,
+ .reg_defaults = cap11xx_reg_defaults,
+
+ .num_reg_defaults = ARRAY_SIZE(cap11xx_reg_defaults),
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_reg = cap11xx_volatile_reg,
+};
+
+static irqreturn_t cap11xx_thread_func(int irq_num, void *data)
+{
+ struct cap11xx_priv *priv = data;
+ unsigned int status;
+ int ret, i;
+
+ /*
+ * Deassert interrupt. This needs to be done before reading the status
+ * registers, which will not carry valid values otherwise.
+ */
+ ret = regmap_update_bits(priv->regmap, CAP11XX_REG_MAIN_CONTROL, 1, 0);
+ if (ret < 0)
+ goto out;
+
+ ret = regmap_read(priv->regmap, CAP11XX_REG_SENSOR_INPUT, &status);
+ if (ret < 0)
+ goto out;
+
+ for (i = 0; i < priv->idev->keycodemax; i++)
+ input_report_key(priv->idev, priv->keycodes[i],
+ status & (1 << i));
+
+ input_sync(priv->idev);
+
+out:
+ return IRQ_HANDLED;
+}
+
+static int cap11xx_set_sleep(struct cap11xx_priv *priv, bool sleep)
+{
+ return regmap_update_bits(priv->regmap, CAP11XX_REG_MAIN_CONTROL,
+ CAP11XX_REG_MAIN_CONTROL_DLSEEP,
+ sleep ? CAP11XX_REG_MAIN_CONTROL_DLSEEP : 0);
+}
+
+static int cap11xx_input_open(struct input_dev *idev)
+{
+ struct cap11xx_priv *priv = input_get_drvdata(idev);
+
+ return cap11xx_set_sleep(priv, false);
+}
+
+static void cap11xx_input_close(struct input_dev *idev)
+{
+ struct cap11xx_priv *priv = input_get_drvdata(idev);
+
+ cap11xx_set_sleep(priv, true);
+}
+
+static int cap11xx_i2c_probe(struct i2c_client *i2c_client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &i2c_client->dev;
+ struct cap11xx_priv *priv;
+ struct device_node *node;
+ const struct cap11xx_hw_model *cap;
+ int i, error, irq, gain = 0;
+ unsigned int val, rev;
+ u32 gain32;
+
+ if (id->driver_data >= ARRAY_SIZE(cap11xx_devices)) {
+ dev_err(dev, "Invalid device ID %lu\n", id->driver_data);
+ return -EINVAL;
+ }
+
+ cap = &cap11xx_devices[id->driver_data];
+ if (!cap || !cap->num_channels) {
+ dev_err(dev, "Invalid device configuration\n");
+ return -EINVAL;
+ }
+
+ priv = devm_kzalloc(dev,
+ sizeof(*priv) +
+ cap->num_channels * sizeof(priv->keycodes[0]),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regmap = devm_regmap_init_i2c(i2c_client, &cap11xx_regmap_config);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ error = regmap_read(priv->regmap, CAP11XX_REG_PRODUCT_ID, &val);
+ if (error)
+ return error;
+
+ if (val != cap->product_id) {
+ dev_err(dev, "Product ID: Got 0x%02x, expected 0x%02x\n",
+ val, cap->product_id);
+ return -ENXIO;
+ }
+
+ error = regmap_read(priv->regmap, CAP11XX_REG_MANUFACTURER_ID, &val);
+ if (error)
+ return error;
+
+ if (val != CAP11XX_MANUFACTURER_ID) {
+ dev_err(dev, "Manufacturer ID: Got 0x%02x, expected 0x%02x\n",
+ val, CAP11XX_MANUFACTURER_ID);
+ return -ENXIO;
+ }
+
+ error = regmap_read(priv->regmap, CAP11XX_REG_REVISION, &rev);
+ if (error < 0)
+ return error;
+
+ dev_info(dev, "CAP11XX detected, revision 0x%02x\n", rev);
+ i2c_set_clientdata(i2c_client, priv);
+ node = dev->of_node;
+
+ if (!of_property_read_u32(node, "microchip,sensor-gain", &gain32)) {
+ if (is_power_of_2(gain32) && gain32 <= 8)
+ gain = ilog2(gain32);
+ else
+ dev_err(dev, "Invalid sensor-gain value %d\n", gain32);
+ }
+
+ if (of_property_read_bool(node, "microchip,irq-active-high")) {
+ error = regmap_update_bits(priv->regmap, CAP11XX_REG_CONFIG2,
+ CAP11XX_REG_CONFIG2_ALT_POL, 0);
+ if (error)
+ return error;
+ }
+
+ /* Provide some useful defaults */
+ for (i = 0; i < cap->num_channels; i++)
+ priv->keycodes[i] = KEY_A + i;
+
+ of_property_read_u32_array(node, "linux,keycodes",
+ priv->keycodes, cap->num_channels);
+
+ error = regmap_update_bits(priv->regmap, CAP11XX_REG_MAIN_CONTROL,
+ CAP11XX_REG_MAIN_CONTROL_GAIN_MASK,
+ gain << CAP11XX_REG_MAIN_CONTROL_GAIN_SHIFT);
+ if (error)
+ return error;
+
+ /* Disable autorepeat. The Linux input system has its own handling. */
+ error = regmap_write(priv->regmap, CAP11XX_REG_REPEAT_RATE, 0);
+ if (error)
+ return error;
+
+ priv->idev = devm_input_allocate_device(dev);
+ if (!priv->idev)
+ return -ENOMEM;
+
+ priv->idev->name = "CAP11XX capacitive touch sensor";
+ priv->idev->id.bustype = BUS_I2C;
+ priv->idev->evbit[0] = BIT_MASK(EV_KEY);
+
+ if (of_property_read_bool(node, "autorepeat"))
+ __set_bit(EV_REP, priv->idev->evbit);
+
+ for (i = 0; i < cap->num_channels; i++)
+ __set_bit(priv->keycodes[i], priv->idev->keybit);
+
+ __clear_bit(KEY_RESERVED, priv->idev->keybit);
+
+ priv->idev->keycode = priv->keycodes;
+ priv->idev->keycodesize = sizeof(priv->keycodes[0]);
+ priv->idev->keycodemax = cap->num_channels;
+
+ priv->idev->id.vendor = CAP11XX_MANUFACTURER_ID;
+ priv->idev->id.product = cap->product_id;
+ priv->idev->id.version = rev;
+
+ priv->idev->open = cap11xx_input_open;
+ priv->idev->close = cap11xx_input_close;
+
+ input_set_drvdata(priv->idev, priv);
+
+ /*
+ * Put the device in deep sleep mode for now.
+ * ->open() will bring it back once it is actually needed.
+ */
+ cap11xx_set_sleep(priv, true);
+
+ error = input_register_device(priv->idev);
+ if (error)
+ return error;
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (!irq) {
+ dev_err(dev, "Unable to parse or map IRQ\n");
+ return -ENXIO;
+ }
+
+ error = devm_request_threaded_irq(dev, irq, NULL, cap11xx_thread_func,
+ IRQF_ONESHOT, dev_name(dev), priv);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static const struct of_device_id cap11xx_dt_ids[] = {
+ { .compatible = "microchip,cap1106", },
+ { .compatible = "microchip,cap1126", },
+ { .compatible = "microchip,cap1188", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, cap11xx_dt_ids);
+
+static const struct i2c_device_id cap11xx_i2c_ids[] = {
+ { "cap1106", CAP1106 },
+ { "cap1126", CAP1126 },
+ { "cap1188", CAP1188 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, cap11xx_i2c_ids);
+
+static struct i2c_driver cap11xx_i2c_driver = {
+ .driver = {
+ .name = "cap11xx",
+ .owner = THIS_MODULE,
+ .of_match_table = cap11xx_dt_ids,
+ },
+ .id_table = cap11xx_i2c_ids,
+ .probe = cap11xx_i2c_probe,
+};
+
+module_i2c_driver(cap11xx_i2c_driver);
+
+MODULE_ALIAS("platform:cap11xx");
+MODULE_DESCRIPTION("Microchip CAP11XX driver");
+MODULE_AUTHOR("Daniel Mack <linux@zonque.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 8f3a24e1540..d4dd78a7d56 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -29,6 +29,7 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
#include <linux/spinlock.h>
struct gpio_button_data {
@@ -617,27 +618,31 @@ gpio_keys_get_devtree_pdata(struct device *dev)
i = 0;
for_each_child_of_node(node, pp) {
- int gpio;
+ int gpio = -1;
enum of_gpio_flags flags;
- if (!of_find_property(pp, "gpios", NULL)) {
- pdata->nbuttons--;
- dev_warn(dev, "Found button without gpios\n");
- continue;
- }
+ button = &pdata->buttons[i++];
- gpio = of_get_gpio_flags(pp, 0, &flags);
- if (gpio < 0) {
- error = gpio;
- if (error != -EPROBE_DEFER)
- dev_err(dev,
- "Failed to get gpio flags, error: %d\n",
- error);
- return ERR_PTR(error);
+ if (!of_find_property(pp, "gpios", NULL)) {
+ button->irq = irq_of_parse_and_map(pp, 0);
+ if (button->irq == 0) {
+ i--;
+ pdata->nbuttons--;
+ dev_warn(dev, "Found button without gpios or irqs\n");
+ continue;
+ }
+ } else {
+ gpio = of_get_gpio_flags(pp, 0, &flags);
+ if (gpio < 0) {
+ error = gpio;
+ if (error != -EPROBE_DEFER)
+ dev_err(dev,
+ "Failed to get gpio flags, error: %d\n",
+ error);
+ return ERR_PTR(error);
+ }
}
- button = &pdata->buttons[i++];
-
button->gpio = gpio;
button->active_low = flags & OF_GPIO_ACTIVE_LOW;
diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c
index cb32e2b506b..21bea52d436 100644
--- a/drivers/input/keyboard/lm8323.c
+++ b/drivers/input/keyboard/lm8323.c
@@ -616,6 +616,8 @@ static ssize_t lm8323_set_disable(struct device *dev,
unsigned int i;
ret = kstrtouint(buf, 10, &i);
+ if (ret)
+ return ret;
mutex_lock(&lm->lock);
lm->kp_enabled = !i;
diff --git a/drivers/input/keyboard/lpc32xx-keys.c b/drivers/input/keyboard/lpc32xx-keys.c
index 8c079371c2e..265d641c40e 100644
--- a/drivers/input/keyboard/lpc32xx-keys.c
+++ b/drivers/input/keyboard/lpc32xx-keys.c
@@ -66,7 +66,6 @@
struct lpc32xx_kscan_drv {
struct input_dev *input;
struct clk *clk;
- struct resource *iores;
void __iomem *kscan_base;
unsigned int irq;
@@ -188,32 +187,27 @@ static int lpc32xx_kscan_probe(struct platform_device *pdev)
return -EINVAL;
}
- kscandat = kzalloc(sizeof(struct lpc32xx_kscan_drv), GFP_KERNEL);
- if (!kscandat) {
- dev_err(&pdev->dev, "failed to allocate memory\n");
+ kscandat = devm_kzalloc(&pdev->dev, sizeof(*kscandat),
+ GFP_KERNEL);
+ if (!kscandat)
return -ENOMEM;
- }
error = lpc32xx_parse_dt(&pdev->dev, kscandat);
if (error) {
dev_err(&pdev->dev, "failed to parse device tree\n");
- goto err_free_mem;
+ return error;
}
keymap_size = sizeof(kscandat->keymap[0]) *
(kscandat->matrix_sz << kscandat->row_shift);
- kscandat->keymap = kzalloc(keymap_size, GFP_KERNEL);
- if (!kscandat->keymap) {
- dev_err(&pdev->dev, "could not allocate memory for keymap\n");
- error = -ENOMEM;
- goto err_free_mem;
- }
+ kscandat->keymap = devm_kzalloc(&pdev->dev, keymap_size, GFP_KERNEL);
+ if (!kscandat->keymap)
+ return -ENOMEM;
- kscandat->input = input = input_allocate_device();
+ kscandat->input = input = devm_input_allocate_device(&pdev->dev);
if (!input) {
dev_err(&pdev->dev, "failed to allocate input device\n");
- error = -ENOMEM;
- goto err_free_keymap;
+ return -ENOMEM;
}
/* Setup key input */
@@ -234,39 +228,26 @@ static int lpc32xx_kscan_probe(struct platform_device *pdev)
kscandat->keymap, kscandat->input);
if (error) {
dev_err(&pdev->dev, "failed to build keymap\n");
- goto err_free_input;
+ return error;
}
input_set_drvdata(kscandat->input, kscandat);
- kscandat->iores = request_mem_region(res->start, resource_size(res),
- pdev->name);
- if (!kscandat->iores) {
- dev_err(&pdev->dev, "failed to request I/O memory\n");
- error = -EBUSY;
- goto err_free_input;
- }
-
- kscandat->kscan_base = ioremap(kscandat->iores->start,
- resource_size(kscandat->iores));
- if (!kscandat->kscan_base) {
- dev_err(&pdev->dev, "failed to remap I/O memory\n");
- error = -EBUSY;
- goto err_release_memregion;
- }
+ kscandat->kscan_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(kscandat->kscan_base))
+ return PTR_ERR(kscandat->kscan_base);
/* Get the key scanner clock */
- kscandat->clk = clk_get(&pdev->dev, NULL);
+ kscandat->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(kscandat->clk)) {
dev_err(&pdev->dev, "failed to get clock\n");
- error = PTR_ERR(kscandat->clk);
- goto err_unmap;
+ return PTR_ERR(kscandat->clk);
}
/* Configure the key scanner */
error = clk_prepare_enable(kscandat->clk);
if (error)
- goto err_clk_put;
+ return error;
writel(kscandat->deb_clks, LPC32XX_KS_DEB(kscandat->kscan_base));
writel(kscandat->scan_delay, LPC32XX_KS_SCAN_CTL(kscandat->kscan_base));
@@ -277,52 +258,20 @@ static int lpc32xx_kscan_probe(struct platform_device *pdev)
writel(1, LPC32XX_KS_IRQ(kscandat->kscan_base));
clk_disable_unprepare(kscandat->clk);
- error = request_irq(irq, lpc32xx_kscan_irq, 0, pdev->name, kscandat);
+ error = devm_request_irq(&pdev->dev, irq, lpc32xx_kscan_irq, 0,
+ pdev->name, kscandat);
if (error) {
dev_err(&pdev->dev, "failed to request irq\n");
- goto err_clk_put;
+ return error;
}
error = input_register_device(kscandat->input);
if (error) {
dev_err(&pdev->dev, "failed to register input device\n");
- goto err_free_irq;
+ return error;
}
platform_set_drvdata(pdev, kscandat);
- return 0;
-
-err_free_irq:
- free_irq(irq, kscandat);
-err_clk_put:
- clk_put(kscandat->clk);
-err_unmap:
- iounmap(kscandat->kscan_base);
-err_release_memregion:
- release_mem_region(kscandat->iores->start,
- resource_size(kscandat->iores));
-err_free_input:
- input_free_device(kscandat->input);
-err_free_keymap:
- kfree(kscandat->keymap);
-err_free_mem:
- kfree(kscandat);
-
- return error;
-}
-
-static int lpc32xx_kscan_remove(struct platform_device *pdev)
-{
- struct lpc32xx_kscan_drv *kscandat = platform_get_drvdata(pdev);
-
- free_irq(platform_get_irq(pdev, 0), kscandat);
- clk_put(kscandat->clk);
- iounmap(kscandat->kscan_base);
- release_mem_region(kscandat->iores->start,
- resource_size(kscandat->iores));
- input_unregister_device(kscandat->input);
- kfree(kscandat->keymap);
- kfree(kscandat);
return 0;
}
@@ -378,7 +327,6 @@ MODULE_DEVICE_TABLE(of, lpc32xx_kscan_match);
static struct platform_driver lpc32xx_kscan_driver = {
.probe = lpc32xx_kscan_probe,
- .remove = lpc32xx_kscan_remove,
.driver = {
.name = DRV_NAME,
.pm = &lpc32xx_kscan_pm_ops,
diff --git a/drivers/input/keyboard/mpr121_touchkey.c b/drivers/input/keyboard/mpr121_touchkey.c
index 009c82256e8..3aa2ec45bca 100644
--- a/drivers/input/keyboard/mpr121_touchkey.c
+++ b/drivers/input/keyboard/mpr121_touchkey.c
@@ -214,13 +214,14 @@ static int mpr_touchkey_probe(struct i2c_client *client,
return -EINVAL;
}
- mpr121 = kzalloc(sizeof(struct mpr121_touchkey), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!mpr121 || !input_dev) {
- dev_err(&client->dev, "Failed to allocate memory\n");
- error = -ENOMEM;
- goto err_free_mem;
- }
+ mpr121 = devm_kzalloc(&client->dev, sizeof(*mpr121),
+ GFP_KERNEL);
+ if (!mpr121)
+ return -ENOMEM;
+
+ input_dev = devm_input_allocate_device(&client->dev);
+ if (!input_dev)
+ return -ENOMEM;
mpr121->client = client;
mpr121->input_dev = input_dev;
@@ -243,44 +244,26 @@ static int mpr_touchkey_probe(struct i2c_client *client,
error = mpr121_phys_init(pdata, mpr121, client);
if (error) {
dev_err(&client->dev, "Failed to init register\n");
- goto err_free_mem;
+ return error;
}
- error = request_threaded_irq(client->irq, NULL,
+ error = devm_request_threaded_irq(&client->dev, client->irq, NULL,
mpr_touchkey_interrupt,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
client->dev.driver->name, mpr121);
if (error) {
dev_err(&client->dev, "Failed to register interrupt\n");
- goto err_free_mem;
+ return error;
}
error = input_register_device(input_dev);
if (error)
- goto err_free_irq;
+ return error;
i2c_set_clientdata(client, mpr121);
device_init_wakeup(&client->dev, pdata->wakeup);
return 0;
-
-err_free_irq:
- free_irq(client->irq, mpr121);
-err_free_mem:
- input_free_device(input_dev);
- kfree(mpr121);
- return error;
-}
-
-static int mpr_touchkey_remove(struct i2c_client *client)
-{
- struct mpr121_touchkey *mpr121 = i2c_get_clientdata(client);
-
- free_irq(client->irq, mpr121);
- input_unregister_device(mpr121->input_dev);
- kfree(mpr121);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -327,7 +310,6 @@ static struct i2c_driver mpr_touchkey_driver = {
},
.id_table = mpr121_id,
.probe = mpr_touchkey_probe,
- .remove = mpr_touchkey_remove,
};
module_i2c_driver(mpr_touchkey_driver);
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index 6ab3e7c9632..a90d6bdc499 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -741,37 +741,27 @@ static int pxa27x_keypad_probe(struct platform_device *pdev)
return -ENXIO;
}
- keypad = kzalloc(sizeof(struct pxa27x_keypad), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!keypad || !input_dev) {
- dev_err(&pdev->dev, "failed to allocate memory\n");
- error = -ENOMEM;
- goto failed_free;
- }
+ keypad = devm_kzalloc(&pdev->dev, sizeof(*keypad),
+ GFP_KERNEL);
+ if (!keypad)
+ return -ENOMEM;
+
+ input_dev = devm_input_allocate_device(&pdev->dev);
+ if (!input_dev)
+ return -ENOMEM;
keypad->pdata = pdata;
keypad->input_dev = input_dev;
keypad->irq = irq;
- res = request_mem_region(res->start, resource_size(res), pdev->name);
- if (res == NULL) {
- dev_err(&pdev->dev, "failed to request I/O memory\n");
- error = -EBUSY;
- goto failed_free;
- }
-
- keypad->mmio_base = ioremap(res->start, resource_size(res));
- if (keypad->mmio_base == NULL) {
- dev_err(&pdev->dev, "failed to remap I/O memory\n");
- error = -ENXIO;
- goto failed_free_mem;
- }
+ keypad->mmio_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(keypad->mmio_base))
+ return PTR_ERR(keypad->mmio_base);
- keypad->clk = clk_get(&pdev->dev, NULL);
+ keypad->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(keypad->clk)) {
dev_err(&pdev->dev, "failed to get keypad clock\n");
- error = PTR_ERR(keypad->clk);
- goto failed_free_io;
+ return PTR_ERR(keypad->clk);
}
input_dev->name = pdev->name;
@@ -802,7 +792,7 @@ static int pxa27x_keypad_probe(struct platform_device *pdev)
}
if (error) {
dev_err(&pdev->dev, "failed to build keycode\n");
- goto failed_put_clk;
+ return error;
}
keypad->row_shift = get_count_order(pdata->matrix_key_cols);
@@ -812,61 +802,26 @@ static int pxa27x_keypad_probe(struct platform_device *pdev)
input_dev->evbit[0] |= BIT_MASK(EV_REL);
}
- error = request_irq(irq, pxa27x_keypad_irq_handler, 0,
- pdev->name, keypad);
+ error = devm_request_irq(&pdev->dev, irq, pxa27x_keypad_irq_handler,
+ 0, pdev->name, keypad);
if (error) {
dev_err(&pdev->dev, "failed to request IRQ\n");
- goto failed_put_clk;
+ return error;
}
/* Register the input device */
error = input_register_device(input_dev);
if (error) {
dev_err(&pdev->dev, "failed to register input device\n");
- goto failed_free_irq;
+ return error;
}
platform_set_drvdata(pdev, keypad);
device_init_wakeup(&pdev->dev, 1);
return 0;
-
-failed_free_irq:
- free_irq(irq, keypad);
-failed_put_clk:
- clk_put(keypad->clk);
-failed_free_io:
- iounmap(keypad->mmio_base);
-failed_free_mem:
- release_mem_region(res->start, resource_size(res));
-failed_free:
- input_free_device(input_dev);
- kfree(keypad);
- return error;
}
-static int pxa27x_keypad_remove(struct platform_device *pdev)
-{
- struct pxa27x_keypad *keypad = platform_get_drvdata(pdev);
- struct resource *res;
-
- free_irq(keypad->irq, keypad);
- clk_put(keypad->clk);
-
- input_unregister_device(keypad->input_dev);
- iounmap(keypad->mmio_base);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-
- kfree(keypad);
-
- return 0;
-}
-
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:pxa27x-keypad");
-
#ifdef CONFIG_OF
static const struct of_device_id pxa27x_keypad_dt_match[] = {
{ .compatible = "marvell,pxa27x-keypad" },
@@ -877,7 +832,6 @@ MODULE_DEVICE_TABLE(of, pxa27x_keypad_dt_match);
static struct platform_driver pxa27x_keypad_driver = {
.probe = pxa27x_keypad_probe,
- .remove = pxa27x_keypad_remove,
.driver = {
.name = "pxa27x-keypad",
.of_match_table = of_match_ptr(pxa27x_keypad_dt_match),
@@ -888,3 +842,5 @@ module_platform_driver(pxa27x_keypad_driver);
MODULE_DESCRIPTION("PXA27x Keypad Controller Driver");
MODULE_LICENSE("GPL");
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:pxa27x-keypad");
diff --git a/drivers/input/misc/88pm860x_onkey.c b/drivers/input/misc/88pm860x_onkey.c
index cfdca6e9977..cc87443aa2e 100644
--- a/drivers/input/misc/88pm860x_onkey.c
+++ b/drivers/input/misc/88pm860x_onkey.c
@@ -112,8 +112,7 @@ static int pm860x_onkey_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int pm860x_onkey_suspend(struct device *dev)
+static int __maybe_unused pm860x_onkey_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
@@ -122,7 +121,7 @@ static int pm860x_onkey_suspend(struct device *dev)
chip->wakeup_flag |= 1 << PM8607_IRQ_ONKEY;
return 0;
}
-static int pm860x_onkey_resume(struct device *dev)
+static int __maybe_unused pm860x_onkey_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
@@ -131,7 +130,6 @@ static int pm860x_onkey_resume(struct device *dev)
chip->wakeup_flag &= ~(1 << PM8607_IRQ_ONKEY);
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(pm860x_onkey_pm_ops, pm860x_onkey_suspend, pm860x_onkey_resume);
diff --git a/drivers/input/misc/ad714x-i2c.c b/drivers/input/misc/ad714x-i2c.c
index e0f522516ef..189bdc8e91a 100644
--- a/drivers/input/misc/ad714x-i2c.c
+++ b/drivers/input/misc/ad714x-i2c.c
@@ -13,17 +13,15 @@
#include <linux/pm.h>
#include "ad714x.h"
-#ifdef CONFIG_PM_SLEEP
-static int ad714x_i2c_suspend(struct device *dev)
+static int __maybe_unused ad714x_i2c_suspend(struct device *dev)
{
return ad714x_disable(i2c_get_clientdata(to_i2c_client(dev)));
}
-static int ad714x_i2c_resume(struct device *dev)
+static int __maybe_unused ad714x_i2c_resume(struct device *dev)
{
return ad714x_enable(i2c_get_clientdata(to_i2c_client(dev)));
}
-#endif
static SIMPLE_DEV_PM_OPS(ad714x_i2c_pm, ad714x_i2c_suspend, ad714x_i2c_resume);
diff --git a/drivers/input/misc/ad714x-spi.c b/drivers/input/misc/ad714x-spi.c
index 3a90b710e30..a79e50b58bf 100644
--- a/drivers/input/misc/ad714x-spi.c
+++ b/drivers/input/misc/ad714x-spi.c
@@ -16,17 +16,15 @@
#define AD714x_SPI_CMD_PREFIX 0xE000 /* bits 15:11 */
#define AD714x_SPI_READ BIT(10)
-#ifdef CONFIG_PM_SLEEP
-static int ad714x_spi_suspend(struct device *dev)
+static int __maybe_unused ad714x_spi_suspend(struct device *dev)
{
return ad714x_disable(spi_get_drvdata(to_spi_device(dev)));
}
-static int ad714x_spi_resume(struct device *dev)
+static int __maybe_unused ad714x_spi_resume(struct device *dev)
{
return ad714x_enable(spi_get_drvdata(to_spi_device(dev)));
}
-#endif
static SIMPLE_DEV_PM_OPS(ad714x_spi_pm, ad714x_spi_suspend, ad714x_spi_resume);
diff --git a/drivers/input/misc/adxl34x-i2c.c b/drivers/input/misc/adxl34x-i2c.c
index 416f47ddcc9..470bfd6f083 100644
--- a/drivers/input/misc/adxl34x-i2c.c
+++ b/drivers/input/misc/adxl34x-i2c.c
@@ -105,8 +105,7 @@ static int adxl34x_i2c_remove(struct i2c_client *client)
return adxl34x_remove(ac);
}
-#ifdef CONFIG_PM_SLEEP
-static int adxl34x_i2c_suspend(struct device *dev)
+static int __maybe_unused adxl34x_i2c_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct adxl34x *ac = i2c_get_clientdata(client);
@@ -116,7 +115,7 @@ static int adxl34x_i2c_suspend(struct device *dev)
return 0;
}
-static int adxl34x_i2c_resume(struct device *dev)
+static int __maybe_unused adxl34x_i2c_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct adxl34x *ac = i2c_get_clientdata(client);
@@ -125,7 +124,6 @@ static int adxl34x_i2c_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(adxl34x_i2c_pm, adxl34x_i2c_suspend,
adxl34x_i2c_resume);
diff --git a/drivers/input/misc/adxl34x-spi.c b/drivers/input/misc/adxl34x-spi.c
index 76dc0679d3b..da6e76b58da 100644
--- a/drivers/input/misc/adxl34x-spi.c
+++ b/drivers/input/misc/adxl34x-spi.c
@@ -94,8 +94,7 @@ static int adxl34x_spi_remove(struct spi_device *spi)
return adxl34x_remove(ac);
}
-#ifdef CONFIG_PM_SLEEP
-static int adxl34x_spi_suspend(struct device *dev)
+static int __maybe_unused adxl34x_spi_suspend(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
struct adxl34x *ac = spi_get_drvdata(spi);
@@ -105,7 +104,7 @@ static int adxl34x_spi_suspend(struct device *dev)
return 0;
}
-static int adxl34x_spi_resume(struct device *dev)
+static int __maybe_unused adxl34x_spi_resume(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
struct adxl34x *ac = spi_get_drvdata(spi);
@@ -114,7 +113,6 @@ static int adxl34x_spi_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(adxl34x_spi_pm, adxl34x_spi_suspend,
adxl34x_spi_resume);
diff --git a/drivers/input/misc/drv260x.c b/drivers/input/misc/drv260x.c
index cab87f5ce6d..a364e109ca7 100644
--- a/drivers/input/misc/drv260x.c
+++ b/drivers/input/misc/drv260x.c
@@ -639,8 +639,7 @@ static int drv260x_probe(struct i2c_client *client,
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int drv260x_suspend(struct device *dev)
+static int __maybe_unused drv260x_suspend(struct device *dev)
{
struct drv260x_data *haptics = dev_get_drvdata(dev);
int ret = 0;
@@ -672,7 +671,7 @@ out:
return ret;
}
-static int drv260x_resume(struct device *dev)
+static int __maybe_unused drv260x_resume(struct device *dev)
{
struct drv260x_data *haptics = dev_get_drvdata(dev);
int ret = 0;
@@ -702,7 +701,6 @@ out:
mutex_unlock(&haptics->input_dev->mutex);
return ret;
}
-#endif
static SIMPLE_DEV_PM_OPS(drv260x_pm_ops, drv260x_suspend, drv260x_resume);
diff --git a/drivers/input/misc/drv2667.c b/drivers/input/misc/drv2667.c
index 0f437581cc0..a021744e608 100644
--- a/drivers/input/misc/drv2667.c
+++ b/drivers/input/misc/drv2667.c
@@ -406,8 +406,7 @@ static int drv2667_probe(struct i2c_client *client,
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int drv2667_suspend(struct device *dev)
+static int __maybe_unused drv2667_suspend(struct device *dev)
{
struct drv2667_data *haptics = dev_get_drvdata(dev);
int ret = 0;
@@ -436,7 +435,7 @@ out:
return ret;
}
-static int drv2667_resume(struct device *dev)
+static int __maybe_unused drv2667_resume(struct device *dev)
{
struct drv2667_data *haptics = dev_get_drvdata(dev);
int ret = 0;
@@ -464,7 +463,6 @@ out:
mutex_unlock(&haptics->input_dev->mutex);
return ret;
}
-#endif
static SIMPLE_DEV_PM_OPS(drv2667_pm_ops, drv2667_suspend, drv2667_resume);
diff --git a/drivers/input/misc/gp2ap002a00f.c b/drivers/input/misc/gp2ap002a00f.c
index de21e317da3..0ac176d66a6 100644
--- a/drivers/input/misc/gp2ap002a00f.c
+++ b/drivers/input/misc/gp2ap002a00f.c
@@ -225,8 +225,7 @@ static int gp2a_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int gp2a_suspend(struct device *dev)
+static int __maybe_unused gp2a_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct gp2a_data *dt = i2c_get_clientdata(client);
@@ -244,7 +243,7 @@ static int gp2a_suspend(struct device *dev)
return retval;
}
-static int gp2a_resume(struct device *dev)
+static int __maybe_unused gp2a_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct gp2a_data *dt = i2c_get_clientdata(client);
@@ -261,7 +260,6 @@ static int gp2a_resume(struct device *dev)
return retval;
}
-#endif
static SIMPLE_DEV_PM_OPS(gp2a_pm, gp2a_suspend, gp2a_resume);
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index afed8e2b2f9..ac1fa5f4458 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -1851,7 +1851,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id)
static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
{
- static atomic_t device_no = ATOMIC_INIT(0);
+ static atomic_t device_no = ATOMIC_INIT(-1);
const struct ims_pcu_device_info *info;
int error;
@@ -1882,7 +1882,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
}
/* Device appears to be operable, complete initialization */
- pcu->device_no = atomic_inc_return(&device_no) - 1;
+ pcu->device_no = atomic_inc_return(&device_no);
/*
* PCU-B devices, both GEN_1 and GEN_2 do not have OFN sensor
diff --git a/drivers/input/misc/kxtj9.c b/drivers/input/misc/kxtj9.c
index d708478bc5b..6e29349da53 100644
--- a/drivers/input/misc/kxtj9.c
+++ b/drivers/input/misc/kxtj9.c
@@ -615,8 +615,7 @@ static int kxtj9_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int kxtj9_suspend(struct device *dev)
+static int __maybe_unused kxtj9_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct kxtj9_data *tj9 = i2c_get_clientdata(client);
@@ -631,7 +630,7 @@ static int kxtj9_suspend(struct device *dev)
return 0;
}
-static int kxtj9_resume(struct device *dev)
+static int __maybe_unused kxtj9_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct kxtj9_data *tj9 = i2c_get_clientdata(client);
@@ -646,7 +645,6 @@ static int kxtj9_resume(struct device *dev)
mutex_unlock(&input_dev->mutex);
return retval;
}
-#endif
static SIMPLE_DEV_PM_OPS(kxtj9_pm_ops, kxtj9_suspend, kxtj9_resume);
diff --git a/drivers/input/misc/max77693-haptic.c b/drivers/input/misc/max77693-haptic.c
index 034093ee63b..39e930c10eb 100644
--- a/drivers/input/misc/max77693-haptic.c
+++ b/drivers/input/misc/max77693-haptic.c
@@ -309,8 +309,7 @@ static int max77693_haptic_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int max77693_haptic_suspend(struct device *dev)
+static int __maybe_unused max77693_haptic_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct max77693_haptic *haptic = platform_get_drvdata(pdev);
@@ -323,7 +322,7 @@ static int max77693_haptic_suspend(struct device *dev)
return 0;
}
-static int max77693_haptic_resume(struct device *dev)
+static int __maybe_unused max77693_haptic_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct max77693_haptic *haptic = platform_get_drvdata(pdev);
@@ -335,7 +334,6 @@ static int max77693_haptic_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(max77693_haptic_pm_ops,
max77693_haptic_suspend, max77693_haptic_resume);
diff --git a/drivers/input/misc/max8925_onkey.c b/drivers/input/misc/max8925_onkey.c
index 297e2a9169d..7c49b8d2389 100644
--- a/drivers/input/misc/max8925_onkey.c
+++ b/drivers/input/misc/max8925_onkey.c
@@ -133,8 +133,7 @@ static int max8925_onkey_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int max8925_onkey_suspend(struct device *dev)
+static int __maybe_unused max8925_onkey_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct max8925_onkey_info *info = platform_get_drvdata(pdev);
@@ -148,7 +147,7 @@ static int max8925_onkey_suspend(struct device *dev)
return 0;
}
-static int max8925_onkey_resume(struct device *dev)
+static int __maybe_unused max8925_onkey_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct max8925_onkey_info *info = platform_get_drvdata(pdev);
@@ -161,7 +160,6 @@ static int max8925_onkey_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(max8925_onkey_pm_ops, max8925_onkey_suspend, max8925_onkey_resume);
diff --git a/drivers/input/misc/max8997_haptic.c b/drivers/input/misc/max8997_haptic.c
index 5b3154edf82..d0f68728133 100644
--- a/drivers/input/misc/max8997_haptic.c
+++ b/drivers/input/misc/max8997_haptic.c
@@ -378,8 +378,7 @@ static int max8997_haptic_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int max8997_haptic_suspend(struct device *dev)
+static int __maybe_unused max8997_haptic_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct max8997_haptic *chip = platform_get_drvdata(pdev);
@@ -388,7 +387,6 @@ static int max8997_haptic_suspend(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(max8997_haptic_pm_ops, max8997_haptic_suspend, NULL);
diff --git a/drivers/input/misc/palmas-pwrbutton.c b/drivers/input/misc/palmas-pwrbutton.c
index 066c5ab632c..1f9b5ee9274 100644
--- a/drivers/input/misc/palmas-pwrbutton.c
+++ b/drivers/input/misc/palmas-pwrbutton.c
@@ -260,7 +260,6 @@ static int palmas_pwron_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
/**
* palmas_pwron_suspend() - suspend handler
* @dev: power button device
@@ -269,7 +268,7 @@ static int palmas_pwron_remove(struct platform_device *pdev)
*
* Return: 0
*/
-static int palmas_pwron_suspend(struct device *dev)
+static int __maybe_unused palmas_pwron_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct palmas_pwron *pwron = platform_get_drvdata(pdev);
@@ -290,7 +289,7 @@ static int palmas_pwron_suspend(struct device *dev)
*
* Return: 0
*/
-static int palmas_pwron_resume(struct device *dev)
+static int __maybe_unused palmas_pwron_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct palmas_pwron *pwron = platform_get_drvdata(pdev);
@@ -300,7 +299,6 @@ static int palmas_pwron_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(palmas_pwron_pm,
palmas_pwron_suspend, palmas_pwron_resume);
diff --git a/drivers/input/misc/pm8xxx-vibrator.c b/drivers/input/misc/pm8xxx-vibrator.c
index e9c77a95717..5113877153d 100644
--- a/drivers/input/misc/pm8xxx-vibrator.c
+++ b/drivers/input/misc/pm8xxx-vibrator.c
@@ -199,8 +199,7 @@ static int pm8xxx_vib_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int pm8xxx_vib_suspend(struct device *dev)
+static int __maybe_unused pm8xxx_vib_suspend(struct device *dev)
{
struct pm8xxx_vib *vib = dev_get_drvdata(dev);
@@ -209,7 +208,6 @@ static int pm8xxx_vib_suspend(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(pm8xxx_vib_pm_ops, pm8xxx_vib_suspend, NULL);
diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
index cb799177cbd..c4ca20e6322 100644
--- a/drivers/input/misc/pmic8xxx-pwrkey.c
+++ b/drivers/input/misc/pmic8xxx-pwrkey.c
@@ -53,8 +53,7 @@ static irqreturn_t pwrkey_release_irq(int irq, void *_pwr)
return IRQ_HANDLED;
}
-#ifdef CONFIG_PM_SLEEP
-static int pmic8xxx_pwrkey_suspend(struct device *dev)
+static int __maybe_unused pmic8xxx_pwrkey_suspend(struct device *dev)
{
struct pmic8xxx_pwrkey *pwrkey = dev_get_drvdata(dev);
@@ -64,7 +63,7 @@ static int pmic8xxx_pwrkey_suspend(struct device *dev)
return 0;
}
-static int pmic8xxx_pwrkey_resume(struct device *dev)
+static int __maybe_unused pmic8xxx_pwrkey_resume(struct device *dev)
{
struct pmic8xxx_pwrkey *pwrkey = dev_get_drvdata(dev);
@@ -73,7 +72,6 @@ static int pmic8xxx_pwrkey_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(pm8xxx_pwr_key_pm_ops,
pmic8xxx_pwrkey_suspend, pmic8xxx_pwrkey_resume);
diff --git a/drivers/input/misc/pwm-beeper.c b/drivers/input/misc/pwm-beeper.c
index 294aa48bad5..a28ee70ff15 100644
--- a/drivers/input/misc/pwm-beeper.c
+++ b/drivers/input/misc/pwm-beeper.c
@@ -144,8 +144,7 @@ static int pwm_beeper_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int pwm_beeper_suspend(struct device *dev)
+static int __maybe_unused pwm_beeper_suspend(struct device *dev)
{
struct pwm_beeper *beeper = dev_get_drvdata(dev);
@@ -155,7 +154,7 @@ static int pwm_beeper_suspend(struct device *dev)
return 0;
}
-static int pwm_beeper_resume(struct device *dev)
+static int __maybe_unused pwm_beeper_resume(struct device *dev)
{
struct pwm_beeper *beeper = dev_get_drvdata(dev);
@@ -170,6 +169,7 @@ static int pwm_beeper_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(pwm_beeper_pm_ops,
pwm_beeper_suspend, pwm_beeper_resume);
+#ifdef CONFIG_PM_SLEEP
#define PWM_BEEPER_PM_OPS (&pwm_beeper_pm_ops)
#else
#define PWM_BEEPER_PM_OPS NULL
diff --git a/drivers/input/misc/sirfsoc-onkey.c b/drivers/input/misc/sirfsoc-onkey.c
index 4faf9f8d124..9d5b89befe6 100644
--- a/drivers/input/misc/sirfsoc-onkey.c
+++ b/drivers/input/misc/sirfsoc-onkey.c
@@ -179,8 +179,7 @@ static int sirfsoc_pwrc_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int sirfsoc_pwrc_resume(struct device *dev)
+static int __maybe_unused sirfsoc_pwrc_resume(struct device *dev)
{
struct sirfsoc_pwrc_drvdata *pwrcdrv = dev_get_drvdata(dev);
struct input_dev *input = pwrcdrv->input;
@@ -196,7 +195,6 @@ static int sirfsoc_pwrc_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(sirfsoc_pwrc_pm_ops, NULL, sirfsoc_pwrc_resume);
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index ccd6dd18f8f..fc17b9592f5 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -157,8 +157,7 @@ static void twl4030_vibra_close(struct input_dev *input)
}
/*** Module ***/
-#ifdef CONFIG_PM_SLEEP
-static int twl4030_vibra_suspend(struct device *dev)
+static int __maybe_unused twl4030_vibra_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct vibra_info *info = platform_get_drvdata(pdev);
@@ -169,12 +168,11 @@ static int twl4030_vibra_suspend(struct device *dev)
return 0;
}
-static int twl4030_vibra_resume(struct device *dev)
+static int __maybe_unused twl4030_vibra_resume(struct device *dev)
{
vibra_disable_leds();
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(twl4030_vibra_pm_ops,
twl4030_vibra_suspend, twl4030_vibra_resume);
diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c
index 96e0e0c0ccb..0e0d094df2e 100644
--- a/drivers/input/misc/twl6040-vibra.c
+++ b/drivers/input/misc/twl6040-vibra.c
@@ -236,8 +236,7 @@ static void twl6040_vibra_close(struct input_dev *input)
mutex_unlock(&info->mutex);
}
-#ifdef CONFIG_PM_SLEEP
-static int twl6040_vibra_suspend(struct device *dev)
+static int __maybe_unused twl6040_vibra_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct vibra_info *info = platform_get_drvdata(pdev);
@@ -251,7 +250,6 @@ static int twl6040_vibra_suspend(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(twl6040_vibra_pm_ops, twl6040_vibra_suspend, NULL);
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index 366fc7ad5eb..d8b46b0f2db 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -215,6 +215,36 @@ config MOUSE_CYAPA
To compile this driver as a module, choose M here: the module will be
called cyapa.
+config MOUSE_ELAN_I2C
+ tristate "ELAN I2C Touchpad support"
+ depends on I2C
+ help
+ This driver adds support for Elan I2C/SMBus trackpads.
+
+ Say Y here if you have an ELAN I2C/SMBus touchpad.
+
+ To compile this driver as a module, choose M here: the module will be
+ called elan_i2c.
+
+config MOUSE_ELAN_I2C_I2C
+ bool "Enable I2C support"
+ depends on MOUSE_ELAN_I2C
+ default y
+ help
+ Say Y here if the Elan touchpad in your system is connected to
+ a standard I2C controller.
+
+ If unsure, say Y.
+
+config MOUSE_ELAN_I2C_SMBUS
+ bool "Enable SMbus support"
+ depends on MOUSE_ELAN_I2C
+ help
+ Say Y here if the Elan touchpad in your system is connected to
+ an SMBus adapter.
+
+ If unsure, say Y.
+
config MOUSE_INPORT
tristate "InPort/MS/ATIXL busmouse"
depends on ISA
diff --git a/drivers/input/mouse/Makefile b/drivers/input/mouse/Makefile
index dda507f8b3a..560003dcac3 100644
--- a/drivers/input/mouse/Makefile
+++ b/drivers/input/mouse/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_MOUSE_APPLETOUCH) += appletouch.o
obj-$(CONFIG_MOUSE_ATARI) += atarimouse.o
obj-$(CONFIG_MOUSE_BCM5974) += bcm5974.o
obj-$(CONFIG_MOUSE_CYAPA) += cyapa.o
+obj-$(CONFIG_MOUSE_ELAN_I2C) += elan_i2c.o
obj-$(CONFIG_MOUSE_GPIO) += gpio_mouse.o
obj-$(CONFIG_MOUSE_INPORT) += inport.o
obj-$(CONFIG_MOUSE_LOGIBM) += logibm.o
@@ -34,3 +35,7 @@ psmouse-$(CONFIG_MOUSE_PS2_SENTELIC) += sentelic.o
psmouse-$(CONFIG_MOUSE_PS2_TRACKPOINT) += trackpoint.o
psmouse-$(CONFIG_MOUSE_PS2_TOUCHKIT) += touchkit_ps2.o
psmouse-$(CONFIG_MOUSE_PS2_CYPRESS) += cypress_ps2.o
+
+elan_i2c-objs := elan_i2c_core.o
+elan_i2c-$(CONFIG_MOUSE_ELAN_I2C_I2C) += elan_i2c_i2c.o
+elan_i2c-$(CONFIG_MOUSE_ELAN_I2C_SMBUS) += elan_i2c_smbus.o
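+
+# elan_i2c.ko is always built from elan_i2c_core.o; the I2C and/or SMBus
+# transport glue above is linked in only when the corresponding Kconfig
+# option is enabled.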
diff --git a/drivers/input/mouse/cyapa.c b/drivers/input/mouse/cyapa.c
index b409c3d7d4f..1bece8cad46 100644
--- a/drivers/input/mouse/cyapa.c
+++ b/drivers/input/mouse/cyapa.c
@@ -6,7 +6,7 @@
* Daniel Kurtz <djkurtz@chromium.org>
* Benson Leung <bleung@chromium.org>
*
- * Copyright (C) 2011-2012 Cypress Semiconductor, Inc.
+ * Copyright (C) 2011-2014 Cypress Semiconductor, Inc.
* Copyright (C) 2011-2012 Google, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -206,7 +206,6 @@ struct cyapa {
struct i2c_client *client;
struct input_dev *input;
char phys[32]; /* device physical location */
- int irq;
bool irq_wake; /* irq wake is enabled */
bool smbus;
@@ -422,8 +421,8 @@ static ssize_t cyapa_read_block(struct cyapa *cyapa, u8 cmd_idx, u8 *values)
*/
static int cyapa_get_state(struct cyapa *cyapa)
{
- int ret;
u8 status[BL_STATUS_SIZE];
+ int error;
cyapa->state = CYAPA_STATE_NO_DEVICE;
@@ -433,18 +432,18 @@ static int cyapa_get_state(struct cyapa *cyapa)
* If the device is in operation mode, this will be the DATA regs.
*
*/
- ret = cyapa_i2c_reg_read_block(cyapa, BL_HEAD_OFFSET, BL_STATUS_SIZE,
- status);
+ error = cyapa_i2c_reg_read_block(cyapa, BL_HEAD_OFFSET, BL_STATUS_SIZE,
+ status);
/*
* On smbus systems in OP mode, the i2c_reg_read will fail with
* -ETIMEDOUT. In this case, try again using the smbus equivalent
* command. This should return a BL_HEAD indicating CYAPA_STATE_OP.
*/
- if (cyapa->smbus && (ret == -ETIMEDOUT || ret == -ENXIO))
- ret = cyapa_read_block(cyapa, CYAPA_CMD_BL_STATUS, status);
+ if (cyapa->smbus && (error == -ETIMEDOUT || error == -ENXIO))
+ error = cyapa_read_block(cyapa, CYAPA_CMD_BL_STATUS, status);
- if (ret != BL_STATUS_SIZE)
+ if (error != BL_STATUS_SIZE)
goto error;
if ((status[REG_OP_STATUS] & OP_STATUS_SRC) == OP_STATUS_SRC) {
@@ -454,7 +453,7 @@ static int cyapa_get_state(struct cyapa *cyapa)
cyapa->state = CYAPA_STATE_OP;
break;
default:
- ret = -EAGAIN;
+ error = -EAGAIN;
goto error;
}
} else {
@@ -468,7 +467,7 @@ static int cyapa_get_state(struct cyapa *cyapa)
return 0;
error:
- return (ret < 0) ? ret : -EAGAIN;
+ return (error < 0) ? error : -EAGAIN;
}
/*
@@ -487,31 +486,31 @@ error:
*/
static int cyapa_poll_state(struct cyapa *cyapa, unsigned int timeout)
{
- int ret;
+ int error;
int tries = timeout / 100;
- ret = cyapa_get_state(cyapa);
- while ((ret || cyapa->state >= CYAPA_STATE_BL_BUSY) && tries--) {
+ error = cyapa_get_state(cyapa);
+ while ((error || cyapa->state >= CYAPA_STATE_BL_BUSY) && tries--) {
msleep(100);
- ret = cyapa_get_state(cyapa);
+ error = cyapa_get_state(cyapa);
}
- return (ret == -EAGAIN || ret == -ETIMEDOUT) ? -ETIMEDOUT : ret;
+ return (error == -EAGAIN || error == -ETIMEDOUT) ? -ETIMEDOUT : error;
}
static int cyapa_bl_deactivate(struct cyapa *cyapa)
{
- int ret;
+ int error;
- ret = cyapa_i2c_reg_write_block(cyapa, 0, sizeof(bl_deactivate),
- bl_deactivate);
- if (ret < 0)
- return ret;
+ error = cyapa_i2c_reg_write_block(cyapa, 0, sizeof(bl_deactivate),
+ bl_deactivate);
+ if (error)
+ return error;
/* wait for bootloader to switch to idle state; should take < 100ms */
msleep(100);
- ret = cyapa_poll_state(cyapa, 500);
- if (ret < 0)
- return ret;
+ error = cyapa_poll_state(cyapa, 500);
+ if (error)
+ return error;
if (cyapa->state != CYAPA_STATE_BL_IDLE)
return -EAGAIN;
return 0;
@@ -532,11 +531,11 @@ static int cyapa_bl_deactivate(struct cyapa *cyapa)
*/
static int cyapa_bl_exit(struct cyapa *cyapa)
{
- int ret;
+ int error;
- ret = cyapa_i2c_reg_write_block(cyapa, 0, sizeof(bl_exit), bl_exit);
- if (ret < 0)
- return ret;
+ error = cyapa_i2c_reg_write_block(cyapa, 0, sizeof(bl_exit), bl_exit);
+ if (error)
+ return error;
/*
* Wait for bootloader to exit, and operation mode to start.
@@ -548,9 +547,9 @@ static int cyapa_bl_exit(struct cyapa *cyapa)
* updated to new firmware, it must first calibrate its sensors, which
* can take up to an additional 2 seconds.
*/
- ret = cyapa_poll_state(cyapa, 2000);
- if (ret < 0)
- return ret;
+ error = cyapa_poll_state(cyapa, 2000);
+ if (error < 0)
+ return error;
if (cyapa->state != CYAPA_STATE_OP)
return -EAGAIN;
@@ -577,10 +576,13 @@ static int cyapa_set_power_mode(struct cyapa *cyapa, u8 power_mode)
power = ret & ~PWR_MODE_MASK;
power |= power_mode & PWR_MODE_MASK;
ret = cyapa_write_byte(cyapa, CYAPA_CMD_POWER_MODE, power);
- if (ret < 0)
+ if (ret < 0) {
dev_err(dev, "failed to set power_mode 0x%02x err = %d\n",
power_mode, ret);
- return ret;
+ return ret;
+ }
+
+ return 0;
}
static int cyapa_get_query_data(struct cyapa *cyapa)
@@ -637,28 +639,28 @@ static int cyapa_check_is_operational(struct cyapa *cyapa)
{
struct device *dev = &cyapa->client->dev;
static const char unique_str[] = "CYTRA";
- int ret;
+ int error;
- ret = cyapa_poll_state(cyapa, 2000);
- if (ret < 0)
- return ret;
+ error = cyapa_poll_state(cyapa, 2000);
+ if (error)
+ return error;
switch (cyapa->state) {
case CYAPA_STATE_BL_ACTIVE:
- ret = cyapa_bl_deactivate(cyapa);
- if (ret)
- return ret;
+ error = cyapa_bl_deactivate(cyapa);
+ if (error)
+ return error;
/* Fallthrough state */
case CYAPA_STATE_BL_IDLE:
- ret = cyapa_bl_exit(cyapa);
- if (ret)
- return ret;
+ error = cyapa_bl_exit(cyapa);
+ if (error)
+ return error;
/* Fallthrough state */
case CYAPA_STATE_OP:
- ret = cyapa_get_query_data(cyapa);
- if (ret < 0)
- return ret;
+ error = cyapa_get_query_data(cyapa);
+ if (error)
+ return error;
/* only support firmware protocol gen3 */
if (cyapa->gen != CYAPA_GEN3) {
@@ -753,18 +755,42 @@ static u8 cyapa_check_adapter_functionality(struct i2c_client *client)
return ret;
}
+static int cyapa_open(struct input_dev *input)
+{
+ struct cyapa *cyapa = input_get_drvdata(input);
+ struct i2c_client *client = cyapa->client;
+ int error;
+
+ error = cyapa_set_power_mode(cyapa, PWR_MODE_FULL_ACTIVE);
+ if (error) {
+ dev_err(&client->dev, "set active power failed: %d\n", error);
+ return error;
+ }
+
+ enable_irq(client->irq);
+ return 0;
+}
+
+static void cyapa_close(struct input_dev *input)
+{
+ struct cyapa *cyapa = input_get_drvdata(input);
+
+ disable_irq(cyapa->client->irq);
+ cyapa_set_power_mode(cyapa, PWR_MODE_OFF);
+}
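+
+/*
+ * With open/close wired up this way, the touchpad is powered up and its
+ * IRQ enabled only while the input node is held open; probe() below
+ * leaves the device quiesced (PWR_MODE_OFF) with the IRQ disabled.
+ */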
+
static int cyapa_create_input_dev(struct cyapa *cyapa)
{
struct device *dev = &cyapa->client->dev;
- int ret;
struct input_dev *input;
+ int error;
if (!cyapa->physical_size_x || !cyapa->physical_size_y)
return -EINVAL;
- input = cyapa->input = input_allocate_device();
+ input = devm_input_allocate_device(dev);
if (!input) {
- dev_err(dev, "allocate memory for input device failed\n");
+ dev_err(dev, "failed to allocate memory for input device.\n");
return -ENOMEM;
}
@@ -772,14 +798,17 @@ static int cyapa_create_input_dev(struct cyapa *cyapa)
input->phys = cyapa->phys;
input->id.bustype = BUS_I2C;
input->id.version = 1;
- input->id.product = 0; /* means any product in eventcomm. */
+ input->id.product = 0; /* Means any product in eventcomm. */
input->dev.parent = &cyapa->client->dev;
+ input->open = cyapa_open;
+ input->close = cyapa_close;
+
input_set_drvdata(input, cyapa);
__set_bit(EV_ABS, input->evbit);
- /* finger position */
+ /* Finger position */
input_set_abs_params(input, ABS_MT_POSITION_X, 0, cyapa->max_abs_x, 0,
0);
input_set_abs_params(input, ABS_MT_POSITION_Y, 0, cyapa->max_abs_y, 0,
@@ -801,35 +830,25 @@ static int cyapa_create_input_dev(struct cyapa *cyapa)
if (cyapa->btn_capability == CAPABILITY_LEFT_BTN_MASK)
__set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
- /* handle pointer emulation and unused slots in core */
- ret = input_mt_init_slots(input, CYAPA_MAX_MT_SLOTS,
- INPUT_MT_POINTER | INPUT_MT_DROP_UNUSED);
- if (ret) {
- dev_err(dev, "allocate memory for MT slots failed, %d\n", ret);
- goto err_free_device;
+ /* Handle pointer emulation and unused slots in core */
+ error = input_mt_init_slots(input, CYAPA_MAX_MT_SLOTS,
+ INPUT_MT_POINTER | INPUT_MT_DROP_UNUSED);
+ if (error) {
+ dev_err(dev, "failed to initialize MT slots: %d\n", error);
+ return error;
}
- /* Register the device in input subsystem */
- ret = input_register_device(input);
- if (ret) {
- dev_err(dev, "input device register failed, %d\n", ret);
- goto err_free_device;
- }
+ cyapa->input = input;
return 0;
-
-err_free_device:
- input_free_device(input);
- cyapa->input = NULL;
- return ret;
}
static int cyapa_probe(struct i2c_client *client,
const struct i2c_device_id *dev_id)
{
- int ret;
- u8 adapter_func;
- struct cyapa *cyapa;
struct device *dev = &client->dev;
+ struct cyapa *cyapa;
+ u8 adapter_func;
+ int error;
adapter_func = cyapa_check_adapter_functionality(client);
if (adapter_func == CYAPA_ADAPTER_FUNC_NONE) {
@@ -837,11 +856,9 @@ static int cyapa_probe(struct i2c_client *client,
return -EIO;
}
- cyapa = kzalloc(sizeof(struct cyapa), GFP_KERNEL);
- if (!cyapa) {
- dev_err(dev, "allocate memory for cyapa failed\n");
+ cyapa = devm_kzalloc(dev, sizeof(struct cyapa), GFP_KERNEL);
+ if (!cyapa)
return -ENOMEM;
- }
cyapa->gen = CYAPA_GEN3;
cyapa->client = client;
@@ -852,67 +869,61 @@ static int cyapa_probe(struct i2c_client *client,
/* i2c isn't supported, use smbus */
if (adapter_func == CYAPA_ADAPTER_FUNC_SMBUS)
cyapa->smbus = true;
+
cyapa->state = CYAPA_STATE_NO_DEVICE;
- ret = cyapa_check_is_operational(cyapa);
- if (ret) {
- dev_err(dev, "device not operational, %d\n", ret);
- goto err_mem_free;
- }
- ret = cyapa_create_input_dev(cyapa);
- if (ret) {
- dev_err(dev, "create input_dev instance failed, %d\n", ret);
- goto err_mem_free;
+ error = cyapa_check_is_operational(cyapa);
+ if (error) {
+ dev_err(dev, "device not operational, %d\n", error);
+ return error;
}
- ret = cyapa_set_power_mode(cyapa, PWR_MODE_FULL_ACTIVE);
- if (ret) {
- dev_err(dev, "set active power failed, %d\n", ret);
- goto err_unregister_device;
+ /* Power down the device until we need it */
+ error = cyapa_set_power_mode(cyapa, PWR_MODE_OFF);
+ if (error) {
+ dev_err(dev, "failed to quiesce the device: %d\n", error);
+ return error;
}
- cyapa->irq = client->irq;
- ret = request_threaded_irq(cyapa->irq,
- NULL,
- cyapa_irq,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- "cyapa",
- cyapa);
- if (ret) {
- dev_err(dev, "IRQ request failed: %d\n, ", ret);
- goto err_unregister_device;
+ error = cyapa_create_input_dev(cyapa);
+ if (error)
+ return error;
+
+ error = devm_request_threaded_irq(dev, client->irq,
+ NULL, cyapa_irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "cyapa", cyapa);
+ if (error) {
+ dev_err(dev, "failed to request threaded irq: %d\n", error);
+ return error;
}
- return 0;
+ /* Disable IRQ until the device is opened */
+ disable_irq(client->irq);
-err_unregister_device:
- input_unregister_device(cyapa->input);
-err_mem_free:
- kfree(cyapa);
-
- return ret;
-}
-
-static int cyapa_remove(struct i2c_client *client)
-{
- struct cyapa *cyapa = i2c_get_clientdata(client);
-
- free_irq(cyapa->irq, cyapa);
- input_unregister_device(cyapa->input);
- cyapa_set_power_mode(cyapa, PWR_MODE_OFF);
- kfree(cyapa);
+ /* Register the device in input subsystem */
+ error = input_register_device(cyapa->input);
+ if (error) {
+ dev_err(dev, "failed to register input device: %d\n", error);
+ return error;
+ }
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int cyapa_suspend(struct device *dev)
+static int __maybe_unused cyapa_suspend(struct device *dev)
{
- int ret;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct cyapa *cyapa = i2c_get_clientdata(client);
+ struct input_dev *input = cyapa->input;
u8 power_mode;
- struct cyapa *cyapa = dev_get_drvdata(dev);
+ int error;
- disable_irq(cyapa->irq);
+ error = mutex_lock_interruptible(&input->mutex);
+ if (error)
+ return error;
+
+ disable_irq(client->irq);
/*
* Set trackpad device to idle mode if wakeup is allowed,
@@ -920,31 +931,44 @@ static int cyapa_suspend(struct device *dev)
*/
power_mode = device_may_wakeup(dev) ? PWR_MODE_IDLE
: PWR_MODE_OFF;
- ret = cyapa_set_power_mode(cyapa, power_mode);
- if (ret < 0)
- dev_err(dev, "set power mode failed, %d\n", ret);
+ error = cyapa_set_power_mode(cyapa, power_mode);
+ if (error)
+ dev_err(dev, "resume: set power mode to %d failed: %d\n",
+ power_mode, error);
if (device_may_wakeup(dev))
- cyapa->irq_wake = (enable_irq_wake(cyapa->irq) == 0);
+ cyapa->irq_wake = (enable_irq_wake(client->irq) == 0);
+
+ mutex_unlock(&input->mutex);
+
return 0;
}
-static int cyapa_resume(struct device *dev)
+static int __maybe_unused cyapa_resume(struct device *dev)
{
- int ret;
- struct cyapa *cyapa = dev_get_drvdata(dev);
+ struct i2c_client *client = to_i2c_client(dev);
+ struct cyapa *cyapa = i2c_get_clientdata(client);
+ struct input_dev *input = cyapa->input;
+ u8 power_mode;
+ int error;
+
+ mutex_lock(&input->mutex);
if (device_may_wakeup(dev) && cyapa->irq_wake)
- disable_irq_wake(cyapa->irq);
+ disable_irq_wake(client->irq);
+
+ power_mode = input->users ? PWR_MODE_FULL_ACTIVE : PWR_MODE_OFF;
+ error = cyapa_set_power_mode(cyapa, power_mode);
+ if (error)
+ dev_warn(dev, "resume: set power mode to %d failed: %d\n",
+ power_mode, error);
+
+ enable_irq(client->irq);
- ret = cyapa_set_power_mode(cyapa, PWR_MODE_FULL_ACTIVE);
- if (ret)
- dev_warn(dev, "resume active power failed, %d\n", ret);
+ mutex_unlock(&input->mutex);
- enable_irq(cyapa->irq);
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(cyapa_pm_ops, cyapa_suspend, cyapa_resume);
@@ -962,7 +986,6 @@ static struct i2c_driver cyapa_driver = {
},
.probe = cyapa_probe,
- .remove = cyapa_remove,
.id_table = cyapa_id_table,
};
diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h
new file mode 100644
index 00000000000..2e838626205
--- /dev/null
+++ b/drivers/input/mouse/elan_i2c.h
@@ -0,0 +1,86 @@
+/*
+ * Elan I2C/SMBus Touchpad driver
+ *
+ * Copyright (c) 2013 ELAN Microelectronics Corp.
+ *
+ * Author: 林政維 (Duson Lin) <dusonlin@emc.com.tw>
+ * Version: 1.5.5
+ *
+ * Based on cyapa driver:
+ * copyright (c) 2011-2012 Cypress Semiconductor, Inc.
+ * copyright (c) 2011-2012 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Trademarks are the property of their respective owners.
+ */
+
+#ifndef _ELAN_I2C_H
+#define _ELAN_I2C_H
+
+#include <linux/types.h>
+
+#define ETP_ENABLE_ABS 0x0001
+#define ETP_ENABLE_CALIBRATE 0x0002
+#define ETP_DISABLE_CALIBRATE 0x0000
+#define ETP_DISABLE_POWER 0x0001
+
+/* IAP Firmware handling */
+#define ETP_FW_NAME "elan_i2c.bin"
+#define ETP_IAP_START_ADDR 0x0083
+#define ETP_FW_IAP_PAGE_ERR (1 << 5)
+#define ETP_FW_IAP_INTF_ERR (1 << 4)
+#define ETP_FW_PAGE_SIZE 64
+#define ETP_FW_PAGE_COUNT 768
+#define ETP_FW_SIZE (ETP_FW_PAGE_SIZE * ETP_FW_PAGE_COUNT)
+
+struct i2c_client;
+struct completion;
+
+enum tp_mode {
+ IAP_MODE = 1,
+ MAIN_MODE
+};
+
+struct elan_transport_ops {
+ int (*initialize)(struct i2c_client *client);
+ int (*sleep_control)(struct i2c_client *, bool sleep);
+ int (*power_control)(struct i2c_client *, bool enable);
+ int (*set_mode)(struct i2c_client *client, u8 mode);
+
+ int (*calibrate)(struct i2c_client *client);
+ int (*calibrate_result)(struct i2c_client *client, u8 *val);
+
+ int (*get_baseline_data)(struct i2c_client *client,
+ bool max_baseline, u8 *value);
+
+ int (*get_version)(struct i2c_client *client, bool iap, u8 *version);
+ int (*get_sm_version)(struct i2c_client *client, u8 *version);
+ int (*get_checksum)(struct i2c_client *client, bool iap, u16 *csum);
+ int (*get_product_id)(struct i2c_client *client, u8 *id);
+
+ int (*get_max)(struct i2c_client *client,
+ unsigned int *max_x, unsigned int *max_y);
+ int (*get_resolution)(struct i2c_client *client,
+ u8 *hw_res_x, u8 *hw_res_y);
+ int (*get_num_traces)(struct i2c_client *client,
+ unsigned int *x_tracenum,
+ unsigned int *y_tracenum);
+
+ int (*iap_get_mode)(struct i2c_client *client, enum tp_mode *mode);
+ int (*iap_reset)(struct i2c_client *client);
+
+ int (*prepare_fw_update)(struct i2c_client *client);
+ int (*write_fw_block)(struct i2c_client *client,
+ const u8 *page, u16 checksum, int idx);
+ int (*finish_fw_update)(struct i2c_client *client,
+ struct completion *reset_done);
+
+ int (*get_report)(struct i2c_client *client, u8 *report);
+};
+
+extern const struct elan_transport_ops elan_smbus_ops, elan_i2c_ops;
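+
+/*
+ * Both transports implement every hook above; elan_i2c_core selects
+ * elan_i2c_ops or elan_smbus_ops at probe time based on the adapter
+ * functionality reported by i2c_check_functionality().
+ */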
+
+#endif /* _ELAN_I2C_H */
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
new file mode 100644
index 00000000000..0cb2be48d53
--- /dev/null
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -0,0 +1,1137 @@
+/*
+ * Elan I2C/SMBus Touchpad driver
+ *
+ * Copyright (c) 2013 ELAN Microelectronics Corp.
+ *
+ * Author: 林政維 (Duson Lin) <dusonlin@emc.com.tw>
+ * Version: 1.5.5
+ *
+ * Based on cyapa driver:
+ * copyright (c) 2011-2012 Cypress Semiconductor, Inc.
+ * copyright (c) 2011-2012 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Trademarks are the property of their respective owners.
+ */
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/input/mt.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/input.h>
+#include <linux/uaccess.h>
+#include <linux/jiffies.h>
+#include <linux/completion.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <asm/unaligned.h>
+
+#include "elan_i2c.h"
+
+#define DRIVER_NAME "elan_i2c"
+#define ELAN_DRIVER_VERSION "1.5.5"
+#define ETP_PRESSURE_OFFSET 25
+#define ETP_MAX_PRESSURE 255
+#define ETP_FWIDTH_REDUCE 90
+#define ETP_FINGER_WIDTH 15
+#define ETP_RETRY_COUNT 3
+
+#define ETP_MAX_FINGERS 5
+#define ETP_FINGER_DATA_LEN 5
+#define ETP_REPORT_ID 0x5D
+#define ETP_REPORT_ID_OFFSET 2
+#define ETP_TOUCH_INFO_OFFSET 3
+#define ETP_FINGER_DATA_OFFSET 4
+#define ETP_MAX_REPORT_LEN 34
+
+/* The main device structure */
+struct elan_tp_data {
+ struct i2c_client *client;
+ struct input_dev *input;
+ struct regulator *vcc;
+
+ const struct elan_transport_ops *ops;
+
+ /* for fw update */
+ struct completion fw_completion;
+ bool in_fw_update;
+
+ struct mutex sysfs_mutex;
+
+ unsigned int max_x;
+ unsigned int max_y;
+ unsigned int width_x;
+ unsigned int width_y;
+ unsigned int x_res;
+ unsigned int y_res;
+
+ u8 product_id;
+ u8 fw_version;
+ u8 sm_version;
+ u8 iap_version;
+ u16 fw_checksum;
+
+ u8 mode;
+
+ bool irq_wake;
+
+ u8 min_baseline;
+ u8 max_baseline;
+ bool baseline_ready;
+};
+
+static int elan_enable_power(struct elan_tp_data *data)
+{
+ int repeat = ETP_RETRY_COUNT;
+ int error;
+
+ error = regulator_enable(data->vcc);
+ if (error) {
+ dev_err(&data->client->dev,
+ "Failed to enable regulator: %d\n", error);
+ return error;
+ }
+
+ do {
+ error = data->ops->power_control(data->client, true);
+ if (error >= 0)
+ return 0;
+
+ msleep(30);
+ } while (--repeat > 0);
+
+ return error;
+}
+
+static int elan_disable_power(struct elan_tp_data *data)
+{
+ int repeat = ETP_RETRY_COUNT;
+ int error;
+
+ do {
+ error = data->ops->power_control(data->client, false);
+ if (!error) {
+ error = regulator_disable(data->vcc);
+ if (error) {
+ dev_err(&data->client->dev,
+ "Failed to disable regulator: %d\n",
+ error);
+ /* Attempt to power the chip back up */
+ data->ops->power_control(data->client, true);
+ break;
+ }
+
+ return 0;
+ }
+
+ msleep(30);
+ } while (--repeat > 0);
+
+ return error;
+}
+
+static int elan_sleep(struct elan_tp_data *data)
+{
+ int repeat = ETP_RETRY_COUNT;
+ int error;
+
+ do {
+ error = data->ops->sleep_control(data->client, true);
+ if (!error)
+ return 0;
+
+ msleep(30);
+ } while (--repeat > 0);
+
+ return error;
+}
+
+static int __elan_initialize(struct elan_tp_data *data)
+{
+ struct i2c_client *client = data->client;
+ int error;
+
+ error = data->ops->initialize(client);
+ if (error) {
+ dev_err(&client->dev, "device initialize failed: %d\n", error);
+ return error;
+ }
+
+ data->mode |= ETP_ENABLE_ABS;
+ error = data->ops->set_mode(client, data->mode);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to switch to absolute mode: %d\n", error);
+ return error;
+ }
+
+ error = data->ops->sleep_control(client, false);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to wake device up: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int elan_initialize(struct elan_tp_data *data)
+{
+ int repeat = ETP_RETRY_COUNT;
+ int error;
+
+ do {
+ error = __elan_initialize(data);
+ if (!error)
+ return 0;
+
+ msleep(30);
+ } while (--repeat > 0);
+
+ return error;
+}
+
+static int elan_query_device_info(struct elan_tp_data *data)
+{
+ int error;
+
+ error = data->ops->get_product_id(data->client, &data->product_id);
+ if (error)
+ return error;
+
+ error = data->ops->get_version(data->client, false, &data->fw_version);
+ if (error)
+ return error;
+
+ error = data->ops->get_checksum(data->client, false,
+ &data->fw_checksum);
+ if (error)
+ return error;
+
+ error = data->ops->get_sm_version(data->client, &data->sm_version);
+ if (error)
+ return error;
+
+ error = data->ops->get_version(data->client, true, &data->iap_version);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static unsigned int elan_convert_resolution(u8 val)
+{
+ /*
+ * (value from firmware) * 10 + 790 = dpi
+ *
+ * We also have to convert dpi to dots/mm (*10/254 to avoid floating
+ * point).
+ */
+
+ return ((int)(char)val * 10 + 790) * 10 / 254;
+}
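+
+/*
+ * Worked example (illustrative): a firmware value of 0 gives
+ * (0 * 10 + 790) = 790 dpi, i.e. 790 * 10 / 254 = 31 dots/mm;
+ * a value of 21 gives 1000 dpi, i.e. 39 dots/mm.
+ */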
+
+static int elan_query_device_parameters(struct elan_tp_data *data)
+{
+ unsigned int x_traces, y_traces;
+ u8 hw_x_res, hw_y_res;
+ int error;
+
+ error = data->ops->get_max(data->client, &data->max_x, &data->max_y);
+ if (error)
+ return error;
+
+ error = data->ops->get_num_traces(data->client, &x_traces, &y_traces);
+ if (error)
+ return error;
+
+ data->width_x = data->max_x / x_traces;
+ data->width_y = data->max_y / y_traces;
+
+ error = data->ops->get_resolution(data->client, &hw_x_res, &hw_y_res);
+ if (error)
+ return error;
+
+ data->x_res = elan_convert_resolution(hw_x_res);
+ data->y_res = elan_convert_resolution(hw_y_res);
+
+ return 0;
+}
+
+/*
+ **********************************************************
+ * IAP firmware updater related routines
+ **********************************************************
+ */
+static int elan_write_fw_block(struct elan_tp_data *data,
+ const u8 *page, u16 checksum, int idx)
+{
+ int retry = ETP_RETRY_COUNT;
+ int error;
+
+ do {
+ error = data->ops->write_fw_block(data->client,
+ page, checksum, idx);
+ if (!error)
+ return 0;
+
+ dev_dbg(&data->client->dev,
+ "IAP retrying page %d (error: %d)\n", idx, error);
+ } while (--retry > 0);
+
+ return error;
+}
+
+static int __elan_update_firmware(struct elan_tp_data *data,
+ const struct firmware *fw)
+{
+ struct i2c_client *client = data->client;
+ struct device *dev = &client->dev;
+ int i, j;
+ int error;
+ u16 iap_start_addr;
+ u16 boot_page_count;
+ u16 sw_checksum = 0, fw_checksum = 0;
+
+ error = data->ops->prepare_fw_update(client);
+ if (error)
+ return error;
+
+ iap_start_addr = get_unaligned_le16(&fw->data[ETP_IAP_START_ADDR * 2]);
+
+ boot_page_count = (iap_start_addr * 2) / ETP_FW_PAGE_SIZE;
+ for (i = boot_page_count; i < ETP_FW_PAGE_COUNT; i++) {
+ u16 checksum = 0;
+ const u8 *page = &fw->data[i * ETP_FW_PAGE_SIZE];
+
+ for (j = 0; j < ETP_FW_PAGE_SIZE; j += 2)
+ checksum += ((page[j + 1] << 8) | page[j]);
+
+ error = elan_write_fw_block(data, page, checksum, i);
+ if (error) {
+ dev_err(dev, "write page %d fail: %d\n", i, error);
+ return error;
+ }
+
+ sw_checksum += checksum;
+ }
+
+ /* Wait for the WDT reset and power-on reset to complete */
+ msleep(600);
+
+ error = data->ops->finish_fw_update(client, &data->fw_completion);
+ if (error)
+ return error;
+
+ error = data->ops->get_checksum(client, true, &fw_checksum);
+ if (error)
+ return error;
+
+ if (sw_checksum != fw_checksum) {
+ dev_err(dev, "checksum diff sw=[%04X], fw=[%04X]\n",
+ sw_checksum, fw_checksum);
+ return -EIO;
+ }
+
+ return 0;
+}
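+
+/*
+ * Checksum note (descriptive): each 64-byte page is summed as 32
+ * little-endian 16-bit words; that per-page checksum is sent along with
+ * the page, and the running sum over all written pages (sw_checksum) is
+ * compared against the checksum the controller reports once IAP ends.
+ */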
+
+static int elan_update_firmware(struct elan_tp_data *data,
+ const struct firmware *fw)
+{
+ struct i2c_client *client = data->client;
+ int retval;
+
+ dev_dbg(&client->dev, "Starting firmware update....\n");
+
+ disable_irq(client->irq);
+ data->in_fw_update = true;
+
+ retval = __elan_update_firmware(data, fw);
+ if (retval) {
+ dev_err(&client->dev, "firmware update failed: %d\n", retval);
+ data->ops->iap_reset(client);
+ } else {
+ /* Reinitialize TP after fw is updated */
+ elan_initialize(data);
+ elan_query_device_info(data);
+ }
+
+ data->in_fw_update = false;
+ enable_irq(client->irq);
+
+ return retval;
+}
+
+/*
+ *******************************************************************
+ * SYSFS attributes
+ *******************************************************************
+ */
+static ssize_t elan_sysfs_read_fw_checksum(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elan_tp_data *data = i2c_get_clientdata(client);
+
+ return sprintf(buf, "0x%04x\n", data->fw_checksum);
+}
+
+static ssize_t elan_sysfs_read_product_id(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elan_tp_data *data = i2c_get_clientdata(client);
+
+ return sprintf(buf, "%d.0\n", data->product_id);
+}
+
+static ssize_t elan_sysfs_read_fw_ver(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elan_tp_data *data = i2c_get_clientdata(client);
+
+ return sprintf(buf, "%d.0\n", data->fw_version);
+}
+
+static ssize_t elan_sysfs_read_sm_ver(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elan_tp_data *data = i2c_get_clientdata(client);
+
+ return sprintf(buf, "%d.0\n", data->sm_version);
+}
+
+static ssize_t elan_sysfs_read_iap_ver(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elan_tp_data *data = i2c_get_clientdata(client);
+
+ return sprintf(buf, "%d.0\n", data->iap_version);
+}
+
+static ssize_t elan_sysfs_update_fw(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elan_tp_data *data = i2c_get_clientdata(client);
+ const struct firmware *fw;
+ int error;
+
+ error = request_firmware(&fw, ETP_FW_NAME, dev);
+ if (error) {
+ dev_err(dev, "cannot load firmware %s: %d\n",
+ ETP_FW_NAME, error);
+ return error;
+ }
+
+ /* Firmware must be exactly ETP_FW_PAGE_COUNT * ETP_FW_PAGE_SIZE bytes */
+ if (fw->size != ETP_FW_SIZE) {
+ dev_err(dev, "invalid firmware size = %zu, expected %d.\n",
+ fw->size, ETP_FW_SIZE);
+ error = -EBADF;
+ goto out_release_fw;
+ }
+
+ error = mutex_lock_interruptible(&data->sysfs_mutex);
+ if (error)
+ goto out_release_fw;
+
+ error = elan_update_firmware(data, fw);
+
+ mutex_unlock(&data->sysfs_mutex);
+
+out_release_fw:
+ release_firmware(fw);
+ return error ?: count;
+}
+
+static ssize_t calibrate_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elan_tp_data *data = i2c_get_clientdata(client);
+ int tries = 20;
+ int retval;
+ int error;
+ u8 val[3];
+
+ retval = mutex_lock_interruptible(&data->sysfs_mutex);
+ if (retval)
+ return retval;
+
+ disable_irq(client->irq);
+
+ data->mode |= ETP_ENABLE_CALIBRATE;
+ retval = data->ops->set_mode(client, data->mode);
+ if (retval) {
+ dev_err(dev, "failed to enable calibration mode: %d\n",
+ retval);
+ goto out;
+ }
+
+ retval = data->ops->calibrate(client);
+ if (retval) {
+ dev_err(dev, "failed to start calibration: %d\n",
+ retval);
+ goto out_disable_calibrate;
+ }
+
+ val[0] = 0xff;
+ do {
+ /* Wait 250ms before checking if calibration has completed. */
+ msleep(250);
+
+ retval = data->ops->calibrate_result(client, val);
+ if (retval)
+ dev_err(dev, "failed to check calibration result: %d\n",
+ retval);
+ else if (val[0] == 0)
+ break; /* calibration done */
+
+ } while (--tries);
+
+ if (tries == 0) {
+ dev_err(dev, "failed to calibrate. Timeout.\n");
+ retval = -ETIMEDOUT;
+ }
+
+out_disable_calibrate:
+ data->mode &= ~ETP_ENABLE_CALIBRATE;
+ error = data->ops->set_mode(data->client, data->mode);
+ if (error) {
+ dev_err(dev, "failed to disable calibration mode: %d\n",
+ error);
+ if (!retval)
+ retval = error;
+ }
+out:
+ enable_irq(client->irq);
+ mutex_unlock(&data->sysfs_mutex);
+ return retval ?: count;
+}
+
+static ssize_t elan_sysfs_read_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elan_tp_data *data = i2c_get_clientdata(client);
+ int error;
+ enum tp_mode mode;
+
+ error = mutex_lock_interruptible(&data->sysfs_mutex);
+ if (error)
+ return error;
+
+ error = data->ops->iap_get_mode(data->client, &mode);
+
+ mutex_unlock(&data->sysfs_mutex);
+
+ if (error)
+ return error;
+
+ return sprintf(buf, "%d\n", (int)mode);
+}
+
+static DEVICE_ATTR(product_id, S_IRUGO, elan_sysfs_read_product_id, NULL);
+static DEVICE_ATTR(firmware_version, S_IRUGO, elan_sysfs_read_fw_ver, NULL);
+static DEVICE_ATTR(sample_version, S_IRUGO, elan_sysfs_read_sm_ver, NULL);
+static DEVICE_ATTR(iap_version, S_IRUGO, elan_sysfs_read_iap_ver, NULL);
+static DEVICE_ATTR(fw_checksum, S_IRUGO, elan_sysfs_read_fw_checksum, NULL);
+static DEVICE_ATTR(mode, S_IRUGO, elan_sysfs_read_mode, NULL);
+static DEVICE_ATTR(update_fw, S_IWUSR, NULL, elan_sysfs_update_fw);
+
+static DEVICE_ATTR_WO(calibrate);
+
+static struct attribute *elan_sysfs_entries[] = {
+ &dev_attr_product_id.attr,
+ &dev_attr_firmware_version.attr,
+ &dev_attr_sample_version.attr,
+ &dev_attr_iap_version.attr,
+ &dev_attr_fw_checksum.attr,
+ &dev_attr_calibrate.attr,
+ &dev_attr_mode.attr,
+ &dev_attr_update_fw.attr,
+ NULL,
+};
+
+static const struct attribute_group elan_sysfs_group = {
+ .attrs = elan_sysfs_entries,
+};
+
+static ssize_t acquire_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elan_tp_data *data = i2c_get_clientdata(client);
+ int error;
+ int retval;
+
+ retval = mutex_lock_interruptible(&data->sysfs_mutex);
+ if (retval)
+ return retval;
+
+ disable_irq(client->irq);
+
+ data->baseline_ready = false;
+
+ data->mode |= ETP_ENABLE_CALIBRATE;
+ retval = data->ops->set_mode(data->client, data->mode);
+ if (retval) {
+ dev_err(dev, "Failed to enable calibration mode to get baseline: %d\n",
+ retval);
+ goto out;
+ }
+
+ msleep(250);
+
+ retval = data->ops->get_baseline_data(data->client, true,
+ &data->max_baseline);
+ if (retval) {
+ dev_err(dev, "Failed to read max baseline form device: %d\n",
+ retval);
+ goto out_disable_calibrate;
+ }
+
+ retval = data->ops->get_baseline_data(data->client, false,
+ &data->min_baseline);
+ if (retval) {
+ dev_err(dev, "Failed to read min baseline form device: %d\n",
+ retval);
+ goto out_disable_calibrate;
+ }
+
+ data->baseline_ready = true;
+
+out_disable_calibrate:
+ data->mode &= ~ETP_ENABLE_CALIBRATE;
+ error = data->ops->set_mode(data->client, data->mode);
+ if (error) {
+ dev_err(dev, "Failed to disable calibration mode after acquiring baseline: %d\n",
+ error);
+ if (!retval)
+ retval = error;
+ }
+out:
+ enable_irq(client->irq);
+ mutex_unlock(&data->sysfs_mutex);
+ return retval ?: count;
+}
+
+static ssize_t min_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elan_tp_data *data = i2c_get_clientdata(client);
+ int retval;
+
+ retval = mutex_lock_interruptible(&data->sysfs_mutex);
+ if (retval)
+ return retval;
+
+ if (!data->baseline_ready) {
+ retval = -ENODATA;
+ goto out;
+ }
+
+ retval = snprintf(buf, PAGE_SIZE, "%d", data->min_baseline);
+
+out:
+ mutex_unlock(&data->sysfs_mutex);
+ return retval;
+}
+
+static ssize_t max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elan_tp_data *data = i2c_get_clientdata(client);
+ int retval;
+
+ retval = mutex_lock_interruptible(&data->sysfs_mutex);
+ if (retval)
+ return retval;
+
+ if (!data->baseline_ready) {
+ retval = -ENODATA;
+ goto out;
+ }
+
+ retval = snprintf(buf, PAGE_SIZE, "%d", data->max_baseline);
+
+out:
+ mutex_unlock(&data->sysfs_mutex);
+ return retval;
+}
+
+
+static DEVICE_ATTR_WO(acquire);
+static DEVICE_ATTR_RO(min);
+static DEVICE_ATTR_RO(max);
+
+static struct attribute *elan_baseline_sysfs_entries[] = {
+ &dev_attr_acquire.attr,
+ &dev_attr_min.attr,
+ &dev_attr_max.attr,
+ NULL,
+};
+
+static const struct attribute_group elan_baseline_sysfs_group = {
+ .name = "baseline",
+ .attrs = elan_baseline_sysfs_entries,
+};
+
+static const struct attribute_group *elan_sysfs_groups[] = {
+ &elan_sysfs_group,
+ &elan_baseline_sysfs_group,
+ NULL
+};
+
+/*
+ ******************************************************************
+ * Elan isr functions
+ ******************************************************************
+ */
+static void elan_report_contact(struct elan_tp_data *data,
+ int contact_num, bool contact_valid,
+ u8 *finger_data)
+{
+ struct input_dev *input = data->input;
+ unsigned int pos_x, pos_y;
+ unsigned int pressure, mk_x, mk_y;
+ unsigned int area_x, area_y, major, minor, new_pressure;
+
+
+ if (contact_valid) {
+ pos_x = ((finger_data[0] & 0xf0) << 4) |
+ finger_data[1];
+ pos_y = ((finger_data[0] & 0x0f) << 8) |
+ finger_data[2];
+ mk_x = (finger_data[3] & 0x0f);
+ mk_y = (finger_data[3] >> 4);
+ pressure = finger_data[4];
+
+ if (pos_x > data->max_x || pos_y > data->max_y) {
+ dev_dbg(input->dev.parent,
+ "[%d] x=%d y=%d over max (%d, %d)",
+ contact_num, pos_x, pos_y,
+ data->max_x, data->max_y);
+ return;
+ }
+
+ /*
+ * To avoid treating large finger as palm, let's reduce the
+ * width x and y per trace.
+ */
+ area_x = mk_x * (data->width_x - ETP_FWIDTH_REDUCE);
+ area_y = mk_y * (data->width_y - ETP_FWIDTH_REDUCE);
+
+ major = max(area_x, area_y);
+ minor = min(area_x, area_y);
+
+ new_pressure = pressure + ETP_PRESSURE_OFFSET;
+ if (new_pressure > ETP_MAX_PRESSURE)
+ new_pressure = ETP_MAX_PRESSURE;
+
+ input_mt_slot(input, contact_num);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
+ input_report_abs(input, ABS_MT_POSITION_X, pos_x);
+ input_report_abs(input, ABS_MT_POSITION_Y, data->max_y - pos_y);
+ input_report_abs(input, ABS_MT_PRESSURE, new_pressure);
+ input_report_abs(input, ABS_TOOL_WIDTH, mk_x);
+ input_report_abs(input, ABS_MT_TOUCH_MAJOR, major);
+ input_report_abs(input, ABS_MT_TOUCH_MINOR, minor);
+ } else {
+ input_mt_slot(input, contact_num);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, false);
+ }
+}
+
+static void elan_report_absolute(struct elan_tp_data *data, u8 *packet)
+{
+ struct input_dev *input = data->input;
+ u8 *finger_data = &packet[ETP_FINGER_DATA_OFFSET];
+ int i;
+ u8 tp_info = packet[ETP_TOUCH_INFO_OFFSET];
+ bool contact_valid;
+
+ for (i = 0; i < ETP_MAX_FINGERS; i++) {
+ contact_valid = tp_info & (1U << (3 + i));
+ elan_report_contact(data, i, contact_valid, finger_data);
+
+ if (contact_valid)
+ finger_data += ETP_FINGER_DATA_LEN;
+ }
+
+ input_report_key(input, BTN_LEFT, tp_info & 0x01);
+ input_mt_report_pointer_emulation(input, true);
+ input_sync(input);
+}
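+
+/*
+ * Report layout handled above (descriptive): byte 3 carries the BTN_LEFT
+ * state in bit 0 and one validity bit per finger in bits 3-7; each valid
+ * contact then contributes 5 bytes: a nibble pair with the upper bits of
+ * the 12-bit X/Y positions, the X and Y low bytes, a width nibble pair
+ * (mk_x/mk_y) and a pressure byte.
+ */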
+
+static irqreturn_t elan_isr(int irq, void *dev_id)
+{
+ struct elan_tp_data *data = dev_id;
+ struct device *dev = &data->client->dev;
+ int error;
+ u8 report[ETP_MAX_REPORT_LEN];
+
+ /*
+ * When the device is connected to the I2C bus and all IAP page writes
+ * have completed, the controller raises an interrupt and the driver
+ * must read the 0000 acknowledgement to confirm that IAP has finished.
+ */
+ if (data->in_fw_update) {
+ complete(&data->fw_completion);
+ goto out;
+ }
+
+ error = data->ops->get_report(data->client, report);
+ if (error)
+ goto out;
+
+ if (report[ETP_REPORT_ID_OFFSET] != ETP_REPORT_ID)
+ dev_err(dev, "invalid report id data (%x)\n",
+ report[ETP_REPORT_ID_OFFSET]);
+ else
+ elan_report_absolute(data, report);
+
+out:
+ return IRQ_HANDLED;
+}
+
+/*
+ ******************************************************************
+ * Elan initialization functions
+ ******************************************************************
+ */
+static int elan_setup_input_device(struct elan_tp_data *data)
+{
+ struct device *dev = &data->client->dev;
+ struct input_dev *input;
+ unsigned int max_width = max(data->width_x, data->width_y);
+ unsigned int min_width = min(data->width_x, data->width_y);
+ int error;
+
+ input = devm_input_allocate_device(dev);
+ if (!input)
+ return -ENOMEM;
+
+ input->name = "Elan Touchpad";
+ input->id.bustype = BUS_I2C;
+ input_set_drvdata(input, data);
+
+ error = input_mt_init_slots(input, ETP_MAX_FINGERS,
+ INPUT_MT_POINTER | INPUT_MT_DROP_UNUSED);
+ if (error) {
+ dev_err(dev, "failed to initialize MT slots: %d\n", error);
+ return error;
+ }
+
+ __set_bit(EV_ABS, input->evbit);
+ __set_bit(INPUT_PROP_POINTER, input->propbit);
+ __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
+ __set_bit(BTN_LEFT, input->keybit);
+
+ /* Set up ST parameters */
+ input_set_abs_params(input, ABS_X, 0, data->max_x, 0, 0);
+ input_set_abs_params(input, ABS_Y, 0, data->max_y, 0, 0);
+ input_abs_set_res(input, ABS_X, data->x_res);
+ input_abs_set_res(input, ABS_Y, data->y_res);
+ input_set_abs_params(input, ABS_PRESSURE, 0, ETP_MAX_PRESSURE, 0, 0);
+ input_set_abs_params(input, ABS_TOOL_WIDTH, 0, ETP_FINGER_WIDTH, 0, 0);
+
+ /* And MT parameters */
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0, data->max_x, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0, data->max_y, 0, 0);
+ input_abs_set_res(input, ABS_MT_POSITION_X, data->x_res);
+ input_abs_set_res(input, ABS_MT_POSITION_Y, data->y_res);
+ input_set_abs_params(input, ABS_MT_PRESSURE, 0,
+ ETP_MAX_PRESSURE, 0, 0);
+ input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0,
+ ETP_FINGER_WIDTH * max_width, 0, 0);
+ input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0,
+ ETP_FINGER_WIDTH * min_width, 0, 0);
+
+ data->input = input;
+
+ return 0;
+}
+
+static void elan_disable_regulator(void *_data)
+{
+ struct elan_tp_data *data = _data;
+
+ regulator_disable(data->vcc);
+}
+
+static void elan_remove_sysfs_groups(void *_data)
+{
+ struct elan_tp_data *data = _data;
+
+ sysfs_remove_groups(&data->client->dev.kobj, elan_sysfs_groups);
+}
+
+static int elan_probe(struct i2c_client *client,
+ const struct i2c_device_id *dev_id)
+{
+ const struct elan_transport_ops *transport_ops;
+ struct device *dev = &client->dev;
+ struct elan_tp_data *data;
+ unsigned long irqflags;
+ int error;
+
+ if (IS_ENABLED(CONFIG_MOUSE_ELAN_I2C_I2C) &&
+ i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ transport_ops = &elan_i2c_ops;
+ } else if (IS_ENABLED(CONFIG_MOUSE_ELAN_I2C_SMBUS) &&
+ i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_BLOCK_DATA |
+ I2C_FUNC_SMBUS_I2C_BLOCK)) {
+ transport_ops = &elan_smbus_ops;
+ } else {
+ dev_err(dev, "not a supported I2C/SMBus adapter\n");
+ return -EIO;
+ }
+
+ data = devm_kzalloc(&client->dev, sizeof(struct elan_tp_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, data);
+
+ data->ops = transport_ops;
+ data->client = client;
+ init_completion(&data->fw_completion);
+ mutex_init(&data->sysfs_mutex);
+
+ data->vcc = devm_regulator_get(&client->dev, "vcc");
+ if (IS_ERR(data->vcc)) {
+ error = PTR_ERR(data->vcc);
+ if (error != -EPROBE_DEFER)
+ dev_err(&client->dev,
+ "Failed to get 'vcc' regulator: %d\n",
+ error);
+ return error;
+ }
+
+ error = regulator_enable(data->vcc);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to enable regulator: %d\n", error);
+ return error;
+ }
+
+ error = devm_add_action(&client->dev,
+ elan_disable_regulator, data);
+ if (error) {
+ regulator_disable(data->vcc);
+ dev_err(&client->dev,
+ "Failed to add disable regulator action: %d\n",
+ error);
+ return error;
+ }
+
+ /* Initialize the touchpad. */
+ error = elan_initialize(data);
+ if (error)
+ return error;
+
+ error = elan_query_device_info(data);
+ if (error)
+ return error;
+
+ error = elan_query_device_parameters(data);
+ if (error)
+ return error;
+
+ dev_dbg(&client->dev,
+ "Elan Touchpad Information:\n"
+ " Module product ID: 0x%04x\n"
+ " Firmware Version: 0x%04x\n"
+ " Sample Version: 0x%04x\n"
+ " IAP Version: 0x%04x\n"
+ " Max ABS X,Y: %d,%d\n"
+ " Width X,Y: %d,%d\n"
+ " Resolution X,Y: %d,%d (dots/mm)\n",
+ data->product_id,
+ data->fw_version,
+ data->sm_version,
+ data->iap_version,
+ data->max_x, data->max_y,
+ data->width_x, data->width_y,
+ data->x_res, data->y_res);
+
+ /* Set up input device properties based on queried parameters. */
+ error = elan_setup_input_device(data);
+ if (error)
+ return error;
+
+ /*
+ * Systems using device tree should set up interrupt via DTS,
+ * the rest will use the default falling edge interrupts.
+ */
+ irqflags = client->dev.of_node ? 0 : IRQF_TRIGGER_FALLING;
+
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, elan_isr,
+ irqflags | IRQF_ONESHOT,
+ client->name, data);
+ if (error) {
+ dev_err(&client->dev, "cannot register irq=%d\n", client->irq);
+ return error;
+ }
+
+ error = sysfs_create_groups(&client->dev.kobj, elan_sysfs_groups);
+ if (error) {
+ dev_err(&client->dev, "failed to create sysfs attributes: %d\n",
+ error);
+ return error;
+ }
+
+ error = devm_add_action(&client->dev,
+ elan_remove_sysfs_groups, data);
+ if (error) {
+ elan_remove_sysfs_groups(data);
+ dev_err(&client->dev,
+ "Failed to add sysfs cleanup action: %d\n",
+ error);
+ return error;
+ }
+
+ error = input_register_device(data->input);
+ if (error) {
+ dev_err(&client->dev, "failed to register input device: %d\n",
+ error);
+ return error;
+ }
+
+ /*
+ * Systems using device tree should set up wakeup via DTS,
+ * the rest will configure device as wakeup source by default.
+ */
+ if (!client->dev.of_node)
+ device_init_wakeup(&client->dev, true);
+
+ return 0;
+}
+
+static int __maybe_unused elan_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elan_tp_data *data = i2c_get_clientdata(client);
+ int ret;
+
+ /*
+ * We are taking the mutex to make sure sysfs operations are
+ * complete before we attempt to bring the device into low[er]
+ * power mode.
+ */
+ ret = mutex_lock_interruptible(&data->sysfs_mutex);
+ if (ret)
+ return ret;
+
+ disable_irq(client->irq);
+
+ if (device_may_wakeup(dev)) {
+ ret = elan_sleep(data);
+ /* Enable wake from IRQ */
+ data->irq_wake = (enable_irq_wake(client->irq) == 0);
+ } else {
+ ret = elan_disable_power(data);
+ }
+
+ mutex_unlock(&data->sysfs_mutex);
+ return ret;
+}
+
+static int __maybe_unused elan_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elan_tp_data *data = i2c_get_clientdata(client);
+ int error;
+
+ if (device_may_wakeup(dev) && data->irq_wake) {
+ disable_irq_wake(client->irq);
+ data->irq_wake = false;
+ }
+
+ error = elan_enable_power(data);
+ if (error)
+ dev_err(dev, "power up when resuming failed: %d\n", error);
+
+ error = elan_initialize(data);
+ if (error)
+ dev_err(dev, "initialize when resuming failed: %d\n", error);
+
+ enable_irq(data->client->irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(elan_pm_ops, elan_suspend, elan_resume);
+
+static const struct i2c_device_id elan_id[] = {
+ { DRIVER_NAME, 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, elan_id);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id elan_acpi_id[] = {
+ { "ELAN0000", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, elan_acpi_id);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id elan_of_match[] = {
+ { .compatible = "elan,ekth3000" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, elan_of_match);
+#endif
+
+static struct i2c_driver elan_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .pm = &elan_pm_ops,
+ .acpi_match_table = ACPI_PTR(elan_acpi_id),
+ .of_match_table = of_match_ptr(elan_of_match),
+ },
+ .probe = elan_probe,
+ .id_table = elan_id,
+};
+
+module_i2c_driver(elan_driver);
+
+MODULE_AUTHOR("Duson Lin <dusonlin@emc.com.tw>");
+MODULE_DESCRIPTION("Elan I2C/SMBus Touchpad driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(ELAN_DRIVER_VERSION);
diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c
new file mode 100644
index 00000000000..97d4937fc24
--- /dev/null
+++ b/drivers/input/mouse/elan_i2c_i2c.c
@@ -0,0 +1,611 @@
+/*
+ * Elan I2C/SMBus Touchpad driver - I2C interface
+ *
+ * Copyright (c) 2013 ELAN Microelectronics Corp.
+ *
+ * Author: 林政維 (Duson Lin) <dusonlin@emc.com.tw>
+ * Version: 1.5.5
+ *
+ * Based on cyapa driver:
+ * copyright (c) 2011-2012 Cypress Semiconductor, Inc.
+ * copyright (c) 2011-2012 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Trademarks are the property of their respective owners.
+ */
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <asm/unaligned.h>
+
+#include "elan_i2c.h"
+
+/* Elan i2c commands */
+#define ETP_I2C_RESET 0x0100
+#define ETP_I2C_WAKE_UP 0x0800
+#define ETP_I2C_SLEEP 0x0801
+#define ETP_I2C_DESC_CMD 0x0001
+#define ETP_I2C_REPORT_DESC_CMD 0x0002
+#define ETP_I2C_STAND_CMD 0x0005
+#define ETP_I2C_UNIQUEID_CMD 0x0101
+#define ETP_I2C_FW_VERSION_CMD 0x0102
+#define ETP_I2C_SM_VERSION_CMD 0x0103
+#define ETP_I2C_XY_TRACENUM_CMD 0x0105
+#define ETP_I2C_MAX_X_AXIS_CMD 0x0106
+#define ETP_I2C_MAX_Y_AXIS_CMD 0x0107
+#define ETP_I2C_RESOLUTION_CMD 0x0108
+#define ETP_I2C_IAP_VERSION_CMD 0x0110
+#define ETP_I2C_SET_CMD 0x0300
+#define ETP_I2C_POWER_CMD 0x0307
+#define ETP_I2C_FW_CHECKSUM_CMD 0x030F
+#define ETP_I2C_IAP_CTRL_CMD 0x0310
+#define ETP_I2C_IAP_CMD 0x0311
+#define ETP_I2C_IAP_RESET_CMD 0x0314
+#define ETP_I2C_IAP_CHECKSUM_CMD 0x0315
+#define ETP_I2C_CALIBRATE_CMD 0x0316
+#define ETP_I2C_MAX_BASELINE_CMD 0x0317
+#define ETP_I2C_MIN_BASELINE_CMD 0x0318
+
+#define ETP_I2C_REPORT_LEN 34
+#define ETP_I2C_DESC_LENGTH 30
+#define ETP_I2C_REPORT_DESC_LENGTH 158
+#define ETP_I2C_INF_LENGTH 2
+#define ETP_I2C_IAP_PASSWORD 0x1EA5
+#define ETP_I2C_IAP_RESET 0xF0F0
+#define ETP_I2C_MAIN_MODE_ON (1 << 9)
+#define ETP_I2C_IAP_REG_L 0x01
+#define ETP_I2C_IAP_REG_H 0x06
+
+static int elan_i2c_read_block(struct i2c_client *client,
+ u16 reg, u8 *val, u16 len)
+{
+ __le16 buf[] = {
+ cpu_to_le16(reg),
+ };
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = client->flags & I2C_M_TEN,
+ .len = sizeof(buf),
+ .buf = (u8 *)buf,
+ },
+ {
+ .addr = client->addr,
+ .flags = (client->flags & I2C_M_TEN) | I2C_M_RD,
+ .len = len,
+ .buf = val,
+ }
+ };
+ int ret;
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ return ret == ARRAY_SIZE(msgs) ? 0 : (ret < 0 ? ret : -EIO);
+}
+
+static int elan_i2c_read_cmd(struct i2c_client *client, u16 reg, u8 *val)
+{
+ int retval;
+
+ retval = elan_i2c_read_block(client, reg, val, ETP_I2C_INF_LENGTH);
+ if (retval < 0) {
+ dev_err(&client->dev, "reading cmd (0x%04x) fail.\n", reg);
+ return retval;
+ }
+
+ return 0;
+}
+
+static int elan_i2c_write_cmd(struct i2c_client *client, u16 reg, u16 cmd)
+{
+ __le16 buf[] = {
+ cpu_to_le16(reg),
+ cpu_to_le16(cmd),
+ };
+ struct i2c_msg msg = {
+ .addr = client->addr,
+ .flags = client->flags & I2C_M_TEN,
+ .len = sizeof(buf),
+ .buf = (u8 *)buf,
+ };
+ int ret;
+
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ return ret == 1 ? 0 : (ret < 0 ? ret : -EIO);
+}
+
+static int elan_i2c_initialize(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ int error;
+ u8 val[256];
+
+ error = elan_i2c_write_cmd(client, ETP_I2C_STAND_CMD, ETP_I2C_RESET);
+ if (error) {
+ dev_err(dev, "device reset failed: %d\n", error);
+ return error;
+ }
+
+ /* Wait for the device to reset */
+ msleep(100);
+
+ /* Read the reset acknowledgement (0000) */
+ error = i2c_master_recv(client, val, ETP_I2C_INF_LENGTH);
+ if (error < 0) {
+ dev_err(dev, "failed to read reset response: %d\n", error);
+ return error;
+ }
+
+ error = elan_i2c_read_block(client, ETP_I2C_DESC_CMD,
+ val, ETP_I2C_DESC_LENGTH);
+ if (error) {
+ dev_err(dev, "cannot get device descriptor: %d\n", error);
+ return error;
+ }
+
+ error = elan_i2c_read_block(client, ETP_I2C_REPORT_DESC_CMD,
+ val, ETP_I2C_REPORT_DESC_LENGTH);
+ if (error) {
+ dev_err(dev, "fetching report descriptor failed.: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int elan_i2c_sleep_control(struct i2c_client *client, bool sleep)
+{
+ return elan_i2c_write_cmd(client, ETP_I2C_STAND_CMD,
+ sleep ? ETP_I2C_SLEEP : ETP_I2C_WAKE_UP);
+}
+
+static int elan_i2c_power_control(struct i2c_client *client, bool enable)
+{
+ u8 val[2];
+ u16 reg;
+ int error;
+
+ error = elan_i2c_read_cmd(client, ETP_I2C_POWER_CMD, val);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to read current power state: %d\n",
+ error);
+ return error;
+ }
+
+ reg = le16_to_cpup((__le16 *)val);
+ if (enable)
+ reg &= ~ETP_DISABLE_POWER;
+ else
+ reg |= ETP_DISABLE_POWER;
+
+ error = elan_i2c_write_cmd(client, ETP_I2C_POWER_CMD, reg);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to write current power state: %d\n",
+ error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int elan_i2c_set_mode(struct i2c_client *client, u8 mode)
+{
+ return elan_i2c_write_cmd(client, ETP_I2C_SET_CMD, mode);
+}
+
+
+static int elan_i2c_calibrate(struct i2c_client *client)
+{
+ return elan_i2c_write_cmd(client, ETP_I2C_CALIBRATE_CMD, 1);
+}
+
+static int elan_i2c_calibrate_result(struct i2c_client *client, u8 *val)
+{
+ return elan_i2c_read_block(client, ETP_I2C_CALIBRATE_CMD, val, 1);
+}
+
+static int elan_i2c_get_baseline_data(struct i2c_client *client,
+ bool max_baseline, u8 *value)
+{
+ int error;
+ u8 val[3];
+
+ error = elan_i2c_read_cmd(client,
+ max_baseline ? ETP_I2C_MAX_BASELINE_CMD :
+ ETP_I2C_MIN_BASELINE_CMD,
+ val);
+ if (error)
+ return error;
+
+ *value = le16_to_cpup((__le16 *)val);
+
+ return 0;
+}
+
+static int elan_i2c_get_version(struct i2c_client *client,
+ bool iap, u8 *version)
+{
+ int error;
+ u8 val[3];
+
+ error = elan_i2c_read_cmd(client,
+ iap ? ETP_I2C_IAP_VERSION_CMD :
+ ETP_I2C_FW_VERSION_CMD,
+ val);
+ if (error) {
+ dev_err(&client->dev, "failed to get %s version: %d\n",
+ iap ? "IAP" : "FW", error);
+ return error;
+ }
+
+ *version = val[0];
+ return 0;
+}
+
+static int elan_i2c_get_sm_version(struct i2c_client *client, u8 *version)
+{
+ int error;
+ u8 val[3];
+
+ error = elan_i2c_read_cmd(client, ETP_I2C_SM_VERSION_CMD, val);
+ if (error) {
+ dev_err(&client->dev, "failed to get SM version: %d\n", error);
+ return error;
+ }
+
+ *version = val[0];
+ return 0;
+}
+
+static int elan_i2c_get_product_id(struct i2c_client *client, u8 *id)
+{
+ int error;
+ u8 val[3];
+
+ error = elan_i2c_read_cmd(client, ETP_I2C_UNIQUEID_CMD, val);
+ if (error) {
+ dev_err(&client->dev, "failed to get product ID: %d\n", error);
+ return error;
+ }
+
+ *id = val[0];
+ return 0;
+}
+
+static int elan_i2c_get_checksum(struct i2c_client *client,
+ bool iap, u16 *csum)
+{
+ int error;
+ u8 val[3];
+
+ error = elan_i2c_read_cmd(client,
+ iap ? ETP_I2C_IAP_CHECKSUM_CMD :
+ ETP_I2C_FW_CHECKSUM_CMD,
+ val);
+ if (error) {
+ dev_err(&client->dev, "failed to get %s checksum: %d\n",
+ iap ? "IAP" : "FW", error);
+ return error;
+ }
+
+ *csum = le16_to_cpup((__le16 *)val);
+ return 0;
+}
+
+static int elan_i2c_get_max(struct i2c_client *client,
+ unsigned int *max_x, unsigned int *max_y)
+{
+ int error;
+ u8 val[3];
+
+ error = elan_i2c_read_cmd(client, ETP_I2C_MAX_X_AXIS_CMD, val);
+ if (error) {
+ dev_err(&client->dev, "failed to get X dimension: %d\n", error);
+ return error;
+ }
+
+ *max_x = le16_to_cpup((__le16 *)val) & 0x0fff;
+
+ error = elan_i2c_read_cmd(client, ETP_I2C_MAX_Y_AXIS_CMD, val);
+ if (error) {
+ dev_err(&client->dev, "failed to get Y dimension: %d\n", error);
+ return error;
+ }
+
+ *max_y = le16_to_cpup((__le16 *)val) & 0x0fff;
+
+ return 0;
+}
+
+static int elan_i2c_get_resolution(struct i2c_client *client,
+ u8 *hw_res_x, u8 *hw_res_y)
+{
+ int error;
+ u8 val[3];
+
+ error = elan_i2c_read_cmd(client, ETP_I2C_RESOLUTION_CMD, val);
+ if (error) {
+ dev_err(&client->dev, "failed to get resolution: %d\n", error);
+ return error;
+ }
+
+ *hw_res_x = val[0];
+ *hw_res_y = val[1];
+
+ return 0;
+}
+
+static int elan_i2c_get_num_traces(struct i2c_client *client,
+ unsigned int *x_traces,
+ unsigned int *y_traces)
+{
+ int error;
+ u8 val[3];
+
+ error = elan_i2c_read_cmd(client, ETP_I2C_XY_TRACENUM_CMD, val);
+ if (error) {
+ dev_err(&client->dev, "failed to get trace info: %d\n", error);
+ return error;
+ }
+
+ *x_traces = val[0] - 1;
+ *y_traces = val[1] - 1;
+
+ return 0;
+}
+
+static int elan_i2c_iap_get_mode(struct i2c_client *client, enum tp_mode *mode)
+{
+ int error;
+ u16 constant;
+ u8 val[3];
+
+ error = elan_i2c_read_cmd(client, ETP_I2C_IAP_CTRL_CMD, val);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to read iap control register: %d\n",
+ error);
+ return error;
+ }
+
+ constant = le16_to_cpup((__le16 *)val);
+ dev_dbg(&client->dev, "iap control reg: 0x%04x.\n", constant);
+
+ *mode = (constant & ETP_I2C_MAIN_MODE_ON) ? MAIN_MODE : IAP_MODE;
+
+ return 0;
+}
+
+static int elan_i2c_iap_reset(struct i2c_client *client)
+{
+ int error;
+
+ error = elan_i2c_write_cmd(client, ETP_I2C_IAP_RESET_CMD,
+ ETP_I2C_IAP_RESET);
+ if (error) {
+ dev_err(&client->dev, "cannot reset IC: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int elan_i2c_set_flash_key(struct i2c_client *client)
+{
+ int error;
+
+ error = elan_i2c_write_cmd(client, ETP_I2C_IAP_CMD,
+ ETP_I2C_IAP_PASSWORD);
+ if (error) {
+ dev_err(&client->dev, "cannot set flash key: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int elan_i2c_prepare_fw_update(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ int error;
+ enum tp_mode mode;
+ u8 val[3];
+ u16 password;
+
+ /* Check which mode (IAP_MODE or MAIN_MODE) the FW is in */
+ error = elan_i2c_iap_get_mode(client, &mode);
+ if (error)
+ return error;
+
+ if (mode == IAP_MODE) {
+ /* Reset IC */
+ error = elan_i2c_iap_reset(client);
+ if (error)
+ return error;
+
+ msleep(30);
+ }
+
+ /* Set flash key */
+ error = elan_i2c_set_flash_key(client);
+ if (error)
+ return error;
+
+ /* Wait for F/W IAP initialization */
+ msleep(mode == MAIN_MODE ? 100 : 30);
+
+ /* Check if we are in IAP mode or not */
+ error = elan_i2c_iap_get_mode(client, &mode);
+ if (error)
+ return error;
+
+ if (mode == MAIN_MODE) {
+ dev_err(dev, "wrong mode: %d\n", mode);
+ return -EIO;
+ }
+
+ /* Set flash key again */
+ error = elan_i2c_set_flash_key(client);
+ if (error)
+ return error;
+
+ /* Wait for F/W IAP initialization */
+ msleep(30);
+
+ /* Read back to confirm the flash key (IAP password) was accepted. */
+ error = elan_i2c_read_cmd(client, ETP_I2C_IAP_CMD, val);
+ if (error) {
+ dev_err(dev, "cannot read iap password: %d\n",
+ error);
+ return error;
+ }
+
+ password = le16_to_cpup((__le16 *)val);
+ if (password != ETP_I2C_IAP_PASSWORD) {
+ dev_err(dev, "wrong iap password: 0x%X\n", password);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int elan_i2c_write_fw_block(struct i2c_client *client,
+ const u8 *page, u16 checksum, int idx)
+{
+ struct device *dev = &client->dev;
+ u8 page_store[ETP_FW_PAGE_SIZE + 4];
+ u8 val[3];
+ u16 result;
+ int ret, error;
+
+ page_store[0] = ETP_I2C_IAP_REG_L;
+ page_store[1] = ETP_I2C_IAP_REG_H;
+ memcpy(&page_store[2], page, ETP_FW_PAGE_SIZE);
+ /* record checksum in the last two bytes */
+ put_unaligned_le16(checksum, &page_store[ETP_FW_PAGE_SIZE + 2]);
+
+ ret = i2c_master_send(client, page_store, sizeof(page_store));
+ if (ret != sizeof(page_store)) {
+ error = ret < 0 ? ret : -EIO;
+ dev_err(dev, "Failed to write page %d: %d\n", idx, error);
+ return error;
+ }
+
+ /* Wait for F/W to update one page ROM data. */
+ msleep(20);
+
+ error = elan_i2c_read_cmd(client, ETP_I2C_IAP_CTRL_CMD, val);
+ if (error) {
+ dev_err(dev, "Failed to read IAP write result: %d\n", error);
+ return error;
+ }
+
+ result = le16_to_cpup((__le16 *)val);
+ if (result & (ETP_FW_IAP_PAGE_ERR | ETP_FW_IAP_INTF_ERR)) {
+ dev_err(dev, "IAP reports failed write: %04hx\n",
+ result);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int elan_i2c_finish_fw_update(struct i2c_client *client,
+ struct completion *completion)
+{
+ struct device *dev = &client->dev;
+ long ret;
+ int error;
+ int len;
+ u8 buffer[ETP_I2C_INF_LENGTH];
+
+ reinit_completion(completion);
+ enable_irq(client->irq);
+
+ error = elan_i2c_write_cmd(client, ETP_I2C_STAND_CMD, ETP_I2C_RESET);
+ if (!error)
+ ret = wait_for_completion_interruptible_timeout(completion,
+ msecs_to_jiffies(300));
+ disable_irq(client->irq);
+
+ if (error) {
+ dev_err(dev, "device reset failed: %d\n", error);
+ return error;
+ } else if (ret == 0) {
+ dev_err(dev, "timeout waiting for device reset\n");
+ return -ETIMEDOUT;
+ } else if (ret < 0) {
+ error = ret;
+ dev_err(dev, "error waiting for device reset: %d\n", error);
+ return error;
+ }
+
+ len = i2c_master_recv(client, buffer, ETP_I2C_INF_LENGTH);
+ if (len != ETP_I2C_INF_LENGTH) {
+ error = len < 0 ? len : -EIO;
+ dev_err(dev, "failed to read INT signal: %d (%d)\n",
+ error, len);
+ return error;
+ }
+
+ return 0;
+}
+
+static int elan_i2c_get_report(struct i2c_client *client, u8 *report)
+{
+ int len;
+
+ len = i2c_master_recv(client, report, ETP_I2C_REPORT_LEN);
+ if (len < 0) {
+ dev_err(&client->dev, "failed to read report data: %d\n", len);
+ return len;
+ }
+
+ if (len != ETP_I2C_REPORT_LEN) {
+ dev_err(&client->dev,
+ "wrong report length (%d vs %d expected)\n",
+ len, ETP_I2C_REPORT_LEN);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+const struct elan_transport_ops elan_i2c_ops = {
+ .initialize = elan_i2c_initialize,
+ .sleep_control = elan_i2c_sleep_control,
+ .power_control = elan_i2c_power_control,
+ .set_mode = elan_i2c_set_mode,
+
+ .calibrate = elan_i2c_calibrate,
+ .calibrate_result = elan_i2c_calibrate_result,
+
+ .get_baseline_data = elan_i2c_get_baseline_data,
+
+ .get_version = elan_i2c_get_version,
+ .get_sm_version = elan_i2c_get_sm_version,
+ .get_product_id = elan_i2c_get_product_id,
+ .get_checksum = elan_i2c_get_checksum,
+
+ .get_max = elan_i2c_get_max,
+ .get_resolution = elan_i2c_get_resolution,
+ .get_num_traces = elan_i2c_get_num_traces,
+
+ .iap_get_mode = elan_i2c_iap_get_mode,
+ .iap_reset = elan_i2c_iap_reset,
+
+ .prepare_fw_update = elan_i2c_prepare_fw_update,
+ .write_fw_block = elan_i2c_write_fw_block,
+ .finish_fw_update = elan_i2c_finish_fw_update,
+
+ .get_report = elan_i2c_get_report,
+};
diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c
new file mode 100644
index 00000000000..359bf8583d5
--- /dev/null
+++ b/drivers/input/mouse/elan_i2c_smbus.c
@@ -0,0 +1,514 @@
+/*
+ * Elan I2C/SMBus Touchpad driver - SMBus interface
+ *
+ * Copyright (c) 2013 ELAN Microelectronics Corp.
+ *
+ * Author: 林政維 (Duson Lin) <dusonlin@emc.com.tw>
+ * Version: 1.5.5
+ *
+ * Based on cyapa driver:
+ * copyright (c) 2011-2012 Cypress Semiconductor, Inc.
+ * copyright (c) 2011-2012 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Trademarks are the property of their respective owners.
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include "elan_i2c.h"
+
+/* Elan SMbus commands */
+#define ETP_SMBUS_IAP_CMD 0x00
+#define ETP_SMBUS_ENABLE_TP 0x20
+#define ETP_SMBUS_SLEEP_CMD 0x21
+#define ETP_SMBUS_IAP_PASSWORD_WRITE 0x29
+#define ETP_SMBUS_IAP_PASSWORD_READ 0x80
+#define ETP_SMBUS_WRITE_FW_BLOCK 0x2A
+#define ETP_SMBUS_IAP_RESET_CMD 0x2B
+#define ETP_SMBUS_RANGE_CMD 0xA0
+#define ETP_SMBUS_FW_VERSION_CMD 0xA1
+#define ETP_SMBUS_XY_TRACENUM_CMD 0xA2
+#define ETP_SMBUS_SM_VERSION_CMD 0xA3
+#define ETP_SMBUS_UNIQUEID_CMD 0xA3
+#define ETP_SMBUS_RESOLUTION_CMD 0xA4
+#define ETP_SMBUS_HELLOPACKET_CMD 0xA7
+#define ETP_SMBUS_PACKET_QUERY 0xA8
+#define ETP_SMBUS_IAP_VERSION_CMD 0xAC
+#define ETP_SMBUS_IAP_CTRL_CMD 0xAD
+#define ETP_SMBUS_IAP_CHECKSUM_CMD 0xAE
+#define ETP_SMBUS_FW_CHECKSUM_CMD 0xAF
+#define ETP_SMBUS_MAX_BASELINE_CMD 0xC3
+#define ETP_SMBUS_MIN_BASELINE_CMD 0xC4
+#define ETP_SMBUS_CALIBRATE_QUERY 0xC5
+
+#define ETP_SMBUS_REPORT_LEN 32
+#define ETP_SMBUS_REPORT_OFFSET 2
+#define ETP_SMBUS_HELLOPACKET_LEN 5
+#define ETP_SMBUS_IAP_PASSWORD 0x1234
+#define ETP_SMBUS_IAP_MODE_ON (1 << 6)
+
+static int elan_smbus_initialize(struct i2c_client *client)
+{
+ u8 check[ETP_SMBUS_HELLOPACKET_LEN] = { 0x55, 0x55, 0x55, 0x55, 0x55 };
+ u8 values[ETP_SMBUS_HELLOPACKET_LEN] = { 0, 0, 0, 0, 0 };
+ int len, error;
+
+ /* Get hello packet */
+ len = i2c_smbus_read_block_data(client,
+ ETP_SMBUS_HELLOPACKET_CMD, values);
+ if (len != ETP_SMBUS_HELLOPACKET_LEN) {
+ dev_err(&client->dev, "hello packet length fail: %d\n", len);
+ error = len < 0 ? len : -EIO;
+ return error;
+ }
+
+ /* compare hello packet */
+ if (memcmp(values, check, ETP_SMBUS_HELLOPACKET_LEN)) {
+ dev_err(&client->dev, "hello packet fail [%*px]\n",
+ ETP_SMBUS_HELLOPACKET_LEN, values);
+ return -ENXIO;
+ }
+
+ /* enable tp */
+ error = i2c_smbus_write_byte(client, ETP_SMBUS_ENABLE_TP);
+ if (error) {
+ dev_err(&client->dev, "failed to enable touchpad: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int elan_smbus_set_mode(struct i2c_client *client, u8 mode)
+{
+ u8 cmd[4] = { 0x00, 0x07, 0x00, mode };
+
+ return i2c_smbus_write_block_data(client, ETP_SMBUS_IAP_CMD,
+ sizeof(cmd), cmd);
+}
+
+static int elan_smbus_sleep_control(struct i2c_client *client, bool sleep)
+{
+ if (sleep)
+ return i2c_smbus_write_byte(client, ETP_SMBUS_SLEEP_CMD);
+ else
+ return 0; /* XXX should we send ETP_SMBUS_ENABLE_TP here? */
+}
+
+static int elan_smbus_power_control(struct i2c_client *client, bool enable)
+{
+ return 0; /* A no-op */
+}
+
+static int elan_smbus_calibrate(struct i2c_client *client)
+{
+ u8 cmd[4] = { 0x00, 0x08, 0x00, 0x01 };
+
+ return i2c_smbus_write_block_data(client, ETP_SMBUS_IAP_CMD,
+ sizeof(cmd), cmd);
+}
+
+static int elan_smbus_calibrate_result(struct i2c_client *client, u8 *val)
+{
+ int error;
+
+ error = i2c_smbus_read_block_data(client,
+ ETP_SMBUS_CALIBRATE_QUERY, val);
+ if (error < 0)
+ return error;
+
+ return 0;
+}
+
+static int elan_smbus_get_baseline_data(struct i2c_client *client,
+ bool max_baseline, u8 *value)
+{
+ int error;
+ u8 val[3];
+
+ error = i2c_smbus_read_block_data(client,
+ max_baseline ?
+ ETP_SMBUS_MAX_BASELINE_CMD :
+ ETP_SMBUS_MIN_BASELINE_CMD,
+ val);
+ if (error < 0)
+ return error;
+
+ *value = be16_to_cpup((__be16 *)val);
+
+ return 0;
+}
+
+static int elan_smbus_get_version(struct i2c_client *client,
+ bool iap, u8 *version)
+{
+ int error;
+ u8 val[3];
+
+ error = i2c_smbus_read_block_data(client,
+ iap ? ETP_SMBUS_IAP_VERSION_CMD :
+ ETP_SMBUS_FW_VERSION_CMD,
+ val);
+ if (error < 0) {
+ dev_err(&client->dev, "failed to get %s version: %d\n",
+ iap ? "IAP" : "FW", error);
+ return error;
+ }
+
+ *version = val[2];
+ return 0;
+}
+
+static int elan_smbus_get_sm_version(struct i2c_client *client, u8 *version)
+{
+ int error;
+ u8 val[3];
+
+ error = i2c_smbus_read_block_data(client,
+ ETP_SMBUS_SM_VERSION_CMD, val);
+ if (error < 0) {
+ dev_err(&client->dev, "failed to get SM version: %d\n", error);
+ return error;
+ }
+
+ *version = val[0]; /* XXX Why 0 and not 2 as in IAP/FW versions? */
+ return 0;
+}
+
+static int elan_smbus_get_product_id(struct i2c_client *client, u8 *id)
+{
+ int error;
+ u8 val[3];
+
+ error = i2c_smbus_read_block_data(client,
+ ETP_SMBUS_UNIQUEID_CMD, val);
+ if (error < 0) {
+ dev_err(&client->dev, "failed to get product ID: %d\n", error);
+ return error;
+ }
+
+ *id = val[1];
+ return 0;
+}
+
+static int elan_smbus_get_checksum(struct i2c_client *client,
+ bool iap, u16 *csum)
+{
+ int error;
+ u8 val[3];
+
+ error = i2c_smbus_read_block_data(client,
+ iap ? ETP_SMBUS_FW_CHECKSUM_CMD :
+ ETP_SMBUS_IAP_CHECKSUM_CMD,
+ val);
+ if (error < 0) {
+ dev_err(&client->dev, "failed to get %s checksum: %d\n",
+ iap ? "IAP" : "FW", error);
+ return error;
+ }
+
+ *csum = be16_to_cpup((__be16 *)val);
+ return 0;
+}
+
+static int elan_smbus_get_max(struct i2c_client *client,
+ unsigned int *max_x, unsigned int *max_y)
+{
+ int error;
+ u8 val[3];
+
+ error = i2c_smbus_read_block_data(client, ETP_SMBUS_RANGE_CMD, val);
+ if (error) {
+ dev_err(&client->dev, "failed to get dimensions: %d\n", error);
+ return error;
+ }
+
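+ /* val[0] packs the top nibbles of X (low nibble) and Y (high nibble); val[1]/val[2] hold the low bytes. */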
+ *max_x = (0x0f & val[0]) << 8 | val[1];
+ *max_y = (0xf0 & val[0]) << 4 | val[2];
+
+ return 0;
+}
+
+static int elan_smbus_get_resolution(struct i2c_client *client,
+ u8 *hw_res_x, u8 *hw_res_y)
+{
+ int error;
+ u8 val[3];
+
+ error = i2c_smbus_read_block_data(client,
+ ETP_SMBUS_RESOLUTION_CMD, val);
+ if (error) {
+ dev_err(&client->dev, "failed to get resolution: %d\n", error);
+ return error;
+ }
+
+ *hw_res_x = val[1] & 0x0F;
+ *hw_res_y = (val[1] & 0xF0) >> 4;
+
+ return 0;
+}
+
+static int elan_smbus_get_num_traces(struct i2c_client *client,
+ unsigned int *x_traces,
+ unsigned int *y_traces)
+{
+ int error;
+ u8 val[3];
+
+ error = i2c_smbus_read_block_data(client,
+ ETP_SMBUS_XY_TRACENUM_CMD, val);
+ if (error) {
+ dev_err(&client->dev, "failed to get trace info: %d\n", error);
+ return error;
+ }
+
+ *x_traces = val[1] - 1;
+ *y_traces = val[2] - 1;
+
+ return 0;
+}
+
+static int elan_smbus_iap_get_mode(struct i2c_client *client,
+ enum tp_mode *mode)
+{
+ int error;
+ u16 constant;
+ u8 val[3];
+
+ error = i2c_smbus_read_block_data(client, ETP_SMBUS_IAP_CTRL_CMD, val);
+ if (error < 0) {
+ dev_err(&client->dev, "failed to read iap ctrol register: %d\n",
+ error);
+ return error;
+ }
+
+ constant = be16_to_cpup((__be16 *)val);
+ dev_dbg(&client->dev, "iap control reg: 0x%04x.\n", constant);
+
+ *mode = (constant & ETP_SMBUS_IAP_MODE_ON) ? IAP_MODE : MAIN_MODE;
+
+ return 0;
+}
+
+static int elan_smbus_iap_reset(struct i2c_client *client)
+{
+ int error;
+
+ error = i2c_smbus_write_byte(client, ETP_SMBUS_IAP_RESET_CMD);
+ if (error) {
+ dev_err(&client->dev, "cannot reset IC: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int elan_smbus_set_flash_key(struct i2c_client *client)
+{
+ int error;
+ u8 cmd[4] = { 0x00, 0x0B, 0x00, 0x5A };
+
+ error = i2c_smbus_write_block_data(client, ETP_SMBUS_IAP_CMD,
+ sizeof(cmd), cmd);
+ if (error) {
+ dev_err(&client->dev, "cannot set flash key: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int elan_smbus_prepare_fw_update(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ int len;
+ int error;
+ enum tp_mode mode;
+ u8 val[3];
+ u8 cmd[4] = {0x0F, 0x78, 0x00, 0x06};
+ u16 password;
+
+ /* Check which mode (IAP_MODE or MAIN_MODE) the FW is in */
+ error = elan_smbus_iap_get_mode(client, &mode);
+ if (error)
+ return error;
+
+ if (mode == MAIN_MODE) {
+
+ /* set flash key */
+ error = elan_smbus_set_flash_key(client);
+ if (error)
+ return error;
+
+ /* write iap password */
+ if (i2c_smbus_write_byte(client,
+ ETP_SMBUS_IAP_PASSWORD_WRITE) < 0) {
+ dev_err(dev, "cannot write iap password\n");
+ return -EIO;
+ }
+
+ error = i2c_smbus_write_block_data(client, ETP_SMBUS_IAP_CMD,
+ sizeof(cmd), cmd);
+ if (error) {
+ dev_err(dev, "failed to write iap password: %d\n",
+ error);
+ return error;
+ }
+
+ /*
+ * Read back password to make sure we enabled flash
+ * successfully.
+ */
+ len = i2c_smbus_read_block_data(client,
+ ETP_SMBUS_IAP_PASSWORD_READ,
+ val);
+ if (len < sizeof(u16)) {
+ error = len < 0 ? len : -EIO;
+ dev_err(dev, "failed to read iap password: %d\n",
+ error);
+ return error;
+ }
+
+ password = be16_to_cpup((__be16 *)val);
+ if (password != ETP_SMBUS_IAP_PASSWORD) {
+ dev_err(dev, "wrong iap password = 0x%X\n", password);
+ return -EIO;
+ }
+
+ /* Wait 30ms for MAIN_MODE to change to IAP_MODE */
+ msleep(30);
+ }
+
+ error = elan_smbus_set_flash_key(client);
+ if (error)
+ return error;
+
+ /* Reset IC */
+ error = elan_smbus_iap_reset(client);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+
+static int elan_smbus_write_fw_block(struct i2c_client *client,
+ const u8 *page, u16 checksum, int idx)
+{
+ struct device *dev = &client->dev;
+ int error;
+ u16 result;
+ u8 val[3];
+
+ /*
+ * Because the SMBus protocol limits transfers to 32 bytes
+ * at a time, we must split the block into 2 transfers.
+ */
+ error = i2c_smbus_write_block_data(client,
+ ETP_SMBUS_WRITE_FW_BLOCK,
+ ETP_FW_PAGE_SIZE / 2,
+ page);
+ if (error) {
+ dev_err(dev, "Failed to write page %d (part %d): %d\n",
+ idx, 1, error);
+ return error;
+ }
+
+ error = i2c_smbus_write_block_data(client,
+ ETP_SMBUS_WRITE_FW_BLOCK,
+ ETP_FW_PAGE_SIZE / 2,
+ page + ETP_FW_PAGE_SIZE / 2);
+ if (error) {
+ dev_err(dev, "Failed to write page %d (part %d): %d\n",
+ idx, 2, error);
+ return error;
+ }
+
+
+ /* Wait for F/W to update one page ROM data. */
+ usleep_range(8000, 10000);
+
+ error = i2c_smbus_read_block_data(client,
+ ETP_SMBUS_IAP_CTRL_CMD, val);
+ if (error < 0) {
+ dev_err(dev, "Failed to read IAP write result: %d\n",
+ error);
+ return error;
+ }
+
+ result = be16_to_cpup((__be16 *)val);
+ if (result & (ETP_FW_IAP_PAGE_ERR | ETP_FW_IAP_INTF_ERR)) {
+ dev_err(dev, "IAP reports failed write: %04hx\n",
+ result);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int elan_smbus_get_report(struct i2c_client *client, u8 *report)
+{
+ int len;
+
+ len = i2c_smbus_read_block_data(client,
+ ETP_SMBUS_PACKET_QUERY,
+ &report[ETP_SMBUS_REPORT_OFFSET]);
+ if (len < 0) {
+ dev_err(&client->dev, "failed to read report data: %d\n", len);
+ return len;
+ }
+
+ if (len != ETP_SMBUS_REPORT_LEN) {
+ dev_err(&client->dev,
+ "wrong report length (%d vs %d expected)\n",
+ len, ETP_SMBUS_REPORT_LEN);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int elan_smbus_finish_fw_update(struct i2c_client *client,
+ struct completion *fw_completion)
+{
+ /* No special handling needed, unlike the I2C transport */
+ return 0;
+}
+
+const struct elan_transport_ops elan_smbus_ops = {
+ .initialize = elan_smbus_initialize,
+ .sleep_control = elan_smbus_sleep_control,
+ .power_control = elan_smbus_power_control,
+ .set_mode = elan_smbus_set_mode,
+
+ .calibrate = elan_smbus_calibrate,
+ .calibrate_result = elan_smbus_calibrate_result,
+
+ .get_baseline_data = elan_smbus_get_baseline_data,
+
+ .get_version = elan_smbus_get_version,
+ .get_sm_version = elan_smbus_get_sm_version,
+ .get_product_id = elan_smbus_get_product_id,
+ .get_checksum = elan_smbus_get_checksum,
+
+ .get_max = elan_smbus_get_max,
+ .get_resolution = elan_smbus_get_resolution,
+ .get_num_traces = elan_smbus_get_num_traces,
+
+ .iap_get_mode = elan_smbus_iap_get_mode,
+ .iap_reset = elan_smbus_iap_reset,
+
+ .prepare_fw_update = elan_smbus_prepare_fw_update,
+ .write_fw_block = elan_smbus_write_fw_block,
+ .finish_fw_update = elan_smbus_finish_fw_update,
+
+ .get_report = elan_smbus_get_report,
+};
diff --git a/drivers/input/mouse/lifebook.h b/drivers/input/mouse/lifebook.h
index 4c4326c6f50..0baf02a70a9 100644
--- a/drivers/input/mouse/lifebook.h
+++ b/drivers/input/mouse/lifebook.h
@@ -16,14 +16,14 @@ void lifebook_module_init(void);
int lifebook_detect(struct psmouse *psmouse, bool set_properties);
int lifebook_init(struct psmouse *psmouse);
#else
-inline void lifebook_module_init(void)
+static inline void lifebook_module_init(void)
{
}
-inline int lifebook_detect(struct psmouse *psmouse, bool set_properties)
+static inline int lifebook_detect(struct psmouse *psmouse, bool set_properties)
{
return -ENOSYS;
}
-inline int lifebook_init(struct psmouse *psmouse)
+static inline int lifebook_init(struct psmouse *psmouse)
{
return -ENOSYS;
}
diff --git a/drivers/input/mouse/navpoint.c b/drivers/input/mouse/navpoint.c
index 2a0360f5b5f..d6e8f58a1de 100644
--- a/drivers/input/mouse/navpoint.c
+++ b/drivers/input/mouse/navpoint.c
@@ -318,8 +318,7 @@ static int navpoint_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int navpoint_suspend(struct device *dev)
+static int __maybe_unused navpoint_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct navpoint *navpoint = platform_get_drvdata(pdev);
@@ -333,7 +332,7 @@ static int navpoint_suspend(struct device *dev)
return 0;
}
-static int navpoint_resume(struct device *dev)
+static int __maybe_unused navpoint_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct navpoint *navpoint = platform_get_drvdata(pdev);
@@ -346,7 +345,6 @@ static int navpoint_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(navpoint_pm_ops, navpoint_suspend, navpoint_resume);
diff --git a/drivers/input/mouse/synaptics_i2c.c b/drivers/input/mouse/synaptics_i2c.c
index ad822608f6e..878f18498f3 100644
--- a/drivers/input/mouse/synaptics_i2c.c
+++ b/drivers/input/mouse/synaptics_i2c.c
@@ -614,8 +614,7 @@ static int synaptics_i2c_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int synaptics_i2c_suspend(struct device *dev)
+static int __maybe_unused synaptics_i2c_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct synaptics_i2c *touch = i2c_get_clientdata(client);
@@ -628,7 +627,7 @@ static int synaptics_i2c_suspend(struct device *dev)
return 0;
}
-static int synaptics_i2c_resume(struct device *dev)
+static int __maybe_unused synaptics_i2c_resume(struct device *dev)
{
int ret;
struct i2c_client *client = to_i2c_client(dev);
@@ -643,7 +642,6 @@ static int synaptics_i2c_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(synaptics_i2c_pm, synaptics_i2c_suspend,
synaptics_i2c_resume);
diff --git a/drivers/input/serio/altera_ps2.c b/drivers/input/serio/altera_ps2.c
index 8921c96589b..131d7826dc6 100644
--- a/drivers/input/serio/altera_ps2.c
+++ b/drivers/input/serio/altera_ps2.c
@@ -24,9 +24,7 @@
struct ps2if {
struct serio *io;
- struct resource *iomem_res;
void __iomem *base;
- unsigned irq;
};
/*
@@ -83,16 +81,34 @@ static void altera_ps2_close(struct serio *io)
static int altera_ps2_probe(struct platform_device *pdev)
{
struct ps2if *ps2if;
+ struct resource *res;
struct serio *serio;
int error, irq;
- ps2if = kzalloc(sizeof(struct ps2if), GFP_KERNEL);
- serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
- if (!ps2if || !serio) {
- error = -ENOMEM;
- goto err_free_mem;
+ ps2if = devm_kzalloc(&pdev->dev, sizeof(struct ps2if), GFP_KERNEL);
+ if (!ps2if)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ps2if->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ps2if->base))
+ return PTR_ERR(ps2if->base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -ENXIO;
+
+ error = devm_request_irq(&pdev->dev, irq, altera_ps2_rxint, 0,
+ pdev->name, ps2if);
+ if (error) {
+ dev_err(&pdev->dev, "could not request IRQ %d\n", irq);
+ return error;
}
+ serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
+ if (!serio)
+ return -ENOMEM;
+
serio->id.type = SERIO_8042;
serio->write = altera_ps2_write;
serio->open = altera_ps2_open;
@@ -103,56 +119,12 @@ static int altera_ps2_probe(struct platform_device *pdev)
serio->dev.parent = &pdev->dev;
ps2if->io = serio;
- ps2if->iomem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (ps2if->iomem_res == NULL) {
- error = -ENOENT;
- goto err_free_mem;
- }
-
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- error = -ENXIO;
- goto err_free_mem;
- }
- ps2if->irq = irq;
-
- if (!request_mem_region(ps2if->iomem_res->start,
- resource_size(ps2if->iomem_res), pdev->name)) {
- error = -EBUSY;
- goto err_free_mem;
- }
-
- ps2if->base = ioremap(ps2if->iomem_res->start,
- resource_size(ps2if->iomem_res));
- if (!ps2if->base) {
- error = -ENOMEM;
- goto err_free_res;
- }
-
- error = request_irq(ps2if->irq, altera_ps2_rxint, 0, pdev->name, ps2if);
- if (error) {
- dev_err(&pdev->dev, "could not allocate IRQ %d: %d\n",
- ps2if->irq, error);
- goto err_unmap;
- }
-
- dev_info(&pdev->dev, "base %p, irq %d\n", ps2if->base, ps2if->irq);
+ dev_info(&pdev->dev, "base %p, irq %d\n", ps2if->base, irq);
serio_register_port(ps2if->io);
platform_set_drvdata(pdev, ps2if);
return 0;
-
- err_unmap:
- iounmap(ps2if->base);
- err_free_res:
- release_mem_region(ps2if->iomem_res->start,
- resource_size(ps2if->iomem_res));
- err_free_mem:
- kfree(ps2if);
- kfree(serio);
- return error;
}
/*
@@ -163,11 +135,6 @@ static int altera_ps2_remove(struct platform_device *pdev)
struct ps2if *ps2if = platform_get_drvdata(pdev);
serio_unregister_port(ps2if->io);
- free_irq(ps2if->irq, ps2if);
- iounmap(ps2if->base);
- release_mem_region(ps2if->iomem_res->start,
- resource_size(ps2if->iomem_res));
- kfree(ps2if);
return 0;
}
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index faeeb137246..c66d1b53843 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -579,6 +579,16 @@ static const struct dmi_system_id __initconst i8042_dmi_nopnp_table[] = {
},
},
{
+ /*
+ * Intel NUC D54250WYK - does not have i8042 controller but
+ * declares PS/2 devices in DSDT.
+ */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "D54250WYK"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
+ },
+ },
+ {
/* MSI Wind U-100 */
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "U-100"),
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index d399b8b0f00..a05a5179da3 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -514,7 +514,7 @@ static void serio_release_port(struct device *dev)
*/
static void serio_init_port(struct serio *serio)
{
- static atomic_t serio_no = ATOMIC_INIT(0);
+ static atomic_t serio_no = ATOMIC_INIT(-1);
__module_get(THIS_MODULE);
@@ -525,7 +525,7 @@ static void serio_init_port(struct serio *serio)
mutex_init(&serio->drv_mutex);
device_initialize(&serio->dev);
dev_set_name(&serio->dev, "serio%lu",
- (unsigned long)atomic_inc_return(&serio_no) - 1);
+ (unsigned long)atomic_inc_return(&serio_no));
serio->dev.bus = &serio_bus;
serio->dev.release = serio_release_port;
serio->dev.groups = serio_device_attr_groups;
diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
index c9a02fe5757..71ef5d65a0c 100644
--- a/drivers/input/serio/serio_raw.c
+++ b/drivers/input/serio/serio_raw.c
@@ -292,7 +292,7 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data,
static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
{
- static atomic_t serio_raw_no = ATOMIC_INIT(0);
+ static atomic_t serio_raw_no = ATOMIC_INIT(-1);
struct serio_raw *serio_raw;
int err;
@@ -303,7 +303,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
}
snprintf(serio_raw->name, sizeof(serio_raw->name),
- "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no) - 1);
+ "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no));
kref_init(&serio_raw->kref);
INIT_LIST_HEAD(&serio_raw->client_list);
init_waitqueue_head(&serio_raw->wait);
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index e1d8003d01f..58917525126 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -295,6 +295,19 @@ config TOUCHSCREEN_FUJITSU
To compile this driver as a module, choose M here: the
module will be called fujitsu-ts.
+config TOUCHSCREEN_GOODIX
+ tristate "Goodix I2C touchscreen"
+ depends on I2C && ACPI
+ help
+ Say Y here if you have the Goodix touchscreen (such as one
+ installed in Onda v975w tablets) connected to your
+ system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called goodix.
+
config TOUCHSCREEN_ILI210X
tristate "Ilitek ILI210X based touchscreen"
depends on I2C
@@ -334,6 +347,18 @@ config TOUCHSCREEN_GUNZE
To compile this driver as a module, choose M here: the
module will be called gunze.
+config TOUCHSCREEN_ELAN
+ tristate "Elan eKTH I2C touchscreen"
+ depends on I2C
+ help
+ Say Y here if you have an Elan eKTH I2C touchscreen
+ connected to your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called elants_i2c.
+
config TOUCHSCREEN_ELO
tristate "Elo serial touchscreens"
select SERIO
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 090e61cc917..0242fea2102 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -31,9 +31,11 @@ obj-$(CONFIG_TOUCHSCREEN_EDT_FT5X06) += edt-ft5x06.o
obj-$(CONFIG_TOUCHSCREEN_HAMPSHIRE) += hampshire.o
obj-$(CONFIG_TOUCHSCREEN_GUNZE) += gunze.o
obj-$(CONFIG_TOUCHSCREEN_EETI) += eeti_ts.o
+obj-$(CONFIG_TOUCHSCREEN_ELAN) += elants_i2c.o
obj-$(CONFIG_TOUCHSCREEN_ELO) += elo.o
obj-$(CONFIG_TOUCHSCREEN_EGALAX) += egalax_ts.o
obj-$(CONFIG_TOUCHSCREEN_FUJITSU) += fujitsu_ts.o
+obj-$(CONFIG_TOUCHSCREEN_GOODIX) += goodix.o
obj-$(CONFIG_TOUCHSCREEN_ILI210X) += ili210x.o
obj-$(CONFIG_TOUCHSCREEN_INEXIO) += inexio.o
obj-$(CONFIG_TOUCHSCREEN_INTEL_MID) += intel-mid-touch.o
diff --git a/drivers/input/touchscreen/ad7877.c b/drivers/input/touchscreen/ad7877.c
index 523865daa1d..da4e5bb5e04 100644
--- a/drivers/input/touchscreen/ad7877.c
+++ b/drivers/input/touchscreen/ad7877.c
@@ -820,8 +820,7 @@ static int ad7877_remove(struct spi_device *spi)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int ad7877_suspend(struct device *dev)
+static int __maybe_unused ad7877_suspend(struct device *dev)
{
struct ad7877 *ts = dev_get_drvdata(dev);
@@ -830,7 +829,7 @@ static int ad7877_suspend(struct device *dev)
return 0;
}
-static int ad7877_resume(struct device *dev)
+static int __maybe_unused ad7877_resume(struct device *dev)
{
struct ad7877 *ts = dev_get_drvdata(dev);
@@ -838,7 +837,6 @@ static int ad7877_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(ad7877_pm, ad7877_suspend, ad7877_resume);
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c
index 1eb9d3c2088..fec66ad8051 100644
--- a/drivers/input/touchscreen/ad7879.c
+++ b/drivers/input/touchscreen/ad7879.c
@@ -284,8 +284,7 @@ static void ad7879_close(struct input_dev* input)
__ad7879_disable(ts);
}
-#ifdef CONFIG_PM_SLEEP
-static int ad7879_suspend(struct device *dev)
+static int __maybe_unused ad7879_suspend(struct device *dev)
{
struct ad7879 *ts = dev_get_drvdata(dev);
@@ -301,7 +300,7 @@ static int ad7879_suspend(struct device *dev)
return 0;
}
-static int ad7879_resume(struct device *dev)
+static int __maybe_unused ad7879_resume(struct device *dev)
{
struct ad7879 *ts = dev_get_drvdata(dev);
@@ -316,7 +315,6 @@ static int ad7879_resume(struct device *dev)
return 0;
}
-#endif
SIMPLE_DEV_PM_OPS(ad7879_pm_ops, ad7879_suspend, ad7879_resume);
EXPORT_SYMBOL(ad7879_pm_ops);
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index e57ba52bf48..e4eb8a6c658 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -883,8 +883,7 @@ static irqreturn_t ads7846_irq(int irq, void *handle)
return IRQ_HANDLED;
}
-#ifdef CONFIG_PM_SLEEP
-static int ads7846_suspend(struct device *dev)
+static int __maybe_unused ads7846_suspend(struct device *dev)
{
struct ads7846 *ts = dev_get_drvdata(dev);
@@ -906,7 +905,7 @@ static int ads7846_suspend(struct device *dev)
return 0;
}
-static int ads7846_resume(struct device *dev)
+static int __maybe_unused ads7846_resume(struct device *dev)
{
struct ads7846 *ts = dev_get_drvdata(dev);
@@ -927,7 +926,6 @@ static int ads7846_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(ads7846_pm, ads7846_suspend, ads7846_resume);
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index aaacf8bfa61..bb070206223 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -2244,8 +2244,7 @@ static int mxt_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int mxt_suspend(struct device *dev)
+static int __maybe_unused mxt_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct mxt_data *data = i2c_get_clientdata(client);
@@ -2261,7 +2260,7 @@ static int mxt_suspend(struct device *dev)
return 0;
}
-static int mxt_resume(struct device *dev)
+static int __maybe_unused mxt_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct mxt_data *data = i2c_get_clientdata(client);
@@ -2276,7 +2275,6 @@ static int mxt_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(mxt_pm_ops, mxt_suspend, mxt_resume);
diff --git a/drivers/input/touchscreen/auo-pixcir-ts.c b/drivers/input/touchscreen/auo-pixcir-ts.c
index 7f3c9478778..40e02dd5b2f 100644
--- a/drivers/input/touchscreen/auo-pixcir-ts.c
+++ b/drivers/input/touchscreen/auo-pixcir-ts.c
@@ -417,8 +417,7 @@ static void auo_pixcir_input_close(struct input_dev *dev)
return;
}
-#ifdef CONFIG_PM_SLEEP
-static int auo_pixcir_suspend(struct device *dev)
+static int __maybe_unused auo_pixcir_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct auo_pixcir_ts *ts = i2c_get_clientdata(client);
@@ -450,7 +449,7 @@ unlock:
return ret;
}
-static int auo_pixcir_resume(struct device *dev)
+static int __maybe_unused auo_pixcir_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct auo_pixcir_ts *ts = i2c_get_clientdata(client);
@@ -479,7 +478,6 @@ unlock:
return ret;
}
-#endif
static SIMPLE_DEV_PM_OPS(auo_pixcir_pm_ops,
auo_pixcir_suspend, auo_pixcir_resume);
diff --git a/drivers/input/touchscreen/cy8ctmg110_ts.c b/drivers/input/touchscreen/cy8ctmg110_ts.c
index 5bf1aeeea82..f2119ee0e21 100644
--- a/drivers/input/touchscreen/cy8ctmg110_ts.c
+++ b/drivers/input/touchscreen/cy8ctmg110_ts.c
@@ -291,8 +291,7 @@ err_free_mem:
return err;
}
-#ifdef CONFIG_PM_SLEEP
-static int cy8ctmg110_suspend(struct device *dev)
+static int __maybe_unused cy8ctmg110_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct cy8ctmg110 *ts = i2c_get_clientdata(client);
@@ -306,7 +305,7 @@ static int cy8ctmg110_suspend(struct device *dev)
return 0;
}
-static int cy8ctmg110_resume(struct device *dev)
+static int __maybe_unused cy8ctmg110_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct cy8ctmg110 *ts = i2c_get_clientdata(client);
@@ -319,7 +318,6 @@ static int cy8ctmg110_resume(struct device *dev)
}
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(cy8ctmg110_pm, cy8ctmg110_suspend, cy8ctmg110_resume);
diff --git a/drivers/input/touchscreen/cyttsp_core.c b/drivers/input/touchscreen/cyttsp_core.c
index eee656f77a2..5b74e8b84e7 100644
--- a/drivers/input/touchscreen/cyttsp_core.c
+++ b/drivers/input/touchscreen/cyttsp_core.c
@@ -472,8 +472,7 @@ static int cyttsp_disable(struct cyttsp *ts)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int cyttsp_suspend(struct device *dev)
+static int __maybe_unused cyttsp_suspend(struct device *dev)
{
struct cyttsp *ts = dev_get_drvdata(dev);
int retval = 0;
@@ -491,7 +490,7 @@ static int cyttsp_suspend(struct device *dev)
return retval;
}
-static int cyttsp_resume(struct device *dev)
+static int __maybe_unused cyttsp_resume(struct device *dev)
{
struct cyttsp *ts = dev_get_drvdata(dev);
@@ -507,8 +506,6 @@ static int cyttsp_resume(struct device *dev)
return 0;
}
-#endif
-
SIMPLE_DEV_PM_OPS(cyttsp_pm_ops, cyttsp_suspend, cyttsp_resume);
EXPORT_SYMBOL_GPL(cyttsp_pm_ops);
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index ee3434f1e94..3793fcc7e5d 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -1092,8 +1092,7 @@ static int edt_ft5x06_ts_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int edt_ft5x06_ts_suspend(struct device *dev)
+static int __maybe_unused edt_ft5x06_ts_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -1103,7 +1102,7 @@ static int edt_ft5x06_ts_suspend(struct device *dev)
return 0;
}
-static int edt_ft5x06_ts_resume(struct device *dev)
+static int __maybe_unused edt_ft5x06_ts_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -1112,7 +1111,6 @@ static int edt_ft5x06_ts_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(edt_ft5x06_ts_pm_ops,
edt_ft5x06_ts_suspend, edt_ft5x06_ts_resume);
diff --git a/drivers/input/touchscreen/eeti_ts.c b/drivers/input/touchscreen/eeti_ts.c
index b1884ddd7a8..09be6ced715 100644
--- a/drivers/input/touchscreen/eeti_ts.c
+++ b/drivers/input/touchscreen/eeti_ts.c
@@ -264,8 +264,7 @@ static int eeti_ts_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int eeti_ts_suspend(struct device *dev)
+static int __maybe_unused eeti_ts_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct eeti_ts_priv *priv = i2c_get_clientdata(client);
@@ -284,7 +283,7 @@ static int eeti_ts_suspend(struct device *dev)
return 0;
}
-static int eeti_ts_resume(struct device *dev)
+static int __maybe_unused eeti_ts_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct eeti_ts_priv *priv = i2c_get_clientdata(client);
@@ -302,7 +301,6 @@ static int eeti_ts_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(eeti_ts_pm, eeti_ts_suspend, eeti_ts_resume);
diff --git a/drivers/input/touchscreen/egalax_ts.c b/drivers/input/touchscreen/egalax_ts.c
index c8057847d71..4c56299284e 100644
--- a/drivers/input/touchscreen/egalax_ts.c
+++ b/drivers/input/touchscreen/egalax_ts.c
@@ -239,8 +239,7 @@ static const struct i2c_device_id egalax_ts_id[] = {
};
MODULE_DEVICE_TABLE(i2c, egalax_ts_id);
-#ifdef CONFIG_PM_SLEEP
-static int egalax_ts_suspend(struct device *dev)
+static int __maybe_unused egalax_ts_suspend(struct device *dev)
{
static const u8 suspend_cmd[MAX_I2C_DATA_LEN] = {
0x3, 0x6, 0xa, 0x3, 0x36, 0x3f, 0x2, 0, 0, 0
@@ -252,13 +251,12 @@ static int egalax_ts_suspend(struct device *dev)
return ret > 0 ? 0 : ret;
}
-static int egalax_ts_resume(struct device *dev)
+static int __maybe_unused egalax_ts_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
return egalax_wake_up_device(client);
}
-#endif
static SIMPLE_DEV_PM_OPS(egalax_ts_pm_ops, egalax_ts_suspend, egalax_ts_resume);
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
new file mode 100644
index 00000000000..a510f7ef9b6
--- /dev/null
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -0,0 +1,1271 @@
+/*
+ * Elan Microelectronics touch panels with I2C interface
+ *
+ * Copyright (C) 2014 Elan Microelectronics Corporation.
+ * Scott Liu <scott.liu@emc.com.tw>
+ *
+ * This code is partly based on hid-multitouch.c:
+ *
+ * Copyright (c) 2010-2012 Stephane Chatty <chatty@enac.fr>
+ * Copyright (c) 2010-2012 Benjamin Tissoires <benjamin.tissoires@gmail.com>
+ * Copyright (c) 2010-2012 Ecole Nationale de l'Aviation Civile, France
+ *
+ *
+ * This code is partly based on i2c-hid.c:
+ *
+ * Copyright (c) 2012 Benjamin Tissoires <benjamin.tissoires@gmail.com>
+ * Copyright (c) 2012 Ecole Nationale de l'Aviation Civile, France
+ * Copyright (c) 2012 Red Hat, Inc
+ */
+
+/*
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ */
+
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/async.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/buffer_head.h>
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/firmware.h>
+#include <linux/input/mt.h>
+#include <linux/acpi.h>
+#include <linux/of.h>
+#include <asm/unaligned.h>
+
+/* Device, Driver information */
+#define DEVICE_NAME "elants_i2c"
+#define DRV_VERSION "1.0.9"
+
+/* Convert from rows or columns into resolution */
+#define ELAN_TS_RESOLUTION(n, m) (((n) - 1) * (m))
+
+/* FW header data */
+#define HEADER_SIZE 4
+#define FW_HDR_TYPE 0
+#define FW_HDR_COUNT 1
+#define FW_HDR_LENGTH 2
+
+/* Buffer mode Queue Header information */
+#define QUEUE_HEADER_SINGLE 0x62
+#define QUEUE_HEADER_NORMAL 0x63
+#define QUEUE_HEADER_WAIT 0x64
+
+/* Command header definition */
+#define CMD_HEADER_WRITE 0x54
+#define CMD_HEADER_READ 0x53
+#define CMD_HEADER_6B_READ 0x5B
+#define CMD_HEADER_RESP 0x52
+#define CMD_HEADER_6B_RESP 0x9B
+#define CMD_HEADER_HELLO 0x55
+#define CMD_HEADER_REK 0x66
+
+/* FW position data */
+#define PACKET_SIZE 55
+#define MAX_CONTACT_NUM 10
+#define FW_POS_HEADER 0
+#define FW_POS_STATE 1
+#define FW_POS_TOTAL 2
+#define FW_POS_XY 3
+#define FW_POS_CHECKSUM 34
+#define FW_POS_WIDTH 35
+#define FW_POS_PRESSURE 45
+
+#define HEADER_REPORT_10_FINGER 0x62
+
+/* Header (4 bytes) plus 3 full 10-finger packets */
+#define MAX_PACKET_SIZE 169
+
+#define BOOT_TIME_DELAY_MS 50
+
+/* FW read command, 0x53 0x?? 0x0, 0x01 */
+#define E_ELAN_INFO_FW_VER 0x00
+#define E_ELAN_INFO_BC_VER 0x10
+#define E_ELAN_INFO_TEST_VER 0xE0
+#define E_ELAN_INFO_FW_ID 0xF0
+#define E_INFO_OSR 0xD6
+#define E_INFO_PHY_SCAN 0xD7
+#define E_INFO_PHY_DRIVER 0xD8
+
+#define MAX_RETRIES 3
+#define MAX_FW_UPDATE_RETRIES 30
+
+#define ELAN_FW_PAGESIZE 132
+#define ELAN_FW_FILENAME "elants_i2c.bin"
+
+/* calibration timeout definition */
+#define ELAN_CALI_TIMEOUT_MSEC 10000
+
+enum elants_state {
+ ELAN_STATE_NORMAL,
+ ELAN_WAIT_QUEUE_HEADER,
+ ELAN_WAIT_RECALIBRATION,
+};
+
+enum elants_iap_mode {
+ ELAN_IAP_OPERATIONAL,
+ ELAN_IAP_RECOVERY,
+};
+
+/* struct elants_data - represents state of Elan touchscreen device */
+struct elants_data {
+ struct i2c_client *client;
+ struct input_dev *input;
+
+ u16 fw_version;
+ u8 test_version;
+ u8 solution_version;
+ u8 bc_version;
+ u8 iap_version;
+ u16 hw_version;
+ unsigned int x_res; /* resolution in units/mm */
+ unsigned int y_res;
+ unsigned int x_max;
+ unsigned int y_max;
+
+ enum elants_state state;
+ enum elants_iap_mode iap_mode;
+
+ /* Guards against concurrent access to the device via sysfs */
+ struct mutex sysfs_mutex;
+
+ u8 cmd_resp[HEADER_SIZE];
+ struct completion cmd_done;
+
+ u8 buf[MAX_PACKET_SIZE];
+
+ bool wake_irq_enabled;
+};
+
+static int elants_i2c_send(struct i2c_client *client,
+ const void *data, size_t size)
+{
+ int ret;
+
+ ret = i2c_master_send(client, data, size);
+ if (ret == size)
+ return 0;
+
+ if (ret >= 0)
+ ret = -EIO;
+
+ dev_err(&client->dev, "%s failed (%*ph): %d\n",
+ __func__, (int)size, data, ret);
+
+ return ret;
+}
+
+static int elants_i2c_read(struct i2c_client *client, void *data, size_t size)
+{
+ int ret;
+
+ ret = i2c_master_recv(client, data, size);
+ if (ret == size)
+ return 0;
+
+ if (ret >= 0)
+ ret = -EIO;
+
+ dev_err(&client->dev, "%s failed: %d\n", __func__, ret);
+
+ return ret;
+}
+
+static int elants_i2c_execute_command(struct i2c_client *client,
+ const u8 *cmd, size_t cmd_size,
+ u8 *resp, size_t resp_size)
+{
+ struct i2c_msg msgs[2];
+ int ret;
+ u8 expected_response;
+
+ switch (cmd[0]) {
+ case CMD_HEADER_READ:
+ expected_response = CMD_HEADER_RESP;
+ break;
+
+ case CMD_HEADER_6B_READ:
+ expected_response = CMD_HEADER_6B_RESP;
+ break;
+
+ default:
+ dev_err(&client->dev, "%s: invalid command %*ph\n",
+ __func__, (int)cmd_size, cmd);
+ return -EINVAL;
+ }
+
+ msgs[0].addr = client->addr;
+ msgs[0].flags = client->flags & I2C_M_TEN;
+ msgs[0].len = cmd_size;
+ msgs[0].buf = (u8 *)cmd;
+
+ msgs[1].addr = client->addr;
+ msgs[1].flags = client->flags & I2C_M_TEN;
+ msgs[1].flags |= I2C_M_RD;
+ msgs[1].len = resp_size;
+ msgs[1].buf = resp;
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret < 0)
+ return ret;
+
+ if (ret != ARRAY_SIZE(msgs) || resp[FW_HDR_TYPE] != expected_response)
+ return -EIO;
+
+ return 0;
+}
+
+static int elants_i2c_calibrate(struct elants_data *ts)
+{
+ struct i2c_client *client = ts->client;
+ int ret, error;
+ static const u8 w_flashkey[] = { 0x54, 0xC0, 0xE1, 0x5A };
+ static const u8 rek[] = { 0x54, 0x29, 0x00, 0x01 };
+ static const u8 rek_resp[] = { CMD_HEADER_REK, 0x66, 0x66, 0x66 };
+
+ disable_irq(client->irq);
+
+ ts->state = ELAN_WAIT_RECALIBRATION;
+ reinit_completion(&ts->cmd_done);
+
+ elants_i2c_send(client, w_flashkey, sizeof(w_flashkey));
+ elants_i2c_send(client, rek, sizeof(rek));
+
+ enable_irq(client->irq);
+
+ ret = wait_for_completion_interruptible_timeout(&ts->cmd_done,
+ msecs_to_jiffies(ELAN_CALI_TIMEOUT_MSEC));
+
+ ts->state = ELAN_STATE_NORMAL;
+
+ if (ret <= 0) {
+ error = ret < 0 ? ret : -ETIMEDOUT;
+ dev_err(&client->dev,
+ "error while waiting for calibration to complete: %d\n",
+ error);
+ return error;
+ }
+
+ if (memcmp(rek_resp, ts->cmd_resp, sizeof(rek_resp))) {
+ dev_err(&client->dev,
+ "unexpected calibration response: %*ph\n",
+ (int)sizeof(ts->cmd_resp), ts->cmd_resp);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int elants_i2c_sw_reset(struct i2c_client *client)
+{
+ const u8 soft_rst_cmd[] = { 0x77, 0x77, 0x77, 0x77 };
+ int error;
+
+ error = elants_i2c_send(client, soft_rst_cmd,
+ sizeof(soft_rst_cmd));
+ if (error) {
+ dev_err(&client->dev, "software reset failed: %d\n", error);
+ return error;
+ }
+
+ /*
+ * We should wait at least 10 msec (but no more than 40) before
+ * sending fastboot or IAP command to the device.
+ */
+ msleep(30);
+
+ return 0;
+}
+
+static u16 elants_i2c_parse_version(u8 *buf)
+{
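+ /* The 16-bit version sits just above the lowest nibble of the 4-byte big-endian response. */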
+ return get_unaligned_be32(buf) >> 4;
+}
+
+static int elants_i2c_query_fw_id(struct elants_data *ts)
+{
+ struct i2c_client *client = ts->client;
+ int error, retry_cnt;
+ const u8 cmd[] = { CMD_HEADER_READ, E_ELAN_INFO_FW_ID, 0x00, 0x01 };
+ u8 resp[HEADER_SIZE];
+
+ for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
+ error = elants_i2c_execute_command(client, cmd, sizeof(cmd),
+ resp, sizeof(resp));
+ if (!error) {
+ ts->hw_version = elants_i2c_parse_version(resp);
+ if (ts->hw_version != 0xffff)
+ return 0;
+ }
+
+ dev_dbg(&client->dev, "read fw id error=%d, buf=%*phC\n",
+ error, (int)sizeof(resp), resp);
+ }
+
+ dev_err(&client->dev,
+ "Failed to read fw id or fw id is invalid\n");
+
+ return -EINVAL;
+}
+
+static int elants_i2c_query_fw_version(struct elants_data *ts)
+{
+ struct i2c_client *client = ts->client;
+ int error, retry_cnt;
+ const u8 cmd[] = { CMD_HEADER_READ, E_ELAN_INFO_FW_VER, 0x00, 0x01 };
+ u8 resp[HEADER_SIZE];
+
+ for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
+ error = elants_i2c_execute_command(client, cmd, sizeof(cmd),
+ resp, sizeof(resp));
+ if (!error) {
+ ts->fw_version = elants_i2c_parse_version(resp);
+ if (ts->fw_version != 0x0000 &&
+ ts->fw_version != 0xffff)
+ return 0;
+ }
+
+ dev_dbg(&client->dev, "read fw version error=%d, buf=%*phC\n",
+ error, (int)sizeof(resp), resp);
+ }
+
+ dev_err(&client->dev,
+ "Failed to read fw version or fw version is invalid\n");
+
+ return -EINVAL;
+}
+
+static int elants_i2c_query_test_version(struct elants_data *ts)
+{
+ struct i2c_client *client = ts->client;
+ int error, retry_cnt;
+ u16 version;
+ const u8 cmd[] = { CMD_HEADER_READ, E_ELAN_INFO_TEST_VER, 0x00, 0x01 };
+ u8 resp[HEADER_SIZE];
+
+ for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
+ error = elants_i2c_execute_command(client, cmd, sizeof(cmd),
+ resp, sizeof(resp));
+ if (!error) {
+ version = elants_i2c_parse_version(resp);
+ ts->test_version = version >> 8;
+ ts->solution_version = version & 0xff;
+
+ return 0;
+ }
+
+ dev_dbg(&client->dev,
+ "read test version error rc=%d, buf=%*phC\n",
+ error, (int)sizeof(resp), resp);
+ }
+
+ dev_err(&client->dev, "Failed to read test version\n");
+
+ return -EINVAL;
+}
+
+static int elants_i2c_query_bc_version(struct elants_data *ts)
+{
+ struct i2c_client *client = ts->client;
+ const u8 cmd[] = { CMD_HEADER_READ, E_ELAN_INFO_BC_VER, 0x00, 0x01 };
+ u8 resp[HEADER_SIZE];
+ u16 version;
+ int error;
+
+ error = elants_i2c_execute_command(client, cmd, sizeof(cmd),
+ resp, sizeof(resp));
+ if (error) {
+ dev_err(&client->dev,
+ "read BC version error=%d, buf=%*phC\n",
+ error, (int)sizeof(resp), resp);
+ return error;
+ }
+
+ version = elants_i2c_parse_version(resp);
+ ts->bc_version = version >> 8;
+ ts->iap_version = version & 0xff;
+
+ return 0;
+}
+
+static int elants_i2c_query_ts_info(struct elants_data *ts)
+{
+ struct i2c_client *client = ts->client;
+ int error;
+ u8 resp[17];
+ u16 phy_x, phy_y, rows, cols, osr;
+ const u8 get_resolution_cmd[] = {
+ CMD_HEADER_6B_READ, 0x00, 0x00, 0x00, 0x00, 0x00
+ };
+ const u8 get_osr_cmd[] = {
+ CMD_HEADER_READ, E_INFO_OSR, 0x00, 0x01
+ };
+ const u8 get_physical_scan_cmd[] = {
+ CMD_HEADER_READ, E_INFO_PHY_SCAN, 0x00, 0x01
+ };
+ const u8 get_physical_drive_cmd[] = {
+ CMD_HEADER_READ, E_INFO_PHY_DRIVER, 0x00, 0x01
+ };
+
+ /* Get trace number */
+ error = elants_i2c_execute_command(client,
+ get_resolution_cmd,
+ sizeof(get_resolution_cmd),
+ resp, sizeof(resp));
+ if (error) {
+ dev_err(&client->dev, "get resolution command failed: %d\n",
+ error);
+ return error;
+ }
+
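+ /* Trace counts are split across three groups in the response; sum them for the totals. */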
+ rows = resp[2] + resp[6] + resp[10];
+ cols = resp[3] + resp[7] + resp[11];
+
+ /* Process mm_to_pixel information */
+ error = elants_i2c_execute_command(client,
+ get_osr_cmd, sizeof(get_osr_cmd),
+ resp, sizeof(resp));
+ if (error) {
+ dev_err(&client->dev, "get osr command failed: %d\n",
+ error);
+ return error;
+ }
+
+ osr = resp[3];
+
+ error = elants_i2c_execute_command(client,
+ get_physical_scan_cmd,
+ sizeof(get_physical_scan_cmd),
+ resp, sizeof(resp));
+ if (error) {
+ dev_err(&client->dev, "get physical scan command failed: %d\n",
+ error);
+ return error;
+ }
+
+ phy_x = get_unaligned_be16(&resp[2]);
+
+ error = elants_i2c_execute_command(client,
+ get_physical_drive_cmd,
+ sizeof(get_physical_drive_cmd),
+ resp, sizeof(resp));
+ if (error) {
+ dev_err(&client->dev, "get physical drive command failed: %d\n",
+ error);
+ return error;
+ }
+
+ phy_y = get_unaligned_be16(&resp[2]);
+
+ dev_dbg(&client->dev, "phy_x=%d, phy_y=%d\n", phy_x, phy_y);
+
+ if (rows == 0 || cols == 0 || osr == 0) {
+ dev_warn(&client->dev,
+ "invalid trace number data: %d, %d, %d\n",
+ rows, cols, osr);
+ } else {
+ /* translate trace number to TS resolution */
+ ts->x_max = ELAN_TS_RESOLUTION(rows, osr);
+ ts->x_res = DIV_ROUND_CLOSEST(ts->x_max, phy_x);
+ ts->y_max = ELAN_TS_RESOLUTION(cols, osr);
+ ts->y_res = DIV_ROUND_CLOSEST(ts->y_max, phy_y);
+ }
+
+ return 0;
+}
+
+static int elants_i2c_fastboot(struct i2c_client *client)
+{
+ const u8 boot_cmd[] = { 0x4D, 0x61, 0x69, 0x6E };
+ int error;
+
+ error = elants_i2c_send(client, boot_cmd, sizeof(boot_cmd));
+ if (error) {
+ dev_err(&client->dev, "boot failed: %d\n", error);
+ return error;
+ }
+
+ dev_dbg(&client->dev, "boot success -- 0x%x\n", client->addr);
+ return 0;
+}
+
+static int elants_i2c_initialize(struct elants_data *ts)
+{
+ struct i2c_client *client = ts->client;
+ int error, retry_cnt;
+ const u8 hello_packet[] = { 0x55, 0x55, 0x55, 0x55 };
+ const u8 recov_packet[] = { 0x55, 0x55, 0x80, 0x80 };
+ u8 buf[HEADER_SIZE];
+
+ for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
+ error = elants_i2c_sw_reset(client);
+ if (error) {
+ /* Retry on failure, unless this is the last attempt; then keep going anyway */
+ if (retry_cnt < MAX_RETRIES - 1)
+ continue;
+ }
+
+ error = elants_i2c_fastboot(client);
+ if (error) {
+ /* Retry on failure, unless this is the last attempt; then keep going anyway */
+ if (retry_cnt < MAX_RETRIES - 1)
+ continue;
+ }
+
+ /* Wait for Hello packet */
+ msleep(BOOT_TIME_DELAY_MS);
+
+ error = elants_i2c_read(client, buf, sizeof(buf));
+ if (error) {
+ dev_err(&client->dev,
+ "failed to read 'hello' packet: %d\n", error);
+ } else if (!memcmp(buf, hello_packet, sizeof(hello_packet))) {
+ ts->iap_mode = ELAN_IAP_OPERATIONAL;
+ break;
+ } else if (!memcmp(buf, recov_packet, sizeof(recov_packet))) {
+ /*
+ * Setting error code will mark device
+ * in recovery mode below.
+ */
+ error = -EIO;
+ break;
+ } else {
+ error = -EINVAL;
+ dev_err(&client->dev,
+ "invalid 'hello' packet: %*ph\n",
+ (int)sizeof(buf), buf);
+ }
+ }
+
+ if (!error)
+ error = elants_i2c_query_fw_id(ts);
+ if (!error)
+ error = elants_i2c_query_fw_version(ts);
+
+ if (error) {
+ ts->iap_mode = ELAN_IAP_RECOVERY;
+ } else {
+ elants_i2c_query_test_version(ts);
+ elants_i2c_query_bc_version(ts);
+ elants_i2c_query_ts_info(ts);
+ }
+
+ return 0;
+}
+
+/*
+ * Firmware update interface.
+ */
+
+static int elants_i2c_fw_write_page(struct i2c_client *client,
+ const void *page)
+{
+ const u8 ack_ok[] = { 0xaa, 0xaa };
+ u8 buf[2];
+ int retry;
+ int error;
+
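+ /* Send one firmware page and expect the two-byte 0xaa 0xaa acknowledgement. */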
+ for (retry = 0; retry < MAX_FW_UPDATE_RETRIES; retry++) {
+ error = elants_i2c_send(client, page, ELAN_FW_PAGESIZE);
+ if (error) {
+ dev_err(&client->dev,
+ "IAP Write Page failed: %d\n", error);
+ continue;
+ }
+
+ error = elants_i2c_read(client, buf, 2);
+ if (error) {
+ dev_err(&client->dev,
+ "IAP Ack read failed: %d\n", error);
+ return error;
+ }
+
+ if (!memcmp(buf, ack_ok, sizeof(ack_ok)))
+ return 0;
+
+ error = -EIO;
+ dev_err(&client->dev,
+ "IAP Get Ack Error [%02x:%02x]\n",
+ buf[0], buf[1]);
+ }
+
+ return error;
+}
+
+static int elants_i2c_do_update_firmware(struct i2c_client *client,
+ const struct firmware *fw,
+ bool force)
+{
+ const u8 enter_iap[] = { 0x45, 0x49, 0x41, 0x50 };
+ const u8 enter_iap2[] = { 0x54, 0x00, 0x12, 0x34 };
+ const u8 iap_ack[] = { 0x55, 0xaa, 0x33, 0xcc };
+ u8 buf[HEADER_SIZE];
+ u16 send_id;
+ int page, n_fw_pages;
+ int error;
+
+ /* Recovery mode detection! */
+ if (force) {
+ dev_dbg(&client->dev, "Recovery mode procedure\n");
+ error = elants_i2c_send(client, enter_iap2, sizeof(enter_iap2));
+ } else {
+ /* Start IAP Procedure */
+ dev_dbg(&client->dev, "Normal IAP procedure\n");
+ elants_i2c_sw_reset(client);
+
+ error = elants_i2c_send(client, enter_iap, sizeof(enter_iap));
+ }
+
+ if (error) {
+ dev_err(&client->dev, "failed to enter IAP mode: %d\n", error);
+ return error;
+ }
+
+ msleep(20);
+
+ /* check IAP state */
+ error = elants_i2c_read(client, buf, 4);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to read IAP acknowledgement: %d\n",
+ error);
+ return error;
+ }
+
+ if (memcmp(buf, iap_ack, sizeof(iap_ack))) {
+ dev_err(&client->dev,
+ "failed to enter IAP: %*ph (expected %*ph)\n",
+ (int)sizeof(buf), buf, (int)sizeof(iap_ack), iap_ack);
+ return -EIO;
+ }
+
+ dev_info(&client->dev, "successfully entered IAP mode");
+
+ send_id = client->addr;
+ error = elants_i2c_send(client, &send_id, 1);
+ if (error) {
+ dev_err(&client->dev, "sending dummy byte failed: %d\n",
+ error);
+ return error;
+ }
+
+ /* Clear the last page of Master */
+ error = elants_i2c_send(client, fw->data, ELAN_FW_PAGESIZE);
+ if (error) {
+ dev_err(&client->dev, "clearing of the last page failed: %d\n",
+ error);
+ return error;
+ }
+
+ error = elants_i2c_read(client, buf, 2);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to read ACK for clearing the last page: %d\n",
+ error);
+ return error;
+ }
+
+ n_fw_pages = fw->size / ELAN_FW_PAGESIZE;
+ dev_dbg(&client->dev, "IAP Pages = %d\n", n_fw_pages);
+
+ for (page = 0; page < n_fw_pages; page++) {
+ error = elants_i2c_fw_write_page(client,
+ fw->data + page * ELAN_FW_PAGESIZE);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to write FW page %d: %d\n",
+ page, error);
+ return error;
+ }
+ }
+
+ /* Old IAP firmware needs 200 ms for the WDT; the rest allows for hello packets */
+ msleep(300);
+
+ dev_info(&client->dev, "firmware update completed\n");
+ return 0;
+}
+
+static int elants_i2c_fw_update(struct elants_data *ts)
+{
+ struct i2c_client *client = ts->client;
+ const struct firmware *fw;
+ int error;
+
+ error = request_firmware(&fw, ELAN_FW_FILENAME, &client->dev);
+ if (error) {
+ dev_err(&client->dev, "failed to request firmware %s: %d\n",
+ ELAN_FW_FILENAME, error);
+ return error;
+ }
+
+ if (fw->size % ELAN_FW_PAGESIZE) {
+ dev_err(&client->dev, "invalid firmware length: %zu\n",
+ fw->size);
+ error = -EINVAL;
+ goto out;
+ }
+
+ disable_irq(client->irq);
+
+ error = elants_i2c_do_update_firmware(client, fw,
+ ts->iap_mode == ELAN_IAP_RECOVERY);
+ if (error) {
+ dev_err(&client->dev, "firmware update failed: %d\n", error);
+ ts->iap_mode = ELAN_IAP_RECOVERY;
+ goto out_enable_irq;
+ }
+
+ error = elants_i2c_initialize(ts);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to initialize device after firmware update: %d\n",
+ error);
+ ts->iap_mode = ELAN_IAP_RECOVERY;
+ goto out_enable_irq;
+ }
+
+ ts->iap_mode = ELAN_IAP_OPERATIONAL;
+
+out_enable_irq:
+ ts->state = ELAN_STATE_NORMAL;
+ enable_irq(client->irq);
+ msleep(100);
+
+ if (!error)
+ elants_i2c_calibrate(ts);
+out:
+ release_firmware(fw);
+ return error;
+}
+
+/*
+ * Event reporting.
+ */
+
+static void elants_i2c_mt_event(struct elants_data *ts, u8 *buf)
+{
+ struct input_dev *input = ts->input;
+ unsigned int n_fingers;
+ u16 finger_state;
+ int i;
+
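+ /*
+ * The low nibble of buf[FW_POS_STATE + 1] holds the contact
+ * count; bits 4-5 of that byte together with buf[FW_POS_STATE]
+ * form a 10-bit bitmap of active contacts.
+ */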
+ n_fingers = buf[FW_POS_STATE + 1] & 0x0f;
+ finger_state = ((buf[FW_POS_STATE + 1] & 0x30) << 4) |
+ buf[FW_POS_STATE];
+
+ dev_dbg(&ts->client->dev,
+ "n_fingers: %u, state: %04x\n", n_fingers, finger_state);
+
+ for (i = 0; i < MAX_CONTACT_NUM && n_fingers; i++) {
+ if (finger_state & 1) {
+ unsigned int x, y, p, w;
+ u8 *pos;
+
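+ /*
+ * Each contact occupies three position bytes: the high nibble
+ * of the first byte extends X to 12 bits, the low nibble
+ * extends Y, and the next two bytes carry the low eight bits
+ * of X and Y respectively.
+ */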
+ pos = &buf[FW_POS_XY + i * 3];
+ x = (((u16)pos[0] & 0xf0) << 4) | pos[1];
+ y = (((u16)pos[0] & 0x0f) << 8) | pos[2];
+ p = buf[FW_POS_PRESSURE + i];
+ w = buf[FW_POS_WIDTH + i];
+
+ dev_dbg(&ts->client->dev, "i=%d x=%d y=%d p=%d w=%d\n",
+ i, x, y, p, w);
+
+ input_mt_slot(input, i);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
+ input_event(input, EV_ABS, ABS_MT_POSITION_X, x);
+ input_event(input, EV_ABS, ABS_MT_POSITION_Y, y);
+ input_event(input, EV_ABS, ABS_MT_PRESSURE, p);
+ input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, w);
+
+ n_fingers--;
+ }
+
+ finger_state >>= 1;
+ }
+
+ input_mt_sync_frame(input);
+ input_sync(input);
+}
+
+static u8 elants_i2c_calculate_checksum(u8 *buf)
+{
+ u8 checksum = 0;
+ u8 i;
+
+ for (i = 0; i < FW_POS_CHECKSUM; i++)
+ checksum += buf[i];
+
+ return checksum;
+}
+
+static void elants_i2c_event(struct elants_data *ts, u8 *buf)
+{
+ u8 checksum = elants_i2c_calculate_checksum(buf);
+
+ if (unlikely(buf[FW_POS_CHECKSUM] != checksum))
+ dev_warn(&ts->client->dev,
+ "%s: invalid checksum for packet %02x: %02x vs. %02x\n",
+ __func__, buf[FW_POS_HEADER],
+ checksum, buf[FW_POS_CHECKSUM]);
+ else if (unlikely(buf[FW_POS_HEADER] != HEADER_REPORT_10_FINGER))
+ dev_warn(&ts->client->dev,
+ "%s: unknown packet type: %02x\n",
+ __func__, buf[FW_POS_HEADER]);
+ else
+ elants_i2c_mt_event(ts, buf);
+}
+
+static irqreturn_t elants_i2c_irq(int irq, void *_dev)
+{
+ const u8 wait_packet[] = { 0x64, 0x64, 0x64, 0x64 };
+ struct elants_data *ts = _dev;
+ struct i2c_client *client = ts->client;
+ int report_count, report_len;
+ int i;
+ int len;
+
+ len = i2c_master_recv(client, ts->buf, sizeof(ts->buf));
+ if (len < 0) {
+ dev_err(&client->dev, "%s: failed to read data: %d\n",
+ __func__, len);
+ goto out;
+ }
+
+ dev_dbg(&client->dev, "%s: packet %*ph\n",
+ __func__, HEADER_SIZE, ts->buf);
+
+ switch (ts->state) {
+ case ELAN_WAIT_RECALIBRATION:
+ if (ts->buf[FW_HDR_TYPE] == CMD_HEADER_REK) {
+ memcpy(ts->cmd_resp, ts->buf, sizeof(ts->cmd_resp));
+ complete(&ts->cmd_done);
+ ts->state = ELAN_STATE_NORMAL;
+ }
+ break;
+
+ case ELAN_WAIT_QUEUE_HEADER:
+ if (ts->buf[FW_HDR_TYPE] != QUEUE_HEADER_NORMAL)
+ break;
+
+ ts->state = ELAN_STATE_NORMAL;
+ /* fall through */
+
+ case ELAN_STATE_NORMAL:
+
+ switch (ts->buf[FW_HDR_TYPE]) {
+ case CMD_HEADER_HELLO:
+ case CMD_HEADER_RESP:
+ case CMD_HEADER_REK:
+ break;
+
+ case QUEUE_HEADER_WAIT:
+ if (memcmp(ts->buf, wait_packet, sizeof(wait_packet))) {
+ dev_err(&client->dev,
+ "invalid wait packet %*ph\n",
+ HEADER_SIZE, ts->buf);
+ } else {
+ ts->state = ELAN_WAIT_QUEUE_HEADER;
+ udelay(30);
+ }
+ break;
+
+ case QUEUE_HEADER_SINGLE:
+ elants_i2c_event(ts, &ts->buf[HEADER_SIZE]);
+ break;
+
+ case QUEUE_HEADER_NORMAL:
+ report_count = ts->buf[FW_HDR_COUNT];
+ if (report_count > 3) {
+ dev_err(&client->dev,
+ "too large report count: %*ph\n",
+ HEADER_SIZE, ts->buf);
+ break;
+ }
+
+ report_len = ts->buf[FW_HDR_LENGTH] / report_count;
+ if (report_len != PACKET_SIZE) {
+ dev_err(&client->dev,
+ "mismatching report length: %*ph\n",
+ HEADER_SIZE, ts->buf);
+ break;
+ }
+
+ for (i = 0; i < report_count; i++) {
+ u8 *buf = ts->buf + HEADER_SIZE +
+ i * PACKET_SIZE;
+ elants_i2c_event(ts, buf);
+ }
+ break;
+
+ default:
+ dev_err(&client->dev, "unknown packet %*ph\n",
+ HEADER_SIZE, ts->buf);
+ break;
+ }
+ break;
+ }
+
+out:
+ return IRQ_HANDLED;
+}
+
+/*
+ * sysfs interface
+ */
+static ssize_t calibrate_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elants_data *ts = i2c_get_clientdata(client);
+ int error;
+
+ error = mutex_lock_interruptible(&ts->sysfs_mutex);
+ if (error)
+ return error;
+
+ error = elants_i2c_calibrate(ts);
+
+ mutex_unlock(&ts->sysfs_mutex);
+ return error ?: count;
+}
+
+static ssize_t write_update_fw(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elants_data *ts = i2c_get_clientdata(client);
+ int error;
+
+ error = mutex_lock_interruptible(&ts->sysfs_mutex);
+ if (error)
+ return error;
+
+ error = elants_i2c_fw_update(ts);
+ dev_dbg(dev, "firmware update result: %d\n", error);
+
+ mutex_unlock(&ts->sysfs_mutex);
+ return error ?: count;
+}
+
+static ssize_t show_iap_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elants_data *ts = i2c_get_clientdata(client);
+
+ return sprintf(buf, "%s\n",
+ ts->iap_mode == ELAN_IAP_OPERATIONAL ?
+ "Normal" : "Recovery");
+}
+
+static DEVICE_ATTR(calibrate, S_IWUSR, NULL, calibrate_store);
+static DEVICE_ATTR(iap_mode, S_IRUGO, show_iap_mode, NULL);
+static DEVICE_ATTR(update_fw, S_IWUSR, NULL, write_update_fw);
+
+struct elants_version_attribute {
+ struct device_attribute dattr;
+ size_t field_offset;
+ size_t field_size;
+};
+
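+/*
+ * The version attributes below print u8 or u16 fields of struct
+ * elants_data; __ELANTS_VERIFY_SIZE uses BUILD_BUG_ON_ZERO() to reject
+ * any field wider than two bytes at compile time.
+ */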
+#define __ELANTS_FIELD_SIZE(_field) \
+ sizeof(((struct elants_data *)NULL)->_field)
+#define __ELANTS_VERIFY_SIZE(_field) \
+ (BUILD_BUG_ON_ZERO(__ELANTS_FIELD_SIZE(_field) > 2) + \
+ __ELANTS_FIELD_SIZE(_field))
+#define ELANTS_VERSION_ATTR(_field) \
+ struct elants_version_attribute elants_ver_attr_##_field = { \
+ .dattr = __ATTR(_field, S_IRUGO, \
+ elants_version_attribute_show, NULL), \
+ .field_offset = offsetof(struct elants_data, _field), \
+ .field_size = __ELANTS_VERIFY_SIZE(_field), \
+ }
+
+static ssize_t elants_version_attribute_show(struct device *dev,
+ struct device_attribute *dattr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elants_data *ts = i2c_get_clientdata(client);
+ struct elants_version_attribute *attr =
+ container_of(dattr, struct elants_version_attribute, dattr);
+ u8 *field = (u8 *)((char *)ts + attr->field_offset);
+ unsigned int fmt_size;
+ unsigned int val;
+
+ if (attr->field_size == 1) {
+ val = *field;
+ fmt_size = 2; /* 2 HEX digits */
+ } else {
+ val = *(u16 *)field;
+ fmt_size = 4; /* 4 HEX digits */
+ }
+
+ return sprintf(buf, "%0*x\n", fmt_size, val);
+}
+
+static ELANTS_VERSION_ATTR(fw_version);
+static ELANTS_VERSION_ATTR(hw_version);
+static ELANTS_VERSION_ATTR(test_version);
+static ELANTS_VERSION_ATTR(solution_version);
+static ELANTS_VERSION_ATTR(bc_version);
+static ELANTS_VERSION_ATTR(iap_version);
+
+static struct attribute *elants_attributes[] = {
+ &dev_attr_calibrate.attr,
+ &dev_attr_update_fw.attr,
+ &dev_attr_iap_mode.attr,
+
+ &elants_ver_attr_fw_version.dattr.attr,
+ &elants_ver_attr_hw_version.dattr.attr,
+ &elants_ver_attr_test_version.dattr.attr,
+ &elants_ver_attr_solution_version.dattr.attr,
+ &elants_ver_attr_bc_version.dattr.attr,
+ &elants_ver_attr_iap_version.dattr.attr,
+ NULL
+};
+
+static struct attribute_group elants_attribute_group = {
+ .attrs = elants_attributes,
+};
+
+static void elants_i2c_remove_sysfs_group(void *_data)
+{
+ struct elants_data *ts = _data;
+
+ sysfs_remove_group(&ts->client->dev.kobj, &elants_attribute_group);
+}
+
+static int elants_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ union i2c_smbus_data dummy;
+ struct elants_data *ts;
+ unsigned long irqflags;
+ int error;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev,
+ "%s: i2c check functionality error\n", DEVICE_NAME);
+ return -ENXIO;
+ }
+
+ /* Make sure there is something at this address */
+ if (i2c_smbus_xfer(client->adapter, client->addr, 0,
+ I2C_SMBUS_READ, 0, I2C_SMBUS_BYTE, &dummy) < 0) {
+ dev_err(&client->dev, "nothing at this address\n");
+ return -ENXIO;
+ }
+
+ ts = devm_kzalloc(&client->dev, sizeof(struct elants_data), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ mutex_init(&ts->sysfs_mutex);
+ init_completion(&ts->cmd_done);
+
+ ts->client = client;
+ i2c_set_clientdata(client, ts);
+
+ error = elants_i2c_initialize(ts);
+ if (error) {
+ dev_err(&client->dev, "failed to initialize: %d\n", error);
+ return error;
+ }
+
+ ts->input = devm_input_allocate_device(&client->dev);
+ if (!ts->input) {
+ dev_err(&client->dev, "Failed to allocate input device\n");
+ return -ENOMEM;
+ }
+
+ ts->input->name = "Elan Touchscreen";
+ ts->input->id.bustype = BUS_I2C;
+
+ __set_bit(BTN_TOUCH, ts->input->keybit);
+ __set_bit(EV_ABS, ts->input->evbit);
+ __set_bit(EV_KEY, ts->input->evbit);
+
+ /* Single touch input params setup */
+ input_set_abs_params(ts->input, ABS_X, 0, ts->x_max, 0, 0);
+ input_set_abs_params(ts->input, ABS_Y, 0, ts->y_max, 0, 0);
+ input_set_abs_params(ts->input, ABS_PRESSURE, 0, 255, 0, 0);
+ input_abs_set_res(ts->input, ABS_X, ts->x_res);
+ input_abs_set_res(ts->input, ABS_Y, ts->y_res);
+
+ /* Multitouch input params setup */
+ error = input_mt_init_slots(ts->input, MAX_CONTACT_NUM,
+ INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to initialize MT slots: %d\n", error);
+ return error;
+ }
+
+ input_set_abs_params(ts->input, ABS_MT_POSITION_X, 0, ts->x_max, 0, 0);
+ input_set_abs_params(ts->input, ABS_MT_POSITION_Y, 0, ts->y_max, 0, 0);
+ input_set_abs_params(ts->input, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
+ input_set_abs_params(ts->input, ABS_MT_PRESSURE, 0, 255, 0, 0);
+ input_abs_set_res(ts->input, ABS_MT_POSITION_X, ts->x_res);
+ input_abs_set_res(ts->input, ABS_MT_POSITION_Y, ts->y_res);
+
+ input_set_drvdata(ts->input, ts);
+
+ error = input_register_device(ts->input);
+ if (error) {
+ dev_err(&client->dev,
+ "unable to register input device: %d\n", error);
+ return error;
+ }
+
+ /*
+ * Systems using device tree should set up interrupt via DTS,
+ * the rest will use the default falling edge interrupts.
+ */
+ irqflags = client->dev.of_node ? 0 : IRQF_TRIGGER_FALLING;
+
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, elants_i2c_irq,
+ irqflags | IRQF_ONESHOT,
+ client->name, ts);
+ if (error) {
+ dev_err(&client->dev, "Failed to register interrupt\n");
+ return error;
+ }
+
+ /*
+ * Systems using device tree should set up wakeup via DTS,
+ * the rest will configure device as wakeup source by default.
+ */
+ if (!client->dev.of_node)
+ device_init_wakeup(&client->dev, true);
+
+ error = sysfs_create_group(&client->dev.kobj, &elants_attribute_group);
+ if (error) {
+ dev_err(&client->dev, "failed to create sysfs attributes: %d\n",
+ error);
+ return error;
+ }
+
+ error = devm_add_action(&client->dev,
+ elants_i2c_remove_sysfs_group, ts);
+ if (error) {
+ elants_i2c_remove_sysfs_group(ts);
+ dev_err(&client->dev,
+ "Failed to add sysfs cleanup action: %d\n",
+ error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused elants_i2c_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elants_data *ts = i2c_get_clientdata(client);
+ const u8 set_sleep_cmd[] = { 0x54, 0x50, 0x00, 0x01 };
+ int retry_cnt;
+ int error;
+
+ /* Command is not supported in IAP recovery mode */
+ if (ts->iap_mode != ELAN_IAP_OPERATIONAL)
+ return -EBUSY;
+
+ disable_irq(client->irq);
+
+ for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
+ error = elants_i2c_send(client, set_sleep_cmd,
+ sizeof(set_sleep_cmd));
+ if (!error)
+ break;
+
+ dev_err(&client->dev, "suspend command failed: %d\n", error);
+ }
+
+ if (device_may_wakeup(dev))
+ ts->wake_irq_enabled = (enable_irq_wake(client->irq) == 0);
+
+ return 0;
+}
+
+static int __maybe_unused elants_i2c_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elants_data *ts = i2c_get_clientdata(client);
+ const u8 set_active_cmd[] = { 0x54, 0x58, 0x00, 0x01 };
+ int retry_cnt;
+ int error;
+
+ if (device_may_wakeup(dev) && ts->wake_irq_enabled)
+ disable_irq_wake(client->irq);
+
+ for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
+ error = elants_i2c_send(client, set_active_cmd,
+ sizeof(set_active_cmd));
+ if (!error)
+ break;
+
+ dev_err(&client->dev, "resume command failed: %d\n", error);
+ }
+
+ ts->state = ELAN_STATE_NORMAL;
+ enable_irq(client->irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(elants_i2c_pm_ops,
+ elants_i2c_suspend, elants_i2c_resume);
+
+static const struct i2c_device_id elants_i2c_id[] = {
+ { DEVICE_NAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, elants_i2c_id);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id elants_acpi_id[] = {
+ { "ELAN0001", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, elants_acpi_id);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id elants_of_match[] = {
+ { .compatible = "elan,ekth3500" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, elants_of_match);
+#endif
+
+static struct i2c_driver elants_i2c_driver = {
+ .probe = elants_i2c_probe,
+ .id_table = elants_i2c_id,
+ .driver = {
+ .name = DEVICE_NAME,
+ .owner = THIS_MODULE,
+ .pm = &elants_i2c_pm_ops,
+ .acpi_match_table = ACPI_PTR(elants_acpi_id),
+ .of_match_table = of_match_ptr(elants_of_match),
+ },
+};
+module_i2c_driver(elants_i2c_driver);
+
+MODULE_AUTHOR("Scott Liu <scott.liu@emc.com.tw>");
+MODULE_DESCRIPTION("Elan I2c Touchscreen driver");
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
new file mode 100644
index 00000000000..ca196689f02
--- /dev/null
+++ b/drivers/input/touchscreen/goodix.c
@@ -0,0 +1,395 @@
+/*
+ * Driver for Goodix Touchscreens
+ *
+ * Copyright (c) 2014 Red Hat Inc.
+ *
+ * This code is based on gt9xx.c authored by andrew@goodix.com:
+ *
+ * 2010 - 2012 Goodix Technology.
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; version 2 of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+
+struct goodix_ts_data {
+ struct i2c_client *client;
+ struct input_dev *input_dev;
+ int abs_x_max;
+ int abs_y_max;
+ unsigned int max_touch_num;
+ unsigned int int_trigger_type;
+};
+
+#define GOODIX_MAX_HEIGHT 4096
+#define GOODIX_MAX_WIDTH 4096
+#define GOODIX_INT_TRIGGER 1
+#define GOODIX_CONTACT_SIZE 8
+#define GOODIX_MAX_CONTACTS 10
+
+#define GOODIX_CONFIG_MAX_LENGTH 240
+
+/* Register defines */
+#define GOODIX_READ_COOR_ADDR 0x814E
+#define GOODIX_REG_CONFIG_DATA 0x8047
+#define GOODIX_REG_VERSION 0x8140
+
+#define RESOLUTION_LOC 1
+#define TRIGGER_LOC 6
+
+static const unsigned long goodix_irq_flags[] = {
+ IRQ_TYPE_EDGE_RISING,
+ IRQ_TYPE_EDGE_FALLING,
+ IRQ_TYPE_LEVEL_LOW,
+ IRQ_TYPE_LEVEL_HIGH,
+};
+
+/**
+ * goodix_i2c_read - read data from a register of the i2c slave device.
+ *
+ * @client: i2c device.
+ * @reg: the register to read from.
+ * @buf: raw data buffer into which the data is read.
+ * @len: number of bytes to read
+ */
+static int goodix_i2c_read(struct i2c_client *client,
+ u16 reg, u8 *buf, int len)
+{
+ struct i2c_msg msgs[2];
+ u16 wbuf = cpu_to_be16(reg);
+ int ret;
+
+ msgs[0].flags = 0;
+ msgs[0].addr = client->addr;
+ msgs[0].len = 2;
+ msgs[0].buf = (u8 *) &wbuf;
+
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].addr = client->addr;
+ msgs[1].len = len;
+ msgs[1].buf = buf;
+
+ ret = i2c_transfer(client->adapter, msgs, 2);
+ return ret < 0 ? ret : (ret != ARRAY_SIZE(msgs) ? -EIO : 0);
+}
+
+static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
+{
+ int touch_num;
+ int error;
+
+ error = goodix_i2c_read(ts->client, GOODIX_READ_COOR_ADDR, data,
+ GOODIX_CONTACT_SIZE + 1);
+ if (error) {
+ dev_err(&ts->client->dev, "I2C transfer error: %d\n", error);
+ return error;
+ }
+
+ touch_num = data[0] & 0x0f;
+ if (touch_num > GOODIX_MAX_CONTACTS)
+ return -EPROTO;
+
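+ /*
+ * The first transfer already returned the status byte plus the
+ * data of the first contact; fetch the remaining contacts with
+ * one additional read starting right after them.
+ */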
+ if (touch_num > 1) {
+ data += 1 + GOODIX_CONTACT_SIZE;
+ error = goodix_i2c_read(ts->client,
+ GOODIX_READ_COOR_ADDR +
+ 1 + GOODIX_CONTACT_SIZE,
+ data,
+ GOODIX_CONTACT_SIZE * (touch_num - 1));
+ if (error)
+ return error;
+ }
+
+ return touch_num;
+}
+
+static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data)
+{
+ int id = coor_data[0] & 0x0F;
+ int input_x = get_unaligned_le16(&coor_data[1]);
+ int input_y = get_unaligned_le16(&coor_data[3]);
+ int input_w = get_unaligned_le16(&coor_data[5]);
+
+ input_mt_slot(ts->input_dev, id);
+ input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, true);
+ input_report_abs(ts->input_dev, ABS_MT_POSITION_X, input_x);
+ input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, input_y);
+ input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, input_w);
+ input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, input_w);
+}
+
+/**
+ * goodix_process_events - Process incoming events
+ *
+ * @ts: our goodix_ts_data pointer
+ *
+ * Called when the IRQ is triggered. Read the current device state, and push
+ * the input events to the user space.
+ */
+static void goodix_process_events(struct goodix_ts_data *ts)
+{
+ u8 point_data[1 + GOODIX_CONTACT_SIZE * GOODIX_MAX_CONTACTS];
+ int touch_num;
+ int i;
+
+ touch_num = goodix_ts_read_input_report(ts, point_data);
+ if (touch_num < 0)
+ return;
+
+ for (i = 0; i < touch_num; i++)
+ goodix_ts_report_touch(ts,
+ &point_data[1 + GOODIX_CONTACT_SIZE * i]);
+
+ input_mt_sync_frame(ts->input_dev);
+ input_sync(ts->input_dev);
+}
+
+/**
+ * goodix_ts_irq_handler - The IRQ handler
+ *
+ * @irq: interrupt number.
+ * @dev_id: private data pointer.
+ */
+static irqreturn_t goodix_ts_irq_handler(int irq, void *dev_id)
+{
+ static const u8 end_cmd[] = {
+ GOODIX_READ_COOR_ADDR >> 8,
+ GOODIX_READ_COOR_ADDR & 0xff,
+ 0
+ };
+ struct goodix_ts_data *ts = dev_id;
+
+ goodix_process_events(ts);
+
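+ /* Write 0 back to the coordinate register to clear the buffer status. */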
+ if (i2c_master_send(ts->client, end_cmd, sizeof(end_cmd)) < 0)
+ dev_err(&ts->client->dev, "I2C write end_cmd error\n");
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * goodix_read_config - Read the embedded configuration of the panel
+ *
+ * @ts: our goodix_ts_data pointer
+ *
+ * Must be called during probe
+ */
+static void goodix_read_config(struct goodix_ts_data *ts)
+{
+ u8 config[GOODIX_CONFIG_MAX_LENGTH];
+ int error;
+
+ error = goodix_i2c_read(ts->client, GOODIX_REG_CONFIG_DATA,
+ config,
+ GOODIX_CONFIG_MAX_LENGTH);
+ if (error) {
+ dev_warn(&ts->client->dev,
+ "Error reading config (%d), using defaults\n",
+ error);
+ ts->abs_x_max = GOODIX_MAX_WIDTH;
+ ts->abs_y_max = GOODIX_MAX_HEIGHT;
+ ts->int_trigger_type = GOODIX_INT_TRIGGER;
+ return;
+ }
+
+ ts->abs_x_max = get_unaligned_le16(&config[RESOLUTION_LOC]);
+ ts->abs_y_max = get_unaligned_le16(&config[RESOLUTION_LOC + 2]);
+ ts->int_trigger_type = (config[TRIGGER_LOC]) & 0x03;
+ if (!ts->abs_x_max || !ts->abs_y_max) {
+ dev_err(&ts->client->dev,
+ "Invalid config, using defaults\n");
+ ts->abs_x_max = GOODIX_MAX_WIDTH;
+ ts->abs_y_max = GOODIX_MAX_HEIGHT;
+ }
+}
+
+
+/**
+ * goodix_read_version - Read goodix touchscreen version
+ *
+ * @client: the i2c client
+ * @version: output buffer containing the version on success
+ */
+static int goodix_read_version(struct i2c_client *client, u16 *version)
+{
+ int error;
+ u8 buf[6];
+
+ error = goodix_i2c_read(client, GOODIX_REG_VERSION, buf, sizeof(buf));
+ if (error) {
+ dev_err(&client->dev, "read version failed: %d\n", error);
+ return error;
+ }
+
+ if (version)
+ *version = get_unaligned_le16(&buf[4]);
+
+ dev_info(&client->dev, "IC VERSION: %6ph\n", buf);
+
+ return 0;
+}
+
+/**
+ * goodix_i2c_test - I2C test function to check if the device answers.
+ *
+ * @client: the i2c client
+ */
+static int goodix_i2c_test(struct i2c_client *client)
+{
+ int retry = 0;
+ int error;
+ u8 test;
+
+ while (retry++ < 2) {
+ error = goodix_i2c_read(client, GOODIX_REG_CONFIG_DATA,
+ &test, 1);
+ if (!error)
+ return 0;
+
+ dev_err(&client->dev, "i2c test failed attempt %d: %d\n",
+ retry, error);
+ msleep(20);
+ }
+
+ return error;
+}
+
+/**
+ * goodix_request_input_dev - Allocate, populate and register the input device
+ *
+ * @ts: our goodix_ts_data pointer
+ *
+ * Must be called during probe
+ */
+static int goodix_request_input_dev(struct goodix_ts_data *ts)
+{
+ int error;
+
+ ts->input_dev = devm_input_allocate_device(&ts->client->dev);
+ if (!ts->input_dev) {
+ dev_err(&ts->client->dev, "Failed to allocate input device.");
+ return -ENOMEM;
+ }
+
+ ts->input_dev->evbit[0] = BIT_MASK(EV_SYN) |
+ BIT_MASK(EV_KEY) |
+ BIT_MASK(EV_ABS);
+
+ input_set_abs_params(ts->input_dev, ABS_MT_POSITION_X, 0,
+ ts->abs_x_max, 0, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_POSITION_Y, 0,
+ ts->abs_y_max, 0, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_WIDTH_MAJOR, 0, 255, 0, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
+
+ input_mt_init_slots(ts->input_dev, GOODIX_MAX_CONTACTS,
+ INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
+
+ ts->input_dev->name = "Goodix Capacitive TouchScreen";
+ ts->input_dev->phys = "input/ts";
+ ts->input_dev->id.bustype = BUS_I2C;
+ ts->input_dev->id.vendor = 0x0416;
+ ts->input_dev->id.product = 0x1001;
+ ts->input_dev->id.version = 10427;
+
+ error = input_register_device(ts->input_dev);
+ if (error) {
+ dev_err(&ts->client->dev,
+ "Failed to register input device: %d", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int goodix_ts_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct goodix_ts_data *ts;
+ unsigned long irq_flags;
+ int error;
+ u16 version_info;
+
+ dev_dbg(&client->dev, "I2C Address: 0x%02x\n", client->addr);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "I2C check functionality failed.\n");
+ return -ENXIO;
+ }
+
+ ts = devm_kzalloc(&client->dev, sizeof(*ts), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ ts->client = client;
+ i2c_set_clientdata(client, ts);
+
+ error = goodix_i2c_test(client);
+ if (error) {
+ dev_err(&client->dev, "I2C communication failure: %d\n", error);
+ return error;
+ }
+
+ error = goodix_read_version(client, &version_info);
+ if (error) {
+ dev_err(&client->dev, "Read version failed.\n");
+ return error;
+ }
+
+ goodix_read_config(ts);
+
+ error = goodix_request_input_dev(ts);
+ if (error)
+ return error;
+
+ irq_flags = goodix_irq_flags[ts->int_trigger_type] | IRQF_ONESHOT;
+ error = devm_request_threaded_irq(&ts->client->dev, client->irq,
+ NULL, goodix_ts_irq_handler,
+ irq_flags, client->name, ts);
+ if (error) {
+ dev_err(&client->dev, "request IRQ failed: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static const struct i2c_device_id goodix_ts_id[] = {
+ { "GDIX1001:00", 0 },
+ { }
+};
+
+static const struct acpi_device_id goodix_acpi_match[] = {
+ { "GDIX1001", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, goodix_acpi_match);
+
+static struct i2c_driver goodix_ts_driver = {
+ .probe = goodix_ts_probe,
+ .id_table = goodix_ts_id,
+ .driver = {
+ .name = "Goodix-TS",
+ .owner = THIS_MODULE,
+ .acpi_match_table = goodix_acpi_match,
+ },
+};
+module_i2c_driver(goodix_ts_driver);
+
+MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>");
+MODULE_AUTHOR("Bastien Nocera <hadess@hadess.net>");
+MODULE_DESCRIPTION("Goodix touchscreen driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
index 2a508913981..da6dc819c84 100644
--- a/drivers/input/touchscreen/ili210x.c
+++ b/drivers/input/touchscreen/ili210x.c
@@ -311,8 +311,7 @@ static int ili210x_i2c_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int ili210x_i2c_suspend(struct device *dev)
+static int __maybe_unused ili210x_i2c_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -322,7 +321,7 @@ static int ili210x_i2c_suspend(struct device *dev)
return 0;
}
-static int ili210x_i2c_resume(struct device *dev)
+static int __maybe_unused ili210x_i2c_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -331,7 +330,6 @@ static int ili210x_i2c_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(ili210x_i2c_pm,
ili210x_i2c_suspend, ili210x_i2c_resume);
diff --git a/drivers/input/touchscreen/ipaq-micro-ts.c b/drivers/input/touchscreen/ipaq-micro-ts.c
index 62c8976e616..33c134820ef 100644
--- a/drivers/input/touchscreen/ipaq-micro-ts.c
+++ b/drivers/input/touchscreen/ipaq-micro-ts.c
@@ -122,8 +122,7 @@ static int micro_ts_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int micro_ts_suspend(struct device *dev)
+static int __maybe_unused micro_ts_suspend(struct device *dev)
{
struct touchscreen_data *ts = dev_get_drvdata(dev);
@@ -132,7 +131,7 @@ static int micro_ts_suspend(struct device *dev)
return 0;
}
-static int micro_ts_resume(struct device *dev)
+static int __maybe_unused micro_ts_resume(struct device *dev)
{
struct touchscreen_data *ts = dev_get_drvdata(dev);
struct input_dev *input = ts->input;
@@ -146,7 +145,6 @@ static int micro_ts_resume(struct device *dev)
return 0;
}
-#endif
static const struct dev_pm_ops micro_ts_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(micro_ts_suspend, micro_ts_resume)
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c
index 372bbf7658f..67c0d31613d 100644
--- a/drivers/input/touchscreen/mms114.c
+++ b/drivers/input/touchscreen/mms114.c
@@ -515,8 +515,7 @@ static int mms114_probe(struct i2c_client *client,
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int mms114_suspend(struct device *dev)
+static int __maybe_unused mms114_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct mms114_data *data = i2c_get_clientdata(client);
@@ -540,7 +539,7 @@ static int mms114_suspend(struct device *dev)
return 0;
}
-static int mms114_resume(struct device *dev)
+static int __maybe_unused mms114_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct mms114_data *data = i2c_get_clientdata(client);
@@ -559,7 +558,6 @@ static int mms114_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(mms114_pm_ops, mms114_suspend, mms114_resume);
diff --git a/drivers/input/touchscreen/pixcir_i2c_ts.c b/drivers/input/touchscreen/pixcir_i2c_ts.c
index fc49c75317d..4fb5537fdd4 100644
--- a/drivers/input/touchscreen/pixcir_i2c_ts.c
+++ b/drivers/input/touchscreen/pixcir_i2c_ts.c
@@ -347,8 +347,7 @@ static void pixcir_input_close(struct input_dev *dev)
pixcir_stop(ts);
}
-#ifdef CONFIG_PM_SLEEP
-static int pixcir_i2c_ts_suspend(struct device *dev)
+static int __maybe_unused pixcir_i2c_ts_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct pixcir_i2c_ts_data *ts = i2c_get_clientdata(client);
@@ -377,7 +376,7 @@ unlock:
return ret;
}
-static int pixcir_i2c_ts_resume(struct device *dev)
+static int __maybe_unused pixcir_i2c_ts_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct pixcir_i2c_ts_data *ts = i2c_get_clientdata(client);
@@ -405,7 +404,6 @@ unlock:
return ret;
}
-#endif
static SIMPLE_DEV_PM_OPS(pixcir_dev_pm_ops,
pixcir_i2c_ts_suspend, pixcir_i2c_ts_resume);
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
index 3c0f57efe7b..697e26e52d5 100644
--- a/drivers/input/touchscreen/st1232.c
+++ b/drivers/input/touchscreen/st1232.c
@@ -243,8 +243,7 @@ static int st1232_ts_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int st1232_ts_suspend(struct device *dev)
+static int __maybe_unused st1232_ts_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct st1232_ts_data *ts = i2c_get_clientdata(client);
@@ -259,7 +258,7 @@ static int st1232_ts_suspend(struct device *dev)
return 0;
}
-static int st1232_ts_resume(struct device *dev)
+static int __maybe_unused st1232_ts_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct st1232_ts_data *ts = i2c_get_clientdata(client);
@@ -274,8 +273,6 @@ static int st1232_ts_resume(struct device *dev)
return 0;
}
-#endif
-
static SIMPLE_DEV_PM_OPS(st1232_ts_pm_ops,
st1232_ts_suspend, st1232_ts_resume);
diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
index 52380b68ebd..72657c57943 100644
--- a/drivers/input/touchscreen/tsc2005.c
+++ b/drivers/input/touchscreen/tsc2005.c
@@ -773,8 +773,7 @@ static int tsc2005_remove(struct spi_device *spi)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int tsc2005_suspend(struct device *dev)
+static int __maybe_unused tsc2005_suspend(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
struct tsc2005 *ts = spi_get_drvdata(spi);
@@ -791,7 +790,7 @@ static int tsc2005_suspend(struct device *dev)
return 0;
}
-static int tsc2005_resume(struct device *dev)
+static int __maybe_unused tsc2005_resume(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
struct tsc2005 *ts = spi_get_drvdata(spi);
@@ -807,7 +806,6 @@ static int tsc2005_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(tsc2005_pm_ops, tsc2005_suspend, tsc2005_resume);
diff --git a/drivers/input/touchscreen/ucb1400_ts.c b/drivers/input/touchscreen/ucb1400_ts.c
index 0eca00da584..c1e23cfc615 100644
--- a/drivers/input/touchscreen/ucb1400_ts.c
+++ b/drivers/input/touchscreen/ucb1400_ts.c
@@ -406,8 +406,7 @@ static int ucb1400_ts_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int ucb1400_ts_suspend(struct device *dev)
+static int __maybe_unused ucb1400_ts_suspend(struct device *dev)
{
struct ucb1400_ts *ucb = dev_get_platdata(dev);
struct input_dev *idev = ucb->ts_idev;
@@ -421,7 +420,7 @@ static int ucb1400_ts_suspend(struct device *dev)
return 0;
}
-static int ucb1400_ts_resume(struct device *dev)
+static int __maybe_unused ucb1400_ts_resume(struct device *dev)
{
struct ucb1400_ts *ucb = dev_get_platdata(dev);
struct input_dev *idev = ucb->ts_idev;
@@ -434,7 +433,6 @@ static int ucb1400_ts_resume(struct device *dev)
mutex_unlock(&idev->mutex);
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(ucb1400_ts_pm_ops,
ucb1400_ts_suspend, ucb1400_ts_resume);
diff --git a/drivers/input/touchscreen/wacom_i2c.c b/drivers/input/touchscreen/wacom_i2c.c
index 7ccaa1b12b0..32f8ac00393 100644
--- a/drivers/input/touchscreen/wacom_i2c.c
+++ b/drivers/input/touchscreen/wacom_i2c.c
@@ -242,8 +242,7 @@ static int wacom_i2c_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int wacom_i2c_suspend(struct device *dev)
+static int __maybe_unused wacom_i2c_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -252,7 +251,7 @@ static int wacom_i2c_suspend(struct device *dev)
return 0;
}
-static int wacom_i2c_resume(struct device *dev)
+static int __maybe_unused wacom_i2c_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -260,7 +259,6 @@ static int wacom_i2c_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(wacom_i2c_pm, wacom_i2c_suspend, wacom_i2c_resume);
diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
index 8ba48f5eff7..19880c7385e 100644
--- a/drivers/input/touchscreen/zforce_ts.c
+++ b/drivers/input/touchscreen/zforce_ts.c
@@ -602,8 +602,7 @@ static void zforce_input_close(struct input_dev *dev)
return;
}
-#ifdef CONFIG_PM_SLEEP
-static int zforce_suspend(struct device *dev)
+static int __maybe_unused zforce_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct zforce_ts *ts = i2c_get_clientdata(client);
@@ -648,7 +647,7 @@ unlock:
return ret;
}
-static int zforce_resume(struct device *dev)
+static int __maybe_unused zforce_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct zforce_ts *ts = i2c_get_clientdata(client);
@@ -685,7 +684,6 @@ unlock:
return ret;
}
-#endif
static SIMPLE_DEV_PM_OPS(zforce_pm_ops, zforce_suspend, zforce_resume);
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 30f0e61341c..325188eef1c 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -15,7 +15,7 @@ if IOMMU_SUPPORT
config OF_IOMMU
def_bool y
- depends on OF
+ depends on OF && IOMMU_API
config FSL_PAMU
bool "Freescale IOMMU support"
@@ -187,7 +187,7 @@ config TEGRA_IOMMU_SMMU
config EXYNOS_IOMMU
bool "Exynos IOMMU Support"
- depends on ARCH_EXYNOS
+ depends on ARCH_EXYNOS && ARM
select IOMMU_API
select ARM_DMA_USE_IOMMU
help
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index b205f76d712..98024856df0 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -4071,7 +4071,7 @@ static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
int devid;
int ret;
- cfg = irq_get_chip_data(irq);
+ cfg = irq_cfg(irq);
if (!cfg)
return -EINVAL;
@@ -4134,7 +4134,7 @@ static int set_affinity(struct irq_data *data, const struct cpumask *mask,
if (!config_enabled(CONFIG_SMP))
return -1;
- cfg = data->chip_data;
+ cfg = irqd_cfg(data);
irq = data->irq;
irte_info = &cfg->irq_2_irte;
@@ -4172,7 +4172,7 @@ static int free_irq(int irq)
struct irq_2_irte *irte_info;
struct irq_cfg *cfg;
- cfg = irq_get_chip_data(irq);
+ cfg = irq_cfg(irq);
if (!cfg)
return -EINVAL;
@@ -4191,7 +4191,7 @@ static void compose_msi_msg(struct pci_dev *pdev,
struct irq_cfg *cfg;
union irte irte;
- cfg = irq_get_chip_data(irq);
+ cfg = irq_cfg(irq);
if (!cfg)
return;
@@ -4220,7 +4220,7 @@ static int msi_alloc_irq(struct pci_dev *pdev, int irq, int nvec)
if (!pdev)
return -EINVAL;
- cfg = irq_get_chip_data(irq);
+ cfg = irq_cfg(irq);
if (!cfg)
return -EINVAL;
@@ -4240,7 +4240,7 @@ static int msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
if (!pdev)
return -EINVAL;
- cfg = irq_get_chip_data(irq);
+ cfg = irq_cfg(irq);
if (!cfg)
return -EINVAL;
@@ -4263,7 +4263,7 @@ static int alloc_hpet_msi(unsigned int irq, unsigned int id)
struct irq_cfg *cfg;
int index, devid;
- cfg = irq_get_chip_data(irq);
+ cfg = irq_cfg(irq);
if (!cfg)
return -EINVAL;
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index bea878f8e7d..90f70d0e114 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -92,13 +92,6 @@ static spinlock_t state_lock;
static struct workqueue_struct *iommu_wq;
-/*
- * Empty page table - Used between
- * mmu_notifier_invalidate_range_start and
- * mmu_notifier_invalidate_range_end
- */
-static u64 *empty_page_table;
-
static void free_pasid_states(struct device_state *dev_state);
static u16 device_id(struct pci_dev *pdev)
@@ -414,46 +407,21 @@ static void mn_invalidate_page(struct mmu_notifier *mn,
__mn_flush_page(mn, address);
}
-static void mn_invalidate_range_start(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- struct pasid_state *pasid_state;
- struct device_state *dev_state;
- unsigned long flags;
-
- pasid_state = mn_to_state(mn);
- dev_state = pasid_state->device_state;
-
- spin_lock_irqsave(&pasid_state->lock, flags);
- if (pasid_state->mmu_notifier_count == 0) {
- amd_iommu_domain_set_gcr3(dev_state->domain,
- pasid_state->pasid,
- __pa(empty_page_table));
- }
- pasid_state->mmu_notifier_count += 1;
- spin_unlock_irqrestore(&pasid_state->lock, flags);
-}
-
-static void mn_invalidate_range_end(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start, unsigned long end)
+static void mn_invalidate_range(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start, unsigned long end)
{
struct pasid_state *pasid_state;
struct device_state *dev_state;
- unsigned long flags;
pasid_state = mn_to_state(mn);
dev_state = pasid_state->device_state;
- spin_lock_irqsave(&pasid_state->lock, flags);
- pasid_state->mmu_notifier_count -= 1;
- if (pasid_state->mmu_notifier_count == 0) {
- amd_iommu_domain_set_gcr3(dev_state->domain,
- pasid_state->pasid,
- __pa(pasid_state->mm->pgd));
- }
- spin_unlock_irqrestore(&pasid_state->lock, flags);
+ if ((start ^ (end - 1)) < PAGE_SIZE)
+ amd_iommu_flush_page(dev_state->domain, pasid_state->pasid,
+ start);
+ else
+ amd_iommu_flush_tlb(dev_state->domain, pasid_state->pasid);
}
static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
@@ -478,8 +446,7 @@ static struct mmu_notifier_ops iommu_mn = {
.release = mn_release,
.clear_flush_young = mn_clear_flush_young,
.invalidate_page = mn_invalidate_page,
- .invalidate_range_start = mn_invalidate_range_start,
- .invalidate_range_end = mn_invalidate_range_end,
+ .invalidate_range = mn_invalidate_range,
};
static void set_pri_tag_status(struct pasid_state *pasid_state,
@@ -972,18 +939,10 @@ static int __init amd_iommu_v2_init(void)
if (iommu_wq == NULL)
goto out;
- ret = -ENOMEM;
- empty_page_table = (u64 *)get_zeroed_page(GFP_KERNEL);
- if (empty_page_table == NULL)
- goto out_destroy_wq;
-
amd_iommu_register_ppr_notifier(&ppr_nb);
return 0;
-out_destroy_wq:
- destroy_workqueue(iommu_wq);
-
out:
return ret;
}
@@ -1017,8 +976,6 @@ static void __exit amd_iommu_v2_exit(void)
}
destroy_workqueue(iommu_wq);
-
- free_page((unsigned long)empty_page_table);
}
module_init(amd_iommu_v2_init);
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 27541d44084..a55b207b942 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -54,7 +54,7 @@ static int __init parse_ioapics_under_ir(void);
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
- struct irq_cfg *cfg = irq_get_chip_data(irq);
+ struct irq_cfg *cfg = irq_cfg(irq);
return cfg ? &cfg->irq_2_iommu : NULL;
}
@@ -85,7 +85,7 @@ static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
struct ir_table *table = iommu->ir_table;
struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
- struct irq_cfg *cfg = irq_get_chip_data(irq);
+ struct irq_cfg *cfg = irq_cfg(irq);
unsigned int mask = 0;
unsigned long flags;
int index;
@@ -153,7 +153,7 @@ static int map_irq_to_irte_handle(int irq, u16 *sub_handle)
static int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
- struct irq_cfg *cfg = irq_get_chip_data(irq);
+ struct irq_cfg *cfg = irq_cfg(irq);
unsigned long flags;
if (!irq_iommu)
@@ -1050,7 +1050,7 @@ static int
intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
bool force)
{
- struct irq_cfg *cfg = data->chip_data;
+ struct irq_cfg *cfg = irqd_cfg(data);
unsigned int dest, irq = data->irq;
struct irte irte;
int err;
@@ -1105,7 +1105,7 @@ static void intel_compose_msi_msg(struct pci_dev *pdev,
u16 sub_handle = 0;
int ir_index;
- cfg = irq_get_chip_data(irq);
+ cfg = irq_cfg(irq);
ir_index = map_irq_to_irte_handle(irq, &sub_handle);
BUG_ON(ir_index == -1);
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 1bd63352ab1..f7718d73e98 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -737,7 +737,7 @@ static int add_iommu_group(struct device *dev, void *data)
const struct iommu_ops *ops = cb->ops;
if (!ops->add_device)
- return -ENODEV;
+ return 0;
WARN_ON(dev->iommu_group);
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index 2c3f5ad0109..89c4846683b 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -298,7 +298,7 @@ static int set_remapped_irq_affinity(struct irq_data *data,
void free_remapped_irq(int irq)
{
- struct irq_cfg *cfg = irq_get_chip_data(irq);
+ struct irq_cfg *cfg = irq_cfg(irq);
if (!remap_ops || !remap_ops->free_irq)
return;
@@ -311,7 +311,7 @@ void compose_remapped_msi_msg(struct pci_dev *pdev,
unsigned int irq, unsigned int dest,
struct msi_msg *msg, u8 hpet_id)
{
- struct irq_cfg *cfg = irq_get_chip_data(irq);
+ struct irq_cfg *cfg = irq_cfg(irq);
if (!irq_remapped(cfg))
native_compose_msi_msg(pdev, irq, dest, msg, hpet_id);
@@ -364,7 +364,7 @@ static void ir_ack_apic_edge(struct irq_data *data)
static void ir_ack_apic_level(struct irq_data *data)
{
ack_APIC_irq();
- eoi_ioapic_irq(data->irq, data->chip_data);
+ eoi_ioapic_irq(data->irq, irqd_cfg(data));
}
static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index e550ccb7634..af1dc6a1c0a 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -18,9 +18,14 @@
*/
#include <linux/export.h>
+#include <linux/iommu.h>
#include <linux/limits.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
+#include <linux/slab.h>
+
+static const struct of_device_id __iommu_of_table_sentinel
+ __used __section(__iommu_of_table_end);
/**
* of_get_dma_window - Parse *dma-window property and returns 0 if found.
@@ -89,3 +94,87 @@ int of_get_dma_window(struct device_node *dn, const char *prefix, int index,
return 0;
}
EXPORT_SYMBOL_GPL(of_get_dma_window);
+
+struct of_iommu_node {
+ struct list_head list;
+ struct device_node *np;
+ struct iommu_ops *ops;
+};
+static LIST_HEAD(of_iommu_list);
+static DEFINE_SPINLOCK(of_iommu_lock);
+
+void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops)
+{
+ struct of_iommu_node *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
+
+ if (WARN_ON(!iommu))
+ return;
+
+ INIT_LIST_HEAD(&iommu->list);
+ iommu->np = np;
+ iommu->ops = ops;
+ spin_lock(&of_iommu_lock);
+ list_add_tail(&iommu->list, &of_iommu_list);
+ spin_unlock(&of_iommu_lock);
+}
+
+struct iommu_ops *of_iommu_get_ops(struct device_node *np)
+{
+ struct of_iommu_node *node;
+ struct iommu_ops *ops = NULL;
+
+ spin_lock(&of_iommu_lock);
+ list_for_each_entry(node, &of_iommu_list, list)
+ if (node->np == np) {
+ ops = node->ops;
+ break;
+ }
+ spin_unlock(&of_iommu_lock);
+ return ops;
+}
+
+struct iommu_ops *of_iommu_configure(struct device *dev)
+{
+ struct of_phandle_args iommu_spec;
+ struct device_node *np;
+ struct iommu_ops *ops = NULL;
+ int idx = 0;
+
+ /*
+ * We don't currently walk up the tree looking for a parent IOMMU.
+ * See the `Notes:' section of
+ * Documentation/devicetree/bindings/iommu/iommu.txt
+ */
+ while (!of_parse_phandle_with_args(dev->of_node, "iommus",
+ "#iommu-cells", idx,
+ &iommu_spec)) {
+ np = iommu_spec.np;
+ ops = of_iommu_get_ops(np);
+
+ if (!ops || !ops->of_xlate || ops->of_xlate(dev, &iommu_spec))
+ goto err_put_node;
+
+ of_node_put(np);
+ idx++;
+ }
+
+ return ops;
+
+err_put_node:
+ of_node_put(np);
+ return NULL;
+}
+
+void __init of_iommu_init(void)
+{
+ struct device_node *np;
+ const struct of_device_id *match, *matches = &__iommu_of_table;
+
+ for_each_matching_node_and_match(np, matches, &match) {
+ const of_iommu_init_fn init_fn = match->data;
+
+ if (init_fn(np))
+ pr_err("Failed to initialise IOMMU %s\n",
+ of_node_full_name(np));
+ }
+}
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index e12cb23d786..cc79d2a5a8c 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -5,8 +5,15 @@ config IRQCHIP
config ARM_GIC
bool
select IRQ_DOMAIN
+ select IRQ_DOMAIN_HIERARCHY
select MULTI_IRQ_HANDLER
+config ARM_GIC_V2M
+ bool
+ depends on ARM_GIC
+ depends on PCI && PCI_MSI
+ select PCI_MSI_IRQ_DOMAIN
+
config GIC_NON_BANKED
bool
@@ -14,6 +21,11 @@ config ARM_GIC_V3
bool
select IRQ_DOMAIN
select MULTI_IRQ_HANDLER
+ select IRQ_DOMAIN_HIERARCHY
+
+config ARM_GIC_V3_ITS
+ bool
+ select PCI_MSI_IRQ_DOMAIN
config ARM_NVIC
bool
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 4954a314c31..9516a324be6 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -19,7 +19,9 @@ obj-$(CONFIG_ARCH_SUNXI) += irq-sun4i.o
obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi-nmi.o
obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o
obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o
+obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o
obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-common.o
+obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o
obj-$(CONFIG_ARM_NVIC) += irq-nvic.o
obj-$(CONFIG_ARM_VIC) += irq-vic.o
obj-$(CONFIG_ATMEL_AIC_IRQ) += irq-atmel-aic-common.o irq-atmel-aic.o
@@ -39,3 +41,4 @@ obj-$(CONFIG_BCM7120_L2_IRQ) += irq-bcm7120-l2.o
obj-$(CONFIG_BRCMSTB_L2_IRQ) += irq-brcmstb-l2.o
obj-$(CONFIG_KEYSTONE_IRQ) += irq-keystone.o
obj-$(CONFIG_MIPS_GIC) += irq-mips-gic.o
+obj-$(CONFIG_ARCH_MEDIATEK) += irq-mtk-sysirq.o
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
new file mode 100644
index 00000000000..fdf706555d7
--- /dev/null
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -0,0 +1,333 @@
+/*
+ * ARM GIC v2m MSI(-X) support
+ * Support for Message Signaled Interrupts for systems that
+ * implement ARM Generic Interrupt Controller: GICv2m.
+ *
+ * Copyright (C) 2014 Advanced Micro Devices, Inc.
+ * Authors: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
+ * Harish Kasiviswanathan <harish.kasiviswanathan@amd.com>
+ * Brandon Anderson <brandon.anderson@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) "GICv2m: " fmt
+
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+/*
+* MSI_TYPER:
+* [31:26] Reserved
+* [25:16] lowest SPI assigned to MSI
+* [15:10] Reserved
+* [9:0] Number of SPIs assigned to MSI
+*/
+#define V2M_MSI_TYPER 0x008
+#define V2M_MSI_TYPER_BASE_SHIFT 16
+#define V2M_MSI_TYPER_BASE_MASK 0x3FF
+#define V2M_MSI_TYPER_NUM_MASK 0x3FF
+#define V2M_MSI_SETSPI_NS 0x040
+#define V2M_MIN_SPI 32
+#define V2M_MAX_SPI 1019
+
+#define V2M_MSI_TYPER_BASE_SPI(x) \
+ (((x) >> V2M_MSI_TYPER_BASE_SHIFT) & V2M_MSI_TYPER_BASE_MASK)
+
+#define V2M_MSI_TYPER_NUM_SPI(x) ((x) & V2M_MSI_TYPER_NUM_MASK)
+
+struct v2m_data {
+ spinlock_t msi_cnt_lock;
+ struct msi_controller mchip;
+ struct resource res; /* GICv2m resource */
+ void __iomem *base; /* GICv2m virt address */
+ u32 spi_start; /* The SPI number that MSIs start */
+ u32 nr_spis; /* The number of SPIs for MSIs */
+ unsigned long *bm; /* MSI vector bitmap */
+ struct irq_domain *domain;
+};
+
+static void gicv2m_mask_msi_irq(struct irq_data *d)
+{
+ pci_msi_mask_irq(d);
+ irq_chip_mask_parent(d);
+}
+
+static void gicv2m_unmask_msi_irq(struct irq_data *d)
+{
+ pci_msi_unmask_irq(d);
+ irq_chip_unmask_parent(d);
+}
+
+static struct irq_chip gicv2m_msi_irq_chip = {
+ .name = "MSI",
+ .irq_mask = gicv2m_mask_msi_irq,
+ .irq_unmask = gicv2m_unmask_msi_irq,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_write_msi_msg = pci_msi_domain_write_msg,
+};
+
+static struct msi_domain_info gicv2m_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSIX),
+ .chip = &gicv2m_msi_irq_chip,
+};
+
+static int gicv2m_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
+{
+ int ret;
+
+ ret = irq_chip_set_affinity_parent(irq_data, mask, force);
+ if (ret == IRQ_SET_MASK_OK)
+ ret = IRQ_SET_MASK_OK_DONE;
+
+ return ret;
+}
+
+static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct v2m_data *v2m = irq_data_get_irq_chip_data(data);
+ phys_addr_t addr = v2m->res.start + V2M_MSI_SETSPI_NS;
+
+ msg->address_hi = (u32) (addr >> 32);
+ msg->address_lo = (u32) (addr);
+ msg->data = data->hwirq;
+}
+
+static struct irq_chip gicv2m_irq_chip = {
+ .name = "GICv2m",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = gicv2m_set_affinity,
+ .irq_compose_msi_msg = gicv2m_compose_msi_msg,
+};
+
+static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ irq_hw_number_t hwirq)
+{
+ struct of_phandle_args args;
+ struct irq_data *d;
+ int err;
+
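+ /*
+ * Build a three-cell GIC interrupt specifier for the parent
+ * domain: cell 0 selects an SPI, cell 1 is the SPI number
+ * relative to 32, and cell 2 requests edge-rising trigger.
+ */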
+ args.np = domain->parent->of_node;
+ args.args_count = 3;
+ args.args[0] = 0;
+ args.args[1] = hwirq - 32;
+ args.args[2] = IRQ_TYPE_EDGE_RISING;
+
+ err = irq_domain_alloc_irqs_parent(domain, virq, 1, &args);
+ if (err)
+ return err;
+
+ /* Configure the interrupt line to be edge */
+ d = irq_domain_get_irq_data(domain->parent, virq);
+ d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
+ return 0;
+}
+
+static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq)
+{
+ int pos;
+
+ pos = hwirq - v2m->spi_start;
+ if (pos < 0 || pos >= v2m->nr_spis) {
+ pr_err("Failed to teardown msi. Invalid hwirq %d\n", hwirq);
+ return;
+ }
+
+ spin_lock(&v2m->msi_cnt_lock);
+ __clear_bit(pos, v2m->bm);
+ spin_unlock(&v2m->msi_cnt_lock);
+}
+
+static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct v2m_data *v2m = domain->host_data;
+ int hwirq, offset, err = 0;
+
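+ /* Allocate the first free SPI from the bitmap under the lock. */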
+ spin_lock(&v2m->msi_cnt_lock);
+ offset = find_first_zero_bit(v2m->bm, v2m->nr_spis);
+ if (offset < v2m->nr_spis)
+ __set_bit(offset, v2m->bm);
+ else
+ err = -ENOSPC;
+ spin_unlock(&v2m->msi_cnt_lock);
+
+ if (err)
+ return err;
+
+ hwirq = v2m->spi_start + offset;
+
+ err = gicv2m_irq_gic_domain_alloc(domain, virq, hwirq);
+ if (err) {
+ gicv2m_unalloc_msi(v2m, hwirq);
+ return err;
+ }
+
+ irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+ &gicv2m_irq_chip, v2m);
+
+ return 0;
+}
+
+static void gicv2m_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct v2m_data *v2m = irq_data_get_irq_chip_data(d);
+
+ BUG_ON(nr_irqs != 1);
+ gicv2m_unalloc_msi(v2m, d->hwirq);
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+}
+
+static const struct irq_domain_ops gicv2m_domain_ops = {
+ .alloc = gicv2m_irq_domain_alloc,
+ .free = gicv2m_irq_domain_free,
+};
+
+static bool is_msi_spi_valid(u32 base, u32 num)
+{
+ if (base < V2M_MIN_SPI) {
+ pr_err("Invalid MSI base SPI (base:%u)\n", base);
+ return false;
+ }
+
+ if ((num == 0) || (base + num > V2M_MAX_SPI)) {
+ pr_err("Number of SPIs (%u) exceed maximum (%u)\n",
+ num, V2M_MAX_SPI - V2M_MIN_SPI + 1);
+ return false;
+ }
+
+ return true;
+}
+
+static int __init gicv2m_init_one(struct device_node *node,
+ struct irq_domain *parent)
+{
+ int ret;
+ struct v2m_data *v2m;
+
+ v2m = kzalloc(sizeof(struct v2m_data), GFP_KERNEL);
+ if (!v2m) {
+ pr_err("Failed to allocate struct v2m_data.\n");
+ return -ENOMEM;
+ }
+
+ ret = of_address_to_resource(node, 0, &v2m->res);
+ if (ret) {
+ pr_err("Failed to allocate v2m resource.\n");
+ goto err_free_v2m;
+ }
+
+ v2m->base = ioremap(v2m->res.start, resource_size(&v2m->res));
+ if (!v2m->base) {
+ pr_err("Failed to map GICv2m resource\n");
+ ret = -ENOMEM;
+ goto err_free_v2m;
+ }
+
+ if (!of_property_read_u32(node, "arm,msi-base-spi", &v2m->spi_start) &&
+ !of_property_read_u32(node, "arm,msi-num-spis", &v2m->nr_spis)) {
+ pr_info("Overriding V2M MSI_TYPER (base:%u, num:%u)\n",
+ v2m->spi_start, v2m->nr_spis);
+ } else {
+ u32 typer = readl_relaxed(v2m->base + V2M_MSI_TYPER);
+
+ v2m->spi_start = V2M_MSI_TYPER_BASE_SPI(typer);
+ v2m->nr_spis = V2M_MSI_TYPER_NUM_SPI(typer);
+ }
+
+ if (!is_msi_spi_valid(v2m->spi_start, v2m->nr_spis)) {
+ ret = -EINVAL;
+ goto err_iounmap;
+ }
+
+ v2m->bm = kzalloc(sizeof(long) * BITS_TO_LONGS(v2m->nr_spis),
+ GFP_KERNEL);
+ if (!v2m->bm) {
+ ret = -ENOMEM;
+ goto err_iounmap;
+ }
+
+ v2m->domain = irq_domain_add_tree(NULL, &gicv2m_domain_ops, v2m);
+ if (!v2m->domain) {
+ pr_err("Failed to create GICv2m domain\n");
+ ret = -ENOMEM;
+ goto err_free_bm;
+ }
+
+ v2m->domain->parent = parent;
+ v2m->mchip.of_node = node;
+ v2m->mchip.domain = pci_msi_create_irq_domain(node,
+ &gicv2m_msi_domain_info,
+ v2m->domain);
+ if (!v2m->mchip.domain) {
+ pr_err("Failed to create MSI domain\n");
+ ret = -ENOMEM;
+ goto err_free_domains;
+ }
+
+ spin_lock_init(&v2m->msi_cnt_lock);
+
+ ret = of_pci_msi_chip_add(&v2m->mchip);
+ if (ret) {
+ pr_err("Failed to add msi_chip.\n");
+ goto err_free_domains;
+ }
+
+ pr_info("Node %s: range[%#lx:%#lx], SPI[%d:%d]\n", node->name,
+ (unsigned long)v2m->res.start, (unsigned long)v2m->res.end,
+ v2m->spi_start, (v2m->spi_start + v2m->nr_spis));
+
+ return 0;
+
+err_free_domains:
+ if (v2m->mchip.domain)
+ irq_domain_remove(v2m->mchip.domain);
+ if (v2m->domain)
+ irq_domain_remove(v2m->domain);
+err_free_bm:
+ kfree(v2m->bm);
+err_iounmap:
+ iounmap(v2m->base);
+err_free_v2m:
+ kfree(v2m);
+ return ret;
+}
+
+static struct of_device_id gicv2m_device_id[] = {
+ { .compatible = "arm,gic-v2m-frame", },
+ {},
+};
+
+int __init gicv2m_of_init(struct device_node *node, struct irq_domain *parent)
+{
+ int ret = 0;
+ struct device_node *child;
+
+ for (child = of_find_matching_node(node, gicv2m_device_id); child;
+ child = of_find_matching_node(child, gicv2m_device_id)) {
+ if (!of_find_property(child, "msi-controller", NULL))
+ continue;
+
+ ret = gicv2m_init_one(child, parent);
+ if (ret) {
+ of_node_put(node);
+ break;
+ }
+ }
+
+ return ret;
+}
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
new file mode 100644
index 00000000000..86e4684adeb
--- /dev/null
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -0,0 +1,1425 @@
+/*
+ * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/bitmap.h>
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/log2.h>
+#include <linux/mm.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+
+#include <linux/irqchip/arm-gic-v3.h>
+
+#include <asm/cacheflush.h>
+#include <asm/cputype.h>
+#include <asm/exception.h>
+
+#include "irqchip.h"
+
+#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1 << 0)
+
+#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
+
+/*
+ * Collection structure - just an ID, and a redistributor address to
+ * ping. We use one per CPU as a bag of interrupts assigned to this
+ * CPU.
+ */
+struct its_collection {
+ u64 target_address;
+ u16 col_id;
+};
+
+/*
+ * The ITS structure - contains most of the infrastructure, with the
+ * msi_controller, the command queue, the collections, and the list of
+ * devices writing to it.
+ */
+struct its_node {
+ raw_spinlock_t lock;
+ struct list_head entry;
+ struct msi_controller msi_chip;
+ struct irq_domain *domain;
+ void __iomem *base;
+ unsigned long phys_base;
+ struct its_cmd_block *cmd_base;
+ struct its_cmd_block *cmd_write;
+ void *tables[GITS_BASER_NR_REGS];
+ struct its_collection *collections;
+ struct list_head its_device_list;
+ u64 flags;
+ u32 ite_size;
+};
+
+#define ITS_ITT_ALIGN SZ_256
+
+/*
+ * The ITS view of a device - belongs to an ITS, a collection, owns an
+ * interrupt translation table, and a list of interrupts.
+ */
+struct its_device {
+ struct list_head entry;
+ struct its_node *its;
+ struct its_collection *collection;
+ void *itt;
+ unsigned long *lpi_map;
+ irq_hw_number_t lpi_base;
+ int nr_lpis;
+ u32 nr_ites;
+ u32 device_id;
+};
+
+static LIST_HEAD(its_nodes);
+static DEFINE_SPINLOCK(its_lock);
+static struct device_node *gic_root_node;
+static struct rdists *gic_rdists;
+
+#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist))
+#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
+
+/*
+ * ITS command descriptors - parameters to be encoded in a command
+ * block.
+ */
+struct its_cmd_desc {
+ union {
+ struct {
+ struct its_device *dev;
+ u32 event_id;
+ } its_inv_cmd;
+
+ struct {
+ struct its_device *dev;
+ u32 event_id;
+ } its_int_cmd;
+
+ struct {
+ struct its_device *dev;
+ int valid;
+ } its_mapd_cmd;
+
+ struct {
+ struct its_collection *col;
+ int valid;
+ } its_mapc_cmd;
+
+ struct {
+ struct its_device *dev;
+ u32 phys_id;
+ u32 event_id;
+ } its_mapvi_cmd;
+
+ struct {
+ struct its_device *dev;
+ struct its_collection *col;
+ u32 id;
+ } its_movi_cmd;
+
+ struct {
+ struct its_device *dev;
+ u32 event_id;
+ } its_discard_cmd;
+
+ struct {
+ struct its_collection *col;
+ } its_invall_cmd;
+ };
+};
+
+/*
+ * The ITS command block, which is what the ITS actually parses.
+ */
+struct its_cmd_block {
+ u64 raw_cmd[4];
+};
+
+#define ITS_CMD_QUEUE_SZ SZ_64K
+#define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
+
+typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *,
+ struct its_cmd_desc *);
+
+static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
+{
+ cmd->raw_cmd[0] &= ~0xffUL;
+ cmd->raw_cmd[0] |= cmd_nr;
+}
+
+static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
+{
+ cmd->raw_cmd[0] &= ~(0xffffUL << 32);
+ cmd->raw_cmd[0] |= ((u64)devid) << 32;
+}
+
+static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
+{
+ cmd->raw_cmd[1] &= ~0xffffffffUL;
+ cmd->raw_cmd[1] |= id;
+}
+
+static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
+{
+ cmd->raw_cmd[1] &= 0xffffffffUL;
+ cmd->raw_cmd[1] |= ((u64)phys_id) << 32;
+}
+
+static void its_encode_size(struct its_cmd_block *cmd, u8 size)
+{
+ cmd->raw_cmd[1] &= ~0x1fUL;
+ cmd->raw_cmd[1] |= size & 0x1f;
+}
+
+static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
+{
+ cmd->raw_cmd[2] &= ~0xffffffffffffUL;
+ cmd->raw_cmd[2] |= itt_addr & 0xffffffffff00UL;
+}
+
+static void its_encode_valid(struct its_cmd_block *cmd, int valid)
+{
+ cmd->raw_cmd[2] &= ~(1UL << 63);
+ cmd->raw_cmd[2] |= ((u64)!!valid) << 63;
+}
+
+static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
+{
+ cmd->raw_cmd[2] &= ~(0xffffffffUL << 16);
+ cmd->raw_cmd[2] |= (target_addr & (0xffffffffUL << 16));
+}
+
+static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
+{
+ cmd->raw_cmd[2] &= ~0xffffUL;
+ cmd->raw_cmd[2] |= col;
+}
+
+static inline void its_fixup_cmd(struct its_cmd_block *cmd)
+{
+ /* Let's fixup BE commands */
+ cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]);
+ cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]);
+ cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]);
+ cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]);
+}
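
All of the encode helpers above follow the same clear-then-OR pattern on one of the four 64-bit command words. A generic, standalone version of that pattern (purely illustrative; the driver deliberately open-codes each field) might look like this:

    #include <stdint.h>
    #include <stdio.h>

    /* Clear bits [msb:lsb] of *word and insert val there. */
    static void encode_field(uint64_t *word, uint64_t val, int msb, int lsb)
    {
            uint64_t mask = (msb - lsb + 1 == 64) ? ~0ULL
                            : ((1ULL << (msb - lsb + 1)) - 1) << lsb;

            *word = (*word & ~mask) | ((val << lsb) & mask);
    }

    int main(void)
    {
            uint64_t cmd0 = ~0ULL;                /* stale contents of a reused slot */

            encode_field(&cmd0, 0x09, 7, 0);      /* opcode field, bits [7:0]        */
            encode_field(&cmd0, 0x1234, 63, 32);  /* device ID field, bits [63:32]   */
            printf("%016llx\n", (unsigned long long)cmd0); /* 00001234ffffff09       */
            return 0;
    }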
+
+static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ unsigned long itt_addr;
+ u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);
+
+ itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
+ itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);
+
+ its_encode_cmd(cmd, GITS_CMD_MAPD);
+ its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
+ its_encode_size(cmd, size - 1);
+ its_encode_itt(cmd, itt_addr);
+ its_encode_valid(cmd, desc->its_mapd_cmd.valid);
+
+ its_fixup_cmd(cmd);
+
+ return desc->its_mapd_cmd.dev->collection;
+}
+
+static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ its_encode_cmd(cmd, GITS_CMD_MAPC);
+ its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
+ its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
+ its_encode_valid(cmd, desc->its_mapc_cmd.valid);
+
+ its_fixup_cmd(cmd);
+
+ return desc->its_mapc_cmd.col;
+}
+
+static struct its_collection *its_build_mapvi_cmd(struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ its_encode_cmd(cmd, GITS_CMD_MAPVI);
+ its_encode_devid(cmd, desc->its_mapvi_cmd.dev->device_id);
+ its_encode_event_id(cmd, desc->its_mapvi_cmd.event_id);
+ its_encode_phys_id(cmd, desc->its_mapvi_cmd.phys_id);
+ its_encode_collection(cmd, desc->its_mapvi_cmd.dev->collection->col_id);
+
+ its_fixup_cmd(cmd);
+
+ return desc->its_mapvi_cmd.dev->collection;
+}
+
+static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ its_encode_cmd(cmd, GITS_CMD_MOVI);
+ its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
+ its_encode_event_id(cmd, desc->its_movi_cmd.id);
+ its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);
+
+ its_fixup_cmd(cmd);
+
+ return desc->its_movi_cmd.dev->collection;
+}
+
+static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ its_encode_cmd(cmd, GITS_CMD_DISCARD);
+ its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
+ its_encode_event_id(cmd, desc->its_discard_cmd.event_id);
+
+ its_fixup_cmd(cmd);
+
+ return desc->its_discard_cmd.dev->collection;
+}
+
+static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ its_encode_cmd(cmd, GITS_CMD_INV);
+ its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
+ its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
+
+ its_fixup_cmd(cmd);
+
+ return desc->its_inv_cmd.dev->collection;
+}
+
+static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ its_encode_cmd(cmd, GITS_CMD_INVALL);
+ its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
+
+ its_fixup_cmd(cmd);
+
+ return NULL;
+}
+
+static u64 its_cmd_ptr_to_offset(struct its_node *its,
+ struct its_cmd_block *ptr)
+{
+ return (ptr - its->cmd_base) * sizeof(*ptr);
+}
+
+static int its_queue_full(struct its_node *its)
+{
+ int widx;
+ int ridx;
+
+ widx = its->cmd_write - its->cmd_base;
+ ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);
+
+ /* This is incredibly unlikely to happen, unless the ITS locks up. */
+ if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
+ return 1;
+
+ return 0;
+}
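
its_queue_full() treats the command queue as a ring buffer: it reports full when advancing the write index by one entry would land on the hardware's read index. A quick standalone check of that arithmetic (the entry count is shrunk for readability; the real queue holds 64K / 32-byte commands, i.e. 2048 entries):

    #include <stdio.h>

    #define NR_ENTRIES 8   /* illustrative only */

    static int queue_full(int widx, int ridx)
    {
            return ((widx + 1) % NR_ENTRIES) == ridx;
    }

    int main(void)
    {
            printf("%d\n", queue_full(3, 4)); /* 1: writer is right behind reader */
            printf("%d\n", queue_full(7, 0)); /* 1: full across the wrap-around   */
            printf("%d\n", queue_full(3, 5)); /* 0: still room to write           */
            return 0;
    }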
+
+static struct its_cmd_block *its_allocate_entry(struct its_node *its)
+{
+ struct its_cmd_block *cmd;
+ u32 count = 1000000; /* 1s! */
+
+ while (its_queue_full(its)) {
+ count--;
+ if (!count) {
+ pr_err_ratelimited("ITS queue not draining\n");
+ return NULL;
+ }
+ cpu_relax();
+ udelay(1);
+ }
+
+ cmd = its->cmd_write++;
+
+ /* Handle queue wrapping */
+ if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
+ its->cmd_write = its->cmd_base;
+
+ return cmd;
+}
+
+static struct its_cmd_block *its_post_commands(struct its_node *its)
+{
+ u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);
+
+ writel_relaxed(wr, its->base + GITS_CWRITER);
+
+ return its->cmd_write;
+}
+
+static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
+{
+ /*
+ * Make sure the commands written to memory are observable by
+ * the ITS.
+ */
+ if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
+ __flush_dcache_area(cmd, sizeof(*cmd));
+ else
+ dsb(ishst);
+}
+
+static void its_wait_for_range_completion(struct its_node *its,
+ struct its_cmd_block *from,
+ struct its_cmd_block *to)
+{
+ u64 rd_idx, from_idx, to_idx;
+ u32 count = 1000000; /* 1s! */
+
+ from_idx = its_cmd_ptr_to_offset(its, from);
+ to_idx = its_cmd_ptr_to_offset(its, to);
+
+ while (1) {
+ rd_idx = readl_relaxed(its->base + GITS_CREADR);
+ if (rd_idx >= to_idx || rd_idx < from_idx)
+ break;
+
+ count--;
+ if (!count) {
+ pr_err_ratelimited("ITS queue timeout\n");
+ return;
+ }
+ cpu_relax();
+ udelay(1);
+ }
+}
+
+static void its_send_single_command(struct its_node *its,
+ its_cmd_builder_t builder,
+ struct its_cmd_desc *desc)
+{
+ struct its_cmd_block *cmd, *sync_cmd, *next_cmd;
+ struct its_collection *sync_col;
+
+ raw_spin_lock(&its->lock);
+
+ cmd = its_allocate_entry(its);
+ if (!cmd) { /* We're soooooo screwed... */
+ pr_err_ratelimited("ITS can't allocate, dropping command\n");
+ raw_spin_unlock(&its->lock);
+ return;
+ }
+ sync_col = builder(cmd, desc);
+ its_flush_cmd(its, cmd);
+
+ if (sync_col) {
+ sync_cmd = its_allocate_entry(its);
+ if (!sync_cmd) {
+ pr_err_ratelimited("ITS can't SYNC, skipping\n");
+ goto post;
+ }
+ its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
+ its_encode_target(sync_cmd, sync_col->target_address);
+ its_fixup_cmd(sync_cmd);
+ its_flush_cmd(its, sync_cmd);
+ }
+
+post:
+ next_cmd = its_post_commands(its);
+ raw_spin_unlock(&its->lock);
+
+ its_wait_for_range_completion(its, cmd, next_cmd);
+}
+
+static void its_send_inv(struct its_device *dev, u32 event_id)
+{
+ struct its_cmd_desc desc;
+
+ desc.its_inv_cmd.dev = dev;
+ desc.its_inv_cmd.event_id = event_id;
+
+ its_send_single_command(dev->its, its_build_inv_cmd, &desc);
+}
+
+static void its_send_mapd(struct its_device *dev, int valid)
+{
+ struct its_cmd_desc desc;
+
+ desc.its_mapd_cmd.dev = dev;
+ desc.its_mapd_cmd.valid = !!valid;
+
+ its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
+}
+
+static void its_send_mapc(struct its_node *its, struct its_collection *col,
+ int valid)
+{
+ struct its_cmd_desc desc;
+
+ desc.its_mapc_cmd.col = col;
+ desc.its_mapc_cmd.valid = !!valid;
+
+ its_send_single_command(its, its_build_mapc_cmd, &desc);
+}
+
+static void its_send_mapvi(struct its_device *dev, u32 irq_id, u32 id)
+{
+ struct its_cmd_desc desc;
+
+ desc.its_mapvi_cmd.dev = dev;
+ desc.its_mapvi_cmd.phys_id = irq_id;
+ desc.its_mapvi_cmd.event_id = id;
+
+ its_send_single_command(dev->its, its_build_mapvi_cmd, &desc);
+}
+
+static void its_send_movi(struct its_device *dev,
+ struct its_collection *col, u32 id)
+{
+ struct its_cmd_desc desc;
+
+ desc.its_movi_cmd.dev = dev;
+ desc.its_movi_cmd.col = col;
+ desc.its_movi_cmd.id = id;
+
+ its_send_single_command(dev->its, its_build_movi_cmd, &desc);
+}
+
+static void its_send_discard(struct its_device *dev, u32 id)
+{
+ struct its_cmd_desc desc;
+
+ desc.its_discard_cmd.dev = dev;
+ desc.its_discard_cmd.event_id = id;
+
+ its_send_single_command(dev->its, its_build_discard_cmd, &desc);
+}
+
+static void its_send_invall(struct its_node *its, struct its_collection *col)
+{
+ struct its_cmd_desc desc;
+
+ desc.its_invall_cmd.col = col;
+
+ its_send_single_command(its, its_build_invall_cmd, &desc);
+}
+
+/*
+ * irqchip functions - assumes MSI, mostly.
+ */
+
+static inline u32 its_get_event_id(struct irq_data *d)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ return d->hwirq - its_dev->lpi_base;
+}
+
+static void lpi_set_config(struct irq_data *d, bool enable)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = d->hwirq;
+ u32 id = its_get_event_id(d);
+ u8 *cfg = page_address(gic_rdists->prop_page) + hwirq - 8192;
+
+ if (enable)
+ *cfg |= LPI_PROP_ENABLED;
+ else
+ *cfg &= ~LPI_PROP_ENABLED;
+
+ /*
+ * Make the above write visible to the redistributors.
+ * And yes, we're flushing exactly: One. Single. Byte.
+ * Humpf...
+ */
+ if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
+ __flush_dcache_area(cfg, sizeof(*cfg));
+ else
+ dsb(ishst);
+ its_send_inv(its_dev, id);
+}
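
Each LPI owns exactly one configuration byte in the property table, indexed by (hwirq - 8192) because LPI interrupt IDs start at 8192. A minimal model of that lookup (the enable bit is assumed to be bit 0 here, purely for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define LPI_OFFSET       8192        /* first LPI interrupt ID */
    #define PROP_ENABLED_BIT (1 << 0)    /* assumed encoding of LPI_PROP_ENABLED */

    static uint8_t prop_table[64 * 1024]; /* stands in for the PROPBASE page(s) */

    static void lpi_config(unsigned int hwirq, int enable)
    {
            uint8_t *cfg = &prop_table[hwirq - LPI_OFFSET];

            if (enable)
                    *cfg |= PROP_ENABLED_BIT;
            else
                    *cfg &= ~PROP_ENABLED_BIT;
    }

    int main(void)
    {
            lpi_config(8195, 1);
            printf("byte 3 = %#x\n", prop_table[3]); /* 0x1: LPI 8195 enabled */
            return 0;
    }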
+
+static void its_mask_irq(struct irq_data *d)
+{
+ lpi_set_config(d, false);
+}
+
+static void its_unmask_irq(struct irq_data *d)
+{
+ lpi_set_config(d, true);
+}
+
+static void its_eoi_irq(struct irq_data *d)
+{
+ gic_write_eoir(d->hwirq);
+}
+
+static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+ bool force)
+{
+ unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ struct its_collection *target_col;
+ u32 id = its_get_event_id(d);
+
+ if (cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+ target_col = &its_dev->its->collections[cpu];
+ its_send_movi(its_dev, target_col, id);
+ its_dev->collection = target_col;
+
+ return IRQ_SET_MASK_OK_DONE;
+}
+
+static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ struct its_node *its;
+ u64 addr;
+
+ its = its_dev->its;
+ addr = its->phys_base + GITS_TRANSLATER;
+
+ msg->address_lo = addr & ((1UL << 32) - 1);
+ msg->address_hi = addr >> 32;
+ msg->data = its_get_event_id(d);
+}
+
+static struct irq_chip its_irq_chip = {
+ .name = "ITS",
+ .irq_mask = its_mask_irq,
+ .irq_unmask = its_unmask_irq,
+ .irq_eoi = its_eoi_irq,
+ .irq_set_affinity = its_set_affinity,
+ .irq_compose_msi_msg = its_irq_compose_msi_msg,
+};
+
+static void its_mask_msi_irq(struct irq_data *d)
+{
+ pci_msi_mask_irq(d);
+ irq_chip_mask_parent(d);
+}
+
+static void its_unmask_msi_irq(struct irq_data *d)
+{
+ pci_msi_unmask_irq(d);
+ irq_chip_unmask_parent(d);
+}
+
+static struct irq_chip its_msi_irq_chip = {
+ .name = "ITS-MSI",
+ .irq_unmask = its_unmask_msi_irq,
+ .irq_mask = its_mask_msi_irq,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_write_msi_msg = pci_msi_domain_write_msg,
+};
+
+/*
+ * How we allocate LPIs:
+ *
+ * The GIC has id_bits bits for interrupt identifiers. From there, we
+ * must subtract 8192 which are reserved for SGIs/PPIs/SPIs. Then, as
+ * we allocate LPIs by chunks of 32, we can shift the whole thing by 5
+ * bits to the right.
+ *
+ * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations.
+ */
+#define IRQS_PER_CHUNK_SHIFT 5
+#define IRQS_PER_CHUNK (1 << IRQS_PER_CHUNK_SHIFT)
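
As a sanity check of the arithmetic described in the comment above, take id_bits = 16 (an example value; the real one is read from GICD_TYPER): 2^16 IDs minus the 8192 reserved for SGIs/PPIs/SPIs leaves 57344 LPIs, which is 57344 >> 5 = 1792 chunks of 32.

    #include <stdio.h>

    #define IRQS_PER_CHUNK_SHIFT 5

    int main(void)
    {
            int id_bits = 16;       /* example; taken from GICD_TYPER at runtime */
            int lpi_chunks = ((1 << id_bits) - 8192) >> IRQS_PER_CHUNK_SHIFT;

            printf("%d chunks\n", lpi_chunks);                               /* 1792 */
            printf("chunk 0 -> LPI %d\n", (0 << IRQS_PER_CHUNK_SHIFT) + 8192); /* 8192 */
            printf("LPI 8230 -> chunk %d\n", (8230 - 8192) >> IRQS_PER_CHUNK_SHIFT); /* 1 */
            return 0;
    }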
+
+static unsigned long *lpi_bitmap;
+static u32 lpi_chunks;
+static DEFINE_SPINLOCK(lpi_lock);
+
+static int its_lpi_to_chunk(int lpi)
+{
+ return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT;
+}
+
+static int its_chunk_to_lpi(int chunk)
+{
+ return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192;
+}
+
+static int its_lpi_init(u32 id_bits)
+{
+ lpi_chunks = its_lpi_to_chunk(1UL << id_bits);
+
+ lpi_bitmap = kzalloc(BITS_TO_LONGS(lpi_chunks) * sizeof(long),
+ GFP_KERNEL);
+ if (!lpi_bitmap) {
+ lpi_chunks = 0;
+ return -ENOMEM;
+ }
+
+ pr_info("ITS: Allocated %d chunks for LPIs\n", (int)lpi_chunks);
+ return 0;
+}
+
+static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids)
+{
+ unsigned long *bitmap = NULL;
+ int chunk_id;
+ int nr_chunks;
+ int i;
+
+ nr_chunks = DIV_ROUND_UP(nr_irqs, IRQS_PER_CHUNK);
+
+ spin_lock(&lpi_lock);
+
+ do {
+ chunk_id = bitmap_find_next_zero_area(lpi_bitmap, lpi_chunks,
+ 0, nr_chunks, 0);
+ if (chunk_id < lpi_chunks)
+ break;
+
+ nr_chunks--;
+ } while (nr_chunks > 0);
+
+ if (!nr_chunks)
+ goto out;
+
+ bitmap = kzalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK) * sizeof (long),
+ GFP_ATOMIC);
+ if (!bitmap)
+ goto out;
+
+ for (i = 0; i < nr_chunks; i++)
+ set_bit(chunk_id + i, lpi_bitmap);
+
+ *base = its_chunk_to_lpi(chunk_id);
+ *nr_ids = nr_chunks * IRQS_PER_CHUNK;
+
+out:
+ spin_unlock(&lpi_lock);
+
+ return bitmap;
+}
+
+static void its_lpi_free(unsigned long *bitmap, int base, int nr_ids)
+{
+ int lpi;
+
+ spin_lock(&lpi_lock);
+
+ for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) {
+ int chunk = its_lpi_to_chunk(lpi);
+ BUG_ON(chunk > lpi_chunks);
+ if (test_bit(chunk, lpi_bitmap)) {
+ clear_bit(chunk, lpi_bitmap);
+ } else {
+ pr_err("Bad LPI chunk %d\n", chunk);
+ }
+ }
+
+ spin_unlock(&lpi_lock);
+
+ kfree(bitmap);
+}
+
+/*
+ * We allocate 64kB for PROPBASE. That gives us at most 64K LPIs to
+ * deal with (one configuration byte per interrupt). PENDBASE has to
+ * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
+ */
+#define LPI_PROPBASE_SZ SZ_64K
+#define LPI_PENDBASE_SZ (LPI_PROPBASE_SZ / 8 + SZ_1K)
+
+/*
+ * This is how many bits of ID we need, including the useless ones.
+ */
+#define LPI_NRBITS ilog2(LPI_PROPBASE_SZ + SZ_8K)
+
+#define LPI_PROP_DEFAULT_PRIO 0xa0
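
Plugging numbers into the defines above: 64 KiB of PROPBASE covers 65536 one-byte LPI entries, LPI_NRBITS evaluates to ilog2(64K + 8K) = 16, and LPI_PENDBASE_SZ comes to 64K/8 + 1K = 9 KiB of pending bits (which its_cpu_init_lpis() later rounds up to the 64 KiB alignment requirement). A quick standalone check:

    #include <stdio.h>

    int main(void)
    {
            unsigned int propbase_sz = 64 * 1024;
            unsigned int pendbase_sz = propbase_sz / 8 + 1024;
            unsigned int nrbits = 0, span = propbase_sz + 8 * 1024;

            while (span >>= 1)        /* floor(log2()), i.e. what ilog2() returns */
                    nrbits++;

            printf("PENDBASE size: %u bytes\n", pendbase_sz); /* 9216 */
            printf("LPI_NRBITS:    %u\n", nrbits);            /* 16   */
            return 0;
    }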
+
+static int __init its_alloc_lpi_tables(void)
+{
+ phys_addr_t paddr;
+
+ gic_rdists->prop_page = alloc_pages(GFP_NOWAIT,
+ get_order(LPI_PROPBASE_SZ));
+ if (!gic_rdists->prop_page) {
+ pr_err("Failed to allocate PROPBASE\n");
+ return -ENOMEM;
+ }
+
+ paddr = page_to_phys(gic_rdists->prop_page);
+ pr_info("GIC: using LPI property table @%pa\n", &paddr);
+
+ /* Priority 0xa0, Group-1, disabled */
+ memset(page_address(gic_rdists->prop_page),
+ LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
+ LPI_PROPBASE_SZ);
+
+ /* Make sure the GIC will observe the written configuration */
+ __flush_dcache_area(page_address(gic_rdists->prop_page), LPI_PROPBASE_SZ);
+
+ return 0;
+}
+
+static const char *its_base_type_string[] = {
+ [GITS_BASER_TYPE_DEVICE] = "Devices",
+ [GITS_BASER_TYPE_VCPU] = "Virtual CPUs",
+ [GITS_BASER_TYPE_CPU] = "Physical CPUs",
+ [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections",
+ [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)",
+ [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)",
+ [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)",
+};
+
+static void its_free_tables(struct its_node *its)
+{
+ int i;
+
+ for (i = 0; i < GITS_BASER_NR_REGS; i++) {
+ if (its->tables[i]) {
+ free_page((unsigned long)its->tables[i]);
+ its->tables[i] = NULL;
+ }
+ }
+}
+
+static int its_alloc_tables(struct its_node *its)
+{
+ int err;
+ int i;
+ int psz = PAGE_SIZE;
+ u64 shr = GITS_BASER_InnerShareable;
+
+ for (i = 0; i < GITS_BASER_NR_REGS; i++) {
+ u64 val = readq_relaxed(its->base + GITS_BASER + i * 8);
+ u64 type = GITS_BASER_TYPE(val);
+ u64 entry_size = GITS_BASER_ENTRY_SIZE(val);
+ u64 tmp;
+ void *base;
+
+ if (type == GITS_BASER_TYPE_NONE)
+ continue;
+
+ /* We're lazy and only allocate a single page for now */
+ base = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!base) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ its->tables[i] = base;
+
+retry_baser:
+ val = (virt_to_phys(base) |
+ (type << GITS_BASER_TYPE_SHIFT) |
+ ((entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
+ GITS_BASER_WaWb |
+ shr |
+ GITS_BASER_VALID);
+
+ switch (psz) {
+ case SZ_4K:
+ val |= GITS_BASER_PAGE_SIZE_4K;
+ break;
+ case SZ_16K:
+ val |= GITS_BASER_PAGE_SIZE_16K;
+ break;
+ case SZ_64K:
+ val |= GITS_BASER_PAGE_SIZE_64K;
+ break;
+ }
+
+ val |= (PAGE_SIZE / psz) - 1;
+
+ writeq_relaxed(val, its->base + GITS_BASER + i * 8);
+ tmp = readq_relaxed(its->base + GITS_BASER + i * 8);
+
+ if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
+ /*
+ * Shareability didn't stick. Just use
+ * whatever the read reported, which is likely
+ * to be the only thing this redistributor
+ * supports.
+ */
+ shr = tmp & GITS_BASER_SHAREABILITY_MASK;
+ goto retry_baser;
+ }
+
+ if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
+ /*
+ * Page size didn't stick. Let's try a smaller
+ * size and retry. If we reach 4K, then
+ * something is horribly wrong...
+ */
+ switch (psz) {
+ case SZ_16K:
+ psz = SZ_4K;
+ goto retry_baser;
+ case SZ_64K:
+ psz = SZ_16K;
+ goto retry_baser;
+ }
+ }
+
+ if (val != tmp) {
+ pr_err("ITS: %s: GITS_BASER%d doesn't stick: %lx %lx\n",
+ its->msi_chip.of_node->full_name, i,
+ (unsigned long) val, (unsigned long) tmp);
+ err = -ENXIO;
+ goto out_free;
+ }
+
+ pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n",
+ (int)(PAGE_SIZE / entry_size),
+ its_base_type_string[type],
+ (unsigned long)virt_to_phys(base),
+ psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
+ }
+
+ return 0;
+
+out_free:
+ its_free_tables(its);
+
+ return err;
+}
+
+static int its_alloc_collections(struct its_node *its)
+{
+ its->collections = kzalloc(nr_cpu_ids * sizeof(*its->collections),
+ GFP_KERNEL);
+ if (!its->collections)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void its_cpu_init_lpis(void)
+{
+ void __iomem *rbase = gic_data_rdist_rd_base();
+ struct page *pend_page;
+ u64 val, tmp;
+
+ /* If we didn't allocate the pending table yet, do it now */
+ pend_page = gic_data_rdist()->pend_page;
+ if (!pend_page) {
+ phys_addr_t paddr;
+ /*
+ * The pending pages have to be at least 64kB aligned,
+ * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below.
+ */
+ pend_page = alloc_pages(GFP_NOWAIT | __GFP_ZERO,
+ get_order(max(LPI_PENDBASE_SZ, SZ_64K)));
+ if (!pend_page) {
+ pr_err("Failed to allocate PENDBASE for CPU%d\n",
+ smp_processor_id());
+ return;
+ }
+
+ /* Make sure the GIC will observe the zero-ed page */
+ __flush_dcache_area(page_address(pend_page), LPI_PENDBASE_SZ);
+
+ paddr = page_to_phys(pend_page);
+ pr_info("CPU%d: using LPI pending table @%pa\n",
+ smp_processor_id(), &paddr);
+ gic_data_rdist()->pend_page = pend_page;
+ }
+
+ /* Disable LPIs */
+ val = readl_relaxed(rbase + GICR_CTLR);
+ val &= ~GICR_CTLR_ENABLE_LPIS;
+ writel_relaxed(val, rbase + GICR_CTLR);
+
+ /*
+ * Make sure any change to the table is observable by the GIC.
+ */
+ dsb(sy);
+
+ /* set PROPBASE */
+ val = (page_to_phys(gic_rdists->prop_page) |
+ GICR_PROPBASER_InnerShareable |
+ GICR_PROPBASER_WaWb |
+ ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
+
+ writeq_relaxed(val, rbase + GICR_PROPBASER);
+ tmp = readq_relaxed(rbase + GICR_PROPBASER);
+
+ if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
+ pr_info_once("GIC: using cache flushing for LPI property table\n");
+ gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
+ }
+
+ /* set PENDBASE */
+ val = (page_to_phys(pend_page) |
+ GICR_PROPBASER_InnerShareable |
+ GICR_PROPBASER_WaWb);
+
+ writeq_relaxed(val, rbase + GICR_PENDBASER);
+
+ /* Enable LPIs */
+ val = readl_relaxed(rbase + GICR_CTLR);
+ val |= GICR_CTLR_ENABLE_LPIS;
+ writel_relaxed(val, rbase + GICR_CTLR);
+
+ /* Make sure the GIC has seen the above */
+ dsb(sy);
+}
+
+static void its_cpu_init_collection(void)
+{
+ struct its_node *its;
+ int cpu;
+
+ spin_lock(&its_lock);
+ cpu = smp_processor_id();
+
+ list_for_each_entry(its, &its_nodes, entry) {
+ u64 target;
+
+ /*
+ * We now have to bind each collection to its target
+ * redistributor.
+ */
+ if (readq_relaxed(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
+ /*
+ * This ITS wants the physical address of the
+ * redistributor.
+ */
+ target = gic_data_rdist()->phys_base;
+ } else {
+ /*
+ * This ITS wants a linear CPU number.
+ */
+ target = readq_relaxed(gic_data_rdist_rd_base() + GICR_TYPER);
+ target = GICR_TYPER_CPU_NUMBER(target);
+ }
+
+ /* Perform collection mapping */
+ its->collections[cpu].target_address = target;
+ its->collections[cpu].col_id = cpu;
+
+ its_send_mapc(its, &its->collections[cpu], 1);
+ its_send_invall(its, &its->collections[cpu]);
+ }
+
+ spin_unlock(&its_lock);
+}
+
+static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
+{
+ struct its_device *its_dev = NULL, *tmp;
+
+ raw_spin_lock(&its->lock);
+
+ list_for_each_entry(tmp, &its->its_device_list, entry) {
+ if (tmp->device_id == dev_id) {
+ its_dev = tmp;
+ break;
+ }
+ }
+
+ raw_spin_unlock(&its->lock);
+
+ return its_dev;
+}
+
+static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
+ int nvecs)
+{
+ struct its_device *dev;
+ unsigned long *lpi_map;
+ void *itt;
+ int lpi_base;
+ int nr_lpis;
+ int nr_ites;
+ int cpu;
+ int sz;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ /*
+ * At least one bit of EventID is being used, hence a minimum
+ * of two entries. No, the architecture doesn't let you
+ * express an ITT with a single entry.
+ */
+ nr_ites = max(2, roundup_pow_of_two(nvecs));
+ sz = nr_ites * its->ite_size;
+ sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
+ itt = kmalloc(sz, GFP_KERNEL);
+ lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
+
+ if (!dev || !itt || !lpi_map) {
+ kfree(dev);
+ kfree(itt);
+ kfree(lpi_map);
+ return NULL;
+ }
+
+ dev->its = its;
+ dev->itt = itt;
+ dev->nr_ites = nr_ites;
+ dev->lpi_map = lpi_map;
+ dev->lpi_base = lpi_base;
+ dev->nr_lpis = nr_lpis;
+ dev->device_id = dev_id;
+ INIT_LIST_HEAD(&dev->entry);
+
+ raw_spin_lock(&its->lock);
+ list_add(&dev->entry, &its->its_device_list);
+ raw_spin_unlock(&its->lock);
+
+ /* Bind the device to the first possible CPU */
+ cpu = cpumask_first(cpu_online_mask);
+ dev->collection = &its->collections[cpu];
+
+ /* Map device to its ITT */
+ its_send_mapd(dev, 1);
+
+ return dev;
+}
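
To see what the sizing logic in its_create_device() produces, assume a device asking for nvecs = 3 vectors on an ITS with an 8-byte ITE (ite_size is really read from GITS_TYPER; 8 is just an example): nr_ites rounds up to 4 and the ITT allocation is padded so it can always be re-aligned to 256 bytes.

    #include <stdio.h>

    #define ITT_ALIGN 256

    static unsigned int roundup_pow2(unsigned int x)
    {
            unsigned int r = 1;

            while (r < x)
                    r <<= 1;
            return r;
    }

    int main(void)
    {
            unsigned int nvecs = 3, ite_size = 8;       /* example values */
            unsigned int nr_ites = roundup_pow2(nvecs);
            unsigned int sz;

            if (nr_ites < 2)
                    nr_ites = 2;                        /* ITTs have at least two entries */

            sz = nr_ites * ite_size;
            if (sz < ITT_ALIGN)
                    sz = ITT_ALIGN;
            sz += ITT_ALIGN - 1;                        /* slack for re-alignment */

            printf("nr_ites=%u, allocation=%u bytes\n", nr_ites, sz); /* 4, 511 */
            return 0;
    }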
+
+static void its_free_device(struct its_device *its_dev)
+{
+ raw_spin_lock(&its_dev->its->lock);
+ list_del(&its_dev->entry);
+ raw_spin_unlock(&its_dev->its->lock);
+ kfree(its_dev->itt);
+ kfree(its_dev);
+}
+
+static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
+{
+ int idx;
+
+ idx = find_first_zero_bit(dev->lpi_map, dev->nr_lpis);
+ if (idx == dev->nr_lpis)
+ return -ENOSPC;
+
+ *hwirq = dev->lpi_base + idx;
+ set_bit(idx, dev->lpi_map);
+
+ return 0;
+}
+
+static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
+ int nvec, msi_alloc_info_t *info)
+{
+ struct pci_dev *pdev;
+ struct its_node *its;
+ u32 dev_id;
+ struct its_device *its_dev;
+
+ if (!dev_is_pci(dev))
+ return -EINVAL;
+
+ pdev = to_pci_dev(dev);
+ dev_id = PCI_DEVID(pdev->bus->number, pdev->devfn);
+ its = domain->parent->host_data;
+
+ its_dev = its_find_device(its, dev_id);
+ if (WARN_ON(its_dev))
+ return -EINVAL;
+
+ its_dev = its_create_device(its, dev_id, nvec);
+ if (!its_dev)
+ return -ENOMEM;
+
+ dev_dbg(&pdev->dev, "ITT %d entries, %d bits\n", nvec, ilog2(nvec));
+
+ info->scratchpad[0].ptr = its_dev;
+ info->scratchpad[1].ptr = dev;
+ return 0;
+}
+
+static struct msi_domain_ops its_pci_msi_ops = {
+ .msi_prepare = its_msi_prepare,
+};
+
+static struct msi_domain_info its_pci_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
+ .ops = &its_pci_msi_ops,
+ .chip = &its_msi_irq_chip,
+};
+
+static int its_irq_gic_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ irq_hw_number_t hwirq)
+{
+ struct of_phandle_args args;
+
+ args.np = domain->parent->of_node;
+ args.args_count = 3;
+ args.args[0] = GIC_IRQ_TYPE_LPI;
+ args.args[1] = hwirq;
+ args.args[2] = IRQ_TYPE_EDGE_RISING;
+
+ return irq_domain_alloc_irqs_parent(domain, virq, 1, &args);
+}
+
+static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ msi_alloc_info_t *info = args;
+ struct its_device *its_dev = info->scratchpad[0].ptr;
+ irq_hw_number_t hwirq;
+ int err;
+ int i;
+
+ for (i = 0; i < nr_irqs; i++) {
+ err = its_alloc_device_irq(its_dev, &hwirq);
+ if (err)
+ return err;
+
+ err = its_irq_gic_domain_alloc(domain, virq + i, hwirq);
+ if (err)
+ return err;
+
+ irq_domain_set_hwirq_and_chip(domain, virq + i,
+ hwirq, &its_irq_chip, its_dev);
+ dev_dbg(info->scratchpad[1].ptr, "ID:%d pID:%d vID:%d\n",
+ (int)(hwirq - its_dev->lpi_base), (int)hwirq, virq + i);
+ }
+
+ return 0;
+}
+
+static void its_irq_domain_activate(struct irq_domain *domain,
+ struct irq_data *d)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ u32 event = its_get_event_id(d);
+
+ /* Map the GIC IRQ and event to the device */
+ its_send_mapvi(its_dev, d->hwirq, event);
+}
+
+static void its_irq_domain_deactivate(struct irq_domain *domain,
+ struct irq_data *d)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ u32 event = its_get_event_id(d);
+
+ /* Stop the delivery of interrupts */
+ its_send_discard(its_dev, event);
+}
+
+static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ int i;
+
+ for (i = 0; i < nr_irqs; i++) {
+ struct irq_data *data = irq_domain_get_irq_data(domain,
+ virq + i);
+ u32 event = its_get_event_id(data);
+
+ /* Mark interrupt index as unused */
+ clear_bit(event, its_dev->lpi_map);
+
+ /* Nuke the entry in the domain */
+ irq_domain_reset_irq_data(data);
+ }
+
+ /* If all interrupts have been freed, start mopping the floor */
+ if (bitmap_empty(its_dev->lpi_map, its_dev->nr_lpis)) {
+ its_lpi_free(its_dev->lpi_map,
+ its_dev->lpi_base,
+ its_dev->nr_lpis);
+
+ /* Unmap device/itt */
+ its_send_mapd(its_dev, 0);
+ its_free_device(its_dev);
+ }
+
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+}
+
+static const struct irq_domain_ops its_domain_ops = {
+ .alloc = its_irq_domain_alloc,
+ .free = its_irq_domain_free,
+ .activate = its_irq_domain_activate,
+ .deactivate = its_irq_domain_deactivate,
+};
+
+static int its_probe(struct device_node *node, struct irq_domain *parent)
+{
+ struct resource res;
+ struct its_node *its;
+ void __iomem *its_base;
+ u32 val;
+ u64 baser, tmp;
+ int err;
+
+ err = of_address_to_resource(node, 0, &res);
+ if (err) {
+ pr_warn("%s: no regs?\n", node->full_name);
+ return -ENXIO;
+ }
+
+ its_base = ioremap(res.start, resource_size(&res));
+ if (!its_base) {
+ pr_warn("%s: unable to map registers\n", node->full_name);
+ return -ENOMEM;
+ }
+
+ val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
+ if (val != 0x30 && val != 0x40) {
+ pr_warn("%s: no ITS detected, giving up\n", node->full_name);
+ err = -ENODEV;
+ goto out_unmap;
+ }
+
+ pr_info("ITS: %s\n", node->full_name);
+
+ its = kzalloc(sizeof(*its), GFP_KERNEL);
+ if (!its) {
+ err = -ENOMEM;
+ goto out_unmap;
+ }
+
+ raw_spin_lock_init(&its->lock);
+ INIT_LIST_HEAD(&its->entry);
+ INIT_LIST_HEAD(&its->its_device_list);
+ its->base = its_base;
+ its->phys_base = res.start;
+ its->msi_chip.of_node = node;
+ its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
+
+ its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL);
+ if (!its->cmd_base) {
+ err = -ENOMEM;
+ goto out_free_its;
+ }
+ its->cmd_write = its->cmd_base;
+
+ err = its_alloc_tables(its);
+ if (err)
+ goto out_free_cmd;
+
+ err = its_alloc_collections(its);
+ if (err)
+ goto out_free_tables;
+
+ baser = (virt_to_phys(its->cmd_base) |
+ GITS_CBASER_WaWb |
+ GITS_CBASER_InnerShareable |
+ (ITS_CMD_QUEUE_SZ / SZ_4K - 1) |
+ GITS_CBASER_VALID);
+
+ writeq_relaxed(baser, its->base + GITS_CBASER);
+ tmp = readq_relaxed(its->base + GITS_CBASER);
+ writeq_relaxed(0, its->base + GITS_CWRITER);
+ writel_relaxed(1, its->base + GITS_CTLR);
+
+ if ((tmp ^ baser) & GITS_BASER_SHAREABILITY_MASK) {
+ pr_info("ITS: using cache flushing for cmd queue\n");
+ its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
+ }
+
+ if (of_property_read_bool(its->msi_chip.of_node, "msi-controller")) {
+ its->domain = irq_domain_add_tree(NULL, &its_domain_ops, its);
+ if (!its->domain) {
+ err = -ENOMEM;
+ goto out_free_tables;
+ }
+
+ its->domain->parent = parent;
+
+ its->msi_chip.domain = pci_msi_create_irq_domain(node,
+ &its_pci_msi_domain_info,
+ its->domain);
+ if (!its->msi_chip.domain) {
+ err = -ENOMEM;
+ goto out_free_domains;
+ }
+
+ err = of_pci_msi_chip_add(&its->msi_chip);
+ if (err)
+ goto out_free_domains;
+ }
+
+ spin_lock(&its_lock);
+ list_add(&its->entry, &its_nodes);
+ spin_unlock(&its_lock);
+
+ return 0;
+
+out_free_domains:
+ if (its->msi_chip.domain)
+ irq_domain_remove(its->msi_chip.domain);
+ if (its->domain)
+ irq_domain_remove(its->domain);
+out_free_tables:
+ its_free_tables(its);
+out_free_cmd:
+ kfree(its->cmd_base);
+out_free_its:
+ kfree(its);
+out_unmap:
+ iounmap(its_base);
+ pr_err("ITS: failed probing %s (%d)\n", node->full_name, err);
+ return err;
+}
+
+static bool gic_rdists_supports_plpis(void)
+{
+ return !!(readl_relaxed(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
+}
+
+int its_cpu_init(void)
+{
+ if (!gic_rdists_supports_plpis()) {
+ pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
+ return -ENXIO;
+ }
+
+ if (!list_empty(&its_nodes)) {
+ its_cpu_init_lpis();
+ its_cpu_init_collection();
+ }
+
+ return 0;
+}
+
+static struct of_device_id its_device_id[] = {
+ { .compatible = "arm,gic-v3-its", },
+ {},
+};
+
+int its_init(struct device_node *node, struct rdists *rdists,
+ struct irq_domain *parent_domain)
+{
+ struct device_node *np;
+
+ for (np = of_find_matching_node(node, its_device_id); np;
+ np = of_find_matching_node(np, its_device_id)) {
+ its_probe(np, parent_domain);
+ }
+
+ if (list_empty(&its_nodes)) {
+ pr_warn("ITS: No ITS available, not enabling LPIs\n");
+ return -ENXIO;
+ }
+
+ gic_rdists = rdists;
+ gic_root_node = node;
+
+ its_alloc_lpi_tables();
+ its_lpi_init(rdists->id_bits);
+
+ return 0;
+}
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index aa17ae805a7..1a146ccee70 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -34,20 +34,25 @@
#include "irq-gic-common.h"
#include "irqchip.h"
+struct redist_region {
+ void __iomem *redist_base;
+ phys_addr_t phys_base;
+};
+
struct gic_chip_data {
void __iomem *dist_base;
- void __iomem **redist_base;
- void __iomem * __percpu *rdist;
+ struct redist_region *redist_regions;
+ struct rdists rdists;
struct irq_domain *domain;
u64 redist_stride;
- u32 redist_regions;
+ u32 nr_redist_regions;
unsigned int irq_nr;
};
static struct gic_chip_data gic_data __read_mostly;
-#define gic_data_rdist() (this_cpu_ptr(gic_data.rdist))
-#define gic_data_rdist_rd_base() (*gic_data_rdist())
+#define gic_data_rdist() (this_cpu_ptr(gic_data.rdists.rdist))
+#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
#define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K)
/* Our default, arbitrary priority value. Linux only uses one anyway. */
@@ -71,9 +76,6 @@ static inline void __iomem *gic_dist_base(struct irq_data *d)
if (d->hwirq <= 1023) /* SPI -> dist_base */
return gic_data.dist_base;
- if (d->hwirq >= 8192)
- BUG(); /* LPI Detected!!! */
-
return NULL;
}
@@ -271,11 +273,11 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
do {
irqnr = gic_read_iar();
- if (likely(irqnr > 15 && irqnr < 1020)) {
+ if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) {
int err;
err = handle_domain_irq(gic_data.domain, irqnr, regs);
if (err) {
- WARN_ONCE(true, "Unexpected SPI received!\n");
+ WARN_ONCE(true, "Unexpected interrupt received!\n");
gic_write_eoir(irqnr);
}
continue;
@@ -333,8 +335,8 @@ static int gic_populate_rdist(void)
MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
MPIDR_AFFINITY_LEVEL(mpidr, 0));
- for (i = 0; i < gic_data.redist_regions; i++) {
- void __iomem *ptr = gic_data.redist_base[i];
+ for (i = 0; i < gic_data.nr_redist_regions; i++) {
+ void __iomem *ptr = gic_data.redist_regions[i].redist_base;
u32 reg;
reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
@@ -347,10 +349,13 @@ static int gic_populate_rdist(void)
do {
typer = readq_relaxed(ptr + GICR_TYPER);
if ((typer >> 32) == aff) {
+ u64 offset = ptr - gic_data.redist_regions[i].redist_base;
gic_data_rdist_rd_base() = ptr;
- pr_info("CPU%d: found redistributor %llx @%p\n",
+ gic_data_rdist()->phys_base = gic_data.redist_regions[i].phys_base + offset;
+ pr_info("CPU%d: found redistributor %llx region %d:%pa\n",
smp_processor_id(),
- (unsigned long long)mpidr, ptr);
+ (unsigned long long)mpidr,
+ i, &gic_data_rdist()->phys_base);
return 0;
}
@@ -385,6 +390,11 @@ static void gic_cpu_sys_reg_init(void)
gic_write_grpen1(1);
}
+static int gic_dist_supports_lpis(void)
+{
+ return !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS);
+}
+
static void gic_cpu_init(void)
{
void __iomem *rbase;
@@ -399,6 +409,10 @@ static void gic_cpu_init(void)
gic_cpu_config(rbase, gic_redist_wait_for_rwp);
+ /* Give LPIs a spin */
+ if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
+ its_cpu_init();
+
/* initialise system registers */
gic_cpu_sys_reg_init();
}
@@ -585,26 +599,43 @@ static struct irq_chip gic_chip = {
.irq_set_affinity = gic_set_affinity,
};
+#define GIC_ID_NR (1U << gic_data.rdists.id_bits)
+
static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hw)
{
/* SGIs are private to the core kernel */
if (hw < 16)
return -EPERM;
+ /* Nothing here */
+ if (hw >= gic_data.irq_nr && hw < 8192)
+ return -EPERM;
+ /* Off limits */
+ if (hw >= GIC_ID_NR)
+ return -EPERM;
+
/* PPIs */
if (hw < 32) {
irq_set_percpu_devid(irq);
- irq_set_chip_and_handler(irq, &gic_chip,
- handle_percpu_devid_irq);
+ irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
+ handle_percpu_devid_irq, NULL, NULL);
set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
}
/* SPIs */
if (hw >= 32 && hw < gic_data.irq_nr) {
- irq_set_chip_and_handler(irq, &gic_chip,
- handle_fasteoi_irq);
+ irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
+ handle_fasteoi_irq, NULL, NULL);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
- irq_set_chip_data(irq, d->host_data);
+ /* LPIs */
+ if (hw >= 8192 && hw < GIC_ID_NR) {
+ if (!gic_dist_supports_lpis())
+ return -EPERM;
+ irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
+ handle_fasteoi_irq, NULL, NULL);
+ set_irq_flags(irq, IRQF_VALID);
+ }
+
return 0;
}
@@ -625,6 +656,9 @@ static int gic_irq_domain_xlate(struct irq_domain *d,
case 1: /* PPI */
*out_hwirq = intspec[1] + 16;
break;
+ case GIC_IRQ_TYPE_LPI: /* LPI */
+ *out_hwirq = intspec[1];
+ break;
default:
return -EINVAL;
}
@@ -633,17 +667,50 @@ static int gic_irq_domain_xlate(struct irq_domain *d,
return 0;
}
+static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ int i, ret;
+ irq_hw_number_t hwirq;
+ unsigned int type = IRQ_TYPE_NONE;
+ struct of_phandle_args *irq_data = arg;
+
+ ret = gic_irq_domain_xlate(domain, irq_data->np, irq_data->args,
+ irq_data->args_count, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_irqs; i++)
+ gic_irq_domain_map(domain, virq + i, hwirq + i);
+
+ return 0;
+}
+
+static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ int i;
+
+ for (i = 0; i < nr_irqs; i++) {
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
+ irq_set_handler(virq + i, NULL);
+ irq_domain_reset_irq_data(d);
+ }
+}
+
static const struct irq_domain_ops gic_irq_domain_ops = {
- .map = gic_irq_domain_map,
.xlate = gic_irq_domain_xlate,
+ .alloc = gic_irq_domain_alloc,
+ .free = gic_irq_domain_free,
};
static int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
void __iomem *dist_base;
- void __iomem **redist_base;
+ struct redist_region *rdist_regs;
u64 redist_stride;
- u32 redist_regions;
+ u32 nr_redist_regions;
+ u32 typer;
u32 reg;
int gic_irqs;
int err;
@@ -664,54 +731,63 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
goto out_unmap_dist;
}
- if (of_property_read_u32(node, "#redistributor-regions", &redist_regions))
- redist_regions = 1;
+ if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
+ nr_redist_regions = 1;
- redist_base = kzalloc(sizeof(*redist_base) * redist_regions, GFP_KERNEL);
- if (!redist_base) {
+ rdist_regs = kzalloc(sizeof(*rdist_regs) * nr_redist_regions, GFP_KERNEL);
+ if (!rdist_regs) {
err = -ENOMEM;
goto out_unmap_dist;
}
- for (i = 0; i < redist_regions; i++) {
- redist_base[i] = of_iomap(node, 1 + i);
- if (!redist_base[i]) {
+ for (i = 0; i < nr_redist_regions; i++) {
+ struct resource res;
+ int ret;
+
+ ret = of_address_to_resource(node, 1 + i, &res);
+ rdist_regs[i].redist_base = of_iomap(node, 1 + i);
+ if (ret || !rdist_regs[i].redist_base) {
pr_err("%s: couldn't map region %d\n",
node->full_name, i);
err = -ENODEV;
goto out_unmap_rdist;
}
+ rdist_regs[i].phys_base = res.start;
}
if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
redist_stride = 0;
gic_data.dist_base = dist_base;
- gic_data.redist_base = redist_base;
- gic_data.redist_regions = redist_regions;
+ gic_data.redist_regions = rdist_regs;
+ gic_data.nr_redist_regions = nr_redist_regions;
gic_data.redist_stride = redist_stride;
/*
* Find out how many interrupts are supported.
* The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
*/
- gic_irqs = readl_relaxed(gic_data.dist_base + GICD_TYPER) & 0x1f;
- gic_irqs = (gic_irqs + 1) * 32;
+ typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
+ gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer);
+ gic_irqs = GICD_TYPER_IRQS(typer);
if (gic_irqs > 1020)
gic_irqs = 1020;
gic_data.irq_nr = gic_irqs;
gic_data.domain = irq_domain_add_tree(node, &gic_irq_domain_ops,
&gic_data);
- gic_data.rdist = alloc_percpu(typeof(*gic_data.rdist));
+ gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
- if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdist)) {
+ if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
err = -ENOMEM;
goto out_free;
}
set_handle_irq(gic_handle_irq);
+ if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
+ its_init(node, &gic_data.rdists, gic_data.domain);
+
gic_smp_init();
gic_dist_init();
gic_cpu_init();
@@ -722,12 +798,12 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
out_free:
if (gic_data.domain)
irq_domain_remove(gic_data.domain);
- free_percpu(gic_data.rdist);
+ free_percpu(gic_data.rdists.rdist);
out_unmap_rdist:
- for (i = 0; i < redist_regions; i++)
- if (redist_base[i])
- iounmap(redist_base[i]);
- kfree(redist_base);
+ for (i = 0; i < nr_redist_regions; i++)
+ if (rdist_regs[i].redist_base)
+ iounmap(rdist_regs[i].redist_base);
+ kfree(rdist_regs);
out_unmap_dist:
iounmap(dist_base);
return err;
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 7f9be0785c6..d617ee5a3d8 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -788,17 +788,16 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
{
if (hw < 32) {
irq_set_percpu_devid(irq);
- irq_set_chip_and_handler(irq, &gic_chip,
- handle_percpu_devid_irq);
+ irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
+ handle_percpu_devid_irq, NULL, NULL);
set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
} else {
- irq_set_chip_and_handler(irq, &gic_chip,
- handle_fasteoi_irq);
+ irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
+ handle_fasteoi_irq, NULL, NULL);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
gic_routable_irq_domain_ops->map(d, irq, hw);
}
- irq_set_chip_data(irq, d->host_data);
return 0;
}
@@ -858,6 +857,31 @@ static struct notifier_block gic_cpu_notifier = {
};
#endif
+static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ int i, ret;
+ irq_hw_number_t hwirq;
+ unsigned int type = IRQ_TYPE_NONE;
+ struct of_phandle_args *irq_data = arg;
+
+ ret = gic_irq_domain_xlate(domain, irq_data->np, irq_data->args,
+ irq_data->args_count, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_irqs; i++)
+ gic_irq_domain_map(domain, virq + i, hwirq + i);
+
+ return 0;
+}
+
+static const struct irq_domain_ops gic_irq_domain_hierarchy_ops = {
+ .xlate = gic_irq_domain_xlate,
+ .alloc = gic_irq_domain_alloc,
+ .free = irq_domain_free_irqs_top,
+};
+
static const struct irq_domain_ops gic_irq_domain_ops = {
.map = gic_irq_domain_map,
.unmap = gic_irq_domain_unmap,
@@ -948,18 +972,6 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
gic_cpu_map[i] = 0xff;
/*
- * For primary GICs, skip over SGIs.
- * For secondary GICs, skip over PPIs, too.
- */
- if (gic_nr == 0 && (irq_start & 31) > 0) {
- hwirq_base = 16;
- if (irq_start != -1)
- irq_start = (irq_start & ~31) + 16;
- } else {
- hwirq_base = 32;
- }
-
- /*
* Find out how many interrupts are supported.
* The GIC only supports up to 1020 interrupt sources.
*/
@@ -969,10 +981,31 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
gic_irqs = 1020;
gic->gic_irqs = gic_irqs;
- gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */
+ if (node) { /* DT case */
+ const struct irq_domain_ops *ops = &gic_irq_domain_hierarchy_ops;
+
+ if (!of_property_read_u32(node, "arm,routable-irqs",
+ &nr_routable_irqs)) {
+ ops = &gic_irq_domain_ops;
+ gic_irqs = nr_routable_irqs;
+ }
+
+ gic->domain = irq_domain_add_linear(node, gic_irqs, ops, gic);
+ } else { /* Non-DT case */
+ /*
+ * For primary GICs, skip over SGIs.
+ * For secondary GICs, skip over PPIs, too.
+ */
+ if (gic_nr == 0 && (irq_start & 31) > 0) {
+ hwirq_base = 16;
+ if (irq_start != -1)
+ irq_start = (irq_start & ~31) + 16;
+ } else {
+ hwirq_base = 32;
+ }
+
+ gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */
- if (of_property_read_u32(node, "arm,routable-irqs",
- &nr_routable_irqs)) {
irq_base = irq_alloc_descs(irq_start, 16, gic_irqs,
numa_node_id());
if (IS_ERR_VALUE(irq_base)) {
@@ -983,10 +1016,6 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
hwirq_base, &gic_irq_domain_ops, gic);
- } else {
- gic->domain = irq_domain_add_linear(node, nr_routable_irqs,
- &gic_irq_domain_ops,
- gic);
}
if (WARN_ON(!gic->domain))
@@ -1037,6 +1066,10 @@ gic_of_init(struct device_node *node, struct device_node *parent)
irq = irq_of_parse_and_map(node, 0);
gic_cascade_irq(gic_cnt, irq);
}
+
+ if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
+ gicv2m_of_init(node, gic_data[gic_cnt].domain);
+
gic_cnt++;
return 0;
}
diff --git a/drivers/irqchip/irq-mtk-sysirq.c b/drivers/irqchip/irq-mtk-sysirq.c
new file mode 100644
index 00000000000..7e342df6a62
--- /dev/null
+++ b/drivers/irqchip/irq-mtk-sysirq.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Joe.C <yingjoe.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "irqchip.h"
+
+#define MT6577_SYS_INTPOL_NUM (224)
+
+struct mtk_sysirq_chip_data {
+ spinlock_t lock;
+ void __iomem *intpol_base;
+};
+
+static int mtk_sysirq_set_type(struct irq_data *data, unsigned int type)
+{
+ irq_hw_number_t hwirq = data->hwirq;
+ struct mtk_sysirq_chip_data *chip_data = data->chip_data;
+ u32 offset, reg_index, value;
+ unsigned long flags;
+ int ret;
+
+ offset = hwirq & 0x1f;
+ reg_index = hwirq >> 5;
+
+ spin_lock_irqsave(&chip_data->lock, flags);
+ value = readl_relaxed(chip_data->intpol_base + reg_index * 4);
+ if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_EDGE_FALLING) {
+ if (type == IRQ_TYPE_LEVEL_LOW)
+ type = IRQ_TYPE_LEVEL_HIGH;
+ else
+ type = IRQ_TYPE_EDGE_RISING;
+ value |= (1 << offset);
+ } else {
+ value &= ~(1 << offset);
+ }
+ writel(value, chip_data->intpol_base + reg_index * 4);
+
+ data = data->parent_data;
+ ret = data->chip->irq_set_type(data, type);
+ spin_unlock_irqrestore(&chip_data->lock, flags);
+ return ret;
+}
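
The polarity lookup above packs one bit per interrupt, 32 to a register: reg_index = hwirq >> 5 selects the INTPOL word and offset = hwirq & 0x1f the bit within it, which is set for low/falling types so the GIC only ever sees high/rising. A trivial check of that indexing (hwirq 37 is an arbitrary example):

    #include <stdio.h>

    int main(void)
    {
            unsigned int hwirq = 37;                /* example hardware interrupt */
            unsigned int reg_index = hwirq >> 5;    /* which 32-bit INTPOL word   */
            unsigned int offset = hwirq & 0x1f;     /* bit inside that word       */

            printf("hwirq %u -> register %u, bit %u\n", hwirq, reg_index, offset); /* 1, 5 */
            return 0;
    }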
+
+static struct irq_chip mtk_sysirq_chip = {
+ .name = "MT_SYSIRQ",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_type = mtk_sysirq_set_type,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+};
+
+static int mtk_sysirq_domain_xlate(struct irq_domain *d,
+ struct device_node *controller,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ if (intsize != 3)
+ return -EINVAL;
+
+ /* sysirq doesn't support PPI */
+ if (intspec[0])
+ return -EINVAL;
+
+ *out_hwirq = intspec[1];
+ *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
+ return 0;
+}
+
+static int mtk_sysirq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ int i;
+ irq_hw_number_t hwirq;
+ struct of_phandle_args *irq_data = arg;
+ struct of_phandle_args gic_data = *irq_data;
+
+ if (irq_data->args_count != 3)
+ return -EINVAL;
+
+ /* sysirq doesn't support PPI */
+ if (irq_data->args[0])
+ return -EINVAL;
+
+ hwirq = irq_data->args[1];
+ for (i = 0; i < nr_irqs; i++)
+ irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+ &mtk_sysirq_chip,
+ domain->host_data);
+
+ gic_data.np = domain->parent->of_node;
+ return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &gic_data);
+}
+
+static struct irq_domain_ops sysirq_domain_ops = {
+ .xlate = mtk_sysirq_domain_xlate,
+ .alloc = mtk_sysirq_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+static int __init mtk_sysirq_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct irq_domain *domain, *domain_parent;
+ struct mtk_sysirq_chip_data *chip_data;
+ int ret = 0;
+
+ domain_parent = irq_find_host(parent);
+ if (!domain_parent) {
+ pr_err("mtk_sysirq: interrupt-parent not found\n");
+ return -EINVAL;
+ }
+
+ chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL);
+ if (!chip_data)
+ return -ENOMEM;
+
+ chip_data->intpol_base = of_io_request_and_map(node, 0, "intpol");
+ if (!chip_data->intpol_base) {
+ pr_err("mtk_sysirq: unable to map sysirq register\n");
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ domain = irq_domain_add_hierarchy(domain_parent, 0,
+ MT6577_SYS_INTPOL_NUM, node,
+ &sysirq_domain_ops, chip_data);
+ if (!domain) {
+ ret = -ENOMEM;
+ goto out_unmap;
+ }
+ spin_lock_init(&chip_data->lock);
+
+ return 0;
+
+out_unmap:
+ iounmap(chip_data->intpol_base);
+out_free:
+ kfree(chip_data);
+ return ret;
+}
+IRQCHIP_DECLARE(mtk_sysirq, "mediatek,mt6577-sysirq", mtk_sysirq_of_init);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index a210338cfeb..a6c3d2f153f 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -250,6 +250,17 @@ config LEDS_LP8788
help
This option enables support for the Keyboard LEDs on the LP8788 PMIC.
+config LEDS_LP8860
+ tristate "LED support for the TI LP8860 4 channel LED driver"
+ depends on LEDS_CLASS && I2C
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for the TI LP8860 4-channel
+ LED driver.
+ This option enables support for the display cluster LEDs
+ on the LP8860, which is controlled over the I2C communication bus.
+
config LEDS_CLEVO_MAIL
tristate "Mail LED on Clevo notebook"
depends on LEDS_CLASS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index a2b16474146..1c65a191d90 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_LEDS_LP5523) += leds-lp5523.o
obj-$(CONFIG_LEDS_LP5562) += leds-lp5562.o
obj-$(CONFIG_LEDS_LP8501) += leds-lp8501.o
obj-$(CONFIG_LEDS_LP8788) += leds-lp8788.o
+obj-$(CONFIG_LEDS_LP8860) += leds-lp8860.o
obj-$(CONFIG_LEDS_TCA6507) += leds-tca6507.o
obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o
obj-$(CONFIG_LEDS_IPAQ_MICRO) += leds-ipaq-micro.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 7440c58b8e6..dbeebac38d3 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -40,17 +40,27 @@ static ssize_t brightness_store(struct device *dev,
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
unsigned long state;
- ssize_t ret = -EINVAL;
+ ssize_t ret;
+
+ mutex_lock(&led_cdev->led_access);
+
+ if (led_sysfs_is_disabled(led_cdev)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
ret = kstrtoul(buf, 10, &state);
if (ret)
- return ret;
+ goto unlock;
if (state == LED_OFF)
led_trigger_remove(led_cdev);
- __led_set_brightness(led_cdev, state);
+ led_set_brightness(led_cdev, state);
- return size;
+ ret = size;
+unlock:
+ mutex_unlock(&led_cdev->led_access);
+ return ret;
}
static DEVICE_ATTR_RW(brightness);
@@ -99,7 +109,7 @@ static void led_timer_function(unsigned long data)
unsigned long delay;
if (!led_cdev->blink_delay_on || !led_cdev->blink_delay_off) {
- __led_set_brightness(led_cdev, LED_OFF);
+ led_set_brightness_async(led_cdev, LED_OFF);
return;
}
@@ -122,7 +132,7 @@ static void led_timer_function(unsigned long data)
delay = led_cdev->blink_delay_off;
}
- __led_set_brightness(led_cdev, brightness);
+ led_set_brightness_async(led_cdev, brightness);
/* Return in next iteration if led is in one-shot mode and we are in
* the final blink state so that the led is toggled each delay_on +
@@ -148,7 +158,7 @@ static void set_brightness_delayed(struct work_struct *ws)
led_stop_software_blink(led_cdev);
- __led_set_brightness(led_cdev, led_cdev->delayed_set_value);
+ led_set_brightness_async(led_cdev, led_cdev->delayed_set_value);
}
/**
@@ -214,6 +224,7 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
#ifdef CONFIG_LEDS_TRIGGERS
init_rwsem(&led_cdev->trigger_lock);
#endif
+ mutex_init(&led_cdev->led_access);
/* add to the list of leds */
down_write(&leds_list_lock);
list_add_tail(&led_cdev->node, &leds_list);
@@ -222,6 +233,8 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
if (!led_cdev->max_brightness)
led_cdev->max_brightness = LED_FULL;
+ led_cdev->flags |= SET_BRIGHTNESS_ASYNC;
+
led_update_brightness(led_cdev);
INIT_WORK(&led_cdev->set_brightness_work, set_brightness_delayed);
@@ -267,6 +280,8 @@ void led_classdev_unregister(struct led_classdev *led_cdev)
down_write(&leds_list_lock);
list_del(&led_cdev->node);
up_write(&leds_list_lock);
+
+ mutex_destroy(&led_cdev->led_access);
}
EXPORT_SYMBOL_GPL(led_classdev_unregister);
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index aaa8eba9099..9886dace5ad 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -42,13 +42,13 @@ static void led_set_software_blink(struct led_classdev *led_cdev,
/* never on - just set to off */
if (!delay_on) {
- __led_set_brightness(led_cdev, LED_OFF);
+ led_set_brightness_async(led_cdev, LED_OFF);
return;
}
/* never off - just set to brightness */
if (!delay_off) {
- __led_set_brightness(led_cdev, led_cdev->blink_brightness);
+ led_set_brightness_async(led_cdev, led_cdev->blink_brightness);
return;
}
@@ -117,6 +117,8 @@ EXPORT_SYMBOL_GPL(led_stop_software_blink);
void led_set_brightness(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
+ int ret = 0;
+
/* delay brightness setting if need to stop soft-blink timer */
if (led_cdev->blink_delay_on || led_cdev->blink_delay_off) {
led_cdev->delayed_set_value = brightness;
@@ -124,7 +126,17 @@ void led_set_brightness(struct led_classdev *led_cdev,
return;
}
- __led_set_brightness(led_cdev, brightness);
+ if (led_cdev->flags & SET_BRIGHTNESS_ASYNC) {
+ led_set_brightness_async(led_cdev, brightness);
+ return;
+ } else if (led_cdev->flags & SET_BRIGHTNESS_SYNC)
+ ret = led_set_brightness_sync(led_cdev, brightness);
+ else
+ ret = -EINVAL;
+
+ if (ret < 0)
+ dev_dbg(led_cdev->dev, "Setting LED brightness failed (%d)\n",
+ ret);
}
EXPORT_SYMBOL(led_set_brightness);
@@ -143,3 +155,21 @@ int led_update_brightness(struct led_classdev *led_cdev)
return ret;
}
EXPORT_SYMBOL(led_update_brightness);
+
+/* Caller must ensure led_cdev->led_access held */
+void led_sysfs_disable(struct led_classdev *led_cdev)
+{
+ lockdep_assert_held(&led_cdev->led_access);
+
+ led_cdev->flags |= LED_SYSFS_DISABLE;
+}
+EXPORT_SYMBOL_GPL(led_sysfs_disable);
+
+/* Caller must ensure led_cdev->led_access held */
+void led_sysfs_enable(struct led_classdev *led_cdev)
+{
+ lockdep_assert_held(&led_cdev->led_access);
+
+ led_cdev->flags &= ~LED_SYSFS_DISABLE;
+}
+EXPORT_SYMBOL_GPL(led_sysfs_enable);
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index c3734f10fdd..e8b1120f486 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -37,6 +37,14 @@ ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr,
char trigger_name[TRIG_NAME_MAX];
struct led_trigger *trig;
size_t len;
+ int ret = count;
+
+ mutex_lock(&led_cdev->led_access);
+
+ if (led_sysfs_is_disabled(led_cdev)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
trigger_name[sizeof(trigger_name) - 1] = '\0';
strncpy(trigger_name, buf, sizeof(trigger_name) - 1);
@@ -47,7 +55,7 @@ ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr,
if (!strcmp(trigger_name, "none")) {
led_trigger_remove(led_cdev);
- return count;
+ goto unlock;
}
down_read(&triggers_list_lock);
@@ -58,12 +66,14 @@ ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr,
up_write(&led_cdev->trigger_lock);
up_read(&triggers_list_lock);
- return count;
+ goto unlock;
}
}
up_read(&triggers_list_lock);
- return -EINVAL;
+unlock:
+ mutex_unlock(&led_cdev->led_access);
+ return ret;
}
EXPORT_SYMBOL_GPL(led_trigger_store);
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 8a8ba11c5c1..7ea1ea42c2d 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -203,7 +203,7 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev)
fwnode_property_read_string(child, "linux,default-trigger",
&led.default_trigger);
- if (!fwnode_property_read_string(child, "linux,default_state",
+ if (!fwnode_property_read_string(child, "default-state",
&state)) {
if (!strcmp(state, "keep"))
led.default_state = LEDS_GPIO_DEFSTATE_KEEP;
diff --git a/drivers/leds/leds-lp8860.c b/drivers/leds/leds-lp8860.c
new file mode 100644
index 00000000000..840e93f3ab3
--- /dev/null
+++ b/drivers/leds/leds-lp8860.c
@@ -0,0 +1,491 @@
+/*
+ * TI LP8860 4-Channel LED Driver
+ *
+ * Copyright (C) 2014 Texas Instruments
+ *
+ * Author: Dan Murphy <dmurphy@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/leds.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/slab.h>
+
+#define LP8860_DISP_CL1_BRT_MSB 0x00
+#define LP8860_DISP_CL1_BRT_LSB 0x01
+#define LP8860_DISP_CL1_CURR_MSB 0x02
+#define LP8860_DISP_CL1_CURR_LSB 0x03
+#define LP8860_CL2_BRT_MSB 0x04
+#define LP8860_CL2_BRT_LSB 0x05
+#define LP8860_CL2_CURRENT 0x06
+#define LP8860_CL3_BRT_MSB 0x07
+#define LP8860_CL3_BRT_LSB 0x08
+#define LP8860_CL3_CURRENT 0x09
+#define LP8860_CL4_BRT_MSB 0x0a
+#define LP8860_CL4_BRT_LSB 0x0b
+#define LP8860_CL4_CURRENT 0x0c
+#define LP8860_CONFIG 0x0d
+#define LP8860_STATUS 0x0e
+#define LP8860_FAULT 0x0f
+#define LP8860_LED_FAULT 0x10
+#define LP8860_FAULT_CLEAR 0x11
+#define LP8860_ID 0x12
+#define LP8860_TEMP_MSB 0x13
+#define LP8860_TEMP_LSB 0x14
+#define LP8860_DISP_LED_CURR_MSB 0x15
+#define LP8860_DISP_LED_CURR_LSB 0x16
+#define LP8860_DISP_LED_PWM_MSB 0x17
+#define LP8860_DISP_LED_PWM_LSB 0x18
+#define LP8860_EEPROM_CNTRL 0x19
+#define LP8860_EEPROM_UNLOCK 0x1a
+
+#define LP8860_EEPROM_REG_0 0x60
+#define LP8860_EEPROM_REG_1 0x61
+#define LP8860_EEPROM_REG_2 0x62
+#define LP8860_EEPROM_REG_3 0x63
+#define LP8860_EEPROM_REG_4 0x64
+#define LP8860_EEPROM_REG_5 0x65
+#define LP8860_EEPROM_REG_6 0x66
+#define LP8860_EEPROM_REG_7 0x67
+#define LP8860_EEPROM_REG_8 0x68
+#define LP8860_EEPROM_REG_9 0x69
+#define LP8860_EEPROM_REG_10 0x6a
+#define LP8860_EEPROM_REG_11 0x6b
+#define LP8860_EEPROM_REG_12 0x6c
+#define LP8860_EEPROM_REG_13 0x6d
+#define LP8860_EEPROM_REG_14 0x6e
+#define LP8860_EEPROM_REG_15 0x6f
+#define LP8860_EEPROM_REG_16 0x70
+#define LP8860_EEPROM_REG_17 0x71
+#define LP8860_EEPROM_REG_18 0x72
+#define LP8860_EEPROM_REG_19 0x73
+#define LP8860_EEPROM_REG_20 0x74
+#define LP8860_EEPROM_REG_21 0x75
+#define LP8860_EEPROM_REG_22 0x76
+#define LP8860_EEPROM_REG_23 0x77
+#define LP8860_EEPROM_REG_24 0x78
+
+#define LP8860_LOCK_EEPROM 0x00
+#define LP8860_UNLOCK_EEPROM 0x01
+#define LP8860_PROGRAM_EEPROM 0x02
+#define LP8860_EEPROM_CODE_1 0x08
+#define LP8860_EEPROM_CODE_2 0xba
+#define LP8860_EEPROM_CODE_3 0xef
+
+#define LP8860_CLEAR_FAULTS 0x01
+
+#define LP8860_DISP_LED_NAME "display_cluster"
+
+/**
+ * struct lp8860_led - LP8860 LED driver private data
+ * @lock: Lock for reading/writing the device
+ * @work: Work item used to offload the brightness register writes
+ * @client: Pointer to the I2C client
+ * @led_dev: led class device pointer
+ * @regmap: Device's register map
+ * @eeprom_regmap: EEPROM register map
+ * @enable_gpio: VDDIO/EN gpio to enable communication interface
+ * @regulator: LED supply regulator pointer
+ * @brightness: Current brightness value requested
+ * @label: LED label
+ */
+struct lp8860_led {
+ struct mutex lock;
+ struct work_struct work;
+ struct i2c_client *client;
+ struct led_classdev led_dev;
+ struct regmap *regmap;
+ struct regmap *eeprom_regmap;
+ struct gpio_desc *enable_gpio;
+ struct regulator *regulator;
+ enum led_brightness brightness;
+ const char *label;
+};
+
+struct lp8860_eeprom_reg {
+ uint8_t reg;
+ uint8_t value;
+};
+
+static struct lp8860_eeprom_reg lp8860_eeprom_disp_regs[] = {
+ { LP8860_EEPROM_REG_0, 0xed },
+ { LP8860_EEPROM_REG_1, 0xdf },
+ { LP8860_EEPROM_REG_2, 0xdc },
+ { LP8860_EEPROM_REG_3, 0xf0 },
+ { LP8860_EEPROM_REG_4, 0xdf },
+ { LP8860_EEPROM_REG_5, 0xe5 },
+ { LP8860_EEPROM_REG_6, 0xf2 },
+ { LP8860_EEPROM_REG_7, 0x77 },
+ { LP8860_EEPROM_REG_8, 0x77 },
+ { LP8860_EEPROM_REG_9, 0x71 },
+ { LP8860_EEPROM_REG_10, 0x3f },
+ { LP8860_EEPROM_REG_11, 0xb7 },
+ { LP8860_EEPROM_REG_12, 0x17 },
+ { LP8860_EEPROM_REG_13, 0xef },
+ { LP8860_EEPROM_REG_14, 0xb0 },
+ { LP8860_EEPROM_REG_15, 0x87 },
+ { LP8860_EEPROM_REG_16, 0xce },
+ { LP8860_EEPROM_REG_17, 0x72 },
+ { LP8860_EEPROM_REG_18, 0xe5 },
+ { LP8860_EEPROM_REG_19, 0xdf },
+ { LP8860_EEPROM_REG_20, 0x35 },
+ { LP8860_EEPROM_REG_21, 0x06 },
+ { LP8860_EEPROM_REG_22, 0xdc },
+ { LP8860_EEPROM_REG_23, 0x88 },
+ { LP8860_EEPROM_REG_24, 0x3e },
+};
+
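+/* Lock or unlock EEPROM access; unlocking writes the three-byte code sequence */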
+static int lp8860_unlock_eeprom(struct lp8860_led *led, int lock)
+{
+ int ret;
+
+ mutex_lock(&led->lock);
+
+ if (lock == LP8860_UNLOCK_EEPROM) {
+ ret = regmap_write(led->regmap,
+ LP8860_EEPROM_UNLOCK,
+ LP8860_EEPROM_CODE_1);
+ if (ret) {
+ dev_err(&led->client->dev, "EEPROM Unlock failed\n");
+ goto out;
+ }
+
+ ret = regmap_write(led->regmap,
+ LP8860_EEPROM_UNLOCK,
+ LP8860_EEPROM_CODE_2);
+ if (ret) {
+ dev_err(&led->client->dev, "EEPROM Unlock failed\n");
+ goto out;
+ }
+ ret = regmap_write(led->regmap,
+ LP8860_EEPROM_UNLOCK,
+ LP8860_EEPROM_CODE_3);
+ if (ret) {
+ dev_err(&led->client->dev, "EEPROM Unlock failed\n");
+ goto out;
+ }
+ } else {
+ ret = regmap_write(led->regmap,
+ LP8860_EEPROM_UNLOCK,
+ LP8860_LOCK_EEPROM);
+ }
+
+out:
+ mutex_unlock(&led->lock);
+ return ret;
+}
+
+static int lp8860_fault_check(struct lp8860_led *led)
+{
+ int ret, fault;
+ unsigned int read_buf;
+
+ ret = regmap_read(led->regmap, LP8860_LED_FAULT, &read_buf);
+ if (ret)
+ goto out;
+
+ fault = read_buf;
+
+ ret = regmap_read(led->regmap, LP8860_FAULT, &read_buf);
+ if (ret)
+ goto out;
+
+ fault |= read_buf;
+
+ /* Attempt to clear any faults */
+ if (fault)
+ ret = regmap_write(led->regmap, LP8860_FAULT_CLEAR,
+ LP8860_CLEAR_FAULTS);
+out:
+ return ret;
+}
+
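+/* Deferred brightness update: check/clear faults, then write the 16-bit CL1 brightness */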
+static void lp8860_led_brightness_work(struct work_struct *work)
+{
+ struct lp8860_led *led = container_of(work, struct lp8860_led, work);
+ int ret;
+ int disp_brightness = led->brightness * 255;
+
+ mutex_lock(&led->lock);
+
+ ret = lp8860_fault_check(led);
+ if (ret) {
+ dev_err(&led->client->dev, "Cannot read/clear faults\n");
+ goto out;
+ }
+
+ ret = regmap_write(led->regmap, LP8860_DISP_CL1_BRT_MSB,
+ (disp_brightness & 0xff00) >> 8);
+ if (ret) {
+ dev_err(&led->client->dev, "Cannot write CL1 MSB\n");
+ goto out;
+ }
+
+ ret = regmap_write(led->regmap, LP8860_DISP_CL1_BRT_LSB,
+ disp_brightness & 0xff);
+ if (ret) {
+ dev_err(&led->client->dev, "Cannot write CL1 LSB\n");
+ goto out;
+ }
+out:
+ mutex_unlock(&led->lock);
+}
+
+static void lp8860_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brt_val)
+{
+ struct lp8860_led *led =
+ container_of(led_cdev, struct lp8860_led, led_dev);
+
+ led->brightness = brt_val;
+ schedule_work(&led->work);
+}
+
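+/* Enable the device and program the display cluster EEPROM defaults */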
+static int lp8860_init(struct lp8860_led *led)
+{
+ unsigned int read_buf;
+ int ret, i, reg_count;
+
+ if (led->enable_gpio)
+ gpiod_direction_output(led->enable_gpio, 1);
+
+ ret = lp8860_fault_check(led);
+ if (ret)
+ goto out;
+
+ ret = regmap_read(led->regmap, LP8860_STATUS, &read_buf);
+ if (ret)
+ goto out;
+
+ ret = lp8860_unlock_eeprom(led, LP8860_UNLOCK_EEPROM);
+ if (ret) {
+ dev_err(&led->client->dev, "Failed unlocking EEPROM\n");
+ goto out;
+ }
+
+ reg_count = ARRAY_SIZE(lp8860_eeprom_disp_regs);
+ for (i = 0; i < reg_count; i++) {
+ ret = regmap_write(led->eeprom_regmap,
+ lp8860_eeprom_disp_regs[i].reg,
+ lp8860_eeprom_disp_regs[i].value);
+ if (ret) {
+ dev_err(&led->client->dev, "Failed writing EEPROM\n");
+ goto out;
+ }
+ }
+
+ ret = lp8860_unlock_eeprom(led, LP8860_LOCK_EEPROM);
+ if (ret)
+ goto out;
+
+ ret = regmap_write(led->regmap,
+ LP8860_EEPROM_CNTRL,
+ LP8860_PROGRAM_EEPROM);
+ if (ret)
+ dev_err(&led->client->dev, "Failed programming EEPROM\n");
+out:
+ if (ret && led->enable_gpio)
+ gpiod_direction_output(led->enable_gpio, 0);
+ return ret;
+}
+
+static struct reg_default lp8860_reg_defs[] = {
+ { LP8860_DISP_CL1_BRT_MSB, 0x00},
+ { LP8860_DISP_CL1_BRT_LSB, 0x00},
+ { LP8860_DISP_CL1_CURR_MSB, 0x00},
+ { LP8860_DISP_CL1_CURR_LSB, 0x00},
+ { LP8860_CL2_BRT_MSB, 0x00},
+ { LP8860_CL2_BRT_LSB, 0x00},
+ { LP8860_CL2_CURRENT, 0x00},
+ { LP8860_CL3_BRT_MSB, 0x00},
+ { LP8860_CL3_BRT_LSB, 0x00},
+ { LP8860_CL3_CURRENT, 0x00},
+ { LP8860_CL4_BRT_MSB, 0x00},
+ { LP8860_CL4_BRT_LSB, 0x00},
+ { LP8860_CL4_CURRENT, 0x00},
+ { LP8860_CONFIG, 0x00},
+ { LP8860_FAULT_CLEAR, 0x00},
+ { LP8860_EEPROM_CNTRL, 0x80},
+ { LP8860_EEPROM_UNLOCK, 0x00},
+};
+
+static const struct regmap_config lp8860_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = LP8860_EEPROM_UNLOCK,
+ .reg_defaults = lp8860_reg_defs,
+ .num_reg_defaults = ARRAY_SIZE(lp8860_reg_defs),
+ .cache_type = REGCACHE_NONE,
+};
+
+static struct reg_default lp8860_eeprom_defs[] = {
+ { LP8860_EEPROM_REG_0, 0x00 },
+ { LP8860_EEPROM_REG_1, 0x00 },
+ { LP8860_EEPROM_REG_2, 0x00 },
+ { LP8860_EEPROM_REG_3, 0x00 },
+ { LP8860_EEPROM_REG_4, 0x00 },
+ { LP8860_EEPROM_REG_5, 0x00 },
+ { LP8860_EEPROM_REG_6, 0x00 },
+ { LP8860_EEPROM_REG_7, 0x00 },
+ { LP8860_EEPROM_REG_8, 0x00 },
+ { LP8860_EEPROM_REG_9, 0x00 },
+ { LP8860_EEPROM_REG_10, 0x00 },
+ { LP8860_EEPROM_REG_11, 0x00 },
+ { LP8860_EEPROM_REG_12, 0x00 },
+ { LP8860_EEPROM_REG_13, 0x00 },
+ { LP8860_EEPROM_REG_14, 0x00 },
+ { LP8860_EEPROM_REG_15, 0x00 },
+ { LP8860_EEPROM_REG_16, 0x00 },
+ { LP8860_EEPROM_REG_17, 0x00 },
+ { LP8860_EEPROM_REG_18, 0x00 },
+ { LP8860_EEPROM_REG_19, 0x00 },
+ { LP8860_EEPROM_REG_20, 0x00 },
+ { LP8860_EEPROM_REG_21, 0x00 },
+ { LP8860_EEPROM_REG_22, 0x00 },
+ { LP8860_EEPROM_REG_23, 0x00 },
+ { LP8860_EEPROM_REG_24, 0x00 },
+};
+
+static const struct regmap_config lp8860_eeprom_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = LP8860_EEPROM_REG_24,
+ .reg_defaults = lp8860_eeprom_defs,
+ .num_reg_defaults = ARRAY_SIZE(lp8860_eeprom_defs),
+ .cache_type = REGCACHE_NONE,
+};
+
+static int lp8860_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct lp8860_led *led;
+ struct device_node *np = client->dev.of_node;
+
+ led = devm_kzalloc(&client->dev, sizeof(*led), GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
+ led->label = LP8860_DISP_LED_NAME;
+
+ if (client->dev.of_node) {
+ ret = of_property_read_string(np, "label", &led->label);
+ if (ret) {
+ dev_err(&client->dev, "Missing label in dt\n");
+ return -EINVAL;
+ }
+ }
+
+ led->enable_gpio = devm_gpiod_get(&client->dev, "enable");
+ if (IS_ERR(led->enable_gpio))
+ led->enable_gpio = NULL;
+ else
+ gpiod_direction_output(led->enable_gpio, 0);
+
+ led->regulator = devm_regulator_get(&client->dev, "vled");
+ if (IS_ERR(led->regulator))
+ led->regulator = NULL;
+
+ led->client = client;
+ led->led_dev.name = led->label;
+ led->led_dev.max_brightness = LED_FULL;
+ led->led_dev.brightness_set = lp8860_brightness_set;
+
+ mutex_init(&led->lock);
+ INIT_WORK(&led->work, lp8860_led_brightness_work);
+
+ i2c_set_clientdata(client, led);
+
+ led->regmap = devm_regmap_init_i2c(client, &lp8860_regmap_config);
+ if (IS_ERR(led->regmap)) {
+ ret = PTR_ERR(led->regmap);
+ dev_err(&client->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ led->eeprom_regmap = devm_regmap_init_i2c(client, &lp8860_eeprom_regmap_config);
+ if (IS_ERR(led->eeprom_regmap)) {
+ ret = PTR_ERR(led->eeprom_regmap);
+ dev_err(&client->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = lp8860_init(led);
+ if (ret)
+ return ret;
+
+ ret = led_classdev_register(&client->dev, &led->led_dev);
+ if (ret) {
+ dev_err(&client->dev, "led register err: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int lp8860_remove(struct i2c_client *client)
+{
+ struct lp8860_led *led = i2c_get_clientdata(client);
+ int ret;
+
+ led_classdev_unregister(&led->led_dev);
+ cancel_work_sync(&led->work);
+
+ if (led->enable_gpio)
+ gpiod_direction_output(led->enable_gpio, 0);
+
+ if (led->regulator) {
+ ret = regulator_disable(led->regulator);
+ if (ret)
+ dev_err(&led->client->dev,
+ "Failed to disable regulator\n");
+ }
+
+ return 0;
+}
+
+static const struct i2c_device_id lp8860_id[] = {
+ { "lp8860", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, lp8860_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_lp8860_leds_match[] = {
+ { .compatible = "ti,lp8860", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_lp8860_leds_match);
+#endif
+
+static struct i2c_driver lp8860_driver = {
+ .driver = {
+ .name = "lp8860",
+ .of_match_table = of_match_ptr(of_lp8860_leds_match),
+ },
+ .probe = lp8860_probe,
+ .remove = lp8860_remove,
+ .id_table = lp8860_id,
+};
+module_i2c_driver(lp8860_driver);
+
+MODULE_DESCRIPTION("Texas Instruments LP8860 LED driver");
+MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-regulator.c b/drivers/leds/leds-regulator.c
index fa1126f1206..ffc21397a67 100644
--- a/drivers/leds/leds-regulator.c
+++ b/drivers/leds/leds-regulator.c
@@ -153,24 +153,21 @@ static int regulator_led_probe(struct platform_device *pdev)
return -ENODEV;
}
- vcc = regulator_get_exclusive(&pdev->dev, "vled");
+ vcc = devm_regulator_get_exclusive(&pdev->dev, "vled");
if (IS_ERR(vcc)) {
dev_err(&pdev->dev, "Cannot get vcc for %s\n", pdata->name);
return PTR_ERR(vcc);
}
led = devm_kzalloc(&pdev->dev, sizeof(*led), GFP_KERNEL);
- if (led == NULL) {
- ret = -ENOMEM;
- goto err_vcc;
- }
+ if (led == NULL)
+ return -ENOMEM;
led->cdev.max_brightness = led_regulator_get_max_brightness(vcc);
if (pdata->brightness > led->cdev.max_brightness) {
dev_err(&pdev->dev, "Invalid default brightness %d\n",
pdata->brightness);
- ret = -EINVAL;
- goto err_vcc;
+ return -EINVAL;
}
led->value = pdata->brightness;
@@ -191,7 +188,7 @@ static int regulator_led_probe(struct platform_device *pdev)
ret = led_classdev_register(&pdev->dev, &led->cdev);
if (ret < 0) {
cancel_work_sync(&led->work);
- goto err_vcc;
+ return ret;
}
/* to expose the default value to userspace */
@@ -201,10 +198,6 @@ static int regulator_led_probe(struct platform_device *pdev)
regulator_led_set_value(led);
return 0;
-
-err_vcc:
- regulator_put(vcc);
- return ret;
}
static int regulator_led_remove(struct platform_device *pdev)
@@ -214,7 +207,6 @@ static int regulator_led_remove(struct platform_device *pdev)
led_classdev_unregister(&led->cdev);
cancel_work_sync(&led->work);
regulator_led_disable(led);
- regulator_put(led->vcc);
return 0;
}
diff --git a/drivers/leds/leds-syscon.c b/drivers/leds/leds-syscon.c
index 3afec79c43f..6896e2d9ba5 100644
--- a/drivers/leds/leds-syscon.c
+++ b/drivers/leds/leds-syscon.c
@@ -18,10 +18,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
- *
- * This driver provides system reboot functionality for APM X-Gene SoC.
- * For system shutdown, this is board specify. If a board designer
- * implements GPIO shutdown, use the gpio-poweroff.c driver.
*/
#include <linux/io.h>
#include <linux/of_device.h>
@@ -70,39 +66,13 @@ static void syscon_led_set(struct led_classdev *led_cdev,
dev_err(sled->cdev.dev, "error updating LED status\n");
}
-static const struct of_device_id syscon_match[] = {
- { .compatible = "syscon", },
- {},
-};
-
-static int __init syscon_leds_init(void)
+static int __init syscon_leds_spawn(struct device_node *np,
+ struct device *dev,
+ struct regmap *map)
{
- const struct of_device_id *devid;
- struct device_node *np;
struct device_node *child;
- struct regmap *map;
- struct platform_device *pdev;
- struct device *dev;
int ret;
- np = of_find_matching_node_and_match(NULL, syscon_match,
- &devid);
- if (!np)
- return -ENODEV;
-
- map = syscon_node_to_regmap(np);
- if (IS_ERR(map))
- return PTR_ERR(map);
-
- /*
- * If the map is there, the device should be there, we allocate
- * memory on the syscon device's behalf here.
- */
- pdev = of_find_device_by_node(np);
- if (!pdev)
- return -ENODEV;
- dev = &pdev->dev;
-
for_each_available_child_of_node(np, child) {
struct syscon_led *sled;
const char *state;
@@ -150,7 +120,6 @@ static int __init syscon_leds_init(void)
if (ret < 0)
return ret;
}
-
}
sled->cdev.brightness_set = syscon_led_set;
@@ -160,7 +129,39 @@ static int __init syscon_leds_init(void)
dev_info(dev, "registered LED %s\n", sled->cdev.name);
}
+ return 0;
+}
+
+static int __init syscon_leds_init(void)
+{
+ struct device_node *np;
+
+ for_each_of_allnodes(np) {
+ struct platform_device *pdev;
+ struct regmap *map;
+ int ret;
+
+ if (!of_device_is_compatible(np, "syscon"))
+ continue;
+
+ map = syscon_node_to_regmap(np);
+ if (IS_ERR(map)) {
+ pr_err("error getting regmap for syscon LEDs\n");
+ continue;
+ }
+
+ /*
+ * If the map is there, the device should be there, we allocate
+ * memory on the syscon device's behalf here.
+ */
+ pdev = of_find_device_by_node(np);
+ if (!pdev)
+ return -ENODEV;
+ ret = syscon_leds_spawn(np, &pdev->dev, map);
+ if (ret)
+ dev_err(&pdev->dev, "could not spawn syscon LEDs\n");
+ }
- return 0;
+ return 0;
}
device_initcall(syscon_leds_init);
diff --git a/drivers/leds/leds.h b/drivers/leds/leds.h
index 4c50365344a..2348dbda526 100644
--- a/drivers/leds/leds.h
+++ b/drivers/leds/leds.h
@@ -17,16 +17,28 @@
#include <linux/rwsem.h>
#include <linux/leds.h>
-static inline void __led_set_brightness(struct led_classdev *led_cdev,
+static inline void led_set_brightness_async(struct led_classdev *led_cdev,
enum led_brightness value)
{
- if (value > led_cdev->max_brightness)
- value = led_cdev->max_brightness;
- led_cdev->brightness = value;
+ led_cdev->brightness = min(value, led_cdev->max_brightness);
+
if (!(led_cdev->flags & LED_SUSPENDED))
led_cdev->brightness_set(led_cdev, value);
}
+static inline int led_set_brightness_sync(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ int ret = 0;
+
+ led_cdev->brightness = min(value, led_cdev->max_brightness);
+
+ if (!(led_cdev->flags & LED_SUSPENDED))
+ ret = led_cdev->brightness_set_sync(led_cdev,
+ led_cdev->brightness);
+ return ret;
+}
+
static inline int led_get_brightness(struct led_classdev *led_cdev)
{
return led_cdev->brightness;
diff --git a/drivers/leds/trigger/ledtrig-backlight.c b/drivers/leds/trigger/ledtrig-backlight.c
index 47e55aa9eef..59eca17d966 100644
--- a/drivers/leds/trigger/ledtrig-backlight.c
+++ b/drivers/leds/trigger/ledtrig-backlight.c
@@ -51,9 +51,9 @@ static int fb_notifier_callback(struct notifier_block *p,
if ((n->old_status == UNBLANK) ^ n->invert) {
n->brightness = led->brightness;
- __led_set_brightness(led, LED_OFF);
+ led_set_brightness_async(led, LED_OFF);
} else {
- __led_set_brightness(led, n->brightness);
+ led_set_brightness_async(led, n->brightness);
}
n->old_status = new_status;
@@ -89,9 +89,9 @@ static ssize_t bl_trig_invert_store(struct device *dev,
/* After inverting, we need to update the LED. */
if ((n->old_status == BLANK) ^ n->invert)
- __led_set_brightness(led, LED_OFF);
+ led_set_brightness_async(led, LED_OFF);
else
- __led_set_brightness(led, n->brightness);
+ led_set_brightness_async(led, n->brightness);
return num;
}
diff --git a/drivers/leds/trigger/ledtrig-default-on.c b/drivers/leds/trigger/ledtrig-default-on.c
index 81a91be8e18..6f38f883aaf 100644
--- a/drivers/leds/trigger/ledtrig-default-on.c
+++ b/drivers/leds/trigger/ledtrig-default-on.c
@@ -19,7 +19,7 @@
static void defon_trig_activate(struct led_classdev *led_cdev)
{
- __led_set_brightness(led_cdev, led_cdev->max_brightness);
+ led_set_brightness_async(led_cdev, led_cdev->max_brightness);
}
static struct led_trigger defon_led_trigger = {
diff --git a/drivers/leds/trigger/ledtrig-gpio.c b/drivers/leds/trigger/ledtrig-gpio.c
index c86c4182647..4cc7040746c 100644
--- a/drivers/leds/trigger/ledtrig-gpio.c
+++ b/drivers/leds/trigger/ledtrig-gpio.c
@@ -54,12 +54,12 @@ static void gpio_trig_work(struct work_struct *work)
if (tmp) {
if (gpio_data->desired_brightness)
- __led_set_brightness(gpio_data->led,
+ led_set_brightness_async(gpio_data->led,
gpio_data->desired_brightness);
else
- __led_set_brightness(gpio_data->led, LED_FULL);
+ led_set_brightness_async(gpio_data->led, LED_FULL);
} else {
- __led_set_brightness(gpio_data->led, LED_OFF);
+ led_set_brightness_async(gpio_data->led, LED_OFF);
}
}
diff --git a/drivers/leds/trigger/ledtrig-heartbeat.c b/drivers/leds/trigger/ledtrig-heartbeat.c
index 5c8464a3317..fea6871d260 100644
--- a/drivers/leds/trigger/ledtrig-heartbeat.c
+++ b/drivers/leds/trigger/ledtrig-heartbeat.c
@@ -74,7 +74,7 @@ static void led_heartbeat_function(unsigned long data)
break;
}
- __led_set_brightness(led_cdev, brightness);
+ led_set_brightness_async(led_cdev, brightness);
mod_timer(&heartbeat_data->timer, jiffies + delay);
}
diff --git a/drivers/leds/trigger/ledtrig-oneshot.c b/drivers/leds/trigger/ledtrig-oneshot.c
index cb4c7466692..fbd02cdc3ad 100644
--- a/drivers/leds/trigger/ledtrig-oneshot.c
+++ b/drivers/leds/trigger/ledtrig-oneshot.c
@@ -63,9 +63,9 @@ static ssize_t led_invert_store(struct device *dev,
oneshot_data->invert = !!state;
if (oneshot_data->invert)
- __led_set_brightness(led_cdev, LED_FULL);
+ led_set_brightness_async(led_cdev, LED_FULL);
else
- __led_set_brightness(led_cdev, LED_OFF);
+ led_set_brightness_async(led_cdev, LED_OFF);
return size;
}
diff --git a/drivers/leds/trigger/ledtrig-transient.c b/drivers/leds/trigger/ledtrig-transient.c
index e5abc00bb00..3c34de404d1 100644
--- a/drivers/leds/trigger/ledtrig-transient.c
+++ b/drivers/leds/trigger/ledtrig-transient.c
@@ -41,7 +41,7 @@ static void transient_timer_function(unsigned long data)
struct transient_trig_data *transient_data = led_cdev->trigger_data;
transient_data->activate = 0;
- __led_set_brightness(led_cdev, transient_data->restore_state);
+ led_set_brightness_async(led_cdev, transient_data->restore_state);
}
static ssize_t transient_activate_show(struct device *dev,
@@ -72,7 +72,8 @@ static ssize_t transient_activate_store(struct device *dev,
if (state == 0 && transient_data->activate == 1) {
del_timer(&transient_data->timer);
transient_data->activate = state;
- __led_set_brightness(led_cdev, transient_data->restore_state);
+ led_set_brightness_async(led_cdev,
+ transient_data->restore_state);
return size;
}
@@ -80,7 +81,7 @@ static ssize_t transient_activate_store(struct device *dev,
if (state == 1 && transient_data->activate == 0 &&
transient_data->duration != 0) {
transient_data->activate = state;
- __led_set_brightness(led_cdev, transient_data->state);
+ led_set_brightness_async(led_cdev, transient_data->state);
transient_data->restore_state =
(transient_data->state == LED_FULL) ? LED_OFF : LED_FULL;
mod_timer(&transient_data->timer,
@@ -203,7 +204,8 @@ static void transient_trig_deactivate(struct led_classdev *led_cdev)
if (led_cdev->activated) {
del_timer_sync(&transient_data->timer);
- __led_set_brightness(led_cdev, transient_data->restore_state);
+ led_set_brightness_async(led_cdev,
+ transient_data->restore_state);
device_remove_file(led_cdev->dev, &dev_attr_activate);
device_remove_file(led_cdev->dev, &dev_attr_duration);
device_remove_file(led_cdev->dev, &dev_attr_state);
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 3c89fcbc621..49cd30870e0 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -160,7 +160,6 @@ source "drivers/media/usb/Kconfig"
source "drivers/media/pci/Kconfig"
source "drivers/media/platform/Kconfig"
source "drivers/media/mmc/Kconfig"
-source "drivers/media/parport/Kconfig"
source "drivers/media/radio/Kconfig"
comment "Supported FireWire (IEEE 1394) Adapters"
diff --git a/drivers/media/Makefile b/drivers/media/Makefile
index 620f275a45c..e608bbce0c3 100644
--- a/drivers/media/Makefile
+++ b/drivers/media/Makefile
@@ -28,6 +28,6 @@ obj-y += rc/
# Finally, merge the drivers that require the core
#
-obj-y += common/ platform/ pci/ usb/ mmc/ firewire/ parport/
+obj-y += common/ platform/ pci/ usb/ mmc/ firewire/
obj-$(CONFIG_VIDEO_DEV) += radio/
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index f40b4cf6107..205d7136434 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -284,15 +284,6 @@ config VIDEO_SAA711X
To compile this driver as a module, choose M here: the
module will be called saa7115.
-config VIDEO_SAA7191
- tristate "Philips SAA7191 video decoder"
- depends on VIDEO_V4L2 && I2C
- ---help---
- Support for the Philips SAA7191 video decoder.
-
- To compile this driver as a module, choose M here: the
- module will be called saa7191.
-
config VIDEO_TVP514X
tristate "Texas Instruments TVP514x video decoder"
depends on VIDEO_V4L2 && I2C
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index 01ae9328e58..9858900168b 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -18,7 +18,6 @@ obj-$(CONFIG_VIDEO_SAA711X) += saa7115.o
obj-$(CONFIG_VIDEO_SAA717X) += saa717x.o
obj-$(CONFIG_VIDEO_SAA7127) += saa7127.o
obj-$(CONFIG_VIDEO_SAA7185) += saa7185.o
-obj-$(CONFIG_VIDEO_SAA7191) += saa7191.o
obj-$(CONFIG_VIDEO_SAA6752HS) += saa6752hs.o
obj-$(CONFIG_VIDEO_ADV7170) += adv7170.o
obj-$(CONFIG_VIDEO_ADV7175) += adv7175.o
diff --git a/drivers/media/pci/cx88/cx88-blackbird.c b/drivers/media/pci/cx88/cx88-blackbird.c
index 4160ca4e541..d3c79d964f2 100644
--- a/drivers/media/pci/cx88/cx88-blackbird.c
+++ b/drivers/media/pci/cx88/cx88-blackbird.c
@@ -647,6 +647,7 @@ static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
dev->ts_packet_size = 188 * 4;
dev->ts_packet_count = 32;
sizes[0] = dev->ts_packet_size * dev->ts_packet_count;
+ alloc_ctxs[0] = dev->alloc_ctx;
return 0;
}
@@ -662,14 +663,11 @@ static void buffer_finish(struct vb2_buffer *vb)
{
struct cx8802_dev *dev = vb->vb2_queue->drv_priv;
struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
- struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
struct cx88_riscmem *risc = &buf->risc;
if (risc->cpu)
pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
memset(risc, 0, sizeof(*risc));
-
- dma_unmap_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
}
static void buffer_queue(struct vb2_buffer *vb)
diff --git a/drivers/media/pci/cx88/cx88-dvb.c b/drivers/media/pci/cx88/cx88-dvb.c
index c344bfd0b89..5780e2f013b 100644
--- a/drivers/media/pci/cx88/cx88-dvb.c
+++ b/drivers/media/pci/cx88/cx88-dvb.c
@@ -92,6 +92,7 @@ static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
dev->ts_packet_size = 188 * 4;
dev->ts_packet_count = dvb_buf_tscnt;
sizes[0] = dev->ts_packet_size * dev->ts_packet_count;
+ alloc_ctxs[0] = dev->alloc_ctx;
*num_buffers = dvb_buf_tscnt;
return 0;
}
@@ -108,14 +109,11 @@ static void buffer_finish(struct vb2_buffer *vb)
{
struct cx8802_dev *dev = vb->vb2_queue->drv_priv;
struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
- struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
struct cx88_riscmem *risc = &buf->risc;
if (risc->cpu)
pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
memset(risc, 0, sizeof(*risc));
-
- dma_unmap_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
}
static void buffer_queue(struct vb2_buffer *vb)
diff --git a/drivers/media/pci/cx88/cx88-mpeg.c b/drivers/media/pci/cx88/cx88-mpeg.c
index f181a3a1038..1c1f69e6b0b 100644
--- a/drivers/media/pci/cx88/cx88-mpeg.c
+++ b/drivers/media/pci/cx88/cx88-mpeg.c
@@ -235,10 +235,6 @@ int cx8802_buf_prepare(struct vb2_queue *q, struct cx8802_dev *dev,
return -EINVAL;
vb2_set_plane_payload(&buf->vb, 0, size);
- rc = dma_map_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
- if (!rc)
- return -EIO;
-
rc = cx88_risc_databuffer(dev->pci, risc, sgt->sgl,
dev->ts_packet_size, dev->ts_packet_count, 0);
if (rc) {
@@ -733,6 +729,11 @@ static int cx8802_probe(struct pci_dev *pci_dev,
if (NULL == dev)
goto fail_core;
dev->pci = pci_dev;
+ dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev);
+ if (IS_ERR(dev->alloc_ctx)) {
+ err = PTR_ERR(dev->alloc_ctx);
+ goto fail_core;
+ }
dev->core = core;
/* Maintain a reference so cx88-video can query the 8802 device. */
@@ -752,6 +753,7 @@ static int cx8802_probe(struct pci_dev *pci_dev,
return 0;
fail_free:
+ vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
kfree(dev);
fail_core:
core->dvbdev = NULL;
@@ -798,6 +800,7 @@ static void cx8802_remove(struct pci_dev *pci_dev)
/* common */
cx8802_fini_common(dev);
cx88_core_put(dev->core,dev->pci);
+ vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
kfree(dev);
}
diff --git a/drivers/media/pci/cx88/cx88-vbi.c b/drivers/media/pci/cx88/cx88-vbi.c
index 6ab6e27648f..32eb7fdb875 100644
--- a/drivers/media/pci/cx88/cx88-vbi.c
+++ b/drivers/media/pci/cx88/cx88-vbi.c
@@ -120,6 +120,7 @@ static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
sizes[0] = VBI_LINE_NTSC_COUNT * VBI_LINE_LENGTH * 2;
else
sizes[0] = VBI_LINE_PAL_COUNT * VBI_LINE_LENGTH * 2;
+ alloc_ctxs[0] = dev->alloc_ctx;
return 0;
}
@@ -131,7 +132,6 @@ static int buffer_prepare(struct vb2_buffer *vb)
struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
unsigned int lines;
unsigned int size;
- int rc;
if (dev->core->tvnorm & V4L2_STD_525_60)
lines = VBI_LINE_NTSC_COUNT;
@@ -142,10 +142,6 @@ static int buffer_prepare(struct vb2_buffer *vb)
return -EINVAL;
vb2_set_plane_payload(vb, 0, size);
- rc = dma_map_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
- if (!rc)
- return -EIO;
-
cx88_risc_buffer(dev->pci, &buf->risc, sgt->sgl,
0, VBI_LINE_LENGTH * lines,
VBI_LINE_LENGTH, 0,
@@ -157,14 +153,11 @@ static void buffer_finish(struct vb2_buffer *vb)
{
struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
- struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
struct cx88_riscmem *risc = &buf->risc;
if (risc->cpu)
pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
memset(risc, 0, sizeof(*risc));
-
- dma_unmap_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
}
static void buffer_queue(struct vb2_buffer *vb)
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index a64ae31ae14..860c98fc72c 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -440,6 +440,7 @@ static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
*num_planes = 1;
sizes[0] = (dev->fmt->depth * core->width * core->height) >> 3;
+ alloc_ctxs[0] = dev->alloc_ctx;
return 0;
}
@@ -449,7 +450,6 @@ static int buffer_prepare(struct vb2_buffer *vb)
struct cx88_core *core = dev->core;
struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
- int rc;
buf->bpl = core->width * dev->fmt->depth >> 3;
@@ -457,10 +457,6 @@ static int buffer_prepare(struct vb2_buffer *vb)
return -EINVAL;
vb2_set_plane_payload(vb, 0, core->height * buf->bpl);
- rc = dma_map_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
- if (!rc)
- return -EIO;
-
switch (core->field) {
case V4L2_FIELD_TOP:
cx88_risc_buffer(dev->pci, &buf->risc,
@@ -505,14 +501,11 @@ static void buffer_finish(struct vb2_buffer *vb)
{
struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
- struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
struct cx88_riscmem *risc = &buf->risc;
if (risc->cpu)
pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
memset(risc, 0, sizeof(*risc));
-
- dma_unmap_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
}
static void buffer_queue(struct vb2_buffer *vb)
@@ -530,7 +523,6 @@ static void buffer_queue(struct vb2_buffer *vb)
if (list_empty(&q->active)) {
list_add_tail(&buf->list, &q->active);
- start_video_dma(dev, q, buf);
buf->count = q->count++;
dprintk(2,"[%p/%d] buffer_queue - first active\n",
buf, buf->vb.v4l2_buf.index);
@@ -1345,6 +1337,12 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
err = -EIO;
goto fail_core;
}
+ dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev);
+ if (IS_ERR(dev->alloc_ctx)) {
+ err = PTR_ERR(dev->alloc_ctx);
+ goto fail_core;
+ }
+
/* initialize driver struct */
spin_lock_init(&dev->slock);
@@ -1549,6 +1547,7 @@ fail_unreg:
free_irq(pci_dev->irq, dev);
mutex_unlock(&core->lock);
fail_core:
+ vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
core->v4ldev = NULL;
cx88_core_put(core,dev->pci);
fail_free:
@@ -1582,6 +1581,7 @@ static void cx8800_finidev(struct pci_dev *pci_dev)
/* free memory */
cx88_core_put(core,dev->pci);
+ vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
kfree(dev);
}
diff --git a/drivers/media/pci/cx88/cx88.h b/drivers/media/pci/cx88/cx88.h
index 3b0ae754f16..7748ca9abb0 100644
--- a/drivers/media/pci/cx88/cx88.h
+++ b/drivers/media/pci/cx88/cx88.h
@@ -485,6 +485,7 @@ struct cx8800_dev {
/* pci i/o */
struct pci_dev *pci;
unsigned char pci_rev,pci_lat;
+ void *alloc_ctx;
const struct cx8800_fmt *fmt;
@@ -548,6 +549,7 @@ struct cx8802_dev {
/* pci i/o */
struct pci_dev *pci;
unsigned char pci_rev,pci_lat;
+ void *alloc_ctx;
/* dma queues */
struct cx88_dmaqueue mpegq;
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 0c61155699f..765bffb49a7 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -65,14 +65,6 @@ config VIDEO_TIMBERDALE
---help---
Add support for the Video In peripheral of the timberdale FPGA.
-config VIDEO_VINO
- tristate "SGI Vino Video For Linux"
- depends on I2C && SGI_IP22 && VIDEO_V4L2
- select VIDEO_SAA7191 if MEDIA_SUBDRV_AUTOSELECT
- help
- Say Y here to build in support for the Vino video input system found
- on SGI Indy machines.
-
config VIDEO_M32R_AR
tristate "AR devices"
depends on VIDEO_V4L2
@@ -112,7 +104,7 @@ config VIDEO_OMAP3_DEBUG
config VIDEO_S3C_CAMIF
tristate "Samsung S3C24XX/S3C64XX SoC Camera Interface driver"
depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
- depends on PM_RUNTIME
+ depends on PM
depends on ARCH_S3C64XX || PLAT_S3C24XX || COMPILE_TEST
depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index b818afb4d33..a49936b8ce8 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -2,9 +2,6 @@
# Makefile for the video capture/playback device drivers.
#
-obj-$(CONFIG_VIDEO_VINO) += indycam.o
-obj-$(CONFIG_VIDEO_VINO) += vino.o
-
obj-$(CONFIG_VIDEO_TIMBERDALE) += timblogiw.o
obj-$(CONFIG_VIDEO_M32R_AR_M64278) += arv.o
diff --git a/drivers/media/platform/s5p-tv/Kconfig b/drivers/media/platform/s5p-tv/Kconfig
index beb180e71ba..5a1835dd65e 100644
--- a/drivers/media/platform/s5p-tv/Kconfig
+++ b/drivers/media/platform/s5p-tv/Kconfig
@@ -8,7 +8,7 @@
config VIDEO_SAMSUNG_S5P_TV
bool "Samsung TV driver for S5P platform"
- depends on PM_RUNTIME
+ depends on PM
depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
default n
---help---
diff --git a/drivers/media/platform/soc_camera/rcar_vin.c b/drivers/media/platform/soc_camera/rcar_vin.c
index 126ac7c5b6f..0c1f5564810 100644
--- a/drivers/media/platform/soc_camera/rcar_vin.c
+++ b/drivers/media/platform/soc_camera/rcar_vin.c
@@ -64,6 +64,30 @@
#define VNDMR_REG 0x58 /* Video n Data Mode Register */
#define VNDMR2_REG 0x5C /* Video n Data Mode Register 2 */
#define VNUVAOF_REG 0x60 /* Video n UV Address Offset Register */
+#define VNC1A_REG 0x80 /* Video n Coefficient Set C1A Register */
+#define VNC1B_REG 0x84 /* Video n Coefficient Set C1B Register */
+#define VNC1C_REG 0x88 /* Video n Coefficient Set C1C Register */
+#define VNC2A_REG 0x90 /* Video n Coefficient Set C2A Register */
+#define VNC2B_REG 0x94 /* Video n Coefficient Set C2B Register */
+#define VNC2C_REG 0x98 /* Video n Coefficient Set C2C Register */
+#define VNC3A_REG 0xA0 /* Video n Coefficient Set C3A Register */
+#define VNC3B_REG 0xA4 /* Video n Coefficient Set C3B Register */
+#define VNC3C_REG 0xA8 /* Video n Coefficient Set C3C Register */
+#define VNC4A_REG 0xB0 /* Video n Coefficient Set C4A Register */
+#define VNC4B_REG 0xB4 /* Video n Coefficient Set C4B Register */
+#define VNC4C_REG 0xB8 /* Video n Coefficient Set C4C Register */
+#define VNC5A_REG 0xC0 /* Video n Coefficient Set C5A Register */
+#define VNC5B_REG 0xC4 /* Video n Coefficient Set C5B Register */
+#define VNC5C_REG 0xC8 /* Video n Coefficient Set C5C Register */
+#define VNC6A_REG 0xD0 /* Video n Coefficient Set C6A Register */
+#define VNC6B_REG 0xD4 /* Video n Coefficient Set C6B Register */
+#define VNC6C_REG 0xD8 /* Video n Coefficient Set C6C Register */
+#define VNC7A_REG 0xE0 /* Video n Coefficient Set C7A Register */
+#define VNC7B_REG 0xE4 /* Video n Coefficient Set C7B Register */
+#define VNC7C_REG 0xE8 /* Video n Coefficient Set C7C Register */
+#define VNC8A_REG 0xF0 /* Video n Coefficient Set C8A Register */
+#define VNC8B_REG 0xF4 /* Video n Coefficient Set C8B Register */
+#define VNC8C_REG 0xF8 /* Video n Coefficient Set C8C Register */
/* Register bit fields for R-Car VIN */
/* Video n Main Control Register bits */
@@ -106,6 +130,7 @@
#define VNDMR2_VPS (1 << 30)
#define VNDMR2_HPS (1 << 29)
#define VNDMR2_FTEV (1 << 17)
+#define VNDMR2_VLV(n) (((n) & 0xf) << 12)
#define VIN_MAX_WIDTH 2048
#define VIN_MAX_HEIGHT 2048
@@ -117,6 +142,324 @@ enum chip_id {
RCAR_E1,
};
+struct vin_coeff {
+ unsigned short xs_value;
+ u32 coeff_set[24];
+};
+
+static const struct vin_coeff vin_coeff_set[] = {
+ { 0x0000, {
+ 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000 },
+ },
+ { 0x1000, {
+ 0x000fa400, 0x000fa400, 0x09625902,
+ 0x000003f8, 0x00000403, 0x3de0d9f0,
+ 0x001fffed, 0x00000804, 0x3cc1f9c3,
+ 0x001003de, 0x00000c01, 0x3cb34d7f,
+ 0x002003d2, 0x00000c00, 0x3d24a92d,
+ 0x00200bca, 0x00000bff, 0x3df600d2,
+ 0x002013cc, 0x000007ff, 0x3ed70c7e,
+ 0x00100fde, 0x00000000, 0x3f87c036 },
+ },
+ { 0x1200, {
+ 0x002ffff1, 0x002ffff1, 0x02a0a9c8,
+ 0x002003e7, 0x001ffffa, 0x000185bc,
+ 0x002007dc, 0x000003ff, 0x3e52859c,
+ 0x00200bd4, 0x00000002, 0x3d53996b,
+ 0x00100fd0, 0x00000403, 0x3d04ad2d,
+ 0x00000bd5, 0x00000403, 0x3d35ace7,
+ 0x3ff003e4, 0x00000801, 0x3dc674a1,
+ 0x3fffe800, 0x00000800, 0x3e76f461 },
+ },
+ { 0x1400, {
+ 0x00100be3, 0x00100be3, 0x04d1359a,
+ 0x00000fdb, 0x002003ed, 0x0211fd93,
+ 0x00000fd6, 0x002003f4, 0x0002d97b,
+ 0x000007d6, 0x002ffffb, 0x3e93b956,
+ 0x3ff003da, 0x001003ff, 0x3db49926,
+ 0x3fffefe9, 0x00100001, 0x3d655cee,
+ 0x3fffd400, 0x00000003, 0x3d65f4b6,
+ 0x000fb421, 0x00000402, 0x3dc6547e },
+ },
+ { 0x1600, {
+ 0x00000bdd, 0x00000bdd, 0x06519578,
+ 0x3ff007da, 0x00000be3, 0x03c24973,
+ 0x3ff003d9, 0x00000be9, 0x01b30d5f,
+ 0x3ffff7df, 0x001003f1, 0x0003c542,
+ 0x000fdfec, 0x001003f7, 0x3ec4711d,
+ 0x000fc400, 0x002ffffd, 0x3df504f1,
+ 0x001fa81a, 0x002ffc00, 0x3d957cc2,
+ 0x002f8c3c, 0x00100000, 0x3db5c891 },
+ },
+ { 0x1800, {
+ 0x3ff003dc, 0x3ff003dc, 0x0791e558,
+ 0x000ff7dd, 0x3ff007de, 0x05328554,
+ 0x000fe7e3, 0x3ff00be2, 0x03232546,
+ 0x000fd7ee, 0x000007e9, 0x0143bd30,
+ 0x001fb800, 0x000007ee, 0x00044511,
+ 0x002fa015, 0x000007f4, 0x3ef4bcee,
+ 0x002f8832, 0x001003f9, 0x3e4514c7,
+ 0x001f7853, 0x001003fd, 0x3de54c9f },
+ },
+ { 0x1a00, {
+ 0x000fefe0, 0x000fefe0, 0x08721d3c,
+ 0x001fdbe7, 0x000ffbde, 0x0652a139,
+ 0x001fcbf0, 0x000003df, 0x0463292e,
+ 0x002fb3ff, 0x3ff007e3, 0x0293a91d,
+ 0x002f9c12, 0x3ff00be7, 0x01241905,
+ 0x001f8c29, 0x000007ed, 0x3fe470eb,
+ 0x000f7c46, 0x000007f2, 0x3f04b8ca,
+ 0x3fef7865, 0x000007f6, 0x3e74e4a8 },
+ },
+ { 0x1c00, {
+ 0x001fd3e9, 0x001fd3e9, 0x08f23d26,
+ 0x002fbff3, 0x001fe3e4, 0x0712ad23,
+ 0x002fa800, 0x000ff3e0, 0x05631d1b,
+ 0x001f9810, 0x000ffbe1, 0x03b3890d,
+ 0x000f8c23, 0x000003e3, 0x0233e8fa,
+ 0x3fef843b, 0x000003e7, 0x00f430e4,
+ 0x3fbf8456, 0x3ff00bea, 0x00046cc8,
+ 0x3f8f8c72, 0x3ff00bef, 0x3f3490ac },
+ },
+ { 0x1e00, {
+ 0x001fbbf4, 0x001fbbf4, 0x09425112,
+ 0x001fa800, 0x002fc7ed, 0x0792b110,
+ 0x000f980e, 0x001fdbe6, 0x0613110a,
+ 0x3fff8c20, 0x001fe7e3, 0x04a368fd,
+ 0x3fcf8c33, 0x000ff7e2, 0x0343b8ed,
+ 0x3f9f8c4a, 0x000fffe3, 0x0203f8da,
+ 0x3f5f9c61, 0x000003e6, 0x00e428c5,
+ 0x3f1fb07b, 0x000003eb, 0x3fe440af },
+ },
+ { 0x2000, {
+ 0x000fa400, 0x000fa400, 0x09625902,
+ 0x3fff980c, 0x001fb7f5, 0x0812b0ff,
+ 0x3fdf901c, 0x001fc7ed, 0x06b2fcfa,
+ 0x3faf902d, 0x001fd3e8, 0x055348f1,
+ 0x3f7f983f, 0x001fe3e5, 0x04038ce3,
+ 0x3f3fa454, 0x001fefe3, 0x02e3c8d1,
+ 0x3f0fb86a, 0x001ff7e4, 0x01c3e8c0,
+ 0x3ecfd880, 0x000fffe6, 0x00c404ac },
+ },
+ { 0x2200, {
+ 0x3fdf9c0b, 0x3fdf9c0b, 0x09725cf4,
+ 0x3fbf9818, 0x3fffa400, 0x0842a8f1,
+ 0x3f8f9827, 0x000fb3f7, 0x0702f0ec,
+ 0x3f5fa037, 0x000fc3ef, 0x05d330e4,
+ 0x3f2fac49, 0x001fcfea, 0x04a364d9,
+ 0x3effc05c, 0x001fdbe7, 0x038394ca,
+ 0x3ecfdc6f, 0x001fe7e6, 0x0273b0bb,
+ 0x3ea00083, 0x001fefe6, 0x0183c0a9 },
+ },
+ { 0x2400, {
+ 0x3f9fa014, 0x3f9fa014, 0x098260e6,
+ 0x3f7f9c23, 0x3fcf9c0a, 0x08629ce5,
+ 0x3f4fa431, 0x3fefa400, 0x0742d8e1,
+ 0x3f1fb440, 0x3fffb3f8, 0x062310d9,
+ 0x3eefc850, 0x000fbbf2, 0x050340d0,
+ 0x3ecfe062, 0x000fcbec, 0x041364c2,
+ 0x3ea00073, 0x001fd3ea, 0x03037cb5,
+ 0x3e902086, 0x001fdfe8, 0x022388a5 },
+ },
+ { 0x2600, {
+ 0x3f5fa81e, 0x3f5fa81e, 0x096258da,
+ 0x3f3fac2b, 0x3f8fa412, 0x088290d8,
+ 0x3f0fbc38, 0x3fafa408, 0x0772c8d5,
+ 0x3eefcc47, 0x3fcfa800, 0x0672f4ce,
+ 0x3ecfe456, 0x3fefaffa, 0x05531cc6,
+ 0x3eb00066, 0x3fffbbf3, 0x047334bb,
+ 0x3ea01c77, 0x000fc7ee, 0x039348ae,
+ 0x3ea04486, 0x000fd3eb, 0x02b350a1 },
+ },
+ { 0x2800, {
+ 0x3f2fb426, 0x3f2fb426, 0x094250ce,
+ 0x3f0fc032, 0x3f4fac1b, 0x086284cd,
+ 0x3eefd040, 0x3f7fa811, 0x0782acc9,
+ 0x3ecfe84c, 0x3f9fa807, 0x06a2d8c4,
+ 0x3eb0005b, 0x3fbfac00, 0x05b2f4bc,
+ 0x3eb0186a, 0x3fdfb3fa, 0x04c308b4,
+ 0x3eb04077, 0x3fefbbf4, 0x03f31ca8,
+ 0x3ec06884, 0x000fbff2, 0x03031c9e },
+ },
+ { 0x2a00, {
+ 0x3f0fc42d, 0x3f0fc42d, 0x090240c4,
+ 0x3eefd439, 0x3f2fb822, 0x08526cc2,
+ 0x3edfe845, 0x3f4fb018, 0x078294bf,
+ 0x3ec00051, 0x3f6fac0f, 0x06b2b4bb,
+ 0x3ec0185f, 0x3f8fac07, 0x05e2ccb4,
+ 0x3ec0386b, 0x3fafac00, 0x0502e8ac,
+ 0x3ed05c77, 0x3fcfb3fb, 0x0432f0a3,
+ 0x3ef08482, 0x3fdfbbf6, 0x0372f898 },
+ },
+ { 0x2c00, {
+ 0x3eefdc31, 0x3eefdc31, 0x08e238b8,
+ 0x3edfec3d, 0x3f0fc828, 0x082258b9,
+ 0x3ed00049, 0x3f1fc01e, 0x077278b6,
+ 0x3ed01455, 0x3f3fb815, 0x06c294b2,
+ 0x3ed03460, 0x3f5fb40d, 0x0602acac,
+ 0x3ef0506c, 0x3f7fb006, 0x0542c0a4,
+ 0x3f107476, 0x3f9fb400, 0x0472c89d,
+ 0x3f309c80, 0x3fbfb7fc, 0x03b2cc94 },
+ },
+ { 0x2e00, {
+ 0x3eefec37, 0x3eefec37, 0x088220b0,
+ 0x3ee00041, 0x3effdc2d, 0x07f244ae,
+ 0x3ee0144c, 0x3f0fd023, 0x07625cad,
+ 0x3ef02c57, 0x3f1fc81a, 0x06c274a9,
+ 0x3f004861, 0x3f3fbc13, 0x060288a6,
+ 0x3f20686b, 0x3f5fb80c, 0x05529c9e,
+ 0x3f408c74, 0x3f6fb805, 0x04b2ac96,
+ 0x3f80ac7e, 0x3f8fb800, 0x0402ac8e },
+ },
+ { 0x3000, {
+ 0x3ef0003a, 0x3ef0003a, 0x084210a6,
+ 0x3ef01045, 0x3effec32, 0x07b228a7,
+ 0x3f00284e, 0x3f0fdc29, 0x073244a4,
+ 0x3f104058, 0x3f0fd420, 0x06a258a2,
+ 0x3f305c62, 0x3f2fc818, 0x0612689d,
+ 0x3f508069, 0x3f3fc011, 0x05728496,
+ 0x3f80a072, 0x3f4fc00a, 0x04d28c90,
+ 0x3fc0c07b, 0x3f6fbc04, 0x04429088 },
+ },
+ { 0x3200, {
+ 0x3f00103e, 0x3f00103e, 0x07f1fc9e,
+ 0x3f102447, 0x3f000035, 0x0782149d,
+ 0x3f203c4f, 0x3f0ff02c, 0x07122c9c,
+ 0x3f405458, 0x3f0fe424, 0x06924099,
+ 0x3f607061, 0x3f1fd41d, 0x06024c97,
+ 0x3f909068, 0x3f2fcc16, 0x05726490,
+ 0x3fc0b070, 0x3f3fc80f, 0x04f26c8a,
+ 0x0000d077, 0x3f4fc409, 0x04627484 },
+ },
+ { 0x3400, {
+ 0x3f202040, 0x3f202040, 0x07a1e898,
+ 0x3f303449, 0x3f100c38, 0x0741fc98,
+ 0x3f504c50, 0x3f10002f, 0x06e21495,
+ 0x3f706459, 0x3f1ff028, 0x06722492,
+ 0x3fa08060, 0x3f1fe421, 0x05f2348f,
+ 0x3fd09c67, 0x3f1fdc19, 0x05824c89,
+ 0x0000bc6e, 0x3f2fd014, 0x04f25086,
+ 0x0040dc74, 0x3f3fcc0d, 0x04825c7f },
+ },
+ { 0x3600, {
+ 0x3f403042, 0x3f403042, 0x0761d890,
+ 0x3f504848, 0x3f301c3b, 0x0701f090,
+ 0x3f805c50, 0x3f200c33, 0x06a2008f,
+ 0x3fa07458, 0x3f10002b, 0x06520c8d,
+ 0x3fd0905e, 0x3f1ff424, 0x05e22089,
+ 0x0000ac65, 0x3f1fe81d, 0x05823483,
+ 0x0030cc6a, 0x3f2fdc18, 0x04f23c81,
+ 0x0080e871, 0x3f2fd412, 0x0482407c },
+ },
+ { 0x3800, {
+ 0x3f604043, 0x3f604043, 0x0721c88a,
+ 0x3f80544a, 0x3f502c3c, 0x06d1d88a,
+ 0x3fb06851, 0x3f301c35, 0x0681e889,
+ 0x3fd08456, 0x3f30082f, 0x0611fc88,
+ 0x00009c5d, 0x3f200027, 0x05d20884,
+ 0x0030b863, 0x3f2ff421, 0x05621880,
+ 0x0070d468, 0x3f2fe81b, 0x0502247c,
+ 0x00c0ec6f, 0x3f2fe015, 0x04a22877 },
+ },
+ { 0x3a00, {
+ 0x3f904c44, 0x3f904c44, 0x06e1b884,
+ 0x3fb0604a, 0x3f70383e, 0x0691c885,
+ 0x3fe07451, 0x3f502c36, 0x0661d483,
+ 0x00009055, 0x3f401831, 0x0601ec81,
+ 0x0030a85b, 0x3f300c2a, 0x05b1f480,
+ 0x0070c061, 0x3f300024, 0x0562047a,
+ 0x00b0d867, 0x3f3ff41e, 0x05020c77,
+ 0x00f0f46b, 0x3f2fec19, 0x04a21474 },
+ },
+ { 0x3c00, {
+ 0x3fb05c43, 0x3fb05c43, 0x06c1b07e,
+ 0x3fe06c4b, 0x3f902c3f, 0x0681c081,
+ 0x0000844f, 0x3f703838, 0x0631cc7d,
+ 0x00309855, 0x3f602433, 0x05d1d47e,
+ 0x0060b459, 0x3f50142e, 0x0581e47b,
+ 0x00a0c85f, 0x3f400828, 0x0531f078,
+ 0x00e0e064, 0x3f300021, 0x0501fc73,
+ 0x00b0fc6a, 0x3f3ff41d, 0x04a20873 },
+ },
+ { 0x3e00, {
+ 0x3fe06444, 0x3fe06444, 0x0681a07a,
+ 0x00007849, 0x3fc0503f, 0x0641b07a,
+ 0x0020904d, 0x3fa0403a, 0x05f1c07a,
+ 0x0060a453, 0x3f803034, 0x05c1c878,
+ 0x0090b858, 0x3f70202f, 0x0571d477,
+ 0x00d0d05d, 0x3f501829, 0x0531e073,
+ 0x0110e462, 0x3f500825, 0x04e1e471,
+ 0x01510065, 0x3f40001f, 0x04a1f06d },
+ },
+ { 0x4000, {
+ 0x00007044, 0x00007044, 0x06519476,
+ 0x00208448, 0x3fe05c3f, 0x0621a476,
+ 0x0050984d, 0x3fc04c3a, 0x05e1b075,
+ 0x0080ac52, 0x3fa03c35, 0x05a1b875,
+ 0x00c0c056, 0x3f803030, 0x0561c473,
+ 0x0100d45b, 0x3f70202b, 0x0521d46f,
+ 0x0140e860, 0x3f601427, 0x04d1d46e,
+ 0x01810064, 0x3f500822, 0x0491dc6b },
+ },
+ { 0x5000, {
+ 0x0110a442, 0x0110a442, 0x0551545e,
+ 0x0140b045, 0x00e0983f, 0x0531585f,
+ 0x0160c047, 0x00c08c3c, 0x0511645e,
+ 0x0190cc4a, 0x00908039, 0x04f1685f,
+ 0x01c0dc4c, 0x00707436, 0x04d1705e,
+ 0x0200e850, 0x00506833, 0x04b1785b,
+ 0x0230f453, 0x00305c30, 0x0491805a,
+ 0x02710056, 0x0010542d, 0x04718059 },
+ },
+ { 0x6000, {
+ 0x01c0bc40, 0x01c0bc40, 0x04c13052,
+ 0x01e0c841, 0x01a0b43d, 0x04c13851,
+ 0x0210cc44, 0x0180a83c, 0x04a13453,
+ 0x0230d845, 0x0160a03a, 0x04913c52,
+ 0x0260e047, 0x01409838, 0x04714052,
+ 0x0280ec49, 0x01208c37, 0x04514c50,
+ 0x02b0f44b, 0x01008435, 0x04414c50,
+ 0x02d1004c, 0x00e07c33, 0x0431544f },
+ },
+ { 0x7000, {
+ 0x0230c83e, 0x0230c83e, 0x04711c4c,
+ 0x0250d03f, 0x0210c43c, 0x0471204b,
+ 0x0270d840, 0x0200b83c, 0x0451244b,
+ 0x0290dc42, 0x01e0b43a, 0x0441244c,
+ 0x02b0e443, 0x01c0b038, 0x0441284b,
+ 0x02d0ec44, 0x01b0a438, 0x0421304a,
+ 0x02f0f445, 0x0190a036, 0x04213449,
+ 0x0310f847, 0x01709c34, 0x04213848 },
+ },
+ { 0x8000, {
+ 0x0280d03d, 0x0280d03d, 0x04310c48,
+ 0x02a0d43e, 0x0270c83c, 0x04311047,
+ 0x02b0dc3e, 0x0250c83a, 0x04311447,
+ 0x02d0e040, 0x0240c03a, 0x04211446,
+ 0x02e0e840, 0x0220bc39, 0x04111847,
+ 0x0300e842, 0x0210b438, 0x04012445,
+ 0x0310f043, 0x0200b037, 0x04012045,
+ 0x0330f444, 0x01e0ac36, 0x03f12445 },
+ },
+ { 0xefff, {
+ 0x0340dc3a, 0x0340dc3a, 0x03b0ec40,
+ 0x0340e03a, 0x0330e039, 0x03c0f03e,
+ 0x0350e03b, 0x0330dc39, 0x03c0ec3e,
+ 0x0350e43a, 0x0320dc38, 0x03c0f43e,
+ 0x0360e43b, 0x0320d839, 0x03b0f03e,
+ 0x0360e83b, 0x0310d838, 0x03c0fc3b,
+ 0x0370e83b, 0x0310d439, 0x03a0f83d,
+ 0x0370e83c, 0x0300d438, 0x03b0fc3c },
+ }
+};
+
enum rcar_vin_state {
STOPPED = 0,
RUNNING,
@@ -161,6 +504,9 @@ struct rcar_vin_cam {
/* Client output, as seen by the VIN */
unsigned int width;
unsigned int height;
+ /* User window from S_FMT */
+ unsigned int out_width;
+ unsigned int out_height;
/*
* User window from S_CROP / G_CROP, produced by client cropping and
* scaling, VIN scaling and VIN cropping, mapped back onto the client
@@ -332,7 +678,7 @@ static int rcar_vin_setup(struct rcar_vin_priv *priv)
vnmc |= VNMC_BPS;
/* progressive or interlaced mode */
- interrupts = progressive ? VNIE_FIE | VNIE_EFE : VNIE_EFE;
+ interrupts = progressive ? VNIE_FIE : VNIE_EFE;
/* ack interrupts */
iowrite32(interrupts, priv->base + VNINTS_REG);
@@ -667,6 +1013,60 @@ static void rcar_vin_clock_stop(struct soc_camera_host *ici)
/* VIN does not have "mclk" */
}
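+/* Program the scaling coefficient set whose XS value is closest to the requested ratio */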
+static void set_coeff(struct rcar_vin_priv *priv, unsigned short xs)
+{
+ int i;
+ const struct vin_coeff *p_prev_set = NULL;
+ const struct vin_coeff *p_set = NULL;
+
+ /* Look for suitable coefficient values */
+ for (i = 0; i < ARRAY_SIZE(vin_coeff_set); i++) {
+ p_prev_set = p_set;
+ p_set = &vin_coeff_set[i];
+
+ if (xs < p_set->xs_value)
+ break;
+ }
+
+ /* Use previous value if its XS value is closer */
+ if (p_prev_set && p_set &&
+ xs - p_prev_set->xs_value < p_set->xs_value - xs)
+ p_set = p_prev_set;
+
+ /* Set coefficient registers */
+ iowrite32(p_set->coeff_set[0], priv->base + VNC1A_REG);
+ iowrite32(p_set->coeff_set[1], priv->base + VNC1B_REG);
+ iowrite32(p_set->coeff_set[2], priv->base + VNC1C_REG);
+
+ iowrite32(p_set->coeff_set[3], priv->base + VNC2A_REG);
+ iowrite32(p_set->coeff_set[4], priv->base + VNC2B_REG);
+ iowrite32(p_set->coeff_set[5], priv->base + VNC2C_REG);
+
+ iowrite32(p_set->coeff_set[6], priv->base + VNC3A_REG);
+ iowrite32(p_set->coeff_set[7], priv->base + VNC3B_REG);
+ iowrite32(p_set->coeff_set[8], priv->base + VNC3C_REG);
+
+ iowrite32(p_set->coeff_set[9], priv->base + VNC4A_REG);
+ iowrite32(p_set->coeff_set[10], priv->base + VNC4B_REG);
+ iowrite32(p_set->coeff_set[11], priv->base + VNC4C_REG);
+
+ iowrite32(p_set->coeff_set[12], priv->base + VNC5A_REG);
+ iowrite32(p_set->coeff_set[13], priv->base + VNC5B_REG);
+ iowrite32(p_set->coeff_set[14], priv->base + VNC5C_REG);
+
+ iowrite32(p_set->coeff_set[15], priv->base + VNC6A_REG);
+ iowrite32(p_set->coeff_set[16], priv->base + VNC6B_REG);
+ iowrite32(p_set->coeff_set[17], priv->base + VNC6C_REG);
+
+ iowrite32(p_set->coeff_set[18], priv->base + VNC7A_REG);
+ iowrite32(p_set->coeff_set[19], priv->base + VNC7B_REG);
+ iowrite32(p_set->coeff_set[20], priv->base + VNC7C_REG);
+
+ iowrite32(p_set->coeff_set[21], priv->base + VNC8A_REG);
+ iowrite32(p_set->coeff_set[22], priv->base + VNC8B_REG);
+ iowrite32(p_set->coeff_set[23], priv->base + VNC8C_REG);
+}
+
/* rect is guaranteed to not exceed the scaled camera rectangle */
static int rcar_vin_set_rect(struct soc_camera_device *icd)
{
@@ -676,6 +1076,7 @@ static int rcar_vin_set_rect(struct soc_camera_device *icd)
unsigned int left_offset, top_offset;
unsigned char dsize = 0;
struct v4l2_rect *cam_subrect = &cam->subrect;
+ u32 value;
dev_dbg(icd->parent, "Crop %ux%u@%u:%u\n",
icd->user_width, icd->user_height, cam->vin_left, cam->vin_top);
@@ -695,40 +1096,64 @@ static int rcar_vin_set_rect(struct soc_camera_device *icd)
/* Set Start/End Pixel/Line Pre-Clip */
iowrite32(left_offset << dsize, priv->base + VNSPPRC_REG);
- iowrite32((left_offset + cam->width - 1) << dsize,
+ iowrite32((left_offset + cam_subrect->width - 1) << dsize,
priv->base + VNEPPRC_REG);
switch (priv->field) {
case V4L2_FIELD_INTERLACED:
case V4L2_FIELD_INTERLACED_TB:
case V4L2_FIELD_INTERLACED_BT:
iowrite32(top_offset / 2, priv->base + VNSLPRC_REG);
- iowrite32((top_offset + cam->height) / 2 - 1,
+ iowrite32((top_offset + cam_subrect->height) / 2 - 1,
priv->base + VNELPRC_REG);
break;
default:
iowrite32(top_offset, priv->base + VNSLPRC_REG);
- iowrite32(top_offset + cam->height - 1,
+ iowrite32(top_offset + cam_subrect->height - 1,
priv->base + VNELPRC_REG);
break;
}
+ /* Set scaling coefficient */
+ value = 0;
+ if (cam_subrect->height != cam->out_height)
+ value = (4096 * cam_subrect->height) / cam->out_height;
+ dev_dbg(icd->parent, "YS Value: %x\n", value);
+ iowrite32(value, priv->base + VNYS_REG);
+
+ value = 0;
+ if (cam_subrect->width != cam->out_width)
+ value = (4096 * cam_subrect->width) / cam->out_width;
+
+ /* Horizontal upscaling is up to double size */
+ if (value > 0 && value < 2048)
+ value = 2048;
+
+ dev_dbg(icd->parent, "XS Value: %x\n", value);
+ iowrite32(value, priv->base + VNXS_REG);
+
+ /* Horizontal upscaling is carried out by scaling down from double size */
+ if (value < 4096)
+ value *= 2;
+
+ set_coeff(priv, value);
+
/* Set Start/End Pixel/Line Post-Clip */
iowrite32(0, priv->base + VNSPPOC_REG);
iowrite32(0, priv->base + VNSLPOC_REG);
- iowrite32((cam_subrect->width - 1) << dsize, priv->base + VNEPPOC_REG);
+ iowrite32((cam->out_width - 1) << dsize, priv->base + VNEPPOC_REG);
switch (priv->field) {
case V4L2_FIELD_INTERLACED:
case V4L2_FIELD_INTERLACED_TB:
case V4L2_FIELD_INTERLACED_BT:
- iowrite32(cam_subrect->height / 2 - 1,
+ iowrite32(cam->out_height / 2 - 1,
priv->base + VNELPOC_REG);
break;
default:
- iowrite32(cam_subrect->height - 1, priv->base + VNELPOC_REG);
+ iowrite32(cam->out_height - 1, priv->base + VNELPOC_REG);
break;
}
- iowrite32(ALIGN(cam->width, 0x10), priv->base + VNIS_REG);
+ iowrite32(ALIGN(cam->out_width, 0x10), priv->base + VNIS_REG);
return 0;
}
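
The scaling writes above encode the input/output ratio as a 4.12 fixed-point value (4096 means 1:1), clamped so that horizontal upscaling never exceeds 2x. A minimal illustrative sketch of that derivation, not part of the patch (the helper name is hypothetical):

    #include <stdint.h>
    #include <stdbool.h>

    /* Derive a VNYS/VNXS-style scaling value, mirroring rcar_vin_set_rect(). */
    static uint32_t vin_scale_value(uint32_t in, uint32_t out, bool horizontal)
    {
    	uint32_t value = 0;

    	if (in != out)
    		value = (4096 * in) / out;	/* 4096 means 1:1 */

    	/* horizontal upscaling is limited to 2x, i.e. a minimum ratio of 0.5 */
    	if (horizontal && value > 0 && value < 2048)
    		value = 2048;

    	return value;
    }

For example, scaling a 640-pixel-wide sub-rectangle to an 800-pixel output gives 4096 * 640 / 800 = 3276, while 0 still means "no scaling".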
@@ -819,7 +1244,7 @@ static int rcar_vin_set_bus_param(struct soc_camera_device *icd)
if (ret < 0 && ret != -ENOIOCTLCMD)
return ret;
- val = priv->field == V4L2_FIELD_NONE ? VNDMR2_FTEV : 0;
+ val = VNDMR2_FTEV | VNDMR2_VLV(1);
if (!(common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW))
val |= VNDMR2_VPS;
if (!(common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW))
@@ -880,6 +1305,14 @@ static const struct soc_mbus_pixelfmt rcar_vin_formats[] = {
.layout = SOC_MBUS_LAYOUT_PLANAR_Y_C,
},
{
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .name = "YUYV",
+ .bits_per_sample = 16,
+ .packing = SOC_MBUS_PACKING_NONE,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+ {
.fourcc = V4L2_PIX_FMT_UYVY,
.name = "UYVY",
.bits_per_sample = 16,
@@ -999,6 +1432,8 @@ static int rcar_vin_get_formats(struct soc_camera_device *icd, unsigned int idx,
cam->subrect = rect;
cam->width = mf.width;
cam->height = mf.height;
+ cam->out_width = mf.width;
+ cam->out_height = mf.height;
icd->host_priv = cam;
} else {
@@ -1259,6 +1694,9 @@ static int rcar_vin_set_fmt(struct soc_camera_device *icd,
dev_dbg(dev, "W: %u : %u, H: %u : %u\n",
vin_sub_width, pix->width, vin_sub_height, pix->height);
+ cam->out_width = pix->width;
+ cam->out_height = pix->height;
+
icd->current_fmt = xlate;
priv->field = field;
@@ -1310,8 +1748,12 @@ static int rcar_vin_try_fmt(struct soc_camera_device *icd,
if (ret < 0)
return ret;
- pix->width = mf.width;
- pix->height = mf.height;
+ /* Adjust only if VIN cannot scale */
+ if (pix->width > mf.width * 2)
+ pix->width = mf.width * 2;
+ if (pix->height > mf.height * 3)
+ pix->height = mf.height * 3;
+
pix->field = mf.field;
pix->colorspace = mf.colorspace;
@@ -1395,6 +1837,8 @@ static struct soc_camera_host_ops rcar_vin_host_ops = {
#ifdef CONFIG_OF
static struct of_device_id rcar_vin_of_table[] = {
+ { .compatible = "renesas,vin-r8a7794", .data = (void *)RCAR_GEN2 },
+ { .compatible = "renesas,vin-r8a7793", .data = (void *)RCAR_GEN2 },
{ .compatible = "renesas,vin-r8a7791", .data = (void *)RCAR_GEN2 },
{ .compatible = "renesas,vin-r8a7790", .data = (void *)RCAR_GEN2 },
{ .compatible = "renesas,vin-r8a7779", .data = (void *)RCAR_H1 },
diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c
index ee5c3992b27..39ff79f6aa6 100644
--- a/drivers/media/platform/vivid/vivid-vid-out.c
+++ b/drivers/media/platform/vivid/vivid-vid-out.c
@@ -625,7 +625,7 @@ int vivid_vid_out_g_selection(struct file *file, void *priv,
sel->r = dev->fmt_out_rect;
break;
case V4L2_SEL_TGT_CROP_BOUNDS:
- if (!dev->has_compose_out)
+ if (!dev->has_crop_out)
return -EINVAL;
sel->r = vivid_max_rect;
break;
diff --git a/drivers/media/usb/Kconfig b/drivers/media/usb/Kconfig
index 056181f2f56..7496f332f3f 100644
--- a/drivers/media/usb/Kconfig
+++ b/drivers/media/usb/Kconfig
@@ -24,7 +24,6 @@ if MEDIA_ANALOG_TV_SUPPORT
comment "Analog TV USB devices"
source "drivers/media/usb/pvrusb2/Kconfig"
source "drivers/media/usb/hdpvr/Kconfig"
-source "drivers/media/usb/tlg2300/Kconfig"
source "drivers/media/usb/usbvision/Kconfig"
source "drivers/media/usb/stk1160/Kconfig"
source "drivers/media/usb/go7007/Kconfig"
diff --git a/drivers/media/usb/Makefile b/drivers/media/usb/Makefile
index 6f2eb7c8416..8874ba774a3 100644
--- a/drivers/media/usb/Makefile
+++ b/drivers/media/usb/Makefile
@@ -16,7 +16,6 @@ obj-$(CONFIG_VIDEO_CPIA2) += cpia2/
obj-$(CONFIG_VIDEO_AU0828) += au0828/
obj-$(CONFIG_VIDEO_HDPVR) += hdpvr/
obj-$(CONFIG_VIDEO_PVRUSB2) += pvrusb2/
-obj-$(CONFIG_VIDEO_TLG2300) += tlg2300/
obj-$(CONFIG_VIDEO_USBVISION) += usbvision/
obj-$(CONFIG_VIDEO_STK1160) += stk1160/
obj-$(CONFIG_VIDEO_CX231XX) += cx231xx/
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 75658717961..faac2f4e0f3 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -1017,6 +1017,12 @@ static int v4l_querycap(const struct v4l2_ioctl_ops *ops,
ret = ops->vidioc_querycap(file, fh, cap);
cap->capabilities |= V4L2_CAP_EXT_PIX_FORMAT;
+ /*
+ * Drivers MUST fill in device_caps, so check for this and
+ * warn if it was forgotten.
+ */
+ WARN_ON(!(cap->capabilities & V4L2_CAP_DEVICE_CAPS) ||
+ !cap->device_caps);
cap->device_caps |= V4L2_CAP_EXT_PIX_FORMAT;
return ret;
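
The WARN_ON added above fires when a driver's vidioc_querycap handler leaves device_caps empty. A hedged sketch of the expected pattern for a hypothetical capture driver ("mydrv" and its capability set are assumptions, not taken from any driver in this series):

    #include <linux/videodev2.h>
    #include <media/v4l2-ioctl.h>

    static int mydrv_querycap(struct file *file, void *priv,
    			  struct v4l2_capability *cap)
    {
    	strlcpy(cap->driver, "mydrv", sizeof(cap->driver));
    	strlcpy(cap->card, "Example capture device", sizeof(cap->card));
    	snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:mydrv");
    	/* Capabilities of this particular device node... */
    	cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
    	/* ...plus the capabilities of the device as a whole. */
    	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
    	return 0;
    }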
diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c
index 3d5d792d5cb..410c3974987 100644
--- a/drivers/memory/fsl_ifc.c
+++ b/drivers/memory/fsl_ifc.c
@@ -61,7 +61,7 @@ int fsl_ifc_find(phys_addr_t addr_base)
if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->regs)
return -ENODEV;
- for (i = 0; i < ARRAY_SIZE(fsl_ifc_ctrl_dev->regs->cspr_cs); i++) {
+ for (i = 0; i < fsl_ifc_ctrl_dev->banks; i++) {
u32 cspr = in_be32(&fsl_ifc_ctrl_dev->regs->cspr_cs[i].cspr);
if (cspr & CSPR_V && (cspr & CSPR_BA) ==
convert_ifc_address(addr_base))
@@ -213,7 +213,7 @@ static irqreturn_t fsl_ifc_ctrl_irq(int irqno, void *data)
static int fsl_ifc_ctrl_probe(struct platform_device *dev)
{
int ret = 0;
-
+ int version, banks;
dev_info(&dev->dev, "Freescale Integrated Flash Controller\n");
@@ -231,6 +231,15 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
goto err;
}
+ version = ioread32be(&fsl_ifc_ctrl_dev->regs->ifc_rev) &
+ FSL_IFC_VERSION_MASK;
+ banks = (version == FSL_IFC_VERSION_1_0_0) ? 4 : 8;
+ dev_info(&dev->dev, "IFC version %d.%d, %d banks\n",
+ version >> 24, (version >> 16) & 0xf, banks);
+
+ fsl_ifc_ctrl_dev->version = version;
+ fsl_ifc_ctrl_dev->banks = banks;
+
/* get the Controller level irq */
fsl_ifc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0);
if (fsl_ifc_ctrl_dev->irq == NO_IRQ) {
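
The probe change above reads the IFC revision register once and caches both the version and the resulting bank count. A small standalone illustration of how the printed major/minor and bank count fall out of that word; the example value 0x01000000 is an assumed encoding of version 1.0.0, not taken from the header:

    #include <stdio.h>

    int main(void)
    {
    	unsigned int version = 0x01000000;	/* assumed encoding of v1.0.0 */
    	unsigned int banks = (version == 0x01000000) ? 4 : 8;

    	printf("IFC version %u.%u, %u banks\n",
    	       version >> 24, (version >> 16) & 0xf, banks);
    	return 0;
    }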
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index cca47210913..51fd6b52437 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -34,7 +34,8 @@ struct cxl_context *cxl_context_alloc(void)
/*
* Initialises a CXL context.
*/
-int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
+int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
+ struct address_space *mapping)
{
int i;
@@ -42,6 +43,8 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
ctx->afu = afu;
ctx->master = master;
ctx->pid = NULL; /* Set in start work ioctl */
+ mutex_init(&ctx->mapping_lock);
+ ctx->mapping = mapping;
/*
* Allocate the segment table before we put it in the IDR so that we
@@ -82,12 +85,12 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
* Allocating IDR! We better make sure everything's setup that
* dereferences from it.
*/
+ mutex_lock(&afu->contexts_lock);
idr_preload(GFP_KERNEL);
- spin_lock(&afu->contexts_lock);
i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0,
ctx->afu->num_procs, GFP_NOWAIT);
- spin_unlock(&afu->contexts_lock);
idr_preload_end();
+ mutex_unlock(&afu->contexts_lock);
if (i < 0)
return i;
@@ -147,6 +150,12 @@ static void __detach_context(struct cxl_context *ctx)
afu_release_irqs(ctx);
flush_work(&ctx->fault_work); /* Only needed for dedicated process */
wake_up_all(&ctx->wq);
+
+ /* Release Problem State Area mapping */
+ mutex_lock(&ctx->mapping_lock);
+ if (ctx->mapping)
+ unmap_mapping_range(ctx->mapping, 0, 0, 1);
+ mutex_unlock(&ctx->mapping_lock);
}
/*
@@ -168,21 +177,22 @@ void cxl_context_detach_all(struct cxl_afu *afu)
struct cxl_context *ctx;
int tmp;
- rcu_read_lock();
- idr_for_each_entry(&afu->contexts_idr, ctx, tmp)
+ mutex_lock(&afu->contexts_lock);
+ idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
/*
* Anything done in here needs to be setup before the IDR is
* created and torn down after the IDR removed
*/
__detach_context(ctx);
- rcu_read_unlock();
+ }
+ mutex_unlock(&afu->contexts_lock);
}
void cxl_context_free(struct cxl_context *ctx)
{
- spin_lock(&ctx->afu->contexts_lock);
+ mutex_lock(&ctx->afu->contexts_lock);
idr_remove(&ctx->afu->contexts_idr, ctx->pe);
- spin_unlock(&ctx->afu->contexts_lock);
+ mutex_unlock(&ctx->afu->contexts_lock);
synchronize_rcu();
free_page((u64)ctx->sstp);
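
The conversion above replaces the contexts spinlock with a mutex because context teardown (flush_work(), unmap_mapping_range()) can sleep while the lock is held. A minimal sketch of the resulting IDR allocation pattern, with illustrative names rather than the driver's own:

    #include <linux/idr.h>
    #include <linux/mutex.h>
    #include <linux/slab.h>

    struct foo_table {
    	struct mutex lock;
    	struct idr idr;
    };

    /* Allocate an ID for @obj; returns the ID or a negative errno. */
    static int foo_add(struct foo_table *t, void *obj, int max)
    {
    	int id;

    	mutex_lock(&t->lock);
    	idr_preload(GFP_KERNEL);	/* may sleep, so done before idr_alloc() */
    	id = idr_alloc(&t->idr, obj, 0, max, GFP_NOWAIT);
    	idr_preload_end();
    	mutex_unlock(&t->lock);

    	return id;
    }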
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index b5b6bda44a0..28078f8894a 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -351,7 +351,7 @@ struct cxl_afu {
struct device *chardev_s, *chardev_m, *chardev_d;
struct idr contexts_idr;
struct dentry *debugfs;
- spinlock_t contexts_lock;
+ struct mutex contexts_lock;
struct mutex spa_mutex;
spinlock_t afu_cntl_lock;
@@ -398,6 +398,10 @@ struct cxl_context {
phys_addr_t psn_phys;
u64 psn_size;
+ /* Used to unmap any mmaps when force detaching */
+ struct address_space *mapping;
+ struct mutex mapping_lock;
+
spinlock_t sste_lock; /* Protects segment table entries */
struct cxl_sste *sstp;
u64 sstp0, sstp1;
@@ -599,7 +603,8 @@ int cxl_alloc_sst(struct cxl_context *ctx);
void init_cxl_native(void);
struct cxl_context *cxl_context_alloc(void);
-int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master);
+int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
+ struct address_space *mapping);
void cxl_context_free(struct cxl_context *ctx);
int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma);
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index 378b099e7c0..e9f2f10dbb3 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -77,7 +77,7 @@ static int __afu_open(struct inode *inode, struct file *file, bool master)
goto err_put_afu;
}
- if ((rc = cxl_context_init(ctx, afu, master)))
+ if ((rc = cxl_context_init(ctx, afu, master, inode->i_mapping)))
goto err_put_afu;
pr_devel("afu_open pe: %i\n", ctx->pe);
@@ -113,6 +113,10 @@ static int afu_release(struct inode *inode, struct file *file)
__func__, ctx->pe);
cxl_context_detach(ctx);
+ mutex_lock(&ctx->mapping_lock);
+ ctx->mapping = NULL;
+ mutex_unlock(&ctx->mapping_lock);
+
put_device(&ctx->afu->dev);
/*
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index 9a5a442269a..f2b37b41a0d 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -277,6 +277,7 @@ static int do_process_element_cmd(struct cxl_context *ctx,
u64 cmd, u64 pe_state)
{
u64 state;
+ unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
WARN_ON(!ctx->afu->enabled);
@@ -286,6 +287,10 @@ static int do_process_element_cmd(struct cxl_context *ctx,
smp_mb();
cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe);
while (1) {
+ if (time_after_eq(jiffies, timeout)) {
+ dev_warn(&ctx->afu->dev, "WARNING: Process Element Command timed out!\n");
+ return -EBUSY;
+ }
state = be64_to_cpup(ctx->afu->sw_command_status);
if (state == ~0ULL) {
pr_err("cxl: Error adding process element to AFU\n");
@@ -610,13 +615,6 @@ static inline int detach_process_native_dedicated(struct cxl_context *ctx)
return 0;
}
-/*
- * TODO: handle case when this is called inside a rcu_read_lock() which may
- * happen when we unbind the driver (ie. cxl_context_detach_all()) . Terminate
- * & remove use a mutex lock and schedule which will not good with lock held.
- * May need to write do_process_element_cmd() that handles outstanding page
- * faults synchronously.
- */
static inline int detach_process_native_afu_directed(struct cxl_context *ctx)
{
if (!ctx->pe_inserted)
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 10c98ab7f46..0f2cc9f8b4d 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -502,7 +502,7 @@ static struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice)
afu->dev.release = cxl_release_afu;
afu->slice = slice;
idr_init(&afu->contexts_idr);
- spin_lock_init(&afu->contexts_lock);
+ mutex_init(&afu->contexts_lock);
spin_lock_init(&afu->afu_cntl_lock);
mutex_init(&afu->spa_mutex);
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index ce7ec06d87d..461bdbd5d48 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -121,7 +121,7 @@ static ssize_t reset_store_afu(struct device *device,
int rc;
/* Not safe to reset if it is currently in use */
- spin_lock(&afu->contexts_lock);
+ mutex_lock(&afu->contexts_lock);
if (!idr_is_empty(&afu->contexts_idr)) {
rc = -EBUSY;
goto err;
@@ -132,7 +132,7 @@ static ssize_t reset_store_afu(struct device *device,
rc = count;
err:
- spin_unlock(&afu->contexts_lock);
+ mutex_unlock(&afu->contexts_lock);
return rc;
}
@@ -247,7 +247,7 @@ static ssize_t mode_store(struct device *device, struct device_attribute *attr,
int rc = -EBUSY;
/* can't change this if we have a user */
- spin_lock(&afu->contexts_lock);
+ mutex_lock(&afu->contexts_lock);
if (!idr_is_empty(&afu->contexts_idr))
goto err;
@@ -271,7 +271,7 @@ static ssize_t mode_store(struct device *device, struct device_attribute *attr,
afu->current_mode = 0;
afu->num_procs = 0;
- spin_unlock(&afu->contexts_lock);
+ mutex_unlock(&afu->contexts_lock);
if ((rc = _cxl_afu_deactivate_mode(afu, old_mode)))
return rc;
@@ -280,7 +280,7 @@ static ssize_t mode_store(struct device *device, struct device_attribute *attr,
return count;
err:
- spin_unlock(&afu->contexts_lock);
+ mutex_unlock(&afu->contexts_lock);
return rc;
}
diff --git a/drivers/misc/mic/host/mic_debugfs.c b/drivers/misc/mic/host/mic_debugfs.c
index 028ba5d6fd1..687e9aacf3b 100644
--- a/drivers/misc/mic/host/mic_debugfs.c
+++ b/drivers/misc/mic/host/mic_debugfs.c
@@ -326,21 +326,27 @@ static int mic_vdev_info_show(struct seq_file *s, void *unused)
}
avail = vrh->vring.avail;
seq_printf(s, "avail flags 0x%x idx %d\n",
- avail->flags, avail->idx & (num - 1));
+ vringh16_to_cpu(vrh, avail->flags),
+ vringh16_to_cpu(vrh, avail->idx) & (num - 1));
seq_printf(s, "avail flags 0x%x idx %d\n",
- avail->flags, avail->idx);
+ vringh16_to_cpu(vrh, avail->flags),
+ vringh16_to_cpu(vrh, avail->idx));
for (j = 0; j < num; j++)
seq_printf(s, "avail ring[%d] %d\n",
j, avail->ring[j]);
used = vrh->vring.used;
seq_printf(s, "used flags 0x%x idx %d\n",
- used->flags, used->idx & (num - 1));
+ vringh16_to_cpu(vrh, used->flags),
+ vringh16_to_cpu(vrh, used->idx) & (num - 1));
seq_printf(s, "used flags 0x%x idx %d\n",
- used->flags, used->idx);
+ vringh16_to_cpu(vrh, used->flags),
+ vringh16_to_cpu(vrh, used->idx));
for (j = 0; j < num; j++)
seq_printf(s, "used ring[%d] id %d len %d\n",
- j, used->ring[j].id,
- used->ring[j].len);
+ j, vringh32_to_cpu(vrh,
+ used->ring[j].id),
+ vringh32_to_cpu(vrh,
+ used->ring[j].len));
}
}
mutex_unlock(&mdev->mic_mutex);
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 62aba9af19f..03d7c7521d9 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -2561,7 +2561,7 @@ static int atmci_runtime_resume(struct device *dev)
static const struct dev_pm_ops atmci_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
- SET_PM_RUNTIME_PM_OPS(atmci_runtime_suspend, atmci_runtime_resume, NULL)
+ SET_RUNTIME_PM_OPS(atmci_runtime_suspend, atmci_runtime_resume, NULL)
};
static struct platform_driver atmci_driver = {
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 94b821042d9..71fea895ce3 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -133,7 +133,7 @@ config MTD_OF_PARTS
help
This provides a partition parsing function which derives
the partition map from the children of the flash node,
- as described in Documentation/devicetree/booting-without-of.txt.
+ as described in Documentation/devicetree/bindings/mtd/partition.txt.
config MTD_AR7_PARTS
tristate "TI AR7 partitioning support"
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
index 8057f52a45b..cc13ea5ce4d 100644
--- a/drivers/mtd/bcm47xxpart.c
+++ b/drivers/mtd/bcm47xxpart.c
@@ -15,8 +15,12 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
-/* 10 parts were found on sflash on Netgear WNDR4500 */
-#define BCM47XXPART_MAX_PARTS 12
+/*
+ * NAND flash on Netgear R6250 was verified to contain 15 partitions.
+ * This will result in allocating an array that is larger than some old devices need, but the
+ * memory will be freed soon anyway (see mtd_device_parse_register).
+ */
+#define BCM47XXPART_MAX_PARTS 20
/*
* Amount of bytes we read when analyzing each block of flash memory.
@@ -168,18 +172,26 @@ static int bcm47xxpart_parse(struct mtd_info *master,
i++;
}
- bcm47xxpart_add_part(&parts[curr_part++], "linux",
- offset + trx->offset[i], 0);
- i++;
+ if (trx->offset[i]) {
+ bcm47xxpart_add_part(&parts[curr_part++],
+ "linux",
+ offset + trx->offset[i],
+ 0);
+ i++;
+ }
/*
* Pure rootfs size is known and can be calculated as:
* trx->length - trx->offset[i]. We don't fill it as
* we want to have jffs2 (overlay) in the same mtd.
*/
- bcm47xxpart_add_part(&parts[curr_part++], "rootfs",
- offset + trx->offset[i], 0);
- i++;
+ if (trx->offset[i]) {
+ bcm47xxpart_add_part(&parts[curr_part++],
+ "rootfs",
+ offset + trx->offset[i],
+ 0);
+ i++;
+ }
last_trx_part = curr_part - 1;
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 3096f3ded3a..286b97a304c 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -2654,8 +2654,7 @@ static void cfi_intelext_destroy(struct mtd_info *mtd)
kfree(cfi);
for (i = 0; i < mtd->numeraseregions; i++) {
region = &mtd->eraseregions[i];
- if (region->lockmap)
- kfree(region->lockmap);
+ kfree(region->lockmap);
}
kfree(mtd->eraseregions);
}
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index 72346048532..448ce42f951 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -22,6 +22,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/string.h>
#include <linux/slab.h>
@@ -1655,22 +1656,21 @@ static int dbg_flashctrl_show(struct seq_file *s, void *p)
{
struct docg3 *docg3 = (struct docg3 *)s->private;
- int pos = 0;
u8 fctrl;
mutex_lock(&docg3->cascade->lock);
fctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
mutex_unlock(&docg3->cascade->lock);
- pos += seq_printf(s,
- "FlashControl : 0x%02x (%s,CE# %s,%s,%s,flash %s)\n",
- fctrl,
- fctrl & DOC_CTRL_VIOLATION ? "protocol violation" : "-",
- fctrl & DOC_CTRL_CE ? "active" : "inactive",
- fctrl & DOC_CTRL_PROTECTION_ERROR ? "protection error" : "-",
- fctrl & DOC_CTRL_SEQUENCE_ERROR ? "sequence error" : "-",
- fctrl & DOC_CTRL_FLASHREADY ? "ready" : "not ready");
- return pos;
+ seq_printf(s, "FlashControl : 0x%02x (%s,CE# %s,%s,%s,flash %s)\n",
+ fctrl,
+ fctrl & DOC_CTRL_VIOLATION ? "protocol violation" : "-",
+ fctrl & DOC_CTRL_CE ? "active" : "inactive",
+ fctrl & DOC_CTRL_PROTECTION_ERROR ? "protection error" : "-",
+ fctrl & DOC_CTRL_SEQUENCE_ERROR ? "sequence error" : "-",
+ fctrl & DOC_CTRL_FLASHREADY ? "ready" : "not ready");
+
+ return 0;
}
DEBUGFS_RO_ATTR(flashcontrol, dbg_flashctrl_show);
@@ -1678,58 +1678,56 @@ static int dbg_asicmode_show(struct seq_file *s, void *p)
{
struct docg3 *docg3 = (struct docg3 *)s->private;
- int pos = 0, pctrl, mode;
+ int pctrl, mode;
mutex_lock(&docg3->cascade->lock);
pctrl = doc_register_readb(docg3, DOC_ASICMODE);
mode = pctrl & 0x03;
mutex_unlock(&docg3->cascade->lock);
- pos += seq_printf(s,
- "%04x : RAM_WE=%d,RSTIN_RESET=%d,BDETCT_RESET=%d,WRITE_ENABLE=%d,POWERDOWN=%d,MODE=%d%d (",
- pctrl,
- pctrl & DOC_ASICMODE_RAM_WE ? 1 : 0,
- pctrl & DOC_ASICMODE_RSTIN_RESET ? 1 : 0,
- pctrl & DOC_ASICMODE_BDETCT_RESET ? 1 : 0,
- pctrl & DOC_ASICMODE_MDWREN ? 1 : 0,
- pctrl & DOC_ASICMODE_POWERDOWN ? 1 : 0,
- mode >> 1, mode & 0x1);
+ seq_printf(s,
+ "%04x : RAM_WE=%d,RSTIN_RESET=%d,BDETCT_RESET=%d,WRITE_ENABLE=%d,POWERDOWN=%d,MODE=%d%d (",
+ pctrl,
+ pctrl & DOC_ASICMODE_RAM_WE ? 1 : 0,
+ pctrl & DOC_ASICMODE_RSTIN_RESET ? 1 : 0,
+ pctrl & DOC_ASICMODE_BDETCT_RESET ? 1 : 0,
+ pctrl & DOC_ASICMODE_MDWREN ? 1 : 0,
+ pctrl & DOC_ASICMODE_POWERDOWN ? 1 : 0,
+ mode >> 1, mode & 0x1);
switch (mode) {
case DOC_ASICMODE_RESET:
- pos += seq_puts(s, "reset");
+ seq_puts(s, "reset");
break;
case DOC_ASICMODE_NORMAL:
- pos += seq_puts(s, "normal");
+ seq_puts(s, "normal");
break;
case DOC_ASICMODE_POWERDOWN:
- pos += seq_puts(s, "powerdown");
+ seq_puts(s, "powerdown");
break;
}
- pos += seq_puts(s, ")\n");
- return pos;
+ seq_puts(s, ")\n");
+ return 0;
}
DEBUGFS_RO_ATTR(asic_mode, dbg_asicmode_show);
static int dbg_device_id_show(struct seq_file *s, void *p)
{
struct docg3 *docg3 = (struct docg3 *)s->private;
- int pos = 0;
int id;
mutex_lock(&docg3->cascade->lock);
id = doc_register_readb(docg3, DOC_DEVICESELECT);
mutex_unlock(&docg3->cascade->lock);
- pos += seq_printf(s, "DeviceId = %d\n", id);
- return pos;
+ seq_printf(s, "DeviceId = %d\n", id);
+ return 0;
}
DEBUGFS_RO_ATTR(device_id, dbg_device_id_show);
static int dbg_protection_show(struct seq_file *s, void *p)
{
struct docg3 *docg3 = (struct docg3 *)s->private;
- int pos = 0;
int protect, dps0, dps0_low, dps0_high, dps1, dps1_low, dps1_high;
mutex_lock(&docg3->cascade->lock);
@@ -1742,45 +1740,40 @@ static int dbg_protection_show(struct seq_file *s, void *p)
dps1_high = doc_register_readw(docg3, DOC_DPS1_ADDRHIGH);
mutex_unlock(&docg3->cascade->lock);
- pos += seq_printf(s, "Protection = 0x%02x (",
- protect);
+ seq_printf(s, "Protection = 0x%02x (", protect);
if (protect & DOC_PROTECT_FOUNDRY_OTP_LOCK)
- pos += seq_puts(s, "FOUNDRY_OTP_LOCK,");
+ seq_puts(s, "FOUNDRY_OTP_LOCK,");
if (protect & DOC_PROTECT_CUSTOMER_OTP_LOCK)
- pos += seq_puts(s, "CUSTOMER_OTP_LOCK,");
+ seq_puts(s, "CUSTOMER_OTP_LOCK,");
if (protect & DOC_PROTECT_LOCK_INPUT)
- pos += seq_puts(s, "LOCK_INPUT,");
+ seq_puts(s, "LOCK_INPUT,");
if (protect & DOC_PROTECT_STICKY_LOCK)
- pos += seq_puts(s, "STICKY_LOCK,");
+ seq_puts(s, "STICKY_LOCK,");
if (protect & DOC_PROTECT_PROTECTION_ENABLED)
- pos += seq_puts(s, "PROTECTION ON,");
+ seq_puts(s, "PROTECTION ON,");
if (protect & DOC_PROTECT_IPL_DOWNLOAD_LOCK)
- pos += seq_puts(s, "IPL_DOWNLOAD_LOCK,");
+ seq_puts(s, "IPL_DOWNLOAD_LOCK,");
if (protect & DOC_PROTECT_PROTECTION_ERROR)
- pos += seq_puts(s, "PROTECT_ERR,");
+ seq_puts(s, "PROTECT_ERR,");
else
- pos += seq_puts(s, "NO_PROTECT_ERR");
- pos += seq_puts(s, ")\n");
-
- pos += seq_printf(s, "DPS0 = 0x%02x : "
- "Protected area [0x%x - 0x%x] : OTP=%d, READ=%d, "
- "WRITE=%d, HW_LOCK=%d, KEY_OK=%d\n",
- dps0, dps0_low, dps0_high,
- !!(dps0 & DOC_DPS_OTP_PROTECTED),
- !!(dps0 & DOC_DPS_READ_PROTECTED),
- !!(dps0 & DOC_DPS_WRITE_PROTECTED),
- !!(dps0 & DOC_DPS_HW_LOCK_ENABLED),
- !!(dps0 & DOC_DPS_KEY_OK));
- pos += seq_printf(s, "DPS1 = 0x%02x : "
- "Protected area [0x%x - 0x%x] : OTP=%d, READ=%d, "
- "WRITE=%d, HW_LOCK=%d, KEY_OK=%d\n",
- dps1, dps1_low, dps1_high,
- !!(dps1 & DOC_DPS_OTP_PROTECTED),
- !!(dps1 & DOC_DPS_READ_PROTECTED),
- !!(dps1 & DOC_DPS_WRITE_PROTECTED),
- !!(dps1 & DOC_DPS_HW_LOCK_ENABLED),
- !!(dps1 & DOC_DPS_KEY_OK));
- return pos;
+ seq_puts(s, "NO_PROTECT_ERR");
+ seq_puts(s, ")\n");
+
+ seq_printf(s, "DPS0 = 0x%02x : Protected area [0x%x - 0x%x] : OTP=%d, READ=%d, WRITE=%d, HW_LOCK=%d, KEY_OK=%d\n",
+ dps0, dps0_low, dps0_high,
+ !!(dps0 & DOC_DPS_OTP_PROTECTED),
+ !!(dps0 & DOC_DPS_READ_PROTECTED),
+ !!(dps0 & DOC_DPS_WRITE_PROTECTED),
+ !!(dps0 & DOC_DPS_HW_LOCK_ENABLED),
+ !!(dps0 & DOC_DPS_KEY_OK));
+ seq_printf(s, "DPS1 = 0x%02x : Protected area [0x%x - 0x%x] : OTP=%d, READ=%d, WRITE=%d, HW_LOCK=%d, KEY_OK=%d\n",
+ dps1, dps1_low, dps1_high,
+ !!(dps1 & DOC_DPS_OTP_PROTECTED),
+ !!(dps1 & DOC_DPS_READ_PROTECTED),
+ !!(dps1 & DOC_DPS_WRITE_PROTECTED),
+ !!(dps1 & DOC_DPS_HW_LOCK_ENABLED),
+ !!(dps1 & DOC_DPS_KEY_OK));
+ return 0;
}
DEBUGFS_RO_ATTR(protection, dbg_protection_show);
@@ -2126,9 +2119,18 @@ static int __exit docg3_release(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
+static struct of_device_id docg3_dt_ids[] = {
+ { .compatible = "m-systems,diskonchip-g3" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, docg3_dt_ids);
+#endif
+
static struct platform_driver g3_driver = {
.driver = {
.name = "docg3",
+ .of_match_table = of_match_ptr(docg3_dt_ids),
},
.suspend = docg3_suspend,
.resume = docg3_resume,
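
The docg3 debugfs conversion above reflects that seq_printf()/seq_puts() no longer return a useful byte count; show callbacks simply emit their output and return 0. A minimal sketch of the resulting pattern (names are illustrative):

    static int foo_show(struct seq_file *s, void *unused)
    {
    	seq_printf(s, "value = %d\n", 42);
    	seq_puts(s, "done\n");
    	return 0;	/* success; the seq_file core handles buffer sizing */
    }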
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index ed827cf894e..85e35467fba 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -128,13 +128,10 @@ static int m25p80_read(struct spi_nor *nor, loff_t from, size_t len,
struct spi_device *spi = flash->spi;
struct spi_transfer t[2];
struct spi_message m;
- int dummy = nor->read_dummy;
- int ret;
+ unsigned int dummy = nor->read_dummy;
- /* Wait till previous write/erase is done. */
- ret = nor->wait_till_ready(nor);
- if (ret)
- return ret;
+ /* convert the dummy cycles to the number of bytes */
+ dummy /= 8;
spi_message_init(&m);
memset(t, 0, (sizeof t));
@@ -160,21 +157,10 @@ static int m25p80_read(struct spi_nor *nor, loff_t from, size_t len,
static int m25p80_erase(struct spi_nor *nor, loff_t offset)
{
struct m25p *flash = nor->priv;
- int ret;
dev_dbg(nor->dev, "%dKiB at 0x%08x\n",
flash->mtd.erasesize / 1024, (u32)offset);
- /* Wait until finished previous write command. */
- ret = nor->wait_till_ready(nor);
- if (ret)
- return ret;
-
- /* Send write enable, then erase commands. */
- ret = nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0, 0);
- if (ret)
- return ret;
-
/* Set up command buffer. */
flash->command[0] = nor->erase_opcode;
m25p_addr2cmd(nor, offset, flash->command);
@@ -260,7 +246,6 @@ static int m25p_remove(struct spi_device *spi)
return mtd_device_unregister(&flash->mtd);
}
-
/*
* XXX This needs to be kept in sync with spi_nor_ids. We can't share
* it with spi-nor, because if this is built as a module then modpost
@@ -287,7 +272,7 @@ static const struct spi_device_id m25p_ids[] = {
{"s25fl512s"}, {"s70fl01gs"}, {"s25sl12800"}, {"s25sl12801"},
{"s25fl129p0"}, {"s25fl129p1"}, {"s25sl004a"}, {"s25sl008a"},
{"s25sl016a"}, {"s25sl032a"}, {"s25sl064a"}, {"s25fl008k"},
- {"s25fl016k"}, {"s25fl064k"},
+ {"s25fl016k"}, {"s25fl064k"}, {"s25fl132k"},
{"sst25vf040b"},{"sst25vf080b"},{"sst25vf016b"},{"sst25vf032b"},
{"sst25vf064c"},{"sst25wf512"}, {"sst25wf010"}, {"sst25wf020"},
{"sst25wf040"},
@@ -300,17 +285,16 @@ static const struct spi_device_id m25p_ids[] = {
{"m45pe10"}, {"m45pe80"}, {"m45pe16"},
{"m25pe20"}, {"m25pe80"}, {"m25pe16"},
{"m25px16"}, {"m25px32"}, {"m25px32-s0"}, {"m25px32-s1"},
- {"m25px64"},
+ {"m25px64"}, {"m25px80"},
{"w25x10"}, {"w25x20"}, {"w25x40"}, {"w25x80"},
{"w25x16"}, {"w25x32"}, {"w25q32"}, {"w25q32dw"},
- {"w25x64"}, {"w25q64"}, {"w25q128"}, {"w25q80"},
- {"w25q80bl"}, {"w25q128"}, {"w25q256"}, {"cat25c11"},
+ {"w25x64"}, {"w25q64"}, {"w25q80"}, {"w25q80bl"},
+ {"w25q128"}, {"w25q256"}, {"cat25c11"},
{"cat25c03"}, {"cat25c09"}, {"cat25c17"}, {"cat25128"},
{ },
};
MODULE_DEVICE_TABLE(spi, m25p_ids);
-
static struct spi_driver m25p80_driver = {
.driver = {
.name = "m25p80",
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index dd22ce2cc9a..0099aba72a8 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -149,7 +149,7 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
{
struct dataflash *priv = mtd->priv;
struct spi_device *spi = priv->spi;
- struct spi_transfer x = { .tx_dma = 0, };
+ struct spi_transfer x = { };
struct spi_message msg;
unsigned blocksize = priv->page_size << 3;
uint8_t *command;
@@ -235,7 +235,7 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct dataflash *priv = mtd->priv;
- struct spi_transfer x[2] = { { .tx_dma = 0, }, };
+ struct spi_transfer x[2] = { };
struct spi_message msg;
unsigned int addr;
uint8_t *command;
@@ -301,7 +301,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
{
struct dataflash *priv = mtd->priv;
struct spi_device *spi = priv->spi;
- struct spi_transfer x[2] = { { .tx_dma = 0, }, };
+ struct spi_transfer x[2] = { };
struct spi_message msg;
unsigned int pageaddr, addr, offset, writelen;
size_t remaining = len;
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index effd9a4ef7e..8b66e52ca3c 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -17,7 +17,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c
index f02603e1bfe..708b7e8c8b1 100644
--- a/drivers/mtd/devices/pmc551.c
+++ b/drivers/mtd/devices/pmc551.c
@@ -812,8 +812,7 @@ static int __init init_pmc551(void)
}
/* Exited early, reference left over */
- if (PCI_Device)
- pci_dev_put(PCI_Device);
+ pci_dev_put(PCI_Device);
if (!pmc551list) {
printk(KERN_NOTICE "pmc551: not detected\n");
diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
index 487e64f411a..1388c8d7f30 100644
--- a/drivers/mtd/inftlmount.c
+++ b/drivers/mtd/inftlmount.c
@@ -518,7 +518,7 @@ void INFTL_dumpVUchains(struct INFTLrecord *s)
pr_debug("INFTL Virtual Unit Chains:\n");
for (logical = 0; logical < s->nb_blocks; logical++) {
block = s->VUtable[logical];
- if (block > s->nb_blocks)
+ if (block >= s->nb_blocks)
continue;
pr_debug(" LOGICAL %d --> %d ", logical, block);
for (i = 0; i < s->nb_blocks; i++) {
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
index 6ea51e54904..41730feeace 100644
--- a/drivers/mtd/maps/bfin-async-flash.c
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -126,7 +126,6 @@ static const char * const part_probe_types[] = {
static int bfin_flash_probe(struct platform_device *pdev)
{
- int ret;
struct physmap_flash_data *pdata = dev_get_platdata(&pdev->dev);
struct resource *memory = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct resource *flash_ambctl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 991d0cb871f..f35cd208131 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -47,14 +47,12 @@ static int of_flash_remove(struct platform_device *dev)
return 0;
dev_set_drvdata(&dev->dev, NULL);
- if (info->cmtd != info->list[0].mtd) {
+ if (info->cmtd) {
mtd_device_unregister(info->cmtd);
- mtd_concat_destroy(info->cmtd);
+ if (info->cmtd != info->list[0].mtd)
+ mtd_concat_destroy(info->cmtd);
}
- if (info->cmtd)
- mtd_device_unregister(info->cmtd);
-
for (i = 0; i < info->list_size; i++) {
if (info->list[i].mtd)
map_destroy(info->list[i].mtd);
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index dd10646982a..7d0150d2043 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -75,10 +75,12 @@ config MTD_NAND_DENALI_SCRATCH_REG_ADDR
boards, the scratch register is at 0xFF108018.
config MTD_NAND_GPIO
- tristate "GPIO NAND Flash driver"
+ tristate "GPIO assisted NAND Flash driver"
depends on GPIOLIB
help
- This enables a GPIO based NAND flash driver.
+ This enables a NAND flash driver where control signals are
+ connected to GPIO pins, and commands and data are communicated
+ via a memory mapped interface.
config MTD_NAND_AMS_DELTA
tristate "NAND Flash device on Amstrad E3"
@@ -516,4 +518,10 @@ config MTD_NAND_XWAY
Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached
to the External Bus Unit (EBU).
+config MTD_NAND_SUNXI
+ tristate "Support for NAND on Allwinner SoCs"
+ depends on ARCH_SUNXI
+ help
+ Enables support for NAND Flash chips on Allwinner SoCs.
+
endif # MTD_NAND
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 9c847e469ca..bd38f21d2e2 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -50,5 +50,6 @@ obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o
obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/
obj-$(CONFIG_MTD_NAND_XWAY) += xway_nand.o
obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/
+obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_nand.o
nand-objs := nand_base.o nand_bbt.o nand_timings.o
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 84c38f3c65b..a345e7b2463 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -92,7 +92,7 @@ static struct nand_ecclayout atmel_oobinfo_small = {
struct atmel_nfc {
void __iomem *base_cmd_regs;
void __iomem *hsmc_regs;
- void __iomem *sram_bank0;
+ void *sram_bank0;
dma_addr_t sram_bank0_phys;
bool use_nfc_sram;
bool write_by_sram;
@@ -105,7 +105,7 @@ struct atmel_nfc {
struct completion comp_xfer_done;
/* Point to the sram bank which include readed data via NFC */
- void __iomem *data_in_sram;
+ void *data_in_sram;
bool will_write_sram;
};
static struct atmel_nfc nand_nfc;
@@ -127,6 +127,7 @@ struct atmel_nand_host {
bool has_pmecc;
u8 pmecc_corr_cap;
u16 pmecc_sector_size;
+ bool has_no_lookup_table;
u32 pmecc_lookup_table_offset;
u32 pmecc_lookup_table_offset_512;
u32 pmecc_lookup_table_offset_1024;
@@ -256,26 +257,6 @@ static int atmel_nand_set_enable_ready_pins(struct mtd_info *mtd)
return res;
}
-static void memcpy32_fromio(void *trg, const void __iomem *src, size_t size)
-{
- int i;
- u32 *t = trg;
- const __iomem u32 *s = src;
-
- for (i = 0; i < (size >> 2); i++)
- *t++ = readl_relaxed(s++);
-}
-
-static void memcpy32_toio(void __iomem *trg, const void *src, int size)
-{
- int i;
- u32 __iomem *t = trg;
- const u32 *s = src;
-
- for (i = 0; i < (size >> 2); i++)
- writel_relaxed(*s++, t++);
-}
-
/*
* Minimal-overhead PIO for data access.
*/
@@ -285,7 +266,7 @@ static void atmel_read_buf8(struct mtd_info *mtd, u8 *buf, int len)
struct atmel_nand_host *host = nand_chip->priv;
if (host->nfc && host->nfc->use_nfc_sram && host->nfc->data_in_sram) {
- memcpy32_fromio(buf, host->nfc->data_in_sram, len);
+ memcpy(buf, host->nfc->data_in_sram, len);
host->nfc->data_in_sram += len;
} else {
__raw_readsb(nand_chip->IO_ADDR_R, buf, len);
@@ -298,7 +279,7 @@ static void atmel_read_buf16(struct mtd_info *mtd, u8 *buf, int len)
struct atmel_nand_host *host = nand_chip->priv;
if (host->nfc && host->nfc->use_nfc_sram && host->nfc->data_in_sram) {
- memcpy32_fromio(buf, host->nfc->data_in_sram, len);
+ memcpy(buf, host->nfc->data_in_sram, len);
host->nfc->data_in_sram += len;
} else {
__raw_readsw(nand_chip->IO_ADDR_R, buf, len / 2);
@@ -1112,12 +1093,66 @@ static int pmecc_choose_ecc(struct atmel_nand_host *host,
return 0;
}
+static inline int deg(unsigned int poly)
+{
+ /* polynomial degree is the most-significant bit index */
+ return fls(poly) - 1;
+}
+
+static int build_gf_tables(int mm, unsigned int poly,
+ int16_t *index_of, int16_t *alpha_to)
+{
+ unsigned int i, x = 1;
+ const unsigned int k = 1 << deg(poly);
+ unsigned int nn = (1 << mm) - 1;
+
+ /* primitive polynomial must be of degree m */
+ if (k != (1u << mm))
+ return -EINVAL;
+
+ for (i = 0; i < nn; i++) {
+ alpha_to[i] = x;
+ index_of[x] = i;
+ if (i && (x == 1))
+ /* polynomial is not primitive (a^i=1 with 0<i<2^m-1) */
+ return -EINVAL;
+ x <<= 1;
+ if (x & k)
+ x ^= poly;
+ }
+ alpha_to[nn] = 1;
+ index_of[0] = 0;
+
+ return 0;
+}
+
+static uint16_t *create_lookup_table(struct device *dev, int sector_size)
+{
+ int degree = (sector_size == 512) ?
+ PMECC_GF_DIMENSION_13 :
+ PMECC_GF_DIMENSION_14;
+ unsigned int poly = (sector_size == 512) ?
+ PMECC_GF_13_PRIMITIVE_POLY :
+ PMECC_GF_14_PRIMITIVE_POLY;
+ int table_size = (sector_size == 512) ?
+ PMECC_LOOKUP_TABLE_SIZE_512 :
+ PMECC_LOOKUP_TABLE_SIZE_1024;
+
+ int16_t *addr = devm_kzalloc(dev, 2 * table_size * sizeof(uint16_t),
+ GFP_KERNEL);
+ if (addr && build_gf_tables(degree, poly, addr, addr + table_size))
+ return NULL;
+
+ return addr;
+}
+
static int atmel_pmecc_nand_init_params(struct platform_device *pdev,
struct atmel_nand_host *host)
{
struct mtd_info *mtd = &host->mtd;
struct nand_chip *nand_chip = &host->nand_chip;
struct resource *regs, *regs_pmerr, *regs_rom;
+ uint16_t *galois_table;
int cap, sector_size, err_no;
err_no = pmecc_choose_ecc(host, &cap, &sector_size);
@@ -1163,8 +1198,24 @@ static int atmel_pmecc_nand_init_params(struct platform_device *pdev,
regs_rom = platform_get_resource(pdev, IORESOURCE_MEM, 3);
host->pmecc_rom_base = devm_ioremap_resource(&pdev->dev, regs_rom);
if (IS_ERR(host->pmecc_rom_base)) {
- err_no = PTR_ERR(host->pmecc_rom_base);
- goto err;
+ if (!host->has_no_lookup_table)
+ /* Don't display the information again */
+ dev_err(host->dev, "Can not get I/O resource for ROM, will build a lookup table in runtime!\n");
+
+ host->has_no_lookup_table = true;
+ }
+
+ if (host->has_no_lookup_table) {
+ /* Build the look-up table in runtime */
+ galois_table = create_lookup_table(host->dev, sector_size);
+ if (!galois_table) {
+ dev_err(host->dev, "Failed to build a lookup table in runtime!\n");
+ err_no = -EINVAL;
+ goto err;
+ }
+
+ host->pmecc_rom_base = (void __iomem *)galois_table;
+ host->pmecc_lookup_table_offset = 0;
}
nand_chip->ecc.size = sector_size;
@@ -1501,8 +1552,10 @@ static int atmel_of_init_port(struct atmel_nand_host *host,
if (of_property_read_u32_array(np, "atmel,pmecc-lookup-table-offset",
offset, 2) != 0) {
- dev_err(host->dev, "Cannot get PMECC lookup table offset\n");
- return -EINVAL;
+ dev_err(host->dev, "Cannot get PMECC lookup table offset, will build a lookup table in runtime.\n");
+ host->has_no_lookup_table = true;
+ /* Will build a lookup table and initialize the offset later */
+ return 0;
}
if (!offset[0] && !offset[1]) {
dev_err(host->dev, "Invalid PMECC lookup table offset\n");
@@ -1899,7 +1952,7 @@ static int nfc_sram_write_page(struct mtd_info *mtd, struct nand_chip *chip,
int cfg, len;
int status = 0;
struct atmel_nand_host *host = chip->priv;
- void __iomem *sram = host->nfc->sram_bank0 + nfc_get_sram_off(host);
+ void *sram = host->nfc->sram_bank0 + nfc_get_sram_off(host);
/* Subpage write is not supported */
if (offset || (data_len < mtd->writesize))
@@ -1910,14 +1963,14 @@ static int nfc_sram_write_page(struct mtd_info *mtd, struct nand_chip *chip,
if (use_dma) {
if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) != 0)
/* Fall back to use cpu copy */
- memcpy32_toio(sram, buf, len);
+ memcpy(sram, buf, len);
} else {
- memcpy32_toio(sram, buf, len);
+ memcpy(sram, buf, len);
}
cfg = nfc_readl(host->nfc->hsmc_regs, CFG);
if (unlikely(raw) && oob_required) {
- memcpy32_toio(sram + len, chip->oob_poi, mtd->oobsize);
+ memcpy(sram + len, chip->oob_poi, mtd->oobsize);
len += mtd->oobsize;
nfc_writel(host->nfc->hsmc_regs, CFG, cfg | NFC_CFG_WSPARE);
} else {
@@ -2260,7 +2313,8 @@ static int atmel_nand_nfc_probe(struct platform_device *pdev)
nfc_sram = platform_get_resource(pdev, IORESOURCE_MEM, 2);
if (nfc_sram) {
- nfc->sram_bank0 = devm_ioremap_resource(&pdev->dev, nfc_sram);
+ nfc->sram_bank0 = (void * __force)
+ devm_ioremap_resource(&pdev->dev, nfc_sram);
if (IS_ERR(nfc->sram_bank0)) {
dev_warn(&pdev->dev, "Fail to ioremap the NFC sram with error: %ld. So disable NFC sram.\n",
PTR_ERR(nfc->sram_bank0));
diff --git a/drivers/mtd/nand/atmel_nand_ecc.h b/drivers/mtd/nand/atmel_nand_ecc.h
index 8a1e9a68675..d4035e335ad 100644
--- a/drivers/mtd/nand/atmel_nand_ecc.h
+++ b/drivers/mtd/nand/atmel_nand_ecc.h
@@ -142,6 +142,10 @@
#define PMECC_GF_DIMENSION_13 13
#define PMECC_GF_DIMENSION_14 14
+/* Primitive Polynomial used by PMECC */
+#define PMECC_GF_13_PRIMITIVE_POLY 0x201b
+#define PMECC_GF_14_PRIMITIVE_POLY 0x4443
+
#define PMECC_LOOKUP_TABLE_SIZE_512 0x2000
#define PMECC_LOOKUP_TABLE_SIZE_1024 0x4000
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index 4e66726da9a..9a0f45f1d93 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -529,50 +529,6 @@ static int cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
return 0;
}
-static int cafe_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
- uint32_t offset, int data_len, const uint8_t *buf,
- int oob_required, int page, int cached, int raw)
-{
- int status;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
-
- if (unlikely(raw))
- status = chip->ecc.write_page_raw(mtd, chip, buf, oob_required);
- else
- status = chip->ecc.write_page(mtd, chip, buf, oob_required);
-
- if (status < 0)
- return status;
-
- /*
- * Cached progamming disabled for now, Not sure if its worth the
- * trouble. The speed gain is not very impressive. (2.3->2.6Mib/s)
- */
- cached = 0;
-
- if (!cached || !(chip->options & NAND_CACHEPRG)) {
-
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- status = chip->waitfunc(mtd, chip);
- /*
- * See if operation failed and additional status checks are
- * available
- */
- if ((status & NAND_STATUS_FAIL) && (chip->errstat))
- status = chip->errstat(mtd, chip, FL_WRITING, status,
- page);
-
- if (status & NAND_STATUS_FAIL)
- return -EIO;
- } else {
- chip->cmdfunc(mtd, NAND_CMD_CACHEDPROG, -1, -1);
- status = chip->waitfunc(mtd, chip);
- }
-
- return 0;
-}
-
static int cafe_nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
{
return 0;
@@ -800,7 +756,6 @@ static int cafe_nand_probe(struct pci_dev *pdev,
cafe->nand.ecc.hwctl = (void *)cafe_nand_bug;
cafe->nand.ecc.calculate = (void *)cafe_nand_bug;
cafe->nand.ecc.correct = (void *)cafe_nand_bug;
- cafe->nand.write_page = cafe_nand_write_page;
cafe->nand.ecc.write_page = cafe_nand_write_page_lowlevel;
cafe->nand.ecc.write_oob = cafe_nand_write_oob;
cafe->nand.ecc.read_page = cafe_nand_read_page;
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index b9ef7a6bba4..4c05f4f6a5c 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -31,7 +31,6 @@
#include <linux/mtd/nand_ecc.h>
#include <linux/fsl_ifc.h>
-#define FSL_IFC_V1_1_0 0x01010000
#define ERR_BYTE 0xFF /* Value returned for read
bytes when read failed */
#define IFC_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait
@@ -877,7 +876,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
struct nand_chip *chip = &priv->chip;
struct nand_ecclayout *layout;
- u32 csor, ver;
+ u32 csor;
/* Fill in fsl_ifc_mtd structure */
priv->mtd.priv = chip;
@@ -984,8 +983,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
chip->ecc.mode = NAND_ECC_SOFT;
}
- ver = ioread32be(&ifc->ifc_rev);
- if (ver == FSL_IFC_V1_1_0)
+ if (ctrl->version == FSL_IFC_VERSION_1_1_0)
fsl_ifc_sram_init(priv);
return 0;
@@ -1045,12 +1043,12 @@ static int fsl_ifc_nand_probe(struct platform_device *dev)
}
/* find which chip select it is connected to */
- for (bank = 0; bank < FSL_IFC_BANK_COUNT; bank++) {
+ for (bank = 0; bank < fsl_ifc_ctrl_dev->banks; bank++) {
if (match_bank(ifc, bank, res.start))
break;
}
- if (bank >= FSL_IFC_BANK_COUNT) {
+ if (bank >= fsl_ifc_ctrl_dev->banks) {
dev_err(&dev->dev, "%s: address did not match any chip selects\n",
__func__);
return -ENODEV;
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index 918283999a4..73c4048c3a5 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -8,7 +8,9 @@
*
* © 2004 Simtec Electronics
*
- * Device driver for NAND connected via GPIO
+ * Device driver for NAND flash that uses a memory mapped interface to
+ * read/write the NAND commands and data, and GPIO pins for control signals
+ * (the DT binding refers to this as "GPIO assisted NAND flash")
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index 87e658ce23e..27f272ed502 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -1353,3 +1353,156 @@ int gpmi_read_page(struct gpmi_nand_data *this,
set_dma_type(this, DMA_FOR_READ_ECC_PAGE);
return start_dma_with_bch_irq(this, desc);
}
+
+/**
+ * gpmi_copy_bits - copy bits from one memory region to another
+ * @dst: destination buffer
+ * @dst_bit_off: bit offset we're starting to write at
+ * @src: source buffer
+ * @src_bit_off: bit offset we're starting to read from
+ * @nbits: number of bits to copy
+ *
+ * This function copies bits from one memory region to another, and is used by
+ * the GPMI driver to copy ECC sections which are not guaranteed to be byte
+ * aligned.
+ *
+ * src and dst should not overlap.
+ *
+ */
+void gpmi_copy_bits(u8 *dst, size_t dst_bit_off,
+ const u8 *src, size_t src_bit_off,
+ size_t nbits)
+{
+ size_t i;
+ size_t nbytes;
+ u32 src_buffer = 0;
+ size_t bits_in_src_buffer = 0;
+
+ if (!nbits)
+ return;
+
+ /*
+ * Move src and dst pointers to the closest byte pointer and store bit
+ * offsets within a byte.
+ */
+ src += src_bit_off / 8;
+ src_bit_off %= 8;
+
+ dst += dst_bit_off / 8;
+ dst_bit_off %= 8;
+
+ /*
+ * Initialize the src_buffer value with bits available in the first
+ * byte of data so that we end up with a byte aligned src pointer.
+ */
+ if (src_bit_off) {
+ src_buffer = src[0] >> src_bit_off;
+ if (nbits >= (8 - src_bit_off)) {
+ bits_in_src_buffer += 8 - src_bit_off;
+ } else {
+ src_buffer &= GENMASK(nbits - 1, 0);
+ bits_in_src_buffer += nbits;
+ }
+ nbits -= bits_in_src_buffer;
+ src++;
+ }
+
+ /* Calculate the number of bytes that can be copied from src to dst. */
+ nbytes = nbits / 8;
+
+ /* Try to align dst to a byte boundary. */
+ if (dst_bit_off) {
+ if (bits_in_src_buffer < (8 - dst_bit_off) && nbytes) {
+ src_buffer |= src[0] << bits_in_src_buffer;
+ bits_in_src_buffer += 8;
+ src++;
+ nbytes--;
+ }
+
+ if (bits_in_src_buffer >= (8 - dst_bit_off)) {
+ dst[0] &= GENMASK(dst_bit_off - 1, 0);
+ dst[0] |= src_buffer << dst_bit_off;
+ src_buffer >>= (8 - dst_bit_off);
+ bits_in_src_buffer -= (8 - dst_bit_off);
+ dst_bit_off = 0;
+ dst++;
+ if (bits_in_src_buffer > 7) {
+ bits_in_src_buffer -= 8;
+ dst[0] = src_buffer;
+ dst++;
+ src_buffer >>= 8;
+ }
+ }
+ }
+
+ if (!bits_in_src_buffer && !dst_bit_off) {
+ /*
+ * Both src and dst pointers are byte aligned, thus we can
+ * just use the optimized memcpy function.
+ */
+ if (nbytes)
+ memcpy(dst, src, nbytes);
+ } else {
+ /*
+ * src buffer is not byte aligned, hence we have to copy each
+ * src byte to the src_buffer variable before extracting a byte
+ * to store in dst.
+ */
+ for (i = 0; i < nbytes; i++) {
+ src_buffer |= src[i] << bits_in_src_buffer;
+ dst[i] = src_buffer;
+ src_buffer >>= 8;
+ }
+ }
+ /* Update dst and src pointers */
+ dst += nbytes;
+ src += nbytes;
+
+ /*
+ * nbits is the number of remaining bits. It should not exceed 8 as
+ * we've already copied as many bytes as possible.
+ */
+ nbits %= 8;
+
+ /*
+ * If there's no more bits to copy to the destination and src buffer
+ * was already byte aligned, then we're done.
+ */
+ if (!nbits && !bits_in_src_buffer)
+ return;
+
+ /* Copy the remaining bits to src_buffer */
+ if (nbits)
+ src_buffer |= (*src & GENMASK(nbits - 1, 0)) <<
+ bits_in_src_buffer;
+ bits_in_src_buffer += nbits;
+
+ /*
+ * In case there were not enough bits to get a byte aligned dst buffer
+ * prepare the src_buffer variable to match the dst organization (shift
+ * src_buffer by dst_bit_off and retrieve the least significant bits
+ * from dst).
+ */
+ if (dst_bit_off)
+ src_buffer = (src_buffer << dst_bit_off) |
+ (*dst & GENMASK(dst_bit_off - 1, 0));
+ bits_in_src_buffer += dst_bit_off;
+
+ /*
+ * Keep most significant bits from dst if we end up with an unaligned
+ * number of bits.
+ */
+ nbytes = bits_in_src_buffer / 8;
+ if (bits_in_src_buffer % 8) {
+ src_buffer |= (dst[nbytes] &
+ GENMASK(7, bits_in_src_buffer % 8)) <<
+ (nbytes * 8);
+ nbytes++;
+ }
+
+ /* Copy the remaining bytes to dst */
+ for (i = 0; i < nbytes; i++) {
+ dst[i] = src_buffer;
+ src_buffer >>= 8;
+ }
+}
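
For reference, a tiny illustration of how gpmi_copy_bits() behaves, with bit 0 taken as the least significant bit of the first byte (values chosen only for the example):

    	u8 src[2] = { 0xab, 0xcd };
    	u8 dst[1] = { 0x00 };

    	/* Copy the 8 bits starting at bit 4 of src into dst at bit 0:
    	 * the low nibble comes from src[0] >> 4 (0xa), the high nibble
    	 * from src[1] & 0xf (0xd), so dst[0] ends up as 0xda. */
    	gpmi_copy_bits(dst, 0, src, 4, 8);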
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 959cb9b7031..4f3851a24bb 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -791,6 +791,7 @@ static void gpmi_free_dma_buffer(struct gpmi_nand_data *this)
this->page_buffer_phys);
kfree(this->cmd_buffer);
kfree(this->data_buffer_dma);
+ kfree(this->raw_buffer);
this->cmd_buffer = NULL;
this->data_buffer_dma = NULL;
@@ -837,6 +838,9 @@ static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
if (!this->page_buffer_virt)
goto error_alloc;
+ this->raw_buffer = kzalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
+ if (!this->raw_buffer)
+ goto error_alloc;
/* Slice up the page buffer. */
this->payload_virt = this->page_buffer_virt;
@@ -1347,6 +1351,199 @@ gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
return status & NAND_STATUS_FAIL ? -EIO : 0;
}
+/*
+ * This function reads a NAND page without involving the ECC engine (no HW
+ * ECC correction).
+ * The tricky part in the GPMI/BCH controller is that it stores ECC bits
+ * inline (interleaved with payload DATA), and does not align data chunks on
+ * byte boundaries.
+ * We thus need to take care moving the payload data and ECC bits stored in the
+ * page into the provided buffers, which is why we're using gpmi_copy_bits.
+ *
+ * See set_geometry_by_ecc_info inline comments to have a full description
+ * of the layout used by the GPMI controller.
+ */
+static int gpmi_ecc_read_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct gpmi_nand_data *this = chip->priv;
+ struct bch_geometry *nfc_geo = &this->bch_geometry;
+ int eccsize = nfc_geo->ecc_chunk_size;
+ int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
+ u8 *tmp_buf = this->raw_buffer;
+ size_t src_bit_off;
+ size_t oob_bit_off;
+ size_t oob_byte_off;
+ uint8_t *oob = chip->oob_poi;
+ int step;
+
+ chip->read_buf(mtd, tmp_buf,
+ mtd->writesize + mtd->oobsize);
+
+ /*
+ * If required, swap the bad block marker and the data stored in the
+ * metadata section, so that we don't wrongly consider a block as bad.
+ *
+ * See the layout description for a detailed explanation on why this
+ * is needed.
+ */
+ if (this->swap_block_mark) {
+ u8 swap = tmp_buf[0];
+
+ tmp_buf[0] = tmp_buf[mtd->writesize];
+ tmp_buf[mtd->writesize] = swap;
+ }
+
+ /*
+ * Copy the metadata section into the oob buffer (this section is
+ * guaranteed to be aligned on a byte boundary).
+ */
+ if (oob_required)
+ memcpy(oob, tmp_buf, nfc_geo->metadata_size);
+
+ oob_bit_off = nfc_geo->metadata_size * 8;
+ src_bit_off = oob_bit_off;
+
+ /* Extract interleaved payload data and ECC bits */
+ for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
+ if (buf)
+ gpmi_copy_bits(buf, step * eccsize * 8,
+ tmp_buf, src_bit_off,
+ eccsize * 8);
+ src_bit_off += eccsize * 8;
+
+ /* Align the last ECC block to a byte boundary */
+ if (step == nfc_geo->ecc_chunk_count - 1 &&
+ (oob_bit_off + eccbits) % 8)
+ eccbits += 8 - ((oob_bit_off + eccbits) % 8);
+
+ if (oob_required)
+ gpmi_copy_bits(oob, oob_bit_off,
+ tmp_buf, src_bit_off,
+ eccbits);
+
+ src_bit_off += eccbits;
+ oob_bit_off += eccbits;
+ }
+
+ if (oob_required) {
+ oob_byte_off = oob_bit_off / 8;
+
+ if (oob_byte_off < mtd->oobsize)
+ memcpy(oob + oob_byte_off,
+ tmp_buf + mtd->writesize + oob_byte_off,
+ mtd->oobsize - oob_byte_off);
+ }
+
+ return 0;
+}
+
+/*
+ * This function writes a NAND page without involving the ECC engine (no HW
+ * ECC generation).
+ * The tricky part in the GPMI/BCH controller is that it stores ECC bits
+ * inline (interleaved with payload DATA), and does not align data chunks on
+ * byte boundaries.
+ * We thus need to take care moving the OOB area at the right place in the
+ * final page, which is why we're using gpmi_copy_bits.
+ *
+ * See set_geometry_by_ecc_info inline comments to have a full description
+ * of the layout used by the GPMI controller.
+ */
+static int gpmi_ecc_write_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf,
+ int oob_required)
+{
+ struct gpmi_nand_data *this = chip->priv;
+ struct bch_geometry *nfc_geo = &this->bch_geometry;
+ int eccsize = nfc_geo->ecc_chunk_size;
+ int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
+ u8 *tmp_buf = this->raw_buffer;
+ uint8_t *oob = chip->oob_poi;
+ size_t dst_bit_off;
+ size_t oob_bit_off;
+ size_t oob_byte_off;
+ int step;
+
+ /*
+ * Initialize all bits to 1 in case we don't have a buffer for the
+ * payload or oob data in order to leave unspecified bits of data
+ * in their initial state.
+ */
+ if (!buf || !oob_required)
+ memset(tmp_buf, 0xff, mtd->writesize + mtd->oobsize);
+
+ /*
+ * First copy the metadata section (stored in oob buffer) at the
+ * beginning of the page, as imposed by the GPMI layout.
+ */
+ memcpy(tmp_buf, oob, nfc_geo->metadata_size);
+ oob_bit_off = nfc_geo->metadata_size * 8;
+ dst_bit_off = oob_bit_off;
+
+ /* Interleave payload data and ECC bits */
+ for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
+ if (buf)
+ gpmi_copy_bits(tmp_buf, dst_bit_off,
+ buf, step * eccsize * 8, eccsize * 8);
+ dst_bit_off += eccsize * 8;
+
+ /* Align the last ECC block to a byte boundary */
+ if (step == nfc_geo->ecc_chunk_count - 1 &&
+ (oob_bit_off + eccbits) % 8)
+ eccbits += 8 - ((oob_bit_off + eccbits) % 8);
+
+ if (oob_required)
+ gpmi_copy_bits(tmp_buf, dst_bit_off,
+ oob, oob_bit_off, eccbits);
+
+ dst_bit_off += eccbits;
+ oob_bit_off += eccbits;
+ }
+
+ oob_byte_off = oob_bit_off / 8;
+
+ if (oob_required && oob_byte_off < mtd->oobsize)
+ memcpy(tmp_buf + mtd->writesize + oob_byte_off,
+ oob + oob_byte_off, mtd->oobsize - oob_byte_off);
+
+ /*
+ * If required, swap the bad block marker and the first byte of the
+ * metadata section, so that we don't modify the bad block marker.
+ *
+ * See the layout description for a detailed explanation on why this
+ * is needed.
+ */
+ if (this->swap_block_mark) {
+ u8 swap = tmp_buf[0];
+
+ tmp_buf[0] = tmp_buf[mtd->writesize];
+ tmp_buf[mtd->writesize] = swap;
+ }
+
+ chip->write_buf(mtd, tmp_buf, mtd->writesize + mtd->oobsize);
+
+ return 0;
+}
+
+static int gpmi_ecc_read_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+ return gpmi_ecc_read_page_raw(mtd, chip, NULL, 1, page);
+}
+
+static int gpmi_ecc_write_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
+
+ return gpmi_ecc_write_page_raw(mtd, chip, NULL, 1);
+}
+
static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
struct nand_chip *chip = mtd->priv;
@@ -1664,6 +1861,10 @@ static int gpmi_init_last(struct gpmi_nand_data *this)
ecc->write_page = gpmi_ecc_write_page;
ecc->read_oob = gpmi_ecc_read_oob;
ecc->write_oob = gpmi_ecc_write_oob;
+ ecc->read_page_raw = gpmi_ecc_read_page_raw;
+ ecc->write_page_raw = gpmi_ecc_write_page_raw;
+ ecc->read_oob_raw = gpmi_ecc_read_oob_raw;
+ ecc->write_oob_raw = gpmi_ecc_write_oob_raw;
ecc->mode = NAND_ECC_HW;
ecc->size = bch_geo->ecc_chunk_size;
ecc->strength = bch_geo->ecc_strength;
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
index 32c6ba49f98..544062f6502 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
@@ -189,6 +189,8 @@ struct gpmi_nand_data {
void *auxiliary_virt;
dma_addr_t auxiliary_phys;
+ void *raw_buffer;
+
/* DMA channels */
#define DMA_CHANS 8
struct dma_chan *dma_chans[DMA_CHANS];
@@ -290,6 +292,10 @@ extern int gpmi_send_page(struct gpmi_nand_data *,
extern int gpmi_read_page(struct gpmi_nand_data *,
dma_addr_t payload, dma_addr_t auxiliary);
+void gpmi_copy_bits(u8 *dst, size_t dst_bit_off,
+ const u8 *src, size_t src_bit_off,
+ size_t nbits);
+
/* BCH : Status Block Completion Codes */
#define STATUS_GOOD 0x00
#define STATUS_ERASED 0xff
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index e1d56beeca7..a8f550fec35 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -280,14 +280,10 @@ static void memcpy32_fromio(void *trg, const void __iomem *src, size_t size)
*t++ = __raw_readl(s++);
}
-static void memcpy32_toio(void __iomem *trg, const void *src, int size)
+static inline void memcpy32_toio(void __iomem *trg, const void *src, int size)
{
- int i;
- u32 __iomem *t = trg;
- const u32 *s = src;
-
- for (i = 0; i < (size >> 2); i++)
- __raw_writel(*s++, t++);
+ /* __iowrite32_copy takes a count of 32-bit words, so divide the byte size by 4 */
+ __iowrite32_copy(trg, src, size / 4);
}
static int check_int_v3(struct mxc_nand_host *host)
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 5b5c6271281..41585dfb206 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -485,11 +485,11 @@ static int nand_check_wp(struct mtd_info *mtd)
}
/**
- * nand_block_checkbad - [GENERIC] Check if a block is marked bad
+ * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
* @mtd: MTD device structure
* @ofs: offset from device start
*
- * Check if the block is mark as reserved.
+ * Check if the block is marked as reserved.
*/
static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
@@ -720,7 +720,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
/*
* Program and erase have their own busy handlers status, sequential
- * in, and deplete1 need no delay.
+ * in and status need no delay.
*/
switch (command) {
@@ -3765,9 +3765,9 @@ ident_done:
pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
type->name);
- pr_info("%dMiB, %s, page size: %d, OOB size: %d\n",
+ pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
(int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
- mtd->writesize, mtd->oobsize);
+ mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
return type;
}
@@ -4035,7 +4035,7 @@ int nand_scan_tail(struct mtd_info *mtd)
*/
if (!ecc->size && (mtd->oobsize >= 64)) {
ecc->size = 512;
- ecc->bytes = 7;
+ ecc->bytes = DIV_ROUND_UP(13 * ecc->strength, 8);
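+			/*
+			 * Illustration (not part of the patch itself): with 512-byte
+			 * steps the BCH code works over GF(2^13), so each bit of
+			 * strength costs 13 ECC bits; strength 4 gives
+			 * DIV_ROUND_UP(52, 8) = 7 bytes, the value that was
+			 * hard-coded before this change.
+			 */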
}
ecc->priv = nand_bch_init(mtd, ecc->size, ecc->bytes,
&ecc->layout);
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index fbde8910524..dd620c19c61 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -178,6 +178,7 @@ struct nand_manufacturers nand_manuf_ids[] = {
{NAND_MFR_EON, "Eon"},
{NAND_MFR_SANDISK, "SanDisk"},
{NAND_MFR_INTEL, "Intel"},
+ {NAND_MFR_ATO, "ATO"},
{0x0, "Unknown"}
};
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 7dc1dd28d89..ab5bbf56743 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -87,10 +87,6 @@
#define CONFIG_NANDSIM_MAX_PARTS 32
#endif
-static uint first_id_byte = CONFIG_NANDSIM_FIRST_ID_BYTE;
-static uint second_id_byte = CONFIG_NANDSIM_SECOND_ID_BYTE;
-static uint third_id_byte = CONFIG_NANDSIM_THIRD_ID_BYTE;
-static uint fourth_id_byte = CONFIG_NANDSIM_FOURTH_ID_BYTE;
static uint access_delay = CONFIG_NANDSIM_ACCESS_DELAY;
static uint programm_delay = CONFIG_NANDSIM_PROGRAMM_DELAY;
static uint erase_delay = CONFIG_NANDSIM_ERASE_DELAY;
@@ -111,11 +107,19 @@ static unsigned int overridesize = 0;
static char *cache_file = NULL;
static unsigned int bbt;
static unsigned int bch;
+static u_char id_bytes[8] = {
+ [0] = CONFIG_NANDSIM_FIRST_ID_BYTE,
+ [1] = CONFIG_NANDSIM_SECOND_ID_BYTE,
+ [2] = CONFIG_NANDSIM_THIRD_ID_BYTE,
+ [3] = CONFIG_NANDSIM_FOURTH_ID_BYTE,
+ [4 ... 7] = 0xFF,
+};
-module_param(first_id_byte, uint, 0400);
-module_param(second_id_byte, uint, 0400);
-module_param(third_id_byte, uint, 0400);
-module_param(fourth_id_byte, uint, 0400);
+module_param_array(id_bytes, byte, NULL, 0400);
+module_param_named(first_id_byte, id_bytes[0], byte, 0400);
+module_param_named(second_id_byte, id_bytes[1], byte, 0400);
+module_param_named(third_id_byte, id_bytes[2], byte, 0400);
+module_param_named(fourth_id_byte, id_bytes[3], byte, 0400);
module_param(access_delay, uint, 0400);
module_param(programm_delay, uint, 0400);
module_param(erase_delay, uint, 0400);
@@ -136,10 +140,11 @@ module_param(cache_file, charp, 0400);
module_param(bbt, uint, 0400);
module_param(bch, uint, 0400);
-MODULE_PARM_DESC(first_id_byte, "The first byte returned by NAND Flash 'read ID' command (manufacturer ID)");
-MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID)");
-MODULE_PARM_DESC(third_id_byte, "The third byte returned by NAND Flash 'read ID' command");
-MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read ID' command");
+MODULE_PARM_DESC(id_bytes, "The ID bytes returned by NAND Flash 'read ID' command");
+MODULE_PARM_DESC(first_id_byte, "The first byte returned by NAND Flash 'read ID' command (manufacturer ID) (obsolete)");
+MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID) (obsolete)");
+MODULE_PARM_DESC(third_id_byte, "The third byte returned by NAND Flash 'read ID' command (obsolete)");
+MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read ID' command (obsolete)");
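+/*
+ * Usage example (ID values are illustrative only): loading the simulator with
+ * "modprobe nandsim id_bytes=0x98,0xd1,0x90,0x15,0x76,0x14" emulates a chip
+ * returning a 6-byte ID, which the four legacy scalar parameters above could
+ * not describe.
+ */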
MODULE_PARM_DESC(access_delay, "Initial page access delay (microseconds)");
MODULE_PARM_DESC(programm_delay, "Page program delay (microseconds)");
MODULE_PARM_DESC(erase_delay, "Sector erase delay (milliseconds)");
@@ -304,7 +309,7 @@ struct nandsim {
unsigned int nbparts;
uint busw; /* flash chip bus width (8 or 16) */
- u_char ids[4]; /* chip's ID bytes */
+ u_char ids[8]; /* chip's ID bytes */
uint32_t options; /* chip's characteristic bits */
uint32_t state; /* current chip state */
uint32_t nxstate; /* next expected state */
@@ -2279,17 +2284,18 @@ static int __init ns_init_module(void)
* Perform minimum nandsim structure initialization to handle
* the initial ID read command correctly
*/
- if (third_id_byte != 0xFF || fourth_id_byte != 0xFF)
+ if (id_bytes[6] != 0xFF || id_bytes[7] != 0xFF)
+ nand->geom.idbytes = 8;
+ else if (id_bytes[4] != 0xFF || id_bytes[5] != 0xFF)
+ nand->geom.idbytes = 6;
+ else if (id_bytes[2] != 0xFF || id_bytes[3] != 0xFF)
nand->geom.idbytes = 4;
else
nand->geom.idbytes = 2;
nand->regs.status = NS_STATUS_OK(nand);
nand->nxstate = STATE_UNKNOWN;
nand->options |= OPT_PAGE512; /* temporary value */
- nand->ids[0] = first_id_byte;
- nand->ids[1] = second_id_byte;
- nand->ids[2] = third_id_byte;
- nand->ids[3] = fourth_id_byte;
+ memcpy(nand->ids, id_bytes, sizeof(nand->ids));
if (bus_width == 16) {
nand->busw = 16;
chip->options |= NAND_BUSWIDTH_16;
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 6d74b56dd9f..63f858e6bf3 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -144,11 +144,13 @@ static u_char bch8_vector[] = {0xf3, 0xdb, 0x14, 0x16, 0x8b, 0xd2, 0xbe, 0xcc,
0xac, 0x6b, 0xff, 0x99, 0x7b};
static u_char bch4_vector[] = {0x00, 0x6b, 0x31, 0xdd, 0x41, 0xbc, 0x10};
-/* oob info generated runtime depending on ecc algorithm and layout selected */
-static struct nand_ecclayout omap_oobinfo;
+/* Shared among all NAND instances to synchronize access to the ECC Engine */
+static struct nand_hw_control omap_gpmc_controller = {
+ .lock = __SPIN_LOCK_UNLOCKED(omap_gpmc_controller.lock),
+ .wq = __WAIT_QUEUE_HEAD_INITIALIZER(omap_gpmc_controller.wq),
+};
struct omap_nand_info {
- struct nand_hw_control controller;
struct omap_nand_platform_data *pdata;
struct mtd_info mtd;
struct nand_chip nand;
@@ -168,6 +170,8 @@ struct omap_nand_info {
u_char *buf;
int buf_len;
struct gpmc_nand_regs reg;
+ /* generated at runtime depending on ECC algorithm and layout selected */
+ struct nand_ecclayout oobinfo;
/* fields specific for BCHx_HW ECC scheme */
struct device *elm_dev;
struct device_node *of_node;
@@ -1686,9 +1690,6 @@ static int omap_nand_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, info);
- spin_lock_init(&info->controller.lock);
- init_waitqueue_head(&info->controller.wq);
-
info->pdev = pdev;
info->gpmc_cs = pdata->cs;
info->reg = pdata->reg;
@@ -1708,7 +1709,7 @@ static int omap_nand_probe(struct platform_device *pdev)
info->phys_base = res->start;
- nand_chip->controller = &info->controller;
+ nand_chip->controller = &omap_gpmc_controller;
nand_chip->IO_ADDR_W = nand_chip->IO_ADDR_R;
nand_chip->cmd_ctrl = omap_hwcontrol;
@@ -1741,13 +1742,6 @@ static int omap_nand_probe(struct platform_device *pdev)
goto return_error;
}
- /* check for small page devices */
- if ((mtd->oobsize < 64) && (pdata->ecc_opt != OMAP_ECC_HAM1_CODE_HW)) {
- dev_err(&info->pdev->dev, "small page devices are not supported\n");
- err = -EINVAL;
- goto return_error;
- }
-
/* re-populate low-level callbacks based on xfer modes */
switch (pdata->xfer_type) {
case NAND_OMAP_PREFETCH_POLLED:
@@ -1840,7 +1834,7 @@ static int omap_nand_probe(struct platform_device *pdev)
}
/* populate MTD interface based on ECC scheme */
- ecclayout = &omap_oobinfo;
+ ecclayout = &info->oobinfo;
switch (info->ecc_opt) {
case OMAP_ECC_HAM1_CODE_SW:
nand_chip->ecc.mode = NAND_ECC_SOFT;
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index c53e36956bf..c3c6d305caa 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -19,7 +19,7 @@
#include <linux/mtd/partitions.h>
#include <linux/clk.h>
#include <linux/err.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/sizes.h>
#include <linux/platform_data/mtd-orion_nand.h>
@@ -85,33 +85,24 @@ static int __init orion_nand_probe(struct platform_device *pdev)
int ret = 0;
u32 val = 0;
- nc = kzalloc(sizeof(struct nand_chip) + sizeof(struct mtd_info), GFP_KERNEL);
- if (!nc) {
- ret = -ENOMEM;
- goto no_res;
- }
+ nc = devm_kzalloc(&pdev->dev,
+ sizeof(struct nand_chip) + sizeof(struct mtd_info),
+ GFP_KERNEL);
+ if (!nc)
+ return -ENOMEM;
mtd = (struct mtd_info *)(nc + 1);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ret = -ENODEV;
- goto no_res;
- }
+ io_base = devm_ioremap_resource(&pdev->dev, res);
- io_base = ioremap(res->start, resource_size(res));
- if (!io_base) {
- dev_err(&pdev->dev, "ioremap failed\n");
- ret = -EIO;
- goto no_res;
- }
+ if (IS_ERR(io_base))
+ return PTR_ERR(io_base);
if (pdev->dev.of_node) {
board = devm_kzalloc(&pdev->dev, sizeof(struct orion_nand_data),
GFP_KERNEL);
- if (!board) {
- ret = -ENOMEM;
- goto no_res;
- }
+ if (!board)
+ return -ENOMEM;
if (!of_property_read_u32(pdev->dev.of_node, "cle", &val))
board->cle = (u8)val;
else
@@ -185,9 +176,6 @@ no_dev:
clk_disable_unprepare(clk);
clk_put(clk);
}
- iounmap(io_base);
-no_res:
- kfree(nc);
return ret;
}
@@ -195,15 +183,10 @@ no_res:
static int orion_nand_remove(struct platform_device *pdev)
{
struct mtd_info *mtd = platform_get_drvdata(pdev);
- struct nand_chip *nc = mtd->priv;
struct clk *clk;
nand_release(mtd);
- iounmap(nc->IO_ADDR_W);
-
- kfree(nc);
-
clk = clk_get(&pdev->dev, NULL);
if (!IS_ERR(clk)) {
clk_disable_unprepare(clk);
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
new file mode 100644
index 00000000000..ccaa8e28338
--- /dev/null
+++ b/drivers/mtd/nand/sunxi_nand.c
@@ -0,0 +1,1432 @@
+/*
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon.dev@gmail.com>
+ *
+ * Derived from:
+ * https://github.com/yuq/sunxi-nfc-mtd
+ * Copyright (C) 2013 Qiang Yu <yuq825@gmail.com>
+ *
+ * https://github.com/hno/Allwinner-Info
+ * Copyright (C) 2013 Henrik Nordström <Henrik Nordström>
+ *
+ * Copyright (C) 2013 Dmitriy B. <rzk333@gmail.com>
+ * Copyright (C) 2013 Sergey Lapin <slapin@ossfans.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_mtd.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+
+#define NFC_REG_CTL 0x0000
+#define NFC_REG_ST 0x0004
+#define NFC_REG_INT 0x0008
+#define NFC_REG_TIMING_CTL 0x000C
+#define NFC_REG_TIMING_CFG 0x0010
+#define NFC_REG_ADDR_LOW 0x0014
+#define NFC_REG_ADDR_HIGH 0x0018
+#define NFC_REG_SECTOR_NUM 0x001C
+#define NFC_REG_CNT 0x0020
+#define NFC_REG_CMD 0x0024
+#define NFC_REG_RCMD_SET 0x0028
+#define NFC_REG_WCMD_SET 0x002C
+#define NFC_REG_IO_DATA 0x0030
+#define NFC_REG_ECC_CTL 0x0034
+#define NFC_REG_ECC_ST 0x0038
+#define NFC_REG_DEBUG 0x003C
+#define NFC_REG_ECC_CNT0 0x0040
+#define NFC_REG_ECC_CNT1 0x0044
+#define NFC_REG_ECC_CNT2 0x0048
+#define NFC_REG_ECC_CNT3 0x004c
+#define NFC_REG_USER_DATA_BASE 0x0050
+#define NFC_REG_SPARE_AREA 0x00A0
+#define NFC_RAM0_BASE 0x0400
+#define NFC_RAM1_BASE 0x0800
+
+/* bit definitions for NFC_CTL */
+#define NFC_EN BIT(0)
+#define NFC_RESET BIT(1)
+#define NFC_BUS_WIDYH BIT(2)
+#define NFC_RB_SEL BIT(3)
+#define NFC_CE_SEL GENMASK(26, 24)
+#define NFC_CE_CTL BIT(6)
+#define NFC_CE_CTL1 BIT(7)
+#define NFC_PAGE_SIZE GENMASK(11, 8)
+#define NFC_SAM BIT(12)
+#define NFC_RAM_METHOD BIT(14)
+#define NFC_DEBUG_CTL BIT(31)
+
+/* bit definitions for NFC_ST */
+#define NFC_RB_B2R BIT(0)
+#define NFC_CMD_INT_FLAG BIT(1)
+#define NFC_DMA_INT_FLAG BIT(2)
+#define NFC_CMD_FIFO_STATUS BIT(3)
+#define NFC_STA BIT(4)
+#define NFC_NATCH_INT_FLAG BIT(5)
+#define NFC_RB_STATE0 BIT(8)
+#define NFC_RB_STATE1 BIT(9)
+#define NFC_RB_STATE2 BIT(10)
+#define NFC_RB_STATE3 BIT(11)
+
+/* bit definitions for NFC_INT */
+#define NFC_B2R_INT_ENABLE BIT(0)
+#define NFC_CMD_INT_ENABLE BIT(1)
+#define NFC_DMA_INT_ENABLE BIT(2)
+#define NFC_INT_MASK (NFC_B2R_INT_ENABLE | \
+ NFC_CMD_INT_ENABLE | \
+ NFC_DMA_INT_ENABLE)
+
+/* bit definitions for NFC_CMD */
+#define NFC_CMD_LOW_BYTE GENMASK(7, 0)
+#define NFC_CMD_HIGH_BYTE GENMASK(15, 8)
+#define NFC_ADR_NUM GENMASK(18, 16)
+#define NFC_SEND_ADR BIT(19)
+#define NFC_ACCESS_DIR BIT(20)
+#define NFC_DATA_TRANS BIT(21)
+#define NFC_SEND_CMD1 BIT(22)
+#define NFC_WAIT_FLAG BIT(23)
+#define NFC_SEND_CMD2 BIT(24)
+#define NFC_SEQ BIT(25)
+#define NFC_DATA_SWAP_METHOD BIT(26)
+#define NFC_ROW_AUTO_INC BIT(27)
+#define NFC_SEND_CMD3 BIT(28)
+#define NFC_SEND_CMD4 BIT(29)
+#define NFC_CMD_TYPE GENMASK(31, 30)
+
+/* bit definitions for NFC_RCMD_SET */
+#define NFC_READ_CMD GENMASK(7, 0)
+#define NFC_RANDOM_READ_CMD0 GENMASK(15, 8)
+#define NFC_RANDOM_READ_CMD1 GENMASK(23, 16)
+
+/* bit definitions for NFC_WCMD_SET */
+#define NFC_PROGRAM_CMD GENMASK(7, 0)
+#define NFC_RANDOM_WRITE_CMD GENMASK(15, 8)
+#define NFC_READ_CMD0 GENMASK(23, 16)
+#define NFC_READ_CMD1 GENMASK(31, 24)
+
+/* bit definitions for NFC_ECC_CTL */
+#define NFC_ECC_EN BIT(0)
+#define NFC_ECC_PIPELINE BIT(3)
+#define NFC_ECC_EXCEPTION BIT(4)
+#define NFC_ECC_BLOCK_SIZE BIT(5)
+#define NFC_RANDOM_EN BIT(9)
+#define NFC_RANDOM_DIRECTION BIT(10)
+#define NFC_ECC_MODE_SHIFT 12
+#define NFC_ECC_MODE GENMASK(15, 12)
+#define NFC_RANDOM_SEED GENMASK(30, 16)
+
+#define NFC_DEFAULT_TIMEOUT_MS 1000
+
+#define NFC_SRAM_SIZE 1024
+
+#define NFC_MAX_CS 7
+
+/*
+ * Ready/Busy detection type: describes the Ready/Busy detection modes
+ *
+ * @RB_NONE: no external detection available, rely on STATUS command
+ * and software timeouts
+ * @RB_NATIVE: use sunxi NAND controller Ready/Busy support. The Ready/Busy
+ * pin of the NAND flash chip must be connected to one of the
+ * native NAND R/B pins (those which can be muxed to the NAND
+ * Controller)
+ * @RB_GPIO: use a simple GPIO to handle Ready/Busy status. The Ready/Busy
+ * pin of the NAND flash chip must be connected to a GPIO capable
+ * pin.
+ */
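+/*
+ * Example bindings (shape assumed from the properties parsed in
+ * sunxi_nand_chip_init() below): a chip wired to native R/B line 0 would
+ * carry "allwinner,rb = <0>;" in its DT node, while a GPIO-connected R/B
+ * pin would use "rb-gpios = <...>;" instead.
+ */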
+enum sunxi_nand_rb_type {
+ RB_NONE,
+ RB_NATIVE,
+ RB_GPIO,
+};
+
+/*
+ * Ready/Busy structure: stores information related to Ready/Busy detection
+ *
+ * @type: the Ready/Busy detection mode
+ * @info: information related to the R/B detection mode. Either a gpio
+ * id or a native R/B id (those supported by the NAND controller).
+ */
+struct sunxi_nand_rb {
+ enum sunxi_nand_rb_type type;
+ union {
+ int gpio;
+ int nativeid;
+ } info;
+};
+
+/*
+ * Chip Select structure: stores information related to NAND Chip Select
+ *
+ * @cs: the NAND CS id used to communicate with a NAND Chip
+ * @rb: the Ready/Busy description
+ */
+struct sunxi_nand_chip_sel {
+ u8 cs;
+ struct sunxi_nand_rb rb;
+};
+
+/*
+ * sunxi HW ECC infos: stores information related to HW ECC support
+ *
+ * @mode: the sunxi ECC mode field deduced from ECC requirements
+ * @layout: the OOB layout depending on the ECC requirements and the
+ * selected ECC mode
+ */
+struct sunxi_nand_hw_ecc {
+ int mode;
+ struct nand_ecclayout layout;
+};
+
+/*
+ * NAND chip structure: stores NAND chip device related information
+ *
+ * @node: used to store NAND chips into a list
+ * @nand: base NAND chip structure
+ * @mtd: base MTD structure
+ * @clk_rate: clk_rate required for this NAND chip
+ * @selected: current active CS
+ * @nsels: number of CS lines required by the NAND chip
+ * @sels: array of CS lines descriptions
+ */
+struct sunxi_nand_chip {
+ struct list_head node;
+ struct nand_chip nand;
+ struct mtd_info mtd;
+ unsigned long clk_rate;
+ int selected;
+ int nsels;
+ struct sunxi_nand_chip_sel sels[0];
+};
+
+static inline struct sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand)
+{
+ return container_of(nand, struct sunxi_nand_chip, nand);
+}
+
+/*
+ * NAND Controller structure: stores sunxi NAND controller information
+ *
+ * @controller: base controller structure
+ * @dev: parent device (used to print error messages)
+ * @regs: NAND controller registers
+ * @ahb_clk: NAND Controller AHB clock
+ * @mod_clk: NAND Controller mod clock
+ * @assigned_cs: bitmask describing already assigned CS lines
+ * @clk_rate: NAND controller current clock rate
+ * @chips: a list containing all the NAND chips attached to
+ * this NAND controller
+ * @complete: a completion object used to wait for NAND
+ * controller events
+ */
+struct sunxi_nfc {
+ struct nand_hw_control controller;
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *ahb_clk;
+ struct clk *mod_clk;
+ unsigned long assigned_cs;
+ unsigned long clk_rate;
+ struct list_head chips;
+ struct completion complete;
+};
+
+static inline struct sunxi_nfc *to_sunxi_nfc(struct nand_hw_control *ctrl)
+{
+ return container_of(ctrl, struct sunxi_nfc, controller);
+}
+
+static irqreturn_t sunxi_nfc_interrupt(int irq, void *dev_id)
+{
+ struct sunxi_nfc *nfc = dev_id;
+ u32 st = readl(nfc->regs + NFC_REG_ST);
+ u32 ien = readl(nfc->regs + NFC_REG_INT);
+
+ if (!(ien & st))
+ return IRQ_NONE;
+
+ if ((ien & st) == ien)
+ complete(&nfc->complete);
+
+ writel(st & NFC_INT_MASK, nfc->regs + NFC_REG_ST);
+ writel(~st & ien & NFC_INT_MASK, nfc->regs + NFC_REG_INT);
+
+ return IRQ_HANDLED;
+}
+
+static int sunxi_nfc_wait_int(struct sunxi_nfc *nfc, u32 flags,
+ unsigned int timeout_ms)
+{
+ init_completion(&nfc->complete);
+
+ writel(flags, nfc->regs + NFC_REG_INT);
+
+ if (!timeout_ms)
+ timeout_ms = NFC_DEFAULT_TIMEOUT_MS;
+
+ if (!wait_for_completion_timeout(&nfc->complete,
+ msecs_to_jiffies(timeout_ms))) {
+		dev_err(nfc->dev, "wait interrupt timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int sunxi_nfc_wait_cmd_fifo_empty(struct sunxi_nfc *nfc)
+{
+ unsigned long timeout = jiffies +
+ msecs_to_jiffies(NFC_DEFAULT_TIMEOUT_MS);
+
+ do {
+ if (!(readl(nfc->regs + NFC_REG_ST) & NFC_CMD_FIFO_STATUS))
+ return 0;
+ } while (time_before(jiffies, timeout));
+
+	dev_err(nfc->dev, "wait for empty cmd FIFO timed out\n");
+ return -ETIMEDOUT;
+}
+
+static int sunxi_nfc_rst(struct sunxi_nfc *nfc)
+{
+ unsigned long timeout = jiffies +
+ msecs_to_jiffies(NFC_DEFAULT_TIMEOUT_MS);
+
+ writel(0, nfc->regs + NFC_REG_ECC_CTL);
+ writel(NFC_RESET, nfc->regs + NFC_REG_CTL);
+
+ do {
+ if (!(readl(nfc->regs + NFC_REG_CTL) & NFC_RESET))
+ return 0;
+ } while (time_before(jiffies, timeout));
+
+	dev_err(nfc->dev, "wait for NAND controller reset timed out\n");
+ return -ETIMEDOUT;
+}
+
+static int sunxi_nfc_dev_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
+ struct sunxi_nand_rb *rb;
+ unsigned long timeo = (sunxi_nand->nand.state == FL_ERASING ? 400 : 20);
+ int ret;
+
+ if (sunxi_nand->selected < 0)
+ return 0;
+
+ rb = &sunxi_nand->sels[sunxi_nand->selected].rb;
+
+ switch (rb->type) {
+ case RB_NATIVE:
+ ret = !!(readl(nfc->regs + NFC_REG_ST) &
+ (NFC_RB_STATE0 << rb->info.nativeid));
+ if (ret)
+ break;
+
+ sunxi_nfc_wait_int(nfc, NFC_RB_B2R, timeo);
+ ret = !!(readl(nfc->regs + NFC_REG_ST) &
+ (NFC_RB_STATE0 << rb->info.nativeid));
+ break;
+ case RB_GPIO:
+ ret = gpio_get_value(rb->info.gpio);
+ break;
+ case RB_NONE:
+ default:
+ ret = 0;
+ dev_err(nfc->dev, "cannot check R/B NAND status!\n");
+ break;
+ }
+
+ return ret;
+}
+
+static void sunxi_nfc_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
+ struct sunxi_nand_chip_sel *sel;
+ u32 ctl;
+
+ if (chip > 0 && chip >= sunxi_nand->nsels)
+ return;
+
+ if (chip == sunxi_nand->selected)
+ return;
+
+ ctl = readl(nfc->regs + NFC_REG_CTL) &
+ ~(NFC_CE_SEL | NFC_RB_SEL | NFC_EN);
+
+ if (chip >= 0) {
+ sel = &sunxi_nand->sels[chip];
+
+ ctl |= (sel->cs << 24) | NFC_EN |
+ (((nand->page_shift - 10) & 0xf) << 8);
+ if (sel->rb.type == RB_NONE) {
+ nand->dev_ready = NULL;
+ } else {
+ nand->dev_ready = sunxi_nfc_dev_ready;
+ if (sel->rb.type == RB_NATIVE)
+ ctl |= (sel->rb.info.nativeid << 3);
+ }
+
+ writel(mtd->writesize, nfc->regs + NFC_REG_SPARE_AREA);
+
+ if (nfc->clk_rate != sunxi_nand->clk_rate) {
+ clk_set_rate(nfc->mod_clk, sunxi_nand->clk_rate);
+ nfc->clk_rate = sunxi_nand->clk_rate;
+ }
+ }
+
+ writel(ctl, nfc->regs + NFC_REG_CTL);
+
+ sunxi_nand->selected = chip;
+}
+
+static void sunxi_nfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
+ int ret;
+ int cnt;
+ int offs = 0;
+ u32 tmp;
+
+ while (len > offs) {
+ cnt = min(len - offs, NFC_SRAM_SIZE);
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ break;
+
+ writel(cnt, nfc->regs + NFC_REG_CNT);
+ tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD;
+ writel(tmp, nfc->regs + NFC_REG_CMD);
+
+ ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+ if (ret)
+ break;
+
+ if (buf)
+ memcpy_fromio(buf + offs, nfc->regs + NFC_RAM0_BASE,
+ cnt);
+ offs += cnt;
+ }
+}
+
+static void sunxi_nfc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
+ int len)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
+ int ret;
+ int cnt;
+ int offs = 0;
+ u32 tmp;
+
+ while (len > offs) {
+ cnt = min(len - offs, NFC_SRAM_SIZE);
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ break;
+
+ writel(cnt, nfc->regs + NFC_REG_CNT);
+ memcpy_toio(nfc->regs + NFC_RAM0_BASE, buf + offs, cnt);
+ tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD |
+ NFC_ACCESS_DIR;
+ writel(tmp, nfc->regs + NFC_REG_CMD);
+
+ ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+ if (ret)
+ break;
+
+ offs += cnt;
+ }
+}
+
+static uint8_t sunxi_nfc_read_byte(struct mtd_info *mtd)
+{
+ uint8_t ret;
+
+ sunxi_nfc_read_buf(mtd, &ret, 1);
+
+ return ret;
+}
+
+static void sunxi_nfc_cmd_ctrl(struct mtd_info *mtd, int dat,
+ unsigned int ctrl)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
+ int ret;
+ u32 tmp;
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ return;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ tmp = readl(nfc->regs + NFC_REG_CTL);
+ if (ctrl & NAND_NCE)
+ tmp |= NFC_CE_CTL;
+ else
+ tmp &= ~NFC_CE_CTL;
+ writel(tmp, nfc->regs + NFC_REG_CTL);
+ }
+
+ if (dat == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE) {
+ writel(NFC_SEND_CMD1 | dat, nfc->regs + NFC_REG_CMD);
+ } else {
+ writel(dat, nfc->regs + NFC_REG_ADDR_LOW);
+ writel(NFC_SEND_ADR, nfc->regs + NFC_REG_CMD);
+ }
+
+ sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+}
+
+static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct sunxi_nfc *nfc = to_sunxi_nfc(chip->controller);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct nand_ecclayout *layout = ecc->layout;
+ struct sunxi_nand_hw_ecc *data = ecc->priv;
+ unsigned int max_bitflips = 0;
+ int offset;
+ int ret;
+ u32 tmp;
+ int i;
+ int cnt;
+
+ tmp = readl(nfc->regs + NFC_REG_ECC_CTL);
+ tmp &= ~(NFC_ECC_MODE | NFC_ECC_PIPELINE | NFC_ECC_BLOCK_SIZE);
+ tmp |= NFC_ECC_EN | (data->mode << NFC_ECC_MODE_SHIFT) |
+ NFC_ECC_EXCEPTION;
+
+ writel(tmp, nfc->regs + NFC_REG_ECC_CTL);
+
+ for (i = 0; i < ecc->steps; i++) {
+ if (i)
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, i * ecc->size, -1);
+
+ offset = mtd->writesize + layout->eccpos[i * ecc->bytes] - 4;
+
+ chip->read_buf(mtd, NULL, ecc->size);
+
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset, -1);
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ return ret;
+
+ tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | (1 << 30);
+ writel(tmp, nfc->regs + NFC_REG_CMD);
+
+ ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+ if (ret)
+ return ret;
+
+ memcpy_fromio(buf + (i * ecc->size),
+ nfc->regs + NFC_RAM0_BASE, ecc->size);
+
+ if (readl(nfc->regs + NFC_REG_ECC_ST) & 0x1) {
+ mtd->ecc_stats.failed++;
+ } else {
+ tmp = readl(nfc->regs + NFC_REG_ECC_CNT0) & 0xff;
+ mtd->ecc_stats.corrected += tmp;
+ max_bitflips = max_t(unsigned int, max_bitflips, tmp);
+ }
+
+ if (oob_required) {
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset, -1);
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ return ret;
+
+ offset -= mtd->writesize;
+ chip->read_buf(mtd, chip->oob_poi + offset,
+ ecc->bytes + 4);
+ }
+ }
+
+ if (oob_required) {
+ cnt = ecc->layout->oobfree[ecc->steps].length;
+ if (cnt > 0) {
+ offset = mtd->writesize +
+ ecc->layout->oobfree[ecc->steps].offset;
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset, -1);
+ offset -= mtd->writesize;
+ chip->read_buf(mtd, chip->oob_poi + offset, cnt);
+ }
+ }
+
+ tmp = readl(nfc->regs + NFC_REG_ECC_CTL);
+ tmp &= ~NFC_ECC_EN;
+
+ writel(tmp, nfc->regs + NFC_REG_ECC_CTL);
+
+ return max_bitflips;
+}
+
+static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
+{
+ struct sunxi_nfc *nfc = to_sunxi_nfc(chip->controller);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct nand_ecclayout *layout = ecc->layout;
+ struct sunxi_nand_hw_ecc *data = ecc->priv;
+ int offset;
+ int ret;
+ u32 tmp;
+ int i;
+ int cnt;
+
+ tmp = readl(nfc->regs + NFC_REG_ECC_CTL);
+ tmp &= ~(NFC_ECC_MODE | NFC_ECC_PIPELINE | NFC_ECC_BLOCK_SIZE);
+ tmp |= NFC_ECC_EN | (data->mode << NFC_ECC_MODE_SHIFT) |
+ NFC_ECC_EXCEPTION;
+
+ writel(tmp, nfc->regs + NFC_REG_ECC_CTL);
+
+ for (i = 0; i < ecc->steps; i++) {
+ if (i)
+ chip->cmdfunc(mtd, NAND_CMD_RNDIN, i * ecc->size, -1);
+
+ chip->write_buf(mtd, buf + (i * ecc->size), ecc->size);
+
+ offset = layout->eccpos[i * ecc->bytes] - 4 + mtd->writesize;
+
+ /* Fill OOB data in */
+ if (oob_required) {
+ tmp = 0xffffffff;
+ memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, &tmp,
+ 4);
+ } else {
+ memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE,
+ chip->oob_poi + offset - mtd->writesize,
+ 4);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset, -1);
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ return ret;
+
+ tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ACCESS_DIR |
+ (1 << 30);
+ writel(tmp, nfc->regs + NFC_REG_CMD);
+ ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+ if (ret)
+ return ret;
+ }
+
+ if (oob_required) {
+ cnt = ecc->layout->oobfree[i].length;
+ if (cnt > 0) {
+ offset = mtd->writesize +
+ ecc->layout->oobfree[i].offset;
+ chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset, -1);
+ offset -= mtd->writesize;
+ chip->write_buf(mtd, chip->oob_poi + offset, cnt);
+ }
+ }
+
+ tmp = readl(nfc->regs + NFC_REG_ECC_CTL);
+ tmp &= ~NFC_ECC_EN;
+
+ writel(tmp, nfc->regs + NFC_REG_ECC_CTL);
+
+ return 0;
+}
+
+static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ uint8_t *buf, int oob_required,
+ int page)
+{
+ struct sunxi_nfc *nfc = to_sunxi_nfc(chip->controller);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct sunxi_nand_hw_ecc *data = ecc->priv;
+ unsigned int max_bitflips = 0;
+ uint8_t *oob = chip->oob_poi;
+ int offset = 0;
+ int ret;
+ int cnt;
+ u32 tmp;
+ int i;
+
+ tmp = readl(nfc->regs + NFC_REG_ECC_CTL);
+ tmp &= ~(NFC_ECC_MODE | NFC_ECC_PIPELINE | NFC_ECC_BLOCK_SIZE);
+ tmp |= NFC_ECC_EN | (data->mode << NFC_ECC_MODE_SHIFT) |
+ NFC_ECC_EXCEPTION;
+
+ writel(tmp, nfc->regs + NFC_REG_ECC_CTL);
+
+ for (i = 0; i < ecc->steps; i++) {
+ chip->read_buf(mtd, NULL, ecc->size);
+
+ tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | (1 << 30);
+ writel(tmp, nfc->regs + NFC_REG_CMD);
+
+ ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+ if (ret)
+ return ret;
+
+ memcpy_fromio(buf, nfc->regs + NFC_RAM0_BASE, ecc->size);
+ buf += ecc->size;
+ offset += ecc->size;
+
+ if (readl(nfc->regs + NFC_REG_ECC_ST) & 0x1) {
+ mtd->ecc_stats.failed++;
+ } else {
+ tmp = readl(nfc->regs + NFC_REG_ECC_CNT0) & 0xff;
+ mtd->ecc_stats.corrected += tmp;
+ max_bitflips = max_t(unsigned int, max_bitflips, tmp);
+ }
+
+ if (oob_required) {
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset, -1);
+ chip->read_buf(mtd, oob, ecc->bytes + ecc->prepad);
+ oob += ecc->bytes + ecc->prepad;
+ }
+
+ offset += ecc->bytes + ecc->prepad;
+ }
+
+ if (oob_required) {
+ cnt = mtd->oobsize - (oob - chip->oob_poi);
+ if (cnt > 0) {
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset, -1);
+ chip->read_buf(mtd, oob, cnt);
+ }
+ }
+
+ writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_ECC_EN,
+ nfc->regs + NFC_REG_ECC_CTL);
+
+ return max_bitflips;
+}
+
+static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf,
+ int oob_required)
+{
+ struct sunxi_nfc *nfc = to_sunxi_nfc(chip->controller);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct sunxi_nand_hw_ecc *data = ecc->priv;
+ uint8_t *oob = chip->oob_poi;
+ int offset = 0;
+ int ret;
+ int cnt;
+ u32 tmp;
+ int i;
+
+ tmp = readl(nfc->regs + NFC_REG_ECC_CTL);
+ tmp &= ~(NFC_ECC_MODE | NFC_ECC_PIPELINE | NFC_ECC_BLOCK_SIZE);
+ tmp |= NFC_ECC_EN | (data->mode << NFC_ECC_MODE_SHIFT) |
+ NFC_ECC_EXCEPTION;
+
+ writel(tmp, nfc->regs + NFC_REG_ECC_CTL);
+
+ for (i = 0; i < ecc->steps; i++) {
+ chip->write_buf(mtd, buf + (i * ecc->size), ecc->size);
+ offset += ecc->size;
+
+ /* Fill OOB data in */
+ if (oob_required) {
+ tmp = 0xffffffff;
+ memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, &tmp,
+ 4);
+ } else {
+ memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, oob,
+ 4);
+ }
+
+ tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ACCESS_DIR |
+ (1 << 30);
+ writel(tmp, nfc->regs + NFC_REG_CMD);
+
+ ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+ if (ret)
+ return ret;
+
+ offset += ecc->bytes + ecc->prepad;
+ oob += ecc->bytes + ecc->prepad;
+ }
+
+ if (oob_required) {
+ cnt = mtd->oobsize - (oob - chip->oob_poi);
+ if (cnt > 0) {
+ chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset, -1);
+ chip->write_buf(mtd, oob, cnt);
+ }
+ }
+
+ tmp = readl(nfc->regs + NFC_REG_ECC_CTL);
+ tmp &= ~NFC_ECC_EN;
+
+ writel(tmp, nfc->regs + NFC_REG_ECC_CTL);
+
+ return 0;
+}
+
+static int sunxi_nand_chip_set_timings(struct sunxi_nand_chip *chip,
+ const struct nand_sdr_timings *timings)
+{
+ u32 min_clk_period = 0;
+
+ /* T1 <=> tCLS */
+ if (timings->tCLS_min > min_clk_period)
+ min_clk_period = timings->tCLS_min;
+
+ /* T2 <=> tCLH */
+ if (timings->tCLH_min > min_clk_period)
+ min_clk_period = timings->tCLH_min;
+
+ /* T3 <=> tCS */
+ if (timings->tCS_min > min_clk_period)
+ min_clk_period = timings->tCS_min;
+
+ /* T4 <=> tCH */
+ if (timings->tCH_min > min_clk_period)
+ min_clk_period = timings->tCH_min;
+
+ /* T5 <=> tWP */
+ if (timings->tWP_min > min_clk_period)
+ min_clk_period = timings->tWP_min;
+
+ /* T6 <=> tWH */
+ if (timings->tWH_min > min_clk_period)
+ min_clk_period = timings->tWH_min;
+
+ /* T7 <=> tALS */
+ if (timings->tALS_min > min_clk_period)
+ min_clk_period = timings->tALS_min;
+
+ /* T8 <=> tDS */
+ if (timings->tDS_min > min_clk_period)
+ min_clk_period = timings->tDS_min;
+
+ /* T9 <=> tDH */
+ if (timings->tDH_min > min_clk_period)
+ min_clk_period = timings->tDH_min;
+
+ /* T10 <=> tRR */
+ if (timings->tRR_min > (min_clk_period * 3))
+ min_clk_period = DIV_ROUND_UP(timings->tRR_min, 3);
+
+ /* T11 <=> tALH */
+ if (timings->tALH_min > min_clk_period)
+ min_clk_period = timings->tALH_min;
+
+ /* T12 <=> tRP */
+ if (timings->tRP_min > min_clk_period)
+ min_clk_period = timings->tRP_min;
+
+ /* T13 <=> tREH */
+ if (timings->tREH_min > min_clk_period)
+ min_clk_period = timings->tREH_min;
+
+ /* T14 <=> tRC */
+ if (timings->tRC_min > (min_clk_period * 2))
+ min_clk_period = DIV_ROUND_UP(timings->tRC_min, 2);
+
+ /* T15 <=> tWC */
+ if (timings->tWC_min > (min_clk_period * 2))
+ min_clk_period = DIV_ROUND_UP(timings->tWC_min, 2);
+
+
+ /* Convert min_clk_period from picoseconds to nanoseconds */
+ min_clk_period = DIV_ROUND_UP(min_clk_period, 1000);
+
+ /*
+ * Convert min_clk_period into a clk frequency, then get the
+ * appropriate rate for the NAND controller IP given this formula
+ * (specified in the datasheet):
+ * nand clk_rate = 2 * min_clk_rate
+ */
+ chip->clk_rate = (2 * NSEC_PER_SEC) / min_clk_period;
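+	/* e.g. a 20 ns min_clk_period gives clk_rate = 2 * 10^9 / 20 = 100 MHz */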
+
+ /* TODO: configure T16-T19 */
+
+ return 0;
+}
+
+static int sunxi_nand_chip_init_timings(struct sunxi_nand_chip *chip,
+ struct device_node *np)
+{
+ const struct nand_sdr_timings *timings;
+ int ret;
+ int mode;
+
+ mode = onfi_get_async_timing_mode(&chip->nand);
+ if (mode == ONFI_TIMING_MODE_UNKNOWN) {
+ mode = chip->nand.onfi_timing_mode_default;
+ } else {
+ uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = {};
+
+ mode = fls(mode) - 1;
+ if (mode < 0)
+ mode = 0;
+
+ feature[0] = mode;
+ ret = chip->nand.onfi_set_features(&chip->mtd, &chip->nand,
+ ONFI_FEATURE_ADDR_TIMING_MODE,
+ feature);
+ if (ret)
+ return ret;
+ }
+
+ timings = onfi_async_timing_mode_to_sdr_timings(mode);
+ if (IS_ERR(timings))
+ return PTR_ERR(timings);
+
+ return sunxi_nand_chip_set_timings(chip, timings);
+}
+
+static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd,
+ struct nand_ecc_ctrl *ecc,
+ struct device_node *np)
+{
+ static const u8 strengths[] = { 16, 24, 28, 32, 40, 48, 56, 60, 64 };
+ struct nand_chip *nand = mtd->priv;
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
+ struct sunxi_nand_hw_ecc *data;
+ struct nand_ecclayout *layout;
+ int nsectors;
+ int ret;
+ int i;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* Add ECC info retrieval from DT */
+ for (i = 0; i < ARRAY_SIZE(strengths); i++) {
+ if (ecc->strength <= strengths[i])
+ break;
+ }
+
+ if (i >= ARRAY_SIZE(strengths)) {
+ dev_err(nfc->dev, "unsupported strength\n");
+ ret = -ENOTSUPP;
+ goto err;
+ }
+
+ data->mode = i;
+
+	/* The HW ECC engine always requests ECC bytes for 1024-byte blocks */
+ ecc->bytes = DIV_ROUND_UP(ecc->strength * fls(8 * 1024), 8);
+
+	/* The HW ECC engine always works with an even number of ECC bytes */
+ ecc->bytes = ALIGN(ecc->bytes, 2);
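+	/*
+	 * e.g. for ecc->strength = 40: fls(8 * 1024) = 14, so
+	 * DIV_ROUND_UP(40 * 14, 8) = 70 bytes, which is already even.
+	 */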
+
+ layout = &data->layout;
+ nsectors = mtd->writesize / ecc->size;
+
+ if (mtd->oobsize < ((ecc->bytes + 4) * nsectors)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ layout->eccbytes = (ecc->bytes * nsectors);
+
+ ecc->layout = layout;
+ ecc->priv = data;
+
+ return 0;
+
+err:
+ kfree(data);
+
+ return ret;
+}
+
+static void sunxi_nand_hw_common_ecc_ctrl_cleanup(struct nand_ecc_ctrl *ecc)
+{
+ kfree(ecc->priv);
+}
+
+static int sunxi_nand_hw_ecc_ctrl_init(struct mtd_info *mtd,
+ struct nand_ecc_ctrl *ecc,
+ struct device_node *np)
+{
+ struct nand_ecclayout *layout;
+ int nsectors;
+ int i, j;
+ int ret;
+
+ ret = sunxi_nand_hw_common_ecc_ctrl_init(mtd, ecc, np);
+ if (ret)
+ return ret;
+
+ ecc->read_page = sunxi_nfc_hw_ecc_read_page;
+ ecc->write_page = sunxi_nfc_hw_ecc_write_page;
+ layout = ecc->layout;
+ nsectors = mtd->writesize / ecc->size;
+
+ for (i = 0; i < nsectors; i++) {
+ if (i) {
+ layout->oobfree[i].offset =
+ layout->oobfree[i - 1].offset +
+ layout->oobfree[i - 1].length +
+ ecc->bytes;
+ layout->oobfree[i].length = 4;
+ } else {
+ /*
+ * The first 2 bytes are used for BB markers, hence we
+ * only have 2 bytes available in the first user data
+ * section.
+ */
+ layout->oobfree[i].length = 2;
+ layout->oobfree[i].offset = 2;
+ }
+
+ for (j = 0; j < ecc->bytes; j++)
+ layout->eccpos[(ecc->bytes * i) + j] =
+ layout->oobfree[i].offset +
+ layout->oobfree[i].length + j;
+ }
+
+ if (mtd->oobsize > (ecc->bytes + 4) * nsectors) {
+ layout->oobfree[nsectors].offset =
+ layout->oobfree[nsectors - 1].offset +
+ layout->oobfree[nsectors - 1].length +
+ ecc->bytes;
+ layout->oobfree[nsectors].length = mtd->oobsize -
+ ((ecc->bytes + 4) * nsectors);
+ }
+
+ return 0;
+}
+
+static int sunxi_nand_hw_syndrome_ecc_ctrl_init(struct mtd_info *mtd,
+ struct nand_ecc_ctrl *ecc,
+ struct device_node *np)
+{
+ struct nand_ecclayout *layout;
+ int nsectors;
+ int i;
+ int ret;
+
+ ret = sunxi_nand_hw_common_ecc_ctrl_init(mtd, ecc, np);
+ if (ret)
+ return ret;
+
+ ecc->prepad = 4;
+ ecc->read_page = sunxi_nfc_hw_syndrome_ecc_read_page;
+ ecc->write_page = sunxi_nfc_hw_syndrome_ecc_write_page;
+
+ layout = ecc->layout;
+ nsectors = mtd->writesize / ecc->size;
+
+ for (i = 0; i < (ecc->bytes * nsectors); i++)
+ layout->eccpos[i] = i;
+
+ layout->oobfree[0].length = mtd->oobsize - i;
+ layout->oobfree[0].offset = i;
+
+ return 0;
+}
+
+static void sunxi_nand_ecc_cleanup(struct nand_ecc_ctrl *ecc)
+{
+ switch (ecc->mode) {
+ case NAND_ECC_HW:
+ case NAND_ECC_HW_SYNDROME:
+ sunxi_nand_hw_common_ecc_ctrl_cleanup(ecc);
+ break;
+ case NAND_ECC_NONE:
+ kfree(ecc->layout);
+ default:
+ break;
+ }
+}
+
+static int sunxi_nand_ecc_init(struct mtd_info *mtd, struct nand_ecc_ctrl *ecc,
+ struct device_node *np)
+{
+ struct nand_chip *nand = mtd->priv;
+ int strength;
+ int blk_size;
+ int ret;
+
+ blk_size = of_get_nand_ecc_step_size(np);
+ strength = of_get_nand_ecc_strength(np);
+ if (blk_size > 0 && strength > 0) {
+ ecc->size = blk_size;
+ ecc->strength = strength;
+ } else {
+ ecc->size = nand->ecc_step_ds;
+ ecc->strength = nand->ecc_strength_ds;
+ }
+
+ if (!ecc->size || !ecc->strength)
+ return -EINVAL;
+
+ ecc->mode = NAND_ECC_HW;
+
+ ret = of_get_nand_ecc_mode(np);
+ if (ret >= 0)
+ ecc->mode = ret;
+
+ switch (ecc->mode) {
+ case NAND_ECC_SOFT_BCH:
+ ecc->bytes = DIV_ROUND_UP(ecc->strength * fls(8 * ecc->size),
+ 8);
+ break;
+ case NAND_ECC_HW:
+ ret = sunxi_nand_hw_ecc_ctrl_init(mtd, ecc, np);
+ if (ret)
+ return ret;
+ break;
+ case NAND_ECC_HW_SYNDROME:
+ ret = sunxi_nand_hw_syndrome_ecc_ctrl_init(mtd, ecc, np);
+ if (ret)
+ return ret;
+ break;
+ case NAND_ECC_NONE:
+ ecc->layout = kzalloc(sizeof(*ecc->layout), GFP_KERNEL);
+ if (!ecc->layout)
+ return -ENOMEM;
+ ecc->layout->oobfree[0].length = mtd->oobsize;
+ case NAND_ECC_SOFT:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
+ struct device_node *np)
+{
+ const struct nand_sdr_timings *timings;
+ struct sunxi_nand_chip *chip;
+ struct mtd_part_parser_data ppdata;
+ struct mtd_info *mtd;
+ struct nand_chip *nand;
+ int nsels;
+ int ret;
+ int i;
+ u32 tmp;
+
+ if (!of_get_property(np, "reg", &nsels))
+ return -EINVAL;
+
+ nsels /= sizeof(u32);
+ if (!nsels) {
+ dev_err(dev, "invalid reg property size\n");
+ return -EINVAL;
+ }
+
+ chip = devm_kzalloc(dev,
+ sizeof(*chip) +
+ (nsels * sizeof(struct sunxi_nand_chip_sel)),
+ GFP_KERNEL);
+ if (!chip) {
+ dev_err(dev, "could not allocate chip\n");
+ return -ENOMEM;
+ }
+
+ chip->nsels = nsels;
+ chip->selected = -1;
+
+ for (i = 0; i < nsels; i++) {
+ ret = of_property_read_u32_index(np, "reg", i, &tmp);
+ if (ret) {
+ dev_err(dev, "could not retrieve reg property: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (tmp > NFC_MAX_CS) {
+ dev_err(dev,
+ "invalid reg value: %u (max CS = 7)\n",
+ tmp);
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(tmp, &nfc->assigned_cs)) {
+ dev_err(dev, "CS %d already assigned\n", tmp);
+ return -EINVAL;
+ }
+
+ chip->sels[i].cs = tmp;
+
+ if (!of_property_read_u32_index(np, "allwinner,rb", i, &tmp) &&
+ tmp < 2) {
+ chip->sels[i].rb.type = RB_NATIVE;
+ chip->sels[i].rb.info.nativeid = tmp;
+ } else {
+ ret = of_get_named_gpio(np, "rb-gpios", i);
+ if (ret >= 0) {
+ tmp = ret;
+ chip->sels[i].rb.type = RB_GPIO;
+ chip->sels[i].rb.info.gpio = tmp;
+ ret = devm_gpio_request(dev, tmp, "nand-rb");
+ if (ret)
+ return ret;
+
+ ret = gpio_direction_input(tmp);
+ if (ret)
+ return ret;
+ } else {
+ chip->sels[i].rb.type = RB_NONE;
+ }
+ }
+ }
+
+ timings = onfi_async_timing_mode_to_sdr_timings(0);
+ if (IS_ERR(timings)) {
+ ret = PTR_ERR(timings);
+ dev_err(dev,
+ "could not retrieve timings for ONFI mode 0: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = sunxi_nand_chip_set_timings(chip, timings);
+ if (ret) {
+ dev_err(dev, "could not configure chip timings: %d\n", ret);
+ return ret;
+ }
+
+ nand = &chip->nand;
+ /* Default tR value specified in the ONFI spec (chapter 4.15.1) */
+ nand->chip_delay = 200;
+ nand->controller = &nfc->controller;
+ nand->select_chip = sunxi_nfc_select_chip;
+ nand->cmd_ctrl = sunxi_nfc_cmd_ctrl;
+ nand->read_buf = sunxi_nfc_read_buf;
+ nand->write_buf = sunxi_nfc_write_buf;
+ nand->read_byte = sunxi_nfc_read_byte;
+
+ if (of_get_nand_on_flash_bbt(np))
+ nand->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
+
+ mtd = &chip->mtd;
+ mtd->dev.parent = dev;
+ mtd->priv = nand;
+ mtd->owner = THIS_MODULE;
+
+ ret = nand_scan_ident(mtd, nsels, NULL);
+ if (ret)
+ return ret;
+
+ ret = sunxi_nand_chip_init_timings(chip, np);
+ if (ret) {
+ dev_err(dev, "could not configure chip timings: %d\n", ret);
+ return ret;
+ }
+
+ ret = sunxi_nand_ecc_init(mtd, &nand->ecc, np);
+ if (ret) {
+ dev_err(dev, "ECC init failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = nand_scan_tail(mtd);
+ if (ret) {
+ dev_err(dev, "nand_scan_tail failed: %d\n", ret);
+ return ret;
+ }
+
+ ppdata.of_node = np;
+ ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
+ if (ret) {
+ dev_err(dev, "failed to register mtd device: %d\n", ret);
+ nand_release(mtd);
+ return ret;
+ }
+
+ list_add_tail(&chip->node, &nfc->chips);
+
+ return 0;
+}
+
+static int sunxi_nand_chips_init(struct device *dev, struct sunxi_nfc *nfc)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *nand_np;
+ int nchips = of_get_child_count(np);
+ int ret;
+
+ if (nchips > 8) {
+ dev_err(dev, "too many NAND chips: %d (max = 8)\n", nchips);
+ return -EINVAL;
+ }
+
+ for_each_child_of_node(np, nand_np) {
+ ret = sunxi_nand_chip_init(dev, nfc, nand_np);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc)
+{
+ struct sunxi_nand_chip *chip;
+
+ while (!list_empty(&nfc->chips)) {
+ chip = list_first_entry(&nfc->chips, struct sunxi_nand_chip,
+ node);
+ nand_release(&chip->mtd);
+ sunxi_nand_ecc_cleanup(&chip->nand.ecc);
+ }
+}
+
+static int sunxi_nfc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *r;
+ struct sunxi_nfc *nfc;
+ int irq;
+ int ret;
+
+ nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ nfc->dev = dev;
+ spin_lock_init(&nfc->controller.lock);
+ init_waitqueue_head(&nfc->controller.wq);
+ INIT_LIST_HEAD(&nfc->chips);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nfc->regs = devm_ioremap_resource(dev, r);
+ if (IS_ERR(nfc->regs))
+ return PTR_ERR(nfc->regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "failed to retrieve irq\n");
+ return irq;
+ }
+
+ nfc->ahb_clk = devm_clk_get(dev, "ahb");
+ if (IS_ERR(nfc->ahb_clk)) {
+ dev_err(dev, "failed to retrieve ahb clk\n");
+ return PTR_ERR(nfc->ahb_clk);
+ }
+
+ ret = clk_prepare_enable(nfc->ahb_clk);
+ if (ret)
+ return ret;
+
+ nfc->mod_clk = devm_clk_get(dev, "mod");
+ if (IS_ERR(nfc->mod_clk)) {
+ dev_err(dev, "failed to retrieve mod clk\n");
+ ret = PTR_ERR(nfc->mod_clk);
+ goto out_ahb_clk_unprepare;
+ }
+
+ ret = clk_prepare_enable(nfc->mod_clk);
+ if (ret)
+ goto out_ahb_clk_unprepare;
+
+ ret = sunxi_nfc_rst(nfc);
+ if (ret)
+ goto out_mod_clk_unprepare;
+
+ writel(0, nfc->regs + NFC_REG_INT);
+ ret = devm_request_irq(dev, irq, sunxi_nfc_interrupt,
+ 0, "sunxi-nand", nfc);
+ if (ret)
+ goto out_mod_clk_unprepare;
+
+ platform_set_drvdata(pdev, nfc);
+
+ /*
+ * TODO: replace these magic values with proper flags as soon as we
+ * know what they are encoding.
+ */
+ writel(0x100, nfc->regs + NFC_REG_TIMING_CTL);
+ writel(0x7ff, nfc->regs + NFC_REG_TIMING_CFG);
+
+ ret = sunxi_nand_chips_init(dev, nfc);
+ if (ret) {
+ dev_err(dev, "failed to init nand chips\n");
+ goto out_mod_clk_unprepare;
+ }
+
+ return 0;
+
+out_mod_clk_unprepare:
+ clk_disable_unprepare(nfc->mod_clk);
+out_ahb_clk_unprepare:
+ clk_disable_unprepare(nfc->ahb_clk);
+
+ return ret;
+}
+
+static int sunxi_nfc_remove(struct platform_device *pdev)
+{
+ struct sunxi_nfc *nfc = platform_get_drvdata(pdev);
+
+ sunxi_nand_chips_cleanup(nfc);
+
+ return 0;
+}
+
+static const struct of_device_id sunxi_nfc_ids[] = {
+ { .compatible = "allwinner,sun4i-a10-nand" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sunxi_nfc_ids);
+
+static struct platform_driver sunxi_nfc_driver = {
+ .driver = {
+ .name = "sunxi_nand",
+ .of_match_table = sunxi_nfc_ids,
+ },
+ .probe = sunxi_nfc_probe,
+ .remove = sunxi_nfc_remove,
+};
+module_platform_driver(sunxi_nfc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Boris BREZILLON");
+MODULE_DESCRIPTION("Allwinner NAND Flash Controller driver");
+MODULE_ALIAS("platform:sunxi_nand");
diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c
index 2fb07eced2b..39763b94f67 100644
--- a/drivers/mtd/spi-nor/fsl-quadspi.c
+++ b/drivers/mtd/spi-nor/fsl-quadspi.c
@@ -719,16 +719,10 @@ static int fsl_qspi_read(struct spi_nor *nor, loff_t from,
{
struct fsl_qspi *q = nor->priv;
u8 cmd = nor->read_opcode;
- int ret;
dev_dbg(q->dev, "cmd [%x],read from (0x%p, 0x%.8x, 0x%.8x),len:%d\n",
cmd, q->ahb_base, q->chip_base_addr, (unsigned int)from, len);
- /* Wait until the previous command is finished. */
- ret = nor->wait_till_ready(nor);
- if (ret)
- return ret;
-
/* Read out the data directly from the AHB buffer.*/
memcpy(buf, q->ahb_base + q->chip_base_addr + from, len);
@@ -744,16 +738,6 @@ static int fsl_qspi_erase(struct spi_nor *nor, loff_t offs)
dev_dbg(nor->dev, "%dKiB at 0x%08x:0x%08x\n",
nor->mtd->erasesize / 1024, q->chip_base_addr, (u32)offs);
- /* Wait until finished previous write command. */
- ret = nor->wait_till_ready(nor);
- if (ret)
- return ret;
-
- /* Send write enable, then erase commands. */
- ret = nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0, 0);
- if (ret)
- return ret;
-
ret = fsl_qspi_runcmd(q, nor->erase_opcode, offs, 0);
if (ret)
return ret;
@@ -849,9 +833,8 @@ static int fsl_qspi_probe(struct platform_device *pdev)
ret = clk_prepare_enable(q->clk);
if (ret) {
- clk_disable_unprepare(q->clk_en);
dev_err(dev, "can not enable the qspi clock\n");
- goto map_failed;
+ goto clk_failed;
}
/* find the irq */
@@ -905,7 +888,8 @@ static int fsl_qspi_probe(struct platform_device *pdev)
nor->prepare = fsl_qspi_prep;
nor->unprepare = fsl_qspi_unprep;
- if (of_modalias_node(np, modalias, sizeof(modalias)) < 0)
+ ret = of_modalias_node(np, modalias, sizeof(modalias));
+ if (ret < 0)
goto map_failed;
ret = of_property_read_u32(np, "spi-max-frequency",
@@ -964,6 +948,7 @@ last_init_failed:
irq_failed:
clk_disable_unprepare(q->clk);
+clk_failed:
clk_disable_unprepare(q->clk_en);
map_failed:
dev_err(dev, "Freescale QuadSPI probe failed\n");
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index c51ee52386a..0f8ec3c2d01 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -26,7 +26,38 @@
/* Define max times to check status register before we give up. */
#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */
-#define JEDEC_MFR(_jedec_id) ((_jedec_id) >> 16)
+#define SPI_NOR_MAX_ID_LEN 6
+
+struct flash_info {
+ /*
+ * This array stores the ID bytes.
+	 * The first three bytes are the JEDEC ID.
+ * JEDEC ID zero means "no ID" (mostly older chips).
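+	 * For example, the s25fl128s entry below is matched on its full
+	 * 6-byte ID: 01 20 18 4d 01 80.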
+ */
+ u8 id[SPI_NOR_MAX_ID_LEN];
+ u8 id_len;
+
+ /* The size listed here is what works with SPINOR_OP_SE, which isn't
+ * necessarily called a "sector" by the vendor.
+ */
+ unsigned sector_size;
+ u16 n_sectors;
+
+ u16 page_size;
+ u16 addr_width;
+
+ u16 flags;
+#define SECT_4K 0x01 /* SPINOR_OP_BE_4K works uniformly */
+#define SPI_NOR_NO_ERASE 0x02 /* No erase command needed */
+#define SST_WRITE 0x04 /* use SST byte programming */
+#define SPI_NOR_NO_FR 0x08 /* Can't do fastread */
+#define SECT_4K_PMC 0x10 /* SPINOR_OP_BE_4K_PMC works uniformly */
+#define SPI_NOR_DUAL_READ 0x20 /* Flash supports Dual Read */
+#define SPI_NOR_QUAD_READ 0x40 /* Flash supports Quad Read */
+#define USE_FSR 0x80 /* use flag status register */
+};
+
+#define JEDEC_MFR(info) ((info)->id[0])
static const struct spi_device_id *spi_nor_match_id(const char *name);
@@ -98,7 +129,7 @@ static inline int spi_nor_read_dummy_cycles(struct spi_nor *nor)
case SPI_NOR_FAST:
case SPI_NOR_DUAL:
case SPI_NOR_QUAD:
- return 1;
+ return 8;
case SPI_NOR_NORMAL:
return 0;
}
@@ -138,13 +169,14 @@ static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
}
/* Enable/disable 4-byte addressing mode. */
-static inline int set_4byte(struct spi_nor *nor, u32 jedec_id, int enable)
+static inline int set_4byte(struct spi_nor *nor, struct flash_info *info,
+ int enable)
{
int status;
bool need_wren = false;
u8 cmd;
- switch (JEDEC_MFR(jedec_id)) {
+ switch (JEDEC_MFR(info)) {
case CFI_MFR_ST: /* Micron, actually */
/* Some Micron need WREN command; all will accept it */
need_wren = true;
@@ -165,81 +197,74 @@ static inline int set_4byte(struct spi_nor *nor, u32 jedec_id, int enable)
return nor->write_reg(nor, SPINOR_OP_BRWR, nor->cmd_buf, 1, 0);
}
}
-
-static int spi_nor_wait_till_ready(struct spi_nor *nor)
+static inline int spi_nor_sr_ready(struct spi_nor *nor)
{
- unsigned long deadline;
- int sr;
-
- deadline = jiffies + MAX_READY_WAIT_JIFFIES;
-
- do {
- cond_resched();
+ int sr = read_sr(nor);
+ if (sr < 0)
+ return sr;
+ else
+ return !(sr & SR_WIP);
+}
- sr = read_sr(nor);
- if (sr < 0)
- break;
- else if (!(sr & SR_WIP))
- return 0;
- } while (!time_after_eq(jiffies, deadline));
+static inline int spi_nor_fsr_ready(struct spi_nor *nor)
+{
+ int fsr = read_fsr(nor);
+ if (fsr < 0)
+ return fsr;
+ else
+ return fsr & FSR_READY;
+}
- return -ETIMEDOUT;
+static int spi_nor_ready(struct spi_nor *nor)
+{
+ int sr, fsr;
+ sr = spi_nor_sr_ready(nor);
+ if (sr < 0)
+ return sr;
+ fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
+ if (fsr < 0)
+ return fsr;
+ return sr && fsr;
}
-static int spi_nor_wait_till_fsr_ready(struct spi_nor *nor)
+/*
+ * Service routine to read status register until ready, or timeout occurs.
+ * Returns non-zero if error.
+ */
+static int spi_nor_wait_till_ready(struct spi_nor *nor)
{
unsigned long deadline;
- int sr;
- int fsr;
+ int timeout = 0, ret;
deadline = jiffies + MAX_READY_WAIT_JIFFIES;
- do {
+ while (!timeout) {
+ if (time_after_eq(jiffies, deadline))
+ timeout = 1;
+
+ ret = spi_nor_ready(nor);
+ if (ret < 0)
+ return ret;
+ if (ret)
+ return 0;
+
cond_resched();
+ }
- sr = read_sr(nor);
- if (sr < 0) {
- break;
- } else if (!(sr & SR_WIP)) {
- fsr = read_fsr(nor);
- if (fsr < 0)
- break;
- if (fsr & FSR_READY)
- return 0;
- }
- } while (!time_after_eq(jiffies, deadline));
+ dev_err(nor->dev, "flash operation timed out\n");
return -ETIMEDOUT;
}
/*
- * Service routine to read status register until ready, or timeout occurs.
- * Returns non-zero if error.
- */
-static int wait_till_ready(struct spi_nor *nor)
-{
- return nor->wait_till_ready(nor);
-}
-
-/*
* Erase the whole flash memory
*
* Returns 0 if successful, non-zero otherwise.
*/
static int erase_chip(struct spi_nor *nor)
{
- int ret;
-
dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd->size >> 10));
- /* Wait until finished previous write command. */
- ret = wait_till_ready(nor);
- if (ret)
- return ret;
-
- /* Send write enable, then erase commands. */
- write_enable(nor);
-
return nor->write_reg(nor, SPINOR_OP_CHIP_ERASE, NULL, 0, 0);
}
@@ -294,11 +319,17 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
/* whole-chip erase? */
if (len == mtd->size) {
+ write_enable(nor);
+
if (erase_chip(nor)) {
ret = -EIO;
goto erase_err;
}
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto erase_err;
+
/* REVISIT in some cases we could speed up erasing large regions
* by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K. We may have set up
* to use "small sector erase", but that's not always optimal.
@@ -307,6 +338,8 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
/* "sector"-at-a-time erase */
} else {
while (len) {
+ write_enable(nor);
+
if (nor->erase(nor, addr)) {
ret = -EIO;
goto erase_err;
@@ -314,9 +347,15 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
addr += mtd->erasesize;
len -= mtd->erasesize;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto erase_err;
}
}
+ write_disable(nor);
+
spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);
instr->state = MTD_ERASE_DONE;
@@ -341,11 +380,6 @@ static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
if (ret)
return ret;
- /* Wait until finished previous command */
- ret = wait_till_ready(nor);
- if (ret)
- goto err;
-
status_old = read_sr(nor);
if (offset < mtd->size - (mtd->size / 2))
@@ -388,11 +422,6 @@ static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
if (ret)
return ret;
- /* Wait until finished previous command */
- ret = wait_till_ready(nor);
- if (ret)
- goto err;
-
status_old = read_sr(nor);
if (offset+len > mtd->size - (mtd->size / 64))
@@ -424,38 +453,34 @@ err:
return ret;
}
-struct flash_info {
- /* JEDEC id zero means "no ID" (most older chips); otherwise it has
- * a high byte of zero plus three data bytes: the manufacturer id,
- * then a two byte device id.
- */
- u32 jedec_id;
- u16 ext_id;
-
- /* The size listed here is what works with SPINOR_OP_SE, which isn't
- * necessarily called a "sector" by the vendor.
- */
- unsigned sector_size;
- u16 n_sectors;
-
- u16 page_size;
- u16 addr_width;
-
- u16 flags;
-#define SECT_4K 0x01 /* SPINOR_OP_BE_4K works uniformly */
-#define SPI_NOR_NO_ERASE 0x02 /* No erase command needed */
-#define SST_WRITE 0x04 /* use SST byte programming */
-#define SPI_NOR_NO_FR 0x08 /* Can't do fastread */
-#define SECT_4K_PMC 0x10 /* SPINOR_OP_BE_4K_PMC works uniformly */
-#define SPI_NOR_DUAL_READ 0x20 /* Flash supports Dual Read */
-#define SPI_NOR_QUAD_READ 0x40 /* Flash supports Quad Read */
-#define USE_FSR 0x80 /* use flag status register */
-};
-
+/* Used when the "_ext_id" is two bytes at most */
#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
((kernel_ulong_t)&(struct flash_info) { \
- .jedec_id = (_jedec_id), \
- .ext_id = (_ext_id), \
+ .id = { \
+ ((_jedec_id) >> 16) & 0xff, \
+ ((_jedec_id) >> 8) & 0xff, \
+ (_jedec_id) & 0xff, \
+ ((_ext_id) >> 8) & 0xff, \
+ (_ext_id) & 0xff, \
+ }, \
+ .id_len = (!(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))), \
+ .sector_size = (_sector_size), \
+ .n_sectors = (_n_sectors), \
+ .page_size = 256, \
+ .flags = (_flags), \
+ })
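+/*
+ * Example expansion (illustration only): INFO(0xc84016, 0, ...) produces
+ * id = { 0xc8, 0x40, 0x16 } with id_len = 3; a non-zero two-byte _ext_id
+ * adds two more ID bytes and makes id_len 5.
+ */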
+
+#define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
+ ((kernel_ulong_t)&(struct flash_info) { \
+ .id = { \
+ ((_jedec_id) >> 16) & 0xff, \
+ ((_jedec_id) >> 8) & 0xff, \
+ (_jedec_id) & 0xff, \
+ ((_ext_id) >> 16) & 0xff, \
+ ((_ext_id) >> 8) & 0xff, \
+ (_ext_id) & 0xff, \
+ }, \
+ .id_len = 6, \
.sector_size = (_sector_size), \
.n_sectors = (_n_sectors), \
.page_size = 256, \
@@ -507,6 +532,9 @@ static const struct spi_device_id spi_nor_ids[] = {
{ "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
{ "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ /* Fujitsu */
+ { "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) },
+
/* GigaDevice */
{ "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64, SECT_4K) },
{ "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, SECT_4K) },
@@ -532,6 +560,7 @@ static const struct spi_device_id spi_nor_ids[] = {
{ "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
/* Micron */
+ { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, 0) },
{ "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, 0) },
{ "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, 0) },
{ "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, 0) },
@@ -556,6 +585,7 @@ static const struct spi_device_id spi_nor_ids[] = {
{ "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
{ "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
+ { "s25fl128s", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SPI_NOR_QUAD_READ) },
{ "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) },
{ "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, 0) },
{ "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
@@ -566,6 +596,7 @@ static const struct spi_device_id spi_nor_ids[] = {
{ "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
{ "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K) },
{ "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
+ { "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, 0) },
/* SST -- large erase sizes are "overlays", "sectors" are 4K */
{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
@@ -577,6 +608,7 @@ static const struct spi_device_id spi_nor_ids[] = {
{ "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K | SST_WRITE) },
{ "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K | SST_WRITE) },
{ "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
+ { "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
/* ST Microelectronics -- newer production may have feature updates */
{ "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
@@ -588,7 +620,6 @@ static const struct spi_device_id spi_nor_ids[] = {
{ "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
{ "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
{ "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },
- { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, 0) },
{ "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) },
{ "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) },
@@ -643,32 +674,24 @@ static const struct spi_device_id spi_nor_ids[] = {
static const struct spi_device_id *spi_nor_read_id(struct spi_nor *nor)
{
int tmp;
- u8 id[5];
- u32 jedec;
- u16 ext_jedec;
+ u8 id[SPI_NOR_MAX_ID_LEN];
struct flash_info *info;
- tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, 5);
+ tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
if (tmp < 0) {
dev_dbg(nor->dev, " error %d reading JEDEC ID\n", tmp);
return ERR_PTR(tmp);
}
- jedec = id[0];
- jedec = jedec << 8;
- jedec |= id[1];
- jedec = jedec << 8;
- jedec |= id[2];
-
- ext_jedec = id[3] << 8 | id[4];
for (tmp = 0; tmp < ARRAY_SIZE(spi_nor_ids) - 1; tmp++) {
info = (void *)spi_nor_ids[tmp].driver_data;
- if (info->jedec_id == jedec) {
- if (info->ext_id == 0 || info->ext_id == ext_jedec)
+ if (info->id_len) {
+ if (!memcmp(info->id, id, info->id_len))
return &spi_nor_ids[tmp];
}
}
- dev_err(nor->dev, "unrecognized JEDEC id %06x\n", jedec);
+ dev_err(nor->dev, "unrecognized JEDEC id bytes: %02x, %2x, %2x\n",
+ id[0], id[1], id[2]);
return ERR_PTR(-ENODEV);
}
@@ -703,11 +726,6 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
if (ret)
return ret;
- /* Wait until finished previous write command. */
- ret = wait_till_ready(nor);
- if (ret)
- goto time_out;
-
write_enable(nor);
nor->sst_write_second = false;
@@ -719,7 +737,7 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
/* write one byte. */
nor->write(nor, to, 1, retlen, buf);
- ret = wait_till_ready(nor);
+ ret = spi_nor_wait_till_ready(nor);
if (ret)
goto time_out;
}
@@ -731,7 +749,7 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
/* write two bytes. */
nor->write(nor, to, 2, retlen, buf + actual);
- ret = wait_till_ready(nor);
+ ret = spi_nor_wait_till_ready(nor);
if (ret)
goto time_out;
to += 2;
@@ -740,7 +758,7 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
nor->sst_write_second = false;
write_disable(nor);
- ret = wait_till_ready(nor);
+ ret = spi_nor_wait_till_ready(nor);
if (ret)
goto time_out;
@@ -751,7 +769,7 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
nor->program_opcode = SPINOR_OP_BP;
nor->write(nor, to, 1, retlen, buf + actual);
- ret = wait_till_ready(nor);
+ ret = spi_nor_wait_till_ready(nor);
if (ret)
goto time_out;
write_disable(nor);
@@ -779,11 +797,6 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
if (ret)
return ret;
- /* Wait until finished previous write command. */
- ret = wait_till_ready(nor);
- if (ret)
- goto write_err;
-
write_enable(nor);
page_offset = to & (nor->page_size - 1);
@@ -802,16 +815,20 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
if (page_size > nor->page_size)
page_size = nor->page_size;
- wait_till_ready(nor);
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto write_err;
+
write_enable(nor);
nor->write(nor, to + i, page_size, retlen, buf + i);
}
}
+ ret = spi_nor_wait_till_ready(nor);
write_err:
spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
- return 0;
+ return ret;
}
static int macronix_quad_enable(struct spi_nor *nor)
@@ -824,7 +841,7 @@ static int macronix_quad_enable(struct spi_nor *nor)
nor->cmd_buf[0] = val | SR_QUAD_EN_MX;
nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 1, 0);
- if (wait_till_ready(nor))
+ if (spi_nor_wait_till_ready(nor))
return 1;
ret = read_sr(nor);
@@ -874,11 +891,11 @@ static int spansion_quad_enable(struct spi_nor *nor)
return 0;
}
-static int set_quad_mode(struct spi_nor *nor, u32 jedec_id)
+static int set_quad_mode(struct spi_nor *nor, struct flash_info *info)
{
int status;
- switch (JEDEC_MFR(jedec_id)) {
+ switch (JEDEC_MFR(info)) {
case CFI_MFR_MACRONIX:
status = macronix_quad_enable(nor);
if (status) {
@@ -904,11 +921,6 @@ static int spi_nor_check(struct spi_nor *nor)
return -EINVAL;
}
- if (!nor->read_id)
- nor->read_id = spi_nor_read_id;
- if (!nor->wait_till_ready)
- nor->wait_till_ready = spi_nor_wait_till_ready;
-
return 0;
}
@@ -926,16 +938,24 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
if (ret)
return ret;
- id = spi_nor_match_id(name);
- if (!id)
+ /* Try to auto-detect if chip name wasn't specified */
+ if (!name)
+ id = spi_nor_read_id(nor);
+ else
+ id = spi_nor_match_id(name);
+ if (IS_ERR_OR_NULL(id))
return -ENOENT;
info = (void *)id->driver_data;
- if (info->jedec_id) {
+ /*
+ * If the caller has specified the name of a flash model that can
+ * normally be detected using JEDEC, let's verify it.
+ */
+ if (name && info->id_len) {
const struct spi_device_id *jid;
- jid = nor->read_id(nor);
+ jid = spi_nor_read_id(nor);
if (IS_ERR(jid)) {
return PTR_ERR(jid);
} else if (jid != id) {
@@ -960,9 +980,9 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
* up with the software protection bits set
*/
- if (JEDEC_MFR(info->jedec_id) == CFI_MFR_ATMEL ||
- JEDEC_MFR(info->jedec_id) == CFI_MFR_INTEL ||
- JEDEC_MFR(info->jedec_id) == CFI_MFR_SST) {
+ if (JEDEC_MFR(info) == CFI_MFR_ATMEL ||
+ JEDEC_MFR(info) == CFI_MFR_INTEL ||
+ JEDEC_MFR(info) == CFI_MFR_SST) {
write_enable(nor);
write_sr(nor, 0);
}
@@ -977,7 +997,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
mtd->_read = spi_nor_read;
/* nor protection support for STmicro chips */
- if (JEDEC_MFR(info->jedec_id) == CFI_MFR_ST) {
+ if (JEDEC_MFR(info) == CFI_MFR_ST) {
mtd->_lock = spi_nor_lock;
mtd->_unlock = spi_nor_unlock;
}
@@ -988,9 +1008,8 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
else
mtd->_write = spi_nor_write;
- if ((info->flags & USE_FSR) &&
- nor->wait_till_ready == spi_nor_wait_till_ready)
- nor->wait_till_ready = spi_nor_wait_till_fsr_ready;
+ if (info->flags & USE_FSR)
+ nor->flags |= SNOR_F_USE_FSR;
#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
/* prefer "small sector" erase if possible */
@@ -1031,7 +1050,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
/* Quad/Dual-read mode takes precedence over fast/normal */
if (mode == SPI_NOR_QUAD && info->flags & SPI_NOR_QUAD_READ) {
- ret = set_quad_mode(nor, info->jedec_id);
+ ret = set_quad_mode(nor, info);
if (ret) {
dev_err(dev, "quad mode not supported\n");
return ret;
@@ -1067,7 +1086,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
else if (mtd->size > 0x1000000) {
/* enable 4-byte addressing if the device exceeds 16MiB */
nor->addr_width = 4;
- if (JEDEC_MFR(info->jedec_id) == CFI_MFR_AMD) {
+ if (JEDEC_MFR(info) == CFI_MFR_AMD) {
/* Dedicated 4-byte command set */
switch (nor->flash_read) {
case SPI_NOR_QUAD:
@@ -1088,7 +1107,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
nor->erase_opcode = SPINOR_OP_SE_4B;
mtd->erasesize = info->sector_size;
} else
- set_4byte(nor, info->jedec_id, 1);
+ set_4byte(nor, info, 1);
} else {
nor->addr_width = 3;
}
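For reference, the reworked INFO()/INFO6() macros above flatten the JEDEC manufacturer and device id into the raw byte order that spi_nor_read_id() now matches with memcmp(). A minimal user-space sketch of that packing (illustrative only, not part of the patch), using the new s25fl128s entry (0x012018 / ext 0x4d0180):

#include <stdio.h>

/* Mimics INFO6(): three JEDEC id bytes followed by three extended id
 * bytes, most significant byte first. */
static void pack_id6(unsigned int jedec_id, unsigned int ext_id, unsigned char id[6])
{
	id[0] = (jedec_id >> 16) & 0xff;
	id[1] = (jedec_id >> 8) & 0xff;
	id[2] = jedec_id & 0xff;
	id[3] = (ext_id >> 16) & 0xff;
	id[4] = (ext_id >> 8) & 0xff;
	id[5] = ext_id & 0xff;
}

int main(void)
{
	unsigned char id[6];

	pack_id6(0x012018, 0x4d0180, id);	/* s25fl128s */
	printf("%02x %02x %02x %02x %02x %02x\n",
	       id[0], id[1], id[2], id[3], id[4], id[5]);
	return 0;	/* prints: 01 20 18 4d 01 80 */
}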
diff --git a/drivers/mtd/tests/oobtest.c b/drivers/mtd/tests/oobtest.c
index dc4f9602b97..5e061186eab 100644
--- a/drivers/mtd/tests/oobtest.c
+++ b/drivers/mtd/tests/oobtest.c
@@ -34,8 +34,11 @@
#include "mtd_test.h"
static int dev = -EINVAL;
+static int bitflip_limit;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
+module_param(bitflip_limit, int, S_IRUGO);
+MODULE_PARM_DESC(bitflip_limit, "Max. allowed bitflips per page");
static struct mtd_info *mtd;
static unsigned char *readbuf;
@@ -115,12 +118,36 @@ static int write_whole_device(void)
return 0;
}
+/*
+ * Display the address, offset and data bytes at comparison failure.
+ * Return number of bitflips encountered.
+ */
+static size_t memcmpshow(loff_t addr, const void *cs, const void *ct, size_t count)
+{
+ const unsigned char *su1, *su2;
+ int res;
+ size_t i = 0;
+ size_t bitflips = 0;
+
+ for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--, i++) {
+ res = *su1 ^ *su2;
+ if (res) {
+ pr_info("error @addr[0x%lx:0x%zx] 0x%x -> 0x%x diff 0x%x\n",
+ (unsigned long)addr, i, *su1, *su2, res);
+ bitflips += hweight8(res);
+ }
+ }
+
+ return bitflips;
+}
+
static int verify_eraseblock(int ebnum)
{
int i;
struct mtd_oob_ops ops;
int err = 0;
loff_t addr = (loff_t)ebnum * mtd->erasesize;
+ size_t bitflips;
prandom_bytes_state(&rnd_state, writebuf, use_len_max * pgcnt);
for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
@@ -139,8 +166,11 @@ static int verify_eraseblock(int ebnum)
errcnt += 1;
return err ? err : -1;
}
- if (memcmp(readbuf, writebuf + (use_len_max * i) + use_offset,
- use_len)) {
+
+ bitflips = memcmpshow(addr, readbuf,
+ writebuf + (use_len_max * i) + use_offset,
+ use_len);
+ if (bitflips > bitflip_limit) {
pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
@@ -148,7 +178,10 @@ static int verify_eraseblock(int ebnum)
pr_err("error: too many errors\n");
return -1;
}
+ } else if (bitflips) {
+ pr_info("ignoring error as within bitflip_limit\n");
}
+
if (use_offset != 0 || use_len < mtd->ecclayout->oobavail) {
int k;
@@ -167,9 +200,10 @@ static int verify_eraseblock(int ebnum)
errcnt += 1;
return err ? err : -1;
}
- if (memcmp(readbuf + use_offset,
- writebuf + (use_len_max * i) + use_offset,
- use_len)) {
+ bitflips = memcmpshow(addr, readbuf + use_offset,
+ writebuf + (use_len_max * i) + use_offset,
+ use_len);
+ if (bitflips > bitflip_limit) {
pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
@@ -177,7 +211,10 @@ static int verify_eraseblock(int ebnum)
pr_err("error: too many errors\n");
return -1;
}
+ } else if (bitflips) {
+ pr_info("ignoring error as within bitflip_limit\n");
}
+
for (k = 0; k < use_offset; ++k)
if (readbuf[k] != 0xff) {
pr_err("error: verify 0xff "
@@ -216,6 +253,9 @@ static int verify_eraseblock_in_one_go(int ebnum)
int err = 0;
loff_t addr = (loff_t)ebnum * mtd->erasesize;
size_t len = mtd->ecclayout->oobavail * pgcnt;
+ size_t oobavail = mtd->ecclayout->oobavail;
+ size_t bitflips;
+ int i;
prandom_bytes_state(&rnd_state, writebuf, len);
ops.mode = MTD_OPS_AUTO_OOB;
@@ -226,6 +266,8 @@ static int verify_eraseblock_in_one_go(int ebnum)
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
+
+ /* read entire block's OOB at one go */
err = mtd_read_oob(mtd, addr, &ops);
if (err || ops.oobretlen != len) {
pr_err("error: readoob failed at %#llx\n",
@@ -233,13 +275,21 @@ static int verify_eraseblock_in_one_go(int ebnum)
errcnt += 1;
return err ? err : -1;
}
- if (memcmp(readbuf, writebuf, len)) {
- pr_err("error: verify failed at %#llx\n",
- (long long)addr);
- errcnt += 1;
- if (errcnt > 1000) {
- pr_err("error: too many errors\n");
- return -1;
+
+ /* verify one page OOB at a time for bitflip per page limit check */
+ for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
+ bitflips = memcmpshow(addr, readbuf + (i * oobavail),
+ writebuf + (i * oobavail), oobavail);
+ if (bitflips > bitflip_limit) {
+ pr_err("error: verify failed at %#llx\n",
+ (long long)addr);
+ errcnt += 1;
+ if (errcnt > 1000) {
+ pr_err("error: too many errors\n");
+ return -1;
+ }
+ } else if (bitflips) {
+ pr_info("ignoring error as within bitflip_limit\n");
}
}
@@ -610,7 +660,8 @@ static int __init mtd_oobtest_init(void)
err = mtd_read_oob(mtd, addr, &ops);
if (err)
goto out;
- if (memcmp(readbuf, writebuf, mtd->ecclayout->oobavail * 2)) {
+ if (memcmpshow(addr, readbuf, writebuf,
+ mtd->ecclayout->oobavail * 2)) {
pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
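The memcmpshow() helper added above counts mismatching bits by XOR-ing each byte pair and feeding the result to hweight8(), so a mismatch only fails the test when the per-page total exceeds the new bitflip_limit parameter. A stand-alone sketch of the same counting idea (user-space, __builtin_popcount standing in for hweight8):

#include <stdio.h>
#include <stddef.h>

/* Count differing bits between two buffers, as memcmpshow() does. */
static size_t count_bitflips(const unsigned char *a, const unsigned char *b, size_t len)
{
	size_t i, flips = 0;

	for (i = 0; i < len; i++)
		flips += __builtin_popcount(a[i] ^ b[i]);
	return flips;
}

int main(void)
{
	unsigned char written[4] = { 0xff, 0x00, 0xa5, 0x5a };
	unsigned char readback[4] = { 0xfe, 0x00, 0xa5, 0x7a };	/* two single-bit flips */

	printf("bitflips: %zu\n", count_bitflips(written, readback, sizeof(written)));
	return 0;	/* prints: bitflips: 2 */
}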
diff --git a/drivers/mtd/tests/torturetest.c b/drivers/mtd/tests/torturetest.c
index eeab96973cf..b55bc52a134 100644
--- a/drivers/mtd/tests/torturetest.c
+++ b/drivers/mtd/tests/torturetest.c
@@ -264,7 +264,9 @@ static int __init tort_init(void)
int i;
void *patt;
- mtdtest_erase_good_eraseblocks(mtd, bad_ebs, eb, ebcnt);
+ err = mtdtest_erase_good_eraseblocks(mtd, bad_ebs, eb, ebcnt);
+ if (err)
+ goto out;
/* Check if the eraseblocks contain only 0xFF bytes */
if (check) {
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 7cf8f4ac281..48e62a34f7f 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -59,7 +59,7 @@ config NET_DSA_BCM_SF2
depends on HAS_IOMEM
select NET_DSA
select NET_DSA_TAG_BRCM
- select FIXED_PHY if NET_DSA_BCM_SF2=y
+ select FIXED_PHY
select BCM7XXX_PHY
select MDIO_BCM_UNIMAC
---help---
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 888247ad906..41a3c980442 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -64,7 +64,7 @@ config BCMGENET
tristate "Broadcom GENET internal MAC support"
select MII
select PHYLIB
- select FIXED_PHY if BCMGENET=y
+ select FIXED_PHY
select BCM7XXX_PHY
help
This driver supports the built-in Ethernet MACs found in the
@@ -155,7 +155,7 @@ config SYSTEMPORT
depends on OF
select MII
select PHYLIB
- select FIXED_PHY if SYSTEMPORT=y
+ select FIXED_PHY
help
This driver supports the built-in Ethernet MACs found in the
Broadcom BCM7xxx Set Top Box family chipset using an internal
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 691f0bf09ee..9f5e38769a2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -13256,7 +13256,7 @@ static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
return -EFAULT;
}
- DP(BNX2X_MSG_PTP, "Configrued val = %d, period = %d\n", best_val,
+ DP(BNX2X_MSG_PTP, "Configured val = %d, period = %d\n", best_val,
best_period);
return 0;
@@ -14784,7 +14784,7 @@ static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr)
-EFAULT : 0;
}
-/* Configrues HW for PTP */
+/* Configures HW for PTP */
static int bnx2x_configure_ptp(struct bnx2x *bp)
{
int rc, port = BP_PORT(bp);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index b0779d77334..6fe547c93e7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -7549,7 +7549,7 @@ Theotherbitsarereservedandshouldbezero*/
#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05a6
#define IGU_REG_RESERVED_UPPER 0x05ff
-/* Fields of IGU PF CONFIGRATION REGISTER */
+/* Fields of IGU PF CONFIGURATION REGISTER */
#define IGU_PF_CONF_FUNC_EN (0x1<<0) /* function enable */
#define IGU_PF_CONF_MSI_MSIX_EN (0x1<<1) /* MSI/MSIX enable */
#define IGU_PF_CONF_INT_LINE_EN (0x1<<2) /* INT enable */
@@ -7557,7 +7557,7 @@ Theotherbitsarereservedandshouldbezero*/
#define IGU_PF_CONF_SINGLE_ISR_EN (0x1<<4) /* single ISR mode enable */
#define IGU_PF_CONF_SIMD_MODE (0x1<<5) /* simd all ones mode */
-/* Fields of IGU VF CONFIGRATION REGISTER */
+/* Fields of IGU VF CONFIGURATION REGISTER */
#define IGU_VF_CONF_FUNC_EN (0x1<<0) /* function enable */
#define IGU_VF_CONF_MSI_MSIX_EN (0x1<<1) /* MSI/MSIX enable */
#define IGU_VF_CONF_PARENT_MASK (0x3<<2) /* Parent PF */
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 06dea3dd463..3767271c766 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -2160,7 +2160,7 @@ static int __init macb_probe(struct platform_device *pdev)
int err = -ENXIO;
const char *mac;
void __iomem *mem;
- unsigned int hw_q, queue_mask, q, num_queues, q_irq = 0;
+ unsigned int hw_q, queue_mask, q, num_queues;
struct clk *pclk, *hclk, *tx_clk;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2235,11 +2235,11 @@ static int __init macb_probe(struct platform_device *pdev)
* register mapping but we don't want to test the queue index then
* compute the corresponding register offset at run time.
*/
- for (hw_q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
+ for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
if (!(queue_mask & (1 << hw_q)))
continue;
- queue = &bp->queues[q_irq];
+ queue = &bp->queues[q];
queue->bp = bp;
if (hw_q) {
queue->ISR = GEM_ISR(hw_q - 1);
@@ -2261,18 +2261,18 @@ static int __init macb_probe(struct platform_device *pdev)
* must remove the optional gaps that could exist in the
* hardware queue mask.
*/
- queue->irq = platform_get_irq(pdev, q_irq);
+ queue->irq = platform_get_irq(pdev, q);
err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
0, dev->name, queue);
if (err) {
dev_err(&pdev->dev,
"Unable to request IRQ %d (error %d)\n",
queue->irq, err);
- goto err_out_free_irq;
+ goto err_out_free_netdev;
}
INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
- q_irq++;
+ q++;
}
dev->irq = bp->queues[0].irq;
@@ -2350,7 +2350,7 @@ static int __init macb_probe(struct platform_device *pdev)
err = register_netdev(dev);
if (err) {
dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
- goto err_out_free_irq;
+ goto err_out_free_netdev;
}
err = macb_mii_init(bp);
@@ -2373,9 +2373,7 @@ static int __init macb_probe(struct platform_device *pdev)
err_out_unregister_netdev:
unregister_netdev(dev);
-err_out_free_irq:
- for (q = 0, queue = bp->queues; q < q_irq; ++q, ++queue)
- devm_free_irq(&pdev->dev, queue->irq, queue);
+err_out_free_netdev:
free_netdev(dev);
err_out_disable_clocks:
if (!IS_ERR(tx_clk))
@@ -2392,8 +2390,6 @@ static int __exit macb_remove(struct platform_device *pdev)
{
struct net_device *dev;
struct macb *bp;
- struct macb_queue *queue;
- unsigned int q;
dev = platform_get_drvdata(pdev);
@@ -2405,14 +2401,11 @@ static int __exit macb_remove(struct platform_device *pdev)
kfree(bp->mii_bus->irq);
mdiobus_free(bp->mii_bus);
unregister_netdev(dev);
- queue = bp->queues;
- for (q = 0; q < bp->num_queues; ++q, ++queue)
- devm_free_irq(&pdev->dev, queue->irq, queue);
- free_netdev(dev);
if (!IS_ERR(bp->tx_clk))
clk_disable_unprepare(bp->tx_clk);
clk_disable_unprepare(bp->hclk);
clk_disable_unprepare(bp->pclk);
+ free_netdev(dev);
}
return 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 28d04153f99..c132d903072 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2376,7 +2376,7 @@ const char *t4_get_port_type_description(enum fw_port_type port_type)
"KR/KX",
"KR/KX/KX4",
"R QSFP_10G",
- "",
+ "R QSA",
"R QSFP",
"R BP40_BA",
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 291b6f21970..7c0aec85137 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -2470,8 +2470,8 @@ enum fw_port_type {
FW_PORT_TYPE_BP_AP,
FW_PORT_TYPE_BP4_AP,
FW_PORT_TYPE_QSFP_10G,
- FW_PORT_TYPE_QSFP,
FW_PORT_TYPE_QSA,
+ FW_PORT_TYPE_QSFP,
FW_PORT_TYPE_BP40_BA,
FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_M
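Swapping FW_PORT_TYPE_QSA ahead of FW_PORT_TYPE_QSFP matters because enumerators take consecutive values, so declaration order fixes which numeric id (and which entry in the description table above) each name resolves to. A small illustration; the starting value is assumed, purely for the example:

#include <stdio.h>

/* Starting value chosen arbitrarily for illustration. */
enum port_old { OLD_QSFP_10G = 13, OLD_QSFP, OLD_QSA };
enum port_new { NEW_QSFP_10G = 13, NEW_QSA, NEW_QSFP };

int main(void)
{
	printf("before: QSFP=%d QSA=%d\n", OLD_QSFP, OLD_QSA);	/* 14, 15 */
	printf("after:  QSA=%d QSFP=%d\n", NEW_QSA, NEW_QSFP);	/* 14, 15 */
	return 0;
}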
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index b2427928eb1..d1c025fd972 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -60,6 +60,7 @@
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
+#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/string.h>
@@ -238,13 +239,13 @@ writereg(struct net_device *dev, u16 regno, u16 value)
static int __init
wait_eeprom_ready(struct net_device *dev)
{
- int timeout = jiffies;
+ unsigned long timeout = jiffies;
/* check to see if the EEPROM is ready,
* a timeout is used just in case EEPROM is ready when
* SI_BUSY in the PP_SelfST is clear
*/
while (readreg(dev, PP_SelfST) & SI_BUSY)
- if (jiffies - timeout >= 40)
+ if (time_after_eq(jiffies, timeout + 40))
return -1;
return 0;
}
@@ -485,7 +486,7 @@ control_dc_dc(struct net_device *dev, int on_not_off)
{
struct net_local *lp = netdev_priv(dev);
unsigned int selfcontrol;
- int timenow = jiffies;
+ unsigned long timenow = jiffies;
/* control the DC to DC convertor in the SelfControl register.
* Note: This is hooked up to a general purpose pin, might not
* always be a DC to DC convertor.
@@ -499,7 +500,7 @@ control_dc_dc(struct net_device *dev, int on_not_off)
writereg(dev, PP_SelfCTL, selfcontrol);
/* Wait for the DC/DC converter to power up - 500ms */
- while (jiffies - timenow < HZ)
+ while (time_before(jiffies, timenow + HZ))
;
}
@@ -514,7 +515,7 @@ send_test_pkt(struct net_device *dev)
0, 0, /* DSAP=0 & SSAP=0 fields */
0xf3, 0 /* Control (Test Req + P bit set) */
};
- long timenow = jiffies;
+ unsigned long timenow = jiffies;
writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) | SERIAL_TX_ON);
@@ -525,10 +526,10 @@ send_test_pkt(struct net_device *dev)
iowrite16(ETH_ZLEN, lp->virt_addr + TX_LEN_PORT);
/* Test to see if the chip has allocated memory for the packet */
- while (jiffies - timenow < 5)
+ while (time_before(jiffies, timenow + 5))
if (readreg(dev, PP_BusST) & READY_FOR_TX_NOW)
break;
- if (jiffies - timenow >= 5)
+ if (time_after_eq(jiffies, timenow + 5))
return 0; /* this shouldn't happen */
/* Write the contents of the packet */
@@ -536,7 +537,7 @@ send_test_pkt(struct net_device *dev)
cs89_dbg(1, debug, "Sending test packet ");
/* wait a couple of jiffies for packet to be received */
- for (timenow = jiffies; jiffies - timenow < 3;)
+ for (timenow = jiffies; time_before(jiffies, timenow + 3);)
;
if ((readreg(dev, PP_TxEvent) & TX_SEND_OK_BITS) == TX_OK) {
cs89_dbg(1, cont, "succeeded\n");
@@ -556,7 +557,7 @@ static int
detect_tp(struct net_device *dev)
{
struct net_local *lp = netdev_priv(dev);
- int timenow = jiffies;
+ unsigned long timenow = jiffies;
int fdx;
cs89_dbg(1, debug, "%s: Attempting TP\n", dev->name);
@@ -574,7 +575,7 @@ detect_tp(struct net_device *dev)
/* Delay for the hardware to work out if the TP cable is present
* - 150ms
*/
- for (timenow = jiffies; jiffies - timenow < 15;)
+ for (timenow = jiffies; time_before(jiffies, timenow + 15);)
;
if ((readreg(dev, PP_LineST) & LINK_OK) == 0)
return DETECTED_NONE;
@@ -618,7 +619,7 @@ detect_tp(struct net_device *dev)
if ((lp->auto_neg_cnf & AUTO_NEG_BITS) == AUTO_NEG_ENABLE) {
pr_info("%s: negotiating duplex...\n", dev->name);
while (readreg(dev, PP_AutoNegST) & AUTO_NEG_BUSY) {
- if (jiffies - timenow > 4000) {
+ if (time_after(jiffies, timenow + 4000)) {
pr_err("**** Full / half duplex auto-negotiation timed out ****\n");
break;
}
@@ -1271,7 +1272,7 @@ static void __init reset_chip(struct net_device *dev)
{
#if !defined(CONFIG_MACH_MX31ADS)
struct net_local *lp = netdev_priv(dev);
- int reset_start_time;
+ unsigned long reset_start_time;
writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET);
@@ -1294,7 +1295,7 @@ static void __init reset_chip(struct net_device *dev)
/* Wait until the chip is reset */
reset_start_time = jiffies;
while ((readreg(dev, PP_SelfST) & INIT_DONE) == 0 &&
- jiffies - reset_start_time < 2)
+ time_before(jiffies, reset_start_time + 2))
;
#endif /* !CONFIG_MACH_MX31ADS */
}
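The cs89x0 hunks above replace open-coded jiffies arithmetic (and int timestamps) with unsigned long plus the time_before()/time_after() helpers, which remain correct when the tick counter wraps because they compare a signed difference. A minimal user-space sketch of that idiom, using a 32-bit counter for brevity:

#include <stdio.h>
#include <stdint.h>

/* Wrap-safe "a is after b": the idea behind the kernel's time_after(). */
static int after(uint32_t a, uint32_t b)
{
	return (int32_t)(b - a) < 0;
}

int main(void)
{
	uint32_t start   = 0xfffffff0u;		/* counter is about to wrap */
	uint32_t timeout = start + 40;		/* wraps to a small value */
	uint32_t now     = start + 8;		/* only 8 ticks have elapsed */

	/* The naive comparison claims the timeout already expired; the
	 * signed-difference test correctly says it has not. */
	printf("naive: %d  wrap-safe: %d\n", now >= timeout, after(now, timeout));
	return 0;	/* prints: naive: 1  wrap-safe: 0 */
}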
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 2aacd473105..196073110e3 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -3138,6 +3138,7 @@ static void be_disable_vxlan_offloads(struct be_adapter *adapter)
netdev->hw_enc_features = 0;
netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
+ netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
#endif
@@ -4429,6 +4430,7 @@ static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_GSO_UDP_TUNNEL;
netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+ netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
be16_to_cpu(port));
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index ebf76c496e7..5ebdf8dc8a3 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1558,20 +1558,21 @@ fec_enet_interrupt(int irq, void *dev_id)
{
struct net_device *ndev = dev_id;
struct fec_enet_private *fep = netdev_priv(ndev);
- const unsigned napi_mask = FEC_ENET_RXF | FEC_ENET_TXF;
uint int_events;
irqreturn_t ret = IRQ_NONE;
int_events = readl(fep->hwp + FEC_IEVENT);
- writel(int_events & ~napi_mask, fep->hwp + FEC_IEVENT);
+ writel(int_events, fep->hwp + FEC_IEVENT);
fec_enet_collect_events(fep, int_events);
- if (int_events & napi_mask) {
+ if (fep->work_tx || fep->work_rx) {
ret = IRQ_HANDLED;
- /* Disable the NAPI interrupts */
- writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
- napi_schedule(&fep->napi);
+ if (napi_schedule_prep(&fep->napi)) {
+ /* Disable the NAPI interrupts */
+ writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
+ __napi_schedule(&fep->napi);
+ }
}
if (int_events & FEC_ENET_MII) {
@@ -1591,12 +1592,6 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
struct fec_enet_private *fep = netdev_priv(ndev);
int pkts;
- /*
- * Clear any pending transmit or receive interrupts before
- * processing the rings to avoid racing with the hardware.
- */
- writel(FEC_ENET_RXF | FEC_ENET_TXF, fep->hwp + FEC_IEVENT);
-
pkts = fec_enet_rx(ndev, budget);
fec_enet_tx(ndev);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 0a7ea4c5f9d..a5f2660d552 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -7549,6 +7549,11 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
return -EOPNOTSUPP;
+ if (vid) {
+ pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
+ return -EINVAL;
+ }
+
/* Hardware does not support aging addresses so if a
* ndm_state is given only allow permanent addresses
*/
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 6ff214de111..190cbd931f6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1569,8 +1569,15 @@ int mlx4_en_start_port(struct net_device *dev)
mlx4_en_free_affinity_hint(priv, i);
goto cq_err;
}
- for (j = 0; j < cq->size; j++)
- cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
+
+ for (j = 0; j < cq->size; j++) {
+ struct mlx4_cqe *cqe = NULL;
+
+ cqe = mlx4_en_get_cqe(cq->buf, j, priv->cqe_size) +
+ priv->cqe_factor;
+ cqe->owner_sr_opcode = MLX4_CQE_OWNER_MASK;
+ }
+
err = mlx4_en_set_cq_moder(priv, cq);
if (err) {
en_err(priv, "Failed setting cq moderation parameters\n");
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index ef3b95bac2a..982861d1df4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -787,11 +787,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
field = 3;
dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
- mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
- dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
} else {
dev_cap->bf_reg_size = 0;
- mlx4_dbg(dev, "BlueFlame not available\n");
}
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET);
@@ -902,9 +899,6 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
goto out;
}
- mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
- dev_cap->bmme_flags, dev_cap->reserved_lkey);
-
/*
* Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
* we can't use any EQs whose doorbell falls on that page,
@@ -916,6 +910,21 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
else
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SYS_EQS;
+out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+void mlx4_dev_cap_dump(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
+{
+ if (dev_cap->bf_reg_size > 0)
+ mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
+ dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
+ else
+ mlx4_dbg(dev, "BlueFlame not available\n");
+
+ mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
+ dev_cap->bmme_flags, dev_cap->reserved_lkey);
mlx4_dbg(dev, "Max ICM size %lld MB\n",
(unsigned long long) dev_cap->max_icm_sz >> 20);
mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
@@ -949,13 +958,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->dmfs_high_rate_qpn_base);
mlx4_dbg(dev, "DMFS high rate steer QPn range: %d\n",
dev_cap->dmfs_high_rate_qpn_range);
-
dump_dev_cap_flags(dev, dev_cap->flags);
dump_dev_cap_flags2(dev, dev_cap->flags2);
-
-out:
- mlx4_free_cmd_mailbox(dev, mailbox);
- return err;
}
int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_cap)
@@ -1848,8 +1852,8 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
/* CX3 is capable of extending CQEs\EQEs to strides larger than 64B */
MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_STRIDE_OFFSET);
if (byte_field) {
- param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED;
- param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED;
+ param->dev_cap_enabled |= MLX4_DEV_CAP_EQE_STRIDE_ENABLED;
+ param->dev_cap_enabled |= MLX4_DEV_CAP_CQE_STRIDE_ENABLED;
param->cqe_size = 1 << ((byte_field &
MLX4_CQE_SIZE_MASK_STRIDE) + 5);
param->eqe_size = 1 << (((byte_field &
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 794e2826609..62562b60fa8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -224,6 +224,7 @@ struct mlx4_set_ib_param {
u32 cap_mask;
};
+void mlx4_dev_cap_dump(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap);
int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap);
int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_cap);
int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index e25436b24ce..943cbd47d83 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -171,9 +171,9 @@ int mlx4_check_port_params(struct mlx4_dev *dev,
{
int i;
- for (i = 0; i < dev->caps.num_ports - 1; i++) {
- if (port_type[i] != port_type[i + 1]) {
- if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
+ for (i = 0; i < dev->caps.num_ports - 1; i++) {
+ if (port_type[i] != port_type[i + 1]) {
mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
return -EINVAL;
}
@@ -305,6 +305,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
return err;
}
+ mlx4_dev_cap_dump(dev, dev_cap);
if (dev_cap->min_page_sz > PAGE_SIZE) {
mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
@@ -2488,41 +2489,42 @@ static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
u8 total_vfs, int existing_vfs)
{
u64 dev_flags = dev->flags;
+ int err = 0;
- dev->dev_vfs = kzalloc(
- total_vfs * sizeof(*dev->dev_vfs),
- GFP_KERNEL);
+ atomic_inc(&pf_loading);
+ if (dev->flags & MLX4_FLAG_SRIOV) {
+ if (existing_vfs != total_vfs) {
+ mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n",
+ existing_vfs, total_vfs);
+ total_vfs = existing_vfs;
+ }
+ }
+
+ dev->dev_vfs = kzalloc(total_vfs * sizeof(*dev->dev_vfs), GFP_KERNEL);
if (NULL == dev->dev_vfs) {
mlx4_err(dev, "Failed to allocate memory for VFs\n");
goto disable_sriov;
- } else if (!(dev->flags & MLX4_FLAG_SRIOV)) {
- int err = 0;
-
- atomic_inc(&pf_loading);
- if (existing_vfs) {
- if (existing_vfs != total_vfs)
- mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n",
- existing_vfs, total_vfs);
- } else {
- mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs);
- err = pci_enable_sriov(pdev, total_vfs);
- }
- if (err) {
- mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
- err);
- atomic_dec(&pf_loading);
- goto disable_sriov;
- } else {
- mlx4_warn(dev, "Running in master mode\n");
- dev_flags |= MLX4_FLAG_SRIOV |
- MLX4_FLAG_MASTER;
- dev_flags &= ~MLX4_FLAG_SLAVE;
- dev->num_vfs = total_vfs;
- }
+ }
+
+ if (!(dev->flags & MLX4_FLAG_SRIOV)) {
+ mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs);
+ err = pci_enable_sriov(pdev, total_vfs);
+ }
+ if (err) {
+ mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
+ err);
+ goto disable_sriov;
+ } else {
+ mlx4_warn(dev, "Running in master mode\n");
+ dev_flags |= MLX4_FLAG_SRIOV |
+ MLX4_FLAG_MASTER;
+ dev_flags &= ~MLX4_FLAG_SLAVE;
+ dev->num_vfs = total_vfs;
}
return dev_flags;
disable_sriov:
+ atomic_dec(&pf_loading);
dev->num_vfs = 0;
kfree(dev->dev_vfs);
return dev_flags & ~MLX4_FLAG_MASTER;
@@ -2606,8 +2608,10 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
}
if (total_vfs) {
- existing_vfs = pci_num_vf(pdev);
dev->flags = MLX4_FLAG_MASTER;
+ existing_vfs = pci_num_vf(pdev);
+ if (existing_vfs)
+ dev->flags |= MLX4_FLAG_SRIOV;
dev->num_vfs = total_vfs;
}
}
@@ -2643,6 +2647,7 @@ slave_start:
}
if (mlx4_is_master(dev)) {
+ /* when we hit the goto slave_start below, dev_cap is already initialized */
if (!dev_cap) {
dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL);
@@ -2849,6 +2854,7 @@ slave_start:
if (mlx4_is_master(dev) && dev->num_vfs)
atomic_dec(&pf_loading);
+ kfree(dev_cap);
return 0;
err_port:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index ab684463780..da82991239a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -157,6 +157,8 @@ static const char *eqe_type_str(u8 type)
return "MLX5_EVENT_TYPE_CMD";
case MLX5_EVENT_TYPE_PAGE_REQUEST:
return "MLX5_EVENT_TYPE_PAGE_REQUEST";
+ case MLX5_EVENT_TYPE_PAGE_FAULT:
+ return "MLX5_EVENT_TYPE_PAGE_FAULT";
default:
return "Unrecognized event";
}
@@ -279,6 +281,11 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
}
break;
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ case MLX5_EVENT_TYPE_PAGE_FAULT:
+ mlx5_eq_pagefault(dev, eqe);
+ break;
+#endif
default:
mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
@@ -446,8 +453,12 @@ void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
int mlx5_start_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = &dev->priv.eq_table;
+ u32 async_event_mask = MLX5_ASYNC_EVENT_MASK;
int err;
+ if (dev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG)
+ async_event_mask |= (1ull << MLX5_EVENT_TYPE_PAGE_FAULT);
+
err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
"mlx5_cmd_eq", &dev->priv.uuari.uars[0]);
@@ -459,7 +470,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
mlx5_cmd_use_events(dev);
err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
- MLX5_NUM_ASYNC_EQE, MLX5_ASYNC_EVENT_MASK,
+ MLX5_NUM_ASYNC_EQE, async_event_mask,
"mlx5_async_eq", &dev->priv.uuari.uars[0]);
if (err) {
mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 087c4c797de..06f9036acd8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -69,6 +69,46 @@ int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, struct mlx5_caps *caps)
return mlx5_core_get_caps(dev, caps, HCA_CAP_OPMOD_GET_CUR);
}
+int mlx5_query_odp_caps(struct mlx5_core_dev *dev, struct mlx5_odp_caps *caps)
+{
+ u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
+ int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *out;
+ int err;
+
+ if (!(dev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG))
+ return -ENOTSUPP;
+
+ memset(in, 0, sizeof(in));
+ out = kzalloc(out_sz, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+ MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+ MLX5_SET(query_hca_cap_in, in, op_mod, HCA_CAP_OPMOD_GET_ODP_CUR);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
+ if (err)
+ goto out;
+
+ err = mlx5_cmd_status_to_err_v2(out);
+ if (err) {
+ mlx5_core_warn(dev, "query cur hca ODP caps failed, %d\n", err);
+ goto out;
+ }
+
+ memcpy(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct),
+ sizeof(*caps));
+
+ mlx5_core_dbg(dev, "on-demand paging capabilities:\nrc: %08x\nuc: %08x\nud: %08x\n",
+ be32_to_cpu(caps->per_transport_caps.rc_odp_caps),
+ be32_to_cpu(caps->per_transport_caps.uc_odp_caps),
+ be32_to_cpu(caps->per_transport_caps.ud_odp_caps));
+
+out:
+ kfree(out);
+ return err;
+}
+EXPORT_SYMBOL(mlx5_query_odp_caps);
+
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev)
{
struct mlx5_cmd_init_hca_mbox_in in;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 5261a2b0da4..575d853dbe0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -88,6 +88,95 @@ void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
mlx5_core_put_rsc(common);
}
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
+{
+ struct mlx5_eqe_page_fault *pf_eqe = &eqe->data.page_fault;
+ int qpn = be32_to_cpu(pf_eqe->flags_qpn) & MLX5_QPN_MASK;
+ struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, qpn);
+ struct mlx5_core_qp *qp =
+ container_of(common, struct mlx5_core_qp, common);
+ struct mlx5_pagefault pfault;
+
+ if (!qp) {
+ mlx5_core_warn(dev, "ODP event for non-existent QP %06x\n",
+ qpn);
+ return;
+ }
+
+ pfault.event_subtype = eqe->sub_type;
+ pfault.flags = (be32_to_cpu(pf_eqe->flags_qpn) >> MLX5_QPN_BITS) &
+ (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE | MLX5_PFAULT_RDMA);
+ pfault.bytes_committed = be32_to_cpu(
+ pf_eqe->bytes_committed);
+
+ mlx5_core_dbg(dev,
+ "PAGE_FAULT: subtype: 0x%02x, flags: 0x%02x,\n",
+ eqe->sub_type, pfault.flags);
+
+ switch (eqe->sub_type) {
+ case MLX5_PFAULT_SUBTYPE_RDMA:
+ /* RDMA based event */
+ pfault.rdma.r_key =
+ be32_to_cpu(pf_eqe->rdma.r_key);
+ pfault.rdma.packet_size =
+ be16_to_cpu(pf_eqe->rdma.packet_length);
+ pfault.rdma.rdma_op_len =
+ be32_to_cpu(pf_eqe->rdma.rdma_op_len);
+ pfault.rdma.rdma_va =
+ be64_to_cpu(pf_eqe->rdma.rdma_va);
+ mlx5_core_dbg(dev,
+ "PAGE_FAULT: qpn: 0x%06x, r_key: 0x%08x,\n",
+ qpn, pfault.rdma.r_key);
+ mlx5_core_dbg(dev,
+ "PAGE_FAULT: rdma_op_len: 0x%08x,\n",
+ pfault.rdma.rdma_op_len);
+ mlx5_core_dbg(dev,
+ "PAGE_FAULT: rdma_va: 0x%016llx,\n",
+ pfault.rdma.rdma_va);
+ mlx5_core_dbg(dev,
+ "PAGE_FAULT: bytes_committed: 0x%06x\n",
+ pfault.bytes_committed);
+ break;
+
+ case MLX5_PFAULT_SUBTYPE_WQE:
+ /* WQE based event */
+ pfault.wqe.wqe_index =
+ be16_to_cpu(pf_eqe->wqe.wqe_index);
+ pfault.wqe.packet_size =
+ be16_to_cpu(pf_eqe->wqe.packet_length);
+ mlx5_core_dbg(dev,
+ "PAGE_FAULT: qpn: 0x%06x, wqe_index: 0x%04x,\n",
+ qpn, pfault.wqe.wqe_index);
+ mlx5_core_dbg(dev,
+ "PAGE_FAULT: bytes_committed: 0x%06x\n",
+ pfault.bytes_committed);
+ break;
+
+ default:
+ mlx5_core_warn(dev,
+ "Unsupported page fault event sub-type: 0x%02hhx, QP %06x\n",
+ eqe->sub_type, qpn);
+ /* Unsupported page faults should still be resolved by the
+ * page fault handler
+ */
+ }
+
+ if (qp->pfault_handler) {
+ qp->pfault_handler(qp, &pfault);
+ } else {
+ mlx5_core_err(dev,
+ "ODP event for QP %08x, without a fault handler in QP\n",
+ qpn);
+ /* Page fault will remain unresolved. QP will hang until it is
+ * destroyed
+ */
+ }
+
+ mlx5_core_put_rsc(common);
+}
+#endif
+
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp,
struct mlx5_create_qp_mbox_in *in,
@@ -322,3 +411,33 @@ int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
+ u8 flags, int error)
+{
+ struct mlx5_page_fault_resume_mbox_in in;
+ struct mlx5_page_fault_resume_mbox_out out;
+ int err;
+
+ memset(&in, 0, sizeof(in));
+ memset(&out, 0, sizeof(out));
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_PAGE_FAULT_RESUME);
+ in.hdr.opmod = 0;
+ flags &= (MLX5_PAGE_FAULT_RESUME_REQUESTOR |
+ MLX5_PAGE_FAULT_RESUME_WRITE |
+ MLX5_PAGE_FAULT_RESUME_RDMA);
+ flags |= (error ? MLX5_PAGE_FAULT_RESUME_ERROR : 0);
+ in.flags_qpn = cpu_to_be32((qpn & MLX5_QPN_MASK) |
+ (flags << MLX5_QPN_BITS));
+ err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+ if (err)
+ return err;
+
+ if (out.hdr.status)
+ err = mlx5_cmd_status_to_err(&out.hdr);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume);
+#endif
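The PAGE_FAULT_RESUME path above packs the 24-bit QPN and the resume flags into a single word via MLX5_QPN_MASK/MLX5_QPN_BITS, and the EQ page-fault handler unpacks the same field in the other direction. A sketch of that packing with assumed constant values (the QPN width is taken as 24 bits purely for illustration):

#include <stdio.h>
#include <stdint.h>

#define QPN_BITS	24			/* assumed value, for illustration */
#define QPN_MASK	((1u << QPN_BITS) - 1)

int main(void)
{
	uint32_t qpn   = 0x012345;
	uint32_t flags = 0x5;			/* illustrative flag combination */
	uint32_t flags_qpn = (qpn & QPN_MASK) | (flags << QPN_BITS);

	printf("packed:   0x%08x\n", flags_qpn);	/* 0x05012345 */
	printf("unpacked: qpn=0x%06x flags=0x%x\n",
	       flags_qpn & QPN_MASK, flags_qpn >> QPN_BITS);
	return 0;
}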
diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig
index 627926800ff..9468e64e600 100644
--- a/drivers/net/ethernet/smsc/Kconfig
+++ b/drivers/net/ethernet/smsc/Kconfig
@@ -39,7 +39,7 @@ config SMC91X
select CRC32
select MII
depends on (ARM || M32R || SUPERH || MIPS || BLACKFIN || \
- MN10300 || COLDFIRE || ARM64 || XTENSA || NIOS2)
+ MN10300 || COLDFIRE || ARM64 || XTENSA || NIOS2) && (!OF || GPIOLIB)
---help---
This is a driver for SMC's 91x series of Ethernet chipsets,
including the SMC91C94 and the SMC91C111. Say Y if you want it
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index 0e137751e76..056b358b4a7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -309,16 +309,16 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
if (IS_PHY_IF_MODE_GBIT(dwmac->interface)) {
const char *rs;
- dwmac->tx_retime_src = TX_RETIME_SRC_CLKGEN;
err = of_property_read_string(np, "st,tx-retime-src", &rs);
- if (err < 0)
+ if (err < 0) {
dev_warn(dev, "Use internal clock source\n");
-
- if (!strcasecmp(rs, "clk_125"))
+ dwmac->tx_retime_src = TX_RETIME_SRC_CLKGEN;
+ } else if (!strcasecmp(rs, "clk_125")) {
dwmac->tx_retime_src = TX_RETIME_SRC_CLK_125;
- else if (!strcasecmp(rs, "txclk"))
+ } else if (!strcasecmp(rs, "txclk")) {
dwmac->tx_retime_src = TX_RETIME_SRC_TXCLK;
+ }
dwmac->speed = SPEED_1000;
}
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 60f7ee5fafb..7df221788cd 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -46,16 +46,18 @@ struct macvtap_queue {
struct list_head next;
};
-#define MACVTAP_FEATURES (IFF_VNET_HDR | IFF_VNET_LE | IFF_MULTI_QUEUE)
+#define MACVTAP_FEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE)
+
+#define MACVTAP_VNET_LE 0x80000000
static inline u16 macvtap16_to_cpu(struct macvtap_queue *q, __virtio16 val)
{
- return __virtio16_to_cpu(q->flags & IFF_VNET_LE, val);
+ return __virtio16_to_cpu(q->flags & MACVTAP_VNET_LE, val);
}
static inline __virtio16 cpu_to_macvtap16(struct macvtap_queue *q, u16 val)
{
- return __cpu_to_virtio16(q->flags & IFF_VNET_LE, val);
+ return __cpu_to_virtio16(q->flags & MACVTAP_VNET_LE, val);
}
static struct proto macvtap_proto = {
@@ -999,7 +1001,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
void __user *argp = (void __user *)arg;
struct ifreq __user *ifr = argp;
unsigned int __user *up = argp;
- unsigned int u;
+ unsigned short u;
int __user *sp = argp;
int s;
int ret;
@@ -1014,7 +1016,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
if ((u & ~MACVTAP_FEATURES) != (IFF_NO_PI | IFF_TAP))
ret = -EINVAL;
else
- q->flags = u;
+ q->flags = (q->flags & ~MACVTAP_FEATURES) | u;
return ret;
@@ -1027,8 +1029,9 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
}
ret = 0;
+ u = q->flags;
if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
- put_user(q->flags, &ifr->ifr_flags))
+ put_user(u, &ifr->ifr_flags))
ret = -EFAULT;
macvtap_put_vlan(vlan);
rtnl_unlock();
@@ -1069,6 +1072,21 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
q->vnet_hdr_sz = s;
return 0;
+ case TUNGETVNETLE:
+ s = !!(q->flags & MACVTAP_VNET_LE);
+ if (put_user(s, sp))
+ return -EFAULT;
+ return 0;
+
+ case TUNSETVNETLE:
+ if (get_user(s, sp))
+ return -EFAULT;
+ if (s)
+ q->flags |= MACVTAP_VNET_LE;
+ else
+ q->flags &= ~MACVTAP_VNET_LE;
+ return 0;
+
case TUNSETOFFLOAD:
/* let the user check for future flags */
if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index b4b0f804e84..a3c251b79f3 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -119,8 +119,8 @@ config MICREL_PHY
Supports the KSZ9021, VSC8201, KS8001 PHYs.
config FIXED_PHY
- bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
- depends on PHYLIB=y
+ tristate "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
+ depends on PHYLIB
---help---
Adds the platform "fixed" MDIO Bus to cover the boards that use
PHYs that are not connected to the real MDIO bus.
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index eb3b18b5978..501ea7699a2 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -17,7 +17,7 @@ obj-$(CONFIG_BCM87XX_PHY) += bcm87xx.o
obj-$(CONFIG_ICPLUS_PHY) += icplus.o
obj-$(CONFIG_REALTEK_PHY) += realtek.o
obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o
-obj-$(CONFIG_FIXED_PHY) += fixed.o
+obj-$(CONFIG_FIXED_PHY) += fixed_phy.o
obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
obj-$(CONFIG_NATIONAL_PHY) += national.o
diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed_phy.c
index 3ad0e6e16c3..3ad0e6e16c3 100644
--- a/drivers/net/phy/fixed.c
+++ b/drivers/net/phy/fixed_phy.c
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a5cbf67517f..8c8dc16839a 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -110,9 +110,11 @@ do { \
* overload it to mean fasync when stored there.
*/
#define TUN_FASYNC IFF_ATTACH_QUEUE
+/* High bits in flags field are unused. */
+#define TUN_VNET_LE 0x80000000
#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
- IFF_VNET_LE | IFF_MULTI_QUEUE)
+ IFF_MULTI_QUEUE)
#define GOODCOPY_LEN 128
#define FLT_EXACT_COUNT 8
@@ -208,12 +210,12 @@ struct tun_struct {
static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
{
- return __virtio16_to_cpu(tun->flags & IFF_VNET_LE, val);
+ return __virtio16_to_cpu(tun->flags & TUN_VNET_LE, val);
}
static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
{
- return __cpu_to_virtio16(tun->flags & IFF_VNET_LE, val);
+ return __cpu_to_virtio16(tun->flags & TUN_VNET_LE, val);
}
static inline u32 tun_hashfn(u32 rxhash)
@@ -1843,6 +1845,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
int sndbuf;
int vnet_hdr_sz;
unsigned int ifindex;
+ int le;
int ret;
if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
@@ -2042,6 +2045,23 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
tun->vnet_hdr_sz = vnet_hdr_sz;
break;
+ case TUNGETVNETLE:
+ le = !!(tun->flags & TUN_VNET_LE);
+ if (put_user(le, (int __user *)argp))
+ ret = -EFAULT;
+ break;
+
+ case TUNSETVNETLE:
+ if (get_user(le, (int __user *)argp)) {
+ ret = -EFAULT;
+ break;
+ }
+ if (le)
+ tun->flags |= TUN_VNET_LE;
+ else
+ tun->flags &= ~TUN_VNET_LE;
+ break;
+
case TUNATTACHFILTER:
/* Can be set only for TAPs */
ret = -EINVAL;
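Both the macvtap and tun hunks above stop advertising IFF_VNET_LE as a user-visible feature flag and instead keep the little-endian setting in a private high bit (MACVTAP_VNET_LE / TUN_VNET_LE), toggled through the new TUNGETVNETLE/TUNSETVNETLE ioctls; since ifr_flags is only 16 bits wide, that bit cannot leak to or be set from the old interface. A small sketch of the flag bookkeeping (the feature mask value is made up for the example):

#include <stdio.h>

#define FEATURES	0x0000ffffu	/* made-up stand-in for the user-settable feature bits */
#define VNET_LE		0x80000000u	/* private high bit, as in the patch */

int main(void)
{
	unsigned int flags = VNET_LE | 0x0002u;	/* LE already enabled + one feature */
	unsigned short user = 0x1002;		/* new features requested via TUNSETIFF */

	/* Replace only the feature bits; the private LE bit survives. */
	flags = (flags & ~FEATURES) | user;
	printf("LE still set: %d, features: 0x%04x\n",
	       !!(flags & VNET_LE), flags & FEATURES);
	return 0;
}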
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index a104d7ac379..eb8584a9c49 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -316,7 +316,7 @@ static const u16 xmtfifo_sz[][NFIFO] = {
static const char * const fifo_names[] = {
"AC_BK", "AC_BE", "AC_VI", "AC_VO", "BCMC", "ATIM" };
#else
-static const char fifo_names[6][0];
+static const char fifo_names[6][1];
#endif
#ifdef DEBUG
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index b6ec51923b2..50033aa7c7d 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -381,18 +381,15 @@ static void prism2_pccard_genesis_reset(local_info_t *local, int hcr)
res = pcmcia_read_config_byte(hw_priv->link, CISREG_COR, &old_cor);
if (res != 0) {
- printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 1 "
- "(%d)\n", res);
+ printk(KERN_DEBUG "%s failed 1 (%d)\n", __func__, res);
return;
}
- printk(KERN_DEBUG "prism2_pccard_genesis_sreset: original COR %02x\n",
- old_cor);
+ printk(KERN_DEBUG "%s: original COR %02x\n", __func__, old_cor);
res = pcmcia_write_config_byte(hw_priv->link, CISREG_COR,
old_cor | COR_SOFT_RESET);
if (res != 0) {
- printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 2 "
- "(%d)\n", res);
+ printk(KERN_DEBUG "%s failed 2 (%d)\n", __func__, res);
return;
}
@@ -401,8 +398,7 @@ static void prism2_pccard_genesis_reset(local_info_t *local, int hcr)
/* Setup Genesis mode */
res = pcmcia_write_config_byte(hw_priv->link, CISREG_CCSR, hcr);
if (res != 0) {
- printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 3 "
- "(%d)\n", res);
+ printk(KERN_DEBUG "%s failed 3 (%d)\n", __func__, res);
return;
}
mdelay(10);
@@ -410,8 +406,7 @@ static void prism2_pccard_genesis_reset(local_info_t *local, int hcr)
res = pcmcia_write_config_byte(hw_priv->link, CISREG_COR,
old_cor & ~COR_SOFT_RESET);
if (res != 0) {
- printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 4 "
- "(%d)\n", res);
+ printk(KERN_DEBUG "%s failed 4 (%d)\n", __func__, res);
return;
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index d2ec5160bbf..5c646d5f7bb 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -955,6 +955,7 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
local_save_flags(flags);
local_irq_enable();
+ rtlhal->fw_ready = false;
rtlpriv->intf_ops->disable_aspm(hw);
rtstatus = _rtl92ce_init_mac(hw);
if (!rtstatus) {
@@ -971,6 +972,7 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
goto exit;
}
+ rtlhal->fw_ready = true;
rtlhal->last_hmeboxnum = 0;
rtl92c_phy_mac_config(hw);
/* because last function modify RCR, so we update
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 873363acbac..551321728ae 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -1592,7 +1592,7 @@ void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
}
-bool usb_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb)
+static bool usb_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb)
{
/* Currently nothing happens here.
* Traffic stops after some seconds in WPA2 802.11n mode.
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/rtlwifi/rtl8821ae/dm.c
index 9be10610992..ba30b0d250f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/dm.c
@@ -2078,8 +2078,7 @@ void rtl8821ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
if (rtldm->tx_rate != 0xFF)
tx_rate = rtl8821ae_hw_rate_to_mrate(hw, rtldm->tx_rate);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "===>rtl8812ae_dm_txpwr_track_set_pwr\n");
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "===>%s\n", __func__);
if (tx_rate != 0xFF) { /* Mimic Modify High Rate BBSwing Limit.*/
/*CCK*/
@@ -2128,7 +2127,7 @@ void rtl8821ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
if (method == BBSWING) {
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "===>rtl8812ae_dm_txpwr_track_set_pwr\n");
+ "===>%s\n", __func__);
if (rf_path == RF90_PATH_A) {
final_swing_idx[RF90_PATH_A] =
(rtldm->ofdm_index[RF90_PATH_A] >
@@ -2260,7 +2259,8 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
rtldm->txpower_trackinginit = true;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "===>rtl8812ae_dm_txpower_tracking_callback_thermalmeter,\n pDM_Odm->BbSwingIdxCckBase: %d,pDM_Odm->BbSwingIdxOfdmBase[A]:%d, pDM_Odm->DefaultOfdmIndex: %d\n",
+ "===>%s,\n pDM_Odm->BbSwingIdxCckBase: %d,pDM_Odm->BbSwingIdxOfdmBase[A]:%d, pDM_Odm->DefaultOfdmIndex: %d\n",
+ __func__,
rtldm->swing_idx_cck_base,
rtldm->swing_idx_ofdm_base[RF90_PATH_A],
rtldm->default_ofdm_index);
@@ -2539,8 +2539,7 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
}
}
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "<===rtl8812ae_dm_txpower_tracking_callback_thermalmeter\n");
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "<===%s\n", __func__);
}
void rtl8821ae_dm_check_txpower_tracking_thermalmeter(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index 73a49b86803..07b94eda960 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -129,7 +129,7 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr
r = zd_ioread16v_locked(chip, v16, a16, count16);
if (r) {
dev_dbg_f(zd_chip_dev(chip),
- "error: zd_ioread16v_locked. Error number %d\n", r);
+ "error: %s. Error number %d\n", __func__, r);
return r;
}
@@ -256,8 +256,8 @@ int zd_iowrite32a_locked(struct zd_chip *chip,
if (r) {
zd_usb_iowrite16v_async_end(&chip->usb, 0);
dev_dbg_f(zd_chip_dev(chip),
- "error _zd_iowrite32v_locked."
- " Error number %d\n", r);
+ "error _%s. Error number %d\n", __func__,
+ r);
return r;
}
}
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 083ecc93fe5..5f1fda44882 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -230,6 +230,8 @@ struct xenvif {
*/
bool disabled;
unsigned long status;
+ unsigned long drain_timeout;
+ unsigned long stall_timeout;
/* Queues */
struct xenvif_queue *queues;
@@ -328,7 +330,7 @@ irqreturn_t xenvif_interrupt(int irq, void *dev_id);
extern bool separate_tx_rx_irq;
extern unsigned int rx_drain_timeout_msecs;
-extern unsigned int rx_drain_timeout_jiffies;
+extern unsigned int rx_stall_timeout_msecs;
extern unsigned int xenvif_max_queues;
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index a6a32d337bb..9259a732e8a 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -166,7 +166,7 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
goto drop;
cb = XENVIF_RX_CB(skb);
- cb->expires = jiffies + rx_drain_timeout_jiffies;
+ cb->expires = jiffies + vif->drain_timeout;
xenvif_rx_queue_tail(queue, skb);
xenvif_kick_thread(queue);
@@ -414,6 +414,8 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
vif->ip_csum = 1;
vif->dev = dev;
vif->disabled = false;
+ vif->drain_timeout = msecs_to_jiffies(rx_drain_timeout_msecs);
+ vif->stall_timeout = msecs_to_jiffies(rx_stall_timeout_msecs);
/* Start out with no queues. */
vif->queues = NULL;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 4a509f715fe..908e65e9b82 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -60,14 +60,12 @@ module_param(separate_tx_rx_irq, bool, 0644);
*/
unsigned int rx_drain_timeout_msecs = 10000;
module_param(rx_drain_timeout_msecs, uint, 0444);
-unsigned int rx_drain_timeout_jiffies;
/* The length of time before the frontend is considered unresponsive
* because it isn't providing Rx slots.
*/
-static unsigned int rx_stall_timeout_msecs = 60000;
+unsigned int rx_stall_timeout_msecs = 60000;
module_param(rx_stall_timeout_msecs, uint, 0444);
-static unsigned int rx_stall_timeout_jiffies;
unsigned int xenvif_max_queues;
module_param_named(max_queues, xenvif_max_queues, uint, 0644);
@@ -2020,7 +2018,7 @@ static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
return !queue->stalled
&& prod - cons < XEN_NETBK_RX_SLOTS_MAX
&& time_after(jiffies,
- queue->last_rx_time + rx_stall_timeout_jiffies);
+ queue->last_rx_time + queue->vif->stall_timeout);
}
static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
@@ -2038,8 +2036,9 @@ static bool xenvif_have_rx_work(struct xenvif_queue *queue)
{
return (!skb_queue_empty(&queue->rx_queue)
&& xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX))
- || xenvif_rx_queue_stalled(queue)
- || xenvif_rx_queue_ready(queue)
+ || (queue->vif->stall_timeout &&
+ (xenvif_rx_queue_stalled(queue)
+ || xenvif_rx_queue_ready(queue)))
|| kthread_should_stop()
|| queue->vif->disabled;
}
@@ -2092,6 +2091,9 @@ int xenvif_kthread_guest_rx(void *data)
struct xenvif_queue *queue = data;
struct xenvif *vif = queue->vif;
+ if (!vif->stall_timeout)
+ xenvif_queue_carrier_on(queue);
+
for (;;) {
xenvif_wait_for_rx_work(queue);
@@ -2118,10 +2120,12 @@ int xenvif_kthread_guest_rx(void *data)
* while it's probably not responsive, drop the
* carrier so packets are dropped earlier.
*/
- if (xenvif_rx_queue_stalled(queue))
- xenvif_queue_carrier_off(queue);
- else if (xenvif_rx_queue_ready(queue))
- xenvif_queue_carrier_on(queue);
+ if (vif->stall_timeout) {
+ if (xenvif_rx_queue_stalled(queue))
+ xenvif_queue_carrier_off(queue);
+ else if (xenvif_rx_queue_ready(queue))
+ xenvif_queue_carrier_on(queue);
+ }
/* Queued packets may have foreign pages from other
* domains. These cannot be queued indefinitely as
@@ -2192,9 +2196,6 @@ static int __init netback_init(void)
if (rc)
goto failed_init;
- rx_drain_timeout_jiffies = msecs_to_jiffies(rx_drain_timeout_msecs);
- rx_stall_timeout_jiffies = msecs_to_jiffies(rx_stall_timeout_msecs);
-
#ifdef CONFIG_DEBUG_FS
xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
if (IS_ERR_OR_NULL(xen_netback_dbg_root))
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index d44cd19169b..efbaf2ae199 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -887,9 +887,15 @@ static int read_xenbus_vif_flags(struct backend_info *be)
return -EOPNOTSUPP;
if (xenbus_scanf(XBT_NIL, dev->otherend,
- "feature-rx-notify", "%d", &val) < 0 || val == 0) {
- xenbus_dev_fatal(dev, -EINVAL, "feature-rx-notify is mandatory");
- return -EINVAL;
+ "feature-rx-notify", "%d", &val) < 0)
+ val = 0;
+ if (!val) {
+ /* - Reduce drain timeout to poll more frequently for
+ * Rx requests.
+ * - Disable Rx stall detection.
+ */
+ be->vif->drain_timeout = msecs_to_jiffies(30);
+ be->vif->stall_timeout = 0;
}
if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg",
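(Illustrative sketch, not part of the patch.) The xen-netback hunks above replace the global jiffies timeouts with per-vif drain/stall timeouts and, when a frontend does not advertise feature-rx-notify, fall back to a 30 ms drain timeout with stall detection disabled. The standalone C below only shows the magnitudes involved; HZ = 250 and the rounding in msecs_to_jiffies_sketch() are assumptions, not the kernel's implementation.

#include <stdio.h>

#define HZ 250	/* assumed tick rate for this sketch */

static unsigned long msecs_to_jiffies_sketch(unsigned int ms)
{
	return ((unsigned long)ms * HZ + 999) / 1000;	/* round up */
}

int main(void)
{
	unsigned int rx_drain_timeout_msecs = 10000;	/* module default */
	unsigned int rx_stall_timeout_msecs = 60000;	/* module default */

	/* Frontend advertises feature-rx-notify: module defaults apply. */
	printf("drain = %lu jiffies, stall = %lu jiffies\n",
	       msecs_to_jiffies_sketch(rx_drain_timeout_msecs),
	       msecs_to_jiffies_sketch(rx_stall_timeout_msecs));

	/* No feature-rx-notify: poll every 30 ms, stall detection off. */
	printf("drain = %lu jiffies, stall = 0 (disabled)\n",
	       msecs_to_jiffies_sketch(30));
	return 0;
}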
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 2f0a9ce9ff7..22bcb4e12e2 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -977,7 +977,6 @@ static int xennet_poll(struct napi_struct *napi, int budget)
struct sk_buff_head rxq;
struct sk_buff_head errq;
struct sk_buff_head tmpq;
- unsigned long flags;
int err;
spin_lock(&queue->rx_lock);
@@ -1050,15 +1049,11 @@ err:
if (work_done < budget) {
int more_to_do = 0;
- napi_gro_flush(napi, false);
-
- local_irq_save(flags);
+ napi_complete(napi);
RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
- if (!more_to_do)
- __napi_complete(napi);
-
- local_irq_restore(flags);
+ if (more_to_do)
+ napi_schedule(napi);
}
spin_unlock(&queue->rx_lock);
diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c
index d2ccd289064..aa6a333b2ea 100644
--- a/drivers/nfc/trf7970a.c
+++ b/drivers/nfc/trf7970a.c
@@ -2154,7 +2154,7 @@ static int trf7970a_resume(struct device *dev)
}
#endif
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static int trf7970a_pm_runtime_suspend(struct device *dev)
{
struct spi_device *spi = container_of(dev, struct spi_device, dev);
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index cd87a36495b..5b33c6a2180 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -19,6 +19,7 @@
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
@@ -164,6 +165,9 @@ static void of_dma_configure(struct device *dev)
{
u64 dma_addr, paddr, size;
int ret;
+ bool coherent;
+ unsigned long offset;
+ struct iommu_ops *iommu;
/*
* Set default dma-mask to 32 bit. Drivers are expected to setup
@@ -178,28 +182,30 @@ static void of_dma_configure(struct device *dev)
if (!dev->dma_mask)
dev->dma_mask = &dev->coherent_dma_mask;
- /*
- * if dma-coherent property exist, call arch hook to setup
- * dma coherent operations.
- */
- if (of_dma_is_coherent(dev->of_node)) {
- set_arch_dma_coherent_ops(dev);
- dev_dbg(dev, "device is dma coherent\n");
- }
-
- /*
- * if dma-ranges property doesn't exist - just return else
- * setup the dma offset
- */
ret = of_dma_get_range(dev->of_node, &dma_addr, &paddr, &size);
if (ret < 0) {
- dev_dbg(dev, "no dma range information to setup\n");
- return;
+ dma_addr = offset = 0;
+ size = dev->coherent_dma_mask;
+ } else {
+ offset = PFN_DOWN(paddr - dma_addr);
+ dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", dev->dma_pfn_offset);
}
+ dev->dma_pfn_offset = offset;
+
+ coherent = of_dma_is_coherent(dev->of_node);
+ dev_dbg(dev, "device is%sdma coherent\n",
+ coherent ? " " : " not ");
+
+ iommu = of_iommu_configure(dev);
+ dev_dbg(dev, "device is%sbehind an iommu\n",
+ iommu ? " " : " not ");
- /* DMA ranges found. Calculate and set dma_pfn_offset */
- dev->dma_pfn_offset = PFN_DOWN(paddr - dma_addr);
- dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", dev->dma_pfn_offset);
+ arch_setup_dma_ops(dev, dma_addr, size, iommu, coherent);
+}
+
+static void of_dma_deconfigure(struct device *dev)
+{
+ arch_teardown_dma_ops(dev);
}
/**
@@ -228,16 +234,12 @@ static struct platform_device *of_platform_device_create_pdata(
if (!dev)
goto err_clear_flag;
- of_dma_configure(&dev->dev);
dev->dev.bus = &platform_bus_type;
dev->dev.platform_data = platform_data;
-
- /* We do not fill the DMA ops for platform devices by default.
- * This is currently the responsibility of the platform code
- * to do such, possibly using a device notifier
- */
+ of_dma_configure(&dev->dev);
if (of_device_add(dev) != 0) {
+ of_dma_deconfigure(&dev->dev);
platform_device_put(dev);
goto err_clear_flag;
}
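(Illustrative sketch, not part of the patch.) of_dma_configure() above derives dev->dma_pfn_offset from the dma-ranges translation before handing everything to arch_setup_dma_ops(). The standalone C below just works through that arithmetic; the PAGE_SHIFT of 12 and the example addresses are assumptions.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12			/* assumed 4 KiB pages */
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

int main(void)
{
	/* Hypothetical dma-ranges: CPU physical 0x80000000 appears as bus address 0. */
	uint64_t paddr    = 0x80000000ULL;
	uint64_t dma_addr = 0x00000000ULL;
	unsigned long offset = (unsigned long)PFN_DOWN(paddr - dma_addr);

	printf("dma_pfn_offset(%#08lx)\n", offset);	/* 0x80000 for these values */
	return 0;
}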
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index cced84233ac..7a8f1c5e65a 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -67,7 +67,7 @@ config XEN_PCIDEV_FRONTEND
config HT_IRQ
bool "Interrupts on hypertransport devices"
default y
- depends on PCI && X86_LOCAL_APIC && X86_IO_APIC
+ depends on PCI && X86_LOCAL_APIC
help
This allows native hypertransport devices to use interrupts.
@@ -110,13 +110,6 @@ config PCI_PASID
If unsure, say N.
-config PCI_IOAPIC
- bool "PCI IO-APIC hotplug support" if X86
- depends on PCI
- depends on ACPI
- depends on X86_IO_APIC
- default !X86
-
config PCI_LABEL
def_bool y if (DMI || ACPI)
select NLS
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index e04fe2d9df3..73e4af400a5 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -13,8 +13,6 @@ obj-$(CONFIG_PCI_QUIRKS) += quirks.o
# Build PCI Express stuff if needed
obj-$(CONFIG_PCIEPORTBUS) += pcie/
-obj-$(CONFIG_PCI_IOAPIC) += ioapic.o
-
# Build the PCI Hotplug drivers if we were asked to
obj-$(CONFIG_HOTPLUG_PCI) += hotplug/
ifdef CONFIG_HOTPLUG_PCI
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index 3efaf4c3852..96c5c729cdb 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -36,6 +36,7 @@
#include <linux/wait.h>
#include "../pci.h"
#include <asm/pci_x86.h> /* for struct irq_routing_table */
+#include <asm/io_apic.h>
#include "ibmphp.h"
#define attn_on(sl) ibmphp_hpc_writeslot (sl, HPC_SLOT_ATTNON)
@@ -155,13 +156,10 @@ int ibmphp_init_devno(struct slot **cur_slot)
for (loop = 0; loop < len; loop++) {
if ((*cur_slot)->number == rtable->slots[loop].slot &&
(*cur_slot)->bus == rtable->slots[loop].bus) {
- struct io_apic_irq_attr irq_attr;
-
(*cur_slot)->device = PCI_SLOT(rtable->slots[loop].devfn);
for (i = 0; i < 4; i++)
(*cur_slot)->irq[i] = IO_APIC_get_PCI_irq_vector((int) (*cur_slot)->bus,
- (int) (*cur_slot)->device, i,
- &irq_attr);
+ (int) (*cur_slot)->device, i);
debug("(*cur_slot)->irq[0] = %x\n",
(*cur_slot)->irq[0]);
diff --git a/drivers/pci/ioapic.c b/drivers/pci/ioapic.c
deleted file mode 100644
index f6219d36227..00000000000
--- a/drivers/pci/ioapic.c
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * IOAPIC/IOxAPIC/IOSAPIC driver
- *
- * Copyright (C) 2009 Fujitsu Limited.
- * (c) Copyright 2009 Hewlett-Packard Development Company, L.P.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-/*
- * This driver manages PCI I/O APICs added by hotplug after boot. We try to
- * claim all I/O APIC PCI devices, but those present at boot were registered
- * when we parsed the ACPI MADT, so we'll fail when we try to re-register
- * them.
- */
-
-#include <linux/pci.h>
-#include <linux/module.h>
-#include <linux/acpi.h>
-#include <linux/slab.h>
-
-struct ioapic {
- acpi_handle handle;
- u32 gsi_base;
-};
-
-static int ioapic_probe(struct pci_dev *dev, const struct pci_device_id *ent)
-{
- acpi_handle handle;
- acpi_status status;
- unsigned long long gsb;
- struct ioapic *ioapic;
- int ret;
- char *type;
- struct resource *res;
-
- handle = ACPI_HANDLE(&dev->dev);
- if (!handle)
- return -EINVAL;
-
- status = acpi_evaluate_integer(handle, "_GSB", NULL, &gsb);
- if (ACPI_FAILURE(status))
- return -EINVAL;
-
- /*
- * The previous code in acpiphp evaluated _MAT if _GSB failed, but
- * ACPI spec 4.0 sec 6.2.2 requires _GSB for hot-pluggable I/O APICs.
- */
-
- ioapic = kzalloc(sizeof(*ioapic), GFP_KERNEL);
- if (!ioapic)
- return -ENOMEM;
-
- ioapic->handle = handle;
- ioapic->gsi_base = (u32) gsb;
-
- if (dev->class == PCI_CLASS_SYSTEM_PIC_IOAPIC)
- type = "IOAPIC";
- else
- type = "IOxAPIC";
-
- ret = pci_enable_device(dev);
- if (ret < 0)
- goto exit_free;
-
- pci_set_master(dev);
-
- if (pci_request_region(dev, 0, type))
- goto exit_disable;
-
- res = &dev->resource[0];
- if (acpi_register_ioapic(ioapic->handle, res->start, ioapic->gsi_base))
- goto exit_release;
-
- pci_set_drvdata(dev, ioapic);
- dev_info(&dev->dev, "%s at %pR, GSI %u\n", type, res, ioapic->gsi_base);
- return 0;
-
-exit_release:
- pci_release_region(dev, 0);
-exit_disable:
- pci_disable_device(dev);
-exit_free:
- kfree(ioapic);
- return -ENODEV;
-}
-
-static void ioapic_remove(struct pci_dev *dev)
-{
- struct ioapic *ioapic = pci_get_drvdata(dev);
-
- acpi_unregister_ioapic(ioapic->handle, ioapic->gsi_base);
- pci_release_region(dev, 0);
- pci_disable_device(dev);
- kfree(ioapic);
-}
-
-
-static const struct pci_device_id ioapic_devices[] = {
- { PCI_DEVICE_CLASS(PCI_CLASS_SYSTEM_PIC_IOAPIC, ~0) },
- { PCI_DEVICE_CLASS(PCI_CLASS_SYSTEM_PIC_IOXAPIC, ~0) },
- { }
-};
-MODULE_DEVICE_TABLE(pci, ioapic_devices);
-
-static struct pci_driver ioapic_driver = {
- .name = "ioapic",
- .id_table = ioapic_devices,
- .probe = ioapic_probe,
- .remove = ioapic_remove,
-};
-
-static int __init ioapic_init(void)
-{
- return pci_register_driver(&ioapic_driver);
-}
-module_init(ioapic_init);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/phy/phy-omap-usb2.c b/drivers/phy/phy-omap-usb2.c
index 4e489a8850e..6f4aef3db24 100644
--- a/drivers/phy/phy-omap-usb2.c
+++ b/drivers/phy/phy-omap-usb2.c
@@ -318,7 +318,7 @@ static int omap_usb2_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static int omap_usb2_runtime_suspend(struct device *dev)
{
diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c
index c297b7a10d3..1387b4d4afe 100644
--- a/drivers/phy/phy-ti-pipe3.c
+++ b/drivers/phy/phy-ti-pipe3.c
@@ -423,7 +423,7 @@ static int ti_pipe3_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static int ti_pipe3_runtime_suspend(struct device *dev)
{
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index a2eabe6ff9a..638e797037d 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -38,7 +38,8 @@ config ACER_WMI
config ACERHDF
tristate "Acer Aspire One temperature and fan driver"
- depends on THERMAL && ACPI
+ depends on ACPI && THERMAL
+ select THERMAL_GOV_BANG_BANG
---help---
This is a driver for Acer Aspire One netbooks. It allows to access
the temperature sensor and to control the fan.
@@ -128,10 +129,10 @@ config DELL_WMI_AIO
be called dell-wmi-aio.
config DELL_SMO8800
- tristate "Dell Latitude freefall driver (ACPI SMO8800/SMO8810)"
+ tristate "Dell Latitude freefall driver (ACPI SMO88XX)"
depends on ACPI
---help---
- Say Y here if you want to support SMO8800/SMO8810 freefall device
+ Say Y here if you want to support SMO88XX freefall devices
on Dell Latitude laptops.
To compile this driver as a module, choose M here: the module will
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index aaf37c5f12f..594c918b553 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -50,7 +50,7 @@
*/
#undef START_IN_KERNEL_MODE
-#define DRV_VER "0.5.26"
+#define DRV_VER "0.7.0"
/*
* According to the Atom N270 datasheet,
@@ -119,116 +119,152 @@ struct fancmd {
u8 cmd_auto;
};
+struct manualcmd {
+ u8 mreg;
+ u8 moff;
+};
+
+/* default register and command to disable fan in manual mode */
+static const struct manualcmd mcmd = {
+ .mreg = 0x94,
+ .moff = 0xff,
+};
+
/* BIOS settings */
-struct bios_settings_t {
+struct bios_settings {
const char *vendor;
const char *product;
const char *version;
- unsigned char fanreg;
- unsigned char tempreg;
+ u8 fanreg;
+ u8 tempreg;
struct fancmd cmd;
+ int mcmd_enable;
};
/* Register addresses and values for different BIOS versions */
-static const struct bios_settings_t bios_tbl[] = {
+static const struct bios_settings bios_tbl[] = {
/* AOA110 */
- {"Acer", "AOA110", "v0.3109", 0x55, 0x58, {0x1f, 0x00} },
- {"Acer", "AOA110", "v0.3114", 0x55, 0x58, {0x1f, 0x00} },
- {"Acer", "AOA110", "v0.3301", 0x55, 0x58, {0xaf, 0x00} },
- {"Acer", "AOA110", "v0.3304", 0x55, 0x58, {0xaf, 0x00} },
- {"Acer", "AOA110", "v0.3305", 0x55, 0x58, {0xaf, 0x00} },
- {"Acer", "AOA110", "v0.3307", 0x55, 0x58, {0xaf, 0x00} },
- {"Acer", "AOA110", "v0.3308", 0x55, 0x58, {0x21, 0x00} },
- {"Acer", "AOA110", "v0.3309", 0x55, 0x58, {0x21, 0x00} },
- {"Acer", "AOA110", "v0.3310", 0x55, 0x58, {0x21, 0x00} },
+ {"Acer", "AOA110", "v0.3109", 0x55, 0x58, {0x1f, 0x00}, 0},
+ {"Acer", "AOA110", "v0.3114", 0x55, 0x58, {0x1f, 0x00}, 0},
+ {"Acer", "AOA110", "v0.3301", 0x55, 0x58, {0xaf, 0x00}, 0},
+ {"Acer", "AOA110", "v0.3304", 0x55, 0x58, {0xaf, 0x00}, 0},
+ {"Acer", "AOA110", "v0.3305", 0x55, 0x58, {0xaf, 0x00}, 0},
+ {"Acer", "AOA110", "v0.3307", 0x55, 0x58, {0xaf, 0x00}, 0},
+ {"Acer", "AOA110", "v0.3308", 0x55, 0x58, {0x21, 0x00}, 0},
+ {"Acer", "AOA110", "v0.3309", 0x55, 0x58, {0x21, 0x00}, 0},
+ {"Acer", "AOA110", "v0.3310", 0x55, 0x58, {0x21, 0x00}, 0},
/* AOA150 */
- {"Acer", "AOA150", "v0.3114", 0x55, 0x58, {0x1f, 0x00} },
- {"Acer", "AOA150", "v0.3301", 0x55, 0x58, {0x20, 0x00} },
- {"Acer", "AOA150", "v0.3304", 0x55, 0x58, {0x20, 0x00} },
- {"Acer", "AOA150", "v0.3305", 0x55, 0x58, {0x20, 0x00} },
- {"Acer", "AOA150", "v0.3307", 0x55, 0x58, {0x20, 0x00} },
- {"Acer", "AOA150", "v0.3308", 0x55, 0x58, {0x20, 0x00} },
- {"Acer", "AOA150", "v0.3309", 0x55, 0x58, {0x20, 0x00} },
- {"Acer", "AOA150", "v0.3310", 0x55, 0x58, {0x20, 0x00} },
+ {"Acer", "AOA150", "v0.3114", 0x55, 0x58, {0x1f, 0x00}, 0},
+ {"Acer", "AOA150", "v0.3301", 0x55, 0x58, {0x20, 0x00}, 0},
+ {"Acer", "AOA150", "v0.3304", 0x55, 0x58, {0x20, 0x00}, 0},
+ {"Acer", "AOA150", "v0.3305", 0x55, 0x58, {0x20, 0x00}, 0},
+ {"Acer", "AOA150", "v0.3307", 0x55, 0x58, {0x20, 0x00}, 0},
+ {"Acer", "AOA150", "v0.3308", 0x55, 0x58, {0x20, 0x00}, 0},
+ {"Acer", "AOA150", "v0.3309", 0x55, 0x58, {0x20, 0x00}, 0},
+ {"Acer", "AOA150", "v0.3310", 0x55, 0x58, {0x20, 0x00}, 0},
/* LT1005u */
- {"Acer", "LT-10Q", "v0.3310", 0x55, 0x58, {0x20, 0x00} },
+ {"Acer", "LT-10Q", "v0.3310", 0x55, 0x58, {0x20, 0x00}, 0},
/* Acer 1410 */
- {"Acer", "Aspire 1410", "v0.3108", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1410", "v0.3113", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1410", "v0.3115", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1410", "v0.3117", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1410", "v0.3119", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1410", "v0.3120", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1410", "v1.3204", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1410", "v1.3303", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1410", "v1.3308", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1410", "v1.3310", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1410", "v1.3314", 0x55, 0x58, {0x9e, 0x00} },
+ {"Acer", "Aspire 1410", "v0.3108", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1410", "v0.3113", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1410", "v0.3115", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1410", "v0.3117", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1410", "v0.3119", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1410", "v0.3120", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1410", "v1.3204", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1410", "v1.3303", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1410", "v1.3308", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1410", "v1.3310", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1410", "v1.3314", 0x55, 0x58, {0x9e, 0x00}, 0},
/* Acer 1810xx */
- {"Acer", "Aspire 1810TZ", "v0.3108", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810T", "v0.3108", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810TZ", "v0.3113", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810T", "v0.3113", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810TZ", "v0.3115", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810T", "v0.3115", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810TZ", "v0.3117", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810T", "v0.3117", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810TZ", "v0.3119", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810T", "v0.3119", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810TZ", "v0.3120", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810T", "v0.3120", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810TZ", "v1.3204", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810T", "v1.3204", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810TZ", "v1.3303", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810T", "v1.3303", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810TZ", "v1.3308", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810T", "v1.3308", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810TZ", "v1.3310", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810T", "v1.3310", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810TZ", "v1.3314", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810T", "v1.3314", 0x55, 0x58, {0x9e, 0x00} },
+ {"Acer", "Aspire 1810TZ", "v0.3108", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810T", "v0.3108", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810TZ", "v0.3113", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810T", "v0.3113", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810TZ", "v0.3115", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810T", "v0.3115", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810TZ", "v0.3117", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810T", "v0.3117", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810TZ", "v0.3119", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810T", "v0.3119", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810TZ", "v0.3120", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810T", "v0.3120", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810TZ", "v1.3204", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810T", "v1.3204", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810TZ", "v1.3303", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810T", "v1.3303", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810TZ", "v1.3308", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810T", "v1.3308", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810TZ", "v1.3310", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810T", "v1.3310", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810TZ", "v1.3314", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810T", "v1.3314", 0x55, 0x58, {0x9e, 0x00}, 0},
+ /* Acer 5755G */
+ {"Acer", "Aspire 5755G", "V1.20", 0xab, 0xb4, {0x00, 0x08}, 0},
+ {"Acer", "Aspire 5755G", "V1.21", 0xab, 0xb3, {0x00, 0x08}, 0},
+ /* Acer 521 */
+ {"Acer", "AO521", "V1.11", 0x55, 0x58, {0x1f, 0x00}, 0},
/* Acer 531 */
- {"Acer", "AO531h", "v0.3104", 0x55, 0x58, {0x20, 0x00} },
- {"Acer", "AO531h", "v0.3201", 0x55, 0x58, {0x20, 0x00} },
- {"Acer", "AO531h", "v0.3304", 0x55, 0x58, {0x20, 0x00} },
+ {"Acer", "AO531h", "v0.3104", 0x55, 0x58, {0x20, 0x00}, 0},
+ {"Acer", "AO531h", "v0.3201", 0x55, 0x58, {0x20, 0x00}, 0},
+ {"Acer", "AO531h", "v0.3304", 0x55, 0x58, {0x20, 0x00}, 0},
/* Acer 751 */
- {"Acer", "AO751h", "V0.3212", 0x55, 0x58, {0x21, 0x00} },
+ {"Acer", "AO751h", "V0.3206", 0x55, 0x58, {0x21, 0x00}, 0},
+ {"Acer", "AO751h", "V0.3212", 0x55, 0x58, {0x21, 0x00}, 0},
+ /* Acer 753 */
+ {"Acer", "Aspire One 753", "V1.24", 0x93, 0xac, {0x14, 0x04}, 1},
/* Acer 1825 */
- {"Acer", "Aspire 1825PTZ", "V1.3118", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1825PTZ", "V1.3127", 0x55, 0x58, {0x9e, 0x00} },
+ {"Acer", "Aspire 1825PTZ", "V1.3118", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1825PTZ", "V1.3127", 0x55, 0x58, {0x9e, 0x00}, 0},
+ /* Acer Extensa 5420 */
+ {"Acer", "Extensa 5420", "V1.17", 0x93, 0xac, {0x14, 0x04}, 1},
+ /* Acer Aspire 5315 */
+ {"Acer", "Aspire 5315", "V1.19", 0x93, 0xac, {0x14, 0x04}, 1},
+ /* Acer Aspire 5739 */
+ {"Acer", "Aspire 5739G", "V1.3311", 0x55, 0x58, {0x20, 0x00}, 0},
/* Acer TravelMate 7730 */
- {"Acer", "TravelMate 7730G", "v0.3509", 0x55, 0x58, {0xaf, 0x00} },
+ {"Acer", "TravelMate 7730G", "v0.3509", 0x55, 0x58, {0xaf, 0x00}, 0},
+ /* Acer TravelMate TM8573T */
+ {"Acer", "TM8573T", "V1.13", 0x93, 0xa8, {0x14, 0x04}, 1},
/* Gateway */
- {"Gateway", "AOA110", "v0.3103", 0x55, 0x58, {0x21, 0x00} },
- {"Gateway", "AOA150", "v0.3103", 0x55, 0x58, {0x20, 0x00} },
- {"Gateway", "LT31", "v1.3103", 0x55, 0x58, {0x9e, 0x00} },
- {"Gateway", "LT31", "v1.3201", 0x55, 0x58, {0x9e, 0x00} },
- {"Gateway", "LT31", "v1.3302", 0x55, 0x58, {0x9e, 0x00} },
- {"Gateway", "LT31", "v1.3303t", 0x55, 0x58, {0x9e, 0x00} },
+ {"Gateway", "AOA110", "v0.3103", 0x55, 0x58, {0x21, 0x00}, 0},
+ {"Gateway", "AOA150", "v0.3103", 0x55, 0x58, {0x20, 0x00}, 0},
+ {"Gateway", "LT31", "v1.3103", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Gateway", "LT31", "v1.3201", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Gateway", "LT31", "v1.3302", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Gateway", "LT31", "v1.3303t", 0x55, 0x58, {0x9e, 0x00}, 0},
/* Packard Bell */
- {"Packard Bell", "DOA150", "v0.3104", 0x55, 0x58, {0x21, 0x00} },
- {"Packard Bell", "DOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00} },
- {"Packard Bell", "AOA110", "v0.3105", 0x55, 0x58, {0x21, 0x00} },
- {"Packard Bell", "AOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00} },
- {"Packard Bell", "ENBFT", "V1.3118", 0x55, 0x58, {0x9e, 0x00} },
- {"Packard Bell", "ENBFT", "V1.3127", 0x55, 0x58, {0x9e, 0x00} },
- {"Packard Bell", "DOTMU", "v1.3303", 0x55, 0x58, {0x9e, 0x00} },
- {"Packard Bell", "DOTMU", "v0.3120", 0x55, 0x58, {0x9e, 0x00} },
- {"Packard Bell", "DOTMU", "v0.3108", 0x55, 0x58, {0x9e, 0x00} },
- {"Packard Bell", "DOTMU", "v0.3113", 0x55, 0x58, {0x9e, 0x00} },
- {"Packard Bell", "DOTMU", "v0.3115", 0x55, 0x58, {0x9e, 0x00} },
- {"Packard Bell", "DOTMU", "v0.3117", 0x55, 0x58, {0x9e, 0x00} },
- {"Packard Bell", "DOTMU", "v0.3119", 0x55, 0x58, {0x9e, 0x00} },
- {"Packard Bell", "DOTMU", "v1.3204", 0x55, 0x58, {0x9e, 0x00} },
- {"Packard Bell", "DOTMA", "v1.3201", 0x55, 0x58, {0x9e, 0x00} },
- {"Packard Bell", "DOTMA", "v1.3302", 0x55, 0x58, {0x9e, 0x00} },
- {"Packard Bell", "DOTMA", "v1.3303t", 0x55, 0x58, {0x9e, 0x00} },
- {"Packard Bell", "DOTVR46", "v1.3308", 0x55, 0x58, {0x9e, 0x00} },
+ {"Packard Bell", "DOA150", "v0.3104", 0x55, 0x58, {0x21, 0x00}, 0},
+ {"Packard Bell", "DOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00}, 0},
+ {"Packard Bell", "AOA110", "v0.3105", 0x55, 0x58, {0x21, 0x00}, 0},
+ {"Packard Bell", "AOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00}, 0},
+ {"Packard Bell", "ENBFT", "V1.3118", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Packard Bell", "ENBFT", "V1.3127", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Packard Bell", "DOTMU", "v1.3303", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Packard Bell", "DOTMU", "v0.3120", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Packard Bell", "DOTMU", "v0.3108", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Packard Bell", "DOTMU", "v0.3113", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Packard Bell", "DOTMU", "v0.3115", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Packard Bell", "DOTMU", "v0.3117", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Packard Bell", "DOTMU", "v0.3119", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Packard Bell", "DOTMU", "v1.3204", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Packard Bell", "DOTMA", "v1.3201", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Packard Bell", "DOTMA", "v1.3302", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Packard Bell", "DOTMA", "v1.3303t", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Packard Bell", "DOTVR46", "v1.3308", 0x55, 0x58, {0x9e, 0x00}, 0},
/* pewpew-terminator */
- {"", "", "", 0, 0, {0, 0} }
+ {"", "", "", 0, 0, {0, 0}, 0}
};
-static const struct bios_settings_t *bios_cfg __read_mostly;
+static const struct bios_settings *bios_cfg __read_mostly;
+
+/*
+ * this struct is used to instruct thermal layer to use bang_bang instead of
+ * default governor for acerhdf
+ */
+static struct thermal_zone_params acerhdf_zone_params = {
+ .governor_name = "bang_bang",
+};
static int acerhdf_get_temp(int *temp)
{
@@ -275,6 +311,12 @@ static void acerhdf_change_fanstate(int state)
fanstate = state;
ec_write(bios_cfg->fanreg, cmd);
+
+ if (bios_cfg->mcmd_enable && state == ACERHDF_FAN_OFF) {
+ if (verbose)
+ pr_notice("turning off fan manually\n");
+ ec_write(mcmd.mreg, mcmd.moff);
+ }
}
static void acerhdf_check_param(struct thermal_zone_device *thermal)
@@ -401,6 +443,21 @@ static int acerhdf_get_trip_type(struct thermal_zone_device *thermal, int trip,
{
if (trip == 0)
*type = THERMAL_TRIP_ACTIVE;
+ else if (trip == 1)
+ *type = THERMAL_TRIP_CRITICAL;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int acerhdf_get_trip_hyst(struct thermal_zone_device *thermal, int trip,
+ unsigned long *temp)
+{
+ if (trip != 0)
+ return -EINVAL;
+
+ *temp = fanon - fanoff;
return 0;
}
@@ -410,6 +467,10 @@ static int acerhdf_get_trip_temp(struct thermal_zone_device *thermal, int trip,
{
if (trip == 0)
*temp = fanon;
+ else if (trip == 1)
+ *temp = ACERHDF_TEMP_CRIT;
+ else
+ return -EINVAL;
return 0;
}
@@ -429,6 +490,7 @@ static struct thermal_zone_device_ops acerhdf_dev_ops = {
.get_mode = acerhdf_get_mode,
.set_mode = acerhdf_set_mode,
.get_trip_type = acerhdf_get_trip_type,
+ .get_trip_hyst = acerhdf_get_trip_hyst,
.get_trip_temp = acerhdf_get_trip_temp,
.get_crit_temp = acerhdf_get_crit_temp,
};
@@ -481,9 +543,7 @@ static int acerhdf_set_cur_state(struct thermal_cooling_device *cdev,
}
if (state == 0) {
- /* turn fan off only if below fanoff temperature */
- if ((cur_state == ACERHDF_FAN_AUTO) &&
- (cur_temp < fanoff))
+ if (cur_state == ACERHDF_FAN_AUTO)
acerhdf_change_fanstate(ACERHDF_FAN_OFF);
} else {
if (cur_state == ACERHDF_FAN_OFF)
@@ -558,7 +618,7 @@ static int str_starts_with(const char *str, const char *start)
static int acerhdf_check_hardware(void)
{
char const *vendor, *version, *product;
- const struct bios_settings_t *bt = NULL;
+ const struct bios_settings *bt = NULL;
/* get BIOS data */
vendor = dmi_get_system_info(DMI_SYS_VENDOR);
@@ -660,12 +720,20 @@ static int acerhdf_register_thermal(void)
if (IS_ERR(cl_dev))
return -EINVAL;
- thz_dev = thermal_zone_device_register("acerhdf", 1, 0, NULL,
- &acerhdf_dev_ops, NULL, 0,
+ thz_dev = thermal_zone_device_register("acerhdf", 2, 0, NULL,
+ &acerhdf_dev_ops,
+ &acerhdf_zone_params, 0,
(kernelmode) ? interval*1000 : 0);
if (IS_ERR(thz_dev))
return -EINVAL;
+ if (strcmp(thz_dev->governor->name,
+ acerhdf_zone_params.governor_name)) {
+ pr_err("Didn't get thermal governor %s, perhaps not compiled into thermal subsystem.\n",
+ acerhdf_zone_params.governor_name);
+ return -EINVAL;
+ }
+
return 0;
}
@@ -722,9 +790,15 @@ MODULE_ALIAS("dmi:*:*Acer*:pnAOA*:");
MODULE_ALIAS("dmi:*:*Acer*:pnAO751h*:");
MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1410*:");
MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1810*:");
+MODULE_ALIAS("dmi:*:*Acer*:pnAspire*5755G:");
MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1825PTZ:");
+MODULE_ALIAS("dmi:*:*Acer*:pnAO521*:");
MODULE_ALIAS("dmi:*:*Acer*:pnAO531*:");
+MODULE_ALIAS("dmi:*:*Acer*:pnAspire*5739G:");
+MODULE_ALIAS("dmi:*:*Acer*:pnAspire*One*753:");
+MODULE_ALIAS("dmi:*:*Acer*:pnAspire*5315:");
MODULE_ALIAS("dmi:*:*Acer*:TravelMate*7730G:");
+MODULE_ALIAS("dmi:*:*Acer*:TM8573T:");
MODULE_ALIAS("dmi:*:*Gateway*:pnAOA*:");
MODULE_ALIAS("dmi:*:*Gateway*:pnLT31*:");
MODULE_ALIAS("dmi:*:*Packard*Bell*:pnAOA*:");
@@ -733,6 +807,7 @@ MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTMU*:");
MODULE_ALIAS("dmi:*:*Packard*Bell*:pnENBFT*:");
MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTMA*:");
MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTVR46*:");
+MODULE_ALIAS("dmi:*:*Acer*:pnExtensa 5420*:");
module_init(acerhdf_init);
module_exit(acerhdf_exit);
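(Illustrative sketch, not part of the driver.) The acerhdf changes above register a second, critical trip point, report fanon - fanoff as the hysteresis of trip 0, and require the bang_bang governor. The standalone C below mimics that on/off behaviour with hypothetical fanon/fanoff values; the real thresholds are module parameters.

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	long fanon  = 60000;		/* assumed trip temperature, millidegrees C */
	long fanoff = 53000;		/* assumed switch-off temperature */
	long hyst   = fanon - fanoff;	/* what get_trip_hyst() reports for trip 0 */
	long temps[] = { 50000, 58000, 61000, 59000, 54000, 52000 };
	bool fan_on = false;

	for (unsigned int i = 0; i < sizeof(temps) / sizeof(temps[0]); i++) {
		if (!fan_on && temps[i] >= fanon)
			fan_on = true;			/* crossed the trip temperature */
		else if (fan_on && temps[i] <= fanon - hyst)
			fan_on = false;			/* fell below trip minus hysteresis */
		printf("temp=%ld fan=%s\n", temps[i], fan_on ? "on" : "off");
	}
	return 0;
}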
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index 05647f1a427..f71700e0d13 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -843,8 +843,7 @@ static int asus_backlight_init(struct asus_laptop *asus)
static void asus_backlight_exit(struct asus_laptop *asus)
{
- if (asus->backlight_device)
- backlight_device_unregister(asus->backlight_device);
+ backlight_device_unregister(asus->backlight_device);
asus->backlight_device = NULL;
}
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index c1a6cd66af4..abdaed34c72 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -191,6 +191,15 @@ static const struct dmi_system_id asus_quirks[] = {
},
{
.callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X551CA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X551CA"),
+ },
+ .driver_data = &quirk_asus_wapf4,
+ },
+ {
+ .callback = dmi_matched,
.ident = "ASUSTeK COMPUTER INC. X55A",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 21fc932da3a..7543a56e0f4 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -1308,8 +1308,7 @@ static int asus_wmi_backlight_init(struct asus_wmi *asus)
static void asus_wmi_backlight_exit(struct asus_wmi *asus)
{
- if (asus->backlight_device)
- backlight_device_unregister(asus->backlight_device);
+ backlight_device_unregister(asus->backlight_device);
asus->backlight_device = NULL;
}
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index f6a28d7161f..9411eae39a4 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -2,9 +2,11 @@
* Driver for Dell laptop extras
*
* Copyright (c) Red Hat <mjg@redhat.com>
+ * Copyright (c) 2014 Gabriele Mazzotta <gabriele.mzt@gmail.com>
+ * Copyright (c) 2014 Pali Rohár <pali.rohar@gmail.com>
*
- * Based on documentation in the libsmbios package, Copyright (C) 2005 Dell
- * Inc.
+ * Based on documentation in the libsmbios package:
+ * Copyright (C) 2005-2014 Dell Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -32,6 +34,13 @@
#include "../../firmware/dcdbas.h"
#define BRIGHTNESS_TOKEN 0x7d
+#define KBD_LED_OFF_TOKEN 0x01E1
+#define KBD_LED_ON_TOKEN 0x01E2
+#define KBD_LED_AUTO_TOKEN 0x01E3
+#define KBD_LED_AUTO_25_TOKEN 0x02EA
+#define KBD_LED_AUTO_50_TOKEN 0x02EB
+#define KBD_LED_AUTO_75_TOKEN 0x02EC
+#define KBD_LED_AUTO_100_TOKEN 0x02F6
/* This structure will be modified by the firmware when we enter
* system management mode, hence the volatiles */
@@ -62,6 +71,13 @@ struct calling_interface_structure {
struct quirk_entry {
u8 touchpad_led;
+
+ int needs_kbd_timeouts;
+ /*
+ * Ordered list of timeouts expressed in seconds.
+ * The list must end with -1
+ */
+ int kbd_timeouts[];
};
static struct quirk_entry *quirks;
@@ -76,6 +92,15 @@ static int __init dmi_matched(const struct dmi_system_id *dmi)
return 1;
}
+/*
+ * These values come from the Windows utility provided by Dell. If any other value
+ * is used, the BIOS silently sets the timeout to 0 without any error message.
+ */
+static struct quirk_entry quirk_dell_xps13_9333 = {
+ .needs_kbd_timeouts = 1,
+ .kbd_timeouts = { 0, 5, 15, 60, 5 * 60, 15 * 60, -1 },
+};
+
static int da_command_address;
static int da_command_code;
static int da_num_tokens;
@@ -267,6 +292,15 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
},
.driver_data = &quirk_dell_vostro_v130,
},
+ {
+ .callback = dmi_matched,
+ .ident = "Dell XPS13 9333",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "XPS13 9333"),
+ },
+ .driver_data = &quirk_dell_xps13_9333,
+ },
{ }
};
@@ -331,17 +365,29 @@ static void __init find_tokens(const struct dmi_header *dm, void *dummy)
}
}
-static int find_token_location(int tokenid)
+static int find_token_id(int tokenid)
{
int i;
+
for (i = 0; i < da_num_tokens; i++) {
if (da_tokens[i].tokenID == tokenid)
- return da_tokens[i].location;
+ return i;
}
return -1;
}
+static int find_token_location(int tokenid)
+{
+ int id;
+
+ id = find_token_id(tokenid);
+ if (id == -1)
+ return -1;
+
+ return da_tokens[id].location;
+}
+
static struct calling_interface_buffer *
dell_send_request(struct calling_interface_buffer *buffer, int class,
int select)
@@ -362,6 +408,20 @@ dell_send_request(struct calling_interface_buffer *buffer, int class,
return buffer;
}
+static inline int dell_smi_error(int value)
+{
+ switch (value) {
+ case 0: /* Completed successfully */
+ return 0;
+ case -1: /* Completed with error */
+ return -EIO;
+ case -2: /* Function not supported */
+ return -ENXIO;
+ default: /* Unknown error */
+ return -EINVAL;
+ }
+}
+
/* Derived from information in DellWirelessCtl.cpp:
Class 17, select 11 is radio control. It returns an array of 32-bit values.
@@ -563,7 +623,7 @@ static bool dell_laptop_i8042_filter(unsigned char data, unsigned char str,
{
static bool extended;
- if (str & 0x20)
+ if (str & I8042_STR_AUXDATA)
return false;
if (unlikely(data == 0xe0)) {
@@ -716,7 +776,7 @@ static int dell_send_intensity(struct backlight_device *bd)
else
dell_send_request(buffer, 1, 1);
-out:
+ out:
release_buffer();
return ret;
}
@@ -740,7 +800,7 @@ static int dell_get_intensity(struct backlight_device *bd)
ret = buffer->output[1];
-out:
+ out:
release_buffer();
return ret;
}
@@ -789,6 +849,984 @@ static void touchpad_led_exit(void)
led_classdev_unregister(&touchpad_led);
}
+/*
+ * Derived from information in smbios-keyboard-ctl:
+ *
+ * cbClass 4
+ * cbSelect 11
+ * Keyboard illumination
+ * cbArg1 determines the function to be performed
+ *
+ * cbArg1 0x0 = Get Feature Information
+ * cbRES1 Standard return codes (0, -1, -2)
+ * cbRES2, word0 Bitmap of user-selectable modes
+ * bit 0 Always off (All systems)
+ * bit 1 Always on (Travis ATG, Siberia)
+ * bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG)
+ * bit 3 Auto: ALS- and input-activity-based On; input-activity based Off
+ * bit 4 Auto: Input-activity-based On; input-activity based Off
+ * bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off
+ * bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off
+ * bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off
+ * bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off
+ * bits 9-15 Reserved for future use
+ * cbRES2, byte2 Reserved for future use
+ * cbRES2, byte3 Keyboard illumination type
+ * 0 Reserved
+ * 1 Tasklight
+ * 2 Backlight
+ * 3-255 Reserved for future use
+ * cbRES3, byte0 Supported auto keyboard illumination trigger bitmap.
+ * bit 0 Any keystroke
+ * bit 1 Touchpad activity
+ * bit 2 Pointing stick
+ * bit 3 Any mouse
+ * bits 4-7 Reserved for future use
+ * cbRES3, byte1 Supported timeout unit bitmap
+ * bit 0 Seconds
+ * bit 1 Minutes
+ * bit 2 Hours
+ * bit 3 Days
+ * bits 4-7 Reserved for future use
+ * cbRES3, byte2 Number of keyboard light brightness levels
+ * cbRES4, byte0 Maximum acceptable seconds value (0 if seconds not supported).
+ * cbRES4, byte1 Maximum acceptable minutes value (0 if minutes not supported).
+ * cbRES4, byte2 Maximum acceptable hours value (0 if hours not supported).
+ * cbRES4, byte3 Maximum acceptable days value (0 if days not supported)
+ *
+ * cbArg1 0x1 = Get Current State
+ * cbRES1 Standard return codes (0, -1, -2)
+ * cbRES2, word0 Bitmap of current mode state
+ * bit 0 Always off (All systems)
+ * bit 1 Always on (Travis ATG, Siberia)
+ * bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG)
+ * bit 3 Auto: ALS- and input-activity-based On; input-activity based Off
+ * bit 4 Auto: Input-activity-based On; input-activity based Off
+ * bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off
+ * bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off
+ * bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off
+ * bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off
+ * bits 9-15 Reserved for future use
+ * Note: Only One bit can be set
+ * cbRES2, byte2 Currently active auto keyboard illumination triggers.
+ * bit 0 Any keystroke
+ * bit 1 Touchpad activity
+ * bit 2 Pointing stick
+ * bit 3 Any mouse
+ * bits 4-7 Reserved for future use
+ * cbRES2, byte3 Current Timeout
+ * bits 7:6 Timeout units indicator:
+ * 00b Seconds
+ * 01b Minutes
+ * 10b Hours
+ * 11b Days
+ * bits 5:0 Timeout value (0-63) in sec/min/hr/day
+ * NOTE: A value of 0 means always on (no timeout) if any bits of RES3 byte
+ * are set upon return from the [Get feature information] call.
+ * cbRES3, byte0 Current setting of ALS value that turns the light on or off.
+ * cbRES3, byte1 Current ALS reading
+ * cbRES3, byte2 Current keyboard light level.
+ *
+ * cbArg1 0x2 = Set New State
+ * cbRES1 Standard return codes (0, -1, -2)
+ * cbArg2, word0 Bitmap of current mode state
+ * bit 0 Always off (All systems)
+ * bit 1 Always on (Travis ATG, Siberia)
+ * bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG)
+ * bit 3 Auto: ALS- and input-activity-based On; input-activity based Off
+ * bit 4 Auto: Input-activity-based On; input-activity based Off
+ * bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off
+ * bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off
+ * bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off
+ * bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off
+ * bits 9-15 Reserved for future use
+ * Note: Only One bit can be set
+ * cbArg2, byte2 Desired auto keyboard illumination triggers. Must remain inactive to allow
+ * keyboard to turn off automatically.
+ * bit 0 Any keystroke
+ * bit 1 Touchpad activity
+ * bit 2 Pointing stick
+ * bit 3 Any mouse
+ * bits 4-7 Reserved for future use
+ * cbArg2, byte3 Desired Timeout
+ * bits 7:6 Timeout units indicator:
+ * 00b Seconds
+ * 01b Minutes
+ * 10b Hours
+ * 11b Days
+ * bits 5:0 Timeout value (0-63) in sec/min/hr/day
+ * cbArg3, byte0 Desired setting of ALS value that turns the light on or off.
+ * cbArg3, byte2 Desired keyboard light level.
+ */
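(Worked example, not part of the driver.) The timeout field described above packs the unit into bits 7:6 and the value into bits 5:0 of a single byte, which is why kbd_init_info() later clamps all per-unit maxima to 63. The standalone C below encodes and decodes one such byte.

#include <stdio.h>
#include <stdint.h>

enum { UNIT_SECONDS, UNIT_MINUTES, UNIT_HOURS, UNIT_DAYS };

static uint8_t encode_timeout(uint8_t value, uint8_t unit)
{
	return (uint8_t)(((unit & 0x3) << 6) | (value & 0x3f));
}

int main(void)
{
	uint8_t byte = encode_timeout(15, UNIT_MINUTES);

	printf("encoded: 0x%02x\n", byte);		/* 0x4f */
	printf("decoded: value=%u unit=%u\n",
	       byte & 0x3f, (byte >> 6) & 0x3);		/* 15, 1 (minutes) */
	return 0;
}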
+
+
+enum kbd_timeout_unit {
+ KBD_TIMEOUT_SECONDS = 0,
+ KBD_TIMEOUT_MINUTES,
+ KBD_TIMEOUT_HOURS,
+ KBD_TIMEOUT_DAYS,
+};
+
+enum kbd_mode_bit {
+ KBD_MODE_BIT_OFF = 0,
+ KBD_MODE_BIT_ON,
+ KBD_MODE_BIT_ALS,
+ KBD_MODE_BIT_TRIGGER_ALS,
+ KBD_MODE_BIT_TRIGGER,
+ KBD_MODE_BIT_TRIGGER_25,
+ KBD_MODE_BIT_TRIGGER_50,
+ KBD_MODE_BIT_TRIGGER_75,
+ KBD_MODE_BIT_TRIGGER_100,
+};
+
+#define kbd_is_als_mode_bit(bit) \
+ ((bit) == KBD_MODE_BIT_ALS || (bit) == KBD_MODE_BIT_TRIGGER_ALS)
+#define kbd_is_trigger_mode_bit(bit) \
+ ((bit) >= KBD_MODE_BIT_TRIGGER_ALS && (bit) <= KBD_MODE_BIT_TRIGGER_100)
+#define kbd_is_level_mode_bit(bit) \
+ ((bit) >= KBD_MODE_BIT_TRIGGER_25 && (bit) <= KBD_MODE_BIT_TRIGGER_100)
+
+struct kbd_info {
+ u16 modes;
+ u8 type;
+ u8 triggers;
+ u8 levels;
+ u8 seconds;
+ u8 minutes;
+ u8 hours;
+ u8 days;
+};
+
+struct kbd_state {
+ u8 mode_bit;
+ u8 triggers;
+ u8 timeout_value;
+ u8 timeout_unit;
+ u8 als_setting;
+ u8 als_value;
+ u8 level;
+};
+
+static const int kbd_tokens[] = {
+ KBD_LED_OFF_TOKEN,
+ KBD_LED_AUTO_25_TOKEN,
+ KBD_LED_AUTO_50_TOKEN,
+ KBD_LED_AUTO_75_TOKEN,
+ KBD_LED_AUTO_100_TOKEN,
+ KBD_LED_ON_TOKEN,
+};
+
+static u16 kbd_token_bits;
+
+static struct kbd_info kbd_info;
+static bool kbd_als_supported;
+static bool kbd_triggers_supported;
+
+static u8 kbd_mode_levels[16];
+static int kbd_mode_levels_count;
+
+static u8 kbd_previous_level;
+static u8 kbd_previous_mode_bit;
+
+static bool kbd_led_present;
+
+/*
+ * NOTE: there are three ways to set the keyboard backlight level.
+ * First, via kbd_state.mode_bit (assigning KBD_MODE_BIT_TRIGGER_* value).
+ * Second, via kbd_state.level (assigning numerical value <= kbd_info.levels).
+ * Third, via SMBIOS tokens (KBD_LED_* in kbd_tokens)
+ *
+ * There are laptops which support only one of these methods. If we want to
+ * support as many machines as possible we need to implement all three methods.
+ * The first two methods use the kbd_state structure. The third uses SMBIOS
+ * tokens. If kbd_info.levels == 0, the machine does not support setting the
+ * keyboard backlight level via kbd_state.level.
+ */
+
+static int kbd_get_info(struct kbd_info *info)
+{
+ u8 units;
+ int ret;
+
+ get_buffer();
+
+ buffer->input[0] = 0x0;
+ dell_send_request(buffer, 4, 11);
+ ret = buffer->output[0];
+
+ if (ret) {
+ ret = dell_smi_error(ret);
+ goto out;
+ }
+
+ info->modes = buffer->output[1] & 0xFFFF;
+ info->type = (buffer->output[1] >> 24) & 0xFF;
+ info->triggers = buffer->output[2] & 0xFF;
+ units = (buffer->output[2] >> 8) & 0xFF;
+ info->levels = (buffer->output[2] >> 16) & 0xFF;
+
+ if (units & BIT(0))
+ info->seconds = (buffer->output[3] >> 0) & 0xFF;
+ if (units & BIT(1))
+ info->minutes = (buffer->output[3] >> 8) & 0xFF;
+ if (units & BIT(2))
+ info->hours = (buffer->output[3] >> 16) & 0xFF;
+ if (units & BIT(3))
+ info->days = (buffer->output[3] >> 24) & 0xFF;
+
+ out:
+ release_buffer();
+ return ret;
+}
+
+static unsigned int kbd_get_max_level(void)
+{
+ if (kbd_info.levels != 0)
+ return kbd_info.levels;
+ if (kbd_mode_levels_count > 0)
+ return kbd_mode_levels_count - 1;
+ return 0;
+}
+
+static int kbd_get_level(struct kbd_state *state)
+{
+ int i;
+
+ if (kbd_info.levels != 0)
+ return state->level;
+
+ if (kbd_mode_levels_count > 0) {
+ for (i = 0; i < kbd_mode_levels_count; ++i)
+ if (kbd_mode_levels[i] == state->mode_bit)
+ return i;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int kbd_set_level(struct kbd_state *state, u8 level)
+{
+ if (kbd_info.levels != 0) {
+ if (level != 0)
+ kbd_previous_level = level;
+ if (state->level == level)
+ return 0;
+ state->level = level;
+ if (level != 0 && state->mode_bit == KBD_MODE_BIT_OFF)
+ state->mode_bit = kbd_previous_mode_bit;
+ else if (level == 0 && state->mode_bit != KBD_MODE_BIT_OFF) {
+ kbd_previous_mode_bit = state->mode_bit;
+ state->mode_bit = KBD_MODE_BIT_OFF;
+ }
+ return 0;
+ }
+
+ if (kbd_mode_levels_count > 0 && level < kbd_mode_levels_count) {
+ if (level != 0)
+ kbd_previous_level = level;
+ state->mode_bit = kbd_mode_levels[level];
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int kbd_get_state(struct kbd_state *state)
+{
+ int ret;
+
+ get_buffer();
+
+ buffer->input[0] = 0x1;
+ dell_send_request(buffer, 4, 11);
+ ret = buffer->output[0];
+
+ if (ret) {
+ ret = dell_smi_error(ret);
+ goto out;
+ }
+
+ state->mode_bit = ffs(buffer->output[1] & 0xFFFF);
+ if (state->mode_bit != 0)
+ state->mode_bit--;
+
+ state->triggers = (buffer->output[1] >> 16) & 0xFF;
+ state->timeout_value = (buffer->output[1] >> 24) & 0x3F;
+ state->timeout_unit = (buffer->output[1] >> 30) & 0x3;
+ state->als_setting = buffer->output[2] & 0xFF;
+ state->als_value = (buffer->output[2] >> 8) & 0xFF;
+ state->level = (buffer->output[2] >> 16) & 0xFF;
+
+ out:
+ release_buffer();
+ return ret;
+}
+
+static int kbd_set_state(struct kbd_state *state)
+{
+ int ret;
+
+ get_buffer();
+ buffer->input[0] = 0x2;
+ buffer->input[1] = BIT(state->mode_bit) & 0xFFFF;
+ buffer->input[1] |= (state->triggers & 0xFF) << 16;
+ buffer->input[1] |= (state->timeout_value & 0x3F) << 24;
+ buffer->input[1] |= (state->timeout_unit & 0x3) << 30;
+ buffer->input[2] = state->als_setting & 0xFF;
+ buffer->input[2] |= (state->level & 0xFF) << 16;
+ dell_send_request(buffer, 4, 11);
+ ret = buffer->output[0];
+ release_buffer();
+
+ return dell_smi_error(ret);
+}
+
+static int kbd_set_state_safe(struct kbd_state *state, struct kbd_state *old)
+{
+ int ret;
+
+ ret = kbd_set_state(state);
+ if (ret == 0)
+ return 0;
+
+ /*
+	 * When setting the new state fails, try to restore the previous one.
+ * This is needed on some machines where BIOS sets a default state when
+ * setting a new state fails. This default state could be all off.
+ */
+
+ if (kbd_set_state(old))
+		pr_err("Restoring the previous keyboard state failed\n");
+
+ return ret;
+}
+
+static int kbd_set_token_bit(u8 bit)
+{
+ int id;
+ int ret;
+
+ if (bit >= ARRAY_SIZE(kbd_tokens))
+ return -EINVAL;
+
+ id = find_token_id(kbd_tokens[bit]);
+ if (id == -1)
+ return -EINVAL;
+
+ get_buffer();
+ buffer->input[0] = da_tokens[id].location;
+ buffer->input[1] = da_tokens[id].value;
+ dell_send_request(buffer, 1, 0);
+ ret = buffer->output[0];
+ release_buffer();
+
+ return dell_smi_error(ret);
+}
+
+static int kbd_get_token_bit(u8 bit)
+{
+ int id;
+ int ret;
+ int val;
+
+ if (bit >= ARRAY_SIZE(kbd_tokens))
+ return -EINVAL;
+
+ id = find_token_id(kbd_tokens[bit]);
+ if (id == -1)
+ return -EINVAL;
+
+ get_buffer();
+ buffer->input[0] = da_tokens[id].location;
+ dell_send_request(buffer, 0, 0);
+ ret = buffer->output[0];
+ val = buffer->output[1];
+ release_buffer();
+
+ if (ret)
+ return dell_smi_error(ret);
+
+ return (val == da_tokens[id].value);
+}
+
+static int kbd_get_first_active_token_bit(void)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(kbd_tokens); ++i) {
+ ret = kbd_get_token_bit(i);
+ if (ret == 1)
+ return i;
+ }
+
+ return ret;
+}
+
+static int kbd_get_valid_token_counts(void)
+{
+ return hweight16(kbd_token_bits);
+}
+
+static inline int kbd_init_info(void)
+{
+ struct kbd_state state;
+ int ret;
+ int i;
+
+ ret = kbd_get_info(&kbd_info);
+ if (ret)
+ return ret;
+
+ kbd_get_state(&state);
+
+ /* NOTE: timeout value is stored in 6 bits so max value is 63 */
+ if (kbd_info.seconds > 63)
+ kbd_info.seconds = 63;
+ if (kbd_info.minutes > 63)
+ kbd_info.minutes = 63;
+ if (kbd_info.hours > 63)
+ kbd_info.hours = 63;
+ if (kbd_info.days > 63)
+ kbd_info.days = 63;
+
+ /* NOTE: On tested machines ON mode did not work and caused
+ * problems (turned backlight off) so do not use it
+ */
+ kbd_info.modes &= ~BIT(KBD_MODE_BIT_ON);
+
+ kbd_previous_level = kbd_get_level(&state);
+ kbd_previous_mode_bit = state.mode_bit;
+
+ if (kbd_previous_level == 0 && kbd_get_max_level() != 0)
+ kbd_previous_level = 1;
+
+ if (kbd_previous_mode_bit == KBD_MODE_BIT_OFF) {
+ kbd_previous_mode_bit =
+ ffs(kbd_info.modes & ~BIT(KBD_MODE_BIT_OFF));
+ if (kbd_previous_mode_bit != 0)
+ kbd_previous_mode_bit--;
+ }
+
+ if (kbd_info.modes & (BIT(KBD_MODE_BIT_ALS) |
+ BIT(KBD_MODE_BIT_TRIGGER_ALS)))
+ kbd_als_supported = true;
+
+ if (kbd_info.modes & (
+ BIT(KBD_MODE_BIT_TRIGGER_ALS) | BIT(KBD_MODE_BIT_TRIGGER) |
+ BIT(KBD_MODE_BIT_TRIGGER_25) | BIT(KBD_MODE_BIT_TRIGGER_50) |
+ BIT(KBD_MODE_BIT_TRIGGER_75) | BIT(KBD_MODE_BIT_TRIGGER_100)
+ ))
+ kbd_triggers_supported = true;
+
+ /* kbd_mode_levels[0] is reserved, see below */
+ for (i = 0; i < 16; ++i)
+ if (kbd_is_level_mode_bit(i) && (BIT(i) & kbd_info.modes))
+ kbd_mode_levels[1 + kbd_mode_levels_count++] = i;
+
+ /*
+ * Find the first supported mode and assign to kbd_mode_levels[0].
+ * This should be 0 (off), but we cannot depend on the BIOS to
+ * support 0.
+ */
+ if (kbd_mode_levels_count > 0) {
+ for (i = 0; i < 16; ++i) {
+ if (BIT(i) & kbd_info.modes) {
+ kbd_mode_levels[0] = i;
+ break;
+ }
+ }
+ kbd_mode_levels_count++;
+ }
+
+ return 0;
+
+}
+
+static inline void kbd_init_tokens(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(kbd_tokens); ++i)
+ if (find_token_id(kbd_tokens[i]) != -1)
+ kbd_token_bits |= BIT(i);
+}
+
+static void kbd_init(void)
+{
+ int ret;
+
+ ret = kbd_init_info();
+ kbd_init_tokens();
+
+ if (kbd_token_bits != 0 || ret == 0)
+ kbd_led_present = true;
+}
+
+static ssize_t kbd_led_timeout_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kbd_state new_state;
+ struct kbd_state state;
+ bool convert;
+ int value;
+ int ret;
+ char ch;
+ u8 unit;
+ int i;
+
+ ret = sscanf(buf, "%d %c", &value, &ch);
+ if (ret < 1)
+ return -EINVAL;
+ else if (ret == 1)
+ ch = 's';
+
+ if (value < 0)
+ return -EINVAL;
+
+ convert = false;
+
+ switch (ch) {
+ case 's':
+ if (value > kbd_info.seconds)
+ convert = true;
+ unit = KBD_TIMEOUT_SECONDS;
+ break;
+ case 'm':
+ if (value > kbd_info.minutes)
+ convert = true;
+ unit = KBD_TIMEOUT_MINUTES;
+ break;
+ case 'h':
+ if (value > kbd_info.hours)
+ convert = true;
+ unit = KBD_TIMEOUT_HOURS;
+ break;
+ case 'd':
+ if (value > kbd_info.days)
+ convert = true;
+ unit = KBD_TIMEOUT_DAYS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (quirks && quirks->needs_kbd_timeouts)
+ convert = true;
+
+ if (convert) {
+ /* Convert value from current units to seconds */
+ switch (unit) {
+ case KBD_TIMEOUT_DAYS:
+ value *= 24;
+ case KBD_TIMEOUT_HOURS:
+ value *= 60;
+ case KBD_TIMEOUT_MINUTES:
+ value *= 60;
+ unit = KBD_TIMEOUT_SECONDS;
+ }
+
+ if (quirks && quirks->needs_kbd_timeouts) {
+ for (i = 0; quirks->kbd_timeouts[i] != -1; i++) {
+ if (value <= quirks->kbd_timeouts[i]) {
+ value = quirks->kbd_timeouts[i];
+ break;
+ }
+ }
+ }
+
+ if (value <= kbd_info.seconds && kbd_info.seconds) {
+ unit = KBD_TIMEOUT_SECONDS;
+ } else if (value / 60 <= kbd_info.minutes && kbd_info.minutes) {
+ value /= 60;
+ unit = KBD_TIMEOUT_MINUTES;
+ } else if (value / (60 * 60) <= kbd_info.hours && kbd_info.hours) {
+ value /= (60 * 60);
+ unit = KBD_TIMEOUT_HOURS;
+ } else if (value / (60 * 60 * 24) <= kbd_info.days && kbd_info.days) {
+ value /= (60 * 60 * 24);
+ unit = KBD_TIMEOUT_DAYS;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ ret = kbd_get_state(&state);
+ if (ret)
+ return ret;
+
+ new_state = state;
+ new_state.timeout_value = value;
+ new_state.timeout_unit = unit;
+
+ ret = kbd_set_state_safe(&new_state, &state);
+ if (ret)
+ return ret;
+
+ return count;
+}
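(Standalone sketch, not driver code.) kbd_led_timeout_store() above converts an out-of-range timeout to seconds (via the intentional switch fall-through) and then re-expresses it in the largest unit that still fits. The C below walks through one such conversion; the per-unit maxima are hypothetical stand-ins for what kbd_get_info() would report.

#include <stdio.h>

int main(void)
{
	unsigned int max_seconds = 63, max_minutes = 63, max_hours = 23, max_days = 7;
	unsigned int value = 90;		/* user wrote "90 m" */

	if (value > max_minutes) {		/* 90 minutes exceeds the limit */
		value *= 60;			/* convert to seconds: 5400 */

		/* Re-express in the largest unit that can still hold the value. */
		if (value <= max_seconds)
			printf("%u s\n", value);
		else if (value / 60 <= max_minutes)
			printf("%u m\n", value / 60);
		else if (value / 3600 <= max_hours)
			printf("%u h\n", value / 3600);	/* prints "1 h" here */
		else if (value / 86400 <= max_days)
			printf("%u d\n", value / 86400);
		else
			printf("out of range\n");
	}
	return 0;
}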
+
+static ssize_t kbd_led_timeout_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kbd_state state;
+ int ret;
+ int len;
+
+ ret = kbd_get_state(&state);
+ if (ret)
+ return ret;
+
+ len = sprintf(buf, "%d", state.timeout_value);
+
+ switch (state.timeout_unit) {
+ case KBD_TIMEOUT_SECONDS:
+ return len + sprintf(buf+len, "s\n");
+ case KBD_TIMEOUT_MINUTES:
+ return len + sprintf(buf+len, "m\n");
+ case KBD_TIMEOUT_HOURS:
+ return len + sprintf(buf+len, "h\n");
+ case KBD_TIMEOUT_DAYS:
+ return len + sprintf(buf+len, "d\n");
+ default:
+ return -EINVAL;
+ }
+
+ return len;
+}
+
+static DEVICE_ATTR(stop_timeout, S_IRUGO | S_IWUSR,
+ kbd_led_timeout_show, kbd_led_timeout_store);
+
+static const char * const kbd_led_triggers[] = {
+ "keyboard",
+ "touchpad",
+ /*"trackstick"*/ NULL, /* NOTE: trackstick is just alias for touchpad */
+ "mouse",
+};
+
+static ssize_t kbd_led_triggers_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kbd_state new_state;
+ struct kbd_state state;
+ bool triggers_enabled = false;
+ bool als_enabled = false;
+ bool disable_als = false;
+ bool enable_als = false;
+ int trigger_bit = -1;
+ char trigger[21];
+ int i, ret;
+
+ ret = sscanf(buf, "%20s", trigger);
+ if (ret != 1)
+ return -EINVAL;
+
+ if (trigger[0] != '+' && trigger[0] != '-')
+ return -EINVAL;
+
+ ret = kbd_get_state(&state);
+ if (ret)
+ return ret;
+
+ if (kbd_als_supported)
+ als_enabled = kbd_is_als_mode_bit(state.mode_bit);
+
+ if (kbd_triggers_supported)
+ triggers_enabled = kbd_is_trigger_mode_bit(state.mode_bit);
+
+ if (kbd_als_supported) {
+ if (strcmp(trigger, "+als") == 0) {
+ if (als_enabled)
+ return count;
+ enable_als = true;
+ } else if (strcmp(trigger, "-als") == 0) {
+ if (!als_enabled)
+ return count;
+ disable_als = true;
+ }
+ }
+
+ if (enable_als || disable_als) {
+ new_state = state;
+ if (enable_als) {
+ if (triggers_enabled)
+ new_state.mode_bit = KBD_MODE_BIT_TRIGGER_ALS;
+ else
+ new_state.mode_bit = KBD_MODE_BIT_ALS;
+ } else {
+ if (triggers_enabled) {
+ new_state.mode_bit = KBD_MODE_BIT_TRIGGER;
+ kbd_set_level(&new_state, kbd_previous_level);
+ } else {
+ new_state.mode_bit = KBD_MODE_BIT_ON;
+ }
+ }
+ if (!(kbd_info.modes & BIT(new_state.mode_bit)))
+ return -EINVAL;
+ ret = kbd_set_state_safe(&new_state, &state);
+ if (ret)
+ return ret;
+ kbd_previous_mode_bit = new_state.mode_bit;
+ return count;
+ }
+
+ if (kbd_triggers_supported) {
+ for (i = 0; i < ARRAY_SIZE(kbd_led_triggers); ++i) {
+ if (!(kbd_info.triggers & BIT(i)))
+ continue;
+ if (!kbd_led_triggers[i])
+ continue;
+ if (strcmp(trigger+1, kbd_led_triggers[i]) != 0)
+ continue;
+ if (trigger[0] == '+' &&
+ triggers_enabled && (state.triggers & BIT(i)))
+ return count;
+ if (trigger[0] == '-' &&
+ (!triggers_enabled || !(state.triggers & BIT(i))))
+ return count;
+ trigger_bit = i;
+ break;
+ }
+ }
+
+ if (trigger_bit != -1) {
+ new_state = state;
+ if (trigger[0] == '+')
+ new_state.triggers |= BIT(trigger_bit);
+ else {
+ new_state.triggers &= ~BIT(trigger_bit);
+ /* NOTE: trackstick bit (2) must be disabled when
+ * disabling touchpad bit (1), otherwise touchpad
+ * bit (1) will not be disabled */
+ if (trigger_bit == 1)
+ new_state.triggers &= ~BIT(2);
+ }
+ if ((kbd_info.triggers & new_state.triggers) !=
+ new_state.triggers)
+ return -EINVAL;
+ if (new_state.triggers && !triggers_enabled) {
+ if (als_enabled)
+ new_state.mode_bit = KBD_MODE_BIT_TRIGGER_ALS;
+ else {
+ new_state.mode_bit = KBD_MODE_BIT_TRIGGER;
+ kbd_set_level(&new_state, kbd_previous_level);
+ }
+ } else if (new_state.triggers == 0) {
+ if (als_enabled)
+ new_state.mode_bit = KBD_MODE_BIT_ALS;
+ else
+ kbd_set_level(&new_state, 0);
+ }
+ if (!(kbd_info.modes & BIT(new_state.mode_bit)))
+ return -EINVAL;
+ ret = kbd_set_state_safe(&new_state, &state);
+ if (ret)
+ return ret;
+ if (new_state.mode_bit != KBD_MODE_BIT_OFF)
+ kbd_previous_mode_bit = new_state.mode_bit;
+ return count;
+ }
+
+ return -EINVAL;
+}
+
+static ssize_t kbd_led_triggers_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kbd_state state;
+ bool triggers_enabled;
+ int level, i, ret;
+ int len = 0;
+
+ ret = kbd_get_state(&state);
+ if (ret)
+ return ret;
+
+ len = 0;
+
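+ /*
+  * The output is a space separated list such as "+keyboard -mouse +als",
+  * matching the syntax accepted by the store method.
+  */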
+ if (kbd_triggers_supported) {
+ triggers_enabled = kbd_is_trigger_mode_bit(state.mode_bit);
+ level = kbd_get_level(&state);
+ for (i = 0; i < ARRAY_SIZE(kbd_led_triggers); ++i) {
+ if (!(kbd_info.triggers & BIT(i)))
+ continue;
+ if (!kbd_led_triggers[i])
+ continue;
+ if ((triggers_enabled || level <= 0) &&
+ (state.triggers & BIT(i)))
+ buf[len++] = '+';
+ else
+ buf[len++] = '-';
+ len += sprintf(buf+len, "%s ", kbd_led_triggers[i]);
+ }
+ }
+
+ if (kbd_als_supported) {
+ if (kbd_is_als_mode_bit(state.mode_bit))
+ len += sprintf(buf+len, "+als ");
+ else
+ len += sprintf(buf+len, "-als ");
+ }
+
+ if (len)
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+static DEVICE_ATTR(start_triggers, S_IRUGO | S_IWUSR,
+ kbd_led_triggers_show, kbd_led_triggers_store);
+
+static ssize_t kbd_led_als_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kbd_state state;
+ struct kbd_state new_state;
+ u8 setting;
+ int ret;
+
+ ret = kstrtou8(buf, 10, &setting);
+ if (ret)
+ return ret;
+
+ ret = kbd_get_state(&state);
+ if (ret)
+ return ret;
+
+ new_state = state;
+ new_state.als_setting = setting;
+
+ ret = kbd_set_state_safe(&new_state, &state);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t kbd_led_als_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kbd_state state;
+ int ret;
+
+ ret = kbd_get_state(&state);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%d\n", state.als_setting);
+}
+
+static DEVICE_ATTR(als_setting, S_IRUGO | S_IWUSR,
+ kbd_led_als_show, kbd_led_als_store);
+
+static struct attribute *kbd_led_attrs[] = {
+ &dev_attr_stop_timeout.attr,
+ &dev_attr_start_triggers.attr,
+ &dev_attr_als_setting.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(kbd_led);
+
+static enum led_brightness kbd_led_level_get(struct led_classdev *led_cdev)
+{
+ int ret;
+ u16 num;
+ struct kbd_state state;
+
+ if (kbd_get_max_level()) {
+ ret = kbd_get_state(&state);
+ if (ret)
+ return 0;
+ ret = kbd_get_level(&state);
+ if (ret < 0)
+ return 0;
+ return ret;
+ }
+
+ if (kbd_get_valid_token_counts()) {
+ ret = kbd_get_first_active_token_bit();
+ if (ret < 0)
+ return 0;
+ for (num = kbd_token_bits; num != 0 && ret > 0; --ret)
+ num &= num - 1; /* clear the first bit set */
+ if (num == 0)
+ return 0;
+ return ffs(num) - 1;
+ }
+
+ pr_warn("Keyboard brightness level control not supported\n");
+ return 0;
+}
+
+static void kbd_led_level_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct kbd_state state;
+ struct kbd_state new_state;
+ u16 num;
+
+ if (kbd_get_max_level()) {
+ if (kbd_get_state(&state))
+ return;
+ new_state = state;
+ if (kbd_set_level(&new_state, value))
+ return;
+ kbd_set_state_safe(&new_state, &state);
+ return;
+ }
+
+ if (kbd_get_valid_token_counts()) {
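+ /*
+  * Brightness levels map to token bits: skip "value" set bits in
+  * kbd_token_bits, then activate the token at the next set bit.
+  */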
+ for (num = kbd_token_bits; num != 0 && value > 0; --value)
+ num &= num - 1; /* clear the first bit set */
+ if (num == 0)
+ return;
+ kbd_set_token_bit(ffs(num) - 1);
+ return;
+ }
+
+ pr_warn("Keyboard brightness level control not supported\n");
+}
+
+static struct led_classdev kbd_led = {
+ .name = "dell::kbd_backlight",
+ .brightness_set = kbd_led_level_set,
+ .brightness_get = kbd_led_level_get,
+ .groups = kbd_led_groups,
+};
+
+static int __init kbd_led_init(struct device *dev)
+{
+ kbd_init();
+ if (!kbd_led_present)
+ return -ENODEV;
+ kbd_led.max_brightness = kbd_get_max_level();
+ if (!kbd_led.max_brightness) {
+ kbd_led.max_brightness = kbd_get_valid_token_counts();
+ if (kbd_led.max_brightness)
+ kbd_led.max_brightness--;
+ }
+ return led_classdev_register(dev, &kbd_led);
+}
+
+static void brightness_set_exit(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ /* Don't change backlight level on exit */
+};
+
+static void kbd_led_exit(void)
+{
+ if (!kbd_led_present)
+ return;
+ kbd_led.brightness_set = brightness_set_exit;
+ led_classdev_unregister(&kbd_led);
+}
+
static int __init dell_init(void)
{
int max_intensity = 0;
@@ -841,6 +1879,8 @@ static int __init dell_init(void)
if (quirks && quirks->touchpad_led)
touchpad_led_init(&platform_device->dev);
+ kbd_led_init(&platform_device->dev);
+
dell_laptop_dir = debugfs_create_dir("dell_laptop", NULL);
if (dell_laptop_dir != NULL)
debugfs_create_file("rfkill", 0444, dell_laptop_dir, NULL,
@@ -908,6 +1948,7 @@ static void __exit dell_exit(void)
debugfs_remove_recursive(dell_laptop_dir);
if (quirks && quirks->touchpad_led)
touchpad_led_exit();
+ kbd_led_exit();
i8042_remove_filter(dell_laptop_i8042_filter);
cancel_delayed_work_sync(&dell_rfkill_work);
backlight_device_unregister(dell_backlight_device);
@@ -924,5 +1965,7 @@ module_init(dell_init);
module_exit(dell_exit);
MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
+MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>");
+MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
MODULE_DESCRIPTION("Dell laptop driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell-smo8800.c b/drivers/platform/x86/dell-smo8800.c
index a653716055d..0aec4fd4c48 100644
--- a/drivers/platform/x86/dell-smo8800.c
+++ b/drivers/platform/x86/dell-smo8800.c
@@ -1,5 +1,5 @@
/*
- * dell-smo8800.c - Dell Latitude ACPI SMO8800/SMO8810 freefall sensor driver
+ * dell-smo8800.c - Dell Latitude ACPI SMO88XX freefall sensor driver
*
* Copyright (C) 2012 Sonal Santan <sonal.santan@gmail.com>
* Copyright (C) 2014 Pali Rohár <pali.rohar@gmail.com>
@@ -209,7 +209,13 @@ static int smo8800_remove(struct acpi_device *device)
static const struct acpi_device_id smo8800_ids[] = {
{ "SMO8800", 0 },
+ { "SMO8801", 0 },
{ "SMO8810", 0 },
+ { "SMO8811", 0 },
+ { "SMO8820", 0 },
+ { "SMO8821", 0 },
+ { "SMO8830", 0 },
+ { "SMO8831", 0 },
{ "", 0 },
};
@@ -228,6 +234,6 @@ static struct acpi_driver smo8800_driver = {
module_acpi_driver(smo8800_driver);
-MODULE_DESCRIPTION("Dell Latitude freefall driver (ACPI SMO8800/SMO8810)");
+MODULE_DESCRIPTION("Dell Latitude freefall driver (ACPI SMO88XX)");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sonal Santan, Pali Rohár");
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index 25721bf2009..6512a06bc05 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -65,10 +65,8 @@ static const struct key_entry dell_wmi_legacy_keymap[] __initconst = {
/* Battery health status button */
{ KE_KEY, 0xe007, { KEY_BATTERY } },
- /* This is actually for all radios. Although physically a
- * switch, the notification does not provide an indication of
- * state and so it should be reported as a key */
- { KE_KEY, 0xe008, { KEY_WLAN } },
+ /* Radio devices state change */
+ { KE_IGNORE, 0xe008, { KEY_RFKILL } },
/* The next device is at offset 6, the active devices are at
offset 8 and the attached devices at offset 10 */
@@ -145,57 +143,154 @@ static const u16 bios_to_linux_keycode[256] __initconst = {
static struct input_dev *dell_wmi_input_dev;
+static void dell_wmi_process_key(int reported_key)
+{
+ const struct key_entry *key;
+
+ key = sparse_keymap_entry_from_scancode(dell_wmi_input_dev,
+ reported_key);
+ if (!key) {
+ pr_info("Unknown key %x pressed\n", reported_key);
+ return;
+ }
+
+ pr_debug("Key %x pressed\n", reported_key);
+
+ /* Don't report brightness notifications that will also come via ACPI */
+ if ((key->keycode == KEY_BRIGHTNESSUP ||
+ key->keycode == KEY_BRIGHTNESSDOWN) && acpi_video)
+ return;
+
+ sparse_keymap_report_entry(dell_wmi_input_dev, key, 1, true);
+}
+
static void dell_wmi_notify(u32 value, void *context)
{
struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
acpi_status status;
+ acpi_size buffer_size;
+ u16 *buffer_entry, *buffer_end;
+ int len, i;
status = wmi_get_event_data(value, &response);
if (status != AE_OK) {
- pr_info("bad event status 0x%x\n", status);
+ pr_warn("bad event status 0x%x\n", status);
return;
}
obj = (union acpi_object *)response.pointer;
+ if (!obj) {
+ pr_warn("no response\n");
+ return;
+ }
- if (obj && obj->type == ACPI_TYPE_BUFFER) {
- const struct key_entry *key;
- int reported_key;
- u16 *buffer_entry = (u16 *)obj->buffer.pointer;
- int buffer_size = obj->buffer.length/2;
-
- if (buffer_size >= 2 && dell_new_hk_type && buffer_entry[1] != 0x10) {
- pr_info("Received unknown WMI event (0x%x)\n",
- buffer_entry[1]);
- kfree(obj);
- return;
- }
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ pr_warn("bad response type %x\n", obj->type);
+ kfree(obj);
+ return;
+ }
- if (buffer_size >= 3 && (dell_new_hk_type || buffer_entry[1] == 0x0))
- reported_key = (int)buffer_entry[2];
+ pr_debug("Received WMI event (%*ph)\n",
+ obj->buffer.length, obj->buffer.pointer);
+
+ buffer_entry = (u16 *)obj->buffer.pointer;
+ buffer_size = obj->buffer.length/2;
+
+ if (!dell_new_hk_type) {
+ if (buffer_size >= 3 && buffer_entry[1] == 0x0)
+ dell_wmi_process_key(buffer_entry[2]);
else if (buffer_size >= 2)
- reported_key = (int)buffer_entry[1] & 0xffff;
- else {
+ dell_wmi_process_key(buffer_entry[1]);
+ else
pr_info("Received unknown WMI event\n");
- kfree(obj);
- return;
+ kfree(obj);
+ return;
+ }
+
+ buffer_end = buffer_entry + buffer_size;
+
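+ /*
+  * New-format events are packed back to back: each block starts with a
+  * u16 word count (excluding itself), followed by a type word and the
+  * payload, so the whole block spans (count + 1) u16 entries.
+  */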
+ while (buffer_entry < buffer_end) {
+
+ len = buffer_entry[0];
+ if (len == 0)
+ break;
+
+ len++;
+
+ if (buffer_entry + len > buffer_end) {
+ pr_warn("Invalid length of WMI event\n");
+ break;
}
- key = sparse_keymap_entry_from_scancode(dell_wmi_input_dev,
- reported_key);
- if (!key) {
- pr_info("Unknown key %x pressed\n", reported_key);
- } else if ((key->keycode == KEY_BRIGHTNESSUP ||
- key->keycode == KEY_BRIGHTNESSDOWN) && acpi_video) {
- /* Don't report brightness notifications that will also
- * come via ACPI */
- ;
- } else {
- sparse_keymap_report_entry(dell_wmi_input_dev, key,
- 1, true);
+ pr_debug("Process buffer (%*ph)\n", len*2, buffer_entry);
+
+ switch (buffer_entry[1]) {
+ case 0x00:
+ for (i = 2; i < len; ++i) {
+ switch (buffer_entry[i]) {
+ case 0xe043:
+ /* NIC Link is Up */
+ pr_debug("NIC Link is Up\n");
+ break;
+ case 0xe044:
+ /* NIC Link is Down */
+ pr_debug("NIC Link is Down\n");
+ break;
+ case 0xe045:
+ /* Unknown event but defined in DSDT */
+ default:
+ /* Unknown event */
+ pr_info("Unknown WMI event type 0x00: "
+ "0x%x\n", (int)buffer_entry[i]);
+ break;
+ }
+ }
+ break;
+ case 0x10:
+ /* Keys pressed */
+ for (i = 2; i < len; ++i)
+ dell_wmi_process_key(buffer_entry[i]);
+ break;
+ case 0x11:
+ for (i = 2; i < len; ++i) {
+ switch (buffer_entry[i]) {
+ case 0xfff0:
+ /* Battery unplugged */
+ pr_debug("Battery unplugged\n");
+ break;
+ case 0xfff1:
+ /* Battery inserted */
+ pr_debug("Battery inserted\n");
+ break;
+ case 0x01e1:
+ case 0x02ea:
+ case 0x02eb:
+ case 0x02ec:
+ case 0x02f6:
+ /* Keyboard backlight level changed */
+ pr_debug("Keyboard backlight level "
+ "changed\n");
+ break;
+ default:
+ /* Unknown event */
+ pr_info("Unknown WMI event type 0x11: "
+ "0x%x\n", (int)buffer_entry[i]);
+ break;
+ }
+ }
+ break;
+ default:
+ /* Unknown event */
+ pr_info("Unknown WMI event type 0x%x\n",
+ (int)buffer_entry[1]);
+ break;
}
+
+ buffer_entry += len;
+
}
+
kfree(obj);
}
@@ -213,11 +308,16 @@ static const struct key_entry * __init dell_wmi_prepare_new_keymap(void)
for (i = 0; i < hotkey_num; i++) {
const struct dell_bios_keymap_entry *bios_entry =
&dell_bios_hotkey_table->keymap[i];
- keymap[i].type = KE_KEY;
- keymap[i].code = bios_entry->scancode;
- keymap[i].keycode = bios_entry->keycode < 256 ?
+ u16 keycode = bios_entry->keycode < 256 ?
bios_to_linux_keycode[bios_entry->keycode] :
KEY_RESERVED;
+
+ if (keycode == KEY_KBDILLUMTOGGLE)
+ keymap[i].type = KE_IGNORE;
+ else
+ keymap[i].type = KE_KEY;
+ keymap[i].code = bios_entry->scancode;
+ keymap[i].keycode = keycode;
}
keymap[hotkey_num].type = KE_END;
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 5a54d35a61d..844c2096bde 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -417,8 +417,7 @@ static ssize_t cpufv_disabled_store(struct device *dev,
switch (value) {
case 0:
if (eeepc->cpufv_disabled)
- pr_warn("cpufv enabled (not officially supported "
- "on this model)\n");
+ pr_warn("cpufv enabled (not officially supported on this model)\n");
eeepc->cpufv_disabled = false;
return count;
case 1:
@@ -580,59 +579,58 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle)
mutex_lock(&eeepc->hotplug_lock);
pci_lock_rescan_remove();
- if (eeepc->hotplug_slot) {
- port = acpi_get_pci_dev(handle);
- if (!port) {
- pr_warning("Unable to find port\n");
- goto out_unlock;
- }
+ if (!eeepc->hotplug_slot)
+ goto out_unlock;
- bus = port->subordinate;
+ port = acpi_get_pci_dev(handle);
+ if (!port) {
+ pr_warning("Unable to find port\n");
+ goto out_unlock;
+ }
- if (!bus) {
- pr_warn("Unable to find PCI bus 1?\n");
- goto out_put_dev;
- }
+ bus = port->subordinate;
- if (pci_bus_read_config_dword(bus, 0, PCI_VENDOR_ID, &l)) {
- pr_err("Unable to read PCI config space?\n");
- goto out_put_dev;
- }
+ if (!bus) {
+ pr_warn("Unable to find PCI bus 1?\n");
+ goto out_put_dev;
+ }
+
+ if (pci_bus_read_config_dword(bus, 0, PCI_VENDOR_ID, &l)) {
+ pr_err("Unable to read PCI config space?\n");
+ goto out_put_dev;
+ }
- absent = (l == 0xffffffff);
+ absent = (l == 0xffffffff);
- if (blocked != absent) {
- pr_warn("BIOS says wireless lan is %s, "
- "but the pci device is %s\n",
- blocked ? "blocked" : "unblocked",
- absent ? "absent" : "present");
- pr_warn("skipped wireless hotplug as probably "
- "inappropriate for this model\n");
+ if (blocked != absent) {
+ pr_warn("BIOS says wireless lan is %s, but the pci device is %s\n",
+ blocked ? "blocked" : "unblocked",
+ absent ? "absent" : "present");
+ pr_warn("skipped wireless hotplug as probably inappropriate for this model\n");
+ goto out_put_dev;
+ }
+
+ if (!blocked) {
+ dev = pci_get_slot(bus, 0);
+ if (dev) {
+ /* Device already present */
+ pci_dev_put(dev);
goto out_put_dev;
}
-
- if (!blocked) {
- dev = pci_get_slot(bus, 0);
- if (dev) {
- /* Device already present */
- pci_dev_put(dev);
- goto out_put_dev;
- }
- dev = pci_scan_single_device(bus, 0);
- if (dev) {
- pci_bus_assign_resources(bus);
- pci_bus_add_device(dev);
- }
- } else {
- dev = pci_get_slot(bus, 0);
- if (dev) {
- pci_stop_and_remove_bus_device(dev);
- pci_dev_put(dev);
- }
+ dev = pci_scan_single_device(bus, 0);
+ if (dev) {
+ pci_bus_assign_resources(bus);
+ pci_bus_add_device(dev);
+ }
+ } else {
+ dev = pci_get_slot(bus, 0);
+ if (dev) {
+ pci_stop_and_remove_bus_device(dev);
+ pci_dev_put(dev);
}
-out_put_dev:
- pci_dev_put(port);
}
+out_put_dev:
+ pci_dev_put(port);
out_unlock:
pci_unlock_rescan_remove();
@@ -821,11 +819,15 @@ static int eeepc_new_rfkill(struct eeepc_laptop *eeepc,
return 0;
}
+static char EEEPC_RFKILL_NODE_1[] = "\\_SB.PCI0.P0P5";
+static char EEEPC_RFKILL_NODE_2[] = "\\_SB.PCI0.P0P6";
+static char EEEPC_RFKILL_NODE_3[] = "\\_SB.PCI0.P0P7";
+
static void eeepc_rfkill_exit(struct eeepc_laptop *eeepc)
{
- eeepc_unregister_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P5");
- eeepc_unregister_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P6");
- eeepc_unregister_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P7");
+ eeepc_unregister_rfkill_notifier(eeepc, EEEPC_RFKILL_NODE_1);
+ eeepc_unregister_rfkill_notifier(eeepc, EEEPC_RFKILL_NODE_2);
+ eeepc_unregister_rfkill_notifier(eeepc, EEEPC_RFKILL_NODE_3);
if (eeepc->wlan_rfkill) {
rfkill_unregister(eeepc->wlan_rfkill);
rfkill_destroy(eeepc->wlan_rfkill);
@@ -897,9 +899,9 @@ static int eeepc_rfkill_init(struct eeepc_laptop *eeepc)
if (result == -EBUSY)
result = 0;
- eeepc_register_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P5");
- eeepc_register_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P6");
- eeepc_register_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P7");
+ eeepc_register_rfkill_notifier(eeepc, EEEPC_RFKILL_NODE_1);
+ eeepc_register_rfkill_notifier(eeepc, EEEPC_RFKILL_NODE_2);
+ eeepc_register_rfkill_notifier(eeepc, EEEPC_RFKILL_NODE_3);
exit:
if (result && result != -ENODEV)
@@ -915,7 +917,7 @@ static int eeepc_hotk_thaw(struct device *device)
struct eeepc_laptop *eeepc = dev_get_drvdata(device);
if (eeepc->wlan_rfkill) {
- bool wlan;
+ int wlan;
/*
* Work around bios bug - acpi _PTS turns off the wireless led
@@ -923,7 +925,8 @@ static int eeepc_hotk_thaw(struct device *device)
* we should kick it ourselves in case hibernation is aborted.
*/
wlan = get_acpi(eeepc, CM_ASL_WLAN);
- set_acpi(eeepc, CM_ASL_WLAN, wlan);
+ if (wlan >= 0)
+ set_acpi(eeepc, CM_ASL_WLAN, wlan);
}
return 0;
@@ -935,9 +938,9 @@ static int eeepc_hotk_restore(struct device *device)
/* Refresh both wlan rfkill state and pci hotplug */
if (eeepc->wlan_rfkill) {
- eeepc_rfkill_hotplug_update(eeepc, "\\_SB.PCI0.P0P5");
- eeepc_rfkill_hotplug_update(eeepc, "\\_SB.PCI0.P0P6");
- eeepc_rfkill_hotplug_update(eeepc, "\\_SB.PCI0.P0P7");
+ eeepc_rfkill_hotplug_update(eeepc, EEEPC_RFKILL_NODE_1);
+ eeepc_rfkill_hotplug_update(eeepc, EEEPC_RFKILL_NODE_2);
+ eeepc_rfkill_hotplug_update(eeepc, EEEPC_RFKILL_NODE_3);
}
if (eeepc->bluetooth_rfkill)
@@ -977,18 +980,28 @@ static struct platform_driver platform_driver = {
#define EEEPC_EC_SFB0 0xD0
#define EEEPC_EC_FAN_CTRL (EEEPC_EC_SFB0 + 3) /* Byte containing SF25 */
+static inline int eeepc_pwm_to_lmsensors(int value)
+{
+ return value * 255 / 100;
+}
+
+static inline int eeepc_lmsensors_to_pwm(int value)
+{
+ value = clamp_val(value, 0, 255);
+ return value * 100 / 255;
+}
+
static int eeepc_get_fan_pwm(void)
{
u8 value = 0;
ec_read(EEEPC_EC_FAN_PWM, &value);
- return value * 255 / 100;
+ return eeepc_pwm_to_lmsensors(value);
}
static void eeepc_set_fan_pwm(int value)
{
- value = clamp_val(value, 0, 255);
- value = value * 100 / 255;
+ value = eeepc_lmsensors_to_pwm(value);
ec_write(EEEPC_EC_FAN_PWM, value);
}
@@ -1002,15 +1015,19 @@ static int eeepc_get_fan_rpm(void)
return high << 8 | low;
}
+#define EEEPC_EC_FAN_CTRL_BIT 0x02
+#define EEEPC_FAN_CTRL_MANUAL 1
+#define EEEPC_FAN_CTRL_AUTO 2
+
static int eeepc_get_fan_ctrl(void)
{
u8 value = 0;
ec_read(EEEPC_EC_FAN_CTRL, &value);
- if (value & 0x02)
- return 1; /* manual */
+ if (value & EEEPC_EC_FAN_CTRL_BIT)
+ return EEEPC_FAN_CTRL_MANUAL;
else
- return 2; /* automatic */
+ return EEEPC_FAN_CTRL_AUTO;
}
static void eeepc_set_fan_ctrl(int manual)
@@ -1018,10 +1035,10 @@ static void eeepc_set_fan_ctrl(int manual)
u8 value = 0;
ec_read(EEEPC_EC_FAN_CTRL, &value);
- if (manual == 1)
- value |= 0x02;
+ if (manual == EEEPC_FAN_CTRL_MANUAL)
+ value |= EEEPC_EC_FAN_CTRL_BIT;
else
- value &= ~0x02;
+ value &= ~EEEPC_EC_FAN_CTRL_BIT;
ec_write(EEEPC_EC_FAN_CTRL, value);
}
@@ -1156,8 +1173,7 @@ static int eeepc_backlight_init(struct eeepc_laptop *eeepc)
static void eeepc_backlight_exit(struct eeepc_laptop *eeepc)
{
- if (eeepc->backlight_device)
- backlight_device_unregister(eeepc->backlight_device);
+ backlight_device_unregister(eeepc->backlight_device);
eeepc->backlight_device = NULL;
}
@@ -1216,7 +1232,7 @@ static void eeepc_input_exit(struct eeepc_laptop *eeepc)
static void eeepc_input_notify(struct eeepc_laptop *eeepc, int event)
{
if (!eeepc->inputdev)
- return ;
+ return;
if (!sparse_keymap_report_event(eeepc->inputdev, event, 1, true))
pr_info("Unknown key %x pressed\n", event);
}
@@ -1224,6 +1240,7 @@ static void eeepc_input_notify(struct eeepc_laptop *eeepc, int event)
static void eeepc_acpi_notify(struct acpi_device *device, u32 event)
{
struct eeepc_laptop *eeepc = acpi_driver_data(device);
+ int old_brightness, new_brightness;
u16 count;
if (event > ACPI_MAX_SYS_NOTIFY)
@@ -1234,34 +1251,32 @@ static void eeepc_acpi_notify(struct acpi_device *device, u32 event)
count);
/* Brightness events are special */
- if (event >= NOTIFY_BRN_MIN && event <= NOTIFY_BRN_MAX) {
-
- /* Ignore them completely if the acpi video driver is used */
- if (eeepc->backlight_device != NULL) {
- int old_brightness, new_brightness;
-
- /* Update the backlight device. */
- old_brightness = eeepc_backlight_notify(eeepc);
-
- /* Convert event to keypress (obsolescent hack) */
- new_brightness = event - NOTIFY_BRN_MIN;
-
- if (new_brightness < old_brightness) {
- event = NOTIFY_BRN_MIN; /* brightness down */
- } else if (new_brightness > old_brightness) {
- event = NOTIFY_BRN_MAX; /* brightness up */
- } else {
- /*
- * no change in brightness - already at min/max,
- * event will be desired value (or else ignored)
- */
- }
- eeepc_input_notify(eeepc, event);
- }
- } else {
- /* Everything else is a bona-fide keypress event */
+ if (event < NOTIFY_BRN_MIN || event > NOTIFY_BRN_MAX) {
eeepc_input_notify(eeepc, event);
+ return;
+ }
+
+ /* Ignore them completely if the acpi video driver is used */
+ if (!eeepc->backlight_device)
+ return;
+
+ /* Update the backlight device. */
+ old_brightness = eeepc_backlight_notify(eeepc);
+
+ /* Convert event to keypress (obsolescent hack) */
+ new_brightness = event - NOTIFY_BRN_MIN;
+
+ if (new_brightness < old_brightness) {
+ event = NOTIFY_BRN_MIN; /* brightness down */
+ } else if (new_brightness > old_brightness) {
+ event = NOTIFY_BRN_MAX; /* brightness up */
+ } else {
+ /*
+ * no change in brightness - already at min/max,
+ * event will be desired value (or else ignored)
+ */
}
+ eeepc_input_notify(eeepc, event);
}
static void eeepc_dmi_check(struct eeepc_laptop *eeepc)
@@ -1293,8 +1308,8 @@ static void eeepc_dmi_check(struct eeepc_laptop *eeepc)
*/
if (strcmp(model, "701") == 0 || strcmp(model, "702") == 0) {
eeepc->cpufv_disabled = true;
- pr_info("model %s does not officially support setting cpu "
- "speed\n", model);
+ pr_info("model %s does not officially support setting cpu speed\n",
+ model);
pr_info("cpufv disabled to avoid instability\n");
}
@@ -1320,8 +1335,8 @@ static void cmsg_quirk(struct eeepc_laptop *eeepc, int cm, const char *name)
Check if cm_getv[cm] works and, if yes, assume cm should be set. */
if (!(eeepc->cm_supported & (1 << cm))
&& !read_acpi_int(eeepc->handle, cm_getv[cm], &dummy)) {
- pr_info("%s (%x) not reported by BIOS,"
- " enabling anyway\n", name, 1 << cm);
+ pr_info("%s (%x) not reported by BIOS, enabling anyway\n",
+ name, 1 << cm);
eeepc->cm_supported |= 1 << cm;
}
}
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index be55bd78b50..7c21c1c44df 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -1153,8 +1153,7 @@ fail_hotkey1:
fail_hotkey:
platform_driver_unregister(&fujitsupf_driver);
fail_backlight:
- if (fujitsu->bl_device)
- backlight_device_unregister(fujitsu->bl_device);
+ backlight_device_unregister(fujitsu->bl_device);
fail_sysfs_group:
sysfs_remove_group(&fujitsu->pf_device->dev.kobj,
&fujitsupf_attribute_group);
@@ -1178,8 +1177,7 @@ static void __exit fujitsu_cleanup(void)
platform_driver_unregister(&fujitsupf_driver);
- if (fujitsu->bl_device)
- backlight_device_unregister(fujitsu->bl_device);
+ backlight_device_unregister(fujitsu->bl_device);
sysfs_remove_group(&fujitsu->pf_device->dev.kobj,
&fujitsupf_attribute_group);
diff --git a/drivers/platform/x86/hp-wireless.c b/drivers/platform/x86/hp-wireless.c
index 415348fc121..4e4cc8bd755 100644
--- a/drivers/platform/x86/hp-wireless.c
+++ b/drivers/platform/x86/hp-wireless.c
@@ -85,6 +85,9 @@ static int hpwl_add(struct acpi_device *device)
int err;
err = hp_wireless_input_setup();
+ if (err)
+ pr_err("Failed to setup hp wireless hotkeys\n");
+
return err;
}
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index 6bec745b6b9..10ce6cba445 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -246,6 +246,7 @@ static const struct dmi_system_id lis3lv02d_dmi_ids[] = {
AXIS_DMI_MATCH("HPB64xx", "HP ProBook 64", xy_swap),
AXIS_DMI_MATCH("HPB64xx", "HP EliteBook 84", xy_swap),
AXIS_DMI_MATCH("HPB65xx", "HP ProBook 65", x_inverted),
+ AXIS_DMI_MATCH("HPZBook15", "HP ZBook 15", x_inverted),
{ NULL, }
/* Laptop models without axis info (yet):
* "NC6910" "HP Compaq 6910"
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index c860eace1ce..b3d419a8472 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -729,8 +729,7 @@ static int ideapad_backlight_init(struct ideapad_private *priv)
static void ideapad_backlight_exit(struct ideapad_private *priv)
{
- if (priv->blightdev)
- backlight_device_unregister(priv->blightdev);
+ backlight_device_unregister(priv->blightdev);
priv->blightdev = NULL;
}
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index ecd36e332c3..e2065e06a3f 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -33,7 +33,7 @@
* performance by allocating more power or thermal budget to the CPU or GPU
* based on available headroom and activity.
*
- * The basic algorithm is driven by a 5s moving average of tempurature. If
+ * The basic algorithm is driven by a 5s moving average of temperature. If
* thermal headroom is available, the CPU and/or GPU power clamps may be
* adjusted upwards. If we hit the thermal ceiling or a thermal trigger,
* we scale back the clamp. Aside from trigger events (when we're critically
diff --git a/drivers/platform/x86/intel_oaktrail.c b/drivers/platform/x86/intel_oaktrail.c
index 0afaaef5711..a4a4258f613 100644
--- a/drivers/platform/x86/intel_oaktrail.c
+++ b/drivers/platform/x86/intel_oaktrail.c
@@ -271,8 +271,7 @@ static int oaktrail_backlight_init(void)
static void oaktrail_backlight_exit(void)
{
- if (oaktrail_bl_device)
- backlight_device_unregister(oaktrail_bl_device);
+ backlight_device_unregister(oaktrail_bl_device);
}
static int oaktrail_probe(struct platform_device *pdev)
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index a3f06cb1063..085987730aa 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -820,7 +820,7 @@ static bool msi_laptop_i8042_filter(unsigned char data, unsigned char str,
{
static bool extended;
- if (str & 0x20)
+ if (str & I8042_STR_AUXDATA)
return false;
/* 0x54 wwan, 0x62 bluetooth, 0x76 wlan, 0xE4 touchpad toggle*/
diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
index 70222f265f6..6d2bac0c463 100644
--- a/drivers/platform/x86/msi-wmi.c
+++ b/drivers/platform/x86/msi-wmi.c
@@ -354,8 +354,7 @@ static void __exit msi_wmi_exit(void)
sparse_keymap_free(msi_wmi_input_dev);
input_unregister_device(msi_wmi_input_dev);
}
- if (backlight)
- backlight_device_unregister(backlight);
+ backlight_device_unregister(backlight);
}
module_init(msi_wmi_init);
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index a1a0fd72e9b..6dd1c0e7dcd 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -3140,8 +3140,7 @@ static void sony_nc_backlight_setup(void)
static void sony_nc_backlight_cleanup(void)
{
- if (sony_bl_props.dev)
- backlight_device_unregister(sony_bl_props.dev);
+ backlight_device_unregister(sony_bl_props.dev);
}
static int sony_nc_add(struct acpi_device *device)
@@ -3716,8 +3715,7 @@ static void sony_pic_detect_device_type(struct sony_pic_dev *dev)
dev->event_types = type2_events;
out:
- if (pcidev)
- pci_dev_put(pcidev);
+ pci_dev_put(pcidev);
pr_info("detected Type%d model\n",
dev->model == SONYPI_DEVICE_TYPE1 ? 1 :
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 6414cfe5d84..c3d11fabc46 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -6557,6 +6557,17 @@ static struct ibm_struct brightness_driver_data = {
* bits 3-0 (volume). Other bits in NVRAM may have other functions,
* such as bit 7 which is used to detect repeated presses of MUTE,
* and we leave them unchanged.
+ *
+ * On newer Lenovo ThinkPads, the EC can automatically change the volume
+ * in response to user input. Unfortunately, this rarely works well.
+ * The laptop changes the state of its internal MUTE gate and, on some
+ * models, sends KEY_MUTE, causing any user code that responds to the
+ * mute button to get confused. The hardware MUTE gate is also
+ * unnecessary, since user code can handle the mute button without
+ * kernel or EC help.
+ *
+ * To avoid confusing userspace, we simply disable all EC-based mute
+ * and volume controls when possible.
*/
#ifdef CONFIG_THINKPAD_ACPI_ALSA_SUPPORT
@@ -6611,11 +6622,21 @@ enum tpacpi_volume_capabilities {
TPACPI_VOL_CAP_MAX
};
+enum tpacpi_mute_btn_mode {
+ TP_EC_MUTE_BTN_LATCH = 0, /* Mute mutes; up/down unmutes */
+ /* We don't know what mode 1 is. */
+ TP_EC_MUTE_BTN_NONE = 2, /* Mute and up/down are just keys */
+ TP_EC_MUTE_BTN_TOGGLE = 3, /* Mute toggles; up/down unmutes */
+};
+
static enum tpacpi_volume_access_mode volume_mode =
TPACPI_VOL_MODE_MAX;
static enum tpacpi_volume_capabilities volume_capabilities;
static bool volume_control_allowed;
+static bool software_mute_requested = true;
+static bool software_mute_active;
+static int software_mute_orig_mode;
/*
 * Used to synchronize writers to TP_EC_AUDIO and
@@ -6633,6 +6654,8 @@ static void tpacpi_volume_checkpoint_nvram(void)
return;
if (!volume_control_allowed)
return;
+ if (software_mute_active)
+ return;
vdbg_printk(TPACPI_DBG_MIXER,
"trying to checkpoint mixer state to NVRAM...\n");
@@ -6694,6 +6717,12 @@ static int volume_set_status_ec(const u8 status)
dbg_printk(TPACPI_DBG_MIXER, "set EC mixer to 0x%02x\n", status);
+ /*
+ * On X200s, and possibly on others, it can take a while for
+ * reads to become correct.
+ */
+ msleep(1);
+
return 0;
}
@@ -6776,6 +6805,57 @@ unlock:
return rc;
}
+static int volume_set_software_mute(bool startup)
+{
+ int result;
+
+ if (!tpacpi_is_lenovo())
+ return -ENODEV;
+
+ if (startup) {
+ if (!acpi_evalf(ec_handle, &software_mute_orig_mode,
+ "HAUM", "qd"))
+ return -EIO;
+
+ dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_MIXER,
+ "Initial HAUM setting was %d\n",
+ software_mute_orig_mode);
+ }
+
+ if (!acpi_evalf(ec_handle, &result, "SAUM", "qdd",
+ (int)TP_EC_MUTE_BTN_NONE))
+ return -EIO;
+
+ if (result != TP_EC_MUTE_BTN_NONE)
+ pr_warn("Unexpected SAUM result %d\n",
+ result);
+
+ /*
+ * In software mute mode, the standard codec controls take
+ * precedence, so we unmute the ThinkPad HW switch at
+ * startup. Just in case there are SAUM-capable ThinkPads
+ * with level controls, set max HW volume as well.
+ */
+ if (tp_features.mixer_no_level_control)
+ result = volume_set_mute(false);
+ else
+ result = volume_set_status(TP_EC_VOLUME_MAX);
+
+ if (result != 0)
+ pr_warn("Failed to unmute the HW mute switch\n");
+
+ return 0;
+}
+
+static void volume_exit_software_mute(void)
+{
+ int r;
+
+ if (!acpi_evalf(ec_handle, &r, "SAUM", "qdd", software_mute_orig_mode)
+ || r != software_mute_orig_mode)
+ pr_warn("Failed to restore mute mode\n");
+}
+
static int volume_alsa_set_volume(const u8 vol)
{
dbg_printk(TPACPI_DBG_MIXER,
@@ -6883,7 +6963,12 @@ static void volume_suspend(void)
static void volume_resume(void)
{
- volume_alsa_notify_change();
+ if (software_mute_active) {
+ if (volume_set_software_mute(false) < 0)
+ pr_warn("Failed to restore software mute\n");
+ } else {
+ volume_alsa_notify_change();
+ }
}
static void volume_shutdown(void)
@@ -6899,6 +6984,9 @@ static void volume_exit(void)
}
tpacpi_volume_checkpoint_nvram();
+
+ if (software_mute_active)
+ volume_exit_software_mute();
}
static int __init volume_create_alsa_mixer(void)
@@ -7083,16 +7171,20 @@ static int __init volume_init(struct ibm_init_struct *iibm)
"mute is supported, volume control is %s\n",
str_supported(!tp_features.mixer_no_level_control));
- rc = volume_create_alsa_mixer();
- if (rc) {
- pr_err("Could not create the ALSA mixer interface\n");
- return rc;
- }
+ if (software_mute_requested && volume_set_software_mute(true) == 0) {
+ software_mute_active = true;
+ } else {
+ rc = volume_create_alsa_mixer();
+ if (rc) {
+ pr_err("Could not create the ALSA mixer interface\n");
+ return rc;
+ }
- pr_info("Console audio control enabled, mode: %s\n",
- (volume_control_allowed) ?
- "override (read/write)" :
- "monitor (read only)");
+ pr_info("Console audio control enabled, mode: %s\n",
+ (volume_control_allowed) ?
+ "override (read/write)" :
+ "monitor (read only)");
+ }
vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_MIXER,
"registering volume hotkeys as change notification\n");
@@ -9089,6 +9181,10 @@ MODULE_PARM_DESC(volume_control,
"Enables software override for the console audio "
"control when true");
+module_param_named(software_mute, software_mute_requested, bool, 0444);
+MODULE_PARM_DESC(software_mute,
+ "Request full software mute control");
+
/* ALSA module API parameters */
module_param_named(index, alsa_index, int, 0444);
MODULE_PARM_DESC(index, "ALSA index for the ACPI EC Mixer");
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index ab6151f0542..fc34a71866e 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -186,6 +186,7 @@ static struct toshiba_acpi_dev *toshiba_acpi;
static const struct acpi_device_id toshiba_device_ids[] = {
{"TOS6200", 0},
+ {"TOS6207", 0},
{"TOS6208", 0},
{"TOS1900", 0},
{"", 0},
@@ -928,9 +929,7 @@ static int lcd_proc_open(struct inode *inode, struct file *file)
static int set_lcd_brightness(struct toshiba_acpi_dev *dev, int value)
{
- u32 in[TCI_WORDS] = { HCI_SET, HCI_LCD_BRIGHTNESS, 0, 0, 0, 0 };
- u32 out[TCI_WORDS];
- acpi_status status;
+ u32 hci_result;
if (dev->tr_backlight_supported) {
bool enable = !value;
@@ -941,20 +940,9 @@ static int set_lcd_brightness(struct toshiba_acpi_dev *dev, int value)
value--;
}
- in[2] = value << HCI_LCD_BRIGHTNESS_SHIFT;
- status = tci_raw(dev, in, out);
- if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
- pr_err("ACPI call to set brightness failed");
- return -EIO;
- }
- /* Extra check for "incomplete" backlight method, where the AML code
- * doesn't check for HCI_SET or HCI_GET and returns TOS_SUCCESS,
- * the actual brightness, and in some cases the max brightness.
- */
- if (out[2] > 0 || out[3] == 0xE000)
- return -ENODEV;
-
- return out[0] == TOS_SUCCESS ? 0 : -EIO;
+ value = value << HCI_LCD_BRIGHTNESS_SHIFT;
+ hci_result = hci_write1(dev, HCI_LCD_BRIGHTNESS, value);
+ return hci_result == TOS_SUCCESS ? 0 : -EIO;
}
static int set_lcd_status(struct backlight_device *bd)
@@ -1406,12 +1394,6 @@ static ssize_t toshiba_kbd_bl_mode_store(struct device *dev,
if (ret)
return ret;
- /* Update sysfs entries on successful mode change*/
- ret = sysfs_update_group(&toshiba->acpi_dev->dev.kobj,
- &toshiba_attr_group);
- if (ret)
- return ret;
-
toshiba->kbd_mode = mode;
}
@@ -1586,10 +1568,32 @@ static umode_t toshiba_sysfs_is_visible(struct kobject *kobj,
return exists ? attr->mode : 0;
}
+/*
+ * Hotkeys
+ */
+static int toshiba_acpi_enable_hotkeys(struct toshiba_acpi_dev *dev)
+{
+ acpi_status status;
+ u32 result;
+
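+ /*
+  * Hotkey reporting needs both the ACPI ENAB method and the
+  * HCI_HOTKEY_EVENT enable bit; if either step fails, hotkeys stay off.
+  */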
+ status = acpi_evaluate_object(dev->acpi_dev->handle,
+ "ENAB", NULL, NULL);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ result = hci_write1(dev, HCI_HOTKEY_EVENT, HCI_HOTKEY_ENABLE);
+ if (result == TOS_FAILURE)
+ return -EIO;
+ else if (result == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ return 0;
+}
+
static bool toshiba_acpi_i8042_filter(unsigned char data, unsigned char str,
struct serio *port)
{
- if (str & 0x20)
+ if (str & I8042_STR_AUXDATA)
return false;
if (unlikely(data == 0xe0))
@@ -1648,9 +1652,45 @@ static void toshiba_acpi_report_hotkey(struct toshiba_acpi_dev *dev,
pr_info("Unknown key %x\n", scancode);
}
+static void toshiba_acpi_process_hotkeys(struct toshiba_acpi_dev *dev)
+{
+ u32 hci_result, value;
+ int retries = 3;
+ int scancode;
+
+ if (dev->info_supported) {
+ scancode = toshiba_acpi_query_hotkey(dev);
+ if (scancode < 0)
+ pr_err("Failed to query hotkey event\n");
+ else if (scancode != 0)
+ toshiba_acpi_report_hotkey(dev, scancode);
+ } else if (dev->system_event_supported) {
+ do {
+ hci_result = hci_read1(dev, HCI_SYSTEM_EVENT, &value);
+ switch (hci_result) {
+ case TOS_SUCCESS:
+ toshiba_acpi_report_hotkey(dev, (int)value);
+ break;
+ case TOS_NOT_SUPPORTED:
+ /*
+ * This is a workaround for an unresolved
+ * issue on some machines where system events
+ * sporadically become disabled.
+ */
+ hci_result =
+ hci_write1(dev, HCI_SYSTEM_EVENT, 1);
+ pr_notice("Re-enabled hotkeys\n");
+ /* fall through */
+ default:
+ retries--;
+ break;
+ }
+ } while (retries && hci_result != TOS_FIFO_EMPTY);
+ }
+}
+
static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
{
- acpi_status status;
acpi_handle ec_handle;
int error;
u32 hci_result;
@@ -1677,7 +1717,6 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
* supported, so if it's present set up an i8042 key filter
* for this purpose.
*/
- status = AE_ERROR;
ec_handle = ec_get_handle();
if (ec_handle && acpi_has_method(ec_handle, "NTFY")) {
INIT_WORK(&dev->hotkey_work, toshiba_acpi_hotkey_work);
@@ -1708,10 +1747,9 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
goto err_remove_filter;
}
- status = acpi_evaluate_object(dev->acpi_dev->handle, "ENAB", NULL, NULL);
- if (ACPI_FAILURE(status)) {
+ error = toshiba_acpi_enable_hotkeys(dev);
+ if (error) {
pr_info("Unable to enable hotkeys\n");
- error = -ENODEV;
goto err_remove_filter;
}
@@ -1721,7 +1759,6 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
goto err_remove_filter;
}
- hci_result = hci_write1(dev, HCI_HOTKEY_EVENT, HCI_HOTKEY_ENABLE);
return 0;
err_remove_filter:
@@ -1810,8 +1847,7 @@ static int toshiba_acpi_remove(struct acpi_device *acpi_dev)
rfkill_destroy(dev->bt_rfk);
}
- if (dev->backlight_dev)
- backlight_device_unregister(dev->backlight_dev);
+ backlight_device_unregister(dev->backlight_dev);
if (dev->illumination_supported)
led_classdev_unregister(&dev->led_dev);
@@ -1967,41 +2003,29 @@ error:
static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
{
struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev);
- u32 hci_result, value;
- int retries = 3;
- int scancode;
-
- if (event != 0x80)
- return;
+ int ret;
- if (dev->info_supported) {
- scancode = toshiba_acpi_query_hotkey(dev);
- if (scancode < 0)
- pr_err("Failed to query hotkey event\n");
- else if (scancode != 0)
- toshiba_acpi_report_hotkey(dev, scancode);
- } else if (dev->system_event_supported) {
- do {
- hci_result = hci_read1(dev, HCI_SYSTEM_EVENT, &value);
- switch (hci_result) {
- case TOS_SUCCESS:
- toshiba_acpi_report_hotkey(dev, (int)value);
- break;
- case TOS_NOT_SUPPORTED:
- /*
- * This is a workaround for an unresolved
- * issue on some machines where system events
- * sporadically become disabled.
- */
- hci_result =
- hci_write1(dev, HCI_SYSTEM_EVENT, 1);
- pr_notice("Re-enabled hotkeys\n");
- /* fall through */
- default:
- retries--;
- break;
- }
- } while (retries && hci_result != TOS_FIFO_EMPTY);
+ switch (event) {
+ case 0x80: /* Hotkeys and some system events */
+ toshiba_acpi_process_hotkeys(dev);
+ break;
+ case 0x92: /* Keyboard backlight mode changed */
+ /* Update sysfs entries */
+ ret = sysfs_update_group(&acpi_dev->dev.kobj,
+ &toshiba_attr_group);
+ if (ret)
+ pr_err("Unable to update sysfs entries\n");
+ break;
+ case 0x81: /* Unknown */
+ case 0x82: /* Unknown */
+ case 0x83: /* Unknown */
+ case 0x8c: /* Unknown */
+ case 0x8e: /* Unknown */
+ case 0x8f: /* Unknown */
+ case 0x90: /* Unknown */
+ default:
+ pr_info("Unknown event received %x\n", event);
+ break;
}
}
@@ -2020,16 +2044,12 @@ static int toshiba_acpi_suspend(struct device *device)
static int toshiba_acpi_resume(struct device *device)
{
struct toshiba_acpi_dev *dev = acpi_driver_data(to_acpi_device(device));
- u32 result;
- acpi_status status;
+ int error;
if (dev->hotkey_dev) {
- status = acpi_evaluate_object(dev->acpi_dev->handle, "ENAB",
- NULL, NULL);
- if (ACPI_FAILURE(status))
+ error = toshiba_acpi_enable_hotkeys(dev);
+ if (error)
pr_info("Unable to re-enable hotkeys\n");
-
- result = hci_write1(dev, HCI_HOTKEY_EVENT, HCI_HOTKEY_ENABLE);
}
return 0;
diff --git a/drivers/power/ds2782_battery.c b/drivers/power/ds2782_battery.c
index 041f9b638d2..39694883d3b 100644
--- a/drivers/power/ds2782_battery.c
+++ b/drivers/power/ds2782_battery.c
@@ -351,13 +351,9 @@ static int ds278x_resume(struct device *dev)
schedule_delayed_work(&info->bat_work, DS278x_DELAY);
return 0;
}
+#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(ds278x_battery_pm_ops, ds278x_suspend, ds278x_resume);
-#define DS278X_BATTERY_PM_OPS (&ds278x_battery_pm_ops)
-
-#else
-#define DS278X_BATTERY_PM_OPS NULL
-#endif /* CONFIG_PM_SLEEP */
enum ds278x_num_id {
DS2782 = 0,
@@ -460,7 +456,7 @@ MODULE_DEVICE_TABLE(i2c, ds278x_id);
static struct i2c_driver ds278x_battery_driver = {
.driver = {
.name = "ds2782-battery",
- .pm = DS278X_BATTERY_PM_OPS,
+ .pm = &ds278x_battery_pm_ops,
},
.probe = ds278x_battery_probe,
.remove = ds278x_battery_remove,
diff --git a/drivers/power/gpio-charger.c b/drivers/power/gpio-charger.c
index 3ee889fe002..aef74bdf7ab 100644
--- a/drivers/power/gpio-charger.c
+++ b/drivers/power/gpio-charger.c
@@ -22,6 +22,8 @@
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
#include <linux/power/gpio-charger.h>
@@ -69,6 +71,59 @@ static enum power_supply_property gpio_charger_properties[] = {
POWER_SUPPLY_PROP_ONLINE,
};
+static
+struct gpio_charger_platform_data *gpio_charger_parse_dt(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct gpio_charger_platform_data *pdata;
+ const char *chargetype;
+ enum of_gpio_flags flags;
+ int ret;
+
+ if (!np)
+ return ERR_PTR(-ENOENT);
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ pdata->name = np->name;
+
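+ /*
+  * The first GPIO of the node reports charger presence; the active-low
+  * flag from the GPIO specifier is honoured via gpio_active_low.
+  */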
+ pdata->gpio = of_get_gpio_flags(np, 0, &flags);
+ if (pdata->gpio < 0) {
+ if (pdata->gpio != -EPROBE_DEFER)
+ dev_err(dev, "could not get charger gpio\n");
+ return ERR_PTR(pdata->gpio);
+ }
+
+ pdata->gpio_active_low = !!(flags & OF_GPIO_ACTIVE_LOW);
+
+ pdata->type = POWER_SUPPLY_TYPE_UNKNOWN;
+ ret = of_property_read_string(np, "charger-type", &chargetype);
+ if (ret >= 0) {
+ if (!strncmp("unknown", chargetype, 7))
+ pdata->type = POWER_SUPPLY_TYPE_UNKNOWN;
+ else if (!strncmp("battery", chargetype, 7))
+ pdata->type = POWER_SUPPLY_TYPE_BATTERY;
+ else if (!strncmp("ups", chargetype, 3))
+ pdata->type = POWER_SUPPLY_TYPE_UPS;
+ else if (!strncmp("mains", chargetype, 5))
+ pdata->type = POWER_SUPPLY_TYPE_MAINS;
+ else if (!strncmp("usb-sdp", chargetype, 7))
+ pdata->type = POWER_SUPPLY_TYPE_USB;
+ else if (!strncmp("usb-dcp", chargetype, 7))
+ pdata->type = POWER_SUPPLY_TYPE_USB_DCP;
+ else if (!strncmp("usb-cdp", chargetype, 7))
+ pdata->type = POWER_SUPPLY_TYPE_USB_CDP;
+ else if (!strncmp("usb-aca", chargetype, 7))
+ pdata->type = POWER_SUPPLY_TYPE_USB_ACA;
+ else
+ dev_warn(dev, "unknown charger type %s\n", chargetype);
+ }
+
+ return pdata;
+}
+
static int gpio_charger_probe(struct platform_device *pdev)
{
const struct gpio_charger_platform_data *pdata = pdev->dev.platform_data;
@@ -78,8 +133,13 @@ static int gpio_charger_probe(struct platform_device *pdev)
int irq;
if (!pdata) {
- dev_err(&pdev->dev, "No platform data\n");
- return -EINVAL;
+ pdata = gpio_charger_parse_dt(&pdev->dev);
+ if (IS_ERR(pdata)) {
+ ret = PTR_ERR(pdata);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "No platform data\n");
+ return ret;
+ }
}
if (!gpio_is_valid(pdata->gpio)) {
@@ -103,6 +163,7 @@ static int gpio_charger_probe(struct platform_device *pdev)
charger->get_property = gpio_charger_get_property;
charger->supplied_to = pdata->supplied_to;
charger->num_supplicants = pdata->num_supplicants;
+ charger->of_node = pdev->dev.of_node;
ret = gpio_request(pdata->gpio, dev_name(&pdev->dev));
if (ret) {
@@ -189,12 +250,19 @@ static int gpio_charger_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(gpio_charger_pm_ops,
gpio_charger_suspend, gpio_charger_resume);
+static const struct of_device_id gpio_charger_match[] = {
+ { .compatible = "gpio-charger" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpio_charger_match);
+
static struct platform_driver gpio_charger_driver = {
.probe = gpio_charger_probe,
.remove = gpio_charger_remove,
.driver = {
.name = "gpio-charger",
.pm = &gpio_charger_pm_ops,
+ .of_match_table = gpio_charger_match,
},
};
diff --git a/drivers/power/pm2301_charger.c b/drivers/power/pm2301_charger.c
index 62c15af58c9..777324992c5 100644
--- a/drivers/power/pm2301_charger.c
+++ b/drivers/power/pm2301_charger.c
@@ -951,8 +951,6 @@ static int pm2xxx_wall_charger_suspend(struct device *dev)
#endif
-#ifdef CONFIG_PM_RUNTIME
-
static int pm2xxx_runtime_suspend(struct device *dev)
{
struct i2c_client *pm2xxx_i2c_client = to_i2c_client(dev);
@@ -977,8 +975,6 @@ static int pm2xxx_runtime_resume(struct device *dev)
return 0;
}
-#endif
-
static const struct dev_pm_ops pm2xxx_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(pm2xxx_wall_charger_suspend,
pm2xxx_wall_charger_resume)
diff --git a/drivers/power/reset/axxia-reset.c b/drivers/power/reset/axxia-reset.c
index 3b1f8d60178..4e4cd1c8fe5 100644
--- a/drivers/power/reset/axxia-reset.c
+++ b/drivers/power/reset/axxia-reset.c
@@ -19,14 +19,12 @@
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
+#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/regmap.h>
-#include <asm/system_misc.h>
-
-
#define SC_CRIT_WRITE_KEY 0x1000
#define SC_LATCH_ON_RESET 0x1004
#define SC_RESET_CONTROL 0x1008
@@ -39,7 +37,8 @@
static struct regmap *syscon;
-static void do_axxia_restart(enum reboot_mode reboot_mode, const char *cmd)
+static int axxia_restart_handler(struct notifier_block *this,
+ unsigned long mode, void *cmd)
{
/* Access Key (0xab) */
regmap_write(syscon, SC_CRIT_WRITE_KEY, 0xab);
@@ -50,11 +49,19 @@ static void do_axxia_restart(enum reboot_mode reboot_mode, const char *cmd)
/* Assert chip reset */
regmap_update_bits(syscon, SC_RESET_CONTROL,
RSTCTL_RST_CHIP, RSTCTL_RST_CHIP);
+
+ return NOTIFY_DONE;
}
+static struct notifier_block axxia_restart_nb = {
+ .notifier_call = axxia_restart_handler,
+ .priority = 128,
+};
+
static int axxia_reset_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ int err;
syscon = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
if (IS_ERR(syscon)) {
@@ -62,9 +69,11 @@ static int axxia_reset_probe(struct platform_device *pdev)
return PTR_ERR(syscon);
}
- arm_pm_restart = do_axxia_restart;
+ err = register_restart_handler(&axxia_restart_nb);
+ if (err)
+ dev_err(dev, "cannot register restart handler (err=%d)\n", err);
- return 0;
+ return err;
}
static const struct of_device_id of_axxia_reset_match[] = {
diff --git a/drivers/power/reset/brcmstb-reboot.c b/drivers/power/reset/brcmstb-reboot.c
index c523ea7a90e..100606f9b3d 100644
--- a/drivers/power/reset/brcmstb-reboot.c
+++ b/drivers/power/reset/brcmstb-reboot.c
@@ -16,6 +16,7 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/jiffies.h>
+#include <linux/notifier.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
@@ -26,8 +27,6 @@
#include <linux/smp.h>
#include <linux/mfd/syscon.h>
-#include <asm/system_misc.h>
-
#define RESET_SOURCE_ENABLE_REG 1
#define SW_MASTER_RESET_REG 2
@@ -35,7 +34,8 @@ static struct regmap *regmap;
static u32 rst_src_en;
static u32 sw_mstr_rst;
-static void brcmstb_reboot(enum reboot_mode mode, const char *cmd)
+static int brcmstb_restart_handler(struct notifier_block *this,
+ unsigned long mode, void *cmd)
{
int rc;
u32 tmp;
@@ -43,31 +43,38 @@ static void brcmstb_reboot(enum reboot_mode mode, const char *cmd)
rc = regmap_write(regmap, rst_src_en, 1);
if (rc) {
pr_err("failed to write rst_src_en (%d)\n", rc);
- return;
+ return NOTIFY_DONE;
}
rc = regmap_read(regmap, rst_src_en, &tmp);
if (rc) {
pr_err("failed to read rst_src_en (%d)\n", rc);
- return;
+ return NOTIFY_DONE;
}
rc = regmap_write(regmap, sw_mstr_rst, 1);
if (rc) {
pr_err("failed to write sw_mstr_rst (%d)\n", rc);
- return;
+ return NOTIFY_DONE;
}
rc = regmap_read(regmap, sw_mstr_rst, &tmp);
if (rc) {
pr_err("failed to read sw_mstr_rst (%d)\n", rc);
- return;
+ return NOTIFY_DONE;
}
while (1)
;
+
+ return NOTIFY_DONE;
}
+static struct notifier_block brcmstb_restart_nb = {
+ .notifier_call = brcmstb_restart_handler,
+ .priority = 128,
+};
+
static int brcmstb_reboot_probe(struct platform_device *pdev)
{
int rc;
@@ -93,9 +100,12 @@ static int brcmstb_reboot_probe(struct platform_device *pdev)
return -EINVAL;
}
- arm_pm_restart = brcmstb_reboot;
+ rc = register_restart_handler(&brcmstb_restart_nb);
+ if (rc)
+ dev_err(&pdev->dev,
+ "cannot register restart handler (err=%d)\n", rc);
- return 0;
+ return rc;
}
static const struct of_device_id of_match[] = {
diff --git a/drivers/power/reset/hisi-reboot.c b/drivers/power/reset/hisi-reboot.c
index 0c91d0231d3..5385460e23b 100644
--- a/drivers/power/reset/hisi-reboot.c
+++ b/drivers/power/reset/hisi-reboot.c
@@ -14,27 +14,36 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/notifier.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <asm/proc-fns.h>
-#include <asm/system_misc.h>
static void __iomem *base;
static u32 reboot_offset;
-static void hisi_restart(enum reboot_mode mode, const char *cmd)
+static int hisi_restart_handler(struct notifier_block *this,
+ unsigned long mode, void *cmd)
{
writel_relaxed(0xdeadbeef, base + reboot_offset);
while (1)
cpu_do_idle();
+
+ return NOTIFY_DONE;
}
+static struct notifier_block hisi_restart_nb = {
+ .notifier_call = hisi_restart_handler,
+ .priority = 128,
+};
+
static int hisi_reboot_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
+ int err;
base = of_iomap(np, 0);
if (!base) {
@@ -47,9 +56,12 @@ static int hisi_reboot_probe(struct platform_device *pdev)
return -EINVAL;
}
- arm_pm_restart = hisi_restart;
+ err = register_restart_handler(&hisi_restart_nb);
+ if (err)
+ dev_err(&pdev->dev, "cannot register restart handler (err=%d)\n",
+ err);
- return 0;
+ return err;
}
static struct of_device_id hisi_reboot_of_match[] = {
diff --git a/drivers/power/reset/keystone-reset.c b/drivers/power/reset/keystone-reset.c
index 86bc100818b..faedf16c811 100644
--- a/drivers/power/reset/keystone-reset.c
+++ b/drivers/power/reset/keystone-reset.c
@@ -12,9 +12,9 @@
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/regmap.h>
-#include <asm/system_misc.h>
#include <linux/mfd/syscon.h>
#include <linux/of_platform.h>
@@ -52,7 +52,8 @@ static inline int rsctrl_enable_rspll_write(void)
RSCTRL_KEY_MASK, RSCTRL_KEY);
}
-static void rsctrl_restart(enum reboot_mode mode, const char *cmd)
+static int rsctrl_restart_handler(struct notifier_block *this,
+ unsigned long mode, void *cmd)
{
/* enable write access to RSTCTRL */
rsctrl_enable_rspll_write();
@@ -60,8 +61,15 @@ static void rsctrl_restart(enum reboot_mode mode, const char *cmd)
/* reset the SOC */
regmap_update_bits(pllctrl_regs, rspll_offset + RSCTRL_RG,
RSCTRL_RESET_MASK, 0);
+
+ return NOTIFY_DONE;
}
+static struct notifier_block rsctrl_restart_nb = {
+ .notifier_call = rsctrl_restart_handler,
+ .priority = 128,
+};
+
static struct of_device_id rsctrl_of_match[] = {
{.compatible = "ti,keystone-reset", },
{},
@@ -114,8 +122,6 @@ static int rsctrl_probe(struct platform_device *pdev)
if (ret)
return ret;
- arm_pm_restart = rsctrl_restart;
-
/* disable a reset isolation for all module clocks */
ret = regmap_write(pllctrl_regs, rspll_offset + RSISO_RG, 0);
if (ret)
@@ -147,7 +153,11 @@ static int rsctrl_probe(struct platform_device *pdev)
return ret;
}
- return 0;
+ ret = register_restart_handler(&rsctrl_restart_nb);
+ if (ret)
+ dev_err(dev, "cannot register restart handler (err=%d)\n", ret);
+
+ return ret;
}
static struct platform_driver rsctrl_driver = {
diff --git a/drivers/power/reset/syscon-reboot.c b/drivers/power/reset/syscon-reboot.c
index 815b901822c..c4049f45663 100644
--- a/drivers/power/reset/syscon-reboot.c
+++ b/drivers/power/reset/syscon-reboot.c
@@ -68,7 +68,7 @@ static int syscon_reboot_probe(struct platform_device *pdev)
return -EINVAL;
ctx->restart_handler.notifier_call = syscon_restart_handle;
- ctx->restart_handler.priority = 128;
+ ctx->restart_handler.priority = 192;
err = register_restart_handler(&ctx->restart_handler);
if (err)
dev_err(dev, "can't register restart notifier (err=%d)\n", err);
diff --git a/drivers/power/reset/vexpress-poweroff.c b/drivers/power/reset/vexpress-poweroff.c
index 4dc102e2b23..9dfc9cee323 100644
--- a/drivers/power/reset/vexpress-poweroff.c
+++ b/drivers/power/reset/vexpress-poweroff.c
@@ -12,14 +12,14 @@
*/
#include <linux/delay.h>
+#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/reboot.h>
#include <linux/stat.h>
#include <linux/vexpress.h>
-#include <asm/system_misc.h>
-
static void vexpress_reset_do(struct device *dev, const char *what)
{
int err = -ENOENT;
@@ -43,11 +43,19 @@ static void vexpress_power_off(void)
static struct device *vexpress_restart_device;
-static void vexpress_restart(enum reboot_mode reboot_mode, const char *cmd)
+static int vexpress_restart(struct notifier_block *this, unsigned long mode,
+ void *cmd)
{
vexpress_reset_do(vexpress_restart_device, "restart");
+
+ return NOTIFY_DONE;
}
+static struct notifier_block vexpress_restart_nb = {
+ .notifier_call = vexpress_restart,
+ .priority = 128,
+};
+
static ssize_t vexpress_reset_active_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -86,12 +94,28 @@ static struct of_device_id vexpress_reset_of_match[] = {
{}
};
+static int _vexpress_register_restart_handler(struct device *dev)
+{
+ int err;
+
+ vexpress_restart_device = dev;
+ err = register_restart_handler(&vexpress_restart_nb);
+ if (err) {
+ dev_err(dev, "cannot register restart handler (err=%d)\n", err);
+ return err;
+ }
+ device_create_file(dev, &dev_attr_active);
+
+ return 0;
+}
+
static int vexpress_reset_probe(struct platform_device *pdev)
{
enum vexpress_reset_func func;
const struct of_device_id *match =
of_match_device(vexpress_reset_of_match, &pdev->dev);
struct regmap *regmap;
+ int ret = 0;
if (match)
func = (enum vexpress_reset_func)match->data;
@@ -110,18 +134,14 @@ static int vexpress_reset_probe(struct platform_device *pdev)
break;
case FUNC_RESET:
if (!vexpress_restart_device)
- vexpress_restart_device = &pdev->dev;
- arm_pm_restart = vexpress_restart;
- device_create_file(&pdev->dev, &dev_attr_active);
+ ret = _vexpress_register_restart_handler(&pdev->dev);
break;
case FUNC_REBOOT:
- vexpress_restart_device = &pdev->dev;
- arm_pm_restart = vexpress_restart;
- device_create_file(&pdev->dev, &dev_attr_active);
+ ret = _vexpress_register_restart_handler(&pdev->dev);
break;
};
- return 0;
+ return ret;
}
static const struct platform_device_id vexpress_reset_id_table[] = {
diff --git a/drivers/power/reset/xgene-reboot.c b/drivers/power/reset/xgene-reboot.c
index 6b49be6867a..b0e5002f8de 100644
--- a/drivers/power/reset/xgene-reboot.c
+++ b/drivers/power/reset/xgene-reboot.c
@@ -24,63 +24,67 @@
 * For system shutdown, this is board specific. If a board designer
* implements GPIO shutdown, use the gpio-poweroff.c driver.
*/
+#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/notifier.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
+#include <linux/reboot.h>
#include <linux/stat.h>
#include <linux/slab.h>
-#include <asm/system_misc.h>
struct xgene_reboot_context {
- struct platform_device *pdev;
+ struct device *dev;
void *csr;
u32 mask;
+ struct notifier_block restart_handler;
};
-static struct xgene_reboot_context *xgene_restart_ctx;
-
-static void xgene_restart(enum reboot_mode mode, const char *cmd)
+static int xgene_restart_handler(struct notifier_block *this,
+ unsigned long mode, void *cmd)
{
- struct xgene_reboot_context *ctx = xgene_restart_ctx;
- unsigned long timeout;
+ struct xgene_reboot_context *ctx =
+ container_of(this, struct xgene_reboot_context,
+ restart_handler);
/* Issue the reboot */
- if (ctx)
- writel(ctx->mask, ctx->csr);
+ writel(ctx->mask, ctx->csr);
+
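+ /* Give the hardware up to a second to reset before reporting failure. */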
+ mdelay(1000);
- timeout = jiffies + HZ;
- while (time_before(jiffies, timeout))
- cpu_relax();
+ dev_emerg(ctx->dev, "Unable to restart system\n");
- dev_emerg(&ctx->pdev->dev, "Unable to restart system\n");
+ return NOTIFY_DONE;
}
static int xgene_reboot_probe(struct platform_device *pdev)
{
struct xgene_reboot_context *ctx;
+ struct device *dev = &pdev->dev;
+ int err;
- ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx) {
- dev_err(&pdev->dev, "out of memory for context\n");
- return -ENODEV;
- }
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
- ctx->csr = of_iomap(pdev->dev.of_node, 0);
+ ctx->csr = of_iomap(dev->of_node, 0);
if (!ctx->csr) {
- devm_kfree(&pdev->dev, ctx);
- dev_err(&pdev->dev, "can not map resource\n");
+ dev_err(dev, "can not map resource\n");
return -ENODEV;
}
- if (of_property_read_u32(pdev->dev.of_node, "mask", &ctx->mask))
+ if (of_property_read_u32(dev->of_node, "mask", &ctx->mask))
ctx->mask = 0xFFFFFFFF;
- ctx->pdev = pdev;
- arm_pm_restart = xgene_restart;
- xgene_restart_ctx = ctx;
+ ctx->dev = dev;
+ ctx->restart_handler.notifier_call = xgene_restart_handler;
+ ctx->restart_handler.priority = 128;
+ err = register_restart_handler(&ctx->restart_handler);
+ if (err)
+ dev_err(dev, "cannot register restart handler (err=%d)\n", err);
- return 0;
+ return err;
}
static struct of_device_id xgene_reboot_of_match[] = {
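Both reset drivers above migrate from the ARM-specific arm_pm_restart hook to the generic restart-handler notifier chain. The following is a minimal, illustrative sketch of that registration pattern, not code from either driver: the "foo" names and the reset action are hypothetical, while register_restart_handler(), struct notifier_block and NOTIFY_DONE are the kernel interfaces used in the hunks above.

#include <linux/notifier.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>

static int foo_restart_handler(struct notifier_block *this,
			       unsigned long mode, void *cmd)
{
	/* poke the board-specific reset register here */
	return NOTIFY_DONE;	/* allow lower-priority handlers if this fails */
}

static struct notifier_block foo_restart_nb = {
	.notifier_call = foo_restart_handler,
	.priority = 128,	/* same default priority as the drivers above */
};

static int foo_probe(struct platform_device *pdev)
{
	int err = register_restart_handler(&foo_restart_nb);

	if (err)
		dev_err(&pdev->dev, "cannot register restart handler (err=%d)\n", err);
	return err;
}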
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index ef2dd2e4754..a3ecf580963 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -50,6 +50,17 @@ config PWM_ATMEL
To compile this driver as a module, choose M here: the module
will be called pwm-atmel.
+config PWM_ATMEL_HLCDC_PWM
+ tristate "Atmel HLCDC PWM support"
+ depends on MFD_ATMEL_HLCDC
+ help
+ Generic PWM framework driver for the PWM output of the HLCDC
+ (Atmel High-end LCD Controller). This PWM output is mainly used
+ to control the LCD backlight.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-atmel-hlcdc.
+
config PWM_ATMEL_TCB
tristate "Atmel TC Block PWM support"
depends on ATMEL_TCLIB && OF
@@ -71,6 +82,15 @@ config PWM_BCM_KONA
To compile this driver as a module, choose M here: the module
will be called pwm-bcm-kona.
+config PWM_BCM2835
+ tristate "BCM2835 PWM support"
+ depends on ARCH_BCM2835
+ help
+ PWM framework driver for BCM2835 controller (Raspberry Pi)
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-bcm2835.
+
config PWM_BFIN
tristate "Blackfin PWM support"
depends on BFIN_GPTIMERS
@@ -235,7 +255,7 @@ config PWM_ROCKCHIP
config PWM_SAMSUNG
tristate "Samsung PWM support"
- depends on PLAT_SAMSUNG
+ depends on PLAT_SAMSUNG || ARCH_EXYNOS
help
Generic PWM framework driver for Samsung.
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index c458606c375..65259ac1e8d 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -2,8 +2,10 @@ obj-$(CONFIG_PWM) += core.o
obj-$(CONFIG_PWM_SYSFS) += sysfs.o
obj-$(CONFIG_PWM_AB8500) += pwm-ab8500.o
obj-$(CONFIG_PWM_ATMEL) += pwm-atmel.o
+obj-$(CONFIG_PWM_ATMEL_HLCDC_PWM) += pwm-atmel-hlcdc.o
obj-$(CONFIG_PWM_ATMEL_TCB) += pwm-atmel-tcb.o
obj-$(CONFIG_PWM_BCM_KONA) += pwm-bcm-kona.o
+obj-$(CONFIG_PWM_BCM2835) += pwm-bcm2835.o
obj-$(CONFIG_PWM_BFIN) += pwm-bfin.o
obj-$(CONFIG_PWM_CLPS711X) += pwm-clps711x.o
obj-$(CONFIG_PWM_EP93XX) += pwm-ep93xx.o
diff --git a/drivers/pwm/pwm-atmel-hlcdc.c b/drivers/pwm/pwm-atmel-hlcdc.c
new file mode 100644
index 00000000000..e7a785fadcd
--- /dev/null
+++ b/drivers/pwm/pwm-atmel-hlcdc.c
@@ -0,0 +1,299 @@
+/*
+ * Copyright (C) 2014 Free Electrons
+ * Copyright (C) 2014 Atmel
+ *
+ * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/mfd/atmel-hlcdc.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/regmap.h>
+
+#define ATMEL_HLCDC_PWMCVAL_MASK GENMASK(15, 8)
+#define ATMEL_HLCDC_PWMCVAL(x) (((x) << 8) & ATMEL_HLCDC_PWMCVAL_MASK)
+#define ATMEL_HLCDC_PWMPOL BIT(4)
+#define ATMEL_HLCDC_PWMPS_MASK GENMASK(2, 0)
+#define ATMEL_HLCDC_PWMPS_MAX 0x6
+#define ATMEL_HLCDC_PWMPS(x) ((x) & ATMEL_HLCDC_PWMPS_MASK)
+
+struct atmel_hlcdc_pwm_errata {
+ bool slow_clk_erratum;
+ bool div1_clk_erratum;
+};
+
+struct atmel_hlcdc_pwm {
+ struct pwm_chip chip;
+ struct atmel_hlcdc *hlcdc;
+ struct clk *cur_clk;
+ const struct atmel_hlcdc_pwm_errata *errata;
+};
+
+static inline struct atmel_hlcdc_pwm *to_atmel_hlcdc_pwm(struct pwm_chip *chip)
+{
+ return container_of(chip, struct atmel_hlcdc_pwm, chip);
+}
+
+static int atmel_hlcdc_pwm_config(struct pwm_chip *c,
+ struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ struct atmel_hlcdc_pwm *chip = to_atmel_hlcdc_pwm(c);
+ struct atmel_hlcdc *hlcdc = chip->hlcdc;
+ struct clk *new_clk = hlcdc->slow_clk;
+ u64 pwmcval = duty_ns * 256;
+ unsigned long clk_freq;
+ u64 clk_period_ns;
+ u32 pwmcfg;
+ int pres;
+
+ if (!chip->errata || !chip->errata->slow_clk_erratum) {
+ clk_freq = clk_get_rate(new_clk);
+ clk_period_ns = (u64)NSEC_PER_SEC * 256;
+ do_div(clk_period_ns, clk_freq);
+ }
+
+ /* Errata: cannot use slow clk on some IP revisions */
+ if ((chip->errata && chip->errata->slow_clk_erratum) ||
+ clk_period_ns > period_ns) {
+ new_clk = hlcdc->sys_clk;
+ clk_freq = clk_get_rate(new_clk);
+ clk_period_ns = (u64)NSEC_PER_SEC * 256;
+ do_div(clk_period_ns, clk_freq);
+ }
+
+ for (pres = 0; pres <= ATMEL_HLCDC_PWMPS_MAX; pres++) {
+ /* Errata: cannot divide by 1 on some IP revisions */
+ if (!pres && chip->errata && chip->errata->div1_clk_erratum)
+ continue;
+
+ if ((clk_period_ns << pres) >= period_ns)
+ break;
+ }
+
+ if (pres > ATMEL_HLCDC_PWMPS_MAX)
+ return -EINVAL;
+
+ pwmcfg = ATMEL_HLCDC_PWMPS(pres);
+
+ if (new_clk != chip->cur_clk) {
+ u32 gencfg = 0;
+ int ret;
+
+ ret = clk_prepare_enable(new_clk);
+ if (ret)
+ return ret;
+
+ clk_disable_unprepare(chip->cur_clk);
+ chip->cur_clk = new_clk;
+
+ if (new_clk == hlcdc->sys_clk)
+ gencfg = ATMEL_HLCDC_CLKPWMSEL;
+
+ ret = regmap_update_bits(hlcdc->regmap, ATMEL_HLCDC_CFG(0),
+ ATMEL_HLCDC_CLKPWMSEL, gencfg);
+ if (ret)
+ return ret;
+ }
+
+ do_div(pwmcval, period_ns);
+
+ /*
+ * The PWM duty cycle is configurable from 0/256 to 255/256 of the
+ * period cycle. Hence we can't set a duty cycle occupying the
+ * whole period cycle if we're asked to.
+ * Set it to 255 if pwmcval is greater than 256.
+ */
+ if (pwmcval > 255)
+ pwmcval = 255;
+
+ pwmcfg |= ATMEL_HLCDC_PWMCVAL(pwmcval);
+
+ return regmap_update_bits(hlcdc->regmap, ATMEL_HLCDC_CFG(6),
+ ATMEL_HLCDC_PWMCVAL_MASK |
+ ATMEL_HLCDC_PWMPS_MASK,
+ pwmcfg);
+}
+
+static int atmel_hlcdc_pwm_set_polarity(struct pwm_chip *c,
+ struct pwm_device *pwm,
+ enum pwm_polarity polarity)
+{
+ struct atmel_hlcdc_pwm *chip = to_atmel_hlcdc_pwm(c);
+ struct atmel_hlcdc *hlcdc = chip->hlcdc;
+ u32 cfg = 0;
+
+ if (polarity == PWM_POLARITY_NORMAL)
+ cfg = ATMEL_HLCDC_PWMPOL;
+
+ return regmap_update_bits(hlcdc->regmap, ATMEL_HLCDC_CFG(6),
+ ATMEL_HLCDC_PWMPOL, cfg);
+}
+
+static int atmel_hlcdc_pwm_enable(struct pwm_chip *c, struct pwm_device *pwm)
+{
+ struct atmel_hlcdc_pwm *chip = to_atmel_hlcdc_pwm(c);
+ struct atmel_hlcdc *hlcdc = chip->hlcdc;
+ u32 status;
+ int ret;
+
+ ret = regmap_write(hlcdc->regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_PWM);
+ if (ret)
+ return ret;
+
+ while (true) {
+ ret = regmap_read(hlcdc->regmap, ATMEL_HLCDC_SR, &status);
+ if (ret)
+ return ret;
+
+ if ((status & ATMEL_HLCDC_PWM) != 0)
+ break;
+
+ usleep_range(1, 10);
+ }
+
+ return 0;
+}
+
+static void atmel_hlcdc_pwm_disable(struct pwm_chip *c,
+ struct pwm_device *pwm)
+{
+ struct atmel_hlcdc_pwm *chip = to_atmel_hlcdc_pwm(c);
+ struct atmel_hlcdc *hlcdc = chip->hlcdc;
+ u32 status;
+ int ret;
+
+ ret = regmap_write(hlcdc->regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_PWM);
+ if (ret)
+ return;
+
+ while (true) {
+ ret = regmap_read(hlcdc->regmap, ATMEL_HLCDC_SR, &status);
+ if (ret)
+ return;
+
+ if ((status & ATMEL_HLCDC_PWM) == 0)
+ break;
+
+ usleep_range(1, 10);
+ }
+}
+
+static const struct pwm_ops atmel_hlcdc_pwm_ops = {
+ .config = atmel_hlcdc_pwm_config,
+ .set_polarity = atmel_hlcdc_pwm_set_polarity,
+ .enable = atmel_hlcdc_pwm_enable,
+ .disable = atmel_hlcdc_pwm_disable,
+ .owner = THIS_MODULE,
+};
+
+static const struct atmel_hlcdc_pwm_errata atmel_hlcdc_pwm_at91sam9x5_errata = {
+ .slow_clk_erratum = true,
+};
+
+static const struct atmel_hlcdc_pwm_errata atmel_hlcdc_pwm_sama5d3_errata = {
+ .div1_clk_erratum = true,
+};
+
+static const struct of_device_id atmel_hlcdc_dt_ids[] = {
+ {
+ .compatible = "atmel,at91sam9x5-hlcdc",
+ .data = &atmel_hlcdc_pwm_at91sam9x5_errata,
+ },
+ {
+ .compatible = "atmel,sama5d3-hlcdc",
+ .data = &atmel_hlcdc_pwm_sama5d3_errata,
+ },
+ { /* sentinel */ },
+};
+
+static int atmel_hlcdc_pwm_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ struct device *dev = &pdev->dev;
+ struct atmel_hlcdc_pwm *chip;
+ struct atmel_hlcdc *hlcdc;
+ int ret;
+
+ hlcdc = dev_get_drvdata(dev->parent);
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ ret = clk_prepare_enable(hlcdc->periph_clk);
+ if (ret)
+ return ret;
+
+ match = of_match_node(atmel_hlcdc_dt_ids, dev->parent->of_node);
+ if (match)
+ chip->errata = match->data;
+
+ chip->hlcdc = hlcdc;
+ chip->chip.ops = &atmel_hlcdc_pwm_ops;
+ chip->chip.dev = dev;
+ chip->chip.base = -1;
+ chip->chip.npwm = 1;
+ chip->chip.of_xlate = of_pwm_xlate_with_flags;
+ chip->chip.of_pwm_n_cells = 3;
+ chip->chip.can_sleep = 1;
+
+ ret = pwmchip_add(&chip->chip);
+ if (ret) {
+ clk_disable_unprepare(hlcdc->periph_clk);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, chip);
+
+ return 0;
+}
+
+static int atmel_hlcdc_pwm_remove(struct platform_device *pdev)
+{
+ struct atmel_hlcdc_pwm *chip = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = pwmchip_remove(&chip->chip);
+ if (ret)
+ return ret;
+
+ clk_disable_unprepare(chip->hlcdc->periph_clk);
+
+ return 0;
+}
+
+static const struct of_device_id atmel_hlcdc_pwm_dt_ids[] = {
+ { .compatible = "atmel,hlcdc-pwm" },
+ { /* sentinel */ },
+};
+
+static struct platform_driver atmel_hlcdc_pwm_driver = {
+ .driver = {
+ .name = "atmel-hlcdc-pwm",
+ .of_match_table = atmel_hlcdc_pwm_dt_ids,
+ },
+ .probe = atmel_hlcdc_pwm_probe,
+ .remove = atmel_hlcdc_pwm_remove,
+};
+module_platform_driver(atmel_hlcdc_pwm_driver);
+
+MODULE_ALIAS("platform:atmel-hlcdc-pwm");
+MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
+MODULE_DESCRIPTION("Atmel HLCDC PWM driver");
+MODULE_LICENSE("GPL v2");
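As a rough, self-contained illustration of the prescaler selection in atmel_hlcdc_pwm_config() above: the driver computes the duration of 256 ticks of the candidate clock and then picks the smallest power-of-two prescaler that stretches that duration past the requested period. The sketch below mirrors that arithmetic as plain user-space C; the clock rate and period are hypothetical examples and the code is not part of the driver.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC	1000000000ULL
#define PWMPS_MAX	6

/* Return the prescaler (0..6) for a given clock rate and period, or -1. */
static int pick_prescaler(uint64_t clk_hz, uint64_t period_ns)
{
	uint64_t clk_period_ns = NSEC_PER_SEC * 256 / clk_hz;	/* 256 ticks */
	int pres;

	for (pres = 0; pres <= PWMPS_MAX; pres++)
		if ((clk_period_ns << pres) >= period_ns)
			return pres;

	return -1;	/* requested period too long for this clock */
}

int main(void)
{
	/* e.g. a 133 MHz system clock and a 100 us backlight PWM period */
	printf("prescaler = %d\n", pick_prescaler(133000000ULL, 100000ULL));
	return 0;
}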
diff --git a/drivers/pwm/pwm-bcm2835.c b/drivers/pwm/pwm-bcm2835.c
new file mode 100644
index 00000000000..b4c7f956b6f
--- /dev/null
+++ b/drivers/pwm/pwm-bcm2835.c
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2014 Bart Tanghe <bart.tanghe@thomasmore.be>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+
+#define PWM_CONTROL 0x000
+#define PWM_CONTROL_SHIFT(x) ((x) * 8)
+#define PWM_CONTROL_MASK 0xff
+#define PWM_MODE 0x80 /* set timer in PWM mode */
+#define PWM_ENABLE (1 << 0)
+#define PWM_POLARITY (1 << 4)
+
+#define PERIOD(x) (((x) * 0x10) + 0x10)
+#define DUTY(x) (((x) * 0x10) + 0x14)
+
+#define MIN_PERIOD 108 /* 9.2 MHz max. PWM clock */
+
+struct bcm2835_pwm {
+ struct pwm_chip chip;
+ struct device *dev;
+ unsigned long scaler;
+ void __iomem *base;
+ struct clk *clk;
+};
+
+static inline struct bcm2835_pwm *to_bcm2835_pwm(struct pwm_chip *chip)
+{
+ return container_of(chip, struct bcm2835_pwm, chip);
+}
+
+static int bcm2835_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct bcm2835_pwm *pc = to_bcm2835_pwm(chip);
+ u32 value;
+
+ value = readl(pc->base + PWM_CONTROL);
+ value &= ~(PWM_CONTROL_MASK << PWM_CONTROL_SHIFT(pwm->hwpwm));
+ value |= (PWM_MODE << PWM_CONTROL_SHIFT(pwm->hwpwm));
+ writel(value, pc->base + PWM_CONTROL);
+
+ return 0;
+}
+
+static void bcm2835_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct bcm2835_pwm *pc = to_bcm2835_pwm(chip);
+ u32 value;
+
+ value = readl(pc->base + PWM_CONTROL);
+ value &= ~(PWM_CONTROL_MASK << PWM_CONTROL_SHIFT(pwm->hwpwm));
+ writel(value, pc->base + PWM_CONTROL);
+}
+
+static int bcm2835_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ struct bcm2835_pwm *pc = to_bcm2835_pwm(chip);
+
+ if (period_ns <= MIN_PERIOD) {
+ dev_err(pc->dev, "period %d not supported, minimum %d\n",
+ period_ns, MIN_PERIOD);
+ return -EINVAL;
+ }
+
+ writel(duty_ns / pc->scaler, pc->base + DUTY(pwm->hwpwm));
+ writel(period_ns / pc->scaler, pc->base + PERIOD(pwm->hwpwm));
+
+ return 0;
+}
+
+static int bcm2835_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct bcm2835_pwm *pc = to_bcm2835_pwm(chip);
+ u32 value;
+
+ value = readl(pc->base + PWM_CONTROL);
+ value |= PWM_ENABLE << PWM_CONTROL_SHIFT(pwm->hwpwm);
+ writel(value, pc->base + PWM_CONTROL);
+
+ return 0;
+}
+
+static void bcm2835_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct bcm2835_pwm *pc = to_bcm2835_pwm(chip);
+ u32 value;
+
+ value = readl(pc->base + PWM_CONTROL);
+ value &= ~(PWM_ENABLE << PWM_CONTROL_SHIFT(pwm->hwpwm));
+ writel(value, pc->base + PWM_CONTROL);
+}
+
+static int bcm2835_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
+ enum pwm_polarity polarity)
+{
+ struct bcm2835_pwm *pc = to_bcm2835_pwm(chip);
+ u32 value;
+
+ value = readl(pc->base + PWM_CONTROL);
+
+ if (polarity == PWM_POLARITY_NORMAL)
+ value &= ~(PWM_POLARITY << PWM_CONTROL_SHIFT(pwm->hwpwm));
+ else
+ value |= PWM_POLARITY << PWM_CONTROL_SHIFT(pwm->hwpwm);
+
+ writel(value, pc->base + PWM_CONTROL);
+
+ return 0;
+}
+
+static const struct pwm_ops bcm2835_pwm_ops = {
+ .request = bcm2835_pwm_request,
+ .free = bcm2835_pwm_free,
+ .config = bcm2835_pwm_config,
+ .enable = bcm2835_pwm_enable,
+ .disable = bcm2835_pwm_disable,
+ .set_polarity = bcm2835_set_polarity,
+ .owner = THIS_MODULE,
+};
+
+static int bcm2835_pwm_probe(struct platform_device *pdev)
+{
+ struct bcm2835_pwm *pc;
+ struct resource *res;
+ int ret;
+
+ pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
+ if (!pc)
+ return -ENOMEM;
+
+ pc->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pc->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pc->base))
+ return PTR_ERR(pc->base);
+
+ pc->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pc->clk)) {
+ dev_err(&pdev->dev, "clock not found: %ld\n", PTR_ERR(pc->clk));
+ return PTR_ERR(pc->clk);
+ }
+
+ ret = clk_prepare_enable(pc->clk);
+ if (ret)
+ return ret;
+
+ pc->scaler = NSEC_PER_SEC / clk_get_rate(pc->clk);
+
+ pc->chip.dev = &pdev->dev;
+ pc->chip.ops = &bcm2835_pwm_ops;
+ pc->chip.npwm = 2;
+
+ platform_set_drvdata(pdev, pc);
+
+ ret = pwmchip_add(&pc->chip);
+ if (ret < 0)
+ goto add_fail;
+
+ return 0;
+
+add_fail:
+ clk_disable_unprepare(pc->clk);
+ return ret;
+}
+
+static int bcm2835_pwm_remove(struct platform_device *pdev)
+{
+ struct bcm2835_pwm *pc = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(pc->clk);
+
+ return pwmchip_remove(&pc->chip);
+}
+
+static const struct of_device_id bcm2835_pwm_of_match[] = {
+ { .compatible = "brcm,bcm2835-pwm", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, bcm2835_pwm_of_match);
+
+static struct platform_driver bcm2835_pwm_driver = {
+ .driver = {
+ .name = "bcm2835-pwm",
+ .of_match_table = bcm2835_pwm_of_match,
+ },
+ .probe = bcm2835_pwm_probe,
+ .remove = bcm2835_pwm_remove,
+};
+module_platform_driver(bcm2835_pwm_driver);
+
+MODULE_AUTHOR("Bart Tanghe <bart.tanghe@thomasmore.be");
+MODULE_DESCRIPTION("Broadcom BCM2835 PWM driver");
+MODULE_LICENSE("GPL v2");
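The bcm2835 driver above converts nanoseconds to register ticks with a precomputed scaler (the duration of one clock tick in ns). A minimal worked sketch of that conversion, assuming a 9.2 MHz PWM clock as hinted by the MIN_PERIOD comment; the period and duty values are hypothetical.

#include <stdio.h>

#define NSEC_PER_SEC	1000000000UL

int main(void)
{
	unsigned long clk_hz = 9200000;			/* assumed PWM clock */
	unsigned long scaler = NSEC_PER_SEC / clk_hz;	/* ns per tick, ~108 */
	unsigned long period_ns = 1000000;		/* 1 ms -> 1 kHz PWM */
	unsigned long duty_ns = 250000;			/* 25% duty cycle */

	/* these are the values written to the PERIOD and DUTY registers */
	printf("PERIOD = %lu ticks, DUTY = %lu ticks\n",
	       period_ns / scaler, duty_ns / scaler);
	return 0;
}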
diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c
index 0f2cc7ef778..f9dfc8b6407 100644
--- a/drivers/pwm/pwm-fsl-ftm.c
+++ b/drivers/pwm/pwm-fsl-ftm.c
@@ -17,6 +17,7 @@
#include <linux/mutex.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/pwm.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -299,7 +300,7 @@ static int fsl_counter_clock_enable(struct fsl_pwm_chip *fpc)
{
int ret;
- if (fpc->use_count != 0)
+ if (fpc->use_count++ != 0)
return 0;
/* select counter clock source */
@@ -316,8 +317,6 @@ static int fsl_counter_clock_enable(struct fsl_pwm_chip *fpc)
return ret;
}
- fpc->use_count++;
-
return 0;
}
@@ -399,12 +398,23 @@ static int fsl_pwm_init(struct fsl_pwm_chip *fpc)
return 0;
}
+static bool fsl_pwm_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case FTM_CNT:
+ return true;
+ }
+ return false;
+}
+
static const struct regmap_config fsl_pwm_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = FTM_PWMLOAD,
+ .volatile_reg = fsl_pwm_volatile_reg,
+ .cache_type = REGCACHE_RBTREE,
};
static int fsl_pwm_probe(struct platform_device *pdev)
@@ -427,7 +437,7 @@ static int fsl_pwm_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- fpc->regmap = devm_regmap_init_mmio_clk(&pdev->dev, NULL, base,
+ fpc->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "ftm_sys", base,
&fsl_pwm_regmap_config);
if (IS_ERR(fpc->regmap)) {
dev_err(&pdev->dev, "regmap init failed\n");
@@ -478,6 +488,51 @@ static int fsl_pwm_remove(struct platform_device *pdev)
return pwmchip_remove(&fpc->chip);
}
+#ifdef CONFIG_PM_SLEEP
+static int fsl_pwm_suspend(struct device *dev)
+{
+ struct fsl_pwm_chip *fpc = dev_get_drvdata(dev);
+ u32 val;
+
+ regcache_cache_only(fpc->regmap, true);
+ regcache_mark_dirty(fpc->regmap);
+
+ /* read from cache */
+ regmap_read(fpc->regmap, FTM_OUTMASK, &val);
+ if ((val & 0xFF) != 0xFF) {
+ clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_CNTEN]);
+ clk_disable_unprepare(fpc->clk[fpc->cnt_select]);
+ clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_SYS]);
+ }
+
+ return 0;
+}
+
+static int fsl_pwm_resume(struct device *dev)
+{
+ struct fsl_pwm_chip *fpc = dev_get_drvdata(dev);
+ u32 val;
+
+ /* read from cache */
+ regmap_read(fpc->regmap, FTM_OUTMASK, &val);
+ if ((val & 0xFF) != 0xFF) {
+ clk_prepare_enable(fpc->clk[FSL_PWM_CLK_SYS]);
+ clk_prepare_enable(fpc->clk[fpc->cnt_select]);
+ clk_prepare_enable(fpc->clk[FSL_PWM_CLK_CNTEN]);
+ }
+
+ /* restore all registers from cache */
+ regcache_cache_only(fpc->regmap, false);
+ regcache_sync(fpc->regmap);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops fsl_pwm_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(fsl_pwm_suspend, fsl_pwm_resume)
+};
+
static const struct of_device_id fsl_pwm_dt_ids[] = {
{ .compatible = "fsl,vf610-ftm-pwm", },
{ /* sentinel */ }
@@ -488,6 +543,7 @@ static struct platform_driver fsl_pwm_driver = {
.driver = {
.name = "fsl-ftm-pwm",
.of_match_table = fsl_pwm_dt_ids,
+ .pm = &fsl_pwm_pm_ops,
},
.probe = fsl_pwm_probe,
.remove = fsl_pwm_remove,
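The FlexTimer PWM changes above add a regmap register cache so the block can lose power across system sleep: on suspend the regmap is switched to cache-only and marked dirty, and on resume the clocks are re-enabled and regcache_sync() replays the cached register values. A generic, hedged sketch of that pattern follows; the "foo" driver and its fields are hypothetical, while the regcache_* helpers and SIMPLE_DEV_PM_OPS are the regmap/PM APIs involved.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/pm.h>
#include <linux/regmap.h>

struct foo_chip {
	struct regmap *regmap;
	struct clk *clk;
};

static int foo_suspend(struct device *dev)
{
	struct foo_chip *fc = dev_get_drvdata(dev);

	regcache_cache_only(fc->regmap, true);	/* keep writes in the cache */
	regcache_mark_dirty(fc->regmap);	/* force a full sync on resume */
	clk_disable_unprepare(fc->clk);
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct foo_chip *fc = dev_get_drvdata(dev);
	int ret = clk_prepare_enable(fc->clk);

	if (ret)
		return ret;

	regcache_cache_only(fc->regmap, false);
	return regcache_sync(fc->regmap);	/* restore registers from cache */
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);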
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index b6135d4d54e..92f6af6da69 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -41,6 +41,7 @@
* @svq: tx virtqueue
* @rbufs: kernel address of rx buffers
* @sbufs: kernel address of tx buffers
+ * @num_bufs: total number of buffers for rx and tx
* @last_sbuf: index of last tx buffer used
* @bufs_dma: dma base addr of the buffers
* @tx_lock: protects svq, sbufs and sleepers, to allow concurrent senders.
@@ -60,6 +61,7 @@ struct virtproc_info {
struct virtio_device *vdev;
struct virtqueue *rvq, *svq;
void *rbufs, *sbufs;
+ unsigned int num_bufs;
int last_sbuf;
dma_addr_t bufs_dma;
struct mutex tx_lock;
@@ -86,13 +88,14 @@ struct rpmsg_channel_info {
#define to_rpmsg_driver(d) container_of(d, struct rpmsg_driver, drv)
/*
- * We're allocating 512 buffers of 512 bytes for communications, and then
- * using the first 256 buffers for RX, and the last 256 buffers for TX.
+ * We're allocating buffers of 512 bytes each for communications. The
+ * number of buffers will be computed from the number of buffers supported
+ * by the vring, up to a maximum of 512 buffers (256 in each direction).
*
* Each buffer will have 16 bytes for the msg header and 496 bytes for
* the payload.
*
- * This will require a total space of 256KB for the buffers.
+ * This will utilize a maximum total space of 256KB for the buffers.
*
* We might also want to add support for user-provided buffers in time.
* This will allow bigger buffer size flexibility, and can also be used
@@ -102,9 +105,8 @@ struct rpmsg_channel_info {
* can change this without changing anything in the firmware of the remote
* processor.
*/
-#define RPMSG_NUM_BUFS (512)
+#define MAX_RPMSG_NUM_BUFS (512)
#define RPMSG_BUF_SIZE (512)
-#define RPMSG_TOTAL_BUF_SPACE (RPMSG_NUM_BUFS * RPMSG_BUF_SIZE)
/*
* Local addresses are dynamically allocated on-demand.
@@ -579,7 +581,7 @@ static void *get_a_tx_buf(struct virtproc_info *vrp)
* either pick the next unused tx buffer
* (half of our buffers are used for sending messages)
*/
- if (vrp->last_sbuf < RPMSG_NUM_BUFS / 2)
+ if (vrp->last_sbuf < vrp->num_bufs / 2)
ret = vrp->sbufs + RPMSG_BUF_SIZE * vrp->last_sbuf++;
/* or recycle a used one */
else
@@ -948,6 +950,7 @@ static int rpmsg_probe(struct virtio_device *vdev)
struct virtproc_info *vrp;
void *bufs_va;
int err = 0, i;
+ size_t total_buf_space;
vrp = kzalloc(sizeof(*vrp), GFP_KERNEL);
if (!vrp)
@@ -968,10 +971,22 @@ static int rpmsg_probe(struct virtio_device *vdev)
vrp->rvq = vqs[0];
vrp->svq = vqs[1];
+ /* we expect symmetric tx/rx vrings */
+ WARN_ON(virtqueue_get_vring_size(vrp->rvq) !=
+ virtqueue_get_vring_size(vrp->svq));
+
+ /* we need less buffers if vrings are small */
+ if (virtqueue_get_vring_size(vrp->rvq) < MAX_RPMSG_NUM_BUFS / 2)
+ vrp->num_bufs = virtqueue_get_vring_size(vrp->rvq) * 2;
+ else
+ vrp->num_bufs = MAX_RPMSG_NUM_BUFS;
+
+ total_buf_space = vrp->num_bufs * RPMSG_BUF_SIZE;
+
/* allocate coherent memory for the buffers */
bufs_va = dma_alloc_coherent(vdev->dev.parent->parent,
- RPMSG_TOTAL_BUF_SPACE,
- &vrp->bufs_dma, GFP_KERNEL);
+ total_buf_space, &vrp->bufs_dma,
+ GFP_KERNEL);
if (!bufs_va) {
err = -ENOMEM;
goto vqs_del;
@@ -984,10 +999,10 @@ static int rpmsg_probe(struct virtio_device *vdev)
vrp->rbufs = bufs_va;
/* and half is dedicated for TX */
- vrp->sbufs = bufs_va + RPMSG_TOTAL_BUF_SPACE / 2;
+ vrp->sbufs = bufs_va + total_buf_space / 2;
/* set up the receive buffers */
- for (i = 0; i < RPMSG_NUM_BUFS / 2; i++) {
+ for (i = 0; i < vrp->num_bufs / 2; i++) {
struct scatterlist sg;
void *cpu_addr = vrp->rbufs + i * RPMSG_BUF_SIZE;
@@ -1023,8 +1038,8 @@ static int rpmsg_probe(struct virtio_device *vdev)
return 0;
free_coherent:
- dma_free_coherent(vdev->dev.parent->parent, RPMSG_TOTAL_BUF_SPACE,
- bufs_va, vrp->bufs_dma);
+ dma_free_coherent(vdev->dev.parent->parent, total_buf_space,
+ bufs_va, vrp->bufs_dma);
vqs_del:
vdev->config->del_vqs(vrp->vdev);
free_vrp:
@@ -1042,6 +1057,7 @@ static int rpmsg_remove_device(struct device *dev, void *data)
static void rpmsg_remove(struct virtio_device *vdev)
{
struct virtproc_info *vrp = vdev->priv;
+ size_t total_buf_space = vrp->num_bufs * RPMSG_BUF_SIZE;
int ret;
vdev->config->reset(vdev);
@@ -1057,8 +1073,8 @@ static void rpmsg_remove(struct virtio_device *vdev)
vdev->config->del_vqs(vrp->vdev);
- dma_free_coherent(vdev->dev.parent->parent, RPMSG_TOTAL_BUF_SPACE,
- vrp->rbufs, vrp->bufs_dma);
+ dma_free_coherent(vdev->dev.parent->parent, total_buf_space,
+ vrp->rbufs, vrp->bufs_dma);
kfree(vrp);
}
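The rpmsg change above sizes the buffer pool from the vring instead of hard-coding 512 entries. A small sketch of that sizing rule, assuming symmetric rx/tx vrings as the WARN_ON in the hunk expects; the vring size used below is a hypothetical example, not taken from any particular platform.

#include <stddef.h>
#include <stdio.h>

#define MAX_RPMSG_NUM_BUFS	512
#define RPMSG_BUF_SIZE		512

int main(void)
{
	unsigned int vring_size = 64;	/* hypothetical rx vring size */
	unsigned int num_bufs;
	size_t total;

	/* half the buffers are rx, half tx, capped at 512 in total */
	if (vring_size < MAX_RPMSG_NUM_BUFS / 2)
		num_bufs = vring_size * 2;
	else
		num_bufs = MAX_RPMSG_NUM_BUFS;

	total = (size_t)num_bufs * RPMSG_BUF_SIZE;
	printf("num_bufs = %u, coherent pool = %zu bytes (tx half at +%zu)\n",
	       num_bufs, total, total / 2);
	return 0;
}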
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index 7454498c409..9e43ae1d216 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -213,8 +213,6 @@ static int scsi_bus_restore(struct device *dev)
#endif /* CONFIG_PM_SLEEP */
-#ifdef CONFIG_PM_RUNTIME
-
static int sdev_runtime_suspend(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
@@ -332,14 +330,6 @@ void scsi_autopm_put_host(struct Scsi_Host *shost)
pm_runtime_put_sync(&shost->shost_gendev);
}
-#else
-
-#define scsi_runtime_suspend NULL
-#define scsi_runtime_resume NULL
-#define scsi_runtime_idle NULL
-
-#endif /* CONFIG_PM_RUNTIME */
-
const struct dev_pm_ops scsi_bus_pm_ops = {
.prepare = scsi_bus_prepare,
.suspend = scsi_bus_suspend,
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 2dc4a83fb84..e3902fc6627 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -155,8 +155,7 @@ static inline void scsi_netlink_exit(void) {}
/* scsi_pm.c */
#ifdef CONFIG_PM
extern const struct dev_pm_ops scsi_bus_pm_ops;
-#endif
-#ifdef CONFIG_PM_RUNTIME
+
extern void scsi_autopm_get_target(struct scsi_target *);
extern void scsi_autopm_put_target(struct scsi_target *);
extern int scsi_autopm_get_host(struct Scsi_Host *);
@@ -166,7 +165,7 @@ static inline void scsi_autopm_get_target(struct scsi_target *t) {}
static inline void scsi_autopm_put_target(struct scsi_target *t) {}
static inline int scsi_autopm_get_host(struct Scsi_Host *h) { return 0; }
static inline void scsi_autopm_put_host(struct Scsi_Host *h) {}
-#endif /* CONFIG_PM_RUNTIME */
+#endif /* CONFIG_PM */
extern struct async_domain scsi_sd_pm_domain;
extern struct async_domain scsi_sd_probe_domain;
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index 955ed558701..d15eaa466c5 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -62,12 +62,7 @@ static int ufshcd_pci_resume(struct device *dev)
{
return ufshcd_system_resume(dev_get_drvdata(dev));
}
-#else
-#define ufshcd_pci_suspend NULL
-#define ufshcd_pci_resume NULL
-#endif /* CONFIG_PM */
-#ifdef CONFIG_PM_RUNTIME
static int ufshcd_pci_runtime_suspend(struct device *dev)
{
return ufshcd_runtime_suspend(dev_get_drvdata(dev));
@@ -80,11 +75,13 @@ static int ufshcd_pci_runtime_idle(struct device *dev)
{
return ufshcd_runtime_idle(dev_get_drvdata(dev));
}
-#else /* !CONFIG_PM_RUNTIME */
+#else /* !CONFIG_PM */
+#define ufshcd_pci_suspend NULL
+#define ufshcd_pci_resume NULL
#define ufshcd_pci_runtime_suspend NULL
#define ufshcd_pci_runtime_resume NULL
#define ufshcd_pci_runtime_idle NULL
-#endif /* CONFIG_PM_RUNTIME */
+#endif /* CONFIG_PM */
/**
* ufshcd_pci_shutdown - main function to put the controller in reset state
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index 0c030ad8a96..7db9564f507 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -261,12 +261,7 @@ static int ufshcd_pltfrm_resume(struct device *dev)
{
return ufshcd_system_resume(dev_get_drvdata(dev));
}
-#else
-#define ufshcd_pltfrm_suspend NULL
-#define ufshcd_pltfrm_resume NULL
-#endif
-#ifdef CONFIG_PM_RUNTIME
static int ufshcd_pltfrm_runtime_suspend(struct device *dev)
{
return ufshcd_runtime_suspend(dev_get_drvdata(dev));
@@ -279,11 +274,13 @@ static int ufshcd_pltfrm_runtime_idle(struct device *dev)
{
return ufshcd_runtime_idle(dev_get_drvdata(dev));
}
-#else /* !CONFIG_PM_RUNTIME */
+#else /* !CONFIG_PM */
+#define ufshcd_pltfrm_suspend NULL
+#define ufshcd_pltfrm_resume NULL
#define ufshcd_pltfrm_runtime_suspend NULL
#define ufshcd_pltfrm_runtime_resume NULL
#define ufshcd_pltfrm_runtime_idle NULL
-#endif /* CONFIG_PM_RUNTIME */
+#endif /* CONFIG_PM */
static void ufshcd_pltfrm_shutdown(struct platform_device *pdev)
{
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index 11a5043959d..011a3363c26 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -31,6 +31,7 @@
static u32 (*fuse_readl)(const unsigned int offset);
static int fuse_size;
struct tegra_sku_info tegra_sku_info;
+EXPORT_SYMBOL(tegra_sku_info);
static const char *tegra_revision_name[TEGRA_REVISION_MAX] = {
[TEGRA_REVISION_UNKNOWN] = "unknown",
diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c
index e2fa628e55e..41b5dc4445f 100644
--- a/drivers/spi/spi-coldfire-qspi.c
+++ b/drivers/spi/spi-coldfire-qspi.c
@@ -491,7 +491,7 @@ static int mcfqspi_resume(struct device *dev)
}
#endif
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static int mcfqspi_runtime_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index 43781c9fe52..b410499cddc 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -663,7 +663,7 @@ static int img_spfi_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static int img_spfi_runtime_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
@@ -692,7 +692,7 @@ static int img_spfi_runtime_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_RUNTIME */
+#endif /* CONFIG_PM */
#ifdef CONFIG_PM_SLEEP
static int img_spfi_suspend(struct device *dev)
diff --git a/drivers/spi/spi-meson-spifc.c b/drivers/spi/spi-meson-spifc.c
index 0e48f8c2037..1bbac0378bf 100644
--- a/drivers/spi/spi-meson-spifc.c
+++ b/drivers/spi/spi-meson-spifc.c
@@ -413,7 +413,7 @@ static int meson_spifc_resume(struct device *dev)
}
#endif /* CONFIG_PM_SLEEP */
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static int meson_spifc_runtime_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
@@ -431,7 +431,7 @@ static int meson_spifc_runtime_resume(struct device *dev)
return clk_prepare_enable(spifc->clk);
}
-#endif /* CONFIG_PM_RUNTIME */
+#endif /* CONFIG_PM */
static const struct dev_pm_ops meson_spifc_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(meson_spifc_suspend, meson_spifc_resume)
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index 932da4825e7..3dec9e0b99b 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -523,7 +523,7 @@ static int orion_spi_remove(struct platform_device *pdev)
MODULE_ALIAS("platform:" DRIVER_NAME);
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static int orion_spi_runtime_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 2a41b2d6fa1..05c623cfb07 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1531,7 +1531,7 @@ static int pxa2xx_spi_resume(struct device *dev)
}
#endif
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static int pxa2xx_spi_runtime_suspend(struct device *dev)
{
struct driver_data *drv_data = dev_get_drvdata(dev);
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index 390ed712eee..e7fb5a0d2e8 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -646,7 +646,7 @@ error:
return ret;
}
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static int spi_qup_pm_suspend_runtime(struct device *device)
{
struct spi_master *master = dev_get_drvdata(device);
@@ -672,7 +672,7 @@ static int spi_qup_pm_resume_runtime(struct device *device)
writel_relaxed(config, controller->base + QUP_CONFIG);
return 0;
}
-#endif /* CONFIG_PM_RUNTIME */
+#endif /* CONFIG_PM */
#ifdef CONFIG_PM_SLEEP
static int spi_qup_suspend(struct device *device)
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 44c12255890..daabbabd26b 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -799,7 +799,7 @@ static int rockchip_spi_resume(struct device *dev)
}
#endif /* CONFIG_PM_SLEEP */
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static int rockchip_spi_runtime_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
@@ -827,7 +827,7 @@ static int rockchip_spi_runtime_resume(struct device *dev)
return ret;
}
-#endif /* CONFIG_PM_RUNTIME */
+#endif /* CONFIG_PM */
static const struct dev_pm_ops rockchip_spi_pm = {
SET_SYSTEM_SLEEP_PM_OPS(rockchip_spi_suspend, rockchip_spi_resume)
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 197bcf09317..37b19836f5c 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -1267,7 +1267,7 @@ static int s3c64xx_spi_resume(struct device *dev)
}
#endif /* CONFIG_PM_SLEEP */
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static int s3c64xx_spi_runtime_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
@@ -1297,7 +1297,7 @@ static int s3c64xx_spi_runtime_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_RUNTIME */
+#endif /* CONFIG_PM */
static const struct dev_pm_ops s3c64xx_spi_pm = {
SET_SYSTEM_SLEEP_PM_OPS(s3c64xx_spi_suspend, s3c64xx_spi_resume)
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 4690ae9a267..815de379a13 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -62,8 +62,6 @@ source "drivers/staging/xgifb/Kconfig"
source "drivers/staging/emxx_udc/Kconfig"
-source "drivers/staging/bcm/Kconfig"
-
source "drivers/staging/ft1000/Kconfig"
source "drivers/staging/speakup/Kconfig"
@@ -86,8 +84,6 @@ source "drivers/staging/gdm72xx/Kconfig"
source "drivers/staging/gdm724x/Kconfig"
-source "drivers/staging/imx-drm/Kconfig"
-
source "drivers/staging/fwserial/Kconfig"
source "drivers/staging/goldfish/Kconfig"
@@ -108,4 +104,6 @@ source "drivers/staging/skein/Kconfig"
source "drivers/staging/unisys/Kconfig"
+source "drivers/staging/clocking-wizard/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index c780a0e70e1..33c640b4956 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -25,7 +25,6 @@ obj-$(CONFIG_VME_BUS) += vme/
obj-$(CONFIG_IIO) += iio/
obj-$(CONFIG_FB_XGI) += xgifb/
obj-$(CONFIG_USB_EMXX) += emxx_udc/
-obj-$(CONFIG_BCM_WIMAX) += bcm/
obj-$(CONFIG_FT1000) += ft1000/
obj-$(CONFIG_SPEAKUP) += speakup/
obj-$(CONFIG_TOUCHSCREEN_CLEARPAD_TM1217) += cptm1217/
@@ -36,7 +35,6 @@ obj-$(CONFIG_STAGING_BOARD) += board/
obj-$(CONFIG_USB_WPAN_HCD) += ozwpan/
obj-$(CONFIG_WIMAX_GDM72XX) += gdm72xx/
obj-$(CONFIG_LTE_GDM724X) += gdm724x/
-obj-$(CONFIG_DRM_IMX) += imx-drm/
obj-$(CONFIG_FIREWIRE_SERIAL) += fwserial/
obj-$(CONFIG_GOLDFISH) += goldfish/
obj-$(CONFIG_LUSTRE_FS) += lustre/
@@ -46,3 +44,4 @@ obj-$(CONFIG_MTD_SPINAND_MT29F) += mt29f_spinand/
obj-$(CONFIG_GS_FPGABOOT) += gs_fpgaboot/
obj-$(CONFIG_CRYPTO_SKEIN) += skein/
obj-$(CONFIG_UNISYSSPAR) += unisys/
+obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD) += clocking-wizard/
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index 7a0e2885296..7e012f37792 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -1,37 +1,7 @@
menu "Android"
-config ANDROID
- bool "Android Drivers"
- ---help---
- Enable support for various drivers needed on the Android platform
-
if ANDROID
-config ANDROID_BINDER_IPC
- bool "Android Binder IPC Driver"
- depends on MMU
- default n
- ---help---
- Binder is used in Android for both communication between processes,
- and remote method invocation.
-
- This means one Android process can call a method/routine in another
- Android process, using Binder to identify, invoke and pass arguments
- between said processes.
-
-config ANDROID_BINDER_IPC_32BIT
- bool
- depends on !64BIT && ANDROID_BINDER_IPC
- default y
- ---help---
- The Binder API has been changed to support both 32 and 64bit
- applications in a mixed environment.
-
- Enable this to support an old 32-bit Android user-space (v4.4 and
- earlier).
-
- Note that enabling this will break newer Android user-space.
-
config ASHMEM
bool "Enable the Anonymous Shared Memory Subsystem"
default n
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index 517ad5ffa42..479b2b86f8c 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -2,7 +2,6 @@ ccflags-y += -I$(src) # needed for trace events
obj-y += ion/
-obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o
obj-$(CONFIG_ASHMEM) += ashmem.o
obj-$(CONFIG_ANDROID_LOGGER) += logger.o
obj-$(CONFIG_ANDROID_TIMED_OUTPUT) += timed_output.o
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
index b15fb0d6b15..06954cdf3db 100644
--- a/drivers/staging/android/TODO
+++ b/drivers/staging/android/TODO
@@ -5,6 +5,13 @@ TODO:
- make sure things build as modules properly
- add proper arch dependencies as needed
- audit userspace interfaces to make sure they are sane
+ - kuid_t should never be exposed to user space, as it is a
+ kernel-internal type. The data structure for kuid_t is:
+ typedef struct {
+ uid_t val;
+ } kuid_t;
+ - This bug was introduced by Xiong Zhou in patch bd471258f2e09
+ ("staging: android: logger: use kuid_t instead of uid_t")
Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
Brian Swetland <swetland@google.com>
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 46f8ef42559..8c7852742f4 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -446,7 +446,7 @@ ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
loff_t start = range->pgstart * PAGE_SIZE;
loff_t end = (range->pgend + 1) * PAGE_SIZE;
- do_fallocate(range->asma->file,
+ vfs_fallocate(range->asma->file,
FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
start, end - start);
range->purged = ASHMEM_WAS_PURGED;
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 56604f41ec4..296d347660f 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -250,7 +250,7 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
our systems the only dma_address space is physical addresses.
Additionally, we can't afford the overhead of invalidating every
allocation via dma_map_sg. The implicit contract here is that
- memory comming from the heaps is ready for dma, ie if it has a
+ memory coming from the heaps is ready for dma, ie if it has a
cached mapping that mapping has been invalidated */
for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
sg_dma_address(sg) = sg_phys(sg);
@@ -263,8 +263,7 @@ err:
heap->ops->unmap_dma(heap, buffer);
heap->ops->free(buffer);
err1:
- if (buffer->pages)
- vfree(buffer->pages);
+ vfree(buffer->pages);
err2:
kfree(buffer);
return ERR_PTR(ret);
@@ -276,8 +275,7 @@ void ion_buffer_destroy(struct ion_buffer *buffer)
buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
buffer->heap->ops->unmap_dma(buffer->heap, buffer);
buffer->heap->ops->free(buffer);
- if (buffer->pages)
- vfree(buffer->pages);
+ vfree(buffer->pages);
kfree(buffer);
}
@@ -902,7 +900,7 @@ void ion_pages_sync_for_device(struct device *dev, struct page *page,
sg_set_page(&sg, page, size, 0);
/*
* This is not correct - sg_dma_address needs a dma_addr_t that is valid
- * for the the targeted device, but this works on the currently targeted
+ * for the targeted device, but this works on the currently targeted
* hardware.
*/
sg_dma_address(&sg) = page_to_phys(page);
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
index d305bb7e9a7..443db8459a9 100644
--- a/drivers/staging/android/ion/ion.h
+++ b/drivers/staging/android/ion/ion.h
@@ -76,7 +76,7 @@ struct ion_platform_data {
* size
*
* Calls memblock reserve to set aside memory for heaps that are
- * located at specific memory addresses or of specfic sizes not
+ * located at specific memory addresses or of specific sizes not
* managed by the kernel
*/
void ion_reserve(struct ion_platform_data *data);
diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c
index f3ea1c31e53..5678870bff4 100644
--- a/drivers/staging/android/ion/ion_dummy_driver.c
+++ b/drivers/staging/android/ion/ion_dummy_driver.c
@@ -112,10 +112,8 @@ static int __init ion_dummy_init(void)
}
return 0;
err:
- for (i = 0; i < dummy_ion_pdata.nr; i++) {
- if (heaps[i])
- ion_heap_destroy(heaps[i]);
- }
+ for (i = 0; i < dummy_ion_pdata.nr; ++i)
+ ion_heap_destroy(heaps[i]);
kfree(heaps);
if (carveout_ptr) {
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
index 5864f3dfcbc..4b88f11e52d 100644
--- a/drivers/staging/android/ion/ion_page_pool.c
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -120,7 +120,7 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
bool high;
if (current_is_kswapd())
- high = 1;
+ high = true;
else
high = !!(gfp_mask & __GFP_HIGHMEM);
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
index c8f01757abf..18a5f93e13b 100644
--- a/drivers/staging/android/ion/ion_priv.h
+++ b/drivers/staging/android/ion/ion_priv.h
@@ -345,7 +345,7 @@ void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
* functions for creating and destroying a heap pool -- allows you
* to keep a pool of pre allocated memory to use from your heap. Keeping
* a pool of memory that is ready for dma, ie any cached mapping have been
- * invalidated from the cache, provides a significant peformance benefit on
+ * invalidated from the cache, provides a significant performance benefit on
* many systems */
/**
@@ -362,7 +362,7 @@ void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
*
* Allows you to keep a pool of pre allocated pages to use from your heap.
* Keeping a pool of pages that is ready for dma, ie any cached mapping have
- * been invalidated from the cache, provides a significant peformance benefit
+ * been invalidated from the cache, provides a significant performance benefit
* on many systems
*/
struct ion_page_pool {
diff --git a/drivers/staging/android/ion/tegra/tegra_ion.c b/drivers/staging/android/ion/tegra/tegra_ion.c
index 11c7cceb3c7..5b8ef0e6601 100644
--- a/drivers/staging/android/ion/tegra/tegra_ion.c
+++ b/drivers/staging/android/ion/tegra/tegra_ion.c
@@ -54,10 +54,8 @@ static int tegra_ion_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, idev);
return 0;
err:
- for (i = 0; i < num_heaps; i++) {
- if (heaps[i])
- ion_heap_destroy(heaps[i]);
- }
+ for (i = 0; i < num_heaps; ++i)
+ ion_heap_destroy(heaps[i]);
return err;
}
diff --git a/drivers/staging/android/sync_debug.c b/drivers/staging/android/sync_debug.c
index 257fc91bf02..1532a86404b 100644
--- a/drivers/staging/android/sync_debug.c
+++ b/drivers/staging/android/sync_debug.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/anon_inodes.h>
+#include <linux/time64.h>
#include "sync.h"
#ifdef CONFIG_DEBUG_FS
@@ -95,9 +96,9 @@ static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
sync_status_str(status));
if (status <= 0) {
- struct timeval tv = ktime_to_timeval(pt->base.timestamp);
+ struct timespec64 ts64 = ktime_to_timespec64(pt->base.timestamp);
- seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
+ seq_printf(s, "@%lld.%09ld", (s64)ts64.tv_sec, ts64.tv_nsec);
}
if (parent->ops->timeline_value_str &&
diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c
index c71ed64931b..938a35cd99b 100644
--- a/drivers/staging/android/timed_gpio.c
+++ b/drivers/staging/android/timed_gpio.c
@@ -20,6 +20,7 @@
#include <linux/hrtimer.h>
#include <linux/err.h>
#include <linux/gpio.h>
+#include <linux/ktime.h>
#include "timed_output.h"
#include "timed_gpio.h"
@@ -46,16 +47,16 @@ static enum hrtimer_restart gpio_timer_func(struct hrtimer *timer)
static int gpio_get_time(struct timed_output_dev *dev)
{
struct timed_gpio_data *data;
- struct timeval t;
+ ktime_t t;
data = container_of(dev, struct timed_gpio_data, dev);
if (!hrtimer_active(&data->timer))
return 0;
- t = ktime_to_timeval(hrtimer_get_remaining(&data->timer));
+ t = hrtimer_get_remaining(&data->timer);
- return t.tv_sec * 1000 + t.tv_usec / 1000;
+ return ktime_to_ms(t);
}
static void gpio_enable(struct timed_output_dev *dev, int value)
diff --git a/drivers/staging/bcm/Adapter.h b/drivers/staging/bcm/Adapter.h
deleted file mode 100644
index 940c852e17b..00000000000
--- a/drivers/staging/bcm/Adapter.h
+++ /dev/null
@@ -1,474 +0,0 @@
-/***********************************
-* Adapter.h
-************************************/
-#ifndef __ADAPTER_H__
-#define __ADAPTER_H__
-
-#define MAX_FRAGMENTEDIP_CLASSIFICATION_ENTRIES 256
-#include "Debug.h"
-
-struct bcm_leader {
- USHORT Vcid;
- USHORT PLength;
- UCHAR Status;
- UCHAR Unused[3];
-} __packed;
-
-struct bcm_packettosend {
- struct bcm_leader Leader;
- UCHAR ucPayload;
-} __packed;
-
-struct bcm_control_packet {
- PVOID ControlBuff;
- UINT ControlBuffLen;
- struct bcm_control_packet *next;
-} __packed;
-
-struct bcm_link_request {
- struct bcm_leader Leader;
- UCHAR szData[4];
-} __packed;
-
-#define MAX_IP_RANGE_LENGTH 4
-#define MAX_PORT_RANGE 4
-#define MAX_PROTOCOL_LENGTH 32
-#define IPV6_ADDRESS_SIZEINBYTES 0x10
-
-union u_ip_address {
- struct {
- /* Source Ip Address Range */
- ULONG ulIpv4Addr[MAX_IP_RANGE_LENGTH];
- /* Source Ip Mask Address Range */
- ULONG ulIpv4Mask[MAX_IP_RANGE_LENGTH];
- };
- struct {
- /* Source Ip Address Range */
- ULONG ulIpv6Addr[MAX_IP_RANGE_LENGTH * 4];
- /* Source Ip Mask Address Range */
- ULONG ulIpv6Mask[MAX_IP_RANGE_LENGTH * 4];
- };
- struct {
- UCHAR ucIpv4Address[MAX_IP_RANGE_LENGTH * IP_LENGTH_OF_ADDRESS];
- UCHAR ucIpv4Mask[MAX_IP_RANGE_LENGTH * IP_LENGTH_OF_ADDRESS];
- };
- struct {
- UCHAR ucIpv6Address[MAX_IP_RANGE_LENGTH * IPV6_ADDRESS_SIZEINBYTES];
- UCHAR ucIpv6Mask[MAX_IP_RANGE_LENGTH * IPV6_ADDRESS_SIZEINBYTES];
- };
-};
-
-struct bcm_hdr_suppression_contextinfo {
- /* Intermediate buffer to accumulate pkt Header for PHS */
- UCHAR ucaHdrSuppressionInBuf[MAX_PHS_LENGTHS];
- /* Intermediate buffer containing pkt Header after PHS */
- UCHAR ucaHdrSuppressionOutBuf[MAX_PHS_LENGTHS + PHSI_LEN];
-};
-
-struct bcm_classifier_rule {
- ULONG ulSFID;
- UCHAR ucReserved[2];
- B_UINT16 uiClassifierRuleIndex;
- bool bUsed;
- USHORT usVCID_Value;
- /* This field detemines the Classifier Priority */
- B_UINT8 u8ClassifierRulePriority;
- union u_ip_address stSrcIpAddress;
- UCHAR ucIPSourceAddressLength; /* Ip Source Address Length */
-
- union u_ip_address stDestIpAddress;
- /* Ip Destination Address Length */
- UCHAR ucIPDestinationAddressLength;
- UCHAR ucIPTypeOfServiceLength; /* Type of service Length */
- UCHAR ucTosLow; /* Tos Low */
- UCHAR ucTosHigh; /* Tos High */
- UCHAR ucTosMask; /* Tos Mask */
-
- UCHAR ucProtocolLength; /* protocol Length */
- UCHAR ucProtocol[MAX_PROTOCOL_LENGTH]; /* protocol Length */
- USHORT usSrcPortRangeLo[MAX_PORT_RANGE];
- USHORT usSrcPortRangeHi[MAX_PORT_RANGE];
- UCHAR ucSrcPortRangeLength;
-
- USHORT usDestPortRangeLo[MAX_PORT_RANGE];
- USHORT usDestPortRangeHi[MAX_PORT_RANGE];
- UCHAR ucDestPortRangeLength;
-
- bool bProtocolValid;
- bool bTOSValid;
- bool bDestIpValid;
- bool bSrcIpValid;
-
- /* For IPv6 Addressing */
- UCHAR ucDirection;
- bool bIpv6Protocol;
- UINT32 u32PHSRuleID;
- struct bcm_phs_rule sPhsRule;
- UCHAR u8AssociatedPHSI;
-
- /* Classification fields for ETH CS */
- UCHAR ucEthCSSrcMACLen;
- UCHAR au8EThCSSrcMAC[MAC_ADDRESS_SIZE];
- UCHAR au8EThCSSrcMACMask[MAC_ADDRESS_SIZE];
- UCHAR ucEthCSDestMACLen;
- UCHAR au8EThCSDestMAC[MAC_ADDRESS_SIZE];
- UCHAR au8EThCSDestMACMask[MAC_ADDRESS_SIZE];
- UCHAR ucEtherTypeLen;
- UCHAR au8EthCSEtherType[NUM_ETHERTYPE_BYTES];
- UCHAR usUserPriority[2];
- USHORT usVLANID;
- USHORT usValidityBitMap;
-};
-
-struct bcm_fragmented_packet_info {
- bool bUsed;
- ULONG ulSrcIpAddress;
- USHORT usIpIdentification;
- struct bcm_classifier_rule *pstMatchedClassifierEntry;
- bool bOutOfOrderFragment;
-};
-
-struct bcm_packet_info {
- /* classification extension Rule */
- ULONG ulSFID;
- USHORT usVCID_Value;
- UINT uiThreshold;
- /* This field determines the priority of the SF Queues */
- B_UINT8 u8TrafficPriority;
-
- bool bValid;
- bool bActive;
- bool bActivateRequestSent;
-
- B_UINT8 u8QueueType; /* BE or rtPS */
-
- /* maximum size of the bucket for the queue */
- UINT uiMaxBucketSize;
- UINT uiCurrentQueueDepthOnTarget;
- UINT uiCurrentBytesOnHost;
- UINT uiCurrentPacketsOnHost;
- UINT uiDroppedCountBytes;
- UINT uiDroppedCountPackets;
- UINT uiSentBytes;
- UINT uiSentPackets;
- UINT uiCurrentDrainRate;
- UINT uiThisPeriodSentBytes;
- LARGE_INTEGER liDrainCalculated;
- UINT uiCurrentTokenCount;
- LARGE_INTEGER liLastUpdateTokenAt;
- UINT uiMaxAllowedRate;
- UINT NumOfPacketsSent;
- UCHAR ucDirection;
- USHORT usCID;
- struct bcm_mibs_parameters stMibsExtServiceFlowTable;
- UINT uiCurrentRxRate;
- UINT uiThisPeriodRxBytes;
- UINT uiTotalRxBytes;
- UINT uiTotalTxBytes;
- UINT uiPendedLast;
- UCHAR ucIpVersion;
-
- union {
- struct {
- struct sk_buff *FirstTxQueue;
- struct sk_buff *LastTxQueue;
- };
- struct {
- struct sk_buff *ControlHead;
- struct sk_buff *ControlTail;
- };
- };
-
- bool bProtocolValid;
- bool bTOSValid;
- bool bDestIpValid;
- bool bSrcIpValid;
-
- bool bActiveSet;
- bool bAdmittedSet;
- bool bAuthorizedSet;
- bool bClassifierPriority;
- UCHAR ucServiceClassName[MAX_CLASS_NAME_LENGTH];
- bool bHeaderSuppressionEnabled;
- spinlock_t SFQueueLock;
- void *pstSFIndication;
- struct timeval stLastUpdateTokenAt;
- atomic_t uiPerSFTxResourceCount;
- UINT uiMaxLatency;
- UCHAR bIPCSSupport;
- UCHAR bEthCSSupport;
-};
-
-struct bcm_tarang_data {
- struct bcm_tarang_data *next;
- struct bcm_mini_adapter *Adapter;
- struct sk_buff *RxAppControlHead;
- struct sk_buff *RxAppControlTail;
- int AppCtrlQueueLen;
- bool MacTracingEnabled;
- bool bApplicationToExit;
- struct bcm_mibs_dropped_cntrl_msg stDroppedAppCntrlMsgs;
- ULONG RxCntrlMsgBitMask;
-};
-
-struct bcm_targetdsx_buffer {
- ULONG ulTargetDsxBuffer;
- B_UINT16 tid;
- bool valid;
-};
-
-typedef int (*FP_FLASH_WRITE)(struct bcm_mini_adapter *, UINT, PVOID);
-
-typedef int (*FP_FLASH_WRITE_STATUS)(struct bcm_mini_adapter *, UINT, PVOID);
-
-/*
- * Driver adapter data structure
- */
-struct bcm_mini_adapter {
- struct bcm_mini_adapter *next;
- struct net_device *dev;
- u32 msg_enable;
- CHAR *caDsxReqResp;
- atomic_t ApplicationRunning;
- bool AppCtrlQueueOverFlow;
- atomic_t CurrentApplicationCount;
- atomic_t RegisteredApplicationCount;
- bool LinkUpStatus;
- bool TimerActive;
- u32 StatisticsPointer;
- struct sk_buff *RxControlHead;
- struct sk_buff *RxControlTail;
- struct semaphore RxAppControlQueuelock;
- struct semaphore fw_download_sema;
- struct bcm_tarang_data *pTarangs;
- spinlock_t control_queue_lock;
- wait_queue_head_t process_read_wait_queue;
-
- /* the pointer to the first packet we have queued in send
- * deserialized miniport support variables
- */
- atomic_t TotalPacketCount;
- atomic_t TxPktAvail;
-
- /* this to keep track of the Tx and Rx MailBox Registers. */
- atomic_t CurrNumFreeTxDesc;
- /* to keep track the no of byte received */
- USHORT PrevNumRecvDescs;
- USHORT CurrNumRecvDescs;
- UINT u32TotalDSD;
- struct bcm_packet_info PackInfo[NO_OF_QUEUES];
- struct bcm_classifier_rule astClassifierTable[MAX_CLASSIFIERS];
- bool TransferMode;
-
- /*************** qos ******************/
- bool bETHCSEnabled;
- ULONG BEBucketSize;
- ULONG rtPSBucketSize;
- UCHAR LinkStatus;
- bool AutoLinkUp;
- bool AutoSyncup;
-
- int major;
- int minor;
- wait_queue_head_t tx_packet_wait_queue;
- wait_queue_head_t process_rx_cntrlpkt;
- atomic_t process_waiting;
- bool fw_download_done;
-
- char *txctlpacket[MAX_CNTRL_PKTS];
- atomic_t cntrlpktCnt;
- atomic_t index_app_read_cntrlpkt;
- atomic_t index_wr_txcntrlpkt;
- atomic_t index_rd_txcntrlpkt;
- UINT index_datpkt;
- struct semaphore rdmwrmsync;
-
- struct bcm_targetdsx_buffer astTargetDsxBuffer[MAX_TARGET_DSX_BUFFERS];
- ULONG ulFreeTargetBufferCnt;
- ULONG ulCurrentTargetBuffer;
- ULONG ulTotalTargetBuffersAvailable;
- unsigned long chip_id;
- wait_queue_head_t lowpower_mode_wait_queue;
- bool bFlashBoot;
- bool bBinDownloaded;
- bool bCfgDownloaded;
- bool bSyncUpRequestSent;
- USHORT usBestEffortQueueIndex;
- wait_queue_head_t ioctl_fw_dnld_wait_queue;
- bool waiting_to_fw_download_done;
- pid_t fw_download_process_pid;
- struct bcm_target_params *pstargetparams;
- bool device_removed;
- bool DeviceAccess;
- bool bIsAutoCorrectEnabled;
- bool bDDRInitDone;
- int DDRSetting;
- ULONG ulPowerSaveMode;
- spinlock_t txtransmitlock;
- B_UINT8 txtransmit_running;
- /* Thread for control packet handling */
- struct task_struct *control_packet_handler;
- /* thread for transmitting packets. */
- struct task_struct *transmit_packet_thread;
-
- /* LED Related Structures */
- struct bcm_led_info LEDInfo;
-
- /* Driver State for LED Blinking */
- enum bcm_led_events DriverState;
- /* Interface Specific */
- PVOID pvInterfaceAdapter;
- int (*bcm_file_download)(PVOID,
- struct file *,
- unsigned int);
- int (*bcm_file_readback_from_chip)(PVOID,
- struct file *,
- unsigned int);
- int (*interface_rdm)(PVOID,
- UINT,
- PVOID,
- int);
- int (*interface_wrm)(PVOID,
- UINT,
- PVOID,
- int);
- int (*interface_transmit)(PVOID, PVOID , UINT);
- bool IdleMode;
- bool bDregRequestSentInIdleMode;
- bool bTriedToWakeUpFromlowPowerMode;
- bool bShutStatus;
- bool bWakeUpDevice;
- unsigned int usIdleModePattern;
- /* BOOLEAN bTriedToWakeUpFromShutdown; */
- bool bLinkDownRequested;
- int downloadDDR;
- struct bcm_phs_extension stBCMPhsContext;
- struct bcm_hdr_suppression_contextinfo stPhsTxContextInfo;
- uint8_t ucaPHSPktRestoreBuf[2048];
- uint8_t bPHSEnabled;
- bool AutoFirmDld;
- bool bMipsConfig;
- bool bDPLLConfig;
- UINT32 aTxPktSizeHist[MIBS_MAX_HIST_ENTRIES];
- UINT32 aRxPktSizeHist[MIBS_MAX_HIST_ENTRIES];
- struct bcm_fragmented_packet_info
- astFragmentedPktClassifierTable[MAX_FRAGMENTEDIP_CLASSIFICATION_ENTRIES];
- atomic_t uiMBupdate;
- UINT32 PmuMode;
- enum bcm_nvm_type eNVMType;
- UINT uiSectorSize;
- UINT uiSectorSizeInCFG;
- bool bSectorSizeOverride;
- bool bStatusWrite;
- UINT uiNVMDSDSize;
- UINT uiVendorExtnFlag;
- /* it will always represent chosen DSD at any point of time.
- * Generally it is Active DSD but in case of NVM RD/WR it
- * might be different.
- */
- UINT ulFlashCalStart;
- ULONG ulFlashControlSectionStart;
- ULONG ulFlashWriteSize;
- ULONG ulFlashID;
- FP_FLASH_WRITE fpFlashWrite;
- FP_FLASH_WRITE_STATUS fpFlashWriteWithStatusCheck;
-
- struct semaphore NVMRdmWrmLock;
- struct device *pstCreatedClassDevice;
-
- /* BOOLEAN InterfaceUpStatus; */
- struct bcm_flash2x_cs_info *psFlash2xCSInfo;
- struct bcm_flash_cs_info *psFlashCSInfo;
- struct bcm_flash2x_vendor_info *psFlash2xVendorInfo;
- UINT uiFlashBaseAdd; /* Flash start address */
- /* Active ISO offset chosen before f/w download */
- UINT uiActiveISOOffset;
- enum bcm_flash2x_section_val eActiveISO; /* Active ISO section val */
- /* Active DSD val chosen before f/w download */
- enum bcm_flash2x_section_val eActiveDSD;
- /* For accessing Active DSD chosen before f/w download */
- UINT uiActiveDSDOffsetAtFwDld;
- UINT uiFlashLayoutMajorVersion;
- UINT uiFlashLayoutMinorVersion;
- bool bAllDSDWriteAllow;
- bool bSigCorrupted;
- /* this should be set who so ever want to change the Headers.
- * after Write it should be reset immediately.
- */
- bool bHeaderChangeAllowed;
- int SelectedChip;
- bool bEndPointHalted;
- /* while bFlashRawRead will be true, Driver
- * ignore map lay out and consider flash as of without any map.
- */
- bool bFlashRawRead;
- bool bPreparingForLowPowerMode;
- bool bDoSuspend;
- UINT syscfgBefFwDld;
- bool StopAllXaction;
- /* Used to Support extended CAPI requirements from */
- UINT32 liTimeSinceLastNetEntry;
- struct semaphore LowPowerModeSync;
- ULONG liDrainCalculated;
- UINT gpioBitMap;
- struct bcm_debug_state stDebugState;
-};
-
-#define GET_BCM_ADAPTER(net_dev) netdev_priv(net_dev)
-
-struct bcm_eth_header {
- UCHAR au8DestinationAddress[6];
- UCHAR au8SourceAddress[6];
- USHORT u16Etype;
-} __packed;
-
-struct bcm_firmware_info {
- void __user *pvMappedFirmwareAddress;
- ULONG u32FirmwareLength;
- ULONG u32StartingAddress;
-} __packed;
-
-/* holds the value of net_device structure.. */
-extern struct net_device *gblpnetdev;
-
-struct bcm_ddr_setting {
- UINT ulRegAddress;
- UINT ulRegValue;
-};
-int InitAdapter(struct bcm_mini_adapter *psAdapter);
-
-/* =====================================================================
- * Beceem vendor request codes for EP0
- * =====================================================================
- */
-
-#define BCM_REQUEST_READ 0x2
-#define BCM_REQUEST_WRITE 0x1
-#define EP2_MPS_REG 0x0F0110A0
-#define EP2_MPS 0x40
-
-#define EP2_CFG_REG 0x0F0110A8
-#define EP2_CFG_INT 0x27
-#define EP2_CFG_BULK 0x25
-
-#define EP4_MPS_REG 0x0F0110F0
-#define EP4_MPS 0x8C
-
-#define EP4_CFG_REG 0x0F0110F8
-
-#define ISO_MPS_REG 0x0F0110C8
-#define ISO_MPS 0x00000000
-
-#define EP1 0
-#define EP2 1
-#define EP3 2
-#define EP4 3
-#define EP5 4
-#define EP6 5
-
-enum bcm_einterface_setting {
- DEFAULT_SETTING_0 = 0,
- ALTERNATE_SETTING_1 = 1,
-};
-
-#endif /* __ADAPTER_H__ */
diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c
deleted file mode 100644
index 88ce2da531c..00000000000
--- a/drivers/staging/bcm/Bcmchar.c
+++ /dev/null
@@ -1,2652 +0,0 @@
-#include <linux/fs.h>
-
-#include "headers.h"
-
-static int bcm_handle_nvm_read_cmd(struct bcm_mini_adapter *ad,
- PUCHAR read_data,
- struct bcm_nvm_readwrite *nvm_rw)
-{
- INT status = STATUS_FAILURE;
-
- down(&ad->NVMRdmWrmLock);
-
- if ((ad->IdleMode == TRUE) || (ad->bShutStatus == TRUE) ||
- (ad->bPreparingForLowPowerMode == TRUE)) {
-
- BCM_DEBUG_PRINT(ad,
- DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Device is in Idle/Shutdown Mode\n");
- up(&ad->NVMRdmWrmLock);
- kfree(read_data);
- return -EACCES;
- }
-
- status = BeceemNVMRead(ad, (PUINT)read_data,
- nvm_rw->uiOffset,
- nvm_rw->uiNumBytes);
- up(&ad->NVMRdmWrmLock);
-
- if (status != STATUS_SUCCESS) {
- kfree(read_data);
- return status;
- }
-
- if (copy_to_user(nvm_rw->pBuffer, read_data, nvm_rw->uiNumBytes)) {
- kfree(read_data);
- return -EFAULT;
- }
-
- return STATUS_SUCCESS;
-}
-
-static int handle_flash2x_adapter(struct bcm_mini_adapter *ad,
- PUCHAR read_data,
- struct bcm_nvm_readwrite *nvm_rw)
-{
-	/*
-	 * New requirement:
-	 * A DSD section update is allowed in two cases:
-	 * 1. The DSD signature is present in the DSD header, which
-	 *    means the dongle is OK and the update is worthwhile.
-	 * 2. If point 1 fails, the user buffer must carry the DSD
-	 *    signature. This ensures that if the dongle is corrupted,
-	 *    the user-space program first repairs the DSD header with
-	 *    a valid DSD signature, so that this write and any further
-	 *    writes are worthwhile.
-	 *
-	 * This restriction assumes that if the DSD signature is
-	 * corrupted, the DSD data will not be considered valid.
-	 */
- INT status;
- ULONG dsd_magic_num_in_usr_buff = 0;
-
- status = BcmFlash2xCorruptSig(ad, ad->eActiveDSD);
- if (status == STATUS_SUCCESS)
- return STATUS_SUCCESS;
-
- if (((nvm_rw->uiOffset + nvm_rw->uiNumBytes) !=
- ad->uiNVMDSDSize) ||
- (nvm_rw->uiNumBytes < SIGNATURE_SIZE)) {
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
-			"DSD signature is present neither in flash nor in the user-provided input");
- up(&ad->NVMRdmWrmLock);
- kfree(read_data);
- return status;
- }
-
- dsd_magic_num_in_usr_buff =
- ntohl(*(PUINT)(read_data + nvm_rw->uiNumBytes -
- SIGNATURE_SIZE));
- if (dsd_magic_num_in_usr_buff != DSD_IMAGE_MAGIC_NUMBER) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
-			"DSD signature is present neither in flash nor in the user-provided input");
- up(&ad->NVMRdmWrmLock);
- kfree(read_data);
- return status;
- }
-
- return STATUS_SUCCESS;
-}
-
-/***************************************************************
-* Function - bcm_char_open()
-*
-* Description - This is the "open" entry point for the character
-* driver.
-*
-* Parameters - inode: Pointer to the Inode structure of char device
-* filp : File pointer of the char device
-*
-* Returns - Zero(Success)
-****************************************************************/
-
-static int bcm_char_open(struct inode *inode, struct file *filp)
-{
- struct bcm_mini_adapter *ad = NULL;
- struct bcm_tarang_data *tarang = NULL;
-
- ad = GET_BCM_ADAPTER(gblpnetdev);
- tarang = kzalloc(sizeof(struct bcm_tarang_data), GFP_KERNEL);
- if (!tarang)
- return -ENOMEM;
-
- tarang->Adapter = ad;
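-	/* Default mask: all control message types enabled except type 0xB. */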
- tarang->RxCntrlMsgBitMask = 0xFFFFFFFF & ~(1 << 0xB);
-
- down(&ad->RxAppControlQueuelock);
- tarang->next = ad->pTarangs;
- ad->pTarangs = tarang;
- up(&ad->RxAppControlQueuelock);
-
- /* Store the Adapter structure */
- filp->private_data = tarang;
-
- /* Start Queuing the control response Packets */
- atomic_inc(&ad->ApplicationRunning);
-
- nonseekable_open(inode, filp);
- return 0;
-}
-
-static int bcm_char_release(struct inode *inode, struct file *filp)
-{
- struct bcm_tarang_data *tarang, *tmp, *ptmp;
- struct bcm_mini_adapter *ad = NULL;
- struct sk_buff *pkt, *npkt;
-
- tarang = (struct bcm_tarang_data *)filp->private_data;
-
- if (tarang == NULL)
- return 0;
-
- ad = tarang->Adapter;
-
- down(&ad->RxAppControlQueuelock);
-
- tmp = ad->pTarangs;
- for (ptmp = NULL; tmp; ptmp = tmp, tmp = tmp->next) {
- if (tmp == tarang)
- break;
- }
-
- if (tmp) {
- if (!ptmp)
- ad->pTarangs = tmp->next;
- else
- ptmp->next = tmp->next;
- } else {
- up(&ad->RxAppControlQueuelock);
- return 0;
- }
-
- pkt = tarang->RxAppControlHead;
- while (pkt) {
- npkt = pkt->next;
- kfree_skb(pkt);
- pkt = npkt;
- }
-
- up(&ad->RxAppControlQueuelock);
-
- /* Stop Queuing the control response Packets */
- atomic_dec(&ad->ApplicationRunning);
-
- kfree(tarang);
-
-	/* remove this filp from the asynchronously notified filps */
- filp->private_data = NULL;
- return 0;
-}
-
-static ssize_t bcm_char_read(struct file *filp,
- char __user *buf,
- size_t size,
- loff_t *f_pos)
-{
- struct bcm_tarang_data *tarang = filp->private_data;
- struct bcm_mini_adapter *ad = tarang->Adapter;
- struct sk_buff *packet = NULL;
- ssize_t pkt_len = 0;
- int wait_ret_val = 0;
- unsigned long ret = 0;
-
- wait_ret_val = wait_event_interruptible(
- ad->process_read_wait_queue,
- (tarang->RxAppControlHead ||
- ad->device_removed));
-
-	if (wait_ret_val == -ERESTARTSYS) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
-				"Exiting as I've been asked to exit!!!\n");
- return wait_ret_val;
- }
-
- if (ad->device_removed) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Device Removed... Killing the Apps...\n");
- return -ENODEV;
- }
-
- if (false == ad->fw_download_done)
- return -EACCES;
-
- down(&ad->RxAppControlQueuelock);
-
- if (tarang->RxAppControlHead) {
- packet = tarang->RxAppControlHead;
- DEQUEUEPACKET(tarang->RxAppControlHead,
- tarang->RxAppControlTail);
- tarang->AppCtrlQueueLen--;
- }
-
- up(&ad->RxAppControlQueuelock);
-
- if (packet) {
- pkt_len = packet->len;
- ret = copy_to_user(buf, packet->data,
- min_t(size_t, pkt_len, size));
- if (ret) {
- dev_kfree_skb(packet);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Returning from copy to user failure\n");
- return -EFAULT;
- }
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Read %zd Bytes From Adapter packet = %p by process %d!\n",
- pkt_len, packet, current->pid);
- dev_kfree_skb(packet);
- }
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "<\n");
- return pkt_len;
-}
-
-static int bcm_char_ioctl_reg_read_private(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_rdm_buffer rdm_buff = {0};
- struct bcm_ioctl_buffer io_buff;
- PCHAR temp_buff;
- INT status = STATUS_FAILURE;
- UINT buff_len;
- u16 temp_value;
- int bytes;
-
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (io_buff.InputLength > sizeof(rdm_buff))
- return -EINVAL;
-
- if (copy_from_user(&rdm_buff, io_buff.InputBuffer,
- io_buff.InputLength))
- return -EFAULT;
-
- if (io_buff.OutputLength > USHRT_MAX ||
- io_buff.OutputLength == 0) {
- return -EINVAL;
- }
-
- buff_len = io_buff.OutputLength;
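-	/* Round the read length up to the next multiple of 4 bytes. */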
- temp_value = 4 - (buff_len % 4);
- buff_len += temp_value % 4;
-
- temp_buff = kmalloc(buff_len, GFP_KERNEL);
- if (!temp_buff)
- return -ENOMEM;
-
- bytes = rdmalt(ad, (UINT)rdm_buff.Register,
- (PUINT)temp_buff, buff_len);
- if (bytes > 0) {
- status = STATUS_SUCCESS;
- if (copy_to_user(io_buff.OutputBuffer, temp_buff, bytes)) {
- kfree(temp_buff);
- return -EFAULT;
- }
- } else {
- status = bytes;
- }
-
- kfree(temp_buff);
- return status;
-}
-
-static int bcm_char_ioctl_reg_write_private(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_wrm_buffer wrm_buff = {0};
- struct bcm_ioctl_buffer io_buff;
- UINT tmp = 0;
- INT status;
-
- /* Copy Ioctl Buffer structure */
-
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (io_buff.InputLength > sizeof(wrm_buff))
- return -EINVAL;
-
- /* Get WrmBuffer structure */
- if (copy_from_user(&wrm_buff, io_buff.InputBuffer,
- io_buff.InputLength))
- return -EFAULT;
-
- tmp = wrm_buff.Register & EEPROM_REJECT_MASK;
- if (!((ad->pstargetparams->m_u32Customize) & VSG_MODE) &&
- ((tmp == EEPROM_REJECT_REG_1) ||
- (tmp == EEPROM_REJECT_REG_2) ||
- (tmp == EEPROM_REJECT_REG_3) ||
- (tmp == EEPROM_REJECT_REG_4))) {
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "EEPROM Access Denied, not in VSG Mode\n");
- return -EFAULT;
- }
-
- status = wrmalt(ad, (UINT)wrm_buff.Register,
- (PUINT)wrm_buff.Data, sizeof(ULONG));
-
- if (status == STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL, "WRM Done\n");
- } else {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL, "WRM Failed\n");
- status = -EFAULT;
- }
- return status;
-}
-
-static int bcm_char_ioctl_eeprom_reg_read(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_rdm_buffer rdm_buff = {0};
- struct bcm_ioctl_buffer io_buff;
- PCHAR temp_buff = NULL;
- UINT tmp = 0;
- INT status;
- int bytes;
-
- if ((ad->IdleMode == TRUE) ||
- (ad->bShutStatus == TRUE) ||
- (ad->bPreparingForLowPowerMode == TRUE)) {
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Device in Idle Mode, Blocking Rdms\n");
- return -EACCES;
- }
-
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (io_buff.InputLength > sizeof(rdm_buff))
- return -EINVAL;
-
- if (copy_from_user(&rdm_buff, io_buff.InputBuffer,
- io_buff.InputLength))
- return -EFAULT;
-
- if (io_buff.OutputLength > USHRT_MAX ||
- io_buff.OutputLength == 0) {
- return -EINVAL;
- }
-
- temp_buff = kmalloc(io_buff.OutputLength, GFP_KERNEL);
- if (!temp_buff)
- return STATUS_FAILURE;
-
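-	/* The register address must have the 0x0F000000 bits set and be
-	 * 4-byte aligned.
-	 */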
- if ((((ULONG)rdm_buff.Register & 0x0F000000) != 0x0F000000) ||
- ((ULONG)rdm_buff.Register & 0x3)) {
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "RDM Done On invalid Address : %x Access Denied.\n",
- (int)rdm_buff.Register);
-
- kfree(temp_buff);
- return -EINVAL;
- }
-
- tmp = rdm_buff.Register & EEPROM_REJECT_MASK;
- bytes = rdmaltWithLock(ad, (UINT)rdm_buff.Register,
- (PUINT)temp_buff, io_buff.OutputLength);
-
- if (bytes > 0) {
- status = STATUS_SUCCESS;
- if (copy_to_user(io_buff.OutputBuffer, temp_buff, bytes)) {
- kfree(temp_buff);
- return -EFAULT;
- }
- } else {
- status = bytes;
- }
-
- kfree(temp_buff);
- return status;
-}
-
-static int bcm_char_ioctl_eeprom_reg_write(void __user *argp,
- struct bcm_mini_adapter *ad,
- UINT cmd)
-{
- struct bcm_wrm_buffer wrm_buff = {0};
- struct bcm_ioctl_buffer io_buff;
- UINT tmp = 0;
- INT status;
-
- if ((ad->IdleMode == TRUE) ||
- (ad->bShutStatus == TRUE) ||
- (ad->bPreparingForLowPowerMode == TRUE)) {
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Device in Idle Mode, Blocking Wrms\n");
- return -EACCES;
- }
-
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (io_buff.InputLength > sizeof(wrm_buff))
- return -EINVAL;
-
- /* Get WrmBuffer structure */
- if (copy_from_user(&wrm_buff, io_buff.InputBuffer,
- io_buff.InputLength))
- return -EFAULT;
-
- if ((((ULONG)wrm_buff.Register & 0x0F000000) != 0x0F000000) ||
- ((ULONG)wrm_buff.Register & 0x3)) {
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "WRM Done On invalid Address : %x Access Denied.\n",
- (int)wrm_buff.Register);
- return -EINVAL;
- }
-
- tmp = wrm_buff.Register & EEPROM_REJECT_MASK;
- if (!((ad->pstargetparams->m_u32Customize) & VSG_MODE) &&
- ((tmp == EEPROM_REJECT_REG_1) ||
- (tmp == EEPROM_REJECT_REG_2) ||
- (tmp == EEPROM_REJECT_REG_3) ||
- (tmp == EEPROM_REJECT_REG_4)) &&
- (cmd == IOCTL_BCM_REGISTER_WRITE)) {
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "EEPROM Access Denied, not in VSG Mode\n");
- return -EFAULT;
- }
-
- status = wrmaltWithLock(ad, (UINT)wrm_buff.Register,
- (PUINT)wrm_buff.Data,
- wrm_buff.Length);
-
- if (status == STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, OSAL_DBG,
- DBG_LVL_ALL, "WRM Done\n");
- } else {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL, "WRM Failed\n");
- status = -EFAULT;
- }
- return status;
-}
-
-static int bcm_char_ioctl_gpio_set_request(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_gpio_info gpio_info = {0};
- struct bcm_ioctl_buffer io_buff;
- UCHAR reset_val[4];
- UINT value = 0;
- UINT bit = 0;
- UINT operation = 0;
- INT status;
- int bytes;
-
- if ((ad->IdleMode == TRUE) ||
- (ad->bShutStatus == TRUE) ||
- (ad->bPreparingForLowPowerMode == TRUE)) {
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL,
- "GPIO Can't be set/clear in Low power Mode");
- return -EACCES;
- }
-
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (io_buff.InputLength > sizeof(gpio_info))
- return -EINVAL;
-
- if (copy_from_user(&gpio_info, io_buff.InputBuffer,
- io_buff.InputLength))
- return -EFAULT;
-
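-	/* Turn the requested GPIO number into a single-bit mask. */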
- bit = gpio_info.uiGpioNumber;
- operation = gpio_info.uiGpioValue;
- value = (1<<bit);
-
- if (IsReqGpioIsLedInNVM(ad, value) == false) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL,
-				"Sorry, requested GPIO<0x%X> does not correspond to an LED!",
- value);
- return -EINVAL;
- }
-
- /* Set - setting 1 */
- if (operation) {
- /* Set the gpio output register */
- status = wrmaltWithLock(ad,
- BCM_GPIO_OUTPUT_SET_REG,
- (PUINT)(&value), sizeof(UINT));
-
- if (status == STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS,
- OSAL_DBG, DBG_LVL_ALL,
- "Set the GPIO bit\n");
- } else {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS,
- OSAL_DBG, DBG_LVL_ALL,
- "Failed to set the %dth GPIO\n",
- bit);
- return status;
- }
- } else {
- /* Set the gpio output register */
- status = wrmaltWithLock(ad,
- BCM_GPIO_OUTPUT_CLR_REG,
- (PUINT)(&value), sizeof(UINT));
-
- if (status == STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS,
- OSAL_DBG, DBG_LVL_ALL,
-					   "Cleared the GPIO bit\n");
- } else {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS,
- OSAL_DBG, DBG_LVL_ALL,
- "Failed to clear the %dth GPIO\n",
- bit);
- return status;
- }
- }
-
- bytes = rdmaltWithLock(ad, (UINT)GPIO_MODE_REGISTER,
- (PUINT)reset_val, sizeof(UINT));
- if (bytes < 0) {
- status = bytes;
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "GPIO_MODE_REGISTER read failed");
- return status;
- }
- status = STATUS_SUCCESS;
-
- /* Set the gpio mode register to output */
- *(UINT *)reset_val |= (1<<bit);
- status = wrmaltWithLock(ad, GPIO_MODE_REGISTER,
- (PUINT)reset_val, sizeof(UINT));
-
- if (status == STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL,
- "Set the GPIO to output Mode\n");
- } else {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL,
- "Failed to put GPIO in Output Mode\n");
- }
-
- return status;
-}
-
-static int bcm_char_ioctl_led_thread_state_change_req(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_user_thread_req thread_req = {0};
- struct bcm_ioctl_buffer io_buff;
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
-			"User requested LED thread state change");
-
- if ((ad->IdleMode == TRUE) ||
- (ad->bShutStatus == TRUE) ||
- (ad->bPreparingForLowPowerMode == TRUE)) {
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL,
- "GPIO Can't be set/clear in Low power Mode");
- return -EACCES;
- }
-
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (io_buff.InputLength > sizeof(thread_req))
- return -EINVAL;
-
- if (copy_from_user(&thread_req, io_buff.InputBuffer,
- io_buff.InputLength))
- return -EFAULT;
-
-	/* If the LED thread is running (actively or inactively),
-	 * update its state as requested.
-	 */
- if (ad->LEDInfo.led_thread_running) {
- if (thread_req.ThreadState == LED_THREAD_ACTIVATION_REQ) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS,
- OSAL_DBG, DBG_LVL_ALL,
- "Activating thread req");
- ad->DriverState = LED_THREAD_ACTIVE;
- } else {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS,
- OSAL_DBG, DBG_LVL_ALL,
- "DeActivating Thread req.....");
- ad->DriverState = LED_THREAD_INACTIVE;
- }
-
- /* signal thread. */
- wake_up(&ad->LEDInfo.notify_led_event);
- }
- return STATUS_SUCCESS;
-}
-
-static int bcm_char_ioctl_gpio_status_request(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_gpio_info gpio_info = {0};
- struct bcm_ioctl_buffer io_buff;
- ULONG bit = 0;
- UCHAR read[4];
- INT status;
- int bytes;
-
- if ((ad->IdleMode == TRUE) ||
- (ad->bShutStatus == TRUE) ||
- (ad->bPreparingForLowPowerMode == TRUE))
- return -EACCES;
-
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (io_buff.InputLength > sizeof(gpio_info))
- return -EINVAL;
-
- if (copy_from_user(&gpio_info, io_buff.InputBuffer,
- io_buff.InputLength))
- return -EFAULT;
-
- bit = gpio_info.uiGpioNumber;
-
-	/* Read the GPIO pin state register */
- bytes = rdmaltWithLock(ad, (UINT)GPIO_PIN_STATE_REGISTER,
- (PUINT)read, sizeof(UINT));
-
- if (bytes < 0) {
- status = bytes;
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "RDM Failed\n");
- return status;
- }
- status = STATUS_SUCCESS;
- return status;
-}
-
-static int bcm_char_ioctl_gpio_multi_request(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_gpio_multi_info gpio_multi_info[MAX_IDX];
- struct bcm_gpio_multi_info *pgpio_multi_info =
- (struct bcm_gpio_multi_info *)gpio_multi_info;
- struct bcm_ioctl_buffer io_buff;
- UCHAR reset_val[4];
- INT status = STATUS_FAILURE;
- int bytes;
-
- memset(pgpio_multi_info, 0,
- MAX_IDX * sizeof(struct bcm_gpio_multi_info));
-
- if ((ad->IdleMode == TRUE) ||
- (ad->bShutStatus == TRUE) ||
- (ad->bPreparingForLowPowerMode == TRUE))
- return -EINVAL;
-
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (io_buff.InputLength > sizeof(gpio_multi_info))
- return -EINVAL;
- if (io_buff.OutputLength > sizeof(gpio_multi_info))
- io_buff.OutputLength = sizeof(gpio_multi_info);
-
- if (copy_from_user(&gpio_multi_info, io_buff.InputBuffer,
- io_buff.InputLength))
- return -EFAULT;
-
- if (IsReqGpioIsLedInNVM(ad, pgpio_multi_info[WIMAX_IDX].uiGPIOMask)
- == false) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL,
-				"Sorry, requested GPIO<0x%X> does not correspond to NVM LED bit map<0x%X>!",
- pgpio_multi_info[WIMAX_IDX].uiGPIOMask,
- ad->gpioBitMap);
- return -EINVAL;
- }
-
- /* Set the gpio output register */
- if ((pgpio_multi_info[WIMAX_IDX].uiGPIOMask) &
- (pgpio_multi_info[WIMAX_IDX].uiGPIOCommand)) {
- /* Set 1's in GPIO OUTPUT REGISTER */
- *(UINT *)reset_val = pgpio_multi_info[WIMAX_IDX].uiGPIOMask &
- pgpio_multi_info[WIMAX_IDX].uiGPIOCommand &
- pgpio_multi_info[WIMAX_IDX].uiGPIOValue;
-
- if (*(UINT *) reset_val)
- status = wrmaltWithLock(ad,
- BCM_GPIO_OUTPUT_SET_REG,
- (PUINT)reset_val, sizeof(ULONG));
-
- if (status != STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "WRM to BCM_GPIO_OUTPUT_SET_REG Failed.");
- return status;
- }
-
- /* Clear to 0's in GPIO OUTPUT REGISTER */
- *(UINT *)reset_val =
- (pgpio_multi_info[WIMAX_IDX].uiGPIOMask &
- pgpio_multi_info[WIMAX_IDX].uiGPIOCommand &
- (~(pgpio_multi_info[WIMAX_IDX].uiGPIOValue)));
-
- if (*(UINT *) reset_val)
- status = wrmaltWithLock(ad,
- BCM_GPIO_OUTPUT_CLR_REG, (PUINT)reset_val,
- sizeof(ULONG));
-
- if (status != STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "WRM to BCM_GPIO_OUTPUT_CLR_REG Failed.");
- return status;
- }
- }
-
- if (pgpio_multi_info[WIMAX_IDX].uiGPIOMask) {
- bytes = rdmaltWithLock(ad, (UINT)GPIO_PIN_STATE_REGISTER,
- (PUINT)reset_val, sizeof(UINT));
-
- if (bytes < 0) {
- status = bytes;
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "RDM to GPIO_PIN_STATE_REGISTER Failed.");
- return status;
- }
- status = STATUS_SUCCESS;
-
- pgpio_multi_info[WIMAX_IDX].uiGPIOValue =
- (*(UINT *)reset_val &
- pgpio_multi_info[WIMAX_IDX].uiGPIOMask);
- }
-
- status = copy_to_user(io_buff.OutputBuffer, &gpio_multi_info,
- io_buff.OutputLength);
- if (status) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
-				"Failed while copying content to IOBuffer for user space err:%d",
- status);
- return -EFAULT;
- }
- return status;
-}
-
-static int bcm_char_ioctl_gpio_mode_request(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_gpio_multi_mode gpio_multi_mode[MAX_IDX];
- struct bcm_gpio_multi_mode *pgpio_multi_mode =
- (struct bcm_gpio_multi_mode *)gpio_multi_mode;
- struct bcm_ioctl_buffer io_buff;
- UCHAR reset_val[4];
- INT status;
- int bytes;
-
- if ((ad->IdleMode == TRUE) ||
- (ad->bShutStatus == TRUE) ||
- (ad->bPreparingForLowPowerMode == TRUE))
- return -EINVAL;
-
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (io_buff.InputLength > sizeof(gpio_multi_mode))
- return -EINVAL;
- if (io_buff.OutputLength > sizeof(gpio_multi_mode))
- io_buff.OutputLength = sizeof(gpio_multi_mode);
-
- if (copy_from_user(&gpio_multi_mode, io_buff.InputBuffer,
- io_buff.InputLength))
- return -EFAULT;
-
- bytes = rdmaltWithLock(ad, (UINT)GPIO_MODE_REGISTER,
- (PUINT)reset_val, sizeof(UINT));
-
- if (bytes < 0) {
- status = bytes;
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Read of GPIO_MODE_REGISTER failed");
- return status;
- }
- status = STATUS_SUCCESS;
-
- /* Validating the request */
- if (IsReqGpioIsLedInNVM(ad, pgpio_multi_mode[WIMAX_IDX].uiGPIOMask)
- == false) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
-			"Sorry, requested GPIO<0x%X> does not correspond to NVM LED bit map<0x%X>!",
- pgpio_multi_mode[WIMAX_IDX].uiGPIOMask,
- ad->gpioBitMap);
- return -EINVAL;
- }
-
- if (pgpio_multi_mode[WIMAX_IDX].uiGPIOMask) {
- /* write all OUT's (1's) */
- *(UINT *) reset_val |=
- (pgpio_multi_mode[WIMAX_IDX].uiGPIOMode &
- pgpio_multi_mode[WIMAX_IDX].uiGPIOMask);
-
- /* write all IN's (0's) */
- *(UINT *) reset_val &=
- ~((~pgpio_multi_mode[WIMAX_IDX].uiGPIOMode) &
- pgpio_multi_mode[WIMAX_IDX].uiGPIOMask);
-
-		/* The current implementation returns the modes of all
-		 * GPIOs; otherwise this would need to be ANDed with the mask.
-		 */
- pgpio_multi_mode[WIMAX_IDX].uiGPIOMode = *(UINT *)reset_val;
-
- status = wrmaltWithLock(ad, GPIO_MODE_REGISTER,
- (PUINT)reset_val, sizeof(ULONG));
- if (status == STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(ad,
- DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "WRM to GPIO_MODE_REGISTER Done");
- } else {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "WRM to GPIO_MODE_REGISTER Failed");
- return -EFAULT;
- }
- } else {
- /* if uiGPIOMask is 0 then return mode register configuration */
- pgpio_multi_mode[WIMAX_IDX].uiGPIOMode = *(UINT *)reset_val;
- }
-
- status = copy_to_user(io_buff.OutputBuffer, &gpio_multi_mode,
- io_buff.OutputLength);
- if (status) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
-				"Failed while copying content to IOBuffer for user space err:%d",
- status);
- return -EFAULT;
- }
- return status;
-}
-
-static int bcm_char_ioctl_misc_request(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_ioctl_buffer io_buff;
- PVOID buff = NULL;
- INT status;
-
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (io_buff.InputLength < sizeof(struct bcm_link_request))
- return -EINVAL;
-
- if (io_buff.InputLength > MAX_CNTL_PKT_SIZE)
- return -EINVAL;
-
- buff = memdup_user(io_buff.InputBuffer,
- io_buff.InputLength);
- if (IS_ERR(buff))
- return PTR_ERR(buff);
-
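-	/* Wait up to one second for any pending low-power transition to
-	 * finish before queueing the control message.
-	 */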
- down(&ad->LowPowerModeSync);
- status = wait_event_interruptible_timeout(
- ad->lowpower_mode_wait_queue,
- !ad->bPreparingForLowPowerMode,
- (1 * HZ));
-
- if (status == -ERESTARTSYS)
- goto cntrlEnd;
-
- if (ad->bPreparingForLowPowerMode) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Preparing Idle Mode is still True - Hence Rejecting control message\n");
- status = STATUS_FAILURE;
- goto cntrlEnd;
- }
- status = CopyBufferToControlPacket(ad, (PVOID)buff);
-
-cntrlEnd:
- up(&ad->LowPowerModeSync);
- kfree(buff);
- return status;
-}
-
-static int bcm_char_ioctl_buffer_download_start(
- struct bcm_mini_adapter *ad)
-{
- INT status;
-
- if (down_trylock(&ad->NVMRdmWrmLock)) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
-			"Firmware download not allowed as EEPROM Read/Write is in progress\n");
- return -EACCES;
- }
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Starting the firmware download PID =0x%x!!!!\n",
- current->pid);
-
- if (down_trylock(&ad->fw_download_sema))
- return -EBUSY;
-
- ad->bBinDownloaded = false;
- ad->fw_download_process_pid = current->pid;
- ad->bCfgDownloaded = false;
- ad->fw_download_done = false;
- netif_carrier_off(ad->dev);
- netif_stop_queue(ad->dev);
- status = reset_card_proc(ad);
- if (status) {
- pr_err(PFX "%s: reset_card_proc Failed!\n", ad->dev->name);
- up(&ad->fw_download_sema);
- up(&ad->NVMRdmWrmLock);
- return status;
- }
- mdelay(10);
-
- up(&ad->NVMRdmWrmLock);
- return status;
-}
-
-static int bcm_char_ioctl_buffer_download(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_firmware_info *fw_info = NULL;
- struct bcm_ioctl_buffer io_buff;
- INT status;
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Starting the firmware download PID =0x%x!!!!\n", current->pid);
-
- if (!down_trylock(&ad->fw_download_sema)) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Invalid way to download buffer. Use Start and then call this!!!\n");
- up(&ad->fw_download_sema);
- return -EINVAL;
- }
-
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer))) {
- up(&ad->fw_download_sema);
- return -EFAULT;
- }
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Length for FW DLD is : %lx\n", io_buff.InputLength);
-
- if (io_buff.InputLength > sizeof(struct bcm_firmware_info)) {
- up(&ad->fw_download_sema);
- return -EINVAL;
- }
-
- fw_info = kmalloc(sizeof(*fw_info), GFP_KERNEL);
- if (!fw_info) {
- up(&ad->fw_download_sema);
- return -ENOMEM;
- }
-
- if (copy_from_user(fw_info, io_buff.InputBuffer,
- io_buff.InputLength)) {
- up(&ad->fw_download_sema);
- kfree(fw_info);
- return -EFAULT;
- }
-
- if (!fw_info->pvMappedFirmwareAddress ||
- (fw_info->u32FirmwareLength == 0)) {
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
-				"Invalid firmware address or length %lu\n",
- fw_info->u32FirmwareLength);
- up(&ad->fw_download_sema);
- kfree(fw_info);
- status = -EINVAL;
- return status;
- }
-
- status = bcm_ioctl_fw_download(ad, fw_info);
-
- if (status != STATUS_SUCCESS) {
- if (fw_info->u32StartingAddress == CONFIG_BEGIN_ADDR)
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "IOCTL: Configuration File Upload Failed\n");
- else
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "IOCTL: Firmware File Upload Failed\n");
-
- /* up(&ad->fw_download_sema); */
-
- if (ad->LEDInfo.led_thread_running &
- BCM_LED_THREAD_RUNNING_ACTIVELY) {
- ad->DriverState = DRIVER_INIT;
- ad->LEDInfo.bLedInitDone = false;
- wake_up(&ad->LEDInfo.notify_led_event);
- }
- }
-
- if (status != STATUS_SUCCESS)
- up(&ad->fw_download_sema);
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, OSAL_DBG, DBG_LVL_ALL,
- "IOCTL: Firmware File Uploaded\n");
- kfree(fw_info);
- return status;
-}
-
-static int bcm_char_ioctl_buffer_download_stop(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- INT status;
- int timeout = 0;
-
- if (!down_trylock(&ad->fw_download_sema)) {
- up(&ad->fw_download_sema);
- return -EINVAL;
- }
-
- if (down_trylock(&ad->NVMRdmWrmLock)) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "FW download blocked as EEPROM Read/Write is in progress\n");
- up(&ad->fw_download_sema);
- return -EACCES;
- }
-
- ad->bBinDownloaded = TRUE;
- ad->bCfgDownloaded = TRUE;
- atomic_set(&ad->CurrNumFreeTxDesc, 0);
- ad->CurrNumRecvDescs = 0;
- ad->downloadDDR = 0;
-
- /* setting the Mips to Run */
- status = run_card_proc(ad);
-
- if (status) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Firm Download Failed\n");
- up(&ad->fw_download_sema);
- up(&ad->NVMRdmWrmLock);
- return status;
- }
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL, "Firm Download Over...\n");
-
- mdelay(10);
-
- /* Wait for MailBox Interrupt */
- if (StartInterruptUrb((struct bcm_interface_adapter *)ad->pvInterfaceAdapter))
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Unable to send interrupt...\n");
-
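-	/* Give the firmware up to five seconds to report download completion. */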
- timeout = 5*HZ;
- ad->waiting_to_fw_download_done = false;
- wait_event_timeout(ad->ioctl_fw_dnld_wait_queue,
- ad->waiting_to_fw_download_done, timeout);
- ad->fw_download_process_pid = INVALID_PID;
- ad->fw_download_done = TRUE;
- atomic_set(&ad->CurrNumFreeTxDesc, 0);
- ad->CurrNumRecvDescs = 0;
- ad->PrevNumRecvDescs = 0;
- atomic_set(&ad->cntrlpktCnt, 0);
- ad->LinkUpStatus = 0;
- ad->LinkStatus = 0;
-
- if (ad->LEDInfo.led_thread_running &
- BCM_LED_THREAD_RUNNING_ACTIVELY) {
- ad->DriverState = FW_DOWNLOAD_DONE;
- wake_up(&ad->LEDInfo.notify_led_event);
- }
-
- if (!timeout)
- status = -ENODEV;
-
- up(&ad->fw_download_sema);
- up(&ad->NVMRdmWrmLock);
- return status;
-}
-
-static int bcm_char_ioctl_chip_reset(struct bcm_mini_adapter *ad)
-{
- INT status;
- INT nvm_access;
-
- nvm_access = down_trylock(&ad->NVMRdmWrmLock);
- if (nvm_access) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- " IOCTL_BCM_CHIP_RESET not allowed as EEPROM Read/Write is in progress\n");
- return -EACCES;
- }
-
- down(&ad->RxAppControlQueuelock);
- status = reset_card_proc(ad);
- flushAllAppQ();
- up(&ad->RxAppControlQueuelock);
- up(&ad->NVMRdmWrmLock);
- ResetCounters(ad);
- return status;
-}
-
-static int bcm_char_ioctl_qos_threshold(ULONG arg,
- struct bcm_mini_adapter *ad)
-{
- USHORT i;
-
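-	/* Note: each queue's threshold is read from the same user pointer,
-	 * so every queue ends up with the same value.
-	 */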
- for (i = 0; i < NO_OF_QUEUES; i++) {
- if (get_user(ad->PackInfo[i].uiThreshold,
- (unsigned long __user *)arg)) {
- return -EFAULT;
- }
- }
- return 0;
-}
-
-static int bcm_char_ioctl_switch_transfer_mode(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- UINT data = 0;
-
- if (copy_from_user(&data, argp, sizeof(UINT)))
- return -EFAULT;
-
- if (data) {
- /* Allow All Packets */
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "IOCTL_BCM_SWITCH_TRANSFER_MODE: ETH_PACKET_TUNNELING_MODE\n");
- ad->TransferMode = ETH_PACKET_TUNNELING_MODE;
- } else {
- /* Allow IP only Packets */
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "IOCTL_BCM_SWITCH_TRANSFER_MODE: IP_PACKET_ONLY_MODE\n");
- ad->TransferMode = IP_PACKET_ONLY_MODE;
- }
- return STATUS_SUCCESS;
-}
-
-static int bcm_char_ioctl_get_driver_version(void __user *argp)
-{
- struct bcm_ioctl_buffer io_buff;
- ulong len;
-
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- len = min_t(ulong, io_buff.OutputLength, strlen(DRV_VERSION) + 1);
-
- if (copy_to_user(io_buff.OutputBuffer, DRV_VERSION, len))
- return -EFAULT;
-
- return STATUS_SUCCESS;
-}
-
-static int bcm_char_ioctl_get_current_status(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_link_state link_state;
- struct bcm_ioctl_buffer io_buff;
-
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer))) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "copy_from_user failed..\n");
- return -EFAULT;
- }
-
- if (io_buff.OutputLength != sizeof(link_state))
- return -EINVAL;
-
- memset(&link_state, 0, sizeof(link_state));
- link_state.bIdleMode = ad->IdleMode;
- link_state.bShutdownMode = ad->bShutStatus;
- link_state.ucLinkStatus = ad->LinkStatus;
-
- if (copy_to_user(io_buff.OutputBuffer, &link_state, min_t(size_t,
- sizeof(link_state), io_buff.OutputLength))) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Copy_to_user Failed..\n");
- return -EFAULT;
- }
- return STATUS_SUCCESS;
-}
-
-
-static int bcm_char_ioctl_set_mac_tracing(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_ioctl_buffer io_buff;
- UINT tracing_flag;
-
- /* copy ioctl Buffer structure */
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (copy_from_user(&tracing_flag, io_buff.InputBuffer, sizeof(UINT)))
- return -EFAULT;
-
- if (tracing_flag)
- ad->pTarangs->MacTracingEnabled = TRUE;
- else
- ad->pTarangs->MacTracingEnabled = false;
-
- return STATUS_SUCCESS;
-}
-
-static int bcm_char_ioctl_get_dsx_indication(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_ioctl_buffer io_buff;
- ULONG sf_id = 0;
-
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (io_buff.OutputLength < sizeof(struct bcm_add_indication_alt)) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
-			"Output length mismatch: got %lx, need 0x%zx",
- io_buff.OutputLength,
- sizeof(struct bcm_add_indication_alt));
- return -EINVAL;
- }
-
- if (copy_from_user(&sf_id, io_buff.InputBuffer, sizeof(sf_id)))
- return -EFAULT;
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Get DSX Data SF ID is =%lx\n", sf_id);
- get_dsx_sf_data_to_application(ad, sf_id, io_buff.OutputBuffer);
- return STATUS_SUCCESS;
-}
-
-static int bcm_char_ioctl_get_host_mibs(void __user *argp,
- struct bcm_mini_adapter *ad,
- struct bcm_tarang_data *tarang)
-{
- struct bcm_ioctl_buffer io_buff;
- INT status = STATUS_FAILURE;
- PVOID temp_buff;
-
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (io_buff.OutputLength != sizeof(struct bcm_host_stats_mibs)) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Length Check failed %lu %zd\n", io_buff.OutputLength,
- sizeof(struct bcm_host_stats_mibs));
- return -EINVAL;
- }
-
- /* FIXME: HOST_STATS are too big for kmalloc (122048)! */
- temp_buff = kzalloc(sizeof(struct bcm_host_stats_mibs), GFP_KERNEL);
- if (!temp_buff)
- return STATUS_FAILURE;
-
- status = ProcessGetHostMibs(ad, temp_buff);
- GetDroppedAppCntrlPktMibs(temp_buff, tarang);
-
- if (status != STATUS_FAILURE) {
- if (copy_to_user(io_buff.OutputBuffer, temp_buff,
- sizeof(struct bcm_host_stats_mibs))) {
- kfree(temp_buff);
- return -EFAULT;
- }
- }
-
- kfree(temp_buff);
- return status;
-}
-
-static int bcm_char_ioctl_bulk_wrm(void __user *argp,
- struct bcm_mini_adapter *ad, UINT cmd)
-{
- struct bcm_bulk_wrm_buffer *bulk_buff;
- struct bcm_ioctl_buffer io_buff;
- UINT tmp = 0;
- INT status = STATUS_FAILURE;
- PCHAR buff = NULL;
-
- if ((ad->IdleMode == TRUE) ||
- (ad->bShutStatus == TRUE) ||
- (ad->bPreparingForLowPowerMode == TRUE)) {
-
- BCM_DEBUG_PRINT (ad, DBG_TYPE_PRINTK, 0, 0,
- "Device in Idle/Shutdown Mode, Blocking Wrms\n");
- return -EACCES;
- }
-
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (io_buff.InputLength < sizeof(ULONG) * 2)
- return -EINVAL;
-
- buff = memdup_user(io_buff.InputBuffer,
- io_buff.InputLength);
- if (IS_ERR(buff))
- return PTR_ERR(buff);
-
- bulk_buff = (struct bcm_bulk_wrm_buffer *)buff;
-
- if (((ULONG)bulk_buff->Register & 0x0F000000) != 0x0F000000 ||
- ((ULONG)bulk_buff->Register & 0x3)) {
- BCM_DEBUG_PRINT (ad, DBG_TYPE_PRINTK, 0, 0,
- "WRM Done On invalid Address : %x Access Denied.\n",
- (int)bulk_buff->Register);
- kfree(buff);
- return -EINVAL;
- }
-
- tmp = bulk_buff->Register & EEPROM_REJECT_MASK;
- if (!((ad->pstargetparams->m_u32Customize)&VSG_MODE) &&
- ((tmp == EEPROM_REJECT_REG_1) ||
- (tmp == EEPROM_REJECT_REG_2) ||
- (tmp == EEPROM_REJECT_REG_3) ||
- (tmp == EEPROM_REJECT_REG_4)) &&
- (cmd == IOCTL_BCM_REGISTER_WRITE)) {
-
- kfree(buff);
- BCM_DEBUG_PRINT (ad, DBG_TYPE_PRINTK, 0, 0,
- "EEPROM Access Denied, not in VSG Mode\n");
- return -EFAULT;
- }
-
- if (bulk_buff->SwapEndian == false)
- status = wrmWithLock(ad, (UINT)bulk_buff->Register,
- (PCHAR)bulk_buff->Values,
- io_buff.InputLength - 2*sizeof(ULONG));
- else
- status = wrmaltWithLock(ad, (UINT)bulk_buff->Register,
- (PUINT)bulk_buff->Values,
- io_buff.InputLength - 2*sizeof(ULONG));
-
- if (status != STATUS_SUCCESS)
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, "WRM Failed\n");
-
- kfree(buff);
- return status;
-}
-
-static int bcm_char_ioctl_get_nvm_size(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_ioctl_buffer io_buff;
-
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (ad->eNVMType == NVM_EEPROM || ad->eNVMType == NVM_FLASH) {
- if (copy_to_user(io_buff.OutputBuffer, &ad->uiNVMDSDSize,
- sizeof(UINT)))
- return -EFAULT;
- }
-
- return STATUS_SUCCESS;
-}
-
-static int bcm_char_ioctl_cal_init(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_ioctl_buffer io_buff;
- UINT sector_size = 0;
- INT status = STATUS_FAILURE;
-
- if (ad->eNVMType == NVM_FLASH) {
- if (copy_from_user(&io_buff, argp,
- sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (copy_from_user(&sector_size, io_buff.InputBuffer,
- sizeof(UINT)))
- return -EFAULT;
-
- if ((sector_size < MIN_SECTOR_SIZE) ||
- (sector_size > MAX_SECTOR_SIZE)) {
- if (copy_to_user(io_buff.OutputBuffer,
- &ad->uiSectorSize, sizeof(UINT)))
- return -EFAULT;
- } else {
- if (IsFlash2x(ad)) {
- if (copy_to_user(io_buff.OutputBuffer,
- &ad->uiSectorSize, sizeof(UINT)))
- return -EFAULT;
- } else {
- if ((TRUE == ad->bShutStatus) ||
- (TRUE == ad->IdleMode)) {
- BCM_DEBUG_PRINT(ad,
- DBG_TYPE_PRINTK, 0, 0,
- "Device is in Idle/Shutdown Mode\n");
- return -EACCES;
- }
-
- ad->uiSectorSize = sector_size;
- BcmUpdateSectorSize(ad,
- ad->uiSectorSize);
- }
- }
- status = STATUS_SUCCESS;
- } else {
- status = STATUS_FAILURE;
- }
- return status;
-}
-
-static int bcm_char_ioctl_set_debug(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
-#ifdef DEBUG
- struct bcm_ioctl_buffer io_buff;
- struct bcm_user_debug_state user_debug_state;
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "In SET_DEBUG ioctl\n");
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (copy_from_user(&user_debug_state, io_buff.InputBuffer,
- sizeof(struct bcm_user_debug_state)))
- return -EFAULT;
-
- BCM_DEBUG_PRINT (ad, DBG_TYPE_PRINTK, 0, 0,
- "IOCTL_BCM_SET_DEBUG: OnOff=%d Type = 0x%x ",
- user_debug_state.OnOff, user_debug_state.Type);
- /* user_debug_state.Subtype <<= 1; */
- user_debug_state.Subtype = 1 << user_debug_state.Subtype;
- BCM_DEBUG_PRINT (ad, DBG_TYPE_PRINTK, 0, 0,
- "actual Subtype=0x%x\n", user_debug_state.Subtype);
-
- /* Update new 'DebugState' in the ad */
- ad->stDebugState.type |= user_debug_state.Type;
- /* Subtype: A bitmap of 32 bits for Subtype per Type.
- * Valid indexes in 'subtype' array: 1,2,4,8
- * corresponding to valid Type values. Hence we can use the 'Type' field
- * as the index value, ignoring the array entries 0,3,5,6,7 !
- */
- if (user_debug_state.OnOff)
- ad->stDebugState.subtype[user_debug_state.Type] |=
- user_debug_state.Subtype;
- else
- ad->stDebugState.subtype[user_debug_state.Type] &=
- ~user_debug_state.Subtype;
-
- BCM_SHOW_DEBUG_BITMAP(ad);
-#endif
- return STATUS_SUCCESS;
-}
-
-static int bcm_char_ioctl_nvm_rw(void __user *argp,
- struct bcm_mini_adapter *ad, UINT cmd)
-{
- struct bcm_nvm_readwrite nvm_rw;
- struct timeval tv0, tv1;
- struct bcm_ioctl_buffer io_buff;
- PUCHAR read_data = NULL;
- INT status = STATUS_FAILURE;
-
- memset(&tv0, 0, sizeof(struct timeval));
- memset(&tv1, 0, sizeof(struct timeval));
- if ((ad->eNVMType == NVM_FLASH) &&
- (ad->uiFlashLayoutMajorVersion == 0)) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "The Flash Control Section is Corrupted. Hence Rejection on NVM Read/Write\n");
- return -EFAULT;
- }
-
- if (IsFlash2x(ad)) {
- if ((ad->eActiveDSD != DSD0) &&
- (ad->eActiveDSD != DSD1) &&
- (ad->eActiveDSD != DSD2)) {
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "No DSD is active..hence NVM Command is blocked");
- return STATUS_FAILURE;
- }
- }
-
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (copy_from_user(&nvm_rw,
- (IOCTL_BCM_NVM_READ == cmd) ?
- io_buff.OutputBuffer : io_buff.InputBuffer,
- sizeof(struct bcm_nvm_readwrite)))
- return -EFAULT;
-
- /*
- * Deny the access if the offset crosses the cal area limit.
- */
- if (nvm_rw.uiNumBytes > ad->uiNVMDSDSize)
- return STATUS_FAILURE;
-
- if (nvm_rw.uiOffset >
- ad->uiNVMDSDSize - nvm_rw.uiNumBytes)
- return STATUS_FAILURE;
-
- read_data = memdup_user(nvm_rw.pBuffer,
- nvm_rw.uiNumBytes);
- if (IS_ERR(read_data))
- return PTR_ERR(read_data);
-
- do_gettimeofday(&tv0);
- if (IOCTL_BCM_NVM_READ == cmd) {
- int ret = bcm_handle_nvm_read_cmd(ad, read_data,
- &nvm_rw);
- if (ret != STATUS_SUCCESS)
- return ret;
- } else {
- down(&ad->NVMRdmWrmLock);
-
- if ((ad->IdleMode == TRUE) ||
- (ad->bShutStatus == TRUE) ||
- (ad->bPreparingForLowPowerMode == TRUE)) {
-
- BCM_DEBUG_PRINT(ad,
- DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Device is in Idle/Shutdown Mode\n");
- up(&ad->NVMRdmWrmLock);
- kfree(read_data);
- return -EACCES;
- }
-
- ad->bHeaderChangeAllowed = TRUE;
- if (IsFlash2x(ad)) {
- int ret = handle_flash2x_adapter(ad,
- read_data,
- &nvm_rw);
- if (ret != STATUS_SUCCESS)
- return ret;
- }
-
- status = BeceemNVMWrite(ad, (PUINT)read_data,
- nvm_rw.uiOffset, nvm_rw.uiNumBytes,
- nvm_rw.bVerify);
- if (IsFlash2x(ad))
- BcmFlash2xWriteSig(ad, ad->eActiveDSD);
-
- ad->bHeaderChangeAllowed = false;
-
- up(&ad->NVMRdmWrmLock);
-
- if (status != STATUS_SUCCESS) {
- kfree(read_data);
- return status;
- }
- }
-
- do_gettimeofday(&tv1);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
-			" time taken by write/read: %ld msec\n",
- (tv1.tv_sec - tv0.tv_sec)*1000 +
- (tv1.tv_usec - tv0.tv_usec)/1000);
-
- kfree(read_data);
- return STATUS_SUCCESS;
-}
-
-static int bcm_char_ioctl_flash2x_section_read(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_flash2x_readwrite flash_2x_read = {0};
- struct bcm_ioctl_buffer io_buff;
- PUCHAR read_buff = NULL;
- UINT nob = 0;
- UINT buff_size = 0;
- UINT read_bytes = 0;
- UINT read_offset = 0;
- INT status = STATUS_FAILURE;
- void __user *OutPutBuff;
-
- if (IsFlash2x(ad) != TRUE) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Flash Does not have 2.x map");
- return -EINVAL;
- }
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL, "IOCTL_BCM_FLASH2X_SECTION_READ Called");
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- /* Reading FLASH 2.x READ structure */
- if (copy_from_user(&flash_2x_read, io_buff.InputBuffer,
- sizeof(struct bcm_flash2x_readwrite)))
- return -EFAULT;
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "\nflash_2x_read.Section :%x",
- flash_2x_read.Section);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "\nflash_2x_read.offset :%x",
- flash_2x_read.offset);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "\nflash_2x_read.numOfBytes :%x",
- flash_2x_read.numOfBytes);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "\nflash_2x_read.bVerify :%x\n",
- flash_2x_read.bVerify);
-
-	/* This was internal to the driver for raw reads;
-	 * it has now been exposed to the user-space application.
-	 */
- if (validateFlash2xReadWrite(ad, &flash_2x_read) == false)
- return STATUS_FAILURE;
-
- nob = flash_2x_read.numOfBytes;
- if (nob > ad->uiSectorSize)
- buff_size = ad->uiSectorSize;
- else
- buff_size = nob;
-
- read_offset = flash_2x_read.offset;
- OutPutBuff = io_buff.OutputBuffer;
-	read_buff = kzalloc(buff_size, GFP_KERNEL);
-
- if (read_buff == NULL) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Memory allocation failed for Flash 2.x Read Structure");
- return -ENOMEM;
- }
- down(&ad->NVMRdmWrmLock);
-
- if ((ad->IdleMode == TRUE) ||
- (ad->bShutStatus == TRUE) ||
- (ad->bPreparingForLowPowerMode == TRUE)) {
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL,
- "Device is in Idle/Shutdown Mode\n");
- up(&ad->NVMRdmWrmLock);
- kfree(read_buff);
- return -EACCES;
- }
-
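-	/* Read the section in sector-sized chunks, copying each chunk
-	 * back to user space.
-	 */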
- while (nob) {
- if (nob > ad->uiSectorSize)
- read_bytes = ad->uiSectorSize;
- else
- read_bytes = nob;
-
- /* Reading the data from Flash 2.x */
- status = BcmFlash2xBulkRead(ad, (PUINT)read_buff,
- flash_2x_read.Section, read_offset, read_bytes);
- if (status) {
- BCM_DEBUG_PRINT(ad,
- DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Flash 2x read err with status :%d",
- status);
- break;
- }
-
- BCM_DEBUG_PRINT_BUFFER(ad, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL, read_buff, read_bytes);
-
- status = copy_to_user(OutPutBuff, read_buff, read_bytes);
- if (status) {
- BCM_DEBUG_PRINT(ad,
- DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
-				"Copy to user failed with status: %d", status);
- up(&ad->NVMRdmWrmLock);
- kfree(read_buff);
- return -EFAULT;
- }
- nob = nob - read_bytes;
- if (nob) {
- read_offset = read_offset + read_bytes;
- OutPutBuff = OutPutBuff + read_bytes;
- }
- }
-
- up(&ad->NVMRdmWrmLock);
- kfree(read_buff);
- return status;
-}
-
-static int bcm_char_ioctl_flash2x_section_write(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_flash2x_readwrite sFlash2xWrite = {0};
- struct bcm_ioctl_buffer io_buff;
- PUCHAR write_buff;
- void __user *input_addr;
- UINT nob = 0;
- UINT buff_size = 0;
- UINT write_off = 0;
- UINT write_bytes = 0;
- INT status = STATUS_FAILURE;
-
- if (IsFlash2x(ad) != TRUE) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Flash Does not have 2.x map");
- return -EINVAL;
- }
-
- /* First make this False so that we can enable the Sector
- * Permission Check in BeceemFlashBulkWrite
- */
- ad->bAllDSDWriteAllow = false;
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "IOCTL_BCM_FLASH2X_SECTION_WRITE Called");
-
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- /* Reading FLASH 2.x READ structure */
- if (copy_from_user(&sFlash2xWrite, io_buff.InputBuffer,
- sizeof(struct bcm_flash2x_readwrite)))
- return -EFAULT;
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "\nsFlash2xWrite.Section :%x", sFlash2xWrite.Section);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "\nsFlash2xWrite.offset :%d", sFlash2xWrite.offset);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "\nsFlash2xWrite.numOfBytes :%x", sFlash2xWrite.numOfBytes);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "\nsFlash2xWrite.bVerify :%x\n", sFlash2xWrite.bVerify);
-
- if ((sFlash2xWrite.Section != VSA0) && (sFlash2xWrite.Section != VSA1)
- && (sFlash2xWrite.Section != VSA2)) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Only VSA write is allowed");
- return -EINVAL;
- }
-
- if (validateFlash2xReadWrite(ad, &sFlash2xWrite) == false)
- return STATUS_FAILURE;
-
- input_addr = sFlash2xWrite.pDataBuff;
- write_off = sFlash2xWrite.offset;
- nob = sFlash2xWrite.numOfBytes;
-
- if (nob > ad->uiSectorSize)
- buff_size = ad->uiSectorSize;
- else
- buff_size = nob;
-
- write_buff = kmalloc(buff_size, GFP_KERNEL);
-
- if (write_buff == NULL)
- return -ENOMEM;
-
-	/* Limit the first chunk so the write ends on a sector boundary. */
- write_bytes = ad->uiSectorSize;
- if (write_off % ad->uiSectorSize) {
- write_bytes = ad->uiSectorSize -
- (write_off % ad->uiSectorSize);
- }
-
- if (nob < write_bytes)
- write_bytes = nob;
-
- down(&ad->NVMRdmWrmLock);
-
- if ((ad->IdleMode == TRUE) ||
- (ad->bShutStatus == TRUE) ||
- (ad->bPreparingForLowPowerMode == TRUE)) {
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Device is in Idle/Shutdown Mode\n");
- up(&ad->NVMRdmWrmLock);
- kfree(write_buff);
- return -EACCES;
- }
-
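-	/* Invalidate the section signature before writing; it is restored
-	 * by BcmFlash2xWriteSig() once the write loop completes.
-	 */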
- BcmFlash2xCorruptSig(ad, sFlash2xWrite.Section);
- do {
- status = copy_from_user(write_buff, input_addr, write_bytes);
- if (status) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
-				"Copy from user failed with status: %d", status);
- up(&ad->NVMRdmWrmLock);
- kfree(write_buff);
- return -EFAULT;
- }
- BCM_DEBUG_PRINT_BUFFER(ad, DBG_TYPE_OTHERS,
- OSAL_DBG, DBG_LVL_ALL, write_buff, write_bytes);
-
- /* Writing the data from Flash 2.x */
- status = BcmFlash2xBulkWrite(ad, (PUINT)write_buff,
- sFlash2xWrite.Section,
- write_off,
- write_bytes,
- sFlash2xWrite.bVerify);
-
- if (status) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
-				"Flash 2x write err with status: %d", status);
- break;
- }
-
- nob = nob - write_bytes;
- if (nob) {
- write_off = write_off + write_bytes;
- input_addr = input_addr + write_bytes;
- if (nob > ad->uiSectorSize)
- write_bytes = ad->uiSectorSize;
- else
- write_bytes = nob;
- }
- } while (nob > 0);
-
- BcmFlash2xWriteSig(ad, sFlash2xWrite.Section);
- up(&ad->NVMRdmWrmLock);
- kfree(write_buff);
- return status;
-}
-
-static int bcm_char_ioctl_flash2x_section_bitmap(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_flash2x_bitmap *flash_2x_bit_map;
- struct bcm_ioctl_buffer io_buff;
-
-	BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
-			"IOCTL_BCM_GET_FLASH2X_SECTION_BITMAP Called");
-
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (io_buff.OutputLength != sizeof(struct bcm_flash2x_bitmap))
- return -EINVAL;
-
- flash_2x_bit_map = kzalloc(sizeof(struct bcm_flash2x_bitmap),
- GFP_KERNEL);
-
- if (flash_2x_bit_map == NULL) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Memory is not available");
- return -ENOMEM;
- }
-
-	/* Read the flash section bitmap */
- down(&ad->NVMRdmWrmLock);
-
- if ((ad->IdleMode == TRUE) ||
- (ad->bShutStatus == TRUE) ||
- (ad->bPreparingForLowPowerMode == TRUE)) {
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Device is in Idle/Shutdown Mode\n");
- up(&ad->NVMRdmWrmLock);
- kfree(flash_2x_bit_map);
- return -EACCES;
- }
-
- BcmGetFlash2xSectionalBitMap(ad, flash_2x_bit_map);
- up(&ad->NVMRdmWrmLock);
- if (copy_to_user(io_buff.OutputBuffer, flash_2x_bit_map,
- sizeof(struct bcm_flash2x_bitmap))) {
- kfree(flash_2x_bit_map);
- return -EFAULT;
- }
-
- kfree(flash_2x_bit_map);
-	return STATUS_SUCCESS;
-}
-
-static int bcm_char_ioctl_set_active_section(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- enum bcm_flash2x_section_val flash_2x_section_val = 0;
- INT status = STATUS_FAILURE;
- struct bcm_ioctl_buffer io_buff;
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "IOCTL_BCM_SET_ACTIVE_SECTION Called");
-
- if (IsFlash2x(ad) != TRUE) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Flash Does not have 2.x map");
- return -EINVAL;
- }
-
- status = copy_from_user(&io_buff, argp,
- sizeof(struct bcm_ioctl_buffer));
- if (status) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Copy of IOCTL BUFFER failed");
- return -EFAULT;
- }
-
- status = copy_from_user(&flash_2x_section_val,
- io_buff.InputBuffer, sizeof(INT));
- if (status) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Copy of flash section val failed");
- return -EFAULT;
- }
-
- down(&ad->NVMRdmWrmLock);
-
- if ((ad->IdleMode == TRUE) ||
- (ad->bShutStatus == TRUE) ||
- (ad->bPreparingForLowPowerMode == TRUE)) {
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Device is in Idle/Shutdown Mode\n");
- up(&ad->NVMRdmWrmLock);
- return -EACCES;
- }
-
- status = BcmSetActiveSection(ad, flash_2x_section_val);
- if (status)
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
-			"Failed to make its priority highest. status %d",
- status);
-
- up(&ad->NVMRdmWrmLock);
-
- return status;
-}
-
-static int bcm_char_ioctl_copy_section(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_flash2x_copy_section copy_sect_strut = {0};
- struct bcm_ioctl_buffer io_buff;
- INT status = STATUS_SUCCESS;
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "IOCTL_BCM_COPY_SECTION Called");
-
- ad->bAllDSDWriteAllow = false;
- if (IsFlash2x(ad) != TRUE) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Flash Does not have 2.x map");
- return -EINVAL;
- }
-
- status = copy_from_user(&io_buff, argp,
- sizeof(struct bcm_ioctl_buffer));
- if (status) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Copy of IOCTL BUFFER failed status :%d",
- status);
- return -EFAULT;
- }
-
- status = copy_from_user(&copy_sect_strut, io_buff.InputBuffer,
- sizeof(struct bcm_flash2x_copy_section));
- if (status) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Copy of Copy_Section_Struct failed with status :%d",
- status);
- return -EFAULT;
- }
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
-		"Source Section :%x", copy_sect_strut.SrcSection);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
-		"Destination Section :%x", copy_sect_strut.DstSection);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "offset :%x", copy_sect_strut.offset);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "nob :%x", copy_sect_strut.numOfBytes);
-
- if (IsSectionExistInFlash(ad, copy_sect_strut.SrcSection) == false) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Source Section<%x> does not exist in Flash ",
- copy_sect_strut.SrcSection);
- return -EINVAL;
- }
-
- if (IsSectionExistInFlash(ad, copy_sect_strut.DstSection) == false) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
-			"Destination Section<%x> does not exist in Flash ",
- copy_sect_strut.DstSection);
- return -EINVAL;
- }
-
- if (copy_sect_strut.SrcSection == copy_sect_strut.DstSection) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Source and Destination section should be different");
- return -EINVAL;
- }
-
- down(&ad->NVMRdmWrmLock);
-
- if ((ad->IdleMode == TRUE) ||
- (ad->bShutStatus == TRUE) ||
- (ad->bPreparingForLowPowerMode == TRUE)) {
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Device is in Idle/Shutdown Mode\n");
- up(&ad->NVMRdmWrmLock);
- return -EACCES;
- }
-
- if (copy_sect_strut.SrcSection == ISO_IMAGE1 ||
- copy_sect_strut.SrcSection == ISO_IMAGE2) {
- if (IsNonCDLessDevice(ad)) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Device is Non-CDLess hence won't have ISO !!");
- status = -EINVAL;
- } else if (copy_sect_strut.numOfBytes == 0) {
- status = BcmCopyISO(ad, copy_sect_strut);
- } else {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Partial Copy of ISO section is not Allowed..");
- status = STATUS_FAILURE;
- }
- up(&ad->NVMRdmWrmLock);
- return status;
- }
-
- status = BcmCopySection(ad, copy_sect_strut.SrcSection,
- copy_sect_strut.DstSection,
- copy_sect_strut.offset,
- copy_sect_strut.numOfBytes);
- up(&ad->NVMRdmWrmLock);
- return status;
-}
-
-static int bcm_char_ioctl_get_flash_cs_info(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_ioctl_buffer io_buff;
- INT status = STATUS_SUCCESS;
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- " IOCTL_BCM_GET_FLASH_CS_INFO Called");
-
- status = copy_from_user(&io_buff, argp,
- sizeof(struct bcm_ioctl_buffer));
- if (status) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Copy of IOCTL BUFFER failed");
- return -EFAULT;
- }
-
- if (ad->eNVMType != NVM_FLASH) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Connected device does not have flash");
- return -EINVAL;
- }
-
- if (IsFlash2x(ad) == TRUE) {
- if (io_buff.OutputLength < sizeof(struct bcm_flash2x_cs_info))
- return -EINVAL;
-
- if (copy_to_user(io_buff.OutputBuffer,
- ad->psFlash2xCSInfo,
- sizeof(struct bcm_flash2x_cs_info)))
- return -EFAULT;
- } else {
- if (io_buff.OutputLength < sizeof(struct bcm_flash_cs_info))
- return -EINVAL;
-
- if (copy_to_user(io_buff.OutputBuffer, ad->psFlashCSInfo,
- sizeof(struct bcm_flash_cs_info)))
- return -EFAULT;
- }
- return status;
-}
-
-static int bcm_char_ioctl_select_dsd(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_ioctl_buffer io_buff;
- INT status = STATUS_FAILURE;
- UINT sect_offset = 0;
- enum bcm_flash2x_section_val flash_2x_section_val;
-
- flash_2x_section_val = NO_SECTION_VAL;
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "IOCTL_BCM_SELECT_DSD Called");
-
- if (IsFlash2x(ad) != TRUE) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Flash Does not have 2.x map");
- return -EINVAL;
- }
-
- status = copy_from_user(&io_buff, argp,
- sizeof(struct bcm_ioctl_buffer));
- if (status) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Copy of IOCTL BUFFER failed");
- return -EFAULT;
- }
- status = copy_from_user(&flash_2x_section_val, io_buff.InputBuffer,
- sizeof(INT));
- if (status) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Copy of flash section val failed");
- return -EFAULT;
- }
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Read Section :%d", flash_2x_section_val);
- if ((flash_2x_section_val != DSD0) &&
- (flash_2x_section_val != DSD1) &&
- (flash_2x_section_val != DSD2)) {
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Passed section<%x> is not DSD section",
- flash_2x_section_val);
- return STATUS_FAILURE;
- }
-
- sect_offset = BcmGetSectionValStartOffset(ad, flash_2x_section_val);
- if (sect_offset == INVALID_OFFSET) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Provided Section val <%d> does not exist in Flash 2.x",
- flash_2x_section_val);
- return -EINVAL;
- }
-
- ad->bAllDSDWriteAllow = TRUE;
- ad->ulFlashCalStart = sect_offset;
- ad->eActiveDSD = flash_2x_section_val;
-
- return STATUS_SUCCESS;
-}
-
-static int bcm_char_ioctl_nvm_raw_read(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_nvm_readwrite nvm_read;
- struct bcm_ioctl_buffer io_buff;
- unsigned int nob;
- INT buff_size;
- INT read_offset = 0;
- UINT read_bytes = 0;
- PUCHAR read_buff;
- void __user *OutPutBuff;
- INT status = STATUS_FAILURE;
-
- if (ad->eNVMType != NVM_FLASH) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "NVM TYPE is not Flash");
- return -EINVAL;
- }
-
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer))) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "copy_from_user 1 failed\n");
- return -EFAULT;
- }
-
- if (copy_from_user(&nvm_read, io_buff.OutputBuffer,
- sizeof(struct bcm_nvm_readwrite)))
- return -EFAULT;
-
- nob = nvm_read.uiNumBytes;
- /* In Raw-Read max Buff size : 64MB */
-
- if (nob > DEFAULT_BUFF_SIZE)
- buff_size = DEFAULT_BUFF_SIZE;
- else
- buff_size = nob;
-
- read_offset = nvm_read.uiOffset;
- OutPutBuff = nvm_read.pBuffer;
-
-	read_buff = kzalloc(buff_size, GFP_KERNEL);
- if (read_buff == NULL) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Memory allocation failed for Flash 2.x Read Structure");
- return -ENOMEM;
- }
- down(&ad->NVMRdmWrmLock);
-
- if ((ad->IdleMode == TRUE) ||
- (ad->bShutStatus == TRUE) ||
- (ad->bPreparingForLowPowerMode == TRUE)) {
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Device is in Idle/Shutdown Mode\n");
- kfree(read_buff);
- up(&ad->NVMRdmWrmLock);
- return -EACCES;
- }
-
- ad->bFlashRawRead = TRUE;
-
- while (nob) {
- if (nob > DEFAULT_BUFF_SIZE)
- read_bytes = DEFAULT_BUFF_SIZE;
- else
- read_bytes = nob;
-
- /* Reading the data from Flash 2.x */
- status = BeceemNVMRead(ad, (PUINT)read_buff,
- read_offset, read_bytes);
- if (status) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Flash 2x read err with status :%d",
- status);
- break;
- }
-
- BCM_DEBUG_PRINT_BUFFER(ad, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL, read_buff, read_bytes);
-
- status = copy_to_user(OutPutBuff, read_buff, read_bytes);
- if (status) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
-					"Copy to user failed with status :%d",
- status);
- up(&ad->NVMRdmWrmLock);
- kfree(read_buff);
- return -EFAULT;
- }
- nob = nob - read_bytes;
- if (nob) {
- read_offset = read_offset + read_bytes;
- OutPutBuff = OutPutBuff + read_bytes;
- }
- }
- ad->bFlashRawRead = false;
- up(&ad->NVMRdmWrmLock);
- kfree(read_buff);
- return status;
-}
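
The raw-read handler above streams the flash contents to user space in chunks of at most DEFAULT_BUFF_SIZE bytes, advancing the flash offset and the destination pointer after every iteration until the requested byte count is exhausted. Below is a minimal, self-contained userspace C sketch of the same chunking arithmetic; the chunk size, buffer names and fill pattern are invented for illustration and are not the driver's values.

/* Standalone sketch of the chunked-copy loop used by the raw NVM read
 * handler above.  The chunk size and the buffers are invented; the
 * driver reads from flash and copies to user space instead of copying
 * between two local buffers.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CHUNK_SIZE 4096			/* stands in for DEFAULT_BUFF_SIZE */

static void chunked_copy(unsigned char *dst, const unsigned char *src,
			 size_t total)
{
	size_t offset = 0;

	while (total) {
		size_t bytes = (total > CHUNK_SIZE) ? CHUNK_SIZE : total;

		/* One bounded read/copy per iteration, as in the driver loop */
		memcpy(dst + offset, src + offset, bytes);

		total -= bytes;
		offset += bytes;
	}
}

int main(void)
{
	size_t total = 3 * CHUNK_SIZE + 123;	/* not a multiple of the chunk */
	unsigned char *src = malloc(total);
	unsigned char *dst = malloc(total);

	memset(src, 0xA5, total);
	chunked_copy(dst, src, total);
	printf("copied %zu bytes, last byte 0x%02X\n", total,
	       (unsigned)dst[total - 1]);

	free(src);
	free(dst);
	return 0;
}
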
-
-static int bcm_char_ioctl_cntrlmsg_mask(void __user *argp,
- struct bcm_mini_adapter *ad,
- struct bcm_tarang_data *tarang)
-{
- struct bcm_ioctl_buffer io_buff;
- INT status = STATUS_FAILURE;
- ULONG rx_cntrl_msg_bit_mask = 0;
-
- /* Copy Ioctl Buffer structure */
- status = copy_from_user(&io_buff, argp,
- sizeof(struct bcm_ioctl_buffer));
- if (status) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
-				"copy of ioctl buffer from user space failed");
- return -EFAULT;
- }
-
- if (io_buff.InputLength != sizeof(unsigned long))
- return -EINVAL;
-
- status = copy_from_user(&rx_cntrl_msg_bit_mask, io_buff.InputBuffer,
- io_buff.InputLength);
- if (status) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
-				"copy of control bit mask from user space failed");
- return -EFAULT;
- }
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "\n Got user defined cntrl msg bit mask :%lx",
- rx_cntrl_msg_bit_mask);
- tarang->RxCntrlMsgBitMask = rx_cntrl_msg_bit_mask;
-
- return status;
-}
-
-static int bcm_char_ioctl_get_device_driver_info(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_driver_info dev_info;
- struct bcm_ioctl_buffer io_buff;
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Called IOCTL_BCM_GET_DEVICE_DRIVER_INFO\n");
-
- memset(&dev_info, 0, sizeof(dev_info));
- dev_info.MaxRDMBufferSize = BUFFER_4K;
- dev_info.u32DSDStartOffset = EEPROM_CALPARAM_START;
- dev_info.u32RxAlignmentCorrection = 0;
- dev_info.u32NVMType = ad->eNVMType;
- dev_info.u32InterfaceType = BCM_USB;
-
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (io_buff.OutputLength < sizeof(dev_info))
- return -EINVAL;
-
- if (copy_to_user(io_buff.OutputBuffer, &dev_info, sizeof(dev_info)))
- return -EFAULT;
-
- return STATUS_SUCCESS;
-}
-
-static int bcm_char_ioctl_time_since_net_entry(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_time_elapsed time_elapsed_since_net_entry = {0};
- struct bcm_ioctl_buffer io_buff;
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "IOCTL_BCM_TIME_SINCE_NET_ENTRY called");
-
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (io_buff.OutputLength < sizeof(struct bcm_time_elapsed))
- return -EINVAL;
-
- time_elapsed_since_net_entry.ul64TimeElapsedSinceNetEntry =
- get_seconds() - ad->liTimeSinceLastNetEntry;
-
- if (copy_to_user(io_buff.OutputBuffer, &time_elapsed_since_net_entry,
- sizeof(struct bcm_time_elapsed)))
- return -EFAULT;
-
- return STATUS_SUCCESS;
-}
-
-
-static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
-{
- struct bcm_tarang_data *tarang = filp->private_data;
- void __user *argp = (void __user *)arg;
- struct bcm_mini_adapter *ad = tarang->Adapter;
- INT status = STATUS_FAILURE;
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Parameters Passed to control IOCTL cmd=0x%X arg=0x%lX",
- cmd, arg);
-
- if (_IOC_TYPE(cmd) != BCM_IOCTL)
- return -EFAULT;
- if (_IOC_DIR(cmd) & _IOC_READ)
- status = !access_ok(VERIFY_WRITE, argp, _IOC_SIZE(cmd));
- else if (_IOC_DIR(cmd) & _IOC_WRITE)
- status = !access_ok(VERIFY_READ, argp, _IOC_SIZE(cmd));
- else if (_IOC_NONE == (_IOC_DIR(cmd) & _IOC_NONE))
- status = STATUS_SUCCESS;
-
- if (status)
- return -EFAULT;
-
- if (ad->device_removed)
- return -EFAULT;
-
- if (false == ad->fw_download_done) {
- switch (cmd) {
- case IOCTL_MAC_ADDR_REQ:
- case IOCTL_LINK_REQ:
- case IOCTL_CM_REQUEST:
- case IOCTL_SS_INFO_REQ:
- case IOCTL_SEND_CONTROL_MESSAGE:
- case IOCTL_IDLE_REQ:
- case IOCTL_BCM_GPIO_SET_REQUEST:
- case IOCTL_BCM_GPIO_STATUS_REQUEST:
- return -EACCES;
- default:
- break;
- }
- }
-
- status = vendorextnIoctl(ad, cmd, arg);
- if (status != CONTINUE_COMMON_PATH)
- return status;
-
- switch (cmd) {
- /* Rdms for Swin Idle... */
- case IOCTL_BCM_REGISTER_READ_PRIVATE:
- status = bcm_char_ioctl_reg_read_private(argp, ad);
- return status;
-
- case IOCTL_BCM_REGISTER_WRITE_PRIVATE:
- status = bcm_char_ioctl_reg_write_private(argp, ad);
- return status;
-
- case IOCTL_BCM_REGISTER_READ:
- case IOCTL_BCM_EEPROM_REGISTER_READ:
- status = bcm_char_ioctl_eeprom_reg_read(argp, ad);
- return status;
-
- case IOCTL_BCM_REGISTER_WRITE:
- case IOCTL_BCM_EEPROM_REGISTER_WRITE:
- status = bcm_char_ioctl_eeprom_reg_write(argp, ad, cmd);
- return status;
-
- case IOCTL_BCM_GPIO_SET_REQUEST:
- status = bcm_char_ioctl_gpio_set_request(argp, ad);
- return status;
-
- case BCM_LED_THREAD_STATE_CHANGE_REQ:
- status = bcm_char_ioctl_led_thread_state_change_req(argp,
- ad);
- return status;
-
- case IOCTL_BCM_GPIO_STATUS_REQUEST:
- status = bcm_char_ioctl_gpio_status_request(argp, ad);
- return status;
-
- case IOCTL_BCM_GPIO_MULTI_REQUEST:
- status = bcm_char_ioctl_gpio_multi_request(argp, ad);
- return status;
-
- case IOCTL_BCM_GPIO_MODE_REQUEST:
- status = bcm_char_ioctl_gpio_mode_request(argp, ad);
- return status;
-
- case IOCTL_MAC_ADDR_REQ:
- case IOCTL_LINK_REQ:
- case IOCTL_CM_REQUEST:
- case IOCTL_SS_INFO_REQ:
- case IOCTL_SEND_CONTROL_MESSAGE:
- case IOCTL_IDLE_REQ:
- status = bcm_char_ioctl_misc_request(argp, ad);
- return status;
-
- case IOCTL_BCM_BUFFER_DOWNLOAD_START:
- status = bcm_char_ioctl_buffer_download_start(ad);
- return status;
-
- case IOCTL_BCM_BUFFER_DOWNLOAD:
- status = bcm_char_ioctl_buffer_download(argp, ad);
- return status;
-
- case IOCTL_BCM_BUFFER_DOWNLOAD_STOP:
- status = bcm_char_ioctl_buffer_download_stop(argp, ad);
- return status;
-
-
- case IOCTL_BE_BUCKET_SIZE:
- status = 0;
- if (get_user(ad->BEBucketSize,
- (unsigned long __user *)arg))
- status = -EFAULT;
- break;
-
- case IOCTL_RTPS_BUCKET_SIZE:
- status = 0;
- if (get_user(ad->rtPSBucketSize,
- (unsigned long __user *)arg))
- status = -EFAULT;
- break;
-
- case IOCTL_CHIP_RESET:
- status = bcm_char_ioctl_chip_reset(ad);
- return status;
-
- case IOCTL_QOS_THRESHOLD:
- status = bcm_char_ioctl_qos_threshold(arg, ad);
- return status;
-
- case IOCTL_DUMP_PACKET_INFO:
- DumpPackInfo(ad);
- DumpPhsRules(&ad->stBCMPhsContext);
- status = STATUS_SUCCESS;
- break;
-
- case IOCTL_GET_PACK_INFO:
- if (copy_to_user(argp, &ad->PackInfo,
- sizeof(struct bcm_packet_info)*NO_OF_QUEUES))
- return -EFAULT;
- status = STATUS_SUCCESS;
- break;
-
- case IOCTL_BCM_SWITCH_TRANSFER_MODE:
- status = bcm_char_ioctl_switch_transfer_mode(argp, ad);
- return status;
-
- case IOCTL_BCM_GET_DRIVER_VERSION:
- status = bcm_char_ioctl_get_driver_version(argp);
- return status;
-
- case IOCTL_BCM_GET_CURRENT_STATUS:
- status = bcm_char_ioctl_get_current_status(argp, ad);
- return status;
-
- case IOCTL_BCM_SET_MAC_TRACING:
- status = bcm_char_ioctl_set_mac_tracing(argp, ad);
- return status;
-
- case IOCTL_BCM_GET_DSX_INDICATION:
- status = bcm_char_ioctl_get_dsx_indication(argp, ad);
- return status;
-
- case IOCTL_BCM_GET_HOST_MIBS:
- status = bcm_char_ioctl_get_host_mibs(argp, ad, tarang);
- return status;
-
- case IOCTL_BCM_WAKE_UP_DEVICE_FROM_IDLE:
- if ((false == ad->bTriedToWakeUpFromlowPowerMode) &&
- (TRUE == ad->IdleMode)) {
- ad->usIdleModePattern = ABORT_IDLE_MODE;
- ad->bWakeUpDevice = TRUE;
- wake_up(&ad->process_rx_cntrlpkt);
- }
-
- status = STATUS_SUCCESS;
- break;
-
- case IOCTL_BCM_BULK_WRM:
- status = bcm_char_ioctl_bulk_wrm(argp, ad, cmd);
- return status;
-
- case IOCTL_BCM_GET_NVM_SIZE:
- status = bcm_char_ioctl_get_nvm_size(argp, ad);
- return status;
-
- case IOCTL_BCM_CAL_INIT:
- status = bcm_char_ioctl_cal_init(argp, ad);
- return status;
-
- case IOCTL_BCM_SET_DEBUG:
- status = bcm_char_ioctl_set_debug(argp, ad);
- return status;
-
- case IOCTL_BCM_NVM_READ:
- case IOCTL_BCM_NVM_WRITE:
- status = bcm_char_ioctl_nvm_rw(argp, ad, cmd);
- return status;
-
- case IOCTL_BCM_FLASH2X_SECTION_READ:
- status = bcm_char_ioctl_flash2x_section_read(argp, ad);
- return status;
-
- case IOCTL_BCM_FLASH2X_SECTION_WRITE:
- status = bcm_char_ioctl_flash2x_section_write(argp, ad);
- return status;
-
- case IOCTL_BCM_GET_FLASH2X_SECTION_BITMAP:
- status = bcm_char_ioctl_flash2x_section_bitmap(argp, ad);
- return status;
-
- case IOCTL_BCM_SET_ACTIVE_SECTION:
- status = bcm_char_ioctl_set_active_section(argp, ad);
- return status;
-
- case IOCTL_BCM_IDENTIFY_ACTIVE_SECTION:
- /* Right Now we are taking care of only DSD */
- ad->bAllDSDWriteAllow = false;
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "IOCTL_BCM_IDENTIFY_ACTIVE_SECTION called");
- status = STATUS_SUCCESS;
- break;
-
- case IOCTL_BCM_COPY_SECTION:
- status = bcm_char_ioctl_copy_section(argp, ad);
- return status;
-
- case IOCTL_BCM_GET_FLASH_CS_INFO:
- status = bcm_char_ioctl_get_flash_cs_info(argp, ad);
- return status;
-
- case IOCTL_BCM_SELECT_DSD:
- status = bcm_char_ioctl_select_dsd(argp, ad);
- return status;
-
- case IOCTL_BCM_NVM_RAW_READ:
- status = bcm_char_ioctl_nvm_raw_read(argp, ad);
- return status;
-
- case IOCTL_BCM_CNTRLMSG_MASK:
- status = bcm_char_ioctl_cntrlmsg_mask(argp, ad, tarang);
- return status;
-
- case IOCTL_BCM_GET_DEVICE_DRIVER_INFO:
- status = bcm_char_ioctl_get_device_driver_info(argp, ad);
- return status;
-
- case IOCTL_BCM_TIME_SINCE_NET_ENTRY:
- status = bcm_char_ioctl_time_since_net_entry(argp, ad);
- return status;
-
- case IOCTL_CLOSE_NOTIFICATION:
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "IOCTL_CLOSE_NOTIFICATION");
- break;
-
- default:
- pr_info(DRV_NAME ": unknown ioctl cmd=%#x\n", cmd);
- status = STATUS_FAILURE;
- break;
- }
- return status;
-}
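
The dispatcher above first rejects any command whose _IOC_TYPE does not match BCM_IOCTL and then checks the access direction encoded in the command number before touching the user buffer. A small userspace sketch of how those bits are packed and unpacked with the standard _IOC_* helpers follows; the 'x' magic number, request number and payload struct are hypothetical and are not the driver's actual BCM_IOCTL values.

/* Decoding an ioctl command number with the standard _IOC_* helpers
 * from <linux/ioctl.h>.  The magic number, request number and payload
 * type below are made up for illustration only.
 */
#include <stdio.h>
#include <linux/ioctl.h>

struct demo_payload {
	unsigned int value;
};

#define DEMO_MAGIC	'x'
#define DEMO_GET_VALUE	_IOR(DEMO_MAGIC, 0x01, struct demo_payload)

int main(void)
{
	unsigned int cmd = DEMO_GET_VALUE;

	printf("type : 0x%02x\n", _IOC_TYPE(cmd));	/* magic number */
	printf("nr   : 0x%02x\n", _IOC_NR(cmd));	/* request number */
	printf("size : %u\n", _IOC_SIZE(cmd));		/* payload size */
	printf("read : %d\n", !!(_IOC_DIR(cmd) & _IOC_READ));
	printf("write: %d\n", !!(_IOC_DIR(cmd) & _IOC_WRITE));
	return 0;
}
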
-
-
-static const struct file_operations bcm_fops = {
- .owner = THIS_MODULE,
- .open = bcm_char_open,
- .release = bcm_char_release,
- .read = bcm_char_read,
- .unlocked_ioctl = bcm_char_ioctl,
- .llseek = no_llseek,
-};
-
-int register_control_device_interface(struct bcm_mini_adapter *ad)
-{
-
- if (ad->major > 0)
- return ad->major;
-
- ad->major = register_chrdev(0, DEV_NAME, &bcm_fops);
- if (ad->major < 0) {
-		pr_err(DRV_NAME ": could not create character device\n");
- return ad->major;
- }
-
- ad->pstCreatedClassDevice = device_create(bcm_class, NULL,
- MKDEV(ad->major, 0),
- ad, DEV_NAME);
-
- if (IS_ERR(ad->pstCreatedClassDevice)) {
- pr_err(DRV_NAME ": class device create failed\n");
- unregister_chrdev(ad->major, DEV_NAME);
- return PTR_ERR(ad->pstCreatedClassDevice);
- }
-
- return 0;
-}
-
-void unregister_control_device_interface(struct bcm_mini_adapter *ad)
-{
- if (ad->major > 0) {
- device_destroy(bcm_class, MKDEV(ad->major, 0));
- unregister_chrdev(ad->major, DEV_NAME);
- }
-}
-
diff --git a/drivers/staging/bcm/Bcmnet.c b/drivers/staging/bcm/Bcmnet.c
deleted file mode 100644
index e57767684ce..00000000000
--- a/drivers/staging/bcm/Bcmnet.c
+++ /dev/null
@@ -1,240 +0,0 @@
-#include "headers.h"
-
-struct net_device *gblpnetdev;
-
-static INT bcm_open(struct net_device *dev)
-{
- struct bcm_mini_adapter *ad = GET_BCM_ADAPTER(dev);
-
- if (ad->fw_download_done == false) {
- pr_notice(PFX "%s: link up failed (download in progress)\n",
- dev->name);
- return -EBUSY;
- }
-
- if (netif_msg_ifup(ad))
- pr_info(PFX "%s: enabling interface\n", dev->name);
-
- if (ad->LinkUpStatus) {
- if (netif_msg_link(ad))
- pr_info(PFX "%s: link up\n", dev->name);
-
- netif_carrier_on(ad->dev);
- netif_start_queue(ad->dev);
- }
-
- return 0;
-}
-
-static INT bcm_close(struct net_device *dev)
-{
- struct bcm_mini_adapter *ad = GET_BCM_ADAPTER(dev);
-
- if (netif_msg_ifdown(ad))
- pr_info(PFX "%s: disabling interface\n", dev->name);
-
- netif_carrier_off(dev);
- netif_stop_queue(dev);
-
- return 0;
-}
-
-static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
-{
- return ClassifyPacket(netdev_priv(dev), skb);
-}
-
-/*******************************************************************
-* Function - bcm_transmit()
-*
-* Description - This is the main transmit function for our virtual
-*				interface (eth0). It handles ARP packets, clones the
-*				packet, queues it on a suitable queue, and then calls
-*				transmit_packet().
-*
-* Parameter - skb - Pointer to the socket buffer structure
-* dev - Pointer to the virtual net device structure
-*
-*********************************************************************/
-
-static netdev_tx_t bcm_transmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct bcm_mini_adapter *ad = GET_BCM_ADAPTER(dev);
- u16 qindex = skb_get_queue_mapping(skb);
-
-
- if (ad->device_removed || !ad->LinkUpStatus)
- goto drop;
-
- if (ad->TransferMode != IP_PACKET_ONLY_MODE)
- goto drop;
-
- if (INVALID_QUEUE_INDEX == qindex)
- goto drop;
-
- if (ad->PackInfo[qindex].uiCurrentPacketsOnHost >=
- SF_MAX_ALLOWED_PACKETS_TO_BACKUP)
- return NETDEV_TX_BUSY;
-
- /* Now Enqueue the packet */
- if (netif_msg_tx_queued(ad))
- pr_info(PFX "%s: enqueueing packet to queue %d\n",
- dev->name, qindex);
-
- spin_lock(&ad->PackInfo[qindex].SFQueueLock);
- ad->PackInfo[qindex].uiCurrentBytesOnHost += skb->len;
- ad->PackInfo[qindex].uiCurrentPacketsOnHost++;
-
- *((B_UINT32 *) skb->cb + SKB_CB_LATENCY_OFFSET) = jiffies;
- ENQUEUEPACKET(ad->PackInfo[qindex].FirstTxQueue,
- ad->PackInfo[qindex].LastTxQueue, skb);
- atomic_inc(&ad->TotalPacketCount);
- spin_unlock(&ad->PackInfo[qindex].SFQueueLock);
-
- /* FIXME - this is racy and incorrect, replace with work queue */
- if (!atomic_read(&ad->TxPktAvail)) {
- atomic_set(&ad->TxPktAvail, 1);
- wake_up(&ad->tx_packet_wait_queue);
- }
- return NETDEV_TX_OK;
-
- drop:
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
-}
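
bcm_transmit above applies a simple back-pressure rule: if the per-service-flow backlog already holds its maximum number of packets it reports NETDEV_TX_BUSY, otherwise it updates the byte and packet counters under the queue lock and enqueues the skb. A stripped-down, single-threaded sketch of that bounded-enqueue decision follows; the queue structure and the limit are invented for illustration.

/* Bounded enqueue with a "busy" result, mirroring the back-pressure
 * check in bcm_transmit.  Structure names and the limit are invented.
 */
#include <stdio.h>

#define BACKLOG_LIMIT 4		/* stands in for SF_MAX_ALLOWED_PACKETS_TO_BACKUP */

struct demo_queue {
	unsigned int packets_on_host;
	unsigned int bytes_on_host;
};

enum tx_result { TX_OK, TX_BUSY };

static enum tx_result demo_enqueue(struct demo_queue *q, unsigned int len)
{
	if (q->packets_on_host >= BACKLOG_LIMIT)
		return TX_BUSY;		/* let the caller retry later */

	q->bytes_on_host += len;
	q->packets_on_host++;
	return TX_OK;
}

int main(void)
{
	struct demo_queue q = { 0, 0 };
	unsigned int i;

	for (i = 0; i < 6; i++) {
		enum tx_result r = demo_enqueue(&q, 100);

		printf("packet %u: %s (backlog %u)\n", i,
		       r == TX_OK ? "queued" : "busy", q.packets_on_host);
	}
	return 0;
}
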
-
-
-
-/**
-@ingroup init_functions
-Register other driver entry points with the kernel
-*/
-static const struct net_device_ops bcmNetDevOps = {
- .ndo_open = bcm_open,
- .ndo_stop = bcm_close,
- .ndo_start_xmit = bcm_transmit,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_select_queue = bcm_select_queue,
-};
-
-static struct device_type wimax_type = {
- .name = "wimax",
-};
-
-static int bcm_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- cmd->supported = 0;
- cmd->advertising = 0;
- cmd->speed = SPEED_10000;
- cmd->duplex = DUPLEX_FULL;
- cmd->port = PORT_TP;
- cmd->phy_address = 0;
- cmd->transceiver = XCVR_INTERNAL;
- cmd->autoneg = AUTONEG_DISABLE;
- cmd->maxtxpkt = 0;
- cmd->maxrxpkt = 0;
- return 0;
-}
-
-static void bcm_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- struct bcm_mini_adapter *ad = GET_BCM_ADAPTER(dev);
- struct bcm_interface_adapter *intf_ad = ad->pvInterfaceAdapter;
- struct usb_device *udev = interface_to_usbdev(intf_ad->interface);
-
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- snprintf(info->fw_version, sizeof(info->fw_version), "%u.%u",
- ad->uiFlashLayoutMajorVersion,
- ad->uiFlashLayoutMinorVersion);
-
- usb_make_path(udev, info->bus_info, sizeof(info->bus_info));
-}
-
-static u32 bcm_get_link(struct net_device *dev)
-{
- struct bcm_mini_adapter *ad = GET_BCM_ADAPTER(dev);
-
- return ad->LinkUpStatus;
-}
-
-static u32 bcm_get_msglevel(struct net_device *dev)
-{
- struct bcm_mini_adapter *ad = GET_BCM_ADAPTER(dev);
-
- return ad->msg_enable;
-}
-
-static void bcm_set_msglevel(struct net_device *dev, u32 level)
-{
- struct bcm_mini_adapter *ad = GET_BCM_ADAPTER(dev);
-
- ad->msg_enable = level;
-}
-
-static const struct ethtool_ops bcm_ethtool_ops = {
- .get_settings = bcm_get_settings,
- .get_drvinfo = bcm_get_drvinfo,
- .get_link = bcm_get_link,
- .get_msglevel = bcm_get_msglevel,
- .set_msglevel = bcm_set_msglevel,
-};
-
-int register_networkdev(struct bcm_mini_adapter *ad)
-{
- struct net_device *net = ad->dev;
- struct bcm_interface_adapter *intf_ad = ad->pvInterfaceAdapter;
- struct usb_interface *udev = intf_ad->interface;
- struct usb_device *xdev = intf_ad->udev;
-
- int result;
-
- net->netdev_ops = &bcmNetDevOps;
- net->ethtool_ops = &bcm_ethtool_ops;
- net->mtu = MTU_SIZE; /* 1400 Bytes */
- net->tx_queue_len = TX_QLEN;
- net->flags |= IFF_NOARP;
-
- netif_carrier_off(net);
-
- SET_NETDEV_DEVTYPE(net, &wimax_type);
-
- /* Read the MAC Address from EEPROM */
- result = ReadMacAddressFromNVM(ad);
- if (result != STATUS_SUCCESS) {
- dev_err(&udev->dev,
-			PFX "Error in reading the MAC address: %d", result);
- return -EIO;
- }
-
- result = register_netdev(net);
- if (result)
- return result;
-
- gblpnetdev = ad->dev;
-
- if (netif_msg_probe(ad))
- dev_info(&udev->dev, PFX "%s: register usb-%s-%s %pM\n",
- net->name, xdev->bus->bus_name, xdev->devpath,
- net->dev_addr);
-
- return 0;
-}
-
-void unregister_networkdev(struct bcm_mini_adapter *ad)
-{
- struct net_device *net = ad->dev;
- struct bcm_interface_adapter *intf_ad = ad->pvInterfaceAdapter;
- struct usb_interface *udev = intf_ad->interface;
- struct usb_device *xdev = intf_ad->udev;
-
- if (netif_msg_probe(ad))
- dev_info(&udev->dev, PFX "%s: unregister usb-%s%s\n",
- net->name, xdev->bus->bus_name, xdev->devpath);
-
- unregister_netdev(ad->dev);
-}
diff --git a/drivers/staging/bcm/CmHost.c b/drivers/staging/bcm/CmHost.c
deleted file mode 100644
index adca0ce4d05..00000000000
--- a/drivers/staging/bcm/CmHost.c
+++ /dev/null
@@ -1,2254 +0,0 @@
-/************************************************************
- * CMHOST.C
- * This file contains the routines for handling Connection
- * Management.
- ************************************************************/
-
-#include "headers.h"
-
-enum E_CLASSIFIER_ACTION {
- eInvalidClassifierAction,
- eAddClassifier,
- eReplaceClassifier,
- eDeleteClassifier
-};
-
-static ULONG GetNextTargetBufferLocation(struct bcm_mini_adapter *Adapter,
- B_UINT16 tid);
-static void restore_endianess_of_pstClassifierEntry(
- struct bcm_classifier_rule *pstClassifierEntry,
- enum bcm_ipaddr_context eIpAddrContext);
-
-static void apply_phs_rule_to_all_classifiers(
- register struct bcm_mini_adapter *Adapter,
- register UINT uiSearchRuleIndex,
- USHORT uVCID,
- struct bcm_phs_rule *sPhsRule,
- struct bcm_phs_rules *cPhsRule,
- struct bcm_add_indication_alt *pstAddIndication);
-
-/************************************************************
- * Function - SearchSfid
- *
- * Description - This routine searches the QoS queues for the
- *		   specified SFID passed as input parameter.
- *
- * Parameters - Adapter: Pointer to the Adapter structure
- * uiSfid : Given SFID for matching
- *
- * Returns - Queue index for this SFID(If matched)
- * Else Invalid Queue Index(If Not matched)
- ************************************************************/
-int SearchSfid(struct bcm_mini_adapter *Adapter, UINT uiSfid)
-{
- int i;
-
- for (i = (NO_OF_QUEUES-1); i >= 0; i--)
- if (Adapter->PackInfo[i].ulSFID == uiSfid)
- return i;
-
- return NO_OF_QUEUES+1;
-}
-
-/***************************************************************
- * Function -SearchFreeSfid
- *
- * Description - This routine searches for a free (available) SFID.
- *
- * Parameter - Adapter: Pointer to the Adapter structure
- *
- * Returns - Queue index for the free SFID
- * Else returns Invalid Index.
- ****************************************************************/
-static int SearchFreeSfid(struct bcm_mini_adapter *Adapter)
-{
- int i;
-
- for (i = 0; i < (NO_OF_QUEUES-1); i++)
- if (Adapter->PackInfo[i].ulSFID == 0)
- return i;
-
- return NO_OF_QUEUES+1;
-}
-
-/*
- * Function: SearchClsid
- * Description: This routine searches for the classifier with the specified classifier ID.
- * Input parameters: struct bcm_mini_adapter *Adapter - Adapter Context
- *			unsigned int uiSfid   - The SF in which the classifier is to be searched
- * B_UINT16 uiClassifierID - The classifier ID to be searched
- * Return: int :Classifier table index of matching entry
- */
-static int SearchClsid(struct bcm_mini_adapter *Adapter,
- ULONG ulSFID,
- B_UINT16 uiClassifierID)
-{
- int i;
-
- for (i = 0; i < MAX_CLASSIFIERS; i++) {
- if ((Adapter->astClassifierTable[i].bUsed) &&
- (Adapter->astClassifierTable[i].uiClassifierRuleIndex
- == uiClassifierID) &&
- (Adapter->astClassifierTable[i].ulSFID == ulSFID))
- return i;
- }
-
- return MAX_CLASSIFIERS+1;
-}
-
-/*
- * @ingroup ctrl_pkt_functions
- * This routine searches for a free classifier entry in the classifier table.
- * @return free Classifier Entry index in classifier table for specified SF
- */
-static int SearchFreeClsid(struct bcm_mini_adapter *Adapter /**Adapter Context*/)
-{
- int i;
-
- for (i = 0; i < MAX_CLASSIFIERS; i++) {
- if (!Adapter->astClassifierTable[i].bUsed)
- return i;
- }
-
- return MAX_CLASSIFIERS+1;
-}
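
SearchSfid, SearchFreeSfid, SearchClsid and SearchFreeClsid above all follow the same convention: a linear scan that returns a valid table index on success and a value one past the table size (NO_OF_QUEUES+1 or MAX_CLASSIFIERS+1) as a "not found" sentinel, which callers test with a "> MAX" comparison. A compact sketch of that convention follows, using an invented table and field names.

/* Linear search with an out-of-range sentinel, mirroring the helpers
 * above.  The table, its size and the "id" field are illustrative.
 */
#include <stdio.h>

#define TABLE_SIZE 8

struct demo_entry {
	int used;
	unsigned long id;
};

static struct demo_entry table[TABLE_SIZE];

/* Returns a valid index, or TABLE_SIZE + 1 when no entry matches. */
static int search_id(unsigned long id)
{
	int i;

	for (i = 0; i < TABLE_SIZE; i++)
		if (table[i].used && table[i].id == id)
			return i;

	return TABLE_SIZE + 1;
}

int main(void)
{
	int idx;

	table[3].used = 1;
	table[3].id = 42;

	idx = search_id(42);
	if (idx > TABLE_SIZE)		/* sentinel check, as in the driver */
		printf("id 42 not found\n");
	else
		printf("id 42 found at index %d\n", idx);

	idx = search_id(7);
	printf("id 7 %s\n", idx > TABLE_SIZE ? "not found" : "found");
	return 0;
}
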
-
-static VOID deleteSFBySfid(struct bcm_mini_adapter *Adapter,
- UINT uiSearchRuleIndex)
-{
-	/* delete all the packets held in the SF */
-	flush_queue(Adapter, uiSearchRuleIndex);
-
-	/* Delete all the classifiers for this SF */
- DeleteAllClassifiersForSF(Adapter, uiSearchRuleIndex);
-
- /* Resetting only MIBS related entries in the SF */
- memset((PVOID)&Adapter->PackInfo[uiSearchRuleIndex], 0,
- sizeof(struct bcm_mibs_table));
-}
-
-static inline VOID
-CopyIpAddrToClassifier(struct bcm_classifier_rule *pstClassifierEntry,
- B_UINT8 u8IpAddressLen, B_UINT8 *pu8IpAddressMaskSrc,
- bool bIpVersion6, enum bcm_ipaddr_context eIpAddrContext)
-{
- int i = 0;
- UINT nSizeOfIPAddressInBytes = IP_LENGTH_OF_ADDRESS;
- UCHAR *ptrClassifierIpAddress = NULL;
- UCHAR *ptrClassifierIpMask = NULL;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- if (bIpVersion6)
- nSizeOfIPAddressInBytes = IPV6_ADDRESS_SIZEINBYTES;
-
- /* Destination Ip Address */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Ip Address Range Length:0x%X ", u8IpAddressLen);
- if ((bIpVersion6 ? (IPV6_ADDRESS_SIZEINBYTES * MAX_IP_RANGE_LENGTH * 2) :
- (TOTAL_MASKED_ADDRESS_IN_BYTES)) >= u8IpAddressLen) {
-
- union u_ip_address *st_dest_ip =
- &pstClassifierEntry->stDestIpAddress;
-
- union u_ip_address *st_src_ip =
- &pstClassifierEntry->stSrcIpAddress;
-
- /*
-			 * Both the mask and the address are checked together in classification,
-			 * so the length will be: TotalLengthInBytes / (nSizeOfIPAddressInBytes * 2)
-			 * (nSizeOfIPAddressInBytes for the address and nSizeOfIPAddressInBytes for the mask)
- */
- if (eIpAddrContext == eDestIpAddress) {
- pstClassifierEntry->ucIPDestinationAddressLength =
- u8IpAddressLen/(nSizeOfIPAddressInBytes * 2);
- if (bIpVersion6) {
- ptrClassifierIpAddress =
- st_dest_ip->ucIpv6Address;
- ptrClassifierIpMask =
- st_dest_ip->ucIpv6Mask;
- } else {
- ptrClassifierIpAddress =
- st_dest_ip->ucIpv4Address;
- ptrClassifierIpMask =
- st_dest_ip->ucIpv4Mask;
- }
- } else if (eIpAddrContext == eSrcIpAddress) {
- pstClassifierEntry->ucIPSourceAddressLength =
- u8IpAddressLen/(nSizeOfIPAddressInBytes * 2);
- if (bIpVersion6) {
- ptrClassifierIpAddress =
- st_src_ip->ucIpv6Address;
- ptrClassifierIpMask = st_src_ip->ucIpv6Mask;
- } else {
- ptrClassifierIpAddress =
- st_src_ip->ucIpv4Address;
- ptrClassifierIpMask = st_src_ip->ucIpv4Mask;
- }
- }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Address Length:0x%X\n",
- pstClassifierEntry->ucIPDestinationAddressLength);
- while ((u8IpAddressLen >= nSizeOfIPAddressInBytes)
- && (i < MAX_IP_RANGE_LENGTH)) {
- memcpy(ptrClassifierIpAddress +
- (i * nSizeOfIPAddressInBytes),
- (pu8IpAddressMaskSrc
- + (i * nSizeOfIPAddressInBytes * 2)),
- nSizeOfIPAddressInBytes);
-
- if (!bIpVersion6) {
- if (eIpAddrContext == eSrcIpAddress) {
- st_src_ip->ulIpv4Addr[i] =
- ntohl(st_src_ip->ulIpv4Addr[i]);
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_OTHERS,
- CONN_MSG,
- DBG_LVL_ALL,
- "Src Ip Address:0x%luX ",
- st_src_ip->ulIpv4Addr[i]);
- } else if (eIpAddrContext == eDestIpAddress) {
- st_dest_ip->ulIpv4Addr[i] =
- ntohl(st_dest_ip->ulIpv4Addr[i]);
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_OTHERS,
- CONN_MSG,
- DBG_LVL_ALL,
- "Dest Ip Address:0x%luX ",
- st_dest_ip->ulIpv4Addr[i]);
- }
- }
- u8IpAddressLen -= nSizeOfIPAddressInBytes;
- if (u8IpAddressLen >= nSizeOfIPAddressInBytes) {
- memcpy(ptrClassifierIpMask +
- (i * nSizeOfIPAddressInBytes),
- (pu8IpAddressMaskSrc
- + nSizeOfIPAddressInBytes
- + (i * nSizeOfIPAddressInBytes * 2)),
- nSizeOfIPAddressInBytes);
-
- if (!bIpVersion6) {
- if (eIpAddrContext == eSrcIpAddress) {
- st_src_ip->ulIpv4Mask[i] =
- ntohl(st_src_ip->ulIpv4Mask[i]);
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_OTHERS,
- CONN_MSG,
- DBG_LVL_ALL,
- "Src Ip Mask Address:0x%luX ",
- st_src_ip->ulIpv4Mask[i]);
- } else if (eIpAddrContext == eDestIpAddress) {
- st_dest_ip->ulIpv4Mask[i] =
- ntohl(st_dest_ip->ulIpv4Mask[i]);
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_OTHERS,
- CONN_MSG,
- DBG_LVL_ALL,
- "Dest Ip Mask Address:0x%luX ",
- st_dest_ip->ulIpv4Mask[i]);
- }
- }
- u8IpAddressLen -= nSizeOfIPAddressInBytes;
- }
- if (u8IpAddressLen == 0)
- pstClassifierEntry->bDestIpValid = TRUE;
-
- i++;
- }
- if (bIpVersion6) {
- /* Restore EndianNess of Struct */
- restore_endianess_of_pstClassifierEntry(
- pstClassifierEntry,
- eIpAddrContext
- );
- }
- }
-}
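
CopyIpAddrToClassifier above parses a TLV in which each address range is carried as an (address, mask) pair, so the number of ranges is the total length divided by twice the per-address size (4 bytes for IPv4, 16 for IPv6); a 16-byte IPv4 field, for example, holds two address/mask pairs. A small sketch of the same de-interleaving for IPv4 follows; the buffer contents and array names are illustrative only.

/* De-interleave an (address, mask) pair buffer the way the classifier
 * copy routine above does for IPv4.  The input bytes are made up.
 */
#include <stdio.h>
#include <string.h>

#define ADDR_LEN	4	/* IPv4 address size in bytes */
#define MAX_RANGES	4

int main(void)
{
	/* Two ranges: (10.0.0.1, /24 mask) and (192.168.1.0, /16 mask) */
	unsigned char tlv[] = {
		10, 0, 0, 1,		255, 255, 255, 0,
		192, 168, 1, 0,		255, 255, 0, 0,
	};
	unsigned int len = sizeof(tlv);
	unsigned int ranges = len / (ADDR_LEN * 2);	/* = 2 */
	unsigned char addr[MAX_RANGES][ADDR_LEN];
	unsigned char mask[MAX_RANGES][ADDR_LEN];
	unsigned int i;

	for (i = 0; i < ranges && i < MAX_RANGES; i++) {
		memcpy(addr[i], tlv + i * ADDR_LEN * 2, ADDR_LEN);
		memcpy(mask[i], tlv + i * ADDR_LEN * 2 + ADDR_LEN, ADDR_LEN);
		printf("range %u: %d.%d.%d.%d / %d.%d.%d.%d\n", i,
		       addr[i][0], addr[i][1], addr[i][2], addr[i][3],
		       mask[i][0], mask[i][1], mask[i][2], mask[i][3]);
	}
	return 0;
}
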
-
-void ClearTargetDSXBuffer(struct bcm_mini_adapter *Adapter, B_UINT16 TID, bool bFreeAll)
-{
- int i;
- struct bcm_targetdsx_buffer *curr_buf;
-
- for (i = 0; i < Adapter->ulTotalTargetBuffersAvailable; i++) {
- curr_buf = &Adapter->astTargetDsxBuffer[i];
-
- if (curr_buf->valid)
- continue;
-
- if ((bFreeAll) || (curr_buf->tid == TID)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "ClearTargetDSXBuffer: found tid %d buffer cleared %lx\n",
- TID, curr_buf->ulTargetDsxBuffer);
- curr_buf->valid = 1;
- curr_buf->tid = 0;
- Adapter->ulFreeTargetBufferCnt++;
- }
- }
-}
-
-/*
- * @ingroup ctrl_pkt_functions
- * copy classifier rule into the specified SF index
- */
-static inline VOID CopyClassifierRuleToSF(struct bcm_mini_adapter *Adapter,
- struct bcm_convergence_types *psfCSType,
- UINT uiSearchRuleIndex,
- UINT nClassifierIndex)
-{
- struct bcm_classifier_rule *pstClassifierEntry = NULL;
- /* VOID *pvPhsContext = NULL; */
- int i;
- /* UCHAR ucProtocolLength=0; */
- /* ULONG ulPhsStatus; */
-
- struct bcm_packet_class_rules *pack_class_rule =
- &psfCSType->cCPacketClassificationRule;
-
- if (Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value == 0 ||
- nClassifierIndex > (MAX_CLASSIFIERS-1))
- return;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Storing Classifier Rule Index : %X",
- ntohs(pack_class_rule->u16PacketClassificationRuleIndex));
-
- if (nClassifierIndex > MAX_CLASSIFIERS-1)
- return;
-
- pstClassifierEntry = &Adapter->astClassifierTable[nClassifierIndex];
- if (pstClassifierEntry) {
- /* Store if Ipv6 */
- pstClassifierEntry->bIpv6Protocol =
- (Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6) ? TRUE : false;
-
-		/* Destination Port */
- pstClassifierEntry->ucDestPortRangeLength =
- pack_class_rule->u8ProtocolDestPortRangeLength / 4;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Destination Port Range Length:0x%X ",
- pstClassifierEntry->ucDestPortRangeLength);
-
- if (pack_class_rule->u8ProtocolDestPortRangeLength <= MAX_PORT_RANGE) {
- for (i = 0; i < (pstClassifierEntry->ucDestPortRangeLength); i++) {
- pstClassifierEntry->usDestPortRangeLo[i] =
- *((PUSHORT)(pack_class_rule->u8ProtocolDestPortRange+i));
- pstClassifierEntry->usDestPortRangeHi[i] =
- *((PUSHORT)(pack_class_rule->u8ProtocolDestPortRange+2+i));
- pstClassifierEntry->usDestPortRangeLo[i] =
- ntohs(pstClassifierEntry->usDestPortRangeLo[i]);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- CONN_MSG, DBG_LVL_ALL,
- "Destination Port Range Lo:0x%X ",
- pstClassifierEntry->usDestPortRangeLo[i]);
- pstClassifierEntry->usDestPortRangeHi[i] =
- ntohs(pstClassifierEntry->usDestPortRangeHi[i]);
- }
- } else {
- pstClassifierEntry->ucDestPortRangeLength = 0;
- }
-
- /* Source Port */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Source Port Range Length:0x%X ",
- pack_class_rule->u8ProtocolSourcePortRangeLength);
- if (pack_class_rule->u8ProtocolSourcePortRangeLength <= MAX_PORT_RANGE) {
- pstClassifierEntry->ucSrcPortRangeLength =
- pack_class_rule->u8ProtocolSourcePortRangeLength/4;
- for (i = 0; i < (pstClassifierEntry->ucSrcPortRangeLength); i++) {
- pstClassifierEntry->usSrcPortRangeLo[i] =
- *((PUSHORT)(pack_class_rule->
- u8ProtocolSourcePortRange+i));
- pstClassifierEntry->usSrcPortRangeHi[i] =
- *((PUSHORT)(pack_class_rule->
- u8ProtocolSourcePortRange+2+i));
- pstClassifierEntry->usSrcPortRangeLo[i] =
- ntohs(pstClassifierEntry->usSrcPortRangeLo[i]);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- CONN_MSG, DBG_LVL_ALL,
- "Source Port Range Lo:0x%X ",
- pstClassifierEntry->usSrcPortRangeLo[i]);
- pstClassifierEntry->usSrcPortRangeHi[i] =
- ntohs(pstClassifierEntry->usSrcPortRangeHi[i]);
- }
- }
- /* Destination Ip Address and Mask */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Ip Destination Parameters : ");
- CopyIpAddrToClassifier(pstClassifierEntry,
- pack_class_rule->u8IPDestinationAddressLength,
- pack_class_rule->u8IPDestinationAddress,
- (Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6) ?
- TRUE : false, eDestIpAddress);
-
- /* Source Ip Address and Mask */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Ip Source Parameters : ");
-
- CopyIpAddrToClassifier(pstClassifierEntry,
- pack_class_rule->u8IPMaskedSourceAddressLength,
- pack_class_rule->u8IPMaskedSourceAddress,
- (Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6) ? TRUE : false,
- eSrcIpAddress);
-
- /* TOS */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "TOS Length:0x%X ",
- pack_class_rule->u8IPTypeOfServiceLength);
- if (pack_class_rule->u8IPTypeOfServiceLength == 3) {
- pstClassifierEntry->ucIPTypeOfServiceLength =
- pack_class_rule->u8IPTypeOfServiceLength;
- pstClassifierEntry->ucTosLow =
- pack_class_rule->u8IPTypeOfService[0];
- pstClassifierEntry->ucTosHigh =
- pack_class_rule->u8IPTypeOfService[1];
- pstClassifierEntry->ucTosMask =
- pack_class_rule->u8IPTypeOfService[2];
- pstClassifierEntry->bTOSValid = TRUE;
- }
- if (pack_class_rule->u8Protocol == 0) {
- /* we didn't get protocol field filled in by the BS */
- pstClassifierEntry->ucProtocolLength = 0;
- } else {
- pstClassifierEntry->ucProtocolLength = 1; /* 1 valid protocol */
- }
-
- pstClassifierEntry->ucProtocol[0] = pack_class_rule->u8Protocol;
- pstClassifierEntry->u8ClassifierRulePriority =
- pack_class_rule->u8ClassifierRulePriority;
-
- /* store the classifier rule ID and set this classifier entry as valid */
- pstClassifierEntry->ucDirection =
- Adapter->PackInfo[uiSearchRuleIndex].ucDirection;
- pstClassifierEntry->uiClassifierRuleIndex =
- ntohs(pack_class_rule->u16PacketClassificationRuleIndex);
- pstClassifierEntry->usVCID_Value =
- Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value;
- pstClassifierEntry->ulSFID =
- Adapter->PackInfo[uiSearchRuleIndex].ulSFID;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Search Index %d Dir: %d, Index: %d, Vcid: %d\n",
- uiSearchRuleIndex,
- pstClassifierEntry->ucDirection,
- pstClassifierEntry->uiClassifierRuleIndex,
- pstClassifierEntry->usVCID_Value);
-
- if (pack_class_rule->u8AssociatedPHSI)
- pstClassifierEntry->u8AssociatedPHSI =
- pack_class_rule->u8AssociatedPHSI;
-
- /* Copy ETH CS Parameters */
- pstClassifierEntry->ucEthCSSrcMACLen =
- (pack_class_rule->u8EthernetSourceMACAddressLength);
- memcpy(pstClassifierEntry->au8EThCSSrcMAC,
- pack_class_rule->u8EthernetSourceMACAddress,
- MAC_ADDRESS_SIZE);
- memcpy(pstClassifierEntry->au8EThCSSrcMACMask,
- pack_class_rule->u8EthernetSourceMACAddress
- + MAC_ADDRESS_SIZE, MAC_ADDRESS_SIZE);
- pstClassifierEntry->ucEthCSDestMACLen =
- (pack_class_rule->u8EthernetDestMacAddressLength);
- memcpy(pstClassifierEntry->au8EThCSDestMAC,
- pack_class_rule->u8EthernetDestMacAddress,
- MAC_ADDRESS_SIZE);
- memcpy(pstClassifierEntry->au8EThCSDestMACMask,
- pack_class_rule->u8EthernetDestMacAddress
- + MAC_ADDRESS_SIZE, MAC_ADDRESS_SIZE);
- pstClassifierEntry->ucEtherTypeLen =
- (pack_class_rule->u8EthertypeLength);
- memcpy(pstClassifierEntry->au8EthCSEtherType,
- pack_class_rule->u8Ethertype,
- NUM_ETHERTYPE_BYTES);
- memcpy(pstClassifierEntry->usUserPriority,
- &pack_class_rule->u16UserPriority, 2);
- pstClassifierEntry->usVLANID =
- ntohs(pack_class_rule->u16VLANID);
- pstClassifierEntry->usValidityBitMap =
- ntohs(pack_class_rule->u16ValidityBitMap);
-
- pstClassifierEntry->bUsed = TRUE;
- }
-}
-
-/*
- * @ingroup ctrl_pkt_functions
- */
-static inline VOID DeleteClassifierRuleFromSF(struct bcm_mini_adapter *Adapter,
- UINT uiSearchRuleIndex, UINT nClassifierIndex)
-{
- struct bcm_classifier_rule *pstClassifierEntry = NULL;
- B_UINT16 u16PacketClassificationRuleIndex;
- USHORT usVCID;
- /* VOID *pvPhsContext = NULL; */
- /*ULONG ulPhsStatus; */
-
- usVCID = Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value;
-
- if (nClassifierIndex > MAX_CLASSIFIERS-1)
- return;
-
- if (usVCID == 0)
- return;
-
- u16PacketClassificationRuleIndex =
- Adapter->astClassifierTable[nClassifierIndex].uiClassifierRuleIndex;
- pstClassifierEntry = &Adapter->astClassifierTable[nClassifierIndex];
- if (pstClassifierEntry) {
- pstClassifierEntry->bUsed = false;
- pstClassifierEntry->uiClassifierRuleIndex = 0;
- memset(pstClassifierEntry, 0,
- sizeof(struct bcm_classifier_rule));
-
- /* Delete the PHS Rule for this classifier */
- PhsDeleteClassifierRule(&Adapter->stBCMPhsContext, usVCID,
- u16PacketClassificationRuleIndex);
- }
-}
-
-/*
- * @ingroup ctrl_pkt_functions
- */
-VOID DeleteAllClassifiersForSF(struct bcm_mini_adapter *Adapter,
- UINT uiSearchRuleIndex)
-{
- struct bcm_classifier_rule *pstClassifierEntry = NULL;
- int i;
- /* B_UINT16 u16PacketClassificationRuleIndex; */
- USHORT ulVCID;
- /* VOID *pvPhsContext = NULL; */
- /* ULONG ulPhsStatus; */
-
- ulVCID = Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value;
-
- if (ulVCID == 0)
- return;
-
- for (i = 0; i < MAX_CLASSIFIERS; i++) {
- if (Adapter->astClassifierTable[i].usVCID_Value == ulVCID) {
- pstClassifierEntry = &Adapter->astClassifierTable[i];
-
- if (pstClassifierEntry->bUsed)
- DeleteClassifierRuleFromSF(Adapter,
- uiSearchRuleIndex, i);
- }
- }
-
- /* Delete All Phs Rules Associated with this SF */
- PhsDeleteSFRules(&Adapter->stBCMPhsContext, ulVCID);
-}
-
-/*
- * This routine copies the Connection Management
- * related data into the Adapter structure.
- * @ingroup ctrl_pkt_functions
- */
-static VOID CopyToAdapter(register struct bcm_mini_adapter *Adapter, /* <Pointer to the Adapter structure */
- register struct bcm_connect_mgr_params *psfLocalSet, /* Pointer to the connection manager parameters structure */
- register UINT uiSearchRuleIndex, /* <Index of Queue, to which this data belongs */
- register UCHAR ucDsxType,
- struct bcm_add_indication_alt *pstAddIndication) {
-
- /* UCHAR ucProtocolLength = 0; */
- ULONG ulSFID;
- UINT nClassifierIndex = 0;
- enum E_CLASSIFIER_ACTION eClassifierAction = eInvalidClassifierAction;
- B_UINT16 u16PacketClassificationRuleIndex = 0;
- int i;
- struct bcm_convergence_types *psfCSType = NULL;
- struct bcm_phs_rule sPhsRule;
- struct bcm_packet_info *curr_packinfo =
- &Adapter->PackInfo[uiSearchRuleIndex];
- USHORT uVCID = curr_packinfo->usVCID_Value;
- UINT UGIValue = 0;
-
- curr_packinfo->bValid = TRUE;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Search Rule Index = %d\n", uiSearchRuleIndex);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "%s: SFID= %x ", __func__, ntohl(psfLocalSet->u32SFID));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Updating Queue %d", uiSearchRuleIndex);
-
- ulSFID = ntohl(psfLocalSet->u32SFID);
- /* Store IP Version used */
- /* Get The Version Of IP used (IPv6 or IPv4) from CSSpecification field of SF */
-
- curr_packinfo->bIPCSSupport = 0;
- curr_packinfo->bEthCSSupport = 0;
-
- /* Enable IP/ETh CS Support As Required */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "CopyToAdapter : u8CSSpecification : %X\n",
- psfLocalSet->u8CSSpecification);
- switch (psfLocalSet->u8CSSpecification) {
- case eCSPacketIPV4:
- curr_packinfo->bIPCSSupport = IPV4_CS;
- break;
- case eCSPacketIPV6:
- curr_packinfo->bIPCSSupport = IPV6_CS;
- break;
- case eCS802_3PacketEthernet:
- case eCS802_1QPacketVLAN:
- curr_packinfo->bEthCSSupport = ETH_CS_802_3;
- break;
- case eCSPacketIPV4Over802_1QVLAN:
- case eCSPacketIPV4Over802_3Ethernet:
- curr_packinfo->bIPCSSupport = IPV4_CS;
- curr_packinfo->bEthCSSupport = ETH_CS_802_3;
- break;
- case eCSPacketIPV6Over802_1QVLAN:
- case eCSPacketIPV6Over802_3Ethernet:
- curr_packinfo->bIPCSSupport = IPV6_CS;
- curr_packinfo->bEthCSSupport = ETH_CS_802_3;
- break;
- default:
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Error in value of CS Classification.. setting default to IP CS\n");
- curr_packinfo->bIPCSSupport = IPV4_CS;
- break;
- }
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "CopyToAdapter : Queue No : %X ETH CS Support : %X , IP CS Support : %X\n",
- uiSearchRuleIndex,
- curr_packinfo->bEthCSSupport,
- curr_packinfo->bIPCSSupport);
-
- /* Store IP Version used */
- /* Get The Version Of IP used (IPv6 or IPv4) from CSSpecification field of SF */
- if (curr_packinfo->bIPCSSupport == IPV6_CS)
- curr_packinfo->ucIpVersion = IPV6;
- else
- curr_packinfo->ucIpVersion = IPV4;
-
-	/* Ensure that the ETH CS code doesn't get executed if the BS doesn't support ETH CS */
- if (!Adapter->bETHCSEnabled)
- curr_packinfo->bEthCSSupport = 0;
-
- if (psfLocalSet->u8ServiceClassNameLength > 0 && psfLocalSet->u8ServiceClassNameLength < 32)
- memcpy(curr_packinfo->ucServiceClassName,
- psfLocalSet->u8ServiceClassName,
- psfLocalSet->u8ServiceClassNameLength);
-
- curr_packinfo->u8QueueType = psfLocalSet->u8ServiceFlowSchedulingType;
-
- if (curr_packinfo->u8QueueType == BE && curr_packinfo->ucDirection)
- Adapter->usBestEffortQueueIndex = uiSearchRuleIndex;
-
- curr_packinfo->ulSFID = ntohl(psfLocalSet->u32SFID);
-
- curr_packinfo->u8TrafficPriority = psfLocalSet->u8TrafficPriority;
-
- /* copy all the classifier in the Service Flow param structure */
- for (i = 0; i < psfLocalSet->u8TotalClassifiers; i++) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Classifier index =%d", i);
- psfCSType = &psfLocalSet->cConvergenceSLTypes[i];
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Classifier index =%d", i);
-
- if (psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority)
- curr_packinfo->bClassifierPriority = TRUE;
-
- if (psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority)
- curr_packinfo->bClassifierPriority = TRUE;
-
- if (ucDsxType == DSA_ACK) {
- eClassifierAction = eAddClassifier;
- } else if (ucDsxType == DSC_ACK) {
- switch (psfCSType->u8ClassfierDSCAction) {
- case 0: /* DSC Add Classifier */
- eClassifierAction = eAddClassifier;
- break;
- case 1: /* DSC Replace Classifier */
- eClassifierAction = eReplaceClassifier;
- break;
- case 2: /* DSC Delete Classifier */
- eClassifierAction = eDeleteClassifier;
- break;
- default:
- eClassifierAction = eInvalidClassifierAction;
- }
- }
-
- u16PacketClassificationRuleIndex = ntohs(psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
-
- switch (eClassifierAction) {
- case eAddClassifier:
- /* Get a Free Classifier Index From Classifier table for this SF to add the Classifier */
- /* Contained in this message */
- nClassifierIndex = SearchClsid(Adapter,
- ulSFID,
- u16PacketClassificationRuleIndex);
-
- if (nClassifierIndex > MAX_CLASSIFIERS) {
- nClassifierIndex = SearchFreeClsid(Adapter);
- if (nClassifierIndex > MAX_CLASSIFIERS) {
- /* Failed To get a free Entry */
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_OTHERS,
- CONN_MSG,
- DBG_LVL_ALL,
- "Error Failed To get a free Classifier Entry");
- break;
- }
- /* Copy the Classifier Rule for this service flow into our Classifier table maintained per SF. */
- CopyClassifierRuleToSF(Adapter, psfCSType,
- uiSearchRuleIndex,
- nClassifierIndex);
- } else {
- /* This Classifier Already Exists and it is invalid to Add Classifier with existing PCRI */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- CONN_MSG,
- DBG_LVL_ALL,
- "CopyToAdapter: Error The Specified Classifier Already Exists and attempted To Add Classifier with Same PCRI : 0x%x\n",
- u16PacketClassificationRuleIndex);
- }
- break;
- case eReplaceClassifier:
- /* Get the Classifier Index From Classifier table for this SF and replace existing Classifier */
- /* with the new classifier Contained in this message */
- nClassifierIndex = SearchClsid(Adapter, ulSFID,
- u16PacketClassificationRuleIndex);
- if (nClassifierIndex > MAX_CLASSIFIERS) {
- /* Failed To search the classifier */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- CONN_MSG, DBG_LVL_ALL,
- "Error Search for Classifier To be replaced failed");
- break;
- }
- /* Copy the Classifier Rule for this service flow into our Classifier table maintained per SF. */
- CopyClassifierRuleToSF(Adapter, psfCSType,
- uiSearchRuleIndex, nClassifierIndex);
- break;
- case eDeleteClassifier:
- /* Get the Classifier Index From Classifier table for this SF and replace existing Classifier */
- /* with the new classifier Contained in this message */
- nClassifierIndex = SearchClsid(Adapter, ulSFID,
- u16PacketClassificationRuleIndex);
- if (nClassifierIndex > MAX_CLASSIFIERS) {
- /* Failed To search the classifier */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- CONN_MSG, DBG_LVL_ALL,
- "Error Search for Classifier To be deleted failed");
- break;
- }
-
- /* Delete This classifier */
- DeleteClassifierRuleFromSF(Adapter, uiSearchRuleIndex,
- nClassifierIndex);
- break;
- default:
- /* Invalid Action for classifier */
- break;
- }
- }
-
- /* Repeat parsing Classification Entries to process PHS Rules */
- for (i = 0; i < psfLocalSet->u8TotalClassifiers; i++) {
- psfCSType = &psfLocalSet->cConvergenceSLTypes[i];
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "psfCSType->u8PhsDSCAction : 0x%x\n",
- psfCSType->u8PhsDSCAction);
-
- switch (psfCSType->u8PhsDSCAction) {
- case eDeleteAllPHSRules:
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG,
- DBG_LVL_ALL,
- "Deleting All PHS Rules For VCID: 0x%X\n",
- uVCID);
-
- /* Delete All the PHS rules for this Service flow */
- PhsDeleteSFRules(&Adapter->stBCMPhsContext, uVCID);
- break;
- case eDeletePHSRule:
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG,
- DBG_LVL_ALL,
- "PHS DSC Action = Delete PHS Rule\n");
-
- if (psfCSType->cPhsRule.u8PHSI)
- PhsDeletePHSRule(&Adapter->stBCMPhsContext,
- uVCID,
- psfCSType->cCPacketClassificationRule.u8AssociatedPHSI);
-
- break;
- default:
- if (ucDsxType == DSC_ACK) {
- /* BCM_DEBUG_PRINT(CONN_MSG,("Invalid PHS DSC Action For DSC\n",psfCSType->cPhsRule.u8PHSI)); */
-				break; /* For the DSC ACK case, the PHS DSC action must be in the valid set */
- }
-			/* Proceed to add the PHS rule for the DSA_ACK case even if the PHS DSC action is unspecified */
-			/* Intentional fall-through: no break here */
-
- case eAddPHSRule:
- case eSetPHSRule:
- if (psfCSType->cPhsRule.u8PHSI) {
- /* Apply This PHS Rule to all classifiers whose Associated PHSI Match */
- apply_phs_rule_to_all_classifiers(Adapter,
- uiSearchRuleIndex,
- uVCID,
- &sPhsRule,
- &psfCSType->cPhsRule,
- pstAddIndication);
- }
- break;
- }
- }
-
- if (psfLocalSet->u32MaxSustainedTrafficRate == 0) {
- /* No Rate Limit . Set Max Sustained Traffic Rate to Maximum */
- curr_packinfo->uiMaxAllowedRate = WIMAX_MAX_ALLOWED_RATE;
- } else if (ntohl(psfLocalSet->u32MaxSustainedTrafficRate) > WIMAX_MAX_ALLOWED_RATE) {
-		/* Allowed rate specified is too large; limiting to the WiMAX max allowed rate */
- curr_packinfo->uiMaxAllowedRate = WIMAX_MAX_ALLOWED_RATE;
- } else {
- curr_packinfo->uiMaxAllowedRate =
- ntohl(psfLocalSet->u32MaxSustainedTrafficRate);
- }
-
- curr_packinfo->uiMaxLatency = ntohl(psfLocalSet->u32MaximumLatency);
- if (curr_packinfo->uiMaxLatency == 0) /* 0 should be treated as infinite */
- curr_packinfo->uiMaxLatency = MAX_LATENCY_ALLOWED;
-
- if ((curr_packinfo->u8QueueType == ERTPS ||
- curr_packinfo->u8QueueType == UGS))
- UGIValue = ntohs(psfLocalSet->u16UnsolicitedGrantInterval);
-
- if (UGIValue == 0)
- UGIValue = DEFAULT_UG_INTERVAL;
-
- /*
-	 * For UGI-based connections,
-	 * DEFAULT_UGI_FACTOR * UGIInterval worth of data is the max token count at the host.
-	 * The extra tokens ensure that a large amount of jitter does not cause a loss in throughput.
-	 * For non-UGI-based connections, 200 frames worth of data is the max token count at the host.
- */
- curr_packinfo->uiMaxBucketSize =
- (DEFAULT_UGI_FACTOR*curr_packinfo->uiMaxAllowedRate*UGIValue)/1000;
-
- if (curr_packinfo->uiMaxBucketSize < WIMAX_MAX_MTU*8) {
- UINT UGIFactor = 0;
-		/* Special handling to ensure the largest packet can go out from host to FW:
-		 * 1. Packets from host to FW can be of varying sizes.
-		 * 2. If the bucket count is smaller than the MTU, packets larger than the token count would get dropped.
-		 * 3. Allow max-size packets from host to FW; the FW can send them out as multiple SDUs via fragmentation at the WiMAX layer.
-		 */
- UGIFactor = (curr_packinfo->uiMaxLatency/UGIValue + 1);
-
- if (UGIFactor > DEFAULT_UGI_FACTOR)
- curr_packinfo->uiMaxBucketSize =
- (UGIFactor*curr_packinfo->uiMaxAllowedRate*UGIValue)/1000;
-
- if (curr_packinfo->uiMaxBucketSize > WIMAX_MAX_MTU*8)
- curr_packinfo->uiMaxBucketSize = WIMAX_MAX_MTU*8;
- }
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "LAT: %d, UGI: %d\n", curr_packinfo->uiMaxLatency,
- UGIValue);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "uiMaxAllowedRate: 0x%x, u32MaxSustainedTrafficRate: 0x%x ,uiMaxBucketSize: 0x%x",
- curr_packinfo->uiMaxAllowedRate,
- ntohl(psfLocalSet->u32MaxSustainedTrafficRate),
- curr_packinfo->uiMaxBucketSize);
-
- /* copy the extended SF Parameters to Support MIBS */
- CopyMIBSExtendedSFParameters(Adapter, psfLocalSet, uiSearchRuleIndex);
-
- /* store header suppression enabled flag per SF */
- curr_packinfo->bHeaderSuppressionEnabled =
- !(psfLocalSet->u8RequesttransmissionPolicy &
- MASK_DISABLE_HEADER_SUPPRESSION);
-
- kfree(curr_packinfo->pstSFIndication);
- curr_packinfo->pstSFIndication = pstAddIndication;
-
- /* Re Sort the SF list in PackInfo according to Traffic Priority */
- SortPackInfo(Adapter);
-
- /* Re Sort the Classifier Rules table and re - arrange
- * according to Classifier Rule Priority
- */
- SortClassifiers(Adapter);
- DumpPhsRules(&Adapter->stBCMPhsContext);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "%s <=====", __func__);
-}
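
The bucket sizing inside CopyToAdapter above is simple token-bucket arithmetic: the host may buffer roughly DEFAULT_UGI_FACTOR grant intervals worth of data at the allowed rate, i.e. bucket = factor * rate * UGI / 1000, and if that comes out smaller than WIMAX_MAX_MTU*8 the factor is scaled up from the maximum latency so at least one maximum-size burst fits. A worked example with invented numbers follows; the real DEFAULT_UGI_FACTOR, WIMAX_MAX_MTU and rate values are driver constants not reproduced here, and the rate is assumed to be per second with the grant interval in milliseconds.

/* Worked example of the token-bucket sizing above.  The factor, rate,
 * interval and MTU values are invented for illustration.
 */
#include <stdio.h>

#define DEMO_UGI_FACTOR		4		/* stands in for DEFAULT_UGI_FACTOR */
#define DEMO_MAX_MTU_BITS	(1400 * 8)	/* stands in for WIMAX_MAX_MTU * 8 */

int main(void)
{
	unsigned int rate = 100000;	/* allowed rate, units per second */
	unsigned int ugi = 20;		/* unsolicited grant interval, ms */
	unsigned int max_latency = 500;	/* ms */
	unsigned int bucket;

	bucket = (DEMO_UGI_FACTOR * rate * ugi) / 1000;		/* = 8000 */
	printf("initial bucket: %u\n", bucket);

	if (bucket < DEMO_MAX_MTU_BITS) {
		/* Scale the factor from the latency budget, as the driver does */
		unsigned int factor = max_latency / ugi + 1;	/* = 26 */

		if (factor > DEMO_UGI_FACTOR)
			bucket = (factor * rate * ugi) / 1000;	/* = 52000 */
		if (bucket > DEMO_MAX_MTU_BITS)
			bucket = DEMO_MAX_MTU_BITS;		/* capped at 11200 */
	}
	printf("final bucket: %u\n", bucket);
	return 0;
}
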
-
-/***********************************************************************
- * Function - DumpCmControlPacket
- *
- * Description - This routine dumps the contents of the AddIndication
- * Structure in the Connection Management Control Packet
- *
- * Parameter - pvBuffer: Pointer to the buffer containing the
- * AddIndication data.
- *
- * Returns - None
- *************************************************************************/
-static VOID DumpCmControlPacket(PVOID pvBuffer)
-{
- int uiLoopIndex;
- int nIndex;
- struct bcm_add_indication_alt *pstAddIndication;
- UINT nCurClassifierCnt;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- pstAddIndication = pvBuffer;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "======>");
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Type: 0x%X", pstAddIndication->u8Type);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Direction: 0x%X", pstAddIndication->u8Direction);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TID: 0x%X", ntohs(pstAddIndication->u16TID));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID: 0x%X", ntohs(pstAddIndication->u16CID));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16VCID: 0x%X", ntohs(pstAddIndication->u16VCID));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " AuthorizedSet--->");
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32SFID: 0x%X", htonl(pstAddIndication->sfAuthorizedSet.u32SFID));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID: 0x%X", htons(pstAddIndication->sfAuthorizedSet.u16CID));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassNameLength: 0x%X",
- pstAddIndication->sfAuthorizedSet.u8ServiceClassNameLength);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassName: 0x%X ,0x%X , 0x%X, 0x%X, 0x%X, 0x%X",
- pstAddIndication->sfAuthorizedSet.u8ServiceClassName[0],
- pstAddIndication->sfAuthorizedSet.u8ServiceClassName[1],
- pstAddIndication->sfAuthorizedSet.u8ServiceClassName[2],
- pstAddIndication->sfAuthorizedSet.u8ServiceClassName[3],
- pstAddIndication->sfAuthorizedSet.u8ServiceClassName[4],
- pstAddIndication->sfAuthorizedSet.u8ServiceClassName[5]);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8MBSService: 0x%X", pstAddIndication->sfAuthorizedSet.u8MBSService);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8QosParamSet: 0x%X", pstAddIndication->sfAuthorizedSet.u8QosParamSet);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficPriority: 0x%X, %p",
- pstAddIndication->sfAuthorizedSet.u8TrafficPriority, &pstAddIndication->sfAuthorizedSet.u8TrafficPriority);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxSustainedTrafficRate: 0x%X 0x%p",
- pstAddIndication->sfAuthorizedSet.u32MaxSustainedTrafficRate,
- &pstAddIndication->sfAuthorizedSet.u32MaxSustainedTrafficRate);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxTrafficBurst: 0x%X", pstAddIndication->sfAuthorizedSet.u32MaxTrafficBurst);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MinReservedTrafficRate : 0x%X",
- pstAddIndication->sfAuthorizedSet.u32MinReservedTrafficRate);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParamLength: 0x%X",
- pstAddIndication->sfAuthorizedSet.u8VendorSpecificQoSParamLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParam: 0x%X",
- pstAddIndication->sfAuthorizedSet.u8VendorSpecificQoSParam[0]);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceFlowSchedulingType: 0x%X",
- pstAddIndication->sfAuthorizedSet.u8ServiceFlowSchedulingType);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32ToleratedJitter: 0x%X", pstAddIndication->sfAuthorizedSet.u32ToleratedJitter);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaximumLatency: 0x%X", pstAddIndication->sfAuthorizedSet.u32MaximumLatency);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8FixedLengthVSVariableLengthSDUIndicator: 0x%X",
- pstAddIndication->sfAuthorizedSet.u8FixedLengthVSVariableLengthSDUIndicator);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8SDUSize: 0x%X", pstAddIndication->sfAuthorizedSet.u8SDUSize);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TargetSAID: 0x%X", pstAddIndication->sfAuthorizedSet.u16TargetSAID);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQEnable: 0x%X", pstAddIndication->sfAuthorizedSet.u8ARQEnable);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQWindowSize: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQWindowSize);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryTxTimeOut: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQRetryTxTimeOut);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryRxTimeOut: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQRetryRxTimeOut);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockLifeTime: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQBlockLifeTime);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQSyncLossTimeOut: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQSyncLossTimeOut);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQDeliverInOrder: 0x%X", pstAddIndication->sfAuthorizedSet.u8ARQDeliverInOrder);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRxPurgeTimeOut: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQRxPurgeTimeOut);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockSize: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQBlockSize);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8CSSpecification: 0x%X", pstAddIndication->sfAuthorizedSet.u8CSSpecification);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TypeOfDataDeliveryService: 0x%X",
- pstAddIndication->sfAuthorizedSet.u8TypeOfDataDeliveryService);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16SDUInterArrivalTime: 0x%X", pstAddIndication->sfAuthorizedSet.u16SDUInterArrivalTime);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TimeBase: 0x%X", pstAddIndication->sfAuthorizedSet.u16TimeBase);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8PagingPreference: 0x%X", pstAddIndication->sfAuthorizedSet.u8PagingPreference);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16UnsolicitedPollingInterval: 0x%X",
- pstAddIndication->sfAuthorizedSet.u16UnsolicitedPollingInterval);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "sfAuthorizedSet.u8HARQChannelMapping %x %x %x ",
- *(unsigned int *)pstAddIndication->sfAuthorizedSet.u8HARQChannelMapping,
- *(unsigned int *)&pstAddIndication->sfAuthorizedSet.u8HARQChannelMapping[4],
- *(USHORT *)&pstAddIndication->sfAuthorizedSet.u8HARQChannelMapping[8]);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficIndicationPreference: 0x%X",
- pstAddIndication->sfAuthorizedSet.u8TrafficIndicationPreference);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " Total Classifiers Received: 0x%X", pstAddIndication->sfAuthorizedSet.u8TotalClassifiers);
-
- nCurClassifierCnt = pstAddIndication->sfAuthorizedSet.u8TotalClassifiers;
- if (nCurClassifierCnt > MAX_CLASSIFIERS_IN_SF)
- nCurClassifierCnt = MAX_CLASSIFIERS_IN_SF;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "pstAddIndication->sfAuthorizedSet.bValid %d", pstAddIndication->sfAuthorizedSet.bValid);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "pstAddIndication->sfAuthorizedSet.u16MacOverhead %x", pstAddIndication->sfAuthorizedSet.u16MacOverhead);
- if (!pstAddIndication->sfAuthorizedSet.bValid)
- pstAddIndication->sfAuthorizedSet.bValid = 1;
- for (nIndex = 0; nIndex < nCurClassifierCnt; nIndex++) {
- struct bcm_convergence_types *psfCSType = NULL;
-
- psfCSType = &pstAddIndication->sfAuthorizedSet.cConvergenceSLTypes[nIndex];
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "psfCSType = %p", psfCSType);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "CCPacketClassificationRuleSI====>");
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ClassifierRulePriority: 0x%X ",
- psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfServiceLength: 0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfService[3]: 0x%X ,0x%X ,0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0],
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1],
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2]);
-
- for (uiLoopIndex = 0; uiLoopIndex < 1; uiLoopIndex++)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Protocol: 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8Protocol);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddressLength: 0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength);
-
- for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddress[32]: 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress[uiLoopIndex]);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddressLength: 0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength);
-
- for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddress[32]: 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8IPDestinationAddress[uiLoopIndex]);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRangeLength:0x%X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRange[4]: 0x%02X ,0x%02X ,0x%02X ,0x%02X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[0],
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[1],
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[2],
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[3]);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRangeLength: 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRange[4]: 0x%02X ,0x%02X ,0x%02X ,0x%02X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[0],
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[1],
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[2],
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[3]);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddressLength: 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, "u8EthernetDestMacAddress[6]: %pM",
- psfCSType->cCPacketClassificationRule.
- u8EthernetDestMacAddress);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddressLength: 0x%02X ",
-			psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddressLength);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, "u8EthernetSourceMACAddress[6]: %pM",
- psfCSType->cCPacketClassificationRule.
- u8EthernetSourceMACAddress);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthertypeLength: 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8EthertypeLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Ethertype[3]: 0x%02X ,0x%02X ,0x%02X ",
- psfCSType->cCPacketClassificationRule.u8Ethertype[0],
- psfCSType->cCPacketClassificationRule.u8Ethertype[1],
- psfCSType->cCPacketClassificationRule.u8Ethertype[2]);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16UserPriority: 0x%X ", psfCSType->cCPacketClassificationRule.u16UserPriority);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16VLANID: 0x%X ", psfCSType->cCPacketClassificationRule.u16VLANID);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8AssociatedPHSI: 0x%02X ", psfCSType->cCPacketClassificationRule.u8AssociatedPHSI);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16PacketClassificationRuleIndex: 0x%X ",
- psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParamLength: 0x%X ",
- psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParamLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParam[1]: 0x%X ",
- psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParam[0]);
-#ifdef VERSION_D5
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPv6FlowLableLength: 0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLableLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, "u8IPv6FlowLable[6]: 0x%*ph ",
- 6, psfCSType->cCPacketClassificationRule.
- u8IPv6FlowLable);
-#endif
- }
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "bValid: 0x%02X", pstAddIndication->sfAuthorizedSet.bValid);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "AdmittedSet--->");
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32SFID: 0x%X", pstAddIndication->sfAdmittedSet.u32SFID);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID: 0x%X", pstAddIndication->sfAdmittedSet.u16CID);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassNameLength: 0x%X",
- pstAddIndication->sfAdmittedSet.u8ServiceClassNameLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL,
- "u8ServiceClassName: 0x%*ph",
- 6, pstAddIndication->sfAdmittedSet.u8ServiceClassName);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8MBSService: 0x%02X", pstAddIndication->sfAdmittedSet.u8MBSService);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8QosParamSet: 0x%02X", pstAddIndication->sfAdmittedSet.u8QosParamSet);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficPriority: 0x%02X", pstAddIndication->sfAdmittedSet.u8TrafficPriority);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxTrafficBurst: 0x%X", pstAddIndication->sfAdmittedSet.u32MaxTrafficBurst);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MinReservedTrafficRate: 0x%X",
- pstAddIndication->sfAdmittedSet.u32MinReservedTrafficRate);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParamLength: 0x%02X",
- pstAddIndication->sfAdmittedSet.u8VendorSpecificQoSParamLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParam: 0x%02X",
- pstAddIndication->sfAdmittedSet.u8VendorSpecificQoSParam[0]);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceFlowSchedulingType: 0x%02X",
- pstAddIndication->sfAdmittedSet.u8ServiceFlowSchedulingType);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32ToleratedJitter: 0x%X", pstAddIndication->sfAdmittedSet.u32ToleratedJitter);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaximumLatency: 0x%X", pstAddIndication->sfAdmittedSet.u32MaximumLatency);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8FixedLengthVSVariableLengthSDUIndicator: 0x%02X",
- pstAddIndication->sfAdmittedSet.u8FixedLengthVSVariableLengthSDUIndicator);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8SDUSize: 0x%02X", pstAddIndication->sfAdmittedSet.u8SDUSize);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TargetSAID: 0x%02X", pstAddIndication->sfAdmittedSet.u16TargetSAID);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQEnable: 0x%02X", pstAddIndication->sfAdmittedSet.u8ARQEnable);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQWindowSize: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQWindowSize);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryTxTimeOut: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQRetryTxTimeOut);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryRxTimeOut: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQRetryRxTimeOut);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockLifeTime: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQBlockLifeTime);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQSyncLossTimeOut: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQSyncLossTimeOut);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQDeliverInOrder: 0x%02X", pstAddIndication->sfAdmittedSet.u8ARQDeliverInOrder);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRxPurgeTimeOut: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQRxPurgeTimeOut);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockSize: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQBlockSize);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8CSSpecification: 0x%02X", pstAddIndication->sfAdmittedSet.u8CSSpecification);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TypeOfDataDeliveryService: 0x%02X",
- pstAddIndication->sfAdmittedSet.u8TypeOfDataDeliveryService);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16SDUInterArrivalTime: 0x%X", pstAddIndication->sfAdmittedSet.u16SDUInterArrivalTime);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TimeBase: 0x%X", pstAddIndication->sfAdmittedSet.u16TimeBase);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8PagingPreference: 0x%X", pstAddIndication->sfAdmittedSet.u8PagingPreference);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficIndicationPreference: 0x%02X",
- pstAddIndication->sfAdmittedSet.u8TrafficIndicationPreference);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " Total Classifiers Received: 0x%X", pstAddIndication->sfAdmittedSet.u8TotalClassifiers);
-
- nCurClassifierCnt = pstAddIndication->sfAdmittedSet.u8TotalClassifiers;
- if (nCurClassifierCnt > MAX_CLASSIFIERS_IN_SF)
- nCurClassifierCnt = MAX_CLASSIFIERS_IN_SF;
-
- for (nIndex = 0; nIndex < nCurClassifierCnt; nIndex++) {
- struct bcm_convergence_types *psfCSType = NULL;
-
- psfCSType = &pstAddIndication->sfAdmittedSet.cConvergenceSLTypes[nIndex];
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " CCPacketClassificationRuleSI====>");
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ClassifierRulePriority: 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfServiceLength: 0x%02X",
- psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, "u8IPTypeOfService[3]: 0x%*ph",
- 3, psfCSType->cCPacketClassificationRule.
- u8IPTypeOfService);
- for (uiLoopIndex = 0; uiLoopIndex < 1; uiLoopIndex++)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Protocol: 0x%02X ", psfCSType->cCPacketClassificationRule.u8Protocol);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddressLength: 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength);
-
- for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddress[32]: 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress[uiLoopIndex]);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddressLength: 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength);
-
- for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddress[32]: 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8IPDestinationAddress[uiLoopIndex]);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRangeLength: 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, "u8ProtocolSourcePortRange[4]: 0x%*ph ",
- 4, psfCSType->cCPacketClassificationRule.
- u8ProtocolSourcePortRange);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRangeLength: 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, "u8ProtocolDestPortRange[4]: 0x%*ph ",
- 4, psfCSType->cCPacketClassificationRule.
- u8ProtocolDestPortRange);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddressLength: 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, "u8EthernetDestMacAddress[6]: %pM",
- psfCSType->cCPacketClassificationRule.
- u8EthernetDestMacAddress);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddressLength: 0x%02X ",
-				psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddressLength);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, "u8EthernetSourceMACAddress[6]: %pM",
- psfCSType->cCPacketClassificationRule.
- u8EthernetSourceMACAddress);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthertypeLength: 0x%02X ", psfCSType->cCPacketClassificationRule.u8EthertypeLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, "u8Ethertype[3]: 0x%*ph",
- 3, psfCSType->cCPacketClassificationRule.
- u8Ethertype);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16UserPriority: 0x%X ", psfCSType->cCPacketClassificationRule.u16UserPriority);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16VLANID: 0x%X ", psfCSType->cCPacketClassificationRule.u16VLANID);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8AssociatedPHSI: 0x%02X ", psfCSType->cCPacketClassificationRule.u8AssociatedPHSI);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16PacketClassificationRuleIndex: 0x%X ",
- psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParamLength: 0x%02X",
- psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParamLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParam[1]: 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParam[0]);
-#ifdef VERSION_D5
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPv6FlowLableLength: 0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLableLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, "u8IPv6FlowLable[6]: 0x%*ph ",
- 6, psfCSType->cCPacketClassificationRule.
- u8IPv6FlowLable);
-#endif
- }
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "bValid: 0x%X", pstAddIndication->sfAdmittedSet.bValid);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " ActiveSet--->");
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32SFID: 0x%X", pstAddIndication->sfActiveSet.u32SFID);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID: 0x%X", pstAddIndication->sfActiveSet.u16CID);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassNameLength: 0x%X", pstAddIndication->sfActiveSet.u8ServiceClassNameLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL,
- "u8ServiceClassName: 0x%*ph",
- 6, pstAddIndication->sfActiveSet.u8ServiceClassName);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8MBSService: 0x%02X", pstAddIndication->sfActiveSet.u8MBSService);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8QosParamSet: 0x%02X", pstAddIndication->sfActiveSet.u8QosParamSet);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficPriority: 0x%02X", pstAddIndication->sfActiveSet.u8TrafficPriority);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxTrafficBurst: 0x%X", pstAddIndication->sfActiveSet.u32MaxTrafficBurst);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MinReservedTrafficRate: 0x%X",
- pstAddIndication->sfActiveSet.u32MinReservedTrafficRate);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParamLength: 0x%02X",
- pstAddIndication->sfActiveSet.u8VendorSpecificQoSParamLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParam: 0x%02X",
- pstAddIndication->sfActiveSet.u8VendorSpecificQoSParam[0]);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceFlowSchedulingType: 0x%02X",
- pstAddIndication->sfActiveSet.u8ServiceFlowSchedulingType);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32ToleratedJitter: 0x%X", pstAddIndication->sfActiveSet.u32ToleratedJitter);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaximumLatency: 0x%X", pstAddIndication->sfActiveSet.u32MaximumLatency);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8FixedLengthVSVariableLengthSDUIndicator: 0x%02X",
- pstAddIndication->sfActiveSet.u8FixedLengthVSVariableLengthSDUIndicator);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8SDUSize: 0x%X", pstAddIndication->sfActiveSet.u8SDUSize);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16TargetSAID: 0x%X", pstAddIndication->sfActiveSet.u16TargetSAID);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ARQEnable: 0x%X", pstAddIndication->sfActiveSet.u8ARQEnable);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQWindowSize: 0x%X", pstAddIndication->sfActiveSet.u16ARQWindowSize);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQRetryTxTimeOut: 0x%X", pstAddIndication->sfActiveSet.u16ARQRetryTxTimeOut);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQRetryRxTimeOut: 0x%X", pstAddIndication->sfActiveSet.u16ARQRetryRxTimeOut);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQBlockLifeTime: 0x%X", pstAddIndication->sfActiveSet.u16ARQBlockLifeTime);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQSyncLossTimeOut: 0x%X", pstAddIndication->sfActiveSet.u16ARQSyncLossTimeOut);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ARQDeliverInOrder: 0x%X", pstAddIndication->sfActiveSet.u8ARQDeliverInOrder);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQRxPurgeTimeOut: 0x%X", pstAddIndication->sfActiveSet.u16ARQRxPurgeTimeOut);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQBlockSize: 0x%X", pstAddIndication->sfActiveSet.u16ARQBlockSize);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8CSSpecification: 0x%X", pstAddIndication->sfActiveSet.u8CSSpecification);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8TypeOfDataDeliveryService: 0x%X",
- pstAddIndication->sfActiveSet.u8TypeOfDataDeliveryService);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16SDUInterArrivalTime: 0x%X", pstAddIndication->sfActiveSet.u16SDUInterArrivalTime);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16TimeBase: 0x%X", pstAddIndication->sfActiveSet.u16TimeBase);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8PagingPreference: 0x%X", pstAddIndication->sfActiveSet.u8PagingPreference);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8TrafficIndicationPreference: 0x%X",
- pstAddIndication->sfActiveSet.u8TrafficIndicationPreference);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " Total Classifiers Received: 0x%X", pstAddIndication->sfActiveSet.u8TotalClassifiers);
-
- nCurClassifierCnt = pstAddIndication->sfActiveSet.u8TotalClassifiers;
- if (nCurClassifierCnt > MAX_CLASSIFIERS_IN_SF)
- nCurClassifierCnt = MAX_CLASSIFIERS_IN_SF;
-
- for (nIndex = 0; nIndex < nCurClassifierCnt; nIndex++) {
- struct bcm_convergence_types *psfCSType = NULL;
- struct bcm_packet_class_rules *clsRule = NULL;
-
- psfCSType = &pstAddIndication->sfActiveSet.cConvergenceSLTypes[nIndex];
- clsRule = &psfCSType->cCPacketClassificationRule;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, " CCPacketClassificationRuleSI====>");
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, " u8ClassifierRulePriority: 0x%X ",
- clsRule->u8ClassifierRulePriority);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, " u8IPTypeOfServiceLength: 0x%X ",
- clsRule->u8IPTypeOfServiceLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL,
- " u8IPTypeOfService[3]: 0x%X ,0x%X ,0x%X ",
- clsRule->u8IPTypeOfService[0],
- clsRule->u8IPTypeOfService[1],
- clsRule->u8IPTypeOfService[2]);
-
- for (uiLoopIndex = 0; uiLoopIndex < 1; uiLoopIndex++)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL,
- " u8Protocol: 0x%X ",
- clsRule->u8Protocol);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL,
- "u8IPMaskedSourceAddressLength: 0x%X ",
- clsRule->u8IPMaskedSourceAddressLength);
-
- for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL,
- "u8IPMaskedSourceAddress[32]: 0x%X ",
- clsRule->u8IPMaskedSourceAddress[uiLoopIndex]);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL,
- "u8IPDestinationAddressLength: 0x%02X ",
- clsRule->u8IPDestinationAddressLength);
-
- for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL,
- " u8IPDestinationAddress[32]:0x%X ",
- clsRule->u8IPDestinationAddress[uiLoopIndex]);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL,
- " u8ProtocolSourcePortRangeLength: 0x%X ",
- clsRule->u8ProtocolSourcePortRangeLength);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL,
- " u8ProtocolSourcePortRange[4]: 0x%X ,0x%X ,0x%X ,0x%X ",
- clsRule->u8ProtocolSourcePortRange[0],
- clsRule->u8ProtocolSourcePortRange[1],
- clsRule->u8ProtocolSourcePortRange[2],
- clsRule->u8ProtocolSourcePortRange[3]);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL,
- " u8ProtocolDestPortRangeLength: 0x%X ",
- clsRule->u8ProtocolDestPortRangeLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL,
- " u8ProtocolDestPortRange[4]: 0x%X ,0x%X ,0x%X ,0x%X ",
- clsRule->u8ProtocolDestPortRange[0],
- clsRule->u8ProtocolDestPortRange[1],
- clsRule->u8ProtocolDestPortRange[2],
- clsRule->u8ProtocolDestPortRange[3]);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL,
- " u8EthernetDestMacAddressLength: 0x%X ",
- clsRule->u8EthernetDestMacAddressLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL,
- " u8EthernetDestMacAddress[6]: 0x%X ,0x%X ,0x%X ,0x%X ,0x%X ,0x%X",
- clsRule->u8EthernetDestMacAddress[0],
- clsRule->u8EthernetDestMacAddress[1],
- clsRule->u8EthernetDestMacAddress[2],
- clsRule->u8EthernetDestMacAddress[3],
- clsRule->u8EthernetDestMacAddress[4],
- clsRule->u8EthernetDestMacAddress[5]);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL,
- " u8EthernetSourceMACAddressLength: 0x%X ",
-				clsRule->u8EthernetSourceMACAddressLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL,
- "u8EthernetSourceMACAddress[6]: 0x%X ,0x%X ,0x%X ,0x%X ,0x%X ,0x%X",
- clsRule->u8EthernetSourceMACAddress[0],
- clsRule->u8EthernetSourceMACAddress[1],
- clsRule->u8EthernetSourceMACAddress[2],
- clsRule->u8EthernetSourceMACAddress[3],
- clsRule->u8EthernetSourceMACAddress[4],
- clsRule->u8EthernetSourceMACAddress[5]);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, " u8EthertypeLength: 0x%X ",
- clsRule->u8EthertypeLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL,
- " u8Ethertype[3]: 0x%X ,0x%X ,0x%X ",
- clsRule->u8Ethertype[0],
- clsRule->u8Ethertype[1],
- clsRule->u8Ethertype[2]);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, " u16UserPriority: 0x%X ",
- clsRule->u16UserPriority);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, " u16VLANID: 0x%X ",
- clsRule->u16VLANID);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, " u8AssociatedPHSI: 0x%X ",
- clsRule->u8AssociatedPHSI);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL,
- " u16PacketClassificationRuleIndex:0x%X ",
- clsRule->u16PacketClassificationRuleIndex);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL,
- " u8VendorSpecificClassifierParamLength:0x%X ",
- clsRule->u8VendorSpecificClassifierParamLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL,
- " u8VendorSpecificClassifierParam[1]:0x%X ",
- clsRule->u8VendorSpecificClassifierParam[0]);
-#ifdef VERSION_D5
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, " u8IPv6FlowLableLength: 0x%X ",
- clsRule->u8IPv6FlowLableLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL,
- " u8IPv6FlowLable[6]: 0x%X ,0x%X ,0x%X ,0x%X ,0x%X ,0x%X ",
- clsRule->u8IPv6FlowLable[0],
- clsRule->u8IPv6FlowLable[1],
- clsRule->u8IPv6FlowLable[2],
- clsRule->u8IPv6FlowLable[3],
- clsRule->u8IPv6FlowLable[4],
- clsRule->u8IPv6FlowLable[5]);
-#endif
- }
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL,
- " bValid: 0x%X", pstAddIndication->sfActiveSet.bValid);
-}
-
-static inline ULONG RestoreSFParam(struct bcm_mini_adapter *Adapter,
- ULONG ulAddrSFParamSet, PUCHAR pucDestBuffer)
-{
- UINT nBytesToRead = sizeof(struct bcm_connect_mgr_params);
-
- if (ulAddrSFParamSet == 0 || NULL == pucDestBuffer) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Got Param address as 0!!");
- return 0;
- }
- ulAddrSFParamSet = ntohl(ulAddrSFParamSet);
-
- /* Read out the SF Param Set At the indicated Location */
- if (rdm(Adapter, ulAddrSFParamSet, (PUCHAR)pucDestBuffer, nBytesToRead) < 0)
- return STATUS_FAILURE;
-
- return 1;
-}
-
-static ULONG StoreSFParam(struct bcm_mini_adapter *Adapter, PUCHAR pucSrcBuffer,
- ULONG ulAddrSFParamSet)
-{
- UINT nBytesToWrite = sizeof(struct bcm_connect_mgr_params);
- int ret = 0;
-
- if (ulAddrSFParamSet == 0 || NULL == pucSrcBuffer)
- return 0;
-
- ret = wrm(Adapter, ulAddrSFParamSet, (u8 *)pucSrcBuffer, nBytesToWrite);
- if (ret < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "%s:%d WRM failed", __func__, __LINE__);
- return ret;
- }
- return 1;
-}
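For context: the two helpers above move a fixed-size struct bcm_connect_mgr_params between host memory and a firmware-side buffer whose address arrives in network byte order; rdm()/wrm() perform the actual bus transfers. Below is a minimal standalone sketch of that round trip, with "device memory" modelled as a plain byte array; every name, size, and the added range check are illustrative assumptions, not taken from the driver.

#include <arpa/inet.h>	/* ntohl() in this userspace model */
#include <string.h>

#define DEV_MEM_SIZE	4096

struct sf_params {				/* stand-in for the real parameter-set struct */
	unsigned char raw[256];
};

static unsigned char dev_mem[DEV_MEM_SIZE];	/* pretend firmware memory */

/* Copy one parameter set out of "device memory"; the address arrives big-endian. */
static int restore_sf_param(unsigned long addr_be, struct sf_params *dst)
{
	unsigned long addr = ntohl(addr_be);

	/* The driver only rejects a zero address; the range check belongs to this model. */
	if (addr == 0 || addr + sizeof(*dst) > DEV_MEM_SIZE)
		return -1;

	memcpy(dst, &dev_mem[addr], sizeof(*dst));
	return 0;
}

/* Mirror image: place one parameter set into "device memory" at a host-order address. */
static int store_sf_param(const struct sf_params *src, unsigned long addr)
{
	if (addr == 0 || addr + sizeof(*src) > DEV_MEM_SIZE)
		return -1;

	memcpy(&dev_mem[addr], src, sizeof(*src));
	return 0;
}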
-
-ULONG StoreCmControlResponseMessage(struct bcm_mini_adapter *Adapter,
- PVOID pvBuffer, UINT *puBufferLength)
-{
- struct bcm_add_indication_alt *pstAddIndicationAlt = NULL;
- struct bcm_add_indication *pstAddIndication = NULL;
- struct bcm_del_request *pstDeletionRequest;
- UINT uiSearchRuleIndex;
- ULONG ulSFID;
-
- pstAddIndicationAlt = pvBuffer;
-
-	/*
-	 * In case of a DSD Req by the MS, we should immediately delete this SF
-	 * so that we stop classifying any further packets for this SF.
-	 */
- if (pstAddIndicationAlt->u8Type == DSD_REQ) {
- pstDeletionRequest = pvBuffer;
-
- ulSFID = ntohl(pstDeletionRequest->u32SFID);
- uiSearchRuleIndex = SearchSfid(Adapter, ulSFID);
-
- if (uiSearchRuleIndex < NO_OF_QUEUES) {
- deleteSFBySfid(Adapter, uiSearchRuleIndex);
- Adapter->u32TotalDSD++;
- }
- return 1;
- }
-
- if ((pstAddIndicationAlt->u8Type == DSD_RSP) ||
- (pstAddIndicationAlt->u8Type == DSD_ACK)) {
-		/* No special handling; send the message as it is */
- return 1;
- }
-	/* For DSA_REQ, the driver must access fields only up to the "psfAuthorizedSet" parameter! */
-
- pstAddIndication = kmalloc(sizeof(struct bcm_add_indication),
- GFP_KERNEL);
- if (pstAddIndication == NULL)
- return 0;
-
- /* AUTHORIZED SET */
- pstAddIndication->psfAuthorizedSet = (struct bcm_connect_mgr_params *)
- GetNextTargetBufferLocation(Adapter,
- pstAddIndicationAlt->u16TID);
- if (!pstAddIndication->psfAuthorizedSet) {
- kfree(pstAddIndication);
- return 0;
- }
-
- if (StoreSFParam(Adapter, (PUCHAR)&pstAddIndicationAlt->sfAuthorizedSet,
- (ULONG)pstAddIndication->psfAuthorizedSet) != 1) {
- kfree(pstAddIndication);
- return 0;
- }
-
- /* this can't possibly be right */
- pstAddIndication->psfAuthorizedSet =
- (struct bcm_connect_mgr_params *) ntohl(
- (ULONG)pstAddIndication->psfAuthorizedSet);
-
- if (pstAddIndicationAlt->u8Type == DSA_REQ) {
- struct bcm_add_request AddRequest;
-
- AddRequest.u8Type = pstAddIndicationAlt->u8Type;
- AddRequest.eConnectionDir = pstAddIndicationAlt->u8Direction;
- AddRequest.u16TID = pstAddIndicationAlt->u16TID;
- AddRequest.u16CID = pstAddIndicationAlt->u16CID;
- AddRequest.u16VCID = pstAddIndicationAlt->u16VCID;
- AddRequest.psfParameterSet = pstAddIndication->psfAuthorizedSet;
- (*puBufferLength) = sizeof(struct bcm_add_request);
- memcpy(pvBuffer, &AddRequest, sizeof(struct bcm_add_request));
- kfree(pstAddIndication);
- return 1;
- }
-
-	/* Since it's not DSA_REQ, we can access all fields in pstAddIndicationAlt */
- /* We need to extract the structure from the buffer and pack it differently */
-
- pstAddIndication->u8Type = pstAddIndicationAlt->u8Type;
- pstAddIndication->eConnectionDir = pstAddIndicationAlt->u8Direction;
- pstAddIndication->u16TID = pstAddIndicationAlt->u16TID;
- pstAddIndication->u16CID = pstAddIndicationAlt->u16CID;
- pstAddIndication->u16VCID = pstAddIndicationAlt->u16VCID;
- pstAddIndication->u8CC = pstAddIndicationAlt->u8CC;
-
- /* ADMITTED SET */
- pstAddIndication->psfAdmittedSet = (struct bcm_connect_mgr_params *)
- GetNextTargetBufferLocation(Adapter,
- pstAddIndicationAlt->u16TID);
- if (!pstAddIndication->psfAdmittedSet) {
- kfree(pstAddIndication);
- return 0;
- }
- if (StoreSFParam(Adapter, (PUCHAR)&pstAddIndicationAlt->sfAdmittedSet,
- (ULONG)pstAddIndication->psfAdmittedSet) != 1) {
- kfree(pstAddIndication);
- return 0;
- }
-
- pstAddIndication->psfAdmittedSet =
- (struct bcm_connect_mgr_params *) ntohl(
- (ULONG) pstAddIndication->psfAdmittedSet);
-
- /* ACTIVE SET */
- pstAddIndication->psfActiveSet = (struct bcm_connect_mgr_params *)
- GetNextTargetBufferLocation(Adapter,
- pstAddIndicationAlt->u16TID);
- if (!pstAddIndication->psfActiveSet) {
- kfree(pstAddIndication);
- return 0;
- }
- if (StoreSFParam(Adapter, (PUCHAR)&pstAddIndicationAlt->sfActiveSet,
- (ULONG)pstAddIndication->psfActiveSet) != 1) {
- kfree(pstAddIndication);
- return 0;
- }
-
- pstAddIndication->psfActiveSet =
- (struct bcm_connect_mgr_params *) ntohl(
- (ULONG)pstAddIndication->psfActiveSet);
-
- (*puBufferLength) = sizeof(struct bcm_add_indication);
- *(struct bcm_add_indication *)pvBuffer = *pstAddIndication;
- kfree(pstAddIndication);
- return 1;
-}
-
-static inline struct bcm_add_indication_alt
-*RestoreCmControlResponseMessage(register struct bcm_mini_adapter *Adapter,
- register PVOID pvBuffer)
-{
- ULONG ulStatus = 0;
- struct bcm_add_indication *pstAddIndication = NULL;
- struct bcm_add_indication_alt *pstAddIndicationDest = NULL;
-
- pstAddIndication = pvBuffer;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "=====>");
- if ((pstAddIndication->u8Type == DSD_REQ) ||
- (pstAddIndication->u8Type == DSD_RSP) ||
- (pstAddIndication->u8Type == DSD_ACK))
- return pvBuffer;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Inside RestoreCmControlResponseMessage ");
- /*
- * Need to Allocate memory to contain the SUPER Large structures
- * Our driver can't create these structures on Stack :(
- */
- pstAddIndicationDest = kmalloc(sizeof(struct bcm_add_indication_alt),
- GFP_KERNEL);
-
- if (pstAddIndicationDest) {
- memset(pstAddIndicationDest, 0,
- sizeof(struct bcm_add_indication_alt));
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG,
- DBG_LVL_ALL,
- "Failed to allocate memory for SF Add Indication Structure ");
- return NULL;
- }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "AddIndication-u8Type : 0x%X",
- pstAddIndication->u8Type);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "AddIndication-u8Direction : 0x%X",
- pstAddIndication->eConnectionDir);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "AddIndication-u8TID : 0x%X",
- ntohs(pstAddIndication->u16TID));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "AddIndication-u8CID : 0x%X",
- ntohs(pstAddIndication->u16CID));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "AddIndication-u16VCID : 0x%X",
- ntohs(pstAddIndication->u16VCID));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
-		"AddIndication-authorized set loc : %p",
- pstAddIndication->psfAuthorizedSet);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "AddIndication-admitted set loc : %p",
- pstAddIndication->psfAdmittedSet);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "AddIndication-Active set loc : %p",
- pstAddIndication->psfActiveSet);
-
- pstAddIndicationDest->u8Type = pstAddIndication->u8Type;
- pstAddIndicationDest->u8Direction = pstAddIndication->eConnectionDir;
- pstAddIndicationDest->u16TID = pstAddIndication->u16TID;
- pstAddIndicationDest->u16CID = pstAddIndication->u16CID;
- pstAddIndicationDest->u16VCID = pstAddIndication->u16VCID;
- pstAddIndicationDest->u8CC = pstAddIndication->u8CC;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Restoring Active Set ");
- ulStatus = RestoreSFParam(Adapter,
- (ULONG)pstAddIndication->psfActiveSet,
- (PUCHAR)&pstAddIndicationDest->sfActiveSet);
- if (ulStatus != 1)
- goto failed_restore_sf_param;
-
- if (pstAddIndicationDest->sfActiveSet.u8TotalClassifiers > MAX_CLASSIFIERS_IN_SF)
- pstAddIndicationDest->sfActiveSet.u8TotalClassifiers =
- MAX_CLASSIFIERS_IN_SF;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Restoring Admitted Set ");
- ulStatus = RestoreSFParam(Adapter,
- (ULONG)pstAddIndication->psfAdmittedSet,
- (PUCHAR)&pstAddIndicationDest->sfAdmittedSet);
- if (ulStatus != 1)
- goto failed_restore_sf_param;
-
- if (pstAddIndicationDest->sfAdmittedSet.u8TotalClassifiers > MAX_CLASSIFIERS_IN_SF)
- pstAddIndicationDest->sfAdmittedSet.u8TotalClassifiers =
- MAX_CLASSIFIERS_IN_SF;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Restoring Authorized Set ");
- ulStatus = RestoreSFParam(Adapter,
- (ULONG)pstAddIndication->psfAuthorizedSet,
- (PUCHAR)&pstAddIndicationDest->sfAuthorizedSet);
- if (ulStatus != 1)
- goto failed_restore_sf_param;
-
- if (pstAddIndicationDest->sfAuthorizedSet.u8TotalClassifiers > MAX_CLASSIFIERS_IN_SF)
- pstAddIndicationDest->sfAuthorizedSet.u8TotalClassifiers =
- MAX_CLASSIFIERS_IN_SF;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Dumping the whole raw packet");
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "============================================================");
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- " pstAddIndicationDest->sfActiveSet size %zx %p",
- sizeof(*pstAddIndicationDest), pstAddIndicationDest);
- /* BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_OTHERS, CONN_MSG,
- * DBG_LVL_ALL, (unsigned char *)pstAddIndicationDest,
- * sizeof(*pstAddIndicationDest));
- */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "============================================================");
- return pstAddIndicationDest;
-failed_restore_sf_param:
- kfree(pstAddIndicationDest);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "<=====");
- return NULL;
-}
-
-ULONG SetUpTargetDsxBuffers(struct bcm_mini_adapter *Adapter)
-{
- ULONG ulTargetDsxBuffersBase = 0;
- ULONG ulCntTargetBuffers;
- ULONG i;
- int Status;
-
- if (!Adapter) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Adapter was NULL!!!");
- return 0;
- }
-
- if (Adapter->astTargetDsxBuffer[0].ulTargetDsxBuffer)
- return 1;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Size of Each DSX Buffer(Also size of connection manager parameters): %zx ",
- sizeof(struct bcm_connect_mgr_params));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Reading DSX buffer From Target location %x ",
- DSX_MESSAGE_EXCHANGE_BUFFER);
-
- Status = rdmalt(Adapter, DSX_MESSAGE_EXCHANGE_BUFFER,
- (PUINT)&ulTargetDsxBuffersBase, sizeof(UINT));
- if (Status < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "RDM failed!!");
- return 0;
- }
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Base Address Of DSX Target Buffer : 0x%lx",
- ulTargetDsxBuffersBase);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Tgt Buffer is Now %lx :", ulTargetDsxBuffersBase);
- ulCntTargetBuffers = DSX_MESSAGE_EXCHANGE_BUFFER_SIZE /
- sizeof(struct bcm_connect_mgr_params);
-
- Adapter->ulTotalTargetBuffersAvailable =
- ulCntTargetBuffers > MAX_TARGET_DSX_BUFFERS ?
- MAX_TARGET_DSX_BUFFERS : ulCntTargetBuffers;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- " Total Target DSX Buffer setup %lx ",
- Adapter->ulTotalTargetBuffersAvailable);
-
- for (i = 0; i < Adapter->ulTotalTargetBuffersAvailable; i++) {
- Adapter->astTargetDsxBuffer[i].ulTargetDsxBuffer = ulTargetDsxBuffersBase;
- Adapter->astTargetDsxBuffer[i].valid = 1;
- Adapter->astTargetDsxBuffer[i].tid = 0;
- ulTargetDsxBuffersBase += sizeof(struct bcm_connect_mgr_params);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " Target DSX Buffer %lx setup at 0x%lx",
- i, Adapter->astTargetDsxBuffer[i].ulTargetDsxBuffer);
- }
- Adapter->ulCurrentTargetBuffer = 0;
- Adapter->ulFreeTargetBufferCnt = Adapter->ulTotalTargetBuffersAvailable;
- return 1;
-}
-
-static ULONG GetNextTargetBufferLocation(struct bcm_mini_adapter *Adapter,
- B_UINT16 tid)
-{
- ULONG dsx_buf;
- ULONG idx, max_try;
-
- if ((Adapter->ulTotalTargetBuffersAvailable == 0)
- || (Adapter->ulFreeTargetBufferCnt == 0)) {
- ClearTargetDSXBuffer(Adapter, tid, false);
- return 0;
- }
-
- idx = Adapter->ulCurrentTargetBuffer;
- max_try = Adapter->ulTotalTargetBuffersAvailable;
- while ((max_try) && (Adapter->astTargetDsxBuffer[idx].valid != 1)) {
- idx = (idx+1) % Adapter->ulTotalTargetBuffersAvailable;
- max_try--;
- }
-
- if (max_try == 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "\n GetNextTargetBufferLocation : Error No Free Target DSX Buffers FreeCnt : %lx ",
- Adapter->ulFreeTargetBufferCnt);
- ClearTargetDSXBuffer(Adapter, tid, false);
- return 0;
- }
-
- dsx_buf = Adapter->astTargetDsxBuffer[idx].ulTargetDsxBuffer;
- Adapter->astTargetDsxBuffer[idx].valid = 0;
- Adapter->astTargetDsxBuffer[idx].tid = tid;
- Adapter->ulFreeTargetBufferCnt--;
- idx = (idx+1)%Adapter->ulTotalTargetBuffersAvailable;
- Adapter->ulCurrentTargetBuffer = idx;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "GetNextTargetBufferLocation :Returning address %lx tid %d\n",
- dsx_buf, tid);
-
- return dsx_buf;
-}
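The allocator above is, at heart, a bounded round-robin scan over a small fixed table of target-side buffer slots: start at the saved cursor, claim the first free slot, mark it busy, and advance the cursor past it. A standalone sketch of that pattern follows; the slot count and field names are illustrative only.

#include <stddef.h>

#define NUM_SLOTS	8			/* stands in for the DSX buffer count */

struct dsx_slot {
	unsigned long addr;			/* target-side buffer address */
	int free;				/* non-zero if the slot may be handed out */
};

/* Claim the first free slot at or after *cursor; returns 0 if nothing is free. */
static unsigned long claim_slot(struct dsx_slot slots[NUM_SLOTS], size_t *cursor)
{
	for (size_t tries = 0; tries < NUM_SLOTS; tries++) {
		size_t idx = (*cursor + tries) % NUM_SLOTS;

		if (slots[idx].free) {
			slots[idx].free = 0;			/* mark busy */
			*cursor = (idx + 1) % NUM_SLOTS;	/* resume after the claimed slot */
			return slots[idx].addr;
		}
	}
	return 0;
}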
-
-int AllocAdapterDsxBuffer(struct bcm_mini_adapter *Adapter)
-{
- /*
- * Need to Allocate memory to contain the SUPER Large structures
- * Our driver can't create these structures on Stack
- */
- Adapter->caDsxReqResp = kmalloc(sizeof(struct bcm_add_indication_alt)
- + LEADER_SIZE, GFP_KERNEL);
- if (!Adapter->caDsxReqResp)
- return -ENOMEM;
-
- return 0;
-}
-
-int FreeAdapterDsxBuffer(struct bcm_mini_adapter *Adapter)
-{
- kfree(Adapter->caDsxReqResp);
- return 0;
-}
-
-/*
- * @ingroup ctrl_pkt_functions
- * This routine processes the Control responses
- * for the Connection Management.
- * @return - Queue index for the free SFID, else an invalid index.
- */
-bool CmControlResponseMessage(struct bcm_mini_adapter *Adapter, /* <Pointer to the Adapter structure */
- PVOID pvBuffer /* Starting Address of the Buffer, that contains the AddIndication Data */)
-{
- struct bcm_connect_mgr_params *psfLocalSet = NULL;
- struct bcm_add_indication_alt *pstAddIndication = NULL;
- struct bcm_change_indication *pstChangeIndication = NULL;
- struct bcm_leader *pLeader = NULL;
- INT uiSearchRuleIndex = 0;
- ULONG ulSFID;
-
- /*
- * Otherwise the message contains a target address from where we need to
- * read out the rest of the service flow param structure
- */
- pstAddIndication = RestoreCmControlResponseMessage(Adapter, pvBuffer);
- if (pstAddIndication == NULL) {
- ClearTargetDSXBuffer(Adapter, ((struct bcm_add_indication *)pvBuffer)->u16TID, false);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Error in restoring Service Flow param structure from DSx message");
- return false;
- }
-
- DumpCmControlPacket(pstAddIndication);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "====>");
- pLeader = (struct bcm_leader *)Adapter->caDsxReqResp;
-
- pLeader->Status = CM_CONTROL_NEWDSX_MULTICLASSIFIER_REQ;
- pLeader->Vcid = 0;
-
- ClearTargetDSXBuffer(Adapter, pstAddIndication->u16TID, false);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "### TID RECEIVED %d\n", pstAddIndication->u16TID);
- switch (pstAddIndication->u8Type) {
- case DSA_REQ:
- pLeader->PLength = sizeof(struct bcm_add_indication_alt);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Sending DSA Response....\n");
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSA RESPONSE TO MAC %d", pLeader->PLength);
- *((struct bcm_add_indication_alt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))
- = *pstAddIndication;
- ((struct bcm_add_indication_alt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSA_RSP;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " VCID = %x", ntohs(pstAddIndication->u16VCID));
- CopyBufferToControlPacket(Adapter, (PVOID)Adapter->caDsxReqResp);
- kfree(pstAddIndication);
- break;
- case DSA_RSP:
- pLeader->PLength = sizeof(struct bcm_add_indication_alt);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSA ACK TO MAC %d",
- pLeader->PLength);
- *((struct bcm_add_indication_alt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))
- = *pstAddIndication;
- ((struct bcm_add_indication_alt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSA_ACK;
- /* FALLTHROUGH */
- case DSA_ACK:
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "VCID:0x%X",
- ntohs(pstAddIndication->u16VCID));
- uiSearchRuleIndex = SearchFreeSfid(Adapter);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "uiSearchRuleIndex:0x%X ",
- uiSearchRuleIndex);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Direction:0x%X ",
- pstAddIndication->u8Direction);
- if (uiSearchRuleIndex < NO_OF_QUEUES) {
- Adapter->PackInfo[uiSearchRuleIndex].ucDirection =
- pstAddIndication->u8Direction;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "bValid:0x%X ",
- pstAddIndication->sfActiveSet.bValid);
- if (pstAddIndication->sfActiveSet.bValid == TRUE)
- Adapter->PackInfo[uiSearchRuleIndex].bActiveSet = TRUE;
-
- if (pstAddIndication->sfAuthorizedSet.bValid == TRUE)
- Adapter->PackInfo[uiSearchRuleIndex].bAuthorizedSet = TRUE;
-
- if (pstAddIndication->sfAdmittedSet.bValid == TRUE)
- Adapter->PackInfo[uiSearchRuleIndex].bAdmittedSet = TRUE;
-
- if (pstAddIndication->sfActiveSet.bValid == false) {
- Adapter->PackInfo[uiSearchRuleIndex].bActive = false;
- Adapter->PackInfo[uiSearchRuleIndex].bActivateRequestSent = false;
- if (pstAddIndication->sfAdmittedSet.bValid)
- psfLocalSet = &pstAddIndication->sfAdmittedSet;
- else if (pstAddIndication->sfAuthorizedSet.bValid)
- psfLocalSet = &pstAddIndication->sfAuthorizedSet;
- } else {
- psfLocalSet = &pstAddIndication->sfActiveSet;
- Adapter->PackInfo[uiSearchRuleIndex].bActive = TRUE;
- }
-
- if (!psfLocalSet) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "No set is valid\n");
- Adapter->PackInfo[uiSearchRuleIndex].bActive = false;
- Adapter->PackInfo[uiSearchRuleIndex].bValid = false;
- Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = 0;
- kfree(pstAddIndication);
- } else if (psfLocalSet->bValid && (pstAddIndication->u8CC == 0)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "DSA ACK");
- Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = ntohs(pstAddIndication->u16VCID);
- Adapter->PackInfo[uiSearchRuleIndex].usCID = ntohs(pstAddIndication->u16CID);
-
- if (UPLINK_DIR == pstAddIndication->u8Direction)
- atomic_set(&Adapter->PackInfo[uiSearchRuleIndex].uiPerSFTxResourceCount, DEFAULT_PERSFCOUNT);
-
- CopyToAdapter(Adapter, psfLocalSet, uiSearchRuleIndex, DSA_ACK, pstAddIndication);
- /* don't free pstAddIndication */
-
-				/* Inside CopyToAdapter, sorting of all the SFs takes place.
- * Hence any access to the newly added SF through uiSearchRuleIndex is invalid.
- * SHOULD BE STRICTLY AVOIDED.
- */
- /* *(PULONG)(((PUCHAR)pvBuffer)+1)=psfLocalSet->u32SFID; */
- memcpy((((PUCHAR)pvBuffer)+1), &psfLocalSet->u32SFID, 4);
-
- if (pstAddIndication->sfActiveSet.bValid == TRUE) {
- if (UPLINK_DIR == pstAddIndication->u8Direction) {
- if (!Adapter->LinkUpStatus) {
- netif_carrier_on(Adapter->dev);
- netif_start_queue(Adapter->dev);
- Adapter->LinkUpStatus = 1;
- if (netif_msg_link(Adapter))
- pr_info(PFX "%s: link up\n", Adapter->dev->name);
- atomic_set(&Adapter->TxPktAvail, 1);
- wake_up(&Adapter->tx_packet_wait_queue);
- Adapter->liTimeSinceLastNetEntry = get_seconds();
- }
- }
- }
- } else {
- Adapter->PackInfo[uiSearchRuleIndex].bActive = false;
- Adapter->PackInfo[uiSearchRuleIndex].bValid = false;
- Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = 0;
- kfree(pstAddIndication);
- }
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "DSA ACK did not get valid SFID");
- kfree(pstAddIndication);
- return false;
- }
- break;
- case DSC_REQ:
- pLeader->PLength = sizeof(struct bcm_change_indication);
- pstChangeIndication = (struct bcm_change_indication *)pstAddIndication;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSC RESPONSE TO MAC %d", pLeader->PLength);
-
- *((struct bcm_change_indication *)&(Adapter->caDsxReqResp[LEADER_SIZE])) = *pstChangeIndication;
- ((struct bcm_change_indication *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSC_RSP;
-
- CopyBufferToControlPacket(Adapter, (PVOID)Adapter->caDsxReqResp);
- kfree(pstAddIndication);
- break;
- case DSC_RSP:
- pLeader->PLength = sizeof(struct bcm_change_indication);
- pstChangeIndication = (struct bcm_change_indication *)pstAddIndication;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSC ACK TO MAC %d", pLeader->PLength);
- *((struct bcm_change_indication *)&(Adapter->caDsxReqResp[LEADER_SIZE])) = *pstChangeIndication;
- ((struct bcm_change_indication *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSC_ACK;
- /* FALLTHROUGH */
- case DSC_ACK:
- pstChangeIndication = (struct bcm_change_indication *)pstAddIndication;
- uiSearchRuleIndex = SearchSfid(Adapter, ntohl(pstChangeIndication->sfActiveSet.u32SFID));
- if (uiSearchRuleIndex > NO_OF_QUEUES-1)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "SF doesn't exist for which DSC_ACK is received");
-
- if (uiSearchRuleIndex < NO_OF_QUEUES) {
- Adapter->PackInfo[uiSearchRuleIndex].ucDirection = pstChangeIndication->u8Direction;
- if (pstChangeIndication->sfActiveSet.bValid == TRUE)
- Adapter->PackInfo[uiSearchRuleIndex].bActiveSet = TRUE;
-
- if (pstChangeIndication->sfAuthorizedSet.bValid == TRUE)
- Adapter->PackInfo[uiSearchRuleIndex].bAuthorizedSet = TRUE;
-
- if (pstChangeIndication->sfAdmittedSet.bValid == TRUE)
- Adapter->PackInfo[uiSearchRuleIndex].bAdmittedSet = TRUE;
-
- if (pstChangeIndication->sfActiveSet.bValid == false) {
- Adapter->PackInfo[uiSearchRuleIndex].bActive = false;
- Adapter->PackInfo[uiSearchRuleIndex].bActivateRequestSent = false;
-
- if (pstChangeIndication->sfAdmittedSet.bValid)
- psfLocalSet = &pstChangeIndication->sfAdmittedSet;
- else if (pstChangeIndication->sfAuthorizedSet.bValid)
- psfLocalSet = &pstChangeIndication->sfAuthorizedSet;
- } else {
- psfLocalSet = &pstChangeIndication->sfActiveSet;
- Adapter->PackInfo[uiSearchRuleIndex].bActive = TRUE;
- }
-
- if (!psfLocalSet) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "No set is valid\n");
- Adapter->PackInfo[uiSearchRuleIndex].bActive = false;
- Adapter->PackInfo[uiSearchRuleIndex].bValid = false;
- Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = 0;
- kfree(pstAddIndication);
- } else if (psfLocalSet->bValid && (pstChangeIndication->u8CC == 0)) {
- Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = ntohs(pstChangeIndication->u16VCID);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "CC field is %d bvalid = %d\n",
- pstChangeIndication->u8CC, psfLocalSet->bValid);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "VCID= %d\n", ntohs(pstChangeIndication->u16VCID));
- Adapter->PackInfo[uiSearchRuleIndex].usCID = ntohs(pstChangeIndication->u16CID);
- CopyToAdapter(Adapter, psfLocalSet, uiSearchRuleIndex, DSC_ACK, pstAddIndication);
-
- *(PULONG)(((PUCHAR)pvBuffer)+1) = psfLocalSet->u32SFID;
- } else if (pstChangeIndication->u8CC == 6) {
- deleteSFBySfid(Adapter, uiSearchRuleIndex);
- kfree(pstAddIndication);
- }
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "DSC ACK did not get valid SFID");
- kfree(pstAddIndication);
- return false;
- }
- break;
- case DSD_REQ:
- pLeader->PLength = sizeof(struct bcm_del_indication);
- *((struct bcm_del_indication *)&(Adapter->caDsxReqResp[LEADER_SIZE])) = *((struct bcm_del_indication *)pstAddIndication);
-
- ulSFID = ntohl(((struct bcm_del_indication *)pstAddIndication)->u32SFID);
- uiSearchRuleIndex = SearchSfid(Adapter, ulSFID);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "DSD - Removing connection %x", uiSearchRuleIndex);
-
- if (uiSearchRuleIndex < NO_OF_QUEUES) {
- /* Delete All Classifiers Associated with this SFID */
- deleteSFBySfid(Adapter, uiSearchRuleIndex);
- Adapter->u32TotalDSD++;
- }
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSD RESPONSE TO MAC");
- ((struct bcm_del_indication *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSD_RSP;
- CopyBufferToControlPacket(Adapter, (PVOID)Adapter->caDsxReqResp);
- /* FALLTHROUGH */
- case DSD_RSP:
- /* Do nothing as SF has already got Deleted */
- break;
- case DSD_ACK:
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "DSD ACK Rcd, let App handle it\n");
- break;
- default:
- kfree(pstAddIndication);
- return false;
- }
- return TRUE;
-}
-
-int get_dsx_sf_data_to_application(struct bcm_mini_adapter *Adapter,
- UINT uiSFId, void __user *user_buffer)
-{
- int status = 0;
- struct bcm_packet_info *psSfInfo = NULL;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "status =%d", status);
- status = SearchSfid(Adapter, uiSFId);
- if (status >= NO_OF_QUEUES) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "SFID %d not present in queue !!!", uiSFId);
- return -EINVAL;
- }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "status =%d", status);
- psSfInfo = &Adapter->PackInfo[status];
- if (psSfInfo->pstSFIndication
- && copy_to_user(user_buffer, psSfInfo->pstSFIndication,
- sizeof(struct bcm_add_indication_alt))) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "copy to user failed SFID %d, present in queue !!!",
- uiSFId);
- status = -EFAULT;
- return status;
- }
- return STATUS_SUCCESS;
-}
-
-VOID OverrideServiceFlowParams(struct bcm_mini_adapter *Adapter,
- PUINT puiBuffer)
-{
- B_UINT32 u32NumofSFsinMsg = ntohl(*(puiBuffer + 1));
- struct bcm_stim_sfhostnotify *pHostInfo = NULL;
- UINT uiSearchRuleIndex = 0;
- ULONG ulSFID = 0;
-
- puiBuffer += 2;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "u32NumofSFsinMsg: 0x%x\n", u32NumofSFsinMsg);
-
- while (u32NumofSFsinMsg != 0 && u32NumofSFsinMsg < NO_OF_QUEUES) {
- u32NumofSFsinMsg--;
- pHostInfo = (struct bcm_stim_sfhostnotify *)puiBuffer;
- puiBuffer = (PUINT)(pHostInfo + 1);
-
- ulSFID = ntohl(pHostInfo->SFID);
- uiSearchRuleIndex = SearchSfid(Adapter, ulSFID);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "SFID: 0x%lx\n", ulSFID);
-
- if (uiSearchRuleIndex >= NO_OF_QUEUES
- || uiSearchRuleIndex == HiPriority) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG,
- DBG_LVL_ALL,
- "The SFID <%lx> doesn't exist in host entry or is Invalid\n",
- ulSFID);
- continue;
- }
-
- if (pHostInfo->RetainSF == false) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG,
- DBG_LVL_ALL, "Going to Delete SF");
- deleteSFBySfid(Adapter, uiSearchRuleIndex);
- } else {
- struct bcm_packet_info *packinfo =
- &Adapter->PackInfo[uiSearchRuleIndex];
-
- packinfo->usVCID_Value = ntohs(pHostInfo->VCID);
- packinfo->usCID = ntohs(pHostInfo->newCID);
- packinfo->bActive = false;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG,
- DBG_LVL_ALL,
- "pHostInfo->QoSParamSet: 0x%x\n",
- pHostInfo->QoSParamSet);
-
- if (pHostInfo->QoSParamSet & 0x1)
- packinfo->bAuthorizedSet = TRUE;
- if (pHostInfo->QoSParamSet & 0x2)
- packinfo->bAdmittedSet = TRUE;
- if (pHostInfo->QoSParamSet & 0x4) {
- packinfo->bActiveSet = TRUE;
- packinfo->bActive = TRUE;
- }
- }
- }
-}
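OverrideServiceFlowParams() above walks a device-supplied message consisting of two leading 32-bit words (the second carrying the record count) followed by that many fixed-size bcm_stim_sfhostnotify records. A standalone sketch of the same walk; the record layout below is an assumed stand-in, not the driver's actual struct.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

struct sf_notify {			/* illustrative stand-in for bcm_stim_sfhostnotify */
	uint32_t sfid;			/* big-endian on the wire */
	uint16_t vcid;
	uint16_t new_cid;
	uint8_t retain_sf;
	uint8_t qos_param_set;
	uint8_t reserved[2];
};

static void walk_sf_notify(const uint32_t *msg, uint32_t max_records)
{
	uint32_t count = ntohl(msg[1]);		/* msg[0] is skipped, as in the driver */
	const struct sf_notify *rec = (const struct sf_notify *)(msg + 2);

	if (count == 0 || count >= max_records)	/* distrust implausible counts, as the driver does */
		return;

	for (uint32_t i = 0; i < count; i++, rec++)
		printf("SFID 0x%x retain=%u qos=0x%x\n",
		       ntohl(rec->sfid), rec->retain_sf, rec->qos_param_set);
}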
-
-static void restore_endianess_of_pstClassifierEntry(
- struct bcm_classifier_rule *pstClassifierEntry,
- enum bcm_ipaddr_context eIpAddrContext)
-{
- int i;
- union u_ip_address *stSrc = &pstClassifierEntry->stSrcIpAddress;
- union u_ip_address *stDest = &pstClassifierEntry->stDestIpAddress;
-
- for (i = 0; i < MAX_IP_RANGE_LENGTH * 4; i++) {
- if (eIpAddrContext == eSrcIpAddress) {
- stSrc->ulIpv6Addr[i] = ntohl(stSrc->ulIpv6Addr[i]);
- stSrc->ulIpv6Mask[i] = ntohl(stSrc->ulIpv6Mask[i]);
- } else if (eIpAddrContext == eDestIpAddress) {
- stDest->ulIpv6Addr[i] = ntohl(stDest->ulIpv6Addr[i]);
- stDest->ulIpv6Mask[i] = ntohl(stDest->ulIpv6Mask[i]);
- }
- }
-}
-
-static void apply_phs_rule_to_all_classifiers(
- register struct bcm_mini_adapter *Adapter, /* <Pointer to the Adapter structure */
- register UINT uiSearchRuleIndex, /* <Index of Queue, to which this data belongs */
- USHORT uVCID,
- struct bcm_phs_rule *sPhsRule,
- struct bcm_phs_rules *cPhsRule,
- struct bcm_add_indication_alt *pstAddIndication)
-{
- unsigned int uiClassifierIndex = 0;
- struct bcm_classifier_rule *curr_classifier = NULL;
-
- if (pstAddIndication->u8Direction == UPLINK_DIR) {
- for (uiClassifierIndex = 0; uiClassifierIndex < MAX_CLASSIFIERS; uiClassifierIndex++) {
- curr_classifier =
- &Adapter->astClassifierTable[uiClassifierIndex];
- if ((curr_classifier->bUsed) &&
- (curr_classifier->ulSFID == Adapter->PackInfo[uiSearchRuleIndex].ulSFID) &&
- (curr_classifier->u8AssociatedPHSI == cPhsRule->u8PHSI)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Adding PHS Rule For Classifier: 0x%x cPhsRule.u8PHSI: 0x%x\n",
- curr_classifier->uiClassifierRuleIndex,
- cPhsRule->u8PHSI);
- /* Update The PHS Rule for this classifier as Associated PHSI id defined */
-
- /* Copy the PHS Rule */
- sPhsRule->u8PHSI = cPhsRule->u8PHSI;
- sPhsRule->u8PHSFLength = cPhsRule->u8PHSFLength;
- sPhsRule->u8PHSMLength = cPhsRule->u8PHSMLength;
- sPhsRule->u8PHSS = cPhsRule->u8PHSS;
- sPhsRule->u8PHSV = cPhsRule->u8PHSV;
- memcpy(sPhsRule->u8PHSF, cPhsRule->u8PHSF, MAX_PHS_LENGTHS);
- memcpy(sPhsRule->u8PHSM, cPhsRule->u8PHSM, MAX_PHS_LENGTHS);
- sPhsRule->u8RefCnt = 0;
- sPhsRule->bUnclassifiedPHSRule = false;
- sPhsRule->PHSModifiedBytes = 0;
- sPhsRule->PHSModifiedNumPackets = 0;
- sPhsRule->PHSErrorNumPackets = 0;
-
- /* bPHSRuleAssociated = TRUE; */
- /* Store The PHS Rule for this classifier */
-
- PhsUpdateClassifierRule(
- &Adapter->stBCMPhsContext,
- uVCID,
- curr_classifier->uiClassifierRuleIndex,
- sPhsRule,
- curr_classifier->u8AssociatedPHSI);
-
- /* Update PHS Rule For the Classifier */
- if (sPhsRule->u8PHSI) {
- curr_classifier->u32PHSRuleID = sPhsRule->u8PHSI;
- memcpy(&curr_classifier->sPhsRule, sPhsRule, sizeof(struct bcm_phs_rule));
- }
- }
- }
- } else {
- /* Error PHS Rule specified in signaling could not be applied to any classifier */
-
- /* Copy the PHS Rule */
- sPhsRule->u8PHSI = cPhsRule->u8PHSI;
- sPhsRule->u8PHSFLength = cPhsRule->u8PHSFLength;
- sPhsRule->u8PHSMLength = cPhsRule->u8PHSMLength;
- sPhsRule->u8PHSS = cPhsRule->u8PHSS;
- sPhsRule->u8PHSV = cPhsRule->u8PHSV;
- memcpy(sPhsRule->u8PHSF, cPhsRule->u8PHSF, MAX_PHS_LENGTHS);
- memcpy(sPhsRule->u8PHSM, cPhsRule->u8PHSM, MAX_PHS_LENGTHS);
- sPhsRule->u8RefCnt = 0;
- sPhsRule->bUnclassifiedPHSRule = TRUE;
- sPhsRule->PHSModifiedBytes = 0;
- sPhsRule->PHSModifiedNumPackets = 0;
- sPhsRule->PHSErrorNumPackets = 0;
- /* Store The PHS Rule for this classifier */
-
- /*
-		 * Pass u8PHSI as the argument instead of clsid because, for DL with no classifier rule,
-		 * clsid will be zero and hence we can't have multiple PHS rules for the same SF.
-		 * Passing u8PHSI supports multiple PHS rules.
- */
- PhsUpdateClassifierRule(
- &Adapter->stBCMPhsContext,
- uVCID,
- sPhsRule->u8PHSI,
- sPhsRule,
- sPhsRule->u8PHSI);
- }
-}
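
Both the uplink and the downlink branches of apply_phs_rule_to_all_classifiers() above perform the same field-by-field copy from cPhsRule into sPhsRule. A minimal sketch of a helper that could factor out that duplication, assuming only the structure members visible above, would look like this (bcm_copy_phs_rule() is hypothetical and was never part of the driver):

static void bcm_copy_phs_rule(struct bcm_phs_rule *dst,
			      const struct bcm_phs_rules *src,
			      bool unclassified)
{
	/* hypothetical helper, not present in the driver being removed */
	dst->u8PHSI = src->u8PHSI;
	dst->u8PHSFLength = src->u8PHSFLength;
	dst->u8PHSMLength = src->u8PHSMLength;
	dst->u8PHSS = src->u8PHSS;
	dst->u8PHSV = src->u8PHSV;
	memcpy(dst->u8PHSF, src->u8PHSF, MAX_PHS_LENGTHS);
	memcpy(dst->u8PHSM, src->u8PHSM, MAX_PHS_LENGTHS);
	dst->u8RefCnt = 0;
	dst->bUnclassifiedPHSRule = unclassified;
	dst->PHSModifiedBytes = 0;
	dst->PHSModifiedNumPackets = 0;
	dst->PHSErrorNumPackets = 0;
}
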
diff --git a/drivers/staging/bcm/CmHost.h b/drivers/staging/bcm/CmHost.h
deleted file mode 100644
index 0887d3f49e2..00000000000
--- a/drivers/staging/bcm/CmHost.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/***************************************************************************
- * (c) Beceem Communications Inc.
- * All Rights Reserved
- *
- * file : CmHost.h
- * author: Rajeev Tirumala
- * date : September 8 , 2006
- * brief : Definitions for Connection Management Requests structure
- *          which we will use to set up our connection structures. It's high
- * time we had a header file for CmHost.cpp to isolate the way
- * f/w sends DSx messages and the way we interpret them in code.
- * Revision History
- *
- * Date Author Version Description
- * 08-Sep-06 Rajeev 0.1 Created
- ***************************************************************************/
-#ifndef _CM_HOST_H
-#define _CM_HOST_H
-
-#pragma once
-#pragma pack(push, 4)
-
-#define DSX_MESSAGE_EXCHANGE_BUFFER 0xBF60AC84 /* This contains the pointer */
-#define DSX_MESSAGE_EXCHANGE_BUFFER_SIZE 72000 /* 24 K Bytes */
-
-struct bcm_add_indication_alt {
- u8 u8Type;
- u8 u8Direction;
- u16 u16TID;
- u16 u16CID;
- u16 u16VCID;
- struct bcm_connect_mgr_params sfAuthorizedSet;
- struct bcm_connect_mgr_params sfAdmittedSet;
- struct bcm_connect_mgr_params sfActiveSet;
- u8 u8CC; /* < Confirmation Code */
- u8 u8Padd;
- u16 u16Padd;
-};
-
-struct bcm_change_indication {
- u8 u8Type;
- u8 u8Direction;
- u16 u16TID;
- u16 u16CID;
- u16 u16VCID;
- struct bcm_connect_mgr_params sfAuthorizedSet;
- struct bcm_connect_mgr_params sfAdmittedSet;
- struct bcm_connect_mgr_params sfActiveSet;
- u8 u8CC; /* < Confirmation Code */
- u8 u8Padd;
- u16 u16Padd;
-};
-
-unsigned long StoreCmControlResponseMessage(struct bcm_mini_adapter *Adapter, void *pvBuffer, unsigned int *puBufferLength);
-int AllocAdapterDsxBuffer(struct bcm_mini_adapter *Adapter);
-int FreeAdapterDsxBuffer(struct bcm_mini_adapter *Adapter);
-unsigned long SetUpTargetDsxBuffers(struct bcm_mini_adapter *Adapter);
-bool CmControlResponseMessage(struct bcm_mini_adapter *Adapter, void *pvBuffer);
-
-#pragma pack(pop)
-
-#endif
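
get_dsx_sf_data_to_application() copies a whole struct bcm_add_indication_alt to user space, so the layout fixed by the #pragma pack(push, 4) region above is effectively an ABI between the driver and the user-space application. A hedged sketch of build-time checks documenting the leading offsets under 4-byte packing; these asserts are illustrative only and were not in the header:

#include <stddef.h>

_Static_assert(offsetof(struct bcm_add_indication_alt, u8Direction) == 1,
	       "u8Direction follows u8Type");
_Static_assert(offsetof(struct bcm_add_indication_alt, u16TID) == 2,
	       "u16TID is 2-byte aligned");
_Static_assert(offsetof(struct bcm_add_indication_alt, u16VCID) == 6,
	       "u16VCID ends the fixed header");
_Static_assert(offsetof(struct bcm_add_indication_alt, sfAuthorizedSet) == 8,
	       "parameter sets start at offset 8");
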
diff --git a/drivers/staging/bcm/DDRInit.c b/drivers/staging/bcm/DDRInit.c
deleted file mode 100644
index 4226c931cd4..00000000000
--- a/drivers/staging/bcm/DDRInit.c
+++ /dev/null
@@ -1,1355 +0,0 @@
-#include "headers.h"
-
-
-
-#define DDR_DUMP_INTERNAL_DEVICE_MEMORY 0xBFC02B00
-#define MIPS_CLOCK_REG 0x0f000820
-
-/* DDR INIT-133Mhz */
-#define T3_SKIP_CLOCK_PROGRAM_DUMP_133MHZ 12 /* index for 0x0F007000 */
-static struct bcm_ddr_setting asT3_DDRSetting133MHz[] = {
- /* DPLL Clock Setting */
- {0x0F000800, 0x00007212},
- {0x0f000820, 0x07F13FFF},
- {0x0f000810, 0x00000F95},
- {0x0f000860, 0x00000000},
- {0x0f000880, 0x000003DD},
- /* Changed source for X-bar and MIPS clock to APLL */
- {0x0f000840, 0x0FFF1B00},
- {0x0f000870, 0x00000002},
- {0x0F00a044, 0x1fffffff},
- {0x0F00a040, 0x1f000000},
- {0x0F00a084, 0x1Cffffff},
- {0x0F00a080, 0x1C000000},
- {0x0F00a04C, 0x0000000C},
- /* Memcontroller Default values */
- {0x0F007000, 0x00010001},
- {0x0F007004, 0x01010100},
- {0x0F007008, 0x01000001},
- {0x0F00700c, 0x00000000},
- {0x0F007010, 0x01000000},
- {0x0F007014, 0x01000100},
- {0x0F007018, 0x01000000},
- {0x0F00701c, 0x01020001},
- {0x0F007020, 0x04030107},
- {0x0F007024, 0x02000007},
- {0x0F007028, 0x02020202},
- {0x0F00702c, 0x0206060a},
- {0x0F007030, 0x05000000},
- {0x0F007034, 0x00000003},
- {0x0F007038, 0x110a0200},
- {0x0F00703C, 0x02101010},
- {0x0F007040, 0x45751200},
- {0x0F007044, 0x110a0d00},
- {0x0F007048, 0x081b0306},
- {0x0F00704c, 0x00000000},
- {0x0F007050, 0x0000001c},
- {0x0F007054, 0x00000000},
- {0x0F007058, 0x00000000},
- {0x0F00705c, 0x00000000},
- {0x0F007060, 0x0010246c},
- {0x0F007064, 0x00000010},
- {0x0F007068, 0x00000000},
- {0x0F00706c, 0x00000001},
- {0x0F007070, 0x00007000},
- {0x0F007074, 0x00000000},
- {0x0F007078, 0x00000000},
- {0x0F00707C, 0x00000000},
- {0x0F007080, 0x00000000},
- {0x0F007084, 0x00000000},
- /* Enable BW improvement within memory controller */
- {0x0F007094, 0x00000104},
- /* Enable 2 ports within X-bar */
- {0x0F00A000, 0x00000016},
- /* Enable start bit within memory controller */
- {0x0F007018, 0x01010000}
-};
-/* 80Mhz */
-#define T3_SKIP_CLOCK_PROGRAM_DUMP_80MHZ 10 /* index for 0x0F007000 */
-static struct bcm_ddr_setting asT3_DDRSetting80MHz[] = {
- /* DPLL Clock Setting */
- {0x0f000810, 0x00000F95},
- {0x0f000820, 0x07f1ffff},
- {0x0f000860, 0x00000000},
- {0x0f000880, 0x000003DD},
- {0x0F00a044, 0x1fffffff},
- {0x0F00a040, 0x1f000000},
- {0x0F00a084, 0x1Cffffff},
- {0x0F00a080, 0x1C000000},
- {0x0F00a000, 0x00000016},
- {0x0F00a04C, 0x0000000C},
- /* Memcontroller Default values */
- {0x0F007000, 0x00010001},
- {0x0F007004, 0x01000000},
- {0x0F007008, 0x01000001},
- {0x0F00700c, 0x00000000},
- {0x0F007010, 0x01000000},
- {0x0F007014, 0x01000100},
- {0x0F007018, 0x01000000},
- {0x0F00701c, 0x01020000},
- {0x0F007020, 0x04020107},
- {0x0F007024, 0x00000007},
- {0x0F007028, 0x02020201},
- {0x0F00702c, 0x0204040a},
- {0x0F007030, 0x04000000},
- {0x0F007034, 0x00000002},
- {0x0F007038, 0x1F060200},
- {0x0F00703C, 0x1C22221F},
- {0x0F007040, 0x8A006600},
- {0x0F007044, 0x221a0800},
- {0x0F007048, 0x02690204},
- {0x0F00704c, 0x00000000},
- {0x0F007050, 0x0000001c},
- {0x0F007054, 0x00000000},
- {0x0F007058, 0x00000000},
- {0x0F00705c, 0x00000000},
- {0x0F007060, 0x000A15D6},
- {0x0F007064, 0x0000000A},
- {0x0F007068, 0x00000000},
- {0x0F00706c, 0x00000001},
- {0x0F007070, 0x00004000},
- {0x0F007074, 0x00000000},
- {0x0F007078, 0x00000000},
- {0x0F00707C, 0x00000000},
- {0x0F007080, 0x00000000},
- {0x0F007084, 0x00000000},
- {0x0F007094, 0x00000104},
- /* Enable start bit within memory controller */
- {0x0F007018, 0x01010000}
-};
-/* 100Mhz */
-#define T3_SKIP_CLOCK_PROGRAM_DUMP_100MHZ 13 /* index for 0x0F007000 */
-static struct bcm_ddr_setting asT3_DDRSetting100MHz[] = {
- /* DPLL Clock Setting */
- {0x0F000800, 0x00007008},
- {0x0f000810, 0x00000F95},
- {0x0f000820, 0x07F13E3F},
- {0x0f000860, 0x00000000},
- {0x0f000880, 0x000003DD},
- /* Changed source for X-bar and MIPS clock to APLL */
- {0x0f000840, 0x0FFF1B00},
- {0x0f000870, 0x00000002},
- {0x0F00a044, 0x1fffffff},
- {0x0F00a040, 0x1f000000},
- {0x0F00a084, 0x1Cffffff},
- {0x0F00a080, 0x1C000000},
- {0x0F00a04C, 0x0000000C},
- /* Enable 2 ports within X-bar */
- {0x0F00A000, 0x00000016},
- /* Memcontroller Default values */
- {0x0F007000, 0x00010001},
- {0x0F007004, 0x01010100},
- {0x0F007008, 0x01000001},
- {0x0F00700c, 0x00000000},
- {0x0F007010, 0x01000000},
- {0x0F007014, 0x01000100},
- {0x0F007018, 0x01000000},
- {0x0F00701c, 0x01020001},
- {0x0F007020, 0x04020107},
- {0x0F007024, 0x00000007},
- {0x0F007028, 0x01020201},
- {0x0F00702c, 0x0204040A},
- {0x0F007030, 0x06000000},
- {0x0F007034, 0x00000004},
- {0x0F007038, 0x20080200},
- {0x0F00703C, 0x02030320},
- {0x0F007040, 0x6E7F1200},
- {0x0F007044, 0x01190A00},
- {0x0F007048, 0x06120305},
- {0x0F00704c, 0x00000000},
- {0x0F007050, 0x0000001C},
- {0x0F007054, 0x00000000},
- {0x0F007058, 0x00000000},
- {0x0F00705c, 0x00000000},
- {0x0F007060, 0x00082ED6},
- {0x0F007064, 0x0000000A},
- {0x0F007068, 0x00000000},
- {0x0F00706c, 0x00000001},
- {0x0F007070, 0x00005000},
- {0x0F007074, 0x00000000},
- {0x0F007078, 0x00000000},
- {0x0F00707C, 0x00000000},
- {0x0F007080, 0x00000000},
- {0x0F007084, 0x00000000},
- /* Enable BW improvement within memory controller */
- {0x0F007094, 0x00000104},
- /* Enable start bit within memory controller */
- {0x0F007018, 0x01010000}
-};
-
-/* Net T3B DDR Settings
- * DDR INIT-133Mhz
- */
-static struct bcm_ddr_setting asDPLL_266MHZ[] = {
- {0x0F000800, 0x00007212},
- {0x0f000820, 0x07F13FFF},
- {0x0f000810, 0x00000F95},
- {0x0f000860, 0x00000000},
- {0x0f000880, 0x000003DD},
- /* Changed source for X-bar and MIPS clock to APLL */
- {0x0f000840, 0x0FFF1B00},
- {0x0f000870, 0x00000002}
-};
-
-#define T3B_SKIP_CLOCK_PROGRAM_DUMP_133MHZ 11 /* index for 0x0F007000 */
-static struct bcm_ddr_setting asT3B_DDRSetting133MHz[] = {
- /* DPLL Clock Setting */
- {0x0f000810, 0x00000F95},
- {0x0f000810, 0x00000F95},
- {0x0f000810, 0x00000F95},
- {0x0f000820, 0x07F13652},
- {0x0f000840, 0x0FFF0800},
- /* Changed source for X-bar and MIPS clock to APLL */
- {0x0f000880, 0x000003DD},
- {0x0f000860, 0x00000000},
- /* Changed source for X-bar and MIPS clock to APLL */
- {0x0F00a044, 0x1fffffff},
- {0x0F00a040, 0x1f000000},
- {0x0F00a084, 0x1Cffffff},
- {0x0F00a080, 0x1C000000},
- /* Enable 2 ports within X-bar */
- {0x0F00A000, 0x00000016},
- /* Memcontroller Default values */
- {0x0F007000, 0x00010001},
- {0x0F007004, 0x01010100},
- {0x0F007008, 0x01000001},
- {0x0F00700c, 0x00000000},
- {0x0F007010, 0x01000000},
- {0x0F007014, 0x01000100},
- {0x0F007018, 0x01000000},
- {0x0F00701c, 0x01020001},
- {0x0F007020, 0x04030107},
- {0x0F007024, 0x02000007},
- {0x0F007028, 0x02020202},
- {0x0F00702c, 0x0206060a},
- {0x0F007030, 0x05000000},
- {0x0F007034, 0x00000003},
- {0x0F007038, 0x130a0200},
- {0x0F00703C, 0x02101012},
- {0x0F007040, 0x457D1200},
- {0x0F007044, 0x11130d00},
- {0x0F007048, 0x040D0306},
- {0x0F00704c, 0x00000000},
- {0x0F007050, 0x0000001c},
- {0x0F007054, 0x00000000},
- {0x0F007058, 0x00000000},
- {0x0F00705c, 0x00000000},
- {0x0F007060, 0x0010246c},
- {0x0F007064, 0x00000012},
- {0x0F007068, 0x00000000},
- {0x0F00706c, 0x00000001},
- {0x0F007070, 0x00007000},
- {0x0F007074, 0x00000000},
- {0x0F007078, 0x00000000},
- {0x0F00707C, 0x00000000},
- {0x0F007080, 0x00000000},
- {0x0F007084, 0x00000000},
- /* Enable BW improvement within memory controller */
- {0x0F007094, 0x00000104},
- /* Enable start bit within memory controller */
- {0x0F007018, 0x01010000},
- };
-
-#define T3B_SKIP_CLOCK_PROGRAM_DUMP_80MHZ 9 /* index for 0x0F007000 */
-static struct bcm_ddr_setting asT3B_DDRSetting80MHz[] = {
- /* DPLL Clock Setting */
- {0x0f000810, 0x00000F95},
- {0x0f000820, 0x07F13FFF},
- {0x0f000840, 0x0FFF1F00},
- {0x0f000880, 0x000003DD},
- {0x0f000860, 0x00000000},
-
- {0x0F00a044, 0x1fffffff},
- {0x0F00a040, 0x1f000000},
- {0x0F00a084, 0x1Cffffff},
- {0x0F00a080, 0x1C000000},
- {0x0F00a000, 0x00000016},
- /* Memcontroller Default values */
- {0x0F007000, 0x00010001},
- {0x0F007004, 0x01000000},
- {0x0F007008, 0x01000001},
- {0x0F00700c, 0x00000000},
- {0x0F007010, 0x01000000},
- {0x0F007014, 0x01000100},
- {0x0F007018, 0x01000000},
- {0x0F00701c, 0x01020000},
- {0x0F007020, 0x04020107},
- {0x0F007024, 0x00000007},
- {0x0F007028, 0x02020201},
- {0x0F00702c, 0x0204040a},
- {0x0F007030, 0x04000000},
- {0x0F007034, 0x02000002},
- {0x0F007038, 0x1F060202},
- {0x0F00703C, 0x1C22221F},
- {0x0F007040, 0x8A006600},
- {0x0F007044, 0x221a0800},
- {0x0F007048, 0x02690204},
- {0x0F00704c, 0x00000000},
- {0x0F007050, 0x0100001c},
- {0x0F007054, 0x00000000},
- {0x0F007058, 0x00000000},
- {0x0F00705c, 0x00000000},
- {0x0F007060, 0x000A15D6},
- {0x0F007064, 0x0000000A},
- {0x0F007068, 0x00000000},
- {0x0F00706c, 0x00000001},
- {0x0F007070, 0x00004000},
- {0x0F007074, 0x00000000},
- {0x0F007078, 0x00000000},
- {0x0F00707C, 0x00000000},
- {0x0F007080, 0x00000000},
- {0x0F007084, 0x00000000},
- {0x0F007094, 0x00000104},
- /* Enable start bit within memory controller */
- {0x0F007018, 0x01010000}
-};
-
-/* 100Mhz */
-#define T3B_SKIP_CLOCK_PROGRAM_DUMP_100MHZ 9 /* index for 0x0F007000 */
-static struct bcm_ddr_setting asT3B_DDRSetting100MHz[] = {
- /* DPLL Clock Setting */
- {0x0f000810, 0x00000F95},
- {0x0f000820, 0x07F1369B},
- {0x0f000840, 0x0FFF0800},
- {0x0f000880, 0x000003DD},
- {0x0f000860, 0x00000000},
- {0x0F00a044, 0x1fffffff},
- {0x0F00a040, 0x1f000000},
- {0x0F00a084, 0x1Cffffff},
- {0x0F00a080, 0x1C000000},
- /* Enable 2 ports within X-bar */
- {0x0F00A000, 0x00000016},
- /* Memcontroller Default values */
- {0x0F007000, 0x00010001},
- {0x0F007004, 0x01010100},
- {0x0F007008, 0x01000001},
- {0x0F00700c, 0x00000000},
- {0x0F007010, 0x01000000},
- {0x0F007014, 0x01000100},
- {0x0F007018, 0x01000000},
- {0x0F00701c, 0x01020000},
- {0x0F007020, 0x04020107},
- {0x0F007024, 0x00000007},
- {0x0F007028, 0x01020201},
- {0x0F00702c, 0x0204040A},
- {0x0F007030, 0x06000000},
- {0x0F007034, 0x02000004},
- {0x0F007038, 0x20080200},
- {0x0F00703C, 0x02030320},
- {0x0F007040, 0x6E7F1200},
- {0x0F007044, 0x01190A00},
- {0x0F007048, 0x06120305},
- {0x0F00704c, 0x00000000},
- {0x0F007050, 0x0100001C},
- {0x0F007054, 0x00000000},
- {0x0F007058, 0x00000000},
- {0x0F00705c, 0x00000000},
- {0x0F007060, 0x00082ED6},
- {0x0F007064, 0x0000000A},
- {0x0F007068, 0x00000000},
- {0x0F00706c, 0x00000001},
- {0x0F007070, 0x00005000},
- {0x0F007074, 0x00000000},
- {0x0F007078, 0x00000000},
- {0x0F00707C, 0x00000000},
- {0x0F007080, 0x00000000},
- {0x0F007084, 0x00000000},
- /* Enable BW improvement within memory controller */
- {0x0F007094, 0x00000104},
- /* Enable start bit within memory controller */
- {0x0F007018, 0x01010000}
-};
-
-
-#define T3LP_SKIP_CLOCK_PROGRAM_DUMP_133MHZ 9 /* index for 0x0F007000 */
-static struct bcm_ddr_setting asT3LP_DDRSetting133MHz[] = {
- /* DPLL Clock Setting */
- {0x0f000820, 0x03F1365B},
- {0x0f000810, 0x00002F95},
- {0x0f000880, 0x000003DD},
- /* Changed source for X-bar and MIPS clock to APLL */
- {0x0f000840, 0x0FFF0000},
- {0x0f000860, 0x00000000},
- {0x0F00a044, 0x1fffffff},
- {0x0F00a040, 0x1f000000},
- {0x0F00a084, 0x1Cffffff},
- {0x0F00a080, 0x1C000000},
- {0x0F00A000, 0x00000016},
- /* Memcontroller Default values */
- {0x0F007000, 0x00010001},
- {0x0F007004, 0x01010100},
- {0x0F007008, 0x01000001},
- {0x0F00700c, 0x00000000},
- {0x0F007010, 0x01000000},
- {0x0F007014, 0x01000100},
- {0x0F007018, 0x01000000},
- {0x0F00701c, 0x01020001},
- {0x0F007020, 0x04030107},
- {0x0F007024, 0x02000007},
- {0x0F007028, 0x02020200},
- {0x0F00702c, 0x0206060a},
- {0x0F007030, 0x05000000},
- {0x0F007034, 0x00000003},
- {0x0F007038, 0x200a0200},
- {0x0F00703C, 0x02101020},
- {0x0F007040, 0x45711200},
- {0x0F007044, 0x110D0D00},
- {0x0F007048, 0x04080306},
- {0x0F00704c, 0x00000000},
- {0x0F007050, 0x0100001c},
- {0x0F007054, 0x00000000},
- {0x0F007058, 0x00000000},
- {0x0F00705c, 0x00000000},
- {0x0F007060, 0x0010245F},
- {0x0F007064, 0x00000010},
- {0x0F007068, 0x00000000},
- {0x0F00706c, 0x00000001},
- {0x0F007070, 0x00007000},
- {0x0F007074, 0x00000000},
- {0x0F007078, 0x00000000},
- {0x0F00707C, 0x00000000},
- {0x0F007080, 0x00000000},
- {0x0F007084, 0x00000000},
- {0x0F007088, 0x01000001},
- {0x0F00708c, 0x00000101},
- {0x0F007090, 0x00000000},
- /* Enable BW improvement within memory controller */
- {0x0F007094, 0x00040000},
- {0x0F007098, 0x00000000},
- {0x0F0070c8, 0x00000104},
- /* Enable 2 ports within X-bar */
- /* Enable start bit within memory controller */
- {0x0F007018, 0x01010000}
-};
-
-#define T3LP_SKIP_CLOCK_PROGRAM_DUMP_100MHZ 11 /* index for 0x0F007000 */
-static struct bcm_ddr_setting asT3LP_DDRSetting100MHz[] = {
- /* DPLL Clock Setting */
- {0x0f000810, 0x00002F95},
- {0x0f000820, 0x03F1369B},
- {0x0f000840, 0x0fff0000},
- {0x0f000860, 0x00000000},
- {0x0f000880, 0x000003DD},
- /* Changed source for X-bar and MIPS clock to APLL */
- {0x0f000840, 0x0FFF0000},
- {0x0F00a044, 0x1fffffff},
- {0x0F00a040, 0x1f000000},
- {0x0F00a084, 0x1Cffffff},
- {0x0F00a080, 0x1C000000},
- /* Memcontroller Default values */
- {0x0F007000, 0x00010001},
- {0x0F007004, 0x01010100},
- {0x0F007008, 0x01000001},
- {0x0F00700c, 0x00000000},
- {0x0F007010, 0x01000000},
- {0x0F007014, 0x01000100},
- {0x0F007018, 0x01000000},
- {0x0F00701c, 0x01020000},
- {0x0F007020, 0x04020107},
- {0x0F007024, 0x00000007},
- {0x0F007028, 0x01020200},
- {0x0F00702c, 0x0204040a},
- {0x0F007030, 0x06000000},
- {0x0F007034, 0x00000004},
- {0x0F007038, 0x1F080200},
- {0x0F00703C, 0x0203031F},
- {0x0F007040, 0x6e001200},
- {0x0F007044, 0x011a0a00},
- {0x0F007048, 0x03000305},
- {0x0F00704c, 0x00000000},
- {0x0F007050, 0x0100001c},
- {0x0F007054, 0x00000000},
- {0x0F007058, 0x00000000},
- {0x0F00705c, 0x00000000},
- {0x0F007060, 0x00082ED6},
- {0x0F007064, 0x0000000A},
- {0x0F007068, 0x00000000},
- {0x0F00706c, 0x00000001},
- {0x0F007070, 0x00005000},
- {0x0F007074, 0x00000000},
- {0x0F007078, 0x00000000},
- {0x0F00707C, 0x00000000},
- {0x0F007080, 0x00000000},
- {0x0F007084, 0x00000000},
- {0x0F007088, 0x01000001},
- {0x0F00708c, 0x00000101},
- {0x0F007090, 0x00000000},
- {0x0F007094, 0x00010000},
- {0x0F007098, 0x00000000},
- {0x0F0070C8, 0x00000104},
- /* Enable 2 ports within X-bar */
- {0x0F00A000, 0x00000016},
- /* Enable start bit within memory controller */
- {0x0F007018, 0x01010000}
-};
-
-#define T3LP_SKIP_CLOCK_PROGRAM_DUMP_80MHZ 9 /* index for 0x0F007000 */
-static struct bcm_ddr_setting asT3LP_DDRSetting80MHz[] = {
- /* DPLL Clock Setting */
- {0x0f000820, 0x07F13FFF},
- {0x0f000810, 0x00002F95},
- {0x0f000860, 0x00000000},
- {0x0f000880, 0x000003DD},
- {0x0f000840, 0x0FFF1F00},
- {0x0F00a044, 0x1fffffff},
- {0x0F00a040, 0x1f000000},
- {0x0F00a084, 0x1Cffffff},
- {0x0F00a080, 0x1C000000},
- {0x0F00A000, 0x00000016},
- {0x0f007000, 0x00010001},
- {0x0f007004, 0x01000000},
- {0x0f007008, 0x01000001},
- {0x0f00700c, 0x00000000},
- {0x0f007010, 0x01000000},
- {0x0f007014, 0x01000100},
- {0x0f007018, 0x01000000},
- {0x0f00701c, 0x01020000},
- {0x0f007020, 0x04020107},
- {0x0f007024, 0x00000007},
- {0x0f007028, 0x02020200},
- {0x0f00702c, 0x0204040a},
- {0x0f007030, 0x04000000},
- {0x0f007034, 0x00000002},
- {0x0f007038, 0x1d060200},
- {0x0f00703c, 0x1c22221d},
- {0x0f007040, 0x8A116600},
- {0x0f007044, 0x222d0800},
- {0x0f007048, 0x02690204},
- {0x0f00704c, 0x00000000},
- {0x0f007050, 0x0100001c},
- {0x0f007054, 0x00000000},
- {0x0f007058, 0x00000000},
- {0x0f00705c, 0x00000000},
- {0x0f007060, 0x000A15D6},
- {0x0f007064, 0x0000000A},
- {0x0f007068, 0x00000000},
- {0x0f00706c, 0x00000001},
- {0x0f007070, 0x00004000},
- {0x0f007074, 0x00000000},
- {0x0f007078, 0x00000000},
- {0x0f00707c, 0x00000000},
- {0x0f007080, 0x00000000},
- {0x0f007084, 0x00000000},
- {0x0f007088, 0x01000001},
- {0x0f00708c, 0x00000101},
- {0x0f007090, 0x00000000},
- {0x0f007094, 0x00010000},
- {0x0f007098, 0x00000000},
- {0x0F0070C8, 0x00000104},
- {0x0F007018, 0x01010000}
-};
-
-
-
-
-/* T3 LP-B (UMA-B) */
-
-#define T3LPB_SKIP_CLOCK_PROGRAM_DUMP_160MHZ 7 /* index for 0x0F007000 */
-static struct bcm_ddr_setting asT3LPB_DDRSetting160MHz[] = {
- /* DPLL Clock Setting */
- {0x0f000820, 0x03F137DB},
- {0x0f000810, 0x01842795},
- {0x0f000860, 0x00000000},
- {0x0f000880, 0x000003DD},
- {0x0f000840, 0x0FFF0400},
- {0x0F00a044, 0x1fffffff},
- {0x0F00a040, 0x1f000000},
-	{0x0f003050, 0x00000021}, /* this is the flash/eeprom clock divisor which
-				   * sets the flash clock to 20 MHz */
-	{0x0F00a084, 0x1Cffffff}, /* Now dump from here in internal memory */
- {0x0F00a080, 0x1C000000},
- {0x0F00A000, 0x00000016},
- {0x0f007000, 0x00010001},
- {0x0f007004, 0x01000001},
- {0x0f007008, 0x01000101},
- {0x0f00700c, 0x00000000},
- {0x0f007010, 0x01000100},
- {0x0f007014, 0x01000100},
- {0x0f007018, 0x01000000},
- {0x0f00701c, 0x01020000},
- {0x0f007020, 0x04030107},
- {0x0f007024, 0x02000007},
- {0x0f007028, 0x02020200},
- {0x0f00702c, 0x0206060a},
- {0x0f007030, 0x050d0d00},
- {0x0f007034, 0x00000003},
- {0x0f007038, 0x170a0200},
- {0x0f00703c, 0x02101012},
- {0x0f007040, 0x45161200},
- {0x0f007044, 0x11250c00},
- {0x0f007048, 0x04da0307},
- {0x0f00704c, 0x00000000},
- {0x0f007050, 0x0000001c},
- {0x0f007054, 0x00000000},
- {0x0f007058, 0x00000000},
- {0x0f00705c, 0x00000000},
- {0x0f007060, 0x00142bb6},
- {0x0f007064, 0x20430014},
- {0x0f007068, 0x00000000},
- {0x0f00706c, 0x00000001},
- {0x0f007070, 0x00009000},
- {0x0f007074, 0x00000000},
- {0x0f007078, 0x00000000},
- {0x0f00707c, 0x00000000},
- {0x0f007080, 0x00000000},
- {0x0f007084, 0x00000000},
- {0x0f007088, 0x01000001},
- {0x0f00708c, 0x00000101},
- {0x0f007090, 0x00000000},
- {0x0f007094, 0x00040000},
- {0x0f007098, 0x00000000},
- {0x0F0070C8, 0x00000104},
- {0x0F007018, 0x01010000}
-};
-
-
-#define T3LPB_SKIP_CLOCK_PROGRAM_DUMP_133MHZ 7 /* index for 0x0F007000 */
-static struct bcm_ddr_setting asT3LPB_DDRSetting133MHz[] = {
- /* DPLL Clock Setting */
- {0x0f000820, 0x03F1365B},
- {0x0f000810, 0x00002F95},
- {0x0f000880, 0x000003DD},
- /* Changed source for X-bar and MIPS clock to APLL */
- {0x0f000840, 0x0FFF0000},
- {0x0f000860, 0x00000000},
- {0x0F00a044, 0x1fffffff},
- {0x0F00a040, 0x1f000000},
-	{0x0f003050, 0x00000021}, /* flash/eeprom clock divisor which
-				   * sets the flash clock to 20 MHz */
- {0x0F00a084, 0x1Cffffff}, /* dump from here in internal memory */
- {0x0F00a080, 0x1C000000},
- {0x0F00A000, 0x00000016},
- /* Memcontroller Default values */
- {0x0F007000, 0x00010001},
- {0x0F007004, 0x01010100},
- {0x0F007008, 0x01000001},
- {0x0F00700c, 0x00000000},
- {0x0F007010, 0x01000000},
- {0x0F007014, 0x01000100},
- {0x0F007018, 0x01000000},
- {0x0F00701c, 0x01020001},
- {0x0F007020, 0x04030107},
- {0x0F007024, 0x02000007},
- {0x0F007028, 0x02020200},
- {0x0F00702c, 0x0206060a},
- {0x0F007030, 0x05000000},
- {0x0F007034, 0x00000003},
- {0x0F007038, 0x190a0200},
- {0x0F00703C, 0x02101017},
- {0x0F007040, 0x45171200},
- {0x0F007044, 0x11290D00},
- {0x0F007048, 0x04080306},
- {0x0F00704c, 0x00000000},
- {0x0F007050, 0x0100001c},
- {0x0F007054, 0x00000000},
- {0x0F007058, 0x00000000},
- {0x0F00705c, 0x00000000},
- {0x0F007060, 0x0010245F},
- {0x0F007064, 0x00000010},
- {0x0F007068, 0x00000000},
- {0x0F00706c, 0x00000001},
- {0x0F007070, 0x00007000},
- {0x0F007074, 0x00000000},
- {0x0F007078, 0x00000000},
- {0x0F00707C, 0x00000000},
- {0x0F007080, 0x00000000},
- {0x0F007084, 0x00000000},
- {0x0F007088, 0x01000001},
- {0x0F00708c, 0x00000101},
- {0x0F007090, 0x00000000},
- /* Enable BW improvement within memory controller */
- {0x0F007094, 0x00040000},
- {0x0F007098, 0x00000000},
- {0x0F0070c8, 0x00000104},
- /* Enable 2 ports within X-bar */
- /* Enable start bit within memory controller */
- {0x0F007018, 0x01010000}
-};
-
-#define T3LPB_SKIP_CLOCK_PROGRAM_DUMP_100MHZ 8 /* index for 0x0F007000 */
-static struct bcm_ddr_setting asT3LPB_DDRSetting100MHz[] = {
- /* DPLL Clock Setting */
- {0x0f000810, 0x00002F95},
- {0x0f000820, 0x03F1369B},
- {0x0f000840, 0x0fff0000},
- {0x0f000860, 0x00000000},
- {0x0f000880, 0x000003DD},
- /* Changed source for X-bar and MIPS clock to APLL */
- {0x0f000840, 0x0FFF0000},
- {0x0F00a044, 0x1fffffff},
- {0x0F00a040, 0x1f000000},
-	{0x0f003050, 0x00000021}, /* flash/eeprom clock divisor which
-				   * sets the flash clock to 20 MHz */
- {0x0F00a084, 0x1Cffffff}, /* dump from here in internal memory */
- {0x0F00a080, 0x1C000000},
- /* Memcontroller Default values */
- {0x0F007000, 0x00010001},
- {0x0F007004, 0x01010100},
- {0x0F007008, 0x01000001},
- {0x0F00700c, 0x00000000},
- {0x0F007010, 0x01000000},
- {0x0F007014, 0x01000100},
- {0x0F007018, 0x01000000},
- {0x0F00701c, 0x01020000},
- {0x0F007020, 0x04020107},
- {0x0F007024, 0x00000007},
- {0x0F007028, 0x01020200},
- {0x0F00702c, 0x0204040a},
- {0x0F007030, 0x06000000},
- {0x0F007034, 0x00000004},
- {0x0F007038, 0x1F080200},
- {0x0F00703C, 0x0203031F},
- {0x0F007040, 0x6e001200},
- {0x0F007044, 0x011a0a00},
- {0x0F007048, 0x03000305},
- {0x0F00704c, 0x00000000},
- {0x0F007050, 0x0100001c},
- {0x0F007054, 0x00000000},
- {0x0F007058, 0x00000000},
- {0x0F00705c, 0x00000000},
- {0x0F007060, 0x00082ED6},
- {0x0F007064, 0x0000000A},
- {0x0F007068, 0x00000000},
- {0x0F00706c, 0x00000001},
- {0x0F007070, 0x00005000},
- {0x0F007074, 0x00000000},
- {0x0F007078, 0x00000000},
- {0x0F00707C, 0x00000000},
- {0x0F007080, 0x00000000},
- {0x0F007084, 0x00000000},
- {0x0F007088, 0x01000001},
- {0x0F00708c, 0x00000101},
- {0x0F007090, 0x00000000},
- {0x0F007094, 0x00010000},
- {0x0F007098, 0x00000000},
- {0x0F0070C8, 0x00000104},
- /* Enable 2 ports within X-bar */
- {0x0F00A000, 0x00000016},
- /* Enable start bit within memory controller */
- {0x0F007018, 0x01010000}
-};
-
-#define T3LPB_SKIP_CLOCK_PROGRAM_DUMP_80MHZ 7 /* index for 0x0F007000 */
-static struct bcm_ddr_setting asT3LPB_DDRSetting80MHz[] = {
- /* DPLL Clock Setting */
- {0x0f000820, 0x07F13FFF},
- {0x0f000810, 0x00002F95},
- {0x0f000860, 0x00000000},
- {0x0f000880, 0x000003DD},
- {0x0f000840, 0x0FFF1F00},
- {0x0F00a044, 0x1fffffff},
- {0x0F00a040, 0x1f000000},
-	{0x0f003050, 0x00000021}, /* flash/eeprom clock divisor
-				    * which sets the flash clock to 20 MHz */
- {0x0F00a084, 0x1Cffffff}, /* dump from here in internal memory */
- {0x0F00a080, 0x1C000000},
- {0x0F00A000, 0x00000016},
- {0x0f007000, 0x00010001},
- {0x0f007004, 0x01000000},
- {0x0f007008, 0x01000001},
- {0x0f00700c, 0x00000000},
- {0x0f007010, 0x01000000},
- {0x0f007014, 0x01000100},
- {0x0f007018, 0x01000000},
- {0x0f00701c, 0x01020000},
- {0x0f007020, 0x04020107},
- {0x0f007024, 0x00000007},
- {0x0f007028, 0x02020200},
- {0x0f00702c, 0x0204040a},
- {0x0f007030, 0x04000000},
- {0x0f007034, 0x00000002},
- {0x0f007038, 0x1d060200},
- {0x0f00703c, 0x1c22221d},
- {0x0f007040, 0x8A116600},
- {0x0f007044, 0x222d0800},
- {0x0f007048, 0x02690204},
- {0x0f00704c, 0x00000000},
- {0x0f007050, 0x0100001c},
- {0x0f007054, 0x00000000},
- {0x0f007058, 0x00000000},
- {0x0f00705c, 0x00000000},
- {0x0f007060, 0x000A15D6},
- {0x0f007064, 0x0000000A},
- {0x0f007068, 0x00000000},
- {0x0f00706c, 0x00000001},
- {0x0f007070, 0x00004000},
- {0x0f007074, 0x00000000},
- {0x0f007078, 0x00000000},
- {0x0f00707c, 0x00000000},
- {0x0f007080, 0x00000000},
- {0x0f007084, 0x00000000},
- {0x0f007088, 0x01000001},
- {0x0f00708c, 0x00000101},
- {0x0f007090, 0x00000000},
- {0x0f007094, 0x00010000},
- {0x0f007098, 0x00000000},
- {0x0F0070C8, 0x00000104},
- {0x0F007018, 0x01010000}
-};
-
-
-int ddr_init(struct bcm_mini_adapter *Adapter)
-{
- struct bcm_ddr_setting *psDDRSetting = NULL;
- ULONG RegCount = 0;
- UINT value = 0;
- UINT uiResetValue = 0;
- UINT uiClockSetting = 0;
- int retval = STATUS_SUCCESS;
-
- switch (Adapter->chip_id) {
- case 0xbece3200:
- switch (Adapter->DDRSetting) {
- case DDR_80_MHZ:
- psDDRSetting = asT3LP_DDRSetting80MHz;
- RegCount = (sizeof(asT3LP_DDRSetting80MHz) /
- sizeof(struct bcm_ddr_setting));
- break;
- case DDR_100_MHZ:
- psDDRSetting = asT3LP_DDRSetting100MHz;
- RegCount = (sizeof(asT3LP_DDRSetting100MHz) /
- sizeof(struct bcm_ddr_setting));
- break;
- case DDR_133_MHZ:
- psDDRSetting = asT3LP_DDRSetting133MHz;
- RegCount = (sizeof(asT3LP_DDRSetting133MHz) /
- sizeof(struct bcm_ddr_setting));
- if (Adapter->bMipsConfig == MIPS_200_MHZ)
- uiClockSetting = 0x03F13652;
- else
- uiClockSetting = 0x03F1365B;
- break;
- default:
- return -EINVAL;
- }
-
- break;
- case T3LPB:
- case BCS220_2:
- case BCS220_2BC:
- case BCS250_BC:
- case BCS220_3:
- /* Set bit 2 and bit 6 to 1 for BBIC 2mA drive
- * (please check current value and additionally set these bits)
- */
- if ((Adapter->chip_id != BCS220_2) &&
- (Adapter->chip_id != BCS220_2BC) &&
- (Adapter->chip_id != BCS220_3)) {
- retval = rdmalt(Adapter, (UINT)0x0f000830, &uiResetValue,
- sizeof(uiResetValue));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL,
- "%s:%d RDM failed\n",
- __func__, __LINE__);
- return retval;
- }
- uiResetValue |= 0x44;
- retval = wrmalt(Adapter, (UINT)0x0f000830, &uiResetValue,
- sizeof(uiResetValue));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL,
- "%s:%d RDM failed\n",
- __func__, __LINE__);
- return retval;
- }
- }
- switch (Adapter->DDRSetting) {
-
-
-
- case DDR_80_MHZ:
- psDDRSetting = asT3LPB_DDRSetting80MHz;
-			RegCount = (sizeof(asT3LPB_DDRSetting80MHz) /
-				    sizeof(struct bcm_ddr_setting));
- break;
- case DDR_100_MHZ:
- psDDRSetting = asT3LPB_DDRSetting100MHz;
-			RegCount = (sizeof(asT3LPB_DDRSetting100MHz) /
-				    sizeof(struct bcm_ddr_setting));
- break;
- case DDR_133_MHZ:
- psDDRSetting = asT3LPB_DDRSetting133MHz;
-			RegCount = (sizeof(asT3LPB_DDRSetting133MHz) /
-				    sizeof(struct bcm_ddr_setting));
-
- if (Adapter->bMipsConfig == MIPS_200_MHZ)
- uiClockSetting = 0x03F13652;
- else
- uiClockSetting = 0x03F1365B;
- break;
-
- case DDR_160_MHZ:
- psDDRSetting = asT3LPB_DDRSetting160MHz;
- RegCount = sizeof(asT3LPB_DDRSetting160MHz) /
- sizeof(struct bcm_ddr_setting);
-
- if (Adapter->bMipsConfig == MIPS_200_MHZ)
- uiClockSetting = 0x03F137D2;
- else
- uiClockSetting = 0x03F137DB;
- }
- break;
-
- case 0xbece0110:
- case 0xbece0120:
- case 0xbece0121:
- case 0xbece0130:
- case 0xbece0300:
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "DDR Setting: %x\n", Adapter->DDRSetting);
- switch (Adapter->DDRSetting) {
- case DDR_80_MHZ:
- psDDRSetting = asT3_DDRSetting80MHz;
- RegCount = (sizeof(asT3_DDRSetting80MHz) /
- sizeof(struct bcm_ddr_setting));
- break;
- case DDR_100_MHZ:
- psDDRSetting = asT3_DDRSetting100MHz;
- RegCount = (sizeof(asT3_DDRSetting100MHz) /
- sizeof(struct bcm_ddr_setting));
- break;
- case DDR_133_MHZ:
- psDDRSetting = asT3_DDRSetting133MHz;
- RegCount = (sizeof(asT3_DDRSetting133MHz) /
- sizeof(struct bcm_ddr_setting));
- break;
- default:
- return -EINVAL;
- }
- case 0xbece0310:
- {
- switch (Adapter->DDRSetting) {
- case DDR_80_MHZ:
- psDDRSetting = asT3B_DDRSetting80MHz;
- RegCount = (sizeof(asT3B_DDRSetting80MHz) /
- sizeof(struct bcm_ddr_setting));
- break;
- case DDR_100_MHZ:
- psDDRSetting = asT3B_DDRSetting100MHz;
- RegCount = (sizeof(asT3B_DDRSetting100MHz) /
- sizeof(struct bcm_ddr_setting));
- break;
- case DDR_133_MHZ:
-
- /* 266Mhz PLL selected. */
- if (Adapter->bDPLLConfig == PLL_266_MHZ) {
- memcpy(asT3B_DDRSetting133MHz, asDPLL_266MHZ,
- sizeof(asDPLL_266MHZ));
- psDDRSetting = asT3B_DDRSetting133MHz;
- RegCount = (sizeof(asT3B_DDRSetting133MHz) /
- sizeof(struct bcm_ddr_setting));
- } else {
- psDDRSetting = asT3B_DDRSetting133MHz;
- RegCount = (sizeof(asT3B_DDRSetting133MHz) /
- sizeof(struct bcm_ddr_setting));
- if (Adapter->bMipsConfig == MIPS_200_MHZ)
- uiClockSetting = 0x07F13652;
- else
- uiClockSetting = 0x07F1365B;
- }
- break;
- default:
- return -EINVAL;
- }
- break;
-
- }
- default:
- return -EINVAL;
- }
-
- value = 0;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "Register Count is =%lu\n", RegCount);
- while (RegCount && !retval) {
- if (uiClockSetting
- && psDDRSetting->ulRegAddress == MIPS_CLOCK_REG)
- value = uiClockSetting;
- else
- value = psDDRSetting->ulRegValue;
- retval = wrmalt(Adapter, psDDRSetting->ulRegAddress, &value,
- sizeof(value));
- if (STATUS_SUCCESS != retval) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "%s:%d\n", __func__, __LINE__);
- break;
- }
-
- RegCount--;
- psDDRSetting++;
- }
-
- if (Adapter->chip_id >= 0xbece3300) {
-
- mdelay(3);
- if ((Adapter->chip_id != BCS220_2) &&
- (Adapter->chip_id != BCS220_2BC) &&
- (Adapter->chip_id != BCS220_3)) {
- /* drive MDDR to half in case of UMA-B: */
- uiResetValue = 0x01010001;
- retval = wrmalt(Adapter, (UINT)0x0F007018,
- &uiResetValue, sizeof(uiResetValue));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, CMHOST, RDM,
- DBG_LVL_ALL,
- "%s:%d RDM failed\n",
- __func__,
- __LINE__);
- return retval;
- }
- uiResetValue = 0x00040020;
- retval = wrmalt(Adapter, (UINT)0x0F007094,
- &uiResetValue, sizeof(uiResetValue));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, CMHOST, RDM,
- DBG_LVL_ALL,
- "%s:%d RDM failed\n",
- __func__,
- __LINE__);
- return retval;
- }
- uiResetValue = 0x01020101;
- retval = wrmalt(Adapter, (UINT)0x0F00701c,
- &uiResetValue, sizeof(uiResetValue));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, CMHOST, RDM,
- DBG_LVL_ALL,
- "%s:%d RDM failed\n",
- __func__,
- __LINE__);
- return retval;
- }
- uiResetValue = 0x01010000;
- retval = wrmalt(Adapter, (UINT)0x0F007018,
- &uiResetValue, sizeof(uiResetValue));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, CMHOST, RDM,
- DBG_LVL_ALL,
- "%s:%d RDM failed\n",
- __func__,
- __LINE__);
- return retval;
- }
- }
- mdelay(3);
-
- /* DC/DC standby change...
- * This is to be done only for Hybrid PMU mode.
-		 * With the current h/w there is no way to detect this,
-		 * and since we don't have an internal PMU, let's do it under
-		 * the UMA-B chip id. We will change this when we have an
-		 * internal PMU.
- */
- if (Adapter->PmuMode == HYBRID_MODE_7C) {
- retval = rdmalt(Adapter, (UINT)0x0f000c00,
- &uiResetValue, sizeof(uiResetValue));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, CMHOST, RDM,
- DBG_LVL_ALL,
- "%s:%d RDM failed\n",
- __func__,
- __LINE__);
- return retval;
- }
- retval = rdmalt(Adapter, (UINT)0x0f000c00,
- &uiResetValue, sizeof(uiResetValue));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, CMHOST, RDM,
- DBG_LVL_ALL,
- "%s:%d RDM failed\n",
- __func__,
- __LINE__);
- return retval;
- }
- uiResetValue = 0x1322a8;
- retval = wrmalt(Adapter, (UINT)0x0f000d1c,
- &uiResetValue, sizeof(uiResetValue));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, CMHOST, RDM,
- DBG_LVL_ALL,
- "%s:%d RDM failed\n",
- __func__,
- __LINE__);
- return retval;
- }
- retval = rdmalt(Adapter, (UINT)0x0f000c00,
- &uiResetValue, sizeof(uiResetValue));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, RDM,
- DBG_LVL_ALL,
- "%s:%d RDM failed\n",
- __func__,
- __LINE__);
- return retval;
- }
- retval = rdmalt(Adapter, (UINT)0x0f000c00,
- &uiResetValue, sizeof(uiResetValue));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, CMHOST, RDM,
- DBG_LVL_ALL,
- "%s:%d RDM failed\n",
- __func__,
- __LINE__);
- return retval;
- }
- uiResetValue = 0x132296;
- retval = wrmalt(Adapter, (UINT)0x0f000d14,
- &uiResetValue, sizeof(uiResetValue));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, CMHOST, RDM,
- DBG_LVL_ALL,
- "%s:%d RDM failed\n",
- __func__,
- __LINE__);
- return retval;
- }
- } else if (Adapter->PmuMode == HYBRID_MODE_6) {
-
- retval = rdmalt(Adapter, (UINT)0x0f000c00,
- &uiResetValue, sizeof(uiResetValue));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, CMHOST, RDM,
- DBG_LVL_ALL,
- "%s:%d RDM failed\n",
- __func__,
- __LINE__);
- return retval;
- }
- retval = rdmalt(Adapter, (UINT)0x0f000c00,
- &uiResetValue, sizeof(uiResetValue));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, CMHOST, RDM,
- DBG_LVL_ALL,
- "%s:%d RDM failed\n",
- __func__,
- __LINE__);
- return retval;
- }
- uiResetValue = 0x6003229a;
- retval = wrmalt(Adapter, (UINT)0x0f000d14,
- &uiResetValue, sizeof(uiResetValue));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, CMHOST, RDM,
- DBG_LVL_ALL,
- "%s:%d RDM failed\n",
- __func__,
- __LINE__);
- return retval;
- }
- retval = rdmalt(Adapter, (UINT)0x0f000c00,
- &uiResetValue, sizeof(uiResetValue));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, CMHOST, RDM,
- DBG_LVL_ALL,
- "%s:%d RDM failed\n",
- __func__,
- __LINE__);
- return retval;
- }
- retval = rdmalt(Adapter, (UINT)0x0f000c00,
- &uiResetValue, sizeof(uiResetValue));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, CMHOST, RDM,
- DBG_LVL_ALL,
- "%s:%d RDM failed\n",
- __func__,
- __LINE__);
- return retval;
- }
- uiResetValue = 0x1322a8;
- retval = wrmalt(Adapter, (UINT)0x0f000d1c,
- &uiResetValue, sizeof(uiResetValue));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, CMHOST, RDM,
- DBG_LVL_ALL,
- "%s:%d RDM failed\n",
- __func__,
- __LINE__);
- return retval;
- }
- }
-
- }
- Adapter->bDDRInitDone = TRUE;
- return retval;
-}
-
-int download_ddr_settings(struct bcm_mini_adapter *Adapter)
-{
- struct bcm_ddr_setting *psDDRSetting = NULL;
- ULONG RegCount = 0;
- unsigned long ul_ddr_setting_load_addr =
- DDR_DUMP_INTERNAL_DEVICE_MEMORY;
- UINT value = 0;
- int retval = STATUS_SUCCESS;
- bool bOverrideSelfRefresh = false;
-
- switch (Adapter->chip_id) {
- case 0xbece3200:
- switch (Adapter->DDRSetting) {
- case DDR_80_MHZ:
- psDDRSetting = asT3LP_DDRSetting80MHz;
- RegCount = ARRAY_SIZE(asT3LP_DDRSetting80MHz);
- RegCount -= T3LP_SKIP_CLOCK_PROGRAM_DUMP_80MHZ;
- psDDRSetting += T3LP_SKIP_CLOCK_PROGRAM_DUMP_80MHZ;
- break;
- case DDR_100_MHZ:
- psDDRSetting = asT3LP_DDRSetting100MHz;
- RegCount = ARRAY_SIZE(asT3LP_DDRSetting100MHz);
- RegCount -= T3LP_SKIP_CLOCK_PROGRAM_DUMP_100MHZ;
- psDDRSetting += T3LP_SKIP_CLOCK_PROGRAM_DUMP_100MHZ;
- break;
- case DDR_133_MHZ:
- bOverrideSelfRefresh = TRUE;
- psDDRSetting = asT3LP_DDRSetting133MHz;
- RegCount = ARRAY_SIZE(asT3LP_DDRSetting133MHz);
- RegCount -= T3LP_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
- psDDRSetting += T3LP_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
- break;
- default:
- return -EINVAL;
- }
- break;
-
- case T3LPB:
- case BCS220_2:
- case BCS220_2BC:
- case BCS250_BC:
- case BCS220_3:
- switch (Adapter->DDRSetting) {
- case DDR_80_MHZ:
- psDDRSetting = asT3LPB_DDRSetting80MHz;
- RegCount = ARRAY_SIZE(asT3LPB_DDRSetting80MHz);
- RegCount -= T3LPB_SKIP_CLOCK_PROGRAM_DUMP_80MHZ;
- psDDRSetting += T3LPB_SKIP_CLOCK_PROGRAM_DUMP_80MHZ;
- break;
- case DDR_100_MHZ:
- psDDRSetting = asT3LPB_DDRSetting100MHz;
- RegCount = ARRAY_SIZE(asT3LPB_DDRSetting100MHz);
- RegCount -= T3LPB_SKIP_CLOCK_PROGRAM_DUMP_100MHZ;
- psDDRSetting += T3LPB_SKIP_CLOCK_PROGRAM_DUMP_100MHZ;
- break;
- case DDR_133_MHZ:
- bOverrideSelfRefresh = TRUE;
- psDDRSetting = asT3LPB_DDRSetting133MHz;
- RegCount = ARRAY_SIZE(asT3LPB_DDRSetting133MHz);
- RegCount -= T3LPB_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
- psDDRSetting += T3LPB_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
- break;
-
- case DDR_160_MHZ:
- bOverrideSelfRefresh = TRUE;
- psDDRSetting = asT3LPB_DDRSetting160MHz;
- RegCount = ARRAY_SIZE(asT3LPB_DDRSetting160MHz);
- RegCount -= T3LPB_SKIP_CLOCK_PROGRAM_DUMP_160MHZ;
- psDDRSetting += T3LPB_SKIP_CLOCK_PROGRAM_DUMP_160MHZ;
-
- break;
- default:
- return -EINVAL;
- }
- break;
- case 0xbece0300:
- switch (Adapter->DDRSetting) {
- case DDR_80_MHZ:
- psDDRSetting = asT3_DDRSetting80MHz;
- RegCount = ARRAY_SIZE(asT3_DDRSetting80MHz);
- RegCount -= T3_SKIP_CLOCK_PROGRAM_DUMP_80MHZ;
- psDDRSetting += T3_SKIP_CLOCK_PROGRAM_DUMP_80MHZ;
- break;
- case DDR_100_MHZ:
- psDDRSetting = asT3_DDRSetting100MHz;
- RegCount = ARRAY_SIZE(asT3_DDRSetting100MHz);
- RegCount -= T3_SKIP_CLOCK_PROGRAM_DUMP_100MHZ;
- psDDRSetting += T3_SKIP_CLOCK_PROGRAM_DUMP_100MHZ;
- break;
- case DDR_133_MHZ:
- psDDRSetting = asT3_DDRSetting133MHz;
- RegCount = ARRAY_SIZE(asT3_DDRSetting133MHz);
- RegCount -= T3_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
- psDDRSetting += T3_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
- break;
- default:
- return -EINVAL;
- }
- break;
- case 0xbece0310:
- {
- switch (Adapter->DDRSetting) {
- case DDR_80_MHZ:
- psDDRSetting = asT3B_DDRSetting80MHz;
- RegCount = ARRAY_SIZE(asT3B_DDRSetting80MHz);
- RegCount -= T3B_SKIP_CLOCK_PROGRAM_DUMP_80MHZ;
- psDDRSetting += T3B_SKIP_CLOCK_PROGRAM_DUMP_80MHZ;
- break;
- case DDR_100_MHZ:
- psDDRSetting = asT3B_DDRSetting100MHz;
- RegCount = ARRAY_SIZE(asT3B_DDRSetting100MHz);
- RegCount -= T3B_SKIP_CLOCK_PROGRAM_DUMP_100MHZ;
- psDDRSetting += T3B_SKIP_CLOCK_PROGRAM_DUMP_100MHZ;
- break;
- case DDR_133_MHZ:
- bOverrideSelfRefresh = TRUE;
- psDDRSetting = asT3B_DDRSetting133MHz;
- RegCount = ARRAY_SIZE(asT3B_DDRSetting133MHz);
- RegCount -= T3B_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
- psDDRSetting += T3B_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
- break;
- }
- break;
- }
- default:
- return -EINVAL;
- }
- /* total number of Register that has to be dumped */
- value = RegCount;
- retval = wrmalt(Adapter, ul_ddr_setting_load_addr, &value,
- sizeof(value));
- if (retval) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "%s:%d\n", __func__, __LINE__);
-
- return retval;
- }
- ul_ddr_setting_load_addr += sizeof(ULONG);
- /* signature */
- value = (0x1d1e0dd0);
- retval = wrmalt(Adapter, ul_ddr_setting_load_addr, &value,
- sizeof(value));
- if (retval) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "%s:%d\n", __func__, __LINE__);
- return retval;
- }
-
- ul_ddr_setting_load_addr += sizeof(ULONG);
- RegCount *= (sizeof(struct bcm_ddr_setting)/sizeof(ULONG));
-
- while (RegCount && !retval) {
- value = psDDRSetting->ulRegAddress;
- retval = wrmalt(Adapter, ul_ddr_setting_load_addr, &value,
- sizeof(value));
- ul_ddr_setting_load_addr += sizeof(ULONG);
- if (!retval) {
- if (bOverrideSelfRefresh
- && (psDDRSetting->ulRegAddress
- == 0x0F007018))
- value = (psDDRSetting->ulRegValue | (1<<8));
- else
- value = psDDRSetting->ulRegValue;
-
- if (STATUS_SUCCESS != wrmalt(Adapter,
- ul_ddr_setting_load_addr,
- &value,
- sizeof(value))) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "%s:%d\n", __func__, __LINE__);
- break;
- }
- }
- ul_ddr_setting_load_addr += sizeof(ULONG);
- RegCount--;
- psDDRSetting++;
- }
- return retval;
-}
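
For reference, download_ddr_settings() above stages the selected table in internal device memory rather than programming the registers directly: it writes the remaining entry count, then the 0x1d1e0dd0 signature, then one address/value pair per table entry, forcing bit 8 of the 0x0F007018 value when self-refresh is overridden. A rough sketch of that layout, under the assumption that the firmware consumes it as 32-bit words (struct bcm_ddr_dump is illustrative only and does not appear in the driver):

struct bcm_ddr_dump {
	u32 reg_count;		/* entries left after the skip offset */
	u32 signature;		/* 0x1d1e0dd0 */
	struct {
		u32 address;	/* psDDRSetting->ulRegAddress */
		u32 value;	/* ulRegValue, bit 8 ORed in for 0x0F007018
				 * when bOverrideSelfRefresh is set */
	} pair[];
};
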
diff --git a/drivers/staging/bcm/DDRInit.h b/drivers/staging/bcm/DDRInit.h
deleted file mode 100644
index b0196fce925..00000000000
--- a/drivers/staging/bcm/DDRInit.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef _DDR_INIT_H_
-#define _DDR_INIT_H_
-
-
-
-int ddr_init(struct bcm_mini_adapter *psAdapter);
-int download_ddr_settings(struct bcm_mini_adapter *psAdapter);
-
-#endif
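
The two entry points declared here serve different stages: ddr_init() programs the DDR and PLL registers directly over wrmalt(), while download_ddr_settings() stages the same table in device memory for later replay by the firmware. A hedged sketch of one plausible call order (the real call sites live elsewhere in the driver):

	int status;

	status = ddr_init(psAdapter);		/* program DDR/PLL registers */
	if (status == STATUS_SUCCESS)
		status = download_ddr_settings(psAdapter);	/* stage table for f/w */
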
diff --git a/drivers/staging/bcm/Debug.h b/drivers/staging/bcm/Debug.h
deleted file mode 100644
index 7b331215c1a..00000000000
--- a/drivers/staging/bcm/Debug.h
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * Debug.h
- *
- * Dynamic (runtime) debug framework implementation.
- * -kaiwan.
- */
-#ifndef _DEBUG_H
-#define _DEBUG_H
-#include <linux/string.h>
-#define NONE 0xFFFF
-
-/* TYPE and SUBTYPE
- * Define valid TYPE (or category or code-path, however you like to think of it)
- * and SUBTYPE s.
- * Type and SubType are treated as bitmasks.
- */
-#define DBG_TYPE_INITEXIT (1 << 0) /* 1 */
-#define DBG_TYPE_TX (1 << 1) /* 2 */
-#define DBG_TYPE_RX (1 << 2) /* 4 */
-#define DBG_TYPE_OTHERS (1 << 3) /* 8 */
-#define NUMTYPES 4
-
-/* -SUBTYPEs for TX : TYPE is DBG_TYPE_TX -----//
- * Transmit.c ,Arp.c, LeakyBucket.c, And Qos.c
- * total 17 macros
- */
-/* Transmit.c */
-#define TX 1
-#define MP_SEND (TX << 0)
-#define NEXT_SEND (TX << 1)
-#define TX_FIFO (TX << 2)
-#define TX_CONTROL (TX << 3)
-
-/* Arp.c */
-#define IP_ADDR (TX << 4)
-#define ARP_REQ (TX << 5)
-#define ARP_RESP (TX << 6)
-
-/* Leakybucket.c */
-#define TOKEN_COUNTS (TX << 8)
-#define CHECK_TOKENS (TX << 9)
-#define TX_PACKETS (TX << 10)
-#define TIMER (TX << 11)
-
-/* Qos.c */
-#define QOS TX
-#define QUEUE_INDEX (QOS << 12)
-#define IPV4_DBG (QOS << 13)
-#define IPV6_DBG (QOS << 14)
-#define PRUNE_QUEUE (QOS << 15)
-#define SEND_QUEUE (QOS << 16)
-
-/* TX_Misc */
-#define TX_OSAL_DBG (TX << 17)
-
-/* --SUBTYPEs for ------INIT & EXIT---------------------
- * ------------ TYPE is DBG_TYPE_INITEXIT -----//
- * DriverEntry.c, bcmfwup.c, ChipDetectTask.c, HaltnReset.c, InterfaceDDR.c
- */
-#define MP 1
-#define DRV_ENTRY (MP << 0)
-#define MP_INIT (MP << 1)
-#define READ_REG (MP << 3)
-#define DISPATCH (MP << 2)
-#define CLAIM_ADAP (MP << 4)
-#define REG_IO_PORT (MP << 5)
-#define INIT_DISP (MP << 6)
-#define RX_INIT (MP << 7)
-
-/* -SUBTYPEs for --RX----------------------------------
- * ------------RX : TYPE is DBG_TYPE_RX -----//
- * Receive.c
- */
-#define RX 1
-#define RX_DPC (RX << 0)
-#define RX_CTRL (RX << 3)
-#define RX_DATA (RX << 4)
-#define MP_RETURN (RX << 1)
-#define LINK_MSG (RX << 2)
-
-/* -SUBTYPEs for ----OTHER ROUTINES------------------
- * ------------OTHERS : TYPE is DBG_TYPE_OTHER -----//
- * HaltnReset,CheckForHang,PnP,Misc,CmHost
- * total 12 macros
- */
-#define OTHERS 1
-#define ISR OTHERS
-#define MP_DPC (ISR << 0)
-
-/* HaltnReset.c */
-#define HALT OTHERS
-#define MP_HALT (HALT << 1)
-#define CHECK_HANG (HALT << 2)
-#define MP_RESET (HALT << 3)
-#define MP_SHUTDOWN (HALT << 4)
-
-/* pnp.c */
-#define PNP OTHERS
-#define MP_PNP (PNP << 5)
-
-/* Misc.c */
-#define MISC OTHERS
-#define DUMP_INFO (MISC << 6)
-#define CLASSIFY (MISC << 7)
-#define LINK_UP_MSG (MISC << 8)
-#define CP_CTRL_PKT (MISC << 9)
-#define DUMP_CONTROL (MISC << 10)
-#define LED_DUMP_INFO (MISC << 11)
-
-/* CmHost.c */
-#define CMHOST OTHERS
-#define SERIAL (OTHERS << 12)
-#define IDLE_MODE (OTHERS << 13)
-#define WRM (OTHERS << 14)
-#define RDM (OTHERS << 15)
-
-/* TODO - put PHS_SEND in the Tx path and PHS_RECEIVE in the Rx path? */
-#define PHS_SEND (OTHERS << 16)
-#define PHS_RECEIVE (OTHERS << 17)
-#define PHS_MODULE (OTHERS << 18)
-
-#define INTF_INIT (OTHERS << 19)
-#define INTF_ERR (OTHERS << 20)
-#define INTF_WARN (OTHERS << 21)
-#define INTF_NORM (OTHERS << 22)
-
-#define IRP_COMPLETION (OTHERS << 23)
-#define SF_DESCRIPTOR_CNTS (OTHERS << 24)
-#define PHS_DISPATCH (OTHERS << 25)
-#define OSAL_DBG (OTHERS << 26)
-#define NVM_RW (OTHERS << 27)
-
-#define HOST_MIBS (OTHERS << 28)
-#define CONN_MSG (CMHOST << 29)
-
-/* Debug level
- * We have 8 debug levels, in (numerical) increasing order of verbosity.
- * IMP: Currently implementing ONLY DBG_LVL_ALL, i.e., all debug prints will
- * appear (of course, iff global debug flag is ON and we match the Type and SubType).
- * Finer granularity debug levels are currently not in use, although the feature exists.
- *
- * Another way to say this:
- * All the debug prints currently have 'debug_level' set to DBG_LVL_ALL .
- * You can compile-time change that to any of the below, if you wish to. However, as of now, there's
- * no dynamic facility to have the userspace 'TestApp' set debug_level. Slated for future expansion.
- */
-#define BCM_ALL 7
-#define BCM_LOW 6
-#define BCM_PRINT 5
-#define BCM_NORMAL 4
-#define BCM_MEDIUM 3
-#define BCM_SCREAM 2
-#define BCM_ERR 1
-/* Not meant for use by developers in debug prints.
- * To be used to disable all prints by setting the DBG_LVL_CURR to this value
- */
-#define BCM_NONE 0
-
-/* The current driver logging level.
- * Everything at this level and (numerically) lower (meaning higher prio)
- * is logged.
- * Replace 'BCM_ALL' in the DBG_LVL_CURR macro with the logging level desired.
- * For eg. to set the logging level to 'errors only' use:
- * #define DBG_LVL_CURR (BCM_ERR)
- */
-
-#define DBG_LVL_CURR (BCM_ALL)
-#define DBG_LVL_ALL BCM_ALL
-
-/* ---Userspace mapping of Debug State.
- * Deliberately matches that of the Windows driver.
- * The TestApp's ioctl passes this struct to us.
- */
-struct bcm_user_debug_state {
- unsigned int Subtype, Type;
- unsigned int OnOff;
-/* unsigned int debug_level; future expansion */
-} __packed;
-
-/* ---Kernel-space mapping of Debug State */
-struct bcm_debug_state {
- unsigned int type;
- /* A bitmap of 32 bits for Subtype per Type.
- * Valid indexes in 'subtype' array are *only* 1,2,4 and 8,
- * corresponding to valid Type values. Hence we use the 'Type' field
- * as the index value, ignoring the array entries 0,3,5,6,7 !
- */
- unsigned int subtype[(NUMTYPES*2)+1];
- unsigned int debug_level;
-};
-/* Instantiated in the Adapter structure
- * We'll reuse the debug level parameter to include a bit (the MSB) to indicate whether or not
- * we want the function's name printed.
- */
-#define DBG_NO_FUNC_PRINT (1 << 31)
-#define DBG_LVL_BITMASK 0xFF
-
-/* --- Only for direct printk's; "hidden" to API. */
-#define DBG_TYPE_PRINTK 3
-
-#define BCM_DEBUG_PRINT(Adapter, Type, SubType, dbg_level, string, args...) \
- do { \
- if (DBG_TYPE_PRINTK == Type) \
- pr_info("%s:" string, __func__, ##args); \
- else if (Adapter && \
- (dbg_level & DBG_LVL_BITMASK) <= Adapter->stDebugState.debug_level && \
- (Type & Adapter->stDebugState.type) && \
- (SubType & Adapter->stDebugState.subtype[Type])) { \
- if (dbg_level & DBG_NO_FUNC_PRINT) \
- pr_debug("%s:\n", string); \
- else \
- pr_debug("%s:\n" string, __func__, ##args); \
- } \
- } while (0)
-
-#define BCM_DEBUG_PRINT_BUFFER(Adapter, Type, SubType, dbg_level, buffer, bufferlen) \
- do { \
- if (DBG_TYPE_PRINTK == Type || \
- (Adapter && \
- (dbg_level & DBG_LVL_BITMASK) <= Adapter->stDebugState.debug_level && \
- (Type & Adapter->stDebugState.type) && \
- (SubType & Adapter->stDebugState.subtype[Type]))) { \
- pr_debug("%s:\n", __func__); \
- print_hex_dump(KERN_DEBUG, " ", DUMP_PREFIX_OFFSET, \
- 16, 1, buffer, bufferlen, false); \
- } \
- } while (0)
-
-#define BCM_SHOW_DEBUG_BITMAP(Adapter) do { \
- int i; \
- for (i = 0; i < (NUMTYPES * 2) + 1; i++) { \
- if ((i == 1) || (i == 2) || (i == 4) || (i == 8)) { \
- /* CAUTION! Forcefully turn on ALL debug paths and subpaths! \
- * Adapter->stDebugState.subtype[i] = 0xffffffff; \
- */ \
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "subtype[%d] = 0x%08x\n", \
- i, Adapter->stDebugState.subtype[i]); \
- } \
- } \
-} while (0)
-
-#endif
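
A BCM_DEBUG_PRINT() call only reaches pr_debug() when the adapter's debug state has the matching Type bit set, the SubType bit set inside subtype[Type], and the call's level within debug_level (DBG_TYPE_PRINTK bypasses the filter). A hedged usage sketch, a fragment assuming a valid Adapter, that enables only CmHost connection messages:

	Adapter->stDebugState.debug_level = DBG_LVL_CURR;
	Adapter->stDebugState.type = DBG_TYPE_OTHERS;
	Adapter->stDebugState.subtype[DBG_TYPE_OTHERS] = CONN_MSG;

	/* matches type and subtype, so this one is emitted */
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
			"SFID %d activated\n", 42);
	/* different subtype bit, so this one is filtered out */
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL,
			"idle mode entered\n");
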
diff --git a/drivers/staging/bcm/HandleControlPacket.c b/drivers/staging/bcm/HandleControlPacket.c
deleted file mode 100644
index dd5d138a652..00000000000
--- a/drivers/staging/bcm/HandleControlPacket.c
+++ /dev/null
@@ -1,241 +0,0 @@
-/**
- * @file HandleControlPacket.c
- * This file contains the routines to deal with
- * sending and receiving of control packets.
- */
-#include "headers.h"
-
-/**
- * When a control packet is received, analyze the
- * "status" and call the appropriate response function.
- * Enqueue the control packet for the Application.
- * @return None
- */
-static VOID handle_rx_control_packet(struct bcm_mini_adapter *Adapter,
- struct sk_buff *skb)
-{
- struct bcm_tarang_data *pTarang = NULL;
- bool HighPriorityMessage = false;
- struct sk_buff *newPacket = NULL;
- CHAR cntrl_msg_mask_bit = 0;
- bool drop_pkt_flag = TRUE;
- USHORT usStatus = *(PUSHORT)(skb->data);
-
- if (netif_msg_pktdata(Adapter))
- print_hex_dump(KERN_DEBUG, PFX "rx control: ", DUMP_PREFIX_NONE,
- 16, 1, skb->data, skb->len, 0);
-
- switch (usStatus) {
- case CM_RESPONSES: /* 0xA0 */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT,
- DBG_LVL_ALL,
- "MAC Version Seems to be Non Multi-Classifier, rejected by Driver");
- HighPriorityMessage = TRUE;
- break;
- case CM_CONTROL_NEWDSX_MULTICLASSIFIER_RESP:
- HighPriorityMessage = TRUE;
- if (Adapter->LinkStatus == LINKUP_DONE)
- CmControlResponseMessage(Adapter,
- (skb->data + sizeof(USHORT)));
- break;
- case LINK_CONTROL_RESP: /* 0xA2 */
- case STATUS_RSP: /* 0xA1 */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT,
- DBG_LVL_ALL, "LINK_CONTROL_RESP");
- HighPriorityMessage = TRUE;
- LinkControlResponseMessage(Adapter,
- (skb->data + sizeof(USHORT)));
- break;
- case STATS_POINTER_RESP: /* 0xA6 */
- HighPriorityMessage = TRUE;
- StatisticsResponse(Adapter, (skb->data + sizeof(USHORT)));
- break;
- case IDLE_MODE_STATUS: /* 0xA3 */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT,
- DBG_LVL_ALL,
- "IDLE_MODE_STATUS Type Message Got from F/W");
- InterfaceIdleModeRespond(Adapter, (PUINT)(skb->data +
- sizeof(USHORT)));
- HighPriorityMessage = TRUE;
- break;
-
- case AUTH_SS_HOST_MSG:
- HighPriorityMessage = TRUE;
- break;
-
- default:
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT,
- DBG_LVL_ALL, "Got Default Response");
- /* Let the Application Deal with This Packet */
- break;
- }
-
- /* Queue The Control Packet to The Application Queues */
- down(&Adapter->RxAppControlQueuelock);
-
- for (pTarang = Adapter->pTarangs; pTarang; pTarang = pTarang->next) {
- if (Adapter->device_removed)
- break;
-
- drop_pkt_flag = TRUE;
- /*
-		 * There are cntrl msgs from A0 to AC, which have been mapped to
-		 * bits 0 to C in the cntrl mask.
-		 * Also, by default AD to BF are masked to the rest of the
-		 * bits... which will be ON by default.
-		 * If the mask bit is enabled for a particular pkt status, send
-		 * it out to the app, else stop it.
- */
- cntrl_msg_mask_bit = (usStatus & 0x1F);
- /*
- * printk("\ninew msg mask bit which is disable in mask:%X",
- * cntrl_msg_mask_bit);
- */
- if (pTarang->RxCntrlMsgBitMask & (1 << cntrl_msg_mask_bit))
- drop_pkt_flag = false;
-
- if ((drop_pkt_flag == TRUE) ||
- (pTarang->AppCtrlQueueLen > MAX_APP_QUEUE_LEN)
- || ((pTarang->AppCtrlQueueLen >
- MAX_APP_QUEUE_LEN / 2) &&
- (HighPriorityMessage == false))) {
- /*
-			 * Assumption:-
-			 * 1. every tarang manages its own dropped pkt
-			 *    statistics
-			 * 2. Total packets dropped per tarang will be equal to
-			 *    the sum of all types of pkts dropped by that
-			 *    tarang only.
- */
- struct bcm_mibs_dropped_cntrl_msg *msg =
- &pTarang->stDroppedAppCntrlMsgs;
- switch (*(PUSHORT)skb->data) {
- case CM_RESPONSES:
- msg->cm_responses++;
- break;
- case CM_CONTROL_NEWDSX_MULTICLASSIFIER_RESP:
- msg->cm_control_newdsx_multiclassifier_resp++;
- break;
- case LINK_CONTROL_RESP:
- msg->link_control_resp++;
- break;
- case STATUS_RSP:
- msg->status_rsp++;
- break;
- case STATS_POINTER_RESP:
- msg->stats_pointer_resp++;
- break;
- case IDLE_MODE_STATUS:
- msg->idle_mode_status++;
- break;
- case AUTH_SS_HOST_MSG:
- msg->auth_ss_host_msg++;
- break;
- default:
- msg->low_priority_message++;
- break;
- }
-
- continue;
- }
-
- newPacket = skb_clone(skb, GFP_KERNEL);
- if (!newPacket)
- break;
- ENQUEUEPACKET(pTarang->RxAppControlHead,
- pTarang->RxAppControlTail, newPacket);
- pTarang->AppCtrlQueueLen++;
- }
- up(&Adapter->RxAppControlQueuelock);
- wake_up(&Adapter->process_read_wait_queue);
- dev_kfree_skb(skb);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL,
- "After wake_up_interruptible");
-}
-
-/**
- * @ingroup ctrl_pkt_functions
- * Thread to handle control pkt reception
- */
-
-/* pointer to adapter object*/
-int control_packet_handler(struct bcm_mini_adapter *Adapter)
-{
- struct sk_buff *ctrl_packet = NULL;
- unsigned long flags = 0;
- /* struct timeval tv; */
- /* int *puiBuffer = NULL; */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL,
- "Entering to make thread wait on control packet event!");
- while (1) {
- wait_event_interruptible(Adapter->process_rx_cntrlpkt,
- atomic_read(&Adapter->cntrlpktCnt) ||
- Adapter->bWakeUpDevice ||
- kthread_should_stop());
-
-
- if (kthread_should_stop()) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT,
- DBG_LVL_ALL, "Exiting\n");
- return 0;
- }
- if (TRUE == Adapter->bWakeUpDevice) {
- Adapter->bWakeUpDevice = false;
- if ((false == Adapter->bTriedToWakeUpFromlowPowerMode)
- && ((TRUE == Adapter->IdleMode) ||
- (TRUE == Adapter->bShutStatus))) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- CP_CTRL_PKT, DBG_LVL_ALL,
- "Calling InterfaceAbortIdlemode\n");
- /*
- * Adapter->bTriedToWakeUpFromlowPowerMode
- * = TRUE;
- */
- InterfaceIdleModeWakeup(Adapter);
- }
- continue;
- }
-
- while (atomic_read(&Adapter->cntrlpktCnt)) {
- spin_lock_irqsave(&Adapter->control_queue_lock, flags);
- ctrl_packet = Adapter->RxControlHead;
- if (ctrl_packet) {
- DEQUEUEPACKET(Adapter->RxControlHead,
- Adapter->RxControlTail);
- /* Adapter->RxControlHead=ctrl_packet->next; */
- }
-
- spin_unlock_irqrestore(&Adapter->control_queue_lock,
- flags);
- handle_rx_control_packet(Adapter, ctrl_packet);
- atomic_dec(&Adapter->cntrlpktCnt);
- }
-
- SetUpTargetDsxBuffers(Adapter);
- }
- return STATUS_SUCCESS;
-}
-
-INT flushAllAppQ(void)
-{
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- struct bcm_tarang_data *pTarang = NULL;
- struct sk_buff *PacketToDrop = NULL;
-
- for (pTarang = Adapter->pTarangs; pTarang; pTarang = pTarang->next) {
- while (pTarang->RxAppControlHead != NULL) {
- PacketToDrop = pTarang->RxAppControlHead;
- DEQUEUEPACKET(pTarang->RxAppControlHead,
- pTarang->RxAppControlTail);
- dev_kfree_skb(PacketToDrop);
- }
- pTarang->AppCtrlQueueLen = 0;
-		/* The dropped control packet statistics should also be reset. */
- memset((PVOID)&pTarang->stDroppedAppCntrlMsgs, 0,
- sizeof(struct bcm_mibs_dropped_cntrl_msg));
-
- }
- return STATUS_SUCCESS;
-}
-
-
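
The per-application filtering in the removed handle_rx_control_packet() is compact but easy to misread, so here is a minimal standalone sketch of the same mask-bit check (the helper name and parameter names are hypothetical, not driver symbols):

static int ctrl_msg_allowed(unsigned long rx_cntrl_msg_bitmask,
			    unsigned short us_status)
{
	/* Message types 0xA0..0xAC map to bits 0..0xC of the mask;
	 * everything else shares the remaining, default-on bits. */
	unsigned int bit = us_status & 0x1F;

	return (rx_cntrl_msg_bitmask & (1UL << bit)) != 0; /* 1 = deliver, 0 = drop */
}
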
diff --git a/drivers/staging/bcm/HostMIBSInterface.h b/drivers/staging/bcm/HostMIBSInterface.h
deleted file mode 100644
index f922ac49b70..00000000000
--- a/drivers/staging/bcm/HostMIBSInterface.h
+++ /dev/null
@@ -1,192 +0,0 @@
-#ifndef _HOST_MIBSINTERFACE_H
-#define _HOST_MIBSINTERFACE_H
-
-/*
- * Copyright (c) 2007 Beceem Communications Pvt. Ltd
- * File Name: HostMIBSInterface.h
- * Abstract: This file contains the data structures used by the Host
- *           to update the Host statistics used for the MIBs.
- */
-
-#define MIBS_MAX_CLASSIFIERS 100
-#define MIBS_MAX_PHSRULES 100
-#define MIBS_MAX_SERVICEFLOWS 17
-#define MIBS_MAX_IP_RANGE_LENGTH 4
-#define MIBS_MAX_PORT_RANGE 4
-#define MIBS_MAX_PROTOCOL_LENGTH 32
-#define MIBS_MAX_PHS_LENGTHS 255
-#define MIBS_IPV6_ADDRESS_SIZEINBYTES 0x10
-#define MIBS_IP_LENGTH_OF_ADDRESS 4
-#define MIBS_MAX_HIST_ENTRIES 12
-#define MIBS_PKTSIZEHIST_RANGE 128
-
-union bcm_mibs_ip_addr {
- struct {
- /* Source Ip Address Range */
- unsigned long ulIpv4Addr[MIBS_MAX_IP_RANGE_LENGTH];
- /* Source Ip Mask Address Range */
- unsigned long ulIpv4Mask[MIBS_MAX_IP_RANGE_LENGTH];
- };
- struct {
- /* Source Ip Address Range */
- unsigned long ulIpv6Addr[MIBS_MAX_IP_RANGE_LENGTH * 4];
- /* Source Ip Mask Address Range */
- unsigned long ulIpv6Mask[MIBS_MAX_IP_RANGE_LENGTH * 4];
- };
- struct {
- unsigned char ucIpv4Address[MIBS_MAX_IP_RANGE_LENGTH * MIBS_IP_LENGTH_OF_ADDRESS];
- unsigned char ucIpv4Mask[MIBS_MAX_IP_RANGE_LENGTH * MIBS_IP_LENGTH_OF_ADDRESS];
- };
- struct {
- unsigned char ucIpv6Address[MIBS_MAX_IP_RANGE_LENGTH * MIBS_IPV6_ADDRESS_SIZEINBYTES];
- unsigned char ucIpv6Mask[MIBS_MAX_IP_RANGE_LENGTH * MIBS_IPV6_ADDRESS_SIZEINBYTES];
- };
-};
-
-struct bcm_mibs_host_info {
- u64 GoodTransmits;
- u64 GoodReceives;
-	/* This is to keep track of the Tx and Rx MailBox Registers. */
- unsigned long NumDesUsed;
- unsigned long CurrNumFreeDesc;
- unsigned long PrevNumFreeDesc;
-	/* to keep track of the number of bytes received */
- unsigned long PrevNumRcevBytes;
- unsigned long CurrNumRcevBytes;
- /* QOS Related */
- unsigned long BEBucketSize;
- unsigned long rtPSBucketSize;
- unsigned long LastTxQueueIndex;
- bool TxOutofDescriptors;
- bool TimerActive;
- u32 u32TotalDSD;
- u32 aTxPktSizeHist[MIBS_MAX_HIST_ENTRIES];
- u32 aRxPktSizeHist[MIBS_MAX_HIST_ENTRIES];
-};
-
-struct bcm_mibs_classifier_rule {
- unsigned long ulSFID;
- unsigned char ucReserved[2];
- u16 uiClassifierRuleIndex;
- bool bUsed;
- unsigned short usVCID_Value;
- u8 u8ClassifierRulePriority;
- union bcm_mibs_ip_addr stSrcIpAddress;
- /* IP Source Address Length */
- unsigned char ucIPSourceAddressLength;
- union bcm_mibs_ip_addr stDestIpAddress;
- /* IP Destination Address Length */
- unsigned char ucIPDestinationAddressLength;
- unsigned char ucIPTypeOfServiceLength;
- unsigned char ucTosLow;
- unsigned char ucTosHigh;
- unsigned char ucTosMask;
- unsigned char ucProtocolLength;
- unsigned char ucProtocol[MIBS_MAX_PROTOCOL_LENGTH];
- unsigned short usSrcPortRangeLo[MIBS_MAX_PORT_RANGE];
- unsigned short usSrcPortRangeHi[MIBS_MAX_PORT_RANGE];
- unsigned char ucSrcPortRangeLength;
- unsigned short usDestPortRangeLo[MIBS_MAX_PORT_RANGE];
- unsigned short usDestPortRangeHi[MIBS_MAX_PORT_RANGE];
- unsigned char ucDestPortRangeLength;
- bool bProtocolValid;
- bool bTOSValid;
- bool bDestIpValid;
- bool bSrcIpValid;
- unsigned char ucDirection;
- bool bIpv6Protocol;
- u32 u32PHSRuleID;
-};
-
-struct bcm_mibs_phs_rule {
- unsigned long ulSFID;
- u8 u8PHSI;
- u8 u8PHSFLength;
- u8 u8PHSF[MIBS_MAX_PHS_LENGTHS];
- u8 u8PHSMLength;
- u8 u8PHSM[MIBS_MAX_PHS_LENGTHS];
- u8 u8PHSS;
- u8 u8PHSV;
- u8 reserved[5];
- long PHSModifiedBytes;
- unsigned long PHSModifiedNumPackets;
- unsigned long PHSErrorNumPackets;
-};
-
-struct bcm_mibs_parameters {
- u32 wmanIfSfid;
- u32 wmanIfCmnCpsSfState;
- u32 wmanIfCmnCpsMaxSustainedRate;
- u32 wmanIfCmnCpsMaxTrafficBurst;
- u32 wmanIfCmnCpsMinReservedRate;
- u32 wmanIfCmnCpsToleratedJitter;
- u32 wmanIfCmnCpsMaxLatency;
- u32 wmanIfCmnCpsFixedVsVariableSduInd;
- u32 wmanIfCmnCpsSduSize;
- u32 wmanIfCmnCpsSfSchedulingType;
- u32 wmanIfCmnCpsArqEnable;
- u32 wmanIfCmnCpsArqWindowSize;
- u32 wmanIfCmnCpsArqBlockLifetime;
- u32 wmanIfCmnCpsArqSyncLossTimeout;
- u32 wmanIfCmnCpsArqDeliverInOrder;
- u32 wmanIfCmnCpsArqRxPurgeTimeout;
- u32 wmanIfCmnCpsArqBlockSize;
- u32 wmanIfCmnCpsMinRsvdTolerableRate;
- u32 wmanIfCmnCpsReqTxPolicy;
- u32 wmanIfCmnSfCsSpecification;
- u32 wmanIfCmnCpsTargetSaid;
-};
-
-struct bcm_mibs_table {
- unsigned long ulSFID;
- unsigned short usVCID_Value;
- unsigned int uiThreshold;
- u8 u8TrafficPriority;
- bool bValid;
- bool bActive;
- bool bActivateRequestSent;
- u8 u8QueueType;
- unsigned int uiMaxBucketSize;
- unsigned int uiCurrentQueueDepthOnTarget;
- unsigned int uiCurrentBytesOnHost;
- unsigned int uiCurrentPacketsOnHost;
- unsigned int uiDroppedCountBytes;
- unsigned int uiDroppedCountPackets;
- unsigned int uiSentBytes;
- unsigned int uiSentPackets;
- unsigned int uiCurrentDrainRate;
- unsigned int uiThisPeriodSentBytes;
- u64 liDrainCalculated;
- unsigned int uiCurrentTokenCount;
- u64 liLastUpdateTokenAt;
- unsigned int uiMaxAllowedRate;
- unsigned int NumOfPacketsSent;
- unsigned char ucDirection;
- unsigned short usCID;
- struct bcm_mibs_parameters stMibsExtServiceFlowTable;
- unsigned int uiCurrentRxRate;
- unsigned int uiThisPeriodRxBytes;
- unsigned int uiTotalRxBytes;
- unsigned int uiTotalTxBytes;
-};
-
-struct bcm_mibs_dropped_cntrl_msg {
- unsigned long cm_responses;
- unsigned long cm_control_newdsx_multiclassifier_resp;
- unsigned long link_control_resp;
- unsigned long status_rsp;
- unsigned long stats_pointer_resp;
- unsigned long idle_mode_status;
- unsigned long auth_ss_host_msg;
- unsigned long low_priority_message;
-};
-
-struct bcm_host_stats_mibs {
- struct bcm_mibs_host_info stHostInfo;
- struct bcm_mibs_classifier_rule astClassifierTable[MIBS_MAX_CLASSIFIERS];
- struct bcm_mibs_table astSFtable[MIBS_MAX_SERVICEFLOWS];
- struct bcm_mibs_phs_rule astPhsRulesTable[MIBS_MAX_PHSRULES];
- struct bcm_mibs_dropped_cntrl_msg stDroppedAppCntrlMsgs;
-};
-
-#endif
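
For reference, a hypothetical pretty-printer (not part of the removed header) showing how the per-application dropped-control-message counters defined above line up with the message types counted in handle_rx_control_packet():

static void dump_dropped_ctrl_msgs(const struct bcm_mibs_dropped_cntrl_msg *m)
{
	pr_info("cm_responses=%lu newdsx_multiclassifier_resp=%lu link_control_resp=%lu\n",
		m->cm_responses, m->cm_control_newdsx_multiclassifier_resp,
		m->link_control_resp);
	pr_info("status_rsp=%lu stats_pointer_resp=%lu idle_mode_status=%lu auth_ss_host_msg=%lu low_priority=%lu\n",
		m->status_rsp, m->stats_pointer_resp, m->idle_mode_status,
		m->auth_ss_host_msg, m->low_priority_message);
}
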
diff --git a/drivers/staging/bcm/IPv6Protocol.c b/drivers/staging/bcm/IPv6Protocol.c
deleted file mode 100644
index 27f3f416f18..00000000000
--- a/drivers/staging/bcm/IPv6Protocol.c
+++ /dev/null
@@ -1,476 +0,0 @@
-#include "headers.h"
-
-static bool MatchSrcIpv6Address(struct bcm_classifier_rule *pstClassifierRule,
- struct bcm_ipv6_hdr *pstIpv6Header);
-static bool MatchDestIpv6Address(struct bcm_classifier_rule *pstClassifierRule,
- struct bcm_ipv6_hdr *pstIpv6Header);
-static VOID DumpIpv6Header(struct bcm_ipv6_hdr *pstIpv6Header);
-
-static UCHAR *GetNextIPV6ChainedHeader(UCHAR **ppucPayload,
- UCHAR *pucNextHeader, bool *bParseDone, USHORT *pusPayloadLength)
-{
- UCHAR *pucRetHeaderPtr = NULL;
- UCHAR *pucPayloadPtr = NULL;
- USHORT usNextHeaderOffset = 0;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- if ((ppucPayload == NULL) || (*pusPayloadLength == 0) ||
- (*bParseDone)) {
- *bParseDone = TRUE;
- return NULL;
- }
-
- pucRetHeaderPtr = *ppucPayload;
- pucPayloadPtr = *ppucPayload;
-
- if (!pucRetHeaderPtr || !pucPayloadPtr) {
- *bParseDone = TRUE;
- return NULL;
- }
-
-	/* Get the Next Header Type */
- *bParseDone = false;
-
-
- switch (*pucNextHeader) {
- case IPV6HDR_TYPE_HOPBYHOP:
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
- DBG_LVL_ALL, "\nIPv6 HopByHop Header");
- usNextHeaderOffset += sizeof(struct bcm_ipv6_options_hdr);
- break;
-
- case IPV6HDR_TYPE_ROUTING:
- {
- struct bcm_ipv6_routing_hdr *pstIpv6RoutingHeader;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
- DBG_LVL_ALL, "\nIPv6 Routing Header");
- pstIpv6RoutingHeader =
- (struct bcm_ipv6_routing_hdr *)pucPayloadPtr;
- usNextHeaderOffset += sizeof(struct bcm_ipv6_routing_hdr);
- usNextHeaderOffset += pstIpv6RoutingHeader->ucNumAddresses *
- IPV6_ADDRESS_SIZEINBYTES;
- }
- break;
-
- case IPV6HDR_TYPE_FRAGMENTATION:
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
- DBG_LVL_ALL,
- "\nIPv6 Fragmentation Header");
- usNextHeaderOffset += sizeof(struct bcm_ipv6_fragment_hdr);
- break;
-
- case IPV6HDR_TYPE_DESTOPTS:
- {
- struct bcm_ipv6_dest_options_hdr *pstIpv6DestOptsHdr =
- (struct bcm_ipv6_dest_options_hdr *)pucPayloadPtr;
- int nTotalOptions = pstIpv6DestOptsHdr->ucHdrExtLen;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
- DBG_LVL_ALL,
-				"\nIPv6 DestOpts Header");
- usNextHeaderOffset += sizeof(struct bcm_ipv6_dest_options_hdr);
- usNextHeaderOffset += nTotalOptions *
- IPV6_DESTOPTS_HDR_OPTIONSIZE;
- }
- break;
-
-
- case IPV6HDR_TYPE_AUTHENTICATION:
- {
- struct bcm_ipv6_authentication_hdr *pstIpv6AuthHdr =
- (struct bcm_ipv6_authentication_hdr *)pucPayloadPtr;
- int nHdrLen = pstIpv6AuthHdr->ucLength;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
- DBG_LVL_ALL,
- "\nIPv6 Authentication Header");
- usNextHeaderOffset += nHdrLen * 4;
- }
- break;
-
- case IPV6HDR_TYPE_ENCRYPTEDSECURITYPAYLOAD:
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
- DBG_LVL_ALL,
- "\nIPv6 Encrypted Security Payload Header");
- *bParseDone = TRUE;
- break;
-
- case IPV6_ICMP_HDR_TYPE:
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
- DBG_LVL_ALL, "\nICMP Header");
- *bParseDone = TRUE;
- break;
-
- case TCP_HEADER_TYPE:
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
- DBG_LVL_ALL, "\nTCP Header");
- *bParseDone = TRUE;
- break;
-
- case UDP_HEADER_TYPE:
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
- DBG_LVL_ALL, "\nUDP Header");
- *bParseDone = TRUE;
- break;
-
- default:
- *bParseDone = TRUE;
- break;
- }
-
- if (*bParseDone == false) {
- if (*pusPayloadLength <= usNextHeaderOffset) {
- *bParseDone = TRUE;
- } else {
- *pucNextHeader = *pucPayloadPtr;
- pucPayloadPtr += usNextHeaderOffset;
- (*pusPayloadLength) -= usNextHeaderOffset;
- }
-
- }
-
- *ppucPayload = pucPayloadPtr;
- return pucRetHeaderPtr;
-}
-
-
-static UCHAR GetIpv6ProtocolPorts(UCHAR *pucPayload, USHORT *pusSrcPort,
- USHORT *pusDestPort, USHORT usPayloadLength, UCHAR ucNextHeader)
-{
- UCHAR *pIpv6HdrScanContext = pucPayload;
- bool bDone = false;
- UCHAR ucHeaderType = 0;
- UCHAR *pucNextHeader = NULL;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- if (!pucPayload || (usPayloadLength == 0))
- return 0;
-
- *pusSrcPort = *pusDestPort = 0;
- ucHeaderType = ucNextHeader;
- while (!bDone) {
- pucNextHeader = GetNextIPV6ChainedHeader(&pIpv6HdrScanContext,
- &ucHeaderType,
- &bDone,
- &usPayloadLength);
- if (bDone) {
- if ((ucHeaderType == TCP_HEADER_TYPE) ||
- (ucHeaderType == UDP_HEADER_TYPE)) {
- *pusSrcPort = *((PUSHORT)(pucNextHeader));
- *pusDestPort = *((PUSHORT)(pucNextHeader+2));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
- DBG_LVL_ALL,
- "\nProtocol Ports - Src Port :0x%x Dest Port : 0x%x",
- ntohs(*pusSrcPort),
- ntohs(*pusDestPort));
- }
- break;
-
- }
- }
- return ucHeaderType;
-}
-
-
-/*
- * Arg 1 struct bcm_mini_adapter *Adapter is a pointer to the driver control
- * structure
- * Arg 2 PVOID pcIpHeader is a pointer to the IP header of the packet
- */
-USHORT IpVersion6(struct bcm_mini_adapter *Adapter, PVOID pcIpHeader,
- struct bcm_classifier_rule *pstClassifierRule)
-{
- USHORT ushDestPort = 0;
- USHORT ushSrcPort = 0;
- UCHAR ucNextProtocolAboveIP = 0;
- struct bcm_ipv6_hdr *pstIpv6Header = NULL;
- bool bClassificationSucceed = false;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
- DBG_LVL_ALL, "IpVersion6 ==========>\n");
-
- pstIpv6Header = pcIpHeader;
-
- DumpIpv6Header(pstIpv6Header);
-
- /*
- * Try to get the next higher layer protocol
- * and the Ports Nos if TCP or UDP
- */
- ucNextProtocolAboveIP = GetIpv6ProtocolPorts((UCHAR *)(pcIpHeader +
- sizeof(struct bcm_ipv6_hdr)),
- &ushSrcPort,
- &ushDestPort,
- pstIpv6Header->usPayloadLength,
- pstIpv6Header->ucNextHeader);
-
- do {
- if (pstClassifierRule->ucDirection == 0) {
- /*
-			 * Cannot be processed for classification;
-			 * it is a downlink connection.
- */
- break;
- }
-
- if (!pstClassifierRule->bIpv6Protocol) {
- /*
-			 * We are looking for IPv6 classifiers.
-			 * Ignore this classifier and try the next one.
- */
- break;
- }
-
- bClassificationSucceed = MatchSrcIpv6Address(pstClassifierRule,
- pstIpv6Header);
- if (!bClassificationSucceed)
- break;
-
- bClassificationSucceed = MatchDestIpv6Address(pstClassifierRule,
- pstIpv6Header);
- if (!bClassificationSucceed)
- break;
-
- /*
- * Match the protocol type.
-		 * For IPv6 this is the next protocol at the end of
-		 * the chain of IPv6 protocol headers.
- */
- bClassificationSucceed = MatchProtocol(pstClassifierRule,
- ucNextProtocolAboveIP);
- if (!bClassificationSucceed)
- break;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
- DBG_LVL_ALL, "\nIPv6 Protocol Matched");
-
- if ((ucNextProtocolAboveIP == TCP_HEADER_TYPE) ||
- (ucNextProtocolAboveIP == UDP_HEADER_TYPE)) {
- /* Match Src Port */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
- DBG_LVL_ALL, "\nIPv6 Source Port:%x\n",
- ntohs(ushSrcPort));
- bClassificationSucceed = MatchSrcPort(pstClassifierRule,
- ntohs(ushSrcPort));
- if (!bClassificationSucceed)
- break;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
- DBG_LVL_ALL, "\nIPv6 Src Port Matched");
-
- /* Match Dest Port */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
- DBG_LVL_ALL,
- "\nIPv6 Destination Port:%x\n",
- ntohs(ushDestPort));
- bClassificationSucceed = MatchDestPort(pstClassifierRule,
- ntohs(ushDestPort));
- if (!bClassificationSucceed)
- break;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
- DBG_LVL_ALL,
- "\nIPv6 Dest Port Matched");
- }
- } while (0);
-
- if (bClassificationSucceed == TRUE) {
- INT iMatchedSFQueueIndex = 0;
-
- iMatchedSFQueueIndex = SearchSfid(Adapter,
- pstClassifierRule->ulSFID);
- if ((iMatchedSFQueueIndex >= NO_OF_QUEUES) ||
- (Adapter->PackInfo[iMatchedSFQueueIndex].bActive == false))
- bClassificationSucceed = false;
- }
-
- return bClassificationSucceed;
-}
-
-
-static bool MatchSrcIpv6Address(struct bcm_classifier_rule *pstClassifierRule,
- struct bcm_ipv6_hdr *pstIpv6Header)
-{
- UINT uiLoopIndex = 0;
- UINT uiIpv6AddIndex = 0;
- UINT uiIpv6AddrNoLongWords = 4;
- ULONG aulSrcIP[4];
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- union u_ip_address *src_addr = &pstClassifierRule->stSrcIpAddress;
-
- /*
-	 * This is the number of Src Addresses, i.e. the range of IP addresses
-	 * contained in the classifier rule against which we need to match.
- */
- UINT uiCountIPSrcAddresses =
- (UINT)pstClassifierRule->ucIPSourceAddressLength;
-
-
- if (uiCountIPSrcAddresses == 0)
- return TRUE;
-
-
- /* First Convert the Ip Address in the packet to Host Endian order */
- for (uiIpv6AddIndex = 0;
- uiIpv6AddIndex < uiIpv6AddrNoLongWords;
- uiIpv6AddIndex++)
- aulSrcIP[uiIpv6AddIndex] =
- ntohl(pstIpv6Header->ulSrcIpAddress[uiIpv6AddIndex]);
-
- for (uiLoopIndex = 0;
- uiLoopIndex < uiCountIPSrcAddresses;
- uiLoopIndex += uiIpv6AddrNoLongWords) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL,
- "\n Src Ipv6 Address In Received Packet :\n ");
- DumpIpv6Address(aulSrcIP);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL,
- "\n Src Ipv6 Mask In Classifier Rule:\n");
- DumpIpv6Address(&src_addr->ulIpv6Mask[uiLoopIndex]);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL,
- "\n Src Ipv6 Address In Classifier Rule :\n");
- DumpIpv6Address(&src_addr->ulIpv6Addr[uiLoopIndex]);
-
- for (uiIpv6AddIndex = 0;
- uiIpv6AddIndex < uiIpv6AddrNoLongWords;
- uiIpv6AddIndex++) {
- if ((src_addr->ulIpv6Mask[uiLoopIndex+uiIpv6AddIndex] &
- aulSrcIP[uiIpv6AddIndex]) !=
- src_addr->ulIpv6Addr[uiLoopIndex+uiIpv6AddIndex]) {
- /*
- * Match failed for current Ipv6 Address
- * Try next Ipv6 Address
- */
- break;
- }
-
- if (uiIpv6AddIndex == uiIpv6AddrNoLongWords-1) {
- /* Match Found */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
- DBG_LVL_ALL,
- "Ipv6 Src Ip Address Matched\n");
- return TRUE;
- }
- }
- }
- return false;
-}
-
-static bool MatchDestIpv6Address(struct bcm_classifier_rule *pstClassifierRule,
- struct bcm_ipv6_hdr *pstIpv6Header)
-{
- UINT uiLoopIndex = 0;
- UINT uiIpv6AddIndex = 0;
- UINT uiIpv6AddrNoLongWords = 4;
- ULONG aulDestIP[4];
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- union u_ip_address *dest_addr = &pstClassifierRule->stDestIpAddress;
-
- /*
-	 * This is the number of Destination Addresses,
-	 * i.e. the range of IP addresses contained in the classifier rule
-	 * against which we need to match.
- */
- UINT uiCountIPDestinationAddresses =
- (UINT)pstClassifierRule->ucIPDestinationAddressLength;
-
- if (uiCountIPDestinationAddresses == 0)
- return TRUE;
-
-
- /* First Convert the Ip Address in the packet to Host Endian order */
- for (uiIpv6AddIndex = 0;
- uiIpv6AddIndex < uiIpv6AddrNoLongWords;
- uiIpv6AddIndex++)
- aulDestIP[uiIpv6AddIndex] =
- ntohl(pstIpv6Header->ulDestIpAddress[uiIpv6AddIndex]);
-
- for (uiLoopIndex = 0;
- uiLoopIndex < uiCountIPDestinationAddresses;
- uiLoopIndex += uiIpv6AddrNoLongWords) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL,
- "\n Destination Ipv6 Address In Received Packet :\n ");
- DumpIpv6Address(aulDestIP);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL,
- "\n Destination Ipv6 Mask In Classifier Rule :\n");
- DumpIpv6Address(&dest_addr->ulIpv6Mask[uiLoopIndex]);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL,
- "\n Destination Ipv6 Address In Classifier Rule :\n");
- DumpIpv6Address(&dest_addr->ulIpv6Addr[uiLoopIndex]);
-
- for (uiIpv6AddIndex = 0;
- uiIpv6AddIndex < uiIpv6AddrNoLongWords;
- uiIpv6AddIndex++) {
- if ((dest_addr->ulIpv6Mask[uiLoopIndex+uiIpv6AddIndex] &
- aulDestIP[uiIpv6AddIndex]) !=
- dest_addr->ulIpv6Addr[uiLoopIndex+uiIpv6AddIndex]) {
- /*
- * Match failed for current Ipv6 Address.
- * Try next Ipv6 Address
- */
- break;
- }
-
- if (uiIpv6AddIndex == uiIpv6AddrNoLongWords-1) {
- /* Match Found */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
- DBG_LVL_ALL,
- "Ipv6 Destination Ip Address Matched\n");
- return TRUE;
- }
- }
- }
- return false;
-
-}
-
-VOID DumpIpv6Address(ULONG *puIpv6Address)
-{
- UINT uiIpv6AddrNoLongWords = 4;
- UINT uiIpv6AddIndex = 0;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- for (uiIpv6AddIndex = 0;
- uiIpv6AddIndex < uiIpv6AddrNoLongWords;
- uiIpv6AddIndex++) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL,
- ":%lx", puIpv6Address[uiIpv6AddIndex]);
- }
-
-}
-
-static VOID DumpIpv6Header(struct bcm_ipv6_hdr *pstIpv6Header)
-{
- UCHAR ucVersion;
- UCHAR ucPrio;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL,
- "----Ipv6 Header---");
- ucVersion = pstIpv6Header->ucVersionPrio & 0xf0;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL,
- "Version : %x\n", ucVersion);
- ucPrio = pstIpv6Header->ucVersionPrio & 0x0f;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL,
- "Priority : %x\n", ucPrio);
- /*
- * BCM_DEBUG_PRINT( Adapter,DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL,
- * "Flow Label : %x\n",(pstIpv6Header->ucVersionPrio &0xf0);
- */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL,
- "Payload Length : %x\n",
- ntohs(pstIpv6Header->usPayloadLength));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL,
- "Next Header : %x\n", pstIpv6Header->ucNextHeader);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL,
- "Hop Limit : %x\n", pstIpv6Header->ucHopLimit);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL,
- "Src Address :\n");
- DumpIpv6Address(pstIpv6Header->ulSrcIpAddress);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL,
- "Dest Address :\n");
- DumpIpv6Address(pstIpv6Header->ulDestIpAddress);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL,
- "----Ipv6 Header End---");
-
-
-}
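
The core of the removed GetNextIPV6ChainedHeader() is a standard IPv6 extension-header walk. A simplified, self-contained sketch of that idea (fragmentation, AH/ESP handling and length checks omitted; the function name is illustrative, not a driver symbol):

static unsigned char skip_simple_ipv6_ext_headers(const unsigned char *p,
						  unsigned char next_header)
{
	while (next_header == 0x00 ||	/* hop-by-hop options */
	       next_header == 0x2B ||	/* routing header */
	       next_header == 0x3C) {	/* destination options */
		next_header = p[0];	/* first byte: next header type */
		p += (p[1] + 1) * 8;	/* second byte: length in 8-octet units,
					 * excluding the first 8 octets */
	}
	return next_header;		/* e.g. 0x06 (TCP) or 0x11 (UDP) */
}
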
diff --git a/drivers/staging/bcm/IPv6ProtocolHdr.h b/drivers/staging/bcm/IPv6ProtocolHdr.h
deleted file mode 100644
index 96b36a579af..00000000000
--- a/drivers/staging/bcm/IPv6ProtocolHdr.h
+++ /dev/null
@@ -1,85 +0,0 @@
-#ifndef _IPV6_PROTOCOL_DEFINES_
-#define _IPV6_PROTOCOL_DEFINES_
-
-#define IPV6HDR_TYPE_HOPBYHOP 0x0
-#define IPV6HDR_TYPE_ROUTING 0x2B
-#define IPV6HDR_TYPE_FRAGMENTATION 0x2C
-#define IPV6HDR_TYPE_DESTOPTS 0x3c
-#define IPV6HDR_TYPE_AUTHENTICATION 0x33
-#define IPV6HDR_TYPE_ENCRYPTEDSECURITYPAYLOAD 0x34
-#define MASK_IPV6_CS_SPEC 0x2
-
-#define TCP_HEADER_TYPE 0x6
-#define UDP_HEADER_TYPE 0x11
-#define IPV6_ICMP_HDR_TYPE 0x2
-#define IPV6_FLOWLABEL_BITOFFSET 9
-
-#define IPV6_MAX_CHAINEDHDR_BUFFBYTES 0x64
-/*
- * Size of the Dest Options field of the Destination Options Header
- * in bytes.
- */
-#define IPV6_DESTOPTS_HDR_OPTIONSIZE 0x8
-
-struct bcm_ipv6_hdr {
- unsigned char ucVersionPrio;
- unsigned char aucFlowLabel[3];
- unsigned short usPayloadLength;
- unsigned char ucNextHeader;
- unsigned char ucHopLimit;
- unsigned long ulSrcIpAddress[4];
- unsigned long ulDestIpAddress[4];
-};
-
-struct bcm_ipv6_routing_hdr {
- unsigned char ucNextHeader;
- unsigned char ucRoutingType;
- unsigned char ucNumAddresses;
- unsigned char ucNextAddress;
- unsigned long ulReserved;
-};
-
-struct bcm_ipv6_fragment_hdr {
- unsigned char ucNextHeader;
- unsigned char ucReserved;
- unsigned short usFragmentOffset;
- unsigned long ulIdentification;
-};
-
-struct bcm_ipv6_dest_options_hdr {
- unsigned char ucNextHeader;
- unsigned char ucHdrExtLen;
- unsigned char ucDestOptions[6];
-};
-
-struct bcm_ipv6_options_hdr {
- unsigned char ucNextHeader;
- unsigned char ucMisc[3];
- unsigned long ulJumboPayloadLen;
-};
-
-struct bcm_ipv6_authentication_hdr {
- unsigned char ucNextHeader;
- unsigned char ucLength;
- unsigned short usReserved;
- unsigned long ulSecurityParametersIndex;
-};
-
-enum bcm_ipaddr_context {
- eSrcIpAddress,
- eDestIpAddress
-};
-
-/* Function Prototypes */
-
-unsigned short IpVersion6(struct bcm_mini_adapter *Adapter, /* < Pointer to the driver control structure */
- void *pcIpHeader, /* <Pointer to the IP Hdr of the packet */
- struct bcm_classifier_rule *pstClassifierRule);
-
-void DumpIpv6Address(unsigned long *puIpv6Address);
-
-extern bool MatchSrcPort(struct bcm_classifier_rule *pstClassifierRule, unsigned short ushSrcPort);
-extern bool MatchDestPort(struct bcm_classifier_rule *pstClassifierRule, unsigned short ushSrcPort);
-extern bool MatchProtocol(struct bcm_classifier_rule *pstClassifierRule, unsigned char ucProtocol);
-
-#endif
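
A small side note on struct bcm_ipv6_hdr above: the first byte packs the IP version in its high nibble (with the top of the traffic class in the low nibble), so the version can be recovered as shown below (illustrative inline helper, not part of the removed header):

static inline unsigned char bcm_ipv6_version(unsigned char version_prio)
{
	return version_prio >> 4;	/* 6 for IPv6 */
}
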
diff --git a/drivers/staging/bcm/InterfaceAdapter.h b/drivers/staging/bcm/InterfaceAdapter.h
deleted file mode 100644
index 06a6b18bca4..00000000000
--- a/drivers/staging/bcm/InterfaceAdapter.h
+++ /dev/null
@@ -1,79 +0,0 @@
-#ifndef _INTERFACE_ADAPTER_H
-#define _INTERFACE_ADAPTER_H
-
-struct bcm_bulk_endpoint_in {
- char *bulk_in_buffer;
- size_t bulk_in_size;
- unsigned char bulk_in_endpointAddr;
- unsigned int bulk_in_pipe;
-};
-
-struct bcm_bulk_endpoint_out {
- unsigned char bulk_out_buffer;
- size_t bulk_out_size;
- unsigned char bulk_out_endpointAddr;
- unsigned int bulk_out_pipe;
- /* this is used when int out endpoint is used as bulk out end point */
- unsigned char int_out_interval;
-};
-
-struct bcm_intr_endpoint_in {
- char *int_in_buffer;
- size_t int_in_size;
- unsigned char int_in_endpointAddr;
- unsigned char int_in_interval;
- unsigned int int_in_pipe;
-};
-
-struct bcm_intr_endpoint_out {
- char *int_out_buffer;
- size_t int_out_size;
- unsigned char int_out_endpointAddr;
- unsigned char int_out_interval;
- unsigned int int_out_pipe;
-};
-
-struct bcm_usb_tcb {
- struct urb *urb;
- void *psIntfAdapter;
- bool bUsed;
-};
-
-struct bcm_usb_rcb {
- struct urb *urb;
- void *psIntfAdapter;
- bool bUsed;
-};
-
-/*
- * This is the interface specific Sub-Adapter
- * Structure.
- */
-struct bcm_interface_adapter {
- struct usb_device *udev;
- struct usb_interface *interface;
- /* Bulk endpoint in info */
- struct bcm_bulk_endpoint_in sBulkIn;
- /* Bulk endpoint out info */
- struct bcm_bulk_endpoint_out sBulkOut;
- /* Interrupt endpoint in info */
- struct bcm_intr_endpoint_in sIntrIn;
- /* Interrupt endpoint out info */
- struct bcm_intr_endpoint_out sIntrOut;
- unsigned long ulInterruptData[2];
- struct urb *psInterruptUrb;
- struct bcm_usb_tcb asUsbTcb[MAXIMUM_USB_TCB];
- struct bcm_usb_rcb asUsbRcb[MAXIMUM_USB_RCB];
- atomic_t uNumTcbUsed;
- atomic_t uCurrTcb;
- atomic_t uNumRcbUsed;
- atomic_t uCurrRcb;
- struct bcm_mini_adapter *psAdapter;
- bool bFlashBoot;
- bool bHighSpeedDevice;
- bool bSuspended;
- bool bPreparingForBusSuspend;
- struct work_struct usbSuspendWork;
-};
-
-#endif
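
A hypothetical helper (not in the removed header) illustrating how the TCB bookkeeping fields above are typically consumed: take the next slot round-robin and account for it. Locking and overflow handling are omitted.

static struct bcm_usb_tcb *get_next_tcb(struct bcm_interface_adapter *intf)
{
	int idx = atomic_read(&intf->uCurrTcb);
	struct bcm_usb_tcb *tcb = &intf->asUsbTcb[idx];

	atomic_set(&intf->uCurrTcb, (idx + 1) % MAXIMUM_USB_TCB);
	atomic_inc(&intf->uNumTcbUsed);
	tcb->bUsed = true;
	return tcb;
}
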
diff --git a/drivers/staging/bcm/InterfaceDld.c b/drivers/staging/bcm/InterfaceDld.c
deleted file mode 100644
index abc7a7ab782..00000000000
--- a/drivers/staging/bcm/InterfaceDld.c
+++ /dev/null
@@ -1,317 +0,0 @@
-#include "headers.h"
-
-int InterfaceFileDownload(PVOID arg, struct file *flp, unsigned int on_chip_loc)
-{
- /* unsigned int reg = 0; */
- mm_segment_t oldfs = {0};
- int errno = 0, len = 0; /* ,is_config_file = 0 */
- loff_t pos = 0;
- struct bcm_interface_adapter *psIntfAdapter = arg;
- /* struct bcm_mini_adapter *Adapter = psIntfAdapter->psAdapter; */
- char *buff = kmalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_KERNEL);
-
- if (!buff)
- return -ENOMEM;
-
- while (1) {
- oldfs = get_fs();
- set_fs(get_ds());
- len = vfs_read(flp, (void __force __user *)buff,
- MAX_TRANSFER_CTRL_BYTE_USB, &pos);
- set_fs(oldfs);
- if (len <= 0) {
- if (len < 0)
- errno = len;
- else
- errno = 0;
- break;
- }
- /* BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_INITEXIT, MP_INIT,
- * DBG_LVL_ALL, buff,
- * MAX_TRANSFER_CTRL_BYTE_USB);
- */
- errno = InterfaceWRM(psIntfAdapter, on_chip_loc, buff, len);
- if (errno)
- break;
- on_chip_loc += MAX_TRANSFER_CTRL_BYTE_USB;
- }
-
- kfree(buff);
- return errno;
-}
-
-int InterfaceFileReadbackFromChip(PVOID arg, struct file *flp,
- unsigned int on_chip_loc)
-{
- char *buff, *buff_readback;
- unsigned int reg = 0;
- mm_segment_t oldfs = {0};
- int errno = 0, len = 0, is_config_file = 0;
- loff_t pos = 0;
- static int fw_down;
- INT Status = STATUS_SUCCESS;
- struct bcm_interface_adapter *psIntfAdapter = arg;
- int bytes;
-
- buff = kzalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_DMA);
- buff_readback = kzalloc(MAX_TRANSFER_CTRL_BYTE_USB , GFP_DMA);
- if (!buff || !buff_readback) {
- kfree(buff);
- kfree(buff_readback);
-
- return -ENOMEM;
- }
-
- is_config_file = (on_chip_loc == CONFIG_BEGIN_ADDR) ? 1 : 0;
-
- while (1) {
- oldfs = get_fs();
- set_fs(get_ds());
- len = vfs_read(flp, (void __force __user *)buff,
- MAX_TRANSFER_CTRL_BYTE_USB, &pos);
- set_fs(oldfs);
- fw_down++;
-
- if (len <= 0) {
- if (len < 0)
- errno = len;
- else
- errno = 0;
- break;
- }
-
- bytes = InterfaceRDM(psIntfAdapter, on_chip_loc,
- buff_readback, len);
- if (bytes < 0) {
- Status = bytes;
- goto exit;
- }
- reg++;
- if ((len-sizeof(unsigned int)) < 4) {
- if (memcmp(buff_readback, buff, len)) {
- Status = -EIO;
- goto exit;
- }
- } else {
- len -= 4;
-
- while (len) {
- if (*(unsigned int *)&buff_readback[len] !=
- *(unsigned int *)&buff[len]) {
- Status = -EIO;
- goto exit;
- }
- len -= 4;
- }
- }
- on_chip_loc += MAX_TRANSFER_CTRL_BYTE_USB;
- } /* End of while(1) */
-
-exit:
- kfree(buff);
- kfree(buff_readback);
- return Status;
-}
-
-static int bcm_download_config_file(struct bcm_mini_adapter *Adapter,
- struct bcm_firmware_info *psFwInfo)
-{
- int retval = STATUS_SUCCESS;
- B_UINT32 value = 0;
-
- if (Adapter->pstargetparams == NULL) {
- Adapter->pstargetparams =
- kmalloc(sizeof(struct bcm_target_params), GFP_KERNEL);
- if (Adapter->pstargetparams == NULL)
- return -ENOMEM;
- }
-
- if (psFwInfo->u32FirmwareLength != sizeof(struct bcm_target_params))
- return -EIO;
-
- retval = copy_from_user(Adapter->pstargetparams,
- psFwInfo->pvMappedFirmwareAddress,
- psFwInfo->u32FirmwareLength);
- if (retval) {
- kfree(Adapter->pstargetparams);
- Adapter->pstargetparams = NULL;
- return -EFAULT;
- }
-
- /* Parse the structure and then Download the Firmware */
- beceem_parse_target_struct(Adapter);
-
- /* Initializing the NVM. */
- BcmInitNVM(Adapter);
- retval = InitLedSettings(Adapter);
-
- if (retval)
- return retval;
-
- if (Adapter->LEDInfo.led_thread_running &
- BCM_LED_THREAD_RUNNING_ACTIVELY) {
- Adapter->LEDInfo.bLedInitDone = false;
- Adapter->DriverState = DRIVER_INIT;
- wake_up(&Adapter->LEDInfo.notify_led_event);
- }
-
- if (Adapter->LEDInfo.led_thread_running &
- BCM_LED_THREAD_RUNNING_ACTIVELY) {
- Adapter->DriverState = FW_DOWNLOAD;
- wake_up(&Adapter->LEDInfo.notify_led_event);
- }
-
- /* Initialize the DDR Controller */
- retval = ddr_init(Adapter);
- if (retval)
- return retval;
-
- value = 0;
- wrmalt(Adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 4,
- &value, sizeof(value));
- wrmalt(Adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 8,
- &value, sizeof(value));
-
- if (Adapter->eNVMType == NVM_FLASH) {
- retval = PropagateCalParamsFromFlashToMemory(Adapter);
- if (retval)
- return retval;
- }
-
- retval = buffDnldVerify(Adapter, (PUCHAR)Adapter->pstargetparams,
- sizeof(struct bcm_target_params), CONFIG_BEGIN_ADDR);
-
- if (retval)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT,
- MP_INIT, DBG_LVL_ALL,
- "configuration file not downloaded properly");
- else
- Adapter->bCfgDownloaded = TRUE;
-
- return retval;
-}
-
-int bcm_ioctl_fw_download(struct bcm_mini_adapter *Adapter,
- struct bcm_firmware_info *psFwInfo)
-{
- int retval = STATUS_SUCCESS;
- PUCHAR buff = NULL;
-
-	/* The Config File is needed for the Driver to download the Config file
-	 * and Firmware. Check that the Config file is the first thing sent
-	 * from the Application.
- */
- atomic_set(&Adapter->uiMBupdate, false);
- if (!Adapter->bCfgDownloaded &&
- psFwInfo->u32StartingAddress != CONFIG_BEGIN_ADDR) {
- /* Can't Download Firmware. */
- return -EINVAL;
- }
-
- /* If Config File, Finish the DDR Settings and then Download CFG File */
- if (psFwInfo->u32StartingAddress == CONFIG_BEGIN_ADDR) {
- retval = bcm_download_config_file(Adapter, psFwInfo);
- } else {
- buff = kzalloc(psFwInfo->u32FirmwareLength, GFP_KERNEL);
- if (buff == NULL)
- return -ENOMEM;
-
- retval = copy_from_user(buff,
- psFwInfo->pvMappedFirmwareAddress,
- psFwInfo->u32FirmwareLength);
- if (retval != STATUS_SUCCESS) {
- retval = -EFAULT;
- goto error;
- }
-
- retval = buffDnldVerify(Adapter,
- buff,
- psFwInfo->u32FirmwareLength,
- psFwInfo->u32StartingAddress);
-
- if (retval != STATUS_SUCCESS)
- goto error;
- }
-
-error:
- kfree(buff);
- return retval;
-}
-
-static INT buffDnld(struct bcm_mini_adapter *Adapter,
- PUCHAR mappedbuffer, UINT u32FirmwareLength,
- ULONG u32StartingAddress)
-{
- unsigned int len = 0;
- int retval = STATUS_SUCCESS;
-
- len = u32FirmwareLength;
-
- while (u32FirmwareLength) {
- len = MIN_VAL(u32FirmwareLength, MAX_TRANSFER_CTRL_BYTE_USB);
- retval = wrm(Adapter, u32StartingAddress, mappedbuffer, len);
-
- if (retval)
- break;
- u32StartingAddress += len;
- u32FirmwareLength -= len;
- mappedbuffer += len;
- }
- return retval;
-}
-
-static INT buffRdbkVerify(struct bcm_mini_adapter *Adapter,
- PUCHAR mappedbuffer, UINT u32FirmwareLength,
- ULONG u32StartingAddress)
-{
- UINT len = u32FirmwareLength;
- INT retval = STATUS_SUCCESS;
- PUCHAR readbackbuff = kzalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_KERNEL);
- int bytes;
-
- if (NULL == readbackbuff)
- return -ENOMEM;
-
- while (u32FirmwareLength && !retval) {
- len = MIN_VAL(u32FirmwareLength, MAX_TRANSFER_CTRL_BYTE_USB);
- bytes = rdm(Adapter, u32StartingAddress, readbackbuff, len);
-
- if (bytes < 0) {
- retval = bytes;
- break;
- }
-
- if (memcmp(readbackbuff, mappedbuffer, len) != 0) {
- pr_err("%s() failed. The firmware doesn't match what was written",
- __func__);
- retval = -EIO;
- }
-
- u32StartingAddress += len;
- u32FirmwareLength -= len;
- mappedbuffer += len;
-
- } /* end of while (u32FirmwareLength && !retval) */
- kfree(readbackbuff);
- return retval;
-}
-
-INT buffDnldVerify(struct bcm_mini_adapter *Adapter,
- unsigned char *mappedbuffer,
- unsigned int u32FirmwareLength,
- unsigned long u32StartingAddress)
-{
- INT status = STATUS_SUCCESS;
-
- status = buffDnld(Adapter, mappedbuffer,
- u32FirmwareLength, u32StartingAddress);
- if (status != STATUS_SUCCESS)
- goto error;
-
- status = buffRdbkVerify(Adapter, mappedbuffer,
- u32FirmwareLength, u32StartingAddress);
- if (status != STATUS_SUCCESS)
- goto error;
-error:
- return status;
-}
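
The download path removed above always pairs a write with a read-back compare. A condensed sketch of one chunk of that pattern (the helper name is hypothetical; wrm()/rdm() are the driver's register-access primitives):

static int dnld_and_verify_chunk(struct bcm_mini_adapter *ad,
				 unsigned long dev_addr, unsigned char *src,
				 unsigned int len, unsigned char *scratch)
{
	int ret = wrm(ad, dev_addr, src, len);		/* push the chunk */

	if (ret)
		return ret;
	ret = rdm(ad, dev_addr, scratch, len);		/* read it back */
	if (ret < 0)
		return ret;
	return memcmp(scratch, src, len) ? -EIO : 0;	/* verify */
}
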
diff --git a/drivers/staging/bcm/InterfaceIdleMode.c b/drivers/staging/bcm/InterfaceIdleMode.c
deleted file mode 100644
index 612c89fba34..00000000000
--- a/drivers/staging/bcm/InterfaceIdleMode.c
+++ /dev/null
@@ -1,274 +0,0 @@
-#include "headers.h"
-
-/*
-Function: InterfaceIdleModeWakeup
-
-Description: This is the hardware specific Function for
- waking up HW device from Idle mode.
- A software abort pattern is written to the
- device to wake it and necessary power state
- transitions from host are performed here.
-
-Input parameters: IN struct bcm_mini_adapter *Adapter
- - Miniport Adapter Context
-
-Return: BCM_STATUS_SUCCESS - If Wakeup of the HW Interface
- was successful.
- Other - If an error occurred.
-*/
-
-/*
-Function: InterfaceIdleModeRespond
-
-Description: This is the hardware specific Function for
- responding to Idle mode request from target.
- Necessary power state transitions from host for
- idle mode or other device specific initializations
- are performed here.
-
-Input parameters: IN struct bcm_mini_adapter * Adapter
- - Miniport Adapter Context
-
-Return: BCM_STATUS_SUCCESS - If Idle mode response related
- HW configuration was successful.
- Other - If an error occurred.
-*/
-
-/*
-"dmem bfc02f00 100" tells how many times the device went into Idle mode.
-This value will be at address bfc02fa4, just before the value d0ea1dle.
-
-Set the time value by writing 7d0 at bfc02f98.
-
-Check whether the Ack timer expired on kannon by running the command
-"d qcslog"; if it shows 'e', the host has not sent a response
-to the f/w within 200 ms. The response should be
-sent to the f/w within 200 ms after the Idle/Shutdown request is issued.
-
-*/
-
-
-int InterfaceIdleModeRespond(struct bcm_mini_adapter *Adapter,
- unsigned int *puiBuffer)
-{
- int status = STATUS_SUCCESS;
- unsigned int uiRegRead = 0;
- int bytes;
-
- if (ntohl(*puiBuffer) == GO_TO_IDLE_MODE_PAYLOAD) {
- if (ntohl(*(puiBuffer+1)) == 0) {
-
- status = wrmalt(Adapter, SW_ABORT_IDLEMODE_LOC,
- &uiRegRead, sizeof(uiRegRead));
- if (status)
- return status;
-
- if (Adapter->ulPowerSaveMode ==
- DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING) {
- uiRegRead = 0x00000000;
- status = wrmalt(Adapter,
- DEBUG_INTERRUPT_GENERATOR_REGISTOR,
- &uiRegRead, sizeof(uiRegRead));
- if (status)
- return status;
- }
-			/* The register below should not be read in case of
-			 * Manual and Protocol Idle mode */
- else if (Adapter->ulPowerSaveMode !=
- DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE) {
- /* clear on read Register */
- bytes = rdmalt(Adapter, DEVICE_INT_OUT_EP_REG0,
- &uiRegRead, sizeof(uiRegRead));
- if (bytes < 0) {
- status = bytes;
- return status;
- }
- /* clear on read Register */
- bytes = rdmalt(Adapter, DEVICE_INT_OUT_EP_REG1,
- &uiRegRead, sizeof(uiRegRead));
- if (bytes < 0) {
- status = bytes;
- return status;
- }
- }
-
- /* Set Idle Mode Flag to False and
- * Clear IdleMode reg. */
- Adapter->IdleMode = false;
- Adapter->bTriedToWakeUpFromlowPowerMode = false;
-
- wake_up(&Adapter->lowpower_mode_wait_queue);
-
- } else {
- if (TRUE == Adapter->IdleMode)
- return status;
-
- uiRegRead = 0;
-
- if (Adapter->chip_id == BCS220_2 ||
- Adapter->chip_id == BCS220_2BC ||
- Adapter->chip_id == BCS250_BC ||
- Adapter->chip_id == BCS220_3) {
-
- bytes = rdmalt(Adapter, HPM_CONFIG_MSW,
- &uiRegRead, sizeof(uiRegRead));
- if (bytes < 0) {
- status = bytes;
- return status;
- }
-
-
- uiRegRead |= (1<<17);
-
- status = wrmalt(Adapter, HPM_CONFIG_MSW,
- &uiRegRead, sizeof(uiRegRead));
- if (status)
- return status;
- }
- SendIdleModeResponse(Adapter);
- }
- } else if (ntohl(*puiBuffer) == IDLE_MODE_SF_UPDATE_MSG) {
- OverrideServiceFlowParams(Adapter, puiBuffer);
- }
- return status;
-}
-
-static int InterfaceAbortIdlemode(struct bcm_mini_adapter *Adapter,
- unsigned int Pattern)
-{
- int status = STATUS_SUCCESS;
- unsigned int value;
- unsigned int chip_id;
- unsigned long timeout = 0, itr = 0;
-
- int lenwritten = 0;
- unsigned char aucAbortPattern[8] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF};
- struct bcm_interface_adapter *psInterfaceAdapter =
- Adapter->pvInterfaceAdapter;
-
- /* Abort Bus suspend if its already suspended */
- if ((TRUE == psInterfaceAdapter->bSuspended) &&
- (TRUE == Adapter->bDoSuspend))
- status = usb_autopm_get_interface(
- psInterfaceAdapter->interface);
-
- if ((Adapter->ulPowerSaveMode ==
- DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING) ||
- (Adapter->ulPowerSaveMode ==
- DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE)) {
- /* write the SW abort pattern. */
- status = wrmalt(Adapter, SW_ABORT_IDLEMODE_LOC,
- &Pattern, sizeof(Pattern));
- if (status)
- return status;
- }
-
- if (Adapter->ulPowerSaveMode ==
- DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING) {
- value = 0x80000000;
- status = wrmalt(Adapter,
- DEBUG_INTERRUPT_GENERATOR_REGISTOR,
- &value, sizeof(value));
- if (status)
- return status;
- } else if (Adapter->ulPowerSaveMode !=
- DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE) {
- /*
-		 * Get an Interrupt Out URB and send 8 bytes down.
-		 * To be done in thread context;
-		 * not using an asynchronous mechanism.
- */
- status = usb_interrupt_msg(psInterfaceAdapter->udev,
- usb_sndintpipe(psInterfaceAdapter->udev,
- psInterfaceAdapter->sIntrOut.int_out_endpointAddr),
- aucAbortPattern,
- 8,
- &lenwritten,
- 5000);
- if (status)
- return status;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- IDLE_MODE, DBG_LVL_ALL,
- "NOB Sent down :%d", lenwritten);
-
- /* mdelay(25); */
-
- timeout = jiffies + msecs_to_jiffies(50);
- while (time_after(timeout, jiffies)) {
- itr++;
- rdmalt(Adapter, CHIP_ID_REG, &chip_id, sizeof(UINT));
- if (0xbece3200 == (chip_id&~(0xF0)))
- chip_id = chip_id&~(0xF0);
- if (chip_id == Adapter->chip_id)
- break;
- }
- if (time_before(timeout, jiffies))
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- IDLE_MODE, DBG_LVL_ALL,
- "Not able to read chip-id even after 25 msec");
- else
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- IDLE_MODE, DBG_LVL_ALL,
- "Number of completed iteration to read chip-id :%lu", itr);
-
- status = wrmalt(Adapter, SW_ABORT_IDLEMODE_LOC,
- &Pattern, sizeof(status));
- if (status)
- return status;
- }
- return status;
-}
-int InterfaceIdleModeWakeup(struct bcm_mini_adapter *Adapter)
-{
- if (Adapter->bTriedToWakeUpFromlowPowerMode) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- IDLE_MODE, DBG_LVL_ALL,
- "Wake up already attempted.. ignoring\n");
- } else {
- Adapter->bTriedToWakeUpFromlowPowerMode = TRUE;
- InterfaceAbortIdlemode(Adapter, Adapter->usIdleModePattern);
-
- }
- return 0;
-}
-
-void InterfaceHandleShutdownModeWakeup(struct bcm_mini_adapter *Adapter)
-{
- unsigned int uiRegVal = 0;
- INT Status = 0;
- int bytes;
-
- if (Adapter->ulPowerSaveMode ==
- DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING) {
- /* clear idlemode interrupt. */
- uiRegVal = 0;
- Status = wrmalt(Adapter,
- DEBUG_INTERRUPT_GENERATOR_REGISTOR,
- &uiRegVal, sizeof(uiRegVal));
- if (Status)
- return;
- }
-
- else {
-
-/* clear Interrupt EP registers. */
- bytes = rdmalt(Adapter,
- DEVICE_INT_OUT_EP_REG0,
- &uiRegVal, sizeof(uiRegVal));
- if (bytes < 0) {
- Status = bytes;
- return;
- }
-
- bytes = rdmalt(Adapter,
- DEVICE_INT_OUT_EP_REG1,
- &uiRegVal, sizeof(uiRegVal));
- if (bytes < 0) {
- Status = bytes;
- return;
- }
- }
-}
-
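
The wake-up handshake in the removed InterfaceAbortIdlemode() boils down to "write the abort pattern, then poll the chip-id register until the device answers". A condensed sketch of that polling step (the helper name is hypothetical; the register and field names are the driver's):

static int wait_for_chip_wakeup(struct bcm_mini_adapter *ad)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(50);
	unsigned int chip_id = 0;

	while (time_before(jiffies, timeout)) {
		rdmalt(ad, CHIP_ID_REG, &chip_id, sizeof(chip_id));
		if ((chip_id & ~0xF0u) == 0xbece3200)
			chip_id &= ~0xF0u;
		if (chip_id == ad->chip_id)
			return 0;	/* device is awake again */
	}
	return -ETIMEDOUT;
}
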
diff --git a/drivers/staging/bcm/InterfaceIdleMode.h b/drivers/staging/bcm/InterfaceIdleMode.h
deleted file mode 100644
index 2ef64003aa8..00000000000
--- a/drivers/staging/bcm/InterfaceIdleMode.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef _INTERFACE_IDLEMODE_H
-#define _INTERFACE_IDLEMODE_H
-
-INT InterfaceIdleModeWakeup(struct bcm_mini_adapter *Adapter);
-
-INT InterfaceIdleModeRespond(struct bcm_mini_adapter *Adapter,
- unsigned int *puiBuffer);
-
-VOID InterfaceWriteIdleModeWakePattern(struct bcm_mini_adapter *Adapter);
-
-INT InterfaceWakeUp(struct bcm_mini_adapter *Adapter);
-
-VOID InterfaceHandleShutdownModeWakeup(struct bcm_mini_adapter *Adapter);
-#endif
-
diff --git a/drivers/staging/bcm/InterfaceInit.c b/drivers/staging/bcm/InterfaceInit.c
deleted file mode 100644
index bb61d34886b..00000000000
--- a/drivers/staging/bcm/InterfaceInit.c
+++ /dev/null
@@ -1,729 +0,0 @@
-#include "headers.h"
-#include <linux/usb/ch9.h>
-static struct usb_device_id InterfaceUsbtable[] = {
- { USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_T3) },
- { USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_T3B) },
- { USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_T3L) },
- { USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_SYM) },
- { USB_DEVICE(BCM_USB_VENDOR_ID_ZTE, BCM_USB_PRODUCT_ID_226) },
- { USB_DEVICE(BCM_USB_VENDOR_ID_FOXCONN, BCM_USB_PRODUCT_ID_1901) },
- { USB_DEVICE(BCM_USB_VENDOR_ID_ZTE, BCM_USB_PRODUCT_ID_ZTE_TU25) },
- { USB_DEVICE(BCM_USB_VENDOR_ID_ZTE, BCM_USB_PRODUCT_ID_ZTE_226) },
- { USB_DEVICE(BCM_USB_VENDOR_ID_ZTE, BCM_USB_PRODUCT_ID_ZTE_326) },
- { }
-};
-MODULE_DEVICE_TABLE(usb, InterfaceUsbtable);
-
-static int debug = -1;
-module_param(debug, uint, 0600);
-MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
-
-static const u32 default_msg =
- NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
- | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR
- | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
-
-static int InterfaceAdapterInit(struct bcm_interface_adapter *Adapter);
-
-static void InterfaceAdapterFree(struct bcm_interface_adapter *psIntfAdapter)
-{
- int i = 0;
- struct bcm_mini_adapter *ps_ad = psIntfAdapter->psAdapter;
-
- /* Wake up the wait_queue... */
- if (ps_ad->LEDInfo.led_thread_running &
- BCM_LED_THREAD_RUNNING_ACTIVELY) {
- ps_ad->DriverState = DRIVER_HALT;
- wake_up(&ps_ad->LEDInfo.notify_led_event);
- }
- reset_card_proc(ps_ad);
-
- /*
-	 * The worst-case time taken by RDM/WRM will be 5 sec. Check after
-	 * every 100 ms to ascertain that the device is not being accessed.
-	 * After this, no RDM/WRM should be made.
- */
- while (ps_ad->DeviceAccess) {
- BCM_DEBUG_PRINT(ps_ad, DBG_TYPE_INITEXIT, DRV_ENTRY,
- DBG_LVL_ALL, "Device is being accessed.\n");
- msleep(100);
- }
- /* Free interrupt URB */
- /* ps_ad->device_removed = TRUE; */
- usb_free_urb(psIntfAdapter->psInterruptUrb);
-
- /* Free transmit URBs */
- for (i = 0; i < MAXIMUM_USB_TCB; i++) {
- if (psIntfAdapter->asUsbTcb[i].urb != NULL) {
- usb_free_urb(psIntfAdapter->asUsbTcb[i].urb);
- psIntfAdapter->asUsbTcb[i].urb = NULL;
- }
- }
- /* Free receive URB and buffers */
- for (i = 0; i < MAXIMUM_USB_RCB; i++) {
- if (psIntfAdapter->asUsbRcb[i].urb != NULL) {
- kfree(psIntfAdapter->asUsbRcb[i].urb->transfer_buffer);
- usb_free_urb(psIntfAdapter->asUsbRcb[i].urb);
- psIntfAdapter->asUsbRcb[i].urb = NULL;
- }
- }
- AdapterFree(ps_ad);
-}
-
-static void ConfigureEndPointTypesThroughEEPROM(
- struct bcm_mini_adapter *Adapter)
-{
- u32 ulReg;
- int bytes;
- struct bcm_interface_adapter *interfaceAdapter;
-
- /* Program EP2 MAX_PKT_SIZE */
- ulReg = ntohl(EP2_MPS_REG);
- BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x128, 4, TRUE);
- ulReg = ntohl(EP2_MPS);
- BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x12C, 4, TRUE);
-
- ulReg = ntohl(EP2_CFG_REG);
- BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x132, 4, TRUE);
- interfaceAdapter =
- (struct bcm_interface_adapter *)(Adapter->pvInterfaceAdapter);
- if (interfaceAdapter->bHighSpeedDevice) {
- ulReg = ntohl(EP2_CFG_INT);
- BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x136, 4, TRUE);
- } else {
- /* USE BULK EP as TX in FS mode. */
- ulReg = ntohl(EP2_CFG_BULK);
- BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x136, 4, TRUE);
- }
-
- /* Program EP4 MAX_PKT_SIZE. */
- ulReg = ntohl(EP4_MPS_REG);
- BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x13C, 4, TRUE);
- ulReg = ntohl(EP4_MPS);
- BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x140, 4, TRUE);
-
- /* Program TX EP as interrupt(Alternate Setting) */
- bytes = rdmalt(Adapter, 0x0F0110F8, &ulReg, sizeof(u32));
- if (bytes < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, DRV_ENTRY,
- DBG_LVL_ALL, "reading of Tx EP failed\n");
- return;
- }
- ulReg |= 0x6;
-
- ulReg = ntohl(ulReg);
- BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x1CC, 4, TRUE);
-
- ulReg = ntohl(EP4_CFG_REG);
- BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x1C8, 4, TRUE);
- /* Program ISOCHRONOUS EP size to zero. */
- ulReg = ntohl(ISO_MPS_REG);
- BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x1D2, 4, TRUE);
- ulReg = ntohl(ISO_MPS);
- BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x1D6, 4, TRUE);
-
- /*
- * Update EEPROM Version.
- * Read 4 bytes from 508 and modify 511 and 510.
- */
- ReadBeceemEEPROM(Adapter, 0x1FC, &ulReg);
- ulReg &= 0x0101FFFF;
- BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x1FC, 4, TRUE);
-
- /*
- * Update length field if required.
- * Also make the string NULL terminated.
- */
-
- ReadBeceemEEPROM(Adapter, 0xA8, &ulReg);
- if ((ulReg&0x00FF0000)>>16 > 0x30) {
- ulReg = (ulReg&0xFF00FFFF)|(0x30<<16);
- BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0xA8, 4, TRUE);
- }
- ReadBeceemEEPROM(Adapter, 0x148, &ulReg);
- if ((ulReg&0x00FF0000)>>16 > 0x30) {
- ulReg = (ulReg&0xFF00FFFF)|(0x30<<16);
- BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x148, 4, TRUE);
- }
- ulReg = 0;
- BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x122, 4, TRUE);
- ulReg = 0;
- BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x1C2, 4, TRUE);
-}
-
-static int usbbcm_device_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
-{
- struct usb_device *udev = interface_to_usbdev(intf);
- int retval;
- struct bcm_mini_adapter *psAdapter;
- struct bcm_interface_adapter *psIntfAdapter;
- struct net_device *ndev;
-
- /* Reserve one extra queue for the bit-bucket */
- ndev = alloc_etherdev_mq(sizeof(struct bcm_mini_adapter),
- NO_OF_QUEUES + 1);
- if (ndev == NULL) {
- dev_err(&udev->dev, DRV_NAME ": no memory for device\n");
- return -ENOMEM;
- }
-
- SET_NETDEV_DEV(ndev, &intf->dev);
-
- psAdapter = netdev_priv(ndev);
- psAdapter->dev = ndev;
- psAdapter->msg_enable = netif_msg_init(debug, default_msg);
-
- /* Init default driver debug state */
-
- psAdapter->stDebugState.debug_level = DBG_LVL_CURR;
- psAdapter->stDebugState.type = DBG_TYPE_INITEXIT;
-
- /*
- * Technically, one can start using BCM_DEBUG_PRINT after this point.
- * However, realize that by default the Type/Subtype bitmaps are all
- * zero now; so no prints will actually appear until the TestApp turns
- * on debug paths via the ioctl(); so practically speaking, in early
- * init, no logging happens.
- *
- * A solution (used below): we explicitly set the bitmaps to 1 for
- * Type=DBG_TYPE_INITEXIT and ALL subtype's of the same. Now all bcm
- * debug statements get logged, enabling debug during early init.
- * Further, we turn this OFF once init_module() completes.
- */
-
- psAdapter->stDebugState.subtype[DBG_TYPE_INITEXIT] = 0xff;
- BCM_SHOW_DEBUG_BITMAP(psAdapter);
-
- retval = InitAdapter(psAdapter);
- if (retval) {
- dev_err(&udev->dev, DRV_NAME ": InitAdapter Failed\n");
- AdapterFree(psAdapter);
- return retval;
- }
-
- /* Allocate interface adapter structure */
- psIntfAdapter = kzalloc(sizeof(struct bcm_interface_adapter),
- GFP_KERNEL);
- if (psIntfAdapter == NULL) {
- AdapterFree(psAdapter);
- return -ENOMEM;
- }
-
- psAdapter->pvInterfaceAdapter = psIntfAdapter;
- psIntfAdapter->psAdapter = psAdapter;
-
- /* Store usb interface in Interface Adapter */
- psIntfAdapter->interface = intf;
- usb_set_intfdata(intf, psIntfAdapter);
-
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "psIntfAdapter 0x%p\n", psIntfAdapter);
- retval = InterfaceAdapterInit(psIntfAdapter);
- if (retval) {
- /* If the Firmware/Cfg File is not present
- * then return success, let the application
- * download the files.
- */
- if (-ENOENT == retval) {
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY,
- DBG_LVL_ALL,
- "File Not Found. Use app to download.\n");
- return STATUS_SUCCESS;
- }
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY,
- DBG_LVL_ALL, "InterfaceAdapterInit failed.\n");
- usb_set_intfdata(intf, NULL);
- udev = interface_to_usbdev(intf);
- usb_put_dev(udev);
- InterfaceAdapterFree(psIntfAdapter);
- return retval;
- }
- if (psAdapter->chip_id > T3) {
- uint32_t uiNackZeroLengthInt = 4;
-
- retval =
- wrmalt(psAdapter, DISABLE_USB_ZERO_LEN_INT,
- &uiNackZeroLengthInt,
- sizeof(uiNackZeroLengthInt));
- if (retval)
- return retval;
- }
-
- /* Check whether the USB-Device Supports remote Wake-Up */
- if (USB_CONFIG_ATT_WAKEUP & udev->actconfig->desc.bmAttributes) {
- /* If Suspend then only support dynamic suspend */
- if (psAdapter->bDoSuspend) {
-#ifdef CONFIG_PM
- pm_runtime_set_autosuspend_delay(&udev->dev, 0);
- intf->needs_remote_wakeup = 1;
- usb_enable_autosuspend(udev);
- device_init_wakeup(&intf->dev, 1);
- INIT_WORK(&psIntfAdapter->usbSuspendWork,
- putUsbSuspend);
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY,
- DBG_LVL_ALL,
- "Enabling USB Auto-Suspend\n");
-#endif
- } else {
- intf->needs_remote_wakeup = 0;
- usb_disable_autosuspend(udev);
- }
- }
-
- psAdapter->stDebugState.subtype[DBG_TYPE_INITEXIT] = 0x0;
- return retval;
-}
-
-static void usbbcm_disconnect(struct usb_interface *intf)
-{
- struct bcm_interface_adapter *psIntfAdapter = usb_get_intfdata(intf);
- struct bcm_mini_adapter *psAdapter;
- struct usb_device *udev = interface_to_usbdev(intf);
-
- if (psIntfAdapter == NULL)
- return;
-
- psAdapter = psIntfAdapter->psAdapter;
- netif_device_detach(psAdapter->dev);
-
- if (psAdapter->bDoSuspend)
- intf->needs_remote_wakeup = 0;
-
- psAdapter->device_removed = TRUE;
- usb_set_intfdata(intf, NULL);
- InterfaceAdapterFree(psIntfAdapter);
- usb_put_dev(udev);
-}
-
-static int AllocUsbCb(struct bcm_interface_adapter *psIntfAdapter)
-{
- int i = 0;
-
- for (i = 0; i < MAXIMUM_USB_TCB; i++) {
- psIntfAdapter->asUsbTcb[i].urb = usb_alloc_urb(0, GFP_KERNEL);
-
- if (psIntfAdapter->asUsbTcb[i].urb == NULL) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
- DBG_TYPE_PRINTK, 0, 0,
- "Can't allocate Tx urb for index %d\n",
- i);
- return -ENOMEM;
- }
- }
-
- for (i = 0; i < MAXIMUM_USB_RCB; i++) {
- psIntfAdapter->asUsbRcb[i].urb = usb_alloc_urb(0, GFP_KERNEL);
-
- if (psIntfAdapter->asUsbRcb[i].urb == NULL) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
- DBG_TYPE_PRINTK, 0, 0,
- "Can't allocate Rx urb for index %d\n",
- i);
- return -ENOMEM;
- }
-
- psIntfAdapter->asUsbRcb[i].urb->transfer_buffer =
- kmalloc(MAX_DATA_BUFFER_SIZE, GFP_KERNEL);
-
- if (psIntfAdapter->asUsbRcb[i].urb->transfer_buffer == NULL) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
- DBG_TYPE_PRINTK, 0, 0,
- "Can't allocate Rx buffer for index %d\n",
- i);
- return -ENOMEM;
- }
- psIntfAdapter->asUsbRcb[i].urb->transfer_buffer_length =
- MAX_DATA_BUFFER_SIZE;
- }
- return 0;
-}
-
-static int device_run(struct bcm_interface_adapter *psIntfAdapter)
-{
- int value = 0;
- UINT status = STATUS_SUCCESS;
- struct bcm_mini_adapter *psAd = psIntfAdapter->psAdapter;
-
- status = InitCardAndDownloadFirmware(psAd);
- if (status != STATUS_SUCCESS) {
- pr_err(DRV_NAME "InitCardAndDownloadFirmware failed.\n");
- return status;
- }
- if (psAd->fw_download_done) {
- if (StartInterruptUrb(psIntfAdapter)) {
- BCM_DEBUG_PRINT(psAd, DBG_TYPE_INITEXIT, DRV_ENTRY,
- DBG_LVL_ALL,
- "Cannot send interrupt in URB\n");
- }
-
- /*
-		 * Now register the control interface. After downloading the
-		 * f/w, wait for 5 sec to get the mailbox interrupt.
- */
- psAd->waiting_to_fw_download_done = false;
- value = wait_event_timeout(psAd->ioctl_fw_dnld_wait_queue,
- psAd->waiting_to_fw_download_done,
- 5 * HZ);
-
- if (value == 0)
- pr_err(DRV_NAME ": Timeout waiting for mailbox interrupt.\n");
-
- if (register_control_device_interface(psAd) < 0) {
- pr_err(DRV_NAME ": Register Control Device failed.\n");
- return -EIO;
- }
- }
- return 0;
-}
-
-static int select_alternate_setting_for_highspeed_modem(
- struct bcm_interface_adapter *psIntfAdapter,
- struct usb_endpoint_descriptor **endpoint,
- const struct usb_host_interface *iface_desc,
- int *usedIntOutForBulkTransfer)
-{
- int retval = 0;
- struct bcm_mini_adapter *psAd = psIntfAdapter->psAdapter;
-
-	/* Select alternate setting one as the default setting
-	 * for a High Speed modem. */
- if (psIntfAdapter->bHighSpeedDevice)
- retval = usb_set_interface(psIntfAdapter->udev,
- DEFAULT_SETTING_0,
- ALTERNATE_SETTING_1);
- BCM_DEBUG_PRINT(psAd, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "BCM16 is applicable on this dongle\n");
- if (retval || !psIntfAdapter->bHighSpeedDevice) {
- *usedIntOutForBulkTransfer = EP2;
- *endpoint = &iface_desc->endpoint[EP2].desc;
- BCM_DEBUG_PRINT(psAd, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "Interface altsetting failed or modem is configured to Full Speed, hence will work on default setting 0\n");
- /*
-		 * If the modem is a high speed device, EP2 should be
-		 * an INT OUT end point.
-		 *
-		 * If the modem is FS then EP2 should be a bulk end
-		 * point.
- */
- if ((psIntfAdapter->bHighSpeedDevice &&
- !usb_endpoint_is_int_out(*endpoint)) ||
- (!psIntfAdapter->bHighSpeedDevice &&
- !usb_endpoint_is_bulk_out(*endpoint))) {
- BCM_DEBUG_PRINT(psAd, DBG_TYPE_INITEXIT, DRV_ENTRY,
- DBG_LVL_ALL,
- "Configuring the EEPROM\n");
- /* change the EP2, EP4 to INT OUT end point */
- ConfigureEndPointTypesThroughEEPROM(
- psAd);
-
- /*
-			 * This resets the device; if anything has
-			 * changed in the USB descriptor it will
-			 * report a failure and re-enumerate
-			 * the device.
- */
- retval = usb_reset_device(psIntfAdapter->udev);
- if (retval) {
- BCM_DEBUG_PRINT(psAd, DBG_TYPE_INITEXIT,
- DRV_ENTRY, DBG_LVL_ALL,
- "reset failed. Re-enumerating the device.\n");
- return retval;
- }
-
- }
- if (!psIntfAdapter->bHighSpeedDevice &&
- usb_endpoint_is_bulk_out(*endpoint)) {
- /*
-			 * Once BULK is selected in FS mode,
-			 * revert it back to INT,
-			 * else USB_IF will fail.
- */
- UINT _uiData = ntohl(EP2_CFG_INT);
-
- BCM_DEBUG_PRINT(psAd, DBG_TYPE_INITEXIT, DRV_ENTRY,
- DBG_LVL_ALL,
- "Reverting Bulk to INT as it is in Full Speed mode.\n");
- BeceemEEPROMBulkWrite(psAd, (PUCHAR) & _uiData, 0x136,
- 4, TRUE);
- }
- } else {
- *usedIntOutForBulkTransfer = EP4;
- *endpoint = &iface_desc->endpoint[EP4].desc;
- BCM_DEBUG_PRINT(psAd, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "Choosing AltSetting as a default setting.\n");
- if (!usb_endpoint_is_int_out(*endpoint)) {
- BCM_DEBUG_PRINT(psAd, DBG_TYPE_INITEXIT, DRV_ENTRY,
- DBG_LVL_ALL,
- "Dongle does not have BCM16 Fix.\n");
- /*
- * change the EP2, EP4 to INT OUT end point and use EP4
- * in altsetting
- */
- ConfigureEndPointTypesThroughEEPROM(psAd);
-
- /*
-			 * This resets the device; if anything has
-			 * changed in the USB descriptor it will
-			 * report a failure and re-enumerate
-			 * the device.
- */
- retval = usb_reset_device(psIntfAdapter->udev);
- if (retval) {
- BCM_DEBUG_PRINT(psAd, DBG_TYPE_INITEXIT,
- DRV_ENTRY, DBG_LVL_ALL,
- "reset failed. Re-enumerating the device.\n");
- return retval;
- }
- }
- }
-
- return 0;
-}
-
-static int InterfaceAdapterInit(struct bcm_interface_adapter *psIntfAdapter)
-{
- struct usb_host_interface *iface_desc;
- struct usb_endpoint_descriptor *endpoint;
- size_t buffer_size;
- unsigned long value;
- int retval = 0;
- int usedIntOutForBulkTransfer = 0;
- bool bBcm16 = false;
- UINT uiData = 0;
- int bytes;
- struct bcm_mini_adapter *psAd = psIntfAdapter->psAdapter;
-
- /* Store the usb dev into interface adapter */
- psIntfAdapter->udev =
- usb_get_dev(interface_to_usbdev(psIntfAdapter->interface));
-
- psIntfAdapter->bHighSpeedDevice =
- (psIntfAdapter->udev->speed == USB_SPEED_HIGH);
- psAd->interface_rdm = BcmRDM;
- psAd->interface_wrm = BcmWRM;
-
- bytes = rdmalt(psAd, CHIP_ID_REG, (u32 *) &(psAd->chip_id),
- sizeof(u32));
- if (bytes < 0) {
- retval = bytes;
- BCM_DEBUG_PRINT(psAd, DBG_TYPE_PRINTK, 0, 0,
- "CHIP ID Read Failed\n");
- return retval;
- }
-
- if (0xbece3200 == (psAd->chip_id & ~(0xF0)))
- psAd->chip_id &= ~0xF0;
-
- dev_info(&psIntfAdapter->udev->dev, "RDM Chip ID 0x%lx\n",
- psAd->chip_id);
-
- iface_desc = psIntfAdapter->interface->cur_altsetting;
-
- if (psAd->chip_id == T3B) {
- /* A T3B device will have an EEPROM; check whether the EEPROM is
- * proper and BCM16 can be done or not. */
- BeceemEEPROMBulkRead(psAd, &uiData, 0x0, 4);
- if (uiData == BECM)
- bBcm16 = TRUE;
-
- dev_info(&psIntfAdapter->udev->dev,
- "number of alternate setting %d\n",
- psIntfAdapter->interface->num_altsetting);
-
- if (bBcm16 == TRUE) {
- retval = select_alternate_setting_for_highspeed_modem(
- psIntfAdapter, &endpoint, iface_desc,
- &usedIntOutForBulkTransfer);
- if (retval)
- return retval;
- }
- }
-
- iface_desc = psIntfAdapter->interface->cur_altsetting;
-
- for (value = 0; value < iface_desc->desc.bNumEndpoints; ++value) {
- endpoint = &iface_desc->endpoint[value].desc;
-
- if (!psIntfAdapter->sBulkIn.bulk_in_endpointAddr &&
- usb_endpoint_is_bulk_in(endpoint)) {
- buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
- psIntfAdapter->sBulkIn.bulk_in_size = buffer_size;
- psIntfAdapter->sBulkIn.bulk_in_endpointAddr =
- endpoint->bEndpointAddress;
- psIntfAdapter->sBulkIn.bulk_in_pipe = usb_rcvbulkpipe(
- psIntfAdapter->udev,
- psIntfAdapter->sBulkIn.bulk_in_endpointAddr);
- }
-
- if (!psIntfAdapter->sBulkOut.bulk_out_endpointAddr &&
- usb_endpoint_is_bulk_out(endpoint)) {
- psIntfAdapter->sBulkOut.bulk_out_endpointAddr =
- endpoint->bEndpointAddress;
- psIntfAdapter->sBulkOut.bulk_out_pipe = usb_sndbulkpipe(
- psIntfAdapter->udev,
- psIntfAdapter->sBulkOut.bulk_out_endpointAddr);
- }
-
- if (!psIntfAdapter->sIntrIn.int_in_endpointAddr &&
- usb_endpoint_is_int_in(endpoint)) {
- buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
- psIntfAdapter->sIntrIn.int_in_size = buffer_size;
- psIntfAdapter->sIntrIn.int_in_endpointAddr =
- endpoint->bEndpointAddress;
- psIntfAdapter->sIntrIn.int_in_interval =
- endpoint->bInterval;
- psIntfAdapter->sIntrIn.int_in_buffer =
- kmalloc(buffer_size, GFP_KERNEL);
- if (!psIntfAdapter->sIntrIn.int_in_buffer)
- return -EINVAL;
- }
-
- if (!psIntfAdapter->sIntrOut.int_out_endpointAddr &&
- usb_endpoint_is_int_out(endpoint)) {
- if (!psIntfAdapter->sBulkOut.bulk_out_endpointAddr &&
- (psAd->chip_id == T3B) &&
- (value == usedIntOutForBulkTransfer)) {
- /*
- * use the first INT OUT endpoint as a bulk out
- * endpoint
- */
- buffer_size =
- le16_to_cpu(endpoint->wMaxPacketSize);
- psIntfAdapter->sBulkOut.bulk_out_size =
- buffer_size;
- psIntfAdapter->sBulkOut.bulk_out_endpointAddr =
- endpoint->bEndpointAddress;
- psIntfAdapter->sBulkOut.bulk_out_pipe =
- usb_sndintpipe(psIntfAdapter->udev,
- psIntfAdapter->sBulkOut
- .bulk_out_endpointAddr);
- psIntfAdapter->sBulkOut.int_out_interval =
- endpoint->bInterval;
- } else if (value == EP6) {
- buffer_size =
- le16_to_cpu(endpoint->wMaxPacketSize);
- psIntfAdapter->sIntrOut.int_out_size =
- buffer_size;
- psIntfAdapter->sIntrOut.int_out_endpointAddr =
- endpoint->bEndpointAddress;
- psIntfAdapter->sIntrOut.int_out_interval =
- endpoint->bInterval;
- psIntfAdapter->sIntrOut.int_out_buffer =
- kmalloc(buffer_size, GFP_KERNEL);
- if (!psIntfAdapter->sIntrOut.int_out_buffer)
- return -EINVAL;
- }
- }
- }
-
- usb_set_intfdata(psIntfAdapter->interface, psIntfAdapter);
-
- psAd->bcm_file_download = InterfaceFileDownload;
- psAd->bcm_file_readback_from_chip = InterfaceFileReadbackFromChip;
- psAd->interface_transmit = InterfaceTransmitPacket;
-
- retval = CreateInterruptUrb(psIntfAdapter);
-
- if (retval) {
- BCM_DEBUG_PRINT(psAd, DBG_TYPE_PRINTK, 0, 0,
- "Cannot create interrupt urb\n");
- return retval;
- }
-
- retval = AllocUsbCb(psIntfAdapter);
- if (retval)
- return retval;
-
- return device_run(psIntfAdapter);
-}
-
-static int InterfaceSuspend(struct usb_interface *intf, pm_message_t message)
-{
- struct bcm_interface_adapter *psIntfAdapter = usb_get_intfdata(intf);
-
- psIntfAdapter->bSuspended = TRUE;
-
- if (psIntfAdapter->bPreparingForBusSuspend) {
- psIntfAdapter->bPreparingForBusSuspend = false;
-
- if (psIntfAdapter->psAdapter->LinkStatus == LINKUP_DONE) {
- psIntfAdapter->psAdapter->IdleMode = TRUE;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
- DBG_TYPE_INITEXIT, DRV_ENTRY,
- DBG_LVL_ALL,
- "Host Entered in PMU Idle Mode.\n");
- } else {
- psIntfAdapter->psAdapter->bShutStatus = TRUE;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
- DBG_TYPE_INITEXIT, DRV_ENTRY,
- DBG_LVL_ALL,
- "Host Entered in PMU Shutdown Mode.\n");
- }
- }
- psIntfAdapter->psAdapter->bPreparingForLowPowerMode = false;
-
- /* Signaling the control pkt path */
- wake_up(&psIntfAdapter->psAdapter->lowpower_mode_wait_queue);
-
- return 0;
-}
-
-static int InterfaceResume(struct usb_interface *intf)
-{
- struct bcm_interface_adapter *psIntfAdapter = usb_get_intfdata(intf);
-
- mdelay(100);
- psIntfAdapter->bSuspended = false;
-
- StartInterruptUrb(psIntfAdapter);
- InterfaceRx(psIntfAdapter);
- return 0;
-}
-
-static struct usb_driver usbbcm_driver = {
- .name = "usbbcm",
- .probe = usbbcm_device_probe,
- .disconnect = usbbcm_disconnect,
- .suspend = InterfaceSuspend,
- .resume = InterfaceResume,
- .id_table = InterfaceUsbtable,
- .supports_autosuspend = 1,
-};
-
-struct class *bcm_class;
-
-static __init int bcm_init(void)
-{
- int retval;
-
- pr_info("%s: %s, %s\n", DRV_NAME, DRV_DESCRIPTION, DRV_VERSION);
- pr_info("%s\n", DRV_COPYRIGHT);
-
- bcm_class = class_create(THIS_MODULE, DRV_NAME);
- if (IS_ERR(bcm_class)) {
- pr_err(DRV_NAME ": could not create class\n");
- return PTR_ERR(bcm_class);
- }
-
- retval = usb_register(&usbbcm_driver);
- if (retval < 0) {
- pr_err(DRV_NAME ": could not register usb driver\n");
- class_destroy(bcm_class);
- return retval;
- }
- return 0;
-}
-
-static __exit void bcm_exit(void)
-{
- usb_deregister(&usbbcm_driver);
- class_destroy(bcm_class);
-}
-
-module_init(bcm_init);
-module_exit(bcm_exit);
-
-MODULE_DESCRIPTION(DRV_DESCRIPTION);
-MODULE_VERSION(DRV_VERSION);
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/bcm/InterfaceInit.h b/drivers/staging/bcm/InterfaceInit.h
deleted file mode 100644
index ffa6e9667ec..00000000000
--- a/drivers/staging/bcm/InterfaceInit.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef _INTERFACE_INIT_H
-#define _INTERFACE_INIT_H
-
-#define BCM_USB_VENDOR_ID_T3 0x198f
-#define BCM_USB_VENDOR_ID_FOXCONN 0x0489
-#define BCM_USB_VENDOR_ID_ZTE 0x19d2
-
-#define BCM_USB_PRODUCT_ID_T3 0x0300
-#define BCM_USB_PRODUCT_ID_T3B 0x0210
-#define BCM_USB_PRODUCT_ID_T3L 0x0220
-#define BCM_USB_PRODUCT_ID_SYM 0x15E
-#define BCM_USB_PRODUCT_ID_1901 0xe017
-#define BCM_USB_PRODUCT_ID_226 0x0132 /* not sure if this is valid */
-#define BCM_USB_PRODUCT_ID_ZTE_226 0x172
-#define BCM_USB_PRODUCT_ID_ZTE_326 0x173 /* ZTE AX326 */
-#define BCM_USB_PRODUCT_ID_ZTE_TU25 0x0007
-
-#define BCM_USB_MINOR_BASE 192
-
-int InterfaceInitialize(void);
-
-int InterfaceExit(void);
-
-int usbbcm_worker_thread(struct bcm_interface_adapter *psIntfAdapter);
-
-#endif
diff --git a/drivers/staging/bcm/InterfaceIsr.c b/drivers/staging/bcm/InterfaceIsr.c
deleted file mode 100644
index b9f8a7aa24f..00000000000
--- a/drivers/staging/bcm/InterfaceIsr.c
+++ /dev/null
@@ -1,190 +0,0 @@
-#include "headers.h"
-
-
-static void read_int_callback(struct urb *urb/*, struct pt_regs *regs*/)
-{
- int status = urb->status;
- struct bcm_interface_adapter *psIntfAdapter =
- (struct bcm_interface_adapter *)urb->context;
- struct bcm_mini_adapter *Adapter = psIntfAdapter->psAdapter;
-
- if (netif_msg_intr(Adapter))
- pr_info(PFX "%s: interrupt status %d\n",
- Adapter->dev->name, status);
-
- if (Adapter->device_removed) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT,
- DBG_LVL_ALL, "Device has Got Removed.");
- return;
- }
-
- if ((Adapter->bPreparingForLowPowerMode && Adapter->bDoSuspend) ||
- psIntfAdapter->bSuspended ||
- psIntfAdapter->bPreparingForBusSuspend) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT,
- DBG_LVL_ALL,
- "Interrupt call back is called while suspending the device");
- return;
- }
-
- switch (status) {
- /* success */
- case STATUS_SUCCESS:
- if (urb->actual_length) {
-
- if (psIntfAdapter->ulInterruptData[1] & 0xFF) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- INTF_INIT, DBG_LVL_ALL,
- "Got USIM interrupt");
- }
-
- if (psIntfAdapter->ulInterruptData[1] & 0xFF00) {
- atomic_set(&Adapter->CurrNumFreeTxDesc,
- (psIntfAdapter->ulInterruptData[1] &
- 0xFF00) >> 8);
- atomic_set(&Adapter->uiMBupdate, TRUE);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- INTF_INIT, DBG_LVL_ALL,
- "TX mailbox contains %d",
- atomic_read(&Adapter->CurrNumFreeTxDesc));
- }
- if (psIntfAdapter->ulInterruptData[1] >> 16) {
- Adapter->CurrNumRecvDescs =
- (psIntfAdapter->ulInterruptData[1] >> 16);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- INTF_INIT, DBG_LVL_ALL,
- "RX mailbox contains %d",
- Adapter->CurrNumRecvDescs);
- InterfaceRx(psIntfAdapter);
- }
- if (Adapter->fw_download_done &&
- !Adapter->downloadDDR &&
- atomic_read(&Adapter->CurrNumFreeTxDesc)) {
-
- psIntfAdapter->psAdapter->downloadDDR += 1;
- wake_up(&Adapter->tx_packet_wait_queue);
- }
- if (!Adapter->waiting_to_fw_download_done) {
- Adapter->waiting_to_fw_download_done = TRUE;
- wake_up(&Adapter->ioctl_fw_dnld_wait_queue);
- }
- if (!atomic_read(&Adapter->TxPktAvail)) {
- atomic_set(&Adapter->TxPktAvail, 1);
- wake_up(&Adapter->tx_packet_wait_queue);
- }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT,
- DBG_LVL_ALL, "Firing interrupt in URB");
- }
- break;
- case -ENOENT:
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT,
- DBG_LVL_ALL, "URB has got disconnected....");
- return;
- case -EINPROGRESS:
- /*
- * This situation may happen when usb_unlink_urb is used; for
- * details, check the usb_unlink_urb documentation.
- */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT,
- DBG_LVL_ALL,
- "Impossible condition has occurred... something very bad is going on");
- break;
- /* return; */
- case -EPIPE:
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT,
- DBG_LVL_ALL,
- "Interrupt IN endPoint has got halted/stalled...need to clear this");
- Adapter->bEndPointHalted = TRUE;
- wake_up(&Adapter->tx_packet_wait_queue);
- urb->status = STATUS_SUCCESS;
- return;
- /* software-driven interface shutdown */
- case -ECONNRESET: /* URB got unlinked */
- case -ESHUTDOWN: /* hardware gone; this is a serious problem */
- /*
- * Occurs only when something happens with the
- * host controller device
- */
- case -ENODEV: /* Device got removed */
- case -EINVAL:
- /*
- * Something very bad happened with the URB. No
- * description is available.
- */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT,
- DBG_LVL_ALL, "interrupt urb error %d", status);
- urb->status = STATUS_SUCCESS;
- break;
- /* return; */
- default:
- /*
- * This is required to check what the default conditions
- * are when this occurs.
- */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,
- "GOT DEFAULT INTERRUPT URB STATUS :%d..Please Analyze it...",
- status);
- break;
- }
-
- StartInterruptUrb(psIntfAdapter);
-
-
-}
-
-int CreateInterruptUrb(struct bcm_interface_adapter *psIntfAdapter)
-{
- psIntfAdapter->psInterruptUrb = usb_alloc_urb(0, GFP_KERNEL);
- if (!psIntfAdapter->psInterruptUrb) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS,
- INTF_INIT, DBG_LVL_ALL,
- "Cannot allocate interrupt urb");
- return -ENOMEM;
- }
- psIntfAdapter->psInterruptUrb->transfer_buffer =
- psIntfAdapter->ulInterruptData;
- psIntfAdapter->psInterruptUrb->transfer_buffer_length =
- sizeof(psIntfAdapter->ulInterruptData);
-
- psIntfAdapter->sIntrIn.int_in_pipe = usb_rcvintpipe(psIntfAdapter->udev,
- psIntfAdapter->sIntrIn.int_in_endpointAddr);
-
- usb_fill_int_urb(psIntfAdapter->psInterruptUrb, psIntfAdapter->udev,
- psIntfAdapter->sIntrIn.int_in_pipe,
- psIntfAdapter->psInterruptUrb->transfer_buffer,
- psIntfAdapter->psInterruptUrb->transfer_buffer_length,
- read_int_callback, psIntfAdapter,
- psIntfAdapter->sIntrIn.int_in_interval);
-
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, INTF_INIT,
- DBG_LVL_ALL, "Interrupt Interval: %d\n",
- psIntfAdapter->sIntrIn.int_in_interval);
- return 0;
-}
-
-
-INT StartInterruptUrb(struct bcm_interface_adapter *psIntfAdapter)
-{
- INT status = 0;
-
- if (!(psIntfAdapter->psAdapter->device_removed ||
- psIntfAdapter->psAdapter->bEndPointHalted ||
- psIntfAdapter->bSuspended ||
- psIntfAdapter->bPreparingForBusSuspend ||
- psIntfAdapter->psAdapter->StopAllXaction)) {
- status =
- usb_submit_urb(psIntfAdapter->psInterruptUrb, GFP_ATOMIC);
- if (status) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
- DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL,
- "Cannot send int urb %d\n", status);
- if (status == -EPIPE) {
- psIntfAdapter->psAdapter->bEndPointHalted =
- TRUE;
- wake_up(&psIntfAdapter->psAdapter->tx_packet_wait_queue);
- }
- }
- }
- return status;
-}
-
diff --git a/drivers/staging/bcm/InterfaceIsr.h b/drivers/staging/bcm/InterfaceIsr.h
deleted file mode 100644
index 3073bd71cfe..00000000000
--- a/drivers/staging/bcm/InterfaceIsr.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef _INTERFACE_ISR_H
-#define _INTERFACE_ISR_H
-
-int CreateInterruptUrb(struct bcm_interface_adapter *psIntfAdapter);
-
-
-INT StartInterruptUrb(struct bcm_interface_adapter *psIntfAdapter);
-
-
-VOID InterfaceEnableInterrupt(struct bcm_mini_adapter *Adapter);
-
-VOID InterfaceDisableInterrupt(struct bcm_mini_adapter *Adapter);
-
-#endif
-
diff --git a/drivers/staging/bcm/InterfaceMacros.h b/drivers/staging/bcm/InterfaceMacros.h
deleted file mode 100644
index fedb79437f3..00000000000
--- a/drivers/staging/bcm/InterfaceMacros.h
+++ /dev/null
@@ -1,18 +0,0 @@
-#ifndef _INTERFACE_MACROS_H
-#define _INTERFACE_MACROS_H
-
-#define BCM_USB_MAX_READ_LENGTH 2048
-
-#define MAXIMUM_USB_TCB 128
-#define MAXIMUM_USB_RCB 128
-
-#define MAX_BUFFERS_PER_QUEUE 256
-
-#define MAX_DATA_BUFFER_SIZE 2048
-
-/* Num of Asynchronous reads pending */
-#define NUM_RX_DESC 64
-
-#define SYS_CFG 0x0F000C00
-
-#endif
diff --git a/drivers/staging/bcm/InterfaceMisc.c b/drivers/staging/bcm/InterfaceMisc.c
deleted file mode 100644
index e5bcfec2a6c..00000000000
--- a/drivers/staging/bcm/InterfaceMisc.c
+++ /dev/null
@@ -1,247 +0,0 @@
-#include "headers.h"
-
-static int adapter_err_occurred(const struct bcm_interface_adapter *ad)
-{
- if (ad->psAdapter->device_removed == TRUE) {
- BCM_DEBUG_PRINT(ad->psAdapter, DBG_TYPE_PRINTK, 0, 0,
- "Device got removed");
- return -ENODEV;
- }
-
- if ((ad->psAdapter->StopAllXaction == TRUE) &&
- (ad->psAdapter->chip_id >= T3LPB)) {
- BCM_DEBUG_PRINT(ad->psAdapter, DBG_TYPE_OTHERS, RDM,
- DBG_LVL_ALL,
- "Currently Xaction is not allowed on the bus");
- return -EACCES;
- }
-
- if (ad->bSuspended == TRUE || ad->bPreparingForBusSuspend == TRUE) {
- BCM_DEBUG_PRINT(ad->psAdapter, DBG_TYPE_OTHERS, RDM,
- DBG_LVL_ALL,
- "Bus is in suspended state, hence RDM is not allowed");
- return -EACCES;
- }
-
- return 0;
-}
-
-int InterfaceRDM(struct bcm_interface_adapter *psIntfAdapter,
- unsigned int addr,
- void *buff,
- int len)
-{
- int bytes;
- int err = 0;
-
- if (!psIntfAdapter)
- return -EINVAL;
-
- err = adapter_err_occurred(psIntfAdapter);
- if (err)
- return err;
-
- psIntfAdapter->psAdapter->DeviceAccess = TRUE;
-
- bytes = usb_control_msg(psIntfAdapter->udev,
- usb_rcvctrlpipe(psIntfAdapter->udev, 0),
- 0x02,
- 0xC2,
- (addr & 0xFFFF),
- ((addr >> 16) & 0xFFFF),
- buff,
- len,
- 5000);
-
- if (-ENODEV == bytes)
- psIntfAdapter->psAdapter->device_removed = TRUE;
-
- if (bytes < 0)
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, RDM,
- DBG_LVL_ALL, "RDM failed status :%d", bytes);
- else
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, RDM,
- DBG_LVL_ALL, "RDM sent %d", bytes);
-
- psIntfAdapter->psAdapter->DeviceAccess = false;
- return bytes;
-}
-
-int InterfaceWRM(struct bcm_interface_adapter *psIntfAdapter,
- unsigned int addr,
- void *buff,
- int len)
-{
- int retval = 0;
- int err = 0;
-
- if (!psIntfAdapter)
- return -EINVAL;
-
- err = adapter_err_occurred(psIntfAdapter);
- if (err)
- return err;
-
- psIntfAdapter->psAdapter->DeviceAccess = TRUE;
-
- retval = usb_control_msg(psIntfAdapter->udev,
- usb_sndctrlpipe(psIntfAdapter->udev, 0),
- 0x01,
- 0x42,
- (addr & 0xFFFF),
- ((addr >> 16) & 0xFFFF),
- buff,
- len,
- 5000);
-
- if (-ENODEV == retval)
- psIntfAdapter->psAdapter->device_removed = TRUE;
-
- if (retval < 0) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, WRM,
- DBG_LVL_ALL, "WRM failed status :%d", retval);
- psIntfAdapter->psAdapter->DeviceAccess = false;
- return retval;
- } else {
- psIntfAdapter->psAdapter->DeviceAccess = false;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, WRM,
- DBG_LVL_ALL, "WRM sent %d", retval);
- return STATUS_SUCCESS;
- }
-}
-
-int BcmRDM(void *arg,
- unsigned int addr,
- void *buff,
- int len)
-{
- return InterfaceRDM((struct bcm_interface_adapter *)arg, addr, buff,
- len);
-}
-
-int BcmWRM(void *arg,
- unsigned int addr,
- void *buff,
- int len)
-{
- return InterfaceWRM((struct bcm_interface_adapter *)arg, addr, buff,
- len);
-}
-
-int Bcm_clear_halt_of_endpoints(struct bcm_mini_adapter *Adapter)
-{
- struct bcm_interface_adapter *psIntfAdapter =
- (struct bcm_interface_adapter *)(Adapter->pvInterfaceAdapter);
- int status = STATUS_SUCCESS;
-
- /*
- * usb_clear_halt - tells device to clear endpoint halt/stall condition
- * @dev: device whose endpoint is halted
- * @pipe: endpoint "pipe" being cleared
- * Context: !in_interrupt()
- *
- * usb_clear_halt is a synchronous call; it returns 0 on success,
- * otherwise it returns an error code.
- * This is used to clear halt conditions for bulk and interrupt
- * endpoints only.
- * Control and isochronous endpoints never halt.
- *
- * Any URBs queued for such an endpoint should normally be unlinked by
- * the driver before clearing the halt condition.
- *
- */
-
- /* Killing all the submitted urbs to different end points. */
- Bcm_kill_all_URBs(psIntfAdapter);
-
- /* clear the halted/stalled state for every end point */
- status = usb_clear_halt(psIntfAdapter->udev,
- psIntfAdapter->sIntrIn.int_in_pipe);
- if (status != STATUS_SUCCESS)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT,
- DBG_LVL_ALL,
- "Unable to Clear Halt of Interrupt IN end point. :%d ",
- status);
-
- status = usb_clear_halt(psIntfAdapter->udev,
- psIntfAdapter->sBulkIn.bulk_in_pipe);
- if (status != STATUS_SUCCESS)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT,
- DBG_LVL_ALL,
- "Unable to Clear Halt of Bulk IN end point. :%d ",
- status);
-
- status = usb_clear_halt(psIntfAdapter->udev,
- psIntfAdapter->sBulkOut.bulk_out_pipe);
- if (status != STATUS_SUCCESS)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT,
- DBG_LVL_ALL,
- "Unable to Clear Halt of Bulk OUT end point. :%d ",
- status);
-
- return status;
-}
-
-void Bcm_kill_all_URBs(struct bcm_interface_adapter *psIntfAdapter)
-{
- struct urb *tempUrb = NULL;
- unsigned int i;
-
- /*
- * usb_kill_urb - cancel a transfer request and wait for it to finish
- * @urb: pointer to the URB describing a previously submitted request;
- * it returns nothing, as it is a void-returning API.
- *
- * This routine cancels an in-progress request. It is guaranteed that
- * upon return all completion handlers will have finished and the URB
- * will be totally idle and available for reuse
- *
- * This routine may not be used in an interrupt context (such as a
- * bottom half or a completion handler), or when holding a spinlock, or
- * in other situations where the caller can't schedule().
- *
- */
-
- /* Cancel the submitted interrupt URB */
- if (psIntfAdapter->psInterruptUrb) {
- if (psIntfAdapter->psInterruptUrb->status == -EINPROGRESS)
- usb_kill_urb(psIntfAdapter->psInterruptUrb);
- }
-
- /* Cancel all submitted TX URBs */
- for (i = 0; i < MAXIMUM_USB_TCB; i++) {
- tempUrb = psIntfAdapter->asUsbTcb[i].urb;
- if (tempUrb) {
- if (tempUrb->status == -EINPROGRESS)
- usb_kill_urb(tempUrb);
- }
- }
-
- for (i = 0; i < MAXIMUM_USB_RCB; i++) {
- tempUrb = psIntfAdapter->asUsbRcb[i].urb;
- if (tempUrb) {
- if (tempUrb->status == -EINPROGRESS)
- usb_kill_urb(tempUrb);
- }
- }
-
- atomic_set(&psIntfAdapter->uNumTcbUsed, 0);
- atomic_set(&psIntfAdapter->uCurrTcb, 0);
-
- atomic_set(&psIntfAdapter->uNumRcbUsed, 0);
- atomic_set(&psIntfAdapter->uCurrRcb, 0);
-}
-
-void putUsbSuspend(struct work_struct *work)
-{
- struct bcm_interface_adapter *psIntfAdapter = NULL;
- struct usb_interface *intf = NULL;
-
- psIntfAdapter = container_of(work, struct bcm_interface_adapter,
- usbSuspendWork);
- intf = psIntfAdapter->interface;
-
- if (psIntfAdapter->bSuspended == false)
- usb_autopm_put_interface(intf);
-}
-
diff --git a/drivers/staging/bcm/InterfaceMisc.h b/drivers/staging/bcm/InterfaceMisc.h
deleted file mode 100644
index 0e5e38b2632..00000000000
--- a/drivers/staging/bcm/InterfaceMisc.h
+++ /dev/null
@@ -1,42 +0,0 @@
-#ifndef __INTERFACE_MISC_H
-#define __INTERFACE_MISC_H
-
-INT
-InterfaceRDM(struct bcm_interface_adapter *psIntfAdapter,
- UINT addr,
- PVOID buff,
- INT len);
-
-INT
-InterfaceWRM(struct bcm_interface_adapter *psIntfAdapter,
- UINT addr,
- PVOID buff,
- INT len);
-
-
-int InterfaceFileDownload(PVOID psIntfAdapter,
- struct file *flp,
- unsigned int on_chip_loc);
-
-int InterfaceFileReadbackFromChip(PVOID psIntfAdapter,
- struct file *flp,
- unsigned int on_chip_loc);
-
-
-int BcmRDM(PVOID arg,
- UINT addr,
- PVOID buff,
- INT len);
-
-int BcmWRM(PVOID arg,
- UINT addr,
- PVOID buff,
- INT len);
-
-INT Bcm_clear_halt_of_endpoints(struct bcm_mini_adapter *Adapter);
-
-VOID Bcm_kill_all_URBs(struct bcm_interface_adapter *psIntfAdapter);
-
-#define DISABLE_USB_ZERO_LEN_INT 0x0F011878
-
-#endif /* __INTERFACE_MISC_H */
diff --git a/drivers/staging/bcm/InterfaceRx.c b/drivers/staging/bcm/InterfaceRx.c
deleted file mode 100644
index 0f179b9382d..00000000000
--- a/drivers/staging/bcm/InterfaceRx.c
+++ /dev/null
@@ -1,289 +0,0 @@
-#include "headers.h"
-
-static void handle_control_packet(struct bcm_interface_adapter *interface,
- struct bcm_mini_adapter *ad,
- struct bcm_leader *leader,
- struct sk_buff *skb,
- struct urb *urb)
-{
- BCM_DEBUG_PRINT(interface->psAdapter, DBG_TYPE_RX, RX_CTRL, DBG_LVL_ALL,
- "Received control pkt...");
- *(PUSHORT)skb->data = leader->Status;
- memcpy(skb->data+sizeof(USHORT), urb->transfer_buffer +
- (sizeof(struct bcm_leader)), leader->PLength);
- skb->len = leader->PLength + sizeof(USHORT);
-
- spin_lock(&ad->control_queue_lock);
- ENQUEUEPACKET(ad->RxControlHead, ad->RxControlTail, skb);
- spin_unlock(&ad->control_queue_lock);
-
- atomic_inc(&ad->cntrlpktCnt);
- wake_up(&ad->process_rx_cntrlpkt);
-}
-
-static void format_eth_hdr_to_stack(struct bcm_interface_adapter *interface,
- struct bcm_mini_adapter *ad,
- struct bcm_leader *p_leader,
- struct sk_buff *skb,
- struct urb *urb,
- UINT ui_index,
- int queue_index,
- bool b_header_supression_endabled)
-{
- /*
- * Data Packet, Format a proper Ethernet Header
- * and give it to the stack
- */
- BCM_DEBUG_PRINT(interface->psAdapter, DBG_TYPE_RX, RX_DATA,
- DBG_LVL_ALL, "Received Data pkt...");
- skb_reserve(skb, 2 + SKB_RESERVE_PHS_BYTES);
- memcpy(skb->data+ETH_HLEN, (PUCHAR)urb->transfer_buffer +
- sizeof(struct bcm_leader), p_leader->PLength);
- skb->dev = ad->dev;
-
- /* currently skb->len has extra ETH_HLEN bytes in the beginning */
- skb_put(skb, p_leader->PLength + ETH_HLEN);
- ad->PackInfo[queue_index].uiTotalRxBytes += p_leader->PLength;
- ad->PackInfo[queue_index].uiThisPeriodRxBytes += p_leader->PLength;
- BCM_DEBUG_PRINT(interface->psAdapter, DBG_TYPE_RX, RX_DATA,
- DBG_LVL_ALL, "Received Data pkt of len :0x%X",
- p_leader->PLength);
-
- if (netif_running(ad->dev)) {
- /* Moving ahead by ETH_HLEN to the data ptr as received from FW */
- skb_pull(skb, ETH_HLEN);
- PHSReceive(ad, p_leader->Vcid, skb, &skb->len,
- NULL, b_header_supression_endabled);
-
- if (!ad->PackInfo[queue_index].bEthCSSupport) {
- skb_push(skb, ETH_HLEN);
-
- memcpy(skb->data, skb->dev->dev_addr, 6);
- memcpy(skb->data+6, skb->dev->dev_addr, 6);
- (*(skb->data+11))++;
- *(skb->data+12) = 0x08;
- *(skb->data+13) = 0x00;
- p_leader->PLength += ETH_HLEN;
- }
-
- skb->protocol = eth_type_trans(skb, ad->dev);
- netif_rx(skb);
- } else {
- BCM_DEBUG_PRINT(interface->psAdapter, DBG_TYPE_RX,
- RX_DATA, DBG_LVL_ALL,
- "i/f not up, hence freeing SKB...");
- dev_kfree_skb(skb);
- }
-
- ++ad->dev->stats.rx_packets;
- ad->dev->stats.rx_bytes += p_leader->PLength;
-
- for (ui_index = 0; ui_index < MIBS_MAX_HIST_ENTRIES; ui_index++) {
- if ((p_leader->PLength <=
- MIBS_PKTSIZEHIST_RANGE*(ui_index+1)) &&
- (p_leader->PLength > MIBS_PKTSIZEHIST_RANGE*(ui_index)))
-
- ad->aRxPktSizeHist[ui_index]++;
- }
-}
-
-static int SearchVcid(struct bcm_mini_adapter *Adapter, unsigned short usVcid)
-{
- int iIndex = 0;
-
- for (iIndex = (NO_OF_QUEUES-1); iIndex >= 0; iIndex--)
- if (Adapter->PackInfo[iIndex].usVCID_Value == usVcid)
- return iIndex;
- return NO_OF_QUEUES+1;
-
-}
-
-
-static struct bcm_usb_rcb *
-GetBulkInRcb(struct bcm_interface_adapter *psIntfAdapter)
-{
- struct bcm_usb_rcb *pRcb = NULL;
- UINT index = 0;
-
- if ((atomic_read(&psIntfAdapter->uNumRcbUsed) < MAXIMUM_USB_RCB) &&
- (psIntfAdapter->psAdapter->StopAllXaction == false)) {
- index = atomic_read(&psIntfAdapter->uCurrRcb);
- pRcb = &psIntfAdapter->asUsbRcb[index];
- pRcb->bUsed = TRUE;
- pRcb->psIntfAdapter = psIntfAdapter;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_RX, RX_DPC,
- DBG_LVL_ALL, "Got Rx desc %d used %d", index,
- atomic_read(&psIntfAdapter->uNumRcbUsed));
- index = (index + 1) % MAXIMUM_USB_RCB;
- atomic_set(&psIntfAdapter->uCurrRcb, index);
- atomic_inc(&psIntfAdapter->uNumRcbUsed);
- }
- return pRcb;
-}
-
-/* this is the receive callback - called when a pkt is available for receive (BULK IN endpoint) */
-static void read_bulk_callback(struct urb *urb)
-{
- struct sk_buff *skb = NULL;
- bool bHeaderSupressionEnabled = false;
- int QueueIndex = NO_OF_QUEUES + 1;
- UINT uiIndex = 0;
- struct bcm_usb_rcb *pRcb = (struct bcm_usb_rcb *)urb->context;
- struct bcm_interface_adapter *psIntfAdapter = pRcb->psIntfAdapter;
- struct bcm_mini_adapter *Adapter = psIntfAdapter->psAdapter;
- struct bcm_leader *pLeader = urb->transfer_buffer;
-
- if (unlikely(netif_msg_rx_status(Adapter)))
- pr_info(PFX "%s: rx urb status %d length %d\n",
- Adapter->dev->name, urb->status, urb->actual_length);
-
- if ((Adapter->device_removed == TRUE) ||
- (TRUE == Adapter->bEndPointHalted) ||
- (0 == urb->actual_length)) {
- pRcb->bUsed = false;
- atomic_dec(&psIntfAdapter->uNumRcbUsed);
- return;
- }
-
- if (urb->status != STATUS_SUCCESS) {
- if (urb->status == -EPIPE) {
- Adapter->bEndPointHalted = TRUE;
- wake_up(&Adapter->tx_packet_wait_queue);
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC,
- DBG_LVL_ALL,
- "Rx URB has got cancelled. status :%d",
- urb->status);
- }
- pRcb->bUsed = false;
- atomic_dec(&psIntfAdapter->uNumRcbUsed);
- urb->status = STATUS_SUCCESS;
- return;
- }
-
- if (Adapter->bDoSuspend && (Adapter->bPreparingForLowPowerMode)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL,
- "device is going into low power mode while the PMU option is selected; hence the rx packet should not be processed");
- return;
- }
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL,
- "Read back done len %d\n", pLeader->PLength);
- if (!pLeader->PLength) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL,
- "Leader Length 0");
- atomic_dec(&psIntfAdapter->uNumRcbUsed);
- return;
- }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL,
- "Leader Status:0x%hX, Length:0x%hX, VCID:0x%hX",
- pLeader->Status, pLeader->PLength, pLeader->Vcid);
- if (MAX_CNTL_PKT_SIZE < pLeader->PLength) {
- if (netif_msg_rx_err(Adapter))
- pr_info(PFX "%s: corrupted leader length...%d\n",
- Adapter->dev->name, pLeader->PLength);
- ++Adapter->dev->stats.rx_dropped;
- atomic_dec(&psIntfAdapter->uNumRcbUsed);
- return;
- }
-
- QueueIndex = SearchVcid(Adapter, pLeader->Vcid);
- if (QueueIndex < NO_OF_QUEUES) {
- bHeaderSupressionEnabled =
- Adapter->PackInfo[QueueIndex].bHeaderSuppressionEnabled;
- bHeaderSupressionEnabled =
- bHeaderSupressionEnabled & Adapter->bPHSEnabled;
- }
-
- skb = dev_alloc_skb(pLeader->PLength + SKB_RESERVE_PHS_BYTES +
- SKB_RESERVE_ETHERNET_HEADER);
- if (!skb) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "NO SKBUFF!!! Dropping the Packet");
- atomic_dec(&psIntfAdapter->uNumRcbUsed);
- return;
- }
- /* If it is a control Packet, then call handle_bcm_packet ()*/
- if ((ntohs(pLeader->Vcid) == VCID_CONTROL_PACKET) ||
- (!(pLeader->Status >= 0x20 && pLeader->Status <= 0x3F))) {
- handle_control_packet(psIntfAdapter, Adapter, pLeader, skb,
- urb);
- } else {
- format_eth_hdr_to_stack(psIntfAdapter, Adapter, pLeader, skb,
- urb, uiIndex, QueueIndex,
- bHeaderSupressionEnabled);
- }
- Adapter->PrevNumRecvDescs++;
- pRcb->bUsed = false;
- atomic_dec(&psIntfAdapter->uNumRcbUsed);
-}
-
-static int ReceiveRcb(struct bcm_interface_adapter *psIntfAdapter,
- struct bcm_usb_rcb *pRcb)
-{
- struct urb *urb = pRcb->urb;
- int retval = 0;
-
- usb_fill_bulk_urb(urb, psIntfAdapter->udev,
- usb_rcvbulkpipe(psIntfAdapter->udev,
- psIntfAdapter->sBulkIn.bulk_in_endpointAddr),
- urb->transfer_buffer,
- BCM_USB_MAX_READ_LENGTH,
- read_bulk_callback, pRcb);
-
- if (false == psIntfAdapter->psAdapter->device_removed &&
- false == psIntfAdapter->psAdapter->bEndPointHalted &&
- false == psIntfAdapter->bSuspended &&
- false == psIntfAdapter->bPreparingForBusSuspend) {
- retval = usb_submit_urb(urb, GFP_ATOMIC);
- if (retval) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_RX,
- RX_DPC, DBG_LVL_ALL,
- "failed submitting read urb, error %d",
- retval);
- /* if this return value is because of a pipe halt, we need to clear it */
- if (retval == -EPIPE) {
- psIntfAdapter->psAdapter->bEndPointHalted = TRUE;
- wake_up(&psIntfAdapter->psAdapter->tx_packet_wait_queue);
- }
-
- }
- }
- return retval;
-}
-
-/*
-Function: InterfaceRx
-
-Description: This is the hardware-specific function for receiving
- data packets/control packets from the device.
-
-Input parameters: IN struct bcm_interface_adapter *psIntfAdapter - Interface Adapter Context
-
-Return: TRUE - If Rx was successful.
- Other - If an error occurred.
-*/
-
-bool InterfaceRx(struct bcm_interface_adapter *psIntfAdapter)
-{
- USHORT RxDescCount = NUM_RX_DESC -
- atomic_read(&psIntfAdapter->uNumRcbUsed);
-
- struct bcm_usb_rcb *pRcb = NULL;
-
- while (RxDescCount) {
- pRcb = GetBulkInRcb(psIntfAdapter);
- if (pRcb == NULL) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
- DBG_TYPE_PRINTK, 0, 0,
- "Unable to get Rcb pointer");
- return false;
- }
- ReceiveRcb(psIntfAdapter, pRcb);
- RxDescCount--;
- }
- return TRUE;
-}
-
diff --git a/drivers/staging/bcm/InterfaceRx.h b/drivers/staging/bcm/InterfaceRx.h
deleted file mode 100644
index b4e858bcda3..00000000000
--- a/drivers/staging/bcm/InterfaceRx.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef _INTERFACE_RX_H
-#define _INTERFACE_RX_H
-
-bool InterfaceRx(struct bcm_interface_adapter *Adapter);
-
-#endif
-
diff --git a/drivers/staging/bcm/InterfaceTx.c b/drivers/staging/bcm/InterfaceTx.c
deleted file mode 100644
index 9b3f64b821e..00000000000
--- a/drivers/staging/bcm/InterfaceTx.c
+++ /dev/null
@@ -1,213 +0,0 @@
-#include "headers.h"
-
-static void prepare_low_power_mode(struct urb *urb,
- struct bcm_interface_adapter *interface,
- struct bcm_mini_adapter *ps_adapter,
- struct bcm_mini_adapter *ad,
- struct bcm_link_request *p_control_msg,
- bool *b_power_down_msg)
-{
- if (((p_control_msg->szData[0] == GO_TO_IDLE_MODE_PAYLOAD) &&
- (p_control_msg->szData[1] == TARGET_CAN_GO_TO_IDLE_MODE))) {
-
- *b_power_down_msg = TRUE;
- /*
- * This covers the bus err while Idle Request msg
- * sent down.
- */
- if (urb->status != STATUS_SUCCESS) {
- ps_adapter->bPreparingForLowPowerMode = false;
- BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, NEXT_SEND,
- DBG_LVL_ALL,
- "Idle Mode Request msg failed to reach the Modem");
- /* Signalling the cntrl pkt path in Ioctl */
- wake_up(&ps_adapter->lowpower_mode_wait_queue);
- StartInterruptUrb(interface);
- return;
- }
-
- if (ps_adapter->bDoSuspend == false) {
- ps_adapter->IdleMode = TRUE;
- /* since entering Idle mode has completed, hence making this var false */
- ps_adapter->bPreparingForLowPowerMode = false;
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, NEXT_SEND,
- DBG_LVL_ALL,
- "Host Entered in Idle Mode State...");
- /* Signalling the cntrl pkt path in Ioctl*/
- wake_up(&ps_adapter->lowpower_mode_wait_queue);
- }
-
- } else if ((p_control_msg->Leader.Status == LINK_UP_CONTROL_REQ) &&
- (p_control_msg->szData[0] == LINK_UP_ACK) &&
- (p_control_msg->szData[1] == LINK_SHUTDOWN_REQ_FROM_FIRMWARE) &&
- (p_control_msg->szData[2] == SHUTDOWN_ACK_FROM_DRIVER)) {
- /*
- * This covers a bus error while the Shutdown Request
- * msg is sent down.
- */
- if (urb->status != STATUS_SUCCESS) {
- ps_adapter->bPreparingForLowPowerMode = false;
- BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, NEXT_SEND,
- DBG_LVL_ALL,
- "Shutdown Request Msg failed to reach the Modem");
- /* Signalling the cntrl pkt path in Ioctl */
- wake_up(&ps_adapter->lowpower_mode_wait_queue);
- StartInterruptUrb(interface);
- return;
- }
-
- *b_power_down_msg = TRUE;
- if (ps_adapter->bDoSuspend == false) {
- ps_adapter->bShutStatus = TRUE;
- /*
- * since entering shutdown mode has completed, hence
- * making this var false
- */
- ps_adapter->bPreparingForLowPowerMode = false;
- BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, NEXT_SEND,
- DBG_LVL_ALL,
- "Host Entered in shutdown Mode State...");
- /* Signalling the cntrl pkt path in Ioctl */
- wake_up(&ps_adapter->lowpower_mode_wait_queue);
- }
- }
-
- if (ps_adapter->bDoSuspend && *b_power_down_msg) {
- /* issuing bus suspend request */
- BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,
- "Issuing the Bus suspend request to USB stack");
- interface->bPreparingForBusSuspend = TRUE;
- schedule_work(&interface->usbSuspendWork);
- }
-}
-
-/* this is the transmit callback (BULK OUT) */
-static void write_bulk_callback(struct urb *urb/*, struct pt_regs *regs*/)
-{
- struct bcm_usb_tcb *pTcb = (struct bcm_usb_tcb *)urb->context;
- struct bcm_interface_adapter *psIntfAdapter = pTcb->psIntfAdapter;
- struct bcm_link_request *pControlMsg =
- (struct bcm_link_request *)urb->transfer_buffer;
- struct bcm_mini_adapter *psAdapter = psIntfAdapter->psAdapter;
- bool bpowerDownMsg = false;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- if (unlikely(netif_msg_tx_done(Adapter)))
- pr_info(PFX "%s: transmit status %d\n", Adapter->dev->name,
- urb->status);
-
- if (urb->status != STATUS_SUCCESS) {
- if (urb->status == -EPIPE) {
- psIntfAdapter->psAdapter->bEndPointHalted = TRUE;
- wake_up(&psIntfAdapter->psAdapter->tx_packet_wait_queue);
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, NEXT_SEND,
- DBG_LVL_ALL,
- "Tx URB has got cancelled. status :%d",
- urb->status);
- }
- }
-
- pTcb->bUsed = false;
- atomic_dec(&psIntfAdapter->uNumTcbUsed);
-
- if (TRUE == psAdapter->bPreparingForLowPowerMode) {
- prepare_low_power_mode(urb, psIntfAdapter, psAdapter, Adapter,
- pControlMsg, &bpowerDownMsg);
- }
-
- usb_free_coherent(urb->dev, urb->transfer_buffer_length,
- urb->transfer_buffer, urb->transfer_dma);
-}
-
-
-static struct bcm_usb_tcb *GetBulkOutTcb(struct bcm_interface_adapter *psIntfAdapter)
-{
- struct bcm_usb_tcb *pTcb = NULL;
- UINT index = 0;
-
- if ((atomic_read(&psIntfAdapter->uNumTcbUsed) < MAXIMUM_USB_TCB) &&
- (psIntfAdapter->psAdapter->StopAllXaction == false)) {
- index = atomic_read(&psIntfAdapter->uCurrTcb);
- pTcb = &psIntfAdapter->asUsbTcb[index];
- pTcb->bUsed = TRUE;
- pTcb->psIntfAdapter = psIntfAdapter;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_TX,
- NEXT_SEND, DBG_LVL_ALL,
- "Got Tx desc %d used %d",
- index,
- atomic_read(&psIntfAdapter->uNumTcbUsed));
- index = (index + 1) % MAXIMUM_USB_TCB;
- atomic_set(&psIntfAdapter->uCurrTcb, index);
- atomic_inc(&psIntfAdapter->uNumTcbUsed);
- }
- return pTcb;
-}
-
-static int TransmitTcb(struct bcm_interface_adapter *psIntfAdapter,
- struct bcm_usb_tcb *pTcb, PVOID data, int len)
-{
-
- struct urb *urb = pTcb->urb;
- int retval = 0;
-
- urb->transfer_buffer = usb_alloc_coherent(psIntfAdapter->udev, len,
- GFP_ATOMIC, &urb->transfer_dma);
- if (!urb->transfer_buffer) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0,
- "Error allocating memory\n");
- return -ENOMEM;
- }
- memcpy(urb->transfer_buffer, data, len);
- urb->transfer_buffer_length = len;
-
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_TX, NEXT_SEND,
- DBG_LVL_ALL, "Sending Bulk out packet\n");
- /* For T3B, the INT OUT endpoint will be used as the bulk out endpoint */
- if ((psIntfAdapter->psAdapter->chip_id == T3B) &&
- (psIntfAdapter->bHighSpeedDevice == TRUE)) {
- usb_fill_int_urb(urb, psIntfAdapter->udev,
- psIntfAdapter->sBulkOut.bulk_out_pipe,
- urb->transfer_buffer, len, write_bulk_callback, pTcb,
- psIntfAdapter->sBulkOut.int_out_interval);
- } else {
- usb_fill_bulk_urb(urb, psIntfAdapter->udev,
- psIntfAdapter->sBulkOut.bulk_out_pipe,
- urb->transfer_buffer, len, write_bulk_callback, pTcb);
- }
- urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; /* For DMA transfer */
-
- if (false == psIntfAdapter->psAdapter->device_removed &&
- false == psIntfAdapter->psAdapter->bEndPointHalted &&
- false == psIntfAdapter->bSuspended &&
- false == psIntfAdapter->bPreparingForBusSuspend) {
- retval = usb_submit_urb(urb, GFP_ATOMIC);
- if (retval) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_TX,
- NEXT_SEND, DBG_LVL_ALL,
- "failed submitting write urb, error %d",
- retval);
- if (retval == -EPIPE) {
- psIntfAdapter->psAdapter->bEndPointHalted = TRUE;
- wake_up(&psIntfAdapter->psAdapter->tx_packet_wait_queue);
- }
- }
- }
- return retval;
-}
-
-int InterfaceTransmitPacket(PVOID arg, PVOID data, UINT len)
-{
- struct bcm_usb_tcb *pTcb = NULL;
- struct bcm_interface_adapter *psIntfAdapter = arg;
-
- pTcb = GetBulkOutTcb(psIntfAdapter);
- if (pTcb == NULL) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0,
- "No URB to transmit packet, dropping packet");
- return -EFAULT;
- }
- return TransmitTcb(psIntfAdapter, pTcb, data, len);
-}
-
diff --git a/drivers/staging/bcm/InterfaceTx.h b/drivers/staging/bcm/InterfaceTx.h
deleted file mode 100644
index 273147577c1..00000000000
--- a/drivers/staging/bcm/InterfaceTx.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef _INTERFACE_TX_H
-#define _INTERFACE_TX_H
-
-INT InterfaceTransmitPacket(PVOID arg, PVOID data, UINT len);
-
-#endif
-
diff --git a/drivers/staging/bcm/Ioctl.h b/drivers/staging/bcm/Ioctl.h
deleted file mode 100644
index fa5f8671612..00000000000
--- a/drivers/staging/bcm/Ioctl.h
+++ /dev/null
@@ -1,226 +0,0 @@
-#ifndef _IOCTL_H_
-#define _IOCTL_H_
-
-struct bcm_rdm_buffer {
- unsigned long Register;
- unsigned long Length;
-} __packed;
-
-struct bcm_wrm_buffer {
- unsigned long Register;
- unsigned long Length;
- unsigned char Data[4];
-} __packed;
-
-struct bcm_ioctl_buffer {
- void __user *InputBuffer;
- unsigned long InputLength;
- void __user *OutputBuffer;
- unsigned long OutputLength;
-} __packed;
-
-struct bcm_gpio_info {
- unsigned int uiGpioNumber; /* valid numbers 0-15 */
- unsigned int uiGpioValue; /* 1 set ; 0 not set */
-} __packed;
-
-struct bcm_user_thread_req {
- /* 0->Inactivate LED thread. */
- /* 1->Activate the LED thread */
- unsigned int ThreadState;
-} __packed;
-
-#define LED_THREAD_ACTIVATION_REQ 1
-#define BCM_IOCTL 'k'
-#define IOCTL_SEND_CONTROL_MESSAGE _IOW(BCM_IOCTL, 0x801, int)
-#define IOCTL_BCM_REGISTER_WRITE _IOW(BCM_IOCTL, 0x802, int)
-#define IOCTL_BCM_REGISTER_READ _IOR(BCM_IOCTL, 0x803, int)
-#define IOCTL_BCM_COMMON_MEMORY_WRITE _IOW(BCM_IOCTL, 0x804, int)
-#define IOCTL_BCM_COMMON_MEMORY_READ _IOR(BCM_IOCTL, 0x805, int)
-#define IOCTL_GET_CONTROL_MESSAGE _IOR(BCM_IOCTL, 0x806, int)
-#define IOCTL_BCM_FIRMWARE_DOWNLOAD _IOW(BCM_IOCTL, 0x807, int)
-#define IOCTL_BCM_SET_SEND_VCID _IOW(BCM_IOCTL, 0x808, int)
-#define IOCTL_BCM_SWITCH_TRANSFER_MODE _IOW(BCM_IOCTL, 0x809, int)
-#define IOCTL_LINK_REQ _IOW(BCM_IOCTL, 0x80A, int)
-#define IOCTL_RSSI_LEVEL_REQ _IOW(BCM_IOCTL, 0x80B, int)
-#define IOCTL_IDLE_REQ _IOW(BCM_IOCTL, 0x80C, int)
-#define IOCTL_SS_INFO_REQ _IOW(BCM_IOCTL, 0x80D, int)
-#define IOCTL_GET_STATISTICS_POINTER _IOW(BCM_IOCTL, 0x80E, int)
-#define IOCTL_CM_REQUEST _IOW(BCM_IOCTL, 0x80F, int)
-#define IOCTL_INIT_PARAM_REQ _IOW(BCM_IOCTL, 0x810, int)
-#define IOCTL_MAC_ADDR_REQ _IOW(BCM_IOCTL, 0x811, int)
-#define IOCTL_MAC_ADDR_RESP _IOWR(BCM_IOCTL, 0x812, int)
-#define IOCTL_CLASSIFICATION_RULE _IOW(BCM_IOCTL, 0x813, char)
-#define IOCTL_CLOSE_NOTIFICATION _IO(BCM_IOCTL, 0x814)
-#define IOCTL_LINK_UP _IO(BCM_IOCTL, 0x815)
-#define IOCTL_LINK_DOWN _IO(BCM_IOCTL, 0x816, struct bcm_ioctl_buffer)
-#define IOCTL_CHIP_RESET _IO(BCM_IOCTL, 0x816)
-#define IOCTL_CINR_LEVEL_REQ _IOW(BCM_IOCTL, 0x817, char)
-#define IOCTL_WTM_CONTROL_REQ _IOW(BCM_IOCTL, 0x817, char)
-#define IOCTL_BE_BUCKET_SIZE _IOW(BCM_IOCTL, 0x818, unsigned long)
-#define IOCTL_RTPS_BUCKET_SIZE _IOW(BCM_IOCTL, 0x819, unsigned long)
-#define IOCTL_QOS_THRESHOLD _IOW(BCM_IOCTL, 0x820, unsigned long)
-#define IOCTL_DUMP_PACKET_INFO _IO(BCM_IOCTL, 0x821)
-#define IOCTL_GET_PACK_INFO _IOR(BCM_IOCTL, 0x823, int)
-#define IOCTL_BCM_GET_DRIVER_VERSION _IOR(BCM_IOCTL, 0x829, int)
-#define IOCTL_BCM_GET_CURRENT_STATUS _IOW(BCM_IOCTL, 0x828, int)
-#define IOCTL_BCM_GPIO_SET_REQUEST _IOW(BCM_IOCTL, 0x82A, int)
-#define IOCTL_BCM_GPIO_STATUS_REQUEST _IOW(BCM_IOCTL, 0x82b, int)
-#define IOCTL_BCM_GET_DSX_INDICATION _IOR(BCM_IOCTL, 0x854, int)
-#define IOCTL_BCM_BUFFER_DOWNLOAD_START _IOW(BCM_IOCTL, 0x855, int)
-#define IOCTL_BCM_BUFFER_DOWNLOAD _IOW(BCM_IOCTL, 0x856, int)
-#define IOCTL_BCM_BUFFER_DOWNLOAD_STOP _IOW(BCM_IOCTL, 0x857, int)
-#define IOCTL_BCM_REGISTER_WRITE_PRIVATE _IOW(BCM_IOCTL, 0x826, char)
-#define IOCTL_BCM_REGISTER_READ_PRIVATE _IOW(BCM_IOCTL, 0x827, char)
-#define IOCTL_BCM_SET_DEBUG _IOW(BCM_IOCTL, 0x824, struct bcm_ioctl_buffer)
-#define IOCTL_BCM_EEPROM_REGISTER_WRITE _IOW(BCM_IOCTL, 0x858, int)
-#define IOCTL_BCM_EEPROM_REGISTER_READ _IOR(BCM_IOCTL, 0x859, int)
-#define IOCTL_BCM_WAKE_UP_DEVICE_FROM_IDLE _IOR(BCM_IOCTL, 0x860, int)
-#define IOCTL_BCM_SET_MAC_TRACING _IOW(BCM_IOCTL, 0x82c, int)
-#define IOCTL_BCM_GET_HOST_MIBS _IOW(BCM_IOCTL, 0x853, int)
-#define IOCTL_BCM_NVM_READ _IOR(BCM_IOCTL, 0x861, int)
-#define IOCTL_BCM_NVM_WRITE _IOW(BCM_IOCTL, 0x862, int)
-#define IOCTL_BCM_GET_NVM_SIZE _IOR(BCM_IOCTL, 0x863, int)
-#define IOCTL_BCM_CAL_INIT _IOR(BCM_IOCTL, 0x864, int)
-#define IOCTL_BCM_BULK_WRM _IOW(BCM_IOCTL, 0x90B, int)
-#define IOCTL_BCM_FLASH2X_SECTION_READ _IOR(BCM_IOCTL, 0x865, int)
-#define IOCTL_BCM_FLASH2X_SECTION_WRITE _IOW(BCM_IOCTL, 0x866, int)
-#define IOCTL_BCM_GET_FLASH2X_SECTION_BITMAP _IOR(BCM_IOCTL, 0x867, int)
-#define IOCTL_BCM_SET_ACTIVE_SECTION _IOW(BCM_IOCTL, 0x868, int)
-#define IOCTL_BCM_IDENTIFY_ACTIVE_SECTION _IO(BCM_IOCTL, 0x869)
-#define IOCTL_BCM_COPY_SECTION _IOW(BCM_IOCTL, 0x870, int)
-#define IOCTL_BCM_GET_FLASH_CS_INFO _IOR(BCM_IOCTL, 0x871, int)
-#define IOCTL_BCM_SELECT_DSD _IOW(BCM_IOCTL, 0x872, int)
-#define IOCTL_BCM_NVM_RAW_READ _IOR(BCM_IOCTL, 0x875, int)
-#define IOCTL_BCM_CNTRLMSG_MASK _IOW(BCM_IOCTL, 0x874, int)
-#define IOCTL_BCM_GET_DEVICE_DRIVER_INFO _IOR(BCM_IOCTL, 0x877, int)
-#define IOCTL_BCM_TIME_SINCE_NET_ENTRY _IOR(BCM_IOCTL, 0x876, int)
-#define BCM_LED_THREAD_STATE_CHANGE_REQ _IOW(BCM_IOCTL, 0x878, int)
-#define IOCTL_BCM_GPIO_MULTI_REQUEST _IOW(BCM_IOCTL, 0x82D, struct bcm_ioctl_buffer)
-#define IOCTL_BCM_GPIO_MODE_REQUEST _IOW(BCM_IOCTL, 0x82E, struct bcm_ioctl_buffer)
-
-enum bcm_interface_type {
- BCM_MII,
- BCM_CARDBUS,
- BCM_USB,
- BCM_SDIO,
- BCM_PCMCIA
-};
-
-struct bcm_driver_info {
- enum bcm_nvm_type u32NVMType;
- unsigned int MaxRDMBufferSize;
- enum bcm_interface_type u32InterfaceType;
- unsigned int u32DSDStartOffset;
- unsigned int u32RxAlignmentCorrection;
- unsigned int u32Reserved[10];
-};
-
-struct bcm_nvm_readwrite {
- void __user *pBuffer;
- uint32_t uiOffset;
- uint32_t uiNumBytes;
- bool bVerify;
-};
-
-struct bcm_bulk_wrm_buffer {
- unsigned long Register;
- unsigned long SwapEndian;
- unsigned long Values[1];
-};
-
-enum bcm_flash2x_section_val {
- NO_SECTION_VAL = 0, /* no section chosen when absolute offset is given for RD/WR */
- ISO_IMAGE1,
- ISO_IMAGE2,
- DSD0,
- DSD1,
- DSD2,
- VSA0,
- VSA1,
- VSA2,
- SCSI,
- CONTROL_SECTION,
- ISO_IMAGE1_PART2,
- ISO_IMAGE1_PART3,
- ISO_IMAGE2_PART2,
- ISO_IMAGE2_PART3,
- TOTAL_SECTIONS
-};
-
-/*
- * Structure used for READ/WRITE Flash Map2.x
- */
-struct bcm_flash2x_readwrite {
- enum bcm_flash2x_section_val Section; /* section to be read/written */
- u32 offset; /* offset within section. */
- u32 numOfBytes; /* number of bytes from the offset */
- u32 bVerify;
- void __user *pDataBuff; /* buffer for reading/writing */
-};
-
-/*
- * This structure is used for copying one section to another.
- * There are two ways to copy one section to another:
- * if NOB == 0, the complete section will be copied to the other;
- * if NOB != 0, only NOB bytes will be copied from the given offset.
- */
-
-struct bcm_flash2x_copy_section {
- enum bcm_flash2x_section_val SrcSection;
- enum bcm_flash2x_section_val DstSection;
- u32 offset;
- u32 numOfBytes;
-};
-
-/*
- * This section provides the complete bitmap of the Flash.
- * Using this map, the lib/APP will issue read/write commands.
- * Fields are defined as:
- * Bit [0] = section is present //1:present, 0: Not present
- * Bit [1] = section is valid //1: valid, 0: not valid
- * Bit [2] = Section is R/W //0: RW, 1: RO
- * Bit [3] = Section is Active or not 1 means Active, 0->inactive
- * Bit [7...4] = Reserved
- */
-
-struct bcm_flash2x_bitmap {
- unsigned char ISO_IMAGE1;
- unsigned char ISO_IMAGE2;
- unsigned char DSD0;
- unsigned char DSD1;
- unsigned char DSD2;
- unsigned char VSA0;
- unsigned char VSA1;
- unsigned char VSA2;
- unsigned char SCSI;
- unsigned char CONTROL_SECTION;
- /* Reserved for future use */
- unsigned char Reserved0;
- unsigned char Reserved1;
- unsigned char Reserved2;
-};
-
-struct bcm_time_elapsed {
- u64 ul64TimeElapsedSinceNetEntry;
- u32 uiReserved[4];
-};
-
-enum {
- WIMAX_IDX = 0, /* To access WiMAX chip GPIOs for GPIO_MULTI_INFO or GPIO_MULTI_MODE */
- HOST_IDX, /* To access Host chip GPIOs for GPIO_MULTI_INFO or GPIO_MULTI_MODE */
- MAX_IDX
-};
-
-struct bcm_gpio_multi_info {
- unsigned int uiGPIOCommand; /* 1 for set and 0 for get */
- unsigned int uiGPIOMask; /* set the corresponding bit to 1 to access GPIO */
- unsigned int uiGPIOValue; /* 0 or 1; value to be set when command is 1. */
-} __packed;
-
-struct bcm_gpio_multi_mode {
- unsigned int uiGPIOMode; /* 1 for OUT mode, 0 for IN mode */
- unsigned int uiGPIOMask; /* GPIO mask to set mode */
-} __packed;
-
-#endif
diff --git a/drivers/staging/bcm/Kconfig b/drivers/staging/bcm/Kconfig
deleted file mode 100644
index 8acf4b24a7c..00000000000
--- a/drivers/staging/bcm/Kconfig
+++ /dev/null
@@ -1,6 +0,0 @@
-config BCM_WIMAX
- tristate "Beceem BCS200/BCS220-3 and BCSM250 wimax support"
- depends on USB && NET
- help
- This is an experimental driver for the Beceem WIMAX chipset used
- by Sprint 4G.
diff --git a/drivers/staging/bcm/LeakyBucket.c b/drivers/staging/bcm/LeakyBucket.c
deleted file mode 100644
index d6b55f993b5..00000000000
--- a/drivers/staging/bcm/LeakyBucket.c
+++ /dev/null
@@ -1,364 +0,0 @@
-/**********************************************************************
-* LEAKYBUCKET.C
-* This file contains the routines related to the Leaky Bucket Algorithm.
-***********************************************************************/
-#include "headers.h"
-
-/**
- * UpdateTokenCount() - Calculates the token count for each channel
- * and updates the same in Adapter structure
- * @Adapter: Pointer to the Adapter structure.
- *
- * Return: None
- */
-static VOID UpdateTokenCount(register struct bcm_mini_adapter *Adapter)
-{
- ULONG liCurrentTime;
- INT i = 0;
- struct timeval tv;
- struct bcm_packet_info *curr_pi;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL,
- "=====>\n");
- if (NULL == Adapter) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS,
- DBG_LVL_ALL, "Adapter found NULL!\n");
- return;
- }
-
- do_gettimeofday(&tv);
- for (i = 0; i < NO_OF_QUEUES; i++) {
- curr_pi = &Adapter->PackInfo[i];
-
- if (TRUE == curr_pi->bValid && (1 == curr_pi->ucDirection)) {
- liCurrentTime = ((tv.tv_sec -
- curr_pi->stLastUpdateTokenAt.tv_sec)*1000 +
- (tv.tv_usec - curr_pi->stLastUpdateTokenAt.tv_usec) /
- 1000);
- if (0 != liCurrentTime) {
- curr_pi->uiCurrentTokenCount += (ULONG)
- ((curr_pi->uiMaxAllowedRate) *
- ((ULONG)((liCurrentTime)))/1000);
- memcpy(&curr_pi->stLastUpdateTokenAt, &tv,
- sizeof(struct timeval));
- curr_pi->liLastUpdateTokenAt = liCurrentTime;
- if (curr_pi->uiCurrentTokenCount >=
- curr_pi->uiMaxBucketSize) {
- curr_pi->uiCurrentTokenCount =
- curr_pi->uiMaxBucketSize;
- }
- }
- }
- }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL,
- "<=====\n");
-}
-
-
-/**
- * IsPacketAllowedForFlow() - This function checks whether the given
- * packet from the specified queue can be allowed for transmission by
- * checking the token count.
- * @Adapter: Pointer to the Adapter structure.
- * @iQIndex: The queue Identifier.
- * @ulPacketLength: Number of bytes to be transmitted.
- *
- * Returns: The number of bytes allowed for transmission.
- */
-static ULONG GetSFTokenCount(struct bcm_mini_adapter *Adapter, struct bcm_packet_info *psSF)
-{
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL,
- "IsPacketAllowedForFlow ===>");
-
- /* Validate the parameters */
- if (NULL == Adapter || (psSF < Adapter->PackInfo &&
- (uintptr_t)psSF > (uintptr_t) &Adapter->PackInfo[HiPriority])) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL,
- "IPAFF: Got wrong Parameters:Adapter: %p, QIndex: %zd\n",
- Adapter, (psSF-Adapter->PackInfo));
- return 0;
- }
-
- if (false != psSF->bValid && psSF->ucDirection) {
- if (0 != psSF->uiCurrentTokenCount) {
- return psSF->uiCurrentTokenCount;
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS,
- DBG_LVL_ALL,
- "Not enough tokens in queue %zd Available %u\n",
- psSF-Adapter->PackInfo, psSF->uiCurrentTokenCount);
- psSF->uiPendedLast = 1;
- }
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL,
- "IPAFF: Queue %zd not valid\n",
- psSF-Adapter->PackInfo);
- }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL,
- "IsPacketAllowedForFlow <===");
- return 0;
-}
-
-/**
-@ingroup tx_functions
-This function dispatches a packet from the specified queue.
-@return Zero(success) or Negative value(failure)
-*/
-static INT SendPacketFromQueue(struct bcm_mini_adapter *Adapter,/**<Logical Adapter*/
- struct bcm_packet_info *psSF, /**<Queue identifier*/
- struct sk_buff *Packet) /**<Pointer to the packet to be sent*/
-{
- INT Status = STATUS_FAILURE;
- UINT uiIndex = 0, PktLen = 0;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, SEND_QUEUE, DBG_LVL_ALL,
- "=====>");
- if (!Adapter || !Packet || !psSF) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, SEND_QUEUE, DBG_LVL_ALL,
- "Got NULL Adapter or Packet");
- return -EINVAL;
- }
-
- if (psSF->liDrainCalculated == 0)
- psSF->liDrainCalculated = jiffies;
- /* send the packet to the fifo.. */
- PktLen = Packet->len;
- Status = SetupNextSend(Adapter, Packet, psSF->usVCID_Value);
- if (Status == 0) {
- for (uiIndex = 0; uiIndex < MIBS_MAX_HIST_ENTRIES; uiIndex++) {
- if ((PktLen <= MIBS_PKTSIZEHIST_RANGE*(uiIndex+1)) &&
- (PktLen > MIBS_PKTSIZEHIST_RANGE*(uiIndex)))
- Adapter->aTxPktSizeHist[uiIndex]++;
- }
- }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, SEND_QUEUE, DBG_LVL_ALL,
- "<=====");
- return Status;
-}
-
-static void get_data_packet(struct bcm_mini_adapter *ad,
- struct bcm_packet_info *ps_sf)
-{
- int packet_len;
- struct sk_buff *qpacket;
-
- if (!ps_sf->ucDirection)
- return;
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL,
- "UpdateTokenCount ");
- if (ad->IdleMode || ad->bPreparingForLowPowerMode)
- return; /* in idle mode */
-
- /* Check for Free Descriptors */
- if (atomic_read(&ad->CurrNumFreeTxDesc) <=
- MINIMUM_PENDING_DESCRIPTORS) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL,
- " No Free Tx Descriptor(%d) is available for Data pkt..",
- atomic_read(&ad->CurrNumFreeTxDesc));
- return;
- }
-
- spin_lock_bh(&ps_sf->SFQueueLock);
- qpacket = ps_sf->FirstTxQueue;
-
- if (qpacket) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL,
- "Dequeuing Data Packet");
-
- if (ps_sf->bEthCSSupport)
- packet_len = qpacket->len;
- else
- packet_len = qpacket->len - ETH_HLEN;
-
- packet_len <<= 3;
- if (packet_len <= GetSFTokenCount(ad, ps_sf)) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, TX_PACKETS,
- DBG_LVL_ALL, "Allowed bytes %d",
- (packet_len >> 3));
-
- DEQUEUEPACKET(ps_sf->FirstTxQueue, ps_sf->LastTxQueue);
- ps_sf->uiCurrentBytesOnHost -= (qpacket->len);
- ps_sf->uiCurrentPacketsOnHost--;
- atomic_dec(&ad->TotalPacketCount);
- spin_unlock_bh(&ps_sf->SFQueueLock);
-
- SendPacketFromQueue(ad, ps_sf, qpacket);
- ps_sf->uiPendedLast = false;
- } else {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, TX_PACKETS,
- DBG_LVL_ALL, "For Queue: %zd\n",
- ps_sf - ad->PackInfo);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, TX_PACKETS,
- DBG_LVL_ALL,
- "\nAvailable Tokens = %d required = %d\n",
- ps_sf->uiCurrentTokenCount,
- packet_len);
- /*
- This part indicates that, because tokens are
- not available, the pkt has not been sent out;
- hence set the pending flag, indicating to the
- host that it should be sent out first in the
- next iteration.
- */
- ps_sf->uiPendedLast = TRUE;
- spin_unlock_bh(&ps_sf->SFQueueLock);
- }
- } else {
- spin_unlock_bh(&ps_sf->SFQueueLock);
- }
-}
-
-static void send_control_packet(struct bcm_mini_adapter *ad,
- struct bcm_packet_info *ps_sf)
-{
- char *ctrl_packet = NULL;
- INT status = 0;
-
- if ((atomic_read(&ad->CurrNumFreeTxDesc) > 0) &&
- (atomic_read(&ad->index_rd_txcntrlpkt) !=
- atomic_read(&ad->index_wr_txcntrlpkt))) {
- ctrl_packet = ad->txctlpacket
- [(atomic_read(&ad->index_rd_txcntrlpkt)%MAX_CNTRL_PKTS)];
- if (ctrl_packet) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, TX_PACKETS,
- DBG_LVL_ALL,
- "Sending Control packet");
- status = SendControlPacket(ad, ctrl_packet);
- if (STATUS_SUCCESS == status) {
- spin_lock_bh(&ps_sf->SFQueueLock);
- ps_sf->NumOfPacketsSent++;
- ps_sf->uiSentBytes += ((struct bcm_leader *)ctrl_packet)->PLength;
- ps_sf->uiSentPackets++;
- atomic_dec(&ad->TotalPacketCount);
- ps_sf->uiCurrentBytesOnHost -= ((struct bcm_leader *)ctrl_packet)->PLength;
- ps_sf->uiCurrentPacketsOnHost--;
- atomic_inc(&ad->index_rd_txcntrlpkt);
- spin_unlock_bh(&ps_sf->SFQueueLock);
- } else {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, TX_PACKETS,
- DBG_LVL_ALL,
- "SendControlPacket Failed\n");
- }
- } else {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, TX_PACKETS,
- DBG_LVL_ALL,
- " Control Pkt is not available, Indexing is wrong....");
- }
- }
-}
-
-/**
- * CheckAndSendPacketFromIndex() - This function dequeues the
- * data/control packet from the specified queue for transmission.
- * @Adapter: Pointer to the driver control structure.
- * @iQIndex: The queue Identifier.
- *
- * Returns: None.
- */
-static VOID CheckAndSendPacketFromIndex(struct bcm_mini_adapter *Adapter,
- struct bcm_packet_info *psSF)
-{
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL,
- "%zd ====>", (psSF-Adapter->PackInfo));
- if ((psSF != &Adapter->PackInfo[HiPriority]) &&
- Adapter->LinkUpStatus &&
- atomic_read(&psSF->uiPerSFTxResourceCount)) { /* Get data packet */
-
- get_data_packet(Adapter, psSF);
- } else {
- send_control_packet(Adapter, psSF);
- }
-}
-
-
-/**
- * transmit_packets() - This function transmits the packets from
- * different queues, if free descriptors are available on target.
- * @Adapter: Pointer to the Adapter structure.
- *
- * Returns: None.
- */
-VOID transmit_packets(struct bcm_mini_adapter *Adapter)
-{
- UINT uiPrevTotalCount = 0;
- int iIndex = 0;
-
- bool exit_flag = TRUE;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL,
- "=====>");
-
- if (NULL == Adapter) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL,
- "Got NULL Adapter");
- return;
- }
- if (Adapter->device_removed == TRUE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL,
- "Device removed");
- return;
- }
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL,
- "\nUpdateTokenCount ====>\n");
-
- UpdateTokenCount(Adapter);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL,
- "\nPruneQueueAllSF ====>\n");
-
- PruneQueueAllSF(Adapter);
-
- uiPrevTotalCount = atomic_read(&Adapter->TotalPacketCount);
-
- for (iIndex = HiPriority; iIndex >= 0; iIndex--) {
- if (!uiPrevTotalCount || (TRUE == Adapter->device_removed))
- break;
-
- if (Adapter->PackInfo[iIndex].bValid &&
- Adapter->PackInfo[iIndex].uiPendedLast &&
- Adapter->PackInfo[iIndex].uiCurrentBytesOnHost) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS,
- DBG_LVL_ALL,
- "Calling CheckAndSendPacketFromIndex..");
- CheckAndSendPacketFromIndex(Adapter,
- &Adapter->PackInfo[iIndex]);
- uiPrevTotalCount--;
- }
- }
-
- while (uiPrevTotalCount > 0 && !Adapter->device_removed) {
- exit_flag = TRUE;
- /* second iteration to parse non-pending queues */
- for (iIndex = HiPriority; iIndex >= 0; iIndex--) {
- if (!uiPrevTotalCount ||
- (TRUE == Adapter->device_removed))
- break;
-
- if (Adapter->PackInfo[iIndex].bValid &&
- Adapter->PackInfo[iIndex].uiCurrentBytesOnHost &&
- !Adapter->PackInfo[iIndex].uiPendedLast) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX,
- TX_PACKETS, DBG_LVL_ALL,
- "Calling CheckAndSendPacketFromIndex..");
- CheckAndSendPacketFromIndex(Adapter, &Adapter->PackInfo[iIndex]);
- uiPrevTotalCount--;
- exit_flag = false;
- }
- }
-
- if (Adapter->IdleMode || Adapter->bPreparingForLowPowerMode) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS,
- DBG_LVL_ALL, "In Idle Mode\n");
- break;
- }
- if (exit_flag == TRUE)
- break;
- } /* end of inner while loop */
-
- update_per_cid_rx(Adapter);
- Adapter->txtransmit_running = 0;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL,
- "<======");
-}
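/*
 * Illustrative sketch (not part of the removed driver): the two-pass scheme
 * used by transmit_packets() above, reduced to plain user-space C. Pass one
 * services queues whose previous attempt was pended; pass two sweeps the
 * remaining backlog until a full sweep makes no progress. The queue count
 * and field names are assumptions made for this example only.
 */
#include <stdbool.h>
#include <stdio.h>

#define NQUEUES 4

struct queue {
	bool pended;	/* previous attempt ran out of tokens */
	int  backlog;	/* packets waiting on the host side   */
};

static void service_one(struct queue *q)
{
	q->backlog--;
	q->pended = false;
}

int main(void)
{
	struct queue q[NQUEUES] = {
		{ true, 2 }, { false, 1 }, { false, 3 }, { true, 1 }
	};
	int i, total = 0;

	for (i = 0; i < NQUEUES; i++)
		total += q[i].backlog;

	/* pass 1: queues that were pended last time go first */
	for (i = NQUEUES - 1; i >= 0 && total; i--)
		if (q[i].pended && q[i].backlog) {
			service_one(&q[i]);
			total--;
		}

	/* pass 2: keep sweeping until a whole sweep sends nothing */
	while (total) {
		bool progressed = false;

		for (i = NQUEUES - 1; i >= 0 && total; i--)
			if (!q[i].pended && q[i].backlog) {
				service_one(&q[i]);
				total--;
				progressed = true;
			}
		if (!progressed)
			break;
	}

	for (i = 0; i < NQUEUES; i++)
		printf("queue %d backlog left: %d\n", i, q[i].backlog);
	return 0;
}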
diff --git a/drivers/staging/bcm/Macros.h b/drivers/staging/bcm/Macros.h
deleted file mode 100644
index dc01e3016d4..00000000000
--- a/drivers/staging/bcm/Macros.h
+++ /dev/null
@@ -1,352 +0,0 @@
-/*************************************
-* Macros.h
-**************************************/
-#ifndef __MACROS_H__
-#define __MACROS_H__
-
-#define TX_TIMER_PERIOD 10 /*10 msec*/
-#define MAX_CLASSIFIERS 100
-#define MAX_TARGET_DSX_BUFFERS 24
-
-#define MAX_CNTRL_PKTS 100
-#define MAX_DATA_PKTS 200
-#define MAX_ETH_SIZE 1536
-#define MAX_CNTL_PKT_SIZE 2048
-
-#define MTU_SIZE 1400
-#define TX_QLEN 5
-
-#define MAC_ADDR_REGISTER 0xbf60d000
-
-
-/* Quality of Service */
-#define NO_OF_QUEUES 17
-#define HiPriority (NO_OF_QUEUES-1)
-#define LowPriority 0
-#define BE 2
-#define rtPS 4
-#define ERTPS 5
-#define UGS 6
-
-#define BE_BUCKET_SIZE		(1024*1024*100)	/* 100 MB */
-#define rtPS_BUCKET_SIZE	(1024*1024*100)	/* 100 MB */
-#define MAX_ALLOWED_RATE (1024*1024*100)
-#define TX_PACKET_THRESHOLD 10
-#define XSECONDS (1*HZ)
-#define DSC_ACTIVATE_REQUEST 248
-#define QUEUE_DEPTH_OFFSET 0x1fc01000
-#define MAX_DEVICE_DESC_SIZE 2040
-#define MAX_CTRL_QUEUE_LEN 100
-#define MAX_APP_QUEUE_LEN 200
-#define MAX_LATENCY_ALLOWED 0xFFFFFFFF
-#define DEFAULT_UG_INTERVAL 250
-#define DEFAULT_UGI_FACTOR 4
-
-#define DEFAULT_PERSFCOUNT 60
-#define MAX_CONNECTIONS 10
-#define MAX_CLASS_NAME_LENGTH 32
-
-#define ETH_LENGTH_OF_ADDRESS 6
-#define MAX_MULTICAST_ADDRESSES 32
-#define IP_LENGTH_OF_ADDRESS 4
-
-#define IP_PACKET_ONLY_MODE 0
-#define ETH_PACKET_TUNNELING_MODE 1
-
-/* Link Request */
-#define SET_MAC_ADDRESS_REQUEST 0
-#define SYNC_UP_REQUEST 1
-#define SYNCED_UP 2
-#define LINK_UP_REQUEST 3
-#define LINK_CONNECTED 4
-#define SYNC_UP_NOTIFICATION 2
-#define LINK_UP_NOTIFICATION 4
-
-
-#define LINK_NET_ENTRY 0x0002
-#define HMC_STATUS 0x0004
-#define LINK_UP_CONTROL_REQ 0x83
-
-#define STATS_POINTER_REQ_STATUS 0x86
-#define NETWORK_ENTRY_REQ_PAYLOAD 198
-#define LINK_DOWN_REQ_PAYLOAD 226
-#define SYNC_UP_REQ_PAYLOAD 228
-#define STATISTICS_POINTER_REQ 237
-#define LINK_UP_REQ_PAYLOAD 245
-#define LINK_UP_ACK 246
-
-#define STATS_MSG_SIZE 4
-#define INDEX_TO_DATA 4
-
-#define GO_TO_IDLE_MODE_PAYLOAD 210
-#define COME_UP_FROM_IDLE_MODE_PAYLOAD 211
-#define IDLE_MODE_SF_UPDATE_MSG 187
-
-#define SKB_RESERVE_ETHERNET_HEADER 16
-#define SKB_RESERVE_PHS_BYTES 32
-
-#define IP_PACKET_ONLY_MODE 0
-#define ETH_PACKET_TUNNELING_MODE 1
-
-#define ETH_CS_802_3 1
-#define ETH_CS_802_1Q_VLAN 3
-#define IPV4_CS 1
-#define IPV6_CS 2
-#define ETH_CS_MASK 0x3f
-
-/** \brief Validity bit maps for TLVs in packet classification rule */
-
-#define PKT_CLASSIFICATION_USER_PRIORITY_VALID 0
-#define PKT_CLASSIFICATION_VLANID_VALID 1
-
-#ifndef MIN
-#define MIN(_a, _b) ((_a) < (_b) ? (_a) : (_b))
-#endif
-
-
-/*Leader related terms */
-#define LEADER_STATUS 0x00
-#define LEADER_STATUS_TCP_ACK 0x1
-#define LEADER_SIZE sizeof(struct bcm_leader)
-#define MAC_ADDR_REQ_SIZE sizeof(struct bcm_packettosend)
-#define SS_INFO_REQ_SIZE sizeof(struct bcm_packettosend)
-#define CM_REQUEST_SIZE (LEADER_SIZE + sizeof(stLocalSFChangeRequest))
-#define IDLE_REQ_SIZE sizeof(struct bcm_packettosend)
-
-
-#define MAX_TRANSFER_CTRL_BYTE_USB (2*1024)
-
-#define GET_MAILBOX1_REG_REQUEST 0x87
-#define GET_MAILBOX1_REG_RESPONSE 0x67
-#define VCID_CONTROL_PACKET 0x00
-
-#define TRANSMIT_NETWORK_DATA 0x00
-#define RECEIVED_NETWORK_DATA 0x20
-
-#define CM_RESPONSES 0xA0
-#define STATUS_RSP 0xA1
-#define LINK_CONTROL_RESP 0xA2
-#define IDLE_MODE_STATUS 0xA3
-#define STATS_POINTER_RESP 0xA6
-#define MGMT_MSG_INFO_SW_STATUS 0xA7
-#define AUTH_SS_HOST_MSG 0xA8
-
-#define CM_DSA_ACK_PAYLOAD 247
-#define CM_DSC_ACK_PAYLOAD 248
-#define CM_DSD_ACK_PAYLOAD 249
-#define CM_DSDEACTVATE 250
-#define TOTAL_MASKED_ADDRESS_IN_BYTES 32
-
-#define MAC_REQ 0
-#define LINK_RESP 1
-#define RSSI_INDICATION 2
-
-#define SS_INFO 4
-#define STATISTICS_INFO 5
-#define CM_INDICATION 6
-#define PARAM_RESP 7
-#define BUFFER_1K 1024
-#define BUFFER_2K (BUFFER_1K*2)
-#define BUFFER_4K (BUFFER_2K*2)
-#define BUFFER_8K (BUFFER_4K*2)
-#define BUFFER_16K (BUFFER_8K*2)
-#define DOWNLINK_DIR 0
-#define UPLINK_DIR 1
-
-#define BCM_SIGNATURE "BECEEM"
-
-
-#define GPIO_OUTPUT_REGISTER 0x0F00003C
-#define BCM_GPIO_OUTPUT_SET_REG 0x0F000040
-#define BCM_GPIO_OUTPUT_CLR_REG 0x0F000044
-#define GPIO_MODE_REGISTER 0x0F000034
-#define GPIO_PIN_STATE_REGISTER 0x0F000038
-
-struct bcm_link_state {
- unsigned char ucLinkStatus;
- unsigned char bIdleMode;
- unsigned char bShutdownMode;
-};
-
-enum enLinkStatus {
- WAIT_FOR_SYNC = 1,
- PHY_SYNC_ACHIVED = 2,
- LINKUP_IN_PROGRESS = 3,
- LINKUP_DONE = 4,
- DREG_RECEIVED = 5,
- LINK_STATUS_RESET_RECEIVED = 6,
- PERIODIC_WAKE_UP_NOTIFICATION_FRM_FW = 7,
- LINK_SHUTDOWN_REQ_FROM_FIRMWARE = 8,
- COMPLETE_WAKE_UP_NOTIFICATION_FRM_FW = 9
-};
-
-enum bcm_phs_dsc_action {
- eAddPHSRule = 0,
- eSetPHSRule,
- eDeletePHSRule,
- eDeleteAllPHSRules
-};
-
-#define CM_CONTROL_NEWDSX_MULTICLASSIFIER_REQ 0x89 /* Host to Mac */
-#define CM_CONTROL_NEWDSX_MULTICLASSIFIER_RESP 0xA9 /* Mac to Host */
-#define MASK_DISABLE_HEADER_SUPPRESSION 0x10 /* 0b000010000 */
-#define MINIMUM_PENDING_DESCRIPTORS 5
-
-#define SHUTDOWN_HOSTINITIATED_REQUESTPAYLOAD 0xCC
-#define SHUTDOWN_ACK_FROM_DRIVER 0x1
-#define SHUTDOWN_NACK_FROM_DRIVER 0x2
-
-#define LINK_SYNC_UP_SUBTYPE 0x0001
-#define LINK_SYNC_DOWN_SUBTYPE 0x0001
-
-
-
-#define CONT_MODE 1
-#define SINGLE_DESCRIPTOR 1
-
-
-#define DESCRIPTOR_LENGTH 0x30
-#define FIRMWARE_DESCS_ADDRESS 0x1F100000
-
-
-#define CLOCK_RESET_CNTRL_REG_1 0x0F00000C
-#define CLOCK_RESET_CNTRL_REG_2 0x0F000840
-
-
-
-#define TX_DESCRIPTOR_HEAD_REGISTER 0x0F010034
-#define RX_DESCRIPTOR_HEAD_REGISTER 0x0F010094
-
-#define STATISTICS_BEGIN_ADDR 0xbf60f02c
-
-#define MAX_PENDING_CTRL_PACKET (MAX_CTRL_QUEUE_LEN-10)
-
-#define WIMAX_MAX_MTU (MTU_SIZE + ETH_HLEN)
-#define AUTO_LINKUP_ENABLE 0x2
-#define AUTO_SYNC_DISABLE 0x1
-#define AUTO_FIRM_DOWNLOAD 0x1
-#define SETTLE_DOWN_TIME 50
-
-#define HOST_BUS_SUSPEND_BIT 16
-
-#define IDLE_MESSAGE 0x81
-
-#define MIPS_CLOCK_133MHz 1
-
-#define TARGET_CAN_GO_TO_IDLE_MODE 2
-#define TARGET_CAN_NOT_GO_TO_IDLE_MODE 3
-#define IDLE_MODE_PAYLOAD_LENGTH 8
-
-#define IP_HEADER(Buffer) ((IPHeaderFormat *)(Buffer))
-#define IPV4 4
-#define IP_VERSION(byte) (((byte&0xF0)>>4))
-
-#define SET_MAC_ADDRESS 193
-#define SET_MAC_ADDRESS_RESPONSE 236
-
-#define IDLE_MODE_WAKEUP_PATTERN 0xd0ea1d1e
-#define IDLE_MODE_WAKEUP_NOTIFIER_ADDRESS 0x1FC02FA8
-#define IDLE_MODE_MAX_RETRY_COUNT 1000
-
-#define CONFIG_BEGIN_ADDR 0xBF60B000
-
-#define FIRMWARE_BEGIN_ADDR 0xBFC00000
-
-#define INVALID_QUEUE_INDEX NO_OF_QUEUES
-
-#define INVALID_PID ((pid_t)-1)
-#define DDR_80_MHZ 0
-#define DDR_100_MHZ 1
-#define DDR_120_MHZ 2 /* Additional Frequency for T3LP */
-#define DDR_133_MHZ 3
-#define DDR_140_MHZ 4 /* Not Used (Reserved for future) */
-#define DDR_160_MHZ 5 /* Additional Frequency for T3LP */
-#define DDR_180_MHZ 6 /* Not Used (Reserved for future) */
-#define DDR_200_MHZ 7 /* Not Used (Reserved for future) */
-
-#define MIPS_200_MHZ 0
-#define MIPS_160_MHZ 1
-
-#define PLL_800_MHZ 0
-#define PLL_266_MHZ 1
-
-#define DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING 0
-#define DEVICE_POWERSAVE_MODE_AS_PMU_CLOCK_GATING 1
-#define DEVICE_POWERSAVE_MODE_AS_PMU_SHUTDOWN 2
-#define DEVICE_POWERSAVE_MODE_AS_RESERVED 3
-#define DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE 4
-
-
-#define EEPROM_REJECT_REG_1 0x0f003018
-#define EEPROM_REJECT_REG_2 0x0f00301c
-#define EEPROM_REJECT_REG_3 0x0f003008
-#define EEPROM_REJECT_REG_4 0x0f003020
-#define EEPROM_REJECT_MASK 0x0fffffff
-#define VSG_MODE 0x3
-
-/* Idle Mode Related Registers */
-#define DEBUG_INTERRUPT_GENERATOR_REGISTOR 0x0F00007C
-#define SW_ABORT_IDLEMODE_LOC 0x0FF01FFC
-
-#define SW_ABORT_IDLEMODE_PATTERN 0xd0ea1d1e
-#define DEVICE_INT_OUT_EP_REG0 0x0F011870
-#define DEVICE_INT_OUT_EP_REG1 0x0F011874
-
-#define BIN_FILE "/lib/firmware/macxvi200.bin"
-#define CFG_FILE "/lib/firmware/macxvi.cfg"
-#define SF_MAX_ALLOWED_PACKETS_TO_BACKUP 128
-#define MIN_VAL(x, y) ((x) < (y) ? (x) : (y))
-#define MAC_ADDRESS_SIZE 6
-#define EEPROM_COMMAND_Q_REG 0x0F003018
-#define EEPROM_READ_DATA_Q_REG 0x0F003020
-#define CHIP_ID_REG 0x0F000000
-#define GPIO_MODE_REG 0x0F000034
-#define GPIO_OUTPUT_REG 0x0F00003C
-#define WIMAX_MAX_ALLOWED_RATE (1024*1024*50)
-
-#define T3 0xbece0300
-#define TARGET_SFID_TXDESC_MAP_LOC 0xBFFFF400
-
-#define RWM_READ 0
-#define RWM_WRITE 1
-
-#define T3LPB 0xbece3300
-#define BCS220_2 0xbece3311
-#define BCS220_2BC 0xBECE3310
-#define BCS250_BC 0xbece3301
-#define BCS220_3 0xbece3321
-
-
-#define HPM_CONFIG_LDO145 0x0F000D54
-#define HPM_CONFIG_MSW 0x0F000D58
-
-#define T3B 0xbece0310
-enum bcm_nvm_type {
- NVM_AUTODETECT = 0,
- NVM_EEPROM,
- NVM_FLASH,
- NVM_UNKNOWN
-};
-
-enum bcm_pmu_modes {
- HYBRID_MODE_7C = 0,
- INTERNAL_MODE_6 = 1,
- HYBRID_MODE_6 = 2
-};
-
-#define MAX_RDM_WRM_RETIRES 1
-
-enum eAbortPattern {
- ABORT_SHUTDOWN_MODE = 1,
- ABORT_IDLE_REG = 1,
- ABORT_IDLE_MODE = 2,
- ABORT_IDLE_SYNCDOWN = 3
-};
-
-
-/* Offsets used by driver in skb cb variable */
-#define SKB_CB_CLASSIFICATION_OFFSET 0
-#define SKB_CB_LATENCY_OFFSET 1
-#define SKB_CB_TCPACK_OFFSET 2
-
-#endif /* __MACROS_H__ */
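/*
 * Illustrative sketch (not from the removed header): MIN() and MIN_VAL()
 * above use the classic ternary form, which evaluates one argument twice,
 * so passing an expression with side effects misbehaves. Standalone example
 * with assumed names, shown only to document the pitfall:
 */
#include <stdio.h>

#define MIN(_a, _b) ((_a) < (_b) ? (_a) : (_b))

int main(void)
{
	int i = 0;
	int vals[] = { 5, 3, 9 };

	/*
	 * The compare sees vals[0] == 5, but the taken branch re-evaluates
	 * vals[i++], so the result is vals[1] == 3 and i ends up at 2.
	 */
	int m = MIN(vals[i++], 10);

	printf("m = %d, i = %d\n", m, i);	/* prints m = 3, i = 2 */
	return 0;
}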
diff --git a/drivers/staging/bcm/Makefile b/drivers/staging/bcm/Makefile
deleted file mode 100644
index 652b7f87737..00000000000
--- a/drivers/staging/bcm/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-#
-# Makefile for Beceem USB Wimax card
-#
-
-obj-$(CONFIG_BCM_WIMAX) += bcm_wimax.o
-
-bcm_wimax-y := InterfaceDld.o InterfaceIdleMode.o InterfaceInit.o InterfaceRx.o \
- InterfaceIsr.o InterfaceMisc.o InterfaceTx.o \
- CmHost.o IPv6Protocol.o Qos.o Transmit.o\
- Bcmnet.o DDRInit.o HandleControlPacket.o\
- LeakyBucket.o Misc.o sort.o Bcmchar.o hostmibs.o PHSModule.o\
- led_control.o nvm.o vendorspecificextn.o
diff --git a/drivers/staging/bcm/Misc.c b/drivers/staging/bcm/Misc.c
deleted file mode 100644
index 883f7394dee..00000000000
--- a/drivers/staging/bcm/Misc.c
+++ /dev/null
@@ -1,1587 +0,0 @@
-#include "headers.h"
-
-static int BcmFileDownload(struct bcm_mini_adapter *Adapter, const char *path, unsigned int loc);
-static void doPowerAutoCorrection(struct bcm_mini_adapter *psAdapter);
-static void HandleShutDownModeRequest(struct bcm_mini_adapter *Adapter, PUCHAR pucBuffer);
-static int bcm_parse_target_params(struct bcm_mini_adapter *Adapter);
-static void beceem_protocol_reset(struct bcm_mini_adapter *Adapter);
-
-static void default_wimax_protocol_initialize(struct bcm_mini_adapter *Adapter)
-{
- unsigned int uiLoopIndex;
-
- for (uiLoopIndex = 0; uiLoopIndex < NO_OF_QUEUES-1; uiLoopIndex++) {
- Adapter->PackInfo[uiLoopIndex].uiThreshold = TX_PACKET_THRESHOLD;
- Adapter->PackInfo[uiLoopIndex].uiMaxAllowedRate = MAX_ALLOWED_RATE;
- Adapter->PackInfo[uiLoopIndex].uiMaxBucketSize = 20*1024*1024;
- }
-
- Adapter->BEBucketSize = BE_BUCKET_SIZE;
- Adapter->rtPSBucketSize = rtPS_BUCKET_SIZE;
- Adapter->LinkStatus = SYNC_UP_REQUEST;
- Adapter->TransferMode = IP_PACKET_ONLY_MODE;
- Adapter->usBestEffortQueueIndex = -1;
-}
-
-int InitAdapter(struct bcm_mini_adapter *psAdapter)
-{
- int i = 0;
- int Status = STATUS_SUCCESS;
-
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Initialising Adapter = %p", psAdapter);
-
- if (psAdapter == NULL) {
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Adapter is NULL");
- return -EINVAL;
- }
-
- sema_init(&psAdapter->NVMRdmWrmLock, 1);
- sema_init(&psAdapter->rdmwrmsync, 1);
- spin_lock_init(&psAdapter->control_queue_lock);
- spin_lock_init(&psAdapter->txtransmitlock);
- sema_init(&psAdapter->RxAppControlQueuelock, 1);
- sema_init(&psAdapter->fw_download_sema, 1);
- sema_init(&psAdapter->LowPowerModeSync, 1);
-
- for (i = 0; i < NO_OF_QUEUES; i++)
- spin_lock_init(&psAdapter->PackInfo[i].SFQueueLock);
- i = 0;
-
- init_waitqueue_head(&psAdapter->process_rx_cntrlpkt);
- init_waitqueue_head(&psAdapter->tx_packet_wait_queue);
- init_waitqueue_head(&psAdapter->process_read_wait_queue);
- init_waitqueue_head(&psAdapter->ioctl_fw_dnld_wait_queue);
- init_waitqueue_head(&psAdapter->lowpower_mode_wait_queue);
- psAdapter->waiting_to_fw_download_done = TRUE;
- psAdapter->fw_download_done = false;
-
- default_wimax_protocol_initialize(psAdapter);
- for (i = 0; i < MAX_CNTRL_PKTS; i++) {
- psAdapter->txctlpacket[i] = kmalloc(MAX_CNTL_PKT_SIZE, GFP_KERNEL);
- if (!psAdapter->txctlpacket[i]) {
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "No More Cntl pkts got, max got is %d", i);
- return -ENOMEM;
- }
- }
-
- if (AllocAdapterDsxBuffer(psAdapter)) {
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Failed to allocate DSX buffers");
- return -EINVAL;
- }
-
- /* Initialize PHS interface */
- if (phs_init(&psAdapter->stBCMPhsContext, psAdapter) != 0) {
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "%s:%s:%d:Error PHS Init Failed=====>\n", __FILE__, __func__, __LINE__);
- return -ENOMEM;
- }
-
- Status = BcmAllocFlashCSStructure(psAdapter);
- if (Status) {
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Memory Allocation for Flash structure failed");
- return Status;
- }
-
- Status = vendorextnInit(psAdapter);
-
- if (STATUS_SUCCESS != Status) {
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Vendor Init Failed");
- return Status;
- }
-
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Adapter initialised");
-
- return STATUS_SUCCESS;
-}
-
-void AdapterFree(struct bcm_mini_adapter *Adapter)
-{
- int count;
-
- beceem_protocol_reset(Adapter);
- vendorextnExit(Adapter);
-
- if (Adapter->control_packet_handler && !IS_ERR(Adapter->control_packet_handler))
- kthread_stop(Adapter->control_packet_handler);
-
- if (Adapter->transmit_packet_thread && !IS_ERR(Adapter->transmit_packet_thread))
- kthread_stop(Adapter->transmit_packet_thread);
-
- wake_up(&Adapter->process_read_wait_queue);
-
- if (Adapter->LEDInfo.led_thread_running & (BCM_LED_THREAD_RUNNING_ACTIVELY | BCM_LED_THREAD_RUNNING_INACTIVELY))
- kthread_stop(Adapter->LEDInfo.led_cntrl_threadid);
-
- unregister_networkdev(Adapter);
-
- /* FIXME: use proper wait_event and refcounting */
- while (atomic_read(&Adapter->ApplicationRunning)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Waiting for Application to close.. %d\n", atomic_read(&Adapter->ApplicationRunning));
- msleep(100);
- }
- unregister_control_device_interface(Adapter);
- kfree(Adapter->pstargetparams);
-
- for (count = 0; count < MAX_CNTRL_PKTS; count++)
- kfree(Adapter->txctlpacket[count]);
-
- FreeAdapterDsxBuffer(Adapter);
- kfree(Adapter->pvInterfaceAdapter);
-
- /* Free the PHS Interface */
- PhsCleanup(&Adapter->stBCMPhsContext);
-
- BcmDeAllocFlashCSStructure(Adapter);
-
- free_netdev(Adapter->dev);
-}
-
-static int create_worker_threads(struct bcm_mini_adapter *psAdapter)
-{
- /* Rx Control Packets Processing */
- psAdapter->control_packet_handler = kthread_run((int (*)(void *))
- control_packet_handler, psAdapter, "%s-rx", DRV_NAME);
- if (IS_ERR(psAdapter->control_packet_handler)) {
- pr_notice(DRV_NAME ": could not create control thread\n");
- return PTR_ERR(psAdapter->control_packet_handler);
- }
-
- /* Tx Thread */
- psAdapter->transmit_packet_thread = kthread_run((int (*)(void *))
- tx_pkt_handler, psAdapter, "%s-tx", DRV_NAME);
- if (IS_ERR(psAdapter->transmit_packet_thread)) {
-		pr_notice(DRV_NAME ": could not create transmit thread\n");
- kthread_stop(psAdapter->control_packet_handler);
- return PTR_ERR(psAdapter->transmit_packet_thread);
- }
- return 0;
-}
-
-static struct file *open_firmware_file(struct bcm_mini_adapter *Adapter, const char *path)
-{
- struct file *flp = filp_open(path, O_RDONLY, S_IRWXU);
-
- if (IS_ERR(flp)) {
-		pr_err(DRV_NAME ": Unable To Open File %s, err %ld\n", path, PTR_ERR(flp));
- flp = NULL;
- }
-
- if (Adapter->device_removed)
- flp = NULL;
-
- return flp;
-}
-
-/* Arguments:
- * Logical Adapter
- * Path to image file
- * Download Address on the chip
- */
-static int BcmFileDownload(struct bcm_mini_adapter *Adapter, const char *path, unsigned int loc)
-{
- int errorno = 0;
- struct file *flp = NULL;
- struct timeval tv = {0};
-
- flp = open_firmware_file(Adapter, path);
- if (!flp) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Unable to Open %s\n", path);
- return -ENOENT;
- }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Opened file is = %s and length =0x%lx to be downloaded at =0x%x", path, (unsigned long)file_inode(flp)->i_size, loc);
- do_gettimeofday(&tv);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "download start %lx", ((tv.tv_sec * 1000) + (tv.tv_usec / 1000)));
- if (Adapter->bcm_file_download(Adapter->pvInterfaceAdapter, flp, loc)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Failed to download the firmware with error %x!!!", -EIO);
- errorno = -EIO;
- goto exit_download;
- }
- vfs_llseek(flp, 0, 0);
- if (Adapter->bcm_file_readback_from_chip(Adapter->pvInterfaceAdapter, flp, loc)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Failed to read back firmware!");
- errorno = -EIO;
- goto exit_download;
- }
-
-exit_download:
- filp_close(flp, NULL);
- return errorno;
-}
-
-/**
- * @ingroup ctrl_pkt_functions
- * This function copies the contents of the given buffer
- * to the control packet and queues it for transmission.
- * @note Do not acquire the spinlock, as it is already acquired.
- * @return SUCCESS/FAILURE.
- * Arguments:
- * Logical Adapter
- * Control Packet Buffer
- */
-int CopyBufferToControlPacket(struct bcm_mini_adapter *Adapter, void *ioBuffer)
-{
- struct bcm_leader *pLeader = NULL;
- int Status = 0;
- unsigned char *ctrl_buff;
- unsigned int pktlen = 0;
- struct bcm_link_request *pLinkReq = NULL;
- PUCHAR pucAddIndication = NULL;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "======>");
- if (!ioBuffer) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Got Null Buffer\n");
- return -EINVAL;
- }
-
- pLinkReq = (struct bcm_link_request *)ioBuffer;
- pLeader = (struct bcm_leader *)ioBuffer; /* ioBuffer Contains sw_Status and Payload */
-
- if (Adapter->bShutStatus == TRUE &&
- pLinkReq->szData[0] == LINK_DOWN_REQ_PAYLOAD &&
- pLinkReq->szData[1] == LINK_SYNC_UP_SUBTYPE) {
-
-		/* Got sync down in SHUTDOWN; we cannot process this. */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "SYNC DOWN Request in Shut Down Mode..\n");
- return STATUS_FAILURE;
- }
-
- if ((pLeader->Status == LINK_UP_CONTROL_REQ) &&
- ((pLinkReq->szData[0] == LINK_UP_REQ_PAYLOAD &&
- (pLinkReq->szData[1] == LINK_SYNC_UP_SUBTYPE)) || /* Sync Up Command */
- pLinkReq->szData[0] == NETWORK_ENTRY_REQ_PAYLOAD)) /* Net Entry Command */ {
-
- if (Adapter->LinkStatus > PHY_SYNC_ACHIVED) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "LinkStatus is Greater than PHY_SYN_ACHIEVED");
- return STATUS_FAILURE;
- }
-
- if (Adapter->bShutStatus == TRUE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "SYNC UP IN SHUTDOWN..Device WakeUp\n");
- if (Adapter->bTriedToWakeUpFromlowPowerMode == false) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Waking up for the First Time..\n");
- Adapter->usIdleModePattern = ABORT_SHUTDOWN_MODE; /* change it to 1 for current support. */
- Adapter->bWakeUpDevice = TRUE;
- wake_up(&Adapter->process_rx_cntrlpkt);
- Status = wait_event_interruptible_timeout(Adapter->lowpower_mode_wait_queue, !Adapter->bShutStatus, (5 * HZ));
-
- if (Status == -ERESTARTSYS)
- return Status;
-
- if (Adapter->bShutStatus) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Shutdown Mode Wake up Failed - No Wake Up Received\n");
- return STATUS_FAILURE;
- }
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Wakeup has been tried already...\n");
- }
- }
- }
-
- if (Adapter->IdleMode == TRUE) {
- /* BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Device is in Idle mode ... hence\n"); */
- if (pLeader->Status == LINK_UP_CONTROL_REQ || pLeader->Status == 0x80 ||
- pLeader->Status == CM_CONTROL_NEWDSX_MULTICLASSIFIER_REQ) {
-
- if ((pLeader->Status == LINK_UP_CONTROL_REQ) && (pLinkReq->szData[0] == LINK_DOWN_REQ_PAYLOAD)) {
- if (pLinkReq->szData[1] == LINK_SYNC_DOWN_SUBTYPE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Link Down Sent in Idle Mode\n");
- Adapter->usIdleModePattern = ABORT_IDLE_SYNCDOWN; /* LINK DOWN sent in Idle Mode */
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "ABORT_IDLE_MODE pattern is being written\n");
- Adapter->usIdleModePattern = ABORT_IDLE_REG;
- }
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "ABORT_IDLE_MODE pattern is being written\n");
- Adapter->usIdleModePattern = ABORT_IDLE_MODE;
- }
-
-			/*
-			 * Set bIdleMode_tx_from_host to TRUE to tell the LED
-			 * control thread that the wake-up from idle mode came
-			 * from the host.
-			 */
- /* Adapter->LEDInfo.bIdleMode_tx_from_host = TRUE; */
- Adapter->bWakeUpDevice = TRUE;
- wake_up(&Adapter->process_rx_cntrlpkt);
-
- /* We should not send DREG message down while in idlemode. */
- if (LINK_DOWN_REQ_PAYLOAD == pLinkReq->szData[0])
- return STATUS_SUCCESS;
-
- Status = wait_event_interruptible_timeout(Adapter->lowpower_mode_wait_queue, !Adapter->IdleMode, (5 * HZ));
-
- if (Status == -ERESTARTSYS)
- return Status;
-
- if (Adapter->IdleMode) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Idle Mode Wake up Failed - No Wake Up Received\n");
- return STATUS_FAILURE;
- }
- } else {
- return STATUS_SUCCESS;
- }
- }
-
- /* The Driver has to send control messages with a particular VCID */
- pLeader->Vcid = VCID_CONTROL_PACKET; /* VCID for control packet. */
-
-	/* Pick a preallocated control packet buffer */
- pktlen = pLeader->PLength;
- ctrl_buff = (char *)Adapter->txctlpacket[atomic_read(&Adapter->index_wr_txcntrlpkt)%MAX_CNTRL_PKTS];
-
- if (!ctrl_buff) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "mem allocation Failed");
- return -ENOMEM;
- }
-
-	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Control packet to be taken =%d and address is =%p, incoming address is =%p and packet len=%x",
- atomic_read(&Adapter->index_wr_txcntrlpkt), ctrl_buff, ioBuffer, pktlen);
-
- if (pLeader) {
- if ((pLeader->Status == 0x80) ||
- (pLeader->Status == CM_CONTROL_NEWDSX_MULTICLASSIFIER_REQ)) {
-			/*
-			 * Restructure the DSX message to handle multiple
-			 * classifier support: write the service flow parameter
-			 * structures directly to the target and embed the
-			 * pointers in the DSX messages sent to the target.
-			 */
-			/* Store the current length of the control packet we are transmitting */
- pucAddIndication = (PUCHAR)ioBuffer + LEADER_SIZE;
- pktlen = pLeader->PLength;
- Status = StoreCmControlResponseMessage(Adapter, pucAddIndication, &pktlen);
- if (Status != 1) {
- ClearTargetDSXBuffer(Adapter, ((struct bcm_add_indication_alt *)pucAddIndication)->u16TID, false);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, " Error Restoring The DSX Control Packet. Dsx Buffers on Target may not be Setup Properly ");
- return STATUS_FAILURE;
- }
- /*
- * update the leader to use the new length
- * The length of the control packet is length of message being sent + Leader length
- */
- pLeader->PLength = pktlen;
- }
- }
-
- if (pktlen + LEADER_SIZE > MAX_CNTL_PKT_SIZE)
- return -EINVAL;
-
- memset(ctrl_buff, 0, pktlen+LEADER_SIZE);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Copying the Control Packet Buffer with length=%d\n", pLeader->PLength);
- *(struct bcm_leader *)ctrl_buff = *pLeader;
- memcpy(ctrl_buff + LEADER_SIZE, ((PUCHAR)ioBuffer + LEADER_SIZE), pLeader->PLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Enqueuing the Control Packet");
-
- /* Update the statistics counters */
- spin_lock_bh(&Adapter->PackInfo[HiPriority].SFQueueLock);
- Adapter->PackInfo[HiPriority].uiCurrentBytesOnHost += pLeader->PLength;
- Adapter->PackInfo[HiPriority].uiCurrentPacketsOnHost++;
- atomic_inc(&Adapter->TotalPacketCount);
- spin_unlock_bh(&Adapter->PackInfo[HiPriority].SFQueueLock);
- Adapter->PackInfo[HiPriority].bValid = TRUE;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "CurrBytesOnHost: %x bValid: %x",
- Adapter->PackInfo[HiPriority].uiCurrentBytesOnHost,
- Adapter->PackInfo[HiPriority].bValid);
- Status = STATUS_SUCCESS;
- /*Queue the packet for transmission */
- atomic_inc(&Adapter->index_wr_txcntrlpkt);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Calling transmit_packets");
- atomic_set(&Adapter->TxPktAvail, 1);
- wake_up(&Adapter->tx_packet_wait_queue);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "<====");
- return Status;
-}
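/*
 * Illustrative sketch (not part of the removed driver): the control-packet
 * pool above is indexed by free-running write/read counters taken modulo
 * MAX_CNTRL_PKTS. A single-threaded reduction of that scheme (the real code
 * uses atomic_t counters and a worker thread); names here are placeholders.
 */
#include <stdio.h>

#define POOL_SIZE 4

static char pool[POOL_SIZE][32];
static unsigned int wr, rd;		/* free-running counters */

static void enqueue(const char *msg)
{
	/* a full-queue check would be needed before claiming a slot */
	snprintf(pool[wr % POOL_SIZE], sizeof(pool[0]), "%s", msg);
	wr++;
}

static void drain(void)
{
	while (rd != wr) {		/* non-empty as long as rd lags wr */
		printf("sending: %s\n", pool[rd % POOL_SIZE]);
		rd++;
	}
}

int main(void)
{
	enqueue("link-up request");
	enqueue("idle-mode ack");
	drain();
	return 0;
}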
-
-/******************************************************************
-* Function - LinkMessage()
-*
-* Description - This function builds the Sync-up and Link-up request
-* packet messages depending on the device Link status.
-*
-* Parameters - Adapter: Pointer to the Adapter structure.
-*
-* Returns - None.
-*******************************************************************/
-void LinkMessage(struct bcm_mini_adapter *Adapter)
-{
- struct bcm_link_request *pstLinkRequest = NULL;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "=====>");
- if (Adapter->LinkStatus == SYNC_UP_REQUEST && Adapter->AutoSyncup) {
- pstLinkRequest = kzalloc(sizeof(struct bcm_link_request), GFP_ATOMIC);
- if (!pstLinkRequest) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "Can not allocate memory for Link request!");
- return;
- }
- /* sync up request... */
- Adapter->LinkStatus = WAIT_FOR_SYNC; /* current link status */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "Requesting For SyncUp...");
- pstLinkRequest->szData[0] = LINK_UP_REQ_PAYLOAD;
- pstLinkRequest->szData[1] = LINK_SYNC_UP_SUBTYPE;
- pstLinkRequest->Leader.Status = LINK_UP_CONTROL_REQ;
- pstLinkRequest->Leader.PLength = sizeof(ULONG);
- Adapter->bSyncUpRequestSent = TRUE;
-
- } else if (Adapter->LinkStatus == PHY_SYNC_ACHIVED && Adapter->AutoLinkUp) {
- pstLinkRequest = kzalloc(sizeof(struct bcm_link_request), GFP_ATOMIC);
- if (!pstLinkRequest) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "Can not allocate memory for Link request!");
- return;
- }
- /* LINK_UP_REQUEST */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "Requesting For LinkUp...");
- pstLinkRequest->szData[0] = LINK_UP_REQ_PAYLOAD;
- pstLinkRequest->szData[1] = LINK_NET_ENTRY;
- pstLinkRequest->Leader.Status = LINK_UP_CONTROL_REQ;
- pstLinkRequest->Leader.PLength = sizeof(ULONG);
- }
- if (pstLinkRequest) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "Calling CopyBufferToControlPacket");
- CopyBufferToControlPacket(Adapter, pstLinkRequest);
- kfree(pstLinkRequest);
- }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "LinkMessage <=====");
- return;
-}
-
-/**********************************************************************
-* Function - StatisticsResponse()
-*
-* Description - This function handles the Statistics response packet.
-*
-* Parameters - Adapter : Pointer to the Adapter structure.
-* - pvBuffer: Starting address of Statistic response data.
-*
-* Returns - None.
-************************************************************************/
-void StatisticsResponse(struct bcm_mini_adapter *Adapter, void *pvBuffer)
-{
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "%s====>", __func__);
- Adapter->StatisticsPointer = ntohl(*(__be32 *)pvBuffer);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "Stats at %x", (unsigned int)Adapter->StatisticsPointer);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "%s <====", __func__);
-}
-
-/**********************************************************************
-* Function - LinkControlResponseMessage()
-*
-* Description - This function handles the Link response packets.
-*
-* Parameters - Adapter : Pointer to the Adapter structure.
-* - pucBuffer: Starting address of Link response data.
-*
-* Returns - None.
-***********************************************************************/
-void LinkControlResponseMessage(struct bcm_mini_adapter *Adapter, PUCHAR pucBuffer)
-{
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "=====>");
-
- if (*pucBuffer == LINK_UP_ACK) {
- switch (*(pucBuffer+1)) {
- case PHY_SYNC_ACHIVED: /* SYNCed UP */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "PHY_SYNC_ACHIVED");
-
- if (Adapter->LinkStatus == LINKUP_DONE)
- beceem_protocol_reset(Adapter);
-
- Adapter->usBestEffortQueueIndex = INVALID_QUEUE_INDEX;
- Adapter->LinkStatus = PHY_SYNC_ACHIVED;
-
- if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
- Adapter->DriverState = NO_NETWORK_ENTRY;
- wake_up(&Adapter->LEDInfo.notify_led_event);
- }
-
- LinkMessage(Adapter);
- break;
-
- case LINKUP_DONE:
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "LINKUP_DONE");
- Adapter->LinkStatus = LINKUP_DONE;
- Adapter->bPHSEnabled = *(pucBuffer+3);
- Adapter->bETHCSEnabled = *(pucBuffer+4) & ETH_CS_MASK;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "PHS Support Status Received In LinkUp Ack : %x\n", Adapter->bPHSEnabled);
-
- if ((false == Adapter->bShutStatus) && (false == Adapter->IdleMode)) {
- if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
- Adapter->DriverState = NORMAL_OPERATION;
- wake_up(&Adapter->LEDInfo.notify_led_event);
- }
- }
- LinkMessage(Adapter);
- break;
-
- case WAIT_FOR_SYNC:
-			/*
-			 * The driver ignores DREG_RECEIVED; the WiMAX
-			 * application should handle this message.
-			 */
- /* Adapter->liTimeSinceLastNetEntry = 0; */
- Adapter->LinkUpStatus = 0;
- Adapter->LinkStatus = 0;
- Adapter->usBestEffortQueueIndex = INVALID_QUEUE_INDEX;
- Adapter->bTriedToWakeUpFromlowPowerMode = false;
- Adapter->IdleMode = false;
- beceem_protocol_reset(Adapter);
-
- break;
- case LINK_SHUTDOWN_REQ_FROM_FIRMWARE:
- case COMPLETE_WAKE_UP_NOTIFICATION_FRM_FW:
- {
- HandleShutDownModeRequest(Adapter, pucBuffer);
- }
- break;
- default:
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "default case:LinkResponse %x", *(pucBuffer + 1));
- break;
- }
- } else if (SET_MAC_ADDRESS_RESPONSE == *pucBuffer) {
- PUCHAR puMacAddr = (pucBuffer + 1);
-
- Adapter->LinkStatus = SYNC_UP_REQUEST;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "MAC address response, sending SYNC_UP");
- LinkMessage(Adapter);
- memcpy(Adapter->dev->dev_addr, puMacAddr, MAC_ADDRESS_SIZE);
- }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "%s <=====", __func__);
-}
-
-void SendIdleModeResponse(struct bcm_mini_adapter *Adapter)
-{
- int status = 0, NVMAccess = 0, lowPwrAbortMsg = 0;
- struct timeval tv;
- struct bcm_link_request stIdleResponse = {{0} };
-
- memset(&tv, 0, sizeof(tv));
- stIdleResponse.Leader.Status = IDLE_MESSAGE;
- stIdleResponse.Leader.PLength = IDLE_MODE_PAYLOAD_LENGTH;
- stIdleResponse.szData[0] = GO_TO_IDLE_MODE_PAYLOAD;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, " ============>");
-
-	/*
-	 * down_trylock():
-	 *   if the semaphore is available, acquire it and return 0;
-	 *   otherwise return a non-zero value without blocking.
-	 */
-
- NVMAccess = down_trylock(&Adapter->NVMRdmWrmLock);
- lowPwrAbortMsg = down_trylock(&Adapter->LowPowerModeSync);
-
-
- if ((NVMAccess || lowPwrAbortMsg || atomic_read(&Adapter->TotalPacketCount)) &&
- (Adapter->ulPowerSaveMode != DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE)) {
-
- if (!NVMAccess)
- up(&Adapter->NVMRdmWrmLock);
-
- if (!lowPwrAbortMsg)
- up(&Adapter->LowPowerModeSync);
-
- stIdleResponse.szData[1] = TARGET_CAN_NOT_GO_TO_IDLE_MODE; /* NACK- device access is going on. */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "HOST IS NACKING Idle mode To F/W!!!!!!!!");
- Adapter->bPreparingForLowPowerMode = false;
- } else {
- stIdleResponse.szData[1] = TARGET_CAN_GO_TO_IDLE_MODE; /* 2; Idle ACK */
- Adapter->StatisticsPointer = 0;
-
- /* Wait for the LED to TURN OFF before sending ACK response */
- if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
- int iRetVal = 0;
-
- /* Wake the LED Thread with IDLEMODE_ENTER State */
- Adapter->DriverState = LOWPOWER_MODE_ENTER;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "LED Thread is Running..Hence Setting LED Event as IDLEMODE_ENTER jiffies:%ld", jiffies);
- wake_up(&Adapter->LEDInfo.notify_led_event);
-
-			/* Wait up to 1 second for the LED to turn off */
- iRetVal = wait_event_timeout(Adapter->LEDInfo.idleModeSyncEvent, Adapter->LEDInfo.bIdle_led_off, msecs_to_jiffies(1000));
-
-			/* If we timed out syncing idle-mode entry, exit idle mode and send a NACK to the device */
- if (iRetVal <= 0) {
- stIdleResponse.szData[1] = TARGET_CAN_NOT_GO_TO_IDLE_MODE; /* NACK- device access is going on. */
- Adapter->DriverState = NORMAL_OPERATION;
- wake_up(&Adapter->LEDInfo.notify_led_event);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "NACKING Idle mode as time out happen from LED side!!!!!!!!");
- }
- }
-
- if (stIdleResponse.szData[1] == TARGET_CAN_GO_TO_IDLE_MODE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "ACKING IDLE MODE !!!!!!!!!");
- down(&Adapter->rdmwrmsync);
- Adapter->bPreparingForLowPowerMode = TRUE;
- up(&Adapter->rdmwrmsync);
- /* Killing all URBS. */
- if (Adapter->bDoSuspend == TRUE)
- Bcm_kill_all_URBs((struct bcm_interface_adapter *)(Adapter->pvInterfaceAdapter));
- } else {
- Adapter->bPreparingForLowPowerMode = false;
- }
-
- if (!NVMAccess)
- up(&Adapter->NVMRdmWrmLock);
-
- if (!lowPwrAbortMsg)
- up(&Adapter->LowPowerModeSync);
- }
-
- status = CopyBufferToControlPacket(Adapter, &stIdleResponse);
- if (status != STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "fail to send the Idle mode Request\n");
- Adapter->bPreparingForLowPowerMode = false;
- StartInterruptUrb((struct bcm_interface_adapter *)(Adapter->pvInterfaceAdapter));
- }
- do_gettimeofday(&tv);
-	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "IdleMode Msg submitted to Q :%ld ms", tv.tv_sec * 1000 + tv.tv_usec / 1000);
-}
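/*
 * Illustrative sketch (not part of the removed driver): the ACK/NACK decision
 * in SendIdleModeResponse() hinges on non-blocking semaphore acquisition. A
 * user-space analogue using POSIX semaphores; sem_trywait() returns 0 on
 * success and -1 (errno == EAGAIN) when the semaphore is already held, which
 * mirrors the down_trylock() convention described in the comment above. The
 * lock name and messages are assumptions for the example only.
 */
#include <errno.h>
#include <semaphore.h>
#include <stdio.h>

int main(void)
{
	sem_t nvm_lock;
	int busy;

	sem_init(&nvm_lock, 0, 1);	/* binary semaphore, initially free */

	busy = (sem_trywait(&nvm_lock) != 0);	/* 0 = acquired, non-zero = busy */
	if (busy) {
		printf("NACK: NVM access in progress, refuse idle mode\n");
	} else {
		printf("ACK: nothing in flight, allow idle mode\n");
		sem_post(&nvm_lock);	/* release on the success path only */
	}

	sem_destroy(&nvm_lock);
	return 0;
}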
-
-/******************************************************************
-* Function - DumpPackInfo()
-*
-* Description - This function dumps all the queue (PackInfo[]) details.
-*
-* Parameters - Adapter: Pointer to the Adapter structure.
-*
-* Returns - None.
-*******************************************************************/
-void DumpPackInfo(struct bcm_mini_adapter *Adapter)
-{
- unsigned int uiLoopIndex = 0;
- unsigned int uiIndex = 0;
- unsigned int uiClsfrIndex = 0;
- struct bcm_classifier_rule *pstClassifierEntry = NULL;
-
- for (uiLoopIndex = 0; uiLoopIndex < NO_OF_QUEUES; uiLoopIndex++) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "*********** Showing Details Of Queue %d***** ******", uiLoopIndex);
- if (false == Adapter->PackInfo[uiLoopIndex].bValid) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "bValid is false for %X index\n", uiLoopIndex);
- continue;
- }
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, " Dumping SF Rule Entry For SFID %lX\n", Adapter->PackInfo[uiLoopIndex].ulSFID);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, " ucDirection %X\n", Adapter->PackInfo[uiLoopIndex].ucDirection);
-
- if (Adapter->PackInfo[uiLoopIndex].ucIpVersion == IPV6)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "Ipv6 Service Flow\n");
- else
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "Ipv4 Service Flow\n");
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "SF Traffic Priority %X\n", Adapter->PackInfo[uiLoopIndex].u8TrafficPriority);
-
- for (uiClsfrIndex = 0; uiClsfrIndex < MAX_CLASSIFIERS; uiClsfrIndex++) {
- pstClassifierEntry = &Adapter->astClassifierTable[uiClsfrIndex];
- if (!pstClassifierEntry->bUsed)
- continue;
-
- if (pstClassifierEntry->ulSFID != Adapter->PackInfo[uiLoopIndex].ulSFID)
- continue;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tDumping Classifier Rule Entry For Index: %X Classifier Rule ID : %X\n", uiClsfrIndex, pstClassifierEntry->uiClassifierRuleIndex);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tDumping Classifier Rule Entry For Index: %X usVCID_Value : %X\n", uiClsfrIndex, pstClassifierEntry->usVCID_Value);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tDumping Classifier Rule Entry For Index: %X bProtocolValid : %X\n", uiClsfrIndex, pstClassifierEntry->bProtocolValid);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tDumping Classifier Rule Entry For Index: %X bTOSValid : %X\n", uiClsfrIndex, pstClassifierEntry->bTOSValid);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tDumping Classifier Rule Entry For Index: %X bDestIpValid : %X\n", uiClsfrIndex, pstClassifierEntry->bDestIpValid);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tDumping Classifier Rule Entry For Index: %X bSrcIpValid : %X\n", uiClsfrIndex, pstClassifierEntry->bSrcIpValid);
-
- for (uiIndex = 0; uiIndex < MAX_PORT_RANGE; uiIndex++) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tusSrcPortRangeLo:%X\n", pstClassifierEntry->usSrcPortRangeLo[uiIndex]);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tusSrcPortRangeHi:%X\n", pstClassifierEntry->usSrcPortRangeHi[uiIndex]);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tusDestPortRangeLo:%X\n", pstClassifierEntry->usDestPortRangeLo[uiIndex]);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tusDestPortRangeHi:%X\n", pstClassifierEntry->usDestPortRangeHi[uiIndex]);
- }
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tucIPSourceAddressLength : 0x%x\n", pstClassifierEntry->ucIPSourceAddressLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tucIPDestinationAddressLength : 0x%x\n", pstClassifierEntry->ucIPDestinationAddressLength);
- for (uiIndex = 0; uiIndex < pstClassifierEntry->ucIPSourceAddressLength; uiIndex++) {
- if (Adapter->PackInfo[uiLoopIndex].ucIpVersion == IPV6) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tIpv6 ulSrcIpAddr :\n");
- DumpIpv6Address(pstClassifierEntry->stSrcIpAddress.ulIpv6Addr);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tIpv6 ulSrcIpMask :\n");
- DumpIpv6Address(pstClassifierEntry->stSrcIpAddress.ulIpv6Mask);
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tulSrcIpAddr:%lX\n", pstClassifierEntry->stSrcIpAddress.ulIpv4Addr[uiIndex]);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tulSrcIpMask:%lX\n", pstClassifierEntry->stSrcIpAddress.ulIpv4Mask[uiIndex]);
- }
- }
-
- for (uiIndex = 0; uiIndex < pstClassifierEntry->ucIPDestinationAddressLength; uiIndex++) {
- if (Adapter->PackInfo[uiLoopIndex].ucIpVersion == IPV6) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tIpv6 ulDestIpAddr :\n");
- DumpIpv6Address(pstClassifierEntry->stDestIpAddress.ulIpv6Addr);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tIpv6 ulDestIpMask :\n");
- DumpIpv6Address(pstClassifierEntry->stDestIpAddress.ulIpv6Mask);
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tulDestIpAddr:%lX\n", pstClassifierEntry->stDestIpAddress.ulIpv4Addr[uiIndex]);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tulDestIpMask:%lX\n", pstClassifierEntry->stDestIpAddress.ulIpv4Mask[uiIndex]);
- }
- }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tucProtocol:0x%X\n", pstClassifierEntry->ucProtocol[0]);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tu8ClassifierRulePriority:%X\n", pstClassifierEntry->u8ClassifierRulePriority);
- }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "ulSFID:%lX\n", Adapter->PackInfo[uiLoopIndex].ulSFID);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "usVCID_Value:%X\n", Adapter->PackInfo[uiLoopIndex].usVCID_Value);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "PhsEnabled: 0x%X\n", Adapter->PackInfo[uiLoopIndex].bHeaderSuppressionEnabled);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiThreshold:%X\n", Adapter->PackInfo[uiLoopIndex].uiThreshold);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "bValid:%X\n", Adapter->PackInfo[uiLoopIndex].bValid);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "bActive:%X\n", Adapter->PackInfo[uiLoopIndex].bActive);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "ActivateReqSent: %x", Adapter->PackInfo[uiLoopIndex].bActivateRequestSent);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "u8QueueType:%X\n", Adapter->PackInfo[uiLoopIndex].u8QueueType);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiMaxBucketSize:%X\n", Adapter->PackInfo[uiLoopIndex].uiMaxBucketSize);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiPerSFTxResourceCount:%X\n", atomic_read(&Adapter->PackInfo[uiLoopIndex].uiPerSFTxResourceCount));
- /* DumpDebug(DUMP_INFO,("bCSSupport:%X\n",Adapter->PackInfo[uiLoopIndex].bCSSupport)); */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "CurrQueueDepthOnTarget: %x\n", Adapter->PackInfo[uiLoopIndex].uiCurrentQueueDepthOnTarget);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiCurrentBytesOnHost:%X\n", Adapter->PackInfo[uiLoopIndex].uiCurrentBytesOnHost);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiCurrentPacketsOnHost:%X\n", Adapter->PackInfo[uiLoopIndex].uiCurrentPacketsOnHost);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiDroppedCountBytes:%X\n", Adapter->PackInfo[uiLoopIndex].uiDroppedCountBytes);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiDroppedCountPackets:%X\n", Adapter->PackInfo[uiLoopIndex].uiDroppedCountPackets);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiSentBytes:%X\n", Adapter->PackInfo[uiLoopIndex].uiSentBytes);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiSentPackets:%X\n", Adapter->PackInfo[uiLoopIndex].uiSentPackets);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiCurrentDrainRate:%X\n", Adapter->PackInfo[uiLoopIndex].uiCurrentDrainRate);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiThisPeriodSentBytes:%X\n", Adapter->PackInfo[uiLoopIndex].uiThisPeriodSentBytes);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "liDrainCalculated:%llX\n", Adapter->PackInfo[uiLoopIndex].liDrainCalculated);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiCurrentTokenCount:%X\n", Adapter->PackInfo[uiLoopIndex].uiCurrentTokenCount);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "liLastUpdateTokenAt:%llX\n", Adapter->PackInfo[uiLoopIndex].liLastUpdateTokenAt);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiMaxAllowedRate:%X\n", Adapter->PackInfo[uiLoopIndex].uiMaxAllowedRate);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiPendedLast:%X\n", Adapter->PackInfo[uiLoopIndex].uiPendedLast);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "NumOfPacketsSent:%X\n", Adapter->PackInfo[uiLoopIndex].NumOfPacketsSent);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "Direction: %x\n", Adapter->PackInfo[uiLoopIndex].ucDirection);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "CID: %x\n", Adapter->PackInfo[uiLoopIndex].usCID);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "ProtocolValid: %x\n", Adapter->PackInfo[uiLoopIndex].bProtocolValid);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "TOSValid: %x\n", Adapter->PackInfo[uiLoopIndex].bTOSValid);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "DestIpValid: %x\n", Adapter->PackInfo[uiLoopIndex].bDestIpValid);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "SrcIpValid: %x\n", Adapter->PackInfo[uiLoopIndex].bSrcIpValid);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "ActiveSet: %x\n", Adapter->PackInfo[uiLoopIndex].bActiveSet);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "AdmittedSet: %x\n", Adapter->PackInfo[uiLoopIndex].bAdmittedSet);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "AuthzSet: %x\n", Adapter->PackInfo[uiLoopIndex].bAuthorizedSet);
-		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "ClassifyPriority: %x\n", Adapter->PackInfo[uiLoopIndex].bClassifierPriority);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiMaxLatency: %x\n", Adapter->PackInfo[uiLoopIndex].uiMaxLatency);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO,
- DBG_LVL_ALL, "ServiceClassName: %*ph\n",
- 4, Adapter->PackInfo[uiLoopIndex].
- ucServiceClassName);
-/* BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "bHeaderSuppressionEnabled :%X\n", Adapter->PackInfo[uiLoopIndex].bHeaderSuppressionEnabled);
- * BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiTotalTxBytes:%X\n", Adapter->PackInfo[uiLoopIndex].uiTotalTxBytes);
- * BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiTotalRxBytes:%X\n", Adapter->PackInfo[uiLoopIndex].uiTotalRxBytes);
- * DumpDebug(DUMP_INFO,(" uiRanOutOfResCount:%X\n",Adapter->PackInfo[uiLoopIndex].uiRanOutOfResCount));
- */
- }
-
- for (uiLoopIndex = 0; uiLoopIndex < MIBS_MAX_HIST_ENTRIES; uiLoopIndex++)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "Adapter->aRxPktSizeHist[%x] = %x\n", uiLoopIndex, Adapter->aRxPktSizeHist[uiLoopIndex]);
-
- for (uiLoopIndex = 0; uiLoopIndex < MIBS_MAX_HIST_ENTRIES; uiLoopIndex++)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "Adapter->aTxPktSizeHist[%x] = %x\n", uiLoopIndex, Adapter->aTxPktSizeHist[uiLoopIndex]);
-}
-
-int reset_card_proc(struct bcm_mini_adapter *ps_adapter)
-{
- int retval = STATUS_SUCCESS;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- struct bcm_interface_adapter *psIntfAdapter = NULL;
- unsigned int value = 0, uiResetValue = 0;
- int bytes;
-
- psIntfAdapter = ((struct bcm_interface_adapter *)(ps_adapter->pvInterfaceAdapter));
- ps_adapter->bDDRInitDone = false;
-
- if (ps_adapter->chip_id >= T3LPB) {
-		/* The SYS_CFG register is write protected, so it must be read twice before its value can be modified. */
- rdmalt(ps_adapter, SYS_CFG, &value, sizeof(value));
- rdmalt(ps_adapter, SYS_CFG, &value, sizeof(value));
-
-		/* Make bits [6..5] the same as they were before f/w download; this */
-		/* setting forces the h/w to re-populate the SP RAM area with the string descriptor. */
- value = value | (ps_adapter->syscfgBefFwDld & 0x00000060);
- wrmalt(ps_adapter, SYS_CFG, &value, sizeof(value));
- }
-
- /* killing all submitted URBs. */
- psIntfAdapter->psAdapter->StopAllXaction = TRUE;
- Bcm_kill_all_URBs(psIntfAdapter);
- /* Reset the UMA-B Device */
- if (ps_adapter->chip_id >= T3LPB) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Resetting UMA-B\n");
- retval = usb_reset_device(psIntfAdapter->udev);
- psIntfAdapter->psAdapter->StopAllXaction = false;
-
- if (retval != STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Reset failed with ret value :%d", retval);
- goto err_exit;
- }
-
- if (ps_adapter->chip_id == BCS220_2 ||
- ps_adapter->chip_id == BCS220_2BC ||
- ps_adapter->chip_id == BCS250_BC ||
- ps_adapter->chip_id == BCS220_3) {
-
- bytes = rdmalt(ps_adapter, HPM_CONFIG_LDO145, &value, sizeof(value));
- if (bytes < 0) {
- retval = bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "read failed with status :%d", retval);
- goto err_exit;
- }
- /* setting 0th bit */
- value |= (1<<0);
- retval = wrmalt(ps_adapter, HPM_CONFIG_LDO145, &value, sizeof(value));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "write failed with status :%d", retval);
- goto err_exit;
- }
- }
- } else {
- bytes = rdmalt(ps_adapter, 0x0f007018, &value, sizeof(value));
- if (bytes < 0) {
- retval = bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "read failed with status :%d", retval);
- goto err_exit;
- }
- value &= (~(1<<16));
- retval = wrmalt(ps_adapter, 0x0f007018, &value, sizeof(value));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "write failed with status :%d", retval);
- goto err_exit;
- }
-
- /* Toggling the GPIO 8, 9 */
- value = 0;
- retval = wrmalt(ps_adapter, GPIO_OUTPUT_REGISTER, &value, sizeof(value));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "write failed with status :%d", retval);
- goto err_exit;
- }
- value = 0x300;
- retval = wrmalt(ps_adapter, GPIO_MODE_REGISTER, &value, sizeof(value));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "write failed with status :%d", retval);
- goto err_exit;
- }
- mdelay(50);
- }
-
- /* ps_adapter->downloadDDR = false; */
- if (ps_adapter->bFlashBoot) {
-		/* In flash boot mode the MIPS state register has reverse polarity,
-		 * so just OR in bit 30 to put the MIPS into the reset state.
-		 */
- rdmalt(ps_adapter, CLOCK_RESET_CNTRL_REG_1, &uiResetValue, sizeof(uiResetValue));
- uiResetValue |= (1<<30);
- wrmalt(ps_adapter, CLOCK_RESET_CNTRL_REG_1, &uiResetValue, sizeof(uiResetValue));
- }
-
- if (ps_adapter->chip_id >= T3LPB) {
- uiResetValue = 0;
-		/*
-		 * Workaround for the SYS_CFG issue:
-		 * read SYSCFG twice to make it writable.
-		 */
- rdmalt(ps_adapter, SYS_CFG, &uiResetValue, sizeof(uiResetValue));
- if (uiResetValue & (1<<4)) {
- uiResetValue = 0;
- rdmalt(ps_adapter, SYS_CFG, &uiResetValue, sizeof(uiResetValue)); /* 2nd read to make it writable. */
- uiResetValue &= (~(1<<4));
- wrmalt(ps_adapter, SYS_CFG, &uiResetValue, sizeof(uiResetValue));
- }
- }
- uiResetValue = 0;
- wrmalt(ps_adapter, 0x0f01186c, &uiResetValue, sizeof(uiResetValue));
-
-err_exit:
- psIntfAdapter->psAdapter->StopAllXaction = false;
- return retval;
-}
-
-int run_card_proc(struct bcm_mini_adapter *ps_adapter)
-{
- int status = STATUS_SUCCESS;
- int bytes;
-
- unsigned int value = 0;
- {
- bytes = rdmalt(ps_adapter, CLOCK_RESET_CNTRL_REG_1, &value, sizeof(value));
- if (bytes < 0) {
- status = bytes;
- BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "%s:%d\n", __func__, __LINE__);
- return status;
- }
-
- if (ps_adapter->bFlashBoot)
- value &= (~(1<<30));
- else
- value |= (1<<30);
-
- if (wrmalt(ps_adapter, CLOCK_RESET_CNTRL_REG_1, &value, sizeof(value)) < 0) {
- BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "%s:%d\n", __func__, __LINE__);
- return STATUS_FAILURE;
- }
- }
- return status;
-}
-
-int InitCardAndDownloadFirmware(struct bcm_mini_adapter *ps_adapter)
-{
- int status;
- unsigned int value = 0;
-	/*
-	 * Create the worker threads first and then download the
-	 * firmware / DDR settings.
-	 */
- status = create_worker_threads(ps_adapter);
- if (status < 0)
- return status;
-
- status = bcm_parse_target_params(ps_adapter);
- if (status)
- return status;
-
- if (ps_adapter->chip_id >= T3LPB) {
- rdmalt(ps_adapter, SYS_CFG, &value, sizeof(value));
- ps_adapter->syscfgBefFwDld = value;
-
- if ((value & 0x60) == 0)
- ps_adapter->bFlashBoot = TRUE;
- }
-
- reset_card_proc(ps_adapter);
-
- /* Initializing the NVM. */
- BcmInitNVM(ps_adapter);
- status = ddr_init(ps_adapter);
- if (status) {
-		pr_err(DRV_NAME ": ddr_init Failed\n");
- return status;
- }
-
- /* Download cfg file */
- status = buffDnldVerify(ps_adapter,
- (PUCHAR)ps_adapter->pstargetparams,
- sizeof(struct bcm_target_params),
- CONFIG_BEGIN_ADDR);
- if (status) {
- BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Error downloading CFG file");
- goto OUT;
- }
-
- if (register_networkdev(ps_adapter)) {
- BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Register Netdevice failed. Cleanup needs to be performed.");
- return -EIO;
- }
-
- if (false == ps_adapter->AutoFirmDld) {
- BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "AutoFirmDld Disabled in CFG File..\n");
-		/* If auto f/w download is disabled, register the control */
-		/* interface here, after the mailbox. */
- if (register_control_device_interface(ps_adapter) < 0) {
- BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Register Control Device failed. Cleanup needs to be performed.");
- return -EIO;
- }
- return STATUS_SUCCESS;
- }
-
-	/*
-	 * Do the LED settings here; they will be used by the firmware
-	 * download thread.
-	 */
-
-	/*
-	 * 1. If the LED settings fail, do not stop; continue with the firmware download.
-	 * 2. This init happens only if the cfg file is present; otherwise it is
-	 *    called from the ioctl context.
-	 */
-
- status = InitLedSettings(ps_adapter);
- if (status) {
- BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_PRINTK, 0, 0, "INIT LED FAILED\n");
- return status;
- }
-
- if (ps_adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
- ps_adapter->DriverState = DRIVER_INIT;
- wake_up(&ps_adapter->LEDInfo.notify_led_event);
- }
-
- if (ps_adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
- ps_adapter->DriverState = FW_DOWNLOAD;
- wake_up(&ps_adapter->LEDInfo.notify_led_event);
- }
-
- value = 0;
- wrmalt(ps_adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 4, &value, sizeof(value));
- wrmalt(ps_adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 8, &value, sizeof(value));
-
- if (ps_adapter->eNVMType == NVM_FLASH) {
- status = PropagateCalParamsFromFlashToMemory(ps_adapter);
- if (status) {
- BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Propagation of Cal param failed ..");
- goto OUT;
- }
- }
-
-	/* Download firmware */
- status = BcmFileDownload(ps_adapter, BIN_FILE, FIRMWARE_BEGIN_ADDR);
- if (status != 0) {
- BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "No Firmware File is present...\n");
- goto OUT;
- }
-
- status = run_card_proc(ps_adapter);
- if (status) {
- BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "run_card_proc Failed\n");
- goto OUT;
- }
-
- ps_adapter->fw_download_done = TRUE;
- mdelay(10);
-
-OUT:
- if (ps_adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
- ps_adapter->DriverState = FW_DOWNLOAD_DONE;
- wake_up(&ps_adapter->LEDInfo.notify_led_event);
- }
-
- return status;
-}
-
-static int bcm_parse_target_params(struct bcm_mini_adapter *Adapter)
-{
- struct file *flp = NULL;
- char *buff;
- int len = 0;
-
- buff = kmalloc(BUFFER_1K, GFP_KERNEL);
- if (!buff)
- return -ENOMEM;
-
- Adapter->pstargetparams = kmalloc(sizeof(struct bcm_target_params), GFP_KERNEL);
- if (Adapter->pstargetparams == NULL) {
- kfree(buff);
- return -ENOMEM;
- }
-
- flp = open_firmware_file(Adapter, CFG_FILE);
- if (!flp) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "NOT ABLE TO OPEN THE %s FILE\n", CFG_FILE);
- kfree(buff);
- kfree(Adapter->pstargetparams);
- Adapter->pstargetparams = NULL;
- return -ENOENT;
- }
- len = kernel_read(flp, 0, buff, BUFFER_1K);
- filp_close(flp, NULL);
-
- if (len != sizeof(struct bcm_target_params)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Mismatch in Target Param Structure!\n");
- kfree(buff);
- kfree(Adapter->pstargetparams);
- Adapter->pstargetparams = NULL;
- return -ENOENT;
- }
-
- /* Check for autolink in config params */
- /*
- * Values in Adapter->pstargetparams are in network byte order
- */
- memcpy(Adapter->pstargetparams, buff, sizeof(struct bcm_target_params));
- kfree(buff);
- beceem_parse_target_struct(Adapter);
- return STATUS_SUCCESS;
-}
-
-void beceem_parse_target_struct(struct bcm_mini_adapter *Adapter)
-{
- unsigned int uiHostDrvrCfg6 = 0, uiEEPROMFlag = 0;
-
- if (ntohl(Adapter->pstargetparams->m_u32PhyParameter2) & AUTO_SYNC_DISABLE) {
- pr_info(DRV_NAME ": AutoSyncup is Disabled\n");
- Adapter->AutoSyncup = false;
- } else {
- pr_info(DRV_NAME ": AutoSyncup is Enabled\n");
- Adapter->AutoSyncup = TRUE;
- }
-
- if (ntohl(Adapter->pstargetparams->HostDrvrConfig6) & AUTO_LINKUP_ENABLE) {
- pr_info(DRV_NAME ": Enabling autolink up");
- Adapter->AutoLinkUp = TRUE;
- } else {
- pr_info(DRV_NAME ": Disabling autolink up");
- Adapter->AutoLinkUp = false;
- }
- /* Set the DDR setting. */
- Adapter->DDRSetting = (ntohl(Adapter->pstargetparams->HostDrvrConfig6) >> 8)&0x0F;
- Adapter->ulPowerSaveMode = (ntohl(Adapter->pstargetparams->HostDrvrConfig6)>>12)&0x0F;
- pr_info(DRV_NAME ": DDR Setting: %x\n", Adapter->DDRSetting);
- pr_info(DRV_NAME ": Power Save Mode: %lx\n", Adapter->ulPowerSaveMode);
- if (ntohl(Adapter->pstargetparams->HostDrvrConfig6) & AUTO_FIRM_DOWNLOAD) {
- pr_info(DRV_NAME ": Enabling Auto Firmware Download\n");
- Adapter->AutoFirmDld = TRUE;
- } else {
- pr_info(DRV_NAME ": Disabling Auto Firmware Download\n");
- Adapter->AutoFirmDld = false;
- }
- uiHostDrvrCfg6 = ntohl(Adapter->pstargetparams->HostDrvrConfig6);
- Adapter->bMipsConfig = (uiHostDrvrCfg6>>20)&0x01;
- pr_info(DRV_NAME ": MIPSConfig : 0x%X\n", Adapter->bMipsConfig);
- /* used for backward compatibility. */
- Adapter->bDPLLConfig = (uiHostDrvrCfg6>>19)&0x01;
- Adapter->PmuMode = (uiHostDrvrCfg6 >> 24) & 0x03;
- pr_info(DRV_NAME ": PMU MODE: %x", Adapter->PmuMode);
-
- if ((uiHostDrvrCfg6 >> HOST_BUS_SUSPEND_BIT) & (0x01)) {
- Adapter->bDoSuspend = TRUE;
- pr_info(DRV_NAME ": Making DoSuspend TRUE as per configFile");
- }
-
- uiEEPROMFlag = ntohl(Adapter->pstargetparams->m_u32EEPROMFlag);
- pr_info(DRV_NAME ": uiEEPROMFlag : 0x%X\n", uiEEPROMFlag);
- Adapter->eNVMType = (enum bcm_nvm_type)((uiEEPROMFlag>>4)&0x3);
- Adapter->bStatusWrite = (uiEEPROMFlag>>6)&0x1;
- Adapter->uiSectorSizeInCFG = 1024*(0xFFFF & ntohl(Adapter->pstargetparams->HostDrvrConfig4));
- Adapter->bSectorSizeOverride = (bool) ((ntohl(Adapter->pstargetparams->HostDrvrConfig4))>>16)&0x1;
-
- if (ntohl(Adapter->pstargetparams->m_u32PowerSavingModeOptions) & 0x01)
- Adapter->ulPowerSaveMode = DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE;
-
- if (Adapter->ulPowerSaveMode != DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE)
- doPowerAutoCorrection(Adapter);
-}
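
For reference, the shifts in beceem_parse_target_struct() above imply the bit layout of the host-order HostDrvrConfig6 word: DDR setting in bits 11..8, power-save mode in bits 15..12, DPLL config in bit 19, MIPS config in bit 20 and PMU mode in bits 25..24. Below is a minimal standalone sketch (ordinary user-space C, not driver code) that decodes the same fields from a made-up example value.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* ntohl()/htonl() */

int main(void)
{
	/* Example only: a HostDrvrConfig6 value as it would sit in the CFG
	 * file (network byte order), converted exactly as the driver does
	 * before the bitfields are pulled apart. */
	uint32_t cfg6_be = htonl(0x03125A00);
	uint32_t cfg6 = ntohl(cfg6_be);

	unsigned ddr_setting = (cfg6 >> 8) & 0x0F;	/* bits 11..8  */
	unsigned power_save  = (cfg6 >> 12) & 0x0F;	/* bits 15..12 */
	unsigned dpll_config = (cfg6 >> 19) & 0x01;	/* bit 19      */
	unsigned mips_config = (cfg6 >> 20) & 0x01;	/* bit 20      */
	unsigned pmu_mode    = (cfg6 >> 24) & 0x03;	/* bits 25..24 */

	printf("DDR=%u PSM=%u DPLL=%u MIPS=%u PMU=%u\n",
	       ddr_setting, power_save, dpll_config, mips_config, pmu_mode);
	return 0;
}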
-
-static void doPowerAutoCorrection(struct bcm_mini_adapter *psAdapter)
-{
- unsigned int reporting_mode;
-
- reporting_mode = ntohl(psAdapter->pstargetparams->m_u32PowerSavingModeOptions) & 0x02;
- psAdapter->bIsAutoCorrectEnabled = !((char)(psAdapter->ulPowerSaveMode >> 3) & 0x1);
-
- if (reporting_mode) {
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "can't do suspend/resume as reporting mode is enabled");
- psAdapter->bDoSuspend = false;
- }
-
- if (psAdapter->bIsAutoCorrectEnabled && (psAdapter->chip_id >= T3LPB)) {
- /* If reporting mode is enabled, switch PMU to PMC */
- psAdapter->ulPowerSaveMode = DEVICE_POWERSAVE_MODE_AS_PMU_CLOCK_GATING;
- psAdapter->bDoSuspend = false;
-
- /* clearing space bit[15..12] */
- psAdapter->pstargetparams->HostDrvrConfig6 &= ~(htonl((0xF << 12)));
- /* placing the power save mode option */
- psAdapter->pstargetparams->HostDrvrConfig6 |= htonl((psAdapter->ulPowerSaveMode << 12));
- } else if (psAdapter->bIsAutoCorrectEnabled == false) {
- /* remove the autocorrect disable bit set before dumping. */
- psAdapter->ulPowerSaveMode &= ~(1 << 3);
- psAdapter->pstargetparams->HostDrvrConfig6 &= ~(htonl(1 << 15));
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Using Forced User Choice: %lx\n", psAdapter->ulPowerSaveMode);
- }
-}
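
doPowerAutoCorrection() edits bits 15..12 of HostDrvrConfig6 while the field stays in network byte order, which is why the mask and the new value both go through htonl() before being applied. A minimal user-space sketch of that read-modify-write follows; the stored value and the chosen mode are arbitrary examples, and this is only an illustration of the masking, not driver code.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	uint32_t cfg6 = htonl(0x0000F500);	/* example stored value, big endian */
	uint32_t mode = 0x2;			/* example power-save mode */

	cfg6 &= ~htonl(0xF << 12);		/* clear bits 15..12 of the BE word */
	cfg6 |= htonl(mode << 12);		/* place the new mode in bits 15..12 */

	printf("stored 0x%08X, host order 0x%08X\n",
	       (unsigned)cfg6, (unsigned)ntohl(cfg6));
	return 0;
}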
-
-static void convertEndian(unsigned char rwFlag, unsigned int *puiBuffer, unsigned int uiByteCount)
-{
- unsigned int uiIndex = 0;
-
- if (RWM_WRITE == rwFlag) {
- for (uiIndex = 0; uiIndex < (uiByteCount/sizeof(unsigned int)); uiIndex++)
- puiBuffer[uiIndex] = htonl(puiBuffer[uiIndex]);
- } else {
- for (uiIndex = 0; uiIndex < (uiByteCount/sizeof(unsigned int)); uiIndex++)
- puiBuffer[uiIndex] = ntohl(puiBuffer[uiIndex]);
- }
-}
-
-int rdm(struct bcm_mini_adapter *Adapter, unsigned int uiAddress, PCHAR pucBuff, size_t sSize)
-{
- return Adapter->interface_rdm(Adapter->pvInterfaceAdapter,
- uiAddress, pucBuff, sSize);
-}
-
-int wrm(struct bcm_mini_adapter *Adapter, unsigned int uiAddress, PCHAR pucBuff, size_t sSize)
-{
- int iRetVal;
-
- iRetVal = Adapter->interface_wrm(Adapter->pvInterfaceAdapter,
- uiAddress, pucBuff, sSize);
- return iRetVal;
-}
-
-int wrmalt(struct bcm_mini_adapter *Adapter, unsigned int uiAddress, unsigned int *pucBuff, size_t size)
-{
- convertEndian(RWM_WRITE, pucBuff, size);
- return wrm(Adapter, uiAddress, (PUCHAR)pucBuff, size);
-}
-
-int rdmalt(struct bcm_mini_adapter *Adapter, unsigned int uiAddress, unsigned int *pucBuff, size_t size)
-{
- int uiRetVal = 0;
-
- uiRetVal = rdm(Adapter, uiAddress, (PUCHAR)pucBuff, size);
- convertEndian(RWM_READ, (unsigned int *)pucBuff, size);
-
- return uiRetVal;
-}
-
-int wrmWithLock(struct bcm_mini_adapter *Adapter, unsigned int uiAddress, PCHAR pucBuff, size_t sSize)
-{
- int status = STATUS_SUCCESS;
-
- down(&Adapter->rdmwrmsync);
-
- if ((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus == TRUE) ||
- (Adapter->bPreparingForLowPowerMode == TRUE)) {
-
- status = -EACCES;
- goto exit;
- }
-
- status = wrm(Adapter, uiAddress, pucBuff, sSize);
-exit:
- up(&Adapter->rdmwrmsync);
- return status;
-}
-
-int wrmaltWithLock(struct bcm_mini_adapter *Adapter, unsigned int uiAddress, unsigned int *pucBuff, size_t size)
-{
- int iRetVal = STATUS_SUCCESS;
-
- down(&Adapter->rdmwrmsync);
-
- if ((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus == TRUE) ||
- (Adapter->bPreparingForLowPowerMode == TRUE)) {
-
- iRetVal = -EACCES;
- goto exit;
- }
-
- iRetVal = wrmalt(Adapter, uiAddress, pucBuff, size);
-exit:
- up(&Adapter->rdmwrmsync);
- return iRetVal;
-}
-
-int rdmaltWithLock(struct bcm_mini_adapter *Adapter, unsigned int uiAddress, unsigned int *pucBuff, size_t size)
-{
- int uiRetVal = STATUS_SUCCESS;
-
- down(&Adapter->rdmwrmsync);
- if ((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus == TRUE) ||
- (Adapter->bPreparingForLowPowerMode == TRUE)) {
-
- uiRetVal = -EACCES;
- goto exit;
- }
-
- uiRetVal = rdmalt(Adapter, uiAddress, pucBuff, size);
-exit:
- up(&Adapter->rdmwrmsync);
- return uiRetVal;
-}
-
-static void HandleShutDownModeWakeup(struct bcm_mini_adapter *Adapter)
-{
- int clear_abort_pattern = 0, Status = 0;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "====>\n");
- /* target has woken up From Shut Down */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "Clearing Shut Down Software abort pattern\n");
- Status = wrmalt(Adapter, SW_ABORT_IDLEMODE_LOC, (unsigned int *)&clear_abort_pattern, sizeof(clear_abort_pattern));
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "WRM to SW_ABORT_IDLEMODE_LOC failed with err:%d", Status);
- return;
- }
-
- if (Adapter->ulPowerSaveMode != DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE) {
- msleep(100);
- InterfaceHandleShutdownModeWakeup(Adapter);
- msleep(100);
- }
-
- if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
- Adapter->DriverState = NO_NETWORK_ENTRY;
- wake_up(&Adapter->LEDInfo.notify_led_event);
- }
-
- Adapter->bTriedToWakeUpFromlowPowerMode = false;
- Adapter->bShutStatus = false;
- wake_up(&Adapter->lowpower_mode_wait_queue);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "<====\n");
-}
-
-static void SendShutModeResponse(struct bcm_mini_adapter *Adapter)
-{
- struct bcm_link_request stShutdownResponse;
- unsigned int NVMAccess = 0, lowPwrAbortMsg = 0;
- unsigned int Status = 0;
-
- memset(&stShutdownResponse, 0, sizeof(struct bcm_link_request));
- stShutdownResponse.Leader.Status = LINK_UP_CONTROL_REQ;
- stShutdownResponse.Leader.PLength = 8; /* 8 bytes; */
- stShutdownResponse.szData[0] = LINK_UP_ACK;
- stShutdownResponse.szData[1] = LINK_SHUTDOWN_REQ_FROM_FIRMWARE;
-
- /*********************************
- * down_trylock -
- * if [ semaphore is available ]
- * acquire the semaphore and return 0;
- * else
- * return non-zero value ;
- *
- ***********************************/
-
- NVMAccess = down_trylock(&Adapter->NVMRdmWrmLock);
- lowPwrAbortMsg = down_trylock(&Adapter->LowPowerModeSync);
-
- if (NVMAccess || lowPwrAbortMsg || atomic_read(&Adapter->TotalPacketCount)) {
- if (!NVMAccess)
- up(&Adapter->NVMRdmWrmLock);
-
- if (!lowPwrAbortMsg)
- up(&Adapter->LowPowerModeSync);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "Device Access is going on NACK the Shut Down MODE\n");
- stShutdownResponse.szData[2] = SHUTDOWN_NACK_FROM_DRIVER; /* NACK- device access is going on. */
- Adapter->bPreparingForLowPowerMode = false;
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "Sending SHUTDOWN MODE ACK\n");
- stShutdownResponse.szData[2] = SHUTDOWN_ACK_FROM_DRIVER; /* ShutDown ACK */
-
- /* Wait for the LED to TURN OFF before sending ACK response */
- if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
- int iRetVal = 0;
-
- /* Wake the LED Thread with LOWPOWER_MODE_ENTER State */
- Adapter->DriverState = LOWPOWER_MODE_ENTER;
- wake_up(&Adapter->LEDInfo.notify_led_event);
-
- /* Wait for 1 SEC for LED to OFF */
- iRetVal = wait_event_timeout(Adapter->LEDInfo.idleModeSyncEvent, Adapter->LEDInfo.bIdle_led_off, msecs_to_jiffies(1000));
-
- /* If Timed Out to Sync IDLE MODE Enter, do IDLE mode Exit and Send NACK to device */
- if (iRetVal <= 0) {
- stShutdownResponse.szData[1] = SHUTDOWN_NACK_FROM_DRIVER; /* NACK- device access is going on. */
- Adapter->DriverState = NO_NETWORK_ENTRY;
- wake_up(&Adapter->LEDInfo.notify_led_event);
- }
- }
-
- if (stShutdownResponse.szData[2] == SHUTDOWN_ACK_FROM_DRIVER) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "ACKING SHUTDOWN MODE !!!!!!!!!");
- down(&Adapter->rdmwrmsync);
- Adapter->bPreparingForLowPowerMode = TRUE;
- up(&Adapter->rdmwrmsync);
- /* Killing all URBS. */
- if (Adapter->bDoSuspend == TRUE)
- Bcm_kill_all_URBs((struct bcm_interface_adapter *)(Adapter->pvInterfaceAdapter));
- } else {
- Adapter->bPreparingForLowPowerMode = false;
- }
-
- if (!NVMAccess)
- up(&Adapter->NVMRdmWrmLock);
-
- if (!lowPwrAbortMsg)
- up(&Adapter->LowPowerModeSync);
- }
-
- Status = CopyBufferToControlPacket(Adapter, &stShutdownResponse);
- if (Status != STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "fail to send the Idle mode Request\n");
- Adapter->bPreparingForLowPowerMode = false;
- StartInterruptUrb((struct bcm_interface_adapter *)(Adapter->pvInterfaceAdapter));
- }
-}
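
The down_trylock() comment above summarises the pattern SendShutModeResponse() relies on: try to take both semaphores without blocking, and if either one is busy, release only the lock that was actually acquired and NACK the shutdown request. Below is a minimal user-space analogue using POSIX trylock, shown purely to illustrate the back-out logic; the lock names are hypothetical and unrelated to the driver's semaphores.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t nvm_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t low_power_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns 0 when both locks were taken; otherwise no lock is held on return. */
static int try_enter_low_power(void)
{
	int nvm_busy = pthread_mutex_trylock(&nvm_lock);	/* 0 == acquired */
	int lp_busy  = pthread_mutex_trylock(&low_power_lock);

	if (nvm_busy || lp_busy) {
		/* Back out whichever lock we did manage to take (NACK path). */
		if (!nvm_busy)
			pthread_mutex_unlock(&nvm_lock);
		if (!lp_busy)
			pthread_mutex_unlock(&low_power_lock);
		return -1;
	}
	return 0;	/* both held: proceed with the ACK path */
}

int main(void)
{
	int nacked = try_enter_low_power();

	printf("low power entry %s\n", nacked ? "NACKed" : "ACKed");
	if (!nacked) {
		pthread_mutex_unlock(&low_power_lock);
		pthread_mutex_unlock(&nvm_lock);
	}
	return 0;
}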
-
-static void HandleShutDownModeRequest(struct bcm_mini_adapter *Adapter, PUCHAR pucBuffer)
-{
- unsigned int uiResetValue = 0;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "====>\n");
-
- if (*(pucBuffer+1) == COMPLETE_WAKE_UP_NOTIFICATION_FRM_FW) {
- HandleShutDownModeWakeup(Adapter);
- } else if (*(pucBuffer+1) == LINK_SHUTDOWN_REQ_FROM_FIRMWARE) {
- /* Target wants to go to Shut Down Mode */
- /* InterfacePrepareForShutdown(Adapter); */
- if (Adapter->chip_id == BCS220_2 ||
- Adapter->chip_id == BCS220_2BC ||
- Adapter->chip_id == BCS250_BC ||
- Adapter->chip_id == BCS220_3) {
-
- rdmalt(Adapter, HPM_CONFIG_MSW, &uiResetValue, 4);
- uiResetValue |= (1<<17);
- wrmalt(Adapter, HPM_CONFIG_MSW, &uiResetValue, 4);
- }
-
- SendShutModeResponse(Adapter);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "ShutDownModeResponse:Notification received: Sending the response(Ack/Nack)\n");
- }
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "<====\n");
-}
-
-void ResetCounters(struct bcm_mini_adapter *Adapter)
-{
- beceem_protocol_reset(Adapter);
- Adapter->CurrNumRecvDescs = 0;
- Adapter->PrevNumRecvDescs = 0;
- Adapter->LinkUpStatus = 0;
- Adapter->LinkStatus = 0;
- atomic_set(&Adapter->cntrlpktCnt, 0);
- atomic_set(&Adapter->TotalPacketCount, 0);
- Adapter->fw_download_done = false;
- Adapter->LinkStatus = 0;
- Adapter->AutoLinkUp = false;
- Adapter->IdleMode = false;
- Adapter->bShutStatus = false;
-}
-
-struct bcm_classifier_rule *GetFragIPClsEntry(struct bcm_mini_adapter *Adapter, USHORT usIpIdentification, ULONG SrcIP)
-{
- unsigned int uiIndex = 0;
-
- for (uiIndex = 0; uiIndex < MAX_FRAGMENTEDIP_CLASSIFICATION_ENTRIES; uiIndex++) {
- if ((Adapter->astFragmentedPktClassifierTable[uiIndex].bUsed) &&
- (Adapter->astFragmentedPktClassifierTable[uiIndex].usIpIdentification == usIpIdentification) &&
- (Adapter->astFragmentedPktClassifierTable[uiIndex].ulSrcIpAddress == SrcIP) &&
- !Adapter->astFragmentedPktClassifierTable[uiIndex].bOutOfOrderFragment)
-
- return Adapter->astFragmentedPktClassifierTable[uiIndex].pstMatchedClassifierEntry;
- }
- return NULL;
-}
-
-void AddFragIPClsEntry(struct bcm_mini_adapter *Adapter, struct bcm_fragmented_packet_info *psFragPktInfo)
-{
- unsigned int uiIndex = 0;
-
- for (uiIndex = 0; uiIndex < MAX_FRAGMENTEDIP_CLASSIFICATION_ENTRIES; uiIndex++) {
- if (!Adapter->astFragmentedPktClassifierTable[uiIndex].bUsed) {
- memcpy(&Adapter->astFragmentedPktClassifierTable[uiIndex], psFragPktInfo, sizeof(struct bcm_fragmented_packet_info));
- break;
- }
- }
-}
-
-void DelFragIPClsEntry(struct bcm_mini_adapter *Adapter, USHORT usIpIdentification, ULONG SrcIp)
-{
- unsigned int uiIndex = 0;
-
- for (uiIndex = 0; uiIndex < MAX_FRAGMENTEDIP_CLASSIFICATION_ENTRIES; uiIndex++) {
- if ((Adapter->astFragmentedPktClassifierTable[uiIndex].bUsed) &&
- (Adapter->astFragmentedPktClassifierTable[uiIndex].usIpIdentification == usIpIdentification) &&
- (Adapter->astFragmentedPktClassifierTable[uiIndex].ulSrcIpAddress == SrcIp))
-
- memset(&Adapter->astFragmentedPktClassifierTable[uiIndex], 0, sizeof(struct bcm_fragmented_packet_info));
- }
-}
-
-void update_per_cid_rx(struct bcm_mini_adapter *Adapter)
-{
- unsigned int qindex = 0;
-
- if ((jiffies - Adapter->liDrainCalculated) < XSECONDS)
- return;
-
- for (qindex = 0; qindex < HiPriority; qindex++) {
- if (Adapter->PackInfo[qindex].ucDirection == 0) {
- Adapter->PackInfo[qindex].uiCurrentRxRate =
- (Adapter->PackInfo[qindex].uiCurrentRxRate +
- Adapter->PackInfo[qindex].uiThisPeriodRxBytes) / 2;
-
- Adapter->PackInfo[qindex].uiThisPeriodRxBytes = 0;
- } else {
- Adapter->PackInfo[qindex].uiCurrentDrainRate =
- (Adapter->PackInfo[qindex].uiCurrentDrainRate +
- Adapter->PackInfo[qindex].uiThisPeriodSentBytes) / 2;
- Adapter->PackInfo[qindex].uiThisPeriodSentBytes = 0;
- }
- }
- Adapter->liDrainCalculated = jiffies;
-}
-
-void update_per_sf_desc_cnts(struct bcm_mini_adapter *Adapter)
-{
- int iIndex = 0;
- u32 uibuff[MAX_TARGET_DSX_BUFFERS];
- int bytes;
-
- if (!atomic_read(&Adapter->uiMBupdate))
- return;
-
- bytes = rdmaltWithLock(Adapter, TARGET_SFID_TXDESC_MAP_LOC, (unsigned int *)uibuff, sizeof(unsigned int) * MAX_TARGET_DSX_BUFFERS);
- if (bytes < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "rdm failed\n");
- return;
- }
-
- for (iIndex = 0; iIndex < HiPriority; iIndex++) {
- if (Adapter->PackInfo[iIndex].bValid && Adapter->PackInfo[iIndex].ucDirection) {
- if (Adapter->PackInfo[iIndex].usVCID_Value < MAX_TARGET_DSX_BUFFERS)
- atomic_set(&Adapter->PackInfo[iIndex].uiPerSFTxResourceCount, uibuff[Adapter->PackInfo[iIndex].usVCID_Value]);
- else
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Invalid VCID : %x\n", Adapter->PackInfo[iIndex].usVCID_Value);
- }
- }
- atomic_set(&Adapter->uiMBupdate, false);
-}
-
-void flush_queue(struct bcm_mini_adapter *Adapter, unsigned int iQIndex)
-{
- struct sk_buff *PacketToDrop = NULL;
- struct net_device_stats *netstats = &Adapter->dev->stats;
-
- spin_lock_bh(&Adapter->PackInfo[iQIndex].SFQueueLock);
-
- while (Adapter->PackInfo[iQIndex].FirstTxQueue && atomic_read(&Adapter->TotalPacketCount)) {
- PacketToDrop = Adapter->PackInfo[iQIndex].FirstTxQueue;
- if (PacketToDrop && PacketToDrop->len) {
- netstats->tx_dropped++;
- DEQUEUEPACKET(Adapter->PackInfo[iQIndex].FirstTxQueue, Adapter->PackInfo[iQIndex].LastTxQueue);
- Adapter->PackInfo[iQIndex].uiCurrentPacketsOnHost--;
- Adapter->PackInfo[iQIndex].uiCurrentBytesOnHost -= PacketToDrop->len;
-
- /* Adding dropped statistics */
- Adapter->PackInfo[iQIndex].uiDroppedCountBytes += PacketToDrop->len;
- Adapter->PackInfo[iQIndex].uiDroppedCountPackets++;
- dev_kfree_skb(PacketToDrop);
- atomic_dec(&Adapter->TotalPacketCount);
- }
- }
- spin_unlock_bh(&Adapter->PackInfo[iQIndex].SFQueueLock);
-}
-
-static void beceem_protocol_reset(struct bcm_mini_adapter *Adapter)
-{
- int i;
-
- if (netif_msg_link(Adapter))
- pr_notice(PFX "%s: protocol reset\n", Adapter->dev->name);
-
- netif_carrier_off(Adapter->dev);
- netif_stop_queue(Adapter->dev);
-
- Adapter->IdleMode = false;
- Adapter->LinkUpStatus = false;
- ClearTargetDSXBuffer(Adapter, 0, TRUE);
- /* Delete All Classifier Rules */
-
- for (i = 0; i < HiPriority; i++)
- DeleteAllClassifiersForSF(Adapter, i);
-
- flush_all_queues(Adapter);
-
- if (Adapter->TimerActive == TRUE)
- Adapter->TimerActive = false;
-
- memset(Adapter->astFragmentedPktClassifierTable, 0, sizeof(struct bcm_fragmented_packet_info) * MAX_FRAGMENTEDIP_CLASSIFICATION_ENTRIES);
-
- for (i = 0; i < HiPriority; i++) {
- /* resetting only the first size (S_MIBS_SERVICEFLOW_TABLE) for the SF. */
- /* It is same between MIBs and SF. */
- memset(&Adapter->PackInfo[i].stMibsExtServiceFlowTable, 0, sizeof(struct bcm_mibs_parameters));
- }
-}
diff --git a/drivers/staging/bcm/PHSDefines.h b/drivers/staging/bcm/PHSDefines.h
deleted file mode 100644
index cd78ee4ffa2..00000000000
--- a/drivers/staging/bcm/PHSDefines.h
+++ /dev/null
@@ -1,94 +0,0 @@
-#ifndef BCM_PHS_DEFINES_H
-#define BCM_PHS_DEFINES_H
-
-#define PHS_INVALID_TABLE_INDEX 0xffffffff
-#define PHS_MEM_TAG "_SHP"
-
-/* PHS Defines */
-#define STATUS_PHS_COMPRESSED 0xa1
-#define STATUS_PHS_NOCOMPRESSION 0xa2
-#define APPLY_PHS 1
-#define MAX_NO_BIT 7
-#define ZERO_PHSI 0
-#define VERIFY 0
-#define SIZE_MULTIPLE_32 4
-#define UNCOMPRESSED_PACKET 0
-#define DYNAMIC 0
-#define SUPPRESS 0x80
-#define NO_CLASSIFIER_MATCH 0
-#define SEND_PACKET_UNCOMPRESSED 0
-#define PHSI_IS_ZERO 0
-#define PHSI_LEN 1
-#define ERROR_LEN 0
-#define PHS_BUFFER_SIZE 1532
-#define MAX_PHSRULE_PER_SF 20
-#define MAX_SERVICEFLOWS 17
-
-/* PHS Error Defines */
-#define PHS_SUCCESS 0
-#define ERR_PHS_INVALID_DEVICE_EXETENSION 0x800
-#define ERR_PHS_INVALID_PHS_RULE 0x801
-#define ERR_PHS_RULE_ALREADY_EXISTS 0x802
-#define ERR_SF_MATCH_FAIL 0x803
-#define ERR_INVALID_CLASSIFIERTABLE_FOR_SF 0x804
-#define ERR_SFTABLE_FULL 0x805
-#define ERR_CLSASSIFIER_TABLE_FULL 0x806
-#define ERR_PHSRULE_MEMALLOC_FAIL 0x807
-#define ERR_CLSID_MATCH_FAIL 0x808
-#define ERR_PHSRULE_MATCH_FAIL 0x809
-
-struct bcm_phs_rule {
- u8 u8PHSI;
- u8 u8PHSFLength;
- u8 u8PHSF[MAX_PHS_LENGTHS];
- u8 u8PHSMLength;
- u8 u8PHSM[MAX_PHS_LENGTHS];
- u8 u8PHSS;
- u8 u8PHSV;
- u8 u8RefCnt;
- u8 bUnclassifiedPHSRule;
- u8 u8Reserved[3];
- long PHSModifiedBytes;
- unsigned long PHSModifiedNumPackets;
- unsigned long PHSErrorNumPackets;
-};
-
-enum bcm_phs_classifier_context {
- eActiveClassifierRuleContext,
- eOldClassifierRuleContext
-};
-
-struct bcm_phs_classifier_entry {
- u8 bUsed;
- u16 uiClassifierRuleId;
- u8 u8PHSI;
- struct bcm_phs_rule *pstPhsRule;
- u8 bUnclassifiedPHSRule;
-};
-
-struct bcm_phs_classifier_table {
- u16 uiTotalClassifiers;
- struct bcm_phs_classifier_entry stActivePhsRulesList[MAX_PHSRULE_PER_SF];
- struct bcm_phs_classifier_entry stOldPhsRulesList[MAX_PHSRULE_PER_SF];
- u16 uiOldestPhsRuleIndex;
-};
-
-struct bcm_phs_entry {
- u8 bUsed;
- u16 uiVcid;
- struct bcm_phs_classifier_table *pstClassifierTable;
-};
-
-struct bcm_phs_table {
- u16 uiTotalServiceFlows;
- struct bcm_phs_entry stSFList[MAX_SERVICEFLOWS];
-};
-
-struct bcm_phs_extension {
- /* PHS Specific data */
- struct bcm_phs_table *pstServiceFlowPhsRulesTable;
- void *CompressedTxBuffer;
- void *UnCompressedRxBuffer;
-};
-
-#endif
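
The tables above nest as follows: one bcm_phs_table holds MAX_SERVICEFLOWS bcm_phs_entry slots, each used entry owns a bcm_phs_classifier_table with an active and an old list of MAX_PHSRULE_PER_SF bcm_phs_classifier_entry slots, and each classifier entry points at a reference-counted, heap-allocated bcm_phs_rule. A tiny standalone sketch of the resulting fixed slot count (plain arithmetic, no driver structures):

#include <stdio.h>

#define MAX_PHSRULE_PER_SF	20
#define MAX_SERVICEFLOWS	17

int main(void)
{
	/* Active plus old classifier lists per service flow. */
	int slots_per_flow = 2 * MAX_PHSRULE_PER_SF;
	int total_slots = MAX_SERVICEFLOWS * slots_per_flow;

	printf("%d classifier slots per flow, %d in total\n",
	       slots_per_flow, total_slots);	/* 40 and 680 */
	return 0;
}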
diff --git a/drivers/staging/bcm/PHSModule.c b/drivers/staging/bcm/PHSModule.c
deleted file mode 100644
index 5f4e503d54e..00000000000
--- a/drivers/staging/bcm/PHSModule.c
+++ /dev/null
@@ -1,1703 +0,0 @@
-#include "headers.h"
-
-static UINT CreateSFToClassifierRuleMapping(B_UINT16 uiVcid,
- B_UINT16 uiClsId,
- struct bcm_phs_table *psServiceFlowTable,
- struct bcm_phs_rule *psPhsRule,
- B_UINT8 u8AssociatedPHSI);
-
-static UINT CreateClassiferToPHSRuleMapping(B_UINT16 uiVcid,
- B_UINT16 uiClsId,
- struct bcm_phs_entry *pstServiceFlowEntry,
- struct bcm_phs_rule *psPhsRule,
- B_UINT8 u8AssociatedPHSI);
-
-static UINT CreateClassifierPHSRule(B_UINT16 uiClsId,
- struct bcm_phs_classifier_table *psaClassifiertable,
- struct bcm_phs_rule *psPhsRule,
- enum bcm_phs_classifier_context eClsContext,
- B_UINT8 u8AssociatedPHSI);
-
-static UINT UpdateClassifierPHSRule(B_UINT16 uiClsId,
- struct bcm_phs_classifier_entry *pstClassifierEntry,
- struct bcm_phs_classifier_table *psaClassifiertable,
- struct bcm_phs_rule *psPhsRule,
- B_UINT8 u8AssociatedPHSI);
-
-static bool ValidatePHSRuleComplete(const struct bcm_phs_rule *psPhsRule);
-
-static bool DerefPhsRule(B_UINT16 uiClsId,
- struct bcm_phs_classifier_table *psaClassifiertable,
- struct bcm_phs_rule *pstPhsRule);
-
-static UINT GetClassifierEntry(struct bcm_phs_classifier_table *pstClassifierTable,
- B_UINT32 uiClsid,
- enum bcm_phs_classifier_context eClsContext,
- struct bcm_phs_classifier_entry **ppstClassifierEntry);
-
-static UINT GetPhsRuleEntry(struct bcm_phs_classifier_table *pstClassifierTable,
- B_UINT32 uiPHSI,
- enum bcm_phs_classifier_context eClsContext,
- struct bcm_phs_rule **ppstPhsRule);
-
-static void free_phs_serviceflow_rules(struct bcm_phs_table *psServiceFlowRulesTable);
-
-static int phs_compress(struct bcm_phs_rule *phs_members,
- unsigned char *in_buf,
- unsigned char *out_buf,
- unsigned int *header_size,
- UINT *new_header_size);
-
-static int verify_suppress_phsf(unsigned char *in_buffer,
- unsigned char *out_buffer,
- unsigned char *phsf,
- unsigned char *phsm,
- unsigned int phss,
- unsigned int phsv,
- UINT *new_header_size);
-
-static int phs_decompress(unsigned char *in_buf,
- unsigned char *out_buf,
- struct bcm_phs_rule *phs_rules,
- UINT *header_size);
-
-static ULONG PhsCompress(void *pvContext,
- B_UINT16 uiVcid,
- B_UINT16 uiClsId,
- void *pvInputBuffer,
- void *pvOutputBuffer,
- UINT *pOldHeaderSize,
- UINT *pNewHeaderSize);
-
-static ULONG PhsDeCompress(void *pvContext,
- B_UINT16 uiVcid,
- void *pvInputBuffer,
- void *pvOutputBuffer,
- UINT *pInHeaderSize,
- UINT *pOutHeaderSize);
-
-#define IN
-#define OUT
-
-/*
- * Function: PHSTransmit
- * Description: This routine handles PHS (Payload Header Suppression) for the Tx path.
- * It extracts a fragment of the NDIS_PACKET containing the header
- * to be suppressed. It then suppresses the header by invoking PHS exported compress routine.
- * The header data after suppression is copied back to the NDIS_PACKET.
- *
- * Input parameters: IN struct bcm_mini_adapter *Adapter - Miniport Adapter Context
- * IN Packet - NDIS packet containing data to be transmitted
- * IN USHORT Vcid - VCID pertaining to the connection on which the packet is being sent. Used to
- * identify the PHS rule to be applied.
- * B_UINT16 uiClassifierRuleID - Classifier Rule ID
- * BOOLEAN bHeaderSuppressionEnabled - indicates if header suppression is enabled for the SF.
- *
- * Return: STATUS_SUCCESS - If the send was successful.
- * Other - If an error occurred.
- */
-
-int PHSTransmit(struct bcm_mini_adapter *Adapter,
- struct sk_buff **pPacket,
- USHORT Vcid,
- B_UINT16 uiClassifierRuleID,
- bool bHeaderSuppressionEnabled,
- UINT *PacketLen,
- UCHAR bEthCSSupport)
-{
- /* PHS Specific */
- UINT unPHSPktHdrBytesCopied = 0;
- UINT unPhsOldHdrSize = 0;
- UINT unPHSNewPktHeaderLen = 0;
- /* Pointer to PHS IN Hdr Buffer */
- PUCHAR pucPHSPktHdrInBuf =
- Adapter->stPhsTxContextInfo.ucaHdrSuppressionInBuf;
- /* Pointer to PHS OUT Hdr Buffer */
- PUCHAR pucPHSPktHdrOutBuf =
- Adapter->stPhsTxContextInfo.ucaHdrSuppressionOutBuf;
- UINT usPacketType;
- UINT BytesToRemove = 0;
- bool bPHSI = 0;
- LONG ulPhsStatus = 0;
- UINT numBytesCompressed = 0;
- struct sk_buff *newPacket = NULL;
- struct sk_buff *Packet = *pPacket;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL,
- "In PHSTransmit");
-
- if (!bEthCSSupport)
- BytesToRemove = ETH_HLEN;
- /*
- * Accumulate the header up to the size for which we support
- * suppression from the NDIS packet.
- */
-
- usPacketType = ((struct ethhdr *)(Packet->data))->h_proto;
-
- pucPHSPktHdrInBuf = Packet->data + BytesToRemove;
- /* considering data after ethernet header */
- if ((*PacketLen - BytesToRemove) < MAX_PHS_LENGTHS)
- unPHSPktHdrBytesCopied = (*PacketLen - BytesToRemove);
- else
- unPHSPktHdrBytesCopied = MAX_PHS_LENGTHS;
-
- if ((unPHSPktHdrBytesCopied > 0) &&
- (unPHSPktHdrBytesCopied <= MAX_PHS_LENGTHS)) {
-
- /*
- * Step 2 Suppress Header using PHS and fill into intermediate
- * ucaPHSPktHdrOutBuf.
- * Suppress only if IP Header and PHS Enabled For the
- * Service Flow
- */
- if (((usPacketType == ETHERNET_FRAMETYPE_IPV4) ||
- (usPacketType == ETHERNET_FRAMETYPE_IPV6)) &&
- (bHeaderSuppressionEnabled)) {
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND,
- DBG_LVL_ALL,
- "\nTrying to PHS Compress Using Classifier rule 0x%X",
- uiClassifierRuleID);
- unPHSNewPktHeaderLen = unPHSPktHdrBytesCopied;
- ulPhsStatus = PhsCompress(&Adapter->stBCMPhsContext,
- Vcid,
- uiClassifierRuleID,
- pucPHSPktHdrInBuf,
- pucPHSPktHdrOutBuf,
- &unPhsOldHdrSize,
- &unPHSNewPktHeaderLen);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND,
- DBG_LVL_ALL,
- "\nPHS Old header Size : %d New Header Size %d\n",
- unPhsOldHdrSize, unPHSNewPktHeaderLen);
-
- if (unPHSNewPktHeaderLen == unPhsOldHdrSize) {
-
- if (ulPhsStatus == STATUS_PHS_COMPRESSED)
- bPHSI = *pucPHSPktHdrOutBuf;
-
- ulPhsStatus = STATUS_PHS_NOCOMPRESSION;
- }
-
- if (ulPhsStatus == STATUS_PHS_COMPRESSED) {
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- PHS_SEND, DBG_LVL_ALL,
- "PHS Sending packet Compressed");
-
- if (skb_cloned(Packet)) {
- newPacket =
- skb_copy(Packet, GFP_ATOMIC);
-
- if (newPacket == NULL)
- return STATUS_FAILURE;
-
- dev_kfree_skb(Packet);
- *pPacket = Packet = newPacket;
- pucPHSPktHdrInBuf =
- Packet->data + BytesToRemove;
- }
-
- numBytesCompressed = unPhsOldHdrSize -
- (unPHSNewPktHeaderLen + PHSI_LEN);
-
- memcpy(pucPHSPktHdrInBuf + numBytesCompressed,
- pucPHSPktHdrOutBuf,
- unPHSNewPktHeaderLen + PHSI_LEN);
- memcpy(Packet->data + numBytesCompressed,
- Packet->data, BytesToRemove);
- skb_pull(Packet, numBytesCompressed);
-
- return STATUS_SUCCESS;
- } else {
- /* if one byte headroom is not available,
- * increase it through skb_cow
- */
- if (!(skb_headroom(Packet) > 0)) {
-
- if (skb_cow(Packet, 1)) {
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_PRINTK,
- 0, 0,
- "SKB Cow Failed\n");
- return STATUS_FAILURE;
- }
- }
- skb_push(Packet, 1);
-
- /*
- * CAUTION: The MAC header is getting corrupted
- * here for IP CS - it could be saved by copying 14
- * bytes, but that is not needed, hence corrupting it.
- */
- *(Packet->data + BytesToRemove) = bPHSI;
- return STATUS_SUCCESS;
- }
- } else {
-
- if (!bHeaderSuppressionEnabled)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- PHS_SEND, DBG_LVL_ALL,
- "\nHeader Suppression Disabled For SF: No PHS\n");
-
- return STATUS_SUCCESS;
- }
- }
-
- /* BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL,
- * "PHSTransmit : Dumping data packet After PHS"); */
- return STATUS_SUCCESS;
-}
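
In the compressed branch above, the suppressed header produced by PhsCompress() plus a single PHSI byte replaces the original header bytes, and the packet is then pulled forward by the difference. The standalone sketch below mirrors only the numBytesCompressed arithmetic, with made-up header sizes.

#include <stdio.h>

#define PHSI_LEN 1	/* one-byte PHS index sent along with the compressed header */

int main(void)
{
	unsigned old_hdr = 20;	/* example: header size before suppression */
	unsigned new_hdr = 12;	/* example: header size after suppression */

	/* Bytes removed from the packet, as in PHSTransmit() above. */
	unsigned saved = old_hdr - (new_hdr + PHSI_LEN);

	printf("header shrinks by %u bytes; the PHSI byte adds %u back\n",
	       saved, PHSI_LEN);
	return 0;
}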
-
-int PHSReceive(struct bcm_mini_adapter *Adapter,
- USHORT usVcid,
- struct sk_buff *packet,
- UINT *punPacketLen,
- UCHAR *pucEthernetHdr,
- UINT bHeaderSuppressionEnabled)
-{
- u32 nStandardPktHdrLen = 0;
- u32 nTotalsuppressedPktHdrBytes = 0;
- int ulPhsStatus = 0;
- PUCHAR pucInBuff = NULL;
- UINT TotalBytesAdded = 0;
-
- if (!bHeaderSuppressionEnabled) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE,
- DBG_LVL_ALL,
- "\nPhs Disabled for incoming packet");
- return ulPhsStatus;
- }
-
- pucInBuff = packet->data;
-
- /* Restore PHS suppressed header */
- nStandardPktHdrLen = packet->len;
- ulPhsStatus = PhsDeCompress(&Adapter->stBCMPhsContext,
- usVcid,
- pucInBuff,
- Adapter->ucaPHSPktRestoreBuf,
- &nTotalsuppressedPktHdrBytes,
- &nStandardPktHdrLen);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE, DBG_LVL_ALL,
- "\nSuppressed PktHdrLen : 0x%x Restored PktHdrLen : 0x%x",
- nTotalsuppressedPktHdrBytes, nStandardPktHdrLen);
-
- if (ulPhsStatus != STATUS_PHS_COMPRESSED) {
- skb_pull(packet, 1);
- return STATUS_SUCCESS;
- } else {
- TotalBytesAdded = nStandardPktHdrLen -
- nTotalsuppressedPktHdrBytes - PHSI_LEN;
-
- if (TotalBytesAdded) {
- if (skb_headroom(packet) >= (SKB_RESERVE_ETHERNET_HEADER + TotalBytesAdded))
- skb_push(packet, TotalBytesAdded);
- else {
- if (skb_cow(packet, skb_headroom(packet) + TotalBytesAdded)) {
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_PRINTK, 0, 0,
- "cow failed in receive\n");
- return STATUS_FAILURE;
- }
-
- skb_push(packet, TotalBytesAdded);
- }
- }
-
- memcpy(packet->data, Adapter->ucaPHSPktRestoreBuf,
- nStandardPktHdrLen);
- }
-
- return STATUS_SUCCESS;
-}
-
-void DumpFullPacket(UCHAR *pBuf, UINT nPktLen)
-{
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL,
- "Dumping Data Packet");
- BCM_DEBUG_PRINT_BUFFER(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL,
- pBuf, nPktLen);
-}
-
-/*
- * Procedure: phs_init
- *
- * Description: This routine is responsible for allocating memory for classifier
- * and PHS rules.
- *
- * Arguments:
- * pPhsdeviceExtension - ptr to Device extension containing PHS Classifier rules
- * and PHS Rules, RX and TX buffers, etc.
- *
- * Returns:
- * STATUS_SUCCESS - if allocation of memory was successful.
- * Negative error code - if allocation of memory fails.
- */
-int phs_init(struct bcm_phs_extension *pPhsdeviceExtension,
- struct bcm_mini_adapter *Adapter)
-{
- int i;
- struct bcm_phs_table *pstServiceFlowTable;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL,
- "\nPHS:phs_init function");
-
- if (pPhsdeviceExtension->pstServiceFlowPhsRulesTable)
- return -EINVAL;
-
- pPhsdeviceExtension->pstServiceFlowPhsRulesTable =
- kzalloc(sizeof(struct bcm_phs_table), GFP_KERNEL);
-
- if (!pPhsdeviceExtension->pstServiceFlowPhsRulesTable) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH,
- DBG_LVL_ALL,
- "\nAllocation ServiceFlowPhsRulesTable failed");
- return -ENOMEM;
- }
-
- pstServiceFlowTable = pPhsdeviceExtension->pstServiceFlowPhsRulesTable;
- for (i = 0; i < MAX_SERVICEFLOWS; i++) {
- struct bcm_phs_entry sServiceFlow =
- pstServiceFlowTable->stSFList[i];
- sServiceFlow.pstClassifierTable =
- kzalloc(sizeof(struct bcm_phs_classifier_table),
- GFP_KERNEL);
- if (!sServiceFlow.pstClassifierTable) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH,
- DBG_LVL_ALL, "\nAllocation failed");
- free_phs_serviceflow_rules(pPhsdeviceExtension->pstServiceFlowPhsRulesTable);
- pPhsdeviceExtension->pstServiceFlowPhsRulesTable = NULL;
- return -ENOMEM;
- }
- }
-
- pPhsdeviceExtension->CompressedTxBuffer = kmalloc(PHS_BUFFER_SIZE, GFP_KERNEL);
- if (pPhsdeviceExtension->CompressedTxBuffer == NULL) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH,
- DBG_LVL_ALL, "\nAllocation failed");
- free_phs_serviceflow_rules(pPhsdeviceExtension->pstServiceFlowPhsRulesTable);
- pPhsdeviceExtension->pstServiceFlowPhsRulesTable = NULL;
- return -ENOMEM;
- }
-
- pPhsdeviceExtension->UnCompressedRxBuffer =
- kmalloc(PHS_BUFFER_SIZE, GFP_KERNEL);
- if (pPhsdeviceExtension->UnCompressedRxBuffer == NULL) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH,
- DBG_LVL_ALL, "\nAllocation failed");
- kfree(pPhsdeviceExtension->CompressedTxBuffer);
- free_phs_serviceflow_rules(pPhsdeviceExtension->pstServiceFlowPhsRulesTable);
- pPhsdeviceExtension->pstServiceFlowPhsRulesTable = NULL;
- return -ENOMEM;
- }
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL,
- "\n phs_init Successful");
- return STATUS_SUCCESS;
-}
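
phs_init() follows the usual allocate-everything-or-roll-back pattern: when a later allocation fails, it frees whatever earlier allocations succeeded and returns -ENOMEM. A minimal user-space sketch of the same pattern with two plain malloc() calls follows; the buffer size is borrowed from PHS_BUFFER_SIZE, but the sketch is otherwise unrelated to the driver.

#include <stdio.h>
#include <stdlib.h>

#define BUF_SIZE 1532	/* same value as PHS_BUFFER_SIZE above */

/* Allocate both buffers, or free the first one and fail cleanly. */
static int init_buffers(void **tx, void **rx)
{
	*tx = malloc(BUF_SIZE);
	if (!*tx)
		return -1;

	*rx = malloc(BUF_SIZE);
	if (!*rx) {
		free(*tx);	/* roll back the earlier allocation */
		*tx = NULL;
		return -1;
	}
	return 0;
}

int main(void)
{
	void *tx, *rx;

	if (init_buffers(&tx, &rx)) {
		fprintf(stderr, "allocation failed\n");
		return 1;
	}
	free(rx);
	free(tx);
	return 0;
}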
-
-int PhsCleanup(IN struct bcm_phs_extension *pPHSDeviceExt)
-{
- if (pPHSDeviceExt->pstServiceFlowPhsRulesTable) {
- free_phs_serviceflow_rules(pPHSDeviceExt->pstServiceFlowPhsRulesTable);
- pPHSDeviceExt->pstServiceFlowPhsRulesTable = NULL;
- }
-
- kfree(pPHSDeviceExt->CompressedTxBuffer);
- pPHSDeviceExt->CompressedTxBuffer = NULL;
-
- kfree(pPHSDeviceExt->UnCompressedRxBuffer);
- pPHSDeviceExt->UnCompressedRxBuffer = NULL;
-
- return 0;
-}
-
-/*
- * PHS functions
- * PhsUpdateClassifierRule
- *
- * Routine Description:
- * Exported function to add or modify a PHS Rule.
- *
- * Arguments:
- * IN void* pvContext - PHS Driver Specific Context
- * IN B_UINT16 uiVcid - The Service Flow ID for which the PHS rule applies
- * IN B_UINT16 uiClsId - The Classifier ID within the Service Flow for which the PHS rule applies.
- * IN struct bcm_phs_rule *psPhsRule - The PHS Rule structure to be added to the PHS Rule table.
- *
- * Return Value:
- *
- * 0 if successful,
- * >0 Error.
- */
-ULONG PhsUpdateClassifierRule(IN void *pvContext,
- IN B_UINT16 uiVcid ,
- IN B_UINT16 uiClsId ,
- IN struct bcm_phs_rule *psPhsRule,
- IN B_UINT8 u8AssociatedPHSI)
-{
- ULONG lStatus = 0;
- UINT nSFIndex = 0;
- struct bcm_phs_entry *pstServiceFlowEntry = NULL;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- struct bcm_phs_extension *pDeviceExtension =
- (struct bcm_phs_extension *)pvContext;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL,
- "PHS With Corr2 Changes\n");
-
- if (pDeviceExtension == NULL) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH,
- DBG_LVL_ALL, "Invalid Device Extension\n");
- return ERR_PHS_INVALID_DEVICE_EXETENSION;
- }
-
- if (u8AssociatedPHSI == 0)
- return ERR_PHS_INVALID_PHS_RULE;
-
- /* Retrieve the SFID Entry Index for requested Service Flow */
- nSFIndex = GetServiceFlowEntry(pDeviceExtension->pstServiceFlowPhsRulesTable,
- uiVcid, &pstServiceFlowEntry);
-
- if (nSFIndex == PHS_INVALID_TABLE_INDEX) {
- /* This is a new SF. Create a mapping entry for this */
- lStatus = CreateSFToClassifierRuleMapping(uiVcid, uiClsId,
- pDeviceExtension->pstServiceFlowPhsRulesTable,
- psPhsRule,
- u8AssociatedPHSI);
- return lStatus;
- }
-
- /* SF already exists; add the PHS Rule to the existing SF */
- lStatus = CreateClassiferToPHSRuleMapping(uiVcid, uiClsId,
- pstServiceFlowEntry,
- psPhsRule,
- u8AssociatedPHSI);
-
- return lStatus;
-}
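
A rule handed to PhsUpdateClassifierRule() only becomes usable for compression once ValidatePHSRuleComplete() (further down in this file) sees a non-zero PHSI, PHSS and PHSF length. The sketch below uses a stripped-down stand-in structure with invented field names to show what a minimally complete rule looks like; it is an illustration of the completeness check, not the driver's struct bcm_phs_rule.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Stand-in keeping only the fields that the completeness check looks at. */
struct phs_rule_sketch {
	uint8_t phsi;		/* rule index (u8PHSI), must be non-zero  */
	uint8_t phss;		/* bytes suppressed on the wire (u8PHSS)  */
	uint8_t phsf_len;	/* header template length (u8PHSFLength)  */
	uint8_t phsf[64];	/* header template bytes (u8PHSF)         */
};

/* Mirrors ValidatePHSRuleComplete(): PHSI, PHSS and PHSF length all set. */
static int rule_complete(const struct phs_rule_sketch *r)
{
	return r && r->phsi && r->phss && r->phsf_len;
}

int main(void)
{
	struct phs_rule_sketch r;

	memset(&r, 0, sizeof(r));
	r.phsi = 1;
	r.phss = 12;
	r.phsf_len = 12;
	memset(r.phsf, 0xAA, r.phsf_len);	/* arbitrary example template */

	printf("rule is %s\n", rule_complete(&r) ? "complete" : "incomplete");
	return 0;
}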
-
-/*
- * PhsDeletePHSRule
- *
- * Routine Description:
- * Deletes the specified phs Rule within Vcid
- *
- * Arguments:
- * IN void* pvContext - PHS Driver Specific Context
- * IN B_UINT16 uiVcid - The Service Flow ID for which the PHS rule applies
- * IN B_UINT8 u8PHSI - the PHS Index identifying PHS rule to be deleted.
- *
- * Return Value:
- *
- * 0 if successful,
- * >0 Error.
- */
-ULONG PhsDeletePHSRule(IN void *pvContext,
- IN B_UINT16 uiVcid,
- IN B_UINT8 u8PHSI)
-{
- UINT nSFIndex = 0, nClsidIndex = 0;
- struct bcm_phs_entry *pstServiceFlowEntry = NULL;
- struct bcm_phs_classifier_table *pstClassifierRulesTable = NULL;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- struct bcm_phs_extension *pDeviceExtension = (struct bcm_phs_extension *)pvContext;
- struct bcm_phs_classifier_entry *curr_entry;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL,
- "======>\n");
-
- if (pDeviceExtension) {
- /* Retrieve the SFID Entry Index for requested Service Flow */
- nSFIndex = GetServiceFlowEntry(pDeviceExtension->pstServiceFlowPhsRulesTable,
- uiVcid, &pstServiceFlowEntry);
-
- if (nSFIndex == PHS_INVALID_TABLE_INDEX) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH,
- DBG_LVL_ALL, "SFID Match Failed\n");
- return ERR_SF_MATCH_FAIL;
- }
-
- pstClassifierRulesTable = pstServiceFlowEntry->pstClassifierTable;
- if (pstClassifierRulesTable) {
- for (nClsidIndex = 0; nClsidIndex < MAX_PHSRULE_PER_SF; nClsidIndex++) {
- curr_entry = &pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex];
- if (curr_entry->bUsed &&
- curr_entry->pstPhsRule &&
- (curr_entry->pstPhsRule->u8PHSI == u8PHSI)) {
-
- if (curr_entry->pstPhsRule->u8RefCnt)
- curr_entry->pstPhsRule->u8RefCnt--;
-
- if (0 == curr_entry->pstPhsRule->u8RefCnt)
- kfree(curr_entry->pstPhsRule);
-
- memset(curr_entry,
- 0,
- sizeof(struct bcm_phs_classifier_entry));
- }
- }
- }
- }
- return 0;
-}
-
-/*
- * PhsDeleteClassifierRule
- *
- * Routine Description:
- * Exported function to Delete a PHS Rule for the SFID,CLSID Pair.
- *
- * Arguments:
- * IN void* pvContext - PHS Driver Specific Context
- * IN B_UINT16 uiVcid - The Service Flow ID for which the PHS rule applies
- * IN B_UINT16 uiClsId - The Classifier ID within the Service Flow for which the PHS rule applies.
- *
- * Return Value:
- *
- * 0 if successful,
- * >0 Error.
- */
-ULONG PhsDeleteClassifierRule(IN void *pvContext,
- IN B_UINT16 uiVcid,
- IN B_UINT16 uiClsId)
-{
- UINT nSFIndex = 0, nClsidIndex = 0;
- struct bcm_phs_entry *pstServiceFlowEntry = NULL;
- struct bcm_phs_classifier_entry *pstClassifierEntry = NULL;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- struct bcm_phs_extension *pDeviceExtension =
- (struct bcm_phs_extension *)pvContext;
-
- if (!pDeviceExtension)
- goto out;
-
- /* Retrieve the SFID Entry Index for requested Service Flow */
- nSFIndex = GetServiceFlowEntry(pDeviceExtension->pstServiceFlowPhsRulesTable,
- uiVcid, &pstServiceFlowEntry);
- if (nSFIndex == PHS_INVALID_TABLE_INDEX) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH,
- DBG_LVL_ALL, "SFID Match Failed\n");
- return ERR_SF_MATCH_FAIL;
- }
-
- nClsidIndex =
- GetClassifierEntry(pstServiceFlowEntry->pstClassifierTable,
- uiClsId,
- eActiveClassifierRuleContext,
- &pstClassifierEntry);
-
- if ((nClsidIndex != PHS_INVALID_TABLE_INDEX) &&
- (!pstClassifierEntry->bUnclassifiedPHSRule)) {
- if (pstClassifierEntry->pstPhsRule) {
- if (pstClassifierEntry->pstPhsRule->u8RefCnt)
- pstClassifierEntry->pstPhsRule->u8RefCnt--;
-
- if (0 == pstClassifierEntry->pstPhsRule->u8RefCnt)
- kfree(pstClassifierEntry->pstPhsRule);
- }
- memset(pstClassifierEntry, 0,
- sizeof(struct bcm_phs_classifier_entry));
- }
-
- nClsidIndex =
- GetClassifierEntry(pstServiceFlowEntry->pstClassifierTable,
- uiClsId,
- eOldClassifierRuleContext,
- &pstClassifierEntry);
-
- if ((nClsidIndex != PHS_INVALID_TABLE_INDEX) &&
- (!pstClassifierEntry->bUnclassifiedPHSRule)) {
- kfree(pstClassifierEntry->pstPhsRule);
- memset(pstClassifierEntry, 0,
- sizeof(struct bcm_phs_classifier_entry));
- }
-
-out:
- return 0;
-}
-
-/*
- * PhsDeleteSFRules
- *
- * Routine Description:
- * Exported function to delete all PHS Rules for the SFID.
- *
- * Arguments:
- * IN void* pvContext - PHS Driver Specific Context
- * IN B_UINT16 uiVcid - The Service Flow ID for which the PHS rules need to be deleted
- *
- * Return Value:
- *
- * 0 if successful,
- * >0 Error.
- */
-ULONG PhsDeleteSFRules(IN void *pvContext, IN B_UINT16 uiVcid)
-{
- UINT nSFIndex = 0, nClsidIndex = 0;
- struct bcm_phs_entry *pstServiceFlowEntry = NULL;
- struct bcm_phs_classifier_table *pstClassifierRulesTable = NULL;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- struct bcm_phs_extension *pDeviceExtension =
- (struct bcm_phs_extension *)pvContext;
- struct bcm_phs_classifier_entry *curr_clsf_entry;
- struct bcm_phs_classifier_entry *curr_rules_list;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL,
- "====>\n");
-
- if (!pDeviceExtension)
- goto out;
-
- /* Retrieve the SFID Entry Index for requested Service Flow */
- nSFIndex = GetServiceFlowEntry(pDeviceExtension->pstServiceFlowPhsRulesTable,
- uiVcid, &pstServiceFlowEntry);
- if (nSFIndex == PHS_INVALID_TABLE_INDEX) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH,
- DBG_LVL_ALL, "SFID Match Failed\n");
- return ERR_SF_MATCH_FAIL;
- }
-
- pstClassifierRulesTable = pstServiceFlowEntry->pstClassifierTable;
- if (pstClassifierRulesTable) {
- for (nClsidIndex = 0; nClsidIndex < MAX_PHSRULE_PER_SF; nClsidIndex++) {
- curr_clsf_entry =
- &pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex];
-
- curr_rules_list =
- &pstClassifierRulesTable->stOldPhsRulesList[nClsidIndex];
-
- if (curr_clsf_entry->pstPhsRule) {
-
- if (curr_clsf_entry->pstPhsRule->u8RefCnt)
- curr_clsf_entry->pstPhsRule->u8RefCnt--;
-
- if (0 == curr_clsf_entry->pstPhsRule->u8RefCnt)
- kfree(curr_clsf_entry->pstPhsRule);
-
- curr_clsf_entry->pstPhsRule = NULL;
- }
- memset(curr_clsf_entry, 0,
- sizeof(struct bcm_phs_classifier_entry));
- if (curr_rules_list->pstPhsRule) {
-
- if (curr_rules_list->pstPhsRule->u8RefCnt)
- curr_rules_list->pstPhsRule->u8RefCnt--;
-
- if (0 == curr_rules_list->pstPhsRule->u8RefCnt)
- kfree(curr_rules_list->pstPhsRule);
-
- curr_rules_list->pstPhsRule = NULL;
- }
- memset(curr_rules_list, 0,
- sizeof(struct bcm_phs_classifier_entry));
- }
- }
- pstServiceFlowEntry->bUsed = false;
- pstServiceFlowEntry->uiVcid = 0;
-
-out:
- return 0;
-}
-
-/*
- * PhsCompress
- *
- * Routine Description:
- * Exported function to compress the data using PHS.
- *
- * Arguments:
- * IN void* pvContext - PHS Driver Specific Context.
- * IN B_UINT16 uiVcid - The Service Flow ID to which current
- * packet header compression applies.
- * IN UINT uiClsId - The Classifier ID to which current packet
- * header compression applies.
- * IN void *pvInputBuffer - The Input buffer containing packet header
- * data
- * IN void *pvOutputBuffer - The output buffer returned by this
- * function after PHS
- * IN UINT *pOldHeaderSize - The actual size of the header before PHS
- * IN UINT *pNewHeaderSize - The new size of the header after applying
- * PHS
- *
- * Return Value:
- *
- * 0 if successful,
- * >0 Error.
- */
-static ULONG PhsCompress(IN void *pvContext,
- IN B_UINT16 uiVcid,
- IN B_UINT16 uiClsId,
- IN void *pvInputBuffer,
- OUT void *pvOutputBuffer,
- OUT UINT *pOldHeaderSize,
- OUT UINT *pNewHeaderSize)
-{
- UINT nSFIndex = 0, nClsidIndex = 0;
- struct bcm_phs_entry *pstServiceFlowEntry = NULL;
- struct bcm_phs_classifier_entry *pstClassifierEntry = NULL;
- struct bcm_phs_rule *pstPhsRule = NULL;
- ULONG lStatus = 0;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- struct bcm_phs_extension *pDeviceExtension =
- (struct bcm_phs_extension *)pvContext;
-
- if (pDeviceExtension == NULL) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL,
- "Invalid Device Extension\n");
- lStatus = STATUS_PHS_NOCOMPRESSION;
- return lStatus;
- }
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL,
- "Suppressing header\n");
-
- /* Retrieve the SFID Entry Index for requested Service Flow */
- nSFIndex = GetServiceFlowEntry(pDeviceExtension->pstServiceFlowPhsRulesTable,
- uiVcid, &pstServiceFlowEntry);
- if (nSFIndex == PHS_INVALID_TABLE_INDEX) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL,
- "SFID Match Failed\n");
- lStatus = STATUS_PHS_NOCOMPRESSION;
- return lStatus;
- }
-
- nClsidIndex = GetClassifierEntry(pstServiceFlowEntry->pstClassifierTable,
- uiClsId, eActiveClassifierRuleContext,
- &pstClassifierEntry);
-
- if (nClsidIndex == PHS_INVALID_TABLE_INDEX) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL,
- "No PHS Rule Defined For Classifier\n");
- lStatus = STATUS_PHS_NOCOMPRESSION;
- return lStatus;
- }
-
- /* get rule from SF id,Cls ID pair and proceed */
- pstPhsRule = pstClassifierEntry->pstPhsRule;
- if (!ValidatePHSRuleComplete(pstPhsRule)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL,
- "PHS Rule Defined For Classifier But Not Complete\n");
- lStatus = STATUS_PHS_NOCOMPRESSION;
- return lStatus;
- }
-
- /* Compress Packet */
- lStatus = phs_compress(pstPhsRule,
- (PUCHAR)pvInputBuffer,
- (PUCHAR)pvOutputBuffer,
- pOldHeaderSize,
- pNewHeaderSize);
-
- if (lStatus == STATUS_PHS_COMPRESSED) {
- pstPhsRule->PHSModifiedBytes +=
- *pOldHeaderSize - *pNewHeaderSize - 1;
- pstPhsRule->PHSModifiedNumPackets++;
- } else {
- pstPhsRule->PHSErrorNumPackets++;
- }
-
- return lStatus;
-}
-
-/*
- * PhsDeCompress
- *
- * Routine Description:
- * Exported function to restore the packet header in Rx path.
- *
- * Arguments:
- * IN void* pvContext - PHS Driver Specific Context.
- * IN B_UINT16 uiVcid - The Service Flow ID to which current
- * packet header restoration applies.
- * IN void *pvInputBuffer - The Input buffer containing suppressed
- * packet header data
- * OUT void *pvOutputBuffer - The output buffer returned by this
- * function after restoration
- * OUT UINT *pHeaderSize - The packet header size after restoration
- * is returned in this parameter.
- *
- * Return Value:
- *
- * 0 if successful,
- * >0 Error.
- */
-static ULONG PhsDeCompress(IN void *pvContext,
- IN B_UINT16 uiVcid,
- IN void *pvInputBuffer,
- OUT void *pvOutputBuffer,
- OUT UINT *pInHeaderSize,
- OUT UINT *pOutHeaderSize)
-{
- UINT nSFIndex = 0, nPhsRuleIndex = 0;
- struct bcm_phs_entry *pstServiceFlowEntry = NULL;
- struct bcm_phs_rule *pstPhsRule = NULL;
- UINT phsi;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- struct bcm_phs_extension *pDeviceExtension =
- (struct bcm_phs_extension *)pvContext;
-
- *pInHeaderSize = 0;
- if (pDeviceExtension == NULL) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE,
- DBG_LVL_ALL, "Invalid Device Extension\n");
- return ERR_PHS_INVALID_DEVICE_EXETENSION;
- }
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE, DBG_LVL_ALL,
- "Restoring header\n");
-
- phsi = *((unsigned char *)(pvInputBuffer));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE, DBG_LVL_ALL,
- "PHSI To Be Used For restore : %x\n", phsi);
- if (phsi == UNCOMPRESSED_PACKET)
- return STATUS_PHS_NOCOMPRESSION;
-
- /* Retrieve the SFID Entry Index for requested Service Flow */
- nSFIndex = GetServiceFlowEntry(pDeviceExtension->pstServiceFlowPhsRulesTable,
- uiVcid, &pstServiceFlowEntry);
- if (nSFIndex == PHS_INVALID_TABLE_INDEX) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE,
- DBG_LVL_ALL,
- "SFID Match Failed During Lookup\n");
- return ERR_SF_MATCH_FAIL;
- }
-
- nPhsRuleIndex = GetPhsRuleEntry(pstServiceFlowEntry->pstClassifierTable,
- phsi,
- eActiveClassifierRuleContext,
- &pstPhsRule);
- if (nPhsRuleIndex == PHS_INVALID_TABLE_INDEX) {
- /* Phs Rule does not exist in the active rules table. Let's try
- * the old rules table. */
- nPhsRuleIndex = GetPhsRuleEntry(pstServiceFlowEntry->pstClassifierTable,
- phsi,
- eOldClassifierRuleContext,
- &pstPhsRule);
- if (nPhsRuleIndex == PHS_INVALID_TABLE_INDEX)
- return ERR_PHSRULE_MATCH_FAIL;
- }
-
- *pInHeaderSize = phs_decompress((PUCHAR)pvInputBuffer,
- (PUCHAR)pvOutputBuffer,
- pstPhsRule,
- pOutHeaderSize);
-
- pstPhsRule->PHSModifiedBytes += *pOutHeaderSize - *pInHeaderSize - 1;
-
- pstPhsRule->PHSModifiedNumPackets++;
- return STATUS_PHS_COMPRESSED;
-}
-
-/*
- * Procedure: free_phs_serviceflow_rules
- *
- * Description: This routine is responsible for freeing memory allocated for
- * PHS rules.
- *
- * Arguments:
- * psServiceFlowRulesTable - ptr to the bcm_phs_table structure.
- *
- * Returns:
- * Does not return any value.
- */
-static void free_phs_serviceflow_rules(struct bcm_phs_table *psServiceFlowRulesTable)
-{
- int i, j;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- struct bcm_phs_classifier_entry *curr_act_rules_list;
- struct bcm_phs_classifier_entry *curr_old_rules_list;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL,
- "=======>\n");
-
- if (!psServiceFlowRulesTable)
- goto out;
-
- for (i = 0; i < MAX_SERVICEFLOWS; i++) {
- struct bcm_phs_entry stServiceFlowEntry =
- psServiceFlowRulesTable->stSFList[i];
- struct bcm_phs_classifier_table *pstClassifierRulesTable =
- stServiceFlowEntry.pstClassifierTable;
-
- if (pstClassifierRulesTable) {
- for (j = 0; j < MAX_PHSRULE_PER_SF; j++) {
- curr_act_rules_list =
- &pstClassifierRulesTable->stActivePhsRulesList[j];
-
- curr_old_rules_list =
- &pstClassifierRulesTable->stOldPhsRulesList[j];
-
- if (curr_act_rules_list->pstPhsRule) {
-
- if (curr_act_rules_list->pstPhsRule->u8RefCnt)
- curr_act_rules_list->pstPhsRule->u8RefCnt--;
-
- if (0 == curr_act_rules_list->pstPhsRule->u8RefCnt)
- kfree(curr_act_rules_list->pstPhsRule);
-
- curr_act_rules_list->pstPhsRule = NULL;
- }
-
- if (curr_old_rules_list->pstPhsRule) {
-
- if (curr_old_rules_list->pstPhsRule->u8RefCnt)
- curr_old_rules_list->pstPhsRule->u8RefCnt--;
-
- if (0 == curr_old_rules_list->pstPhsRule->u8RefCnt)
- kfree(curr_old_rules_list->pstPhsRule);
-
- curr_old_rules_list->pstPhsRule = NULL;
- }
- }
- kfree(pstClassifierRulesTable);
- stServiceFlowEntry.pstClassifierTable =
- pstClassifierRulesTable = NULL;
- }
- }
-
-out:
-
- kfree(psServiceFlowRulesTable);
- psServiceFlowRulesTable = NULL;
-}
-
-static bool ValidatePHSRuleComplete(IN const struct bcm_phs_rule *psPhsRule)
-{
- return (psPhsRule &&
- psPhsRule->u8PHSI &&
- psPhsRule->u8PHSS &&
- psPhsRule->u8PHSFLength);
-}
-
-UINT GetServiceFlowEntry(IN struct bcm_phs_table *psServiceFlowTable,
- IN B_UINT16 uiVcid,
- struct bcm_phs_entry **ppstServiceFlowEntry)
-{
- int i;
- struct bcm_phs_entry *curr_sf_list;
-
- for (i = 0; i < MAX_SERVICEFLOWS; i++) {
- curr_sf_list = &psServiceFlowTable->stSFList[i];
- if (curr_sf_list->bUsed && (curr_sf_list->uiVcid == uiVcid)) {
- *ppstServiceFlowEntry = curr_sf_list;
- return i;
- }
- }
-
- *ppstServiceFlowEntry = NULL;
- return PHS_INVALID_TABLE_INDEX;
-}
-
-static UINT GetClassifierEntry(IN struct bcm_phs_classifier_table *pstClassifierTable,
- IN B_UINT32 uiClsid,
- enum bcm_phs_classifier_context eClsContext,
- OUT struct bcm_phs_classifier_entry **ppstClassifierEntry)
-{
- int i;
- struct bcm_phs_classifier_entry *psClassifierRules = NULL;
-
- for (i = 0; i < MAX_PHSRULE_PER_SF; i++) {
-
- if (eClsContext == eActiveClassifierRuleContext)
- psClassifierRules =
- &pstClassifierTable->stActivePhsRulesList[i];
- else
- psClassifierRules =
- &pstClassifierTable->stOldPhsRulesList[i];
-
- if (psClassifierRules->bUsed &&
- (psClassifierRules->uiClassifierRuleId == uiClsid)) {
- *ppstClassifierEntry = psClassifierRules;
- return i;
- }
- }
-
- *ppstClassifierEntry = NULL;
- return PHS_INVALID_TABLE_INDEX;
-}
-
-static UINT GetPhsRuleEntry(IN struct bcm_phs_classifier_table *pstClassifierTable,
- IN B_UINT32 uiPHSI,
- enum bcm_phs_classifier_context eClsContext,
- OUT struct bcm_phs_rule **ppstPhsRule)
-{
- int i;
- struct bcm_phs_classifier_entry *pstClassifierRule = NULL;
-
- for (i = 0; i < MAX_PHSRULE_PER_SF; i++) {
- if (eClsContext == eActiveClassifierRuleContext)
- pstClassifierRule =
- &pstClassifierTable->stActivePhsRulesList[i];
- else
- pstClassifierRule =
- &pstClassifierTable->stOldPhsRulesList[i];
-
- if (pstClassifierRule->bUsed &&
- (pstClassifierRule->u8PHSI == uiPHSI)) {
- *ppstPhsRule = pstClassifierRule->pstPhsRule;
- return i;
- }
- }
-
- *ppstPhsRule = NULL;
- return PHS_INVALID_TABLE_INDEX;
-}
-
-static UINT CreateSFToClassifierRuleMapping(IN B_UINT16 uiVcid,
- IN B_UINT16 uiClsId,
- IN struct bcm_phs_table *psServiceFlowTable,
- struct bcm_phs_rule *psPhsRule,
- B_UINT8 u8AssociatedPHSI)
-{
- struct bcm_phs_classifier_table *psaClassifiertable = NULL;
- UINT uiStatus = 0;
- int iSfIndex;
- bool bFreeEntryFound = false;
- struct bcm_phs_entry *curr_list;
-
- /* Check for a free entry in SFID table */
- for (iSfIndex = 0; iSfIndex < MAX_SERVICEFLOWS; iSfIndex++) {
- curr_list = &psServiceFlowTable->stSFList[iSfIndex];
- if (!curr_list->bUsed) {
- bFreeEntryFound = TRUE;
- break;
- }
- }
-
- if (!bFreeEntryFound)
- return ERR_SFTABLE_FULL;
-
- psaClassifiertable = curr_list->pstClassifierTable;
- uiStatus = CreateClassifierPHSRule(uiClsId,
- psaClassifiertable,
- psPhsRule,
- eActiveClassifierRuleContext,
- u8AssociatedPHSI);
- if (uiStatus == PHS_SUCCESS) {
- /* Add entry at free index to the SF */
- curr_list->bUsed = TRUE;
- curr_list->uiVcid = uiVcid;
- }
-
- return uiStatus;
-}
-
-static UINT CreateClassiferToPHSRuleMapping(IN B_UINT16 uiVcid,
- IN B_UINT16 uiClsId,
- IN struct bcm_phs_entry *pstServiceFlowEntry,
- struct bcm_phs_rule *psPhsRule,
- B_UINT8 u8AssociatedPHSI)
-{
- struct bcm_phs_classifier_entry *pstClassifierEntry = NULL;
- UINT uiStatus = PHS_SUCCESS;
- UINT nClassifierIndex = 0;
- struct bcm_phs_classifier_table *psaClassifiertable = NULL;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- psaClassifiertable = pstServiceFlowEntry->pstClassifierTable;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL,
- "==>");
-
- /* Check if the supplied Classifier already exists */
- nClassifierIndex = GetClassifierEntry(
- pstServiceFlowEntry->pstClassifierTable,
- uiClsId,
- eActiveClassifierRuleContext,
- &pstClassifierEntry);
-
- if (nClassifierIndex == PHS_INVALID_TABLE_INDEX) {
- /*
- * The Classifier doesn't exist, so it's a new classifier being
- * added.
- * Add new entry to associate PHS Rule to the Classifier
- */
-
- uiStatus = CreateClassifierPHSRule(uiClsId, psaClassifiertable,
- psPhsRule,
- eActiveClassifierRuleContext,
- u8AssociatedPHSI);
- return uiStatus;
- }
-
- /*
- * The Classifier exists. The PHS Rule for this classifier
- * is being modified.
- */
-
- if (pstClassifierEntry->u8PHSI == psPhsRule->u8PHSI) {
- if (pstClassifierEntry->pstPhsRule == NULL)
- return ERR_PHS_INVALID_PHS_RULE;
-
- /*
- * This rule already exists; if any fields have changed for this
- * PHS rule, update them.
- */
- /* If any part of PHSF is valid then we update PHSF */
- if (psPhsRule->u8PHSFLength) {
- /* update PHSF */
- memcpy(pstClassifierEntry->pstPhsRule->u8PHSF,
- psPhsRule->u8PHSF,
- MAX_PHS_LENGTHS);
- }
-
- if (psPhsRule->u8PHSFLength) {
- /* update PHSFLen */
- pstClassifierEntry->pstPhsRule->u8PHSFLength =
- psPhsRule->u8PHSFLength;
- }
-
- if (psPhsRule->u8PHSMLength) {
- /* update PHSM */
- memcpy(pstClassifierEntry->pstPhsRule->u8PHSM,
- psPhsRule->u8PHSM,
- MAX_PHS_LENGTHS);
- }
-
- if (psPhsRule->u8PHSMLength) {
- /* update PHSM Len */
- pstClassifierEntry->pstPhsRule->u8PHSMLength =
- psPhsRule->u8PHSMLength;
- }
-
- if (psPhsRule->u8PHSS) {
- /* update PHSS */
- pstClassifierEntry->pstPhsRule->u8PHSS =
- psPhsRule->u8PHSS;
- }
-
- /* update PHSV */
- pstClassifierEntry->pstPhsRule->u8PHSV = psPhsRule->u8PHSV;
- } else {
- /* A new rule is being set for this classifier. */
- uiStatus = UpdateClassifierPHSRule(uiClsId,
- pstClassifierEntry,
- psaClassifiertable,
- psPhsRule,
- u8AssociatedPHSI);
- }
-
- return uiStatus;
-}
-
-static UINT CreateClassifierPHSRule(IN B_UINT16 uiClsId,
- struct bcm_phs_classifier_table *psaClassifiertable,
- struct bcm_phs_rule *psPhsRule,
- enum bcm_phs_classifier_context eClsContext,
- B_UINT8 u8AssociatedPHSI)
-{
- UINT iClassifierIndex = 0;
- bool bFreeEntryFound = false;
- struct bcm_phs_classifier_entry *psClassifierRules = NULL;
- UINT nStatus = PHS_SUCCESS;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL,
- "Inside CreateClassifierPHSRule");
-
- if (psaClassifiertable == NULL)
- return ERR_INVALID_CLASSIFIERTABLE_FOR_SF;
-
- if (eClsContext == eOldClassifierRuleContext) {
-		/*
-		 * If an old entry for this classifier ID already exists in the
-		 * old rules table, replace it.
-		 */
-
- iClassifierIndex = GetClassifierEntry(psaClassifiertable,
- uiClsId,
- eClsContext,
- &psClassifierRules);
-
- if (iClassifierIndex != PHS_INVALID_TABLE_INDEX) {
-			/*
-			 * The classifier already exists in the old rules table.
-			 * Let's replace the old classifier with the new one.
-			 */
- bFreeEntryFound = TRUE;
- }
- }
-
- if (!bFreeEntryFound) {
- /* Continue to search for a free location to add the rule */
- for (iClassifierIndex = 0; iClassifierIndex <
- MAX_PHSRULE_PER_SF; iClassifierIndex++) {
- if (eClsContext == eActiveClassifierRuleContext)
- psClassifierRules = &psaClassifiertable->stActivePhsRulesList[iClassifierIndex];
- else
- psClassifierRules = &psaClassifiertable->stOldPhsRulesList[iClassifierIndex];
-
- if (!psClassifierRules->bUsed) {
- bFreeEntryFound = TRUE;
- break;
- }
- }
- }
-
- if (!bFreeEntryFound) {
-
- if (eClsContext == eActiveClassifierRuleContext)
- return ERR_CLSASSIFIER_TABLE_FULL;
- else {
-			/* Let's replace the oldest rule if we are looking in
-			 * the old rules table */
- if (psaClassifiertable->uiOldestPhsRuleIndex >= MAX_PHSRULE_PER_SF)
- psaClassifiertable->uiOldestPhsRuleIndex = 0;
-
- iClassifierIndex =
- psaClassifiertable->uiOldestPhsRuleIndex;
- psClassifierRules =
- &psaClassifiertable->stOldPhsRulesList[iClassifierIndex];
-
- (psaClassifiertable->uiOldestPhsRuleIndex)++;
- }
- }
-
- if (eClsContext == eOldClassifierRuleContext) {
-
- if (psClassifierRules->pstPhsRule == NULL) {
-
- psClassifierRules->pstPhsRule =
- kmalloc(sizeof(struct bcm_phs_rule),
- GFP_KERNEL);
-
- if (NULL == psClassifierRules->pstPhsRule)
- return ERR_PHSRULE_MEMALLOC_FAIL;
- }
-
- psClassifierRules->bUsed = TRUE;
- psClassifierRules->uiClassifierRuleId = uiClsId;
- psClassifierRules->u8PHSI = psPhsRule->u8PHSI;
- psClassifierRules->bUnclassifiedPHSRule =
- psPhsRule->bUnclassifiedPHSRule;
-
- /* Update The PHS rule */
- memcpy(psClassifierRules->pstPhsRule, psPhsRule,
- sizeof(struct bcm_phs_rule));
- } else
- nStatus = UpdateClassifierPHSRule(uiClsId,
- psClassifierRules,
- psaClassifiertable,
- psPhsRule,
- u8AssociatedPHSI);
-
- return nStatus;
-}
-
-static UINT UpdateClassifierPHSRule(IN B_UINT16 uiClsId,
- IN struct bcm_phs_classifier_entry *pstClassifierEntry,
- struct bcm_phs_classifier_table *psaClassifiertable,
- struct bcm_phs_rule *psPhsRule,
- B_UINT8 u8AssociatedPHSI)
-{
- struct bcm_phs_rule *pstAddPhsRule = NULL;
- UINT nPhsRuleIndex = 0;
- bool bPHSRuleOrphaned = false;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- psPhsRule->u8RefCnt = 0;
-
-	/* Step 1: Deref any existing PHS rule in this classifier entry */
- bPHSRuleOrphaned = DerefPhsRule(uiClsId, psaClassifiertable,
- pstClassifierEntry->pstPhsRule);
-
-	/* Step 2: Search for a PHS rule with u8AssociatedPHSI in the
-	 * classifier table for this SF */
- nPhsRuleIndex = GetPhsRuleEntry(psaClassifiertable, u8AssociatedPHSI,
- eActiveClassifierRuleContext,
- &pstAddPhsRule);
- if (PHS_INVALID_TABLE_INDEX == nPhsRuleIndex) {
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH,
- DBG_LVL_ALL,
- "\nAdding New PHSRuleEntry For Classifier");
-
- if (psPhsRule->u8PHSI == 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH,
- DBG_LVL_ALL, "\nError PHSI is Zero\n");
- return ERR_PHS_INVALID_PHS_RULE;
- }
-
-		/* Step 2.a: The PHS rule does not exist. Create a new PHS
-		 * rule for uiClsId */
- if (false == bPHSRuleOrphaned) {
-
- pstClassifierEntry->pstPhsRule =
- kmalloc(sizeof(struct bcm_phs_rule),
- GFP_KERNEL);
- if (NULL == pstClassifierEntry->pstPhsRule)
- return ERR_PHSRULE_MEMALLOC_FAIL;
- }
- memcpy(pstClassifierEntry->pstPhsRule, psPhsRule,
- sizeof(struct bcm_phs_rule));
- } else {
-		/* Step 2.b: The PHS rule exists. Tie uiClsId to the existing
-		 * PHS rule */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH,
- DBG_LVL_ALL,
- "\nTying Classifier to Existing PHS Rule");
- if (bPHSRuleOrphaned) {
- kfree(pstClassifierEntry->pstPhsRule);
- pstClassifierEntry->pstPhsRule = NULL;
- }
- pstClassifierEntry->pstPhsRule = pstAddPhsRule;
- }
-
- pstClassifierEntry->bUsed = TRUE;
- pstClassifierEntry->u8PHSI = pstClassifierEntry->pstPhsRule->u8PHSI;
- pstClassifierEntry->uiClassifierRuleId = uiClsId;
- pstClassifierEntry->pstPhsRule->u8RefCnt++;
- pstClassifierEntry->bUnclassifiedPHSRule =
- pstClassifierEntry->pstPhsRule->bUnclassifiedPHSRule;
-
- return PHS_SUCCESS;
-}
-
-static bool DerefPhsRule(IN B_UINT16 uiClsId,
- struct bcm_phs_classifier_table *psaClassifiertable,
- struct bcm_phs_rule *pstPhsRule)
-{
- if (pstPhsRule == NULL)
- return false;
-
- if (pstPhsRule->u8RefCnt)
- pstPhsRule->u8RefCnt--;
-
- return (0 == pstPhsRule->u8RefCnt);
-}
-
-static void dbg_print_st_cls_entry(struct bcm_mini_adapter *ad,
- struct bcm_phs_entry *st_serv_flow_entry,
- struct bcm_phs_classifier_entry *st_cls_entry)
-{
- int k;
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\n VCID : %#X", st_serv_flow_entry->uiVcid);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n ClassifierID : %#X", st_cls_entry->uiClassifierRuleId);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n PHSRuleID : %#X", st_cls_entry->u8PHSI);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n****************PHS Rule********************\n");
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n PHSI : %#X", st_cls_entry->pstPhsRule->u8PHSI);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n PHSFLength : %#X ", st_cls_entry->pstPhsRule->u8PHSFLength);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n PHSF : ");
-
- for (k = 0 ; k < st_cls_entry->pstPhsRule->u8PHSFLength; k++)
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "%#X ", st_cls_entry->pstPhsRule->u8PHSF[k]);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n PHSMLength : %#X", st_cls_entry->pstPhsRule->u8PHSMLength);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n PHSM :");
-
- for (k = 0; k < st_cls_entry->pstPhsRule->u8PHSMLength; k++)
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "%#X ", st_cls_entry->pstPhsRule->u8PHSM[k]);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n PHSS : %#X ", st_cls_entry->pstPhsRule->u8PHSS);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n PHSV : %#X", st_cls_entry->pstPhsRule->u8PHSV);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\n********************************************\n");
-}
-
-static void phsrules_per_sf_dbg_print(struct bcm_mini_adapter *ad,
- struct bcm_phs_entry *st_serv_flow_entry)
-{
- int j, l;
- struct bcm_phs_classifier_entry st_cls_entry;
-
- for (j = 0; j < MAX_PHSRULE_PER_SF; j++) {
-
- for (l = 0; l < 2; l++) {
-
- if (l == 0) {
- st_cls_entry = st_serv_flow_entry->pstClassifierTable->stActivePhsRulesList[j];
- if (st_cls_entry.bUsed)
- BCM_DEBUG_PRINT(ad,
- DBG_TYPE_OTHERS,
- DUMP_INFO,
- (DBG_LVL_ALL | DBG_NO_FUNC_PRINT),
- "\n Active PHS Rule :\n");
- } else {
- st_cls_entry = st_serv_flow_entry->pstClassifierTable->stOldPhsRulesList[j];
- if (st_cls_entry.bUsed)
- BCM_DEBUG_PRINT(ad,
- DBG_TYPE_OTHERS,
- DUMP_INFO,
- (DBG_LVL_ALL | DBG_NO_FUNC_PRINT),
- "\n Old PHS Rule :\n");
- }
-
- if (st_cls_entry.bUsed) {
- dbg_print_st_cls_entry(ad,
- st_serv_flow_entry,
- &st_cls_entry);
- }
- }
- }
-}
-
-void DumpPhsRules(struct bcm_phs_extension *pDeviceExtension)
-{
- int i;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,
- "\n Dumping PHS Rules :\n");
-
- for (i = 0; i < MAX_SERVICEFLOWS; i++) {
-
- struct bcm_phs_entry stServFlowEntry =
- pDeviceExtension->pstServiceFlowPhsRulesTable->stSFList[i];
-
- if (!stServFlowEntry.bUsed)
- continue;
-
- phsrules_per_sf_dbg_print(Adapter, &stServFlowEntry);
- }
-}
-
-/*
- * Procedure: phs_decompress
- *
- * Description: This routine restores the static fields within the packet.
- *
- * Arguments:
- * in_buf - ptr to incoming packet buffer.
- * out_buf - ptr to output buffer where the suppressed
- * header is copied.
- * decomp_phs_rules - ptr to PHS rule.
- * header_size - ptr to field which holds the phss or
- * phsf_length.
- *
- * Returns:
- *	size - The number of bytes of dynamic fields present within the
- *	incoming packet header.
- *	0 - If the PHS rule is NULL, or if PHSI is 0, indicating that the
- *	packet is uncompressed.
- */
-static int phs_decompress(unsigned char *in_buf,
- unsigned char *out_buf,
- struct bcm_phs_rule *decomp_phs_rules,
- UINT *header_size)
-{
- int phss, size = 0;
- struct bcm_phs_rule *tmp_memb;
- int bit, i = 0;
- unsigned char *phsf, *phsm;
- int in_buf_len = *header_size - 1;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- in_buf++;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE, DBG_LVL_ALL,
- "====>\n");
- *header_size = 0;
-
- if (decomp_phs_rules == NULL)
- return 0;
-
- tmp_memb = decomp_phs_rules;
- /*
- * BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_RECEIVE,DBG_LVL_ALL,
- * "\nDECOMP:In phs_decompress PHSI 1 %d",phsi));
- * header_size = tmp_memb->u8PHSFLength;
- */
- phss = tmp_memb->u8PHSS;
- phsf = tmp_memb->u8PHSF;
- phsm = tmp_memb->u8PHSM;
-
- if (phss > MAX_PHS_LENGTHS)
- phss = MAX_PHS_LENGTHS;
-
- /*
- * BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_RECEIVE,DBG_LVL_ALL,
- * "\nDECOMP:
- * In phs_decompress PHSI %d phss %d index %d",phsi,phss,index));
- */
- while ((phss > 0) && (size < in_buf_len)) {
- bit = ((*phsm << i) & SUPPRESS);
-
- if (bit == SUPPRESS) {
- *out_buf = *phsf;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE,
- DBG_LVL_ALL,
- "\nDECOMP:In phss %d phsf %d output %d",
- phss, *phsf, *out_buf);
- } else {
- *out_buf = *in_buf;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE,
- DBG_LVL_ALL,
- "\nDECOMP:In phss %d input %d output %d",
- phss, *in_buf, *out_buf);
- in_buf++;
- size++;
- }
- out_buf++;
- phsf++;
- phss--;
- i++;
- *header_size = *header_size + 1;
-
- if (i > MAX_NO_BIT) {
- i = 0;
- phsm++;
- }
- }
-
- return size;
-}
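The loop in phs_decompress() is driven by the PHS mask: for each of the first PHSS header bytes, the current mask bit decides whether the byte is restored from the stored PHS field (it was suppressed on the air) or taken from the incoming buffer (a dynamic byte). A minimal standalone sketch of that walk on plain byte buffers follows; the names are hypothetical, and the mask is assumed MSB-first with one bit per header byte, as implied by the driver's SUPPRESS and MAX_NO_BIT handling. It is an illustration, not the driver routine.

/*
 * Minimal sketch of mask-driven header restoration (hypothetical names).
 * Mask bit 1: the byte was suppressed on the air, restore it from phsf.
 * Mask bit 0: the byte travelled in the packet, copy it from in_buf.
 */
#include <stddef.h>

static size_t toy_phs_restore(const unsigned char *in_buf, size_t in_len,
			      unsigned char *out_buf,
			      const unsigned char *phsf,
			      const unsigned char *phsm, size_t phss)
{
	size_t consumed = 0;	/* dynamic bytes taken from in_buf */
	size_t i;

	for (i = 0; i < phss; i++) {
		int suppressed = (phsm[i / 8] >> (7 - (i % 8))) & 1;

		if (suppressed) {
			out_buf[i] = phsf[i];	/* restore static byte */
		} else {
			if (consumed >= in_len)
				break;		/* ran out of input */
			out_buf[i] = in_buf[consumed++];
		}
	}
	return consumed;	/* mirrors the "size" result of phs_decompress() */
}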
-
-/*
- * Procedure: phs_compress
- *
- * Description: This routine suppresses the static fields within the packet.
- * Before doing so it verifies the fields to be suppressed against the
- * corresponding fields in the PHSF; verification is performed only when the
- * PHSV field of the PHS rule is set. If verification succeeds, the field is
- * suppressed. If any one static field differs, none of the static fields are
- * suppressed and the packet is sent uncompressed with PHSI = 0.
- *
- * Arguments:
- * phs_rule - ptr to PHS rule.
- * in_buf - ptr to incoming packet buffer.
- * out_buf - ptr to output buffer where the suppressed header is
- * copied.
- * header_size - ptr to field which holds the phss.
- *
- * Returns:
- * size - The number of bytes copied into the output buffer i.e
- * dynamic fields
- *	0 - If the PHS rule is NULL, if the PHSV field is not set, or if
- *	the verification fails.
- */
-static int phs_compress(struct bcm_phs_rule *phs_rule,
- unsigned char *in_buf,
- unsigned char *out_buf,
- UINT *header_size,
- UINT *new_header_size)
-{
- unsigned char *old_addr = out_buf;
- int suppress = 0;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- if (phs_rule == NULL) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL,
- "\nphs_compress(): phs_rule null!");
- *out_buf = ZERO_PHSI;
- return STATUS_PHS_NOCOMPRESSION;
- }
-
- if (phs_rule->u8PHSS <= *new_header_size)
- *header_size = phs_rule->u8PHSS;
- else
- *header_size = *new_header_size;
-
- /* To copy PHSI */
- out_buf++;
- suppress = verify_suppress_phsf(in_buf, out_buf, phs_rule->u8PHSF,
- phs_rule->u8PHSM, phs_rule->u8PHSS,
- phs_rule->u8PHSV, new_header_size);
-
- if (suppress == STATUS_PHS_COMPRESSED) {
- *old_addr = (unsigned char)phs_rule->u8PHSI;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL,
- "\nCOMP:In phs_compress phsi %d",
- phs_rule->u8PHSI);
- } else {
- *old_addr = ZERO_PHSI;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL,
- "\nCOMP:In phs_compress PHSV Verification failed");
- }
-
- return suppress;
-}
-
-/*
- * Procedure: verify_suppress_phsf
- *
- * Description: This routine verifies the fields of the packet; if all the
- * static fields are equal, it adds the PHSI of that PHS rule. If any static
- * field differs, it won't suppress any field.
- *
- * Arguments:
- *	in_buffer - ptr to incoming packet buffer.
- *	out_buffer - ptr to output buffer where the suppressed header is copied.
- *	phsf - ptr to phsf.
- *	phsm - ptr to phsm.
- *	phss - variable holding phss.
- *	phsv - variable holding phsv.
- *	new_header_size - ptr to the field holding the new header size.
- *
- * Returns:
- * size - The number of bytes copied into the output buffer i.e dynamic
- * fields.
- * 0 - Packet has failed the verification.
- */
-static int verify_suppress_phsf(unsigned char *in_buffer,
- unsigned char *out_buffer,
- unsigned char *phsf,
- unsigned char *phsm,
- unsigned int phss,
- unsigned int phsv,
- UINT *new_header_size)
-{
- unsigned int size = 0;
- int bit, i = 0;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL,
- "\nCOMP:In verify_phsf PHSM - 0x%X", *phsm);
-
- if (phss > (*new_header_size))
- phss = *new_header_size;
-
- while (phss > 0) {
- bit = ((*phsm << i) & SUPPRESS);
- if (bit == SUPPRESS) {
- if (*in_buffer != *phsf) {
- if (phsv == VERIFY) {
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_OTHERS,
- PHS_SEND,
- DBG_LVL_ALL,
- "\nCOMP:In verify_phsf failed for field %d buf %d phsf %d",
- phss,
- *in_buffer,
- *phsf);
- return STATUS_PHS_NOCOMPRESSION;
- }
- } else
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_OTHERS,
- PHS_SEND,
- DBG_LVL_ALL,
- "\nCOMP:In verify_phsf success for field %d buf %d phsf %d",
- phss,
- *in_buffer,
- *phsf);
- } else {
- *out_buffer = *in_buffer;
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_OTHERS,
- PHS_SEND,
- DBG_LVL_ALL,
- "\nCOMP:In copying_header input %d out %d",
- *in_buffer,
- *out_buffer);
- out_buffer++;
- size++;
- }
-
- in_buffer++;
- phsf++;
- phss--;
- i++;
-
- if (i > MAX_NO_BIT) {
- i = 0;
- phsm++;
- }
- }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL,
- "\nCOMP:In verify_phsf success");
- *new_header_size = size;
- return STATUS_PHS_COMPRESSED;
-}
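verify_suppress_phsf() is the transmit-side counterpart of the restore walk: bytes whose mask bit is set are checked against the PHSF (only when PHSV requests verification) and then dropped, while the remaining bytes are copied out as the dynamic part of the header. A hedged sketch under the same MSB-first mask assumption, with hypothetical names and -1 standing in for STATUS_PHS_NOCOMPRESSION:

/*
 * Sketch of the verify-and-suppress pass (hypothetical names).
 * Returns the number of dynamic bytes written to out, or -1 when a byte
 * that the rule wants to suppress differs from the stored PHSF and
 * verification was requested (the caller would then send uncompressed).
 */
#include <stddef.h>

static int toy_phs_suppress(const unsigned char *hdr, unsigned char *out,
			    const unsigned char *phsf,
			    const unsigned char *phsm, size_t phss,
			    int verify)
{
	size_t i, out_len = 0;

	for (i = 0; i < phss; i++) {
		int suppressed = (phsm[i / 8] >> (7 - (i % 8))) & 1;

		if (suppressed) {
			if (verify && hdr[i] != phsf[i])
				return -1;	/* verification failed */
		} else {
			out[out_len++] = hdr[i];	/* dynamic byte */
		}
	}
	return (int)out_len;
}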
diff --git a/drivers/staging/bcm/PHSModule.h b/drivers/staging/bcm/PHSModule.h
deleted file mode 100644
index d84d60ba48f..00000000000
--- a/drivers/staging/bcm/PHSModule.h
+++ /dev/null
@@ -1,59 +0,0 @@
-#ifndef BCM_MINIPORT_PHSMODULE_H
-#define BCM_MINIPORT_PHSMODULE_H
-
-int PHSTransmit(struct bcm_mini_adapter *Adapter,
- struct sk_buff **pPacket,
- USHORT Vcid,
- B_UINT16 uiClassifierRuleID,
- bool bHeaderSuppressionEnabled,
- PUINT PacketLen,
- UCHAR bEthCSSupport);
-
-int PHSReceive(struct bcm_mini_adapter *Adapter,
- USHORT usVcid,
- struct sk_buff *packet,
- UINT *punPacketLen,
- UCHAR *pucEthernetHdr,
- UINT
- );
-
-
-void DumpDataPacketHeader(PUCHAR pPkt);
-
-void DumpFullPacket(UCHAR *pBuf, UINT nPktLen);
-
-void DumpPhsRules(struct bcm_phs_extension *pDeviceExtension);
-
-
-int phs_init(struct bcm_phs_extension *pPhsdeviceExtension,
- struct bcm_mini_adapter *Adapter);
-
-int PhsCleanup(struct bcm_phs_extension *pPHSDeviceExt);
-
-/* Utility Functions */
-ULONG PhsUpdateClassifierRule(void *pvContext,
- B_UINT16 uiVcid,
- B_UINT16 uiClsId,
- struct bcm_phs_rule *psPhsRule,
- B_UINT8 u8AssociatedPHSI);
-
-ULONG PhsDeletePHSRule(void *pvContext, B_UINT16 uiVcid, B_UINT8 u8PHSI);
-
-ULONG PhsDeleteClassifierRule(void *pvContext,
- B_UINT16 uiVcid,
- B_UINT16 uiClsId);
-
-ULONG PhsDeleteSFRules(void *pvContext, B_UINT16 uiVcid);
-
-
-bool ValidatePHSRule(struct bcm_phs_rule *psPhsRule);
-
-UINT GetServiceFlowEntry(struct bcm_phs_table *psServiceFlowTable,
- B_UINT16 uiVcid,
- struct bcm_phs_entry **ppstServiceFlowEntry);
-
-
-void DumpPhsRules(struct bcm_phs_extension *pDeviceExtension);
-
-
-#endif
diff --git a/drivers/staging/bcm/Protocol.h b/drivers/staging/bcm/Protocol.h
deleted file mode 100644
index 9818128d932..00000000000
--- a/drivers/staging/bcm/Protocol.h
+++ /dev/null
@@ -1,128 +0,0 @@
-/************************************
-* Protocol.h
-*************************************/
-#ifndef __PROTOCOL_H__
-#define __PROTOCOL_H__
-
-#define IPV4 4
-#define IPV6 6
-
-struct ArpHeader {
- struct arphdr arp;
- unsigned char ar_sha[ETH_ALEN]; /* sender hardware address */
- unsigned char ar_sip[4]; /* sender IP address */
- unsigned char ar_tha[ETH_ALEN]; /* target hardware address */
- unsigned char ar_tip[4]; /* target IP address */
-};
-
-struct bcm_transport_header {
- union {
- struct udphdr uhdr;
- struct tcphdr thdr;
- };
-} __packed;
-
-enum bcm_ip_frame_type {
- eNonIPPacket,
- eIPv4Packet,
- eIPv6Packet
-};
-
-enum bcm_eth_frame_type {
- eEthUnsupportedFrame,
- eEth802LLCFrame,
- eEth802LLCSNAPFrame,
- eEth802QVLANFrame,
- eEthOtherFrame
-};
-
-struct bcm_eth_packet_info {
- enum bcm_ip_frame_type eNwpktIPFrameType;
- enum bcm_eth_frame_type eNwpktEthFrameType;
- unsigned short usEtherType;
- unsigned char ucDSAP;
-};
-
-struct bcm_eth_q_frame {
- struct bcm_eth_header EThHdr;
- unsigned short UserPriority:3;
- unsigned short CFI:1;
- unsigned short VLANID:12;
- unsigned short EthType;
-} __packed;
-
-struct bcm_eth_llc_frame {
- struct bcm_eth_header EThHdr;
- unsigned char DSAP;
- unsigned char SSAP;
- unsigned char Control;
-} __packed;
-
-struct bcm_eth_llc_snap_frame {
- struct bcm_eth_header EThHdr;
- unsigned char DSAP;
- unsigned char SSAP;
- unsigned char Control;
- unsigned char OUI[3];
- unsigned short usEtherType;
-} __packed;
-
-struct bcm_ethernet2_frame {
- struct bcm_eth_header EThHdr;
-} __packed;
-
-#define ETHERNET_FRAMETYPE_IPV4 ntohs(0x0800)
-#define ETHERNET_FRAMETYPE_IPV6 ntohs(0x86dd)
-#define ETHERNET_FRAMETYPE_802QVLAN ntohs(0x8100)
-
-/* Per SF CS Specification Encodings */
-enum bcm_spec_encoding {
- eCSSpecUnspecified = 0,
- eCSPacketIPV4,
- eCSPacketIPV6,
- eCS802_3PacketEthernet,
- eCS802_1QPacketVLAN,
- eCSPacketIPV4Over802_3Ethernet,
- eCSPacketIPV6Over802_3Ethernet,
- eCSPacketIPV4Over802_1QVLAN,
- eCSPacketIPV6Over802_1QVLAN,
- eCSPacketUnsupported
-};
-
-#define IP6_HEADER_LEN 40
-#define IP_VERSION(byte) (((byte&0xF0)>>4))
-
-#define MAC_ADDRESS_SIZE 6
-#define ETH_AND_IP_HEADER_LEN (14 + 20)
-#define L4_SRC_PORT_LEN 2
-#define L4_DEST_PORT_LEN 2
-#define CTRL_PKT_LEN (8 + ETH_AND_IP_HEADER_LEN)
-
-#define ETH_ARP_FRAME 0x806
-#define ETH_IPV4_FRAME 0x800
-#define ETH_IPV6_FRAME 0x86DD
-#define UDP 0x11
-#define TCP 0x06
-
-#define ARP_OP_REQUEST 0x01
-#define ARP_OP_REPLY 0x02
-#define ARP_PKT_SIZE 60
-
-/* This is the format for the TCP packet header */
-struct bcm_tcp_header {
- unsigned short usSrcPort;
- unsigned short usDestPort;
- unsigned long ulSeqNumber;
- unsigned long ulAckNumber;
- unsigned char HeaderLength;
- unsigned char ucFlags;
- unsigned short usWindowsSize;
- unsigned short usChkSum;
- unsigned short usUrgetPtr;
-};
-
-#define TCP_HEADER_LEN sizeof(struct bcm_tcp_header)
-#define TCP_ACK 0x10 /* Bit 4 in tcpflags field. */
-#define GET_TCP_HEADER_LEN(byte) ((byte&0xF0)>>4)
-
-#endif /* __PROTOCOL_H__ */
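IP_VERSION() and GET_TCP_HEADER_LEN() above both extract the high nibble of a header byte; for TCP the nibble is the data offset in 32-bit words, so the header length in bytes is the nibble multiplied by four. A small usage sketch follows; the sample byte values are illustrative, not taken from a captured packet, and the macros are repeated locally so the sketch stands alone.

/*
 * Usage sketch for the high-nibble macros above; sample bytes are
 * illustrative only.
 */
#include <stdio.h>

#define IP_VERSION(byte)		(((byte) & 0xF0) >> 4)
#define GET_TCP_HEADER_LEN(byte)	(((byte) & 0xF0) >> 4)

int main(void)
{
	unsigned char ip_first_byte = 0x45;	/* IPv4, IHL = 5 words */
	unsigned char tcp_offset_byte = 0x50;	/* data offset = 5 words */

	printf("IP version: %d\n", IP_VERSION(ip_first_byte));		/* 4 */
	printf("TCP header: %d bytes\n",
	       GET_TCP_HEADER_LEN(tcp_offset_byte) * 4);		/* 20 */
	return 0;
}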
diff --git a/drivers/staging/bcm/Prototypes.h b/drivers/staging/bcm/Prototypes.h
deleted file mode 100644
index 1ddc8b2539f..00000000000
--- a/drivers/staging/bcm/Prototypes.h
+++ /dev/null
@@ -1,217 +0,0 @@
-#ifndef _PROTOTYPES_H_
-#define _PROTOTYPES_H_
-
-VOID LinkControlResponseMessage(struct bcm_mini_adapter *Adapter, PUCHAR pucBuffer);
-
-VOID StatisticsResponse(struct bcm_mini_adapter *Adapter, PVOID pvBuffer);
-
-VOID IdleModeResponse(struct bcm_mini_adapter *Adapter, PUINT puiBuffer);
-
-int control_packet_handler(struct bcm_mini_adapter *Adapter);
-
-VOID DeleteAllClassifiersForSF(struct bcm_mini_adapter *Adapter, UINT uiSearchRuleIndex);
-
-VOID flush_all_queues(struct bcm_mini_adapter *Adapter);
-
-int register_control_device_interface(struct bcm_mini_adapter *ps_adapter);
-
-void unregister_control_device_interface(struct bcm_mini_adapter *Adapter);
-
-INT CopyBufferToControlPacket(struct bcm_mini_adapter *Adapter,/**<Logical Adapter*/
- PVOID ioBuffer/**<Control Packet Buffer*/
- );
-
-VOID SortPackInfo(struct bcm_mini_adapter *Adapter);
-
-VOID SortClassifiers(struct bcm_mini_adapter *Adapter);
-
-VOID flush_all_queues(struct bcm_mini_adapter *Adapter);
-
-VOID PruneQueueAllSF(struct bcm_mini_adapter *Adapter);
-
-INT SearchSfid(struct bcm_mini_adapter *Adapter, UINT uiSfid);
-
-USHORT ClassifyPacket(struct bcm_mini_adapter *Adapter, struct sk_buff *skb);
-
-bool MatchSrcPort(struct bcm_classifier_rule *pstClassifierRule, USHORT ushSrcPort);
-
-bool MatchDestPort(struct bcm_classifier_rule *pstClassifierRule, USHORT ushSrcPort);
-
-bool MatchProtocol(struct bcm_classifier_rule *pstClassifierRule, UCHAR ucProtocol);
-
-INT SetupNextSend(struct bcm_mini_adapter *Adapter, /**<Logical Adapter*/
- struct sk_buff *Packet, /**<data buffer*/
- USHORT Vcid);
-
-VOID LinkMessage(struct bcm_mini_adapter *Adapter);
-
-VOID transmit_packets(struct bcm_mini_adapter *Adapter);
-
-INT SendControlPacket(struct bcm_mini_adapter *Adapter, /**<Logical Adapter*/
- char *pControlPacket/**<Control Packet*/
- );
-
-int register_networkdev(struct bcm_mini_adapter *Adapter);
-
-void unregister_networkdev(struct bcm_mini_adapter *Adapter);
-
-INT AllocAdapterDsxBuffer(struct bcm_mini_adapter *Adapter);
-
-VOID AdapterFree(struct bcm_mini_adapter *Adapter);
-
-INT FreeAdapterDsxBuffer(struct bcm_mini_adapter *Adapter);
-
-int tx_pkt_handler(struct bcm_mini_adapter *Adapter);
-
-int reset_card_proc(struct bcm_mini_adapter *Adapter);
-
-int run_card_proc(struct bcm_mini_adapter *Adapter);
-
-int InitCardAndDownloadFirmware(struct bcm_mini_adapter *ps_adapter);
-
-INT ReadMacAddressFromNVM(struct bcm_mini_adapter *Adapter);
-
-int register_control_device_interface(struct bcm_mini_adapter *ps_adapter);
-
-void DumpPackInfo(struct bcm_mini_adapter *Adapter);
-
-int rdm(struct bcm_mini_adapter *Adapter, UINT uiAddress, PCHAR pucBuff, size_t size);
-
-int wrm(struct bcm_mini_adapter *Adapter, UINT uiAddress, PCHAR pucBuff, size_t size);
-
-int wrmalt(struct bcm_mini_adapter *Adapter, UINT uiAddress, unsigned int *pucBuff, size_t sSize);
-
-int rdmalt(struct bcm_mini_adapter *Adapter, UINT uiAddress, unsigned int *pucBuff, size_t sSize);
-
-int get_dsx_sf_data_to_application(struct bcm_mini_adapter *Adapter, UINT uiSFId, void __user *user_buffer);
-
-void SendIdleModeResponse(struct bcm_mini_adapter *Adapter);
-
-int ProcessGetHostMibs(struct bcm_mini_adapter *Adapter, struct bcm_host_stats_mibs *buf);
-
-void GetDroppedAppCntrlPktMibs(struct bcm_host_stats_mibs *ioBuffer, struct bcm_tarang_data *pTarang);
-
-void beceem_parse_target_struct(struct bcm_mini_adapter *Adapter);
-
-int bcm_ioctl_fw_download(struct bcm_mini_adapter *Adapter, struct bcm_firmware_info *psFwInfo);
-
-void CopyMIBSExtendedSFParameters(struct bcm_mini_adapter *Adapter,
- struct bcm_connect_mgr_params *psfLocalSet, UINT uiSearchRuleIndex);
-
-VOID ResetCounters(struct bcm_mini_adapter *Adapter);
-
-int InitLedSettings(struct bcm_mini_adapter *Adapter);
-
-struct bcm_classifier_rule *GetFragIPClsEntry(struct bcm_mini_adapter *Adapter, USHORT usIpIdentification, ULONG SrcIP);
-
-void AddFragIPClsEntry(struct bcm_mini_adapter *Adapter, struct bcm_fragmented_packet_info *psFragPktInfo);
-
-void DelFragIPClsEntry(struct bcm_mini_adapter *Adapter, USHORT usIpIdentification, ULONG SrcIp);
-
-void update_per_cid_rx(struct bcm_mini_adapter *Adapter);
-
-void update_per_sf_desc_cnts(struct bcm_mini_adapter *Adapter);
-
-void ClearTargetDSXBuffer(struct bcm_mini_adapter *Adapter, B_UINT16 TID, bool bFreeAll);
-
-void flush_queue(struct bcm_mini_adapter *Adapter, UINT iQIndex);
-
-INT flushAllAppQ(VOID);
-
-INT BeceemEEPROMBulkRead(
- struct bcm_mini_adapter *Adapter,
- PUINT pBuffer,
- UINT uiOffset,
- UINT uiNumBytes);
-
-INT WriteBeceemEEPROM(struct bcm_mini_adapter *Adapter, UINT uiEEPROMOffset, UINT uiData);
-
-INT PropagateCalParamsFromFlashToMemory(struct bcm_mini_adapter *Adapter);
-
-INT BeceemEEPROMBulkWrite(
- struct bcm_mini_adapter *Adapter,
- PUCHAR pBuffer,
- UINT uiOffset,
- UINT uiNumBytes,
- bool bVerify);
-
-INT ReadBeceemEEPROM(struct bcm_mini_adapter *Adapter, UINT dwAddress, UINT *pdwData);
-
-INT BeceemNVMRead(
- struct bcm_mini_adapter *Adapter,
- PUINT pBuffer,
- UINT uiOffset,
- UINT uiNumBytes);
-
-INT BeceemNVMWrite(
- struct bcm_mini_adapter *Adapter,
- PUINT pBuffer,
- UINT uiOffset,
- UINT uiNumBytes,
- bool bVerify);
-
-INT BcmInitNVM(struct bcm_mini_adapter *Adapter);
-
-INT BcmUpdateSectorSize(struct bcm_mini_adapter *Adapter, UINT uiSectorSize);
-
-bool IsSectionExistInFlash(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val section);
-
-INT BcmGetFlash2xSectionalBitMap(struct bcm_mini_adapter *Adapter, struct bcm_flash2x_bitmap *psFlash2xBitMap);
-
-INT BcmFlash2xBulkWrite(
- struct bcm_mini_adapter *Adapter,
- PUINT pBuffer,
- enum bcm_flash2x_section_val eFlashSectionVal,
- UINT uiOffset,
- UINT uiNumBytes,
- UINT bVerify);
-
-INT BcmFlash2xBulkRead(
- struct bcm_mini_adapter *Adapter,
- PUINT pBuffer,
- enum bcm_flash2x_section_val eFlashSectionVal,
- UINT uiOffsetWithinSectionVal,
- UINT uiNumBytes);
-
-INT BcmGetSectionValStartOffset(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val eFlashSectionVal);
-
-INT BcmSetActiveSection(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val eFlash2xSectVal);
-
-INT BcmAllocFlashCSStructure(struct bcm_mini_adapter *psAdapter);
-
-INT BcmDeAllocFlashCSStructure(struct bcm_mini_adapter *psAdapter);
-
-INT BcmCopyISO(struct bcm_mini_adapter *Adapter, struct bcm_flash2x_copy_section sCopySectStrut);
-
-INT BcmFlash2xCorruptSig(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val eFlash2xSectionVal);
-
-INT BcmFlash2xWriteSig(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val eFlashSectionVal);
-
-INT validateFlash2xReadWrite(struct bcm_mini_adapter *Adapter, struct bcm_flash2x_readwrite *psFlash2xReadWrite);
-
-INT IsFlash2x(struct bcm_mini_adapter *Adapter);
-
-INT BcmCopySection(struct bcm_mini_adapter *Adapter,
- enum bcm_flash2x_section_val SrcSection,
- enum bcm_flash2x_section_val DstSection,
- UINT offset,
- UINT numOfBytes);
-
-bool IsNonCDLessDevice(struct bcm_mini_adapter *Adapter);
-
-VOID OverrideServiceFlowParams(struct bcm_mini_adapter *Adapter, PUINT puiBuffer);
-
-int wrmaltWithLock(struct bcm_mini_adapter *Adapter, UINT uiAddress, unsigned int *pucBuff, size_t sSize);
-
-int rdmaltWithLock(struct bcm_mini_adapter *Adapter, UINT uiAddress, unsigned int *pucBuff, size_t sSize);
-
-int wrmWithLock(struct bcm_mini_adapter *Adapter, UINT uiAddress, PCHAR pucBuff, size_t size);
-
-INT buffDnldVerify(struct bcm_mini_adapter *Adapter, unsigned char *mappedbuffer, unsigned int u32FirmwareLength,
- unsigned long u32StartingAddress);
-
-VOID putUsbSuspend(struct work_struct *work);
-
-bool IsReqGpioIsLedInNVM(struct bcm_mini_adapter *Adapter, UINT gpios);
-
-#endif
diff --git a/drivers/staging/bcm/Qos.c b/drivers/staging/bcm/Qos.c
deleted file mode 100644
index b3ac614cd35..00000000000
--- a/drivers/staging/bcm/Qos.c
+++ /dev/null
@@ -1,1200 +0,0 @@
-/**
- * @file Qos.c
- * This file contains the routines related to Quality of Service.
- */
-#include "headers.h"
-
-static void EThCSGetPktInfo(struct bcm_mini_adapter *Adapter,
- PVOID pvEthPayload,
- struct bcm_eth_packet_info *pstEthCsPktInfo);
-
-static bool EThCSClassifyPkt(struct bcm_mini_adapter *Adapter,
- struct sk_buff *skb,
- struct bcm_eth_packet_info *pstEthCsPktInfo,
- struct bcm_classifier_rule *pstClassifierRule,
- B_UINT8 EthCSCupport);
-
-static USHORT IpVersion4(struct bcm_mini_adapter *Adapter, struct iphdr *iphd,
- struct bcm_classifier_rule *pstClassifierRule);
-
-static VOID PruneQueue(struct bcm_mini_adapter *Adapter, INT iIndex);
-
-
-/*******************************************************************
-* Function - MatchSrcIpAddress()
-*
-* Description - Checks whether the source IP address from the packet
-*               matches that of the queue.
-*
-* Parameters - pstClassifierRule: Pointer to the classifier rule.
-*            - ulSrcIP : Source IP address from the packet.
-*
-* Returns - TRUE (if the address matches), else false.
-*********************************************************************/
-static bool MatchSrcIpAddress(struct bcm_classifier_rule *pstClassifierRule,
- ULONG ulSrcIP)
-{
- UCHAR ucLoopIndex = 0;
-
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- union u_ip_address *src_addr;
-
- ulSrcIP = ntohl(ulSrcIP);
- if (0 == pstClassifierRule->ucIPSourceAddressLength)
- return TRUE;
- for (ucLoopIndex = 0;
- ucLoopIndex < (pstClassifierRule->ucIPSourceAddressLength);
- ucLoopIndex++) {
- src_addr = &pstClassifierRule->stSrcIpAddress;
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Src Ip Address Mask:0x%x PacketIp:0x%x and Classification:0x%x",
- (UINT)src_addr->ulIpv4Mask[ucLoopIndex],
- (UINT)ulSrcIP,
- (UINT)src_addr->ulIpv6Addr[ucLoopIndex]);
-
- if ((src_addr->ulIpv4Mask[ucLoopIndex] & ulSrcIP) ==
- (src_addr->ulIpv4Addr[ucLoopIndex] &
- src_addr->ulIpv4Mask[ucLoopIndex]))
- return TRUE;
- }
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Src Ip Address Not Matched");
- return false;
-}
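The address check above is a masked compare: a packet address matches one rule entry when (mask & packet_ip) == (rule_ip & mask), and an empty address list means the rule places no constraint on the address. A minimal sketch of that predicate with hypothetical names:

/*
 * Sketch of the masked IPv4 match used above (hypothetical names).
 * Addresses are in host byte order, as in MatchSrcIpAddress().
 */
#include <stdbool.h>

static bool toy_ip_match(const unsigned long *rule_addr,
			 const unsigned long *rule_mask,
			 unsigned int n_entries, unsigned long pkt_ip)
{
	unsigned int i;

	if (n_entries == 0)		/* no address criterion configured */
		return true;

	for (i = 0; i < n_entries; i++)
		if ((rule_mask[i] & pkt_ip) == (rule_addr[i] & rule_mask[i]))
			return true;

	return false;
}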
-
-
-/*******************************************************************
-* Function - MatchDestIpAddress()
-*
-* Description - Checks whether the destination IP address from the packet
-*               matches that of the queue.
-*
-* Parameters - pstClassifierRule: Pointer to the classifier rule.
-*            - ulDestIP : Destination IP address from the packet.
-*
-* Returns - TRUE (if the address matches), else false.
-*********************************************************************/
-static bool MatchDestIpAddress(struct bcm_classifier_rule *pstClassifierRule, ULONG ulDestIP)
-{
- UCHAR ucLoopIndex = 0;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- union u_ip_address *dest_addr = &pstClassifierRule->stDestIpAddress;
-
- ulDestIP = ntohl(ulDestIP);
- if (0 == pstClassifierRule->ucIPDestinationAddressLength)
- return TRUE;
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Destination Ip Address 0x%x 0x%x 0x%x ",
- (UINT)ulDestIP,
- (UINT)dest_addr->ulIpv4Mask[ucLoopIndex],
- (UINT)dest_addr->ulIpv4Addr[ucLoopIndex]);
-
- for (ucLoopIndex = 0;
- ucLoopIndex < (pstClassifierRule->ucIPDestinationAddressLength);
- ucLoopIndex++) {
- if ((dest_addr->ulIpv4Mask[ucLoopIndex] & ulDestIP) ==
- (dest_addr->ulIpv4Addr[ucLoopIndex] &
- dest_addr->ulIpv4Mask[ucLoopIndex]))
- return TRUE;
- }
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Destination Ip Address Not Matched");
- return false;
-}
-
-
-/************************************************************************
-* Function - MatchTos()
-*
-* Description - Checks whether the TOS from the packet matches that of the queue.
-*
-* Parameters - pstClassifierRule : Pointer to the classifier rule.
-*            - ucTypeOfService: TOS from the packet.
-*
-* Returns - TRUE (if the TOS matches), else false.
-**************************************************************************/
-static bool MatchTos(struct bcm_classifier_rule *pstClassifierRule,
- UCHAR ucTypeOfService)
-{
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- if (3 != pstClassifierRule->ucIPTypeOfServiceLength)
- return TRUE;
-
- if (((pstClassifierRule->ucTosMask & ucTypeOfService) <=
- pstClassifierRule->ucTosHigh) &&
- ((pstClassifierRule->ucTosMask & ucTypeOfService) >=
- pstClassifierRule->ucTosLow))
- return TRUE;
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Type Of Service Not Matched");
- return false;
-}
-
-
-/***************************************************************************
-* Function - MatchProtocol()
-*
-* Description - Checks whether the protocol from the packet matches that of the queue.
-*
-* Parameters - pstClassifierRule: Pointer to the classifier rule.
-*            - ucProtocol : Protocol from the packet.
-*
-* Returns - TRUE (if the protocol matches), else false.
-****************************************************************************/
-bool MatchProtocol(struct bcm_classifier_rule *pstClassifierRule,
- UCHAR ucProtocol)
-{
- UCHAR ucLoopIndex = 0;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- if (0 == pstClassifierRule->ucProtocolLength)
- return TRUE;
- for (ucLoopIndex = 0;
- ucLoopIndex < pstClassifierRule->ucProtocolLength;
- ucLoopIndex++) {
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Protocol:0x%X Classification Protocol:0x%X",
- ucProtocol,
- pstClassifierRule->ucProtocol[ucLoopIndex]);
- if (pstClassifierRule->ucProtocol[ucLoopIndex] == ucProtocol)
- return TRUE;
- }
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Protocol Not Matched");
- return false;
-}
-
-
-/***********************************************************************
-* Function - MatchSrcPort()
-*
-* Description - Checks whether the source port from the packet matches that of the queue.
-*
-* Parameters - pstClassifierRule: Pointer to the classifier rule.
-*            - ushSrcPort : Source port from the packet.
-*
-* Returns - TRUE (if the port matches), else false.
-***************************************************************************/
-bool MatchSrcPort(struct bcm_classifier_rule *pstClassifierRule,
- USHORT ushSrcPort)
-{
- UCHAR ucLoopIndex = 0;
-
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
-
- if (0 == pstClassifierRule->ucSrcPortRangeLength)
- return TRUE;
- for (ucLoopIndex = 0;
- ucLoopIndex < pstClassifierRule->ucSrcPortRangeLength;
- ucLoopIndex++) {
- if (ushSrcPort <= pstClassifierRule->usSrcPortRangeHi[ucLoopIndex] &&
- ushSrcPort >= pstClassifierRule->usSrcPortRangeLo[ucLoopIndex])
- return TRUE;
- }
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Src Port: %x Not Matched ",
- ushSrcPort);
- return false;
-}
-
-
-/***********************************************************************
-* Function - MatchDestPort()
-*
-* Description - Checks whether the destination port from the packet matches that of the queue.
-*
-* Parameters - pstClassifierRule: Pointer to the classifier rule.
-*            - ushDestPort : Destination port from the packet.
-*
-* Returns - TRUE (if the port matches), else false.
-***************************************************************************/
-bool MatchDestPort(struct bcm_classifier_rule *pstClassifierRule,
- USHORT ushDestPort)
-{
- UCHAR ucLoopIndex = 0;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- if (0 == pstClassifierRule->ucDestPortRangeLength)
- return TRUE;
-
- for (ucLoopIndex = 0;
- ucLoopIndex < pstClassifierRule->ucDestPortRangeLength;
- ucLoopIndex++) {
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Matching Port:0x%X 0x%X 0x%X",
- ushDestPort,
- pstClassifierRule->usDestPortRangeLo[ucLoopIndex],
- pstClassifierRule->usDestPortRangeHi[ucLoopIndex]);
-
- if (ushDestPort <= pstClassifierRule->usDestPortRangeHi[ucLoopIndex] &&
- ushDestPort >= pstClassifierRule->usDestPortRangeLo[ucLoopIndex])
- return TRUE;
- }
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Dest Port: %x Not Matched",
- ushDestPort);
- return false;
-}
-/**
- * @ingroup tx_functions
- * Compares the IPv4 addresses, TOS, protocol and port numbers of a packet
- * against a classifier rule.
- * @return TRUE if the classification succeeds, else false.
- */
-static USHORT IpVersion4(struct bcm_mini_adapter *Adapter,
- struct iphdr *iphd,
- struct bcm_classifier_rule *pstClassifierRule)
-{
- struct bcm_transport_header *xprt_hdr = NULL;
- bool bClassificationSucceed = false;
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "========>");
-
- xprt_hdr = (struct bcm_transport_header *)((PUCHAR)iphd + sizeof(struct iphdr));
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Trying to see Direction = %d %d",
- pstClassifierRule->ucDirection,
- pstClassifierRule->usVCID_Value);
-
- /* Checking classifier validity */
- if (!pstClassifierRule->bUsed ||
- pstClassifierRule->ucDirection == DOWNLINK_DIR)
- goto out;
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "is IPv6 check!");
- if (pstClassifierRule->bIpv6Protocol)
- goto out;
-
- /* Checking IP header parameter */
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Trying to match Source IP Address");
- if (!MatchSrcIpAddress(pstClassifierRule, iphd->saddr))
- goto out;
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Source IP Address Matched");
-
- if (!MatchDestIpAddress(pstClassifierRule, iphd->daddr))
- goto out;
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Destination IP Address Matched");
-
- if (!MatchTos(pstClassifierRule, iphd->tos)) {
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "TOS Match failed\n");
- goto out;
- }
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "TOS Matched");
-
- if (!MatchProtocol(pstClassifierRule, iphd->protocol))
- goto out;
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Protocol Matched");
-
- /*
- * if protocol is not TCP or UDP then no
- * need of comparing source port and destination port
- */
- if (iphd->protocol != TCP && iphd->protocol != UDP) {
- bClassificationSucceed = TRUE;
- goto out;
- }
- /* Checking Transport Layer Header field if present */
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Source Port %04x",
- (iphd->protocol == UDP) ? xprt_hdr->uhdr.source : xprt_hdr->thdr.source);
-
- if (!MatchSrcPort(pstClassifierRule,
- ntohs((iphd->protocol == UDP) ?
- xprt_hdr->uhdr.source : xprt_hdr->thdr.source)))
- goto out;
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Src Port Matched");
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Destination Port %04x",
- (iphd->protocol == UDP) ? xprt_hdr->uhdr.dest :
- xprt_hdr->thdr.dest);
-
- if (!MatchDestPort(pstClassifierRule,
- ntohs((iphd->protocol == UDP) ?
- xprt_hdr->uhdr.dest : xprt_hdr->thdr.dest)))
- goto out;
- bClassificationSucceed = TRUE;
-
-out:
- if (TRUE == bClassificationSucceed) {
- INT iMatchedSFQueueIndex = 0;
-
- iMatchedSFQueueIndex =
- SearchSfid(Adapter, pstClassifierRule->ulSFID);
- if (iMatchedSFQueueIndex >= NO_OF_QUEUES)
- bClassificationSucceed = false;
- else if (false == Adapter->PackInfo[iMatchedSFQueueIndex].bActive)
- bClassificationSucceed = false;
- }
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "IpVersion4 <==========");
-
- return bClassificationSucceed;
-}
-
-VOID PruneQueueAllSF(struct bcm_mini_adapter *Adapter)
-{
- UINT iIndex = 0;
-
- for (iIndex = 0; iIndex < HiPriority; iIndex++) {
- if (!Adapter->PackInfo[iIndex].bValid)
- continue;
-
- PruneQueue(Adapter, iIndex);
- }
-}
-
-
-/**
- * @ingroup tx_functions
- * This function checks whether the number of bytes in a queue exceeds the
- * queue's maximum size. If so, it drops packets from the head until the
- * number of bytes is less than or equal to the maximum queue size for the
- * queue.
- */
-static VOID PruneQueue(struct bcm_mini_adapter *Adapter, INT iIndex)
-{
- struct sk_buff *PacketToDrop = NULL;
- struct net_device_stats *netstats;
- struct bcm_packet_info *curr_pack_info = &Adapter->PackInfo[iIndex];
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- PRUNE_QUEUE,
- DBG_LVL_ALL,
- "=====> Index %d",
- iIndex);
-
- if (iIndex == HiPriority)
- return;
-
- if (!Adapter || (iIndex < 0) || (iIndex > HiPriority))
- return;
-
- /* To Store the netdevice statistic */
- netstats = &Adapter->dev->stats;
-
- spin_lock_bh(&curr_pack_info->SFQueueLock);
-
- while (1) {
-/* while((UINT)curr_pack_info->uiCurrentPacketsOnHost >
- SF_MAX_ALLOWED_PACKETS_TO_BACKUP) { */
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- PRUNE_QUEUE,
- DBG_LVL_ALL,
- "uiCurrentBytesOnHost:%x uiMaxBucketSize :%x",
- curr_pack_info->uiCurrentBytesOnHost,
- curr_pack_info->uiMaxBucketSize);
-
- PacketToDrop = curr_pack_info->FirstTxQueue;
-
- if (PacketToDrop == NULL)
- break;
- if ((curr_pack_info->uiCurrentPacketsOnHost <
- SF_MAX_ALLOWED_PACKETS_TO_BACKUP) &&
- ((1000*(jiffies - *((B_UINT32 *)(PacketToDrop->cb) +
- SKB_CB_LATENCY_OFFSET))/HZ) <=
- curr_pack_info->uiMaxLatency))
- break;
-
- if (PacketToDrop) {
- if (netif_msg_tx_err(Adapter))
- pr_info(PFX "%s: tx queue %d overlimit\n",
- Adapter->dev->name, iIndex);
-
- netstats->tx_dropped++;
-
- DEQUEUEPACKET(curr_pack_info->FirstTxQueue,
- curr_pack_info->LastTxQueue);
- /* update current bytes and packets count */
- curr_pack_info->uiCurrentBytesOnHost -=
- PacketToDrop->len;
- curr_pack_info->uiCurrentPacketsOnHost--;
- /* update dropped bytes and packets counts */
- curr_pack_info->uiDroppedCountBytes += PacketToDrop->len;
- curr_pack_info->uiDroppedCountPackets++;
- dev_kfree_skb(PacketToDrop);
-
- }
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- PRUNE_QUEUE,
- DBG_LVL_ALL,
- "Dropped Bytes:%x Dropped Packets:%x",
- curr_pack_info->uiDroppedCountBytes,
- curr_pack_info->uiDroppedCountPackets);
-
- atomic_dec(&Adapter->TotalPacketCount);
- }
-
- spin_unlock_bh(&curr_pack_info->SFQueueLock);
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- PRUNE_QUEUE,
- DBG_LVL_ALL,
- "TotalPacketCount:%x",
- atomic_read(&Adapter->TotalPacketCount));
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- PRUNE_QUEUE,
- DBG_LVL_ALL,
- "<=====");
-}
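The prune loop keeps a packet only while the queue is within its packet budget and the packet's queueing latency is within uiMaxLatency; the latency is derived from the jiffies timestamp stashed in skb->cb at enqueue time, open-coded as 1000 * delta / HZ milliseconds (mainline code would normally use jiffies_to_msecs() for this conversion). A hedged sketch of that age test; the parameter names are illustrative:

/*
 * Sketch of the age test used by PruneQueue(); enqueue_jiffies stands for
 * the timestamp stored in skb->cb when the packet was queued.
 */
static int toy_packet_too_old(unsigned long enqueue_jiffies,
			      unsigned long now_jiffies,
			      unsigned long max_latency_ms, unsigned long hz)
{
	unsigned long age_ms = 1000 * (now_jiffies - enqueue_jiffies) / hz;

	return age_ms > max_latency_ms;
}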
-
-VOID flush_all_queues(struct bcm_mini_adapter *Adapter)
-{
- INT iQIndex;
- UINT uiTotalPacketLength;
- struct sk_buff *PacketToDrop = NULL;
- struct bcm_packet_info *curr_packet_info;
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_OTHERS,
- DUMP_INFO,
- DBG_LVL_ALL,
- "=====>");
-
- /* down(&Adapter->data_packet_queue_lock); */
- for (iQIndex = LowPriority; iQIndex < HiPriority; iQIndex++) {
- struct net_device_stats *netstats = &Adapter->dev->stats;
-
- curr_packet_info = &Adapter->PackInfo[iQIndex];
-
- spin_lock_bh(&curr_packet_info->SFQueueLock);
- while (curr_packet_info->FirstTxQueue) {
- PacketToDrop = curr_packet_info->FirstTxQueue;
- if (PacketToDrop) {
- uiTotalPacketLength = PacketToDrop->len;
- netstats->tx_dropped++;
- } else
- uiTotalPacketLength = 0;
-
- DEQUEUEPACKET(curr_packet_info->FirstTxQueue,
- curr_packet_info->LastTxQueue);
-
- /* Free the skb */
- dev_kfree_skb(PacketToDrop);
-
- /* update current bytes and packets count */
- curr_packet_info->uiCurrentBytesOnHost -= uiTotalPacketLength;
- curr_packet_info->uiCurrentPacketsOnHost--;
-
- /* update dropped bytes and packets counts */
- curr_packet_info->uiDroppedCountBytes += uiTotalPacketLength;
- curr_packet_info->uiDroppedCountPackets++;
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_OTHERS,
- DUMP_INFO,
- DBG_LVL_ALL,
- "Dropped Bytes:%x Dropped Packets:%x",
- curr_packet_info->uiDroppedCountBytes,
- curr_packet_info->uiDroppedCountPackets);
- atomic_dec(&Adapter->TotalPacketCount);
- }
- spin_unlock_bh(&curr_packet_info->SFQueueLock);
- }
- /* up(&Adapter->data_packet_queue_lock); */
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_OTHERS,
- DUMP_INFO,
- DBG_LVL_ALL,
- "<=====");
-}
-
-USHORT ClassifyPacket(struct bcm_mini_adapter *Adapter, struct sk_buff *skb)
-{
- INT uiLoopIndex = 0;
- struct bcm_classifier_rule *pstClassifierRule = NULL;
- struct bcm_eth_packet_info stEthCsPktInfo;
- PVOID pvEThPayload = NULL;
- struct iphdr *pIpHeader = NULL;
- INT uiSfIndex = 0;
- USHORT usIndex = Adapter->usBestEffortQueueIndex;
- bool bFragmentedPkt = false, bClassificationSucceed = false;
- USHORT usCurrFragment = 0;
-
- struct bcm_tcp_header *pTcpHeader;
- UCHAR IpHeaderLength;
- UCHAR TcpHeaderLength;
-
- pvEThPayload = skb->data;
- *((UINT32 *) (skb->cb) + SKB_CB_TCPACK_OFFSET) = 0;
- EThCSGetPktInfo(Adapter, pvEThPayload, &stEthCsPktInfo);
-
- switch (stEthCsPktInfo.eNwpktEthFrameType) {
- case eEth802LLCFrame:
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "ClassifyPacket : 802LLCFrame\n");
- pIpHeader = pvEThPayload + sizeof(struct bcm_eth_llc_frame);
- break;
- case eEth802LLCSNAPFrame:
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "ClassifyPacket : 802LLC SNAP Frame\n");
- pIpHeader = pvEThPayload +
- sizeof(struct bcm_eth_llc_snap_frame);
- break;
- case eEth802QVLANFrame:
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "ClassifyPacket : 802.1Q VLANFrame\n");
- pIpHeader = pvEThPayload + sizeof(struct bcm_eth_q_frame);
- break;
- case eEthOtherFrame:
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "ClassifyPacket : ETH Other Frame\n");
- pIpHeader = pvEThPayload + sizeof(struct bcm_ethernet2_frame);
- break;
- default:
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "ClassifyPacket : Unrecognized ETH Frame\n");
- pIpHeader = pvEThPayload + sizeof(struct bcm_ethernet2_frame);
- break;
- }
-
- if (stEthCsPktInfo.eNwpktIPFrameType == eIPv4Packet) {
- usCurrFragment = (ntohs(pIpHeader->frag_off) & IP_OFFSET);
- if ((ntohs(pIpHeader->frag_off) & IP_MF) || usCurrFragment)
- bFragmentedPkt = TRUE;
-
- if (bFragmentedPkt) {
- /* Fragmented Packet. Get Frag Classifier Entry. */
- pstClassifierRule = GetFragIPClsEntry(Adapter,
- pIpHeader->id,
- pIpHeader->saddr);
- if (pstClassifierRule) {
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "It is next Fragmented pkt");
- bClassificationSucceed = TRUE;
- }
- if (!(ntohs(pIpHeader->frag_off) & IP_MF)) {
-				/* Last fragment of the packet. Remove the Frag Classifier entry. */
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "This is the last fragmented Pkt");
- DelFragIPClsEntry(Adapter,
- pIpHeader->id,
- pIpHeader->saddr);
- }
- }
- }
-
- for (uiLoopIndex = MAX_CLASSIFIERS - 1; uiLoopIndex >= 0; uiLoopIndex--) {
- if (bClassificationSucceed)
- break;
-		/*
-		 * Iterate through all classifiers, which are already ordered
-		 * by priority, to classify the packet until a match is found.
-		 */
- if (false == Adapter->astClassifierTable[uiLoopIndex].bUsed) {
- bClassificationSucceed = false;
- continue;
- }
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Adapter->PackInfo[%d].bvalid=True\n",
- uiLoopIndex);
-
- if (0 == Adapter->astClassifierTable[uiLoopIndex].ucDirection) {
- bClassificationSucceed = false; /* cannot be processed for classification. */
- continue; /* it is a down link connection */
- }
-
- pstClassifierRule = &Adapter->astClassifierTable[uiLoopIndex];
-
- uiSfIndex = SearchSfid(Adapter, pstClassifierRule->ulSFID);
- if (uiSfIndex >= NO_OF_QUEUES) {
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Queue Not Valid. SearchSfid for this classifier Failed\n");
- continue;
- }
-
- if (Adapter->PackInfo[uiSfIndex].bEthCSSupport) {
-
- if (eEthUnsupportedFrame == stEthCsPktInfo.eNwpktEthFrameType) {
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- " ClassifyPacket : Packet Not a Valid Supported Ethernet Frame\n");
- bClassificationSucceed = false;
- continue;
- }
-
-
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Performing ETH CS Classification on Classifier Rule ID : %x Service Flow ID : %lx\n",
- pstClassifierRule->uiClassifierRuleIndex,
- Adapter->PackInfo[uiSfIndex].ulSFID);
- bClassificationSucceed = EThCSClassifyPkt(Adapter,
- skb,
- &stEthCsPktInfo,
- pstClassifierRule,
- Adapter->PackInfo[uiSfIndex].bEthCSSupport);
-
- if (!bClassificationSucceed) {
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "ClassifyPacket : Ethernet CS Classification Failed\n");
- continue;
- }
- } else { /* No ETH Supported on this SF */
- if (eEthOtherFrame != stEthCsPktInfo.eNwpktEthFrameType) {
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- " ClassifyPacket : Packet Not a 802.3 Ethernet Frame... hence not allowed over non-ETH CS SF\n");
- bClassificationSucceed = false;
- continue;
- }
- }
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
-			    "Proceeding to IP CS Classification");
-
- if (Adapter->PackInfo[uiSfIndex].bIPCSSupport) {
-
- if (stEthCsPktInfo.eNwpktIPFrameType == eNonIPPacket) {
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- " ClassifyPacket : Packet is Not an IP Packet\n");
- bClassificationSucceed = false;
- continue;
- }
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Dump IP Header :\n");
- DumpFullPacket((PUCHAR)pIpHeader, 20);
-
- if (stEthCsPktInfo.eNwpktIPFrameType == eIPv4Packet)
- bClassificationSucceed = IpVersion4(Adapter,
- pIpHeader,
- pstClassifierRule);
- else if (stEthCsPktInfo.eNwpktIPFrameType == eIPv6Packet)
- bClassificationSucceed = IpVersion6(Adapter,
- pIpHeader,
- pstClassifierRule);
- }
- }
-
- if (bClassificationSucceed == TRUE) {
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "CF id : %d, SF ID is =%lu",
- pstClassifierRule->uiClassifierRuleIndex,
- pstClassifierRule->ulSFID);
-
- /* Store The matched Classifier in SKB */
- *((UINT32 *)(skb->cb)+SKB_CB_CLASSIFICATION_OFFSET) =
- pstClassifierRule->uiClassifierRuleIndex;
- if ((TCP == pIpHeader->protocol) && !bFragmentedPkt &&
- (ETH_AND_IP_HEADER_LEN + TCP_HEADER_LEN <=
- skb->len)) {
- IpHeaderLength = pIpHeader->ihl;
- pTcpHeader =
- (struct bcm_tcp_header *)(((PUCHAR)pIpHeader) +
- (IpHeaderLength*4));
- TcpHeaderLength = GET_TCP_HEADER_LEN(pTcpHeader->HeaderLength);
-
- if ((pTcpHeader->ucFlags & TCP_ACK) &&
- (ntohs(pIpHeader->tot_len) ==
- (IpHeaderLength*4)+(TcpHeaderLength*4)))
- *((UINT32 *) (skb->cb) + SKB_CB_TCPACK_OFFSET) =
- TCP_ACK;
- }
-
- usIndex = SearchSfid(Adapter, pstClassifierRule->ulSFID);
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "index is =%d",
- usIndex);
-
-		/*
-		 * If this is the first fragment of a fragmented packet,
-		 * add this CF. Only this CF should be used for all other
-		 * fragments of this packet.
-		 */
- if (bFragmentedPkt && (usCurrFragment == 0)) {
- /*
- * First Fragment of Fragmented Packet.
- * Create Frag CLS Entry
- */
- struct bcm_fragmented_packet_info stFragPktInfo;
-
- stFragPktInfo.bUsed = TRUE;
- stFragPktInfo.ulSrcIpAddress = pIpHeader->saddr;
- stFragPktInfo.usIpIdentification = pIpHeader->id;
- stFragPktInfo.pstMatchedClassifierEntry =
- pstClassifierRule;
- stFragPktInfo.bOutOfOrderFragment = false;
- AddFragIPClsEntry(Adapter, &stFragPktInfo);
- }
-
-
- }
-
- return bClassificationSucceed ? usIndex : INVALID_QUEUE_INDEX;
-}
-
-static bool EthCSMatchSrcMACAddress(struct bcm_classifier_rule *pstClassifierRule,
- PUCHAR Mac)
-{
- UINT i = 0;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- if (pstClassifierRule->ucEthCSSrcMACLen == 0)
- return TRUE;
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "%s\n", __func__);
- for (i = 0; i < MAC_ADDRESS_SIZE; i++) {
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "SRC MAC[%x] = %x ClassifierRuleSrcMAC = %x Mask : %x\n",
- i,
- Mac[i],
- pstClassifierRule->au8EThCSSrcMAC[i],
- pstClassifierRule->au8EThCSSrcMACMask[i]);
- if ((pstClassifierRule->au8EThCSSrcMAC[i] &
- pstClassifierRule->au8EThCSSrcMACMask[i]) !=
- (Mac[i] & pstClassifierRule->au8EThCSSrcMACMask[i]))
- return false;
- }
- return TRUE;
-}
-
-static bool EthCSMatchDestMACAddress(struct bcm_classifier_rule *pstClassifierRule,
- PUCHAR Mac)
-{
- UINT i = 0;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- if (pstClassifierRule->ucEthCSDestMACLen == 0)
- return TRUE;
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "%s\n",
- __func__);
- for (i = 0; i < MAC_ADDRESS_SIZE; i++) {
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "SRC MAC[%x] = %x ClassifierRuleSrcMAC = %x Mask : %x\n",
- i,
- Mac[i],
- pstClassifierRule->au8EThCSDestMAC[i],
- pstClassifierRule->au8EThCSDestMACMask[i]);
- if ((pstClassifierRule->au8EThCSDestMAC[i] &
- pstClassifierRule->au8EThCSDestMACMask[i]) !=
- (Mac[i] & pstClassifierRule->au8EThCSDestMACMask[i]))
- return false;
- }
- return TRUE;
-}
-
-static bool EthCSMatchEThTypeSAP(struct bcm_classifier_rule *pstClassifierRule,
- struct sk_buff *skb,
- struct bcm_eth_packet_info *pstEthCsPktInfo)
-{
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- if ((pstClassifierRule->ucEtherTypeLen == 0) ||
- (pstClassifierRule->au8EthCSEtherType[0] == 0))
- return TRUE;
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "%s SrcEtherType:%x CLS EtherType[0]:%x\n",
- __func__,
- pstEthCsPktInfo->usEtherType,
- pstClassifierRule->au8EthCSEtherType[0]);
- if (pstClassifierRule->au8EthCSEtherType[0] == 1) {
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "%s CLS EtherType[1]:%x EtherType[2]:%x\n",
- __func__,
- pstClassifierRule->au8EthCSEtherType[1],
- pstClassifierRule->au8EthCSEtherType[2]);
-
- if (memcmp(&pstEthCsPktInfo->usEtherType,
- &pstClassifierRule->au8EthCSEtherType[1],
- 2) == 0)
- return TRUE;
- else
- return false;
- }
-
- if (pstClassifierRule->au8EthCSEtherType[0] == 2) {
- if (eEth802LLCFrame != pstEthCsPktInfo->eNwpktEthFrameType)
- return false;
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "%s EthCS DSAP:%x EtherType[2]:%x\n",
- __func__,
- pstEthCsPktInfo->ucDSAP,
- pstClassifierRule->au8EthCSEtherType[2]);
- if (pstEthCsPktInfo->ucDSAP ==
- pstClassifierRule->au8EthCSEtherType[2])
- return TRUE;
- else
- return false;
-
- }
-
- return false;
-
-}
-
-static bool EthCSMatchVLANRules(struct bcm_classifier_rule *pstClassifierRule,
- struct sk_buff *skb,
- struct bcm_eth_packet_info *pstEthCsPktInfo)
-{
- bool bClassificationSucceed = false;
- USHORT usVLANID;
- B_UINT8 uPriority = 0;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "%s CLS UserPrio:%x CLS VLANID:%x\n",
- __func__,
- ntohs(*((USHORT *)pstClassifierRule->usUserPriority)),
- pstClassifierRule->usVLANID);
-
- /*
- * In case FW didn't receive the TLV,
- * the priority field should be ignored
- */
- if (pstClassifierRule->usValidityBitMap &
- (1<<PKT_CLASSIFICATION_USER_PRIORITY_VALID)) {
- if (pstEthCsPktInfo->eNwpktEthFrameType != eEth802QVLANFrame)
- return false;
-
- uPriority = (ntohs(*(USHORT *)(skb->data +
- sizeof(struct bcm_eth_header))) &
- 0xF000) >> 13;
-
- if ((uPriority >= pstClassifierRule->usUserPriority[0]) &&
- (uPriority <=
- pstClassifierRule->usUserPriority[1]))
- bClassificationSucceed = TRUE;
-
- if (!bClassificationSucceed)
- return false;
- }
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "ETH CS 802.1 D User Priority Rule Matched\n");
-
- bClassificationSucceed = false;
-
- if (pstClassifierRule->usValidityBitMap &
- (1<<PKT_CLASSIFICATION_VLANID_VALID)) {
- if (pstEthCsPktInfo->eNwpktEthFrameType != eEth802QVLANFrame)
- return false;
-
- usVLANID = ntohs(*(USHORT *)(skb->data +
- sizeof(struct bcm_eth_header))) & 0xFFF;
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "%s Pkt VLANID %x Priority: %d\n",
- __func__,
- usVLANID,
- uPriority);
-
- if (usVLANID == ((pstClassifierRule->usVLANID & 0xFFF0) >> 4))
- bClassificationSucceed = TRUE;
-
- if (!bClassificationSucceed)
- return false;
- }
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "ETH CS 802.1 Q VLAN ID Rule Matched\n");
-
- return TRUE;
-}
-
-
-static bool EThCSClassifyPkt(struct bcm_mini_adapter *Adapter,
- struct sk_buff *skb,
- struct bcm_eth_packet_info *pstEthCsPktInfo,
- struct bcm_classifier_rule *pstClassifierRule,
- B_UINT8 EthCSCupport)
-{
- bool bClassificationSucceed = false;
-
- bClassificationSucceed = EthCSMatchSrcMACAddress(pstClassifierRule,
- ((struct bcm_eth_header *)(skb->data))->au8SourceAddress);
- if (!bClassificationSucceed)
- return false;
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "ETH CS SrcMAC Matched\n");
-
- bClassificationSucceed = EthCSMatchDestMACAddress(pstClassifierRule,
- ((struct bcm_eth_header *)(skb->data))->au8DestinationAddress);
- if (!bClassificationSucceed)
- return false;
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "ETH CS DestMAC Matched\n");
-
- /* classify on ETHType/802.2SAP TLV */
- bClassificationSucceed = EthCSMatchEThTypeSAP(pstClassifierRule,
- skb,
- pstEthCsPktInfo);
- if (!bClassificationSucceed)
- return false;
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "ETH CS EthType/802.2SAP Matched\n");
-
- /* classify on 802.1VLAN Header Parameters */
- bClassificationSucceed = EthCSMatchVLANRules(pstClassifierRule,
- skb,
- pstEthCsPktInfo);
- if (!bClassificationSucceed)
- return false;
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "ETH CS 802.1 VLAN Rules Matched\n");
-
- return bClassificationSucceed;
-}
-
-static void EThCSGetPktInfo(struct bcm_mini_adapter *Adapter,
- PVOID pvEthPayload,
- struct bcm_eth_packet_info *pstEthCsPktInfo)
-{
- USHORT u16Etype = ntohs(
- ((struct bcm_eth_header *)pvEthPayload)->u16Etype);
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "EthCSGetPktInfo : Eth Hdr Type : %X\n",
- u16Etype);
- if (u16Etype > 0x5dc) {
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "EthCSGetPktInfo : ETH2 Frame\n");
- /* ETH2 Frame */
- if (u16Etype == ETHERNET_FRAMETYPE_802QVLAN) {
- /* 802.1Q VLAN Header */
- pstEthCsPktInfo->eNwpktEthFrameType = eEth802QVLANFrame;
- u16Etype = ((struct bcm_eth_q_frame *)pvEthPayload)->EthType;
- /* ((ETH_CS_802_Q_FRAME*)pvEthPayload)->UserPriority */
- } else {
- pstEthCsPktInfo->eNwpktEthFrameType = eEthOtherFrame;
- u16Etype = ntohs(u16Etype);
- }
- } else {
- /* 802.2 LLC */
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "802.2 LLC Frame\n");
- pstEthCsPktInfo->eNwpktEthFrameType = eEth802LLCFrame;
- pstEthCsPktInfo->ucDSAP =
- ((struct bcm_eth_llc_frame *)pvEthPayload)->DSAP;
- if (pstEthCsPktInfo->ucDSAP == 0xAA && ((struct bcm_eth_llc_frame *)pvEthPayload)->SSAP == 0xAA) {
- /* SNAP Frame */
- pstEthCsPktInfo->eNwpktEthFrameType = eEth802LLCSNAPFrame;
- u16Etype = ((struct bcm_eth_llc_snap_frame *)pvEthPayload)->usEtherType;
- }
- }
- if (u16Etype == ETHERNET_FRAMETYPE_IPV4)
- pstEthCsPktInfo->eNwpktIPFrameType = eIPv4Packet;
- else if (u16Etype == ETHERNET_FRAMETYPE_IPV6)
- pstEthCsPktInfo->eNwpktIPFrameType = eIPv6Packet;
- else
- pstEthCsPktInfo->eNwpktIPFrameType = eNonIPPacket;
-
- pstEthCsPktInfo->usEtherType = ((struct bcm_eth_header *)pvEthPayload)->u16Etype;
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "EthCsPktInfo->eNwpktIPFrameType : %x\n",
- pstEthCsPktInfo->eNwpktIPFrameType);
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "EthCsPktInfo->eNwpktEthFrameType : %x\n",
- pstEthCsPktInfo->eNwpktEthFrameType);
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "EthCsPktInfo->usEtherType : %x\n",
- pstEthCsPktInfo->usEtherType);
-}
-
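The frame-type decision in EThCSGetPktInfo above hinges on the 16-bit field that follows the two MAC addresses: values above 0x5DC (1500) are an EtherType (Ethernet II, with 0x8100 indicating an 802.1Q tag), anything lower is an 802.3 length field followed by an LLC header, and a DSAP/SSAP pair of 0xAA/0xAA marks a SNAP frame. A minimal standalone C sketch of that logic (made-up frame bytes, no driver types) might look like this:

/*
 * Standalone sketch of the EtherType/length decision (not driver code): the
 * 16-bit field after the MAC addresses is an EtherType when > 0x5DC, else an
 * 802.3 length field with an LLC header following; 0xAA/0xAA DSAP/SSAP = SNAP.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <arpa/inet.h>

enum frame_type { FRAME_ETH2, FRAME_8021Q, FRAME_LLC, FRAME_LLC_SNAP };

static enum frame_type classify_frame(const uint8_t *frame)
{
	uint16_t type_or_len;

	memcpy(&type_or_len, frame + 12, sizeof(type_or_len));
	type_or_len = ntohs(type_or_len);

	if (type_or_len > 0x5dc) {		/* EtherType: Ethernet II */
		if (type_or_len == 0x8100)	/* 802.1Q VLAN tag */
			return FRAME_8021Q;
		return FRAME_ETH2;
	}
	/* 802.3 length field: LLC header starts right after it */
	if (frame[14] == 0xAA && frame[15] == 0xAA)
		return FRAME_LLC_SNAP;		/* DSAP/SSAP == 0xAA: SNAP */
	return FRAME_LLC;
}

int main(void)
{
	uint8_t eth2_ipv4[16] = { 0 };

	eth2_ipv4[12] = 0x08;	/* EtherType 0x0800 (IPv4) */
	eth2_ipv4[13] = 0x00;
	printf("type = %d\n", classify_frame(eth2_ipv4));	/* FRAME_ETH2 */
	return 0;
}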
diff --git a/drivers/staging/bcm/Queue.h b/drivers/staging/bcm/Queue.h
deleted file mode 100644
index 460c0aee67f..00000000000
--- a/drivers/staging/bcm/Queue.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*************************************
-* Queue.h
-**************************************/
-#ifndef __QUEUE_H__
-#define __QUEUE_H__
-
-
-
-#define ENQUEUEPACKET(_Head, _Tail, _Packet) \
-do { \
- if (!_Head) { \
- _Head = _Packet; \
- } \
- else { \
- (_Tail)->next = _Packet; \
- } \
- (_Packet)->next = NULL; \
- _Tail = _Packet; \
-} while (0)
-#define DEQUEUEPACKET(Head, Tail) \
-do { \
- if (Head) { \
- if (!Head->next) { \
- Tail = NULL; \
- } \
- Head = Head->next; \
- } \
-} while (0)
-#endif /* __QUEUE_H__ */
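The two macros above implement an intrusive singly linked FIFO threaded through each packet's next pointer, giving O(1) enqueue at the tail and O(1) dequeue at the head. A small userspace sketch of the same head/tail discipline, using a plain struct rather than sk_buff, could look like:

/*
 * Minimal userspace sketch (assumed simplification, not driver code) of the
 * head/tail FIFO discipline behind ENQUEUEPACKET/DEQUEUEPACKET.
 */
#include <stdio.h>
#include <stddef.h>

struct pkt {
	int id;
	struct pkt *next;
};

static void enqueue(struct pkt **head, struct pkt **tail, struct pkt *p)
{
	if (!*head)
		*head = p;		/* first element: becomes the head */
	else
		(*tail)->next = p;	/* otherwise append after the tail */
	p->next = NULL;
	*tail = p;
}

static struct pkt *dequeue(struct pkt **head, struct pkt **tail)
{
	struct pkt *p = *head;

	if (p) {
		if (!p->next)
			*tail = NULL;	/* queue becomes empty */
		*head = p->next;
	}
	return p;
}

int main(void)
{
	struct pkt a = { 1, NULL }, b = { 2, NULL };
	struct pkt *head = NULL, *tail = NULL;

	enqueue(&head, &tail, &a);
	enqueue(&head, &tail, &b);
	printf("%d\n", dequeue(&head, &tail)->id);	/* 1 */
	printf("%d\n", dequeue(&head, &tail)->id);	/* 2 */
	return 0;
}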
diff --git a/drivers/staging/bcm/TODO b/drivers/staging/bcm/TODO
deleted file mode 100644
index 8467f45d08a..00000000000
--- a/drivers/staging/bcm/TODO
+++ /dev/null
@@ -1,26 +0,0 @@
-This driver is barely functional in its current state.
-
-Kevin McKinney (klmckinney1@gmail.com) and Matthias Beyer (mail@beyermatthias.de)
-are currently maintaining/cleaning up this driver. Please copy us on all
-patches. More maintainers are always welcome.
-
-BIG:
- - the existing API (/dev/tarang) should be replaced
-   Is it possible to use the same API as the Intel WiMAX stack and
-   share the same user-level components?
- - QoS and queue model is non-standard and inflexible.
-   Use the existing TC QoS infrastructure?
-
-TODO:
- - support more than one board - eliminate global variables
- - remove developer debug BCM_DEBUG() macros
- add a limited number of messages through netif_msg()
- - fix non-standard kernel style
- - checkpatch warnings
- - use request_firmware()
- - fix use of file I/O to load config with better API
- - merge some files together?
- - cleanup/eliminate debug messages
-
-
-
diff --git a/drivers/staging/bcm/Transmit.c b/drivers/staging/bcm/Transmit.c
deleted file mode 100644
index 622a482e982..00000000000
--- a/drivers/staging/bcm/Transmit.c
+++ /dev/null
@@ -1,271 +0,0 @@
-/**
- * @file Transmit.c
- * @defgroup tx_functions Transmission
- * @section Queueing
- * @dot
- * digraph transmit1 {
- * node[shape=box]
- * edge[weight=5;color=red]
- *
- * bcm_transmit->GetPacketQueueIndex[label="IP Packet"]
- * GetPacketQueueIndex->IpVersion4[label="IPV4"]
- * GetPacketQueueIndex->IpVersion6[label="IPV6"]
- * }
- *
- * @enddot
- *
- * @section De-Queueing
- * @dot
- * digraph transmit2 {
- * node[shape=box]
- * edge[weight=5;color=red]
- * interrupt_service_thread->transmit_packets
- * tx_pkt_hdler->transmit_packets
- * transmit_packets->CheckAndSendPacketFromIndex
- * transmit_packets->UpdateTokenCount
- * CheckAndSendPacketFromIndex->PruneQueue
- * CheckAndSendPacketFromIndex->IsPacketAllowedForFlow
- * CheckAndSendPacketFromIndex->SendControlPacket[label="control pkt"]
- * SendControlPacket->bcm_cmd53
- * CheckAndSendPacketFromIndex->SendPacketFromQueue[label="data pkt"]
- * SendPacketFromQueue->SetupNextSend->bcm_cmd53
- * }
- * @enddot
- */
-
-#include "headers.h"
-
-/**
- * @ingroup ctrl_pkt_functions
- * This function dispatches a control packet to the h/w interface
- * @return zero(success) or -ve value(failure)
- */
-int SendControlPacket(struct bcm_mini_adapter *Adapter, char *pControlPacket)
-{
- struct bcm_leader *PLeader = (struct bcm_leader *)pControlPacket;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Tx");
- if (!pControlPacket || !Adapter) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL,
- "Got NULL Control Packet or Adapter");
- return STATUS_FAILURE;
- }
- if ((atomic_read(&Adapter->CurrNumFreeTxDesc) <
- ((PLeader->PLength-1)/MAX_DEVICE_DESC_SIZE)+1)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL,
- "NO FREE DESCRIPTORS TO SEND CONTROL PACKET");
- return STATUS_FAILURE;
- }
-
- /* Update the netdevice statistics */
- /* Dump Packet */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL,
- "Leader Status: %x", PLeader->Status);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL,
- "Leader VCID: %x", PLeader->Vcid);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL,
- "Leader Length: %x", PLeader->PLength);
- if (Adapter->device_removed)
- return 0;
-
- if (netif_msg_pktdata(Adapter))
- print_hex_dump(KERN_DEBUG, PFX "tx control: ", DUMP_PREFIX_NONE,
- 16, 1, pControlPacket,
- PLeader->PLength + LEADER_SIZE, 0);
-
- Adapter->interface_transmit(Adapter->pvInterfaceAdapter,
- pControlPacket,
- (PLeader->PLength + LEADER_SIZE));
-
- atomic_dec(&Adapter->CurrNumFreeTxDesc);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL,
- "<=========");
- return STATUS_SUCCESS;
-}
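The descriptor check in SendControlPacket above is a ceiling division: a packet of PLength bytes needs ceil(PLength / MAX_DEVICE_DESC_SIZE) descriptors, written as (PLength - 1) / MAX_DEVICE_DESC_SIZE + 1 to stay in integer arithmetic (valid for PLength >= 1). A tiny sketch with an arbitrary example descriptor size, not the driver's value:

/* Sketch of the descriptor-count calculation; 2048 is only an example size. */
#include <stdio.h>

#define EXAMPLE_DESC_SIZE 2048u

static unsigned int descs_needed(unsigned int plength)
{
	return (plength - 1) / EXAMPLE_DESC_SIZE + 1;	/* ceil(plength / size) */
}

int main(void)
{
	printf("%u %u %u\n",
	       descs_needed(1),		/* 1 */
	       descs_needed(2048),	/* 1 */
	       descs_needed(2049));	/* 2 */
	return 0;
}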
-
-/**
- * @ingroup tx_functions
- * This function dispatches the IP packets with the given vcid
- * to the target via the host h/w interface.
- * @return zero(success) or -ve value(failure)
- */
-int SetupNextSend(struct bcm_mini_adapter *Adapter,
- struct sk_buff *Packet, USHORT Vcid)
-{
- int status = 0;
- bool bHeaderSupressionEnabled = false;
- B_UINT16 uiClassifierRuleID;
- u16 QueueIndex = skb_get_queue_mapping(Packet);
- struct bcm_packet_info *curr_packet_info =
- &Adapter->PackInfo[QueueIndex];
- struct bcm_leader Leader = {0};
-
- if (Packet->len > MAX_DEVICE_DESC_SIZE) {
- status = STATUS_FAILURE;
- goto errExit;
- }
-
- /* Get the Classifier Rule ID */
- uiClassifierRuleID = *((UINT32 *) (Packet->cb) +
- SKB_CB_CLASSIFICATION_OFFSET);
-
- bHeaderSupressionEnabled = curr_packet_info->bHeaderSuppressionEnabled &
- Adapter->bPHSEnabled;
-
- if (Adapter->device_removed) {
- status = STATUS_FAILURE;
- goto errExit;
- }
-
- status = PHSTransmit(Adapter, &Packet, Vcid, uiClassifierRuleID,
- bHeaderSupressionEnabled,
- (UINT *)&Packet->len,
- curr_packet_info->bEthCSSupport);
-
- if (status != STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,
- "PHS Transmit failed..\n");
- goto errExit;
- }
-
- Leader.Vcid = Vcid;
-
- if (TCP_ACK == *((UINT32 *) (Packet->cb) + SKB_CB_TCPACK_OFFSET))
- Leader.Status = LEADER_STATUS_TCP_ACK;
- else
- Leader.Status = LEADER_STATUS;
-
- if (curr_packet_info->bEthCSSupport) {
- Leader.PLength = Packet->len;
- if (skb_headroom(Packet) < LEADER_SIZE) {
- status = skb_cow(Packet, LEADER_SIZE);
- if (status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, NEXT_SEND,
- DBG_LVL_ALL,
- "bcm_transmit : Failed To Increase headRoom\n");
- goto errExit;
- }
- }
- skb_push(Packet, LEADER_SIZE);
- memcpy(Packet->data, &Leader, LEADER_SIZE);
- } else {
- Leader.PLength = Packet->len - ETH_HLEN;
- memcpy((struct bcm_leader *)skb_pull(Packet,
- (ETH_HLEN - LEADER_SIZE)),
- &Leader,
- LEADER_SIZE);
- }
-
- status = Adapter->interface_transmit(Adapter->pvInterfaceAdapter,
- Packet->data,
- (Leader.PLength + LEADER_SIZE));
- if (status) {
- ++Adapter->dev->stats.tx_errors;
- if (netif_msg_tx_err(Adapter))
- pr_info(PFX "%s: transmit error %d\n",
- Adapter->dev->name,
- status);
- } else {
- struct net_device_stats *netstats = &Adapter->dev->stats;
-
- curr_packet_info->uiTotalTxBytes += Leader.PLength;
-
- netstats->tx_bytes += Leader.PLength;
- ++netstats->tx_packets;
-
- curr_packet_info->uiCurrentTokenCount -= Leader.PLength << 3;
- curr_packet_info->uiSentBytes += (Packet->len);
- curr_packet_info->uiSentPackets++;
- curr_packet_info->NumOfPacketsSent++;
-
- atomic_dec(&curr_packet_info->uiPerSFTxResourceCount);
- curr_packet_info->uiThisPeriodSentBytes += Leader.PLength;
- }
-
- atomic_dec(&Adapter->CurrNumFreeTxDesc);
-
-errExit:
- dev_kfree_skb(Packet);
- return status;
-}
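SetupNextSend above places the leader header in one of two ways: with Ethernet CS enabled it pushes LEADER_SIZE bytes of headroom in front of the whole frame, otherwise it advances the data pointer by ETH_HLEN - LEADER_SIZE so the leader overwrites the tail of the Ethernet header. A simplified userspace sketch of those two placements follows (plain buffer and offsets, not the sk_buff API; LEADER_SIZE here is an example value, not the driver's):

/* Userspace sketch of the two leader placements; sizes are illustrative. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define LEADER_SIZE	8	/* example size only */
#define ETH_HLEN	14

struct buf {
	uint8_t data[256];
	size_t off;		/* current start of packet data */
	size_t len;
};

static void place_leader(struct buf *b, const uint8_t *leader, int eth_cs)
{
	if (eth_cs) {
		b->off -= LEADER_SIZE;			/* "push": grow into headroom */
		b->len += LEADER_SIZE;
	} else {
		b->off += ETH_HLEN - LEADER_SIZE;	/* "pull": drop part of the Eth header */
		b->len -= ETH_HLEN - LEADER_SIZE;
	}
	memcpy(b->data + b->off, leader, LEADER_SIZE);
}

int main(void)
{
	struct buf b = { .off = 64, .len = 100 };
	uint8_t leader[LEADER_SIZE] = { 0 };

	place_leader(&b, leader, 1);
	printf("off=%zu len=%zu\n", b.off, b.len);	/* off=56 len=108 */
	return 0;
}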
-
-static int tx_pending(struct bcm_mini_adapter *Adapter)
-{
- return (atomic_read(&Adapter->TxPktAvail)
- && MINIMUM_PENDING_DESCRIPTORS <
- atomic_read(&Adapter->CurrNumFreeTxDesc))
- || Adapter->device_removed || (1 == Adapter->downloadDDR);
-}
-
-/**
- * @ingroup tx_functions
- * Transmit thread
- */
-int tx_pkt_handler(struct bcm_mini_adapter *Adapter)
-{
- int status = 0;
-
- while (!kthread_should_stop()) {
-		/* FIXME - the timeout looks like a workaround
-		 * for racy usage of TxPktAvail
- */
- if (Adapter->LinkUpStatus)
- wait_event_timeout(Adapter->tx_packet_wait_queue,
- tx_pending(Adapter),
- msecs_to_jiffies(10));
- else
- wait_event_interruptible(Adapter->tx_packet_wait_queue,
- tx_pending(Adapter));
-
- if (Adapter->device_removed)
- break;
-
- if (Adapter->downloadDDR == 1) {
- Adapter->downloadDDR += 1;
- status = download_ddr_settings(Adapter);
- if (status)
- pr_err(PFX "DDR DOWNLOAD FAILED! %d\n", status);
- continue;
- }
-
- /* Check end point for halt/stall. */
- if (Adapter->bEndPointHalted == TRUE) {
- Bcm_clear_halt_of_endpoints(Adapter);
- Adapter->bEndPointHalted = false;
- StartInterruptUrb((struct bcm_interface_adapter *)
- (Adapter->pvInterfaceAdapter));
- }
-
- if (Adapter->LinkUpStatus && !Adapter->IdleMode) {
- if (atomic_read(&Adapter->TotalPacketCount))
- update_per_sf_desc_cnts(Adapter);
- }
-
- if (atomic_read(&Adapter->CurrNumFreeTxDesc) &&
- Adapter->LinkStatus == SYNC_UP_REQUEST &&
- !Adapter->bSyncUpRequestSent) {
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS,
- DBG_LVL_ALL, "Calling LinkMessage");
- LinkMessage(Adapter);
- }
-
- if ((Adapter->IdleMode || Adapter->bShutStatus) &&
- atomic_read(&Adapter->TotalPacketCount)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX,
- TX_PACKETS, DBG_LVL_ALL,
- "Device in Low Power mode...waking up");
- Adapter->usIdleModePattern = ABORT_IDLE_MODE;
- Adapter->bWakeUpDevice = TRUE;
- wake_up(&Adapter->process_rx_cntrlpkt);
- }
-
- transmit_packets(Adapter);
- atomic_set(&Adapter->TxPktAvail, 0);
- }
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL,
- "Exiting the tx thread..\n");
- Adapter->transmit_packet_thread = NULL;
- return 0;
-}
diff --git a/drivers/staging/bcm/Typedefs.h b/drivers/staging/bcm/Typedefs.h
deleted file mode 100644
index 90b3b25dd60..00000000000
--- a/drivers/staging/bcm/Typedefs.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/****************************
-* Typedefs.h
-****************************/
-#ifndef __TYPEDEFS_H__
-#define __TYPEDEFS_H__
-#define STATUS_SUCCESS 0
-#define STATUS_FAILURE -1
-
-
-#define TRUE 1
-
-
-typedef char CHAR;
-typedef int INT;
-typedef short SHORT;
-typedef long LONG;
-typedef void VOID;
-
-typedef unsigned char UCHAR;
-typedef unsigned char B_UINT8;
-typedef unsigned short USHORT;
-typedef unsigned short B_UINT16;
-typedef unsigned int UINT;
-typedef unsigned int B_UINT32;
-typedef unsigned long ULONG;
-typedef unsigned long DWORD;
-
-typedef char *PCHAR;
-typedef short *PSHORT;
-typedef int *PINT;
-typedef long *PLONG;
-typedef void *PVOID;
-
-typedef unsigned char *PUCHAR;
-typedef unsigned short *PUSHORT;
-typedef unsigned int *PUINT;
-typedef unsigned long *PULONG;
-typedef unsigned long long ULONG64;
-typedef unsigned long long LARGE_INTEGER;
-typedef unsigned int UINT32;
-#ifndef NULL
-#define NULL 0
-#endif
-
-
-#endif /* __TYPEDEFS_H__ */
-
diff --git a/drivers/staging/bcm/cntrl_SignalingInterface.h b/drivers/staging/bcm/cntrl_SignalingInterface.h
deleted file mode 100644
index 8683c2d4276..00000000000
--- a/drivers/staging/bcm/cntrl_SignalingInterface.h
+++ /dev/null
@@ -1,311 +0,0 @@
-#ifndef CNTRL_SIGNALING_INTERFACE_
-#define CNTRL_SIGNALING_INTERFACE_
-
-#define DSA_REQ 11
-#define DSA_RSP 12
-#define DSA_ACK 13
-#define DSC_REQ 14
-#define DSC_RSP 15
-#define DSC_ACK 16
-#define DSD_REQ 17
-#define DSD_RSP 18
-#define DSD_ACK 19
-#define MAX_CLASSIFIERS_IN_SF 4
-
-#define MAX_STRING_LEN 20
-#define MAX_PHS_LENGTHS 255
-#define VENDOR_PHS_PARAM_LENGTH 10
-#define MAX_NUM_ACTIVE_BS 10
-#define AUTH_TOKEN_LENGTH 10
-#define NUM_HARQ_CHANNELS 16 /* Changed from 10 to 16 to accommodate all HARQ channels */
-#define VENDOR_CLASSIFIER_PARAM_LENGTH 1 /* Changed the size to 1 byte since we don't use it */
-#define VENDOR_SPECIF_QOS_PARAM 1
-#define VENDOR_PHS_PARAM_LENGTH 10
-#define MBS_CONTENTS_ID_LENGTH 10
-#define GLOBAL_SF_CLASSNAME_LENGTH 6
-
-#define TYPE_OF_SERVICE_LENGTH 3
-#define IP_MASKED_SRC_ADDRESS_LENGTH 32
-#define IP_MASKED_DEST_ADDRESS_LENGTH 32
-#define PROTOCOL_SRC_PORT_RANGE_LENGTH 4
-#define PROTOCOL_DEST_PORT_RANGE_LENGTH 4
-#define ETHERNET_DEST_MAC_ADDR_LENGTH 12
-#define ETHERNET_SRC_MAC_ADDR_LENGTH 12
-#define NUM_ETHERTYPE_BYTES 3
-#define NUM_IPV6_FLOWLABLE_BYTES 3
-
-struct bcm_packet_class_rules {
- /* 16bit UserPriority Of The Service Flow */
- u16 u16UserPriority;
- /* 16bit VLANID Of The Service Flow */
- u16 u16VLANID;
- /* 16bit Packet Classification RuleIndex Of The Service Flow */
- u16 u16PacketClassificationRuleIndex;
- /* 8bit Classifier Rule Priority Of The Service Flow */
- u8 u8ClassifierRulePriority;
- /* Length of IP TypeOfService field */
- u8 u8IPTypeOfServiceLength;
- /* 3bytes IP TypeOfService */
- u8 u8IPTypeOfService[TYPE_OF_SERVICE_LENGTH];
- /* Protocol used in classification of Service Flow */
- u8 u8Protocol;
- /* Length of IP Masked Source Address */
- u8 u8IPMaskedSourceAddressLength;
- /* IP Masked Source Address used in classification for the Service Flow */
- u8 u8IPMaskedSourceAddress[IP_MASKED_SRC_ADDRESS_LENGTH];
- /* Length of IP Destination Address */
- u8 u8IPDestinationAddressLength;
- /* IP Destination Address used in classification for the Service Flow */
- u8 u8IPDestinationAddress[IP_MASKED_DEST_ADDRESS_LENGTH];
- /* Length of Protocol Source Port Range */
- u8 u8ProtocolSourcePortRangeLength;
- /* Protocol Source Port Range used in the Service Flow */
- u8 u8ProtocolSourcePortRange[PROTOCOL_SRC_PORT_RANGE_LENGTH];
- /* Length of Protocol Dest Port Range */
- u8 u8ProtocolDestPortRangeLength;
- /* Protocol Dest Port Range used in the Service Flow */
- u8 u8ProtocolDestPortRange[PROTOCOL_DEST_PORT_RANGE_LENGTH];
- /* Length of Ethernet Destination MAC Address */
- u8 u8EthernetDestMacAddressLength;
- /* Ethernet Destination MAC Address used in classification of the Service Flow */
- u8 u8EthernetDestMacAddress[ETHERNET_DEST_MAC_ADDR_LENGTH];
- /* Length of Ethernet Source MAC Address */
- u8 u8EthernetSourceMACAddressLength;
- /* Ethernet Source MAC Address used in classification of the Service Flow */
- u8 u8EthernetSourceMACAddress[ETHERNET_SRC_MAC_ADDR_LENGTH];
- /* Length of Ethertype */
- u8 u8EthertypeLength;
- /* 3bytes Ethertype Of The Service Flow */
- u8 u8Ethertype[NUM_ETHERTYPE_BYTES];
- /* 8bit Associated PHSI Of The Service Flow */
- u8 u8AssociatedPHSI;
- /* Length of Vendor Specific Classifier Param length Of The Service Flow */
- u8 u8VendorSpecificClassifierParamLength;
- /* Vendor Specific Classifier Param Of The Service Flow */
- u8 u8VendorSpecificClassifierParam[VENDOR_CLASSIFIER_PARAM_LENGTH];
-	/* Length Of IPv6 Flow Label of the Service Flow */
- u8 u8IPv6FlowLableLength;
-	/* IPv6 Flow Label Of The Service Flow */
- u8 u8IPv6FlowLable[NUM_IPV6_FLOWLABLE_BYTES];
- /* Action associated with the classifier rule */
- u8 u8ClassifierActionRule;
- u16 u16ValidityBitMap;
-};
-
-struct bcm_phs_rules {
- /* 8bit PHS Index Of The Service Flow */
- u8 u8PHSI;
- /* PHSF Length Of The Service Flow */
- u8 u8PHSFLength;
- /* String of bytes containing header information to be suppressed by the sending CS and reconstructed by the receiving CS */
- u8 u8PHSF[MAX_PHS_LENGTHS];
- /* PHSM Length Of The Service Flow */
- u8 u8PHSMLength;
- /* PHS Mask for the SF */
- u8 u8PHSM[MAX_PHS_LENGTHS];
- /* 8bit Total number of bytes to be suppressed for the Service Flow */
- u8 u8PHSS;
- /* 8bit Indicates whether or not Packet Header contents need to be verified prior to suppression */
- u8 u8PHSV;
- /* Vendor Specific PHS param Length Of The Service Flow */
- u8 u8VendorSpecificPHSParamsLength;
- /* Vendor Specific PHS param Of The Service Flow */
- u8 u8VendorSpecificPHSParams[VENDOR_PHS_PARAM_LENGTH];
- u8 u8Padding[2];
-};
-
-struct bcm_convergence_types {
-	/* 8bit Phs Classifier Action Of The Service Flow */
- u8 u8ClassfierDSCAction;
- /* 8bit Phs DSC Action Of The Service Flow */
- u8 u8PhsDSCAction;
- /* 16bit Padding */
- u8 u8Padding[2];
- /* Packet classification rules structure */
- struct bcm_packet_class_rules cCPacketClassificationRule;
- /* Payload header suppression rules structure */
- struct bcm_phs_rules cPhsRule;
-};
-
-struct bcm_connect_mgr_params {
- /* 32bitSFID Of The Service Flow */
- u32 u32SFID;
- /* 32bit Maximum Sustained Traffic Rate of the Service Flow */
- u32 u32MaxSustainedTrafficRate;
- /* 32bit Maximum Traffic Burst allowed for the Service Flow */
- u32 u32MaxTrafficBurst;
- /* 32bit Minimum Reserved Traffic Rate of the Service Flow */
- u32 u32MinReservedTrafficRate;
- /* 32bit Tolerated Jitter of the Service Flow */
- u32 u32ToleratedJitter;
- /* 32bit Maximum Latency of the Service Flow */
- u32 u32MaximumLatency;
- /* 16bitCID Of The Service Flow */
- u16 u16CID;
- /* 16bit SAID on which the service flow being set up shall be mapped */
- u16 u16TargetSAID;
- /* 16bit ARQ window size negotiated */
- u16 u16ARQWindowSize;
- /* 16bit Total Tx delay incl sending, receiving & processing delays */
- u16 u16ARQRetryTxTimeOut;
- /* 16bit Total Rx delay incl sending, receiving & processing delays */
- u16 u16ARQRetryRxTimeOut;
- /* 16bit ARQ block lifetime */
- u16 u16ARQBlockLifeTime;
- /* 16bit ARQ Sync loss timeout */
- u16 u16ARQSyncLossTimeOut;
- /* 16bit ARQ Purge timeout */
- u16 u16ARQRxPurgeTimeOut;
- /* TODO::Remove this once we move to a new CORR2 driver
- * brief Size of an ARQ block
- */
- u16 u16ARQBlockSize;
- /* #endif */
- /* 16bit Nominal interval b/w consecutive SDU arrivals at MAC SAP */
- u16 u16SDUInterArrivalTime;
- /* 16bit Specifies the time base for rate measurement */
- u16 u16TimeBase;
-	/* 16bit Interval b/w Successive Grant opportunities */
- u16 u16UnsolicitedGrantInterval;
-	/* 16bit Interval b/w Successive Polling grant opportunities */
- u16 u16UnsolicitedPollingInterval;
- /* internal var to get the overhead */
- u16 u16MacOverhead;
- /* MBS contents Identifier */
- u16 u16MBSContentsID[MBS_CONTENTS_ID_LENGTH];
- /* MBS contents Identifier length */
- u8 u8MBSContentsIDLength;
- /* ServiceClassName Length Of The Service Flow */
- u8 u8ServiceClassNameLength;
- /* 32bytes ServiceClassName Of The Service Flow */
- u8 u8ServiceClassName[32];
-	/* 8bit Indicates whether or not MBS service is requested for this Service Flow */
- u8 u8MBSService;
- /* 8bit QOS Parameter Set specifies proper application of QoS parameters to Provisioned, Admitted and Active sets */
- u8 u8QosParamSet;
- /* 8bit Traffic Priority Of the Service Flow */
- u8 u8TrafficPriority;
- /* 8bit Uplink Grant Scheduling Type of The Service Flow */
- u8 u8ServiceFlowSchedulingType;
- /* 8bit Request transmission Policy of the Service Flow */
- u8 u8RequesttransmissionPolicy;
- /* 8bit Specifies whether SDUs for this Service flow are of FixedLength or Variable length */
- u8 u8FixedLengthVSVariableLengthSDUIndicator;
- /* 8bit Length of the SDU for a fixed length SDU service flow */
- u8 u8SDUSize;
- /* 8bit Indicates whether or not ARQ is requested for this connection */
- u8 u8ARQEnable;
-	/* < 8bit Indicates whether or not data has to be delivered in order to the higher layer */
- u8 u8ARQDeliverInOrder;
- /* 8bit Receiver ARQ ACK processing time */
- u8 u8RxARQAckProcessingTime;
- /* 8bit Convergence Sublayer Specification Of The Service Flow */
- u8 u8CSSpecification;
- /* 8 bit Type of data delivery service */
- u8 u8TypeOfDataDeliveryService;
- /* 8bit Specifies whether a service flow may generate Paging */
- u8 u8PagingPreference;
- /* 8bit Indicates the MBS Zone through which the connection or virtual connection is valid */
- u8 u8MBSZoneIdentifierassignment;
- /* 8bit Specifies whether traffic on SF should generate MOB_TRF_IND to MS in sleep mode */
- u8 u8TrafficIndicationPreference;
-	/* 8bit Specifies the length of predefined Global QoS parameter set encoding for this SF */
- u8 u8GlobalServicesClassNameLength;
-	/* 6 byte Specifies the predefined Global QoS parameter set encoding for this SF */
- u8 u8GlobalServicesClassName[GLOBAL_SF_CLASSNAME_LENGTH];
- /* 8bit Indicates whether or not SN feedback is enabled for the conn */
- u8 u8SNFeedbackEnabled;
- /* Indicates the size of the Fragment Sequence Number for the connection */
- u8 u8FSNSize;
- /* 8bit Number of CIDs in active BS list */
- u8 u8CIDAllocation4activeBSsLength;
- /* CIDs of BS in the active list */
- u8 u8CIDAllocation4activeBSs[MAX_NUM_ACTIVE_BS];
- /* Specifies if PDU extended subheader should be applied on every PDU on this conn */
- u8 u8PDUSNExtendedSubheader4HarqReordering;
- /* 8bit Specifies whether the connection uses HARQ or not */
- u8 u8HARQServiceFlows;
- /* Specifies the length of Authorization token */
- u8 u8AuthTokenLength;
- /* Specifies the Authorization token */
- u8 u8AuthToken[AUTH_TOKEN_LENGTH];
-	/* specifies the number of HARQ channels used to carry data (mapping length) */
- u8 u8HarqChannelMappingLength;
-	/* specifies the HARQ channels used to carry data */
- u8 u8HARQChannelMapping[NUM_HARQ_CHANNELS];
- /* 8bit Length of Vendor Specific QoS Params */
- u8 u8VendorSpecificQoSParamLength;
- /* 1byte Vendor Specific QoS Param Of The Service Flow */
- u8 u8VendorSpecificQoSParam[VENDOR_SPECIF_QOS_PARAM];
- /* indicates total classifiers in the SF */
- u8 u8TotalClassifiers; /* < Total number of valid classifiers */
- u8 bValid; /* < Validity flag */
- u8 u8Padding; /* < Padding byte */
- /*
- * Structure for Convergence SubLayer Types with a maximum of 4 classifiers
- */
- struct bcm_convergence_types cConvergenceSLTypes[MAX_CLASSIFIERS_IN_SF];
-};
-
-struct bcm_add_request {
- u8 u8Type; /* < Type */
- u8 eConnectionDir; /* < Connection direction */
- /* brief 16 bit TID */
- u16 u16TID; /* < 16bit TID */
- /* brief 16bitCID */
- u16 u16CID; /* < 16bit CID */
- /* brief 16bitVCID */
- u16 u16VCID; /* < 16bit VCID */
- struct bcm_connect_mgr_params *psfParameterSet; /* < connection manager parameters */
-};
-
-struct bcm_add_indication {
- u8 u8Type; /* < Type */
- u8 eConnectionDir; /* < Connection Direction */
- /* brief 16 bit TID */
- u16 u16TID; /* < TID */
- /* brief 16bitCID */
- u16 u16CID; /* < 16bitCID */
- /* brief 16bitVCID */
- u16 u16VCID; /* < 16bitVCID */
- struct bcm_connect_mgr_params *psfAuthorizedSet; /* Authorized set of connection manager parameters */
- struct bcm_connect_mgr_params *psfAdmittedSet; /* Admitted set of connection manager parameters */
-	struct bcm_connect_mgr_params *psfActiveSet; /* Active set of connection manager parameters */
- u8 u8CC; /* <Confirmation Code */
- u8 u8Padd; /* < 8-bit Padding */
- u16 u16Padd; /* < 16 bit Padding */
-};
-
-struct bcm_del_request {
- u8 u8Type; /* < Type */
- u8 u8Padding; /* < Padding byte */
- u16 u16TID; /* < TID */
- /* brief 32bitSFID */
- u32 u32SFID; /* < SFID */
-};
-
-struct bcm_del_indication {
- u8 u8Type; /* < Type */
- u8 u8Padding; /* < Padding */
- u16 u16TID; /* < TID */
- /* brief 16bitCID */
- u16 u16CID; /* < CID */
- /* brief 16bitVCID */
- u16 u16VCID; /* < VCID */
- /* brief 32bitSFID */
- u32 u32SFID; /* < SFID */
- /* brief 8bit Confirmation code */
- u8 u8ConfirmationCode; /* < Confirmation code */
- u8 u8Padding1[3]; /* < 3 byte Padding */
-};
-
-struct bcm_stim_sfhostnotify {
- u32 SFID; /* SFID of the service flow */
- u16 newCID; /* the new/changed CID */
- u16 VCID; /* Get new Vcid if the flow has been made active in CID update TLV, but was inactive earlier or the orig vcid */
- u8 RetainSF; /* Indication to Host if the SF is to be retained or deleted; if TRUE-retain else delete */
- u8 QoSParamSet; /* QoS paramset of the retained SF */
- u16 u16reserved; /* For byte alignment */
-};
-
-#endif
diff --git a/drivers/staging/bcm/headers.h b/drivers/staging/bcm/headers.h
deleted file mode 100644
index a7d4af5ea9a..00000000000
--- a/drivers/staging/bcm/headers.h
+++ /dev/null
@@ -1,78 +0,0 @@
-
-/*******************************************************************
-* Headers.h
-*******************************************************************/
-#ifndef __HEADERS_H__
-#define __HEADERS_H__
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
-#include <linux/socket.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv4.h>
-#include <linux/if_arp.h>
-#include <linux/delay.h>
-#include <linux/spinlock.h>
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/string.h>
-#include <linux/etherdevice.h>
-#include <linux/wait.h>
-#include <linux/proc_fs.h>
-#include <linux/interrupt.h>
-#include <linux/stddef.h>
-#include <linux/stat.h>
-#include <linux/fcntl.h>
-#include <linux/unistd.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/pagemap.h>
-#include <linux/kthread.h>
-#include <linux/tcp.h>
-#include <linux/udp.h>
-#include <linux/usb.h>
-#include <linux/uaccess.h>
-#include <net/ip.h>
-
-#include "Typedefs.h"
-#include "Macros.h"
-#include "HostMIBSInterface.h"
-#include "cntrl_SignalingInterface.h"
-#include "PHSDefines.h"
-#include "led_control.h"
-#include "Ioctl.h"
-#include "nvm.h"
-#include "target_params.h"
-#include "Adapter.h"
-#include "CmHost.h"
-#include "DDRInit.h"
-#include "Debug.h"
-#include "IPv6ProtocolHdr.h"
-#include "PHSModule.h"
-#include "Protocol.h"
-#include "Prototypes.h"
-#include "Queue.h"
-#include "vendorspecificextn.h"
-
-#include "InterfaceMacros.h"
-#include "InterfaceAdapter.h"
-#include "InterfaceIsr.h"
-#include "InterfaceMisc.h"
-#include "InterfaceRx.h"
-#include "InterfaceTx.h"
-#include "InterfaceIdleMode.h"
-#include "InterfaceInit.h"
-
-#define DRV_NAME "beceem"
-#define DEV_NAME "tarang"
-#define DRV_DESCRIPTION "Beceem Communications Inc. WiMAX driver"
-#define DRV_COPYRIGHT "Copyright 2010. Beceem Communications Inc"
-#define DRV_VERSION "5.2.45"
-#define PFX DRV_NAME " "
-
-extern struct class *bcm_class;
-
-#endif
diff --git a/drivers/staging/bcm/hostmibs.c b/drivers/staging/bcm/hostmibs.c
deleted file mode 100644
index f9b08a5d8ce..00000000000
--- a/drivers/staging/bcm/hostmibs.c
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * File Name: hostmibs.c
- *
- * Author: Beceem Communications Pvt. Ltd
- *
- * Abstract: This file contains the routines to copy the statistics used by
- * the driver to the Host MIBS structure and giving the same to Application.
- */
-
-#include "headers.h"
-
-INT ProcessGetHostMibs(struct bcm_mini_adapter *Adapter,
- struct bcm_host_stats_mibs *pstHostMibs)
-{
- struct bcm_phs_entry *pstServiceFlowEntry = NULL;
- struct bcm_phs_rule *pstPhsRule = NULL;
- struct bcm_phs_classifier_table *pstClassifierTable = NULL;
- struct bcm_phs_classifier_entry *pstClassifierRule = NULL;
- struct bcm_phs_extension *pDeviceExtension = &Adapter->stBCMPhsContext;
- struct bcm_mibs_host_info *host_info;
- UINT nClassifierIndex = 0;
- UINT nPhsTableIndex = 0;
- UINT nSfIndex = 0;
- UINT uiIndex = 0;
-
- if (pDeviceExtension == NULL) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, HOST_MIBS,
- DBG_LVL_ALL, "Invalid Device Extension\n");
- return STATUS_FAILURE;
- }
-
- /* Copy the classifier Table */
- for (nClassifierIndex = 0; nClassifierIndex < MAX_CLASSIFIERS;
- nClassifierIndex++) {
- if (Adapter->astClassifierTable[nClassifierIndex].bUsed == TRUE)
- memcpy(&pstHostMibs->astClassifierTable[nClassifierIndex],
- &Adapter->astClassifierTable[nClassifierIndex],
- sizeof(struct bcm_mibs_classifier_rule));
- }
-
- /* Copy the SF Table */
- for (nSfIndex = 0; nSfIndex < NO_OF_QUEUES; nSfIndex++) {
- if (Adapter->PackInfo[nSfIndex].bValid) {
- memcpy(&pstHostMibs->astSFtable[nSfIndex],
- &Adapter->PackInfo[nSfIndex],
- sizeof(struct bcm_mibs_table));
- } else {
-			/* If index is not valid,
- * don't process this for the PHS table.
- * Go For the next entry.
- */
- continue;
- }
-
- /* Retrieve the SFID Entry Index for requested Service Flow */
- if (PHS_INVALID_TABLE_INDEX ==
- GetServiceFlowEntry(pDeviceExtension->
- pstServiceFlowPhsRulesTable,
- Adapter->PackInfo[nSfIndex].
- usVCID_Value, &pstServiceFlowEntry))
-
- continue;
-
- pstClassifierTable = pstServiceFlowEntry->pstClassifierTable;
-
- for (uiIndex = 0; uiIndex < MAX_PHSRULE_PER_SF; uiIndex++) {
- pstClassifierRule = &pstClassifierTable->stActivePhsRulesList[uiIndex];
-
- if (pstClassifierRule->bUsed) {
- pstPhsRule = pstClassifierRule->pstPhsRule;
-
- pstHostMibs->astPhsRulesTable[nPhsTableIndex].
- ulSFID = Adapter->PackInfo[nSfIndex].ulSFID;
-
- memcpy(&pstHostMibs->astPhsRulesTable[nPhsTableIndex].u8PHSI,
- &pstPhsRule->u8PHSI,
- sizeof(struct bcm_phs_rule));
- nPhsTableIndex++;
-
- }
-
- }
-
- }
-
- /* Copy other Host Statistics parameters */
- host_info = &pstHostMibs->stHostInfo;
- host_info->GoodTransmits = Adapter->dev->stats.tx_packets;
- host_info->GoodReceives = Adapter->dev->stats.rx_packets;
- host_info->CurrNumFreeDesc = atomic_read(&Adapter->CurrNumFreeTxDesc);
- host_info->BEBucketSize = Adapter->BEBucketSize;
- host_info->rtPSBucketSize = Adapter->rtPSBucketSize;
- host_info->TimerActive = Adapter->TimerActive;
- host_info->u32TotalDSD = Adapter->u32TotalDSD;
-
- memcpy(host_info->aTxPktSizeHist, Adapter->aTxPktSizeHist,
- sizeof(UINT32) * MIBS_MAX_HIST_ENTRIES);
- memcpy(host_info->aRxPktSizeHist, Adapter->aRxPktSizeHist,
- sizeof(UINT32) * MIBS_MAX_HIST_ENTRIES);
-
- return STATUS_SUCCESS;
-}
-
-VOID GetDroppedAppCntrlPktMibs(struct bcm_host_stats_mibs *pstHostMibs,
- struct bcm_tarang_data *pTarang)
-{
- memcpy(&(pstHostMibs->stDroppedAppCntrlMsgs),
- &(pTarang->stDroppedAppCntrlMsgs),
- sizeof(struct bcm_mibs_dropped_cntrl_msg));
-}
-
-VOID CopyMIBSExtendedSFParameters(struct bcm_mini_adapter *Adapter,
- struct bcm_connect_mgr_params *psfLocalSet,
- UINT uiSearchRuleIndex)
-{
- struct bcm_mibs_parameters *t =
- &Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable;
-
- t->wmanIfSfid = psfLocalSet->u32SFID;
- t->wmanIfCmnCpsMaxSustainedRate =
- psfLocalSet->u32MaxSustainedTrafficRate;
- t->wmanIfCmnCpsMaxTrafficBurst = psfLocalSet->u32MaxTrafficBurst;
- t->wmanIfCmnCpsMinReservedRate = psfLocalSet->u32MinReservedTrafficRate;
- t->wmanIfCmnCpsToleratedJitter = psfLocalSet->u32ToleratedJitter;
- t->wmanIfCmnCpsMaxLatency = psfLocalSet->u32MaximumLatency;
- t->wmanIfCmnCpsFixedVsVariableSduInd =
- psfLocalSet->u8FixedLengthVSVariableLengthSDUIndicator;
- t->wmanIfCmnCpsFixedVsVariableSduInd =
- ntohl(t->wmanIfCmnCpsFixedVsVariableSduInd);
- t->wmanIfCmnCpsSduSize = psfLocalSet->u8SDUSize;
- t->wmanIfCmnCpsSduSize = ntohl(t->wmanIfCmnCpsSduSize);
- t->wmanIfCmnCpsSfSchedulingType =
- psfLocalSet->u8ServiceFlowSchedulingType;
- t->wmanIfCmnCpsSfSchedulingType =
- ntohl(t->wmanIfCmnCpsSfSchedulingType);
- t->wmanIfCmnCpsArqEnable = psfLocalSet->u8ARQEnable;
- t->wmanIfCmnCpsArqEnable = ntohl(t->wmanIfCmnCpsArqEnable);
- t->wmanIfCmnCpsArqWindowSize = ntohs(psfLocalSet->u16ARQWindowSize);
- t->wmanIfCmnCpsArqWindowSize = ntohl(t->wmanIfCmnCpsArqWindowSize);
- t->wmanIfCmnCpsArqBlockLifetime =
- ntohs(psfLocalSet->u16ARQBlockLifeTime);
- t->wmanIfCmnCpsArqBlockLifetime =
- ntohl(t->wmanIfCmnCpsArqBlockLifetime);
- t->wmanIfCmnCpsArqSyncLossTimeout =
- ntohs(psfLocalSet->u16ARQSyncLossTimeOut);
- t->wmanIfCmnCpsArqSyncLossTimeout =
- ntohl(t->wmanIfCmnCpsArqSyncLossTimeout);
- t->wmanIfCmnCpsArqDeliverInOrder = psfLocalSet->u8ARQDeliverInOrder;
- t->wmanIfCmnCpsArqDeliverInOrder =
- ntohl(t->wmanIfCmnCpsArqDeliverInOrder);
- t->wmanIfCmnCpsArqRxPurgeTimeout =
- ntohs(psfLocalSet->u16ARQRxPurgeTimeOut);
- t->wmanIfCmnCpsArqRxPurgeTimeout =
- ntohl(t->wmanIfCmnCpsArqRxPurgeTimeout);
- t->wmanIfCmnCpsArqBlockSize = ntohs(psfLocalSet->u16ARQBlockSize);
- t->wmanIfCmnCpsArqBlockSize = ntohl(t->wmanIfCmnCpsArqBlockSize);
- t->wmanIfCmnCpsReqTxPolicy = psfLocalSet->u8RequesttransmissionPolicy;
- t->wmanIfCmnCpsReqTxPolicy = ntohl(t->wmanIfCmnCpsReqTxPolicy);
- t->wmanIfCmnSfCsSpecification = psfLocalSet->u8CSSpecification;
- t->wmanIfCmnSfCsSpecification = ntohl(t->wmanIfCmnSfCsSpecification);
- t->wmanIfCmnCpsTargetSaid = ntohs(psfLocalSet->u16TargetSAID);
- t->wmanIfCmnCpsTargetSaid = ntohl(t->wmanIfCmnCpsTargetSaid);
-
-}
diff --git a/drivers/staging/bcm/led_control.c b/drivers/staging/bcm/led_control.c
deleted file mode 100644
index 074fc39ed67..00000000000
--- a/drivers/staging/bcm/led_control.c
+++ /dev/null
@@ -1,952 +0,0 @@
-#include "headers.h"
-
-#define STATUS_IMAGE_CHECKSUM_MISMATCH -199
-#define EVENT_SIGNALED 1
-
-static B_UINT16 CFG_CalculateChecksum(B_UINT8 *pu8Buffer, B_UINT32 u32Size)
-{
- B_UINT16 u16CheckSum = 0;
-
- while (u32Size--) {
- u16CheckSum += (B_UINT8)~(*pu8Buffer);
- pu8Buffer++;
- }
- return u16CheckSum;
-}
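The DSD checksum used by the LED code is simply the 16-bit sum of the bitwise complement of every byte; ValidateDSDParamsChecksum below recomputes it over the parameter block and compares it with the value stored right after the block. A standalone sketch with made-up sample data (not driver code):

/* Sketch of the DSD checksum scheme; the sample buffer is illustrative only. */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

static uint16_t dsd_checksum(const uint8_t *buf, size_t len)
{
	uint16_t sum = 0;

	while (len--)
		sum += (uint8_t)~(*buf++);	/* add the complement of each byte */
	return sum;
}

int main(void)
{
	const uint8_t params[] = { 0x12, 0x34, 0x56 };
	uint16_t stored = dsd_checksum(params, sizeof(params));

	/* A validator would compare the recomputed value with the stored one. */
	printf("match = %d\n", dsd_checksum(params, sizeof(params)) == stored);
	return 0;
}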
-
-bool IsReqGpioIsLedInNVM(struct bcm_mini_adapter *Adapter, UINT gpios)
-{
- INT Status;
-
- Status = (Adapter->gpioBitMap & gpios) ^ gpios;
- if (Status)
- return false;
- else
- return TRUE;
-}
-
-static INT LED_Blink(struct bcm_mini_adapter *Adapter,
- UINT GPIO_Num,
- UCHAR uiLedIndex,
- ULONG timeout,
- INT num_of_time,
- enum bcm_led_events currdriverstate)
-{
- int Status = STATUS_SUCCESS;
- bool bInfinite = false;
-
- /* Check if num_of_time is -ve. If yes, blink led in infinite loop */
- if (num_of_time < 0) {
- bInfinite = TRUE;
- num_of_time = 1;
- }
- while (num_of_time) {
- if (currdriverstate == Adapter->DriverState)
- TURN_ON_LED(Adapter, GPIO_Num, uiLedIndex);
-
- /* Wait for timeout after setting on the LED */
- Status = wait_event_interruptible_timeout(
- Adapter->LEDInfo.notify_led_event,
- currdriverstate != Adapter->DriverState ||
- kthread_should_stop(),
- msecs_to_jiffies(timeout));
-
- if (kthread_should_stop()) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
- DBG_LVL_ALL,
- "Led thread got signal to exit..hence exiting");
- Adapter->LEDInfo.led_thread_running =
- BCM_LED_THREAD_DISABLED;
- TURN_OFF_LED(Adapter, GPIO_Num, uiLedIndex);
- Status = EVENT_SIGNALED;
- break;
- }
- if (Status) {
- TURN_OFF_LED(Adapter, GPIO_Num, uiLedIndex);
- Status = EVENT_SIGNALED;
- break;
- }
-
- TURN_OFF_LED(Adapter, GPIO_Num, uiLedIndex);
- Status = wait_event_interruptible_timeout(
- Adapter->LEDInfo.notify_led_event,
- currdriverstate != Adapter->DriverState ||
- kthread_should_stop(),
- msecs_to_jiffies(timeout));
- if (bInfinite == false)
- num_of_time--;
- }
- return Status;
-}
-
-static INT ScaleRateofTransfer(ULONG rate)
-{
- if (rate <= 3)
- return rate;
- else if ((rate > 3) && (rate <= 100))
- return 5;
- else if ((rate > 100) && (rate <= 200))
- return 6;
- else if ((rate > 200) && (rate <= 300))
- return 7;
- else if ((rate > 300) && (rate <= 400))
- return 8;
- else if ((rate > 400) && (rate <= 500))
- return 9;
- else if ((rate > 500) && (rate <= 600))
- return 10;
- else
- return MAX_NUM_OF_BLINKS;
-}
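LED_Proportional_Blink below samples the netdevice packet counters roughly once per second and feeds the per-second difference to ScaleRateofTransfer to pick a blink count (rates up to 3 blink that many times, 100-packet buckets map to 5..10, anything higher is clamped). A compact userspace sketch of that mapping; MAX_BLINKS is a stand-in, not the driver's MAX_NUM_OF_BLINKS:

/* Sketch of rate-to-blinks mapping; counter values and MAX_BLINKS are made up. */
#include <stdio.h>

#define MAX_BLINKS 11	/* stand-in for MAX_NUM_OF_BLINKS */

static int scale_rate(unsigned long rate)
{
	if (rate <= 3)
		return (int)rate;
	if (rate <= 600)
		return 5 + (int)((rate - 1) / 100);	/* 5..10 in 100-pkt buckets */
	return MAX_BLINKS;
}

int main(void)
{
	unsigned long before = 1000, after = 1250;	/* counters sampled 1 s apart */

	printf("blinks = %d\n", scale_rate(after - before));	/* 7 */
	return 0;
}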
-
-static INT blink_in_normal_bandwidth(struct bcm_mini_adapter *ad,
- INT *time,
- INT *time_tx,
- INT *time_rx,
- UCHAR GPIO_Num_tx,
- UCHAR uiTxLedIndex,
- UCHAR GPIO_Num_rx,
- UCHAR uiRxLedIndex,
- enum bcm_led_events currdriverstate,
- ulong *timeout)
-{
- /*
- * Assign minimum number of blinks of
- * either Tx or Rx.
- */
- *time = (*time_tx > *time_rx ? *time_rx : *time_tx);
-
- if (*time > 0) {
- /* Blink both Tx and Rx LEDs */
- if ((LED_Blink(ad, 1 << GPIO_Num_tx, uiTxLedIndex, *timeout,
- *time, currdriverstate) == EVENT_SIGNALED) ||
- (LED_Blink(ad, 1 << GPIO_Num_rx, uiRxLedIndex, *timeout,
- *time, currdriverstate) == EVENT_SIGNALED))
- return EVENT_SIGNALED;
- }
-
- if (*time == *time_tx) {
- /* Blink pending rate of Rx */
- if (LED_Blink(ad, (1 << GPIO_Num_rx), uiRxLedIndex, *timeout,
- *time_rx - *time,
- currdriverstate) == EVENT_SIGNALED)
- return EVENT_SIGNALED;
-
- *time = *time_rx;
- } else {
- /* Blink pending rate of Tx */
- if (LED_Blink(ad, 1 << GPIO_Num_tx, uiTxLedIndex, *timeout,
- *time_tx - *time,
- currdriverstate) == EVENT_SIGNALED)
- return EVENT_SIGNALED;
-
- *time = *time_tx;
- }
-
- return 0;
-}
-
-static INT LED_Proportional_Blink(struct bcm_mini_adapter *Adapter,
- UCHAR GPIO_Num_tx,
- UCHAR uiTxLedIndex,
- UCHAR GPIO_Num_rx,
- UCHAR uiRxLedIndex,
- enum bcm_led_events currdriverstate)
-{
- /* Initial values of TX and RX packets */
- ULONG64 Initial_num_of_packts_tx = 0, Initial_num_of_packts_rx = 0;
- /* values of TX and RX packets after 1 sec */
- ULONG64 Final_num_of_packts_tx = 0, Final_num_of_packts_rx = 0;
- /* Rate of transfer of Tx and Rx in 1 sec */
- ULONG64 rate_of_transfer_tx = 0, rate_of_transfer_rx = 0;
- int Status = STATUS_SUCCESS;
- INT num_of_time = 0, num_of_time_tx = 0, num_of_time_rx = 0;
- UINT remDelay = 0;
- /* UINT GPIO_num = DISABLE_GPIO_NUM; */
- ulong timeout = 0;
-
- /* Read initial value of packets sent/received */
- Initial_num_of_packts_tx = Adapter->dev->stats.tx_packets;
- Initial_num_of_packts_rx = Adapter->dev->stats.rx_packets;
-
-	/* Scale the rate of transfer to the number of blinks. */
- num_of_time_tx = ScaleRateofTransfer((ULONG)rate_of_transfer_tx);
- num_of_time_rx = ScaleRateofTransfer((ULONG)rate_of_transfer_rx);
-
- while ((Adapter->device_removed == false)) {
- timeout = 50;
-
- if (EVENT_SIGNALED == blink_in_normal_bandwidth(Adapter,
- &num_of_time,
- &num_of_time_tx,
- &num_of_time_rx,
- GPIO_Num_tx,
- uiTxLedIndex,
- GPIO_Num_rx,
- uiRxLedIndex,
- currdriverstate,
- &timeout))
- return EVENT_SIGNALED;
-
-
- /*
- * If Tx/Rx rate is less than maximum blinks per second,
- * wait till delay completes to 1 second
- */
- remDelay = MAX_NUM_OF_BLINKS - num_of_time;
- if (remDelay > 0) {
- timeout = 100 * remDelay;
- Status = wait_event_interruptible_timeout(
- Adapter->LEDInfo.notify_led_event,
- currdriverstate != Adapter->DriverState
- || kthread_should_stop(),
- msecs_to_jiffies(timeout));
-
- if (kthread_should_stop()) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- LED_DUMP_INFO, DBG_LVL_ALL,
- "Led thread got signal to exit..hence exiting");
- Adapter->LEDInfo.led_thread_running =
- BCM_LED_THREAD_DISABLED;
- return EVENT_SIGNALED;
- }
- if (Status)
- return EVENT_SIGNALED;
- }
-
- /* Turn off both Tx and Rx LEDs before next second */
- TURN_OFF_LED(Adapter, 1 << GPIO_Num_tx, uiTxLedIndex);
- TURN_OFF_LED(Adapter, 1 << GPIO_Num_rx, uiTxLedIndex);
-
- /*
-		 * Read the Tx & Rx packet counts after 1 second and
-		 * calculate the rate of transfer
- */
- Final_num_of_packts_tx = Adapter->dev->stats.tx_packets;
- Final_num_of_packts_rx = Adapter->dev->stats.rx_packets;
-
- rate_of_transfer_tx = Final_num_of_packts_tx -
- Initial_num_of_packts_tx;
- rate_of_transfer_rx = Final_num_of_packts_rx -
- Initial_num_of_packts_rx;
-
- /* Read initial value of packets sent/received */
- Initial_num_of_packts_tx = Final_num_of_packts_tx;
- Initial_num_of_packts_rx = Final_num_of_packts_rx;
-
-		/* Scale the rate of transfer to the number of blinks. */
- num_of_time_tx =
- ScaleRateofTransfer((ULONG)rate_of_transfer_tx);
- num_of_time_rx =
- ScaleRateofTransfer((ULONG)rate_of_transfer_rx);
-
- }
- return Status;
-}
-
-/*
- * -----------------------------------------------------------------------------
- * Procedure: ValidateDSDParamsChecksum
- *
- * Description: Reads DSD Params and validates checksum.
- *
- * Arguments:
- * Adapter - Pointer to Adapter structure.
- * ulParamOffset - Start offset of the DSD parameter to be read and
- * validated.
- * usParamLen - Length of the DSD Parameter.
- *
- * Returns:
- * <OSAL_STATUS_CODE>
- * -----------------------------------------------------------------------------
- */
-static INT ValidateDSDParamsChecksum(struct bcm_mini_adapter *Adapter,
- ULONG ulParamOffset,
- USHORT usParamLen)
-{
- INT Status = STATUS_SUCCESS;
- PUCHAR puBuffer = NULL;
- USHORT usChksmOrg = 0;
- USHORT usChecksumCalculated = 0;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
- "LED Thread:ValidateDSDParamsChecksum: 0x%lx 0x%X",
- ulParamOffset, usParamLen);
-
- puBuffer = kmalloc(usParamLen, GFP_KERNEL);
- if (!puBuffer) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
- DBG_LVL_ALL,
- "LED Thread: ValidateDSDParamsChecksum Allocation failed");
- return -ENOMEM;
-
- }
-
- /* Read the DSD data from the parameter offset. */
- if (STATUS_SUCCESS != BeceemNVMRead(Adapter, (PUINT)puBuffer,
- ulParamOffset, usParamLen)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
- DBG_LVL_ALL,
- "LED Thread: ValidateDSDParamsChecksum BeceemNVMRead failed");
- Status = STATUS_IMAGE_CHECKSUM_MISMATCH;
- goto exit;
- }
-
- /* Calculate the checksum of the data read from the DSD parameter. */
- usChecksumCalculated = CFG_CalculateChecksum(puBuffer, usParamLen);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
- "LED Thread: usCheckSumCalculated = 0x%x\n",
- usChecksumCalculated);
-
- /*
-	 * The end of the DSD parameter has a two-byte checksum stored in it.
- * Read it and compare with the calculated Checksum.
- */
- if (STATUS_SUCCESS != BeceemNVMRead(Adapter, (PUINT)&usChksmOrg,
- ulParamOffset+usParamLen, 2)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
- DBG_LVL_ALL,
- "LED Thread: ValidateDSDParamsChecksum BeceemNVMRead failed");
- Status = STATUS_IMAGE_CHECKSUM_MISMATCH;
- goto exit;
- }
- usChksmOrg = ntohs(usChksmOrg);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
- "LED Thread: usChksmOrg = 0x%x", usChksmOrg);
-
- /*
- * Compare the checksum calculated with the checksum read
- * from DSD section
- */
- if (usChecksumCalculated ^ usChksmOrg) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
- DBG_LVL_ALL,
- "LED Thread: ValidateDSDParamsChecksum: Checksums don't match");
- Status = STATUS_IMAGE_CHECKSUM_MISMATCH;
- goto exit;
- }
-
-exit:
- kfree(puBuffer);
- return Status;
-}
-
-
-/*
- * -----------------------------------------------------------------------------
- * Procedure: ValidateHWParmStructure
- *
- * Description: Validates HW Parameters.
- *
- * Arguments:
- * Adapter - Pointer to Adapter structure.
- * ulHwParamOffset - Start offset of the HW parameter Section to be read
- * and validated.
- *
- * Returns:
- * <OSAL_STATUS_CODE>
- * -----------------------------------------------------------------------------
- */
-static INT ValidateHWParmStructure(struct bcm_mini_adapter *Adapter,
- ULONG ulHwParamOffset)
-{
-
- INT Status = STATUS_SUCCESS;
- USHORT HwParamLen = 0;
- /*
- * Add DSD start offset to the hwParamOffset to get
- * the actual address.
- */
- ulHwParamOffset += DSD_START_OFFSET;
-
- /* Read the Length of HW_PARAM structure */
- BeceemNVMRead(Adapter, (PUINT)&HwParamLen, ulHwParamOffset, 2);
- HwParamLen = ntohs(HwParamLen);
- if (0 == HwParamLen || HwParamLen > Adapter->uiNVMDSDSize)
- return STATUS_IMAGE_CHECKSUM_MISMATCH;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
- "LED Thread:HwParamLen = 0x%x", HwParamLen);
- Status = ValidateDSDParamsChecksum(Adapter, ulHwParamOffset,
- HwParamLen);
- return Status;
-} /* ValidateHWParmStructure() */
-
-static int ReadLEDInformationFromEEPROM(struct bcm_mini_adapter *Adapter,
- UCHAR GPIO_Array[])
-{
- int Status = STATUS_SUCCESS;
-
- ULONG dwReadValue = 0;
- USHORT usHwParamData = 0;
- USHORT usEEPROMVersion = 0;
- UCHAR ucIndex = 0;
- UCHAR ucGPIOInfo[32] = {0};
-
- BeceemNVMRead(Adapter, (PUINT)&usEEPROMVersion,
- EEPROM_VERSION_OFFSET, 2);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
- "usEEPROMVersion: Minor:0x%X Major:0x%x",
- usEEPROMVersion & 0xFF,
- ((usEEPROMVersion >> 8) & 0xFF));
-
-
- if (((usEEPROMVersion>>8)&0xFF) < EEPROM_MAP5_MAJORVERSION) {
- BeceemNVMRead(Adapter, (PUINT)&usHwParamData,
- EEPROM_HW_PARAM_POINTER_ADDRESS, 2);
- usHwParamData = ntohs(usHwParamData);
- dwReadValue = usHwParamData;
- } else {
- /*
- * Validate Compatibility section and then read HW param
- * if compatibility section is valid.
- */
- Status = ValidateDSDParamsChecksum(Adapter,
- DSD_START_OFFSET,
- COMPATIBILITY_SECTION_LENGTH_MAP5);
-
- if (Status != STATUS_SUCCESS)
- return Status;
-
- BeceemNVMRead(Adapter, (PUINT)&dwReadValue,
- EEPROM_HW_PARAM_POINTER_ADDRRES_MAP5, 4);
- dwReadValue = ntohl(dwReadValue);
- }
-
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
- "LED Thread: Start address of HW_PARAM structure = 0x%lx",
- dwReadValue);
-
- /*
- * Validate if the address read out is within the DSD.
- * Adapter->uiNVMDSDSize gives whole DSD size inclusive of Autoinit.
- * lower limit should be above DSD_START_OFFSET and
- * upper limit should be below (Adapter->uiNVMDSDSize-DSD_START_OFFSET)
- */
- if (dwReadValue < DSD_START_OFFSET ||
- dwReadValue > (Adapter->uiNVMDSDSize-DSD_START_OFFSET))
- return STATUS_IMAGE_CHECKSUM_MISMATCH;
-
- Status = ValidateHWParmStructure(Adapter, dwReadValue);
- if (Status)
- return Status;
-
- /*
- * Add DSD_START_OFFSET to the offset read from the EEPROM.
-	 * This will give the actual start address of the HW Parameters section.
- * To read GPIO section, add GPIO offset further.
- */
-
- dwReadValue += DSD_START_OFFSET;
- /* = start address of hw param section. */
- dwReadValue += GPIO_SECTION_START_OFFSET;
- /* = GPIO start offset within HW Param section. */
-
- /*
- * Read the GPIO values for 32 GPIOs from EEPROM and map the function
- * number to GPIO pin number to GPIO_Array
- */
- BeceemNVMRead(Adapter, (UINT *)ucGPIOInfo, dwReadValue, 32);
- for (ucIndex = 0; ucIndex < 32; ucIndex++) {
-
- switch (ucGPIOInfo[ucIndex]) {
- case RED_LED:
- GPIO_Array[RED_LED] = ucIndex;
- Adapter->gpioBitMap |= (1 << ucIndex);
- break;
- case BLUE_LED:
- GPIO_Array[BLUE_LED] = ucIndex;
- Adapter->gpioBitMap |= (1 << ucIndex);
- break;
- case YELLOW_LED:
- GPIO_Array[YELLOW_LED] = ucIndex;
- Adapter->gpioBitMap |= (1 << ucIndex);
- break;
- case GREEN_LED:
- GPIO_Array[GREEN_LED] = ucIndex;
- Adapter->gpioBitMap |= (1 << ucIndex);
- break;
- default:
- break;
- }
-
- }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
- "GPIO's bit map correspond to LED :0x%X",
- Adapter->gpioBitMap);
- return Status;
-}
-
-
-static int ReadConfigFileStructure(struct bcm_mini_adapter *Adapter,
- bool *bEnableThread)
-{
- int Status = STATUS_SUCCESS;
- /* Array to store GPIO numbers from EEPROM */
- UCHAR GPIO_Array[NUM_OF_LEDS+1];
- UINT uiIndex = 0;
- UINT uiNum_of_LED_Type = 0;
- PUCHAR puCFGData = NULL;
- UCHAR bData = 0;
- struct bcm_led_state_info *curr_led_state;
-
- memset(GPIO_Array, DISABLE_GPIO_NUM, NUM_OF_LEDS+1);
-
- if (!Adapter->pstargetparams || IS_ERR(Adapter->pstargetparams)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
- DBG_LVL_ALL, "Target Params not Avail.\n");
- return -ENOENT;
- }
-
- /* Populate GPIO_Array with GPIO numbers for LED functions */
- /* Read the GPIO numbers from EEPROM */
- Status = ReadLEDInformationFromEEPROM(Adapter, GPIO_Array);
- if (Status == STATUS_IMAGE_CHECKSUM_MISMATCH) {
- *bEnableThread = false;
- return STATUS_SUCCESS;
- } else if (Status) {
- *bEnableThread = false;
- return Status;
- }
-
- /*
- * CONFIG file read successfully. Deallocate the memory of
- * uiFileNameBufferSize
- */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
- "LED Thread: Config file read successfully\n");
- puCFGData = (PUCHAR) &Adapter->pstargetparams->HostDrvrConfig1;
-
- /*
- * Offset for HostDrvConfig1, HostDrvConfig2, HostDrvConfig3 which
- * will have the information of LED type, LED on state for different
- * driver state and LED blink state.
- */
-
- for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
- bData = *puCFGData;
- curr_led_state = &Adapter->LEDInfo.LEDState[uiIndex];
-
- /*
-		 * Check bit 8 for polarity. If it is set,
-		 * the polarity is reversed
- */
- if (bData & 0x80) {
- curr_led_state->BitPolarity = 0;
- /* unset the bit 8 */
- bData = bData & 0x7f;
- }
-
- curr_led_state->LED_Type = bData;
- if (bData <= NUM_OF_LEDS)
- curr_led_state->GPIO_Num = GPIO_Array[bData];
- else
- curr_led_state->GPIO_Num = DISABLE_GPIO_NUM;
-
- puCFGData++;
- bData = *puCFGData;
- curr_led_state->LED_On_State = bData;
- puCFGData++;
- bData = *puCFGData;
- curr_led_state->LED_Blink_State = bData;
- puCFGData++;
- }
-
- /*
-	 * Check if all the LED settings are disabled. If so,
-	 * don't launch the LED control thread.
- */
- for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
- curr_led_state = &Adapter->LEDInfo.LEDState[uiIndex];
-
- if ((curr_led_state->LED_Type == DISABLE_GPIO_NUM) ||
- (curr_led_state->LED_Type == 0x7f) ||
- (curr_led_state->LED_Type == 0))
- uiNum_of_LED_Type++;
- }
- if (uiNum_of_LED_Type >= NUM_OF_LEDS)
- *bEnableThread = false;
-
- return Status;
-}
-
-/*
- * -----------------------------------------------------------------------------
- * Procedure: LedGpioInit
- *
- * Description: Initializes LED GPIOs. Makes the LED GPIOs to OUTPUT mode
- * and make the initial state to be OFF.
- *
- * Arguments:
- * Adapter - Pointer to MINI_ADAPTER structure.
- *
- * Returns: VOID
- *
- * -----------------------------------------------------------------------------
- */
-static VOID LedGpioInit(struct bcm_mini_adapter *Adapter)
-{
- UINT uiResetValue = 0;
- UINT uiIndex = 0;
- struct bcm_led_state_info *curr_led_state;
-
- /* Set all LED GPIO Mode to output mode */
- if (rdmalt(Adapter, GPIO_MODE_REGISTER, &uiResetValue,
- sizeof(uiResetValue)) < 0)
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
- DBG_LVL_ALL, "LED Thread: RDM Failed\n");
- for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
- curr_led_state = &Adapter->LEDInfo.LEDState[uiIndex];
-
- if (curr_led_state->GPIO_Num != DISABLE_GPIO_NUM)
- uiResetValue |= (1 << curr_led_state->GPIO_Num);
-
- TURN_OFF_LED(Adapter, 1 << curr_led_state->GPIO_Num, uiIndex);
-
- }
- if (wrmalt(Adapter, GPIO_MODE_REGISTER, &uiResetValue,
- sizeof(uiResetValue)) < 0)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
- DBG_LVL_ALL, "LED Thread: WRM Failed\n");
-
- Adapter->LEDInfo.bIdle_led_off = false;
-}
-
-static INT BcmGetGPIOPinInfo(struct bcm_mini_adapter *Adapter,
- UCHAR *GPIO_num_tx,
- UCHAR *GPIO_num_rx,
- UCHAR *uiLedTxIndex,
- UCHAR *uiLedRxIndex,
- enum bcm_led_events currdriverstate)
-{
- UINT uiIndex = 0;
- struct bcm_led_state_info *led_state_info;
-
- *GPIO_num_tx = DISABLE_GPIO_NUM;
- *GPIO_num_rx = DISABLE_GPIO_NUM;
-
- for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
- led_state_info = &Adapter->LEDInfo.LEDState[uiIndex];
-
- if (((currdriverstate == NORMAL_OPERATION) ||
- (currdriverstate == IDLEMODE_EXIT) ||
- (currdriverstate == FW_DOWNLOAD)) &&
- (led_state_info->LED_Blink_State & currdriverstate) &&
- (led_state_info->GPIO_Num != DISABLE_GPIO_NUM)) {
- if (*GPIO_num_tx == DISABLE_GPIO_NUM) {
- *GPIO_num_tx = led_state_info->GPIO_Num;
- *uiLedTxIndex = uiIndex;
- } else {
- *GPIO_num_rx = led_state_info->GPIO_Num;
- *uiLedRxIndex = uiIndex;
- }
- } else {
- if ((led_state_info->LED_On_State & currdriverstate) &&
- (led_state_info->GPIO_Num != DISABLE_GPIO_NUM)) {
- *GPIO_num_tx = led_state_info->GPIO_Num;
- *uiLedTxIndex = uiIndex;
- }
- }
- }
- return STATUS_SUCCESS;
-}
-
-static void handle_adapter_driver_state(struct bcm_mini_adapter *ad,
- enum bcm_led_events currdriverstate,
- UCHAR GPIO_num,
- UCHAR dummyGPIONum,
- UCHAR uiLedIndex,
- UCHAR dummyIndex,
- ulong timeout,
- UINT uiResetValue,
- UINT uiIndex)
-{
- switch (ad->DriverState) {
- case DRIVER_INIT:
- currdriverstate = DRIVER_INIT;
- /* ad->DriverState; */
- BcmGetGPIOPinInfo(ad, &GPIO_num, &dummyGPIONum,
- &uiLedIndex, &dummyIndex,
- currdriverstate);
-
- if (GPIO_num != DISABLE_GPIO_NUM)
- TURN_ON_LED(ad, 1 << GPIO_num, uiLedIndex);
-
- break;
- case FW_DOWNLOAD:
- /*
- * BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS,
- * LED_DUMP_INFO, DBG_LVL_ALL,
- * "LED Thread: FW_DN_DONE called\n");
- */
- currdriverstate = FW_DOWNLOAD;
- BcmGetGPIOPinInfo(ad, &GPIO_num, &dummyGPIONum,
- &uiLedIndex, &dummyIndex,
- currdriverstate);
-
- if (GPIO_num != DISABLE_GPIO_NUM) {
- timeout = 50;
- LED_Blink(ad, 1 << GPIO_num, uiLedIndex, timeout,
- -1, currdriverstate);
- }
- break;
- case FW_DOWNLOAD_DONE:
- currdriverstate = FW_DOWNLOAD_DONE;
- BcmGetGPIOPinInfo(ad, &GPIO_num, &dummyGPIONum,
- &uiLedIndex, &dummyIndex, currdriverstate);
- if (GPIO_num != DISABLE_GPIO_NUM)
- TURN_ON_LED(ad, 1 << GPIO_num, uiLedIndex);
- break;
-
- case SHUTDOWN_EXIT:
- /*
- * no break, continue to NO_NETWORK_ENTRY
- * state as well.
- */
- case NO_NETWORK_ENTRY:
- currdriverstate = NO_NETWORK_ENTRY;
- BcmGetGPIOPinInfo(ad, &GPIO_num, &dummyGPIONum,
- &uiLedIndex, &dummyGPIONum, currdriverstate);
- if (GPIO_num != DISABLE_GPIO_NUM)
- TURN_ON_LED(ad, 1 << GPIO_num, uiLedIndex);
- break;
- case NORMAL_OPERATION:
- {
- UCHAR GPIO_num_tx = DISABLE_GPIO_NUM;
- UCHAR GPIO_num_rx = DISABLE_GPIO_NUM;
- UCHAR uiLEDTx = 0;
- UCHAR uiLEDRx = 0;
-
- currdriverstate = NORMAL_OPERATION;
- ad->LEDInfo.bIdle_led_off = false;
-
- BcmGetGPIOPinInfo(ad, &GPIO_num_tx, &GPIO_num_rx,
- &uiLEDTx, &uiLEDRx, currdriverstate);
- if ((GPIO_num_tx == DISABLE_GPIO_NUM) &&
- (GPIO_num_rx == DISABLE_GPIO_NUM)) {
- GPIO_num = DISABLE_GPIO_NUM;
- } else {
- /*
- * If single LED is selected, use same
- * for both Tx and Rx
- */
- if (GPIO_num_tx == DISABLE_GPIO_NUM) {
- GPIO_num_tx = GPIO_num_rx;
- uiLEDTx = uiLEDRx;
- } else if (GPIO_num_rx == DISABLE_GPIO_NUM) {
- GPIO_num_rx = GPIO_num_tx;
- uiLEDRx = uiLEDTx;
- }
- /*
- * Blink the LED in proportionate
- * to Tx and Rx transmissions.
- */
- LED_Proportional_Blink(ad,
- GPIO_num_tx, uiLEDTx,
- GPIO_num_rx, uiLEDRx,
- currdriverstate);
- }
- }
- break;
- case LOWPOWER_MODE_ENTER:
- currdriverstate = LOWPOWER_MODE_ENTER;
- if (DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING ==
- ad->ulPowerSaveMode) {
- /* Turn OFF all the LED */
- uiResetValue = 0;
- for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
- if (ad->LEDInfo.LEDState[uiIndex].GPIO_Num != DISABLE_GPIO_NUM)
- TURN_OFF_LED(ad,
- (1 << ad->LEDInfo.LEDState[uiIndex].GPIO_Num),
- uiIndex);
- }
-
- }
-		/* Turn off LED and wake up for sending IDLE mode ACK */
- ad->LEDInfo.bLedInitDone = false;
- ad->LEDInfo.bIdle_led_off = TRUE;
- wake_up(&ad->LEDInfo.idleModeSyncEvent);
- GPIO_num = DISABLE_GPIO_NUM;
- break;
- case IDLEMODE_CONTINUE:
- currdriverstate = IDLEMODE_CONTINUE;
- GPIO_num = DISABLE_GPIO_NUM;
- break;
- case IDLEMODE_EXIT:
- break;
- case DRIVER_HALT:
- currdriverstate = DRIVER_HALT;
- GPIO_num = DISABLE_GPIO_NUM;
- for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
- if (ad->LEDInfo.LEDState[uiIndex].GPIO_Num !=
- DISABLE_GPIO_NUM)
- TURN_OFF_LED(ad,
- (1 << ad->LEDInfo.LEDState[uiIndex].GPIO_Num),
- uiIndex);
- }
- /* ad->DriverState = DRIVER_INIT; */
- break;
- case LED_THREAD_INACTIVE:
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, LED_DUMP_INFO,
- DBG_LVL_ALL, "InActivating LED thread...");
- currdriverstate = LED_THREAD_INACTIVE;
- ad->LEDInfo.led_thread_running =
- BCM_LED_THREAD_RUNNING_INACTIVELY;
- ad->LEDInfo.bLedInitDone = false;
- /* disable ALL LED */
- for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
- if (ad->LEDInfo.LEDState[uiIndex].GPIO_Num !=
- DISABLE_GPIO_NUM)
- TURN_OFF_LED(ad,
- (1 << ad->LEDInfo.LEDState[uiIndex].GPIO_Num),
- uiIndex);
- }
- break;
- case LED_THREAD_ACTIVE:
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, LED_DUMP_INFO,
- DBG_LVL_ALL, "Activating LED thread again...");
- if (ad->LinkUpStatus == false)
- ad->DriverState = NO_NETWORK_ENTRY;
- else
- ad->DriverState = NORMAL_OPERATION;
-
- ad->LEDInfo.led_thread_running =
- BCM_LED_THREAD_RUNNING_ACTIVELY;
- break;
- /* return; */
- default:
- break;
- }
-}
-
-static VOID LEDControlThread(struct bcm_mini_adapter *Adapter)
-{
- UINT uiIndex = 0;
- UCHAR GPIO_num = 0;
- UCHAR uiLedIndex = 0;
- UINT uiResetValue = 0;
- enum bcm_led_events currdriverstate = 0;
- ulong timeout = 0;
-
- INT Status = 0;
-
- UCHAR dummyGPIONum = 0;
- UCHAR dummyIndex = 0;
-
- /* currdriverstate = Adapter->DriverState; */
- Adapter->LEDInfo.bIdleMode_tx_from_host = false;
-
- /*
- * Wait till event is triggered
- *
- * wait_event(Adapter->LEDInfo.notify_led_event,
- * currdriverstate!= Adapter->DriverState);
- */
-
- GPIO_num = DISABLE_GPIO_NUM;
-
- while (TRUE) {
- /* Wait till event is triggered */
- if ((GPIO_num == DISABLE_GPIO_NUM)
- ||
- ((currdriverstate != FW_DOWNLOAD) &&
- (currdriverstate != NORMAL_OPERATION) &&
- (currdriverstate != LOWPOWER_MODE_ENTER))
- ||
- (currdriverstate == LED_THREAD_INACTIVE))
- Status = wait_event_interruptible(
- Adapter->LEDInfo.notify_led_event,
- currdriverstate != Adapter->DriverState
- || kthread_should_stop());
-
- if (kthread_should_stop() || Adapter->device_removed) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
- DBG_LVL_ALL,
- "Led thread got signal to exit..hence exiting");
- Adapter->LEDInfo.led_thread_running =
- BCM_LED_THREAD_DISABLED;
- TURN_OFF_LED(Adapter, 1 << GPIO_num, uiLedIndex);
- return; /* STATUS_FAILURE; */
- }
-
- if (GPIO_num != DISABLE_GPIO_NUM)
- TURN_OFF_LED(Adapter, 1 << GPIO_num, uiLedIndex);
-
- if (Adapter->LEDInfo.bLedInitDone == false) {
- LedGpioInit(Adapter);
- Adapter->LEDInfo.bLedInitDone = TRUE;
- }
-
- handle_adapter_driver_state(Adapter,
- currdriverstate,
- GPIO_num,
- dummyGPIONum,
- uiLedIndex,
- dummyIndex,
- timeout,
- uiResetValue,
- uiIndex
- );
- }
- Adapter->LEDInfo.led_thread_running = BCM_LED_THREAD_DISABLED;
-}
-
-int InitLedSettings(struct bcm_mini_adapter *Adapter)
-{
- int Status = STATUS_SUCCESS;
- bool bEnableThread = TRUE;
- UCHAR uiIndex = 0;
-
- /*
- * Initially set BitPolarity to normal polarity. The bit 8 of LED type
- * is used to change the polarity of the LED.
- */
-
- for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++)
- Adapter->LEDInfo.LEDState[uiIndex].BitPolarity = 1;
-
- /*
- * Read the LED settings of CONFIG file and map it
- * to GPIO numbers in EEPROM
- */
- Status = ReadConfigFileStructure(Adapter, &bEnableThread);
- if (STATUS_SUCCESS != Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
- DBG_LVL_ALL,
- "LED Thread: FAILED in ReadConfigFileStructure\n");
- return Status;
- }
-
- if (Adapter->LEDInfo.led_thread_running) {
- if (bEnableThread) {
- ;
- } else {
- Adapter->DriverState = DRIVER_HALT;
- wake_up(&Adapter->LEDInfo.notify_led_event);
- Adapter->LEDInfo.led_thread_running =
- BCM_LED_THREAD_DISABLED;
- }
-
- } else if (bEnableThread) {
- /* Create secondary thread to handle the LEDs */
- init_waitqueue_head(&Adapter->LEDInfo.notify_led_event);
- init_waitqueue_head(&Adapter->LEDInfo.idleModeSyncEvent);
- Adapter->LEDInfo.led_thread_running =
- BCM_LED_THREAD_RUNNING_ACTIVELY;
- Adapter->LEDInfo.bIdle_led_off = false;
- Adapter->LEDInfo.led_cntrl_threadid =
- kthread_run((int (*)(void *)) LEDControlThread,
- Adapter, "led_control_thread");
- if (IS_ERR(Adapter->LEDInfo.led_cntrl_threadid)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
- DBG_LVL_ALL,
- "Not able to spawn Kernel Thread\n");
- Adapter->LEDInfo.led_thread_running =
- BCM_LED_THREAD_DISABLED;
- return PTR_ERR(Adapter->LEDInfo.led_cntrl_threadid);
- }
- }
- return Status;
-}
diff --git a/drivers/staging/bcm/led_control.h b/drivers/staging/bcm/led_control.h
deleted file mode 100644
index 1b24bf4658a..00000000000
--- a/drivers/staging/bcm/led_control.h
+++ /dev/null
@@ -1,84 +0,0 @@
-#ifndef _LED_CONTROL_H
-#define _LED_CONTROL_H
-
-#define NUM_OF_LEDS 4
-#define DSD_START_OFFSET 0x0200
-#define EEPROM_VERSION_OFFSET 0x020E
-#define EEPROM_HW_PARAM_POINTER_ADDRESS 0x0218
-#define EEPROM_HW_PARAM_POINTER_ADDRRES_MAP5 0x0220
-#define GPIO_SECTION_START_OFFSET 0x03
-#define COMPATIBILITY_SECTION_LENGTH 42
-#define COMPATIBILITY_SECTION_LENGTH_MAP5 84
-#define EEPROM_MAP5_MAJORVERSION 5
-#define EEPROM_MAP5_MINORVERSION 0
-#define MAX_NUM_OF_BLINKS 10
-#define NUM_OF_GPIO_PINS 16
-#define DISABLE_GPIO_NUM 0xFF
-#define EVENT_SIGNALED 1
-#define MAX_FILE_NAME_BUFFER_SIZE 100
-
-#define TURN_ON_LED(ad, GPIO, index) do { \
- unsigned int gpio_val = GPIO; \
- (ad->LEDInfo.LEDState[index].BitPolarity == 1) ? \
- wrmaltWithLock(ad, BCM_GPIO_OUTPUT_SET_REG, &gpio_val, sizeof(gpio_val)) : \
- wrmaltWithLock(ad, BCM_GPIO_OUTPUT_CLR_REG, &gpio_val, sizeof(gpio_val)); \
- } while (0)
-
-#define TURN_OFF_LED(ad, GPIO, index) do { \
- unsigned int gpio_val = GPIO; \
- (ad->LEDInfo.LEDState[index].BitPolarity == 1) ? \
- wrmaltWithLock(ad, BCM_GPIO_OUTPUT_CLR_REG, &gpio_val, sizeof(gpio_val)) : \
- wrmaltWithLock(ad, BCM_GPIO_OUTPUT_SET_REG, &gpio_val, sizeof(gpio_val)); \
- } while (0)
-
-enum bcm_led_colors {
- RED_LED = 1,
- BLUE_LED = 2,
- YELLOW_LED = 3,
- GREEN_LED = 4
-};
-
-enum bcm_led_events {
- SHUTDOWN_EXIT = 0x00,
- DRIVER_INIT = 0x1,
- FW_DOWNLOAD = 0x2,
- FW_DOWNLOAD_DONE = 0x4,
- NO_NETWORK_ENTRY = 0x8,
- NORMAL_OPERATION = 0x10,
- LOWPOWER_MODE_ENTER = 0x20,
- IDLEMODE_CONTINUE = 0x40,
- IDLEMODE_EXIT = 0x80,
- LED_THREAD_INACTIVE = 0x100, /* Makes the LED thread inactive. It is equivalent to putting the thread on hold. */
- LED_THREAD_ACTIVE = 0x200, /* Makes the LED thread active again. */
- DRIVER_HALT = 0xff
-}; /* Enumerated values of different driver states */
-
-/*
- * Structure which stores the information of different LED types
- * and corresponding LED state information of driver states
- */
-struct bcm_led_state_info {
- unsigned char LED_Type; /* specify GPIO number - use 0xFF if not used */
- unsigned char LED_On_State; /* Bits set or reset for different states */
- unsigned char LED_Blink_State; /* Bits set or reset for blinking LEDs for different states */
- unsigned char GPIO_Num;
- unsigned char BitPolarity; /* To represent whether H/W is normal polarity or reverse polarity */
-};
-
-struct bcm_led_info {
- struct bcm_led_state_info LEDState[NUM_OF_LEDS];
- bool bIdleMode_tx_from_host; /* Indicates whether the driver came out of idle mode due to the host or the target */
- bool bIdle_led_off;
- wait_queue_head_t notify_led_event;
- wait_queue_head_t idleModeSyncEvent;
- struct task_struct *led_cntrl_threadid;
- int led_thread_running;
- bool bLedInitDone;
-};
-
-/* LED Thread state. */
-#define BCM_LED_THREAD_DISABLED 0 /* LED Thread is not running. */
-#define BCM_LED_THREAD_RUNNING_ACTIVELY 1 /* LED thread is running. */
-#define BCM_LED_THREAD_RUNNING_INACTIVELY 2 /* LED thread has been put on hold */
-
-#endif
diff --git a/drivers/staging/bcm/nvm.c b/drivers/staging/bcm/nvm.c
deleted file mode 100644
index ce09473fbb1..00000000000
--- a/drivers/staging/bcm/nvm.c
+++ /dev/null
@@ -1,4661 +0,0 @@
-#include "headers.h"
-
-#define DWORD unsigned int
-
-static int BcmDoChipSelect(struct bcm_mini_adapter *Adapter,
- unsigned int offset);
-static int BcmGetActiveDSD(struct bcm_mini_adapter *Adapter);
-static int BcmGetActiveISO(struct bcm_mini_adapter *Adapter);
-static unsigned int BcmGetEEPROMSize(struct bcm_mini_adapter *Adapter);
-static int BcmGetFlashCSInfo(struct bcm_mini_adapter *Adapter);
-static unsigned int BcmGetFlashSectorSize(struct bcm_mini_adapter *Adapter,
- unsigned int FlashSectorSizeSig,
- unsigned int FlashSectorSize);
-
-static VOID BcmValidateNvmType(struct bcm_mini_adapter *Adapter);
-static int BcmGetNvmSize(struct bcm_mini_adapter *Adapter);
-static unsigned int BcmGetFlashSize(struct bcm_mini_adapter *Adapter);
-static enum bcm_nvm_type BcmGetNvmType(struct bcm_mini_adapter *Adapter);
-
-static int BcmGetSectionValEndOffset(struct bcm_mini_adapter *Adapter,
- enum bcm_flash2x_section_val eFlash2xSectionVal);
-
-static B_UINT8 IsOffsetWritable(struct bcm_mini_adapter *Adapter,
- unsigned int uiOffset);
-static int IsSectionWritable(struct bcm_mini_adapter *Adapter,
- enum bcm_flash2x_section_val Section);
-static int IsSectionExistInVendorInfo(struct bcm_mini_adapter *Adapter,
- enum bcm_flash2x_section_val section);
-
-static int ReadDSDPriority(struct bcm_mini_adapter *Adapter,
- enum bcm_flash2x_section_val dsd);
-static int ReadDSDSignature(struct bcm_mini_adapter *Adapter,
- enum bcm_flash2x_section_val dsd);
-static int ReadISOPriority(struct bcm_mini_adapter *Adapter,
- enum bcm_flash2x_section_val iso);
-static int ReadISOSignature(struct bcm_mini_adapter *Adapter,
- enum bcm_flash2x_section_val iso);
-
-static int CorruptDSDSig(struct bcm_mini_adapter *Adapter,
- enum bcm_flash2x_section_val eFlash2xSectionVal);
-static int CorruptISOSig(struct bcm_mini_adapter *Adapter,
- enum bcm_flash2x_section_val eFlash2xSectionVal);
-static int SaveHeaderIfPresent(struct bcm_mini_adapter *Adapter,
- PUCHAR pBuff,
- unsigned int uiSectAlignAddr);
-static int WriteToFlashWithoutSectorErase(struct bcm_mini_adapter *Adapter,
- PUINT pBuff,
- enum bcm_flash2x_section_val eFlash2xSectionVal,
- unsigned int uiOffset,
- unsigned int uiNumBytes);
-static enum bcm_flash2x_section_val getHighestPriDSD(struct bcm_mini_adapter *Adapter);
-static enum bcm_flash2x_section_val getHighestPriISO(struct bcm_mini_adapter *Adapter);
-
-static int BeceemFlashBulkRead(
- struct bcm_mini_adapter *Adapter,
- PUINT pBuffer,
- unsigned int uiOffset,
- unsigned int uiNumBytes);
-
-static int BeceemFlashBulkWrite(
- struct bcm_mini_adapter *Adapter,
- PUINT pBuffer,
- unsigned int uiOffset,
- unsigned int uiNumBytes,
- bool bVerify);
-
-static int GetFlashBaseAddr(struct bcm_mini_adapter *Adapter);
-
-static int ReadBeceemEEPROMBulk(struct bcm_mini_adapter *Adapter, unsigned int dwAddress, unsigned int *pdwData, unsigned int dwNumData);
-
-/* Procedure: ReadEEPROMStatusRegister
- *
- * Description: Reads the standard EEPROM Status Register.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * Returns:
- * OSAL_STATUS_CODE
- */
-static UCHAR ReadEEPROMStatusRegister(struct bcm_mini_adapter *Adapter)
-{
- UCHAR uiData = 0;
- DWORD dwRetries = MAX_EEPROM_RETRIES * RETRIES_PER_DELAY;
- unsigned int uiStatus = 0;
- unsigned int value = 0;
- unsigned int value1 = 0;
-
- /* Read the EEPROM status register */
- value = EEPROM_READ_STATUS_REGISTER;
- wrmalt(Adapter, EEPROM_CMDQ_SPI_REG, &value, sizeof(value));
-
- while (dwRetries != 0) {
- value = 0;
- uiStatus = 0;
- rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &uiStatus, sizeof(uiStatus));
- if (Adapter->device_removed == TRUE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Modem has got removed hence exiting....");
- break;
- }
-
- /* Wait for Avail bit to be set. */
- if ((uiStatus & EEPROM_READ_DATA_AVAIL) != 0) {
- /* Clear the Avail/Full bits - which ever is set. */
- value = uiStatus & (EEPROM_READ_DATA_AVAIL | EEPROM_READ_DATA_FULL);
- wrmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &value, sizeof(value));
-
- value = 0;
- rdmalt(Adapter, EEPROM_READ_DATAQ_REG, &value, sizeof(value));
- uiData = (UCHAR)value;
-
- break;
- }
-
- dwRetries--;
- if (dwRetries == 0) {
- rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &value, sizeof(value));
- rdmalt(Adapter, EEPROM_SPI_Q_STATUS_REG, &value1, sizeof(value1));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "0x3004 = %x 0x3008 = %x, retries = %d failed.\n", value, value1, MAX_EEPROM_RETRIES * RETRIES_PER_DELAY);
- return uiData;
- }
- if (!(dwRetries%RETRIES_PER_DELAY))
- udelay(1000);
- uiStatus = 0;
- }
- return uiData;
-} /* ReadEEPROMStatusRegister */
-
-/*
- * Procedure: ReadBeceemEEPROMBulk
- *
- * Description: This routine reads 16 bytes of data from the EEPROM
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * dwAddress - EEPROM Offset to read the data from.
- * pdwData - Pointer to double word where data needs to be stored in.
- * dwNumWords - Number of words. Valid values are 4 ONLY.
- *
- * Returns:
- * OSAL_STATUS_CODE:
- */
-
-static int ReadBeceemEEPROMBulk(struct bcm_mini_adapter *Adapter,
- DWORD dwAddress,
- DWORD *pdwData,
- DWORD dwNumWords)
-{
- DWORD dwIndex = 0;
- DWORD dwRetries = MAX_EEPROM_RETRIES * RETRIES_PER_DELAY;
- unsigned int uiStatus = 0;
- unsigned int value = 0;
- unsigned int value1 = 0;
- UCHAR *pvalue;
-
- /* Flush the read and cmd queue. */
- value = (EEPROM_READ_QUEUE_FLUSH | EEPROM_CMD_QUEUE_FLUSH);
- wrmalt(Adapter, SPI_FLUSH_REG, &value, sizeof(value));
- value = 0;
- wrmalt(Adapter, SPI_FLUSH_REG, &value, sizeof(value));
-
- /* Clear the Avail/Full bits. */
- value = (EEPROM_READ_DATA_AVAIL | EEPROM_READ_DATA_FULL);
- wrmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &value, sizeof(value));
-
- value = dwAddress | ((dwNumWords == 4) ? EEPROM_16_BYTE_PAGE_READ : EEPROM_4_BYTE_PAGE_READ);
- wrmalt(Adapter, EEPROM_CMDQ_SPI_REG, &value, sizeof(value));
-
- while (dwRetries != 0) {
- uiStatus = 0;
- rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &uiStatus, sizeof(uiStatus));
- if (Adapter->device_removed == TRUE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Modem has got Removed.hence exiting from loop...");
- return -ENODEV;
- }
-
- /* If we are reading 16 bytes we want to be sure that the queue
- * is full before we read. In the other cases we are ok if the
- * queue has data available
- */
- if (dwNumWords == 4) {
- if ((uiStatus & EEPROM_READ_DATA_FULL) != 0) {
- /* Clear the Avail/Full bits - which ever is set. */
- value = (uiStatus & (EEPROM_READ_DATA_AVAIL | EEPROM_READ_DATA_FULL));
- wrmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &value, sizeof(value));
- break;
- }
- } else if (dwNumWords == 1) {
- if ((uiStatus & EEPROM_READ_DATA_AVAIL) != 0) {
- /* We just got Avail and we have to read 32bits so we
- * need this sleep for Cardbus kind of devices.
- */
- if (Adapter->chip_id == 0xBECE0210)
- udelay(800);
-
- /* Clear the Avail/Full bits - which ever is set. */
- value = (uiStatus & (EEPROM_READ_DATA_AVAIL | EEPROM_READ_DATA_FULL));
- wrmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &value, sizeof(value));
- break;
- }
- }
-
- uiStatus = 0;
-
- dwRetries--;
- if (dwRetries == 0) {
- value = 0;
- value1 = 0;
- rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &value, sizeof(value));
- rdmalt(Adapter, EEPROM_SPI_Q_STATUS_REG, &value1, sizeof(value1));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "dwNumWords %d 0x3004 = %x 0x3008 = %x retries = %d failed.\n",
- dwNumWords, value, value1, MAX_EEPROM_RETRIES * RETRIES_PER_DELAY);
- return STATUS_FAILURE;
- }
-
- if (!(dwRetries%RETRIES_PER_DELAY))
- udelay(1000);
- }
-
- for (dwIndex = 0; dwIndex < dwNumWords; dwIndex++) {
- /* We get only a byte at a time - from LSB to MSB. We shift it into an integer. */
- pvalue = (PUCHAR)(pdwData + dwIndex);
-
- value = 0;
- rdmalt(Adapter, EEPROM_READ_DATAQ_REG, &value, sizeof(value));
-
- pvalue[0] = value;
-
- value = 0;
- rdmalt(Adapter, EEPROM_READ_DATAQ_REG, &value, sizeof(value));
-
- pvalue[1] = value;
-
- value = 0;
- rdmalt(Adapter, EEPROM_READ_DATAQ_REG, &value, sizeof(value));
-
- pvalue[2] = value;
-
- value = 0;
- rdmalt(Adapter, EEPROM_READ_DATAQ_REG, &value, sizeof(value));
-
- pvalue[3] = value;
- }
-
- return STATUS_SUCCESS;
-} /* ReadBeceemEEPROMBulk() */
-
-/*
- * Procedure: ReadBeceemEEPROM
- *
- * Description: This routine reads 4 bytes of data from the EEPROM. It uses
- * 1 or 2 page reads to do this operation.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * uiOffset - EEPROM Offset to read the data from.
- * pBuffer - Pointer to word where data needs to be stored in.
- *
- * Returns:
- * OSAL_STATUS_CODE:
- */
-
-int ReadBeceemEEPROM(struct bcm_mini_adapter *Adapter,
- DWORD uiOffset,
- DWORD *pBuffer)
-{
- unsigned int uiData[8] = {0};
- unsigned int uiByteOffset = 0;
- unsigned int uiTempOffset = 0;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, " ====> ");
-
- uiTempOffset = uiOffset - (uiOffset % MAX_RW_SIZE);
- uiByteOffset = uiOffset - uiTempOffset;
-
- ReadBeceemEEPROMBulk(Adapter, uiTempOffset, (PUINT)&uiData[0], 4);
-
- /* A word can span at most 2 pages. In that case we read the
- * next page too.
- */
- if (uiByteOffset > 12)
- ReadBeceemEEPROMBulk(Adapter, uiTempOffset + MAX_RW_SIZE, (PUINT)&uiData[4], 4);
-
- memcpy((PUCHAR)pBuffer, (((PUCHAR)&uiData[0]) + uiByteOffset), 4);
-
- return STATUS_SUCCESS;
-} /* ReadBeceemEEPROM() */
-
-int ReadMacAddressFromNVM(struct bcm_mini_adapter *Adapter)
-{
- int Status;
- unsigned char puMacAddr[6];
-
- Status = BeceemNVMRead(Adapter,
- (PUINT)&puMacAddr[0],
- INIT_PARAMS_1_MACADDRESS_ADDRESS,
- MAC_ADDRESS_SIZE);
-
- if (Status == STATUS_SUCCESS)
- memcpy(Adapter->dev->dev_addr, puMacAddr, MAC_ADDRESS_SIZE);
-
- return Status;
-}
-
-/*
- * Procedure: BeceemEEPROMBulkRead
- *
- * Description: Reads the EEPROM and returns the Data.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * pBuffer - Buffer to store the data read from EEPROM
- * uiOffset - Offset of EEPROM from where data should be read
- * uiNumBytes - Number of bytes to be read from the EEPROM.
- *
- * Returns:
- * OSAL_STATUS_SUCCESS - if EEPROM read is successful.
- * <FAILURE> - if failed.
- */
-
-int BeceemEEPROMBulkRead(struct bcm_mini_adapter *Adapter,
- PUINT pBuffer,
- unsigned int uiOffset,
- unsigned int uiNumBytes)
-{
- unsigned int uiData[4] = {0};
- /* unsigned int uiAddress = 0; */
- unsigned int uiBytesRemaining = uiNumBytes;
- unsigned int uiIndex = 0;
- unsigned int uiTempOffset = 0;
- unsigned int uiExtraBytes = 0;
- unsigned int uiFailureRetries = 0;
- PUCHAR pcBuff = (PUCHAR)pBuffer;
-
- if (uiOffset % MAX_RW_SIZE && uiBytesRemaining) {
- uiTempOffset = uiOffset - (uiOffset % MAX_RW_SIZE);
- uiExtraBytes = uiOffset - uiTempOffset;
- ReadBeceemEEPROMBulk(Adapter, uiTempOffset, (PUINT)&uiData[0], 4);
- if (uiBytesRemaining >= (MAX_RW_SIZE - uiExtraBytes)) {
- memcpy(pBuffer, (((PUCHAR)&uiData[0]) + uiExtraBytes), MAX_RW_SIZE - uiExtraBytes);
- uiBytesRemaining -= (MAX_RW_SIZE - uiExtraBytes);
- uiIndex += (MAX_RW_SIZE - uiExtraBytes);
- uiOffset += (MAX_RW_SIZE - uiExtraBytes);
- } else {
- memcpy(pBuffer, (((PUCHAR)&uiData[0]) + uiExtraBytes), uiBytesRemaining);
- uiIndex += uiBytesRemaining;
- uiOffset += uiBytesRemaining;
- uiBytesRemaining = 0;
- }
- }
-
- while (uiBytesRemaining && uiFailureRetries != 128) {
- if (Adapter->device_removed)
- return -1;
-
- if (uiBytesRemaining >= MAX_RW_SIZE) {
- /* For requests of 16 bytes or more, use the bulk
- * read function to make the access faster.
- * We read 4 dwords of data
- */
- if (ReadBeceemEEPROMBulk(Adapter, uiOffset, &uiData[0], 4) == 0) {
- memcpy(pcBuff + uiIndex, &uiData[0], MAX_RW_SIZE);
- uiOffset += MAX_RW_SIZE;
- uiBytesRemaining -= MAX_RW_SIZE;
- uiIndex += MAX_RW_SIZE;
- } else {
- uiFailureRetries++;
- mdelay(3); /* sleep for a while before retry... */
- }
- } else if (uiBytesRemaining >= 4) {
- if (ReadBeceemEEPROM(Adapter, uiOffset, &uiData[0]) == 0) {
- memcpy(pcBuff + uiIndex, &uiData[0], 4);
- uiOffset += 4;
- uiBytesRemaining -= 4;
- uiIndex += 4;
- } else {
- uiFailureRetries++;
- mdelay(3); /* sleep for a while before retry... */
- }
- } else {
- /* Handle the reads less than 4 bytes... */
- PUCHAR pCharBuff = (PUCHAR)pBuffer;
-
- pCharBuff += uiIndex;
- if (ReadBeceemEEPROM(Adapter, uiOffset, &uiData[0]) == 0) {
- memcpy(pCharBuff, &uiData[0], uiBytesRemaining); /* copy only bytes requested. */
- uiBytesRemaining = 0;
- } else {
- uiFailureRetries++;
- mdelay(3); /* sleep for a while before retry... */
- }
- }
- }
-
- return 0;
-}
-
-/*
- * Procedure: BeceemFlashBulkRead
- *
- * Description: Reads the FLASH and returns the Data.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * pBuffer - Buffer to store the data read from FLASH
- * uiOffset - Offset of FLASH from where data should be read
- * uiNumBytes - Number of bytes to be read from the FLASH.
- *
- * Returns:
- * OSAL_STATUS_SUCCESS - if FLASH read is successful.
- * <FAILURE> - if failed.
- */
-
-static int BeceemFlashBulkRead(struct bcm_mini_adapter *Adapter,
- PUINT pBuffer,
- unsigned int uiOffset,
- unsigned int uiNumBytes)
-{
- unsigned int uiIndex = 0;
- unsigned int uiBytesToRead = uiNumBytes;
- int Status = 0;
- unsigned int uiPartOffset = 0;
- int bytes;
-
- if (Adapter->device_removed) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Device Got Removed");
- return -ENODEV;
- }
-
- /* Adding flash Base address
- * uiOffset = uiOffset + GetFlashBaseAddr(Adapter);
- */
- #if defined(BCM_SHM_INTERFACE) && !defined(FLASH_DIRECT_ACCESS)
- Status = bcmflash_raw_read((uiOffset/FLASH_PART_SIZE), (uiOffset % FLASH_PART_SIZE), (unsigned char *)pBuffer, uiNumBytes);
- return Status;
- #endif
-
- Adapter->SelectedChip = RESET_CHIP_SELECT;
-
- if (uiOffset % MAX_RW_SIZE) {
- BcmDoChipSelect(Adapter, uiOffset);
- uiPartOffset = (uiOffset & (FLASH_PART_SIZE - 1)) + GetFlashBaseAddr(Adapter);
-
- uiBytesToRead = MAX_RW_SIZE - (uiOffset % MAX_RW_SIZE);
- uiBytesToRead = MIN(uiNumBytes, uiBytesToRead);
-
- bytes = rdm(Adapter, uiPartOffset, (PCHAR)pBuffer + uiIndex, uiBytesToRead);
- if (bytes < 0) {
- Status = bytes;
- Adapter->SelectedChip = RESET_CHIP_SELECT;
- return Status;
- }
-
- uiIndex += uiBytesToRead;
- uiOffset += uiBytesToRead;
- uiNumBytes -= uiBytesToRead;
- }
-
- while (uiNumBytes) {
- BcmDoChipSelect(Adapter, uiOffset);
- uiPartOffset = (uiOffset & (FLASH_PART_SIZE - 1)) + GetFlashBaseAddr(Adapter);
-
- uiBytesToRead = MIN(uiNumBytes, MAX_RW_SIZE);
-
- bytes = rdm(Adapter, uiPartOffset, (PCHAR)pBuffer + uiIndex, uiBytesToRead);
- if (bytes < 0) {
- Status = bytes;
- break;
- }
-
- uiIndex += uiBytesToRead;
- uiOffset += uiBytesToRead;
- uiNumBytes -= uiBytesToRead;
- }
- Adapter->SelectedChip = RESET_CHIP_SELECT;
- return Status;
-}
-
-/*
- * Procedure: BcmGetFlashSize
- *
- * Description: Finds the size of FLASH.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- *
- * Returns:
- * unsigned int - size of the FLASH Storage.
- *
- */
-
-static unsigned int BcmGetFlashSize(struct bcm_mini_adapter *Adapter)
-{
- if (IsFlash2x(Adapter))
- return Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + sizeof(struct bcm_dsd_header);
- else
- return 32 * 1024;
-}
-
-/*
- * Procedure: BcmGetEEPROMSize
- *
- * Description: Finds the size of EEPROM.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- *
- * Returns:
- * unsigned int - size of the EEPROM Storage.
- *
- */
-
-static unsigned int BcmGetEEPROMSize(struct bcm_mini_adapter *Adapter)
-{
- unsigned int uiData = 0;
- unsigned int uiIndex = 0;
-
- /*
- * If the EEPROM is present and already calibrated, it will have the
- * 'BECM' string at offset 0.
- * To find the EEPROM size, read at the possible boundaries of the
- * EEPROM (4K, 8K, etc.); accessing the EEPROM beyond its size will
- * result in a wrap around. So when we reach the end of the EEPROM we
- * will read back the 'BECM' string, which is in fact at offset 0.
- */
- BeceemEEPROMBulkRead(Adapter, &uiData, 0x0, 4);
- if (uiData == BECM) {
- for (uiIndex = 2; uiIndex <= 256; uiIndex *= 2) {
- BeceemEEPROMBulkRead(Adapter, &uiData, uiIndex * 1024, 4);
- if (uiData == BECM)
- return uiIndex * 1024;
- }
- } else {
- /*
- * EEPROM may not be present or not programmed
- */
- uiData = 0xBABEFACE;
- if (BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&uiData, 0, 4, TRUE) == 0) {
- uiData = 0;
- for (uiIndex = 2; uiIndex <= 256; uiIndex *= 2) {
- BeceemEEPROMBulkRead(Adapter, &uiData, uiIndex * 1024, 4);
- if (uiData == 0xBABEFACE)
- return uiIndex * 1024;
- }
- }
- }
- return 0;
-}
-
-/*
- * Procedure: FlashSectorErase
- *
- * Description: Erases the given number of FLASH sectors.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * addr - sector start address
- * numOfSectors - number of sectors to be erased.
- *
- * Returns:
- * OSAL_STATUS_CODE
- *
- */
-
-static int FlashSectorErase(struct bcm_mini_adapter *Adapter,
- unsigned int addr,
- unsigned int numOfSectors)
-{
- unsigned int iIndex = 0, iRetries = 0;
- unsigned int uiStatus = 0;
- unsigned int value;
- int bytes;
-
- for (iIndex = 0; iIndex < numOfSectors; iIndex++) {
- value = 0x06000000;
- wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value));
-
- value = (0xd8000000 | (addr & 0xFFFFFF));
- wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value));
- iRetries = 0;
-
- do {
- value = (FLASH_CMD_STATUS_REG_READ << 24);
- if (wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)) < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Programing of FLASH_SPI_CMDQ_REG fails");
- return STATUS_FAILURE;
- }
-
- bytes = rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus));
- if (bytes < 0) {
- uiStatus = bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Reading status of FLASH_SPI_READQ_REG fails");
- return uiStatus;
- }
- iRetries++;
- /* After every try let the CPU rest for 10 ms. Generally the time taken by
- * the sector erase cycle is 500 ms to 40000 ms, hence sleeping 10 ms
- * won't hamper performance in any case.
- */
- mdelay(10);
- } while ((uiStatus & 0x1) && (iRetries < 400));
-
- if (uiStatus & 0x1) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "iRetries crossing the limit of 80000\n");
- return STATUS_FAILURE;
- }
-
- addr += Adapter->uiSectorSize;
- }
- return 0;
-}
-/*
- * Procedure: flashByteWrite
- *
- * Description: Performs Byte by Byte write to flash
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * uiOffset - Offset of the flash where data needs to be written to.
- * pData - Address of Data to be written.
- * Returns:
- * OSAL_STATUS_CODE
- *
- */
-
-static int flashByteWrite(struct bcm_mini_adapter *Adapter,
- unsigned int uiOffset,
- PVOID pData)
-{
- unsigned int uiStatus = 0;
- int iRetries = MAX_FLASH_RETRIES * FLASH_PER_RETRIES_DELAY; /* 3 */
- unsigned int value;
- ULONG ulData = *(PUCHAR)pData;
- int bytes;
- /*
- * need not write 0xFF because write requires an erase and erase will
- * make whole sector 0xFF.
- */
-
- if (0xFF == ulData)
- return STATUS_SUCCESS;
-
- /* DumpDebug(NVM_RW,("flashWrite ====>\n")); */
- value = (FLASH_CMD_WRITE_ENABLE << 24);
- if (wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)) < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Write enable in FLASH_SPI_CMDQ_REG register fails");
- return STATUS_FAILURE;
- }
-
- if (wrm(Adapter, FLASH_SPI_WRITEQ_REG, (PCHAR)&ulData, 4) < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "DATA Write on FLASH_SPI_WRITEQ_REG fails");
- return STATUS_FAILURE;
- }
- value = (0x02000000 | (uiOffset & 0xFFFFFF));
- if (wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)) < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Programming of FLASH_SPI_CMDQ_REG fails");
- return STATUS_FAILURE;
- }
-
- /* __udelay(950); */
-
- do {
- value = (FLASH_CMD_STATUS_REG_READ << 24);
- if (wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)) < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Programing of FLASH_SPI_CMDQ_REG fails");
- return STATUS_FAILURE;
- }
- /* __udelay(1); */
- bytes = rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus));
- if (bytes < 0) {
- uiStatus = bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Reading status of FLASH_SPI_READQ_REG fails");
- return uiStatus;
- }
- iRetries--;
- if (iRetries && ((iRetries % FLASH_PER_RETRIES_DELAY) == 0))
- udelay(1000);
-
- } while ((uiStatus & 0x1) && (iRetries > 0));
-
- if (uiStatus & 0x1) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Write fails even after checking status for 200 times.");
- return STATUS_FAILURE;
- }
-
- return STATUS_SUCCESS;
-}
-
-/*
- * Procedure: flashWrite
- *
- * Description: Performs write to flash
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * uiOffset - Offset of the flash where data needs to be written to.
- * pData - Address of Data to be written.
- * Returns:
- * OSAL_STATUS_CODE
- *
- */
-
-static int flashWrite(struct bcm_mini_adapter *Adapter,
- unsigned int uiOffset,
- PVOID pData)
-{
- /* unsigned int uiStatus = 0;
- * int iRetries = 0;
- * unsigned int uiReadBack = 0;
- */
- unsigned int uiStatus = 0;
- int iRetries = MAX_FLASH_RETRIES * FLASH_PER_RETRIES_DELAY; /* 3 */
- unsigned int value;
- unsigned int uiErasePattern[4] = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
- int bytes;
- /*
- * need not write 0xFFFFFFFF because write requires an erase and erase will
- * make whole sector 0xFFFFFFFF.
- */
- if (!memcmp(pData, uiErasePattern, MAX_RW_SIZE))
- return 0;
-
- value = (FLASH_CMD_WRITE_ENABLE << 24);
-
- if (wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)) < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Write Enable of FLASH_SPI_CMDQ_REG fails");
- return STATUS_FAILURE;
- }
-
- if (wrm(Adapter, uiOffset, (PCHAR)pData, MAX_RW_SIZE) < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Data write fails...");
- return STATUS_FAILURE;
- }
-
- /* __udelay(950); */
- do {
- value = (FLASH_CMD_STATUS_REG_READ << 24);
- if (wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)) < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Programing of FLASH_SPI_CMDQ_REG fails");
- return STATUS_FAILURE;
- }
- /* __udelay(1); */
- bytes = rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus));
- if (bytes < 0) {
- uiStatus = bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Reading status of FLASH_SPI_READQ_REG fails");
- return uiStatus;
- }
-
- iRetries--;
- /* This ensures that there will be no changes in the current path.
- * Currently one rdm/wrm takes 125 us.
- * Hence 125 * 2 * FLASH_PER_RETRIES_DELAY > 3 ms (worst case delay).
- * Hence the current implementation will introduce no delay in the current path.
- */
- if (iRetries && ((iRetries % FLASH_PER_RETRIES_DELAY) == 0))
- udelay(1000);
- } while ((uiStatus & 0x1) && (iRetries > 0));
-
- if (uiStatus & 0x1) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Write fails even after checking status for 200 times.");
- return STATUS_FAILURE;
- }
-
- return STATUS_SUCCESS;
-}
-
-/*-----------------------------------------------------------------------------
- * Procedure: flashByteWriteStatus
- *
- * Description: Performs byte by byte write to flash with write done status check
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * uiOffset - Offset of the flash where data needs to be written to.
- * pData - Address of the Data to be written.
- * Returns:
- * OSAL_STATUS_CODE
- *
- */
-static int flashByteWriteStatus(struct bcm_mini_adapter *Adapter,
- unsigned int uiOffset,
- PVOID pData)
-{
- unsigned int uiStatus = 0;
- int iRetries = MAX_FLASH_RETRIES * FLASH_PER_RETRIES_DELAY; /* 3 */
- ULONG ulData = *(PUCHAR)pData;
- unsigned int value;
- int bytes;
-
- /*
- * need not write 0xFFFFFFFF because write requires an erase and erase will
- * make whole sector 0xFFFFFFFF.
- */
-
- if (0xFF == ulData)
- return STATUS_SUCCESS;
-
- /* DumpDebug(NVM_RW,("flashWrite ====>\n")); */
-
- value = (FLASH_CMD_WRITE_ENABLE << 24);
- if (wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)) < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Write enable in FLASH_SPI_CMDQ_REG register fails");
- return STATUS_FAILURE;
- }
- if (wrm(Adapter, FLASH_SPI_WRITEQ_REG, (PCHAR)&ulData, 4) < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "DATA Write on FLASH_SPI_WRITEQ_REG fails");
- return STATUS_FAILURE;
- }
- value = (0x02000000 | (uiOffset & 0xFFFFFF));
- if (wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)) < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Programming of FLASH_SPI_CMDQ_REG fails");
- return STATUS_FAILURE;
- }
-
- /* msleep(1); */
-
- do {
- value = (FLASH_CMD_STATUS_REG_READ << 24);
- if (wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)) < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Programing of FLASH_SPI_CMDQ_REG fails");
- return STATUS_FAILURE;
- }
- /* __udelay(1); */
- bytes = rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus));
- if (bytes < 0) {
- uiStatus = bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Reading status of FLASH_SPI_READQ_REG fails");
- return uiStatus;
- }
-
- iRetries--;
- if (iRetries && ((iRetries % FLASH_PER_RETRIES_DELAY) == 0))
- udelay(1000);
-
- } while ((uiStatus & 0x1) && (iRetries > 0));
-
- if (uiStatus & 0x1) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Write fails even after checking status for 200 times.");
- return STATUS_FAILURE;
- }
-
- return STATUS_SUCCESS;
-}
-/*
- * Procedure: flashWriteStatus
- *
- * Description: Performs write to flash with write done status check
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * uiOffset - Offset of the flash where data needs to be written to.
- * pData - Address of the Data to be written.
- * Returns:
- * OSAL_STATUS_CODE
- *
- */
-
-static int flashWriteStatus(struct bcm_mini_adapter *Adapter,
- unsigned int uiOffset,
- PVOID pData)
-{
- unsigned int uiStatus = 0;
- int iRetries = MAX_FLASH_RETRIES * FLASH_PER_RETRIES_DELAY; /* 3 */
- /* unsigned int uiReadBack = 0; */
- unsigned int value;
- unsigned int uiErasePattern[4] = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
- int bytes;
-
- /*
- * need not write 0xFFFFFFFF because write requires an erase and erase will
- * make whole sector 0xFFFFFFFF.
- */
- if (!memcmp(pData, uiErasePattern, MAX_RW_SIZE))
- return 0;
-
- value = (FLASH_CMD_WRITE_ENABLE << 24);
- if (wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)) < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Write Enable of FLASH_SPI_CMDQ_REG fails");
- return STATUS_FAILURE;
- }
-
- if (wrm(Adapter, uiOffset, (PCHAR)pData, MAX_RW_SIZE) < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Data write fails...");
- return STATUS_FAILURE;
- }
- /* __udelay(1); */
-
- do {
- value = (FLASH_CMD_STATUS_REG_READ << 24);
- if (wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)) < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Programing of FLASH_SPI_CMDQ_REG fails");
- return STATUS_FAILURE;
- }
- /* __udelay(1); */
- bytes = rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus));
- if (bytes < 0) {
- uiStatus = bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Reading status of FLASH_SPI_READQ_REG fails");
- return uiStatus;
- }
- iRetries--;
- /* This ensures that there will be no changes in the current path.
- * Currently one rdm/wrm takes 125 us.
- * Hence 125 * 2 * FLASH_PER_RETRIES_DELAY > 3 ms (worst case delay).
- * Hence the current implementation will introduce no delay in the current path.
- */
- if (iRetries && ((iRetries % FLASH_PER_RETRIES_DELAY) == 0))
- udelay(1000);
-
- } while ((uiStatus & 0x1) && (iRetries > 0));
-
- if (uiStatus & 0x1) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Write fails even after checking status for 200 times.");
- return STATUS_FAILURE;
- }
-
- return STATUS_SUCCESS;
-}
-
-/*
- * Procedure: BcmRestoreBlockProtectStatus
- *
- * Description: Restores the original block protection status.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * ulWriteStatus -Original status
- * Returns:
- * <VOID>
- *
- */
-
-static VOID BcmRestoreBlockProtectStatus(struct bcm_mini_adapter *Adapter, ULONG ulWriteStatus)
-{
- unsigned int value;
-
- value = (FLASH_CMD_WRITE_ENABLE << 24);
- wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value));
-
- udelay(20);
- value = (FLASH_CMD_STATUS_REG_WRITE << 24) | (ulWriteStatus << 16);
- wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value));
- udelay(20);
-}
-
-/*
- * Procedure: BcmFlashUnProtectBlock
- *
- * Description: UnProtects appropriate blocks for writing.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * uiOffset - Offset of the flash where data needs to be written to. This should be Sector aligned.
- * Returns:
- * ULONG - Status value before UnProtect.
- *
- */
-
-static ULONG BcmFlashUnProtectBlock(struct bcm_mini_adapter *Adapter, unsigned int uiOffset, unsigned int uiLength)
-{
- ULONG ulStatus = 0;
- ULONG ulWriteStatus = 0;
- unsigned int value;
-
- uiOffset = uiOffset&0x000FFFFF;
- /*
- * Implemented only for 1MB Flash parts.
- */
- if (FLASH_PART_SST25VF080B == Adapter->ulFlashID) {
- /*
- * Get Current BP status.
- */
- value = (FLASH_CMD_STATUS_REG_READ << 24);
- wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value));
- udelay(10);
- /*
- * Read status will be WWXXYYZZ. We have to take only WW.
- */
- rdmalt(Adapter, FLASH_SPI_READQ_REG, (PUINT)&ulStatus, sizeof(ulStatus));
- ulStatus >>= 24;
- ulWriteStatus = ulStatus;
- /*
- * Bits [5-2] give current block level protection status.
- * Bit5: BP3 - DONT CARE
- * BP2-BP0: 0 - NO PROTECTION, 1 - UPPER 1/16, 2 - UPPER 1/8, 3 - UPPER 1/4
- * 4 - UPPER 1/2. 5 to 7 - ALL BLOCKS
- */
-
- if (ulStatus) {
- if ((uiOffset+uiLength) <= 0x80000) {
- /*
- * Offset comes in lower half of 1MB. Protect the upper half.
- * Clear BP1 and BP0 and set BP2.
- */
- ulWriteStatus |= (0x4<<2);
- ulWriteStatus &= ~(0x3<<2);
- } else if ((uiOffset + uiLength) <= 0xC0000) {
- /*
- * Offset comes below Upper 1/4. Upper 1/4 can be protected.
- * Clear BP2 and set BP1 and BP0.
- */
- ulWriteStatus |= (0x3<<2);
- ulWriteStatus &= ~(0x1<<4);
- } else if ((uiOffset + uiLength) <= 0xE0000) {
- /*
- * Offset comes below Upper 1/8. Upper 1/8 can be protected.
- * Clear BP2 and BP0 and set BP1
- */
- ulWriteStatus |= (0x1<<3);
- ulWriteStatus &= ~(0x5<<2);
- } else if ((uiOffset + uiLength) <= 0xF0000) {
- /*
- * Offset comes below Upper 1/16. Only upper 1/16 can be protected.
- * Set BP0 and Clear BP2,BP1.
- */
- ulWriteStatus |= (0x1<<2);
- ulWriteStatus &= ~(0x3<<3);
- } else {
- /*
- * Unblock all.
- * Clear BP2,BP1 and BP0.
- */
- ulWriteStatus &= ~(0x7<<2);
- }
-
- value = (FLASH_CMD_WRITE_ENABLE << 24);
- wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value));
- udelay(20);
- value = (FLASH_CMD_STATUS_REG_WRITE << 24) | (ulWriteStatus << 16);
- wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value));
- udelay(20);
- }
- }
- return ulStatus;
-}
-
-static int bulk_read_complete_sector(struct bcm_mini_adapter *ad,
- UCHAR read_bk[],
- PCHAR tmpbuff,
- unsigned int offset,
- unsigned int partoff)
-{
- unsigned int i;
- int j;
- int bulk_read_stat;
- FP_FLASH_WRITE_STATUS writef =
- ad->fpFlashWriteWithStatusCheck;
-
- for (i = 0; i < ad->uiSectorSize; i += MAX_RW_SIZE) {
- bulk_read_stat = BeceemFlashBulkRead(ad,
- (PUINT)read_bk,
- offset + i,
- MAX_RW_SIZE);
-
- if (bulk_read_stat != STATUS_SUCCESS)
- continue;
-
- if (ad->ulFlashWriteSize == 1) {
- for (j = 0; j < 16; j++) {
- if ((read_bk[j] != tmpbuff[i + j]) &&
- (STATUS_SUCCESS != (*writef)(ad, partoff + i + j, &tmpbuff[i + j]))) {
- return STATUS_FAILURE;
- }
- }
- } else {
- if ((memcmp(read_bk, &tmpbuff[i], MAX_RW_SIZE)) &&
- (STATUS_SUCCESS != (*writef)(ad, partoff + i, &tmpbuff[i]))) {
- return STATUS_FAILURE;
- }
- }
- }
-
- return STATUS_SUCCESS;
-}
-
-/*
- * Procedure: BeceemFlashBulkWrite
- *
- * Description: Performs write to the flash
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * pBuffer - Data to be written.
- * uiOffset - Offset of the flash where data needs to be written to.
- * uiNumBytes - Number of bytes to be written.
- * bVerify - read verify flag.
- * Returns:
- * OSAL_STATUS_CODE
- *
- */
-
-static int BeceemFlashBulkWrite(struct bcm_mini_adapter *Adapter,
- PUINT pBuffer,
- unsigned int uiOffset,
- unsigned int uiNumBytes,
- bool bVerify)
-{
- PCHAR pTempBuff = NULL;
- PUCHAR pcBuffer = (PUCHAR)pBuffer;
- unsigned int uiIndex = 0;
- unsigned int uiOffsetFromSectStart = 0;
- unsigned int uiSectAlignAddr = 0;
- unsigned int uiCurrSectOffsetAddr = 0;
- unsigned int uiSectBoundary = 0;
- unsigned int uiNumSectTobeRead = 0;
- UCHAR ucReadBk[16] = {0};
- ULONG ulStatus = 0;
- int Status = STATUS_SUCCESS;
- unsigned int uiTemp = 0;
- unsigned int index = 0;
- unsigned int uiPartOffset = 0;
-
- #if defined(BCM_SHM_INTERFACE) && !defined(FLASH_DIRECT_ACCESS)
- Status = bcmflash_raw_write((uiOffset / FLASH_PART_SIZE), (uiOffset % FLASH_PART_SIZE), (unsigned char *)pBuffer, uiNumBytes);
- return Status;
- #endif
-
- uiOffsetFromSectStart = uiOffset & ~(Adapter->uiSectorSize - 1);
-
- /* Adding flash Base address
- * uiOffset = uiOffset + GetFlashBaseAddr(Adapter);
- */
-
- uiSectAlignAddr = uiOffset & ~(Adapter->uiSectorSize - 1);
- uiCurrSectOffsetAddr = uiOffset & (Adapter->uiSectorSize - 1);
- uiSectBoundary = uiSectAlignAddr + Adapter->uiSectorSize;
-
- pTempBuff = kmalloc(Adapter->uiSectorSize, GFP_KERNEL);
- if (!pTempBuff)
- goto BeceemFlashBulkWrite_EXIT;
- /*
- * check if the data to be written is overlapped across sectors
- */
- if (uiOffset+uiNumBytes < uiSectBoundary) {
- uiNumSectTobeRead = 1;
- } else {
- /* Number of sectors spanned by the write */
- uiNumSectTobeRead = (uiCurrSectOffsetAddr + uiNumBytes) / Adapter->uiSectorSize;
- if ((uiCurrSectOffsetAddr + uiNumBytes)%Adapter->uiSectorSize)
- uiNumSectTobeRead++;
- }
- /* Check whether the requested sector is writable in case of a flash2x write. But if the write call
- * is for DSD calibration, allow it without checking the sector permission.
- */
-
- if (IsFlash2x(Adapter) && (Adapter->bAllDSDWriteAllow == false)) {
- index = 0;
- uiTemp = uiNumSectTobeRead;
- while (uiTemp) {
- if (IsOffsetWritable(Adapter, uiOffsetFromSectStart + index * Adapter->uiSectorSize) == false) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Sector Starting at offset <0X%X> is not writable",
- (uiOffsetFromSectStart + index * Adapter->uiSectorSize));
- Status = SECTOR_IS_NOT_WRITABLE;
- goto BeceemFlashBulkWrite_EXIT;
- }
- uiTemp = uiTemp - 1;
- index = index + 1;
- }
- }
- Adapter->SelectedChip = RESET_CHIP_SELECT;
- while (uiNumSectTobeRead) {
- /* do_gettimeofday(&tv1);
- * BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "\nTime In start of write :%ld ms\n",(tv1.tv_sec *1000 + tv1.tv_usec /1000));
- */
- uiPartOffset = (uiSectAlignAddr & (FLASH_PART_SIZE - 1)) + GetFlashBaseAddr(Adapter);
-
- BcmDoChipSelect(Adapter, uiSectAlignAddr);
-
- if (0 != BeceemFlashBulkRead(Adapter,
- (PUINT)pTempBuff,
- uiOffsetFromSectStart,
- Adapter->uiSectorSize)) {
- Status = -1;
- goto BeceemFlashBulkWrite_EXIT;
- }
-
- /* do_gettimeofday(&tr);
- * BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Total time taken by Read :%ld ms\n", (tr.tv_sec *1000 + tr.tv_usec/1000) - (tv1.tv_sec *1000 + tv1.tv_usec/1000));
- */
- ulStatus = BcmFlashUnProtectBlock(Adapter, uiSectAlignAddr, Adapter->uiSectorSize);
-
- if (uiNumSectTobeRead > 1) {
- memcpy(&pTempBuff[uiCurrSectOffsetAddr], pcBuffer, uiSectBoundary - (uiSectAlignAddr + uiCurrSectOffsetAddr));
- pcBuffer += ((uiSectBoundary - (uiSectAlignAddr + uiCurrSectOffsetAddr)));
- uiNumBytes -= (uiSectBoundary - (uiSectAlignAddr + uiCurrSectOffsetAddr));
- } else {
- memcpy(&pTempBuff[uiCurrSectOffsetAddr], pcBuffer, uiNumBytes);
- }
-
- if (IsFlash2x(Adapter))
- SaveHeaderIfPresent(Adapter, (PUCHAR)pTempBuff, uiOffsetFromSectStart);
-
- FlashSectorErase(Adapter, uiPartOffset, 1);
- /* do_gettimeofday(&te);
- * BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Total time taken by Erase :%ld ms\n", (te.tv_sec *1000 + te.tv_usec/1000) - (tr.tv_sec *1000 + tr.tv_usec/1000));
- */
- for (uiIndex = 0; uiIndex < Adapter->uiSectorSize; uiIndex += Adapter->ulFlashWriteSize) {
- if (Adapter->device_removed) {
- Status = -1;
- goto BeceemFlashBulkWrite_EXIT;
- }
-
- if (STATUS_SUCCESS != (*Adapter->fpFlashWrite)(Adapter, uiPartOffset + uiIndex, (&pTempBuff[uiIndex]))) {
- Status = -1;
- goto BeceemFlashBulkWrite_EXIT;
- }
- }
-
- /* do_gettimeofday(&tw);
- * BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Total time taken in Write to Flash :%ld ms\n", (tw.tv_sec *1000 + tw.tv_usec/1000) - (te.tv_sec *1000 + te.tv_usec/1000));
- */
-
- if (STATUS_FAILURE == bulk_read_complete_sector(Adapter,
- ucReadBk,
- pTempBuff,
- uiOffsetFromSectStart,
- uiPartOffset)) {
- Status = STATUS_FAILURE;
- goto BeceemFlashBulkWrite_EXIT;
- }
-
- /* do_gettimeofday(&twv);
- * BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Total time taken in Write to Flash verification :%ld ms\n", (twv.tv_sec *1000 + twv.tv_usec/1000) - (tw.tv_sec *1000 + tw.tv_usec/1000));
- */
- if (ulStatus) {
- BcmRestoreBlockProtectStatus(Adapter, ulStatus);
- ulStatus = 0;
- }
-
- uiCurrSectOffsetAddr = 0;
- uiSectAlignAddr = uiSectBoundary;
- uiSectBoundary += Adapter->uiSectorSize;
- uiOffsetFromSectStart += Adapter->uiSectorSize;
- uiNumSectTobeRead--;
- }
- /* do_gettimeofday(&tv2);
- * BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Time after Write :%ld ms\n",(tv2.tv_sec *1000 + tv2.tv_usec/1000));
- * BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Total time taken by in Write is :%ld ms\n", (tv2.tv_sec *1000 + tv2.tv_usec/1000) - (tv1.tv_sec *1000 + tv1.tv_usec/1000));
- *
- * Cleanup.
- */
-BeceemFlashBulkWrite_EXIT:
- if (ulStatus)
- BcmRestoreBlockProtectStatus(Adapter, ulStatus);
-
- kfree(pTempBuff);
-
- Adapter->SelectedChip = RESET_CHIP_SELECT;
- return Status;
-}
-
-/*
- * Procedure: BeceemFlashBulkWriteStatus
- *
- * Description: Writes to Flash. Checks the SPI status after each write.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * pBuffer - Data to be written.
- * uiOffset - Offset of the flash where data needs to be written to.
- * uiNumBytes - Number of bytes to be written.
- * bVerify - read verify flag.
- * Returns:
- * OSAL_STATUS_CODE
- *
- */
-
-static int BeceemFlashBulkWriteStatus(struct bcm_mini_adapter *Adapter,
- PUINT pBuffer,
- unsigned int uiOffset,
- unsigned int uiNumBytes,
- bool bVerify)
-{
- PCHAR pTempBuff = NULL;
- PUCHAR pcBuffer = (PUCHAR)pBuffer;
- unsigned int uiIndex = 0;
- unsigned int uiOffsetFromSectStart = 0;
- unsigned int uiSectAlignAddr = 0;
- unsigned int uiCurrSectOffsetAddr = 0;
- unsigned int uiSectBoundary = 0;
- unsigned int uiNumSectTobeRead = 0;
- UCHAR ucReadBk[16] = {0};
- ULONG ulStatus = 0;
- unsigned int Status = STATUS_SUCCESS;
- unsigned int uiTemp = 0;
- unsigned int index = 0;
- unsigned int uiPartOffset = 0;
-
- uiOffsetFromSectStart = uiOffset & ~(Adapter->uiSectorSize - 1);
-
- /* uiOffset += Adapter->ulFlashCalStart;
- * Adding flash Base address
- * uiOffset = uiOffset + GetFlashBaseAddr(Adapter);
- */
- uiSectAlignAddr = uiOffset & ~(Adapter->uiSectorSize - 1);
- uiCurrSectOffsetAddr = uiOffset & (Adapter->uiSectorSize - 1);
- uiSectBoundary = uiSectAlignAddr + Adapter->uiSectorSize;
-
- pTempBuff = kmalloc(Adapter->uiSectorSize, GFP_KERNEL);
- if (!pTempBuff)
- goto BeceemFlashBulkWriteStatus_EXIT;
-
- /*
- * check if the data to be written is overlapped across sectors
- */
- if (uiOffset+uiNumBytes < uiSectBoundary) {
- uiNumSectTobeRead = 1;
- } else {
- /* Number of sectors spanned by the write */
- uiNumSectTobeRead = (uiCurrSectOffsetAddr + uiNumBytes) / Adapter->uiSectorSize;
- if ((uiCurrSectOffsetAddr + uiNumBytes)%Adapter->uiSectorSize)
- uiNumSectTobeRead++;
- }
-
- if (IsFlash2x(Adapter) && (Adapter->bAllDSDWriteAllow == false)) {
- index = 0;
- uiTemp = uiNumSectTobeRead;
- while (uiTemp) {
- if (IsOffsetWritable(Adapter, uiOffsetFromSectStart + index * Adapter->uiSectorSize) == false) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Sector Starting at offset <0X%x> is not writable",
- (uiOffsetFromSectStart + index * Adapter->uiSectorSize));
- Status = SECTOR_IS_NOT_WRITABLE;
- goto BeceemFlashBulkWriteStatus_EXIT;
- }
- uiTemp = uiTemp - 1;
- index = index + 1;
- }
- }
-
- Adapter->SelectedChip = RESET_CHIP_SELECT;
- while (uiNumSectTobeRead) {
- uiPartOffset = (uiSectAlignAddr & (FLASH_PART_SIZE - 1)) + GetFlashBaseAddr(Adapter);
-
- BcmDoChipSelect(Adapter, uiSectAlignAddr);
- if (0 != BeceemFlashBulkRead(Adapter,
- (PUINT)pTempBuff,
- uiOffsetFromSectStart,
- Adapter->uiSectorSize)) {
- Status = -1;
- goto BeceemFlashBulkWriteStatus_EXIT;
- }
-
- ulStatus = BcmFlashUnProtectBlock(Adapter, uiOffsetFromSectStart, Adapter->uiSectorSize);
-
- if (uiNumSectTobeRead > 1) {
- memcpy(&pTempBuff[uiCurrSectOffsetAddr], pcBuffer, uiSectBoundary - (uiSectAlignAddr + uiCurrSectOffsetAddr));
- pcBuffer += ((uiSectBoundary - (uiSectAlignAddr + uiCurrSectOffsetAddr)));
- uiNumBytes -= (uiSectBoundary - (uiSectAlignAddr + uiCurrSectOffsetAddr));
- } else {
- memcpy(&pTempBuff[uiCurrSectOffsetAddr], pcBuffer, uiNumBytes);
- }
-
- if (IsFlash2x(Adapter))
- SaveHeaderIfPresent(Adapter, (PUCHAR)pTempBuff, uiOffsetFromSectStart);
-
- FlashSectorErase(Adapter, uiPartOffset, 1);
-
- for (uiIndex = 0; uiIndex < Adapter->uiSectorSize; uiIndex += Adapter->ulFlashWriteSize) {
- if (Adapter->device_removed) {
- Status = -1;
- goto BeceemFlashBulkWriteStatus_EXIT;
- }
-
- if (STATUS_SUCCESS != (*Adapter->fpFlashWriteWithStatusCheck)(Adapter, uiPartOffset+uiIndex, &pTempBuff[uiIndex])) {
- Status = -1;
- goto BeceemFlashBulkWriteStatus_EXIT;
- }
- }
-
- if (bVerify) {
- for (uiIndex = 0; uiIndex < Adapter->uiSectorSize; uiIndex += MAX_RW_SIZE) {
- if (STATUS_SUCCESS == BeceemFlashBulkRead(Adapter, (PUINT)ucReadBk, uiOffsetFromSectStart + uiIndex, MAX_RW_SIZE)) {
- if (memcmp(ucReadBk, &pTempBuff[uiIndex], MAX_RW_SIZE)) {
- Status = STATUS_FAILURE;
- goto BeceemFlashBulkWriteStatus_EXIT;
- }
- }
- }
- }
-
- if (ulStatus) {
- BcmRestoreBlockProtectStatus(Adapter, ulStatus);
- ulStatus = 0;
- }
-
- uiCurrSectOffsetAddr = 0;
- uiSectAlignAddr = uiSectBoundary;
- uiSectBoundary += Adapter->uiSectorSize;
- uiOffsetFromSectStart += Adapter->uiSectorSize;
- uiNumSectTobeRead--;
- }
-/*
- * Cleanup.
- */
-BeceemFlashBulkWriteStatus_EXIT:
- if (ulStatus)
- BcmRestoreBlockProtectStatus(Adapter, ulStatus);
-
- kfree(pTempBuff);
- Adapter->SelectedChip = RESET_CHIP_SELECT;
- return Status;
-}
-
-/*
- * Procedure: PropagateCalParamsFromFlashToMemory
- *
- * Description: Dumps the calibration section of EEPROM to DDR.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * Returns:
- * OSAL_STATUS_CODE
- *
- */
-
-int PropagateCalParamsFromFlashToMemory(struct bcm_mini_adapter *Adapter)
-{
- PCHAR pBuff, pPtr;
- unsigned int uiEepromSize = 0;
- unsigned int uiBytesToCopy = 0;
- /* unsigned int uiIndex = 0; */
- unsigned int uiCalStartAddr = EEPROM_CALPARAM_START;
- unsigned int uiMemoryLoc = EEPROM_CAL_DATA_INTERNAL_LOC;
- unsigned int value;
- int Status = 0;
-
- /*
- * Write the signature first. This will ensure firmware does not access EEPROM.
- */
- value = 0xbeadbead;
- wrmalt(Adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 4, &value, sizeof(value));
- value = 0xbeadbead;
- wrmalt(Adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 8, &value, sizeof(value));
-
- if (0 != BeceemNVMRead(Adapter, &uiEepromSize, EEPROM_SIZE_OFFSET, 4))
- return -1;
-
- uiEepromSize = ntohl(uiEepromSize);
- uiEepromSize >>= 16;
-
- /*
- * subtract the auto init section size
- */
- uiEepromSize -= EEPROM_CALPARAM_START;
-
- if (uiEepromSize > 1024 * 1024)
- return -1;
-
- pBuff = kmalloc(uiEepromSize, GFP_KERNEL);
- if (pBuff == NULL)
- return -ENOMEM;
-
- if (0 != BeceemNVMRead(Adapter, (PUINT)pBuff, uiCalStartAddr, uiEepromSize)) {
- kfree(pBuff);
- return -1;
- }
-
- pPtr = pBuff;
-
- uiBytesToCopy = MIN(BUFFER_4K, uiEepromSize);
-
- while (uiBytesToCopy) {
- Status = wrm(Adapter, uiMemoryLoc, (PCHAR)pPtr, uiBytesToCopy);
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "wrm failed with status :%d", Status);
- break;
- }
-
- pPtr += uiBytesToCopy;
- uiEepromSize -= uiBytesToCopy;
- uiMemoryLoc += uiBytesToCopy;
- uiBytesToCopy = MIN(BUFFER_4K, uiEepromSize);
- }
-
- kfree(pBuff);
- return Status;
-}
-
-/*
- * Procedure: BeceemEEPROMReadBackandVerify
- *
- * Description: Read back the data written and verifies.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * pBuffer - Data to be written.
- * uiOffset - Offset of the flash where data needs to be written to.
- * uiNumBytes - Number of bytes to be written.
- * Returns:
- * OSAL_STATUS_CODE
- *
- */
-
-static int BeceemEEPROMReadBackandVerify(struct bcm_mini_adapter *Adapter,
- PUINT pBuffer,
- unsigned int uiOffset,
- unsigned int uiNumBytes)
-{
- unsigned int uiRdbk = 0;
- unsigned int uiIndex = 0;
- unsigned int uiData = 0;
- unsigned int auiData[4] = {0};
-
- while (uiNumBytes) {
- if (Adapter->device_removed)
- return -1;
-
- if (uiNumBytes >= MAX_RW_SIZE) {
- /* For requests of MAX_RW_SIZE bytes or more, use the bulk read function to make the access faster. */
- BeceemEEPROMBulkRead(Adapter, &auiData[0], uiOffset, MAX_RW_SIZE);
-
- if (memcmp(&pBuffer[uiIndex], &auiData[0], MAX_RW_SIZE)) {
- /* re-write */
- BeceemEEPROMBulkWrite(Adapter, (PUCHAR)(pBuffer + uiIndex), uiOffset, MAX_RW_SIZE, false);
- mdelay(3);
- BeceemEEPROMBulkRead(Adapter, &auiData[0], uiOffset, MAX_RW_SIZE);
-
- if (memcmp(&pBuffer[uiIndex], &auiData[0], MAX_RW_SIZE))
- return -1;
- }
- uiOffset += MAX_RW_SIZE;
- uiNumBytes -= MAX_RW_SIZE;
- uiIndex += 4;
- } else if (uiNumBytes >= 4) {
- BeceemEEPROMBulkRead(Adapter, &uiData, uiOffset, 4);
- if (uiData != pBuffer[uiIndex]) {
- /* re-write */
- BeceemEEPROMBulkWrite(Adapter, (PUCHAR)(pBuffer + uiIndex), uiOffset, 4, false);
- mdelay(3);
- BeceemEEPROMBulkRead(Adapter, &uiData, uiOffset, 4);
- if (uiData != pBuffer[uiIndex])
- return -1;
- }
- uiOffset += 4;
- uiNumBytes -= 4;
- uiIndex++;
- } else {
- /* Handle the reads less than 4 bytes... */
- uiData = 0;
- memcpy(&uiData, ((PUCHAR)pBuffer) + (uiIndex * sizeof(unsigned int)), uiNumBytes);
- BeceemEEPROMBulkRead(Adapter, &uiRdbk, uiOffset, 4);
-
- if (memcmp(&uiData, &uiRdbk, uiNumBytes))
- return -1;
-
- uiNumBytes = 0;
- }
- }
-
- return 0;
-}
-
-static VOID BcmSwapWord(unsigned int *ptr1)
-{
- unsigned int tempval = (unsigned int)*ptr1;
- char *ptr2 = (char *)&tempval;
- char *ptr = (char *)ptr1;
-
- ptr[0] = ptr2[3];
- ptr[1] = ptr2[2];
- ptr[2] = ptr2[1];
- ptr[3] = ptr2[0];
-}
-
-/*
- * Procedure: BeceemEEPROMWritePage
- *
- * Description: Performs page write (16bytes) to the EEPROM
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * uiData - Data to be written.
- * uiOffset - Offset of the EEPROM where data needs to be written to.
- * Returns:
- * OSAL_STATUS_CODE
- *
- */
-
-static int BeceemEEPROMWritePage(struct bcm_mini_adapter *Adapter, unsigned int uiData[], unsigned int uiOffset)
-{
- unsigned int uiRetries = MAX_EEPROM_RETRIES * RETRIES_PER_DELAY;
- unsigned int uiStatus = 0;
- UCHAR uiEpromStatus = 0;
- unsigned int value = 0;
-
- /* Flush the Write/Read/Cmd queues. */
- value = (EEPROM_WRITE_QUEUE_FLUSH | EEPROM_CMD_QUEUE_FLUSH | EEPROM_READ_QUEUE_FLUSH);
- wrmalt(Adapter, SPI_FLUSH_REG, &value, sizeof(value));
- value = 0;
- wrmalt(Adapter, SPI_FLUSH_REG, &value, sizeof(value));
-
- /* Clear the Empty/Avail/Full bits. After this it has been confirmed
- * that the bit was cleared by reading back the register. See NOTE below.
- * We also clear the Read queues as we do an EEPROM status register read
- * later.
- */
- value = (EEPROM_WRITE_QUEUE_EMPTY | EEPROM_WRITE_QUEUE_AVAIL | EEPROM_WRITE_QUEUE_FULL | EEPROM_READ_DATA_AVAIL | EEPROM_READ_DATA_FULL);
- wrmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &value, sizeof(value));
-
- /* Enable write */
- value = EEPROM_WRITE_ENABLE;
- wrmalt(Adapter, EEPROM_CMDQ_SPI_REG, &value, sizeof(value));
-
- /* We can write back to back 8bits * 16 into the queue and as we have
- * checked for the queue to be empty we can write in a burst.
- */
-
- value = uiData[0];
- BcmSwapWord(&value);
- wrm(Adapter, EEPROM_WRITE_DATAQ_REG, (PUCHAR)&value, 4);
-
- value = uiData[1];
- BcmSwapWord(&value);
- wrm(Adapter, EEPROM_WRITE_DATAQ_REG, (PUCHAR)&value, 4);
-
- value = uiData[2];
- BcmSwapWord(&value);
- wrm(Adapter, EEPROM_WRITE_DATAQ_REG, (PUCHAR)&value, 4);
-
- value = uiData[3];
- BcmSwapWord(&value);
- wrm(Adapter, EEPROM_WRITE_DATAQ_REG, (PUCHAR)&value, 4);
-
- /* NOTE: After this write, a readback of EEPROM_SPI_Q_STATUS1_REG
- * shows 7 for the EEPROM data write, which means that the
- * queue got full, space became available and the queue went empty.
- * This may happen in sequence.
- */
- value = EEPROM_16_BYTE_PAGE_WRITE | uiOffset;
- wrmalt(Adapter, EEPROM_CMDQ_SPI_REG, &value, sizeof(value));
-
- /* Ideally we would loop here without a retry limit and eventually succeed.
- * What we are checking is whether the previous write has completed, and
- * this may take time. We should wait until the Empty bit is set.
- */
- uiStatus = 0;
- rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &uiStatus, sizeof(uiStatus));
- while ((uiStatus & EEPROM_WRITE_QUEUE_EMPTY) == 0) {
- uiRetries--;
- if (uiRetries == 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "0x0f003004 = %x, %d retries failed.\n", uiStatus, MAX_EEPROM_RETRIES * RETRIES_PER_DELAY);
- return STATUS_FAILURE;
- }
-
- if (!(uiRetries%RETRIES_PER_DELAY))
- udelay(1000);
-
- uiStatus = 0;
- rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &uiStatus, sizeof(uiStatus));
- if (Adapter->device_removed == TRUE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Modem got removed hence exiting from loop....");
- return -ENODEV;
- }
- }
-
- if (uiRetries != 0) {
- /* Clear the ones that are set - either, Empty/Full/Avail bits */
- value = (uiStatus & (EEPROM_WRITE_QUEUE_EMPTY | EEPROM_WRITE_QUEUE_AVAIL | EEPROM_WRITE_QUEUE_FULL));
- wrmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &value, sizeof(value));
- }
-
- /* Here we should check if the EEPROM status register is correct before
- * proceeding. Bit 0 in the EEPROM Status register should be 0 before
- * we proceed further. A 1 at Bit 0 indicates that the EEPROM is busy
- * with the previous write. Note also that issuing this read finally
- * means the previous write to the EEPROM has completed.
- */
- uiRetries = MAX_EEPROM_RETRIES * RETRIES_PER_DELAY;
- uiEpromStatus = 0;
- while (uiRetries != 0) {
- uiEpromStatus = ReadEEPROMStatusRegister(Adapter);
- if (Adapter->device_removed == TRUE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Modem has got removed hence exiting from loop...");
- return -ENODEV;
- }
- if ((EEPROM_STATUS_REG_WRITE_BUSY & uiEpromStatus) == 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "EEPROM status register = %x tries = %d\n", uiEpromStatus, (MAX_EEPROM_RETRIES * RETRIES_PER_DELAY - uiRetries));
- return STATUS_SUCCESS;
- }
- uiRetries--;
- if (uiRetries == 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "0x0f003004 = %x, for EEPROM status read %d retries failed.\n", uiEpromStatus, MAX_EEPROM_RETRIES * RETRIES_PER_DELAY);
- return STATUS_FAILURE;
- }
- uiEpromStatus = 0;
- if (!(uiRetries%RETRIES_PER_DELAY))
- udelay(1000);
- }
-
- return STATUS_SUCCESS;
-} /* BeceemEEPROMWritePage */
-
-/*
- * Procedure: BeceemEEPROMBulkWrite
- *
- * Description: Performs write to the EEPROM
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * pBuffer - Data to be written.
- * uiOffset - Offset of the EEPROM where data needs to be written to.
- * uiNumBytes - Number of bytes to be written.
- * bVerify - read verify flag.
- * Returns:
- * OSAL_STATUS_CODE
- *
- */
-
-int BeceemEEPROMBulkWrite(struct bcm_mini_adapter *Adapter,
- PUCHAR pBuffer,
- unsigned int uiOffset,
- unsigned int uiNumBytes,
- bool bVerify)
-{
- unsigned int uiBytesToCopy = uiNumBytes;
- /* unsigned int uiRdbk = 0; */
- unsigned int uiData[4] = {0};
- unsigned int uiIndex = 0;
- unsigned int uiTempOffset = 0;
- unsigned int uiExtraBytes = 0;
- /* PUINT puiBuffer = (PUINT)pBuffer;
- * int value;
- */
-
- if (uiOffset % MAX_RW_SIZE && uiBytesToCopy) {
- uiTempOffset = uiOffset - (uiOffset % MAX_RW_SIZE);
- uiExtraBytes = uiOffset - uiTempOffset;
-
- BeceemEEPROMBulkRead(Adapter, &uiData[0], uiTempOffset, MAX_RW_SIZE);
-
- if (uiBytesToCopy >= (16 - uiExtraBytes)) {
- memcpy((((PUCHAR)&uiData[0]) + uiExtraBytes), pBuffer, MAX_RW_SIZE - uiExtraBytes);
-
- if (STATUS_FAILURE == BeceemEEPROMWritePage(Adapter, uiData, uiTempOffset))
- return STATUS_FAILURE;
-
- uiBytesToCopy -= (MAX_RW_SIZE - uiExtraBytes);
- uiIndex += (MAX_RW_SIZE - uiExtraBytes);
- uiOffset += (MAX_RW_SIZE - uiExtraBytes);
- } else {
- memcpy((((PUCHAR)&uiData[0]) + uiExtraBytes), pBuffer, uiBytesToCopy);
-
- if (STATUS_FAILURE == BeceemEEPROMWritePage(Adapter, uiData, uiTempOffset))
- return STATUS_FAILURE;
-
- uiIndex += uiBytesToCopy;
- uiOffset += uiBytesToCopy;
- uiBytesToCopy = 0;
- }
- }
-
- while (uiBytesToCopy) {
- if (Adapter->device_removed)
- return -1;
-
- if (uiBytesToCopy >= MAX_RW_SIZE) {
- if (STATUS_FAILURE == BeceemEEPROMWritePage(Adapter, (PUINT) &pBuffer[uiIndex], uiOffset))
- return STATUS_FAILURE;
-
- uiIndex += MAX_RW_SIZE;
- uiOffset += MAX_RW_SIZE;
- uiBytesToCopy -= MAX_RW_SIZE;
- } else {
- /*
- * To program data that is not 16-byte aligned, read the 16-byte page and then update it.
- */
- BeceemEEPROMBulkRead(Adapter, &uiData[0], uiOffset, 16);
- memcpy(&uiData[0], pBuffer + uiIndex, uiBytesToCopy);
-
- if (STATUS_FAILURE == BeceemEEPROMWritePage(Adapter, uiData, uiOffset))
- return STATUS_FAILURE;
-
- uiBytesToCopy = 0;
- }
- }
-
- return 0;
-}
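The head-alignment arithmetic at the top of BeceemEEPROMBulkWrite() is easiest to follow with concrete numbers. A hypothetical worked example, assuming MAX_RW_SIZE is the 16-byte page size used by BeceemEEPROMWritePage():

	/* Hypothetical illustration only, assuming MAX_RW_SIZE == 16. */
	unsigned int uiOffset     = 42;                         /* caller-supplied offset        */
	unsigned int uiTempOffset = uiOffset - (uiOffset % 16); /* 32: start of containing page  */
	unsigned int uiExtraBytes = uiOffset - uiTempOffset;    /* 10: leading bytes to preserve */

	/* The 16-byte page at offset 32 is read back, its first 10 bytes are kept,
	 * the remaining 6 bytes are filled from the caller's buffer, and the whole
	 * page is rewritten with BeceemEEPROMWritePage().
	 */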
-
-/*
- * Procedure: BeceemNVMRead
- *
- * Description: Reads n bytes from NVM.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * pBuffer - Buffer to store the data read from NVM
- * uiOffset - Offset of NVM from where data should be read
- * uiNumBytes - Number of bytes to be read from the NVM.
- *
- * Returns:
- * OSAL_STATUS_SUCCESS - if NVM read is successful.
- * <FAILURE> - if failed.
- */
-
-int BeceemNVMRead(struct bcm_mini_adapter *Adapter,
- PUINT pBuffer,
- unsigned int uiOffset,
- unsigned int uiNumBytes)
-{
- int Status = 0;
-
- #if !defined(BCM_SHM_INTERFACE) || defined(FLASH_DIRECT_ACCESS)
- unsigned int uiTemp = 0, value;
- #endif
-
- if (Adapter->eNVMType == NVM_FLASH) {
- if (Adapter->bFlashRawRead == false) {
- if (IsSectionExistInVendorInfo(Adapter, Adapter->eActiveDSD))
- return vendorextnReadSection(Adapter, (PUCHAR)pBuffer, Adapter->eActiveDSD, uiOffset, uiNumBytes);
-
- uiOffset = uiOffset + Adapter->ulFlashCalStart;
- }
-
- #if defined(BCM_SHM_INTERFACE) && !defined(FLASH_DIRECT_ACCESS)
- Status = bcmflash_raw_read((uiOffset / FLASH_PART_SIZE), (uiOffset % FLASH_PART_SIZE), (unsigned char *)pBuffer, uiNumBytes);
- #else
- rdmalt(Adapter, 0x0f000C80, &uiTemp, sizeof(uiTemp));
- value = 0;
- wrmalt(Adapter, 0x0f000C80, &value, sizeof(value));
- Status = BeceemFlashBulkRead(Adapter,
- pBuffer,
- uiOffset,
- uiNumBytes);
- wrmalt(Adapter, 0x0f000C80, &uiTemp, sizeof(uiTemp));
- #endif
- } else if (Adapter->eNVMType == NVM_EEPROM) {
- Status = BeceemEEPROMBulkRead(Adapter,
- pBuffer,
- uiOffset,
- uiNumBytes);
- } else {
- Status = -1;
- }
-
- return Status;
-}
-
-/*
- * Procedure: BeceemNVMWrite
- *
- * Description: Writes n bytes to NVM.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * pBuffer - Buffer contains the data to be written.
- * uiOffset - Offset of NVM where data to be written to.
- * uiNumBytes - Number of bytes to be written.
- *
- * Returns:
- * OSAL_STATUS_SUCCESS - if NVM write is successful.
- * <FAILURE> - if failed.
- */
-
-int BeceemNVMWrite(struct bcm_mini_adapter *Adapter,
- PUINT pBuffer,
- unsigned int uiOffset,
- unsigned int uiNumBytes,
- bool bVerify)
-{
- int Status = 0;
- unsigned int uiTemp = 0;
- unsigned int uiMemoryLoc = EEPROM_CAL_DATA_INTERNAL_LOC;
- unsigned int uiIndex = 0;
-
- #if !defined(BCM_SHM_INTERFACE) || defined(FLASH_DIRECT_ACCESS)
- unsigned int value;
- #endif
-
- unsigned int uiFlashOffset = 0;
-
- if (Adapter->eNVMType == NVM_FLASH) {
- if (IsSectionExistInVendorInfo(Adapter, Adapter->eActiveDSD))
- Status = vendorextnWriteSection(Adapter, (PUCHAR)pBuffer, Adapter->eActiveDSD, uiOffset, uiNumBytes, bVerify);
- else {
- uiFlashOffset = uiOffset + Adapter->ulFlashCalStart;
-
- #if defined(BCM_SHM_INTERFACE) && !defined(FLASH_DIRECT_ACCESS)
- Status = bcmflash_raw_write((uiFlashOffset / FLASH_PART_SIZE), (uiFlashOffset % FLASH_PART_SIZE), (unsigned char *)pBuffer, uiNumBytes);
- #else
- rdmalt(Adapter, 0x0f000C80, &uiTemp, sizeof(uiTemp));
- value = 0;
- wrmalt(Adapter, 0x0f000C80, &value, sizeof(value));
-
- if (Adapter->bStatusWrite == TRUE)
- Status = BeceemFlashBulkWriteStatus(Adapter,
- pBuffer,
- uiFlashOffset,
- uiNumBytes ,
- bVerify);
- else
-
- Status = BeceemFlashBulkWrite(Adapter,
- pBuffer,
- uiFlashOffset,
- uiNumBytes,
- bVerify);
- #endif
- }
-
- if (uiOffset >= EEPROM_CALPARAM_START) {
- uiMemoryLoc += (uiOffset - EEPROM_CALPARAM_START);
- while (uiNumBytes) {
- if (uiNumBytes > BUFFER_4K) {
- wrm(Adapter, (uiMemoryLoc+uiIndex), (PCHAR)(pBuffer + (uiIndex / 4)), BUFFER_4K);
- uiNumBytes -= BUFFER_4K;
- uiIndex += BUFFER_4K;
- } else {
- wrm(Adapter, uiMemoryLoc+uiIndex, (PCHAR)(pBuffer + (uiIndex / 4)), uiNumBytes);
- uiNumBytes = 0;
- break;
- }
- }
- } else {
- if ((uiOffset + uiNumBytes) > EEPROM_CALPARAM_START) {
- ULONG ulBytesTobeSkipped = 0;
- PUCHAR pcBuffer = (PUCHAR)pBuffer; /* char pointer to take care of odd byte cases. */
-
- uiNumBytes -= (EEPROM_CALPARAM_START - uiOffset);
- ulBytesTobeSkipped += (EEPROM_CALPARAM_START - uiOffset);
- uiOffset += (EEPROM_CALPARAM_START - uiOffset);
- while (uiNumBytes) {
- if (uiNumBytes > BUFFER_4K) {
- wrm(Adapter, uiMemoryLoc + uiIndex, (PCHAR)&pcBuffer[ulBytesTobeSkipped + uiIndex], BUFFER_4K);
- uiNumBytes -= BUFFER_4K;
- uiIndex += BUFFER_4K;
- } else {
- wrm(Adapter, uiMemoryLoc + uiIndex, (PCHAR)&pcBuffer[ulBytesTobeSkipped + uiIndex], uiNumBytes);
- uiNumBytes = 0;
- break;
- }
- }
- }
- }
- /* restore the values. */
- wrmalt(Adapter, 0x0f000C80, &uiTemp, sizeof(uiTemp));
- } else if (Adapter->eNVMType == NVM_EEPROM) {
- Status = BeceemEEPROMBulkWrite(Adapter,
- (PUCHAR)pBuffer,
- uiOffset,
- uiNumBytes,
- bVerify);
- if (bVerify)
- Status = BeceemEEPROMReadBackandVerify(Adapter, (PUINT)pBuffer, uiOffset, uiNumBytes);
- } else {
- Status = -1;
- }
- return Status;
-}
-
-/*
- * Procedure: BcmUpdateSectorSize
- *
- * Description: Updates the sector size to FLASH.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * uiSectorSize - sector size
- *
- * Returns:
- * OSAL_STATUS_SUCCESS - if NVM write is successful.
- * <FAILURE> - if failed.
- */
-
-int BcmUpdateSectorSize(struct bcm_mini_adapter *Adapter, unsigned int uiSectorSize)
-{
- int Status = -1;
- struct bcm_flash_cs_info sFlashCsInfo = {0};
- unsigned int uiTemp = 0;
- unsigned int uiSectorSig = 0;
- unsigned int uiCurrentSectorSize = 0;
- unsigned int value;
-
- rdmalt(Adapter, 0x0f000C80, &uiTemp, sizeof(uiTemp));
- value = 0;
- wrmalt(Adapter, 0x0f000C80, &value, sizeof(value));
-
- /*
- * Before updating the sector size in the reserved area, check if already present.
- */
- BeceemFlashBulkRead(Adapter, (PUINT)&sFlashCsInfo, Adapter->ulFlashControlSectionStart, sizeof(sFlashCsInfo));
- uiSectorSig = ntohl(sFlashCsInfo.FlashSectorSizeSig);
- uiCurrentSectorSize = ntohl(sFlashCsInfo.FlashSectorSize);
-
- if (uiSectorSig == FLASH_SECTOR_SIZE_SIG) {
- if ((uiCurrentSectorSize <= MAX_SECTOR_SIZE) && (uiCurrentSectorSize >= MIN_SECTOR_SIZE)) {
- if (uiSectorSize == uiCurrentSectorSize) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Provided sector size is same as programmed in Flash");
- Status = STATUS_SUCCESS;
- goto Restore;
- }
- }
- }
-
- if ((uiSectorSize <= MAX_SECTOR_SIZE) && (uiSectorSize >= MIN_SECTOR_SIZE)) {
- sFlashCsInfo.FlashSectorSize = htonl(uiSectorSize);
- sFlashCsInfo.FlashSectorSizeSig = htonl(FLASH_SECTOR_SIZE_SIG);
-
- Status = BeceemFlashBulkWrite(Adapter,
- (PUINT)&sFlashCsInfo,
- Adapter->ulFlashControlSectionStart,
- sizeof(sFlashCsInfo),
- TRUE);
- }
-
-Restore:
- /* restore the values. */
- wrmalt(Adapter, 0x0f000C80, &uiTemp, sizeof(uiTemp));
-
- return Status;
-}
-
-/*
- * Procedure: BcmGetFlashSectorSize
- *
- * Description: Finds the sector size of the FLASH.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- *
- * Returns:
- * unsigned int - sector size.
- *
- */
-
-static unsigned int BcmGetFlashSectorSize(struct bcm_mini_adapter *Adapter, unsigned int FlashSectorSizeSig, unsigned int FlashSectorSize)
-{
- unsigned int uiSectorSize = 0;
- unsigned int uiSectorSig = 0;
-
- if (Adapter->bSectorSizeOverride &&
- (Adapter->uiSectorSizeInCFG <= MAX_SECTOR_SIZE &&
- Adapter->uiSectorSizeInCFG >= MIN_SECTOR_SIZE)) {
- Adapter->uiSectorSize = Adapter->uiSectorSizeInCFG;
- } else {
- uiSectorSig = FlashSectorSizeSig;
-
- if (uiSectorSig == FLASH_SECTOR_SIZE_SIG) {
- uiSectorSize = FlashSectorSize;
- /*
- * If the sector size stored in the FLASH makes sense then use it.
- */
- if (uiSectorSize <= MAX_SECTOR_SIZE && uiSectorSize >= MIN_SECTOR_SIZE) {
- Adapter->uiSectorSize = uiSectorSize;
- } else if (Adapter->uiSectorSizeInCFG <= MAX_SECTOR_SIZE &&
- Adapter->uiSectorSizeInCFG >= MIN_SECTOR_SIZE) {
- /* No valid size in FLASH, check if Config file has it. */
- Adapter->uiSectorSize = Adapter->uiSectorSizeInCFG;
- } else {
- /* Init to Default, if none of the above works. */
- Adapter->uiSectorSize = DEFAULT_SECTOR_SIZE;
- }
- } else {
- if (Adapter->uiSectorSizeInCFG <= MAX_SECTOR_SIZE &&
- Adapter->uiSectorSizeInCFG >= MIN_SECTOR_SIZE)
- Adapter->uiSectorSize = Adapter->uiSectorSizeInCFG;
- else
- Adapter->uiSectorSize = DEFAULT_SECTOR_SIZE;
- }
- }
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Sector size :%x\n", Adapter->uiSectorSize);
-
- return Adapter->uiSectorSize;
-}
-
-/*
- * Procedure: BcmInitEEPROMQueues
- *
- * Description: Initialization of EEPROM queues.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- *
- * Returns:
- * <OSAL_STATUS_CODE>
- */
-
-static int BcmInitEEPROMQueues(struct bcm_mini_adapter *Adapter)
-{
- unsigned int value = 0;
- /* CHIP Bug : Clear the Avail bits on the Read queue. The default
- * value on this register is supposed to be 0x00001102.
- * But we get 0x00001122.
- */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Fixing reset value on 0x0f003004 register\n");
- value = EEPROM_READ_DATA_AVAIL;
- wrmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &value, sizeof(value));
-
- /* Flush all the EEPROM queues. */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, " Flushing the queues\n");
- value = EEPROM_ALL_QUEUE_FLUSH;
- wrmalt(Adapter, SPI_FLUSH_REG, &value, sizeof(value));
-
- value = 0;
- wrmalt(Adapter, SPI_FLUSH_REG, &value, sizeof(value));
-
- /* Read the EEPROM Status Register purely for logging; it serves no functional purpose. */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "EEPROM Status register value = %x\n", ReadEEPROMStatusRegister(Adapter));
-
- return STATUS_SUCCESS;
-} /* BcmInitEEPROMQueues() */
-
-/*
- * Procedure: BcmInitNVM
- *
- * Description: Initialization of NVM, EEPROM size, FLASH size, sector size, etc.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- *
- * Returns:
- * <OSAL_STATUS_CODE>
- */
-
-int BcmInitNVM(struct bcm_mini_adapter *ps_adapter)
-{
- BcmValidateNvmType(ps_adapter);
- BcmInitEEPROMQueues(ps_adapter);
-
- if (ps_adapter->eNVMType == NVM_AUTODETECT) {
- ps_adapter->eNVMType = BcmGetNvmType(ps_adapter);
- if (ps_adapter->eNVMType == NVM_UNKNOWN)
- BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_PRINTK, 0, 0, "NVM Type is unknown!!\n");
- } else if (ps_adapter->eNVMType == NVM_FLASH) {
- BcmGetFlashCSInfo(ps_adapter);
- }
-
- BcmGetNvmSize(ps_adapter);
-
- return STATUS_SUCCESS;
-}
-
-/* BcmGetNvmSize : set the EEPROM or flash size in the Adapter.
- *
- * Input Parameter:
- * Adapter data structure
- * Return Value:
- * 0 means success.
- */
-
-static int BcmGetNvmSize(struct bcm_mini_adapter *Adapter)
-{
- if (Adapter->eNVMType == NVM_EEPROM)
- Adapter->uiNVMDSDSize = BcmGetEEPROMSize(Adapter);
- else if (Adapter->eNVMType == NVM_FLASH)
- Adapter->uiNVMDSDSize = BcmGetFlashSize(Adapter);
-
- return 0;
-}
-
-/*
- * Procedure: BcmValidateNvm
- *
- * Description: Validates the NVM Type option selected against the device
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- *
- * Returns:
- * <VOID>
- */
-
-static VOID BcmValidateNvmType(struct bcm_mini_adapter *Adapter)
-{
- /*
- * If FLASH is forced through the CFG file, we should ensure the device really has a FLASH.
- * Accessing the FLASH address when no FLASH is present can cause a hang/freeze.
- * So if NVM_FLASH is selected for older chipsets, change it to AUTODETECT, where EEPROM is the first choice.
- */
-
- if (Adapter->eNVMType == NVM_FLASH &&
- Adapter->chip_id < 0xBECE3300)
- Adapter->eNVMType = NVM_AUTODETECT;
-}
-
-/*
- * Procedure: BcmReadFlashRDID
- *
- * Description: Reads ID from Serial Flash
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- *
- * Returns:
- * Flash ID
- */
-
-static ULONG BcmReadFlashRDID(struct bcm_mini_adapter *Adapter)
-{
- ULONG ulRDID = 0;
- unsigned int value;
-
- /*
- * Read ID Instruction.
- */
- value = (FLASH_CMD_READ_ID << 24);
- wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value));
-
- /* Delay */
- udelay(10);
-
- /*
- * Read the SPI READQ register. The output will be WWXXYYZZ.
- * The ID is 3 bytes long and is WWXXYY; ZZ is ignored.
- */
- rdmalt(Adapter, FLASH_SPI_READQ_REG, (PUINT)&ulRDID, sizeof(ulRDID));
-
- return ulRDID >> 8;
-}
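The final right shift simply drops the ignored ZZ byte. A hypothetical example of the readback (the ID value is illustrative, not taken from the source):

	/* Hypothetical readback value, for illustration only.
	 * If FLASH_SPI_READQ_REG returns ulRDID == 0x20BA1855
	 * (WW = 0x20, XX = 0xBA, YY = 0x18, ZZ = 0x55), then
	 * ulRDID >> 8 == 0x0020BA18, i.e. the 3-byte ID WWXXYY.
	 */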
-
-int BcmAllocFlashCSStructure(struct bcm_mini_adapter *psAdapter)
-{
- if (!psAdapter) {
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_PRINTK, 0, 0, "Adapter structure point is NULL");
- return -EINVAL;
- }
- psAdapter->psFlashCSInfo = kzalloc(sizeof(struct bcm_flash_cs_info), GFP_KERNEL);
- if (psAdapter->psFlashCSInfo == NULL) {
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_PRINTK, 0, 0, "Can't Allocate memory for Flash 1.x");
- return -ENOMEM;
- }
-
- psAdapter->psFlash2xCSInfo = kzalloc(sizeof(struct bcm_flash2x_cs_info), GFP_KERNEL);
- if (!psAdapter->psFlash2xCSInfo) {
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_PRINTK, 0, 0, "Can't Allocate memory for Flash 2.x");
- kfree(psAdapter->psFlashCSInfo);
- return -ENOMEM;
- }
-
- psAdapter->psFlash2xVendorInfo = kzalloc(sizeof(struct bcm_flash2x_vendor_info), GFP_KERNEL);
- if (!psAdapter->psFlash2xVendorInfo) {
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_PRINTK, 0, 0, "Can't Allocate Vendor Info Memory for Flash 2.x");
- kfree(psAdapter->psFlashCSInfo);
- kfree(psAdapter->psFlash2xCSInfo);
- return -ENOMEM;
- }
-
- return STATUS_SUCCESS;
-}
-
-int BcmDeAllocFlashCSStructure(struct bcm_mini_adapter *psAdapter)
-{
- if (!psAdapter) {
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_PRINTK, 0, 0, "Adapter structure point is NULL");
- return -EINVAL;
- }
- kfree(psAdapter->psFlashCSInfo);
- kfree(psAdapter->psFlash2xCSInfo);
- kfree(psAdapter->psFlash2xVendorInfo);
- return STATUS_SUCCESS;
-}
-
-static int BcmDumpFlash2XCSStructure(struct bcm_flash2x_cs_info *psFlash2xCSInfo, struct bcm_mini_adapter *Adapter)
-{
- unsigned int Index = 0;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "**********************FLASH2X CS Structure *******************");
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Signature is :%x", (psFlash2xCSInfo->MagicNumber));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Flash Major Version :%d", MAJOR_VERSION(psFlash2xCSInfo->FlashLayoutVersion));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Flash Minor Version :%d", MINOR_VERSION(psFlash2xCSInfo->FlashLayoutVersion));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, " ISOImageMajorVersion:0x%x", (psFlash2xCSInfo->ISOImageVersion));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "SCSIFirmwareMajorVersion :0x%x", (psFlash2xCSInfo->SCSIFirmwareVersion));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForPart1ISOImage :0x%x", (psFlash2xCSInfo->OffsetFromZeroForPart1ISOImage));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForScsiFirmware :0x%x", (psFlash2xCSInfo->OffsetFromZeroForScsiFirmware));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "SizeOfScsiFirmware :0x%x", (psFlash2xCSInfo->SizeOfScsiFirmware));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForPart2ISOImage :0x%x", (psFlash2xCSInfo->OffsetFromZeroForPart2ISOImage));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForDSDStart :0x%x", (psFlash2xCSInfo->OffsetFromZeroForDSDStart));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForDSDEnd :0x%x", (psFlash2xCSInfo->OffsetFromZeroForDSDEnd));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForVSAStart :0x%x", (psFlash2xCSInfo->OffsetFromZeroForVSAStart));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForVSAEnd :0x%x", (psFlash2xCSInfo->OffsetFromZeroForVSAEnd));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForControlSectionStart :0x%x", (psFlash2xCSInfo->OffsetFromZeroForControlSectionStart));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForControlSectionData :0x%x", (psFlash2xCSInfo->OffsetFromZeroForControlSectionData));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "CDLessInactivityTimeout :0x%x", (psFlash2xCSInfo->CDLessInactivityTimeout));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "NewImageSignature :0x%x", (psFlash2xCSInfo->NewImageSignature));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "FlashSectorSizeSig :0x%x", (psFlash2xCSInfo->FlashSectorSizeSig));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "FlashSectorSize :0x%x", (psFlash2xCSInfo->FlashSectorSize));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "FlashWriteSupportSize :0x%x", (psFlash2xCSInfo->FlashWriteSupportSize));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "TotalFlashSize :0x%X", (psFlash2xCSInfo->TotalFlashSize));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "FlashBaseAddr :0x%x", (psFlash2xCSInfo->FlashBaseAddr));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "FlashPartMaxSize :0x%x", (psFlash2xCSInfo->FlashPartMaxSize));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "IsCDLessDeviceBootSig :0x%x", (psFlash2xCSInfo->IsCDLessDeviceBootSig));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "MassStorageTimeout :0x%x", (psFlash2xCSInfo->MassStorageTimeout));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetISOImage1Part1Start :0x%x", (psFlash2xCSInfo->OffsetISOImage1Part1Start));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetISOImage1Part1End :0x%x", (psFlash2xCSInfo->OffsetISOImage1Part1End));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetISOImage1Part2Start :0x%x", (psFlash2xCSInfo->OffsetISOImage1Part2Start));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetISOImage1Part2End :0x%x", (psFlash2xCSInfo->OffsetISOImage1Part2End));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetISOImage1Part3Start :0x%x", (psFlash2xCSInfo->OffsetISOImage1Part3Start));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetISOImage1Part3End :0x%x", (psFlash2xCSInfo->OffsetISOImage1Part3End));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetISOImage2Part1Start :0x%x", (psFlash2xCSInfo->OffsetISOImage2Part1Start));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetISOImage2Part1End :0x%x", (psFlash2xCSInfo->OffsetISOImage2Part1End));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetISOImage2Part2Start :0x%x", (psFlash2xCSInfo->OffsetISOImage2Part2Start));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetISOImage2Part2End :0x%x", (psFlash2xCSInfo->OffsetISOImage2Part2End));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetISOImage2Part3Start :0x%x", (psFlash2xCSInfo->OffsetISOImage2Part3Start));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetISOImage2Part3End :0x%x", (psFlash2xCSInfo->OffsetISOImage2Part3End));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromDSDStartForDSDHeader :0x%x", (psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForDSD1Start :0x%x", (psFlash2xCSInfo->OffsetFromZeroForDSD1Start));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForDSD1End :0x%x", (psFlash2xCSInfo->OffsetFromZeroForDSD1End));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForDSD2Start :0x%x", (psFlash2xCSInfo->OffsetFromZeroForDSD2Start));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForDSD2End :0x%x", (psFlash2xCSInfo->OffsetFromZeroForDSD2End));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForVSA1Start :0x%x", (psFlash2xCSInfo->OffsetFromZeroForVSA1Start));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForVSA1End :0x%x", (psFlash2xCSInfo->OffsetFromZeroForVSA1End));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForVSA2Start :0x%x", (psFlash2xCSInfo->OffsetFromZeroForVSA2Start));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForVSA2End :0x%x", (psFlash2xCSInfo->OffsetFromZeroForVSA2End));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Sector Access Bit Map is Defined as :");
-
- for (Index = 0; Index < (FLASH2X_TOTAL_SIZE / (DEFAULT_SECTOR_SIZE * 16)); Index++)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "SectorAccessBitMap[%d] :0x%x", Index,
- (psFlash2xCSInfo->SectorAccessBitMap[Index]));
-
- return STATUS_SUCCESS;
-}
-
-static int ConvertEndianOf2XCSStructure(struct bcm_flash2x_cs_info *psFlash2xCSInfo)
-{
- unsigned int Index = 0;
-
- psFlash2xCSInfo->MagicNumber = ntohl(psFlash2xCSInfo->MagicNumber);
- psFlash2xCSInfo->FlashLayoutVersion = ntohl(psFlash2xCSInfo->FlashLayoutVersion);
- /* psFlash2xCSInfo->FlashLayoutMinorVersion = ntohs(psFlash2xCSInfo->FlashLayoutMinorVersion); */
- psFlash2xCSInfo->ISOImageVersion = ntohl(psFlash2xCSInfo->ISOImageVersion);
- psFlash2xCSInfo->SCSIFirmwareVersion = ntohl(psFlash2xCSInfo->SCSIFirmwareVersion);
- psFlash2xCSInfo->OffsetFromZeroForPart1ISOImage = ntohl(psFlash2xCSInfo->OffsetFromZeroForPart1ISOImage);
- psFlash2xCSInfo->OffsetFromZeroForScsiFirmware = ntohl(psFlash2xCSInfo->OffsetFromZeroForScsiFirmware);
- psFlash2xCSInfo->SizeOfScsiFirmware = ntohl(psFlash2xCSInfo->SizeOfScsiFirmware);
- psFlash2xCSInfo->OffsetFromZeroForPart2ISOImage = ntohl(psFlash2xCSInfo->OffsetFromZeroForPart2ISOImage);
- psFlash2xCSInfo->OffsetFromZeroForDSDStart = ntohl(psFlash2xCSInfo->OffsetFromZeroForDSDStart);
- psFlash2xCSInfo->OffsetFromZeroForDSDEnd = ntohl(psFlash2xCSInfo->OffsetFromZeroForDSDEnd);
- psFlash2xCSInfo->OffsetFromZeroForVSAStart = ntohl(psFlash2xCSInfo->OffsetFromZeroForVSAStart);
- psFlash2xCSInfo->OffsetFromZeroForVSAEnd = ntohl(psFlash2xCSInfo->OffsetFromZeroForVSAEnd);
- psFlash2xCSInfo->OffsetFromZeroForControlSectionStart = ntohl(psFlash2xCSInfo->OffsetFromZeroForControlSectionStart);
- psFlash2xCSInfo->OffsetFromZeroForControlSectionData = ntohl(psFlash2xCSInfo->OffsetFromZeroForControlSectionData);
- psFlash2xCSInfo->CDLessInactivityTimeout = ntohl(psFlash2xCSInfo->CDLessInactivityTimeout);
- psFlash2xCSInfo->NewImageSignature = ntohl(psFlash2xCSInfo->NewImageSignature);
- psFlash2xCSInfo->FlashSectorSizeSig = ntohl(psFlash2xCSInfo->FlashSectorSizeSig);
- psFlash2xCSInfo->FlashSectorSize = ntohl(psFlash2xCSInfo->FlashSectorSize);
- psFlash2xCSInfo->FlashWriteSupportSize = ntohl(psFlash2xCSInfo->FlashWriteSupportSize);
- psFlash2xCSInfo->TotalFlashSize = ntohl(psFlash2xCSInfo->TotalFlashSize);
- psFlash2xCSInfo->FlashBaseAddr = ntohl(psFlash2xCSInfo->FlashBaseAddr);
- psFlash2xCSInfo->FlashPartMaxSize = ntohl(psFlash2xCSInfo->FlashPartMaxSize);
- psFlash2xCSInfo->IsCDLessDeviceBootSig = ntohl(psFlash2xCSInfo->IsCDLessDeviceBootSig);
- psFlash2xCSInfo->MassStorageTimeout = ntohl(psFlash2xCSInfo->MassStorageTimeout);
- psFlash2xCSInfo->OffsetISOImage1Part1Start = ntohl(psFlash2xCSInfo->OffsetISOImage1Part1Start);
- psFlash2xCSInfo->OffsetISOImage1Part1End = ntohl(psFlash2xCSInfo->OffsetISOImage1Part1End);
- psFlash2xCSInfo->OffsetISOImage1Part2Start = ntohl(psFlash2xCSInfo->OffsetISOImage1Part2Start);
- psFlash2xCSInfo->OffsetISOImage1Part2End = ntohl(psFlash2xCSInfo->OffsetISOImage1Part2End);
- psFlash2xCSInfo->OffsetISOImage1Part3Start = ntohl(psFlash2xCSInfo->OffsetISOImage1Part3Start);
- psFlash2xCSInfo->OffsetISOImage1Part3End = ntohl(psFlash2xCSInfo->OffsetISOImage1Part3End);
- psFlash2xCSInfo->OffsetISOImage2Part1Start = ntohl(psFlash2xCSInfo->OffsetISOImage2Part1Start);
- psFlash2xCSInfo->OffsetISOImage2Part1End = ntohl(psFlash2xCSInfo->OffsetISOImage2Part1End);
- psFlash2xCSInfo->OffsetISOImage2Part2Start = ntohl(psFlash2xCSInfo->OffsetISOImage2Part2Start);
- psFlash2xCSInfo->OffsetISOImage2Part2End = ntohl(psFlash2xCSInfo->OffsetISOImage2Part2End);
- psFlash2xCSInfo->OffsetISOImage2Part3Start = ntohl(psFlash2xCSInfo->OffsetISOImage2Part3Start);
- psFlash2xCSInfo->OffsetISOImage2Part3End = ntohl(psFlash2xCSInfo->OffsetISOImage2Part3End);
- psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader = ntohl(psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader);
- psFlash2xCSInfo->OffsetFromZeroForDSD1Start = ntohl(psFlash2xCSInfo->OffsetFromZeroForDSD1Start);
- psFlash2xCSInfo->OffsetFromZeroForDSD1End = ntohl(psFlash2xCSInfo->OffsetFromZeroForDSD1End);
- psFlash2xCSInfo->OffsetFromZeroForDSD2Start = ntohl(psFlash2xCSInfo->OffsetFromZeroForDSD2Start);
- psFlash2xCSInfo->OffsetFromZeroForDSD2End = ntohl(psFlash2xCSInfo->OffsetFromZeroForDSD2End);
- psFlash2xCSInfo->OffsetFromZeroForVSA1Start = ntohl(psFlash2xCSInfo->OffsetFromZeroForVSA1Start);
- psFlash2xCSInfo->OffsetFromZeroForVSA1End = ntohl(psFlash2xCSInfo->OffsetFromZeroForVSA1End);
- psFlash2xCSInfo->OffsetFromZeroForVSA2Start = ntohl(psFlash2xCSInfo->OffsetFromZeroForVSA2Start);
- psFlash2xCSInfo->OffsetFromZeroForVSA2End = ntohl(psFlash2xCSInfo->OffsetFromZeroForVSA2End);
-
- for (Index = 0; Index < (FLASH2X_TOTAL_SIZE / (DEFAULT_SECTOR_SIZE * 16)); Index++)
- psFlash2xCSInfo->SectorAccessBitMap[Index] = ntohl(psFlash2xCSInfo->SectorAccessBitMap[Index]);
-
- return STATUS_SUCCESS;
-}
-
-static int ConvertEndianOfCSStructure(struct bcm_flash_cs_info *psFlashCSInfo)
-{
- /* unsigned int Index = 0; */
- psFlashCSInfo->MagicNumber = ntohl(psFlashCSInfo->MagicNumber);
- psFlashCSInfo->FlashLayoutVersion = ntohl(psFlashCSInfo->FlashLayoutVersion);
- psFlashCSInfo->ISOImageVersion = ntohl(psFlashCSInfo->ISOImageVersion);
- /* won't convert according to old assumption */
- psFlashCSInfo->SCSIFirmwareVersion = (psFlashCSInfo->SCSIFirmwareVersion);
- psFlashCSInfo->OffsetFromZeroForPart1ISOImage = ntohl(psFlashCSInfo->OffsetFromZeroForPart1ISOImage);
- psFlashCSInfo->OffsetFromZeroForScsiFirmware = ntohl(psFlashCSInfo->OffsetFromZeroForScsiFirmware);
- psFlashCSInfo->SizeOfScsiFirmware = ntohl(psFlashCSInfo->SizeOfScsiFirmware);
- psFlashCSInfo->OffsetFromZeroForPart2ISOImage = ntohl(psFlashCSInfo->OffsetFromZeroForPart2ISOImage);
- psFlashCSInfo->OffsetFromZeroForCalibrationStart = ntohl(psFlashCSInfo->OffsetFromZeroForCalibrationStart);
- psFlashCSInfo->OffsetFromZeroForCalibrationEnd = ntohl(psFlashCSInfo->OffsetFromZeroForCalibrationEnd);
- psFlashCSInfo->OffsetFromZeroForVSAStart = ntohl(psFlashCSInfo->OffsetFromZeroForVSAStart);
- psFlashCSInfo->OffsetFromZeroForVSAEnd = ntohl(psFlashCSInfo->OffsetFromZeroForVSAEnd);
- psFlashCSInfo->OffsetFromZeroForControlSectionStart = ntohl(psFlashCSInfo->OffsetFromZeroForControlSectionStart);
- psFlashCSInfo->OffsetFromZeroForControlSectionData = ntohl(psFlashCSInfo->OffsetFromZeroForControlSectionData);
- psFlashCSInfo->CDLessInactivityTimeout = ntohl(psFlashCSInfo->CDLessInactivityTimeout);
- psFlashCSInfo->NewImageSignature = ntohl(psFlashCSInfo->NewImageSignature);
- psFlashCSInfo->FlashSectorSizeSig = ntohl(psFlashCSInfo->FlashSectorSizeSig);
- psFlashCSInfo->FlashSectorSize = ntohl(psFlashCSInfo->FlashSectorSize);
- psFlashCSInfo->FlashWriteSupportSize = ntohl(psFlashCSInfo->FlashWriteSupportSize);
- psFlashCSInfo->TotalFlashSize = ntohl(psFlashCSInfo->TotalFlashSize);
- psFlashCSInfo->FlashBaseAddr = ntohl(psFlashCSInfo->FlashBaseAddr);
- psFlashCSInfo->FlashPartMaxSize = ntohl(psFlashCSInfo->FlashPartMaxSize);
- psFlashCSInfo->IsCDLessDeviceBootSig = ntohl(psFlashCSInfo->IsCDLessDeviceBootSig);
- psFlashCSInfo->MassStorageTimeout = ntohl(psFlashCSInfo->MassStorageTimeout);
-
- return STATUS_SUCCESS;
-}
-
-static int IsSectionExistInVendorInfo(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val section)
-{
- return (Adapter->uiVendorExtnFlag &&
- (Adapter->psFlash2xVendorInfo->VendorSection[section].AccessFlags & FLASH2X_SECTION_PRESENT) &&
- (Adapter->psFlash2xVendorInfo->VendorSection[section].OffsetFromZeroForSectionStart != UNINIT_PTR_IN_CS));
-}
-
-static VOID UpdateVendorInfo(struct bcm_mini_adapter *Adapter)
-{
- B_UINT32 i = 0;
- unsigned int uiSizeSection = 0;
-
- Adapter->uiVendorExtnFlag = false;
-
- for (i = 0; i < TOTAL_SECTIONS; i++)
- Adapter->psFlash2xVendorInfo->VendorSection[i].OffsetFromZeroForSectionStart = UNINIT_PTR_IN_CS;
-
- if (STATUS_SUCCESS != vendorextnGetSectionInfo(Adapter, Adapter->psFlash2xVendorInfo))
- return;
-
- i = 0;
- while (i < TOTAL_SECTIONS) {
- if (!(Adapter->psFlash2xVendorInfo->VendorSection[i].AccessFlags & FLASH2X_SECTION_PRESENT)) {
- i++;
- continue;
- }
-
- Adapter->uiVendorExtnFlag = TRUE;
- uiSizeSection = (Adapter->psFlash2xVendorInfo->VendorSection[i].OffsetFromZeroForSectionEnd -
- Adapter->psFlash2xVendorInfo->VendorSection[i].OffsetFromZeroForSectionStart);
-
- switch (i) {
- case DSD0:
- if ((uiSizeSection >= (Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + sizeof(struct bcm_dsd_header))) &&
- (UNINIT_PTR_IN_CS != Adapter->psFlash2xVendorInfo->VendorSection[i].OffsetFromZeroForSectionStart))
- Adapter->psFlash2xCSInfo->OffsetFromZeroForDSDStart = Adapter->psFlash2xCSInfo->OffsetFromZeroForDSDEnd = VENDOR_PTR_IN_CS;
- else
- Adapter->psFlash2xCSInfo->OffsetFromZeroForDSDStart = Adapter->psFlash2xCSInfo->OffsetFromZeroForDSDEnd = UNINIT_PTR_IN_CS;
- break;
-
- case DSD1:
- if ((uiSizeSection >= (Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + sizeof(struct bcm_dsd_header))) &&
- (UNINIT_PTR_IN_CS != Adapter->psFlash2xVendorInfo->VendorSection[i].OffsetFromZeroForSectionStart))
- Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD1Start = Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD1End = VENDOR_PTR_IN_CS;
- else
- Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD1Start = Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD1End = UNINIT_PTR_IN_CS;
- break;
-
- case DSD2:
- if ((uiSizeSection >= (Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + sizeof(struct bcm_dsd_header))) &&
- (UNINIT_PTR_IN_CS != Adapter->psFlash2xVendorInfo->VendorSection[i].OffsetFromZeroForSectionStart))
- Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD2Start = Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD2End = VENDOR_PTR_IN_CS;
- else
- Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD2Start = Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD2End = UNINIT_PTR_IN_CS;
- break;
- case VSA0:
- if (UNINIT_PTR_IN_CS != Adapter->psFlash2xVendorInfo->VendorSection[i].OffsetFromZeroForSectionStart)
- Adapter->psFlash2xCSInfo->OffsetFromZeroForVSAStart = Adapter->psFlash2xCSInfo->OffsetFromZeroForVSAEnd = VENDOR_PTR_IN_CS;
- else
- Adapter->psFlash2xCSInfo->OffsetFromZeroForVSAStart = Adapter->psFlash2xCSInfo->OffsetFromZeroForVSAEnd = UNINIT_PTR_IN_CS;
- break;
-
- case VSA1:
- if (UNINIT_PTR_IN_CS != Adapter->psFlash2xVendorInfo->VendorSection[i].OffsetFromZeroForSectionStart)
- Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA1Start = Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA1End = VENDOR_PTR_IN_CS;
- else
- Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA1Start = Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA1End = UNINIT_PTR_IN_CS;
- break;
- case VSA2:
- if (UNINIT_PTR_IN_CS != Adapter->psFlash2xVendorInfo->VendorSection[i].OffsetFromZeroForSectionStart)
- Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA2Start = Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA2End = VENDOR_PTR_IN_CS;
- else
- Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA2Start = Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA2End = UNINIT_PTR_IN_CS;
- break;
-
- default:
- break;
- }
- i++;
- }
-}
-
-/*
- * Procedure: BcmGetFlashCSInfo
- *
- * Description: Reads control structure and gets Cal section addresses.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- *
- * Returns:
- * <VOID>
- */
-
-static int BcmGetFlashCSInfo(struct bcm_mini_adapter *Adapter)
-{
- /* struct bcm_flash_cs_info sFlashCsInfo = {0}; */
-
- #if !defined(BCM_SHM_INTERFACE) || defined(FLASH_DIRECT_ACCESS)
- unsigned int value;
- #endif
-
- unsigned int uiFlashLayoutMajorVersion;
-
- Adapter->uiFlashLayoutMinorVersion = 0;
- Adapter->uiFlashLayoutMajorVersion = 0;
- Adapter->ulFlashControlSectionStart = FLASH_CS_INFO_START_ADDR;
-
- Adapter->uiFlashBaseAdd = 0;
- Adapter->ulFlashCalStart = 0;
- memset(Adapter->psFlashCSInfo, 0 , sizeof(struct bcm_flash_cs_info));
- memset(Adapter->psFlash2xCSInfo, 0 , sizeof(struct bcm_flash2x_cs_info));
-
- if (!Adapter->bDDRInitDone) {
- value = FLASH_CONTIGIOUS_START_ADDR_BEFORE_INIT;
- wrmalt(Adapter, 0xAF00A080, &value, sizeof(value));
- }
-
- /* Read the first 8 bytes to get the Flash Layout:
- * MagicNumber (4 bytes) + FlashLayoutMinorVersion (2 bytes) + FlashLayoutMajorVersion (2 bytes)
- */
- BeceemFlashBulkRead(Adapter, (PUINT)Adapter->psFlashCSInfo, Adapter->ulFlashControlSectionStart, 8);
-
- Adapter->psFlashCSInfo->FlashLayoutVersion = ntohl(Adapter->psFlashCSInfo->FlashLayoutVersion);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Flash Layout Version :%X", (Adapter->psFlashCSInfo->FlashLayoutVersion));
- /* BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Flash Layout Minor Version :%d\n", ntohs(sFlashCsInfo.FlashLayoutMinorVersion)); */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Signature is :%x\n", ntohl(Adapter->psFlashCSInfo->MagicNumber));
-
- if (FLASH_CONTROL_STRUCT_SIGNATURE == ntohl(Adapter->psFlashCSInfo->MagicNumber)) {
- uiFlashLayoutMajorVersion = MAJOR_VERSION((Adapter->psFlashCSInfo->FlashLayoutVersion));
- Adapter->uiFlashLayoutMinorVersion = MINOR_VERSION((Adapter->psFlashCSInfo->FlashLayoutVersion));
- } else {
- Adapter->uiFlashLayoutMinorVersion = 0;
- uiFlashLayoutMajorVersion = 0;
- }
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "FLASH LAYOUT MAJOR VERSION :%X", uiFlashLayoutMajorVersion);
-
- if (uiFlashLayoutMajorVersion < FLASH_2X_MAJOR_NUMBER) {
- BeceemFlashBulkRead(Adapter, (PUINT)Adapter->psFlashCSInfo, Adapter->ulFlashControlSectionStart, sizeof(struct bcm_flash_cs_info));
- ConvertEndianOfCSStructure(Adapter->psFlashCSInfo);
- Adapter->ulFlashCalStart = (Adapter->psFlashCSInfo->OffsetFromZeroForCalibrationStart);
-
- if (!((Adapter->uiFlashLayoutMajorVersion == 1) && (Adapter->uiFlashLayoutMinorVersion == 1)))
- Adapter->ulFlashControlSectionStart = Adapter->psFlashCSInfo->OffsetFromZeroForControlSectionStart;
-
- if ((FLASH_CONTROL_STRUCT_SIGNATURE == (Adapter->psFlashCSInfo->MagicNumber)) &&
- (SCSI_FIRMWARE_MINOR_VERSION <= MINOR_VERSION(Adapter->psFlashCSInfo->SCSIFirmwareVersion)) &&
- (FLASH_SECTOR_SIZE_SIG == (Adapter->psFlashCSInfo->FlashSectorSizeSig)) &&
- (BYTE_WRITE_SUPPORT == (Adapter->psFlashCSInfo->FlashWriteSupportSize))) {
- Adapter->ulFlashWriteSize = (Adapter->psFlashCSInfo->FlashWriteSupportSize);
- Adapter->fpFlashWrite = flashByteWrite;
- Adapter->fpFlashWriteWithStatusCheck = flashByteWriteStatus;
- } else {
- Adapter->ulFlashWriteSize = MAX_RW_SIZE;
- Adapter->fpFlashWrite = flashWrite;
- Adapter->fpFlashWriteWithStatusCheck = flashWriteStatus;
- }
-
- BcmGetFlashSectorSize(Adapter, (Adapter->psFlashCSInfo->FlashSectorSizeSig),
- (Adapter->psFlashCSInfo->FlashSectorSize));
- Adapter->uiFlashBaseAdd = Adapter->psFlashCSInfo->FlashBaseAddr & 0xFCFFFFFF;
- } else {
- if (BcmFlash2xBulkRead(Adapter, (PUINT)Adapter->psFlash2xCSInfo, NO_SECTION_VAL,
- Adapter->ulFlashControlSectionStart, sizeof(struct bcm_flash2x_cs_info))) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Unable to read CS structure\n");
- return STATUS_FAILURE;
- }
-
- ConvertEndianOf2XCSStructure(Adapter->psFlash2xCSInfo);
- BcmDumpFlash2XCSStructure(Adapter->psFlash2xCSInfo, Adapter);
- if ((FLASH_CONTROL_STRUCT_SIGNATURE == Adapter->psFlash2xCSInfo->MagicNumber) &&
- (SCSI_FIRMWARE_MINOR_VERSION <= MINOR_VERSION(Adapter->psFlash2xCSInfo->SCSIFirmwareVersion)) &&
- (FLASH_SECTOR_SIZE_SIG == Adapter->psFlash2xCSInfo->FlashSectorSizeSig) &&
- (BYTE_WRITE_SUPPORT == Adapter->psFlash2xCSInfo->FlashWriteSupportSize)) {
- Adapter->ulFlashWriteSize = Adapter->psFlash2xCSInfo->FlashWriteSupportSize;
- Adapter->fpFlashWrite = flashByteWrite;
- Adapter->fpFlashWriteWithStatusCheck = flashByteWriteStatus;
- } else {
- Adapter->ulFlashWriteSize = MAX_RW_SIZE;
- Adapter->fpFlashWrite = flashWrite;
- Adapter->fpFlashWriteWithStatusCheck = flashWriteStatus;
- }
-
- BcmGetFlashSectorSize(Adapter, Adapter->psFlash2xCSInfo->FlashSectorSizeSig,
- Adapter->psFlash2xCSInfo->FlashSectorSize);
-
- UpdateVendorInfo(Adapter);
-
- BcmGetActiveDSD(Adapter);
- BcmGetActiveISO(Adapter);
- Adapter->uiFlashBaseAdd = Adapter->psFlash2xCSInfo->FlashBaseAddr & 0xFCFFFFFF;
- Adapter->ulFlashControlSectionStart = Adapter->psFlash2xCSInfo->OffsetFromZeroForControlSectionStart;
- }
- /*
- * Concerns: what if the CS sector size does not match this sector size?
- * What does the AccessBitMap in the CS indicate in flash 2.x?
- */
- Adapter->ulFlashID = BcmReadFlashRDID(Adapter);
- Adapter->uiFlashLayoutMajorVersion = uiFlashLayoutMajorVersion;
-
- return STATUS_SUCCESS;
-}
-
-/*
- * Procedure: BcmGetNvmType
- *
- * Description: Finds the type of NVM used.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- *
- * Returns:
- * NVM_TYPE
- *
- */
-
-static enum bcm_nvm_type BcmGetNvmType(struct bcm_mini_adapter *Adapter)
-{
- unsigned int uiData = 0;
-
- BeceemEEPROMBulkRead(Adapter, &uiData, 0x0, 4);
- if (uiData == BECM)
- return NVM_EEPROM;
-
- /*
- * Read control struct and get cal addresses before accessing the flash
- */
- BcmGetFlashCSInfo(Adapter);
-
- BeceemFlashBulkRead(Adapter, &uiData, 0x0 + Adapter->ulFlashCalStart, 4);
- if (uiData == BECM)
- return NVM_FLASH;
-
- /*
- * Even if there is no valid signature on the EEPROM/FLASH, find out whether they really exist;
- * if one exists, select it.
- */
- if (BcmGetEEPROMSize(Adapter))
- return NVM_EEPROM;
-
- /* TBD for Flash. */
- return NVM_UNKNOWN;
-}
-
-/*
- * BcmGetSectionValStartOffset - calculates the section's starting offset for a given section val
- * @Adapter : Driver's private data structure
- * @eFlashSectionVal : Flash section value defined in enum bcm_flash2x_section_val
- *
- * Return value:-
- * On success it returns the start offset of the provided section val
- * On failure it returns STATUS_FAILURE
- */
-
-int BcmGetSectionValStartOffset(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val eFlashSectionVal)
-{
- /*
- * Considering all the sections for which the start offset can be calculated or is directly given
- * in the CS structure. If no matching case exists, return STATUS_FAILURE, indicating the section
- * start offset can't be calculated and is not given in the CS structure.
- */
-
- int SectStartOffset = 0;
-
- SectStartOffset = INVALID_OFFSET;
-
- if (IsSectionExistInVendorInfo(Adapter, eFlashSectionVal))
- return Adapter->psFlash2xVendorInfo->VendorSection[eFlashSectionVal].OffsetFromZeroForSectionStart;
-
- switch (eFlashSectionVal) {
- case ISO_IMAGE1:
- if ((Adapter->psFlash2xCSInfo->OffsetISOImage1Part1Start != UNINIT_PTR_IN_CS) &&
- (IsNonCDLessDevice(Adapter) == false))
- SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage1Part1Start);
- break;
- case ISO_IMAGE2:
- if ((Adapter->psFlash2xCSInfo->OffsetISOImage2Part1Start != UNINIT_PTR_IN_CS) &&
- (IsNonCDLessDevice(Adapter) == false))
- SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage2Part1Start);
- break;
- case DSD0:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSDStart != UNINIT_PTR_IN_CS)
- SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSDStart);
- break;
- case DSD1:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD1Start != UNINIT_PTR_IN_CS)
- SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD1Start);
- break;
- case DSD2:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD2Start != UNINIT_PTR_IN_CS)
- SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD2Start);
- break;
- case VSA0:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSAStart != UNINIT_PTR_IN_CS)
- SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSAStart);
- break;
- case VSA1:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA1Start != UNINIT_PTR_IN_CS)
- SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA1Start);
- break;
- case VSA2:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA2Start != UNINIT_PTR_IN_CS)
- SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA2Start);
- break;
- case SCSI:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForScsiFirmware != UNINIT_PTR_IN_CS)
- SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForScsiFirmware);
- break;
- case CONTROL_SECTION:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForControlSectionStart != UNINIT_PTR_IN_CS)
- SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForControlSectionStart);
- break;
- case ISO_IMAGE1_PART2:
- if (Adapter->psFlash2xCSInfo->OffsetISOImage1Part2Start != UNINIT_PTR_IN_CS)
- SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage1Part2Start);
- break;
- case ISO_IMAGE1_PART3:
- if (Adapter->psFlash2xCSInfo->OffsetISOImage1Part3Start != UNINIT_PTR_IN_CS)
- SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage1Part3Start);
- break;
- case ISO_IMAGE2_PART2:
- if (Adapter->psFlash2xCSInfo->OffsetISOImage2Part2Start != UNINIT_PTR_IN_CS)
- SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage2Part2Start);
- break;
- case ISO_IMAGE2_PART3:
- if (Adapter->psFlash2xCSInfo->OffsetISOImage2Part3Start != UNINIT_PTR_IN_CS)
- SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage2Part3Start);
- break;
- default:
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section Does not exist in Flash 2.x");
- SectStartOffset = INVALID_OFFSET;
- }
-
- return SectStartOffset;
-}
-
-/*
- * BcmGetSectionValEndOffset - calculates the section's ending offset for a given section val
- * @Adapter : Driver's private data structure
- * @eFlashSectionVal : Flash section value defined in enum bcm_flash2x_section_val
- *
- * Return value:-
- * On success it returns the end offset of the provided section val
- * On failure it returns STATUS_FAILURE
- */
-
-static int BcmGetSectionValEndOffset(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val eFlash2xSectionVal)
-{
- int SectEndOffset = 0;
-
- SectEndOffset = INVALID_OFFSET;
- if (IsSectionExistInVendorInfo(Adapter, eFlash2xSectionVal))
- return Adapter->psFlash2xVendorInfo->VendorSection[eFlash2xSectionVal].OffsetFromZeroForSectionEnd;
-
- switch (eFlash2xSectionVal) {
- case ISO_IMAGE1:
- if ((Adapter->psFlash2xCSInfo->OffsetISOImage1Part1End != UNINIT_PTR_IN_CS) &&
- (IsNonCDLessDevice(Adapter) == false))
- SectEndOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage1Part1End);
- break;
- case ISO_IMAGE2:
- if ((Adapter->psFlash2xCSInfo->OffsetISOImage2Part1End != UNINIT_PTR_IN_CS) &&
- (IsNonCDLessDevice(Adapter) == false))
- SectEndOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage2Part1End);
- break;
- case DSD0:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSDEnd != UNINIT_PTR_IN_CS)
- SectEndOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSDEnd);
- break;
- case DSD1:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD1End != UNINIT_PTR_IN_CS)
- SectEndOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD1End);
- break;
- case DSD2:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD2End != UNINIT_PTR_IN_CS)
- SectEndOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD2End);
- break;
- case VSA0:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSAEnd != UNINIT_PTR_IN_CS)
- SectEndOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSAEnd);
- break;
- case VSA1:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA1End != UNINIT_PTR_IN_CS)
- SectEndOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA1End);
- break;
- case VSA2:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA2End != UNINIT_PTR_IN_CS)
- SectEndOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA2End);
- break;
- case SCSI:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForScsiFirmware != UNINIT_PTR_IN_CS)
- SectEndOffset = ((Adapter->psFlash2xCSInfo->OffsetFromZeroForScsiFirmware) +
- (Adapter->psFlash2xCSInfo->SizeOfScsiFirmware));
- break;
- case CONTROL_SECTION:
- /* Not clear, so returning failure; confirm and fix it. */
- SectEndOffset = STATUS_FAILURE;
- break;
- case ISO_IMAGE1_PART2:
- if (Adapter->psFlash2xCSInfo->OffsetISOImage1Part2End != UNINIT_PTR_IN_CS)
- SectEndOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage1Part2End);
- break;
- case ISO_IMAGE1_PART3:
- if (Adapter->psFlash2xCSInfo->OffsetISOImage1Part3End != UNINIT_PTR_IN_CS)
- SectEndOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage1Part3End);
- break;
- case ISO_IMAGE2_PART2:
- if (Adapter->psFlash2xCSInfo->OffsetISOImage2Part2End != UNINIT_PTR_IN_CS)
- SectEndOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage2Part2End);
- break;
- case ISO_IMAGE2_PART3:
- if (Adapter->psFlash2xCSInfo->OffsetISOImage2Part3End != UNINIT_PTR_IN_CS)
- SectEndOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage2Part3End);
- break;
- default:
- SectEndOffset = INVALID_OFFSET;
- }
-
- return SectEndOffset;
-}
-
-/*
- * BcmFlash2xBulkRead:- Read API for Flash Map 2.x.
- * @Adapter : Driver private data structure
- * @pBuffer : Buffer where data has to be put after reading
- * @eFlashSectionVal : Flash section val defined in enum bcm_flash2x_section_val
- * @uiOffsetWithinSectionVal : Offset within the provided section
- * @uiNumBytes : Number of bytes to read
- *
- * Return value:-
- * Returns true on success and STATUS_FAILURE on failure.
- */
-
-int BcmFlash2xBulkRead(struct bcm_mini_adapter *Adapter,
- PUINT pBuffer,
- enum bcm_flash2x_section_val eFlash2xSectionVal,
- unsigned int uiOffsetWithinSectionVal,
- unsigned int uiNumBytes)
-{
- int Status = STATUS_SUCCESS;
- int SectionStartOffset = 0;
- unsigned int uiAbsoluteOffset = 0;
- unsigned int uiTemp = 0, value = 0;
-
- if (!Adapter) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Adapter structure is NULL");
- return -EINVAL;
- }
- if (Adapter->device_removed) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Device has been removed");
- return -ENODEV;
- }
-
- /* NO_SECTION_VAL means absolute offset is given. */
- if (eFlash2xSectionVal == NO_SECTION_VAL)
- SectionStartOffset = 0;
- else
- SectionStartOffset = BcmGetSectionValStartOffset(Adapter, eFlash2xSectionVal);
-
- if (SectionStartOffset == STATUS_FAILURE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "This Section<%d> does not exist in Flash 2.x Map ", eFlash2xSectionVal);
- return -EINVAL;
- }
-
- if (IsSectionExistInVendorInfo(Adapter, eFlash2xSectionVal))
- return vendorextnReadSection(Adapter, (PUCHAR)pBuffer, eFlash2xSectionVal, uiOffsetWithinSectionVal, uiNumBytes);
-
- /* calculating the absolute offset from FLASH; */
- uiAbsoluteOffset = uiOffsetWithinSectionVal + SectionStartOffset;
- rdmalt(Adapter, 0x0f000C80, &uiTemp, sizeof(uiTemp));
- value = 0;
- wrmalt(Adapter, 0x0f000C80, &value, sizeof(value));
- Status = BeceemFlashBulkRead(Adapter, pBuffer, uiAbsoluteOffset, uiNumBytes);
- wrmalt(Adapter, 0x0f000C80, &uiTemp, sizeof(uiTemp));
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Read Failed with Status :%d", Status);
- return Status;
- }
-
- return Status;
-}
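A minimal usage sketch of the read API (hypothetical caller; the section choice, buffer size, and error handling are illustrative only and follow the nonzero-means-failure convention used elsewhere in this file):

	/* Hypothetical caller: read the first 16 bytes of the DSD0 section. */
	unsigned int aBuf[4] = {0};

	if (BcmFlash2xBulkRead(Adapter, aBuf, DSD0, 0, sizeof(aBuf)))
		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "DSD0 read failed");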
-
-/*
- * BcmFlash2xBulkWrite :- API for writing to the Flash Map 2.x.
- * @Adapter : Driver private data structure
- * @pBuffer : Buffer from where data is taken for writing
- * @eFlashSectionVal : Flash section val defined in enum bcm_flash2x_section_val
- * @uiOffsetWithinSectionVal : Offset within the provided section
- * @uiNumBytes : Number of bytes to write
- *
- * Return value:-
- * Returns true on success and STATUS_FAILURE on failure.
- *
- */
-
-int BcmFlash2xBulkWrite(struct bcm_mini_adapter *Adapter,
- PUINT pBuffer,
- enum bcm_flash2x_section_val eFlash2xSectVal,
- unsigned int uiOffset,
- unsigned int uiNumBytes,
- unsigned int bVerify)
-{
- int Status = STATUS_SUCCESS;
- unsigned int FlashSectValStartOffset = 0;
- unsigned int uiTemp = 0, value = 0;
-
- if (!Adapter) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Adapter structure is NULL");
- return -EINVAL;
- }
-
- if (Adapter->device_removed) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Device has been removed");
- return -ENODEV;
- }
-
- /* NO_SECTION_VAL means absolute offset is given. */
- if (eFlash2xSectVal == NO_SECTION_VAL)
- FlashSectValStartOffset = 0;
- else
- FlashSectValStartOffset = BcmGetSectionValStartOffset(Adapter, eFlash2xSectVal);
-
- if (FlashSectValStartOffset == STATUS_FAILURE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "This Section<%d> does not exist in Flash Map 2.x", eFlash2xSectVal);
- return -EINVAL;
- }
-
- if (IsSectionExistInVendorInfo(Adapter, eFlash2xSectVal))
- return vendorextnWriteSection(Adapter, (PUCHAR)pBuffer, eFlash2xSectVal, uiOffset, uiNumBytes, bVerify);
-
- /* calculating the absolute offset from FLASH; */
- uiOffset = uiOffset + FlashSectValStartOffset;
-
- rdmalt(Adapter, 0x0f000C80, &uiTemp, sizeof(uiTemp));
- value = 0;
- wrmalt(Adapter, 0x0f000C80, &value, sizeof(value));
-
- Status = BeceemFlashBulkWrite(Adapter, pBuffer, uiOffset, uiNumBytes, bVerify);
-
- wrmalt(Adapter, 0x0f000C80, &uiTemp, sizeof(uiTemp));
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Write failed with Status :%d", Status);
- return Status;
- }
-
- return Status;
-}
-
-/*
- * BcmGetActiveDSD : Set the active DSD in the Adapter structure; this is the DSD that has to be dumped into DDR
- * @Adapter :- Driver's private data structure
- *
- * Return Value:-
- * Returns STATUS_SUCCESS if the right DSD is set successfully, else a negative error code
- *
- */
-
-static int BcmGetActiveDSD(struct bcm_mini_adapter *Adapter)
-{
- enum bcm_flash2x_section_val uiHighestPriDSD = 0;
-
- uiHighestPriDSD = getHighestPriDSD(Adapter);
- Adapter->eActiveDSD = uiHighestPriDSD;
-
- if (DSD0 == uiHighestPriDSD)
- Adapter->ulFlashCalStart = Adapter->psFlash2xCSInfo->OffsetFromZeroForDSDStart;
- if (DSD1 == uiHighestPriDSD)
- Adapter->ulFlashCalStart = Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD1Start;
- if (DSD2 == uiHighestPriDSD)
- Adapter->ulFlashCalStart = Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD2Start;
- if (Adapter->eActiveDSD)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Active DSD :%d", Adapter->eActiveDSD);
- if (Adapter->eActiveDSD == 0) {
- /* If no DSD is active, make active the DSD with write permission */
- if (IsSectionWritable(Adapter, DSD2)) {
- Adapter->eActiveDSD = DSD2;
- Adapter->ulFlashCalStart = Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD2Start;
- } else if (IsSectionWritable(Adapter, DSD1)) {
- Adapter->eActiveDSD = DSD1;
- Adapter->ulFlashCalStart = Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD1Start;
- } else if (IsSectionWritable(Adapter, DSD0)) {
- Adapter->eActiveDSD = DSD0;
- Adapter->ulFlashCalStart = Adapter->psFlash2xCSInfo->OffsetFromZeroForDSDStart;
- }
- }
-
- return STATUS_SUCCESS;
-}
-
-/*
- * BcmGetActiveISO :- Set the active ISO in the Adapter data structure
- * @Adapter : Driver private data structure
- *
- * Return Value:-
- * Success:- STATUS_SUCCESS
- * Failure:- negative error code
- *
- */
-
-static int BcmGetActiveISO(struct bcm_mini_adapter *Adapter)
-{
- int HighestPriISO = 0;
-
- HighestPriISO = getHighestPriISO(Adapter);
-
- Adapter->eActiveISO = HighestPriISO;
- if (Adapter->eActiveISO == ISO_IMAGE2)
- Adapter->uiActiveISOOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage2Part1Start);
- else if (Adapter->eActiveISO == ISO_IMAGE1)
- Adapter->uiActiveISOOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage1Part1Start);
-
- if (Adapter->eActiveISO)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Active ISO :%x", Adapter->eActiveISO);
-
- return STATUS_SUCCESS;
-}
-
-/*
- * IsOffsetWritable :- reports the access permission of the sector containing the passed offset
- * @Adapter : Driver's private data structure
- * @uiOffset : Offset within the flash
- *
- * Return Value:-
- * TRUE  :- offset is writable
- * false :- offset is read-only
- *
- */
-
-static B_UINT8 IsOffsetWritable(struct bcm_mini_adapter *Adapter, unsigned int uiOffset)
-{
- unsigned int uiSectorNum = 0;
- unsigned int uiWordOfSectorPermission = 0;
- unsigned int uiBitofSectorePermission = 0;
- B_UINT32 permissionBits = 0;
-
- uiSectorNum = uiOffset/Adapter->uiSectorSize;
-
- /* calculating the word having this Sector Access permission from SectorAccessBitMap Array */
- uiWordOfSectorPermission = Adapter->psFlash2xCSInfo->SectorAccessBitMap[uiSectorNum / 16];
-
- /* calculating the bit index inside the word for this sector */
- uiBitofSectorePermission = 2 * (15 - uiSectorNum % 16);
-
- /* Extracting the 2-bit access permission for this sector */
- permissionBits = uiWordOfSectorPermission & (0x3 << uiBitofSectorePermission);
- permissionBits = (permissionBits >> uiBitofSectorePermission) & 0x3;
- if (permissionBits == SECTOR_READWRITE_PERMISSION)
- return TRUE;
- else
- return false;
-}
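
As a side note, the 2-bits-per-sector permission lookup above can be exercised in isolation. Below is a minimal user-space sketch of the same decoding, assuming (as the read-only default above suggests) that SECTOR_READWRITE_PERMISSION expands to 0; the constant value and the sample bitmap word are illustrative only.

#include <stdio.h>
#include <stdint.h>

#define SECTOR_READWRITE_PERMISSION 0	/* assumed value of the driver macro */

/* Decode the 2-bit access field for a sector from one 16-sector bitmap word,
 * using the same "sector 0 sits in bits 31:30" layout as IsOffsetWritable(). */
static int sector_is_writable(uint32_t bitmap_word, unsigned int sector)
{
	unsigned int shift = 2 * (15 - (sector % 16));

	return ((bitmap_word >> shift) & 0x3) == SECTOR_READWRITE_PERMISSION;
}

int main(void)
{
	uint32_t word = 0x0000000C;	/* example: sector 14 marked non-writable */

	printf("sector 0 writable: %d\n", sector_is_writable(word, 0));
	printf("sector 14 writable: %d\n", sector_is_writable(word, 14));
	return 0;
}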
-
-static int BcmDumpFlash2xSectionBitMap(struct bcm_flash2x_bitmap *psFlash2xBitMap)
-{
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "***************Flash 2.x Section Bitmap***************");
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "ISO_IMAGE1 :0X%x", psFlash2xBitMap->ISO_IMAGE1);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "ISO_IMAGE2 :0X%x", psFlash2xBitMap->ISO_IMAGE2);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "DSD0 :0X%x", psFlash2xBitMap->DSD0);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "DSD1 :0X%x", psFlash2xBitMap->DSD1);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "DSD2 :0X%x", psFlash2xBitMap->DSD2);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "VSA0 :0X%x", psFlash2xBitMap->VSA0);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "VSA1 :0X%x", psFlash2xBitMap->VSA1);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "VSA2 :0X%x", psFlash2xBitMap->VSA2);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "SCSI :0X%x", psFlash2xBitMap->SCSI);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "CONTROL_SECTION :0X%x", psFlash2xBitMap->CONTROL_SECTION);
-
- return STATUS_SUCCESS;
-}
-
-/*
- * BcmGetFlash2xSectionalBitMap :- Provides the bitmap of all the sections present in flash.
- * 8 bits are assigned to every section:
- * bit[0] : section is present or not
- * bit[1] : section is valid or not
- * bit[2] : section is read-only or also has write permission
- * bit[3] : section is active
- * bit[7...4] : reserved
- *
- * @Adapter :- Driver's private data structure
- *
- * Return value:-
- * Success:- STATUS_SUCCESS
- * Failure:- negative error code
- */
-
-int BcmGetFlash2xSectionalBitMap(struct bcm_mini_adapter *Adapter, struct bcm_flash2x_bitmap *psFlash2xBitMap)
-{
- struct bcm_flash2x_cs_info *psFlash2xCSInfo = Adapter->psFlash2xCSInfo;
- enum bcm_flash2x_section_val uiHighestPriDSD = 0;
- enum bcm_flash2x_section_val uiHighestPriISO = 0;
- bool SetActiveDSDDone = false;
- bool SetActiveISODone = false;
-
- /* For a 1.x map, every section except DSD0 is reported as not present.
-  * The calibration tool uses this to detect the number of DSDs present in flash.
-  */
- if (IsFlash2x(Adapter) == false) {
- psFlash2xBitMap->ISO_IMAGE2 = 0;
- psFlash2xBitMap->ISO_IMAGE1 = 0;
- psFlash2xBitMap->DSD0 = FLASH2X_SECTION_VALID | FLASH2X_SECTION_ACT | FLASH2X_SECTION_PRESENT; /* 0xB; 0000(Reserved) 1(Active) 0(RW) 1(Valid) 1(Present) */
- psFlash2xBitMap->DSD1 = 0;
- psFlash2xBitMap->DSD2 = 0;
- psFlash2xBitMap->VSA0 = 0;
- psFlash2xBitMap->VSA1 = 0;
- psFlash2xBitMap->VSA2 = 0;
- psFlash2xBitMap->CONTROL_SECTION = 0;
- psFlash2xBitMap->SCSI = 0;
- psFlash2xBitMap->Reserved0 = 0;
- psFlash2xBitMap->Reserved1 = 0;
- psFlash2xBitMap->Reserved2 = 0;
-
- return STATUS_SUCCESS;
- }
-
- uiHighestPriDSD = getHighestPriDSD(Adapter);
- uiHighestPriISO = getHighestPriISO(Adapter);
-
- /*
- * ISO IMAGE 2
- */
- if ((psFlash2xCSInfo->OffsetISOImage2Part1Start) != UNINIT_PTR_IN_CS) {
- /* Setting the 0th Bit representing the Section is present or not. */
- psFlash2xBitMap->ISO_IMAGE2 = psFlash2xBitMap->ISO_IMAGE2 | FLASH2X_SECTION_PRESENT;
-
- if (ReadISOSignature(Adapter, ISO_IMAGE2) == ISO_IMAGE_MAGIC_NUMBER)
- psFlash2xBitMap->ISO_IMAGE2 |= FLASH2X_SECTION_VALID;
-
- /* Extracting the access permission */
- if (IsSectionWritable(Adapter, ISO_IMAGE2) == false)
- psFlash2xBitMap->ISO_IMAGE2 |= FLASH2X_SECTION_RO;
-
- if (SetActiveISODone == false && uiHighestPriISO == ISO_IMAGE2) {
- psFlash2xBitMap->ISO_IMAGE2 |= FLASH2X_SECTION_ACT;
- SetActiveISODone = TRUE;
- }
- }
-
- /*
- * ISO IMAGE 1
- */
- if ((psFlash2xCSInfo->OffsetISOImage1Part1Start) != UNINIT_PTR_IN_CS) {
- /* Setting the 0th Bit representing the Section is present or not. */
- psFlash2xBitMap->ISO_IMAGE1 = psFlash2xBitMap->ISO_IMAGE1 | FLASH2X_SECTION_PRESENT;
-
- if (ReadISOSignature(Adapter, ISO_IMAGE1) == ISO_IMAGE_MAGIC_NUMBER)
- psFlash2xBitMap->ISO_IMAGE1 |= FLASH2X_SECTION_VALID;
-
- /* Extracting the access permission */
- if (IsSectionWritable(Adapter, ISO_IMAGE1) == false)
- psFlash2xBitMap->ISO_IMAGE1 |= FLASH2X_SECTION_RO;
-
- if (SetActiveISODone == false && uiHighestPriISO == ISO_IMAGE1) {
- psFlash2xBitMap->ISO_IMAGE1 |= FLASH2X_SECTION_ACT;
- SetActiveISODone = TRUE;
- }
- }
-
- /*
- * DSD2
- */
- if ((psFlash2xCSInfo->OffsetFromZeroForDSD2Start) != UNINIT_PTR_IN_CS) {
- /* Setting the 0th Bit representing the Section is present or not. */
- psFlash2xBitMap->DSD2 = psFlash2xBitMap->DSD2 | FLASH2X_SECTION_PRESENT;
-
- if (ReadDSDSignature(Adapter, DSD2) == DSD_IMAGE_MAGIC_NUMBER)
- psFlash2xBitMap->DSD2 |= FLASH2X_SECTION_VALID;
-
- /* Extracting the access permission */
- if (IsSectionWritable(Adapter, DSD2) == false) {
- psFlash2xBitMap->DSD2 |= FLASH2X_SECTION_RO;
- } else {
- /* Means section is writable */
- if ((SetActiveDSDDone == false) && (uiHighestPriDSD == DSD2)) {
- psFlash2xBitMap->DSD2 |= FLASH2X_SECTION_ACT;
- SetActiveDSDDone = TRUE;
- }
- }
- }
-
- /*
- * DSD 1
- */
- if ((psFlash2xCSInfo->OffsetFromZeroForDSD1Start) != UNINIT_PTR_IN_CS) {
- /* Setting the 0th Bit representing the Section is present or not. */
- psFlash2xBitMap->DSD1 = psFlash2xBitMap->DSD1 | FLASH2X_SECTION_PRESENT;
-
- if (ReadDSDSignature(Adapter, DSD1) == DSD_IMAGE_MAGIC_NUMBER)
- psFlash2xBitMap->DSD1 |= FLASH2X_SECTION_VALID;
-
- /* Extracting the access permission */
- if (IsSectionWritable(Adapter, DSD1) == false) {
- psFlash2xBitMap->DSD1 |= FLASH2X_SECTION_RO;
- } else {
- /* Means section is writable */
- if ((SetActiveDSDDone == false) && (uiHighestPriDSD == DSD1)) {
- psFlash2xBitMap->DSD1 |= FLASH2X_SECTION_ACT;
- SetActiveDSDDone = TRUE;
- }
- }
- }
-
- /*
- * For DSD 0
- */
- if ((psFlash2xCSInfo->OffsetFromZeroForDSDStart) != UNINIT_PTR_IN_CS) {
- /* Setting the 0th Bit representing the Section is present or not. */
- psFlash2xBitMap->DSD0 = psFlash2xBitMap->DSD0 | FLASH2X_SECTION_PRESENT;
-
- if (ReadDSDSignature(Adapter, DSD0) == DSD_IMAGE_MAGIC_NUMBER)
- psFlash2xBitMap->DSD0 |= FLASH2X_SECTION_VALID;
-
- /* Setting Access permission */
- if (IsSectionWritable(Adapter, DSD0) == false) {
- psFlash2xBitMap->DSD0 |= FLASH2X_SECTION_RO;
- } else {
- /* Means section is writable */
- if ((SetActiveDSDDone == false) && (uiHighestPriDSD == DSD0)) {
- psFlash2xBitMap->DSD0 |= FLASH2X_SECTION_ACT;
- SetActiveDSDDone = TRUE;
- }
- }
- }
-
- /*
- * VSA 0
- */
- if ((psFlash2xCSInfo->OffsetFromZeroForVSAStart) != UNINIT_PTR_IN_CS) {
- /* Setting the 0th Bit representing the Section is present or not. */
- psFlash2xBitMap->VSA0 = psFlash2xBitMap->VSA0 | FLASH2X_SECTION_PRESENT;
-
- /* Setting the valid bit. No signature map is defined for this section, hence it is always marked valid */
- psFlash2xBitMap->VSA0 |= FLASH2X_SECTION_VALID;
-
- /* Extracting the access permission */
- if (IsSectionWritable(Adapter, VSA0) == false)
- psFlash2xBitMap->VSA0 |= FLASH2X_SECTION_RO;
-
- /* By Default section is Active */
- psFlash2xBitMap->VSA0 |= FLASH2X_SECTION_ACT;
- }
-
- /*
- * VSA 1
- */
- if ((psFlash2xCSInfo->OffsetFromZeroForVSA1Start) != UNINIT_PTR_IN_CS) {
- /* Setting the 0th Bit representing the Section is present or not. */
- psFlash2xBitMap->VSA1 = psFlash2xBitMap->VSA1 | FLASH2X_SECTION_PRESENT;
-
- /* Setting the valid bit. No signature map is defined for this section, hence it is always marked valid */
- psFlash2xBitMap->VSA1 |= FLASH2X_SECTION_VALID;
-
- /* Checking For Access permission */
- if (IsSectionWritable(Adapter, VSA1) == false)
- psFlash2xBitMap->VSA1 |= FLASH2X_SECTION_RO;
-
- /* By Default section is Active */
- psFlash2xBitMap->VSA1 |= FLASH2X_SECTION_ACT;
- }
-
- /*
- * VSA 2
- */
- if ((psFlash2xCSInfo->OffsetFromZeroForVSA2Start) != UNINIT_PTR_IN_CS) {
- /* Setting the 0th Bit representing the Section is present or not. */
- psFlash2xBitMap->VSA2 = psFlash2xBitMap->VSA2 | FLASH2X_SECTION_PRESENT;
-
- /* Setting the valid bit. No signature map is defined for this section, hence it is always marked valid */
- psFlash2xBitMap->VSA2 |= FLASH2X_SECTION_VALID;
-
- /* Checking For Access permission */
- if (IsSectionWritable(Adapter, VSA2) == false)
- psFlash2xBitMap->VSA2 |= FLASH2X_SECTION_RO;
-
- /* By Default section is Active */
- psFlash2xBitMap->VSA2 |= FLASH2X_SECTION_ACT;
- }
-
- /*
- * SCSI Section
- */
- if ((psFlash2xCSInfo->OffsetFromZeroForScsiFirmware) != UNINIT_PTR_IN_CS) {
- /* Setting the 0th Bit representing the Section is present or not. */
- psFlash2xBitMap->SCSI = psFlash2xBitMap->SCSI | FLASH2X_SECTION_PRESENT;
-
- /* Setting the valid bit. No signature map is defined for this section, hence it is always marked valid */
- psFlash2xBitMap->SCSI |= FLASH2X_SECTION_VALID;
-
- /* Checking For Access permission */
- if (IsSectionWritable(Adapter, SCSI) == false)
- psFlash2xBitMap->SCSI |= FLASH2X_SECTION_RO;
-
- /* By Default section is Active */
- psFlash2xBitMap->SCSI |= FLASH2X_SECTION_ACT;
- }
-
- /*
- * Control Section
- */
- if ((psFlash2xCSInfo->OffsetFromZeroForControlSectionStart) != UNINIT_PTR_IN_CS) {
- /* Setting the 0th Bit representing the Section is present or not. */
- psFlash2xBitMap->CONTROL_SECTION = psFlash2xBitMap->CONTROL_SECTION | (FLASH2X_SECTION_PRESENT);
-
- /* Setting the valid bit. No signature map is defined for this section, hence it is always marked valid */
- psFlash2xBitMap->CONTROL_SECTION |= FLASH2X_SECTION_VALID;
-
- /* Checking For Access permission */
- if (IsSectionWritable(Adapter, CONTROL_SECTION) == false)
- psFlash2xBitMap->CONTROL_SECTION |= FLASH2X_SECTION_RO;
-
- /* By Default section is Active */
- psFlash2xBitMap->CONTROL_SECTION |= FLASH2X_SECTION_ACT;
- }
-
- /*
- * For Reserved Sections
- */
- psFlash2xBitMap->Reserved0 = 0;
- psFlash2xBitMap->Reserved1 = 0;
- psFlash2xBitMap->Reserved2 = 0;
- BcmDumpFlash2xSectionBitMap(psFlash2xBitMap);
-
- return STATUS_SUCCESS;
-}
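
To make the bit layout above concrete, here is a small stand-alone sketch that decodes one per-section byte. The flag values mirror the bit assignment documented in the function header (bit0 present, bit1 valid, bit2 read-only, bit3 active); the real FLASH2X_SECTION_* definitions live in the driver headers and are only assumed here.

#include <stdio.h>

/* Assumed flag values, mirroring the documented layout of the section bitmap. */
#define SEC_PRESENT	(1 << 0)
#define SEC_VALID	(1 << 1)
#define SEC_RO		(1 << 2)
#define SEC_ACT		(1 << 3)

static void dump_section_flags(const char *name, unsigned char flags)
{
	printf("%-12s present=%d valid=%d %s%s\n", name,
	       !!(flags & SEC_PRESENT), !!(flags & SEC_VALID),
	       (flags & SEC_RO) ? "RO" : "RW",
	       (flags & SEC_ACT) ? " ACTIVE" : "");
}

int main(void)
{
	dump_section_flags("DSD0", SEC_PRESENT | SEC_VALID | SEC_ACT);	/* 0xB */
	dump_section_flags("ISO_IMAGE2", SEC_PRESENT | SEC_RO);
	return 0;
}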
-
-/*
- * BcmSetActiveSection :- makes the priority field of the given section the highest among
- * sections of the same type.
- *
- * @Adapter :- Bcm driver private data structure
- * @eFlash2xSectionVal :- Flash section val whose priority has to be made highest.
- *
- * Return Value:- STATUS_SUCCESS if the priority was made highest, else an error code
- *
- */
-
-int BcmSetActiveSection(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val eFlash2xSectVal)
-{
- unsigned int SectImagePriority = 0;
- int Status = STATUS_SUCCESS;
-
- /* struct bcm_dsd_header sDSD = {0};
- * struct bcm_iso_header sISO = {0};
- */
- int HighestPriDSD = 0;
- int HighestPriISO = 0;
-
- Status = IsSectionWritable(Adapter, eFlash2xSectVal);
- if (Status != TRUE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Provided Section <%d> is not writable", eFlash2xSectVal);
- return STATUS_FAILURE;
- }
-
- Adapter->bHeaderChangeAllowed = TRUE;
- switch (eFlash2xSectVal) {
- case ISO_IMAGE1:
- case ISO_IMAGE2:
- if (ReadISOSignature(Adapter, eFlash2xSectVal) == ISO_IMAGE_MAGIC_NUMBER) {
- HighestPriISO = getHighestPriISO(Adapter);
-
- if (HighestPriISO == eFlash2xSectVal) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Given ISO<%x> already has highest priority", eFlash2xSectVal);
- Status = STATUS_SUCCESS;
- break;
- }
-
- SectImagePriority = ReadISOPriority(Adapter, HighestPriISO) + 1;
-
- if ((SectImagePriority == 0) && IsSectionWritable(Adapter, HighestPriISO)) {
- /* This is a SPECIAL Case which will only happen if the current highest priority ISO has priority value = 0x7FFFFFFF.
- * We will write 1 to the current Highest priority ISO And then shall increase the priority of the requested ISO
- * by user
- */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "SectImagePriority wraparound happened, eFlash2xSectVal: 0x%x\n", eFlash2xSectVal);
- SectImagePriority = htonl(0x1);
- Status = BcmFlash2xBulkWrite(Adapter,
- &SectImagePriority,
- HighestPriISO,
- 0 + FIELD_OFFSET_IN_HEADER(struct bcm_iso_header *, ISOImagePriority),
- SIGNATURE_SIZE,
- TRUE);
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Priority has not been written properly");
- Status = STATUS_FAILURE;
- break;
- }
-
- HighestPriISO = getHighestPriISO(Adapter);
-
- if (HighestPriISO == eFlash2xSectVal) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Given ISO<%x> already has highest priority", eFlash2xSectVal);
- Status = STATUS_SUCCESS;
- break;
- }
-
- SectImagePriority = 2;
- }
-
- SectImagePriority = htonl(SectImagePriority);
-
- Status = BcmFlash2xBulkWrite(Adapter,
- &SectImagePriority,
- eFlash2xSectVal,
- 0 + FIELD_OFFSET_IN_HEADER(struct bcm_iso_header *, ISOImagePriority),
- SIGNATURE_SIZE,
- TRUE);
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Priority has not been written properly");
- break;
- }
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Signature is currupted. Hence can't increase the priority");
- Status = STATUS_FAILURE;
- break;
- }
- break;
- case DSD0:
- case DSD1:
- case DSD2:
- if (ReadDSDSignature(Adapter, eFlash2xSectVal) == DSD_IMAGE_MAGIC_NUMBER) {
- HighestPriDSD = getHighestPriDSD(Adapter);
- if (HighestPriDSD == eFlash2xSectVal) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Given DSD<%x> already has highest priority", eFlash2xSectVal);
- Status = STATUS_SUCCESS;
- break;
- }
-
- SectImagePriority = ReadDSDPriority(Adapter, HighestPriDSD) + 1;
- if (SectImagePriority == 0) {
- /* This is a SPECIAL Case which will only happen if the current highest priority DSD has priority value = 0x7FFFFFFF.
- * We will write 1 to the current Highest priority DSD And then shall increase the priority of the requested DSD
- * by user
- */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, NVM_RW, DBG_LVL_ALL, "SectImagePriority wraparound happened, eFlash2xSectVal: 0x%x\n", eFlash2xSectVal);
- SectImagePriority = htonl(0x1);
-
- Status = BcmFlash2xBulkWrite(Adapter,
- &SectImagePriority,
- HighestPriDSD,
- Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + FIELD_OFFSET_IN_HEADER(struct bcm_dsd_header *, DSDImagePriority),
- SIGNATURE_SIZE,
- TRUE);
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Priority has not been written properly");
- break;
- }
-
- HighestPriDSD = getHighestPriDSD(Adapter);
-
- if (HighestPriDSD == eFlash2xSectVal) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Made the DSD: %x highest by reducing priority of other\n", eFlash2xSectVal);
- Status = STATUS_SUCCESS;
- break;
- }
-
- SectImagePriority = htonl(0x2);
- Status = BcmFlash2xBulkWrite(Adapter,
- &SectImagePriority,
- HighestPriDSD,
- Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + FIELD_OFFSET_IN_HEADER(struct bcm_dsd_header *, DSDImagePriority),
- SIGNATURE_SIZE,
- TRUE);
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Priority has not been written properly");
- break;
- }
-
- HighestPriDSD = getHighestPriDSD(Adapter);
- if (HighestPriDSD == eFlash2xSectVal) {
- Status = STATUS_SUCCESS;
- break;
- }
-
- SectImagePriority = 3;
- }
- SectImagePriority = htonl(SectImagePriority);
- Status = BcmFlash2xBulkWrite(Adapter,
- &SectImagePriority,
- eFlash2xSectVal,
- Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + FIELD_OFFSET_IN_HEADER(struct bcm_dsd_header *, DSDImagePriority),
- SIGNATURE_SIZE,
- TRUE);
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Priority has not been written properly");
- Status = STATUS_FAILURE;
- break;
- }
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Signature is currupted. Hence can't increase the priority");
- Status = STATUS_FAILURE;
- break;
- }
- break;
- case VSA0:
- case VSA1:
- case VSA2:
- /* Has to be decided */
- break;
- default:
- Status = STATUS_FAILURE;
- break;
- }
-
- Adapter->bHeaderChangeAllowed = false;
- return Status;
-}
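
The priority handling above boils down to "new priority = current highest + 1, with a special demotion step if that increment wraps to zero". The sketch below models just that idea on an in-memory array; it deliberately skips the flash writes and the intermediate re-checks the driver performs, and all names are illustrative.

#include <stdio.h>
#include <stdint.h>

static uint32_t prio[3] = { 1, 2, 0xFFFFFFFF };	/* toy per-DSD priorities */

static int highest_pri_dsd(void)
{
	int i, best = 0;

	for (i = 1; i < 3; i++)
		if (prio[i] > prio[best])
			best = i;
	return best;
}

/* Give `want` the highest priority; on wraparound, demote the current
 * highest to 1 and hand out 2, as in the SPECIAL case handled above. */
static void set_active_dsd(int want)
{
	int cur = highest_pri_dsd();
	uint32_t next = prio[cur] + 1;

	if (cur == want)
		return;
	if (next == 0) {
		prio[cur] = 1;
		next = 2;
	}
	prio[want] = next;
}

int main(void)
{
	set_active_dsd(0);
	printf("DSD0=%u DSD1=%u DSD2=%u -> active DSD%d\n",
	       prio[0], prio[1], prio[2], highest_pri_dsd());
	return 0;
}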
-
-/*
- * BcmCopyISO - Used only for copying the ISO section
- * @Adapter :- Bcm driver private data structure
- * @sCopySectStrut :- Section copy structure
- *
- * Return value:- STATUS_SUCCESS if the copy succeeds, else a negative error code
- *
- */
-
-int BcmCopyISO(struct bcm_mini_adapter *Adapter, struct bcm_flash2x_copy_section sCopySectStrut)
-{
- PCHAR Buff = NULL;
- enum bcm_flash2x_section_val eISOReadPart = 0, eISOWritePart = 0;
- unsigned int uiReadOffsetWithinPart = 0, uiWriteOffsetWithinPart = 0;
- unsigned int uiTotalDataToCopy = 0;
- bool IsThisHeaderSector = false;
- unsigned int sigOffset = 0;
- unsigned int ISOLength = 0;
- unsigned int Status = STATUS_SUCCESS;
- unsigned int SigBuff[MAX_RW_SIZE];
- unsigned int i = 0;
-
- if (ReadISOSignature(Adapter, sCopySectStrut.SrcSection) != ISO_IMAGE_MAGIC_NUMBER) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "error as Source ISO Section does not have valid signature");
- return STATUS_FAILURE;
- }
-
- Status = BcmFlash2xBulkRead(Adapter, &ISOLength,
- sCopySectStrut.SrcSection,
- 0 + FIELD_OFFSET_IN_HEADER(struct bcm_iso_header *, ISOImageSize),
- 4);
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Read failed while copying ISO\n");
- return Status;
- }
-
- ISOLength = htonl(ISOLength);
- if (ISOLength % Adapter->uiSectorSize)
- ISOLength = Adapter->uiSectorSize * (1 + ISOLength/Adapter->uiSectorSize);
-
- sigOffset = FIELD_OFFSET_IN_HEADER(struct bcm_iso_header *, ISOImageMagicNumber);
-
- Buff = kzalloc(Adapter->uiSectorSize, GFP_KERNEL);
-
- if (!Buff) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Memory allocation failed for section size");
- return -ENOMEM;
- }
-
- if (sCopySectStrut.SrcSection == ISO_IMAGE1 && sCopySectStrut.DstSection == ISO_IMAGE2) {
- eISOReadPart = ISO_IMAGE1;
- eISOWritePart = ISO_IMAGE2;
- uiReadOffsetWithinPart = 0;
- uiWriteOffsetWithinPart = 0;
-
- uiTotalDataToCopy = (Adapter->psFlash2xCSInfo->OffsetISOImage1Part1End) -
- (Adapter->psFlash2xCSInfo->OffsetISOImage1Part1Start) +
- (Adapter->psFlash2xCSInfo->OffsetISOImage1Part2End) -
- (Adapter->psFlash2xCSInfo->OffsetISOImage1Part2Start) +
- (Adapter->psFlash2xCSInfo->OffsetISOImage1Part3End) -
- (Adapter->psFlash2xCSInfo->OffsetISOImage1Part3Start);
-
- if (uiTotalDataToCopy < ISOLength) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "error as Source ISO Section does not have valid signature");
- Status = STATUS_FAILURE;
- goto out;
- }
-
- uiTotalDataToCopy = (Adapter->psFlash2xCSInfo->OffsetISOImage2Part1End) -
- (Adapter->psFlash2xCSInfo->OffsetISOImage2Part1Start) +
- (Adapter->psFlash2xCSInfo->OffsetISOImage2Part2End) -
- (Adapter->psFlash2xCSInfo->OffsetISOImage2Part2Start) +
- (Adapter->psFlash2xCSInfo->OffsetISOImage2Part3End) -
- (Adapter->psFlash2xCSInfo->OffsetISOImage2Part3Start);
-
- if (uiTotalDataToCopy < ISOLength) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "error as Dest ISO Section does not have enough section size");
- Status = STATUS_FAILURE;
- goto out;
- }
-
- uiTotalDataToCopy = ISOLength;
-
- CorruptISOSig(Adapter, ISO_IMAGE2);
- while (uiTotalDataToCopy) {
- if (uiTotalDataToCopy == Adapter->uiSectorSize) {
- /* Setting up the write of the first sector. The first (signature) sector is written last */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Writing the signature sector");
- eISOReadPart = ISO_IMAGE1;
- uiReadOffsetWithinPart = 0;
- eISOWritePart = ISO_IMAGE2;
- uiWriteOffsetWithinPart = 0;
- IsThisHeaderSector = TRUE;
- } else {
- uiReadOffsetWithinPart = uiReadOffsetWithinPart + Adapter->uiSectorSize;
- uiWriteOffsetWithinPart = uiWriteOffsetWithinPart + Adapter->uiSectorSize;
-
- if ((eISOReadPart == ISO_IMAGE1) && (uiReadOffsetWithinPart == (Adapter->psFlash2xCSInfo->OffsetISOImage1Part1End - Adapter->psFlash2xCSInfo->OffsetISOImage1Part1Start))) {
- eISOReadPart = ISO_IMAGE1_PART2;
- uiReadOffsetWithinPart = 0;
- }
-
- if ((eISOReadPart == ISO_IMAGE1_PART2) && (uiReadOffsetWithinPart == (Adapter->psFlash2xCSInfo->OffsetISOImage1Part2End - Adapter->psFlash2xCSInfo->OffsetISOImage1Part2Start))) {
- eISOReadPart = ISO_IMAGE1_PART3;
- uiReadOffsetWithinPart = 0;
- }
-
- if ((eISOWritePart == ISO_IMAGE2) && (uiWriteOffsetWithinPart == (Adapter->psFlash2xCSInfo->OffsetISOImage2Part1End - Adapter->psFlash2xCSInfo->OffsetISOImage2Part1Start))) {
- eISOWritePart = ISO_IMAGE2_PART2;
- uiWriteOffsetWithinPart = 0;
- }
-
- if ((eISOWritePart == ISO_IMAGE2_PART2) && (uiWriteOffsetWithinPart == (Adapter->psFlash2xCSInfo->OffsetISOImage2Part2End - Adapter->psFlash2xCSInfo->OffsetISOImage2Part2Start))) {
- eISOWritePart = ISO_IMAGE2_PART3;
- uiWriteOffsetWithinPart = 0;
- }
- }
-
- Status = BcmFlash2xBulkRead(Adapter,
- (PUINT)Buff,
- eISOReadPart,
- uiReadOffsetWithinPart,
- Adapter->uiSectorSize);
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Read failed while copying ISO: Part: %x, OffsetWithinPart: %x\n", eISOReadPart, uiReadOffsetWithinPart);
- break;
- }
-
- if (IsThisHeaderSector == TRUE) {
- /* If this is the header sector, blank the signature bytes now (0xFF) and write the real signature last */
- memcpy(SigBuff, Buff + sigOffset, sizeof(SigBuff));
-
- for (i = 0; i < MAX_RW_SIZE; i++)
- *(Buff + sigOffset + i) = 0xFF;
- }
- Adapter->bHeaderChangeAllowed = TRUE;
- Status = BcmFlash2xBulkWrite(Adapter,
- (PUINT)Buff,
- eISOWritePart,
- uiWriteOffsetWithinPart,
- Adapter->uiSectorSize,
- TRUE);
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Write failed while copying ISO: Part: %x, OffsetWithinPart: %x\n", eISOWritePart, uiWriteOffsetWithinPart);
- break;
- }
-
- Adapter->bHeaderChangeAllowed = false;
- if (IsThisHeaderSector == TRUE) {
- WriteToFlashWithoutSectorErase(Adapter,
- SigBuff,
- eISOWritePart,
- sigOffset,
- MAX_RW_SIZE);
- IsThisHeaderSector = false;
- }
- /* subtracting the written Data */
- uiTotalDataToCopy = uiTotalDataToCopy - Adapter->uiSectorSize;
- }
- }
-
- if (sCopySectStrut.SrcSection == ISO_IMAGE2 && sCopySectStrut.DstSection == ISO_IMAGE1) {
- eISOReadPart = ISO_IMAGE2;
- eISOWritePart = ISO_IMAGE1;
- uiReadOffsetWithinPart = 0;
- uiWriteOffsetWithinPart = 0;
-
- uiTotalDataToCopy = (Adapter->psFlash2xCSInfo->OffsetISOImage2Part1End) -
- (Adapter->psFlash2xCSInfo->OffsetISOImage2Part1Start) +
- (Adapter->psFlash2xCSInfo->OffsetISOImage2Part2End) -
- (Adapter->psFlash2xCSInfo->OffsetISOImage2Part2Start) +
- (Adapter->psFlash2xCSInfo->OffsetISOImage2Part3End) -
- (Adapter->psFlash2xCSInfo->OffsetISOImage2Part3Start);
-
- if (uiTotalDataToCopy < ISOLength) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "error as Source ISO Section does not have valid signature");
- Status = STATUS_FAILURE;
- goto out;
- }
-
- uiTotalDataToCopy = (Adapter->psFlash2xCSInfo->OffsetISOImage1Part1End) -
- (Adapter->psFlash2xCSInfo->OffsetISOImage1Part1Start) +
- (Adapter->psFlash2xCSInfo->OffsetISOImage1Part2End) -
- (Adapter->psFlash2xCSInfo->OffsetISOImage1Part2Start) +
- (Adapter->psFlash2xCSInfo->OffsetISOImage1Part3End) -
- (Adapter->psFlash2xCSInfo->OffsetISOImage1Part3Start);
-
- if (uiTotalDataToCopy < ISOLength) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "error as Dest ISO Section does not have enough section size");
- Status = STATUS_FAILURE;
- goto out;
- }
-
- uiTotalDataToCopy = ISOLength;
-
- CorruptISOSig(Adapter, ISO_IMAGE1);
-
- while (uiTotalDataToCopy) {
- if (uiTotalDataToCopy == Adapter->uiSectorSize) {
- /* Setting up the write of the first sector. The first (signature) sector is written last */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Writing the signature sector");
- eISOReadPart = ISO_IMAGE2;
- uiReadOffsetWithinPart = 0;
- eISOWritePart = ISO_IMAGE1;
- uiWriteOffsetWithinPart = 0;
- IsThisHeaderSector = TRUE;
- } else {
- uiReadOffsetWithinPart = uiReadOffsetWithinPart + Adapter->uiSectorSize;
- uiWriteOffsetWithinPart = uiWriteOffsetWithinPart + Adapter->uiSectorSize;
-
- if ((eISOReadPart == ISO_IMAGE2) && (uiReadOffsetWithinPart == (Adapter->psFlash2xCSInfo->OffsetISOImage2Part1End - Adapter->psFlash2xCSInfo->OffsetISOImage2Part1Start))) {
- eISOReadPart = ISO_IMAGE2_PART2;
- uiReadOffsetWithinPart = 0;
- }
-
- if ((eISOReadPart == ISO_IMAGE2_PART2) && (uiReadOffsetWithinPart == (Adapter->psFlash2xCSInfo->OffsetISOImage2Part2End - Adapter->psFlash2xCSInfo->OffsetISOImage2Part2Start))) {
- eISOReadPart = ISO_IMAGE2_PART3;
- uiReadOffsetWithinPart = 0;
- }
-
- if ((eISOWritePart == ISO_IMAGE1) && (uiWriteOffsetWithinPart == (Adapter->psFlash2xCSInfo->OffsetISOImage1Part1End - Adapter->psFlash2xCSInfo->OffsetISOImage1Part1Start))) {
- eISOWritePart = ISO_IMAGE1_PART2;
- uiWriteOffsetWithinPart = 0;
- }
-
- if ((eISOWritePart == ISO_IMAGE1_PART2) && (uiWriteOffsetWithinPart == (Adapter->psFlash2xCSInfo->OffsetISOImage1Part2End - Adapter->psFlash2xCSInfo->OffsetISOImage1Part2Start))) {
- eISOWritePart = ISO_IMAGE1_PART3;
- uiWriteOffsetWithinPart = 0;
- }
- }
-
- Status = BcmFlash2xBulkRead(Adapter,
- (PUINT)Buff,
- eISOReadPart,
- uiReadOffsetWithinPart,
- Adapter->uiSectorSize);
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Read failed while copying ISO: Part: %x, OffsetWithinPart: %x\n", eISOReadPart, uiReadOffsetWithinPart);
- break;
- }
-
- if (IsThisHeaderSector == TRUE) {
- /* If this is the header sector, blank the signature bytes now (0xFF) and write the real signature last */
- memcpy(SigBuff, Buff + sigOffset, sizeof(SigBuff));
-
- for (i = 0; i < MAX_RW_SIZE; i++)
- *(Buff + sigOffset + i) = 0xFF;
- }
- Adapter->bHeaderChangeAllowed = TRUE;
- Status = BcmFlash2xBulkWrite(Adapter,
- (PUINT)Buff,
- eISOWritePart,
- uiWriteOffsetWithinPart,
- Adapter->uiSectorSize,
- TRUE);
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Write failed while copying ISO: Part: %x, OffsetWithinPart: %x\n", eISOWritePart, uiWriteOffsetWithinPart);
- break;
- }
-
- Adapter->bHeaderChangeAllowed = false;
- if (IsThisHeaderSector == TRUE) {
- WriteToFlashWithoutSectorErase(Adapter,
- SigBuff,
- eISOWritePart,
- sigOffset,
- MAX_RW_SIZE);
-
- IsThisHeaderSector = false;
- }
-
- /* subtracting the written Data */
- uiTotalDataToCopy = uiTotalDataToCopy - Adapter->uiSectorSize;
- }
- }
-out:
- kfree(Buff);
-
- return Status;
-}
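
The essential ordering trick in BcmCopyISO() — blank the destination signature before copying and restore it only after every sector has been written — is easier to see without the flash plumbing. A toy model of that scheme, with purely illustrative types and values:

#include <stdio.h>
#include <string.h>

#define SECTORS	4
#define MAGIC	0xBECEu		/* stand-in for ISO_IMAGE_MAGIC_NUMBER */

struct toy_image {
	unsigned int magic;
	unsigned char sector[SECTORS][8];
};

/* Copy an image so that an interruption can never leave the destination
 * looking valid: the signature is corrupted first and written back last. */
static void copy_image(struct toy_image *dst, const struct toy_image *src)
{
	int i;

	dst->magic = 0;					/* CorruptISOSig() step */
	for (i = 0; i < SECTORS; i++)			/* bulk sector copy */
		memcpy(dst->sector[i], src->sector[i], sizeof(dst->sector[i]));
	dst->magic = src->magic;			/* signature written last */
}

int main(void)
{
	struct toy_image src = { .magic = MAGIC }, dst = { 0 };

	copy_image(&dst, &src);
	printf("dst magic 0x%x (valid=%d)\n", dst.magic, dst.magic == MAGIC);
	return 0;
}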
-
-/*
- * BcmFlash2xCorruptSig : corrupts the signature in the Bcm header of the given flash section.
- * If the section is writable, the signature is corrupted by zeroing its first bytes.
- * @Adapter :- Bcm driver private data structure
- * @eFlash2xSectionVal :- Flash section val which has a header
- *
- * Return Value :-
- * Success :- If the section is present and writable, corrupt the sig and return STATUS_SUCCESS
- * Failure :- Return a negative error code
- */
-
-int BcmFlash2xCorruptSig(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val eFlash2xSectionVal)
-{
- int Status = STATUS_SUCCESS;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section Value :%x\n", eFlash2xSectionVal);
-
- if ((eFlash2xSectionVal == DSD0) || (eFlash2xSectionVal == DSD1) || (eFlash2xSectionVal == DSD2)) {
- Status = CorruptDSDSig(Adapter, eFlash2xSectionVal);
- } else if (eFlash2xSectionVal == ISO_IMAGE1 || eFlash2xSectionVal == ISO_IMAGE2) {
- Status = CorruptISOSig(Adapter, eFlash2xSectionVal);
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Given Section <%d>does not have Header", eFlash2xSectionVal);
- return STATUS_SUCCESS;
- }
- return Status;
-}
-
-/*
- * BcmFlash2xWriteSig :- writes the signature if the requested section has a
- * header and write permission.
- * @Adapter :- Bcm driver private data structure
- * @eFlashSectionVal :- Flash section val which has a header
- *
- * Return Value :-
- * Success :- If the section is present and writable, write the sig and return STATUS_SUCCESS
- * Failure :- Return a negative error code
- */
-
-int BcmFlash2xWriteSig(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val eFlashSectionVal)
-{
- unsigned int uiSignature = 0;
- unsigned int uiOffset = 0;
-
- /* struct bcm_dsd_header dsdHeader = {0}; */
- if (Adapter->bSigCorrupted == false) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Signature is not corrupted by driver, hence not restoring\n");
- return STATUS_SUCCESS;
- }
-
- if (Adapter->bAllDSDWriteAllow == false) {
- if (IsSectionWritable(Adapter, eFlashSectionVal) == false) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section is not Writable...Hence can't Write signature");
- return SECTOR_IS_NOT_WRITABLE;
- }
- }
-
- if ((eFlashSectionVal == DSD0) || (eFlashSectionVal == DSD1) || (eFlashSectionVal == DSD2)) {
- uiSignature = htonl(DSD_IMAGE_MAGIC_NUMBER);
- uiOffset = Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader;
-
- uiOffset += FIELD_OFFSET_IN_HEADER(struct bcm_dsd_header *, DSDImageMagicNumber);
-
- if ((ReadDSDSignature(Adapter, eFlashSectionVal) & 0xFF000000) != CORRUPTED_PATTERN) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Corrupted Pattern is not there. Hence won't write sig");
- return STATUS_FAILURE;
- }
- } else if ((eFlashSectionVal == ISO_IMAGE1) || (eFlashSectionVal == ISO_IMAGE2)) {
- uiSignature = htonl(ISO_IMAGE_MAGIC_NUMBER);
- /* uiOffset = 0; */
- uiOffset = FIELD_OFFSET_IN_HEADER(struct bcm_iso_header *, ISOImageMagicNumber);
- if ((ReadISOSignature(Adapter, eFlashSectionVal) & 0xFF000000) != CORRUPTED_PATTERN) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Currupted Pattern is not there. Hence won't write sig");
- return STATUS_FAILURE;
- }
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "GIVEN SECTION< %d > IS NOT VALID FOR SIG WRITE...", eFlashSectionVal);
- return STATUS_FAILURE;
- }
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Restoring the signature");
-
- Adapter->bHeaderChangeAllowed = TRUE;
- Adapter->bSigCorrupted = false;
- BcmFlash2xBulkWrite(Adapter, &uiSignature, eFlashSectionVal, uiOffset, SIGNATURE_SIZE, TRUE);
- Adapter->bHeaderChangeAllowed = false;
-
- return STATUS_SUCCESS;
-}
-
-/*
- * validateFlash2xReadWrite :- Validates the user request for Read/Write.
- * If the requested bytes go beyond the requested section, an error is reported.
- * @Adapter :- Bcm driver private data structure
- * @psFlash2xReadWrite :- Flash2x read/write structure pointer
- *
- * Return values:- Return TRUE if the request is valid, else false.
- */
-
-int validateFlash2xReadWrite(struct bcm_mini_adapter *Adapter, struct bcm_flash2x_readwrite *psFlash2xReadWrite)
-{
- unsigned int uiNumOfBytes = 0;
- unsigned int uiSectStartOffset = 0;
- unsigned int uiSectEndOffset = 0;
-
- uiNumOfBytes = psFlash2xReadWrite->numOfBytes;
-
- if (IsSectionExistInFlash(Adapter, psFlash2xReadWrite->Section) != TRUE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section<%x> does not exist in Flash", psFlash2xReadWrite->Section);
- return false;
- }
- uiSectStartOffset = BcmGetSectionValStartOffset(Adapter, psFlash2xReadWrite->Section);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Start offset :%x ,section :%d\n", uiSectStartOffset, psFlash2xReadWrite->Section);
- if ((psFlash2xReadWrite->Section == ISO_IMAGE1) || (psFlash2xReadWrite->Section == ISO_IMAGE2)) {
- if (psFlash2xReadWrite->Section == ISO_IMAGE1) {
- uiSectEndOffset = BcmGetSectionValEndOffset(Adapter, ISO_IMAGE1) -
- BcmGetSectionValStartOffset(Adapter, ISO_IMAGE1) +
- BcmGetSectionValEndOffset(Adapter, ISO_IMAGE1_PART2) -
- BcmGetSectionValStartOffset(Adapter, ISO_IMAGE1_PART2) +
- BcmGetSectionValEndOffset(Adapter, ISO_IMAGE1_PART3) -
- BcmGetSectionValStartOffset(Adapter, ISO_IMAGE1_PART3);
- } else if (psFlash2xReadWrite->Section == ISO_IMAGE2) {
- uiSectEndOffset = BcmGetSectionValEndOffset(Adapter, ISO_IMAGE2) -
- BcmGetSectionValStartOffset(Adapter, ISO_IMAGE2) +
- BcmGetSectionValEndOffset(Adapter, ISO_IMAGE2_PART2) -
- BcmGetSectionValStartOffset(Adapter, ISO_IMAGE2_PART2) +
- BcmGetSectionValEndOffset(Adapter, ISO_IMAGE2_PART3) -
- BcmGetSectionValStartOffset(Adapter, ISO_IMAGE2_PART3);
- }
-
- /* uiSectEndOffset currently holds the size of the ISO image, so add it to the
-  * start offset to get the virtual end offset used by the boundary check at the
-  * end of this function.
-  */
- uiSectEndOffset = uiSectStartOffset + uiSectEndOffset;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Total size of the ISO Image :%x", uiSectEndOffset);
- } else
- uiSectEndOffset = BcmGetSectionValEndOffset(Adapter, psFlash2xReadWrite->Section);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "End offset :%x\n", uiSectEndOffset);
-
- /* psFlash2xReadWrite->offset and uiNumOfBytes are user controlled and can lead to integer overflows */
- if (psFlash2xReadWrite->offset > uiSectEndOffset) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Invalid Request....");
- return false;
- }
- if (uiNumOfBytes > uiSectEndOffset) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Invalid Request....");
- return false;
- }
- /* Checking the boundary condition */
- if ((uiSectStartOffset + psFlash2xReadWrite->offset + uiNumOfBytes) <= uiSectEndOffset)
- return TRUE;
- else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Invalid Request....");
- return false;
- }
-}
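
Since the offset and byte count are user controlled, the boundary check is the security-relevant part of this function. Below is a sketch of an equivalent check written with subtractions so the 32-bit arithmetic cannot wrap; the names are illustrative, not the driver's API.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Reject the request unless offset + len fits inside the section,
 * phrased as subtractions so oversized user values cannot wrap the math. */
static bool request_fits_section(uint32_t sect_start, uint32_t sect_end,
				 uint32_t offset, uint32_t len)
{
	uint32_t sect_size = sect_end - sect_start;

	if (offset > sect_size)
		return false;
	return len <= sect_size - offset;
}

int main(void)
{
	assert(request_fits_section(0x1000, 0x2000, 0x800, 0x800));
	/* offset + len would wrap a 32-bit add, but is still rejected */
	assert(!request_fits_section(0x1000, 0x2000, 0xFFFFFFF0u, 0x20u));
	return 0;
}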
-
-/*
- * IsFlash2x :- check for Flash 2.x
- * @Adapter :- Bcm driver private data structure
- *
- * Return value:-
- * return TRUE if the flash layout is 2.x or higher, else return false.
- */
-
-int IsFlash2x(struct bcm_mini_adapter *Adapter)
-{
- if (Adapter->uiFlashLayoutMajorVersion >= FLASH_2X_MAJOR_NUMBER)
- return TRUE;
- else
- return false;
-}
-
-/*
- * GetFlashBaseAddr :- Calculate the Flash Base address
- * @Adapter :- Bcm driver private data structure
- *
- * Return Value:-
- * Success :- Base Address of the Flash
- */
-
-static int GetFlashBaseAddr(struct bcm_mini_adapter *Adapter)
-{
- unsigned int uiBaseAddr = 0;
-
- if (Adapter->bDDRInitDone) {
- /*
- * For All Valid Flash Versions... except 1.1, take the value from FlashBaseAddr
- * In case of Raw Read... use the default value
- */
- if (Adapter->uiFlashLayoutMajorVersion && (Adapter->bFlashRawRead == false) &&
- !((Adapter->uiFlashLayoutMajorVersion == 1) && (Adapter->uiFlashLayoutMinorVersion == 1)))
- uiBaseAddr = Adapter->uiFlashBaseAdd;
- else
- uiBaseAddr = FLASH_CONTIGIOUS_START_ADDR_AFTER_INIT;
- } else {
- /*
- * For All Valid Flash Versions... except 1.1, take the value from FlashBaseAddr
- * In case of Raw Read... use the default value
- */
- if (Adapter->uiFlashLayoutMajorVersion && (Adapter->bFlashRawRead == false) &&
- !((Adapter->uiFlashLayoutMajorVersion == 1) && (Adapter->uiFlashLayoutMinorVersion == 1)))
- uiBaseAddr = Adapter->uiFlashBaseAdd | FLASH_CONTIGIOUS_START_ADDR_BEFORE_INIT;
- else
- uiBaseAddr = FLASH_CONTIGIOUS_START_ADDR_BEFORE_INIT;
- }
-
- return uiBaseAddr;
-}
-
-/*
- * BcmCopySection :- Copies one section into another. Both sections must be
- * contiguous and of the same size; hence this is not applicable to copying an ISO.
- *
- * @Adapter :- Bcm driver private data structure
- * @SrcSection :- Source section from where data has to be copied
- * @DstSection :- Destination section to which data has to be copied
- * @offset :- Offset from/to which data has to be copied from one section to another.
- * @numOfBytes :- Number of bytes to copy from one section to another at the given offset.
- * If numOfBytes is zero, the complete section is copied.
- * Return Values-
- * Success :- Return STATUS_SUCCESS
- * Failure :- return a negative error code
- */
-
-int BcmCopySection(struct bcm_mini_adapter *Adapter,
- enum bcm_flash2x_section_val SrcSection,
- enum bcm_flash2x_section_val DstSection,
- unsigned int offset,
- unsigned int numOfBytes)
-{
- unsigned int BuffSize = 0;
- unsigned int BytesToBeCopied = 0;
- PUCHAR pBuff = NULL;
- int Status = STATUS_SUCCESS;
-
- if (SrcSection == DstSection) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Source and Destination should be different ...try again");
- return -EINVAL;
- }
-
- if ((SrcSection != DSD0) && (SrcSection != DSD1) && (SrcSection != DSD2)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Source should be DSD subsection");
- return -EINVAL;
- }
-
- if ((DstSection != DSD0) && (DstSection != DSD1) && (DstSection != DSD2)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Destination should be DSD subsection");
- return -EINVAL;
- }
-
- /* numOfBytes equal to zero means the complete section has to be copied */
- if (numOfBytes == 0) {
- numOfBytes = BcmGetSectionValEndOffset(Adapter, SrcSection)
- - BcmGetSectionValStartOffset(Adapter, SrcSection);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Section Size :0x%x", numOfBytes);
- }
-
- if ((offset + numOfBytes) > BcmGetSectionValEndOffset(Adapter, SrcSection)
- - BcmGetSectionValStartOffset(Adapter, SrcSection)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, " Input parameters going beyond the section offS: %x numB: %x of Source Section\n",
- offset, numOfBytes);
- return -EINVAL;
- }
-
- if ((offset + numOfBytes) > BcmGetSectionValEndOffset(Adapter, DstSection)
- - BcmGetSectionValStartOffset(Adapter, DstSection)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Input parameters going beyond the section offS: %x numB: %x of Destination Section\n",
- offset, numOfBytes);
- return -EINVAL;
- }
-
- if (numOfBytes > Adapter->uiSectorSize)
- BuffSize = Adapter->uiSectorSize;
- else
- BuffSize = numOfBytes;
-
- pBuff = kzalloc(BuffSize, GFP_KERNEL);
- if (!pBuff) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Memory allocation failed.. ");
- return -ENOMEM;
- }
-
- BytesToBeCopied = Adapter->uiSectorSize;
- if (offset % Adapter->uiSectorSize)
- BytesToBeCopied = Adapter->uiSectorSize - (offset % Adapter->uiSectorSize);
- if (BytesToBeCopied > numOfBytes)
- BytesToBeCopied = numOfBytes;
-
- Adapter->bHeaderChangeAllowed = TRUE;
-
- do {
- Status = BcmFlash2xBulkRead(Adapter, (PUINT)pBuff, SrcSection , offset, BytesToBeCopied);
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Read failed at offset :%d for NOB :%d", SrcSection, BytesToBeCopied);
- break;
- }
- Status = BcmFlash2xBulkWrite(Adapter, (PUINT)pBuff, DstSection, offset, BytesToBeCopied, false);
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Write failed at offset :%d for NOB :%d", DstSection, BytesToBeCopied);
- break;
- }
- offset = offset + BytesToBeCopied;
- numOfBytes = numOfBytes - BytesToBeCopied;
- if (numOfBytes) {
- if (numOfBytes > Adapter->uiSectorSize)
- BytesToBeCopied = Adapter->uiSectorSize;
- else
- BytesToBeCopied = numOfBytes;
- }
- } while (numOfBytes > 0);
-
- kfree(pBuff);
- Adapter->bHeaderChangeAllowed = false;
-
- return Status;
-}
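
The chunking in the copy loop above is a common pattern: the first chunk only runs up to the next sector boundary, after which full sectors are copied until the tail. A minimal sketch of just that arithmetic, with the flash reads and writes replaced by a printf; the 64 KiB sector size is taken from DEFAULT_SECTOR_SIZE in nvm.h below, the offsets are made up for the example.

#include <stdio.h>

#define SECTOR_SIZE (64 * 1024)		/* DEFAULT_SECTOR_SIZE */

static void copy_in_sector_chunks(unsigned int offset, unsigned int len)
{
	/* First chunk: only up to the next sector boundary. */
	unsigned int chunk = SECTOR_SIZE - (offset % SECTOR_SIZE);

	while (len) {
		if (chunk > len)
			chunk = len;
		printf("copy %u bytes at offset 0x%x\n", chunk, offset);
		offset += chunk;
		len -= chunk;
		chunk = SECTOR_SIZE;	/* later chunks are full sectors */
	}
}

int main(void)
{
	copy_in_sector_chunks(0x1800, 0x28000);	/* unaligned start, 160 KiB */
	return 0;
}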
-
-/*
- * SaveHeaderIfPresent :- Protects the header when the sector containing it is written
- * @Adapter :- Bcm driver private data structure
- * @pBuff :- Data buffer that has to be written to the sector holding the header map.
- * @uiOffset :- Flash offset that has to be written.
- *
- * Return value :-
- * Success :- On success return STATUS_SUCCESS
- * Failure :- Return a negative error code
- */
-
-static int SaveHeaderIfPresent(struct bcm_mini_adapter *Adapter, PUCHAR pBuff, unsigned int uiOffset)
-{
- unsigned int offsetToProtect = 0, HeaderSizeToProtect = 0;
- bool bHasHeader = false;
- PUCHAR pTempBuff = NULL;
- unsigned int uiSectAlignAddr = 0;
- unsigned int sig = 0;
-
- /* making the offset sector aligned */
- uiSectAlignAddr = uiOffset & ~(Adapter->uiSectorSize - 1);
-
- if ((uiSectAlignAddr == BcmGetSectionValEndOffset(Adapter, DSD2) - Adapter->uiSectorSize) ||
- (uiSectAlignAddr == BcmGetSectionValEndOffset(Adapter, DSD1) - Adapter->uiSectorSize) ||
- (uiSectAlignAddr == BcmGetSectionValEndOffset(Adapter, DSD0) - Adapter->uiSectorSize)) {
- /* offset from the sector boundary having the header map */
- offsetToProtect = Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader % Adapter->uiSectorSize;
- HeaderSizeToProtect = sizeof(struct bcm_dsd_header);
- bHasHeader = TRUE;
- }
-
- if (uiSectAlignAddr == BcmGetSectionValStartOffset(Adapter, ISO_IMAGE1) ||
- uiSectAlignAddr == BcmGetSectionValStartOffset(Adapter, ISO_IMAGE2)) {
- offsetToProtect = 0;
- HeaderSizeToProtect = sizeof(struct bcm_iso_header);
- bHasHeader = TRUE;
- }
- /* If Header is present overwrite passed buffer with this */
- if (bHasHeader && (Adapter->bHeaderChangeAllowed == false)) {
- pTempBuff = kzalloc(HeaderSizeToProtect, GFP_KERNEL);
- if (!pTempBuff) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Memory allocation failed");
- return -ENOMEM;
- }
- /* Read header */
- BeceemFlashBulkRead(Adapter, (PUINT)pTempBuff, (uiSectAlignAddr + offsetToProtect), HeaderSizeToProtect);
- BCM_DEBUG_PRINT_BUFFER(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, pTempBuff, HeaderSizeToProtect);
- /* Replace Buffer content with Header */
- memcpy(pBuff + offsetToProtect, pTempBuff, HeaderSizeToProtect);
-
- kfree(pTempBuff);
- }
- if (bHasHeader && Adapter->bSigCorrupted) {
- sig = *((PUINT)(pBuff + offsetToProtect + FIELD_OFFSET_IN_HEADER(struct bcm_dsd_header *, DSDImageMagicNumber)));
- sig = ntohl(sig);
- if ((sig & 0xFF000000) != CORRUPTED_PATTERN) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Desired pattern is not at sig offset. Hence won't restore");
- Adapter->bSigCorrupted = false;
- return STATUS_SUCCESS;
- }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, " Corrupted sig is :%X", sig);
- *((PUINT)(pBuff + offsetToProtect + FIELD_OFFSET_IN_HEADER(struct bcm_dsd_header *, DSDImageMagicNumber))) = htonl(DSD_IMAGE_MAGIC_NUMBER);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Restoring the signature in Header Write only");
- Adapter->bSigCorrupted = false;
- }
-
- return STATUS_SUCCESS;
-}
-
-/*
- * BcmDoChipSelect : Selects the appropriate chip for writing.
- * @Adapter :- Bcm driver private data structure
- *
- * Output:-
- * Select the appropriate chip and return STATUS_SUCCESS
- */
-static int BcmDoChipSelect(struct bcm_mini_adapter *Adapter, unsigned int offset)
-{
- unsigned int FlashConfig = 0;
- int ChipNum = 0;
- unsigned int GPIOConfig = 0;
- unsigned int PartNum = 0;
-
- ChipNum = offset / FLASH_PART_SIZE;
-
- /*
- * Chip Select mapping to enable flash0.
- * To select flash 0, we have to OR with (0<<12).
- * ORing 0 will have no impact so not doing that part.
- * In future, if the chip select value changes from 0 to non-zero,
- * that will need to be handled with backward compatibility in mind. No worries for now.
- */
-
- /*
- * The SelectedChip variable is the selection that the host is sure matches what the register holds. This can only be ensured
- * if the chip doesn't go to a low power mode while the flash operation is in progress (NVMRdmWrmLock is taken).
- * Before every new flash write operation, we reset the variable, because after any wake-up from
- * power-down modes (idle mode/shutdown mode) the values in the register may be different.
- */
-
- if (Adapter->SelectedChip == ChipNum)
- return STATUS_SUCCESS;
-
- /* BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Selected Chip :%x", ChipNum); */
- Adapter->SelectedChip = ChipNum;
-
- /* bit[13..12] will select the appropriate chip */
- rdmalt(Adapter, FLASH_CONFIG_REG, &FlashConfig, 4);
- rdmalt(Adapter, FLASH_GPIO_CONFIG_REG, &GPIOConfig, 4);
- {
- switch (ChipNum) {
- case 0:
- PartNum = 0;
- break;
- case 1:
- PartNum = 3;
- GPIOConfig |= (0x4 << CHIP_SELECT_BIT12);
- break;
- case 2:
- PartNum = 1;
- GPIOConfig |= (0x1 << CHIP_SELECT_BIT12);
- break;
- case 3:
- PartNum = 2;
- GPIOConfig |= (0x2 << CHIP_SELECT_BIT12);
- break;
- }
- }
- /* In case the bits already written in the FLASH_CONFIG_REG is same as what the user desired,
- * nothing to do... can return immediately.
- * ASSUMPTION: FLASH_GPIO_CONFIG_REG will be in sync with FLASH_CONFIG_REG.
- * Even if the chip goes to low power mode, it should wake with values in each register in sync with each other.
- * These values are not written by host other than during CHIP_SELECT.
- */
- if (PartNum == ((FlashConfig >> CHIP_SELECT_BIT12) & 0x3))
- return STATUS_SUCCESS;
-
- /* clearing the bit[13..12] */
- FlashConfig &= 0xFFFFCFFF;
- FlashConfig = (FlashConfig | (PartNum<<CHIP_SELECT_BIT12)); /* 00 */
-
- wrmalt(Adapter, FLASH_GPIO_CONFIG_REG, &GPIOConfig, 4);
- udelay(100);
-
- wrmalt(Adapter, FLASH_CONFIG_REG, &FlashConfig, 4);
- udelay(100);
-
- return STATUS_SUCCESS;
-}
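
For reference, the switch above encodes a small lookup from chip number to the two FLASH_CONFIG bits [13:12] (chip 0 -> part 0, chip 1 -> part 3, chip 2 -> part 1, chip 3 -> part 2). A stand-alone sketch of that mapping follows; the 16 MiB FLASH_PART_SIZE window and the sample offset are assumptions for the example only.

#include <stdio.h>

#define FLASH_PART_SIZE		(16 * 1024 * 1024)	/* assumed per-chip window */
#define CHIP_SELECT_BIT12	12

/* chip number -> value programmed into FLASH_CONFIG bits [13:12] */
static unsigned int part_for_chip(unsigned int chip)
{
	static const unsigned int part[4] = { 0, 3, 1, 2 };

	return part[chip & 0x3];
}

int main(void)
{
	unsigned int offset = 0x2400000;	/* example absolute flash offset */
	unsigned int chip = offset / FLASH_PART_SIZE;

	printf("offset 0x%x -> chip %u, FLASH_CONFIG |= 0x%x\n",
	       offset, chip, part_for_chip(chip) << CHIP_SELECT_BIT12);
	return 0;
}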
-
-static int ReadDSDSignature(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val dsd)
-{
- unsigned int uiDSDsig = 0;
- /* unsigned int sigoffsetInMap = 0;
- * struct bcm_dsd_header dsdHeader = {0};
- */
-
- /* sigoffsetInMap =(PUCHAR)&(dsdHeader.DSDImageMagicNumber) -(PUCHAR)&dsdHeader; */
-
- if (dsd != DSD0 && dsd != DSD1 && dsd != DSD2) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "passed section value is not for DSDs");
- return STATUS_FAILURE;
- }
- BcmFlash2xBulkRead(Adapter,
- &uiDSDsig,
- dsd,
- Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + FIELD_OFFSET_IN_HEADER(struct bcm_dsd_header *, DSDImageMagicNumber),
- SIGNATURE_SIZE);
-
- uiDSDsig = ntohl(uiDSDsig);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "DSD SIG :%x", uiDSDsig);
-
- return uiDSDsig;
-}
-
-static int ReadDSDPriority(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val dsd)
-{
- /* unsigned int priOffsetInMap = 0 ; */
- unsigned int uiDSDPri = STATUS_FAILURE;
- /* struct bcm_dsd_header dsdHeader = {0};
- * priOffsetInMap = (PUCHAR)&(dsdHeader.DSDImagePriority) -(PUCHAR)&dsdHeader;
- */
- if (IsSectionWritable(Adapter, dsd)) {
- if (ReadDSDSignature(Adapter, dsd) == DSD_IMAGE_MAGIC_NUMBER) {
- BcmFlash2xBulkRead(Adapter,
- &uiDSDPri,
- dsd,
- Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + FIELD_OFFSET_IN_HEADER(struct bcm_dsd_header *, DSDImagePriority),
- 4);
-
- uiDSDPri = ntohl(uiDSDPri);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "DSD<%x> Priority :%x", dsd, uiDSDPri);
- }
- }
-
- return uiDSDPri;
-}
-
-static enum bcm_flash2x_section_val getHighestPriDSD(struct bcm_mini_adapter *Adapter)
-{
- int DSDHighestPri = STATUS_FAILURE;
- int DsdPri = 0;
- enum bcm_flash2x_section_val HighestPriDSD = 0;
-
- if (IsSectionWritable(Adapter, DSD2)) {
- DSDHighestPri = ReadDSDPriority(Adapter, DSD2);
- HighestPriDSD = DSD2;
- }
-
- if (IsSectionWritable(Adapter, DSD1)) {
- DsdPri = ReadDSDPriority(Adapter, DSD1);
- if (DSDHighestPri < DsdPri) {
- DSDHighestPri = DsdPri;
- HighestPriDSD = DSD1;
- }
- }
-
- if (IsSectionWritable(Adapter, DSD0)) {
- DsdPri = ReadDSDPriority(Adapter, DSD0);
- if (DSDHighestPri < DsdPri) {
- DSDHighestPri = DsdPri;
- HighestPriDSD = DSD0;
- }
- }
- if (HighestPriDSD)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Highest DSD :%x , and its Pri :%x", HighestPriDSD, DSDHighestPri);
-
- return HighestPriDSD;
-}
-
-static int ReadISOSignature(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val iso)
-{
- unsigned int uiISOsig = 0;
- /* unsigned int sigoffsetInMap = 0;
- * struct bcm_iso_header ISOHeader = {0};
- * sigoffsetInMap =(PUCHAR)&(ISOHeader.ISOImageMagicNumber) -(PUCHAR)&ISOHeader;
- */
- if (iso != ISO_IMAGE1 && iso != ISO_IMAGE2) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "passed section value is not for ISOs");
- return STATUS_FAILURE;
- }
- BcmFlash2xBulkRead(Adapter,
- &uiISOsig,
- iso,
- 0 + FIELD_OFFSET_IN_HEADER(struct bcm_iso_header *, ISOImageMagicNumber),
- SIGNATURE_SIZE);
-
- uiISOsig = ntohl(uiISOsig);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "ISO SIG :%x", uiISOsig);
-
- return uiISOsig;
-}
-
-static int ReadISOPriority(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val iso)
-{
- unsigned int ISOPri = STATUS_FAILURE;
-
- if (IsSectionWritable(Adapter, iso)) {
- if (ReadISOSignature(Adapter, iso) == ISO_IMAGE_MAGIC_NUMBER) {
- BcmFlash2xBulkRead(Adapter,
- &ISOPri,
- iso,
- 0 + FIELD_OFFSET_IN_HEADER(struct bcm_iso_header *, ISOImagePriority),
- 4);
-
- ISOPri = ntohl(ISOPri);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "ISO<%x> Priority :%x", iso, ISOPri);
- }
- }
-
- return ISOPri;
-}
-
-static enum bcm_flash2x_section_val getHighestPriISO(struct bcm_mini_adapter *Adapter)
-{
- int ISOHighestPri = STATUS_FAILURE;
- int ISOPri = 0;
- enum bcm_flash2x_section_val HighestPriISO = NO_SECTION_VAL;
-
- if (IsSectionWritable(Adapter, ISO_IMAGE2)) {
- ISOHighestPri = ReadISOPriority(Adapter, ISO_IMAGE2);
- HighestPriISO = ISO_IMAGE2;
- }
-
- if (IsSectionWritable(Adapter, ISO_IMAGE1)) {
- ISOPri = ReadISOPriority(Adapter, ISO_IMAGE1);
- if (ISOHighestPri < ISOPri) {
- ISOHighestPri = ISOPri;
- HighestPriISO = ISO_IMAGE1;
- }
- }
- if (HighestPriISO)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Highest ISO :%x and its Pri :%x", HighestPriISO, ISOHighestPri);
-
- return HighestPriISO;
-}
-
-static int WriteToFlashWithoutSectorErase(struct bcm_mini_adapter *Adapter,
- PUINT pBuff,
- enum bcm_flash2x_section_val eFlash2xSectionVal,
- unsigned int uiOffset,
- unsigned int uiNumBytes)
-{
- #if !defined(BCM_SHM_INTERFACE) || defined(FLASH_DIRECT_ACCESS)
- unsigned int uiTemp = 0, value = 0;
- unsigned int i = 0;
- unsigned int uiPartOffset = 0;
- #endif
- unsigned int uiStartOffset = 0;
- /* Adding section start address */
- int Status = STATUS_SUCCESS;
- PUCHAR pcBuff = (PUCHAR)pBuff;
-
- if (uiNumBytes % Adapter->ulFlashWriteSize) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Writing without Sector Erase for non-FlashWriteSize number of bytes 0x%x\n", uiNumBytes);
- return STATUS_FAILURE;
- }
-
- uiStartOffset = BcmGetSectionValStartOffset(Adapter, eFlash2xSectionVal);
-
- if (IsSectionExistInVendorInfo(Adapter, eFlash2xSectionVal))
- return vendorextnWriteSectionWithoutErase(Adapter, pcBuff, eFlash2xSectionVal, uiOffset, uiNumBytes);
-
- uiOffset = uiOffset + uiStartOffset;
-
- #if defined(BCM_SHM_INTERFACE) && !defined(FLASH_DIRECT_ACCESS)
- Status = bcmflash_raw_writenoerase((uiOffset / FLASH_PART_SIZE), (uiOffset % FLASH_PART_SIZE), pcBuff, uiNumBytes);
- #else
- rdmalt(Adapter, 0x0f000C80, &uiTemp, sizeof(uiTemp));
- value = 0;
- wrmalt(Adapter, 0x0f000C80, &value, sizeof(value));
-
- Adapter->SelectedChip = RESET_CHIP_SELECT;
- BcmDoChipSelect(Adapter, uiOffset);
- uiPartOffset = (uiOffset & (FLASH_PART_SIZE - 1)) + GetFlashBaseAddr(Adapter);
-
- for (i = 0; i < uiNumBytes; i += Adapter->ulFlashWriteSize) {
- if (Adapter->ulFlashWriteSize == BYTE_WRITE_SUPPORT)
- Status = flashByteWrite(Adapter, uiPartOffset, pcBuff);
- else
- Status = flashWrite(Adapter, uiPartOffset, pcBuff);
-
- if (Status != STATUS_SUCCESS)
- break;
-
- pcBuff = pcBuff + Adapter->ulFlashWriteSize;
- uiPartOffset = uiPartOffset + Adapter->ulFlashWriteSize;
- }
- wrmalt(Adapter, 0x0f000C80, &uiTemp, sizeof(uiTemp));
- Adapter->SelectedChip = RESET_CHIP_SELECT;
- #endif
-
- return Status;
-}
-
-bool IsSectionExistInFlash(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val section)
-{
- bool SectionPresent = false;
-
- switch (section) {
- case ISO_IMAGE1:
- if ((Adapter->psFlash2xCSInfo->OffsetISOImage1Part1Start != UNINIT_PTR_IN_CS) &&
- (IsNonCDLessDevice(Adapter) == false))
- SectionPresent = TRUE;
- break;
- case ISO_IMAGE2:
- if ((Adapter->psFlash2xCSInfo->OffsetISOImage2Part1Start != UNINIT_PTR_IN_CS) &&
- (IsNonCDLessDevice(Adapter) == false))
- SectionPresent = TRUE;
- break;
- case DSD0:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSDStart != UNINIT_PTR_IN_CS)
- SectionPresent = TRUE;
- break;
- case DSD1:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD1Start != UNINIT_PTR_IN_CS)
- SectionPresent = TRUE;
- break;
- case DSD2:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD2Start != UNINIT_PTR_IN_CS)
- SectionPresent = TRUE;
- break;
- case VSA0:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSAStart != UNINIT_PTR_IN_CS)
- SectionPresent = TRUE;
- break;
- case VSA1:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA1Start != UNINIT_PTR_IN_CS)
- SectionPresent = TRUE;
- break;
- case VSA2:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA2Start != UNINIT_PTR_IN_CS)
- SectionPresent = TRUE;
- break;
- case SCSI:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForScsiFirmware != UNINIT_PTR_IN_CS)
- SectionPresent = TRUE;
- break;
- case CONTROL_SECTION:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForControlSectionStart != UNINIT_PTR_IN_CS)
- SectionPresent = TRUE;
- break;
- default:
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section Does not exist in Flash 2.x");
- SectionPresent = false;
- }
-
- return SectionPresent;
-}
-
-static int IsSectionWritable(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val Section)
-{
- int offset = STATUS_FAILURE;
- int Status = false;
-
- if (IsSectionExistInFlash(Adapter, Section) == false) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section <%d> does not exist", Section);
- return false;
- }
-
- offset = BcmGetSectionValStartOffset(Adapter, Section);
- if (offset == INVALID_OFFSET) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section<%d> does not exist", Section);
- return false;
- }
-
- if (IsSectionExistInVendorInfo(Adapter, Section))
- return !(Adapter->psFlash2xVendorInfo->VendorSection[Section].AccessFlags & FLASH2X_SECTION_RO);
-
- Status = IsOffsetWritable(Adapter, offset);
- return Status;
-}
-
-static int CorruptDSDSig(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val eFlash2xSectionVal)
-{
- PUCHAR pBuff = NULL;
- unsigned int sig = 0;
- unsigned int uiOffset = 0;
- unsigned int BlockStatus = 0;
- unsigned int uiSectAlignAddr = 0;
-
- Adapter->bSigCorrupted = false;
- if (Adapter->bAllDSDWriteAllow == false) {
- if (IsSectionWritable(Adapter, eFlash2xSectionVal) != TRUE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section is not Writable...Hence can't Corrupt signature");
- return SECTOR_IS_NOT_WRITABLE;
- }
- }
-
- pBuff = kzalloc(MAX_RW_SIZE, GFP_KERNEL);
- if (!pBuff) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Can't allocate memorey");
- return -ENOMEM;
- }
-
- uiOffset = Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + sizeof(struct bcm_dsd_header);
- uiOffset -= MAX_RW_SIZE;
-
- BcmFlash2xBulkRead(Adapter, (PUINT)pBuff, eFlash2xSectionVal, uiOffset, MAX_RW_SIZE);
-
- sig = *((PUINT)(pBuff + 12));
- sig = ntohl(sig);
- BCM_DEBUG_PRINT_BUFFER(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, pBuff, MAX_RW_SIZE);
- /* Now corrupting the sig by corrupting 4th last Byte. */
- *(pBuff + 12) = 0;
-
- if (sig == DSD_IMAGE_MAGIC_NUMBER) {
- Adapter->bSigCorrupted = TRUE;
- if (Adapter->ulFlashWriteSize == BYTE_WRITE_SUPPORT) {
- uiSectAlignAddr = uiOffset & ~(Adapter->uiSectorSize - 1);
- BlockStatus = BcmFlashUnProtectBlock(Adapter, uiSectAlignAddr, Adapter->uiSectorSize);
-
- WriteToFlashWithoutSectorErase(Adapter, (PUINT)(pBuff + 12), eFlash2xSectionVal,
- (uiOffset + 12), BYTE_WRITE_SUPPORT);
- if (BlockStatus) {
- BcmRestoreBlockProtectStatus(Adapter, BlockStatus);
- BlockStatus = 0;
- }
- } else {
- WriteToFlashWithoutSectorErase(Adapter, (PUINT)pBuff, eFlash2xSectionVal,
- uiOffset, MAX_RW_SIZE);
- }
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "BCM Signature is not present in header");
- kfree(pBuff);
-
- return STATUS_FAILURE;
- }
-
- kfree(pBuff);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Corrupted the signature");
-
- return STATUS_SUCCESS;
-}
-
-static int CorruptISOSig(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val eFlash2xSectionVal)
-{
- PUCHAR pBuff = NULL;
- unsigned int sig = 0;
- unsigned int uiOffset = 0;
-
- Adapter->bSigCorrupted = false;
-
- if (IsSectionWritable(Adapter, eFlash2xSectionVal) != TRUE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section is not Writable...Hence can't Corrupt signature");
- return SECTOR_IS_NOT_WRITABLE;
- }
-
- pBuff = kzalloc(MAX_RW_SIZE, GFP_KERNEL);
- if (!pBuff) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Can't allocate memory");
- return -ENOMEM;
- }
-
- uiOffset = 0;
-
- BcmFlash2xBulkRead(Adapter, (PUINT)pBuff, eFlash2xSectionVal, uiOffset, MAX_RW_SIZE);
-
- sig = *((PUINT)pBuff);
- sig = ntohl(sig);
-
- /* corrupt signature */
- *pBuff = 0;
-
- if (sig == ISO_IMAGE_MAGIC_NUMBER) {
- Adapter->bSigCorrupted = TRUE;
- WriteToFlashWithoutSectorErase(Adapter, (PUINT)pBuff, eFlash2xSectionVal,
- uiOffset, Adapter->ulFlashWriteSize);
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "BCM Signature is not present in header");
- kfree(pBuff);
-
- return STATUS_FAILURE;
- }
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Corrupted the signature");
- BCM_DEBUG_PRINT_BUFFER(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, pBuff, MAX_RW_SIZE);
-
- kfree(pBuff);
- return STATUS_SUCCESS;
-}
-
-bool IsNonCDLessDevice(struct bcm_mini_adapter *Adapter)
-{
- if (Adapter->psFlash2xCSInfo->IsCDLessDeviceBootSig == NON_CDLESS_DEVICE_BOOT_SIG)
- return TRUE;
- else
- return false;
-}
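For reference, the CorruptDSDSig() routine removed above invalidates a DSD image by reading back the last 16 bytes of the DSD header, checking the 4-byte magic number at offset 12, and zeroing that byte before writing the block back without a sector erase. A minimal sketch of that check, assuming a buffer already read from flash (the helper name is hypothetical; only DSD_IMAGE_MAGIC_NUMBER and the offset come from the removed code):

    /* Sketch only: corrupt the DSD magic in the 16-byte header tail. */
    static int corrupt_dsd_magic(unsigned char *buf, unsigned int len)
    {
    	unsigned int sig;

    	if (len < 16)
    		return -EINVAL;
    	sig = ntohl(*(unsigned int *)(buf + 12));	/* stored big-endian */
    	if (sig != DSD_IMAGE_MAGIC_NUMBER)		/* 0xBECE0D5D */
    		return -EINVAL;
    	buf[12] = 0;	/* one corrupted byte is enough to fail validation */
    	return 0;
    }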
diff --git a/drivers/staging/bcm/nvm.h b/drivers/staging/bcm/nvm.h
deleted file mode 100644
index e765cca5d96..00000000000
--- a/drivers/staging/bcm/nvm.h
+++ /dev/null
@@ -1,286 +0,0 @@
-/***************************************************************************************
- *
- * Copyright (c) Beceem Communications Inc.
- *
- * Module Name:
- * NVM.h
- *
- * Abstract:
- * This file has the prototypes, preprocessors and definitions of the various NVM libraries.
- *
- *
- * Revision History:
- * Who When What
- * -------- -------- ----------------------------------------------
- * Name Date Created/reviewed/modified
- *
- * Notes:
- *
- ****************************************************************************************/
-
-#ifndef _NVM_H_
-#define _NVM_H_
-
-struct bcm_flash_cs_info {
- u32 MagicNumber;
- /* let the magic number be 0xBECE-F1A5 - F1A5 for "flas-h" */
- u32 FlashLayoutVersion;
- u32 ISOImageVersion;
- u32 SCSIFirmwareVersion;
- u32 OffsetFromZeroForPart1ISOImage;
- u32 OffsetFromZeroForScsiFirmware;
- u32 SizeOfScsiFirmware;
- u32 OffsetFromZeroForPart2ISOImage;
- u32 OffsetFromZeroForCalibrationStart;
- u32 OffsetFromZeroForCalibrationEnd;
- u32 OffsetFromZeroForVSAStart;
- u32 OffsetFromZeroForVSAEnd;
- u32 OffsetFromZeroForControlSectionStart;
- u32 OffsetFromZeroForControlSectionData;
- u32 CDLessInactivityTimeout;
- u32 NewImageSignature;
- u32 FlashSectorSizeSig;
- u32 FlashSectorSize;
- u32 FlashWriteSupportSize;
- u32 TotalFlashSize;
- u32 FlashBaseAddr;
- u32 FlashPartMaxSize;
- u32 IsCDLessDeviceBootSig;
- /* MSC Timeout after reset to switch from MSC to NW Mode */
- u32 MassStorageTimeout;
-};
-
-#define FLASH2X_TOTAL_SIZE (64 * 1024 * 1024)
-#define DEFAULT_SECTOR_SIZE (64 * 1024)
-
-struct bcm_flash2x_cs_info {
- /* magic number as 0xBECE-F1A5 - F1A5 for "flas-h" */
- u32 MagicNumber;
- u32 FlashLayoutVersion;
- u32 ISOImageVersion;
- u32 SCSIFirmwareVersion;
- u32 OffsetFromZeroForPart1ISOImage;
- u32 OffsetFromZeroForScsiFirmware;
- u32 SizeOfScsiFirmware;
- u32 OffsetFromZeroForPart2ISOImage;
- u32 OffsetFromZeroForDSDStart;
- u32 OffsetFromZeroForDSDEnd;
- u32 OffsetFromZeroForVSAStart;
- u32 OffsetFromZeroForVSAEnd;
- u32 OffsetFromZeroForControlSectionStart;
- u32 OffsetFromZeroForControlSectionData;
- /* NO Data Activity timeout to switch from MSC to NW Mode */
- u32 CDLessInactivityTimeout;
- u32 NewImageSignature;
- u32 FlashSectorSizeSig;
- u32 FlashSectorSize;
- u32 FlashWriteSupportSize;
- u32 TotalFlashSize;
- u32 FlashBaseAddr;
- u32 FlashPartMaxSize;
- u32 IsCDLessDeviceBootSig;
- /* MSC Timeout after reset to switch from MSC to NW Mode */
- u32 MassStorageTimeout;
- /* Flash Map 2.0 Field */
- u32 OffsetISOImage1Part1Start;
- u32 OffsetISOImage1Part1End;
- u32 OffsetISOImage1Part2Start;
- u32 OffsetISOImage1Part2End;
- u32 OffsetISOImage1Part3Start;
- u32 OffsetISOImage1Part3End;
- u32 OffsetISOImage2Part1Start;
- u32 OffsetISOImage2Part1End;
- u32 OffsetISOImage2Part2Start;
- u32 OffsetISOImage2Part2End;
- u32 OffsetISOImage2Part3Start;
- u32 OffsetISOImage2Part3End;
- /* DSD Header offset from start of DSD */
- u32 OffsetFromDSDStartForDSDHeader;
- u32 OffsetFromZeroForDSD1Start;
- u32 OffsetFromZeroForDSD1End;
- u32 OffsetFromZeroForDSD2Start;
- u32 OffsetFromZeroForDSD2End;
- u32 OffsetFromZeroForVSA1Start;
- u32 OffsetFromZeroForVSA1End;
- u32 OffsetFromZeroForVSA2Start;
- u32 OffsetFromZeroForVSA2End;
- /*
- * ACCESS_BITS_PER_SECTOR 2
- * ACCESS_RW 0
- * ACCESS_RO 1
- * ACCESS_RESVD 2
- * ACCESS_RESVD 3
- */
- u32 SectorAccessBitMap[FLASH2X_TOTAL_SIZE / (DEFAULT_SECTOR_SIZE * 16)];
- /* All expansions to the control data structure should add here */
-};
-
-struct bcm_vendor_section_info {
- u32 OffsetFromZeroForSectionStart;
- u32 OffsetFromZeroForSectionEnd;
- u32 AccessFlags;
- u32 Reserved[16];
-};
-
-struct bcm_flash2x_vendor_info {
- struct bcm_vendor_section_info VendorSection[TOTAL_SECTIONS];
- u32 Reserved[16];
-};
-
-struct bcm_dsd_header {
- u32 DSDImageSize;
- u32 DSDImageCRC;
- u32 DSDImagePriority;
- /* We should not consider right now. Reading reserve is worthless. */
- u32 Reserved[252]; /* Resvd for DSD Header */
- u32 DSDImageMagicNumber;
-};
-
-struct bcm_iso_header {
- u32 ISOImageMagicNumber;
- u32 ISOImageSize;
- u32 ISOImageCRC;
- u32 ISOImagePriority;
- /* We should not consider right now. Reading reserve is worthless. */
- u32 Reserved[60]; /* Resvd for ISO Header extension */
-};
-
-#define EEPROM_BEGIN_CIS (0)
-#define EEPROM_BEGIN_NON_CIS (0x200)
-#define EEPROM_END (0x2000)
-#define INIT_PARAMS_SIGNATURE (0x95a7a597)
-#define MAX_INIT_PARAMS_LENGTH (2048)
-#define MAC_ADDRESS_OFFSET 0x200
-
-#define INIT_PARAMS_1_SIGNATURE_ADDRESS EEPROM_BEGIN_NON_CIS
-#define INIT_PARAMS_1_DATA_ADDRESS (INIT_PARAMS_1_SIGNATURE_ADDRESS+16)
-#define INIT_PARAMS_1_MACADDRESS_ADDRESS (MAC_ADDRESS_OFFSET)
-#define INIT_PARAMS_1_LENGTH_ADDRESS (INIT_PARAMS_1_SIGNATURE_ADDRESS+4)
-
-#define INIT_PARAMS_2_SIGNATURE_ADDRESS (EEPROM_BEGIN_NON_CIS + 2048 + 16)
-#define INIT_PARAMS_2_DATA_ADDRESS (INIT_PARAMS_2_SIGNATURE_ADDRESS + 16)
-#define INIT_PARAMS_2_MACADDRESS_ADDRESS (INIT_PARAMS_2_SIGNATURE_ADDRESS + 8)
-#define INIT_PARAMS_2_LENGTH_ADDRESS (INIT_PARAMS_2_SIGNATURE_ADDRESS + 4)
-
-#define EEPROM_SPI_DEV_CONFIG_REG 0x0F003000
-#define EEPROM_SPI_Q_STATUS1_REG 0x0F003004
-#define EEPROM_SPI_Q_STATUS1_MASK_REG 0x0F00300C
-
-#define EEPROM_SPI_Q_STATUS_REG 0x0F003008
-#define EEPROM_CMDQ_SPI_REG 0x0F003018
-#define EEPROM_WRITE_DATAQ_REG 0x0F00301C
-#define EEPROM_READ_DATAQ_REG 0x0F003020
-#define SPI_FLUSH_REG 0x0F00304C
-
-#define EEPROM_WRITE_ENABLE 0x06000000
-#define EEPROM_READ_STATUS_REGISTER 0x05000000
-#define EEPROM_16_BYTE_PAGE_WRITE 0xFA000000
-#define EEPROM_WRITE_QUEUE_EMPTY 0x00001000
-#define EEPROM_WRITE_QUEUE_AVAIL 0x00002000
-#define EEPROM_WRITE_QUEUE_FULL 0x00004000
-#define EEPROM_16_BYTE_PAGE_READ 0xFB000000
-#define EEPROM_4_BYTE_PAGE_READ 0x3B000000
-
-#define EEPROM_CMD_QUEUE_FLUSH 0x00000001
-#define EEPROM_WRITE_QUEUE_FLUSH 0x00000002
-#define EEPROM_READ_QUEUE_FLUSH 0x00000004
-#define EEPROM_ETH_QUEUE_FLUSH 0x00000008
-#define EEPROM_ALL_QUEUE_FLUSH 0x0000000f
-#define EEPROM_READ_ENABLE 0x06000000
-#define EEPROM_16_BYTE_PAGE_WRITE 0xFA000000
-#define EEPROM_READ_DATA_FULL 0x00000010
-#define EEPROM_READ_DATA_AVAIL 0x00000020
-#define EEPROM_READ_QUEUE_EMPTY 0x00000002
-#define EEPROM_CMD_QUEUE_EMPTY 0x00000100
-#define EEPROM_CMD_QUEUE_AVAIL 0x00000200
-#define EEPROM_CMD_QUEUE_FULL 0x00000400
-
-/* In most EEPROMs, bit 0 of the status register indicates that the EEPROM
- * is busy with a write when set to 1. See the details of the EEPROM Status
- * Register in the EEPROM data sheet.
- */
-#define EEPROM_STATUS_REG_WRITE_BUSY 0x00000001
-
-/* We wait 1 msec for every RETRIES_PER_DELAY count, with a maximum of
- * MAX_EEPROM_RETRIES attempts. This gives a minimum delay of 80 msec.
- */
-#define MAX_EEPROM_RETRIES 80
-#define RETRIES_PER_DELAY 64
-#define MAX_RW_SIZE 0x10
-#define MAX_READ_SIZE 0x10
-#define MAX_SECTOR_SIZE (512 * 1024)
-#define MIN_SECTOR_SIZE (1024)
-#define FLASH_SECTOR_SIZE_OFFSET 0xEFFFC
-#define FLASH_SECTOR_SIZE_SIG_OFFSET 0xEFFF8
-#define FLASH_SECTOR_SIZE_SIG 0xCAFEBABE
-#define FLASH_CS_INFO_START_ADDR 0xFF0000
-#define FLASH_CONTROL_STRUCT_SIGNATURE 0xBECEF1A5
-#define SCSI_FIRMWARE_MAJOR_VERSION 0x1
-#define SCSI_FIRMWARE_MINOR_VERSION 0x5
-#define BYTE_WRITE_SUPPORT 0x1
-#define FLASH_AUTO_INIT_BASE_ADDR 0xF00000
-#define FLASH_CONTIGIOUS_START_ADDR_AFTER_INIT 0x1C000000
-#define FLASH_CONTIGIOUS_START_ADDR_BEFORE_INIT 0x1F000000
-#define FLASH_CONTIGIOUS_START_ADDR_BCS350 0x08000000
-#define FLASH_CONTIGIOUS_END_ADDR_BCS350 0x08FFFFFF
-#define FLASH_SIZE_ADDR 0xFFFFEC
-#define FLASH_SPI_CMDQ_REG 0xAF003040
-#define FLASH_SPI_WRITEQ_REG 0xAF003044
-#define FLASH_SPI_READQ_REG 0xAF003048
-#define FLASH_CONFIG_REG 0xAF003050
-#define FLASH_GPIO_CONFIG_REG 0xAF000030
-#define FLASH_CMD_WRITE_ENABLE 0x06
-#define FLASH_CMD_READ_ENABLE 0x03
-#define FLASH_CMD_RESET_WRITE_ENABLE 0x04
-#define FLASH_CMD_STATUS_REG_READ 0x05
-#define FLASH_CMD_STATUS_REG_WRITE 0x01
-#define FLASH_CMD_READ_ID 0x9F
-#define PAD_SELECT_REGISTER 0xAF000410
-#define FLASH_PART_SST25VF080B 0xBF258E
-#define EEPROM_CAL_DATA_INTERNAL_LOC 0xbFB00008
-#define EEPROM_CALPARAM_START 0x200
-#define EEPROM_SIZE_OFFSET 524
-
-/* As the read/write time varies from 1.5 to 3.0 ms, and ignoring the rdm/wrm
- * time (which depends on many factors such as the interface), the delay
- * calculated here meets the worst-case delay of 3.0 ms.
- */
-#define MAX_FLASH_RETRIES 4
-#define FLASH_PER_RETRIES_DELAY 16
-#define EEPROM_MAX_CAL_AREA_SIZE 0xF0000
-#define BECM ntohl(0x4245434d)
-#define FLASH_2X_MAJOR_NUMBER 0x2
-#define DSD_IMAGE_MAGIC_NUMBER 0xBECE0D5D
-#define ISO_IMAGE_MAGIC_NUMBER 0xBECE0150
-#define NON_CDLESS_DEVICE_BOOT_SIG 0xBECEB007
-
-#define MINOR_VERSION(x) ((x >> 16) & 0xFFFF)
-#define MAJOR_VERSION(x) (x & 0xFFFF)
-
-#define CORRUPTED_PATTERN 0x0
-#define UNINIT_PTR_IN_CS 0xBBBBDDDD
-#define VENDOR_PTR_IN_CS 0xAAAACCCC
-#define FLASH2X_SECTION_PRESENT (1 << 0)
-#define FLASH2X_SECTION_VALID (1 << 1)
-#define FLASH2X_SECTION_RO (1 << 2)
-#define FLASH2X_SECTION_ACT (1 << 3)
-#define SECTOR_IS_NOT_WRITABLE STATUS_FAILURE
-#define INVALID_OFFSET STATUS_FAILURE
-#define INVALID_SECTION STATUS_FAILURE
-#define SECTOR_1K 1024
-#define SECTOR_64K (64 * SECTOR_1K)
-#define SECTOR_128K (2 * SECTOR_64K)
-#define SECTOR_256k (2 * SECTOR_128K)
-#define SECTOR_512K (2 * SECTOR_256k)
-#define FLASH_PART_SIZE (16 * 1024 * 1024)
-#define RESET_CHIP_SELECT -1
-#define CHIP_SELECT_BIT12 12
-#define SECTOR_READWRITE_PERMISSION 0
-#define SECTOR_READONLY 1
-#define SIGNATURE_SIZE 4
-#define DEFAULT_BUFF_SIZE 0x10000
-
-#define FIELD_OFFSET_IN_HEADER(HeaderPointer, Field) ((u8 *)&((HeaderPointer)(NULL))->Field - (u8 *)(NULL))
-
-#endif
-
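The FIELD_OFFSET_IN_HEADER() macro deleted above is an open-coded offsetof(); if the same computation were ever needed again, the kernel's standard helper covers it (illustrative only, not part of the patch):

    #include <linux/stddef.h>

    /* Same value as FIELD_OFFSET_IN_HEADER(struct bcm_dsd_header *, DSDImageMagicNumber) */
    size_t off = offsetof(struct bcm_dsd_header, DSDImageMagicNumber);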
diff --git a/drivers/staging/bcm/sort.c b/drivers/staging/bcm/sort.c
deleted file mode 100644
index ca0b1799151..00000000000
--- a/drivers/staging/bcm/sort.c
+++ /dev/null
@@ -1,52 +0,0 @@
-#include "headers.h"
-#include <linux/sort.h>
-
-/*
- * File Name: sort.c
- *
- * Author: Beceem Communications Pvt. Ltd
- *
- * Abstract: This file contains the routines sorting the classification rules.
- *
- * Copyright (c) 2007 Beceem Communications Pvt. Ltd
- */
-
-static int compare_packet_info(void const *a, void const *b)
-{
- struct bcm_packet_info const *pa = a;
- struct bcm_packet_info const *pb = b;
-
- if (!pa->bValid || !pb->bValid)
- return 0;
-
- return pa->u8TrafficPriority - pb->u8TrafficPriority;
-}
-
-VOID SortPackInfo(struct bcm_mini_adapter *Adapter)
-{
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG,
- DBG_LVL_ALL, "<=======");
-
- sort(Adapter->PackInfo, NO_OF_QUEUES, sizeof(struct bcm_packet_info),
- compare_packet_info, NULL);
-}
-
-static int compare_classifiers(void const *a, void const *b)
-{
- struct bcm_classifier_rule const *pa = a;
- struct bcm_classifier_rule const *pb = b;
-
- if (!pa->bUsed || !pb->bUsed)
- return 0;
-
- return pa->u8ClassifierRulePriority - pb->u8ClassifierRulePriority;
-}
-
-VOID SortClassifiers(struct bcm_mini_adapter *Adapter)
-{
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG,
- DBG_LVL_ALL, "<=======");
-
- sort(Adapter->astClassifierTable, MAX_CLASSIFIERS,
- sizeof(struct bcm_classifier_rule), compare_classifiers, NULL);
-}
diff --git a/drivers/staging/bcm/target_params.h b/drivers/staging/bcm/target_params.h
deleted file mode 100644
index dc45f9ab854..00000000000
--- a/drivers/staging/bcm/target_params.h
+++ /dev/null
@@ -1,57 +0,0 @@
-#ifndef TARGET_PARAMS_H
-#define TARGET_PARAMS_H
-
-struct bcm_target_params {
- u32 m_u32CfgVersion;
- u32 m_u32CenterFrequency;
- u32 m_u32BandAScan;
- u32 m_u32BandBScan;
- u32 m_u32BandCScan;
- u32 m_u32ErtpsOptions;
- u32 m_u32PHSEnable;
- u32 m_u32HoEnable;
- u32 m_u32HoReserved1;
- u32 m_u32HoReserved2;
- u32 m_u32MimoEnable;
- u32 m_u32SecurityEnable;
- u32 m_u32PowerSavingModesEnable; /* bit 1: 1 Idlemode enable; bit2: 1 Sleepmode Enable */
- /* PowerSaving Mode Options:
- * bit 0 = 1: CPE mode - to keep pcmcia if alive;
- * bit 1 = 1: CINR reporting in Idlemode Msg
- * bit 2 = 1: Default PSC Enable in sleepmode
- */
- u32 m_u32PowerSavingModeOptions;
- u32 m_u32ArqEnable;
- /* From Version #3, the HARQ section renamed as general */
- u32 m_u32HarqEnable;
- u32 m_u32EEPROMFlag;
- /* BINARY TYPE - 4th MSByte: Interface Type - 3rd MSByte: Vendor Type - 2nd MSByte
- * Unused - LSByte
- */
- u32 m_u32Customize;
- u32 m_u32ConfigBW; /* In Hz */
- u32 m_u32ShutDownInitThresholdTimer;
- u32 m_u32RadioParameter;
- u32 m_u32PhyParameter1;
- u32 m_u32PhyParameter2;
- u32 m_u32PhyParameter3;
- u32 m_u32TestOptions; /* in eval mode only; lower 16bits = basic cid for testing; then bit 16 is test cqich,bit 17 test init rang; bit 18 test periodic rang and bit 19 is test harq ack/nack */
- u32 m_u32MaxMACDataperDLFrame;
- u32 m_u32MaxMACDataperULFrame;
- u32 m_u32Corr2MacFlags;
- u32 HostDrvrConfig1;
- u32 HostDrvrConfig2;
- u32 HostDrvrConfig3;
- u32 HostDrvrConfig4;
- u32 HostDrvrConfig5;
- u32 HostDrvrConfig6;
- u32 m_u32SegmentedPUSCenable;
- /* removed SHUT down related 'unused' params from here to sync 4.x and 5.x CFG files..
- * BAMC Related Parameters
- * Bit 0-15 Band AMC signaling configuration: Bit 1 = 1 – Enable Band AMC signaling.
- * bit 16-31 Band AMC Data configuration: Bit 16 = 1 – Band AMC 2x3 support.
- */
- u32 m_u32BandAMCEnable;
-};
-
-#endif
diff --git a/drivers/staging/bcm/vendorspecificextn.c b/drivers/staging/bcm/vendorspecificextn.c
deleted file mode 100644
index 1d9bef6e427..00000000000
--- a/drivers/staging/bcm/vendorspecificextn.c
+++ /dev/null
@@ -1,145 +0,0 @@
-#include "headers.h"
-/*
- * Procedure: vendorextnGetSectionInfo
- *
- * Description: Finds the type of NVM used.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * pNVMType - ptr to NVM type.
- * Returns:
- * STATUS_SUCCESS/STATUS_FAILURE
- *
- */
-INT vendorextnGetSectionInfo(PVOID pContext,
- struct bcm_flash2x_vendor_info *pVendorInfo)
-{
- return STATUS_FAILURE;
-}
-
-/*
- * Procedure: vendorextnInit
- *
- * Description: Initializing the vendor extension NVM interface
- *
- * Arguments:
- * Adapter - Pointer to MINI Adapter Structure
- * Returns:
- * STATUS_SUCCESS/STATUS_FAILURE
- *
- *
- */
-INT vendorextnInit(struct bcm_mini_adapter *Adapter)
-{
- return STATUS_SUCCESS;
-}
-
-/*
- * Procedure: vendorextnExit
- *
- * Description: Free the resource associated with vendor extension NVM interface
- *
- * Arguments:
- *
- * Returns:
- * STATUS_SUCCESS/STATUS_FAILURE
- *
- *
- */
-INT vendorextnExit(struct bcm_mini_adapter *Adapter)
-{
- return STATUS_SUCCESS;
-}
-
-/*
- * Procedure: vendorextnIoctl
- *
- * Description: execute the vendor extension specific ioctl
- *
- * Arguments:
- * Adapter -Beceem private Adapter Structure
- * cmd -vendor extension specific ioctl command
- * arg -input parameter sent by vendor
- *
- * Returns:
- * CONTINUE_COMMON_PATH in case it is not meant to be processed
- * by vendor ioctls
- * STATUS_SUCCESS/STATUS_FAILURE as per the IOCTL return value
- */
-
-INT vendorextnIoctl(struct bcm_mini_adapter *Adapter, UINT cmd, ULONG arg)
-{
- return CONTINUE_COMMON_PATH;
-}
-
-
-
-/*
- * Procedure: vendorextnReadSection
- *
- * Description: Reads from a section of NVM
- *
- * Arguments:
- * pContext - ptr to Adapter object instance
- * pBuffer - Read the data from Vendor Area to this buffer
- * SectionVal - Value of type of Section
- * Offset - Read from the Offset of the Vendor Section.
- * numOfBytes - Read numOfBytes from the Vendor section to Buffer
- *
- * Returns:
- * STATUS_SUCCESS/STATUS_FAILURE
- */
-
-INT vendorextnReadSection(PVOID pContext, PUCHAR pBuffer,
- enum bcm_flash2x_section_val SectionVal, UINT offset, UINT numOfBytes)
-{
- return STATUS_FAILURE;
-}
-
-
-
-/*
- * Procedure: vendorextnWriteSection
- *
- * Description: Write to a Section of NVM
- *
- * Arguments:
- * pContext - ptr to Adapter object instance
- * pBuffer - Write the data provided in the buffer
- * SectionVal - Value of type of Section
- * Offset - Writes to the Offset of the Vendor Section.
- * numOfBytes - Write num Bytes after reading from pBuffer.
- * bVerify - the Buffer Written should be verified.
- *
- * Returns:
- * STATUS_SUCCESS/STATUS_FAILURE
- */
-INT vendorextnWriteSection(PVOID pContext, PUCHAR pBuffer,
- enum bcm_flash2x_section_val SectionVal, UINT offset,
- UINT numOfBytes, bool bVerify)
-{
- return STATUS_FAILURE;
-}
-
-
-
-/*
- * Procedure: vendorextnWriteSectionWithoutErase
- *
- * Description: Write to a Section of NVM without erasing the sector
- *
- * Arguments:
- * pContext - ptr to Adapter object instance
- * pBuffer - Write the data provided in the buffer
- * SectionVal - Value of type of Section
- * Offset - Writes to the Offset of the Vendor Section.
- * numOfBytes - Write num Bytes after reading from pBuffer.
- *
- * Returns:
- * STATUS_SUCCESS/STATUS_FAILURE
- */
-INT vendorextnWriteSectionWithoutErase(PVOID pContext, PUCHAR pBuffer,
- enum bcm_flash2x_section_val SectionVal, UINT offset, UINT numOfBytes)
-{
- return STATUS_FAILURE;
-}
diff --git a/drivers/staging/bcm/vendorspecificextn.h b/drivers/staging/bcm/vendorspecificextn.h
deleted file mode 100644
index ff57f057045..00000000000
--- a/drivers/staging/bcm/vendorspecificextn.h
+++ /dev/null
@@ -1,18 +0,0 @@
-
-#ifndef __VENDOR_EXTN_NVM_H__
-#define __VENDOR_EXTN_NVM_H__
-
-#define CONTINUE_COMMON_PATH 0xFFFF
-
-INT vendorextnGetSectionInfo(PVOID pContext, struct bcm_flash2x_vendor_info *pVendorInfo);
-INT vendorextnExit(struct bcm_mini_adapter *Adapter);
-INT vendorextnInit(struct bcm_mini_adapter *Adapter);
-INT vendorextnIoctl(struct bcm_mini_adapter *Adapter, UINT cmd, ULONG arg);
-INT vendorextnReadSection(PVOID pContext, PUCHAR pBuffer, enum bcm_flash2x_section_val SectionVal,
- UINT offset, UINT numOfBytes);
-INT vendorextnWriteSection(PVOID pContext, PUCHAR pBuffer, enum bcm_flash2x_section_val SectionVal,
- UINT offset, UINT numOfBytes, bool bVerify);
-INT vendorextnWriteSectionWithoutErase(PVOID pContext, PUCHAR pBuffer, enum bcm_flash2x_section_val SectionVal,
- UINT offset, UINT numOfBytes);
-
-#endif /* */
diff --git a/drivers/staging/clocking-wizard/Kconfig b/drivers/staging/clocking-wizard/Kconfig
new file mode 100644
index 00000000000..357af02c562
--- /dev/null
+++ b/drivers/staging/clocking-wizard/Kconfig
@@ -0,0 +1,9 @@
+#
+# Xilinx Clocking Wizard Driver
+#
+
+config COMMON_CLK_XLNX_CLKWZRD
+ tristate "Xilinx Clocking Wizard"
+ depends on COMMON_CLK && OF
+ ---help---
+ Support for the Xilinx Clocking Wizard IP core clock generator.
diff --git a/drivers/staging/clocking-wizard/Makefile b/drivers/staging/clocking-wizard/Makefile
new file mode 100644
index 00000000000..5ad352f521f
--- /dev/null
+++ b/drivers/staging/clocking-wizard/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD) += clk-xlnx-clock-wizard.o
diff --git a/drivers/staging/clocking-wizard/TODO b/drivers/staging/clocking-wizard/TODO
new file mode 100644
index 00000000000..ebe99db7d15
--- /dev/null
+++ b/drivers/staging/clocking-wizard/TODO
@@ -0,0 +1,12 @@
+TODO:
+ - support for fractional multiplier
+ - support for fractional divider (output 0 only)
+ - support for set_rate() operations (may benefit from Stephen Boyd's
+ refactoring of the clk primitives: https://lkml.org/lkml/2014/9/5/766)
+ - review arithmetic
+ - overflow after multiplication?
+ - maximize accuracy before divisions
+
+Patches to:
+ Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ Sören Brinkmann <soren.brinkmann@xilinx.com>
diff --git a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
new file mode 100644
index 00000000000..471d0877f38
--- /dev/null
+++ b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
@@ -0,0 +1,341 @@
+/*
+ * Xilinx 'Clocking Wizard' driver
+ *
+ * Copyright (C) 2013 - 2014 Xilinx
+ *
+ * Sören Brinkmann <soren.brinkmann@xilinx.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/err.h>
+
+#define WZRD_NUM_OUTPUTS 7
+#define WZRD_ACLK_MAX_FREQ 250000000UL
+
+#define WZRD_CLK_CFG_REG(n) (0x200 + 4 * (n))
+
+#define WZRD_CLkOUT0_FRAC_EN BIT(18)
+#define WZRD_CLkFBOUT_FRAC_EN BIT(26)
+
+#define WZRD_CLKFBOUT_MULT_SHIFT 8
+#define WZRD_CLKFBOUT_MULT_MASK (0xff << WZRD_CLKFBOUT_MULT_SHIFT)
+#define WZRD_DIVCLK_DIVIDE_SHIFT 0
+#define WZRD_DIVCLK_DIVIDE_MASK (0xff << WZRD_DIVCLK_DIVIDE_SHIFT)
+#define WZRD_CLKOUT_DIVIDE_SHIFT 0
+#define WZRD_CLKOUT_DIVIDE_MASK (0xff << WZRD_DIVCLK_DIVIDE_SHIFT)
+
+enum clk_wzrd_int_clks {
+ wzrd_clk_mul,
+ wzrd_clk_mul_div,
+ wzrd_clk_int_max
+};
+
+/**
+ * struct clk_wzrd:
+ * @clk_data: Clock data
+ * @nb: Notifier block
+ * @base: Memory base
+ * @clk_in1: Handle to input clock 'clk_in1'
+ * @axi_clk: Handle to input clock 's_axi_aclk'
+ * @clks_internal: Internal clocks
+ * @clkout: Output clocks
+ * @speed_grade: Speed grade of the device
+ * @suspended: Flag indicating power state of the device
+ */
+struct clk_wzrd {
+ struct clk_onecell_data clk_data;
+ struct notifier_block nb;
+ void __iomem *base;
+ struct clk *clk_in1;
+ struct clk *axi_clk;
+ struct clk *clks_internal[wzrd_clk_int_max];
+ struct clk *clkout[WZRD_NUM_OUTPUTS];
+ int speed_grade;
+ bool suspended;
+};
+#define to_clk_wzrd(_nb) container_of(_nb, struct clk_wzrd, nb)
+
+/* maximum frequencies for input/output clocks per speed grade */
+static const unsigned long clk_wzrd_max_freq[] = {
+ 800000000UL,
+ 933000000UL,
+ 1066000000UL
+};
+
+static int clk_wzrd_clk_notifier(struct notifier_block *nb, unsigned long event,
+ void *data)
+{
+ unsigned long max;
+ struct clk_notifier_data *ndata = data;
+ struct clk_wzrd *clk_wzrd = to_clk_wzrd(nb);
+
+ if (clk_wzrd->suspended)
+ return NOTIFY_OK;
+
+ if (ndata->clk == clk_wzrd->clk_in1)
+ max = clk_wzrd_max_freq[clk_wzrd->speed_grade - 1];
+ if (ndata->clk == clk_wzrd->axi_clk)
+ max = WZRD_ACLK_MAX_FREQ;
+
+ switch (event) {
+ case PRE_RATE_CHANGE:
+ if (ndata->new_rate > max)
+ return NOTIFY_BAD;
+ return NOTIFY_OK;
+ case POST_RATE_CHANGE:
+ case ABORT_RATE_CHANGE:
+ default:
+ return NOTIFY_DONE;
+ }
+}
+
+static int __maybe_unused clk_wzrd_suspend(struct device *dev)
+{
+ struct clk_wzrd *clk_wzrd = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(clk_wzrd->axi_clk);
+ clk_wzrd->suspended = true;
+
+ return 0;
+}
+
+static int __maybe_unused clk_wzrd_resume(struct device *dev)
+{
+ int ret;
+ struct clk_wzrd *clk_wzrd = dev_get_drvdata(dev);
+
+ ret = clk_prepare_enable(clk_wzrd->axi_clk);
+ if (ret) {
+ dev_err(dev, "unable to enable s_axi_aclk\n");
+ return ret;
+ }
+
+ clk_wzrd->suspended = false;
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(clk_wzrd_dev_pm_ops, clk_wzrd_suspend,
+ clk_wzrd_resume);
+
+static int clk_wzrd_probe(struct platform_device *pdev)
+{
+ int i, ret;
+ u32 reg;
+ unsigned long rate;
+ const char *clk_name;
+ struct clk_wzrd *clk_wzrd;
+ struct resource *mem;
+ struct device_node *np = pdev->dev.of_node;
+
+ clk_wzrd = devm_kzalloc(&pdev->dev, sizeof(*clk_wzrd), GFP_KERNEL);
+ if (!clk_wzrd)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, clk_wzrd);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ clk_wzrd->base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(clk_wzrd->base))
+ return PTR_ERR(clk_wzrd->base);
+
+ ret = of_property_read_u32(np, "speed-grade", &clk_wzrd->speed_grade);
+ if (!ret) {
+ if (clk_wzrd->speed_grade < 1 || clk_wzrd->speed_grade > 3) {
+ dev_warn(&pdev->dev, "invalid speed grade '%d'\n",
+ clk_wzrd->speed_grade);
+ clk_wzrd->speed_grade = 0;
+ }
+ }
+
+ clk_wzrd->clk_in1 = devm_clk_get(&pdev->dev, "clk_in1");
+ if (IS_ERR(clk_wzrd->clk_in1)) {
+ if (clk_wzrd->clk_in1 != ERR_PTR(-EPROBE_DEFER))
+ dev_err(&pdev->dev, "clk_in1 not found\n");
+ return PTR_ERR(clk_wzrd->clk_in1);
+ }
+
+ clk_wzrd->axi_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
+ if (IS_ERR(clk_wzrd->axi_clk)) {
+ if (clk_wzrd->axi_clk != ERR_PTR(-EPROBE_DEFER))
+ dev_err(&pdev->dev, "s_axi_aclk not found\n");
+ return PTR_ERR(clk_wzrd->axi_clk);
+ }
+ ret = clk_prepare_enable(clk_wzrd->axi_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "enabling s_axi_aclk failed\n");
+ return ret;
+ }
+ rate = clk_get_rate(clk_wzrd->axi_clk);
+ if (rate > WZRD_ACLK_MAX_FREQ) {
+ dev_err(&pdev->dev, "s_axi_aclk frequency (%lu) too high\n",
+ rate);
+ ret = -EINVAL;
+ goto err_disable_clk;
+ }
+
+ /* we don't support fractional div/mul yet */
+ reg = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(0)) &
+ WZRD_CLkFBOUT_FRAC_EN;
+ reg |= readl(clk_wzrd->base + WZRD_CLK_CFG_REG(2)) &
+ WZRD_CLkOUT0_FRAC_EN;
+ if (reg)
+ dev_warn(&pdev->dev, "fractional div/mul not supported\n");
+
+ /* register multiplier */
+ reg = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(0)) &
+ WZRD_CLKFBOUT_MULT_MASK) >> WZRD_CLKFBOUT_MULT_SHIFT;
+ clk_name = kasprintf(GFP_KERNEL, "%s_mul", dev_name(&pdev->dev));
+ if (!clk_name) {
+ ret = -ENOMEM;
+ goto err_disable_clk;
+ }
+ clk_wzrd->clks_internal[wzrd_clk_mul] = clk_register_fixed_factor(
+ &pdev->dev, clk_name,
+ __clk_get_name(clk_wzrd->clk_in1),
+ 0, reg, 1);
+ kfree(clk_name);
+ if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul])) {
+ dev_err(&pdev->dev, "unable to register fixed-factor clock\n");
+ ret = PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul]);
+ goto err_disable_clk;
+ }
+
+ /* register div */
+ reg = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(0)) &
+ WZRD_DIVCLK_DIVIDE_MASK) >> WZRD_DIVCLK_DIVIDE_SHIFT;
+ clk_name = kasprintf(GFP_KERNEL, "%s_mul_div", dev_name(&pdev->dev));
+ if (!clk_name) {
+ ret = -ENOMEM;
+ goto err_rm_int_clk;
+ }
+
+ clk_wzrd->clks_internal[wzrd_clk_mul_div] = clk_register_fixed_factor(
+ &pdev->dev, clk_name,
+ __clk_get_name(clk_wzrd->clks_internal[wzrd_clk_mul]),
+ 0, 1, reg);
+ if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div])) {
+ dev_err(&pdev->dev, "unable to register divider clock\n");
+ ret = PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div]);
+ goto err_rm_int_clk;
+ }
+
+ /* register div per output */
+ for (i = WZRD_NUM_OUTPUTS - 1; i >= 0 ; i--) {
+ const char *clkout_name;
+ if (of_property_read_string_index(np, "clock-output-names", i,
+ &clkout_name)) {
+ dev_err(&pdev->dev,
+ "clock output name not specified\n");
+ ret = -EINVAL;
+ goto err_rm_int_clks;
+ }
+ reg = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(2) + i * 12);
+ reg &= WZRD_CLKOUT_DIVIDE_MASK;
+ reg >>= WZRD_CLKOUT_DIVIDE_SHIFT;
+ clk_wzrd->clkout[i] = clk_register_fixed_factor(&pdev->dev,
+ clkout_name, clk_name, 0, 1, reg);
+ if (IS_ERR(clk_wzrd->clkout[i])) {
+ int j;
+
+ for (j = i + 1; j < WZRD_NUM_OUTPUTS; j++)
+ clk_unregister(clk_wzrd->clkout[j]);
+ dev_err(&pdev->dev,
+ "unable to register divider clock\n");
+ ret = PTR_ERR(clk_wzrd->clkout[i]);
+ goto err_rm_int_clks;
+ }
+ }
+
+ kfree(clk_name);
+
+ clk_wzrd->clk_data.clks = clk_wzrd->clkout;
+ clk_wzrd->clk_data.clk_num = ARRAY_SIZE(clk_wzrd->clkout);
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_wzrd->clk_data);
+
+ if (clk_wzrd->speed_grade) {
+ clk_wzrd->nb.notifier_call = clk_wzrd_clk_notifier;
+
+ ret = clk_notifier_register(clk_wzrd->clk_in1,
+ &clk_wzrd->nb);
+ if (ret)
+ dev_warn(&pdev->dev,
+ "unable to register clock notifier\n");
+
+ ret = clk_notifier_register(clk_wzrd->axi_clk, &clk_wzrd->nb);
+ if (ret)
+ dev_warn(&pdev->dev,
+ "unable to register clock notifier\n");
+ }
+
+ return 0;
+
+err_rm_int_clks:
+ clk_unregister(clk_wzrd->clks_internal[1]);
+err_rm_int_clk:
+ kfree(clk_name);
+ clk_unregister(clk_wzrd->clks_internal[0]);
+err_disable_clk:
+ clk_disable_unprepare(clk_wzrd->axi_clk);
+
+ return ret;
+}
+
+static int clk_wzrd_remove(struct platform_device *pdev)
+{
+ int i;
+ struct clk_wzrd *clk_wzrd = platform_get_drvdata(pdev);
+
+ of_clk_del_provider(pdev->dev.of_node);
+
+ for (i = 0; i < WZRD_NUM_OUTPUTS; i++)
+ clk_unregister(clk_wzrd->clkout[i]);
+ for (i = 0; i < wzrd_clk_int_max; i++)
+ clk_unregister(clk_wzrd->clks_internal[i]);
+
+ if (clk_wzrd->speed_grade) {
+ clk_notifier_unregister(clk_wzrd->axi_clk, &clk_wzrd->nb);
+ clk_notifier_unregister(clk_wzrd->clk_in1, &clk_wzrd->nb);
+ }
+
+ clk_disable_unprepare(clk_wzrd->axi_clk);
+
+ return 0;
+}
+
+static const struct of_device_id clk_wzrd_ids[] = {
+ { .compatible = "xlnx,clocking-wizard" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, clk_wzrd_ids);
+
+static struct platform_driver clk_wzrd_driver = {
+ .driver = {
+ .name = "clk-wizard",
+ .of_match_table = clk_wzrd_ids,
+ .pm = &clk_wzrd_dev_pm_ops,
+ },
+ .probe = clk_wzrd_probe,
+ .remove = clk_wzrd_remove,
+};
+module_platform_driver(clk_wzrd_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Soeren Brinkmann <soren.brinkmann@xilinx.com>");
+MODULE_DESCRIPTION("Driver for the Xilinx Clocking Wizard IP core");
diff --git a/drivers/staging/clocking-wizard/dt-binding.txt b/drivers/staging/clocking-wizard/dt-binding.txt
new file mode 100644
index 00000000000..723271e9331
--- /dev/null
+++ b/drivers/staging/clocking-wizard/dt-binding.txt
@@ -0,0 +1,30 @@
+Binding for Xilinx Clocking Wizard IP Core
+
+This binding uses the common clock binding[1]. Details about the devices can be
+found in the product guide[2].
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+[2] Clocking Wizard Product Guide
+http://www.xilinx.com/support/documentation/ip_documentation/clk_wiz/v5_1/pg065-clk-wiz.pdf
+
+Required properties:
+ - compatible: Must be 'xlnx,clocking-wizard'
+ - reg: Base and size of the core's register space
+ - clocks: Handle to input clock
+ - clock-names: Tuple containing 'clk_in1' and 's_axi_aclk'
+ - clock-output-names: Names for the output clocks
+
+Optional properties:
+ - speed-grade: Speed grade of the device (valid values are 1..3)
+
+Example:
+ clock-generator@40040000 {
+ reg = <0x40040000 0x1000>;
+ compatible = "xlnx,clocking-wizard";
+ speed-grade = <1>;
+ clock-names = "clk_in1", "s_axi_aclk";
+ clocks = <&clkc 15>, <&clkc 15>;
+ clock-output-names = "clk_out0", "clk_out1", "clk_out2",
+ "clk_out3", "clk_out4", "clk_out5",
+ "clk_out6", "clk_out7";
+ };
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index 152f4c12ea4..a8201fe8751 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -384,6 +384,7 @@ config COMEDI_DT282X
config COMEDI_DMM32AT
tristate "Diamond Systems MM-32-AT PC/104 board support"
+ select COMEDI_8255
---help---
Enable support for Diamond Systems MM-32-AT PC/104 boards
@@ -564,11 +565,14 @@ config COMEDI_S526
endif # COMEDI_ISA_DRIVERS
menuconfig COMEDI_PCI_DRIVERS
- bool "Comedi PCI drivers"
+ tristate "Comedi PCI drivers"
depends on PCI
---help---
Enable support for comedi PCI drivers.
+ To compile this support as a module, choose M here: the module will
+ be called comedi_pci.
+
if COMEDI_PCI_DRIVERS
config COMEDI_8255_PCI
@@ -595,14 +599,6 @@ config COMEDI_ADDI_WATCHDOG
boards. This module will be automatically selected when needed. The
module will be called addi_watchdog.
-config COMEDI_ADDI_APCI_035
- tristate "ADDI-DATA APCI_035 support"
- ---help---
- Enable support for ADDI-DATA APCI_035 cards
-
- To compile this driver as a module, choose M here: the module will be
- called addi_apci_035.
-
config COMEDI_ADDI_APCI_1032
tristate "ADDI-DATA APCI_1032 support"
---help---
@@ -939,11 +935,11 @@ config COMEDI_CB_PCIDDA
called cb_pcidda.
config COMEDI_CB_PCIMDAS
- tristate "MeasurementComputing PCIM-DAS1602/16 support"
+ tristate "MeasurementComputing PCIM-DAS1602/16, PCIe-DAS1602/16 support"
select COMEDI_8255
---help---
Enable support for ComputerBoards/MeasurementComputing PCI Migration
- series PCIM-DAS1602/16
+ series PCIM-DAS1602/16 and PCIe-DAS1602/16.
To compile this driver as a module, choose M here: the module will be
called cb_pcimdas.
@@ -1084,11 +1080,14 @@ config COMEDI_NI_TIOCMD
endif # COMEDI_PCI_DRIVERS
menuconfig COMEDI_PCMCIA_DRIVERS
- bool "Comedi PCMCIA drivers"
+ tristate "Comedi PCMCIA drivers"
depends on PCMCIA
---help---
Enable support for comedi PCMCIA drivers.
+ To compile this support as a module, choose M here: the module will
+ be called comedi_pcmcia.
+
if COMEDI_PCMCIA_DRIVERS
config COMEDI_CB_DAS16_CS
@@ -1160,11 +1159,14 @@ config COMEDI_QUATECH_DAQP_CS
endif # COMEDI_PCMCIA_DRIVERS
menuconfig COMEDI_USB_DRIVERS
- bool "Comedi USB drivers"
+ tristate "Comedi USB drivers"
depends on USB
---help---
Enable support for comedi USB drivers.
+ To compile this support as a module, choose M here: the module will
+ be called comedi_usb.
+
if COMEDI_USB_DRIVERS
config COMEDI_DT9812
diff --git a/drivers/staging/comedi/Makefile b/drivers/staging/comedi/Makefile
index fae2d909000..7f9dfb3923a 100644
--- a/drivers/staging/comedi/Makefile
+++ b/drivers/staging/comedi/Makefile
@@ -2,12 +2,13 @@ ccflags-$(CONFIG_COMEDI_DEBUG) := -DDEBUG
comedi-y := comedi_fops.o range.o drivers.o \
comedi_buf.o
-comedi-$(CONFIG_COMEDI_PCI_DRIVERS) += comedi_pci.o
-comedi-$(CONFIG_COMEDI_PCMCIA_DRIVERS) += comedi_pcmcia.o
-comedi-$(CONFIG_COMEDI_USB_DRIVERS) += comedi_usb.o
comedi-$(CONFIG_PROC_FS) += proc.o
comedi-$(CONFIG_COMPAT) += comedi_compat32.o
+obj-$(CONFIG_COMEDI_PCI_DRIVERS) += comedi_pci.o
+obj-$(CONFIG_COMEDI_PCMCIA_DRIVERS) += comedi_pcmcia.o
+obj-$(CONFIG_COMEDI_USB_DRIVERS) += comedi_usb.o
+
obj-$(CONFIG_COMEDI) += comedi.o
obj-$(CONFIG_COMEDI) += kcomedilib/
diff --git a/drivers/staging/comedi/comedi.h b/drivers/staging/comedi/comedi.h
index c8c99e65423..74557407735 100644
--- a/drivers/staging/comedi/comedi.h
+++ b/drivers/staging/comedi/comedi.h
@@ -367,6 +367,8 @@ enum comedi_support_level {
#define COMEDI_BUFCONFIG _IOR(CIO, 13, struct comedi_bufconfig)
#define COMEDI_BUFINFO _IOWR(CIO, 14, struct comedi_bufinfo)
#define COMEDI_POLL _IO(CIO, 15)
+#define COMEDI_SETRSUBD _IO(CIO, 16)
+#define COMEDI_SETWSUBD _IO(CIO, 17)
/* structures */
@@ -514,17 +516,6 @@ struct comedi_bufinfo {
#define COMEDI_MIN_SPEED ((unsigned int)0xffffffff)
-/* callback stuff */
-/* only relevant to kernel modules. */
-
-#define COMEDI_CB_EOS 1 /* end of scan */
-#define COMEDI_CB_EOA 2 /* end of acquisition/output */
-#define COMEDI_CB_BLOCK 4 /* data has arrived:
- * wakes up read() / write() */
-#define COMEDI_CB_EOBUF 8 /* DEPRECATED: end of buffer */
-#define COMEDI_CB_ERROR 16 /* card error during acquisition */
-#define COMEDI_CB_OVERFLOW 32 /* buffer overflow/underflow */
-
/**********************************************************/
/* everything after this line is ALPHA */
/**********************************************************/
diff --git a/drivers/staging/comedi/comedi_buf.c b/drivers/staging/comedi/comedi_buf.c
index c60a45ad12b..19e7b229d15 100644
--- a/drivers/staging/comedi/comedi_buf.c
+++ b/drivers/staging/comedi/comedi_buf.c
@@ -236,6 +236,7 @@ void comedi_buf_reset(struct comedi_subdevice *s)
async->buf_read_ptr = 0;
async->cur_chan = 0;
+ async->scans_done = 0;
async->scan_progress = 0;
async->munge_chan = 0;
async->munge_count = 0;
@@ -252,15 +253,15 @@ static unsigned int comedi_buf_write_n_available(struct comedi_subdevice *s)
return free_end - async->buf_write_alloc_count;
}
-static unsigned int __comedi_buf_write_alloc(struct comedi_subdevice *s,
- unsigned int nbytes,
- int strict)
+/* allocates chunk for the writer from free buffer space */
+unsigned int comedi_buf_write_alloc(struct comedi_subdevice *s,
+ unsigned int nbytes)
{
struct comedi_async *async = s->async;
unsigned int available = comedi_buf_write_n_available(s);
if (nbytes > available)
- nbytes = strict ? 0 : available;
+ nbytes = available;
async->buf_write_alloc_count += nbytes;
@@ -272,13 +273,6 @@ static unsigned int __comedi_buf_write_alloc(struct comedi_subdevice *s,
return nbytes;
}
-
-/* allocates chunk for the writer from free buffer space */
-unsigned int comedi_buf_write_alloc(struct comedi_subdevice *s,
- unsigned int nbytes)
-{
- return __comedi_buf_write_alloc(s, nbytes, 0);
-}
EXPORT_SYMBOL_GPL(comedi_buf_write_alloc);
/*
@@ -290,7 +284,7 @@ static unsigned int comedi_buf_munge(struct comedi_subdevice *s,
{
struct comedi_async *async = s->async;
unsigned int count = 0;
- const unsigned num_sample_bytes = bytes_per_sample(s);
+ const unsigned num_sample_bytes = comedi_bytes_per_sample(s);
if (!s->munge || (async->cmd.flags & CMDF_RAWDATA)) {
async->munge_count += num_bytes;
@@ -427,43 +421,11 @@ unsigned int comedi_buf_read_free(struct comedi_subdevice *s,
}
EXPORT_SYMBOL_GPL(comedi_buf_read_free);
-int comedi_buf_put(struct comedi_subdevice *s, unsigned short x)
-{
- struct comedi_async *async = s->async;
- unsigned int n = __comedi_buf_write_alloc(s, sizeof(short), 1);
-
- if (n < sizeof(short)) {
- async->events |= COMEDI_CB_ERROR;
- return 0;
- }
- *(unsigned short *)(async->prealloc_buf + async->buf_write_ptr) = x;
- comedi_buf_write_free(s, sizeof(short));
- return 1;
-}
-EXPORT_SYMBOL_GPL(comedi_buf_put);
-
-int comedi_buf_get(struct comedi_subdevice *s, unsigned short *x)
+static void comedi_buf_memcpy_to(struct comedi_subdevice *s,
+ const void *data, unsigned int num_bytes)
{
struct comedi_async *async = s->async;
- unsigned int n = comedi_buf_read_n_available(s);
-
- if (n < sizeof(short))
- return 0;
- comedi_buf_read_alloc(s, sizeof(short));
- *x = *(unsigned short *)(async->prealloc_buf + async->buf_read_ptr);
- comedi_buf_read_free(s, sizeof(short));
- return 1;
-}
-EXPORT_SYMBOL_GPL(comedi_buf_get);
-
-void comedi_buf_memcpy_to(struct comedi_subdevice *s, unsigned int offset,
- const void *data, unsigned int num_bytes)
-{
- struct comedi_async *async = s->async;
- unsigned int write_ptr = async->buf_write_ptr + offset;
-
- if (write_ptr >= async->prealloc_bufsz)
- write_ptr %= async->prealloc_bufsz;
+ unsigned int write_ptr = async->buf_write_ptr;
while (num_bytes) {
unsigned int block_size;
@@ -481,17 +443,13 @@ void comedi_buf_memcpy_to(struct comedi_subdevice *s, unsigned int offset,
write_ptr = 0;
}
}
-EXPORT_SYMBOL_GPL(comedi_buf_memcpy_to);
-void comedi_buf_memcpy_from(struct comedi_subdevice *s, unsigned int offset,
- void *dest, unsigned int nbytes)
+static void comedi_buf_memcpy_from(struct comedi_subdevice *s,
+ void *dest, unsigned int nbytes)
{
void *src;
struct comedi_async *async = s->async;
- unsigned int read_ptr = async->buf_read_ptr + offset;
-
- if (read_ptr >= async->prealloc_bufsz)
- read_ptr %= async->prealloc_bufsz;
+ unsigned int read_ptr = async->buf_read_ptr;
while (nbytes) {
unsigned int block_size;
@@ -509,69 +467,84 @@ void comedi_buf_memcpy_from(struct comedi_subdevice *s, unsigned int offset,
read_ptr = 0;
}
}
-EXPORT_SYMBOL_GPL(comedi_buf_memcpy_from);
/**
- * comedi_write_array_to_buffer - write data to comedi buffer
+ * comedi_buf_write_samples - write sample data to comedi buffer
* @s: comedi_subdevice struct
- * @data: destination
- * @num_bytes: number of bytes to write
+ * @data: samples
+ * @nsamples: number of samples
*
- * Writes up to num_bytes bytes of data to the comedi buffer associated with
- * the subdevice, marks it as written and updates the acquisition scan
- * progress.
+ * Writes nsamples to the comedi buffer associated with the subdevice, marks
+ * it as written and updates the acquisition scan progress.
*
* Returns the amount of data written in bytes.
*/
-unsigned int comedi_write_array_to_buffer(struct comedi_subdevice *s,
- const void *data,
- unsigned int num_bytes)
+unsigned int comedi_buf_write_samples(struct comedi_subdevice *s,
+ const void *data, unsigned int nsamples)
{
- struct comedi_async *async = s->async;
- unsigned int retval;
-
- if (num_bytes == 0)
- return 0;
+ unsigned int max_samples;
+ unsigned int nbytes;
- retval = comedi_buf_write_alloc(s, num_bytes);
- if (retval != num_bytes) {
+ /*
+ * Make sure there is enough room in the buffer for all the samples.
+ * If not, clamp the nsamples to the number that will fit, flag the
+ * buffer overrun and add the samples that fit.
+ */
+ max_samples = comedi_bytes_to_samples(s,
+ comedi_buf_write_n_available(s));
+ if (nsamples > max_samples) {
dev_warn(s->device->class_dev, "buffer overrun\n");
- async->events |= COMEDI_CB_OVERFLOW;
- return 0;
+ s->async->events |= COMEDI_CB_OVERFLOW;
+ nsamples = max_samples;
}
- comedi_buf_memcpy_to(s, 0, data, num_bytes);
- comedi_buf_write_free(s, num_bytes);
- comedi_inc_scan_progress(s, num_bytes);
- async->events |= COMEDI_CB_BLOCK;
+ if (nsamples == 0)
+ return 0;
- return num_bytes;
+ nbytes = comedi_buf_write_alloc(s,
+ comedi_samples_to_bytes(s, nsamples));
+ comedi_buf_memcpy_to(s, data, nbytes);
+ comedi_buf_write_free(s, nbytes);
+ comedi_inc_scan_progress(s, nbytes);
+ s->async->events |= COMEDI_CB_BLOCK;
+
+ return nbytes;
}
-EXPORT_SYMBOL_GPL(comedi_write_array_to_buffer);
+EXPORT_SYMBOL_GPL(comedi_buf_write_samples);
/**
- * comedi_read_array_from_buffer - read data from comedi buffer
+ * comedi_buf_read_samples - read sample data from comedi buffer
* @s: comedi_subdevice struct
* @data: destination
- * @num_bytes: number of bytes to read
+ * @nsamples: maximum number of samples to read
*
- * Reads up to num_bytes bytes of data from the comedi buffer associated with
- * the subdevice, marks it as read and updates the acquisition scan progress.
+ * Reads up to nsamples from the comedi buffer associated with the subdevice,
+ * marks it as read and updates the acquisition scan progress.
*
* Returns the amount of data read in bytes.
*/
-unsigned int comedi_read_array_from_buffer(struct comedi_subdevice *s,
- void *data, unsigned int num_bytes)
+unsigned int comedi_buf_read_samples(struct comedi_subdevice *s,
+ void *data, unsigned int nsamples)
{
- if (num_bytes == 0)
+ unsigned int max_samples;
+ unsigned int nbytes;
+
+ /* clamp nsamples to the number of full samples available */
+ max_samples = comedi_bytes_to_samples(s,
+ comedi_buf_read_n_available(s));
+ if (nsamples > max_samples)
+ nsamples = max_samples;
+
+ if (nsamples == 0)
return 0;
- num_bytes = comedi_buf_read_alloc(s, num_bytes);
- comedi_buf_memcpy_from(s, 0, data, num_bytes);
- comedi_buf_read_free(s, num_bytes);
- comedi_inc_scan_progress(s, num_bytes);
+ nbytes = comedi_buf_read_alloc(s,
+ comedi_samples_to_bytes(s, nsamples));
+ comedi_buf_memcpy_from(s, data, nbytes);
+ comedi_buf_read_free(s, nbytes);
+ comedi_inc_scan_progress(s, nbytes);
s->async->events |= COMEDI_CB_BLOCK;
- return num_bytes;
+ return nbytes;
}
-EXPORT_SYMBOL_GPL(comedi_read_array_from_buffer);
+EXPORT_SYMBOL_GPL(comedi_buf_read_samples);
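A usage sketch of the samples-based helper introduced above, as a driver interrupt path might call it (the driver name and sample buffer are hypothetical; comedi_buf_write_samples() and comedi_event() are taken from this file and comedi_fops.c):

    /* Sketch: push one scan of acquired samples and report events. */
    static void mydrv_push_scan(struct comedi_device *dev,
    			    struct comedi_subdevice *s,
    			    const unsigned short *samples,
    			    unsigned int nsamples)
    {
    	/* Clamps to the free space and flags COMEDI_CB_OVERFLOW on overrun. */
    	comedi_buf_write_samples(s, samples, nsamples);

    	/* Wake readers and process s->async->events. */
    	comedi_event(dev, s);
    }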
diff --git a/drivers/staging/comedi/comedi_compat32.c b/drivers/staging/comedi/comedi_compat32.c
index 9b6f96f1591..5a4c74f703b 100644
--- a/drivers/staging/comedi/comedi_compat32.c
+++ b/drivers/staging/comedi/comedi_compat32.c
@@ -416,6 +416,8 @@ static inline int raw_ioctl(struct file *file, unsigned int cmd,
case COMEDI_UNLOCK:
case COMEDI_CANCEL:
case COMEDI_POLL:
+ case COMEDI_SETRSUBD:
+ case COMEDI_SETWSUBD:
/* No translation needed. */
rc = translated_ioctl(file, cmd, arg);
break;
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 9c32f027600..f143cb64d69 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -43,6 +43,22 @@
#include "comedi_internal.h"
+/**
+ * struct comedi_file - per-file private data for comedi device
+ * @dev: comedi_device struct
+ * @read_subdev: current "read" subdevice
+ * @write_subdev: current "write" subdevice
+ * @last_detach_count: last known detach count
+ * @last_attached: last known attached/detached state
+ */
+struct comedi_file {
+ struct comedi_device *dev;
+ struct comedi_subdevice *read_subdev;
+ struct comedi_subdevice *write_subdev;
+ unsigned int last_detach_count;
+ bool last_attached:1;
+};
+
#define COMEDI_NUM_MINORS 0x100
#define COMEDI_NUM_SUBDEVICE_MINORS \
(COMEDI_NUM_MINORS - COMEDI_NUM_BOARD_MINORS)
@@ -239,6 +255,54 @@ comedi_write_subdevice(const struct comedi_device *dev, unsigned int minor)
return dev->write_subdev;
}
+static void comedi_file_reset(struct file *file)
+{
+ struct comedi_file *cfp = file->private_data;
+ struct comedi_device *dev = cfp->dev;
+ struct comedi_subdevice *s, *read_s, *write_s;
+ unsigned int minor = iminor(file_inode(file));
+
+ read_s = dev->read_subdev;
+ write_s = dev->write_subdev;
+ if (minor >= COMEDI_NUM_BOARD_MINORS) {
+ s = comedi_subdevice_from_minor(dev, minor);
+ if (s == NULL || s->subdev_flags & SDF_CMD_READ)
+ read_s = s;
+ if (s == NULL || s->subdev_flags & SDF_CMD_WRITE)
+ write_s = s;
+ }
+ cfp->last_attached = dev->attached;
+ cfp->last_detach_count = dev->detach_count;
+ ACCESS_ONCE(cfp->read_subdev) = read_s;
+ ACCESS_ONCE(cfp->write_subdev) = write_s;
+}
+
+static void comedi_file_check(struct file *file)
+{
+ struct comedi_file *cfp = file->private_data;
+ struct comedi_device *dev = cfp->dev;
+
+ if (cfp->last_attached != dev->attached ||
+ cfp->last_detach_count != dev->detach_count)
+ comedi_file_reset(file);
+}
+
+static struct comedi_subdevice *comedi_file_read_subdevice(struct file *file)
+{
+ struct comedi_file *cfp = file->private_data;
+
+ comedi_file_check(file);
+ return ACCESS_ONCE(cfp->read_subdev);
+}
+
+static struct comedi_subdevice *comedi_file_write_subdevice(struct file *file)
+{
+ struct comedi_file *cfp = file->private_data;
+
+ comedi_file_check(file);
+ return ACCESS_ONCE(cfp->write_subdev);
+}
+
static int resize_async_buffer(struct comedi_device *dev,
struct comedi_subdevice *s, unsigned new_size)
{
@@ -776,7 +840,6 @@ static int do_devinfo_ioctl(struct comedi_device *dev,
struct comedi_devinfo __user *arg,
struct file *file)
{
- const unsigned minor = iminor(file_inode(file));
struct comedi_subdevice *s;
struct comedi_devinfo devinfo;
@@ -788,13 +851,13 @@ static int do_devinfo_ioctl(struct comedi_device *dev,
strlcpy(devinfo.driver_name, dev->driver->driver_name, COMEDI_NAMELEN);
strlcpy(devinfo.board_name, dev->board_name, COMEDI_NAMELEN);
- s = comedi_read_subdevice(dev, minor);
+ s = comedi_file_read_subdevice(file);
if (s)
devinfo.read_subdevice = s->index;
else
devinfo.read_subdevice = -1;
- s = comedi_write_subdevice(dev, minor);
+ s = comedi_file_write_subdevice(file);
if (s)
devinfo.write_subdevice = s->index;
else
@@ -991,7 +1054,7 @@ static int do_bufinfo_ioctl(struct comedi_device *dev,
if (s->busy != file)
return -EACCES;
- if (bi.bytes_read && (s->subdev_flags & SDF_CMD_READ)) {
+ if (bi.bytes_read && !(async->cmd.flags & CMDF_WRITE)) {
bi.bytes_read = comedi_buf_read_alloc(s, bi.bytes_read);
comedi_buf_read_free(s, bi.bytes_read);
@@ -1001,7 +1064,7 @@ static int do_bufinfo_ioctl(struct comedi_device *dev,
}
}
- if (bi.bytes_written && (s->subdev_flags & SDF_CMD_WRITE)) {
+ if (bi.bytes_written && (async->cmd.flags & CMDF_WRITE)) {
bi.bytes_written =
comedi_buf_write_alloc(s, bi.bytes_written);
comedi_buf_write_free(s, bi.bytes_written);
@@ -1451,6 +1514,21 @@ static int __comedi_get_user_cmd(struct comedi_device *dev,
return -EINVAL;
}
+ /*
+ * Set the CMDF_WRITE flag to the correct state if the subdevice
+ * supports only "read" commands or only "write" commands.
+ */
+ switch (s->subdev_flags & (SDF_CMD_READ | SDF_CMD_WRITE)) {
+ case SDF_CMD_READ:
+ cmd->flags &= ~CMDF_WRITE;
+ break;
+ case SDF_CMD_WRITE:
+ cmd->flags |= CMDF_WRITE;
+ break;
+ default:
+ break;
+ }
+
return 0;
}
@@ -1552,9 +1630,7 @@ static int do_cmd_ioctl(struct comedi_device *dev,
comedi_buf_reset(s);
- async->cb_mask =
- COMEDI_CB_EOA | COMEDI_CB_BLOCK | COMEDI_CB_ERROR |
- COMEDI_CB_OVERFLOW;
+ async->cb_mask = COMEDI_CB_BLOCK | COMEDI_CB_CANCEL_MASK;
if (async->cmd.flags & CMDF_WAKE_EOS)
async->cb_mask |= COMEDI_CB_EOS;
@@ -1720,7 +1796,6 @@ static int do_cancel_ioctl(struct comedi_device *dev, unsigned long arg,
void *file)
{
struct comedi_subdevice *s;
- int ret;
if (arg >= dev->n_subdevices)
return -EINVAL;
@@ -1734,9 +1809,7 @@ static int do_cancel_ioctl(struct comedi_device *dev, unsigned long arg,
if (s->busy != file)
return -EBUSY;
- ret = do_cancel(dev, s);
-
- return ret;
+ return do_cancel(dev, s);
}
/*
@@ -1774,11 +1847,96 @@ static int do_poll_ioctl(struct comedi_device *dev, unsigned long arg,
return -EINVAL;
}
+/*
+ * COMEDI_SETRSUBD ioctl
+ * sets the current "read" subdevice on a per-file basis
+ *
+ * arg:
+ * subdevice number
+ *
+ * reads:
+ * nothing
+ *
+ * writes:
+ * nothing
+ */
+static int do_setrsubd_ioctl(struct comedi_device *dev, unsigned long arg,
+ struct file *file)
+{
+ struct comedi_file *cfp = file->private_data;
+ struct comedi_subdevice *s_old, *s_new;
+
+ if (arg >= dev->n_subdevices)
+ return -EINVAL;
+
+ s_new = &dev->subdevices[arg];
+ s_old = comedi_file_read_subdevice(file);
+ if (s_old == s_new)
+ return 0; /* no change */
+
+ if (!(s_new->subdev_flags & SDF_CMD_READ))
+ return -EINVAL;
+
+ /*
+ * Check the file isn't still busy handling a "read" command on the
+ * old subdevice (if any).
+ */
+ if (s_old && s_old->busy == file && s_old->async &&
+ !(s_old->async->cmd.flags & CMDF_WRITE))
+ return -EBUSY;
+
+ ACCESS_ONCE(cfp->read_subdev) = s_new;
+ return 0;
+}
+
+/*
+ * COMEDI_SETWSUBD ioctl
+ * sets the current "write" subdevice on a per-file basis
+ *
+ * arg:
+ * subdevice number
+ *
+ * reads:
+ * nothing
+ *
+ * writes:
+ * nothing
+ */
+static int do_setwsubd_ioctl(struct comedi_device *dev, unsigned long arg,
+ struct file *file)
+{
+ struct comedi_file *cfp = file->private_data;
+ struct comedi_subdevice *s_old, *s_new;
+
+ if (arg >= dev->n_subdevices)
+ return -EINVAL;
+
+ s_new = &dev->subdevices[arg];
+ s_old = comedi_file_write_subdevice(file);
+ if (s_old == s_new)
+ return 0; /* no change */
+
+ if (!(s_new->subdev_flags & SDF_CMD_WRITE))
+ return -EINVAL;
+
+ /*
+ * Check the file isn't still busy handling a "write" command on the
+ * old subdevice (if any).
+ */
+ if (s_old && s_old->busy == file && s_old->async &&
+ (s_old->async->cmd.flags & CMDF_WRITE))
+ return -EBUSY;
+
+ ACCESS_ONCE(cfp->write_subdev) = s_new;
+ return 0;
+}
+
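From userspace the two new ioctls take a plain subdevice index as argument and affect only the calling file; a minimal sketch (device path, subdevice number and error handling are assumptions):

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include "comedi.h"		/* COMEDI_SETRSUBD / COMEDI_SETWSUBD */

    /* Sketch: bind this fd's "read" side to the given streaming subdevice. */
    static int select_read_subdevice(const char *path, unsigned int subdev)
    {
    	int fd = open(path, O_RDWR);

    	if (fd < 0)
    		return -1;
    	if (ioctl(fd, COMEDI_SETRSUBD, subdev) < 0) {
    		close(fd);
    		return -1;
    	}
    	return fd;
    }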
static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
- const unsigned minor = iminor(file_inode(file));
- struct comedi_device *dev = file->private_data;
+ unsigned minor = iminor(file_inode(file));
+ struct comedi_file *cfp = file->private_data;
+ struct comedi_device *dev = cfp->dev;
int rc;
mutex_lock(&dev->mutex);
@@ -1867,6 +2025,12 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
case COMEDI_POLL:
rc = do_poll_ioctl(dev, arg, file);
break;
+ case COMEDI_SETRSUBD:
+ rc = do_setrsubd_ioctl(dev, arg, file);
+ break;
+ case COMEDI_SETWSUBD:
+ rc = do_setwsubd_ioctl(dev, arg, file);
+ break;
default:
rc = -ENOTTY;
break;
@@ -1900,8 +2064,8 @@ static struct vm_operations_struct comedi_vm_ops = {
static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
{
- const unsigned minor = iminor(file_inode(file));
- struct comedi_device *dev = file->private_data;
+ struct comedi_file *cfp = file->private_data;
+ struct comedi_device *dev = cfp->dev;
struct comedi_subdevice *s;
struct comedi_async *async;
struct comedi_buf_map *bm = NULL;
@@ -1927,9 +2091,9 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
}
if (vma->vm_flags & VM_WRITE)
- s = comedi_write_subdevice(dev, minor);
+ s = comedi_file_write_subdevice(file);
else
- s = comedi_read_subdevice(dev, minor);
+ s = comedi_file_read_subdevice(file);
if (!s) {
retval = -EINVAL;
goto done;
@@ -1992,8 +2156,8 @@ done:
static unsigned int comedi_poll(struct file *file, poll_table *wait)
{
unsigned int mask = 0;
- const unsigned minor = iminor(file_inode(file));
- struct comedi_device *dev = file->private_data;
+ struct comedi_file *cfp = file->private_data;
+ struct comedi_device *dev = cfp->dev;
struct comedi_subdevice *s;
mutex_lock(&dev->mutex);
@@ -2003,21 +2167,23 @@ static unsigned int comedi_poll(struct file *file, poll_table *wait)
goto done;
}
- s = comedi_read_subdevice(dev, minor);
+ s = comedi_file_read_subdevice(file);
if (s && s->async) {
poll_wait(file, &s->async->wait_head, wait);
if (!s->busy || !comedi_is_subdevice_running(s) ||
+ (s->async->cmd.flags & CMDF_WRITE) ||
comedi_buf_read_n_available(s) > 0)
mask |= POLLIN | POLLRDNORM;
}
- s = comedi_write_subdevice(dev, minor);
+ s = comedi_file_write_subdevice(file);
if (s && s->async) {
- unsigned int bps = bytes_per_sample(s);
+ unsigned int bps = comedi_bytes_per_sample(s);
poll_wait(file, &s->async->wait_head, wait);
comedi_buf_write_alloc(s, s->async->prealloc_bufsz);
if (!s->busy || !comedi_is_subdevice_running(s) ||
+ !(s->async->cmd.flags & CMDF_WRITE) ||
comedi_buf_write_n_allocated(s) >= bps)
mask |= POLLOUT | POLLWRNORM;
}
@@ -2034,8 +2200,8 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
struct comedi_async *async;
int n, m, count = 0, retval = 0;
DECLARE_WAITQUEUE(wait, current);
- const unsigned minor = iminor(file_inode(file));
- struct comedi_device *dev = file->private_data;
+ struct comedi_file *cfp = file->private_data;
+ struct comedi_device *dev = cfp->dev;
bool on_wait_queue = false;
bool attach_locked;
unsigned int old_detach_count;
@@ -2051,7 +2217,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
goto out;
}
- s = comedi_write_subdevice(dev, minor);
+ s = comedi_file_write_subdevice(file);
if (!s || !s->async) {
retval = -EIO;
goto out;
@@ -2065,6 +2231,10 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
retval = -EACCES;
goto out;
}
+ if (!(async->cmd.flags & CMDF_WRITE)) {
+ retval = -EINVAL;
+ goto out;
+ }
add_wait_queue(&async->wait_head, &wait);
on_wait_queue = true;
@@ -2099,7 +2269,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
* meantime!), but check the subdevice pointer
* as well just in case.
*/
- new_s = comedi_write_subdevice(dev, minor);
+ new_s = comedi_file_write_subdevice(file);
if (dev->attached &&
old_detach_count == dev->detach_count &&
s == new_s && new_s->async == async)
@@ -2136,6 +2306,10 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
retval = -EACCES;
break;
}
+ if (!(async->cmd.flags & CMDF_WRITE)) {
+ retval = -EINVAL;
+ break;
+ }
continue;
}
@@ -2170,8 +2344,8 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
struct comedi_async *async;
int n, m, count = 0, retval = 0;
DECLARE_WAITQUEUE(wait, current);
- const unsigned minor = iminor(file_inode(file));
- struct comedi_device *dev = file->private_data;
+ struct comedi_file *cfp = file->private_data;
+ struct comedi_device *dev = cfp->dev;
unsigned int old_detach_count;
bool become_nonbusy = false;
bool attach_locked;
@@ -2187,7 +2361,7 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
goto out;
}
- s = comedi_read_subdevice(dev, minor);
+ s = comedi_file_read_subdevice(file);
if (!s || !s->async) {
retval = -EIO;
goto out;
@@ -2200,6 +2374,10 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
retval = -EACCES;
goto out;
}
+ if (async->cmd.flags & CMDF_WRITE) {
+ retval = -EINVAL;
+ goto out;
+ }
add_wait_queue(&async->wait_head, &wait);
while (nbytes > 0 && !retval) {
@@ -2239,6 +2417,10 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
retval = -EACCES;
break;
}
+ if (async->cmd.flags & CMDF_WRITE) {
+ retval = -EINVAL;
+ break;
+ }
continue;
}
m = copy_to_user(buf, async->prealloc_buf +
@@ -2276,7 +2458,7 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
* meantime!), but check the subdevice pointer as well just in
* case.
*/
- new_s = comedi_read_subdevice(dev, minor);
+ new_s = comedi_file_read_subdevice(file);
if (dev->attached && old_detach_count == dev->detach_count &&
s == new_s && new_s->async == async) {
if (become_nonbusy || comedi_buf_n_bytes_ready(s) == 0)
@@ -2294,6 +2476,7 @@ out:
static int comedi_open(struct inode *inode, struct file *file)
{
const unsigned minor = iminor(inode);
+ struct comedi_file *cfp;
struct comedi_device *dev = comedi_dev_get_from_minor(minor);
int rc;
@@ -2302,6 +2485,12 @@ static int comedi_open(struct inode *inode, struct file *file)
return -ENODEV;
}
+ cfp = kzalloc(sizeof(*cfp), GFP_KERNEL);
+ if (!cfp)
+ return -ENOMEM;
+
+ cfp->dev = dev;
+
mutex_lock(&dev->mutex);
if (!dev->attached && !capable(CAP_NET_ADMIN)) {
dev_dbg(dev->class_dev, "not attached and not CAP_NET_ADMIN\n");
@@ -2323,26 +2512,31 @@ static int comedi_open(struct inode *inode, struct file *file)
}
dev->use_count++;
- file->private_data = dev;
+ file->private_data = cfp;
+ comedi_file_reset(file);
rc = 0;
out:
mutex_unlock(&dev->mutex);
- if (rc)
+ if (rc) {
comedi_dev_put(dev);
+ kfree(cfp);
+ }
return rc;
}
static int comedi_fasync(int fd, struct file *file, int on)
{
- struct comedi_device *dev = file->private_data;
+ struct comedi_file *cfp = file->private_data;
+ struct comedi_device *dev = cfp->dev;
return fasync_helper(fd, file, on, &dev->async_queue);
}
static int comedi_close(struct inode *inode, struct file *file)
{
- struct comedi_device *dev = file->private_data;
+ struct comedi_file *cfp = file->private_data;
+ struct comedi_device *dev = cfp->dev;
struct comedi_subdevice *s = NULL;
int i;
@@ -2368,6 +2562,7 @@ static int comedi_close(struct inode *inode, struct file *file)
mutex_unlock(&dev->mutex);
comedi_dev_put(dev);
+ kfree(cfp);
return 0;
}
@@ -2395,14 +2590,14 @@ void comedi_event(struct comedi_device *dev, struct comedi_subdevice *s)
if (!comedi_is_subdevice_running(s))
return;
- if (s->
- async->events & (COMEDI_CB_EOA | COMEDI_CB_ERROR |
- COMEDI_CB_OVERFLOW)) {
+ if (s->async->events & COMEDI_CB_CANCEL_MASK)
runflags_mask |= SRF_RUNNING;
- }
- /* remember if an error event has occurred, so an error
- * can be returned the next time the user does a read() */
- if (s->async->events & (COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW)) {
+
+ /*
+ * Remember if an error event has occurred, so an error
+ * can be returned the next time the user does a read().
+ */
+ if (s->async->events & COMEDI_CB_ERROR_MASK) {
runflags_mask |= SRF_ERROR;
runflags |= SRF_ERROR;
}
diff --git a/drivers/staging/comedi/comedi_pci.c b/drivers/staging/comedi/comedi_pci.c
index aa0795a2660..6ba59c97700 100644
--- a/drivers/staging/comedi/comedi_pci.c
+++ b/drivers/staging/comedi/comedi_pci.c
@@ -16,6 +16,7 @@
* GNU General Public License for more details.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
@@ -168,3 +169,18 @@ void comedi_pci_driver_unregister(struct comedi_driver *comedi_driver,
comedi_driver_unregister(comedi_driver);
}
EXPORT_SYMBOL_GPL(comedi_pci_driver_unregister);
+
+static int __init comedi_pci_init(void)
+{
+ return 0;
+}
+module_init(comedi_pci_init);
+
+static void __exit comedi_pci_exit(void)
+{
+}
+module_exit(comedi_pci_exit);
+
+MODULE_AUTHOR("http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi PCI interface module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/comedi_pcmcia.c b/drivers/staging/comedi/comedi_pcmcia.c
index 9d49d5d01ad..0529bae8e5a 100644
--- a/drivers/staging/comedi/comedi_pcmcia.c
+++ b/drivers/staging/comedi/comedi_pcmcia.c
@@ -16,6 +16,7 @@
* GNU General Public License for more details.
*/
+#include <linux/module.h>
#include <linux/kernel.h>
#include <pcmcia/cistpl.h>
@@ -154,3 +155,18 @@ void comedi_pcmcia_driver_unregister(struct comedi_driver *comedi_driver,
comedi_driver_unregister(comedi_driver);
}
EXPORT_SYMBOL_GPL(comedi_pcmcia_driver_unregister);
+
+static int __init comedi_pcmcia_init(void)
+{
+ return 0;
+}
+module_init(comedi_pcmcia_init);
+
+static void __exit comedi_pcmcia_exit(void)
+{
+}
+module_exit(comedi_pcmcia_exit);
+
+MODULE_AUTHOR("http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi PCMCIA interface module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/comedi_usb.c b/drivers/staging/comedi/comedi_usb.c
index 13f18bef609..0b862a64c04 100644
--- a/drivers/staging/comedi/comedi_usb.c
+++ b/drivers/staging/comedi/comedi_usb.c
@@ -16,6 +16,7 @@
* GNU General Public License for more details.
*/
+#include <linux/module.h>
#include <linux/usb.h>
#include "comedidev.h"
@@ -114,3 +115,18 @@ void comedi_usb_driver_unregister(struct comedi_driver *comedi_driver,
comedi_driver_unregister(comedi_driver);
}
EXPORT_SYMBOL_GPL(comedi_usb_driver_unregister);
+
+static int __init comedi_usb_init(void)
+{
+ return 0;
+}
+module_init(comedi_usb_init);
+
+static void __exit comedi_usb_exit(void)
+{
+}
+module_exit(comedi_usb_exit);
+
+MODULE_AUTHOR("http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi USB interface module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
index 1b2bbd56f6e..77be191988c 100644
--- a/drivers/staging/comedi/comedidev.h
+++ b/drivers/staging/comedi/comedidev.h
@@ -121,6 +121,7 @@ struct comedi_buf_map {
* @buf_read_ptr: buffer position for reader
* @cur_chan: current position in chanlist for scan (for those
* drivers that use it)
+ * @scans_done: the number of scans completed (COMEDI_CB_EOS)
* @scan_progress: amount received or sent for current scan (in bytes)
* @munge_chan: current position in chanlist for "munging"
* @munge_count: "munge" count (in bytes, modulo 2**32)
@@ -201,6 +202,7 @@ struct comedi_async {
unsigned int buf_write_ptr;
unsigned int buf_read_ptr;
unsigned int cur_chan;
+ unsigned int scans_done;
unsigned int scan_progress;
unsigned int munge_chan;
unsigned int munge_count;
@@ -213,6 +215,28 @@ struct comedi_async {
unsigned int x);
};
+/**
+ * comedi_async callback "events"
+ * @COMEDI_CB_EOS: end-of-scan
+ * @COMEDI_CB_EOA: end-of-acquisition/output
+ * @COMEDI_CB_BLOCK: data has arrived, wakes up read() / write()
+ * @COMEDI_CB_EOBUF: DEPRECATED: end of buffer
+ * @COMEDI_CB_ERROR: card error during acquisition
+ * @COMEDI_CB_OVERFLOW: buffer overflow/underflow
+ *
+ * @COMEDI_CB_ERROR_MASK: events that indicate an error has occurred
+ * @COMEDI_CB_CANCEL_MASK: events that will cancel an async command
+ */
+#define COMEDI_CB_EOS (1 << 0)
+#define COMEDI_CB_EOA (1 << 1)
+#define COMEDI_CB_BLOCK (1 << 2)
+#define COMEDI_CB_EOBUF (1 << 3)
+#define COMEDI_CB_ERROR (1 << 4)
+#define COMEDI_CB_OVERFLOW (1 << 5)
+
+#define COMEDI_CB_ERROR_MASK (COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW)
+#define COMEDI_CB_CANCEL_MASK (COMEDI_CB_EOA | COMEDI_CB_ERROR_MASK)
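These event bits are what a driver's interrupt routine feeds back to the comedi core. A rough, hypothetical sketch of that usage (not part of this patch; the hw_error/acquisition_done status flags are invented for illustration, while comedi_handle_events() is declared later in this series):

    /* new data has been copied into the async buffer */
    s->async->events |= COMEDI_CB_BLOCK;

    if (hw_error)                   /* hypothetical hardware status */
        s->async->events |= COMEDI_CB_ERROR;  /* in COMEDI_CB_ERROR_MASK */
    if (acquisition_done)           /* hypothetical end-of-acquisition check */
        s->async->events |= COMEDI_CB_EOA;    /* in COMEDI_CB_CANCEL_MASK */

    /* any bit in COMEDI_CB_CANCEL_MASK makes comedi_handle_events()
     * call s->cancel() before waking up the waiting reader/writer */
    comedi_handle_events(dev, s);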
+
struct comedi_driver {
struct comedi_driver *next;
@@ -391,12 +415,61 @@ static inline unsigned int comedi_offset_munge(struct comedi_subdevice *s,
return val ^ s->maxdata ^ (s->maxdata >> 1);
}
-static inline unsigned int bytes_per_sample(const struct comedi_subdevice *subd)
+/**
+ * comedi_bytes_per_sample - determine subdevice sample size
+ * @s: comedi_subdevice struct
+ *
+ * The sample size will be 4 (sizeof int) or 2 (sizeof short) depending on
+ * whether the SDF_LSAMPL subdevice flag is set or not.
+ *
+ * Returns the subdevice sample size.
+ */
+static inline unsigned int comedi_bytes_per_sample(struct comedi_subdevice *s)
+{
+ return s->subdev_flags & SDF_LSAMPL ? sizeof(int) : sizeof(short);
+}
+
+/**
+ * comedi_sample_shift - determine log2 of subdevice sample size
+ * @s: comedi_subdevice struct
+ *
+ * The sample size will be 4 (sizeof int) or 2 (sizeof short) depending on
+ * whether the SDF_LSAMPL subdevice flag is set or not. The log2 of the
+ * sample size will be 2 or 1 and can be used as the right operand of a
+ * bit-shift operator to multiply or divide something by the sample size.
+ *
+ * Returns log2 of the subdevice sample size.
+ */
+static inline unsigned int comedi_sample_shift(struct comedi_subdevice *s)
{
- if (subd->subdev_flags & SDF_LSAMPL)
- return sizeof(unsigned int);
+ return s->subdev_flags & SDF_LSAMPL ? 2 : 1;
+}
- return sizeof(short);
+/**
+ * comedi_bytes_to_samples - converts a number of bytes to a number of samples
+ * @s: comedi_subdevice struct
+ * @nbytes: number of bytes
+ *
+ * Returns the number of bytes divided by the subdevice sample size.
+ */
+static inline unsigned int comedi_bytes_to_samples(struct comedi_subdevice *s,
+ unsigned int nbytes)
+{
+ return nbytes >> comedi_sample_shift(s);
+}
+
+/**
+ * comedi_samples_to_bytes - converts a number of samples to a number of bytes
+ * @s: comedi_subdevice struct
+ * @nsamples: number of samples
+ *
+ * Returns the number of samples multiplied by the subdevice sample size.
+ * Does not check for arithmetic overflow.
+ */
+static inline unsigned int comedi_samples_to_bytes(struct comedi_subdevice *s,
+ unsigned int nsamples)
+{
+ return nsamples << comedi_sample_shift(s);
}
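Taken together, the four helpers above replace open-coded sizeof() arithmetic in the drivers. A small illustration of the conversions, assuming a subdevice without SDF_LSAMPL (16-bit samples):

    unsigned int bps  = comedi_bytes_per_sample(s);     /* 2 bytes */
    unsigned int nspl = comedi_bytes_to_samples(s, 512); /* 256 samples */
    unsigned int nby  = comedi_samples_to_bytes(s, 256); /* 512 bytes */
    /* with SDF_LSAMPL set, the sample size would be 4 and the shift 2 */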
/*
@@ -419,18 +492,10 @@ unsigned int comedi_buf_read_n_available(struct comedi_subdevice *s);
unsigned int comedi_buf_read_alloc(struct comedi_subdevice *s, unsigned int n);
unsigned int comedi_buf_read_free(struct comedi_subdevice *s, unsigned int n);
-int comedi_buf_put(struct comedi_subdevice *s, unsigned short x);
-int comedi_buf_get(struct comedi_subdevice *s, unsigned short *x);
-
-void comedi_buf_memcpy_to(struct comedi_subdevice *s, unsigned int offset,
- const void *source, unsigned int num_bytes);
-void comedi_buf_memcpy_from(struct comedi_subdevice *s, unsigned int offset,
- void *destination, unsigned int num_bytes);
-unsigned int comedi_write_array_to_buffer(struct comedi_subdevice *s,
- const void *data,
- unsigned int num_bytes);
-unsigned int comedi_read_array_from_buffer(struct comedi_subdevice *s,
- void *data, unsigned int num_bytes);
+unsigned int comedi_buf_write_samples(struct comedi_subdevice *s,
+ const void *data, unsigned int nsamples);
+unsigned int comedi_buf_read_samples(struct comedi_subdevice *s,
+ void *data, unsigned int nsamples);
/* drivers.c - general comedi driver functions */
@@ -451,6 +516,10 @@ int comedi_dio_insn_config(struct comedi_device *, struct comedi_subdevice *,
unsigned int comedi_dio_update_state(struct comedi_subdevice *,
unsigned int *data);
unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s);
+unsigned int comedi_nscans_left(struct comedi_subdevice *s,
+ unsigned int nscans);
+unsigned int comedi_nsamples_left(struct comedi_subdevice *s,
+ unsigned int nsamples);
void comedi_inc_scan_progress(struct comedi_subdevice *s,
unsigned int num_bytes);
@@ -493,8 +562,6 @@ void comedi_driver_unregister(struct comedi_driver *);
module_driver(__comedi_driver, comedi_driver_register, \
comedi_driver_unregister)
-#ifdef CONFIG_COMEDI_PCI_DRIVERS
-
/* comedi_pci.c - comedi PCI driver specific functions */
/*
@@ -538,36 +605,6 @@ void comedi_pci_driver_unregister(struct comedi_driver *, struct pci_driver *);
module_driver(__comedi_driver, comedi_pci_driver_register, \
comedi_pci_driver_unregister, &(__pci_driver))
-#else
-
-/*
- * Some of the comedi mixed ISA/PCI drivers call the PCI specific
- * functions. Provide some dummy functions if CONFIG_COMEDI_PCI_DRIVERS
- * is not enabled.
- */
-
-static inline struct pci_dev *comedi_to_pci_dev(struct comedi_device *dev)
-{
- return NULL;
-}
-
-static inline int comedi_pci_enable(struct comedi_device *dev)
-{
- return -ENOSYS;
-}
-
-static inline void comedi_pci_disable(struct comedi_device *dev)
-{
-}
-
-static inline void comedi_pci_detach(struct comedi_device *dev)
-{
-}
-
-#endif /* CONFIG_COMEDI_PCI_DRIVERS */
-
-#ifdef CONFIG_COMEDI_PCMCIA_DRIVERS
-
/* comedi_pcmcia.c - comedi PCMCIA driver specific functions */
struct pcmcia_driver;
@@ -601,10 +638,6 @@ void comedi_pcmcia_driver_unregister(struct comedi_driver *,
module_driver(__comedi_driver, comedi_pcmcia_driver_register, \
comedi_pcmcia_driver_unregister, &(__pcmcia_driver))
-#endif /* CONFIG_COMEDI_PCMCIA_DRIVERS */
-
-#ifdef CONFIG_COMEDI_USB_DRIVERS
-
/* comedi_usb.c - comedi USB driver specific functions */
struct usb_driver;
@@ -634,6 +667,4 @@ void comedi_usb_driver_unregister(struct comedi_driver *, struct usb_driver *);
module_driver(__comedi_driver, comedi_usb_driver_register, \
comedi_usb_driver_unregister, &(__usb_driver))
-#endif /* CONFIG_COMEDI_USB_DRIVERS */
-
#endif /* _COMEDIDEV_H */
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 3e5bccbc9c3..61802d7947a 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -109,6 +109,10 @@ int comedi_alloc_subdev_readback(struct comedi_subdevice *s)
s->readback = kcalloc(s->n_chan, sizeof(*s->readback), GFP_KERNEL);
if (!s->readback)
return -ENOMEM;
+
+ if (!s->insn_read)
+ s->insn_read = comedi_readback_insn_read;
+
return 0;
}
EXPORT_SYMBOL_GPL(comedi_alloc_subdev_readback);
@@ -316,19 +320,91 @@ unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
case COMEDI_SUBD_DI:
case COMEDI_SUBD_DO:
case COMEDI_SUBD_DIO:
- bits_per_sample = 8 * bytes_per_sample(s);
- num_samples = (cmd->chanlist_len + bits_per_sample - 1) /
- bits_per_sample;
+ bits_per_sample = 8 * comedi_bytes_per_sample(s);
+ num_samples = DIV_ROUND_UP(cmd->scan_end_arg, bits_per_sample);
break;
default:
- num_samples = cmd->chanlist_len;
+ num_samples = cmd->scan_end_arg;
break;
}
- return num_samples * bytes_per_sample(s);
+ return comedi_samples_to_bytes(s, num_samples);
}
EXPORT_SYMBOL_GPL(comedi_bytes_per_scan);
/**
+ * comedi_nscans_left - return the number of scans left in the command
+ * @s: comedi_subdevice struct
+ * @nscans: the expected number of scans
+ *
+ * If nscans is 0, the number of scans available in the async buffer will be
+ * used. Otherwise the expected number of scans will be used.
+ *
+ * If the async command has a stop_src of TRIG_COUNT, the nscans will be
+ * checked against the number of scans left in the command.
+ *
+ * The return value will then be either the expected number of scans or the
+ * number of scans remaining in the command.
+ */
+unsigned int comedi_nscans_left(struct comedi_subdevice *s,
+ unsigned int nscans)
+{
+ struct comedi_async *async = s->async;
+ struct comedi_cmd *cmd = &async->cmd;
+
+ if (nscans == 0) {
+ unsigned int nbytes = comedi_buf_read_n_available(s);
+
+ nscans = nbytes / comedi_bytes_per_scan(s);
+ }
+
+ if (cmd->stop_src == TRIG_COUNT) {
+ unsigned int scans_left = 0;
+
+ if (async->scans_done < cmd->stop_arg)
+ scans_left = cmd->stop_arg - async->scans_done;
+
+ if (nscans > scans_left)
+ nscans = scans_left;
+ }
+ return nscans;
+}
+EXPORT_SYMBOL_GPL(comedi_nscans_left);
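A plausible use of comedi_nscans_left(), sketched for an analog-output driver that drains whole scans from the async buffer (illustrative only, not taken from this patch):

    /* nscans == 0: use whatever full scans are available in the buffer,
     * clamped to the scans still owed by a TRIG_COUNT command */
    unsigned int nscans = comedi_nscans_left(s, 0);
    unsigned int nsamples = nscans * s->async->cmd.scan_end_arg;
    /* transfer at most 'nsamples' samples to the hardware */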
+
+/**
+ * comedi_nsamples_left - return the number of samples left in the command
+ * @s: comedi_subdevice struct
+ * @nsamples: the expected number of samples
+ *
+ * Returns the expected number of samples or the number of samples remaining
+ * in the command.
+ */
+unsigned int comedi_nsamples_left(struct comedi_subdevice *s,
+ unsigned int nsamples)
+{
+ struct comedi_async *async = s->async;
+ struct comedi_cmd *cmd = &async->cmd;
+
+ if (cmd->stop_src == TRIG_COUNT) {
+ /* +1 to force comedi_nscans_left() to return the scans left */
+ unsigned int nscans = (nsamples / cmd->scan_end_arg) + 1;
+ unsigned int scans_left = comedi_nscans_left(s, nscans);
+ unsigned int scan_pos =
+ comedi_bytes_to_samples(s, async->scan_progress);
+ unsigned long long samples_left = 0;
+
+ if (scans_left) {
+ samples_left = ((unsigned long long)scans_left *
+ cmd->scan_end_arg) - scan_pos;
+ }
+
+ if (samples_left < nsamples)
+ nsamples = samples_left;
+ }
+ return nsamples;
+}
+EXPORT_SYMBOL_GPL(comedi_nsamples_left);
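And a minimal, hypothetical analog-input FIFO drain routine showing comedi_nsamples_left() together with the new comedi_buf_write_samples(); the mydrv_* helpers and MYDRV_FIFO_DEPTH are invented for illustration:

    static void mydrv_drain_fifo(struct comedi_device *dev,
                                 struct comedi_subdevice *s)
    {
        unsigned short data[MYDRV_FIFO_DEPTH];  /* 16-bit samples assumed */
        unsigned int nsamples;
        unsigned int i;

        /* never read past the end of a TRIG_COUNT command */
        nsamples = comedi_nsamples_left(s, mydrv_fifo_count(dev));
        for (i = 0; i < nsamples; i++)
            data[i] = mydrv_read_fifo(dev);     /* hypothetical register read */

        comedi_buf_write_samples(s, data, nsamples);

        if (s->async->cmd.stop_src == TRIG_COUNT &&
            s->async->scans_done >= s->async->cmd.stop_arg)
            s->async->events |= COMEDI_CB_EOA;
    }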
+
+/**
* comedi_inc_scan_progress - update scan progress in asynchronous command
* @s: comedi_subdevice struct
* @num_bytes: amount of data in bytes to increment scan progress
@@ -342,10 +418,24 @@ void comedi_inc_scan_progress(struct comedi_subdevice *s,
unsigned int num_bytes)
{
struct comedi_async *async = s->async;
+ struct comedi_cmd *cmd = &async->cmd;
unsigned int scan_length = comedi_bytes_per_scan(s);
+ /* track the 'cur_chan' for non-SDF_PACKED subdevices */
+ if (!(s->subdev_flags & SDF_PACKED)) {
+ async->cur_chan += comedi_bytes_to_samples(s, num_bytes);
+ async->cur_chan %= cmd->chanlist_len;
+ }
+
async->scan_progress += num_bytes;
if (async->scan_progress >= scan_length) {
+ unsigned int nscans = async->scan_progress / scan_length;
+
+ if (async->scans_done < (UINT_MAX - nscans))
+ async->scans_done += nscans;
+ else
+ async->scans_done = UINT_MAX;
+
async->scan_progress %= scan_length;
async->events |= COMEDI_CB_EOS;
}
@@ -376,7 +466,7 @@ unsigned int comedi_handle_events(struct comedi_device *dev,
if (events == 0)
return events;
- if (events & (COMEDI_CB_EOA | COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW))
+ if (events & COMEDI_CB_CANCEL_MASK)
s->cancel(dev, s);
comedi_event(dev, s);
diff --git a/drivers/staging/comedi/drivers/Makefile b/drivers/staging/comedi/drivers/Makefile
index 6bc9ef3b25b..84fdf20ca98 100644
--- a/drivers/staging/comedi/drivers/Makefile
+++ b/drivers/staging/comedi/drivers/Makefile
@@ -60,7 +60,6 @@ obj-$(CONFIG_COMEDI_S526) += s526.o
# Comedi PCI drivers
obj-$(CONFIG_COMEDI_8255_PCI) += 8255_pci.o
obj-$(CONFIG_COMEDI_ADDI_WATCHDOG) += addi_watchdog.o
-obj-$(CONFIG_COMEDI_ADDI_APCI_035) += addi_apci_035.o
obj-$(CONFIG_COMEDI_ADDI_APCI_1032) += addi_apci_1032.o
obj-$(CONFIG_COMEDI_ADDI_APCI_1500) += addi_apci_1500.o
obj-$(CONFIG_COMEDI_ADDI_APCI_1516) += addi_apci_1516.o
diff --git a/drivers/staging/comedi/drivers/addi-data/addi_common.c b/drivers/staging/comedi/drivers/addi-data/addi_common.c
deleted file mode 100644
index 2e7fb218340..00000000000
--- a/drivers/staging/comedi/drivers/addi-data/addi_common.c
+++ /dev/null
@@ -1,274 +0,0 @@
-/**
-@verbatim
-
-Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
-
- ADDI-DATA GmbH
- Dieselstrasse 3
- D-77833 Ottersweier
- Tel: +19(0)7223/9493-0
- Fax: +49(0)7223/9493-92
- http://www.addi-data.com
- info@addi-data.com
-
-This program is free software; you can redistribute it and/or modify it under
-the terms of the GNU General Public License as published by the Free Software
-Foundation; either version 2 of the License, or (at your option) any later
-version.
-
-This program is distributed in the hope that it will be useful, but WITHOUT ANY
-WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-@endverbatim
-*/
-/*
-
- +-----------------------------------------------------------------------+
- | (C) ADDI-DATA GmbH Dieselstrasse 3 D-77833 Ottersweier |
- +-----------------------------------------------------------------------+
- | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
- | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
- +-----------------------------------------------------------------------+
- | Project : ADDI DATA | Compiler : GCC |
- | Modulname : addi_common.c | Version : 2.96 |
- +-------------------------------+---------------------------------------+
- | Author : | Date : |
- +-----------------------------------------------------------------------+
- | Description : ADDI COMMON Main Module |
- +-----------------------------------------------------------------------+
-*/
-
-static int i_ADDIDATA_InsnReadEeprom(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- const struct addi_board *this_board = dev->board_ptr;
- struct addi_private *devpriv = dev->private;
- unsigned short w_Address = CR_CHAN(insn->chanspec);
- unsigned short w_Data;
-
- w_Data = addi_eeprom_readw(devpriv->i_IobaseAmcc,
- this_board->pc_EepromChip, 2 * w_Address);
- data[0] = w_Data;
-
- return insn->n;
-}
-
-static irqreturn_t v_ADDI_Interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- const struct addi_board *this_board = dev->board_ptr;
-
- this_board->interrupt(irq, d);
- return IRQ_RETVAL(1);
-}
-
-static int i_ADDI_Reset(struct comedi_device *dev)
-{
- const struct addi_board *this_board = dev->board_ptr;
-
- this_board->reset(dev);
- return 0;
-}
-
-static int addi_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- const struct addi_board *this_board = dev->board_ptr;
- struct addi_private *devpriv;
- struct comedi_subdevice *s;
- int ret, n_subdevices;
- unsigned int dw_Dummy;
-
- dev->board_name = this_board->pc_DriverName;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
-
- if (this_board->i_IorangeBase1)
- dev->iobase = pci_resource_start(pcidev, 1);
- else
- dev->iobase = pci_resource_start(pcidev, 0);
-
- devpriv->iobase = dev->iobase;
- devpriv->i_IobaseAmcc = pci_resource_start(pcidev, 0);
- devpriv->i_IobaseAddon = pci_resource_start(pcidev, 2);
- devpriv->i_IobaseReserved = pci_resource_start(pcidev, 3);
-
- /* Initialize parameters that can be overridden in EEPROM */
- devpriv->s_EeParameters.i_NbrAiChannel = this_board->i_NbrAiChannel;
- devpriv->s_EeParameters.i_NbrAoChannel = this_board->i_NbrAoChannel;
- devpriv->s_EeParameters.i_AiMaxdata = this_board->i_AiMaxdata;
- devpriv->s_EeParameters.i_AoMaxdata = this_board->i_AoMaxdata;
- devpriv->s_EeParameters.i_NbrDiChannel = this_board->i_NbrDiChannel;
- devpriv->s_EeParameters.i_NbrDoChannel = this_board->i_NbrDoChannel;
- devpriv->s_EeParameters.i_DoMaxdata = this_board->i_DoMaxdata;
- devpriv->s_EeParameters.i_Timer = this_board->i_Timer;
- devpriv->s_EeParameters.ui_MinAcquisitiontimeNs =
- this_board->ui_MinAcquisitiontimeNs;
- devpriv->s_EeParameters.ui_MinDelaytimeNs =
- this_board->ui_MinDelaytimeNs;
-
- /* ## */
-
- if (pcidev->irq > 0) {
- ret = request_irq(pcidev->irq, v_ADDI_Interrupt, IRQF_SHARED,
- dev->board_name, dev);
- if (ret == 0)
- dev->irq = pcidev->irq;
- }
-
- /* Read eepeom and fill addi_board Structure */
-
- if (this_board->i_PCIEeprom) {
- if (!(strcmp(this_board->pc_EepromChip, "S5920"))) {
- /* Set 3 wait stait */
- if (!(strcmp(dev->board_name, "apci035")))
- outl(0x80808082, devpriv->i_IobaseAmcc + 0x60);
- else
- outl(0x83838383, devpriv->i_IobaseAmcc + 0x60);
-
- /* Enable the interrupt for the controller */
- dw_Dummy = inl(devpriv->i_IobaseAmcc + 0x38);
- outl(dw_Dummy | 0x2000, devpriv->i_IobaseAmcc + 0x38);
- }
- addi_eeprom_read_info(dev, pci_resource_start(pcidev, 0));
- }
-
- n_subdevices = 7;
- ret = comedi_alloc_subdevices(dev, n_subdevices);
- if (ret)
- return ret;
-
- /* Allocate and Initialise AI Subdevice Structures */
- s = &dev->subdevices[0];
- if ((devpriv->s_EeParameters.i_NbrAiChannel)
- || (this_board->i_NbrAiChannelDiff)) {
- dev->read_subdev = s;
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags =
- SDF_READABLE | SDF_COMMON | SDF_GROUND
- | SDF_DIFF;
- if (devpriv->s_EeParameters.i_NbrAiChannel)
- s->n_chan = devpriv->s_EeParameters.i_NbrAiChannel;
- else
- s->n_chan = this_board->i_NbrAiChannelDiff;
- s->maxdata = devpriv->s_EeParameters.i_AiMaxdata;
- s->len_chanlist = this_board->i_AiChannelList;
- s->range_table = this_board->pr_AiRangelist;
-
- s->insn_config = this_board->ai_config;
- s->insn_read = this_board->ai_read;
- s->insn_write = this_board->ai_write;
- s->insn_bits = this_board->ai_bits;
- s->do_cmdtest = this_board->ai_cmdtest;
- s->do_cmd = this_board->ai_cmd;
- s->cancel = this_board->ai_cancel;
-
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- /* Allocate and Initialise AO Subdevice Structures */
- s = &dev->subdevices[1];
- if (devpriv->s_EeParameters.i_NbrAoChannel) {
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITEABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = devpriv->s_EeParameters.i_NbrAoChannel;
- s->maxdata = devpriv->s_EeParameters.i_AoMaxdata;
- s->len_chanlist =
- devpriv->s_EeParameters.i_NbrAoChannel;
- s->insn_write = this_board->ao_write;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
- /* Allocate and Initialise DI Subdevice Structures */
- s = &dev->subdevices[2];
- if (devpriv->s_EeParameters.i_NbrDiChannel) {
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = devpriv->s_EeParameters.i_NbrDiChannel;
- s->maxdata = 1;
- s->len_chanlist =
- devpriv->s_EeParameters.i_NbrDiChannel;
- s->range_table = &range_digital;
- s->insn_config = this_board->di_config;
- s->insn_read = this_board->di_read;
- s->insn_write = this_board->di_write;
- s->insn_bits = this_board->di_bits;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
- /* Allocate and Initialise DO Subdevice Structures */
- s = &dev->subdevices[3];
- if (devpriv->s_EeParameters.i_NbrDoChannel) {
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags =
- SDF_READABLE | SDF_WRITEABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = devpriv->s_EeParameters.i_NbrDoChannel;
- s->maxdata = devpriv->s_EeParameters.i_DoMaxdata;
- s->len_chanlist =
- devpriv->s_EeParameters.i_NbrDoChannel;
- s->range_table = &range_digital;
-
- /* insn_config - for digital output memory */
- s->insn_config = this_board->do_config;
- s->insn_write = this_board->do_write;
- s->insn_bits = this_board->do_bits;
- s->insn_read = this_board->do_read;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- /* Allocate and Initialise Timer Subdevice Structures */
- s = &dev->subdevices[4];
- if (devpriv->s_EeParameters.i_Timer) {
- s->type = COMEDI_SUBD_TIMER;
- s->subdev_flags = SDF_WRITEABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = 1;
- s->maxdata = 0;
- s->len_chanlist = 1;
- s->range_table = &range_digital;
-
- s->insn_write = this_board->timer_write;
- s->insn_read = this_board->timer_read;
- s->insn_config = this_board->timer_config;
- s->insn_bits = this_board->timer_bits;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- /* Allocate and Initialise TTL */
- s = &dev->subdevices[5];
- s->type = COMEDI_SUBD_UNUSED;
-
- /* EEPROM */
- s = &dev->subdevices[6];
- if (this_board->i_PCIEeprom) {
- s->type = COMEDI_SUBD_MEMORY;
- s->subdev_flags = SDF_READABLE | SDF_INTERNAL;
- s->n_chan = 256;
- s->maxdata = 0xffff;
- s->insn_read = i_ADDIDATA_InsnReadEeprom;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- i_ADDI_Reset(dev);
- return 0;
-}
-
-static void i_ADDI_Detach(struct comedi_device *dev)
-{
- if (dev->iobase)
- i_ADDI_Reset(dev);
- comedi_pci_detach(dev);
-}
diff --git a/drivers/staging/comedi/drivers/addi-data/addi_common.h b/drivers/staging/comedi/drivers/addi-data/addi_common.h
deleted file mode 100644
index e2a3ffeee5c..00000000000
--- a/drivers/staging/comedi/drivers/addi-data/addi_common.h
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
- *
- * ADDI-DATA GmbH
- * Dieselstrasse 3
- * D-77833 Ottersweier
- * Tel: +19(0)7223/9493-0
- * Fax: +49(0)7223/9493-92
- * http://www.addi-data.com
- * info@addi-data.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- */
-
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-
-struct addi_board {
- const char *pc_DriverName; /* driver name */
- int i_IorangeBase1;
- int i_PCIEeprom; /* eeprom present or not */
- char *pc_EepromChip; /* type of chip */
- int i_NbrAiChannel; /* num of A/D chans */
- int i_NbrAiChannelDiff; /* num of A/D chans in diff mode */
- int i_AiChannelList; /* len of chanlist */
- int i_NbrAoChannel; /* num of D/A chans */
- int i_AiMaxdata; /* resolution of A/D */
- int i_AoMaxdata; /* resolution of D/A */
- const struct comedi_lrange *pr_AiRangelist; /* rangelist for A/D */
-
- int i_NbrDiChannel; /* Number of DI channels */
- int i_NbrDoChannel; /* Number of DO channels */
- int i_DoMaxdata; /* data to set all channels high */
-
- int i_Timer; /* timer subdevice present or not */
- unsigned int ui_MinAcquisitiontimeNs; /* Minimum Acquisition in Nano secs */
- unsigned int ui_MinDelaytimeNs; /* Minimum Delay in Nano secs */
-
- /* interrupt and reset */
- void (*interrupt)(int irq, void *d);
- int (*reset)(struct comedi_device *);
-
- /* Subdevice functions */
-
- /* ANALOG INPUT */
- int (*ai_config)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*ai_read)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*ai_write)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*ai_bits)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*ai_cmdtest)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_cmd *);
- int (*ai_cmd)(struct comedi_device *, struct comedi_subdevice *);
- int (*ai_cancel)(struct comedi_device *, struct comedi_subdevice *);
-
- /* Analog Output */
- int (*ao_write)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
-
- /* Digital Input */
- int (*di_config)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*di_read)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*di_write)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*di_bits)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
-
- /* Digital Output */
- int (*do_config)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*do_write)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*do_bits)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*do_read)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
-
- /* TIMER */
- int (*timer_config)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*timer_write)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*timer_read)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*timer_bits)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
-};
-
-struct addi_private {
- int iobase;
- int i_IobaseAmcc; /* base+size for AMCC chip */
- int i_IobaseAddon; /* addon base address */
- int i_IobaseReserved;
- unsigned int ui_AiActualScan; /* how many scans we finished */
- unsigned int ui_AiNbrofChannels; /* how many channels is measured */
- unsigned int ui_AiChannelList[32]; /* actual chanlist */
- unsigned int ui_AiReadData[32];
- unsigned short us_UseDma; /* To use Dma or not */
- unsigned char b_DmaDoubleBuffer; /* we can use double buffering */
- unsigned int ui_DmaActualBuffer; /* which buffer is used now */
- unsigned short *ul_DmaBufferVirtual[2]; /* pointers to DMA buffer */
- dma_addr_t ul_DmaBufferHw[2]; /* hw address of DMA buff */
- unsigned int ui_DmaBufferSize[2]; /* size of dma buffer in bytes */
- unsigned int ui_DmaBufferUsesize[2]; /* which size we may now used for transfer */
- unsigned char b_DigitalOutputRegister; /* Digital Output Register */
- unsigned char b_OutputMemoryStatus;
- unsigned char b_TimerSelectMode; /* Contain data written at iobase + 0C */
- unsigned char b_ModeSelectRegister; /* Contain data written at iobase + 0E */
- unsigned short us_OutputRegister; /* Contain data written at iobase + 0 */
- unsigned char b_Timer2Mode; /* Specify the timer 2 mode */
- unsigned char b_Timer2Interrupt; /* Timer2 interrupt enable or disable */
- unsigned int ai_running:1;
- unsigned char b_InterruptMode; /* eoc eos or dma */
- unsigned char b_EocEosInterrupt; /* Enable disable eoc eos interrupt */
- unsigned int ui_EocEosConversionTime;
- unsigned char b_ExttrigEnable; /* To enable or disable external trigger */
-
- /* Pointer to the current process */
- struct task_struct *tsk_Current;
-
- /* Parameters read from EEPROM overriding static board info */
- struct {
- int i_NbrAiChannel; /* num of A/D chans */
- int i_NbrAoChannel; /* num of D/A chans */
- int i_AiMaxdata; /* resolution of A/D */
- int i_AoMaxdata; /* resolution of D/A */
- int i_NbrDiChannel; /* Number of DI channels */
- int i_NbrDoChannel; /* Number of DO channels */
- int i_DoMaxdata; /* data to set all channels high */
- int i_Timer; /* timer subdevice present or not */
- unsigned int ui_MinAcquisitiontimeNs;
- /* Minimum Acquisition in Nano secs */
- unsigned int ui_MinDelaytimeNs;
- /* Minimum Delay in Nano secs */
- } s_EeParameters;
-};
diff --git a/drivers/staging/comedi/drivers/addi-data/addi_eeprom.c b/drivers/staging/comedi/drivers/addi-data/addi_eeprom.c
deleted file mode 100644
index b731856c27d..00000000000
--- a/drivers/staging/comedi/drivers/addi-data/addi_eeprom.c
+++ /dev/null
@@ -1,360 +0,0 @@
-/*
- * addi_eeprom.c - ADDI EEPROM Module
- * Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
- * Project manager: Eric Stolz
- *
- * ADDI-DATA GmbH
- * Dieselstrasse 3
- * D-77833 Ottersweier
- * Tel: +19(0)7223/9493-0
- * Fax: +49(0)7223/9493-92
- * http://www.addi-data.com
- * info@addi-data.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- */
-
-#include <linux/delay.h>
-
-#define NVRAM_USER_DATA_START 0x100
-
-#define NVCMD_BEGIN_READ (0x7 << 5) /* nvRam begin read command */
-#define NVCMD_LOAD_LOW (0x4 << 5) /* nvRam load low command */
-#define NVCMD_LOAD_HIGH (0x5 << 5) /* nvRam load high command */
-
-#define EE93C76_CLK_BIT (1 << 0)
-#define EE93C76_CS_BIT (1 << 1)
-#define EE93C76_DOUT_BIT (1 << 2)
-#define EE93C76_DIN_BIT (1 << 3)
-#define EE93C76_READ_CMD (0x0180 << 4)
-#define EE93C76_CMD_LEN 13
-
-#define EEPROM_DIGITALINPUT 0
-#define EEPROM_DIGITALOUTPUT 1
-#define EEPROM_ANALOGINPUT 2
-#define EEPROM_ANALOGOUTPUT 3
-#define EEPROM_TIMER 4
-#define EEPROM_WATCHDOG 5
-#define EEPROM_TIMER_WATCHDOG_COUNTER 10
-
-static void addi_eeprom_clk_93c76(unsigned long iobase, unsigned int val)
-{
- outl(val & ~EE93C76_CLK_BIT, iobase);
- udelay(100);
-
- outl(val | EE93C76_CLK_BIT, iobase);
- udelay(100);
-}
-
-static unsigned int addi_eeprom_cmd_93c76(unsigned long iobase,
- unsigned int cmd,
- unsigned char len)
-{
- unsigned int val = EE93C76_CS_BIT;
- int i;
-
- /* Toggle EEPROM's Chip select to get it out of Shift Register Mode */
- outl(val, iobase);
- udelay(100);
-
- /* Send EEPROM command - one bit at a time */
- for (i = (len - 1); i >= 0; i--) {
- if (cmd & (1 << i))
- val |= EE93C76_DOUT_BIT;
- else
- val &= ~EE93C76_DOUT_BIT;
-
- /* Write the command */
- outl(val, iobase);
- udelay(100);
-
- addi_eeprom_clk_93c76(iobase, val);
- }
- return val;
-}
-
-static unsigned short addi_eeprom_readw_93c76(unsigned long iobase,
- unsigned short addr)
-{
- unsigned short val = 0;
- unsigned int cmd;
- unsigned int tmp;
- int i;
-
- /* Send EEPROM read command and offset to EEPROM */
- cmd = EE93C76_READ_CMD | (addr / 2);
- cmd = addi_eeprom_cmd_93c76(iobase, cmd, EE93C76_CMD_LEN);
-
- /* Get the 16-bit value */
- for (i = 0; i < 16; i++) {
- addi_eeprom_clk_93c76(iobase, cmd);
-
- tmp = inl(iobase);
- udelay(100);
-
- val <<= 1;
- if (tmp & EE93C76_DIN_BIT)
- val |= 0x1;
- }
-
- /* Toggle EEPROM's Chip select to get it out of Shift Register Mode */
- outl(0, iobase);
- udelay(100);
-
- return val;
-}
-
-static void addi_eeprom_nvram_wait(unsigned long iobase)
-{
- unsigned char val;
-
- do {
- val = inb(iobase + AMCC_OP_REG_MCSR_NVCMD);
- } while (val & 0x80);
-}
-
-static unsigned short addi_eeprom_readw_nvram(unsigned long iobase,
- unsigned short addr)
-{
- unsigned short val = 0;
- unsigned char tmp;
- unsigned char i;
-
- for (i = 0; i < 2; i++) {
- /* Load the low 8 bit address */
- outb(NVCMD_LOAD_LOW, iobase + AMCC_OP_REG_MCSR_NVCMD);
- addi_eeprom_nvram_wait(iobase);
- outb((addr + i) & 0xff, iobase + AMCC_OP_REG_MCSR_NVDATA);
- addi_eeprom_nvram_wait(iobase);
-
- /* Load the high 8 bit address */
- outb(NVCMD_LOAD_HIGH, iobase + AMCC_OP_REG_MCSR_NVCMD);
- addi_eeprom_nvram_wait(iobase);
- outb(((addr + i) >> 8) & 0xff,
- iobase + AMCC_OP_REG_MCSR_NVDATA);
- addi_eeprom_nvram_wait(iobase);
-
- /* Read the eeprom data byte */
- outb(NVCMD_BEGIN_READ, iobase + AMCC_OP_REG_MCSR_NVCMD);
- addi_eeprom_nvram_wait(iobase);
- tmp = inb(iobase + AMCC_OP_REG_MCSR_NVDATA);
- addi_eeprom_nvram_wait(iobase);
-
- if (i == 0)
- val |= tmp;
- else
- val |= (tmp << 8);
- }
-
- return val;
-}
-
-static unsigned short addi_eeprom_readw(unsigned long iobase,
- char *type,
- unsigned short addr)
-{
- unsigned short val = 0;
-
- /* Add the offset to the start of the user data */
- addr += NVRAM_USER_DATA_START;
-
- if (!strcmp(type, "S5920") || !strcmp(type, "S5933"))
- val = addi_eeprom_readw_nvram(iobase, addr);
-
- if (!strcmp(type, "93C76"))
- val = addi_eeprom_readw_93c76(iobase, addr);
-
- return val;
-}
-
-static void addi_eeprom_read_di_info(struct comedi_device *dev,
- unsigned long iobase,
- unsigned short addr)
-{
- const struct addi_board *this_board = dev->board_ptr;
- struct addi_private *devpriv = dev->private;
- char *type = this_board->pc_EepromChip;
- unsigned short tmp;
-
- /* Number of channels */
- tmp = addi_eeprom_readw(iobase, type, addr + 6);
- devpriv->s_EeParameters.i_NbrDiChannel = tmp;
-
- /* Interruptible or not */
- tmp = addi_eeprom_readw(iobase, type, addr + 8);
- tmp = (tmp >> 7) & 0x01;
-
- /* How many interruptible logic */
- tmp = addi_eeprom_readw(iobase, type, addr + 10);
-}
-
-static void addi_eeprom_read_do_info(struct comedi_device *dev,
- unsigned long iobase,
- unsigned short addr)
-{
- const struct addi_board *this_board = dev->board_ptr;
- struct addi_private *devpriv = dev->private;
- char *type = this_board->pc_EepromChip;
- unsigned short tmp;
-
- /* Number of channels */
- tmp = addi_eeprom_readw(iobase, type, addr + 6);
- devpriv->s_EeParameters.i_NbrDoChannel = tmp;
-
- devpriv->s_EeParameters.i_DoMaxdata = 0xffffffff >> (32 - tmp);
-}
-
-static void addi_eeprom_read_timer_info(struct comedi_device *dev,
- unsigned long iobase,
- unsigned short addr)
-{
- struct addi_private *devpriv = dev->private;
-#if 0
- const struct addi_board *this_board = dev->board_ptr;
- char *type = this_board->pc_EepromChip;
- unsigned short offset = 0;
- unsigned short ntimers;
- unsigned short tmp;
- int i;
-
- /* Number of Timers */
- ntimers = addi_eeprom_readw(iobase, type, addr + 6);
-
- /* Read header size */
- for (i = 0; i < ntimers; i++) {
- unsigned short size;
- unsigned short res;
- unsigned short mode;
- unsigned short min_timing;
- unsigned short timebase;
-
- size = addi_eeprom_readw(iobase, type, addr + 8 + offset + 0);
-
- /* Resolution / Mode */
- tmp = addi_eeprom_readw(iobase, type, addr + 8 + offset + 2);
- res = (tmp >> 10) & 0x3f;
- mode = (tmp >> 4) & 0x3f;
-
- /* MinTiming / Timebase */
- tmp = addi_eeprom_readw(iobase, type, addr + 8 + offset + 4);
- min_timing = (tmp >> 6) & 0x3ff;
- Timebase = tmp & 0x3f;
-
- offset += size;
- }
-#endif
- /* Timer subdevice present */
- devpriv->s_EeParameters.i_Timer = 1;
-}
-
-static void addi_eeprom_read_ao_info(struct comedi_device *dev,
- unsigned long iobase,
- unsigned short addr)
-{
- const struct addi_board *this_board = dev->board_ptr;
- struct addi_private *devpriv = dev->private;
- char *type = this_board->pc_EepromChip;
- unsigned short tmp;
-
- /* No of channels for 1st hard component */
- tmp = addi_eeprom_readw(iobase, type, addr + 10);
- devpriv->s_EeParameters.i_NbrAoChannel = (tmp >> 4) & 0x3ff;
-
- /* Resolution for 1st hard component */
- tmp = addi_eeprom_readw(iobase, type, addr + 16);
- tmp = (tmp >> 8) & 0xff;
- devpriv->s_EeParameters.i_AoMaxdata = 0xfff >> (16 - tmp);
-}
-
-static void addi_eeprom_read_ai_info(struct comedi_device *dev,
- unsigned long iobase,
- unsigned short addr)
-{
- const struct addi_board *this_board = dev->board_ptr;
- struct addi_private *devpriv = dev->private;
- char *type = this_board->pc_EepromChip;
- unsigned short offset;
- unsigned short tmp;
-
- /* No of channels for 1st hard component */
- tmp = addi_eeprom_readw(iobase, type, addr + 10);
- devpriv->s_EeParameters.i_NbrAiChannel = (tmp >> 4) & 0x3ff;
- if (!strcmp(this_board->pc_DriverName, "apci3200"))
- devpriv->s_EeParameters.i_NbrAiChannel *= 4;
-
- tmp = addi_eeprom_readw(iobase, type, addr + 16);
- devpriv->s_EeParameters.ui_MinAcquisitiontimeNs = tmp * 1000;
-
- tmp = addi_eeprom_readw(iobase, type, addr + 30);
- devpriv->s_EeParameters.ui_MinDelaytimeNs = tmp * 1000;
-
- tmp = addi_eeprom_readw(iobase, type, addr + 20);
- /* dma = (tmp >> 13) & 0x01; */
-
- tmp = addi_eeprom_readw(iobase, type, addr + 72) & 0xff;
- if (tmp) { /* > 0 */
- /* offset of first analog input single header */
- offset = 74 + (2 * tmp) + (10 * (1 + (tmp / 16)));
- } else { /* = 0 */
- offset = 74;
- }
-
- /* Resolution */
- tmp = addi_eeprom_readw(iobase, type, addr + offset + 2) & 0x1f;
- devpriv->s_EeParameters.i_AiMaxdata = 0xffff >> (16 - tmp);
-}
-
-static void addi_eeprom_read_info(struct comedi_device *dev,
- unsigned long iobase)
-{
- const struct addi_board *this_board = dev->board_ptr;
- char *type = this_board->pc_EepromChip;
- unsigned short size;
- unsigned char nfuncs;
- int i;
-
- size = addi_eeprom_readw(iobase, type, 8);
- nfuncs = addi_eeprom_readw(iobase, type, 10) & 0xff;
-
- /* Read functionality details */
- for (i = 0; i < nfuncs; i++) {
- unsigned short offset = i * 4;
- unsigned short addr;
- unsigned char func;
-
- func = addi_eeprom_readw(iobase, type, 12 + offset) & 0x3f;
- addr = addi_eeprom_readw(iobase, type, 14 + offset);
-
- switch (func) {
- case EEPROM_DIGITALINPUT:
- addi_eeprom_read_di_info(dev, iobase, addr);
- break;
-
- case EEPROM_DIGITALOUTPUT:
- addi_eeprom_read_do_info(dev, iobase, addr);
- break;
-
- case EEPROM_ANALOGINPUT:
- addi_eeprom_read_ai_info(dev, iobase, addr);
- break;
-
- case EEPROM_ANALOGOUTPUT:
- addi_eeprom_read_ao_info(dev, iobase, addr);
- break;
-
- case EEPROM_TIMER:
- case EEPROM_WATCHDOG:
- case EEPROM_TIMER_WATCHDOG_COUNTER:
- addi_eeprom_read_timer_info(dev, iobase, addr);
- break;
- }
- }
-}
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.c
deleted file mode 100644
index 53bb51bd77b..00000000000
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.c
+++ /dev/null
@@ -1,480 +0,0 @@
-/*
- * Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
- *
- * ADDI-DATA GmbH
- * Dieselstrasse 3
- * D-77833 Ottersweier
- * Tel: +19(0)7223/9493-0
- * Fax: +49(0)7223/9493-92
- * http://www.addi-data.com
- * info@addi-data.com
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License as published by the Free Software
- * Foundation; either version 2 of the License, or (at your option) any later
- * version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
- * details.
- */
-
-/* Card Specific information */
-#define APCI035_ADDRESS_RANGE 255
-
-/* Timer / Watchdog Related Defines */
-#define APCI035_TCW_SYNC_ENABLEDISABLE 0
-#define APCI035_TCW_RELOAD_VALUE 4
-#define APCI035_TCW_TIMEBASE 8
-#define APCI035_TCW_PROG 12
-#define APCI035_TCW_TRIG_STATUS 16
-#define APCI035_TCW_IRQ 20
-#define APCI035_TCW_WARN_TIMEVAL 24
-#define APCI035_TCW_WARN_TIMEBASE 28
-
-#define ADDIDATA_TIMER 0
-/* #define ADDIDATA_WATCHDOG 1 */
-
-#define APCI035_TW1 0
-#define APCI035_TW2 32
-#define APCI035_TW3 64
-#define APCI035_TW4 96
-
-#define APCI035_AI_OFFSET 0
-#define APCI035_TEMP 128
-#define APCI035_ALR_SEQ 4
-#define APCI035_START_STOP_INDEX 8
-#define APCI035_ALR_START_STOP 12
-#define APCI035_ALR_IRQ 16
-#define APCI035_EOS 20
-#define APCI035_CHAN_NO 24
-#define APCI035_CHAN_VAL 28
-#define APCI035_CONV_TIME_TIME_BASE 36
-#define APCI035_RELOAD_CONV_TIME_VAL 32
-#define APCI035_DELAY_TIME_TIME_BASE 44
-#define APCI035_RELOAD_DELAY_TIME_VAL 40
-#define ENABLE_EXT_TRIG 1
-#define ENABLE_EXT_GATE 2
-#define ENABLE_EXT_TRIG_GATE 3
-
-#define ANALOG_INPUT 0
-#define TEMPERATURE 1
-#define RESISTANCE 2
-
-#define ADDIDATA_GREATER_THAN_TEST 0
-#define ADDIDATA_LESS_THAN_TEST 1
-
-#define APCI035_MAXVOLT 2.5
-
-#define ADDIDATA_UNIPOLAR 1
-#define ADDIDATA_BIPOLAR 2
-
-/* ANALOG INPUT RANGE */
-static struct comedi_lrange range_apci035_ai = {
- 8, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2),
- BIP_RANGE(1),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2),
- UNI_RANGE(1)
- }
-};
-
-static int i_WatchdogNbr;
-static int i_Temp;
-static int i_Flag = 1;
-
-/*
- * Configures The Timer , Counter or Watchdog
- *
- * data[0] 0 = Configure As Timer, 1 = Configure As Watchdog
- * data[1] Watchdog number
- * data[2] Time base Unit
- * data[3] Reload Value
- * data[4] External Trigger, 1 = Enable, 0 = Disable
- * data[5] External Trigger Level
- * 00 = Trigger Disabled
- * 01 = Trigger Enabled (Low level)
- * 10 = Trigger Enabled (High Level)
- * 11 = Trigger Enabled (High/Low level)
- * data[6] External Gate, 1 = Enable, 0 = Disable
- * data[7] External Gate level
- * 00 = Gate Disabled
- * 01 = Gate Enabled (Low level)
- * 10 = Gate Enabled (High Level)
- * data[8] Warning Relay, 1 = Enable, 0 = Disable
- * data[9] Warning Delay available
- * data[10] Warning Relay Time unit
- * data[11] Warning Relay Time Reload value
- * data[12] Reset Relay, 1 = Enable, 0 = Disable
- * data[13] Interrupt, 1 = Enable, 0 = Disable
- */
-static int apci035_timer_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int ui_Status;
- unsigned int ui_Command;
- unsigned int ui_Mode;
-
- i_Temp = 0;
- devpriv->tsk_Current = current;
- devpriv->b_TimerSelectMode = data[0];
- i_WatchdogNbr = data[1];
- if (data[0] == 0)
- ui_Mode = 2;
- else
- ui_Mode = 0;
-
- ui_Command = 0;
- outl(ui_Command, devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
-
- ui_Command = inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
-
- /* Set the reload value */
- outl(data[3], devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 4);
-
- /* Set the time unit */
- outl(data[2], devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 8);
- if (data[0] == ADDIDATA_TIMER) {
-
- /* Set the mode : */
- /* - Disable the hardware */
- /* - Disable the counter mode */
- /* - Disable the warning */
- /* - Disable the reset */
- /* - Enable the timer mode */
- /* - Set the timer mode */
-
- ui_Command =
- (ui_Command & 0xFFF719E2UL) | ui_Mode << 13UL | 0x10UL;
-
- } else if (data[0] == ADDIDATA_WATCHDOG) {
-
- /* Set the mode : */
- /* - Disable the hardware */
- /* - Disable the counter mode */
- /* - Disable the warning */
- /* - Disable the reset */
- /* - Disable the timer mode */
-
- ui_Command = ui_Command & 0xFFF819E2UL;
-
- } else {
- dev_err(dev->class_dev, "The parameter for Timer/watchdog selection is in error\n");
- return -EINVAL;
- }
-
- outl(ui_Command, devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
-
- ui_Command = inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
-
- /* Disable the hardware trigger */
- ui_Command = ui_Command & 0xFFFFF89FUL;
- if (data[4] == 1) {
- /* Set the hardware trigger level */
- ui_Command = ui_Command | (data[5] << 5);
- }
- outl(ui_Command, devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
-
- ui_Command = inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
-
- /* Disable the hardware gate */
- ui_Command = ui_Command & 0xFFFFF87FUL;
- if (data[6] == 1) {
- /* Set the hardware gate level */
- ui_Command = ui_Command | (data[7] << 7);
- }
- outl(ui_Command, devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
-
- ui_Command = inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
-
- /* Disable the hardware output */
- ui_Command = ui_Command & 0xFFFFF9FBUL;
-
- /* Set the hardware output level */
- ui_Command = ui_Command | (data[8] << 2);
- outl(ui_Command, devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
- if (data[9] == 1) {
- /* Set the reload value */
- outl(data[11],
- devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 24);
-
- /* Set the time unite */
- outl(data[10],
- devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 28);
- }
-
- ui_Command = inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
-
- /* Disable the hardware output */
- ui_Command = ui_Command & 0xFFFFF9F7UL;
-
- /* Set the hardware output level */
- ui_Command = ui_Command | (data[12] << 3);
- outl(ui_Command, devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
-
- /* Enable the watchdog interrupt */
- ui_Command = inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
-
- /* Set the interrupt selection */
- ui_Status = inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 16);
-
- ui_Command = (ui_Command & 0xFFFFF9FDUL) | (data[13] << 1);
- outl(ui_Command, devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
-
- return insn->n;
-}
-
-/*
- * Start / Stop The Selected Timer , or Watchdog
- *
- * data[0]
- * 0 - Stop Selected Timer/Watchdog
- * 1 - Start Selected Timer/Watch*dog
- * 2 - Trigger Selected Timer/Watchdog
- * 3 - Stop All Timer/Watchdog
- * 4 - Start All Timer/Watchdog
- * 5 - Trigger All Timer/Watchdog
- */
-static int apci035_timer_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int ui_Command;
- int i_Count;
-
- if (data[0] == 1) {
- ui_Command =
- inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
-
- /* Start the hardware */
- ui_Command = (ui_Command & 0xFFFFF9FFUL) | 0x1UL;
- outl(ui_Command,
- devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
- }
- if (data[0] == 2) {
- ui_Command =
- inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
-
- /* Set the trigger command */
- ui_Command = (ui_Command & 0xFFFFF9FFUL) | 0x200UL;
- outl(ui_Command,
- devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
- }
-
- if (data[0] == 0) {
- /* Stop The Watchdog */
- ui_Command = 0;
- /*
- * ui_Command = inl(devpriv->iobase+((i_WatchdogNbr-1)*32)+12);
- * ui_Command = ui_Command & 0xFFFFF9FEUL;
- */
- outl(ui_Command,
- devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
- }
- if (data[0] == 3) {
- /* stop all Watchdogs */
- ui_Command = 0;
- for (i_Count = 1; i_Count <= 4; i_Count++) {
- if (devpriv->b_TimerSelectMode == ADDIDATA_WATCHDOG)
- ui_Command = 0x2UL;
- else
- ui_Command = 0x10UL;
-
- i_WatchdogNbr = i_Count;
- outl(ui_Command,
- devpriv->iobase + ((i_WatchdogNbr - 1) * 32) +
- 0);
- }
-
- }
- if (data[0] == 4) {
- /* start all Watchdogs */
- ui_Command = 0;
- for (i_Count = 1; i_Count <= 4; i_Count++) {
- if (devpriv->b_TimerSelectMode == ADDIDATA_WATCHDOG)
- ui_Command = 0x1UL;
- else
- ui_Command = 0x8UL;
-
- i_WatchdogNbr = i_Count;
- outl(ui_Command,
- devpriv->iobase + ((i_WatchdogNbr - 1) * 32) +
- 0);
- }
- }
- if (data[0] == 5) {
- /* trigger all Watchdogs */
- ui_Command = 0;
- for (i_Count = 1; i_Count <= 4; i_Count++) {
- if (devpriv->b_TimerSelectMode == ADDIDATA_WATCHDOG)
- ui_Command = 0x4UL;
- else
- ui_Command = 0x20UL;
-
- i_WatchdogNbr = i_Count;
- outl(ui_Command,
- devpriv->iobase + ((i_WatchdogNbr - 1) * 32) +
- 0);
- }
- i_Temp = 1;
- }
- return insn->n;
-}
-
-/*
- * Read The Selected Timer , Counter or Watchdog
- *
- * data[0] software trigger status
- * data[1] hardware trigger status
- * data[2] Software clear status
- * data[3] Overflow status
- * data[4] Timer actual value
- */
-static int apci035_timer_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int ui_Status; /* Status register */
-
- i_WatchdogNbr = insn->unused[0];
-
- /* Get the status */
- ui_Status = inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 16);
-
- /* Get the software trigger status */
- data[0] = ((ui_Status >> 1) & 1);
-
- /* Get the hardware trigger status */
- data[1] = ((ui_Status >> 2) & 1);
-
- /* Get the software clear status */
- data[2] = ((ui_Status >> 3) & 1);
-
- /* Get the overflow status */
- data[3] = ((ui_Status >> 0) & 1);
- if (devpriv->b_TimerSelectMode == ADDIDATA_TIMER)
- data[4] = inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 0);
-
- return insn->n;
-}
-
-/*
- * Configures The Analog Input Subdevice
- *
- * data[0] Warning delay value
- */
-static int apci035_ai_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
-
- devpriv->tsk_Current = current;
- outl(0x200 | 0, devpriv->iobase + 128 + 0x4);
- outl(0, devpriv->iobase + 128 + 0);
-
- /* Initialise the warning value */
- outl(0x300 | 0, devpriv->iobase + 128 + 0x4);
- outl((data[0] << 8), devpriv->iobase + 128 + 0);
- outl(0x200000UL, devpriv->iobase + 128 + 12);
-
- return insn->n;
-}
-
-/*
- * Read value of the selected channel
- *
- * data[0] Digital Value Of Input
- */
-static int apci035_ai_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int ui_CommandRegister;
-
- /* Set the start */
- ui_CommandRegister = 0x80000;
-
- /* Write the command register */
- outl(ui_CommandRegister, devpriv->iobase + 128 + 8);
-
- /* Read the digital value of the input */
- data[0] = inl(devpriv->iobase + 128 + 28);
- return insn->n;
-}
-
-static int apci035_reset(struct comedi_device *dev)
-{
- struct addi_private *devpriv = dev->private;
- int i_Count;
-
- for (i_Count = 1; i_Count <= 4; i_Count++) {
- i_WatchdogNbr = i_Count;
-
- /* stop all timers */
- outl(0x0, devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 0);
- }
- outl(0x0, devpriv->iobase + 128 + 12); /* Disable the warning delay */
-
- return 0;
-}
-
-static void apci035_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct addi_private *devpriv = dev->private;
- unsigned int ui_StatusRegister1;
- unsigned int ui_StatusRegister2;
- unsigned int ui_ReadCommand;
- unsigned int ui_ChannelNumber;
- unsigned int ui_DigitalTemperature;
-
- if (i_Temp == 1) {
- i_WatchdogNbr = i_Flag;
- i_Flag = i_Flag + 1;
- }
-
- /* Read the interrupt status register of temperature Warning */
- ui_StatusRegister1 = inl(devpriv->iobase + 128 + 16);
-
- /* Read the interrupt status register for Watchdog/timer */
- ui_StatusRegister2 =
- inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 20);
-
- /* Test if warning relay interrupt */
- if ((((ui_StatusRegister1) & 0x8) == 0x8)) {
-
- /* Disable the temperature warning */
- ui_ReadCommand = inl(devpriv->iobase + 128 + 12);
- ui_ReadCommand = ui_ReadCommand & 0xFFDF0000UL;
- outl(ui_ReadCommand, devpriv->iobase + 128 + 12);
-
- /* Read the channel number */
- ui_ChannelNumber = inl(devpriv->iobase + 128 + 60);
-
- /* Read the digital temperature value */
- ui_DigitalTemperature = inl(devpriv->iobase + 128 + 60);
-
- /* send signal to the sample */
- send_sig(SIGIO, devpriv->tsk_Current, 0);
-
- } else if ((ui_StatusRegister2 & 0x1) == 0x1) {
- /* send signal to the sample */
- send_sig(SIGIO, devpriv->tsk_Current, 0);
- }
-}
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c
index 0ea081e1e11..bfa9228c833 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c
@@ -158,7 +158,7 @@ static int apci1500_di_config(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- struct addi_private *devpriv = dev->private;
+ struct apci1500_private *devpriv = dev->private;
int i_PatternPolarity = 0, i_PatternTransition = 0, i_PatternMask = 0;
int i_MaxChannel = 0, i_Count = 0, i_EventMask = 0;
int i_PatternTransitionCount = 0, i_RegValue;
@@ -466,7 +466,7 @@ static int apci1500_di_write(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- struct addi_private *devpriv = dev->private;
+ struct apci1500_private *devpriv = dev->private;
int i_Event1InterruptStatus = 0, i_Event2InterruptStatus =
0, i_RegValue;
@@ -653,7 +653,7 @@ static int apci1500_di_read(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- struct addi_private *devpriv = dev->private;
+ struct apci1500_private *devpriv = dev->private;
int i_DummyRead = 0;
/* Software reset */
@@ -789,7 +789,7 @@ static int apci1500_di_insn_bits(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- struct addi_private *devpriv = dev->private;
+ struct apci1500_private *devpriv = dev->private;
data[1] = inw(devpriv->i_IobaseAddon + APCI1500_DIGITAL_IP);
@@ -807,7 +807,7 @@ static int apci1500_do_config(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- struct addi_private *devpriv = dev->private;
+ struct apci1500_private *devpriv = dev->private;
devpriv->b_OutputMemoryStatus = data[0];
return insn->n;
@@ -821,7 +821,7 @@ static int apci1500_do_write(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- struct addi_private *devpriv = dev->private;
+ struct apci1500_private *devpriv = dev->private;
static unsigned int ui_Temp;
unsigned int ui_Temp1;
unsigned int ui_NoOfChannel = CR_CHAN(insn->chanspec); /* get the channel */
@@ -981,7 +981,7 @@ static int apci1500_timer_config(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- struct addi_private *devpriv = dev->private;
+ struct apci1500_private *devpriv = dev->private;
int i_TimerCounterMode, i_MasterConfiguration;
devpriv->tsk_Current = current;
@@ -1471,7 +1471,7 @@ static int apci1500_timer_write(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- struct addi_private *devpriv = dev->private;
+ struct apci1500_private *devpriv = dev->private;
int i_CommandAndStatusValue;
switch (data[0]) {
@@ -1731,7 +1731,7 @@ static int apci1500_timer_bits(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- struct addi_private *devpriv = dev->private;
+ struct apci1500_private *devpriv = dev->private;
int i_CommandAndStatusValue;
switch (data[0]) {
@@ -1895,7 +1895,7 @@ static int apci1500_do_bits(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- struct addi_private *devpriv = dev->private;
+ struct apci1500_private *devpriv = dev->private;
unsigned int ui_Status;
int i_RegValue;
int i_Constant;
@@ -2011,11 +2011,11 @@ static int apci1500_do_bits(struct comedi_device *dev,
return insn->n;
}
-static void apci1500_interrupt(int irq, void *d)
+static irqreturn_t apci1500_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
- struct addi_private *devpriv = dev->private;
+ struct apci1500_private *devpriv = dev->private;
unsigned int ui_InterruptStatus = 0;
int i_RegValue = 0;
@@ -2180,11 +2180,13 @@ static void apci1500_interrupt(int irq, void *d)
"Interrupt from unknown source\n");
}
+
+ return IRQ_HANDLED;
}
static int apci1500_reset(struct comedi_device *dev)
{
- struct addi_private *devpriv = dev->private;
+ struct apci1500_private *devpriv = dev->private;
int i_DummyRead = 0;
i_TimerCounter1Init = 0;
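
The apci1500 hunks above change the interrupt handler from returning void to returning irqreturn_t and make it report IRQ_HANDLED. A hedged sketch of the usual shape of such a conversion follows; struct my_dev, MY_INT_STATUS_REG and my_board_interrupt() are placeholders, not apci1500 names, and a real handler would also acknowledge the individual interrupt sources.

#include <linux/interrupt.h>
#include <linux/io.h>

/* Sketch only: placeholder device state and register offset. */
struct my_dev {
	unsigned long iobase;
};
#define MY_INT_STATUS_REG	0x10

static irqreturn_t my_board_interrupt(int irq, void *d)
{
	struct my_dev *dev = d;
	unsigned int status = inl(dev->iobase + MY_INT_STATUS_REG);

	if (!status)
		return IRQ_NONE;	/* not raised by this device (shared IRQ) */

	/* ... handle and acknowledge the pending interrupt sources here ... */

	return IRQ_HANDLED;
}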
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c
index 98de96953a2..fa99c8ca4f9 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c
@@ -16,244 +16,189 @@
#define ADDIDATA_TIMER 0
#define ADDIDATA_COUNTER 1
#define ADDIDATA_WATCHDOG 2
-#define APCI1564_COUNTER1 0
-#define APCI1564_COUNTER2 1
-#define APCI1564_COUNTER3 2
-#define APCI1564_COUNTER4 3
-
-/*
- * devpriv->amcc_iobase Register Map
- */
-#define APCI1564_DI_REG 0x04
-#define APCI1564_DI_INT_MODE1_REG 0x08
-#define APCI1564_DI_INT_MODE2_REG 0x0c
-#define APCI1564_DI_INT_STATUS_REG 0x10
-#define APCI1564_DI_IRQ_REG 0x14
-#define APCI1564_DO_REG 0x18
-#define APCI1564_DO_INT_CTRL_REG 0x1c
-#define APCI1564_DO_INT_STATUS_REG 0x20
-#define APCI1564_DO_IRQ_REG 0x24
-#define APCI1564_WDOG_REG 0x28
-#define APCI1564_WDOG_RELOAD_REG 0x2c
-#define APCI1564_WDOG_TIMEBASE_REG 0x30
-#define APCI1564_WDOG_CTRL_REG 0x34
-#define APCI1564_WDOG_STATUS_REG 0x38
-#define APCI1564_WDOG_IRQ_REG 0x3c
-#define APCI1564_WDOG_WARN_TIMEVAL_REG 0x40
-#define APCI1564_WDOG_WARN_TIMEBASE_REG 0x44
-#define APCI1564_TIMER_REG 0x48
-#define APCI1564_TIMER_RELOAD_REG 0x4c
-#define APCI1564_TIMER_TIMEBASE_REG 0x50
-#define APCI1564_TIMER_CTRL_REG 0x54
-#define APCI1564_TIMER_STATUS_REG 0x58
-#define APCI1564_TIMER_IRQ_REG 0x5c
-#define APCI1564_TIMER_WARN_TIMEVAL_REG 0x60
-#define APCI1564_TIMER_WARN_TIMEBASE_REG 0x64
-
-/*
- * dev->iobase Register Map
- */
-#define APCI1564_COUNTER_REG(x) (0x00 + ((x) * 0x20))
-#define APCI1564_COUNTER_RELOAD_REG(x) (0x04 + ((x) * 0x20))
-#define APCI1564_COUNTER_TIMEBASE_REG(x) (0x08 + ((x) * 0x20))
-#define APCI1564_COUNTER_CTRL_REG(x) (0x0c + ((x) * 0x20))
-#define APCI1564_COUNTER_STATUS_REG(x) (0x10 + ((x) * 0x20))
-#define APCI1564_COUNTER_IRQ_REG(x) (0x14 + ((x) * 0x20))
-#define APCI1564_COUNTER_WARN_TIMEVAL_REG(x) (0x18 + ((x) * 0x20))
-#define APCI1564_COUNTER_WARN_TIMEBASE_REG(x) (0x1c + ((x) * 0x20))
-
-/*
- * Configures The Timer or Counter
- *
- * data[0] Configure as: 0 = Timer, 1 = Counter
- * data[1] 1 = Enable Interrupt, 0 = Disable Interrupt
- * data[2] Time Unit
- * data[3] Reload Value
- * data[4] Timer Mode
- * data[5] Timer Counter Watchdog Number
- * data[6] Counter Direction
- */
-static int apci1564_timer_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+
+static int apci1564_timer_insn_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct apci1564_private *devpriv = dev->private;
- unsigned int ul_Command1 = 0;
+ unsigned int ctrl;
devpriv->tsk_current = current;
- if (data[0] == ADDIDATA_TIMER) {
- /* First Stop The Timer */
- ul_Command1 = inl(devpriv->amcc_iobase + APCI1564_TIMER_CTRL_REG);
- ul_Command1 = ul_Command1 & 0xFFFFF9FEUL;
- /* Stop The Timer */
- outl(ul_Command1, devpriv->amcc_iobase + APCI1564_TIMER_CTRL_REG);
-
- devpriv->timer_select_mode = ADDIDATA_TIMER;
- if (data[1] == 1) {
- /* Enable TIMER int & DISABLE ALL THE OTHER int SOURCES */
- outl(0x02, devpriv->amcc_iobase + APCI1564_TIMER_CTRL_REG);
- outl(0x0, devpriv->amcc_iobase + APCI1564_DI_IRQ_REG);
- outl(0x0, devpriv->amcc_iobase + APCI1564_DO_IRQ_REG);
- outl(0x0, devpriv->amcc_iobase + APCI1564_WDOG_IRQ_REG);
- outl(0x0, dev->iobase +
- APCI1564_COUNTER_IRQ_REG(APCI1564_COUNTER1));
- outl(0x0, dev->iobase +
- APCI1564_COUNTER_IRQ_REG(APCI1564_COUNTER2));
- outl(0x0, dev->iobase +
- APCI1564_COUNTER_IRQ_REG(APCI1564_COUNTER3));
- outl(0x0, dev->iobase +
- APCI1564_COUNTER_IRQ_REG(APCI1564_COUNTER4));
- } else {
- /* disable Timer interrupt */
- outl(0x0, devpriv->amcc_iobase + APCI1564_TIMER_CTRL_REG);
- }
- /* Loading Timebase */
- outl(data[2], devpriv->amcc_iobase + APCI1564_TIMER_TIMEBASE_REG);
-
- /* Loading the Reload value */
- outl(data[3], devpriv->amcc_iobase + APCI1564_TIMER_RELOAD_REG);
-
- ul_Command1 = inl(devpriv->amcc_iobase + APCI1564_TIMER_CTRL_REG);
- ul_Command1 = (ul_Command1 & 0xFFF719E2UL) | 2UL << 13UL | 0x10UL;
- /* mode 2 */
- outl(ul_Command1, devpriv->amcc_iobase + APCI1564_TIMER_CTRL_REG);
- } else if (data[0] == ADDIDATA_COUNTER) {
- devpriv->timer_select_mode = ADDIDATA_COUNTER;
- devpriv->mode_select_register = data[5];
-
- /* First Stop The Counter */
- ul_Command1 = inl(dev->iobase +
- APCI1564_COUNTER_CTRL_REG(data[5] - 1));
- ul_Command1 = ul_Command1 & 0xFFFFF9FEUL;
- /* Stop The Timer */
- outl(ul_Command1, dev->iobase +
- APCI1564_COUNTER_CTRL_REG(data[5] - 1));
-
- /* Set the reload value */
- outl(data[3], dev->iobase +
- APCI1564_COUNTER_RELOAD_REG(data[5] - 1));
-
- /* Set the mode : */
- /* - Disable the hardware */
- /* - Disable the counter mode */
- /* - Disable the warning */
- /* - Disable the reset */
- /* - Disable the timer mode */
- /* - Enable the counter mode */
-
- ul_Command1 =
- (ul_Command1 & 0xFFFC19E2UL) | 0x80000UL |
- (unsigned int) ((unsigned int) data[4] << 16UL);
- outl(ul_Command1, dev->iobase +
- APCI1564_COUNTER_CTRL_REG(data[5] - 1));
-
- /* Enable or Disable Interrupt */
- ul_Command1 = (ul_Command1 & 0xFFFFF9FD) | (data[1] << 1);
- outl(ul_Command1, dev->iobase +
- APCI1564_COUNTER_CTRL_REG(data[5] - 1));
-
- /* Set the Up/Down selection */
- ul_Command1 = (ul_Command1 & 0xFFFBF9FFUL) | (data[6] << 18);
- outl(ul_Command1, dev->iobase +
- APCI1564_COUNTER_CTRL_REG(data[5] - 1));
+ /* First Stop The Timer */
+ ctrl = inl(devpriv->timer + ADDI_TCW_CTRL_REG);
+ ctrl &= 0xfffff9fe;
+ /* Stop The Timer */
+ outl(ctrl, devpriv->timer + ADDI_TCW_CTRL_REG);
+
+ if (data[1] == 1) {
+ /* Enable timer int & disable all the other int sources */
+ outl(0x02, devpriv->timer + ADDI_TCW_CTRL_REG);
+ outl(0x0, dev->iobase + APCI1564_DI_IRQ_REG);
+ outl(0x0, dev->iobase + APCI1564_DO_IRQ_REG);
+ outl(0x0, dev->iobase + APCI1564_WDOG_IRQ_REG);
+ if (devpriv->counters) {
+ unsigned long iobase;
+
+ iobase = devpriv->counters + ADDI_TCW_IRQ_REG;
+ outl(0x0, iobase + APCI1564_COUNTER(0));
+ outl(0x0, iobase + APCI1564_COUNTER(1));
+ outl(0x0, iobase + APCI1564_COUNTER(2));
+ }
} else {
- dev_err(dev->class_dev, "Invalid subdevice.\n");
+ /* disable Timer interrupt */
+ outl(0x0, devpriv->timer + ADDI_TCW_CTRL_REG);
}
+
+ /* Loading Timebase */
+ outl(data[2], devpriv->timer + ADDI_TCW_TIMEBASE_REG);
+
+ /* Loading the Reload value */
+ outl(data[3], devpriv->timer + ADDI_TCW_RELOAD_REG);
+
+ ctrl = inl(devpriv->timer + ADDI_TCW_CTRL_REG);
+ ctrl &= 0xfff719e2;
+ ctrl |= (2 << 13) | 0x10;
+ /* mode 2 */
+ outl(ctrl, devpriv->timer + ADDI_TCW_CTRL_REG);
+
return insn->n;
}
-/*
- * Start / Stop The Selected Timer or Counter
- *
- * data[0] Configure as: 0 = Timer, 1 = Counter
- * data[1] 0 = Stop, 1 = Start, 2 = Trigger Clear (Only Counter)
- */
-static int apci1564_timer_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static int apci1564_timer_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct apci1564_private *devpriv = dev->private;
- unsigned int ul_Command1 = 0;
+ unsigned int ctrl;
+
+ ctrl = inl(devpriv->timer + ADDI_TCW_CTRL_REG);
+ switch (data[1]) {
+ case 0: /* Stop The Timer */
+ ctrl &= 0xfffff9fe;
+ break;
+ case 1: /* Enable the Timer */
+ ctrl &= 0xfffff9ff;
+ ctrl |= 0x1;
+ break;
+ }
+ outl(ctrl, devpriv->timer + ADDI_TCW_CTRL_REG);
- if (devpriv->timer_select_mode == ADDIDATA_TIMER) {
- if (data[1] == 1) {
- ul_Command1 = inl(devpriv->amcc_iobase + APCI1564_TIMER_CTRL_REG);
- ul_Command1 = (ul_Command1 & 0xFFFFF9FFUL) | 0x1UL;
+ return insn->n;
+}
- /* Enable the Timer */
- outl(ul_Command1, devpriv->amcc_iobase + APCI1564_TIMER_CTRL_REG);
- } else if (data[1] == 0) {
- /* Stop The Timer */
+static int apci1564_timer_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct apci1564_private *devpriv = dev->private;
+
+ /* Stores the status of the Timer */
+ data[0] = inl(devpriv->timer + ADDI_TCW_STATUS_REG) & 0x1;
+
+ /* Stores the Actual value of the Timer */
+ data[1] = inl(devpriv->timer + ADDI_TCW_VAL_REG);
- ul_Command1 = inl(devpriv->amcc_iobase + APCI1564_TIMER_CTRL_REG);
- ul_Command1 = ul_Command1 & 0xFFFFF9FEUL;
- outl(ul_Command1, devpriv->amcc_iobase + APCI1564_TIMER_CTRL_REG);
- }
- } else if (devpriv->timer_select_mode == ADDIDATA_COUNTER) {
- ul_Command1 =
- inl(dev->iobase +
- APCI1564_COUNTER_CTRL_REG(devpriv->mode_select_register - 1));
- if (data[1] == 1) {
- /* Start the Counter subdevice */
- ul_Command1 = (ul_Command1 & 0xFFFFF9FFUL) | 0x1UL;
- } else if (data[1] == 0) {
- /* Stops the Counter subdevice */
- ul_Command1 = 0;
-
- } else if (data[1] == 2) {
- /* Clears the Counter subdevice */
- ul_Command1 = (ul_Command1 & 0xFFFFF9FFUL) | 0x400;
- }
- outl(ul_Command1, dev->iobase +
- APCI1564_COUNTER_CTRL_REG(devpriv->mode_select_register - 1));
- } else {
- dev_err(dev->class_dev, "Invalid subdevice.\n");
- }
return insn->n;
}
-/*
- * Read The Selected Timer or Counter
- */
-static int apci1564_timer_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static int apci1564_counter_insn_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct apci1564_private *devpriv = dev->private;
- unsigned int ul_Command1 = 0;
-
- if (devpriv->timer_select_mode == ADDIDATA_TIMER) {
- /* Stores the status of the Timer */
- data[0] = inl(devpriv->amcc_iobase + APCI1564_TIMER_STATUS_REG) & 0x1;
-
- /* Stores the Actual value of the Timer */
- data[1] = inl(devpriv->amcc_iobase + APCI1564_TIMER_REG);
- } else if (devpriv->timer_select_mode == ADDIDATA_COUNTER) {
- /* Read the Counter Actual Value. */
- data[0] =
- inl(dev->iobase +
- APCI1564_COUNTER_REG(devpriv->mode_select_register - 1));
- ul_Command1 =
- inl(dev->iobase +
- APCI1564_COUNTER_STATUS_REG(devpriv->mode_select_register - 1));
-
- /* Get the software trigger status */
- data[1] = (unsigned char) ((ul_Command1 >> 1) & 1);
-
- /* Get the hardware trigger status */
- data[2] = (unsigned char) ((ul_Command1 >> 2) & 1);
-
- /* Get the software clear status */
- data[3] = (unsigned char) ((ul_Command1 >> 3) & 1);
-
- /* Get the overflow status */
- data[4] = (unsigned char) ((ul_Command1 >> 0) & 1);
- } else {
- dev_err(dev->class_dev, "Invalid subdevice.\n");
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned long iobase = devpriv->counters + APCI1564_COUNTER(chan);
+ unsigned int ctrl;
+
+ devpriv->tsk_current = current;
+
+ /* First Stop The Counter */
+ ctrl = inl(iobase + ADDI_TCW_CTRL_REG);
+ ctrl &= 0xfffff9fe;
+	/* Stop The Counter */
+ outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
+
+ /* Set the reload value */
+ outl(data[3], iobase + ADDI_TCW_RELOAD_REG);
+
+ /* Set the mode : */
+ /* - Disable the hardware */
+ /* - Disable the counter mode */
+ /* - Disable the warning */
+ /* - Disable the reset */
+ /* - Disable the timer mode */
+ /* - Enable the counter mode */
+
+ ctrl &= 0xfffc19e2;
+ ctrl |= 0x80000 | (data[4] << 16);
+ outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
+
+ /* Enable or Disable Interrupt */
+ ctrl &= 0xfffff9fd;
+ ctrl |= (data[1] << 1);
+ outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
+
+ /* Set the Up/Down selection */
+ ctrl &= 0xfffbf9ff;
+ ctrl |= (data[6] << 18);
+ outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
+
+ return insn->n;
+}
+
+static int apci1564_counter_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct apci1564_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned long iobase = devpriv->counters + APCI1564_COUNTER(chan);
+ unsigned int ctrl;
+
+ ctrl = inl(iobase + ADDI_TCW_CTRL_REG);
+ switch (data[1]) {
+ case 0: /* Stops the Counter subdevice */
+ ctrl = 0;
+ break;
+ case 1: /* Start the Counter subdevice */
+ ctrl &= 0xfffff9ff;
+ ctrl |= 0x1;
+ break;
+ case 2: /* Clears the Counter subdevice */
+ ctrl &= 0xfffff9ff;
+ ctrl |= 0x400;
+ break;
}
+ outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
+
+ return insn->n;
+}
+
+static int apci1564_counter_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct apci1564_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned long iobase = devpriv->counters + APCI1564_COUNTER(chan);
+ unsigned int status;
+
+ /* Read the Counter Actual Value. */
+ data[0] = inl(iobase + ADDI_TCW_VAL_REG);
+
+ status = inl(iobase + ADDI_TCW_STATUS_REG);
+ data[1] = (status >> 1) & 1; /* software trigger status */
+ data[2] = (status >> 2) & 1; /* hardware trigger status */
+ data[3] = (status >> 3) & 1; /* software clear status */
+ data[4] = (status >> 0) & 1; /* overflow status */
+
return insn->n;
}
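
The reworked apci1564 functions above address each counter channel through a fixed-size per-channel window (APCI1564_COUNTER(chan)) plus common ADDI_TCW_* register offsets, and update the control register with a read-modify-write. A small sketch of that addressing scheme, using invented offsets and a fake register file instead of I/O port access, is:

/* Sketch only: invented offsets; each channel occupies a 0x20-byte window. */
#define CHAN_BASE(x)	((x) * 0x20)
#define TCW_CTRL_REG	0x0c

static unsigned int fake_regs[4 * 0x20 / 4];	/* stand-in for the I/O window */

static unsigned int reg_read(unsigned long off)
{
	return fake_regs[off / 4];
}

static void reg_write(unsigned int val, unsigned long off)
{
	fake_regs[off / 4] = val;
}

static void counter_enable(unsigned int chan, int enable)
{
	unsigned long base = CHAN_BASE(chan);
	unsigned int ctrl = reg_read(base + TCW_CTRL_REG);

	ctrl &= ~0x1u;			/* clear the enable bit */
	if (enable)
		ctrl |= 0x1u;		/* set the enable bit */
	reg_write(ctrl, base + TCW_CTRL_REG);
}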
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c
deleted file mode 100644
index 2950815b65f..00000000000
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c
+++ /dev/null
@@ -1,2050 +0,0 @@
-/**
-@verbatim
-
-Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
-
- ADDI-DATA GmbH
- Dieselstrasse 3
- D-77833 Ottersweier
-        Tel: +49(0)7223/9493-0
- Fax: +49(0)7223/9493-92
- http://www.addi-data.com
- info@addi-data.com
-
-This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
-
-This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-@endverbatim
-*/
-/*
- +-----------------------------------------------------------------------+
- | (C) ADDI-DATA GmbH Dieselstrasse 3 D-77833 Ottersweier |
- +-----------------------------------------------------------------------+
- | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
- | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
- +-----------------------------------------------------------------------+
- | Project : APCI-3120 | Compiler : GCC |
- | Module name : hwdrv_apci3120.c| Version : 2.96 |
- +-------------------------------+---------------------------------------+
- | Project manager: Eric Stolz | Date : 02/12/2002 |
- +-----------------------------------------------------------------------+
- | Description :APCI3120 Module. Hardware abstraction Layer for APCI3120|
- +-----------------------------------------------------------------------+
- | UPDATE'S |
- +-----------------------------------------------------------------------+
- | Date | Author | Description of updates |
- +----------+-----------+------------------------------------------------+
- | | | |
- | | | |
- +----------+-----------+------------------------------------------------+
-*/
-
-#include <linux/delay.h>
-
-/*
- * ADDON RELATED ADDITIONS
- */
-/* Constant */
-#define APCI3120_ENABLE_TRANSFER_ADD_ON_LOW 0x00
-#define APCI3120_ENABLE_TRANSFER_ADD_ON_HIGH 0x1200
-#define APCI3120_A2P_FIFO_MANAGEMENT 0x04000400L
-#define APCI3120_AMWEN_ENABLE 0x02
-#define APCI3120_A2P_FIFO_WRITE_ENABLE 0x01
-#define APCI3120_FIFO_ADVANCE_ON_BYTE_2 0x20000000L
-#define APCI3120_ENABLE_WRITE_TC_INT 0x00004000L
-#define APCI3120_CLEAR_WRITE_TC_INT 0x00040000L
-#define APCI3120_DISABLE_AMWEN_AND_A2P_FIFO_WRITE 0x0
-#define APCI3120_DISABLE_BUS_MASTER_ADD_ON 0x0
-#define APCI3120_DISABLE_BUS_MASTER_PCI 0x0
-
-/* ADD_ON ::: this needed since apci supports 16 bit interface to add on */
-#define APCI3120_ADD_ON_AGCSTS_LOW 0x3C
-#define APCI3120_ADD_ON_AGCSTS_HIGH (APCI3120_ADD_ON_AGCSTS_LOW + 2)
-#define APCI3120_ADD_ON_MWAR_LOW 0x24
-#define APCI3120_ADD_ON_MWAR_HIGH (APCI3120_ADD_ON_MWAR_LOW + 2)
-#define APCI3120_ADD_ON_MWTC_LOW 0x058
-#define APCI3120_ADD_ON_MWTC_HIGH (APCI3120_ADD_ON_MWTC_LOW + 2)
-
-/* AMCC */
-#define APCI3120_AMCC_OP_MCSR 0x3C
-#define APCI3120_AMCC_OP_REG_INTCSR 0x38
-
-/* for transfer count enable bit */
-#define AGCSTS_TC_ENABLE 0x10000000
-
-/* used for test on mixture of BIP/UNI ranges */
-#define APCI3120_BIPOLAR_RANGES 4
-
-#define APCI3120_ADDRESS_RANGE 16
-
-#define APCI3120_DISABLE 0
-#define APCI3120_ENABLE 1
-
-#define APCI3120_START 1
-#define APCI3120_STOP 0
-
-#define APCI3120_EOC_MODE 1
-#define APCI3120_EOS_MODE 2
-#define APCI3120_DMA_MODE 3
-
-/* DIGITAL INPUT-OUTPUT DEFINE */
-
-#define APCI3120_DIGITAL_OUTPUT 0x0d
-#define APCI3120_RD_STATUS 0x02
-#define APCI3120_RD_FIFO 0x00
-
-/* digital output insn_write ON /OFF selection */
-#define APCI3120_SET4DIGITALOUTPUTON 1
-#define APCI3120_SET4DIGITALOUTPUTOFF 0
-
-/* analog output SELECT BIT */
-#define APCI3120_ANALOG_OP_CHANNEL_1 0x0000
-#define APCI3120_ANALOG_OP_CHANNEL_2 0x4000
-#define APCI3120_ANALOG_OP_CHANNEL_3 0x8000
-#define APCI3120_ANALOG_OP_CHANNEL_4 0xc000
-#define APCI3120_ANALOG_OP_CHANNEL_5 0x0000
-#define APCI3120_ANALOG_OP_CHANNEL_6 0x4000
-#define APCI3120_ANALOG_OP_CHANNEL_7 0x8000
-#define APCI3120_ANALOG_OP_CHANNEL_8 0xc000
-
-/* Enable external trigger bit in nWrAddress */
-#define APCI3120_ENABLE_EXT_TRIGGER 0x8000
-
-/* ANALOG OUTPUT AND INPUT DEFINE */
-#define APCI3120_UNIPOLAR 0x80
-#define APCI3120_BIPOLAR 0x00
-#define APCI3120_ANALOG_OUTPUT_1 0x08
-#define APCI3120_ANALOG_OUTPUT_2 0x0a
-#define APCI3120_1_GAIN 0x00
-#define APCI3120_2_GAIN 0x10
-#define APCI3120_5_GAIN 0x20
-#define APCI3120_10_GAIN 0x30
-#define APCI3120_SEQ_RAM_ADDRESS 0x06
-#define APCI3120_RESET_FIFO 0x0c
-#define APCI3120_TIMER_0_MODE_2 0x01
-#define APCI3120_TIMER_0_MODE_4 0x2
-#define APCI3120_SELECT_TIMER_0_WORD 0x00
-#define APCI3120_ENABLE_TIMER0 0x1000
-#define APCI3120_CLEAR_PR 0xf0ff
-#define APCI3120_CLEAR_PA 0xfff0
-#define APCI3120_CLEAR_PA_PR (APCI3120_CLEAR_PR & APCI3120_CLEAR_PA)
-
-/* nWrMode_Select */
-#define APCI3120_ENABLE_SCAN 0x8
-#define APCI3120_DISABLE_SCAN (~APCI3120_ENABLE_SCAN)
-#define APCI3120_ENABLE_EOS_INT 0x2
-
-#define APCI3120_DISABLE_EOS_INT (~APCI3120_ENABLE_EOS_INT)
-#define APCI3120_ENABLE_EOC_INT 0x1
-#define APCI3120_DISABLE_EOC_INT (~APCI3120_ENABLE_EOC_INT)
-#define APCI3120_DISABLE_ALL_INTERRUPT_WITHOUT_TIMER \
- (APCI3120_DISABLE_EOS_INT & APCI3120_DISABLE_EOC_INT)
-#define APCI3120_DISABLE_ALL_INTERRUPT \
- (APCI3120_DISABLE_TIMER_INT & APCI3120_DISABLE_EOS_INT & APCI3120_DISABLE_EOC_INT)
-
-/* status register bits */
-#define APCI3120_EOC 0x8000
-#define APCI3120_EOS 0x2000
-
-/* software trigger dummy register */
-#define APCI3120_START_CONVERSION 0x02
-
-/* TIMER DEFINE */
-#define APCI3120_QUARTZ_A 70
-#define APCI3120_QUARTZ_B 50
-#define APCI3120_TIMER 1
-#define APCI3120_WATCHDOG 2
-#define APCI3120_TIMER_DISABLE 0
-#define APCI3120_TIMER_ENABLE 1
-#define APCI3120_ENABLE_TIMER2 0x4000
-#define APCI3120_DISABLE_TIMER2 (~APCI3120_ENABLE_TIMER2)
-#define APCI3120_ENABLE_TIMER_INT 0x04
-#define APCI3120_DISABLE_TIMER_INT (~APCI3120_ENABLE_TIMER_INT)
-#define APCI3120_WRITE_MODE_SELECT 0x0e
-#define APCI3120_SELECT_TIMER_0_WORD 0x00
-#define APCI3120_SELECT_TIMER_1_WORD 0x01
-#define APCI3120_TIMER_1_MODE_2 0x4
-
-/* $$ BIT FOR MODE IN nCsTimerCtr1 */
-#define APCI3120_TIMER_2_MODE_0 0x0
-#define APCI3120_TIMER_2_MODE_2 0x10
-#define APCI3120_TIMER_2_MODE_5 0x30
-
-/* $$ BIT FOR MODE IN nCsTimerCtr0 */
-#define APCI3120_SELECT_TIMER_2_LOW_WORD 0x02
-#define APCI3120_SELECT_TIMER_2_HIGH_WORD 0x03
-
-#define APCI3120_TIMER_CRT0 0x0d
-#define APCI3120_TIMER_CRT1 0x0c
-
-#define APCI3120_TIMER_VALUE 0x04
-#define APCI3120_TIMER_STATUS_REGISTER 0x0d
-#define APCI3120_RD_STATUS 0x02
-#define APCI3120_WR_ADDRESS 0x00
-#define APCI3120_ENABLE_WATCHDOG 0x20
-#define APCI3120_DISABLE_WATCHDOG (~APCI3120_ENABLE_WATCHDOG)
-#define APCI3120_ENABLE_TIMER_COUNTER 0x10
-#define APCI3120_DISABLE_TIMER_COUNTER (~APCI3120_ENABLE_TIMER_COUNTER)
-#define APCI3120_FC_TIMER 0x1000
-#define APCI3120_ENABLE_TIMER0 0x1000
-#define APCI3120_ENABLE_TIMER1 0x2000
-#define APCI3120_ENABLE_TIMER2 0x4000
-#define APCI3120_DISABLE_TIMER0 (~APCI3120_ENABLE_TIMER0)
-#define APCI3120_DISABLE_TIMER1 (~APCI3120_ENABLE_TIMER1)
-#define APCI3120_DISABLE_TIMER2 (~APCI3120_ENABLE_TIMER2)
-
-#define APCI3120_TIMER2_SELECT_EOS 0xc0
-#define APCI3120_COUNTER 3
-#define APCI3120_DISABLE_ALL_TIMER (APCI3120_DISABLE_TIMER0 & \
- APCI3120_DISABLE_TIMER1 & \
- APCI3120_DISABLE_TIMER2)
-
-#define MAX_ANALOGINPUT_CHANNELS 32
-
-struct str_AnalogReadInformation {
- /* EOC or EOS */
- unsigned char b_Type;
- /* Interrupt use or not */
- unsigned char b_InterruptFlag;
- /* Selection of the conversion time */
- unsigned int ui_ConvertTiming;
- /* Number of channel to read */
- unsigned char b_NbrOfChannel;
- /* Number of the channel to be read */
- unsigned int ui_ChannelList[MAX_ANALOGINPUT_CHANNELS];
- /* Gain of each channel */
- unsigned int ui_RangeList[MAX_ANALOGINPUT_CHANNELS];
-};
-
-/* ANALOG INPUT RANGE */
-static const struct comedi_lrange range_apci3120_ai = {
- 8, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2),
- BIP_RANGE(1),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2),
- UNI_RANGE(1)
- }
-};
-
-/* ANALOG OUTPUT RANGE */
-static const struct comedi_lrange range_apci3120_ao = {
- 2, {
- BIP_RANGE(10),
- UNI_RANGE(10)
- }
-};
-
-
-/* FUNCTION DEFINITIONS */
-static int apci3120_ai_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- const struct addi_board *this_board = dev->board_ptr;
- struct addi_private *devpriv = dev->private;
- unsigned int i;
-
- if ((data[0] != APCI3120_EOC_MODE) && (data[0] != APCI3120_EOS_MODE))
- return -1;
-
- /* Check for Conversion time to be added */
- devpriv->ui_EocEosConversionTime = data[2];
-
- if (data[0] == APCI3120_EOS_MODE) {
-
- /* Test the number of the channel */
- for (i = 0; i < data[3]; i++) {
-
- if (CR_CHAN(data[4 + i]) >=
- this_board->i_NbrAiChannel) {
- dev_err(dev->class_dev, "bad channel list\n");
- return -2;
- }
- }
-
- devpriv->b_InterruptMode = APCI3120_EOS_MODE;
-
- if (data[1])
- devpriv->b_EocEosInterrupt = APCI3120_ENABLE;
- else
- devpriv->b_EocEosInterrupt = APCI3120_DISABLE;
- /* Copy channel list and Range List to devpriv */
- devpriv->ui_AiNbrofChannels = data[3];
- for (i = 0; i < devpriv->ui_AiNbrofChannels; i++)
- devpriv->ui_AiChannelList[i] = data[4 + i];
-
- } else { /* EOC */
- devpriv->b_InterruptMode = APCI3120_EOC_MODE;
- if (data[1])
- devpriv->b_EocEosInterrupt = APCI3120_ENABLE;
- else
- devpriv->b_EocEosInterrupt = APCI3120_DISABLE;
- }
-
- return insn->n;
-}
-
-/*
- * This function first checks whether the channel list is valid and then
- * initializes the sequence RAM with the polarity, gain and channel number.
- * If the last argument ("check") is 1, it only validates the channel list.
- */
-static int apci3120_setup_chan_list(struct comedi_device *dev,
- struct comedi_subdevice *s,
- int n_chan,
- unsigned int *chanlist,
- char check)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int i;
- unsigned int gain;
- unsigned short us_TmpValue;
-
- /* correct channel and range number check itself comedi/range.c */
- if (n_chan < 1) {
- if (!check)
- dev_err(dev->class_dev,
- "range/channel list is empty!\n");
- return 0;
- }
- /* All is ok, so we can setup channel/range list */
- if (check)
- return 1;
-
- /* Code to set the PA and PR...Here it set PA to 0 */
- devpriv->us_OutputRegister =
- devpriv->us_OutputRegister & APCI3120_CLEAR_PA_PR;
- devpriv->us_OutputRegister = ((n_chan - 1) & 0xf) << 8;
- outw(devpriv->us_OutputRegister, dev->iobase + APCI3120_WR_ADDRESS);
-
- for (i = 0; i < n_chan; i++) {
- /* store range list to card */
- us_TmpValue = CR_CHAN(chanlist[i]); /* get channel number */
-
- if (CR_RANGE(chanlist[i]) < APCI3120_BIPOLAR_RANGES)
- us_TmpValue &= ((~APCI3120_UNIPOLAR) & 0xff); /* set bipolar */
- else
- us_TmpValue |= APCI3120_UNIPOLAR; /* enable unipolar */
-
- gain = CR_RANGE(chanlist[i]); /* get gain number */
- us_TmpValue |= ((gain & 0x03) << 4); /* <<4 for G0 and G1 bit in RAM */
- us_TmpValue |= i << 8; /* To select the RAM LOCATION */
- outw(us_TmpValue, dev->iobase + APCI3120_SEQ_RAM_ADDRESS);
- }
- return 1; /* we can serve this with scan logic */
-}
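
apci3120_setup_chan_list() above decodes each chanlist entry with CR_CHAN() and CR_RANGE(). Comedi packs the channel number, range index and reference into a single 32-bit chanspec; the sketch below assumes the conventional layout (channel in the low 16 bits, range in bits 16-23, aref in bits 24-25) and uses MY_-prefixed macros so it does not claim to be the comedi header.

#include <stdio.h>

/* Assumed chanspec layout; not copied from the comedi headers. */
#define MY_CR_PACK(chan, rng, aref) \
	((((aref) & 0x3u) << 24) | (((rng) & 0xffu) << 16) | ((chan) & 0xffffu))
#define MY_CR_CHAN(spec)	((spec) & 0xffffu)
#define MY_CR_RANGE(spec)	(((spec) >> 16) & 0xffu)

int main(void)
{
	unsigned int spec = MY_CR_PACK(3, 1, 0);	/* channel 3, range 1, aref 0 */

	printf("chan=%u range=%u\n", MY_CR_CHAN(spec), MY_CR_RANGE(spec));
	return 0;
}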
-
-/*
- * Reads the analog input in synchronous mode. EOC or EOS is used
- * as configured. If no conversion time is set, the default
- * conversion time of 10 microseconds is used.
- */
-static int apci3120_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- const struct addi_board *this_board = dev->board_ptr;
- struct addi_private *devpriv = dev->private;
- unsigned short us_ConvertTiming, us_TmpValue, i;
- unsigned char b_Tmp;
-
- /* fix conversion time to 10 us */
- if (!devpriv->ui_EocEosConversionTime)
- us_ConvertTiming = 10;
- else
- us_ConvertTiming = (unsigned short) (devpriv->ui_EocEosConversionTime / 1000); /* nano to useconds */
-
- /* Clear software registers */
- devpriv->b_TimerSelectMode = 0;
- devpriv->b_ModeSelectRegister = 0;
- devpriv->us_OutputRegister = 0;
-
- if (insn->unused[0] == 222) { /* second insn read */
- for (i = 0; i < insn->n; i++)
- data[i] = devpriv->ui_AiReadData[i];
- } else {
- devpriv->tsk_Current = current; /* Save the current process task structure */
-
-		/*
-		 * Test whether the board has the new Quartz and calculate the
-		 * time value to set in the timer
-		 */
- us_TmpValue =
- (unsigned short) inw(devpriv->iobase + APCI3120_RD_STATUS);
-
-		/* EL250804: Test whether the APCI3120 board has the new Quartz or whether it is an APCI3001 */
- if ((us_TmpValue & 0x00B0) == 0x00B0
- || !strcmp(this_board->pc_DriverName, "apci3001")) {
- us_ConvertTiming = (us_ConvertTiming * 2) - 2;
- } else {
- us_ConvertTiming =
- ((us_ConvertTiming * 12926) / 10000) - 1;
- }
-
- us_TmpValue = (unsigned short) devpriv->b_InterruptMode;
-
- switch (us_TmpValue) {
-
- case APCI3120_EOC_MODE:
-
- /*
- * Testing the interrupt flag and set the EOC bit Clears the FIFO
- */
- inw(devpriv->iobase + APCI3120_RESET_FIFO);
-
- /* Initialize the sequence array */
- if (!apci3120_setup_chan_list(dev, s, 1,
- &insn->chanspec, 0))
- return -EINVAL;
-
- /* Initialize Timer 0 mode 4 */
- devpriv->b_TimerSelectMode =
- (devpriv->
- b_TimerSelectMode & 0xFC) |
- APCI3120_TIMER_0_MODE_4;
- outb(devpriv->b_TimerSelectMode,
- devpriv->iobase + APCI3120_TIMER_CRT1);
-
- /* Reset the scan bit and Disables the EOS, DMA, EOC interrupt */
- devpriv->b_ModeSelectRegister =
- devpriv->
- b_ModeSelectRegister & APCI3120_DISABLE_SCAN;
-
- if (devpriv->b_EocEosInterrupt == APCI3120_ENABLE) {
-
- /* Disables the EOS,DMA and enables the EOC interrupt */
- devpriv->b_ModeSelectRegister =
- (devpriv->
- b_ModeSelectRegister &
- APCI3120_DISABLE_EOS_INT) |
- APCI3120_ENABLE_EOC_INT;
- inw(devpriv->iobase);
-
- } else {
- devpriv->b_ModeSelectRegister =
- devpriv->
- b_ModeSelectRegister &
- APCI3120_DISABLE_ALL_INTERRUPT_WITHOUT_TIMER;
- }
-
- outb(devpriv->b_ModeSelectRegister,
- devpriv->iobase + APCI3120_WRITE_MODE_SELECT);
-
- /* Sets gate 0 */
- devpriv->us_OutputRegister =
- (devpriv->
- us_OutputRegister & APCI3120_CLEAR_PA_PR) |
- APCI3120_ENABLE_TIMER0;
- outw(devpriv->us_OutputRegister,
- devpriv->iobase + APCI3120_WR_ADDRESS);
-
- /* Select Timer 0 */
- b_Tmp = ((devpriv->
- b_DigitalOutputRegister) & 0xF0) |
- APCI3120_SELECT_TIMER_0_WORD;
- outb(b_Tmp, devpriv->iobase + APCI3120_TIMER_CRT0);
-
- /* Set the conversion time */
- outw(us_ConvertTiming,
- devpriv->iobase + APCI3120_TIMER_VALUE);
-
- us_TmpValue =
- (unsigned short) inw(dev->iobase + APCI3120_RD_STATUS);
-
- if (devpriv->b_EocEosInterrupt == APCI3120_DISABLE) {
-
- do {
- /* Waiting for the end of conversion */
- us_TmpValue =
- inw(devpriv->iobase +
- APCI3120_RD_STATUS);
- } while ((us_TmpValue & APCI3120_EOC) ==
- APCI3120_EOC);
-
- /* Read the result in FIFO and put it in insn data pointer */
- us_TmpValue = inw(devpriv->iobase + 0);
- *data = us_TmpValue;
-
- inw(devpriv->iobase + APCI3120_RESET_FIFO);
- }
-
- break;
-
- case APCI3120_EOS_MODE:
-
- inw(devpriv->iobase);
- /* Clears the FIFO */
- inw(devpriv->iobase + APCI3120_RESET_FIFO);
- /* clear PA PR and disable timer 0 */
-
- devpriv->us_OutputRegister =
- (devpriv->
- us_OutputRegister & APCI3120_CLEAR_PA_PR) |
- APCI3120_DISABLE_TIMER0;
-
- outw(devpriv->us_OutputRegister,
- devpriv->iobase + APCI3120_WR_ADDRESS);
-
- if (!apci3120_setup_chan_list(dev, s,
- devpriv->ui_AiNbrofChannels,
- devpriv->ui_AiChannelList, 0))
- return -EINVAL;
-
- /* Initialize Timer 0 mode 2 */
- devpriv->b_TimerSelectMode =
- (devpriv->
- b_TimerSelectMode & 0xFC) |
- APCI3120_TIMER_0_MODE_2;
- outb(devpriv->b_TimerSelectMode,
- devpriv->iobase + APCI3120_TIMER_CRT1);
-
- /* Select Timer 0 */
- b_Tmp = ((devpriv->
- b_DigitalOutputRegister) & 0xF0) |
- APCI3120_SELECT_TIMER_0_WORD;
- outb(b_Tmp, devpriv->iobase + APCI3120_TIMER_CRT0);
-
- /* Set the conversion time */
- outw(us_ConvertTiming,
- devpriv->iobase + APCI3120_TIMER_VALUE);
-
- /* Set the scan bit */
- devpriv->b_ModeSelectRegister =
- devpriv->
- b_ModeSelectRegister | APCI3120_ENABLE_SCAN;
- outb(devpriv->b_ModeSelectRegister,
- devpriv->iobase + APCI3120_WRITE_MODE_SELECT);
-
- /* If Interrupt function is loaded */
- if (devpriv->b_EocEosInterrupt == APCI3120_ENABLE) {
- /* Disables the EOC,DMA and enables the EOS interrupt */
- devpriv->b_ModeSelectRegister =
- (devpriv->
- b_ModeSelectRegister &
- APCI3120_DISABLE_EOC_INT) |
- APCI3120_ENABLE_EOS_INT;
- inw(devpriv->iobase);
-
- } else
- devpriv->b_ModeSelectRegister =
- devpriv->
- b_ModeSelectRegister &
- APCI3120_DISABLE_ALL_INTERRUPT_WITHOUT_TIMER;
-
- outb(devpriv->b_ModeSelectRegister,
- devpriv->iobase + APCI3120_WRITE_MODE_SELECT);
-
- inw(devpriv->iobase + APCI3120_RD_STATUS);
-
- /* Sets gate 0 */
- devpriv->us_OutputRegister =
- devpriv->
- us_OutputRegister | APCI3120_ENABLE_TIMER0;
- outw(devpriv->us_OutputRegister,
- devpriv->iobase + APCI3120_WR_ADDRESS);
-
- /* Start conversion */
- outw(0, devpriv->iobase + APCI3120_START_CONVERSION);
-
-			/* Wait for the end of conversion if no interrupt is installed */
- if (devpriv->b_EocEosInterrupt == APCI3120_DISABLE) {
- /* Waiting the end of conversion */
- do {
- us_TmpValue =
- inw(devpriv->iobase +
- APCI3120_RD_STATUS);
- } while ((us_TmpValue & APCI3120_EOS) !=
- APCI3120_EOS);
-
- for (i = 0; i < devpriv->ui_AiNbrofChannels;
- i++) {
- /* Read the result in FIFO and write them in shared memory */
- us_TmpValue = inw(devpriv->iobase);
- data[i] = (unsigned int) us_TmpValue;
- }
-
- devpriv->b_InterruptMode = APCI3120_EOC_MODE; /* Restore defaults */
- }
- break;
-
- default:
- dev_err(dev->class_dev, "inputs wrong\n");
-
- }
- devpriv->ui_EocEosConversionTime = 0; /* re initializing the variable */
- }
-
- return insn->n;
-
-}
-
-static int apci3120_reset(struct comedi_device *dev)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int i;
- unsigned short us_TmpValue;
-
- devpriv->ai_running = 0;
- devpriv->b_EocEosInterrupt = APCI3120_DISABLE;
- devpriv->b_InterruptMode = APCI3120_EOC_MODE;
- devpriv->ui_EocEosConversionTime = 0; /* set eoc eos conv time to 0 */
-
- /* variables used in timer subdevice */
- devpriv->b_Timer2Mode = 0;
- devpriv->b_Timer2Interrupt = 0;
- devpriv->b_ExttrigEnable = 0; /* Disable ext trigger */
-
-	/* Disable all interrupts, watchdog for the analog output */
- devpriv->b_ModeSelectRegister = 0;
- outb(devpriv->b_ModeSelectRegister,
- dev->iobase + APCI3120_WRITE_MODE_SELECT);
-
- /* Disables all counters, ext trigger and clears PA, PR */
- devpriv->us_OutputRegister = 0;
- outw(devpriv->us_OutputRegister, dev->iobase + APCI3120_WR_ADDRESS);
-
-	/*
-	 * Set all analog output channels to 0 V. 8191 is the decimal
-	 * value for zero volts in bipolar mode (the default).
-	 */
- outw(8191 | APCI3120_ANALOG_OP_CHANNEL_1, dev->iobase + APCI3120_ANALOG_OUTPUT_1); /* channel 1 */
- outw(8191 | APCI3120_ANALOG_OP_CHANNEL_2, dev->iobase + APCI3120_ANALOG_OUTPUT_1); /* channel 2 */
- outw(8191 | APCI3120_ANALOG_OP_CHANNEL_3, dev->iobase + APCI3120_ANALOG_OUTPUT_1); /* channel 3 */
- outw(8191 | APCI3120_ANALOG_OP_CHANNEL_4, dev->iobase + APCI3120_ANALOG_OUTPUT_1); /* channel 4 */
-
- outw(8191 | APCI3120_ANALOG_OP_CHANNEL_5, dev->iobase + APCI3120_ANALOG_OUTPUT_2); /* channel 5 */
- outw(8191 | APCI3120_ANALOG_OP_CHANNEL_6, dev->iobase + APCI3120_ANALOG_OUTPUT_2); /* channel 6 */
- outw(8191 | APCI3120_ANALOG_OP_CHANNEL_7, dev->iobase + APCI3120_ANALOG_OUTPUT_2); /* channel 7 */
- outw(8191 | APCI3120_ANALOG_OP_CHANNEL_8, dev->iobase + APCI3120_ANALOG_OUTPUT_2); /* channel 8 */
-
- udelay(10);
-
- inw(dev->iobase + 0); /* make a dummy read */
- inb(dev->iobase + APCI3120_RESET_FIFO); /* flush FIFO */
- inw(dev->iobase + APCI3120_RD_STATUS); /* flush A/D status register */
-
- /* code to reset the RAM sequence */
- for (i = 0; i < 16; i++) {
- us_TmpValue = i << 8; /* select the location */
- outw(us_TmpValue, dev->iobase + APCI3120_SEQ_RAM_ADDRESS);
- }
- return 0;
-}
-
-static int apci3120_exttrig_enable(struct comedi_device *dev)
-{
- struct addi_private *devpriv = dev->private;
-
- devpriv->us_OutputRegister |= APCI3120_ENABLE_EXT_TRIGGER;
- outw(devpriv->us_OutputRegister, dev->iobase + APCI3120_WR_ADDRESS);
- return 0;
-}
-
-static int apci3120_exttrig_disable(struct comedi_device *dev)
-{
- struct addi_private *devpriv = dev->private;
-
- devpriv->us_OutputRegister &= ~APCI3120_ENABLE_EXT_TRIGGER;
- outw(devpriv->us_OutputRegister, dev->iobase + APCI3120_WR_ADDRESS);
- return 0;
-}
-
-static int apci3120_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct addi_private *devpriv = dev->private;
-
- /* Disable A2P Fifo write and AMWEN signal */
- outw(0, devpriv->i_IobaseAddon + 4);
-
- /* Disable Bus Master ADD ON */
- outw(APCI3120_ADD_ON_AGCSTS_LOW, devpriv->i_IobaseAddon + 0);
- outw(0, devpriv->i_IobaseAddon + 2);
- outw(APCI3120_ADD_ON_AGCSTS_HIGH, devpriv->i_IobaseAddon + 0);
- outw(0, devpriv->i_IobaseAddon + 2);
-
- /* Disable BUS Master PCI */
- outl(0, devpriv->i_IobaseAmcc + AMCC_OP_REG_MCSR);
-
- /* Disable ext trigger */
- apci3120_exttrig_disable(dev);
-
- devpriv->us_OutputRegister = 0;
- /* stop counters */
- outw(devpriv->
- us_OutputRegister & APCI3120_DISABLE_TIMER0 &
- APCI3120_DISABLE_TIMER1, dev->iobase + APCI3120_WR_ADDRESS);
-
- outw(APCI3120_DISABLE_ALL_TIMER, dev->iobase + APCI3120_WR_ADDRESS);
-
- /* DISABLE_ALL_INTERRUPT */
- outb(APCI3120_DISABLE_ALL_INTERRUPT,
- dev->iobase + APCI3120_WRITE_MODE_SELECT);
- /* Flush FIFO */
- inb(dev->iobase + APCI3120_RESET_FIFO);
- inw(dev->iobase + APCI3120_RD_STATUS);
- devpriv->ui_AiActualScan = 0;
- s->async->cur_chan = 0;
- devpriv->ui_DmaActualBuffer = 0;
-
- devpriv->ai_running = 0;
- devpriv->b_InterruptMode = APCI3120_EOC_MODE;
- devpriv->b_EocEosInterrupt = APCI3120_DISABLE;
- apci3120_reset(dev);
- return 0;
-}
-
-static int apci3120_ai_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- int err = 0;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_EXT);
- err |= cfc_check_trigger_src(&cmd->scan_begin_src,
- TRIG_TIMER | TRIG_FOLLOW);
- err |= cfc_check_trigger_src(&cmd->convert_src, TRIG_TIMER);
- err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= cfc_check_trigger_is_unique(cmd->start_src);
- err |= cfc_check_trigger_is_unique(cmd->scan_begin_src);
- err |= cfc_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0);
-
- if (cmd->scan_begin_src == TRIG_TIMER) /* Test Delay timing */
- err |= cfc_check_trigger_arg_min(&cmd->scan_begin_arg, 100000);
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- if (cmd->convert_arg)
- err |= cfc_check_trigger_arg_min(&cmd->convert_arg,
- 10000);
- } else {
- err |= cfc_check_trigger_arg_min(&cmd->convert_arg, 10000);
- }
-
- err |= cfc_check_trigger_arg_min(&cmd->chanlist_len, 1);
- err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= cfc_check_trigger_arg_min(&cmd->stop_arg, 1);
- else /* TRIG_NONE */
- err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* step 4: fix up any arguments */
-
- if (cmd->scan_begin_src == TRIG_TIMER &&
- cmd->scan_begin_arg < cmd->convert_arg * cmd->scan_end_arg) {
- cmd->scan_begin_arg = cmd->convert_arg * cmd->scan_end_arg;
- err |= -EINVAL;
- }
-
- if (err)
- return 4;
-
- return 0;
-}
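
Step 4 of apci3120_ai_cmdtest() above raises scan_begin_arg so that one full scan (convert_arg * scan_end_arg) fits inside the scan period; for example, with a 10000 ns conversion time and 16 channels the minimum scan period becomes 160000 ns. A tiny standalone sketch of that fixup, with a hypothetical helper name, is:

#include <errno.h>

/* Sketch only: raise the scan period so a complete scan fits inside it. */
static int fixup_scan_begin(unsigned int *scan_begin_ns,
			    unsigned int convert_ns, unsigned int n_chan)
{
	unsigned int min_scan_ns = convert_ns * n_chan;

	if (*scan_begin_ns < min_scan_ns) {
		*scan_begin_ns = min_scan_ns;
		return -EINVAL;		/* argument had to be adjusted */
	}
	return 0;
}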
-
-/*
- * This is used for analog input cyclic acquisition.
- * It performs the command operations.
- * If DMA is configured it initializes the DMA, otherwise it performs
- * the acquisition with the EOS interrupt.
- */
-static int apci3120_cyclic_ai(int mode,
- struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- const struct addi_board *this_board = dev->board_ptr;
- struct addi_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned char b_Tmp;
- unsigned int ui_Tmp, ui_DelayTiming = 0, ui_TimerValue1 = 0, dmalen0 =
- 0, dmalen1 = 0, ui_TimerValue2 =
- 0, ui_TimerValue0, ui_ConvertTiming;
- unsigned short us_TmpValue;
-
- /* Resets the FIFO */
- inb(dev->iobase + APCI3120_RESET_FIFO);
-
- devpriv->ai_running = 1;
-
- /* clear software registers */
- devpriv->b_TimerSelectMode = 0;
- devpriv->us_OutputRegister = 0;
- devpriv->b_ModeSelectRegister = 0;
-
- /* Clear Timer Write TC int */
- outl(APCI3120_CLEAR_WRITE_TC_INT,
- devpriv->i_IobaseAmcc + APCI3120_AMCC_OP_REG_INTCSR);
-
- /* Disables All Timer */
- /* Sets PR and PA to 0 */
- devpriv->us_OutputRegister = devpriv->us_OutputRegister &
- APCI3120_DISABLE_TIMER0 &
- APCI3120_DISABLE_TIMER1 & APCI3120_CLEAR_PA_PR;
-
- outw(devpriv->us_OutputRegister, dev->iobase + APCI3120_WR_ADDRESS);
-
- /* Resets the FIFO */
- /* BEGIN JK 07.05.04: Comparison between WIN32 and Linux driver */
- inb(devpriv->iobase + APCI3120_RESET_FIFO);
- /* END JK 07.05.04: Comparison between WIN32 and Linux driver */
-
- devpriv->ui_AiActualScan = 0;
- s->async->cur_chan = 0;
- devpriv->ui_DmaActualBuffer = 0;
-
- /* value for timer2 minus -2 has to be done */
- ui_TimerValue2 = cmd->stop_arg - 2;
- ui_ConvertTiming = cmd->convert_arg;
-
- if (mode == 2)
- ui_DelayTiming = cmd->scan_begin_arg;
-
- /* Initializes the sequence array */
- if (!apci3120_setup_chan_list(dev, s, devpriv->ui_AiNbrofChannels,
- cmd->chanlist, 0))
- return -EINVAL;
-
- us_TmpValue = (unsigned short) inw(dev->iobase + APCI3120_RD_STATUS);
-
-	/* EL241003 Begin: added to replace floating-point calculations with integer calculations */
-	/* EL250804: Test whether the APCI3120 board has the new Quartz or whether it is an APCI3001 */
- if ((us_TmpValue & 0x00B0) == 0x00B0
- || !strcmp(this_board->pc_DriverName, "apci3001")) {
- ui_TimerValue0 = ui_ConvertTiming * 2 - 2000;
- ui_TimerValue0 = ui_TimerValue0 / 1000;
-
- if (mode == 2) {
- ui_DelayTiming = ui_DelayTiming / 1000;
- ui_TimerValue1 = ui_DelayTiming * 2 - 200;
- ui_TimerValue1 = ui_TimerValue1 / 100;
- }
- } else {
- ui_ConvertTiming = ui_ConvertTiming / 1000;
- ui_TimerValue0 = ui_ConvertTiming * 12926 - 10000;
- ui_TimerValue0 = ui_TimerValue0 / 10000;
-
- if (mode == 2) {
- ui_DelayTiming = ui_DelayTiming / 1000;
- ui_TimerValue1 = ui_DelayTiming * 12926 - 1;
- ui_TimerValue1 = ui_TimerValue1 / 1000000;
- }
- }
- /* EL241003 End */
-
- if (devpriv->b_ExttrigEnable == APCI3120_ENABLE)
- apci3120_exttrig_enable(dev); /* activate EXT trigger */
- switch (mode) {
- case 1:
- /* init timer0 in mode 2 */
- devpriv->b_TimerSelectMode =
- (devpriv->
- b_TimerSelectMode & 0xFC) | APCI3120_TIMER_0_MODE_2;
- outb(devpriv->b_TimerSelectMode,
- dev->iobase + APCI3120_TIMER_CRT1);
-
- /* Select Timer 0 */
- b_Tmp = ((devpriv->
- b_DigitalOutputRegister) & 0xF0) |
- APCI3120_SELECT_TIMER_0_WORD;
- outb(b_Tmp, dev->iobase + APCI3120_TIMER_CRT0);
- /* Set the conversion time */
- outw(((unsigned short) ui_TimerValue0),
- dev->iobase + APCI3120_TIMER_VALUE);
- break;
-
- case 2:
- /* init timer1 in mode 2 */
- devpriv->b_TimerSelectMode =
- (devpriv->
- b_TimerSelectMode & 0xF3) | APCI3120_TIMER_1_MODE_2;
- outb(devpriv->b_TimerSelectMode,
- dev->iobase + APCI3120_TIMER_CRT1);
-
- /* Select Timer 1 */
- b_Tmp = ((devpriv->
- b_DigitalOutputRegister) & 0xF0) |
- APCI3120_SELECT_TIMER_1_WORD;
- outb(b_Tmp, dev->iobase + APCI3120_TIMER_CRT0);
- /* Set the conversion time */
- outw(((unsigned short) ui_TimerValue1),
- dev->iobase + APCI3120_TIMER_VALUE);
-
- /* init timer0 in mode 2 */
- devpriv->b_TimerSelectMode =
- (devpriv->
- b_TimerSelectMode & 0xFC) | APCI3120_TIMER_0_MODE_2;
- outb(devpriv->b_TimerSelectMode,
- dev->iobase + APCI3120_TIMER_CRT1);
-
- /* Select Timer 0 */
- b_Tmp = ((devpriv->
- b_DigitalOutputRegister) & 0xF0) |
- APCI3120_SELECT_TIMER_0_WORD;
- outb(b_Tmp, dev->iobase + APCI3120_TIMER_CRT0);
-
- /* Set the conversion time */
- outw(((unsigned short) ui_TimerValue0),
- dev->iobase + APCI3120_TIMER_VALUE);
- break;
-
- }
- /* common for all modes */
- /* BEGIN JK 07.05.04: Comparison between WIN32 and Linux driver */
- devpriv->b_ModeSelectRegister = devpriv->b_ModeSelectRegister &
- APCI3120_DISABLE_SCAN;
- /* END JK 07.05.04: Comparison between WIN32 and Linux driver */
-
- outb(devpriv->b_ModeSelectRegister,
- dev->iobase + APCI3120_WRITE_MODE_SELECT);
-
- /* If DMA is disabled */
- if (devpriv->us_UseDma == APCI3120_DISABLE) {
- /* disable EOC and enable EOS */
- devpriv->b_InterruptMode = APCI3120_EOS_MODE;
- devpriv->b_EocEosInterrupt = APCI3120_ENABLE;
-
- devpriv->b_ModeSelectRegister =
- (devpriv->
- b_ModeSelectRegister & APCI3120_DISABLE_EOC_INT) |
- APCI3120_ENABLE_EOS_INT;
- outb(devpriv->b_ModeSelectRegister,
- dev->iobase + APCI3120_WRITE_MODE_SELECT);
-
- if (cmd->stop_src == TRIG_COUNT) {
- /*
- * configure Timer2 For counting EOS Reset gate 2 of Timer 2 to
- * disable it (Set Bit D14 to 0)
- */
- devpriv->us_OutputRegister =
- devpriv->
- us_OutputRegister & APCI3120_DISABLE_TIMER2;
- outw(devpriv->us_OutputRegister,
- dev->iobase + APCI3120_WR_ADDRESS);
-
- /* DISABLE TIMER intERRUPT */
- devpriv->b_ModeSelectRegister =
- devpriv->
- b_ModeSelectRegister &
- APCI3120_DISABLE_TIMER_INT & 0xEF;
- outb(devpriv->b_ModeSelectRegister,
- dev->iobase + APCI3120_WRITE_MODE_SELECT);
-
- /* (1) Init timer 2 in mode 0 and write timer value */
- devpriv->b_TimerSelectMode =
- (devpriv->
- b_TimerSelectMode & 0x0F) |
- APCI3120_TIMER_2_MODE_0;
- outb(devpriv->b_TimerSelectMode,
- dev->iobase + APCI3120_TIMER_CRT1);
-
- /* Writing LOW unsigned short */
- b_Tmp = ((devpriv->
- b_DigitalOutputRegister) & 0xF0) |
- APCI3120_SELECT_TIMER_2_LOW_WORD;
- outb(b_Tmp, dev->iobase + APCI3120_TIMER_CRT0);
- outw(ui_TimerValue2 & 0xffff,
- dev->iobase + APCI3120_TIMER_VALUE);
-
- /* Writing HIGH unsigned short */
- b_Tmp = ((devpriv->
- b_DigitalOutputRegister) & 0xF0) |
- APCI3120_SELECT_TIMER_2_HIGH_WORD;
- outb(b_Tmp, dev->iobase + APCI3120_TIMER_CRT0);
- outw((ui_TimerValue2 >> 16) & 0xffff,
- dev->iobase + APCI3120_TIMER_VALUE);
-
- /* (2) Reset FC_TIMER BIT Clearing timer status register */
- inb(dev->iobase + APCI3120_TIMER_STATUS_REGISTER);
- /* enable timer counter and disable watch dog */
- devpriv->b_ModeSelectRegister =
- (devpriv->
- b_ModeSelectRegister |
- APCI3120_ENABLE_TIMER_COUNTER) &
- APCI3120_DISABLE_WATCHDOG;
- /* select EOS clock input for timer 2 */
- devpriv->b_ModeSelectRegister =
- devpriv->
- b_ModeSelectRegister |
- APCI3120_TIMER2_SELECT_EOS;
- /* Enable timer2 interrupt */
- devpriv->b_ModeSelectRegister =
- devpriv->
- b_ModeSelectRegister |
- APCI3120_ENABLE_TIMER_INT;
- outb(devpriv->b_ModeSelectRegister,
- dev->iobase + APCI3120_WRITE_MODE_SELECT);
- devpriv->b_Timer2Mode = APCI3120_COUNTER;
- devpriv->b_Timer2Interrupt = APCI3120_ENABLE;
- }
- } else {
- /* If DMA Enabled */
- unsigned int scan_bytes = cmd->scan_end_arg * sizeof(short);
-
- devpriv->b_InterruptMode = APCI3120_DMA_MODE;
-
- /* Disables the EOC, EOS interrupt */
- devpriv->b_ModeSelectRegister = devpriv->b_ModeSelectRegister &
- APCI3120_DISABLE_EOC_INT & APCI3120_DISABLE_EOS_INT;
-
- outb(devpriv->b_ModeSelectRegister,
- dev->iobase + APCI3120_WRITE_MODE_SELECT);
-
- dmalen0 = devpriv->ui_DmaBufferSize[0];
- dmalen1 = devpriv->ui_DmaBufferSize[1];
-
- if (cmd->stop_src == TRIG_COUNT) {
-			/*
-			 * Must we completely fill the first buffer? And must we
-			 * completely fill the second buffer once the first is full?
-			 */
- if (dmalen0 > (cmd->stop_arg * scan_bytes)) {
- dmalen0 = cmd->stop_arg * scan_bytes;
- } else if (dmalen1 > (cmd->stop_arg * scan_bytes -
- dmalen0))
- dmalen1 = cmd->stop_arg * scan_bytes -
- dmalen0;
- }
-
- if (cmd->flags & CMDF_WAKE_EOS) {
- /* don't we want wake up every scan? */
- if (dmalen0 > scan_bytes) {
- dmalen0 = scan_bytes;
- if (cmd->scan_end_arg & 1)
- dmalen0 += 2;
- }
- if (dmalen1 > scan_bytes) {
- dmalen1 = scan_bytes;
- if (cmd->scan_end_arg & 1)
- dmalen1 -= 2;
- if (dmalen1 < 4)
- dmalen1 = 4;
- }
-		} else {	/* isn't the output buffer smaller than our DMA buffer? */
- if (dmalen0 > s->async->prealloc_bufsz)
- dmalen0 = s->async->prealloc_bufsz;
- if (dmalen1 > s->async->prealloc_bufsz)
- dmalen1 = s->async->prealloc_bufsz;
- }
- devpriv->ui_DmaBufferUsesize[0] = dmalen0;
- devpriv->ui_DmaBufferUsesize[1] = dmalen1;
-
- /* Initialize DMA */
-
- /*
- * Set Transfer count enable bit and A2P_fifo reset bit in AGCSTS
- * register 1
- */
- ui_Tmp = AGCSTS_TC_ENABLE | AGCSTS_RESET_A2P_FIFO;
- outl(ui_Tmp, devpriv->i_IobaseAmcc + AMCC_OP_REG_AGCSTS);
-
- /* changed since 16 bit interface for add on */
- /* ENABLE BUS MASTER */
- outw(APCI3120_ADD_ON_AGCSTS_LOW, devpriv->i_IobaseAddon + 0);
- outw(APCI3120_ENABLE_TRANSFER_ADD_ON_LOW,
- devpriv->i_IobaseAddon + 2);
-
- outw(APCI3120_ADD_ON_AGCSTS_HIGH, devpriv->i_IobaseAddon + 0);
- outw(APCI3120_ENABLE_TRANSFER_ADD_ON_HIGH,
- devpriv->i_IobaseAddon + 2);
-
- /*
- * TO VERIFIED BEGIN JK 07.05.04: Comparison between WIN32 and Linux
- * driver
- */
- outw(0x1000, devpriv->i_IobaseAddon + 2);
- /* END JK 07.05.04: Comparison between WIN32 and Linux driver */
-
- /* 2 No change */
- /* A2P FIFO MANAGEMENT */
- /* A2P fifo reset & transfer control enable */
- outl(APCI3120_A2P_FIFO_MANAGEMENT, devpriv->i_IobaseAmcc +
- APCI3120_AMCC_OP_MCSR);
-
-		/*
-		 * 3
-		 * Beginning address of the DMA buffer. The 32-bit address of the
-		 * DMA buffer is split into two 16-bit halves; this can be done in
-		 * _attach and stored in an array (possibly one entry per page).
-		 */
-
- /* DMA Start Address Low */
- outw(APCI3120_ADD_ON_MWAR_LOW, devpriv->i_IobaseAddon + 0);
- outw((devpriv->ul_DmaBufferHw[0] & 0xFFFF),
- devpriv->i_IobaseAddon + 2);
-
- /* DMA Start Address High */
- outw(APCI3120_ADD_ON_MWAR_HIGH, devpriv->i_IobaseAddon + 0);
- outw((devpriv->ul_DmaBufferHw[0] / 65536),
- devpriv->i_IobaseAddon + 2);
-
- /*
- * 4
- * amount of bytes to be transferred set transfer count used ADDON
- * MWTC register commented testing
- */
-
- /* Nbr of acquisition LOW */
- outw(APCI3120_ADD_ON_MWTC_LOW, devpriv->i_IobaseAddon + 0);
- outw((devpriv->ui_DmaBufferUsesize[0] & 0xFFFF),
- devpriv->i_IobaseAddon + 2);
-
- /* Nbr of acquisition HIGH */
- outw(APCI3120_ADD_ON_MWTC_HIGH, devpriv->i_IobaseAddon + 0);
- outw((devpriv->ui_DmaBufferUsesize[0] / 65536),
- devpriv->i_IobaseAddon + 2);
-
- /*
- * 5
- * To configure A2P FIFO testing outl(
- * FIFO_ADVANCE_ON_BYTE_2,devpriv->i_IobaseAmcc+AMCC_OP_REG_INTCSR);
- */
-
- /* A2P FIFO RESET */
- /*
- * TO VERIFY BEGIN JK 07.05.04: Comparison between WIN32 and Linux
- * driver
- */
- outl(0x04000000UL, devpriv->i_IobaseAmcc + AMCC_OP_REG_MCSR);
- /* END JK 07.05.04: Comparison between WIN32 and Linux driver */
-
- /*
- * 6
- * ENABLE A2P FIFO WRITE AND ENABLE AMWEN AMWEN_ENABLE |
- * A2P_FIFO_WRITE_ENABLE (0x01|0x02)=0x03
- */
-
- /*
- * 7
- * initialise end of dma interrupt AINT_WRITE_COMPL =
- * ENABLE_WRITE_TC_INT(ADDI)
- */
- /* A2P FIFO CONFIGURATE, END OF DMA intERRUPT INIT */
- outl((APCI3120_FIFO_ADVANCE_ON_BYTE_2 |
- APCI3120_ENABLE_WRITE_TC_INT),
- devpriv->i_IobaseAmcc + AMCC_OP_REG_INTCSR);
-
- /* BEGIN JK 07.05.04: Comparison between WIN32 and Linux driver */
- /* ENABLE A2P FIFO WRITE AND ENABLE AMWEN */
- outw(3, devpriv->i_IobaseAddon + 4);
- /* END JK 07.05.04: Comparison between WIN32 and Linux driver */
-
- /* A2P FIFO RESET */
- /* BEGIN JK 07.05.04: Comparison between WIN32 and Linux driver */
- outl(0x04000000UL,
- devpriv->i_IobaseAmcc + APCI3120_AMCC_OP_MCSR);
- /* END JK 07.05.04: Comparison between WIN32 and Linux driver */
- }
-
- if (devpriv->us_UseDma == APCI3120_DISABLE &&
- cmd->stop_src == TRIG_COUNT) {
- /* set gate 2 to start conversion */
- devpriv->us_OutputRegister =
- devpriv->us_OutputRegister | APCI3120_ENABLE_TIMER2;
- outw(devpriv->us_OutputRegister,
- dev->iobase + APCI3120_WR_ADDRESS);
- }
-
- switch (mode) {
- case 1:
- /* set gate 0 to start conversion */
- devpriv->us_OutputRegister =
- devpriv->us_OutputRegister | APCI3120_ENABLE_TIMER0;
- outw(devpriv->us_OutputRegister,
- dev->iobase + APCI3120_WR_ADDRESS);
- break;
- case 2:
- /* set gate 0 and gate 1 */
- devpriv->us_OutputRegister =
- devpriv->us_OutputRegister | APCI3120_ENABLE_TIMER1;
- devpriv->us_OutputRegister =
- devpriv->us_OutputRegister | APCI3120_ENABLE_TIMER0;
- outw(devpriv->us_OutputRegister,
- dev->iobase + APCI3120_WR_ADDRESS);
- break;
-
- }
-
- return 0;
-
-}
-
-/*
- * Performs asynchronous acquisition.
- * Determines whether mode 1 or 2 is used.
- */
-static int apci3120_ai_cmd(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct addi_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
-
- /* loading private structure with cmd structure inputs */
- devpriv->ui_AiNbrofChannels = cmd->chanlist_len;
-
- if (cmd->start_src == TRIG_EXT)
- devpriv->b_ExttrigEnable = APCI3120_ENABLE;
- else
- devpriv->b_ExttrigEnable = APCI3120_DISABLE;
-
- if (cmd->scan_begin_src == TRIG_FOLLOW)
- return apci3120_cyclic_ai(1, dev, s);
- /* TRIG_TIMER */
- return apci3120_cyclic_ai(2, dev, s);
-}
-
-/*
- * This function copies the data from DMA buffer to the Comedi buffer.
- */
-static void v_APCI3120_InterruptDmaMoveBlock16bit(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned short *dma_buffer,
- unsigned int num_samples)
-{
- struct addi_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
-
- devpriv->ui_AiActualScan +=
- (s->async->cur_chan + num_samples) / cmd->scan_end_arg;
- s->async->cur_chan += num_samples;
- s->async->cur_chan %= cmd->scan_end_arg;
-
- cfc_write_array_to_buffer(s, dma_buffer, num_samples * sizeof(short));
-}
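
The DMA interrupt handler that follows switches between two DMA buffers when double buffering is enabled (next_dma_buf = 1 - ui_DmaActualBuffer). A minimal sketch of that ping-pong selection, with invented names, is:

/* Sketch only: 'active' alternates between buffer 0 and buffer 1. */
struct pingpong {
	unsigned int active;		/* buffer currently being filled */
	void *buf[2];
	unsigned int len[2];
};

static void *pingpong_complete(struct pingpong *pp, unsigned int *len)
{
	void *done = pp->buf[pp->active];

	*len = pp->len[pp->active];
	pp->active = 1 - pp->active;	/* hand the other buffer to the hardware */
	return done;			/* caller drains this buffer */
}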
-
-/*
- * This is a handler for the DMA interrupt.
- * This function copies the data to the comedi buffer.
- * For continuous DMA it reinitializes the DMA operation.
- * For single-mode DMA it stops the acquisition.
- */
-static void apci3120_interrupt_dma(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct addi_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int next_dma_buf, samplesinbuf;
- unsigned long low_word, high_word, var;
- unsigned int ui_Tmp;
-
- samplesinbuf =
- devpriv->ui_DmaBufferUsesize[devpriv->ui_DmaActualBuffer] -
- inl(devpriv->i_IobaseAmcc + AMCC_OP_REG_MWTC);
-
- if (samplesinbuf <
- devpriv->ui_DmaBufferUsesize[devpriv->ui_DmaActualBuffer]) {
- dev_err(dev->class_dev, "Interrupted DMA transfer!\n");
- }
- if (samplesinbuf & 1) {
- dev_err(dev->class_dev, "Odd count of bytes in DMA ring!\n");
- apci3120_cancel(dev, s);
- return;
- }
- samplesinbuf = samplesinbuf >> 1; /* number of received samples */
- if (devpriv->b_DmaDoubleBuffer) {
- /* switch DMA buffers if is used double buffering */
- next_dma_buf = 1 - devpriv->ui_DmaActualBuffer;
-
- ui_Tmp = AGCSTS_TC_ENABLE | AGCSTS_RESET_A2P_FIFO;
- outl(ui_Tmp, devpriv->i_IobaseAddon + AMCC_OP_REG_AGCSTS);
-
- /* changed since 16 bit interface for add on */
- outw(APCI3120_ADD_ON_AGCSTS_LOW, devpriv->i_IobaseAddon + 0);
- outw(APCI3120_ENABLE_TRANSFER_ADD_ON_LOW,
- devpriv->i_IobaseAddon + 2);
- outw(APCI3120_ADD_ON_AGCSTS_HIGH, devpriv->i_IobaseAddon + 0);
- outw(APCI3120_ENABLE_TRANSFER_ADD_ON_HIGH, devpriv->i_IobaseAddon + 2); /* 0x1000 is out putted in windows driver */
-
- var = devpriv->ul_DmaBufferHw[next_dma_buf];
- low_word = var & 0xffff;
- var = devpriv->ul_DmaBufferHw[next_dma_buf];
- high_word = var / 65536;
-
- /* DMA Start Address Low */
- outw(APCI3120_ADD_ON_MWAR_LOW, devpriv->i_IobaseAddon + 0);
- outw(low_word, devpriv->i_IobaseAddon + 2);
-
- /* DMA Start Address High */
- outw(APCI3120_ADD_ON_MWAR_HIGH, devpriv->i_IobaseAddon + 0);
- outw(high_word, devpriv->i_IobaseAddon + 2);
-
- var = devpriv->ui_DmaBufferUsesize[next_dma_buf];
- low_word = var & 0xffff;
- var = devpriv->ui_DmaBufferUsesize[next_dma_buf];
- high_word = var / 65536;
-
- /* Nbr of acquisition LOW */
- outw(APCI3120_ADD_ON_MWTC_LOW, devpriv->i_IobaseAddon + 0);
- outw(low_word, devpriv->i_IobaseAddon + 2);
-
- /* Nbr of acquisition HIGH */
- outw(APCI3120_ADD_ON_MWTC_HIGH, devpriv->i_IobaseAddon + 0);
- outw(high_word, devpriv->i_IobaseAddon + 2);
-
- /*
- * To configure A2P FIFO
- * ENABLE A2P FIFO WRITE AND ENABLE AMWEN
- * AMWEN_ENABLE | A2P_FIFO_WRITE_ENABLE (0x01|0x02)=0x03
- */
- outw(3, devpriv->i_IobaseAddon + 4);
- /* initialise end-of-DMA interrupt: AINT_WRITE_COMPL = ENABLE_WRITE_TC_INT (ADDI) */
- outl((APCI3120_FIFO_ADVANCE_ON_BYTE_2 |
- APCI3120_ENABLE_WRITE_TC_INT),
- devpriv->i_IobaseAmcc + AMCC_OP_REG_INTCSR);
-
- }
- if (samplesinbuf) {
- v_APCI3120_InterruptDmaMoveBlock16bit(dev, s,
- devpriv->ul_DmaBufferVirtual[devpriv->
- ui_DmaActualBuffer], samplesinbuf);
-
- if (!(cmd->flags & CMDF_WAKE_EOS)) {
- s->async->events |= COMEDI_CB_EOS;
- comedi_event(dev, s);
- }
- }
- if (cmd->stop_src == TRIG_COUNT)
- if (devpriv->ui_AiActualScan >= cmd->stop_arg) {
- /* all data sampled */
- apci3120_cancel(dev, s);
- s->async->events |= COMEDI_CB_EOA;
- comedi_event(dev, s);
- return;
- }
-
- if (devpriv->b_DmaDoubleBuffer) { /* switch dma buffers */
- devpriv->ui_DmaActualBuffer = 1 - devpriv->ui_DmaActualBuffer;
- } else {
- /*
- * restart DMA if double buffering is not used
- * (reinitialise the DMA)
- */
- ui_Tmp = AGCSTS_TC_ENABLE | AGCSTS_RESET_A2P_FIFO;
- outl(ui_Tmp, devpriv->i_IobaseAddon + AMCC_OP_REG_AGCSTS);
-
- /* changed since the add-on uses a 16-bit interface */
- outw(APCI3120_ADD_ON_AGCSTS_LOW, devpriv->i_IobaseAddon + 0);
- outw(APCI3120_ENABLE_TRANSFER_ADD_ON_LOW,
- devpriv->i_IobaseAddon + 2);
- outw(APCI3120_ADD_ON_AGCSTS_HIGH, devpriv->i_IobaseAddon + 0);
- outw(APCI3120_ENABLE_TRANSFER_ADD_ON_HIGH, devpriv->i_IobaseAddon + 2);
- /*
- * A2P FIFO MANAGEMENT
- * A2P fifo reset & transfer control enable
- */
- outl(APCI3120_A2P_FIFO_MANAGEMENT,
- devpriv->i_IobaseAmcc + AMCC_OP_REG_MCSR);
-
- var = devpriv->ul_DmaBufferHw[0];
- low_word = var & 0xffff;
- var = devpriv->ul_DmaBufferHw[0];
- high_word = var / 65536;
- outw(APCI3120_ADD_ON_MWAR_LOW, devpriv->i_IobaseAddon + 0);
- outw(low_word, devpriv->i_IobaseAddon + 2);
- outw(APCI3120_ADD_ON_MWAR_HIGH, devpriv->i_IobaseAddon + 0);
- outw(high_word, devpriv->i_IobaseAddon + 2);
-
- var = devpriv->ui_DmaBufferUsesize[0];
- low_word = var & 0xffff; /* changed */
- var = devpriv->ui_DmaBufferUsesize[0];
- high_word = var / 65536;
- outw(APCI3120_ADD_ON_MWTC_LOW, devpriv->i_IobaseAddon + 0);
- outw(low_word, devpriv->i_IobaseAddon + 2);
- outw(APCI3120_ADD_ON_MWTC_HIGH, devpriv->i_IobaseAddon + 0);
- outw(high_word, devpriv->i_IobaseAddon + 2);
-
- /*
- * To configure A2P FIFO
- * ENABLE A2P FIFO WRITE AND ENABLE AMWEN
- * AMWEN_ENABLE | A2P_FIFO_WRITE_ENABLE (0x01|0x02)=0x03
- */
- outw(3, devpriv->i_IobaseAddon + 4);
- /* initialise end-of-DMA interrupt: AINT_WRITE_COMPL = ENABLE_WRITE_TC_INT (ADDI) */
- outl((APCI3120_FIFO_ADVANCE_ON_BYTE_2 |
- APCI3120_ENABLE_WRITE_TC_INT),
- devpriv->i_IobaseAmcc + AMCC_OP_REG_INTCSR);
- }
-}
-
-/*
- * This function handles the EOS interrupt.
- * It copies the acquired data (from the FIFO) to the Comedi buffer.
- */
-static int apci3120_interrupt_handle_eos(struct comedi_device *dev)
-{
- struct addi_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- int n_chan, i;
- int err = 1;
-
- n_chan = devpriv->ui_AiNbrofChannels;
-
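- /* read one sample per programmed channel from the AI FIFO */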
- for (i = 0; i < n_chan; i++)
- err &= comedi_buf_put(s, inw(dev->iobase + 0));
-
- s->async->events |= COMEDI_CB_EOS;
-
- if (err == 0)
- s->async->events |= COMEDI_CB_OVERFLOW;
-
- comedi_event(dev, s);
-
- return 0;
-}
-
-static void apci3120_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct addi_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- unsigned short int_daq;
- unsigned int int_amcc, ui_Check, i;
- unsigned short us_TmpValue;
- unsigned char b_DummyRead;
-
- ui_Check = 1;
-
- int_daq = inw(dev->iobase + APCI3120_RD_STATUS) & 0xf000; /* get IRQ reasons */
- int_amcc = inl(devpriv->i_IobaseAmcc + AMCC_OP_REG_INTCSR); /* get AMCC int register */
-
- if ((!int_daq) && (!(int_amcc & ANY_S593X_INT))) {
- dev_err(dev->class_dev, "IRQ from unknown source\n");
- return;
- }
-
- outl(int_amcc | 0x00ff0000, devpriv->i_IobaseAmcc + AMCC_OP_REG_INTCSR); /* shutdown IRQ reasons in AMCC */
-
- int_daq = (int_daq >> 12) & 0xF;
-
- if (devpriv->b_ExttrigEnable == APCI3120_ENABLE) {
- /* Disable ext trigger */
- apci3120_exttrig_disable(dev);
- devpriv->b_ExttrigEnable = APCI3120_DISABLE;
- }
- /* clear the timer 2 interrupt */
- inb(devpriv->i_IobaseAmcc + APCI3120_TIMER_STATUS_REGISTER);
-
- if (int_amcc & MASTER_ABORT_INT)
- dev_err(dev->class_dev, "AMCC IRQ - MASTER DMA ABORT!\n");
- if (int_amcc & TARGET_ABORT_INT)
- dev_err(dev->class_dev, "AMCC IRQ - TARGET DMA ABORT!\n");
-
- /* Check if EOC interrupt */
- if (((int_daq & 0x8) == 0)
- && (devpriv->b_InterruptMode == APCI3120_EOC_MODE)) {
- if (devpriv->b_EocEosInterrupt == APCI3120_ENABLE) {
-
- /* Read the AI Value */
- devpriv->ui_AiReadData[0] =
- (unsigned int) inw(devpriv->iobase + 0);
- devpriv->b_EocEosInterrupt = APCI3120_DISABLE;
- send_sig(SIGIO, devpriv->tsk_Current, 0); /* send signal to the sampling task */
- } else {
- /* Disable EOC Interrupt */
- devpriv->b_ModeSelectRegister =
- devpriv->
- b_ModeSelectRegister & APCI3120_DISABLE_EOC_INT;
- outb(devpriv->b_ModeSelectRegister,
- devpriv->iobase + APCI3120_WRITE_MODE_SELECT);
-
- }
- }
-
- /* Check If EOS interrupt */
- if ((int_daq & 0x2) && (devpriv->b_InterruptMode == APCI3120_EOS_MODE)) {
-
- if (devpriv->b_EocEosInterrupt == APCI3120_ENABLE) { /* only enabled when DMA is not used? */
-
- if (devpriv->ai_running) {
- ui_Check = 0;
- apci3120_interrupt_handle_eos(dev);
- devpriv->ui_AiActualScan++;
- devpriv->b_ModeSelectRegister =
- devpriv->
- b_ModeSelectRegister |
- APCI3120_ENABLE_EOS_INT;
- outb(devpriv->b_ModeSelectRegister,
- dev->iobase +
- APCI3120_WRITE_MODE_SELECT);
- } else {
- ui_Check = 0;
- for (i = 0; i < devpriv->ui_AiNbrofChannels;
- i++) {
- us_TmpValue = inw(devpriv->iobase + 0);
- devpriv->ui_AiReadData[i] =
- (unsigned int) us_TmpValue;
- }
- devpriv->b_EocEosInterrupt = APCI3120_DISABLE;
- devpriv->b_InterruptMode = APCI3120_EOC_MODE;
-
- send_sig(SIGIO, devpriv->tsk_Current, 0); /* send signal to the sampling task */
-
- }
-
- } else {
- devpriv->b_ModeSelectRegister =
- devpriv->
- b_ModeSelectRegister & APCI3120_DISABLE_EOS_INT;
- outb(devpriv->b_ModeSelectRegister,
- dev->iobase + APCI3120_WRITE_MODE_SELECT);
- devpriv->b_EocEosInterrupt = APCI3120_DISABLE; /* Default settings */
- devpriv->b_InterruptMode = APCI3120_EOC_MODE;
- }
-
- }
- /* Timer2 interrupt */
- if (int_daq & 0x1) {
-
- switch (devpriv->b_Timer2Mode) {
- case APCI3120_COUNTER:
- devpriv->b_ModeSelectRegister =
- devpriv->
- b_ModeSelectRegister & APCI3120_DISABLE_EOS_INT;
- outb(devpriv->b_ModeSelectRegister,
- dev->iobase + APCI3120_WRITE_MODE_SELECT);
-
- /* stop timer 2 */
- devpriv->us_OutputRegister =
- devpriv->
- us_OutputRegister & APCI3120_DISABLE_ALL_TIMER;
- outw(devpriv->us_OutputRegister,
- dev->iobase + APCI3120_WR_ADDRESS);
-
- /* stop timer 0 and timer 1 */
- apci3120_cancel(dev, s);
-
- /* UPDATE 0.7.57 -> 0.7.68: comedi_done(dev, s); */
- s->async->events |= COMEDI_CB_EOA;
- comedi_event(dev, s);
-
- break;
-
- case APCI3120_TIMER:
-
- /* Send a signal from kernel to user space */
- send_sig(SIGIO, devpriv->tsk_Current, 0);
- break;
-
- case APCI3120_WATCHDOG:
-
- /* Send a signal from kernel to user space */
- send_sig(SIGIO, devpriv->tsk_Current, 0);
- break;
-
- default:
-
- /* disable Timer Interrupt */
- devpriv->b_ModeSelectRegister =
- devpriv->
- b_ModeSelectRegister &
- APCI3120_DISABLE_TIMER_INT;
-
- outb(devpriv->b_ModeSelectRegister,
- dev->iobase + APCI3120_WRITE_MODE_SELECT);
-
- }
-
- b_DummyRead = inb(dev->iobase + APCI3120_TIMER_STATUS_REGISTER);
-
- }
-
- if ((int_daq & 0x4) && (devpriv->b_InterruptMode == APCI3120_DMA_MODE)) {
- if (devpriv->ai_running) {
-
- /* Clear Timer Write TC int */
- outl(APCI3120_CLEAR_WRITE_TC_INT,
- devpriv->i_IobaseAmcc +
- APCI3120_AMCC_OP_REG_INTCSR);
-
- /* Clears the timer status register */
- inw(dev->iobase + APCI3120_TIMER_STATUS_REGISTER);
- /* do some data transfer */
- apci3120_interrupt_dma(irq, d);
- } else {
- /* Stops the Timer */
- outw(devpriv->
- us_OutputRegister & APCI3120_DISABLE_TIMER0 &
- APCI3120_DISABLE_TIMER1,
- dev->iobase + APCI3120_WR_ADDRESS);
- }
-
- }
-}
-
-/*
- * Configure Timer 2
- *
- * data[0] = TIMER configure as timer
- * = WATCHDOG configure as watchdog
- * data[1] = Timer constant
- * data[2] = Timer 2 interrupt: (1) enable or (0) disable
- */
-static int apci3120_config_insn_timer(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- const struct addi_board *this_board = dev->board_ptr;
- struct addi_private *devpriv = dev->private;
- unsigned int ui_Timervalue2;
- unsigned short us_TmpValue;
- unsigned char b_Tmp;
-
- if (!data[1])
- dev_err(dev->class_dev, "No timer constant!\n");
-
- devpriv->b_Timer2Interrupt = (unsigned char) data[2]; /* save info whether to enable or disable interrupt */
-
- ui_Timervalue2 = data[1] / 1000; /* convert nanoseconds to microseconds */
-
- us_TmpValue = (unsigned short) inw(devpriv->iobase + APCI3120_RD_STATUS);
-
- /*
- * EL250804: Test whether the APCI3120 board has the new quartz or whether
- * it is an APCI3001, and calculate the time value to set in the timer
- */
- if ((us_TmpValue & 0x00B0) == 0x00B0
- || !strcmp(this_board->pc_DriverName, "apci3001")) {
- /* Calculate the time value to set in the timer */
- ui_Timervalue2 = ui_Timervalue2 / 50;
- } else {
- /* Calculate the time value to set in the timer */
- ui_Timervalue2 = ui_Timervalue2 / 70;
- }
-
- /* Reset gate 2 of Timer 2 to disable it (Set Bit D14 to 0) */
- devpriv->us_OutputRegister =
- devpriv->us_OutputRegister & APCI3120_DISABLE_TIMER2;
- outw(devpriv->us_OutputRegister, devpriv->iobase + APCI3120_WR_ADDRESS);
-
- /* Disable TIMER Interrupt */
- devpriv->b_ModeSelectRegister =
- devpriv->
- b_ModeSelectRegister & APCI3120_DISABLE_TIMER_INT & 0xEF;
-
- /* Disable Eoc and Eos Interrupts */
- devpriv->b_ModeSelectRegister =
- devpriv->
- b_ModeSelectRegister & APCI3120_DISABLE_EOC_INT &
- APCI3120_DISABLE_EOS_INT;
- outb(devpriv->b_ModeSelectRegister,
- devpriv->iobase + APCI3120_WRITE_MODE_SELECT);
- if (data[0] == APCI3120_TIMER) { /* initialize timer */
- /* Set the Timer 2 in mode 2(Timer) */
- devpriv->b_TimerSelectMode =
- (devpriv->
- b_TimerSelectMode & 0x0F) | APCI3120_TIMER_2_MODE_2;
- outb(devpriv->b_TimerSelectMode,
- devpriv->iobase + APCI3120_TIMER_CRT1);
-
- /*
- * Configure timer 2 for writing the LOW unsigned short of the timer
- * delay value. b_Tmp must be combined with DigitalOutputRegister
- * because at Address_1 + APCI3120_TIMER_CRT0 the digital outputs are
- * set together with the timer 2 configuration; otherwise the digital
- * outputs are erased (set to 0)
- */
-
- /* Writing LOW unsigned short */
- b_Tmp = ((devpriv->
- b_DigitalOutputRegister) & 0xF0) |
- APCI3120_SELECT_TIMER_2_LOW_WORD;
- outb(b_Tmp, devpriv->iobase + APCI3120_TIMER_CRT0);
- outw(ui_Timervalue2 & 0xffff,
- devpriv->iobase + APCI3120_TIMER_VALUE);
-
- /* Writing HIGH unsigned short */
- b_Tmp = ((devpriv->
- b_DigitalOutputRegister) & 0xF0) |
- APCI3120_SELECT_TIMER_2_HIGH_WORD;
- outb(b_Tmp, devpriv->iobase + APCI3120_TIMER_CRT0);
- outw((ui_Timervalue2 >> 16) & 0xffff,
- devpriv->iobase + APCI3120_TIMER_VALUE);
- /* timer2 in Timer mode enabled */
- devpriv->b_Timer2Mode = APCI3120_TIMER;
-
- } else { /* Initialize watchdog */
-
- /* Set the Timer 2 in mode 5(Watchdog) */
- devpriv->b_TimerSelectMode =
- (devpriv->
- b_TimerSelectMode & 0x0F) | APCI3120_TIMER_2_MODE_5;
- outb(devpriv->b_TimerSelectMode,
- devpriv->iobase + APCI3120_TIMER_CRT1);
-
- /*
- * Configure timer 2 for writing the LOW unsigned short of the timer
- * delay value. b_Tmp must be combined with DigitalOutputRegister
- * because at Address_1 + APCI3120_TIMER_CRT0 the digital outputs are
- * set together with the timer 2 configuration; otherwise the digital
- * outputs are erased (set to 0)
- */
-
- /* Writing LOW unsigned short */
- b_Tmp = ((devpriv->
- b_DigitalOutputRegister) & 0xF0) |
- APCI3120_SELECT_TIMER_2_LOW_WORD;
- outb(b_Tmp, devpriv->iobase + APCI3120_TIMER_CRT0);
- outw(ui_Timervalue2 & 0xffff,
- devpriv->iobase + APCI3120_TIMER_VALUE);
-
- /* Writing HIGH unsigned short */
- b_Tmp = ((devpriv->
- b_DigitalOutputRegister) & 0xF0) |
- APCI3120_SELECT_TIMER_2_HIGH_WORD;
- outb(b_Tmp, devpriv->iobase + APCI3120_TIMER_CRT0);
-
- outw((ui_Timervalue2 >> 16) & 0xffff,
- devpriv->iobase + APCI3120_TIMER_VALUE);
- /* watchdog enabled */
- devpriv->b_Timer2Mode = APCI3120_WATCHDOG;
-
- }
-
- return insn->n;
-
-}
-
-/*
- * To start and stop the timer
- *
- * data[0] = 1 (start)
- * = 0 (stop)
- * = 2 (write new value)
- * data[1] = new value
- *
- * devpriv->b_Timer2Mode = 0 DISABLE
- * = 1 Timer
- * = 2 Watch dog
- */
-static int apci3120_write_insn_timer(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- const struct addi_board *this_board = dev->board_ptr;
- struct addi_private *devpriv = dev->private;
- unsigned int ui_Timervalue2 = 0;
- unsigned short us_TmpValue;
- unsigned char b_Tmp;
-
- if ((devpriv->b_Timer2Mode != APCI3120_WATCHDOG)
- && (devpriv->b_Timer2Mode != APCI3120_TIMER)) {
- dev_err(dev->class_dev, "timer2 not configured\n");
- return -EINVAL;
- }
-
- if (data[0] == 2) { /* write new value */
- if (devpriv->b_Timer2Mode != APCI3120_TIMER) {
- dev_err(dev->class_dev,
- "timer2 not configured in TIMER MODE\n");
- return -EINVAL;
- }
-
- if (data[1])
- ui_Timervalue2 = data[1];
- else
- ui_Timervalue2 = 0;
- }
-
- switch (data[0]) {
- case APCI3120_START:
-
- /* Reset FC_TIMER BIT */
- inb(devpriv->iobase + APCI3120_TIMER_STATUS_REGISTER);
- if (devpriv->b_Timer2Mode == APCI3120_TIMER) { /* start timer */
- /* Enable Timer */
- devpriv->b_ModeSelectRegister =
- devpriv->b_ModeSelectRegister & 0x0B;
- } else { /* start watch dog */
- /* Enable WatchDog */
- devpriv->b_ModeSelectRegister =
- (devpriv->
- b_ModeSelectRegister & 0x0B) |
- APCI3120_ENABLE_WATCHDOG;
- }
-
- /* enable or disable the interrupt */
- if ((devpriv->b_Timer2Interrupt) == APCI3120_ENABLE) {
-
- devpriv->b_ModeSelectRegister =
- devpriv->
- b_ModeSelectRegister |
- APCI3120_ENABLE_TIMER_INT;
- /* save the task structure to pass info to user */
- devpriv->tsk_Current = current;
- } else {
-
- devpriv->b_ModeSelectRegister =
- devpriv->
- b_ModeSelectRegister &
- APCI3120_DISABLE_TIMER_INT;
- }
- outb(devpriv->b_ModeSelectRegister,
- devpriv->iobase + APCI3120_WRITE_MODE_SELECT);
-
- if (devpriv->b_Timer2Mode == APCI3120_TIMER) { /* start timer */
- /* In timer mode, Gate 2 must be activated to start the timer */
- devpriv->us_OutputRegister =
- devpriv->
- us_OutputRegister | APCI3120_ENABLE_TIMER2;
- outw(devpriv->us_OutputRegister,
- devpriv->iobase + APCI3120_WR_ADDRESS);
- }
-
- break;
-
- case APCI3120_STOP:
- if (devpriv->b_Timer2Mode == APCI3120_TIMER) {
- /* Disable timer */
- devpriv->b_ModeSelectRegister =
- devpriv->
- b_ModeSelectRegister &
- APCI3120_DISABLE_TIMER_COUNTER;
- } else {
- /* Disable WatchDog */
- devpriv->b_ModeSelectRegister =
- devpriv->
- b_ModeSelectRegister &
- APCI3120_DISABLE_WATCHDOG;
- }
- /* Disable timer interrupt */
- devpriv->b_ModeSelectRegister =
- devpriv->
- b_ModeSelectRegister & APCI3120_DISABLE_TIMER_INT;
-
- /* Write above states to register */
- outb(devpriv->b_ModeSelectRegister,
- devpriv->iobase + APCI3120_WRITE_MODE_SELECT);
-
- /* Reset Gate 2 */
- devpriv->us_OutputRegister =
- devpriv->us_OutputRegister & APCI3120_DISABLE_TIMER_INT;
- outw(devpriv->us_OutputRegister,
- devpriv->iobase + APCI3120_WR_ADDRESS);
-
- /* Reset FC_TIMER BIT */
- inb(devpriv->iobase + APCI3120_TIMER_STATUS_REGISTER);
-
- break;
-
- case 2: /* write new value to Timer */
- if (devpriv->b_Timer2Mode != APCI3120_TIMER) {
- dev_err(dev->class_dev,
- "timer2 not configured in TIMER MODE\n");
- return -EINVAL;
- }
- us_TmpValue =
- (unsigned short) inw(devpriv->iobase + APCI3120_RD_STATUS);
-
- /*
- * EL250804: Test whether the APCI3120 board has the new quartz or whether
- * it is an APCI3001, and calculate the time value to set in the timer
- */
- if ((us_TmpValue & 0x00B0) == 0x00B0
- || !strcmp(this_board->pc_DriverName, "apci3001")) {
- /* Calculate the time value to set in the timer */
- ui_Timervalue2 = ui_Timervalue2 / 50;
- } else {
- /* Calculate the time value to set in the timer */
- ui_Timervalue2 = ui_Timervalue2 / 70;
- }
- /* Writing LOW unsigned short */
- b_Tmp = ((devpriv->
- b_DigitalOutputRegister) & 0xF0) |
- APCI3120_SELECT_TIMER_2_LOW_WORD;
- outb(b_Tmp, devpriv->iobase + APCI3120_TIMER_CRT0);
-
- outw(ui_Timervalue2 & 0xffff,
- devpriv->iobase + APCI3120_TIMER_VALUE);
-
- /* Writing HIGH unsigned short */
- b_Tmp = ((devpriv->
- b_DigitalOutputRegister) & 0xF0) |
- APCI3120_SELECT_TIMER_2_HIGH_WORD;
- outb(b_Tmp, devpriv->iobase + APCI3120_TIMER_CRT0);
-
- outw((ui_Timervalue2 >> 16) & 0xffff,
- devpriv->iobase + APCI3120_TIMER_VALUE);
-
- break;
- default:
- return -EINVAL; /* Not a valid input */
- }
-
- return insn->n;
-}
-
-/*
- * Read the Timer value
- *
- * for Timer: data[0]= Timer constant
- *
- * for watchdog: data[0] = 0 (still running)
- * = 1 (run down)
- */
-static int apci3120_read_insn_timer(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned char b_Tmp;
- unsigned short us_TmpValue, us_TmpValue_2, us_StatusValue;
-
- if ((devpriv->b_Timer2Mode != APCI3120_WATCHDOG)
- && (devpriv->b_Timer2Mode != APCI3120_TIMER)) {
- dev_err(dev->class_dev, "timer2 not configured\n");
- }
- if (devpriv->b_Timer2Mode == APCI3120_TIMER) {
-
- /* Read the LOW unsigned short of Timer 2 register */
- b_Tmp = ((devpriv->
- b_DigitalOutputRegister) & 0xF0) |
- APCI3120_SELECT_TIMER_2_LOW_WORD;
- outb(b_Tmp, devpriv->iobase + APCI3120_TIMER_CRT0);
-
- us_TmpValue = inw(devpriv->iobase + APCI3120_TIMER_VALUE);
-
- /* Read the HIGH unsigned short of Timer 2 register */
- b_Tmp = ((devpriv->
- b_DigitalOutputRegister) & 0xF0) |
- APCI3120_SELECT_TIMER_2_HIGH_WORD;
- outb(b_Tmp, devpriv->iobase + APCI3120_TIMER_CRT0);
-
- us_TmpValue_2 = inw(devpriv->iobase + APCI3120_TIMER_VALUE);
-
- /* combining both words */
- data[0] = (unsigned int) ((us_TmpValue) | ((us_TmpValue_2) << 16));
-
- } else { /* Read watch dog status */
-
- us_StatusValue = inw(devpriv->iobase + APCI3120_RD_STATUS);
- us_StatusValue =
- ((us_StatusValue & APCI3120_FC_TIMER) >> 12) & 1;
- if (us_StatusValue == 1) {
- /* RESET FC_TIMER BIT */
- inb(devpriv->iobase + APCI3120_TIMER_STATUS_REGISTER);
- }
- data[0] = us_StatusValue; /* when data[0] = 1 the watchdog has run down */
- }
- return insn->n;
-}
-
-static int apci3120_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int val;
-
- /* the input channels are bits 11:8 of the status reg */
- val = inw(devpriv->iobase + APCI3120_RD_STATUS);
- data[1] = (val >> 8) & 0xf;
-
- return insn->n;
-}
-
-static int apci3120_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
-
- if (comedi_dio_update_state(s, data)) {
- /* The do channels are bits 7:4 of the do register */
- devpriv->b_DigitalOutputRegister = s->state << 4;
-
- outb(devpriv->b_DigitalOutputRegister,
- devpriv->iobase + APCI3120_DIGITAL_OUTPUT);
- }
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static int apci3120_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int ui_Range, ui_Channel;
- unsigned short us_TmpValue;
-
- ui_Range = CR_RANGE(insn->chanspec);
- ui_Channel = CR_CHAN(insn->chanspec);
-
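- /*
-  * Build the DAC word: bits 15:14 select the channel (modulo 4),
-  * bit 13 selects unipolar (1) or bipolar (0) and the low bits carry
-  * the data (offset by 8191/8192 in unipolar mode).
-  */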
- if (ui_Range) { /* if 1 then unipolar */
-
- if (data[0] != 0)
- data[0] =
- ((((ui_Channel & 0x03) << 14) & 0xC000) | (1 <<
- 13) | (data[0] + 8191));
- else
- data[0] =
- ((((ui_Channel & 0x03) << 14) & 0xC000) | (1 <<
- 13) | 8192);
-
- } else { /* if 0 then bipolar */
- data[0] =
- ((((ui_Channel & 0x03) << 14) & 0xC000) | (0 << 13) |
- data[0]);
-
- }
-
- do { /* Wait for the DA_READY bit */
- us_TmpValue =
- ((unsigned short) inw(devpriv->iobase +
- APCI3120_RD_STATUS)) & 0x0001;
- } while (us_TmpValue != 0x0001);
-
- if (ui_Channel <= 3)
- /*
- * for channels 0-3 write to register 1 (wrDac1-4); data[0] is cast
- * to unsigned short since a word write is to be done
- */
- outw((unsigned short) data[0],
- devpriv->iobase + APCI3120_ANALOG_OUTPUT_1);
- else
- /*
- * for channels 4-7 write to register 2 (wrDac5-8); data[0] is cast
- * to unsigned short since a word write is to be done
- */
- outw((unsigned short) data[0],
- devpriv->iobase + APCI3120_ANALOG_OUTPUT_2);
-
- return insn->n;
-}
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
deleted file mode 100644
index 5e321f91172..00000000000
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
+++ /dev/null
@@ -1,3003 +0,0 @@
-/**
-@verbatim
-
-Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
-
- ADDI-DATA GmbH
- Dieselstrasse 3
- D-77833 Ottersweier
- Tel: +49(0)7223/9493-0
- Fax: +49(0)7223/9493-92
- http://www.addi-data.com
- info@addi-data.com
-
-This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
-
-This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-@endverbatim
-*/
-/*
-
- +-----------------------------------------------------------------------+
- | (C) ADDI-DATA GmbH Dieselstraße 3 D-77833 Ottersweier |
- +-----------------------------------------------------------------------+
- | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
- | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
- +-------------------------------+---------------------------------------+
- | Project : APCI-3200 | Compiler : GCC |
- | Module name : hwdrv_apci3200.c| Version : 2.96 |
- +-------------------------------+---------------------------------------+
- | Project manager: Eric Stolz | Date : 02/12/2002 |
- +-------------------------------+---------------------------------------+
- | Description : Hardware Layer Access For APCI-3200 |
- +-----------------------------------------------------------------------+
- | UPDATES |
- +----------+-----------+------------------------------------------------+
- | Date | Author | Description of updates |
- +----------+-----------+------------------------------------------------+
- | 02.07.04 | J. Krauth | Modification from the driver in order to |
- | | | correct some errors when using several boards. |
- | | | |
- | | | |
- +----------+-----------+------------------------------------------------+
- | 26.10.04 | J. Krauth | - Update for COMEDI 0.7.68 |
- | | | - Read eeprom value |
- | | | - Append APCI-3300 |
- +----------+-----------+------------------------------------------------+
-*/
-
-/* Card Specific information */
-/* #define APCI3200_ADDRESS_RANGE 264 */
-
-/* Analog Input related Defines */
-#define APCI3200_AI_OFFSET_GAIN 0
-#define APCI3200_AI_SC_TEST 4
-#define APCI3200_AI_IRQ 8
-#define APCI3200_AI_AUTOCAL 12
-#define APCI3200_RELOAD_CONV_TIME_VAL 32
-#define APCI3200_CONV_TIME_TIME_BASE 36
-#define APCI3200_RELOAD_DELAY_TIME_VAL 40
-#define APCI3200_DELAY_TIME_TIME_BASE 44
-#define APCI3200_AI_MODULE1 0
-#define APCI3200_AI_MODULE2 64
-#define APCI3200_AI_MODULE3 128
-#define APCI3200_AI_MODULE4 192
-#define TRUE 1
-#define FALSE 0
-#define APCI3200_AI_EOSIRQ 16
-#define APCI3200_AI_EOS 20
-#define APCI3200_AI_CHAN_ID 24
-#define APCI3200_AI_CHAN_VAL 28
-#define ANALOG_INPUT 0
-#define TEMPERATURE 1
-#define RESISTANCE 2
-
-#define ENABLE_EXT_TRIG 1
-#define ENABLE_EXT_GATE 2
-#define ENABLE_EXT_TRIG_GATE 3
-
-#define APCI3200_MAXVOLT 2.5
-#define ADDIDATA_GREATER_THAN_TEST 0
-#define ADDIDATA_LESS_THAN_TEST 1
-
-#define ADDIDATA_UNIPOLAR 1
-#define ADDIDATA_BIPOLAR 2
-
-#define MAX_MODULE 4
-
-/* ANALOG INPUT RANGE */
-static const struct comedi_lrange range_apci3200_ai = {
- 8, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2),
- BIP_RANGE(1),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2),
- UNI_RANGE(1)
- }
-};
-
-static const struct comedi_lrange range_apci3300_ai = {
- 4, {
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2),
- UNI_RANGE(1)
- }
-};
-
-int MODULE_NO;
-struct {
- int i_Gain;
- int i_Polarity;
- int i_OffsetRange;
- int i_Coupling;
- int i_SingleDiff;
- int i_AutoCalibration;
- unsigned int ui_ReloadValue;
- unsigned int ui_TimeUnitReloadVal;
- int i_Interrupt;
- int i_ModuleSelection;
-} Config_Parameters_Module1, Config_Parameters_Module2,
- Config_Parameters_Module3, Config_Parameters_Module4;
-
-
-struct str_ADDIDATA_RTDStruct {
- unsigned int ul_NumberOfValue;
- unsigned int *pul_ResistanceValue;
- unsigned int *pul_TemperatureValue;
-};
-
-struct str_Module {
- unsigned long ul_CurrentSourceCJC;
- unsigned long ul_CurrentSource[5];
- unsigned long ul_GainFactor[8]; /* Gain Factor */
- unsigned int w_GainValue[10];
-};
-
-struct str_BoardInfos {
-
- int i_CJCAvailable;
- int i_CJCPolarity;
- int i_CJCGain;
- int i_InterruptFlag;
- int i_ADDIDATAPolarity;
- int i_ADDIDATAGain;
- int i_AutoCalibration;
- int i_ADDIDATAConversionTime;
- int i_ADDIDATAConversionTimeUnit;
- int i_ADDIDATAType;
- int i_ChannelNo;
- int i_ChannelCount;
- int i_ScanType;
- int i_FirstChannel;
- int i_LastChannel;
- int i_Sum;
- int i_Offset;
- unsigned int ui_Channel_num;
- int i_Count;
- int i_Initialised;
- unsigned int ui_InterruptChannelValue[144]; /* Buffer */
- unsigned char b_StructInitialized;
- /* 7 is the maximal number of channels */
- unsigned int ui_ScanValueArray[7 + 12];
-
- int i_ConnectionType;
- int i_NbrOfModule;
- struct str_Module s_Module[MAX_MODULE];
-};
-
-/* BEGIN JK 06.07.04: Management of several boards */
-/*
- int i_CJCAvailable=1;
- int i_CJCPolarity=0;
- int i_CJCGain=2;/* changed from 0 to 2 */
- int i_InterruptFlag=0;
- int i_ADDIDATAPolarity;
- int i_ADDIDATAGain;
- int i_AutoCalibration=0; /* : auto calibration */
- int i_ADDIDATAConversionTime;
- int i_ADDIDATAConversionTimeUnit;
- int i_ADDIDATAType;
- int i_ChannelNo;
- int i_ChannelCount=0;
- int i_ScanType;
- int i_FirstChannel;
- int i_LastChannel;
- int i_Sum=0;
- int i_Offset;
- unsigned int ui_Channel_num=0;
- static int i_Count=0;
- int i_Initialised=0;
- unsigned int ui_InterruptChannelValue[96]; /* Buffer */
-*/
-struct str_BoardInfos s_BoardInfos[100]; /* 100 will be the max number of boards to be used */
-/* END JK 06.07.04: Management of several boards */
-
-#define AMCC_OP_REG_MCSR 0x3c
-#define EEPROM_BUSY 0x80000000
-#define NVCMD_LOAD_LOW (0x4 << 5) /* nvRam load low command */
-#define NVCMD_LOAD_HIGH (0x5 << 5) /* nvRam load high command */
-#define NVCMD_BEGIN_READ (0x7 << 5) /* nvRam begin read command */
-#define NVCMD_BEGIN_WRITE (0x6 << 5) /* EEPROM begin write command */
-
-static int i_AddiHeaderRW_ReadEeprom(int i_NbOfWordsToRead,
- unsigned int dw_PCIBoardEepromAddress,
- unsigned short w_EepromStartAddress,
- unsigned short *pw_DataRead)
-{
- unsigned int dw_eeprom_busy = 0;
- int i_Counter = 0;
- int i_WordCounter;
- int i;
- unsigned char pb_ReadByte[1];
- unsigned char b_ReadLowByte = 0;
- unsigned char b_ReadHighByte = 0;
- unsigned char b_SelectedAddressLow = 0;
- unsigned char b_SelectedAddressHigh = 0;
- unsigned short w_ReadWord = 0;
-
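- /*
-  * Each 16-bit word is read as two bytes. For every byte the low and
-  * high address parts are loaded, a read command is issued and the
-  * byte is fetched from the data register, waiting for the EEPROM
-  * busy flag to clear between accesses.
-  */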
- for (i_WordCounter = 0; i_WordCounter < i_NbOfWordsToRead;
- i_WordCounter++) {
- do {
- dw_eeprom_busy =
- inl(dw_PCIBoardEepromAddress +
- AMCC_OP_REG_MCSR);
- dw_eeprom_busy = dw_eeprom_busy & EEPROM_BUSY;
- } while (dw_eeprom_busy == EEPROM_BUSY);
-
- for (i_Counter = 0; i_Counter < 2; i_Counter++) {
- b_SelectedAddressLow = (w_EepromStartAddress + i_Counter) % 256; /* low 8-bit part of the address */
- b_SelectedAddressHigh = (w_EepromStartAddress + i_Counter) / 256; /* high 8-bit part of the address */
-
- /* Select the load low address mode */
- outb(NVCMD_LOAD_LOW,
- dw_PCIBoardEepromAddress + AMCC_OP_REG_MCSR +
- 3);
-
- /* Wait on busy */
- do {
- dw_eeprom_busy =
- inl(dw_PCIBoardEepromAddress +
- AMCC_OP_REG_MCSR);
- dw_eeprom_busy = dw_eeprom_busy & EEPROM_BUSY;
- } while (dw_eeprom_busy == EEPROM_BUSY);
-
- /* Load the low address */
- outb(b_SelectedAddressLow,
- dw_PCIBoardEepromAddress + AMCC_OP_REG_MCSR +
- 2);
-
- /* Wait on busy */
- do {
- dw_eeprom_busy =
- inl(dw_PCIBoardEepromAddress +
- AMCC_OP_REG_MCSR);
- dw_eeprom_busy = dw_eeprom_busy & EEPROM_BUSY;
- } while (dw_eeprom_busy == EEPROM_BUSY);
-
- /* Select the load high address mode */
- outb(NVCMD_LOAD_HIGH,
- dw_PCIBoardEepromAddress + AMCC_OP_REG_MCSR +
- 3);
-
- /* Wait on busy */
- do {
- dw_eeprom_busy =
- inl(dw_PCIBoardEepromAddress +
- AMCC_OP_REG_MCSR);
- dw_eeprom_busy = dw_eeprom_busy & EEPROM_BUSY;
- } while (dw_eeprom_busy == EEPROM_BUSY);
-
- /* Load the high address */
- outb(b_SelectedAddressHigh,
- dw_PCIBoardEepromAddress + AMCC_OP_REG_MCSR +
- 2);
-
- /* Wait on busy */
- do {
- dw_eeprom_busy =
- inl(dw_PCIBoardEepromAddress +
- AMCC_OP_REG_MCSR);
- dw_eeprom_busy = dw_eeprom_busy & EEPROM_BUSY;
- } while (dw_eeprom_busy == EEPROM_BUSY);
-
- /* Select the READ mode */
- outb(NVCMD_BEGIN_READ,
- dw_PCIBoardEepromAddress + AMCC_OP_REG_MCSR +
- 3);
-
- /* Wait on busy */
- do {
- dw_eeprom_busy =
- inl(dw_PCIBoardEepromAddress +
- AMCC_OP_REG_MCSR);
- dw_eeprom_busy = dw_eeprom_busy & EEPROM_BUSY;
- } while (dw_eeprom_busy == EEPROM_BUSY);
-
- /* Read the data byte from the EEPROM */
- *pb_ReadByte =
- inb(dw_PCIBoardEepromAddress +
- AMCC_OP_REG_MCSR + 2);
-
- /* Wait on busy */
- do {
- dw_eeprom_busy =
- inl(dw_PCIBoardEepromAddress +
- AMCC_OP_REG_MCSR);
- dw_eeprom_busy = dw_eeprom_busy & EEPROM_BUSY;
- } while (dw_eeprom_busy == EEPROM_BUSY);
-
- /* Store the byte: low byte first, then high byte */
- if (i_Counter == 0)
- b_ReadLowByte = pb_ReadByte[0];
- else
- b_ReadHighByte = pb_ReadByte[0];
-
-
- /* Sleep */
- msleep(1);
-
- }
- w_ReadWord =
- (b_ReadLowByte | (((unsigned short)b_ReadHighByte) *
- 256));
-
- pw_DataRead[i_WordCounter] = w_ReadWord;
-
- w_EepromStartAddress += 2; /* to read the next word */
-
- } /* for (...) i_NbOfWordsToRead */
- return 0;
-}
-
-static void v_GetAPCI3200EepromCalibrationValue(unsigned int dw_PCIBoardEepromAddress,
- struct str_BoardInfos *BoardInformations)
-{
- unsigned short w_AnalogInputMainHeaderAddress;
- unsigned short w_AnalogInputComponentAddress;
- unsigned short w_NumberOfModuls = 0;
- unsigned short w_CurrentSources[2];
- unsigned short w_ModulCounter = 0;
- unsigned short w_FirstHeaderSize = 0;
- unsigned short w_NumberOfInputs = 0;
- unsigned short w_CJCFlag = 0;
- unsigned short w_NumberOfGainValue = 0;
- unsigned short w_SingleHeaderAddress = 0;
- unsigned short w_SingleHeaderSize = 0;
- unsigned short w_Input = 0;
- unsigned short w_GainFactorAddress = 0;
- unsigned short w_GainFactorValue[2];
- unsigned short w_GainIndex = 0;
- unsigned short w_GainValue = 0;
-
- /*****************************************/
- /** Get the Analog input header address **/
- /*****************************************/
- i_AddiHeaderRW_ReadEeprom(1, /* i_NbOfWordsToRead */
- dw_PCIBoardEepromAddress, 0x116, /* w_EepromStartAddress: Analog input header address */
- &w_AnalogInputMainHeaderAddress);
-
- /*******************************************/
- /** Compute the real analog input address **/
- /*******************************************/
- w_AnalogInputMainHeaderAddress = w_AnalogInputMainHeaderAddress + 0x100;
-
- /*******************************/
- /** Get the number of modules **/
- /*******************************/
- i_AddiHeaderRW_ReadEeprom(1, /* i_NbOfWordsToRead */
- dw_PCIBoardEepromAddress, w_AnalogInputMainHeaderAddress + 0x02, /* w_EepromStartAddress: Number of components */
- &w_NumberOfModuls);
-
- for (w_ModulCounter = 0; w_ModulCounter < w_NumberOfModuls;
- w_ModulCounter++) {
- /***********************************/
- /** Compute the component address **/
- /***********************************/
- w_AnalogInputComponentAddress =
- w_AnalogInputMainHeaderAddress +
- (w_FirstHeaderSize * w_ModulCounter) + 0x04;
-
- /****************************/
- /** Read first header size **/
- /****************************/
- i_AddiHeaderRW_ReadEeprom(1, /* i_NbOfWordsToRead */
- dw_PCIBoardEepromAddress, w_AnalogInputComponentAddress, /* Address of the first header */
- &w_FirstHeaderSize);
-
- w_FirstHeaderSize = w_FirstHeaderSize >> 4;
-
- /***************************/
- /** Read number of inputs **/
- /***************************/
- i_AddiHeaderRW_ReadEeprom(1, /* i_NbOfWordsToRead */
- dw_PCIBoardEepromAddress, w_AnalogInputComponentAddress + 0x06, /* Number of inputs for the first module */
- &w_NumberOfInputs);
-
- w_NumberOfInputs = w_NumberOfInputs >> 4;
-
- /***********************/
- /** Read the CJC flag **/
- /***********************/
- i_AddiHeaderRW_ReadEeprom(1, /* i_NbOfWordsToRead */
- dw_PCIBoardEepromAddress, w_AnalogInputComponentAddress + 0x08, /* CJC flag */
- &w_CJCFlag);
-
- w_CJCFlag = (w_CJCFlag >> 3) & 0x1; /* Get only the CJC flag */
-
- /*******************************/
- /** Read number of gain value **/
- /*******************************/
- i_AddiHeaderRW_ReadEeprom(1, /* i_NbOfWordsToRead */
- dw_PCIBoardEepromAddress, w_AnalogInputComponentAddress + 0x44, /* Number of gain value */
- &w_NumberOfGainValue);
-
- w_NumberOfGainValue = w_NumberOfGainValue & 0xFF;
-
- /***********************************/
- /** Compute single header address **/
- /***********************************/
- w_SingleHeaderAddress =
- w_AnalogInputComponentAddress + 0x46 +
- (((w_NumberOfGainValue / 16) + 1) * 2) +
- (6 * w_NumberOfGainValue) +
- (4 * (((w_NumberOfGainValue / 16) + 1) * 2));
-
- /*********************************/
- /** Read the single header size **/
- /*********************************/
- i_AddiHeaderRW_ReadEeprom(1, /* i_NbOfWordsToRead */
- dw_PCIBoardEepromAddress, w_SingleHeaderAddress, /* w_EepromStartAddress: Single header address */
- &w_SingleHeaderSize);
-
- w_SingleHeaderSize = w_SingleHeaderSize >> 4;
-
- /*************************************/
- /** Read gain factor for the module **/
- /*************************************/
- w_GainFactorAddress = w_AnalogInputComponentAddress;
-
- for (w_GainIndex = 0; w_GainIndex < w_NumberOfGainValue;
- w_GainIndex++) {
- /************************************/
- /** Read gain value for the module **/
- /************************************/
- i_AddiHeaderRW_ReadEeprom(1, /* i_NbOfWordsToRead */
- dw_PCIBoardEepromAddress, w_AnalogInputComponentAddress + 70 + (2 * (1 + (w_NumberOfGainValue / 16))) + (0x02 * w_GainIndex), /* Gain value */
- &w_GainValue);
-
- BoardInformations->s_Module[w_ModulCounter].
- w_GainValue[w_GainIndex] = w_GainValue;
-
- /*************************************/
- /** Read gain factor for the module **/
- /*************************************/
- i_AddiHeaderRW_ReadEeprom(2, /* i_NbOfWordsToRead */
- dw_PCIBoardEepromAddress, w_AnalogInputComponentAddress + 70 + ((2 * w_NumberOfGainValue) + (2 * (1 + (w_NumberOfGainValue / 16)))) + (0x04 * w_GainIndex), /* Gain factor */
- w_GainFactorValue);
-
- BoardInformations->s_Module[w_ModulCounter].
- ul_GainFactor[w_GainIndex] =
- (w_GainFactorValue[1] << 16) +
- w_GainFactorValue[0];
- }
-
- /***************************************************************/
- /** Read current source value for each channel of the module **/
- /***************************************************************/
- for (w_Input = 0; w_Input < w_NumberOfInputs; w_Input++) {
- /***************************************************/
- /** Read the current source value for the input    **/
- /***************************************************/
- i_AddiHeaderRW_ReadEeprom(2, /* i_NbOfWordsToRead */
- dw_PCIBoardEepromAddress,
- (w_Input * w_SingleHeaderSize) +
- w_SingleHeaderAddress + 0x0C, w_CurrentSources);
-
- /************************************/
- /** Save the current sources value **/
- /************************************/
- BoardInformations->s_Module[w_ModulCounter].
- ul_CurrentSource[w_Input] =
- (w_CurrentSources[0] +
- ((w_CurrentSources[1] & 0xFFF) << 16));
- }
-
- /***************************************/
- /** Read the CJC current source value **/
- /***************************************/
- i_AddiHeaderRW_ReadEeprom(2, /* i_NbOfWordsToRead */
- dw_PCIBoardEepromAddress,
- (w_Input * w_SingleHeaderSize) + w_SingleHeaderAddress +
- 0x0C, w_CurrentSources);
-
- /************************************/
- /** Save the current sources value **/
- /************************************/
- BoardInformations->s_Module[w_ModulCounter].
- ul_CurrentSourceCJC =
- (w_CurrentSources[0] +
- ((w_CurrentSources[1] & 0xFFF) << 16));
- }
-}
-
-static int i_APCI3200_GetChannelCalibrationValue(struct comedi_device *dev,
- unsigned int ui_Channel_num,
- unsigned int *CJCCurrentSource,
- unsigned int *ChannelCurrentSource,
- unsigned int *ChannelGainFactor)
-{
- int i_DiffChannel = 0;
- int i_Module = 0;
-
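- /*
-  * Map the comedi channel number to a module and a channel within
-  * that module: two differential channels per module, or four
-  * single-ended channels per module mapped in pairs.
-  */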
- /* Test if single or differential mode */
- if (s_BoardInfos[dev->minor].i_ConnectionType == 1) {
- /* if diff */
-
- if (ui_Channel_num <= 1)
- i_DiffChannel = ui_Channel_num, i_Module = 0;
- else if ((ui_Channel_num >= 2) && (ui_Channel_num <= 3))
- i_DiffChannel = ui_Channel_num - 2, i_Module = 1;
- else if ((ui_Channel_num >= 4) && (ui_Channel_num <= 5))
- i_DiffChannel = ui_Channel_num - 4, i_Module = 2;
- else if ((ui_Channel_num >= 6) && (ui_Channel_num <= 7))
- i_DiffChannel = ui_Channel_num - 6, i_Module = 3;
-
- } else {
- /* if single */
- if ((ui_Channel_num == 0) || (ui_Channel_num == 1))
- i_DiffChannel = 0, i_Module = 0;
- else if ((ui_Channel_num == 2) || (ui_Channel_num == 3))
- i_DiffChannel = 1, i_Module = 0;
- else if ((ui_Channel_num == 4) || (ui_Channel_num == 5))
- i_DiffChannel = 0, i_Module = 1;
- else if ((ui_Channel_num == 6) || (ui_Channel_num == 7))
- i_DiffChannel = 1, i_Module = 1;
- else if ((ui_Channel_num == 8) || (ui_Channel_num == 9))
- i_DiffChannel = 0, i_Module = 2;
- else if ((ui_Channel_num == 10) || (ui_Channel_num == 11))
- i_DiffChannel = 1, i_Module = 2;
- else if ((ui_Channel_num == 12) || (ui_Channel_num == 13))
- i_DiffChannel = 0, i_Module = 3;
- else if ((ui_Channel_num == 14) || (ui_Channel_num == 15))
- i_DiffChannel = 1, i_Module = 3;
- }
-
- /* Test if thermocouple or RTD mode */
- *CJCCurrentSource =
- s_BoardInfos[dev->minor].s_Module[i_Module].ul_CurrentSourceCJC;
-
- *ChannelCurrentSource =
- s_BoardInfos[dev->minor].s_Module[i_Module].
- ul_CurrentSource[i_DiffChannel];
-
- /* Channel gain factor */
- *ChannelGainFactor =
- s_BoardInfos[dev->minor].s_Module[i_Module].
- ul_GainFactor[s_BoardInfos[dev->minor].i_ADDIDATAGain];
- /* End JK 21.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
-
- return 0;
-}
-
-static int apci3200_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
-
- data[1] = inl(devpriv->i_IobaseReserved) & 0xf;
-
- return insn->n;
-}
-
-static int apci3200_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
-
- s->state = inl(devpriv->i_IobaseAddon) & 0xf;
-
- if (comedi_dio_update_state(s, data))
- outl(s->state, devpriv->i_IobaseAddon);
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static int i_APCI3200_Read1AnalogInputChannel(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int ui_EOC = 0;
- unsigned int ui_ChannelNo = 0;
- unsigned int ui_CommandRegister = 0;
-
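- /*
-  * Each register access below first busy-waits until bit 19 of the
-  * register at offset 12 is set before issuing the next write.
-  */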
- /* BEGIN JK 06.07.04: Management of several boards */
- /* ui_ChannelNo=i_ChannelNo; */
- ui_ChannelNo = s_BoardInfos[dev->minor].i_ChannelNo;
-
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /*********************************/
- /* Write the channel to configure */
- /*********************************/
- /* Begin JK 20.10.2004: Bad channel value is used when using differential mode */
- /* outl(0 | ui_Channel_num , devpriv->iobase+i_Offset + 0x4); */
- /* outl(0 | s_BoardInfos [dev->minor].ui_Channel_num , devpriv->iobase+s_BoardInfos [dev->minor].i_Offset + 0x4); */
- outl(0 | s_BoardInfos[dev->minor].i_ChannelNo,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 0x4);
- /* End JK 20.10.2004: Bad channel value is used when using differential mode */
-
- /*******************************/
- /* Set the convert timing unit */
- /*******************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
-
- /* outl(i_ADDIDATAConversionTimeUnit , devpriv->iobase+i_Offset + 36); */
- outl(s_BoardInfos[dev->minor].i_ADDIDATAConversionTimeUnit,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 36);
-
- /**************************/
- /* Set the convert timing */
- /**************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
-
- /* outl(i_ADDIDATAConversionTime , devpriv->iobase+i_Offset + 32); */
- outl(s_BoardInfos[dev->minor].i_ADDIDATAConversionTime,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 32);
-
- /**************************************************************************/
- /* Set the start end stop index to the selected channel and set the start */
- /**************************************************************************/
-
- ui_CommandRegister = ui_ChannelNo | (ui_ChannelNo << 8) | 0x80000;
-
- /* Test if the interrupt is enabled */
- if (s_BoardInfos[dev->minor].i_InterruptFlag == 1) {
- /* Enable the interrupt */
- ui_CommandRegister = ui_CommandRegister | 0x00100000;
- }
-
- /******************************/
- /* Write the command register */
- /******************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
-
- /* outl(ui_CommandRegister, devpriv->iobase+i_Offset + 8); */
- outl(ui_CommandRegister,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 8);
-
- /* Test if the interrupt is enabled */
- if (s_BoardInfos[dev->minor].i_InterruptFlag == 0) {
- do {
- /*************************/
- /*Read the EOC Status bit */
- /*************************/
-
- /* ui_EOC = inl(devpriv->iobase+i_Offset + 20) & 1; */
- ui_EOC = inl(devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 20) & 1;
-
- } while (ui_EOC != 1);
-
- /***************************************/
- /* Read the digital value of the input */
- /***************************************/
-
- /* data[0] = inl (devpriv->iobase+i_Offset + 28); */
- data[0] =
- inl(devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 28);
- /* END JK 06.07.04: Management of several boards */
-
- }
- return 0;
-}
-
-static int i_APCI3200_ReadCalibrationOffsetValue(struct comedi_device *dev,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int ui_Temp = 0, ui_EOC = 0;
- unsigned int ui_CommandRegister = 0;
-
- /* BEGIN JK 06.07.04: Management of several boards */
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /*********************************/
- /* Write the channel to configure */
- /*********************************/
- /* Begin JK 20.10.2004: This seems not necessary ! */
- /* outl(0 | ui_Channel_num , devpriv->iobase+i_Offset + 0x4); */
- /* outl(0 | s_BoardInfos [dev->minor].ui_Channel_num , devpriv->iobase+s_BoardInfos [dev->minor].i_Offset + 0x4); */
- /* End JK 20.10.2004: This seems not necessary ! */
-
- /*******************************/
- /* Set the convert timing unit */
- /*******************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(i_ADDIDATAConversionTimeUnit , devpriv->iobase+i_Offset + 36); */
- outl(s_BoardInfos[dev->minor].i_ADDIDATAConversionTimeUnit,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 36);
- /**************************/
- /* Set the convert timing */
- /**************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(i_ADDIDATAConversionTime , devpriv->iobase+i_Offset + 32); */
- outl(s_BoardInfos[dev->minor].i_ADDIDATAConversionTime,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 32);
- /*****************************/
- /*Read the calibration offset */
- /*****************************/
- /* ui_Temp = inl(devpriv->iobase+i_Offset + 12); */
- ui_Temp = inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 12);
-
- /*********************************/
- /*Configure the Offset Conversion */
- /*********************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl((ui_Temp | 0x00020000), devpriv->iobase+i_Offset + 12); */
- outl((ui_Temp | 0x00020000),
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 12);
- /*******************************/
- /*Initialise ui_CommandRegister */
- /*******************************/
-
- ui_CommandRegister = 0;
-
- /* Test if the interrupt is enabled */
- if (s_BoardInfos[dev->minor].i_InterruptFlag == 1) {
- /*Enable the interrupt */
- ui_CommandRegister = ui_CommandRegister | 0x00100000;
- }
-
- /**********************/
- /*Start the conversion */
- /**********************/
- ui_CommandRegister = ui_CommandRegister | 0x00080000;
-
- /***************************/
- /* Write the command register */
- /***************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(ui_CommandRegister, devpriv->iobase+i_Offset + 8); */
- outl(ui_CommandRegister,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 8);
-
- /* Test if the interrupt is enabled */
- if (s_BoardInfos[dev->minor].i_InterruptFlag == 0) {
- do {
- /*******************/
- /*Read the EOC flag */
- /*******************/
-
- /* ui_EOC = inl (devpriv->iobase+i_Offset + 20) & 1; */
- ui_EOC = inl(devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 20) & 1;
-
- } while (ui_EOC != 1);
-
- /**************************************************/
- /*Read the digital value of the calibration Offset */
- /**************************************************/
-
- /* data[0] = inl(devpriv->iobase+i_Offset+ 28); */
- data[0] =
- inl(devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 28);
- }
- return 0;
-}
-
-static int i_APCI3200_ReadCalibrationGainValue(struct comedi_device *dev,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int ui_EOC = 0;
- int ui_CommandRegister = 0;
-
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /*********************************/
- /* Write the channel to configure */
- /*********************************/
- /* Begin JK 20.10.2004: This seems not necessary ! */
- /* outl(0 | ui_Channel_num , devpriv->iobase+i_Offset + 0x4); */
- /* outl(0 | s_BoardInfos [dev->minor].ui_Channel_num , devpriv->iobase+s_BoardInfos [dev->minor].i_Offset + 0x4); */
- /* End JK 20.10.2004: This seems not necessary ! */
-
- /***************************/
- /*Read the calibration gain */
- /***************************/
- /*******************************/
- /* Set the convert timing unit */
- /*******************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(i_ADDIDATAConversionTimeUnit , devpriv->iobase+i_Offset + 36); */
- outl(s_BoardInfos[dev->minor].i_ADDIDATAConversionTimeUnit,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 36);
- /**************************/
- /* Set the convert timing */
- /**************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(i_ADDIDATAConversionTime , devpriv->iobase+i_Offset + 32); */
- outl(s_BoardInfos[dev->minor].i_ADDIDATAConversionTime,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 32);
- /*******************************/
- /*Configure the Gain Conversion */
- /*******************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(0x00040000 , devpriv->iobase+i_Offset + 12); */
- outl(0x00040000,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 12);
-
- /*******************************/
- /*Initialise ui_CommandRegister */
- /*******************************/
-
- ui_CommandRegister = 0;
-
- /* Test if the interrupt is enabled */
- if (s_BoardInfos[dev->minor].i_InterruptFlag == 1) {
- /*Enable the interrupt */
- ui_CommandRegister = ui_CommandRegister | 0x00100000;
- }
-
- /**********************/
- /*Start the conversion */
- /**********************/
-
- ui_CommandRegister = ui_CommandRegister | 0x00080000;
- /***************************/
- /* Write the command register */
- /***************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(ui_CommandRegister , devpriv->iobase+i_Offset + 8); */
- outl(ui_CommandRegister,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 8);
-
- /* Test if the interrupt is enabled */
- if (s_BoardInfos[dev->minor].i_InterruptFlag == 0) {
- do {
-
- /*******************/
- /*Read the EOC flag */
- /*******************/
-
- /* ui_EOC = inl(devpriv->iobase+i_Offset + 20) & 1; */
- ui_EOC = inl(devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 20) & 1;
-
- } while (ui_EOC != 1);
-
- /************************************************/
- /*Read the digital value of the calibration Gain */
- /************************************************/
-
- /* data[0] = inl(devpriv->iobase+i_Offset + 28); */
- data[0] =
- inl(devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 28);
-
- }
- return 0;
-}
-
-static int i_APCI3200_ReadCJCValue(struct comedi_device *dev,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int ui_EOC = 0;
- int ui_CommandRegister = 0;
-
- /******************************/
- /*Set the converting time unit */
- /******************************/
-
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
-
- /* outl(i_ADDIDATAConversionTimeUnit , devpriv->iobase+i_Offset + 36); */
- outl(s_BoardInfos[dev->minor].i_ADDIDATAConversionTimeUnit,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 36);
- /**************************/
- /* Set the convert timing */
- /**************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
-
- /* outl(i_ADDIDATAConversionTime , devpriv->iobase+i_Offset + 32); */
- outl(s_BoardInfos[dev->minor].i_ADDIDATAConversionTime,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 32);
-
- /******************************/
- /*Configure the CJC Conversion */
- /******************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
-
- /* outl( 0x00000400 , devpriv->iobase+i_Offset + 4); */
- outl(0x00000400,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 4);
- /*******************************/
- /* Initialise ui_CommandRegister */
- /*******************************/
- ui_CommandRegister = 0;
- /* Test if the interrupt is enabled */
- if (s_BoardInfos[dev->minor].i_InterruptFlag == 1) {
- /*Enable the interrupt */
- ui_CommandRegister = ui_CommandRegister | 0x00100000;
- }
-
- /**********************/
- /*Start the conversion */
- /**********************/
-
- ui_CommandRegister = ui_CommandRegister | 0x00080000;
-
- /***************************/
- /* Write the command register */
- /***************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(ui_CommandRegister , devpriv->iobase+i_Offset + 8); */
- outl(ui_CommandRegister,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 8);
-
- /* Test if the interrupt is enabled */
- if (s_BoardInfos[dev->minor].i_InterruptFlag == 0) {
- do {
-
- /*******************/
- /*Read the EOC flag */
- /*******************/
-
- /* ui_EOC = inl(devpriv->iobase+i_Offset + 20) & 1; */
- ui_EOC = inl(devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 20) & 1;
-
- } while (ui_EOC != 1);
-
- /***********************************/
- /*Read the digital value of the CJC */
- /***********************************/
-
- /* data[0] = inl(devpriv->iobase+i_Offset + 28); */
- data[0] =
- inl(devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 28);
- }
- return 0;
-}
-
-static int i_APCI3200_ReadCJCCalOffset(struct comedi_device *dev,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int ui_EOC = 0;
- int ui_CommandRegister = 0;
-
- /*******************************************/
- /*Read calibration offset value for the CJC */
- /*******************************************/
- /*******************************/
- /* Set the convert timing unit */
- /*******************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(i_ADDIDATAConversionTimeUnit , devpriv->iobase+i_Offset + 36); */
- outl(s_BoardInfos[dev->minor].i_ADDIDATAConversionTimeUnit,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 36);
- /**************************/
- /* Set the convert timing */
- /**************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(i_ADDIDATAConversionTime , devpriv->iobase+i_Offset + 32); */
- outl(s_BoardInfos[dev->minor].i_ADDIDATAConversionTime,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 32);
- /******************************/
- /*Configure the CJC Conversion */
- /******************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(0x00000400 , devpriv->iobase+i_Offset + 4); */
- outl(0x00000400,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 4);
- /*********************************/
- /*Configure the Offset Conversion */
- /*********************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(0x00020000, devpriv->iobase+i_Offset + 12); */
- outl(0x00020000,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 12);
- /*******************************/
- /*Initialise ui_CommandRegister */
- /*******************************/
- ui_CommandRegister = 0;
- /*Test if the interrupt is enabled */
- if (s_BoardInfos[dev->minor].i_InterruptFlag == 1) {
- /*Enable the interrupt */
- ui_CommandRegister = ui_CommandRegister | 0x00100000;
- }
-
- /**********************/
- /*Start the conversion */
- /**********************/
- ui_CommandRegister = ui_CommandRegister | 0x00080000;
- /***************************/
- /*Write the command register */
- /***************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(ui_CommandRegister,devpriv->iobase+i_Offset + 8); */
- outl(ui_CommandRegister,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 8);
- if (s_BoardInfos[dev->minor].i_InterruptFlag == 0) {
- do {
- /*******************/
- /*Read the EOC flag */
- /*******************/
- /* ui_EOC = inl(devpriv->iobase+i_Offset + 20) & 1; */
- ui_EOC = inl(devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 20) & 1;
- } while (ui_EOC != 1);
-
- /**************************************************/
- /*Read the digital value of the calibration Offset */
- /**************************************************/
- /* data[0] = inl(devpriv->iobase+i_Offset + 28); */
- data[0] =
- inl(devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 28);
- }
- return 0;
-}
-
-static int i_APCI3200_ReadCJCCalGain(struct comedi_device *dev,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int ui_EOC = 0;
- int ui_CommandRegister = 0;
-
- /*******************************/
- /* Set the convert timing unit */
- /*******************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(i_ADDIDATAConversionTimeUnit , devpriv->iobase+i_Offset + 36); */
- outl(s_BoardInfos[dev->minor].i_ADDIDATAConversionTimeUnit,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 36);
- /**************************/
- /* Set the convert timing */
- /**************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(i_ADDIDATAConversionTime , devpriv->iobase+i_Offset + 32); */
- outl(s_BoardInfos[dev->minor].i_ADDIDATAConversionTime,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 32);
- /******************************/
- /*Configure the CJC Conversion */
- /******************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(0x00000400,devpriv->iobase+i_Offset + 4); */
- outl(0x00000400,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 4);
- /*******************************/
- /*Configure the Gain Conversion */
- /*******************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(0x00040000,devpriv->iobase+i_Offset + 12); */
- outl(0x00040000,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 12);
-
- /*******************************/
- /*Initialise ui_CommandRegister */
- /*******************************/
- ui_CommandRegister = 0;
- /*Test if the interrupt is enabled */
- if (s_BoardInfos[dev->minor].i_InterruptFlag == 1) {
- /*Enable the interrupt */
- ui_CommandRegister = ui_CommandRegister | 0x00100000;
- }
- /**********************/
- /*Start the conversion */
- /**********************/
- ui_CommandRegister = ui_CommandRegister | 0x00080000;
- /***************************/
- /*Write the command register */
- /***************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(ui_CommandRegister ,devpriv->iobase+i_Offset + 8); */
- outl(ui_CommandRegister,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 8);
- if (s_BoardInfos[dev->minor].i_InterruptFlag == 0) {
- do {
- /*******************/
- /*Read the EOC flag */
- /*******************/
- /* ui_EOC = inl(devpriv->iobase+i_Offset + 20) & 1; */
- ui_EOC = inl(devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 20) & 1;
- } while (ui_EOC != 1);
- /************************************************/
- /*Read the digital value of the calibration Gain */
- /************************************************/
- /* data[0] = inl (devpriv->iobase+i_Offset + 28); */
- data[0] =
- inl(devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 28);
- }
- return 0;
-}
-
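All of the calibration read-back helpers above follow the same register protocol: poll bit 19 of the module status register (offset 12 from the per-board i_Offset) until the sequencer is ready, write the timing or command register, and, when interrupts are disabled, poll the EOC bit at offset 20 before reading the converted value at offset 28. A minimal sketch of how that repeated unbounded busy-wait could be factored into one bounded helper is shown below; apci3200_wait_ready is a hypothetical name and not part of this driver.

/* Hypothetical helper (not in the original driver): wait until bit 19 of
 * the status register at offset 12 of the selected module reports ready.
 * Unlike the open-coded while() loops above, this loop is bounded. */
static int apci3200_wait_ready(struct comedi_device *dev)
{
	struct addi_private *devpriv = dev->private;
	unsigned long base = devpriv->iobase + s_BoardInfos[dev->minor].i_Offset;
	unsigned int retries;

	for (retries = 0; retries < 100000; retries++) {
		if ((inl(base + 12) >> 19) & 1)
			return 0;	/* module ready for the next access */
	}
	return -ETIME;			/* sequencer never became ready */
}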
-static int apci3200_reset(struct comedi_device *dev)
-{
- struct addi_private *devpriv = dev->private;
- int i_Temp;
- unsigned int dw_Dummy;
-
- /* i_InterruptFlag=0; */
- /* i_Initialised==0; */
- /* i_Count=0; */
- /* i_Sum=0; */
-
- s_BoardInfos[dev->minor].i_InterruptFlag = 0;
- s_BoardInfos[dev->minor].i_Initialised = 0;
- s_BoardInfos[dev->minor].i_Count = 0;
- s_BoardInfos[dev->minor].i_Sum = 0;
- s_BoardInfos[dev->minor].b_StructInitialized = 0;
-
- outl(0x83838383, devpriv->i_IobaseAmcc + 0x60);
-
- /* Enable the interrupt for the controller */
- dw_Dummy = inl(devpriv->i_IobaseAmcc + 0x38);
- outl(dw_Dummy | 0x2000, devpriv->i_IobaseAmcc + 0x38);
- outl(0, devpriv->i_IobaseAddon); /* Resets the output */
- /*******************/
- /*Empty the buffer */
- /*******************/
- for (i_Temp = 0; i_Temp <= 95; i_Temp++) {
- /* ui_InterruptChannelValue[i_Temp]=0; */
- s_BoardInfos[dev->minor].ui_InterruptChannelValue[i_Temp] = 0;
- } /* for(i_Temp=0;i_Temp<=95;i_Temp++) */
- /*****************************/
- /*Reset the START and IRQ bit */
- /*****************************/
- for (i_Temp = 0; i_Temp <= 192;) {
- while (((inl(devpriv->iobase + i_Temp + 12) >> 19) & 1) != 1) ;
- outl(0, devpriv->iobase + i_Temp + 8);
- i_Temp = i_Temp + 64;
- } /* for(i_Temp=0;i_Temp<=192;i_Temp+64) */
- return 0;
-}
-
-/*
- * Read value of the selected channel
- *
- * data[0] : Digital Value Of Input
- * data[1] : Calibration Offset Value
- * data[2] : Calibration Gain Value
- * data[3] : CJC value
- * data[4] : CJC offset value
- * data[5] : CJC gain value
- * data[6] : CJC current source from eeprom
- * data[7] : Channel current source from eeprom
- * data[8] : Channel gain factor from eeprom
- */
-static int apci3200_ai_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int ui_DummyValue = 0;
- int i_ConvertCJCCalibration;
- int i = 0;
-
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* if(i_Initialised==0) */
- if (s_BoardInfos[dev->minor].i_Initialised == 0)
- /* END JK 06.07.04: Management of sevrals boards */
- {
- apci3200_reset(dev);
- return -EINVAL;
- } /* if(i_Initialised==0); */
-
- switch (insn->unused[0]) {
- case 0:
-
- i_APCI3200_Read1AnalogInputChannel(dev, s, insn,
- &ui_DummyValue);
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* ui_InterruptChannelValue[i_Count+0]=ui_DummyValue; */
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue[s_BoardInfos[dev->minor].
- i_Count + 0] = ui_DummyValue;
- /* END JK 06.07.04: Management of sevrals boards */
-
- /* Begin JK 25.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
- i_APCI3200_GetChannelCalibrationValue(dev,
- s_BoardInfos[dev->minor].ui_Channel_num,
- &s_BoardInfos[dev->minor].
- ui_InterruptChannelValue[s_BoardInfos[dev->minor].
- i_Count + 6],
- &s_BoardInfos[dev->minor].
- ui_InterruptChannelValue[s_BoardInfos[dev->minor].
- i_Count + 7],
- &s_BoardInfos[dev->minor].
- ui_InterruptChannelValue[s_BoardInfos[dev->minor].
- i_Count + 8]);
- /* End JK 25.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
-
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* if((i_ADDIDATAType==2) && (i_InterruptFlag == FALSE) && (i_CJCAvailable==1)) */
- if ((s_BoardInfos[dev->minor].i_ADDIDATAType == 2)
- && (s_BoardInfos[dev->minor].i_InterruptFlag == FALSE)
- && (s_BoardInfos[dev->minor].i_CJCAvailable == 1))
- /* END JK 06.07.04: Management of sevrals boards */
- {
- i_APCI3200_ReadCJCValue(dev, &ui_DummyValue);
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* ui_InterruptChannelValue[i_Count + 3]=ui_DummyValue; */
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue[s_BoardInfos[dev->
- minor].i_Count + 3] = ui_DummyValue;
- /* END JK 06.07.04: Management of sevrals boards */
- } /* if((i_ADDIDATAType==2) && (i_InterruptFlag == FALSE)) */
- else {
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* ui_InterruptChannelValue[i_Count + 3]=0; */
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue[s_BoardInfos[dev->
- minor].i_Count + 3] = 0;
- /* END JK 06.07.04: Management of sevrals boards */
- } /* elseif((i_ADDIDATAType==2) && (i_InterruptFlag == FALSE) && (i_CJCAvailable==1)) */
-
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* if (( i_AutoCalibration == FALSE) && (i_InterruptFlag == FALSE)) */
- if ((s_BoardInfos[dev->minor].i_AutoCalibration == FALSE)
- && (s_BoardInfos[dev->minor].i_InterruptFlag == FALSE))
- /* END JK 06.07.04: Management of sevrals boards */
- {
- i_APCI3200_ReadCalibrationOffsetValue(dev,
- &ui_DummyValue);
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* ui_InterruptChannelValue[i_Count + 1]=ui_DummyValue; */
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue[s_BoardInfos[dev->
- minor].i_Count + 1] = ui_DummyValue;
- /* END JK 06.07.04: Management of sevrals boards */
- i_APCI3200_ReadCalibrationGainValue(dev,
- &ui_DummyValue);
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* ui_InterruptChannelValue[i_Count + 2]=ui_DummyValue; */
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue[s_BoardInfos[dev->
- minor].i_Count + 2] = ui_DummyValue;
- /* END JK 06.07.04: Management of sevrals boards */
- } /* if (( i_AutoCalibration == FALSE) && (i_InterruptFlag == FALSE)) */
-
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* if((i_ADDIDATAType==2) && (i_InterruptFlag == FALSE)&& (i_CJCAvailable==1)) */
- if ((s_BoardInfos[dev->minor].i_ADDIDATAType == 2)
- && (s_BoardInfos[dev->minor].i_InterruptFlag == FALSE)
- && (s_BoardInfos[dev->minor].i_CJCAvailable == 1))
- /* END JK 06.07.04: Management of sevrals boards */
- {
- /**********************************************************/
- /*Test if the Calibration channel must be read for the CJC */
- /**********************************************************/
- /**********************************/
- /*Test if the polarity is the same */
- /**********************************/
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* if(i_CJCPolarity!=i_ADDIDATAPolarity) */
- if (s_BoardInfos[dev->minor].i_CJCPolarity !=
- s_BoardInfos[dev->minor].i_ADDIDATAPolarity)
- /* END JK 06.07.04: Management of sevrals boards */
- {
- i_ConvertCJCCalibration = 1;
- } /* if(i_CJCPolarity!=i_ADDIDATAPolarity) */
- else {
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* if(i_CJCGain==i_ADDIDATAGain) */
- if (s_BoardInfos[dev->minor].i_CJCGain ==
- s_BoardInfos[dev->minor].i_ADDIDATAGain)
- /* END JK 06.07.04: Management of sevrals boards */
- {
- i_ConvertCJCCalibration = 0;
- } /* if(i_CJCGain==i_ADDIDATAGain) */
- else {
- i_ConvertCJCCalibration = 1;
- } /* elseif(i_CJCGain==i_ADDIDATAGain) */
- } /* elseif(i_CJCPolarity!=i_ADDIDATAPolarity) */
- if (i_ConvertCJCCalibration == 1) {
- i_APCI3200_ReadCJCCalOffset(dev,
- &ui_DummyValue);
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* ui_InterruptChannelValue[i_Count+4]=ui_DummyValue; */
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue[s_BoardInfos
- [dev->minor].i_Count + 4] =
- ui_DummyValue;
- /* END JK 06.07.04: Management of sevrals boards */
-
- i_APCI3200_ReadCJCCalGain(dev, &ui_DummyValue);
-
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* ui_InterruptChannelValue[i_Count+5]=ui_DummyValue; */
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue[s_BoardInfos
- [dev->minor].i_Count + 5] =
- ui_DummyValue;
- /* END JK 06.07.04: Management of sevrals boards */
- } /* if(i_ConvertCJCCalibration==1) */
- else {
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* ui_InterruptChannelValue[i_Count+4]=0; */
- /* ui_InterruptChannelValue[i_Count+5]=0; */
-
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue[s_BoardInfos
- [dev->minor].i_Count + 4] = 0;
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue[s_BoardInfos
- [dev->minor].i_Count + 5] = 0;
- /* END JK 06.07.04: Management of sevrals boards */
- } /* elseif(i_ConvertCJCCalibration==1) */
- } /* if((i_ADDIDATAType==2) && (i_InterruptFlag == FALSE)) */
-
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* if(i_ScanType!=1) */
- if (s_BoardInfos[dev->minor].i_ScanType != 1) {
- /* i_Count=0; */
- s_BoardInfos[dev->minor].i_Count = 0;
- } /* if(i_ScanType!=1) */
- else {
- /* i_Count=i_Count +6; */
- /* Begin JK 22.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
- /* s_BoardInfos [dev->minor].i_Count=s_BoardInfos [dev->minor].i_Count +6; */
- s_BoardInfos[dev->minor].i_Count =
- s_BoardInfos[dev->minor].i_Count + 9;
- /* End JK 22.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
- } /* else if(i_ScanType!=1) */
-
- /* if((i_ScanType==1) &&(i_InterruptFlag==1)) */
- if ((s_BoardInfos[dev->minor].i_ScanType == 1)
- && (s_BoardInfos[dev->minor].i_InterruptFlag == 1)) {
- /* i_Count=i_Count-6; */
- /* Begin JK 22.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
- /* s_BoardInfos [dev->minor].i_Count=s_BoardInfos [dev->minor].i_Count-6; */
- s_BoardInfos[dev->minor].i_Count =
- s_BoardInfos[dev->minor].i_Count - 9;
- /* End JK 22.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
- }
- /* if(i_ScanType==0) */
- if (s_BoardInfos[dev->minor].i_ScanType == 0) {
- /*
- data[0]= ui_InterruptChannelValue[0];
- data[1]= ui_InterruptChannelValue[1];
- data[2]= ui_InterruptChannelValue[2];
- data[3]= ui_InterruptChannelValue[3];
- data[4]= ui_InterruptChannelValue[4];
- data[5]= ui_InterruptChannelValue[5];
- */
- data[0] =
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue[0];
- data[1] =
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue[1];
- data[2] =
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue[2];
- data[3] =
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue[3];
- data[4] =
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue[4];
- data[5] =
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue[5];
-
- /* Begin JK 22.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
- i_APCI3200_GetChannelCalibrationValue(dev,
- s_BoardInfos[dev->minor].ui_Channel_num,
- &data[6], &data[7], &data[8]);
- /* End JK 22.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
- }
- break;
- case 1:
-
- for (i = 0; i < insn->n; i++) {
- /* data[i]=ui_InterruptChannelValue[i]; */
- data[i] =
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue[i];
- }
-
- /* i_Count=0; */
- /* i_Sum=0; */
- /* if(i_ScanType==1) */
- s_BoardInfos[dev->minor].i_Count = 0;
- s_BoardInfos[dev->minor].i_Sum = 0;
- if (s_BoardInfos[dev->minor].i_ScanType == 1) {
- /* i_Initialised=0; */
- /* i_InterruptFlag=0; */
- s_BoardInfos[dev->minor].i_Initialised = 0;
- s_BoardInfos[dev->minor].i_InterruptFlag = 0;
- /* END JK 06.07.04: Management of sevrals boards */
- }
- break;
- default:
- printk("\nThe parameters passed are in error\n");
- apci3200_reset(dev);
- return -EINVAL;
- } /* switch(insn->unused[0]) */
-
- return insn->n;
-}
-
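The nine values documented in the comment above apci3200_ai_read come back in a fixed order in data[]. Purely as an illustration of that layout (the structure and function below are not part of the driver), a caller could name them like this:

/* Illustrative only: name the nine entries returned by apci3200_ai_read. */
struct apci3200_sample {
	unsigned int value;		/* data[0]: digital value of the input */
	unsigned int cal_offset;	/* data[1]: calibration offset value */
	unsigned int cal_gain;		/* data[2]: calibration gain value */
	unsigned int cjc;		/* data[3]: CJC value */
	unsigned int cjc_offset;	/* data[4]: CJC offset value */
	unsigned int cjc_gain;		/* data[5]: CJC gain value */
	unsigned int cjc_current;	/* data[6]: CJC current source (EEPROM) */
	unsigned int chan_current;	/* data[7]: channel current source (EEPROM) */
	unsigned int chan_gain;		/* data[8]: channel gain factor (EEPROM) */
};

static void apci3200_unpack_sample(const unsigned int *data,
				   struct apci3200_sample *s)
{
	s->value	= data[0];
	s->cal_offset	= data[1];
	s->cal_gain	= data[2];
	s->cjc		= data[3];
	s->cjc_offset	= data[4];
	s->cjc_gain	= data[5];
	s->cjc_current	= data[6];
	s->chan_current	= data[7];
	s->chan_gain	= data[8];
}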
-/*
- * Configures the Analog Input Subdevice
- *
- * data[0] = 0 Normal AI
- * = 1 RTD
- * = 2 THERMOCOUPLE
- * data[1] = Gain To Use
- * data[2] = 0 Bipolar
- * = 1 Unipolar
- * data[3] = Offset Range
- * data[4] = 0 DC Coupling
- * = 1 AC Coupling
- * data[5] = 0 Single
- * = 1 Differential
- * data[6] = TimerReloadValue
- * data[7] = ConvertingTimeUnit
- * data[8] = 0 Analog voltage measurement
- * = 1 Resistance measurement
- * = 2 Temperature measurement
- * data[9] = 0 Interrupt Disable
- * = 1 Interrupt Enable
- * data[10] = Type of Thermocouple
- * data[11] = single channel Module Number
- * data[12] = 0 Single Read
- * = 1 Read more channel
- * = 2 Single scan
- * = 3 Continuous Scan
- * data[13] = Number of channels to read
- * data[14] = 0 RTD not used
- * = 1 RTD 2 wire connection
- * = 2 RTD 3 wire connection
- * = 3 RTD 4 wire connection
- */
-static int apci3200_ai_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int ul_Config = 0, ul_Temp = 0;
- unsigned int ui_ChannelNo = 0;
- unsigned int ui_Dummy = 0;
- int i_err = 0;
-
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* Initialize the structure */
- if (s_BoardInfos[dev->minor].b_StructInitialized != 1) {
- s_BoardInfos[dev->minor].i_CJCAvailable = 1;
- s_BoardInfos[dev->minor].i_CJCPolarity = 0;
- s_BoardInfos[dev->minor].i_CJCGain = 2; /* changed from 0 to 2 */
- s_BoardInfos[dev->minor].i_InterruptFlag = 0;
- s_BoardInfos[dev->minor].i_AutoCalibration = 0; /* : auto calibration */
- s_BoardInfos[dev->minor].i_ChannelCount = 0;
- s_BoardInfos[dev->minor].i_Sum = 0;
- s_BoardInfos[dev->minor].ui_Channel_num = 0;
- s_BoardInfos[dev->minor].i_Count = 0;
- s_BoardInfos[dev->minor].i_Initialised = 0;
- s_BoardInfos[dev->minor].b_StructInitialized = 1;
-
- /* Begin JK 21.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
- s_BoardInfos[dev->minor].i_ConnectionType = 0;
- /* End JK 21.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
-
- /* Begin JK 21.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
- memset(s_BoardInfos[dev->minor].s_Module, 0,
- sizeof(s_BoardInfos[dev->minor].s_Module[MAX_MODULE]));
-
- v_GetAPCI3200EepromCalibrationValue(devpriv->i_IobaseAmcc,
- &s_BoardInfos[dev->minor]);
- /* End JK 21.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
- }
-
- if (data[0] != 0 && data[0] != 1 && data[0] != 2) {
- printk("\nThe selection of acquisition type is in error\n");
- i_err++;
- } /* if(data[0]!=0 && data[0]!=1 && data[0]!=2) */
- if (data[0] == 1) {
- if (data[14] != 0 && data[14] != 1 && data[14] != 2
- && data[14] != 4) {
- printk("\n Error in selection of RTD connection type\n");
- i_err++;
- } /* if(data[14]!=0 && data[14]!=1 && data[14]!=2 && data[14]!=4) */
- } /* if(data[0]==1 ) */
- if (data[1] < 0 || data[1] > 7) {
- printk("\nThe selection of gain is in error\n");
- i_err++;
- } /* if(data[1]<0 || data[1]>7) */
- if (data[2] != 0 && data[2] != 1) {
- printk("\nThe selection of polarity is in error\n");
- i_err++;
- } /* if(data[2]!=0 && data[2]!=1) */
- if (data[3] != 0) {
- printk("\nThe selection of offset range is in error\n");
- i_err++;
- } /* if(data[3]!=0) */
- if (data[4] != 0 && data[4] != 1) {
- printk("\nThe selection of coupling is in error\n");
- i_err++;
- } /* if(data[4]!=0 && data[4]!=1) */
- if (data[5] != 0 && data[5] != 1) {
- printk("\nThe selection of single/differential mode is in error\n");
- i_err++;
- } /* if(data[5]!=0 && data[5]!=1) */
- if (data[8] != 0 && data[8] != 1 && data[2] != 2) {
- printk("\nError in selection of functionality\n");
- } /* if(data[8]!=0 && data[8]!=1 && data[2]!=2) */
- if (data[12] == 0 || data[12] == 1) {
- if (data[6] != 20 && data[6] != 40 && data[6] != 80
- && data[6] != 160) {
- printk("\nThe selection of conversion time reload value is in error\n");
- i_err++;
- } /* if (data[6]!=20 && data[6]!=40 && data[6]!=80 && data[6]!=160 ) */
- if (data[7] != 2) {
- printk("\nThe selection of conversion time unit is in error\n");
- i_err++;
- } /* if(data[7]!=2) */
- }
- if (data[9] != 0 && data[9] != 1) {
- printk("\nThe selection of interrupt enable is in error\n");
- i_err++;
- } /* if(data[9]!=0 && data[9]!=1) */
- if (data[11] < 0 || data[11] > 4) {
- printk("\nThe selection of module is in error\n");
- i_err++;
- } /* if(data[11] <0 || data[11]>1) */
- if (data[12] < 0 || data[12] > 3) {
- printk("\nThe selection of singlechannel/scan selection is in error\n");
- i_err++;
- } /* if(data[12] < 0 || data[12]> 3) */
- if (data[13] < 0 || data[13] > 16) {
- printk("\nThe selection of number of channels is in error\n");
- i_err++;
- } /* if(data[13] <0 ||data[13] >15) */
-
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /*
- i_ChannelCount=data[13];
- i_ScanType=data[12];
- i_ADDIDATAPolarity = data[2];
- i_ADDIDATAGain=data[1];
- i_ADDIDATAConversionTime=data[6];
- i_ADDIDATAConversionTimeUnit=data[7];
- i_ADDIDATAType=data[0];
- */
-
- /* Save acquisition configuration for the actual board */
- s_BoardInfos[dev->minor].i_ChannelCount = data[13];
- s_BoardInfos[dev->minor].i_ScanType = data[12];
- s_BoardInfos[dev->minor].i_ADDIDATAPolarity = data[2];
- s_BoardInfos[dev->minor].i_ADDIDATAGain = data[1];
- s_BoardInfos[dev->minor].i_ADDIDATAConversionTime = data[6];
- s_BoardInfos[dev->minor].i_ADDIDATAConversionTimeUnit = data[7];
- s_BoardInfos[dev->minor].i_ADDIDATAType = data[0];
- /* Begin JK 19.10.2004: APCI-3200 Driver update 0.7.57 -> 0.7.68 */
- s_BoardInfos[dev->minor].i_ConnectionType = data[5];
- /* End JK 19.10.2004: APCI-3200 Driver update 0.7.57 -> 0.7.68 */
- /* END JK 06.07.04: Management of sevrals boards */
-
- /* Begin JK 19.10.2004: APCI-3200 Driver update 0.7.57 -> 0.7.68 */
- memset(s_BoardInfos[dev->minor].ui_ScanValueArray, 0, (7 + 12) * sizeof(unsigned int)); /* 7 is the maximal number of channels */
- /* End JK 19.10.2004: APCI-3200 Driver update 0.7.57 -> 0.7.68 */
-
- /* BEGIN JK 02.07.04 : This while loop cannot be used as-is; it blocks the process when several boards are used */
- /* while(i_InterruptFlag==1) */
- while (s_BoardInfos[dev->minor].i_InterruptFlag == 1) {
-#ifndef MSXBOX
- udelay(1);
-#else
- /* When the driver is compiled for the MSX-Box, a printk is used */
- /* to provide a small delay, because udelay seems to be broken */
- /* on the MSX-Box. */
- /* This workaround still has to be investigated. */
- printk("");
-#endif
- }
- /* END JK 02.07.04 : This while loop cannot be used as-is; it blocks the process when several boards are used */
-
- ui_ChannelNo = CR_CHAN(insn->chanspec); /* get the channel */
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_ChannelNo=ui_ChannelNo; */
- /* ui_Channel_num =ui_ChannelNo; */
-
- s_BoardInfos[dev->minor].i_ChannelNo = ui_ChannelNo;
- s_BoardInfos[dev->minor].ui_Channel_num = ui_ChannelNo;
-
- /* END JK 06.07.04: Management of sevrals boards */
-
- if (data[5] == 0) {
- if (ui_ChannelNo > 15) {
- printk("\nThe Selection of the channel is in error\n");
- i_err++;
- } /* if(ui_ChannelNo>15) */
- } /* if(data[5]==0) */
- else {
- if (data[14] == 2) {
- if (ui_ChannelNo > 3) {
- printk("\nThe Selection of the channel is in error\n");
- i_err++;
- } /* if(ui_ChannelNo>3) */
- } /* if(data[14]==2) */
- else {
- if (ui_ChannelNo > 7) {
- printk("\nThe Selection of the channel is in error\n");
- i_err++;
- } /* if(ui_ChannelNo>7) */
- } /* elseif(data[14]==2) */
- } /* elseif(data[5]==0) */
- if (data[12] == 0 || data[12] == 1) {
- switch (data[5]) {
- case 0:
- if (ui_ChannelNo <= 3) {
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_Offset=0; */
- s_BoardInfos[dev->minor].i_Offset = 0;
- /* END JK 06.07.04: Management of sevrals boards */
- } /* if(ui_ChannelNo <=3) */
- if (ui_ChannelNo >= 4 && ui_ChannelNo <= 7) {
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_Offset=64; */
- s_BoardInfos[dev->minor].i_Offset = 64;
- /* END JK 06.07.04: Management of sevrals boards */
- } /* if(ui_ChannelNo >=4 && ui_ChannelNo <=7) */
- if (ui_ChannelNo >= 8 && ui_ChannelNo <= 11) {
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_Offset=128; */
- s_BoardInfos[dev->minor].i_Offset = 128;
- /* END JK 06.07.04: Management of sevrals boards */
- } /* if(ui_ChannelNo >=8 && ui_ChannelNo <=11) */
- if (ui_ChannelNo >= 12 && ui_ChannelNo <= 15) {
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_Offset=192; */
- s_BoardInfos[dev->minor].i_Offset = 192;
- /* END JK 06.07.04: Management of sevrals boards */
- } /* if(ui_ChannelNo >=12 && ui_ChannelNo <=15) */
- break;
- case 1:
- if (data[14] == 2) {
- if (ui_ChannelNo == 0) {
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_Offset=0; */
- s_BoardInfos[dev->minor].i_Offset = 0;
- /* END JK 06.07.04: Management of sevrals boards */
- } /* if(ui_ChannelNo ==0 ) */
- if (ui_ChannelNo == 1) {
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_Offset=0; */
- s_BoardInfos[dev->minor].i_Offset = 64;
- /* END JK 06.07.04: Management of sevrals boards */
- } /* if(ui_ChannelNo ==1) */
- if (ui_ChannelNo == 2) {
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_Offset=128; */
- s_BoardInfos[dev->minor].i_Offset = 128;
- /* END JK 06.07.04: Management of sevrals boards */
- } /* if(ui_ChannelNo ==2 ) */
- if (ui_ChannelNo == 3) {
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_Offset=192; */
- s_BoardInfos[dev->minor].i_Offset = 192;
- /* END JK 06.07.04: Management of sevrals boards */
- } /* if(ui_ChannelNo ==3) */
-
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_ChannelNo=0; */
- s_BoardInfos[dev->minor].i_ChannelNo = 0;
- /* END JK 06.07.04: Management of sevrals boards */
- ui_ChannelNo = 0;
- break;
- } /* if(data[14]==2) */
- if (ui_ChannelNo <= 1) {
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_Offset=0; */
- s_BoardInfos[dev->minor].i_Offset = 0;
- /* END JK 06.07.04: Management of sevrals boards */
- } /* if(ui_ChannelNo <=1) */
- if (ui_ChannelNo >= 2 && ui_ChannelNo <= 3) {
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_ChannelNo=i_ChannelNo-2; */
- /* i_Offset=64; */
- s_BoardInfos[dev->minor].i_ChannelNo =
- s_BoardInfos[dev->minor].i_ChannelNo -
- 2;
- s_BoardInfos[dev->minor].i_Offset = 64;
- /* END JK 06.07.04: Management of sevrals boards */
- ui_ChannelNo = ui_ChannelNo - 2;
- } /* if(ui_ChannelNo >=2 && ui_ChannelNo <=3) */
- if (ui_ChannelNo >= 4 && ui_ChannelNo <= 5) {
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_ChannelNo=i_ChannelNo-4; */
- /* i_Offset=128; */
- s_BoardInfos[dev->minor].i_ChannelNo =
- s_BoardInfos[dev->minor].i_ChannelNo -
- 4;
- s_BoardInfos[dev->minor].i_Offset = 128;
- /* END JK 06.07.04: Management of sevrals boards */
- ui_ChannelNo = ui_ChannelNo - 4;
- } /* if(ui_ChannelNo >=4 && ui_ChannelNo <=5) */
- if (ui_ChannelNo >= 6 && ui_ChannelNo <= 7) {
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_ChannelNo=i_ChannelNo-6; */
- /* i_Offset=192; */
- s_BoardInfos[dev->minor].i_ChannelNo =
- s_BoardInfos[dev->minor].i_ChannelNo -
- 6;
- s_BoardInfos[dev->minor].i_Offset = 192;
- /* END JK 06.07.04: Management of sevrals boards */
- ui_ChannelNo = ui_ChannelNo - 6;
- } /* if(ui_ChannelNo >=6 && ui_ChannelNo <=7) */
- break;
-
- default:
- printk("\n This selection of polarity does not exist\n");
- i_err++;
- } /* switch(data[2]) */
- } /* if(data[12]==0 || data[12]==1) */
- else {
- switch (data[11]) {
- case 1:
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_Offset=0; */
- s_BoardInfos[dev->minor].i_Offset = 0;
- /* END JK 06.07.04: Management of sevrals boards */
- break;
- case 2:
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_Offset=64; */
- s_BoardInfos[dev->minor].i_Offset = 64;
- /* END JK 06.07.04: Management of sevrals boards */
- break;
- case 3:
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_Offset=128; */
- s_BoardInfos[dev->minor].i_Offset = 128;
- /* END JK 06.07.04: Management of sevrals boards */
- break;
- case 4:
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_Offset=192; */
- s_BoardInfos[dev->minor].i_Offset = 192;
- /* END JK 06.07.04: Management of sevrals boards */
- break;
- default:
- printk("\nError in module selection\n");
- i_err++;
- } /* switch(data[11]) */
- } /* elseif(data[12]==0 || data[12]==1) */
- if (i_err) {
- apci3200_reset(dev);
- return -EINVAL;
- }
- /* if(i_ScanType!=1) */
- if (s_BoardInfos[dev->minor].i_ScanType != 1) {
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_Count=0; */
- /* i_Sum=0; */
- s_BoardInfos[dev->minor].i_Count = 0;
- s_BoardInfos[dev->minor].i_Sum = 0;
- /* END JK 06.07.04: Management of sevrals boards */
- } /* if(i_ScanType!=1) */
-
- ul_Config =
- data[1] | (data[2] << 6) | (data[5] << 7) | (data[3] << 8) |
- (data[4] << 9);
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* END JK 06.07.04: Management of sevrals boards */
- /*********************************/
- /* Write the channel to configure */
- /*********************************/
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* outl(0 | ui_ChannelNo , devpriv->iobase+i_Offset + 0x4); */
- outl(0 | ui_ChannelNo,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 0x4);
- /* END JK 06.07.04: Management of sevrals boards */
-
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* END JK 06.07.04: Management of sevrals boards */
- /**************************/
- /* Reset the configuration */
- /**************************/
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* outl(0 , devpriv->iobase+i_Offset + 0x0); */
- outl(0, devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 0x0);
- /* END JK 06.07.04: Management of sevrals boards */
-
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* END JK 06.07.04: Management of sevrals boards */
-
- /***************************/
- /* Write the configuration */
- /***************************/
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* outl(ul_Config , devpriv->iobase+i_Offset + 0x0); */
- outl(ul_Config,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 0x0);
- /* END JK 06.07.04: Management of sevrals boards */
-
- /***************************/
- /*Reset the calibration bit */
- /***************************/
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* ul_Temp = inl(devpriv->iobase+i_Offset + 12); */
- ul_Temp = inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 12);
- /* END JK 06.07.04: Management of sevrals boards */
-
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* END JK 06.07.04: Management of sevrals boards */
-
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* outl((ul_Temp & 0xFFF9FFFF) , devpriv->iobase+.i_Offset + 12); */
- outl((ul_Temp & 0xFFF9FFFF),
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 12);
- /* END JK 06.07.04: Management of sevrals boards */
-
- if (data[9] == 1) {
- devpriv->tsk_Current = current;
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_InterruptFlag=1; */
- s_BoardInfos[dev->minor].i_InterruptFlag = 1;
- /* END JK 06.07.04: Management of sevrals boards */
- } /* if(data[9]==1) */
- else {
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_InterruptFlag=0; */
- s_BoardInfos[dev->minor].i_InterruptFlag = 0;
- /* END JK 06.07.04: Management of sevrals boards */
- } /* else if(data[9]==1) */
-
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_Initialised=1; */
- s_BoardInfos[dev->minor].i_Initialised = 1;
- /* END JK 06.07.04: Management of sevrals boards */
-
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* if(i_ScanType==1) */
- if (s_BoardInfos[dev->minor].i_ScanType == 1)
- /* END JK 06.07.04: Management of sevrals boards */
- {
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_Sum=i_Sum+1; */
- s_BoardInfos[dev->minor].i_Sum =
- s_BoardInfos[dev->minor].i_Sum + 1;
- /* END JK 06.07.04: Management of sevrals boards */
-
- insn->unused[0] = 0;
- apci3200_ai_read(dev, s, insn, &ui_Dummy);
- }
-
- return insn->n;
-}
-
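For reference against the data[0]..data[14] layout documented above apci3200_ai_config, here is one possible configuration array, chosen only so that it passes the validity checks in the function (single read, thermocouple, temperature measurement, interrupt disabled). It is an illustrative sketch, not a value set taken from the original driver or its documentation.

/* Illustrative only: a data[] array accepted by apci3200_ai_config. */
unsigned int apci3200_example_config[15] = {
	2,	/* data[0]:  THERMOCOUPLE acquisition */
	0,	/* data[1]:  gain selection (0..7) */
	0,	/* data[2]:  bipolar */
	0,	/* data[3]:  offset range (must be 0) */
	0,	/* data[4]:  DC coupling */
	0,	/* data[5]:  single-ended */
	20,	/* data[6]:  conversion time reload value (20/40/80/160) */
	2,	/* data[7]:  conversion time unit (must be 2) */
	2,	/* data[8]:  temperature measurement */
	0,	/* data[9]:  interrupt disabled */
	0,	/* data[10]: thermocouple type */
	1,	/* data[11]: module number */
	0,	/* data[12]: single read */
	1,	/* data[13]: number of channels to read */
	0,	/* data[14]: RTD not used */
};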
-/*
- * Tests the Selected Analog Input Channel
- *
- * data[0] = 0 TestAnalogInputShortCircuit
- * = 1 TestAnalogInputConnection
- *
- * data[0] : Digital value obtained
- * data[1] : calibration offset
- * data[2] : calibration gain
- */
-static int apci3200_ai_bits_test(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int ui_Configuration = 0;
- int i_Temp; /* ,i_TimeUnit; */
-
- /* if(i_Initialised==0) */
-
- if (s_BoardInfos[dev->minor].i_Initialised == 0) {
- apci3200_reset(dev);
- return -EINVAL;
- } /* if(i_Initialised==0); */
- if (data[0] != 0 && data[0] != 1) {
- printk("\nError in selection of functionality\n");
- apci3200_reset(dev);
- return -EINVAL;
- } /* if(data[0]!=0 && data[0]!=1) */
-
- if (data[0] == 1) /* Perform Short Circuit TEST */
- {
- /**************************/
- /*Set the short-circuit bit */
- /**************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].
- i_Offset + 12) >> 19) & 1) !=
- 1) ;
- /* outl((0x00001000 |i_ChannelNo) , devpriv->iobase+i_Offset + 4); */
- outl((0x00001000 | s_BoardInfos[dev->minor].i_ChannelNo),
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 4);
- /*************************/
- /*Set the time unit to ns */
- /*************************/
- /* i_TimeUnit= i_ADDIDATAConversionTimeUnit;
- i_ADDIDATAConversionTimeUnit= 1; */
- /* i_Temp= i_InterruptFlag ; */
- i_Temp = s_BoardInfos[dev->minor].i_InterruptFlag;
- s_BoardInfos[dev->minor].i_InterruptFlag = 0;
- i_APCI3200_Read1AnalogInputChannel(dev, s, insn, data);
- /* if(i_AutoCalibration == FALSE) */
- if (s_BoardInfos[dev->minor].i_AutoCalibration == FALSE) {
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].
- i_Offset +
- 12) >> 19) & 1) != 1) ;
-
- /* outl((0x00001000 |i_ChannelNo) , devpriv->iobase+i_Offset + 4); */
- outl((0x00001000 | s_BoardInfos[dev->minor].
- i_ChannelNo),
- devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 4);
- data++;
- i_APCI3200_ReadCalibrationOffsetValue(dev, data);
- data++;
- i_APCI3200_ReadCalibrationGainValue(dev, data);
- }
- } else {
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].
- i_Offset + 12) >> 19) & 1) !=
- 1) ;
- /* outl((0x00000800|i_ChannelNo) , devpriv->iobase+i_Offset + 4); */
- outl((0x00000800 | s_BoardInfos[dev->minor].i_ChannelNo),
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 4);
- /* ui_Configuration = inl(devpriv->iobase+i_Offset + 0); */
- ui_Configuration =
- inl(devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 0);
- /*************************/
- /*Set the time unit to ns */
- /*************************/
- /* i_TimeUnit= i_ADDIDATAConversionTimeUnit;
- i_ADDIDATAConversionTimeUnit= 1; */
- /* i_Temp= i_InterruptFlag ; */
- i_Temp = s_BoardInfos[dev->minor].i_InterruptFlag;
- s_BoardInfos[dev->minor].i_InterruptFlag = 0;
- i_APCI3200_Read1AnalogInputChannel(dev, s, insn, data);
- /* if(i_AutoCalibration == FALSE) */
- if (s_BoardInfos[dev->minor].i_AutoCalibration == FALSE) {
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].
- i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl((0x00000800|i_ChannelNo) , devpriv->iobase+i_Offset + 4); */
- outl((0x00000800 | s_BoardInfos[dev->minor].
- i_ChannelNo),
- devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 4);
- data++;
- i_APCI3200_ReadCalibrationOffsetValue(dev, data);
- data++;
- i_APCI3200_ReadCalibrationGainValue(dev, data);
- }
- }
- /* i_InterruptFlag=i_Temp ; */
- s_BoardInfos[dev->minor].i_InterruptFlag = i_Temp;
- return insn->n;
-}
-
-static int apci3200_ai_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- apci3200_reset(dev);
- return insn->n;
-}
-
-static int apci3200_ai_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
-
- int err = 0;
- unsigned int ui_ConvertTime = 0;
- unsigned int ui_ConvertTimeBase = 0;
- unsigned int ui_DelayTime = 0;
- unsigned int ui_DelayTimeBase = 0;
- int i_NbrOfChannel = 0;
- int i_Cpt = 0;
- double d_ConversionTimeForAllChannels = 0.0;
- double d_SCANTimeNewUnit = 0.0;
- unsigned int arg;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_EXT);
- err |= cfc_check_trigger_src(&cmd->scan_begin_src,
- TRIG_TIMER | TRIG_FOLLOW);
- err |= cfc_check_trigger_src(&cmd->convert_src, TRIG_TIMER);
- err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (s_BoardInfos[dev->minor].i_InterruptFlag == 0)
- err |= -EINVAL;
-
- if (err) {
- apci3200_reset(dev);
- return 1;
- }
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= cfc_check_trigger_is_unique(&cmd->start_src);
- err |= cfc_check_trigger_is_unique(&cmd->scan_begin_src);
- err |= cfc_check_trigger_is_unique(&cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err) {
- apci3200_reset(dev);
- return 2;
- }
-
- /* Step 3: check if arguments are trivially valid */
-
- switch (cmd->start_src) {
- case TRIG_NOW:
- err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0);
- break;
- case TRIG_EXT:
- /* validate the trigger edge selection */
- arg = cmd->start_arg & 0xffff;
- if (arg < 1 || arg > 3) {
- cmd->start_arg &= ~0xffff;
- cmd->start_arg |= 1;
- err |= -EINVAL;
- }
- /* validate the trigger mode selection */
- arg = cmd->start_arg >> 16;
- if (arg != 2) {
- cmd->start_arg &= ~(0xffff << 16);
- cmd->start_arg |= (2 << 16);
- err |= -EINVAL;
- }
- break;
- }
-
- err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len);
-
- /* i_FirstChannel=cmd->chanlist[0]; */
- s_BoardInfos[dev->minor].i_FirstChannel = cmd->chanlist[0];
- /* i_LastChannel=cmd->chanlist[1]; */
- s_BoardInfos[dev->minor].i_LastChannel = cmd->chanlist[1];
-
- if (cmd->convert_src == TRIG_TIMER) {
- ui_ConvertTime = cmd->convert_arg & 0xFFFF;
- ui_ConvertTimeBase = cmd->convert_arg >> 16;
- if (ui_ConvertTime != 20 && ui_ConvertTime != 40
- && ui_ConvertTime != 80 && ui_ConvertTime != 160)
- {
- printk("\nThe selection of conversion time reload value is in error\n");
- err++;
- } /* if (ui_ConvertTime!=20 && ui_ConvertTime!=40 && ui_ConvertTime!=80 && ui_ConvertTime!=160 ) */
- if (ui_ConvertTimeBase != 2) {
- printk("\nThe selection of conversion time unit is in error\n");
- err++;
- } /* if(ui_ConvertTimeBase!=2) */
- } else {
- ui_ConvertTime = 0;
- ui_ConvertTimeBase = 0;
- }
- if (cmd->scan_begin_src == TRIG_FOLLOW) {
- ui_DelayTime = 0;
- ui_DelayTimeBase = 0;
- } /* if(cmd->scan_begin_src==TRIG_FOLLOW) */
- else {
- ui_DelayTime = cmd->scan_begin_arg & 0xFFFF;
- ui_DelayTimeBase = cmd->scan_begin_arg >> 16;
- if (ui_DelayTimeBase != 2 && ui_DelayTimeBase != 3) {
- err++;
- printk("\nThe Delay time base selection is in error\n");
- }
- if (ui_DelayTime < 1 || ui_DelayTime > 1023) {
- err++;
- printk("\nThe Delay time value is in error\n");
- }
- if (err) {
- apci3200_reset(dev);
- return 3;
- }
- fpu_begin();
- d_SCANTimeNewUnit = (double)ui_DelayTime;
- /* i_NbrOfChannel= i_LastChannel-i_FirstChannel + 4; */
- i_NbrOfChannel =
- s_BoardInfos[dev->minor].i_LastChannel -
- s_BoardInfos[dev->minor].i_FirstChannel + 4;
- /**********************************************************/
- /*calculate the total conversion time for all the channels */
- /**********************************************************/
- d_ConversionTimeForAllChannels =
- (double)((double)ui_ConvertTime /
- (double)i_NbrOfChannel);
-
- /*******************************/
- /*Convert the frequency into a time */
- /*******************************/
- d_ConversionTimeForAllChannels =
- (double)1.0 / d_ConversionTimeForAllChannels;
- ui_ConvertTimeBase = 3;
- /***********************************/
- /*Test if the time unit is the same */
- /***********************************/
-
- if (ui_DelayTimeBase <= ui_ConvertTimeBase) {
-
- for (i_Cpt = 0;
- i_Cpt < (ui_ConvertTimeBase - ui_DelayTimeBase);
- i_Cpt++) {
-
- d_ConversionTimeForAllChannels =
- d_ConversionTimeForAllChannels * 1000;
- d_ConversionTimeForAllChannels =
- d_ConversionTimeForAllChannels + 1;
- }
- } else {
- for (i_Cpt = 0;
- i_Cpt < (ui_DelayTimeBase - ui_ConvertTimeBase);
- i_Cpt++) {
- d_SCANTimeNewUnit = d_SCANTimeNewUnit * 1000;
-
- }
- }
-
- if (d_ConversionTimeForAllChannels >= d_SCANTimeNewUnit) {
-
- printk("\nSCAN Delay value cannot be used\n");
- /*********************************/
- /*SCAN Delay value cannot be used */
- /*********************************/
- err++;
- }
- fpu_end();
- } /* else if(cmd->scan_begin_src==TRIG_FOLLOW) */
-
- if (err) {
- apci3200_reset(dev);
- return 4;
- }
-
- return 0;
-}
-
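As the checks above show, the command arguments are bit-packed: convert_arg and scan_begin_arg carry the reload value in the low 16 bits and the time base in the upper 16 bits, and for TRIG_EXT the start_arg carries the trigger edge (1..3) in the low 16 bits and the trigger mode (must be 2) in the upper 16 bits. A minimal sketch of how a caller might pack these fields is shown below; the macros are illustrative helpers, not part of the driver or of the comedi core.

/* Illustrative packing helpers for the argument layouts checked above. */
#define APCI3200_TIME_ARG(reload, base)		((reload) | ((base) << 16))
#define APCI3200_EXT_START_ARG(edge, mode)	((edge) | ((mode) << 16))

static void apci3200_example_cmd_args(struct comedi_cmd *cmd)
{
	cmd->start_src = TRIG_EXT;
	cmd->start_arg = APCI3200_EXT_START_ARG(1, 2);	/* edge 1, mode 2 */

	cmd->convert_src = TRIG_TIMER;
	cmd->convert_arg = APCI3200_TIME_ARG(40, 2);	/* reload 40, unit 2 */

	cmd->scan_begin_src = TRIG_TIMER;
	cmd->scan_begin_arg = APCI3200_TIME_ARG(10, 2);	/* delay 10, unit 2 */
}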
-static int apci3200_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int ui_Configuration = 0;
-
- /* i_InterruptFlag=0; */
- /* i_Initialised=0; */
- /* i_Count=0; */
- /* i_Sum=0; */
- s_BoardInfos[dev->minor].i_InterruptFlag = 0;
- s_BoardInfos[dev->minor].i_Initialised = 0;
- s_BoardInfos[dev->minor].i_Count = 0;
- s_BoardInfos[dev->minor].i_Sum = 0;
-
- /*******************/
- /*Read the register */
- /*******************/
- /* ui_Configuration = inl(devpriv->iobase+i_Offset + 8); */
- ui_Configuration =
- inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 8);
- /*****************************/
- /*Reset the START and IRQ bit */
- /*****************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl((ui_Configuration & 0xFFE7FFFF),devpriv->iobase+i_Offset + 8); */
- outl((ui_Configuration & 0xFFE7FFFF),
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 8);
- return 0;
-}
-
-/*
- * Performs the asynchronous acquisition.
- * Determines the mode, 1 or 2.
- */
-static int apci3200_ai_cmd(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct addi_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int ui_Configuration = 0;
- /* INT i_CurrentSource = 0; */
- unsigned int ui_Trigger = 0;
- unsigned int ui_TriggerEdge = 0;
- unsigned int ui_Triggermode = 0;
- unsigned int ui_ScanMode = 0;
- unsigned int ui_ConvertTime = 0;
- unsigned int ui_ConvertTimeBase = 0;
- unsigned int ui_DelayTime = 0;
- unsigned int ui_DelayTimeBase = 0;
- unsigned int ui_DelayMode = 0;
-
- /* i_FirstChannel=cmd->chanlist[0]; */
- /* i_LastChannel=cmd->chanlist[1]; */
- s_BoardInfos[dev->minor].i_FirstChannel = cmd->chanlist[0];
- s_BoardInfos[dev->minor].i_LastChannel = cmd->chanlist[1];
- if (cmd->start_src == TRIG_EXT) {
- ui_Trigger = 1;
- ui_TriggerEdge = cmd->start_arg & 0xFFFF;
- ui_Triggermode = cmd->start_arg >> 16;
- } /* if(cmd->start_src==TRIG_EXT) */
- else {
- ui_Trigger = 0;
- } /* elseif(cmd->start_src==TRIG_EXT) */
-
- if (cmd->stop_src == TRIG_COUNT) {
- ui_ScanMode = 0;
- } /* if (cmd->stop_src==TRIG_COUNT) */
- else {
- ui_ScanMode = 2;
- } /* else if (cmd->stop_src==TRIG_COUNT) */
-
- if (cmd->scan_begin_src == TRIG_FOLLOW) {
- ui_DelayTime = 0;
- ui_DelayTimeBase = 0;
- ui_DelayMode = 0;
- } /* if(cmd->scan_begin_src==TRIG_FOLLOW) */
- else {
- ui_DelayTime = cmd->scan_begin_arg & 0xFFFF;
- ui_DelayTimeBase = cmd->scan_begin_arg >> 16;
- ui_DelayMode = 1;
- } /* else if(cmd->scan_begin_src==TRIG_FOLLOW) */
- if (cmd->convert_src == TRIG_TIMER) {
- ui_ConvertTime = cmd->convert_arg & 0xFFFF;
- ui_ConvertTimeBase = cmd->convert_arg >> 16;
- } else {
- ui_ConvertTime = 0;
- ui_ConvertTimeBase = 0;
- }
-
- /* if(i_ADDIDATAType ==1 || ((i_ADDIDATAType==2))) */
- /* { */
- /**************************************************/
- /*Read the old configuration of the current source */
- /**************************************************/
- /* ui_Configuration = inl(devpriv->iobase+i_Offset + 12); */
- ui_Configuration =
- inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 12);
- /***********************************************/
- /*Write the configuration of the current source */
- /***********************************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl((ui_Configuration & 0xFFC00000 ), devpriv->iobase+i_Offset +12); */
- outl((ui_Configuration & 0xFFC00000),
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 12);
- /* } */
- ui_Configuration = 0;
-
- /* ui_Configuration = i_FirstChannel |(i_LastChannel << 8)| 0x00100000 | */
- ui_Configuration =
- s_BoardInfos[dev->minor].i_FirstChannel | (s_BoardInfos[dev->
- minor].
- i_LastChannel << 8) | 0x00100000 | (ui_Trigger << 24) |
- (ui_TriggerEdge << 25) | (ui_Triggermode << 27) | (ui_DelayMode
- << 18) | (ui_ScanMode << 16);
-
- /*************************/
- /*Write the Configuration */
- /*************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl( ui_Configuration, devpriv->iobase+i_Offset + 0x8); */
- outl(ui_Configuration,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 0x8);
- /***********************/
- /*Write the Delay Value */
- /***********************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(ui_DelayTime,devpriv->iobase+i_Offset + 40); */
- outl(ui_DelayTime,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 40);
- /***************************/
- /*Write the Delay time base */
- /***************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(ui_DelayTimeBase,devpriv->iobase+i_Offset + 44); */
- outl(ui_DelayTimeBase,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 44);
- /*********************************/
- /*Write the conversion time value */
- /*********************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(ui_ConvertTime,devpriv->iobase+i_Offset + 32); */
- outl(ui_ConvertTime,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 32);
-
- /********************************/
- /*Write the conversion time base */
- /********************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(ui_ConvertTimeBase,devpriv->iobase+i_Offset + 36); */
- outl(ui_ConvertTimeBase,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 36);
- /*******************/
- /*Read the register */
- /*******************/
- /* ui_Configuration = inl(devpriv->iobase+i_Offset + 4); */
- ui_Configuration =
- inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 4);
- /******************/
- /*Set the SCAN bit */
- /******************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
-
- /* outl(((ui_Configuration & 0x1E0FF) | 0x00002000),devpriv->iobase+i_Offset + 4); */
- outl(((ui_Configuration & 0x1E0FF) | 0x00002000),
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 4);
- /*******************/
- /*Read the register */
- /*******************/
- ui_Configuration = 0;
- /* ui_Configuration = inl(devpriv->iobase+i_Offset + 8); */
- ui_Configuration =
- inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 8);
-
- /*******************/
- /*Set the START bit */
- /*******************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl((ui_Configuration | 0x00080000),devpriv->iobase+i_Offset + 8); */
- outl((ui_Configuration | 0x00080000),
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 8);
- return 0;
-}
-
-/*
- * This function copies the acquired data (from the FIFO) to the Comedi buffer.
- */
-static int i_APCI3200_InterruptHandleEos(struct comedi_device *dev)
-{
- struct addi_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- unsigned int ui_StatusRegister = 0;
-
- /* BEGIN JK 18.10.2004: APCI-3200 Driver update 0.7.57 -> 0.7.68 */
- /* comedi_async *async = s->async; */
- /* UINT *data; */
- /* data=async->data+async->buf_int_ptr;//new samples added from here onwards */
- int n = 0, i = 0;
- /* END JK 18.10.2004: APCI-3200 Driver update 0.7.57 -> 0.7.68 */
-
- /************************************/
- /*Read the interrupt status register */
- /************************************/
- /* ui_StatusRegister = inl(devpriv->iobase+i_Offset + 16); */
- ui_StatusRegister =
- inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 16);
-
- /*************************/
- /*Test if an interrupt occurred */
- /*************************/
-
- if ((ui_StatusRegister & 0x2) == 0x2) {
- /*************************/
- /*Read the channel number */
- /*************************/
- /* ui_ChannelNumber = inl(devpriv->iobase+i_Offset + 24); */
- /* BEGIN JK 18.10.2004: APCI-3200 Driver update 0.7.57 -> 0.7.68 */
- /* This value is not used */
- /* ui_ChannelNumber = inl(devpriv->iobase+s_BoardInfos [dev->minor].i_Offset + 24); */
- /* END JK 18.10.2004: APCI-3200 Driver update 0.7.57 -> 0.7.68 */
-
- /*************************************/
- /*Read the digital Analog Input value */
- /*************************************/
-
- /* data[i_Count] = inl(devpriv->iobase+i_Offset + 28); */
- /* Begin JK 18.10.2004: APCI-3200 Driver update 0.7.57 -> 0.7.68 */
- /* data[s_BoardInfos [dev->minor].i_Count] = inl(devpriv->iobase+s_BoardInfos [dev->minor].i_Offset + 28); */
- s_BoardInfos[dev->minor].ui_ScanValueArray[s_BoardInfos[dev->
- minor].i_Count] =
- inl(devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 28);
- /* End JK 18.10.2004: APCI-3200 Driver update 0.7.57 -> 0.7.68 */
-
- /* if((i_Count == (i_LastChannel-i_FirstChannel+3))) */
- if ((s_BoardInfos[dev->minor].i_Count ==
- (s_BoardInfos[dev->minor].i_LastChannel -
- s_BoardInfos[dev->minor].
- i_FirstChannel + 3))) {
-
- /* Begin JK 22.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
- s_BoardInfos[dev->minor].i_Count++;
-
- for (i = s_BoardInfos[dev->minor].i_FirstChannel;
- i <= s_BoardInfos[dev->minor].i_LastChannel;
- i++) {
- i_APCI3200_GetChannelCalibrationValue(dev, i,
- &s_BoardInfos[dev->minor].
- ui_ScanValueArray[s_BoardInfos[dev->
- minor].i_Count + ((i -
- s_BoardInfos
- [dev->minor].
- i_FirstChannel)
- * 3)],
- &s_BoardInfos[dev->minor].
- ui_ScanValueArray[s_BoardInfos[dev->
- minor].i_Count + ((i -
- s_BoardInfos
- [dev->minor].
- i_FirstChannel)
- * 3) + 1],
- &s_BoardInfos[dev->minor].
- ui_ScanValueArray[s_BoardInfos[dev->
- minor].i_Count + ((i -
- s_BoardInfos
- [dev->minor].
- i_FirstChannel)
- * 3) + 2]);
- }
-
- /* End JK 22.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
-
- /* i_Count=-1; */
-
- s_BoardInfos[dev->minor].i_Count = -1;
-
- /* async->buf_int_count+=(i_LastChannel-i_FirstChannel+4)*sizeof(unsigned int); */
- /* Begin JK 18.10.2004: APCI-3200 Driver update 0.7.57 -> 0.7.68 */
- /* async->buf_int_count+=(s_BoardInfos [dev->minor].i_LastChannel-s_BoardInfos [dev->minor].i_FirstChannel+4)*sizeof(unsigned int); */
- /* End JK 18.10.2004: APCI-3200 Driver update 0.7.57 -> 0.7.68 */
- /* async->buf_int_ptr+=(i_LastChannel-i_FirstChannel+4)*sizeof(unsigned int); */
- /* Begin JK 18.10.2004: APCI-3200 Driver update 0.7.57 -> 0.7.68 */
- /* async->buf_int_ptr+=(s_BoardInfos [dev->minor].i_LastChannel-s_BoardInfos [dev->minor].i_FirstChannel+4)*sizeof(unsigned int); */
- /* comedi_eos(dev,s); */
-
- /* Set the event type (Comedi Buffer End Of Scan) */
- s->async->events |= COMEDI_CB_EOS;
-
- /* Test if enough memory is available and allocate it for 7 values */
- n = comedi_buf_write_alloc(s,
- (7 + 12) * sizeof(unsigned int));
-
- /* If not enough memory available, event is set to Comedi Buffer Error */
- if (n > ((7 + 12) * sizeof(unsigned int))) {
- printk("\ncomedi_buf_write_alloc n = %i", n);
- s->async->events |= COMEDI_CB_ERROR;
- }
- /* Write all 7 scan values in the comedi buffer */
- comedi_buf_memcpy_to(s, 0,
- (unsigned int *) s_BoardInfos[dev->minor].
- ui_ScanValueArray, (7 + 12) * sizeof(unsigned int));
-
- /* Update the comedi buffer pointer indexes */
- comedi_buf_write_free(s,
- (7 + 12) * sizeof(unsigned int));
-
- /* Send events */
- comedi_event(dev, s);
- /* End JK 18.10.2004: APCI-3200 Driver update 0.7.57 -> 0.7.68 */
-
- /* BEGIN JK 18.10.2004: APCI-3200 Driver update 0.7.57 -> 0.7.68 */
- /* */
- /* if (s->async->buf_int_ptr>=s->async->data_len) // for buffer roll over */
- /* { */
- /* /* buffer rollover */ */
- /* s->async->buf_int_ptr=0; */
- /* comedi_eobuf(dev,s); */
- /* } */
- /* End JK 18.10.2004: APCI-3200 Driver update 0.7.57 -> 0.7.68 */
- }
- /* i_Count++; */
- s_BoardInfos[dev->minor].i_Count++;
- }
- /* i_InterruptFlag=0; */
- s_BoardInfos[dev->minor].i_InterruptFlag = 0;
- return 0;
-}
-
-static void apci3200_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct addi_private *devpriv = dev->private;
- unsigned int ui_StatusRegister = 0;
- unsigned int ui_ChannelNumber = 0;
- int i_CalibrationFlag = 0;
- int i_CJCFlag = 0;
- unsigned int ui_DummyValue = 0;
- unsigned int ui_DigitalTemperature = 0;
- unsigned int ui_DigitalInput = 0;
- int i_ConvertCJCCalibration;
- /* BEGIN JK TEST */
- int i_ReturnValue = 0;
- /* END JK TEST */
-
- /* switch(i_ScanType) */
- switch (s_BoardInfos[dev->minor].i_ScanType) {
- case 0:
- case 1:
- /* switch(i_ADDIDATAType) */
- switch (s_BoardInfos[dev->minor].i_ADDIDATAType) {
- case 0:
- case 1:
-
- /************************************/
- /*Read the interrupt status register */
- /************************************/
- /* ui_StatusRegister = inl(devpriv->iobase+i_Offset + 16); */
- ui_StatusRegister =
- inl(devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 16);
- if ((ui_StatusRegister & 0x2) == 0x2) {
- /* i_CalibrationFlag = ((inl(devpriv->iobase+i_Offset + 12) & 0x00060000) >> 17); */
- i_CalibrationFlag =
- ((inl(devpriv->iobase +
- s_BoardInfos[dev->
- minor].
- i_Offset +
- 12) & 0x00060000) >>
- 17);
- /*************************/
- /*Read the channel number */
- /*************************/
- /* ui_ChannelNumber = inl(devpriv->iobase+i_Offset + 24); */
-
- /*************************************/
- /*Read the digital analog input value */
- /*************************************/
- /* ui_DigitalInput = inl(devpriv->iobase+i_Offset + 28); */
- ui_DigitalInput =
- inl(devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 28);
-
- /***********************************************/
- /* Test if the value read is the channel value */
- /***********************************************/
- if (i_CalibrationFlag == 0) {
- /* ui_InterruptChannelValue[i_Count + 0] = ui_DigitalInput; */
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue
- [s_BoardInfos[dev->minor].
- i_Count + 0] = ui_DigitalInput;
-
- /* Begin JK 22.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
- /*
- i_APCI3200_GetChannelCalibrationValue (dev, s_BoardInfos [dev->minor].ui_Channel_num,
- &s_BoardInfos [dev->minor].ui_InterruptChannelValue[s_BoardInfos [dev->minor].i_Count + 6],
- &s_BoardInfos [dev->minor].ui_InterruptChannelValue[s_BoardInfos [dev->minor].i_Count + 7],
- &s_BoardInfos [dev->minor].ui_InterruptChannelValue[s_BoardInfos [dev->minor].i_Count + 8]);
- */
- /* End JK 22.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
-
- /******************************************************/
- /*Start the conversion of the calibration offset value */
- /******************************************************/
- i_APCI3200_ReadCalibrationOffsetValue
- (dev, &ui_DummyValue);
- } /* if (i_CalibrationFlag == 0) */
- /**********************************************************/
- /* Test if the value read is the calibration offset value */
- /**********************************************************/
-
- if (i_CalibrationFlag == 1) {
-
- /******************/
- /* Save the value */
- /******************/
-
- /* ui_InterruptChannelValue[i_Count + 1] = ui_DigitalInput; */
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue
- [s_BoardInfos[dev->minor].
- i_Count + 1] = ui_DigitalInput;
-
- /******************************************************/
- /* Start the conversion of the calibration gain value */
- /******************************************************/
- i_APCI3200_ReadCalibrationGainValue(dev,
- &ui_DummyValue);
- } /* if (i_CalibrationFlag == 1) */
- /******************************************************/
- /*Test if the value read is the calibration gain value */
- /******************************************************/
-
- if (i_CalibrationFlag == 2) {
-
- /****************/
- /*Save the value */
- /****************/
- /* ui_InterruptChannelValue[i_Count + 2] = ui_DigitalInput; */
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue
- [s_BoardInfos[dev->minor].
- i_Count + 2] = ui_DigitalInput;
- /* if(i_ScanType==1) */
- if (s_BoardInfos[dev->minor].
- i_ScanType == 1) {
-
- /* i_InterruptFlag=0; */
- s_BoardInfos[dev->minor].
- i_InterruptFlag = 0;
- /* i_Count=i_Count + 6; */
- /* Begin JK 22.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
- /* s_BoardInfos [dev->minor].i_Count=s_BoardInfos [dev->minor].i_Count + 6; */
- s_BoardInfos[dev->minor].
- i_Count =
- s_BoardInfos[dev->
- minor].i_Count + 9;
- /* End JK 22.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
- } /* if(i_ScanType==1) */
- else {
- /* i_Count=0; */
- s_BoardInfos[dev->minor].
- i_Count = 0;
- } /* elseif(i_ScanType==1) */
- /* if(i_ScanType!=1) */
- if (s_BoardInfos[dev->minor].
- i_ScanType != 1) {
- i_ReturnValue = send_sig(SIGIO, devpriv->tsk_Current, 0); /* send signal to the sample */
- } /* if(i_ScanType!=1) */
- else {
- /* if(i_ChannelCount==i_Sum) */
- if (s_BoardInfos[dev->minor].
- i_ChannelCount ==
- s_BoardInfos[dev->
- minor].i_Sum) {
- send_sig(SIGIO, devpriv->tsk_Current, 0); /* send signal to the sample */
- }
- } /* if(i_ScanType!=1) */
- } /* if (i_CalibrationFlag == 2) */
- } /* if ((ui_StatusRegister & 0x2) == 0x2) */
-
- break;
-
- case 2:
- /************************************/
- /*Read the interrupt status register */
- /************************************/
-
- /* ui_StatusRegister = inl(devpriv->iobase+i_Offset + 16); */
- ui_StatusRegister =
- inl(devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 16);
- /*************************/
- /*Test if interrupt occur */
- /*************************/
-
- if ((ui_StatusRegister & 0x2) == 0x2) {
-
- /* i_CJCFlag = ((inl(devpriv->iobase+i_Offset + 4) & 0x00000400) >> 10); */
- i_CJCFlag =
- ((inl(devpriv->iobase +
- s_BoardInfos[dev->
- minor].
- i_Offset +
- 4) & 0x00000400) >> 10);
-
- /* i_CalibrationFlag = ((inl(devpriv->iobase+i_Offset + 12) & 0x00060000) >> 17); */
- i_CalibrationFlag =
- ((inl(devpriv->iobase +
- s_BoardInfos[dev->
- minor].
- i_Offset +
- 12) & 0x00060000) >>
- 17);
-
- /*************************/
- /*Read the channel number */
- /*************************/
-
- /* ui_ChannelNumber = inl(devpriv->iobase+i_Offset + 24); */
- ui_ChannelNumber =
- inl(devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 24);
- /* Begin JK 22.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
- s_BoardInfos[dev->minor].ui_Channel_num =
- ui_ChannelNumber;
- /* End JK 22.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
-
- /************************************/
- /*Read the digital temperature value */
- /************************************/
- /* ui_DigitalTemperature = inl(devpriv->iobase+i_Offset + 28); */
- ui_DigitalTemperature =
- inl(devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 28);
-
- /*********************************************/
- /*Test if the value read is the channel value */
- /*********************************************/
-
- if ((i_CalibrationFlag == 0)
- && (i_CJCFlag == 0)) {
- /* ui_InterruptChannelValue[i_Count + 0]=ui_DigitalTemperature; */
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue
- [s_BoardInfos[dev->minor].
- i_Count + 0] =
- ui_DigitalTemperature;
-
- /*********************************/
- /*Start the conversion of the CJC */
- /*********************************/
- i_APCI3200_ReadCJCValue(dev,
- &ui_DummyValue);
-
- } /* if ((i_CalibrationFlag == 0) && (i_CJCFlag == 0)) */
-
- /*****************************************/
- /*Test if the value read is the CJC value */
- /*****************************************/
-
- if ((i_CJCFlag == 1)
- && (i_CalibrationFlag == 0)) {
- /* ui_InterruptChannelValue[i_Count + 3]=ui_DigitalTemperature; */
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue
- [s_BoardInfos[dev->minor].
- i_Count + 3] =
- ui_DigitalTemperature;
-
- /******************************************************/
- /*Start the conversion of the calibration offset value */
- /******************************************************/
- i_APCI3200_ReadCalibrationOffsetValue
- (dev, &ui_DummyValue);
- } /* if ((i_CJCFlag == 1) && (i_CalibrationFlag == 0)) */
-
- /********************************************************/
- /*Test if the value read is the calibration offset value */
- /********************************************************/
-
- if ((i_CalibrationFlag == 1)
- && (i_CJCFlag == 0)) {
- /* ui_InterruptChannelValue[i_Count + 1]=ui_DigitalTemperature; */
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue
- [s_BoardInfos[dev->minor].
- i_Count + 1] =
- ui_DigitalTemperature;
-
- /****************************************************/
- /*Start the conversion of the calibration gain value */
- /****************************************************/
- i_APCI3200_ReadCalibrationGainValue(dev,
- &ui_DummyValue);
-
- } /* if ((i_CalibrationFlag == 1) && (i_CJCFlag == 0)) */
-
- /******************************************************/
- /*Test if the value read is the calibration gain value */
- /******************************************************/
-
- if ((i_CalibrationFlag == 2)
- && (i_CJCFlag == 0)) {
- /* ui_InterruptChannelValue[i_Count + 2]=ui_DigitalTemperature; */
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue
- [s_BoardInfos[dev->minor].
- i_Count + 2] =
- ui_DigitalTemperature;
-
- /**********************************************************/
- /*Test if the Calibration channel must be read for the CJC */
- /**********************************************************/
-
- /*Test if the polarity is the same */
- /**********************************/
- /* if(i_CJCPolarity!=i_ADDIDATAPolarity) */
- if (s_BoardInfos[dev->minor].
- i_CJCPolarity !=
- s_BoardInfos[dev->minor].
- i_ADDIDATAPolarity) {
- i_ConvertCJCCalibration = 1;
- } /* if(i_CJCPolarity!=i_ADDIDATAPolarity) */
- else {
- /* if(i_CJCGain==i_ADDIDATAGain) */
- if (s_BoardInfos[dev->minor].
- i_CJCGain ==
- s_BoardInfos[dev->
- minor].
- i_ADDIDATAGain) {
- i_ConvertCJCCalibration
- = 0;
- } /* if(i_CJCGain==i_ADDIDATAGain) */
- else {
- i_ConvertCJCCalibration
- = 1;
- } /* elseif(i_CJCGain==i_ADDIDATAGain) */
- } /* elseif(i_CJCPolarity!=i_ADDIDATAPolarity) */
- if (i_ConvertCJCCalibration == 1) {
- /****************************************************************/
- /*Start the conversion of the calibration gain value for the CJC */
- /****************************************************************/
- i_APCI3200_ReadCJCCalOffset(dev,
- &ui_DummyValue);
-
- } /* if(i_ConvertCJCCalibration==1) */
- else {
- /* ui_InterruptChannelValue[i_Count + 4]=0; */
- /* ui_InterruptChannelValue[i_Count + 5]=0; */
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue
- [s_BoardInfos[dev->
- minor].i_Count +
- 4] = 0;
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue
- [s_BoardInfos[dev->
- minor].i_Count +
- 5] = 0;
- } /* elseif(i_ConvertCJCCalibration==1) */
- } /* else if ((i_CalibrationFlag == 2) && (i_CJCFlag == 0)) */
-
- /********************************************************************/
- /*Test if the value read is the calibration offset value for the CJC */
- /********************************************************************/
-
- if ((i_CalibrationFlag == 1)
- && (i_CJCFlag == 1)) {
- /* ui_InterruptChannelValue[i_Count + 4]=ui_DigitalTemperature; */
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue
- [s_BoardInfos[dev->minor].
- i_Count + 4] =
- ui_DigitalTemperature;
-
- /****************************************************************/
- /*Start the conversion of the calibration gain value for the CJC */
- /****************************************************************/
- i_APCI3200_ReadCJCCalGain(dev,
- &ui_DummyValue);
-
- } /* if ((i_CalibrationFlag == 1) && (i_CJCFlag == 1)) */
-
- /******************************************************************/
- /*Test if the value read is the calibration gain value for the CJC */
- /******************************************************************/
-
- if ((i_CalibrationFlag == 2)
- && (i_CJCFlag == 1)) {
- /* ui_InterruptChannelValue[i_Count + 5]=ui_DigitalTemperature; */
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue
- [s_BoardInfos[dev->minor].
- i_Count + 5] =
- ui_DigitalTemperature;
-
- /* if(i_ScanType==1) */
- if (s_BoardInfos[dev->minor].
- i_ScanType == 1) {
-
- /* i_InterruptFlag=0; */
- s_BoardInfos[dev->minor].
- i_InterruptFlag = 0;
- /* i_Count=i_Count + 6; */
- /* Begin JK 22.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
- /* s_BoardInfos [dev->minor].i_Count=s_BoardInfos [dev->minor].i_Count + 6; */
- s_BoardInfos[dev->minor].
- i_Count =
- s_BoardInfos[dev->
- minor].i_Count + 9;
- /* End JK 22.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
- } /* if(i_ScanType==1) */
- else {
- /* i_Count=0; */
- s_BoardInfos[dev->minor].
- i_Count = 0;
- } /* elseif(i_ScanType==1) */
-
- /* if(i_ScanType!=1) */
- if (s_BoardInfos[dev->minor].
- i_ScanType != 1) {
- send_sig(SIGIO, devpriv->tsk_Current, 0); /* send signal to the sample */
- } /* if(i_ScanType!=1) */
- else {
- /* if(i_ChannelCount==i_Sum) */
- if (s_BoardInfos[dev->minor].
- i_ChannelCount ==
- s_BoardInfos[dev->
- minor].i_Sum) {
- send_sig(SIGIO, devpriv->tsk_Current, 0); /* send signal to the sample */
-
- } /* if(i_ChannelCount==i_Sum) */
- } /* else if(i_ScanType!=1) */
- } /* if ((i_CalibrationFlag == 2) && (i_CJCFlag == 1)) */
-
- } /* else if ((ui_StatusRegister & 0x2) == 0x2) */
- break;
- } /* switch(i_ADDIDATAType) */
- break;
- case 2:
- case 3:
- i_APCI3200_InterruptHandleEos(dev);
- break;
- } /* switch(i_ScanType) */
- return;
-}
diff --git a/drivers/staging/comedi/drivers/addi_apci_035.c b/drivers/staging/comedi/drivers/addi_apci_035.c
deleted file mode 100644
index af70c840188..00000000000
--- a/drivers/staging/comedi/drivers/addi_apci_035.c
+++ /dev/null
@@ -1,77 +0,0 @@
-#include <linux/module.h>
-#include <linux/pci.h>
-
-#include "../comedidev.h"
-#include "comedi_fc.h"
-#include "amcc_s5933.h"
-
-#include "addi-data/addi_common.h"
-
-#define ADDIDATA_WATCHDOG 2 /* Or should it be something else */
-
-#include "addi-data/addi_eeprom.c"
-#include "addi-data/hwdrv_apci035.c"
-#include "addi-data/addi_common.c"
-
-static const struct addi_board apci035_boardtypes[] = {
- {
- .pc_DriverName = "apci035",
- .i_IorangeBase1 = APCI035_ADDRESS_RANGE,
- .i_PCIEeprom = 1,
- .pc_EepromChip = "S5920",
- .i_NbrAiChannel = 16,
- .i_NbrAiChannelDiff = 8,
- .i_AiChannelList = 16,
- .i_AiMaxdata = 0xff,
- .pr_AiRangelist = &range_apci035_ai,
- .i_Timer = 1,
- .ui_MinAcquisitiontimeNs = 10000,
- .ui_MinDelaytimeNs = 100000,
- .interrupt = apci035_interrupt,
- .reset = apci035_reset,
- .ai_config = apci035_ai_config,
- .ai_read = apci035_ai_read,
- .timer_config = apci035_timer_config,
- .timer_write = apci035_timer_write,
- .timer_read = apci035_timer_read,
- },
-};
-
-static int apci035_auto_attach(struct comedi_device *dev,
- unsigned long context)
-{
- dev->board_ptr = &apci035_boardtypes[0];
-
- return addi_auto_attach(dev, context);
-}
-
-static struct comedi_driver apci035_driver = {
- .driver_name = "addi_apci_035",
- .module = THIS_MODULE,
- .auto_attach = apci035_auto_attach,
- .detach = i_ADDI_Detach,
-};
-
-static int apci035_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &apci035_driver, id->driver_data);
-}
-
-static const struct pci_device_id apci035_pci_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x0300) },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, apci035_pci_table);
-
-static struct pci_driver apci035_pci_driver = {
- .name = "addi_apci_035",
- .id_table = apci035_pci_table,
- .probe = apci035_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(apci035_driver, apci035_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/addi_apci_1032.c b/drivers/staging/comedi/drivers/addi_apci_1032.c
index 840cb289507..bf14165297b 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1032.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1032.c
@@ -258,9 +258,8 @@ static irqreturn_t apci1032_interrupt(int irq, void *d)
outl(ctrl & ~APCI1032_CTRL_INT_ENA, dev->iobase + APCI1032_CTRL_REG);
s->state = inl(dev->iobase + APCI1032_STATUS_REG) & 0xffff;
- comedi_buf_put(s, s->state);
- s->async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOS;
- comedi_event(dev, s);
+ comedi_buf_write_samples(s, &s->state, 1);
+ comedi_handle_events(dev, s);
/* enable the interrupt */
outl(ctrl, dev->iobase + APCI1032_CTRL_REG);
diff --git a/drivers/staging/comedi/drivers/addi_apci_1500.c b/drivers/staging/comedi/drivers/addi_apci_1500.c
index b7a284ac664..30b132c3d09 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1500.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1500.c
@@ -1,54 +1,109 @@
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
#include "../comedidev.h"
#include "comedi_fc.h"
#include "amcc_s5933.h"
-#include "addi-data/addi_common.h"
+struct apci1500_private {
+ int iobase;
+ int i_IobaseAmcc;
+ int i_IobaseAddon;
+ int i_IobaseReserved;
+ unsigned char b_OutputMemoryStatus;
+ struct task_struct *tsk_Current;
+};
-#include "addi-data/addi_eeprom.c"
#include "addi-data/hwdrv_apci1500.c"
-#include "addi-data/addi_common.c"
-
-static const struct addi_board apci1500_boardtypes[] = {
- {
- .pc_DriverName = "apci1500",
- .i_IorangeBase1 = APCI1500_ADDRESS_RANGE,
- .i_PCIEeprom = 0,
- .i_NbrDiChannel = 16,
- .i_NbrDoChannel = 16,
- .i_DoMaxdata = 0xffff,
- .i_Timer = 1,
- .interrupt = apci1500_interrupt,
- .reset = apci1500_reset,
- .di_config = apci1500_di_config,
- .di_read = apci1500_di_read,
- .di_write = apci1500_di_write,
- .di_bits = apci1500_di_insn_bits,
- .do_config = apci1500_do_config,
- .do_write = apci1500_do_write,
- .do_bits = apci1500_do_bits,
- .timer_config = apci1500_timer_config,
- .timer_write = apci1500_timer_write,
- .timer_read = apci1500_timer_read,
- .timer_bits = apci1500_timer_bits,
- },
-};
static int apci1500_auto_attach(struct comedi_device *dev,
unsigned long context)
{
- dev->board_ptr = &apci1500_boardtypes[0];
+ struct pci_dev *pcidev = comedi_to_pci_dev(dev);
+ struct apci1500_private *devpriv;
+ struct comedi_subdevice *s;
+ int ret;
+
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
+ if (!devpriv)
+ return -ENOMEM;
+
+ ret = comedi_pci_enable(dev);
+ if (ret)
+ return ret;
+
+ dev->iobase = pci_resource_start(pcidev, 1);
+ devpriv->iobase = dev->iobase;
+ devpriv->i_IobaseAmcc = pci_resource_start(pcidev, 0);
+ devpriv->i_IobaseAddon = pci_resource_start(pcidev, 2);
+ devpriv->i_IobaseReserved = pci_resource_start(pcidev, 3);
+
+ if (pcidev->irq > 0) {
+ ret = request_irq(pcidev->irq, apci1500_interrupt, IRQF_SHARED,
+ dev->board_name, dev);
+ if (ret == 0)
+ dev->irq = pcidev->irq;
+ }
+
+ ret = comedi_alloc_subdevices(dev, 3);
+ if (ret)
+ return ret;
- return addi_auto_attach(dev, context);
+ /* Allocate and Initialise DI Subdevice Structures */
+ s = &dev->subdevices[0];
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 16;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_config = apci1500_di_config;
+ s->insn_read = apci1500_di_read;
+ s->insn_write = apci1500_di_write;
+ s->insn_bits = apci1500_di_insn_bits;
+
+ /* Allocate and Initialise DO Subdevice Structures */
+ s = &dev->subdevices[1];
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
+ s->n_chan = 16;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_config = apci1500_do_config;
+ s->insn_write = apci1500_do_write;
+ s->insn_bits = apci1500_do_bits;
+
+ /* Allocate and Initialise Timer Subdevice Structures */
+ s = &dev->subdevices[2];
+ s->type = COMEDI_SUBD_TIMER;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 1;
+ s->maxdata = 0;
+ s->len_chanlist = 1;
+ s->range_table = &range_digital;
+ s->insn_write = apci1500_timer_write;
+ s->insn_read = apci1500_timer_read;
+ s->insn_config = apci1500_timer_config;
+ s->insn_bits = apci1500_timer_bits;
+
+ apci1500_reset(dev);
+
+ return 0;
+}
+
+static void apci1500_detach(struct comedi_device *dev)
+{
+ if (dev->iobase)
+ apci1500_reset(dev);
+ comedi_pci_detach(dev);
}
static struct comedi_driver apci1500_driver = {
.driver_name = "addi_apci_1500",
.module = THIS_MODULE,
.auto_attach = apci1500_auto_attach,
- .detach = i_ADDI_Detach,
+ .detach = apci1500_detach,
};
static int apci1500_pci_probe(struct pci_dev *dev,
diff --git a/drivers/staging/comedi/drivers/addi_apci_1516.c b/drivers/staging/comedi/drivers/addi_apci_1516.c
index 55d00fd94c9..d8410415cc9 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1516.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1516.c
@@ -163,7 +163,7 @@ static int apci1516_auto_attach(struct comedi_device *dev,
s = &dev->subdevices[1];
if (this_board->do_nchan) {
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITEABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = this_board->do_nchan;
s->maxdata = 1;
s->range_table = &range_digital;
diff --git a/drivers/staging/comedi/drivers/addi_apci_1564.c b/drivers/staging/comedi/drivers/addi_apci_1564.c
index 688b015a834..6872b69da5d 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1564.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1564.c
@@ -28,16 +28,91 @@
#include "../comedidev.h"
#include "comedi_fc.h"
-#include "amcc_s5933.h"
+#include "addi_tcw.h"
#include "addi_watchdog.h"
+/*
+ * PCI BAR 0
+ *
+ * PLD Revision 1.0 I/O Mapping
+ * 0x00 93C76 EEPROM
+ * 0x04 - 0x18 Timer 12-Bit
+ *
+ * PLD Revision 2.x I/O Mapping
+ * 0x00 93C76 EEPROM
+ * 0x04 - 0x14 Digital Input
+ * 0x18 - 0x25 Digital Output
+ * 0x28 - 0x44 Watchdog 8-Bit
+ * 0x48 - 0x64 Timer 12-Bit
+ */
+#define APCI1564_EEPROM_REG 0x00
+#define APCI1564_EEPROM_VCC_STATUS (1 << 8)
+#define APCI1564_EEPROM_TO_REV(x) (((x) >> 4) & 0xf)
+#define APCI1564_EEPROM_DI (1 << 3)
+#define APCI1564_EEPROM_DO (1 << 2)
+#define APCI1564_EEPROM_CS (1 << 1)
+#define APCI1564_EEPROM_CLK (1 << 0)
+#define APCI1564_REV1_TIMER_IOBASE 0x04
+#define APCI1564_REV2_MAIN_IOBASE 0x04
+#define APCI1564_REV2_TIMER_IOBASE 0x48
+
+/*
+ * PCI BAR 1
+ *
+ * PLD Revision 1.0 I/O Mapping
+ * 0x00 - 0x10 Digital Input
+ * 0x14 - 0x20 Digital Output
+ * 0x24 - 0x3c Watchdog 8-Bit
+ *
+ * PLD Revision 2.x I/O Mapping
+ * 0x00 Counter_0
+ * 0x20 Counter_1
+ * 0x30 Counter_3
+ */
+#define APCI1564_REV1_MAIN_IOBASE 0x00
+
+/*
+ * dev->iobase Register Map
+ * PLD Revision 1.0 - PCI BAR 1 + 0x00
+ * PLD Revision 2.x - PCI BAR 0 + 0x04
+ */
+#define APCI1564_DI_REG 0x00
+#define APCI1564_DI_INT_MODE1_REG 0x04
+#define APCI1564_DI_INT_MODE2_REG 0x08
+#define APCI1564_DI_INT_STATUS_REG 0x0c
+#define APCI1564_DI_IRQ_REG 0x10
+#define APCI1564_DO_REG 0x14
+#define APCI1564_DO_INT_CTRL_REG 0x18
+#define APCI1564_DO_INT_STATUS_REG 0x1c
+#define APCI1564_DO_IRQ_REG 0x20
+#define APCI1564_WDOG_REG 0x24
+#define APCI1564_WDOG_RELOAD_REG 0x28
+#define APCI1564_WDOG_TIMEBASE_REG 0x2c
+#define APCI1564_WDOG_CTRL_REG 0x30
+#define APCI1564_WDOG_STATUS_REG 0x34
+#define APCI1564_WDOG_IRQ_REG 0x38
+#define APCI1564_WDOG_WARN_TIMEVAL_REG 0x3c
+#define APCI1564_WDOG_WARN_TIMEBASE_REG 0x40
+
+/*
+ * devpriv->timer Register Map (see addi_tcw.h for register/bit defines)
+ * PLD Revision 1.0 - PCI BAR 0 + 0x04
+ * PLD Revision 2.x - PCI BAR 0 + 0x48
+ */
+
+/*
+ * devpriv->counters Register Map (see addi_tcw.h for register/bit defines)
+ * PLD Revision 2.x - PCI BAR 1 + 0x00
+ */
+#define APCI1564_COUNTER(x) ((x) * 0x20)
+
struct apci1564_private {
- unsigned int amcc_iobase; /* base of AMCC I/O registers */
+ unsigned long eeprom; /* base address of EEPROM register */
+ unsigned long timer; /* base address of 12-bit timer */
+ unsigned long counters; /* base address of 32-bit counters */
unsigned int mode1; /* rising-edge/high level channels */
unsigned int mode2; /* falling-edge/low level channels */
unsigned int ctrl; /* interrupt mode OR (edge) . AND (level) */
- unsigned char timer_select_mode;
- unsigned char mode_select_register;
struct task_struct *tsk_current;
};
@@ -48,27 +123,30 @@ static int apci1564_reset(struct comedi_device *dev)
struct apci1564_private *devpriv = dev->private;
/* Disable the input interrupts and reset status register */
- outl(0x0, devpriv->amcc_iobase + APCI1564_DI_IRQ_REG);
- inl(devpriv->amcc_iobase + APCI1564_DI_INT_STATUS_REG);
- outl(0x0, devpriv->amcc_iobase + APCI1564_DI_INT_MODE1_REG);
- outl(0x0, devpriv->amcc_iobase + APCI1564_DI_INT_MODE2_REG);
+ outl(0x0, dev->iobase + APCI1564_DI_IRQ_REG);
+ inl(dev->iobase + APCI1564_DI_INT_STATUS_REG);
+ outl(0x0, dev->iobase + APCI1564_DI_INT_MODE1_REG);
+ outl(0x0, dev->iobase + APCI1564_DI_INT_MODE2_REG);
/* Reset the output channels and disable interrupts */
- outl(0x0, devpriv->amcc_iobase + APCI1564_DO_REG);
- outl(0x0, devpriv->amcc_iobase + APCI1564_DO_INT_CTRL_REG);
+ outl(0x0, dev->iobase + APCI1564_DO_REG);
+ outl(0x0, dev->iobase + APCI1564_DO_INT_CTRL_REG);
/* Reset the watchdog registers */
- addi_watchdog_reset(devpriv->amcc_iobase + APCI1564_WDOG_REG);
+ addi_watchdog_reset(dev->iobase + APCI1564_WDOG_REG);
/* Reset the timer registers */
- outl(0x0, devpriv->amcc_iobase + APCI1564_TIMER_CTRL_REG);
- outl(0x0, devpriv->amcc_iobase + APCI1564_TIMER_RELOAD_REG);
+ outl(0x0, devpriv->timer + ADDI_TCW_CTRL_REG);
+ outl(0x0, devpriv->timer + ADDI_TCW_RELOAD_REG);
+
+ if (devpriv->counters) {
+ unsigned long iobase = devpriv->counters + ADDI_TCW_CTRL_REG;
- /* Reset the counter registers */
- outl(0x0, dev->iobase + APCI1564_COUNTER_CTRL_REG(APCI1564_COUNTER1));
- outl(0x0, dev->iobase + APCI1564_COUNTER_CTRL_REG(APCI1564_COUNTER2));
- outl(0x0, dev->iobase + APCI1564_COUNTER_CTRL_REG(APCI1564_COUNTER3));
- outl(0x0, dev->iobase + APCI1564_COUNTER_CTRL_REG(APCI1564_COUNTER4));
+ /* Reset the counter registers */
+ outl(0x0, iobase + APCI1564_COUNTER(0));
+ outl(0x0, iobase + APCI1564_COUNTER(1));
+ outl(0x0, iobase + APCI1564_COUNTER(2));
+ }
return 0;
}
@@ -82,55 +160,52 @@ static irqreturn_t apci1564_interrupt(int irq, void *d)
unsigned int ctrl;
unsigned int chan;
- /* check interrupt is from this device */
- if ((inl(devpriv->amcc_iobase + AMCC_OP_REG_INTCSR) &
- INTCSR_INTR_ASSERTED) == 0)
- return IRQ_NONE;
-
- status = inl(devpriv->amcc_iobase + APCI1564_DI_IRQ_REG);
+ status = inl(dev->iobase + APCI1564_DI_IRQ_REG);
if (status & APCI1564_DI_INT_ENABLE) {
/* disable the interrupt */
outl(status & APCI1564_DI_INT_DISABLE,
- devpriv->amcc_iobase + APCI1564_DI_IRQ_REG);
+ dev->iobase + APCI1564_DI_IRQ_REG);
- s->state = inl(dev->iobase + APCI1564_DI_INT_STATUS_REG)
- & 0xffff;
- comedi_buf_put(s, s->state);
- s->async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOS;
- comedi_event(dev, s);
+ s->state = inl(dev->iobase + APCI1564_DI_INT_STATUS_REG) &
+ 0xffff;
+ comedi_buf_write_samples(s, &s->state, 1);
+ comedi_handle_events(dev, s);
/* enable the interrupt */
- outl(status, devpriv->amcc_iobase + APCI1564_DI_IRQ_REG);
+ outl(status, dev->iobase + APCI1564_DI_IRQ_REG);
}
- status = inl(devpriv->amcc_iobase + APCI1564_TIMER_IRQ_REG);
+ status = inl(devpriv->timer + ADDI_TCW_IRQ_REG);
if (status & 0x01) {
/* Disable Timer Interrupt */
- ctrl = inl(devpriv->amcc_iobase + APCI1564_TIMER_CTRL_REG);
- outl(0x0, devpriv->amcc_iobase + APCI1564_TIMER_CTRL_REG);
+ ctrl = inl(devpriv->timer + ADDI_TCW_CTRL_REG);
+ outl(0x0, devpriv->timer + ADDI_TCW_CTRL_REG);
/* Send a signal from kernel to user space */
send_sig(SIGIO, devpriv->tsk_current, 0);
/* Enable Timer Interrupt */
- outl(ctrl, devpriv->amcc_iobase + APCI1564_TIMER_CTRL_REG);
+ outl(ctrl, devpriv->timer + ADDI_TCW_CTRL_REG);
}
- for (chan = 0; chan < 4; chan++) {
- status = inl(dev->iobase + APCI1564_COUNTER_IRQ_REG(chan));
- if (status & 0x01) {
- /* Disable Counter Interrupt */
- ctrl = inl(dev->iobase +
- APCI1564_COUNTER_CTRL_REG(chan));
- outl(0x0, dev->iobase +
- APCI1564_COUNTER_CTRL_REG(chan));
-
- /* Send a signal from kernel to user space */
- send_sig(SIGIO, devpriv->tsk_current, 0);
-
- /* Enable Counter Interrupt */
- outl(ctrl, dev->iobase +
- APCI1564_COUNTER_CTRL_REG(chan));
+ if (devpriv->counters) {
+ for (chan = 0; chan < 4; chan++) {
+ unsigned long iobase;
+
+ iobase = devpriv->counters + APCI1564_COUNTER(chan);
+
+ status = inl(iobase + ADDI_TCW_IRQ_REG);
+ if (status & 0x01) {
+ /* Disable Counter Interrupt */
+ ctrl = inl(iobase + ADDI_TCW_CTRL_REG);
+ outl(0x0, iobase + ADDI_TCW_CTRL_REG);
+
+ /* Send a signal from kernel to user space */
+ send_sig(SIGIO, devpriv->tsk_current, 0);
+
+ /* Enable Counter Interrupt */
+ outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
+ }
}
}
@@ -142,9 +217,7 @@ static int apci1564_di_insn_bits(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- struct apci1564_private *devpriv = dev->private;
-
- data[1] = inl(devpriv->amcc_iobase + APCI1564_DI_REG);
+ data[1] = inl(dev->iobase + APCI1564_DI_REG);
return insn->n;
}
@@ -154,12 +227,10 @@ static int apci1564_do_insn_bits(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- struct apci1564_private *devpriv = dev->private;
-
- s->state = inl(devpriv->amcc_iobase + APCI1564_DO_REG);
+ s->state = inl(dev->iobase + APCI1564_DO_REG);
if (comedi_dio_update_state(s, data))
- outl(s->state, devpriv->amcc_iobase + APCI1564_DO_REG);
+ outl(s->state, dev->iobase + APCI1564_DO_REG);
data[1] = s->state;
@@ -171,9 +242,7 @@ static int apci1564_diag_insn_bits(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- struct apci1564_private *devpriv = dev->private;
-
- data[1] = inl(devpriv->amcc_iobase + APCI1564_DO_INT_STATUS_REG) & 3;
+ data[1] = inl(dev->iobase + APCI1564_DO_INT_STATUS_REG) & 3;
return insn->n;
}
@@ -227,10 +296,10 @@ static int apci1564_cos_insn_config(struct comedi_device *dev,
devpriv->ctrl = 0;
devpriv->mode1 = 0;
devpriv->mode2 = 0;
- outl(0x0, devpriv->amcc_iobase + APCI1564_DI_IRQ_REG);
- inl(devpriv->amcc_iobase + APCI1564_DI_INT_STATUS_REG);
- outl(0x0, devpriv->amcc_iobase + APCI1564_DI_INT_MODE1_REG);
- outl(0x0, devpriv->amcc_iobase + APCI1564_DI_INT_MODE2_REG);
+ outl(0x0, dev->iobase + APCI1564_DI_IRQ_REG);
+ inl(dev->iobase + APCI1564_DI_INT_STATUS_REG);
+ outl(0x0, dev->iobase + APCI1564_DI_INT_MODE1_REG);
+ outl(0x0, dev->iobase + APCI1564_DI_INT_MODE2_REG);
break;
case COMEDI_DIGITAL_TRIG_ENABLE_EDGES:
if (devpriv->ctrl != (APCI1564_DI_INT_ENABLE |
@@ -342,9 +411,9 @@ static int apci1564_cos_cmd(struct comedi_device *dev,
return -EINVAL;
}
- outl(devpriv->mode1, devpriv->amcc_iobase + APCI1564_DI_INT_MODE1_REG);
- outl(devpriv->mode2, devpriv->amcc_iobase + APCI1564_DI_INT_MODE2_REG);
- outl(devpriv->ctrl, devpriv->amcc_iobase + APCI1564_DI_IRQ_REG);
+ outl(devpriv->mode1, dev->iobase + APCI1564_DI_INT_MODE1_REG);
+ outl(devpriv->mode2, dev->iobase + APCI1564_DI_INT_MODE2_REG);
+ outl(devpriv->ctrl, dev->iobase + APCI1564_DI_IRQ_REG);
return 0;
}
@@ -352,12 +421,10 @@ static int apci1564_cos_cmd(struct comedi_device *dev,
static int apci1564_cos_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- struct apci1564_private *devpriv = dev->private;
-
- outl(0x0, devpriv->amcc_iobase + APCI1564_DI_IRQ_REG);
- inl(devpriv->amcc_iobase + APCI1564_DI_INT_STATUS_REG);
- outl(0x0, devpriv->amcc_iobase + APCI1564_DI_INT_MODE1_REG);
- outl(0x0, devpriv->amcc_iobase + APCI1564_DI_INT_MODE2_REG);
+ outl(0x0, dev->iobase + APCI1564_DI_IRQ_REG);
+ inl(dev->iobase + APCI1564_DI_INT_STATUS_REG);
+ outl(0x0, dev->iobase + APCI1564_DI_INT_MODE1_REG);
+ outl(0x0, dev->iobase + APCI1564_DI_INT_MODE2_REG);
return 0;
}
@@ -368,6 +435,7 @@ static int apci1564_auto_attach(struct comedi_device *dev,
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
struct apci1564_private *devpriv;
struct comedi_subdevice *s;
+ unsigned int val;
int ret;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
@@ -378,8 +446,20 @@ static int apci1564_auto_attach(struct comedi_device *dev,
if (ret)
return ret;
- dev->iobase = pci_resource_start(pcidev, 1);
- devpriv->amcc_iobase = pci_resource_start(pcidev, 0);
+ /* read the EEPROM register and check the I/O map revision */
+ devpriv->eeprom = pci_resource_start(pcidev, 0);
+ val = inl(devpriv->eeprom + APCI1564_EEPROM_REG);
+ if (APCI1564_EEPROM_TO_REV(val) == 0) {
+ /* PLD Revision 1.0 I/O Mapping */
+ dev->iobase = pci_resource_start(pcidev, 1) +
+ APCI1564_REV1_MAIN_IOBASE;
+ devpriv->timer = devpriv->eeprom + APCI1564_REV1_TIMER_IOBASE;
+ } else {
+ /* PLD Revision 2.x I/O Mapping */
+ dev->iobase = devpriv->eeprom + APCI1564_REV2_MAIN_IOBASE;
+ devpriv->timer = devpriv->eeprom + APCI1564_REV2_TIMER_IOBASE;
+ devpriv->counters = pci_resource_start(pcidev, 1);
+ }
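/*
 * Illustrative sketch (not part of the patch; the register value is an
 * assumed example): if the EEPROM register read returns val = 0x0120,
 *   APCI1564_EEPROM_TO_REV(0x0120) = (0x0120 >> 4) & 0xf = 2
 * so the PLD Revision 2.x mapping above is selected; a read of 0x0100
 * would give revision 0 and select the Revision 1.0 mapping instead.
 */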
apci1564_reset(dev);
@@ -390,7 +470,7 @@ static int apci1564_auto_attach(struct comedi_device *dev,
dev->irq = pcidev->irq;
}
- ret = comedi_alloc_subdevices(dev, 6);
+ ret = comedi_alloc_subdevices(dev, 7);
if (ret)
return ret;
@@ -406,7 +486,7 @@ static int apci1564_auto_attach(struct comedi_device *dev,
/* Allocate and Initialise DO Subdevice Structures */
s = &dev->subdevices[1];
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITEABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 32;
s->maxdata = 1;
s->range_table = &range_digital;
@@ -431,26 +511,40 @@ static int apci1564_auto_attach(struct comedi_device *dev,
s->type = COMEDI_SUBD_UNUSED;
}
- /* Allocate and Initialise Timer Subdevice Structures */
+ /* Timer subdevice */
s = &dev->subdevices[3];
s->type = COMEDI_SUBD_TIMER;
- s->subdev_flags = SDF_WRITEABLE;
+ s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
s->n_chan = 1;
- s->maxdata = 0;
- s->len_chanlist = 1;
+ s->maxdata = 0x0fff;
s->range_table = &range_digital;
- s->insn_write = apci1564_timer_write;
- s->insn_read = apci1564_timer_read;
- s->insn_config = apci1564_timer_config;
+ s->insn_config = apci1564_timer_insn_config;
+ s->insn_write = apci1564_timer_insn_write;
+ s->insn_read = apci1564_timer_insn_read;
- /* Initialize the watchdog subdevice */
+ /* Counter subdevice */
s = &dev->subdevices[4];
- ret = addi_watchdog_init(s, devpriv->amcc_iobase + APCI1564_WDOG_REG);
+ if (devpriv->counters) {
+ s->type = COMEDI_SUBD_COUNTER;
+ s->subdev_flags = SDF_WRITABLE | SDF_READABLE | SDF_LSAMPL;
+ s->n_chan = 3;
+ s->maxdata = 0xffffffff;
+ s->range_table = &range_digital;
+ s->insn_config = apci1564_counter_insn_config;
+ s->insn_write = apci1564_counter_insn_write;
+ s->insn_read = apci1564_counter_insn_read;
+ } else {
+ s->type = COMEDI_SUBD_UNUSED;
+ }
+
+ /* Initialize the watchdog subdevice */
+ s = &dev->subdevices[5];
+ ret = addi_watchdog_init(s, dev->iobase + APCI1564_WDOG_REG);
if (ret)
return ret;
/* Initialize the diagnostic status subdevice */
- s = &dev->subdevices[5];
+ s = &dev->subdevices[6];
s->type = COMEDI_SUBD_DI;
s->subdev_flags = SDF_READABLE;
s->n_chan = 2;
diff --git a/drivers/staging/comedi/drivers/addi_apci_16xx.c b/drivers/staging/comedi/drivers/addi_apci_16xx.c
index 4162e2dc286..a1248dab369 100644
--- a/drivers/staging/comedi/drivers/addi_apci_16xx.c
+++ b/drivers/staging/comedi/drivers/addi_apci_16xx.c
@@ -140,7 +140,7 @@ static int apci16xx_auto_attach(struct comedi_device *dev,
for (i = 0; i < n_subdevs; i++) {
s = &dev->subdevices[i];
s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_WRITEABLE | SDF_READABLE;
+ s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
s->n_chan = ((i * 32) < board->n_chan) ? 32 : last;
s->maxdata = 1;
s->range_table = &range_digital;
diff --git a/drivers/staging/comedi/drivers/addi_apci_2032.c b/drivers/staging/comedi/drivers/addi_apci_2032.c
index aea3da32535..eebf4f151b3 100644
--- a/drivers/staging/comedi/drivers/addi_apci_2032.c
+++ b/drivers/staging/comedi/drivers/addi_apci_2032.c
@@ -47,7 +47,6 @@
struct apci2032_int_private {
spinlock_t spinlock;
- unsigned int stop_count;
bool active;
unsigned char enabled_isns;
};
@@ -148,7 +147,6 @@ static int apci2032_int_cmd(struct comedi_device *dev,
spin_lock_irqsave(&subpriv->spinlock, flags);
subpriv->enabled_isns = enabled_isns;
- subpriv->stop_count = cmd->stop_arg;
subpriv->active = true;
outl(enabled_isns, dev->iobase + APCI2032_INT_CTRL_REG);
@@ -178,7 +176,6 @@ static irqreturn_t apci2032_interrupt(int irq, void *d)
struct comedi_cmd *cmd = &s->async->cmd;
struct apci2032_int_private *subpriv;
unsigned int val;
- bool do_event = false;
if (!dev->attached)
return IRQ_NONE;
@@ -212,27 +209,16 @@ static irqreturn_t apci2032_interrupt(int irq, void *d)
bits |= (1 << i);
}
- if (comedi_buf_put(s, bits)) {
- s->async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOS;
- if (cmd->stop_src == TRIG_COUNT &&
- subpriv->stop_count > 0) {
- subpriv->stop_count--;
- if (subpriv->stop_count == 0) {
- /* end of acquisition */
- s->async->events |= COMEDI_CB_EOA;
- apci2032_int_stop(dev, s);
- }
- }
- } else {
- apci2032_int_stop(dev, s);
- s->async->events |= COMEDI_CB_OVERFLOW;
- }
- do_event = true;
+ comedi_buf_write_samples(s, &bits, 1);
+
+ if (cmd->stop_src == TRIG_COUNT &&
+ s->async->scans_done >= cmd->stop_arg)
+ s->async->events |= COMEDI_CB_EOA;
}
spin_unlock(&subpriv->spinlock);
- if (do_event)
- comedi_event(dev, s);
+
+ comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
@@ -274,7 +260,7 @@ static int apci2032_auto_attach(struct comedi_device *dev,
/* Initialize the digital output subdevice */
s = &dev->subdevices[0];
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITEABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 32;
s->maxdata = 1;
s->range_table = &range_digital;
@@ -303,7 +289,7 @@ static int apci2032_auto_attach(struct comedi_device *dev,
return -ENOMEM;
spin_lock_init(&subpriv->spinlock);
s->private = subpriv;
- s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
+ s->subdev_flags = SDF_READABLE | SDF_CMD_READ | SDF_PACKED;
s->len_chanlist = 2;
s->do_cmdtest = apci2032_int_cmdtest;
s->do_cmd = apci2032_int_cmd;
diff --git a/drivers/staging/comedi/drivers/addi_apci_2200.c b/drivers/staging/comedi/drivers/addi_apci_2200.c
index 51ab1f937ba..1f9d13661ac 100644
--- a/drivers/staging/comedi/drivers/addi_apci_2200.c
+++ b/drivers/staging/comedi/drivers/addi_apci_2200.c
@@ -98,7 +98,7 @@ static int apci2200_auto_attach(struct comedi_device *dev,
/* Initialize the digital output subdevice */
s = &dev->subdevices[1];
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITEABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 16;
s->maxdata = 1;
s->range_table = &range_digital;
diff --git a/drivers/staging/comedi/drivers/addi_apci_3120.c b/drivers/staging/comedi/drivers/addi_apci_3120.c
index ba71e24a56f..c65f9407fd0 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3120.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3120.c
@@ -1,70 +1,993 @@
+/*
+ * addi_apci_3120.c
+ * Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
+ *
+ * ADDI-DATA GmbH
+ * Dieselstrasse 3
+ * D-77833 Ottersweier
+ * Tel: +19(0)7223/9493-0
+ * Fax: +49(0)7223/9493-92
+ * http://www.addi-data.com
+ * info@addi-data.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/interrupt.h>
#include "../comedidev.h"
#include "comedi_fc.h"
#include "amcc_s5933.h"
-#include "addi-data/addi_common.h"
+/*
+ * PCI BAR 0 register map (devpriv->amcc)
+ * see amcc_s5933.h for register and bit defines
+ */
+#define APCI3120_FIFO_ADVANCE_ON_BYTE_2 (1 << 29)
+
+/*
+ * PCI BAR 1 register map (dev->iobase)
+ */
+#define APCI3120_AI_FIFO_REG 0x00
+#define APCI3120_CTRL_REG 0x00
+#define APCI3120_CTRL_EXT_TRIG (1 << 15)
+#define APCI3120_CTRL_GATE(x) (1 << (12 + (x)))
+#define APCI3120_CTRL_PR(x) (((x) & 0xf) << 8)
+#define APCI3120_CTRL_PA(x) (((x) & 0xf) << 0)
+#define APCI3120_AI_SOFTTRIG_REG 0x02
+#define APCI3120_STATUS_REG 0x02
+#define APCI3120_STATUS_EOC_INT (1 << 15)
+#define APCI3120_STATUS_AMCC_INT (1 << 14)
+#define APCI3120_STATUS_EOS_INT (1 << 13)
+#define APCI3120_STATUS_TIMER2_INT (1 << 12)
+#define APCI3120_STATUS_INT_MASK (0xf << 12)
+#define APCI3120_STATUS_TO_DI_BITS(x) (((x) >> 8) & 0xf)
+#define APCI3120_STATUS_TO_VERSION(x) (((x) >> 4) & 0xf)
+#define APCI3120_STATUS_FIFO_FULL (1 << 2)
+#define APCI3120_STATUS_FIFO_EMPTY (1 << 1)
+#define APCI3120_STATUS_DA_READY (1 << 0)
+#define APCI3120_TIMER_REG 0x04
+#define APCI3120_CHANLIST_REG 0x06
+#define APCI3120_CHANLIST_INDEX(x) (((x) & 0xf) << 8)
+#define APCI3120_CHANLIST_UNIPOLAR (1 << 7)
+#define APCI3120_CHANLIST_GAIN(x) (((x) & 0x3) << 4)
+#define APCI3120_CHANLIST_MUX(x) (((x) & 0xf) << 0)
+#define APCI3120_AO_REG(x) (0x08 + (((x) / 4) * 2))
+#define APCI3120_AO_MUX(x) (((x) & 0x3) << 14)
+#define APCI3120_AO_DATA(x) ((x) << 0)
+#define APCI3120_TIMER_MODE_REG 0x0c
+#define APCI3120_TIMER_MODE(_t, _m) ((_m) << ((_t) * 2))
+#define APCI3120_TIMER_MODE0 0 /* I8254_MODE0 */
+#define APCI3120_TIMER_MODE2 1 /* I8254_MODE2 */
+#define APCI3120_TIMER_MODE4 2 /* I8254_MODE4 */
+#define APCI3120_TIMER_MODE5 3 /* I8254_MODE5 */
+#define APCI3120_TIMER_MODE_MASK(_t) (3 << ((_t) * 2))
+#define APCI3120_CTR0_REG 0x0d
+#define APCI3120_CTR0_DO_BITS(x) ((x) << 4)
+#define APCI3120_CTR0_TIMER_SEL(x) ((x) << 0)
+#define APCI3120_MODE_REG 0x0e
+#define APCI3120_MODE_TIMER2_CLK_OSC (0 << 6)
+#define APCI3120_MODE_TIMER2_CLK_OUT1 (1 << 6)
+#define APCI3120_MODE_TIMER2_CLK_EOC (2 << 6)
+#define APCI3120_MODE_TIMER2_CLK_EOS (3 << 6)
+#define APCI3120_MODE_TIMER2_CLK_MASK (3 << 6)
+#define APCI3120_MODE_TIMER2_AS_TIMER (0 << 4)
+#define APCI3120_MODE_TIMER2_AS_COUNTER (1 << 4)
+#define APCI3120_MODE_TIMER2_AS_WDOG (2 << 4)
+#define APCI3120_MODE_TIMER2_AS_MASK (3 << 4) /* sets AS_TIMER */
+#define APCI3120_MODE_SCAN_ENA (1 << 3)
+#define APCI3120_MODE_TIMER2_IRQ_ENA (1 << 2)
+#define APCI3120_MODE_EOS_IRQ_ENA (1 << 1)
+#define APCI3120_MODE_EOC_IRQ_ENA (1 << 0)
+
+/*
+ * PCI BAR 2 register map (devpriv->addon)
+ */
+#define APCI3120_ADDON_ADDR_REG 0x00
+#define APCI3120_ADDON_DATA_REG 0x02
+#define APCI3120_ADDON_CTRL_REG 0x04
+#define APCI3120_ADDON_CTRL_AMWEN_ENA (1 << 1)
+#define APCI3120_ADDON_CTRL_A2P_FIFO_ENA (1 << 0)
-#include "addi-data/hwdrv_apci3120.c"
+/*
+ * Board revisions
+ */
+#define APCI3120_REVA 0xa
+#define APCI3120_REVB 0xb
+#define APCI3120_REVA_OSC_BASE 70 /* 70ns = 14.29MHz */
+#define APCI3120_REVB_OSC_BASE 50 /* 50ns = 20MHz */
+
+static const struct comedi_lrange apci3120_ai_range = {
+ 8, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2),
+ BIP_RANGE(1),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2),
+ UNI_RANGE(1)
+ }
+};
enum apci3120_boardid {
BOARD_APCI3120,
BOARD_APCI3001,
};
-static const struct addi_board apci3120_boardtypes[] = {
+struct apci3120_board {
+ const char *name;
+ unsigned int ai_is_16bit:1;
+ unsigned int has_ao:1;
+};
+
+static const struct apci3120_board apci3120_boardtypes[] = {
[BOARD_APCI3120] = {
- .pc_DriverName = "apci3120",
- .i_NbrAiChannel = 16,
- .i_NbrAiChannelDiff = 8,
- .i_AiChannelList = 16,
- .i_NbrAoChannel = 8,
- .i_AiMaxdata = 0xffff,
- .i_AoMaxdata = 0x3fff,
- .i_NbrDiChannel = 4,
- .i_NbrDoChannel = 4,
- .i_DoMaxdata = 0x0f,
- .interrupt = apci3120_interrupt,
+ .name = "apci3120",
+ .ai_is_16bit = 1,
+ .has_ao = 1,
},
[BOARD_APCI3001] = {
- .pc_DriverName = "apci3001",
- .i_NbrAiChannel = 16,
- .i_NbrAiChannelDiff = 8,
- .i_AiChannelList = 16,
- .i_AiMaxdata = 0xfff,
- .i_NbrDiChannel = 4,
- .i_NbrDoChannel = 4,
- .i_DoMaxdata = 0x0f,
- .interrupt = apci3120_interrupt,
+ .name = "apci3001",
},
};
-static irqreturn_t v_ADDI_Interrupt(int irq, void *d)
+struct apci3120_dmabuf {
+ unsigned short *virt;
+ dma_addr_t hw;
+ unsigned int size;
+ unsigned int use_size;
+};
+
+struct apci3120_private {
+ unsigned long amcc;
+ unsigned long addon;
+ unsigned int osc_base;
+ unsigned int use_dma:1;
+ unsigned int use_double_buffer:1;
+ unsigned int cur_dmabuf:1;
+ struct apci3120_dmabuf dmabuf[2];
+ unsigned char do_bits;
+ unsigned char timer_mode;
+ unsigned char mode;
+ unsigned short ctrl;
+};
+
+static void apci3120_addon_write(struct comedi_device *dev,
+ unsigned int val, unsigned int reg)
+{
+ struct apci3120_private *devpriv = dev->private;
+
+ /* 16-bit interface for AMCC add-on registers */
+
+ outw(reg, devpriv->addon + APCI3120_ADDON_ADDR_REG);
+ outw(val & 0xffff, devpriv->addon + APCI3120_ADDON_DATA_REG);
+
+ outw(reg + 2, devpriv->addon + APCI3120_ADDON_ADDR_REG);
+ outw((val >> 16) & 0xffff, devpriv->addon + APCI3120_ADDON_DATA_REG);
+}
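/*
 * Illustrative sketch (not part of the patch; the value is an arbitrary
 * example): apci3120_addon_write() splits a 32-bit value across the
 * 16-bit add-on interface, so writing 0x12345678 to reg performs
 *   outw(reg,     ADDR_REG); outw(0x5678, DATA_REG);   - low 16 bits
 *   outw(reg + 2, ADDR_REG); outw(0x1234, DATA_REG);   - high 16 bits
 */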
+
+static void apci3120_init_dma(struct comedi_device *dev,
+ struct apci3120_dmabuf *dmabuf)
+{
+ struct apci3120_private *devpriv = dev->private;
+
+ /* AMCC - enable transfer count and reset A2P FIFO */
+ outl(AGCSTS_TC_ENABLE | AGCSTS_RESET_A2P_FIFO,
+ devpriv->amcc + AMCC_OP_REG_AGCSTS);
+
+ /* Add-On - enable transfer count and reset A2P FIFO */
+ apci3120_addon_write(dev, AGCSTS_TC_ENABLE | AGCSTS_RESET_A2P_FIFO,
+ AMCC_OP_REG_AGCSTS);
+
+ /* AMCC - enable transfers and reset A2P flags */
+ outl(RESET_A2P_FLAGS | EN_A2P_TRANSFERS,
+ devpriv->amcc + AMCC_OP_REG_MCSR);
+
+ /* Add-On - DMA start address */
+ apci3120_addon_write(dev, dmabuf->hw, AMCC_OP_REG_AMWAR);
+
+ /* Add-On - Number of acquisitions */
+ apci3120_addon_write(dev, dmabuf->use_size, AMCC_OP_REG_AMWTC);
+
+ /* AMCC - enable write complete (DMA) and set FIFO advance */
+ outl(APCI3120_FIFO_ADVANCE_ON_BYTE_2 | AINT_WRITE_COMPL,
+ devpriv->amcc + AMCC_OP_REG_INTCSR);
+
+ /* Add-On - enable DMA */
+ outw(APCI3120_ADDON_CTRL_AMWEN_ENA | APCI3120_ADDON_CTRL_A2P_FIFO_ENA,
+ devpriv->addon + APCI3120_ADDON_CTRL_REG);
+}
+
+static void apci3120_setup_dma(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct apci3120_private *devpriv = dev->private;
+ struct comedi_cmd *cmd = &s->async->cmd;
+ struct apci3120_dmabuf *dmabuf0 = &devpriv->dmabuf[0];
+ struct apci3120_dmabuf *dmabuf1 = &devpriv->dmabuf[1];
+ unsigned int dmalen0 = dmabuf0->size;
+ unsigned int dmalen1 = dmabuf1->size;
+ unsigned int scan_bytes;
+
+ scan_bytes = comedi_samples_to_bytes(s, cmd->scan_end_arg);
+
+ if (cmd->stop_src == TRIG_COUNT) {
+ /*
+ * Must we fill the whole first buffer, and then the whole
+ * second buffer once the first one is filled?
+ */
+ if (dmalen0 > (cmd->stop_arg * scan_bytes))
+ dmalen0 = cmd->stop_arg * scan_bytes;
+ else if (dmalen1 > (cmd->stop_arg * scan_bytes - dmalen0))
+ dmalen1 = cmd->stop_arg * scan_bytes - dmalen0;
+ }
+
+ if (cmd->flags & CMDF_WAKE_EOS) {
+ /* wake up at the end of every scan */
+ if (dmalen0 > scan_bytes) {
+ dmalen0 = scan_bytes;
+ if (cmd->scan_end_arg & 1)
+ dmalen0 += 2;
+ }
+ if (dmalen1 > scan_bytes) {
+ dmalen1 = scan_bytes;
+ if (cmd->scan_end_arg & 1)
+ dmalen1 -= 2;
+ if (dmalen1 < 4)
+ dmalen1 = 4;
+ }
+ } else {
+ /* is the output buffer smaller than our DMA buffer? */
+ if (dmalen0 > s->async->prealloc_bufsz)
+ dmalen0 = s->async->prealloc_bufsz;
+ if (dmalen1 > s->async->prealloc_bufsz)
+ dmalen1 = s->async->prealloc_bufsz;
+ }
+ dmabuf0->use_size = dmalen0;
+ dmabuf1->use_size = dmalen1;
+
+ apci3120_init_dma(dev, dmabuf0);
+}
+
+/*
+ * There are three timers on the board. They all use the same base
+ * clock with a fixed prescaler for each timer. The base clock used
+ * depends on the board version and type.
+ *
+ * APCI-3120 Rev A boards OSC = 14.29MHz base clock (~70ns)
+ * APCI-3120 Rev B boards OSC = 20MHz base clock (50ns)
+ * APCI-3001 boards OSC = 20MHz base clock (50ns)
+ *
+ * The prescalers for each timer are:
+ * Timer 0 CLK = OSC/10
+ * Timer 1 CLK = OSC/1000
+ * Timer 2 CLK = OSC/1000
+ */
+static unsigned int apci3120_ns_to_timer(struct comedi_device *dev,
+ unsigned int timer,
+ unsigned int ns,
+ unsigned int flags)
+{
+ struct apci3120_private *devpriv = dev->private;
+ unsigned int prescale = (timer == 0) ? 10 : 1000;
+ unsigned int timer_base = devpriv->osc_base * prescale;
+ unsigned int divisor;
+
+ switch (flags & CMDF_ROUND_MASK) {
+ case CMDF_ROUND_UP:
+ divisor = DIV_ROUND_UP(ns, timer_base);
+ break;
+ case CMDF_ROUND_DOWN:
+ divisor = ns / timer_base;
+ break;
+ case CMDF_ROUND_NEAREST:
+ default:
+ divisor = DIV_ROUND_CLOSEST(ns, timer_base);
+ break;
+ }
+
+ if (timer == 2) {
+ /* timer 2 is 24-bits */
+ if (divisor > 0x00ffffff)
+ divisor = 0x00ffffff;
+ } else {
+ /* timers 0 and 1 are 16-bits */
+ if (divisor > 0xffff)
+ divisor = 0xffff;
+ }
+ /* the timers require a minimum divisor of 2 */
+ if (divisor < 2)
+ divisor = 2;
+
+ return divisor;
+}
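/*
 * Worked example (not part of the patch; assumes the Rev B 50 ns base
 * clock and the prescalers documented above):
 *   timer 0: tick = 50 ns * 10   = 500 ns, so a 10 us convert period
 *            gives divisor = 10000 / 500 = 20
 *   timer 1: tick = 50 ns * 1000 = 50 us, so a 1 ms scan period
 *            gives divisor = 1000000 / 50000 = 20
 */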
+
+static void apci3120_clr_timer2_interrupt(struct comedi_device *dev)
+{
+ /* a dummy read of APCI3120_CTR0_REG clears the timer 2 interrupt */
+ inb(dev->iobase + APCI3120_CTR0_REG);
+}
+
+static void apci3120_timer_write(struct comedi_device *dev,
+ unsigned int timer, unsigned int val)
+{
+ struct apci3120_private *devpriv = dev->private;
+
+ /* write 16-bit value to timer (lower 16-bits of timer 2) */
+ outb(APCI3120_CTR0_DO_BITS(devpriv->do_bits) |
+ APCI3120_CTR0_TIMER_SEL(timer),
+ dev->iobase + APCI3120_CTR0_REG);
+ outw(val & 0xffff, dev->iobase + APCI3120_TIMER_REG);
+
+ if (timer == 2) {
+ /* write upper 16-bits to timer 2 */
+ outb(APCI3120_CTR0_DO_BITS(devpriv->do_bits) |
+ APCI3120_CTR0_TIMER_SEL(timer + 1),
+ dev->iobase + APCI3120_CTR0_REG);
+ outw((val >> 16) & 0xffff, dev->iobase + APCI3120_TIMER_REG);
+ }
+}
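/*
 * Illustrative sketch (not part of the patch; the value is an example):
 * writing the 24-bit value 0x012345 to timer 2 takes two 16-bit accesses
 * through APCI3120_TIMER_REG: 0x2345 with TIMER_SEL(2) for the low word,
 * then 0x0001 with TIMER_SEL(3) for the upper bits.
 */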
+
+static unsigned int apci3120_timer_read(struct comedi_device *dev,
+ unsigned int timer)
+{
+ struct apci3120_private *devpriv = dev->private;
+ unsigned int val;
+
+ /* read 16-bit value from timer (lower 16-bits of timer 2) */
+ outb(APCI3120_CTR0_DO_BITS(devpriv->do_bits) |
+ APCI3120_CTR0_TIMER_SEL(timer),
+ dev->iobase + APCI3120_CTR0_REG);
+ val = inw(dev->iobase + APCI3120_TIMER_REG);
+
+ if (timer == 2) {
+ /* read upper 16-bits from timer 2 */
+ outb(APCI3120_CTR0_DO_BITS(devpriv->do_bits) |
+ APCI3120_CTR0_TIMER_SEL(timer + 1),
+ dev->iobase + APCI3120_CTR0_REG);
+ val |= (inw(dev->iobase + APCI3120_TIMER_REG) << 16);
+ }
+
+ return val;
+}
+
+static void apci3120_timer_set_mode(struct comedi_device *dev,
+ unsigned int timer, unsigned int mode)
+{
+ struct apci3120_private *devpriv = dev->private;
+
+ devpriv->timer_mode &= ~APCI3120_TIMER_MODE_MASK(timer);
+ devpriv->timer_mode |= APCI3120_TIMER_MODE(timer, mode);
+ outb(devpriv->timer_mode, dev->iobase + APCI3120_TIMER_MODE_REG);
+}
+
+static void apci3120_timer_enable(struct comedi_device *dev,
+ unsigned int timer, bool enable)
+{
+ struct apci3120_private *devpriv = dev->private;
+
+ if (enable)
+ devpriv->ctrl |= APCI3120_CTRL_GATE(timer);
+ else
+ devpriv->ctrl &= ~APCI3120_CTRL_GATE(timer);
+ outw(devpriv->ctrl, dev->iobase + APCI3120_CTRL_REG);
+}
+
+static void apci3120_exttrig_enable(struct comedi_device *dev, bool enable)
+{
+ struct apci3120_private *devpriv = dev->private;
+
+ if (enable)
+ devpriv->ctrl |= APCI3120_CTRL_EXT_TRIG;
+ else
+ devpriv->ctrl &= ~APCI3120_CTRL_EXT_TRIG;
+ outw(devpriv->ctrl, dev->iobase + APCI3120_CTRL_REG);
+}
+
+static void apci3120_set_chanlist(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ int n_chan, unsigned int *chanlist)
+{
+ struct apci3120_private *devpriv = dev->private;
+ int i;
+
+ /* set chanlist for scan */
+ for (i = 0; i < n_chan; i++) {
+ unsigned int chan = CR_CHAN(chanlist[i]);
+ unsigned int range = CR_RANGE(chanlist[i]);
+ unsigned int val;
+
+ val = APCI3120_CHANLIST_MUX(chan) |
+ APCI3120_CHANLIST_GAIN(range) |
+ APCI3120_CHANLIST_INDEX(i);
+
+ if (comedi_range_is_unipolar(s, range))
+ val |= APCI3120_CHANLIST_UNIPOLAR;
+
+ outw(val, dev->iobase + APCI3120_CHANLIST_REG);
+ }
+
+ /* a dummy read of APCI3120_TIMER_MODE_REG resets the ai FIFO */
+ inw(dev->iobase + APCI3120_TIMER_MODE_REG);
+
+ /* set scan length (PR) and scan start (PA) */
+ devpriv->ctrl = APCI3120_CTRL_PR(n_chan - 1) | APCI3120_CTRL_PA(0);
+ outw(devpriv->ctrl, dev->iobase + APCI3120_CTRL_REG);
+
+ /* enable chanlist scanning if necessary */
+ if (n_chan > 1)
+ devpriv->mode |= APCI3120_MODE_SCAN_ENA;
+}
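/*
 * Example encoding (not part of the patch; channel and range are
 * arbitrary): chanlist entry 0 selecting channel 3 with range index 2
 * (BIP_RANGE(2) in apci3120_ai_range, a bipolar range) is written as
 *   APCI3120_CHANLIST_INDEX(0) | APCI3120_CHANLIST_GAIN(2) |
 *   APCI3120_CHANLIST_MUX(3) = 0x0023
 * and the APCI3120_CHANLIST_UNIPOLAR bit stays clear.
 */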
+
+static void apci3120_interrupt_dma(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct apci3120_private *devpriv = dev->private;
+ struct comedi_async *async = s->async;
+ struct comedi_cmd *cmd = &async->cmd;
+ struct apci3120_dmabuf *dmabuf;
+ unsigned int nbytes;
+ unsigned int nsamples;
+
+ dmabuf = &devpriv->dmabuf[devpriv->cur_dmabuf];
+
+ nbytes = dmabuf->use_size - inl(devpriv->amcc + AMCC_OP_REG_MWTC);
+
+ if (nbytes < dmabuf->use_size)
+ dev_err(dev->class_dev, "Interrupted DMA transfer!\n");
+ if (nbytes & 1) {
+ dev_err(dev->class_dev, "Odd count of bytes in DMA ring!\n");
+ async->events |= COMEDI_CB_ERROR;
+ return;
+ }
+
+ nsamples = comedi_bytes_to_samples(s, nbytes);
+ if (nsamples) {
+ comedi_buf_write_samples(s, dmabuf->virt, nsamples);
+
+ if (!(cmd->flags & CMDF_WAKE_EOS))
+ async->events |= COMEDI_CB_EOS;
+ }
+
+ if ((async->events & COMEDI_CB_CANCEL_MASK) ||
+ (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg))
+ return;
+
+ if (devpriv->use_double_buffer) {
+ /* switch DMA buffers for next interrupt */
+ devpriv->cur_dmabuf = !devpriv->cur_dmabuf;
+ dmabuf = &devpriv->dmabuf[devpriv->cur_dmabuf];
+ apci3120_init_dma(dev, dmabuf);
+ } else {
+ /* restart DMA if not using double buffering */
+ apci3120_init_dma(dev, dmabuf);
+ }
+}
+
+static irqreturn_t apci3120_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
- const struct addi_board *this_board = dev->board_ptr;
+ struct apci3120_private *devpriv = dev->private;
+ struct comedi_subdevice *s = dev->read_subdev;
+ struct comedi_async *async = s->async;
+ struct comedi_cmd *cmd = &async->cmd;
+ unsigned int status;
+ unsigned int int_amcc;
+
+ status = inw(dev->iobase + APCI3120_STATUS_REG);
+ int_amcc = inl(devpriv->amcc + AMCC_OP_REG_INTCSR);
+
+ if (!(status & APCI3120_STATUS_INT_MASK) &&
+ !(int_amcc & ANY_S593X_INT)) {
+ dev_err(dev->class_dev, "IRQ from unknown source\n");
+ return IRQ_NONE;
+ }
+
+ outl(int_amcc | AINT_INT_MASK, devpriv->amcc + AMCC_OP_REG_INTCSR);
- this_board->interrupt(irq, d);
- return IRQ_RETVAL(1);
+ if (devpriv->ctrl & APCI3120_CTRL_EXT_TRIG)
+ apci3120_exttrig_enable(dev, false);
+
+ if (int_amcc & MASTER_ABORT_INT)
+ dev_err(dev->class_dev, "AMCC IRQ - MASTER DMA ABORT!\n");
+ if (int_amcc & TARGET_ABORT_INT)
+ dev_err(dev->class_dev, "AMCC IRQ - TARGET DMA ABORT!\n");
+
+ if ((status & APCI3120_STATUS_EOC_INT) == 0 &&
+ (devpriv->mode & APCI3120_MODE_EOC_IRQ_ENA)) {
+ /* nothing to do... EOC mode is not currently used */
+ }
+
+ if ((status & APCI3120_STATUS_EOS_INT) &&
+ (devpriv->mode & APCI3120_MODE_EOS_IRQ_ENA)) {
+ unsigned short val;
+ int i;
+
+ for (i = 0; i < cmd->chanlist_len; i++) {
+ val = inw(dev->iobase + APCI3120_AI_FIFO_REG);
+ comedi_buf_write_samples(s, &val, 1);
+ }
+
+ devpriv->mode |= APCI3120_MODE_EOS_IRQ_ENA;
+ outb(devpriv->mode, dev->iobase + APCI3120_MODE_REG);
+ }
+
+ if (status & APCI3120_STATUS_TIMER2_INT) {
+ /*
+ * for safety...
+ * timer2 interrupts are not enabled in the driver
+ */
+ apci3120_clr_timer2_interrupt(dev);
+ }
+
+ if (status & APCI3120_STATUS_AMCC_INT) {
+ /* AMCC- Clear write complete interrupt (DMA) */
+ outl(AINT_WT_COMPLETE, devpriv->amcc + AMCC_OP_REG_INTCSR);
+
+ /* do some data transfer */
+ apci3120_interrupt_dma(dev, s);
+ }
+
+ if (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg)
+ async->events |= COMEDI_CB_EOA;
+
+ comedi_handle_events(dev, s);
+
+ return IRQ_HANDLED;
+}
+
+static int apci3120_ai_cmd(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct apci3120_private *devpriv = dev->private;
+ struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned int divisor;
+
+ /* set default mode bits */
+ devpriv->mode = APCI3120_MODE_TIMER2_CLK_OSC |
+ APCI3120_MODE_TIMER2_AS_TIMER;
+
+ /* AMCC- Clear write complete interrupt (DMA) */
+ outl(AINT_WT_COMPLETE, devpriv->amcc + AMCC_OP_REG_INTCSR);
+
+ devpriv->cur_dmabuf = 0;
+
+ /* load chanlist for command scan */
+ apci3120_set_chanlist(dev, s, cmd->chanlist_len, cmd->chanlist);
+
+ if (cmd->start_src == TRIG_EXT)
+ apci3120_exttrig_enable(dev, true);
+
+ if (cmd->scan_begin_src == TRIG_TIMER) {
+ /*
+ * Timer 1 is used in MODE2 (rate generator) to set the
+ * start time for each scan.
+ */
+ divisor = apci3120_ns_to_timer(dev, 1, cmd->scan_begin_arg,
+ cmd->flags);
+ apci3120_timer_set_mode(dev, 1, APCI3120_TIMER_MODE2);
+ apci3120_timer_write(dev, 1, divisor);
+ }
+
+ /*
+ * Timer 0 is used in MODE2 (rate generator) to set the conversion
+ * time for each acquisition.
+ */
+ divisor = apci3120_ns_to_timer(dev, 0, cmd->convert_arg, cmd->flags);
+ apci3120_timer_set_mode(dev, 0, APCI3120_TIMER_MODE2);
+ apci3120_timer_write(dev, 0, divisor);
+
+ if (devpriv->use_dma)
+ apci3120_setup_dma(dev, s);
+ else
+ devpriv->mode |= APCI3120_MODE_EOS_IRQ_ENA;
+
+ /* set mode to enable acquisition */
+ outb(devpriv->mode, dev->iobase + APCI3120_MODE_REG);
+
+ if (cmd->scan_begin_src == TRIG_TIMER)
+ apci3120_timer_enable(dev, 1, true);
+ apci3120_timer_enable(dev, 0, true);
+
+ return 0;
+}
+
+static int apci3120_ai_cmdtest(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_cmd *cmd)
+{
+ unsigned int arg;
+ int err = 0;
+
+ /* Step 1 : check if triggers are trivially valid */
+
+ err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_EXT);
+ err |= cfc_check_trigger_src(&cmd->scan_begin_src,
+ TRIG_TIMER | TRIG_FOLLOW);
+ err |= cfc_check_trigger_src(&cmd->convert_src, TRIG_TIMER);
+ err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
+ err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
+
+ if (err)
+ return 1;
+
+ /* Step 2a : make sure trigger sources are unique */
+
+ err |= cfc_check_trigger_is_unique(cmd->start_src);
+ err |= cfc_check_trigger_is_unique(cmd->scan_begin_src);
+ err |= cfc_check_trigger_is_unique(cmd->stop_src);
+
+ /* Step 2b : and mutually compatible */
+
+ if (err)
+ return 2;
+
+ /* Step 3: check if arguments are trivially valid */
+
+ err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0);
+
+ if (cmd->scan_begin_src == TRIG_TIMER) /* Test Delay timing */
+ err |= cfc_check_trigger_arg_min(&cmd->scan_begin_arg, 100000);
+
+ /* minimum conversion time per sample is 10us */
+ err |= cfc_check_trigger_arg_min(&cmd->convert_arg, 10000);
+
+ err |= cfc_check_trigger_arg_min(&cmd->chanlist_len, 1);
+ err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len);
+
+ if (cmd->stop_src == TRIG_COUNT)
+ err |= cfc_check_trigger_arg_min(&cmd->stop_arg, 1);
+ else /* TRIG_NONE */
+ err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0);
+
+ if (err)
+ return 3;
+
+ /* Step 4: fix up any arguments */
+
+ if (cmd->scan_begin_src == TRIG_TIMER) {
+		/* scan begin time must be at least long enough for one full scan */
+ arg = cmd->convert_arg * cmd->scan_end_arg;
+ err |= cfc_check_trigger_arg_min(&cmd->scan_begin_arg, arg);
+ }
+
+ if (err)
+ return 4;
+
+ /* Step 5: check channel list if it exists */
+
+ return 0;
+}
+
+static int apci3120_cancel(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct apci3120_private *devpriv = dev->private;
+
+ /* Add-On - disable DMA */
+ outw(0, devpriv->addon + 4);
+
+ /* Add-On - disable bus master */
+ apci3120_addon_write(dev, 0, AMCC_OP_REG_AGCSTS);
+
+ /* AMCC - disable bus master */
+ outl(0, devpriv->amcc + AMCC_OP_REG_MCSR);
+
+ /* disable all counters, ext trigger, and reset scan */
+ devpriv->ctrl = 0;
+ outw(devpriv->ctrl, dev->iobase + APCI3120_CTRL_REG);
+
+ /* DISABLE_ALL_INTERRUPT */
+ devpriv->mode = 0;
+ outb(devpriv->mode, dev->iobase + APCI3120_MODE_REG);
+
+ inw(dev->iobase + APCI3120_STATUS_REG);
+ devpriv->cur_dmabuf = 0;
+
+ return 0;
+}
+
+static int apci3120_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inw(dev->iobase + APCI3120_STATUS_REG);
+ if ((status & APCI3120_STATUS_EOC_INT) == 0)
+ return 0;
+ return -EBUSY;
+}
+
+static int apci3120_ai_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct apci3120_private *devpriv = dev->private;
+ unsigned int divisor;
+ int ret;
+ int i;
+
+ /* set mode for A/D conversions by software trigger with timer 0 */
+ devpriv->mode = APCI3120_MODE_TIMER2_CLK_OSC |
+ APCI3120_MODE_TIMER2_AS_TIMER;
+ outb(devpriv->mode, dev->iobase + APCI3120_MODE_REG);
+
+ /* load chanlist for single channel scan */
+ apci3120_set_chanlist(dev, s, 1, &insn->chanspec);
+
+ /*
+ * Timer 0 is used in MODE4 (software triggered strobe) to set the
+ * conversion time for each acquisition. Each conversion is triggered
+	 * when the divisor is written to the timer. The conversion is done
+ * when the EOC bit in the status register is '0'.
+ */
+ apci3120_timer_set_mode(dev, 0, APCI3120_TIMER_MODE4);
+ apci3120_timer_enable(dev, 0, true);
+
+ /* fixed conversion time of 10 us */
+ divisor = apci3120_ns_to_timer(dev, 0, 10000, CMDF_ROUND_NEAREST);
+
+ for (i = 0; i < insn->n; i++) {
+ /* trigger conversion */
+ apci3120_timer_write(dev, 0, divisor);
+
+ ret = comedi_timeout(dev, s, insn, apci3120_ai_eoc, 0);
+ if (ret)
+ return ret;
+
+ data[i] = inw(dev->iobase + APCI3120_AI_FIFO_REG);
+ }
+
+ return insn->n;
+}
+
+static int apci3120_ao_ready(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inw(dev->iobase + APCI3120_STATUS_REG);
+ if (status & APCI3120_STATUS_DA_READY)
+ return 0;
+ return -EBUSY;
+}
+
+static int apci3120_ao_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ int i;
+
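+	/* wait for the DAC to report ready before loading each value */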
+ for (i = 0; i < insn->n; i++) {
+ unsigned int val = data[i];
+ int ret;
+
+ ret = comedi_timeout(dev, s, insn, apci3120_ao_ready, 0);
+ if (ret)
+ return ret;
+
+ outw(APCI3120_AO_MUX(chan) | APCI3120_AO_DATA(val),
+ dev->iobase + APCI3120_AO_REG(chan));
+
+ s->readback[chan] = val;
+ }
+
+ return insn->n;
+}
+
+static int apci3120_di_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ unsigned int status;
+
+ status = inw(dev->iobase + APCI3120_STATUS_REG);
+ data[1] = APCI3120_STATUS_TO_DI_BITS(status);
+
+ return insn->n;
+}
+
+static int apci3120_do_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct apci3120_private *devpriv = dev->private;
+
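+	/* the digital outputs are updated via the counter 0 register */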
+ if (comedi_dio_update_state(s, data)) {
+ devpriv->do_bits = s->state;
+ outb(APCI3120_CTR0_DO_BITS(devpriv->do_bits),
+ dev->iobase + APCI3120_CTR0_REG);
+ }
+
+ data[1] = s->state;
+
+ return insn->n;
+}
+
+static int apci3120_timer_insn_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct apci3120_private *devpriv = dev->private;
+ unsigned int divisor;
+ unsigned int status;
+ unsigned int mode;
+ unsigned int timer_mode;
+
+ switch (data[0]) {
+ case INSN_CONFIG_ARM:
+ apci3120_clr_timer2_interrupt(dev);
+ divisor = apci3120_ns_to_timer(dev, 2, data[1],
+ CMDF_ROUND_DOWN);
+ apci3120_timer_write(dev, 2, divisor);
+ apci3120_timer_enable(dev, 2, true);
+ break;
+
+ case INSN_CONFIG_DISARM:
+ apci3120_timer_enable(dev, 2, false);
+ apci3120_clr_timer2_interrupt(dev);
+ break;
+
+ case INSN_CONFIG_GET_COUNTER_STATUS:
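+		/* data[1] returns the status bits, data[2] the supported status bits */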
+ data[1] = 0;
+ data[2] = COMEDI_COUNTER_ARMED | COMEDI_COUNTER_COUNTING |
+ COMEDI_COUNTER_TERMINAL_COUNT;
+
+ if (devpriv->ctrl & APCI3120_CTRL_GATE(2)) {
+ data[1] |= COMEDI_COUNTER_ARMED;
+ data[1] |= COMEDI_COUNTER_COUNTING;
+ }
+ status = inw(dev->iobase + APCI3120_STATUS_REG);
+ if (status & APCI3120_STATUS_TIMER2_INT) {
+ data[1] &= ~COMEDI_COUNTER_COUNTING;
+ data[1] |= COMEDI_COUNTER_TERMINAL_COUNT;
+ }
+ break;
+
+ case INSN_CONFIG_SET_COUNTER_MODE:
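+		/* timer 2 only supports i8254 modes 0, 2, 4, and 5 */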
+ switch (data[1]) {
+ case I8254_MODE0:
+ mode = APCI3120_MODE_TIMER2_AS_COUNTER;
+ timer_mode = APCI3120_TIMER_MODE0;
+ break;
+ case I8254_MODE2:
+ mode = APCI3120_MODE_TIMER2_AS_TIMER;
+ timer_mode = APCI3120_TIMER_MODE2;
+ break;
+ case I8254_MODE4:
+ mode = APCI3120_MODE_TIMER2_AS_TIMER;
+ timer_mode = APCI3120_TIMER_MODE4;
+ break;
+ case I8254_MODE5:
+ mode = APCI3120_MODE_TIMER2_AS_WDOG;
+ timer_mode = APCI3120_TIMER_MODE5;
+ break;
+ default:
+ return -EINVAL;
+ }
+ apci3120_timer_enable(dev, 2, false);
+ apci3120_clr_timer2_interrupt(dev);
+ apci3120_timer_set_mode(dev, 2, timer_mode);
+ devpriv->mode &= ~APCI3120_MODE_TIMER2_AS_MASK;
+ devpriv->mode |= mode;
+ outb(devpriv->mode, dev->iobase + APCI3120_MODE_REG);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return insn->n;
+}
+
+static int apci3120_timer_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ int i;
+
+ for (i = 0; i < insn->n; i++)
+ data[i] = apci3120_timer_read(dev, 2);
+
+ return insn->n;
+}
+
+static void apci3120_dma_alloc(struct comedi_device *dev)
+{
+ struct apci3120_private *devpriv = dev->private;
+ struct apci3120_dmabuf *dmabuf;
+ int order;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ dmabuf = &devpriv->dmabuf[i];
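+		/* try a 4-page buffer first, then fall back to 2 pages, then 1 */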
+ for (order = 2; order >= 0; order--) {
+ dmabuf->virt = dma_alloc_coherent(dev->hw_dev,
+ PAGE_SIZE << order,
+ &dmabuf->hw,
+ GFP_KERNEL);
+ if (dmabuf->virt)
+ break;
+ }
+ if (!dmabuf->virt)
+ break;
+ dmabuf->size = PAGE_SIZE << order;
+
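+		/* the first buffer enables DMA; the second enables double buffering */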
+ if (i == 0)
+ devpriv->use_dma = 1;
+ if (i == 1)
+ devpriv->use_double_buffer = 1;
+ }
+}
+
+static void apci3120_dma_free(struct comedi_device *dev)
+{
+ struct apci3120_private *devpriv = dev->private;
+ struct apci3120_dmabuf *dmabuf;
+ int i;
+
+ if (!devpriv)
+ return;
+
+ for (i = 0; i < 2; i++) {
+ dmabuf = &devpriv->dmabuf[i];
+ if (dmabuf->virt) {
+ dma_free_coherent(dev->hw_dev, dmabuf->size,
+ dmabuf->virt, dmabuf->hw);
+ }
+ }
+}
+
+static void apci3120_reset(struct comedi_device *dev)
+{
+ /* disable all interrupt sources */
+ outb(0, dev->iobase + APCI3120_MODE_REG);
+
+ /* disable all counters, ext trigger, and reset scan */
+ outw(0, dev->iobase + APCI3120_CTRL_REG);
+
+ /* clear interrupt status */
+ inw(dev->iobase + APCI3120_STATUS_REG);
}
static int apci3120_auto_attach(struct comedi_device *dev,
unsigned long context)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- const struct addi_board *this_board = NULL;
- struct addi_private *devpriv;
+ const struct apci3120_board *this_board = NULL;
+ struct apci3120_private *devpriv;
struct comedi_subdevice *s;
- int ret, order, i;
+ unsigned int status;
+ int ret;
if (context < ARRAY_SIZE(apci3120_boardtypes))
this_board = &apci3120_boardtypes[context];
if (!this_board)
return -ENODEV;
dev->board_ptr = this_board;
- dev->board_name = this_board->pc_DriverName;
+ dev->board_name = this_board->name;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
@@ -76,136 +999,100 @@ static int apci3120_auto_attach(struct comedi_device *dev,
pci_set_master(pcidev);
dev->iobase = pci_resource_start(pcidev, 1);
- devpriv->iobase = dev->iobase;
- devpriv->i_IobaseAmcc = pci_resource_start(pcidev, 0);
- devpriv->i_IobaseAddon = pci_resource_start(pcidev, 2);
- devpriv->i_IobaseReserved = pci_resource_start(pcidev, 3);
+ devpriv->amcc = pci_resource_start(pcidev, 0);
+ devpriv->addon = pci_resource_start(pcidev, 2);
+
+ apci3120_reset(dev);
if (pcidev->irq > 0) {
- ret = request_irq(pcidev->irq, v_ADDI_Interrupt, IRQF_SHARED,
+ ret = request_irq(pcidev->irq, apci3120_interrupt, IRQF_SHARED,
dev->board_name, dev);
- if (ret == 0)
+ if (ret == 0) {
dev->irq = pcidev->irq;
- }
- /* Allocate DMA buffers */
- for (i = 0; i < 2; i++) {
- for (order = 2; order >= 0; order--) {
- devpriv->ul_DmaBufferVirtual[i] =
- dma_alloc_coherent(dev->hw_dev, PAGE_SIZE << order,
- &devpriv->ul_DmaBufferHw[i],
- GFP_KERNEL);
-
- if (devpriv->ul_DmaBufferVirtual[i])
- break;
+ apci3120_dma_alloc(dev);
}
- if (!devpriv->ul_DmaBufferVirtual[i])
- break;
- devpriv->ui_DmaBufferSize[i] = PAGE_SIZE << order;
}
- if (devpriv->ul_DmaBufferVirtual[0])
- devpriv->us_UseDma = 1;
- if (devpriv->ul_DmaBufferVirtual[1])
- devpriv->b_DmaDoubleBuffer = 1;
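+	/* select the timer oscillator base according to the board revision */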
+ status = inw(dev->iobase + APCI3120_STATUS_REG);
+ if (APCI3120_STATUS_TO_VERSION(status) == APCI3120_REVB ||
+ context == BOARD_APCI3001)
+ devpriv->osc_base = APCI3120_REVB_OSC_BASE;
+ else
+ devpriv->osc_base = APCI3120_REVA_OSC_BASE;
ret = comedi_alloc_subdevices(dev, 5);
if (ret)
return ret;
- /* Allocate and Initialise AI Subdevice Structures */
+ /* Analog Input subdevice */
s = &dev->subdevices[0];
- dev->read_subdev = s;
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags =
- SDF_READABLE | SDF_COMMON | SDF_GROUND
- | SDF_DIFF;
- if (this_board->i_NbrAiChannel)
- s->n_chan = this_board->i_NbrAiChannel;
- else
- s->n_chan = this_board->i_NbrAiChannelDiff;
- s->maxdata = this_board->i_AiMaxdata;
- s->len_chanlist = this_board->i_AiChannelList;
- s->range_table = &range_apci3120_ai;
-
- s->insn_config = apci3120_ai_insn_config;
- s->insn_read = apci3120_ai_insn_read;
- s->do_cmdtest = apci3120_ai_cmdtest;
- s->do_cmd = apci3120_ai_cmd;
- s->cancel = apci3120_cancel;
-
- /* Allocate and Initialise AO Subdevice Structures */
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE | SDF_COMMON | SDF_GROUND | SDF_DIFF;
+ s->n_chan = 16;
+ s->maxdata = this_board->ai_is_16bit ? 0xffff : 0x0fff;
+ s->range_table = &apci3120_ai_range;
+ s->insn_read = apci3120_ai_insn_read;
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = s->n_chan;
+ s->do_cmdtest = apci3120_ai_cmdtest;
+ s->do_cmd = apci3120_ai_cmd;
+ s->cancel = apci3120_cancel;
+ }
+
+ /* Analog Output subdevice */
s = &dev->subdevices[1];
- if (this_board->i_NbrAoChannel) {
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITEABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = this_board->i_NbrAoChannel;
- s->maxdata = this_board->i_AoMaxdata;
- s->len_chanlist = this_board->i_NbrAoChannel;
- s->range_table = &range_apci3120_ao;
- s->insn_write = apci3120_ao_insn_write;
+ if (this_board->has_ao) {
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_COMMON;
+ s->n_chan = 8;
+ s->maxdata = 0x3fff;
+ s->range_table = &range_bipolar10;
+ s->insn_write = apci3120_ao_insn_write;
+
+ ret = comedi_alloc_subdev_readback(s);
+ if (ret)
+ return ret;
} else {
- s->type = COMEDI_SUBD_UNUSED;
+ s->type = COMEDI_SUBD_UNUSED;
}
- /* Allocate and Initialise DI Subdevice Structures */
+ /* Digital Input subdevice */
s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = this_board->i_NbrDiChannel;
- s->maxdata = 1;
- s->len_chanlist = this_board->i_NbrDiChannel;
- s->range_table = &range_digital;
- s->insn_bits = apci3120_di_insn_bits;
-
- /* Allocate and Initialise DO Subdevice Structures */
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 4;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = apci3120_di_insn_bits;
+
+ /* Digital Output subdevice */
s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags =
- SDF_READABLE | SDF_WRITEABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = this_board->i_NbrDoChannel;
- s->maxdata = this_board->i_DoMaxdata;
- s->len_chanlist = this_board->i_NbrDoChannel;
- s->range_table = &range_digital;
- s->insn_bits = apci3120_do_insn_bits;
-
- /* Allocate and Initialise Timer Subdevice Structures */
- s = &dev->subdevices[4];
- s->type = COMEDI_SUBD_TIMER;
- s->subdev_flags = SDF_WRITEABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = 1;
- s->maxdata = 0;
- s->len_chanlist = 1;
- s->range_table = &range_digital;
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 4;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = apci3120_do_insn_bits;
- s->insn_write = apci3120_write_insn_timer;
- s->insn_read = apci3120_read_insn_timer;
- s->insn_config = apci3120_config_insn_timer;
+ /* Timer subdevice */
+ s = &dev->subdevices[4];
+ s->type = COMEDI_SUBD_TIMER;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 1;
+ s->maxdata = 0x00ffffff;
+ s->insn_config = apci3120_timer_insn_config;
+ s->insn_read = apci3120_timer_insn_read;
- apci3120_reset(dev);
return 0;
}
static void apci3120_detach(struct comedi_device *dev)
{
- struct addi_private *devpriv = dev->private;
-
- if (dev->iobase)
- apci3120_reset(dev);
comedi_pci_detach(dev);
- if (devpriv) {
- unsigned int i;
-
- for (i = 0; i < 2; i++) {
- if (devpriv->ul_DmaBufferVirtual[i]) {
- dma_free_coherent(dev->hw_dev,
- devpriv->ui_DmaBufferSize[i],
- devpriv->
- ul_DmaBufferVirtual[i],
- devpriv->ul_DmaBufferHw[i]);
- }
- }
- }
+ apci3120_dma_free(dev);
}
static struct comedi_driver apci3120_driver = {
diff --git a/drivers/staging/comedi/drivers/addi_apci_3200.c b/drivers/staging/comedi/drivers/addi_apci_3200.c
deleted file mode 100644
index fe6897eff3d..00000000000
--- a/drivers/staging/comedi/drivers/addi_apci_3200.c
+++ /dev/null
@@ -1,125 +0,0 @@
-#include <linux/module.h>
-#include <linux/pci.h>
-
-#include <asm/i387.h>
-
-#include "../comedidev.h"
-#include "comedi_fc.h"
-#include "amcc_s5933.h"
-
-#include "addi-data/addi_common.h"
-
-static void fpu_begin(void)
-{
- kernel_fpu_begin();
-}
-
-static void fpu_end(void)
-{
- kernel_fpu_end();
-}
-
-#include "addi-data/addi_eeprom.c"
-#include "addi-data/hwdrv_apci3200.c"
-#include "addi-data/addi_common.c"
-
-enum apci3200_boardid {
- BOARD_APCI3200,
- BOARD_APCI3300,
-};
-
-static const struct addi_board apci3200_boardtypes[] = {
- [BOARD_APCI3200] = {
- .pc_DriverName = "apci3200",
- .i_IorangeBase1 = 256,
- .i_PCIEeprom = 1,
- .pc_EepromChip = "S5920",
- .i_NbrAiChannel = 16,
- .i_NbrAiChannelDiff = 8,
- .i_AiChannelList = 16,
- .i_AiMaxdata = 0x3ffff,
- .pr_AiRangelist = &range_apci3200_ai,
- .i_NbrDiChannel = 4,
- .i_NbrDoChannel = 4,
- .ui_MinAcquisitiontimeNs = 10000,
- .ui_MinDelaytimeNs = 100000,
- .interrupt = apci3200_interrupt,
- .reset = apci3200_reset,
- .ai_config = apci3200_ai_config,
- .ai_read = apci3200_ai_read,
- .ai_write = apci3200_ai_write,
- .ai_bits = apci3200_ai_bits_test,
- .ai_cmdtest = apci3200_ai_cmdtest,
- .ai_cmd = apci3200_ai_cmd,
- .ai_cancel = apci3200_cancel,
- .di_bits = apci3200_di_insn_bits,
- .do_bits = apci3200_do_insn_bits,
- },
- [BOARD_APCI3300] = {
- .pc_DriverName = "apci3300",
- .i_IorangeBase1 = 256,
- .i_PCIEeprom = 1,
- .pc_EepromChip = "S5920",
- .i_NbrAiChannelDiff = 8,
- .i_AiChannelList = 8,
- .i_AiMaxdata = 0x3ffff,
- .pr_AiRangelist = &range_apci3300_ai,
- .i_NbrDiChannel = 4,
- .i_NbrDoChannel = 4,
- .ui_MinAcquisitiontimeNs = 10000,
- .ui_MinDelaytimeNs = 100000,
- .interrupt = apci3200_interrupt,
- .reset = apci3200_reset,
- .ai_config = apci3200_ai_config,
- .ai_read = apci3200_ai_read,
- .ai_write = apci3200_ai_write,
- .ai_bits = apci3200_ai_bits_test,
- .ai_cmdtest = apci3200_ai_cmdtest,
- .ai_cmd = apci3200_ai_cmd,
- .ai_cancel = apci3200_cancel,
- .di_bits = apci3200_di_insn_bits,
- .do_bits = apci3200_do_insn_bits,
- },
-};
-
-static int apci3200_auto_attach(struct comedi_device *dev,
- unsigned long context)
-{
- const struct addi_board *board = NULL;
-
- if (context < ARRAY_SIZE(apci3200_boardtypes))
- board = &apci3200_boardtypes[context];
- if (!board)
- return -ENODEV;
- dev->board_ptr = board;
-
- return addi_auto_attach(dev, context);
-}
-
-static struct comedi_driver apci3200_driver = {
- .driver_name = "addi_apci_3200",
- .module = THIS_MODULE,
- .auto_attach = apci3200_auto_attach,
- .detach = i_ADDI_Detach,
-};
-
-static int apci3200_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &apci3200_driver, id->driver_data);
-}
-
-static const struct pci_device_id apci3200_pci_table[] = {
- { PCI_VDEVICE(ADDIDATA, 0x3000), BOARD_APCI3200 },
- { PCI_VDEVICE(ADDIDATA, 0x3007), BOARD_APCI3300 },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, apci3200_pci_table);
-
-static struct pci_driver apci3200_pci_driver = {
- .name = "addi_apci_3200",
- .id_table = apci3200_pci_table,
- .probe = apci3200_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(apci3200_driver, apci3200_pci_driver);
diff --git a/drivers/staging/comedi/drivers/addi_apci_3501.c b/drivers/staging/comedi/drivers/addi_apci_3501.c
index 010efa3fed6..a726efcea6a 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3501.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3501.c
@@ -357,12 +357,11 @@ static int apci3501_auto_attach(struct comedi_device *dev,
s = &dev->subdevices[0];
if (ao_n_chan) {
s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITEABLE | SDF_GROUND | SDF_COMMON;
+ s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_COMMON;
s->n_chan = ao_n_chan;
s->maxdata = 0x3fff;
s->range_table = &apci3501_ao_range;
s->insn_write = apci3501_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
@@ -383,7 +382,7 @@ static int apci3501_auto_attach(struct comedi_device *dev,
/* Initialize the digital output subdevice */
s = &dev->subdevices[2];
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITEABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 2;
s->maxdata = 1;
s->range_table = &range_digital;
@@ -392,7 +391,7 @@ static int apci3501_auto_attach(struct comedi_device *dev,
/* Initialize the timer/watchdog subdevice */
s = &dev->subdevices[3];
s->type = COMEDI_SUBD_TIMER;
- s->subdev_flags = SDF_WRITEABLE | SDF_GROUND | SDF_COMMON;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 1;
s->maxdata = 0;
s->len_chanlist = 1;
diff --git a/drivers/staging/comedi/drivers/addi_apci_3xxx.c b/drivers/staging/comedi/drivers/addi_apci_3xxx.c
index a296bd5b2c0..c173810a3b5 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3xxx.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3xxx.c
@@ -371,10 +371,10 @@ static irqreturn_t apci3xxx_irq_handler(int irq, void *d)
writel(status, dev->mmio + 16);
val = readl(dev->mmio + 28);
- comedi_buf_put(s, val);
+ comedi_buf_write_samples(s, &val, 1);
s->async->events |= COMEDI_CB_EOA;
- comedi_event(dev, s);
+ comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
@@ -849,12 +849,11 @@ static int apci3xxx_auto_attach(struct comedi_device *dev,
if (board->has_ao) {
s = &dev->subdevices[subdev];
s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITEABLE | SDF_GROUND | SDF_COMMON;
+ s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_COMMON;
s->n_chan = 4;
s->maxdata = 0x0fff;
s->range_table = &apci3xxx_ao_range;
s->insn_write = apci3xxx_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
@@ -880,7 +879,7 @@ static int apci3xxx_auto_attach(struct comedi_device *dev,
if (board->has_dig_out) {
s = &dev->subdevices[subdev];
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITEABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 4;
s->maxdata = 1;
s->range_table = &range_digital;
@@ -893,7 +892,7 @@ static int apci3xxx_auto_attach(struct comedi_device *dev,
if (board->has_ttl_io) {
s = &dev->subdevices[subdev];
s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITEABLE;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
s->n_chan = 24;
s->maxdata = 1;
s->io_bits = 0xff; /* channels 0-7 are always outputs */
diff --git a/drivers/staging/comedi/drivers/addi_tcw.h b/drivers/staging/comedi/drivers/addi_tcw.h
new file mode 100644
index 00000000000..8794d4cbbfb
--- /dev/null
+++ b/drivers/staging/comedi/drivers/addi_tcw.h
@@ -0,0 +1,56 @@
+#ifndef _ADDI_TCW_H
+#define _ADDI_TCW_H
+
+/*
+ * Following are the generic definitions for the ADDI-DATA timer/counter/
+ * watchdog (TCW) registers and bits. Some of the registers are not used
+ * depending on the use of the TCW.
+ */
+
+#define ADDI_TCW_VAL_REG 0x00
+
+#define ADDI_TCW_SYNC_REG 0x00
+#define ADDI_TCW_SYNC_CTR_TRIG (1 << 8)
+#define ADDI_TCW_SYNC_CTR_DIS (1 << 7)
+#define ADDI_TCW_SYNC_CTR_ENA (1 << 6)
+#define ADDI_TCW_SYNC_TIMER_TRIG (1 << 5)
+#define ADDI_TCW_SYNC_TIMER_DIS (1 << 4)
+#define ADDI_TCW_SYNC_TIMER_ENA (1 << 3)
+#define ADDI_TCW_SYNC_WDOG_TRIG (1 << 2)
+#define ADDI_TCW_SYNC_WDOG_DIS (1 << 1)
+#define ADDI_TCW_SYNC_WDOG_ENA (1 << 0)
+
+#define ADDI_TCW_RELOAD_REG 0x04
+
+#define ADDI_TCW_TIMEBASE_REG 0x08
+
+#define ADDI_TCW_CTRL_REG 0x0c
+#define ADDI_TCW_CTRL_EXT_CLK_STATUS (1 << 21)
+#define ADDI_TCW_CTRL_CASCADE (1 << 20)
+#define ADDI_TCW_CTRL_CNTR_ENA (1 << 19)
+#define ADDI_TCW_CTRL_CNT_UP (1 << 18)
+#define ADDI_TCW_CTRL_EXT_CLK(x) ((x) << 16)
+#define ADDI_TCW_CTRL_OUT(x) ((x) << 11)
+#define ADDI_TCW_CTRL_GATE (1 << 10)
+#define ADDI_TCW_CTRL_TRIG (1 << 9)
+#define ADDI_TCW_CTRL_EXT_GATE(x) ((x) << 7)
+#define ADDI_TCW_CTRL_EXT_TRIG(x) ((x) << 5)
+#define ADDI_TCW_CTRL_TIMER_ENA (1 << 4)
+#define ADDI_TCW_CTRL_RESET_ENA (1 << 3)
+#define ADDI_TCW_CTRL_WARN_ENA (1 << 2)
+#define ADDI_TCW_CTRL_IRQ_ENA (1 << 1)
+#define ADDI_TCW_CTRL_ENA (1 << 0)
+
+#define ADDI_TCW_STATUS_REG 0x10
+#define ADDI_TCW_STATUS_SOFT_CLR (1 << 3)
+#define ADDI_TCW_STATUS_SOFT_TRIG (1 << 1)
+#define ADDI_TCW_STATUS_OVERFLOW (1 << 0)
+
+#define ADDI_TCW_IRQ_REG 0x14
+#define ADDI_TCW_IRQ (1 << 0)
+
+#define ADDI_TCW_WARN_TIMEVAL_REG 0x18
+
+#define ADDI_TCW_WARN_TIMEBASE_REG 0x1c
+
+#endif
diff --git a/drivers/staging/comedi/drivers/addi_watchdog.c b/drivers/staging/comedi/drivers/addi_watchdog.c
index 23031feaa09..c5b082d4e51 100644
--- a/drivers/staging/comedi/drivers/addi_watchdog.c
+++ b/drivers/staging/comedi/drivers/addi_watchdog.c
@@ -20,21 +20,9 @@
#include <linux/module.h>
#include "../comedidev.h"
+#include "addi_tcw.h"
#include "addi_watchdog.h"
-/*
- * Register offsets/defines for the addi-data watchdog
- */
-#define ADDI_WDOG_REG 0x00
-#define ADDI_WDOG_RELOAD_REG 0x04
-#define ADDI_WDOG_TIMEBASE 0x08
-#define ADDI_WDOG_CTRL_REG 0x0c
-#define ADDI_WDOG_CTRL_ENABLE (1 << 0)
-#define ADDI_WDOG_CTRL_SW_TRIG (1 << 9)
-#define ADDI_WDOG_STATUS_REG 0x10
-#define ADDI_WDOG_STATUS_ENABLED (1 << 0)
-#define ADDI_WDOG_STATUS_SW_TRIG (1 << 1)
-
struct addi_watchdog_private {
unsigned long iobase;
unsigned int wdog_ctrl;
@@ -60,9 +48,9 @@ static int addi_watchdog_insn_config(struct comedi_device *dev,
switch (data[0]) {
case INSN_CONFIG_ARM:
- spriv->wdog_ctrl = ADDI_WDOG_CTRL_ENABLE;
+ spriv->wdog_ctrl = ADDI_TCW_CTRL_ENA;
reload = data[1] & s->maxdata;
- outl(reload, spriv->iobase + ADDI_WDOG_RELOAD_REG);
+ outl(reload, spriv->iobase + ADDI_TCW_RELOAD_REG);
/* Time base is 20ms, let the user know the timeout */
dev_info(dev->class_dev, "watchdog enabled, timeout:%dms\n",
@@ -75,7 +63,7 @@ static int addi_watchdog_insn_config(struct comedi_device *dev,
return -EINVAL;
}
- outl(spriv->wdog_ctrl, spriv->iobase + ADDI_WDOG_CTRL_REG);
+ outl(spriv->wdog_ctrl, spriv->iobase + ADDI_TCW_CTRL_REG);
return insn->n;
}
@@ -89,7 +77,7 @@ static int addi_watchdog_insn_read(struct comedi_device *dev,
int i;
for (i = 0; i < insn->n; i++)
- data[i] = inl(spriv->iobase + ADDI_WDOG_STATUS_REG);
+ data[i] = inl(spriv->iobase + ADDI_TCW_STATUS_REG);
return insn->n;
}
@@ -109,8 +97,8 @@ static int addi_watchdog_insn_write(struct comedi_device *dev,
/* "ping" the watchdog */
for (i = 0; i < insn->n; i++) {
- outl(spriv->wdog_ctrl | ADDI_WDOG_CTRL_SW_TRIG,
- spriv->iobase + ADDI_WDOG_CTRL_REG);
+ outl(spriv->wdog_ctrl | ADDI_TCW_CTRL_TRIG,
+ spriv->iobase + ADDI_TCW_CTRL_REG);
}
return insn->n;
@@ -118,8 +106,8 @@ static int addi_watchdog_insn_write(struct comedi_device *dev,
void addi_watchdog_reset(unsigned long iobase)
{
- outl(0x0, iobase + ADDI_WDOG_CTRL_REG);
- outl(0x0, iobase + ADDI_WDOG_RELOAD_REG);
+ outl(0x0, iobase + ADDI_TCW_CTRL_REG);
+ outl(0x0, iobase + ADDI_TCW_RELOAD_REG);
}
EXPORT_SYMBOL_GPL(addi_watchdog_reset);
@@ -134,7 +122,7 @@ int addi_watchdog_init(struct comedi_subdevice *s, unsigned long iobase)
spriv->iobase = iobase;
s->type = COMEDI_SUBD_TIMER;
- s->subdev_flags = SDF_WRITEABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 1;
s->maxdata = 0xff;
s->insn_config = addi_watchdog_insn_config;
diff --git a/drivers/staging/comedi/drivers/adl_pci6208.c b/drivers/staging/comedi/drivers/adl_pci6208.c
index 0ad46fe492c..528f15c25da 100644
--- a/drivers/staging/comedi/drivers/adl_pci6208.c
+++ b/drivers/staging/comedi/drivers/adl_pci6208.c
@@ -169,7 +169,6 @@ static int pci6208_auto_attach(struct comedi_device *dev,
s->maxdata = 0xffff;
s->range_table = &range_bipolar10;
s->insn_write = pci6208_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/adl_pci9111.c b/drivers/staging/comedi/drivers/adl_pci9111.c
index d18d8f21af2..47f6c0e9f01 100644
--- a/drivers/staging/comedi/drivers/adl_pci9111.c
+++ b/drivers/staging/comedi/drivers/adl_pci9111.c
@@ -133,8 +133,6 @@ static const struct comedi_lrange pci9111_ai_range = {
struct pci9111_private_data {
unsigned long lcr_io_base;
- int stop_counter;
-
unsigned int scan_delay;
unsigned int chunk_counter;
unsigned int chunk_num_samples;
@@ -404,12 +402,6 @@ static int pci9111_ai_do_cmd(struct comedi_device *dev,
outb(CR_RANGE(cmd->chanlist[0]) & PCI9111_AI_RANGE_MASK,
dev->iobase + PCI9111_AI_RANGE_STAT_REG);
- /* Set counter */
- if (cmd->stop_src == TRIG_COUNT)
- dev_private->stop_counter = cmd->stop_arg * cmd->chanlist_len;
- else /* TRIG_NONE */
- dev_private->stop_counter = 0;
-
/* Set timer pacer */
dev_private->scan_delay = 0;
if (cmd->convert_src == TRIG_TIMER) {
@@ -435,7 +427,6 @@ static int pci9111_ai_do_cmd(struct comedi_device *dev,
}
outb(trig, dev->iobase + PCI9111_AI_TRIG_CTRL_REG);
- dev_private->stop_counter *= (1 + dev_private->scan_delay);
dev_private->chunk_counter = 0;
dev_private->chunk_num_samples = cmd->chanlist_len *
(1 + dev_private->scan_delay);
@@ -452,7 +443,7 @@ static void pci9111_ai_munge(struct comedi_device *dev,
unsigned int maxdata = s->maxdata;
unsigned int invert = (maxdata + 1) >> 1;
unsigned int shift = (maxdata == 0xffff) ? 0 : 4;
- unsigned int num_samples = num_bytes / sizeof(short);
+ unsigned int num_samples = comedi_bytes_to_samples(s, num_bytes);
unsigned int i;
for (i = 0; i < num_samples; i++)
@@ -464,22 +455,14 @@ static void pci9111_handle_fifo_half_full(struct comedi_device *dev,
{
struct pci9111_private_data *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int total = 0;
unsigned int samples;
- if (cmd->stop_src == TRIG_COUNT &&
- PCI9111_FIFO_HALF_SIZE > devpriv->stop_counter)
- samples = devpriv->stop_counter;
- else
- samples = PCI9111_FIFO_HALF_SIZE;
-
+ samples = comedi_nsamples_left(s, PCI9111_FIFO_HALF_SIZE);
insw(dev->iobase + PCI9111_AI_FIFO_REG,
devpriv->ai_bounce_buffer, samples);
if (devpriv->scan_delay < 1) {
- total = cfc_write_array_to_buffer(s,
- devpriv->ai_bounce_buffer,
- samples * sizeof(short));
+ comedi_buf_write_samples(s, devpriv->ai_bounce_buffer, samples);
} else {
unsigned int pos = 0;
unsigned int to_read;
@@ -492,17 +475,15 @@ static void pci9111_handle_fifo_half_full(struct comedi_device *dev,
if (to_read > samples - pos)
to_read = samples - pos;
- total += cfc_write_array_to_buffer(s,
+ comedi_buf_write_samples(s,
devpriv->ai_bounce_buffer + pos,
- to_read * sizeof(short));
+ to_read);
} else {
to_read = devpriv->chunk_num_samples -
devpriv->chunk_counter;
if (to_read > samples - pos)
to_read = samples - pos;
-
- total += to_read * sizeof(short);
}
pos += to_read;
@@ -513,8 +494,6 @@ static void pci9111_handle_fifo_half_full(struct comedi_device *dev,
devpriv->chunk_counter = 0;
}
}
-
- devpriv->stop_counter -= total / sizeof(short);
}
static irqreturn_t pci9111_interrupt(int irq, void *p_device)
@@ -561,7 +540,7 @@ static irqreturn_t pci9111_interrupt(int irq, void *p_device)
dev_dbg(dev->class_dev, "fifo overflow\n");
outb(0, dev->iobase + PCI9111_INT_CLR_REG);
async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
@@ -571,14 +550,14 @@ static irqreturn_t pci9111_interrupt(int irq, void *p_device)
pci9111_handle_fifo_half_full(dev, s);
}
- if (cmd->stop_src == TRIG_COUNT && dev_private->stop_counter == 0)
+ if (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg)
async->events |= COMEDI_CB_EOA;
outb(0, dev->iobase + PCI9111_INT_CLR_REG);
spin_unlock_irqrestore(&dev->spinlock, irq_flags);
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
@@ -752,7 +731,6 @@ static int pci9111_auto_attach(struct comedi_device *dev,
s->len_chanlist = 1;
s->range_table = &range_bipolar10;
s->insn_write = pci9111_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
@@ -768,7 +746,7 @@ static int pci9111_auto_attach(struct comedi_device *dev,
s = &dev->subdevices[3];
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 16;
s->maxdata = 1;
s->range_table = &range_digital;
diff --git a/drivers/staging/comedi/drivers/adl_pci9118.c b/drivers/staging/comedi/drivers/adl_pci9118.c
index e18fd9569a2..26603582e71 100644
--- a/drivers/staging/comedi/drivers/adl_pci9118.c
+++ b/drivers/staging/comedi/drivers/adl_pci9118.c
@@ -221,7 +221,6 @@ struct pci9118_private {
unsigned char int_ctrl;
unsigned char ai_cfg;
unsigned int ai_do; /* what do AI? 0=nothing, 1 to 4 mode */
- unsigned int ai_act_scan; /* how many scans we finished */
unsigned int ai_n_realscanlen; /*
* what we must transfer for one
* outgoing scan include front/back adds
@@ -447,51 +446,112 @@ static void interrupt_pci9118_ai_mode4_switch(struct comedi_device *dev,
outl(devpriv->ai_cfg, dev->iobase + PCI9118_AI_CFG_REG);
}
-static unsigned int defragment_dma_buffer(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned short *dma_buffer,
- unsigned int num_samples)
+static unsigned int valid_samples_in_act_dma_buf(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int n_raw_samples)
{
struct pci9118_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int i = 0, j = 0;
- unsigned int start_pos = devpriv->ai_add_front,
- stop_pos = devpriv->ai_add_front + cmd->chanlist_len;
- unsigned int raw_scanlen = devpriv->ai_add_front + cmd->chanlist_len +
- devpriv->ai_add_back;
+ unsigned int start_pos = devpriv->ai_add_front;
+ unsigned int stop_pos = start_pos + cmd->chanlist_len;
+ unsigned int span_len = stop_pos + devpriv->ai_add_back;
+ unsigned int dma_pos = devpriv->ai_act_dmapos;
+ unsigned int whole_spans, n_samples, x;
- for (i = 0; i < num_samples; i++) {
- if (devpriv->ai_act_dmapos >= start_pos &&
- devpriv->ai_act_dmapos < stop_pos) {
- dma_buffer[j++] = dma_buffer[i];
+ if (span_len == cmd->chanlist_len)
+ return n_raw_samples; /* use all samples */
+
+ /*
+ * Not all samples are to be used. Buffer contents consist of a
+ * possibly non-whole number of spans and a region of each span
+ * is to be used.
+ *
+ * Account for samples in whole number of spans.
+ */
+ whole_spans = n_raw_samples / span_len;
+ n_samples = whole_spans * cmd->chanlist_len;
+ n_raw_samples -= whole_spans * span_len;
+
+ /*
+ * Deal with remaining samples which could overlap up to two spans.
+ */
+ while (n_raw_samples) {
+ if (dma_pos < start_pos) {
+ /* Skip samples before start position. */
+ x = start_pos - dma_pos;
+ if (x > n_raw_samples)
+ x = n_raw_samples;
+ dma_pos += x;
+ n_raw_samples -= x;
+ if (!n_raw_samples)
+ break;
}
- devpriv->ai_act_dmapos++;
- devpriv->ai_act_dmapos %= raw_scanlen;
+ if (dma_pos < stop_pos) {
+ /* Include samples before stop position. */
+ x = stop_pos - dma_pos;
+ if (x > n_raw_samples)
+ x = n_raw_samples;
+ n_samples += x;
+ dma_pos += x;
+ n_raw_samples -= x;
+ }
+ /* Advance to next span. */
+ start_pos += span_len;
+ stop_pos += span_len;
}
-
- return j;
+ return n_samples;
}
-static int move_block_from_dma(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned short *dma_buffer,
- unsigned int num_samples)
+static void move_block_from_dma(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned short *dma_buffer,
+ unsigned int n_raw_samples)
{
struct pci9118_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int num_bytes;
-
- num_samples = defragment_dma_buffer(dev, s, dma_buffer, num_samples);
- devpriv->ai_act_scan +=
- (s->async->cur_chan + num_samples) / cmd->scan_end_arg;
- s->async->cur_chan += num_samples;
- s->async->cur_chan %= cmd->scan_end_arg;
- num_bytes =
- cfc_write_array_to_buffer(s, dma_buffer,
- num_samples * sizeof(short));
- if (num_bytes < num_samples * sizeof(short))
- return -1;
- return 0;
+ unsigned int start_pos = devpriv->ai_add_front;
+ unsigned int stop_pos = start_pos + cmd->chanlist_len;
+ unsigned int span_len = stop_pos + devpriv->ai_add_back;
+ unsigned int dma_pos = devpriv->ai_act_dmapos;
+ unsigned int x;
+
+ if (span_len == cmd->chanlist_len) {
+ /* All samples are to be copied. */
+ comedi_buf_write_samples(s, dma_buffer, n_raw_samples);
+ dma_pos += n_raw_samples;
+ } else {
+ /*
+ * Not all samples are to be copied. Buffer contents consist
+ * of a possibly non-whole number of spans and a region of
+ * each span is to be copied.
+ */
+ while (n_raw_samples) {
+ if (dma_pos < start_pos) {
+ /* Skip samples before start position. */
+ x = start_pos - dma_pos;
+ if (x > n_raw_samples)
+ x = n_raw_samples;
+ dma_pos += x;
+ n_raw_samples -= x;
+ if (!n_raw_samples)
+ break;
+ }
+ if (dma_pos < stop_pos) {
+ /* Copy samples before stop position. */
+ x = stop_pos - dma_pos;
+ if (x > n_raw_samples)
+ x = n_raw_samples;
+ comedi_buf_write_samples(s, dma_buffer, x);
+ dma_pos += x;
+ n_raw_samples -= x;
+ }
+ /* Advance to next span. */
+ start_pos += span_len;
+ stop_pos += span_len;
+ }
+ }
+ /* Update position in span for next time. */
+ devpriv->ai_act_dmapos = dma_pos % span_len;
}
static void pci9118_exttrg_enable(struct comedi_device *dev, bool enable)
@@ -578,9 +638,7 @@ static int pci9118_ai_cancel(struct comedi_device *dev,
devpriv->ai_do = 0;
devpriv->usedma = 0;
- devpriv->ai_act_scan = 0;
devpriv->ai_act_dmapos = 0;
- s->async->cur_chan = 0;
s->async->inttrig = NULL;
devpriv->ai_neverending = 0;
devpriv->dma_actbuf = 0;
@@ -594,8 +652,9 @@ static void pci9118_ai_munge(struct comedi_device *dev,
unsigned int start_chan_index)
{
struct pci9118_private *devpriv = dev->private;
- unsigned int i, num_samples = num_bytes / sizeof(short);
unsigned short *array = data;
+ unsigned int num_samples = comedi_bytes_to_samples(s, num_bytes);
+ unsigned int i;
for (i = 0; i < num_samples; i++) {
if (devpriv->usedma)
@@ -617,17 +676,11 @@ static void interrupt_pci9118_ai_onesample(struct comedi_device *dev,
sampl = inl(dev->iobase + PCI9118_AI_FIFO_REG);
- cfc_write_to_buffer(s, sampl);
- s->async->cur_chan++;
- if (s->async->cur_chan >= cmd->scan_end_arg) {
- /* one scan done */
- s->async->cur_chan %= cmd->scan_end_arg;
- devpriv->ai_act_scan++;
- if (!devpriv->ai_neverending) {
- /* all data sampled? */
- if (devpriv->ai_act_scan >= cmd->stop_arg)
- s->async->events |= COMEDI_CB_EOA;
- }
+ comedi_buf_write_samples(s, &sampl, 1);
+
+ if (!devpriv->ai_neverending) {
+ if (s->async->scans_done >= cmd->stop_arg)
+ s->async->events |= COMEDI_CB_EOA;
}
}
@@ -637,39 +690,37 @@ static void interrupt_pci9118_ai_dma(struct comedi_device *dev,
struct pci9118_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
struct pci9118_dmabuf *dmabuf = &devpriv->dmabuf[devpriv->dma_actbuf];
- unsigned int next_dma_buf, samplesinbuf, sampls, m;
+ unsigned int n_all = comedi_bytes_to_samples(s, dmabuf->use_size);
+ unsigned int n_valid;
+ bool more_dma;
- samplesinbuf = dmabuf->use_size >> 1; /* number of received samples */
+	/* determine whether there are more DMA buffers to do after this one */
+ n_valid = valid_samples_in_act_dma_buf(dev, s, n_all);
+ more_dma = n_valid < comedi_nsamples_left(s, n_valid + 1);
- if (devpriv->dma_doublebuf) { /*
- * switch DMA buffers if is used
- * double buffering
- */
- next_dma_buf = 1 - devpriv->dma_actbuf;
- pci9118_amcc_setup_dma(dev, next_dma_buf);
- if (devpriv->ai_do == 4)
- interrupt_pci9118_ai_mode4_switch(dev, next_dma_buf);
+ /* switch DMA buffers and restart DMA if double buffering */
+ if (more_dma && devpriv->dma_doublebuf) {
+ devpriv->dma_actbuf = 1 - devpriv->dma_actbuf;
+ pci9118_amcc_setup_dma(dev, devpriv->dma_actbuf);
+ if (devpriv->ai_do == 4) {
+ interrupt_pci9118_ai_mode4_switch(dev,
+ devpriv->dma_actbuf);
+ }
}
- if (samplesinbuf) {
- /* how many samples is to end of buffer */
- m = s->async->prealloc_bufsz >> 1;
- sampls = m;
- move_block_from_dma(dev, s, dmabuf->virt, samplesinbuf);
- m = m - sampls; /* m=how many samples was transferred */
- }
+ if (n_all)
+ move_block_from_dma(dev, s, dmabuf->virt, n_all);
if (!devpriv->ai_neverending) {
- /* all data sampled? */
- if (devpriv->ai_act_scan >= cmd->stop_arg)
+ if (s->async->scans_done >= cmd->stop_arg)
s->async->events |= COMEDI_CB_EOA;
}
- if (devpriv->dma_doublebuf) {
- /* switch dma buffers */
- devpriv->dma_actbuf = 1 - devpriv->dma_actbuf;
- } else {
- /* restart DMA if is not used double buffering */
+ if (s->async->events & COMEDI_CB_CANCEL_MASK)
+ more_dma = false;
+
+ /* restart DMA if not double buffering */
+ if (more_dma && !devpriv->dma_doublebuf) {
pci9118_amcc_setup_dma(dev, 0);
if (devpriv->ai_do == 4)
interrupt_pci9118_ai_mode4_switch(dev, 0);
@@ -766,7 +817,7 @@ static irqreturn_t pci9118_interrupt(int irq, void *d)
interrupt_pci9118_ai_onesample(dev, s);
interrupt_exit:
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
@@ -1123,9 +1174,7 @@ static int pci9118_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
inl(dev->iobase + PCI9118_AI_STATUS_REG);
inl(dev->iobase + PCI9118_INT_CTRL_REG);
- devpriv->ai_act_scan = 0;
devpriv->ai_act_dmapos = 0;
- s->async->cur_chan = 0;
if (devpriv->usedma) {
Compute_and_setup_dma(dev, s);
@@ -1624,7 +1673,6 @@ static int pci9118_common_attach(struct comedi_device *dev,
s->maxdata = 0x0fff;
s->range_table = &range_bipolar10;
s->insn_write = pci9118_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
index 5539bd29486..d02df7d0c62 100644
--- a/drivers/staging/comedi/drivers/adv_pci1710.c
+++ b/drivers/staging/comedi/drivers/adv_pci1710.c
@@ -298,7 +298,6 @@ static const struct boardtype boardtypes[] = {
struct pci1710_private {
unsigned int CntrlReg; /* Control register */
- unsigned int ai_act_scan; /* how many scans we finished */
unsigned char ai_et;
unsigned int ai_et_CntrlReg;
unsigned int ai_et_MuxVal;
@@ -730,16 +729,12 @@ static int pci171x_ai_cancel(struct comedi_device *dev,
break;
}
- devpriv->ai_act_scan = 0;
- s->async->cur_chan = 0;
-
return 0;
}
static void pci1710_handle_every_sample(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- struct pci1710_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
unsigned int status;
unsigned int val;
@@ -749,14 +744,14 @@ static void pci1710_handle_every_sample(struct comedi_device *dev,
if (status & Status_FE) {
dev_dbg(dev->class_dev, "A/D FIFO empty (%4x)\n", status);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return;
}
if (status & Status_FF) {
dev_dbg(dev->class_dev,
"A/D FIFO Full status (Fatal Error!) (%4x)\n", status);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return;
}
@@ -770,27 +765,19 @@ static void pci1710_handle_every_sample(struct comedi_device *dev,
break;
}
- comedi_buf_put(s, val & s->maxdata);
-
- s->async->cur_chan++;
- if (s->async->cur_chan >= cmd->chanlist_len)
- s->async->cur_chan = 0;
+ val &= s->maxdata;
+ comedi_buf_write_samples(s, &val, 1);
-
- if (s->async->cur_chan == 0) { /* one scan done */
- devpriv->ai_act_scan++;
- if (cmd->stop_src == TRIG_COUNT &&
- devpriv->ai_act_scan >= cmd->stop_arg) {
- /* all data sampled */
- s->async->events |= COMEDI_CB_EOA;
- break;
- }
+ if (cmd->stop_src == TRIG_COUNT &&
+ s->async->scans_done >= cmd->stop_arg) {
+ s->async->events |= COMEDI_CB_EOA;
+ break;
}
}
outb(0, dev->iobase + PCI171x_CLRINT); /* clear our INT request */
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
}
/*
@@ -799,8 +786,6 @@ static void pci1710_handle_every_sample(struct comedi_device *dev,
static int move_block_from_fifo(struct comedi_device *dev,
struct comedi_subdevice *s, int n, int turn)
{
- struct pci1710_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
unsigned int val;
int ret;
int i;
@@ -814,13 +799,8 @@ static int move_block_from_fifo(struct comedi_device *dev,
return ret;
}
- comedi_buf_put(s, val & s->maxdata);
-
- s->async->cur_chan++;
- if (s->async->cur_chan >= cmd->chanlist_len) {
- s->async->cur_chan = 0;
- devpriv->ai_act_scan++;
- }
+ val &= s->maxdata;
+ comedi_buf_write_samples(s, &val, 1);
}
return 0;
}
@@ -829,48 +809,47 @@ static void pci1710_handle_fifo(struct comedi_device *dev,
struct comedi_subdevice *s)
{
const struct boardtype *this_board = dev->board_ptr;
- struct pci1710_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
- int m, samplesinbuf;
+ unsigned int nsamples;
+ unsigned int m;
m = inw(dev->iobase + PCI171x_STATUS);
if (!(m & Status_FH)) {
dev_dbg(dev->class_dev, "A/D FIFO not half full! (%4x)\n", m);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return;
}
if (m & Status_FF) {
dev_dbg(dev->class_dev,
"A/D FIFO Full status (Fatal Error!) (%4x)\n", m);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return;
}
- samplesinbuf = this_board->fifo_half_size;
- if (samplesinbuf * sizeof(short) >= s->async->prealloc_bufsz) {
- m = s->async->prealloc_bufsz / sizeof(short);
+ nsamples = this_board->fifo_half_size;
+ if (comedi_samples_to_bytes(s, nsamples) >= s->async->prealloc_bufsz) {
+ m = comedi_bytes_to_samples(s, s->async->prealloc_bufsz);
if (move_block_from_fifo(dev, s, m, 0))
return;
- samplesinbuf -= m;
+ nsamples -= m;
}
- if (samplesinbuf) {
- if (move_block_from_fifo(dev, s, samplesinbuf, 1))
+ if (nsamples) {
+ if (move_block_from_fifo(dev, s, nsamples, 1))
return;
}
if (cmd->stop_src == TRIG_COUNT &&
- devpriv->ai_act_scan >= cmd->stop_arg) {
- /* all data sampled */
+ s->async->scans_done >= cmd->stop_arg) {
s->async->events |= COMEDI_CB_EOA;
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return;
}
outb(0, dev->iobase + PCI171x_CLRINT); /* clear our INT request */
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
}
/*
@@ -928,9 +907,6 @@ static int pci171x_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
outb(0, dev->iobase + PCI171x_CLRFIFO);
outb(0, dev->iobase + PCI171x_CLRINT);
- devpriv->ai_act_scan = 0;
- s->async->cur_chan = 0;
-
devpriv->CntrlReg &= Control_CNT0;
if ((cmd->flags & CMDF_WAKE_EOS) == 0)
devpriv->CntrlReg |= Control_ONEFH;
@@ -1208,7 +1184,7 @@ static int pci1710_auto_attach(struct comedi_device *dev,
if (this_board->n_dichan) {
s = &dev->subdevices[subdev];
s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_COMMON;
+ s->subdev_flags = SDF_READABLE;
s->n_chan = this_board->n_dichan;
s->maxdata = 1;
s->len_chanlist = this_board->n_dichan;
@@ -1220,7 +1196,7 @@ static int pci1710_auto_attach(struct comedi_device *dev,
if (this_board->n_dochan) {
s = &dev->subdevices[subdev];
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_COMMON;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = this_board->n_dochan;
s->maxdata = 1;
s->len_chanlist = this_board->n_dochan;
diff --git a/drivers/staging/comedi/drivers/adv_pci1723.c b/drivers/staging/comedi/drivers/adv_pci1723.c
index 1610e2b406f..65f854e1eb6 100644
--- a/drivers/staging/comedi/drivers/adv_pci1723.c
+++ b/drivers/staging/comedi/drivers/adv_pci1723.c
@@ -1,205 +1,124 @@
/*
- comedi/drivers/pci1723.c
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
+ * adv_pci1723.c
+ * Comedi driver for the Advantech PCI-1723 card.
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
/*
-Driver: adv_pci1723
-Description: Advantech PCI-1723
-Author: yonggang <rsmgnu@gmail.com>, Ian Abbott <abbotti@mev.co.uk>
-Devices: [Advantech] PCI-1723 (adv_pci1723)
-Updated: Mon, 14 Apr 2008 15:12:56 +0100
-Status: works
-
-Configuration Options:
- [0] - PCI bus of device (optional)
- [1] - PCI slot of device (optional)
-
- If bus/slot is not specified, the first supported
- PCI device found will be used.
-
-Subdevice 0 is 8-channel AO, 16-bit, range +/- 10 V.
-
-Subdevice 1 is 16-channel DIO. The channels are configurable as input or
-output in 2 groups (0 to 7, 8 to 15). Configuring any channel implicitly
-configures all channels in the same group.
-
-TODO:
-
-1. Add the two milliamp ranges to the AO subdevice (0 to 20 mA, 4 to 20 mA).
-2. Read the initial ranges and values of the AO subdevice at start-up instead
- of reinitializing them.
-3. Implement calibration.
-*/
+ * Driver: adv_pci1723
+ * Description: Advantech PCI-1723
+ * Author: yonggang <rsmgnu@gmail.com>, Ian Abbott <abbotti@mev.co.uk>
+ * Devices: (Advantech) PCI-1723 [adv_pci1723]
+ * Updated: Mon, 14 Apr 2008 15:12:56 +0100
+ * Status: works
+ *
+ * Configuration Options: not applicable, uses comedi PCI auto config
+ *
+ * Subdevice 0 is 8-channel AO, 16-bit, range +/- 10 V.
+ *
+ * Subdevice 1 is 16-channel DIO. The channels are configurable as
+ * input or output in 2 groups (0 to 7, 8 to 15). Configuring any
+ * channel implicitly configures all channels in the same group.
+ *
+ * TODO:
+ * 1. Add the two milliamp ranges to the AO subdevice (0 to 20 mA,
+ * 4 to 20 mA).
+ * 2. Read the initial ranges and values of the AO subdevice at
+ * start-up instead of reinitializing them.
+ * 3. Implement calibration.
+ */
#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
-/* all the registers for the pci1723 board */
-#define PCI1723_DA(N) ((N)<<1) /* W: D/A register N (0 to 7) */
-
-#define PCI1723_SYN_SET 0x12 /* synchronized set register */
-#define PCI1723_ALL_CHNNELE_SYN_STROBE 0x12
- /* synchronized status register */
-
-#define PCI1723_RANGE_CALIBRATION_MODE 0x14
- /* range and calibration mode */
-#define PCI1723_RANGE_CALIBRATION_STATUS 0x14
- /* range and calibration status */
-
-#define PCI1723_CONTROL_CMD_CALIBRATION_FUN 0x16
- /*
- * SADC control command for
- * calibration function
- */
-#define PCI1723_STATUS_CMD_CALIBRATION_FUN 0x16
- /*
- * SADC control status for
- * calibration function
- */
-
-#define PCI1723_CALIBRATION_PARA_STROBE 0x18
- /* Calibration parameter strobe */
-
-#define PCI1723_DIGITAL_IO_PORT_SET 0x1A /* Digital I/O port setting */
-#define PCI1723_DIGITAL_IO_PORT_MODE 0x1A /* Digital I/O port mode */
-
-#define PCI1723_WRITE_DIGITAL_OUTPUT_CMD 0x1C
- /* Write digital output command */
-#define PCI1723_READ_DIGITAL_INPUT_DATA 0x1C /* Read digital input data */
-
-#define PCI1723_WRITE_CAL_CMD 0x1E /* Write calibration command */
-#define PCI1723_READ_CAL_STATUS 0x1E /* Read calibration status */
-
-#define PCI1723_SYN_STROBE 0x20 /* Synchronized strobe */
-
-#define PCI1723_RESET_ALL_CHN_STROBE 0x22
- /* Reset all D/A channels strobe */
-
-#define PCI1723_RESET_CAL_CONTROL_STROBE 0x24
- /*
- * Reset the calibration
- * controller strobe
- */
-
-#define PCI1723_CHANGE_CHA_OUTPUT_TYPE_STROBE 0x26
- /*
- * Change D/A channels output
- * type strobe
- */
-
-#define PCI1723_SELECT_CALIBRATION 0x28 /* Select the calibration Ref_V */
-
-struct pci1723_private {
- unsigned char da_range[8]; /* D/A output range for each channel */
- unsigned short ao_data[8]; /* data output buffer */
-};
-
/*
- * The pci1723 card reset;
+ * PCI Bar 2 I/O Register map (dev->iobase)
*/
-static int pci1723_reset(struct comedi_device *dev)
+#define PCI1723_AO_REG(x) (0x00 + ((x) * 2))
+#define PCI1723_BOARD_ID_REG 0x10
+#define PCI1723_BOARD_ID_MASK (0xf << 0)
+#define PCI1723_SYNC_CTRL_REG 0x12
+#define PCI1723_SYNC_CTRL_ASYNC (0 << 0)
+#define PCI1723_SYNC_CTRL_SYNC (1 << 0)
+#define PCI1723_CTRL_REG 0x14
+#define PCI1723_CTRL_BUSY (1 << 15)
+#define PCI1723_CTRL_INIT (1 << 14)
+#define PCI1723_CTRL_SELF (1 << 8)
+#define PCI1723_CTRL_IDX(x) (((x) & 0x3) << 6)
+#define PCI1723_CTRL_RANGE(x) (((x) & 0x3) << 4)
+#define PCI1723_CTRL_GAIN (0 << 3)
+#define PCI1723_CTRL_OFFSET (1 << 3)
+#define PCI1723_CTRL_CHAN(x) (((x) & 0x7) << 0)
+#define PCI1723_CALIB_CTRL_REG 0x16
+#define PCI1723_CALIB_CTRL_CS (1 << 2)
+#define PCI1723_CALIB_CTRL_DAT (1 << 1)
+#define PCI1723_CALIB_CTRL_CLK (1 << 0)
+#define PCI1723_CALIB_STROBE_REG 0x18
+#define PCI1723_DIO_CTRL_REG 0x1a
+#define PCI1723_DIO_CTRL_HDIO (1 << 1)
+#define PCI1723_DIO_CTRL_LDIO (1 << 0)
+#define PCI1723_DIO_DATA_REG 0x1c
+#define PCI1723_CALIB_DATA_REG 0x1e
+#define PCI1723_SYNC_STROBE_REG 0x20
+#define PCI1723_RESET_AO_STROBE_REG 0x22
+#define PCI1723_RESET_CALIB_STROBE_REG 0x24
+#define PCI1723_RANGE_STROBE_REG 0x26
+#define PCI1723_VREF_REG 0x28
+#define PCI1723_VREF_NEG10V (0 << 0)
+#define PCI1723_VREF_0V (1 << 0)
+#define PCI1723_VREF_POS10V (3 << 0)
+
+static int pci1723_ao_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct pci1723_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
int i;
- outw(0x01, dev->iobase + PCI1723_SYN_SET);
- /* set synchronous output mode */
-
- for (i = 0; i < 8; i++) {
- /* set all outputs to 0V */
- devpriv->ao_data[i] = 0x8000;
- outw(devpriv->ao_data[i], dev->iobase + PCI1723_DA(i));
- /* set all ranges to +/- 10V */
- devpriv->da_range[i] = 0;
- outw(((devpriv->da_range[i] << 4) | i),
- PCI1723_RANGE_CALIBRATION_MODE);
- }
-
- outw(0, dev->iobase + PCI1723_CHANGE_CHA_OUTPUT_TYPE_STROBE);
- /* update ranges */
- outw(0, dev->iobase + PCI1723_SYN_STROBE); /* update outputs */
-
- /* set asynchronous output mode */
- outw(0, dev->iobase + PCI1723_SYN_SET);
-
- return 0;
-}
-
-static int pci1723_insn_read_ao(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct pci1723_private *devpriv = dev->private;
- int n, chan;
-
- chan = CR_CHAN(insn->chanspec);
- for (n = 0; n < insn->n; n++)
- data[n] = devpriv->ao_data[chan];
+ for (i = 0; i < insn->n; i++) {
+ unsigned int val = data[i];
- return n;
-}
-
-/*
- analog data output;
-*/
-static int pci1723_ao_write_winsn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct pci1723_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- int n;
-
- for (n = 0; n < insn->n; n++) {
- devpriv->ao_data[chan] = data[n];
- outw(data[n], dev->iobase + PCI1723_DA(chan));
+ outw(val, dev->iobase + PCI1723_AO_REG(chan));
+ s->readback[chan] = val;
}
- return n;
+ return insn->n;
}
-/*
- digital i/o config/query
-*/
static int pci1723_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int mask;
- unsigned short mode;
+ unsigned int mask = (chan < 8) ? 0x00ff : 0xff00;
+ unsigned short mode = 0x0000; /* assume output */
int ret;
- if (chan < 8)
- mask = 0x00ff;
- else
- mask = 0xff00;
-
ret = comedi_dio_insn_config(dev, s, insn, data, mask);
if (ret)
return ret;
- /* update hardware DIO mode */
- mode = 0x0000; /* assume output */
if (!(s->io_bits & 0x00ff))
- mode |= 0x0001; /* low byte input */
+ mode |= PCI1723_DIO_CTRL_LDIO; /* low byte input */
if (!(s->io_bits & 0xff00))
- mode |= 0x0002; /* high byte input */
- outw(mode, dev->iobase + PCI1723_DIGITAL_IO_PORT_SET);
+ mode |= PCI1723_DIO_CTRL_HDIO; /* high byte input */
+ outw(mode, dev->iobase + PCI1723_DIO_CTRL_REG);
return insn->n;
}
@@ -210,24 +129,21 @@ static int pci1723_dio_insn_bits(struct comedi_device *dev,
unsigned int *data)
{
if (comedi_dio_update_state(s, data))
- outw(s->state, dev->iobase + PCI1723_WRITE_DIGITAL_OUTPUT_CMD);
+ outw(s->state, dev->iobase + PCI1723_DIO_DATA_REG);
- data[1] = inw(dev->iobase + PCI1723_READ_DIGITAL_INPUT_DATA);
+ data[1] = inw(dev->iobase + PCI1723_DIO_DATA_REG);
return insn->n;
}
static int pci1723_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
+ unsigned long context_unused)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- struct pci1723_private *devpriv;
struct comedi_subdevice *s;
+ unsigned int val;
int ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
+ int i;
ret = comedi_pci_enable(dev);
if (ret)
@@ -239,61 +155,57 @@ static int pci1723_auto_attach(struct comedi_device *dev,
return ret;
s = &dev->subdevices[0];
- dev->write_subdev = s;
s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITEABLE | SDF_GROUND | SDF_COMMON;
+ s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_COMMON;
s->n_chan = 8;
s->maxdata = 0xffff;
- s->len_chanlist = 8;
s->range_table = &range_bipolar10;
- s->insn_write = pci1723_ao_write_winsn;
- s->insn_read = pci1723_insn_read_ao;
+ s->insn_write = pci1723_ao_insn_write;
+
+ ret = comedi_alloc_subdev_readback(s);
+ if (ret)
+ return ret;
+
+ /* synchronously reset all analog outputs to 0V, +/-10V range */
+ outw(PCI1723_SYNC_CTRL_SYNC, dev->iobase + PCI1723_SYNC_CTRL_REG);
+ for (i = 0; i < s->n_chan; i++) {
+ outw(PCI1723_CTRL_RANGE(0) | PCI1723_CTRL_CHAN(i),
+		     dev->iobase + PCI1723_CTRL_REG);
+ outw(0, dev->iobase + PCI1723_RANGE_STROBE_REG);
+
+ outw(0x8000, dev->iobase + PCI1723_AO_REG(i));
+ s->readback[i] = 0x8000;
+ }
+ outw(0, dev->iobase + PCI1723_SYNC_STROBE_REG);
+
+	/* disable synchronous control */
+ outw(PCI1723_SYNC_CTRL_ASYNC, dev->iobase + PCI1723_SYNC_CTRL_REG);
s = &dev->subdevices[1];
s->type = COMEDI_SUBD_DIO;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
s->n_chan = 16;
s->maxdata = 1;
- s->len_chanlist = 16;
s->range_table = &range_digital;
s->insn_config = pci1723_dio_insn_config;
s->insn_bits = pci1723_dio_insn_bits;
- /* read DIO config */
- switch (inw(dev->iobase + PCI1723_DIGITAL_IO_PORT_MODE) & 0x03) {
- case 0x00: /* low byte output, high byte output */
- s->io_bits = 0xFFFF;
- break;
- case 0x01: /* low byte input, high byte output */
- s->io_bits = 0xFF00;
- break;
- case 0x02: /* low byte output, high byte input */
- s->io_bits = 0x00FF;
- break;
- case 0x03: /* low byte input, high byte input */
- s->io_bits = 0x0000;
- break;
- }
- /* read DIO port state */
- s->state = inw(dev->iobase + PCI1723_READ_DIGITAL_INPUT_DATA);
-
- pci1723_reset(dev);
+ /* get initial DIO direction and state */
+ val = inw(dev->iobase + PCI1723_DIO_CTRL_REG);
+ if (!(val & PCI1723_DIO_CTRL_LDIO))
+ s->io_bits |= 0x00ff; /* low byte output */
+ if (!(val & PCI1723_DIO_CTRL_HDIO))
+ s->io_bits |= 0xff00; /* high byte output */
+ s->state = inw(dev->iobase + PCI1723_DIO_DATA_REG);
return 0;
}
-static void pci1723_detach(struct comedi_device *dev)
-{
- if (dev->iobase)
- pci1723_reset(dev);
- comedi_pci_detach(dev);
-}
-
static struct comedi_driver adv_pci1723_driver = {
.driver_name = "adv_pci1723",
.module = THIS_MODULE,
.auto_attach = pci1723_auto_attach,
- .detach = pci1723_detach,
+ .detach = comedi_pci_detach,
};
static int adv_pci1723_pci_probe(struct pci_dev *dev,
@@ -318,5 +230,5 @@ static struct pci_driver adv_pci1723_pci_driver = {
module_comedi_pci_driver(adv_pci1723_driver, adv_pci1723_pci_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_DESCRIPTION("Advantech PCI-1723 Comedi driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/adv_pci1724.c b/drivers/staging/comedi/drivers/adv_pci1724.c
index 2697758b1ed..a8d28403262 100644
--- a/drivers/staging/comedi/drivers/adv_pci1724.c
+++ b/drivers/staging/comedi/drivers/adv_pci1724.c
@@ -1,122 +1,76 @@
/*
- comedi/drivers/adv_pci1724.c
- This is a driver for the Advantech PCI-1724U card.
-
- Author: Frank Mori Hess <fmh6jj@gmail.com>
- Copyright (C) 2013 GnuBIO Inc
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1997-8 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * adv_pci1724.c
+ * Comedi driver for the Advantech PCI-1724U card.
+ *
+ * Author: Frank Mori Hess <fmh6jj@gmail.com>
+ * Copyright (C) 2013 GnuBIO Inc
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1997-8 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
-
-Driver: adv_1724
-Description: Advantech PCI-1724U
-Author: Frank Mori Hess <fmh6jj@gmail.com>
-Status: works
-Updated: 2013-02-09
-Devices: [Advantech] PCI-1724U (adv_pci1724)
-
-Subdevice 0 is the analog output.
-Subdevice 1 is the offset calibration for the analog output.
-Subdevice 2 is the gain calibration for the analog output.
-
-The calibration offset and gains have quite a large effect
-on the analog output, so it is possible to adjust the analog output to
-have an output range significantly different from the board's
-nominal output ranges. For a calibrated +/- 10V range, the analog
-output's offset will be set somewhere near mid-range (0x2000) and its
-gain will be near maximum (0x3fff).
-
-There is really no difference between the board's documented 0-20mA
-versus 4-20mA output ranges. To pick one or the other is simply a matter
-of adjusting the offset and gain calibration until the board outputs in
-the desired range.
-
-Configuration options:
- None
-
-Manual configuration of comedi devices is not supported by this driver;
-supported PCI devices are configured as comedi devices automatically.
-
-*/
+ * Driver: adv_pci1724
+ * Description: Advantech PCI-1724U
+ * Devices: (Advantech) PCI-1724U [adv_pci1724]
+ * Author: Frank Mori Hess <fmh6jj@gmail.com>
+ * Updated: 2013-02-09
+ * Status: works
+ *
+ * Configuration Options: not applicable, uses comedi PCI auto config
+ *
+ * Subdevice 0 is the analog output.
+ * Subdevice 1 is the offset calibration for the analog output.
+ * Subdevice 2 is the gain calibration for the analog output.
+ *
+ * The calibration offset and gains have quite a large effect on the
+ * analog output, so it is possible to adjust the analog output to
+ * have an output range significantly different from the board's
+ * nominal output ranges. For a calibrated +/-10V range, the analog
+ * output's offset will be set somewhere near mid-range (0x2000) and
+ * its gain will be near maximum (0x3fff).
+ *
+ * There is really no difference between the board's documented 0-20mA
+ * versus 4-20mA output ranges. To pick one or the other is simply a
+ * matter of adjusting the offset and gain calibration until the board
+ * outputs in the desired range.
+ */
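As a rough illustration of the calibration behaviour described in the comment above, a minimal comedilib user-space sketch (assuming comedilib is installed and the board appears as /dev/comedi0; this is illustrative only and not part of the driver or of this patch) might nudge the offset and gain calibration and then write an output sample:

    #include <stdio.h>
    #include <comedilib.h>

    int main(void)
    {
            comedi_t *dev = comedi_open("/dev/comedi0");
            unsigned int chan = 0;

            if (!dev) {
                    fprintf(stderr, "comedi_open failed\n");
                    return 1;
            }
            /* subdevice 1: offset calibration, near mid-range */
            comedi_data_write(dev, 1, chan, 0, AREF_GROUND, 0x2000);
            /* subdevice 2: gain calibration, near maximum */
            comedi_data_write(dev, 2, chan, 0, AREF_GROUND, 0x3fff);
            /* subdevice 0: analog output, mid-scale (~0 V on the +/-10 V range) */
            comedi_data_write(dev, 0, chan, 0, AREF_GROUND, 0x2000);
            comedi_close(dev);
            return 0;
    }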
#include <linux/module.h>
-#include <linux/delay.h>
#include <linux/pci.h>
#include "../comedidev.h"
-#define PCI_VENDOR_ID_ADVANTECH 0x13fe
-
-#define NUM_AO_CHANNELS 32
-
-/* register offsets */
-enum board_registers {
- DAC_CONTROL_REG = 0x0,
- SYNC_OUTPUT_REG = 0x4,
- EEPROM_CONTROL_REG = 0x8,
- SYNC_OUTPUT_TRIGGER_REG = 0xc,
- BOARD_ID_REG = 0x10
-};
-
-/* bit definitions for registers */
-enum dac_control_contents {
- DAC_DATA_MASK = 0x3fff,
- DAC_DESTINATION_MASK = 0xc000,
- DAC_NORMAL_MODE = 0xc000,
- DAC_OFFSET_MODE = 0x8000,
- DAC_GAIN_MODE = 0x4000,
- DAC_CHANNEL_SELECT_MASK = 0xf0000,
- DAC_GROUP_SELECT_MASK = 0xf00000
-};
-
-static uint32_t dac_data_bits(uint16_t dac_data)
-{
- return dac_data & DAC_DATA_MASK;
-}
-
-static uint32_t dac_channel_select_bits(unsigned channel)
-{
- return (channel << 16) & DAC_CHANNEL_SELECT_MASK;
-}
-
-static uint32_t dac_group_select_bits(unsigned group)
-{
- return (1 << (20 + group)) & DAC_GROUP_SELECT_MASK;
-}
-
-static uint32_t dac_channel_and_group_select_bits(unsigned comedi_channel)
-{
- return dac_channel_select_bits(comedi_channel % 8) |
- dac_group_select_bits(comedi_channel / 8);
-}
-
-enum sync_output_contents {
- SYNC_MODE = 0x1,
- DAC_BUSY = 0x2, /* dac state machine is not ready */
-};
-
-enum sync_output_trigger_contents {
- SYNC_TRIGGER_BITS = 0x0 /* any value works */
-};
-
-enum board_id_contents {
- BOARD_ID_MASK = 0xf
-};
-
-static const struct comedi_lrange ao_ranges_1724 = {
+/*
+ * PCI bar 2 Register I/O map (dev->iobase)
+ */
+#define PCI1724_DAC_CTRL_REG 0x00
+#define PCI1724_DAC_CTRL_GX(x) (1 << (20 + ((x) / 8)))
+#define PCI1724_DAC_CTRL_CX(x) (((x) % 8) << 16)
+#define PCI1724_DAC_CTRL_MODE_GAIN (1 << 14)
+#define PCI1724_DAC_CTRL_MODE_OFFSET (2 << 14)
+#define PCI1724_DAC_CTRL_MODE_NORMAL (3 << 14)
+#define PCI1724_DAC_CTRL_MODE_MASK (3 << 14)
+#define PCI1724_DAC_CTRL_DATA(x) (((x) & 0x3fff) << 0)
+#define PCI1724_SYNC_CTRL_REG 0x04
+#define PCI1724_SYNC_CTRL_DACSTAT (1 << 1)
+#define PCI1724_SYNC_CTRL_SYN (1 << 0)
+#define PCI1724_EEPROM_CTRL_REG 0x08
+#define PCI1724_SYNC_TRIG_REG 0x0c /* any value works */
+#define PCI1724_BOARD_ID_REG 0x10
+#define PCI1724_BOARD_ID_MASK (0xf << 0)
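For reference, the DAC control word is simply these fields OR-ed together; a worked example (channel and data values chosen purely for illustration) for writing raw value 0x2000 to AO channel 10 in normal mode:

    /*
     *   PCI1724_DAC_CTRL_GX(10)        = 1 << (20 + 10 / 8)  = 0x00200000
     * | PCI1724_DAC_CTRL_CX(10)        = (10 % 8) << 16      = 0x00020000
     * | PCI1724_DAC_CTRL_MODE_NORMAL   = 3 << 14             = 0x0000c000
     * | PCI1724_DAC_CTRL_DATA(0x2000)  = 0x2000 << 0         = 0x00002000
     *                                    control word        = 0x0022e000
     */

This is the same composition that adv_pci1724_insn_write() performs further down before writing the word to PCI1724_DAC_CTRL_REG.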
+
+static const struct comedi_lrange adv_pci1724_ao_ranges = {
4, {
BIP_RANGE(10),
RANGE_mA(0, 20),
@@ -125,254 +79,120 @@ static const struct comedi_lrange ao_ranges_1724 = {
}
};
-/* this structure is for data unique to this hardware driver. */
-struct adv_pci1724_private {
- int ao_value[NUM_AO_CHANNELS];
- int offset_value[NUM_AO_CHANNELS];
- int gain_value[NUM_AO_CHANNELS];
-};
-
-static int wait_for_dac_idle(struct comedi_device *dev)
-{
- static const int timeout = 10000;
- int i;
-
- for (i = 0; i < timeout; ++i) {
- if ((inl(dev->iobase + SYNC_OUTPUT_REG) & DAC_BUSY) == 0)
- break;
- udelay(1);
- }
- if (i == timeout) {
- dev_err(dev->class_dev,
- "Timed out waiting for dac to become idle\n");
- return -EIO;
- }
- return 0;
-}
-
-static int set_dac(struct comedi_device *dev, unsigned mode, unsigned channel,
- unsigned data)
-{
- int retval;
- unsigned control_bits;
-
- retval = wait_for_dac_idle(dev);
- if (retval < 0)
- return retval;
-
- control_bits = mode;
- control_bits |= dac_channel_and_group_select_bits(channel);
- control_bits |= dac_data_bits(data);
- outl(control_bits, dev->iobase + DAC_CONTROL_REG);
- return 0;
-}
-
-static int ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int adv_pci1724_dac_idle(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
{
- struct adv_pci1724_private *devpriv = dev->private;
- int channel = CR_CHAN(insn->chanspec);
- int retval;
- int i;
+ unsigned int status;
- /* turn off synchronous mode */
- outl(0, dev->iobase + SYNC_OUTPUT_REG);
-
- for (i = 0; i < insn->n; ++i) {
- retval = set_dac(dev, DAC_NORMAL_MODE, channel, data[i]);
- if (retval < 0)
- return retval;
- devpriv->ao_value[channel] = data[i];
- }
- return insn->n;
+ status = inl(dev->iobase + PCI1724_SYNC_CTRL_REG);
+ if ((status & PCI1724_SYNC_CTRL_DACSTAT) == 0)
+ return 0;
+ return -EBUSY;
}
-static int ao_readback_insn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int adv_pci1724_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct adv_pci1724_private *devpriv = dev->private;
- int channel = CR_CHAN(insn->chanspec);
- int i;
-
- if (devpriv->ao_value[channel] < 0) {
- dev_err(dev->class_dev,
- "Cannot read back channels which have not yet been written to\n");
- return -EIO;
- }
- for (i = 0; i < insn->n; i++)
- data[i] = devpriv->ao_value[channel];
-
- return insn->n;
-}
-
-static int offset_write_insn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct adv_pci1724_private *devpriv = dev->private;
- int channel = CR_CHAN(insn->chanspec);
- int retval;
- int i;
-
- /* turn off synchronous mode */
- outl(0, dev->iobase + SYNC_OUTPUT_REG);
-
- for (i = 0; i < insn->n; ++i) {
- retval = set_dac(dev, DAC_OFFSET_MODE, channel, data[i]);
- if (retval < 0)
- return retval;
- devpriv->offset_value[channel] = data[i];
- }
-
- return insn->n;
-}
-
-static int offset_read_insn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct adv_pci1724_private *devpriv = dev->private;
- unsigned int channel = CR_CHAN(insn->chanspec);
+ unsigned long mode = (unsigned long)s->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int ctrl;
+ int ret;
int i;
- if (devpriv->offset_value[channel] < 0) {
- dev_err(dev->class_dev,
- "Cannot read back channels which have not yet been written to\n");
- return -EIO;
- }
- for (i = 0; i < insn->n; i++)
- data[i] = devpriv->offset_value[channel];
-
- return insn->n;
-}
-
-static int gain_write_insn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct adv_pci1724_private *devpriv = dev->private;
- int channel = CR_CHAN(insn->chanspec);
- int retval;
- int i;
+ ctrl = PCI1724_DAC_CTRL_GX(chan) | PCI1724_DAC_CTRL_CX(chan) | mode;
/* turn off synchronous mode */
- outl(0, dev->iobase + SYNC_OUTPUT_REG);
+ outl(0, dev->iobase + PCI1724_SYNC_CTRL_REG);
for (i = 0; i < insn->n; ++i) {
- retval = set_dac(dev, DAC_GAIN_MODE, channel, data[i]);
- if (retval < 0)
- return retval;
- devpriv->gain_value[channel] = data[i];
- }
+ unsigned int val = data[i];
- return insn->n;
-}
+ ret = comedi_timeout(dev, s, insn, adv_pci1724_dac_idle, 0);
+ if (ret)
+ return ret;
-static int gain_read_insn(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn,
- unsigned int *data)
-{
- struct adv_pci1724_private *devpriv = dev->private;
- unsigned int channel = CR_CHAN(insn->chanspec);
- int i;
+ outl(ctrl | PCI1724_DAC_CTRL_DATA(val),
+ dev->iobase + PCI1724_DAC_CTRL_REG);
- if (devpriv->gain_value[channel] < 0) {
- dev_err(dev->class_dev,
- "Cannot read back channels which have not yet been written to\n");
- return -EIO;
+ s->readback[chan] = val;
}
- for (i = 0; i < insn->n; i++)
- data[i] = devpriv->gain_value[channel];
return insn->n;
}
-/* Allocate and initialize the subdevice structures.
- */
-static int setup_subdevices(struct comedi_device *dev)
+static int adv_pci1724_auto_attach(struct comedi_device *dev,
+ unsigned long context_unused)
{
+ struct pci_dev *pcidev = comedi_to_pci_dev(dev);
struct comedi_subdevice *s;
+ unsigned int board_id;
int ret;
+ ret = comedi_pci_enable(dev);
+ if (ret)
+ return ret;
+
+ dev->iobase = pci_resource_start(pcidev, 2);
+ board_id = inl(dev->iobase + PCI1724_BOARD_ID_REG);
+ dev_info(dev->class_dev, "board id: %d\n",
+ board_id & PCI1724_BOARD_ID_MASK);
+
ret = comedi_alloc_subdevices(dev, 3);
if (ret)
return ret;
- /* analog output subdevice */
+ /* Analog Output subdevice */
s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_GROUND;
- s->n_chan = NUM_AO_CHANNELS;
- s->maxdata = 0x3fff;
- s->range_table = &ao_ranges_1724;
- s->insn_read = ao_readback_insn;
- s->insn_write = ao_winsn;
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_GROUND;
+ s->n_chan = 32;
+ s->maxdata = 0x3fff;
+ s->range_table = &adv_pci1724_ao_ranges;
+ s->insn_write = adv_pci1724_insn_write;
+ s->private = (void *)PCI1724_DAC_CTRL_MODE_NORMAL;
+
+ ret = comedi_alloc_subdev_readback(s);
+ if (ret)
+ return ret;
- /* offset calibration */
+ /* Offset Calibration subdevice */
s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_CALIB;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
- s->n_chan = NUM_AO_CHANNELS;
- s->insn_read = offset_read_insn;
- s->insn_write = offset_write_insn;
- s->maxdata = 0x3fff;
+ s->type = COMEDI_SUBD_CALIB;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
+ s->n_chan = 32;
+ s->maxdata = 0x3fff;
+ s->insn_write = adv_pci1724_insn_write;
+ s->private = (void *)PCI1724_DAC_CTRL_MODE_OFFSET;
+
+ ret = comedi_alloc_subdev_readback(s);
+ if (ret)
+ return ret;
- /* gain calibration */
+ /* Gain Calibration subdevice */
s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_CALIB;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
- s->n_chan = NUM_AO_CHANNELS;
- s->insn_read = gain_read_insn;
- s->insn_write = gain_write_insn;
- s->maxdata = 0x3fff;
-
- return 0;
-}
-
-static int adv_pci1724_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- struct adv_pci1724_private *devpriv;
- int i;
- int retval;
- unsigned int board_id;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- /* init software copies of output values to indicate we don't know
- * what the output value is since it has never been written. */
- for (i = 0; i < NUM_AO_CHANNELS; ++i) {
- devpriv->ao_value[i] = -1;
- devpriv->offset_value[i] = -1;
- devpriv->gain_value[i] = -1;
- }
-
- retval = comedi_pci_enable(dev);
- if (retval)
- return retval;
-
- dev->iobase = pci_resource_start(pcidev, 2);
- board_id = inl(dev->iobase + BOARD_ID_REG) & BOARD_ID_MASK;
- dev_info(dev->class_dev, "board id: %d\n", board_id);
-
- retval = setup_subdevices(dev);
- if (retval < 0)
- return retval;
+ s->type = COMEDI_SUBD_CALIB;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
+ s->n_chan = 32;
+ s->maxdata = 0x3fff;
+ s->insn_write = adv_pci1724_insn_write;
+ s->private = (void *)PCI1724_DAC_CTRL_MODE_GAIN;
+
+ ret = comedi_alloc_subdev_readback(s);
+ if (ret)
+ return ret;
- dev_info(dev->class_dev, "%s (pci %s) attached, board id: %u\n",
- dev->board_name, pci_name(pcidev), board_id);
return 0;
}
static struct comedi_driver adv_pci1724_driver = {
- .driver_name = "adv_pci1724",
- .module = THIS_MODULE,
- .auto_attach = adv_pci1724_auto_attach,
- .detach = comedi_pci_detach,
+ .driver_name = "adv_pci1724",
+ .module = THIS_MODULE,
+ .auto_attach = adv_pci1724_auto_attach,
+ .detach = comedi_pci_detach,
};
static int adv_pci1724_pci_probe(struct pci_dev *dev,
@@ -389,12 +209,11 @@ static const struct pci_device_id adv_pci1724_pci_table[] = {
MODULE_DEVICE_TABLE(pci, adv_pci1724_pci_table);
static struct pci_driver adv_pci1724_pci_driver = {
- .name = "adv_pci1724",
- .id_table = adv_pci1724_pci_table,
- .probe = adv_pci1724_pci_probe,
- .remove = comedi_pci_auto_unconfig,
+ .name = "adv_pci1724",
+ .id_table = adv_pci1724_pci_table,
+ .probe = adv_pci1724_pci_probe,
+ .remove = comedi_pci_auto_unconfig,
};
-
module_comedi_pci_driver(adv_pci1724_driver, adv_pci1724_pci_driver);
MODULE_AUTHOR("Frank Mori Hess <fmh6jj@gmail.com>");
diff --git a/drivers/staging/comedi/drivers/adv_pci_dio.c b/drivers/staging/comedi/drivers/adv_pci_dio.c
index f2e2d7e163b..09609d6d02d 100644
--- a/drivers/staging/comedi/drivers/adv_pci_dio.c
+++ b/drivers/staging/comedi/drivers/adv_pci_dio.c
@@ -930,7 +930,7 @@ static int pci1760_attach(struct comedi_device *dev)
s = &dev->subdevices[0];
s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_COMMON;
+ s->subdev_flags = SDF_READABLE;
s->n_chan = 8;
s->maxdata = 1;
s->len_chanlist = 8;
@@ -939,7 +939,7 @@ static int pci1760_attach(struct comedi_device *dev)
s = &dev->subdevices[1];
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_COMMON;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 8;
s->maxdata = 1;
s->len_chanlist = 8;
@@ -978,7 +978,7 @@ static int pci_dio_add_di(struct comedi_device *dev,
const struct dio_boardtype *this_board = dev->board_ptr;
s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_COMMON | d->specflags;
+ s->subdev_flags = SDF_READABLE | d->specflags;
if (d->chans > 16)
s->subdev_flags |= SDF_LSAMPL;
s->n_chan = d->chans;
@@ -1008,7 +1008,7 @@ static int pci_dio_add_do(struct comedi_device *dev,
const struct dio_boardtype *this_board = dev->board_ptr;
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_COMMON;
+ s->subdev_flags = SDF_WRITABLE;
if (d->chans > 16)
s->subdev_flags |= SDF_LSAMPL;
s->n_chan = d->chans;
diff --git a/drivers/staging/comedi/drivers/aio_aio12_8.c b/drivers/staging/comedi/drivers/aio_aio12_8.c
index 538277a691b..fbc3e5aa94c 100644
--- a/drivers/staging/comedi/drivers/aio_aio12_8.c
+++ b/drivers/staging/comedi/drivers/aio_aio12_8.c
@@ -212,7 +212,6 @@ static int aio_aio12_8_attach(struct comedi_device *dev,
s->maxdata = 0x0fff;
s->range_table = &range_aio_aio12_8;
s->insn_write = aio_aio12_8_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/amcc_s5933.h b/drivers/staging/comedi/drivers/amcc_s5933.h
index cf6a497b092..d4b8c0195bd 100644
--- a/drivers/staging/comedi/drivers/amcc_s5933.h
+++ b/drivers/staging/comedi/drivers/amcc_s5933.h
@@ -110,6 +110,8 @@
#define AGCSTS_TCZERO_MASK 0x000000c0
#define AGCSTS_FIFO_ST_MASK 0x0000003f
+#define AGCSTS_TC_ENABLE 0x10000000
+
#define AGCSTS_RESET_MBFLAGS 0x08000000
#define AGCSTS_RESET_P2A_FIFO 0x04000000
#define AGCSTS_RESET_A2P_FIFO 0x02000000
diff --git a/drivers/staging/comedi/drivers/amplc_dio200_common.c b/drivers/staging/comedi/drivers/amplc_dio200_common.c
index 2c1bfb09601..26aad705aad 100644
--- a/drivers/staging/comedi/drivers/amplc_dio200_common.c
+++ b/drivers/staging/comedi/drivers/amplc_dio200_common.c
@@ -120,7 +120,6 @@ struct dio200_subdev_intr {
unsigned int ofs;
unsigned int valid_isns;
unsigned int enabled_isns;
- unsigned int stopcount;
bool active:1;
};
@@ -256,7 +255,6 @@ static void dio200_read_scan_intr(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned int triggered)
{
- struct dio200_subdev_intr *subpriv = s->private;
struct comedi_cmd *cmd = &s->async->cmd;
unsigned short val;
unsigned int n, ch;
@@ -267,26 +265,12 @@ static void dio200_read_scan_intr(struct comedi_device *dev,
if (triggered & (1U << ch))
val |= (1U << n);
}
- /* Write the scan to the buffer. */
- if (comedi_buf_put(s, val)) {
- s->async->events |= (COMEDI_CB_BLOCK | COMEDI_CB_EOS);
- } else {
- /* Error! Stop acquisition. */
- dio200_stop_intr(dev, s);
- s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW;
- dev_err(dev->class_dev, "buffer overflow\n");
- }
- /* Check for end of acquisition. */
- if (cmd->stop_src == TRIG_COUNT) {
- if (subpriv->stopcount > 0) {
- subpriv->stopcount--;
- if (subpriv->stopcount == 0) {
- s->async->events |= COMEDI_CB_EOA;
- dio200_stop_intr(dev, s);
- }
- }
- }
+ comedi_buf_write_samples(s, &val, 1);
+
+ if (cmd->stop_src == TRIG_COUNT &&
+ s->async->scans_done >= cmd->stop_arg)
+ s->async->events |= COMEDI_CB_EOA;
}
static int dio200_handle_read_intr(struct comedi_device *dev,
@@ -297,13 +281,11 @@ static int dio200_handle_read_intr(struct comedi_device *dev,
unsigned triggered;
unsigned intstat;
unsigned cur_enabled;
- unsigned int oldevents;
unsigned long flags;
triggered = 0;
spin_lock_irqsave(&subpriv->spinlock, flags);
- oldevents = s->async->events;
if (board->has_int_sce) {
/*
* Collect interrupt sources that have triggered and disable
@@ -356,8 +338,7 @@ static int dio200_handle_read_intr(struct comedi_device *dev,
}
spin_unlock_irqrestore(&subpriv->spinlock, flags);
- if (oldevents != s->async->events)
- comedi_event(dev, s);
+ comedi_handle_events(dev, s);
return (triggered != 0);
}
@@ -436,7 +417,6 @@ static int dio200_subdev_intr_cmd(struct comedi_device *dev,
spin_lock_irqsave(&subpriv->spinlock, flags);
subpriv->active = true;
- subpriv->stopcount = cmd->stop_arg;
if (cmd->start_src == TRIG_INT)
s->async->inttrig = dio200_inttrig_start_intr;
@@ -469,7 +449,7 @@ static int dio200_subdev_intr_init(struct comedi_device *dev,
dio200_write8(dev, subpriv->ofs, 0);
s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
+ s->subdev_flags = SDF_READABLE | SDF_CMD_READ | SDF_PACKED;
if (board->has_int_sce) {
s->n_chan = DIO200_MAX_ISNS;
s->len_chanlist = DIO200_MAX_ISNS;
diff --git a/drivers/staging/comedi/drivers/amplc_pc236_common.c b/drivers/staging/comedi/drivers/amplc_pc236_common.c
index 963c5d868b8..be87172d1f3 100644
--- a/drivers/staging/comedi/drivers/amplc_pc236_common.c
+++ b/drivers/staging/comedi/drivers/amplc_pc236_common.c
@@ -135,9 +135,8 @@ static irqreturn_t pc236_interrupt(int irq, void *d)
handled = pc236_intr_check(dev);
if (dev->attached && handled) {
- comedi_buf_put(s, 0);
- s->async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOS;
- comedi_event(dev, s);
+ comedi_buf_write_samples(s, &s->state, 1);
+ comedi_handle_events(dev, s);
}
return IRQ_RETVAL(handled);
}
diff --git a/drivers/staging/comedi/drivers/amplc_pc263.c b/drivers/staging/comedi/drivers/amplc_pc263.c
index f8e551d8fd9..b1946ce6ecc 100644
--- a/drivers/staging/comedi/drivers/amplc_pc263.c
+++ b/drivers/staging/comedi/drivers/amplc_pc263.c
@@ -83,7 +83,7 @@ static int pc263_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s = &dev->subdevices[0];
/* digital output subdevice */
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 16;
s->maxdata = 1;
s->range_table = &range_digital;
diff --git a/drivers/staging/comedi/drivers/amplc_pci224.c b/drivers/staging/comedi/drivers/amplc_pci224.c
index 3bbbb57f19d..924c8298c7a 100644
--- a/drivers/staging/comedi/drivers/amplc_pci224.c
+++ b/drivers/staging/comedi/drivers/amplc_pci224.c
@@ -381,7 +381,6 @@ struct pci224_private {
unsigned short daccon;
unsigned int cached_div1;
unsigned int cached_div2;
- unsigned int ao_stop_count;
unsigned short ao_enab; /* max 16 channels so 'short' will do */
unsigned char intsce;
};
@@ -514,30 +513,21 @@ static void pci224_ao_handle_fifo(struct comedi_device *dev,
{
struct pci224_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int bytes_per_scan = cfc_bytes_per_scan(s);
- unsigned int num_scans;
+ unsigned int num_scans = comedi_nscans_left(s, 0);
unsigned int room;
unsigned short dacstat;
unsigned int i, n;
- /* Determine number of scans available in buffer. */
- num_scans = comedi_buf_read_n_available(s) / bytes_per_scan;
- if (cmd->stop_src == TRIG_COUNT) {
- /* Fixed number of scans. */
- if (num_scans > devpriv->ao_stop_count)
- num_scans = devpriv->ao_stop_count;
- }
-
/* Determine how much room is in the FIFO (in samples). */
dacstat = inw(dev->iobase + PCI224_DACCON);
switch (dacstat & PCI224_DACCON_FIFOFL_MASK) {
case PCI224_DACCON_FIFOFL_EMPTY:
room = PCI224_FIFO_ROOM_EMPTY;
if (cmd->stop_src == TRIG_COUNT &&
- devpriv->ao_stop_count == 0) {
+ s->async->scans_done >= cmd->stop_arg) {
/* FIFO empty at end of counted acquisition. */
s->async->events |= COMEDI_CB_EOA;
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return;
}
break;
@@ -568,25 +558,23 @@ static void pci224_ao_handle_fifo(struct comedi_device *dev,
/* Process scans. */
for (n = 0; n < num_scans; n++) {
- cfc_read_array_from_buffer(s, &devpriv->ao_scan_vals[0],
- bytes_per_scan);
+ comedi_buf_read_samples(s, &devpriv->ao_scan_vals[0],
+ cmd->chanlist_len);
for (i = 0; i < cmd->chanlist_len; i++) {
outw(devpriv->ao_scan_vals[devpriv->ao_scan_order[i]],
dev->iobase + PCI224_DACDATA);
}
}
- if (cmd->stop_src == TRIG_COUNT) {
- devpriv->ao_stop_count -= num_scans;
- if (devpriv->ao_stop_count == 0) {
- /*
- * Change FIFO interrupt trigger level to wait
- * until FIFO is empty.
- */
- devpriv->daccon = COMBINE(devpriv->daccon,
- PCI224_DACCON_FIFOINTR_EMPTY,
- PCI224_DACCON_FIFOINTR_MASK);
- outw(devpriv->daccon, dev->iobase + PCI224_DACCON);
- }
+ if (cmd->stop_src == TRIG_COUNT &&
+ s->async->scans_done >= cmd->stop_arg) {
+ /*
+ * Change FIFO interrupt trigger level to wait
+ * until FIFO is empty.
+ */
+ devpriv->daccon = COMBINE(devpriv->daccon,
+ PCI224_DACCON_FIFOINTR_EMPTY,
+ PCI224_DACCON_FIFOINTR_MASK);
+ outw(devpriv->daccon, dev->iobase + PCI224_DACCON);
}
if ((devpriv->daccon & PCI224_DACCON_TRIG_MASK) ==
PCI224_DACCON_TRIG_NONE) {
@@ -618,7 +606,7 @@ static void pci224_ao_handle_fifo(struct comedi_device *dev,
outw(devpriv->daccon, dev->iobase + PCI224_DACCON);
}
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
}
static int pci224_ao_inttrig_start(struct comedi_device *dev,
@@ -908,14 +896,6 @@ static int pci224_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
if (cmd->scan_begin_src == TRIG_TIMER)
pci224_ao_start_pacer(dev, s);
- /*
- * Sort out end of acquisition.
- */
- if (cmd->stop_src == TRIG_COUNT)
- devpriv->ao_stop_count = cmd->stop_arg;
- else /* TRIG_EXT | TRIG_NONE */
- devpriv->ao_stop_count = 0;
-
spin_lock_irqsave(&devpriv->ao_spinlock, flags);
if (cmd->start_src == TRIG_INT) {
s->async->inttrig = pci224_ao_inttrig_start;
@@ -1095,7 +1075,6 @@ pci224_auto_attach(struct comedi_device *dev, unsigned long context_model)
s->maxdata = (1 << thisboard->ao_bits) - 1;
s->range_table = thisboard->ao_range;
s->insn_write = pci224_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
s->len_chanlist = s->n_chan;
dev->write_subdev = s;
s->do_cmd = pci224_ao_cmd;
diff --git a/drivers/staging/comedi/drivers/amplc_pci230.c b/drivers/staging/comedi/drivers/amplc_pci230.c
index 01796cd28e5..49806a5e514 100644
--- a/drivers/staging/comedi/drivers/amplc_pci230.c
+++ b/drivers/staging/comedi/drivers/amplc_pci230.c
@@ -35,7 +35,7 @@
* automatically.
*
* The PCI230+ and PCI260+ have the same PCI device IDs as the PCI230 and
- * PCI260, but can be distinguished by the the size of the PCI regions. A
+ * PCI260, but can be distinguished by the size of the PCI regions. A
* card will be configured as a "+" model if detected as such.
*
* Subdevices:
@@ -490,9 +490,6 @@ struct pci230_private {
spinlock_t ai_stop_spinlock; /* Spin lock for stopping AI command */
spinlock_t ao_stop_spinlock; /* Spin lock for stopping AO command */
unsigned long daqio; /* PCI230's DAQ I/O space */
- unsigned int ai_scan_count; /* Number of AI scans remaining */
- unsigned int ai_scan_pos; /* Current position within AI scan */
- unsigned int ao_scan_count; /* Number of AO scans remaining. */
int intr_cpuid; /* ID of CPU running ISR */
unsigned short hwver; /* Hardware version (for '+' models) */
unsigned short adccon; /* ADCCON register value */
@@ -1074,37 +1071,27 @@ static void pci230_ao_stop(struct comedi_device *dev,
static void pci230_handle_ao_nofifo(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- struct pci230_private *devpriv = dev->private;
- unsigned short data;
- int i, ret;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
+ unsigned short data;
+ int i;
- if (cmd->stop_src == TRIG_COUNT && devpriv->ao_scan_count == 0)
+ if (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg)
return;
+
for (i = 0; i < cmd->chanlist_len; i++) {
unsigned int chan = CR_CHAN(cmd->chanlist[i]);
- /* Read sample from Comedi's circular buffer. */
- ret = comedi_buf_get(s, &data);
- if (ret == 0) {
- s->async->events |= COMEDI_CB_OVERFLOW;
- pci230_ao_stop(dev, s);
- dev_err(dev->class_dev, "AO buffer underrun\n");
+ if (!comedi_buf_read_samples(s, &data, 1)) {
+ async->events |= COMEDI_CB_OVERFLOW;
return;
}
pci230_ao_write_nofifo(dev, data, chan);
s->readback[chan] = data;
}
- async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOS;
- if (cmd->stop_src == TRIG_COUNT) {
- devpriv->ao_scan_count--;
- if (devpriv->ao_scan_count == 0) {
- /* End of acquisition. */
- async->events |= COMEDI_CB_EOA;
- pci230_ao_stop(dev, s);
- }
- }
+
+ if (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg)
+ async->events |= COMEDI_CB_EOA;
}
/*
@@ -1117,26 +1104,18 @@ static bool pci230_handle_ao_fifo(struct comedi_device *dev,
struct pci230_private *devpriv = dev->private;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
- unsigned int num_scans;
+ unsigned int num_scans = comedi_nscans_left(s, 0);
unsigned int room;
unsigned short dacstat;
unsigned int i, n;
unsigned int events = 0;
- bool running;
/* Get DAC FIFO status. */
dacstat = inw(devpriv->daqio + PCI230_DACCON);
- /* Determine number of scans available in buffer. */
- num_scans = comedi_buf_read_n_available(s) / cfc_bytes_per_scan(s);
- if (cmd->stop_src == TRIG_COUNT) {
- /* Fixed number of scans. */
- if (num_scans > devpriv->ao_scan_count)
- num_scans = devpriv->ao_scan_count;
- if (devpriv->ao_scan_count == 0) {
- /* End of acquisition. */
- events |= COMEDI_CB_EOA;
- }
- }
+
+ if (cmd->stop_src == TRIG_COUNT && num_scans == 0)
+ events |= COMEDI_CB_EOA;
+
if (events == 0) {
/* Check for FIFO underrun. */
if (dacstat & PCI230P2_DAC_FIFO_UNDERRUN_LATCHED) {
@@ -1175,27 +1154,22 @@ static bool pci230_handle_ao_fifo(struct comedi_device *dev,
unsigned int chan = CR_CHAN(cmd->chanlist[i]);
unsigned short datum;
- comedi_buf_get(s, &datum);
+ comedi_buf_read_samples(s, &datum, 1);
pci230_ao_write_fifo(dev, datum, chan);
s->readback[chan] = datum;
}
}
- events |= COMEDI_CB_EOS | COMEDI_CB_BLOCK;
- if (cmd->stop_src == TRIG_COUNT) {
- devpriv->ao_scan_count -= num_scans;
- if (devpriv->ao_scan_count == 0) {
- /*
- * All data for the command has been written
- * to FIFO. Set FIFO interrupt trigger level
- * to 'empty'.
- */
- devpriv->daccon =
- (devpriv->daccon &
- ~PCI230P2_DAC_INT_FIFO_MASK) |
- PCI230P2_DAC_INT_FIFO_EMPTY;
- outw(devpriv->daccon,
- devpriv->daqio + PCI230_DACCON);
- }
+
+ if (cmd->stop_src == TRIG_COUNT &&
+ async->scans_done >= cmd->stop_arg) {
+ /*
+ * All data for the command has been written
+ * to FIFO. Set FIFO interrupt trigger level
+ * to 'empty'.
+ */
+ devpriv->daccon &= ~PCI230P2_DAC_INT_FIFO_MASK;
+ devpriv->daccon |= PCI230P2_DAC_INT_FIFO_EMPTY;
+ outw(devpriv->daccon, devpriv->daqio + PCI230_DACCON);
}
/* Check if FIFO underrun occurred while writing to FIFO. */
dacstat = inw(devpriv->daqio + PCI230_DACCON);
@@ -1204,15 +1178,8 @@ static bool pci230_handle_ao_fifo(struct comedi_device *dev,
events |= COMEDI_CB_OVERFLOW | COMEDI_CB_ERROR;
}
}
- if (events & (COMEDI_CB_EOA | COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW)) {
- /* Stopping AO due to completion or error. */
- pci230_ao_stop(dev, s);
- running = false;
- } else {
- running = true;
- }
async->events |= events;
- return running;
+ return !(async->events & COMEDI_CB_CANCEL_MASK);
}
static int pci230_ao_inttrig_scan_begin(struct comedi_device *dev,
@@ -1235,7 +1202,7 @@ static int pci230_ao_inttrig_scan_begin(struct comedi_device *dev,
/* Not using DAC FIFO. */
spin_unlock_irqrestore(&devpriv->ao_stop_spinlock, irqflags);
pci230_handle_ao_nofifo(dev, s);
- comedi_event(dev, s);
+ comedi_handle_events(dev, s);
} else {
/* Using DAC FIFO. */
/* Read DACSWTRIG register to trigger conversion. */
@@ -1265,7 +1232,7 @@ static void pci230_ao_start(struct comedi_device *dev,
/* Preload FIFO data. */
run = pci230_handle_ao_fifo(dev, s);
- comedi_event(dev, s);
+ comedi_handle_events(dev, s);
if (!run) {
/* Stopped. */
return;
@@ -1354,8 +1321,6 @@ static int pci230_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
return -EBUSY;
}
- devpriv->ao_scan_count = cmd->stop_arg;
-
/*
* Set range - see analogue output range table; 0 => unipolar 10V,
* 1 => bipolar +/-10V range scale
@@ -1754,19 +1719,15 @@ static void pci230_ai_update_fifo_trigger_level(struct comedi_device *dev,
{
struct pci230_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int scanlen = cmd->scan_end_arg;
unsigned int wake;
unsigned short triglev;
unsigned short adccon;
if (cmd->flags & CMDF_WAKE_EOS)
- wake = scanlen - devpriv->ai_scan_pos;
- else if (cmd->stop_src != TRIG_COUNT ||
- devpriv->ai_scan_count >= PCI230_ADC_FIFOLEVEL_HALFFULL ||
- scanlen >= PCI230_ADC_FIFOLEVEL_HALFFULL)
- wake = PCI230_ADC_FIFOLEVEL_HALFFULL;
+ wake = cmd->scan_end_arg - s->async->cur_chan;
else
- wake = devpriv->ai_scan_count * scanlen - devpriv->ai_scan_pos;
+ wake = comedi_nsamples_left(s, PCI230_ADC_FIFOLEVEL_HALFFULL);
+
if (wake >= PCI230_ADC_FIFOLEVEL_HALFFULL) {
triglev = PCI230_ADC_INT_FIFO_HALF;
} else if (wake > 1 && devpriv->hwver > 0) {
@@ -2059,28 +2020,17 @@ static void pci230_handle_ai(struct comedi_device *dev,
struct pci230_private *devpriv = dev->private;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
- unsigned int scanlen = cmd->scan_end_arg;
- unsigned int events = 0;
unsigned int status_fifo;
unsigned int i;
unsigned int todo;
unsigned int fifoamount;
+ unsigned short val;
/* Determine number of samples to read. */
- if (cmd->stop_src != TRIG_COUNT) {
- todo = PCI230_ADC_FIFOLEVEL_HALFFULL;
- } else if (devpriv->ai_scan_count == 0) {
- todo = 0;
- } else if (devpriv->ai_scan_count > PCI230_ADC_FIFOLEVEL_HALFFULL ||
- scanlen > PCI230_ADC_FIFOLEVEL_HALFFULL) {
- todo = PCI230_ADC_FIFOLEVEL_HALFFULL;
- } else {
- todo = devpriv->ai_scan_count * scanlen - devpriv->ai_scan_pos;
- if (todo > PCI230_ADC_FIFOLEVEL_HALFFULL)
- todo = PCI230_ADC_FIFOLEVEL_HALFFULL;
- }
+ todo = comedi_nsamples_left(s, PCI230_ADC_FIFOLEVEL_HALFFULL);
if (todo == 0)
return;
+
fifoamount = 0;
for (i = 0; i < todo; i++) {
if (fifoamount == 0) {
@@ -2092,7 +2042,7 @@ static void pci230_handle_ai(struct comedi_device *dev,
* unnoticed by the caller.
*/
dev_err(dev->class_dev, "AI FIFO overrun\n");
- events |= COMEDI_CB_OVERFLOW | COMEDI_CB_ERROR;
+ async->events |= COMEDI_CB_ERROR;
break;
} else if (status_fifo & PCI230_ADC_FIFO_EMPTY) {
/* FIFO empty. */
@@ -2111,37 +2061,23 @@ static void pci230_handle_ai(struct comedi_device *dev,
fifoamount = 1;
}
}
- /* Read sample and store in Comedi's circular buffer. */
- if (comedi_buf_put(s, pci230_ai_read(dev)) == 0) {
- events |= COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW;
- dev_err(dev->class_dev, "AI buffer overflow\n");
+
+ val = pci230_ai_read(dev);
+ if (!comedi_buf_write_samples(s, &val, 1))
break;
- }
+
fifoamount--;
- devpriv->ai_scan_pos++;
- if (devpriv->ai_scan_pos == scanlen) {
- /* End of scan. */
- devpriv->ai_scan_pos = 0;
- devpriv->ai_scan_count--;
- async->events |= COMEDI_CB_EOS;
+
+ if (cmd->stop_src == TRIG_COUNT &&
+ async->scans_done >= cmd->stop_arg) {
+ async->events |= COMEDI_CB_EOA;
+ break;
}
}
- if (cmd->stop_src == TRIG_COUNT && devpriv->ai_scan_count == 0) {
- /* End of acquisition. */
- events |= COMEDI_CB_EOA;
- } else {
- /* More samples required, tell Comedi to block. */
- events |= COMEDI_CB_BLOCK;
- }
- async->events |= events;
- if (async->events & (COMEDI_CB_EOA | COMEDI_CB_ERROR |
- COMEDI_CB_OVERFLOW)) {
- /* disable hardware conversions */
- pci230_ai_stop(dev, s);
- } else {
- /* update FIFO interrupt trigger level */
+
+ /* update FIFO interrupt trigger level if still running */
+ if (!(async->events & COMEDI_CB_CANCEL_MASK))
pci230_ai_update_fifo_trigger_level(dev, s);
- }
}
static int pci230_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
@@ -2177,9 +2113,6 @@ static int pci230_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
if (!pci230_claim_shared(dev, res_mask, OWNER_AICMD))
return -EBUSY;
- devpriv->ai_scan_count = cmd->stop_arg;
- devpriv->ai_scan_pos = 0; /* Position within scan. */
-
/*
* Steps:
* - Set channel scan list.
@@ -2355,7 +2288,8 @@ static irqreturn_t pci230_interrupt(int irq, void *d)
unsigned char status_int, valid_status_int, temp_ier;
struct comedi_device *dev = (struct comedi_device *)d;
struct pci230_private *devpriv = dev->private;
- struct comedi_subdevice *s;
+ struct comedi_subdevice *s_ao = dev->write_subdev;
+ struct comedi_subdevice *s_ai = dev->read_subdev;
unsigned long irqflags;
/* Read interrupt status/enable register. */
@@ -2385,23 +2319,14 @@ static irqreturn_t pci230_interrupt(int irq, void *d)
* two.
*/
- if (valid_status_int & PCI230_INT_ZCLK_CT1) {
- s = dev->write_subdev;
- pci230_handle_ao_nofifo(dev, s);
- comedi_event(dev, s);
- }
+ if (valid_status_int & PCI230_INT_ZCLK_CT1)
+ pci230_handle_ao_nofifo(dev, s_ao);
- if (valid_status_int & PCI230P2_INT_DAC) {
- s = dev->write_subdev;
- pci230_handle_ao_fifo(dev, s);
- comedi_event(dev, s);
- }
+ if (valid_status_int & PCI230P2_INT_DAC)
+ pci230_handle_ao_fifo(dev, s_ao);
- if (valid_status_int & PCI230_INT_ADC) {
- s = dev->read_subdev;
- pci230_handle_ai(dev, s);
- comedi_event(dev, s);
- }
+ if (valid_status_int & PCI230_INT_ADC)
+ pci230_handle_ai(dev, s_ai);
/* Reenable interrupts. */
spin_lock_irqsave(&devpriv->isr_spinlock, irqflags);
@@ -2410,6 +2335,9 @@ static irqreturn_t pci230_interrupt(int irq, void *d)
devpriv->intr_running = false;
spin_unlock_irqrestore(&devpriv->isr_spinlock, irqflags);
+ comedi_handle_events(dev, s_ao);
+ comedi_handle_events(dev, s_ai);
+
return IRQ_HANDLED;
}
@@ -2583,7 +2511,6 @@ static int pci230_auto_attach(struct comedi_device *dev,
s->maxdata = (1 << thisboard->ao_bits) - 1;
s->range_table = &pci230_ao_range;
s->insn_write = pci230_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
s->len_chanlist = 2;
if (dev->irq) {
dev->write_subdev = s;
diff --git a/drivers/staging/comedi/drivers/amplc_pci263.c b/drivers/staging/comedi/drivers/amplc_pci263.c
index 2259bee98d4..0d2224b832a 100644
--- a/drivers/staging/comedi/drivers/amplc_pci263.c
+++ b/drivers/staging/comedi/drivers/amplc_pci263.c
@@ -71,7 +71,7 @@ static int pci263_auto_attach(struct comedi_device *dev,
s = &dev->subdevices[0];
/* digital output subdevice */
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 16;
s->maxdata = 1;
s->range_table = &range_digital;
diff --git a/drivers/staging/comedi/drivers/c6xdigio.c b/drivers/staging/comedi/drivers/c6xdigio.c
index e03dd6e7141..e7cb7032a91 100644
--- a/drivers/staging/comedi/drivers/c6xdigio.c
+++ b/drivers/staging/comedi/drivers/c6xdigio.c
@@ -265,7 +265,7 @@ static int c6xdigio_attach(struct comedi_device *dev,
s = &dev->subdevices[0];
/* pwm output subdevice */
s->type = COMEDI_SUBD_PWM;
- s->subdev_flags = SDF_WRITEABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 2;
s->maxdata = 500;
s->range_table = &range_unknown;
diff --git a/drivers/staging/comedi/drivers/cb_das16_cs.c b/drivers/staging/comedi/drivers/cb_das16_cs.c
index f88880aea6d..0a48d2a961d 100644
--- a/drivers/staging/comedi/drivers/cb_das16_cs.c
+++ b/drivers/staging/comedi/drivers/cb_das16_cs.c
@@ -305,7 +305,6 @@ static int das16cs_auto_attach(struct comedi_device *dev,
s->maxdata = 0xffff;
s->range_table = &range_bipolar10;
s->insn_write = &das16cs_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/cb_pcidas.c b/drivers/staging/comedi/drivers/cb_pcidas.c
index 1ec363b7505..669b1703eb9 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas.c
@@ -346,8 +346,6 @@ struct cb_pcidas_private {
/* divisors of master clock for analog input pacing */
unsigned int divisor1;
unsigned int divisor2;
- /* number of analog input samples remaining */
- unsigned int count;
/* bits to write to registers */
unsigned int adc_fifo_bits;
unsigned int s5933_intcsr_bits;
@@ -358,11 +356,6 @@ struct cb_pcidas_private {
/* divisors of master clock for analog output pacing */
unsigned int ao_divisor1;
unsigned int ao_divisor2;
- /* number of analog output samples remaining */
- unsigned int ao_count;
- unsigned int caldac_value[NUM_CHANNELS_8800];
- unsigned int trimpot_value[NUM_CHANNELS_8402];
- unsigned int dac08_value;
unsigned int calibration_source;
};
@@ -596,25 +589,14 @@ static void write_calibration_bitstream(struct comedi_device *dev,
}
}
-static int caldac_8800_write(struct comedi_device *dev, unsigned int address,
- uint8_t value)
+static void caldac_8800_write(struct comedi_device *dev,
+ unsigned int chan, uint8_t val)
{
struct cb_pcidas_private *devpriv = dev->private;
- static const int num_caldac_channels = 8;
static const int bitstream_length = 11;
- unsigned int bitstream = ((address & 0x7) << 8) | value;
+ unsigned int bitstream = ((chan & 0x7) << 8) | val;
static const int caldac_8800_udelay = 1;
- if (address >= num_caldac_channels) {
- dev_err(dev->class_dev, "illegal caldac channel\n");
- return -1;
- }
-
- if (value == devpriv->caldac_value[address])
- return 1;
-
- devpriv->caldac_value[address] = value;
-
write_calibration_bitstream(dev, cal_enable_bits(dev), bitstream,
bitstream_length);
@@ -623,75 +605,62 @@ static int caldac_8800_write(struct comedi_device *dev, unsigned int address,
devpriv->control_status + CALIBRATION_REG);
udelay(caldac_8800_udelay);
outw(cal_enable_bits(dev), devpriv->control_status + CALIBRATION_REG);
-
- return 1;
}
-static int caldac_write_insn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int cb_pcidas_caldac_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- const unsigned int channel = CR_CHAN(insn->chanspec);
+ unsigned int chan = CR_CHAN(insn->chanspec);
- return caldac_8800_write(dev, channel, data[0]);
-}
+ if (insn->n) {
+ unsigned int val = data[insn->n - 1];
-static int caldac_read_insn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct cb_pcidas_private *devpriv = dev->private;
-
- data[0] = devpriv->caldac_value[CR_CHAN(insn->chanspec)];
+ if (s->readback[chan] != val) {
+ caldac_8800_write(dev, chan, val);
+ s->readback[chan] = val;
+ }
+ }
- return 1;
+ return insn->n;
}
/* 1602/16 pregain offset */
static void dac08_write(struct comedi_device *dev, unsigned int value)
{
struct cb_pcidas_private *devpriv = dev->private;
- unsigned long cal_reg;
-
- if (devpriv->dac08_value != value) {
- devpriv->dac08_value = value;
-
- cal_reg = devpriv->control_status + CALIBRATION_REG;
- value &= 0xff;
- value |= cal_enable_bits(dev);
+ value &= 0xff;
+ value |= cal_enable_bits(dev);
- /* latch the new value into the caldac */
- outw(value, cal_reg);
- udelay(1);
- outw(value | SELECT_DAC08_BIT, cal_reg);
- udelay(1);
- outw(value, cal_reg);
- udelay(1);
- }
+ /* latch the new value into the caldac */
+ outw(value, devpriv->control_status + CALIBRATION_REG);
+ udelay(1);
+ outw(value | SELECT_DAC08_BIT,
+ devpriv->control_status + CALIBRATION_REG);
+ udelay(1);
+ outw(value, devpriv->control_status + CALIBRATION_REG);
+ udelay(1);
}
-static int dac08_write_insn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int cb_pcidas_dac08_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int i;
-
- for (i = 0; i < insn->n; i++)
- dac08_write(dev, data[i]);
-
- return insn->n;
-}
+ unsigned int chan = CR_CHAN(insn->chanspec);
-static int dac08_read_insn(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn,
- unsigned int *data)
-{
- struct cb_pcidas_private *devpriv = dev->private;
+ if (insn->n) {
+ unsigned int val = data[insn->n - 1];
- data[0] = devpriv->dac08_value;
+ if (s->readback[chan] != val) {
+ dac08_write(dev, val);
+ s->readback[chan] = val;
+ }
+ }
- return 1;
+ return insn->n;
}
static int trimpot_7376_write(struct comedi_device *dev, uint8_t value)
@@ -740,50 +709,41 @@ static int trimpot_8402_write(struct comedi_device *dev, unsigned int channel,
return 0;
}
-static int cb_pcidas_trimpot_write(struct comedi_device *dev,
- unsigned int channel, unsigned int value)
+static void cb_pcidas_trimpot_write(struct comedi_device *dev,
+ unsigned int chan, unsigned int val)
{
const struct cb_pcidas_board *thisboard = dev->board_ptr;
- struct cb_pcidas_private *devpriv = dev->private;
-
- if (devpriv->trimpot_value[channel] == value)
- return 1;
- devpriv->trimpot_value[channel] = value;
switch (thisboard->trimpot) {
case AD7376:
- trimpot_7376_write(dev, value);
+ trimpot_7376_write(dev, val);
break;
case AD8402:
- trimpot_8402_write(dev, channel, value);
+ trimpot_8402_write(dev, chan, val);
break;
default:
dev_err(dev->class_dev, "driver bug?\n");
- return -1;
+ break;
}
-
- return 1;
}
-static int trimpot_write_insn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int cb_pcidas_trimpot_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- unsigned int channel = CR_CHAN(insn->chanspec);
+ unsigned int chan = CR_CHAN(insn->chanspec);
- return cb_pcidas_trimpot_write(dev, channel, data[0]);
-}
+ if (insn->n) {
+ unsigned int val = data[insn->n - 1];
-static int trimpot_read_insn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct cb_pcidas_private *devpriv = dev->private;
- unsigned int channel = CR_CHAN(insn->chanspec);
-
- data[0] = devpriv->trimpot_value[channel];
+ if (s->readback[chan] != val) {
+ cb_pcidas_trimpot_write(dev, chan, val);
+ s->readback[chan] = val;
+ }
+ }
- return 1;
+ return insn->n;
}
static int cb_pcidas_ai_check_chanlist(struct comedi_device *dev,
@@ -976,9 +936,6 @@ static int cb_pcidas_ai_cmd(struct comedi_device *dev,
if (cmd->scan_begin_src == TRIG_TIMER || cmd->convert_src == TRIG_TIMER)
cb_pcidas_ai_load_counters(dev);
- /* set number of conversions */
- if (cmd->stop_src == TRIG_COUNT)
- devpriv->count = cmd->chanlist_len * cmd->stop_arg;
/* enable interrupts */
spin_lock_irqsave(&dev->spinlock, flags);
devpriv->adc_fifo_bits |= INTE;
@@ -1134,32 +1091,34 @@ static int cb_pcidas_cancel(struct comedi_device *dev,
return 0;
}
+static void cb_pcidas_ao_load_fifo(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int nsamples)
+{
+ struct cb_pcidas_private *devpriv = dev->private;
+ unsigned int nbytes;
+
+ nsamples = comedi_nsamples_left(s, nsamples);
+ nbytes = comedi_buf_read_samples(s, devpriv->ao_buffer, nsamples);
+
+ nsamples = comedi_bytes_to_samples(s, nbytes);
+ outsw(devpriv->ao_registers + DACDATA, devpriv->ao_buffer, nsamples);
+}
+
static int cb_pcidas_ao_inttrig(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned int trig_num)
{
const struct cb_pcidas_board *thisboard = dev->board_ptr;
struct cb_pcidas_private *devpriv = dev->private;
- unsigned int num_bytes, num_points = thisboard->fifo_size;
struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &s->async->cmd;
+ struct comedi_cmd *cmd = &async->cmd;
unsigned long flags;
if (trig_num != cmd->start_arg)
return -EINVAL;
- /* load up fifo */
- if (cmd->stop_src == TRIG_COUNT && devpriv->ao_count < num_points)
- num_points = devpriv->ao_count;
-
- num_bytes = cfc_read_array_from_buffer(s, devpriv->ao_buffer,
- num_points * sizeof(short));
- num_points = num_bytes / sizeof(short);
-
- if (cmd->stop_src == TRIG_COUNT)
- devpriv->ao_count -= num_points;
- /* write data to board's fifo */
- outsw(devpriv->ao_registers + DACDATA, devpriv->ao_buffer, num_bytes);
+ cb_pcidas_ao_load_fifo(dev, s, thisboard->fifo_size);
/* enable dac half-full and empty interrupts */
spin_lock_irqsave(&dev->spinlock, flags);
@@ -1224,9 +1183,6 @@ static int cb_pcidas_ao_cmd(struct comedi_device *dev,
if (cmd->scan_begin_src == TRIG_TIMER)
cb_pcidas_ao_load_counters(dev);
- /* set number of conversions */
- if (cmd->stop_src == TRIG_COUNT)
- devpriv->ao_count = cmd->chanlist_len * cmd->stop_arg;
/* set pacer source */
spin_lock_irqsave(&dev->spinlock, flags);
switch (cmd->scan_begin_src) {
@@ -1275,8 +1231,6 @@ static void handle_ao_interrupt(struct comedi_device *dev, unsigned int status)
struct comedi_subdevice *s = dev->write_subdev;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
- unsigned int half_fifo = thisboard->fifo_size / 2;
- unsigned int num_points;
unsigned long flags;
if (status & DAEMI) {
@@ -1286,32 +1240,17 @@ static void handle_ao_interrupt(struct comedi_device *dev, unsigned int status)
devpriv->control_status + INT_ADCFIFO);
spin_unlock_irqrestore(&dev->spinlock, flags);
if (inw(devpriv->ao_registers + DAC_CSR) & DAC_EMPTY) {
- if (cmd->stop_src == TRIG_NONE ||
- (cmd->stop_src == TRIG_COUNT
- && devpriv->ao_count)) {
+ if (cmd->stop_src == TRIG_COUNT &&
+ async->scans_done >= cmd->stop_arg) {
+ async->events |= COMEDI_CB_EOA;
+ } else {
dev_err(dev->class_dev, "dac fifo underflow\n");
async->events |= COMEDI_CB_ERROR;
}
- async->events |= COMEDI_CB_EOA;
}
} else if (status & DAHFI) {
- unsigned int num_bytes;
+ cb_pcidas_ao_load_fifo(dev, s, thisboard->fifo_size / 2);
- /* figure out how many points we are writing to fifo */
- num_points = half_fifo;
- if (cmd->stop_src == TRIG_COUNT &&
- devpriv->ao_count < num_points)
- num_points = devpriv->ao_count;
- num_bytes =
- cfc_read_array_from_buffer(s, devpriv->ao_buffer,
- num_points * sizeof(short));
- num_points = num_bytes / sizeof(short);
-
- if (cmd->stop_src == TRIG_COUNT)
- devpriv->ao_count -= num_points;
- /* write data to board's fifo */
- outsw(devpriv->ao_registers + DACDATA, devpriv->ao_buffer,
- num_points);
/* clear half-full interrupt latch */
spin_lock_irqsave(&dev->spinlock, flags);
outw(devpriv->adc_fifo_bits | DAHFI,
@@ -1319,7 +1258,7 @@ static void handle_ao_interrupt(struct comedi_device *dev, unsigned int status)
spin_unlock_irqrestore(&dev->spinlock, flags);
}
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
}
static irqreturn_t cb_pcidas_interrupt(int irq, void *d)
@@ -1362,18 +1301,15 @@ static irqreturn_t cb_pcidas_interrupt(int irq, void *d)
/* if fifo half-full */
if (status & ADHFI) {
/* read data */
- num_samples = half_fifo;
- if (cmd->stop_src == TRIG_COUNT &&
- num_samples > devpriv->count) {
- num_samples = devpriv->count;
- }
+ num_samples = comedi_nsamples_left(s, half_fifo);
insw(devpriv->adc_fifo + ADCDATA, devpriv->ai_buffer,
num_samples);
- cfc_write_array_to_buffer(s, devpriv->ai_buffer,
- num_samples * sizeof(short));
- devpriv->count -= num_samples;
- if (cmd->stop_src == TRIG_COUNT && devpriv->count == 0)
+ comedi_buf_write_samples(s, devpriv->ai_buffer, num_samples);
+
+ if (cmd->stop_src == TRIG_COUNT &&
+ async->scans_done >= cmd->stop_arg)
async->events |= COMEDI_CB_EOA;
+
/* clear half-full interrupt latch */
spin_lock_irqsave(&dev->spinlock, flags);
outw(devpriv->adc_fifo_bits | INT,
@@ -1382,14 +1318,17 @@ static irqreturn_t cb_pcidas_interrupt(int irq, void *d)
/* else if fifo not empty */
} else if (status & (ADNEI | EOBI)) {
for (i = 0; i < timeout; i++) {
+ unsigned short val;
+
/* break if fifo is empty */
if ((ADNE & inw(devpriv->control_status +
INT_ADCFIFO)) == 0)
break;
- cfc_write_to_buffer(s, inw(devpriv->adc_fifo));
+ val = inw(devpriv->adc_fifo);
+ comedi_buf_write_samples(s, &val, 1);
+
if (cmd->stop_src == TRIG_COUNT &&
- --devpriv->count == 0) {
- /* end of acquisition */
+ async->scans_done >= cmd->stop_arg) {
async->events |= COMEDI_CB_EOA;
break;
}
@@ -1419,7 +1358,7 @@ static irqreturn_t cb_pcidas_interrupt(int irq, void *d)
async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
}
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
@@ -1503,7 +1442,6 @@ static int cb_pcidas_auto_attach(struct comedi_device *dev,
s->range_table = &cb_pcidas_ao_ranges;
/* default to no fifo (*insn_write) */
s->insn_write = cb_pcidas_ao_nofifo_winsn;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
@@ -1542,10 +1480,16 @@ static int cb_pcidas_auto_attach(struct comedi_device *dev,
s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
s->n_chan = NUM_CHANNELS_8800;
s->maxdata = 0xff;
- s->insn_read = caldac_read_insn;
- s->insn_write = caldac_write_insn;
- for (i = 0; i < s->n_chan; i++)
+ s->insn_write = cb_pcidas_caldac_insn_write;
+
+ ret = comedi_alloc_subdev_readback(s);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < s->n_chan; i++) {
caldac_8800_write(dev, i, s->maxdata / 2);
+ s->readback[i] = s->maxdata / 2;
+ }
/* trim potentiometer */
s = &dev->subdevices[5];
@@ -1558,10 +1502,16 @@ static int cb_pcidas_auto_attach(struct comedi_device *dev,
s->n_chan = NUM_CHANNELS_8402;
s->maxdata = 0xff;
}
- s->insn_read = trimpot_read_insn;
- s->insn_write = trimpot_write_insn;
- for (i = 0; i < s->n_chan; i++)
+ s->insn_write = cb_pcidas_trimpot_insn_write;
+
+ ret = comedi_alloc_subdev_readback(s);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < s->n_chan; i++) {
cb_pcidas_trimpot_write(dev, i, s->maxdata / 2);
+ s->readback[i] = s->maxdata / 2;
+ }
/* dac08 caldac */
s = &dev->subdevices[6];
@@ -1569,10 +1519,17 @@ static int cb_pcidas_auto_attach(struct comedi_device *dev,
s->type = COMEDI_SUBD_CALIB;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
s->n_chan = NUM_CHANNELS_DAC08;
- s->insn_read = dac08_read_insn;
- s->insn_write = dac08_write_insn;
s->maxdata = 0xff;
- dac08_write(dev, s->maxdata / 2);
+ s->insn_write = cb_pcidas_dac08_insn_write;
+
+ ret = comedi_alloc_subdev_readback(s);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < s->n_chan; i++) {
+ dac08_write(dev, s->maxdata / 2);
+ s->readback[i] = s->maxdata / 2;
+ }
} else
s->type = COMEDI_SUBD_UNUSED;
diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c
index 3b6bffc6691..eddb7ace43d 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas64.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas64.c
@@ -1065,8 +1065,6 @@ struct pcidas64_private {
/* local address (used by dma controller) */
uint32_t local0_iobase;
uint32_t local1_iobase;
- /* number of analog input samples remaining */
- unsigned int ai_count;
/* dma buffers for analog input */
uint16_t *ai_buffer[MAX_AI_DMA_RING_COUNT];
/* physical addresses of ai dma buffers */
@@ -1087,8 +1085,6 @@ struct pcidas64_private {
dma_addr_t ao_dma_desc_bus_addr;
/* keeps track of buffer where the next ao sample should go */
unsigned int ao_dma_index;
- /* number of analog output samples remaining */
- unsigned long ao_count;
unsigned int hw_revision; /* stc chip hardware revision number */
/* last bits sent to INTR_ENABLE_REG register */
unsigned int intr_enable_bits;
@@ -1109,9 +1105,6 @@ struct pcidas64_private {
uint8_t i2c_cal_range_bits;
/* configure digital triggers to trigger on falling edge */
unsigned int ext_trig_falling;
- /* states of various devices stored to enable read-back */
- unsigned int ad8402_state[2];
- unsigned int caldac_state[8];
short ai_cmd_running;
unsigned int ai_fifo_segment_length;
struct ext_clock_info ext_clock;
@@ -2199,10 +2192,6 @@ static void setup_sample_counters(struct comedi_device *dev,
{
struct pcidas64_private *devpriv = dev->private;
- if (cmd->stop_src == TRIG_COUNT) {
- /* set software count */
- devpriv->ai_count = cmd->stop_arg * cmd->chanlist_len;
- }
/* load hardware conversion counter */
if (use_hw_sample_counter(cmd)) {
writew(cmd->stop_arg & 0xffff,
@@ -2642,8 +2631,6 @@ static void pio_drain_ai_fifo_16(struct comedi_device *dev)
{
struct pcidas64_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
unsigned int i;
uint16_t prepost_bits;
int read_segment, read_index, write_segment, write_index;
@@ -2672,26 +2659,21 @@ static void pio_drain_ai_fifo_16(struct comedi_device *dev)
devpriv->ai_fifo_segment_length - read_index;
else
num_samples = write_index - read_index;
-
- if (cmd->stop_src == TRIG_COUNT) {
- if (devpriv->ai_count == 0)
- break;
- if (num_samples > devpriv->ai_count)
- num_samples = devpriv->ai_count;
-
- devpriv->ai_count -= num_samples;
- }
-
if (num_samples < 0) {
dev_err(dev->class_dev,
"cb_pcidas64: bug! num_samples < 0\n");
break;
}
+ num_samples = comedi_nsamples_left(s, num_samples);
+ if (num_samples == 0)
+ break;
+
for (i = 0; i < num_samples; i++) {
- cfc_write_to_buffer(s,
- readw(devpriv->main_iobase +
- ADC_FIFO_REG));
+ unsigned short val;
+
+ val = readw(devpriv->main_iobase + ADC_FIFO_REG);
+ comedi_buf_write_samples(s, &val, 1);
}
} while (read_segment != write_segment);
@@ -2706,33 +2688,30 @@ static void pio_drain_ai_fifo_32(struct comedi_device *dev)
{
struct pcidas64_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
+ unsigned int nsamples;
unsigned int i;
- unsigned int max_transfer = 100000;
uint32_t fifo_data;
int write_code =
readw(devpriv->main_iobase + ADC_WRITE_PNTR_REG) & 0x7fff;
int read_code =
readw(devpriv->main_iobase + ADC_READ_PNTR_REG) & 0x7fff;
- if (cmd->stop_src == TRIG_COUNT) {
- if (max_transfer > devpriv->ai_count)
- max_transfer = devpriv->ai_count;
+ nsamples = comedi_nsamples_left(s, 100000);
+ for (i = 0; read_code != write_code && i < nsamples;) {
+ unsigned short val;
- }
- for (i = 0; read_code != write_code && i < max_transfer;) {
fifo_data = readl(dev->mmio + ADC_FIFO_REG);
- cfc_write_to_buffer(s, fifo_data & 0xffff);
+ val = fifo_data & 0xffff;
+ comedi_buf_write_samples(s, &val, 1);
i++;
- if (i < max_transfer) {
- cfc_write_to_buffer(s, (fifo_data >> 16) & 0xffff);
+ if (i < nsamples) {
+ val = (fifo_data >> 16) & 0xffff;
+ comedi_buf_write_samples(s, &val, 1);
i++;
}
read_code = readw(devpriv->main_iobase + ADC_READ_PNTR_REG) &
0x7fff;
}
- devpriv->ai_count -= i;
}
/* empty fifo */
@@ -2750,8 +2729,7 @@ static void drain_dma_buffers(struct comedi_device *dev, unsigned int channel)
{
const struct pcidas64_board *thisboard = dev->board_ptr;
struct pcidas64_private *devpriv = dev->private;
- struct comedi_async *async = dev->read_subdev->async;
- struct comedi_cmd *cmd = &async->cmd;
+ struct comedi_subdevice *s = dev->read_subdev;
uint32_t next_transfer_addr;
int j;
int num_samples = 0;
@@ -2772,16 +2750,10 @@ static void drain_dma_buffers(struct comedi_device *dev, unsigned int channel)
devpriv->ai_buffer_bus_addr[devpriv->ai_dma_index] +
DMA_BUFFER_SIZE) && j < ai_dma_ring_count(thisboard); j++) {
/* transfer data from dma buffer to comedi buffer */
- num_samples = dma_transfer_size(dev);
- if (cmd->stop_src == TRIG_COUNT) {
- if (num_samples > devpriv->ai_count)
- num_samples = devpriv->ai_count;
- devpriv->ai_count -= num_samples;
- }
- cfc_write_array_to_buffer(dev->read_subdev,
- devpriv->ai_buffer[devpriv->
- ai_dma_index],
- num_samples * sizeof(uint16_t));
+ num_samples = comedi_nsamples_left(s, dma_transfer_size(dev));
+ comedi_buf_write_samples(s,
+ devpriv->ai_buffer[devpriv->ai_dma_index],
+ num_samples);
devpriv->ai_dma_index = (devpriv->ai_dma_index + 1) %
ai_dma_ring_count(thisboard);
}
@@ -2831,12 +2803,12 @@ static void handle_ai_interrupt(struct comedi_device *dev,
spin_unlock_irqrestore(&dev->spinlock, flags);
}
	/* if we have all the data, then quit */
- if ((cmd->stop_src == TRIG_COUNT && (int)devpriv->ai_count <= 0) ||
- (cmd->stop_src == TRIG_EXT && (status & ADC_STOP_BIT))) {
+ if ((cmd->stop_src == TRIG_COUNT &&
+ async->scans_done >= cmd->stop_arg) ||
+ (cmd->stop_src == TRIG_EXT && (status & ADC_STOP_BIT)))
async->events |= COMEDI_CB_EOA;
- }
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
}
static inline unsigned int prev_ao_dma_index(struct comedi_device *dev)
@@ -2871,22 +2843,6 @@ static int last_ao_dma_load_completed(struct comedi_device *dev)
return 1;
}
-static int ao_stopped_by_error(struct comedi_device *dev,
- const struct comedi_cmd *cmd)
-{
- struct pcidas64_private *devpriv = dev->private;
-
- if (cmd->stop_src == TRIG_NONE)
- return 1;
- if (cmd->stop_src == TRIG_COUNT) {
- if (devpriv->ao_count)
- return 1;
- if (last_ao_dma_load_completed(dev) == 0)
- return 1;
- }
- return 0;
-}
-
static inline int ao_dma_needs_restart(struct comedi_device *dev,
unsigned short dma_status)
{
@@ -2912,32 +2868,39 @@ static void restart_ao_dma(struct comedi_device *dev)
dma_start_sync(dev, 0);
}
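+/*
+ * Fill a transfer buffer with AO samples from the comedi buffer.  The
+ * count is clamped by comedi_nsamples_left() so a TRIG_COUNT command
+ * never queues more samples than it still needs to output.
+ */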
+static unsigned int cb_pcidas64_ao_fill_buffer(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned short *dest,
+ unsigned int max_bytes)
+{
+ unsigned int nsamples = comedi_bytes_to_samples(s, max_bytes);
+ unsigned int actual_bytes;
+
+ nsamples = comedi_nsamples_left(s, nsamples);
+ actual_bytes = comedi_buf_read_samples(s, dest, nsamples);
+
+ return comedi_bytes_to_samples(s, actual_bytes);
+}
+
static unsigned int load_ao_dma_buffer(struct comedi_device *dev,
const struct comedi_cmd *cmd)
{
struct pcidas64_private *devpriv = dev->private;
- unsigned int num_bytes, buffer_index, prev_buffer_index;
+ struct comedi_subdevice *s = dev->write_subdev;
+ unsigned int buffer_index = devpriv->ao_dma_index;
+ unsigned int prev_buffer_index = prev_ao_dma_index(dev);
+ unsigned int nsamples;
+ unsigned int nbytes;
unsigned int next_bits;
- buffer_index = devpriv->ao_dma_index;
- prev_buffer_index = prev_ao_dma_index(dev);
-
- num_bytes = comedi_buf_read_n_available(dev->write_subdev);
- if (num_bytes > DMA_BUFFER_SIZE)
- num_bytes = DMA_BUFFER_SIZE;
- if (cmd->stop_src == TRIG_COUNT && num_bytes > devpriv->ao_count)
- num_bytes = devpriv->ao_count;
- num_bytes -= num_bytes % bytes_in_sample;
-
- if (num_bytes == 0)
+ nsamples = cb_pcidas64_ao_fill_buffer(dev, s,
+ devpriv->ao_buffer[buffer_index],
+ DMA_BUFFER_SIZE);
+ if (nsamples == 0)
return 0;
- num_bytes = cfc_read_array_from_buffer(dev->write_subdev,
- devpriv->
- ao_buffer[buffer_index],
- num_bytes);
- devpriv->ao_dma_desc[buffer_index].transfer_size =
- cpu_to_le32(num_bytes);
+ nbytes = comedi_samples_to_bytes(s, nsamples);
+ devpriv->ao_dma_desc[buffer_index].transfer_size = cpu_to_le32(nbytes);
/* set end of chain bit so we catch underruns */
next_bits = le32_to_cpu(devpriv->ao_dma_desc[buffer_index].next);
next_bits |= PLX_END_OF_CHAIN_BIT;
@@ -2949,9 +2912,8 @@ static unsigned int load_ao_dma_buffer(struct comedi_device *dev,
devpriv->ao_dma_desc[prev_buffer_index].next = cpu_to_le32(next_bits);
devpriv->ao_dma_index = (buffer_index + 1) % AO_DMA_RING_COUNT;
- devpriv->ao_count -= num_bytes;
- return num_bytes;
+ return nbytes;
}
static void load_ao_dma(struct comedi_device *dev, const struct comedi_cmd *cmd)
@@ -3016,11 +2978,14 @@ static void handle_ao_interrupt(struct comedi_device *dev,
}
if ((status & DAC_DONE_BIT)) {
- async->events |= COMEDI_CB_EOA;
- if (ao_stopped_by_error(dev, cmd))
+ if ((cmd->stop_src == TRIG_COUNT &&
+ async->scans_done >= cmd->stop_arg) ||
+ last_ao_dma_load_completed(dev))
+ async->events |= COMEDI_CB_EOA;
+ else
async->events |= COMEDI_CB_ERROR;
}
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
}
static irqreturn_t handle_interrupt(int irq, void *d)
@@ -3191,7 +3156,9 @@ static void set_dac_interval_regs(struct comedi_device *dev,
static int prep_ao_dma(struct comedi_device *dev, const struct comedi_cmd *cmd)
{
struct pcidas64_private *devpriv = dev->private;
- unsigned int num_bytes;
+ struct comedi_subdevice *s = dev->write_subdev;
+ unsigned int nsamples;
+ unsigned int nbytes;
int i;
/* clear queue pointer too, since external queue has
@@ -3199,22 +3166,23 @@ static int prep_ao_dma(struct comedi_device *dev, const struct comedi_cmd *cmd)
writew(0, devpriv->main_iobase + ADC_QUEUE_CLEAR_REG);
writew(0, devpriv->main_iobase + DAC_BUFFER_CLEAR_REG);
- num_bytes = (DAC_FIFO_SIZE / 2) * bytes_in_sample;
- if (cmd->stop_src == TRIG_COUNT &&
- num_bytes / bytes_in_sample > devpriv->ao_count)
- num_bytes = devpriv->ao_count * bytes_in_sample;
- num_bytes = cfc_read_array_from_buffer(dev->write_subdev,
- devpriv->ao_bounce_buffer,
- num_bytes);
- for (i = 0; i < num_bytes / bytes_in_sample; i++) {
+ nsamples = cb_pcidas64_ao_fill_buffer(dev, s,
+ devpriv->ao_bounce_buffer,
+ DAC_FIFO_SIZE);
+ if (nsamples == 0)
+ return -1;
+
+ for (i = 0; i < nsamples; i++) {
writew(devpriv->ao_bounce_buffer[i],
devpriv->main_iobase + DAC_FIFO_REG);
}
- devpriv->ao_count -= num_bytes / bytes_in_sample;
- if (cmd->stop_src == TRIG_COUNT && devpriv->ao_count == 0)
+
+ if (cmd->stop_src == TRIG_COUNT &&
+ s->async->scans_done >= cmd->stop_arg)
return 0;
- num_bytes = load_ao_dma_buffer(dev, cmd);
- if (num_bytes == 0)
+
+ nbytes = load_ao_dma_buffer(dev, cmd);
+ if (nbytes == 0)
return -1;
load_ao_dma(dev, cmd);
@@ -3275,7 +3243,6 @@ static int ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
writew(0x0, devpriv->main_iobase + DAC_CONTROL0_REG);
devpriv->ao_dma_index = 0;
- devpriv->ao_count = cmd->stop_arg * cmd->chanlist_len;
set_dac_select_reg(dev, cmd);
set_dac_interval_regs(dev, cmd);
@@ -3580,9 +3547,6 @@ static void caldac_write(struct comedi_device *dev, unsigned int channel,
unsigned int value)
{
const struct pcidas64_board *thisboard = dev->board_ptr;
- struct pcidas64_private *devpriv = dev->private;
-
- devpriv->caldac_state[channel] = value;
switch (thisboard->layout) {
case LAYOUT_60XX:
@@ -3597,33 +3561,27 @@ static void caldac_write(struct comedi_device *dev, unsigned int channel,
}
}
-static int calib_write_insn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int cb_pcidas64_calib_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct pcidas64_private *devpriv = dev->private;
- int channel = CR_CHAN(insn->chanspec);
-
- /* return immediately if setting hasn't changed, since
- * programming these things is slow */
- if (devpriv->caldac_state[channel] == data[0])
- return 1;
-
- caldac_write(dev, channel, data[0]);
-
- return 1;
-}
+ unsigned int chan = CR_CHAN(insn->chanspec);
-static int calib_read_insn(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn,
- unsigned int *data)
-{
- struct pcidas64_private *devpriv = dev->private;
- unsigned int channel = CR_CHAN(insn->chanspec);
+ /*
+ * Programming the calib device is slow. Only write the
+ * last data value if the value has changed.
+ */
+ if (insn->n) {
+ unsigned int val = data[insn->n - 1];
- data[0] = devpriv->caldac_state[channel];
+ if (s->readback[chan] != val) {
+ caldac_write(dev, chan, val);
+ s->readback[chan] = val;
+ }
+ }
- return 1;
+ return insn->n;
}
static void ad8402_write(struct comedi_device *dev, unsigned int channel,
@@ -3635,8 +3593,6 @@ static void ad8402_write(struct comedi_device *dev, unsigned int channel,
unsigned int bitstream = ((channel & 0x3) << 8) | (value & 0xff);
static const int ad8402_udelay = 1;
- devpriv->ad8402_state[channel] = value;
-
register_bits = SELECT_8402_64XX_BIT;
udelay(ad8402_udelay);
writew(register_bits, devpriv->main_iobase + CALIBRATION_REG);
@@ -3658,35 +3614,27 @@ static void ad8402_write(struct comedi_device *dev, unsigned int channel,
}
/* for pci-das6402/16, channel 0 is analog input gain and channel 1 is offset */
-static int ad8402_write_insn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int cb_pcidas64_ad8402_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct pcidas64_private *devpriv = dev->private;
- int channel = CR_CHAN(insn->chanspec);
-
- /* return immediately if setting hasn't changed, since
- * programming these things is slow */
- if (devpriv->ad8402_state[channel] == data[0])
- return 1;
-
- devpriv->ad8402_state[channel] = data[0];
-
- ad8402_write(dev, channel, data[0]);
-
- return 1;
-}
+ unsigned int chan = CR_CHAN(insn->chanspec);
-static int ad8402_read_insn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct pcidas64_private *devpriv = dev->private;
- unsigned int channel = CR_CHAN(insn->chanspec);
+ /*
+ * Programming the calib device is slow. Only write the
+ * last data value if the value has changed.
+ */
+ if (insn->n) {
+ unsigned int val = data[insn->n - 1];
- data[0] = devpriv->ad8402_state[channel];
+ if (s->readback[chan] != val) {
+ ad8402_write(dev, chan, val);
+ s->readback[chan] = val;
+ }
+ }
- return 1;
+ return insn->n;
}
static uint16_t read_eeprom(struct comedi_device *dev, uint8_t address)
@@ -3816,7 +3764,6 @@ static int setup_subdevices(struct comedi_device *dev)
s->maxdata = (1 << thisboard->ao_bits) - 1;
s->range_table = thisboard->ao_range_table;
s->insn_write = ao_winsn;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
@@ -3849,7 +3796,7 @@ static int setup_subdevices(struct comedi_device *dev)
if (thisboard->layout == LAYOUT_64XX) {
s = &dev->subdevices[3];
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 4;
s->maxdata = 1;
s->range_table = &range_digital;
@@ -3895,10 +3842,16 @@ static int setup_subdevices(struct comedi_device *dev)
s->maxdata = 0xfff;
else
s->maxdata = 0xff;
- s->insn_read = calib_read_insn;
- s->insn_write = calib_write_insn;
- for (i = 0; i < s->n_chan; i++)
+ s->insn_write = cb_pcidas64_calib_insn_write;
+
+ ret = comedi_alloc_subdev_readback(s);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < s->n_chan; i++) {
caldac_write(dev, i, s->maxdata / 2);
+ s->readback[i] = s->maxdata / 2;
+ }
/* 2 channel ad8402 potentiometer */
s = &dev->subdevices[7];
@@ -3906,11 +3859,17 @@ static int setup_subdevices(struct comedi_device *dev)
s->type = COMEDI_SUBD_CALIB;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
s->n_chan = 2;
- s->insn_read = ad8402_read_insn;
- s->insn_write = ad8402_write_insn;
s->maxdata = 0xff;
- for (i = 0; i < s->n_chan; i++)
+ s->insn_write = cb_pcidas64_ad8402_insn_write;
+
+ ret = comedi_alloc_subdev_readback(s);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < s->n_chan; i++) {
ad8402_write(dev, i, s->maxdata / 2);
+ s->readback[i] = s->maxdata / 2;
+ }
} else
s->type = COMEDI_SUBD_UNUSED;
diff --git a/drivers/staging/comedi/drivers/cb_pcimdas.c b/drivers/staging/comedi/drivers/cb_pcimdas.c
index fe4d2544f3d..70dd2c9eecd 100644
--- a/drivers/staging/comedi/drivers/cb_pcimdas.c
+++ b/drivers/staging/comedi/drivers/cb_pcimdas.c
@@ -1,40 +1,45 @@
/*
- comedi/drivers/cb_pcimdas.c
- Comedi driver for Computer Boards PCIM-DAS1602/16
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * comedi/drivers/cb_pcimdas.c
+ * Comedi driver for Computer Boards PCIM-DAS1602/16 and PCIe-DAS1602/16
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
/*
-Driver: cb_pcimdas
-Description: Measurement Computing PCI Migration series boards
-Devices: [ComputerBoards] PCIM-DAS1602/16 (cb_pcimdas)
-Author: Richard Bytheway
-Updated: Wed, 13 Nov 2002 12:34:56 +0000
-Status: experimental
-
-Written to support the PCIM-DAS1602/16 on a 2.4 series kernel.
-
-Configuration Options:
- [0] - PCI bus number
- [1] - PCI slot number
-
-Developed from cb_pcidas and skel by Richard Bytheway (mocelet@sucs.org).
-Only supports DIO, AO and simple AI in it's present form.
-No interrupts, multi channel or FIFO AI,
-although the card looks like it could support this.
-See http://www.mccdaq.com/PDFs/Manuals/pcim-das1602-16.pdf for more details.
-*/
+ * Driver: cb_pcimdas
+ * Description: Measurement Computing PCI Migration series boards
+ * Devices: [ComputerBoards] PCIM-DAS1602/16 (cb_pcimdas), PCIe-DAS1602/16
+ * Author: Richard Bytheway
+ * Updated: Mon, 13 Oct 2014 11:57:39 +0000
+ * Status: experimental
+ *
+ * Written to support the PCIM-DAS1602/16 and PCIe-DAS1602/16.
+ *
+ * Configuration Options:
+ * none
+ *
+ * Manual configuration of PCI(e) cards is not supported; they are configured
+ * automatically.
+ *
+ * Developed from cb_pcidas and skel by Richard Bytheway (mocelet@sucs.org).
+ * Only supports DIO, AO and simple AI in its present form.
+ * No interrupts, multi-channel or FIFO AI,
+ * although the card looks like it could support this.
+ *
+ * http://www.mccdaq.com/PDFs/Manuals/pcim-das1602-16.pdf
+ * http://www.mccdaq.com/PDFs/Manuals/pcie-das1602-16.pdf
+ */
#include <linux/module.h>
#include <linux/pci.h>
@@ -45,7 +50,7 @@ See http://www.mccdaq.com/PDFs/Manuals/pcim-das1602-16.pdf for more details.
#include "plx9052.h"
#include "8255.h"
-/* Registers for the PCIM-DAS1602/16 */
+/* Registers for the PCIM-DAS1602/16 and PCIe-DAS1602/16 */
/* DAC Offsets */
#define ADC_TRIG 0
@@ -221,7 +226,6 @@ static int cb_pcimdas_auto_attach(struct comedi_device *dev,
/* ranges are hardware settable, but not software readable. */
s->range_table = &range_unknown;
s->insn_write = cb_pcimdas_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
@@ -251,7 +255,8 @@ static int cb_pcimdas_pci_probe(struct pci_dev *dev,
}
static const struct pci_device_id cb_pcimdas_pci_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_CB, 0x0056) },
+ { PCI_DEVICE(PCI_VENDOR_ID_CB, 0x0056) }, /* PCIM-DAS1602/16 */
+ { PCI_DEVICE(PCI_VENDOR_ID_CB, 0x0115) }, /* PCIe-DAS1602/16 */
{ 0 }
};
MODULE_DEVICE_TABLE(pci, cb_pcimdas_pci_table);
@@ -265,5 +270,5 @@ static struct pci_driver cb_pcimdas_pci_driver = {
module_comedi_pci_driver(cb_pcimdas_driver, cb_pcimdas_pci_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_DESCRIPTION("Comedi driver for PCIM-DAS1602/16 and PCIe-DAS1602/16");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/comedi_bond.c b/drivers/staging/comedi/drivers/comedi_bond.c
index 8450c99af8b..85b2f4ab1ba 100644
--- a/drivers/staging/comedi/drivers/comedi_bond.c
+++ b/drivers/staging/comedi/drivers/comedi_bond.c
@@ -61,8 +61,7 @@ struct bonded_device {
};
struct comedi_bond_private {
-# define MAX_BOARD_NAME 256
- char name[MAX_BOARD_NAME];
+ char name[256];
struct bonded_device **devs;
unsigned ndevs;
unsigned nchans;
@@ -262,12 +261,10 @@ static int do_dev_config(struct comedi_device *dev, struct comedi_devconfig *it)
{
/* Append dev:subdev to devpriv->name */
char buf[20];
- int left =
- MAX_BOARD_NAME - strlen(devpriv->name) - 1;
snprintf(buf, sizeof(buf), "%u:%u ",
bdev->minor, bdev->subdev);
- buf[sizeof(buf) - 1] = 0;
- strncat(devpriv->name, buf, left);
+ strlcat(devpriv->name, buf,
+ sizeof(devpriv->name));
}
}
diff --git a/drivers/staging/comedi/drivers/comedi_fc.h b/drivers/staging/comedi/drivers/comedi_fc.h
index ce283597250..756be931c1a 100644
--- a/drivers/staging/comedi/drivers/comedi_fc.h
+++ b/drivers/staging/comedi/drivers/comedi_fc.h
@@ -23,49 +23,6 @@
#include "../comedidev.h"
-static inline unsigned int cfc_bytes_per_scan(struct comedi_subdevice *s)
-{
- return comedi_bytes_per_scan(s);
-}
-
-static inline void cfc_inc_scan_progress(struct comedi_subdevice *s,
- unsigned int num_bytes)
-{
- comedi_inc_scan_progress(s, num_bytes);
-}
-
-static inline unsigned int cfc_write_array_to_buffer(struct comedi_subdevice *s,
- const void *data,
- unsigned int num_bytes)
-{
- return comedi_write_array_to_buffer(s, data, num_bytes);
-}
-
-static inline unsigned int cfc_write_to_buffer(struct comedi_subdevice *s,
- unsigned short data)
-{
- return comedi_write_array_to_buffer(s, &data, sizeof(data));
-};
-
-static inline unsigned int cfc_write_long_to_buffer(struct comedi_subdevice *s,
- unsigned int data)
-{
- return comedi_write_array_to_buffer(s, &data, sizeof(data));
-};
-
-static inline unsigned int
-cfc_read_array_from_buffer(struct comedi_subdevice *s, void *data,
- unsigned int num_bytes)
-{
- return comedi_read_array_from_buffer(s, data, num_bytes);
-}
-
-static inline unsigned int cfc_handle_events(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- return comedi_handle_events(dev, s);
-}
-
/**
* cfc_check_trigger_src() - trivially validate a comedi_cmd trigger source
* @src: pointer to the trigger source to validate
diff --git a/drivers/staging/comedi/drivers/comedi_parport.c b/drivers/staging/comedi/drivers/comedi_parport.c
index bf002988192..3bac903c862 100644
--- a/drivers/staging/comedi/drivers/comedi_parport.c
+++ b/drivers/staging/comedi/drivers/comedi_parport.c
@@ -225,10 +225,9 @@ static irqreturn_t parport_interrupt(int irq, void *d)
if (!(ctrl & PARPORT_CTRL_IRQ_ENA))
return IRQ_NONE;
- comedi_buf_put(s, 0);
- s->async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOS;
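+	/* store the current DIO state as one sample and let the core handle events */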
+ comedi_buf_write_samples(s, &s->state, 1);
+ comedi_handle_events(dev, s);
- comedi_event(dev, s);
return IRQ_HANDLED;
}
diff --git a/drivers/staging/comedi/drivers/comedi_test.c b/drivers/staging/comedi/drivers/comedi_test.c
index 00c03df7252..e56525a1c8f 100644
--- a/drivers/staging/comedi/drivers/comedi_test.c
+++ b/drivers/staging/comedi/drivers/comedi_test.c
@@ -52,18 +52,23 @@ zero volts).
#include "comedi_fc.h"
#include <linux/timer.h>
+#include <linux/ktime.h>
#define N_CHANS 8
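+/*
+ * Flag bits for waveform_private->state_bits.  The timer routine checks
+ * WAVEFORM_AI_RUNNING so that a cancel, which cannot del_timer_sync()
+ * from timer context, still stops further buffer writes.
+ */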
+enum waveform_state_bits {
+ WAVEFORM_AI_RUNNING = 0
+};
+
/* Data unique to this driver */
struct waveform_private {
struct timer_list timer;
- struct timeval last; /* time last timer interrupt occurred */
+ ktime_t last; /* time last timer interrupt occurred */
unsigned int uvolt_amplitude; /* waveform amplitude in microvolts */
unsigned long usec_period; /* waveform period in microseconds */
unsigned long usec_current; /* current time (mod waveform period) */
unsigned long usec_remainder; /* usec since last scan */
- unsigned long ai_count; /* number of conversions remaining */
+ unsigned long state_bits;
unsigned int scan_period; /* scan period in usec */
unsigned int convert_period; /* conversion period in usec */
unsigned int ao_loopbacks[N_CHANS];
@@ -164,36 +169,29 @@ static void waveform_ai_interrupt(unsigned long arg)
{
struct comedi_device *dev = (struct comedi_device *)arg;
struct waveform_private *devpriv = dev->private;
- struct comedi_async *async = dev->read_subdev->async;
+ struct comedi_subdevice *s = dev->read_subdev;
+ struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
unsigned int i, j;
/* all times in microsec */
unsigned long elapsed_time;
unsigned int num_scans;
- struct timeval now;
- bool stopping = false;
+ ktime_t now;
+
+ /* check command is still active */
+ if (!test_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits))
+ return;
- do_gettimeofday(&now);
+ now = ktime_get();
- elapsed_time =
- 1000000 * (now.tv_sec - devpriv->last.tv_sec) + now.tv_usec -
- devpriv->last.tv_usec;
+ elapsed_time = ktime_to_us(ktime_sub(now, devpriv->last));
devpriv->last = now;
num_scans =
(devpriv->usec_remainder + elapsed_time) / devpriv->scan_period;
devpriv->usec_remainder =
(devpriv->usec_remainder + elapsed_time) % devpriv->scan_period;
- if (cmd->stop_src == TRIG_COUNT) {
- unsigned int remaining = cmd->stop_arg - devpriv->ai_count;
-
- if (num_scans >= remaining) {
- /* about to finish */
- num_scans = remaining;
- stopping = true;
- }
- }
-
+ num_scans = comedi_nscans_left(s, num_scans);
for (i = 0; i < num_scans; i++) {
for (j = 0; j < cmd->chanlist_len; j++) {
unsigned short sample;
@@ -203,20 +201,19 @@ static void waveform_ai_interrupt(unsigned long arg)
devpriv->usec_current +
i * devpriv->scan_period +
j * devpriv->convert_period);
- cfc_write_to_buffer(dev->read_subdev, sample);
+ comedi_buf_write_samples(s, &sample, 1);
}
}
- devpriv->ai_count += i;
devpriv->usec_current += elapsed_time;
devpriv->usec_current %= devpriv->usec_period;
- if (stopping)
+ if (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg)
async->events |= COMEDI_CB_EOA;
else
mod_timer(&devpriv->timer, jiffies + 1);
- comedi_event(dev, dev->read_subdev);
+ comedi_handle_events(dev, s);
}
static int waveform_ai_cmdtest(struct comedi_device *dev,
@@ -308,7 +305,6 @@ static int waveform_ai_cmd(struct comedi_device *dev,
return -1;
}
- devpriv->ai_count = 0;
devpriv->scan_period = cmd->scan_begin_arg / nano_per_micro;
if (cmd->convert_src == TRIG_NOW)
@@ -316,11 +312,16 @@ static int waveform_ai_cmd(struct comedi_device *dev,
else /* TRIG_TIMER */
devpriv->convert_period = cmd->convert_arg / nano_per_micro;
- do_gettimeofday(&devpriv->last);
- devpriv->usec_current = devpriv->last.tv_usec % devpriv->usec_period;
+ devpriv->last = ktime_get();
+ devpriv->usec_current =
+ ((u32)ktime_to_us(devpriv->last)) % devpriv->usec_period;
devpriv->usec_remainder = 0;
devpriv->timer.expires = jiffies + 1;
+ /* mark command as active */
+ smp_mb__before_atomic();
+ set_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits);
+ smp_mb__after_atomic();
add_timer(&devpriv->timer);
return 0;
}
@@ -330,7 +331,11 @@ static int waveform_ai_cancel(struct comedi_device *dev,
{
struct waveform_private *devpriv = dev->private;
- del_timer_sync(&devpriv->timer);
+ /* mark command as no longer active */
+ clear_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits);
+ smp_mb__after_atomic();
+ /* cannot call del_timer_sync() as may be called from timer routine */
+ del_timer(&devpriv->timer);
return 0;
}
@@ -405,7 +410,7 @@ static int waveform_attach(struct comedi_device *dev,
dev->write_subdev = s;
/* analog output subdevice (loopback) */
s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITEABLE | SDF_GROUND;
+ s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
s->n_chan = N_CHANS;
s->maxdata = 0xffff;
s->range_table = &waveform_ai_ranges;
@@ -432,7 +437,7 @@ static void waveform_detach(struct comedi_device *dev)
struct waveform_private *devpriv = dev->private;
if (devpriv)
- waveform_ai_cancel(dev, dev->read_subdev);
+ del_timer_sync(&devpriv->timer);
}
static struct comedi_driver waveform_driver = {
diff --git a/drivers/staging/comedi/drivers/dac02.c b/drivers/staging/comedi/drivers/dac02.c
index 34cbe83f0ce..beb36c8dd00 100644
--- a/drivers/staging/comedi/drivers/dac02.c
+++ b/drivers/staging/comedi/drivers/dac02.c
@@ -129,7 +129,6 @@ static int dac02_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->maxdata = 0x0fff;
s->range_table = &das02_ao_ranges;
s->insn_write = dac02_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/daqboard2000.c b/drivers/staging/comedi/drivers/daqboard2000.c
index e5b5a8133b3..96697fbb523 100644
--- a/drivers/staging/comedi/drivers/daqboard2000.c
+++ b/drivers/staging/comedi/drivers/daqboard2000.c
@@ -707,7 +707,6 @@ static int daqboard2000_auto_attach(struct comedi_device *dev,
s->n_chan = 2;
s->maxdata = 0xffff;
s->insn_write = daqboard2000_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
s->range_table = &range_bipolar10;
result = comedi_alloc_subdev_readback(s);
diff --git a/drivers/staging/comedi/drivers/das08.c b/drivers/staging/comedi/drivers/das08.c
index bdb671a66e2..20a9f0eb72b 100644
--- a/drivers/staging/comedi/drivers/das08.c
+++ b/drivers/staging/comedi/drivers/das08.c
@@ -474,7 +474,6 @@ int das08_common_attach(struct comedi_device *dev, unsigned long iobase)
s->maxdata = (1 << thisboard->ao_nbits) - 1;
s->range_table = &range_bipolar5;
s->insn_write = das08_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
@@ -507,7 +506,7 @@ int das08_common_attach(struct comedi_device *dev, unsigned long iobase)
/* do */
if (thisboard->do_nchan) {
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = thisboard->do_nchan;
s->maxdata = 1;
s->range_table = &range_digital;
diff --git a/drivers/staging/comedi/drivers/das16.c b/drivers/staging/comedi/drivers/das16.c
index 2d8e86cec47..2436057304a 100644
--- a/drivers/staging/comedi/drivers/das16.c
+++ b/drivers/staging/comedi/drivers/das16.c
@@ -541,6 +541,7 @@ static void das16_interrupt(struct comedi_device *dev)
struct comedi_cmd *cmd = &async->cmd;
unsigned long spin_flags;
unsigned long dma_flags;
+ unsigned int nsamples;
int num_bytes, residue;
int buffer_index;
@@ -583,21 +584,25 @@ static void das16_interrupt(struct comedi_device *dev)
spin_unlock_irqrestore(&dev->spinlock, spin_flags);
- cfc_write_array_to_buffer(s,
- devpriv->dma_buffer[buffer_index], num_bytes);
+ nsamples = comedi_bytes_to_samples(s, num_bytes);
+ comedi_buf_write_samples(s, devpriv->dma_buffer[buffer_index],
+ nsamples);
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
}
static void das16_timer_interrupt(unsigned long arg)
{
struct comedi_device *dev = (struct comedi_device *)arg;
struct das16_private_struct *devpriv = dev->private;
+ unsigned long flags;
das16_interrupt(dev);
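+	/* timer_running is checked under the spinlock since it can change concurrently */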
+ spin_lock_irqsave(&dev->spinlock, flags);
if (devpriv->timer_running)
mod_timer(&devpriv->timer, jiffies + timer_period());
+ spin_unlock_irqrestore(&dev->spinlock, flags);
}
static int das16_ai_check_chanlist(struct comedi_device *dev,
@@ -764,7 +769,7 @@ static int das16_cmd_exec(struct comedi_device *dev, struct comedi_subdevice *s)
return -1;
}
- devpriv->adc_byte_count = cmd->stop_arg * cfc_bytes_per_scan(s);
+ devpriv->adc_byte_count = cmd->stop_arg * comedi_bytes_per_scan(s);
if (devpriv->can_burst)
outb(DAS1600_CONV_DISABLE, dev->iobase + DAS1600_CONV_REG);
@@ -814,7 +819,8 @@ static int das16_cmd_exec(struct comedi_device *dev, struct comedi_subdevice *s)
enable_dma(devpriv->dma_chan);
release_dma_lock(flags);
- /* set up interrupt */
+ /* set up timer */
+ spin_lock_irqsave(&dev->spinlock, flags);
devpriv->timer_running = 1;
devpriv->timer.expires = jiffies + timer_period();
add_timer(&devpriv->timer);
@@ -823,6 +829,7 @@ static int das16_cmd_exec(struct comedi_device *dev, struct comedi_subdevice *s)
if (devpriv->can_burst)
outb(0, dev->iobase + DAS1600_CONV_REG);
+ spin_unlock_irqrestore(&dev->spinlock, flags);
return 0;
}
@@ -856,8 +863,9 @@ static void das16_ai_munge(struct comedi_device *dev,
unsigned int num_bytes,
unsigned int start_chan_index)
{
- unsigned int i, num_samples = num_bytes / sizeof(short);
unsigned short *data = array;
+ unsigned int num_samples = comedi_bytes_to_samples(s, num_bytes);
+ unsigned int i;
for (i = 0; i < num_samples; i++) {
data[i] = le16_to_cpu(data[i]);
@@ -1167,7 +1175,6 @@ static int das16_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->maxdata = 0x0fff;
s->range_table = devpriv->user_ao_range_table;
s->insn_write = das16_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
@@ -1226,6 +1233,8 @@ static void das16_detach(struct comedi_device *dev)
int i;
if (devpriv) {
+ if (devpriv->timer.data)
+ del_timer_sync(&devpriv->timer);
if (dev->iobase)
das16_reset(dev);
diff --git a/drivers/staging/comedi/drivers/das16m1.c b/drivers/staging/comedi/drivers/das16m1.c
index 24b63c452f5..80f41b7e827 100644
--- a/drivers/staging/comedi/drivers/das16m1.c
+++ b/drivers/staging/comedi/drivers/das16m1.c
@@ -442,8 +442,7 @@ static void das16m1_handler(struct comedi_device *dev, unsigned int status)
num_samples = FIFO_SIZE;
insw(dev->iobase, devpriv->ai_buffer, num_samples);
munge_sample_array(devpriv->ai_buffer, num_samples);
- cfc_write_array_to_buffer(s, devpriv->ai_buffer,
- num_samples * sizeof(short));
+ comedi_buf_write_samples(s, devpriv->ai_buffer, num_samples);
devpriv->adc_count += num_samples;
if (cmd->stop_src == TRIG_COUNT) {
@@ -460,7 +459,7 @@ static void das16m1_handler(struct comedi_device *dev, unsigned int status)
dev_err(dev->class_dev, "fifo overflow\n");
}
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
}
static int das16m1_poll(struct comedi_device *dev, struct comedi_subdevice *s)
@@ -598,7 +597,7 @@ static int das16m1_attach(struct comedi_device *dev,
s = &dev->subdevices[2];
/* do */
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 4;
s->maxdata = 1;
s->range_table = &range_digital;
diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c
index a53d87ce9b1..be825d21a18 100644
--- a/drivers/staging/comedi/drivers/das1800.c
+++ b/drivers/staging/comedi/drivers/das1800.c
@@ -421,7 +421,6 @@ static const struct das1800_board das1800_boards[] = {
};
struct das1800_private {
- unsigned int count; /* number of data points left to be taken */
unsigned int divisor1; /* value to load into board's counter 1 for timed conversions */
unsigned int divisor2; /* value to load into board's counter 2 for timed conversions */
int irq_dma_bits; /* bits for control register b */
@@ -479,42 +478,33 @@ static void das1800_handle_fifo_half_full(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct das1800_private *devpriv = dev->private;
- int numPoints = 0; /* number of points to read */
- struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned int nsamples = comedi_nsamples_left(s, FIFO_SIZE / 2);
- numPoints = FIFO_SIZE / 2;
- /* if we only need some of the points */
- if (cmd->stop_src == TRIG_COUNT && devpriv->count < numPoints)
- numPoints = devpriv->count;
- insw(dev->iobase + DAS1800_FIFO, devpriv->ai_buf0, numPoints);
- munge_data(dev, devpriv->ai_buf0, numPoints);
- cfc_write_array_to_buffer(s, devpriv->ai_buf0,
- numPoints * sizeof(devpriv->ai_buf0[0]));
- if (cmd->stop_src == TRIG_COUNT)
- devpriv->count -= numPoints;
+ insw(dev->iobase + DAS1800_FIFO, devpriv->ai_buf0, nsamples);
+ munge_data(dev, devpriv->ai_buf0, nsamples);
+ comedi_buf_write_samples(s, devpriv->ai_buf0, nsamples);
}
static void das1800_handle_fifo_not_empty(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- struct das1800_private *devpriv = dev->private;
+ struct comedi_cmd *cmd = &s->async->cmd;
unsigned short dpnt;
int unipolar;
- struct comedi_cmd *cmd = &s->async->cmd;
unipolar = inb(dev->iobase + DAS1800_CONTROL_C) & UB;
while (inb(dev->iobase + DAS1800_STATUS) & FNE) {
- if (cmd->stop_src == TRIG_COUNT && devpriv->count == 0)
- break;
dpnt = inw(dev->iobase + DAS1800_FIFO);
/* convert to unsigned type if we are in a bipolar mode */
if (!unipolar)
;
dpnt = munge_bipolar_sample(dev, dpnt);
- cfc_write_to_buffer(s, dpnt);
- if (cmd->stop_src == TRIG_COUNT)
- devpriv->count--;
+ comedi_buf_write_samples(s, &dpnt, 1);
+
+ if (cmd->stop_src == TRIG_COUNT &&
+ s->async->scans_done >= cmd->stop_arg)
+ break;
}
}
@@ -525,8 +515,8 @@ static void das1800_flush_dma_channel(struct comedi_device *dev,
unsigned int channel, uint16_t *buffer)
{
struct das1800_private *devpriv = dev->private;
- unsigned int num_bytes, num_samples;
- struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned int nbytes;
+ unsigned int nsamples;
disable_dma(channel);
@@ -535,17 +525,12 @@ static void das1800_flush_dma_channel(struct comedi_device *dev,
clear_dma_ff(channel);
/* figure out how many points to read */
- num_bytes = devpriv->dma_transfer_size - get_dma_residue(channel);
- num_samples = num_bytes / sizeof(short);
-
- /* if we only need some of the points */
- if (cmd->stop_src == TRIG_COUNT && devpriv->count < num_samples)
- num_samples = devpriv->count;
+ nbytes = devpriv->dma_transfer_size - get_dma_residue(channel);
+ nsamples = comedi_bytes_to_samples(s, nbytes);
+ nsamples = comedi_nsamples_left(s, nsamples);
- munge_data(dev, buffer, num_samples);
- cfc_write_array_to_buffer(s, buffer, num_bytes);
- if (cmd->stop_src == TRIG_COUNT)
- devpriv->count -= num_samples;
+ munge_data(dev, buffer, nsamples);
+ comedi_buf_write_samples(s, buffer, nsamples);
}
/* flushes remaining data from board when external trigger has stopped acquisition
@@ -649,14 +634,13 @@ static void das1800_ai_handler(struct comedi_device *dev)
das1800_handle_fifo_not_empty(dev, s);
}
- async->events |= COMEDI_CB_BLOCK;
/* if the card's fifo has overflowed */
if (status & OVF) {
/* clear OVF interrupt bit */
outb(CLEAR_INTR_MASK & ~OVF, dev->iobase + DAS1800_STATUS);
dev_err(dev->class_dev, "FIFO overflow\n");
async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return;
}
/* stop taking data if appropriate */
@@ -670,11 +654,12 @@ static void das1800_ai_handler(struct comedi_device *dev)
else
das1800_handle_fifo_not_empty(dev, s);
async->events |= COMEDI_CB_EOA;
- } else if (cmd->stop_src == TRIG_COUNT && devpriv->count == 0) { /* stop_src TRIG_COUNT */
+ } else if (cmd->stop_src == TRIG_COUNT &&
+ async->scans_done >= cmd->stop_arg) {
async->events |= COMEDI_CB_EOA;
}
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
}
static int das1800_ai_poll(struct comedi_device *dev,
@@ -1102,9 +1087,6 @@ static int das1800_ai_do_cmd(struct comedi_device *dev,
/* interrupt fifo half full */
devpriv->irq_dma_bits |= FIMD;
}
- /* determine how many conversions we need */
- if (cmd->stop_src == TRIG_COUNT)
- devpriv->count = cmd->stop_arg * cmd->chanlist_len;
das1800_cancel(dev, s);
@@ -1515,7 +1497,7 @@ static int das1800_attach(struct comedi_device *dev,
/* do */
s = &dev->subdevices[3];
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = thisboard->do_n_chan;
s->maxdata = 1;
s->range_table = &range_digital;
diff --git a/drivers/staging/comedi/drivers/das6402.c b/drivers/staging/comedi/drivers/das6402.c
index ab6e4060888..780f4f646ea 100644
--- a/drivers/staging/comedi/drivers/das6402.c
+++ b/drivers/staging/comedi/drivers/das6402.c
@@ -35,6 +35,7 @@
#include <linux/interrupt.h>
#include "../comedidev.h"
+#include "comedi_fc.h"
#include "8253.h"
/*
@@ -192,26 +193,197 @@ static void das6402_enable_counter(struct comedi_device *dev, bool load)
}
}
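+/* read a single conversion result (12-bit boards need the value shifted down) */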
+static unsigned int das6402_ai_read_sample(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ unsigned int val;
+
+ val = inw(dev->iobase + DAS6402_AI_DATA_REG);
+ if (s->maxdata == 0x0fff)
+ val >>= 4;
+ return val;
+}
+
static irqreturn_t das6402_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
+ struct comedi_subdevice *s = dev->read_subdev;
+ struct comedi_async *async = s->async;
+ struct comedi_cmd *cmd = &async->cmd;
+ unsigned int status;
+
+ status = inb(dev->iobase + DAS6402_STATUS_REG);
+ if ((status & DAS6402_STATUS_INT) == 0)
+ return IRQ_NONE;
+
+ if (status & DAS6402_STATUS_FFULL) {
+ async->events |= COMEDI_CB_OVERFLOW;
+ } else if (status & DAS6402_STATUS_FFNE) {
+ unsigned int val;
+
+ val = das6402_ai_read_sample(dev, s);
+ comedi_buf_write_samples(s, &val, 1);
+
+ if (cmd->stop_src == TRIG_COUNT &&
+ async->scans_done >= cmd->stop_arg)
+ async->events |= COMEDI_CB_EOA;
+ }
das6402_clear_all_interrupts(dev);
+ comedi_handle_events(dev, s);
+
return IRQ_HANDLED;
}
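+/*
+ * Build the mode register value for a chanspec: range bits plus the
+ * single-ended and unipolar flags, OR'd into the caller-supplied mode.
+ */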
+static void das6402_ai_set_mode(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int chanspec,
+ unsigned int mode)
+{
+ unsigned int range = CR_RANGE(chanspec);
+ unsigned int aref = CR_AREF(chanspec);
+
+ mode |= DAS6402_MODE_RANGE(range);
+ if (aref == AREF_GROUND)
+ mode |= DAS6402_MODE_SE;
+ if (comedi_range_is_unipolar(s, range))
+ mode |= DAS6402_MODE_UNI;
+
+ das6402_set_mode(dev, mode);
+}
+
static int das6402_ai_cmd(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- return -EINVAL;
+ struct das6402_private *devpriv = dev->private;
+ struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned int chan_lo = CR_CHAN(cmd->chanlist[0]);
+ unsigned int chan_hi = CR_CHAN(cmd->chanlist[cmd->chanlist_len - 1]);
+
+ das6402_ai_set_mode(dev, s, cmd->chanlist[0], DAS6402_MODE_FIFONEPTY);
+
+ /* load the mux for chanlist conversion */
+ outw(DAS6402_AI_MUX_HI(chan_hi) | DAS6402_AI_MUX_LO(chan_lo),
+ dev->iobase + DAS6402_AI_MUX_REG);
+
+ das6402_enable_counter(dev, true);
+
+ /* enable interrupt and pacer trigger */
+ outb(DAS6402_CTRL_INTE |
+ DAS6402_CTRL_IRQ(devpriv->irq) |
+ DAS6402_CTRL_PACER_TRIG, dev->iobase + DAS6402_CTRL_REG);
+
+ return 0;
+}
+
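+/*
+ * The chanlist must use consecutive channels with the same range and
+ * reference; differential channels are limited to the lower half of
+ * the available channels.
+ */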
+static int das6402_ai_check_chanlist(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_cmd *cmd)
+{
+ unsigned int chan0 = CR_CHAN(cmd->chanlist[0]);
+ unsigned int range0 = CR_RANGE(cmd->chanlist[0]);
+ unsigned int aref0 = CR_AREF(cmd->chanlist[0]);
+ int i;
+
+ for (i = 1; i < cmd->chanlist_len; i++) {
+ unsigned int chan = CR_CHAN(cmd->chanlist[i]);
+ unsigned int range = CR_RANGE(cmd->chanlist[i]);
+ unsigned int aref = CR_AREF(cmd->chanlist[i]);
+
+ if (chan != chan0 + i) {
+ dev_dbg(dev->class_dev,
+ "chanlist must be consecutive\n");
+ return -EINVAL;
+ }
+
+ if (range != range0) {
+ dev_dbg(dev->class_dev,
+ "chanlist must have the same range\n");
+ return -EINVAL;
+ }
+
+ if (aref != aref0) {
+ dev_dbg(dev->class_dev,
+ "chanlist must have the same reference\n");
+ return -EINVAL;
+ }
+
+ if (aref0 == AREF_DIFF && chan > (s->n_chan / 2)) {
+ dev_dbg(dev->class_dev,
+ "chanlist differential channel too large\n");
+ return -EINVAL;
+ }
+ }
+ return 0;
}
static int das6402_ai_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_cmd *cmd)
{
- return -EINVAL;
+ struct das6402_private *devpriv = dev->private;
+ int err = 0;
+ unsigned int arg;
+
+ /* Step 1 : check if triggers are trivially valid */
+
+ err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW);
+ err |= cfc_check_trigger_src(&cmd->scan_begin_src, TRIG_FOLLOW);
+ err |= cfc_check_trigger_src(&cmd->convert_src, TRIG_TIMER);
+ err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
+ err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
+
+ if (err)
+ return 1;
+
+ /* Step 2a : make sure trigger sources are unique */
+
+ err |= cfc_check_trigger_is_unique(cmd->stop_src);
+
+ /* Step 2b : and mutually compatible */
+
+ if (err)
+ return 2;
+
+ /* Step 3: check if arguments are trivially valid */
+
+ err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0);
+ err |= cfc_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
+ err |= cfc_check_trigger_arg_min(&cmd->convert_arg, 10000);
+ err |= cfc_check_trigger_arg_min(&cmd->chanlist_len, 1);
+ err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len);
+
+ if (cmd->stop_src == TRIG_COUNT)
+ err |= cfc_check_trigger_arg_min(&cmd->stop_arg, 1);
+ else /* TRIG_NONE */
+ err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0);
+
+ if (err)
+ return 3;
+
+ /* step 4: fix up any arguments */
+
+ if (cmd->convert_src == TRIG_TIMER) {
+ arg = cmd->convert_arg;
+ i8253_cascade_ns_to_timer(I8254_OSC_BASE_10MHZ,
+ &devpriv->divider1,
+ &devpriv->divider2,
+ &arg, cmd->flags);
+ err |= cfc_check_trigger_arg_is(&cmd->convert_arg, arg);
+ }
+
+ if (err)
+ return 4;
+
+ /* Step 5: check channel list if it exists */
+ if (cmd->chanlist && cmd->chanlist_len > 0)
+ err |= das6402_ai_check_chanlist(dev, s, cmd);
+
+ if (err)
+ return 5;
+
+ return 0;
}
static int das6402_ai_cancel(struct comedi_device *dev,
@@ -246,26 +418,17 @@ static int das6402_ai_insn_read(struct comedi_device *dev,
unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int range = CR_RANGE(insn->chanspec);
unsigned int aref = CR_AREF(insn->chanspec);
- unsigned int val;
int ret;
int i;
- val = DAS6402_MODE_RANGE(range) | DAS6402_MODE_POLLED;
- if (aref == AREF_DIFF) {
- if (chan > s->n_chan / 2)
- return -EINVAL;
- } else {
- val |= DAS6402_MODE_SE;
- }
- if (comedi_range_is_unipolar(s, range))
- val |= DAS6402_MODE_UNI;
+ if (aref == AREF_DIFF && chan > (s->n_chan / 2))
+ return -EINVAL;
/* enable software conversion trigger */
outb(DAS6402_CTRL_SOFT_TRIG, dev->iobase + DAS6402_CTRL_REG);
- das6402_set_mode(dev, val);
+ das6402_ai_set_mode(dev, s, insn->chanspec, DAS6402_MODE_POLLED);
/* load the mux for single channel conversion */
outw(DAS6402_AI_MUX_HI(chan) | DAS6402_AI_MUX_LO(chan),
@@ -279,12 +442,7 @@ static int das6402_ai_insn_read(struct comedi_device *dev,
if (ret)
break;
- val = inw(dev->iobase + DAS6402_AI_DATA_REG);
-
- if (s->maxdata == 0x0fff)
- val >>= 4;
-
- data[i] = val;
+ data[i] = das6402_ai_read_sample(dev, s);
}
das6402_ai_clear_eoc(dev);
@@ -497,7 +655,7 @@ static int das6402_attach(struct comedi_device *dev,
/* Analog Output subdevice */
s = &dev->subdevices[1];
s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITEABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 2;
s->maxdata = board->maxdata;
s->range_table = &das6402_ao_ranges;
@@ -520,7 +678,7 @@ static int das6402_attach(struct comedi_device *dev,
/* Digital Output subdevice */
s = &dev->subdevices[3];
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITEABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 8;
s->maxdata = 1;
s->range_table = &range_digital;
diff --git a/drivers/staging/comedi/drivers/das800.c b/drivers/staging/comedi/drivers/das800.c
index d75e5528258..e5bdc242344 100644
--- a/drivers/staging/comedi/drivers/das800.c
+++ b/drivers/staging/comedi/drivers/das800.c
@@ -219,7 +219,6 @@ static const struct das800_board das800_boards[] = {
};
struct das800_private {
- unsigned int count; /* number of data points left to be taken */
unsigned int divisor1; /* counter 1 value for timed conversions */
unsigned int divisor2; /* counter 2 value for timed conversions */
unsigned int do_bits; /* digital output bits */
@@ -286,9 +285,6 @@ static void das800_set_frequency(struct comedi_device *dev)
static int das800_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
{
- struct das800_private *devpriv = dev->private;
-
- devpriv->count = 0;
das800_disable(dev);
return 0;
}
@@ -399,7 +395,6 @@ static int das800_ai_do_cmd(struct comedi_device *dev,
struct comedi_subdevice *s)
{
const struct das800_board *thisboard = dev->board_ptr;
- struct das800_private *devpriv = dev->private;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
unsigned int gain = CR_RANGE(cmd->chanlist[0]);
@@ -422,11 +417,6 @@ static int das800_ai_do_cmd(struct comedi_device *dev,
gain &= 0xf;
outb(gain, dev->iobase + DAS800_GAIN);
- if (cmd->stop_src == TRIG_COUNT)
- devpriv->count = cmd->stop_arg * cmd->chanlist_len;
- else /* TRIG_NONE */
- devpriv->count = 0;
-
/* enable auto channel scan, send interrupts on end of conversion
* and set clock source to internal or external
*/
@@ -509,25 +499,28 @@ static irqreturn_t das800_interrupt(int irq, void *d)
if (s->maxdata == 0x0fff)
val >>= 4; /* 12-bit sample */
- /* if there are more data points to collect */
- if (cmd->stop_src == TRIG_NONE || devpriv->count > 0) {
- /* write data point to buffer */
- cfc_write_to_buffer(s, val & s->maxdata);
- devpriv->count--;
+ val &= s->maxdata;
+ comedi_buf_write_samples(s, &val, 1);
+
+ if (cmd->stop_src == TRIG_COUNT &&
+ async->scans_done >= cmd->stop_arg) {
+ async->events |= COMEDI_CB_EOA;
+ break;
}
}
- async->events |= COMEDI_CB_BLOCK;
if (fifo_overflow) {
spin_unlock_irqrestore(&dev->spinlock, irq_flags);
async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
- if (cmd->stop_src == TRIG_NONE || devpriv->count > 0) {
- /* Re-enable card's interrupt.
- * We already have spinlock, so indirect addressing is safe */
+ if (!(async->events & COMEDI_CB_CANCEL_MASK)) {
+ /*
+ * Re-enable card's interrupt.
+ * We already have spinlock, so indirect addressing is safe
+ */
das800_ind_write(dev, CONTROL1_INTE | devpriv->do_bits,
CONTROL1);
spin_unlock_irqrestore(&dev->spinlock, irq_flags);
@@ -535,9 +528,8 @@ static irqreturn_t das800_interrupt(int irq, void *d)
/* otherwise, stop taking data */
spin_unlock_irqrestore(&dev->spinlock, irq_flags);
das800_disable(dev);
- async->events |= COMEDI_CB_EOA;
}
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
@@ -738,7 +730,7 @@ static int das800_attach(struct comedi_device *dev, struct comedi_devconfig *it)
/* Digital Output subdevice */
s = &dev->subdevices[2];
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 4;
s->maxdata = 1;
s->range_table = &range_digital;
diff --git a/drivers/staging/comedi/drivers/dmm32at.c b/drivers/staging/comedi/drivers/dmm32at.c
index 7215e09305c..6df298a99cc 100644
--- a/drivers/staging/comedi/drivers/dmm32at.c
+++ b/drivers/staging/comedi/drivers/dmm32at.c
@@ -1,125 +1,139 @@
/*
- comedi/drivers/dmm32at.c
- Diamond Systems mm32at code for a Comedi driver
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * dmm32at.c
+ * Diamond Systems Diamond-MM-32-AT Comedi driver
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
/*
-Driver: dmm32at
-Description: Diamond Systems mm32at driver.
-Devices:
-Author: Perry J. Piplani <perry.j.piplani@nasa.gov>
-Updated: Fri Jun 4 09:13:24 CDT 2004
-Status: experimental
-
-This driver is for the Diamond Systems MM-32-AT board
-http://www.diamondsystems.com/products/diamondmm32at It is being used
-on serveral projects inside NASA, without problems so far. For analog
-input commands, TRIG_EXT is not yet supported at all..
-
-Configuration Options:
- comedi_config /dev/comedi0 dmm32at baseaddr,irq
-*/
+ * Driver: dmm32at
+ * Description: Diamond Systems Diamond-MM-32-AT
+ * Devices: (Diamond Systems) Diamond-MM-32-AT [dmm32at]
+ * Author: Perry J. Piplani <perry.j.piplani@nasa.gov>
+ * Updated: Fri Jun 4 09:13:24 CDT 2004
+ * Status: experimental
+ *
+ * Configuration Options:
+ * comedi_config /dev/comedi0 dmm32at baseaddr,irq
+ *
+ * This driver is for the Diamond Systems MM-32-AT board
+ * http://www.diamondsystems.com/products/diamondmm32at
+ *
+ * It is being used on several projects inside NASA, without
+ * problems so far. For analog input commands, TRIG_EXT is not
+ * yet supported.
+ */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include "../comedidev.h"
+#include "8255.h"
#include "comedi_fc.h"
/* Board register addresses */
-#define DMM32AT_CONV 0x00
-#define DMM32AT_AILSB 0x00
-#define DMM32AT_AUXDOUT 0x01
-#define DMM32AT_AIMSB 0x01
-#define DMM32AT_AILOW 0x02
-#define DMM32AT_AIHIGH 0x03
-
-#define DMM32AT_DACSTAT 0x04
-#define DMM32AT_DACLSB_REG 0x04
-#define DMM32AT_DACMSB_REG 0x05
-#define DMM32AT_DACMSB_CHAN(x) ((x) << 6)
-
-#define DMM32AT_FIFOCNTRL 0x07
-#define DMM32AT_FIFOSTAT 0x07
-
-#define DMM32AT_CNTRL 0x08
-#define DMM32AT_AISTAT 0x08
-
-#define DMM32AT_INTCLOCK 0x09
-
-#define DMM32AT_CNTRDIO 0x0a
-
-#define DMM32AT_AICONF 0x0b
-#define DMM32AT_AIRBACK 0x0b
+#define DMM32AT_AI_START_CONV_REG 0x00
+#define DMM32AT_AI_LSB_REG 0x00
+#define DMM32AT_AUX_DOUT_REG 0x01
+#define DMM32AT_AUX_DOUT2 (1 << 2) /* J3.42 - OUT2 (OUT2EN) */
+#define DMM32AT_AUX_DOUT1 (1 << 1) /* J3.43 */
+#define DMM32AT_AUX_DOUT0 (1 << 0) /* J3.44 - OUT0 (OUT0EN) */
+#define DMM32AT_AI_MSB_REG 0x01
+#define DMM32AT_AI_LO_CHAN_REG 0x02
+#define DMM32AT_AI_HI_CHAN_REG 0x03
+#define DMM32AT_AUX_DI_REG 0x04
+#define DMM32AT_AUX_DI_DACBUSY (1 << 7)
+#define DMM32AT_AUX_DI_CALBUSY (1 << 6)
+#define DMM32AT_AUX_DI3 (1 << 3) /* J3.45 - ADCLK (CLKSEL) */
+#define DMM32AT_AUX_DI2 (1 << 2) /* J3.46 - GATE12 (GT12EN) */
+#define DMM32AT_AUX_DI1 (1 << 1) /* J3.47 - GATE0 (GT0EN) */
+#define DMM32AT_AUX_DI0 (1 << 0) /* J3.48 - CLK0 (SRC0) */
+#define DMM32AT_AO_LSB_REG 0x04
+#define DMM32AT_AO_MSB_REG 0x05
+#define DMM32AT_AO_MSB_DACH(x) ((x) << 6)
+#define DMM32AT_FIFO_DEPTH_REG 0x06
+#define DMM32AT_FIFO_CTRL_REG 0x07
+#define DMM32AT_FIFO_CTRL_FIFOEN (1 << 3)
+#define DMM32AT_FIFO_CTRL_SCANEN (1 << 2)
+#define DMM32AT_FIFO_CTRL_FIFORST (1 << 1)
+#define DMM32AT_FIFO_STATUS_REG 0x07
+#define DMM32AT_FIFO_STATUS_EF (1 << 7)
+#define DMM32AT_FIFO_STATUS_HF (1 << 6)
+#define DMM32AT_FIFO_STATUS_FF (1 << 5)
+#define DMM32AT_FIFO_STATUS_OVF (1 << 4)
+#define DMM32AT_FIFO_STATUS_FIFOEN (1 << 3)
+#define DMM32AT_FIFO_STATUS_SCANEN (1 << 2)
+#define DMM32AT_FIFO_STATUS_PAGE_MASK (3 << 0)
+#define DMM32AT_CTRL_REG 0x08
+#define DMM32AT_CTRL_RESETA (1 << 5)
+#define DMM32AT_CTRL_RESETD (1 << 4)
+#define DMM32AT_CTRL_INTRST (1 << 3)
+#define DMM32AT_CTRL_PAGE_8254 (0 << 0)
+#define DMM32AT_CTRL_PAGE_8255 (1 << 0)
+#define DMM32AT_CTRL_PAGE_CALIB (3 << 0)
+#define DMM32AT_AI_STATUS_REG 0x08
+#define DMM32AT_AI_STATUS_STS (1 << 7)
+#define DMM32AT_AI_STATUS_SD1 (1 << 6)
+#define DMM32AT_AI_STATUS_SD0 (1 << 5)
+#define DMM32AT_AI_STATUS_ADCH_MASK (0x1f << 0)
+#define DMM32AT_INTCLK_REG 0x09
+#define DMM32AT_INTCLK_ADINT (1 << 7)
+#define DMM32AT_INTCLK_DINT (1 << 6)
+#define DMM32AT_INTCLK_TINT (1 << 5)
+#define DMM32AT_INTCLK_CLKEN (1 << 1) /* 1=see below 0=software */
+#define DMM32AT_INTCLK_CLKSEL (1 << 0) /* 1=OUT2 0=EXTCLK */
+#define DMM32AT_CTRDIO_CFG_REG 0x0a
+#define DMM32AT_CTRDIO_CFG_FREQ12 (1 << 7) /* CLK12 1=100KHz 0=10MHz */
+#define DMM32AT_CTRDIO_CFG_FREQ0 (1 << 6) /* CLK0 1=10KHz 0=10MHz */
+#define DMM32AT_CTRDIO_CFG_OUT2EN (1 << 5) /* J3.42 1=OUT2 is DOUT2 */
+#define DMM32AT_CTRDIO_CFG_OUT0EN (1 << 4) /* J3.44 1=OUT0 is DOUT0 */
+#define DMM32AT_CTRDIO_CFG_GT0EN (1 << 2) /* J3.47 1=DIN1 is GATE0 */
+#define DMM32AT_CTRDIO_CFG_SRC0 (1 << 1) /* CLK0 is 0=FREQ0 1=J3.48 */
+#define DMM32AT_CTRDIO_CFG_GT12EN (1 << 0) /* J3.46 1=DIN2 is GATE12 */
+#define DMM32AT_AI_CFG_REG 0x0b
+#define DMM32AT_AI_CFG_SCINT_20US (0 << 4)
+#define DMM32AT_AI_CFG_SCINT_15US (1 << 4)
+#define DMM32AT_AI_CFG_SCINT_10US (2 << 4)
+#define DMM32AT_AI_CFG_SCINT_5US (3 << 4)
+#define DMM32AT_AI_CFG_RANGE (1 << 3) /* 0=5V 1=10V */
+#define DMM32AT_AI_CFG_ADBU (1 << 2) /* 0=bipolar 1=unipolar */
+#define DMM32AT_AI_CFG_GAIN(x) ((x) << 0)
+#define DMM32AT_AI_READBACK_REG 0x0b
+#define DMM32AT_AI_READBACK_WAIT (1 << 7) /* DMM32AT_AI_STATUS_STS */
+#define DMM32AT_AI_READBACK_RANGE (1 << 3)
+#define DMM32AT_AI_READBACK_ADBU (1 << 2)
+#define DMM32AT_AI_READBACK_GAIN_MASK (3 << 0)
#define DMM32AT_CLK1 0x0d
#define DMM32AT_CLK2 0x0e
#define DMM32AT_CLKCT 0x0f
-#define DMM32AT_DIOA 0x0c
-#define DMM32AT_DIOB 0x0d
-#define DMM32AT_DIOC 0x0e
-#define DMM32AT_DIOCONF 0x0f
+#define DMM32AT_8255_IOBASE 0x0c /* Page 1 registers */
/* Board register values. */
-/* DMM32AT_DACSTAT 0x04 */
-#define DMM32AT_DACBUSY 0x80
-
-/* DMM32AT_FIFOCNTRL 0x07 */
-#define DMM32AT_FIFORESET 0x02
-#define DMM32AT_SCANENABLE 0x04
-
-/* DMM32AT_CNTRL 0x08 */
-#define DMM32AT_RESET 0x20
-#define DMM32AT_INTRESET 0x08
-#define DMM32AT_CLKACC 0x00
-#define DMM32AT_DIOACC 0x01
-
-/* DMM32AT_AISTAT 0x08 */
-#define DMM32AT_STATUS 0x80
-
-/* DMM32AT_INTCLOCK 0x09 */
-#define DMM32AT_ADINT 0x80
-#define DMM32AT_CLKSEL 0x03
-
-/* DMM32AT_CNTRDIO 0x0a */
-#define DMM32AT_FREQ12 0x80
-
-/* DMM32AT_AICONF 0x0b */
+/* DMM32AT_AI_CFG_REG 0x0b */
#define DMM32AT_RANGE_U10 0x0c
#define DMM32AT_RANGE_U5 0x0d
#define DMM32AT_RANGE_B10 0x08
#define DMM32AT_RANGE_B5 0x00
-#define DMM32AT_SCINT_20 0x00
-#define DMM32AT_SCINT_15 0x10
-#define DMM32AT_SCINT_10 0x20
-#define DMM32AT_SCINT_5 0x30
/* DMM32AT_CLKCT 0x0f */
#define DMM32AT_CLKCT1 0x56 /* mode3 counter 1 - write low byte only */
#define DMM32AT_CLKCT2 0xb6 /* mode3 counter 2 - write high and low byte */
-/* DMM32AT_DIOCONF 0x0f */
-#define DMM32AT_DIENABLE 0x80
-#define DMM32AT_DIRA 0x10
-#define DMM32AT_DIRB 0x02
-#define DMM32AT_DIRCL 0x01
-#define DMM32AT_DIRCH 0x08
-
/* board AI ranges in comedi structure */
static const struct comedi_lrange dmm32at_airanges = {
4, {
@@ -150,12 +164,36 @@ static const struct comedi_lrange dmm32at_aoranges = {
}
};
-struct dmm32at_private {
- int data;
- int ai_inuse;
- unsigned int ai_scans_left;
- unsigned char dio_config;
-};
+static void dmm32at_ai_set_chanspec(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int chanspec, int nchan)
+{
+ unsigned int chan = CR_CHAN(chanspec);
+ unsigned int range = CR_RANGE(chanspec);
+ unsigned int last_chan = (chan + nchan - 1) % s->n_chan;
+
+ outb(DMM32AT_FIFO_CTRL_FIFORST, dev->iobase + DMM32AT_FIFO_CTRL_REG);
+
+ if (nchan > 1)
+ outb(DMM32AT_FIFO_CTRL_SCANEN,
+ dev->iobase + DMM32AT_FIFO_CTRL_REG);
+
+ outb(chan, dev->iobase + DMM32AT_AI_LO_CHAN_REG);
+ outb(last_chan, dev->iobase + DMM32AT_AI_HI_CHAN_REG);
+ outb(dmm32at_rangebits[range], dev->iobase + DMM32AT_AI_CFG_REG);
+}
+
+static unsigned int dmm32at_ai_get_sample(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ unsigned int val;
+
+ val = inb(dev->iobase + DMM32AT_AI_LSB_REG);
+ val |= (inb(dev->iobase + DMM32AT_AI_MSB_REG) << 8);
+
+ /* munge two's complement value to offset binary */
+ return comedi_offset_munge(s, val);
+}
static int dmm32at_ai_status(struct comedi_device *dev,
struct comedi_subdevice *s,
@@ -165,75 +203,39 @@ static int dmm32at_ai_status(struct comedi_device *dev,
unsigned char status;
status = inb(dev->iobase + context);
- if ((status & DMM32AT_STATUS) == 0)
+ if ((status & DMM32AT_AI_STATUS_STS) == 0)
return 0;
return -EBUSY;
}
-static int dmm32at_ai_rinsn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int dmm32at_ai_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int n;
- unsigned int d;
- unsigned short msb, lsb;
- unsigned char chan;
- int range;
int ret;
+ int i;
- /* get the channel and range number */
-
- chan = CR_CHAN(insn->chanspec) & (s->n_chan - 1);
- range = CR_RANGE(insn->chanspec);
-
- /* zero scan and fifo control and reset fifo */
- outb(DMM32AT_FIFORESET, dev->iobase + DMM32AT_FIFOCNTRL);
-
- /* write the ai channel range regs */
- outb(chan, dev->iobase + DMM32AT_AILOW);
- outb(chan, dev->iobase + DMM32AT_AIHIGH);
- /* set the range bits */
- outb(dmm32at_rangebits[range], dev->iobase + DMM32AT_AICONF);
+ dmm32at_ai_set_chanspec(dev, s, insn->chanspec, 1);
/* wait for circuit to settle */
- ret = comedi_timeout(dev, s, insn, dmm32at_ai_status, DMM32AT_AIRBACK);
+ ret = comedi_timeout(dev, s, insn, dmm32at_ai_status,
+ DMM32AT_AI_READBACK_REG);
if (ret)
return ret;
- /* convert n samples */
- for (n = 0; n < insn->n; n++) {
- /* trigger conversion */
- outb(0xff, dev->iobase + DMM32AT_CONV);
+ for (i = 0; i < insn->n; i++) {
+ outb(0xff, dev->iobase + DMM32AT_AI_START_CONV_REG);
- /* wait for conversion to end */
ret = comedi_timeout(dev, s, insn, dmm32at_ai_status,
- DMM32AT_AISTAT);
+ DMM32AT_AI_STATUS_REG);
if (ret)
return ret;
- /* read data */
- lsb = inb(dev->iobase + DMM32AT_AILSB);
- msb = inb(dev->iobase + DMM32AT_AIMSB);
-
- /* invert sign bit to make range unsigned, this is an
- idiosyncrasy of the diamond board, it return
- conversions as a signed value, i.e. -32768 to
- 32767, flipping the bit and interpreting it as
- signed gives you a range of 0 to 65535 which is
- used by comedi */
- d = ((msb ^ 0x0080) << 8) + lsb;
-
- data[n] = d;
+ data[i] = dmm32at_ai_get_sample(dev, s);
}
- /* return the number of samples read/written */
- return n;
-}
-
-static int dmm32at_ns_to_timer(unsigned int *ns, unsigned int flags)
-{
- /* trivial timer */
- return *ns;
+ return insn->n;
}
static int dmm32at_ai_check_chanlist(struct comedi_device *dev,
@@ -273,10 +275,8 @@ static int dmm32at_ai_cmdtest(struct comedi_device *dev,
/* Step 1 : check if triggers are trivially valid */
err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW);
- err |= cfc_check_trigger_src(&cmd->scan_begin_src,
- TRIG_TIMER /*| TRIG_EXT */);
- err |= cfc_check_trigger_src(&cmd->convert_src,
- TRIG_TIMER /*| TRIG_EXT */);
+ err |= cfc_check_trigger_src(&cmd->scan_begin_src, TRIG_TIMER);
+ err |= cfc_check_trigger_src(&cmd->convert_src, TRIG_TIMER);
err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
@@ -285,8 +285,6 @@ static int dmm32at_ai_cmdtest(struct comedi_device *dev,
/* Step 2a : make sure trigger sources are unique */
- err |= cfc_check_trigger_is_unique(cmd->scan_begin_src);
- err |= cfc_check_trigger_is_unique(cmd->convert_src);
err |= cfc_check_trigger_is_unique(cmd->stop_src);
/* Step 2b : and mutually compatible */
@@ -298,67 +296,32 @@ static int dmm32at_ai_cmdtest(struct comedi_device *dev,
err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0);
-#define MAX_SCAN_SPEED 1000000 /* in nanoseconds */
-#define MIN_SCAN_SPEED 1000000000 /* in nanoseconds */
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- err |= cfc_check_trigger_arg_min(&cmd->scan_begin_arg,
- MAX_SCAN_SPEED);
- err |= cfc_check_trigger_arg_max(&cmd->scan_begin_arg,
- MIN_SCAN_SPEED);
- } else {
- /* external trigger */
- /* should be level/edge, hi/lo specification here */
- /* should specify multiple external triggers */
- err |= cfc_check_trigger_arg_max(&cmd->scan_begin_arg, 9);
- }
+ err |= cfc_check_trigger_arg_min(&cmd->scan_begin_arg, 1000000);
+ err |= cfc_check_trigger_arg_max(&cmd->scan_begin_arg, 1000000000);
- if (cmd->convert_src == TRIG_TIMER) {
- if (cmd->convert_arg >= 17500)
- cmd->convert_arg = 20000;
- else if (cmd->convert_arg >= 12500)
- cmd->convert_arg = 15000;
- else if (cmd->convert_arg >= 7500)
- cmd->convert_arg = 10000;
- else
- cmd->convert_arg = 5000;
- } else {
- /* external trigger */
- /* see above */
- err |= cfc_check_trigger_arg_max(&cmd->convert_arg, 9);
- }
+ if (cmd->convert_arg >= 17500)
+ cmd->convert_arg = 20000;
+ else if (cmd->convert_arg >= 12500)
+ cmd->convert_arg = 15000;
+ else if (cmd->convert_arg >= 7500)
+ cmd->convert_arg = 10000;
+ else
+ cmd->convert_arg = 5000;
err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len);
- if (cmd->stop_src == TRIG_COUNT) {
- err |= cfc_check_trigger_arg_max(&cmd->stop_arg, 0xfffffff0);
+ if (cmd->stop_src == TRIG_COUNT)
err |= cfc_check_trigger_arg_min(&cmd->stop_arg, 1);
- } else {
- /* TRIG_NONE */
+ else /* TRIG_NONE */
err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0);
- }
if (err)
return 3;
- /* step 4: fix up any arguments */
+ /* Step 4: fix up any arguments */
- if (cmd->scan_begin_src == TRIG_TIMER) {
- arg = cmd->scan_begin_arg;
- dmm32at_ns_to_timer(&arg, cmd->flags);
- err |= cfc_check_trigger_arg_is(&cmd->scan_begin_arg, arg);
- }
- if (cmd->convert_src == TRIG_TIMER) {
- arg = cmd->convert_arg;
- dmm32at_ns_to_timer(&arg, cmd->flags);
- err |= cfc_check_trigger_arg_is(&cmd->convert_arg, arg);
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- arg = cmd->convert_arg * cmd->scan_end_arg;
- err |= cfc_check_trigger_arg_min(&cmd->scan_begin_arg,
- arg);
- }
- }
+ arg = cmd->convert_arg * cmd->scan_end_arg;
+ err |= cfc_check_trigger_arg_min(&cmd->scan_begin_arg, arg);
if (err)
return 4;
@@ -384,11 +347,11 @@ static void dmm32at_setaitimer(struct comedi_device *dev, unsigned int nansec)
hi2 = (both2 & 0xff00) >> 8;
lo2 = both2 & 0x00ff;
- /* set the counter frequency to 10mhz */
- outb(0, dev->iobase + DMM32AT_CNTRDIO);
+ /* set counter clocks to 10MHz, disable all aux dio */
+ outb(0, dev->iobase + DMM32AT_CTRDIO_CFG_REG);
/* get access to the clock regs */
- outb(DMM32AT_CLKACC, dev->iobase + DMM32AT_CNTRL);
+ outb(DMM32AT_CTRL_PAGE_8254, dev->iobase + DMM32AT_CTRL_REG);
/* write the counter 1 control word and low byte to counter */
outb(DMM32AT_CLKCT1, dev->iobase + DMM32AT_CLKCT);
@@ -400,65 +363,37 @@ static void dmm32at_setaitimer(struct comedi_device *dev, unsigned int nansec)
outb(hi2, dev->iobase + DMM32AT_CLK2);
/* enable the ai conversion interrupt and the clock to start scans */
- outb(DMM32AT_ADINT | DMM32AT_CLKSEL, dev->iobase + DMM32AT_INTCLOCK);
+ outb(DMM32AT_INTCLK_ADINT |
+ DMM32AT_INTCLK_CLKEN | DMM32AT_INTCLK_CLKSEL,
+ dev->iobase + DMM32AT_INTCLK_REG);
}
static int dmm32at_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
- struct dmm32at_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
- int range;
- unsigned char chanlo, chanhi;
int ret;
- if (!cmd->chanlist)
- return -EINVAL;
-
- /* get the channel list and range */
- chanlo = CR_CHAN(cmd->chanlist[0]) & (s->n_chan - 1);
- chanhi = chanlo + cmd->chanlist_len - 1;
- if (chanhi >= s->n_chan)
- return -EINVAL;
- range = CR_RANGE(cmd->chanlist[0]);
-
- /* reset fifo */
- outb(DMM32AT_FIFORESET, dev->iobase + DMM32AT_FIFOCNTRL);
-
- /* set scan enable */
- outb(DMM32AT_SCANENABLE, dev->iobase + DMM32AT_FIFOCNTRL);
-
- /* write the ai channel range regs */
- outb(chanlo, dev->iobase + DMM32AT_AILOW);
- outb(chanhi, dev->iobase + DMM32AT_AIHIGH);
-
- /* set the range bits */
- outb(dmm32at_rangebits[range], dev->iobase + DMM32AT_AICONF);
+ dmm32at_ai_set_chanspec(dev, s, cmd->chanlist[0], cmd->chanlist_len);
/* reset the interrupt just in case */
- outb(DMM32AT_INTRESET, dev->iobase + DMM32AT_CNTRL);
-
- if (cmd->stop_src == TRIG_COUNT)
- devpriv->ai_scans_left = cmd->stop_arg;
- else { /* TRIG_NONE */
- devpriv->ai_scans_left = 0xffffffff; /* indicates TRIG_NONE to
- * isr */
- }
+ outb(DMM32AT_CTRL_INTRST, dev->iobase + DMM32AT_CTRL_REG);
/*
* wait for circuit to settle
* we don't have the 'insn' here but it's not needed
*/
- ret = comedi_timeout(dev, s, NULL, dmm32at_ai_status, DMM32AT_AIRBACK);
+ ret = comedi_timeout(dev, s, NULL, dmm32at_ai_status,
+ DMM32AT_AI_READBACK_REG);
if (ret)
return ret;
- if (devpriv->ai_scans_left > 1) {
+ if (cmd->stop_src == TRIG_NONE || cmd->stop_arg > 1) {
/* start the clock and enable the interrupts */
dmm32at_setaitimer(dev, cmd->scan_begin_arg);
} else {
/* start the interrupts and initiate a single scan */
- outb(DMM32AT_ADINT, dev->iobase + DMM32AT_INTCLOCK);
- outb(0xff, dev->iobase + DMM32AT_CONV);
+ outb(DMM32AT_INTCLK_ADINT, dev->iobase + DMM32AT_INTCLK_REG);
+ outb(0xff, dev->iobase + DMM32AT_AI_START_CONV_REG);
}
return 0;
@@ -468,19 +403,16 @@ static int dmm32at_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
static int dmm32at_ai_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- struct dmm32at_private *devpriv = dev->private;
-
- devpriv->ai_scans_left = 1;
+ /* disable further interrupts and clocks */
+ outb(0x0, dev->iobase + DMM32AT_INTCLK_REG);
return 0;
}
static irqreturn_t dmm32at_isr(int irq, void *d)
{
struct comedi_device *dev = d;
- struct dmm32at_private *devpriv = dev->private;
unsigned char intstat;
- unsigned int samp;
- unsigned short msb, lsb;
+ unsigned int val;
int i;
if (!dev->attached) {
@@ -488,38 +420,26 @@ static irqreturn_t dmm32at_isr(int irq, void *d)
return IRQ_HANDLED;
}
- intstat = inb(dev->iobase + DMM32AT_INTCLOCK);
+ intstat = inb(dev->iobase + DMM32AT_INTCLK_REG);
- if (intstat & DMM32AT_ADINT) {
+ if (intstat & DMM32AT_INTCLK_ADINT) {
struct comedi_subdevice *s = dev->read_subdev;
struct comedi_cmd *cmd = &s->async->cmd;
for (i = 0; i < cmd->chanlist_len; i++) {
- /* read data */
- lsb = inb(dev->iobase + DMM32AT_AILSB);
- msb = inb(dev->iobase + DMM32AT_AIMSB);
-
- /* invert sign bit to make range unsigned */
- samp = ((msb ^ 0x0080) << 8) + lsb;
- comedi_buf_put(s, samp);
+ val = dmm32at_ai_get_sample(dev, s);
+ comedi_buf_write_samples(s, &val, 1);
}
- if (devpriv->ai_scans_left != 0xffffffff) { /* TRIG_COUNT */
- devpriv->ai_scans_left--;
- if (devpriv->ai_scans_left == 0) {
- /* disable further interrupts and clocks */
- outb(0x0, dev->iobase + DMM32AT_INTCLOCK);
- /* set the buffer to be flushed with an EOF */
- s->async->events |= COMEDI_CB_EOA;
- }
+ if (cmd->stop_src == TRIG_COUNT &&
+ s->async->scans_done >= cmd->stop_arg)
+ s->async->events |= COMEDI_CB_EOA;
- }
- /* flush the buffer */
- comedi_event(dev, s);
+ comedi_handle_events(dev, s);
}
/* reset the interrupt */
- outb(DMM32AT_INTRESET, dev->iobase + DMM32AT_CNTRL);
+ outb(DMM32AT_CTRL_INTRST, dev->iobase + DMM32AT_CTRL_REG);
return IRQ_HANDLED;
}
@@ -530,8 +450,8 @@ static int dmm32at_ao_eoc(struct comedi_device *dev,
{
unsigned char status;
- status = inb(dev->iobase + DMM32AT_DACSTAT);
- if ((status & DMM32AT_DACBUSY) == 0)
+ status = inb(dev->iobase + DMM32AT_AUX_DI_REG);
+ if ((status & DMM32AT_AUX_DI_DACBUSY) == 0)
return 0;
return -EBUSY;
}
@@ -549,9 +469,9 @@ static int dmm32at_ao_insn_write(struct comedi_device *dev,
int ret;
/* write LSB then MSB + chan to load DAC */
- outb(val & 0xff, dev->iobase + DMM32AT_DACLSB_REG);
- outb((val >> 8) | DMM32AT_DACMSB_CHAN(chan),
- dev->iobase + DMM32AT_DACMSB_REG);
+ outb(val & 0xff, dev->iobase + DMM32AT_AO_LSB_REG);
+ outb((val >> 8) | DMM32AT_AO_MSB_DACH(chan),
+ dev->iobase + DMM32AT_AO_MSB_REG);
/* wait for circuit to settle */
ret = comedi_timeout(dev, s, insn, dmm32at_ao_eoc, 0);
@@ -559,7 +479,7 @@ static int dmm32at_ao_insn_write(struct comedi_device *dev,
return ret;
/* dummy read to update DAC */
- inb(dev->iobase + DMM32AT_DACMSB_REG);
+ inb(dev->iobase + DMM32AT_AO_MSB_REG);
s->readback[chan] = val;
}
@@ -567,136 +487,82 @@ static int dmm32at_ao_insn_write(struct comedi_device *dev,
return insn->n;
}
-static int dmm32at_dio_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct dmm32at_private *devpriv = dev->private;
- unsigned int mask;
- unsigned int val;
-
- mask = comedi_dio_update_state(s, data);
- if (mask) {
- /* get access to the DIO regs */
- outb(DMM32AT_DIOACC, dev->iobase + DMM32AT_CNTRL);
-
- /* if either part of dio is set for output */
- if (((devpriv->dio_config & DMM32AT_DIRCL) == 0) ||
- ((devpriv->dio_config & DMM32AT_DIRCH) == 0)) {
- val = (s->state & 0x00ff0000) >> 16;
- outb(val, dev->iobase + DMM32AT_DIOC);
- }
- if ((devpriv->dio_config & DMM32AT_DIRB) == 0) {
- val = (s->state & 0x0000ff00) >> 8;
- outb(val, dev->iobase + DMM32AT_DIOB);
- }
- if ((devpriv->dio_config & DMM32AT_DIRA) == 0) {
- val = (s->state & 0x000000ff);
- outb(val, dev->iobase + DMM32AT_DIOA);
- }
- }
-
- val = inb(dev->iobase + DMM32AT_DIOA);
- val |= inb(dev->iobase + DMM32AT_DIOB) << 8;
- val |= inb(dev->iobase + DMM32AT_DIOC) << 16;
- s->state = val;
-
- data[1] = val;
-
- return insn->n;
-}
-
-static int dmm32at_dio_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static int dmm32at_8255_io(struct comedi_device *dev,
+ int dir, int port, int data, unsigned long regbase)
{
- struct dmm32at_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int mask;
- unsigned char chanbit;
- int ret;
-
- if (chan < 8) {
- mask = 0x0000ff;
- chanbit = DMM32AT_DIRA;
- } else if (chan < 16) {
- mask = 0x00ff00;
- chanbit = DMM32AT_DIRB;
- } else if (chan < 20) {
- mask = 0x0f0000;
- chanbit = DMM32AT_DIRCL;
- } else {
- mask = 0xf00000;
- chanbit = DMM32AT_DIRCH;
- }
-
- ret = comedi_dio_insn_config(dev, s, insn, data, mask);
- if (ret)
- return ret;
-
- if (data[0] == INSN_CONFIG_DIO_OUTPUT)
- devpriv->dio_config &= ~chanbit;
- else
- devpriv->dio_config |= chanbit;
/* get access to the DIO regs */
- outb(DMM32AT_DIOACC, dev->iobase + DMM32AT_CNTRL);
- /* set the DIO's to the new configuration setting */
- outb(devpriv->dio_config, dev->iobase + DMM32AT_DIOCONF);
+ outb(DMM32AT_CTRL_PAGE_8255, dev->iobase + DMM32AT_CTRL_REG);
- return insn->n;
+ if (dir) {
+ outb(data, dev->iobase + regbase + port);
+ return 0;
+ }
+ return inb(dev->iobase + regbase + port);
}
-static int dmm32at_attach(struct comedi_device *dev,
- struct comedi_devconfig *it)
+/* Make sure the board is there and put it into a known state */
+static int dmm32at_reset(struct comedi_device *dev)
{
- struct dmm32at_private *devpriv;
- int ret;
- struct comedi_subdevice *s;
unsigned char aihi, ailo, fifostat, aistat, intstat, airback;
- ret = comedi_request_region(dev, it->options[0], 0x10);
- if (ret)
- return ret;
-
- /* the following just makes sure the board is there and gets
- it to a known state */
-
/* reset the board */
- outb(DMM32AT_RESET, dev->iobase + DMM32AT_CNTRL);
+ outb(DMM32AT_CTRL_RESETA, dev->iobase + DMM32AT_CTRL_REG);
/* allow a millisecond to reset */
udelay(1000);
/* zero scan and fifo control */
- outb(0x0, dev->iobase + DMM32AT_FIFOCNTRL);
+ outb(0x0, dev->iobase + DMM32AT_FIFO_CTRL_REG);
/* zero interrupt and clock control */
- outb(0x0, dev->iobase + DMM32AT_INTCLOCK);
+ outb(0x0, dev->iobase + DMM32AT_INTCLK_REG);
/* write a test channel range, the high 3 bits should drop */
- outb(0x80, dev->iobase + DMM32AT_AILOW);
- outb(0xff, dev->iobase + DMM32AT_AIHIGH);
+ outb(0x80, dev->iobase + DMM32AT_AI_LO_CHAN_REG);
+ outb(0xff, dev->iobase + DMM32AT_AI_HI_CHAN_REG);
/* set the range at 10v unipolar */
- outb(DMM32AT_RANGE_U10, dev->iobase + DMM32AT_AICONF);
+ outb(DMM32AT_RANGE_U10, dev->iobase + DMM32AT_AI_CFG_REG);
/* should take 10 us to settle, here's a hundred */
udelay(100);
/* read back the values */
- ailo = inb(dev->iobase + DMM32AT_AILOW);
- aihi = inb(dev->iobase + DMM32AT_AIHIGH);
- fifostat = inb(dev->iobase + DMM32AT_FIFOSTAT);
- aistat = inb(dev->iobase + DMM32AT_AISTAT);
- intstat = inb(dev->iobase + DMM32AT_INTCLOCK);
- airback = inb(dev->iobase + DMM32AT_AIRBACK);
-
- if ((ailo != 0x00) || (aihi != 0x1f) || (fifostat != 0x80) ||
- (aistat != 0x60 || (intstat != 0x00) || airback != 0x0c)) {
- dev_err(dev->class_dev, "board detection failed\n");
+ ailo = inb(dev->iobase + DMM32AT_AI_LO_CHAN_REG);
+ aihi = inb(dev->iobase + DMM32AT_AI_HI_CHAN_REG);
+ fifostat = inb(dev->iobase + DMM32AT_FIFO_STATUS_REG);
+ aistat = inb(dev->iobase + DMM32AT_AI_STATUS_REG);
+ intstat = inb(dev->iobase + DMM32AT_INTCLK_REG);
+ airback = inb(dev->iobase + DMM32AT_AI_READBACK_REG);
+
+ /*
+ * NOTE: The (DMM32AT_AI_STATUS_SD1 | DMM32AT_AI_STATUS_SD0)
+ * test makes this driver only work if the board is configured
+ * with all A/D channels set for single-ended operation.
+ */
+ if (ailo != 0x00 || aihi != 0x1f ||
+ fifostat != DMM32AT_FIFO_STATUS_EF ||
+ aistat != (DMM32AT_AI_STATUS_SD1 | DMM32AT_AI_STATUS_SD0) ||
+ intstat != 0x00 || airback != 0x0c)
return -EIO;
+
+ return 0;
+}
+
+static int dmm32at_attach(struct comedi_device *dev,
+ struct comedi_devconfig *it)
+{
+ struct comedi_subdevice *s;
+ int ret;
+
+ ret = comedi_request_region(dev, it->options[0], 0x10);
+ if (ret)
+ return ret;
+
+ ret = dmm32at_reset(dev);
+ if (ret) {
+ dev_err(dev->class_dev, "board detection failed\n");
+ return ret;
}
if (it->options[1]) {
@@ -706,65 +572,45 @@ static int dmm32at_attach(struct comedi_device *dev,
dev->irq = it->options[1];
}
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
ret = comedi_alloc_subdevices(dev, 3);
if (ret)
return ret;
+ /* Analog Input subdevice */
s = &dev->subdevices[0];
- /* analog input subdevice */
- s->type = COMEDI_SUBD_AI;
- /* we support single-ended (ground) and differential */
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF;
- s->n_chan = 32;
- s->maxdata = 0xffff;
- s->range_table = &dmm32at_airanges;
- s->insn_read = dmm32at_ai_rinsn;
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF;
+ s->n_chan = 32;
+ s->maxdata = 0xffff;
+ s->range_table = &dmm32at_airanges;
+ s->insn_read = dmm32at_ai_insn_read;
if (dev->irq) {
dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->len_chanlist = 32;
- s->do_cmd = dmm32at_ai_cmd;
- s->do_cmdtest = dmm32at_ai_cmdtest;
- s->cancel = dmm32at_ai_cancel;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = s->n_chan;
+ s->do_cmd = dmm32at_ai_cmd;
+ s->do_cmdtest = dmm32at_ai_cmdtest;
+ s->cancel = dmm32at_ai_cancel;
}
+ /* Analog Output subdevice */
s = &dev->subdevices[1];
- /* analog output subdevice */
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 4;
- s->maxdata = 0x0fff;
- s->range_table = &dmm32at_aoranges;
- s->insn_write = dmm32at_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 4;
+ s->maxdata = 0x0fff;
+ s->range_table = &dmm32at_aoranges;
+ s->insn_write = dmm32at_ao_insn_write;
ret = comedi_alloc_subdev_readback(s);
if (ret)
return ret;
+ /* Digital I/O subdevice */
s = &dev->subdevices[2];
- /* digital i/o subdevice */
-
- /* get access to the DIO regs */
- outb(DMM32AT_DIOACC, dev->iobase + DMM32AT_CNTRL);
- /* set the DIO's to the defualt input setting */
- devpriv->dio_config = DMM32AT_DIRA | DMM32AT_DIRB |
- DMM32AT_DIRCL | DMM32AT_DIRCH | DMM32AT_DIENABLE;
- outb(devpriv->dio_config, dev->iobase + DMM32AT_DIOCONF);
-
- /* set up the subdevice */
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 24;
- s->maxdata = 1;
- s->state = 0;
- s->range_table = &range_digital;
- s->insn_bits = dmm32at_dio_insn_bits;
- s->insn_config = dmm32at_dio_insn_config;
+ ret = subdev_8255_init(dev, s, dmm32at_8255_io, DMM32AT_8255_IOBASE);
+ if (ret)
+ return ret;
return 0;
}
@@ -778,5 +624,5 @@ static struct comedi_driver dmm32at_driver = {
module_comedi_driver(dmm32at_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_DESCRIPTION("Comedi: Diamond Systems Diamond-MM-32-AT");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/dt2801.c b/drivers/staging/comedi/drivers/dt2801.c
index e97386343a0..b96e60ffad7 100644
--- a/drivers/staging/comedi/drivers/dt2801.c
+++ b/drivers/staging/comedi/drivers/dt2801.c
@@ -597,7 +597,6 @@ havetype:
devpriv->dac_range_types[0] = dac_range_lkup(it->options[4]);
devpriv->dac_range_types[1] = dac_range_lkup(it->options[5]);
s->insn_write = dt2801_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/dt2811.c b/drivers/staging/comedi/drivers/dt2811.c
index 1736e397ad2..d660f277487 100644
--- a/drivers/staging/comedi/drivers/dt2811.c
+++ b/drivers/staging/comedi/drivers/dt2811.c
@@ -418,7 +418,6 @@ static int dt2811_attach(struct comedi_device *dev, struct comedi_devconfig *it)
devpriv->range_type_list[0] = dac_range_types[devpriv->dac_range[0]];
devpriv->range_type_list[1] = dac_range_types[devpriv->dac_range[1]];
s->insn_write = dt2811_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/dt2814.c b/drivers/staging/comedi/drivers/dt2814.c
index 9216c35c414..9805be13005 100644
--- a/drivers/staging/comedi/drivers/dt2814.c
+++ b/drivers/staging/comedi/drivers/dt2814.c
@@ -230,7 +230,7 @@ static irqreturn_t dt2814_interrupt(int irq, void *d)
s->async->events |= COMEDI_CB_EOA;
}
- comedi_event(dev, s);
+ comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c
index cc974a5e5cf..2be98bb9a80 100644
--- a/drivers/staging/comedi/drivers/dt282x.c
+++ b/drivers/staging/comedi/drivers/dt282x.c
@@ -449,13 +449,29 @@ static void dt282x_munge(struct comedi_device *dev,
}
}
+static unsigned int dt282x_ao_setup_dma(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ int cur_dma)
+{
+ struct dt282x_private *devpriv = dev->private;
+ void *ptr = devpriv->dma[cur_dma].buf;
+ unsigned int nsamples = comedi_bytes_to_samples(s, devpriv->dma_maxsize);
+ unsigned int nbytes;
+
+ nbytes = comedi_buf_read_samples(s, ptr, nsamples);
+ if (nbytes)
+ dt282x_prep_ao_dma(dev, cur_dma, nbytes);
+ else
+ dev_err(dev->class_dev, "AO underrun\n");
+
+ return nbytes;
+}
+
static void dt282x_ao_dma_interrupt(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct dt282x_private *devpriv = dev->private;
int cur_dma = devpriv->current_dma_index;
- void *ptr = devpriv->dma[cur_dma].buf;
- int size;
outw(devpriv->supcsr | DT2821_SUPCSR_CLRDMADNE,
dev->iobase + DT2821_SUPCSR_REG);
@@ -464,13 +480,8 @@ static void dt282x_ao_dma_interrupt(struct comedi_device *dev,
devpriv->current_dma_index = 1 - cur_dma;
- size = cfc_read_array_from_buffer(s, ptr, devpriv->dma_maxsize);
- if (size == 0) {
- dev_err(dev->class_dev, "AO underrun\n");
+ if (!dt282x_ao_setup_dma(dev, s, cur_dma))
s->async->events |= COMEDI_CB_OVERFLOW;
- } else {
- dt282x_prep_ao_dma(dev, cur_dma, size);
- }
}
static void dt282x_ai_dma_interrupt(struct comedi_device *dev,
@@ -480,6 +491,7 @@ static void dt282x_ai_dma_interrupt(struct comedi_device *dev,
int cur_dma = devpriv->current_dma_index;
void *ptr = devpriv->dma[cur_dma].buf;
int size = devpriv->dma[cur_dma].size;
+ unsigned int nsamples = comedi_bytes_to_samples(s, size);
int ret;
outw(devpriv->supcsr | DT2821_SUPCSR_CLRDMADNE,
@@ -490,13 +502,11 @@ static void dt282x_ai_dma_interrupt(struct comedi_device *dev,
devpriv->current_dma_index = 1 - cur_dma;
dt282x_munge(dev, s, ptr, size);
- ret = cfc_write_array_to_buffer(s, ptr, size);
- if (ret != size) {
- s->async->events |= COMEDI_CB_OVERFLOW;
+ ret = comedi_buf_write_samples(s, ptr, nsamples);
+ if (ret != size)
return;
- }
- devpriv->nread -= size / 2;
+ devpriv->nread -= nsamples;
if (devpriv->nread < 0) {
dev_info(dev->class_dev, "nread off by one\n");
devpriv->nread = 0;
@@ -555,7 +565,6 @@ static irqreturn_t dt282x_interrupt(int irq, void *d)
}
#if 0
if (adcsr & DT2821_ADCSR_ADDONE) {
- int ret;
unsigned short data;
data = inw(dev->iobase + DT2821_ADDAT_REG);
@@ -563,10 +572,7 @@ static irqreturn_t dt282x_interrupt(int irq, void *d)
if (devpriv->ad_2scomp)
data = comedi_offset_munge(s, data);
- ret = comedi_buf_put(s, data);
-
- if (ret == 0)
- s->async->events |= COMEDI_CB_OVERFLOW;
+ comedi_buf_write_samples(s, &data, 1);
devpriv->nread--;
if (!devpriv->nread) {
@@ -579,8 +585,8 @@ static irqreturn_t dt282x_interrupt(int irq, void *d)
handled = 1;
}
#endif
- cfc_handle_events(dev, s);
- cfc_handle_events(dev, s_ao);
+ comedi_handle_events(dev, s);
+ comedi_handle_events(dev, s_ao);
return IRQ_RETVAL(handled);
}
@@ -916,26 +922,15 @@ static int dt282x_ao_inttrig(struct comedi_device *dev,
{
struct dt282x_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
- int size;
if (trig_num != cmd->start_src)
return -EINVAL;
- size = cfc_read_array_from_buffer(s, devpriv->dma[0].buf,
- devpriv->dma_maxsize);
- if (size == 0) {
- dev_err(dev->class_dev, "AO underrun\n");
+ if (!dt282x_ao_setup_dma(dev, s, 0))
return -EPIPE;
- }
- dt282x_prep_ao_dma(dev, 0, size);
- size = cfc_read_array_from_buffer(s, devpriv->dma[1].buf,
- devpriv->dma_maxsize);
- if (size == 0) {
- dev_err(dev->class_dev, "AO underrun\n");
+ if (!dt282x_ao_setup_dma(dev, s, 1))
return -EPIPE;
- }
- dt282x_prep_ao_dma(dev, 1, size);
outw(devpriv->supcsr | DT2821_SUPCSR_STRIG,
dev->iobase + DT2821_SUPCSR_REG);
@@ -1236,7 +1231,6 @@ static int dt282x_attach(struct comedi_device *dev, struct comedi_devconfig *it)
/* ranges are per-channel, set by jumpers on the board */
s->range_table = &dt282x_ao_range;
s->insn_write = dt282x_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
if (dev->irq) {
dev->write_subdev = s;
s->subdev_flags |= SDF_CMD_WRITE;
diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
index 825561046b6..1d9a7a63e06 100644
--- a/drivers/staging/comedi/drivers/dt3000.c
+++ b/drivers/staging/comedi/drivers/dt3000.c
@@ -315,7 +315,7 @@ static void dt3k_ai_empty_fifo(struct comedi_device *dev,
for (i = 0; i < count; i++) {
data = readw(dev->mmio + DPR_ADC_buffer + rear);
- comedi_buf_put(s, data);
+ comedi_buf_write_samples(s, &data, 1);
rear++;
if (rear >= AI_FIFO_DEPTH)
rear = 0;
@@ -351,10 +351,8 @@ static irqreturn_t dt3k_interrupt(int irq, void *d)
status = readw(dev->mmio + DPR_Intr_Flag);
- if (status & DT3000_ADFULL) {
+ if (status & DT3000_ADFULL)
dt3k_ai_empty_fifo(dev, s);
- s->async->events |= COMEDI_CB_BLOCK;
- }
if (status & (DT3000_ADSWERR | DT3000_ADHWERR))
s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
@@ -363,7 +361,7 @@ static irqreturn_t dt3k_interrupt(int irq, void *d)
if (debug_n_ints >= 10)
s->async->events |= COMEDI_CB_EOA;
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
@@ -699,7 +697,6 @@ static int dt3000_auto_attach(struct comedi_device *dev,
s->len_chanlist = 1;
s->range_table = &range_bipolar10;
s->insn_write = dt3k_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/dt9812.c b/drivers/staging/comedi/drivers/dt9812.c
index 77bb89fee32..06c601d8fdf 100644
--- a/drivers/staging/comedi/drivers/dt9812.c
+++ b/drivers/staging/comedi/drivers/dt9812.c
@@ -804,7 +804,7 @@ static int dt9812_auto_attach(struct comedi_device *dev,
/* Digital Output subdevice */
s = &dev->subdevices[1];
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITEABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 8;
s->maxdata = 1;
s->range_table = &range_digital;
@@ -822,7 +822,7 @@ static int dt9812_auto_attach(struct comedi_device *dev,
/* Analog Output subdevice */
s = &dev->subdevices[3];
s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITEABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 2;
s->maxdata = 0x0fff;
s->range_table = is_unipolar ? &range_unipolar2_5 : &range_bipolar10;
diff --git a/drivers/staging/comedi/drivers/dyna_pci10xx.c b/drivers/staging/comedi/drivers/dyna_pci10xx.c
index 608aee0c3a1..1b6324c6eb2 100644
--- a/drivers/staging/comedi/drivers/dyna_pci10xx.c
+++ b/drivers/staging/comedi/drivers/dyna_pci10xx.c
@@ -218,7 +218,7 @@ static int dyna_pci10xx_auto_attach(struct comedi_device *dev,
/* digital input */
s = &dev->subdevices[2];
s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND;
+ s->subdev_flags = SDF_READABLE;
s->n_chan = 16;
s->maxdata = 1;
s->range_table = &range_digital;
@@ -228,7 +228,7 @@ static int dyna_pci10xx_auto_attach(struct comedi_device *dev,
/* digital output */
s = &dev->subdevices[3];
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 16;
s->maxdata = 1;
s->range_table = &range_digital;
diff --git a/drivers/staging/comedi/drivers/fl512.c b/drivers/staging/comedi/drivers/fl512.c
index 5a1e3c8fc01..e1f493241cd 100644
--- a/drivers/staging/comedi/drivers/fl512.c
+++ b/drivers/staging/comedi/drivers/fl512.c
@@ -135,7 +135,6 @@ static int fl512_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->maxdata = 0x0fff;
s->range_table = &range_fl512;
s->insn_write = fl512_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/gsc_hpdi.c b/drivers/staging/comedi/drivers/gsc_hpdi.c
index b8975a4606e..0979f536ed3 100644
--- a/drivers/staging/comedi/drivers/gsc_hpdi.c
+++ b/drivers/staging/comedi/drivers/gsc_hpdi.c
@@ -196,8 +196,8 @@ static void gsc_hpdi_drain_dma(struct comedi_device *dev, unsigned int channel)
size = devpriv->dio_count;
devpriv->dio_count -= size;
}
- cfc_write_array_to_buffer(s, devpriv->desc_dio_buffer[idx],
- size * sizeof(uint32_t));
+ comedi_buf_write_samples(s, devpriv->desc_dio_buffer[idx],
+ size);
idx++;
idx %= devpriv->num_dma_descriptors;
start = le32_to_cpu(devpriv->dma_desc[idx].pci_start_addr);
@@ -272,7 +272,7 @@ static irqreturn_t gsc_hpdi_interrupt(int irq, void *d)
if (devpriv->dio_count == 0)
async->events |= COMEDI_CB_EOA;
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
@@ -689,7 +689,7 @@ static int gsc_hpdi_auto_attach(struct comedi_device *dev,
s = &dev->subdevices[0];
dev->read_subdev = s;
s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITEABLE | SDF_LSAMPL |
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_LSAMPL |
SDF_CMD_READ;
s->n_chan = 32;
s->len_chanlist = 32;
diff --git a/drivers/staging/comedi/drivers/icp_multi.c b/drivers/staging/comedi/drivers/icp_multi.c
index f4e1c1cf417..1ea16862010 100644
--- a/drivers/staging/comedi/drivers/icp_multi.c
+++ b/drivers/staging/comedi/drivers/icp_multi.c
@@ -53,7 +53,7 @@ Configuration options: not applicable, uses PCI auto config
#define ICP_MULTI_AI 2 /* R: Analogue input data */
#define ICP_MULTI_DAC_CSR 4 /* R/W: DAC command/status register */
#define ICP_MULTI_AO 6 /* R/W: Analogue output data */
-#define ICP_MULTI_DI 8 /* R/W: Digital inouts */
+#define ICP_MULTI_DI 8 /* R/W: Digital inputs */
#define ICP_MULTI_DO 0x0A /* R/W: Digital outputs */
#define ICP_MULTI_INT_EN 0x0C /* R/W: Interrupt enable register */
#define ICP_MULTI_INT_STAT 0x0E /* R/W: Interrupt status register */
@@ -319,7 +319,7 @@ static int icp_multi_insn_bits_do(struct comedi_device *dev,
if (comedi_dio_update_state(s, data))
writew(s->state, dev->mmio + ICP_MULTI_DO);
- data[1] = readw(dev->mmio + ICP_MULTI_DI);
+ data[1] = s->state;
return insn->n;
}
@@ -495,7 +495,6 @@ static int icp_multi_auto_attach(struct comedi_device *dev,
s->len_chanlist = 4;
s->range_table = &range_analog;
s->insn_write = icp_multi_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
@@ -512,7 +511,7 @@ static int icp_multi_auto_attach(struct comedi_device *dev,
s = &dev->subdevices[3];
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 8;
s->maxdata = 1;
s->len_chanlist = 8;
@@ -521,7 +520,7 @@ static int icp_multi_auto_attach(struct comedi_device *dev,
s = &dev->subdevices[4];
s->type = COMEDI_SUBD_COUNTER;
- s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_COMMON;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 4;
s->maxdata = 0xffff;
s->len_chanlist = 4;
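
The icp_multi digital-output change above reports the cached output state
instead of re-reading the separate input register. A hedged sketch of that
common comedi insn_bits pattern follows; EXAMPLE_DO_REG and the mmio write are
illustrative only, and the comedi core declarations ("../comedidev.h") are
assumed to be in scope:

static int example_do_insn_bits(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_insn *insn,
				unsigned int *data)
{
	/* data[0] is the mask of channels to update, data[1] the new bits */
	if (comedi_dio_update_state(s, data))
		writew(s->state, dev->mmio + EXAMPLE_DO_REG);	/* hypothetical reg */

	/* outputs are reported from the cached state, not a hardware readback */
	data[1] = s->state;

	return insn->n;
}
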
diff --git a/drivers/staging/comedi/drivers/ii_pci20kc.c b/drivers/staging/comedi/drivers/ii_pci20kc.c
index cc5fd75b8bc..1085d66935f 100644
--- a/drivers/staging/comedi/drivers/ii_pci20kc.c
+++ b/drivers/staging/comedi/drivers/ii_pci20kc.c
@@ -392,7 +392,6 @@ static int ii20k_init_module(struct comedi_device *dev,
s->maxdata = 0xffff;
s->range_table = &ii20k_ao_ranges;
s->insn_write = ii20k_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/me4000.c b/drivers/staging/comedi/drivers/me4000.c
index 6561b00bea5..915685c1c85 100644
--- a/drivers/staging/comedi/drivers/me4000.c
+++ b/drivers/staging/comedi/drivers/me4000.c
@@ -44,8 +44,6 @@ broken.
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
#include "../comedidev.h"
@@ -53,10 +51,7 @@ broken.
#include "8253.h"
#include "plx9052.h"
-#if 0
-/* file removed due to GPL incompatibility */
-#include "me4000_fw.h"
-#endif
+#define ME4000_FIRMWARE "me4000_firmware.bin"
/*
* ME4000 Register map and bit defines
@@ -333,27 +328,20 @@ static const struct comedi_lrange me4000_ai_range = {
}
};
-#define FIRMWARE_NOT_AVAILABLE 1
-#if FIRMWARE_NOT_AVAILABLE
-extern unsigned char *xilinx_firm;
-#endif
-
-static int xilinx_download(struct comedi_device *dev)
+static int me4000_xilinx_download(struct comedi_device *dev,
+ const u8 *data, size_t size,
+ unsigned long context)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
struct me4000_info *info = dev->private;
unsigned long xilinx_iobase = pci_resource_start(pcidev, 5);
- u32 value = 0;
- wait_queue_head_t queue;
- int idx = 0;
- int size = 0;
- unsigned int intcsr;
+ unsigned int file_length;
+ unsigned int val;
+ unsigned int i;
if (!xilinx_iobase)
return -ENODEV;
- init_waitqueue_head(&queue);
-
/*
* Set PLX local interrupt 2 polarity to high.
* Interrupt is thrown by init pin of xilinx.
@@ -361,61 +349,58 @@ static int xilinx_download(struct comedi_device *dev)
outl(PLX9052_INTCSR_LI2POL, info->plx_regbase + PLX9052_INTCSR);
/* Set /CS and /WRITE of the Xilinx */
- value = inl(info->plx_regbase + PLX9052_CNTRL);
- value |= PLX9052_CNTRL_UIO2_DATA;
- outl(value, info->plx_regbase + PLX9052_CNTRL);
+ val = inl(info->plx_regbase + PLX9052_CNTRL);
+ val |= PLX9052_CNTRL_UIO2_DATA;
+ outl(val, info->plx_regbase + PLX9052_CNTRL);
/* Init Xilinx with CS1 */
inb(xilinx_iobase + 0xC8);
/* Wait until /INIT pin is set */
udelay(20);
- intcsr = inl(info->plx_regbase + PLX9052_INTCSR);
- if (!(intcsr & PLX9052_INTCSR_LI2STAT)) {
+ val = inl(info->plx_regbase + PLX9052_INTCSR);
+ if (!(val & PLX9052_INTCSR_LI2STAT)) {
dev_err(dev->class_dev, "Can't init Xilinx\n");
return -EIO;
}
/* Reset /CS and /WRITE of the Xilinx */
- value = inl(info->plx_regbase + PLX9052_CNTRL);
- value &= ~PLX9052_CNTRL_UIO2_DATA;
- outl(value, info->plx_regbase + PLX9052_CNTRL);
- if (FIRMWARE_NOT_AVAILABLE) {
- dev_err(dev->class_dev,
- "xilinx firmware unavailable due to licensing, aborting");
- return -EIO;
- } else {
- /* Download Xilinx firmware */
- size = (xilinx_firm[0] << 24) + (xilinx_firm[1] << 16) +
- (xilinx_firm[2] << 8) + xilinx_firm[3];
- udelay(10);
+ val = inl(info->plx_regbase + PLX9052_CNTRL);
+ val &= ~PLX9052_CNTRL_UIO2_DATA;
+ outl(val, info->plx_regbase + PLX9052_CNTRL);
- for (idx = 0; idx < size; idx++) {
- outb(xilinx_firm[16 + idx], xilinx_iobase);
- udelay(10);
+ /* Download Xilinx firmware */
+ file_length = (((unsigned int)data[0] & 0xff) << 24) +
+ (((unsigned int)data[1] & 0xff) << 16) +
+ (((unsigned int)data[2] & 0xff) << 8) +
+ ((unsigned int)data[3] & 0xff);
+ udelay(10);
- /* Check if BUSY flag is low */
- if (inl(info->plx_regbase + PLX9052_CNTRL) & PLX9052_CNTRL_UIO1_DATA) {
- dev_err(dev->class_dev,
- "Xilinx is still busy (idx = %d)\n",
- idx);
- return -EIO;
- }
+ for (i = 0; i < file_length; i++) {
+ outb(data[16 + i], xilinx_iobase);
+ udelay(10);
+
+ /* Check if BUSY flag is low */
+ val = inl(info->plx_regbase + PLX9052_CNTRL);
+ if (val & PLX9052_CNTRL_UIO1_DATA) {
+ dev_err(dev->class_dev,
+ "Xilinx is still busy (i = %d)\n", i);
+ return -EIO;
}
}
/* If done flag is high download was successful */
- if (inl(info->plx_regbase + PLX9052_CNTRL) & PLX9052_CNTRL_UIO0_DATA) {
- } else {
+ val = inl(info->plx_regbase + PLX9052_CNTRL);
+ if (!(val & PLX9052_CNTRL_UIO0_DATA)) {
dev_err(dev->class_dev, "DONE flag is not set\n");
dev_err(dev->class_dev, "Download not successful\n");
return -EIO;
}
/* Set /CS and /WRITE */
- value = inl(info->plx_regbase + PLX9052_CNTRL);
- value |= PLX9052_CNTRL_UIO2_DATA;
- outl(value, info->plx_regbase + PLX9052_CNTRL);
+ val = inl(info->plx_regbase + PLX9052_CNTRL);
+ val |= PLX9052_CNTRL_UIO2_DATA;
+ outl(val, info->plx_regbase + PLX9052_CNTRL);
return 0;
}
@@ -431,7 +416,7 @@ static void me4000_reset(struct comedi_device *dev)
val |= PLX9052_CNTRL_PCI_RESET;
outl(val, info->plx_regbase + PLX9052_CNTRL);
val &= ~PLX9052_CNTRL_PCI_RESET;
- outl(val , info->plx_regbase + PLX9052_CNTRL);
+ outl(val, info->plx_regbase + PLX9052_CNTRL);
/* 0x8000 to the DACs means an output voltage of 0V */
for (chan = 0; chan < 4; chan++)
@@ -848,9 +833,6 @@ static int me4000_ai_do_cmd_test(struct comedi_device *dev,
unsigned int scan_ticks;
int err = 0;
- /* Only rounding flags are implemented */
- cmd->flags &= CMDF_ROUND_NEAREST | CMDF_ROUND_UP | CMDF_ROUND_DOWN;
-
/* Round the timer arguments */
ai_round_cmd_args(dev, s, cmd, &init_ticks, &scan_ticks, &chan_ticks);
@@ -1092,8 +1074,6 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
} else if ((tmp & ME4000_AI_STATUS_BIT_FF_DATA)
&& !(tmp & ME4000_AI_STATUS_BIT_HF_DATA)
&& (tmp & ME4000_AI_STATUS_BIT_EF_DATA)) {
- s->async->events |= COMEDI_CB_BLOCK;
-
c = ME4000_AI_FIFO_COUNT / 2;
} else {
dev_err(dev->class_dev,
@@ -1119,7 +1099,7 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
lval = inl(dev->iobase + ME4000_AI_DATA_REG) & 0xFFFF;
lval ^= 0x8000;
- if (!comedi_buf_put(s, lval)) {
+ if (!comedi_buf_write_samples(s, &lval, 1)) {
/*
* Buffer overflow, so stop conversion
* and disable all interrupts
@@ -1128,11 +1108,6 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
tmp &= ~(ME4000_AI_CTRL_BIT_HF_IRQ |
ME4000_AI_CTRL_BIT_SC_IRQ);
outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
-
- s->async->events |= COMEDI_CB_OVERFLOW;
-
- dev_err(dev->class_dev, "Buffer overflow\n");
-
break;
}
}
@@ -1146,7 +1121,7 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
if (inl(dev->iobase + ME4000_IRQ_STATUS_REG) &
ME4000_IRQ_STATUS_BIT_SC) {
- s->async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOA;
+ s->async->events |= COMEDI_CB_EOA;
/*
* Acquisition is complete, so stop
@@ -1164,11 +1139,8 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
lval = inl(dev->iobase + ME4000_AI_DATA_REG) & 0xFFFF;
lval ^= 0x8000;
- if (!comedi_buf_put(s, lval)) {
- dev_err(dev->class_dev, "Buffer overflow\n");
- s->async->events |= COMEDI_CB_OVERFLOW;
+ if (!comedi_buf_write_samples(s, &lval, 1))
break;
- }
}
/* Work is done, so reset the interrupt */
@@ -1178,8 +1150,7 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
}
- if (s->async->events)
- comedi_event(dev, s);
+ comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
@@ -1397,8 +1368,9 @@ static int me4000_auto_attach(struct comedi_device *dev,
if (!info->plx_regbase || !dev->iobase || !info->timer_regbase)
return -ENODEV;
- result = xilinx_download(dev);
- if (result)
+ result = comedi_load_firmware(dev, &pcidev->dev, ME4000_FIRMWARE,
+ me4000_xilinx_download, 0);
+ if (result < 0)
return result;
me4000_reset(dev);
@@ -1449,12 +1421,11 @@ static int me4000_auto_attach(struct comedi_device *dev,
if (thisboard->ao_nchan) {
s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITEABLE | SDF_COMMON | SDF_GROUND;
+ s->subdev_flags = SDF_WRITABLE | SDF_COMMON | SDF_GROUND;
s->n_chan = thisboard->ao_nchan;
s->maxdata = 0xFFFF; /* 16 bit DAC */
s->range_table = &range_bipolar10;
s->insn_write = me4000_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
result = comedi_alloc_subdev_readback(s);
if (result)
@@ -1561,3 +1532,4 @@ module_comedi_pci_driver(me4000_driver, me4000_pci_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(ME4000_FIRMWARE);
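
The me4000 hunk above swaps the GPL-incompatible embedded bitstream for the
in-kernel firmware loader. A sketch of the same comedi_load_firmware()
callback pattern, with assumed names (example_fw_cb, example_auto_attach,
"example_fw.bin" are illustrative, not from the patch); the length parsing
mirrors the four big-endian header bytes handled in me4000_xilinx_download():

static int example_fw_cb(struct comedi_device *dev,
			 const u8 *data, size_t size, unsigned long context)
{
	/* first four bytes of the image carry the bitstream length, big-endian */
	unsigned int len = (data[0] << 24) | (data[1] << 16) |
			   (data[2] << 8) | data[3];

	if (size < 16 || len > size - 16)	/* payload starts at offset 16 */
		return -EINVAL;

	/* ... clock data[16] .. data[16 + len - 1] into the device here ... */
	return 0;
}

static int example_auto_attach(struct comedi_device *dev, unsigned long context)
{
	struct pci_dev *pcidev = comedi_to_pci_dev(dev);

	return comedi_load_firmware(dev, &pcidev->dev, "example_fw.bin",
				    example_fw_cb, 0);
}
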
diff --git a/drivers/staging/comedi/drivers/me_daq.c b/drivers/staging/comedi/drivers/me_daq.c
index 00eaaf8ac14..b5278c11e62 100644
--- a/drivers/staging/comedi/drivers/me_daq.c
+++ b/drivers/staging/comedi/drivers/me_daq.c
@@ -511,13 +511,12 @@ static int me_auto_attach(struct comedi_device *dev,
s = &dev->subdevices[1];
if (board->has_ao) {
s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITEABLE | SDF_COMMON;
+ s->subdev_flags = SDF_WRITABLE | SDF_COMMON;
s->n_chan = 4;
s->maxdata = 0x0fff;
s->len_chanlist = 4;
s->range_table = &me_ao_range;
s->insn_write = me_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
@@ -528,7 +527,7 @@ static int me_auto_attach(struct comedi_device *dev,
s = &dev->subdevices[2];
s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITEABLE;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
s->n_chan = 32;
s->maxdata = 1;
s->len_chanlist = 32;
diff --git a/drivers/staging/comedi/drivers/mf6x4.c b/drivers/staging/comedi/drivers/mf6x4.c
index c8d3a22c589..af21bc180c4 100644
--- a/drivers/staging/comedi/drivers/mf6x4.c
+++ b/drivers/staging/comedi/drivers/mf6x4.c
@@ -259,7 +259,6 @@ static int mf6x4_auto_attach(struct comedi_device *dev, unsigned long context)
s->maxdata = 0x3fff; /* 14 bits DAC */
s->range_table = &range_bipolar10;
s->insn_write = mf6x4_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/mite.c b/drivers/staging/comedi/drivers/mite.c
index 4f7829010a9..ffc9e61d6cd 100644
--- a/drivers/staging/comedi/drivers/mite.c
+++ b/drivers/staging/comedi/drivers/mite.c
@@ -494,13 +494,10 @@ EXPORT_SYMBOL_GPL(mite_bytes_read_from_memory_ub);
unsigned mite_dma_tcr(struct mite_channel *mite_chan)
{
struct mite_struct *mite = mite_chan->mite;
- int tcr;
int lkar;
lkar = readl(mite->mite_io_addr + MITE_LKAR(mite_chan->channel));
- tcr = readl(mite->mite_io_addr + MITE_TCR(mite_chan->channel));
-
- return tcr;
+ return readl(mite->mite_io_addr + MITE_TCR(mite_chan->channel));
}
EXPORT_SYMBOL_GPL(mite_dma_tcr);
@@ -542,7 +539,7 @@ int mite_sync_input_dma(struct mite_channel *mite_chan,
return 0;
comedi_buf_write_free(s, count);
- cfc_inc_scan_progress(s, count);
+ comedi_inc_scan_progress(s, count);
async->events |= COMEDI_CB_BLOCK;
return 0;
}
@@ -553,7 +550,7 @@ int mite_sync_output_dma(struct mite_channel *mite_chan,
{
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
- u32 stop_count = cmd->stop_arg * cfc_bytes_per_scan(s);
+ u32 stop_count = cmd->stop_arg * comedi_bytes_per_scan(s);
unsigned int old_alloc_count = async->buf_read_alloc_count;
u32 nbytes_ub, nbytes_lb;
int count;
diff --git a/drivers/staging/comedi/drivers/multiq3.c b/drivers/staging/comedi/drivers/multiq3.c
index f710c8e8132..8471219210b 100644
--- a/drivers/staging/comedi/drivers/multiq3.c
+++ b/drivers/staging/comedi/drivers/multiq3.c
@@ -238,7 +238,6 @@ static int multiq3_attach(struct comedi_device *dev,
s->maxdata = 0xfff;
s->range_table = &range_bipolar5;
s->insn_write = multiq3_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/ni_6527.c b/drivers/staging/comedi/drivers/ni_6527.c
index 45fb601e408..f99847f3999 100644
--- a/drivers/staging/comedi/drivers/ni_6527.c
+++ b/drivers/staging/comedi/drivers/ni_6527.c
@@ -208,9 +208,8 @@ static irqreturn_t ni6527_interrupt(int irq, void *d)
return IRQ_NONE;
if (status & NI6527_STATUS_EDGE) {
- comedi_buf_put(s, 0);
- s->async->events |= COMEDI_CB_EOS;
- comedi_event(dev, s);
+ comedi_buf_write_samples(s, &s->state, 1);
+ comedi_handle_events(dev, s);
}
writeb(NI6527_CLR_IRQS, dev->mmio + NI6527_CLR_REG);
@@ -238,9 +237,6 @@ static int ni6527_intr_cmdtest(struct comedi_device *dev,
/* Step 2a : make sure trigger sources are unique */
/* Step 2b : and mutually compatible */
- if (err)
- return 2;
-
/* Step 3: check if arguments are trivially valid */
err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0);
diff --git a/drivers/staging/comedi/drivers/ni_65xx.c b/drivers/staging/comedi/drivers/ni_65xx.c
index 3b642861eb3..bcb326e3156 100644
--- a/drivers/staging/comedi/drivers/ni_65xx.c
+++ b/drivers/staging/comedi/drivers/ni_65xx.c
@@ -508,9 +508,9 @@ static irqreturn_t ni_65xx_interrupt(int irq, void *d)
writeb(NI_65XX_CLR_EDGE_INT | NI_65XX_CLR_OVERFLOW_INT,
dev->mmio + NI_65XX_CLR_REG);
- comedi_buf_put(s, 0);
- s->async->events |= COMEDI_CB_EOS;
- comedi_event(dev, s);
+ comedi_buf_write_samples(s, &s->state, 1);
+ comedi_handle_events(dev, s);
+
return IRQ_HANDLED;
}
@@ -534,9 +534,6 @@ static int ni_65xx_intr_cmdtest(struct comedi_device *dev,
/* Step 2a : make sure trigger sources are unique */
/* Step 2b : and mutually compatible */
- if (err)
- return 2;
-
/* Step 3: check if arguments are trivially valid */
err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0);
diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c
index 5b6794c8232..1e4dd82b12e 100644
--- a/drivers/staging/comedi/drivers/ni_660x.c
+++ b/drivers/staging/comedi/drivers/ni_660x.c
@@ -780,7 +780,7 @@ static void ni_660x_handle_gpct_interrupt(struct comedi_device *dev,
struct ni_gpct *counter = s->private;
ni_tio_handle_interrupt(counter, s);
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
}
static irqreturn_t ni_660x_interrupt(int irq, void *d)
diff --git a/drivers/staging/comedi/drivers/ni_670x.c b/drivers/staging/comedi/drivers/ni_670x.c
index 54721deb80c..c42a81c0bfa 100644
--- a/drivers/staging/comedi/drivers/ni_670x.c
+++ b/drivers/staging/comedi/drivers/ni_670x.c
@@ -228,7 +228,6 @@ static int ni_670x_auto_attach(struct comedi_device *dev,
s->range_table = &range_bipolar10;
}
s->insn_write = ni_670x_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/ni_at_a2150.c b/drivers/staging/comedi/drivers/ni_at_a2150.c
index 72ec857d073..69e543a0bf2 100644
--- a/drivers/staging/comedi/drivers/ni_at_a2150.c
+++ b/drivers/staging/comedi/drivers/ni_at_a2150.c
@@ -168,7 +168,6 @@ static irqreturn_t a2150_interrupt(int irq, void *d)
struct comedi_cmd *cmd;
unsigned int max_points, num_points, residue, leftover;
unsigned short dpnt;
- static const int sample_size = sizeof(devpriv->dma_buffer[0]);
if (!dev->attached) {
dev_err(dev->class_dev, "premature interrupt\n");
@@ -188,14 +187,14 @@ static irqreturn_t a2150_interrupt(int irq, void *d)
if (status & OVFL_BIT) {
dev_err(dev->class_dev, "fifo overflow\n");
async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
}
if ((status & DMA_TC_BIT) == 0) {
dev_err(dev->class_dev,
"caught non-dma interrupt? Aborting.\n");
async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
@@ -206,12 +205,12 @@ static irqreturn_t a2150_interrupt(int irq, void *d)
clear_dma_ff(devpriv->dma);
/* figure out how many points to read */
- max_points = devpriv->dma_transfer_size / sample_size;
+ max_points = comedi_bytes_to_samples(s, devpriv->dma_transfer_size);
/* residue is the number of points left to be done on the dma
* transfer. It should always be zero at this point unless
* the stop_src is set to external triggering.
*/
- residue = get_dma_residue(devpriv->dma) / sample_size;
+ residue = comedi_bytes_to_samples(s, get_dma_residue(devpriv->dma));
num_points = max_points - residue;
if (devpriv->count < num_points && cmd->stop_src == TRIG_COUNT)
num_points = devpriv->count;
@@ -219,7 +218,8 @@ static irqreturn_t a2150_interrupt(int irq, void *d)
/* figure out how many points will be stored next time */
leftover = 0;
if (cmd->stop_src == TRIG_NONE) {
- leftover = devpriv->dma_transfer_size / sample_size;
+ leftover = comedi_bytes_to_samples(s,
+ devpriv->dma_transfer_size);
} else if (devpriv->count > max_points) {
leftover = devpriv->count - max_points;
if (leftover > max_points)
@@ -237,7 +237,7 @@ static irqreturn_t a2150_interrupt(int irq, void *d)
dpnt = devpriv->dma_buffer[i];
/* convert from 2's complement to unsigned coding */
dpnt ^= 0x8000;
- cfc_write_to_buffer(s, dpnt);
+ comedi_buf_write_samples(s, &dpnt, 1);
if (cmd->stop_src == TRIG_COUNT) {
if (--devpriv->count == 0) { /* end of acquisition */
async->events |= COMEDI_CB_EOA;
@@ -248,14 +248,13 @@ static irqreturn_t a2150_interrupt(int irq, void *d)
/* re-enable dma */
if (leftover) {
set_dma_addr(devpriv->dma, virt_to_bus(devpriv->dma_buffer));
- set_dma_count(devpriv->dma, leftover * sample_size);
+ set_dma_count(devpriv->dma,
+ comedi_samples_to_bytes(s, leftover));
enable_dma(devpriv->dma);
}
release_dma_lock(flags);
- async->events |= COMEDI_CB_BLOCK;
-
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
/* clear interrupt */
outw(0x00, dev->iobase + DMA_TC_CLEAR_REG);
diff --git a/drivers/staging/comedi/drivers/ni_at_ao.c b/drivers/staging/comedi/drivers/ni_at_ao.c
index 3e1ce586614..05370a4a74a 100644
--- a/drivers/staging/comedi/drivers/ni_at_ao.c
+++ b/drivers/staging/comedi/drivers/ni_at_ao.c
@@ -244,47 +244,31 @@ static int atao_calib_insn_write(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- struct atao_private *devpriv = dev->private;
unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int bitstring;
- unsigned int val;
- int bit;
- if (insn->n == 0)
- return 0;
+ if (insn->n) {
+ unsigned int val = data[insn->n - 1];
+ unsigned int bitstring = ((chan & 0x7) << 8) | val;
+ unsigned int bits;
+ int bit;
- devpriv->caldac[chan] = data[insn->n - 1] & s->maxdata;
+ /* write the channel and last data value to the caldac */
+ /* clock the bitstring to the caldac; MSB -> LSB */
+ for (bit = 1 << 10; bit; bit >>= 1) {
+ bits = (bit & bitstring) ? ATAO_CFG2_SDATA : 0;
- /* write the channel and last data value to the caldac */
- bitstring = ((chan & 0x7) << 8) | devpriv->caldac[chan];
+ outw(bits, dev->iobase + ATAO_CFG2_REG);
+ outw(bits | ATAO_CFG2_SCLK,
+ dev->iobase + ATAO_CFG2_REG);
+ }
- /* clock the bitstring to the caldac; MSB -> LSB */
- for (bit = 1 << 10; bit; bit >>= 1) {
- val = (bit & bitstring) ? ATAO_CFG2_SDATA : 0;
+ /* strobe the caldac to load the value */
+ outw(ATAO_CFG2_CALLD(chan), dev->iobase + ATAO_CFG2_REG);
+ outw(ATAO_CFG2_CALLD_NOP, dev->iobase + ATAO_CFG2_REG);
- outw(val, dev->iobase + ATAO_CFG2_REG);
- outw(val | ATAO_CFG2_SCLK, dev->iobase + ATAO_CFG2_REG);
+ s->readback[chan] = val;
}
- /* strobe the caldac to load the value */
- outw(ATAO_CFG2_CALLD(chan), dev->iobase + ATAO_CFG2_REG);
- outw(ATAO_CFG2_CALLD_NOP, dev->iobase + ATAO_CFG2_REG);
-
- return insn->n;
-}
-
-static int atao_calib_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct atao_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- int i;
-
- for (i = 0; i < insn->n; i++)
- data[i] = devpriv->caldac[chan];
-
return insn->n;
}
@@ -344,7 +328,6 @@ static int atao_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->maxdata = 0x0fff;
s->range_table = it->options[3] ? &range_unipolar10 : &range_bipolar10;
s->insn_write = atao_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
@@ -366,9 +349,12 @@ static int atao_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->subdev_flags = SDF_WRITABLE | SDF_INTERNAL;
s->n_chan = (board->n_ao_chans * 2) + 1;
s->maxdata = 0xff;
- s->insn_read = atao_calib_insn_read;
s->insn_write = atao_calib_insn_write;
+ ret = comedi_alloc_subdev_readback(s);
+ if (ret)
+ return ret;
+
/* EEPROM subdevice */
s = &dev->subdevices[3];
s->type = COMEDI_SUBD_UNUSED;
diff --git a/drivers/staging/comedi/drivers/ni_atmio16d.c b/drivers/staging/comedi/drivers/ni_atmio16d.c
index fc3c19de700..c484c89c94b 100644
--- a/drivers/staging/comedi/drivers/ni_atmio16d.c
+++ b/drivers/staging/comedi/drivers/ni_atmio16d.c
@@ -217,10 +217,12 @@ static irqreturn_t atmio16d_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct comedi_subdevice *s = dev->read_subdev;
+ unsigned short val;
- comedi_buf_put(s, inw(dev->iobase + AD_FIFO_REG));
+ val = inw(dev->iobase + AD_FIFO_REG);
+ comedi_buf_write_samples(s, &val, 1);
+ comedi_handle_events(dev, s);
- comedi_event(dev, s);
return IRQ_HANDLED;
}
@@ -298,7 +300,6 @@ static int atmio16d_ai_cmd(struct comedi_device *dev,
* It is still uber-experimental */
reset_counters(dev);
- s->async->cur_chan = 0;
/* check if scanning multiple channels */
if (cmd->chanlist_len < 2) {
@@ -691,7 +692,6 @@ static int atmio16d_attach(struct comedi_device *dev,
break;
}
s->insn_write = atmio16d_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/ni_labpc.h b/drivers/staging/comedi/drivers/ni_labpc.h
index f6e5cd15a40..ac2c01f9dfd 100644
--- a/drivers/staging/comedi/drivers/ni_labpc.h
+++ b/drivers/staging/comedi/drivers/ni_labpc.h
@@ -37,8 +37,6 @@ struct labpc_boardinfo {
struct labpc_private {
/* number of data points left to be taken */
unsigned long long count;
- /* software copy of analog output values */
- unsigned int ao_value[NUM_AO_CHAN];
/* software copies of bits written to command registers */
unsigned int cmd1;
unsigned int cmd2;
@@ -70,10 +68,6 @@ struct labpc_private {
unsigned int dma_transfer_size;
/* we are using dma/fifo-half-full/etc. */
enum transfer_type current_transfer;
- /* stores contents of board's eeprom */
- unsigned int eeprom_data[EEPROM_SIZE];
- /* stores settings of calibration dacs */
- unsigned int caldac[16];
/*
* function pointers so we can use inb/outb or readb/writeb as
* appropriate
diff --git a/drivers/staging/comedi/drivers/ni_labpc_common.c b/drivers/staging/comedi/drivers/ni_labpc_common.c
index 35bc2c25ddf..d89d5852aee 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_common.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_common.c
@@ -818,7 +818,7 @@ static int labpc_drain_fifo(struct comedi_device *dev)
devpriv->count--;
}
data = labpc_read_adc_fifo(dev);
- cfc_write_to_buffer(dev->read_subdev, data);
+ comedi_buf_write_samples(dev->read_subdev, &data, 1);
devpriv->stat1 = devpriv->read_byte(dev, STAT1_REG);
}
if (i == timeout) {
@@ -876,7 +876,7 @@ static irqreturn_t labpc_interrupt(int irq, void *d)
/* clear error interrupt */
devpriv->write_byte(dev, 0x1, ADC_FIFO_CLEAR_REG);
async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
dev_err(dev->class_dev, "overrun\n");
return IRQ_HANDLED;
}
@@ -896,7 +896,7 @@ static irqreturn_t labpc_interrupt(int irq, void *d)
/* clear error interrupt */
devpriv->write_byte(dev, 0x1, ADC_FIFO_CLEAR_REG);
async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
dev_err(dev->class_dev, "overflow\n");
return IRQ_HANDLED;
}
@@ -914,10 +914,22 @@ static irqreturn_t labpc_interrupt(int irq, void *d)
async->events |= COMEDI_CB_EOA;
}
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
+static void labpc_ao_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int chan, unsigned int val)
+{
+ struct labpc_private *devpriv = dev->private;
+
+ devpriv->write_byte(dev, val & 0xff, DAC_LSB_REG(chan));
+ devpriv->write_byte(dev, (val >> 8) & 0xff, DAC_MSB_REG(chan));
+
+ s->readback[chan] = val;
+}
+
static int labpc_ao_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
@@ -927,7 +939,6 @@ static int labpc_ao_insn_write(struct comedi_device *dev,
struct labpc_private *devpriv = dev->private;
int channel, range;
unsigned long flags;
- int lsb, msb;
channel = CR_CHAN(insn->chanspec);
@@ -950,25 +961,7 @@ static int labpc_ao_insn_write(struct comedi_device *dev,
devpriv->write_byte(dev, devpriv->cmd6, CMD6_REG);
}
/* send data */
- lsb = data[0] & 0xff;
- msb = (data[0] >> 8) & 0xff;
- devpriv->write_byte(dev, lsb, DAC_LSB_REG(channel));
- devpriv->write_byte(dev, msb, DAC_MSB_REG(channel));
-
- /* remember value for readback */
- devpriv->ao_value[channel] = data[0];
-
- return 1;
-}
-
-static int labpc_ao_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct labpc_private *devpriv = dev->private;
-
- data[0] = devpriv->ao_value[CR_CHAN(insn->chanspec)];
+ labpc_ao_write(dev, s, channel, data[0]);
return 1;
}
@@ -1085,29 +1078,13 @@ static unsigned int labpc_eeprom_read_status(struct comedi_device *dev)
return value;
}
-static int labpc_eeprom_write(struct comedi_device *dev,
- unsigned int address, unsigned int value)
+static void labpc_eeprom_write(struct comedi_device *dev,
+ unsigned int address, unsigned int value)
{
struct labpc_private *devpriv = dev->private;
const int write_enable_instruction = 0x6;
const int write_instruction = 0x2;
const int write_length = 8; /* 8 bit write lengths to eeprom */
- const int write_in_progress_bit = 0x1;
- const int timeout = 10000;
- int i;
-
- /* make sure there isn't already a write in progress */
- for (i = 0; i < timeout; i++) {
- if ((labpc_eeprom_read_status(dev) & write_in_progress_bit) ==
- 0)
- break;
- }
- if (i == timeout) {
- dev_err(dev->class_dev, "eeprom write timed out\n");
- return -ETIME;
- }
- /* update software copy of eeprom */
- devpriv->eeprom_data[address] = value;
/* enable read/write to eeprom */
devpriv->cmd5 &= ~CMD5_EEPROMCS;
@@ -1140,8 +1117,6 @@ static int labpc_eeprom_write(struct comedi_device *dev,
devpriv->cmd5 &= ~(CMD5_EEPROMCS | CMD5_WRTPRT);
udelay(1);
devpriv->write_byte(dev, devpriv->cmd5, CMD5_REG);
-
- return 0;
}
/* writes to 8 bit calibration dacs */
@@ -1150,10 +1125,6 @@ static void write_caldac(struct comedi_device *dev, unsigned int channel,
{
struct labpc_private *devpriv = dev->private;
- if (value == devpriv->caldac[channel])
- return;
- devpriv->caldac[channel] = value;
-
/* clear caldac load bit and make sure we don't write to eeprom */
devpriv->cmd5 &= ~(CMD5_CALDACLD | CMD5_EEPROMCS | CMD5_WRTPRT);
udelay(1);
@@ -1184,25 +1155,30 @@ static int labpc_calib_insn_write(struct comedi_device *dev,
* Only write the last data value to the caldac. Preceding
* data would be overwritten anyway.
*/
- if (insn->n > 0)
- write_caldac(dev, chan, data[insn->n - 1]);
+ if (insn->n > 0) {
+ unsigned int val = data[insn->n - 1];
+
+ if (s->readback[chan] != val) {
+ write_caldac(dev, chan, val);
+ s->readback[chan] = val;
+ }
+ }
return insn->n;
}
-static int labpc_calib_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static int labpc_eeprom_ready(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
{
- struct labpc_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- int i;
-
- for (i = 0; i < insn->n; i++)
- data[i] = devpriv->caldac[chan];
+ unsigned int status;
- return insn->n;
+ /* make sure there isn't already a write in progress */
+ status = labpc_eeprom_read_status(dev);
+ if ((status & 0x1) == 0)
+ return 0;
+ return -EBUSY;
}
static int labpc_eeprom_insn_write(struct comedi_device *dev,
@@ -1222,25 +1198,15 @@ static int labpc_eeprom_insn_write(struct comedi_device *dev,
* data would be overwritten anyway.
*/
if (insn->n > 0) {
- ret = labpc_eeprom_write(dev, chan, data[insn->n - 1]);
+ unsigned int val = data[insn->n - 1];
+
+ ret = comedi_timeout(dev, s, insn, labpc_eeprom_ready, 0);
if (ret)
return ret;
- }
-
- return insn->n;
-}
-
-static int labpc_eeprom_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct labpc_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- int i;
- for (i = 0; i < insn->n; i++)
- data[i] = devpriv->eeprom_data[chan];
+ labpc_eeprom_write(dev, chan, val);
+ s->readback[chan] = val;
+ }
return insn->n;
}
@@ -1309,19 +1275,15 @@ int labpc_common_attach(struct comedi_device *dev,
s->n_chan = NUM_AO_CHAN;
s->maxdata = 0x0fff;
s->range_table = &range_labpc_ao;
- s->insn_read = labpc_ao_insn_read;
s->insn_write = labpc_ao_insn_write;
- /* initialize analog outputs to a known value */
- for (i = 0; i < s->n_chan; i++) {
- short lsb, msb;
+ ret = comedi_alloc_subdev_readback(s);
+ if (ret)
+ return ret;
- devpriv->ao_value[i] = s->maxdata / 2;
- lsb = devpriv->ao_value[i] & 0xff;
- msb = (devpriv->ao_value[i] >> 8) & 0xff;
- devpriv->write_byte(dev, lsb, DAC_LSB_REG(i));
- devpriv->write_byte(dev, msb, DAC_MSB_REG(i));
- }
+ /* initialize analog outputs to a known value */
+ for (i = 0; i < s->n_chan; i++)
+ labpc_ao_write(dev, s, i, s->maxdata / 2);
} else {
s->type = COMEDI_SUBD_UNUSED;
}
@@ -1342,11 +1304,16 @@ int labpc_common_attach(struct comedi_device *dev,
s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
s->n_chan = 16;
s->maxdata = 0xff;
- s->insn_read = labpc_calib_insn_read;
s->insn_write = labpc_calib_insn_write;
- for (i = 0; i < s->n_chan; i++)
+ ret = comedi_alloc_subdev_readback(s);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < s->n_chan; i++) {
write_caldac(dev, i, s->maxdata / 2);
+ s->readback[i] = s->maxdata / 2;
+ }
} else {
s->type = COMEDI_SUBD_UNUSED;
}
@@ -1358,11 +1325,14 @@ int labpc_common_attach(struct comedi_device *dev,
s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
s->n_chan = EEPROM_SIZE;
s->maxdata = 0xff;
- s->insn_read = labpc_eeprom_insn_read;
s->insn_write = labpc_eeprom_insn_write;
+ ret = comedi_alloc_subdev_readback(s);
+ if (ret)
+ return ret;
+
for (i = 0; i < s->n_chan; i++)
- devpriv->eeprom_data[i] = labpc_eeprom_read(dev, i);
+ s->readback[i] = labpc_eeprom_read(dev, i);
} else {
s->type = COMEDI_SUBD_UNUSED;
}
diff --git a/drivers/staging/comedi/drivers/ni_labpc_isadma.c b/drivers/staging/comedi/drivers/ni_labpc_isadma.c
index 967202e0635..6d386050e59 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_isadma.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_isadma.c
@@ -91,7 +91,6 @@ void labpc_drain_dma(struct comedi_device *dev)
int status;
unsigned long flags;
unsigned int max_points, num_points, residue, leftover;
- int i;
status = devpriv->stat1;
@@ -122,9 +121,7 @@ void labpc_drain_dma(struct comedi_device *dev)
leftover = max_points;
}
- /* write data to comedi buffer */
- for (i = 0; i < num_points; i++)
- cfc_write_to_buffer(s, devpriv->dma_buffer[i]);
+ comedi_buf_write_samples(s, devpriv->dma_buffer, num_points);
if (cmd->stop_src == TRIG_COUNT)
devpriv->count -= num_points;
@@ -133,8 +130,6 @@ void labpc_drain_dma(struct comedi_device *dev)
set_dma_addr(devpriv->dma_chan, devpriv->dma_addr);
set_dma_count(devpriv->dma_chan, leftover * sample_size);
release_dma_lock(flags);
-
- async->events |= COMEDI_CB_BLOCK;
}
EXPORT_SYMBOL_GPL(labpc_drain_dma);
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 320b080149b..11e70173712 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -686,13 +686,12 @@ static inline void ni_set_ai_dma_channel(struct comedi_device *dev, int channel)
{
unsigned bitfield;
- if (channel >= 0) {
+ if (channel >= 0)
bitfield =
(ni_stc_dma_channel_select_bitfield(channel) <<
AI_DMA_Select_Shift) & AI_DMA_Select_Mask;
- } else {
+ else
bitfield = 0;
- }
ni_set_bitfield(dev, AI_AO_Select, AI_DMA_Select_Mask, bitfield);
}
@@ -701,13 +700,12 @@ static inline void ni_set_ao_dma_channel(struct comedi_device *dev, int channel)
{
unsigned bitfield;
- if (channel >= 0) {
+ if (channel >= 0)
bitfield =
(ni_stc_dma_channel_select_bitfield(channel) <<
AO_DMA_Select_Shift) & AO_DMA_Select_Mask;
- } else {
+ else
bitfield = 0;
- }
ni_set_bitfield(dev, AI_AO_Select, AO_DMA_Select_Mask, bitfield);
}
@@ -1127,31 +1125,18 @@ static void ni_ao_fifo_load(struct comedi_device *dev,
struct comedi_subdevice *s, int n)
{
struct ni_private *devpriv = dev->private;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- int chan;
int i;
unsigned short d;
u32 packed_data;
- int range;
- int err = 1;
- chan = async->cur_chan;
for (i = 0; i < n; i++) {
- err &= comedi_buf_get(s, &d);
- if (err == 0)
- break;
-
- range = CR_RANGE(cmd->chanlist[chan]);
+ comedi_buf_read_samples(s, &d, 1);
if (devpriv->is_6xxx) {
packed_data = d & 0xffff;
/* 6711 only has 16 bit wide ao fifo */
if (!devpriv->is_6711) {
- err &= comedi_buf_get(s, &d);
- if (err == 0)
- break;
- chan++;
+ comedi_buf_read_samples(s, &d, 1);
i++;
packed_data |= (d << 16) & 0xffff0000;
}
@@ -1159,12 +1144,7 @@ static void ni_ao_fifo_load(struct comedi_device *dev,
} else {
ni_writew(dev, d, DAC_FIFO_Data);
}
- chan++;
- chan %= cmd->chanlist_len;
}
- async->cur_chan = chan;
- if (err == 0)
- async->events |= COMEDI_CB_OVERFLOW;
}
/*
@@ -1187,21 +1167,20 @@ static int ni_ao_fifo_half_empty(struct comedi_device *dev,
struct comedi_subdevice *s)
{
const struct ni_board_struct *board = dev->board_ptr;
- int n;
+ unsigned int nbytes;
+ unsigned int nsamples;
- n = comedi_buf_read_n_available(s);
- if (n == 0) {
+ nbytes = comedi_buf_read_n_available(s);
+ if (nbytes == 0) {
s->async->events |= COMEDI_CB_OVERFLOW;
return 0;
}
- n /= sizeof(short);
- if (n > board->ao_fifo_depth / 2)
- n = board->ao_fifo_depth / 2;
-
- ni_ao_fifo_load(dev, s, n);
+ nsamples = comedi_bytes_to_samples(s, nbytes);
+ if (nsamples > board->ao_fifo_depth / 2)
+ nsamples = board->ao_fifo_depth / 2;
- s->async->events |= COMEDI_CB_BLOCK;
+ ni_ao_fifo_load(dev, s, nsamples);
return 1;
}
@@ -1211,7 +1190,8 @@ static int ni_ao_prep_fifo(struct comedi_device *dev,
{
const struct ni_board_struct *board = dev->board_ptr;
struct ni_private *devpriv = dev->private;
- int n;
+ unsigned int nbytes;
+ unsigned int nsamples;
/* reset fifo */
ni_stc_writew(dev, 1, DAC_FIFO_Clear);
@@ -1219,17 +1199,17 @@ static int ni_ao_prep_fifo(struct comedi_device *dev,
ni_ao_win_outl(dev, 0x6, AO_FIFO_Offset_Load_611x);
/* load some data */
- n = comedi_buf_read_n_available(s);
- if (n == 0)
+ nbytes = comedi_buf_read_n_available(s);
+ if (nbytes == 0)
return 0;
- n /= sizeof(short);
- if (n > board->ao_fifo_depth)
- n = board->ao_fifo_depth;
+ nsamples = comedi_bytes_to_samples(s, nbytes);
+ if (nsamples > board->ao_fifo_depth)
+ nsamples = board->ao_fifo_depth;
- ni_ao_fifo_load(dev, s, n);
+ ni_ao_fifo_load(dev, s, nsamples);
- return n;
+ return nsamples;
}
static void ni_ai_fifo_read(struct comedi_device *dev,
@@ -1237,44 +1217,42 @@ static void ni_ai_fifo_read(struct comedi_device *dev,
{
struct ni_private *devpriv = dev->private;
struct comedi_async *async = s->async;
+ u32 dl;
+ unsigned short data;
int i;
if (devpriv->is_611x) {
- unsigned short data[2];
- u32 dl;
-
for (i = 0; i < n / 2; i++) {
dl = ni_readl(dev, ADC_FIFO_Data_611x);
/* This may get the hi/lo data in the wrong order */
- data[0] = (dl >> 16) & 0xffff;
- data[1] = dl & 0xffff;
- cfc_write_array_to_buffer(s, data, sizeof(data));
+ data = (dl >> 16) & 0xffff;
+ comedi_buf_write_samples(s, &data, 1);
+ data = dl & 0xffff;
+ comedi_buf_write_samples(s, &data, 1);
}
/* Check if there's a single sample stuck in the FIFO */
if (n % 2) {
dl = ni_readl(dev, ADC_FIFO_Data_611x);
- data[0] = dl & 0xffff;
- cfc_write_to_buffer(s, data[0]);
+ data = dl & 0xffff;
+ comedi_buf_write_samples(s, &data, 1);
}
} else if (devpriv->is_6143) {
- unsigned short data[2];
- u32 dl;
-
/* This just reads the FIFO assuming the data is present, no checks on the FIFO status are performed */
for (i = 0; i < n / 2; i++) {
dl = ni_readl(dev, AIFIFO_Data_6143);
- data[0] = (dl >> 16) & 0xffff;
- data[1] = dl & 0xffff;
- cfc_write_array_to_buffer(s, data, sizeof(data));
+ data = (dl >> 16) & 0xffff;
+ comedi_buf_write_samples(s, &data, 1);
+ data = dl & 0xffff;
+ comedi_buf_write_samples(s, &data, 1);
}
if (n % 2) {
/* Assume there is a single sample stuck in the FIFO */
/* Get stranded sample into FIFO */
ni_writel(dev, 0x01, AIFIFO_Control_6143);
dl = ni_readl(dev, AIFIFO_Data_6143);
- data[0] = (dl >> 16) & 0xffff;
- cfc_write_to_buffer(s, data[0]);
+ data = (dl >> 16) & 0xffff;
+ comedi_buf_write_samples(s, &data, 1);
}
} else {
if (n > sizeof(devpriv->ai_fifo_buffer) /
@@ -1288,9 +1266,7 @@ static void ni_ai_fifo_read(struct comedi_device *dev,
devpriv->ai_fifo_buffer[i] =
ni_readw(dev, ADC_FIFO_Data_Register);
}
- cfc_write_array_to_buffer(s, devpriv->ai_fifo_buffer,
- n *
- sizeof(devpriv->ai_fifo_buffer[0]));
+ comedi_buf_write_samples(s, devpriv->ai_fifo_buffer, n);
}
}
@@ -1313,8 +1289,8 @@ static void ni_handle_fifo_dregs(struct comedi_device *dev)
{
struct ni_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
- unsigned short data[2];
u32 dl;
+ unsigned short data;
unsigned short fifo_empty;
int i;
@@ -1324,9 +1300,10 @@ static void ni_handle_fifo_dregs(struct comedi_device *dev)
dl = ni_readl(dev, ADC_FIFO_Data_611x);
/* This may get the hi/lo data in the wrong order */
- data[0] = (dl >> 16);
- data[1] = (dl & 0xffff);
- cfc_write_array_to_buffer(s, data, sizeof(data));
+ data = dl >> 16;
+ comedi_buf_write_samples(s, &data, 1);
+ data = dl & 0xffff;
+ comedi_buf_write_samples(s, &data, 1);
}
} else if (devpriv->is_6143) {
i = 0;
@@ -1334,9 +1311,10 @@ static void ni_handle_fifo_dregs(struct comedi_device *dev)
dl = ni_readl(dev, AIFIFO_Data_6143);
/* This may get the hi/lo data in the wrong order */
- data[0] = (dl >> 16);
- data[1] = (dl & 0xffff);
- cfc_write_array_to_buffer(s, data, sizeof(data));
+ data = dl >> 16;
+ comedi_buf_write_samples(s, &data, 1);
+ data = dl & 0xffff;
+ comedi_buf_write_samples(s, &data, 1);
i += 2;
}
/* Check if stranded sample is present */
@@ -1344,8 +1322,8 @@ static void ni_handle_fifo_dregs(struct comedi_device *dev)
/* Get stranded sample into FIFO */
ni_writel(dev, 0x01, AIFIFO_Control_6143);
dl = ni_readl(dev, AIFIFO_Data_6143);
- data[0] = (dl >> 16) & 0xffff;
- cfc_write_to_buffer(s, data[0]);
+ data = (dl >> 16) & 0xffff;
+ comedi_buf_write_samples(s, &data, 1);
}
} else {
@@ -1364,10 +1342,7 @@ static void ni_handle_fifo_dregs(struct comedi_device *dev)
devpriv->ai_fifo_buffer[i] =
ni_readw(dev, ADC_FIFO_Data_Register);
}
- cfc_write_array_to_buffer(s, devpriv->ai_fifo_buffer,
- i *
- sizeof(devpriv->
- ai_fifo_buffer[0]));
+ comedi_buf_write_samples(s, devpriv->ai_fifo_buffer, i);
}
}
}
@@ -1386,7 +1361,7 @@ static void get_last_sample_611x(struct comedi_device *dev)
if (ni_readb(dev, XXX_Status) & 0x80) {
dl = ni_readl(dev, ADC_FIFO_Data_611x);
data = (dl & 0xffff);
- cfc_write_to_buffer(s, data);
+ comedi_buf_write_samples(s, &data, 1);
}
}
@@ -1408,7 +1383,7 @@ static void get_last_sample_6143(struct comedi_device *dev)
/* This may get the hi/lo data in the wrong order */
data = (dl >> 16) & 0xffff;
- cfc_write_to_buffer(s, data);
+ comedi_buf_write_samples(s, &data, 1);
}
}
@@ -1462,7 +1437,7 @@ static void handle_gpct_interrupt(struct comedi_device *dev,
ni_tio_handle_interrupt(&devpriv->counter_dev->counters[counter_index],
s);
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
#endif
}
@@ -1518,7 +1493,7 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
if (comedi_is_subdevice_running(s)) {
s->async->events |=
COMEDI_CB_ERROR | COMEDI_CB_EOA;
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
}
return;
}
@@ -1533,7 +1508,7 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
if (status & (AI_Overrun_St | AI_Overflow_St))
s->async->events |= COMEDI_CB_OVERFLOW;
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return;
}
if (status & AI_SC_TC_St) {
@@ -1559,7 +1534,7 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
if ((status & AI_STOP_St))
ni_handle_eos(dev, s);
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
}
static void ack_b_interrupt(struct comedi_device *dev, unsigned short b_status)
@@ -1635,7 +1610,7 @@ static void handle_b_interrupt(struct comedi_device *dev,
}
#endif
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
}
static void ni_ai_munge(struct comedi_device *dev, struct comedi_subdevice *s,
@@ -1645,12 +1620,12 @@ static void ni_ai_munge(struct comedi_device *dev, struct comedi_subdevice *s,
struct ni_private *devpriv = dev->private;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
- unsigned int length = num_bytes / bytes_per_sample(s);
+ unsigned int nsamples = comedi_bytes_to_samples(s, num_bytes);
unsigned short *array = data;
unsigned int *larray = data;
unsigned int i;
- for (i = 0; i < length; i++) {
+ for (i = 0; i < nsamples; i++) {
#ifdef PCIDMA
if (s->subdev_flags & SDF_LSAMPL)
larray[i] = le32_to_cpu(larray[i]);
@@ -2253,9 +2228,6 @@ static int ni_ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
/* Step 1 : check if triggers are trivially valid */
- if ((cmd->flags & CMDF_WRITE))
- cmd->flags &= ~CMDF_WRITE;
-
err |= cfc_check_trigger_src(&cmd->start_src,
TRIG_NOW | TRIG_INT | TRIG_EXT);
err |= cfc_check_trigger_src(&cmd->scan_begin_src,
@@ -2762,11 +2734,11 @@ static void ni_ao_munge(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned int chan_index)
{
struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int length = num_bytes / bytes_per_sample(s);
+ unsigned int nsamples = comedi_bytes_to_samples(s, num_bytes);
unsigned short *array = data;
unsigned int i;
- for (i = 0; i < length; i++) {
+ for (i = 0; i < nsamples; i++) {
unsigned int range = CR_RANGE(cmd->chanlist[chan_index]);
unsigned short val = array[i];
@@ -2981,12 +2953,15 @@ static int ni_ao_insn_config(struct comedi_device *dev,
{
const struct ni_board_struct *board = dev->board_ptr;
struct ni_private *devpriv = dev->private;
+ unsigned int nbytes;
switch (data[0]) {
case INSN_CONFIG_GET_HARDWARE_BUFFER_SIZE:
switch (data[1]) {
case COMEDI_OUTPUT:
- data[2] = 1 + board->ao_fifo_depth * sizeof(short);
+ nbytes = comedi_samples_to_bytes(s,
+ board->ao_fifo_depth);
+ data[2] = 1 + nbytes;
if (devpriv->mite)
data[2] += devpriv->mite->fifo_size;
break;
@@ -3288,9 +3263,6 @@ static int ni_ao_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
/* Step 1 : check if triggers are trivially valid */
- if ((cmd->flags & CMDF_WRITE) == 0)
- cmd->flags |= CMDF_WRITE;
-
err |= cfc_check_trigger_src(&cmd->start_src, TRIG_INT | TRIG_EXT);
err |= cfc_check_trigger_src(&cmd->scan_begin_src,
TRIG_TIMER | TRIG_EXT);
@@ -3515,9 +3487,6 @@ static int ni_cdio_cmdtest(struct comedi_device *dev,
/* Step 2a : make sure trigger sources are unique */
/* Step 2b : and mutually compatible */
- if (err)
- return 2;
-
/* Step 3: check if arguments are trivially valid */
err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0);
@@ -3693,7 +3662,7 @@ static void handle_cdio_interrupt(struct comedi_device *dev)
M_Offset_CDIO_Command);
/* s->async->events |= COMEDI_CB_EOA; */
}
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
}
static int ni_serial_hw_readwrite8(struct comedi_device *dev,
@@ -3976,7 +3945,7 @@ static unsigned ni_gpct_to_stc_register(enum ni_gpct_register reg)
stc_register = Interrupt_B_Enable_Register;
break;
default:
- printk("%s: unhandled register 0x%x in switch.\n",
+ pr_err("%s: unhandled register 0x%x in switch.\n",
__func__, reg);
BUG();
return 0;
@@ -5472,7 +5441,6 @@ static int ni_E_init(struct comedi_device *dev,
s->range_table = board->ao_range_table;
s->insn_config = ni_ao_insn_config;
s->insn_write = ni_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c
index 5252cba82e5..db7e8aac67b 100644
--- a/drivers/staging/comedi/drivers/ni_pcidio.c
+++ b/drivers/staging/comedi/drivers/ni_pcidio.c
@@ -384,11 +384,7 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
struct comedi_subdevice *s = dev->read_subdev;
struct comedi_async *async = s->async;
struct mite_struct *mite = devpriv->mite;
-
- /* int i, j; */
- unsigned int auxdata = 0;
- unsigned short data1 = 0;
- unsigned short data2 = 0;
+ unsigned int auxdata;
int flags;
int status;
int work = 0;
@@ -451,13 +447,9 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
goto out;
}
auxdata = readl(dev->mmio + Group_1_FIFO);
- data1 = auxdata & 0xffff;
- data2 = (auxdata & 0xffff0000) >> 16;
- comedi_buf_put(s, data1);
- comedi_buf_put(s, data2);
+ comedi_buf_write_samples(s, &auxdata, 1);
flags = readb(dev->mmio + Group_1_Flags);
}
- async->events |= COMEDI_CB_BLOCK;
}
if (flags & CountExpired) {
@@ -485,7 +477,7 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
}
out:
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
#if 0
if (!tag)
writeb(0x03, dev->mmio + Master_DMA_And_Interrupt_Control);
diff --git a/drivers/staging/comedi/drivers/ni_stc.h b/drivers/staging/comedi/drivers/ni_stc.h
index 29efce30eb7..bd69c3f0acd 100644
--- a/drivers/staging/comedi/drivers/ni_stc.h
+++ b/drivers/staging/comedi/drivers/ni_stc.h
@@ -334,7 +334,7 @@ static inline unsigned RTSI_Output_Bit(unsigned channel, int is_mseries)
max_channel = 6;
}
if (channel > max_channel) {
- printk("%s: bug, invalid RTSI_channel=%i\n", __func__, channel);
+ pr_err("%s: bug, invalid RTSI_channel=%i\n", __func__, channel);
return 0;
}
return 1 << (base_bit_shift + channel);
@@ -1090,7 +1090,7 @@ static inline int M_Offset_Static_AI_Control(int i)
0x263,
};
if (((unsigned)i) >= ARRAY_SIZE(offset)) {
- printk("%s: invalid channel=%i\n", __func__, i);
+ pr_err("%s: invalid channel=%i\n", __func__, i);
return offset[0];
}
return offset[i];
@@ -1105,7 +1105,7 @@ static inline int M_Offset_AO_Reference_Attenuation(int channel)
0x267
};
if (((unsigned)channel) >= ARRAY_SIZE(offset)) {
- printk("%s: invalid channel=%i\n", __func__, channel);
+ pr_err("%s: invalid channel=%i\n", __func__, channel);
return offset[0];
}
return offset[channel];
@@ -1114,7 +1114,7 @@ static inline int M_Offset_AO_Reference_Attenuation(int channel)
static inline unsigned M_Offset_PFI_Output_Select(unsigned n)
{
if (n < 1 || n > NUM_PFI_OUTPUT_SELECT_REGS) {
- printk("%s: invalid pfi output select register=%i\n",
+ pr_err("%s: invalid pfi output select register=%i\n",
__func__, n);
return M_Offset_PFI_Output_Select_1;
}
@@ -1171,7 +1171,7 @@ static inline unsigned MSeries_PLL_In_Source_Select_RTSI_Bits(unsigned
RTSI_channel)
{
if (RTSI_channel > 7) {
- printk("%s: bug, invalid RTSI_channel=%i\n", __func__,
+ pr_err("%s: bug, invalid RTSI_channel=%i\n", __func__,
RTSI_channel);
return 0;
}
@@ -1192,7 +1192,7 @@ static inline unsigned MSeries_PLL_Divisor_Bits(unsigned divisor)
{
static const unsigned max_divisor = 0x10;
if (divisor < 1 || divisor > max_divisor) {
- printk("%s: bug, invalid divisor=%i\n", __func__, divisor);
+ pr_err("%s: bug, invalid divisor=%i\n", __func__, divisor);
return 0;
}
return (divisor & 0xf) << 8;
@@ -1202,7 +1202,7 @@ static inline unsigned MSeries_PLL_Multiplier_Bits(unsigned multiplier)
{
static const unsigned max_multiplier = 0x100;
if (multiplier < 1 || multiplier > max_multiplier) {
- printk("%s: bug, invalid multiplier=%i\n", __func__,
+ pr_err("%s: bug, invalid multiplier=%i\n", __func__,
multiplier);
return 0;
}
@@ -1464,7 +1464,7 @@ struct ni_private {
unsigned short ai_fifo_buffer[0x2000];
uint8_t eeprom_buffer[M_SERIES_EEPROM_SIZE];
- uint32_t serial_number;
+ __be32 serial_number;
struct mite_struct *mite;
struct mite_channel *ai_mite_chan;
diff --git a/drivers/staging/comedi/drivers/ni_tiocmd.c b/drivers/staging/comedi/drivers/ni_tiocmd.c
index 26e7291c4a5..6037bec77ef 100644
--- a/drivers/staging/comedi/drivers/ni_tiocmd.c
+++ b/drivers/staging/comedi/drivers/ni_tiocmd.c
@@ -371,9 +371,8 @@ static void ni_tio_acknowledge_and_confirm(struct ni_gpct *counter,
of gate interrupt via dma read/write
and report bogus gate errors */
if (counter->counter_dev->variant !=
- ni_gpct_variant_660x) {
+ ni_gpct_variant_660x)
*gate_error = 1;
- }
}
}
if (gxx_status & GI_TC_ERROR(cidx)) {
@@ -450,11 +449,10 @@ void ni_tio_handle_interrupt(struct ni_gpct *counter,
return;
}
gpct_mite_status = mite_get_status(counter->mite_chan);
- if (gpct_mite_status & CHSR_LINKC) {
+ if (gpct_mite_status & CHSR_LINKC)
writel(CHOR_CLRLC,
counter->mite_chan->mite->mite_io_addr +
MITE_CHOR(counter->mite_chan->channel));
- }
mite_sync_input_dma(counter->mite_chan, s);
spin_unlock_irqrestore(&counter->lock, flags);
}
diff --git a/drivers/staging/comedi/drivers/ni_usb6501.c b/drivers/staging/comedi/drivers/ni_usb6501.c
index df7ada8611f..3b5a1b90366 100644
--- a/drivers/staging/comedi/drivers/ni_usb6501.c
+++ b/drivers/staging/comedi/drivers/ni_usb6501.c
@@ -561,7 +561,7 @@ static int ni6501_auto_attach(struct comedi_device *dev,
/* Counter subdevice */
s = &dev->subdevices[1];
s->type = COMEDI_SUBD_COUNTER;
- s->subdev_flags = SDF_READABLE | SDF_WRITEABLE | SDF_LSAMPL;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_LSAMPL;
s->n_chan = 1;
s->maxdata = 0xffffffff;
s->insn_read = ni6501_cnt_insn_read;
diff --git a/drivers/staging/comedi/drivers/pcl711.c b/drivers/staging/comedi/drivers/pcl711.c
index 47f4887108a..938aebc8e0e 100644
--- a/drivers/staging/comedi/drivers/pcl711.c
+++ b/drivers/staging/comedi/drivers/pcl711.c
@@ -156,7 +156,6 @@ static const struct pcl711_board boardtypes[] = {
};
struct pcl711_private {
- unsigned int ntrig;
unsigned int divisor1;
unsigned int divisor2;
};
@@ -199,7 +198,6 @@ static int pcl711_ai_cancel(struct comedi_device *dev,
static irqreturn_t pcl711_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
- struct pcl711_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
struct comedi_cmd *cmd = &s->async->cmd;
unsigned int data;
@@ -213,16 +211,14 @@ static irqreturn_t pcl711_interrupt(int irq, void *d)
outb(PCL711_INT_STAT_CLR, dev->iobase + PCL711_INT_STAT_REG);
- if (comedi_buf_put(s, data) == 0) {
- s->async->events |= COMEDI_CB_OVERFLOW | COMEDI_CB_ERROR;
- } else {
- s->async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOS;
- if (cmd->stop_src == TRIG_COUNT && !(--devpriv->ntrig)) {
- pcl711_ai_set_mode(dev, PCL711_MODE_SOFTTRIG);
- s->async->events |= COMEDI_CB_EOA;
- }
- }
- comedi_event(dev, s);
+ comedi_buf_write_samples(s, &data, 1);
+
+ if (cmd->stop_src == TRIG_COUNT &&
+ s->async->scans_done >= cmd->stop_arg)
+ s->async->events |= COMEDI_CB_EOA;
+
+ comedi_handle_events(dev, s);
+
return IRQ_HANDLED;
}
@@ -373,14 +369,10 @@ static void pcl711_ai_load_counters(struct comedi_device *dev)
static int pcl711_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
- struct pcl711_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
pcl711_set_changain(dev, s, cmd->chanlist[0]);
- if (cmd->stop_src == TRIG_COUNT)
- devpriv->ntrig = cmd->stop_arg;
-
if (cmd->scan_begin_src == TRIG_TIMER) {
pcl711_ai_load_counters(dev);
outb(PCL711_INT_STAT_CLR, dev->iobase + PCL711_INT_STAT_REG);
@@ -505,7 +497,6 @@ static int pcl711_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->maxdata = 0xfff;
s->range_table = &range_bipolar5;
s->insn_write = pcl711_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/pcl726.c b/drivers/staging/comedi/drivers/pcl726.c
index dc179bd02df..86f713fdf1d 100644
--- a/drivers/staging/comedi/drivers/pcl726.c
+++ b/drivers/staging/comedi/drivers/pcl726.c
@@ -235,9 +235,8 @@ static irqreturn_t pcl726_interrupt(int irq, void *d)
if (devpriv->cmd_running) {
pcl726_intr_cancel(dev, s);
- comedi_buf_put(s, 0);
- s->async->events |= (COMEDI_CB_BLOCK | COMEDI_CB_EOS);
- comedi_event(dev, s);
+ comedi_buf_write_samples(s, &s->state, 1);
+ comedi_handle_events(dev, s);
}
return IRQ_HANDLED;
@@ -377,7 +376,6 @@ static int pcl726_attach(struct comedi_device *dev,
s->maxdata = 0x0fff;
s->range_table_list = devpriv->rangelist;
s->insn_write = pcl726_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/pcl812.c b/drivers/staging/comedi/drivers/pcl812.c
index fd5ea6e0161..ac243ca5e0f 100644
--- a/drivers/staging/comedi/drivers/pcl812.c
+++ b/drivers/staging/comedi/drivers/pcl812.c
@@ -512,7 +512,6 @@ struct pcl812_private {
unsigned int last_ai_chanspec;
unsigned char mode_reg_int; /* there is stored INT number for some card */
unsigned int ai_poll_ptr; /* how many samples transferred by poll */
- unsigned int ai_act_scan; /* how many scans we finished */
unsigned int dmapages;
unsigned int hwdmasize;
unsigned long dmabuf[2]; /* PTR to DMA buf */
@@ -556,8 +555,8 @@ static void pcl812_ai_setup_dma(struct comedi_device *dev,
/* we use EOS, so adapt DMA buffer to one scan */
if (devpriv->ai_eos) {
- devpriv->dmabytestomove[0] = cfc_bytes_per_scan(s);
- devpriv->dmabytestomove[1] = cfc_bytes_per_scan(s);
+ devpriv->dmabytestomove[0] = comedi_bytes_per_scan(s);
+ devpriv->dmabytestomove[1] = comedi_bytes_per_scan(s);
devpriv->dma_runs_to_end = 1;
} else {
devpriv->dmabytestomove[0] = devpriv->hwdmasize;
@@ -572,7 +571,7 @@ static void pcl812_ai_setup_dma(struct comedi_device *dev,
devpriv->dma_runs_to_end = 1;
} else {
/* how many samples we must transfer? */
- bytes = cmd->stop_arg * cfc_bytes_per_scan(s);
+ bytes = cmd->stop_arg * comedi_bytes_per_scan(s);
/* how many DMA pages we must fill */
devpriv->dma_runs_to_end =
@@ -807,9 +806,7 @@ static int pcl812_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->ai_dma = 0;
}
- devpriv->ai_act_scan = 0;
devpriv->ai_poll_ptr = 0;
- s->async->cur_chan = 0;
/* don't we want wake up every scan? */
if (cmd->flags & CMDF_WAKE_EOS) {
@@ -841,21 +838,10 @@ static int pcl812_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
static bool pcl812_ai_next_chan(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- struct pcl812_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
- s->async->events |= COMEDI_CB_BLOCK;
-
- s->async->cur_chan++;
- if (s->async->cur_chan >= cmd->chanlist_len) {
- s->async->cur_chan = 0;
- devpriv->ai_act_scan++;
- s->async->events |= COMEDI_CB_EOS;
- }
-
if (cmd->stop_src == TRIG_COUNT &&
- devpriv->ai_act_scan >= cmd->stop_arg) {
- /* all data sampled */
+ s->async->scans_done >= cmd->stop_arg) {
s->async->events |= COMEDI_CB_EOA;
return false;
}
@@ -867,7 +853,9 @@ static void pcl812_handle_eoc(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned int chan = s->async->cur_chan;
unsigned int next_chan;
+ unsigned short val;
if (pcl812_ai_eoc(dev, s, NULL, 0)) {
dev_dbg(dev->class_dev, "A/D cmd IRQ without DRDY!\n");
@@ -875,13 +863,12 @@ static void pcl812_handle_eoc(struct comedi_device *dev,
return;
}
- comedi_buf_put(s, pcl812_ai_get_sample(dev, s));
+ val = pcl812_ai_get_sample(dev, s);
+ comedi_buf_write_samples(s, &val, 1);
/* Set up next channel. Added by abbotti 2010-01-20, but untested. */
- next_chan = s->async->cur_chan + 1;
- if (next_chan >= cmd->chanlist_len)
- next_chan = 0;
- if (cmd->chanlist[s->async->cur_chan] != cmd->chanlist[next_chan])
+ next_chan = s->async->cur_chan;
+ if (cmd->chanlist[chan] != cmd->chanlist[next_chan])
pcl812_ai_set_chan_range(dev, cmd->chanlist[next_chan], 0);
pcl812_ai_next_chan(dev, s);
@@ -893,9 +880,11 @@ static void transfer_from_dma_buf(struct comedi_device *dev,
unsigned int bufptr, unsigned int len)
{
unsigned int i;
+ unsigned short val;
for (i = len; i; i--) {
- comedi_buf_put(s, ptr[bufptr++]);
+ val = ptr[bufptr++];
+ comedi_buf_write_samples(s, &val, 1);
if (!pcl812_ai_next_chan(dev, s))
break;
@@ -939,7 +928,7 @@ static irqreturn_t pcl812_interrupt(int irq, void *d)
pcl812_ai_clear_eoc(dev);
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
@@ -1335,7 +1324,6 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it)
break;
}
s->insn_write = pcl812_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/pcl816.c b/drivers/staging/comedi/drivers/pcl816.c
index aa648713201..73deb4bd5c9 100644
--- a/drivers/staging/comedi/drivers/pcl816.c
+++ b/drivers/staging/comedi/drivers/pcl816.c
@@ -122,7 +122,6 @@ struct pcl816_private {
int next_dma_buf; /* which DMA buffer will be used next round */
long dma_runs_to_end; /* how many DMA transfers we must perform to end of record */
unsigned long last_dma_run; /* how many bytes we must transfer on last DMA page */
- int ai_act_scan; /* how many scans we finished */
unsigned int ai_poll_ptr; /* how many samples transferred by poll */
unsigned int divisor1;
unsigned int divisor2;
@@ -160,7 +159,7 @@ static void pcl816_ai_setup_dma(struct comedi_device *dev,
bytes = devpriv->hwdmasize;
if (cmd->stop_src == TRIG_COUNT) {
/* how many */
- bytes = cmd->stop_arg * cfc_bytes_per_scan(s);
+ bytes = cmd->stop_arg * comedi_bytes_per_scan(s);
/* how many DMA pages we must fill */
devpriv->dma_runs_to_end = bytes / devpriv->hwdmasize;
@@ -286,21 +285,10 @@ static int pcl816_ai_eoc(struct comedi_device *dev,
static bool pcl816_ai_next_chan(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- struct pcl816_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
- s->async->events |= COMEDI_CB_BLOCK;
-
- s->async->cur_chan++;
- if (s->async->cur_chan >= cmd->chanlist_len) {
- s->async->cur_chan = 0;
- devpriv->ai_act_scan++;
- s->async->events |= COMEDI_CB_EOS;
- }
-
if (cmd->stop_src == TRIG_COUNT &&
- devpriv->ai_act_scan >= cmd->stop_arg) {
- /* all data sampled */
+ s->async->scans_done >= cmd->stop_arg) {
s->async->events |= COMEDI_CB_EOA;
return false;
}
@@ -313,10 +301,12 @@ static void transfer_from_dma_buf(struct comedi_device *dev,
unsigned short *ptr,
unsigned int bufptr, unsigned int len)
{
+ unsigned short val;
int i;
for (i = 0; i < len; i++) {
- comedi_buf_put(s, ptr[bufptr++]);
+ val = ptr[bufptr++];
+ comedi_buf_write_samples(s, &val, 1);
if (!pcl816_ai_next_chan(dev, s))
return;
@@ -355,7 +345,7 @@ static irqreturn_t pcl816_interrupt(int irq, void *d)
pcl816_ai_clear_eoc(dev);
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
@@ -508,8 +498,6 @@ static int pcl816_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
pcl816_ai_setup_chanlist(dev, cmd->chanlist, seglen);
udelay(1);
- devpriv->ai_act_scan = 0;
- s->async->cur_chan = 0;
devpriv->ai_cmd_running = 1;
devpriv->ai_poll_ptr = 0;
devpriv->ai_cmd_canceled = 0;
@@ -566,7 +554,7 @@ static int pcl816_ai_poll(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->ai_poll_ptr = top1; /* new buffer position */
spin_unlock_irqrestore(&dev->spinlock, flags);
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return comedi_buf_n_bytes_ready(s);
}
diff --git a/drivers/staging/comedi/drivers/pcl818.c b/drivers/staging/comedi/drivers/pcl818.c
index ac19e83ce62..8edea35532a 100644
--- a/drivers/staging/comedi/drivers/pcl818.c
+++ b/drivers/staging/comedi/drivers/pcl818.c
@@ -313,8 +313,6 @@ struct pcl818_private {
unsigned long last_dma_run; /* how many bytes we must transfer on last DMA page */
unsigned int ns_min; /* minimal allowed delay between samples (in us) for the actual card */
int i8253_osc_base; /* 1/frequency of on board oscillator in ns */
- int ai_act_scan; /* how many scans we finished */
- int ai_act_chan; /* actual position in actual scan */
unsigned int act_chanlist[16]; /* MUX setting for actual AI operations */
unsigned int act_chanlist_len; /* how long is actual MUX list */
unsigned int act_chanlist_pos; /* actual position in MUX list */
@@ -352,7 +350,7 @@ static void pcl818_ai_setup_dma(struct comedi_device *dev,
disable_dma(devpriv->dma); /* disable dma */
bytes = devpriv->hwdmasize;
if (cmd->stop_src == TRIG_COUNT) {
- bytes = cmd->stop_arg * cfc_bytes_per_scan(s);
+ bytes = cmd->stop_arg * comedi_bytes_per_scan(s);
devpriv->dma_runs_to_end = bytes / devpriv->hwdmasize;
devpriv->last_dma_run = bytes % devpriv->hwdmasize;
devpriv->dma_runs_to_end--;
@@ -521,21 +519,12 @@ static bool pcl818_ai_next_chan(struct comedi_device *dev,
struct pcl818_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
- s->async->events |= COMEDI_CB_BLOCK;
-
devpriv->act_chanlist_pos++;
if (devpriv->act_chanlist_pos >= devpriv->act_chanlist_len)
devpriv->act_chanlist_pos = 0;
- s->async->cur_chan++;
- if (s->async->cur_chan >= cmd->chanlist_len) {
- s->async->cur_chan = 0;
- devpriv->ai_act_scan--;
- s->async->events |= COMEDI_CB_EOS;
- }
-
- if (cmd->stop_src == TRIG_COUNT && devpriv->ai_act_scan == 0) {
- /* all data sampled */
+ if (cmd->stop_src == TRIG_COUNT &&
+ s->async->scans_done >= cmd->stop_arg) {
s->async->events |= COMEDI_CB_EOA;
return false;
}
@@ -560,7 +549,7 @@ static void pcl818_handle_eoc(struct comedi_device *dev,
if (pcl818_ai_dropout(dev, s, chan))
return;
- comedi_buf_put(s, val);
+ comedi_buf_write_samples(s, &val, 1);
pcl818_ai_next_chan(dev, s);
}
@@ -589,7 +578,7 @@ static void pcl818_handle_dma(struct comedi_device *dev,
if (pcl818_ai_dropout(dev, s, chan))
break;
- comedi_buf_put(s, val);
+ comedi_buf_write_samples(s, &val, 1);
if (!pcl818_ai_next_chan(dev, s))
break;
@@ -630,7 +619,7 @@ static void pcl818_handle_fifo(struct comedi_device *dev,
if (pcl818_ai_dropout(dev, s, chan))
break;
- comedi_buf_put(s, val);
+ comedi_buf_write_samples(s, &val, 1);
if (!pcl818_ai_next_chan(dev, s))
break;
@@ -642,6 +631,7 @@ static irqreturn_t pcl818_interrupt(int irq, void *d)
struct comedi_device *dev = d;
struct pcl818_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
+ struct comedi_cmd *cmd = &s->async->cmd;
if (!dev->attached || !devpriv->ai_cmd_running) {
pcl818_ai_clear_eoc(dev);
@@ -655,7 +645,7 @@ static irqreturn_t pcl818_interrupt(int irq, void *d)
* being reprogrammed while a DMA transfer is in
* progress.
*/
- devpriv->ai_act_scan = 0;
+ s->async->scans_done = cmd->stop_arg;
s->cancel(dev, s);
return IRQ_HANDLED;
}
@@ -669,7 +659,7 @@ static irqreturn_t pcl818_interrupt(int irq, void *d)
pcl818_ai_clear_eoc(dev);
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
@@ -829,8 +819,6 @@ static int pcl818_ai_cmd(struct comedi_device *dev,
pcl818_ai_setup_chanlist(dev, cmd->chanlist, seglen);
devpriv->ai_data_len = s->async->prealloc_bufsz;
- devpriv->ai_act_scan = cmd->stop_arg;
- devpriv->ai_act_chan = 0;
devpriv->ai_cmd_running = 1;
devpriv->ai_cmd_canceled = 0;
devpriv->act_chanlist_pos = 0;
@@ -873,7 +861,8 @@ static int pcl818_ai_cancel(struct comedi_device *dev,
if (devpriv->dma) {
if (cmd->stop_src == TRIG_NONE ||
- (cmd->stop_src == TRIG_COUNT && devpriv->ai_act_scan > 0)) {
+ (cmd->stop_src == TRIG_COUNT &&
+ s->async->scans_done < cmd->stop_arg)) {
if (!devpriv->ai_cmd_canceled) {
/*
* Wait for running dma transfer to end,
@@ -1169,7 +1158,6 @@ static int pcl818_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->range_table = &range_unknown;
}
s->insn_write = pcl818_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/pcmmio.c b/drivers/staging/comedi/drivers/pcmmio.c
index fc40ee2b34e..f0059e935da 100644
--- a/drivers/staging/comedi/drivers/pcmmio.c
+++ b/drivers/staging/comedi/drivers/pcmmio.c
@@ -190,7 +190,6 @@ struct pcmmio_private {
spinlock_t pagelock; /* protects the page registers */
spinlock_t spinlock; /* protects the member variables */
unsigned int enabled_mask;
- unsigned int stop_count;
unsigned int active:1;
};
@@ -337,7 +336,6 @@ static void pcmmio_handle_dio_intr(struct comedi_device *dev,
{
struct pcmmio_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int oldevents = s->async->events;
unsigned int val = 0;
unsigned long flags;
int i;
@@ -357,31 +355,16 @@ static void pcmmio_handle_dio_intr(struct comedi_device *dev,
val |= (1 << i);
}
- /* Write the scan to the buffer. */
- if (comedi_buf_put(s, val) &&
- comedi_buf_put(s, val >> 16)) {
- s->async->events |= (COMEDI_CB_BLOCK | COMEDI_CB_EOS);
- } else {
- /* Overflow! Stop acquisition!! */
- /* TODO: STOP_ACQUISITION_CALL_HERE!! */
- pcmmio_stop_intr(dev, s);
- }
+ comedi_buf_write_samples(s, &val, 1);
- /* Check for end of acquisition. */
- if (cmd->stop_src == TRIG_COUNT && devpriv->stop_count > 0) {
- devpriv->stop_count--;
- if (devpriv->stop_count == 0) {
- s->async->events |= COMEDI_CB_EOA;
- /* TODO: STOP_ACQUISITION_CALL_HERE!! */
- pcmmio_stop_intr(dev, s);
- }
- }
+ if (cmd->stop_src == TRIG_COUNT &&
+ s->async->scans_done >= cmd->stop_arg)
+ s->async->events |= COMEDI_CB_EOA;
done:
spin_unlock_irqrestore(&devpriv->spinlock, flags);
- if (oldevents != s->async->events)
- comedi_event(dev, s);
+ comedi_handle_events(dev, s);
}
static irqreturn_t interrupt_pcmmio(int irq, void *d)
@@ -481,8 +464,6 @@ static int pcmmio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
spin_lock_irqsave(&devpriv->spinlock, flags);
devpriv->active = 1;
- devpriv->stop_count = cmd->stop_arg;
-
/* Set up start of acquisition. */
if (cmd->start_src == TRIG_INT)
s->async->inttrig = pcmmio_inttrig_start_intr;
@@ -751,7 +732,6 @@ static int pcmmio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->maxdata = 0xffff;
s->range_table = &pcmmio_ao_ranges;
s->insn_write = pcmmio_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
@@ -774,7 +754,7 @@ static int pcmmio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->insn_config = pcmmio_dio_insn_config;
if (dev->irq) {
dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
+ s->subdev_flags |= SDF_CMD_READ | SDF_LSAMPL | SDF_PACKED;
s->len_chanlist = s->n_chan;
s->cancel = pcmmio_cancel;
s->do_cmd = pcmmio_cmd;
diff --git a/drivers/staging/comedi/drivers/pcmuio.c b/drivers/staging/comedi/drivers/pcmuio.c
index d4fe2ec25ec..0f5483b6147 100644
--- a/drivers/staging/comedi/drivers/pcmuio.c
+++ b/drivers/staging/comedi/drivers/pcmuio.c
@@ -130,7 +130,6 @@ struct pcmuio_asic {
spinlock_t pagelock; /* protects the page registers */
spinlock_t spinlock; /* protects member variables */
unsigned int enabled_mask;
- unsigned int stop_count;
unsigned int active:1;
};
@@ -317,7 +316,6 @@ static void pcmuio_handle_intr_subdev(struct comedi_device *dev,
int asic = pcmuio_subdevice_to_asic(s);
struct pcmuio_asic *chip = &devpriv->asics[asic];
struct comedi_cmd *cmd = &s->async->cmd;
- unsigned oldevents = s->async->events;
unsigned int val = 0;
unsigned long flags;
unsigned int i;
@@ -337,33 +335,16 @@ static void pcmuio_handle_intr_subdev(struct comedi_device *dev,
val |= (1 << i);
}
- /* Write the scan to the buffer. */
- if (comedi_buf_put(s, val) &&
- comedi_buf_put(s, val >> 16)) {
- s->async->events |= (COMEDI_CB_BLOCK | COMEDI_CB_EOS);
- } else {
- /* Overflow! Stop acquisition!! */
- /* TODO: STOP_ACQUISITION_CALL_HERE!! */
- pcmuio_stop_intr(dev, s);
- }
+ comedi_buf_write_samples(s, &val, 1);
- /* Check for end of acquisition. */
- if (cmd->stop_src == TRIG_COUNT) {
- if (chip->stop_count > 0) {
- chip->stop_count--;
- if (chip->stop_count == 0) {
- s->async->events |= COMEDI_CB_EOA;
- /* TODO: STOP_ACQUISITION_CALL_HERE!! */
- pcmuio_stop_intr(dev, s);
- }
- }
- }
+ if (cmd->stop_src == TRIG_COUNT &&
+ s->async->scans_done >= cmd->stop_arg)
+ s->async->events |= COMEDI_CB_EOA;
done:
spin_unlock_irqrestore(&chip->spinlock, flags);
- if (oldevents != s->async->events)
- comedi_event(dev, s);
+ comedi_handle_events(dev, s);
}
static int pcmuio_handle_asic_interrupt(struct comedi_device *dev, int asic)
@@ -487,8 +468,6 @@ static int pcmuio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
spin_lock_irqsave(&chip->spinlock, flags);
chip->active = 1;
- chip->stop_count = cmd->stop_arg;
-
/* Set up start of acquisition. */
if (cmd->start_src == TRIG_INT)
s->async->inttrig = pcmuio_inttrig_start_intr;
@@ -614,7 +593,8 @@ static int pcmuio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if ((i == 0 && dev->irq) || (i == 2 && devpriv->irq2)) {
/* setup the interrupt subdevice */
dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
+ s->subdev_flags |= SDF_CMD_READ | SDF_LSAMPL |
+ SDF_PACKED;
s->len_chanlist = s->n_chan;
s->cancel = pcmuio_cancel;
s->do_cmd = pcmuio_cmd;
diff --git a/drivers/staging/comedi/drivers/quatech_daqp_cs.c b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
index 6407df0404f..96098110b0b 100644
--- a/drivers/staging/comedi/drivers/quatech_daqp_cs.c
+++ b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
@@ -65,8 +65,6 @@ struct daqp_private {
enum { semaphore, buffer } interrupt_mode;
struct completion eos;
-
- int count;
};
/* The DAQP communicates with the system through a 16 byte I/O window. */
@@ -194,6 +192,7 @@ static enum irqreturn daqp_interrupt(int irq, void *dev_id)
struct comedi_device *dev = dev_id;
struct daqp_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
+ struct comedi_cmd *cmd = &s->async->cmd;
int loop_limit = 10000;
int status;
@@ -221,18 +220,16 @@ static enum irqreturn daqp_interrupt(int irq, void *dev_id)
data |= inb(dev->iobase + DAQP_FIFO) << 8;
data ^= 0x8000;
- comedi_buf_put(s, data);
+ comedi_buf_write_samples(s, &data, 1);
/* If there's a limit, decrement it
* and stop conversion if zero
*/
- if (devpriv->count > 0) {
- devpriv->count--;
- if (devpriv->count == 0) {
- s->async->events |= COMEDI_CB_EOA;
- break;
- }
+ if (cmd->stop_src == TRIG_COUNT &&
+ s->async->scans_done >= cmd->stop_arg) {
+ s->async->events |= COMEDI_CB_EOA;
+ break;
}
if ((loop_limit--) <= 0)
@@ -245,9 +242,7 @@ static enum irqreturn daqp_interrupt(int irq, void *dev_id)
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
}
- s->async->events |= COMEDI_CB_BLOCK;
-
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
}
return IRQ_HANDLED;
}
@@ -575,12 +570,16 @@ static int daqp_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
*/
if (cmd->stop_src == TRIG_COUNT) {
- devpriv->count = cmd->stop_arg * cmd->scan_end_arg;
- threshold = 2 * devpriv->count;
- while (threshold > DAQP_FIFO_SIZE * 3 / 4)
- threshold /= 2;
+ unsigned long long nsamples;
+ unsigned long long nbytes;
+
+ nsamples = (unsigned long long)cmd->stop_arg *
+ cmd->scan_end_arg;
+ nbytes = nsamples * comedi_bytes_per_sample(s);
+ while (nbytes > DAQP_FIFO_SIZE * 3 / 4)
+ nbytes /= 2;
+ threshold = nbytes;
} else {
- devpriv->count = -1;
threshold = DAQP_FIFO_SIZE / 2;
}
@@ -736,12 +735,11 @@ static int daqp_auto_attach(struct comedi_device *dev,
s = &dev->subdevices[1];
s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITEABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 2;
s->maxdata = 0x0fff;
s->range_table = &range_bipolar5;
s->insn_write = daqp_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
@@ -756,7 +754,7 @@ static int daqp_auto_attach(struct comedi_device *dev,
s = &dev->subdevices[3];
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITEABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 1;
s->maxdata = 1;
s->insn_bits = daqp_do_insn_bits;
diff --git a/drivers/staging/comedi/drivers/rtd520.c b/drivers/staging/comedi/drivers/rtd520.c
index 7d4cb140959..581aa58d9c0 100644
--- a/drivers/staging/comedi/drivers/rtd520.c
+++ b/drivers/staging/comedi/drivers/rtd520.c
@@ -379,7 +379,6 @@ struct rtd_private {
long ai_count; /* total transfer size (samples) */
int xfer_count; /* # to transfer data. 0->1/2FIFO */
int flags; /* flag event modes */
- DECLARE_BITMAP(chan_is_bipolar, RTD_MAX_CHANLIST);
unsigned fifosz;
};
@@ -438,7 +437,6 @@ static unsigned short rtd_convert_chan_gain(struct comedi_device *dev,
unsigned int chanspec, int index)
{
const struct rtd_boardinfo *board = dev->board_ptr;
- struct rtd_private *devpriv = dev->private;
unsigned int chan = CR_CHAN(chanspec);
unsigned int range = CR_RANGE(chanspec);
unsigned int aref = CR_AREF(chanspec);
@@ -451,17 +449,14 @@ static unsigned short rtd_convert_chan_gain(struct comedi_device *dev,
/* +-5 range */
r |= 0x000;
r |= (range & 0x7) << 4;
- __set_bit(index, devpriv->chan_is_bipolar);
} else if (range < board->range_uni10) {
/* +-10 range */
r |= 0x100;
r |= ((range - board->range_bip10) & 0x7) << 4;
- __set_bit(index, devpriv->chan_is_bipolar);
} else {
/* +10 range */
r |= 0x200;
r |= ((range - board->range_uni10) & 0x7) << 4;
- __clear_bit(index, devpriv->chan_is_bipolar);
}
switch (aref) {
@@ -561,6 +556,7 @@ static int rtd_ai_rinsn(struct comedi_device *dev,
unsigned int *data)
{
struct rtd_private *devpriv = dev->private;
+ unsigned int range = CR_RANGE(insn->chanspec);
int ret;
int n;
@@ -586,9 +582,11 @@ static int rtd_ai_rinsn(struct comedi_device *dev,
/* read data */
d = readw(devpriv->las1 + LAS1_ADC_FIFO);
d = d >> 3; /* low 3 bits are marker lines */
- if (test_bit(0, devpriv->chan_is_bipolar))
- /* convert to comedi unsigned data */
+
+ /* convert bipolar data to comedi unsigned data */
+ if (comedi_range_is_bipolar(s, range))
d = comedi_offset_munge(s, d);
+
data[n] = d & s->maxdata;
}
@@ -606,9 +604,12 @@ static int ai_read_n(struct comedi_device *dev, struct comedi_subdevice *s,
int count)
{
struct rtd_private *devpriv = dev->private;
+ struct comedi_async *async = s->async;
+ struct comedi_cmd *cmd = &async->cmd;
int ii;
for (ii = 0; ii < count; ii++) {
+ unsigned int range = CR_RANGE(cmd->chanlist[async->cur_chan]);
unsigned short d;
if (0 == devpriv->ai_count) { /* done */
@@ -618,41 +619,13 @@ static int ai_read_n(struct comedi_device *dev, struct comedi_subdevice *s,
d = readw(devpriv->las1 + LAS1_ADC_FIFO);
d = d >> 3; /* low 3 bits are marker lines */
- if (test_bit(s->async->cur_chan, devpriv->chan_is_bipolar))
- /* convert to comedi unsigned data */
- d = comedi_offset_munge(s, d);
- d &= s->maxdata;
-
- if (!comedi_buf_put(s, d))
- return -1;
-
- if (devpriv->ai_count > 0) /* < 0, means read forever */
- devpriv->ai_count--;
- }
- return 0;
-}
-
-/*
- unknown amount of data is waiting in fifo.
-*/
-static int ai_read_dregs(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct rtd_private *devpriv = dev->private;
-
- while (readl(dev->mmio + LAS0_ADC) & FS_ADC_NOT_EMPTY) {
- unsigned short d = readw(devpriv->las1 + LAS1_ADC_FIFO);
-
- if (0 == devpriv->ai_count) { /* done */
- continue; /* read rest */
- }
- d = d >> 3; /* low 3 bits are marker lines */
- if (test_bit(s->async->cur_chan, devpriv->chan_is_bipolar))
- /* convert to comedi unsigned data */
+ /* convert bipolar data to comedi unsigned data */
+ if (comedi_range_is_bipolar(s, range))
d = comedi_offset_munge(s, d);
d &= s->maxdata;
- if (!comedi_buf_put(s, d))
+ if (!comedi_buf_write_samples(s, &d, 1))
return -1;
if (devpriv->ai_count > 0) /* < 0, means read forever */
@@ -703,8 +676,6 @@ static irqreturn_t rtd_interrupt(int irq, void *d)
if (0 == devpriv->ai_count)
goto xfer_done;
-
- comedi_event(dev, s);
} else if (devpriv->xfer_count > 0) {
if (fifo_status & FS_ADC_NOT_EMPTY) {
/* FIFO not empty */
@@ -713,8 +684,6 @@ static irqreturn_t rtd_interrupt(int irq, void *d)
if (0 == devpriv->ai_count)
goto xfer_done;
-
- comedi_event(dev, s);
}
}
}
@@ -726,28 +695,16 @@ static irqreturn_t rtd_interrupt(int irq, void *d)
/* clear the interrupt */
writew(status, dev->mmio + LAS0_CLEAR);
readw(dev->mmio + LAS0_CLEAR);
+
+ comedi_handle_events(dev, s);
+
return IRQ_HANDLED;
xfer_abort:
- writel(0, dev->mmio + LAS0_ADC_FIFO_CLEAR);
s->async->events |= COMEDI_CB_ERROR;
- devpriv->ai_count = 0; /* stop and don't transfer any more */
- /* fall into xfer_done */
xfer_done:
- /* pacer stop source: SOFTWARE */
- writel(0, dev->mmio + LAS0_PACER_STOP);
- writel(0, dev->mmio + LAS0_PACER); /* stop pacer */
- writel(0, dev->mmio + LAS0_ADC_CONVERSION);
- writew(0, dev->mmio + LAS0_IT);
-
- if (devpriv->ai_count > 0) { /* there shouldn't be anything left */
- fifo_status = readl(dev->mmio + LAS0_ADC);
- ai_read_dregs(dev, s); /* read anything left in FIFO */
- }
-
- s->async->events |= COMEDI_CB_EOA; /* signal end to comedi */
- comedi_event(dev, s);
+ s->async->events |= COMEDI_CB_EOA;
/* clear the interrupt */
status = readw(dev->mmio + LAS0_IT);
@@ -757,6 +714,8 @@ xfer_done:
fifo_status = readl(dev->mmio + LAS0_ADC);
overrun = readl(dev->mmio + LAS0_OVERRUN) & 0xffff;
+ comedi_handle_events(dev, s);
+
return IRQ_HANDLED;
}
@@ -1083,6 +1042,7 @@ static int rtd_ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->ai_count = 0; /* stop and don't transfer any more */
status = readw(dev->mmio + LAS0_IT);
overrun = readl(dev->mmio + LAS0_OVERRUN) & 0xffff;
+ writel(0, dev->mmio + LAS0_ADC_FIFO_CLEAR);
return 0;
}
@@ -1303,7 +1263,6 @@ static int rtd_auto_attach(struct comedi_device *dev,
s->maxdata = 0x0fff;
s->range_table = &rtd_ao_range;
s->insn_write = rtd_ao_winsn;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/rti800.c b/drivers/staging/comedi/drivers/rti800.c
index e3d9f44cefb..67b4b378bd0 100644
--- a/drivers/staging/comedi/drivers/rti800.c
+++ b/drivers/staging/comedi/drivers/rti800.c
@@ -313,7 +313,6 @@ static int rti800_attach(struct comedi_device *dev, struct comedi_devconfig *it)
? rti800_ao_ranges[it->options[7]]
: &range_unknown;
s->insn_write = rti800_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/rti802.c b/drivers/staging/comedi/drivers/rti802.c
index c81b01c40f1..96c3974207a 100644
--- a/drivers/staging/comedi/drivers/rti802.c
+++ b/drivers/staging/comedi/drivers/rti802.c
@@ -100,7 +100,6 @@ static int rti802_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->maxdata = 0xfff;
s->n_chan = 8;
s->insn_write = rti802_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/s526.c b/drivers/staging/comedi/drivers/s526.c
index 75872c6aec2..6f3e8a08e75 100644
--- a/drivers/staging/comedi/drivers/s526.c
+++ b/drivers/staging/comedi/drivers/s526.c
@@ -583,7 +583,6 @@ static int s526_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->maxdata = 0xffff;
s->range_table = &range_bipolar10;
s->insn_write = s526_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
index 0e7621e890c..14932c5f379 100644
--- a/drivers/staging/comedi/drivers/s626.c
+++ b/drivers/staging/comedi/drivers/s626.c
@@ -78,7 +78,6 @@ struct s626_buffer_dma {
struct s626_private {
uint8_t ai_cmd_running; /* ai_cmd is running */
- int ai_sample_count; /* number of samples to acquire */
unsigned int ai_sample_timer; /* time between samples in
* units of the timer */
int ai_convert_count; /* conversion counter */
@@ -1480,7 +1479,6 @@ static bool s626_handle_eos_interrupt(struct comedi_device *dev)
* from the final ADC of the previous poll list scan.
*/
uint32_t *readaddr = (uint32_t *)devpriv->ana_buf.logical_base + 1;
- bool finished = false;
int i;
/* get the data and hand it over to comedi */
@@ -1494,36 +1492,21 @@ static bool s626_handle_eos_interrupt(struct comedi_device *dev)
tempdata = s626_ai_reg_to_uint(*readaddr);
readaddr++;
- /* put data into read buffer */
- cfc_write_to_buffer(s, tempdata);
+ comedi_buf_write_samples(s, &tempdata, 1);
}
- /* end of scan occurs */
- async->events |= COMEDI_CB_EOS;
+ if (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg)
+ async->events |= COMEDI_CB_EOA;
- if (cmd->stop_src == TRIG_COUNT) {
- devpriv->ai_sample_count--;
- if (devpriv->ai_sample_count <= 0) {
- devpriv->ai_cmd_running = 0;
-
- /* Stop RPS program */
- s626_mc_disable(dev, S626_MC1_ERPS1, S626_P_MC1);
-
- /* send end of acquisition */
- async->events |= COMEDI_CB_EOA;
-
- /* disable master interrupt */
- finished = true;
- }
- }
+ if (async->events & COMEDI_CB_CANCEL_MASK)
+ devpriv->ai_cmd_running = 0;
if (devpriv->ai_cmd_running && cmd->scan_begin_src == TRIG_EXT)
s626_dio_set_irq(dev, cmd->scan_begin_arg);
- /* tell comedi that data is there */
- comedi_event(dev, s);
+ comedi_handle_events(dev, s);
- return finished;
+ return !devpriv->ai_cmd_running;
}
static irqreturn_t s626_irq_handler(int irq, void *d)
@@ -1970,13 +1953,13 @@ static int s626_ns_to_timer(unsigned int *nanosec, unsigned int flags)
switch (flags & CMDF_ROUND_MASK) {
case CMDF_ROUND_NEAREST:
default:
- divider = (*nanosec + base / 2) / base;
+ divider = DIV_ROUND_CLOSEST(*nanosec, base);
break;
case CMDF_ROUND_DOWN:
divider = (*nanosec) / base;
break;
case CMDF_ROUND_UP:
- divider = (*nanosec + base - 1) / base;
+ divider = DIV_ROUND_UP(*nanosec, base);
break;
}
@@ -2102,8 +2085,6 @@ static int s626_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
break;
}
- devpriv->ai_sample_count = cmd->stop_arg;
-
s626_reset_adc(dev, ppl);
switch (cmd->start_src) {
@@ -2820,7 +2801,6 @@ static int s626_auto_attach(struct comedi_device *dev,
s->maxdata = 0x3fff;
s->range_table = &range_bipolar10;
s->insn_write = s626_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
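
In s626_ns_to_timer() above, the open-coded rounding is swapped for the kernel's DIV_ROUND_CLOSEST() and DIV_ROUND_UP() helpers; the arithmetic is unchanged, only the intent becomes explicit. A small stand-alone illustration of the equivalence (plain C with the macros written out for unsigned operands, not the kernel definitions themselves):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int nanosec = 12345, base = 200;	/* 12345 / 200 = 61.725 */

	printf("nearest: %u\n", DIV_ROUND_CLOSEST(nanosec, base));	/* 62 */
	printf("down:    %u\n", nanosec / base);			/* 61 */
	printf("up:      %u\n", DIV_ROUND_UP(nanosec, base));		/* 62 */
	return 0;
}
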
diff --git a/drivers/staging/comedi/drivers/serial2002.c b/drivers/staging/comedi/drivers/serial2002.c
index 167f82418cb..71226ee9064 100644
--- a/drivers/staging/comedi/drivers/serial2002.c
+++ b/drivers/staging/comedi/drivers/serial2002.c
@@ -742,7 +742,7 @@ static int serial2002_attach(struct comedi_device *dev,
/* digital output subdevice */
s = &dev->subdevices[1];
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITEABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 0;
s->maxdata = 1;
s->range_table = &range_digital;
@@ -760,7 +760,7 @@ static int serial2002_attach(struct comedi_device *dev,
/* analog output subdevice */
s = &dev->subdevices[3];
s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITEABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 0;
s->maxdata = 1;
s->range_table = NULL;
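
The serial2002 hunks (and the vmk80xx ones further down) only swap SDF_WRITEABLE for SDF_WRITABLE. As far as the comedi headers of this era go, the misspelled name is kept as an alias of the correct one, so this is a spelling cleanup rather than a behaviour change; schematically:

	#define SDF_WRITEABLE	SDF_WRITABLE	/* old, misspelled alias of the flag */
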
diff --git a/drivers/staging/comedi/drivers/usbdux.c b/drivers/staging/comedi/drivers/usbdux.c
index 5adbfedf780..4737dbf8e01 100644
--- a/drivers/staging/comedi/drivers/usbdux.c
+++ b/drivers/staging/comedi/drivers/usbdux.c
@@ -1,37 +1,34 @@
/*
- comedi/drivers/usbdux.c
- Copyright (C) 2003-2007 Bernd Porr, Bernd.Porr@f2s.com
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
+ * usbdux.c
+ * Copyright (C) 2003-2014 Bernd Porr, mail@berndporr.me.uk
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
+
/*
-Driver: usbdux
-Description: University of Stirling USB DAQ & INCITE Technology Limited
-Devices: [ITL] USB-DUX (usbdux.o)
-Author: Bernd Porr <BerndPorr@f2s.com>
-Updated: 8 Dec 2008
-Status: Stable
-Configuration options:
- You have to upload firmware with the -i option. The
- firmware is usually installed under /usr/share/usb or
- /usr/local/share/usb or /lib/firmware.
-
-Connection scheme for the counter at the digital port:
- 0=/CLK0, 1=UP/DOWN0, 2=RESET0, 4=/CLK1, 5=UP/DOWN1, 6=RESET1.
- The sampling rate of the counter is approximately 500Hz.
-
-Please note that under USB2.0 the length of the channel list determines
-the max sampling rate. If you sample only one channel you get 8kHz
-sampling rate. If you sample two channels you get 4kHz and so on.
-*/
+ * Driver: usbdux
+ * Description: University of Stirling USB DAQ & INCITE Technology Limited
+ * Devices: (ITL) USB-DUX [usbdux]
+ * Author: Bernd Porr <mail@berndporr.me.uk>
+ * Updated: 10 Oct 2014
+ * Status: Stable
+ * Connection scheme for the counter at the digital port:
+ * 0=/CLK0, 1=UP/DOWN0, 2=RESET0, 4=/CLK1, 5=UP/DOWN1, 6=RESET1.
+ * The sampling rate of the counter is approximately 500Hz.
+ *
+ * Note that under USB2.0 the length of the channel list determines
+ * the max sampling rate. If you sample only one channel you get 8kHz
+ * sampling rate. If you sample two channels you get 4kHz and so on.
+ */
+
/*
* I must give credit here to Chris Baugher who
* wrote the driver for AT-MIO-16d. I used some parts of this
@@ -205,9 +202,6 @@ struct usbdux_private {
unsigned int ao_cmd_running:1;
unsigned int pwm_cmd_running:1;
- /* number of samples to acquire */
- int ai_sample_count;
- int ao_sample_count;
/* time between samples in units of the timer */
unsigned int ai_timer;
unsigned int ao_timer;
@@ -253,128 +247,107 @@ static int usbdux_ai_cancel(struct comedi_device *dev,
return 0;
}
-/* analogue IN - interrupt service routine */
+static void usbduxsub_ai_handle_urb(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct urb *urb)
+{
+ struct usbdux_private *devpriv = dev->private;
+ struct comedi_async *async = s->async;
+ struct comedi_cmd *cmd = &async->cmd;
+ int ret;
+ int i;
+
+ devpriv->ai_counter--;
+ if (devpriv->ai_counter == 0) {
+ devpriv->ai_counter = devpriv->ai_timer;
+
+ /* get the data from the USB bus and hand it over to comedi */
+ for (i = 0; i < cmd->chanlist_len; i++) {
+ unsigned int range = CR_RANGE(cmd->chanlist[i]);
+ uint16_t val = le16_to_cpu(devpriv->in_buf[i]);
+
+ /* bipolar data is two's-complement */
+ if (comedi_range_is_bipolar(s, range))
+ val ^= ((s->maxdata + 1) >> 1);
+
+ /* transfer data */
+ if (!comedi_buf_write_samples(s, &val, 1))
+ return;
+ }
+
+ if (cmd->stop_src == TRIG_COUNT &&
+ async->scans_done >= cmd->stop_arg)
+ async->events |= COMEDI_CB_EOA;
+ }
+
+ /* if command is still running, resubmit urb */
+ if (!(async->events & COMEDI_CB_CANCEL_MASK)) {
+ urb->dev = comedi_to_usb_dev(dev);
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret < 0) {
+ dev_err(dev->class_dev,
+ "urb resubmit failed in int-context! err=%d\n",
+ ret);
+ if (ret == -EL2NSYNC)
+ dev_err(dev->class_dev,
+ "buggy USB host controller or bug in IRQ handler!\n");
+ async->events |= COMEDI_CB_ERROR;
+ }
+ }
+}
+
static void usbduxsub_ai_isoc_irq(struct urb *urb)
{
struct comedi_device *dev = urb->context;
struct comedi_subdevice *s = dev->read_subdev;
+ struct comedi_async *async = s->async;
struct usbdux_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- int i, err;
- /* first we test if something unusual has just happened */
+ /* exit if not running a command, do not resubmit urb */
+ if (!devpriv->ai_cmd_running)
+ return;
+
switch (urb->status) {
case 0:
/* copy the result in the transfer buffer */
memcpy(devpriv->in_buf, urb->transfer_buffer, SIZEINBUF);
+ usbduxsub_ai_handle_urb(dev, s, urb);
break;
+
case -EILSEQ:
- /* error in the ISOchronous data */
- /* we don't copy the data into the transfer buffer */
- /* and recycle the last data byte */
+ /*
+ * error in the ISOchronous data
+ * we don't copy the data into the transfer buffer
+ * and recycle the last data byte
+ */
dev_dbg(dev->class_dev, "CRC error in ISO IN stream\n");
+ usbduxsub_ai_handle_urb(dev, s, urb);
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
case -ECONNABORTED:
- /* happens after an unlink command */
- if (devpriv->ai_cmd_running) {
- s->async->events |= COMEDI_CB_EOA;
- s->async->events |= COMEDI_CB_ERROR;
- comedi_event(dev, s);
- /* stop the transfer w/o unlink */
- usbdux_ai_stop(dev, 0);
- }
- return;
+ /* after an unlink command, unplug, ... etc */
+ async->events |= COMEDI_CB_ERROR;
+ break;
default:
- /* a real error on the bus */
- /* pass error to comedi if we are really running a command */
- if (devpriv->ai_cmd_running) {
- dev_err(dev->class_dev,
- "Non-zero urb status received in ai intr context: %d\n",
- urb->status);
- s->async->events |= COMEDI_CB_EOA;
- s->async->events |= COMEDI_CB_ERROR;
- comedi_event(dev, s);
- /* don't do an unlink here */
- usbdux_ai_stop(dev, 0);
- }
- return;
+ /* a real error */
+ dev_err(dev->class_dev,
+ "Non-zero urb status received in ai intr context: %d\n",
+ urb->status);
+ async->events |= COMEDI_CB_ERROR;
+ break;
}
/*
- * at this point we are reasonably sure that nothing dodgy has happened
- * are we running a command?
+ * comedi_handle_events() cannot be used in this driver. The (*cancel)
+ * operation would unlink the urb.
*/
- if (unlikely(!devpriv->ai_cmd_running)) {
- /*
- * not running a command, do not continue execution if no
- * asynchronous command is running in particular not resubmit
- */
- return;
- }
-
- urb->dev = comedi_to_usb_dev(dev);
-
- /* resubmit the urb */
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (unlikely(err < 0)) {
- dev_err(dev->class_dev,
- "urb resubmit failed in int-context! err=%d\n", err);
- if (err == -EL2NSYNC)
- dev_err(dev->class_dev,
- "buggy USB host controller or bug in IRQ handler!\n");
- s->async->events |= COMEDI_CB_EOA;
- s->async->events |= COMEDI_CB_ERROR;
- comedi_event(dev, s);
- /* don't do an unlink here */
+ if (async->events & COMEDI_CB_CANCEL_MASK)
usbdux_ai_stop(dev, 0);
- return;
- }
-
- devpriv->ai_counter--;
- if (likely(devpriv->ai_counter > 0))
- return;
-
- /* timer zero, transfer measurements to comedi */
- devpriv->ai_counter = devpriv->ai_timer;
-
- /* test, if we transmit only a fixed number of samples */
- if (cmd->stop_src == TRIG_COUNT) {
- /* not continuous, fixed number of samples */
- devpriv->ai_sample_count--;
- /* all samples received? */
- if (devpriv->ai_sample_count < 0) {
- /* prevent a resubmit next time */
- usbdux_ai_stop(dev, 0);
- /* say comedi that the acquistion is over */
- s->async->events |= COMEDI_CB_EOA;
- comedi_event(dev, s);
- return;
- }
- }
- /* get the data from the USB bus and hand it over to comedi */
- for (i = 0; i < cmd->chanlist_len; i++) {
- unsigned int range = CR_RANGE(cmd->chanlist[i]);
- uint16_t val = le16_to_cpu(devpriv->in_buf[i]);
-
- /* bipolar data is two's-complement */
- if (comedi_range_is_bipolar(s, range))
- val ^= ((s->maxdata + 1) >> 1);
- /* transfer data */
- err = comedi_buf_put(s, val);
- if (unlikely(err == 0)) {
- /* buffer overflow */
- usbdux_ai_stop(dev, 0);
- return;
- }
- }
- /* tell comedi that data is there */
- s->async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOS;
comedi_event(dev, s);
}
@@ -402,71 +375,25 @@ static int usbdux_ao_cancel(struct comedi_device *dev,
return 0;
}
-static void usbduxsub_ao_isoc_irq(struct urb *urb)
+static void usbduxsub_ao_handle_urb(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct urb *urb)
{
- struct comedi_device *dev = urb->context;
- struct comedi_subdevice *s = dev->write_subdev;
struct usbdux_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
+ struct comedi_async *async = s->async;
+ struct comedi_cmd *cmd = &async->cmd;
uint8_t *datap;
int ret;
int i;
- switch (urb->status) {
- case 0:
- /* success */
- break;
-
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- case -ECONNABORTED:
- /* after an unlink command, unplug, ... etc */
- /* no unlink needed here. Already shutting down. */
- if (devpriv->ao_cmd_running) {
- s->async->events |= COMEDI_CB_EOA;
- comedi_event(dev, s);
- usbdux_ao_stop(dev, 0);
- }
- return;
-
- default:
- /* a real error */
- if (devpriv->ao_cmd_running) {
- dev_err(dev->class_dev,
- "Non-zero urb status received in ao intr context: %d\n",
- urb->status);
- s->async->events |= COMEDI_CB_ERROR;
- s->async->events |= COMEDI_CB_EOA;
- comedi_event(dev, s);
- /* we do an unlink if we are in the high speed mode */
- usbdux_ao_stop(dev, 0);
- }
- return;
- }
-
- /* are we actually running? */
- if (!devpriv->ao_cmd_running)
- return;
-
- /* normal operation: executing a command in this subdevice */
devpriv->ao_counter--;
- if ((int)devpriv->ao_counter <= 0) {
- /* timer zero */
+ if (devpriv->ao_counter == 0) {
devpriv->ao_counter = devpriv->ao_timer;
- /* handle non continous acquisition */
- if (cmd->stop_src == TRIG_COUNT) {
- /* fixed number of samples */
- devpriv->ao_sample_count--;
- if (devpriv->ao_sample_count < 0) {
- /* all samples transmitted */
- usbdux_ao_stop(dev, 0);
- s->async->events |= COMEDI_CB_EOA;
- comedi_event(dev, s);
- /* no resubmit of the urb */
- return;
- }
+ if (cmd->stop_src == TRIG_COUNT &&
+ async->scans_done >= cmd->stop_arg) {
+ async->events |= COMEDI_CB_EOA;
+ return;
}
/* transmit data to the USB bus */
@@ -476,26 +403,25 @@ static void usbduxsub_ao_isoc_irq(struct urb *urb)
unsigned int chan = CR_CHAN(cmd->chanlist[i]);
unsigned short val;
- ret = comedi_buf_get(s, &val);
- if (ret < 0) {
+ if (!comedi_buf_read_samples(s, &val, 1)) {
dev_err(dev->class_dev, "buffer underflow\n");
- s->async->events |= (COMEDI_CB_EOA |
- COMEDI_CB_OVERFLOW);
+ async->events |= COMEDI_CB_OVERFLOW;
+ return;
}
+
/* pointer to the DA */
*datap++ = val & 0xff;
*datap++ = (val >> 8) & 0xff;
*datap++ = chan << 6;
s->readback[chan] = val;
-
- s->async->events |= COMEDI_CB_BLOCK;
- comedi_event(dev, s);
}
}
- urb->transfer_buffer_length = SIZEOUTBUF;
- urb->dev = comedi_to_usb_dev(dev);
- urb->status = 0;
- if (devpriv->ao_cmd_running) {
+
+ /* if command is still running, resubmit urb for BULK transfer */
+ if (!(async->events & COMEDI_CB_CANCEL_MASK)) {
+ urb->transfer_buffer_length = SIZEOUTBUF;
+ urb->dev = comedi_to_usb_dev(dev);
+ urb->status = 0;
if (devpriv->high_speed)
urb->interval = 8; /* uframes */
else
@@ -512,16 +438,54 @@ static void usbduxsub_ao_isoc_irq(struct urb *urb)
if (ret == -EL2NSYNC)
dev_err(dev->class_dev,
"buggy USB host controller or bug in IRQ handling!\n");
-
- s->async->events |= COMEDI_CB_EOA;
- s->async->events |= COMEDI_CB_ERROR;
- comedi_event(dev, s);
- /* don't do an unlink here */
- usbdux_ao_stop(dev, 0);
+ async->events |= COMEDI_CB_ERROR;
}
}
}
+static void usbduxsub_ao_isoc_irq(struct urb *urb)
+{
+ struct comedi_device *dev = urb->context;
+ struct comedi_subdevice *s = dev->write_subdev;
+ struct comedi_async *async = s->async;
+ struct usbdux_private *devpriv = dev->private;
+
+ /* exit if not running a command, do not resubmit urb */
+ if (!devpriv->ao_cmd_running)
+ return;
+
+ switch (urb->status) {
+ case 0:
+ usbduxsub_ao_handle_urb(dev, s, urb);
+ break;
+
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ case -ECONNABORTED:
+ /* after an unlink command, unplug, ... etc */
+ async->events |= COMEDI_CB_ERROR;
+ break;
+
+ default:
+ /* a real error */
+ dev_err(dev->class_dev,
+ "Non-zero urb status received in ao intr context: %d\n",
+ urb->status);
+ async->events |= COMEDI_CB_ERROR;
+ break;
+ }
+
+ /*
+ * comedi_handle_events() cannot be used in this driver. The (*cancel)
+ * operation would unlink the urb.
+ */
+ if (async->events & COMEDI_CB_CANCEL_MASK)
+ usbdux_ao_stop(dev, 0);
+
+ comedi_event(dev, s);
+}
+
static int usbdux_submit_urbs(struct comedi_device *dev,
struct urb **urbs, int num_urbs,
int input_urb)
@@ -725,9 +689,6 @@ static int usbdux_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
if (devpriv->ai_cmd_running)
goto ai_cmd_exit;
- /* set current channel of the running acquisition to zero */
- s->async->cur_chan = 0;
-
devpriv->dux_commands[1] = len;
for (i = 0; i < len; ++i) {
unsigned int chan = CR_CHAN(cmd->chanlist[i]);
@@ -765,14 +726,6 @@ static int usbdux_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->ai_counter = devpriv->ai_timer;
- if (cmd->stop_src == TRIG_COUNT) {
- /* data arrives as one packet */
- devpriv->ai_sample_count = cmd->stop_arg;
- } else {
- /* continous acquisition */
- devpriv->ai_sample_count = 0;
- }
-
if (cmd->start_src == TRIG_NOW) {
/* enable this acquisition operation */
devpriv->ai_cmd_running = 1;
@@ -1023,9 +976,6 @@ static int usbdux_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
if (devpriv->ao_cmd_running)
goto ao_cmd_exit;
- /* set current channel of the running acquisition to zero */
- s->async->cur_chan = 0;
-
/* we count in steps of 1ms (125us) */
/* 125us mode not used yet */
if (0) { /* (devpriv->high_speed) */
@@ -1044,24 +994,6 @@ static int usbdux_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->ao_counter = devpriv->ao_timer;
- if (cmd->stop_src == TRIG_COUNT) {
- /* not continuous */
- /* counter */
- /* high speed also scans everything at once */
- if (0) { /* (devpriv->high_speed) */
- devpriv->ao_sample_count = cmd->stop_arg *
- cmd->scan_end_arg;
- } else {
- /* there's no scan as the scan has been */
- /* perf inside the FX2 */
- /* data arrives as one packet */
- devpriv->ao_sample_count = cmd->stop_arg;
- }
- } else {
- /* continous acquisition */
- devpriv->ao_sample_count = 0;
- }
-
if (cmd->start_src == TRIG_NOW) {
/* enable this acquisition operation */
devpriv->ao_cmd_running = 1;
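
The usbdux rework above splits each URB completion into two parts: the completion callback only classifies urb->status and accumulates async events, while a handle_urb helper copies the samples and resubmits the urb. Because the (*cancel) operation would unlink urbs, the driver cannot call comedi_handle_events() from this context; it checks COMEDI_CB_CANCEL_MASK itself, stops the transfer without unlinking, and then reports the events with comedi_event(). A condensed sketch of that skeleton, with foo_ai_stop(), foo_ai_handle_urb() and struct foo_private standing in for the driver's own helpers and the error paths collapsed for brevity:

static void foo_ai_urb_complete(struct urb *urb)
{
	struct comedi_device *dev = urb->context;
	struct comedi_subdevice *s = dev->read_subdev;
	struct comedi_async *async = s->async;
	struct foo_private *devpriv = dev->private;

	if (!devpriv->ai_cmd_running)
		return;		/* no command running: never resubmit */

	switch (urb->status) {
	case 0:
		foo_ai_handle_urb(dev, s, urb);	/* copy samples, resubmit */
		break;
	default:
		async->events |= COMEDI_CB_ERROR;
		break;
	}

	/*
	 * comedi_handle_events() would run (*cancel), which unlinks urbs;
	 * that cannot be done here, so stop the transfer locally instead.
	 */
	if (async->events & COMEDI_CB_CANCEL_MASK)
		foo_ai_stop(dev, 0);

	comedi_event(dev, s);
}
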
diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c
index f85818dd5e1..ddc4cb9d5ed 100644
--- a/drivers/staging/comedi/drivers/usbduxfast.c
+++ b/drivers/staging/comedi/drivers/usbduxfast.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2004 Bernd Porr, Bernd.Porr@f2s.com
+ * Copyright (C) 2004-2014 Bernd Porr, mail@berndporr.me.uk
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -13,6 +13,15 @@
*/
/*
+ * Driver: usbduxfast
+ * Description: University of Stirling USB DAQ & INCITE Technology Limited
+ * Devices: (ITL) USB-DUX [usbduxfast]
+ * Author: Bernd Porr <mail@berndporr.me.uk>
+ * Updated: 10 Oct 2014
+ * Status: stable
+ */
+
+/*
* I must give credit here to Chris Baugher who
* wrote the driver for AT-MIO-16d. I used some parts of this
* driver. I also must give credits to David Brownell
@@ -152,7 +161,6 @@ struct usbduxfast_private {
uint8_t *duxbuf;
int8_t *inbuf;
short int ai_cmd_running; /* asynchronous command is running */
- long int ai_sample_count; /* number of samples to acquire */
int ignore; /* counter which ignores the first
buffers */
struct semaphore sem;
@@ -227,114 +235,82 @@ static int usbduxfast_ai_cancel(struct comedi_device *dev,
return ret;
}
-/*
- * analogue IN
- * interrupt service routine
- */
+static void usbduxfast_ai_handle_urb(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct urb *urb)
+{
+ struct usbduxfast_private *devpriv = dev->private;
+ struct comedi_async *async = s->async;
+ struct comedi_cmd *cmd = &async->cmd;
+ int ret;
+
+ if (devpriv->ignore) {
+ devpriv->ignore--;
+ } else {
+ unsigned int nsamples;
+
+ nsamples = comedi_bytes_to_samples(s, urb->actual_length);
+ nsamples = comedi_nsamples_left(s, nsamples);
+ comedi_buf_write_samples(s, urb->transfer_buffer, nsamples);
+
+ if (cmd->stop_src == TRIG_COUNT &&
+ async->scans_done >= cmd->stop_arg)
+ async->events |= COMEDI_CB_EOA;
+ }
+
+ /* if command is still running, resubmit urb for BULK transfer */
+ if (!(async->events & COMEDI_CB_CANCEL_MASK)) {
+ urb->dev = comedi_to_usb_dev(dev);
+ urb->status = 0;
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret < 0) {
+ dev_err(dev->class_dev, "urb resubm failed: %d", ret);
+ async->events |= COMEDI_CB_ERROR;
+ }
+ }
+}
+
static void usbduxfast_ai_interrupt(struct urb *urb)
{
struct comedi_device *dev = urb->context;
struct comedi_subdevice *s = dev->read_subdev;
struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- struct usb_device *usb = comedi_to_usb_dev(dev);
struct usbduxfast_private *devpriv = dev->private;
- int n, err;
- /* are we running a command? */
- if (unlikely(!devpriv->ai_cmd_running)) {
- /*
- * not running a command
- * do not continue execution if no asynchronous command
- * is running in particular not resubmit
- */
+ /* exit if not running a command, do not resubmit urb */
+ if (!devpriv->ai_cmd_running)
return;
- }
- /* first we test if something unusual has just happened */
switch (urb->status) {
case 0:
+ usbduxfast_ai_handle_urb(dev, s, urb);
break;
- /*
- * happens after an unlink command or when the device
- * is plugged out
- */
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
case -ECONNABORTED:
- /* tell this comedi */
- async->events |= COMEDI_CB_EOA;
+ /* after an unlink command, unplug, ... etc */
async->events |= COMEDI_CB_ERROR;
- comedi_event(dev, s);
- /* stop the transfer w/o unlink */
- usbduxfast_ai_stop(dev, 0);
- return;
+ break;
default:
+ /* a real error */
dev_err(dev->class_dev,
"non-zero urb status received in ai intr context: %d\n",
urb->status);
- async->events |= COMEDI_CB_EOA;
async->events |= COMEDI_CB_ERROR;
- comedi_event(dev, s);
- usbduxfast_ai_stop(dev, 0);
- return;
- }
-
- if (!devpriv->ignore) {
- if (cmd->stop_src == TRIG_COUNT) {
- /* not continuous, fixed number of samples */
- n = urb->actual_length / sizeof(uint16_t);
- if (unlikely(devpriv->ai_sample_count < n)) {
- unsigned int num_bytes;
-
- /* partial sample received */
- num_bytes = devpriv->ai_sample_count *
- sizeof(uint16_t);
- cfc_write_array_to_buffer(s,
- urb->transfer_buffer,
- num_bytes);
- usbduxfast_ai_stop(dev, 0);
- /* tell comedi that the acquistion is over */
- async->events |= COMEDI_CB_EOA;
- comedi_event(dev, s);
- return;
- }
- devpriv->ai_sample_count -= n;
- }
- /* write the full buffer to comedi */
- err = cfc_write_array_to_buffer(s, urb->transfer_buffer,
- urb->actual_length);
- if (unlikely(err == 0)) {
- /* buffer overflow */
- usbduxfast_ai_stop(dev, 0);
- return;
- }
-
- /* tell comedi that data is there */
- comedi_event(dev, s);
- } else {
- /* ignore this packet */
- devpriv->ignore--;
+ break;
}
/*
- * command is still running
- * resubmit urb for BULK transfer
+ * comedi_handle_events() cannot be used in this driver. The (*cancel)
+ * operation would unlink the urb.
*/
- urb->dev = usb;
- urb->status = 0;
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err < 0) {
- dev_err(dev->class_dev,
- "urb resubm failed: %d", err);
- async->events |= COMEDI_CB_EOA;
- async->events |= COMEDI_CB_ERROR;
- comedi_event(dev, s);
+ if (async->events & COMEDI_CB_CANCEL_MASK)
usbduxfast_ai_stop(dev, 0);
- }
+
+ comedi_event(dev, s);
}
static int usbduxfast_submit_urb(struct comedi_device *dev)
@@ -499,8 +475,6 @@ static int usbduxfast_ai_cmd(struct comedi_device *dev,
up(&devpriv->sem);
return -EBUSY;
}
- /* set current channel of the running acquisition to zero */
- s->async->cur_chan = 0;
/*
* ignore the first buffers from the device if there
@@ -810,11 +784,6 @@ static int usbduxfast_ai_cmd(struct comedi_device *dev,
return result;
}
- if (cmd->stop_src == TRIG_COUNT)
- devpriv->ai_sample_count = cmd->stop_arg * cmd->scan_end_arg;
- else /* TRIG_NONE */
- devpriv->ai_sample_count = 0;
-
if ((cmd->start_src == TRIG_NOW) || (cmd->start_src == TRIG_EXT)) {
/* enable this acquisition operation */
devpriv->ai_cmd_running = 1;
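
usbduxfast likewise drops its private ai_sample_count: the number of samples taken from each bulk urb is derived from urb->actual_length and clamped by comedi_nsamples_left(), which already knows how many samples a TRIG_COUNT command still needs. The lines below are the same calls as in the hunk above, annotated with example numbers to show the clamp at work (stop_arg = 100 scans of one channel, 95 samples already stored, urb carrying 16 samples of 16-bit data):

	nsamples = comedi_bytes_to_samples(s, urb->actual_length);	/* 16 */
	nsamples = comedi_nsamples_left(s, nsamples);			/*  5 */
	comedi_buf_write_samples(s, urb->transfer_buffer, nsamples);
	/* scans_done is now 100, so the TRIG_COUNT test sets COMEDI_CB_EOA */
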
diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c
index ebd68e365ba..dc19435b652 100644
--- a/drivers/staging/comedi/drivers/usbduxsigma.c
+++ b/drivers/staging/comedi/drivers/usbduxsigma.c
@@ -1,6 +1,6 @@
/*
* usbduxsigma.c
- * Copyright (C) 2011 Bernd Porr, Bernd.Porr@f2s.com
+ * Copyright (C) 2011-2014 Bernd Porr, mail@berndporr.me.uk
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -17,9 +17,9 @@
* Driver: usbduxsigma
* Description: University of Stirling USB DAQ & INCITE Technology Limited
* Devices: (ITL) USB-DUX [usbduxsigma]
- * Author: Bernd Porr <BerndPorr@f2s.com>
- * Updated: 8 Nov 2011
- * Status: testing
+ * Author: Bernd Porr <mail@berndporr.me.uk>
+ * Updated: 10 Oct 2014
+ * Status: stable
*/
/*
@@ -164,9 +164,6 @@ struct usbduxsigma_private {
unsigned ao_cmd_running:1;
unsigned pwm_cmd_running:1;
- /* number of samples to acquire */
- int ai_sample_count;
- int ao_sample_count;
/* time between samples in units of the timer */
unsigned int ai_timer;
unsigned int ao_timer;
@@ -211,23 +208,75 @@ static int usbduxsigma_ai_cancel(struct comedi_device *dev,
return 0;
}
-static void usbduxsigma_ai_urb_complete(struct urb *urb)
+static void usbduxsigma_ai_handle_urb(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct urb *urb)
{
- struct comedi_device *dev = urb->context;
struct usbduxsigma_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_cmd *cmd = &s->async->cmd;
+ struct comedi_async *async = s->async;
+ struct comedi_cmd *cmd = &async->cmd;
unsigned int dio_state;
uint32_t val;
int ret;
int i;
- /* first we test if something unusual has just happened */
+ devpriv->ai_counter--;
+ if (devpriv->ai_counter == 0) {
+ devpriv->ai_counter = devpriv->ai_timer;
+
+ /* get the state of the dio pins to allow external trigger */
+ dio_state = be32_to_cpu(devpriv->in_buf[0]);
+
+ /* get the data from the USB bus and hand it over to comedi */
+ for (i = 0; i < cmd->chanlist_len; i++) {
+ /* transfer data, note first byte is the DIO state */
+ val = be32_to_cpu(devpriv->in_buf[i+1]);
+ val &= 0x00ffffff; /* strip status byte */
+ val ^= 0x00800000; /* convert to unsigned */
+
+ if (!comedi_buf_write_samples(s, &val, 1))
+ return;
+ }
+
+ if (cmd->stop_src == TRIG_COUNT &&
+ async->scans_done >= cmd->stop_arg)
+ async->events |= COMEDI_CB_EOA;
+ }
+
+ /* if command is still running, resubmit urb */
+ if (!(async->events & COMEDI_CB_CANCEL_MASK)) {
+ urb->dev = comedi_to_usb_dev(dev);
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret < 0) {
+ dev_err(dev->class_dev,
+ "%s: urb resubmit failed (%d)\n",
+ __func__, ret);
+ if (ret == -EL2NSYNC)
+ dev_err(dev->class_dev,
+ "buggy USB host controller or bug in IRQ handler\n");
+ async->events |= COMEDI_CB_ERROR;
+ }
+ }
+}
+
+static void usbduxsigma_ai_urb_complete(struct urb *urb)
+{
+ struct comedi_device *dev = urb->context;
+ struct usbduxsigma_private *devpriv = dev->private;
+ struct comedi_subdevice *s = dev->read_subdev;
+ struct comedi_async *async = s->async;
+
+ /* exit if not running a command, do not resubmit urb */
+ if (!devpriv->ai_cmd_running)
+ return;
+
switch (urb->status) {
case 0:
/* copy the result in the transfer buffer */
memcpy(devpriv->in_buf, urb->transfer_buffer, SIZEINBUF);
+ usbduxsigma_ai_handle_urb(dev, s, urb);
break;
+
case -EILSEQ:
/*
* error in the ISOchronous data
@@ -235,7 +284,7 @@ static void usbduxsigma_ai_urb_complete(struct urb *urb)
* and recycle the last data byte
*/
dev_dbg(dev->class_dev, "CRC error in ISO IN stream\n");
-
+ usbduxsigma_ai_handle_urb(dev, s, urb);
break;
case -ECONNRESET:
@@ -243,86 +292,24 @@ static void usbduxsigma_ai_urb_complete(struct urb *urb)
case -ESHUTDOWN:
case -ECONNABORTED:
/* happens after an unlink command */
- if (devpriv->ai_cmd_running) {
- usbduxsigma_ai_stop(dev, 0); /* w/o unlink */
- /* we are still running a command, tell comedi */
- s->async->events |= (COMEDI_CB_EOA | COMEDI_CB_ERROR);
- comedi_event(dev, s);
- }
- return;
+ async->events |= COMEDI_CB_ERROR;
+ break;
default:
- /*
- * a real error on the bus
- * pass error to comedi if we are really running a command
- */
- if (devpriv->ai_cmd_running) {
- dev_err(dev->class_dev,
- "%s: non-zero urb status (%d)\n",
- __func__, urb->status);
- usbduxsigma_ai_stop(dev, 0); /* w/o unlink */
- s->async->events |= (COMEDI_CB_EOA | COMEDI_CB_ERROR);
- comedi_event(dev, s);
- }
- return;
- }
-
- if (unlikely(!devpriv->ai_cmd_running))
- return;
-
- urb->dev = comedi_to_usb_dev(dev);
-
- ret = usb_submit_urb(urb, GFP_ATOMIC);
- if (unlikely(ret < 0)) {
- dev_err(dev->class_dev, "%s: urb resubmit failed (%d)\n",
- __func__, ret);
- if (ret == -EL2NSYNC)
- dev_err(dev->class_dev,
- "buggy USB host controller or bug in IRQ handler\n");
- usbduxsigma_ai_stop(dev, 0); /* w/o unlink */
- s->async->events |= (COMEDI_CB_EOA | COMEDI_CB_ERROR);
- comedi_event(dev, s);
- return;
- }
-
- /* get the state of the dio pins to allow external trigger */
- dio_state = be32_to_cpu(devpriv->in_buf[0]);
-
- devpriv->ai_counter--;
- if (likely(devpriv->ai_counter > 0))
- return;
-
- /* timer zero, transfer measurements to comedi */
- devpriv->ai_counter = devpriv->ai_timer;
-
- if (cmd->stop_src == TRIG_COUNT) {
- /* not continuous, fixed number of samples */
- devpriv->ai_sample_count--;
- if (devpriv->ai_sample_count < 0) {
- usbduxsigma_ai_stop(dev, 0); /* w/o unlink */
- /* acquistion is over, tell comedi */
- s->async->events |= COMEDI_CB_EOA;
- comedi_event(dev, s);
- return;
- }
+ /* a real error */
+ dev_err(dev->class_dev, "%s: non-zero urb status (%d)\n",
+ __func__, urb->status);
+ async->events |= COMEDI_CB_ERROR;
+ break;
}
- /* get the data from the USB bus and hand it over to comedi */
- for (i = 0; i < cmd->chanlist_len; i++) {
- /* transfer data, note first byte is the DIO state */
- val = be32_to_cpu(devpriv->in_buf[i+1]);
- val &= 0x00ffffff; /* strip status byte */
- val ^= 0x00800000; /* convert to unsigned */
+ /*
+ * comedi_handle_events() cannot be used in this driver. The (*cancel)
+ * operation would unlink the urb.
+ */
+ if (async->events & COMEDI_CB_CANCEL_MASK)
+ usbduxsigma_ai_stop(dev, 0);
- ret = cfc_write_array_to_buffer(s, &val, sizeof(uint32_t));
- if (unlikely(ret == 0)) {
- /* buffer overflow */
- usbduxsigma_ai_stop(dev, 0); /* w/o unlink */
- return;
- }
- }
- /* tell comedi that data is there */
- s->async->events |= (COMEDI_CB_BLOCK | COMEDI_CB_EOS);
comedi_event(dev, s);
}
@@ -349,64 +336,25 @@ static int usbduxsigma_ao_cancel(struct comedi_device *dev,
return 0;
}
-static void usbduxsigma_ao_urb_complete(struct urb *urb)
+static void usbduxsigma_ao_handle_urb(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct urb *urb)
{
- struct comedi_device *dev = urb->context;
struct usbduxsigma_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->write_subdev;
- struct comedi_cmd *cmd = &s->async->cmd;
+ struct comedi_async *async = s->async;
+ struct comedi_cmd *cmd = &async->cmd;
uint8_t *datap;
int ret;
int i;
- switch (urb->status) {
- case 0:
- /* success */
- break;
-
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- case -ECONNABORTED:
- /* happens after an unlink command */
- if (devpriv->ao_cmd_running) {
- usbduxsigma_ao_stop(dev, 0); /* w/o unlink */
- s->async->events |= COMEDI_CB_EOA;
- comedi_event(dev, s);
- }
- return;
-
- default:
- /* a real error */
- if (devpriv->ao_cmd_running) {
- dev_err(dev->class_dev,
- "%s: non-zero urb status (%d)\n",
- __func__, urb->status);
- usbduxsigma_ao_stop(dev, 0); /* w/o unlink */
- s->async->events |= (COMEDI_CB_ERROR | COMEDI_CB_EOA);
- comedi_event(dev, s);
- }
- return;
- }
-
- if (!devpriv->ao_cmd_running)
- return;
-
devpriv->ao_counter--;
- if ((int)devpriv->ao_counter <= 0) {
- /* timer zero, transfer from comedi */
+ if (devpriv->ao_counter == 0) {
devpriv->ao_counter = devpriv->ao_timer;
- if (cmd->stop_src == TRIG_COUNT) {
- /* not continuous, fixed number of samples */
- devpriv->ao_sample_count--;
- if (devpriv->ao_sample_count < 0) {
- usbduxsigma_ao_stop(dev, 0); /* w/o unlink */
- /* acquistion is over, tell comedi */
- s->async->events |= COMEDI_CB_EOA;
- comedi_event(dev, s);
- return;
- }
+ if (cmd->stop_src == TRIG_COUNT &&
+ async->scans_done >= cmd->stop_arg) {
+ async->events |= COMEDI_CB_EOA;
+ return;
}
/* transmit data to the USB bus */
@@ -416,46 +364,86 @@ static void usbduxsigma_ao_urb_complete(struct urb *urb)
unsigned int chan = CR_CHAN(cmd->chanlist[i]);
unsigned short val;
- ret = comedi_buf_get(s, &val);
- if (ret < 0) {
+ if (!comedi_buf_read_samples(s, &val, 1)) {
dev_err(dev->class_dev, "buffer underflow\n");
- s->async->events |= (COMEDI_CB_EOA |
- COMEDI_CB_OVERFLOW);
+ async->events |= COMEDI_CB_OVERFLOW;
+ return;
}
+
*datap++ = val;
*datap++ = chan;
s->readback[chan] = val;
-
- s->async->events |= COMEDI_CB_BLOCK;
- comedi_event(dev, s);
}
}
- urb->transfer_buffer_length = SIZEOUTBUF;
- urb->dev = comedi_to_usb_dev(dev);
- urb->status = 0;
- if (devpriv->high_speed)
- urb->interval = 8; /* uframes */
- else
- urb->interval = 1; /* frames */
- urb->number_of_packets = 1;
- urb->iso_frame_desc[0].offset = 0;
- urb->iso_frame_desc[0].length = SIZEOUTBUF;
- urb->iso_frame_desc[0].status = 0;
- ret = usb_submit_urb(urb, GFP_ATOMIC);
- if (ret < 0) {
- dev_err(dev->class_dev,
- "%s: urb resubmit failed (%d)\n",
- __func__, ret);
- if (ret == -EL2NSYNC)
+ /* if command is still running, resubmit urb */
+ if (!(async->events & COMEDI_CB_CANCEL_MASK)) {
+ urb->transfer_buffer_length = SIZEOUTBUF;
+ urb->dev = comedi_to_usb_dev(dev);
+ urb->status = 0;
+ if (devpriv->high_speed)
+ urb->interval = 8; /* uframes */
+ else
+ urb->interval = 1; /* frames */
+ urb->number_of_packets = 1;
+ urb->iso_frame_desc[0].offset = 0;
+ urb->iso_frame_desc[0].length = SIZEOUTBUF;
+ urb->iso_frame_desc[0].status = 0;
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret < 0) {
dev_err(dev->class_dev,
- "buggy USB host controller or bug in IRQ handler\n");
- usbduxsigma_ao_stop(dev, 0); /* w/o unlink */
- s->async->events |= (COMEDI_CB_EOA | COMEDI_CB_ERROR);
- comedi_event(dev, s);
+ "%s: urb resubmit failed (%d)\n",
+ __func__, ret);
+ if (ret == -EL2NSYNC)
+ dev_err(dev->class_dev,
+ "buggy USB host controller or bug in IRQ handler\n");
+ async->events |= COMEDI_CB_ERROR;
+ }
}
}
+static void usbduxsigma_ao_urb_complete(struct urb *urb)
+{
+ struct comedi_device *dev = urb->context;
+ struct usbduxsigma_private *devpriv = dev->private;
+ struct comedi_subdevice *s = dev->write_subdev;
+ struct comedi_async *async = s->async;
+
+ /* exit if not running a command, do not resubmit urb */
+ if (!devpriv->ao_cmd_running)
+ return;
+
+ switch (urb->status) {
+ case 0:
+ usbduxsigma_ao_handle_urb(dev, s, urb);
+ break;
+
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ case -ECONNABORTED:
+ /* happens after an unlink command */
+ async->events |= COMEDI_CB_ERROR;
+ break;
+
+ default:
+ /* a real error */
+ dev_err(dev->class_dev, "%s: non-zero urb status (%d)\n",
+ __func__, urb->status);
+ async->events |= COMEDI_CB_ERROR;
+ break;
+ }
+
+ /*
+ * comedi_handle_events() cannot be used in this driver. The (*cancel)
+ * operation would unlink the urb.
+ */
+ if (async->events & COMEDI_CB_CANCEL_MASK)
+ usbduxsigma_ao_stop(dev, 0);
+
+ comedi_event(dev, s);
+}
+
static int usbduxsigma_submit_urbs(struct comedi_device *dev,
struct urb **urbs, int num_urbs,
int input_urb)
@@ -584,14 +572,6 @@ static int usbduxsigma_ai_cmdtest(struct comedi_device *dev,
if (devpriv->ai_timer < 1)
err |= -EINVAL;
- if (cmd->stop_src == TRIG_COUNT) {
- /* data arrives as one packet */
- devpriv->ai_sample_count = cmd->stop_arg;
- } else {
- /* continuous acquisition */
- devpriv->ai_sample_count = 0;
- }
-
if (err)
return 4;
@@ -692,8 +672,6 @@ static int usbduxsigma_ai_cmd(struct comedi_device *dev,
down(&devpriv->sem);
- /* set current channel of the running acquisition to zero */
- s->async->cur_chan = 0;
for (i = 0; i < len; i++) {
unsigned int chan = CR_CHAN(cmd->chanlist[i]);
@@ -957,25 +935,6 @@ static int usbduxsigma_ao_cmdtest(struct comedi_device *dev,
if (devpriv->ao_timer < 1)
err |= -EINVAL;
- if (cmd->stop_src == TRIG_COUNT) {
- /* not continuous, use counter */
- if (high_speed) {
- /* high speed also scans everything at once */
- devpriv->ao_sample_count = cmd->stop_arg *
- cmd->scan_end_arg;
- } else {
- /*
- * There's no scan as the scan has been
- * handled inside the FX2. Data arrives as
- * one packet.
- */
- devpriv->ao_sample_count = cmd->stop_arg;
- }
- } else {
- /* continuous acquisition */
- devpriv->ao_sample_count = 0;
- }
-
if (err)
return 4;
@@ -991,9 +950,6 @@ static int usbduxsigma_ao_cmd(struct comedi_device *dev,
down(&devpriv->sem);
- /* set current channel of the running acquisition to zero */
- s->async->cur_chan = 0;
-
devpriv->ao_counter = devpriv->ao_timer;
if (cmd->start_src == TRIG_NOW) {
diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c
index 71003416edc..a19a56ee0ee 100644
--- a/drivers/staging/comedi/drivers/vmk80xx.c
+++ b/drivers/staging/comedi/drivers/vmk80xx.c
@@ -797,7 +797,7 @@ static int vmk80xx_init_subdevices(struct comedi_device *dev)
/* Analog output subdevice */
s = &dev->subdevices[1];
s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITEABLE | SDF_GROUND;
+ s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
s->n_chan = boardinfo->ao_nchans;
s->maxdata = 0x00ff;
s->range_table = boardinfo->range;
@@ -819,7 +819,7 @@ static int vmk80xx_init_subdevices(struct comedi_device *dev)
/* Digital output subdevice */
s = &dev->subdevices[3];
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITEABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 8;
s->maxdata = 1;
s->range_table = &range_digital;
@@ -834,7 +834,7 @@ static int vmk80xx_init_subdevices(struct comedi_device *dev)
s->insn_read = vmk80xx_cnt_insn_read;
s->insn_config = vmk80xx_cnt_insn_config;
if (devpriv->model == VMK8055_MODEL) {
- s->subdev_flags |= SDF_WRITEABLE;
+ s->subdev_flags |= SDF_WRITABLE;
s->insn_write = vmk80xx_cnt_insn_write;
}
@@ -842,7 +842,7 @@ static int vmk80xx_init_subdevices(struct comedi_device *dev)
if (devpriv->model == VMK8061_MODEL) {
s = &dev->subdevices[5];
s->type = COMEDI_SUBD_PWM;
- s->subdev_flags = SDF_READABLE | SDF_WRITEABLE;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
s->n_chan = boardinfo->pwm_nchans;
s->maxdata = boardinfo->pwm_maxdata;
s->insn_read = vmk80xx_pwm_insn_read;
diff --git a/drivers/staging/comedi/range.c b/drivers/staging/comedi/range.c
index b6849545b81..9a1dc56f21d 100644
--- a/drivers/staging/comedi/range.c
+++ b/drivers/staging/comedi/range.c
@@ -97,39 +97,6 @@ int do_rangeinfo_ioctl(struct comedi_device *dev,
return 0;
}
-static int aref_invalid(struct comedi_subdevice *s, unsigned int chanspec)
-{
- unsigned int aref;
-
- /* disable reporting invalid arefs... maybe someday */
- return 0;
-
- aref = CR_AREF(chanspec);
- switch (aref) {
- case AREF_DIFF:
- if (s->subdev_flags & SDF_DIFF)
- return 0;
- break;
- case AREF_COMMON:
- if (s->subdev_flags & SDF_COMMON)
- return 0;
- break;
- case AREF_GROUND:
- if (s->subdev_flags & SDF_GROUND)
- return 0;
- break;
- case AREF_OTHER:
- if (s->subdev_flags & SDF_OTHER)
- return 0;
- break;
- default:
- break;
- }
- dev_dbg(s->device->class_dev, "subdevice does not support aref %i",
- aref);
- return 1;
-}
-
/**
* comedi_check_chanlist() - Validate each element in a chanlist.
* @s: comedi_subdevice struct
@@ -153,8 +120,7 @@ int comedi_check_chanlist(struct comedi_subdevice *s, int n,
else
range_len = 0;
if (chan >= s->n_chan ||
- CR_RANGE(chanspec) >= range_len ||
- aref_invalid(s, chanspec)) {
+ CR_RANGE(chanspec) >= range_len) {
dev_warn(dev->class_dev,
"bad chanlist[%d]=0x%08x chan=%d range length=%d\n",
i, chanspec, chan, range_len);
diff --git a/drivers/staging/cptm1217/clearpad_tm1217.c b/drivers/staging/cptm1217/clearpad_tm1217.c
index edf9ff2ea25..7f265ce0dd1 100644
--- a/drivers/staging/cptm1217/clearpad_tm1217.c
+++ b/drivers/staging/cptm1217/clearpad_tm1217.c
@@ -278,7 +278,7 @@ static void cp_tm1217_get_data(struct cp_tm1217_device *ts)
static irqreturn_t cp_tm1217_sample_thread(int irq, void *handle)
{
- struct cp_tm1217_device *ts = (struct cp_tm1217_device *) handle;
+ struct cp_tm1217_device *ts = handle;
u8 req[2];
int retval;
diff --git a/drivers/staging/dgap/dgap.c b/drivers/staging/dgap/dgap.c
index d0be1cebf5a..bdb5317e3d9 100644
--- a/drivers/staging/dgap/dgap.c
+++ b/drivers/staging/dgap/dgap.c
@@ -65,144 +65,6 @@
#include "dgap.h"
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Digi International, http://www.digi.com");
-MODULE_DESCRIPTION("Driver for the Digi International EPCA PCI based product line");
-MODULE_SUPPORTED_DEVICE("dgap");
-
-static int dgap_start(void);
-static void dgap_init_globals(void);
-static struct board_t *dgap_found_board(struct pci_dev *pdev, int id,
- int boardnum);
-static void dgap_cleanup_board(struct board_t *brd);
-static void dgap_poll_handler(ulong dummy);
-static int dgap_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
-static void dgap_remove_one(struct pci_dev *dev);
-static int dgap_do_remap(struct board_t *brd);
-static void dgap_release_remap(struct board_t *brd);
-static irqreturn_t dgap_intr(int irq, void *voidbrd);
-
-static int dgap_tty_open(struct tty_struct *tty, struct file *file);
-static void dgap_tty_close(struct tty_struct *tty, struct file *file);
-static int dgap_block_til_ready(struct tty_struct *tty, struct file *file,
- struct channel_t *ch);
-static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
- unsigned long arg);
-static int dgap_tty_digigeta(struct channel_t *ch,
- struct digi_t __user *retinfo);
-static int dgap_tty_digiseta(struct channel_t *ch, struct board_t *bd,
- struct un_t *un, struct digi_t __user *new_info);
-static int dgap_tty_digigetedelay(struct tty_struct *tty, int __user *retinfo);
-static int dgap_tty_digisetedelay(struct channel_t *ch, struct board_t *bd,
- struct un_t *un, int __user *new_info);
-static int dgap_tty_write_room(struct tty_struct *tty);
-static int dgap_tty_chars_in_buffer(struct tty_struct *tty);
-static void dgap_tty_start(struct tty_struct *tty);
-static void dgap_tty_stop(struct tty_struct *tty);
-static void dgap_tty_throttle(struct tty_struct *tty);
-static void dgap_tty_unthrottle(struct tty_struct *tty);
-static void dgap_tty_flush_chars(struct tty_struct *tty);
-static void dgap_tty_flush_buffer(struct tty_struct *tty);
-static void dgap_tty_hangup(struct tty_struct *tty);
-static int dgap_wait_for_drain(struct tty_struct *tty);
-static int dgap_set_modem_info(struct channel_t *ch, struct board_t *bd,
- struct un_t *un, unsigned int command,
- unsigned int __user *value);
-static int dgap_get_modem_info(struct channel_t *ch,
- unsigned int __user *value);
-static int dgap_tty_digisetcustombaud(struct channel_t *ch, struct board_t *bd,
- struct un_t *un, int __user *new_info);
-static int dgap_tty_digigetcustombaud(struct channel_t *ch, struct un_t *un,
- int __user *retinfo);
-static int dgap_tty_tiocmget(struct tty_struct *tty);
-static int dgap_tty_tiocmset(struct tty_struct *tty, unsigned int set,
- unsigned int clear);
-static int dgap_tty_send_break(struct tty_struct *tty, int msec);
-static void dgap_tty_wait_until_sent(struct tty_struct *tty, int timeout);
-static int dgap_tty_write(struct tty_struct *tty, const unsigned char *buf,
- int count);
-static void dgap_tty_set_termios(struct tty_struct *tty,
- struct ktermios *old_termios);
-static int dgap_tty_put_char(struct tty_struct *tty, unsigned char c);
-static void dgap_tty_send_xchar(struct tty_struct *tty, char ch);
-
-static int dgap_tty_register(struct board_t *brd);
-static void dgap_tty_unregister(struct board_t *brd);
-static int dgap_tty_init(struct board_t *);
-static void dgap_tty_free(struct board_t *);
-static void dgap_cleanup_tty(struct board_t *);
-static void dgap_carrier(struct channel_t *ch);
-static void dgap_input(struct channel_t *ch);
-
-/*
- * Our function prototypes from dgap_fep5
- */
-static void dgap_cmdw_ext(struct channel_t *ch, u16 cmd, u16 word, uint ncmds);
-static int dgap_event(struct board_t *bd);
-
-static void dgap_poll_tasklet(unsigned long data);
-static void dgap_cmdb(struct channel_t *ch, u8 cmd, u8 byte1,
- u8 byte2, uint ncmds);
-static void dgap_cmdw(struct channel_t *ch, u8 cmd, u16 word, uint ncmds);
-static void dgap_wmove(struct channel_t *ch, char *buf, uint cnt);
-static int dgap_param(struct channel_t *ch, struct board_t *bd, u32 un_type);
-static void dgap_parity_scan(struct channel_t *ch, unsigned char *cbuf,
- unsigned char *fbuf, int *len);
-static uint dgap_get_custom_baud(struct channel_t *ch);
-static void dgap_firmware_reset_port(struct channel_t *ch);
-
-/*
- * Function prototypes from dgap_parse.c.
- */
-static int dgap_gettok(char **in);
-static char *dgap_getword(char **in);
-static int dgap_checknode(struct cnode *p);
-
-/*
- * Function prototypes from dgap_sysfs.h
- */
-static void dgap_create_ports_sysfiles(struct board_t *bd);
-static void dgap_remove_ports_sysfiles(struct board_t *bd);
-
-static int dgap_create_driver_sysfiles(struct pci_driver *);
-static void dgap_remove_driver_sysfiles(struct pci_driver *);
-
-static void dgap_create_tty_sysfs(struct un_t *un, struct device *c);
-static void dgap_remove_tty_sysfs(struct device *c);
-
-/*
- * Function prototypes from dgap_parse.h
- */
-static int dgap_parsefile(char **in);
-static struct cnode *dgap_find_config(int type, int bus, int slot);
-static uint dgap_config_get_num_prts(struct board_t *bd);
-static char *dgap_create_config_string(struct board_t *bd, char *string);
-static uint dgap_config_get_useintr(struct board_t *bd);
-static uint dgap_config_get_altpin(struct board_t *bd);
-
-static void dgap_do_bios_load(struct board_t *brd, const u8 *ubios, int len);
-static void dgap_do_fep_load(struct board_t *brd, const u8 *ufep, int len);
-#ifdef DIGI_CONCENTRATORS_SUPPORTED
-static void dgap_do_conc_load(struct board_t *brd, u8 *uaddr, int len);
-#endif
-static int dgap_alloc_flipbuf(struct board_t *brd);
-static void dgap_free_flipbuf(struct board_t *brd);
-static int dgap_request_irq(struct board_t *brd);
-static void dgap_free_irq(struct board_t *brd);
-
-static void dgap_get_vpd(struct board_t *brd);
-static void dgap_do_reset_board(struct board_t *brd);
-static int dgap_test_bios(struct board_t *brd);
-static int dgap_test_fep(struct board_t *brd);
-static int dgap_tty_register_ports(struct board_t *brd);
-static int dgap_firmware_load(struct pci_dev *pdev, int card_type,
- struct board_t *brd);
-static void dgap_cleanup_nodes(void);
-
-static void dgap_cleanup_module(void);
-
-module_exit(dgap_cleanup_module);
-
/*
* File operations permitted on Control/Management major.
*/
@@ -218,7 +80,6 @@ static int dgap_poll_tick = 20; /* Poll interval - 20 ms */
static struct class *dgap_class;
-static struct board_t *dgap_boards_by_major[256];
static uint dgap_count = 500;
/*
@@ -297,13 +158,6 @@ static struct board_id dgap_ids[] = {
{0,} /* 0 terminated list. */
};
-static struct pci_driver dgap_driver = {
- .name = "dgap",
- .probe = dgap_init_one,
- .id_table = dgap_pci_tbl,
- .remove = dgap_remove_one,
-};
-
struct firmware_info {
u8 *conf_name; /* dgap.conf */
u8 *bios_name; /* BIOS filename */
@@ -366,29 +220,6 @@ static struct ktermios dgap_default_termios = {
.c_line = 0,
};
-static const struct tty_operations dgap_tty_ops = {
- .open = dgap_tty_open,
- .close = dgap_tty_close,
- .write = dgap_tty_write,
- .write_room = dgap_tty_write_room,
- .flush_buffer = dgap_tty_flush_buffer,
- .chars_in_buffer = dgap_tty_chars_in_buffer,
- .flush_chars = dgap_tty_flush_chars,
- .ioctl = dgap_tty_ioctl,
- .set_termios = dgap_tty_set_termios,
- .stop = dgap_tty_stop,
- .start = dgap_tty_start,
- .throttle = dgap_tty_throttle,
- .unthrottle = dgap_tty_unthrottle,
- .hangup = dgap_tty_hangup,
- .put_char = dgap_tty_put_char,
- .tiocmget = dgap_tty_tiocmget,
- .tiocmset = dgap_tty_tiocmset,
- .break_ctl = dgap_tty_send_break,
- .wait_until_sent = dgap_tty_wait_until_sent,
- .send_xchar = dgap_tty_send_xchar
-};
-
/*
* Our needed internal static variables from dgap_parse.c
*/
@@ -456,1078 +287,1226 @@ static struct toklist dgap_tlist[] = {
{ 0, NULL }
};
-/************************************************************************
- *
- * Driver load/unload functions
- *
- ************************************************************************/
/*
- * init_module()
- *
- * Module load. This is where it all starts.
+ * dgap_sindex: much like index(), but it looks for a match of any character in
+ * the group, and returns that position. If the first character is a ^, then
+ * this will match the first occurrence not in that group.
*/
-static int dgap_init_module(void)
+static char *dgap_sindex(char *string, char *group)
{
- int rc;
+ char *ptr;
- pr_info("%s, Digi International Part Number %s\n", DG_NAME, DG_PART);
+ if (!string || !group)
+ return NULL;
- rc = dgap_start();
- if (rc)
- return rc;
+ if (*group == '^') {
+ group++;
+ for (; *string; string++) {
+ for (ptr = group; *ptr; ptr++) {
+ if (*ptr == *string)
+ break;
+ }
+ if (*ptr == '\0')
+ return string;
+ }
+ } else {
+ for (; *string; string++) {
+ for (ptr = group; *ptr; ptr++) {
+ if (*ptr == *string)
+ return string;
+ }
+ }
+ }
- rc = pci_register_driver(&dgap_driver);
- if (rc)
- goto err_cleanup;
+ return NULL;
+}
- rc = dgap_create_driver_sysfiles(&dgap_driver);
- if (rc)
- goto err_cleanup;
+/*
+ * get a word from the input stream, also keep track of current line number.
+ * words are separated by whitespace.
+ */
+static char *dgap_getword(char **in)
+{
+ char *ret_ptr = *in;
- dgap_driver_state = DRIVER_READY;
+ char *ptr = dgap_sindex(*in, " \t\n");
- return 0;
+ /* If no word found, return null */
+ if (!ptr)
+ return NULL;
-err_cleanup:
+ /* Mark new location for our buffer */
+ *ptr = '\0';
+ *in = ptr + 1;
- dgap_cleanup_module();
+ /* Eat any extra spaces/tabs/newlines that might be present */
+ while (*in && **in && ((**in == ' ') ||
+ (**in == '\t') ||
+ (**in == '\n'))) {
+ **in = '\0';
+ *in = *in + 1;
+ }
- return rc;
+ return ret_ptr;
}
-module_init(dgap_init_module);
+
/*
- * Start of driver.
+ * Get a token from the input file; return 0 if end of file is reached
*/
-static int dgap_start(void)
+static int dgap_gettok(char **in)
{
- int rc;
- unsigned long flags;
- struct device *device;
-
- /*
- * make sure that the globals are
- * init'd before we do anything else
- */
- dgap_init_globals();
-
- dgap_numboards = 0;
+ char *w;
+ struct toklist *t;
- pr_info("For the tools package please visit http://www.digi.com\n");
+ if (strstr(dgap_cword, "board")) {
+ w = dgap_getword(in);
+ snprintf(dgap_cword, MAXCWORD, "%s", w);
+ for (t = dgap_brdtype; t->token != 0; t++) {
+ if (!strcmp(w, t->string))
+ return t->token;
+ }
+ } else {
+ while ((w = dgap_getword(in))) {
+ snprintf(dgap_cword, MAXCWORD, "%s", w);
+ for (t = dgap_tlist; t->token != 0; t++) {
+ if (!strcmp(w, t->string))
+ return t->token;
+ }
+ }
+ }
- /*
- * Register our base character device into the kernel.
- */
+ return 0;
+}
- /*
- * Register management/dpa devices
- */
- rc = register_chrdev(DIGI_DGAP_MAJOR, "dgap", &dgap_board_fops);
- if (rc < 0)
- return rc;
+/*
+ * dgap_checknode: see if all the necessary info has been supplied for a node
+ * before creating the next node.
+ */
+static int dgap_checknode(struct cnode *p)
+{
+ switch (p->type) {
+ case LNODE:
+ if (p->u.line.v_speed == 0) {
+ pr_err("line speed not specified");
+ return 1;
+ }
+ return 0;
- dgap_class = class_create(THIS_MODULE, "dgap_mgmt");
- if (IS_ERR(dgap_class)) {
- rc = PTR_ERR(dgap_class);
- goto failed_class;
- }
+ case CNODE:
+ if (p->u.conc.v_speed == 0) {
+ pr_err("concentrator line speed not specified");
+ return 1;
+ }
+ if (p->u.conc.v_nport == 0) {
+ pr_err("number of ports on concentrator not specified");
+ return 1;
+ }
+ if (p->u.conc.v_id == 0) {
+ pr_err("concentrator id letter not specified");
+ return 1;
+ }
+ return 0;
- device = device_create(dgap_class, NULL,
- MKDEV(DIGI_DGAP_MAJOR, 0),
- NULL, "dgap_mgmt");
- if (IS_ERR(device)) {
- rc = PTR_ERR(device);
- goto failed_device;
+ case MNODE:
+ if (p->u.module.v_nport == 0) {
+ pr_err("number of ports on EBI module not specified");
+ return 1;
+ }
+ if (p->u.module.v_id == 0) {
+ pr_err("EBI module id letter not specified");
+ return 1;
+ }
+ return 0;
}
+ return 0;
+}
- /* Start the poller */
- spin_lock_irqsave(&dgap_poll_lock, flags);
- init_timer(&dgap_poll_timer);
- dgap_poll_timer.function = dgap_poll_handler;
- dgap_poll_timer.data = 0;
- dgap_poll_time = jiffies + dgap_jiffies_from_ms(dgap_poll_tick);
- dgap_poll_timer.expires = dgap_poll_time;
- spin_unlock_irqrestore(&dgap_poll_lock, flags);
+/*
+ * Given a board pointer, returns whether we should use interrupts or not.
+ */
+static uint dgap_config_get_useintr(struct board_t *bd)
+{
+ struct cnode *p;
- add_timer(&dgap_poll_timer);
+ if (!bd)
+ return 0;
- return rc;
+ for (p = bd->bd_config; p; p = p->next) {
+ if (p->type == INTRNODE) {
+ /*
+ * check for pcxr types.
+ */
+ return p->u.useintr;
+ }
+ }
-failed_device:
- class_destroy(dgap_class);
-failed_class:
- unregister_chrdev(DIGI_DGAP_MAJOR, "dgap");
- return rc;
+ /* If not found, then don't turn on interrupts. */
+ return 0;
}
-static int dgap_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+/*
+ * Given a board pointer, returns whether we turn on altpin or not.
+ */
+static uint dgap_config_get_altpin(struct board_t *bd)
{
- int rc;
- struct board_t *brd;
+ struct cnode *p;
- if (dgap_numboards >= MAXBOARDS)
- return -EPERM;
+ if (!bd)
+ return 0;
- rc = pci_enable_device(pdev);
- if (rc)
- return -EIO;
+ for (p = bd->bd_config; p; p = p->next) {
+ if (p->type == ANODE) {
+ /*
+ * check for pcxr types.
+ */
+ return p->u.altpin;
+ }
+ }
- brd = dgap_found_board(pdev, ent->driver_data, dgap_numboards);
- if (IS_ERR(brd))
- return PTR_ERR(brd);
+ /* If not found, then don't turn on interrupts. */
+ return 0;
+}
- rc = dgap_firmware_load(pdev, ent->driver_data, brd);
- if (rc)
- goto cleanup_brd;
+/*
+ * Given a specific type of board, if found, detached link and
+ * returns the first occurrence in the list.
+ */
+static struct cnode *dgap_find_config(int type, int bus, int slot)
+{
+ struct cnode *p, *prev, *prev2, *found;
- rc = dgap_alloc_flipbuf(brd);
- if (rc)
- goto cleanup_brd;
+ p = &dgap_head;
- rc = dgap_tty_register(brd);
- if (rc)
- goto free_flipbuf;
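+ /*
+ * Walk the global config list looking for a board node that
+ * matches the requested type, PCI bus and PCI slot.
+ */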
+ while (p->next) {
+ prev = p;
+ p = p->next;
- rc = dgap_request_irq(brd);
- if (rc)
- goto unregister_tty;
+ if (p->type != BNODE)
+ continue;
- /*
- * Do tty device initialization.
- */
- rc = dgap_tty_init(brd);
- if (rc < 0)
- goto free_irq;
+ if (p->u.board.type != type)
+ continue;
- rc = dgap_tty_register_ports(brd);
- if (rc)
- goto tty_free;
+ if (p->u.board.v_pcibus &&
+ p->u.board.pcibus != bus)
+ continue;
- brd->state = BOARD_READY;
- brd->dpastatus = BD_RUNNING;
+ if (p->u.board.v_pcislot &&
+ p->u.board.pcislot != slot)
+ continue;
- dgap_board[dgap_numboards++] = brd;
+ found = p;
+ /*
+ * Keep walking thru the list till we
+ * find the next board.
+ */
+ while (p->next) {
+ prev2 = p;
+ p = p->next;
- return 0;
+ if (p->type != BNODE)
+ continue;
-tty_free:
- dgap_tty_free(brd);
-free_irq:
- dgap_free_irq(brd);
-unregister_tty:
- dgap_tty_unregister(brd);
-free_flipbuf:
- dgap_free_flipbuf(brd);
-cleanup_brd:
- dgap_cleanup_nodes();
- dgap_release_remap(brd);
- kfree(brd);
+ /*
+ * Mark the end of our 1 board
+ * chain of configs.
+ */
+ prev2->next = NULL;
- return rc;
-}
+ /*
+ * Link the "next" board to the
+ * previous board, effectively
+ * "unlinking" our board from
+ * the main config.
+ */
+ prev->next = p;
-static void dgap_remove_one(struct pci_dev *dev)
-{
- /* Do Nothing */
+ return found;
+ }
+ /*
+ * It must be the last board in the list.
+ */
+ prev->next = NULL;
+ return found;
+ }
+ return NULL;
}
/*
- * dgap_cleanup_module()
- *
- * Module unload. This is where it all ends.
+ * Given a board pointer, walks the config link, counting up
+ * all the ports the user specified should be on the board.
+ * (This does NOT mean they are all actually present right now.)
*/
-static void dgap_cleanup_module(void)
+static uint dgap_config_get_num_prts(struct board_t *bd)
{
- int i;
- ulong lock_flags;
-
- spin_lock_irqsave(&dgap_poll_lock, lock_flags);
- dgap_poll_stop = 1;
- spin_unlock_irqrestore(&dgap_poll_lock, lock_flags);
-
- /* Turn off poller right away. */
- del_timer_sync(&dgap_poll_timer);
+ int count = 0;
+ struct cnode *p;
- dgap_remove_driver_sysfiles(&dgap_driver);
+ if (!bd)
+ return 0;
- device_destroy(dgap_class, MKDEV(DIGI_DGAP_MAJOR, 0));
- class_destroy(dgap_class);
- unregister_chrdev(DIGI_DGAP_MAJOR, "dgap");
+ for (p = bd->bd_config; p; p = p->next) {
- for (i = 0; i < dgap_numboards; ++i) {
- dgap_remove_ports_sysfiles(dgap_board[i]);
- dgap_cleanup_tty(dgap_board[i]);
- dgap_cleanup_board(dgap_board[i]);
+ switch (p->type) {
+ case BNODE:
+ /*
+ * check for pcxr types.
+ */
+ if (p->u.board.type > EPCFE)
+ count += p->u.board.nport;
+ break;
+ case CNODE:
+ count += p->u.conc.nport;
+ break;
+ case MNODE:
+ count += p->u.module.nport;
+ break;
+ }
}
-
- dgap_cleanup_nodes();
-
- if (dgap_numboards)
- pci_unregister_driver(&dgap_driver);
+ return count;
}
-/*
- * dgap_cleanup_board()
- *
- * Free all the memory associated with a board
- */
-static void dgap_cleanup_board(struct board_t *brd)
+static char *dgap_create_config_string(struct board_t *bd, char *string)
{
- int i;
-
- if (!brd || brd->magic != DGAP_BOARD_MAGIC)
- return;
-
- dgap_free_irq(brd);
-
- tasklet_kill(&brd->helper_tasklet);
+ char *ptr = string;
+ struct cnode *p;
+ struct cnode *q;
+ int speed;
- dgap_release_remap(brd);
+ if (!bd) {
+ *ptr = 0xff;
+ return string;
+ }
- /* Free all allocated channels structs */
- for (i = 0; i < MAXPORTS ; i++)
- kfree(brd->channels[i]);
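+ /*
+ * Walk the config list and emit one entry per line: a zero byte and
+ * the speed for sync lines, or the port counts (intermediate counts
+ * flagged with 0x80 when EM modules follow) and the speed for
+ * concentrators. The string is terminated with 0xff.
+ */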
+ for (p = bd->bd_config; p; p = p->next) {
- kfree(brd->flipbuf);
- kfree(brd->flipflagbuf);
+ switch (p->type) {
+ case LNODE:
+ *ptr = '\0';
+ ptr++;
+ *ptr = p->u.line.speed;
+ ptr++;
+ break;
+ case CNODE:
+ /*
+ * Because the EPC/con concentrators can have EM modules
+ * hanging off of them, we have to walk ahead in the
+ * list and keep adding the number of ports on each EM
+ * to the config. UGH!
+ */
+ speed = p->u.conc.speed;
+ q = p->next;
+ if (q && (q->type == MNODE)) {
+ *ptr = (p->u.conc.nport + 0x80);
+ ptr++;
+ p = q;
+ while (q->next && (q->next->type) == MNODE) {
+ *ptr = (q->u.module.nport + 0x80);
+ ptr++;
+ p = q;
+ q = q->next;
+ }
+ *ptr = q->u.module.nport;
+ ptr++;
+ } else {
+ *ptr = p->u.conc.nport;
+ ptr++;
+ }
- dgap_board[brd->boardnum] = NULL;
+ *ptr = speed;
+ ptr++;
+ break;
+ }
+ }
- kfree(brd);
+ *ptr = 0xff;
+ return string;
}
/*
- * dgap_found_board()
- *
- * A board has been found, init it.
+ * Parse a configuration file read into memory as a string.
*/
-static struct board_t *dgap_found_board(struct pci_dev *pdev, int id,
- int boardnum)
+static int dgap_parsefile(char **in)
{
- struct board_t *brd;
- unsigned int pci_irq;
- int i;
- int ret;
-
- /* get the board structure and prep it */
- brd = kzalloc(sizeof(struct board_t), GFP_KERNEL);
- if (!brd)
- return ERR_PTR(-ENOMEM);
+ struct cnode *p, *brd, *line, *conc;
+ int rc;
+ char *s;
+ int linecnt = 0;
- /* store the info for the board we've found */
- brd->magic = DGAP_BOARD_MAGIC;
- brd->boardnum = boardnum;
- brd->vendor = dgap_pci_tbl[id].vendor;
- brd->device = dgap_pci_tbl[id].device;
- brd->pdev = pdev;
- brd->pci_bus = pdev->bus->number;
- brd->pci_slot = PCI_SLOT(pdev->devfn);
- brd->name = dgap_ids[id].name;
- brd->maxports = dgap_ids[id].maxports;
- brd->type = dgap_ids[id].config_type;
- brd->dpatype = dgap_ids[id].dpatype;
- brd->dpastatus = BD_NOFEP;
- init_waitqueue_head(&brd->state_wait);
+ p = &dgap_head;
+ brd = line = conc = NULL;
- spin_lock_init(&brd->bd_lock);
+ /* perhaps we are adding to an existing list? */
+ while (p->next)
+ p = p->next;
- brd->inhibit_poller = FALSE;
- brd->wait_for_bios = 0;
- brd->wait_for_fep = 0;
+ /* file must start with a BEGIN */
+ while ((rc = dgap_gettok(in)) != BEGIN) {
+ if (rc == 0) {
+ pr_err("unexpected EOF");
+ return -1;
+ }
+ }
- for (i = 0; i < MAXPORTS; i++)
- brd->channels[i] = NULL;
+ for (; ;) {
+ int board_type = 0;
+ int conc_type = 0;
+ int module_type = 0;
- /* store which card & revision we have */
- pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &brd->subvendor);
- pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &brd->subdevice);
- pci_read_config_byte(pdev, PCI_REVISION_ID, &brd->rev);
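+ /* Read the next token and dispatch on it. */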
+ rc = dgap_gettok(in);
+ if (rc == 0) {
+ pr_err("unexpected EOF");
+ return -1;
+ }
- pci_irq = pdev->irq;
- brd->irq = pci_irq;
+ switch (rc) {
+ case BEGIN: /* should only be 1 begin */
+ pr_err("unexpected config_begin\n");
+ return -1;
- /* get the PCI Base Address Registers */
+ case END:
+ return 0;
- /* Xr Jupiter and EPC use BAR 2 */
- if (brd->device == PCI_DEV_XRJ_DID || brd->device == PCI_DEV_EPCJ_DID) {
- brd->membase = pci_resource_start(pdev, 2);
- brd->membase_end = pci_resource_end(pdev, 2);
- }
- /* Everyone else uses BAR 0 */
- else {
- brd->membase = pci_resource_start(pdev, 0);
- brd->membase_end = pci_resource_end(pdev, 0);
- }
+ case BOARD: /* board info */
+ if (dgap_checknode(p))
+ return -1;
- if (!brd->membase) {
- ret = -ENODEV;
- goto free_brd;
- }
+ p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
+ if (!p->next)
+ return -1;
- if (brd->membase & 1)
- brd->membase &= ~3;
- else
- brd->membase &= ~15;
+ p = p->next;
- /*
- * On the PCI boards, there is no IO space allocated
- * The I/O registers will be in the first 3 bytes of the
- * upper 2MB of the 4MB memory space. The board memory
- * will be mapped into the low 2MB of the 4MB memory space
- */
- brd->port = brd->membase + PCI_IO_OFFSET;
- brd->port_end = brd->port + PCI_IO_SIZE;
+ p->type = BNODE;
+ p->u.board.status = kstrdup("No", GFP_KERNEL);
+ line = conc = NULL;
+ brd = p;
+ linecnt = -1;
- /*
- * Special initialization for non-PLX boards
- */
- if (brd->device != PCI_DEV_XRJ_DID && brd->device != PCI_DEV_EPCJ_DID) {
- unsigned short cmd;
+ board_type = dgap_gettok(in);
+ if (board_type == 0) {
+ pr_err("board !!type not specified");
+ return -1;
+ }
- pci_write_config_byte(pdev, 0x40, 0);
- pci_write_config_byte(pdev, 0x46, 0);
+ p->u.board.type = board_type;
- /* Limit burst length to 2 doubleword transactions */
- pci_write_config_byte(pdev, 0x42, 1);
+ break;
- /*
- * Enable IO and mem if not already done.
- * This was needed for support on Itanium.
- */
- pci_read_config_word(pdev, PCI_COMMAND, &cmd);
- cmd |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
- pci_write_config_word(pdev, PCI_COMMAND, cmd);
- }
+ case IO: /* i/o port */
+ if (p->type != BNODE) {
+ pr_err("IO port only valid for boards");
+ return -1;
+ }
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ p->u.board.portstr = kstrdup(s, GFP_KERNEL);
+ if (kstrtol(s, 0, &p->u.board.port)) {
+ pr_err("bad number for IO port");
+ return -1;
+ }
+ p->u.board.v_port = 1;
+ break;
- /* init our poll helper tasklet */
- tasklet_init(&brd->helper_tasklet, dgap_poll_tasklet,
- (unsigned long) brd);
+ case MEM: /* memory address */
+ if (p->type != BNODE) {
+ pr_err("memory address only valid for boards");
+ return -1;
+ }
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ p->u.board.addrstr = kstrdup(s, GFP_KERNEL);
+ if (kstrtoul(s, 0, &p->u.board.addr)) {
+ pr_err("bad number for memory address");
+ return -1;
+ }
+ p->u.board.v_addr = 1;
+ break;
- ret = dgap_do_remap(brd);
- if (ret)
- goto free_brd;
+ case PCIINFO: /* pci information */
+ if (p->type != BNODE) {
+ pr_err("memory address only valid for boards");
+ return -1;
+ }
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ p->u.board.pcibusstr = kstrdup(s, GFP_KERNEL);
+ if (kstrtoul(s, 0, &p->u.board.pcibus)) {
+ pr_err("bad number for pci bus");
+ return -1;
+ }
+ p->u.board.v_pcibus = 1;
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ p->u.board.pcislotstr = kstrdup(s, GFP_KERNEL);
+ if (kstrtoul(s, 0, &p->u.board.pcislot)) {
+ pr_err("bad number for pci slot");
+ return -1;
+ }
+ p->u.board.v_pcislot = 1;
+ break;
- pr_info("dgap: board %d: %s (rev %d), irq %ld\n",
- boardnum, brd->name, brd->rev, brd->irq);
+ case METHOD:
+ if (p->type != BNODE) {
+ pr_err("install method only valid for boards");
+ return -1;
+ }
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ p->u.board.method = kstrdup(s, GFP_KERNEL);
+ p->u.board.v_method = 1;
+ break;
- return brd;
+ case STATUS:
+ if (p->type != BNODE) {
+ pr_err("config status only valid for boards");
+ return -1;
+ }
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ p->u.board.status = kstrdup(s, GFP_KERNEL);
+ break;
-free_brd:
- kfree(brd);
+ case NPORTS: /* number of ports */
+ if (p->type == BNODE) {
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.board.nport)) {
+ pr_err("bad number for number of ports");
+ return -1;
+ }
+ p->u.board.v_nport = 1;
+ } else if (p->type == CNODE) {
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.conc.nport)) {
+ pr_err("bad number for number of ports");
+ return -1;
+ }
+ p->u.conc.v_nport = 1;
+ } else if (p->type == MNODE) {
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.module.nport)) {
+ pr_err("bad number for number of ports");
+ return -1;
+ }
+ p->u.module.v_nport = 1;
+ } else {
+ pr_err("nports only valid for concentrators or modules");
+ return -1;
+ }
+ break;
- return ERR_PTR(ret);
-}
+ case ID: /* letter ID used in tty name */
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ p->u.board.status = kstrdup(s, GFP_KERNEL);
-static int dgap_request_irq(struct board_t *brd)
-{
- int rc;
+ if (p->type == CNODE) {
+ p->u.conc.id = kstrdup(s, GFP_KERNEL);
+ p->u.conc.v_id = 1;
+ } else if (p->type == MNODE) {
+ p->u.module.id = kstrdup(s, GFP_KERNEL);
+ p->u.module.v_id = 1;
+ } else {
+ pr_err("id only valid for concentrators or modules");
+ return -1;
+ }
+ break;
- if (!brd || brd->magic != DGAP_BOARD_MAGIC)
- return -ENODEV;
+ case STARTO: /* start offset of ID */
+ if (p->type == BNODE) {
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.board.start)) {
+ pr_err("bad number for start of tty count");
+ return -1;
+ }
+ p->u.board.v_start = 1;
+ } else if (p->type == CNODE) {
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.conc.start)) {
+ pr_err("bad number for start of tty count");
+ return -1;
+ }
+ p->u.conc.v_start = 1;
+ } else if (p->type == MNODE) {
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.module.start)) {
+ pr_err("bad number for start of tty count");
+ return -1;
+ }
+ p->u.module.v_start = 1;
+ } else {
+ pr_err("start only valid for concentrators or modules");
+ return -1;
+ }
+ break;
- /*
- * Set up our interrupt handler if we are set to do interrupts.
- */
- if (dgap_config_get_useintr(brd) && brd->irq) {
+ case TTYN: /* tty name prefix */
+ if (dgap_checknode(p))
+ return -1;
- rc = request_irq(brd->irq, dgap_intr, IRQF_SHARED, "DGAP", brd);
+ p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
+ if (!p->next)
+ return -1;
- if (!rc)
- brd->intr_used = 1;
- }
- return 0;
-}
+ p = p->next;
+ p->type = TNODE;
-static void dgap_free_irq(struct board_t *brd)
-{
- if (brd->intr_used && brd->irq)
- free_irq(brd->irq, brd);
-}
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpeced end of file");
+ return -1;
+ }
+ p->u.ttyname = kstrdup(s, GFP_KERNEL);
+ if (!p->u.ttyname)
+ return -1;
-static int dgap_firmware_load(struct pci_dev *pdev, int card_type,
- struct board_t *brd)
-{
- const struct firmware *fw;
- char *tmp_ptr;
- int ret;
- char *dgap_config_buf;
+ break;
- dgap_get_vpd(brd);
- dgap_do_reset_board(brd);
+ case CU: /* cu name prefix */
+ if (dgap_checknode(p))
+ return -1;
- if (fw_info[card_type].conf_name) {
- ret = request_firmware(&fw, fw_info[card_type].conf_name,
- &pdev->dev);
- if (ret) {
- dev_err(&pdev->dev, "config file %s not found\n",
- fw_info[card_type].conf_name);
- return ret;
- }
+ p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
+ if (!p->next)
+ return -1;
- dgap_config_buf = kzalloc(fw->size + 1, GFP_KERNEL);
- if (!dgap_config_buf) {
- release_firmware(fw);
- return -ENOMEM;
- }
+ p = p->next;
+ p->type = CUNODE;
- memcpy(dgap_config_buf, fw->data, fw->size);
- release_firmware(fw);
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpeced end of file");
+ return -1;
+ }
+ p->u.cuname = kstrdup(s, GFP_KERNEL);
+ if (!p->u.cuname)
+ return -1;
- /*
- * preserve dgap_config_buf
- * as dgap_parsefile would
- * otherwise alter it.
- */
- tmp_ptr = dgap_config_buf;
+ break;
- if (dgap_parsefile(&tmp_ptr) != 0) {
- kfree(dgap_config_buf);
- return -EINVAL;
- }
- kfree(dgap_config_buf);
- }
+ case LINE: /* line information */
+ if (dgap_checknode(p))
+ return -1;
+ if (!brd) {
+ pr_err("must specify board before line info");
+ return -1;
+ }
+ switch (brd->u.board.type) {
+ case PPCM:
+ pr_err("line not valid for PC/em");
+ return -1;
+ }
- /*
- * Match this board to a config the user created for us.
- */
- brd->bd_config =
- dgap_find_config(brd->type, brd->pci_bus, brd->pci_slot);
+ p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
+ if (!p->next)
+ return -1;
- /*
- * Because the 4 port Xr products share the same PCI ID
- * as the 8 port Xr products, if we receive a NULL config
- * back, and this is a PAPORT8 board, retry with a
- * PAPORT4 attempt as well.
- */
- if (brd->type == PAPORT8 && !brd->bd_config)
- brd->bd_config =
- dgap_find_config(PAPORT4, brd->pci_bus, brd->pci_slot);
+ p = p->next;
+ p->type = LNODE;
+ conc = NULL;
+ line = p;
+ linecnt++;
+ break;
- if (!brd->bd_config) {
- dev_err(&pdev->dev, "No valid configuration found\n");
- return -EINVAL;
- }
+ case CONC: /* concentrator information */
+ if (dgap_checknode(p))
+ return -1;
+ if (!line) {
+ pr_err("must specify line info before concentrator");
+ return -1;
+ }
- if (fw_info[card_type].bios_name) {
- ret = request_firmware(&fw, fw_info[card_type].bios_name,
- &pdev->dev);
- if (ret) {
- dev_err(&pdev->dev, "bios file %s not found\n",
- fw_info[card_type].bios_name);
- return ret;
- }
- dgap_do_bios_load(brd, fw->data, fw->size);
- release_firmware(fw);
+ p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
+ if (!p->next)
+ return -1;
- /* Wait for BIOS to test board... */
- ret = dgap_test_bios(brd);
- if (ret)
- return ret;
- }
+ p = p->next;
+ p->type = CNODE;
+ conc = p;
- if (fw_info[card_type].fep_name) {
- ret = request_firmware(&fw, fw_info[card_type].fep_name,
- &pdev->dev);
- if (ret) {
- dev_err(&pdev->dev, "dgap: fep file %s not found\n",
- fw_info[card_type].fep_name);
- return ret;
- }
- dgap_do_fep_load(brd, fw->data, fw->size);
- release_firmware(fw);
+ if (linecnt)
+ brd->u.board.conc2++;
+ else
+ brd->u.board.conc1++;
- /* Wait for FEP to load on board... */
- ret = dgap_test_fep(brd);
- if (ret)
- return ret;
- }
+ conc_type = dgap_gettok(in);
+ if (conc_type == 0 || (conc_type != CX &&
+ conc_type != EPC)) {
+ pr_err("failed to set a type of concentrators");
+ return -1;
+ }
-#ifdef DIGI_CONCENTRATORS_SUPPORTED
- /*
- * If this is a CX or EPCX, we need to see if the firmware
- * is requesting a concentrator image from us.
- */
- if ((bd->type == PCX) || (bd->type == PEPC)) {
- chk_addr = (u16 *) (vaddr + DOWNREQ);
- /* Nonzero if FEP is requesting concentrator image. */
- check = readw(chk_addr);
- vaddr = brd->re_map_membase;
- }
+ p->u.conc.type = conc_type;
- if (fw_info[card_type].con_name && check && vaddr) {
- ret = request_firmware(&fw, fw_info[card_type].con_name,
- &pdev->dev);
- if (ret) {
- dev_err(&pdev->dev, "conc file %s not found\n",
- fw_info[card_type].con_name);
- return ret;
- }
- /* Put concentrator firmware loading code here */
- offset = readw((u16 *) (vaddr + DOWNREQ));
- memcpy_toio(offset, fw->data, fw->size);
+ break;
- dgap_do_conc_load(brd, (char *)fw->data, fw->size)
- release_firmware(fw);
- }
-#endif
+ case MOD: /* EBI module */
+ if (dgap_checknode(p))
+ return -1;
+ if (!brd) {
+ pr_err("must specify board info before EBI modules");
+ return -1;
+ }
+ switch (brd->u.board.type) {
+ case PPCM:
+ linecnt = 0;
+ break;
+ default:
+ if (!conc) {
+ pr_err("must specify concentrator info before EBI module");
+ return -1;
+ }
+ }
- return 0;
-}
+ p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
+ if (!p->next)
+ return -1;
-/*
- * Remap PCI memory.
- */
-static int dgap_do_remap(struct board_t *brd)
-{
- if (!brd || brd->magic != DGAP_BOARD_MAGIC)
- return -EIO;
+ p = p->next;
+ p->type = MNODE;
- if (!request_mem_region(brd->membase, 0x200000, "dgap"))
- return -ENOMEM;
+ if (linecnt)
+ brd->u.board.module2++;
+ else
+ brd->u.board.module1++;
- if (!request_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000,
- "dgap")) {
- release_mem_region(brd->membase, 0x200000);
- return -ENOMEM;
- }
+ module_type = dgap_gettok(in);
+ if (module_type == 0 || module_type != PORTS ||
+ module_type != MODEM) {
+ pr_err("failed to set a type of module");
+ return -1;
+ }
- brd->re_map_membase = ioremap(brd->membase, 0x200000);
- if (!brd->re_map_membase) {
- release_mem_region(brd->membase, 0x200000);
- release_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000);
- return -ENOMEM;
- }
+ p->u.module.type = module_type;
- brd->re_map_port = ioremap((brd->membase + PCI_IO_OFFSET), 0x200000);
- if (!brd->re_map_port) {
- release_mem_region(brd->membase, 0x200000);
- release_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000);
- iounmap(brd->re_map_membase);
- return -ENOMEM;
- }
+ break;
- return 0;
-}
+ case CABLE:
+ if (p->type == LNODE) {
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ p->u.line.cable = kstrdup(s, GFP_KERNEL);
+ p->u.line.v_cable = 1;
+ }
+ break;
-static void dgap_release_remap(struct board_t *brd)
-{
- if (brd->re_map_membase) {
- release_mem_region(brd->membase, 0x200000);
- iounmap(brd->re_map_membase);
- }
+ case SPEED: /* sync line speed indication */
+ if (p->type == LNODE) {
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.line.speed)) {
+ pr_err("bad number for line speed");
+ return -1;
+ }
+ p->u.line.v_speed = 1;
+ } else if (p->type == CNODE) {
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.conc.speed)) {
+ pr_err("bad number for line speed");
+ return -1;
+ }
+ p->u.conc.v_speed = 1;
+ } else {
+ pr_err("speed valid only for lines or concentrators.");
+ return -1;
+ }
+ break;
- if (brd->re_map_port) {
- release_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000);
- iounmap(brd->re_map_port);
- }
-}
-/*****************************************************************************
-*
-* Function:
-*
-* dgap_poll_handler
-*
-* Author:
-*
-* Scott H Kilau
-*
-* Parameters:
-*
-* dummy -- ignored
-*
-* Return Values:
-*
-* none
-*
-* Description:
-*
-* As each timer expires, it determines (a) whether the "transmit"
-* waiter needs to be woken up, and (b) whether the poller needs to
-* be rescheduled.
-*
-******************************************************************************/
+ case CONNECT:
+ if (p->type == CNODE) {
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ p->u.conc.connect = kstrdup(s, GFP_KERNEL);
+ p->u.conc.v_connect = 1;
+ }
+ break;
+ case PRINT: /* transparent print name prefix */
+ if (dgap_checknode(p))
+ return -1;
-static void dgap_poll_handler(ulong dummy)
-{
- int i;
- struct board_t *brd;
- unsigned long lock_flags;
- ulong new_time;
+ p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
+ if (!p->next)
+ return -1;
- dgap_poll_counter++;
+ p = p->next;
+ p->type = PNODE;
- /*
- * Do not start the board state machine until
- * driver tells us its up and running, and has
- * everything it needs.
- */
- if (dgap_driver_state != DRIVER_READY)
- goto schedule_poller;
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpeced end of file");
+ return -1;
+ }
+ p->u.printname = kstrdup(s, GFP_KERNEL);
+ if (!p->u.printname)
+ return -1;
- /*
- * If we have just 1 board, or the system is not SMP,
- * then use the typical old style poller.
- * Otherwise, use our new tasklet based poller, which should
- * speed things up for multiple boards.
- */
- if ((dgap_numboards == 1) || (num_online_cpus() <= 1)) {
- for (i = 0; i < dgap_numboards; i++) {
+ break;
- brd = dgap_board[i];
+ case CMAJOR: /* major number */
+ if (dgap_checknode(p))
+ return -1;
- if (brd->state == BOARD_FAILED)
- continue;
- if (!brd->intr_running)
- /* Call the real board poller directly */
- dgap_poll_tasklet((unsigned long) brd);
- }
- } else {
- /*
- * Go thru each board, kicking off a
- * tasklet for each if needed
- */
- for (i = 0; i < dgap_numboards; i++) {
- brd = dgap_board[i];
+ p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
+ if (!p->next)
+ return -1;
- /*
- * Attempt to grab the board lock.
- *
- * If we can't get it, no big deal, the next poll
- * will get it. Basically, I just really don't want
- * to spin in here, because I want to kick off my
- * tasklets as fast as I can, and then get out the
- * poller.
- */
- if (!spin_trylock(&brd->bd_lock))
- continue;
+ p = p->next;
+ p->type = JNODE;
- /*
- * If board is in a failed state, don't bother
- * scheduling a tasklet
- */
- if (brd->state == BOARD_FAILED) {
- spin_unlock(&brd->bd_lock);
- continue;
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
}
+ if (kstrtol(s, 0, &p->u.majornumber)) {
+ pr_err("bad number for major number");
+ return -1;
+ }
+ break;
- /* Schedule a poll helper task */
- if (!brd->intr_running)
- tasklet_schedule(&brd->helper_tasklet);
-
- /*
- * Can't do DGAP_UNLOCK here, as we don't have
- * lock_flags because we did a trylock above.
- */
- spin_unlock(&brd->bd_lock);
- }
- }
+ case ALTPIN: /* altpin setting */
+ if (dgap_checknode(p))
+ return -1;
-schedule_poller:
+ p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
+ if (!p->next)
+ return -1;
- /*
- * Schedule ourself back at the nominal wakeup interval.
- */
- spin_lock_irqsave(&dgap_poll_lock, lock_flags);
- dgap_poll_time += dgap_jiffies_from_ms(dgap_poll_tick);
+ p = p->next;
+ p->type = ANODE;
- new_time = dgap_poll_time - jiffies;
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.altpin)) {
+ pr_err("bad number for altpin");
+ return -1;
+ }
+ break;
- if ((ulong) new_time >= 2 * dgap_poll_tick) {
- dgap_poll_time =
- jiffies + dgap_jiffies_from_ms(dgap_poll_tick);
- }
+ case USEINTR: /* enable interrupt setting */
+ if (dgap_checknode(p))
+ return -1;
- dgap_poll_timer.function = dgap_poll_handler;
- dgap_poll_timer.data = 0;
- dgap_poll_timer.expires = dgap_poll_time;
- spin_unlock_irqrestore(&dgap_poll_lock, lock_flags);
+ p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
+ if (!p->next)
+ return -1;
- if (!dgap_poll_stop)
- add_timer(&dgap_poll_timer);
-}
+ p = p->next;
+ p->type = INTRNODE;
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.useintr)) {
+ pr_err("bad number for useintr");
+ return -1;
+ }
+ break;
-/*
- * dgap_intr()
- *
- * Driver interrupt handler.
- */
-static irqreturn_t dgap_intr(int irq, void *voidbrd)
-{
- struct board_t *brd = (struct board_t *) voidbrd;
+ case TTSIZ: /* size of tty structure */
+ if (dgap_checknode(p))
+ return -1;
- if (!brd)
- return IRQ_NONE;
+ p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
+ if (!p->next)
+ return -1;
- /*
- * Check to make sure its for us.
- */
- if (brd->magic != DGAP_BOARD_MAGIC)
- return IRQ_NONE;
+ p = p->next;
+ p->type = TSNODE;
- brd->intr_count++;
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.ttysize)) {
+ pr_err("bad number for ttysize");
+ return -1;
+ }
+ break;
- /*
- * Schedule tasklet to run at a better time.
- */
- tasklet_schedule(&brd->helper_tasklet);
- return IRQ_HANDLED;
-}
+ case CHSIZ: /* channel structure size */
+ if (dgap_checknode(p))
+ return -1;
-/*
- * dgap_init_globals()
- *
- * This is where we initialize the globals from the static insmod
- * configuration variables. These are declared near the head of
- * this file.
- */
-static void dgap_init_globals(void)
-{
- int i;
+ p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
+ if (!p->next)
+ return -1;
- for (i = 0; i < MAXBOARDS; i++)
- dgap_board[i] = NULL;
+ p = p->next;
+ p->type = CSNODE;
- init_timer(&dgap_poll_timer);
-}
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.chsize)) {
+ pr_err("bad number for chsize");
+ return -1;
+ }
+ break;
-/************************************************************************
- *
- * TTY Initialization/Cleanup Functions
- *
- ************************************************************************/
+ case BSSIZ: /* board structure size */
+ if (dgap_checknode(p))
+ return -1;
-/*
- * dgap_tty_register()
- *
- * Init the tty subsystem for this board.
- */
-static int dgap_tty_register(struct board_t *brd)
-{
- int rc;
+ p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
+ if (!p->next)
+ return -1;
- brd->serial_driver = tty_alloc_driver(MAXPORTS, 0);
- if (IS_ERR(brd->serial_driver))
- return PTR_ERR(brd->serial_driver);
+ p = p->next;
+ p->type = BSNODE;
- snprintf(brd->serial_name, MAXTTYNAMELEN, "tty_dgap_%d_",
- brd->boardnum);
- brd->serial_driver->name = brd->serial_name;
- brd->serial_driver->name_base = 0;
- brd->serial_driver->major = 0;
- brd->serial_driver->minor_start = 0;
- brd->serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
- brd->serial_driver->subtype = SERIAL_TYPE_NORMAL;
- brd->serial_driver->init_termios = dgap_default_termios;
- brd->serial_driver->driver_name = DRVSTR;
- brd->serial_driver->flags = (TTY_DRIVER_REAL_RAW |
- TTY_DRIVER_DYNAMIC_DEV |
- TTY_DRIVER_HARDWARE_BREAK);
-
- /* The kernel wants space to store pointers to tty_structs */
- brd->serial_driver->ttys =
- kzalloc(MAXPORTS * sizeof(struct tty_struct *), GFP_KERNEL);
- if (!brd->serial_driver->ttys) {
- rc = -ENOMEM;
- goto free_serial_drv;
- }
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.bssize)) {
+ pr_err("bad number for bssize");
+ return -1;
+ }
+ break;
- /*
- * Entry points for driver. Called by the kernel from
- * tty_io.c and n_tty.c.
- */
- tty_set_operations(brd->serial_driver, &dgap_tty_ops);
+ case UNTSIZ: /* sched structure size */
+ if (dgap_checknode(p))
+ return -1;
- /*
- * If we're doing transparent print, we have to do all of the above
- * again, separately so we don't get the LD confused about what major
- * we are when we get into the dgap_tty_open() routine.
- */
- brd->print_driver = tty_alloc_driver(MAXPORTS, 0);
- if (IS_ERR(brd->print_driver)) {
- rc = PTR_ERR(brd->print_driver);
- goto free_serial_drv;
- }
+ p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
+ if (!p->next)
+ return -1;
- snprintf(brd->print_name, MAXTTYNAMELEN, "pr_dgap_%d_",
- brd->boardnum);
- brd->print_driver->name = brd->print_name;
- brd->print_driver->name_base = 0;
- brd->print_driver->major = 0;
- brd->print_driver->minor_start = 0;
- brd->print_driver->type = TTY_DRIVER_TYPE_SERIAL;
- brd->print_driver->subtype = SERIAL_TYPE_NORMAL;
- brd->print_driver->init_termios = dgap_default_termios;
- brd->print_driver->driver_name = DRVSTR;
- brd->print_driver->flags = (TTY_DRIVER_REAL_RAW |
- TTY_DRIVER_DYNAMIC_DEV |
- TTY_DRIVER_HARDWARE_BREAK);
-
- /* The kernel wants space to store pointers to tty_structs */
- brd->print_driver->ttys =
- kzalloc(MAXPORTS * sizeof(struct tty_struct *), GFP_KERNEL);
- if (!brd->print_driver->ttys) {
- rc = -ENOMEM;
- goto free_print_drv;
- }
+ p = p->next;
+ p->type = USNODE;
- /*
- * Entry points for driver. Called by the kernel from
- * tty_io.c and n_tty.c.
- */
- tty_set_operations(brd->print_driver, &dgap_tty_ops);
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.unsize)) {
+ pr_err("bad number for schedsize");
+ return -1;
+ }
+ break;
- /* Register tty devices */
- rc = tty_register_driver(brd->serial_driver);
- if (rc < 0)
- goto free_print_drv;
+ case F2SIZ: /* f2200 structure size */
+ if (dgap_checknode(p))
+ return -1;
- /* Register Transparent Print devices */
- rc = tty_register_driver(brd->print_driver);
- if (rc < 0)
- goto unregister_serial_drv;
+ p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
+ if (!p->next)
+ return -1;
- dgap_boards_by_major[brd->serial_driver->major] = brd;
- brd->dgap_serial_major = brd->serial_driver->major;
+ p = p->next;
+ p->type = FSNODE;
- dgap_boards_by_major[brd->print_driver->major] = brd;
- brd->dgap_transparent_print_major = brd->print_driver->major;
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.f2size)) {
+ pr_err("bad number for f2200size");
+ return -1;
+ }
+ break;
- return 0;
+ case VPSIZ: /* vpix structure size */
+ if (dgap_checknode(p))
+ return -1;
-unregister_serial_drv:
- tty_unregister_driver(brd->serial_driver);
-free_print_drv:
- put_tty_driver(brd->print_driver);
-free_serial_drv:
- put_tty_driver(brd->serial_driver);
+ p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
+ if (!p->next)
+ return -1;
- return rc;
-}
+ p = p->next;
+ p->type = VSNODE;
-static void dgap_tty_unregister(struct board_t *brd)
-{
- tty_unregister_driver(brd->print_driver);
- tty_unregister_driver(brd->serial_driver);
- put_tty_driver(brd->print_driver);
- put_tty_driver(brd->serial_driver);
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.vpixsize)) {
+ pr_err("bad number for vpixsize");
+ return -1;
+ }
+ break;
+ }
+ }
}
-/*
- * dgap_tty_init()
- *
- * Init the tty subsystem. Called once per board after board has been
- * downloaded and init'ed.
- */
-static int dgap_tty_init(struct board_t *brd)
+static void dgap_cleanup_nodes(void)
{
- int i;
- int tlw;
- uint true_count;
- u8 __iomem *vaddr;
- u8 modem;
- struct channel_t *ch;
- struct bs_t __iomem *bs;
- struct cm_t __iomem *cm;
- int ret;
-
- /*
- * Initialize board structure elements.
- */
-
- vaddr = brd->re_map_membase;
- true_count = readw((vaddr + NCHAN));
-
- brd->nasync = dgap_config_get_num_prts(brd);
-
- if (!brd->nasync)
- brd->nasync = brd->maxports;
+ struct cnode *p;
- if (brd->nasync > brd->maxports)
- brd->nasync = brd->maxports;
+ p = &dgap_head;
- if (true_count != brd->nasync) {
- dev_warn(&brd->pdev->dev,
- "%s configured for %d ports, has %d ports.\n",
- brd->name, brd->nasync, true_count);
+ while (p) {
+ struct cnode *tmp = p->next;
- if ((brd->type == PPCM) &&
- (true_count == 64 || true_count == 0)) {
- dev_warn(&brd->pdev->dev,
- "Please make SURE the EBI cable running from the card\n");
- dev_warn(&brd->pdev->dev,
- "to each EM module is plugged into EBI IN!\n");
+ if (p->type == NULLNODE) {
+ p = tmp;
+ continue;
}
- brd->nasync = true_count;
-
- /* If no ports, don't bother going any further */
- if (!brd->nasync) {
- brd->state = BOARD_FAILED;
- brd->dpastatus = BD_NOFEP;
- return -EIO;
+ switch (p->type) {
+ case BNODE:
+ kfree(p->u.board.portstr);
+ kfree(p->u.board.addrstr);
+ kfree(p->u.board.pcibusstr);
+ kfree(p->u.board.pcislotstr);
+ kfree(p->u.board.method);
+ break;
+ case CNODE:
+ kfree(p->u.conc.id);
+ kfree(p->u.conc.connect);
+ break;
+ case MNODE:
+ kfree(p->u.module.id);
+ break;
+ case TNODE:
+ kfree(p->u.ttyname);
+ break;
+ case CUNODE:
+ kfree(p->u.cuname);
+ break;
+ case LNODE:
+ kfree(p->u.line.cable);
+ break;
+ case PNODE:
+ kfree(p->u.printname);
+ break;
}
- }
- /*
- * Allocate channel memory that might not have been allocated
- * when the driver was first loaded.
- */
- for (i = 0; i < brd->nasync; i++) {
- brd->channels[i] =
- kzalloc(sizeof(struct channel_t), GFP_KERNEL);
- if (!brd->channels[i]) {
- ret = -ENOMEM;
- goto free_chan;
- }
+ kfree(p->u.board.status);
+ kfree(p);
+ p = tmp;
}
+}
- ch = brd->channels[0];
- vaddr = brd->re_map_membase;
-
- bs = (struct bs_t __iomem *) ((ulong) vaddr + CHANBUF);
- cm = (struct cm_t __iomem *) ((ulong) vaddr + CMDBUF);
-
- brd->bd_bs = bs;
-
- /* Set up channel variables */
- for (i = 0; i < brd->nasync; i++, ch = brd->channels[i], bs++) {
-
- spin_lock_init(&ch->ch_lock);
-
- /* Store all our magic numbers */
- ch->magic = DGAP_CHANNEL_MAGIC;
- ch->ch_tun.magic = DGAP_UNIT_MAGIC;
- ch->ch_tun.un_type = DGAP_SERIAL;
- ch->ch_tun.un_ch = ch;
- ch->ch_tun.un_dev = i;
-
- ch->ch_pun.magic = DGAP_UNIT_MAGIC;
- ch->ch_pun.un_type = DGAP_PRINT;
- ch->ch_pun.un_ch = ch;
- ch->ch_pun.un_dev = i;
+/*
+ * Retrieves the current custom baud rate from FEP memory
+ * and returns it to the user.
+ * Returns 0 on error.
+ */
+static uint dgap_get_custom_baud(struct channel_t *ch)
+{
+ u8 __iomem *vaddr;
+ ulong offset;
+ uint value;
- ch->ch_vaddr = vaddr;
- ch->ch_bs = bs;
- ch->ch_cm = cm;
- ch->ch_bd = brd;
- ch->ch_portnum = i;
- ch->ch_digi = dgap_digi_init;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return 0;
- /*
- * Set up digi dsr and dcd bits based on altpin flag.
- */
- if (dgap_config_get_altpin(brd)) {
- ch->ch_dsr = DM_CD;
- ch->ch_cd = DM_DSR;
- ch->ch_digi.digi_flags |= DIGI_ALTPIN;
- } else {
- ch->ch_cd = DM_CD;
- ch->ch_dsr = DM_DSR;
- }
+ if (!ch->ch_bd || ch->ch_bd->magic != DGAP_BOARD_MAGIC)
+ return 0;
- ch->ch_taddr = vaddr + (ioread16(&(ch->ch_bs->tx_seg)) << 4);
- ch->ch_raddr = vaddr + (ioread16(&(ch->ch_bs->rx_seg)) << 4);
- ch->ch_tx_win = 0;
- ch->ch_rx_win = 0;
- ch->ch_tsize = readw(&(ch->ch_bs->tx_max)) + 1;
- ch->ch_rsize = readw(&(ch->ch_bs->rx_max)) + 1;
- ch->ch_tstart = 0;
- ch->ch_rstart = 0;
+ if (!(ch->ch_bd->bd_flags & BD_FEP5PLUS))
+ return 0;
- /*
- * Set queue water marks, interrupt mask,
- * and general tty parameters.
- */
- tlw = ch->ch_tsize >= 2000 ? ((ch->ch_tsize * 5) / 8) :
- ch->ch_tsize / 2;
- ch->ch_tlw = tlw;
+ vaddr = ch->ch_bd->re_map_membase;
- dgap_cmdw(ch, STLOW, tlw, 0);
+ if (!vaddr)
+ return 0;
- dgap_cmdw(ch, SRLOW, ch->ch_rsize / 2, 0);
+ /*
+ * Go get from FEP memory what the FEP
+ * believes the custom baud rate is.
+ */
+ offset = (ioread16(vaddr + ECS_SEG) << 4) + (ch->ch_portnum * 0x28)
+ + LINE_SPEED;
- dgap_cmdw(ch, SRHIGH, 7 * ch->ch_rsize / 8, 0);
+ value = readw(vaddr + offset);
+ return value;
+}
- ch->ch_mistat = readb(&(ch->ch_bs->m_stat));
+/*
+ * Remap PCI memory.
+ */
+static int dgap_remap(struct board_t *brd)
+{
+ if (!brd || brd->magic != DGAP_BOARD_MAGIC)
+ return -EIO;
- init_waitqueue_head(&ch->ch_flags_wait);
- init_waitqueue_head(&ch->ch_tun.un_flags_wait);
- init_waitqueue_head(&ch->ch_pun.un_flags_wait);
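+ /*
+ * The board exposes a 4MB window: the low 2MB is board memory and
+ * the upper 2MB holds the I/O registers, so reserve and map both.
+ */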
+ if (!request_mem_region(brd->membase, 0x200000, "dgap"))
+ return -ENOMEM;
- /* Turn on all modem interrupts for now */
- modem = (DM_CD | DM_DSR | DM_CTS | DM_RI);
- writeb(modem, &(ch->ch_bs->m_int));
+ if (!request_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000,
+ "dgap")) {
+ release_mem_region(brd->membase, 0x200000);
+ return -ENOMEM;
+ }
- /*
- * Set edelay to 0 if interrupts are turned on,
- * otherwise set edelay to the usual 100.
- */
- if (brd->intr_used)
- writew(0, &(ch->ch_bs->edelay));
- else
- writew(100, &(ch->ch_bs->edelay));
+ brd->re_map_membase = ioremap(brd->membase, 0x200000);
+ if (!brd->re_map_membase) {
+ release_mem_region(brd->membase, 0x200000);
+ release_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000);
+ return -ENOMEM;
+ }
- writeb(1, &(ch->ch_bs->idata));
+ brd->re_map_port = ioremap((brd->membase + PCI_IO_OFFSET), 0x200000);
+ if (!brd->re_map_port) {
+ release_mem_region(brd->membase, 0x200000);
+ release_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000);
+ iounmap(brd->re_map_membase);
+ return -ENOMEM;
}
return 0;
-
-free_chan:
- while (--i >= 0) {
- kfree(brd->channels[i]);
- brd->channels[i] = NULL;
- }
- return ret;
}
-/*
- * dgap_tty_free()
- *
- * Free the channles which are allocated in dgap_tty_init().
- */
-static void dgap_tty_free(struct board_t *brd)
+static void dgap_unmap(struct board_t *brd)
{
- int i;
-
- for (i = 0; i < brd->nasync; i++)
- kfree(brd->channels[i]);
+ iounmap(brd->re_map_port);
+ iounmap(brd->re_map_membase);
+ release_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000);
+ release_mem_region(brd->membase, 0x200000);
}
+
/*
- * dgap_cleanup_tty()
+ * dgap_parity_scan()
*
- * Uninitialize the TTY portion of this driver. Free all memory and
- * resources.
+ * Convert the FEP5 way of reporting parity errors and breaks into
+ * the Linux line discipline way.
*/
-static void dgap_cleanup_tty(struct board_t *brd)
+static void dgap_parity_scan(struct channel_t *ch, unsigned char *cbuf,
+ unsigned char *fbuf, int *len)
{
- struct device *dev;
- int i;
+ int l = *len;
+ int count = 0;
+ unsigned char *in, *cout, *fout;
+ unsigned char c;
- dgap_boards_by_major[brd->serial_driver->major] = NULL;
- brd->dgap_serial_major = 0;
- for (i = 0; i < brd->nasync; i++) {
- tty_port_destroy(&brd->serial_ports[i]);
- dev = brd->channels[i]->ch_tun.un_sysfs;
- dgap_remove_tty_sysfs(dev);
- tty_unregister_device(brd->serial_driver, i);
- }
- tty_unregister_driver(brd->serial_driver);
- put_tty_driver(brd->serial_driver);
- kfree(brd->serial_ports);
+ in = cbuf;
+ cout = cbuf;
+ fout = fbuf;
- dgap_boards_by_major[brd->print_driver->major] = NULL;
- brd->dgap_transparent_print_major = 0;
- for (i = 0; i < brd->nasync; i++) {
- tty_port_destroy(&brd->printer_ports[i]);
- dev = brd->channels[i]->ch_pun.un_sysfs;
- dgap_remove_tty_sysfs(dev);
- tty_unregister_device(brd->print_driver, i);
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
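+ /*
+ * The FEP escapes status in the data stream with 0xff: a doubled
+ * 0xff is a literal 0xff, while 0xff 0x00 <c> marks <c> as a break
+ * (c == 0) or a parity error (c != 0).
+ */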
+ while (l--) {
+ c = *in++;
+ switch (ch->pscan_state) {
+ default:
+ /* reset to sanity and fall through */
+ ch->pscan_state = 0;
+
+ case 0:
+ /* No FF seen yet */
+ if (c == (unsigned char) '\377')
+ /* delete this character from stream */
+ ch->pscan_state = 1;
+ else {
+ *cout++ = c;
+ *fout++ = TTY_NORMAL;
+ count += 1;
+ }
+ break;
+
+ case 1:
+ /* first FF seen */
+ if (c == (unsigned char) '\377') {
+ /* doubled ff, transform to single ff */
+ *cout++ = c;
+ *fout++ = TTY_NORMAL;
+ count += 1;
+ ch->pscan_state = 0;
+ } else {
+ /* save value for examination in next state */
+ ch->pscan_savechar = c;
+ ch->pscan_state = 2;
+ }
+ break;
+
+ case 2:
+ /* third character of ff sequence */
+
+ *cout++ = c;
+
+ if (ch->pscan_savechar == 0x0) {
+
+ if (c == 0x0) {
+ ch->ch_err_break++;
+ *fout++ = TTY_BREAK;
+ } else {
+ ch->ch_err_parity++;
+ *fout++ = TTY_PARITY;
+ }
+ } else {
+ /* keep the flag buffer in step with the data buffer */
+ *fout++ = TTY_NORMAL;
+ }
+
+ count += 1;
+ ch->pscan_state = 0;
+ }
}
- tty_unregister_driver(brd->print_driver);
- put_tty_driver(brd->print_driver);
- kfree(brd->printer_ports);
+ *len = count;
}
/*=======================================================================
@@ -1738,6 +1717,33 @@ static void dgap_input(struct channel_t *ch)
}
+static void dgap_write_wakeup(struct board_t *bd, struct channel_t *ch,
+ struct un_t *un, u32 mask,
+ unsigned long *irq_flags1,
+ unsigned long *irq_flags2)
+{
+ if (!(un->un_flags & mask))
+ return;
+
+ un->un_flags &= ~mask;
+
+ if (!(un->un_flags & UN_ISOPEN))
+ return;
+
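+ /*
+ * Drop both locks around the line discipline callback, which may
+ * call back into the driver, then re-take them.
+ */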
+ if ((un->un_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
+ un->un_tty->ldisc->ops->write_wakeup) {
+ spin_unlock_irqrestore(&ch->ch_lock, *irq_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, *irq_flags1);
+
+ (un->un_tty->ldisc->ops->write_wakeup)(un->un_tty);
+
+ spin_lock_irqsave(&bd->bd_lock, *irq_flags1);
+ spin_lock_irqsave(&ch->ch_lock, *irq_flags2);
+ }
+ wake_up_interruptible(&un->un_tty->write_wait);
+ wake_up_interruptible(&un->un_flags_wait);
+}
+
/************************************************************************
* Determines when CARRIER changes state and takes appropriate
* action.
@@ -1852,163 +1858,1207 @@ static void dgap_carrier(struct channel_t *ch)
ch->ch_flags &= ~CH_CD;
}
-/************************************************************************
+/*=======================================================================
*
- * TTY Entry points and helper functions
+ * dgap_event - FEP to host event processing routine.
*
- ************************************************************************/
-
-/*
- * dgap_tty_open()
+ * bd - Board of current event.
*
- */
-static int dgap_tty_open(struct tty_struct *tty, struct file *file)
+ *=======================================================================*/
+static int dgap_event(struct board_t *bd)
{
- struct board_t *brd;
struct channel_t *ch;
- struct un_t *un;
- struct bs_t __iomem *bs;
- uint major;
- uint minor;
- int rc;
ulong lock_flags;
ulong lock_flags2;
- u16 head;
+ struct bs_t __iomem *bs;
+ u8 __iomem *event;
+ u8 __iomem *vaddr;
+ struct ev_t __iomem *eaddr;
+ uint head;
+ uint tail;
+ int port;
+ int reason;
+ int modem;
+ int b1;
- major = MAJOR(tty_devnum(tty));
- minor = MINOR(tty_devnum(tty));
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return -EIO;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+
+ vaddr = bd->re_map_membase;
- if (major > 255)
+ if (!vaddr) {
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
return -EIO;
+ }
- /* Get board pointer from our array of majors we have allocated */
- brd = dgap_boards_by_major[major];
- if (!brd)
+ eaddr = (struct ev_t __iomem *) (vaddr + EVBUF);
+
+ /* Get our head and tail */
+ head = readw(&(eaddr->ev_head));
+ tail = readw(&(eaddr->ev_tail));
+
+ /*
+ * Forget it if pointers out of range.
+ */
+
+ if (head >= EVMAX - EVSTART || tail >= EVMAX - EVSTART ||
+ (head | tail) & 03) {
+ /* Let go of board lock */
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
return -EIO;
+ }
/*
- * If board is not yet up to a state of READY, go to
- * sleep waiting for it to happen or they cancel the open.
+ * Loop to process all the events in the buffer.
*/
- rc = wait_event_interruptible(brd->state_wait,
- (brd->state & BOARD_READY));
+ while (tail != head) {
- if (rc)
- return rc;
+ /*
+ * Get interrupt information.
+ */
- spin_lock_irqsave(&brd->bd_lock, lock_flags);
+ event = bd->re_map_membase + tail + EVSTART;
- /* The wait above should guarantee this cannot happen */
- if (brd->state != BOARD_READY) {
- spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
- return -EIO;
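+ /*
+ * Each event entry is four bytes: port number, reason flags,
+ * modem status and a fourth byte that is not used here.
+ */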
+ port = ioread8(event);
+ reason = ioread8(event + 1);
+ modem = ioread8(event + 2);
+ b1 = ioread8(event + 3);
+
+ /*
+ * Make sure the interrupt is valid.
+ */
+ if (port >= bd->nasync)
+ goto next;
+
+ if (!(reason & (IFMODEM | IFBREAK | IFTLW | IFTEM | IFDATA)))
+ goto next;
+
+ ch = bd->channels[port];
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ goto next;
+
+ /*
+ * If we have made it here, the event was valid.
+ * Lock down the channel.
+ */
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ bs = ch->ch_bs;
+
+ if (!bs) {
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ goto next;
+ }
+
+ /*
+ * Process received data.
+ */
+ if (reason & IFDATA) {
+
+ /*
+ * ALL LOCKS *MUST* BE DROPPED BEFORE CALLING INPUT!
+ * input could send some data to ld, which in turn
+ * could do a callback to one of our other functions.
+ */
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+ dgap_input(ch);
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ if (ch->ch_flags & CH_RACTIVE)
+ ch->ch_flags |= CH_RENABLE;
+ else
+ writeb(1, &(bs->idata));
+
+ if (ch->ch_flags & CH_RWAIT) {
+ ch->ch_flags &= ~CH_RWAIT;
+
+ wake_up_interruptible
+ (&ch->ch_tun.un_flags_wait);
+ }
+ }
+
+ /*
+ * Process Modem change signals.
+ */
+ if (reason & IFMODEM) {
+ ch->ch_mistat = modem;
+ dgap_carrier(ch);
+ }
+
+ /*
+ * Process break.
+ */
+ if (reason & IFBREAK) {
+
+ if (ch->ch_tun.un_tty) {
+ /* A break has been indicated */
+ ch->ch_err_break++;
+ tty_buffer_request_room
+ (ch->ch_tun.un_tty->port, 1);
+ tty_insert_flip_char(ch->ch_tun.un_tty->port,
+ 0, TTY_BREAK);
+ tty_flip_buffer_push(ch->ch_tun.un_tty->port);
+ }
+ }
+
+ /*
+ * Process Transmit low.
+ */
+ if (reason & IFTLW) {
+ dgap_write_wakeup(bd, ch, &ch->ch_tun, UN_LOW,
+ &lock_flags, &lock_flags2);
+ dgap_write_wakeup(bd, ch, &ch->ch_pun, UN_LOW,
+ &lock_flags, &lock_flags2);
+ if (ch->ch_flags & CH_WLOW) {
+ ch->ch_flags &= ~CH_WLOW;
+ wake_up_interruptible(&ch->ch_flags_wait);
+ }
+ }
+
+ /*
+ * Process Transmit empty.
+ */
+ if (reason & IFTEM) {
+ dgap_write_wakeup(bd, ch, &ch->ch_tun, UN_EMPTY,
+ &lock_flags, &lock_flags2);
+ dgap_write_wakeup(bd, ch, &ch->ch_pun, UN_EMPTY,
+ &lock_flags, &lock_flags2);
+ if (ch->ch_flags & CH_WEMPTY) {
+ ch->ch_flags &= ~CH_WEMPTY;
+ wake_up_interruptible(&ch->ch_flags_wait);
+ }
+ }
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+
+next:
+ tail = (tail + 4) & (EVMAX - EVSTART - 4);
}
- /* If opened device is greater than our number of ports, bail. */
- if (MINOR(tty_devnum(tty)) > brd->nasync) {
- spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
- return -EIO;
+ writew(tail, &(eaddr->ev_tail));
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+ return 0;
+}
+
+/*
+ * Our board poller function.
+ */
+static void dgap_poll_tasklet(unsigned long data)
+{
+ struct board_t *bd = (struct board_t *) data;
+ ulong lock_flags;
+ char __iomem *vaddr;
+ u16 head, tail;
+
+ if (!bd || (bd->magic != DGAP_BOARD_MAGIC))
+ return;
+
+ if (bd->inhibit_poller)
+ return;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+
+ vaddr = bd->re_map_membase;
+
+ /*
+ * If board is ready, parse deeper to see if there is anything to do.
+ */
+ if (bd->state == BOARD_READY) {
+
+ struct ev_t __iomem *eaddr;
+
+ if (!bd->re_map_membase) {
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return;
+ }
+ if (!bd->re_map_port) {
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return;
+ }
+
+ if (!bd->nasync)
+ goto out;
+
+ eaddr = (struct ev_t __iomem *) (vaddr + EVBUF);
+
+ /* Get our head and tail */
+ head = readw(&(eaddr->ev_head));
+ tail = readw(&(eaddr->ev_tail));
+
+ /*
+ * If there is an event pending. Go service it.
+ */
+ if (head != tail) {
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ dgap_event(bd);
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ }
+
+out:
+ /*
+ * If board is doing interrupts, ACK the interrupt.
+ */
+ if (bd && bd->intr_running)
+ readb(bd->re_map_port + 2);
+
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return;
}
- ch = brd->channels[minor];
- if (!ch) {
- spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
- return -EIO;
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+}
+
+/*
+ * dgap_found_board()
+ *
+ * A board has been found, init it.
+ */
+static struct board_t *dgap_found_board(struct pci_dev *pdev, int id,
+ int boardnum)
+{
+ struct board_t *brd;
+ unsigned int pci_irq;
+ int i;
+ int ret;
+
+ /* get the board structure and prep it */
+ brd = kzalloc(sizeof(struct board_t), GFP_KERNEL);
+ if (!brd)
+ return ERR_PTR(-ENOMEM);
+
+ /* store the info for the board we've found */
+ brd->magic = DGAP_BOARD_MAGIC;
+ brd->boardnum = boardnum;
+ brd->vendor = dgap_pci_tbl[id].vendor;
+ brd->device = dgap_pci_tbl[id].device;
+ brd->pdev = pdev;
+ brd->pci_bus = pdev->bus->number;
+ brd->pci_slot = PCI_SLOT(pdev->devfn);
+ brd->name = dgap_ids[id].name;
+ brd->maxports = dgap_ids[id].maxports;
+ brd->type = dgap_ids[id].config_type;
+ brd->dpatype = dgap_ids[id].dpatype;
+ brd->dpastatus = BD_NOFEP;
+ init_waitqueue_head(&brd->state_wait);
+
+ spin_lock_init(&brd->bd_lock);
+
+ brd->inhibit_poller = FALSE;
+ brd->wait_for_bios = 0;
+ brd->wait_for_fep = 0;
+
+ for (i = 0; i < MAXPORTS; i++)
+ brd->channels[i] = NULL;
+
+ /* store which card & revision we have */
+ pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &brd->subvendor);
+ pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &brd->subdevice);
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &brd->rev);
+
+ pci_irq = pdev->irq;
+ brd->irq = pci_irq;
+
+ /* get the PCI Base Address Registers */
+
+ /* Xr Jupiter and EPC use BAR 2 */
+ if (brd->device == PCI_DEV_XRJ_DID || brd->device == PCI_DEV_EPCJ_DID) {
+ brd->membase = pci_resource_start(pdev, 2);
+ brd->membase_end = pci_resource_end(pdev, 2);
+ }
+ /* Everyone else uses BAR 0 */
+ else {
+ brd->membase = pci_resource_start(pdev, 0);
+ brd->membase_end = pci_resource_end(pdev, 0);
}
- /* Grab channel lock */
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+ if (!brd->membase) {
+ ret = -ENODEV;
+ goto free_brd;
+ }
- /* Figure out our type */
- if (major == brd->dgap_serial_major) {
- un = &brd->channels[minor]->ch_tun;
- un->un_type = DGAP_SERIAL;
- } else if (major == brd->dgap_transparent_print_major) {
- un = &brd->channels[minor]->ch_pun;
- un->un_type = DGAP_PRINT;
+ if (brd->membase & 1)
+ brd->membase &= ~3;
+ else
+ brd->membase &= ~15;
+
+ /*
+ * On the PCI boards, there is no IO space allocated
+ * The I/O registers will be in the first 3 bytes of the
+ * upper 2MB of the 4MB memory space. The board memory
+ * will be mapped into the low 2MB of the 4MB memory space
+ */
+ brd->port = brd->membase + PCI_IO_OFFSET;
+ brd->port_end = brd->port + PCI_IO_SIZE;
+
+ /*
+ * Special initialization for non-PLX boards
+ */
+ if (brd->device != PCI_DEV_XRJ_DID && brd->device != PCI_DEV_EPCJ_DID) {
+ unsigned short cmd;
+
+ pci_write_config_byte(pdev, 0x40, 0);
+ pci_write_config_byte(pdev, 0x46, 0);
+
+ /* Limit burst length to 2 doubleword transactions */
+ pci_write_config_byte(pdev, 0x42, 1);
+
+ /*
+ * Enable IO and mem if not already done.
+ * This was needed for support on Itanium.
+ */
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ cmd |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+ }
+
+ /* init our poll helper tasklet */
+ tasklet_init(&brd->helper_tasklet, dgap_poll_tasklet,
+ (unsigned long) brd);
+
+ ret = dgap_remap(brd);
+ if (ret)
+ goto free_brd;
+
+ pr_info("dgap: board %d: %s (rev %d), irq %ld\n",
+ boardnum, brd->name, brd->rev, brd->irq);
+
+ return brd;
+
+free_brd:
+ kfree(brd);
+
+ return ERR_PTR(ret);
+}
+
+/*
+ * dgap_intr()
+ *
+ * Driver interrupt handler.
+ */
+static irqreturn_t dgap_intr(int irq, void *voidbrd)
+{
+ struct board_t *brd = voidbrd;
+
+ if (!brd)
+ return IRQ_NONE;
+
+ /*
+ * Check to make sure its for us.
+ */
+ if (brd->magic != DGAP_BOARD_MAGIC)
+ return IRQ_NONE;
+
+ brd->intr_count++;
+
+ /*
+ * Schedule tasklet to run at a better time.
+ */
+ tasklet_schedule(&brd->helper_tasklet);
+ return IRQ_HANDLED;
+}
+
+/*****************************************************************************
+*
+* Function:
+*
+* dgap_poll_handler
+*
+* Author:
+*
+* Scott H Kilau
+*
+* Parameters:
+*
+* dummy -- ignored
+*
+* Return Values:
+*
+* none
+*
+* Description:
+*
+* As each timer expires, it determines (a) whether the "transmit"
+* waiter needs to be woken up, and (b) whether the poller needs to
+* be rescheduled.
+*
+******************************************************************************/
+
+static void dgap_poll_handler(ulong dummy)
+{
+ unsigned int i;
+ struct board_t *brd;
+ unsigned long lock_flags;
+ ulong new_time;
+
+ dgap_poll_counter++;
+
+ /*
+ * Do not start the board state machine until
+ * driver tells us its up and running, and has
+ * everything it needs.
+ */
+ if (dgap_driver_state != DRIVER_READY)
+ goto schedule_poller;
+
+ /*
+ * If we have just 1 board, or the system is not SMP,
+ * then use the typical old style poller.
+ * Otherwise, use our new tasklet based poller, which should
+ * speed things up for multiple boards.
+ */
+ if ((dgap_numboards == 1) || (num_online_cpus() <= 1)) {
+ for (i = 0; i < dgap_numboards; i++) {
+
+ brd = dgap_board[i];
+
+ if (brd->state == BOARD_FAILED)
+ continue;
+ if (!brd->intr_running)
+ /* Call the real board poller directly */
+ dgap_poll_tasklet((unsigned long) brd);
+ }
} else {
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
- return -EIO;
+ /*
+ * Go thru each board, kicking off a
+ * tasklet for each if needed
+ */
+ for (i = 0; i < dgap_numboards; i++) {
+ brd = dgap_board[i];
+
+ /*
+ * Attempt to grab the board lock.
+ *
+ * If we can't get it, no big deal, the next poll
+ * will get it. Basically, I just really don't want
+ * to spin in here, because I want to kick off my
+ * tasklets as fast as I can, and then get out the
+ * poller.
+ */
+ if (!spin_trylock(&brd->bd_lock))
+ continue;
+
+ /*
+ * If board is in a failed state, don't bother
+ * scheduling a tasklet
+ */
+ if (brd->state == BOARD_FAILED) {
+ spin_unlock(&brd->bd_lock);
+ continue;
+ }
+
+ /* Schedule a poll helper task */
+ if (!brd->intr_running)
+ tasklet_schedule(&brd->helper_tasklet);
+
+ /*
+ * Can't do DGAP_UNLOCK here, as we don't have
+ * lock_flags because we did a trylock above.
+ */
+ spin_unlock(&brd->bd_lock);
+ }
}
- /* Store our unit into driver_data, so we always have it available. */
- tty->driver_data = un;
+schedule_poller:
/*
- * Error if channel info pointer is NULL.
+ * Schedule ourself back at the nominal wakeup interval.
*/
- bs = ch->ch_bs;
- if (!bs) {
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
- return -EIO;
+ spin_lock_irqsave(&dgap_poll_lock, lock_flags);
+ dgap_poll_time += dgap_jiffies_from_ms(dgap_poll_tick);
+
+ new_time = dgap_poll_time - jiffies;
+
+ if ((ulong) new_time >= 2 * dgap_poll_tick) {
+ dgap_poll_time =
+ jiffies + dgap_jiffies_from_ms(dgap_poll_tick);
+ }
+
+ dgap_poll_timer.function = dgap_poll_handler;
+ dgap_poll_timer.data = 0;
+ dgap_poll_timer.expires = dgap_poll_time;
+ spin_unlock_irqrestore(&dgap_poll_lock, lock_flags);
+
+ if (!dgap_poll_stop)
+ add_timer(&dgap_poll_timer);
+}
+
+/*=======================================================================
+ *
+ * dgap_cmdb - Sends a 2 byte command to the FEP.
+ *
+ * ch - Pointer to channel structure.
+ * cmd - Command to be sent.
+ * byte1 - Integer containing first byte to be sent.
+ * byte2 - Integer containing second byte to be sent.
+ * ncmds - Wait until ncmds or fewer cmds are left
+ * in the cmd buffer before returning.
+ *
+ *=======================================================================*/
+static void dgap_cmdb(struct channel_t *ch, u8 cmd, u8 byte1,
+ u8 byte2, uint ncmds)
+{
+ char __iomem *vaddr;
+ struct cm_t __iomem *cm_addr;
+ uint count;
+ uint n;
+ u16 head;
+ u16 tail;
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ /*
+ * Check if board is still alive.
+ */
+ if (ch->ch_bd->state == BOARD_FAILED)
+ return;
+
+ /*
+ * Make sure the pointers are in range before
+ * writing to the FEP memory.
+ */
+ vaddr = ch->ch_bd->re_map_membase;
+
+ if (!vaddr)
+ return;
+
+ cm_addr = (struct cm_t __iomem *) (vaddr + CMDBUF);
+ head = readw(&(cm_addr->cm_head));
+
+ /*
+ * Forget it if pointers out of range.
+ */
+ if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
+ ch->ch_bd->state = BOARD_FAILED;
+ return;
}
/*
- * Initialize tty's
+ * Put the data in the circular command buffer.
*/
- if (!(un->un_flags & UN_ISOPEN)) {
- /* Store important variables. */
- un->un_tty = tty;
+ writeb(cmd, (vaddr + head + CMDSTART + 0));
+ writeb((u8) ch->ch_portnum, (vaddr + head + CMDSTART + 1));
+ writeb(byte1, (vaddr + head + CMDSTART + 2));
+ writeb(byte2, (vaddr + head + CMDSTART + 3));
- /* Maybe do something here to the TTY struct as well? */
+ head = (head + 4) & (CMDMAX - CMDSTART - 4);
+
+ writew(head, &(cm_addr->cm_head));
+
+ /*
+ * Wait if necessary before updating the head
+ * pointer to limit the number of outstanding
+ * commands to the FEP. If the time spent waiting
+ * is outlandish, declare the FEP dead.
+ */
+ for (count = dgap_count ;;) {
+
+ head = readw(&(cm_addr->cm_head));
+ tail = readw(&(cm_addr->cm_tail));
+
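+ /* Number of command-buffer bytes still outstanding to the FEP */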
+ n = (head - tail) & (CMDMAX - CMDSTART - 4);
+
+ if (n <= ncmds * sizeof(struct cm_t))
+ break;
+
+ if (--count == 0) {
+ ch->ch_bd->state = BOARD_FAILED;
+ return;
+ }
+ udelay(10);
}
+}
+
+/*=======================================================================
+ *
+ * dgap_cmdw - Sends a 1 word command to the FEP.
+ *
+ * ch - Pointer to channel structure.
+ * cmd - Command to be sent.
+ * word - Integer containing word to be sent.
+ * ncmds - Wait until ncmds or fewer cmds are left
+ * in the cmd buffer before returning.
+ *
+ *=======================================================================*/
+static void dgap_cmdw(struct channel_t *ch, u8 cmd, u16 word, uint ncmds)
+{
+ char __iomem *vaddr;
+ struct __iomem cm_t *cm_addr;
+ uint count;
+ uint n;
+ u16 head;
+ u16 tail;
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
/*
- * Initialize if neither terminal or printer is open.
+ * Check if board is still alive.
*/
- if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_ISOPEN)) {
+ if (ch->ch_bd->state == BOARD_FAILED)
+ return;
- ch->ch_mforce = 0;
- ch->ch_mval = 0;
+ /*
+ * Make sure the pointers are in range before
+ * writing to the FEP memory.
+ */
+ vaddr = ch->ch_bd->re_map_membase;
+ if (!vaddr)
+ return;
+
+ cm_addr = (struct cm_t __iomem *) (vaddr + CMDBUF);
+ head = readw(&(cm_addr->cm_head));
+
+ /*
+ * Forget it if pointers out of range.
+ */
+ if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
+ ch->ch_bd->state = BOARD_FAILED;
+ return;
+ }
+
+ /*
+ * Put the data in the circular command buffer.
+ */
+ writeb(cmd, (vaddr + head + CMDSTART + 0));
+ writeb((u8) ch->ch_portnum, (vaddr + head + CMDSTART + 1));
+ writew((u16) word, (vaddr + head + CMDSTART + 2));
+
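+ /* Advance head by one 4-byte command slot, wrapping within the circular buffer */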
+ head = (head + 4) & (CMDMAX - CMDSTART - 4);
+
+ writew(head, &(cm_addr->cm_head));
+
+ /*
+ * Wait if necessary before updating the head
+ * pointer to limit the number of outstanding
+ * commands to the FEP. If the time spent waiting
+ * is outlandish, declare the FEP dead.
+ */
+ for (count = dgap_count ;;) {
+
+ head = readw(&(cm_addr->cm_head));
+ tail = readw(&(cm_addr->cm_tail));
+
+ n = (head - tail) & (CMDMAX - CMDSTART - 4);
+
+ if (n <= ncmds * sizeof(struct cm_t))
+ break;
+
+ if (--count == 0) {
+ ch->ch_bd->state = BOARD_FAILED;
+ return;
+ }
+ udelay(10);
+ }
+}
+
+/*=======================================================================
+ *
+ * dgap_cmdw_ext - Sends an extended word command to the FEP.
+ *
+ * ch - Pointer to channel structure.
+ * cmd - Command to be sent.
+ * word - Integer containing word to be sent.
+ * ncmds - Wait until ncmds or fewer cmds are left
+ * in the cmd buffer before returning.
+ *
+ *=======================================================================*/
+static void dgap_cmdw_ext(struct channel_t *ch, u16 cmd, u16 word, uint ncmds)
+{
+ char __iomem *vaddr;
+ struct __iomem cm_t *cm_addr;
+ uint count;
+ uint n;
+ u16 head;
+ u16 tail;
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ /*
+ * Check if board is still alive.
+ */
+ if (ch->ch_bd->state == BOARD_FAILED)
+ return;
+
+ /*
+ * Make sure the pointers are in range before
+ * writing to the FEP memory.
+ */
+ vaddr = ch->ch_bd->re_map_membase;
+ if (!vaddr)
+ return;
+
+ cm_addr = (struct cm_t __iomem *) (vaddr + CMDBUF);
+ head = readw(&(cm_addr->cm_head));
+
+ /*
+ * Forget it if pointers out of range.
+ */
+ if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
+ ch->ch_bd->state = BOARD_FAILED;
+ return;
+ }
+
+ /*
+ * Put the data in the circular command buffer.
+ */
+
+ /* Write an FF to tell the FEP that we want an extended command */
+ writeb((u8) 0xff, (vaddr + head + CMDSTART + 0));
+
+ writeb((u8) ch->ch_portnum, (vaddr + head + CMDSTART + 1));
+ writew((u16) cmd, (vaddr + head + CMDSTART + 2));
+
+ /*
+ * If the second part of the command won't fit,
+ * put it at the beginning of the circular buffer.
+ */
+ if (((head + 4) >= ((CMDMAX - CMDSTART)) || (head & 03)))
+ writew((u16) word, (vaddr + CMDSTART));
+ else
+ writew((u16) word, (vaddr + head + CMDSTART + 4));
+
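+ /* An extended command takes two 4-byte slots, so advance head by 8 before wrapping */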
+ head = (head + 8) & (CMDMAX - CMDSTART - 4);
+
+ writew(head, &(cm_addr->cm_head));
+
+ /*
+ * Wait if necessary before updating the head
+ * pointer to limit the number of outstanding
+ * commands to the FEP. If the time spent waiting
+ * is outlandish, declare the FEP dead.
+ */
+ for (count = dgap_count ;;) {
+
+ head = readw(&(cm_addr->cm_head));
+ tail = readw(&(cm_addr->cm_tail));
+
+ n = (head - tail) & (CMDMAX - CMDSTART - 4);
+
+ if (n <= ncmds * sizeof(struct cm_t))
+ break;
+
+ if (--count == 0) {
+ ch->ch_bd->state = BOARD_FAILED;
+ return;
+ }
+ udelay(10);
+ }
+}
+
+/*=======================================================================
+ *
+ * dgap_wmove - Write data to FEP buffer.
+ *
+ * ch - Pointer to channel structure.
+ * buf - Pointer to characters to be moved.
+ * cnt - Number of characters to move.
+ *
+ *=======================================================================*/
+static void dgap_wmove(struct channel_t *ch, char *buf, uint cnt)
+{
+ int n;
+ char __iomem *taddr;
+ struct bs_t __iomem *bs;
+ u16 head;
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ /*
+ * Check parameters.
+ */
+ bs = ch->ch_bs;
+ head = readw(&(bs->tx_head));
+
+ /*
+ * If pointers are out of range, just return.
+ */
+ if ((cnt > ch->ch_tsize) ||
+ (unsigned)(head - ch->ch_tstart) >= ch->ch_tsize)
+ return;
+
+ /*
+ * If the write wraps over the top of the circular buffer,
+ * move the portion up to the wrap point, and reset the
+ * pointers to the bottom.
+ */
+ n = ch->ch_tstart + ch->ch_tsize - head;
+
+ if (cnt >= n) {
+ cnt -= n;
+ taddr = ch->ch_taddr + head;
+ memcpy_toio(taddr, buf, n);
+ head = ch->ch_tstart;
+ buf += n;
+ }
+
+ /*
+ * Move rest of data.
+ */
+ taddr = ch->ch_taddr + head;
+ n = cnt;
+ memcpy_toio(taddr, buf, n);
+ head += cnt;
+
+ writew(head, &(bs->tx_head));
+}
+
+/*
+ * Calls the firmware to reset this channel.
+ */
+static void dgap_firmware_reset_port(struct channel_t *ch)
+{
+ dgap_cmdb(ch, CHRESET, 0, 0, 0);
+
+ /*
+ * Now that the channel is reset, we need to make sure
+ * all the current settings get reapplied to the port
+ * in the firmware.
+ *
+ * So we will set the driver's cache of firmware
+ * settings all to 0, and then call param.
+ */
+ ch->ch_fepiflag = 0;
+ ch->ch_fepcflag = 0;
+ ch->ch_fepoflag = 0;
+ ch->ch_fepstartc = 0;
+ ch->ch_fepstopc = 0;
+ ch->ch_fepastartc = 0;
+ ch->ch_fepastopc = 0;
+ ch->ch_mostat = 0;
+ ch->ch_hflow = 0;
+}
+
+/*=======================================================================
+ *
+ * dgap_param - Set Digi parameters.
+ *
+ * ch - Pointer to channel structure.
+ * bd - Pointer to board structure.
+ * un_type - Unit type (DGAP_SERIAL or DGAP_PRINT).
+ *
+ *=======================================================================*/
+static int dgap_param(struct channel_t *ch, struct board_t *bd, u32 un_type)
+{
+ u16 head;
+ u16 cflag;
+ u16 iflag;
+ u8 mval;
+ u8 hflow;
+
+ /*
+ * If baud rate is zero, flush queues, and set mval to drop DTR.
+ */
+ if ((ch->ch_c_cflag & (CBAUD)) == 0) {
+
+ /* flush rx */
+ head = readw(&(ch->ch_bs->rx_head));
+ writew(head, &(ch->ch_bs->rx_tail));
+
+ /* flush tx */
+ head = readw(&(ch->ch_bs->tx_head));
+ writew(head, &(ch->ch_bs->tx_tail));
+ ch->ch_flags |= (CH_BAUD0);
+
+ /* Drop RTS and DTR */
+ ch->ch_mval &= ~(D_RTS(ch)|D_DTR(ch));
+ mval = D_DTR(ch) | D_RTS(ch);
+ ch->ch_baud_info = 0;
+
+ } else if (ch->ch_custom_speed && (bd->bd_flags & BD_FEP5PLUS)) {
/*
- * Flush input queue.
+ * Tell the fep to do the command
*/
- head = readw(&(bs->rx_head));
- writew(head, &(bs->rx_tail));
- ch->ch_flags = 0;
- ch->pscan_state = 0;
- ch->pscan_savechar = 0;
+ dgap_cmdw_ext(ch, 0xff01, ch->ch_custom_speed, 0);
- ch->ch_c_cflag = tty->termios.c_cflag;
- ch->ch_c_iflag = tty->termios.c_iflag;
- ch->ch_c_oflag = tty->termios.c_oflag;
- ch->ch_c_lflag = tty->termios.c_lflag;
- ch->ch_startc = tty->termios.c_cc[VSTART];
- ch->ch_stopc = tty->termios.c_cc[VSTOP];
+ /*
+ * Now go get from fep mem, what the fep
+ * believes the custom baud rate is.
+ */
+ ch->ch_custom_speed = dgap_get_custom_baud(ch);
+ ch->ch_baud_info = ch->ch_custom_speed;
- /* TODO: flush our TTY struct here? */
+ /* Handle transition from B0 */
+ if (ch->ch_flags & CH_BAUD0) {
+ ch->ch_flags &= ~(CH_BAUD0);
+ ch->ch_mval |= (D_RTS(ch)|D_DTR(ch));
+ }
+ mval = D_DTR(ch) | D_RTS(ch);
+
+ } else {
+ /*
+ * Set baud rate, character size, and parity.
+ */
+
+ int iindex = 0;
+ int jindex = 0;
+ int baud = 0;
+
+ ulong bauds[4][16] = {
+ { /* slowbaud */
+ 0, 50, 75, 110,
+ 134, 150, 200, 300,
+ 600, 1200, 1800, 2400,
+ 4800, 9600, 19200, 38400 },
+ { /* slowbaud & CBAUDEX */
+ 0, 57600, 115200, 230400,
+ 460800, 150, 200, 921600,
+ 600, 1200, 1800, 2400,
+ 4800, 9600, 19200, 38400 },
+ { /* fastbaud */
+ 0, 57600, 76800, 115200,
+ 14400, 57600, 230400, 76800,
+ 115200, 230400, 28800, 460800,
+ 921600, 9600, 19200, 38400 },
+ { /* fastbaud & CBAUDEX */
+ 0, 57600, 115200, 230400,
+ 460800, 150, 200, 921600,
+ 600, 1200, 1800, 2400,
+ 4800, 9600, 19200, 38400 }
+ };
+
+ /*
+ * Only use the TXPrint baud rate if the
+ * terminal unit is NOT open
+ */
+ if (!(ch->ch_tun.un_flags & UN_ISOPEN) &&
+ un_type == DGAP_PRINT)
+ baud = C_BAUD(ch->ch_pun.un_tty) & 0xff;
+ else
+ baud = C_BAUD(ch->ch_tun.un_tty) & 0xff;
+
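+ /*
+ * Pick the row of the baud table: CBAUDEX selects the
+ * extended-rate rows, DIGI_FAST selects the fastbaud rows.
+ */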
+ if (ch->ch_c_cflag & CBAUDEX)
+ iindex = 1;
+
+ if (ch->ch_digi.digi_flags & DIGI_FAST)
+ iindex += 2;
+
+ jindex = baud;
+
+ if ((iindex >= 0) && (iindex < 4) &&
+ (jindex >= 0) && (jindex < 16))
+ baud = bauds[iindex][jindex];
+ else
+ baud = 0;
+
+ if (baud == 0)
+ baud = 9600;
+
+ ch->ch_baud_info = baud;
+
+ /*
+ * CBAUD has bit position 0x1000 set these days to
+ * indicate Linux baud rate remap.
+ * We use a different bit assignment for high speed.
+ * Clear this bit out while grabbing the parts of
+ * "cflag" we want.
+ */
+ cflag = ch->ch_c_cflag & ((CBAUD ^ CBAUDEX) | PARODD | PARENB |
+ CSTOPB | CSIZE);
+
+ /*
+ * HUPCL bit is used by FEP to indicate fast baud
+ * table is to be used.
+ */
+ if ((ch->ch_digi.digi_flags & DIGI_FAST) ||
+ (ch->ch_c_cflag & CBAUDEX))
+ cflag |= HUPCL;
+
+ if ((ch->ch_c_cflag & CBAUDEX) &&
+ !(ch->ch_digi.digi_flags & DIGI_FAST)) {
+ /*
+ * The below code is trying to guarantee that only
+ * baud rates 115200, 230400, 460800, 921600 are
+ * remapped. We use exclusive or because the various
+ * baud rates share common bit positions and therefore
+ * can't be tested for easily.
+ */
+ tcflag_t tcflag = (ch->ch_c_cflag & CBAUD) | CBAUDEX;
+ int baudpart = 0;
+
+ /*
+ * Map high speed requests to index
+ * into FEP's baud table
+ */
+ switch (tcflag) {
+ case B57600:
+ baudpart = 1;
+ break;
+#ifdef B76800
+ case B76800:
+ baudpart = 2;
+ break;
+#endif
+ case B115200:
+ baudpart = 3;
+ break;
+ case B230400:
+ baudpart = 9;
+ break;
+ case B460800:
+ baudpart = 11;
+ break;
+#ifdef B921600
+ case B921600:
+ baudpart = 12;
+ break;
+#endif
+ default:
+ baudpart = 0;
+ }
+
+ if (baudpart)
+ cflag = (cflag & ~(CBAUD | CBAUDEX)) | baudpart;
+ }
+
+ cflag &= 0xffff;
+
+ if (cflag != ch->ch_fepcflag) {
+ ch->ch_fepcflag = (u16) (cflag & 0xffff);
+
+ /*
+ * Okay to have channel and board
+ * locks held calling this
+ */
+ dgap_cmdw(ch, SCFLAG, (u16) cflag, 0);
+ }
+
+ /* Handle transition from B0 */
+ if (ch->ch_flags & CH_BAUD0) {
+ ch->ch_flags &= ~(CH_BAUD0);
+ ch->ch_mval |= (D_RTS(ch)|D_DTR(ch));
+ }
+ mval = D_DTR(ch) | D_RTS(ch);
}
- dgap_carrier(ch);
/*
- * Run param in case we changed anything
+ * Get input flags.
*/
- dgap_param(ch, brd, un->un_type);
+ iflag = ch->ch_c_iflag & (IGNBRK | BRKINT | IGNPAR | PARMRK |
+ INPCK | ISTRIP | IXON | IXANY | IXOFF);
+
+ if ((ch->ch_startc == _POSIX_VDISABLE) ||
+ (ch->ch_stopc == _POSIX_VDISABLE)) {
+ iflag &= ~(IXON | IXOFF);
+ ch->ch_c_iflag &= ~(IXON | IXOFF);
+ }
/*
- * follow protocol for opening port
+ * Only the IBM Xr card can switch between
+ * 232 and 422 modes on the fly
+ */
+ if (bd->device == PCI_DEV_XR_IBM_DID) {
+ if (ch->ch_digi.digi_flags & DIGI_422)
+ dgap_cmdb(ch, SCOMMODE, MODE_422, 0, 0);
+ else
+ dgap_cmdb(ch, SCOMMODE, MODE_232, 0, 0);
+ }
+
+ if (ch->ch_digi.digi_flags & DIGI_ALTPIN)
+ iflag |= IALTPIN;
+
+ if (iflag != ch->ch_fepiflag) {
+ ch->ch_fepiflag = iflag;
+
+ /* Okay to have channel and board locks held calling this */
+ dgap_cmdw(ch, SIFLAG, (u16) ch->ch_fepiflag, 0);
+ }
+
+ /*
+ * Select hardware handshaking.
*/
+ hflow = 0;
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
+ if (ch->ch_c_cflag & CRTSCTS)
+ hflow |= (D_RTS(ch) | D_CTS(ch));
+ if (ch->ch_digi.digi_flags & RTSPACE)
+ hflow |= D_RTS(ch);
+ if (ch->ch_digi.digi_flags & DTRPACE)
+ hflow |= D_DTR(ch);
+ if (ch->ch_digi.digi_flags & CTSPACE)
+ hflow |= D_CTS(ch);
+ if (ch->ch_digi.digi_flags & DSRPACE)
+ hflow |= D_DSR(ch);
+ if (ch->ch_digi.digi_flags & DCDPACE)
+ hflow |= D_CD(ch);
- rc = dgap_block_til_ready(tty, file, ch);
+ if (hflow != ch->ch_hflow) {
+ ch->ch_hflow = hflow;
- if (!un->un_tty)
- return -ENODEV;
+ /* Okay to have channel and board locks held calling this */
+ dgap_cmdb(ch, SHFLOW, (u8) hflow, 0xff, 0);
+ }
- /* No going back now, increment our unit and channel counters */
- spin_lock_irqsave(&ch->ch_lock, lock_flags);
- ch->ch_open_count++;
- un->un_open_count++;
- un->un_flags |= (UN_ISOPEN);
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+ /*
+ * Set RTS and/or DTR Toggle if needed,
+ * but only if product is FEP5+ based.
+ */
+ if (bd->bd_flags & BD_FEP5PLUS) {
+ u16 hflow2 = 0;
- return rc;
+ if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE)
+ hflow2 |= (D_RTS(ch));
+ if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE)
+ hflow2 |= (D_DTR(ch));
+
+ dgap_cmdw_ext(ch, 0xff03, hflow2, 0);
+ }
+
+ /*
+ * Set modem control lines.
+ */
+
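+ /*
+ * For any signal bit set in ch_mforce, take its value
+ * from ch_mval instead of the value computed above.
+ */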
+ mval ^= ch->ch_mforce & (mval ^ ch->ch_mval);
+
+ if (ch->ch_mostat ^ mval) {
+ ch->ch_mostat = mval;
+
+ /* Okay to have channel and board locks held calling this */
+ dgap_cmdb(ch, SMODEM, (u8) mval, D_RTS(ch)|D_DTR(ch), 0);
+ }
+
+ /*
+ * Read modem signals, and then call carrier function.
+ */
+ ch->ch_mistat = readb(&(ch->ch_bs->m_stat));
+ dgap_carrier(ch);
+
+ /*
+ * Set the start and stop characters.
+ */
+ if (ch->ch_startc != ch->ch_fepstartc ||
+ ch->ch_stopc != ch->ch_fepstopc) {
+ ch->ch_fepstartc = ch->ch_startc;
+ ch->ch_fepstopc = ch->ch_stopc;
+
+ /* Okay to have channel and board locks held calling this */
+ dgap_cmdb(ch, SFLOWC, ch->ch_fepstartc, ch->ch_fepstopc, 0);
+ }
+
+ /*
+ * Set the Auxiliary start and stop characters.
+ */
+ if (ch->ch_astartc != ch->ch_fepastartc ||
+ ch->ch_astopc != ch->ch_fepastopc) {
+ ch->ch_fepastartc = ch->ch_astartc;
+ ch->ch_fepastopc = ch->ch_astopc;
+
+ /* Okay to have channel and board locks held calling this */
+ dgap_cmdb(ch, SAFLOWC, ch->ch_fepastartc, ch->ch_fepastopc, 0);
+ }
+
+ return 0;
}
/*
@@ -2143,15 +3193,18 @@ static int dgap_block_til_ready(struct tty_struct *tty, struct file *file,
}
/*
- * dgap_tty_hangup()
+ * dgap_tty_flush_buffer()
*
- * Hangup the port. Like a close, but don't wait for output to drain.
+ * Flush Tx buffer (make in == out)
*/
-static void dgap_tty_hangup(struct tty_struct *tty)
+static void dgap_tty_flush_buffer(struct tty_struct *tty)
{
struct board_t *bd;
struct channel_t *ch;
struct un_t *un;
+ ulong lock_flags;
+ ulong lock_flags2;
+ u16 head;
if (!tty || tty->magic != TTY_MAGIC)
return;
@@ -2168,21 +3221,39 @@ static void dgap_tty_hangup(struct tty_struct *tty)
if (!bd || bd->magic != DGAP_BOARD_MAGIC)
return;
- /* flush the transmit queues */
- dgap_tty_flush_buffer(tty);
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
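+ /*
+ * Clear the stopped state, flush the FEP transmit buffer
+ * up to the current head, and resume transmission.
+ */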
+ ch->ch_flags &= ~CH_STOP;
+ head = readw(&(ch->ch_bs->tx_head));
+ dgap_cmdw(ch, FLUSHTX, (u16) head, 0);
+ dgap_cmdw(ch, RESUMETX, 0, 0);
+ if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) {
+ ch->ch_tun.un_flags &= ~(UN_LOW|UN_EMPTY);
+ wake_up_interruptible(&ch->ch_tun.un_flags_wait);
+ }
+ if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) {
+ ch->ch_pun.un_flags &= ~(UN_LOW|UN_EMPTY);
+ wake_up_interruptible(&ch->ch_pun.un_flags_wait);
+ }
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ if (waitqueue_active(&tty->write_wait))
+ wake_up_interruptible(&tty->write_wait);
+ tty_wakeup(tty);
}
/*
- * dgap_tty_close()
+ * dgap_tty_hangup()
*
+ * Hangup the port. Like a close, but don't wait for output to drain.
*/
-static void dgap_tty_close(struct tty_struct *tty, struct file *file)
+static void dgap_tty_hangup(struct tty_struct *tty)
{
- struct ktermios *ts;
struct board_t *bd;
struct channel_t *ch;
struct un_t *un;
- ulong lock_flags;
if (!tty || tty->magic != TTY_MAGIC)
return;
@@ -2199,107 +3270,8 @@ static void dgap_tty_close(struct tty_struct *tty, struct file *file)
if (!bd || bd->magic != DGAP_BOARD_MAGIC)
return;
- ts = &tty->termios;
-
- spin_lock_irqsave(&ch->ch_lock, lock_flags);
-
- /*
- * Determine if this is the last close or not - and if we agree about
- * which type of close it is with the Line Discipline
- */
- if ((tty->count == 1) && (un->un_open_count != 1)) {
- /*
- * Uh, oh. tty->count is 1, which means that the tty
- * structure will be freed. un_open_count should always
- * be one in these conditions. If it's greater than
- * one, we've got real problems, since it means the
- * serial port won't be shutdown.
- */
- un->un_open_count = 1;
- }
-
- if (--un->un_open_count < 0)
- un->un_open_count = 0;
-
- ch->ch_open_count--;
-
- if (ch->ch_open_count && un->un_open_count) {
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
- return;
- }
-
- /* OK, its the last close on the unit */
-
- un->un_flags |= UN_CLOSING;
-
- tty->closing = 1;
-
- /*
- * Only officially close channel if count is 0 and
- * DIGI_PRINTER bit is not set.
- */
- if ((ch->ch_open_count == 0) &&
- !(ch->ch_digi.digi_flags & DIGI_PRINTER)) {
-
- ch->ch_flags &= ~(CH_RXBLOCK);
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
- /* wait for output to drain */
- /* This will also return if we take an interrupt */
-
- dgap_wait_for_drain(tty);
-
- dgap_tty_flush_buffer(tty);
- tty_ldisc_flush(tty);
-
- spin_lock_irqsave(&ch->ch_lock, lock_flags);
-
- tty->closing = 0;
-
- /*
- * If we have HUPCL set, lower DTR and RTS
- */
- if (ch->ch_c_cflag & HUPCL) {
- ch->ch_mostat &= ~(D_RTS(ch)|D_DTR(ch));
- dgap_cmdb(ch, SMODEM, 0, D_DTR(ch)|D_RTS(ch), 0);
-
- /*
- * Go to sleep to ensure RTS/DTR
- * have been dropped for modems to see it.
- */
- spin_unlock_irqrestore(&ch->ch_lock,
- lock_flags);
-
- /* .25 second delay for dropping RTS/DTR */
- schedule_timeout_interruptible(msecs_to_jiffies(250));
-
- spin_lock_irqsave(&ch->ch_lock, lock_flags);
- }
-
- ch->pscan_state = 0;
- ch->pscan_savechar = 0;
- ch->ch_baud_info = 0;
-
- }
-
- /*
- * turn off print device when closing print device.
- */
- if ((un->un_type == DGAP_PRINT) && (ch->ch_flags & CH_PRON)) {
- dgap_wmove(ch, ch->ch_digi.digi_offstr,
- (int) ch->ch_digi.digi_offlen);
- ch->ch_flags &= ~CH_PRON;
- }
-
- un->un_tty = NULL;
- un->un_flags &= ~(UN_ISOPEN | UN_CLOSING);
- tty->driver_data = NULL;
-
- wake_up_interruptible(&ch->ch_flags_wait);
- wake_up_interruptible(&un->un_flags_wait);
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+ /* flush the transmit queues */
+ dgap_tty_flush_buffer(tty);
}
/*
@@ -2599,22 +3571,6 @@ static int dgap_tty_write_room(struct tty_struct *tty)
}
/*
- * dgap_tty_put_char()
- *
- * Put a character into ch->ch_buf
- *
- * - used by the line discipline for OPOST processing
- */
-static int dgap_tty_put_char(struct tty_struct *tty, unsigned char c)
-{
- /*
- * Simply call tty_write.
- */
- dgap_tty_write(tty, &c, 1);
- return 1;
-}
-
-/*
* dgap_tty_write()
*
* Take data from the user or kernel and send it out to the FEP.
@@ -2629,7 +3585,6 @@ static int dgap_tty_write(struct tty_struct *tty, const unsigned char *buf,
char __iomem *vaddr;
u16 head, tail, tmask, remain;
int bufcount, n;
- int orig_count;
ulong lock_flags;
if (!tty)
@@ -2650,13 +3605,6 @@ static int dgap_tty_write(struct tty_struct *tty, const unsigned char *buf,
if (!count)
return 0;
- /*
- * Store original amount of characters passed in.
- * This helps to figure out if we should ask the FEP
- * to send us an event when it has more space available.
- */
- orig_count = count;
-
spin_lock_irqsave(&ch->ch_lock, lock_flags);
/* Get our space available for the channel from the board */
@@ -2784,6 +3732,22 @@ static int dgap_tty_write(struct tty_struct *tty, const unsigned char *buf,
}
/*
+ * dgap_tty_put_char()
+ *
+ * Put a character into ch->ch_buf
+ *
+ * - used by the line discipline for OPOST processing
+ */
+static int dgap_tty_put_char(struct tty_struct *tty, unsigned char c)
+{
+ /*
+ * Simply call tty_write.
+ */
+ dgap_tty_write(tty, &c, 1);
+ return 1;
+}
+
+/*
* Return modem signals to ld.
*/
static int dgap_tty_tiocmget(struct tty_struct *tty)
@@ -3444,13 +4408,189 @@ static void dgap_tty_unthrottle(struct tty_struct *tty)
spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
}
-static void dgap_tty_start(struct tty_struct *tty)
+static struct board_t *find_board_by_major(unsigned int major)
{
- struct board_t *bd;
+ unsigned int i;
+
+ for (i = 0; i < MAXBOARDS; i++) {
+ struct board_t *brd = dgap_board[i];
+
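+ /* An empty slot marks the end of the registered boards */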
+ if (!brd)
+ return NULL;
+ if (major == brd->serial_driver->major ||
+ major == brd->print_driver->major)
+ return brd;
+ }
+
+ return NULL;
+}
+
+/************************************************************************
+ *
+ * TTY Entry points and helper functions
+ *
+ ************************************************************************/
+
+/*
+ * dgap_tty_open()
+ *
+ */
+static int dgap_tty_open(struct tty_struct *tty, struct file *file)
+{
+ struct board_t *brd;
struct channel_t *ch;
struct un_t *un;
+ struct bs_t __iomem *bs;
+ uint major;
+ uint minor;
+ int rc;
ulong lock_flags;
ulong lock_flags2;
+ u16 head;
+
+ major = MAJOR(tty_devnum(tty));
+ minor = MINOR(tty_devnum(tty));
+
+ brd = find_board_by_major(major);
+ if (!brd)
+ return -EIO;
+
+ /*
+ * If board is not yet up to a state of READY, go to
+ * sleep waiting for it to happen or they cancel the open.
+ */
+ rc = wait_event_interruptible(brd->state_wait,
+ (brd->state & BOARD_READY));
+
+ if (rc)
+ return rc;
+
+ spin_lock_irqsave(&brd->bd_lock, lock_flags);
+
+ /* The wait above should guarantee this cannot happen */
+ if (brd->state != BOARD_READY) {
+ spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
+ return -EIO;
+ }
+
+ /* If opened device is greater than our number of ports, bail. */
+ if (MINOR(tty_devnum(tty)) > brd->nasync) {
+ spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
+ return -EIO;
+ }
+
+ ch = brd->channels[minor];
+ if (!ch) {
+ spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
+ return -EIO;
+ }
+
+ /* Grab channel lock */
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ /* Figure out our type */
+ if (major == brd->serial_driver->major) {
+ un = &brd->channels[minor]->ch_tun;
+ un->un_type = DGAP_SERIAL;
+ } else if (major == brd->print_driver->major) {
+ un = &brd->channels[minor]->ch_pun;
+ un->un_type = DGAP_PRINT;
+ } else {
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
+ return -EIO;
+ }
+
+ /* Store our unit into driver_data, so we always have it available. */
+ tty->driver_data = un;
+
+ /*
+ * Error if channel info pointer is NULL.
+ */
+ bs = ch->ch_bs;
+ if (!bs) {
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
+ return -EIO;
+ }
+
+ /*
+ * Initialize tty's
+ */
+ if (!(un->un_flags & UN_ISOPEN)) {
+ /* Store important variables. */
+ un->un_tty = tty;
+
+ /* Maybe do something here to the TTY struct as well? */
+ }
+
+ /*
+ * Initialize if neither terminal nor printer is open.
+ */
+ if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_ISOPEN)) {
+
+ ch->ch_mforce = 0;
+ ch->ch_mval = 0;
+
+ /*
+ * Flush input queue.
+ */
+ head = readw(&(bs->rx_head));
+ writew(head, &(bs->rx_tail));
+
+ ch->ch_flags = 0;
+ ch->pscan_state = 0;
+ ch->pscan_savechar = 0;
+
+ ch->ch_c_cflag = tty->termios.c_cflag;
+ ch->ch_c_iflag = tty->termios.c_iflag;
+ ch->ch_c_oflag = tty->termios.c_oflag;
+ ch->ch_c_lflag = tty->termios.c_lflag;
+ ch->ch_startc = tty->termios.c_cc[VSTART];
+ ch->ch_stopc = tty->termios.c_cc[VSTOP];
+
+ /* TODO: flush our TTY struct here? */
+ }
+
+ dgap_carrier(ch);
+ /*
+ * Run param in case we changed anything
+ */
+ dgap_param(ch, brd, un->un_type);
+
+ /*
+ * follow protocol for opening port
+ */
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
+
+ rc = dgap_block_til_ready(tty, file, ch);
+
+ if (!un->un_tty)
+ return -ENODEV;
+
+ /* No going back now, increment our unit and channel counters */
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+ ch->ch_open_count++;
+ un->un_open_count++;
+ un->un_flags |= (UN_ISOPEN);
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+
+ return rc;
+}
+
+/*
+ * dgap_tty_close()
+ *
+ */
+static void dgap_tty_close(struct tty_struct *tty, struct file *file)
+{
+ struct ktermios *ts;
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
if (!tty || tty->magic != TTY_MAGIC)
return;
@@ -3467,16 +4607,110 @@ static void dgap_tty_start(struct tty_struct *tty)
if (!bd || bd->magic != DGAP_BOARD_MAGIC)
return;
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+ ts = &tty->termios;
- dgap_cmdw(ch, RESUMETX, 0, 0);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ /*
+ * Determine if this is the last close or not - and if we agree about
+ * which type of close it is with the Line Discipline
+ */
+ if ((tty->count == 1) && (un->un_open_count != 1)) {
+ /*
+ * Uh, oh. tty->count is 1, which means that the tty
+ * structure will be freed. un_open_count should always
+ * be one in these conditions. If it's greater than
+ * one, we've got real problems, since it means the
+ * serial port won't be shutdown.
+ */
+ un->un_open_count = 1;
+ }
+
+ if (--un->un_open_count < 0)
+ un->un_open_count = 0;
+
+ ch->ch_open_count--;
+
+ if (ch->ch_open_count && un->un_open_count) {
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+ return;
+ }
+
+ /* OK, it's the last close on the unit */
+
+ un->un_flags |= UN_CLOSING;
+
+ tty->closing = 1;
+
+ /*
+ * Only officially close channel if count is 0 and
+ * DIGI_PRINTER bit is not set.
+ */
+ if ((ch->ch_open_count == 0) &&
+ !(ch->ch_digi.digi_flags & DIGI_PRINTER)) {
+
+ ch->ch_flags &= ~(CH_RXBLOCK);
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+
+ /* wait for output to drain */
+ /* This will also return if we take an interrupt */
+
+ dgap_wait_for_drain(tty);
+
+ dgap_tty_flush_buffer(tty);
+ tty_ldisc_flush(tty);
+
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+
+ tty->closing = 0;
+
+ /*
+ * If we have HUPCL set, lower DTR and RTS
+ */
+ if (ch->ch_c_cflag & HUPCL) {
+ ch->ch_mostat &= ~(D_RTS(ch)|D_DTR(ch));
+ dgap_cmdb(ch, SMODEM, 0, D_DTR(ch)|D_RTS(ch), 0);
+
+ /*
+ * Go to sleep to ensure RTS/DTR
+ * have been dropped for modems to see it.
+ */
+ spin_unlock_irqrestore(&ch->ch_lock,
+ lock_flags);
+
+ /* .25 second delay for dropping RTS/DTR */
+ schedule_timeout_interruptible(msecs_to_jiffies(250));
+
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+ }
+
+ ch->pscan_state = 0;
+ ch->pscan_savechar = 0;
+ ch->ch_baud_info = 0;
+
+ }
+
+ /*
+ * turn off print device when closing print device.
+ */
+ if ((un->un_type == DGAP_PRINT) && (ch->ch_flags & CH_PRON)) {
+ dgap_wmove(ch, ch->ch_digi.digi_offstr,
+ (int) ch->ch_digi.digi_offlen);
+ ch->ch_flags &= ~CH_PRON;
+ }
+
+ un->un_tty = NULL;
+ un->un_flags &= ~(UN_ISOPEN | UN_CLOSING);
+ tty->driver_data = NULL;
+
+ wake_up_interruptible(&ch->ch_flags_wait);
+ wake_up_interruptible(&un->un_flags_wait);
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
}
-static void dgap_tty_stop(struct tty_struct *tty)
+static void dgap_tty_start(struct tty_struct *tty)
{
struct board_t *bd;
struct channel_t *ch;
@@ -3502,26 +4736,13 @@ static void dgap_tty_stop(struct tty_struct *tty)
spin_lock_irqsave(&bd->bd_lock, lock_flags);
spin_lock_irqsave(&ch->ch_lock, lock_flags2);
- dgap_cmdw(ch, PAUSETX, 0, 0);
+ dgap_cmdw(ch, RESUMETX, 0, 0);
spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
}
-/*
- * dgap_tty_flush_chars()
- *
- * Flush the cook buffer
- *
- * Note to self, and any other poor souls who venture here:
- *
- * flush in this case DOES NOT mean dispose of the data.
- * instead, it means "stop buffering and send it if you
- * haven't already." Just guess how I figured that out... SRW 2-Jun-98
- *
- * It is also always called in interrupt context - JAR 8-Sept-99
- */
-static void dgap_tty_flush_chars(struct tty_struct *tty)
+static void dgap_tty_stop(struct tty_struct *tty)
{
struct board_t *bd;
struct channel_t *ch;
@@ -3547,25 +4768,32 @@ static void dgap_tty_flush_chars(struct tty_struct *tty)
spin_lock_irqsave(&bd->bd_lock, lock_flags);
spin_lock_irqsave(&ch->ch_lock, lock_flags2);
- /* TODO: Do something here */
+ dgap_cmdw(ch, PAUSETX, 0, 0);
spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
}
/*
- * dgap_tty_flush_buffer()
+ * dgap_tty_flush_chars()
*
- * Flush Tx buffer (make in == out)
+ * Flush the cook buffer
+ *
+ * Note to self, and any other poor souls who venture here:
+ *
+ * flush in this case DOES NOT mean dispose of the data.
+ * instead, it means "stop buffering and send it if you
+ * haven't already." Just guess how I figured that out... SRW 2-Jun-98
+ *
+ * It is also always called in interrupt context - JAR 8-Sept-99
*/
-static void dgap_tty_flush_buffer(struct tty_struct *tty)
+static void dgap_tty_flush_chars(struct tty_struct *tty)
{
struct board_t *bd;
struct channel_t *ch;
struct un_t *un;
ulong lock_flags;
ulong lock_flags2;
- u16 head;
if (!tty || tty->magic != TTY_MAGIC)
return;
@@ -3585,24 +4813,10 @@ static void dgap_tty_flush_buffer(struct tty_struct *tty)
spin_lock_irqsave(&bd->bd_lock, lock_flags);
spin_lock_irqsave(&ch->ch_lock, lock_flags2);
- ch->ch_flags &= ~CH_STOP;
- head = readw(&(ch->ch_bs->tx_head));
- dgap_cmdw(ch, FLUSHTX, (u16) head, 0);
- dgap_cmdw(ch, RESUMETX, 0, 0);
- if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) {
- ch->ch_tun.un_flags &= ~(UN_LOW|UN_EMPTY);
- wake_up_interruptible(&ch->ch_tun.un_flags_wait);
- }
- if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) {
- ch->ch_pun.un_flags &= ~(UN_LOW|UN_EMPTY);
- wake_up_interruptible(&ch->ch_pun.un_flags_wait);
- }
+ /* TODO: Do something here */
spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- if (waitqueue_active(&tty->write_wait))
- wake_up_interruptible(&tty->write_wait);
- tty_wakeup(tty);
}
/*****************************************************************************
@@ -3996,1614 +5210,151 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
}
}
-static int dgap_alloc_flipbuf(struct board_t *brd)
-{
- /*
- * allocate flip buffer for board.
- */
- brd->flipbuf = kmalloc(MYFLIPLEN, GFP_KERNEL);
- if (!brd->flipbuf)
- return -ENOMEM;
-
- brd->flipflagbuf = kmalloc(MYFLIPLEN, GFP_KERNEL);
- if (!brd->flipflagbuf) {
- kfree(brd->flipbuf);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void dgap_free_flipbuf(struct board_t *brd)
-{
- kfree(brd->flipbuf);
- kfree(brd->flipflagbuf);
-}
-
-/*
- * Create pr and tty device entries
- */
-static int dgap_tty_register_ports(struct board_t *brd)
-{
- struct channel_t *ch;
- int i;
- int ret;
-
- brd->serial_ports = kcalloc(brd->nasync, sizeof(*brd->serial_ports),
- GFP_KERNEL);
- if (!brd->serial_ports)
- return -ENOMEM;
-
- brd->printer_ports = kcalloc(brd->nasync, sizeof(*brd->printer_ports),
- GFP_KERNEL);
- if (!brd->printer_ports) {
- ret = -ENOMEM;
- goto free_serial_ports;
- }
-
- for (i = 0; i < brd->nasync; i++) {
- tty_port_init(&brd->serial_ports[i]);
- tty_port_init(&brd->printer_ports[i]);
- }
-
- ch = brd->channels[0];
- for (i = 0; i < brd->nasync; i++, ch = brd->channels[i]) {
-
- struct device *classp;
-
- classp = tty_port_register_device(&brd->serial_ports[i],
- brd->serial_driver,
- i, NULL);
-
- if (IS_ERR(classp)) {
- ret = PTR_ERR(classp);
- goto unregister_ttys;
- }
-
- dgap_create_tty_sysfs(&ch->ch_tun, classp);
- ch->ch_tun.un_sysfs = classp;
-
- classp = tty_port_register_device(&brd->printer_ports[i],
- brd->print_driver,
- i, NULL);
-
- if (IS_ERR(classp)) {
- ret = PTR_ERR(classp);
- goto unregister_ttys;
- }
-
- dgap_create_tty_sysfs(&ch->ch_pun, classp);
- ch->ch_pun.un_sysfs = classp;
- }
- dgap_create_ports_sysfiles(brd);
-
- return 0;
-
-unregister_ttys:
- while (i >= 0) {
- ch = brd->channels[i];
- if (ch->ch_tun.un_sysfs) {
- dgap_remove_tty_sysfs(ch->ch_tun.un_sysfs);
- tty_unregister_device(brd->serial_driver, i);
- }
-
- if (ch->ch_pun.un_sysfs) {
- dgap_remove_tty_sysfs(ch->ch_pun.un_sysfs);
- tty_unregister_device(brd->print_driver, i);
- }
- i--;
- }
-
- for (i = 0; i < brd->nasync; i++) {
- tty_port_destroy(&brd->serial_ports[i]);
- tty_port_destroy(&brd->printer_ports[i]);
- }
-
- kfree(brd->printer_ports);
- brd->printer_ports = NULL;
-
-free_serial_ports:
- kfree(brd->serial_ports);
- brd->serial_ports = NULL;
-
- return ret;
-}
-
-/*
- * Copies the BIOS code from the user to the board,
- * and starts the BIOS running.
- */
-static void dgap_do_bios_load(struct board_t *brd, const u8 *ubios, int len)
-{
- u8 __iomem *addr;
- uint offset;
- int i;
-
- if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
- return;
-
- addr = brd->re_map_membase;
-
- /*
- * clear POST area
- */
- for (i = 0; i < 16; i++)
- writeb(0, addr + POSTAREA + i);
-
- /*
- * Download bios
- */
- offset = 0x1000;
- memcpy_toio(addr + offset, ubios, len);
-
- writel(0x0bf00401, addr);
- writel(0, (addr + 4));
-
- /* Clear the reset, and change states. */
- writeb(FEPCLR, brd->re_map_port);
-}
-
-/*
- * Checks to see if the BIOS completed running on the card.
- */
-static int dgap_test_bios(struct board_t *brd)
-{
- u8 __iomem *addr;
- u16 word;
- u16 err1;
- u16 err2;
-
- if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
- return -EINVAL;
-
- addr = brd->re_map_membase;
- word = readw(addr + POSTAREA);
-
- /*
- * It can take 5-6 seconds for a board to
- * pass the bios self test and post results.
- * Give it 10 seconds.
- */
- brd->wait_for_bios = 0;
- while (brd->wait_for_bios < 1000) {
- /* Check to see if BIOS thinks board is good. (GD). */
- if (word == *(u16 *) "GD")
- return 0;
- msleep_interruptible(10);
- brd->wait_for_bios++;
- word = readw(addr + POSTAREA);
- }
-
- /* Gave up on board after too long of time taken */
- err1 = readw(addr + SEQUENCE);
- err2 = readw(addr + ERROR);
- dev_warn(&brd->pdev->dev, "%s failed diagnostics. Error #(%x,%x).\n",
- brd->name, err1, err2);
- brd->state = BOARD_FAILED;
- brd->dpastatus = BD_NOBIOS;
-
- return -EIO;
-}
-
-/*
- * Copies the FEP code from the user to the board,
- * and starts the FEP running.
- */
-static void dgap_do_fep_load(struct board_t *brd, const u8 *ufep, int len)
-{
- u8 __iomem *addr;
- uint offset;
-
- if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
- return;
-
- addr = brd->re_map_membase;
-
- /*
- * Download FEP
- */
- offset = 0x1000;
- memcpy_toio(addr + offset, ufep, len);
-
- /*
- * If board is a concentrator product, we need to give
- * it its config string describing how the concentrators look.
- */
- if ((brd->type == PCX) || (brd->type == PEPC)) {
- u8 string[100];
- u8 __iomem *config;
- u8 *xconfig;
- int i = 0;
-
- xconfig = dgap_create_config_string(brd, string);
-
- /* Write string to board memory */
- config = addr + CONFIG;
- for (; i < CONFIGSIZE; i++, config++, xconfig++) {
- writeb(*xconfig, config);
- if ((*xconfig & 0xff) == 0xff)
- break;
- }
- }
-
- writel(0xbfc01004, (addr + 0xc34));
- writel(0x3, (addr + 0xc30));
-
-}
-
-/*
- * Waits for the FEP to report thats its ready for us to use.
- */
-static int dgap_test_fep(struct board_t *brd)
-{
- u8 __iomem *addr;
- u16 word;
- u16 err1;
- u16 err2;
-
- if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
- return -EINVAL;
-
- addr = brd->re_map_membase;
- word = readw(addr + FEPSTAT);
-
- /*
- * It can take 2-3 seconds for the FEP to
- * be up and running. Give it 5 secs.
- */
- brd->wait_for_fep = 0;
- while (brd->wait_for_fep < 500) {
- /* Check to see if FEP is up and running now. */
- if (word == *(u16 *) "OS") {
- /*
- * Check to see if the board can support FEP5+ commands.
- */
- word = readw(addr + FEP5_PLUS);
- if (word == *(u16 *) "5A")
- brd->bd_flags |= BD_FEP5PLUS;
-
- return 0;
- }
- msleep_interruptible(10);
- brd->wait_for_fep++;
- word = readw(addr + FEPSTAT);
- }
-
- /* Gave up on board after too long of time taken */
- err1 = readw(addr + SEQUENCE);
- err2 = readw(addr + ERROR);
- dev_warn(&brd->pdev->dev,
- "FEPOS for %s not functioning. Error #(%x,%x).\n",
- brd->name, err1, err2);
- brd->state = BOARD_FAILED;
- brd->dpastatus = BD_NOFEP;
-
- return -EIO;
-}
-
-/*
- * Physically forces the FEP5 card to reset itself.
- */
-static void dgap_do_reset_board(struct board_t *brd)
-{
- u8 check;
- u32 check1;
- u32 check2;
- int i;
-
- if (!brd || (brd->magic != DGAP_BOARD_MAGIC) ||
- !brd->re_map_membase || !brd->re_map_port)
- return;
-
- /* FEPRST does not vary among supported boards */
- writeb(FEPRST, brd->re_map_port);
-
- for (i = 0; i <= 1000; i++) {
- check = readb(brd->re_map_port) & 0xe;
- if (check == FEPRST)
- break;
- udelay(10);
-
- }
- if (i > 1000) {
- dev_warn(&brd->pdev->dev,
- "dgap: Board not resetting... Failing board.\n");
- brd->state = BOARD_FAILED;
- brd->dpastatus = BD_NOFEP;
- return;
- }
-
- /*
- * Make sure there really is memory out there.
- */
- writel(0xa55a3cc3, (brd->re_map_membase + LOWMEM));
- writel(0x5aa5c33c, (brd->re_map_membase + HIGHMEM));
- check1 = readl(brd->re_map_membase + LOWMEM);
- check2 = readl(brd->re_map_membase + HIGHMEM);
-
- if ((check1 != 0xa55a3cc3) || (check2 != 0x5aa5c33c)) {
- dev_warn(&brd->pdev->dev,
- "No memory at %p for board.\n",
- brd->re_map_membase);
- brd->state = BOARD_FAILED;
- brd->dpastatus = BD_NOFEP;
- return;
- }
-}
-
-#ifdef DIGI_CONCENTRATORS_SUPPORTED
-/*
- * Sends a concentrator image into the FEP5 board.
- */
-static void dgap_do_conc_load(struct board_t *brd, u8 *uaddr, int len)
-{
- char __iomem *vaddr;
- u16 offset;
- struct downld_t *to_dp;
-
- if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
- return;
-
- vaddr = brd->re_map_membase;
-
- offset = readw((u16 *) (vaddr + DOWNREQ));
- to_dp = (struct downld_t *) (vaddr + (int) offset);
- memcpy_toio(to_dp, uaddr, len);
-
- /* Tell card we have data for it */
- writew(0, vaddr + (DOWNREQ));
-
- brd->conc_dl_status = NO_PENDING_CONCENTRATOR_REQUESTS;
-}
-#endif
-
-#define EXPANSION_ROM_SIZE (64 * 1024)
-#define FEP5_ROM_MAGIC (0xFEFFFFFF)
-
-static void dgap_get_vpd(struct board_t *brd)
-{
- u32 magic;
- u32 base_offset;
- u16 rom_offset;
- u16 vpd_offset;
- u16 image_length;
- u16 i;
- u8 byte1;
- u8 byte2;
-
- /*
- * Poke the magic number at the PCI Rom Address location.
- * If VPD is supported, the value read from that address
- * will be non-zero.
- */
- magic = FEP5_ROM_MAGIC;
- pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
- pci_read_config_dword(brd->pdev, PCI_ROM_ADDRESS, &magic);
-
- /* VPD not supported, bail */
- if (!magic)
- return;
-
- /*
- * To get to the OTPROM memory, we have to send the boards base
- * address or'ed with 1 into the PCI Rom Address location.
- */
- magic = brd->membase | 0x01;
- pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
- pci_read_config_dword(brd->pdev, PCI_ROM_ADDRESS, &magic);
-
- byte1 = readb(brd->re_map_membase);
- byte2 = readb(brd->re_map_membase + 1);
-
- /*
- * If the board correctly swapped to the OTPROM memory,
- * the first 2 bytes (header) should be 0x55, 0xAA
- */
- if (byte1 == 0x55 && byte2 == 0xAA) {
-
- base_offset = 0;
-
- /*
- * We have to run through all the OTPROM memory looking
- * for the VPD offset.
- */
- while (base_offset <= EXPANSION_ROM_SIZE) {
-
- /*
- * Lots of magic numbers here.
- *
- * The VPD offset is located inside the ROM Data
- * Structure.
- *
- * We also have to remember the length of each
- * ROM Data Structure, so we can "hop" to the next
- * entry if the VPD isn't in the current
- * ROM Data Structure.
- */
- rom_offset = readw(brd->re_map_membase +
- base_offset + 0x18);
- image_length = readw(brd->re_map_membase +
- rom_offset + 0x10) * 512;
- vpd_offset = readw(brd->re_map_membase +
- rom_offset + 0x08);
-
- /* Found the VPD entry */
- if (vpd_offset)
- break;
-
- /* We didn't find a VPD entry, go to next ROM entry. */
- base_offset += image_length;
-
- byte1 = readb(brd->re_map_membase + base_offset);
- byte2 = readb(brd->re_map_membase + base_offset + 1);
-
- /*
- * If the new ROM offset doesn't have 0x55, 0xAA
- * as its header, we have run out of ROM.
- */
- if (byte1 != 0x55 || byte2 != 0xAA)
- break;
- }
-
- /*
- * If we have a VPD offset, then mark the board
- * as having a valid VPD, and copy VPDSIZE (512) bytes of
- * that VPD to the buffer we have in our board structure.
- */
- if (vpd_offset) {
- brd->bd_flags |= BD_HAS_VPD;
- for (i = 0; i < VPDSIZE; i++) {
- brd->vpd[i] = readb(brd->re_map_membase +
- vpd_offset + i);
- }
- }
- }
-
- /*
- * We MUST poke the magic number at the PCI Rom Address location again.
- * This makes the card report the regular board memory back to us,
- * rather than the OTPROM memory.
- */
- magic = FEP5_ROM_MAGIC;
- pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
-}
-
-/*
- * Our board poller function.
- */
-static void dgap_poll_tasklet(unsigned long data)
-{
- struct board_t *bd = (struct board_t *) data;
- ulong lock_flags;
- char __iomem *vaddr;
- u16 head, tail;
-
- if (!bd || (bd->magic != DGAP_BOARD_MAGIC))
- return;
-
- if (bd->inhibit_poller)
- return;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
-
- vaddr = bd->re_map_membase;
-
- /*
- * If board is ready, parse deeper to see if there is anything to do.
- */
- if (bd->state == BOARD_READY) {
-
- struct ev_t __iomem *eaddr;
-
- if (!bd->re_map_membase) {
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return;
- }
- if (!bd->re_map_port) {
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return;
- }
-
- if (!bd->nasync)
- goto out;
-
- eaddr = (struct ev_t __iomem *) (vaddr + EVBUF);
-
- /* Get our head and tail */
- head = readw(&(eaddr->ev_head));
- tail = readw(&(eaddr->ev_tail));
-
- /*
- * If there is an event pending. Go service it.
- */
- if (head != tail) {
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- dgap_event(bd);
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- }
-
-out:
- /*
- * If board is doing interrupts, ACK the interrupt.
- */
- if (bd && bd->intr_running)
- readb(bd->re_map_port + 2);
-
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return;
- }
-
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-}
-
-/*=======================================================================
- *
- * dgap_cmdb - Sends a 2 byte command to the FEP.
- *
- * ch - Pointer to channel structure.
- * cmd - Command to be sent.
- * byte1 - Integer containing first byte to be sent.
- * byte2 - Integer containing second byte to be sent.
- * ncmds - Wait until ncmds or fewer cmds are left
- * in the cmd buffer before returning.
- *
- *=======================================================================*/
-static void dgap_cmdb(struct channel_t *ch, u8 cmd, u8 byte1,
- u8 byte2, uint ncmds)
-{
- char __iomem *vaddr;
- struct __iomem cm_t *cm_addr;
- uint count;
- uint n;
- u16 head;
- u16 tail;
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- /*
- * Check if board is still alive.
- */
- if (ch->ch_bd->state == BOARD_FAILED)
- return;
-
- /*
- * Make sure the pointers are in range before
- * writing to the FEP memory.
- */
- vaddr = ch->ch_bd->re_map_membase;
-
- if (!vaddr)
- return;
-
- cm_addr = (struct cm_t __iomem *) (vaddr + CMDBUF);
- head = readw(&(cm_addr->cm_head));
-
- /*
- * Forget it if pointers out of range.
- */
- if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
- ch->ch_bd->state = BOARD_FAILED;
- return;
- }
-
- /*
- * Put the data in the circular command buffer.
- */
- writeb(cmd, (vaddr + head + CMDSTART + 0));
- writeb((u8) ch->ch_portnum, (vaddr + head + CMDSTART + 1));
- writeb(byte1, (vaddr + head + CMDSTART + 2));
- writeb(byte2, (vaddr + head + CMDSTART + 3));
-
- head = (head + 4) & (CMDMAX - CMDSTART - 4);
-
- writew(head, &(cm_addr->cm_head));
-
- /*
- * Wait if necessary before updating the head
- * pointer to limit the number of outstanding
- * commands to the FEP. If the time spent waiting
- * is outlandish, declare the FEP dead.
- */
- for (count = dgap_count ;;) {
-
- head = readw(&(cm_addr->cm_head));
- tail = readw(&(cm_addr->cm_tail));
-
- n = (head - tail) & (CMDMAX - CMDSTART - 4);
-
- if (n <= ncmds * sizeof(struct cm_t))
- break;
-
- if (--count == 0) {
- ch->ch_bd->state = BOARD_FAILED;
- return;
- }
- udelay(10);
- }
-}
-
-/*=======================================================================
- *
- * dgap_cmdw - Sends a 1 word command to the FEP.
- *
- * ch - Pointer to channel structure.
- * cmd - Command to be sent.
- * word - Integer containing word to be sent.
- * ncmds - Wait until ncmds or fewer cmds are left
- * in the cmd buffer before returning.
- *
- *=======================================================================*/
-static void dgap_cmdw(struct channel_t *ch, u8 cmd, u16 word, uint ncmds)
-{
- char __iomem *vaddr;
- struct __iomem cm_t *cm_addr;
- uint count;
- uint n;
- u16 head;
- u16 tail;
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- /*
- * Check if board is still alive.
- */
- if (ch->ch_bd->state == BOARD_FAILED)
- return;
-
- /*
- * Make sure the pointers are in range before
- * writing to the FEP memory.
- */
- vaddr = ch->ch_bd->re_map_membase;
- if (!vaddr)
- return;
-
- cm_addr = (struct cm_t __iomem *) (vaddr + CMDBUF);
- head = readw(&(cm_addr->cm_head));
-
- /*
- * Forget it if pointers out of range.
- */
- if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
- ch->ch_bd->state = BOARD_FAILED;
- return;
- }
-
- /*
- * Put the data in the circular command buffer.
- */
- writeb(cmd, (vaddr + head + CMDSTART + 0));
- writeb((u8) ch->ch_portnum, (vaddr + head + CMDSTART + 1));
- writew((u16) word, (vaddr + head + CMDSTART + 2));
-
- head = (head + 4) & (CMDMAX - CMDSTART - 4);
-
- writew(head, &(cm_addr->cm_head));
-
- /*
- * Wait if necessary before updating the head
- * pointer to limit the number of outstanding
- * commands to the FEP. If the time spent waiting
- * is outlandish, declare the FEP dead.
- */
- for (count = dgap_count ;;) {
-
- head = readw(&(cm_addr->cm_head));
- tail = readw(&(cm_addr->cm_tail));
-
- n = (head - tail) & (CMDMAX - CMDSTART - 4);
-
- if (n <= ncmds * sizeof(struct cm_t))
- break;
-
- if (--count == 0) {
- ch->ch_bd->state = BOARD_FAILED;
- return;
- }
- udelay(10);
- }
-}
-
-/*=======================================================================
- *
- * dgap_cmdw_ext - Sends a extended word command to the FEP.
- *
- * ch - Pointer to channel structure.
- * cmd - Command to be sent.
- * word - Integer containing word to be sent.
- * ncmds - Wait until ncmds or fewer cmds are left
- * in the cmd buffer before returning.
- *
- *=======================================================================*/
-static void dgap_cmdw_ext(struct channel_t *ch, u16 cmd, u16 word, uint ncmds)
-{
- char __iomem *vaddr;
- struct __iomem cm_t *cm_addr;
- uint count;
- uint n;
- u16 head;
- u16 tail;
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- /*
- * Check if board is still alive.
- */
- if (ch->ch_bd->state == BOARD_FAILED)
- return;
-
- /*
- * Make sure the pointers are in range before
- * writing to the FEP memory.
- */
- vaddr = ch->ch_bd->re_map_membase;
- if (!vaddr)
- return;
-
- cm_addr = (struct cm_t __iomem *) (vaddr + CMDBUF);
- head = readw(&(cm_addr->cm_head));
-
- /*
- * Forget it if pointers out of range.
- */
- if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
- ch->ch_bd->state = BOARD_FAILED;
- return;
- }
-
- /*
- * Put the data in the circular command buffer.
- */
-
- /* Write an FF to tell the FEP that we want an extended command */
- writeb((u8) 0xff, (vaddr + head + CMDSTART + 0));
-
- writeb((u8) ch->ch_portnum, (vaddr + head + CMDSTART + 1));
- writew((u16) cmd, (vaddr + head + CMDSTART + 2));
-
- /*
- * If the second part of the command won't fit,
- * put it at the beginning of the circular buffer.
- */
- if (((head + 4) >= ((CMDMAX - CMDSTART)) || (head & 03)))
- writew((u16) word, (vaddr + CMDSTART));
- else
- writew((u16) word, (vaddr + head + CMDSTART + 4));
-
- head = (head + 8) & (CMDMAX - CMDSTART - 4);
-
- writew(head, &(cm_addr->cm_head));
-
- /*
- * Wait if necessary before updating the head
- * pointer to limit the number of outstanding
- * commands to the FEP. If the time spent waiting
- * is outlandish, declare the FEP dead.
- */
- for (count = dgap_count ;;) {
-
- head = readw(&(cm_addr->cm_head));
- tail = readw(&(cm_addr->cm_tail));
-
- n = (head - tail) & (CMDMAX - CMDSTART - 4);
-
- if (n <= ncmds * sizeof(struct cm_t))
- break;
-
- if (--count == 0) {
- ch->ch_bd->state = BOARD_FAILED;
- return;
- }
- udelay(10);
- }
-}
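+/* TTY operation callbacks implemented by this driver */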
+static const struct tty_operations dgap_tty_ops = {
+ .open = dgap_tty_open,
+ .close = dgap_tty_close,
+ .write = dgap_tty_write,
+ .write_room = dgap_tty_write_room,
+ .flush_buffer = dgap_tty_flush_buffer,
+ .chars_in_buffer = dgap_tty_chars_in_buffer,
+ .flush_chars = dgap_tty_flush_chars,
+ .ioctl = dgap_tty_ioctl,
+ .set_termios = dgap_tty_set_termios,
+ .stop = dgap_tty_stop,
+ .start = dgap_tty_start,
+ .throttle = dgap_tty_throttle,
+ .unthrottle = dgap_tty_unthrottle,
+ .hangup = dgap_tty_hangup,
+ .put_char = dgap_tty_put_char,
+ .tiocmget = dgap_tty_tiocmget,
+ .tiocmset = dgap_tty_tiocmset,
+ .break_ctl = dgap_tty_send_break,
+ .wait_until_sent = dgap_tty_wait_until_sent,
+ .send_xchar = dgap_tty_send_xchar
+};
-/*=======================================================================
- *
- * dgap_wmove - Write data to FEP buffer.
+/************************************************************************
*
- * ch - Pointer to channel structure.
- * buf - Poiter to characters to be moved.
- * cnt - Number of characters to move.
+ * TTY Initialization/Cleanup Functions
*
- *=======================================================================*/
-static void dgap_wmove(struct channel_t *ch, char *buf, uint cnt)
-{
- int n;
- char __iomem *taddr;
- struct bs_t __iomem *bs;
- u16 head;
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- /*
- * Check parameters.
- */
- bs = ch->ch_bs;
- head = readw(&(bs->tx_head));
-
- /*
- * If pointers are out of range, just return.
- */
- if ((cnt > ch->ch_tsize) ||
- (unsigned)(head - ch->ch_tstart) >= ch->ch_tsize)
- return;
-
- /*
- * If the write wraps over the top of the circular buffer,
- * move the portion up to the wrap point, and reset the
- * pointers to the bottom.
- */
- n = ch->ch_tstart + ch->ch_tsize - head;
-
- if (cnt >= n) {
- cnt -= n;
- taddr = ch->ch_taddr + head;
- memcpy_toio(taddr, buf, n);
- head = ch->ch_tstart;
- buf += n;
- }
-
- /*
- * Move rest of data.
- */
- taddr = ch->ch_taddr + head;
- n = cnt;
- memcpy_toio(taddr, buf, n);
- head += cnt;
-
- writew(head, &(bs->tx_head));
-}
-
-/*
- * Retrives the current custom baud rate from FEP memory,
- * and returns it back to the user.
- * Returns 0 on error.
- */
-static uint dgap_get_custom_baud(struct channel_t *ch)
-{
- u8 __iomem *vaddr;
- ulong offset;
- uint value;
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return 0;
-
- if (!ch->ch_bd || ch->ch_bd->magic != DGAP_BOARD_MAGIC)
- return 0;
-
- if (!(ch->ch_bd->bd_flags & BD_FEP5PLUS))
- return 0;
-
- vaddr = ch->ch_bd->re_map_membase;
-
- if (!vaddr)
- return 0;
-
- /*
- * Go get from fep mem, what the fep
- * believes the custom baud rate is.
- */
- offset = (ioread16(vaddr + ECS_SEG) << 4) + (ch->ch_portnum * 0x28)
- + LINE_SPEED;
-
- value = readw(vaddr + offset);
- return value;
-}
+ ************************************************************************/
/*
- * Calls the firmware to reset this channel.
- */
-static void dgap_firmware_reset_port(struct channel_t *ch)
-{
- dgap_cmdb(ch, CHRESET, 0, 0, 0);
-
- /*
- * Now that the channel is reset, we need to make sure
- * all the current settings get reapplied to the port
- * in the firmware.
- *
- * So we will set the driver's cache of firmware
- * settings all to 0, and then call param.
- */
- ch->ch_fepiflag = 0;
- ch->ch_fepcflag = 0;
- ch->ch_fepoflag = 0;
- ch->ch_fepstartc = 0;
- ch->ch_fepstopc = 0;
- ch->ch_fepastartc = 0;
- ch->ch_fepastopc = 0;
- ch->ch_mostat = 0;
- ch->ch_hflow = 0;
-}
-
-/*=======================================================================
- *
- * dgap_param - Set Digi parameters.
- *
- * struct tty_struct * - TTY for port.
+ * dgap_tty_register()
*
- *=======================================================================*/
-static int dgap_param(struct channel_t *ch, struct board_t *bd, u32 un_type)
+ * Init the tty subsystem for this board.
+ */
+static int dgap_tty_register(struct board_t *brd)
{
- u16 head;
- u16 cflag;
- u16 iflag;
- u8 mval;
- u8 hflow;
-
- /*
- * If baud rate is zero, flush queues, and set mval to drop DTR.
- */
- if ((ch->ch_c_cflag & (CBAUD)) == 0) {
-
- /* flush rx */
- head = readw(&(ch->ch_bs->rx_head));
- writew(head, &(ch->ch_bs->rx_tail));
-
- /* flush tx */
- head = readw(&(ch->ch_bs->tx_head));
- writew(head, &(ch->ch_bs->tx_tail));
-
- ch->ch_flags |= (CH_BAUD0);
-
- /* Drop RTS and DTR */
- ch->ch_mval &= ~(D_RTS(ch)|D_DTR(ch));
- mval = D_DTR(ch) | D_RTS(ch);
- ch->ch_baud_info = 0;
-
- } else if (ch->ch_custom_speed && (bd->bd_flags & BD_FEP5PLUS)) {
- /*
- * Tell the fep to do the command
- */
-
- dgap_cmdw_ext(ch, 0xff01, ch->ch_custom_speed, 0);
-
- /*
- * Now go get from fep mem, what the fep
- * believes the custom baud rate is.
- */
- ch->ch_custom_speed = dgap_get_custom_baud(ch);
- ch->ch_baud_info = ch->ch_custom_speed;
-
- /* Handle transition from B0 */
- if (ch->ch_flags & CH_BAUD0) {
- ch->ch_flags &= ~(CH_BAUD0);
- ch->ch_mval |= (D_RTS(ch)|D_DTR(ch));
- }
- mval = D_DTR(ch) | D_RTS(ch);
-
- } else {
- /*
- * Set baud rate, character size, and parity.
- */
-
-
- int iindex = 0;
- int jindex = 0;
- int baud = 0;
-
- ulong bauds[4][16] = {
- { /* slowbaud */
- 0, 50, 75, 110,
- 134, 150, 200, 300,
- 600, 1200, 1800, 2400,
- 4800, 9600, 19200, 38400 },
- { /* slowbaud & CBAUDEX */
- 0, 57600, 115200, 230400,
- 460800, 150, 200, 921600,
- 600, 1200, 1800, 2400,
- 4800, 9600, 19200, 38400 },
- { /* fastbaud */
- 0, 57600, 76800, 115200,
- 14400, 57600, 230400, 76800,
- 115200, 230400, 28800, 460800,
- 921600, 9600, 19200, 38400 },
- { /* fastbaud & CBAUDEX */
- 0, 57600, 115200, 230400,
- 460800, 150, 200, 921600,
- 600, 1200, 1800, 2400,
- 4800, 9600, 19200, 38400 }
- };
-
- /*
- * Only use the TXPrint baud rate if the
- * terminal unit is NOT open
- */
- if (!(ch->ch_tun.un_flags & UN_ISOPEN) &&
- un_type == DGAP_PRINT)
- baud = C_BAUD(ch->ch_pun.un_tty) & 0xff;
- else
- baud = C_BAUD(ch->ch_tun.un_tty) & 0xff;
-
- if (ch->ch_c_cflag & CBAUDEX)
- iindex = 1;
-
- if (ch->ch_digi.digi_flags & DIGI_FAST)
- iindex += 2;
-
- jindex = baud;
-
- if ((iindex >= 0) && (iindex < 4) &&
- (jindex >= 0) && (jindex < 16))
- baud = bauds[iindex][jindex];
- else
- baud = 0;
-
- if (baud == 0)
- baud = 9600;
-
- ch->ch_baud_info = baud;
-
- /*
- * CBAUD has bit position 0x1000 set these days to
- * indicate Linux baud rate remap.
- * We use a different bit assignment for high speed.
- * Clear this bit out while grabbing the parts of
- * "cflag" we want.
- */
- cflag = ch->ch_c_cflag & ((CBAUD ^ CBAUDEX) | PARODD | PARENB |
- CSTOPB | CSIZE);
-
- /*
- * HUPCL bit is used by FEP to indicate fast baud
- * table is to be used.
- */
- if ((ch->ch_digi.digi_flags & DIGI_FAST) ||
- (ch->ch_c_cflag & CBAUDEX))
- cflag |= HUPCL;
-
- if ((ch->ch_c_cflag & CBAUDEX) &&
- !(ch->ch_digi.digi_flags & DIGI_FAST)) {
- /*
- * The below code is trying to guarantee that only
- * baud rates 115200, 230400, 460800, 921600 are
- * remapped. We use exclusive or because the various
- * baud rates share common bit positions and therefore
- * can't be tested for easily.
- */
- tcflag_t tcflag = (ch->ch_c_cflag & CBAUD) | CBAUDEX;
- int baudpart = 0;
-
- /*
- * Map high speed requests to index
- * into FEP's baud table
- */
- switch (tcflag) {
- case B57600:
- baudpart = 1;
- break;
-#ifdef B76800
- case B76800:
- baudpart = 2;
- break;
-#endif
- case B115200:
- baudpart = 3;
- break;
- case B230400:
- baudpart = 9;
- break;
- case B460800:
- baudpart = 11;
- break;
-#ifdef B921600
- case B921600:
- baudpart = 12;
- break;
-#endif
- default:
- baudpart = 0;
- }
-
- if (baudpart)
- cflag = (cflag & ~(CBAUD | CBAUDEX)) | baudpart;
- }
-
- cflag &= 0xffff;
-
- if (cflag != ch->ch_fepcflag) {
- ch->ch_fepcflag = (u16) (cflag & 0xffff);
-
- /*
- * Okay to have channel and board
- * locks held calling this
- */
- dgap_cmdw(ch, SCFLAG, (u16) cflag, 0);
- }
-
- /* Handle transition from B0 */
- if (ch->ch_flags & CH_BAUD0) {
- ch->ch_flags &= ~(CH_BAUD0);
- ch->ch_mval |= (D_RTS(ch)|D_DTR(ch));
- }
- mval = D_DTR(ch) | D_RTS(ch);
- }
-
- /*
- * Get input flags.
- */
- iflag = ch->ch_c_iflag & (IGNBRK | BRKINT | IGNPAR | PARMRK |
- INPCK | ISTRIP | IXON | IXANY | IXOFF);
-
- if ((ch->ch_startc == _POSIX_VDISABLE) ||
- (ch->ch_stopc == _POSIX_VDISABLE)) {
- iflag &= ~(IXON | IXOFF);
- ch->ch_c_iflag &= ~(IXON | IXOFF);
- }
-
- /*
- * Only the IBM Xr card can switch between
- * 232 and 422 modes on the fly
- */
- if (bd->device == PCI_DEV_XR_IBM_DID) {
- if (ch->ch_digi.digi_flags & DIGI_422)
- dgap_cmdb(ch, SCOMMODE, MODE_422, 0, 0);
- else
- dgap_cmdb(ch, SCOMMODE, MODE_232, 0, 0);
- }
-
- if (ch->ch_digi.digi_flags & DIGI_ALTPIN)
- iflag |= IALTPIN;
-
- if (iflag != ch->ch_fepiflag) {
- ch->ch_fepiflag = iflag;
-
- /* Okay to have channel and board locks held calling this */
- dgap_cmdw(ch, SIFLAG, (u16) ch->ch_fepiflag, 0);
- }
-
- /*
- * Select hardware handshaking.
- */
- hflow = 0;
-
- if (ch->ch_c_cflag & CRTSCTS)
- hflow |= (D_RTS(ch) | D_CTS(ch));
- if (ch->ch_digi.digi_flags & RTSPACE)
- hflow |= D_RTS(ch);
- if (ch->ch_digi.digi_flags & DTRPACE)
- hflow |= D_DTR(ch);
- if (ch->ch_digi.digi_flags & CTSPACE)
- hflow |= D_CTS(ch);
- if (ch->ch_digi.digi_flags & DSRPACE)
- hflow |= D_DSR(ch);
- if (ch->ch_digi.digi_flags & DCDPACE)
- hflow |= D_CD(ch);
+ int rc;
- if (hflow != ch->ch_hflow) {
- ch->ch_hflow = hflow;
+ brd->serial_driver = tty_alloc_driver(MAXPORTS,
+ TTY_DRIVER_REAL_RAW |
+ TTY_DRIVER_DYNAMIC_DEV |
+ TTY_DRIVER_HARDWARE_BREAK);
+ if (IS_ERR(brd->serial_driver))
+ return PTR_ERR(brd->serial_driver);
- /* Okay to have channel and board locks held calling this */
- dgap_cmdb(ch, SHFLOW, (u8) hflow, 0xff, 0);
- }
+ snprintf(brd->serial_name, MAXTTYNAMELEN, "tty_dgap_%d_",
+ brd->boardnum);
+ brd->serial_driver->name = brd->serial_name;
+ brd->serial_driver->name_base = 0;
+ brd->serial_driver->major = 0;
+ brd->serial_driver->minor_start = 0;
+ brd->serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ brd->serial_driver->subtype = SERIAL_TYPE_NORMAL;
+ brd->serial_driver->init_termios = dgap_default_termios;
+ brd->serial_driver->driver_name = DRVSTR;
/*
- * Set RTS and/or DTR Toggle if needed,
- * but only if product is FEP5+ based.
+ * Entry points for driver. Called by the kernel from
+ * tty_io.c and n_tty.c.
*/
- if (bd->bd_flags & BD_FEP5PLUS) {
- u16 hflow2 = 0;
-
- if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE)
- hflow2 |= (D_RTS(ch));
- if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE)
- hflow2 |= (D_DTR(ch));
-
- dgap_cmdw_ext(ch, 0xff03, hflow2, 0);
- }
+ tty_set_operations(brd->serial_driver, &dgap_tty_ops);
/*
- * Set modem control lines.
+ * If we're doing transparent print, we have to do all of the above
+ * again, separately so we don't get the LD confused about what major
+ * we are when we get into the dgap_tty_open() routine.
*/
-
- mval ^= ch->ch_mforce & (mval ^ ch->ch_mval);
-
- if (ch->ch_mostat ^ mval) {
- ch->ch_mostat = mval;
-
- /* Okay to have channel and board locks held calling this */
- dgap_cmdb(ch, SMODEM, (u8) mval, D_RTS(ch)|D_DTR(ch), 0);
+ brd->print_driver = tty_alloc_driver(MAXPORTS,
+ TTY_DRIVER_REAL_RAW |
+ TTY_DRIVER_DYNAMIC_DEV |
+ TTY_DRIVER_HARDWARE_BREAK);
+ if (IS_ERR(brd->print_driver)) {
+ rc = PTR_ERR(brd->print_driver);
+ goto free_serial_drv;
}
- /*
- * Read modem signals, and then call carrier function.
- */
- ch->ch_mistat = readb(&(ch->ch_bs->m_stat));
- dgap_carrier(ch);
+ snprintf(brd->print_name, MAXTTYNAMELEN, "pr_dgap_%d_",
+ brd->boardnum);
+ brd->print_driver->name = brd->print_name;
+ brd->print_driver->name_base = 0;
+ brd->print_driver->major = 0;
+ brd->print_driver->minor_start = 0;
+ brd->print_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ brd->print_driver->subtype = SERIAL_TYPE_NORMAL;
+ brd->print_driver->init_termios = dgap_default_termios;
+ brd->print_driver->driver_name = DRVSTR;
/*
- * Set the start and stop characters.
+ * Entry points for driver. Called by the kernel from
+ * tty_io.c and n_tty.c.
*/
- if (ch->ch_startc != ch->ch_fepstartc ||
- ch->ch_stopc != ch->ch_fepstopc) {
- ch->ch_fepstartc = ch->ch_startc;
- ch->ch_fepstopc = ch->ch_stopc;
-
- /* Okay to have channel and board locks held calling this */
- dgap_cmdb(ch, SFLOWC, ch->ch_fepstartc, ch->ch_fepstopc, 0);
- }
+ tty_set_operations(brd->print_driver, &dgap_tty_ops);
- /*
- * Set the Auxiliary start and stop characters.
- */
- if (ch->ch_astartc != ch->ch_fepastartc ||
- ch->ch_astopc != ch->ch_fepastopc) {
- ch->ch_fepastartc = ch->ch_astartc;
- ch->ch_fepastopc = ch->ch_astopc;
+ /* Register tty devices */
+ rc = tty_register_driver(brd->serial_driver);
+ if (rc < 0)
+ goto free_print_drv;
- /* Okay to have channel and board locks held calling this */
- dgap_cmdb(ch, SAFLOWC, ch->ch_fepastartc, ch->ch_fepastopc, 0);
- }
+ /* Register Transparent Print devices */
+ rc = tty_register_driver(brd->print_driver);
+ if (rc < 0)
+ goto unregister_serial_drv;
return 0;
-}
-
-/*
- * dgap_parity_scan()
- *
- * Convert the FEP5 way of reporting parity errors and breaks into
- * the Linux line discipline way.
- */
-static void dgap_parity_scan(struct channel_t *ch, unsigned char *cbuf,
- unsigned char *fbuf, int *len)
-{
- int l = *len;
- int count = 0;
- unsigned char *in, *cout, *fout;
- unsigned char c;
-
- in = cbuf;
- cout = cbuf;
- fout = fbuf;
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
- while (l--) {
- c = *in++;
- switch (ch->pscan_state) {
- default:
- /* reset to sanity and fall through */
- ch->pscan_state = 0;
-
- case 0:
- /* No FF seen yet */
- if (c == (unsigned char) '\377')
- /* delete this character from stream */
- ch->pscan_state = 1;
- else {
- *cout++ = c;
- *fout++ = TTY_NORMAL;
- count += 1;
- }
- break;
-
- case 1:
- /* first FF seen */
- if (c == (unsigned char) '\377') {
- /* doubled ff, transform to single ff */
- *cout++ = c;
- *fout++ = TTY_NORMAL;
- count += 1;
- ch->pscan_state = 0;
- } else {
- /* save value examination in next state */
- ch->pscan_savechar = c;
- ch->pscan_state = 2;
- }
- break;
-
- case 2:
- /* third character of ff sequence */
-
- *cout++ = c;
-
- if (ch->pscan_savechar == 0x0) {
-
- if (c == 0x0) {
- ch->ch_err_break++;
- *fout++ = TTY_BREAK;
- } else {
- ch->ch_err_parity++;
- *fout++ = TTY_PARITY;
- }
- }
+unregister_serial_drv:
+ tty_unregister_driver(brd->serial_driver);
+free_print_drv:
+ put_tty_driver(brd->print_driver);
+free_serial_drv:
+ put_tty_driver(brd->serial_driver);
- count += 1;
- ch->pscan_state = 0;
- }
- }
- *len = count;
+ return rc;
}
-static void dgap_write_wakeup(struct board_t *bd, struct channel_t *ch,
- struct un_t *un, u32 mask,
- unsigned long *irq_flags1,
- unsigned long *irq_flags2)
+static void dgap_tty_unregister(struct board_t *brd)
{
- if (!(un->un_flags & mask))
- return;
-
- un->un_flags &= ~mask;
-
- if (!(un->un_flags & UN_ISOPEN))
- return;
-
- if ((un->un_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
- un->un_tty->ldisc->ops->write_wakeup) {
- spin_unlock_irqrestore(&ch->ch_lock, *irq_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, *irq_flags1);
-
- (un->un_tty->ldisc->ops->write_wakeup)(un->un_tty);
-
- spin_lock_irqsave(&bd->bd_lock, *irq_flags1);
- spin_lock_irqsave(&ch->ch_lock, *irq_flags2);
- }
- wake_up_interruptible(&un->un_tty->write_wait);
- wake_up_interruptible(&un->un_flags_wait);
+ tty_unregister_driver(brd->print_driver);
+ tty_unregister_driver(brd->serial_driver);
+ put_tty_driver(brd->print_driver);
+ put_tty_driver(brd->serial_driver);
}
-/*=======================================================================
- *
- * dgap_event - FEP to host event processing routine.
- *
- * bd - Board of current event.
- *
- *=======================================================================*/
-static int dgap_event(struct board_t *bd)
+static int dgap_alloc_flipbuf(struct board_t *brd)
{
- struct channel_t *ch;
- ulong lock_flags;
- ulong lock_flags2;
- struct bs_t __iomem *bs;
- u8 __iomem *event;
- u8 __iomem *vaddr;
- struct ev_t __iomem *eaddr;
- uint head;
- uint tail;
- int port;
- int reason;
- int modem;
- int b1;
-
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return -EIO;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
-
- vaddr = bd->re_map_membase;
-
- if (!vaddr) {
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return -EIO;
- }
-
- eaddr = (struct ev_t __iomem *) (vaddr + EVBUF);
-
- /* Get our head and tail */
- head = readw(&(eaddr->ev_head));
- tail = readw(&(eaddr->ev_tail));
-
- /*
- * Forget it if pointers out of range.
- */
-
- if (head >= EVMAX - EVSTART || tail >= EVMAX - EVSTART ||
- (head | tail) & 03) {
- /* Let go of board lock */
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return -EIO;
- }
-
/*
- * Loop to process all the events in the buffer.
+ * allocate flip buffer for board.
*/
- while (tail != head) {
-
- /*
- * Get interrupt information.
- */
-
- event = bd->re_map_membase + tail + EVSTART;
-
- port = ioread8(event);
- reason = ioread8(event + 1);
- modem = ioread8(event + 2);
- b1 = ioread8(event + 3);
-
- /*
- * Make sure the interrupt is valid.
- */
- if (port >= bd->nasync)
- goto next;
-
- if (!(reason & (IFMODEM | IFBREAK | IFTLW | IFTEM | IFDATA)))
- goto next;
-
- ch = bd->channels[port];
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- goto next;
-
- /*
- * If we have made it here, the event was valid.
- * Lock down the channel.
- */
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- bs = ch->ch_bs;
-
- if (!bs) {
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- goto next;
- }
-
- /*
- * Process received data.
- */
- if (reason & IFDATA) {
-
- /*
- * ALL LOCKS *MUST* BE DROPPED BEFORE CALLING INPUT!
- * input could send some data to ld, which in turn
- * could do a callback to one of our other functions.
- */
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
- dgap_input(ch);
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- if (ch->ch_flags & CH_RACTIVE)
- ch->ch_flags |= CH_RENABLE;
- else
- writeb(1, &(bs->idata));
-
- if (ch->ch_flags & CH_RWAIT) {
- ch->ch_flags &= ~CH_RWAIT;
-
- wake_up_interruptible
- (&ch->ch_tun.un_flags_wait);
- }
- }
-
- /*
- * Process Modem change signals.
- */
- if (reason & IFMODEM) {
- ch->ch_mistat = modem;
- dgap_carrier(ch);
- }
-
- /*
- * Process break.
- */
- if (reason & IFBREAK) {
-
- if (ch->ch_tun.un_tty) {
- /* A break has been indicated */
- ch->ch_err_break++;
- tty_buffer_request_room
- (ch->ch_tun.un_tty->port, 1);
- tty_insert_flip_char(ch->ch_tun.un_tty->port,
- 0, TTY_BREAK);
- tty_flip_buffer_push(ch->ch_tun.un_tty->port);
- }
- }
-
- /*
- * Process Transmit low.
- */
- if (reason & IFTLW) {
- dgap_write_wakeup(bd, ch, &ch->ch_tun, UN_LOW,
- &lock_flags, &lock_flags2);
- dgap_write_wakeup(bd, ch, &ch->ch_pun, UN_LOW,
- &lock_flags, &lock_flags2);
- if (ch->ch_flags & CH_WLOW) {
- ch->ch_flags &= ~CH_WLOW;
- wake_up_interruptible(&ch->ch_flags_wait);
- }
- }
-
- /*
- * Process Transmit empty.
- */
- if (reason & IFTEM) {
- dgap_write_wakeup(bd, ch, &ch->ch_tun, UN_EMPTY,
- &lock_flags, &lock_flags2);
- dgap_write_wakeup(bd, ch, &ch->ch_pun, UN_EMPTY,
- &lock_flags, &lock_flags2);
- if (ch->ch_flags & CH_WEMPTY) {
- ch->ch_flags &= ~CH_WEMPTY;
- wake_up_interruptible(&ch->ch_flags_wait);
- }
- }
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ brd->flipbuf = kmalloc(MYFLIPLEN, GFP_KERNEL);
+ if (!brd->flipbuf)
+ return -ENOMEM;
-next:
- tail = (tail + 4) & (EVMAX - EVSTART - 4);
+ brd->flipflagbuf = kmalloc(MYFLIPLEN, GFP_KERNEL);
+ if (!brd->flipflagbuf) {
+ kfree(brd->flipbuf);
+ return -ENOMEM;
}
- writew(tail, &(eaddr->ev_tail));
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
return 0;
}
-static ssize_t dgap_driver_version_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%s\n", DG_PART);
-}
-static DRIVER_ATTR(version, S_IRUSR, dgap_driver_version_show, NULL);
-
-
-static ssize_t dgap_driver_boards_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n", dgap_numboards);
-}
-static DRIVER_ATTR(boards, S_IRUSR, dgap_driver_boards_show, NULL);
-
-
-static ssize_t dgap_driver_maxboards_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n", MAXBOARDS);
-}
-static DRIVER_ATTR(maxboards, S_IRUSR, dgap_driver_maxboards_show, NULL);
-
-
-static ssize_t dgap_driver_pollcounter_show(struct device_driver *ddp,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%ld\n", dgap_poll_counter);
-}
-static DRIVER_ATTR(pollcounter, S_IRUSR, dgap_driver_pollcounter_show, NULL);
-
-static ssize_t dgap_driver_pollrate_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%dms\n", dgap_poll_tick);
-}
-
-static ssize_t dgap_driver_pollrate_store(struct device_driver *ddp,
- const char *buf, size_t count)
-{
- if (sscanf(buf, "%d\n", &dgap_poll_tick) != 1)
- return -EINVAL;
- return count;
-}
-static DRIVER_ATTR(pollrate, (S_IRUSR | S_IWUSR), dgap_driver_pollrate_show,
- dgap_driver_pollrate_store);
-
-static int dgap_create_driver_sysfiles(struct pci_driver *dgap_driver)
-{
- int rc = 0;
- struct device_driver *driverfs = &dgap_driver->driver;
-
- rc |= driver_create_file(driverfs, &driver_attr_version);
- rc |= driver_create_file(driverfs, &driver_attr_boards);
- rc |= driver_create_file(driverfs, &driver_attr_maxboards);
- rc |= driver_create_file(driverfs, &driver_attr_pollrate);
- rc |= driver_create_file(driverfs, &driver_attr_pollcounter);
-
- return rc;
-}
-
-static void dgap_remove_driver_sysfiles(struct pci_driver *dgap_driver)
+static void dgap_free_flipbuf(struct board_t *brd)
{
- struct device_driver *driverfs = &dgap_driver->driver;
-
- driver_remove_file(driverfs, &driver_attr_version);
- driver_remove_file(driverfs, &driver_attr_boards);
- driver_remove_file(driverfs, &driver_attr_maxboards);
- driver_remove_file(driverfs, &driver_attr_pollrate);
- driver_remove_file(driverfs, &driver_attr_pollcounter);
+ kfree(brd->flipbuf);
+ kfree(brd->flipflagbuf);
}
static struct board_t *dgap_verify_board(struct device *p)
@@ -5626,7 +5377,7 @@ static ssize_t dgap_ports_state_show(struct device *p,
{
struct board_t *bd;
int count = 0;
- int i;
+ unsigned int i;
bd = dgap_verify_board(p);
if (!bd)
@@ -5647,7 +5398,7 @@ static ssize_t dgap_ports_baud_show(struct device *p,
{
struct board_t *bd;
int count = 0;
- int i;
+ unsigned int i;
bd = dgap_verify_board(p);
if (!bd)
@@ -5668,7 +5419,7 @@ static ssize_t dgap_ports_msignals_show(struct device *p,
{
struct board_t *bd;
int count = 0;
- int i;
+ unsigned int i;
bd = dgap_verify_board(p);
if (!bd)
@@ -5705,7 +5456,7 @@ static ssize_t dgap_ports_iflag_show(struct device *p,
{
struct board_t *bd;
int count = 0;
- int i;
+ unsigned int i;
bd = dgap_verify_board(p);
if (!bd)
@@ -5725,7 +5476,7 @@ static ssize_t dgap_ports_cflag_show(struct device *p,
{
struct board_t *bd;
int count = 0;
- int i;
+ unsigned int i;
bd = dgap_verify_board(p);
if (!bd)
@@ -5745,7 +5496,7 @@ static ssize_t dgap_ports_oflag_show(struct device *p,
{
struct board_t *bd;
int count = 0;
- int i;
+ unsigned int i;
bd = dgap_verify_board(p);
if (!bd)
@@ -5765,7 +5516,7 @@ static ssize_t dgap_ports_lflag_show(struct device *p,
{
struct board_t *bd;
int count = 0;
- int i;
+ unsigned int i;
bd = dgap_verify_board(p);
if (!bd)
@@ -5785,7 +5536,7 @@ static ssize_t dgap_ports_digi_flag_show(struct device *p,
{
struct board_t *bd;
int count = 0;
- int i;
+ unsigned int i;
bd = dgap_verify_board(p);
if (!bd)
@@ -5805,7 +5556,7 @@ static ssize_t dgap_ports_rxcount_show(struct device *p,
{
struct board_t *bd;
int count = 0;
- int i;
+ unsigned int i;
bd = dgap_verify_board(p);
if (!bd)
@@ -5825,7 +5576,7 @@ static ssize_t dgap_ports_txcount_show(struct device *p,
{
struct board_t *bd;
int count = 0;
- int i;
+ unsigned int i;
bd = dgap_verify_board(p);
if (!bd)
@@ -5839,39 +5590,6 @@ static ssize_t dgap_ports_txcount_show(struct device *p,
}
static DEVICE_ATTR(ports_txcount, S_IRUSR, dgap_ports_txcount_show, NULL);
-/* this function creates the sys files that will export each signal status
- * to sysfs each value will be put in a separate filename
- */
-static void dgap_create_ports_sysfiles(struct board_t *bd)
-{
- dev_set_drvdata(&bd->pdev->dev, bd);
- device_create_file(&(bd->pdev->dev), &dev_attr_ports_state);
- device_create_file(&(bd->pdev->dev), &dev_attr_ports_baud);
- device_create_file(&(bd->pdev->dev), &dev_attr_ports_msignals);
- device_create_file(&(bd->pdev->dev), &dev_attr_ports_iflag);
- device_create_file(&(bd->pdev->dev), &dev_attr_ports_cflag);
- device_create_file(&(bd->pdev->dev), &dev_attr_ports_oflag);
- device_create_file(&(bd->pdev->dev), &dev_attr_ports_lflag);
- device_create_file(&(bd->pdev->dev), &dev_attr_ports_digi_flag);
- device_create_file(&(bd->pdev->dev), &dev_attr_ports_rxcount);
- device_create_file(&(bd->pdev->dev), &dev_attr_ports_txcount);
-}
-
-/* removes all the sys files created for that port */
-static void dgap_remove_ports_sysfiles(struct board_t *bd)
-{
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_state);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_baud);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_msignals);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_iflag);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_cflag);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_oflag);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_lflag);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_digi_flag);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_rxcount);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_txcount);
-}
-
static ssize_t dgap_tty_state_show(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -6242,7 +5960,6 @@ static ssize_t dgap_tty_name_show(struct device *d,
}
ncount += cptr->u.module.nport;
-
}
}
@@ -6266,1086 +5983,1211 @@ static struct attribute *dgap_sysfs_tty_entries[] = {
NULL
};
-static struct attribute_group dgap_tty_attribute_group = {
- .name = NULL,
- .attrs = dgap_sysfs_tty_entries,
-};
-static void dgap_create_tty_sysfs(struct un_t *un, struct device *c)
+/* This function creates the sysfs files that export each signal status;
+ * each value is put in a separate file.
+ */
+static void dgap_create_ports_sysfiles(struct board_t *bd)
{
- int ret;
-
- ret = sysfs_create_group(&c->kobj, &dgap_tty_attribute_group);
- if (ret)
- return;
-
- dev_set_drvdata(c, un);
-
+ dev_set_drvdata(&bd->pdev->dev, bd);
+ device_create_file(&(bd->pdev->dev), &dev_attr_ports_state);
+ device_create_file(&(bd->pdev->dev), &dev_attr_ports_baud);
+ device_create_file(&(bd->pdev->dev), &dev_attr_ports_msignals);
+ device_create_file(&(bd->pdev->dev), &dev_attr_ports_iflag);
+ device_create_file(&(bd->pdev->dev), &dev_attr_ports_cflag);
+ device_create_file(&(bd->pdev->dev), &dev_attr_ports_oflag);
+ device_create_file(&(bd->pdev->dev), &dev_attr_ports_lflag);
+ device_create_file(&(bd->pdev->dev), &dev_attr_ports_digi_flag);
+ device_create_file(&(bd->pdev->dev), &dev_attr_ports_rxcount);
+ device_create_file(&(bd->pdev->dev), &dev_attr_ports_txcount);
}
-static void dgap_remove_tty_sysfs(struct device *c)
+/* removes all the sys files created for that port */
+static void dgap_remove_ports_sysfiles(struct board_t *bd)
{
- sysfs_remove_group(&c->kobj, &dgap_tty_attribute_group);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_state);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_baud);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_msignals);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_iflag);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_cflag);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_oflag);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_lflag);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_digi_flag);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_rxcount);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_txcount);
}
-static void dgap_cleanup_nodes(void)
+/*
+ * Copies the BIOS code from the user to the board,
+ * and starts the BIOS running.
+ */
+static void dgap_do_bios_load(struct board_t *brd, const u8 *ubios, int len)
{
- struct cnode *p;
+ u8 __iomem *addr;
+ uint offset;
+ unsigned int i;
- p = &dgap_head;
+ if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
+ return;
- while (p) {
- struct cnode *tmp = p->next;
+ addr = brd->re_map_membase;
- if (p->type == NULLNODE) {
- p = tmp;
- continue;
- }
+ /*
+ * clear POST area
+ */
+ for (i = 0; i < 16; i++)
+ writeb(0, addr + POSTAREA + i);
- switch (p->type) {
- case BNODE:
- kfree(p->u.board.portstr);
- kfree(p->u.board.addrstr);
- kfree(p->u.board.pcibusstr);
- kfree(p->u.board.pcislotstr);
- kfree(p->u.board.method);
- break;
- case CNODE:
- kfree(p->u.conc.id);
- kfree(p->u.conc.connect);
- break;
- case MNODE:
- kfree(p->u.module.id);
- break;
- case TNODE:
- kfree(p->u.ttyname);
- break;
- case CUNODE:
- kfree(p->u.cuname);
- break;
- case LNODE:
- kfree(p->u.line.cable);
- break;
- case PNODE:
- kfree(p->u.printname);
- break;
- }
+ /*
+ * Download bios
+ */
+ offset = 0x1000;
+ memcpy_toio(addr + offset, ubios, len);
- kfree(p->u.board.status);
- kfree(p);
- p = tmp;
- }
+ writel(0x0bf00401, addr);
+ writel(0, (addr + 4));
+
+ /* Clear the reset, and change states. */
+ writeb(FEPCLR, brd->re_map_port);
}
+
/*
- * Parse a configuration file read into memory as a string.
+ * Checks to see if the BIOS completed running on the card.
*/
-static int dgap_parsefile(char **in)
+static int dgap_test_bios(struct board_t *brd)
{
- struct cnode *p, *brd, *line, *conc;
- int rc;
- char *s;
- int linecnt = 0;
+ u8 __iomem *addr;
+ u16 word;
+ u16 err1;
+ u16 err2;
- p = &dgap_head;
- brd = line = conc = NULL;
+ if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
+ return -EINVAL;
- /* perhaps we are adding to an existing list? */
- while (p->next)
- p = p->next;
+ addr = brd->re_map_membase;
+ word = readw(addr + POSTAREA);
- /* file must start with a BEGIN */
- while ((rc = dgap_gettok(in)) != BEGIN) {
- if (rc == 0) {
- pr_err("unexpected EOF");
- return -1;
- }
+ /*
+ * It can take 5-6 seconds for a board to
+ * pass the bios self test and post results.
+ * Give it 10 seconds.
+ */
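+	/*
+	 * The BIOS posts a two-character ASCII status in the POST area;
+	 * "GD" means the self test passed, so the readw() result is
+	 * compared against those two bytes reinterpreted as a u16.
+	 */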
+ brd->wait_for_bios = 0;
+ while (brd->wait_for_bios < 1000) {
+ /* Check to see if BIOS thinks board is good. (GD). */
+ if (word == *(u16 *) "GD")
+ return 0;
+ msleep_interruptible(10);
+ brd->wait_for_bios++;
+ word = readw(addr + POSTAREA);
}
- for (; ;) {
- int board_type = 0;
- int conc_type = 0;
- int module_type = 0;
+	/* Give up on the board after waiting too long */
+ err1 = readw(addr + SEQUENCE);
+ err2 = readw(addr + ERROR);
+ dev_warn(&brd->pdev->dev, "%s failed diagnostics. Error #(%x,%x).\n",
+ brd->name, err1, err2);
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOBIOS;
- rc = dgap_gettok(in);
- if (rc == 0) {
- pr_err("unexpected EOF");
- return -1;
- }
+ return -EIO;
+}
- switch (rc) {
- case BEGIN: /* should only be 1 begin */
- pr_err("unexpected config_begin\n");
- return -1;
+/*
+ * Copies the FEP code from the user to the board,
+ * and starts the FEP running.
+ */
+static void dgap_do_fep_load(struct board_t *brd, const u8 *ufep, int len)
+{
+ u8 __iomem *addr;
+ uint offset;
- case END:
- return 0;
+ if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
+ return;
- case BOARD: /* board info */
- if (dgap_checknode(p))
- return -1;
+ addr = brd->re_map_membase;
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -1;
+ /*
+ * Download FEP
+ */
+ offset = 0x1000;
+ memcpy_toio(addr + offset, ufep, len);
- p = p->next;
+ /*
+ * If board is a concentrator product, we need to give
+ * it its config string describing how the concentrators look.
+ */
+ if ((brd->type == PCX) || (brd->type == PEPC)) {
+ u8 string[100];
+ u8 __iomem *config;
+ u8 *xconfig;
+ unsigned int i = 0;
- p->type = BNODE;
- p->u.board.status = kstrdup("No", GFP_KERNEL);
- line = conc = NULL;
- brd = p;
- linecnt = -1;
+ xconfig = dgap_create_config_string(brd, string);
- board_type = dgap_gettok(in);
- if (board_type == 0) {
- pr_err("board !!type not specified");
- return -1;
- }
+ /* Write string to board memory */
+ config = addr + CONFIG;
+ for (; i < CONFIGSIZE; i++, config++, xconfig++) {
+ writeb(*xconfig, config);
+ if ((*xconfig & 0xff) == 0xff)
+ break;
+ }
+ }
- p->u.board.type = board_type;
+ writel(0xbfc01004, (addr + 0xc34));
+ writel(0x3, (addr + 0xc30));
- break;
+}
- case IO: /* i/o port */
- if (p->type != BNODE) {
- pr_err("IO port only vaild for boards");
- return -1;
- }
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- p->u.board.portstr = kstrdup(s, GFP_KERNEL);
- if (kstrtol(s, 0, &p->u.board.port)) {
- pr_err("bad number for IO port");
- return -1;
- }
- p->u.board.v_port = 1;
- break;
+/*
+ * Waits for the FEP to report that it is ready for us to use.
+ */
+static int dgap_test_fep(struct board_t *brd)
+{
+ u8 __iomem *addr;
+ u16 word;
+ u16 err1;
+ u16 err2;
- case MEM: /* memory address */
- if (p->type != BNODE) {
- pr_err("memory address only vaild for boards");
- return -1;
- }
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- p->u.board.addrstr = kstrdup(s, GFP_KERNEL);
- if (kstrtoul(s, 0, &p->u.board.addr)) {
- pr_err("bad number for memory address");
- return -1;
- }
- p->u.board.v_addr = 1;
- break;
+ if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
+ return -EINVAL;
- case PCIINFO: /* pci information */
- if (p->type != BNODE) {
- pr_err("memory address only vaild for boards");
- return -1;
- }
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- p->u.board.pcibusstr = kstrdup(s, GFP_KERNEL);
- if (kstrtoul(s, 0, &p->u.board.pcibus)) {
- pr_err("bad number for pci bus");
- return -1;
- }
- p->u.board.v_pcibus = 1;
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- p->u.board.pcislotstr = kstrdup(s, GFP_KERNEL);
- if (kstrtoul(s, 0, &p->u.board.pcislot)) {
- pr_err("bad number for pci slot");
- return -1;
- }
- p->u.board.v_pcislot = 1;
- break;
+ addr = brd->re_map_membase;
+ word = readw(addr + FEPSTAT);
- case METHOD:
- if (p->type != BNODE) {
- pr_err("install method only vaild for boards");
- return -1;
- }
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- p->u.board.method = kstrdup(s, GFP_KERNEL);
- p->u.board.v_method = 1;
- break;
+ /*
+ * It can take 2-3 seconds for the FEP to
+ * be up and running. Give it 5 secs.
+ */
+ brd->wait_for_fep = 0;
+ while (brd->wait_for_fep < 500) {
+ /* Check to see if FEP is up and running now. */
+ if (word == *(u16 *) "OS") {
+ /*
+ * Check to see if the board can support FEP5+ commands.
+ */
+ word = readw(addr + FEP5_PLUS);
+ if (word == *(u16 *) "5A")
+ brd->bd_flags |= BD_FEP5PLUS;
- case STATUS:
- if (p->type != BNODE) {
- pr_err("config status only vaild for boards");
- return -1;
- }
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- p->u.board.status = kstrdup(s, GFP_KERNEL);
- break;
+ return 0;
+ }
+ msleep_interruptible(10);
+ brd->wait_for_fep++;
+ word = readw(addr + FEPSTAT);
+ }
- case NPORTS: /* number of ports */
- if (p->type == BNODE) {
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.board.nport)) {
- pr_err("bad number for number of ports");
- return -1;
- }
- p->u.board.v_nport = 1;
- } else if (p->type == CNODE) {
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.conc.nport)) {
- pr_err("bad number for number of ports");
- return -1;
- }
- p->u.conc.v_nport = 1;
- } else if (p->type == MNODE) {
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.module.nport)) {
- pr_err("bad number for number of ports");
- return -1;
- }
- p->u.module.v_nport = 1;
- } else {
- pr_err("nports only valid for concentrators or modules");
- return -1;
- }
- break;
+	/* Give up on the board after waiting too long */
+ err1 = readw(addr + SEQUENCE);
+ err2 = readw(addr + ERROR);
+ dev_warn(&brd->pdev->dev,
+ "FEPOS for %s not functioning. Error #(%x,%x).\n",
+ brd->name, err1, err2);
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOFEP;
- case ID: /* letter ID used in tty name */
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
+ return -EIO;
+}
- p->u.board.status = kstrdup(s, GFP_KERNEL);
+/*
+ * Physically forces the FEP5 card to reset itself.
+ */
+static void dgap_do_reset_board(struct board_t *brd)
+{
+ u8 check;
+ u32 check1;
+ u32 check2;
+ unsigned int i;
- if (p->type == CNODE) {
- p->u.conc.id = kstrdup(s, GFP_KERNEL);
- p->u.conc.v_id = 1;
- } else if (p->type == MNODE) {
- p->u.module.id = kstrdup(s, GFP_KERNEL);
- p->u.module.v_id = 1;
- } else {
- pr_err("id only valid for concentrators or modules");
- return -1;
- }
- break;
+ if (!brd || (brd->magic != DGAP_BOARD_MAGIC) ||
+ !brd->re_map_membase || !brd->re_map_port)
+ return;
- case STARTO: /* start offset of ID */
- if (p->type == BNODE) {
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.board.start)) {
- pr_err("bad number for start of tty count");
- return -1;
- }
- p->u.board.v_start = 1;
- } else if (p->type == CNODE) {
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.conc.start)) {
- pr_err("bad number for start of tty count");
- return -1;
- }
- p->u.conc.v_start = 1;
- } else if (p->type == MNODE) {
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.module.start)) {
- pr_err("bad number for start of tty count");
- return -1;
- }
- p->u.module.v_start = 1;
- } else {
- pr_err("start only valid for concentrators or modules");
- return -1;
- }
+ /* FEPRST does not vary among supported boards */
+ writeb(FEPRST, brd->re_map_port);
+
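+	/*
+	 * Poll the port register until the reset bits (masked with 0xe)
+	 * read back as FEPRST, allowing up to ~10 ms (1000 * udelay(10))
+	 * before the board is declared failed.
+	 */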
+ for (i = 0; i <= 1000; i++) {
+ check = readb(brd->re_map_port) & 0xe;
+ if (check == FEPRST)
break;
+ udelay(10);
- case TTYN: /* tty name prefix */
- if (dgap_checknode(p))
- return -1;
+ }
+ if (i > 1000) {
+ dev_warn(&brd->pdev->dev,
+ "dgap: Board not resetting... Failing board.\n");
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOFEP;
+ return;
+ }
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -1;
+ /*
+ * Make sure there really is memory out there.
+ */
+ writel(0xa55a3cc3, (brd->re_map_membase + LOWMEM));
+ writel(0x5aa5c33c, (brd->re_map_membase + HIGHMEM));
+ check1 = readl(brd->re_map_membase + LOWMEM);
+ check2 = readl(brd->re_map_membase + HIGHMEM);
- p = p->next;
- p->type = TNODE;
+ if ((check1 != 0xa55a3cc3) || (check2 != 0x5aa5c33c)) {
+ dev_warn(&brd->pdev->dev,
+ "No memory at %p for board.\n",
+ brd->re_map_membase);
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOFEP;
+ return;
+ }
+}
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpeced end of file");
- return -1;
- }
- p->u.ttyname = kstrdup(s, GFP_KERNEL);
- if (!p->u.ttyname)
- return -1;
+#ifdef DIGI_CONCENTRATORS_SUPPORTED
+/*
+ * Sends a concentrator image into the FEP5 board.
+ */
+static void dgap_do_conc_load(struct board_t *brd, u8 *uaddr, int len)
+{
+ char __iomem *vaddr;
+ u16 offset;
+ struct downld_t *to_dp;
- break;
+ if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
+ return;
- case CU: /* cu name prefix */
- if (dgap_checknode(p))
- return -1;
+ vaddr = brd->re_map_membase;
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -1;
+ offset = readw((u16 *) (vaddr + DOWNREQ));
+ to_dp = (struct downld_t *) (vaddr + (int) offset);
+ memcpy_toio(to_dp, uaddr, len);
- p = p->next;
- p->type = CUNODE;
+ /* Tell card we have data for it */
+ writew(0, vaddr + (DOWNREQ));
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpeced end of file");
- return -1;
- }
- p->u.cuname = kstrdup(s, GFP_KERNEL);
- if (!p->u.cuname)
- return -1;
+ brd->conc_dl_status = NO_PENDING_CONCENTRATOR_REQUESTS;
+}
+#endif
- break;
+#define EXPANSION_ROM_SIZE (64 * 1024)
+#define FEP5_ROM_MAGIC (0xFEFFFFFF)
- case LINE: /* line information */
- if (dgap_checknode(p))
- return -1;
- if (!brd) {
- pr_err("must specify board before line info");
- return -1;
- }
- switch (brd->u.board.type) {
- case PPCM:
- pr_err("line not vaild for PC/em");
- return -1;
- }
+static void dgap_get_vpd(struct board_t *brd)
+{
+ u32 magic;
+ u32 base_offset;
+ u16 rom_offset;
+ u16 vpd_offset;
+ u16 image_length;
+ u16 i;
+ u8 byte1;
+ u8 byte2;
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -1;
+ /*
+ * Poke the magic number at the PCI Rom Address location.
+ * If VPD is supported, the value read from that address
+ * will be non-zero.
+ */
+ magic = FEP5_ROM_MAGIC;
+ pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
+ pci_read_config_dword(brd->pdev, PCI_ROM_ADDRESS, &magic);
- p = p->next;
- p->type = LNODE;
- conc = NULL;
- line = p;
- linecnt++;
- break;
+ /* VPD not supported, bail */
+ if (!magic)
+ return;
- case CONC: /* concentrator information */
- if (dgap_checknode(p))
- return -1;
- if (!line) {
- pr_err("must specify line info before concentrator");
- return -1;
- }
+ /*
+	 * To get to the OTPROM memory, we have to send the board's base
+ * address or'ed with 1 into the PCI Rom Address location.
+ */
+ magic = brd->membase | 0x01;
+ pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
+ pci_read_config_dword(brd->pdev, PCI_ROM_ADDRESS, &magic);
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -1;
+ byte1 = readb(brd->re_map_membase);
+ byte2 = readb(brd->re_map_membase + 1);
- p = p->next;
- p->type = CNODE;
- conc = p;
+ /*
+ * If the board correctly swapped to the OTPROM memory,
+ * the first 2 bytes (header) should be 0x55, 0xAA
+ */
+ if (byte1 == 0x55 && byte2 == 0xAA) {
- if (linecnt)
- brd->u.board.conc2++;
- else
- brd->u.board.conc1++;
+ base_offset = 0;
- conc_type = dgap_gettok(in);
- if (conc_type == 0 || conc_type != CX ||
- conc_type != EPC) {
- pr_err("failed to set a type of concentratros");
- return -1;
- }
+ /*
+ * We have to run through all the OTPROM memory looking
+ * for the VPD offset.
+ */
+ while (base_offset <= EXPANSION_ROM_SIZE) {
- p->u.conc.type = conc_type;
+ /*
+ * Lots of magic numbers here.
+ *
+ * The VPD offset is located inside the ROM Data
+ * Structure.
+ *
+ * We also have to remember the length of each
+ * ROM Data Structure, so we can "hop" to the next
+ * entry if the VPD isn't in the current
+ * ROM Data Structure.
+ */
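+			/*
+			 * A sketch of the layout assumed here (standard PCI
+			 * expansion-ROM format): header + 0x18 points to the
+			 * PCI Data Structure, PCIR + 0x10 holds the image
+			 * length in 512-byte blocks, and PCIR + 0x08 holds
+			 * the VPD pointer.
+			 */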
+ rom_offset = readw(brd->re_map_membase +
+ base_offset + 0x18);
+ image_length = readw(brd->re_map_membase +
+ rom_offset + 0x10) * 512;
+ vpd_offset = readw(brd->re_map_membase +
+ rom_offset + 0x08);
- break;
+ /* Found the VPD entry */
+ if (vpd_offset)
+ break;
- case MOD: /* EBI module */
- if (dgap_checknode(p))
- return -1;
- if (!brd) {
- pr_err("must specify board info before EBI modules");
- return -1;
- }
- switch (brd->u.board.type) {
- case PPCM:
- linecnt = 0;
+ /* We didn't find a VPD entry, go to next ROM entry. */
+ base_offset += image_length;
+
+ byte1 = readb(brd->re_map_membase + base_offset);
+ byte2 = readb(brd->re_map_membase + base_offset + 1);
+
+ /*
+ * If the new ROM offset doesn't have 0x55, 0xAA
+ * as its header, we have run out of ROM.
+ */
+ if (byte1 != 0x55 || byte2 != 0xAA)
break;
- default:
- if (!conc) {
- pr_err("must specify concentrator info before EBI module");
- return -1;
- }
+ }
+
+ /*
+ * If we have a VPD offset, then mark the board
+ * as having a valid VPD, and copy VPDSIZE (512) bytes of
+ * that VPD to the buffer we have in our board structure.
+ */
+ if (vpd_offset) {
+ brd->bd_flags |= BD_HAS_VPD;
+ for (i = 0; i < VPDSIZE; i++) {
+ brd->vpd[i] = readb(brd->re_map_membase +
+ vpd_offset + i);
}
+ }
+ }
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -1;
+ /*
+ * We MUST poke the magic number at the PCI Rom Address location again.
+ * This makes the card report the regular board memory back to us,
+ * rather than the OTPROM memory.
+ */
+ magic = FEP5_ROM_MAGIC;
+ pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
+}
- p = p->next;
- p->type = MNODE;
- if (linecnt)
- brd->u.board.module2++;
- else
- brd->u.board.module1++;
+static ssize_t dgap_driver_version_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", DG_PART);
+}
+static DRIVER_ATTR(version, S_IRUSR, dgap_driver_version_show, NULL);
- module_type = dgap_gettok(in);
- if (module_type == 0 || module_type != PORTS ||
- module_type != MODEM) {
- pr_err("failed to set a type of module");
- return -1;
- }
- p->u.module.type = module_type;
+static ssize_t dgap_driver_boards_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", dgap_numboards);
+}
+static DRIVER_ATTR(boards, S_IRUSR, dgap_driver_boards_show, NULL);
- break;
- case CABLE:
- if (p->type == LNODE) {
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- p->u.line.cable = kstrdup(s, GFP_KERNEL);
- p->u.line.v_cable = 1;
- }
- break;
+static ssize_t dgap_driver_maxboards_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", MAXBOARDS);
+}
+static DRIVER_ATTR(maxboards, S_IRUSR, dgap_driver_maxboards_show, NULL);
- case SPEED: /* sync line speed indication */
- if (p->type == LNODE) {
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.line.speed)) {
- pr_err("bad number for line speed");
- return -1;
- }
- p->u.line.v_speed = 1;
- } else if (p->type == CNODE) {
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.conc.speed)) {
- pr_err("bad number for line speed");
- return -1;
- }
- p->u.conc.v_speed = 1;
- } else {
- pr_err("speed valid only for lines or concentrators.");
- return -1;
- }
- break;
- case CONNECT:
- if (p->type == CNODE) {
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- p->u.conc.connect = kstrdup(s, GFP_KERNEL);
- p->u.conc.v_connect = 1;
- }
- break;
- case PRINT: /* transparent print name prefix */
- if (dgap_checknode(p))
- return -1;
+static ssize_t dgap_driver_pollcounter_show(struct device_driver *ddp,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%ld\n", dgap_poll_counter);
+}
+static DRIVER_ATTR(pollcounter, S_IRUSR, dgap_driver_pollcounter_show, NULL);
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -1;
+static ssize_t dgap_driver_pollrate_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%dms\n", dgap_poll_tick);
+}
- p = p->next;
- p->type = PNODE;
+static ssize_t dgap_driver_pollrate_store(struct device_driver *ddp,
+ const char *buf, size_t count)
+{
+ if (sscanf(buf, "%d\n", &dgap_poll_tick) != 1)
+ return -EINVAL;
+ return count;
+}
+static DRIVER_ATTR(pollrate, (S_IRUSR | S_IWUSR), dgap_driver_pollrate_show,
+ dgap_driver_pollrate_store);
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpeced end of file");
- return -1;
- }
- p->u.printname = kstrdup(s, GFP_KERNEL);
- if (!p->u.printname)
- return -1;
- break;
+static int dgap_create_driver_sysfiles(struct pci_driver *dgap_driver)
+{
+ int rc = 0;
+ struct device_driver *driverfs = &dgap_driver->driver;
- case CMAJOR: /* major number */
- if (dgap_checknode(p))
- return -1;
+ rc |= driver_create_file(driverfs, &driver_attr_version);
+ rc |= driver_create_file(driverfs, &driver_attr_boards);
+ rc |= driver_create_file(driverfs, &driver_attr_maxboards);
+ rc |= driver_create_file(driverfs, &driver_attr_pollrate);
+ rc |= driver_create_file(driverfs, &driver_attr_pollcounter);
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -1;
+ return rc;
+}
- p = p->next;
- p->type = JNODE;
+static void dgap_remove_driver_sysfiles(struct pci_driver *dgap_driver)
+{
+ struct device_driver *driverfs = &dgap_driver->driver;
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.majornumber)) {
- pr_err("bad number for major number");
- return -1;
- }
- break;
+ driver_remove_file(driverfs, &driver_attr_version);
+ driver_remove_file(driverfs, &driver_attr_boards);
+ driver_remove_file(driverfs, &driver_attr_maxboards);
+ driver_remove_file(driverfs, &driver_attr_pollrate);
+ driver_remove_file(driverfs, &driver_attr_pollcounter);
+}
- case ALTPIN: /* altpin setting */
- if (dgap_checknode(p))
- return -1;
+static struct attribute_group dgap_tty_attribute_group = {
+ .name = NULL,
+ .attrs = dgap_sysfs_tty_entries,
+};
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -1;
+static void dgap_create_tty_sysfs(struct un_t *un, struct device *c)
+{
+ int ret;
- p = p->next;
- p->type = ANODE;
+ ret = sysfs_create_group(&c->kobj, &dgap_tty_attribute_group);
+ if (ret)
+ return;
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.altpin)) {
- pr_err("bad number for altpin");
- return -1;
- }
- break;
+ dev_set_drvdata(c, un);
- case USEINTR: /* enable interrupt setting */
- if (dgap_checknode(p))
- return -1;
+}
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -1;
+static void dgap_remove_tty_sysfs(struct device *c)
+{
+ sysfs_remove_group(&c->kobj, &dgap_tty_attribute_group);
+}
- p = p->next;
- p->type = INTRNODE;
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.useintr)) {
- pr_err("bad number for useintr");
- return -1;
- }
- break;
+/*
+ * Create pr and tty device entries
+ */
+static int dgap_tty_register_ports(struct board_t *brd)
+{
+ struct channel_t *ch;
+ int i;
+ int ret;
- case TTSIZ: /* size of tty structure */
- if (dgap_checknode(p))
- return -1;
+ brd->serial_ports = kcalloc(brd->nasync, sizeof(*brd->serial_ports),
+ GFP_KERNEL);
+ if (!brd->serial_ports)
+ return -ENOMEM;
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -1;
+ brd->printer_ports = kcalloc(brd->nasync, sizeof(*brd->printer_ports),
+ GFP_KERNEL);
+ if (!brd->printer_ports) {
+ ret = -ENOMEM;
+ goto free_serial_ports;
+ }
- p = p->next;
- p->type = TSNODE;
+ for (i = 0; i < brd->nasync; i++) {
+ tty_port_init(&brd->serial_ports[i]);
+ tty_port_init(&brd->printer_ports[i]);
+ }
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.ttysize)) {
- pr_err("bad number for ttysize");
- return -1;
- }
- break;
+ ch = brd->channels[0];
+ for (i = 0; i < brd->nasync; i++, ch = brd->channels[i]) {
- case CHSIZ: /* channel structure size */
- if (dgap_checknode(p))
- return -1;
+ struct device *classp;
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -1;
+ classp = tty_port_register_device(&brd->serial_ports[i],
+ brd->serial_driver,
+ i, NULL);
- p = p->next;
- p->type = CSNODE;
+ if (IS_ERR(classp)) {
+ ret = PTR_ERR(classp);
+ goto unregister_ttys;
+ }
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.chsize)) {
- pr_err("bad number for chsize");
- return -1;
- }
- break;
+ dgap_create_tty_sysfs(&ch->ch_tun, classp);
+ ch->ch_tun.un_sysfs = classp;
- case BSSIZ: /* board structure size */
- if (dgap_checknode(p))
- return -1;
+ classp = tty_port_register_device(&brd->printer_ports[i],
+ brd->print_driver,
+ i, NULL);
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -1;
+ if (IS_ERR(classp)) {
+ ret = PTR_ERR(classp);
+ goto unregister_ttys;
+ }
- p = p->next;
- p->type = BSNODE;
+ dgap_create_tty_sysfs(&ch->ch_pun, classp);
+ ch->ch_pun.un_sysfs = classp;
+ }
+ dgap_create_ports_sysfiles(brd);
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.bssize)) {
- pr_err("bad number for bssize");
- return -1;
- }
- break;
+ return 0;
- case UNTSIZ: /* sched structure size */
- if (dgap_checknode(p))
- return -1;
+unregister_ttys:
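+	/*
+	 * Unwind from the failing index downwards; un_sysfs is only set
+	 * after a device registered successfully, so a partially set-up
+	 * channel is handled correctly as well.
+	 */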
+ while (i >= 0) {
+ ch = brd->channels[i];
+ if (ch->ch_tun.un_sysfs) {
+ dgap_remove_tty_sysfs(ch->ch_tun.un_sysfs);
+ tty_unregister_device(brd->serial_driver, i);
+ }
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -1;
+ if (ch->ch_pun.un_sysfs) {
+ dgap_remove_tty_sysfs(ch->ch_pun.un_sysfs);
+ tty_unregister_device(brd->print_driver, i);
+ }
+ i--;
+ }
- p = p->next;
- p->type = USNODE;
+ for (i = 0; i < brd->nasync; i++) {
+ tty_port_destroy(&brd->serial_ports[i]);
+ tty_port_destroy(&brd->printer_ports[i]);
+ }
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.unsize)) {
- pr_err("bad number for schedsize");
- return -1;
- }
- break;
+ kfree(brd->printer_ports);
+ brd->printer_ports = NULL;
- case F2SIZ: /* f2200 structure size */
- if (dgap_checknode(p))
- return -1;
+free_serial_ports:
+ kfree(brd->serial_ports);
+ brd->serial_ports = NULL;
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -1;
+ return ret;
+}
- p = p->next;
- p->type = FSNODE;
+/*
+ * dgap_cleanup_tty()
+ *
+ * Uninitialize the TTY portion of this driver. Free all memory and
+ * resources.
+ */
+static void dgap_cleanup_tty(struct board_t *brd)
+{
+ struct device *dev;
+ unsigned int i;
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.f2size)) {
- pr_err("bad number for f2200size");
- return -1;
- }
- break;
+ for (i = 0; i < brd->nasync; i++) {
+ tty_port_destroy(&brd->serial_ports[i]);
+ dev = brd->channels[i]->ch_tun.un_sysfs;
+ dgap_remove_tty_sysfs(dev);
+ tty_unregister_device(brd->serial_driver, i);
+ }
+ tty_unregister_driver(brd->serial_driver);
+ put_tty_driver(brd->serial_driver);
+ kfree(brd->serial_ports);
- case VPSIZ: /* vpix structure size */
- if (dgap_checknode(p))
- return -1;
+ for (i = 0; i < brd->nasync; i++) {
+ tty_port_destroy(&brd->printer_ports[i]);
+ dev = brd->channels[i]->ch_pun.un_sysfs;
+ dgap_remove_tty_sysfs(dev);
+ tty_unregister_device(brd->print_driver, i);
+ }
+ tty_unregister_driver(brd->print_driver);
+ put_tty_driver(brd->print_driver);
+ kfree(brd->printer_ports);
+}
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -1;
+static int dgap_request_irq(struct board_t *brd)
+{
+ int rc;
- p = p->next;
- p->type = VSNODE;
+ if (!brd || brd->magic != DGAP_BOARD_MAGIC)
+ return -ENODEV;
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.vpixsize)) {
- pr_err("bad number for vpixsize");
- return -1;
- }
- break;
- }
+ /*
+ * Set up our interrupt handler if we are set to do interrupts.
+ */
+ if (dgap_config_get_useintr(brd) && brd->irq) {
+
+ rc = request_irq(brd->irq, dgap_intr, IRQF_SHARED, "DGAP", brd);
+
+ if (!rc)
+ brd->intr_used = 1;
}
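+	/*
+	 * A failed request_irq() is not fatal: intr_used stays 0 and the
+	 * driver falls back to polled operation (see the edelay setup in
+	 * dgap_tty_init()).
+	 */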
+ return 0;
}
-/*
- * dgap_sindex: much like index(), but it looks for a match of any character in
- * the group, and returns that position. If the first character is a ^, then
- * this will match the first occurrence not in that group.
- */
-static char *dgap_sindex(char *string, char *group)
+static void dgap_free_irq(struct board_t *brd)
{
- char *ptr;
+ if (brd->intr_used && brd->irq)
+ free_irq(brd->irq, brd);
+}
- if (!string || !group)
- return (char *) NULL;
+static int dgap_firmware_load(struct pci_dev *pdev, int card_type,
+ struct board_t *brd)
+{
+ const struct firmware *fw;
+ char *tmp_ptr;
+ int ret;
+ char *dgap_config_buf;
- if (*group == '^') {
- group++;
- for (; *string; string++) {
- for (ptr = group; *ptr; ptr++) {
- if (*ptr == *string)
- break;
- }
- if (*ptr == '\0')
- return string;
+ dgap_get_vpd(brd);
+ dgap_do_reset_board(brd);
+
+ if (fw_info[card_type].conf_name) {
+ ret = request_firmware(&fw, fw_info[card_type].conf_name,
+ &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "config file %s not found\n",
+ fw_info[card_type].conf_name);
+ return ret;
}
- } else {
- for (; *string; string++) {
- for (ptr = group; *ptr; ptr++) {
- if (*ptr == *string)
- return string;
- }
+
+ dgap_config_buf = kzalloc(fw->size + 1, GFP_KERNEL);
+ if (!dgap_config_buf) {
+ release_firmware(fw);
+ return -ENOMEM;
+ }
+
+ memcpy(dgap_config_buf, fw->data, fw->size);
+ release_firmware(fw);
+
+ /*
+		 * dgap_parsefile() consumes and advances the pointer it is
+		 * given, so hand it a copy and keep dgap_config_buf intact
+		 * for kfree() below.
+ */
+ tmp_ptr = dgap_config_buf;
+
+ if (dgap_parsefile(&tmp_ptr) != 0) {
+ kfree(dgap_config_buf);
+ return -EINVAL;
}
+ kfree(dgap_config_buf);
}
- return (char *) NULL;
-}
+ /*
+ * Match this board to a config the user created for us.
+ */
+ brd->bd_config =
+ dgap_find_config(brd->type, brd->pci_bus, brd->pci_slot);
-/*
- * Get a token from the input file; return 0 if end of file is reached
- */
-static int dgap_gettok(char **in)
-{
- char *w;
- struct toklist *t;
+ /*
+ * Because the 4 port Xr products share the same PCI ID
+ * as the 8 port Xr products, if we receive a NULL config
+ * back, and this is a PAPORT8 board, retry with a
+ * PAPORT4 attempt as well.
+ */
+ if (brd->type == PAPORT8 && !brd->bd_config)
+ brd->bd_config =
+ dgap_find_config(PAPORT4, brd->pci_bus, brd->pci_slot);
- if (strstr(dgap_cword, "board")) {
- w = dgap_getword(in);
- snprintf(dgap_cword, MAXCWORD, "%s", w);
- for (t = dgap_brdtype; t->token != 0; t++) {
- if (!strcmp(w, t->string))
- return t->token;
+ if (!brd->bd_config) {
+ dev_err(&pdev->dev, "No valid configuration found\n");
+ return -EINVAL;
+ }
+
+ if (fw_info[card_type].bios_name) {
+ ret = request_firmware(&fw, fw_info[card_type].bios_name,
+ &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "bios file %s not found\n",
+ fw_info[card_type].bios_name);
+ return ret;
}
- } else {
- while ((w = dgap_getword(in))) {
- snprintf(dgap_cword, MAXCWORD, "%s", w);
- for (t = dgap_tlist; t->token != 0; t++) {
- if (!strcmp(w, t->string))
- return t->token;
- }
+ dgap_do_bios_load(brd, fw->data, fw->size);
+ release_firmware(fw);
+
+ /* Wait for BIOS to test board... */
+ ret = dgap_test_bios(brd);
+ if (ret)
+ return ret;
+ }
+
+ if (fw_info[card_type].fep_name) {
+ ret = request_firmware(&fw, fw_info[card_type].fep_name,
+ &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "dgap: fep file %s not found\n",
+ fw_info[card_type].fep_name);
+ return ret;
+ }
+ dgap_do_fep_load(brd, fw->data, fw->size);
+ release_firmware(fw);
+
+ /* Wait for FEP to load on board... */
+ ret = dgap_test_fep(brd);
+ if (ret)
+ return ret;
+ }
+
+#ifdef DIGI_CONCENTRATORS_SUPPORTED
+ /*
+ * If this is a CX or EPCX, we need to see if the firmware
+ * is requesting a concentrator image from us.
+ */
+	if ((brd->type == PCX) || (brd->type == PEPC)) {
+ chk_addr = (u16 *) (vaddr + DOWNREQ);
+ /* Nonzero if FEP is requesting concentrator image. */
+ check = readw(chk_addr);
+ vaddr = brd->re_map_membase;
+ }
+
+ if (fw_info[card_type].con_name && check && vaddr) {
+ ret = request_firmware(&fw, fw_info[card_type].con_name,
+ &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "conc file %s not found\n",
+ fw_info[card_type].con_name);
+ return ret;
}
+ /* Put concentrator firmware loading code here */
+ offset = readw((u16 *) (vaddr + DOWNREQ));
+ memcpy_toio(offset, fw->data, fw->size);
+
+		dgap_do_conc_load(brd, (char *)fw->data, fw->size);
+ release_firmware(fw);
}
+#endif
return 0;
}
/*
- * get a word from the input stream, also keep track of current line number.
- * words are separated by whitespace.
+ * dgap_tty_init()
+ *
+ * Init the tty subsystem. Called once per board after board has been
+ * downloaded and init'ed.
*/
-static char *dgap_getword(char **in)
+static int dgap_tty_init(struct board_t *brd)
{
- char *ret_ptr = *in;
+ int i;
+ int tlw;
+ uint true_count;
+ u8 __iomem *vaddr;
+ u8 modem;
+ struct channel_t *ch;
+ struct bs_t __iomem *bs;
+ struct cm_t __iomem *cm;
+ int ret;
- char *ptr = dgap_sindex(*in, " \t\n");
+ /*
+ * Initialize board structure elements.
+ */
- /* If no word found, return null */
- if (!ptr)
- return NULL;
+ vaddr = brd->re_map_membase;
+ true_count = readw((vaddr + NCHAN));
- /* Mark new location for our buffer */
- *ptr = '\0';
- *in = ptr + 1;
+ brd->nasync = dgap_config_get_num_prts(brd);
- /* Eat any extra spaces/tabs/newlines that might be present */
- while (*in && **in && ((**in == ' ') ||
- (**in == '\t') ||
- (**in == '\n'))) {
- **in = '\0';
- *in = *in + 1;
- }
+ if (!brd->nasync)
+ brd->nasync = brd->maxports;
- return ret_ptr;
-}
+ if (brd->nasync > brd->maxports)
+ brd->nasync = brd->maxports;
-/*
- * dgap_checknode: see if all the necessary info has been supplied for a node
- * before creating the next node.
- */
-static int dgap_checknode(struct cnode *p)
-{
- switch (p->type) {
- case LNODE:
- if (p->u.line.v_speed == 0) {
- pr_err("line speed not specified");
- return 1;
- }
- return 0;
+ if (true_count != brd->nasync) {
+ dev_warn(&brd->pdev->dev,
+ "%s configured for %d ports, has %d ports.\n",
+ brd->name, brd->nasync, true_count);
- case CNODE:
- if (p->u.conc.v_speed == 0) {
- pr_err("concentrator line speed not specified");
- return 1;
- }
- if (p->u.conc.v_nport == 0) {
- pr_err("number of ports on concentrator not specified");
- return 1;
+ if ((brd->type == PPCM) &&
+ (true_count == 64 || true_count == 0)) {
+ dev_warn(&brd->pdev->dev,
+ "Please make SURE the EBI cable running from the card\n");
+ dev_warn(&brd->pdev->dev,
+ "to each EM module is plugged into EBI IN!\n");
}
- if (p->u.conc.v_id == 0) {
- pr_err("concentrator id letter not specified");
- return 1;
+
+ brd->nasync = true_count;
+
+ /* If no ports, don't bother going any further */
+ if (!brd->nasync) {
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOFEP;
+ return -EIO;
}
- return 0;
+ }
- case MNODE:
- if (p->u.module.v_nport == 0) {
- pr_err("number of ports on EBI module not specified");
- return 1;
+ /*
+ * Allocate channel memory that might not have been allocated
+ * when the driver was first loaded.
+ */
+ for (i = 0; i < brd->nasync; i++) {
+ brd->channels[i] =
+ kzalloc(sizeof(struct channel_t), GFP_KERNEL);
+ if (!brd->channels[i]) {
+ ret = -ENOMEM;
+ goto free_chan;
}
- if (p->u.module.v_id == 0) {
- pr_err("EBI module id letter not specified");
- return 1;
+ }
+
+ ch = brd->channels[0];
+ vaddr = brd->re_map_membase;
+
+ bs = (struct bs_t __iomem *) ((ulong) vaddr + CHANBUF);
+ cm = (struct cm_t __iomem *) ((ulong) vaddr + CMDBUF);
+
+ brd->bd_bs = bs;
+
+ /* Set up channel variables */
+ for (i = 0; i < brd->nasync; i++, ch = brd->channels[i], bs++) {
+
+ spin_lock_init(&ch->ch_lock);
+
+ /* Store all our magic numbers */
+ ch->magic = DGAP_CHANNEL_MAGIC;
+ ch->ch_tun.magic = DGAP_UNIT_MAGIC;
+ ch->ch_tun.un_type = DGAP_SERIAL;
+ ch->ch_tun.un_ch = ch;
+ ch->ch_tun.un_dev = i;
+
+ ch->ch_pun.magic = DGAP_UNIT_MAGIC;
+ ch->ch_pun.un_type = DGAP_PRINT;
+ ch->ch_pun.un_ch = ch;
+ ch->ch_pun.un_dev = i;
+
+ ch->ch_vaddr = vaddr;
+ ch->ch_bs = bs;
+ ch->ch_cm = cm;
+ ch->ch_bd = brd;
+ ch->ch_portnum = i;
+ ch->ch_digi = dgap_digi_init;
+
+ /*
+ * Set up digi dsr and dcd bits based on altpin flag.
+ */
+ if (dgap_config_get_altpin(brd)) {
+ ch->ch_dsr = DM_CD;
+ ch->ch_cd = DM_DSR;
+ ch->ch_digi.digi_flags |= DIGI_ALTPIN;
+ } else {
+ ch->ch_cd = DM_CD;
+ ch->ch_dsr = DM_DSR;
}
- return 0;
+
+ ch->ch_taddr = vaddr + (ioread16(&(ch->ch_bs->tx_seg)) << 4);
+ ch->ch_raddr = vaddr + (ioread16(&(ch->ch_bs->rx_seg)) << 4);
+ ch->ch_tx_win = 0;
+ ch->ch_rx_win = 0;
+ ch->ch_tsize = readw(&(ch->ch_bs->tx_max)) + 1;
+ ch->ch_rsize = readw(&(ch->ch_bs->rx_max)) + 1;
+ ch->ch_tstart = 0;
+ ch->ch_rstart = 0;
+
+ /*
+ * Set queue water marks, interrupt mask,
+ * and general tty parameters.
+ */
+ tlw = ch->ch_tsize >= 2000 ? ((ch->ch_tsize * 5) / 8) :
+ ch->ch_tsize / 2;
+ ch->ch_tlw = tlw;
+
+ dgap_cmdw(ch, STLOW, tlw, 0);
+
+ dgap_cmdw(ch, SRLOW, ch->ch_rsize / 2, 0);
+
+ dgap_cmdw(ch, SRHIGH, 7 * ch->ch_rsize / 8, 0);
+
+ ch->ch_mistat = readb(&(ch->ch_bs->m_stat));
+
+ init_waitqueue_head(&ch->ch_flags_wait);
+ init_waitqueue_head(&ch->ch_tun.un_flags_wait);
+ init_waitqueue_head(&ch->ch_pun.un_flags_wait);
+
+ /* Turn on all modem interrupts for now */
+ modem = (DM_CD | DM_DSR | DM_CTS | DM_RI);
+ writeb(modem, &(ch->ch_bs->m_int));
+
+ /*
+ * Set edelay to 0 if interrupts are turned on,
+ * otherwise set edelay to the usual 100.
+ */
+ if (brd->intr_used)
+ writew(0, &(ch->ch_bs->edelay));
+ else
+ writew(100, &(ch->ch_bs->edelay));
+
+ writeb(1, &(ch->ch_bs->idata));
}
+
return 0;
+
+free_chan:
+ while (--i >= 0) {
+ kfree(brd->channels[i]);
+ brd->channels[i] = NULL;
+ }
+ return ret;
}
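
As an aside, here is a minimal sketch of the water-mark arithmetic programmed above through dgap_cmdw() (illustrative only; the helper name is made up, but the 1/2, 5/8 and 7/8 fractions mirror the STLOW/SRLOW/SRHIGH values set in dgap_tty_init()):

/* Illustrative sketch, not driver code: FEP queue water-mark math. */
static unsigned int example_tx_low_water(unsigned int tsize)
{
	/* Large transmit buffers wake the writer at 5/8 full, small ones at 1/2. */
	return (tsize >= 2000) ? (tsize * 5) / 8 : tsize / 2;
}

/*
 * Example: tsize = 3072 gives STLOW 1920; rsize = 1024 gives SRLOW 512
 * and SRHIGH 896 (7 * 1024 / 8).
 */
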
/*
- * Given a board pointer, returns whether we should use interrupts or not.
+ * dgap_tty_free()
+ *
+ * Free the channels allocated in dgap_tty_init().
*/
-static uint dgap_config_get_useintr(struct board_t *bd)
+static void dgap_tty_free(struct board_t *brd)
{
- struct cnode *p;
+ int i;
- if (!bd)
- return 0;
+ for (i = 0; i < brd->nasync; i++)
+ kfree(brd->channels[i]);
+}
- for (p = bd->bd_config; p; p = p->next) {
- if (p->type == INTRNODE) {
- /*
- * check for pcxr types.
- */
- return p->u.useintr;
- }
- }
+static int dgap_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int rc;
+ struct board_t *brd;
+
+ if (dgap_numboards >= MAXBOARDS)
+ return -EPERM;
+
+ rc = pci_enable_device(pdev);
+ if (rc)
+ return -EIO;
+
+ brd = dgap_found_board(pdev, ent->driver_data, dgap_numboards);
+ if (IS_ERR(brd))
+ return PTR_ERR(brd);
+
+ rc = dgap_firmware_load(pdev, ent->driver_data, brd);
+ if (rc)
+ goto cleanup_brd;
+
+ rc = dgap_alloc_flipbuf(brd);
+ if (rc)
+ goto cleanup_brd;
+
+ rc = dgap_tty_register(brd);
+ if (rc)
+ goto free_flipbuf;
+
+ rc = dgap_request_irq(brd);
+ if (rc)
+ goto unregister_tty;
+
+ /*
+ * Do tty device initialization.
+ */
+ rc = dgap_tty_init(brd);
+ if (rc < 0)
+ goto free_irq;
+
+ rc = dgap_tty_register_ports(brd);
+ if (rc)
+ goto tty_free;
+
+ brd->state = BOARD_READY;
+ brd->dpastatus = BD_RUNNING;
+
+ dgap_board[dgap_numboards++] = brd;
- /* If not found, then don't turn on interrupts. */
return 0;
+
+tty_free:
+ dgap_tty_free(brd);
+free_irq:
+ dgap_free_irq(brd);
+unregister_tty:
+ dgap_tty_unregister(brd);
+free_flipbuf:
+ dgap_free_flipbuf(brd);
+cleanup_brd:
+ dgap_cleanup_nodes();
+ dgap_unmap(brd);
+ kfree(brd);
+
+ return rc;
}
+static void dgap_remove_one(struct pci_dev *dev)
+{
+ /* Do Nothing */
+}
+
+static struct pci_driver dgap_driver = {
+ .name = "dgap",
+ .probe = dgap_init_one,
+ .id_table = dgap_pci_tbl,
+ .remove = dgap_remove_one,
+};
+
/*
- * Given a board pointer, returns whether we turn on altpin or not.
+ * Start of driver.
*/
-static uint dgap_config_get_altpin(struct board_t *bd)
+static int dgap_start(void)
{
- struct cnode *p;
+ int rc;
+ unsigned long flags;
+ struct device *device;
- if (!bd)
- return 0;
+ dgap_numboards = 0;
- for (p = bd->bd_config; p; p = p->next) {
- if (p->type == ANODE) {
- /*
- * check for pcxr types.
- */
- return p->u.altpin;
- }
+ pr_info("For the tools package please visit http://www.digi.com\n");
+
+ /*
+ * Register our base character device into the kernel.
+ */
+
+ /*
+ * Register management/dpa devices
+ */
+ rc = register_chrdev(DIGI_DGAP_MAJOR, "dgap", &dgap_board_fops);
+ if (rc < 0)
+ return rc;
+
+ dgap_class = class_create(THIS_MODULE, "dgap_mgmt");
+ if (IS_ERR(dgap_class)) {
+ rc = PTR_ERR(dgap_class);
+ goto failed_class;
}
- /* If not found, then don't turn on interrupts. */
- return 0;
+ device = device_create(dgap_class, NULL,
+ MKDEV(DIGI_DGAP_MAJOR, 0),
+ NULL, "dgap_mgmt");
+ if (IS_ERR(device)) {
+ rc = PTR_ERR(device);
+ goto failed_device;
+ }
+
+ /* Start the poller */
+ spin_lock_irqsave(&dgap_poll_lock, flags);
+ init_timer(&dgap_poll_timer);
+ dgap_poll_timer.function = dgap_poll_handler;
+ dgap_poll_timer.data = 0;
+ dgap_poll_time = jiffies + dgap_jiffies_from_ms(dgap_poll_tick);
+ dgap_poll_timer.expires = dgap_poll_time;
+ spin_unlock_irqrestore(&dgap_poll_lock, flags);
+
+ add_timer(&dgap_poll_timer);
+
+ return rc;
+
+failed_device:
+ class_destroy(dgap_class);
+failed_class:
+ unregister_chrdev(DIGI_DGAP_MAJOR, "dgap");
+ return rc;
}
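
For context, dgap_start() arms the poll timer with dgap_jiffies_from_ms(dgap_poll_tick). A plausible minimal equivalent of that conversion is sketched below (an assumption -- the driver's real helper may differ, and the kernel's msecs_to_jiffies() does the same job):

/* Hedged sketch: convert a poll tick in milliseconds into jiffies. */
static unsigned long example_jiffies_from_ms(unsigned long ms)
{
	/* Round up so a nonzero delay never truncates to zero jiffies. */
	return (ms * HZ + 999) / 1000;
}
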
-/*
- * Given a specific type of board, if found, detached link and
- * returns the first occurrence in the list.
- */
-static struct cnode *dgap_find_config(int type, int bus, int slot)
+static void dgap_stop(void)
{
- struct cnode *p, *prev, *prev2, *found;
+ unsigned long lock_flags;
- p = &dgap_head;
+ spin_lock_irqsave(&dgap_poll_lock, lock_flags);
+ dgap_poll_stop = 1;
+ spin_unlock_irqrestore(&dgap_poll_lock, lock_flags);
- while (p->next) {
- prev = p;
- p = p->next;
+ del_timer_sync(&dgap_poll_timer);
- if (p->type != BNODE)
- continue;
+ device_destroy(dgap_class, MKDEV(DIGI_DGAP_MAJOR, 0));
+ class_destroy(dgap_class);
+ unregister_chrdev(DIGI_DGAP_MAJOR, "dgap");
+}
- if (p->u.board.type != type)
- continue;
+/*
+ * dgap_cleanup_board()
+ *
+ * Free all the memory associated with a board
+ */
+static void dgap_cleanup_board(struct board_t *brd)
+{
+ unsigned int i;
- if (p->u.board.v_pcibus &&
- p->u.board.pcibus != bus)
- continue;
+ if (!brd || brd->magic != DGAP_BOARD_MAGIC)
+ return;
- if (p->u.board.v_pcislot &&
- p->u.board.pcislot != slot)
- continue;
+ dgap_free_irq(brd);
- found = p;
- /*
- * Keep walking thru the list till we
- * find the next board.
- */
- while (p->next) {
- prev2 = p;
- p = p->next;
+ tasklet_kill(&brd->helper_tasklet);
- if (p->type != BNODE)
- continue;
+ dgap_unmap(brd);
- /*
- * Mark the end of our 1 board
- * chain of configs.
- */
- prev2->next = NULL;
+ /* Free all allocated channels structs */
+ for (i = 0; i < MAXPORTS ; i++)
+ kfree(brd->channels[i]);
- /*
- * Link the "next" board to the
- * previous board, effectively
- * "unlinking" our board from
- * the main config.
- */
- prev->next = p;
+ kfree(brd->flipbuf);
+ kfree(brd->flipflagbuf);
- return found;
- }
- /*
- * It must be the last board in the list.
- */
- prev->next = NULL;
- return found;
- }
- return NULL;
+ dgap_board[brd->boardnum] = NULL;
+
+ kfree(brd);
}
+
+/************************************************************************
+ *
+ * Driver load/unload functions
+ *
+ ************************************************************************/
+
/*
- * Given a board pointer, walks the config link, counting up
- * all ports user specified should be on the board.
- * (This does NOT mean they are all actually present right now tho)
+ * init_module()
+ *
+ * Module load. This is where it all starts.
*/
-static uint dgap_config_get_num_prts(struct board_t *bd)
+static int dgap_init_module(void)
{
- int count = 0;
- struct cnode *p;
+ int rc;
- if (!bd)
- return 0;
+ pr_info("%s, Digi International Part Number %s\n", DG_NAME, DG_PART);
- for (p = bd->bd_config; p; p = p->next) {
+ rc = dgap_start();
+ if (rc)
+ return rc;
- switch (p->type) {
- case BNODE:
- /*
- * check for pcxr types.
- */
- if (p->u.board.type > EPCFE)
- count += p->u.board.nport;
- break;
- case CNODE:
- count += p->u.conc.nport;
- break;
- case MNODE:
- count += p->u.module.nport;
- break;
- }
- }
- return count;
+ rc = pci_register_driver(&dgap_driver);
+ if (rc)
+ goto err_stop;
+
+ rc = dgap_create_driver_sysfiles(&dgap_driver);
+ if (rc)
+ goto err_unregister;
+
+ dgap_driver_state = DRIVER_READY;
+
+ return 0;
+
+err_unregister:
+ pci_unregister_driver(&dgap_driver);
+err_stop:
+ dgap_stop();
+
+ return rc;
}
-static char *dgap_create_config_string(struct board_t *bd, char *string)
+/*
+ * dgap_cleanup_module()
+ *
+ * Module unload. This is where it all ends.
+ */
+static void dgap_cleanup_module(void)
{
- char *ptr = string;
- struct cnode *p;
- struct cnode *q;
- int speed;
+ unsigned int i;
+ ulong lock_flags;
- if (!bd) {
- *ptr = 0xff;
- return string;
- }
+ spin_lock_irqsave(&dgap_poll_lock, lock_flags);
+ dgap_poll_stop = 1;
+ spin_unlock_irqrestore(&dgap_poll_lock, lock_flags);
- for (p = bd->bd_config; p; p = p->next) {
+ /* Turn off poller right away. */
+ del_timer_sync(&dgap_poll_timer);
- switch (p->type) {
- case LNODE:
- *ptr = '\0';
- ptr++;
- *ptr = p->u.line.speed;
- ptr++;
- break;
- case CNODE:
- /*
- * Because the EPC/con concentrators can have EM modules
- * hanging off of them, we have to walk ahead in the
- * list and keep adding the number of ports on each EM
- * to the config. UGH!
- */
- speed = p->u.conc.speed;
- q = p->next;
- if (q && (q->type == MNODE)) {
- *ptr = (p->u.conc.nport + 0x80);
- ptr++;
- p = q;
- while (q->next && (q->next->type) == MNODE) {
- *ptr = (q->u.module.nport + 0x80);
- ptr++;
- p = q;
- q = q->next;
- }
- *ptr = q->u.module.nport;
- ptr++;
- } else {
- *ptr = p->u.conc.nport;
- ptr++;
- }
+ dgap_remove_driver_sysfiles(&dgap_driver);
- *ptr = speed;
- ptr++;
- break;
- }
+ device_destroy(dgap_class, MKDEV(DIGI_DGAP_MAJOR, 0));
+ class_destroy(dgap_class);
+ unregister_chrdev(DIGI_DGAP_MAJOR, "dgap");
+
+ for (i = 0; i < dgap_numboards; ++i) {
+ dgap_remove_ports_sysfiles(dgap_board[i]);
+ dgap_cleanup_tty(dgap_board[i]);
+ dgap_cleanup_board(dgap_board[i]);
}
- *ptr = 0xff;
- return string;
+ dgap_cleanup_nodes();
+
+ if (dgap_numboards)
+ pci_unregister_driver(&dgap_driver);
}
+
+module_init(dgap_init_module);
+module_exit(dgap_cleanup_module);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Digi International, http://www.digi.com");
+MODULE_DESCRIPTION("Driver for the Digi International EPCA PCI based product line");
+MODULE_SUPPORTED_DEVICE("dgap");
diff --git a/drivers/staging/dgap/dgap.h b/drivers/staging/dgap/dgap.h
index 14e2ed0fe39..684033156e8 100644
--- a/drivers/staging/dgap/dgap.h
+++ b/drivers/staging/dgap/dgap.h
@@ -584,9 +584,6 @@ struct board_t {
struct tty_port *printer_ports;
char print_name[200];
- u32 dgap_serial_major;
- u32 dgap_transparent_print_major;
-
struct bs_t __iomem *bd_bs; /* Base structure pointer */
char *flipbuf; /* Our flip buffer, alloced if */
diff --git a/drivers/staging/dgnc/dgnc_cls.c b/drivers/staging/dgnc/dgnc_cls.c
index a17f4f6095c..bedc5221b6f 100644
--- a/drivers/staging/dgnc/dgnc_cls.c
+++ b/drivers/staging/dgnc/dgnc_cls.c
@@ -724,10 +724,8 @@ static void cls_tasklet(unsigned long data)
int state = 0;
int ports = 0;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC) {
- APR(("poll_tasklet() - NULL or bad bd.\n"));
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
return;
- }
/* Cache a couple board values */
spin_lock_irqsave(&bd->bd_lock, flags);
@@ -794,25 +792,17 @@ static void cls_tasklet(unsigned long data)
*/
static irqreturn_t cls_intr(int irq, void *voidbrd)
{
- struct dgnc_board *brd = (struct dgnc_board *) voidbrd;
+ struct dgnc_board *brd = voidbrd;
uint i = 0;
unsigned char poll_reg;
unsigned long flags;
- if (!brd) {
- APR(("Received interrupt (%d) with null board associated\n",
- irq));
- return IRQ_NONE;
- }
-
/*
- * Check to make sure its for us.
+	 * Check to make sure we didn't receive an interrupt with a null board
+	 * associated or with a board pointer that wasn't ours.
*/
- if (brd->magic != DGNC_BOARD_MAGIC) {
- APR(("Received interrupt (%d) with a board pointer that wasn't ours!\n",
- irq));
+ if (!brd || brd->magic != DGNC_BOARD_MAGIC)
return IRQ_NONE;
- }
spin_lock_irqsave(&brd->bd_intr_lock, flags);
@@ -928,8 +918,6 @@ static void cls_copy_data_from_uart_to_queue(struct channel_t *ch)
ch->ch_equeue[head] = linestatus & (UART_LSR_BI | UART_LSR_PE
| UART_LSR_FE);
ch->ch_rqueue[head] = readb(&ch->ch_cls_uart->txrx);
- dgnc_sniff_nowait_nolock(ch, "UART READ",
- ch->ch_rqueue + head, 1);
qleft--;
@@ -964,7 +952,6 @@ static int cls_drain(struct tty_struct *tty, uint seconds)
unsigned long flags;
struct channel_t *ch;
struct un_t *un;
- int rc = 0;
if (!tty || tty->magic != TTY_MAGIC)
return -ENXIO;
@@ -984,12 +971,11 @@ static int cls_drain(struct tty_struct *tty, uint seconds)
/*
* NOTE: Do something with time passed in.
*/
- rc = wait_event_interruptible(un->un_flags_wait,
- ((un->un_flags & UN_EMPTY) == 0));
/* If ret is non-zero, user ctrl-c'ed us */
- return rc;
+ return wait_event_interruptible(un->un_flags_wait,
+ ((un->un_flags & UN_EMPTY) == 0));
}
@@ -1098,8 +1084,6 @@ static void cls_copy_data_from_queue_to_uart(struct channel_t *ch)
ch->ch_tun.un_flags |= (UN_EMPTY);
}
writeb(ch->ch_wqueue[ch->ch_w_tail], &ch->ch_cls_uart->txrx);
- dgnc_sniff_nowait_nolock(ch, "UART WRITE",
- ch->ch_wqueue + ch->ch_w_tail, 1);
ch->ch_w_tail++;
ch->ch_w_tail &= WQUEUEMASK;
ch->ch_txcount++;
diff --git a/drivers/staging/dgnc/dgnc_driver.c b/drivers/staging/dgnc/dgnc_driver.c
index 21546659ff0..ba98ff34811 100644
--- a/drivers/staging/dgnc/dgnc_driver.c
+++ b/drivers/staging/dgnc/dgnc_driver.c
@@ -49,16 +49,6 @@ MODULE_AUTHOR("Digi International, http://www.digi.com");
MODULE_DESCRIPTION("Driver for the Digi International Neo and Classic PCI based product line");
MODULE_SUPPORTED_DEVICE("dgnc");
-/*
- * insmod command line overrideable parameters
- *
- * NOTE: we use a set of macros to create the variables, which allows
- * us to specify the variable type, name, initial value, and description.
- */
-PARM_INT(debug, 0x00, 0644, "Driver debugging level");
-PARM_INT(rawreadok, 1, 0644, "Bypass flip buffers on input");
-PARM_INT(trcbuf_size, 0x100000, 0644, "Debugging trace buffer size.");
-
/**************************************************************************
*
* protos for this file
@@ -207,8 +197,6 @@ static int __init dgnc_init_module(void)
{
int rc = 0;
- APR(("%s, Digi International Part Number %s\n", DG_NAME, DG_PART));
-
/*
* Initialize global stuff
*/
@@ -254,8 +242,6 @@ static int dgnc_start(void)
/* make sure that the globals are init'd before we do anything else */
dgnc_init_globals();
- APR(("For the tools package or updated drivers please visit http://www.digi.com\n"));
-
/*
* Register our base character device into the kernel.
* This allows the download daemon to connect to the downld device
@@ -265,7 +251,7 @@ static int dgnc_start(void)
*/
rc = register_chrdev(0, "dgnc", &dgnc_BoardFops);
if (rc <= 0) {
- APR(("Can't register dgnc driver device (%d)\n", rc));
+ pr_err(DRVSTR ": Can't register dgnc driver device (%d)\n", rc);
return -ENXIO;
}
dgnc_Major = rc;
@@ -281,7 +267,7 @@ static int dgnc_start(void)
rc = dgnc_tty_preinit();
if (rc < 0) {
- APR(("tty preinit - not enough memory (%d)\n", rc));
+ pr_err(DRVSTR ": tty preinit - not enough memory (%d)\n", rc);
return rc;
}
@@ -468,7 +454,8 @@ static int dgnc_found_board(struct pci_dev *pdev, int id)
brd->membase = pci_resource_start(pdev, 4);
if (!brd->membase) {
- APR(("card has no PCI IO resources, failing board.\n"));
+ dev_err(&brd->pdev->dev,
+ "Card has no PCI IO resources, failing.\n");
return -ENODEV;
}
@@ -555,7 +542,8 @@ static int dgnc_found_board(struct pci_dev *pdev, int id)
break;
default:
- APR(("Did not find any compatible Neo or Classic PCI boards in system.\n"));
+ dev_err(&brd->pdev->dev,
+ "Didn't find any compatible Neo/Classic PCI boards.\n");
return -ENXIO;
}
@@ -567,7 +555,7 @@ static int dgnc_found_board(struct pci_dev *pdev, int id)
rc = dgnc_tty_register(brd);
if (rc < 0) {
dgnc_tty_uninit(brd);
- APR(("Can't register tty devices (%d)\n", rc));
+ pr_err(DRVSTR ": Can't register tty devices (%d)\n", rc);
brd->state = BOARD_FAILED;
brd->dpastatus = BD_NOFEP;
goto failed;
@@ -575,7 +563,7 @@ static int dgnc_found_board(struct pci_dev *pdev, int id)
rc = dgnc_finalize_board_init(brd);
if (rc < 0) {
- APR(("Can't finalize board init (%d)\n", rc));
+ pr_err(DRVSTR ": Can't finalize board init (%d)\n", rc);
brd->state = BOARD_FAILED;
brd->dpastatus = BD_NOFEP;
@@ -585,7 +573,7 @@ static int dgnc_found_board(struct pci_dev *pdev, int id)
rc = dgnc_tty_init(brd);
if (rc < 0) {
dgnc_tty_uninit(brd);
- APR(("Can't init tty devices (%d)\n", rc));
+ pr_err(DRVSTR ": Can't init tty devices (%d)\n", rc);
brd->state = BOARD_FAILED;
brd->dpastatus = BD_NOFEP;
@@ -744,9 +732,6 @@ static void dgnc_init_globals(void)
{
int i = 0;
- dgnc_rawreadok = rawreadok;
- dgnc_trcbuf_size = trcbuf_size;
- dgnc_debug = debug;
dgnc_NumBoards = 0;
for (i = 0; i < MAXBOARDS; i++)
diff --git a/drivers/staging/dgnc/dgnc_driver.h b/drivers/staging/dgnc/dgnc_driver.h
index f901957c757..a8157eba28d 100644
--- a/drivers/staging/dgnc/dgnc_driver.h
+++ b/drivers/staging/dgnc/dgnc_driver.h
@@ -42,53 +42,13 @@
*
*************************************************************************/
-/*
- * Driver identification, error and debugging statments
- *
- * In theory, you can change all occurrences of "digi" in the next
- * three lines, and the driver printk's will all automagically change.
- *
- * APR((fmt, args, ...)); Always prints message
- */
+/* Driver identification and error statements */
#define PROCSTR "dgnc" /* /proc entries */
#define DEVSTR "/dev/dg/dgnc" /* /dev entries */
-#define DRVSTR "dgnc" /* Driver name string
- * displayed by APR */
-#define APR(args) do { printk(DRVSTR": "); printk args; \
- } while (0)
-#define RAPR(args) do { printk args; } while (0)
+#define DRVSTR "dgnc" /* Driver name string */
#define TRC_TO_CONSOLE 1
-/*
- * Debugging levels can be set using debug insmod variable
- * They can also be compiled out completely.
- */
-
-#define DBG_INIT (dgnc_debug & 0x01)
-#define DBG_BASIC (dgnc_debug & 0x02)
-#define DBG_CORE (dgnc_debug & 0x04)
-
-#define DBG_OPEN (dgnc_debug & 0x08)
-#define DBG_CLOSE (dgnc_debug & 0x10)
-#define DBG_READ (dgnc_debug & 0x20)
-#define DBG_WRITE (dgnc_debug & 0x40)
-
-#define DBG_IOCTL (dgnc_debug & 0x80)
-
-#define DBG_PROC (dgnc_debug & 0x100)
-#define DBG_PARAM (dgnc_debug & 0x200)
-#define DBG_PSCAN (dgnc_debug & 0x400)
-#define DBG_EVENT (dgnc_debug & 0x800)
-
-#define DBG_DRAIN (dgnc_debug & 0x1000)
-#define DBG_MSIGS (dgnc_debug & 0x2000)
-
-#define DBG_MGMT (dgnc_debug & 0x4000)
-#define DBG_INTR (dgnc_debug & 0x8000)
-
-#define DBG_CARR (dgnc_debug & 0x10000)
-
/* Number of boards we support at once. */
#define MAXBOARDS 20
#define MAXPORTS 8
@@ -134,8 +94,6 @@
#define _POSIX_VDISABLE '\0'
#endif
-#define SNIFF_MAX 65536 /* Sniff buffer size (2^n) */
-#define SNIFF_MASK (SNIFF_MAX - 1) /* Sniff wrap mask */
/*
* All the possible states the driver can be while being loaded.
@@ -342,13 +300,6 @@ struct un_t {
#define CH_FORCED_STOP 0x20000 /* Output is forcibly stopped */
#define CH_FORCED_STOPI 0x40000 /* Input is forcibly stopped */
-/*
- * Definitions for ch_sniff_flags
- */
-#define SNIFF_OPEN 0x1
-#define SNIFF_WAIT_DATA 0x2
-#define SNIFF_WAIT_SPACE 0x4
-
/* Our Read/Error/Write queue sizes */
#define RQUEUEMASK 0x1FFF /* 8 K - 1 */
@@ -442,21 +393,13 @@ struct channel_t {
struct proc_dir_entry *proc_entry_pointer;
struct dgnc_proc_entry *dgnc_channel_table;
- uint ch_sniff_in;
- uint ch_sniff_out;
- char *ch_sniff_buf; /* Sniff buffer for proc */
- ulong ch_sniff_flags; /* Channel flags */
- wait_queue_head_t ch_sniff_wait;
};
/*
* Our Global Variables.
*/
extern uint dgnc_Major; /* Our driver/mgmt major */
-extern int dgnc_debug; /* Debug variable */
-extern int dgnc_rawreadok; /* Set if user wants rawreads */
extern int dgnc_poll_tick; /* Poll interval - 20 ms */
-extern int dgnc_trcbuf_size; /* Size of the ringbuffer */
extern spinlock_t dgnc_global_lock; /* Driver global spinlock */
extern uint dgnc_NumBoards; /* Total number of boards */
extern struct dgnc_board *dgnc_Board[MAXBOARDS]; /* Array of board structs */
diff --git a/drivers/staging/dgnc/dgnc_kcompat.h b/drivers/staging/dgnc/dgnc_kcompat.h
index eaec7e6a28e..566cad0d33e 100644
--- a/drivers/staging/dgnc/dgnc_kcompat.h
+++ b/drivers/staging/dgnc/dgnc_kcompat.h
@@ -43,22 +43,4 @@
# endif
-# define PARM_STR(VAR, INIT, PERM, DESC) \
- static char *VAR = INIT; \
- char *dgnc_##VAR; \
- module_param(VAR, charp, PERM); \
- MODULE_PARM_DESC(VAR, DESC);
-
-# define PARM_INT(VAR, INIT, PERM, DESC) \
- static int VAR = INIT; \
- int dgnc_##VAR; \
- module_param(VAR, int, PERM); \
- MODULE_PARM_DESC(VAR, DESC);
-
-# define PARM_ULONG(VAR, INIT, PERM, DESC) \
- static ulong VAR = INIT; \
- ulong dgnc_##VAR; \
- module_param(VAR, long, PERM); \
- MODULE_PARM_DESC(VAR, DESC);
-
#endif /* ! __DGNC_KCOMPAT_H */
diff --git a/drivers/staging/dgnc/dgnc_neo.c b/drivers/staging/dgnc/dgnc_neo.c
index a5bd08fef27..c9a8a9825cf 100644
--- a/drivers/staging/dgnc/dgnc_neo.c
+++ b/drivers/staging/dgnc/dgnc_neo.c
@@ -530,10 +530,11 @@ static inline void neo_parse_lsr(struct dgnc_board *brd, uint port)
int linestatus;
unsigned long flags;
- if (!brd)
- return;
-
- if (brd->magic != DGNC_BOARD_MAGIC)
+ /*
+	 * Check to make sure we didn't receive an interrupt with a null board
+	 * associated or with a board pointer that wasn't ours.
+ */
+ if (!brd || brd->magic != DGNC_BOARD_MAGIC)
return;
if (port > brd->maxports)
@@ -869,10 +870,8 @@ static void neo_tasklet(unsigned long data)
int state = 0;
int ports = 0;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC) {
- APR(("poll_tasklet() - NULL or bad bd.\n"));
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
return;
- }
/* Cache a couple board values */
spin_lock_irqsave(&bd->bd_lock, flags);
@@ -945,7 +944,7 @@ static void neo_tasklet(unsigned long data)
*/
static irqreturn_t neo_intr(int irq, void *voidbrd)
{
- struct dgnc_board *brd = (struct dgnc_board *) voidbrd;
+ struct dgnc_board *brd = voidbrd;
struct channel_t *ch;
int port = 0;
int type = 0;
@@ -955,18 +954,12 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
unsigned long flags;
unsigned long flags2;
- if (!brd) {
- APR(("Received interrupt (%d) with null board associated\n", irq));
- return IRQ_NONE;
- }
-
/*
- * Check to make sure its for us.
+	 * Check to make sure we didn't receive an interrupt with a null board
+	 * associated or with a board pointer that wasn't ours.
*/
- if (brd->magic != DGNC_BOARD_MAGIC) {
- APR(("Received interrupt (%d) with a board pointer that wasn't ours!\n", irq));
+ if (!brd || brd->magic != DGNC_BOARD_MAGIC)
return IRQ_NONE;
- }
brd->intr_count++;
@@ -1224,7 +1217,6 @@ static void neo_copy_data_from_uart_to_queue(struct channel_t *ch)
/* Copy data from uart to the queue */
memcpy_fromio(ch->ch_rqueue + head, &ch->ch_neo_uart->txrxburst, n);
- dgnc_sniff_nowait_nolock(ch, "UART READ", ch->ch_rqueue + head, n);
/*
* Since RX_FIFO_DATA_ERROR was 0, we are guarenteed
@@ -1310,7 +1302,6 @@ static void neo_copy_data_from_uart_to_queue(struct channel_t *ch)
memcpy_fromio(ch->ch_rqueue + head, &ch->ch_neo_uart->txrxburst, 1);
ch->ch_equeue[head] = (unsigned char) linestatus;
- dgnc_sniff_nowait_nolock(ch, "UART READ", ch->ch_rqueue + head, 1);
/* Ditch any remaining linestatus value. */
linestatus = 0;
@@ -1563,7 +1554,6 @@ static void neo_copy_data_from_queue_to_uart(struct channel_t *ch)
}
memcpy_toio(&ch->ch_neo_uart->txrxburst, ch->ch_wqueue + tail, s);
- dgnc_sniff_nowait_nolock(ch, "UART WRITE", ch->ch_wqueue + tail, s);
/* Add and flip queue if needed */
tail = (tail + s) & WQUEUEMASK;
diff --git a/drivers/staging/dgnc/dgnc_sysfs.c b/drivers/staging/dgnc/dgnc_sysfs.c
index 6c3b387622e..2fd34ca70c5 100644
--- a/drivers/staging/dgnc/dgnc_sysfs.c
+++ b/drivers/staging/dgnc/dgnc_sysfs.c
@@ -63,39 +63,6 @@ static ssize_t dgnc_driver_maxboards_show(struct device_driver *ddp, char *buf)
}
static DRIVER_ATTR(maxboards, S_IRUSR, dgnc_driver_maxboards_show, NULL);
-static ssize_t dgnc_driver_debug_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "0x%x\n", dgnc_debug);
-}
-
-static ssize_t dgnc_driver_debug_store(struct device_driver *ddp, const char *buf, size_t count)
-{
- int ret;
-
- ret = sscanf(buf, "0x%x\n", &dgnc_debug);
- if (ret != 1)
- return -EINVAL;
- return count;
-}
-static DRIVER_ATTR(debug, (S_IRUSR | S_IWUSR), dgnc_driver_debug_show, dgnc_driver_debug_store);
-
-
-static ssize_t dgnc_driver_rawreadok_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "0x%x\n", dgnc_rawreadok);
-}
-
-static ssize_t dgnc_driver_rawreadok_store(struct device_driver *ddp, const char *buf, size_t count)
-{
- int ret;
-
- ret = sscanf(buf, "0x%x\n", &dgnc_rawreadok);
- if (ret != 1)
- return -EINVAL;
- return count;
-}
-static DRIVER_ATTR(rawreadok, (S_IRUSR | S_IWUSR), dgnc_driver_rawreadok_show, dgnc_driver_rawreadok_store);
-
static ssize_t dgnc_driver_pollrate_show(struct device_driver *ddp, char *buf)
{
@@ -122,8 +89,6 @@ void dgnc_create_driver_sysfiles(struct pci_driver *dgnc_driver)
rc |= driver_create_file(driverfs, &driver_attr_version);
rc |= driver_create_file(driverfs, &driver_attr_boards);
rc |= driver_create_file(driverfs, &driver_attr_maxboards);
- rc |= driver_create_file(driverfs, &driver_attr_debug);
- rc |= driver_create_file(driverfs, &driver_attr_rawreadok);
rc |= driver_create_file(driverfs, &driver_attr_pollrate);
if (rc)
printk(KERN_ERR "DGNC: sysfs driver_create_file failed!\n");
@@ -137,8 +102,6 @@ void dgnc_remove_driver_sysfiles(struct pci_driver *dgnc_driver)
driver_remove_file(driverfs, &driver_attr_version);
driver_remove_file(driverfs, &driver_attr_boards);
driver_remove_file(driverfs, &driver_attr_maxboards);
- driver_remove_file(driverfs, &driver_attr_debug);
- driver_remove_file(driverfs, &driver_attr_rawreadok);
driver_remove_file(driverfs, &driver_attr_pollrate);
}
diff --git a/drivers/staging/dgnc/dgnc_tty.c b/drivers/staging/dgnc/dgnc_tty.c
index 03c15069731..f81a375f8bc 100644
--- a/drivers/staging/dgnc/dgnc_tty.c
+++ b/drivers/staging/dgnc/dgnc_tty.c
@@ -49,7 +49,6 @@
#include <linux/delay.h> /* For udelay */
#include <linux/uaccess.h> /* For copy_from_user/copy_to_user */
#include <linux/pci.h>
-
#include "dgnc_driver.h"
#include "dgnc_tty.h"
#include "dgnc_types.h"
@@ -233,7 +232,8 @@ int dgnc_tty_register(struct dgnc_board *brd)
/* Register tty devices */
rc = tty_register_driver(&brd->SerialDriver);
if (rc < 0) {
- APR(("Can't register tty device (%d)\n", rc));
+ dev_dbg(&brd->pdev->dev,
+ "Can't register tty device (%d)\n", rc);
return rc;
}
brd->dgnc_Major_Serial_Registered = TRUE;
@@ -281,7 +281,9 @@ int dgnc_tty_register(struct dgnc_board *brd)
/* Register Transparent Print devices */
rc = tty_register_driver(&brd->PrintDriver);
if (rc < 0) {
- APR(("Can't register Transparent Print device (%d)\n", rc));
+ dev_dbg(&brd->pdev->dev,
+			"Can't register Transparent Print device (%d)\n",
+ rc);
return rc;
}
brd->dgnc_Major_TransparentPrint_Registered = TRUE;
@@ -371,7 +373,6 @@ int dgnc_tty_init(struct dgnc_board *brd)
init_waitqueue_head(&ch->ch_flags_wait);
init_waitqueue_head(&ch->ch_tun.un_flags_wait);
init_waitqueue_head(&ch->ch_pun.un_flags_wait);
- init_waitqueue_head(&ch->ch_sniff_wait);
{
struct device *classp;
@@ -446,127 +447,6 @@ void dgnc_tty_uninit(struct dgnc_board *brd)
#define TMPBUFLEN (1024)
-/*
- * dgnc_sniff - Dump data out to the "sniff" buffer if the
- * proc sniff file is opened...
- */
-void dgnc_sniff_nowait_nolock(struct channel_t *ch, unsigned char *text, unsigned char *buf, int len)
-{
- struct timeval tv;
- int n;
- int r;
- int nbuf;
- int i;
- int tmpbuflen;
- char *tmpbuf;
- char *p;
- int too_much_data;
-
- tmpbuf = kzalloc(TMPBUFLEN, GFP_ATOMIC);
- if (!tmpbuf)
- return;
- p = tmpbuf;
-
- /* Leave if sniff not open */
- if (!(ch->ch_sniff_flags & SNIFF_OPEN))
- goto exit;
-
- do_gettimeofday(&tv);
-
- /* Create our header for data dump */
- p += sprintf(p, "<%ld %ld><%s><", tv.tv_sec, tv.tv_usec, text);
- tmpbuflen = p - tmpbuf;
-
- do {
- too_much_data = 0;
-
- for (i = 0; i < len && tmpbuflen < (TMPBUFLEN - 4); i++) {
- p += sprintf(p, "%02x ", *buf);
- buf++;
- tmpbuflen = p - tmpbuf;
- }
-
- if (tmpbuflen < (TMPBUFLEN - 4)) {
- if (i > 0)
- p += sprintf(p - 1, "%s\n", ">");
- else
- p += sprintf(p, "%s\n", ">");
- } else {
- too_much_data = 1;
- len -= i;
- }
-
- nbuf = strlen(tmpbuf);
- p = tmpbuf;
-
- /*
- * Loop while data remains.
- */
- while (nbuf > 0 && ch->ch_sniff_buf) {
- /*
- * Determine the amount of available space left in the
- * buffer. If there's none, wait until some appears.
- */
- n = (ch->ch_sniff_out - ch->ch_sniff_in - 1) & SNIFF_MASK;
-
- /*
- * If there is no space left to write to in our sniff buffer,
- * we have no choice but to drop the data.
- * We *cannot* sleep here waiting for space, because this
- * function was probably called by the interrupt/timer routines!
- */
- if (n == 0)
- goto exit;
-
- /*
- * Copy as much data as will fit.
- */
-
- if (n > nbuf)
- n = nbuf;
-
- r = SNIFF_MAX - ch->ch_sniff_in;
-
- if (r <= n) {
- memcpy(ch->ch_sniff_buf + ch->ch_sniff_in, p, r);
-
- n -= r;
- ch->ch_sniff_in = 0;
- p += r;
- nbuf -= r;
- }
-
- memcpy(ch->ch_sniff_buf + ch->ch_sniff_in, p, n);
-
- ch->ch_sniff_in += n;
- p += n;
- nbuf -= n;
-
- /*
- * Wakeup any thread waiting for data
- */
- if (ch->ch_sniff_flags & SNIFF_WAIT_DATA) {
- ch->ch_sniff_flags &= ~SNIFF_WAIT_DATA;
- wake_up_interruptible(&ch->ch_sniff_wait);
- }
- }
-
- /*
- * If the user sent us too much data to push into our tmpbuf,
- * we need to keep looping around on all the data.
- */
- if (too_much_data) {
- p = tmpbuf;
- tmpbuflen = 0;
- }
-
- } while (too_much_data);
-
-exit:
- kfree(tmpbuf);
-}
-
-
/*=======================================================================
*
* dgnc_wmove - Write data to transmit queue.
@@ -781,8 +661,6 @@ void dgnc_input(struct channel_t *ch)
tty_insert_flip_string(tp->port, ch->ch_rqueue + tail, s);
}
- dgnc_sniff_nowait_nolock(ch, "USER READ", ch->ch_rqueue + tail, s);
-
tail += s;
n -= s;
/* Flip queue if needed */
@@ -1546,14 +1424,18 @@ static void dgnc_tty_close(struct tty_struct *tty, struct file *file)
* one, we've got real problems, since it means the
* serial port won't be shutdown.
*/
- APR(("tty->count is 1, un open count is %d\n", un->un_open_count));
+ dev_dbg(tty->dev,
+ "tty->count is 1, un open count is %d\n",
+ un->un_open_count);
un->un_open_count = 1;
}
if (un->un_open_count)
un->un_open_count--;
else
- APR(("bad serial port open count of %d\n", un->un_open_count));
+ dev_dbg(tty->dev,
+ "bad serial port open count of %d\n",
+ un->un_open_count);
ch->ch_open_count--;
@@ -1974,7 +1856,6 @@ static int dgnc_tty_write(struct tty_struct *tty,
if (n >= remain) {
n -= remain;
memcpy(ch->ch_wqueue + head, buf, remain);
- dgnc_sniff_nowait_nolock(ch, "USER WRITE", ch->ch_wqueue + head, remain);
head = 0;
buf += remain;
}
@@ -1985,7 +1866,6 @@ static int dgnc_tty_write(struct tty_struct *tty,
*/
remain = n;
memcpy(ch->ch_wqueue + head, buf, remain);
- dgnc_sniff_nowait_nolock(ch, "USER WRITE", ch->ch_wqueue + head, remain);
head += remain;
}
@@ -2325,8 +2205,6 @@ static int dgnc_set_modem_info(struct tty_struct *tty, unsigned int command, uns
if (!bd || bd->magic != DGNC_BOARD_MAGIC)
return ret;
- ret = 0;
-
ret = get_user(arg, value);
if (ret)
return ret;
@@ -3089,7 +2967,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
struct digi_getcounter buf;
buf.norun = ch->ch_err_overrun;
- buf.noflow = 0; /* The driver doesn't keep this stat */
+ buf.noflow = 0; /* The driver doesn't keep this stat */
buf.nframe = ch->ch_err_frame;
buf.nparity = ch->ch_err_parity;
buf.nbreak = ch->ch_err_break;
diff --git a/drivers/staging/dgnc/dgnc_tty.h b/drivers/staging/dgnc/dgnc_tty.h
index 58eef257c2e..3975f040714 100644
--- a/drivers/staging/dgnc/dgnc_tty.h
+++ b/drivers/staging/dgnc/dgnc_tty.h
@@ -37,6 +37,4 @@ void dgnc_carrier(struct channel_t *ch);
void dgnc_wakeup_writes(struct channel_t *ch);
void dgnc_check_queue_flow_control(struct channel_t *ch);
-void dgnc_sniff_nowait_nolock(struct channel_t *ch, unsigned char *text, unsigned char *buf, int nbuf);
-
#endif
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
index ed8d86c98f6..eb178fcb795 100644
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ b/drivers/staging/emxx_udc/emxx_udc.c
@@ -2622,7 +2622,7 @@ static int nbu2ss_ep_enable(
return -EINVAL;
}
- ep_type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+ ep_type = usb_endpoint_type(desc);
if ((ep_type == USB_ENDPOINT_XFER_CONTROL)
|| (ep_type == USB_ENDPOINT_XFER_ISOC)) {
@@ -2644,7 +2644,7 @@ static int nbu2ss_ep_enable(
spin_lock_irqsave(&udc->lock, flags);
ep->desc = desc;
- ep->epnum = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
+ ep->epnum = usb_endpoint_num(desc);
ep->direct = desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK;
ep->ep_type = ep_type;
ep->wedged = 0;
@@ -2722,8 +2722,7 @@ static void nbu2ss_ep_free_request(
if (_req != NULL) {
req = container_of(_req, struct nbu2ss_req, req);
- if (req != NULL)
- kfree(req);
+ kfree(req);
}
}
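
The dropped NULL test above is safe because kfree() is a no-op when handed a NULL pointer; here req also comes from container_of() on a non-NULL _req, so it cannot be NULL in any case. A standalone illustration (hypothetical struct, not driver code):

#include <linux/slab.h>

struct example_req {
	int dummy;
};

static void example_free(struct example_req *req)
{
	kfree(req);	/* safe even when req is NULL */
}
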
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/boot.h b/drivers/staging/ft1000/ft1000-pcmcia/boot.h
index 60c015c1c28..e4a69852852 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/boot.h
+++ b/drivers/staging/ft1000/ft1000-pcmcia/boot.h
@@ -1,28 +1,28 @@
/*---------------------------------------------------------------------------
- FT1000 driver for Flarion Flash OFDM NIC Device
+ FT1000 driver for Flarion Flash OFDM NIC Device
- Copyright (C) 2002 Flarion Technologies, All rights reserved.
+ Copyright (C) 2002 Flarion Technologies, All rights reserved.
- This program is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published by the Free
- Software Foundation; either version 2 of the License, or (at your option) any
- later version. This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details. You should have received a copy of the GNU General Public
- License along with this program; if not, write to the
- Free Software Foundation, Inc., 59 Temple Place -
- Suite 330, Boston, MA 02111-1307, USA.
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option) any
+ later version. This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details. You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the
+ Free Software Foundation, Inc., 59 Temple Place -
+ Suite 330, Boston, MA 02111-1307, USA.
---------------------------------------------------------------------------
- File: boot.h
+ File: boot.h
- Description: boatloader
+	Description: bootloader
- History:
- 1/11/05 Whc Ported to Linux.
+ History:
+ 1/11/05 Whc Ported to Linux.
----------------------------------------------------------------------------*/
+ ---------------------------------------------------------------------------*/
#ifndef _BOOTH_
#define _BOOTH_
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000.h b/drivers/staging/ft1000/ft1000-pcmcia/ft1000.h
index 1d52738fff4..5992670f774 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000.h
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000.h
@@ -1,21 +1,21 @@
/*---------------------------------------------------------------------------
- FT1000 driver for Flarion Flash OFDM NIC Device
+ FT1000 driver for Flarion Flash OFDM NIC Device
- Copyright (C) 2002 Flarion Technologies, All rights reserved.
+ Copyright (C) 2002 Flarion Technologies, All rights reserved.
- This program is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published by the Free
- Software Foundation; either version 2 of the License, or (at your option) any
- later version. This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details. You should have received a copy of the GNU General Public
- License along with this program; if not, write to the
- Free Software Foundation, Inc., 59 Temple Place -
- Suite 330, Boston, MA 02111-1307, USA.
----------------------------------------------------------------------------
- Description: Common structures and defines
----------------------------------------------------------------------------*/
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option) any
+ later version. This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details. You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the
+ Free Software Foundation, Inc., 59 Temple Place -
+ Suite 330, Boston, MA 02111-1307, USA.
+ ---------------------------------------------------------------------------
+ Description: Common structures and defines
+ ---------------------------------------------------------------------------*/
#ifndef _FT1000H_
#define _FT1000H_
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_cs.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_cs.c
index 1f8b3ca35c6..922478e1cb5 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_cs.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_cs.c
@@ -1,30 +1,30 @@
/*---------------------------------------------------------------------------
- FT1000 driver for Flarion Flash OFDM NIC Device
+ FT1000 driver for Flarion Flash OFDM NIC Device
- Copyright (C) 1999 David A. Hinds. All Rights Reserved.
- Copyright (C) 2002 Flarion Technologies, All rights reserved.
- Copyright (C) 2006 Patrik Ostrihon, All rights reserved.
- Copyright (C) 2006 ProWeb Consulting, a.s, All rights reserved.
+ Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ Copyright (C) 2002 Flarion Technologies, All rights reserved.
+ Copyright (C) 2006 Patrik Ostrihon, All rights reserved.
+ Copyright (C) 2006 ProWeb Consulting, a.s, All rights reserved.
- The initial developer of the original code is David A. Hinds
- <dahinds@users.sourceforge.net>. Portions created by David A. Hinds.
+ The initial developer of the original code is David A. Hinds
+ <dahinds@users.sourceforge.net>. Portions created by David A. Hinds.
- This file was modified to support the Flarion Flash OFDM NIC Device
- by Wai Chan (w.chan@flarion.com).
+ This file was modified to support the Flarion Flash OFDM NIC Device
+ by Wai Chan (w.chan@flarion.com).
- Port for kernel 2.6 created by Patrik Ostrihon (patrik.ostrihon@pwc.sk)
+ Port for kernel 2.6 created by Patrik Ostrihon (patrik.ostrihon@pwc.sk)
- This program is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published by the Free
- Software Foundation; either version 2 of the License, or (at your option) any
- later version. This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details. You should have received a copy of the GNU General Public
- License along with this program; if not, write to the
- Free Software Foundation, Inc., 59 Temple Place -
- Suite 330, Boston, MA 02111-1307, USA.
------------------------------------------------------------------------------*/
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option) any
+ later version. This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details. You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the
+ Free Software Foundation, Inc., 59 Temple Place -
+ Suite 330, Boston, MA 02111-1307, USA.
+ -----------------------------------------------------------------------------*/
#include <linux/kernel.h>
#include <linux/module.h>
@@ -80,11 +80,11 @@ static int ft1000_confcheck(struct pcmcia_device *link, void *priv_data)
/*======================================================================
- ft1000_config() is scheduled to run after a CARD_INSERTION event
- is received, to configure the PCMCIA socket, and to make the
- device available to the system.
+ ft1000_config() is scheduled to run after a CARD_INSERTION event
+ is received, to configure the PCMCIA socket, and to make the
+ device available to the system.
-======================================================================*/
+ ======================================================================*/
static int ft1000_config(struct pcmcia_device *link)
{
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_dnld.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_dnld.c
index c1856f7d1e2..06b0e9cfb9b 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_dnld.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_dnld.c
@@ -1,24 +1,26 @@
/*---------------------------------------------------------------------------
- FT1000 driver for Flarion Flash OFDM NIC Device
-
- Copyright (C) 2002 Flarion Technologies, All rights reserved.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published by the Free
- Software Foundation; either version 2 of the License, or (at your option) any
- later version. This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details. You should have received a copy of the GNU General Public
- License along with this program; if not, write to the
- Free Software Foundation, Inc., 59 Temple Place -
- Suite 330, Boston, MA 02111-1307, USA.
+ FT1000 driver for Flarion Flash OFDM NIC Device
+
+ Copyright (C) 2002 Flarion Technologies, All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option) any
+ later version. This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details. You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the
+ Free Software Foundation, Inc., 59 Temple Place -
+ Suite 330, Boston, MA 02111-1307, USA.
--------------------------------------------------------------------------
- Description: This module will handshake with the DSP bootloader to
- download the DSP runtime image.
+ Description: This module will handshake with the DSP bootloader to
+ download the DSP runtime image.
----------------------------------------------------------------------------*/
+ ---------------------------------------------------------------------------*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define __KERNEL_SYSCALLS__
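
A brief note on the pr_fmt() definition added just above: the kernel's pr_* helpers (pr_err(), pr_info(), ...) expand to printk() with pr_fmt() wrapped around the format string, so every message from this file is automatically prefixed with the module name, and the hand-written "ft1000: " prefixes can be dropped from the format strings, as the later hunks do. For example:

/* With the pr_fmt above, this logs "<module name>: unsupported firmware version <n>" */
pr_err("unsupported firmware version %ld\n", file_version);
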
@@ -99,7 +101,7 @@ struct dsp_file_hdr {
u32 version_data_offset; /* Offset were scrambled version data begins. */
u32 version_data_size; /* Size, in words, of scrambled version data. */
u32 nDspImages; /* Number of DSP images in file. */
-} __attribute__ ((packed));
+} __packed;
struct dsp_image_info {
u32 coff_date; /* Date/time when DSP Coff image was built. */
@@ -110,11 +112,11 @@ struct dsp_image_info {
u32 version; /* Embedded version # of DSP code. */
unsigned short checksum; /* Dsp File checksum */
unsigned short pad1;
-} __attribute__ ((packed));
+} __packed;
void card_bootload(struct net_device *dev)
{
- struct ft1000_info *info = (struct ft1000_info *) netdev_priv(dev);
+ struct ft1000_info *info = (struct ft1000_info *)netdev_priv(dev);
unsigned long flags;
u32 *pdata;
u32 size;
@@ -123,7 +125,7 @@ void card_bootload(struct net_device *dev)
netdev_dbg(dev, "card_bootload is called\n");
- pdata = (u32 *) bootimage;
+ pdata = (u32 *)bootimage;
size = sizeof(bootimage);
/* check for odd word */
@@ -146,7 +148,7 @@ void card_bootload(struct net_device *dev)
u16 get_handshake(struct net_device *dev, u16 expected_value)
{
- struct ft1000_info *info = (struct ft1000_info *) netdev_priv(dev);
+ struct ft1000_info *info = (struct ft1000_info *)netdev_priv(dev);
u16 handshake;
u32 tempx;
int loopcnt;
@@ -161,12 +163,12 @@ u16 get_handshake(struct net_device *dev, u16 expected_value)
} else {
tempx =
ntohl(ft1000_read_dpram_mag_32
- (dev, DWNLD_MAG_HANDSHAKE_LOC));
- handshake = (u16) tempx;
+ (dev, DWNLD_MAG_HANDSHAKE_LOC));
+ handshake = (u16)tempx;
}
if ((handshake == expected_value)
- || (handshake == HANDSHAKE_RESET_VALUE)) {
+ || (handshake == HANDSHAKE_RESET_VALUE)) {
return handshake;
}
loopcnt++;
@@ -180,7 +182,7 @@ u16 get_handshake(struct net_device *dev, u16 expected_value)
void put_handshake(struct net_device *dev, u16 handshake_value)
{
- struct ft1000_info *info = (struct ft1000_info *) netdev_priv(dev);
+ struct ft1000_info *info = (struct ft1000_info *)netdev_priv(dev);
u32 tempx;
if (info->AsicID == ELECTRABUZZ_ID) {
@@ -188,7 +190,7 @@ void put_handshake(struct net_device *dev, u16 handshake_value)
DWNLD_HANDSHAKE_LOC);
ft1000_write_reg(dev, FT1000_REG_DPRAM_DATA, handshake_value); /* Handshake */
} else {
- tempx = (u32) handshake_value;
+ tempx = (u32)handshake_value;
tempx = ntohl(tempx);
ft1000_write_dpram_mag_32(dev, DWNLD_MAG_HANDSHAKE_LOC, tempx); /* Handshake */
}
@@ -196,7 +198,7 @@ void put_handshake(struct net_device *dev, u16 handshake_value)
u16 get_request_type(struct net_device *dev)
{
- struct ft1000_info *info = (struct ft1000_info *) netdev_priv(dev);
+ struct ft1000_info *info = (struct ft1000_info *)netdev_priv(dev);
u16 request_type;
u32 tempx;
@@ -206,7 +208,7 @@ u16 get_request_type(struct net_device *dev)
} else {
tempx = ft1000_read_dpram_mag_32(dev, DWNLD_MAG_TYPE_LOC);
tempx = ntohl(tempx);
- request_type = (u16) tempx;
+ request_type = (u16)tempx;
}
return request_type;
@@ -215,7 +217,7 @@ u16 get_request_type(struct net_device *dev)
long get_request_value(struct net_device *dev)
{
- struct ft1000_info *info = (struct ft1000_info *) netdev_priv(dev);
+ struct ft1000_info *info = (struct ft1000_info *)netdev_priv(dev);
long value;
u16 w_val;
@@ -244,7 +246,7 @@ long get_request_value(struct net_device *dev)
void put_request_value(struct net_device *dev, long lvalue)
{
- struct ft1000_info *info = (struct ft1000_info *) netdev_priv(dev);
+ struct ft1000_info *info = (struct ft1000_info *)netdev_priv(dev);
u16 size;
u32 tempx;
@@ -271,11 +273,11 @@ void put_request_value(struct net_device *dev, long lvalue)
u16 hdr_checksum(struct pseudo_hdr *pHdr)
{
- u16 *usPtr = (u16 *) pHdr;
+ u16 *usPtr = (u16 *)pHdr;
u16 chksum;
chksum = ((((((usPtr[0] ^ usPtr[1]) ^ usPtr[2]) ^ usPtr[3]) ^
- usPtr[4]) ^ usPtr[5]) ^ usPtr[6]);
+ usPtr[4]) ^ usPtr[5]) ^ usPtr[6]);
return chksum;
}
@@ -283,7 +285,7 @@ u16 hdr_checksum(struct pseudo_hdr *pHdr)
int card_download(struct net_device *dev, const u8 *pFileStart,
size_t FileLength)
{
- struct ft1000_info *info = (struct ft1000_info *) netdev_priv(dev);
+ struct ft1000_info *info = (struct ft1000_info *)netdev_priv(dev);
int Status = SUCCESS;
u32 uiState;
u16 handshake;
@@ -316,13 +318,13 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
file_version = *(long *)pFileStart;
if (file_version != 6) {
- printk(KERN_ERR "ft1000: unsupported firmware version %ld\n", file_version);
+ pr_err("unsupported firmware version %ld\n", file_version);
Status = FAILURE;
}
uiState = STATE_START_DWNLD;
- pFileHdr5 = (struct dsp_file_hdr *) pFileStart;
+ pFileHdr5 = (struct dsp_file_hdr *)pFileStart;
pUsFile = (u16 *) ((long)pFileStart + pFileHdr5->loader_offset);
pUcFile = (u8 *) ((long)pFileStart + pFileHdr5->loader_offset);
@@ -376,7 +378,7 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
break;
}
if ((word_length * 2 + (long)pUcFile) >
- (long)pBootEnd) {
+ (long)pBootEnd) {
/*
* Error, beyond boot code range.
*/
@@ -390,8 +392,8 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
* Position ASIC DPRAM auto-increment pointer.
*/
outw(DWNLD_MAG_PS_HDR_LOC,
- dev->base_addr +
- FT1000_REG_DPRAM_ADDR);
+ dev->base_addr +
+ FT1000_REG_DPRAM_ADDR);
if (word_length & 0x01)
word_length++;
word_length = word_length / 2;
@@ -402,12 +404,12 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
(*pUsFile++ << 16);
pUcFile += 4;
outl(templong,
- dev->base_addr +
- FT1000_REG_MAG_DPDATAL);
+ dev->base_addr +
+ FT1000_REG_MAG_DPDATAL);
}
spin_unlock_irqrestore(&info->
- dpram_lock,
- flags);
+ dpram_lock,
+ flags);
break;
default:
Status = FAILURE;
@@ -430,7 +432,7 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
switch (request) {
case REQUEST_FILE_CHECKSUM:
netdev_dbg(dev,
- "ft1000_dnld: REQUEST_FOR_CHECKSUM\n");
+ "ft1000_dnld: REQUEST_FOR_CHECKSUM\n");
put_request_value(dev, image_chksum);
break;
case REQUEST_RUN_ADDRESS:
@@ -468,7 +470,7 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
break;
}
if ((word_length * 2 + (long)pUcFile) >
- (long)pCodeEnd) {
+ (long)pCodeEnd) {
/*
* Error, beyond boot code range.
*/
@@ -479,8 +481,8 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
* Position ASIC DPRAM auto-increment pointer.
*/
outw(DWNLD_MAG_PS_HDR_LOC,
- dev->base_addr +
- FT1000_REG_DPRAM_ADDR);
+ dev->base_addr +
+ FT1000_REG_DPRAM_ADDR);
if (word_length & 0x01)
word_length++;
word_length = word_length / 2;
@@ -491,8 +493,8 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
(*pUsFile++ << 16);
pUcFile += 4;
outl(templong,
- dev->base_addr +
- FT1000_REG_MAG_DPDATAL);
+ dev->base_addr +
+ FT1000_REG_MAG_DPDATAL);
}
break;
@@ -502,9 +504,9 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
(long)(info->DSPInfoBlklen + 1) / 2;
put_request_value(dev, word_length);
pMailBoxData =
- (struct drv_msg *) &info->DSPInfoBlk[0];
+ (struct drv_msg *)&info->DSPInfoBlk[0];
pUsData =
- (u16 *) &pMailBoxData->data[0];
+ (u16 *)&pMailBoxData->data[0];
/* Provide mutual exclusive access while reading ASIC registers. */
spin_lock_irqsave(&info->dpram_lock,
flags);
@@ -528,8 +530,8 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
* Position ASIC DPRAM auto-increment pointer.
*/
outw(DWNLD_MAG_PS_HDR_LOC,
- dev->base_addr +
- FT1000_REG_DPRAM_ADDR);
+ dev->base_addr +
+ FT1000_REG_DPRAM_ADDR);
if (word_length & 0x01)
word_length++;
@@ -540,13 +542,13 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
templong |=
(*pUsData++ << 16);
outl(templong,
- dev->base_addr +
- FT1000_REG_MAG_DPDATAL);
+ dev->base_addr +
+ FT1000_REG_MAG_DPDATAL);
}
}
spin_unlock_irqrestore(&info->
- dpram_lock,
- flags);
+ dpram_lock,
+ flags);
break;
case REQUEST_VERSION_INFO:
@@ -555,8 +557,8 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
put_request_value(dev, word_length);
pUsFile =
(u16 *) ((long)pFileStart +
- pFileHdr5->
- version_data_offset);
+ pFileHdr5->
+ version_data_offset);
/* Provide mutual exclusive access while reading ASIC registers. */
spin_lock_irqsave(&info->dpram_lock,
flags);
@@ -564,8 +566,8 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
* Position ASIC DPRAM auto-increment pointer.
*/
outw(DWNLD_MAG_PS_HDR_LOC,
- dev->base_addr +
- FT1000_REG_DPRAM_ADDR);
+ dev->base_addr +
+ FT1000_REG_DPRAM_ADDR);
if (word_length & 0x01)
word_length++;
word_length = word_length / 2;
@@ -578,12 +580,12 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
templong |=
(temp << 16);
outl(templong,
- dev->base_addr +
- FT1000_REG_MAG_DPDATAL);
+ dev->base_addr +
+ FT1000_REG_MAG_DPDATAL);
}
spin_unlock_irqrestore(&info->
- dpram_lock,
- flags);
+ dpram_lock,
+ flags);
break;
case REQUEST_CODE_BY_VERSION:
@@ -592,14 +594,14 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
get_request_value(dev);
pDspImageInfoV6 =
(struct dsp_image_info *) ((long)
- pFileStart
- +
- sizeof
- (struct dsp_file_hdr));
+ pFileStart
+ +
+ sizeof
+ (struct dsp_file_hdr));
for (imageN = 0;
- imageN <
- pFileHdr5->nDspImages;
- imageN++) {
+ imageN <
+ pFileHdr5->nDspImages;
+ imageN++) {
temp = (u16)
(pDspImageInfoV6->
version);
@@ -610,30 +612,30 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
templong |=
(temp << 16);
if (templong ==
- requested_version) {
+ requested_version) {
bGoodVersion =
true;
pUsFile =
(u16
*) ((long)
- pFileStart
- +
- pDspImageInfoV6->
- begin_offset);
+ pFileStart
+ +
+ pDspImageInfoV6->
+ begin_offset);
pUcFile =
(u8
*) ((long)
- pFileStart
- +
- pDspImageInfoV6->
- begin_offset);
+ pFileStart
+ +
+ pDspImageInfoV6->
+ begin_offset);
pCodeEnd =
(u8
*) ((long)
- pFileStart
- +
- pDspImageInfoV6->
- end_offset);
+ pFileStart
+ +
+ pDspImageInfoV6->
+ end_offset);
run_address =
pDspImageInfoV6->
run_address;
@@ -645,10 +647,10 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
pDspImageInfoV6->
checksum;
netdev_dbg(dev,
- "ft1000_dnld: image_chksum = 0x%8x\n",
- (unsigned
- int)
- image_chksum);
+ "ft1000_dnld: image_chksum = 0x%8x\n",
+ (unsigned
+ int)
+ image_chksum);
break;
}
pDspImageInfoV6++;
@@ -674,25 +676,25 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
break;
case STATE_DONE_DWNLD:
- if (((unsigned long) (pUcFile) - (unsigned long) pFileStart) >=
- (unsigned long) FileLength) {
+ if (((unsigned long)(pUcFile) - (unsigned long) pFileStart) >=
+ (unsigned long)FileLength) {
uiState = STATE_DONE_FILE;
break;
}
- pHdr = (struct pseudo_hdr *) pUsFile;
+ pHdr = (struct pseudo_hdr *)pUsFile;
if (pHdr->portdest == 0x80 /* DspOAM */
- && (pHdr->portsrc == 0x00 /* Driver */
+ && (pHdr->portsrc == 0x00 /* Driver */
|| pHdr->portsrc == 0x10 /* FMM */)) {
uiState = STATE_SECTION_PROV;
} else {
netdev_dbg(dev,
- "FT1000:download:Download error: Bad Port IDs in Pseudo Record\n");
+ "Download error: Bad Port IDs in Pseudo Record\n");
netdev_dbg(dev, "\t Port Source = 0x%2.2x\n",
- pHdr->portsrc);
+ pHdr->portsrc);
netdev_dbg(dev, "\t Port Destination = 0x%2.2x\n",
- pHdr->portdest);
+ pHdr->portdest);
Status = FAILURE;
}
@@ -700,7 +702,7 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
case STATE_SECTION_PROV:
- pHdr = (struct pseudo_hdr *) pUcFile;
+ pHdr = (struct pseudo_hdr *)pUcFile;
if (pHdr->checksum == hdr_checksum(pHdr)) {
if (pHdr->portdest != 0x80 /* Dsp OAM */) {
@@ -715,8 +717,8 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
GFP_ATOMIC);
if (pbuffer) {
memcpy(pbuffer, (void *)pUcFile,
- (u32) (usHdrLength +
- sizeof(struct pseudo_hdr)));
+ (u32) (usHdrLength +
+ sizeof(struct pseudo_hdr)));
/* link provisioning data */
pprov_record =
kmalloc(sizeof(struct prov_record),
@@ -725,15 +727,15 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
pprov_record->pprov_data =
pbuffer;
list_add_tail(&pprov_record->
- list,
- &info->prov_list);
+ list,
+ &info->prov_list);
/* Move to next entry if available */
pUcFile =
- (u8 *) ((unsigned long) pUcFile +
- (unsigned long) ((usHdrLength + 1) & 0xFFFFFFFE) + sizeof(struct pseudo_hdr));
+ (u8 *)((unsigned long) pUcFile +
+ (unsigned long) ((usHdrLength + 1) & 0xFFFFFFFE) + sizeof(struct pseudo_hdr));
if ((unsigned long) (pUcFile) -
- (unsigned long) (pFileStart) >=
- (unsigned long) FileLength) {
+ (unsigned long) (pFileStart) >=
+ (unsigned long)FileLength) {
uiState =
STATE_DONE_FILE;
}
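
The provisioning path in this hunk accepts a record only when `pHdr->checksum == hdr_checksum(pHdr)`, and the send path later in the patch rebuilds that checksum as the XOR of the first seven 16-bit words of the pseudo header. An illustrative user-space sketch of that check, not taken from the driver (the 8-word layout and field values here are stand-ins):

#include <stdint.h>
#include <stdio.h>

/* Illustrative 16-byte pseudo header: eight 16-bit words, the last
 * one holding the checksum (XOR of the first seven). */
struct pseudo_hdr_sketch {
	uint16_t word[8];
};

static uint16_t hdr_checksum_sketch(const struct pseudo_hdr_sketch *h)
{
	uint16_t sum = h->word[0];
	int i;

	for (i = 1; i < 7; i++)
		sum ^= h->word[i];
	return sum;
}

int main(void)
{
	struct pseudo_hdr_sketch h = {
		.word = { 0x0012, 0x1000, 0x2080, 0, 0, 0, 0, 0 }
	};

	h.word[7] = hdr_checksum_sketch(&h);	/* fill in the checksum word */
	printf("checksum ok: %d\n", h.word[7] == hdr_checksum_sketch(&h));
	return 0;
}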
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
index 44575c78cf0..d5475b7270a 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
@@ -1,22 +1,24 @@
/*---------------------------------------------------------------------------
- FT1000 driver for Flarion Flash OFDM NIC Device
-
- Copyright (C) 2002 Flarion Technologies, All rights reserved.
- Copyright (C) 2006 Patrik Ostrihon, All rights reserved.
- Copyright (C) 2006 ProWeb Consulting, a.s, All rights reserved.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published by the Free
- Software Foundation; either version 2 of the License, or (at your option) any
- later version. This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details. You should have received a copy of the GNU General Public
- License along with this program; if not, write to the
- Free Software Foundation, Inc., 59 Temple Place -
- Suite 330, Boston, MA 02111-1307, USA.
+ FT1000 driver for Flarion Flash OFDM NIC Device
+
+ Copyright (C) 2002 Flarion Technologies, All rights reserved.
+ Copyright (C) 2006 Patrik Ostrihon, All rights reserved.
+ Copyright (C) 2006 ProWeb Consulting, a.s, All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option) any
+ later version. This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details. You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the
+ Free Software Foundation, Inc., 59 Temple Place -
+ Suite 330, Boston, MA 02111-1307, USA.
-------------------------------------------------------------------------*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
@@ -44,12 +46,6 @@
#include <pcmcia/cisreg.h>
#include <pcmcia/ds.h>
-#ifdef FT_DEBUG
-#define DEBUG(n, args...) printk(KERN_DEBUG args);
-#else
-#define DEBUG(n, args...)
-#endif
-
#include <linux/delay.h>
#include "ft1000.h"
@@ -57,7 +53,7 @@ static const struct firmware *fw_entry;
static void ft1000_hbchk(u_long data);
static struct timer_list poll_timer = {
- .function = ft1000_hbchk
+ .function = ft1000_hbchk
};
static u16 cmdbuffer[1024];
@@ -72,7 +68,7 @@ static void ft1000_disable_interrupts(struct net_device *dev);
/* new kernel */
MODULE_AUTHOR("");
MODULE_DESCRIPTION
- ("Support for Flarion Flash OFDM NIC Device. Support for PCMCIA when used with ft1000_cs.");
+("Support for Flarion Flash OFDM NIC Device. Support for PCMCIA when used with ft1000_cs.");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("FT1000");
@@ -80,15 +76,15 @@ MODULE_SUPPORTED_DEVICE("FT1000");
/*---------------------------------------------------------------------------
- Function: ft1000_read_fifo_len
- Description: This function will read the ASIC Uplink FIFO status register
- which will return the number of bytes remaining in the Uplink FIFO.
- Sixteen bytes are subtracted to make sure that the ASIC does not
- reach its threshold.
- Input:
- dev - network device structure
- Output:
- value - number of bytes available in the ASIC Uplink FIFO.
+ Function: ft1000_read_fifo_len
+ Description: This function will read the ASIC Uplink FIFO status register
+ which will return the number of bytes remaining in the Uplink FIFO.
+ Sixteen bytes are subtracted to make sure that the ASIC does not
+ reach its threshold.
+ Input:
+ dev - network device structure
+ Output:
+ value - number of bytes available in the ASIC Uplink FIFO.
-------------------------------------------------------------------------*/
static inline u16 ft1000_read_fifo_len(struct net_device *dev)
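
The comment block above describes `ft1000_read_fifo_len()` as the uplink FIFO status read minus a sixteen-byte guard band; the body is not shown in this hunk. A hedged user-space sketch of that shape, with the register accessor and register name as placeholders rather than the driver's real symbols:

#include <stdint.h>
#include <stdio.h>

#define REG_UFIFO_STAT_SKETCH 0x0000	/* placeholder register offset */

static uint16_t read_reg_sketch(uint16_t offset)
{
	(void)offset;
	return 512;			/* pretend 512 bytes are free */
}

/* Free space reported to callers, minus 16 bytes of headroom so the
 * ASIC never reaches its fill threshold (per the description above). */
static uint16_t read_fifo_len_sketch(void)
{
	return read_reg_sketch(REG_UFIFO_STAT_SKETCH) - 16;
}

int main(void)
{
	printf("usable uplink FIFO bytes: %u\n",
	       (unsigned)read_fifo_len_sketch());
	return 0;
}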
@@ -103,14 +99,14 @@ static inline u16 ft1000_read_fifo_len(struct net_device *dev)
/*---------------------------------------------------------------------------
- Function: ft1000_read_dpram
- Description: This function will read the specific area of dpram
- (Electrabuzz ASIC only)
- Input:
- dev - device structure
- offset - index of dpram
- Output:
- value - value of dpram
+ Function: ft1000_read_dpram
+ Description: This function will read the specific area of dpram
+ (Electrabuzz ASIC only)
+ Input:
+ dev - device structure
+ offset - index of dpram
+ Output:
+ value - value of dpram
-------------------------------------------------------------------------*/
u16 ft1000_read_dpram(struct net_device *dev, int offset)
@@ -130,19 +126,19 @@ u16 ft1000_read_dpram(struct net_device *dev, int offset)
/*---------------------------------------------------------------------------
- Function: ft1000_write_dpram
- Description: This function will write to a specific area of dpram
- (Electrabuzz ASIC only)
- Input:
- dev - device structure
- offset - index of dpram
- value - value to write
- Output:
- none.
+ Function: ft1000_write_dpram
+ Description: This function will write to a specific area of dpram
+ (Electrabuzz ASIC only)
+ Input:
+ dev - device structure
+ offset - index of dpram
+ value - value to write
+ Output:
+ none.
-------------------------------------------------------------------------*/
static inline void ft1000_write_dpram(struct net_device *dev,
- int offset, u16 value)
+ int offset, u16 value)
{
struct ft1000_info *info = netdev_priv(dev);
unsigned long flags;
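
The `ft1000_write_dpram()` helpers re-indented here follow the pattern visible elsewhere in this patch: take `info->dpram_lock` with interrupts disabled, point the DPRAM address register at the target offset, write the data register, then release the lock. A user-space model of that indexed-register write, with the lock mapped to a pthread mutex and the register I/O stubbed out (not the driver's code):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Stubbed "registers": the address register selects a DPRAM word,
 * the data register writes through to it. */
static uint16_t dpram[256];
static uint16_t dpram_addr;
static pthread_mutex_t dpram_lock = PTHREAD_MUTEX_INITIALIZER;

static void write_reg_stub(int which, uint16_t value)
{
	if (which == 0)			/* FT1000_REG_DPRAM_ADDR analogue */
		dpram_addr = value;
	else				/* FT1000_REG_DPRAM_DATA analogue */
		dpram[dpram_addr] = value;
}

/* Same shape as ft1000_write_dpram(): the addr/data pair must not be
 * interleaved with another writer, hence the lock around both writes. */
static void write_dpram_sketch(int offset, uint16_t value)
{
	pthread_mutex_lock(&dpram_lock);	/* spin_lock_irqsave() analogue */
	write_reg_stub(0, offset);
	write_reg_stub(1, value);
	pthread_mutex_unlock(&dpram_lock);	/* spin_unlock_irqrestore() */
}

int main(void)
{
	write_dpram_sketch(0x20, 0xfefe);
	printf("dpram[0x20] = 0x%x\n", (unsigned)dpram[0x20]);
	return 0;
}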
@@ -156,14 +152,14 @@ static inline void ft1000_write_dpram(struct net_device *dev,
/*---------------------------------------------------------------------------
- Function: ft1000_read_dpram_mag_16
- Description: This function will read the specific area of dpram
- (Magnemite ASIC only)
- Input:
- dev - device structure
- offset - index of dpram
- Output:
- value - value of dpram
+ Function: ft1000_read_dpram_mag_16
+ Description: This function will read the specific area of dpram
+ (Magnemite ASIC only)
+ Input:
+ dev - device structure
+ offset - index of dpram
+ Output:
+ value - value of dpram
-------------------------------------------------------------------------*/
u16 ft1000_read_dpram_mag_16(struct net_device *dev, int offset, int Index)
@@ -188,19 +184,19 @@ u16 ft1000_read_dpram_mag_16(struct net_device *dev, int offset, int Index)
/*---------------------------------------------------------------------------
- Function: ft1000_write_dpram_mag_16
- Description: This function will write to a specific area of dpram
- (Magnemite ASIC only)
- Input:
- dev - device structure
- offset - index of dpram
- value - value to write
- Output:
- none.
+ Function: ft1000_write_dpram_mag_16
+ Description: This function will write to a specific area of dpram
+ (Magnemite ASIC only)
+ Input:
+ dev - device structure
+ offset - index of dpram
+ value - value to write
+ Output:
+ none.
-------------------------------------------------------------------------*/
static inline void ft1000_write_dpram_mag_16(struct net_device *dev,
- int offset, u16 value, int Index)
+ int offset, u16 value, int Index)
{
struct ft1000_info *info = netdev_priv(dev);
unsigned long flags;
@@ -218,14 +214,14 @@ static inline void ft1000_write_dpram_mag_16(struct net_device *dev,
/*---------------------------------------------------------------------------
- Function: ft1000_read_dpram_mag_32
- Description: This function will read the specific area of dpram
- (Magnemite ASIC only)
- Input:
- dev - device structure
- offset - index of dpram
- Output:
- value - value of dpram
+ Function: ft1000_read_dpram_mag_32
+ Description: This function will read the specific area of dpram
+ (Magnemite ASIC only)
+ Input:
+ dev - device structure
+ offset - index of dpram
+ Output:
+ value - value of dpram
-------------------------------------------------------------------------*/
u32 ft1000_read_dpram_mag_32(struct net_device *dev, int offset)
@@ -245,15 +241,15 @@ u32 ft1000_read_dpram_mag_32(struct net_device *dev, int offset)
/*---------------------------------------------------------------------------
- Function: ft1000_write_dpram_mag_32
- Description: This function will write to a specific area of dpram
- (Magnemite ASIC only)
- Input:
- dev - device structure
- offset - index of dpram
- value - value to write
- Output:
- none.
+ Function: ft1000_write_dpram_mag_32
+ Description: This function will write to a specific area of dpram
+ (Magnemite ASIC only)
+ Input:
+ dev - device structure
+ offset - index of dpram
+ value - value to write
+ Output:
+ none.
-------------------------------------------------------------------------*/
void ft1000_write_dpram_mag_32(struct net_device *dev, int offset, u32 value)
@@ -270,57 +266,51 @@ void ft1000_write_dpram_mag_32(struct net_device *dev, int offset, u32 value)
/*---------------------------------------------------------------------------
- Function: ft1000_enable_interrupts
- Description: This function will enable interrupts base on the current interrupt mask.
- Input:
- dev - device structure
- Output:
- None.
+ Function: ft1000_enable_interrupts
+ Description: This function will enable interrupts based on the current interrupt mask.

+ Input:
+ dev - device structure
+ Output:
+ None.
-------------------------------------------------------------------------*/
static void ft1000_enable_interrupts(struct net_device *dev)
{
u16 tempword;
- DEBUG(1, "ft1000_hw:ft1000_enable_interrupts()\n");
ft1000_write_reg(dev, FT1000_REG_SUP_IMASK, ISR_DEFAULT_MASK);
tempword = ft1000_read_reg(dev, FT1000_REG_SUP_IMASK);
- DEBUG(1,
- "ft1000_hw:ft1000_enable_interrupts:current interrupt enable mask = 0x%x\n",
- tempword);
+ pr_debug("current interrupt enable mask = 0x%x\n", tempword);
}
/*---------------------------------------------------------------------------
- Function: ft1000_disable_interrupts
- Description: This function will disable all interrupts.
- Input:
- dev - device structure
- Output:
- None.
+ Function: ft1000_disable_interrupts
+ Description: This function will disable all interrupts.
+ Input:
+ dev - device structure
+ Output:
+ None.
-------------------------------------------------------------------------*/
static void ft1000_disable_interrupts(struct net_device *dev)
{
u16 tempword;
- DEBUG(1, "ft1000_hw: ft1000_disable_interrupts()\n");
ft1000_write_reg(dev, FT1000_REG_SUP_IMASK, ISR_MASK_ALL);
tempword = ft1000_read_reg(dev, FT1000_REG_SUP_IMASK);
- DEBUG(1,
- "ft1000_hw:ft1000_disable_interrupts:current interrupt enable mask = 0x%x\n",
- tempword);
+ pr_debug("current interrupt enable mask = 0x%x\n", tempword);
}
/*---------------------------------------------------------------------------
- Function: ft1000_reset_asic
- Description: This function will call the Card Service function to reset the
- ASIC.
- Input:
- dev - device structure
- Output:
- none
+ Function: ft1000_reset_asic
+ Description: This function will call the Card Service function to reset the
+ ASIC.
+ Input:
+ dev - device structure
+ Output:
+ none
-------------------------------------------------------------------------*/
static void ft1000_reset_asic(struct net_device *dev)
@@ -329,8 +319,6 @@ static void ft1000_reset_asic(struct net_device *dev)
struct ft1000_pcmcia *pcmcia = info->priv;
u16 tempword;
- DEBUG(1, "ft1000_hw:ft1000_reset_asic called\n");
-
(*info->ft1000_reset) (pcmcia->link);
/*
@@ -351,22 +339,22 @@ static void ft1000_reset_asic(struct net_device *dev)
}
/* clear interrupts */
tempword = ft1000_read_reg(dev, FT1000_REG_SUP_ISR);
- DEBUG(1, "ft1000_hw: interrupt status register = 0x%x\n", tempword);
+ pr_debug("interrupt status register = 0x%x\n", tempword);
ft1000_write_reg(dev, FT1000_REG_SUP_ISR, tempword);
tempword = ft1000_read_reg(dev, FT1000_REG_SUP_ISR);
- DEBUG(1, "ft1000_hw: interrupt status register = 0x%x\n", tempword);
+ pr_debug("interrupt status register = 0x%x\n", tempword);
}
/*---------------------------------------------------------------------------
- Function: ft1000_reset_card
- Description: This function will reset the card
- Input:
- dev - device structure
- Output:
- status - false (card reset fail)
- true (card reset successful)
+ Function: ft1000_reset_card
+ Description: This function will reset the card
+ Input:
+ dev - device structure
+ Output:
+ status - false (card reset fail)
+ true (card reset successful)
-------------------------------------------------------------------------*/
static int ft1000_reset_card(struct net_device *dev)
@@ -377,8 +365,6 @@ static int ft1000_reset_card(struct net_device *dev)
unsigned long flags;
struct prov_record *ptr;
- DEBUG(1, "ft1000_hw:ft1000_reset_card called.....\n");
-
info->CardReady = 0;
info->ProgConStat = 0;
info->squeseqnum = 0;
@@ -388,8 +374,7 @@ static int ft1000_reset_card(struct net_device *dev)
/* Make sure we free any memory reserve for provisioning */
while (list_empty(&info->prov_list) == 0) {
- DEBUG(0,
- "ft1000_hw:ft1000_reset_card:deleting provisioning record\n");
+ pr_debug("deleting provisioning record\n");
ptr = list_entry(info->prov_list.next, struct prov_record, list);
list_del(&ptr->list);
kfree(ptr->pprov_data);
@@ -397,11 +382,10 @@ static int ft1000_reset_card(struct net_device *dev)
}
if (info->AsicID == ELECTRABUZZ_ID) {
- DEBUG(1, "ft1000_hw:ft1000_reset_card:resetting DSP\n");
+ pr_debug("resetting DSP\n");
ft1000_write_reg(dev, FT1000_REG_RESET, DSP_RESET_BIT);
} else {
- DEBUG(1,
- "ft1000_hw:ft1000_reset_card:resetting ASIC and DSP\n");
+ pr_debug("resetting ASIC and DSP\n");
ft1000_write_reg(dev, FT1000_REG_RESET,
(DSP_RESET_BIT | ASIC_RESET_BIT));
}
@@ -428,17 +412,16 @@ static int ft1000_reset_card(struct net_device *dev)
spin_unlock_irqrestore(&info->dpram_lock, flags);
}
- DEBUG(1, "ft1000_hw:ft1000_reset_card:resetting ASIC\n");
+ pr_debug("resetting ASIC\n");
mdelay(10);
/* reset ASIC */
ft1000_reset_asic(dev);
- DEBUG(1, "ft1000_hw:ft1000_reset_card:downloading dsp image\n");
+ pr_debug("downloading dsp image\n");
if (info->AsicID == MAGNEMITE_ID) {
/* Put dsp in reset and take ASIC out of reset */
- DEBUG(0,
- "ft1000_hw:ft1000_reset_card:Put DSP in reset and take ASIC out of reset\n");
+ pr_debug("Put DSP in reset and take ASIC out of reset\n");
ft1000_write_reg(dev, FT1000_REG_RESET, DSP_RESET_BIT);
/* Setting MAGNEMITE ASIC to big endian mode */
@@ -450,7 +433,7 @@ static int ft1000_reset_card(struct net_device *dev)
ft1000_write_reg(dev, FT1000_REG_RESET, 0);
/* FLARION_DSP_ACTIVE; */
mdelay(10);
- DEBUG(0, "ft1000_hw:ft1000_reset_card:Take DSP out of reset\n");
+ pr_debug("Take DSP out of reset\n");
/* Wait for 0xfefe indicating dsp ready before starting download */
for (i = 0; i < 50; i++) {
@@ -464,8 +447,7 @@ static int ft1000_reset_card(struct net_device *dev)
}
if (i == 50) {
- DEBUG(0,
- "ft1000_hw:ft1000_reset_card:No FEFE detected from DSP\n");
+ pr_debug("No FEFE detected from DSP\n");
return false;
}
@@ -476,10 +458,10 @@ static int ft1000_reset_card(struct net_device *dev)
}
if (card_download(dev, fw_entry->data, fw_entry->size)) {
- DEBUG(1, "card download unsuccessful\n");
+ pr_debug("card download unsuccessful\n");
return false;
} else {
- DEBUG(1, "card download successful\n");
+ pr_debug("card download successful\n");
}
mdelay(10);
@@ -494,8 +476,7 @@ static int ft1000_reset_card(struct net_device *dev)
/* Initialize DSP heartbeat area to ho */
ft1000_write_dpram(dev, FT1000_HI_HO, ho);
tempword = ft1000_read_dpram(dev, FT1000_HI_HO);
- DEBUG(1, "ft1000_hw:ft1000_reset_asic:hi_ho value = 0x%x\n",
- tempword);
+ pr_debug("hi_ho value = 0x%x\n", tempword);
} else {
/* Initialize DSP heartbeat area to ho */
ft1000_write_dpram_mag_16(dev, FT1000_MAG_HI_HO, ho_mag,
@@ -503,8 +484,7 @@ static int ft1000_reset_card(struct net_device *dev)
tempword =
ft1000_read_dpram_mag_16(dev, FT1000_MAG_HI_HO,
FT1000_MAG_HI_HO_INDX);
- DEBUG(1, "ft1000_hw:ft1000_reset_card:hi_ho value = 0x%x\n",
- tempword);
+ pr_debug("hi_ho value = 0x%x\n", tempword);
}
info->CardReady = 1;
@@ -521,14 +501,14 @@ static int ft1000_reset_card(struct net_device *dev)
/*---------------------------------------------------------------------------
- Function: ft1000_chkcard
- Description: This function will check if the device is presently available on
- the system.
- Input:
- dev - device structure
- Output:
- status - false (device is not present)
- true (device is present)
+ Function: ft1000_chkcard
+ Description: This function will check if the device is presently available on
+ the system.
+ Input:
+ dev - device structure
+ Output:
+ status - false (device is not present)
+ true (device is present)
-------------------------------------------------------------------------*/
static int ft1000_chkcard(struct net_device *dev)
@@ -541,8 +521,7 @@ static int ft1000_chkcard(struct net_device *dev)
*/
tempword = ft1000_read_reg(dev, FT1000_REG_SUP_IMASK);
if (tempword == 0) {
- DEBUG(1,
- "ft1000_hw:ft1000_chkcard: IMASK = 0 Card not detected\n");
+ pr_debug("IMASK = 0 Card not detected\n");
return false;
}
/*
@@ -551,8 +530,7 @@ static int ft1000_chkcard(struct net_device *dev)
*/
tempword = ft1000_read_reg(dev, FT1000_REG_ASIC_ID);
if (tempword == 0xffff) {
- DEBUG(1,
- "ft1000_hw:ft1000_chkcard: Version = 0xffff Card not detected\n");
+ pr_debug("Version = 0xffff Card not detected\n");
return false;
}
return true;
@@ -561,13 +539,13 @@ static int ft1000_chkcard(struct net_device *dev)
/*---------------------------------------------------------------------------
- Function: ft1000_hbchk
- Description: This function will perform the heart beat check of the DSP as
- well as the ASIC.
- Input:
- dev - device structure
- Output:
- none
+ Function: ft1000_hbchk
+ Description: This function will perform the heart beat check of the DSP as
+ well as the ASIC.
+ Input:
+ dev - device structure
+ Output:
+ none
-------------------------------------------------------------------------*/
static void ft1000_hbchk(u_long data)
@@ -586,11 +564,10 @@ static void ft1000_hbchk(u_long data)
} else {
tempword =
ntohs(ft1000_read_dpram_mag_16
- (dev, FT1000_MAG_HI_HO,
- FT1000_MAG_HI_HO_INDX));
+ (dev, FT1000_MAG_HI_HO,
+ FT1000_MAG_HI_HO_INDX));
}
- DEBUG(1, "ft1000_hw:ft1000_hbchk:hi_ho value = 0x%x\n",
- tempword);
+ pr_debug("hi_ho value = 0x%x\n", tempword);
/* Let's perform another check if ho is not detected */
if (tempword != ho) {
if (info->AsicID == ELECTRABUZZ_ID) {
@@ -601,8 +578,7 @@ static void ft1000_hbchk(u_long data)
}
}
if (tempword != ho) {
- printk(KERN_INFO
- "ft1000: heartbeat failed - no ho detected\n");
+ pr_info("heartbeat failed - no ho detected\n");
if (info->AsicID == ELECTRABUZZ_ID) {
info->DSP_TIME[0] =
ft1000_read_dpram(dev, FT1000_DSP_TIMER0);
@@ -632,8 +608,7 @@ static void ft1000_hbchk(u_long data)
}
info->DrvErrNum = DSP_HB_INFO;
if (ft1000_reset_card(dev) == 0) {
- printk(KERN_INFO
- "ft1000: Hardware Failure Detected - PC Card disabled\n");
+ pr_info("Hardware Failure Detected - PC Card disabled\n");
info->ProgConStat = 0xff;
return;
}
@@ -650,8 +625,7 @@ static void ft1000_hbchk(u_long data)
tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL);
}
if (tempword & FT1000_DB_HB) {
- printk(KERN_INFO
- "ft1000: heartbeat doorbell not clear by firmware\n");
+ pr_info("heartbeat doorbell not clear by firmware\n");
if (info->AsicID == ELECTRABUZZ_ID) {
info->DSP_TIME[0] =
ft1000_read_dpram(dev, FT1000_DSP_TIMER0);
@@ -681,8 +655,7 @@ static void ft1000_hbchk(u_long data)
}
info->DrvErrNum = DSP_HB_INFO;
if (ft1000_reset_card(dev) == 0) {
- printk(KERN_INFO
- "ft1000: Hardware Failure Detected - PC Card disabled\n");
+ pr_info("Hardware Failure Detected - PC Card disabled\n");
info->ProgConStat = 0xff;
return;
}
@@ -708,8 +681,8 @@ static void ft1000_hbchk(u_long data)
} else {
tempword =
ntohs(ft1000_read_dpram_mag_16
- (dev, FT1000_MAG_HI_HO,
- FT1000_MAG_HI_HO_INDX));
+ (dev, FT1000_MAG_HI_HO,
+ FT1000_MAG_HI_HO_INDX));
}
/* Let's write hi again if fail */
if (tempword != hi) {
@@ -730,8 +703,7 @@ static void ft1000_hbchk(u_long data)
}
if (tempword != hi) {
- printk(KERN_INFO
- "ft1000: heartbeat failed - cannot write hi into DPRAM\n");
+ pr_info("heartbeat failed - cannot write hi into DPRAM\n");
if (info->AsicID == ELECTRABUZZ_ID) {
info->DSP_TIME[0] =
ft1000_read_dpram(dev, FT1000_DSP_TIMER0);
@@ -761,8 +733,7 @@ static void ft1000_hbchk(u_long data)
}
info->DrvErrNum = DSP_HB_INFO;
if (ft1000_reset_card(dev) == 0) {
- printk(KERN_INFO
- "ft1000: Hardware Failure Detected - PC Card disabled\n");
+ pr_info("Hardware Failure Detected - PC Card disabled\n");
info->ProgConStat = 0xff;
return;
}
@@ -778,19 +749,19 @@ static void ft1000_hbchk(u_long data)
/* Schedule this module to run every 2 seconds */
poll_timer.expires = jiffies + (2 * HZ);
- poll_timer.data = (u_long) dev;
+ poll_timer.data = (u_long)dev;
add_timer(&poll_timer);
}
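
As the tail of `ft1000_hbchk()` shows, the heartbeat check stays alive by re-arming the same `poll_timer` for `jiffies + 2 * HZ` at the end of every run; `ft1000_open()` later in the patch arms it the first time. A compact user-space analogue of that self-rescheduling pattern (the 2-second period matches the driver, everything else is illustrative):

#include <stdio.h>

#define HZ 100

/* Minimal model of the add_timer()/re-arm pattern: each run of the
 * heartbeat handler schedules the next run 2 * HZ ticks later. */
static unsigned long jiffies;
static unsigned long timer_expires;
static int timer_pending;

static void hbchk_sketch(void)
{
	printf("heartbeat check at jiffies=%lu\n", jiffies);
	timer_expires = jiffies + 2 * HZ;	/* re-arm, as in ft1000_hbchk() */
	timer_pending = 1;
}

int main(void)
{
	timer_expires = 2 * HZ;			/* first arm, as in ft1000_open() */
	timer_pending = 1;

	for (jiffies = 0; jiffies < 10 * HZ; jiffies++) {
		if (timer_pending && jiffies >= timer_expires) {
			timer_pending = 0;
			hbchk_sketch();
		}
	}
	return 0;
}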
/*---------------------------------------------------------------------------
- Function: ft1000_send_cmd
- Description:
- Input:
- Output:
+ Function: ft1000_send_cmd
+ Description:
+ Input:
+ Output:
-------------------------------------------------------------------------*/
-static void ft1000_send_cmd (struct net_device *dev, u16 *ptempbuffer, int size, u16 qtype)
+static void ft1000_send_cmd(struct net_device *dev, u16 *ptempbuffer, int size, u16 qtype)
{
struct ft1000_info *info = netdev_priv(dev);
int i;
@@ -802,26 +773,26 @@ static void ft1000_send_cmd (struct net_device *dev, u16 *ptempbuffer, int size,
if ((size & 0x0001)) {
size++;
}
- DEBUG(1, "FT1000:ft1000_send_cmd:total length = %d\n", size);
- DEBUG(1, "FT1000:ft1000_send_cmd:length = %d\n", ntohs(*ptempbuffer));
+ pr_debug("total length = %d\n", size);
+ pr_debug("length = %d\n", ntohs(*ptempbuffer));
/*
* put message into slow queue area
* All messages are in the form total_len + pseudo header + message body
*/
spin_lock_irqsave(&info->dpram_lock, flags);
- /* Make sure SLOWQ doorbell is clear */
- tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL);
- i=0;
- while (tempword & FT1000_DB_DPRAM_TX) {
- mdelay(10);
- i++;
- if (i==10) {
- spin_unlock_irqrestore(&info->dpram_lock, flags);
- return;
- }
- tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL);
- }
+ /* Make sure SLOWQ doorbell is clear */
+ tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL);
+ i = 0;
+ while (tempword & FT1000_DB_DPRAM_TX) {
+ mdelay(10);
+ i++;
+ if (i == 10) {
+ spin_unlock_irqrestore(&info->dpram_lock, flags);
+ return;
+ }
+ tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL);
+ }
if (info->AsicID == ELECTRABUZZ_ID) {
ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
@@ -830,8 +801,7 @@ static void ft1000_send_cmd (struct net_device *dev, u16 *ptempbuffer, int size,
ft1000_write_reg(dev, FT1000_REG_DPRAM_DATA, size);
/* Write pseudo header and messgae body */
for (i = 0; i < (size >> 1); i++) {
- DEBUG(1, "FT1000:ft1000_send_cmd:data %d = 0x%x\n", i,
- *ptempbuffer);
+ pr_debug("data %d = 0x%x\n", i, *ptempbuffer);
tempword = htons(*ptempbuffer++);
ft1000_write_reg(dev, FT1000_REG_DPRAM_DATA, tempword);
}
@@ -844,18 +814,16 @@ static void ft1000_send_cmd (struct net_device *dev, u16 *ptempbuffer, int size,
ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
FT1000_DPRAM_MAG_TX_BASE + 1);
for (i = 0; i < (size >> 2); i++) {
- DEBUG(1, "FT1000:ft1000_send_cmd:data = 0x%x\n",
- *ptempbuffer);
+ pr_debug("data = 0x%x\n", *ptempbuffer);
outw(*ptempbuffer++,
- dev->base_addr + FT1000_REG_MAG_DPDATAL);
- DEBUG(1, "FT1000:ft1000_send_cmd:data = 0x%x\n",
- *ptempbuffer);
+ dev->base_addr + FT1000_REG_MAG_DPDATAL);
+ pr_debug("data = 0x%x\n", *ptempbuffer);
outw(*ptempbuffer++,
- dev->base_addr + FT1000_REG_MAG_DPDATAH);
+ dev->base_addr + FT1000_REG_MAG_DPDATAH);
}
- DEBUG(1, "FT1000:ft1000_send_cmd:data = 0x%x\n", *ptempbuffer);
+ pr_debug("data = 0x%x\n", *ptempbuffer);
outw(*ptempbuffer++, dev->base_addr + FT1000_REG_MAG_DPDATAL);
- DEBUG(1, "FT1000:ft1000_send_cmd:data = 0x%x\n", *ptempbuffer);
+ pr_debug("data = 0x%x\n", *ptempbuffer);
outw(*ptempbuffer++, dev->base_addr + FT1000_REG_MAG_DPDATAH);
}
spin_unlock_irqrestore(&info->dpram_lock, flags);
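
The block re-indented above is the command-send preamble: under `dpram_lock`, poll the doorbell register until `FT1000_DB_DPRAM_TX` clears, giving up after ten 10 ms waits so the caller never spins forever on a stuck DSP. A small user-space model of that bounded polling loop, with the register read and delay stubbed and the doorbell bit value a placeholder:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DB_DPRAM_TX_SKETCH 0x0002	/* placeholder doorbell bit */

static int busy_reads = 3;		/* pretend the bit clears after 3 reads */

static uint16_t read_doorbell_stub(void)
{
	return busy_reads-- > 0 ? DB_DPRAM_TX_SKETCH : 0;
}

static void mdelay_stub(int ms) { (void)ms; }

/* Same shape as the loop in ft1000_send_cmd(): at most 10 retries,
 * then bail out instead of blocking indefinitely. */
static bool wait_slowq_clear_sketch(void)
{
	uint16_t doorbell = read_doorbell_stub();
	int i = 0;

	while (doorbell & DB_DPRAM_TX_SKETCH) {
		mdelay_stub(10);
		if (++i == 10)
			return false;
		doorbell = read_doorbell_stub();
	}
	return true;
}

int main(void)
{
	printf("slow queue ready: %d\n", wait_slowq_clear_sketch());
	return 0;
}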
@@ -866,19 +834,19 @@ static void ft1000_send_cmd (struct net_device *dev, u16 *ptempbuffer, int size,
/*---------------------------------------------------------------------------
- Function: ft1000_receive_cmd
- Description: This function will read a message from the dpram area.
- Input:
- dev - network device structure
- pbuffer - caller supply address to buffer
- pnxtph - pointer to next pseudo header
- Output:
- Status = 0 (unsuccessful)
- = 1 (successful)
+ Function: ft1000_receive_cmd
+ Description: This function will read a message from the dpram area.
+ Input:
+ dev - network device structure
+ pbuffer - caller-supplied address of the buffer
+ pnxtph - pointer to next pseudo header
+ Output:
+ Status = 0 (unsuccessful)
+ = 1 (successful)
-------------------------------------------------------------------------*/
static bool ft1000_receive_cmd(struct net_device *dev, u16 *pbuffer,
- int maxsz, u16 *pnxtph)
+ int maxsz, u16 *pnxtph)
{
struct ft1000_info *info = netdev_priv(dev);
u16 size;
@@ -888,20 +856,18 @@ static bool ft1000_receive_cmd(struct net_device *dev, u16 *pbuffer,
unsigned long flags;
if (info->AsicID == ELECTRABUZZ_ID) {
- size = ( ft1000_read_dpram(dev, *pnxtph) ) + sizeof(struct pseudo_hdr);
+ size = (ft1000_read_dpram(dev, *pnxtph)) + sizeof(struct pseudo_hdr);
} else {
size =
ntohs(ft1000_read_dpram_mag_16
- (dev, FT1000_MAG_PH_LEN,
- FT1000_MAG_PH_LEN_INDX)) + sizeof(struct pseudo_hdr);
+ (dev, FT1000_MAG_PH_LEN,
+ FT1000_MAG_PH_LEN_INDX)) + sizeof(struct pseudo_hdr);
}
if (size > maxsz) {
- DEBUG(1,
- "FT1000:ft1000_receive_cmd:Invalid command length = %d\n",
- size);
+ pr_debug("Invalid command length = %d\n", size);
return false;
} else {
- ppseudohdr = (u16 *) pbuffer;
+ ppseudohdr = (u16 *)pbuffer;
spin_lock_irqsave(&info->dpram_lock, flags);
if (info->AsicID == ELECTRABUZZ_ID) {
ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
@@ -915,26 +881,26 @@ static bool ft1000_receive_cmd(struct net_device *dev, u16 *pbuffer,
ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
FT1000_DPRAM_MAG_RX_BASE);
*pbuffer = inw(dev->base_addr + FT1000_REG_MAG_DPDATAH);
- DEBUG(1, "ft1000_hw:received data = 0x%x\n", *pbuffer);
+ pr_debug("received data = 0x%x\n", *pbuffer);
pbuffer++;
ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
FT1000_DPRAM_MAG_RX_BASE + 1);
for (i = 0; i <= (size >> 2); i++) {
*pbuffer =
inw(dev->base_addr +
- FT1000_REG_MAG_DPDATAL);
+ FT1000_REG_MAG_DPDATAL);
pbuffer++;
*pbuffer =
inw(dev->base_addr +
- FT1000_REG_MAG_DPDATAH);
+ FT1000_REG_MAG_DPDATAH);
pbuffer++;
}
/* copy odd aligned word */
*pbuffer = inw(dev->base_addr + FT1000_REG_MAG_DPDATAL);
- DEBUG(1, "ft1000_hw:received data = 0x%x\n", *pbuffer);
+ pr_debug("received data = 0x%x\n", *pbuffer);
pbuffer++;
*pbuffer = inw(dev->base_addr + FT1000_REG_MAG_DPDATAH);
- DEBUG(1, "ft1000_hw:received data = 0x%x\n", *pbuffer);
+ pr_debug("received data = 0x%x\n", *pbuffer);
pbuffer++;
}
if (size & 0x0001) {
@@ -953,8 +919,7 @@ static bool ft1000_receive_cmd(struct net_device *dev, u16 *pbuffer,
tempword ^= *ppseudohdr++;
}
if ((tempword != *ppseudohdr)) {
- DEBUG(1,
- "FT1000:ft1000_receive_cmd:Pseudo header checksum mismatch\n");
+ pr_debug("Pseudo header checksum mismatch\n");
/* Drop this message */
return false;
}
@@ -964,13 +929,13 @@ static bool ft1000_receive_cmd(struct net_device *dev, u16 *pbuffer,
/*---------------------------------------------------------------------------
- Function: ft1000_proc_drvmsg
- Description: This function will process the various driver messages.
- Input:
- dev - device structure
- pnxtph - pointer to next pseudo header
- Output:
- none
+ Function: ft1000_proc_drvmsg
+ Description: This function will process the various driver messages.
+ Input:
+ dev - device structure
+ pnxtph - pointer to next pseudo header
+ Output:
+ none
-------------------------------------------------------------------------*/
static void ft1000_proc_drvmsg(struct net_device *dev)
@@ -992,25 +957,24 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
u16 wrd;
} convert;
- if (info->AsicID == ELECTRABUZZ_ID) {
- tempword = FT1000_DPRAM_RX_BASE+2;
- }
- else {
- tempword = FT1000_DPRAM_MAG_RX_BASE;
- }
- if ( ft1000_receive_cmd(dev, &cmdbuffer[0], MAX_CMD_SQSIZE, &tempword) ) {
+ if (info->AsicID == ELECTRABUZZ_ID) {
+ tempword = FT1000_DPRAM_RX_BASE+2;
+ }
+ else {
+ tempword = FT1000_DPRAM_MAG_RX_BASE;
+ }
+ if (ft1000_receive_cmd(dev, &cmdbuffer[0], MAX_CMD_SQSIZE, &tempword)) {
/* Get the message type which is total_len + PSEUDO header + msgtype + message body */
- pdrvmsg = (struct drv_msg *) & cmdbuffer[0];
+ pdrvmsg = (struct drv_msg *)&cmdbuffer[0];
msgtype = ntohs(pdrvmsg->type);
- DEBUG(1, "Command message type = 0x%x\n", msgtype);
+ pr_debug("Command message type = 0x%x\n", msgtype);
switch (msgtype) {
case DSP_PROVISION:
- DEBUG(0,
- "Got a provisioning request message from DSP\n");
+ pr_debug("Got a provisioning request message from DSP\n");
mdelay(25);
while (list_empty(&info->prov_list) == 0) {
- DEBUG(0, "Sending a provisioning message\n");
+ pr_debug("Sending a provisioning message\n");
/* Make sure SLOWQ doorbell is clear */
tempword =
ft1000_read_reg(dev, FT1000_REG_DOORBELL);
@@ -1025,25 +989,25 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
ptr =
list_entry(info->prov_list.next,
struct prov_record, list);
- len = *(u16 *) ptr->pprov_data;
+ len = *(u16 *)ptr->pprov_data;
len = htons(len);
- pmsg = (u16 *) ptr->pprov_data;
- ppseudo_hdr = (struct pseudo_hdr *) pmsg;
+ pmsg = (u16 *)ptr->pprov_data;
+ ppseudo_hdr = (struct pseudo_hdr *)pmsg;
/* Insert slow queue sequence number */
ppseudo_hdr->seq_num = info->squeseqnum++;
ppseudo_hdr->portsrc = 0;
/* Calculate new checksum */
ppseudo_hdr->checksum = *pmsg++;
- DEBUG(1, "checksum = 0x%x\n",
- ppseudo_hdr->checksum);
+ pr_debug("checksum = 0x%x\n",
+ ppseudo_hdr->checksum);
for (i = 1; i < 7; i++) {
ppseudo_hdr->checksum ^= *pmsg++;
- DEBUG(1, "checksum = 0x%x\n",
- ppseudo_hdr->checksum);
+ pr_debug("checksum = 0x%x\n",
+ ppseudo_hdr->checksum);
}
- ft1000_send_cmd (dev, (u16 *)ptr->pprov_data, len, SLOWQ_TYPE);
+ ft1000_send_cmd(dev, (u16 *)ptr->pprov_data, len, SLOWQ_TYPE);
list_del(&ptr->list);
kfree(ptr->pprov_data);
kfree(ptr);
@@ -1055,19 +1019,29 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
info->CardReady = 1;
break;
case MEDIA_STATE:
- pmediamsg = (struct media_msg *) & cmdbuffer[0];
+ pmediamsg = (struct media_msg *)&cmdbuffer[0];
if (info->ProgConStat != 0xFF) {
- if (pmediamsg->state) {
- DEBUG(1, "Media is up\n");
- if (info->mediastate == 0) {
- netif_carrier_on(dev);
- netif_wake_queue(dev);
- info->mediastate = 1;
- do_gettimeofday(&tv);
- info->ConTm = tv.tv_sec;
+ if (pmediamsg->state) {
+ pr_debug("Media is up\n");
+ if (info->mediastate == 0) {
+ netif_carrier_on(dev);
+ netif_wake_queue(dev);
+ info->mediastate = 1;
+ do_gettimeofday(&tv);
+ info->ConTm = tv.tv_sec;
+ }
+ } else {
+ pr_debug("Media is down\n");
+ if (info->mediastate == 1) {
+ info->mediastate = 0;
+ netif_carrier_off(dev);
+ netif_stop_queue(dev);
+ info->ConTm = 0;
+ }
}
- } else {
- DEBUG(1, "Media is down\n");
+ }
+ else {
+ pr_debug("Media is down\n");
if (info->mediastate == 1) {
info->mediastate = 0;
netif_carrier_off(dev);
@@ -1075,25 +1049,15 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
info->ConTm = 0;
}
}
- }
- else {
- DEBUG(1, "Media is down\n");
- if (info->mediastate == 1) {
- info->mediastate = 0;
- netif_carrier_off(dev);
- netif_stop_queue(dev);
- info->ConTm = 0;
- }
- }
break;
case DSP_INIT_MSG:
- pdspinitmsg = (struct dsp_init_msg *) & cmdbuffer[0];
+ pdspinitmsg = (struct dsp_init_msg *)&cmdbuffer[0];
memcpy(info->DspVer, pdspinitmsg->DspVer, DSPVERSZ);
- DEBUG(1, "DSPVER = 0x%2x 0x%2x 0x%2x 0x%2x\n",
- info->DspVer[0], info->DspVer[1], info->DspVer[2],
- info->DspVer[3]);
+ pr_debug("DSPVER = 0x%2x 0x%2x 0x%2x 0x%2x\n",
+ info->DspVer[0], info->DspVer[1],
+ info->DspVer[2], info->DspVer[3]);
memcpy(info->HwSerNum, pdspinitmsg->HwSerNum,
- HWSERNUMSZ);
+ HWSERNUMSZ);
memcpy(info->Sku, pdspinitmsg->Sku, SKUSZ);
memcpy(info->eui64, pdspinitmsg->eui64, EUISZ);
dev->dev_addr[0] = info->eui64[0];
@@ -1104,34 +1068,33 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
dev->dev_addr[5] = info->eui64[7];
if (ntohs(pdspinitmsg->length) ==
- (sizeof(struct dsp_init_msg) - 20)) {
+ (sizeof(struct dsp_init_msg) - 20)) {
memcpy(info->ProductMode,
- pdspinitmsg->ProductMode, MODESZ);
+ pdspinitmsg->ProductMode, MODESZ);
memcpy(info->RfCalVer, pdspinitmsg->RfCalVer,
- CALVERSZ);
+ CALVERSZ);
memcpy(info->RfCalDate, pdspinitmsg->RfCalDate,
- CALDATESZ);
- DEBUG(1, "RFCalVer = 0x%2x 0x%2x\n",
- info->RfCalVer[0], info->RfCalVer[1]);
+ CALDATESZ);
+ pr_debug("RFCalVer = 0x%2x 0x%2x\n",
+ info->RfCalVer[0], info->RfCalVer[1]);
}
- break ;
+ break;
case DSP_STORE_INFO:
- DEBUG(1, "FT1000:drivermsg:Got DSP_STORE_INFO\n");
+ pr_debug("Got DSP_STORE_INFO\n");
tempword = ntohs(pdrvmsg->length);
info->DSPInfoBlklen = tempword;
if (tempword < (MAX_DSP_SESS_REC - 4)) {
- pmsg = (u16 *) & pdrvmsg->data[0];
+ pmsg = (u16 *)&pdrvmsg->data[0];
for (i = 0; i < ((tempword + 1) / 2); i++) {
- DEBUG(1,
- "FT1000:drivermsg:dsp info data = 0x%x\n",
- *pmsg);
+ pr_debug("dsp info data = 0x%x\n",
+ *pmsg);
info->DSPInfoBlk[i + 10] = *pmsg++;
}
}
break;
case DSP_GET_INFO:
- DEBUG(1, "FT1000:drivermsg:Got DSP_GET_INFO\n");
+ pr_debug("Got DSP_GET_INFO\n");
/*
* copy dsp info block to dsp
* allow any outstanding ioctl to finish
@@ -1152,8 +1115,8 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
* Put message into Slow Queue
* Form Pseudo header
*/
- pmsg = (u16 *) info->DSPInfoBlk;
- ppseudo_hdr = (struct pseudo_hdr *) pmsg;
+ pmsg = (u16 *)info->DSPInfoBlk;
+ ppseudo_hdr = (struct pseudo_hdr *)pmsg;
ppseudo_hdr->length =
htons(info->DSPInfoBlklen + 4);
ppseudo_hdr->source = 0x10;
@@ -1177,12 +1140,12 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
info->DSPInfoBlk[8] = 0x7200;
info->DSPInfoBlk[9] =
htons(info->DSPInfoBlklen);
- ft1000_send_cmd (dev, (u16 *)info->DSPInfoBlk, (u16)(info->DSPInfoBlklen+4), 0);
+ ft1000_send_cmd(dev, (u16 *)info->DSPInfoBlk, (u16)(info->DSPInfoBlklen+4), 0);
}
break;
case GET_DRV_ERR_RPT_MSG:
- DEBUG(1, "FT1000:drivermsg:Got GET_DRV_ERR_RPT_MSG\n");
+ pr_debug("Got GET_DRV_ERR_RPT_MSG\n");
/*
* copy driver error message to dsp
* allow any outstanding ioctl to finish
@@ -1203,8 +1166,8 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
* Put message into Slow Queue
* Form Pseudo header
*/
- pmsg = (u16 *) & tempbuffer[0];
- ppseudo_hdr = (struct pseudo_hdr *) pmsg;
+ pmsg = (u16 *)&tempbuffer[0];
+ ppseudo_hdr = (struct pseudo_hdr *)pmsg;
ppseudo_hdr->length = htons(0x0012);
ppseudo_hdr->source = 0x10;
ppseudo_hdr->destination = 0x20;
@@ -1220,11 +1183,11 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
/* Insert application id */
ppseudo_hdr->portsrc = 0;
/* Calculate new checksum */
- ppseudo_hdr->checksum = *pmsg++;
- for (i=1; i<7; i++) {
- ppseudo_hdr->checksum ^= *pmsg++;
- }
- pmsg = (u16 *) & tempbuffer[16];
+ ppseudo_hdr->checksum = *pmsg++;
+ for (i = 1; i < 7; i++) {
+ ppseudo_hdr->checksum ^= *pmsg++;
+ }
+ pmsg = (u16 *)&tempbuffer[16];
*pmsg++ = htons(RSP_DRV_ERR_RPT_MSG);
*pmsg++ = htons(0x000e);
*pmsg++ = htons(info->DSP_TIME[0]);
@@ -1239,7 +1202,7 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
*pmsg++ = convert.wrd;
*pmsg++ = htons(info->DrvErrNum);
- ft1000_send_cmd (dev, (u16 *)&tempbuffer[0], (u16)(0x0012), 0);
+ ft1000_send_cmd(dev, (u16 *)&tempbuffer[0], (u16)(0x0012), 0);
info->DrvErrNum = 0;
}
@@ -1252,14 +1215,14 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
/*---------------------------------------------------------------------------
- Function: ft1000_parse_dpram_msg
- Description: This function will parse the message received from the DSP
- via the DPRAM interface.
- Input:
- dev - device structure
- Output:
- status - FAILURE
- SUCCESS
+ Function: ft1000_parse_dpram_msg
+ Description: This function will parse the message received from the DSP
+ via the DPRAM interface.
+ Input:
+ dev - device structure
+ Output:
+ status - FAILURE
+ SUCCESS
-------------------------------------------------------------------------*/
static int ft1000_parse_dpram_msg(struct net_device *dev)
@@ -1270,11 +1233,10 @@ static int ft1000_parse_dpram_msg(struct net_device *dev)
u16 nxtph;
u16 total_len;
int i = 0;
- int cnt;
unsigned long flags;
doorbell = ft1000_read_reg(dev, FT1000_REG_DOORBELL);
- DEBUG(1, "Doorbell = 0x%x\n", doorbell);
+ pr_debug("Doorbell = 0x%x\n", doorbell);
if (doorbell & FT1000_ASIC_RESET_REQ) {
/* Copy DSP session record from info block */
@@ -1291,7 +1253,7 @@ static int ft1000_parse_dpram_msg(struct net_device *dev)
FT1000_DPRAM_MAG_RX_BASE);
for (i = 0; i < MAX_DSP_SESS_REC / 2; i++) {
outl(info->DSPSess.MagRec[i],
- dev->base_addr + FT1000_REG_MAG_DPDATA);
+ dev->base_addr + FT1000_REG_MAG_DPDATA);
}
}
spin_unlock_irqrestore(&info->dpram_lock, flags);
@@ -1299,7 +1261,7 @@ static int ft1000_parse_dpram_msg(struct net_device *dev)
/* clear ASIC RESET request */
ft1000_write_reg(dev, FT1000_REG_DOORBELL,
FT1000_ASIC_RESET_REQ);
- DEBUG(1, "Got an ASIC RESET Request\n");
+ pr_debug("Got an ASIC RESET Request\n");
ft1000_write_reg(dev, FT1000_REG_DOORBELL,
FT1000_ASIC_RESET_DSP);
@@ -1311,8 +1273,7 @@ static int ft1000_parse_dpram_msg(struct net_device *dev)
}
if (doorbell & FT1000_DSP_ASIC_RESET) {
- DEBUG(0,
- "FT1000:ft1000_parse_dpram_msg: Got a dsp ASIC reset message\n");
+ pr_debug("Got a dsp ASIC reset message\n");
ft1000_write_reg(dev, FT1000_REG_DOORBELL,
FT1000_DSP_ASIC_RESET);
udelay(200);
@@ -1320,8 +1281,7 @@ static int ft1000_parse_dpram_msg(struct net_device *dev)
}
if (doorbell & FT1000_DB_DPRAM_RX) {
- DEBUG(1,
- "FT1000:ft1000_parse_dpram_msg: Got a slow queue message\n");
+ pr_debug("Got a slow queue message\n");
nxtph = FT1000_DPRAM_RX_BASE + 2;
if (info->AsicID == ELECTRABUZZ_ID) {
total_len =
@@ -1329,14 +1289,12 @@ static int ft1000_parse_dpram_msg(struct net_device *dev)
} else {
total_len =
ntohs(ft1000_read_dpram_mag_16
- (dev, FT1000_MAG_TOTAL_LEN,
- FT1000_MAG_TOTAL_LEN_INDX));
+ (dev, FT1000_MAG_TOTAL_LEN,
+ FT1000_MAG_TOTAL_LEN_INDX));
}
- DEBUG(1, "FT1000:ft1000_parse_dpram_msg:total length = %d\n",
- total_len);
+ pr_debug("total length = %d\n", total_len);
if ((total_len < MAX_CMD_SQSIZE) && (total_len > sizeof(struct pseudo_hdr))) {
- total_len += nxtph;
- cnt = 0;
+ total_len += nxtph;
/*
* ft1000_read_reg will return a value that needs to be byteswap
* in order to get DSP_QID_OFFSET.
@@ -1353,7 +1311,7 @@ static int ft1000_parse_dpram_msg(struct net_device *dev)
(dev, FT1000_MAG_PORT_ID,
FT1000_MAG_PORT_ID_INDX) & 0xff);
}
- DEBUG(1, "DSP_QID = 0x%x\n", portid);
+ pr_debug("DSP_QID = 0x%x\n", portid);
if (portid == DRIVERID) {
/* We are assumming one driver message from the DSP at a time. */
@@ -1389,7 +1347,7 @@ static int ft1000_parse_dpram_msg(struct net_device *dev)
FT1000_MAG_DSP_TIMER3_INDX);
}
info->DrvErrNum = DSP_CONDRESET_INFO;
- DEBUG(1, "ft1000_hw:DSP conditional reset requested\n");
+ pr_debug("DSP conditional reset requested\n");
ft1000_reset_card(dev);
ft1000_write_reg(dev, FT1000_REG_DOORBELL,
FT1000_DB_COND_RESET);
@@ -1397,9 +1355,9 @@ static int ft1000_parse_dpram_msg(struct net_device *dev)
/* let's clear any unexpected doorbells from DSP */
doorbell =
doorbell & ~(FT1000_DB_DPRAM_RX | FT1000_ASIC_RESET_REQ |
- FT1000_DB_COND_RESET | 0xff00);
+ FT1000_DB_COND_RESET | 0xff00);
if (doorbell) {
- DEBUG(1, "Clearing unexpected doorbell = 0x%x\n", doorbell);
+ pr_debug("Clearing unexpected doorbell = 0x%x\n", doorbell);
ft1000_write_reg(dev, FT1000_REG_DOORBELL, doorbell);
}
@@ -1409,14 +1367,14 @@ static int ft1000_parse_dpram_msg(struct net_device *dev)
/*---------------------------------------------------------------------------
- Function: ft1000_flush_fifo
- Description: This function will flush one packet from the downlink
- FIFO.
- Input:
- dev - device structure
- drv_err - driver error causing the flush fifo
- Output:
- None.
+ Function: ft1000_flush_fifo
+ Description: This function will flush one packet from the downlink
+ FIFO.
+ Input:
+ dev - device structure
+ drv_err - driver error causing the flush fifo
+ Output:
+ None.
-------------------------------------------------------------------------*/
static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum)
@@ -1427,7 +1385,6 @@ static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum)
u32 templong;
u16 tempword;
- DEBUG(1, "ft1000:ft1000_hw:ft1000_flush_fifo called\n");
if (pcmcia->PktIntfErr > MAX_PH_ERR) {
if (info->AsicID == ELECTRABUZZ_ID) {
info->DSP_TIME[0] =
@@ -1514,7 +1471,7 @@ static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum)
*/
tempword =
inw(dev->base_addr +
- FT1000_REG_SUP_IMASK);
+ FT1000_REG_SUP_IMASK);
if (tempword == 0) {
/* This indicates that we can not communicate with the ASIC */
info->DrvErrNum =
@@ -1533,23 +1490,23 @@ static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum)
} while ((tempword & 0x03) != 0x03);
if (info->AsicID == ELECTRABUZZ_ID) {
i++;
- DEBUG(0, "Flushing FIFO complete = %x\n", tempword);
+ pr_debug("Flushing FIFO complete = %x\n", tempword);
/* Flush last word in FIFO. */
tempword = ft1000_read_reg(dev, FT1000_REG_DFIFO);
/* Update FIFO counter for DSP */
i = i * 2;
- DEBUG(0, "Flush Data byte count to dsp = %d\n", i);
+ pr_debug("Flush Data byte count to dsp = %d\n", i);
info->fifo_cnt += i;
ft1000_write_dpram(dev, FT1000_FIFO_LEN,
info->fifo_cnt);
} else {
- DEBUG(0, "Flushing FIFO complete = %x\n", tempword);
+ pr_debug("Flushing FIFO complete = %x\n", tempword);
/* Flush last word in FIFO */
templong = inl(dev->base_addr + FT1000_REG_MAG_DFR);
tempword = inw(dev->base_addr + FT1000_REG_SUP_STAT);
- DEBUG(0, "FT1000_REG_SUP_STAT = 0x%x\n", tempword);
+ pr_debug("FT1000_REG_SUP_STAT = 0x%x\n", tempword);
tempword = inw(dev->base_addr + FT1000_REG_MAG_DFSR);
- DEBUG(0, "FT1000_REG_MAG_DFSR = 0x%x\n", tempword);
+ pr_debug("FT1000_REG_MAG_DFSR = 0x%x\n", tempword);
}
if (DrvErrNum) {
pcmcia->PktIntfErr++;
@@ -1559,15 +1516,15 @@ static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum)
/*---------------------------------------------------------------------------
- Function: ft1000_copy_up_pkt
- Description: This function will pull Flarion packets out of the Downlink
- FIFO and convert it to an ethernet packet. The ethernet packet will
- then be deliver to the TCP/IP stack.
- Input:
- dev - device structure
- Output:
- status - FAILURE
- SUCCESS
+ Function: ft1000_copy_up_pkt
+ Description: This function will pull Flarion packets out of the Downlink
+ FIFO and convert it to an ethernet packet. The ethernet packet will
+ then be delivered to the TCP/IP stack.
+ Input:
+ dev - device structure
+ Output:
+ status - FAILURE
+ SUCCESS
-------------------------------------------------------------------------*/
static int ft1000_copy_up_pkt(struct net_device *dev)
@@ -1583,7 +1540,6 @@ static int ft1000_copy_up_pkt(struct net_device *dev)
u32 *ptemplong;
u32 templong;
- DEBUG(1, "ft1000_copy_up_pkt\n");
/* Read length */
if (info->AsicID == ELECTRABUZZ_ID) {
tempword = ft1000_read_reg(dev, FT1000_REG_DFIFO);
@@ -1593,10 +1549,10 @@ static int ft1000_copy_up_pkt(struct net_device *dev)
len = ntohs(tempword);
}
chksum = tempword;
- DEBUG(1, "Number of Bytes in FIFO = %d\n", len);
+ pr_debug("Number of Bytes in FIFO = %d\n", len);
if (len > ENET_MAX_SIZE) {
- DEBUG(0, "size of ethernet packet invalid\n");
+ pr_debug("size of ethernet packet invalid\n");
if (info->AsicID == MAGNEMITE_ID) {
/* Read High word to complete 32 bit access */
tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRH);
@@ -1609,7 +1565,7 @@ static int ft1000_copy_up_pkt(struct net_device *dev)
skb = dev_alloc_skb(len + 12 + 2);
if (skb == NULL) {
- DEBUG(0, "No Network buffers available\n");
+ pr_debug("No Network buffers available\n");
/* Read High word to complete 32 bit access */
if (info->AsicID == MAGNEMITE_ID) {
tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRH);
@@ -1618,7 +1574,7 @@ static int ft1000_copy_up_pkt(struct net_device *dev)
info->stats.rx_errors++;
return FAILURE;
}
- pbuffer = (u8 *) skb_put(skb, len + 12);
+ pbuffer = (u8 *)skb_put(skb, len + 12);
/* Pseudo header */
if (info->AsicID == ELECTRABUZZ_ID) {
@@ -1630,37 +1586,37 @@ static int ft1000_copy_up_pkt(struct net_device *dev)
tempword = ft1000_read_reg(dev, FT1000_REG_DFIFO);
} else {
tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRH);
- DEBUG(1, "Pseudo = 0x%x\n", tempword);
+ pr_debug("Pseudo = 0x%x\n", tempword);
chksum ^= tempword;
tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRL);
- DEBUG(1, "Pseudo = 0x%x\n", tempword);
+ pr_debug("Pseudo = 0x%x\n", tempword);
chksum ^= tempword;
tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRH);
- DEBUG(1, "Pseudo = 0x%x\n", tempword);
+ pr_debug("Pseudo = 0x%x\n", tempword);
chksum ^= tempword;
tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRL);
- DEBUG(1, "Pseudo = 0x%x\n", tempword);
+ pr_debug("Pseudo = 0x%x\n", tempword);
chksum ^= tempword;
tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRH);
- DEBUG(1, "Pseudo = 0x%x\n", tempword);
+ pr_debug("Pseudo = 0x%x\n", tempword);
chksum ^= tempword;
tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRL);
- DEBUG(1, "Pseudo = 0x%x\n", tempword);
+ pr_debug("Pseudo = 0x%x\n", tempword);
chksum ^= tempword;
/* read checksum value */
tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRH);
- DEBUG(1, "Pseudo = 0x%x\n", tempword);
+ pr_debug("Pseudo = 0x%x\n", tempword);
}
if (chksum != tempword) {
- DEBUG(0, "Packet checksum mismatch 0x%x 0x%x\n", chksum,
- tempword);
+ pr_debug("Packet checksum mismatch 0x%x 0x%x\n",
+ chksum, tempword);
ft1000_flush_fifo(dev, DSP_PKTPHCKSUM_INFO);
info->stats.rx_errors++;
kfree_skb(skb);
@@ -1687,7 +1643,7 @@ static int ft1000_copy_up_pkt(struct net_device *dev)
for (i = 0; i < len / 2; i++) {
tempword = ft1000_read_reg(dev, FT1000_REG_DFIFO);
*pbuffer++ = (u8) (tempword >> 8);
- *pbuffer++ = (u8) tempword;
+ *pbuffer++ = (u8)tempword;
if (ft1000_chkcard(dev) == false) {
kfree_skb(skb);
return FAILURE;
@@ -1700,25 +1656,25 @@ static int ft1000_copy_up_pkt(struct net_device *dev)
*pbuffer++ = (u8) (tempword >> 8);
}
} else {
- ptemplong = (u32 *) pbuffer;
+ ptemplong = (u32 *)pbuffer;
for (i = 0; i < len / 4; i++) {
templong = inl(dev->base_addr + FT1000_REG_MAG_DFR);
- DEBUG(1, "Data = 0x%8x\n", templong);
+ pr_debug("Data = 0x%8x\n", templong);
*ptemplong++ = templong;
}
/* Need to read one more word if odd align. */
if (len & 0x0003) {
templong = inl(dev->base_addr + FT1000_REG_MAG_DFR);
- DEBUG(1, "Data = 0x%8x\n", templong);
+ pr_debug("Data = 0x%8x\n", templong);
*ptemplong++ = templong;
}
}
- DEBUG(1, "Data passed to Protocol layer:\n");
+ pr_debug("Data passed to Protocol layer:\n");
for (i = 0; i < len + 12; i++) {
- DEBUG(1, "Protocol Data: 0x%x\n ", *ptemp++);
+ pr_debug("Protocol Data: 0x%x\n", *ptemp++);
}
skb->dev = dev;
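
The Magnemite branch above drains the downlink FIFO in 32-bit reads: `len / 4` full words, plus one extra read when the length is not a multiple of four. A user-space model of that round-up copy with the FIFO register stubbed out (illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Stub for inl(dev->base_addr + FT1000_REG_MAG_DFR): returns
 * successive 32-bit words of a fake downlink packet. */
static unsigned int fifo_pos;

static uint32_t read_fifo_long_stub(void)
{
	return 0xa0a0a0a0u + fifo_pos++;
}

/* len / 4 full words, plus one extra read when len is not a
 * multiple of 4 -- the same round-up the driver performs. */
static void copy_up_sketch(uint32_t *dst, int len)
{
	int i;

	for (i = 0; i < len / 4; i++)
		*dst++ = read_fifo_long_stub();
	if (len & 0x0003)
		*dst++ = read_fifo_long_stub();
}

int main(void)
{
	uint32_t buf[8] = { 0 };

	copy_up_sketch(buf, 10);	/* 10 bytes -> 3 reads (2 full + 1 extra) */
	printf("words read: %u\n", fifo_pos);
	return 0;
}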
@@ -1745,20 +1701,20 @@ static int ft1000_copy_up_pkt(struct net_device *dev)
/*---------------------------------------------------------------------------
- Function: ft1000_copy_down_pkt
- Description: This function will take an ethernet packet and convert it to
- a Flarion packet prior to sending it to the ASIC Downlink
- FIFO.
- Input:
- dev - device structure
- packet - address of ethernet packet
- len - length of IP packet
- Output:
- status - FAILURE
- SUCCESS
+ Function: ft1000_copy_down_pkt
+ Description: This function will take an ethernet packet and convert it to
+ a Flarion packet prior to sending it to the ASIC Downlink
+ FIFO.
+ Input:
+ dev - device structure
+ packet - address of ethernet packet
+ len - length of IP packet
+ Output:
+ status - FAILURE
+ SUCCESS
-------------------------------------------------------------------------*/
-static int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len)
+static int ft1000_copy_down_pkt(struct net_device *dev, u16 *packet, u16 len)
{
struct ft1000_info *info = netdev_priv(dev);
struct ft1000_pcmcia *pcmcia = info->priv;
@@ -1770,8 +1726,6 @@ static int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len)
int i;
u32 *plong;
- DEBUG(1, "ft1000_hw: copy_down_pkt()\n");
-
/* Check if there is room on the FIFO */
if (len > ft1000_read_fifo_len(dev)) {
udelay(10);
@@ -1791,8 +1745,7 @@ static int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len)
udelay(20);
}
if (len > ft1000_read_fifo_len(dev)) {
- DEBUG(1,
- "ft1000_hw:ft1000_copy_down_pkt:Transmit FIFO is fulli - pkt drop\n");
+ pr_debug("Transmit FIFO is full - pkt drop\n");
info->stats.tx_errors++;
return SUCCESS;
}
@@ -1823,39 +1776,30 @@ static int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len)
if (info->AsicID == ELECTRABUZZ_ID) {
/* copy first word to UFIFO_BEG reg */
ft1000_write_reg(dev, FT1000_REG_UFIFO_BEG, pseudo.buff[0]);
- DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:data 0 BEG = 0x%04x\n",
- pseudo.buff[0]);
+ pr_debug("data 0 BEG = 0x%04x\n", pseudo.buff[0]);
/* copy subsequent words to UFIFO_MID reg */
ft1000_write_reg(dev, FT1000_REG_UFIFO_MID, pseudo.buff[1]);
- DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:data 1 MID = 0x%04x\n",
- pseudo.buff[1]);
+ pr_debug("data 1 MID = 0x%04x\n", pseudo.buff[1]);
ft1000_write_reg(dev, FT1000_REG_UFIFO_MID, pseudo.buff[2]);
- DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:data 2 MID = 0x%04x\n",
- pseudo.buff[2]);
+ pr_debug("data 2 MID = 0x%04x\n", pseudo.buff[2]);
ft1000_write_reg(dev, FT1000_REG_UFIFO_MID, pseudo.buff[3]);
- DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:data 3 MID = 0x%04x\n",
- pseudo.buff[3]);
+ pr_debug("data 3 MID = 0x%04x\n", pseudo.buff[3]);
ft1000_write_reg(dev, FT1000_REG_UFIFO_MID, pseudo.buff[4]);
- DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:data 4 MID = 0x%04x\n",
- pseudo.buff[4]);
+ pr_debug("data 4 MID = 0x%04x\n", pseudo.buff[4]);
ft1000_write_reg(dev, FT1000_REG_UFIFO_MID, pseudo.buff[5]);
- DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:data 5 MID = 0x%04x\n",
- pseudo.buff[5]);
+ pr_debug("data 5 MID = 0x%04x\n", pseudo.buff[5]);
ft1000_write_reg(dev, FT1000_REG_UFIFO_MID, pseudo.buff[6]);
- DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:data 6 MID = 0x%04x\n",
- pseudo.buff[6]);
+ pr_debug("data 6 MID = 0x%04x\n", pseudo.buff[6]);
ft1000_write_reg(dev, FT1000_REG_UFIFO_MID, pseudo.buff[7]);
- DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:data 7 MID = 0x%04x\n",
- pseudo.buff[7]);
+ pr_debug("data 7 MID = 0x%04x\n", pseudo.buff[7]);
/* Write PPP type + IP Packet into Downlink FIFO */
for (i = 0; i < (len >> 1) - 1; i++) {
ft1000_write_reg(dev, FT1000_REG_UFIFO_MID,
htons(*packet));
- DEBUG(1,
- "ft1000_hw:ft1000_copy_down_pkt:data %d MID = 0x%04x\n",
- i + 8, htons(*packet));
+ pr_debug("data %d MID = 0x%04x\n",
+ i + 8, htons(*packet));
packet++;
}
@@ -1863,41 +1807,33 @@ static int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len)
if (len & 0x0001) {
ft1000_write_reg(dev, FT1000_REG_UFIFO_MID,
htons(*packet));
- DEBUG(1,
- "ft1000_hw:ft1000_copy_down_pkt:data MID = 0x%04x\n",
- htons(*packet));
+ pr_debug("data MID = 0x%04x\n", htons(*packet));
packet++;
ft1000_write_reg(dev, FT1000_REG_UFIFO_END,
htons(*packet));
- DEBUG(1,
- "ft1000_hw:ft1000_copy_down_pkt:data %d MID = 0x%04x\n",
- i + 8, htons(*packet));
+ pr_debug("data %d MID = 0x%04x\n",
+ i + 8, htons(*packet));
} else {
ft1000_write_reg(dev, FT1000_REG_UFIFO_END,
htons(*packet));
- DEBUG(1,
- "ft1000_hw:ft1000_copy_down_pkt:data %d MID = 0x%04x\n",
- i + 8, htons(*packet));
+ pr_debug("data %d MID = 0x%04x\n",
+ i + 8, htons(*packet));
}
} else {
- outl(*(u32 *) & pseudo.buff[0],
- dev->base_addr + FT1000_REG_MAG_UFDR);
- DEBUG(1, "ft1000_copy_down_pkt: Pseudo = 0x%8x\n",
- *(u32 *) & pseudo.buff[0]);
- outl(*(u32 *) & pseudo.buff[2],
- dev->base_addr + FT1000_REG_MAG_UFDR);
- DEBUG(1, "ft1000_copy_down_pkt: Pseudo = 0x%8x\n",
- *(u32 *) & pseudo.buff[2]);
- outl(*(u32 *) & pseudo.buff[4],
- dev->base_addr + FT1000_REG_MAG_UFDR);
- DEBUG(1, "ft1000_copy_down_pkt: Pseudo = 0x%8x\n",
- *(u32 *) & pseudo.buff[4]);
- outl(*(u32 *) & pseudo.buff[6],
- dev->base_addr + FT1000_REG_MAG_UFDR);
- DEBUG(1, "ft1000_copy_down_pkt: Pseudo = 0x%8x\n",
- *(u32 *) & pseudo.buff[6]);
-
- plong = (u32 *) packet;
+ outl(*(u32 *)&pseudo.buff[0],
+ dev->base_addr + FT1000_REG_MAG_UFDR);
+ pr_debug("Pseudo = 0x%8x\n", *(u32 *)&pseudo.buff[0]);
+ outl(*(u32 *)&pseudo.buff[2],
+ dev->base_addr + FT1000_REG_MAG_UFDR);
+ pr_debug("Pseudo = 0x%8x\n", *(u32 *)&pseudo.buff[2]);
+ outl(*(u32 *)&pseudo.buff[4],
+ dev->base_addr + FT1000_REG_MAG_UFDR);
+ pr_debug("Pseudo = 0x%8x\n", *(u32 *)&pseudo.buff[4]);
+ outl(*(u32 *)&pseudo.buff[6],
+ dev->base_addr + FT1000_REG_MAG_UFDR);
+ pr_debug("Pseudo = 0x%8x\n", *(u32 *)&pseudo.buff[6]);
+
+ plong = (u32 *)packet;
/* Write PPP type + IP Packet into Downlink FIFO */
for (i = 0; i < (len >> 2); i++) {
outl(*plong++, dev->base_addr + FT1000_REG_MAG_UFDR);
@@ -1905,9 +1841,7 @@ static int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len)
/* Check for odd alignment */
if (len & 0x0003) {
- DEBUG(1,
- "ft1000_hw:ft1000_copy_down_pkt:data = 0x%8x\n",
- *plong);
+ pr_debug("data = 0x%8x\n", *plong);
outl(*plong++, dev->base_addr + FT1000_REG_MAG_UFDR);
}
outl(1, dev->base_addr + FT1000_REG_MAG_UFER);
@@ -1928,19 +1862,14 @@ static struct net_device_stats *ft1000_stats(struct net_device *dev)
static int ft1000_open(struct net_device *dev)
{
-
- DEBUG(0, "ft1000_hw: ft1000_open is called\n");
-
ft1000_reset_card(dev);
- DEBUG(0, "ft1000_hw: ft1000_open is ended\n");
/* schedule ft1000_hbchk to perform periodic heartbeat checks on DSP and ASIC */
init_timer(&poll_timer);
poll_timer.expires = jiffies + (2 * HZ);
- poll_timer.data = (u_long) dev;
+ poll_timer.data = (u_long)dev;
add_timer(&poll_timer);
- DEBUG(0, "ft1000_hw: ft1000_open is ended2\n");
return 0;
}
@@ -1948,13 +1877,11 @@ static int ft1000_close(struct net_device *dev)
{
struct ft1000_info *info = netdev_priv(dev);
- DEBUG(0, "ft1000_hw: ft1000_close()\n");
-
info->CardReady = 0;
del_timer(&poll_timer);
if (ft1000_card_present == 1) {
- DEBUG(0, "Media is down\n");
+ pr_debug("Media is down\n");
netif_stop_queue(dev);
ft1000_disable_interrupts(dev);
@@ -1971,31 +1898,28 @@ static int ft1000_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct ft1000_info *info = netdev_priv(dev);
u8 *pdata;
- DEBUG(1, "ft1000_hw: ft1000_start_xmit()\n");
if (skb == NULL) {
- DEBUG(1, "ft1000_hw: ft1000_start_xmit:skb == NULL!!!\n");
+ pr_debug("skb == NULL!!!\n");
return 0;
}
- DEBUG(1, "ft1000_hw: ft1000_start_xmit:length of packet = %d\n",
- skb->len);
+ pr_debug("length of packet = %d\n", skb->len);
- pdata = (u8 *) skb->data;
+ pdata = (u8 *)skb->data;
if (info->mediastate == 0) {
/* Drop packet is mediastate is down */
- DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:mediastate is down\n");
+ pr_debug("mediastate is down\n");
return SUCCESS;
}
if ((skb->len < ENET_HEADER_SIZE) || (skb->len > ENET_MAX_SIZE)) {
/* Drop packet which has invalid size */
- DEBUG(1,
- "ft1000_hw:ft1000_copy_down_pkt:invalid ethernet length\n");
+ pr_debug("invalid ethernet length\n");
return SUCCESS;
}
ft1000_copy_down_pkt(dev, (u16 *) (pdata + ENET_HEADER_SIZE - 2),
- skb->len - ENET_HEADER_SIZE + 2);
+ skb->len - ENET_HEADER_SIZE + 2);
dev_kfree_skb(skb);
@@ -2004,14 +1928,12 @@ static int ft1000_start_xmit(struct sk_buff *skb, struct net_device *dev)
static irqreturn_t ft1000_interrupt(int irq, void *dev_id)
{
- struct net_device *dev = (struct net_device *)dev_id;
+ struct net_device *dev = dev_id;
struct ft1000_info *info = netdev_priv(dev);
u16 tempword;
u16 inttype;
int cnt;
- DEBUG(1, "ft1000_hw: ft1000_interrupt()\n");
-
if (info->CardReady == 0) {
ft1000_disable_interrupts(dev);
return IRQ_HANDLED;
@@ -2033,19 +1955,19 @@ static irqreturn_t ft1000_interrupt(int irq, void *dev_id)
ft1000_parse_dpram_msg(dev);
if (inttype & ISR_RCV) {
- DEBUG(1, "Data in FIFO\n");
+ pr_debug("Data in FIFO\n");
cnt = 0;
do {
/* Check if we have packets in the Downlink FIFO */
if (info->AsicID == ELECTRABUZZ_ID) {
tempword =
- ft1000_read_reg(dev,
- FT1000_REG_DFIFO_STAT);
+ ft1000_read_reg(dev,
+ FT1000_REG_DFIFO_STAT);
} else {
tempword =
- ft1000_read_reg(dev,
- FT1000_REG_MAG_DFSR);
+ ft1000_read_reg(dev,
+ FT1000_REG_MAG_DFSR);
}
if (tempword & 0x1f) {
ft1000_copy_up_pkt(dev);
@@ -2058,12 +1980,13 @@ static irqreturn_t ft1000_interrupt(int irq, void *dev_id)
}
/* clear interrupts */
tempword = ft1000_read_reg(dev, FT1000_REG_SUP_ISR);
- DEBUG(1, "ft1000_hw: interrupt status register = 0x%x\n", tempword);
+ pr_debug("interrupt status register = 0x%x\n", tempword);
ft1000_write_reg(dev, FT1000_REG_SUP_ISR, tempword);
/* Read interrupt type */
- inttype = ft1000_read_reg (dev, FT1000_REG_SUP_ISR);
- DEBUG(1, "ft1000_hw: interrupt status register after clear = 0x%x\n", inttype);
+ inttype = ft1000_read_reg(dev, FT1000_REG_SUP_ISR);
+ pr_debug("interrupt status register after clear = 0x%x\n",
+ inttype);
}
ft1000_enable_interrupts(dev);
return IRQ_HANDLED;
@@ -2075,8 +1998,6 @@ void stop_ft1000_card(struct net_device *dev)
struct prov_record *ptr;
/* int cnt; */
- DEBUG(0, "ft1000_hw: stop_ft1000_card()\n");
-
info->CardReady = 0;
ft1000_card_present = 0;
netif_stop_queue(dev);
@@ -2105,7 +2026,7 @@ void stop_ft1000_card(struct net_device *dev)
}
static void ft1000_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
+ struct ethtool_drvinfo *info)
{
struct ft1000_info *ft_info;
ft_info = netdev_priv(dev);
@@ -2121,6 +2042,7 @@ static void ft1000_get_drvinfo(struct net_device *dev,
static u32 ft1000_get_link(struct net_device *dev)
{
struct ft1000_info *info;
+
info = netdev_priv(dev);
return info->mediastate;
}
@@ -2131,37 +2053,36 @@ static const struct ethtool_ops ops = {
};
struct net_device *init_ft1000_card(struct pcmcia_device *link,
- void *ft1000_reset)
+ void *ft1000_reset)
{
struct ft1000_info *info;
struct ft1000_pcmcia *pcmcia;
struct net_device *dev;
static const struct net_device_ops ft1000ops = /* Slavius 21.10.2009 due to kernel changes */
- {
- .ndo_open = &ft1000_open,
- .ndo_stop = &ft1000_close,
- .ndo_start_xmit = &ft1000_start_xmit,
- .ndo_get_stats = &ft1000_stats,
- };
+ {
+ .ndo_open = &ft1000_open,
+ .ndo_stop = &ft1000_close,
+ .ndo_start_xmit = &ft1000_start_xmit,
+ .ndo_get_stats = &ft1000_stats,
+ };
- DEBUG(1, "ft1000_hw: init_ft1000_card()\n");
- DEBUG(1, "ft1000_hw: irq = %d\n", link->irq);
- DEBUG(1, "ft1000_hw: port = 0x%04x\n", link->resource[0]->start);
+ pr_debug("irq = %d, port = 0x%04llx\n",
+ link->irq, (unsigned long long)link->resource[0]->start);
flarion_ft1000_cnt++;
if (flarion_ft1000_cnt > 1) {
flarion_ft1000_cnt--;
- printk(KERN_INFO
- "ft1000: This driver can not support more than one instance\n");
+ dev_info(&link->dev,
+ "This driver can not support more than one instance\n");
return NULL;
}
dev = alloc_etherdev(sizeof(struct ft1000_info));
if (!dev) {
- printk(KERN_ERR "ft1000: failed to allocate etherdev\n");
+ dev_err(&link->dev, "Failed to allocate etherdev\n");
return NULL;
}
@@ -2170,9 +2091,9 @@ struct net_device *init_ft1000_card(struct pcmcia_device *link,
memset(info, 0, sizeof(struct ft1000_info));
- DEBUG(1, "address of dev = 0x%8x\n", (u32) dev);
- DEBUG(1, "address of dev info = 0x%8x\n", (u32) info);
- DEBUG(0, "device name = %s\n", dev->name);
+ pr_debug("address of dev = 0x%p\n", dev);
+ pr_debug("address of dev info = 0x%p\n", info);
+ pr_debug("device name = %s\n", dev->name);
memset(&info->stats, 0, sizeof(struct net_device_stats));
@@ -2204,41 +2125,41 @@ struct net_device *init_ft1000_card(struct pcmcia_device *link,
dev->netdev_ops = &ft1000ops; /* Slavius 21.10.2009 due to kernel changes */
- DEBUG(0, "device name = %s\n", dev->name);
+ pr_debug("device name = %s\n", dev->name);
dev->irq = link->irq;
dev->base_addr = link->resource[0]->start;
if (pcmcia_get_mac_from_cis(link, dev)) {
- printk(KERN_ERR "ft1000: Could not read mac address\n");
+ netdev_err(dev, "Could not read mac address\n");
goto err_dev;
}
if (request_irq(dev->irq, ft1000_interrupt, IRQF_SHARED, dev->name, dev)) {
- printk(KERN_ERR "ft1000: Could not request_irq\n");
+ netdev_err(dev, "Could not request_irq\n");
goto err_dev;
}
if (request_region(dev->base_addr, 256, dev->name) == NULL) {
- printk(KERN_ERR "ft1000: Could not request_region\n");
+ netdev_err(dev, "Could not request_region\n");
goto err_irq;
}
if (register_netdev(dev) != 0) {
- DEBUG(0, "ft1000: Could not register netdev");
+ pr_debug("Could not register netdev\n");
goto err_reg;
}
info->AsicID = ft1000_read_reg(dev, FT1000_REG_ASIC_ID);
if (info->AsicID == ELECTRABUZZ_ID) {
- DEBUG(0, "ft1000_hw: ELECTRABUZZ ASIC\n");
+ pr_debug("ELECTRABUZZ ASIC\n");
if (request_firmware(&fw_entry, "ft1000.img", &link->dev) != 0) {
- printk(KERN_INFO "ft1000: Could not open ft1000.img\n");
+ pr_info("Could not open ft1000.img\n");
goto err_unreg;
}
} else {
- DEBUG(0, "ft1000_hw: MAGNEMITE ASIC\n");
+ pr_debug("MAGNEMITE ASIC\n");
if (request_firmware(&fw_entry, "ft2000.img", &link->dev) != 0) {
- printk(KERN_INFO "ft1000: Could not open ft2000.img\n");
+ pr_info("Could not open ft2000.img\n");
goto err_unreg;
}
}
@@ -2247,8 +2168,8 @@ struct net_device *init_ft1000_card(struct pcmcia_device *link,
ft1000_card_present = 1;
dev->ethtool_ops = &ops;
- printk(KERN_INFO "ft1000: %s: addr 0x%04lx irq %d, MAC addr %pM\n",
- dev->name, dev->base_addr, dev->irq, dev->dev_addr);
+ pr_info("%s: addr 0x%04lx irq %d, MAC addr %pM\n",
+ dev->name, dev->base_addr, dev->irq, dev->dev_addr);
return dev;
err_unreg:
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c b/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c
index 0f347ab0d30..c8d27822994 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c
@@ -1,31 +1,34 @@
/*
-*---------------------------------------------------------------------------
-* FT1000 driver for Flarion Flash OFDM NIC Device
-*
-* Copyright (C) 2006 Flarion Technologies, All rights reserved.
-*
-* This program is free software; you can redistribute it and/or modify it
-* under the terms of the GNU General Public License as published by the Free
-* Software Foundation; either version 2 of the License, or (at your option) any
-* later version. This program is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-* more details. You should have received a copy of the GNU General Public
-* License along with this program; if not, write to the
-* Free Software Foundation, Inc., 59 Temple Place -
-* Suite 330, Boston, MA 02111-1307, USA.
-*---------------------------------------------------------------------------
-*
-* File: ft1000_chdev.c
-*
-* Description: Custom character device dispatch routines.
-*
-* History:
-* 8/29/02 Whc Ported to Linux.
-* 6/05/06 Whc Porting to Linux 2.6.9
-*
-*---------------------------------------------------------------------------
-*/
+ *---------------------------------------------------------------------------
+ * FT1000 driver for Flarion Flash OFDM NIC Device
+ *
+ * Copyright (C) 2006 Flarion Technologies, All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version. This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details. You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place -
+ * Suite 330, Boston, MA 02111-1307, USA.
+ *---------------------------------------------------------------------------
+ *
+ * File: ft1000_chdev.c
+ *
+ * Description: Custom character device dispatch routines.
+ *
+ * History:
+ * 8/29/02 Whc Ported to Linux.
+ * 6/05/06 Whc Porting to Linux 2.6.9
+ *
+ *---------------------------------------------------------------------------
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -38,12 +41,12 @@
#include <linux/debugfs.h>
#include "ft1000_usb.h"
-static int ft1000_flarion_cnt = 0;
+static int ft1000_flarion_cnt;
static int ft1000_open(struct inode *inode, struct file *file);
static unsigned int ft1000_poll_dev(struct file *file, poll_table *wait);
static long ft1000_ioctl(struct file *file, unsigned int command,
- unsigned long argument);
+ unsigned long argument);
static int ft1000_release(struct inode *inode, struct file *file);
/* List to free receive command buffer pool */
@@ -55,8 +58,8 @@ spinlock_t free_buff_lock;
int numofmsgbuf = 0;
/*
-* Table of entry-point routines for char device
-*/
+ * Table of entry-point routines for char device
+ */
static const struct file_operations ft1000fops = {
.unlocked_ioctl = ft1000_ioctl,
.poll = ft1000_poll_dev,
@@ -66,104 +69,104 @@ static const struct file_operations ft1000fops = {
};
/*
----------------------------------------------------------------------------
-* Function: ft1000_get_buffer
-*
-* Parameters:
-*
-* Returns:
-*
-* Description:
-*
-* Notes:
-*
-*---------------------------------------------------------------------------
-*/
+ ---------------------------------------------------------------------------
+ * Function: ft1000_get_buffer
+ *
+ * Parameters:
+ *
+ * Returns:
+ *
+ * Description:
+ *
+ * Notes:
+ *
+ *---------------------------------------------------------------------------
+ */
struct dpram_blk *ft1000_get_buffer(struct list_head *bufflist)
{
- unsigned long flags;
+ unsigned long flags;
struct dpram_blk *ptr;
- spin_lock_irqsave(&free_buff_lock, flags);
- /* Check if buffer is available */
- if (list_empty(bufflist)) {
- DEBUG("ft1000_get_buffer: No more buffer - %d\n", numofmsgbuf);
- ptr = NULL;
- } else {
- numofmsgbuf--;
- ptr = list_entry(bufflist->next, struct dpram_blk, list);
- list_del(&ptr->list);
- /* DEBUG("ft1000_get_buffer: number of free msg buffers = %d\n", numofmsgbuf); */
- }
- spin_unlock_irqrestore(&free_buff_lock, flags);
-
- return ptr;
+ spin_lock_irqsave(&free_buff_lock, flags);
+ /* Check if buffer is available */
+ if (list_empty(bufflist)) {
+ pr_debug("No more buffer - %d\n", numofmsgbuf);
+ ptr = NULL;
+ } else {
+ numofmsgbuf--;
+ ptr = list_entry(bufflist->next, struct dpram_blk, list);
+ list_del(&ptr->list);
+ /* pr_debug("number of free msg buffers = %d\n", numofmsgbuf); */
+ }
+ spin_unlock_irqrestore(&free_buff_lock, flags);
+
+ return ptr;
}
/*
-*---------------------------------------------------------------------------
-* Function: ft1000_free_buffer
-*
-* Parameters:
-*
-* Returns:
-*
-* Description:
-*
-* Notes:
-*
-*---------------------------------------------------------------------------
-*/
+ *---------------------------------------------------------------------------
+ * Function: ft1000_free_buffer
+ *
+ * Parameters:
+ *
+ * Returns:
+ *
+ * Description:
+ *
+ * Notes:
+ *
+ *---------------------------------------------------------------------------
+ */
void ft1000_free_buffer(struct dpram_blk *pdpram_blk, struct list_head *plist)
{
- unsigned long flags;
-
- spin_lock_irqsave(&free_buff_lock, flags);
- /* Put memory back to list */
- list_add_tail(&pdpram_blk->list, plist);
- numofmsgbuf++;
- /*DEBUG("ft1000_free_buffer: number of free msg buffers = %d\n", numofmsgbuf); */
- spin_unlock_irqrestore(&free_buff_lock, flags);
+ unsigned long flags;
+
+ spin_lock_irqsave(&free_buff_lock, flags);
+ /* Put memory back to list */
+ list_add_tail(&pdpram_blk->list, plist);
+ numofmsgbuf++;
+ /*pr_debug("number of free msg buffers = %d\n", numofmsgbuf); */
+ spin_unlock_irqrestore(&free_buff_lock, flags);
}
/*
-*---------------------------------------------------------------------------
-* Function: ft1000_CreateDevice
-*
-* Parameters: dev - pointer to adapter object
-*
-* Returns: 0 if successful
-*
-* Description: Creates a private char device.
-*
-* Notes: Only called by init_module().
-*
-*---------------------------------------------------------------------------
-*/
+ *---------------------------------------------------------------------------
+ * Function: ft1000_CreateDevice
+ *
+ * Parameters: dev - pointer to adapter object
+ *
+ * Returns: 0 if successful
+ *
+ * Description: Creates a private char device.
+ *
+ * Notes: Only called by init_module().
+ *
+ *---------------------------------------------------------------------------
+ */
int ft1000_create_dev(struct ft1000_usb *dev)
{
- int result;
- int i;
+ int result;
+ int i;
struct dentry *dir, *file;
struct ft1000_debug_dirs *tmp;
- /* make a new device name */
- sprintf(dev->DeviceName, "%s%d", "FT1000_", dev->CardNumber);
+ /* make a new device name */
+ sprintf(dev->DeviceName, "%s%d", "FT1000_", dev->CardNumber);
- DEBUG("%s: number of instance = %d\n", __func__, ft1000_flarion_cnt);
- DEBUG("DeviceCreated = %x\n", dev->DeviceCreated);
+ pr_debug("number of instance = %d\n", ft1000_flarion_cnt);
+ pr_debug("DeviceCreated = %x\n", dev->DeviceCreated);
- if (dev->DeviceCreated) {
- DEBUG("%s: \"%s\" already registered\n", __func__, dev->DeviceName);
- return -EIO;
- }
+ if (dev->DeviceCreated) {
+ pr_debug("\"%s\" already registered\n", dev->DeviceName);
+ return -EIO;
+ }
- /* register the device */
- DEBUG("%s: \"%s\" debugfs device registration\n", __func__, dev->DeviceName);
+ /* register the device */
+ pr_debug("\"%s\" debugfs device registration\n", dev->DeviceName);
tmp = kmalloc(sizeof(struct ft1000_debug_dirs), GFP_KERNEL);
if (tmp == NULL) {
@@ -178,7 +181,7 @@ int ft1000_create_dev(struct ft1000_usb *dev)
}
file = debugfs_create_file("device", S_IRUGO | S_IWUSR, dir,
- dev, &ft1000fops);
+ dev, &ft1000fops);
if (IS_ERR(file)) {
result = PTR_ERR(file);
goto debug_file_fail;
@@ -189,25 +192,25 @@ int ft1000_create_dev(struct ft1000_usb *dev)
tmp->int_number = dev->CardNumber;
list_add(&(tmp->list), &(dev->nodes.list));
- DEBUG("%s: registered debugfs directory \"%s\"\n", __func__, dev->DeviceName);
-
- /* initialize application information */
- dev->appcnt = 0;
- for (i=0; i<MAX_NUM_APP; i++) {
- dev->app_info[i].nTxMsg = 0;
- dev->app_info[i].nRxMsg = 0;
- dev->app_info[i].nTxMsgReject = 0;
- dev->app_info[i].nRxMsgMiss = 0;
- dev->app_info[i].fileobject = NULL;
- dev->app_info[i].app_id = i+1;
- dev->app_info[i].DspBCMsgFlag = 0;
- dev->app_info[i].NumOfMsg = 0;
- init_waitqueue_head(&dev->app_info[i].wait_dpram_msg);
- INIT_LIST_HEAD(&dev->app_info[i].app_sqlist);
- }
-
- dev->DeviceCreated = TRUE;
- ft1000_flarion_cnt++;
+ pr_debug("registered debugfs directory \"%s\"\n", dev->DeviceName);
+
+ /* initialize application information */
+ dev->appcnt = 0;
+ for (i = 0; i < MAX_NUM_APP; i++) {
+ dev->app_info[i].nTxMsg = 0;
+ dev->app_info[i].nRxMsg = 0;
+ dev->app_info[i].nTxMsgReject = 0;
+ dev->app_info[i].nRxMsgMiss = 0;
+ dev->app_info[i].fileobject = NULL;
+ dev->app_info[i].app_id = i+1;
+ dev->app_info[i].DspBCMsgFlag = 0;
+ dev->app_info[i].NumOfMsg = 0;
+ init_waitqueue_head(&dev->app_info[i].wait_dpram_msg);
+ INIT_LIST_HEAD(&dev->app_info[i].app_sqlist);
+ }
+
+ dev->DeviceCreated = TRUE;
+ ft1000_flarion_cnt++;
return 0;
@@ -220,33 +223,29 @@ fail:
}
/*
-*---------------------------------------------------------------------------
-* Function: ft1000_DestroyDeviceDEBUG
-*
-* Parameters: dev - pointer to adapter object
-*
-* Description: Destroys a private char device.
-*
-* Notes: Only called by cleanup_module().
-*
-*---------------------------------------------------------------------------
-*/
+ *---------------------------------------------------------------------------
+ * Function: ft1000_DestroyDeviceDEBUG
+ *
+ * Parameters: dev - pointer to adapter object
+ *
+ * Description: Destroys a private char device.
+ *
+ * Notes: Only called by cleanup_module().
+ *
+ *---------------------------------------------------------------------------
+ */
void ft1000_destroy_dev(struct net_device *netdev)
{
struct ft1000_info *info = netdev_priv(netdev);
struct ft1000_usb *dev = info->priv;
- int i;
+ int i;
struct dpram_blk *pdpram_blk;
struct dpram_blk *ptr;
struct list_head *pos, *q;
struct ft1000_debug_dirs *dir;
- DEBUG("%s called\n", __func__);
-
-
-
- if (dev->DeviceCreated) {
- ft1000_flarion_cnt--;
+ if (dev->DeviceCreated) {
+ ft1000_flarion_cnt--;
list_for_each_safe(pos, q, &dev->nodes.list) {
dir = list_entry(pos, struct ft1000_debug_dirs, list);
if (dir->int_number == dev->CardNumber) {
@@ -256,29 +255,28 @@ void ft1000_destroy_dev(struct net_device *netdev)
kfree(dir);
}
}
- DEBUG("%s: unregistered device \"%s\"\n", __func__,
- dev->DeviceName);
-
- /* Make sure we free any memory reserve for slow Queue */
- for (i=0; i<MAX_NUM_APP; i++) {
- while (list_empty(&dev->app_info[i].app_sqlist) == 0) {
- pdpram_blk = list_entry(dev->app_info[i].app_sqlist.next, struct dpram_blk, list);
- list_del(&pdpram_blk->list);
- ft1000_free_buffer(pdpram_blk, &freercvpool);
-
- }
- wake_up_interruptible(&dev->app_info[i].wait_dpram_msg);
- }
-
- /* Remove buffer allocated for receive command data */
- if (ft1000_flarion_cnt == 0) {
- while (list_empty(&freercvpool) == 0) {
- ptr = list_entry(freercvpool.next, struct dpram_blk, list);
- list_del(&ptr->list);
- kfree(ptr->pbuffer);
- kfree(ptr);
- }
- }
+ pr_debug("unregistered device \"%s\"\n", dev->DeviceName);
+
+ /* Make sure we free any memory reserved for the slow queue */
+ for (i = 0; i < MAX_NUM_APP; i++) {
+ while (list_empty(&dev->app_info[i].app_sqlist) == 0) {
+ pdpram_blk = list_entry(dev->app_info[i].app_sqlist.next, struct dpram_blk, list);
+ list_del(&pdpram_blk->list);
+ ft1000_free_buffer(pdpram_blk, &freercvpool);
+
+ }
+ wake_up_interruptible(&dev->app_info[i].wait_dpram_msg);
+ }
+
+ /* Remove buffer allocated for receive command data */
+ if (ft1000_flarion_cnt == 0) {
+ while (list_empty(&freercvpool) == 0) {
+ ptr = list_entry(freercvpool.next, struct dpram_blk, list);
+ list_del(&ptr->list);
+ kfree(ptr->pbuffer);
+ kfree(ptr);
+ }
+ }
dev->DeviceCreated = FALSE;
}
@@ -286,507 +284,499 @@ void ft1000_destroy_dev(struct net_device *netdev)
}
/*
-*---------------------------------------------------------------------------
-* Function: ft1000_open
-*
-* Parameters:
-*
-* Description:
-*
-* Notes:
-*
-*---------------------------------------------------------------------------
-*/
+ *---------------------------------------------------------------------------
+ * Function: ft1000_open
+ *
+ * Parameters:
+ *
+ * Description:
+ *
+ * Notes:
+ *
+ *---------------------------------------------------------------------------
+ */
static int ft1000_open(struct inode *inode, struct file *file)
{
struct ft1000_info *info;
struct ft1000_usb *dev = (struct ft1000_usb *)inode->i_private;
- int i,num;
+ int i, num;
- DEBUG("%s called\n", __func__);
- num = (MINOR(inode->i_rdev) & 0xf);
- DEBUG("ft1000_open: minor number=%d\n", num);
+ num = (MINOR(inode->i_rdev) & 0xf);
+ pr_debug("minor number=%d\n", num);
info = file->private_data = netdev_priv(dev->net);
- DEBUG("f_owner = %p number of application = %d\n", (&file->f_owner), dev->appcnt);
-
- /* Check if maximum number of application exceeded */
- if (dev->appcnt > MAX_NUM_APP) {
- DEBUG("Maximum number of application exceeded\n");
- return -EACCES;
- }
-
- /* Search for available application info block */
- for (i=0; i<MAX_NUM_APP; i++) {
- if ((dev->app_info[i].fileobject == NULL)) {
- break;
- }
- }
-
- /* Fail due to lack of application info block */
- if (i == MAX_NUM_APP) {
- DEBUG("Could not find an application info block\n");
- return -EACCES;
- }
-
- dev->appcnt++;
- dev->app_info[i].fileobject = &file->f_owner;
- dev->app_info[i].nTxMsg = 0;
- dev->app_info[i].nRxMsg = 0;
- dev->app_info[i].nTxMsgReject = 0;
- dev->app_info[i].nRxMsgMiss = 0;
+ pr_debug("f_owner = %p number of application = %d\n",
+ &file->f_owner, dev->appcnt);
+
+ /* Check if maximum number of applications exceeded */
+ if (dev->appcnt > MAX_NUM_APP) {
+ pr_debug("Maximum number of applications exceeded\n");
+ return -EACCES;
+ }
+
+ /* Search for available application info block */
+ for (i = 0; i < MAX_NUM_APP; i++) {
+ if ((dev->app_info[i].fileobject == NULL)) {
+ break;
+ }
+ }
+
+ /* Fail due to lack of application info block */
+ if (i == MAX_NUM_APP) {
+ pr_debug("Could not find an application info block\n");
+ return -EACCES;
+ }
+
+ dev->appcnt++;
+ dev->app_info[i].fileobject = &file->f_owner;
+ dev->app_info[i].nTxMsg = 0;
+ dev->app_info[i].nRxMsg = 0;
+ dev->app_info[i].nTxMsgReject = 0;
+ dev->app_info[i].nRxMsgMiss = 0;
nonseekable_open(inode, file);
- return 0;
+ return 0;
}
/*
-*---------------------------------------------------------------------------
-* Function: ft1000_poll_dev
-*
-* Parameters:
-*
-* Description:
-*
-* Notes:
-*
-*---------------------------------------------------------------------------
-*/
+ *---------------------------------------------------------------------------
+ * Function: ft1000_poll_dev
+ *
+ * Parameters:
+ *
+ * Description:
+ *
+ * Notes:
+ *
+ *---------------------------------------------------------------------------
+ */
static unsigned int ft1000_poll_dev(struct file *file, poll_table *wait)
{
- struct net_device *netdev = file->private_data;
+ struct net_device *netdev = file->private_data;
struct ft1000_info *info = netdev_priv(netdev);
struct ft1000_usb *dev = info->priv;
- int i;
-
- /* DEBUG("ft1000_poll_dev called\n"); */
- if (ft1000_flarion_cnt == 0) {
- DEBUG("FT1000:ft1000_poll_dev called when ft1000_flarion_cnt is zero\n");
- return (-EBADF);
- }
-
- /* Search for matching file object */
- for (i=0; i<MAX_NUM_APP; i++) {
- if (dev->app_info[i].fileobject == &file->f_owner) {
- /* DEBUG("FT1000:ft1000_ioctl: Message is for AppId = %d\n", dev->app_info[i].app_id); */
- break;
- }
- }
-
- /* Could not find application info block */
- if (i == MAX_NUM_APP) {
- DEBUG("FT1000:ft1000_ioctl:Could not find application info block\n");
- return (-EACCES);
- }
-
- if (list_empty(&dev->app_info[i].app_sqlist) == 0) {
- DEBUG("FT1000:ft1000_poll_dev:Message detected in slow queue\n");
- return(POLLIN | POLLRDNORM | POLLPRI);
- }
-
- poll_wait(file, &dev->app_info[i].wait_dpram_msg, wait);
- /* DEBUG("FT1000:ft1000_poll_dev:Polling for data from DSP\n"); */
+ int i;
+
+ if (ft1000_flarion_cnt == 0) {
+ pr_debug("called with ft1000_flarion_cnt value zero\n");
+ return -EBADF;
+ }
+
+ /* Search for matching file object */
+ for (i = 0; i < MAX_NUM_APP; i++) {
+ if (dev->app_info[i].fileobject == &file->f_owner) {
+ /* pr_debug("Message is for AppId = %d\n", dev->app_info[i].app_id); */
+ break;
+ }
+ }
+
+ /* Could not find application info block */
+ if (i == MAX_NUM_APP) {
+ pr_debug("Could not find application info block\n");
+ return -EACCES;
+ }
+
+ if (list_empty(&dev->app_info[i].app_sqlist) == 0) {
+ pr_debug("Message detected in slow queue\n");
+ return POLLIN | POLLRDNORM | POLLPRI;
+ }
+
+ poll_wait(file, &dev->app_info[i].wait_dpram_msg, wait);
+ /* pr_debug("Polling for data from DSP\n"); */
return 0;
}
/*
-*---------------------------------------------------------------------------
-* Function: ft1000_ioctl
-*
-* Parameters:
-*
-* Description:
-*
-* Notes:
-*
-*---------------------------------------------------------------------------
-*/
+ *---------------------------------------------------------------------------
+ * Function: ft1000_ioctl
+ *
+ * Parameters:
+ *
+ * Description:
+ *
+ * Notes:
+ *
+ *---------------------------------------------------------------------------
+ */
static long ft1000_ioctl(struct file *file, unsigned int command,
- unsigned long argument)
+ unsigned long argument)
{
- void __user *argp = (void __user *)argument;
+ void __user *argp = (void __user *)argument;
struct ft1000_info *info;
- struct ft1000_usb *ft1000dev;
- int result=0;
- int cmd;
- int i;
- u16 tempword;
- unsigned long flags;
- struct timeval tv;
+ struct ft1000_usb *ft1000dev;
+ int result = 0;
+ int cmd;
+ int i;
+ u16 tempword;
+ unsigned long flags;
+ struct timeval tv;
struct IOCTL_GET_VER get_ver_data;
struct IOCTL_GET_DSP_STAT get_stat_data;
- u8 ConnectionMsg[] = {0x00,0x44,0x10,0x20,0x80,0x00,0x00,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x93,0x64,
- 0x00,0x00,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x05,0x00,0x00,0x00,0x0a,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x12,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x02,0x37,0x00,0x00,0x00,0x08,0x00,0x00,0x00,0x01,0x00,0x01,0x7f,0x00,
- 0x00,0x01,0x00,0x00};
+ u8 ConnectionMsg[] = {0x00, 0x44, 0x10, 0x20, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x93, 0x64,
+ 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x0a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x02, 0x37, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x7f, 0x00,
+ 0x00, 0x01, 0x00, 0x00};
+
+ unsigned short ledStat = 0;
+ unsigned short conStat = 0;
+
+ if (ft1000_flarion_cnt == 0) {
+ pr_debug("called with ft1000_flarion_cnt of zero\n");
+ return -EBADF;
+ }
- unsigned short ledStat=0;
- unsigned short conStat=0;
+ /* pr_debug("command = 0x%x argument = 0x%8x\n", command, (u32)argument); */
- /* DEBUG("ft1000_ioctl called\n"); */
+ info = file->private_data;
+ ft1000dev = info->priv;
+ cmd = _IOC_NR(command);
+ /* pr_debug("cmd = 0x%x\n", cmd); */
+
+ /* process the command */
+ switch (cmd) {
+ case IOCTL_REGISTER_CMD:
+ pr_debug("IOCTL_FT1000_REGISTER called\n");
+ result = get_user(tempword, (__u16 __user *)argp);
+ if (result) {
+ pr_debug("result = %d failed to get_user\n", result);
+ break;
+ }
+ if (tempword == DSPBCMSGID) {
+ /* Search for matching file object */
+ for (i = 0; i < MAX_NUM_APP; i++) {
+ if (ft1000dev->app_info[i].fileobject == &file->f_owner) {
+ ft1000dev->app_info[i].DspBCMsgFlag = 1;
+ pr_debug("Registered for broadcast messages\n");
+ break;
+ }
+ }
+ }
+ break;
- if (ft1000_flarion_cnt == 0) {
- DEBUG("FT1000:ft1000_ioctl called when ft1000_flarion_cnt is zero\n");
- return (-EBADF);
- }
+ case IOCTL_GET_VER_CMD:
+ pr_debug("IOCTL_FT1000_GET_VER called\n");
- /* DEBUG("FT1000:ft1000_ioctl:command = 0x%x argument = 0x%8x\n", command, (u32)argument); */
+ get_ver_data.drv_ver = FT1000_DRV_VER;
- info = file->private_data;
- ft1000dev = info->priv;
- cmd = _IOC_NR(command);
- /* DEBUG("FT1000:ft1000_ioctl:cmd = 0x%x\n", cmd); */
-
- /* process the command */
- switch (cmd) {
- case IOCTL_REGISTER_CMD:
- DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_REGISTER called\n");
- result = get_user(tempword, (__u16 __user*)argp);
- if (result) {
- DEBUG("result = %d failed to get_user\n", result);
- break;
- }
- if (tempword == DSPBCMSGID) {
- /* Search for matching file object */
- for (i=0; i<MAX_NUM_APP; i++) {
- if (ft1000dev->app_info[i].fileobject == &file->f_owner) {
- ft1000dev->app_info[i].DspBCMsgFlag = 1;
- DEBUG("FT1000:ft1000_ioctl:Registered for broadcast messages\n");
- break;
- }
- }
- }
- break;
-
- case IOCTL_GET_VER_CMD:
- DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_GET_VER called\n");
-
- get_ver_data.drv_ver = FT1000_DRV_VER;
-
- if (copy_to_user(argp, &get_ver_data, sizeof(get_ver_data))) {
- DEBUG("FT1000:ft1000_ioctl: copy fault occurred\n");
- result = -EFAULT;
- break;
- }
-
- DEBUG("FT1000:ft1000_ioctl:driver version = 0x%x\n",(unsigned int)get_ver_data.drv_ver);
-
- break;
- case IOCTL_CONNECT:
- /* Connect Message */
- DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_CONNECT\n");
- ConnectionMsg[79] = 0xfc;
- result = card_send_command(ft1000dev, (unsigned short *)ConnectionMsg, 0x4c);
-
- break;
- case IOCTL_DISCONNECT:
- /* Disconnect Message */
- DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_DISCONNECT\n");
- ConnectionMsg[79] = 0xfd;
- result = card_send_command(ft1000dev, (unsigned short *)ConnectionMsg, 0x4c);
- break;
- case IOCTL_GET_DSP_STAT_CMD:
- /* DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_GET_DSP_STAT called\n"); */
- memset(&get_stat_data, 0, sizeof(get_stat_data));
- memcpy(get_stat_data.DspVer, info->DspVer, DSPVERSZ);
- memcpy(get_stat_data.HwSerNum, info->HwSerNum, HWSERNUMSZ);
- memcpy(get_stat_data.Sku, info->Sku, SKUSZ);
- memcpy(get_stat_data.eui64, info->eui64, EUISZ);
-
- if (info->ProgConStat != 0xFF) {
- ft1000_read_dpram16(ft1000dev, FT1000_MAG_DSP_LED, (u8 *)&ledStat, FT1000_MAG_DSP_LED_INDX);
- get_stat_data.LedStat = ntohs(ledStat);
- DEBUG("FT1000:ft1000_ioctl: LedStat = 0x%x\n", get_stat_data.LedStat);
- ft1000_read_dpram16(ft1000dev, FT1000_MAG_DSP_CON_STATE, (u8 *)&conStat, FT1000_MAG_DSP_CON_STATE_INDX);
- get_stat_data.ConStat = ntohs(conStat);
- DEBUG("FT1000:ft1000_ioctl: ConStat = 0x%x\n", get_stat_data.ConStat);
- } else {
- get_stat_data.ConStat = 0x0f;
- }
-
-
- get_stat_data.nTxPkts = info->stats.tx_packets;
- get_stat_data.nRxPkts = info->stats.rx_packets;
- get_stat_data.nTxBytes = info->stats.tx_bytes;
- get_stat_data.nRxBytes = info->stats.rx_bytes;
- do_gettimeofday(&tv);
- get_stat_data.ConTm = (u32)(tv.tv_sec - info->ConTm);
- DEBUG("Connection Time = %d\n", (int)get_stat_data.ConTm);
- if (copy_to_user(argp, &get_stat_data, sizeof(get_stat_data))) {
- DEBUG("FT1000:ft1000_ioctl: copy fault occurred\n");
- result = -EFAULT;
- break;
- }
- DEBUG("ft1000_chioctl: GET_DSP_STAT succeed\n");
- break;
- case IOCTL_SET_DPRAM_CMD:
- {
+ if (copy_to_user(argp, &get_ver_data, sizeof(get_ver_data))) {
+ pr_debug("copy fault occurred\n");
+ result = -EFAULT;
+ break;
+ }
+
+ pr_debug("driver version = 0x%x\n",
+ (unsigned int)get_ver_data.drv_ver);
+
+ break;
+ case IOCTL_CONNECT:
+ /* Connect Message */
+ pr_debug("IOCTL_FT1000_CONNECT\n");
+ ConnectionMsg[79] = 0xfc;
+ result = card_send_command(ft1000dev, (unsigned short *)ConnectionMsg, 0x4c);
+
+ break;
+ case IOCTL_DISCONNECT:
+ /* Disconnect Message */
+ pr_debug("IOCTL_FT1000_DISCONNECT\n");
+ ConnectionMsg[79] = 0xfd;
+ result = card_send_command(ft1000dev, (unsigned short *)ConnectionMsg, 0x4c);
+ break;
+ case IOCTL_GET_DSP_STAT_CMD:
+ /* pr_debug("IOCTL_FT1000_GET_DSP_STAT\n"); */
+ memset(&get_stat_data, 0, sizeof(get_stat_data));
+ memcpy(get_stat_data.DspVer, info->DspVer, DSPVERSZ);
+ memcpy(get_stat_data.HwSerNum, info->HwSerNum, HWSERNUMSZ);
+ memcpy(get_stat_data.Sku, info->Sku, SKUSZ);
+ memcpy(get_stat_data.eui64, info->eui64, EUISZ);
+
+ if (info->ProgConStat != 0xFF) {
+ ft1000_read_dpram16(ft1000dev, FT1000_MAG_DSP_LED, (u8 *)&ledStat, FT1000_MAG_DSP_LED_INDX);
+ get_stat_data.LedStat = ntohs(ledStat);
+ pr_debug("LedStat = 0x%x\n", get_stat_data.LedStat);
+ ft1000_read_dpram16(ft1000dev, FT1000_MAG_DSP_CON_STATE, (u8 *)&conStat, FT1000_MAG_DSP_CON_STATE_INDX);
+ get_stat_data.ConStat = ntohs(conStat);
+ pr_debug("ConStat = 0x%x\n", get_stat_data.ConStat);
+ } else {
+ get_stat_data.ConStat = 0x0f;
+ }
+
+
+ get_stat_data.nTxPkts = info->stats.tx_packets;
+ get_stat_data.nRxPkts = info->stats.rx_packets;
+ get_stat_data.nTxBytes = info->stats.tx_bytes;
+ get_stat_data.nRxBytes = info->stats.rx_bytes;
+ do_gettimeofday(&tv);
+ get_stat_data.ConTm = (u32)(tv.tv_sec - info->ConTm);
+ pr_debug("Connection Time = %d\n", (int)get_stat_data.ConTm);
+ if (copy_to_user(argp, &get_stat_data, sizeof(get_stat_data))) {
+ pr_debug("copy fault occurred\n");
+ result = -EFAULT;
+ break;
+ }
+ pr_debug("GET_DSP_STAT succeed\n");
+ break;
+ case IOCTL_SET_DPRAM_CMD:
+ {
struct IOCTL_DPRAM_BLK *dpram_data = NULL;
/* struct IOCTL_DPRAM_COMMAND dpram_command; */
- u16 qtype;
- u16 msgsz;
+ u16 qtype;
+ u16 msgsz;
struct pseudo_hdr *ppseudo_hdr;
- u16 *pmsg;
- u16 total_len;
- u16 app_index;
- u16 status;
+ u16 *pmsg;
+ u16 total_len;
+ u16 app_index;
+ u16 status;
- /* DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_SET_DPRAM called\n");*/
+ /* pr_debug("IOCTL_FT1000_SET_DPRAM called\n");*/
- if (ft1000_flarion_cnt == 0) {
- return (-EBADF);
- }
+ if (ft1000_flarion_cnt == 0)
+ return -EBADF;
- if (ft1000dev->DrvMsgPend) {
- return (-ENOTTY);
- }
+ if (ft1000dev->DrvMsgPend)
+ return -ENOTTY;
- if (ft1000dev->fProvComplete == 0) {
- return (-EACCES);
- }
+ if (ft1000dev->fProvComplete == 0)
+ return -EACCES;
- ft1000dev->fAppMsgPend = 1;
+ ft1000dev->fAppMsgPend = 1;
- if (info->CardReady) {
+ if (info->CardReady) {
- /* DEBUG("FT1000:ft1000_ioctl: try to SET_DPRAM \n"); */
+ /* pr_debug("try to SET_DPRAM\n"); */
- /* Get the length field to see how many bytes to copy */
- result = get_user(msgsz, (__u16 __user *)argp);
- if (result)
- break;
- msgsz = ntohs(msgsz);
- /* DEBUG("FT1000:ft1000_ioctl: length of message = %d\n", msgsz); */
-
- if (msgsz > MAX_CMD_SQSIZE) {
- DEBUG("FT1000:ft1000_ioctl: bad message length = %d\n", msgsz);
- result = -EINVAL;
- break;
- }
-
- result = -ENOMEM;
- dpram_data = kmalloc(msgsz + 2, GFP_KERNEL);
- if (!dpram_data)
- break;
+ /* Get the length field to see how many bytes to copy */
+ result = get_user(msgsz, (__u16 __user *)argp);
+ if (result)
+ break;
+ msgsz = ntohs(msgsz);
+ /* pr_debug("length of message = %d\n", msgsz); */
+
+ if (msgsz > MAX_CMD_SQSIZE) {
+ pr_debug("bad message length = %d\n", msgsz);
+ result = -EINVAL;
+ break;
+ }
- if (copy_from_user(dpram_data, argp, msgsz+2)) {
- DEBUG("FT1000:ft1000_ChIoctl: copy fault occurred\n");
- result = -EFAULT;
- } else {
- /* Check if this message came from a registered application */
- for (i=0; i<MAX_NUM_APP; i++) {
- if (ft1000dev->app_info[i].fileobject == &file->f_owner) {
- break;
- }
- }
- if (i==MAX_NUM_APP) {
- DEBUG("FT1000:No matching application fileobject\n");
- result = -EINVAL;
- kfree(dpram_data);
- break;
- }
- app_index = i;
-
- /* Check message qtype type which is the lower byte within qos_class */
- qtype = ntohs(dpram_data->pseudohdr.qos_class) & 0xff;
- /* DEBUG("FT1000_ft1000_ioctl: qtype = %d\n", qtype); */
- if (qtype) {
- } else {
- /* Put message into Slow Queue */
- /* Only put a message into the DPRAM if msg doorbell is available */
- status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
- /* DEBUG("FT1000_ft1000_ioctl: READ REGISTER tempword=%x\n", tempword); */
- if (tempword & FT1000_DB_DPRAM_TX) {
- /* Suspend for 2ms and try again due to DSP doorbell busy */
- mdelay(2);
- status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
- if (tempword & FT1000_DB_DPRAM_TX) {
- /* Suspend for 1ms and try again due to DSP doorbell busy */
- mdelay(1);
- status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
- if (tempword & FT1000_DB_DPRAM_TX) {
- status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
- if (tempword & FT1000_DB_DPRAM_TX) {
- /* Suspend for 3ms and try again due to DSP doorbell busy */
- mdelay(3);
- status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
- if (tempword & FT1000_DB_DPRAM_TX) {
- DEBUG("FT1000:ft1000_ioctl:Doorbell not available\n");
- result = -ENOTTY;
- kfree(dpram_data);
- break;
- }
- }
- }
- }
- }
-
- /*DEBUG("FT1000_ft1000_ioctl: finished reading register\n"); */
-
- /* Make sure we are within the limits of the slow queue memory limitation */
- if ((msgsz < MAX_CMD_SQSIZE) && (msgsz > PSEUDOSZ)) {
- /* Need to put sequence number plus new checksum for message */
- pmsg = (u16 *)&dpram_data->pseudohdr;
- ppseudo_hdr = (struct pseudo_hdr *)pmsg;
- total_len = msgsz+2;
- if (total_len & 0x1) {
- total_len++;
- }
-
- /* Insert slow queue sequence number */
- ppseudo_hdr->seq_num = info->squeseqnum++;
- ppseudo_hdr->portsrc = ft1000dev->app_info[app_index].app_id;
- /* Calculate new checksum */
- ppseudo_hdr->checksum = *pmsg++;
- /* DEBUG("checksum = 0x%x\n", ppseudo_hdr->checksum); */
- for (i=1; i<7; i++) {
- ppseudo_hdr->checksum ^= *pmsg++;
- /* DEBUG("checksum = 0x%x\n", ppseudo_hdr->checksum); */
- }
- pmsg++;
- ppseudo_hdr = (struct pseudo_hdr *)pmsg;
- result = card_send_command(ft1000dev,(unsigned short*)dpram_data,total_len+2);
-
-
- ft1000dev->app_info[app_index].nTxMsg++;
- } else {
- result = -EINVAL;
- }
- }
- }
- } else {
- DEBUG("FT1000:ft1000_ioctl: Card not ready take messages\n");
- result = -EACCES;
- }
- kfree(dpram_data);
-
- }
- break;
- case IOCTL_GET_DPRAM_CMD:
- {
+ result = -ENOMEM;
+ dpram_data = kmalloc(msgsz + 2, GFP_KERNEL);
+ if (!dpram_data)
+ break;
+
+ if (copy_from_user(dpram_data, argp, msgsz+2)) {
+ pr_debug("copy fault occurred\n");
+ result = -EFAULT;
+ } else {
+ /* Check if this message came from a registered application */
+ for (i = 0; i < MAX_NUM_APP; i++) {
+ if (ft1000dev->app_info[i].fileobject == &file->f_owner) {
+ break;
+ }
+ }
+ if (i == MAX_NUM_APP) {
+ pr_debug("No matching application fileobject\n");
+ result = -EINVAL;
+ kfree(dpram_data);
+ break;
+ }
+ app_index = i;
+
+ /* Check message qtype type which is the lower byte within qos_class */
+ qtype = ntohs(dpram_data->pseudohdr.qos_class) & 0xff;
+ /* pr_debug("qtype = %d\n", qtype); */
+ if (qtype) {
+ } else {
+ /* Put message into Slow Queue */
+ /* Only put a message into the DPRAM if msg doorbell is available */
+ status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
+ /* pr_debug("READ REGISTER tempword=%x\n", tempword); */
+ if (tempword & FT1000_DB_DPRAM_TX) {
+ /* Suspend for 2ms and try again due to DSP doorbell busy */
+ mdelay(2);
+ status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
+ if (tempword & FT1000_DB_DPRAM_TX) {
+ /* Suspend for 1ms and try again due to DSP doorbell busy */
+ mdelay(1);
+ status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
+ if (tempword & FT1000_DB_DPRAM_TX) {
+ status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
+ if (tempword & FT1000_DB_DPRAM_TX) {
+ /* Suspend for 3ms and try again due to DSP doorbell busy */
+ mdelay(3);
+ status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
+ if (tempword & FT1000_DB_DPRAM_TX) {
+ pr_debug("Doorbell not available\n");
+ result = -ENOTTY;
+ kfree(dpram_data);
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ /*pr_debug("finished reading register\n"); */
+
+ /* Make sure we are within the limits of the slow queue memory limitation */
+ if ((msgsz < MAX_CMD_SQSIZE) && (msgsz > PSEUDOSZ)) {
+ /* Need to put sequence number plus new checksum for message */
+ pmsg = (u16 *)&dpram_data->pseudohdr;
+ ppseudo_hdr = (struct pseudo_hdr *)pmsg;
+ total_len = msgsz+2;
+ if (total_len & 0x1) {
+ total_len++;
+ }
+
+ /* Insert slow queue sequence number */
+ ppseudo_hdr->seq_num = info->squeseqnum++;
+ ppseudo_hdr->portsrc = ft1000dev->app_info[app_index].app_id;
+ /* Calculate new checksum */
+ ppseudo_hdr->checksum = *pmsg++;
+ /* pr_debug("checksum = 0x%x\n", ppseudo_hdr->checksum); */
+ for (i = 1; i < 7; i++) {
+ ppseudo_hdr->checksum ^= *pmsg++;
+ /* pr_debug("checksum = 0x%x\n", ppseudo_hdr->checksum); */
+ }
+ pmsg++;
+ ppseudo_hdr = (struct pseudo_hdr *)pmsg;
+ result = card_send_command(ft1000dev, (unsigned short *)dpram_data, total_len+2);
+
+
+ ft1000dev->app_info[app_index].nTxMsg++;
+ } else {
+ result = -EINVAL;
+ }
+ }
+ }
+ } else {
+ pr_debug("Card not ready take messages\n");
+ result = -EACCES;
+ }
+ kfree(dpram_data);
+
+ }
+ break;
+ case IOCTL_GET_DPRAM_CMD:
+ {
struct dpram_blk *pdpram_blk;
struct IOCTL_DPRAM_BLK __user *pioctl_dpram;
- int msglen;
-
- /* DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_GET_DPRAM called\n"); */
-
- if (ft1000_flarion_cnt == 0) {
- return (-EBADF);
- }
-
- /* Search for matching file object */
- for (i=0; i<MAX_NUM_APP; i++) {
- if (ft1000dev->app_info[i].fileobject == &file->f_owner) {
- /*DEBUG("FT1000:ft1000_ioctl: Message is for AppId = %d\n", ft1000dev->app_info[i].app_id); */
- break;
- }
- }
-
- /* Could not find application info block */
- if (i == MAX_NUM_APP) {
- DEBUG("FT1000:ft1000_ioctl:Could not find application info block\n");
- result = -EBADF;
- break;
- }
-
- result = 0;
- pioctl_dpram = argp;
- if (list_empty(&ft1000dev->app_info[i].app_sqlist) == 0) {
- /* DEBUG("FT1000:ft1000_ioctl:Message detected in slow queue\n"); */
- spin_lock_irqsave(&free_buff_lock, flags);
- pdpram_blk = list_entry(ft1000dev->app_info[i].app_sqlist.next, struct dpram_blk, list);
- list_del(&pdpram_blk->list);
- ft1000dev->app_info[i].NumOfMsg--;
- /* DEBUG("FT1000:ft1000_ioctl:NumOfMsg for app %d = %d\n", i, ft1000dev->app_info[i].NumOfMsg); */
- spin_unlock_irqrestore(&free_buff_lock, flags);
- msglen = ntohs(*(u16 *)pdpram_blk->pbuffer) + PSEUDOSZ;
- result = get_user(msglen, &pioctl_dpram->total_len);
- if (result)
- break;
- msglen = htons(msglen);
- /* DEBUG("FT1000:ft1000_ioctl:msg length = %x\n", msglen); */
- if (copy_to_user (&pioctl_dpram->pseudohdr, pdpram_blk->pbuffer, msglen)) {
- DEBUG("FT1000:ft1000_ioctl: copy fault occurred\n");
- result = -EFAULT;
+ int msglen;
+
+ /* pr_debug("IOCTL_FT1000_GET_DPRAM called\n"); */
+
+ if (ft1000_flarion_cnt == 0)
+ return -EBADF;
+
+ /* Search for matching file object */
+ for (i = 0; i < MAX_NUM_APP; i++) {
+ if (ft1000dev->app_info[i].fileobject == &file->f_owner) {
+ /*pr_debug("Message is for AppId = %d\n", ft1000dev->app_info[i].app_id); */
+ break;
+ }
+ }
+
+ /* Could not find application info block */
+ if (i == MAX_NUM_APP) {
+ pr_debug("Could not find application info block\n");
+ result = -EBADF;
break;
- }
+ }
+
+ result = 0;
+ pioctl_dpram = argp;
+ if (list_empty(&ft1000dev->app_info[i].app_sqlist) == 0) {
+ /* pr_debug("Message detected in slow queue\n"); */
+ spin_lock_irqsave(&free_buff_lock, flags);
+ pdpram_blk = list_entry(ft1000dev->app_info[i].app_sqlist.next, struct dpram_blk, list);
+ list_del(&pdpram_blk->list);
+ ft1000dev->app_info[i].NumOfMsg--;
+ /* pr_debug("NumOfMsg for app %d = %d\n", i, ft1000dev->app_info[i].NumOfMsg); */
+ spin_unlock_irqrestore(&free_buff_lock, flags);
+ msglen = ntohs(*(u16 *)pdpram_blk->pbuffer) + PSEUDOSZ;
+ result = get_user(msglen, &pioctl_dpram->total_len);
+ if (result)
+ break;
+ msglen = htons(msglen);
+ /* pr_debug("msg length = %x\n", msglen); */
+ if (copy_to_user(&pioctl_dpram->pseudohdr, pdpram_blk->pbuffer, msglen)) {
+ pr_debug("copy fault occurred\n");
+ result = -EFAULT;
+ break;
+ }
+
+ ft1000_free_buffer(pdpram_blk, &freercvpool);
+ result = msglen;
+ }
+ /* pr_debug("IOCTL_FT1000_GET_DPRAM no message\n"); */
+ }
+ break;
- ft1000_free_buffer(pdpram_blk, &freercvpool);
- result = msglen;
- }
- /* DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_GET_DPRAM no message\n"); */
- }
- break;
-
- default:
- DEBUG("FT1000:ft1000_ioctl:unknown command: 0x%x\n", command);
- result = -ENOTTY;
- break;
- }
- ft1000dev->fAppMsgPend = 0;
- return result;
+ default:
+ pr_debug("unknown command: 0x%x\n", command);
+ result = -ENOTTY;
+ break;
+ }
+ ft1000dev->fAppMsgPend = 0;
+ return result;
}
/*
-*---------------------------------------------------------------------------
-* Function: ft1000_release
-*
-* Parameters:
-*
-* Description:
-*
-* Notes:
-*
-*---------------------------------------------------------------------------
-*/
+ *---------------------------------------------------------------------------
+ * Function: ft1000_release
+ *
+ * Parameters:
+ *
+ * Description:
+ *
+ * Notes:
+ *
+ *---------------------------------------------------------------------------
+ */
static int ft1000_release(struct inode *inode, struct file *file)
{
struct ft1000_info *info;
- struct net_device *dev;
- struct ft1000_usb *ft1000dev;
- int i;
+ struct net_device *dev;
+ struct ft1000_usb *ft1000dev;
+ int i;
struct dpram_blk *pdpram_blk;
- DEBUG("ft1000_release called\n");
-
- dev = file->private_data;
+ dev = file->private_data;
info = netdev_priv(dev);
ft1000dev = info->priv;
- if (ft1000_flarion_cnt == 0) {
- ft1000dev->appcnt--;
- return (-EBADF);
- }
-
- /* Search for matching file object */
- for (i=0; i<MAX_NUM_APP; i++) {
- if (ft1000dev->app_info[i].fileobject == &file->f_owner) {
- /* DEBUG("FT1000:ft1000_ioctl: Message is for AppId = %d\n", ft1000dev->app_info[i].app_id); */
- break;
- }
- }
-
- if (i==MAX_NUM_APP)
- return 0;
-
- while (list_empty(&ft1000dev->app_info[i].app_sqlist) == 0) {
- DEBUG("Remove and free memory queue up on slow queue\n");
- pdpram_blk = list_entry(ft1000dev->app_info[i].app_sqlist.next, struct dpram_blk, list);
- list_del(&pdpram_blk->list);
- ft1000_free_buffer(pdpram_blk, &freercvpool);
- }
-
- /* initialize application information */
- ft1000dev->appcnt--;
- DEBUG("ft1000_chdev:%s:appcnt = %d\n", __func__, ft1000dev->appcnt);
- ft1000dev->app_info[i].fileobject = NULL;
-
- return 0;
+ if (ft1000_flarion_cnt == 0) {
+ ft1000dev->appcnt--;
+ return -EBADF;
+ }
+
+ /* Search for matching file object */
+ for (i = 0; i < MAX_NUM_APP; i++) {
+ if (ft1000dev->app_info[i].fileobject == &file->f_owner) {
+ /* pr_debug("Message is for AppId = %d\n", ft1000dev->app_info[i].app_id); */
+ break;
+ }
+ }
+
+ if (i == MAX_NUM_APP)
+ return 0;
+
+ while (list_empty(&ft1000dev->app_info[i].app_sqlist) == 0) {
+ pr_debug("Remove and free memory queue up on slow queue\n");
+ pdpram_blk = list_entry(ft1000dev->app_info[i].app_sqlist.next, struct dpram_blk, list);
+ list_del(&pdpram_blk->list);
+ ft1000_free_buffer(pdpram_blk, &freercvpool);
+ }
+
+ /* initialize application information */
+ ft1000dev->appcnt--;
+ pr_debug("appcnt = %d\n", ft1000dev->appcnt);
+ ft1000dev->app_info[i].fileobject = NULL;
+
+ return 0;
}
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_download.c b/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
index 37707da09e9..e8126325877 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
@@ -1,8 +1,10 @@
/*
-* CopyRight (C) 2007 Qualcomm Inc. All Rights Reserved.
-*
-* This file is part of Express Card USB Driver
-*/
+ * CopyRight (C) 2007 Qualcomm Inc. All Rights Reserved.
+ *
+ * This file is part of Express Card USB Driver
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
@@ -117,17 +119,16 @@ static int check_usb_db(struct ft1000_usb *ft1000dev)
while (loopcnt < 10) {
status = ft1000_read_register(ft1000dev, &temp,
- FT1000_REG_DOORBELL);
- DEBUG("check_usb_db: read FT1000_REG_DOORBELL value is %x\n",
- temp);
+ FT1000_REG_DOORBELL);
+ pr_debug("read FT1000_REG_DOORBELL value is %x\n", temp);
if (temp & 0x0080) {
- DEBUG("FT1000:Got checkusb doorbell\n");
+ pr_debug("Got checkusb doorbell\n");
status = ft1000_write_register(ft1000dev, 0x0080,
- FT1000_REG_DOORBELL);
+ FT1000_REG_DOORBELL);
status = ft1000_write_register(ft1000dev, 0x0100,
- FT1000_REG_DOORBELL);
+ FT1000_REG_DOORBELL);
status = ft1000_write_register(ft1000dev, 0x8000,
- FT1000_REG_DOORBELL);
+ FT1000_REG_DOORBELL);
break;
}
loopcnt++;
@@ -138,13 +139,13 @@ static int check_usb_db(struct ft1000_usb *ft1000dev)
loopcnt = 0;
while (loopcnt < 20) {
status = ft1000_read_register(ft1000dev, &temp,
- FT1000_REG_DOORBELL);
- DEBUG("FT1000:check_usb_db:Doorbell = 0x%x\n", temp);
+ FT1000_REG_DOORBELL);
+ pr_debug("Doorbell = 0x%x\n", temp);
if (temp & 0x8000) {
loopcnt++;
msleep(10);
} else {
- DEBUG("check_usb_db: door bell is cleared, return 0\n");
+ pr_debug("door bell is cleared, return 0\n");
return 0;
}
}
@@ -164,23 +165,22 @@ static u16 get_handshake(struct ft1000_usb *ft1000dev, u16 expected_value)
while (loopcnt < 100) {
/* Need to clear downloader doorbell if Hartley ASIC */
status = ft1000_write_register(ft1000dev, FT1000_DB_DNLD_RX,
- FT1000_REG_DOORBELL);
+ FT1000_REG_DOORBELL);
if (ft1000dev->fcodeldr) {
- DEBUG(" get_handshake: fcodeldr is %d\n",
- ft1000dev->fcodeldr);
+ pr_debug("fcodeldr is %d\n", ft1000dev->fcodeldr);
ft1000dev->fcodeldr = 0;
status = check_usb_db(ft1000dev);
if (status != 0) {
- DEBUG("get_handshake: check_usb_db failed\n");
+ pr_debug("check_usb_db failed\n");
break;
}
status = ft1000_write_register(ft1000dev,
- FT1000_DB_DNLD_RX,
- FT1000_REG_DOORBELL);
+ FT1000_DB_DNLD_RX,
+ FT1000_REG_DOORBELL);
}
status = ft1000_read_dpram16(ft1000dev,
- DWNLD_MAG1_HANDSHAKE_LOC, (u8 *)&handshake, 1);
+ DWNLD_MAG1_HANDSHAKE_LOC, (u8 *)&handshake, 1);
handshake = ntohs(handshake);
if (status)
@@ -209,12 +209,12 @@ static void put_handshake(struct ft1000_usb *ft1000dev, u16 handshake_value)
tempword = (u16)(tempx & 0xffff);
status = ft1000_write_dpram16(ft1000dev, DWNLD_MAG1_HANDSHAKE_LOC,
- tempword, 0);
+ tempword, 0);
tempword = (u16)(tempx >> 16);
status = ft1000_write_dpram16(ft1000dev, DWNLD_MAG1_HANDSHAKE_LOC,
- tempword, 1);
+ tempword, 1);
status = ft1000_write_register(ft1000dev, FT1000_DB_DNLD_TX,
- FT1000_REG_DOORBELL);
+ FT1000_REG_DOORBELL);
}
static u16 get_handshake_usb(struct ft1000_usb *ft1000dev, u16 expected_value)
@@ -230,27 +230,27 @@ static u16 get_handshake_usb(struct ft1000_usb *ft1000dev, u16 expected_value)
while (loopcnt < 100) {
if (ft1000dev->usbboot == 2) {
status = ft1000_read_dpram32(ft1000dev, 0,
- (u8 *)&(ft1000dev->tempbuf[0]), 64);
+ (u8 *)&(ft1000dev->tempbuf[0]), 64);
for (temp = 0; temp < 16; temp++) {
- DEBUG("tempbuf %d = 0x%x\n", temp,
- ft1000dev->tempbuf[temp]);
+ pr_debug("tempbuf %d = 0x%x\n",
+ temp, ft1000dev->tempbuf[temp]);
}
status = ft1000_read_dpram16(ft1000dev,
- DWNLD_MAG1_HANDSHAKE_LOC,
- (u8 *)&handshake, 1);
- DEBUG("handshake from read_dpram16 = 0x%x\n",
- handshake);
+ DWNLD_MAG1_HANDSHAKE_LOC,
+ (u8 *)&handshake, 1);
+ pr_debug("handshake from read_dpram16 = 0x%x\n",
+ handshake);
if (ft1000dev->dspalive == ft1000dev->tempbuf[6]) {
handshake = 0;
} else {
handshake = ft1000dev->tempbuf[1];
ft1000dev->dspalive =
- ft1000dev->tempbuf[6];
+ ft1000dev->tempbuf[6];
}
} else {
status = ft1000_read_dpram16(ft1000dev,
- DWNLD_MAG1_HANDSHAKE_LOC,
- (u8 *)&handshake, 1);
+ DWNLD_MAG1_HANDSHAKE_LOC,
+ (u8 *)&handshake, 1);
}
loopcnt++;
@@ -281,12 +281,12 @@ static u16 get_request_type(struct ft1000_usb *ft1000dev)
if (ft1000dev->bootmode == 1) {
status = fix_ft1000_read_dpram32(ft1000dev,
- DWNLD_MAG1_TYPE_LOC, (u8 *)&tempx);
+ DWNLD_MAG1_TYPE_LOC, (u8 *)&tempx);
tempx = ntohl(tempx);
} else {
tempx = 0;
status = ft1000_read_dpram16(ft1000dev,
- DWNLD_MAG1_TYPE_LOC, (u8 *)&tempword, 1);
+ DWNLD_MAG1_TYPE_LOC, (u8 *)&tempword, 1);
tempx |= (tempword << 16);
tempx = ntohl(tempx);
}
@@ -304,7 +304,7 @@ static u16 get_request_type_usb(struct ft1000_usb *ft1000dev)
if (ft1000dev->bootmode == 1) {
status = fix_ft1000_read_dpram32(ft1000dev,
- DWNLD_MAG1_TYPE_LOC, (u8 *)&tempx);
+ DWNLD_MAG1_TYPE_LOC, (u8 *)&tempx);
tempx = ntohl(tempx);
} else {
if (ft1000dev->usbboot == 2) {
@@ -313,8 +313,8 @@ static u16 get_request_type_usb(struct ft1000_usb *ft1000dev)
} else {
tempx = 0;
status = ft1000_read_dpram16(ft1000dev,
- DWNLD_MAG1_TYPE_LOC,
- (u8 *)&tempword, 1);
+ DWNLD_MAG1_TYPE_LOC,
+ (u8 *)&tempword, 1);
}
tempx |= (tempword << 16);
tempx = ntohl(tempx);
@@ -332,14 +332,14 @@ static long get_request_value(struct ft1000_usb *ft1000dev)
if (ft1000dev->bootmode == 1) {
status = fix_ft1000_read_dpram32(ft1000dev,
- DWNLD_MAG1_SIZE_LOC, (u8 *)&value);
+ DWNLD_MAG1_SIZE_LOC, (u8 *)&value);
value = ntohl(value);
} else {
status = ft1000_read_dpram16(ft1000dev,
- DWNLD_MAG1_SIZE_LOC, (u8 *)&tempword, 0);
+ DWNLD_MAG1_SIZE_LOC, (u8 *)&tempword, 0);
value = tempword;
status = ft1000_read_dpram16(ft1000dev,
- DWNLD_MAG1_SIZE_LOC, (u8 *)&tempword, 1);
+ DWNLD_MAG1_SIZE_LOC, (u8 *)&tempword, 1);
value |= (tempword << 16);
value = ntohl(value);
}
@@ -369,7 +369,7 @@ static u16 hdr_checksum(struct pseudo_hdr *pHdr)
chksum = ((((((usPtr[0] ^ usPtr[1]) ^ usPtr[2]) ^ usPtr[3]) ^
- usPtr[4]) ^ usPtr[5]) ^ usPtr[6]);
+ usPtr[4]) ^ usPtr[5]) ^ usPtr[6]);
return chksum;
}
@@ -387,7 +387,7 @@ static int check_buffers(u16 *buff_w, u16 *buff_r, int len, int offset)
}
static int write_dpram32_and_check(struct ft1000_usb *ft1000dev,
- u16 tempbuffer[], u16 dpram)
+ u16 tempbuffer[], u16 dpram)
{
int status;
u16 resultbuffer[64];
@@ -395,38 +395,38 @@ static int write_dpram32_and_check(struct ft1000_usb *ft1000dev,
for (i = 0; i < 10; i++) {
status = ft1000_write_dpram32(ft1000dev, dpram,
- (u8 *)&tempbuffer[0], 64);
+ (u8 *)&tempbuffer[0], 64);
if (status == 0) {
/* Work around for ASIC bit stuffing problem. */
if ((tempbuffer[31] & 0xfe00) == 0xfe00) {
status = ft1000_write_dpram32(ft1000dev,
- dpram+12, (u8 *)&tempbuffer[24],
- 64);
+ dpram+12, (u8 *)&tempbuffer[24],
+ 64);
}
/* Let's check the data written */
status = ft1000_read_dpram32(ft1000dev, dpram,
- (u8 *)&resultbuffer[0], 64);
+ (u8 *)&resultbuffer[0], 64);
if ((tempbuffer[31] & 0xfe00) == 0xfe00) {
if (check_buffers(tempbuffer, resultbuffer, 28,
- 0)) {
- DEBUG("FT1000:download:DPRAM write failed 1 during bootloading\n");
+ 0)) {
+ pr_debug("DPRAM write failed 1 during bootloading\n");
usleep_range(9000, 11000);
break;
}
status = ft1000_read_dpram32(ft1000dev,
- dpram+12,
- (u8 *)&resultbuffer[0], 64);
+ dpram+12,
+ (u8 *)&resultbuffer[0], 64);
if (check_buffers(tempbuffer, resultbuffer, 16,
- 24)) {
- DEBUG("FT1000:download:DPRAM write failed 2 during bootloading\n");
+ 24)) {
+ pr_debug("DPRAM write failed 2 during bootloading\n");
usleep_range(9000, 11000);
break;
}
} else {
if (check_buffers(tempbuffer, resultbuffer, 32,
- 0)) {
- DEBUG("FT1000:download:DPRAM write failed 3 during bootloading\n");
+ 0)) {
+ pr_debug("DPRAM write failed 3 during bootloading\n");
usleep_range(9000, 11000);
break;
}
@@ -445,7 +445,7 @@ static int write_dpram32_and_check(struct ft1000_usb *ft1000dev,
* long word_length - length of the buffer to be written to DPRAM
*/
static int write_blk(struct ft1000_usb *ft1000dev, u16 **pUsFile, u8 **pUcFile,
- long word_length)
+ long word_length)
{
int status = 0;
u16 dpram;
@@ -453,7 +453,7 @@ static int write_blk(struct ft1000_usb *ft1000dev, u16 **pUsFile, u8 **pUcFile,
u16 tempword;
u16 tempbuffer[64];
- /*DEBUG("FT1000:download:start word_length = %d\n",(int)word_length); */
+ /*pr_debug("start word_length = %d\n",(int)word_length); */
dpram = (u16)DWNLD_MAG1_PS_HDR_LOC;
tempword = *(*pUsFile);
(*pUsFile)++;
@@ -483,21 +483,22 @@ static int write_blk(struct ft1000_usb *ft1000dev, u16 **pUsFile, u8 **pUcFile,
}
}
- /*DEBUG("write_blk: loopcnt is %d\n", loopcnt); */
- /*DEBUG("write_blk: bootmode = %d\n", bootmode); */
- /*DEBUG("write_blk: dpram = %x\n", dpram); */
+ /*pr_debug("loopcnt is %d\n", loopcnt); */
+ /*pr_debug("write_blk: bootmode = %d\n", bootmode); */
+ /*pr_debug("write_blk: dpram = %x\n", dpram); */
if (ft1000dev->bootmode == 0) {
if (dpram >= 0x3F4)
status = ft1000_write_dpram32(ft1000dev, dpram,
- (u8 *)&tempbuffer[0], 8);
+ (u8 *)&tempbuffer[0], 8);
else
status = ft1000_write_dpram32(ft1000dev, dpram,
- (u8 *)&tempbuffer[0], 64);
+ (u8 *)&tempbuffer[0], 64);
} else {
status = write_dpram32_and_check(ft1000dev, tempbuffer,
- dpram);
+ dpram);
if (status != 0) {
- DEBUG("FT1000:download:Write failed tempbuffer[31] = 0x%x\n", tempbuffer[31]);
+ pr_debug("Write failed tempbuffer[31] = 0x%x\n",
+ tempbuffer[31]);
break;
}
}
@@ -508,7 +509,7 @@ static int write_blk(struct ft1000_usb *ft1000dev, u16 **pUsFile, u8 **pUcFile,
static void usb_dnld_complete(struct urb *urb)
{
- /* DEBUG("****** usb_dnld_complete\n"); */
+ /* pr_debug("****** usb_dnld_complete\n"); */
}
/* writes a block of DSP image to DPRAM
@@ -548,22 +549,21 @@ static int write_blk_fifo(struct ft1000_usb *ft1000dev, u16 **pUsFile,
}
static int scram_start_dwnld(struct ft1000_usb *ft1000dev, u16 *hshake,
- u32 *state)
+ u32 *state)
{
int status = 0;
- DEBUG("FT1000:STATE_START_DWNLD\n");
if (ft1000dev->usbboot)
*hshake = get_handshake_usb(ft1000dev, HANDSHAKE_DSP_BL_READY);
else
*hshake = get_handshake(ft1000dev, HANDSHAKE_DSP_BL_READY);
if (*hshake == HANDSHAKE_DSP_BL_READY) {
- DEBUG("scram_dnldr: handshake is HANDSHAKE_DSP_BL_READY, call put_handshake(HANDSHAKE_DRIVER_READY)\n");
+ pr_debug("handshake is HANDSHAKE_DSP_BL_READY, call put_handshake(HANDSHAKE_DRIVER_READY)\n");
put_handshake(ft1000dev, HANDSHAKE_DRIVER_READY);
} else if (*hshake == HANDSHAKE_TIMEOUT_VALUE) {
status = -ETIMEDOUT;
} else {
- DEBUG("FT1000:download:Download error: Handshake failed\n");
+ pr_debug("Download error: Handshake failed\n");
status = -ENETRESET;
}
*state = STATE_BOOT_DWNLD;
@@ -571,22 +571,22 @@ static int scram_start_dwnld(struct ft1000_usb *ft1000dev, u16 *hshake,
}
static int request_code_segment(struct ft1000_usb *ft1000dev, u16 **s_file,
- u8 **c_file, const u8 *endpoint, bool boot_case)
+ u8 **c_file, const u8 *endpoint, bool boot_case)
{
long word_length;
int status = 0;
- /*DEBUG("FT1000:REQUEST_CODE_SEGMENT\n");i*/
word_length = get_request_value(ft1000dev);
- /*DEBUG("FT1000:word_length = 0x%x\n", (int)word_length); */
+ /*pr_debug("word_length = 0x%x\n", (int)word_length); */
/*NdisMSleep (100); */
if (word_length > MAX_LENGTH) {
- DEBUG("FT1000:download:Download error: Max length exceeded\n");
+ pr_debug("Download error: Max length exceeded\n");
return -1;
}
if ((word_length * 2 + (long)c_file) > (long)endpoint) {
/* Error, beyond boot code range.*/
- DEBUG("FT1000:download:Download error: Requested len=%d exceeds BOOT code boundary.\n", (int)word_length);
+ pr_debug("Download error: Requested len=%d exceeds BOOT code boundary\n",
+ (int)word_length);
return -1;
}
if (word_length & 0x1)
@@ -595,14 +595,14 @@ static int request_code_segment(struct ft1000_usb *ft1000dev, u16 **s_file,
if (boot_case) {
status = write_blk(ft1000dev, s_file, c_file, word_length);
- /*DEBUG("write_blk returned %d\n", status); */
+ /*pr_debug("write_blk returned %d\n", status); */
} else {
status = write_blk_fifo(ft1000dev, s_file, c_file, word_length);
if (ft1000dev->usbboot == 0)
ft1000dev->usbboot++;
if (ft1000dev->usbboot == 1)
status |= ft1000_write_dpram16(ft1000dev,
- DWNLD_MAG1_PS_HDR_LOC, 0, 0);
+ DWNLD_MAG1_PS_HDR_LOC, 0, 0);
}
return status;
}
@@ -641,8 +641,6 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
struct prov_record *pprov_record;
struct ft1000_info *pft1000info = netdev_priv(ft1000dev->net);
- DEBUG("Entered scram_dnldr...\n");
-
ft1000dev->fcodeldr = 0;
ft1000dev->usbboot = 0;
ft1000dev->dspalive = 0xffff;
@@ -653,7 +651,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
state = STATE_START_DWNLD;
- file_hdr = (struct dsp_file_hdr *)pFileStart;
+ file_hdr = pFileStart;
ft1000_write_register(ft1000dev, 0x800, FT1000_REG_MAG_WATERMARK);
@@ -674,7 +672,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
break;
case STATE_BOOT_DWNLD:
- DEBUG("FT1000:STATE_BOOT_DWNLD\n");
+ pr_debug("STATE_BOOT_DWNLD\n");
ft1000dev->bootmode = 1;
handshake = get_handshake(ft1000dev, HANDSHAKE_REQUEST);
if (handshake == HANDSHAKE_REQUEST) {
@@ -684,35 +682,34 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
request = get_request_type(ft1000dev);
switch (request) {
case REQUEST_RUN_ADDRESS:
- DEBUG("FT1000:REQUEST_RUN_ADDRESS\n");
+ pr_debug("REQUEST_RUN_ADDRESS\n");
put_request_value(ft1000dev,
loader_code_address);
break;
case REQUEST_CODE_LENGTH:
- DEBUG("FT1000:REQUEST_CODE_LENGTH\n");
+ pr_debug("REQUEST_CODE_LENGTH\n");
put_request_value(ft1000dev,
loader_code_size);
break;
case REQUEST_DONE_BL:
- DEBUG("FT1000:REQUEST_DONE_BL\n");
+ pr_debug("REQUEST_DONE_BL\n");
/* Reposition ptrs to beginning of code section */
s_file = (u16 *) (boot_end);
c_file = (u8 *) (boot_end);
- /* DEBUG("FT1000:download:s_file = 0x%8x\n", (int)s_file); */
- /* DEBUG("FT1000:download:c_file = 0x%8x\n", (int)c_file); */
+ /* pr_debug("download:s_file = 0x%8x\n", (int)s_file); */
+ /* pr_debug("FT1000:download:c_file = 0x%8x\n", (int)c_file); */
state = STATE_CODE_DWNLD;
ft1000dev->fcodeldr = 1;
break;
case REQUEST_CODE_SEGMENT:
status = request_code_segment(ft1000dev,
- &s_file, &c_file,
- (const u8 *)boot_end,
- true);
- break;
+ &s_file, &c_file,
+ (const u8 *)boot_end,
+ true);
+ break;
default:
- DEBUG
- ("FT1000:download:Download error: Bad request type=%d in BOOT download state.\n",
- request);
+ pr_debug("Download error: Bad request type=%d in BOOT download state\n",
+ request);
status = -1;
break;
}
@@ -723,68 +720,60 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
put_handshake(ft1000dev,
HANDSHAKE_RESPONSE);
} else {
- DEBUG
- ("FT1000:download:Download error: Handshake failed\n");
+ pr_debug("Download error: Handshake failed\n");
status = -1;
}
break;
case STATE_CODE_DWNLD:
- /* DEBUG("FT1000:STATE_CODE_DWNLD\n"); */
+ /* pr_debug("STATE_CODE_DWNLD\n"); */
ft1000dev->bootmode = 0;
if (ft1000dev->usbboot)
handshake =
- get_handshake_usb(ft1000dev,
- HANDSHAKE_REQUEST);
+ get_handshake_usb(ft1000dev,
+ HANDSHAKE_REQUEST);
else
handshake =
- get_handshake(ft1000dev, HANDSHAKE_REQUEST);
+ get_handshake(ft1000dev, HANDSHAKE_REQUEST);
if (handshake == HANDSHAKE_REQUEST) {
/*
* Get type associated with the request.
*/
if (ft1000dev->usbboot)
request =
- get_request_type_usb(ft1000dev);
+ get_request_type_usb(ft1000dev);
else
request = get_request_type(ft1000dev);
switch (request) {
case REQUEST_FILE_CHECKSUM:
- DEBUG
- ("FT1000:download:image_chksum = 0x%8x\n",
- image_chksum);
+ pr_debug("image_chksum = 0x%8x\n",
+ image_chksum);
put_request_value(ft1000dev,
image_chksum);
break;
case REQUEST_RUN_ADDRESS:
- DEBUG
- ("FT1000:download: REQUEST_RUN_ADDRESS\n");
+ pr_debug("REQUEST_RUN_ADDRESS\n");
if (correct_version) {
- DEBUG
- ("FT1000:download:run_address = 0x%8x\n",
- (int)run_address);
+ pr_debug("run_address = 0x%8x\n",
+ (int)run_address);
put_request_value(ft1000dev,
run_address);
} else {
- DEBUG
- ("FT1000:download:Download error: Got Run address request before image offset request.\n");
+ pr_debug("Download error: Got Run address request before image offset request\n");
status = -1;
break;
}
break;
case REQUEST_CODE_LENGTH:
- DEBUG
- ("FT1000:download:REQUEST_CODE_LENGTH\n");
+ pr_debug("REQUEST_CODE_LENGTH\n");
if (correct_version) {
- DEBUG
- ("FT1000:download:run_size = 0x%8x\n",
- (int)run_size);
+ pr_debug("run_size = 0x%8x\n",
+ (int)run_size);
put_request_value(ft1000dev,
run_size);
} else {
- DEBUG
- ("FT1000:download:Download error: Got Size request before image offset request.\n");
+ pr_debug("Download error: Got Size request before image offset request\n");
status = -1;
break;
}
@@ -793,69 +782,66 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
ft1000dev->usbboot = 3;
/* Reposition ptrs to beginning of provisioning section */
s_file =
- (u16 *) (pFileStart +
- file_hdr->commands_offset);
+ (u16 *) (pFileStart +
+ file_hdr->commands_offset);
c_file =
- (u8 *) (pFileStart +
- file_hdr->commands_offset);
+ (u8 *) (pFileStart +
+ file_hdr->commands_offset);
state = STATE_DONE_DWNLD;
break;
case REQUEST_CODE_SEGMENT:
- /* DEBUG("FT1000:download: REQUEST_CODE_SEGMENT - CODELOADER\n"); */
+ /* pr_debug("REQUEST_CODE_SEGMENT - CODELOADER\n"); */
if (!correct_version) {
- DEBUG
- ("FT1000:download:Download error: Got Code Segment request before image offset request.\n");
+ pr_debug("Download error: Got Code Segment request before image offset request\n");
status = -1;
break;
}
status = request_code_segment(ft1000dev,
- &s_file, &c_file,
- (const u8 *)code_end,
- false);
+ &s_file, &c_file,
+ (const u8 *)code_end,
+ false);
break;
case REQUEST_MAILBOX_DATA:
- DEBUG
- ("FT1000:download: REQUEST_MAILBOX_DATA\n");
+ pr_debug("REQUEST_MAILBOX_DATA\n");
/* Convert length from byte count to word count. Make sure we round up. */
word_length =
- (long)(pft1000info->DSPInfoBlklen +
- 1) / 2;
+ (long)(pft1000info->DSPInfoBlklen +
+ 1) / 2;
put_request_value(ft1000dev,
word_length);
mailbox_data =
- (struct drv_msg *)&(pft1000info->
- DSPInfoBlk[0]);
+ (struct drv_msg *)&(pft1000info->
+ DSPInfoBlk[0]);
/*
* Position ASIC DPRAM auto-increment pointer.
*/
- data = (u16 *) &mailbox_data->data[0];
- dpram = (u16) DWNLD_MAG1_PS_HDR_LOC;
+ data = (u16 *)&mailbox_data->data[0];
+ dpram = (u16)DWNLD_MAG1_PS_HDR_LOC;
if (word_length & 0x1)
word_length++;
- word_length = (word_length / 2);
+ word_length = word_length / 2;
for (; word_length > 0; word_length--) { /* In words */
templong = *data++;
templong |= (*data++ << 16);
status =
- fix_ft1000_write_dpram32
- (ft1000dev, dpram++,
- (u8 *) &templong);
+ fix_ft1000_write_dpram32(ft1000dev, dpram++,
+ (u8 *)&templong);
}
break;
case REQUEST_VERSION_INFO:
- DEBUG
- ("FT1000:download:REQUEST_VERSION_INFO\n");
+ pr_debug("REQUEST_VERSION_INFO\n");
word_length =
- file_hdr->version_data_size;
+ file_hdr->version_data_size;
put_request_value(ft1000dev,
word_length);
/*
@@ -863,15 +849,15 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
*/
s_file =
- (u16 *) (pFileStart +
- file_hdr->
- version_data_offset);
+ (u16 *)(pFileStart +
+ file_hdr->version_data_offset);
- dpram = (u16) DWNLD_MAG1_PS_HDR_LOC;
+ dpram = (u16)DWNLD_MAG1_PS_HDR_LOC;
if (word_length & 0x1)
word_length++;
- word_length = (word_length / 2);
+ word_length = word_length / 2;
for (; word_length > 0; word_length--) { /* In words */
@@ -879,26 +865,25 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
temp = ntohs(*s_file++);
templong |= (temp << 16);
status =
- fix_ft1000_write_dpram32
- (ft1000dev, dpram++,
- (u8 *) &templong);
+ fix_ft1000_write_dpram32(ft1000dev, dpram++,
+ (u8 *)&templong);
}
break;
case REQUEST_CODE_BY_VERSION:
- DEBUG
- ("FT1000:download:REQUEST_CODE_BY_VERSION\n");
+ pr_debug("REQUEST_CODE_BY_VERSION\n");
correct_version = false;
requested_version =
- get_request_value(ft1000dev);
+ get_request_value(ft1000dev);
dsp_img_info =
- (struct dsp_image_info *)(pFileStart
- +
- sizeof
- (struct
- dsp_file_hdr));
+ (struct dsp_image_info *)(pFileStart +
+ sizeof(struct dsp_file_hdr));
for (image = 0;
image < file_hdr->nDspImages;
@@ -907,30 +892,29 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
if (dsp_img_info->version ==
requested_version) {
correct_version = true;
- DEBUG
- ("FT1000:download: correct_version is TRUE\n");
+ pr_debug("correct_version is TRUE\n");
s_file =
- (u16 *) (pFileStart
- +
- dsp_img_info->
- begin_offset);
+ (u16 *)(pFileStart +
+ dsp_img_info->begin_offset);
c_file =
- (u8 *) (pFileStart +
- dsp_img_info->
- begin_offset);
+ (u8 *)(pFileStart +
+ dsp_img_info->begin_offset);
code_end =
- (u8 *) (pFileStart +
- dsp_img_info->
- end_offset);
+ (u8 *)(pFileStart +
+ dsp_img_info->end_offset);
run_address =
- dsp_img_info->
- run_address;
+ dsp_img_info->run_address;
run_size =
- dsp_img_info->
- image_size;
+ dsp_img_info->image_size;
image_chksum =
- (u32) dsp_img_info->
- checksum;
+ (u32)dsp_img_info->checksum;
break;
}
dsp_img_info++;
@@ -941,18 +925,16 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
/*
* Error, beyond boot code range.
*/
- DEBUG
- ("FT1000:download:Download error: Bad Version Request = 0x%x.\n",
- (int)requested_version);
+ pr_debug("Download error: Bad Version Request = 0x%x.\n",
+ (int)requested_version);
status = -1;
break;
}
break;
default:
- DEBUG
- ("FT1000:download:Download error: Bad request type=%d in CODE download state.\n",
- request);
+ pr_debug("Download error: Bad request type=%d in CODE download state.\n",
+ request);
status = -1;
break;
}
@@ -963,20 +945,19 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
put_handshake(ft1000dev,
HANDSHAKE_RESPONSE);
} else {
- DEBUG
- ("FT1000:download:Download error: Handshake failed\n");
+ pr_debug("Download error: Handshake failed\n");
status = -1;
}
break;
case STATE_DONE_DWNLD:
- DEBUG("FT1000:download:Code loader is done...\n");
+ pr_debug("Code loader is done...\n");
state = STATE_SECTION_PROV;
break;
case STATE_SECTION_PROV:
- DEBUG("FT1000:download:STATE_SECTION_PROV\n");
+ pr_debug("STATE_SECTION_PROV\n");
pseudo_header = (struct pseudo_hdr *)c_file;
if (pseudo_header->checksum ==
@@ -990,9 +971,9 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
/* Get buffer for provisioning data */
pbuffer =
- kmalloc((pseudo_header_len +
- sizeof(struct pseudo_hdr)),
- GFP_ATOMIC);
+ kmalloc((pseudo_header_len +
+ sizeof(struct pseudo_hdr)),
+ GFP_ATOMIC);
if (pbuffer) {
memcpy(pbuffer, (void *)c_file,
(u32) (pseudo_header_len +
@@ -1000,20 +981,20 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
pseudo_hdr)));
/* link provisioning data */
pprov_record =
- kmalloc(sizeof(struct prov_record),
- GFP_ATOMIC);
+ kmalloc(sizeof(struct prov_record),
+ GFP_ATOMIC);
if (pprov_record) {
pprov_record->pprov_data =
- pbuffer;
+ pbuffer;
list_add_tail(&pprov_record->
list,
&pft1000info->
prov_list);
/* Move to next entry if available */
c_file =
- (u8 *) ((unsigned long)
- c_file +
- (u32) ((pseudo_header_len + 1) & 0xFFFFFFFE) + sizeof(struct pseudo_hdr));
+ (u8 *) ((unsigned long)
+ c_file +
+ (u32) ((pseudo_header_len + 1) & 0xFFFFFFFE) + sizeof(struct pseudo_hdr));
if ((unsigned long)(c_file) -
(unsigned long)(pFileStart)
>=
@@ -1031,13 +1012,12 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
/* Checksum did not compute */
status = -1;
}
- DEBUG
- ("ft1000:download: after STATE_SECTION_PROV, state = %d, status= %d\n",
- state, status);
+ pr_debug("after STATE_SECTION_PROV, state = %d, status= %d\n",
+ state, status);
break;
case STATE_DONE_PROV:
- DEBUG("FT1000:download:STATE_DONE_PROV\n");
+ pr_debug("STATE_DONE_PROV\n");
state = STATE_DONE_FILE;
break;
@@ -1050,21 +1030,21 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
break;
/****
- // Check if Card is present
- status = Harley_Read_Register(&temp, FT1000_REG_SUP_IMASK);
- if ( (status != NDIS_STATUS_SUCCESS) || (temp == 0x0000) ) {
- break;
- }
-
- status = Harley_Read_Register(&temp, FT1000_REG_ASIC_ID);
- if ( (status != NDIS_STATUS_SUCCESS) || (temp == 0xffff) ) {
- break;
- }
+ // Check if Card is present
+ status = Harley_Read_Register(&temp, FT1000_REG_SUP_IMASK);
+ if ( (status != NDIS_STATUS_SUCCESS) || (temp == 0x0000) ) {
+ break;
+ }
+
+ status = Harley_Read_Register(&temp, FT1000_REG_ASIC_ID);
+ if ( (status != NDIS_STATUS_SUCCESS) || (temp == 0xffff) ) {
+ break;
+ }
****/
} /* End while */
- DEBUG("Download exiting with status = 0x%8x\n", status);
+ pr_debug("Download exiting with status = 0x%8x\n", status);
ft1000_write_register(ft1000dev, FT1000_DB_DNLD_TX,
FT1000_REG_DOORBELL);
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
index 2e13e7b7ec1..d12cfc9aa32 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
@@ -1,8 +1,10 @@
/* CopyRight (C) 2007 Qualcomm Inc. All Rights Reserved.
-*
-*
-* This file is part of Express Card USB Driver
-*/
+ *
+ *
+ * This file is part of Express Card USB Driver
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
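A note on the pattern just introduced, sketched from the stock definitions
in include/linux/printk.h rather than anything ft1000-specific: because
pr_fmt() is defined before the printk header is pulled in (via
<linux/kernel.h>), every pr_debug()/pr_err() in this file expands with the
module name prepended, which is why the converted messages below drop the
hand-written "FT1000:" and function-name prefixes. Ignoring the
dynamic-debug variant, the expansion is roughly:

	/* simplified; the actual prefix text comes from KBUILD_MODNAME */
	#define pr_debug(fmt, ...) \
		printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)

	pr_debug("card %d\n", 3);	/* -> KERN_DEBUG "ft1000: card 3" */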
@@ -35,16 +37,16 @@ static u8 tempbuffer[1600];
#define MAX_RCV_LOOP 100
/* send a control message via USB interface synchronously
-* Parameters: ft1000_usb - device structure
-* pipe - usb control message pipe
-* request - control request
-* requesttype - control message request type
-* value - value to be written or 0
-* index - register index
-* data - data buffer to hold the read/write values
-* size - data size
-* timeout - control message time out value
-*/
+ * Parameters: ft1000_usb - device structure
+ * pipe - usb control message pipe
+ * request - control request
+ * requesttype - control message request type
+ * value - value to be written or 0
+ * index - register index
+ * data - data buffer to hold the read/write values
+ * size - data size
+ * timeout - control message time out value
+ */
static int ft1000_control(struct ft1000_usb *ft1000dev, unsigned int pipe,
u8 request, u8 requesttype, u16 value, u16 index,
void *data, u16 size, int timeout)
@@ -52,7 +54,7 @@ static int ft1000_control(struct ft1000_usb *ft1000dev, unsigned int pipe,
int ret;
if ((ft1000dev == NULL) || (ft1000dev->dev == NULL)) {
- DEBUG("ft1000dev or ft1000dev->dev == NULL, failure\n");
+ pr_debug("ft1000dev or ft1000dev->dev == NULL, failure\n");
return -ENODEV;
}
@@ -171,7 +173,7 @@ int ft1000_read_dpram16(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer,
/* write into DPRAM a number of bytes */
int ft1000_write_dpram16(struct ft1000_usb *ft1000dev, u16 indx, u16 value,
- u8 highlow)
+ u8 highlow)
{
int ret = 0;
u8 request;
@@ -212,7 +214,7 @@ int fix_ft1000_read_dpram32(struct ft1000_usb *ft1000dev, u16 indx,
*buffer++ = buf[pos++];
*buffer++ = buf[pos++];
} else {
- DEBUG("fix_ft1000_read_dpram32: DPRAM32 Read failed\n");
+ pr_debug("DPRAM32 Read failed\n");
*buffer++ = 0;
*buffer++ = 0;
*buffer++ = 0;
@@ -246,7 +248,7 @@ int fix_ft1000_write_dpram32(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer)
buf[pos2++] = *buffer++;
ret = ft1000_write_dpram32(ft1000dev, pos1, buf, 16);
} else {
- DEBUG("fix_ft1000_write_dpram32: DPRAM32 Read failed\n");
+ pr_debug("DPRAM32 Read failed\n");
return ret;
}
@@ -270,8 +272,7 @@ int fix_ft1000_write_dpram32(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer)
for (i = 0; i < 16; i++) {
if (tempbuffer[i] != resultbuffer[i]) {
ret = -1;
- DEBUG("%s Failed to write\n",
- __func__);
+ pr_debug("Failed to write\n");
}
}
}
@@ -287,19 +288,19 @@ static void card_reset_dsp(struct ft1000_usb *ft1000dev, bool value)
u16 tempword;
status = ft1000_write_register(ft1000dev, HOST_INTF_BE,
- FT1000_REG_SUP_CTRL);
+ FT1000_REG_SUP_CTRL);
status = ft1000_read_register(ft1000dev, &tempword,
FT1000_REG_SUP_CTRL);
if (value) {
- DEBUG("Reset DSP\n");
+ pr_debug("Reset DSP\n");
status = ft1000_read_register(ft1000dev, &tempword,
FT1000_REG_RESET);
tempword |= DSP_RESET_BIT;
status = ft1000_write_register(ft1000dev, tempword,
FT1000_REG_RESET);
} else {
- DEBUG("Activate DSP\n");
+ pr_debug("Activate DSP\n");
status = ft1000_read_register(ft1000dev, &tempword,
FT1000_REG_RESET);
tempword |= DSP_ENCRYPTED;
@@ -318,18 +319,18 @@ static void card_reset_dsp(struct ft1000_usb *ft1000dev, bool value)
}
/* send a command to ASIC
-* Parameters: ft1000_usb - device structure
-* ptempbuffer - command buffer
-* size - command buffer size
-*/
+ * Parameters: ft1000_usb - device structure
+ * ptempbuffer - command buffer
+ * size - command buffer size
+ */
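/* Illustrative call shape, mirroring the GET_DRV_ERR_RPT_MSG path later in
 * this file (not a new interface): the caller passes a buffer that already
 * starts with a struct pseudo_hdr, e.g.
 *
 *	status = card_send_command(dev, &tempbuffer[0],
 *				   (u16)(0x0012 + PSEUDOSZ));
 *
 * and the helper copies it into a freshly allocated buffer of size + 2
 * bytes before ringing the FT1000_DB_DPRAM_TX doorbell.
 */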
int card_send_command(struct ft1000_usb *ft1000dev, void *ptempbuffer,
- int size)
+ int size)
{
int ret;
unsigned short temp;
unsigned char *commandbuf;
- DEBUG("card_send_command: enter card_send_command... size=%d\n", size);
+ pr_debug("enter card_send_command... size=%d\n", size);
commandbuf = kmalloc(size + 2, GFP_KERNEL);
if (!commandbuf)
@@ -355,7 +356,7 @@ int card_send_command(struct ft1000_usb *ft1000dev, void *ptempbuffer,
return ret;
usleep_range(900, 1100);
ret = ft1000_write_register(ft1000dev, FT1000_DB_DPRAM_TX,
- FT1000_REG_DOORBELL);
+ FT1000_REG_DOORBELL);
if (ret)
return ret;
usleep_range(900, 1100);
@@ -364,7 +365,7 @@ int card_send_command(struct ft1000_usb *ft1000dev, void *ptempbuffer,
#if 0
if ((temp & 0x0100) == 0)
- DEBUG("card_send_command: Message sent\n");
+ pr_debug("Message sent\n");
#endif
return ret;
}
@@ -390,7 +391,7 @@ int dsp_reload(struct ft1000_usb *ft1000dev)
status = ft1000_write_register(ft1000dev, tempword, FT1000_REG_RESET);
msleep(1000);
status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_RESET);
- DEBUG("Reset Register = 0x%x\n", tempword);
+ pr_debug("Reset Register = 0x%x\n", tempword);
/* Toggle DSP reset */
card_reset_dsp(ft1000dev, 1);
@@ -399,13 +400,13 @@ int dsp_reload(struct ft1000_usb *ft1000dev)
msleep(1000);
status =
- ft1000_write_register(ft1000dev, HOST_INTF_BE, FT1000_REG_SUP_CTRL);
+ ft1000_write_register(ft1000dev, HOST_INTF_BE, FT1000_REG_SUP_CTRL);
/* Let's check for FEFE */
status =
- ft1000_read_dpram32(ft1000dev, FT1000_MAG_DPRAM_FEFE_INDX,
- (u8 *) &templong, 4);
- DEBUG("templong (fefe) = 0x%8x\n", templong);
+ ft1000_read_dpram32(ft1000dev, FT1000_MAG_DPRAM_FEFE_INDX,
+ (u8 *)&templong, 4);
+ pr_debug("templong (fefe) = 0x%8x\n", templong);
/* call codeloader */
status = scram_dnldr(ft1000dev, pFileStart, FileLength);
@@ -415,8 +416,6 @@ int dsp_reload(struct ft1000_usb *ft1000dev)
msleep(1000);
- DEBUG("dsp_reload returned\n");
-
return 0;
}
@@ -427,8 +426,6 @@ static void ft1000_reset_asic(struct net_device *dev)
struct ft1000_usb *ft1000dev = info->priv;
u16 tempword;
- DEBUG("ft1000_hw:ft1000_reset_asic called\n");
-
/* Let's use the register provided by the Magnemite ASIC to reset the
* ASIC and DSP.
*/
@@ -442,10 +439,10 @@ static void ft1000_reset_asic(struct net_device *dev)
/* clear interrupts */
ft1000_read_register(ft1000dev, &tempword, FT1000_REG_SUP_ISR);
- DEBUG("ft1000_hw: interrupt status register = 0x%x\n", tempword);
+ pr_debug("interrupt status register = 0x%x\n", tempword);
ft1000_write_register(ft1000dev, tempword, FT1000_REG_SUP_ISR);
ft1000_read_register(ft1000dev, &tempword, FT1000_REG_SUP_ISR);
- DEBUG("ft1000_hw: interrupt status register = 0x%x\n", tempword);
+ pr_debug("interrupt status register = 0x%x\n", tempword);
}
static int ft1000_reset_card(struct net_device *dev)
@@ -455,38 +452,36 @@ static int ft1000_reset_card(struct net_device *dev)
u16 tempword;
struct prov_record *ptr;
- DEBUG("ft1000_hw:ft1000_reset_card called.....\n");
-
ft1000dev->fCondResetPend = true;
info->CardReady = 0;
ft1000dev->fProvComplete = false;
/* Make sure we free any memory reserve for provisioning */
while (list_empty(&info->prov_list) == 0) {
- DEBUG("ft1000_reset_card:deleting provisioning record\n");
+ pr_debug("deleting provisioning record\n");
ptr =
- list_entry(info->prov_list.next, struct prov_record, list);
+ list_entry(info->prov_list.next, struct prov_record, list);
list_del(&ptr->list);
kfree(ptr->pprov_data);
kfree(ptr);
}
- DEBUG("ft1000_hw:ft1000_reset_card: reset asic\n");
+ pr_debug("reset asic\n");
ft1000_reset_asic(dev);
- DEBUG("ft1000_hw:ft1000_reset_card: call dsp_reload\n");
+ pr_debug("call dsp_reload\n");
dsp_reload(ft1000dev);
- DEBUG("dsp reload successful\n");
+ pr_debug("dsp reload successful\n");
mdelay(10);
/* Initialize DSP heartbeat area */
ft1000_write_dpram16(ft1000dev, FT1000_MAG_HI_HO, ho_mag,
FT1000_MAG_HI_HO_INDX);
- ft1000_read_dpram16(ft1000dev, FT1000_MAG_HI_HO, (u8 *) &tempword,
+ ft1000_read_dpram16(ft1000dev, FT1000_MAG_HI_HO, (u8 *)&tempword,
FT1000_MAG_HI_HO_INDX);
- DEBUG("ft1000_hw:ft1000_reset_card:hi_ho value = 0x%x\n", tempword);
+ pr_debug("hi_ho value = 0x%x\n", tempword);
info->CardReady = 1;
@@ -508,8 +503,8 @@ static void ft1000_usb_transmit_complete(struct urb *urb)
}
/* take an ethernet packet and convert it to a Flarion
-* packet prior to sending it to the ASIC Downlink FIFO.
-*/
+ * packet prior to sending it to the ASIC Downlink FIFO.
+ */
static int ft1000_copy_down_pkt(struct net_device *netdev, u8 *packet, u16 len)
{
struct ft1000_info *pInfo = netdev_priv(netdev);
@@ -520,14 +515,13 @@ static int ft1000_copy_down_pkt(struct net_device *netdev, u8 *packet, u16 len)
struct pseudo_hdr hdr;
if (!pInfo->CardReady) {
- DEBUG("ft1000_copy_down_pkt::Card Not Ready\n");
+ pr_debug("Card Not Ready\n");
return -ENODEV;
}
count = sizeof(struct pseudo_hdr) + len;
if (count > MAX_BUF_SIZE) {
- DEBUG("Error:ft1000_copy_down_pkt:Message Size Overflow!\n");
- DEBUG("size = %d\n", count);
+ pr_debug("Message Size Overflow! size = %d\n", count);
return -EINVAL;
}
@@ -545,7 +539,7 @@ static int ft1000_copy_down_pkt(struct net_device *netdev, u8 *packet, u16 len)
hdr.control = 0x00;
hdr.checksum = hdr.length ^ hdr.source ^ hdr.destination ^
- hdr.portdest ^ hdr.portsrc ^ hdr.sh_str_id ^ hdr.control;
+ hdr.portdest ^ hdr.portsrc ^ hdr.sh_str_id ^ hdr.control;
memcpy(&pFt1000Dev->tx_buf[0], &hdr, sizeof(hdr));
memcpy(&(pFt1000Dev->tx_buf[sizeof(struct pseudo_hdr)]), packet, len);
@@ -559,12 +553,12 @@ static int ft1000_copy_down_pkt(struct net_device *netdev, u8 *packet, u16 len)
pFt1000Dev->tx_buf, count,
ft1000_usb_transmit_complete, (void *)pFt1000Dev);
- t = (u8 *) pFt1000Dev->tx_urb->transfer_buffer;
+ t = (u8 *)pFt1000Dev->tx_urb->transfer_buffer;
ret = usb_submit_urb(pFt1000Dev->tx_urb, GFP_ATOMIC);
if (ret) {
- DEBUG("ft1000 failed tx_urb %d\n", ret);
+ pr_debug("failed tx_urb %d\n", ret);
return ret;
}
pInfo->stats.tx_packets++;
@@ -574,9 +568,9 @@ static int ft1000_copy_down_pkt(struct net_device *netdev, u8 *packet, u16 len)
}
/* transmit an ethernet packet
-* Parameters: skb - socket buffer to be sent
-* dev - network device
-*/
+ * Parameters: skb - socket buffer to be sent
+ * dev - network device
+ */
static int ft1000_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ft1000_info *pInfo = netdev_priv(dev);
@@ -585,30 +579,30 @@ static int ft1000_start_xmit(struct sk_buff *skb, struct net_device *dev)
int maxlen, pipe;
if (skb == NULL) {
- DEBUG("ft1000_hw: ft1000_start_xmit:skb == NULL!!!\n");
+ pr_debug("skb == NULL!!!\n");
return NETDEV_TX_OK;
}
if (pFt1000Dev->status & FT1000_STATUS_CLOSING) {
- DEBUG("network driver is closed, return\n");
+ pr_debug("network driver is closed, return\n");
goto err;
}
pipe =
- usb_sndbulkpipe(pFt1000Dev->dev, pFt1000Dev->bulk_out_endpointAddr);
+ usb_sndbulkpipe(pFt1000Dev->dev, pFt1000Dev->bulk_out_endpointAddr);
maxlen = usb_maxpacket(pFt1000Dev->dev, pipe, usb_pipeout(pipe));
- pdata = (u8 *) skb->data;
+ pdata = (u8 *)skb->data;
if (pInfo->mediastate == 0) {
/* Drop packet is mediastate is down */
- DEBUG("ft1000_hw:ft1000_start_xmit:mediastate is down\n");
+ pr_debug("mediastate is down\n");
goto err;
}
if ((skb->len < ENET_HEADER_SIZE) || (skb->len > ENET_MAX_SIZE)) {
/* Drop packet which has invalid size */
- DEBUG("ft1000_hw:ft1000_start_xmit:invalid ethernet length\n");
+ pr_debug("invalid ethernet length\n");
goto err;
}
@@ -628,7 +622,7 @@ static int ft1000_open(struct net_device *dev)
struct ft1000_usb *pFt1000Dev = pInfo->priv;
struct timeval tv;
- DEBUG("ft1000_open is called for card %d\n", pFt1000Dev->CardNumber);
+ pr_debug("ft1000_open is called for card %d\n", pFt1000Dev->CardNumber);
pInfo->stats.rx_bytes = 0;
pInfo->stats.tx_bytes = 0;
@@ -676,11 +670,9 @@ int init_ft1000_netdev(struct ft1000_usb *ft1000dev)
char card_nr[2];
u8 gCardIndex = 0;
- DEBUG("Enter init_ft1000_netdev...\n");
-
netdev = alloc_etherdev(sizeof(struct ft1000_info));
if (!netdev) {
- DEBUG("init_ft1000_netdev: can not allocate network device\n");
+ pr_debug("can not allocate network device\n");
return -ENOMEM;
}
@@ -690,7 +682,7 @@ int init_ft1000_netdev(struct ft1000_usb *ft1000dev)
dev_alloc_name(netdev, netdev->name);
- DEBUG("init_ft1000_netdev: network device name is %s\n", netdev->name);
+ pr_debug("network device name is %s\n", netdev->name);
if (strncmp(netdev->name, "eth", 3) == 0) {
card_nr[0] = netdev->name[3];
@@ -702,7 +694,7 @@ int init_ft1000_netdev(struct ft1000_usb *ft1000dev)
}
ft1000dev->CardNumber = gCardIndex;
- DEBUG("card number = %d\n", ft1000dev->CardNumber);
+ pr_debug("card number = %d\n", ft1000dev->CardNumber);
} else {
netdev_err(ft1000dev->net, "ft1000: Invalid device name\n");
ret_val = -ENXIO;
@@ -738,7 +730,7 @@ int init_ft1000_netdev(struct ft1000_usb *ft1000dev)
ft1000dev->net = netdev;
- DEBUG("Initialize free_buff_lock and freercvpool\n");
+ pr_debug("Initialize free_buff_lock and freercvpool\n");
spin_lock_init(&free_buff_lock);
/* initialize a list of buffers to be use for queuing
@@ -790,7 +782,6 @@ int reg_ft1000_netdev(struct ft1000_usb *ft1000dev,
netdev = ft1000dev->net;
pInfo = netdev_priv(ft1000dev->net);
- DEBUG("Enter reg_ft1000_netdev...\n");
ft1000_read_register(ft1000dev, &pInfo->AsicID, FT1000_REG_ASIC_ID);
@@ -799,23 +790,21 @@ int reg_ft1000_netdev(struct ft1000_usb *ft1000dev,
rc = register_netdev(netdev);
if (rc) {
- DEBUG("reg_ft1000_netdev: could not register network device\n");
+ pr_debug("could not register network device\n");
free_netdev(netdev);
return rc;
}
ft1000_create_dev(ft1000dev);
- DEBUG("reg_ft1000_netdev returned\n");
-
pInfo->CardReady = 1;
return 0;
}
/* take a packet from the FIFO up link and
-* convert it into an ethernet packet and deliver it to the IP stack
-*/
+ * convert it into an ethernet packet and deliver it to the IP stack
+ */
static int ft1000_copy_up_pkt(struct urb *urb)
{
struct ft1000_info *info = urb->context;
@@ -832,14 +821,14 @@ static int ft1000_copy_up_pkt(struct urb *urb)
u16 *chksum;
if (ft1000dev->status & FT1000_STATUS_CLOSING) {
- DEBUG("network driver is closed, return\n");
+ pr_debug("network driver is closed, return\n");
return 0;
}
/* Read length */
len = urb->transfer_buffer_length;
lena = urb->actual_length;
- chksum = (u16 *) ft1000dev->rx_buf;
+ chksum = (u16 *)ft1000dev->rx_buf;
tempword = *chksum++;
for (i = 1; i < 7; i++)
@@ -854,13 +843,13 @@ static int ft1000_copy_up_pkt(struct urb *urb)
skb = dev_alloc_skb(len + 12 + 2);
if (skb == NULL) {
- DEBUG("ft1000_copy_up_pkt: No Network buffers available\n");
+ pr_debug("No Network buffers available\n");
info->stats.rx_errors++;
ft1000_submit_rx_urb(info);
return -1;
}
- pbuffer = (u8 *) skb_put(skb, len + 12);
+ pbuffer = (u8 *)skb_put(skb, len + 12);
/* subtract the number of bytes read already */
ptemp = pbuffer;
@@ -905,7 +894,7 @@ static int ft1000_submit_rx_urb(struct ft1000_info *info)
struct ft1000_usb *pFt1000Dev = info->priv;
if (pFt1000Dev->status & FT1000_STATUS_CLOSING) {
- DEBUG("network driver is closed, return\n");
+ pr_debug("network driver is closed, return\n");
return -ENODEV;
}
@@ -914,13 +903,12 @@ static int ft1000_submit_rx_urb(struct ft1000_info *info)
usb_rcvbulkpipe(pFt1000Dev->dev,
pFt1000Dev->bulk_in_endpointAddr),
pFt1000Dev->rx_buf, MAX_BUF_SIZE,
- (usb_complete_t) ft1000_copy_up_pkt, info);
+ (usb_complete_t)ft1000_copy_up_pkt, info);
result = usb_submit_urb(pFt1000Dev->rx_urb, GFP_ATOMIC);
if (result) {
- pr_err("ft1000_submit_rx_urb: submitting rx_urb %d failed\n",
- result);
+ pr_err("submitting rx_urb %d failed\n", result);
return result;
}
@@ -935,7 +923,7 @@ int ft1000_close(struct net_device *net)
ft1000dev->status |= FT1000_STATUS_CLOSING;
- DEBUG("ft1000_close: pInfo=%p, ft1000dev=%p\n", pInfo, ft1000dev);
+ pr_debug("pInfo=%p, ft1000dev=%p\n", pInfo, ft1000dev);
netif_carrier_off(net);
netif_stop_queue(net);
ft1000dev->status &= ~FT1000_STATUS_CLOSING;
@@ -952,7 +940,7 @@ static int ft1000_chkcard(struct ft1000_usb *dev)
int status;
if (dev->fCondResetPend) {
- DEBUG("ft1000_hw:ft1000_chkcard:Card is being reset, return FALSE\n");
+ pr_debug("Card is being reset, return FALSE\n");
return TRUE;
}
/* Mask register is used to check for device presence since it is never
@@ -960,7 +948,7 @@ static int ft1000_chkcard(struct ft1000_usb *dev)
*/
status = ft1000_read_register(dev, &tempword, FT1000_REG_SUP_IMASK);
if (tempword == 0) {
- DEBUG("ft1000_hw:ft1000_chkcard: IMASK = 0 Card not detected\n");
+ pr_debug("IMASK = 0 Card not detected\n");
return FALSE;
}
/* The system will return the value of 0xffff for the version register
@@ -969,17 +957,17 @@ static int ft1000_chkcard(struct ft1000_usb *dev)
status = ft1000_read_register(dev, &tempword, FT1000_REG_ASIC_ID);
if (tempword != 0x1b01) {
dev->status |= FT1000_STATUS_CLOSING;
- DEBUG("ft1000_hw:ft1000_chkcard: Version = 0xffff Card not detected\n");
+ pr_debug("Version = 0xffff Card not detected\n");
return FALSE;
}
return TRUE;
}
/* read a message from the dpram area.
-* Input:
-* dev - network device structure
-* pbuffer - caller supply address to buffer
-*/
+ * Input:
+ * dev - network device structure
+ * pbuffer - caller supply address to buffer
+ */
static bool ft1000_receive_cmd(struct ft1000_usb *dev, u16 *pbuffer,
int maxsz)
{
@@ -990,46 +978,45 @@ static bool ft1000_receive_cmd(struct ft1000_usb *dev, u16 *pbuffer,
u16 tempword;
ret =
- ft1000_read_dpram16(dev, FT1000_MAG_PH_LEN, (u8 *) &size,
- FT1000_MAG_PH_LEN_INDX);
+ ft1000_read_dpram16(dev, FT1000_MAG_PH_LEN, (u8 *)&size,
+ FT1000_MAG_PH_LEN_INDX);
size = ntohs(size) + PSEUDOSZ;
if (size > maxsz) {
- DEBUG("FT1000:ft1000_receive_cmd:Invalid command length = %d\n",
- size);
+ pr_debug("Invalid command length = %d\n", size);
return FALSE;
}
- ppseudohdr = (u16 *) pbuffer;
+ ppseudohdr = (u16 *)pbuffer;
ft1000_write_register(dev, FT1000_DPRAM_MAG_RX_BASE,
FT1000_REG_DPRAM_ADDR);
ret =
- ft1000_read_register(dev, pbuffer, FT1000_REG_MAG_DPDATAH);
+ ft1000_read_register(dev, pbuffer, FT1000_REG_MAG_DPDATAH);
pbuffer++;
ft1000_write_register(dev, FT1000_DPRAM_MAG_RX_BASE + 1,
FT1000_REG_DPRAM_ADDR);
for (i = 0; i <= (size >> 2); i++) {
ret =
- ft1000_read_register(dev, pbuffer,
- FT1000_REG_MAG_DPDATAL);
+ ft1000_read_register(dev, pbuffer,
+ FT1000_REG_MAG_DPDATAL);
pbuffer++;
ret =
- ft1000_read_register(dev, pbuffer,
- FT1000_REG_MAG_DPDATAH);
+ ft1000_read_register(dev, pbuffer,
+ FT1000_REG_MAG_DPDATAH);
pbuffer++;
}
/* copy odd aligned word */
ret =
- ft1000_read_register(dev, pbuffer, FT1000_REG_MAG_DPDATAL);
+ ft1000_read_register(dev, pbuffer, FT1000_REG_MAG_DPDATAL);
pbuffer++;
ret =
- ft1000_read_register(dev, pbuffer, FT1000_REG_MAG_DPDATAH);
+ ft1000_read_register(dev, pbuffer, FT1000_REG_MAG_DPDATAH);
pbuffer++;
if (size & 0x0001) {
/* copy odd byte from fifo */
ret =
- ft1000_read_register(dev, &tempword,
- FT1000_REG_DPRAM_DATA);
+ ft1000_read_register(dev, &tempword,
+ FT1000_REG_DPRAM_DATA);
*pbuffer = ntohs(tempword);
}
/* Check if pseudo header checksum is good
@@ -1058,17 +1045,15 @@ static int ft1000_dsp_prov(void *arg)
int status;
u16 TempShortBuf[256];
- DEBUG("*** DspProv Entered\n");
-
while (list_empty(&info->prov_list) == 0) {
- DEBUG("DSP Provisioning List Entry\n");
+ pr_debug("DSP Provisioning List Entry\n");
/* Check if doorbell is available */
- DEBUG("check if doorbell is cleared\n");
+ pr_debug("check if doorbell is cleared\n");
status =
- ft1000_read_register(dev, &tempword, FT1000_REG_DOORBELL);
+ ft1000_read_register(dev, &tempword, FT1000_REG_DOORBELL);
if (status) {
- DEBUG("ft1000_dsp_prov::ft1000_read_register error\n");
+ pr_debug("ft1000_read_register error\n");
break;
}
@@ -1076,7 +1061,7 @@ static int ft1000_dsp_prov(void *arg)
mdelay(10);
i++;
if (i == 10) {
- DEBUG("FT1000:ft1000_dsp_prov:message drop\n");
+ pr_debug("message drop\n");
return -1;
}
ft1000_read_register(dev, &tempword,
@@ -1084,17 +1069,17 @@ static int ft1000_dsp_prov(void *arg)
}
if (!(tempword & FT1000_DB_DPRAM_TX)) {
- DEBUG("*** Provision Data Sent to DSP\n");
+ pr_debug("*** Provision Data Sent to DSP\n");
/* Send provisioning data */
ptr =
- list_entry(info->prov_list.next, struct prov_record,
- list);
- len = *(u16 *) ptr->pprov_data;
+ list_entry(info->prov_list.next, struct prov_record,
+ list);
+ len = *(u16 *)ptr->pprov_data;
len = htons(len);
len += PSEUDOSZ;
- pmsg = (u16 *) ptr->pprov_data;
+ pmsg = (u16 *)ptr->pprov_data;
ppseudo_hdr = (struct pseudo_hdr *)pmsg;
/* Insert slow queue sequence number */
ppseudo_hdr->seq_num = info->squeseqnum++;
@@ -1109,12 +1094,12 @@ static int ft1000_dsp_prov(void *arg)
memcpy(&TempShortBuf[2], ppseudo_hdr, len);
status =
- ft1000_write_dpram32(dev, 0,
- (u8 *) &TempShortBuf[0],
- (unsigned short)(len + 2));
+ ft1000_write_dpram32(dev, 0,
+ (u8 *)&TempShortBuf[0],
+ (unsigned short)(len + 2));
status =
- ft1000_write_register(dev, FT1000_DB_DPRAM_TX,
- FT1000_REG_DOORBELL);
+ ft1000_write_register(dev, FT1000_DB_DPRAM_TX,
+ FT1000_REG_DOORBELL);
list_del(&ptr->list);
kfree(ptr->pprov_data);
@@ -1123,7 +1108,7 @@ static int ft1000_dsp_prov(void *arg)
usleep_range(9000, 11000);
}
- DEBUG("DSP Provisioning List Entry finished\n");
+ pr_debug("DSP Provisioning List Entry finished\n");
msleep(100);
@@ -1158,37 +1143,26 @@ static int ft1000_proc_drvmsg(struct ft1000_usb *dev, u16 size)
status = ft1000_read_dpram32(dev, 0x200, cmdbuffer, size);
#ifdef JDEBUG
- DEBUG("ft1000_proc_drvmsg:cmdbuffer\n");
- for (i = 0; i < size; i += 5) {
- if ((i + 5) < size)
- DEBUG("0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n", cmdbuffer[i],
- cmdbuffer[i + 1], cmdbuffer[i + 2],
- cmdbuffer[i + 3], cmdbuffer[i + 4]);
- else {
- for (j = i; j < size; j++)
- DEBUG("0x%x ", cmdbuffer[j]);
- DEBUG("\n");
- break;
- }
- }
+ print_hex_dump_debug("cmdbuffer: ", HEX_DUMP_OFFSET, 16, 1,
+ cmdbuffer, size, true);
#endif
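/* Reference for the print_hex_dump_debug() call above -- a description of
 * the generic <linux/printk.h> helper, not driver code: its arguments are
 * (prefix_str, prefix_type, rowsize, groupsize, buf, len, ascii), so this
 * call dumps 'size' bytes of cmdbuffer, 16 bytes per line in single-byte
 * groups, each line tagged "cmdbuffer: " plus the offset into the buffer,
 * with an ASCII column appended.
 */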
pdrvmsg = (struct drv_msg *)&cmdbuffer[2];
msgtype = ntohs(pdrvmsg->type);
- DEBUG("ft1000_proc_drvmsg:Command message type = 0x%x\n", msgtype);
+ pr_debug("Command message type = 0x%x\n", msgtype);
switch (msgtype) {
case MEDIA_STATE:{
- DEBUG("ft1000_proc_drvmsg:Command message type = MEDIA_STATE");
+ pr_debug("Command message type = MEDIA_STATE\n");
pmediamsg = (struct media_msg *)&cmdbuffer[0];
if (info->ProgConStat != 0xFF) {
if (pmediamsg->state) {
- DEBUG("Media is up\n");
+ pr_debug("Media is up\n");
if (info->mediastate == 0) {
if (dev->NetDevRegDone)
netif_wake_queue(dev->net);
info->mediastate = 1;
}
} else {
- DEBUG("Media is down\n");
+ pr_debug("Media is down\n");
if (info->mediastate == 1) {
info->mediastate = 0;
if (dev->NetDevRegDone)
@@ -1196,7 +1170,7 @@ static int ft1000_proc_drvmsg(struct ft1000_usb *dev, u16 size)
}
}
} else {
- DEBUG("Media is down\n");
+ pr_debug("Media is down\n");
if (info->mediastate == 1) {
info->mediastate = 0;
info->ConTm = 0;
@@ -1205,20 +1179,20 @@ static int ft1000_proc_drvmsg(struct ft1000_usb *dev, u16 size)
break;
}
case DSP_INIT_MSG:{
- DEBUG("ft1000_proc_drvmsg:Command message type = DSP_INIT_MSG");
+ pr_debug("Command message type = DSP_INIT_MSG\n");
pdspinitmsg = (struct dsp_init_msg *)&cmdbuffer[2];
memcpy(info->DspVer, pdspinitmsg->DspVer, DSPVERSZ);
- DEBUG("DSPVER = 0x%2x 0x%2x 0x%2x 0x%2x\n",
- info->DspVer[0], info->DspVer[1], info->DspVer[2],
- info->DspVer[3]);
+ pr_debug("DSPVER = 0x%2x 0x%2x 0x%2x 0x%2x\n",
+ info->DspVer[0], info->DspVer[1], info->DspVer[2],
+ info->DspVer[3]);
memcpy(info->HwSerNum, pdspinitmsg->HwSerNum,
HWSERNUMSZ);
memcpy(info->Sku, pdspinitmsg->Sku, SKUSZ);
memcpy(info->eui64, pdspinitmsg->eui64, EUISZ);
- DEBUG("EUI64=%2x.%2x.%2x.%2x.%2x.%2x.%2x.%2x\n",
- info->eui64[0], info->eui64[1], info->eui64[2],
- info->eui64[3], info->eui64[4], info->eui64[5],
- info->eui64[6], info->eui64[7]);
+ pr_debug("EUI64=%2x.%2x.%2x.%2x.%2x.%2x.%2x.%2x\n",
+ info->eui64[0], info->eui64[1], info->eui64[2],
+ info->eui64[3], info->eui64[4], info->eui64[5],
+ info->eui64[6], info->eui64[7]);
dev->net->dev_addr[0] = info->eui64[0];
dev->net->dev_addr[1] = info->eui64[1];
dev->net->dev_addr[2] = info->eui64[2];
@@ -1229,17 +1203,17 @@ static int ft1000_proc_drvmsg(struct ft1000_usb *dev, u16 size)
if (ntohs(pdspinitmsg->length) ==
(sizeof(struct dsp_init_msg) - 20)) {
memcpy(info->ProductMode, pdspinitmsg->ProductMode,
- MODESZ);
+ MODESZ);
memcpy(info->RfCalVer, pdspinitmsg->RfCalVer, CALVERSZ);
memcpy(info->RfCalDate, pdspinitmsg->RfCalDate,
CALDATESZ);
- DEBUG("RFCalVer = 0x%2x 0x%2x\n", info->RfCalVer[0],
- info->RfCalVer[1]);
+ pr_debug("RFCalVer = 0x%2x 0x%2x\n",
+ info->RfCalVer[0], info->RfCalVer[1]);
}
break;
}
case DSP_PROVISION:{
- DEBUG("ft1000_proc_drvmsg:Command message type = DSP_PROVISION\n");
+ pr_debug("Command message type = DSP_PROVISION\n");
/* kick off dspprov routine to start provisioning
* Send provisioning data to DSP
@@ -1252,21 +1226,20 @@ static int ft1000_proc_drvmsg(struct ft1000_usb *dev, u16 size)
} else {
dev->fProvComplete = true;
status = ft1000_write_register(dev, FT1000_DB_HB,
- FT1000_REG_DOORBELL);
- DEBUG("FT1000:drivermsg:No more DSP provisioning data in dsp image\n");
+ FT1000_REG_DOORBELL);
+ pr_debug("No more DSP provisioning data in dsp image\n");
}
- DEBUG("ft1000_proc_drvmsg:DSP PROVISION is done\n");
+ pr_debug("DSP PROVISION is done\n");
break;
}
case DSP_STORE_INFO:{
- DEBUG("ft1000_proc_drvmsg:Command message type = DSP_STORE_INFO");
- DEBUG("FT1000:drivermsg:Got DSP_STORE_INFO\n");
+ pr_debug("Command message type = DSP_STORE_INFO");
tempword = ntohs(pdrvmsg->length);
info->DSPInfoBlklen = tempword;
if (tempword < (MAX_DSP_SESS_REC - 4)) {
- pmsg = (u16 *) &pdrvmsg->data[0];
+ pmsg = (u16 *)&pdrvmsg->data[0];
for (i = 0; i < ((tempword + 1) / 2); i++) {
- DEBUG("FT1000:drivermsg:dsp info data = 0x%x\n", *pmsg);
+ pr_debug("dsp info data = 0x%x\n", *pmsg);
info->DSPInfoBlk[i + 10] = *pmsg++;
}
} else {
@@ -1275,33 +1248,33 @@ static int ft1000_proc_drvmsg(struct ft1000_usb *dev, u16 size)
break;
}
case DSP_GET_INFO:{
- DEBUG("FT1000:drivermsg:Got DSP_GET_INFO\n");
+ pr_debug("Got DSP_GET_INFO\n");
/* copy dsp info block to dsp */
dev->DrvMsgPend = 1;
/* allow any outstanding ioctl to finish */
mdelay(10);
status = ft1000_read_register(dev, &tempword,
- FT1000_REG_DOORBELL);
+ FT1000_REG_DOORBELL);
if (tempword & FT1000_DB_DPRAM_TX) {
mdelay(10);
status = ft1000_read_register(dev, &tempword,
- FT1000_REG_DOORBELL);
+ FT1000_REG_DOORBELL);
if (tempword & FT1000_DB_DPRAM_TX) {
mdelay(10);
status = ft1000_read_register(dev, &tempword,
- FT1000_REG_DOORBELL);
+ FT1000_REG_DOORBELL);
if (tempword & FT1000_DB_DPRAM_TX)
break;
}
}
/* Put message into Slow Queue Form Pseudo header */
- pmsg = (u16 *) info->DSPInfoBlk;
+ pmsg = (u16 *)info->DSPInfoBlk;
*pmsg++ = 0;
*pmsg++ = htons(info->DSPInfoBlklen + 20 + info->DSPInfoBlklen);
ppseudo_hdr =
- (struct pseudo_hdr *)(u16 *) &info->DSPInfoBlk[2];
+ (struct pseudo_hdr *)(u16 *)&info->DSPInfoBlk[2];
ppseudo_hdr->length = htons(info->DSPInfoBlklen + 4
- + info->DSPInfoBlklen);
+ + info->DSPInfoBlklen);
ppseudo_hdr->source = 0x10;
ppseudo_hdr->destination = 0x20;
ppseudo_hdr->portdest = 0;
@@ -1323,31 +1296,31 @@ static int ft1000_proc_drvmsg(struct ft1000_usb *dev, u16 size)
info->DSPInfoBlk[10] = 0x7200;
info->DSPInfoBlk[11] = htons(info->DSPInfoBlklen);
status = ft1000_write_dpram32(dev, 0,
- (u8 *)&info->DSPInfoBlk[0],
- (unsigned short)(info->DSPInfoBlklen + 22));
+ (u8 *)&info->DSPInfoBlk[0],
+ (unsigned short)(info->DSPInfoBlklen + 22));
status = ft1000_write_register(dev, FT1000_DB_DPRAM_TX,
- FT1000_REG_DOORBELL);
+ FT1000_REG_DOORBELL);
dev->DrvMsgPend = 0;
break;
}
case GET_DRV_ERR_RPT_MSG:{
- DEBUG("FT1000:drivermsg:Got GET_DRV_ERR_RPT_MSG\n");
+ pr_debug("Got GET_DRV_ERR_RPT_MSG\n");
/* copy driver error message to dsp */
dev->DrvMsgPend = 1;
/* allow any outstanding ioctl to finish */
mdelay(10);
status = ft1000_read_register(dev, &tempword,
- FT1000_REG_DOORBELL);
+ FT1000_REG_DOORBELL);
if (tempword & FT1000_DB_DPRAM_TX) {
mdelay(10);
status = ft1000_read_register(dev, &tempword,
- FT1000_REG_DOORBELL);
+ FT1000_REG_DOORBELL);
if (tempword & FT1000_DB_DPRAM_TX)
mdelay(10);
}
if ((tempword & FT1000_DB_DPRAM_TX) == 0) {
/* Put message into Slow Queue Form Pseudo header */
- pmsg = (u16 *) &tempbuffer[0];
+ pmsg = (u16 *)&tempbuffer[0];
ppseudo_hdr = (struct pseudo_hdr *)pmsg;
ppseudo_hdr->length = htons(0x0012);
ppseudo_hdr->source = 0x10;
@@ -1368,7 +1341,7 @@ static int ft1000_proc_drvmsg(struct ft1000_usb *dev, u16 size)
for (i = 1; i < 7; i++)
ppseudo_hdr->checksum ^= *pmsg++;
- pmsg = (u16 *) &tempbuffer[16];
+ pmsg = (u16 *)&tempbuffer[16];
*pmsg++ = htons(RSP_DRV_ERR_RPT_MSG);
*pmsg++ = htons(0x000e);
*pmsg++ = htons(info->DSP_TIME[0]);
@@ -1384,7 +1357,7 @@ static int ft1000_proc_drvmsg(struct ft1000_usb *dev, u16 size)
*pmsg++ = htons(info->DrvErrNum);
status = card_send_command(dev, (unsigned char *)&tempbuffer[0],
- (u16)(0x0012 + PSEUDOSZ));
+ (u16)(0x0012 + PSEUDOSZ));
if (status)
goto out;
info->DrvErrNum = 0;
@@ -1399,7 +1372,6 @@ static int ft1000_proc_drvmsg(struct ft1000_usb *dev, u16 size)
status = 0;
out:
kfree(cmdbuffer);
- DEBUG("return from ft1000_proc_drvmsg\n");
return status;
}
@@ -1412,32 +1384,32 @@ static int dsp_broadcast_msg_id(struct ft1000_usb *dev)
for (i = 0; i < MAX_NUM_APP; i++) {
if ((dev->app_info[i].DspBCMsgFlag)
- && (dev->app_info[i].fileobject)
- && (dev->app_info[i].NumOfMsg
- < MAX_MSG_LIMIT)) {
+ && (dev->app_info[i].fileobject)
+ && (dev->app_info[i].NumOfMsg
+ < MAX_MSG_LIMIT)) {
pdpram_blk = ft1000_get_buffer(&freercvpool);
if (pdpram_blk == NULL) {
- DEBUG("Out of memory in free receive command pool\n");
+ pr_debug("Out of memory in free receive command pool\n");
dev->app_info[i].nRxMsgMiss++;
return -1;
}
if (ft1000_receive_cmd(dev, pdpram_blk->pbuffer,
- MAX_CMD_SQSIZE)) {
+ MAX_CMD_SQSIZE)) {
/* Put message into the
* appropriate application block
*/
dev->app_info[i].nRxMsg++;
spin_lock_irqsave(&free_buff_lock, flags);
list_add_tail(&pdpram_blk->list,
- &dev->app_info[i] .app_sqlist);
+ &dev->app_info[i] .app_sqlist);
dev->app_info[i].NumOfMsg++;
spin_unlock_irqrestore(&free_buff_lock, flags);
wake_up_interruptible(&dev->app_info[i]
- .wait_dpram_msg);
+ .wait_dpram_msg);
} else {
dev->app_info[i].nRxMsgMiss++;
ft1000_free_buffer(pdpram_blk, &freercvpool);
- DEBUG("pdpram_blk::ft1000_get_buffer NULL\n");
+ pr_debug("ft1000_get_buffer NULL\n");
return -1;
}
}
@@ -1452,7 +1424,7 @@ static int handle_misc_portid(struct ft1000_usb *dev)
pdpram_blk = ft1000_get_buffer(&freercvpool);
if (pdpram_blk == NULL) {
- DEBUG("Out of memory in free receive command pool\n");
+ pr_debug("Out of memory in free receive command pool\n");
return -1;
}
if (!ft1000_receive_cmd(dev, pdpram_blk->pbuffer, MAX_CMD_SQSIZE))
@@ -1461,11 +1433,12 @@ static int handle_misc_portid(struct ft1000_usb *dev)
/* Search for correct application block */
for (i = 0; i < MAX_NUM_APP; i++) {
if (dev->app_info[i].app_id == ((struct pseudo_hdr *)
- pdpram_blk->pbuffer)->portdest)
+ pdpram_blk->pbuffer)->portdest)
break;
}
if (i == MAX_NUM_APP) {
- DEBUG("FT1000:ft1000_parse_dpram_msg: No application matching id = %d\n", ((struct pseudo_hdr *)pdpram_blk->pbuffer)->portdest);
+ pr_debug("No application matching id = %d\n",
+ ((struct pseudo_hdr *)pdpram_blk->pbuffer)->portdest);
goto exit_failure;
} else if (dev->app_info[i].NumOfMsg > MAX_MSG_LIMIT) {
goto exit_failure;
@@ -1495,26 +1468,26 @@ int ft1000_poll(void *dev_id)
u16 portid;
if (ft1000_chkcard(dev) == FALSE) {
- DEBUG("ft1000_poll::ft1000_chkcard: failed\n");
+ pr_debug("failed\n");
return -1;
}
status = ft1000_read_register(dev, &tempword, FT1000_REG_DOORBELL);
if (!status) {
if (tempword & FT1000_DB_DPRAM_RX) {
status = ft1000_read_dpram16(dev,
- 0x200, (u8 *)&data, 0);
+ 0x200, (u8 *)&data, 0);
size = ntohs(data) + 16 + 2;
if (size % 4) {
modulo = 4 - (size % 4);
size = size + modulo;
}
status = ft1000_read_dpram16(dev, 0x201,
- (u8 *)&portid, 1);
+ (u8 *)&portid, 1);
portid &= 0xff;
if (size < MAX_CMD_SQSIZE) {
switch (portid) {
case DRIVERID:
- DEBUG("ft1000_poll: FT1000_REG_DOORBELL message type: FT1000_DB_DPRAM_RX : portid DRIVERID\n");
+ pr_debug("FT1000_REG_DOORBELL message type: FT1000_DB_DPRAM_RX : portid DRIVERID\n");
status = ft1000_proc_drvmsg(dev, size);
if (status != 0)
return status;
@@ -1527,87 +1500,88 @@ int ft1000_poll(void *dev_id)
break;
}
} else
- DEBUG("FT1000:dpc:Invalid total length for SlowQ = %d\n", size);
+ pr_debug("Invalid total length for SlowQ = %d\n",
+ size);
status = ft1000_write_register(dev,
- FT1000_DB_DPRAM_RX,
- FT1000_REG_DOORBELL);
+ FT1000_DB_DPRAM_RX,
+ FT1000_REG_DOORBELL);
} else if (tempword & FT1000_DSP_ASIC_RESET) {
/* Let's reset the ASIC from the Host side as well */
status = ft1000_write_register(dev, ASIC_RESET_BIT,
- FT1000_REG_RESET);
+ FT1000_REG_RESET);
status = ft1000_read_register(dev, &tempword,
- FT1000_REG_RESET);
+ FT1000_REG_RESET);
i = 0;
while (tempword & ASIC_RESET_BIT) {
status = ft1000_read_register(dev, &tempword,
- FT1000_REG_RESET);
+ FT1000_REG_RESET);
usleep_range(9000, 11000);
i++;
if (i == 100)
break;
}
if (i == 100) {
- DEBUG("Unable to reset ASIC\n");
+ pr_debug("Unable to reset ASIC\n");
return 0;
}
usleep_range(9000, 11000);
/* Program WMARK register */
status = ft1000_write_register(dev, 0x600,
- FT1000_REG_MAG_WATERMARK);
+ FT1000_REG_MAG_WATERMARK);
/* clear ASIC reset doorbell */
status = ft1000_write_register(dev,
- FT1000_DSP_ASIC_RESET,
- FT1000_REG_DOORBELL);
+ FT1000_DSP_ASIC_RESET,
+ FT1000_REG_DOORBELL);
usleep_range(9000, 11000);
} else if (tempword & FT1000_ASIC_RESET_REQ) {
- DEBUG("ft1000_poll: FT1000_REG_DOORBELL message type: FT1000_ASIC_RESET_REQ\n");
+ pr_debug("FT1000_REG_DOORBELL message type: FT1000_ASIC_RESET_REQ\n");
/* clear ASIC reset request from DSP */
status = ft1000_write_register(dev,
- FT1000_ASIC_RESET_REQ,
- FT1000_REG_DOORBELL);
+ FT1000_ASIC_RESET_REQ,
+ FT1000_REG_DOORBELL);
status = ft1000_write_register(dev, HOST_INTF_BE,
- FT1000_REG_SUP_CTRL);
+ FT1000_REG_SUP_CTRL);
/* copy dsp session record from Adapter block */
status = ft1000_write_dpram32(dev, 0,
- (u8 *)&info->DSPSess.Rec[0], 1024);
+ (u8 *)&info->DSPSess.Rec[0], 1024);
status = ft1000_write_register(dev, 0x600,
- FT1000_REG_MAG_WATERMARK);
+ FT1000_REG_MAG_WATERMARK);
/* ring doorbell to tell DSP that
* ASIC is out of reset
* */
status = ft1000_write_register(dev,
- FT1000_ASIC_RESET_DSP,
- FT1000_REG_DOORBELL);
+ FT1000_ASIC_RESET_DSP,
+ FT1000_REG_DOORBELL);
} else if (tempword & FT1000_DB_COND_RESET) {
- DEBUG("ft1000_poll: FT1000_REG_DOORBELL message type: FT1000_DB_COND_RESET\n");
+ pr_debug("FT1000_REG_DOORBELL message type: FT1000_DB_COND_RESET\n");
if (!dev->fAppMsgPend) {
/* Reset ASIC and DSP */
status = ft1000_read_dpram16(dev,
- FT1000_MAG_DSP_TIMER0,
- (u8 *)&(info->DSP_TIME[0]),
- FT1000_MAG_DSP_TIMER0_INDX);
+ FT1000_MAG_DSP_TIMER0,
+ (u8 *)&(info->DSP_TIME[0]),
+ FT1000_MAG_DSP_TIMER0_INDX);
status = ft1000_read_dpram16(dev,
- FT1000_MAG_DSP_TIMER1,
- (u8 *)&(info->DSP_TIME[1]),
- FT1000_MAG_DSP_TIMER1_INDX);
+ FT1000_MAG_DSP_TIMER1,
+ (u8 *)&(info->DSP_TIME[1]),
+ FT1000_MAG_DSP_TIMER1_INDX);
status = ft1000_read_dpram16(dev,
- FT1000_MAG_DSP_TIMER2,
- (u8 *)&(info->DSP_TIME[2]),
- FT1000_MAG_DSP_TIMER2_INDX);
+ FT1000_MAG_DSP_TIMER2,
+ (u8 *)&(info->DSP_TIME[2]),
+ FT1000_MAG_DSP_TIMER2_INDX);
status = ft1000_read_dpram16(dev,
- FT1000_MAG_DSP_TIMER3,
- (u8 *)&(info->DSP_TIME[3]),
- FT1000_MAG_DSP_TIMER3_INDX);
+ FT1000_MAG_DSP_TIMER3,
+ (u8 *)&(info->DSP_TIME[3]),
+ FT1000_MAG_DSP_TIMER3_INDX);
info->CardReady = 0;
info->DrvErrNum = DSP_CONDRESET_INFO;
- DEBUG("ft1000_hw:DSP conditional reset requested\n");
+ pr_debug("DSP conditional reset requested\n");
info->ft1000_reset(dev->net);
} else {
dev->fProvComplete = false;
dev->fCondResetPend = true;
}
ft1000_write_register(dev, FT1000_DB_COND_RESET,
- FT1000_REG_DOORBELL);
+ FT1000_REG_DOORBELL);
}
}
return 0;
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_ioctl.h b/drivers/staging/ft1000/ft1000-usb/ft1000_ioctl.h
index cb644a58d9f..e9472bebda0 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_ioctl.h
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_ioctl.h
@@ -1,30 +1,30 @@
/*
-*---------------------------------------------------------------------------
-* FT1000 driver for Flarion Flash OFDM NIC Device
-*
-* Copyright (C) 2002 Flarion Technologies, All rights reserved.
-*
-* This program is free software; you can redistribute it and/or modify it
-* under the terms of the GNU General Public License as published by the Free
-* Software Foundation; either version 2 of the License, or (at your option) any
-* later version. This program is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-* more details. You should have received a copy of the GNU General Public
-* License along with this program; if not, write to the
-* Free Software Foundation, Inc., 59 Temple Place -
-* Suite 330, Boston, MA 02111-1307, USA.
-*---------------------------------------------------------------------------
-*
-* File: ft1000_ioctl.h
-*
-* Description: Common structures and defines relating to IOCTL
-*
-* History:
-* 11/5/02 Whc Created.
-*
-*---------------------------------------------------------------------------
-*/
+ *---------------------------------------------------------------------------
+ * FT1000 driver for Flarion Flash OFDM NIC Device
+ *
+ * Copyright (C) 2002 Flarion Technologies, All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version. This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details. You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place -
+ * Suite 330, Boston, MA 02111-1307, USA.
+ *---------------------------------------------------------------------------
+ *
+ * File: ft1000_ioctl.h
+ *
+ * Description: Common structures and defines relating to IOCTL
+ *
+ * History:
+ * 11/5/02 Whc Created.
+ *
+ *---------------------------------------------------------------------------
+ */
#ifndef _FT1000IOCTLH_
#define _FT1000IOCTLH_
@@ -94,8 +94,8 @@ struct IOCTL_DPRAM_COMMAND {
} __packed;
/*
-* Custom IOCTL command codes
-*/
+ * Custom IOCTL command codes
+ */
#define FT1000_MAGIC_CODE 'F'
#define IOCTL_REGISTER_CMD 0
@@ -106,8 +106,8 @@ struct IOCTL_DPRAM_COMMAND {
#define IOCTL_CONNECT 10
#define IOCTL_DISCONNECT 11
-#define IOCTL_FT1000_GET_DSP_STAT _IOR(FT1000_MAGIC_CODE, \
- IOCTL_GET_DSP_STAT_CMD, \
+#define IOCTL_FT1000_GET_DSP_STAT _IOR(FT1000_MAGIC_CODE, \
+ IOCTL_GET_DSP_STAT_CMD, \
struct IOCTL_GET_DSP_STAT)
#define IOCTL_FT1000_GET_VER _IOR(FT1000_MAGIC_CODE, IOCTL_GET_VER_CMD, \
struct IOCTL_GET_VER)
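For orientation, an illustrative user-space sketch of how the _IOR()
numbers above are consumed (the device node path and extra includes are
assumptions for illustration, not taken from this header): _IOR(type, nr,
size) packs a read-direction request from the magic character 'F', the
command index and sizeof(size), so a matching call looks roughly like

	/* needs <fcntl.h>, <sys/ioctl.h> and this header */
	struct IOCTL_GET_DSP_STAT dsp_stat;
	int fd = open("/dev/ft1000", O_RDWR);	/* node name assumed */

	if (fd >= 0)
		ioctl(fd, IOCTL_FT1000_GET_DSP_STAT, &dsp_stat);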
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c
index 39be30c0eed..a6b55f42c07 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c
@@ -7,6 +7,9 @@
* $Id:
*====================================================
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
@@ -45,7 +48,7 @@ static int ft1000_poll_thread(void *arg)
if (!gPollingfailed) {
ret = ft1000_poll(arg);
if (ret != 0) {
- DEBUG("ft1000_poll_thread: polling failed\n");
+ pr_debug("polling failed\n");
gPollingfailed = true;
}
}
@@ -71,9 +74,8 @@ static int ft1000_probe(struct usb_interface *interface,
return -ENOMEM;
dev = interface_to_usbdev(interface);
- DEBUG("ft1000_probe: usb device descriptor info:\n");
- DEBUG("ft1000_probe: number of configuration is %d\n",
- dev->descriptor.bNumConfigurations);
+ pr_debug("usb device descriptor info - number of configuration is %d\n",
+ dev->descriptor.bNumConfigurations);
ft1000dev->dev = dev;
ft1000dev->status = 0;
@@ -85,60 +87,56 @@ static int ft1000_probe(struct usb_interface *interface,
goto err_fw;
}
- DEBUG("ft1000_probe is called\n");
numaltsetting = interface->num_altsetting;
- DEBUG("ft1000_probe: number of alt settings is :%d\n", numaltsetting);
+ pr_debug("number of alt settings is: %d\n", numaltsetting);
iface_desc = interface->cur_altsetting;
- DEBUG("ft1000_probe: number of endpoints is %d\n",
- iface_desc->desc.bNumEndpoints);
- DEBUG("ft1000_probe: descriptor type is %d\n",
- iface_desc->desc.bDescriptorType);
- DEBUG("ft1000_probe: interface number is %d\n",
- iface_desc->desc.bInterfaceNumber);
- DEBUG("ft1000_probe: alternatesetting is %d\n",
- iface_desc->desc.bAlternateSetting);
- DEBUG("ft1000_probe: interface class is %d\n",
- iface_desc->desc.bInterfaceClass);
- DEBUG("ft1000_probe: control endpoint info:\n");
- DEBUG("ft1000_probe: descriptor0 type -- %d\n",
- iface_desc->endpoint[0].desc.bmAttributes);
- DEBUG("ft1000_probe: descriptor1 type -- %d\n",
- iface_desc->endpoint[1].desc.bmAttributes);
- DEBUG("ft1000_probe: descriptor2 type -- %d\n",
- iface_desc->endpoint[2].desc.bmAttributes);
+ pr_debug("number of endpoints is: %d\n",
+ iface_desc->desc.bNumEndpoints);
+ pr_debug("descriptor type is: %d\n", iface_desc->desc.bDescriptorType);
+ pr_debug("interface number is: %d\n",
+ iface_desc->desc.bInterfaceNumber);
+ pr_debug("alternatesetting is: %d\n",
+ iface_desc->desc.bAlternateSetting);
+ pr_debug("interface class is: %d\n", iface_desc->desc.bInterfaceClass);
+ pr_debug("control endpoint info:\n");
+ pr_debug("descriptor0 type -- %d\n",
+ iface_desc->endpoint[0].desc.bmAttributes);
+ pr_debug("descriptor1 type -- %d\n",
+ iface_desc->endpoint[1].desc.bmAttributes);
+ pr_debug("descriptor2 type -- %d\n",
+ iface_desc->endpoint[2].desc.bmAttributes);
for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
endpoint =
- (struct usb_endpoint_descriptor *)&iface_desc->
- endpoint[i].desc;
- DEBUG("endpoint %d\n", i);
- DEBUG("bEndpointAddress=%x, bmAttributes=%x\n",
- endpoint->bEndpointAddress, endpoint->bmAttributes);
+ (struct usb_endpoint_descriptor *)&iface_desc->
+ endpoint[i].desc;
+ pr_debug("endpoint %d\n", i);
+ pr_debug("bEndpointAddress=%x, bmAttributes=%x\n",
+ endpoint->bEndpointAddress, endpoint->bmAttributes);
if ((endpoint->bEndpointAddress & USB_DIR_IN)
&& ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
USB_ENDPOINT_XFER_BULK)) {
ft1000dev->bulk_in_endpointAddr =
- endpoint->bEndpointAddress;
- DEBUG("ft1000_probe: in: %d\n",
- endpoint->bEndpointAddress);
+ endpoint->bEndpointAddress;
+ pr_debug("in: %d\n", endpoint->bEndpointAddress);
}
if (!(endpoint->bEndpointAddress & USB_DIR_IN)
&& ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
USB_ENDPOINT_XFER_BULK)) {
ft1000dev->bulk_out_endpointAddr =
- endpoint->bEndpointAddress;
- DEBUG("ft1000_probe: out: %d\n",
- endpoint->bEndpointAddress);
+ endpoint->bEndpointAddress;
+ pr_debug("out: %d\n", endpoint->bEndpointAddress);
}
}
- DEBUG("bulk_in=%d, bulk_out=%d\n", ft1000dev->bulk_in_endpointAddr,
- ft1000dev->bulk_out_endpointAddr);
+ pr_debug("bulk_in=%d, bulk_out=%d\n",
+ ft1000dev->bulk_in_endpointAddr,
+ ft1000dev->bulk_out_endpointAddr);
ret = request_firmware(&dsp_fw, "ft3000.img", &dev->dev);
if (ret < 0) {
- pr_err("Error request_firmware().\n");
+ pr_err("Error request_firmware()\n");
goto err_fw;
}
@@ -155,7 +153,7 @@ static int ft1000_probe(struct usb_interface *interface,
FileLength = dsp_fw->size;
release_firmware(dsp_fw);
- DEBUG("ft1000_probe: start downloading dsp image...\n");
+ pr_debug("start downloading dsp image...\n");
ret = init_ft1000_netdev(ft1000dev);
if (ret)
@@ -163,7 +161,7 @@ static int ft1000_probe(struct usb_interface *interface,
pft1000info = netdev_priv(ft1000dev->net);
- DEBUG("In probe: pft1000info=%p\n", pft1000info);
+ pr_debug("pft1000info=%p\n", pft1000info);
ret = dsp_reload(ft1000dev);
if (ret) {
pr_err("Problem with DSP image loading\n");
@@ -172,7 +170,7 @@ static int ft1000_probe(struct usb_interface *interface,
gPollingfailed = false;
ft1000dev->pPollThread =
- kthread_run(ft1000_poll_thread, ft1000dev, "ft1000_poll");
+ kthread_run(ft1000_poll_thread, ft1000dev, "ft1000_poll");
if (IS_ERR(ft1000dev->pPollThread)) {
ret = PTR_ERR(ft1000dev->pPollThread);
@@ -187,10 +185,10 @@ static int ft1000_probe(struct usb_interface *interface,
goto err_thread;
}
msleep(100);
- DEBUG("ft1000_probe::Waiting for Card Ready\n");
+ pr_debug("Waiting for Card Ready\n");
}
- DEBUG("ft1000_probe::Card Ready!!!! Registering network device\n");
+ pr_debug("Card Ready!!!! Registering network device\n");
ret = reg_ft1000_netdev(ft1000dev, interface);
if (ret)
@@ -216,24 +214,21 @@ static void ft1000_disconnect(struct usb_interface *interface)
struct ft1000_info *pft1000info;
struct ft1000_usb *ft1000dev;
- DEBUG("ft1000_disconnect is called\n");
-
- pft1000info = (struct ft1000_info *) usb_get_intfdata(interface);
- DEBUG("In disconnect pft1000info=%p\n", pft1000info);
+ pft1000info = (struct ft1000_info *)usb_get_intfdata(interface);
+ pr_debug("In disconnect pft1000info=%p\n", pft1000info);
if (pft1000info) {
ft1000dev = pft1000info->priv;
if (ft1000dev->pPollThread)
kthread_stop(ft1000dev->pPollThread);
- DEBUG("ft1000_disconnect: threads are terminated\n");
+ pr_debug("threads are terminated\n");
if (ft1000dev->net) {
- DEBUG("ft1000_disconnect: destroy char driver\n");
+ pr_debug("destroy char driver\n");
ft1000_destroy_dev(ft1000dev->net);
unregister_netdev(ft1000dev->net);
- DEBUG
- ("ft1000_disconnect: network device unregistered\n");
+ pr_debug("network device unregistered\n");
free_netdev(ft1000dev->net);
}
@@ -241,7 +236,7 @@ static void ft1000_disconnect(struct usb_interface *interface)
usb_free_urb(ft1000dev->rx_urb);
usb_free_urb(ft1000dev->tx_urb);
- DEBUG("ft1000_disconnect: urb freed\n");
+ pr_debug("urb freed\n");
kfree(ft1000dev);
}
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h
index 8f7ccae57f3..fea60d5651a 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h
@@ -28,8 +28,6 @@ struct app_info_block {
*/
} __packed;
-#define DEBUG(args...) pr_info(args)
-
#define FALSE 0
#define TRUE 1
@@ -137,7 +135,7 @@ extern spinlock_t free_buff_lock;
int ft1000_create_dev(struct ft1000_usb *dev);
void ft1000_destroy_dev(struct net_device *dev);
extern int card_send_command(struct ft1000_usb *ft1000dev,
- void *ptempbuffer, int size);
+ void *ptempbuffer, int size);
struct dpram_blk *ft1000_get_buffer(struct list_head *bufflist);
void ft1000_free_buffer(struct dpram_blk *pdpram_blk, struct list_head *plist);
diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
index af0c3878358..73deae3cd9e 100644
--- a/drivers/staging/fwserial/fwserial.c
+++ b/drivers/staging/fwserial/fwserial.c
@@ -278,7 +278,6 @@ static void fwtty_send_txn_async(struct fwtty_peer *peer,
len, fwtty_common_callback, txn);
}
-
static void __fwtty_restart_tx(struct fwtty_port *port)
{
int len, avail;
@@ -508,7 +507,6 @@ static void fwtty_do_hangup(struct work_struct *work)
tty_kref_put(tty);
}
-
static void fwtty_emit_breaks(struct work_struct *work)
{
struct fwtty_port *port = to_port(to_delayed_work(work), emit_breaks);
@@ -1622,7 +1620,6 @@ static inline int mgmt_pkt_expected_len(__be16 code)
case FWSC_VIRT_CABLE_PLUG_RSP: /* | FWSC_RSP_OK */
return sizeof(pkt.hdr) + sizeof(pkt.plug_rsp);
-
case FWSC_VIRT_CABLE_UNPLUG:
case FWSC_VIRT_CABLE_UNPLUG_RSP:
case FWSC_VIRT_CABLE_PLUG_RSP | FWSC_RSP_NACK:
diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c
index c657639f884..73eede16382 100644
--- a/drivers/staging/gdm724x/gdm_lte.c
+++ b/drivers/staging/gdm724x/gdm_lte.c
@@ -626,7 +626,7 @@ static void gdm_lte_netif_rx(struct net_device *dev, char *buf,
void *addr = buf + sizeof(struct iphdr) +
sizeof(struct udphdr) +
offsetof(struct dhcp_packet, chaddr);
- memcpy(nic->dest_mac_addr, addr, ETH_ALEN);
+ ether_addr_copy(nic->dest_mac_addr, addr);
}
}
@@ -639,7 +639,7 @@ static void gdm_lte_netif_rx(struct net_device *dev, char *buf,
}
/* Format the data so that it can be put to skb */
- memcpy(mac_header_data, nic->dest_mac_addr, ETH_ALEN);
+ ether_addr_copy(mac_header_data, nic->dest_mac_addr);
memcpy(mac_header_data + ETH_ALEN, nic->src_mac_addr, ETH_ALEN);
vlan_eth.h_vlan_TCI = htons(nic->vlan_id);
@@ -842,9 +842,9 @@ static void form_mac_address(u8 *dev_addr, u8 *nic_src, u8 *nic_dest,
{
/* Form the dev_addr */
if (!mac_address)
- memcpy(dev_addr, gdm_lte_macaddr, ETH_ALEN);
+ ether_addr_copy(dev_addr, gdm_lte_macaddr);
else
- memcpy(dev_addr, mac_address, ETH_ALEN);
+ ether_addr_copy(dev_addr, mac_address);
/* The last byte of the mac address
* should be less than or equal to 0xFC
@@ -858,7 +858,7 @@ static void form_mac_address(u8 *dev_addr, u8 *nic_src, u8 *nic_dest,
memcpy(nic_src, dev_addr, 3);
/* Copy the nic_dest from dev_addr*/
- memcpy(nic_dest, dev_addr, ETH_ALEN);
+ ether_addr_copy(nic_dest, dev_addr);
}
static void validate_mac_address(u8 *mac_address)
diff --git a/drivers/staging/gdm724x/gdm_mux.h b/drivers/staging/gdm724x/gdm_mux.h
index 0163b243d3e..3d50383c6ce 100644
--- a/drivers/staging/gdm724x/gdm_mux.h
+++ b/drivers/staging/gdm724x/gdm_mux.h
@@ -35,10 +35,10 @@
#define RETRY_TIMER 30 /* msec */
struct mux_pkt_header {
- unsigned int start_flag;
- unsigned int seq_num;
- unsigned int payload_size;
- unsigned short packet_type;
+ __le32 start_flag;
+ __le32 seq_num;
+ __le32 payload_size;
+ __le16 packet_type;
unsigned char data[0];
};
diff --git a/drivers/staging/gdm72xx/Kconfig b/drivers/staging/gdm72xx/Kconfig
index 5836503caa7..bf11a7fbfc5 100644
--- a/drivers/staging/gdm72xx/Kconfig
+++ b/drivers/staging/gdm72xx/Kconfig
@@ -53,7 +53,7 @@ if WIMAX_GDM72XX_USB
config WIMAX_GDM72XX_USB_PM
bool "Enable power management support"
- depends on PM_RUNTIME
+ depends on PM
help
Enable USB power management in order to reduce power consumption
while the interface is not in use.
diff --git a/drivers/staging/gdm72xx/gdm_wimax.c b/drivers/staging/gdm72xx/gdm_wimax.c
index f5a3378c395..9cab54bfa6f 100644
--- a/drivers/staging/gdm72xx/gdm_wimax.c
+++ b/drivers/staging/gdm72xx/gdm_wimax.c
@@ -115,7 +115,7 @@ static void gdm_wimax_event_rcv(struct net_device *dev, u16 type, void *msg,
{
struct nic *nic = netdev_priv(dev);
- u8 *buf = (u8 *)msg;
+ u8 *buf = msg;
u16 hci_cmd = (buf[0]<<8) | buf[1];
u16 hci_len = (buf[2]<<8) | buf[3];
@@ -605,10 +605,8 @@ static void gdm_wimax_netif_rx(struct net_device *dev, char *buf, int len)
int ret;
skb = dev_alloc_skb(len + 2);
- if (!skb) {
- netdev_err(dev, "%s: dev_alloc_skb failed!\n", __func__);
+ if (!skb)
return;
- }
skb_reserve(skb, 2);
dev->stats.rx_packets++;
diff --git a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
index 6aa9d7c3013..6da72858d28 100644
--- a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
+++ b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
@@ -46,27 +46,6 @@ static char *file = "xlinx_fpga_firmware.bit";
module_param(file, charp, S_IRUGO);
MODULE_PARM_DESC(file, "Xilinx FPGA firmware file.");
-#ifdef DEBUG_FPGA
-static void datadump(char *msg, void *m, int n)
-{
- int i;
- unsigned char *c;
-
- pr_info("=== %s ===\n", msg);
-
- c = m;
-
- for (i = 0; i < n; i++) {
- if ((i&0xf) == 0)
- pr_info(KERN_INFO "\n 0x%4x: ", i);
-
- pr_info("%02X ", c[i]);
- }
-
- pr_info("\n");
-}
-#endif /* DEBUG_FPGA */
-
static void read_bitstream(char *bitdata, char *buf, int *offset, int rdsize)
{
memcpy(buf, bitdata + *offset, rdsize);
@@ -157,12 +136,10 @@ static void gs_print_header(struct fpgaimage *fimage)
static void gs_read_bitstream(struct fpgaimage *fimage)
{
char *bitdata;
- int size;
int offset;
offset = 0;
bitdata = (char *)fimage->fw_entry->data;
- size = fimage->fw_entry->size;
readmagic_bitstream(bitdata, &offset);
readinfo_bitstream(bitdata, fimage->filename, &offset);
@@ -195,15 +172,15 @@ static int gs_read_image(struct fpgaimage *fimage)
return 0;
}
-static int gs_load_image(struct fpgaimage *fimage, char *file)
+static int gs_load_image(struct fpgaimage *fimage, char *fw_file)
{
int err;
- pr_info("load fpgaimage %s\n", file);
+ pr_info("load fpgaimage %s\n", fw_file);
- err = request_firmware(&fimage->fw_entry, file, &firmware_pdev->dev);
+ err = request_firmware(&fimage->fw_entry, fw_file, &firmware_pdev->dev);
if (err != 0) {
- pr_err("firmware %s is missing, cannot continue.\n", file);
+ pr_err("firmware %s is missing, cannot continue.\n", fw_file);
return err;
}
@@ -220,9 +197,9 @@ static int gs_download_image(struct fpgaimage *fimage, enum wbus bus_bytes)
size = fimage->lendata;
#ifdef DEBUG_FPGA
- datadump("bitfile sample", bitdata, 0x100);
+ print_hex_dump_bytes("bitfile sample: ", DUMP_PREFIX_OFFSET,
+ bitdata, 0x100);
#endif /* DEBUG_FPGA */
-
if (!xl_supported_prog_bus_width(bus_bytes)) {
pr_err("unsupported program bus width %d\n",
bus_bytes);
@@ -316,10 +293,8 @@ static int gs_fpgaboot(void)
struct fpgaimage *fimage;
fimage = kmalloc(sizeof(struct fpgaimage), GFP_KERNEL);
- if (fimage == NULL) {
- pr_err("No memory is available\n");
- goto err_out;
- }
+ if (!fimage)
+ return -ENOMEM;
err = gs_load_image(fimage, file);
if (err) {
@@ -361,50 +336,44 @@ err_out2:
err_out1:
kfree(fimage);
-err_out:
return -1;
}
static int __init gs_fpgaboot_init(void)
{
- int err, r;
-
- r = -1;
+ int err;
pr_info("FPGA DOWNLOAD --->\n");
pr_info("FPGA image file name: %s\n", file);
err = init_driver();
- if (err != 0) {
+ if (err) {
pr_err("FPGA DRIVER INIT FAIL!!\n");
- return r;
+ return err;
}
err = xl_init_io();
if (err) {
pr_err("GPIO INIT FAIL!!\n");
- r = -1;
goto errout;
}
err = gs_fpgaboot();
if (err) {
pr_err("FPGA DOWNLOAD FAIL!!\n");
- r = -1;
goto errout;
}
pr_info("FPGA DOWNLOAD DONE <---\n");
- r = 0;
- return r;
+ return 0;
errout:
finish_driver();
- return r;
+ return err;
}
static void __exit gs_fpgaboot_exit(void)
diff --git a/drivers/staging/iio/Documentation/generic_buffer.c b/drivers/staging/iio/Documentation/generic_buffer.c
index 044ea196aa6..de4647e2495 100644
--- a/drivers/staging/iio/Documentation/generic_buffer.c
+++ b/drivers/staging/iio/Documentation/generic_buffer.c
@@ -47,6 +47,7 @@ int size_from_channelarray(struct iio_channel_info *channels, int num_channels)
{
int bytes = 0;
int i = 0;
+
while (i < num_channels) {
if (bytes % channels[i].bytes == 0)
channels[i].location = bytes;
@@ -74,12 +75,14 @@ void print2byte(int input, struct iio_channel_info *info)
input = input >> info->shift;
if (info->is_signed) {
int16_t val = input;
+
val &= (1 << info->bits_used) - 1;
val = (int16_t)(val << (16 - info->bits_used)) >>
(16 - info->bits_used);
printf("%05f ", ((float)val + info->offset)*info->scale);
} else {
uint16_t val = input;
+
val &= (1 << info->bits_used) - 1;
printf("%05f ", ((float)val + info->offset)*info->scale);
}
@@ -97,6 +100,7 @@ void process_scan(char *data,
int num_channels)
{
int k;
+
for (k = 0; k < num_channels; k++)
switch (channels[k].bytes) {
/* only a few cases implemented so far */
@@ -158,11 +162,12 @@ int main(int argc, char **argv)
char *buffer_access;
int scan_size;
int noevents = 0;
+ int notrigger = 0;
char *dummy;
struct iio_channel_info *channels;
- while ((c = getopt(argc, argv, "l:w:c:et:n:")) != -1) {
+ while ((c = getopt(argc, argv, "l:w:c:et:n:g")) != -1) {
switch (c) {
case 'n':
device_name = optarg;
@@ -183,6 +188,9 @@ int main(int argc, char **argv)
case 'l':
buf_len = strtoul(optarg, &dummy, 10);
break;
+ case 'g':
+ notrigger = 1;
+ break;
case '?':
return -1;
}
@@ -201,28 +209,32 @@ int main(int argc, char **argv)
printf("iio device number being used is %d\n", dev_num);
asprintf(&dev_dir_name, "%siio:device%d", iio_dir, dev_num);
- if (trigger_name == NULL) {
- /*
- * Build the trigger name. If it is device associated its
- * name is <device_name>_dev[n] where n matches the device
- * number found above
- */
- ret = asprintf(&trigger_name,
- "%s-dev%d", device_name, dev_num);
- if (ret < 0) {
- ret = -ENOMEM;
- goto error_ret;
+
+ if (!notrigger) {
+ if (trigger_name == NULL) {
+ /*
+ * Build the trigger name. If it is device associated
+ * its name is <device_name>_dev[n] where n matches
+ * the device number found above.
+ */
+ ret = asprintf(&trigger_name,
+ "%s-dev%d", device_name, dev_num);
+ if (ret < 0) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
}
- }
- /* Verify the trigger exists */
- trig_num = find_type_by_name(trigger_name, "trigger");
- if (trig_num < 0) {
- printf("Failed to find the trigger %s\n", trigger_name);
- ret = -ENODEV;
- goto error_free_triggername;
- }
- printf("iio trigger number being used is %d\n", trig_num);
+ /* Verify the trigger exists */
+ trig_num = find_type_by_name(trigger_name, "trigger");
+ if (trig_num < 0) {
+ printf("Failed to find the trigger %s\n", trigger_name);
+ ret = -ENODEV;
+ goto error_free_triggername;
+ }
+ printf("iio trigger number being used is %d\n", trig_num);
+ } else
+ printf("trigger-less mode selected\n");
/*
* Parse the files in scan_elements to identify what channels are
@@ -246,14 +258,18 @@ int main(int argc, char **argv)
ret = -ENOMEM;
goto error_free_triggername;
}
- printf("%s %s\n", dev_dir_name, trigger_name);
- /* Set the device trigger to be the data ready trigger found above */
- ret = write_sysfs_string_and_verify("trigger/current_trigger",
- dev_dir_name,
- trigger_name);
- if (ret < 0) {
- printf("Failed to write current_trigger file\n");
- goto error_free_buf_dir_name;
+
+ if (!notrigger) {
+ printf("%s %s\n", dev_dir_name, trigger_name);
+ /* Set the device trigger to be the data ready trigger found
+ * above */
+ ret = write_sysfs_string_and_verify("trigger/current_trigger",
+ dev_dir_name,
+ trigger_name);
+ if (ret < 0) {
+ printf("Failed to write current_trigger file\n");
+ goto error_free_buf_dir_name;
+ }
}
/* Setup ring buffer parameters */
@@ -323,9 +339,10 @@ int main(int argc, char **argv)
if (ret < 0)
goto error_close_buffer_access;
- /* Disconnect the trigger - just write a dummy name. */
- write_sysfs_string("trigger/current_trigger",
- dev_dir_name, "NULL");
+ if (!notrigger)
+ /* Disconnect the trigger - just write a dummy name. */
+ write_sysfs_string("trigger/current_trigger",
+ dev_dir_name, "NULL");
error_close_buffer_access:
close(fp);
diff --git a/drivers/staging/iio/Documentation/iio_event_monitor.c b/drivers/staging/iio/Documentation/iio_event_monitor.c
index 569d6f8face..940ed2399e7 100644
--- a/drivers/staging/iio/Documentation/iio_event_monitor.c
+++ b/drivers/staging/iio/Documentation/iio_event_monitor.c
@@ -69,16 +69,29 @@ static const char * const iio_modifier_names[] = {
[IIO_MOD_X] = "x",
[IIO_MOD_Y] = "y",
[IIO_MOD_Z] = "z",
+ [IIO_MOD_X_AND_Y] = "x&y",
+ [IIO_MOD_X_AND_Z] = "x&z",
+ [IIO_MOD_Y_AND_Z] = "y&z",
+ [IIO_MOD_X_AND_Y_AND_Z] = "x&y&z",
+ [IIO_MOD_X_OR_Y] = "x|y",
+ [IIO_MOD_X_OR_Z] = "x|z",
+ [IIO_MOD_Y_OR_Z] = "y|z",
+ [IIO_MOD_X_OR_Y_OR_Z] = "x|y|z",
[IIO_MOD_LIGHT_BOTH] = "both",
[IIO_MOD_LIGHT_IR] = "ir",
[IIO_MOD_ROOT_SUM_SQUARED_X_Y] = "sqrt(x^2+y^2)",
[IIO_MOD_SUM_SQUARED_X_Y_Z] = "x^2+y^2+z^2",
- [IIO_MOD_LIGHT_BOTH] = "both",
- [IIO_MOD_LIGHT_IR] = "ir",
[IIO_MOD_LIGHT_CLEAR] = "clear",
[IIO_MOD_LIGHT_RED] = "red",
[IIO_MOD_LIGHT_GREEN] = "green",
[IIO_MOD_LIGHT_BLUE] = "blue",
+ [IIO_MOD_QUATERNION] = "quaternion",
+ [IIO_MOD_TEMP_AMBIENT] = "ambient",
+ [IIO_MOD_TEMP_OBJECT] = "object",
+ [IIO_MOD_NORTH_MAGN] = "from_north_magnetic",
+ [IIO_MOD_NORTH_TRUE] = "from_north_true",
+ [IIO_MOD_NORTH_MAGN_TILT_COMP] = "from_north_magnetic_tilt_comp",
+ [IIO_MOD_NORTH_TRUE_TILT_COMP] = "from_north_true_tilt_comp",
};
static bool event_is_known(struct iio_event_data *event)
@@ -118,6 +131,14 @@ static bool event_is_known(struct iio_event_data *event)
case IIO_MOD_X:
case IIO_MOD_Y:
case IIO_MOD_Z:
+ case IIO_MOD_X_AND_Y:
+ case IIO_MOD_X_AND_Z:
+ case IIO_MOD_Y_AND_Z:
+ case IIO_MOD_X_AND_Y_AND_Z:
+ case IIO_MOD_X_OR_Y:
+ case IIO_MOD_X_OR_Z:
+ case IIO_MOD_Y_OR_Z:
+ case IIO_MOD_X_OR_Y_OR_Z:
case IIO_MOD_LIGHT_BOTH:
case IIO_MOD_LIGHT_IR:
case IIO_MOD_ROOT_SUM_SQUARED_X_Y:
@@ -126,6 +147,13 @@ static bool event_is_known(struct iio_event_data *event)
case IIO_MOD_LIGHT_RED:
case IIO_MOD_LIGHT_GREEN:
case IIO_MOD_LIGHT_BLUE:
+ case IIO_MOD_QUATERNION:
+ case IIO_MOD_TEMP_AMBIENT:
+ case IIO_MOD_TEMP_OBJECT:
+ case IIO_MOD_NORTH_MAGN:
+ case IIO_MOD_NORTH_TRUE:
+ case IIO_MOD_NORTH_MAGN_TILT_COMP:
+ case IIO_MOD_NORTH_TRUE_TILT_COMP:
break;
default:
return false;
diff --git a/drivers/staging/iio/Documentation/iio_utils.h b/drivers/staging/iio/Documentation/iio_utils.h
index 0973a092224..568eff06f80 100644
--- a/drivers/staging/iio/Documentation/iio_utils.h
+++ b/drivers/staging/iio/Documentation/iio_utils.h
@@ -34,6 +34,7 @@ inline int iioutils_break_up_name(const char *full_name,
char *current;
char *w, *r;
char *working;
+
current = strdup(full_name);
working = strtok(current, "_\0");
w = working;
@@ -335,6 +336,7 @@ inline int build_channel_array(const char *device_dir,
if (strcmp(ent->d_name + strlen(ent->d_name) - strlen("_en"),
"_en") == 0) {
int current_enabled = 0;
+
current = &(*ci_array)[count++];
ret = asprintf(&filename,
"%s/%s", scan_el_dir, ent->d_name);
@@ -506,6 +508,7 @@ inline int _write_sysfs_int(char *filename, char *basedir, int val, int verify)
FILE *sysfsfp;
int test;
char *temp = malloc(strlen(basedir) + strlen(filename) + 2);
+
if (temp == NULL)
return -ENOMEM;
sprintf(temp, "%s/%s", basedir, filename);
@@ -554,6 +557,7 @@ int _write_sysfs_string(char *filename, char *basedir, char *val, int verify)
int ret = 0;
FILE *sysfsfp;
char *temp = malloc(strlen(basedir) + strlen(filename) + 2);
+
if (temp == NULL) {
printf("Memory allocation failed\n");
return -ENOMEM;
@@ -614,6 +618,7 @@ int read_sysfs_posint(char *filename, char *basedir)
int ret;
FILE *sysfsfp;
char *temp = malloc(strlen(basedir) + strlen(filename) + 2);
+
if (temp == NULL) {
printf("Memory allocation failed");
return -ENOMEM;
@@ -636,6 +641,7 @@ int read_sysfs_float(char *filename, char *basedir, float *val)
int ret = 0;
FILE *sysfsfp;
char *temp = malloc(strlen(basedir) + strlen(filename) + 2);
+
if (temp == NULL) {
printf("Memory allocation failed");
return -ENOMEM;
@@ -658,6 +664,7 @@ int read_sysfs_string(const char *filename, const char *basedir, char *str)
int ret = 0;
FILE *sysfsfp;
char *temp = malloc(strlen(basedir) + strlen(filename) + 2);
+
if (temp == NULL) {
printf("Memory allocation failed");
return -ENOMEM;
diff --git a/drivers/staging/iio/Documentation/lsiio.c b/drivers/staging/iio/Documentation/lsiio.c
index 24ae9694eb4..98a0de09813 100644
--- a/drivers/staging/iio/Documentation/lsiio.c
+++ b/drivers/staging/iio/Documentation/lsiio.c
@@ -46,6 +46,7 @@ static int dump_channels(const char *dev_dir_name)
{
DIR *dp;
const struct dirent *ent;
+
dp = opendir(dev_dir_name);
if (dp == NULL)
return -errno;
@@ -62,17 +63,17 @@ static int dump_one_device(const char *dev_dir_name)
{
char name[IIO_MAX_NAME_LENGTH];
int dev_idx;
+ int retval;
- sscanf(dev_dir_name + strlen(iio_dir) + strlen(type_device),
+ retval = sscanf(dev_dir_name + strlen(iio_dir) + strlen(type_device),
"%i", &dev_idx);
+ if (retval != 1)
+ return -EINVAL;
read_sysfs_string("name", dev_dir_name, name);
printf("Device %03d: %s\n", dev_idx, name);
- if (verblevel >= VERBLEVEL_SENSORS) {
- int ret = dump_channels(dev_dir_name);
- if (ret)
- return ret;
- }
+ if (verblevel >= VERBLEVEL_SENSORS)
+ return dump_channels(dev_dir_name);
return 0;
}
@@ -80,9 +81,12 @@ static int dump_one_trigger(const char *dev_dir_name)
{
char name[IIO_MAX_NAME_LENGTH];
int dev_idx;
+ int retval;
- sscanf(dev_dir_name + strlen(iio_dir) + strlen(type_trigger),
+ retval = sscanf(dev_dir_name + strlen(iio_dir) + strlen(type_trigger),
"%i", &dev_idx);
+ if (retval != 1)
+ return -EINVAL;
read_sysfs_string("name", dev_dir_name, name);
printf("Trigger %03d: %s\n", dev_idx, name);
return 0;
@@ -107,6 +111,7 @@ static void dump_devices(void)
while (ent = readdir(dp), ent != NULL) {
if (check_prefix(ent->d_name, type_device)) {
char *dev_dir_name;
+
asprintf(&dev_dir_name, "%s%s", iio_dir, ent->d_name);
dump_one_device(dev_dir_name);
free(dev_dir_name);
@@ -118,6 +123,7 @@ static void dump_devices(void)
while (ent = readdir(dp), ent != NULL) {
if (check_prefix(ent->d_name, type_trigger)) {
char *dev_dir_name;
+
asprintf(&dev_dir_name, "%s%s", iio_dir, ent->d_name);
dump_one_trigger(dev_dir_name);
free(dev_dir_name);
diff --git a/drivers/staging/iio/accel/Kconfig b/drivers/staging/iio/accel/Kconfig
index ad45dfbdf41..07b7ffa00ab 100644
--- a/drivers/staging/iio/accel/Kconfig
+++ b/drivers/staging/iio/accel/Kconfig
@@ -9,53 +9,71 @@ config ADIS16201
select IIO_ADIS_LIB
select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
help
- Say yes here to build support for Analog Devices adis16201 dual-axis
+ Say Y here to build support for Analog Devices adis16201 dual-axis
digital inclinometer and accelerometer.
+ To compile this driver as a module, say M here: the module will
+ be called adis16201.
+
config ADIS16203
tristate "Analog Devices ADIS16203 Programmable 360 Degrees Inclinometer"
depends on SPI
select IIO_ADIS_LIB
select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
help
- Say yes here to build support for Analog Devices adis16203 Programmable
+ Say Y here to build support for Analog Devices adis16203 Programmable
360 Degrees Inclinometer.
+ To compile this driver as a module, say M here: the module will be
+ called adis16203.
+
config ADIS16204
tristate "Analog Devices ADIS16204 Programmable High-g Digital Impact Sensor and Recorder"
depends on SPI
select IIO_ADIS_LIB
select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
help
- Say yes here to build support for Analog Devices adis16204 Programmable
+ Say Y here to build support for Analog Devices adis16204 Programmable
High-g Digital Impact Sensor and Recorder.
+ To compile this driver as a module, say M here: the module will be
+ called adis16204.
+
config ADIS16209
tristate "Analog Devices ADIS16209 Dual-Axis Digital Inclinometer and Accelerometer"
depends on SPI
select IIO_ADIS_LIB
select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
help
- Say yes here to build support for Analog Devices adis16209 dual-axis digital inclinometer
+ Say Y here to build support for Analog Devices adis16209 dual-axis digital inclinometer
and accelerometer.
+ To compile this driver as a module, say M here: the module will be
+ called adis16209.
+
config ADIS16220
tristate "Analog Devices ADIS16220 Programmable Digital Vibration Sensor"
depends on SPI
select IIO_ADIS_LIB
help
- Say yes here to build support for Analog Devices adis16220 programmable
+ Say Y here to build support for Analog Devices adis16220 programmable
digital vibration sensor.
+ To compile this driver as a module, say M here: the module will be
+ called adis16220.
+
config ADIS16240
tristate "Analog Devices ADIS16240 Programmable Impact Sensor and Recorder"
depends on SPI
select IIO_ADIS_LIB
select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
help
- Say yes here to build support for Analog Devices adis16240 programmable
+ Say Y here to build support for Analog Devices adis16240 programmable
impact Sensor and recorder.
+ To compile this driver as a module, say M here: the module will be
+ called adis16240.
+
config LIS3L02DQ
tristate "ST Microelectronics LIS3L02DQ Accelerometer Driver"
depends on SPI
@@ -63,16 +81,21 @@ config LIS3L02DQ
depends on !IIO_BUFFER || IIO_KFIFO_BUF
depends on GPIOLIB
help
- Say yes here to build SPI support for the ST microelectronics
+ Say Y here to build SPI support for the ST microelectronics
accelerometer. The driver supplies direct access via sysfs files
and an event interface via a character device.
+ To compile this driver as a module, say M here: the module will be
+ called lis3l02dq.
+
config SCA3000
depends on IIO_BUFFER
depends on SPI
tristate "VTI SCA3000 series accelerometers"
help
- Say yes here to build support for the VTI SCA3000 series of SPI
+ Say Y here to build support for the VTI SCA3000 series of SPI
accelerometers. These devices use a hardware ring buffer.
+ To compile this driver as a module, say M here: the module will be
+ called sca3000.
endmenu
diff --git a/drivers/staging/iio/accel/lis3l02dq_ring.c b/drivers/staging/iio/accel/lis3l02dq_ring.c
index 61f94221b8b..9efc77b0ebd 100644
--- a/drivers/staging/iio/accel/lis3l02dq_ring.c
+++ b/drivers/staging/iio/accel/lis3l02dq_ring.c
@@ -35,6 +35,7 @@ irqreturn_t lis3l02dq_data_rdy_trig_poll(int irq, void *private)
iio_trigger_poll(st->trig);
return IRQ_HANDLED;
}
+
return IRQ_WAKE_THREAD;
}
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index c110a255d4e..f6526aa22e8 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -223,7 +223,8 @@ static int ad7192_setup(struct ad7192_state *st,
id &= AD7192_ID_MASK;
if (id != st->devid)
- dev_warn(&st->sd.spi->dev, "device ID query failed (0x%X)\n", id);
+ dev_warn(&st->sd.spi->dev, "device ID query failed (0x%X)\n",
+ id);
switch (pdata->clock_source_sel) {
case AD7192_CLK_EXT_MCLK1_2:
diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
index d215edf66af..4d4870787ee 100644
--- a/drivers/staging/iio/adc/ad7280a.c
+++ b/drivers/staging/iio/adc/ad7280a.c
@@ -188,7 +188,7 @@ static void ad7280_delay(struct ad7280_state *st)
if (st->readback_delay_us < 50)
udelay(st->readback_delay_us);
else
- msleep(1);
+ usleep_range(250, 500);
}
static int __ad7280_read32(struct ad7280_state *st, unsigned *val)
diff --git a/drivers/staging/iio/adc/ad7606_spi.c b/drivers/staging/iio/adc/ad7606_spi.c
index 6a8ecd73a1a..7303983e64a 100644
--- a/drivers/staging/iio/adc/ad7606_spi.c
+++ b/drivers/staging/iio/adc/ad7606_spi.c
@@ -23,7 +23,7 @@ static int ad7606_spi_read_block(struct device *dev,
int i, ret;
unsigned short *data = buf;
- ret = spi_read(spi, (u8 *)buf, count * 2);
+ ret = spi_read(spi, buf, count * 2);
if (ret < 0) {
dev_err(&spi->dev, "SPI read error\n");
return ret;
diff --git a/drivers/staging/iio/adc/ad7816.c b/drivers/staging/iio/adc/ad7816.c
index 734a7e4886a..48b1c374003 100644
--- a/drivers/staging/iio/adc/ad7816.c
+++ b/drivers/staging/iio/adc/ad7816.c
@@ -152,7 +152,8 @@ static ssize_t ad7816_show_available_modes(struct device *dev,
return sprintf(buf, "full\npower-save\n");
}
-static IIO_DEVICE_ATTR(available_modes, S_IRUGO, ad7816_show_available_modes, NULL, 0);
+static IIO_DEVICE_ATTR(available_modes, S_IRUGO, ad7816_show_available_modes,
+ NULL, 0);
static ssize_t ad7816_show_channel(struct device *dev,
struct device_attribute *attr,
diff --git a/drivers/staging/iio/adc/lpc32xx_adc.c b/drivers/staging/iio/adc/lpc32xx_adc.c
index 4708e9a4163..5331c442fcf 100644
--- a/drivers/staging/iio/adc/lpc32xx_adc.c
+++ b/drivers/staging/iio/adc/lpc32xx_adc.c
@@ -116,7 +116,7 @@ static const struct iio_chan_spec lpc32xx_adc_iio_channels[] = {
static irqreturn_t lpc32xx_adc_isr(int irq, void *dev_id)
{
- struct lpc32xx_adc_info *info = (struct lpc32xx_adc_info *) dev_id;
+ struct lpc32xx_adc_info *info = dev_id;
/* Read value and clear irq */
info->value = __raw_readl(LPC32XX_ADC_VALUE(info->adc_base)) &
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
index 51931045bed..f053535385b 100644
--- a/drivers/staging/iio/adc/mxs-lradc.c
+++ b/drivers/staging/iio/adc/mxs-lradc.c
@@ -455,7 +455,8 @@ static void mxs_lradc_setup_ts_channel(struct mxs_lradc *lradc, unsigned ch)
* SoC's delay unit and start the conversion later
* and automatically.
*/
- mxs_lradc_reg_wrt(lradc, LRADC_DELAY_TRIGGER(0) | /* don't trigger ADC */
+ mxs_lradc_reg_wrt(lradc,
+ LRADC_DELAY_TRIGGER(0) | /* don't trigger ADC */
LRADC_DELAY_TRIGGER_DELAYS(1 << 3) | /* trigger DELAY unit#3 */
LRADC_DELAY_KICK |
LRADC_DELAY_DELAY(lradc->settling_delay),
@@ -513,7 +514,8 @@ static void mxs_lradc_setup_ts_pressure(struct mxs_lradc *lradc, unsigned ch1,
* SoC's delay unit and start the conversion later
* and automatically.
*/
- mxs_lradc_reg_wrt(lradc, LRADC_DELAY_TRIGGER(0) | /* don't trigger ADC */
+ mxs_lradc_reg_wrt(lradc,
+ LRADC_DELAY_TRIGGER(0) | /* don't trigger ADC */
LRADC_DELAY_TRIGGER_DELAYS(1 << 3) | /* trigger DELAY unit#3 */
LRADC_DELAY_KICK |
LRADC_DELAY_DELAY(lradc->settling_delay), LRADC_DELAY(2));
diff --git a/drivers/staging/iio/adc/spear_adc.c b/drivers/staging/iio/adc/spear_adc.c
index 750697832b9..8ad71691fc0 100644
--- a/drivers/staging/iio/adc/spear_adc.c
+++ b/drivers/staging/iio/adc/spear_adc.c
@@ -98,7 +98,7 @@ static void spear_adc_set_clk(struct spear_adc_state *st, u32 val)
u32 clk_high, clk_low, count;
u32 apb_clk = clk_get_rate(st->clk);
- count = (apb_clk + val - 1) / val;
+ count = DIV_ROUND_UP(apb_clk, val);
clk_low = count / 2;
clk_high = count - clk_low;
st->current_clk = apb_clk / count;
@@ -226,7 +226,7 @@ static const struct iio_chan_spec spear_adc_iio_channels[] = {
static irqreturn_t spear_adc_isr(int irq, void *dev_id)
{
- struct spear_adc_state *st = (struct spear_adc_state *)dev_id;
+ struct spear_adc_state *st = dev_id;
/* Read value to clear IRQ */
st->value = spear_adc_get_average(st);
diff --git a/drivers/staging/iio/addac/Kconfig b/drivers/staging/iio/addac/Kconfig
index e6795e0bed1..0ed7e13e228 100644
--- a/drivers/staging/iio/addac/Kconfig
+++ b/drivers/staging/iio/addac/Kconfig
@@ -10,6 +10,9 @@ config ADT7316
Say yes here to build support for Analog Devices ADT7316, ADT7317, ADT7318
and ADT7516, ADT7517, ADT7519 temperature sensors, ADC and DAC.
+ To compile this driver as a module, choose M here: the module will
+ be called adt7316.
+
config ADT7316_SPI
tristate "support SPI bus connection"
depends on SPI && ADT7316
@@ -18,6 +21,9 @@ config ADT7316_SPI
Say yes here to build SPI bus support for Analog Devices ADT7316/7/8
and ADT7516/7/9.
+ To compile this driver as a module, choose M here: the module will
+ be called adt7316_spi.
+
config ADT7316_I2C
tristate "support I2C bus connection"
depends on I2C && ADT7316
@@ -25,4 +31,7 @@ config ADT7316_I2C
Say yes here to build I2C bus support for Analog Devices ADT7316/7/8
and ADT7516/7/9.
+ To compile this driver as a module, choose M here: the module will
+ be called adt7316_i2c.
+
endmenu
diff --git a/drivers/staging/iio/addac/adt7316.h b/drivers/staging/iio/addac/adt7316.h
index ec50bf34628..ec40fbb698a 100644
--- a/drivers/staging/iio/addac/adt7316.h
+++ b/drivers/staging/iio/addac/adt7316.h
@@ -30,6 +30,7 @@ extern const struct dev_pm_ops adt7316_pm_ops;
#else
#define ADT7316_PM_OPS NULL
#endif
-int adt7316_probe(struct device *dev, struct adt7316_bus *bus, const char *name);
+int adt7316_probe(struct device *dev, struct adt7316_bus *bus,
+ const char *name);
#endif
diff --git a/drivers/staging/iio/gyro/Kconfig b/drivers/staging/iio/gyro/Kconfig
index 88b199bb292..f62f68fd6f3 100644
--- a/drivers/staging/iio/gyro/Kconfig
+++ b/drivers/staging/iio/gyro/Kconfig
@@ -7,7 +7,10 @@ config ADIS16060
tristate "Analog Devices ADIS16060 Yaw Rate Gyroscope with SPI driver"
depends on SPI
help
- Say yes here to build support for Analog Devices adis16060 wide bandwidth
+ Say Y here to build support for Analog Devices adis16060 wide bandwidth
yaw rate gyroscope with SPI.
+ To compile this driver as a module, say M here: the module will be
+ called adis16060. If unsure, say N.
+
endmenu
diff --git a/drivers/staging/iio/light/tsl2x7x_core.c b/drivers/staging/iio/light/tsl2x7x_core.c
index e0d88fa2a5b..423f96bdf59 100644
--- a/drivers/staging/iio/light/tsl2x7x_core.c
+++ b/drivers/staging/iio/light/tsl2x7x_core.c
@@ -1040,8 +1040,8 @@ static ssize_t tsl2x7x_als_persistence_show(struct device *dev,
y = (TSL2X7X_MAX_TIMER_CNT - (u8)chip->tsl2x7x_settings.als_time) + 1;
z = y * TSL2X7X_MIN_ITIME;
filter_delay = z * (chip->tsl2x7x_settings.persistence & 0x0F);
- y = (filter_delay / 1000);
- z = (filter_delay % 1000);
+ y = filter_delay / 1000;
+ z = filter_delay % 1000;
return snprintf(buf, PAGE_SIZE, "%d.%03d\n", y, z);
}
@@ -1086,8 +1086,8 @@ static ssize_t tsl2x7x_prox_persistence_show(struct device *dev,
y = (TSL2X7X_MAX_TIMER_CNT - (u8)chip->tsl2x7x_settings.prx_time) + 1;
z = y * TSL2X7X_MIN_ITIME;
filter_delay = z * ((chip->tsl2x7x_settings.persistence & 0xF0) >> 4);
- y = (filter_delay / 1000);
- z = (filter_delay % 1000);
+ y = filter_delay / 1000;
+ z = filter_delay % 1000;
return snprintf(buf, PAGE_SIZE, "%d.%03d\n", y, z);
}
@@ -1573,8 +1573,7 @@ static struct attribute *tsl2x7x_ALS_device_attrs[] = {
&dev_attr_power_state.attr,
&dev_attr_in_illuminance0_calibscale_available.attr,
&dev_attr_in_illuminance0_integration_time.attr,
- &iio_const_attr_in_illuminance0_integration_time_available\
- .dev_attr.attr,
+ &iio_const_attr_in_illuminance0_integration_time_available.dev_attr.attr,
&dev_attr_in_illuminance0_target_input.attr,
&dev_attr_in_illuminance0_calibrate.attr,
&dev_attr_in_illuminance0_lux_table.attr,
@@ -1591,8 +1590,7 @@ static struct attribute *tsl2x7x_ALSPRX_device_attrs[] = {
&dev_attr_power_state.attr,
&dev_attr_in_illuminance0_calibscale_available.attr,
&dev_attr_in_illuminance0_integration_time.attr,
- &iio_const_attr_in_illuminance0_integration_time_available\
- .dev_attr.attr,
+ &iio_const_attr_in_illuminance0_integration_time_available.dev_attr.attr,
&dev_attr_in_illuminance0_target_input.attr,
&dev_attr_in_illuminance0_calibrate.attr,
&dev_attr_in_illuminance0_lux_table.attr,
@@ -1611,8 +1609,7 @@ static struct attribute *tsl2x7x_ALSPRX2_device_attrs[] = {
&dev_attr_power_state.attr,
&dev_attr_in_illuminance0_calibscale_available.attr,
&dev_attr_in_illuminance0_integration_time.attr,
- &iio_const_attr_in_illuminance0_integration_time_available\
- .dev_attr.attr,
+ &iio_const_attr_in_illuminance0_integration_time_available.dev_attr.attr,
&dev_attr_in_illuminance0_target_input.attr,
&dev_attr_in_illuminance0_calibrate.attr,
&dev_attr_in_illuminance0_lux_table.attr,
diff --git a/drivers/staging/iio/meter/Kconfig b/drivers/staging/iio/meter/Kconfig
index e53274b64ae..64cd3704ec6 100644
--- a/drivers/staging/iio/meter/Kconfig
+++ b/drivers/staging/iio/meter/Kconfig
@@ -10,6 +10,9 @@ config ADE7753
Say yes here to build support for Analog Devices ADE7753 Single-Phase Multifunction
Metering IC with di/dt Sensor Interface.
+ To compile this driver as a module, choose M here: the
+ module will be called ade7753.
+
config ADE7754
tristate "Analog Devices ADE7754 Polyphase Multifunction Energy Metering IC Driver"
depends on SPI
@@ -17,6 +20,9 @@ config ADE7754
Say yes here to build support for Analog Devices ADE7754 Polyphase
Multifunction Energy Metering IC Driver.
+ To compile this driver as a module, choose M here: the
+ module will be called ade7754.
+
config ADE7758
tristate "Analog Devices ADE7758 Poly Phase Multifunction Energy Metering IC Driver"
depends on SPI
@@ -26,6 +32,9 @@ config ADE7758
Say yes here to build support for Analog Devices ADE7758 Polyphase
Multifunction Energy Metering IC with Per Phase Information Driver.
+ To compile this driver as a module, choose M here: the
+ module will be called ade7758.
+
config ADE7759
tristate "Analog Devices ADE7759 Active Energy Metering IC Driver"
depends on SPI
@@ -33,6 +42,9 @@ config ADE7759
Say yes here to build support for Analog Devices ADE7758 Active Energy
Metering IC with di/dt Sensor Interface.
+ To compile this driver as a module, choose M here: the
+ module will be called ade7759.
+
config ADE7854
tristate "Analog Devices ADE7854/58/68/78 Polyphase Multifunction Energy Metering IC Driver"
depends on SPI || I2C
@@ -40,6 +52,9 @@ config ADE7854
Say yes here to build support for Analog Devices ADE7854/58/68/78 Polyphase
Multifunction Energy Metering IC Driver.
+ To compile this driver as a module, choose M here: the
+ module will be called ade7854.
+
config ADE7854_I2C
tristate "support I2C bus connection"
depends on ADE7854 && I2C
diff --git a/drivers/staging/iio/trigger/Kconfig b/drivers/staging/iio/trigger/Kconfig
index 2fd18c60323..710a2f3e787 100644
--- a/drivers/staging/iio/trigger/Kconfig
+++ b/drivers/staging/iio/trigger/Kconfig
@@ -1,4 +1,4 @@
-#
+ #
# Industrial I/O standalone triggers
#
comment "Triggers - standalone"
@@ -12,6 +12,9 @@ config IIO_PERIODIC_RTC_TRIGGER
Provides support for using periodic capable real time
clocks as IIO triggers.
+ To compile this driver as a module, choose M here: the
+ module will be called iio-trig-periodic-rtc.
+
config IIO_BFIN_TMR_TRIGGER
tristate "Blackfin TIMER trigger"
depends on BLACKFIN
diff --git a/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c b/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
index 82c2e6d3f5a..a24caf73ae0 100644
--- a/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
+++ b/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
@@ -72,7 +72,8 @@ static ssize_t iio_trig_periodic_write_freq(struct device *dev,
if (val > 0) {
ret = rtc_irq_set_freq(trig_info->rtc, &trig_info->task, val);
if (ret == 0 && trig_info->state && trig_info->frequency == 0)
- ret = rtc_irq_set_state(trig_info->rtc, &trig_info->task, 1);
+ ret = rtc_irq_set_state(trig_info->rtc,
+ &trig_info->task, 1);
} else if (val == 0) {
ret = rtc_irq_set_state(trig_info->rtc, &trig_info->task, 0);
} else
diff --git a/drivers/staging/imx-drm/TODO b/drivers/staging/imx-drm/TODO
deleted file mode 100644
index 29636fb1395..00000000000
--- a/drivers/staging/imx-drm/TODO
+++ /dev/null
@@ -1,17 +0,0 @@
-TODO:
-- get DRM Maintainer review for this code
-- decide where to put the base driver. It is not specific to a subsystem
- and would be used by DRM/KMS and media/V4L2
-
-Missing features (not necessarily for moving out of staging):
-
-- Add support for IC (Image converter)
-- Add support for CSI (CMOS Sensor interface)
-- Add support for VDIC (Video Deinterlacer)
-
-Many work-in-progress patches for the above features exist. Contact
-Sascha Hauer <kernel@pengutronix.de> if you are interested in working
-on a specific feature.
-
-Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org> and
-Sascha Hauer <kernel@pengutronix.de>
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
index 8888b275617..2e5a9e5965b 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
@@ -242,18 +242,6 @@ do { \
#define LCONSOLE_EMERG(format, ...) CDEBUG(D_CONSOLE | D_EMERG, format, ## __VA_ARGS__)
-void libcfs_log_goto(struct libcfs_debug_msg_data *, const char *, long_ptr_t);
-#define GOTO(label, rc) \
-do { \
- if (cfs_cdebug_show(D_TRACE, DEBUG_SUBSYSTEM)) { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_TRACE, NULL); \
- libcfs_log_goto(&msgdata, #label, (long_ptr_t)(rc)); \
- } else { \
- (void)(rc); \
- } \
- goto label; \
-} while (0)
-
int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata,
const char *format1, ...)
__attribute__ ((format (printf, 2, 3)));
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
index 8f5cdd584f8..62b575deac3 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -930,8 +930,7 @@ kiblnd_close_peer_conns_locked (kib_peer_t *peer, int why)
list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
conn = list_entry(ctmp, kib_conn_t, ibc_list);
- CDEBUG(D_NET, "Closing conn -> %s, "
- "version: %x, reason: %d\n",
+ CDEBUG(D_NET, "Closing conn -> %s, version: %x, reason: %d\n",
libcfs_nid2str(peer->ibp_nid),
conn->ibc_version, why);
@@ -958,8 +957,7 @@ kiblnd_close_stale_conns_locked (kib_peer_t *peer,
conn->ibc_incarnation == incarnation)
continue;
- CDEBUG(D_NET, "Closing stale conn -> %s version: %x, "
- "incarnation:%#llx(%x, %#llx)\n",
+ CDEBUG(D_NET, "Closing stale conn -> %s version: %x, incarnation:%#llx(%x, %#llx)\n",
libcfs_nid2str(peer->ibp_nid),
conn->ibc_version, conn->ibc_incarnation,
version, incarnation);
@@ -1599,8 +1597,7 @@ kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages,
if (fps->fps_increasing) {
spin_unlock(&fps->fps_lock);
- CDEBUG(D_NET, "Another thread is allocating new "
- "FMR pool, waiting for her to complete\n");
+ CDEBUG(D_NET, "Another thread is allocating new FMR pool, waiting for her to complete\n");
schedule();
goto again;
@@ -1801,8 +1798,7 @@ kiblnd_pool_alloc_node(kib_poolset_t *ps)
if (ps->ps_increasing) {
/* another thread is allocating a new pool */
spin_unlock(&ps->ps_lock);
- CDEBUG(D_NET, "Another thread is allocating new "
- "%s pool, waiting for her to complete\n",
+ CDEBUG(D_NET, "Another thread is allocating new %s pool, waiting for her to complete\n",
ps->ps_name);
schedule();
goto again;
@@ -2411,7 +2407,7 @@ kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev)
goto out;
}
- mr_size = (1ULL << hdev->ibh_mr_shift);
+ mr_size = 1ULL << hdev->ibh_mr_shift;
mm_size = (unsigned long)high_memory - PAGE_OFFSET;
hdev->ibh_nmrs = (int)((mm_size + mr_size - 1) >> hdev->ibh_mr_shift);
@@ -3081,7 +3077,7 @@ kiblnd_startup (lnet_ni_t *ni)
LIBCFS_ALLOC(net, sizeof(*net));
ni->ni_data = net;
if (net == NULL)
- goto failed;
+ goto net_failed;
do_gettimeofday(&tv);
net->ibn_incarnation = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
@@ -3147,6 +3143,7 @@ failed:
if (net->ibn_dev == NULL && ibdev != NULL)
kiblnd_destroy_dev(ibdev);
+net_failed:
kiblnd_shutdown(ni);
CDEBUG(D_NET, "kiblnd_startup failed\n");
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 14c9c8d18d0..b48d7edf566 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -1116,8 +1116,7 @@ kiblnd_init_rdma (kib_conn_t *conn, kib_tx_t *tx, int type,
}
if (tx->tx_nwrq == IBLND_RDMA_FRAGS(conn->ibc_version)) {
- CERROR("RDMA too fragmented for %s (%d): "
- "%d/%d src %d/%d dst frags\n",
+ CERROR("RDMA too fragmented for %s (%d): %d/%d src %d/%d dst frags\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid),
IBLND_RDMA_FRAGS(conn->ibc_version),
srcidx, srcrd->rd_nfrags,
@@ -2254,8 +2253,8 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
if (ni == NULL || /* no matching net */
ni->ni_nid != reqmsg->ibm_dstnid || /* right NET, wrong NID! */
net->ibn_dev != ibdev) { /* wrong device */
- CERROR("Can't accept %s on %s (%s:%d:%pI4h): "
- "bad dst nid %s\n", libcfs_nid2str(nid),
+ CERROR("Can't accept %s on %s (%s:%d:%pI4h): bad dst nid %s\n",
+ libcfs_nid2str(nid),
ni == NULL ? "NA" : libcfs_nid2str(ni->ni_nid),
ibdev->ibd_ifname, ibdev->ibd_nnets,
&ibdev->ibd_ifip,
@@ -2295,8 +2294,7 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
if (reqmsg->ibm_u.connparams.ibcp_max_frags !=
IBLND_RDMA_FRAGS(version)) {
- CERROR("Can't accept %s(version %x): "
- "incompatible max_frags %d (%d wanted)\n",
+ CERROR("Can't accept %s(version %x): incompatible max_frags %d (%d wanted)\n",
libcfs_nid2str(nid), version,
reqmsg->ibm_u.connparams.ibcp_max_frags,
IBLND_RDMA_FRAGS(version));
@@ -2502,8 +2500,7 @@ kiblnd_reconnect (kib_conn_t *conn, int version,
break;
}
- CNETERR("%s: retrying (%s), %x, %x, "
- "queue_dep: %d, max_frag: %d, msg_size: %d\n",
+ CNETERR("%s: retrying (%s), %x, %x, queue_dep: %d, max_frag: %d, msg_size: %d\n",
libcfs_nid2str(peer->ibp_nid),
reason, IBLND_MSG_VERSION, version,
cp != NULL? cp->ibcp_queue_depth :IBLND_MSG_QUEUE_SIZE(version),
@@ -2679,8 +2676,7 @@ kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob)
}
if (ver != msg->ibm_version) {
- CERROR("%s replied version %x is different with "
- "requested version %x\n",
+ CERROR("%s replied version %x is different with requested version %x\n",
libcfs_nid2str(peer->ibp_nid), msg->ibm_version, ver);
rc = -EPROTO;
goto failed;
@@ -2724,8 +2720,7 @@ kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob)
read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
if (rc != 0) {
- CERROR("Bad connection reply from %s, rc = %d, "
- "version: %x max_frags: %d\n",
+ CERROR("Bad connection reply from %s, rc = %d, version: %x max_frags: %d\n",
libcfs_nid2str(peer->ibp_nid), rc,
msg->ibm_version, msg->ibm_u.connparams.ibcp_max_frags);
goto failed;
@@ -3060,8 +3055,7 @@ kiblnd_check_conns (int idx)
}
if (timedout) {
- CERROR("Timed out RDMA with %s (%lu): "
- "c: %u, oc: %u, rc: %u\n",
+ CERROR("Timed out RDMA with %s (%lu): c: %u, oc: %u, rc: %u\n",
libcfs_nid2str(peer->ibp_nid),
cfs_duration_sec(cfs_time_current() -
peer->ibp_last_alive),
@@ -3334,10 +3328,8 @@ kiblnd_scheduler(void *arg)
rc = cfs_cpt_bind(lnet_cpt_table(), sched->ibs_cpt);
if (rc != 0) {
- CWARN("Failed to bind on CPT %d, please verify whether "
- "all CPUs are healthy and reload modules if necessary, "
- "otherwise your system might under risk of low "
- "performance\n", sched->ibs_cpt);
+ CWARN("Failed to bind on CPT %d, please verify whether all CPUs are healthy and reload modules if necessary, otherwise your system might under risk of low performance\n",
+ sched->ibs_cpt);
}
spin_lock_irqsave(&sched->ibs_lock, flags);
@@ -3369,8 +3361,7 @@ kiblnd_scheduler(void *arg)
rc = ib_req_notify_cq(conn->ibc_cq,
IB_CQ_NEXT_COMP);
if (rc < 0) {
- CWARN("%s: ib_req_notify_cq failed: %d, "
- "closing connection\n",
+ CWARN("%s: ib_req_notify_cq failed: %d, closing connection\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
kiblnd_close_conn(conn, -EIO);
kiblnd_conn_decref(conn);
@@ -3383,8 +3374,7 @@ kiblnd_scheduler(void *arg)
}
if (rc < 0) {
- CWARN("%s: ib_poll_cq failed: %d, "
- "closing connection\n",
+ CWARN("%s: ib_poll_cq failed: %d, closing connection\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid),
rc);
kiblnd_close_conn(conn, -EIO);
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
index cefdfb6b1be..8b4a8e9a29b 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
@@ -222,8 +222,7 @@ kiblnd_tunables_init (void)
*kiblnd_tunables.kib_concurrent_sends = *kiblnd_tunables.kib_peertxcredits / 2;
if (*kiblnd_tunables.kib_concurrent_sends < *kiblnd_tunables.kib_peertxcredits) {
- CWARN("Concurrent sends %d is lower than message queue size: %d, "
- "performance may drop slightly.\n",
+ CWARN("Concurrent sends %d is lower than message queue size: %d, performance may drop slightly.\n",
*kiblnd_tunables.kib_concurrent_sends, *kiblnd_tunables.kib_peertxcredits);
}
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index 038854e8302..9188b34e694 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -337,8 +337,7 @@ ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn)
&route->ksnr_ipaddr,
&conn->ksnc_myipaddr);
} else {
- CDEBUG(D_NET, "Rebinding %s %pI4h from "
- "%pI4h to %pI4h\n",
+ CDEBUG(D_NET, "Rebinding %s %pI4h from %pI4h to %pI4h\n",
libcfs_id2str(peer->ksnp_id),
&route->ksnr_ipaddr,
&route->ksnr_myipaddr,
@@ -974,8 +973,7 @@ ksocknal_accept (lnet_ni_t *ni, struct socket *sock)
LIBCFS_ALLOC(cr, sizeof(*cr));
if (cr == NULL) {
- LCONSOLE_ERROR_MSG(0x12f, "Dropping connection request from "
- "%pI4h: memory exhausted\n",
+ LCONSOLE_ERROR_MSG(0x12f, "Dropping connection request from %pI4h: memory exhausted\n",
&peer_ip);
return -ENOMEM;
}
@@ -1288,8 +1286,7 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
* socket callbacks.
*/
- CDEBUG(D_NET, "New conn %s p %d.x %pI4h -> %pI4h/%d"
- " incarnation:%lld sched[%d:%d]\n",
+ CDEBUG(D_NET, "New conn %s p %d.x %pI4h -> %pI4h/%d incarnation:%lld sched[%d:%d]\n",
libcfs_id2str(peerid), conn->ksnc_proto->pro_version,
&conn->ksnc_myipaddr, &conn->ksnc_ipaddr,
conn->ksnc_port, incarnation, cpt,
@@ -1638,37 +1635,32 @@ ksocknal_destroy_conn (ksock_conn_t *conn)
case SOCKNAL_RX_LNET_PAYLOAD:
last_rcv = conn->ksnc_rx_deadline -
cfs_time_seconds(*ksocknal_tunables.ksnd_timeout);
- CERROR("Completing partial receive from %s[%d]"
- ", ip %pI4h:%d, with error, wanted: %d, left: %d, "
- "last alive is %ld secs ago\n",
+ CERROR("Completing partial receive from %s[%d], ip %pI4h:%d, with error, wanted: %d, left: %d, last alive is %ld secs ago\n",
libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
&conn->ksnc_ipaddr, conn->ksnc_port,
conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
cfs_duration_sec(cfs_time_sub(cfs_time_current(),
- last_rcv)));
+ last_rcv)));
lnet_finalize (conn->ksnc_peer->ksnp_ni,
conn->ksnc_cookie, -EIO);
break;
case SOCKNAL_RX_LNET_HEADER:
if (conn->ksnc_rx_started)
- CERROR("Incomplete receive of lnet header from %s"
- ", ip %pI4h:%d, with error, protocol: %d.x.\n",
+ CERROR("Incomplete receive of lnet header from %s, ip %pI4h:%d, with error, protocol: %d.x.\n",
libcfs_id2str(conn->ksnc_peer->ksnp_id),
&conn->ksnc_ipaddr, conn->ksnc_port,
conn->ksnc_proto->pro_version);
break;
case SOCKNAL_RX_KSM_HEADER:
if (conn->ksnc_rx_started)
- CERROR("Incomplete receive of ksock message from %s"
- ", ip %pI4h:%d, with error, protocol: %d.x.\n",
+ CERROR("Incomplete receive of ksock message from %s, ip %pI4h:%d, with error, protocol: %d.x.\n",
libcfs_id2str(conn->ksnc_peer->ksnp_id),
&conn->ksnc_ipaddr, conn->ksnc_port,
conn->ksnc_proto->pro_version);
break;
case SOCKNAL_RX_SLOP:
if (conn->ksnc_rx_started)
- CERROR("Incomplete receive of slops from %s"
- ", ip %pI4h:%d, with error\n",
+ CERROR("Incomplete receive of slops from %s, ip %pI4h:%d, with error\n",
libcfs_id2str(conn->ksnc_peer->ksnp_id),
&conn->ksnc_ipaddr, conn->ksnc_port);
break;
@@ -2348,16 +2340,11 @@ ksocknal_base_shutdown(void)
static __u64
ksocknal_new_incarnation (void)
{
- struct timeval tv;
/* The incarnation number is the time this module loaded and it
- * identifies this particular instance of the socknal. Hopefully
- * we won't be able to reboot more frequently than 1MHz for the
- * foreseeable future :) */
-
- do_gettimeofday(&tv);
-
- return (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
+ * identifies this particular instance of the socknal.
+ */
+ return ktime_get_ns();
}
static int
@@ -2516,22 +2503,21 @@ ksocknal_debug_peerhash (lnet_ni_t *ni)
ksock_route_t *route;
ksock_conn_t *conn;
- CWARN ("Active peer on shutdown: %s, ref %d, scnt %d, "
- "closing %d, accepting %d, err %d, zcookie %llu, "
- "txq %d, zc_req %d\n", libcfs_id2str(peer->ksnp_id),
- atomic_read(&peer->ksnp_refcount),
- peer->ksnp_sharecount, peer->ksnp_closing,
- peer->ksnp_accepting, peer->ksnp_error,
- peer->ksnp_zc_next_cookie,
- !list_empty(&peer->ksnp_tx_queue),
- !list_empty(&peer->ksnp_zc_req_list));
+ CWARN("Active peer on shutdown: %s, ref %d, scnt %d, closing %d, accepting %d, err %d, zcookie %llu, txq %d, zc_req %d\n",
+ libcfs_id2str(peer->ksnp_id),
+ atomic_read(&peer->ksnp_refcount),
+ peer->ksnp_sharecount, peer->ksnp_closing,
+ peer->ksnp_accepting, peer->ksnp_error,
+ peer->ksnp_zc_next_cookie,
+ !list_empty(&peer->ksnp_tx_queue),
+ !list_empty(&peer->ksnp_zc_req_list));
list_for_each (tmp, &peer->ksnp_routes) {
route = list_entry(tmp, ksock_route_t, ksnr_list);
- CWARN ("Route: ref %d, schd %d, conn %d, cnted %d, "
- "del %d\n", atomic_read(&route->ksnr_refcount),
- route->ksnr_scheduled, route->ksnr_connecting,
- route->ksnr_connected, route->ksnr_deleted);
+ CWARN("Route: ref %d, schd %d, conn %d, cnted %d, del %d\n",
+ atomic_read(&route->ksnr_refcount),
+ route->ksnr_scheduled, route->ksnr_connecting,
+ route->ksnr_connected, route->ksnr_deleted);
}
list_for_each (tmp, &peer->ksnp_conns) {
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index d29f5f134b8..e6c1d364795 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -551,19 +551,16 @@ ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
if (!conn->ksnc_closing) {
switch (rc) {
case -ECONNRESET:
- LCONSOLE_WARN("Host %pI4h reset our connection "
- "while we were sending data; it may have "
- "rebooted.\n",
+ LCONSOLE_WARN("Host %pI4h reset our connection while we were sending data; it may have rebooted.\n",
&conn->ksnc_ipaddr);
break;
default:
- LCONSOLE_WARN("There was an unexpected network error "
- "while writing to %pI4h: %d.\n",
+ LCONSOLE_WARN("There was an unexpected network error while writing to %pI4h: %d.\n",
&conn->ksnc_ipaddr, rc);
break;
}
- CDEBUG(D_NET, "[%p] Error %d on write to %s"
- " ip %pI4h:%d\n", conn, rc,
+ CDEBUG(D_NET, "[%p] Error %d on write to %s ip %pI4h:%d\n",
+ conn, rc,
libcfs_id2str(conn->ksnc_peer->ksnp_id),
&conn->ksnc_ipaddr,
conn->ksnc_port);
@@ -799,8 +796,7 @@ ksocknal_find_connectable_route_locked (ksock_peer_t *peer)
if (!(route->ksnr_retry_interval == 0 || /* first attempt */
cfs_time_aftereq(now, route->ksnr_timeout))) {
CDEBUG(D_NET,
- "Too soon to retry route %pI4h "
- "(cnted %d, interval %ld, %ld secs later)\n",
+ "Too soon to retry route %pI4h (cnted %d, interval %ld, %ld secs later)\n",
&route->ksnr_ipaddr,
route->ksnr_connected,
route->ksnr_retry_interval,
@@ -874,8 +870,8 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
write_unlock_bh(g_lock);
if ((id.pid & LNET_PID_USERFLAG) != 0) {
- CERROR("Refusing to create a connection to "
- "userspace process %s\n", libcfs_id2str(id));
+ CERROR("Refusing to create a connection to userspace process %s\n",
+ libcfs_id2str(id));
return -EHOSTUNREACH;
}
@@ -1132,18 +1128,17 @@ ksocknal_process_receive (ksock_conn_t *conn)
LASSERT (rc != -EAGAIN);
if (rc == 0)
- CDEBUG(D_NET, "[%p] EOF from %s"
- " ip %pI4h:%d\n", conn,
- libcfs_id2str(conn->ksnc_peer->ksnp_id),
- &conn->ksnc_ipaddr,
- conn->ksnc_port);
+ CDEBUG(D_NET, "[%p] EOF from %s ip %pI4h:%d\n",
+ conn,
+ libcfs_id2str(conn->ksnc_peer->ksnp_id),
+ &conn->ksnc_ipaddr,
+ conn->ksnc_port);
else if (!conn->ksnc_closing)
- CERROR("[%p] Error %d on read from %s"
- " ip %pI4h:%d\n",
- conn, rc,
- libcfs_id2str(conn->ksnc_peer->ksnp_id),
- &conn->ksnc_ipaddr,
- conn->ksnc_port);
+ CERROR("[%p] Error %d on read from %s ip %pI4h:%d\n",
+ conn, rc,
+ libcfs_id2str(conn->ksnc_peer->ksnp_id),
+ &conn->ksnc_ipaddr,
+ conn->ksnc_port);
/* it's not an error if conn is being closed */
ksocknal_close_conn_and_siblings (conn,
@@ -1724,10 +1719,10 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
hello->kshm_magic != __swab32(LNET_PROTO_MAGIC) &&
hello->kshm_magic != le32_to_cpu (LNET_PROTO_TCP_MAGIC)) {
/* Unexpected magic! */
- CERROR("Bad magic(1) %#08x (%#08x expected) from "
- "%pI4h\n", __cpu_to_le32 (hello->kshm_magic),
- LNET_PROTO_TCP_MAGIC,
- &conn->ksnc_ipaddr);
+ CERROR("Bad magic(1) %#08x (%#08x expected) from %pI4h\n",
+ __cpu_to_le32 (hello->kshm_magic),
+ LNET_PROTO_TCP_MAGIC,
+ &conn->ksnc_ipaddr);
return -EPROTO;
}
@@ -1755,10 +1750,9 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
ksocknal_send_hello(ni, conn, ni->ni_nid, hello);
}
- CERROR("Unknown protocol version (%d.x expected)"
- " from %pI4h\n",
- conn->ksnc_proto->pro_version,
- &conn->ksnc_ipaddr);
+ CERROR("Unknown protocol version (%d.x expected) from %pI4h\n",
+ conn->ksnc_proto->pro_version,
+ &conn->ksnc_ipaddr);
return -EPROTO;
}
@@ -1778,8 +1772,8 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
*incarnation = hello->kshm_src_incarnation;
if (hello->kshm_src_nid == LNET_NID_ANY) {
- CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY"
- "from %pI4h\n", &conn->ksnc_ipaddr);
+ CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY from %pI4h\n",
+ &conn->ksnc_ipaddr);
return -EPROTO;
}
@@ -1810,10 +1804,7 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
if (peerid->pid != recv_id.pid ||
peerid->nid != recv_id.nid) {
- LCONSOLE_ERROR_MSG(0x130, "Connected successfully to %s on host"
- " %pI4h, but they claimed they were "
- "%s; please check your Lustre "
- "configuration.\n",
+ LCONSOLE_ERROR_MSG(0x130, "Connected successfully to %s on host %pI4h, but they claimed they were %s; please check your Lustre configuration.\n",
libcfs_id2str(*peerid),
&conn->ksnc_ipaddr,
libcfs_id2str(recv_id));
@@ -2199,8 +2190,7 @@ ksocknal_connd (void *arg)
if (ksocknal_connect(route)) {
/* consecutive retry */
if (cons_retry++ > SOCKNAL_INSANITY_RECONN) {
- CWARN("massive consecutive "
- "re-connecting to %pI4h\n",
+ CWARN("massive consecutive re-connecting to %pI4h\n",
&route->ksnr_ipaddr);
cons_retry = 0;
}
@@ -2264,25 +2254,20 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer)
switch (error) {
case ECONNRESET:
- CNETERR("A connection with %s "
- "(%pI4h:%d) was reset; "
- "it may have rebooted.\n",
+ CNETERR("A connection with %s (%pI4h:%d) was reset; it may have rebooted.\n",
libcfs_id2str(peer->ksnp_id),
&conn->ksnc_ipaddr,
conn->ksnc_port);
break;
case ETIMEDOUT:
- CNETERR("A connection with %s "
- "(%pI4h:%d) timed out; the "
- "network or node may be down.\n",
+ CNETERR("A connection with %s (%pI4h:%d) timed out; the network or node may be down.\n",
libcfs_id2str(peer->ksnp_id),
&conn->ksnc_ipaddr,
conn->ksnc_port);
break;
default:
- CNETERR("An unexpected network error %d "
- "occurred with %s "
- "(%pI4h:%d\n", error,
+ CNETERR("An unexpected network error %d occurred with %s (%pI4h:%d\n",
+ error,
libcfs_id2str(peer->ksnp_id),
&conn->ksnc_ipaddr,
conn->ksnc_port);
@@ -2297,8 +2282,7 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer)
conn->ksnc_rx_deadline)) {
/* Timed out incomplete incoming message */
ksocknal_conn_addref(conn);
- CNETERR("Timeout receiving from %s (%pI4h:%d), "
- "state %d wanted %d left %d\n",
+ CNETERR("Timeout receiving from %s (%pI4h:%d), state %d wanted %d left %d\n",
libcfs_id2str(peer->ksnp_id),
&conn->ksnc_ipaddr,
conn->ksnc_port,
@@ -2315,8 +2299,7 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer)
/* Timed out messages queued for sending or
* buffered in the socket's send buffer */
ksocknal_conn_addref(conn);
- CNETERR("Timeout sending data to %s (%pI4h:%d) "
- "the network or that node may be down.\n",
+ CNETERR("Timeout sending data to %s (%pI4h:%d) the network or that node may be down.\n",
libcfs_id2str(peer->ksnp_id),
&conn->ksnc_ipaddr,
conn->ksnc_port);
@@ -2500,9 +2483,7 @@ ksocknal_check_peer_timeouts (int idx)
spin_unlock(&peer->ksnp_lock);
read_unlock(&ksocknal_data.ksnd_global_lock);
- CERROR("Total %d stale ZC_REQs for peer %s detected; the "
- "oldest(%p) timed out %ld secs ago, "
- "resid: %d, wmem: %d\n",
+ CERROR("Total %d stale ZC_REQs for peer %s detected; the oldest(%p) timed out %ld secs ago, resid: %d, wmem: %d\n",
n, libcfs_nid2str(peer->ksnp_id.nid), tx,
cfs_duration_sec(cfs_time_current() - deadline),
resid, conn->ksnc_sock->sk->sk_wmem_queued);
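
Every hunk in this file follows the same pattern: quoted string fragments that had been split to stay under 80 columns are joined so that each user-visible format string sits on a single source line. The kernel coding style prefers this because the complete message can then be found with grep, and checkpatch warns about the split form ("quoted string split across lines"). A minimal before/after sketch; the message text and variables are illustrative, not taken from the driver:

	/* before: the string is broken across source lines */
	CERROR("failed to talk to peer %s: "
	       "error %d\n", peer_name, rc);

	/* after: one greppable string; only the argument list wraps */
	CERROR("failed to talk to peer %s: error %d\n",
	       peer_name, rc);
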
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
index 9dde548070a..ea9d80f40ca 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
@@ -515,8 +515,8 @@ ksocknal_send_hello_v1 (ksock_conn_t *conn, ksock_hello_msg_t *hello)
hello->kshm_nips * sizeof(__u32),
lnet_acceptor_timeout());
if (rc != 0) {
- CNETERR("Error %d sending HELLO payload (%d)"
- " to %pI4h/%d\n", rc, hello->kshm_nips,
+ CNETERR("Error %d sending HELLO payload (%d) to %pI4h/%d\n",
+ rc, hello->kshm_nips,
&conn->ksnc_ipaddr, conn->ksnc_port);
}
out:
@@ -560,8 +560,8 @@ ksocknal_send_hello_v2 (ksock_conn_t *conn, ksock_hello_msg_t *hello)
hello->kshm_nips * sizeof(__u32),
lnet_acceptor_timeout());
if (rc != 0) {
- CNETERR("Error %d sending HELLO payload (%d)"
- " to %pI4h/%d\n", rc, hello->kshm_nips,
+ CNETERR("Error %d sending HELLO payload (%d) to %pI4h/%d\n",
+ rc, hello->kshm_nips,
&conn->ksnc_ipaddr, conn->ksnc_port);
}
@@ -595,10 +595,9 @@ ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello,
/* ...and check we got what we expected */
if (hdr->type != cpu_to_le32 (LNET_MSG_HELLO)) {
- CERROR("Expecting a HELLO hdr,"
- " but got type %d from %pI4h\n",
- le32_to_cpu (hdr->type),
- &conn->ksnc_ipaddr);
+ CERROR("Expecting a HELLO hdr, but got type %d from %pI4h\n",
+ le32_to_cpu(hdr->type),
+ &conn->ksnc_ipaddr);
rc = -EPROTO;
goto out;
}
diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c
index 60bc2ae4fdf..faceb9505d8 100644
--- a/drivers/staging/lustre/lnet/lnet/api-ni.c
+++ b/drivers/staging/lustre/lnet/lnet/api-ni.c
@@ -37,6 +37,7 @@
#define DEBUG_SUBSYSTEM S_LNET
#include "../../include/linux/lnet/lib-lnet.h"
#include <linux/log2.h>
+#include <linux/ktime.h>
#define D_LNI D_CONSOLE
@@ -276,7 +277,7 @@ lnet_find_lnd_by_type(int type)
struct list_head *tmp;
/* holding lnd mutex */
- list_for_each (tmp, &the_lnet.ln_lnds) {
+ list_for_each(tmp, &the_lnet.ln_lnds) {
lnd = list_entry(tmp, lnd_t, lnd_list);
if ((int)lnd->lnd_type == type)
@@ -417,17 +418,9 @@ static __u64
lnet_create_interface_cookie(void)
{
/* NB the interface cookie in wire handles guards against delayed
- * replies and ACKs appearing valid after reboot. Initialisation time,
- * even if it's only implemented to millisecond resolution is probably
- * easily good enough. */
- struct timeval tv;
- __u64 cookie;
-
- do_gettimeofday(&tv);
- cookie = tv.tv_sec;
- cookie *= 1000000;
- cookie += tv.tv_usec;
- return cookie;
+ * replies and ACKs appearing valid after reboot.
+ */
+ return ktime_get_ns();
}
static char *
@@ -1652,7 +1645,6 @@ lnet_destroy_ping_info(void)
offsetof(lnet_ping_info_t,
pi_ni[the_lnet.ln_ping_info->pi_nnis]));
the_lnet.ln_ping_info = NULL;
- return;
}
int
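
The interface-cookie hunk above drops the struct timeval arithmetic (seconds scaled to microseconds plus the microsecond remainder) in favour of a single ktime_get_ns() call. As the retained comment says, the cookie only has to differ across reboots so that delayed replies and ACKs can be rejected, and a 64-bit nanosecond counter satisfies that with less code. A sketch of the two schemes side by side; the function names are illustrative:

	#include <linux/ktime.h>
	#include <linux/time.h>

	static __u64 cookie_from_timeval(void)		/* the old scheme */
	{
		struct timeval tv;

		do_gettimeofday(&tv);
		return (__u64)tv.tv_sec * 1000000 + tv.tv_usec;
	}

	static __u64 cookie_from_ktime(void)		/* the replacement */
	{
		return ktime_get_ns();	/* monotonic nanoseconds since boot */
	}
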
diff --git a/drivers/staging/lustre/lnet/lnet/lib-md.c b/drivers/staging/lustre/lnet/lnet/lib-md.c
index e4d906a6563..3225c069637 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-md.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-md.c
@@ -218,7 +218,7 @@ lnet_md_deconstruct(lnet_libmd_t *lmd, lnet_md_t *umd)
lnet_eq2handle(&umd->eq_handle, lmd->md_eq);
}
-int
+static int
lnet_md_validate(lnet_md_t *umd)
{
if (umd->start == NULL && umd->length != 0) {
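
lnet_md_validate() can take internal linkage because nothing outside this file calls it, and the same conversion is applied to many helpers in the files that follow. Making a file-local function static keeps it out of the global symbol table, lets the compiler inline or drop it, and silences the sparse warning about undeclared global symbols. The shape of the change, reduced to a self-contained sketch with made-up names:

	#include <linux/errno.h>
	#include <linux/stddef.h>

	struct example_md {
		void		*start;
		unsigned int	 length;
	};

	/* callers exist only inside this .c file, so internal linkage */
	static int example_md_validate(const struct example_md *umd)
	{
		if (umd->start == NULL && umd->length != 0)
			return -EINVAL;
		return 0;
	}

	/* still part of the public API, declared in a header elsewhere */
	int example_md_bind(struct example_md *umd)
	{
		return example_md_validate(umd);
	}
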
diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c
index 4b9567d67f3..c8c1ed84fe5 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-move.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-move.c
@@ -561,7 +561,7 @@ lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
}
EXPORT_SYMBOL(lnet_extract_kiov);
-void
+static void
lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
unsigned int offset, unsigned int mlen, unsigned int rlen)
{
@@ -599,7 +599,7 @@ lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
lnet_finalize(ni, msg, rc);
}
-void
+static void
lnet_setpayloadbuffer(lnet_msg_t *msg)
{
lnet_libmd_t *md = msg->msg_md;
@@ -639,7 +639,7 @@ lnet_prep_send(lnet_msg_t *msg, int type, lnet_process_id_t target,
msg->msg_hdr.payload_length = cpu_to_le32(len);
}
-void
+static void
lnet_ni_send(lnet_ni_t *ni, lnet_msg_t *msg)
{
void *priv = msg->msg_private;
@@ -654,7 +654,7 @@ lnet_ni_send(lnet_ni_t *ni, lnet_msg_t *msg)
lnet_finalize(ni, msg, rc);
}
-int
+static int
lnet_ni_eager_recv(lnet_ni_t *ni, lnet_msg_t *msg)
{
int rc;
@@ -668,8 +668,7 @@ lnet_ni_eager_recv(lnet_ni_t *ni, lnet_msg_t *msg)
rc = (ni->ni_lnd->lnd_eager_recv)(ni, msg->msg_private, msg,
&msg->msg_private);
if (rc != 0) {
- CERROR("recv from %s / send to %s aborted: "
- "eager_recv failed %d\n",
+ CERROR("recv from %s / send to %s aborted: eager_recv failed %d\n",
libcfs_nid2str(msg->msg_rxpeer->lp_nid),
libcfs_id2str(msg->msg_target), rc);
LASSERT(rc < 0); /* required by my callers */
@@ -679,7 +678,7 @@ lnet_ni_eager_recv(lnet_ni_t *ni, lnet_msg_t *msg)
}
/* NB: caller shall hold a ref on 'lp' as I'd drop lnet_net_lock */
-void
+static void
lnet_ni_query_locked(lnet_ni_t *ni, lnet_peer_t *lp)
{
unsigned long last_alive = 0;
@@ -731,7 +730,7 @@ lnet_peer_is_alive(lnet_peer_t *lp, unsigned long now)
/* NB: returns 1 when alive, 0 when dead, negative when error;
* may drop the lnet_net_lock */
-int
+static int
lnet_peer_alive_locked(lnet_peer_t *lp)
{
unsigned long now = cfs_time_current();
@@ -753,8 +752,7 @@ lnet_peer_alive_locked(lnet_peer_t *lp)
if (time_before(now, next_query)) {
if (lp->lp_alive)
- CWARN("Unexpected aliveness of peer %s: "
- "%d < %d (%d/%d)\n",
+ CWARN("Unexpected aliveness of peer %s: %d < %d (%d/%d)\n",
libcfs_nid2str(lp->lp_nid),
(int)now, (int)next_query,
lnet_queryinterval,
@@ -817,8 +815,7 @@ lnet_post_send_locked(lnet_msg_t *msg, int do_send)
(msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED) != 0) {
lnet_net_unlock(cpt);
- CNETERR("Aborting message for %s: LNetM[DE]Unlink() already "
- "called on the MD/ME.\n",
+ CNETERR("Aborting message for %s: LNetM[DE]Unlink() already called on the MD/ME.\n",
libcfs_id2str(msg->msg_target));
if (do_send)
lnet_finalize(ni, msg, -ECANCELED);
@@ -871,7 +868,7 @@ lnet_post_send_locked(lnet_msg_t *msg, int do_send)
}
-lnet_rtrbufpool_t *
+static lnet_rtrbufpool_t *
lnet_msg2bufpool(lnet_msg_t *msg)
{
lnet_rtrbufpool_t *rbp;
@@ -891,7 +888,7 @@ lnet_msg2bufpool(lnet_msg_t *msg)
return rbp;
}
-int
+static int
lnet_post_routed_recv_locked(lnet_msg_t *msg, int do_recv)
{
/* lnet_parse is going to lnet_net_unlock immediately after this, so it
@@ -1220,8 +1217,8 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid)
src_ni = lnet_nid2ni_locked(src_nid, cpt);
if (src_ni == NULL) {
lnet_net_unlock(cpt);
- LCONSOLE_WARN("Can't send to %s: src %s is not a "
- "local nid\n", libcfs_nid2str(dst_nid),
+ LCONSOLE_WARN("Can't send to %s: src %s is not a local nid\n",
+ libcfs_nid2str(dst_nid),
libcfs_nid2str(src_nid));
return -EINVAL;
}
@@ -1283,8 +1280,7 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid)
lnet_ni_decref_locked(src_ni, cpt);
lnet_net_unlock(cpt);
- LCONSOLE_WARN("No route to %s via %s "
- "(all routers down)\n",
+ LCONSOLE_WARN("No route to %s via %s (all routers down)\n",
libcfs_id2str(msg->msg_target),
libcfs_nid2str(src_nid));
return -EHOSTUNREACH;
@@ -1676,8 +1672,7 @@ lnet_print_hdr(lnet_hdr_t *hdr)
break;
case LNET_MSG_PUT:
- CWARN(" Ptl index %d, ack md %#llx.%#llx, "
- "match bits %llu\n",
+ CWARN(" Ptl index %d, ack md %#llx.%#llx, match bits %llu\n",
hdr->msg.put.ptl_index,
hdr->msg.put.ack_wmd.wh_interface_cookie,
hdr->msg.put.ack_wmd.wh_object_cookie,
@@ -1688,8 +1683,8 @@ lnet_print_hdr(lnet_hdr_t *hdr)
break;
case LNET_MSG_GET:
- CWARN(" Ptl index %d, return md %#llx.%#llx, "
- "match bits %llu\n", hdr->msg.get.ptl_index,
+ CWARN(" Ptl index %d, return md %#llx.%#llx, match bits %llu\n",
+ hdr->msg.get.ptl_index,
hdr->msg.get.return_wmd.wh_interface_cookie,
hdr->msg.get.return_wmd.wh_object_cookie,
hdr->msg.get.match_bits);
@@ -1699,16 +1694,14 @@ lnet_print_hdr(lnet_hdr_t *hdr)
break;
case LNET_MSG_ACK:
- CWARN(" dst md %#llx.%#llx, "
- "manipulated length %d\n",
+ CWARN(" dst md %#llx.%#llx, manipulated length %d\n",
hdr->msg.ack.dst_wmd.wh_interface_cookie,
hdr->msg.ack.dst_wmd.wh_object_cookie,
hdr->msg.ack.mlength);
break;
case LNET_MSG_REPLY:
- CWARN(" dst md %#llx.%#llx, "
- "length %d\n",
+ CWARN(" dst md %#llx.%#llx, length %d\n",
hdr->msg.reply.dst_wmd.wh_interface_cookie,
hdr->msg.reply.dst_wmd.wh_object_cookie,
hdr->payload_length);
@@ -1757,8 +1750,7 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
case LNET_MSG_REPLY:
if (payload_length >
(__u32)(for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) {
- CERROR("%s, src %s: bad %s payload %d "
- "(%d max expected)\n",
+ CERROR("%s, src %s: bad %s payload %d (%d max expected)\n",
libcfs_nid2str(from_nid),
libcfs_nid2str(src_nid),
lnet_msgtyp2str(type),
@@ -1794,40 +1786,36 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
if (!for_me) {
if (LNET_NIDNET(dest_nid) == LNET_NIDNET(ni->ni_nid)) {
/* should have gone direct */
- CERROR("%s, src %s: Bad dest nid %s "
- "(should have been sent direct)\n",
- libcfs_nid2str(from_nid),
- libcfs_nid2str(src_nid),
- libcfs_nid2str(dest_nid));
+ CERROR("%s, src %s: Bad dest nid %s (should have been sent direct)\n",
+ libcfs_nid2str(from_nid),
+ libcfs_nid2str(src_nid),
+ libcfs_nid2str(dest_nid));
return -EPROTO;
}
if (lnet_islocalnid(dest_nid)) {
/* dest is another local NI; sender should have used
* this node's NID on its own network */
- CERROR("%s, src %s: Bad dest nid %s "
- "(it's my nid but on a different network)\n",
- libcfs_nid2str(from_nid),
- libcfs_nid2str(src_nid),
- libcfs_nid2str(dest_nid));
+ CERROR("%s, src %s: Bad dest nid %s (it's my nid but on a different network)\n",
+ libcfs_nid2str(from_nid),
+ libcfs_nid2str(src_nid),
+ libcfs_nid2str(dest_nid));
return -EPROTO;
}
if (rdma_req && type == LNET_MSG_GET) {
- CERROR("%s, src %s: Bad optimized GET for %s "
- "(final destination must be me)\n",
- libcfs_nid2str(from_nid),
- libcfs_nid2str(src_nid),
- libcfs_nid2str(dest_nid));
+ CERROR("%s, src %s: Bad optimized GET for %s (final destination must be me)\n",
+ libcfs_nid2str(from_nid),
+ libcfs_nid2str(src_nid),
+ libcfs_nid2str(dest_nid));
return -EPROTO;
}
if (!the_lnet.ln_routing) {
- CERROR("%s, src %s: Dropping message for %s "
- "(routing not enabled)\n",
- libcfs_nid2str(from_nid),
- libcfs_nid2str(src_nid),
- libcfs_nid2str(dest_nid));
+ CERROR("%s, src %s: Dropping message for %s (routing not enabled)\n",
+ libcfs_nid2str(from_nid),
+ libcfs_nid2str(src_nid),
+ libcfs_nid2str(dest_nid));
goto drop;
}
}
@@ -1882,8 +1870,7 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
rc = lnet_nid2peer_locked(&msg->msg_rxpeer, from_nid, cpt);
if (rc != 0) {
lnet_net_unlock(cpt);
- CERROR("%s, src %s: Dropping %s "
- "(error %d looking up sender)\n",
+ CERROR("%s, src %s: Dropping %s (error %d looking up sender)\n",
libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
lnet_msgtyp2str(type), rc);
lnet_msg_free(msg);
@@ -2003,12 +1990,11 @@ lnet_recv_delayed_msg_list(struct list_head *head)
LASSERT(msg->msg_rxpeer != NULL);
LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
- CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d "
- "match %llu offset %d length %d.\n",
- libcfs_id2str(id), msg->msg_hdr.msg.put.ptl_index,
- msg->msg_hdr.msg.put.match_bits,
- msg->msg_hdr.msg.put.offset,
- msg->msg_hdr.payload_length);
+ CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d match %llu offset %d length %d.\n",
+ libcfs_id2str(id), msg->msg_hdr.msg.put.ptl_index,
+ msg->msg_hdr.msg.put.match_bits,
+ msg->msg_hdr.msg.put.offset,
+ msg->msg_hdr.payload_length);
lnet_recv_put(msg->msg_rxpeer->lp_ni, msg);
}
diff --git a/drivers/staging/lustre/lnet/lnet/lib-ptl.c b/drivers/staging/lustre/lnet/lnet/lib-ptl.c
index 720c73be4d3..19ed696344f 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-ptl.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-ptl.c
@@ -192,8 +192,7 @@ lnet_try_match_md(lnet_libmd_t *md,
}
/* Commit to this ME/MD */
- CDEBUG(D_NET, "Incoming %s index %x from %s of "
- "length %d/%d into md %#llx [%d] + %d\n",
+ CDEBUG(D_NET, "Incoming %s index %x from %s of length %d/%d into md %#llx [%d] + %d\n",
(info->mi_opc == LNET_MD_OP_PUT) ? "put" : "get",
info->mi_portal, libcfs_id2str(info->mi_id), mlength,
info->mi_rlength, md->md_lh.lh_cookie, md->md_niov, offset);
diff --git a/drivers/staging/lustre/lnet/lnet/lo.c b/drivers/staging/lustre/lnet/lnet/lo.c
index be31dfc5fa4..17e1643fd67 100644
--- a/drivers/staging/lustre/lnet/lnet/lo.c
+++ b/drivers/staging/lustre/lnet/lnet/lo.c
@@ -35,7 +35,7 @@
#define DEBUG_SUBSYSTEM S_LNET
#include "../../include/linux/lnet/lib-lnet.h"
-int
+static int
lolnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
{
LASSERT(!lntmsg->msg_routing);
@@ -44,7 +44,7 @@ lolnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
return lnet_parse(ni, &lntmsg->msg_hdr, ni->ni_nid, lntmsg, 0);
}
-int
+static int
lolnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
int delayed, unsigned int niov,
struct iovec *iov, lnet_kiov_t *kiov,
@@ -86,7 +86,7 @@ lolnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
static int lolnd_instanced;
-void
+static void
lolnd_shutdown(lnet_ni_t *ni)
{
CDEBUG(D_NET, "shutdown\n");
@@ -95,7 +95,7 @@ lolnd_shutdown(lnet_ni_t *ni)
lolnd_instanced = 0;
}
-int
+static int
lolnd_startup(lnet_ni_t *ni)
{
LASSERT(ni->ni_lnd == &the_lolnd);
diff --git a/drivers/staging/lustre/lnet/lnet/module.c b/drivers/staging/lustre/lnet/lnet/module.c
index e84d59d23ae..3c23677bc28 100644
--- a/drivers/staging/lustre/lnet/lnet/module.c
+++ b/drivers/staging/lustre/lnet/lnet/module.c
@@ -43,7 +43,7 @@ MODULE_PARM_DESC(config_on_load, "configure network at module load");
static struct mutex lnet_config_mutex;
-int
+static int
lnet_configure(void *arg)
{
/* 'arg' only there so I can be passed to cfs_create_thread() */
@@ -63,7 +63,7 @@ lnet_configure(void *arg)
return rc;
}
-int
+static int
lnet_unconfigure(void)
{
int refcount;
@@ -83,7 +83,7 @@ lnet_unconfigure(void)
return (refcount == 0) ? 0 : -EBUSY;
}
-int
+static int
lnet_ioctl(unsigned int cmd, struct libcfs_ioctl_data *data)
{
int rc;
@@ -110,7 +110,7 @@ lnet_ioctl(unsigned int cmd, struct libcfs_ioctl_data *data)
DECLARE_IOCTL_HANDLER(lnet_ioctl_handler, lnet_ioctl);
-int
+static int __init
init_lnet(void)
{
int rc;
@@ -135,7 +135,7 @@ init_lnet(void)
return 0;
}
-void
+static void __exit
fini_lnet(void)
{
int rc;
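
Besides becoming static, init_lnet() and fini_lnet() gain the __init and __exit section annotations. Code placed in the __init section is freed once the module has finished loading, and __exit code is left out entirely when the driver is built into the kernel; both annotations only make sense for functions registered through module_init()/module_exit(), which this file already does. The standard pattern, reduced to a stub with placeholder names:

	#include <linux/init.h>
	#include <linux/module.h>

	static int __init example_init(void)
	{
		return 0;	/* this text is discarded after a successful load */
	}

	static void __exit example_exit(void)
	{
		/* omitted completely when the driver is built in */
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");
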
diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c
index b5b8fb576bf..c667b5b7676 100644
--- a/drivers/staging/lustre/lnet/lnet/router.c
+++ b/drivers/staging/lustre/lnet/lnet/router.c
@@ -457,8 +457,7 @@ lnet_check_routes(void)
lnet_net_unlock(cpt);
- CERROR("Routes to %s via %s and %s not "
- "supported\n",
+ CERROR("Routes to %s via %s and %s not supported\n",
libcfs_net2str(net),
libcfs_nid2str(nid1),
libcfs_nid2str(nid2));
@@ -752,7 +751,7 @@ lnet_router_checker_event(lnet_event_t *event)
lnet_net_unlock(lp->lp_cpt);
}
-void
+static void
lnet_wait_known_routerstate(void)
{
lnet_peer_t *rtr;
@@ -784,7 +783,7 @@ lnet_wait_known_routerstate(void)
}
}
-void
+static void
lnet_update_ni_status_locked(void)
{
lnet_ni_t *ni;
@@ -824,7 +823,7 @@ lnet_update_ni_status_locked(void)
}
}
-void
+static void
lnet_destroy_rc_data(lnet_rc_data_t *rcd)
{
LASSERT(list_empty(&rcd->rcd_list));
@@ -845,7 +844,7 @@ lnet_destroy_rc_data(lnet_rc_data_t *rcd)
LIBCFS_FREE(rcd, sizeof(*rcd));
}
-lnet_rc_data_t *
+static lnet_rc_data_t *
lnet_create_rc_data_locked(lnet_peer_t *gateway)
{
lnet_rc_data_t *rcd = NULL;
@@ -959,8 +958,7 @@ lnet_ping_router_locked (lnet_peer_t *rtr)
secs = lnet_router_check_interval(rtr);
CDEBUG(D_NET,
- "rtr %s %d: deadline %lu ping_notsent %d alive %d "
- "alive_count %d lp_ping_timestamp %lu\n",
+ "rtr %s %d: deadline %lu ping_notsent %d alive %d alive_count %d lp_ping_timestamp %lu\n",
libcfs_nid2str(rtr->lp_nid), secs,
rtr->lp_ping_deadline, rtr->lp_ping_notsent,
rtr->lp_alive, rtr->lp_alive_count, rtr->lp_ping_timestamp);
@@ -1010,9 +1008,7 @@ lnet_router_checker_start(void)
if (check_routers_before_use &&
dead_router_check_interval <= 0) {
- LCONSOLE_ERROR_MSG(0x10a, "'dead_router_check_interval' must be"
- " set if 'check_routers_before_use' is set"
- "\n");
+ LCONSOLE_ERROR_MSG(0x10a, "'dead_router_check_interval' must be set if 'check_routers_before_use' is set\n");
return -EINVAL;
}
@@ -1224,7 +1220,7 @@ rescan:
return 0;
}
-void
+static void
lnet_destroy_rtrbuf(lnet_rtrbuf_t *rb, int npages)
{
int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);
@@ -1235,7 +1231,7 @@ lnet_destroy_rtrbuf(lnet_rtrbuf_t *rb, int npages)
LIBCFS_FREE(rb, sz);
}
-lnet_rtrbuf_t *
+static lnet_rtrbuf_t *
lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt)
{
int npages = rbp->rbp_npages;
@@ -1270,7 +1266,7 @@ lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt)
return rb;
}
-void
+static void
lnet_rtrpool_free_bufs(lnet_rtrbufpool_t *rbp)
{
int npages = rbp->rbp_npages;
@@ -1299,7 +1295,7 @@ lnet_rtrpool_free_bufs(lnet_rtrbufpool_t *rbp)
rbp->rbp_nbuffers = rbp->rbp_credits = 0;
}
-int
+static int
lnet_rtrpool_alloc_bufs(lnet_rtrbufpool_t *rbp, int nbufs, int cpt)
{
lnet_rtrbuf_t *rb;
@@ -1333,7 +1329,7 @@ lnet_rtrpool_alloc_bufs(lnet_rtrbufpool_t *rbp, int nbufs, int cpt)
return 0;
}
-void
+static void
lnet_rtrpool_init(lnet_rtrbufpool_t *rbp, int npages)
{
INIT_LIST_HEAD(&rbp->rbp_msgs);
@@ -1370,8 +1366,8 @@ lnet_nrb_tiny_calculate(int npages)
if (tiny_router_buffers < 0) {
LCONSOLE_ERROR_MSG(0x10c,
- "tiny_router_buffers=%d invalid when "
- "routing enabled\n", tiny_router_buffers);
+ "tiny_router_buffers=%d invalid when routing enabled\n",
+ tiny_router_buffers);
return -1;
}
@@ -1389,8 +1385,8 @@ lnet_nrb_small_calculate(int npages)
if (small_router_buffers < 0) {
LCONSOLE_ERROR_MSG(0x10c,
- "small_router_buffers=%d invalid when "
- "routing enabled\n", small_router_buffers);
+ "small_router_buffers=%d invalid when routing enabled\n",
+ small_router_buffers);
return -1;
}
@@ -1408,8 +1404,8 @@ lnet_nrb_large_calculate(int npages)
if (large_router_buffers < 0) {
LCONSOLE_ERROR_MSG(0x10c,
- "large_router_buffers=%d invalid when "
- "routing enabled\n", large_router_buffers);
+ "large_router_buffers=%d invalid when routing enabled\n",
+ large_router_buffers);
return -1;
}
@@ -1442,8 +1438,7 @@ lnet_rtrpools_alloc(int im_a_router)
} else if (!strcmp(forwarding, "enabled")) {
/* explicitly enabled */
} else {
- LCONSOLE_ERROR_MSG(0x10b, "'forwarding' not set to either "
- "'enabled' or 'disabled'\n");
+ LCONSOLE_ERROR_MSG(0x10b, "'forwarding' not set to either 'enabled' or 'disabled'\n");
return -EINVAL;
}
@@ -1520,11 +1515,10 @@ lnet_notify(lnet_ni_t *ni, lnet_nid_t nid, int alive, unsigned long when)
/* can't do predictions... */
if (cfs_time_after(when, now)) {
- CWARN ("Ignoring prediction from %s of %s %s "
- "%ld seconds in the future\n",
- (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
- libcfs_nid2str(nid), alive ? "up" : "down",
- cfs_duration_sec(cfs_time_sub(when, now)));
+ CWARN("Ignoring prediction from %s of %s %s %ld seconds in the future\n",
+ (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
+ libcfs_nid2str(nid), alive ? "up" : "down",
+ cfs_duration_sec(cfs_time_sub(when, now)));
return -EINVAL;
}
diff --git a/drivers/staging/lustre/lnet/lnet/router_proc.c b/drivers/staging/lustre/lnet/lnet/router_proc.c
index 6e8f7e2bbcf..46cde7036f1 100644
--- a/drivers/staging/lustre/lnet/lnet/router_proc.c
+++ b/drivers/staging/lustre/lnet/lnet/router_proc.c
@@ -164,8 +164,8 @@ static int proc_lnet_stats(struct ctl_table *table, int write,
__proc_lnet_stats);
}
-int proc_lnet_routes(struct ctl_table *table, int write, void __user *buffer,
- size_t *lenp, loff_t *ppos)
+static int proc_lnet_routes(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
const int tmpsiz = 256;
char *tmpstr;
@@ -290,8 +290,8 @@ int proc_lnet_routes(struct ctl_table *table, int write, void __user *buffer,
return rc;
}
-int proc_lnet_routers(struct ctl_table *table, int write, void __user *buffer,
- size_t *lenp, loff_t *ppos)
+static int proc_lnet_routers(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
int rc = 0;
char *tmpstr;
@@ -425,8 +425,8 @@ int proc_lnet_routers(struct ctl_table *table, int write, void __user *buffer,
return rc;
}
-int proc_lnet_peers(struct ctl_table *table, int write, void __user *buffer,
- size_t *lenp, loff_t *ppos)
+static int proc_lnet_peers(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
const int tmpsiz = 256;
struct lnet_peer_table *ptable;
@@ -657,8 +657,8 @@ static int proc_lnet_buffers(struct ctl_table *table, int write,
__proc_lnet_buffers);
}
-int proc_lnet_nis(struct ctl_table *table, int write, void __user *buffer,
- size_t *lenp, loff_t *ppos)
+static int proc_lnet_nis(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
int tmpsiz = 128 * LNET_CPT_NUMBER;
int rc = 0;
@@ -791,20 +791,17 @@ static struct lnet_portal_rotors portal_rotors[] = {
{
.pr_value = LNET_PTL_ROTOR_ON,
.pr_name = "ON",
- .pr_desc = "round-robin dispatch all PUT messages for "
- "wildcard portals"
+ .pr_desc = "round-robin dispatch all PUT messages for wildcard portals"
},
{
.pr_value = LNET_PTL_ROTOR_RR_RT,
.pr_name = "RR_RT",
- .pr_desc = "round-robin dispatch routed PUT message for "
- "wildcard portals"
+ .pr_desc = "round-robin dispatch routed PUT message for wildcard portals"
},
{
.pr_value = LNET_PTL_ROTOR_HASH_RT,
.pr_name = "HASH_RT",
- .pr_desc = "dispatch routed PUT message by hashing source "
- "NID for wildcard portals"
+ .pr_desc = "dispatch routed PUT message by hashing source NID for wildcard portals"
},
{
.pr_value = -1,
diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
index a94f336d578..463da076fa7 100644
--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
@@ -233,7 +233,7 @@ brw_fill_bulk(srpc_bulk_t *bk, int pattern, __u64 magic)
}
}
-int
+static int
brw_check_bulk(srpc_bulk_t *bk, int pattern, __u64 magic)
{
int i;
@@ -358,7 +358,7 @@ out:
return;
}
-void
+static void
brw_server_rpc_done(srpc_server_rpc_t *rpc)
{
srpc_bulk_t *blk = rpc->srpc_bulk;
@@ -378,7 +378,7 @@ brw_server_rpc_done(srpc_server_rpc_t *rpc)
sfw_free_pages(rpc);
}
-int
+static int
brw_bulk_ready(srpc_server_rpc_t *rpc, int status)
{
__u64 magic = BRW_MAGIC;
@@ -414,7 +414,7 @@ brw_bulk_ready(srpc_server_rpc_t *rpc, int status)
return 0;
}
-int
+static int
brw_server_handle(struct srpc_server_rpc *rpc)
{
struct srpc_service *sv = rpc->srpc_scd->scd_svc;
diff --git a/drivers/staging/lustre/lnet/selftest/conctl.c b/drivers/staging/lustre/lnet/selftest/conctl.c
index ae7b0fcd818..5bc615309e7 100644
--- a/drivers/staging/lustre/lnet/selftest/conctl.c
+++ b/drivers/staging/lustre/lnet/selftest/conctl.c
@@ -45,7 +45,7 @@
#include "../../include/linux/lnet/lnetst.h"
#include "console.h"
-int
+static int
lst_session_new_ioctl(lstio_session_new_args_t *args)
{
char *name;
@@ -82,7 +82,7 @@ lst_session_new_ioctl(lstio_session_new_args_t *args)
return rc;
}
-int
+static int
lst_session_end_ioctl(lstio_session_end_args_t *args)
{
if (args->lstio_ses_key != console_session.ses_key)
@@ -91,7 +91,7 @@ lst_session_end_ioctl(lstio_session_end_args_t *args)
return lstcon_session_end();
}
-int
+static int
lst_session_info_ioctl(lstio_session_info_args_t *args)
{
/* no checking of key */
@@ -113,7 +113,7 @@ lst_session_info_ioctl(lstio_session_info_args_t *args)
args->lstio_ses_nmlen);
}
-int
+static int
lst_debug_ioctl(lstio_debug_args_t *args)
{
char *name = NULL;
@@ -194,7 +194,7 @@ out:
return rc;
}
-int
+static int
lst_group_add_ioctl(lstio_group_add_args_t *args)
{
char *name;
@@ -228,7 +228,7 @@ lst_group_add_ioctl(lstio_group_add_args_t *args)
return rc;
}
-int
+static int
lst_group_del_ioctl(lstio_group_del_args_t *args)
{
int rc;
@@ -262,7 +262,7 @@ lst_group_del_ioctl(lstio_group_del_args_t *args)
return rc;
}
-int
+static int
lst_group_update_ioctl(lstio_group_update_args_t *args)
{
int rc;
@@ -320,7 +320,7 @@ lst_group_update_ioctl(lstio_group_update_args_t *args)
return rc;
}
-int
+static int
lst_nodes_add_ioctl(lstio_group_nodes_args_t *args)
{
unsigned feats;
@@ -365,7 +365,7 @@ lst_nodes_add_ioctl(lstio_group_nodes_args_t *args)
return rc;
}
-int
+static int
lst_group_list_ioctl(lstio_group_list_args_t *args)
{
if (args->lstio_grp_key != console_session.ses_key)
@@ -382,7 +382,7 @@ lst_group_list_ioctl(lstio_group_list_args_t *args)
args->lstio_grp_namep);
}
-int
+static int
lst_group_info_ioctl(lstio_group_info_args_t *args)
{
char *name;
@@ -446,7 +446,7 @@ lst_group_info_ioctl(lstio_group_info_args_t *args)
return 0;
}
-int
+static int
lst_batch_add_ioctl(lstio_batch_add_args_t *args)
{
int rc;
@@ -480,7 +480,7 @@ lst_batch_add_ioctl(lstio_batch_add_args_t *args)
return rc;
}
-int
+static int
lst_batch_run_ioctl(lstio_batch_run_args_t *args)
{
int rc;
@@ -515,7 +515,7 @@ lst_batch_run_ioctl(lstio_batch_run_args_t *args)
return rc;
}
-int
+static int
lst_batch_stop_ioctl(lstio_batch_stop_args_t *args)
{
int rc;
@@ -551,7 +551,7 @@ lst_batch_stop_ioctl(lstio_batch_stop_args_t *args)
return rc;
}
-int
+static int
lst_batch_query_ioctl(lstio_batch_query_args_t *args)
{
char *name;
@@ -593,7 +593,7 @@ lst_batch_query_ioctl(lstio_batch_query_args_t *args)
return rc;
}
-int
+static int
lst_batch_list_ioctl(lstio_batch_list_args_t *args)
{
if (args->lstio_bat_key != console_session.ses_key)
@@ -610,7 +610,7 @@ lst_batch_list_ioctl(lstio_batch_list_args_t *args)
args->lstio_bat_namep);
}
-int
+static int
lst_batch_info_ioctl(lstio_batch_info_args_t *args)
{
char *name;
@@ -675,7 +675,7 @@ lst_batch_info_ioctl(lstio_batch_info_args_t *args)
return rc;
}
-int
+static int
lst_stat_query_ioctl(lstio_stat_args_t *args)
{
int rc;
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
index a3a60d6e908..9999b0dc03e 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.c
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.c
@@ -88,7 +88,7 @@ lstcon_rpc_done(srpc_client_rpc_t *rpc)
spin_unlock(&rpc->crpc_lock);
}
-int
+static int
lstcon_rpc_init(lstcon_node_t *nd, int service, unsigned feats,
int bulk_npg, int bulk_len, int embedded, lstcon_rpc_t *crpc)
{
@@ -113,7 +113,7 @@ lstcon_rpc_init(lstcon_node_t *nd, int service, unsigned feats,
return 0;
}
-int
+static int
lstcon_rpc_prep(lstcon_node_t *nd, int service, unsigned feats,
int bulk_npg, int bulk_len, lstcon_rpc_t **crpcpp)
{
@@ -182,7 +182,7 @@ lstcon_rpc_put(lstcon_rpc_t *crpc)
atomic_dec(&console_session.ses_rpc_counter);
}
-void
+static void
lstcon_rpc_post(lstcon_rpc_t *crpc)
{
lstcon_rpc_trans_t *trans = crpc->crp_trans;
@@ -383,7 +383,7 @@ lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout)
return rc;
}
-int
+static int
lstcon_rpc_get_reply(lstcon_rpc_t *crpc, srpc_msg_t **msgpp)
{
lstcon_node_t *nd = crpc->crp_node;
@@ -718,7 +718,7 @@ lstcon_next_id(int idx, int nkiov, lnet_kiov_t *kiov)
return &pid[idx % SFW_ID_PER_PAGE];
}
-int
+static int
lstcon_dstnodes_prep(lstcon_group_t *grp, int idx,
int dist, int span, int nkiov, lnet_kiov_t *kiov)
{
@@ -772,7 +772,7 @@ lstcon_dstnodes_prep(lstcon_group_t *grp, int idx,
return 0;
}
-int
+static int
lstcon_pingrpc_prep(lst_test_ping_param_t *param, srpc_test_reqst_t *req)
{
test_ping_req_t *prq = &req->tsr_u.ping;
@@ -783,7 +783,7 @@ lstcon_pingrpc_prep(lst_test_ping_param_t *param, srpc_test_reqst_t *req)
return 0;
}
-int
+static int
lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req)
{
test_bulk_req_t *brq = &req->tsr_u.bulk_v0;
@@ -795,7 +795,7 @@ lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req)
return 0;
}
-int
+static int
lstcon_bulkrpc_v1_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req)
{
test_bulk_req_v1_t *brq = &req->tsr_u.bulk_v1;
@@ -915,7 +915,7 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
return rc;
}
-int
+static int
lstcon_sesnew_stat_reply(lstcon_rpc_trans_t *trans,
lstcon_node_t *nd, srpc_msg_t *reply)
{
@@ -1162,7 +1162,7 @@ lstcon_rpc_trans_ndlist(struct list_head *ndlist,
return rc;
}
-void
+static void
lstcon_rpc_pinger(void *arg)
{
stt_timer_t *ptimer = (stt_timer_t *)arg;
diff --git a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c
index 5dad9f1f946..49cb6543d53 100644
--- a/drivers/staging/lustre/lnet/selftest/console.c
+++ b/drivers/staging/lustre/lnet/selftest/console.c
@@ -1208,8 +1208,7 @@ again:
lstcon_rpc_trans_destroy(trans);
/* return if any error */
- CDEBUG(D_NET, "Failed to add test %s, "
- "RPC error %d, framework error %d\n",
+ CDEBUG(D_NET, "Failed to add test %s, RPC error %d, framework error %d\n",
transop == LST_TRANS_TSBCLIADD ? "client" : "server",
lstcon_trans_stat()->trs_rpc_errno,
lstcon_trans_stat()->trs_fwk_errno);
@@ -1885,8 +1884,7 @@ lstcon_session_feats_check(unsigned feats)
spin_unlock(&console_session.ses_rpc_lock);
if (rc != 0) {
- CERROR("remote features %x do not match with "
- "session features %x of console\n",
+ CERROR("remote features %x do not match with session features %x of console\n",
feats, console_session.ses_features);
}
diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
index df04ab7de83..cc9d1826ae6 100644
--- a/drivers/staging/lustre/lnet/selftest/framework.c
+++ b/drivers/staging/lustre/lnet/selftest/framework.c
@@ -156,7 +156,7 @@ sfw_register_test (srpc_service_t *service, sfw_test_client_ops_t *cliops)
return 0;
}
-void
+static void
sfw_add_session_timer (void)
{
sfw_session_t *sn = sfw_data.fw_session;
@@ -176,7 +176,7 @@ sfw_add_session_timer (void)
return;
}
-int
+static int
sfw_del_session_timer (void)
{
sfw_session_t *sn = sfw_data.fw_session;
@@ -238,7 +238,7 @@ sfw_deactivate_session (void)
}
-void
+static void
sfw_session_expired (void *data)
{
sfw_session_t *sn = data;
@@ -284,15 +284,14 @@ sfw_init_session(sfw_session_t *sn, lst_sid_t sid,
}
/* completion handler for incoming framework RPCs */
-void
+static void
sfw_server_rpc_done(struct srpc_server_rpc *rpc)
{
struct srpc_service *sv = rpc->srpc_scd->scd_svc;
int status = rpc->srpc_status;
CDEBUG (D_NET,
- "Incoming framework RPC done: "
- "service %s, peer %s, status %s:%d\n",
+ "Incoming framework RPC done: service %s, peer %s, status %s:%d\n",
sv->sv_name, libcfs_id2str(rpc->srpc_peer),
swi_state2str(rpc->srpc_wi.swi_state),
status);
@@ -302,7 +301,7 @@ sfw_server_rpc_done(struct srpc_server_rpc *rpc)
return;
}
-void
+static void
sfw_client_rpc_fini (srpc_client_rpc_t *rpc)
{
LASSERT (rpc->crpc_bulk.bk_niov == 0);
@@ -310,8 +309,7 @@ sfw_client_rpc_fini (srpc_client_rpc_t *rpc)
LASSERT (atomic_read(&rpc->crpc_refcount) == 0);
CDEBUG (D_NET,
- "Outgoing framework RPC done: "
- "service %d, peer %s, status %s:%d:%d\n",
+ "Outgoing framework RPC done: service %d, peer %s, status %s:%d:%d\n",
rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
swi_state2str(rpc->crpc_wi.swi_state),
rpc->crpc_aborted, rpc->crpc_status);
@@ -325,7 +323,7 @@ sfw_client_rpc_fini (srpc_client_rpc_t *rpc)
spin_unlock(&sfw_data.fw_lock);
}
-sfw_batch_t *
+static sfw_batch_t *
sfw_find_batch (lst_bid_t bid)
{
sfw_session_t *sn = sfw_data.fw_session;
@@ -341,7 +339,7 @@ sfw_find_batch (lst_bid_t bid)
return NULL;
}
-sfw_batch_t *
+static sfw_batch_t *
sfw_bid2batch (lst_bid_t bid)
{
sfw_session_t *sn = sfw_data.fw_session;
@@ -367,7 +365,7 @@ sfw_bid2batch (lst_bid_t bid)
return bat;
}
-int
+static int
sfw_get_stats (srpc_stat_reqst_t *request, srpc_stat_reply_t *reply)
{
sfw_session_t *sn = sfw_data.fw_session;
@@ -479,7 +477,7 @@ sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply)
return 0;
}
-int
+static int
sfw_remove_session (srpc_rmsn_reqst_t *request, srpc_rmsn_reply_t *reply)
{
sfw_session_t *sn = sfw_data.fw_session;
@@ -511,7 +509,7 @@ sfw_remove_session (srpc_rmsn_reqst_t *request, srpc_rmsn_reply_t *reply)
return 0;
}
-int
+static int
sfw_debug_session (srpc_debug_reqst_t *request, srpc_debug_reply_t *reply)
{
sfw_session_t *sn = sfw_data.fw_session;
@@ -532,7 +530,7 @@ sfw_debug_session (srpc_debug_reqst_t *request, srpc_debug_reply_t *reply)
return 0;
}
-void
+static void
sfw_test_rpc_fini (srpc_client_rpc_t *rpc)
{
sfw_test_unit_t *tsu = rpc->crpc_priv;
@@ -554,7 +552,7 @@ sfw_test_buffers(sfw_test_instance_t *tsi)
return max(SFW_TEST_WI_MIN, nbuf + SFW_TEST_WI_EXTRA);
}
-int
+static int
sfw_load_test(struct sfw_test_instance *tsi)
{
struct sfw_test_case *tsc;
@@ -575,8 +573,8 @@ sfw_load_test(struct sfw_test_instance *tsi)
rc = srpc_service_add_buffers(svc, nbuf);
if (rc != 0) {
- CWARN("Failed to reserve enough buffers: "
- "service %s, %d needed: %d\n", svc->sv_name, nbuf, rc);
+ CWARN("Failed to reserve enough buffers: service %s, %d needed: %d\n",
+ svc->sv_name, nbuf, rc);
/* NB: this error handler is not strictly correct, because
* it may release more buffers than already allocated,
* but it doesn't matter because request portal should
@@ -591,7 +589,7 @@ sfw_load_test(struct sfw_test_instance *tsi)
return 0;
}
-void
+static void
sfw_unload_test(struct sfw_test_instance *tsi)
{
struct sfw_test_case *tsc = sfw_find_test_case(tsi->tsi_service);
@@ -609,7 +607,7 @@ sfw_unload_test(struct sfw_test_instance *tsi)
return;
}
-void
+static void
sfw_destroy_test_instance (sfw_test_instance_t *tsi)
{
srpc_client_rpc_t *rpc;
@@ -643,7 +641,7 @@ clean:
return;
}
-void
+static void
sfw_destroy_batch (sfw_batch_t *tsb)
{
sfw_test_instance_t *tsi;
@@ -682,7 +680,7 @@ sfw_destroy_session (sfw_session_t *sn)
return;
}
-void
+static void
sfw_unpack_addtest_req(srpc_msg_t *msg)
{
srpc_test_reqst_t *req = &msg->msg_body.tes_reqst;
@@ -727,7 +725,7 @@ sfw_unpack_addtest_req(srpc_msg_t *msg)
return;
}
-int
+static int
sfw_add_test_instance (sfw_batch_t *tsb, srpc_server_rpc_t *rpc)
{
srpc_msg_t *msg = &rpc->srpc_reqstbuf->buf_msg;
@@ -865,7 +863,7 @@ sfw_test_unit_done (sfw_test_unit_t *tsu)
return;
}
-void
+static void
sfw_test_rpc_done (srpc_client_rpc_t *rpc)
{
sfw_test_unit_t *tsu = rpc->crpc_priv;
@@ -944,7 +942,7 @@ sfw_create_test_rpc(sfw_test_unit_t *tsu, lnet_process_id_t peer,
return 0;
}
-int
+static int
sfw_run_test (swi_workitem_t *wi)
{
sfw_test_unit_t *tsu = wi->swi_workitem.wi_data;
@@ -994,7 +992,7 @@ test_done:
return 1;
}
-int
+static int
sfw_run_batch (sfw_batch_t *tsb)
{
swi_workitem_t *wi;
@@ -1072,7 +1070,7 @@ sfw_stop_batch (sfw_batch_t *tsb, int force)
return 0;
}
-int
+static int
sfw_query_batch (sfw_batch_t *tsb, int testidx, srpc_batch_reply_t *reply)
{
sfw_test_instance_t *tsi;
@@ -1117,7 +1115,7 @@ sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len,
return 0;
}
-int
+static int
sfw_add_test (srpc_server_rpc_t *rpc)
{
sfw_session_t *sn = sfw_data.fw_session;
@@ -1187,7 +1185,7 @@ sfw_add_test (srpc_server_rpc_t *rpc)
return 0;
}
-int
+static int
sfw_control_batch (srpc_batch_reqst_t *request, srpc_batch_reply_t *reply)
{
sfw_session_t *sn = sfw_data.fw_session;
@@ -1228,7 +1226,7 @@ sfw_control_batch (srpc_batch_reqst_t *request, srpc_batch_reply_t *reply)
return 0;
}
-int
+static int
sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
{
struct srpc_service *sv = rpc->srpc_scd->scd_svc;
@@ -1270,8 +1268,7 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
if (sn != NULL &&
sn->sn_features != request->msg_ses_feats) {
- CNETERR("Features of framework RPC don't match "
- "features of current session: %x/%x\n",
+ CNETERR("Features of framework RPC don't match features of current session: %x/%x\n",
request->msg_ses_feats, sn->sn_features);
reply->msg_body.reply.status = EPROTO;
reply->msg_body.reply.sid = sn->sn_id;
@@ -1334,7 +1331,7 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
return rc;
}
-int
+static int
sfw_bulk_ready(struct srpc_server_rpc *rpc, int status)
{
struct srpc_service *sv = rpc->srpc_scd->scd_svc;
@@ -1348,8 +1345,7 @@ sfw_bulk_ready(struct srpc_server_rpc *rpc, int status)
spin_lock(&sfw_data.fw_lock);
if (status != 0) {
- CERROR("Bulk transfer failed for RPC: "
- "service %s, peer %s, status %d\n",
+ CERROR("Bulk transfer failed for RPC: service %s, peer %s, status %d\n",
sv->sv_name, libcfs_id2str(rpc->srpc_peer), status);
spin_unlock(&sfw_data.fw_lock);
return -EIO;
@@ -1664,12 +1660,10 @@ sfw_startup (void)
}
if (session_timeout == 0)
- CWARN ("Zero session_timeout specified "
- "- test sessions never expire.\n");
+ CWARN("Zero session_timeout specified - test sessions never expire.\n");
if (rpc_timeout == 0)
- CWARN ("Zero rpc_timeout specified "
- "- test RPC never expire.\n");
+ CWARN("Zero rpc_timeout specified - test RPC never expire.\n");
memset(&sfw_data, 0, sizeof(struct smoketest_framework));
@@ -1727,8 +1721,7 @@ sfw_startup (void)
rc = srpc_service_add_buffers(sv, sv->sv_wi_total);
if (rc != 0) {
- CWARN("Failed to reserve enough buffers: "
- "service %s, %d needed: %d\n",
+ CWARN("Failed to reserve enough buffers: service %s, %d needed: %d\n",
sv->sv_name, sv->sv_wi_total, rc);
error = -ENOMEM;
}
diff --git a/drivers/staging/lustre/lnet/selftest/module.c b/drivers/staging/lustre/lnet/selftest/module.c
index 6dd4309dc5e..c6ef5b0d8de 100644
--- a/drivers/staging/lustre/lnet/selftest/module.c
+++ b/drivers/staging/lustre/lnet/selftest/module.c
@@ -55,7 +55,7 @@ static int lst_init_step = LST_INIT_NONE;
struct cfs_wi_sched *lst_sched_serial;
struct cfs_wi_sched **lst_sched_test;
-void
+static void
lnet_selftest_fini(void)
{
int i;
@@ -90,18 +90,7 @@ lnet_selftest_fini(void)
return;
}
-void
-lnet_selftest_structure_assertion(void)
-{
- CLASSERT(sizeof(srpc_msg_t) == 160);
- CLASSERT(sizeof(srpc_test_reqst_t) == 70);
- CLASSERT(offsetof(srpc_msg_t, msg_body.tes_reqst.tsr_concur) == 72);
- CLASSERT(offsetof(srpc_msg_t, msg_body.tes_reqst.tsr_ndest) == 78);
- CLASSERT(sizeof(srpc_stat_reply_t) == 136);
- CLASSERT(sizeof(srpc_stat_reqst_t) == 28);
-}
-
-int
+static int
lnet_selftest_init(void)
{
int nscheds;
@@ -130,8 +119,8 @@ lnet_selftest_init(void)
rc = cfs_wi_sched_create("lst_t", lnet_cpt_table(), i,
nthrs, &lst_sched_test[i]);
if (rc != 0) {
- CERROR("Failed to create CPT affinity WI scheduler "
- "%d for LST\n", i);
+ CERROR("Failed to create CPT affinity WI scheduler %d for LST\n",
+ i);
goto error;
}
}
diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
index 750cac4afbb..d8c0df6e685 100644
--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
@@ -44,7 +44,7 @@
#define LST_PING_TEST_MAGIC 0xbabeface
-int ping_srv_workitems = SFW_TEST_WI_MAX;
+static int ping_srv_workitems = SFW_TEST_WI_MAX;
module_param(ping_srv_workitems, int, 0644);
MODULE_PARM_DESC(ping_srv_workitems, "# PING server workitems");
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c
index a9f29d8833a..f753add7bfb 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.c
+++ b/drivers/staging/lustre/lnet/selftest/rpc.c
@@ -87,7 +87,7 @@ void srpc_set_counters (const srpc_counters_t *cnt)
spin_unlock(&srpc_data.rpc_glock);
}
-int
+static int
srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob)
{
nob = min(nob, (int)PAGE_CACHE_SIZE);
@@ -170,7 +170,7 @@ srpc_next_id (void)
return id;
}
-void
+static void
srpc_init_server_rpc(struct srpc_server_rpc *rpc,
struct srpc_service_cd *scd,
struct srpc_buffer *buffer)
@@ -351,7 +351,7 @@ srpc_remove_service (srpc_service_t *sv)
return 0;
}
-int
+static int
srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf,
int len, int options, lnet_process_id_t peer,
lnet_handle_md_t *mdh, srpc_event_t *ev)
@@ -391,7 +391,7 @@ srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf,
return 0;
}
-int
+static int
srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len,
int options, lnet_process_id_t peer, lnet_nid_t self,
lnet_handle_md_t *mdh, srpc_event_t *ev)
@@ -443,7 +443,7 @@ srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len,
return 0;
}
-int
+static int
srpc_post_active_rqtbuf(lnet_process_id_t peer, int service, void *buf,
int len, lnet_handle_md_t *mdh, srpc_event_t *ev)
{
@@ -452,7 +452,7 @@ srpc_post_active_rqtbuf(lnet_process_id_t peer, int service, void *buf,
LNET_NID_ANY, mdh, ev);
}
-int
+static int
srpc_post_passive_rqtbuf(int service, int local, void *buf, int len,
lnet_handle_md_t *mdh, srpc_event_t *ev)
{
@@ -466,7 +466,7 @@ srpc_post_passive_rqtbuf(int service, int local, void *buf, int len,
LNET_MD_OP_PUT, any, mdh, ev);
}
-int
+static int
srpc_service_post_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf)
{
struct srpc_service *sv = scd->scd_svc;
@@ -678,9 +678,7 @@ srpc_finish_service(struct srpc_service *sv)
rpc = list_entry(scd->scd_rpc_active.next,
struct srpc_server_rpc, srpc_list);
- CNETERR("Active RPC %p on shutdown: sv %s, peer %s, "
- "wi %s scheduled %d running %d, "
- "ev fired %d type %d status %d lnet %d\n",
+ CNETERR("Active RPC %p on shutdown: sv %s, peer %s, wi %s scheduled %d running %d, ev fired %d type %d status %d lnet %d\n",
rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer),
swi_state2str(rpc->srpc_wi.swi_state),
rpc->srpc_wi.swi_workitem.wi_scheduled,
@@ -697,7 +695,7 @@ srpc_finish_service(struct srpc_service *sv)
}
/* called with sv->sv_lock held */
-void
+static void
srpc_service_recycle_buffer(struct srpc_service_cd *scd, srpc_buffer_t *buf)
{
if (!scd->scd_svc->sv_shuttingdown && scd->scd_buf_adjust >= 0) {
@@ -787,7 +785,7 @@ srpc_shutdown_service(srpc_service_t *sv)
}
}
-int
+static int
srpc_send_request (srpc_client_rpc_t *rpc)
{
srpc_event_t *ev = &rpc->crpc_reqstev;
@@ -807,7 +805,7 @@ srpc_send_request (srpc_client_rpc_t *rpc)
return rc;
}
-int
+static int
srpc_prepare_reply (srpc_client_rpc_t *rpc)
{
srpc_event_t *ev = &rpc->crpc_replyev;
@@ -831,7 +829,7 @@ srpc_prepare_reply (srpc_client_rpc_t *rpc)
return rc;
}
-int
+static int
srpc_prepare_bulk (srpc_client_rpc_t *rpc)
{
srpc_bulk_t *bk = &rpc->crpc_bulk;
@@ -863,7 +861,7 @@ srpc_prepare_bulk (srpc_client_rpc_t *rpc)
return rc;
}
-int
+static int
srpc_do_bulk (srpc_server_rpc_t *rpc)
{
srpc_event_t *ev = &rpc->srpc_ev;
@@ -891,7 +889,7 @@ srpc_do_bulk (srpc_server_rpc_t *rpc)
}
/* only called from srpc_handle_rpc */
-void
+static void
srpc_server_rpc_done(srpc_server_rpc_t *rpc, int status)
{
struct srpc_service_cd *scd = rpc->srpc_scd;
@@ -1066,7 +1064,7 @@ srpc_handle_rpc(swi_workitem_t *wi)
return 0;
}
-void
+static void
srpc_client_rpc_expired (void *data)
{
srpc_client_rpc_t *rpc = data;
@@ -1108,7 +1106,7 @@ srpc_add_client_rpc_timer (srpc_client_rpc_t *rpc)
*
* Upon exit the RPC expiry timer is not queued and the handler is not
* running on any CPU. */
-void
+static void
srpc_del_client_rpc_timer (srpc_client_rpc_t *rpc)
{
/* timer not planted or already exploded */
@@ -1129,7 +1127,7 @@ srpc_del_client_rpc_timer (srpc_client_rpc_t *rpc)
}
}
-void
+static void
srpc_client_rpc_done (srpc_client_rpc_t *rpc, int status)
{
swi_workitem_t *wi = &rpc->crpc_wi;
@@ -1236,20 +1234,18 @@ srpc_send_rpc (swi_workitem_t *wi)
if (reply->msg_type != type ||
(reply->msg_magic != SRPC_MSG_MAGIC &&
reply->msg_magic != __swab32(SRPC_MSG_MAGIC))) {
- CWARN ("Bad message from %s: type %u (%d expected),"
- " magic %u (%d expected).\n",
- libcfs_id2str(rpc->crpc_dest),
- reply->msg_type, type,
- reply->msg_magic, SRPC_MSG_MAGIC);
+ CWARN("Bad message from %s: type %u (%d expected), magic %u (%d expected).\n",
+ libcfs_id2str(rpc->crpc_dest),
+ reply->msg_type, type,
+ reply->msg_magic, SRPC_MSG_MAGIC);
rc = -EBADMSG;
break;
}
if (do_bulk && reply->msg_body.reply.status != 0) {
- CWARN ("Remote error %d at %s, unlink bulk buffer in "
- "case peer didn't initiate bulk transfer\n",
- reply->msg_body.reply.status,
- libcfs_id2str(rpc->crpc_dest));
+ CWARN("Remote error %d at %s, unlink bulk buffer in case peer didn't initiate bulk transfer\n",
+ reply->msg_body.reply.status,
+ libcfs_id2str(rpc->crpc_dest));
LNetMDUnlink(rpc->crpc_bulk.bk_mdh);
}
@@ -1393,7 +1389,7 @@ srpc_send_reply(struct srpc_server_rpc *rpc)
}
/* when in kernel always called with LNET_LOCK() held, and in thread context */
-void
+static void
srpc_lnet_ev_handler(lnet_event_t *ev)
{
struct srpc_service_cd *scd;
@@ -1504,11 +1500,10 @@ srpc_lnet_ev_handler(lnet_event_t *ev)
msg->msg_type != __swab32(type)) ||
(msg->msg_magic != SRPC_MSG_MAGIC &&
msg->msg_magic != __swab32(SRPC_MSG_MAGIC))) {
- CERROR ("Dropping RPC (%s) from %s: "
- "status %d mlength %d type %u magic %u.\n",
- sv->sv_name, libcfs_id2str(ev->initiator),
- ev->status, ev->mlength,
- msg->msg_type, msg->msg_magic);
+ CERROR("Dropping RPC (%s) from %s: status %d mlength %d type %u magic %u.\n",
+ sv->sv_name, libcfs_id2str(ev->initiator),
+ ev->status, ev->mlength,
+ msg->msg_type, msg->msg_magic);
/* NB can't call srpc_service_recycle_buffer here since
* it may call LNetM[DE]Attach. The invalid magic tells
diff --git a/drivers/staging/lustre/lnet/selftest/timer.c b/drivers/staging/lustre/lnet/selftest/timer.c
index 91d4caa4edb..f8352c2a7d3 100644
--- a/drivers/staging/lustre/lnet/selftest/timer.c
+++ b/drivers/staging/lustre/lnet/selftest/timer.c
@@ -121,7 +121,7 @@ stt_del_timer(stt_timer_t *timer)
}
/* called with stt_data.stt_lock held */
-int
+static int
stt_expire_list(struct list_head *slot, unsigned long now)
{
int expired = 0;
@@ -145,7 +145,7 @@ stt_expire_list(struct list_head *slot, unsigned long now)
return expired;
}
-int
+static int
stt_check_timers(unsigned long *last)
{
int expired = 0;
@@ -168,7 +168,7 @@ stt_check_timers(unsigned long *last)
}
-int
+static int
stt_timer_main(void *arg)
{
cfs_block_allsigs();
@@ -187,7 +187,7 @@ stt_timer_main(void *arg)
return 0;
}
-int
+static int
stt_start_timer_thread(void)
{
struct task_struct *task;
diff --git a/drivers/staging/lustre/lustre/Kconfig b/drivers/staging/lustre/lustre/Kconfig
index 4f65ba1158b..6725467ef4d 100644
--- a/drivers/staging/lustre/lustre/Kconfig
+++ b/drivers/staging/lustre/lustre/Kconfig
@@ -57,5 +57,5 @@ config LUSTRE_TRANSLATE_ERRNOS
config LUSTRE_LLITE_LLOOP
tristate "Lustre virtual block device"
depends on LUSTRE_FS && BLOCK
- depends on !PPC_64K_PAGES && !ARM64_64K_PAGES
+ depends on !PPC_64K_PAGES && !ARM64_64K_PAGES && !MICROBLAZE_64K_PAGES && !PAGE_SIZE_64KB && !IA64_PAGE_SIZE_64KB && !PARISC_PAGE_SIZE_64KB
default m
diff --git a/drivers/staging/lustre/lustre/include/dt_object.h b/drivers/staging/lustre/lustre/include/dt_object.h
index 212ebaea855..be4c7d95e78 100644
--- a/drivers/staging/lustre/lustre/include/dt_object.h
+++ b/drivers/staging/lustre/lustre/include/dt_object.h
@@ -617,7 +617,7 @@ struct dt_index_operations {
int (*load)(const struct lu_env *env,
const struct dt_it *di, __u64 hash);
int (*key_rec)(const struct lu_env *env,
- const struct dt_it *di, void* key_rec);
+ const struct dt_it *di, void *key_rec);
} dio_it;
};
@@ -667,7 +667,7 @@ static inline int lu_device_is_dt(const struct lu_device *d)
return ergo(d != NULL, d->ld_type->ldt_tags & LU_DEVICE_DT);
}
-static inline struct dt_device * lu2dt_dev(struct lu_device *l)
+static inline struct dt_device *lu2dt_dev(struct lu_device *l)
{
LASSERT(lu_device_is_dt(l));
return container_of0(l, struct dt_device, dd_lu_dev);
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h b/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
index e94ab343ab2..3925db16065 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
@@ -42,28 +42,6 @@
#include "lustre_patchless_compat.h"
-# define LOCK_FS_STRUCT(fs) spin_lock(&(fs)->lock)
-# define UNLOCK_FS_STRUCT(fs) spin_unlock(&(fs)->lock)
-
-static inline void ll_set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
- struct dentry *dentry)
-{
- struct path path;
- struct path old_pwd;
-
- path.mnt = mnt;
- path.dentry = dentry;
- LOCK_FS_STRUCT(fs);
- old_pwd = fs->pwd;
- path_get(&path);
- fs->pwd = path;
- UNLOCK_FS_STRUCT(fs);
-
- if (old_pwd.dentry)
- path_put(&old_pwd);
-}
-
-
/*
* set ATTR_BLOCKS to a high value to avoid any risk of collision with other
* ATTR_* attributes (see bug 13828)
@@ -91,8 +69,6 @@ static inline void ll_set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
# define inode_dio_read(i) atomic_inc(&(i)->i_dio_count)
/* inode_dio_done(i) use as-is for read unlock */
-#define TREE_READ_LOCK_IRQ(mapping) spin_lock_irq(&(mapping)->tree_lock)
-#define TREE_READ_UNLOCK_IRQ(mapping) spin_unlock_irq(&(mapping)->tree_lock)
#ifndef FS_HAS_FIEMAP
#define FS_HAS_FIEMAP (0)
@@ -112,8 +88,6 @@ static inline void ll_set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
#define cfs_bio_io_error(a, b) bio_io_error((a))
#define cfs_bio_endio(a, b, c) bio_endio((a), (c))
-#define cfs_fs_pwd(fs) ((fs)->pwd.dentry)
-#define cfs_fs_mnt(fs) ((fs)->pwd.mnt)
#define cfs_path_put(nd) path_put(&(nd)->path)
@@ -139,8 +113,7 @@ ll_quota_on(struct super_block *sb, int off, int ver, char *name, int remount)
);
path_put(&path);
return rc;
- }
- else
+ } else
return -ENOSYS;
}
@@ -149,8 +122,7 @@ static inline int ll_quota_off(struct super_block *sb, int off, int remount)
if (sb->s_qcop->quota_off) {
return sb->s_qcop->quota_off(sb, off
);
- }
- else
+ } else
return -ENOSYS;
}
diff --git a/drivers/staging/lustre/lustre/include/linux/obd.h b/drivers/staging/lustre/lustre/include/linux/obd.h
index 9d7e28ace42..9cd8683573c 100644
--- a/drivers/staging/lustre/lustre/include/linux/obd.h
+++ b/drivers/staging/lustre/lustre/include/linux/obd.h
@@ -87,8 +87,7 @@ static inline void __client_obd_list_lock(client_obd_lock_t *lock,
if (task == NULL)
continue;
- LCONSOLE_WARN("%s:%d: lock %p was acquired"
- " by <%s:%d:%s:%d> for %lu seconds.\n",
+ LCONSOLE_WARN("%s:%d: lock %p was acquired by <%s:%d:%s:%d> for %lu seconds.\n",
current->comm, current->pid,
lock, task->comm, task->pid,
lock->func, lock->line,
diff --git a/drivers/staging/lustre/lustre/include/lprocfs_status.h b/drivers/staging/lustre/lustre/include/lprocfs_status.h
index ccb6cd42a67..cfe503b7df6 100644
--- a/drivers/staging/lustre/lustre/include/lprocfs_status.h
+++ b/drivers/staging/lustre/lustre/include/lprocfs_status.h
@@ -374,8 +374,8 @@ static inline void s2dhms(struct dhms *ts, time_t secs)
#define JOBSTATS_PROCNAME_UID "procname_uid"
#define JOBSTATS_NODELOCAL "nodelocal"
-extern int lprocfs_write_frac_helper(const char *buffer, unsigned long count,
- int *val, int mult);
+extern int lprocfs_write_frac_helper(const char __user *buffer,
+ unsigned long count, int *val, int mult);
extern int lprocfs_read_frac_helper(char *buffer, unsigned long count,
long val, int mult);
#if defined (CONFIG_PROC_FS)
@@ -557,7 +557,7 @@ extern void lprocfs_free_obd_stats(struct obd_device *obddev);
extern void lprocfs_free_md_stats(struct obd_device *obddev);
struct obd_export;
struct nid_stat;
-extern int lprocfs_add_clear_entry(struct obd_device * obd,
+extern int lprocfs_add_clear_entry(struct obd_device *obd,
struct proc_dir_entry *entry);
extern int lprocfs_exp_setup(struct obd_export *exp,
lnet_nid_t *peer_nid, int *newnid);
@@ -647,7 +647,7 @@ extern int lprocfs_rd_kbytesavail(struct seq_file *m, void *data);
extern int lprocfs_rd_filestotal(struct seq_file *m, void *data);
extern int lprocfs_rd_filesfree(struct seq_file *m, void *data);
-extern int lprocfs_write_helper(const char *buffer, unsigned long count,
+extern int lprocfs_write_helper(const char __user *buffer, unsigned long count,
int *val);
extern int lprocfs_seq_read_frac_helper(struct seq_file *m, long val, int mult);
extern int lprocfs_write_u64_helper(const char *buffer, unsigned long count,
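
Two of the write-helper prototypes above gain a __user annotation on their buffer argument, marking the pointer as userspace memory that sparse will not let the code dereference directly; the implementation must copy the data into the kernel first. A minimal sketch of how such a proc write helper typically handles the buffer (the helper name and the fixed buffer size are illustrative, not the Lustre implementation):

	#include <linux/kernel.h>
	#include <linux/uaccess.h>

	static int example_write_helper(const char __user *buffer,
					unsigned long count, int *val)
	{
		char kbuf[16];

		if (count >= sizeof(kbuf))
			return -EINVAL;
		if (copy_from_user(kbuf, buffer, count))
			return -EFAULT;
		kbuf[count] = '\0';

		/* kstrtoint() tolerates the trailing newline from echo */
		return kstrtoint(kbuf, 10, val);
	}
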
diff --git a/drivers/staging/lustre/lustre/include/lu_object.h b/drivers/staging/lustre/lustre/include/lu_object.h
index 6015ee5c4b6..2ddb2b054d8 100644
--- a/drivers/staging/lustre/lustre/include/lu_object.h
+++ b/drivers/staging/lustre/lustre/include/lu_object.h
@@ -1120,7 +1120,7 @@ struct lu_context_key {
};
#define LU_KEY_INIT(mod, type) \
- static void* mod##_key_init(const struct lu_context *ctx, \
+ static void *mod##_key_init(const struct lu_context *ctx, \
struct lu_context_key *key) \
{ \
type *value; \
@@ -1137,7 +1137,7 @@ struct lu_context_key {
#define LU_KEY_FINI(mod, type) \
static void mod##_key_fini(const struct lu_context *ctx, \
- struct lu_context_key *key, void* data) \
+ struct lu_context_key *key, void *data) \
{ \
type *info = data; \
\
diff --git a/drivers/staging/lustre/lustre/include/lustre_capa.h b/drivers/staging/lustre/lustre/include/lustre_capa.h
index ab6b9ea98a7..fe19534ebd8 100644
--- a/drivers/staging/lustre/lustre/include/lustre_capa.h
+++ b/drivers/staging/lustre/lustre/include/lustre_capa.h
@@ -154,7 +154,7 @@ static inline __u32 capa_expiry(struct lustre_capa *capa)
}
void _debug_capa(struct lustre_capa *, struct libcfs_debug_msg_data *,
- const char *fmt, ... );
+ const char *fmt, ...);
#define DEBUG_CAPA(level, capa, fmt, args...) \
do { \
if (((level) & D_CANTMASK) != 0 || \
diff --git a/drivers/staging/lustre/lustre/include/lustre_disk.h b/drivers/staging/lustre/lustre/include/lustre_disk.h
index 515b835ce14..9b283313174 100644
--- a/drivers/staging/lustre/lustre/include/lustre_disk.h
+++ b/drivers/staging/lustre/lustre/include/lustre_disk.h
@@ -368,8 +368,7 @@ static inline void check_lcd(char *obd_name, int index,
if (strnlen((char*)lcd->lcd_uuid, length) == length) {
lcd->lcd_uuid[length - 1] = '\0';
- LCONSOLE_ERROR("the client UUID (%s) on %s for exports"
- "stored in last_rcvd(index = %d) is bad!\n",
+ LCONSOLE_ERROR("the client UUID (%s) on %s for exports stored in last_rcvd(index = %d) is bad!\n",
lcd->lcd_uuid, obd_name, index);
}
}
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
index 14ac46f45fd..83bc0a9d7d4 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
@@ -1446,7 +1446,7 @@ static inline void check_res_locked(struct ldlm_resource *res)
assert_spin_locked(&res->lr_lock);
}
-struct ldlm_resource * lock_res_and_lock(struct ldlm_lock *lock);
+struct ldlm_resource *lock_res_and_lock(struct ldlm_lock *lock);
void unlock_res_and_lock(struct ldlm_lock *lock);
/* ldlm_pool.c */
diff --git a/drivers/staging/lustre/lustre/include/lustre_eacl.h b/drivers/staging/lustre/lustre/include/lustre_eacl.h
index b94f76a3301..0f8f76c43ee 100644
--- a/drivers/staging/lustre/lustre/include/lustre_eacl.h
+++ b/drivers/staging/lustre/lustre/include/lustre_eacl.h
@@ -74,7 +74,7 @@ typedef struct {
extern ext_acl_xattr_header *
lustre_posix_acl_xattr_2ext(posix_acl_xattr_header *header, int size);
extern int
-lustre_posix_acl_xattr_filter(posix_acl_xattr_header *header, int size,
+lustre_posix_acl_xattr_filter(posix_acl_xattr_header *header, size_t size,
posix_acl_xattr_header **out);
extern void
lustre_posix_acl_xattr_free(posix_acl_xattr_header *header, int size);
diff --git a/drivers/staging/lustre/lustre/include/lustre_lib.h b/drivers/staging/lustre/lustre/include/lustre_lib.h
index 12c7590e61f..bf135630c39 100644
--- a/drivers/staging/lustre/lustre/include/lustre_lib.h
+++ b/drivers/staging/lustre/lustre/include/lustre_lib.h
@@ -85,7 +85,7 @@ void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id);
/* client.c */
-int client_sanobd_setup(struct obd_device *obddev, struct lustre_cfg* lcfg);
+int client_sanobd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg);
struct client_obd *client_conn2cli(struct lustre_handle *conn);
struct md_open_data;
diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h
index 0a024d3cfeb..36396d1c94d 100644
--- a/drivers/staging/lustre/lustre/include/lustre_net.h
+++ b/drivers/staging/lustre/lustre/include/lustre_net.h
@@ -2627,12 +2627,7 @@ __u32 lustre_msg_get_timeout(struct lustre_msg *msg);
__u32 lustre_msg_get_service_time(struct lustre_msg *msg);
char *lustre_msg_get_jobid(struct lustre_msg *msg);
__u32 lustre_msg_get_cksum(struct lustre_msg *msg);
-#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0)
-__u32 lustre_msg_calc_cksum(struct lustre_msg *msg, int compat18);
-#else
-# warning "remove checksum compatibility support for b1_8"
__u32 lustre_msg_calc_cksum(struct lustre_msg *msg);
-#endif
void lustre_msg_set_handle(struct lustre_msg *msg,
struct lustre_handle *handle);
void lustre_msg_set_type(struct lustre_msg *msg, __u32 type);
@@ -2951,7 +2946,7 @@ void ptlrpcd_decref(void);
* procfs output related functions
* @{
*/
-const char* ll_opcode2str(__u32 opcode);
+const char *ll_opcode2str(__u32 opcode);
#if defined (CONFIG_PROC_FS)
void ptlrpc_lprocfs_register_obd(struct obd_device *obd);
void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd);
diff --git a/drivers/staging/lustre/lustre/include/obd_class.h b/drivers/staging/lustre/lustre/include/obd_class.h
index 882e40bd584..4a29261c514 100644
--- a/drivers/staging/lustre/lustre/include/obd_class.h
+++ b/drivers/staging/lustre/lustre/include/obd_class.h
@@ -414,7 +414,7 @@ do { \
#define EXP_MD_COUNTER_INCREMENT(exp, op)
#endif
-static inline int lprocfs_nid_ldlm_stats_init(struct nid_stat* tmp)
+static inline int lprocfs_nid_ldlm_stats_init(struct nid_stat *tmp)
{
/* Always add in ldlm_stats */
tmp->nid_ldlm_stats = lprocfs_alloc_stats(LDLM_LAST_OPC - LDLM_FIRST_OPC
diff --git a/drivers/staging/lustre/lustre/ldlm/interval_tree.c b/drivers/staging/lustre/lustre/ldlm/interval_tree.c
index a3d7a729241..eab2bd60241 100644
--- a/drivers/staging/lustre/lustre/ldlm/interval_tree.c
+++ b/drivers/staging/lustre/lustre/ldlm/interval_tree.c
@@ -73,6 +73,7 @@ static inline int extent_compare(struct interval_node_extent *e1,
struct interval_node_extent *e2)
{
int rc;
+
if (e1->start == e2->start) {
if (e1->end < e2->end)
rc = -1;
@@ -321,6 +322,7 @@ static void interval_insert_color(struct interval_node *node,
/* Parent is RED, so gparent must not be NULL */
if (node_is_left_child(parent)) {
struct interval_node *uncle;
+
uncle = gparent->in_right;
if (uncle && node_is_red(uncle)) {
uncle->in_color = INTERVAL_BLACK;
@@ -340,6 +342,7 @@ static void interval_insert_color(struct interval_node *node,
__rotate_right(gparent, root);
} else {
struct interval_node *uncle;
+
uncle = gparent->in_left;
if (uncle && node_is_red(uncle)) {
uncle->in_color = INTERVAL_BLACK;
@@ -427,6 +430,7 @@ static void interval_erase_color(struct interval_node *node,
} else {
if (node_is_black_or_0(tmp->in_right)) {
struct interval_node *o_left;
+
o_left = tmp->in_left;
if (o_left)
o_left->in_color = INTERVAL_BLACK;
@@ -458,6 +462,7 @@ static void interval_erase_color(struct interval_node *node,
} else {
if (node_is_black_or_0(tmp->in_left)) {
struct interval_node *o_right;
+
o_right = tmp->in_right;
if (o_right)
o_right->in_color = INTERVAL_BLACK;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
index 0c09b611f4a..a89eebaedab 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
@@ -182,7 +182,9 @@ void ldlm_extent_add_lock(struct ldlm_resource *res,
root = &res->lr_itree[idx].lit_root;
found = interval_insert(&node->li_node, root);
if (found) { /* The policy group found. */
- struct ldlm_interval *tmp = ldlm_interval_detach(lock);
+ struct ldlm_interval *tmp;
+
+ tmp = ldlm_interval_detach(lock);
LASSERT(tmp != NULL);
ldlm_interval_free(tmp);
ldlm_interval_attach(to_ldlm_interval(found), lock);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
index b798daa094b..a4c252febfe 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
@@ -260,7 +260,8 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
int splitted = 0;
const struct ldlm_callback_suite null_cbs = { NULL };
- CDEBUG(D_DLMTRACE, "flags %#llx owner %llu pid %u mode %u start %llu end %llu\n",
+ CDEBUG(D_DLMTRACE,
+ "flags %#llx owner %llu pid %u mode %u start %llu end %llu\n",
*flags, new->l_policy_data.l_flock.owner,
new->l_policy_data.l_flock.pid, mode,
req->l_policy_data.l_flock.start,
@@ -291,6 +292,7 @@ reprocess:
}
} else {
int reprocess_failed = 0;
+
lockmode_verify(mode);
/* This loop determines if there are existing locks
@@ -496,7 +498,8 @@ reprocess:
new->l_policy_data.l_flock.end + 1;
new2->l_conn_export = lock->l_conn_export;
if (lock->l_export != NULL) {
- new2->l_export = class_export_lock_get(lock->l_export, new2);
+ new2->l_export = class_export_lock_get(lock->l_export,
+ new2);
if (new2->l_export->exp_lock_hash &&
hlist_unhashed(&new2->l_exp_hash))
cfs_hash_add(new2->l_export->exp_lock_hash,
@@ -619,8 +622,7 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
return 0;
}
- LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
- "sleeping");
+ LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, sleeping");
fwd.fwd_lock = lock;
obd = class_exp2obd(lock->l_conn_export);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
index f997566ef11..6c6c57ca91d 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
@@ -91,7 +91,8 @@ static inline int ldlm_ns_empty(struct ldlm_namespace *ns)
}
void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *, ldlm_side_t);
-void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *, ldlm_side_t);
+void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *,
+ ldlm_side_t);
struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t);
/* ldlm_request.c */
@@ -214,6 +215,7 @@ static inline struct ldlm_extent *
ldlm_interval_extent(struct ldlm_interval *node)
{
struct ldlm_lock *lock;
+
LASSERT(!list_empty(&node->li_group));
lock = list_entry(node->li_group.next, struct ldlm_lock,
@@ -244,7 +246,7 @@ typedef enum ldlm_policy_res ldlm_policy_res_t;
\
return lprocfs_rd_uint(m, &tmp); \
} \
- struct __##var##__dummy_read {;} /* semicolon catcher */
+ struct __##var##__dummy_read {; } /* semicolon catcher */
#define LDLM_POOL_PROC_WRITER(var, type) \
static int lprocfs_wr_##var(struct file *file, const char *buffer, \
@@ -266,7 +268,7 @@ typedef enum ldlm_policy_res ldlm_policy_res_t;
\
return rc; \
} \
- struct __##var##__dummy_write {;} /* semicolon catcher */
+ struct __##var##__dummy_write {; } /* semicolon catcher */
static inline int is_granted_or_cancelled(struct ldlm_lock *lock)
{
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
index c21e30a074b..c5c86e73ca5 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
@@ -603,7 +603,8 @@ int client_disconnect_export(struct obd_export *exp)
/* obd_force == local only */
ldlm_cli_cancel_unused(obd->obd_namespace, NULL,
obd->obd_force ? LCF_LOCAL : 0, NULL);
- ldlm_namespace_free_prior(obd->obd_namespace, imp, obd->obd_force);
+ ldlm_namespace_free_prior(obd->obd_namespace, imp,
+ obd->obd_force);
}
/* There's no need to hold sem while disconnecting an import,
@@ -858,8 +859,8 @@ void ldlm_dump_export_locks(struct obd_export *exp)
if (!list_empty(&exp->exp_locks_list)) {
struct ldlm_lock *lock;
- CERROR("dumping locks for export %p,"
- "ignore if the unmount doesn't hang\n", exp);
+ CERROR("dumping locks for export %p,ignore if the unmount doesn't hang\n",
+ exp);
list_for_each_entry(lock, &exp->exp_locks_list,
l_exp_refs_link)
LDLM_ERROR(lock, "lock:");
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
index 6140130b605..8191005464b 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
@@ -226,6 +226,7 @@ EXPORT_SYMBOL(ldlm_lock_put);
int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock)
{
int rc = 0;
+
if (!list_empty(&lock->l_lru)) {
struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
@@ -575,7 +576,7 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
/* It's unlikely but possible that someone marked the lock as
* destroyed after we did handle2object on it */
- if (flags == 0 && ((lock->l_flags & LDLM_FL_DESTROYED)== 0)) {
+ if (flags == 0 && ((lock->l_flags & LDLM_FL_DESTROYED) == 0)) {
lu_ref_add(&lock->l_reference, "handle", current);
return lock;
}
@@ -811,8 +812,7 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
/* If we received a blocked AST and this was the last reference,
* run the callback. */
if ((lock->l_flags & LDLM_FL_NS_SRV) && lock->l_export)
- CERROR("FL_CBPENDING set on non-local lock--just a "
- "warning\n");
+ CERROR("FL_CBPENDING set on non-local lock--just a warning\n");
LDLM_DEBUG(lock, "final decref done on cbpending lock");
@@ -859,6 +859,7 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode)
{
struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
+
LASSERTF(lock != NULL, "Non-existing lock: %#llx\n", lockh->cookie);
ldlm_lock_decref_internal(lock, mode);
LDLM_LOCK_PUT(lock);
@@ -981,7 +982,6 @@ static void search_granted_lock(struct list_head *queue,
prev->res_link = queue->prev;
prev->mode_link = &req->l_sl_mode;
prev->policy_link = &req->l_sl_policy;
- return;
}
/**
@@ -1287,6 +1287,7 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
__u64 wait_flags = LDLM_FL_LVB_READY |
LDLM_FL_DESTROYED | LDLM_FL_FAIL_NOTIFIED;
struct l_wait_info lwi;
+
if (lock->l_completion_ast) {
int err = lock->l_completion_ast(lock,
LDLM_FL_WAIT_NOREPROC,
@@ -1340,9 +1341,10 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
} else if (!(flags & LDLM_FL_TEST_LOCK)) {/*less verbose for test-only*/
LDLM_DEBUG_NOLOCK("not matched ns %p type %u mode %u res %llu/%llu (%llu %llu)",
- ns, type, mode, res_id->name[0], res_id->name[1],
+ ns, type, mode, res_id->name[0],
+ res_id->name[1],
(type == LDLM_PLAIN || type == LDLM_IBITS) ?
- res_id->name[2] :policy->l_extent.start,
+ res_id->name[2] : policy->l_extent.start,
(type == LDLM_PLAIN || type == LDLM_IBITS) ?
res_id->name[3] : policy->l_extent.end);
}
@@ -1453,7 +1455,8 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
memcpy(data, lvb, size);
} else {
- LDLM_ERROR(lock, "Replied unexpected lquota LVB size %d",
+ LDLM_ERROR(lock,
+ "Replied unexpected lquota LVB size %d",
size);
return -EINVAL;
}
@@ -1641,8 +1644,7 @@ ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
ldlm_grant_lock(lock, NULL);
goto out;
} else {
- CERROR("This is client-side-only module, cannot handle "
- "LDLM_NAMESPACE_SERVER resource type lock.\n");
+ CERROR("This is client-side-only module, cannot handle LDLM_NAMESPACE_SERVER resource type lock.\n");
LBUG();
}
@@ -1820,24 +1822,24 @@ int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
arg->list = rpc_list;
switch (ast_type) {
- case LDLM_WORK_BL_AST:
- arg->type = LDLM_BL_CALLBACK;
- work_ast_lock = ldlm_work_bl_ast_lock;
- break;
- case LDLM_WORK_CP_AST:
- arg->type = LDLM_CP_CALLBACK;
- work_ast_lock = ldlm_work_cp_ast_lock;
- break;
- case LDLM_WORK_REVOKE_AST:
- arg->type = LDLM_BL_CALLBACK;
- work_ast_lock = ldlm_work_revoke_ast_lock;
- break;
- case LDLM_WORK_GL_AST:
- arg->type = LDLM_GL_CALLBACK;
- work_ast_lock = ldlm_work_gl_ast_lock;
- break;
- default:
- LBUG();
+ case LDLM_WORK_BL_AST:
+ arg->type = LDLM_BL_CALLBACK;
+ work_ast_lock = ldlm_work_bl_ast_lock;
+ break;
+ case LDLM_WORK_CP_AST:
+ arg->type = LDLM_CP_CALLBACK;
+ work_ast_lock = ldlm_work_cp_ast_lock;
+ break;
+ case LDLM_WORK_REVOKE_AST:
+ arg->type = LDLM_BL_CALLBACK;
+ work_ast_lock = ldlm_work_revoke_ast_lock;
+ break;
+ case LDLM_WORK_GL_AST:
+ arg->type = LDLM_GL_CALLBACK;
+ work_ast_lock = ldlm_work_gl_ast_lock;
+ break;
+ default:
+ LBUG();
}
/* We create a ptlrpc request set with flow control extension.
@@ -1904,8 +1906,7 @@ void ldlm_reprocess_all(struct ldlm_resource *res)
LIST_HEAD(rpc_list);
if (!ns_is_client(ldlm_res_to_ns(res))) {
- CERROR("This is client-side-only module, cannot handle "
- "LDLM_NAMESPACE_SERVER resource type lock.\n");
+ CERROR("This is client-side-only module, cannot handle LDLM_NAMESPACE_SERVER resource type lock.\n");
LBUG();
}
}
@@ -2038,8 +2039,7 @@ int ldlm_cancel_locks_for_export_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
ecl->ecl_loop++;
if ((ecl->ecl_loop & -ecl->ecl_loop) == ecl->ecl_loop) {
CDEBUG(D_INFO,
- "Cancel lock %p for export %p (loop %d), still have "
- "%d locks left on hash table.\n",
+ "Cancel lock %p for export %p (loop %d), still have %d locks left on hash table.\n",
lock, exp, ecl->ecl_loop,
atomic_read(&hs->hs_count));
}
@@ -2169,8 +2169,7 @@ struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
lock->l_completion_ast(lock, 0, NULL);
}
} else {
- CERROR("This is client-side-only module, cannot handle "
- "LDLM_NAMESPACE_SERVER resource type lock.\n");
+ CERROR("This is client-side-only module, cannot handle LDLM_NAMESPACE_SERVER resource type lock.\n");
LBUG();
}
unlock_res_and_lock(lock);
@@ -2224,23 +2223,21 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
nid = libcfs_nid2str(exp->exp_connection->c_peer.nid);
} else if (exp && exp->exp_obd != NULL) {
struct obd_import *imp = exp->exp_obd->u.cli.cl_import;
+
nid = libcfs_nid2str(imp->imp_connection->c_peer.nid);
}
if (resource == NULL) {
libcfs_debug_vmsg2(msgdata, fmt, args,
- " ns: \?\? lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s "
- "res: \?\? rrc=\?\? type: \?\?\? flags: %#llx nid: %s "
- "remote: %#llx expref: %d pid: %u timeout: %lu "
- "lvb_type: %d\n",
- lock,
- lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
- lock->l_readers, lock->l_writers,
- ldlm_lockname[lock->l_granted_mode],
- ldlm_lockname[lock->l_req_mode],
- lock->l_flags, nid, lock->l_remote_handle.cookie,
- exp ? atomic_read(&exp->exp_refcount) : -99,
- lock->l_pid, lock->l_callback_timeout, lock->l_lvb_type);
+ " ns: \?\? lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: \?\? rrc=\?\? type: \?\?\? flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n",
+ lock,
+ lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
+ lock->l_readers, lock->l_writers,
+ ldlm_lockname[lock->l_granted_mode],
+ ldlm_lockname[lock->l_req_mode],
+ lock->l_flags, nid, lock->l_remote_handle.cookie,
+ exp ? atomic_read(&exp->exp_refcount) : -99,
+ lock->l_pid, lock->l_callback_timeout, lock->l_lvb_type);
va_end(args);
return;
}
@@ -2248,90 +2245,78 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
switch (resource->lr_type) {
case LDLM_EXTENT:
libcfs_debug_vmsg2(msgdata, fmt, args,
- " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s "
- "res: "DLDLMRES" rrc: %d type: %s [%llu->%llu] "
- "(req %llu->%llu) flags: %#llx nid: %s remote: "
- "%#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n",
- ldlm_lock_to_ns_name(lock), lock,
- lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
- lock->l_readers, lock->l_writers,
- ldlm_lockname[lock->l_granted_mode],
- ldlm_lockname[lock->l_req_mode],
- PLDLMRES(resource),
- atomic_read(&resource->lr_refcount),
- ldlm_typename[resource->lr_type],
- lock->l_policy_data.l_extent.start,
- lock->l_policy_data.l_extent.end,
- lock->l_req_extent.start, lock->l_req_extent.end,
- lock->l_flags, nid, lock->l_remote_handle.cookie,
- exp ? atomic_read(&exp->exp_refcount) : -99,
- lock->l_pid, lock->l_callback_timeout,
- lock->l_lvb_type);
+ " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " rrc: %d type: %s [%llu->%llu] (req %llu->%llu) flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n",
+ ldlm_lock_to_ns_name(lock), lock,
+ lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
+ lock->l_readers, lock->l_writers,
+ ldlm_lockname[lock->l_granted_mode],
+ ldlm_lockname[lock->l_req_mode],
+ PLDLMRES(resource),
+ atomic_read(&resource->lr_refcount),
+ ldlm_typename[resource->lr_type],
+ lock->l_policy_data.l_extent.start,
+ lock->l_policy_data.l_extent.end,
+ lock->l_req_extent.start, lock->l_req_extent.end,
+ lock->l_flags, nid, lock->l_remote_handle.cookie,
+ exp ? atomic_read(&exp->exp_refcount) : -99,
+ lock->l_pid, lock->l_callback_timeout,
+ lock->l_lvb_type);
break;
case LDLM_FLOCK:
libcfs_debug_vmsg2(msgdata, fmt, args,
- " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s "
- "res: "DLDLMRES" rrc: %d type: %s pid: %d "
- "[%llu->%llu] flags: %#llx nid: %s "
- "remote: %#llx expref: %d pid: %u timeout: %lu\n",
- ldlm_lock_to_ns_name(lock), lock,
- lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
- lock->l_readers, lock->l_writers,
- ldlm_lockname[lock->l_granted_mode],
- ldlm_lockname[lock->l_req_mode],
- PLDLMRES(resource),
- atomic_read(&resource->lr_refcount),
- ldlm_typename[resource->lr_type],
- lock->l_policy_data.l_flock.pid,
- lock->l_policy_data.l_flock.start,
- lock->l_policy_data.l_flock.end,
- lock->l_flags, nid, lock->l_remote_handle.cookie,
- exp ? atomic_read(&exp->exp_refcount) : -99,
- lock->l_pid, lock->l_callback_timeout);
+ " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " rrc: %d type: %s pid: %d [%llu->%llu] flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu\n",
+ ldlm_lock_to_ns_name(lock), lock,
+ lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
+ lock->l_readers, lock->l_writers,
+ ldlm_lockname[lock->l_granted_mode],
+ ldlm_lockname[lock->l_req_mode],
+ PLDLMRES(resource),
+ atomic_read(&resource->lr_refcount),
+ ldlm_typename[resource->lr_type],
+ lock->l_policy_data.l_flock.pid,
+ lock->l_policy_data.l_flock.start,
+ lock->l_policy_data.l_flock.end,
+ lock->l_flags, nid, lock->l_remote_handle.cookie,
+ exp ? atomic_read(&exp->exp_refcount) : -99,
+ lock->l_pid, lock->l_callback_timeout);
break;
case LDLM_IBITS:
libcfs_debug_vmsg2(msgdata, fmt, args,
- " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s "
- "res: "DLDLMRES" bits %#llx rrc: %d type: %s "
- "flags: %#llx nid: %s remote: %#llx expref: %d "
- "pid: %u timeout: %lu lvb_type: %d\n",
- ldlm_lock_to_ns_name(lock),
- lock, lock->l_handle.h_cookie,
- atomic_read(&lock->l_refc),
- lock->l_readers, lock->l_writers,
- ldlm_lockname[lock->l_granted_mode],
- ldlm_lockname[lock->l_req_mode],
- PLDLMRES(resource),
- lock->l_policy_data.l_inodebits.bits,
- atomic_read(&resource->lr_refcount),
- ldlm_typename[resource->lr_type],
- lock->l_flags, nid, lock->l_remote_handle.cookie,
- exp ? atomic_read(&exp->exp_refcount) : -99,
- lock->l_pid, lock->l_callback_timeout,
- lock->l_lvb_type);
+ " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " bits %#llx rrc: %d type: %s flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n",
+ ldlm_lock_to_ns_name(lock),
+ lock, lock->l_handle.h_cookie,
+ atomic_read(&lock->l_refc),
+ lock->l_readers, lock->l_writers,
+ ldlm_lockname[lock->l_granted_mode],
+ ldlm_lockname[lock->l_req_mode],
+ PLDLMRES(resource),
+ lock->l_policy_data.l_inodebits.bits,
+ atomic_read(&resource->lr_refcount),
+ ldlm_typename[resource->lr_type],
+ lock->l_flags, nid, lock->l_remote_handle.cookie,
+ exp ? atomic_read(&exp->exp_refcount) : -99,
+ lock->l_pid, lock->l_callback_timeout,
+ lock->l_lvb_type);
break;
default:
libcfs_debug_vmsg2(msgdata, fmt, args,
- " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s "
- "res: "DLDLMRES" rrc: %d type: %s flags: %#llx "
- "nid: %s remote: %#llx expref: %d pid: %u "
- "timeout: %lu lvb_type: %d\n",
- ldlm_lock_to_ns_name(lock),
- lock, lock->l_handle.h_cookie,
- atomic_read(&lock->l_refc),
- lock->l_readers, lock->l_writers,
- ldlm_lockname[lock->l_granted_mode],
- ldlm_lockname[lock->l_req_mode],
- PLDLMRES(resource),
- atomic_read(&resource->lr_refcount),
- ldlm_typename[resource->lr_type],
- lock->l_flags, nid, lock->l_remote_handle.cookie,
- exp ? atomic_read(&exp->exp_refcount) : -99,
- lock->l_pid, lock->l_callback_timeout,
- lock->l_lvb_type);
+ " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " rrc: %d type: %s flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n",
+ ldlm_lock_to_ns_name(lock),
+ lock, lock->l_handle.h_cookie,
+ atomic_read(&lock->l_refc),
+ lock->l_readers, lock->l_writers,
+ ldlm_lockname[lock->l_granted_mode],
+ ldlm_lockname[lock->l_req_mode],
+ PLDLMRES(resource),
+ atomic_read(&resource->lr_refcount),
+ ldlm_typename[resource->lr_type],
+ lock->l_flags, nid, lock->l_remote_handle.cookie,
+ exp ? atomic_read(&exp->exp_refcount) : -99,
+ lock->l_pid, lock->l_callback_timeout,
+ lock->l_lvb_type);
break;
}
va_end(args);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
index 91cf7ebae11..98fbd3f7e75 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
@@ -158,13 +158,15 @@ void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
unlock_res_and_lock(lock);
if (do_ast) {
- CDEBUG(D_DLMTRACE, "Lock %p already unused, calling callback (%p)\n",
- lock, lock->l_blocking_ast);
+ CDEBUG(D_DLMTRACE,
+ "Lock %p already unused, calling callback (%p)\n", lock,
+ lock->l_blocking_ast);
if (lock->l_blocking_ast != NULL)
lock->l_blocking_ast(lock, ld, lock->l_ast_data,
LDLM_CB_BLOCKING);
} else {
- CDEBUG(D_DLMTRACE, "Lock %p is referenced, will be cancelled later\n",
+ CDEBUG(D_DLMTRACE,
+ "Lock %p is referenced, will be cancelled later\n",
lock);
}
@@ -190,6 +192,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE)) {
int to = cfs_time_seconds(1);
+
while (to > 0) {
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(to);
@@ -210,9 +213,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
LASSERT(lock->l_lvb_data != NULL);
if (unlikely(lock->l_lvb_len < lvb_len)) {
- LDLM_ERROR(lock, "Replied LVB is larger than "
- "expectation, expected = %d, "
- "replied = %d",
+ LDLM_ERROR(lock, "Replied LVB is larger than expectation, expected = %d, replied = %d",
lock->l_lvb_len, lvb_len);
rc = -EINVAL;
goto out;
@@ -639,8 +640,8 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
lock = ldlm_handle2lock_long(&dlm_req->lock_handle[0], 0);
if (!lock) {
- CDEBUG(D_DLMTRACE, "callback on lock %#llx - lock "
- "disappeared\n", dlm_req->lock_handle[0].cookie);
+ CDEBUG(D_DLMTRACE, "callback on lock %#llx - lock disappeared\n",
+ dlm_req->lock_handle[0].cookie);
rc = ldlm_callback_reply(req, -EINVAL);
ldlm_callback_errmsg(req, "Operate with invalid parameter", rc,
&dlm_req->lock_handle[0]);
@@ -663,8 +664,7 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
if (((lock->l_flags & LDLM_FL_CANCELING) &&
(lock->l_flags & LDLM_FL_BL_DONE)) ||
(lock->l_flags & LDLM_FL_FAILED)) {
- LDLM_DEBUG(lock, "callback on lock "
- "%#llx - lock disappeared\n",
+ LDLM_DEBUG(lock, "callback on lock %#llx - lock disappeared\n",
dlm_req->lock_handle[0].cookie);
unlock_res_and_lock(lock);
LDLM_LOCK_RELEASE(lock);
@@ -724,7 +724,7 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
{
struct ldlm_bl_work_item *blwi = NULL;
- static unsigned int num_bl = 0;
+ static unsigned int num_bl;
spin_lock(&blp->blp_lock);
/* process a request from the blp_list at least every blp_num_threads */
@@ -887,6 +887,7 @@ void ldlm_put_ref(void)
mutex_lock(&ldlm_ref_mutex);
if (ldlm_refcount == 1) {
int rc = ldlm_cleanup();
+
if (rc)
CERROR("ldlm_cleanup failed: %d\n", rc);
else
@@ -969,6 +970,7 @@ static cfs_hash_ops_t ldlm_export_lock_ops = {
int ldlm_init_export(struct obd_export *exp)
{
int rc;
+
exp->exp_lock_hash =
cfs_hash_create(obd_uuid2str(&exp->exp_client_uuid),
HASH_EXP_LOCK_CUR_BITS,
@@ -1049,7 +1051,7 @@ static int ldlm_setup(void)
.so_req_handler = ldlm_callback_handler,
},
};
- ldlm_state->ldlm_cb_service = \
+ ldlm_state->ldlm_cb_service =
ptlrpc_register_service(&conf, ldlm_svc_proc_dir);
if (IS_ERR(ldlm_state->ldlm_cb_service)) {
CERROR("failed to start service\n");
@@ -1077,7 +1079,7 @@ static int ldlm_setup(void)
blp->blp_min_threads = LDLM_NTHRS_INIT;
blp->blp_max_threads = LDLM_NTHRS_MAX;
} else {
- blp->blp_min_threads = blp->blp_max_threads = \
+ blp->blp_min_threads = blp->blp_max_threads =
min_t(int, LDLM_NTHRS_MAX, max_t(int, LDLM_NTHRS_INIT,
ldlm_num_threads));
}
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
index 6054eee848d..4c838f615a6 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
@@ -54,10 +54,10 @@
* calculated as the number of locks in LRU * lock live time in seconds. If
* CLV > SLV - lock is canceled.
*
- * Client has LVF, that is, lock volume factor which regulates how much sensitive
- * client should be about last SLV from server. The higher LVF is the more locks
- * will be canceled on client. Default value for it is 1. Setting LVF to 2 means
- * that client will cancel locks 2 times faster.
+ * Client has LVF, that is, lock volume factor which regulates how much
+ * sensitive client should be about last SLV from server. The higher LVF is the
+ * more locks will be canceled on client. Default value for it is 1. Setting LVF
+ * to 2 means that client will cancel locks 2 times faster.
*
* Locks on a client will be canceled more intensively in these cases:
* (1) if SLV is smaller, that is, load is higher on the server;
@@ -70,11 +70,12 @@
* if flow is getting thinner, more and more particles become outside of it and
* as particles are locks, they should be canceled.
*
- * General idea of this belongs to Vitaly Fertman (vitaly@clusterfs.com). Andreas
- * Dilger (adilger@clusterfs.com) proposed few nice ideas like using LVF and many
- * cleanups. Flow definition to allow more easy understanding of the logic belongs
- * to Nikita Danilov (nikita@clusterfs.com) as well as many cleanups and fixes.
- * And design and implementation are done by Yury Umanets (umka@clusterfs.com).
+ * General idea of this belongs to Vitaly Fertman (vitaly@clusterfs.com).
+ * Andreas Dilger (adilger@clusterfs.com) proposed few nice ideas like using
+ * LVF and many cleanups. Flow definition to allow more easy understanding of
+ * the logic belongs to Nikita Danilov (nikita@clusterfs.com) as well as many
+ * cleanups and fixes. And design and implementation are done by Yury Umanets
+ * (umka@clusterfs.com).
*
* Glossary for terms used:
*
@@ -265,16 +266,15 @@ static void ldlm_pool_recalc_slv(struct ldlm_pool *pl)
* SLV. And the opposite, the more grant plan is over-consumed
* (load time) the faster drops SLV.
*/
- slv_factor = (grant_usage << LDLM_POOL_SLV_SHIFT);
+ slv_factor = grant_usage << LDLM_POOL_SLV_SHIFT;
do_div(slv_factor, limit);
slv = slv * slv_factor;
slv = dru(slv, LDLM_POOL_SLV_SHIFT, round_up);
- if (slv > ldlm_pool_slv_max(limit)) {
+ if (slv > ldlm_pool_slv_max(limit))
slv = ldlm_pool_slv_max(limit);
- } else if (slv < ldlm_pool_slv_min(limit)) {
+ else if (slv < ldlm_pool_slv_min(limit))
slv = ldlm_pool_slv_min(limit);
- }
pl->pl_server_lock_volume = slv;
}
@@ -614,8 +614,8 @@ int ldlm_pool_shrink(struct ldlm_pool *pl, int nr,
lprocfs_counter_add(pl->pl_stats,
LDLM_POOL_SHRINK_FREED_STAT,
cancel);
- CDEBUG(D_DLMTRACE, "%s: request to shrink %d locks, "
- "shrunk %d\n", pl->pl_name, nr, cancel);
+ CDEBUG(D_DLMTRACE, "%s: request to shrink %d locks, shrunk %d\n",
+ pl->pl_name, nr, cancel);
}
}
return cancel;
@@ -636,7 +636,7 @@ int ldlm_pool_setup(struct ldlm_pool *pl, int limit)
}
EXPORT_SYMBOL(ldlm_pool_setup);
-#if defined (CONFIG_PROC_FS)
+#if defined(CONFIG_PROC_FS)
static int lprocfs_pool_state_seq_show(struct seq_file *m, void *unused)
{
int granted, grant_rate, cancel_rate, grant_step;
@@ -696,8 +696,9 @@ LPROC_SEQ_FOPS_RO(lprocfs_grant_plan);
LDLM_POOL_PROC_READER_SEQ_SHOW(recalc_period, int);
LDLM_POOL_PROC_WRITER(recalc_period, int);
-static ssize_t lprocfs_recalc_period_seq_write(struct file *file, const char *buf,
- size_t len, loff_t *off)
+static ssize_t lprocfs_recalc_period_seq_write(struct file *file,
+ const char *buf, size_t len,
+ loff_t *off)
{
struct seq_file *seq = file->private_data;
@@ -943,6 +944,7 @@ EXPORT_SYMBOL(ldlm_pool_del);
__u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
{
__u64 slv;
+
spin_lock(&pl->pl_lock);
slv = pl->pl_server_lock_volume;
spin_unlock(&pl->pl_lock);
@@ -971,6 +973,7 @@ EXPORT_SYMBOL(ldlm_pool_set_slv);
__u64 ldlm_pool_get_clv(struct ldlm_pool *pl)
{
__u64 slv;
+
spin_lock(&pl->pl_lock);
slv = pl->pl_client_lock_volume;
spin_unlock(&pl->pl_lock);
@@ -1132,23 +1135,27 @@ static unsigned long ldlm_pools_scan(ldlm_side_t client, int nr, gfp_t gfp_mask)
return (client == LDLM_NAMESPACE_SERVER) ? SHRINK_STOP : freed;
}
-static unsigned long ldlm_pools_srv_count(struct shrinker *s, struct shrink_control *sc)
+static unsigned long ldlm_pools_srv_count(struct shrinker *s,
+ struct shrink_control *sc)
{
return ldlm_pools_count(LDLM_NAMESPACE_SERVER, sc->gfp_mask);
}
-static unsigned long ldlm_pools_srv_scan(struct shrinker *s, struct shrink_control *sc)
+static unsigned long ldlm_pools_srv_scan(struct shrinker *s,
+ struct shrink_control *sc)
{
return ldlm_pools_scan(LDLM_NAMESPACE_SERVER, sc->nr_to_scan,
sc->gfp_mask);
}
-static unsigned long ldlm_pools_cli_count(struct shrinker *s, struct shrink_control *sc)
+static unsigned long ldlm_pools_cli_count(struct shrinker *s,
+ struct shrink_control *sc)
{
return ldlm_pools_count(LDLM_NAMESPACE_CLIENT, sc->gfp_mask);
}
-static unsigned long ldlm_pools_cli_scan(struct shrinker *s, struct shrink_control *sc)
+static unsigned long ldlm_pools_cli_scan(struct shrinker *s,
+ struct shrink_control *sc)
{
return ldlm_pools_scan(LDLM_NAMESPACE_CLIENT, sc->nr_to_scan,
sc->gfp_mask);
@@ -1194,10 +1201,8 @@ int ldlm_pools_recalc(ldlm_side_t client)
* of limit.
*/
if (nr_l >= 2 * (LDLM_POOL_HOST_L / 3)) {
- CWARN("\"Modest\" pools eat out 2/3 of server locks "
- "limit (%d of %lu). This means that you have too "
- "many clients for this amount of server RAM. "
- "Upgrade server!\n", nr_l, LDLM_POOL_HOST_L);
+ CWARN("\"Modest\" pools eat out 2/3 of server locks limit (%d of %lu). This means that you have too many clients for this amount of server RAM. Upgrade server!\n",
+ nr_l, LDLM_POOL_HOST_L);
equal = 1;
}
@@ -1205,8 +1210,7 @@ int ldlm_pools_recalc(ldlm_side_t client)
* The rest is given to greedy namespaces.
*/
list_for_each_entry(ns, ldlm_namespace_list(client),
- ns_list_chain)
- {
+ ns_list_chain) {
if (!equal && ns->ns_appetite != LDLM_NAMESPACE_GREEDY)
continue;
@@ -1383,9 +1387,8 @@ static int ldlm_pools_thread_start(void)
static void ldlm_pools_thread_stop(void)
{
- if (ldlm_pools_thread == NULL) {
+ if (ldlm_pools_thread == NULL)
return;
- }
thread_set_flags(ldlm_pools_thread, SVC_STOPPING);
wake_up(&ldlm_pools_thread->t_ctl_waitq);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
index 9ce437b1879..287da325d92 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
@@ -95,16 +95,14 @@ int ldlm_expired_completion_wait(void *data)
struct obd_device *obd;
if (lock->l_conn_export == NULL) {
- static unsigned long next_dump = 0, last_dump = 0;
+ static unsigned long next_dump, last_dump;
LCONSOLE_WARN("lock timed out (enqueued at "CFS_TIME_T", "
CFS_DURATION_T"s ago)\n",
lock->l_last_activity,
cfs_time_sub(get_seconds(),
lock->l_last_activity));
- LDLM_DEBUG(lock, "lock timed out (enqueued at "CFS_TIME_T", "
- CFS_DURATION_T"s ago); not entering recovery in "
- "server code, just going back to sleep",
+ LDLM_DEBUG(lock, "lock timed out (enqueued at " CFS_TIME_T ", " CFS_DURATION_T "s ago); not entering recovery in server code, just going back to sleep",
lock->l_last_activity,
cfs_time_sub(get_seconds(),
lock->l_last_activity));
@@ -137,6 +135,7 @@ EXPORT_SYMBOL(ldlm_expired_completion_wait);
int ldlm_get_enq_timeout(struct ldlm_lock *lock)
{
int timeout = at_get(ldlm_lock_to_ns_at(lock));
+
if (AT_OFF)
return obd_timeout / 2;
/* Since these are non-updating timeouts, we should be conservative.
@@ -191,8 +190,7 @@ int ldlm_completion_ast_async(struct ldlm_lock *lock, __u64 flags, void *data)
return ldlm_completion_tail(lock);
}
- LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
- "going forward");
+ LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, going forward");
ldlm_reprocess_all(lock->l_resource);
return 0;
}
@@ -240,17 +238,15 @@ int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
return 0;
}
- LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
- "sleeping");
+ LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, sleeping");
noreproc:
obd = class_exp2obd(lock->l_conn_export);
/* if this is a local lock, then there is no import */
- if (obd != NULL) {
+ if (obd != NULL)
imp = obd->u.cli.cl_import;
- }
/* Wait a long time for enqueue - server may have to callback a
lock from another client. Server will evict the other client if it
@@ -324,8 +320,7 @@ int ldlm_blocking_ast_nocheck(struct ldlm_lock *lock)
if (rc < 0)
CERROR("ldlm_cli_cancel: %d\n", rc);
} else {
- LDLM_DEBUG(lock, "Lock still has references, will be "
- "cancelled later");
+ LDLM_DEBUG(lock, "Lock still has references, will be cancelled later");
}
return 0;
}
@@ -483,8 +478,7 @@ static void failed_lock_cleanup(struct ldlm_namespace *ns,
if (need_cancel)
LDLM_DEBUG(lock,
- "setting FL_LOCAL_ONLY | LDLM_FL_FAILED | "
- "LDLM_FL_ATOMIC_CB | LDLM_FL_CBPENDING");
+ "setting FL_LOCAL_ONLY | LDLM_FL_FAILED | LDLM_FL_ATOMIC_CB | LDLM_FL_CBPENDING");
else
LDLM_DEBUG(lock, "lock was granted or failed in race");
@@ -557,8 +551,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
rc = size;
goto cleanup;
} else if (unlikely(size > lvb_len)) {
- LDLM_ERROR(lock, "Replied LVB is larger than "
- "expectation, expected = %d, replied = %d",
+ LDLM_ERROR(lock, "Replied LVB is larger than expectation, expected = %d, replied = %d",
lvb_len, size);
rc = -EINVAL;
goto cleanup;
@@ -608,6 +601,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
* again. */
if ((*flags) & LDLM_FL_LOCK_CHANGED) {
int newmode = reply->lock_desc.l_req_mode;
+
LASSERT(!is_replay);
if (newmode && newmode != lock->l_req_mode) {
LDLM_DEBUG(lock, "server returned different mode %s",
@@ -676,6 +670,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
rc = ldlm_lock_enqueue(ns, &lock, NULL, flags);
if (lock->l_completion_ast != NULL) {
int err = lock->l_completion_ast(lock, *flags, NULL);
+
if (!rc)
rc = err;
if (rc)
@@ -725,6 +720,7 @@ static inline int ldlm_capsule_handles_avail(struct req_capsule *pill,
int off)
{
int size = req_capsule_msg_size(pill, loc);
+
return ldlm_req_handles_avail(size, off);
}
@@ -733,6 +729,7 @@ static inline int ldlm_format_handles_avail(struct obd_import *imp,
enum req_location loc, int off)
{
int size = req_capsule_fmt_size(imp->imp_msg_magic, fmt, loc);
+
return ldlm_req_handles_avail(size, off);
}
@@ -1107,8 +1104,7 @@ static __u64 ldlm_cli_cancel_local(struct ldlm_lock *lock)
unlock_res_and_lock(lock);
if (local_only) {
- CDEBUG(D_DLMTRACE, "not sending request (at caller's "
- "instruction)\n");
+ CDEBUG(D_DLMTRACE, "not sending request (at caller's instruction)\n");
rc = LDLM_FL_LOCAL_ONLY;
}
ldlm_lock_cancel(lock);
@@ -1223,8 +1219,7 @@ int ldlm_cli_cancel_req(struct obd_export *exp, struct list_head *cancels,
rc = ptlrpc_queue_wait(req);
}
if (rc == LUSTRE_ESTALE) {
- CDEBUG(D_DLMTRACE, "client/server (nid %s) "
- "out of sync -- not fatal\n",
+ CDEBUG(D_DLMTRACE, "client/server (nid %s) out of sync -- not fatal\n",
libcfs_nid2str(req->rq_import->
imp_connection->c_peer.nid));
rc = 0;
@@ -1235,8 +1230,8 @@ int ldlm_cli_cancel_req(struct obd_export *exp, struct list_head *cancels,
} else if (rc != ELDLM_OK) {
/* -ESHUTDOWN is common on umount */
CDEBUG_LIMIT(rc == -ESHUTDOWN ? D_DLMTRACE : D_ERROR,
- "Got rc %d from cancel RPC: "
- "canceling anyway\n", rc);
+ "Got rc %d from cancel RPC: canceling anyway\n",
+ rc);
break;
}
sent = count;
@@ -1279,7 +1274,8 @@ int ldlm_cli_update_pool(struct ptlrpc_request *req)
* server-side namespace is not possible. */
if (lustre_msg_get_slv(req->rq_repmsg) == 0 ||
lustre_msg_get_limit(req->rq_repmsg) == 0) {
- DEBUG_REQ(D_HA, req, "Zero SLV or Limit found (SLV: %llu, Limit: %u)",
+ DEBUG_REQ(D_HA, req,
+ "Zero SLV or Limit found (SLV: %llu, Limit: %u)",
lustre_msg_get_slv(req->rq_repmsg),
lustre_msg_get_limit(req->rq_repmsg));
return 0;
@@ -1416,19 +1412,20 @@ static ldlm_policy_res_t ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns,
{
ldlm_policy_res_t result = LDLM_POLICY_CANCEL_LOCK;
ldlm_cancel_for_recovery cb = ns->ns_cancel_for_recovery;
+
lock_res_and_lock(lock);
/* don't check added & count since we want to process all locks
* from unused list */
switch (lock->l_resource->lr_type) {
- case LDLM_EXTENT:
- case LDLM_IBITS:
- if (cb && cb(lock))
- break;
- default:
- result = LDLM_POLICY_SKIP_LOCK;
- lock->l_flags |= LDLM_FL_SKIPPED;
+ case LDLM_EXTENT:
+ case LDLM_IBITS:
+ if (cb && cb(lock))
break;
+ default:
+ result = LDLM_POLICY_SKIP_LOCK;
+ lock->l_flags |= LDLM_FL_SKIPPED;
+ break;
}
unlock_res_and_lock(lock);
@@ -1594,8 +1591,9 @@ ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags)
* sending any RPCs or waiting for any
* outstanding RPC to complete.
*/
-static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, struct list_head *cancels,
- int count, int max, int flags)
+static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
+ struct list_head *cancels, int count, int max,
+ int flags)
{
ldlm_cancel_lru_policy_t pf;
struct ldlm_lock *lock, *next;
@@ -1730,6 +1728,7 @@ int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
int flags)
{
int added;
+
added = ldlm_prepare_lru_list(ns, cancels, count, max, flags);
if (added <= 0)
return added;
@@ -1918,7 +1917,8 @@ struct ldlm_cli_cancel_arg {
void *lc_opaque;
};
-static int ldlm_cli_hash_cancel_unused(struct cfs_hash *hs, struct cfs_hash_bd *bd,
+static int ldlm_cli_hash_cancel_unused(struct cfs_hash *hs,
+ struct cfs_hash_bd *bd,
struct hlist_node *hnode, void *arg)
{
struct ldlm_resource *res = cfs_hash_object(hs, hnode);
@@ -2014,6 +2014,7 @@ struct iter_helper_data {
static int ldlm_iter_helper(struct ldlm_lock *lock, void *closure)
{
struct iter_helper_data *helper = closure;
+
return helper->iter(lock, helper->closure);
}
@@ -2080,7 +2081,8 @@ static int ldlm_chain_lock_for_replay(struct ldlm_lock *lock, void *closure)
/* we use l_pending_chain here, because it's unused on clients. */
LASSERTF(list_empty(&lock->l_pending_chain),
"lock %p next %p prev %p\n",
- lock, &lock->l_pending_chain.next,&lock->l_pending_chain.prev);
+ lock, &lock->l_pending_chain.next,
+ &lock->l_pending_chain.prev);
/* bug 9573: don't replay locks left after eviction, or
* bug 17614: locks being actively cancelled. Get a reference
* on a lock so that it does not disappear under us (e.g. due to cancel)
@@ -2114,8 +2116,7 @@ static int replay_lock_interpret(const struct lu_env *env,
lock = ldlm_handle2lock(&aa->lock_handle);
if (!lock) {
- CERROR("received replay ack for unknown local cookie %#llx"
- " remote cookie %#llx from server %s id %s\n",
+ CERROR("received replay ack for unknown local cookie %#llx remote cookie %#llx from server %s id %s\n",
aa->lock_handle.cookie, reply->lock_handle.cookie,
req->rq_export->exp_client_uuid.uuid,
libcfs_id2str(req->rq_peer));
@@ -2243,9 +2244,8 @@ static void ldlm_cancel_unused_locks_for_replay(struct ldlm_namespace *ns)
int canceled;
LIST_HEAD(cancels);
- CDEBUG(D_DLMTRACE, "Dropping as many unused locks as possible before"
- "replay for namespace %s (%d)\n",
- ldlm_ns_name(ns), ns->ns_nr_unused);
+ CDEBUG(D_DLMTRACE, "Dropping as many unused locks as possible before replay for namespace %s (%d)\n",
+ ldlm_ns_name(ns), ns->ns_nr_unused);
/* We don't need to care whether or not LRU resize is enabled
* because the LDLM_CANCEL_NO_WAIT policy doesn't use the
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
index a785b7a7d1b..1f150e46f50 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
@@ -71,7 +71,7 @@ extern unsigned int ldlm_cancel_unused_locks_before_replay;
* DDOS. */
unsigned int ldlm_dump_granted_max = 256;
-#if defined (CONFIG_PROC_FS)
+#if defined(CONFIG_PROC_FS)
static ssize_t lprocfs_wr_dump_ns(struct file *file, const char *buffer,
size_t count, loff_t *off)
{
@@ -93,7 +93,7 @@ int ldlm_proc_setup(void)
&ldlm_dump_granted_max },
{ "cancel_unused_locks_before_replay", &ldlm_rw_uint_fops,
&ldlm_cancel_unused_locks_before_replay },
- { NULL }};
+ { NULL } };
LASSERT(ldlm_ns_proc_dir == NULL);
ldlm_type_proc_dir = lprocfs_register(OBD_LDLM_DEVICENAME,
@@ -215,8 +215,8 @@ static ssize_t lprocfs_lru_size_seq_write(struct file *file,
LDLM_CANCEL_PASSED);
if (canceled < unused) {
CDEBUG(D_DLMTRACE,
- "not all requested locks are canceled, "
- "requested: %d, canceled: %d\n", unused,
+ "not all requested locks are canceled, requested: %d, canceled: %d\n",
+ unused,
canceled);
return -EINVAL;
}
@@ -385,8 +385,8 @@ int ldlm_namespace_proc_register(struct ldlm_namespace *ns)
#undef MAX_STRING_SIZE
#else /* CONFIG_PROC_FS */
-#define ldlm_namespace_proc_unregister(ns) ({;})
-#define ldlm_namespace_proc_register(ns) ({0;})
+#define ldlm_namespace_proc_unregister(ns) ({; })
+#define ldlm_namespace_proc_register(ns) ({0; })
#endif /* CONFIG_PROC_FS */
@@ -454,7 +454,8 @@ static void *ldlm_res_hop_object(struct hlist_node *hnode)
return hlist_entry(hnode, struct ldlm_resource, lr_hash);
}
-static void ldlm_res_hop_get_locked(struct cfs_hash *hs, struct hlist_node *hnode)
+static void ldlm_res_hop_get_locked(struct cfs_hash *hs,
+ struct hlist_node *hnode)
{
struct ldlm_resource *res;
@@ -462,7 +463,8 @@ static void ldlm_res_hop_get_locked(struct cfs_hash *hs, struct hlist_node *hnod
ldlm_resource_getref(res);
}
-static void ldlm_res_hop_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
+static void ldlm_res_hop_put_locked(struct cfs_hash *hs,
+ struct hlist_node *hnode)
{
struct ldlm_resource *res;
@@ -501,7 +503,7 @@ cfs_hash_ops_t ldlm_ns_fid_hash_ops = {
.hs_put = ldlm_res_hop_put
};
-typedef struct {
+struct ldlm_ns_hash_def {
ldlm_ns_type_t nsd_type;
/** hash bucket bits */
unsigned nsd_bkt_bits;
@@ -509,9 +511,9 @@ typedef struct {
unsigned nsd_all_bits;
/** hash operations */
cfs_hash_ops_t *nsd_hops;
-} ldlm_ns_hash_def_t;
+};
-ldlm_ns_hash_def_t ldlm_ns_hash_defs[] = {
+struct ldlm_ns_hash_def ldlm_ns_hash_defs[] = {
{
.nsd_type = LDLM_NS_TYPE_MDC,
.nsd_bkt_bits = 11,
@@ -563,7 +565,7 @@ struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
{
struct ldlm_namespace *ns = NULL;
struct ldlm_ns_bucket *nsb;
- ldlm_ns_hash_def_t *nsd;
+ struct ldlm_ns_hash_def *nsd;
struct cfs_hash_bd bd;
int idx;
int rc;
@@ -576,7 +578,7 @@ struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
return NULL;
}
- for (idx = 0;;idx++) {
+ for (idx = 0;; idx++) {
nsd = &ldlm_ns_hash_defs[idx];
if (nsd->nsd_type == LDLM_NS_TYPE_UNKNOWN) {
CERROR("Unknown type %d for ns %s\n", ns_type, name);
@@ -735,8 +737,7 @@ static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
} else {
ldlm_resource_unlink_lock(lock);
unlock_res(res);
- LDLM_DEBUG(lock, "Freeing a lock still held by a "
- "client node");
+ LDLM_DEBUG(lock, "Freeing a lock still held by a client node");
ldlm_lock_destroy(lock);
}
LDLM_LOCK_RELEASE(lock);
@@ -805,6 +806,7 @@ static int __ldlm_namespace_free(struct ldlm_namespace *ns, int force)
if (atomic_read(&ns->ns_bref) > 0) {
struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
int rc;
+
CDEBUG(D_DLMTRACE,
"dlm namespace %s free waiting on refcount %d\n",
ldlm_ns_name(ns), atomic_read(&ns->ns_bref));
@@ -818,16 +820,14 @@ force_wait:
/* Forced cleanups should be able to reclaim all references,
* so it's safe to wait forever... we can't leak locks... */
if (force && rc == -ETIMEDOUT) {
- LCONSOLE_ERROR("Forced cleanup waiting for %s "
- "namespace with %d resources in use, "
- "(rc=%d)\n", ldlm_ns_name(ns),
+ LCONSOLE_ERROR("Forced cleanup waiting for %s namespace with %d resources in use, (rc=%d)\n",
+ ldlm_ns_name(ns),
atomic_read(&ns->ns_bref), rc);
goto force_wait;
}
if (atomic_read(&ns->ns_bref)) {
- LCONSOLE_ERROR("Cleanup waiting for %s namespace "
- "with %d resources in use, (rc=%d)\n",
+ LCONSOLE_ERROR("Cleanup waiting for %s namespace with %d resources in use, (rc=%d)\n",
ldlm_ns_name(ns),
atomic_read(&ns->ns_bref), rc);
return ELDLM_NAMESPACE_EXISTS;
@@ -1335,6 +1335,7 @@ void ldlm_dump_all_namespaces(ldlm_side_t client, int level)
list_for_each(tmp, ldlm_namespace_list(client)) {
struct ldlm_namespace *ns;
+
ns = list_entry(tmp, struct ldlm_namespace, ns_list_chain);
ldlm_namespace_dump(level, ns);
}
@@ -1404,8 +1405,8 @@ void ldlm_resource_dump(int level, struct ldlm_resource *res)
LDLM_DEBUG_LIMIT(level, lock, "###");
if (!(level & D_CANTMASK) &&
++granted > ldlm_dump_granted_max) {
- CDEBUG(level, "only dump %d granted locks to "
- "avoid DDOS.\n", granted);
+ CDEBUG(level, "only dump %d granted locks to avoid DDOS.\n",
+ granted);
break;
}
}
diff --git a/drivers/staging/lustre/lustre/libcfs/debug.c b/drivers/staging/lustre/lustre/libcfs/debug.c
index ba43b3067fa..a7a7ac626aa 100644
--- a/drivers/staging/lustre/lustre/libcfs/debug.c
+++ b/drivers/staging/lustre/lustre/libcfs/debug.c
@@ -318,9 +318,7 @@ libcfs_debug_str2mask(int *mask, const char *str, int is_subsys)
if (t >= 1 && matched == n) {
/* don't print warning for lctl set_param debug=0 or -1 */
if (m != 0 && m != -1)
- CWARN("You are trying to use a numerical value for the "
- "mask - this will be deprecated in a future "
- "release.\n");
+ CWARN("You are trying to use a numerical value for the mask - this will be deprecated in a future release.\n");
*mask = m;
return 0;
}
@@ -375,8 +373,8 @@ void libcfs_debug_dumplog(void)
(void *)(long)current_pid(),
"libcfs_debug_dumper");
if (IS_ERR(dumper))
- pr_err("LustreError: cannot start log dump thread:"
- " %ld\n", PTR_ERR(dumper));
+ pr_err("LustreError: cannot start log dump thread: %ld\n",
+ PTR_ERR(dumper));
else
schedule();
@@ -412,7 +410,7 @@ int libcfs_debug_init(unsigned long bufsize)
max = TCD_MAX_PAGES;
} else {
max = (max / num_possible_cpus());
- max = (max << (20 - PAGE_CACHE_SHIFT));
+ max = max << (20 - PAGE_CACHE_SHIFT);
}
rc = cfs_tracefile_init(max);
@@ -460,11 +458,3 @@ void libcfs_debug_set_level(unsigned int debug_level)
}
EXPORT_SYMBOL(libcfs_debug_set_level);
-
-void libcfs_log_goto(struct libcfs_debug_msg_data *msgdata, const char *label,
- long_ptr_t rc)
-{
- libcfs_debug_msg(msgdata, "Process leaving via %s (rc=%lu : %ld : %#lx)\n",
- label, (ulong_ptr_t)rc, rc, rc);
-}
-EXPORT_SYMBOL(libcfs_log_goto);
diff --git a/drivers/staging/lustre/lustre/libcfs/fail.c b/drivers/staging/lustre/lustre/libcfs/fail.c
index e73ca3df973..92444b0fe2a 100644
--- a/drivers/staging/lustre/lustre/libcfs/fail.c
+++ b/drivers/staging/lustre/lustre/libcfs/fail.c
@@ -103,18 +103,18 @@ int __cfs_fail_check_set(__u32 id, __u32 value, int set)
}
switch (set) {
- case CFS_FAIL_LOC_NOSET:
- case CFS_FAIL_LOC_VALUE:
- break;
- case CFS_FAIL_LOC_ORSET:
- cfs_fail_loc |= value & ~(CFS_FAILED | CFS_FAIL_ONCE);
- break;
- case CFS_FAIL_LOC_RESET:
- cfs_fail_loc = value;
- break;
- default:
- LASSERTF(0, "called with bad set %u\n", set);
- break;
+ case CFS_FAIL_LOC_NOSET:
+ case CFS_FAIL_LOC_VALUE:
+ break;
+ case CFS_FAIL_LOC_ORSET:
+ cfs_fail_loc |= value & ~(CFS_FAILED | CFS_FAIL_ONCE);
+ break;
+ case CFS_FAIL_LOC_RESET:
+ cfs_fail_loc = value;
+ break;
+ default:
+ LASSERTF(0, "called with bad set %u\n", set);
+ break;
}
return 1;
diff --git a/drivers/staging/lustre/lustre/libcfs/hash.c b/drivers/staging/lustre/lustre/libcfs/hash.c
index 3b67b7b6428..2d1e6729e99 100644
--- a/drivers/staging/lustre/lustre/libcfs/hash.c
+++ b/drivers/staging/lustre/lustre/libcfs/hash.c
@@ -1121,8 +1121,7 @@ cfs_hash_destroy(struct cfs_hash *hs)
cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
hlist_for_each_safe(hnode, pos, hhead) {
LASSERTF(!cfs_hash_with_assert_empty(hs),
- "hash %s bucket %u(%u) is not "
- " empty: %u items left\n",
+ "hash %s bucket %u(%u) is not empty: %u items left\n",
hs->hs_name, bd.bd_bucket->hsb_index,
bd.bd_offset, bd.bd_bucket->hsb_count);
/* can't assert key valicate, because we
@@ -1371,8 +1370,7 @@ cfs_hash_lookup(struct cfs_hash *hs, const void *key)
EXPORT_SYMBOL(cfs_hash_lookup);
static void
-cfs_hash_for_each_enter(struct cfs_hash *hs)
-{
+cfs_hash_for_each_enter(struct cfs_hash *hs) {
LASSERT(!cfs_hash_is_exiting(hs));
if (!cfs_hash_with_rehash(hs))
@@ -1397,8 +1395,7 @@ cfs_hash_for_each_enter(struct cfs_hash *hs)
}
static void
-cfs_hash_for_each_exit(struct cfs_hash *hs)
-{
+cfs_hash_for_each_exit(struct cfs_hash *hs) {
int remained;
int bits;
@@ -1429,8 +1426,7 @@ cfs_hash_for_each_exit(struct cfs_hash *hs)
*/
static __u64
cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
- void *data, int remove_safe)
-{
+ void *data, int remove_safe) {
struct hlist_node *hnode;
struct hlist_node *pos;
struct cfs_hash_bd bd;
@@ -1523,8 +1519,7 @@ EXPORT_SYMBOL(cfs_hash_for_each);
void
cfs_hash_for_each_safe(struct cfs_hash *hs,
- cfs_hash_for_each_cb_t func, void *data)
-{
+ cfs_hash_for_each_cb_t func, void *data) {
cfs_hash_for_each_tight(hs, func, data, 1);
}
EXPORT_SYMBOL(cfs_hash_for_each_safe);
@@ -1572,8 +1567,8 @@ EXPORT_SYMBOL(cfs_hash_size_get);
* two cases, so iteration has to be stopped on change.
*/
static int
-cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, void *data)
-{
+cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
+ void *data) {
struct hlist_node *hnode;
struct hlist_node *tmp;
struct cfs_hash_bd bd;
@@ -1634,8 +1629,7 @@ cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, void *
int
cfs_hash_for_each_nolock(struct cfs_hash *hs,
- cfs_hash_for_each_cb_t func, void *data)
-{
+ cfs_hash_for_each_cb_t func, void *data) {
if (cfs_hash_with_no_lock(hs) ||
cfs_hash_with_rehash_key(hs) ||
!cfs_hash_with_no_itemref(hs))
@@ -1667,8 +1661,7 @@ EXPORT_SYMBOL(cfs_hash_for_each_nolock);
*/
int
cfs_hash_for_each_empty(struct cfs_hash *hs,
- cfs_hash_for_each_cb_t func, void *data)
-{
+ cfs_hash_for_each_cb_t func, void *data) {
unsigned i = 0;
if (cfs_hash_with_no_lock(hs))
@@ -1726,8 +1719,7 @@ EXPORT_SYMBOL(cfs_hash_hlist_for_each);
*/
void
cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
- cfs_hash_for_each_cb_t func, void *data)
-{
+ cfs_hash_for_each_cb_t func, void *data) {
struct hlist_node *hnode;
struct cfs_hash_bd bds[2];
unsigned i;
diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c b/drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c
index dbb81b6cc20..31a558115a9 100644
--- a/drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c
+++ b/drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c
@@ -38,7 +38,7 @@
#include "../../include/linux/libcfs/libcfs.h"
/** Global CPU partition table */
-struct cfs_cpt_table *cfs_cpt_table __read_mostly = NULL;
+struct cfs_cpt_table *cfs_cpt_table __read_mostly;
EXPORT_SYMBOL(cfs_cpt_table);
#ifndef HAVE_LIBCFS_CPT
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c
index 224c65b5ce4..05f7595f18a 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c
@@ -333,8 +333,8 @@ cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
/* caller doesn't know the partition ID */
cpt = cptab->ctb_cpu2cpt[cpu];
if (cpt < 0) { /* not set in this CPT-table */
- CDEBUG(D_INFO, "Try to unset cpu %d which is "
- "not in CPT-table %p\n", cpt, cptab);
+ CDEBUG(D_INFO, "Try to unset cpu %d which is not in CPT-table %p\n",
+ cpt, cptab);
return;
}
@@ -384,8 +384,8 @@ cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask)
int i;
if (cpus_weight(*mask) == 0 || any_online_cpu(*mask) == NR_CPUS) {
- CDEBUG(D_INFO, "No online CPU is found in the CPU mask "
- "for CPU partition %d\n", cpt);
+ CDEBUG(D_INFO, "No online CPU is found in the CPU mask for CPU partition %d\n",
+ cpt);
return 0;
}
@@ -579,9 +579,8 @@ cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt)
}
if (any_online_cpu(*cpumask) == NR_CPUS) {
- CERROR("No online CPU found in CPU partition %d, did someone "
- "do CPU hotplug on system? You might need to reload "
- "Lustre modules to keep system working well.\n", cpt);
+ CERROR("No online CPU found in CPU partition %d, did someone do CPU hotplug on system? You might need to reload Lustre modules to keep system working well.\n",
+ cpt);
return -EINVAL;
}
@@ -737,16 +736,12 @@ cfs_cpt_table_create(int ncpt)
ncpt = rc;
if (ncpt > num_online_cpus() || ncpt > 4 * rc) {
- CWARN("CPU partition number %d is larger than suggested "
- "value (%d), your system may have performance"
- "issue or run out of memory while under pressure\n",
+ CWARN("CPU partition number %d is larger than suggested value (%d), your system may have performance issue or run out of memory while under pressure\n",
ncpt, rc);
}
if (num_online_cpus() % ncpt != 0) {
- CERROR("CPU number %d is not multiple of cpu_npartition %d, "
- "please try different cpu_npartitions value or"
- "set pattern string by cpu_pattern=STRING\n",
+ CERROR("CPU number %d is not multiple of cpu_npartition %d, please try different cpu_npartitions value or set pattern string by cpu_pattern=STRING\n",
(int)num_online_cpus(), ncpt);
goto failed;
}
@@ -796,8 +791,7 @@ cfs_cpt_table_create(int ncpt)
if (cpt != ncpt ||
num != cpus_weight(*cptab->ctb_parts[ncpt - 1].cpt_cpumask)) {
- CERROR("Expect %d(%d) CPU partitions but got %d(%d), "
- "CPU hotplug/unplug while setting?\n",
+ CERROR("Expect %d(%d) CPU partitions but got %d(%d), CPU hotplug/unplug while setting?\n",
cptab->ctb_nparts, num, cpt,
cpus_weight(*cptab->ctb_parts[ncpt - 1].cpt_cpumask));
goto failed;
@@ -808,8 +802,7 @@ cfs_cpt_table_create(int ncpt)
return cptab;
failed:
- CERROR("Failed to setup CPU-partition-table with %d "
- "CPU-partitions, online HW nodes: %d, HW cpus: %d.\n",
+ CERROR("Failed to setup CPU-partition-table with %d CPU-partitions, online HW nodes: %d, HW cpus: %d.\n",
ncpt, num_online_nodes(), num_online_cpus());
if (mask != NULL)
@@ -975,9 +968,8 @@ cfs_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
warn = any_online_cpu(*cpt_data.cpt_cpumask) >= nr_cpu_ids;
mutex_unlock(&cpt_data.cpt_mutex);
CDEBUG(warn ? D_WARNING : D_INFO,
- "Lustre: can't support CPU plug-out well now, "
- "performance and stability could be impacted "
- "[CPU %u action: %lx]\n", cpu, action);
+ "Lustre: can't support CPU plug-out well now, performance and stability could be impacted [CPU %u action: %lx]\n",
+ cpu, action);
}
return NOTIFY_OK;
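Most hunks in this file apply one checkpatch rule: keep a user-visible format string on a single line, even past 80 columns, so that grepping the tree for the message finds it; only the argument list wraps to the next line. A userspace sketch of the before/after shape (illustrative only, not the kernel code):

#include <stdio.h>

static void warn_cpu(int cpu, int cpt)
{
        /* Old shape: the literal is split, so a grep for a phrase that
         * crosses the split point ("which is not in") cannot match. */
        fprintf(stderr, "Try to unset cpu %d which is "
                "not in CPT %d\n", cpu, cpt);

        /* New shape: one unbroken literal, only the arguments wrap. */
        fprintf(stderr, "Try to unset cpu %d which is not in CPT %d\n",
                cpu, cpt);
}

int main(void)
{
        warn_cpu(3, 1);
        return 0;
}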
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c
index 3298ddf6d3b..12005a71aa7 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c
@@ -87,8 +87,7 @@ void libcfs_run_debug_log_upcall(char *file)
rc = call_usermodehelper(argv[0], argv, envp, 1);
if (rc < 0 && rc != -ENOENT) {
- CERROR("Error %d invoking LNET debug log upcall %s %s; "
- "check /proc/sys/lnet/debug_log_upcall\n",
+ CERROR("Error %d invoking LNET debug log upcall %s %s; check /proc/sys/lnet/debug_log_upcall\n",
rc, argv[0], argv[1]);
} else {
CDEBUG(D_HA, "Invoked LNET debug log upcall %s %s\n",
@@ -114,8 +113,7 @@ void libcfs_run_upcall(char **argv)
rc = call_usermodehelper(argv[0], argv, envp, 1);
if (rc < 0 && rc != -ENOENT) {
- CERROR("Error %d invoking LNET upcall %s %s%s%s%s%s%s%s%s; "
- "check /proc/sys/lnet/upcall\n",
+ CERROR("Error %d invoking LNET upcall %s %s%s%s%s%s%s%s%s; check /proc/sys/lnet/upcall\n",
rc, argv[0], argv[1],
argc < 3 ? "" : ",", argc < 3 ? "" : argv[2],
argc < 4 ? "" : ",", argc < 4 ? "" : argv[3],
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
index bbe2c68c18a..83d3f08a37b 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
@@ -365,8 +365,8 @@ static int __proc_cpt_table(void *data, int write,
if (rc >= 0)
break;
- LIBCFS_FREE(buf, len);
if (rc == -EFBIG) {
+ LIBCFS_FREE(buf, len);
len <<= 1;
continue;
}
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-tcpip.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-tcpip.c
index 939b33dd652..b91a1f95bbd 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-tcpip.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-tcpip.c
@@ -279,8 +279,7 @@ libcfs_sock_write (struct socket *sock, void *buffer, int nob, int timeout)
rc = kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO,
(char *)&tv, sizeof(tv));
if (rc != 0) {
- CERROR("Can't set socket send timeout "
- "%ld.%06d: %d\n",
+ CERROR("Can't set socket send timeout %ld.%06d: %d\n",
(long)tv.tv_sec, (int)tv.tv_usec, rc);
return rc;
}
diff --git a/drivers/staging/lustre/lustre/libcfs/tracefile.c b/drivers/staging/lustre/lustre/libcfs/tracefile.c
index 7561030c96e..5917c31c7ed 100644
--- a/drivers/staging/lustre/lustre/libcfs/tracefile.c
+++ b/drivers/staging/lustre/lustre/libcfs/tracefile.c
@@ -196,8 +196,7 @@ static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd)
*/
if (printk_ratelimit())
- printk(KERN_WARNING "debug daemon buffer overflowed; "
- "discarding 10%% of pages (%d of %ld)\n",
+ printk(KERN_WARNING "debug daemon buffer overflowed; discarding 10%% of pages (%d of %ld)\n",
pgcount + 1, tcd->tcd_cur_pages);
INIT_LIST_HEAD(&pc.pc_pages);
@@ -357,8 +356,8 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
}
if (*(string_buf+needed-1) != '\n')
- printk(KERN_INFO "format at %s:%d:%s doesn't end in "
- "newline\n", file, msgdata->msg_line, msgdata->msg_fn);
+ printk(KERN_INFO "format at %s:%d:%s doesn't end in newline\n",
+ file, msgdata->msg_line, msgdata->msg_fn);
header.ph_len = known_size + needed;
debug_buf = (char *)page_address(tage->page) + tage->used;
@@ -715,8 +714,8 @@ int cfs_tracefile_dump_all_pages(char *filename)
kunmap(tage->page);
if (rc != (int)tage->used) {
- printk(KERN_WARNING "wanted to write %u but wrote "
- "%d\n", tage->used, rc);
+ printk(KERN_WARNING "wanted to write %u but wrote %d\n",
+ tage->used, rc);
put_pages_back(&pc);
__LASSERT(list_empty(&pc.pc_pages));
break;
@@ -875,8 +874,8 @@ int cfs_trace_daemon_command(char *str)
strcpy(cfs_tracefile, str);
printk(KERN_INFO
- "Lustre: debug daemon will attempt to start writing "
- "to %s (%lukB max)\n", cfs_tracefile,
+ "Lustre: debug daemon will attempt to start writing to %s (%lukB max)\n",
+ cfs_tracefile,
(long)(cfs_tracefile_size >> 10));
cfs_trace_start_thread();
@@ -914,15 +913,15 @@ int cfs_trace_set_debug_mb(int mb)
if (mb < num_possible_cpus()) {
printk(KERN_WARNING
- "Lustre: %d MB is too small for debug buffer size, "
- "setting it to %d MB.\n", mb, num_possible_cpus());
+ "Lustre: %d MB is too small for debug buffer size, setting it to %d MB.\n",
+ mb, num_possible_cpus());
mb = num_possible_cpus();
}
if (mb > limit) {
printk(KERN_WARNING
- "Lustre: %d MB is too large for debug buffer size, "
- "setting it to %d MB.\n", mb, limit);
+ "Lustre: %d MB is too large for debug buffer size, setting it to %d MB.\n",
+ mb, limit);
mb = limit;
}
@@ -1004,8 +1003,8 @@ static int tracefiled(void *arg)
if (IS_ERR(filp)) {
rc = PTR_ERR(filp);
filp = NULL;
- printk(KERN_WARNING "couldn't open %s: "
- "%d\n", cfs_tracefile, rc);
+ printk(KERN_WARNING "couldn't open %s: %d\n",
+ cfs_tracefile, rc);
}
}
cfs_tracefile_read_unlock();
@@ -1034,8 +1033,8 @@ static int tracefiled(void *arg)
kunmap(tage->page);
if (rc != (int)tage->used) {
- printk(KERN_WARNING "wanted to write %u "
- "but wrote %d\n", tage->used, rc);
+ printk(KERN_WARNING "wanted to write %u but wrote %d\n",
+ tage->used, rc);
put_pages_back(&pc);
__LASSERT(list_empty(&pc.pc_pages));
}
@@ -1047,8 +1046,7 @@ static int tracefiled(void *arg)
if (!list_empty(&pc.pc_pages)) {
int i;
- printk(KERN_ALERT "Lustre: trace pages aren't "
- " empty\n");
+ printk(KERN_ALERT "Lustre: trace pages aren't empty\n");
printk(KERN_ERR "total cpus(%d): ",
num_possible_cpus());
for (i = 0; i < num_possible_cpus(); i++)
@@ -1061,8 +1059,8 @@ static int tracefiled(void *arg)
i = 0;
list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
linkage)
- printk(KERN_ERR "page %d belongs to cpu "
- "%d\n", ++i, tage->cpu);
+ printk(KERN_ERR "page %d belongs to cpu %d\n",
+ ++i, tage->cpu);
printk(KERN_ERR "There are %d pages unwritten\n",
i);
}
diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c
index f692261e9b5..5bb9c85cec8 100644
--- a/drivers/staging/lustre/lustre/llite/dcache.c
+++ b/drivers/staging/lustre/lustre/llite/dcache.c
@@ -259,8 +259,8 @@ void ll_invalidate_aliases(struct inode *inode)
ll_lock_dcache(inode);
ll_d_hlist_for_each_entry(dentry, p, &inode->i_dentry, d_u.d_alias) {
- CDEBUG(D_DENTRY, "dentry in drop %pd (%p) parent %p "
- "inode %p flags %d\n", dentry, dentry, dentry->d_parent,
+ CDEBUG(D_DENTRY, "dentry in drop %pd (%p) parent %p inode %p flags %d\n",
+ dentry, dentry, dentry->d_parent,
dentry->d_inode, dentry->d_flags);
if (unlikely(dentry == dentry->d_sb->s_root)) {
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index a79fd65ec4c..1ac7a702ce2 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -163,7 +163,7 @@ static int ll_dir_filler(void *_hash, struct page *page0)
LASSERT(max_pages > 0 && max_pages <= MD_MAX_BRW_PAGES);
- page_pool = kzalloc(sizeof(page) * max_pages, GFP_NOFS);
+ page_pool = kcalloc(max_pages, sizeof(page), GFP_NOFS);
if (page_pool) {
page_pool[0] = page0;
} else {
@@ -228,8 +228,8 @@ static int ll_dir_filler(void *_hash, struct page *page0)
if (ll_pagevec_add(&lru_pvec, page) == 0)
ll_pagevec_lru_add_file(&lru_pvec);
} else {
- CDEBUG(D_VFSTRACE, "page %lu add to page cache failed:"
- " %d\n", offset, ret);
+ CDEBUG(D_VFSTRACE, "page %lu add to page cache failed: %d\n",
+ offset, ret);
}
page_cache_release(page);
}
@@ -275,14 +275,14 @@ static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash,
struct page *page;
int found;
- TREE_READ_LOCK_IRQ(mapping);
+ spin_lock_irq(&mapping->tree_lock);
found = radix_tree_gang_lookup(&mapping->page_tree,
(void **)&page, offset, 1);
if (found > 0) {
struct lu_dirpage *dp;
page_cache_get(page);
- TREE_READ_UNLOCK_IRQ(mapping);
+ spin_unlock_irq(&mapping->tree_lock);
/*
* In contrast to find_lock_page() we are sure that directory
* page cannot be truncated (while DLM lock is held) and,
@@ -326,7 +326,7 @@ static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash,
}
} else {
- TREE_READ_UNLOCK_IRQ(mapping);
+ spin_unlock_irq(&mapping->tree_lock);
page = NULL;
}
return page;
@@ -600,8 +600,8 @@ static int ll_readdir(struct file *filp, struct dir_context *ctx)
int api32 = ll_need_32bit_api(sbi);
int rc;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) pos %lu/%llu "
- " 32bit_api %d\n", inode->i_ino, inode->i_generation,
+ CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) pos %lu/%llu 32bit_api %d\n",
+ inode->i_ino, inode->i_generation,
inode, (unsigned long)lfd->lfd_pos, i_size_read(inode), api32);
if (lfd->lfd_pos == MDS_DIR_END_OFF) {
@@ -661,7 +661,7 @@ int ll_dir_setdirstripe(struct inode *dir, struct lmv_user_md *lump,
int mode;
int err;
- mode = (0755 & (S_IRWXUGO|S_ISVTX) & ~current->fs->umask) | S_IFDIR;
+ mode = (0755 & ~current_umask()) | S_IFDIR;
op_data = ll_prep_md_op_data(NULL, dir, NULL, filename,
strlen(filename), mode, LUSTRE_OPC_MKDIR,
lump);
@@ -715,10 +715,9 @@ int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
break;
}
default: {
- CDEBUG(D_IOCTL, "bad userland LOV MAGIC:"
- " %#08x != %#08x nor %#08x\n",
- lump->lmm_magic, LOV_USER_MAGIC_V1,
- LOV_USER_MAGIC_V3);
+ CDEBUG(D_IOCTL, "bad userland LOV MAGIC: %#08x != %#08x nor %#08x\n",
+ lump->lmm_magic, LOV_USER_MAGIC_V1,
+ LOV_USER_MAGIC_V3);
return -EINVAL;
}
}
@@ -814,8 +813,8 @@ int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp,
rc = md_getattr(sbi->ll_md_exp, op_data, &req);
ll_finish_md_op_data(op_data);
if (rc < 0) {
- CDEBUG(D_INFO, "md_getattr failed on inode "
- "%lu/%u: rc %d\n", inode->i_ino,
+ CDEBUG(D_INFO, "md_getattr failed on inode %lu/%u: rc %d\n",
+ inode->i_ino,
inode->i_generation, rc);
goto out;
}
@@ -1013,8 +1012,7 @@ static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy)
copy->hc_hai.hai_action == HSMA_ARCHIVE);
iput(inode);
if (rc) {
- CDEBUG(D_HSM, "Could not read file data version. "
- "Request could not be confirmed.\n");
+ CDEBUG(D_HSM, "Could not read file data version. Request could not be confirmed.\n");
if (hpk.hpk_errval == 0)
hpk.hpk_errval = -rc;
goto progress;
@@ -1028,8 +1026,7 @@ static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy)
* to check anyway. */
if ((copy->hc_hai.hai_action == HSMA_ARCHIVE) &&
(copy->hc_data_version != data_version)) {
- CDEBUG(D_HSM, "File data version mismatched. "
- "File content was changed during archiving. "
+ CDEBUG(D_HSM, "File data version mismatched. File content was changed during archiving. "
DFID", start:%#llx current:%#llx\n",
PFID(&copy->hc_hai.hai_fid),
copy->hc_data_version, data_version);
@@ -1384,7 +1381,7 @@ lmv_out_free:
if (copy_from_user(lumv1, lumv1p, sizeof(*lumv1)))
return -EFAULT;
- if ((lumv1->lmm_magic == LOV_USER_MAGIC_V3) ) {
+ if (lumv1->lmm_magic == LOV_USER_MAGIC_V3) {
if (copy_from_user(&lumv3, lumv3p, sizeof(lumv3)))
return -EFAULT;
}
@@ -1509,8 +1506,7 @@ out_rmdir:
cmd == LL_IOC_MDC_GETINFO)) {
rc = 0;
goto skip_lmm;
- }
- else
+ } else
goto out_req;
}
@@ -1694,64 +1690,6 @@ out_poll:
OBD_FREE_PTR(check);
return rc;
}
-#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0)
- case LL_IOC_QUOTACTL_18: {
- /* copy the old 1.x quota struct for internal use, then copy
- * back into old format struct. For 1.8 compatibility. */
- struct if_quotactl_18 *qctl_18;
- struct if_quotactl *qctl_20;
-
- qctl_18 = kzalloc(sizeof(*qctl_18), GFP_NOFS);
- if (!qctl_18)
- return -ENOMEM;
-
- qctl_20 = kzalloc(sizeof(*qctl_20), GFP_NOFS);
- if (!qctl_20) {
- rc = -ENOMEM;
- goto out_quotactl_18;
- }
-
- if (copy_from_user(qctl_18, (void *)arg, sizeof(*qctl_18))) {
- rc = -ENOMEM;
- goto out_quotactl_20;
- }
-
- QCTL_COPY(qctl_20, qctl_18);
- qctl_20->qc_idx = 0;
-
- /* XXX: dqb_valid was borrowed as a flag to mark that
- * only mds quota is wanted */
- if (qctl_18->qc_cmd == Q_GETQUOTA &&
- qctl_18->qc_dqblk.dqb_valid) {
- qctl_20->qc_valid = QC_MDTIDX;
- qctl_20->qc_dqblk.dqb_valid = 0;
- } else if (qctl_18->obd_uuid.uuid[0] != '\0') {
- qctl_20->qc_valid = QC_UUID;
- qctl_20->obd_uuid = qctl_18->obd_uuid;
- } else {
- qctl_20->qc_valid = QC_GENERAL;
- }
-
- rc = quotactl_ioctl(sbi, qctl_20);
-
- if (rc == 0) {
- QCTL_COPY(qctl_18, qctl_20);
- qctl_18->obd_uuid = qctl_20->obd_uuid;
-
- if (copy_to_user((void *)arg, qctl_18,
- sizeof(*qctl_18)))
- rc = -EFAULT;
- }
-
-out_quotactl_20:
- OBD_FREE_PTR(qctl_20);
-out_quotactl_18:
- OBD_FREE_PTR(qctl_18);
- return rc;
- }
-#else
-#warning "remove old LL_IOC_QUOTACTL_18 compatibility code"
-#endif /* LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0) */
case LL_IOC_QUOTACTL: {
struct if_quotactl *qctl;
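The ll_dir_filler() hunk above replaces kzalloc(sizeof(page) * max_pages, ...) with kcalloc(max_pages, sizeof(page), ...): passing the count and the element size separately lets the allocator reject a multiplication that would overflow, instead of silently allocating a short buffer. A userspace analogue with calloc() (a sketch, not the Lustre code):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        size_t max_pages = 16;
        void **page_pool;

        /* Risky shape: the caller does the multiplication, which can wrap. */
        page_pool = malloc(max_pages * sizeof(*page_pool));
        free(page_pool);

        /* Safer shape: the allocator validates count * size itself. */
        page_pool = calloc(max_pages, sizeof(*page_pool));
        if (!page_pool) {
                perror("calloc");
                return 1;
        }
        free(page_pool);
        return 0;
}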
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index a2ae9a68a9a..35a2df01528 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -170,8 +170,8 @@ static int ll_close_inode_openhandle(struct obd_export *md_exp,
* OSTs and send setattr to back to MDS. */
rc = ll_som_update(inode, op_data);
if (rc) {
- CERROR("inode %lu mdc Size-on-MDS update failed: "
- "rc = %d\n", inode->i_ino, rc);
+ CERROR("inode %lu mdc Size-on-MDS update failed: rc = %d\n",
+ inode->i_ino, rc);
rc = 0;
}
} else if (rc) {
@@ -247,7 +247,7 @@ int ll_md_real_close(struct inode *inode, fmode_t fmode)
return 0;
}
- och=*och_p;
+ och = *och_p;
*och_p = NULL;
mutex_unlock(&lli->lli_och_mutex);
@@ -358,7 +358,7 @@ int ll_file_release(struct inode *inode, struct file *file)
fd = LUSTRE_FPRIVATE(file);
LASSERT(fd != NULL);
- /* The last ref on @file, maybe not the the owner pid of statahead.
+ /* The last ref on @file, maybe not the owner pid of statahead.
* Different processes can open the same dir, "ll_opendir_key" means:
* it is me that should stop the statahead thread. */
if (S_ISDIR(inode->i_mode) && lli->lli_opendir_key == fd &&
@@ -975,8 +975,8 @@ int ll_inode_getattr(struct inode *inode, struct obdo *obdo,
struct ost_id *oi = lsm ? &lsm->lsm_oi : &obdo->o_oi;
obdo_refresh_inode(inode, obdo, obdo->o_valid);
- CDEBUG(D_INODE, "objid "DOSTID" size %llu, blocks %llu,"
- " blksize %lu\n", POSTID(oi), i_size_read(inode),
+ CDEBUG(D_INODE, "objid " DOSTID " size %llu, blocks %llu, blksize %lu\n",
+ POSTID(oi), i_size_read(inode),
(unsigned long long)inode->i_blocks,
1UL << inode->i_blkbits);
}
@@ -1403,8 +1403,8 @@ int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename,
rc = md_getattr_name(sbi->ll_md_exp, op_data, &req);
ll_finish_md_op_data(op_data);
if (rc < 0) {
- CDEBUG(D_INFO, "md_getattr_name failed "
- "on %s: rc %d\n", filename, rc);
+ CDEBUG(D_INFO, "md_getattr_name failed on %s: rc %d\n",
+ filename, rc);
goto out;
}
@@ -2221,8 +2221,8 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (cmd == LL_IOC_SETFLAGS) {
if ((flags & LL_FILE_IGNORE_LOCK) &&
!(file->f_flags & O_DIRECT)) {
- CERROR("%s: unable to disable locking on "
- "non-O_DIRECT file\n", current->comm);
+ CERROR("%s: unable to disable locking on non-O_DIRECT file\n",
+ current->comm);
return -EINVAL;
}
@@ -2848,7 +2848,7 @@ ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits,
struct lustre_handle *lockh, __u64 flags,
ldlm_mode_t mode)
{
- ldlm_policy_data_t policy = { .l_inodebits = {bits}};
+ ldlm_policy_data_t policy = { .l_inodebits = {bits} };
struct lu_fid *fid;
ldlm_mode_t rc;
diff --git a/drivers/staging/lustre/lustre/llite/llite_capa.c b/drivers/staging/lustre/lustre/llite/llite_capa.c
index b1e39ee412c..aec9a44120c 100644
--- a/drivers/staging/lustre/lustre/llite/llite_capa.c
+++ b/drivers/staging/lustre/lustre/llite/llite_capa.c
@@ -540,8 +540,7 @@ static int ll_update_capa(struct obd_capa *ocapa, struct lustre_capa *capa)
if (rc == -EIO && !capa_is_expired(ocapa)) {
delay_capa_renew(ocapa, 120);
DEBUG_CAPA(D_ERROR, &ocapa->c_capa,
- "renewal failed: -EIO, "
- "retry in 2 mins");
+ "renewal failed: -EIO, retry in 2 mins");
ll_capa_renewal_retries++;
goto retry;
} else {
diff --git a/drivers/staging/lustre/lustre/llite/llite_close.c b/drivers/staging/lustre/lustre/llite/llite_close.c
index 84e0003f2da..21b4a502677 100644
--- a/drivers/staging/lustre/lustre/llite/llite_close.c
+++ b/drivers/staging/lustre/lustre/llite/llite_close.c
@@ -90,8 +90,7 @@ void ll_queue_done_writing(struct inode *inode, unsigned long flags)
struct ll_close_queue *lcq = ll_i2sbi(inode)->ll_lcq;
if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
- CWARN("ino %lu/%u(flags %u) som valid it just after "
- "recovery\n",
+ CWARN("ino %lu/%u(flags %u) som valid it just after recovery\n",
inode->i_ino, inode->i_generation,
lli->lli_flags);
/* DONE_WRITING is allowed and inode has no dirty page. */
@@ -124,8 +123,8 @@ void ll_done_writing_attr(struct inode *inode, struct md_op_data *op_data)
op_data->op_flags |= MF_SOM_CHANGE;
/* Check if Size-on-MDS attributes are valid. */
if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
- CERROR("ino %lu/%u(flags %u) som valid it just after "
- "recovery\n", inode->i_ino, inode->i_generation,
+ CERROR("ino %lu/%u(flags %u) som valid it just after recovery\n",
+ inode->i_ino, inode->i_generation,
lli->lli_flags);
if (!cl_local_size(inode)) {
@@ -218,8 +217,8 @@ int ll_som_update(struct inode *inode, struct md_op_data *op_data)
LASSERT(op_data != NULL);
if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
- CERROR("ino %lu/%u(flags %u) som valid it just after "
- "recovery\n", inode->i_ino, inode->i_generation,
+ CERROR("ino %lu/%u(flags %u) som valid it just after recovery\n",
+ inode->i_ino, inode->i_generation,
lli->lli_flags);
OBDO_ALLOC(oa);
@@ -238,9 +237,8 @@ int ll_som_update(struct inode *inode, struct md_op_data *op_data)
if (rc) {
oa->o_valid = 0;
if (rc != -ENOENT)
- CERROR("inode_getattr failed (%d): unable to "
- "send a Size-on-MDS attribute update "
- "for inode %lu/%u\n", rc, inode->i_ino,
+ CERROR("inode_getattr failed (%d): unable to send a Size-on-MDS attribute update for inode %lu/%u\n",
+ rc, inode->i_ino,
inode->i_generation);
} else {
CDEBUG(D_INODE, "Size-on-MDS update on "DFID"\n",
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index 77d1c12704b..37306e0c7aa 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -1435,8 +1435,8 @@ static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,
* case the dcache being cleared */
if (it->d.lustre.it_remote_lock_mode) {
handle.cookie = it->d.lustre.it_remote_lock_handle;
- CDEBUG(D_DLMTRACE, "setting l_data to inode %p"
- "(%lu/%u) for remote lock %#llx\n", inode,
+ CDEBUG(D_DLMTRACE, "setting l_data to inode %p(%lu/%u) for remote lock %#llx\n",
+ inode,
inode->i_ino, inode->i_generation,
handle.cookie);
md_set_lock_data(exp, &handle.cookie, inode, NULL);
@@ -1444,8 +1444,8 @@ static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,
handle.cookie = it->d.lustre.it_lock_handle;
- CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)"
- " for lock %#llx\n", inode, inode->i_ino,
+ CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u) for lock %#llx\n",
+ inode, inode->i_ino,
inode->i_generation, handle.cookie);
md_set_lock_data(exp, &handle.cookie, inode,
@@ -1489,8 +1489,8 @@ static inline void __d_lustre_invalidate(struct dentry *dentry)
*/
static inline void d_lustre_invalidate(struct dentry *dentry, int nested)
{
- CDEBUG(D_DENTRY, "invalidate dentry %pd (%p) parent %p inode %p "
- "refc %d\n", dentry, dentry,
+ CDEBUG(D_DENTRY, "invalidate dentry %pd (%p) parent %p inode %p refc %d\n",
+ dentry, dentry,
dentry->d_parent, dentry->d_inode, d_count(dentry));
spin_lock_nested(&dentry->d_lock,
@@ -1509,24 +1509,6 @@ static inline void d_lustre_revalidate(struct dentry *dentry)
spin_unlock(&dentry->d_lock);
}
-#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0)
-/* Compatibility for old (1.8) compiled userspace quota code */
-struct if_quotactl_18 {
- __u32 qc_cmd;
- __u32 qc_type;
- __u32 qc_id;
- __u32 qc_stat;
- struct obd_dqinfo qc_dqinfo;
- struct obd_dqblk qc_dqblk;
- char obd_type[16];
- struct obd_uuid obd_uuid;
-};
-#define LL_IOC_QUOTACTL_18 _IOWR('f', 162, struct if_quotactl_18 *)
-/* End compatibility for old (1.8) compiled userspace quota code */
-#else
-#warning "remove old LL_IOC_QUOTACTL_18 compatibility code"
-#endif /* LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0) */
-
enum {
LL_LAYOUT_GEN_NONE = ((__u32)-2), /* layout lock was cancelled */
LL_LAYOUT_GEN_EMPTY = ((__u32)-1) /* for empty layout */
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index 7b6b9e2e010..a3367bfb145 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -250,12 +250,11 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
data->ocd_brw_size = MD_MAX_BRW_SIZE;
- err = obd_connect(NULL, &sbi->ll_md_exp, obd, &sbi->ll_sb_uuid, data, NULL);
+ err = obd_connect(NULL, &sbi->ll_md_exp, obd, &sbi->ll_sb_uuid,
+ data, NULL);
if (err == -EBUSY) {
- LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing "
- "recovery, of which this client is not a "
- "part. Please wait for recovery to complete,"
- " abort, or time out.\n", md);
+ LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing recovery, of which this client is not a part. Please wait for recovery to complete, abort, or time out.\n",
+ md);
goto out;
} else if (err) {
CERROR("cannot connect to %s: rc = %d\n", md, err);
@@ -267,8 +266,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
err = obd_fid_init(sbi->ll_md_exp->exp_obd, sbi->ll_md_exp,
LUSTRE_SEQ_METADATA);
if (err) {
- CERROR("%s: Can't init metadata layer FID infrastructure, "
- "rc = %d\n", sbi->ll_md_exp->exp_obd->obd_name, err);
+ CERROR("%s: Can't init metadata layer FID infrastructure, rc = %d\n",
+ sbi->ll_md_exp->exp_obd->obd_name, err);
goto out_md;
}
@@ -296,10 +295,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
buf = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
obd_connect_flags2str(buf, PAGE_CACHE_SIZE,
valid ^ CLIENT_CONNECT_MDT_REQD, ",");
- LCONSOLE_ERROR_MSG(0x170, "Server %s does not support "
- "feature(s) needed for correct operation "
- "of this client (%s). Please upgrade "
- "server or downgrade client.\n",
+ LCONSOLE_ERROR_MSG(0x170, "Server %s does not support feature(s) needed for correct operation of this client (%s). Please upgrade server or downgrade client.\n",
sbi->ll_md_exp->exp_obd->obd_name, buf);
OBD_FREE(buf, PAGE_CACHE_SIZE);
err = -EPROTO;
@@ -325,8 +321,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
if ((sbi->ll_flags & LL_SBI_USER_XATTR) &&
!(data->ocd_connect_flags & OBD_CONNECT_XATTR)) {
- LCONSOLE_INFO("Disabling user_xattr feature because "
- "it is not supported on the server\n");
+ LCONSOLE_INFO("Disabling user_xattr feature because it is not supported on the server\n");
sbi->ll_flags &= ~LL_SBI_USER_XATTR;
}
@@ -351,8 +346,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
} else {
if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
sbi->ll_flags &= ~LL_SBI_RMT_CLIENT;
- LCONSOLE_INFO("client claims to be remote, but server "
- "rejected, forced to be local.\n");
+ LCONSOLE_INFO("client claims to be remote, but server rejected, forced to be local.\n");
}
}
@@ -429,8 +423,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
data->ocd_connect_flags |= OBD_CONNECT_RMT_CLIENT_FORCE;
- CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d "
- "ocd_grant: %d\n", data->ocd_connect_flags,
+ CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d\n",
+ data->ocd_connect_flags,
data->ocd_version, data->ocd_grant);
obd->obd_upcall.onu_owner = &sbi->ll_lco;
@@ -441,10 +435,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
err = obd_connect(NULL, &sbi->ll_dt_exp, obd, &sbi->ll_sb_uuid, data,
NULL);
if (err == -EBUSY) {
- LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing "
- "recovery, of which this client is not a "
- "part. Please wait for recovery to "
- "complete, abort, or time out.\n", dt);
+ LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing recovery, of which this client is not a part. Please wait for recovery to complete, abort, or time out.\n",
+ dt);
goto out_md;
} else if (err) {
CERROR("%s: Cannot connect to %s: rc = %d\n",
@@ -457,8 +449,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
err = obd_fid_init(sbi->ll_dt_exp->exp_obd, sbi->ll_dt_exp,
LUSTRE_SEQ_METADATA);
if (err) {
- CERROR("%s: Can't init data layer FID infrastructure, "
- "rc = %d\n", sbi->ll_dt_exp->exp_obd->obd_name, err);
+ CERROR("%s: Can't init data layer FID infrastructure, rc = %d\n",
+ sbi->ll_dt_exp->exp_obd->obd_name, err);
goto out_dt;
}
@@ -698,9 +690,9 @@ void lustre_dump_dentry(struct dentry *dentry, int recur)
list_for_each(tmp, &dentry->d_subdirs)
subdirs++;
- CERROR("dentry %p dump: name=%pd parent=%p, inode=%p, count=%u,"
- " flags=0x%x, fsdata=%p, %d subdirs\n", dentry, dentry,
- dentry->d_parent, dentry->d_inode, d_count(dentry),
+ CERROR("dentry %p dump: name=%pd parent=%pd (%p), inode=%p, count=%u, flags=0x%x, fsdata=%p, %d subdirs\n",
+ dentry, dentry, dentry->d_parent, dentry->d_parent,
+ dentry->d_inode, d_count(dentry),
dentry->d_flags, dentry->d_fsdata, subdirs);
if (dentry->d_inode != NULL)
ll_dump_inode(dentry->d_inode);
@@ -710,6 +702,7 @@ void lustre_dump_dentry(struct dentry *dentry, int recur)
list_for_each(tmp, &dentry->d_subdirs) {
struct dentry *d = list_entry(tmp, struct dentry, d_child);
+
lustre_dump_dentry(d, recur - 1);
}
}
@@ -754,9 +747,9 @@ void ll_kill_super(struct super_block *sb)
return;
sbi = ll_s2sbi(sb);
- /* we need to restore s_dev from changed for clustered NFS before put_super
- * because new kernels have cached s_dev and change sb->s_dev in
- * put_super not affected real removing devices */
+ /* we need to restore s_dev from changed for clustered NFS before
+ * put_super because new kernels have cached s_dev and change sb->s_dev
+ * in put_super not affected real removing devices */
if (sbi) {
sb->s_dev = sbi->ll_sdev_orig;
sbi->ll_umounting = 1;
@@ -814,25 +807,6 @@ static int ll_options(char *options, int *flags)
*flags &= ~tmp;
goto next;
}
-#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 5, 50, 0)
- tmp = ll_set_opt("acl", s1, LL_SBI_ACL);
- if (tmp) {
- /* Ignore deprecated mount option. The client will
- * always try to mount with ACL support, whether this
- * is used depends on whether server supports it. */
- LCONSOLE_ERROR_MSG(0x152, "Ignoring deprecated "
- "mount option 'acl'.\n");
- goto next;
- }
- tmp = ll_set_opt("noacl", s1, LL_SBI_ACL);
- if (tmp) {
- LCONSOLE_ERROR_MSG(0x152, "Ignoring deprecated "
- "mount option 'noacl'.\n");
- goto next;
- }
-#else
-#warning "{no}acl options have been deprecated since 1.8, please remove them"
-#endif
tmp = ll_set_opt("remote_client", s1, LL_SBI_RMT_CLIENT);
if (tmp) {
*flags |= tmp;
@@ -1038,9 +1012,8 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
/* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
lprof = class_get_profile(profilenm);
if (lprof == NULL) {
- LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be"
- " read from the MGS. Does that filesystem "
- "exist?\n", profilenm);
+ LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be read from the MGS. Does that filesystem exist?\n",
+ profilenm);
err = -EINVAL;
goto out_free;
}
@@ -1119,9 +1092,8 @@ void ll_put_super(struct super_block *sb)
}
next = 0;
- while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)) !=NULL) {
+ while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)))
class_manual_cleanup(obd);
- }
if (sbi->ll_flags & LL_SBI_VERBOSE)
LCONSOLE_WARN("Unmounted %s\n", profilenm ? profilenm : "");
@@ -1150,14 +1122,14 @@ struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock)
lock_res_and_lock(lock);
if (lock->l_resource->lr_lvb_inode) {
struct ll_inode_info *lli;
+
lli = ll_i2info(lock->l_resource->lr_lvb_inode);
if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
inode = igrab(lock->l_resource->lr_lvb_inode);
} else {
inode = lock->l_resource->lr_lvb_inode;
LDLM_DEBUG_LIMIT(inode->i_state & I_FREEING ? D_INFO :
- D_WARNING, lock, "lr_lvb_inode %p is "
- "bogus: magic %08x",
+ D_WARNING, lock, "lr_lvb_inode %p is bogus: magic %08x",
lock->l_resource->lr_lvb_inode,
lli->lli_inode_magic);
inode = NULL;
@@ -1730,11 +1702,11 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
if (body->valid & OBD_MD_FLTYPE)
inode->i_mode = (inode->i_mode & ~S_IFMT)|(body->mode & S_IFMT);
LASSERT(inode->i_mode != 0);
- if (S_ISREG(inode->i_mode)) {
- inode->i_blkbits = min(PTLRPC_MAX_BRW_BITS + 1, LL_MAX_BLKSIZE_BITS);
- } else {
+ if (S_ISREG(inode->i_mode))
+ inode->i_blkbits = min(PTLRPC_MAX_BRW_BITS + 1,
+ LL_MAX_BLKSIZE_BITS);
+ else
inode->i_blkbits = inode->i_sb->s_blocksize_bits;
- }
if (body->valid & OBD_MD_FLUID)
inode->i_uid = make_kuid(&init_user_ns, body->uid);
if (body->valid & OBD_MD_FLGID)
@@ -1778,9 +1750,7 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
if (lli->lli_flags & (LLIF_DONE_WRITING |
LLIF_EPOCH_PENDING |
LLIF_SOM_DIRTY)) {
- CERROR("ino %lu flags %u still has "
- "size authority! do not trust "
- "the size got from MDS\n",
+ CERROR("ino %lu flags %u still has size authority! do not trust the size got from MDS\n",
inode->i_ino, lli->lli_flags);
} else {
/* Use old size assignment to avoid
@@ -1848,6 +1818,7 @@ void ll_read_inode2(struct inode *inode, void *opaque)
if (S_ISREG(inode->i_mode)) {
struct ll_sb_info *sbi = ll_i2sbi(inode);
+
inode->i_op = &ll_file_inode_operations;
inode->i_fop = sbi->ll_fop;
inode->i_mapping->a_ops = (struct address_space_operations *)&ll_aops;
@@ -1878,11 +1849,10 @@ void ll_delete_inode(struct inode *inode)
/* Workaround for LU-118 */
if (inode->i_data.nrpages) {
- TREE_READ_LOCK_IRQ(&inode->i_data);
- TREE_READ_UNLOCK_IRQ(&inode->i_data);
+ spin_lock_irq(&inode->i_data.tree_lock);
+ spin_unlock_irq(&inode->i_data.tree_lock);
LASSERTF(inode->i_data.nrpages == 0,
- "inode=%lu/%u(%p) nrpages=%lu, see "
- "http://jira.whamcloud.com/browse/LU-118\n",
+ "inode=%lu/%u(%p) nrpages=%lu, see http://jira.whamcloud.com/browse/LU-118\n",
inode->i_ino, inode->i_generation, inode,
inode->i_data.nrpages);
}
@@ -2164,7 +2134,13 @@ int ll_obd_statfs(struct inode *inode, void *arg)
__u32 flags;
int len = 0, rc;
- if (!inode || !(sbi = ll_i2sbi(inode))) {
+ if (!inode) {
+ rc = -EINVAL;
+ goto out_statfs;
+ }
+
+ sbi = ll_i2sbi(inode);
+ if (!sbi) {
rc = -EINVAL;
goto out_statfs;
}
@@ -2396,21 +2372,6 @@ char *ll_get_fsname(struct super_block *sb, char *buf, int buflen)
return buf;
}
-static char* ll_d_path(struct dentry *dentry, char *buf, int bufsize)
-{
- char *path = NULL;
-
- struct path p;
-
- p.dentry = dentry;
- p.mnt = current->fs->root.mnt;
- path_get(&p);
- path = d_path(&p, buf, bufsize);
- path_put(&p);
-
- return path;
-}
-
void ll_dirty_page_discard_warn(struct page *page, int ioret)
{
char *buf, *path = NULL;
@@ -2422,12 +2383,12 @@ void ll_dirty_page_discard_warn(struct page *page, int ioret)
if (buf != NULL) {
dentry = d_find_alias(page->mapping->host);
if (dentry != NULL)
- path = ll_d_path(dentry, buf, PAGE_SIZE);
+ path = dentry_path_raw(dentry, buf, PAGE_SIZE);
}
CDEBUG(D_WARNING,
- "%s: dirty page discard: %s/fid: "DFID"/%s may get corrupted "
- "(rc %d)\n", ll_get_fsname(page->mapping->host->i_sb, NULL, 0),
+ "%s: dirty page discard: %s/fid: " DFID "/%s may get corrupted (rc %d)\n",
+ ll_get_fsname(page->mapping->host->i_sb, NULL, 0),
s2lsi(page->mapping->host->i_sb)->lsi_lmd->lmd_dev,
PFID(&obj->cob_header.coh_lu.loh_fid),
(path && !IS_ERR(path)) ? path : "", ioret);
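Among the llite_lib.c changes above, ll_obd_statfs() stops folding an assignment into the if condition ("if (!inode || !(sbi = ll_i2sbi(inode)))") and instead tests each step on its own line, which is the form checkpatch prefers. The same refactor in a small userspace sketch (hypothetical names):

#include <stdio.h>
#include <stdlib.h>

struct ctx { int id; };

static struct ctx *lookup_ctx(int want)
{
        return want > 0 ? calloc(1, sizeof(struct ctx)) : NULL;
}

static int use_ctx(int want)
{
        struct ctx *c;

        /* Discouraged shape:
         *      if (!want || !(c = lookup_ctx(want)))
         *              return -1;
         * Each failure path now stands alone: */
        if (!want)
                return -1;

        c = lookup_ctx(want);
        if (!c)
                return -1;

        free(c);
        return 0;
}

int main(void)
{
        printf("%d %d\n", use_ctx(0), use_ctx(1));
        return 0;
}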
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
index ba1c047ae92..479bf428780 100644
--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
+++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
@@ -234,8 +234,7 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
*/
unlock_page(vmpage);
- CDEBUG(D_MMAP, "Race on page_mkwrite %p/%lu, page has "
- "been written out, retry.\n",
+ CDEBUG(D_MMAP, "Race on page_mkwrite %p/%lu, page has been written out, retry.\n",
vmpage, vmpage->index);
*retry = true;
@@ -366,8 +365,7 @@ restart:
vmf->page = NULL;
if (!printed && ++count > 16) {
- CWARN("the page is under heavy contention,"
- "maybe your app(%s) needs revising :-)\n",
+ CWARN("the page is under heavy contention, maybe your app(%s) needs revising :-)\n",
current->comm);
printed = true;
}
@@ -393,8 +391,7 @@ static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
result = ll_page_mkwrite0(vma, vmf->page, &retry);
if (!printed && ++count > 16) {
- CWARN("app(%s): the page %lu of file %lu is under heavy"
- " contention.\n",
+ CWARN("app(%s): the page %lu of file %lu is under heavy contention.\n",
current->comm, vmf->pgoff,
file_inode(vma->vm_file)->i_ino);
printed = true;
diff --git a/drivers/staging/lustre/lustre/llite/llite_rmtacl.c b/drivers/staging/lustre/lustre/llite/llite_rmtacl.c
index 586f49a374e..f4da156f387 100644
--- a/drivers/staging/lustre/lustre/llite/llite_rmtacl.c
+++ b/drivers/staging/lustre/lustre/llite/llite_rmtacl.c
@@ -131,8 +131,8 @@ int rct_add(struct rmtacl_ctl_table *rct, pid_t key, int ops)
spin_lock(&rct->rct_lock);
e = __rct_search(rct, key);
if (unlikely(e != NULL)) {
- CWARN("Unexpected stale rmtacl_entry found: "
- "[key: %d] [ops: %d]\n", (int)key, ops);
+ CWARN("Unexpected stale rmtacl_entry found: [key: %d] [ops: %d]\n",
+ (int)key, ops);
rce_free(e);
}
list_add_tail(&rce->rce_list, &rct->rct_entries[rce_hashfunc(key)]);
@@ -263,8 +263,7 @@ int ee_add(struct eacl_table *et, pid_t key, struct lu_fid *fid, int type,
spin_lock(&et->et_lock);
e = __et_search_del(et, key, fid, type);
if (unlikely(e != NULL)) {
- CWARN("Unexpected stale eacl_entry found: "
- "[key: %d] [fid: "DFID"] [type: %d]\n",
+ CWARN("Unexpected stale eacl_entry found: [key: %d] [fid: " DFID "] [type: %d]\n",
(int)key, PFID(fid), type);
ee_free(e);
}
diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c
index 9e31b789b79..03124884064 100644
--- a/drivers/staging/lustre/lustre/llite/lloop.c
+++ b/drivers/staging/lustre/lustre/llite/lloop.c
@@ -777,8 +777,8 @@ static int __init lloop_init(void)
if (max_loop < 1 || max_loop > 256) {
max_loop = MAX_LOOP_DEFAULT;
- CWARN("lloop: invalid max_loop (must be between"
- " 1 and 256), using default (%u)\n", max_loop);
+ CWARN("lloop: invalid max_loop (must be between 1 and 256), using default (%u)\n",
+ max_loop);
}
lloop_major = register_blkdev(0, "lloop");
@@ -792,11 +792,11 @@ static int __init lloop_init(void)
if (ll_iocontrol_magic == NULL)
goto out_mem1;
- loop_dev = kzalloc(max_loop * sizeof(*loop_dev), GFP_KERNEL);
+ loop_dev = kcalloc(max_loop, sizeof(*loop_dev), GFP_KERNEL);
if (!loop_dev)
goto out_mem1;
- disks = kzalloc(max_loop * sizeof(*disks), GFP_KERNEL);
+ disks = kcalloc(max_loop, sizeof(*disks), GFP_KERNEL);
if (!disks)
goto out_mem2;
diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c
index 3b3df9f0342..e6a909e6faf 100644
--- a/drivers/staging/lustre/lustre/llite/lproc_llite.c
+++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c
@@ -227,8 +227,9 @@ static int ll_max_readahead_mb_seq_show(struct seq_file *m, void *v)
return lprocfs_seq_read_frac_helper(m, pages_number, mult);
}
-static ssize_t ll_max_readahead_mb_seq_write(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+static ssize_t ll_max_readahead_mb_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct super_block *sb = ((struct seq_file *)file->private_data)->private;
struct ll_sb_info *sbi = ll_s2sbi(sb);
@@ -269,7 +270,7 @@ static int ll_max_readahead_per_file_mb_seq_show(struct seq_file *m, void *v)
}
static ssize_t ll_max_readahead_per_file_mb_seq_write(struct file *file,
- const char *buffer,
+ const char __user *buffer,
size_t count, loff_t *off)
{
struct super_block *sb = ((struct seq_file *)file->private_data)->private;
@@ -283,8 +284,7 @@ static ssize_t ll_max_readahead_per_file_mb_seq_write(struct file *file,
if (pages_number < 0 ||
pages_number > sbi->ll_ra_info.ra_max_pages) {
- CERROR("can't set file readahead more than"
- "max_read_ahead_mb %lu MB\n",
+ CERROR("can't set file readahead more than max_read_ahead_mb %lu MB\n",
sbi->ll_ra_info.ra_max_pages);
return -ERANGE;
}
@@ -313,7 +313,7 @@ static int ll_max_read_ahead_whole_mb_seq_show(struct seq_file *m, void *unused)
}
static ssize_t ll_max_read_ahead_whole_mb_seq_write(struct file *file,
- const char *buffer,
+ const char __user *buffer,
size_t count, loff_t *off)
{
struct super_block *sb = ((struct seq_file *)file->private_data)->private;
@@ -329,9 +329,8 @@ static ssize_t ll_max_read_ahead_whole_mb_seq_write(struct file *file,
* algorithm does this anyway so it's pointless to set it larger. */
if (pages_number < 0 ||
pages_number > sbi->ll_ra_info.ra_max_pages_per_file) {
- CERROR("can't set max_read_ahead_whole_mb more than "
- "max_read_ahead_per_file_mb: %lu\n",
- sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_CACHE_SHIFT));
+ CERROR("can't set max_read_ahead_whole_mb more than max_read_ahead_per_file_mb: %lu\n",
+ sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_CACHE_SHIFT));
return -ERANGE;
}
@@ -469,8 +468,9 @@ static int ll_checksum_seq_show(struct seq_file *m, void *v)
return seq_printf(m, "%u\n", (sbi->ll_flags & LL_SBI_CHECKSUM) ? 1 : 0);
}
-static ssize_t ll_checksum_seq_write(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+static ssize_t ll_checksum_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct super_block *sb = ((struct seq_file *)file->private_data)->private;
struct ll_sb_info *sbi = ll_s2sbi(sb);
@@ -504,8 +504,9 @@ static int ll_max_rw_chunk_seq_show(struct seq_file *m, void *v)
return seq_printf(m, "%lu\n", ll_s2sbi(sb)->ll_max_rw_chunk);
}
-static ssize_t ll_max_rw_chunk_seq_write(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+static ssize_t ll_max_rw_chunk_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct super_block *sb = ((struct seq_file *)file->private_data)->private;
int rc, val;
@@ -533,8 +534,8 @@ static int ll_rd_track_id(struct seq_file *m, enum stats_track_type type)
}
}
-static int ll_wr_track_id(const char *buffer, unsigned long count, void *data,
- enum stats_track_type type)
+static int ll_wr_track_id(const char __user *buffer, unsigned long count,
+ void *data, enum stats_track_type type)
{
struct super_block *sb = data;
int rc, pid;
@@ -556,8 +557,9 @@ static int ll_track_pid_seq_show(struct seq_file *m, void *v)
return ll_rd_track_id(m, STATS_TRACK_PID);
}
-static ssize_t ll_track_pid_seq_write(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+static ssize_t ll_track_pid_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct seq_file *seq = file->private_data;
return ll_wr_track_id(buffer, count, seq->private, STATS_TRACK_PID);
@@ -569,8 +571,9 @@ static int ll_track_ppid_seq_show(struct seq_file *m, void *v)
return ll_rd_track_id(m, STATS_TRACK_PPID);
}
-static ssize_t ll_track_ppid_seq_write(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+static ssize_t ll_track_ppid_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct seq_file *seq = file->private_data;
return ll_wr_track_id(buffer, count, seq->private, STATS_TRACK_PPID);
@@ -582,8 +585,9 @@ static int ll_track_gid_seq_show(struct seq_file *m, void *v)
return ll_rd_track_id(m, STATS_TRACK_GID);
}
-static ssize_t ll_track_gid_seq_write(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+static ssize_t ll_track_gid_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct seq_file *seq = file->private_data;
return ll_wr_track_id(buffer, count, seq->private, STATS_TRACK_GID);
@@ -598,8 +602,9 @@ static int ll_statahead_max_seq_show(struct seq_file *m, void *v)
return seq_printf(m, "%u\n", sbi->ll_sa_max);
}
-static ssize_t ll_statahead_max_seq_write(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+static ssize_t ll_statahead_max_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct super_block *sb = ((struct seq_file *)file->private_data)->private;
struct ll_sb_info *sbi = ll_s2sbi(sb);
@@ -612,8 +617,8 @@ static ssize_t ll_statahead_max_seq_write(struct file *file, const char *buffer,
if (val >= 0 && val <= LL_SA_RPC_MAX)
sbi->ll_sa_max = val;
else
- CERROR("Bad statahead_max value %d. Valid values are in the "
- "range [0, %d]\n", val, LL_SA_RPC_MAX);
+ CERROR("Bad statahead_max value %d. Valid values are in the range [0, %d]\n",
+ val, LL_SA_RPC_MAX);
return count;
}
@@ -628,8 +633,9 @@ static int ll_statahead_agl_seq_show(struct seq_file *m, void *v)
sbi->ll_flags & LL_SBI_AGL_ENABLED ? 1 : 0);
}
-static ssize_t ll_statahead_agl_seq_write(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+static ssize_t ll_statahead_agl_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct super_block *sb = ((struct seq_file *)file->private_data)->private;
struct ll_sb_info *sbi = ll_s2sbi(sb);
@@ -672,8 +678,9 @@ static int ll_lazystatfs_seq_show(struct seq_file *m, void *v)
(sbi->ll_flags & LL_SBI_LAZYSTATFS) ? 1 : 0);
}
-static ssize_t ll_lazystatfs_seq_write(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+static ssize_t ll_lazystatfs_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct super_block *sb = ((struct seq_file *)file->private_data)->private;
struct ll_sb_info *sbi = ll_s2sbi(sb);
@@ -761,8 +768,8 @@ static int ll_sbi_flags_seq_show(struct seq_file *m, void *v)
while (flags != 0) {
if (ARRAY_SIZE(str) <= i) {
- CERROR("%s: Revise array LL_SBI_FLAGS to match sbi "
- "flags please.\n", ll_get_fsname(sb, NULL, 0));
+ CERROR("%s: Revise array LL_SBI_FLAGS to match sbi flags please.\n",
+ ll_get_fsname(sb, NULL, 0));
return -EINVAL;
}
@@ -787,7 +794,8 @@ static int ll_xattr_cache_seq_show(struct seq_file *m, void *v)
return rc;
}
-static ssize_t ll_xattr_cache_seq_write(struct file *file, const char *buffer,
+static ssize_t ll_xattr_cache_seq_write(struct file *file,
+ const char __user *buffer,
size_t count, loff_t *off)
{
struct seq_file *seq = file->private_data;
@@ -813,7 +821,7 @@ LPROC_SEQ_FOPS(ll_xattr_cache);
static struct lprocfs_vars lprocfs_llite_obd_vars[] = {
{ "uuid", &ll_sb_uuid_fops, NULL, 0 },
- //{ "mntpt_path", ll_rd_path, 0, 0 },
+ /* { "mntpt_path", ll_rd_path, 0, 0 }, */
{ "fstype", &ll_fstype_fops, NULL, 0 },
{ "site", &ll_site_stats_fops, NULL, 0 },
{ "blocksize", &ll_blksize_fops, NULL, 0 },
@@ -823,7 +831,7 @@ static struct lprocfs_vars lprocfs_llite_obd_vars[] = {
{ "filestotal", &ll_filestotal_fops, NULL, 0 },
{ "filesfree", &ll_filesfree_fops, NULL, 0 },
{ "client_type", &ll_client_type_fops, NULL, 0 },
- //{ "filegroups", lprocfs_rd_filegroups, 0, 0 },
+ /* { "filegroups", lprocfs_rd_filegroups, 0, 0 }, */
{ "max_read_ahead_mb", &ll_max_readahead_mb_fops, NULL },
{ "max_read_ahead_per_file_mb", &ll_max_readahead_per_file_mb_fops,
NULL },
@@ -1133,8 +1141,8 @@ static void ll_display_extents_info(struct ll_rw_extents_info *io_extents,
read_cum += r;
write_cum += w;
end = 1 << (i + LL_HIST_START - units);
- seq_printf(seq, "%4lu%c - %4lu%c%c: %14lu %4lu %4lu | "
- "%14lu %4lu %4lu\n", start, *unitp, end, *unitp,
+ seq_printf(seq, "%4lu%c - %4lu%c%c: %14lu %4lu %4lu | %14lu %4lu %4lu\n",
+ start, *unitp, end, *unitp,
(i == LL_HIST_MAX - 1) ? '+' : ' ',
r, pct(r, read_tot), pct(read_cum, read_tot),
w, pct(w, write_tot), pct(write_cum, write_tot));
@@ -1160,8 +1168,7 @@ static int ll_rw_extents_stats_pp_seq_show(struct seq_file *seq, void *v)
if (!sbi->ll_rw_stats_on) {
seq_printf(seq, "disabled\n"
- "write anything in this file to activate, "
- "then 0 or \"[D/d]isabled\" to deactivate\n");
+ "write anything in this file to activate, then 0 or \"[D/d]isabled\" to deactivate\n");
return 0;
}
seq_printf(seq, "snapshot_time: %lu.%lu (secs.usecs)\n",
@@ -1239,8 +1246,7 @@ static int ll_rw_extents_stats_seq_show(struct seq_file *seq, void *v)
if (!sbi->ll_rw_stats_on) {
seq_printf(seq, "disabled\n"
- "write anything in this file to activate, "
- "then 0 or \"[D/d]isabled\" to deactivate\n");
+ "write anything in this file to activate, then 0 or \"[D/d]isabled\" to deactivate\n");
return 0;
}
seq_printf(seq, "snapshot_time: %lu.%lu (secs.usecs)\n",
@@ -1418,8 +1424,7 @@ static int ll_rw_offset_stats_seq_show(struct seq_file *seq, void *v)
if (!sbi->ll_rw_stats_on) {
seq_printf(seq, "disabled\n"
- "write anything in this file to activate, "
- "then 0 or \"[D/d]isabled\" to deactivate\n");
+ "write anything in this file to activate, then 0 or \"[D/d]isabled\" to deactivate\n");
return 0;
}
spin_lock(&sbi->ll_process_lock);
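Several write handlers above gain the __user annotation on their buffer parameter: the pointer originates in userspace, must only be accessed through copy_from_user(), and the annotation lets sparse flag any direct dereference. A generic kernel-style sketch of such a handler (assumed module context, not the actual Lustre code):

#include <linux/fs.h>
#include <linux/uaccess.h>

static ssize_t example_seq_write(struct file *file,
                                 const char __user *buffer,
                                 size_t count, loff_t *off)
{
        char kbuf[16];

        if (count >= sizeof(kbuf))
                return -EINVAL;
        if (copy_from_user(kbuf, buffer, count))
                return -EFAULT;
        kbuf[count] = '\0';

        /* parse kbuf with kstrtoul()/kstrtoint() as needed ... */
        return count;
}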
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index 8e926b385a6..1bf891bd321 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -83,8 +83,8 @@ static int ll_set_inode(struct inode *inode, void *opaque)
lli->lli_fid = body->fid1;
if (unlikely(!(body->valid & OBD_MD_FLTYPE))) {
- CERROR("Can not initialize inode "DFID" without object type: "
- "valid = %#llx\n", PFID(&lli->lli_fid), body->valid);
+ CERROR("Can not initialize inode " DFID " without object type: valid = %#llx\n",
+ PFID(&lli->lli_fid), body->valid);
return -EINVAL;
}
@@ -598,8 +598,7 @@ static int ll_atomic_open(struct inode *dir, struct dentry *dentry,
long long lookup_flags = LOOKUP_OPEN;
int rc = 0;
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p),file %p,"
- "open_flags %x,mode %x opened %d\n",
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p),file %p,open_flags %x,mode %x opened %d\n",
dentry, dir->i_ino,
dir->i_generation, dir, file, open_flags, mode, *opened);
@@ -843,8 +842,7 @@ static int ll_create_nd(struct inode *dir, struct dentry *dentry,
{
int rc;
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p),"
- "flags=%u, excl=%d\n",
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p),flags=%u, excl=%d\n",
dentry, dir->i_ino,
dir->i_generation, dir, mode, want_excl);
diff --git a/drivers/staging/lustre/lustre/llite/remote_perm.c b/drivers/staging/lustre/lustre/llite/remote_perm.c
index c05a9126cfe..a58182600da 100644
--- a/drivers/staging/lustre/lustre/llite/remote_perm.c
+++ b/drivers/staging/lustre/lustre/llite/remote_perm.c
@@ -194,7 +194,7 @@ int ll_update_remote_perm(struct inode *inode, struct mdt_remote_perm *perm)
if (!lli->lli_remote_perms)
lli->lli_remote_perms = perm_hash;
- else if (perm_hash)
+ else
free_rmtperm_hash(perm_hash);
head = lli->lli_remote_perms + remote_perm_hashfunc(perm->rp_uid);
@@ -209,8 +209,7 @@ again:
continue;
if (tmp->lrp_fsgid != perm->rp_fsgid)
continue;
- if (lrp)
- free_ll_remote_perm(lrp);
+ free_ll_remote_perm(lrp);
lrp = tmp;
break;
}
diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c
index 1f53b986338..10a0421366d 100644
--- a/drivers/staging/lustre/lustre/llite/rw.c
+++ b/drivers/staging/lustre/lustre/llite/rw.c
@@ -121,8 +121,8 @@ static struct ll_cl_context *ll_cl_init(struct file *file,
/* this is too bad. Someone is trying to write the
* page w/o holding inode mutex. This means we can
* add dirty pages into cache during truncate */
- CERROR("Proc %s is dirting page w/o inode lock, this"
- "will break truncate.\n", current->comm);
+ CERROR("Proc %s is dirtying page w/o inode lock, this will break truncate\n",
+ current->comm);
dump_stack();
LBUG();
return ERR_PTR(-EIO);
@@ -145,7 +145,7 @@ static struct ll_cl_context *ll_cl_init(struct file *file,
*/
io->ci_lockreq = CILR_NEVER;
- pos = (vmpage->index << PAGE_CACHE_SHIFT);
+ pos = vmpage->index << PAGE_CACHE_SHIFT;
/* Create a temp IO to serve write. */
result = cl_io_rw_init(env, io, CIT_WRITE, pos, PAGE_CACHE_SIZE);
@@ -606,8 +606,8 @@ stride_pg_count(pgoff_t st_off, unsigned long st_len, unsigned long st_pgs,
else
pg_count = start_left + st_pgs * (end - start - 1) + end_left;
- CDEBUG(D_READA, "st_off %lu, st_len %lu st_pgs %lu off %lu length %lu"
- "pgcount %lu\n", st_off, st_len, st_pgs, off, length, pg_count);
+ CDEBUG(D_READA, "st_off %lu, st_len %lu st_pgs %lu off %lu length %lu pgcount %lu\n",
+ st_off, st_len, st_pgs, off, length, pg_count);
return pg_count;
}
@@ -667,10 +667,10 @@ static int ll_read_ahead_pages(const struct lu_env *env,
/* FIXME: This assertion only is valid when it is for
* forward read-ahead, it will be fixed when backward
* read-ahead is implemented */
- LASSERTF(page_idx > ria->ria_stoff, "Invalid page_idx %lu"
- "rs %lu re %lu ro %lu rl %lu rp %lu\n", page_idx,
- ria->ria_start, ria->ria_end, ria->ria_stoff,
- ria->ria_length, ria->ria_pages);
+ LASSERTF(page_idx > ria->ria_stoff, "Invalid page_idx %lu rs %lu re %lu ro %lu rl %lu rp %lu\n",
+ page_idx,
+ ria->ria_start, ria->ria_end, ria->ria_stoff,
+ ria->ria_length, ria->ria_pages);
offset = page_idx - ria->ria_stoff;
offset = offset % (ria->ria_length);
if (offset > ria->ria_pages) {
@@ -927,8 +927,8 @@ static void ras_stride_increase_window(struct ll_readahead_state *ras,
LASSERT(ras->ras_stride_length > 0);
LASSERTF(ras->ras_window_start + ras->ras_window_len
- >= ras->ras_stride_offset, "window_start %lu, window_len %lu"
- " stride_offset %lu\n", ras->ras_window_start,
+ >= ras->ras_stride_offset, "window_start %lu, window_len %lu stride_offset %lu\n",
+ ras->ras_window_start,
ras->ras_window_len, ras->ras_stride_offset);
stride_len = ras->ras_window_start + ras->ras_window_len -
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
index 4c77ae8b935..2f21304046a 100644
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ b/drivers/staging/lustre/lustre/llite/rw26.c
@@ -183,7 +183,7 @@ static int ll_set_page_dirty(struct page *vmpage)
return __set_page_dirty_nobuffers(vmpage);
}
-#define MAX_DIRECTIO_SIZE 2*1024*1024*1024UL
+#define MAX_DIRECTIO_SIZE (2*1024*1024*1024UL)
static inline int ll_get_user_pages(int rw, unsigned long user_addr,
size_t size, struct page ***pages,
@@ -417,7 +417,7 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
result = iov_iter_get_pages_alloc(iter, &pages, count, &offs);
if (likely(result > 0)) {
- int n = (result + offs + PAGE_SIZE - 1) / PAGE_SIZE;
+ int n = DIV_ROUND_UP(result + offs, PAGE_SIZE);
result = ll_direct_IO_26_seg(env, io, rw, inode,
file->f_mapping,
result, file_offset,
@@ -535,7 +535,7 @@ const struct address_space_operations ll_aops = {
#else
const struct address_space_operations_ext ll_aops = {
.orig_aops.readpage = ll_readpage,
-// .orig_aops.readpages = ll_readpages,
+/* .orig_aops.readpages = ll_readpages, */
.orig_aops.direct_IO = ll_direct_IO_26,
.orig_aops.writepage = ll_writepage,
.orig_aops.writepages = ll_writepages,
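The rw26.c hunks above make two small hardening changes: the open-coded "(x + PAGE_SIZE - 1) / PAGE_SIZE" round-up becomes DIV_ROUND_UP(), and MAX_DIRECTIO_SIZE gains surrounding parentheses so its expansion cannot be re-associated by an adjacent operator. A userspace sketch of both points (illustrative values only):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

#define CHUNK_UNPARENED         2*1024UL        /* hazardous expansion */
#define CHUNK_PARENED           (2*1024UL)      /* safe expansion */

int main(void)
{
        unsigned long bytes = 4 * 1024 * 1024UL;

        /* Open-coded round-up and the named helper agree. */
        printf("%lu == %lu\n",
               (bytes + 4096 - 1) / 4096, DIV_ROUND_UP(bytes, 4096));

        /* Without parentheses, "bytes / 2*1024UL" divides first and then
         * multiplies, giving a wildly different result. */
        printf("unparenthesized %lu vs parenthesized %lu\n",
               bytes / CHUNK_UNPARENED, bytes / CHUNK_PARENED);
        return 0;
}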
diff --git a/drivers/staging/lustre/lustre/llite/statahead.c b/drivers/staging/lustre/lustre/llite/statahead.c
index 09d965e7684..6ad9dd0fe2b 100644
--- a/drivers/staging/lustre/lustre/llite/statahead.c
+++ b/drivers/staging/lustre/lustre/llite/statahead.c
@@ -334,8 +334,7 @@ static void ll_sa_entry_put(struct ll_statahead_info *sai,
LASSERT(ll_sa_entry_unhashed(entry));
ll_sa_entry_cleanup(sai, entry);
- if (entry->se_inode)
- iput(entry->se_inode);
+ iput(entry->se_inode);
OBD_FREE(entry, entry->se_size);
atomic_dec(&sai->sai_cache_count);
@@ -915,7 +914,7 @@ static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry,
return rc;
}
-static void ll_statahead_one(struct dentry *parent, const char* entry_name,
+static void ll_statahead_one(struct dentry *parent, const char *entry_name,
int entry_name_len)
{
struct inode *dir = parent->d_inode;
@@ -1491,10 +1490,7 @@ ll_sai_unplug(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
sai->sai_consecutive_miss++;
if (sa_low_hit(sai) && thread_is_running(thread)) {
atomic_inc(&sbi->ll_sa_wrong);
- CDEBUG(D_READA, "Statahead for dir "DFID" hit "
- "ratio too low: hit/miss %llu/%llu"
- ", sent/replied %llu/%llu, stopping "
- "statahead thread\n",
+ CDEBUG(D_READA, "Statahead for dir " DFID " hit ratio too low: hit/miss %llu/%llu, sent/replied %llu/%llu, stopping statahead thread\n",
PFID(&lli->lli_fid), sai->sai_hit,
sai->sai_miss, sai->sai_sent,
sai->sai_replied);
@@ -1612,8 +1608,7 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
} else if ((*dentryp)->d_inode != inode) {
/* revalidate, but inode is recreated */
CDEBUG(D_READA,
- "stale dentry %pd inode %lu/%u, "
- "statahead inode %lu/%u\n",
+ "stale dentry %pd inode %lu/%u, statahead inode %lu/%u\n",
*dentryp,
(*dentryp)->d_inode->i_ino,
(*dentryp)->d_inode->i_generation,
@@ -1665,8 +1660,7 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
if (unlikely(sai->sai_inode != parent->d_inode)) {
struct ll_inode_info *nlli = ll_i2info(parent->d_inode);
- CWARN("Race condition, someone changed %pd just now: "
- "old parent "DFID", new parent "DFID"\n",
+ CWARN("Race condition, someone changed %pd just now: old parent "DFID", new parent "DFID"\n",
*dentryp,
PFID(&lli->lli_fid), PFID(&nlli->lli_fid));
dput(parent);
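The ll_sa_entry_put() hunk above drops the NULL check before iput(): like kfree(), iput() simply returns when handed a NULL inode, so the guard is redundant. The same NULL-safe release idea in a userspace sketch built on free() (an analogue, not the Lustre code):

#include <stdlib.h>

struct entry {
        char *name;
};

static void entry_put_name(struct entry *e)
{
        /* Redundant shape:
         *      if (e->name)
         *              free(e->name);
         * free(NULL) is defined to do nothing, so the check is dropped: */
        free(e->name);
        e->name = NULL;
}

int main(void)
{
        struct entry e = { .name = NULL };

        entry_put_name(&e);     /* safe even though name is NULL */
        return 0;
}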
diff --git a/drivers/staging/lustre/lustre/llite/super25.c b/drivers/staging/lustre/lustre/llite/super25.c
index e61dbed120a..6aff155651c 100644
--- a/drivers/staging/lustre/lustre/llite/super25.c
+++ b/drivers/staging/lustre/lustre/llite/super25.c
@@ -162,7 +162,7 @@ static int __init init_lustre_lite(void)
/* Nodes with small feet have little entropy
* the NID for this node gives the most entropy in the low bits */
- for (i=0; ; i++) {
+ for (i = 0; ; i++) {
if (LNetGetId(i, &lnet_id) == -ENOENT) {
break;
}
diff --git a/drivers/staging/lustre/lustre/llite/symlink.c b/drivers/staging/lustre/lustre/llite/symlink.c
index eccd3a717a4..686b6a574cc 100644
--- a/drivers/staging/lustre/lustre/llite/symlink.c
+++ b/drivers/staging/lustre/lustre/llite/symlink.c
@@ -100,8 +100,8 @@ static int ll_readlink_internal(struct inode *inode,
if (*symname == NULL ||
strnlen(*symname, symlen) != symlen - 1) {
/* not full/NULL terminated */
- CERROR("inode %lu: symlink not NULL terminated string"
- "of length %d\n", inode->i_ino, symlen - 1);
+ CERROR("inode %lu: symlink not NULL terminated string of length %d\n",
+ inode->i_ino, symlen - 1);
rc = -EPROTO;
goto failed;
}
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index e540a6d286f..930f6010203 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -709,7 +709,7 @@ static int vvp_io_fault_start(const struct lu_env *env,
}
- if (fio->ft_mkwrite ) {
+ if (fio->ft_mkwrite) {
pgoff_t last_index;
/*
* Capture the size while holding the lli_trunc_sem from above
@@ -720,9 +720,8 @@ static int vvp_io_fault_start(const struct lu_env *env,
last_index = cl_index(obj, size - 1);
if (last_index < fio->ft_index) {
CDEBUG(D_PAGE,
- "llite: mkwrite and truncate race happened: "
- "%p: 0x%lx 0x%lx\n",
- vmpage->mapping, fio->ft_index, last_index);
+ "llite: mkwrite and truncate race happened: %p: 0x%lx 0x%lx\n",
+ vmpage->mapping, fio->ft_index, last_index);
/*
* We need to return if we are
* passed the end of the file. This will propagate
diff --git a/drivers/staging/lustre/lustre/llite/vvp_page.c b/drivers/staging/lustre/lustre/llite/vvp_page.c
index 4626346f6ee..954ed08c6af 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_page.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_page.c
@@ -376,8 +376,7 @@ static int vvp_page_print(const struct lu_env *env,
struct ccc_page *vp = cl2ccc_page(slice);
struct page *vmpage = vp->cpg_page;
- (*printer)(env, cookie, LUSTRE_VVP_NAME"-page@%p(%d:%d:%d) "
- "vm@%p ",
+ (*printer)(env, cookie, LUSTRE_VVP_NAME "-page@%p(%d:%d:%d) vm@%p ",
vp, vp->cpg_defer_uptodate, vp->cpg_ra_used,
vp->cpg_write_queued, vmpage);
if (vmpage != NULL) {
diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c
index 3151baf5585..b439936b452 100644
--- a/drivers/staging/lustre/lustre/llite/xattr.c
+++ b/drivers/staging/lustre/lustre/llite/xattr.c
@@ -201,8 +201,7 @@ int ll_setxattr_common(struct inode *inode, const char *name,
#endif
if (rc) {
if (rc == -EOPNOTSUPP && xattr_type == XATTR_USER_T) {
- LCONSOLE_INFO("Disabling user_xattr feature because "
- "it is not supported on the server\n");
+ LCONSOLE_INFO("Disabling user_xattr feature because it is not supported on the server\n");
sbi->ll_flags &= ~LL_SBI_USER_XATTR;
}
return rc;
@@ -234,6 +233,9 @@ int ll_setxattr(struct dentry *dentry, const char *name,
struct lov_user_md *lump = (struct lov_user_md *)value;
int rc = 0;
+ if (size != 0 && size < sizeof(struct lov_user_md))
+ return -EINVAL;
+
/* Attributes that are saved via getxattr will always have
* the stripe_offset as 0. Instead, the MDS should be
* allowed to pick the starting OST index. b=17846 */
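The added guard rejects a value buffer that is present but smaller than struct lov_user_md, since the code that follows dereferences lump; an empty buffer (size == 0) is still passed through to the existing handling. Sketch of the check (same assumptions as the hunk above):

	struct lov_user_md *lump = (struct lov_user_md *)value;

	/* too short to contain the header that is read below */
	if (size != 0 && size < sizeof(struct lov_user_md))
		return -EINVAL;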
diff --git a/drivers/staging/lustre/lustre/llite/xattr_cache.c b/drivers/staging/lustre/lustre/llite/xattr_cache.c
index 627cbe242f2..e2badf17d95 100644
--- a/drivers/staging/lustre/lustre/llite/xattr_cache.c
+++ b/drivers/staging/lustre/lustre/llite/xattr_cache.c
@@ -126,9 +126,7 @@ static int ll_xattr_cache_add(struct list_head *cache,
return -ENOMEM;
}
- xattr->xe_namelen = strlen(xattr_name) + 1;
-
- xattr->xe_name = kzalloc(xattr->xe_namelen, GFP_NOFS);
+ xattr->xe_name = kstrdup(xattr_name, GFP_NOFS);
if (!xattr->xe_name) {
CDEBUG(D_CACHE, "failed to alloc xattr name %u\n",
xattr->xe_namelen);
@@ -141,7 +139,6 @@ static int ll_xattr_cache_add(struct list_head *cache,
goto err_value;
}
- memcpy(xattr->xe_name, xattr_name, xattr->xe_namelen);
memcpy(xattr->xe_value, xattr_val, xattr_val_len);
xattr->xe_vallen = xattr_val_len;
list_add(&xattr->xe_list, cache);
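The hunk above folds the strlen()/kzalloc()/memcpy() triple into kstrdup(), which allocates and copies the NUL-terminated name in one call. Note that the xe_namelen assignment disappears with it; if that field is still consumed elsewhere in the cache code, it would have to be derived from the copy, as in this hedged sketch (the extra assignment and the err_name label are assumptions, not part of the patch):

	xattr->xe_name = kstrdup(xattr_name, GFP_NOFS);
	if (!xattr->xe_name)
		goto err_name;		/* existing error path, label assumed */
	/* only needed if xe_namelen is still read elsewhere (assumption) */
	xattr->xe_namelen = strlen(xattr->xe_name) + 1;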
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_fld.c b/drivers/staging/lustre/lustre/lmv/lmv_fld.c
index e8421f04bed..ee235926f52 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_fld.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_fld.c
@@ -75,8 +75,7 @@ int lmv_fld_lookup(struct lmv_obd *lmv,
*mds, PFID(fid));
if (*mds >= lmv->desc.ld_tgt_count) {
- CERROR("FLD lookup got invalid mds #%x (max: %x) "
- "for fid="DFID"\n", *mds, lmv->desc.ld_tgt_count,
+ CERROR("FLD lookup got invalid mds #%x (max: %x) for fid=" DFID "\n", *mds, lmv->desc.ld_tgt_count,
PFID(fid));
rc = -EINVAL;
}
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_intent.c b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
index 5106124b7d9..d22d57b4ff3 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_intent.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
@@ -186,8 +186,8 @@ int lmv_intent_open(struct obd_export *exp, struct md_op_data *op_data,
return rc;
}
- CDEBUG(D_INODE, "OPEN_INTENT with fid1="DFID", fid2="DFID","
- " name='%s' -> mds #%d\n", PFID(&op_data->op_fid1),
+ CDEBUG(D_INODE, "OPEN_INTENT with fid1=" DFID ", fid2=" DFID ", name='%s' -> mds #%d\n",
+ PFID(&op_data->op_fid1),
PFID(&op_data->op_fid2), op_data->op_name, tgt->ltd_idx);
rc = md_intent_lock(tgt->ltd_exp, op_data, lmm, lmmsize, it, flags,
@@ -226,8 +226,8 @@ int lmv_intent_open(struct obd_export *exp, struct md_op_data *op_data,
* this is normal situation, we should not print error here,
* only debug info.
*/
- CDEBUG(D_INODE, "Can't handle remote %s: dir "DFID"("DFID"):"
- "%*s: %d\n", LL_IT2STR(it), PFID(&op_data->op_fid2),
+ CDEBUG(D_INODE, "Can't handle remote %s: dir " DFID "(" DFID "):%*s: %d\n",
+ LL_IT2STR(it), PFID(&op_data->op_fid2),
PFID(&op_data->op_fid1), op_data->op_namelen,
op_data->op_name, rc);
return rc;
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_internal.h b/drivers/staging/lustre/lustre/lmv/lmv_internal.h
index b911e764387..852d78721ca 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_internal.h
+++ b/drivers/staging/lustre/lustre/lmv/lmv_internal.h
@@ -42,8 +42,8 @@
#define LMV_MAX_TGT_COUNT 128
-#define lmv_init_lock(lmv) mutex_lock(&lmv->init_mutex);
-#define lmv_init_unlock(lmv) mutex_unlock(&lmv->init_mutex);
+#define lmv_init_lock(lmv) mutex_lock(&lmv->init_mutex)
+#define lmv_init_unlock(lmv) mutex_unlock(&lmv->init_mutex)
#define LL_IT2STR(it) \
((it) ? ldlm_it2str((it)->it_op) : "0")
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
index 1a5821289c3..9f3837412cd 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -325,8 +325,8 @@ static int lmv_init_ea_size(struct obd_export *exp, int easize,
rc = md_init_ea_size(lmv->tgts[i]->ltd_exp, easize, def_easize,
cookiesize, def_cookiesize);
if (rc) {
- CERROR("%s: obd_init_ea_size() failed on MDT target %d:"
- " rc = %d.\n", obd->obd_name, i, rc);
+ CERROR("%s: obd_init_ea_size() failed on MDT target %d: rc = %d.\n",
+ obd->obd_name, i, rc);
break;
}
}
@@ -427,8 +427,7 @@ int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
mdc_obd->obd_type->typ_name,
mdc_obd->obd_name);
if (mdc_symlink == NULL) {
- CERROR("Could not register LMV target "
- "/proc/fs/lustre/%s/%s/target_obds/%s.",
+ CERROR("Could not register LMV target /proc/fs/lustre/%s/%s/target_obds/%s.",
obd->obd_type->typ_name, obd->obd_name,
mdc_obd->obd_name);
lprocfs_remove(&lmv_proc_dir);
@@ -474,8 +473,8 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
if ((index < lmv->tgts_size) && (lmv->tgts[index] != NULL)) {
tgt = lmv->tgts[index];
- CERROR("%s: UUID %s already assigned at LOV target index %d:"
- " rc = %d\n", obd->obd_name,
+ CERROR("%s: UUID %s already assigned at LOV target index %d: rc = %d\n",
+ obd->obd_name,
obd_uuid2str(&tgt->ltd_uuid), index, -EEXIST);
lmv_init_unlock(lmv);
return -EEXIST;
@@ -600,8 +599,7 @@ int lmv_check_connect(struct obd_device *obd)
--lmv->desc.ld_active_tgt_count;
rc2 = obd_disconnect(tgt->ltd_exp);
if (rc2) {
- CERROR("LMV target %s disconnect on "
- "MDC idx %d: error %d\n",
+ CERROR("LMV target %s disconnect on MDC idx %d: error %d\n",
tgt->ltd_uuid.uuid, i, rc2);
}
}
@@ -865,10 +863,9 @@ static int lmv_hsm_ct_register(struct lmv_obd *lmv, unsigned int cmd, int len,
if (err) {
if (lmv->tgts[i]->ltd_active) {
/* permanent error */
- CERROR("error: iocontrol MDC %s on MDT"
- "idx %d cmd %x: err = %d\n",
- lmv->tgts[i]->ltd_uuid.uuid,
- i, cmd, err);
+ CERROR("error: iocontrol MDC %s on MDTidx %d cmd %x: err = %d\n",
+ lmv->tgts[i]->ltd_uuid.uuid,
+ i, cmd, err);
rc = err;
lk->lk_flags |= LK_FLG_STOP;
/* unregister from previous MDS */
@@ -925,7 +922,7 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
__u32 index;
memcpy(&index, data->ioc_inlbuf2, sizeof(__u32));
- if ((index >= count))
+ if (index >= count)
return -ENODEV;
if (lmv->tgts[index] == NULL ||
@@ -1147,10 +1144,9 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
return err;
} else if (err) {
if (lmv->tgts[i]->ltd_active) {
- CERROR("error: iocontrol MDC %s on MDT"
- "idx %d cmd %x: err = %d\n",
- lmv->tgts[i]->ltd_uuid.uuid,
- i, cmd, err);
+ CERROR("error: iocontrol MDC %s on MDTidx %d cmd %x: err = %d\n",
+ lmv->tgts[i]->ltd_uuid.uuid,
+ i, cmd, err);
if (!rc)
rc = err;
}
@@ -1234,8 +1230,8 @@ static int lmv_placement_policy(struct obd_device *obd,
if (lum->lum_type == LMV_STRIPE_TYPE &&
lum->lum_stripe_offset != -1) {
if (lum->lum_stripe_offset >= lmv->desc.ld_tgt_count) {
- CERROR("%s: Stripe_offset %d > MDT count %d:"
- " rc = %d\n", obd->obd_name,
+ CERROR("%s: Stripe_offset %d > MDT count %d: rc = %d\n",
+ obd->obd_name,
lum->lum_stripe_offset,
lmv->desc.ld_tgt_count, -ERANGE);
return -ERANGE;
@@ -1298,8 +1294,8 @@ int lmv_fid_alloc(struct obd_export *exp, struct lu_fid *fid,
rc = lmv_placement_policy(obd, op_data, &mds);
if (rc) {
- CERROR("Can't get target for allocating fid, "
- "rc %d\n", rc);
+ CERROR("Can't get target for allocating fid, rc %d\n",
+ rc);
return rc;
}
@@ -2310,7 +2306,6 @@ retry:
static int lmv_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
{
struct lmv_obd *lmv = &obd->u.lmv;
- int rc = 0;
switch (stage) {
case OBD_CLEANUP_EARLY:
@@ -2324,7 +2319,7 @@ static int lmv_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
default:
break;
}
- return rc;
+ return 0;
}
static int lmv_get_info(const struct lu_env *env, struct obd_export *exp,
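A recurring simplification in this series (here and in lov_precleanup, mgc_requeue_thread and mgc_import_event below) removes a local rc that is initialised to zero and never modified, returning the constant instead. Sketch (illustrative):

	/* before: rc exists only to be returned unchanged */
	int rc = 0;
	...
	return rc;

	/* after */
	return 0;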
diff --git a/drivers/staging/lustre/lustre/lov/lov_ea.c b/drivers/staging/lustre/lustre/lov/lov_ea.c
index 9e21e5efcdb..e9ec39c5a6c 100644
--- a/drivers/staging/lustre/lustre/lov/lov_ea.c
+++ b/drivers/staging/lustre/lustre/lov/lov_ea.c
@@ -348,9 +348,8 @@ const struct lsm_operations lsm_v3_ops = {
void dump_lsm(unsigned int level, const struct lov_stripe_md *lsm)
{
- CDEBUG(level, "lsm %p, objid "DOSTID", maxbytes %#llx, magic 0x%08X,"
- " stripe_size %u, stripe_count %u, refc: %d,"
- " layout_gen %u, pool ["LOV_POOLNAMEF"]\n", lsm,
+ CDEBUG(level, "lsm %p, objid " DOSTID ", maxbytes %#llx, magic 0x%08X, stripe_size %u, stripe_count %u, refc: %d, layout_gen %u, pool [" LOV_POOLNAMEF "]\n",
+ lsm,
POSTID(&lsm->lsm_oi), lsm->lsm_maxbytes, lsm->lsm_magic,
lsm->lsm_stripe_size, lsm->lsm_stripe_count,
atomic_read(&lsm->lsm_refc), lsm->lsm_layout_gen,
diff --git a/drivers/staging/lustre/lustre/lov/lov_obd.c b/drivers/staging/lustre/lustre/lov/lov_obd.c
index 94dfd64bd28..ea503d2a19f 100644
--- a/drivers/staging/lustre/lustre/lov/lov_obd.c
+++ b/drivers/staging/lustre/lustre/lov/lov_obd.c
@@ -168,8 +168,8 @@ int lov_connect_obd(struct obd_device *obd, __u32 index, int activate,
if (imp->imp_invalid) {
- CDEBUG(D_CONFIG, "not connecting OSC %s; administratively "
- "disabled\n", obd_uuid2str(tgt_uuid));
+ CDEBUG(D_CONFIG, "not connecting OSC %s; administratively disabled\n",
+ obd_uuid2str(tgt_uuid));
return 0;
}
@@ -201,10 +201,9 @@ int lov_connect_obd(struct obd_device *obd, __u32 index, int activate,
osc_obd->obd_type->typ_name,
osc_obd->obd_name);
if (osc_symlink == NULL) {
- CERROR("could not register LOV target "
- "/proc/fs/lustre/%s/%s/target_obds/%s.",
- obd->obd_type->typ_name, obd->obd_name,
- osc_obd->obd_name);
+ CERROR("could not register LOV target /proc/fs/lustre/%s/%s/target_obds/%s.",
+ obd->obd_type->typ_name, obd->obd_name,
+ osc_obd->obd_name);
lprocfs_remove(&lov_proc_dir);
obd->obd_proc_private = NULL;
}
@@ -726,8 +725,7 @@ void lov_fix_desc_stripe_size(__u64 *val)
{
if (*val < LOV_MIN_STRIPE_SIZE) {
if (*val != 0)
- LCONSOLE_INFO("Increasing default stripe size to "
- "minimum %u\n",
+ LCONSOLE_INFO("Increasing default stripe size to minimum %u\n",
LOV_DESC_STRIPE_SIZE_DEFAULT);
*val = LOV_DESC_STRIPE_SIZE_DEFAULT;
} else if (*val & (LOV_MIN_STRIPE_SIZE - 1)) {
@@ -847,7 +845,6 @@ out:
static int lov_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
{
- int rc = 0;
struct lov_obd *lov = &obd->u.lov;
switch (stage) {
@@ -865,7 +862,7 @@ static int lov_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
break;
}
- return rc;
+ return 0;
}
static int lov_cleanup(struct obd_device *obd)
@@ -900,8 +897,7 @@ static int lov_cleanup(struct obd_device *obd)
/* We should never get here - these
should have been removed in the
disconnect. */
- CERROR("lov tgt %d not cleaned!"
- " deathrow=%d, lovrc=%d\n",
+ CERROR("lov tgt %d not cleaned! deathrow=%d, lovrc=%d\n",
i, lov->lov_death_row,
atomic_read(&lov->lov_refcount));
lov_del_target(obd, i, NULL, 0);
@@ -1176,8 +1172,8 @@ static int lov_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
list_for_each(pos, &lovset->set_list) {
req = list_entry(pos, struct lov_request, rq_link);
- CDEBUG(D_INFO, "objid "DOSTID"[%d] has subobj "DOSTID" at idx"
- "%u\n", POSTID(&oinfo->oi_oa->o_oi), req->rq_stripe,
+ CDEBUG(D_INFO, "objid " DOSTID "[%d] has subobj " DOSTID " at idx%u\n",
+ POSTID(&oinfo->oi_oa->o_oi), req->rq_stripe,
POSTID(&req->rq_oi.oi_oa->o_oi), req->rq_idx);
rc = obd_getattr_async(lov->lov_tgts[req->rq_idx]->ltd_exp,
&req->rq_oi, rqset);
@@ -1256,8 +1252,8 @@ static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
if (oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
oti->oti_logcookies = set->set_cookies + req->rq_stripe;
- CDEBUG(D_INFO, "objid "DOSTID"[%d] has subobj "DOSTID" at idx"
- "%u\n", POSTID(&oinfo->oi_oa->o_oi), req->rq_stripe,
+ CDEBUG(D_INFO, "objid " DOSTID "[%d] has subobj " DOSTID " at idx%u\n",
+ POSTID(&oinfo->oi_oa->o_oi), req->rq_stripe,
POSTID(&req->rq_oi.oi_oa->o_oi), req->rq_idx);
rc = obd_setattr_async(lov->lov_tgts[req->rq_idx]->ltd_exp,
@@ -1568,8 +1564,7 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
if (lov->lov_tgts[i]->ltd_active) {
CDEBUG(err == -ENOTTY ?
D_IOCTL : D_WARNING,
- "iocontrol OSC %s on OST "
- "idx %d cmd %x: err = %d\n",
+ "iocontrol OSC %s on OST idx %d cmd %x: err = %d\n",
lov_uuid2str(lov, i),
i, cmd, err);
if (!rc)
@@ -2266,8 +2261,8 @@ static int lov_quotacheck(struct obd_device *obd, struct obd_export *exp,
/* Skip quota check on the administratively disabled OSTs. */
if (!lov->lov_tgts[i]->ltd_activate) {
- CWARN("lov idx %d was administratively disabled, "
- "skip quotacheck on it.\n", i);
+ CWARN("lov idx %d was administratively disabled, skip quotacheck on it.\n",
+ i);
continue;
}
diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c
index 5edd6a3a9c5..5356d532417 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pack.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pack.c
@@ -438,8 +438,7 @@ int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm,
if (copy_from_user(&lum, lump, lum_size)) {
rc = -EFAULT;
goto out_set;
- }
- else if ((lum.lmm_magic != LOV_USER_MAGIC) &&
+ } else if ((lum.lmm_magic != LOV_USER_MAGIC) &&
(lum.lmm_magic != LOV_USER_MAGIC_V3)) {
rc = -EINVAL;
goto out_set;
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_lib.c b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
index e8732cc30ce..4e59995e004 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_lib.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
@@ -194,10 +194,8 @@ static __u64 mds_pack_open_flags(__u64 flags, __u32 mode)
cr_flags |= MDS_OPEN_SYNC;
if (flags & O_DIRECTORY)
cr_flags |= MDS_OPEN_DIRECTORY;
-#ifdef FMODE_EXEC
- if (flags & FMODE_EXEC)
+ if (flags & __FMODE_EXEC)
cr_flags |= MDS_FMODE_EXEC;
-#endif
if (cl_is_lov_delay_create(flags))
cr_flags |= MDS_OPEN_DELAY_CREATE;
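Here and in the mdc_locks.c hunk below, the #ifdef FMODE_EXEC guard is dropped in favour of testing __FMODE_EXEC unconditionally. FMODE_EXEC is a bitwise fmode_t under sparse, while the value being tested is a plain integer flags word; <linux/fs.h> provides __FMODE_EXEC as the integer alias, so the comparison type-checks and no preprocessor conditional is needed. Minimal sketch (same flag semantics assumed):

	/* flags is an integer open-flags word, not an fmode_t */
	if (flags & __FMODE_EXEC)
		cr_flags |= MDS_FMODE_EXEC;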
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_locks.c b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
index b58147ee62b..8c9b4f5494e 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_locks.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
@@ -296,10 +296,8 @@ static struct ptlrpc_request *mdc_intent_open_pack(struct obd_export *exp,
} else {
if (it->it_flags & (FMODE_WRITE|MDS_OPEN_TRUNC))
mode = LCK_CW;
-#ifdef FMODE_EXEC
- else if (it->it_flags & FMODE_EXEC)
+ else if (it->it_flags & __FMODE_EXEC)
mode = LCK_PR;
-#endif
else
mode = LCK_CR;
}
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index 14e1ba1675f..3b0f245a878 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -60,7 +60,7 @@ struct mdc_renew_capa_args {
static int mdc_cleanup(struct obd_device *obd);
-int mdc_unpack_capa(struct obd_export *exp, struct ptlrpc_request *req,
+static int mdc_unpack_capa(struct obd_export *exp, struct ptlrpc_request *req,
const struct req_msg_field *field, struct obd_capa **oc)
{
struct lustre_capa *capa;
@@ -147,7 +147,7 @@ out:
}
/* This should be mdc_get_info("rootfid") */
-int mdc_getstatus(struct obd_export *exp, struct lu_fid *rootfid,
+static int mdc_getstatus(struct obd_export *exp, struct lu_fid *rootfid,
struct obd_capa **pc)
{
return send_getstatus(class_exp2cliimp(exp), rootfid, pc,
@@ -214,7 +214,7 @@ static int mdc_getattr_common(struct obd_export *exp,
return 0;
}
-int mdc_getattr(struct obd_export *exp, struct md_op_data *op_data,
+static int mdc_getattr(struct obd_export *exp, struct md_op_data *op_data,
struct ptlrpc_request **request)
{
struct ptlrpc_request *req;
@@ -258,7 +258,7 @@ int mdc_getattr(struct obd_export *exp, struct md_op_data *op_data,
return rc;
}
-int mdc_getattr_name(struct obd_export *exp, struct md_op_data *op_data,
+static int mdc_getattr_name(struct obd_export *exp, struct md_op_data *op_data,
struct ptlrpc_request **request)
{
struct ptlrpc_request *req;
@@ -441,7 +441,7 @@ static int mdc_xattr_common(struct obd_export *exp,
return rc;
}
-int mdc_setxattr(struct obd_export *exp, const struct lu_fid *fid,
+static int mdc_setxattr(struct obd_export *exp, const struct lu_fid *fid,
struct obd_capa *oc, u64 valid, const char *xattr_name,
const char *input, int input_size, int output_size,
int flags, __u32 suppgid, struct ptlrpc_request **request)
@@ -452,7 +452,7 @@ int mdc_setxattr(struct obd_export *exp, const struct lu_fid *fid,
suppgid, request);
}
-int mdc_getxattr(struct obd_export *exp, const struct lu_fid *fid,
+static int mdc_getxattr(struct obd_export *exp, const struct lu_fid *fid,
struct obd_capa *oc, u64 valid, const char *xattr_name,
const char *input, int input_size, int output_size,
int flags, struct ptlrpc_request **request)
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c
index bc263adf09d..60d2b0f1269 100644
--- a/drivers/staging/lustre/lustre/mgc/mgc_request.c
+++ b/drivers/staging/lustre/lustre/mgc/mgc_request.c
@@ -510,8 +510,6 @@ static void do_requeue(struct config_llog_data *cld)
static int mgc_requeue_thread(void *data)
{
- int rc = 0;
-
CDEBUG(D_MGC, "Starting requeue thread\n");
/* Keep trying failed locks periodically */
@@ -592,7 +590,7 @@ static int mgc_requeue_thread(void *data)
complete(&rq_exit);
CDEBUG(D_MGC, "Ending requeue thread\n");
- return rc;
+ return 0;
}
/* Add a cld to the list to requeue. Start the requeue thread if needed.
@@ -736,8 +734,7 @@ static int mgc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
rc = PTR_ERR(kthread_run(mgc_requeue_thread, NULL,
"ll_cfg_requeue"));
if (IS_ERR_VALUE(rc)) {
- CERROR("%s: Cannot start requeue thread (%d),"
- "no more log updates!\n",
+ CERROR("%s: Cannot start requeue thread (%d),no more log updates!\n",
obd->obd_name, rc);
goto err_cleanup;
}
@@ -1021,8 +1018,7 @@ int mgc_set_info_async(const struct lu_env *env, struct obd_export *exp,
sptlrpc_flavor2name(&cli->cl_flvr_mgc,
str, sizeof(str));
- LCONSOLE_ERROR("asking sptlrpc flavor %s to MGS but "
- "currently %s is in use\n",
+ LCONSOLE_ERROR("asking sptlrpc flavor %s to MGS but currently %s is in use\n",
(char *) val, str);
rc = -EPERM;
}
@@ -1055,8 +1051,6 @@ static int mgc_import_event(struct obd_device *obd,
struct obd_import *imp,
enum obd_import_event event)
{
- int rc = 0;
-
LASSERT(imp->imp_obd == obd);
CDEBUG(D_MGC, "import event %#x\n", event);
@@ -1090,7 +1084,7 @@ static int mgc_import_event(struct obd_device *obd,
CERROR("Unknown import event %#x\n", event);
LBUG();
}
- return rc;
+ return 0;
}
enum {
diff --git a/drivers/staging/lustre/lustre/obdclass/acl.c b/drivers/staging/lustre/lustre/obdclass/acl.c
index 2619bfeceb8..9a69f6b35a0 100644
--- a/drivers/staging/lustre/lustre/obdclass/acl.c
+++ b/drivers/staging/lustre/lustre/obdclass/acl.c
@@ -171,17 +171,17 @@ EXPORT_SYMBOL(lustre_posix_acl_xattr_2ext);
/*
* Filter out the "nobody" entries in the posix ACL.
*/
-int lustre_posix_acl_xattr_filter(posix_acl_xattr_header *header, int size,
+int lustre_posix_acl_xattr_filter(posix_acl_xattr_header *header, size_t size,
posix_acl_xattr_header **out)
{
int count, i, j, rc = 0;
__u32 id;
posix_acl_xattr_header *new;
- if (unlikely(size < 0))
- return -EINVAL;
- else if (!size)
+ if (!size)
return 0;
+ if (size < sizeof(*new))
+ return -EINVAL;
OBD_ALLOC(new, size);
if (unlikely(new == NULL))
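With the parameter now size_t, the old `size < 0` test can never fire, so the hunk replaces it with a lower bound: an empty buffer remains a successful no-op, while anything shorter than the ACL header is rejected before OBD_ALLOC() and the entry walk below. Sketch of the validation order (illustrative):

	if (!size)			/* nothing to filter */
		return 0;
	if (size < sizeof(*new))	/* cannot even hold a header */
		return -EINVAL;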
diff --git a/drivers/staging/lustre/lustre/obdclass/capa.c b/drivers/staging/lustre/lustre/obdclass/capa.c
index cd1abce378e..d206b1046a1 100644
--- a/drivers/staging/lustre/lustre/obdclass/capa.c
+++ b/drivers/staging/lustre/lustre/obdclass/capa.c
@@ -272,8 +272,7 @@ int capa_hmac(__u8 *hmac, struct lustre_capa *capa, __u8 *key)
tfm = crypto_alloc_hash(alg->ha_name, 0, 0);
if (IS_ERR(tfm)) {
- CERROR("crypto_alloc_tfm failed, check whether your kernel"
- "has crypto support!\n");
+ CERROR("crypto_alloc_tfm failed, check whether your kernel has crypto support!\n");
return PTR_ERR(tfm);
}
keylen = alg->ha_keylen;
@@ -302,7 +301,7 @@ int capa_encrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
/* passing "aes" in a variable instead of a constant string keeps gcc
* 4.3.2 happy */
- tfm = crypto_alloc_blkcipher(alg, 0, 0 );
+ tfm = crypto_alloc_blkcipher(alg, 0, 0);
if (IS_ERR(tfm)) {
CERROR("failed to load transform for aes\n");
return PTR_ERR(tfm);
@@ -355,7 +354,7 @@ int capa_decrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
/* passing "aes" in a variable instead of a constant string keeps gcc
* 4.3.2 happy */
- tfm = crypto_alloc_blkcipher(alg, 0, 0 );
+ tfm = crypto_alloc_blkcipher(alg, 0, 0);
if (IS_ERR(tfm)) {
CERROR("failed to load transform for aes\n");
return PTR_ERR(tfm);
@@ -407,14 +406,13 @@ EXPORT_SYMBOL(capa_cpy);
void _debug_capa(struct lustre_capa *c,
struct libcfs_debug_msg_data *msgdata,
- const char *fmt, ... )
+ const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
libcfs_debug_vmsg2(msgdata, fmt, args,
- " capability@%p fid "DFID" opc %#llx uid %llu"
- " gid %llu flags %u alg %d keyid %u timeout %u "
- "expiry %u\n", c, PFID(capa_fid(c)), capa_opc(c),
+ " capability@%p fid " DFID " opc %#llx uid %llu gid %llu flags %u alg %d keyid %u timeout %u expiry %u\n",
+ c, PFID(capa_fid(c)), capa_opc(c),
capa_uid(c), capa_gid(c), capa_flags(c),
capa_alg(c), capa_keyid(c), capa_timeout(c),
capa_expiry(c));
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_io.c b/drivers/staging/lustre/lustre/obdclass/cl_io.c
index f2383a497cb..3141b604370 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_io.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_io.c
@@ -1622,8 +1622,7 @@ int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
atomic_read(&anchor->csi_sync_nr) == 0,
&lwi);
if (rc < 0) {
- CERROR("SYNC IO failed with error: %d, try to cancel "
- "%d remaining pages\n",
+ CERROR("SYNC IO failed with error: %d, try to cancel %d remaining pages\n",
rc, atomic_read(&anchor->csi_sync_nr));
(void)cl_io_cancel(env, io, queue);
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_lock.c b/drivers/staging/lustre/lustre/obdclass/cl_lock.c
index b204531ef71..b081167f976 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_lock.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_lock.c
@@ -129,8 +129,7 @@ static void cl_lock_trace0(int level, const struct lu_env *env,
const char *func, const int line)
{
struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj);
- CDEBUG(level, "%s: %p@(%d %p %d %d %d %d %d %lx)"
- "(%p/%d/%d) at %s():%d\n",
+ CDEBUG(level, "%s: %p@(%d %p %d %d %d %d %d %lx)(%p/%d/%d) at %s():%d\n",
prefix, lock, atomic_read(&lock->cll_ref),
lock->cll_guarder, lock->cll_depth,
lock->cll_state, lock->cll_error, lock->cll_holds,
diff --git a/drivers/staging/lustre/lustre/obdclass/class_obd.c b/drivers/staging/lustre/lustre/obdclass/class_obd.c
index 7265ecbc6f9..89a3fb2e56b 100644
--- a/drivers/staging/lustre/lustre/obdclass/class_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/class_obd.c
@@ -144,13 +144,11 @@ int obd_alloc_fail(const void *ptr, const char *name, const char *type,
CERROR("%s%salloc of %s (%llu bytes) failed at %s:%d\n",
ptr ? "force " :"", type, name, (__u64)size, file,
line);
- CERROR("%llu total bytes and %llu total pages "
- "(%llu bytes) allocated by Lustre, "
- "%d total bytes by LNET\n",
+ CERROR("%llu total bytes and %llu total pages (%llu bytes) allocated by Lustre, %d total bytes by LNET\n",
obd_memory_sum(),
obd_pages_sum() << PAGE_CACHE_SHIFT,
obd_pages_sum(),
- atomic_read(&libcfs_kmemory));
+ atomic_read(&libcfs_kmemory));
return 1;
}
return 0;
diff --git a/drivers/staging/lustre/lustre/obdclass/debug.c b/drivers/staging/lustre/lustre/obdclass/debug.c
index d0f8f875ddd..9c934e6d2ea 100644
--- a/drivers/staging/lustre/lustre/obdclass/debug.c
+++ b/drivers/staging/lustre/lustre/obdclass/debug.c
@@ -40,6 +40,7 @@
#define DEBUG_SUBSYSTEM D_OTHER
+#include <linux/unaligned/access_ok.h>
#include "../include/obd_support.h"
#include "../include/lustre_debug.h"
@@ -60,14 +61,11 @@ int block_debug_setup(void *addr, int len, __u64 off, __u64 id)
{
LASSERT(addr);
- off = cpu_to_le64 (off);
- id = cpu_to_le64 (id);
- memcpy(addr, (char *)&off, LPDS);
- memcpy(addr + LPDS, (char *)&id, LPDS);
-
+ put_unaligned_le64(off, addr);
+ put_unaligned_le64(id, addr+LPDS);
addr += len - LPDS - LPDS;
- memcpy(addr, (char *)&off, LPDS);
- memcpy(addr + LPDS, (char *)&id, LPDS);
+ put_unaligned_le64(off, addr);
+ put_unaligned_le64(id, addr+LPDS);
return 0;
}
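block_debug_setup() used to byte-swap into temporaries and memcpy() them to offsets that may be unaligned; put_unaligned_le64() performs the little-endian store directly, which is also why the unaligned-access header is added at the top of this file. A small sketch of the helper (assuming LPDS, defined earlier in this file, is sizeof(__u64)):

	char buf[2 * LPDS];

	put_unaligned_le64(off, buf);		/* first stamp */
	put_unaligned_le64(id, buf + LPDS);	/* second stamp */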
diff --git a/drivers/staging/lustre/lustre/obdclass/dt_object.c b/drivers/staging/lustre/lustre/obdclass/dt_object.c
index 52256c26bf0..e7be26ec752 100644
--- a/drivers/staging/lustre/lustre/obdclass/dt_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/dt_object.c
@@ -332,8 +332,7 @@ static struct dt_object *dt_reg_open(const struct lu_env *env,
result = dt_lookup_dir(env, p, name, fid);
if (result == 0){
o = dt_locate(env, dt, fid);
- }
- else
+ } else
o = ERR_PTR(result);
return o;
@@ -950,8 +949,8 @@ int lprocfs_dt_rd_blksize(char *page, char **start, off_t off,
{
struct dt_device *dt = data;
struct obd_statfs osfs;
-
int rc = dt_statfs(NULL, dt, &osfs);
+
if (rc == 0) {
*eof = 1;
rc = snprintf(page, count, "%u\n",
@@ -967,8 +966,8 @@ int lprocfs_dt_rd_kbytestotal(char *page, char **start, off_t off,
{
struct dt_device *dt = data;
struct obd_statfs osfs;
-
int rc = dt_statfs(NULL, dt, &osfs);
+
if (rc == 0) {
__u32 blk_size = osfs.os_bsize >> 10;
__u64 result = osfs.os_blocks;
@@ -989,8 +988,8 @@ int lprocfs_dt_rd_kbytesfree(char *page, char **start, off_t off,
{
struct dt_device *dt = data;
struct obd_statfs osfs;
-
int rc = dt_statfs(NULL, dt, &osfs);
+
if (rc == 0) {
__u32 blk_size = osfs.os_bsize >> 10;
__u64 result = osfs.os_bfree;
@@ -1011,8 +1010,8 @@ int lprocfs_dt_rd_kbytesavail(char *page, char **start, off_t off,
{
struct dt_device *dt = data;
struct obd_statfs osfs;
-
int rc = dt_statfs(NULL, dt, &osfs);
+
if (rc == 0) {
__u32 blk_size = osfs.os_bsize >> 10;
__u64 result = osfs.os_bavail;
@@ -1033,8 +1032,8 @@ int lprocfs_dt_rd_filestotal(char *page, char **start, off_t off,
{
struct dt_device *dt = data;
struct obd_statfs osfs;
-
int rc = dt_statfs(NULL, dt, &osfs);
+
if (rc == 0) {
*eof = 1;
rc = snprintf(page, count, "%llu\n", osfs.os_files);
@@ -1049,8 +1048,8 @@ int lprocfs_dt_rd_filesfree(char *page, char **start, off_t off,
{
struct dt_device *dt = data;
struct obd_statfs osfs;
-
int rc = dt_statfs(NULL, dt, &osfs);
+
if (rc == 0) {
*eof = 1;
rc = snprintf(page, count, "%llu\n", osfs.os_ffree);
diff --git a/drivers/staging/lustre/lustre/obdclass/genops.c b/drivers/staging/lustre/lustre/obdclass/genops.c
index c314e9c2343..736ca410aca 100644
--- a/drivers/staging/lustre/lustre/obdclass/genops.c
+++ b/drivers/staging/lustre/lustre/obdclass/genops.c
@@ -317,7 +317,7 @@ struct obd_device *class_newdev(const char *type_name, const char *name)
result->obd_minor, new_obd_minor);
obd_devs[result->obd_minor] = NULL;
- result->obd_name[0]='\0';
+ result->obd_name[0] = '\0';
}
result = ERR_PTR(-EEXIST);
break;
@@ -524,8 +524,8 @@ void class_obd_list(void)
/* Search for a client OBD connected to tgt_uuid. If grp_uuid is
specified, then only the client with that uuid is returned,
otherwise any client connected to the tgt is returned. */
-struct obd_device * class_find_client_obd(struct obd_uuid *tgt_uuid,
- const char * typ_name,
+struct obd_device *class_find_client_obd(struct obd_uuid *tgt_uuid,
+ const char *typ_name,
struct obd_uuid *grp_uuid)
{
int i;
@@ -557,7 +557,7 @@ EXPORT_SYMBOL(class_find_client_obd);
searching at *next, and if a device is found, the next index to look
at is saved in *next. If next is NULL, then the first matching device
will always be returned. */
-struct obd_device * class_devices_in_group(struct obd_uuid *grp_uuid, int *next)
+struct obd_device *class_devices_in_group(struct obd_uuid *grp_uuid, int *next)
{
int i;
@@ -1087,8 +1087,7 @@ void __class_export_del_lock_ref(struct obd_export *exp, struct ldlm_lock *lock)
spin_lock(&exp->exp_locks_list_guard);
LASSERT(lock->l_exp_refs_nr > 0);
if (lock->l_exp_refs_target != exp) {
- LCONSOLE_WARN("lock %p, "
- "mismatching export pointers: %p, %p\n",
+ LCONSOLE_WARN("lock %p, mismatching export pointers: %p, %p\n",
lock, lock->l_exp_refs_target, exp);
}
if (-- lock->l_exp_refs_nr == 0) {
@@ -1259,8 +1258,7 @@ static void class_disconnect_export_list(struct list_head *list,
}
class_export_get(exp);
- CDEBUG(D_HA, "%s: disconnecting export at %s (%p), "
- "last request at "CFS_TIME_T"\n",
+ CDEBUG(D_HA, "%s: disconnecting export at %s (%p), last request at " CFS_TIME_T "\n",
exp->exp_obd->obd_name, obd_export_nid2str(exp),
exp, exp->exp_last_request_time);
/* release one export reference anyway */
@@ -1284,8 +1282,8 @@ void class_disconnect_exports(struct obd_device *obd)
spin_unlock(&obd->obd_dev_lock);
if (!list_empty(&work_list)) {
- CDEBUG(D_HA, "OBD device %d (%p) has exports, "
- "disconnecting them\n", obd->obd_minor, obd);
+ CDEBUG(D_HA, "OBD device %d (%p) has exports, disconnecting them\n",
+ obd->obd_minor, obd);
class_disconnect_export_list(&work_list,
exp_flags_from_obd(obd));
} else
@@ -1422,8 +1420,8 @@ int obd_export_evict_by_nid(struct obd_device *obd, const char *nid)
LASSERTF(doomed_exp != obd->obd_self_export,
"self-export is hashed by NID?\n");
exports_evicted++;
- LCONSOLE_WARN("%s: evicting %s (at %s) by administrative "
- "request\n", obd->obd_name,
+ LCONSOLE_WARN("%s: evicting %s (at %s) by administrative request\n",
+ obd->obd_name,
obd_uuid2str(&doomed_exp->exp_client_uuid),
obd_export_nid2str(doomed_exp));
class_fail_export(doomed_exp);
@@ -1546,9 +1544,7 @@ void obd_exports_barrier(struct obd_device *obd)
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(cfs_time_seconds(waited));
if (waited > 5 && IS_PO2(waited)) {
- LCONSOLE_WARN("%s is waiting for obd_unlinked_exports "
- "more than %d seconds. "
- "The obd refcount = %d. Is it stuck?\n",
+ LCONSOLE_WARN("%s is waiting for obd_unlinked_exports more than %d seconds. The obd refcount = %d. Is it stuck?\n",
obd->obd_name, waited,
atomic_read(&obd->obd_refcount));
dump_exports(obd, 1);
@@ -1783,7 +1779,7 @@ EXPORT_SYMBOL(kuc_len);
* @param p Pointer to payload area
* @returns Pointer to kuc header
*/
-struct kuc_hdr * kuc_ptr(void *p)
+struct kuc_hdr *kuc_ptr(void *p)
{
struct kuc_hdr *lh = ((struct kuc_hdr *)p) - 1;
LASSERT(lh->kuc_magic == KUC_MAGIC);
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
index 7eaaaa648df..66ceab20c74 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
@@ -166,14 +166,14 @@ int obd_ioctl_popdata(void *arg, void *data, int len)
EXPORT_SYMBOL(obd_ioctl_popdata);
/* opening /dev/obd */
-static int obd_class_open(struct inode * inode, struct file * file)
+static int obd_class_open(struct inode *inode, struct file *file)
{
try_module_get(THIS_MODULE);
return 0;
}
/* closing /dev/obd */
-static int obd_class_release(struct inode * inode, struct file * file)
+static int obd_class_release(struct inode *inode, struct file *file)
{
module_put(THIS_MODULE);
return 0;
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
index 38a9b319355..dd46e735816 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
@@ -202,9 +202,8 @@ static int proc_max_dirty_pages_in_mb(struct ctl_table *table, int write,
/* Don't allow them to let dirty pages exceed 90% of system
* memory and set a hard minimum of 4MB. */
if (obd_max_dirty_pages > ((totalram_pages / 10) * 9)) {
- CERROR("Refusing to set max dirty pages to %u, which "
- "is more than 90%% of available RAM; setting "
- "to %lu\n", obd_max_dirty_pages,
+ CERROR("Refusing to set max dirty pages to %u, which is more than 90%% of available RAM; setting to %lu\n",
+ obd_max_dirty_pages,
((totalram_pages / 10) * 9));
obd_max_dirty_pages = ((totalram_pages / 10) * 9);
} else if (obd_max_dirty_pages < 4 << (20 - PAGE_CACHE_SHIFT)) {
diff --git a/drivers/staging/lustre/lustre/obdclass/llog.c b/drivers/staging/lustre/lustre/obdclass/llog.c
index 3ab05292152..114be4a78cc 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog.c
@@ -346,8 +346,8 @@ repeat:
}
if (rec->lrh_len == 0 ||
rec->lrh_len > LLOG_CHUNK_SIZE) {
- CWARN("invalid length %d in llog record for "
- "index %d/%d\n", rec->lrh_len,
+ CWARN("invalid length %d in llog record for index %d/%d\n",
+ rec->lrh_len,
rec->lrh_index, index);
rc = -EINVAL;
goto out;
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_cat.c b/drivers/staging/lustre/lustre/obdclass/llog_cat.c
index 6e139cf372c..4b850fc5f5d 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_cat.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_cat.c
@@ -228,8 +228,7 @@ int llog_cat_close(const struct lu_env *env, struct llog_handle *cathandle)
(llh->llh_count == 1)) {
rc = llog_destroy(env, loghandle);
if (rc)
- CERROR("%s: failure destroying log during "
- "cleanup: rc = %d\n",
+ CERROR("%s: failure destroying log during cleanup: rc = %d\n",
loghandle->lgh_ctxt->loc_obd->obd_name,
rc);
@@ -746,8 +745,7 @@ int llog_cat_cleanup(const struct lu_env *env, struct llog_handle *cathandle,
llog_cat_set_first_idx(cathandle, index);
rc = llog_cancel_rec(env, cathandle, index);
if (rc == 0)
- CDEBUG(D_HA, "cancel plain log at index"
- " %u of catalog "DOSTID"\n",
+ CDEBUG(D_HA, "cancel plain log at index %u of catalog " DOSTID "\n",
index, POSTID(&cathandle->lgh_id.lgl_oi));
return rc;
}
@@ -810,8 +808,8 @@ int llog_cat_init_and_process(const struct lu_env *env,
rc = llog_process_or_fork(env, llh, cat_cancel_cb, NULL, NULL, false);
if (rc)
- CERROR("%s: llog_process() with cat_cancel_cb failed: rc = "
- "%d\n", llh->lgh_ctxt->loc_obd->obd_name, rc);
+ CERROR("%s: llog_process() with cat_cancel_cb failed: rc = %d\n",
+ llh->lgh_ctxt->loc_obd->obd_name, rc);
return 0;
}
EXPORT_SYMBOL(llog_cat_init_and_process);
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_obd.c b/drivers/staging/lustre/lustre/obdclass/llog_obd.c
index da769db0af7..978d886a110 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_obd.c
@@ -42,7 +42,7 @@
#include "llog_internal.h"
/* helper functions for calling the llog obd methods */
-static struct llog_ctxt* llog_new_ctxt(struct obd_device *obd)
+static struct llog_ctxt *llog_new_ctxt(struct obd_device *obd)
{
struct llog_ctxt *ctxt;
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_swab.c b/drivers/staging/lustre/lustre/obdclass/llog_swab.c
index bfac8387021..d3ec90e85eb 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_swab.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_swab.c
@@ -400,8 +400,7 @@ void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size)
}
marker->cm_createtime = createtime;
marker->cm_canceltime = canceltime;
- CDEBUG(D_CONFIG, "Find old cfg_marker(Srv32b,Clt64b) "
- "for target %s, converting\n",
+ CDEBUG(D_CONFIG, "Find old cfg_marker(Srv32b,Clt64b) for target %s, converting\n",
marker->cm_tgtname);
} else if (swab) {
__swab64s(&marker->cm_createtime);
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
index 61e04af2464..3b7dfc36772 100644
--- a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
+++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
@@ -177,7 +177,7 @@ int lprocfs_read_frac_helper(char *buffer, unsigned long count, long val,
}
EXPORT_SYMBOL(lprocfs_read_frac_helper);
-int lprocfs_write_frac_helper(const char *buffer, unsigned long count,
+int lprocfs_write_frac_helper(const char __user *buffer, unsigned long count,
int *val, int mult)
{
char kernbuf[20], *end, *pbuf;
@@ -1400,8 +1400,8 @@ int lprocfs_alloc_obd_stats(struct obd_device *obd, unsigned num_private_stats)
* LPROCFS_OBD_OP_INIT(.., .., opname)
* is missing from the list above. */
LASSERTF(stats->ls_cnt_header[i].lc_name != NULL,
- "Missing obd_stat initializer obd_op "
- "operation at offset %d.\n", i - num_private_stats);
+ "Missing obd_stat initializer obd_op operation at offset %d.\n",
+ i - num_private_stats);
}
rc = lprocfs_register_stats(obd->obd_proc_entry, "stats", stats);
if (rc < 0) {
@@ -1486,8 +1486,7 @@ int lprocfs_alloc_md_stats(struct obd_device *obd,
for (i = num_private_stats; i < num_stats; i++) {
if (stats->ls_cnt_header[i].lc_name == NULL) {
- CERROR("Missing md_stat initializer md_op "
- "operation at offset %d. Aborting.\n",
+ CERROR("Missing md_stat initializer md_op operation at offset %d. Aborting.\n",
i - num_private_stats);
LBUG();
}
@@ -1607,8 +1606,7 @@ LPROC_SEQ_FOPS_RO(lproc_exp_hash);
int lprocfs_nid_stats_clear_read(struct seq_file *m, void *data)
{
return seq_printf(m, "%s\n",
- "Write into this file to clear all nid stats and "
- "stale nid entries");
+ "Write into this file to clear all nid stats and stale nid entries");
}
EXPORT_SYMBOL(lprocfs_nid_stats_clear_read);
@@ -1819,7 +1817,7 @@ __s64 lprocfs_read_helper(struct lprocfs_counter *lc,
}
EXPORT_SYMBOL(lprocfs_read_helper);
-int lprocfs_write_helper(const char *buffer, unsigned long count,
+int lprocfs_write_helper(const char __user *buffer, unsigned long count,
int *val)
{
return lprocfs_write_frac_helper(buffer, count, val, 1);
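These helpers now carry the __user annotation because the buffer comes straight from a /proc write; the kernel side must copy it in with copy_from_user() before parsing, and sparse can verify that no direct dereference slips through. A hedged sketch of the expected copy-in step (shape only, not the full helper):

	char kernbuf[20];

	if (count > sizeof(kernbuf) - 1)
		return -EINVAL;
	if (copy_from_user(kernbuf, buffer, count))	/* buffer is __user */
		return -EFAULT;
	kernbuf[count] = '\0';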
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
index 2fc037cfb62..83bf168c293 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -866,8 +866,7 @@ static int lu_htable_order(void)
/* clear off unreasonable cache setting. */
if (lu_cache_percent == 0 || lu_cache_percent > LU_CACHE_PERCENT_MAX) {
- CWARN("obdclass: invalid lu_cache_percent: %u, it must be in"
- " the range of (0, %u]. Will use default value: %u.\n",
+ CWARN("obdclass: invalid lu_cache_percent: %u, it must be in the range of (0, %u]. Will use default value: %u.\n",
lu_cache_percent, LU_CACHE_PERCENT_MAX,
LU_CACHE_PERCENT_DEFAULT);
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_config.c b/drivers/staging/lustre/lustre/obdclass/obd_config.c
index 5e7b3d7cc98..6ce9adc2f11 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_config.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_config.c
@@ -835,7 +835,7 @@ int class_del_conn(struct obd_device *obd, struct lustre_cfg *lcfg)
LIST_HEAD(lustre_profile_list);
-struct lustre_profile *class_get_profile(const char * prof)
+struct lustre_profile *class_get_profile(const char *prof)
{
struct lustre_profile *lprof;
@@ -1443,8 +1443,7 @@ int class_config_llog_handler(const struct lu_env *env,
if (!(clli->cfg_flags & CFG_F_COMPAT146) &&
!(clli->cfg_flags & CFG_F_MARKER) &&
(lcfg->lcfg_command != LCFG_MARKER)) {
- CWARN("Config not inside markers, ignoring! "
- "(inst: %p, uuid: %s, flags: %#x)\n",
+ CWARN("Config not inside markers, ignoring! (inst: %p, uuid: %s, flags: %#x)\n",
clli->cfg_instance,
clli->cfg_uuid.uuid, clli->cfg_flags);
clli->cfg_flags |= CFG_F_SKIP;
@@ -1467,14 +1466,12 @@ int class_config_llog_handler(const struct lu_env *env,
if ((lcfg->lcfg_command == LCFG_ATTACH && typename &&
strcmp(typename, "mds") == 0)) {
- CWARN("For 1.8 interoperability, rename obd "
- "type from mds to mdt\n");
+ CWARN("For 1.8 interoperability, rename obd type from mds to mdt\n");
typename[2] = 't';
}
if ((lcfg->lcfg_command == LCFG_SETUP && index &&
strcmp(index, "type") == 0)) {
- CDEBUG(D_INFO, "For 1.8 interoperability, "
- "set this index to '0'\n");
+ CDEBUG(D_INFO, "For 1.8 interoperability, set this index to '0'\n");
index[0] = '0';
index[1] = 0;
}
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_mount.c b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
index 1260c8713bc..4f39cdee1b5 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_mount.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
@@ -100,19 +100,12 @@ int lustre_process_log(struct super_block *sb, char *logname,
OBD_FREE_PTR(bufs);
if (rc == -EINVAL)
- LCONSOLE_ERROR_MSG(0x15b, "%s: The configuration from log '%s'"
- "failed from the MGS (%d). Make sure this "
- "client and the MGS are running compatible "
- "versions of Lustre.\n",
+ LCONSOLE_ERROR_MSG(0x15b, "%s: The configuration from log '%s' failed from the MGS (%d). Make sure this client and the MGS are running compatible versions of Lustre.\n",
mgc->obd_name, logname, rc);
if (rc)
- LCONSOLE_ERROR_MSG(0x15c, "%s: The configuration from log '%s' "
- "failed (%d). This may be the result of "
- "communication errors between this node and "
- "the MGS, a bad configuration, or other "
- "errors. See the syslog for more "
- "information.\n", mgc->obd_name, logname,
+ LCONSOLE_ERROR_MSG(0x15c, "%s: The configuration from log '%s' failed (%d). This may be the result of communication errors between this node and the MGS, a bad configuration, or other errors. See the syslog for more information.\n",
+ mgc->obd_name, logname,
rc);
/* class_obd_list(); */
@@ -297,11 +290,8 @@ int lustre_start_mgc(struct super_block *sb)
if (has_ir ^ !(*flags & LMD_FLG_NOIR)) {
/* LMD_FLG_NOIR is for test purpose only */
LCONSOLE_WARN(
- "Trying to mount a client with IR setting "
- "not compatible with current mgc. "
- "Force to use current mgc setting that is "
- "IR %s.\n",
- has_ir ? "enabled" : "disabled");
+ "Trying to mount a client with IR setting not compatible with current mgc. Force to use current mgc setting that is IR %s.\n",
+ has_ir ? "enabled" : "disabled");
if (has_ir)
*flags &= ~LMD_FLG_NOIR;
else
@@ -998,16 +988,14 @@ static int lmd_parse(char *options, struct lustre_mount_data *lmd)
LASSERT(lmd);
if (!options) {
- LCONSOLE_ERROR_MSG(0x162, "Missing mount data: check that "
- "/sbin/mount.lustre is installed.\n");
+ LCONSOLE_ERROR_MSG(0x162, "Missing mount data: check that /sbin/mount.lustre is installed.\n");
return -EINVAL;
}
/* Options should be a string - try to detect old lmd data */
if ((raw->lmd_magic & 0xffffff00) == (LMD_MAGIC & 0xffffff00)) {
- LCONSOLE_ERROR_MSG(0x163, "You're using an old version of "
- "/sbin/mount.lustre. Please install "
- "version %s\n", LUSTRE_VERSION_STRING);
+ LCONSOLE_ERROR_MSG(0x163, "You're using an old version of /sbin/mount.lustre. Please install version %s\n",
+ LUSTRE_VERSION_STRING);
return -EINVAL;
}
lmd->lmd_magic = LMD_MAGIC;
@@ -1139,8 +1127,7 @@ static int lmd_parse(char *options, struct lustre_mount_data *lmd)
}
if (!devname) {
- LCONSOLE_ERROR_MSG(0x164, "Can't find the device name "
- "(need mount option 'device=...')\n");
+ LCONSOLE_ERROR_MSG(0x164, "Can't find the device name (need mount option 'device=...')\n");
goto invalid;
}
@@ -1232,9 +1219,7 @@ int lustre_fill_super(struct super_block *sb, void *data, int silent)
if (client_fill_super == NULL)
request_module("lustre");
if (client_fill_super == NULL) {
- LCONSOLE_ERROR_MSG(0x165, "Nothing registered for "
- "client mount! Is the 'lustre' "
- "module loaded?\n");
+ LCONSOLE_ERROR_MSG(0x165, "Nothing registered for client mount! Is the 'lustre' module loaded?\n");
lustre_put_lsi(sb);
rc = -ENODEV;
} else {
@@ -1249,8 +1234,7 @@ int lustre_fill_super(struct super_block *sb, void *data, int silent)
/* c_f_s will call lustre_common_put_super on failure */
}
} else {
- CERROR("This is client-side-only module, "
- "cannot handle server mount.\n");
+ CERROR("This is client-side-only module, cannot handle server mount.\n");
rc = -EINVAL;
}
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index 98e4290919d..5f6d9441bc4 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -698,14 +698,16 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
int cleanup = 0;
OBD_ALLOC_PTR(ed);
- if (ed == NULL)
- GOTO(out, rc = -ENOMEM);
+ if (ed == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
cleanup = 1;
cd = &ed->ed_cl;
rc = cl_device_init(cd, t);
if (rc)
- GOTO(out, rc);
+ goto out;
cd->cd_lu_dev.ld_ops = &echo_device_lu_ops;
cd->cd_ops = &echo_device_cl_ops;
@@ -719,24 +721,26 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
if (tgt == NULL) {
CERROR("Can not find tgt device %s\n",
lustre_cfg_string(cfg, 1));
- GOTO(out, rc = -ENODEV);
+ rc = -ENODEV;
+ goto out;
}
next = tgt->obd_lu_dev;
if (!strcmp(tgt->obd_type->typ_name, LUSTRE_MDT_NAME)) {
CERROR("echo MDT client must be run on server\n");
- GOTO(out, rc = -EOPNOTSUPP);
+ rc = -EOPNOTSUPP;
+ goto out;
}
rc = echo_site_init(env, ed);
if (rc)
- GOTO(out, rc);
+ goto out;
cleanup = 3;
rc = echo_client_setup(env, obd, cfg);
if (rc)
- GOTO(out, rc);
+ goto out;
ed->ed_ec = &obd->u.echo_client;
cleanup = 4;
@@ -749,15 +753,17 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
tgt_type_name = tgt->obd_type->typ_name;
if (next != NULL) {
LASSERT(next != NULL);
- if (next->ld_site != NULL)
- GOTO(out, rc = -EBUSY);
+ if (next->ld_site != NULL) {
+ rc = -EBUSY;
+ goto out;
+ }
next->ld_site = &ed->ed_site->cs_lu;
rc = next->ld_type->ldt_ops->ldto_device_init(env, next,
next->ld_type->ldt_name,
NULL);
if (rc)
- GOTO(out, rc);
+ goto out;
/* Tricky case, I have to determine the obd type since
* CLIO uses the different parameters to initialize
@@ -865,8 +871,7 @@ static struct lu_device *echo_device_free(const struct lu_env *env,
spin_lock(&ec->ec_lock);
while (!list_empty(&ec->ec_objects)) {
spin_unlock(&ec->ec_lock);
- CERROR("echo_client still has objects at cleanup time, "
- "wait for 1 second\n");
+ CERROR("echo_client still has objects at cleanup time, wait for 1 second\n");
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(cfs_time_seconds(1));
lu_site_purge(env, &ed->ed_site->cs_lu, -1);
@@ -968,15 +973,19 @@ static struct echo_object *cl_echo_object_find(struct echo_device *d,
fid = &info->eti_fid;
rc = ostid_to_fid(fid, &lsm->lsm_oi, 0);
- if (rc != 0)
- GOTO(out, eco = ERR_PTR(rc));
+ if (rc != 0) {
+ eco = ERR_PTR(rc);
+ goto out;
+ }
/* In the function below, .hs_keycmp resolves to
* lu_obj_hop_keycmp() */
/* coverity[overrun-buffer-val] */
obj = cl_object_find(env, echo_dev2cl(d), fid, &conf->eoc_cl);
- if (IS_ERR(obj))
- GOTO(out, eco = (void *)obj);
+ if (IS_ERR(obj)) {
+ eco = (void *)obj;
+ goto out;
+ }
eco = cl2echo_obj(obj);
if (eco->eo_deleted) {
@@ -1076,7 +1085,7 @@ static int cl_echo_enqueue(struct echo_object *eco, u64 start, u64 end,
io->ci_ignore_layout = 1;
result = cl_io_init(env, io, CIT_MISC, echo_obj2cl(eco));
if (result < 0)
- GOTO(out, result);
+ goto out;
LASSERT(result == 0);
result = cl_echo_enqueue0(env, eco, start, end, mode, cookie, 0);
@@ -1182,7 +1191,7 @@ static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
io->ci_ignore_layout = 1;
rc = cl_io_init(env, io, CIT_MISC, obj);
if (rc < 0)
- GOTO(out, rc);
+ goto out;
LASSERT(rc == 0);
@@ -1191,7 +1200,7 @@ static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
rw == READ ? LCK_PR : LCK_PW, &lh.cookie,
CEF_NEVER);
if (rc < 0)
- GOTO(error_lock, rc);
+ goto error_lock;
for (i = 0; i < npages; i++) {
LASSERT(pages[i]);
@@ -1318,7 +1327,7 @@ static int echo_create_object(const struct lu_env *env, struct echo_device *ed,
rc = echo_alloc_memmd(ed, &lsm);
if (rc < 0) {
CERROR("Cannot allocate md: rc = %d\n", rc);
- GOTO(failed, rc);
+ goto failed;
}
if (ulsm != NULL) {
@@ -1326,7 +1335,7 @@ static int echo_create_object(const struct lu_env *env, struct echo_device *ed,
rc = echo_copyin_lsm (ed, lsm, ulsm, ulsm_nob);
if (rc != 0)
- GOTO(failed, rc);
+ goto failed;
if (lsm->lsm_stripe_count == 0)
lsm->lsm_stripe_count = ec->ec_nstripes;
@@ -1363,7 +1372,7 @@ static int echo_create_object(const struct lu_env *env, struct echo_device *ed,
rc = obd_create(env, ec->ec_exp, oa, &lsm, oti);
if (rc != 0) {
CERROR("Cannot create objects: rc = %d\n", rc);
- GOTO(failed, rc);
+ goto failed;
}
created = 1;
}
@@ -1373,8 +1382,10 @@ static int echo_create_object(const struct lu_env *env, struct echo_device *ed,
oa->o_valid |= OBD_MD_FLID;
eco = cl_echo_object_find(ed, &lsm);
- if (IS_ERR(eco))
- GOTO(failed, rc = PTR_ERR(eco));
+ if (IS_ERR(eco)) {
+ rc = PTR_ERR(eco);
+ goto failed;
+ }
cl_echo_object_put(eco);
CDEBUG(D_INFO, "oa oid "DOSTID"\n", POSTID(&oa->o_oi));
@@ -1642,8 +1653,10 @@ static int echo_client_prep_commit(const struct lu_env *env,
OBD_ALLOC(lnb, npages * sizeof(struct niobuf_local));
OBD_ALLOC(rnb, npages * sizeof(struct niobuf_remote));
- if (lnb == NULL || rnb == NULL)
- GOTO(out, ret = -ENOMEM);
+ if (lnb == NULL || rnb == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
if (rw == OBD_BRW_WRITE && async)
brw_flags |= OBD_BRW_ASYNC;
@@ -1671,7 +1684,7 @@ static int echo_client_prep_commit(const struct lu_env *env,
ret = obd_preprw(env, rw, exp, oa, 1, &ioo, rnb, &lpages,
lnb, oti, NULL);
if (ret != 0)
- GOTO(out, ret);
+ goto out;
LASSERT(lpages == npages);
for (i = 0; i < lpages; i++) {
@@ -1704,7 +1717,7 @@ static int echo_client_prep_commit(const struct lu_env *env,
ret = obd_commitrw(env, rw, exp, oa, 1, &ioo,
rnb, npages, lnb, oti, ret);
if (ret != 0)
- GOTO(out, ret);
+ goto out;
/* Reset oti otherwise it would confuse ldiskfs. */
memset(oti, 0, sizeof(*oti));
@@ -1862,21 +1875,27 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
return -ENOMEM;
rc = lu_env_init(env, LCT_DT_THREAD);
- if (rc)
- GOTO(out, rc = -ENOMEM);
+ if (rc) {
+ rc = -ENOMEM;
+ goto out;
+ }
switch (cmd) {
case OBD_IOC_CREATE: /* may create echo object */
- if (!capable(CFS_CAP_SYS_ADMIN))
- GOTO (out, rc = -EPERM);
+ if (!capable(CFS_CAP_SYS_ADMIN)) {
+ rc = -EPERM;
+ goto out;
+ }
rc = echo_create_object(env, ed, 1, oa, data->ioc_pbuf1,
data->ioc_plen1, &dummy_oti);
- GOTO(out, rc);
+ goto out;
case OBD_IOC_DESTROY:
- if (!capable(CFS_CAP_SYS_ADMIN))
- GOTO (out, rc = -EPERM);
+ if (!capable(CFS_CAP_SYS_ADMIN)) {
+ rc = -EPERM;
+ goto out;
+ }
rc = echo_get_object(&eco, ed, oa);
if (rc == 0) {
@@ -1886,7 +1905,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
eco->eo_deleted = 1;
echo_put_object(eco);
}
- GOTO(out, rc);
+ goto out;
case OBD_IOC_GETATTR:
rc = echo_get_object(&eco, ed, oa);
@@ -1897,11 +1916,13 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
rc = obd_getattr(env, ec->ec_exp, &oinfo);
echo_put_object(eco);
}
- GOTO(out, rc);
+ goto out;
case OBD_IOC_SETATTR:
- if (!capable(CFS_CAP_SYS_ADMIN))
- GOTO (out, rc = -EPERM);
+ if (!capable(CFS_CAP_SYS_ADMIN)) {
+ rc = -EPERM;
+ goto out;
+ }
rc = echo_get_object(&eco, ed, oa);
if (rc == 0) {
@@ -1912,17 +1933,19 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
rc = obd_setattr(env, ec->ec_exp, &oinfo, NULL);
echo_put_object(eco);
}
- GOTO(out, rc);
+ goto out;
case OBD_IOC_BRW_WRITE:
- if (!capable(CFS_CAP_SYS_ADMIN))
- GOTO (out, rc = -EPERM);
+ if (!capable(CFS_CAP_SYS_ADMIN)) {
+ rc = -EPERM;
+ goto out;
+ }
rw = OBD_BRW_WRITE;
/* fall through */
case OBD_IOC_BRW_READ:
rc = echo_client_brw_ioctl(env, rw, exp, data, &dummy_oti);
- GOTO(out, rc);
+ goto out;
case ECHO_IOC_GET_STRIPE:
rc = echo_get_object(&eco, ed, oa);
@@ -1931,11 +1954,13 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
data->ioc_plen1);
echo_put_object(eco);
}
- GOTO(out, rc);
+ goto out;
case ECHO_IOC_SET_STRIPE:
- if (!capable(CFS_CAP_SYS_ADMIN))
- GOTO (out, rc = -EPERM);
+ if (!capable(CFS_CAP_SYS_ADMIN)) {
+ rc = -EPERM;
+ goto out;
+ }
if (data->ioc_pbuf1 == NULL) { /* unset */
rc = echo_get_object(&eco, ed, oa);
@@ -1948,25 +1973,28 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
data->ioc_pbuf1,
data->ioc_plen1, &dummy_oti);
}
- GOTO (out, rc);
+ goto out;
case ECHO_IOC_ENQUEUE:
- if (!capable(CFS_CAP_SYS_ADMIN))
- GOTO (out, rc = -EPERM);
+ if (!capable(CFS_CAP_SYS_ADMIN)) {
+ rc = -EPERM;
+ goto out;
+ }
rc = echo_client_enqueue(exp, oa,
data->ioc_conn1, /* lock mode */
data->ioc_offset,
data->ioc_count);/*extent*/
- GOTO (out, rc);
+ goto out;
case ECHO_IOC_CANCEL:
rc = echo_client_cancel(exp, oa);
- GOTO (out, rc);
+ goto out;
default:
CERROR ("echo_ioctl(): unrecognised ioctl %#x\n", cmd);
- GOTO (out, rc = -ENOTTY);
+ rc = -ENOTTY;
+ goto out;
}
out:
@@ -2084,11 +2112,13 @@ static int echo_client_disconnect(struct obd_export *exp)
{
int rc;
- if (exp == NULL)
- GOTO(out, rc = -EINVAL);
+ if (exp == NULL) {
+ rc = -EINVAL;
+ goto out;
+ }
rc = class_disconnect(exp);
- GOTO(out, rc);
+ goto out;
out:
return rc;
}
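The echo_client hunks above, and the osc_cache hunks that follow, replace Lustre's GOTO(label, rc = ...) macro with an explicit assignment and goto, adding braces where the macro previously hid a multi-statement body. Sketch of the rewrite (illustrative):

	/* old style, via macro */
	if (ed == NULL)
		GOTO(out, rc = -ENOMEM);

	/* new style, explicit */
	if (ed == NULL) {
		rc = -ENOMEM;
		goto out;
	}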
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index 7734d666b7a..370e6d4896c 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -178,76 +178,113 @@ static int osc_extent_sanity_check0(struct osc_extent *ext,
int page_count;
int rc = 0;
- if (!osc_object_is_locked(obj))
- GOTO(out, rc = 9);
+ if (!osc_object_is_locked(obj)) {
+ rc = 9;
+ goto out;
+ }
- if (ext->oe_state >= OES_STATE_MAX)
- GOTO(out, rc = 10);
+ if (ext->oe_state >= OES_STATE_MAX) {
+ rc = 10;
+ goto out;
+ }
- if (atomic_read(&ext->oe_refc) <= 0)
- GOTO(out, rc = 20);
+ if (atomic_read(&ext->oe_refc) <= 0) {
+ rc = 20;
+ goto out;
+ }
- if (atomic_read(&ext->oe_refc) < atomic_read(&ext->oe_users))
- GOTO(out, rc = 30);
+ if (atomic_read(&ext->oe_refc) < atomic_read(&ext->oe_users)) {
+ rc = 30;
+ goto out;
+ }
switch (ext->oe_state) {
case OES_INV:
if (ext->oe_nr_pages > 0 || !list_empty(&ext->oe_pages))
- GOTO(out, rc = 35);
- GOTO(out, rc = 0);
- break;
+ rc = 35;
+ else
+ rc = 0;
+ goto out;
case OES_ACTIVE:
- if (atomic_read(&ext->oe_users) == 0)
- GOTO(out, rc = 40);
- if (ext->oe_hp)
- GOTO(out, rc = 50);
- if (ext->oe_fsync_wait && !ext->oe_urgent)
- GOTO(out, rc = 55);
+ if (atomic_read(&ext->oe_users) == 0) {
+ rc = 40;
+ goto out;
+ }
+ if (ext->oe_hp) {
+ rc = 50;
+ goto out;
+ }
+ if (ext->oe_fsync_wait && !ext->oe_urgent) {
+ rc = 55;
+ goto out;
+ }
break;
case OES_CACHE:
- if (ext->oe_grants == 0)
- GOTO(out, rc = 60);
- if (ext->oe_fsync_wait && !ext->oe_urgent && !ext->oe_hp)
- GOTO(out, rc = 65);
+ if (ext->oe_grants == 0) {
+ rc = 60;
+ goto out;
+ }
+ if (ext->oe_fsync_wait && !ext->oe_urgent && !ext->oe_hp) {
+ rc = 65;
+ goto out;
+ }
default:
- if (atomic_read(&ext->oe_users) > 0)
- GOTO(out, rc = 70);
+ if (atomic_read(&ext->oe_users) > 0) {
+ rc = 70;
+ goto out;
+ }
}
- if (ext->oe_max_end < ext->oe_end || ext->oe_end < ext->oe_start)
- GOTO(out, rc = 80);
+ if (ext->oe_max_end < ext->oe_end || ext->oe_end < ext->oe_start) {
+ rc = 80;
+ goto out;
+ }
- if (ext->oe_osclock == NULL && ext->oe_grants > 0)
- GOTO(out, rc = 90);
+ if (ext->oe_osclock == NULL && ext->oe_grants > 0) {
+ rc = 90;
+ goto out;
+ }
if (ext->oe_osclock) {
struct cl_lock_descr *descr;
descr = &ext->oe_osclock->cll_descr;
if (!(descr->cld_start <= ext->oe_start &&
- descr->cld_end >= ext->oe_max_end))
- GOTO(out, rc = 100);
+ descr->cld_end >= ext->oe_max_end)) {
+ rc = 100;
+ goto out;
+ }
}
- if (ext->oe_nr_pages > ext->oe_mppr)
- GOTO(out, rc = 105);
+ if (ext->oe_nr_pages > ext->oe_mppr) {
+ rc = 105;
+ goto out;
+ }
/* Do not verify page list if extent is in RPC. This is because an
* in-RPC extent is supposed to be exclusively accessible w/o lock. */
- if (ext->oe_state > OES_CACHE)
- GOTO(out, rc = 0);
+ if (ext->oe_state > OES_CACHE) {
+ rc = 0;
+ goto out;
+ }
- if (!extent_debug)
- GOTO(out, rc = 0);
+ if (!extent_debug) {
+ rc = 0;
+ goto out;
+ }
page_count = 0;
list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
pgoff_t index = oap2cl_page(oap)->cp_index;
++page_count;
- if (index > ext->oe_end || index < ext->oe_start)
- GOTO(out, rc = 110);
+ if (index > ext->oe_end || index < ext->oe_start) {
+ rc = 110;
+ goto out;
+ }
+ }
+ if (page_count != ext->oe_nr_pages) {
+ rc = 120;
+ goto out;
}
- if (page_count != ext->oe_nr_pages)
- GOTO(out, rc = 120);
out:
if (rc != 0)
@@ -536,10 +573,9 @@ static int osc_extent_merge(const struct lu_env *env, struct osc_extent *cur,
/**
* Drop user count of osc_extent, and unplug IO asynchronously.
*/
-int osc_extent_release(const struct lu_env *env, struct osc_extent *ext)
+void osc_extent_release(const struct lu_env *env, struct osc_extent *ext)
{
struct osc_object *obj = ext->oe_obj;
- int rc = 0;
LASSERT(atomic_read(&ext->oe_users) > 0);
LASSERT(sanity_check(ext) == 0);
@@ -571,7 +607,6 @@ int osc_extent_release(const struct lu_env *env, struct osc_extent *ext)
osc_io_unplug_async(env, osc_cli(obj), obj);
}
osc_extent_put(env, ext);
- return rc;
}
static inline int overlapped(struct osc_extent *ex1, struct osc_extent *ex2)
@@ -776,8 +811,10 @@ restart:
rc = osc_extent_wait(env, conflict, OES_INV);
osc_extent_put(env, conflict);
conflict = NULL;
- if (rc < 0)
- GOTO(out, found = ERR_PTR(rc));
+ if (rc < 0) {
+ found = ERR_PTR(rc);
+ goto out;
+ }
goto restart;
}
@@ -934,7 +971,7 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
io->ci_obj = cl_object_top(osc2cl(obj));
rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
if (rc < 0)
- GOTO(out, rc);
+ goto out;
/* discard all pages with index greater then trunc_index */
list_for_each_entry_safe(oap, tmp, &ext->oe_pages,
@@ -1114,21 +1151,27 @@ static int osc_extent_expand(struct osc_extent *ext, pgoff_t index, int *grants)
osc_object_lock(obj);
LASSERT(sanity_check_nolock(ext) == 0);
end_chunk = ext->oe_end >> ppc_bits;
- if (chunk > end_chunk + 1)
- GOTO(out, rc = -ERANGE);
+ if (chunk > end_chunk + 1) {
+ rc = -ERANGE;
+ goto out;
+ }
- if (end_chunk >= chunk)
- GOTO(out, rc = 0);
+ if (end_chunk >= chunk) {
+ rc = 0;
+ goto out;
+ }
LASSERT(end_chunk + 1 == chunk);
/* try to expand this extent to cover @index */
end_index = min(ext->oe_max_end, ((chunk + 1) << ppc_bits) - 1);
next = next_extent(ext);
- if (next != NULL && next->oe_start <= end_index)
+ if (next != NULL && next->oe_start <= end_index) {
/* complex mode - overlapped with the next extent,
* this case will be handled by osc_extent_find() */
- GOTO(out, rc = -EAGAIN);
+ rc = -EAGAIN;
+ goto out;
+ }
ext->oe_end = end_index;
ext->oe_grants += chunksize;
@@ -1497,12 +1540,16 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
* of queued writes and create a discontiguous rpc stream */
if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) ||
cli->cl_dirty_max < PAGE_CACHE_SIZE ||
- cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync)
- GOTO(out, rc = -EDQUOT);
+ cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync) {
+ rc = -EDQUOT;
+ goto out;
+ }
/* Hopefully normal case - cache space and write credits available */
- if (osc_enter_cache_try(cli, oap, bytes, 0))
- GOTO(out, rc = 0);
+ if (osc_enter_cache_try(cli, oap, bytes, 0)) {
+ rc = 0;
+ goto out;
+ }
/* We can get here for two reasons: too many dirty pages in cache, or
* run out of grants. In both cases we should write dirty pages out.
@@ -1530,16 +1577,18 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
/* l_wait_event is interrupted by signal */
if (rc < 0) {
list_del_init(&ocw.ocw_entry);
- GOTO(out, rc);
+ goto out;
}
LASSERT(list_empty(&ocw.ocw_entry));
rc = ocw.ocw_rc;
if (rc != -EDQUOT)
- GOTO(out, rc);
- if (osc_enter_cache_try(cli, oap, bytes, 0))
- GOTO(out, rc = 0);
+ goto out;
+ if (osc_enter_cache_try(cli, oap, bytes, 0)) {
+ rc = 0;
+ goto out;
+ }
}
out:
client_obd_list_unlock(&cli->cl_loi_list_lock);
@@ -1562,8 +1611,8 @@ void osc_wake_cache_waiters(struct client_obd *cli)
if ((cli->cl_dirty + PAGE_CACHE_SIZE > cli->cl_dirty_max) ||
(atomic_read(&obd_dirty_pages) + 1 >
obd_max_dirty_pages)) {
- CDEBUG(D_CACHE, "no dirty room: dirty: %ld "
- "osc max %ld, sys max %d\n", cli->cl_dirty,
+ CDEBUG(D_CACHE, "no dirty room: dirty: %ld osc max %ld, sys max %d\n",
+ cli->cl_dirty,
cli->cl_dirty_max, obd_max_dirty_pages);
goto wakeup;
}
@@ -2401,14 +2450,15 @@ int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
* one making the extent active, we could deadlock waiting for
* the page writeback to clear but it won't because the extent
* is active and won't be written out. */
- GOTO(out, rc = -EAGAIN);
+ rc = -EAGAIN;
+ goto out;
default:
break;
}
rc = cl_page_prep(env, io, cl_page_top(cp), CRT_WRITE);
if (rc)
- GOTO(out, rc);
+ goto out;
spin_lock(&oap->oap_lock);
oap->oap_async_flags |= ASYNC_READY|ASYNC_URGENT;
diff --git a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
index ebbd95c0cea..365b2787b3c 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
@@ -678,7 +678,7 @@ struct osc_extent {
int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
int sent, int rc);
-int osc_extent_release(const struct lu_env *env, struct osc_extent *ext);
+void osc_extent_release(const struct lu_env *env, struct osc_extent *ext);
/** @} osc */
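
[Editor's note] Besides the GOTO expansion, the osc_cache.c and osc_cl_internal.h hunks change osc_extent_release() from int to void: the function could only ever return 0, and the prototype is updated in the same patch so definition, declaration and callers stay in sync. A tiny sketch of the pattern, with invented names:

	struct extent {
		int users;
	};

	/* Before (hypothetical): the return value was always 0 and callers ignored it. */
	static int release_extent_old(struct extent *ext)
	{
		ext->users--;
		return 0;
	}

	/* After: dropping the dead return value, mirroring osc_extent_release(). */
	static void release_extent(struct extent *ext)
	{
		ext->users--;
	}
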
diff --git a/drivers/staging/lustre/lustre/osc/osc_lock.c b/drivers/staging/lustre/lustre/osc/osc_lock.c
index 8138856fda8..a7f08bc4816 100644
--- a/drivers/staging/lustre/lustre/osc/osc_lock.c
+++ b/drivers/staging/lustre/lustre/osc/osc_lock.c
@@ -1067,14 +1067,12 @@ static int osc_lock_enqueue_wait(const struct lu_env *env,
* conflicts, we do not wait but return 0 so the
* request is send to the server
*/
- CDEBUG(D_DLMTRACE, "group lock %p is conflicted "
- "with %p, no wait, send to server\n",
+ CDEBUG(D_DLMTRACE, "group lock %p is conflicted with %p, no wait, send to server\n",
lock, conflict);
cl_lock_put(env, conflict);
rc = 0;
} else {
- CDEBUG(D_DLMTRACE, "lock %p is conflicted with %p, "
- "will wait\n",
+ CDEBUG(D_DLMTRACE, "lock %p is conflicted with %p, will wait\n",
lock, conflict);
LASSERT(lock->cll_conflict == NULL);
lu_ref_add(&conflict->cll_reference, "cancel-wait",
diff --git a/drivers/staging/lustre/lustre/osc/osc_object.c b/drivers/staging/lustre/lustre/osc/osc_object.c
index 69000584619..92c202f7039 100644
--- a/drivers/staging/lustre/lustre/osc/osc_object.c
+++ b/drivers/staging/lustre/lustre/osc/osc_object.c
@@ -140,8 +140,7 @@ static int osc_object_print(const struct lu_env *env, void *cookie,
struct lov_oinfo *oinfo = osc->oo_oinfo;
struct osc_async_rc *ar = &oinfo->loi_ar;
- (*p)(env, cookie, "id: "DOSTID" "
- "idx: %d gen: %d kms_valid: %u kms %llu rc: %d force_sync: %d min_xid: %llu ",
+ (*p)(env, cookie, "id: " DOSTID " idx: %d gen: %d kms_valid: %u kms %llu rc: %d force_sync: %d min_xid: %llu ",
POSTID(&oinfo->loi_oi), oinfo->loi_ost_idx,
oinfo->loi_ost_gen, oinfo->loi_kms_valid, oinfo->loi_kms,
ar->ar_rc, ar->ar_force_sync, ar->ar_min_xid);
diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c
index fcd079b1af0..76ba58b09c5 100644
--- a/drivers/staging/lustre/lustre/osc/osc_page.c
+++ b/drivers/staging/lustre/lustre/osc/osc_page.c
@@ -369,12 +369,7 @@ static int osc_page_print(const struct lu_env *env,
struct osc_object *obj = cl2osc(slice->cpl_obj);
struct client_obd *cli = &osc_export(obj)->exp_obd->u.cli;
- return (*printer)(env, cookie, LUSTRE_OSC_NAME"-page@%p: "
- "1< %#x %d %u %s %s > "
- "2< %llu %u %u %#x %#x | %p %p %p > "
- "3< %s %p %d %lu %d > "
- "4< %d %d %d %lu %s | %s %s %s %s > "
- "5< %s %s %s %s | %d %s | %d %s %s>\n",
+ return (*printer)(env, cookie, LUSTRE_OSC_NAME "-page@%p: 1< %#x %d %u %s %s > 2< %llu %u %u %#x %#x | %p %p %p > 3< %s %p %d %lu %d > 4< %d %d %d %lu %s | %s %s %s %s > 5< %s %s %s %s | %d %s | %d %s %s>\n",
opg,
/* 1 */
oap->oap_magic, oap->oap_cmd,
@@ -550,8 +545,8 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
LINVRNT(osc_page_protected(env, opg,
crt == CRT_WRITE ? CLM_WRITE : CLM_READ, 1));
- LASSERTF(oap->oap_magic == OAP_MAGIC, "Bad oap magic: oap %p, "
- "magic 0x%x\n", oap, oap->oap_magic);
+ LASSERTF(oap->oap_magic == OAP_MAGIC, "Bad oap magic: oap %p, magic 0x%x\n",
+ oap, oap->oap_magic);
LASSERT(oap->oap_async_flags & ASYNC_READY);
LASSERT(oap->oap_async_flags & ASYNC_COUNT_STABLE);
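
[Editor's note] Most of the remaining hunks in this series join printk-style format strings that had been split across source lines. checkpatch flags such splits because someone grepping the source for a logged message will not find it; the accepted fix is to keep the user-visible literal on one line even when it exceeds 80 columns. A trivial userspace sketch of the same before/after, with printf standing in for CDEBUG/CERROR and a format string loosely modelled on the one above:

	#include <stdio.h>

	int main(void)
	{
		/* Before: adjacent literals split the message across lines;
		 * grepping for the logged text finds nothing. */
		printf("no dirty room: dirty: %d "
		       "osc max %d\n", 1, 2);

		/* After: one searchable literal, long line accepted. */
		printf("no dirty room: dirty: %d osc max %d\n", 1, 2);

		return 0;
	}
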
diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c
index 44657a06b8a..b9450b95f1c 100644
--- a/drivers/staging/lustre/lustre/osc/osc_request.c
+++ b/drivers/staging/lustre/lustre/osc/osc_request.c
@@ -1078,9 +1078,9 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
cli->cl_chunkbits = max_t(int, PAGE_CACHE_SHIFT, ocd->ocd_blocksize);
client_obd_list_unlock(&cli->cl_loi_list_lock);
- CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld."
- "chunk bits: %d.\n", cli->cl_import->imp_obd->obd_name,
- cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits);
+ CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld chunk bits: %d\n",
+ cli->cl_import->imp_obd->obd_name,
+ cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits);
if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT_SHRINK &&
list_empty(&cli->cl_grant_shrink_list))
@@ -1171,8 +1171,7 @@ static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
/* warn if we try to combine flags that we don't know to be
* safe to combine */
if (unlikely((p1->flag & mask) != (p2->flag & mask))) {
- CWARN("Saw flags 0x%x and 0x%x in the same brw, please "
- "report this at http://bugs.whamcloud.com/\n",
+ CWARN("Saw flags 0x%x and 0x%x in the same brw, please report this at http://bugs.whamcloud.com/\n",
p1->flag, p2->flag);
}
return 0;
@@ -1343,8 +1342,7 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
"i: %d/%d pg: %p off: %llu, count: %u\n",
i, page_count, pg, pg->off, pg->count);
LASSERTF(i == 0 || pg->off > pg_prev->off,
- "i %d p_c %u pg %p [pri %lu ind %lu] off %llu"
- " prev_pg %p [pri %lu ind %lu] off %llu\n",
+ "i %d p_c %u pg %p [pri %lu ind %lu] off %llu prev_pg %p [pri %lu ind %lu] off %llu\n",
i, page_count,
pg->pg, page_private(pg->pg), pg->pg->index, pg->off,
pg_prev->pg, page_private(pg_prev->pg),
@@ -1467,16 +1465,16 @@ static int check_write_checksum(struct obdo *oa, const lnet_process_id_t *peer,
cksum_type);
if (cksum_type != client_cksum_type)
- msg = "the server did not use the checksum type specified in "
- "the original request - likely a protocol problem";
+ msg = "the server did not use the checksum type specified in the original request - likely a protocol problem"
+ ;
else if (new_cksum == server_cksum)
- msg = "changed on the client after we checksummed it - "
- "likely false positive due to mmap IO (bug 11742)";
+ msg = "changed on the client after we checksummed it - likely false positive due to mmap IO (bug 11742)"
+ ;
else if (new_cksum == client_cksum)
msg = "changed in transit before arrival at OST";
else
- msg = "changed in transit AND doesn't match the original - "
- "likely false positive due to mmap IO (bug 11742)";
+ msg = "changed in transit AND doesn't match the original - likely false positive due to mmap IO (bug 11742)"
+ ;
LCONSOLE_ERROR_MSG(0x132, "BAD WRITE CHECKSUM: %s: from %s inode "DFID
" object "DOSTID" extent [%llu-%llu]\n",
@@ -1486,8 +1484,8 @@ static int check_write_checksum(struct obdo *oa, const lnet_process_id_t *peer,
oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
POSTID(&oa->o_oi), pga[0]->off,
pga[page_count-1]->off + pga[page_count-1]->count - 1);
- CERROR("original client csum %x (type %x), server csum %x (type %x), "
- "client csum now %x\n", client_cksum, client_cksum_type,
+ CERROR("original client csum %x (type %x), server csum %x (type %x), client csum now %x\n",
+ client_cksum, client_cksum_type,
server_cksum, cksum_type, new_cksum);
return 1;
}
@@ -1601,23 +1599,21 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
}
if (server_cksum != client_cksum) {
- LCONSOLE_ERROR_MSG(0x133, "%s: BAD READ CHECKSUM: from "
- "%s%s%s inode "DFID" object "DOSTID
- " extent [%llu-%llu]\n",
+ LCONSOLE_ERROR_MSG(0x133, "%s: BAD READ CHECKSUM: from %s%s%s inode " DFID " object " DOSTID " extent [%llu-%llu]\n",
req->rq_import->imp_obd->obd_name,
libcfs_nid2str(peer->nid),
via, router,
body->oa.o_valid & OBD_MD_FLFID ?
- body->oa.o_parent_seq : (__u64)0,
+ body->oa.o_parent_seq : (__u64)0,
body->oa.o_valid & OBD_MD_FLFID ?
- body->oa.o_parent_oid : 0,
+ body->oa.o_parent_oid : 0,
body->oa.o_valid & OBD_MD_FLFID ?
- body->oa.o_parent_ver : 0,
+ body->oa.o_parent_ver : 0,
POSTID(&body->oa.o_oi),
aa->aa_ppga[0]->off,
aa->aa_ppga[aa->aa_page_count-1]->off +
aa->aa_ppga[aa->aa_page_count-1]->count -
- 1);
+ 1);
CERROR("client %x, server %x, cksum_type %x\n",
client_cksum, server_cksum, cksum_type);
cksum_counter = 0;
@@ -1771,8 +1767,7 @@ static int brw_interpret(const struct lu_env *env,
if (osc_recoverable_error(rc)) {
if (req->rq_import_generation !=
req->rq_import->imp_generation) {
- CDEBUG(D_HA, "%s: resend cross eviction for object: "
- ""DOSTID", rc = %d.\n",
+ CDEBUG(D_HA, "%s: resend cross eviction for object: " DOSTID ", rc = %d.\n",
req->rq_import->imp_obd->obd_name,
POSTID(&aa->aa_oa->o_oi), rc);
} else if (rc == -EINPROGRESS ||
@@ -3013,8 +3008,8 @@ static int osc_reconnect(const struct lu_env *env,
cli->cl_lost_grant = 0;
client_obd_list_unlock(&cli->cl_loi_list_lock);
- CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d"
- " ocd_grant: %d, lost: %ld.\n", data->ocd_connect_flags,
+ CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d, lost: %ld.\n",
+ data->ocd_connect_flags,
data->ocd_version, data->ocd_grant, lost_grant);
}
@@ -3217,8 +3212,6 @@ out_ptlrpcd:
static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
{
- int rc = 0;
-
switch (stage) {
case OBD_CLEANUP_EARLY: {
struct obd_import *imp;
@@ -3253,7 +3246,7 @@ static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
break;
}
}
- return rc;
+ return 0;
}
int osc_cleanup(struct obd_device *obd)
diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
index 38cc931a189..dc9e406f321 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/client.c
@@ -265,8 +265,7 @@ static void ptlrpc_at_adj_service(struct ptlrpc_request *req,
so just keep minimal history here */
oldse = at_measured(&at->iat_service_estimate[idx], serv_est);
if (oldse != 0)
- CDEBUG(D_ADAPTTO, "The RPC service estimate for %s ptl %d "
- "has changed from %d to %d\n",
+ CDEBUG(D_ADAPTTO, "The RPC service estimate for %s ptl %d has changed from %d to %d\n",
req->rq_import->imp_obd->obd_name, req->rq_request_portal,
oldse, at_get(&at->iat_service_estimate[idx]));
}
@@ -297,8 +296,7 @@ static void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
oldnl = at_measured(&at->iat_net_latency, nl);
if (oldnl != 0)
- CDEBUG(D_ADAPTTO, "The network latency for %s (nid %s) "
- "has changed from %d to %d\n",
+ CDEBUG(D_ADAPTTO, "The network latency for %s (nid %s) has changed from %d to %d\n",
req->rq_import->imp_obd->obd_name,
obd_uuid2str(
&req->rq_import->imp_connection->c_remote_uuid),
@@ -371,8 +369,8 @@ static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
ptlrpc_at_get_net_latency(req);
DEBUG_REQ(D_ADAPTTO, req,
- "Early reply #%d, new deadline in "CFS_DURATION_T"s "
- "("CFS_DURATION_T"s)", req->rq_early_count,
+ "Early reply #%d, new deadline in " CFS_DURATION_T "s (" CFS_DURATION_T "s)",
+ req->rq_early_count,
cfs_time_sub(req->rq_deadline, get_seconds()),
cfs_time_sub(req->rq_deadline, olddl));
@@ -445,8 +443,8 @@ void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq)
LASSERTF(list_empty(&pool->prp_req_list) ||
size == pool->prp_rq_size,
- "Trying to change pool size with nonempty pool "
- "from %d to %d bytes\n", pool->prp_rq_size, size);
+ "Trying to change pool size with nonempty pool from %d to %d bytes\n",
+ pool->prp_rq_size, size);
spin_lock(&pool->prp_lock);
pool->prp_rq_size = size;
@@ -1146,11 +1144,10 @@ static int ptlrpc_check_status(struct ptlrpc_request *req)
struct obd_import *imp = req->rq_import;
__u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
if (ptlrpc_console_allow(req))
- LCONSOLE_ERROR_MSG(0x011, "%s: Communicating with %s,"
- " operation %s failed with %d.\n",
+ LCONSOLE_ERROR_MSG(0x011, "%s: Communicating with %s, operation %s failed with %d.\n",
imp->imp_obd->obd_name,
libcfs_nid2str(
- imp->imp_connection->c_peer.nid),
+ imp->imp_connection->c_peer.nid),
ll_opcode2str(opc), err);
return err < 0 ? err : -EINVAL;
}
@@ -1206,8 +1203,7 @@ static int after_reply(struct ptlrpc_request *req)
if (req->rq_reply_truncate) {
if (ptlrpc_no_resend(req)) {
- DEBUG_REQ(D_ERROR, req, "reply buffer overflow,"
- " expected: %d, actual size: %d",
+ DEBUG_REQ(D_ERROR, req, "reply buffer overflow, expected: %d, actual size: %d",
req->rq_nob_received, req->rq_repbuf_len);
return -EOVERFLOW;
}
@@ -1259,8 +1255,7 @@ static int after_reply(struct ptlrpc_request *req)
/* new xid is already allocated for bulk in
* ptlrpc_check_set() */
req->rq_xid = ptlrpc_next_xid();
- DEBUG_REQ(D_RPCTRACE, req, "Allocating new xid for "
- "resend on EINPROGRESS");
+ DEBUG_REQ(D_RPCTRACE, req, "Allocating new xid for resend on EINPROGRESS");
}
/* Readjust the timeout for current conditions */
@@ -1412,8 +1407,8 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req)
req->rq_waiting = 1;
spin_unlock(&req->rq_lock);
- DEBUG_REQ(D_HA, req, "req from PID %d waiting for recovery: "
- "(%s != %s)", lustre_msg_get_status(req->rq_reqmsg),
+ DEBUG_REQ(D_HA, req, "req from PID %d waiting for recovery: (%s != %s)",
+ lustre_msg_get_status(req->rq_reqmsg),
ptlrpc_import_state_name(req->rq_send_state),
ptlrpc_import_state_name(imp->imp_state));
LASSERT(list_empty(&req->rq_list));
@@ -1450,8 +1445,8 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req)
}
}
- CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:nid:opc"
- " %s:%s:%d:%llu:%s:%d\n", current_comm(),
+ CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:nid:opc %s:%s:%d:%llu:%s:%d\n",
+ current_comm(),
imp->imp_obd->obd_uuid.uuid,
lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
libcfs_nid2str(imp->imp_connection->c_peer.nid),
@@ -1828,12 +1823,11 @@ interpret:
ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
CDEBUG(req->rq_reqmsg != NULL ? D_RPCTRACE : 0,
- "Completed RPC pname:cluuid:pid:xid:nid:"
- "opc %s:%s:%d:%llu:%s:%d\n",
- current_comm(), imp->imp_obd->obd_uuid.uuid,
- lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
- libcfs_nid2str(imp->imp_connection->c_peer.nid),
- lustre_msg_get_opc(req->rq_reqmsg));
+ "Completed RPC pname:cluuid:pid:xid:nid:opc %s:%s:%d:%llu:%s:%d\n",
+ current_comm(), imp->imp_obd->obd_uuid.uuid,
+ lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
+ libcfs_nid2str(imp->imp_connection->c_peer.nid),
+ lustre_msg_get_opc(req->rq_reqmsg));
spin_lock(&imp->imp_lock);
/* Request already may be not on sending or delaying list. This
@@ -2215,10 +2209,8 @@ EXPORT_SYMBOL(ptlrpc_set_wait);
*/
static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
{
- if (request == NULL) {
+ if (request == NULL)
return;
- }
-
LASSERTF(!request->rq_receiving_reply, "req %p\n", request);
LASSERTF(request->rq_rqbd == NULL, "req %p\n", request);/* client-side */
LASSERTF(list_empty(&request->rq_list), "req %p\n", request);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/events.c b/drivers/staging/lustre/lustre/ptlrpc/events.c
index 32dfffa76a5..7f8644e0111 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/events.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/events.c
@@ -128,8 +128,8 @@ void reply_in_callback(lnet_event_t *ev)
((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT))) {
/* Early reply */
DEBUG_REQ(D_ADAPTTO, req,
- "Early reply received: mlen=%u offset=%d replen=%d "
- "replied=%d unlinked=%d", ev->mlength, ev->offset,
+ "Early reply received: mlen=%u offset=%d replen=%d replied=%d unlinked=%d",
+ ev->mlength, ev->offset,
req->rq_replen, req->rq_replied, ev->unlinked);
req->rq_early_count++; /* number received, client side */
@@ -313,8 +313,7 @@ void request_in_callback(lnet_event_t *ev)
}
req = ptlrpc_request_cache_alloc(GFP_ATOMIC);
if (req == NULL) {
- CERROR("Can't allocate incoming request descriptor: "
- "Dropping %s RPC from %s\n",
+ CERROR("Can't allocate incoming request descriptor: Dropping %s RPC from %s\n",
service->srv_name,
libcfs_id2str(ev->initiator));
return;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c
index 2e7e7171ca6..4ceb90db02a 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/import.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/import.c
@@ -157,18 +157,14 @@ int ptlrpc_set_import_discon(struct obd_import *imp, __u32 conn_cnt)
&target_start, &target_len);
if (imp->imp_replayable) {
- LCONSOLE_WARN("%s: Connection to %.*s (at %s) was "
- "lost; in progress operations using this "
- "service will wait for recovery to complete\n",
- imp->imp_obd->obd_name, target_len, target_start,
- libcfs_nid2str(imp->imp_connection->c_peer.nid));
+ LCONSOLE_WARN("%s: Connection to %.*s (at %s) was lost; in progress operations using this service will wait for recovery to complete\n",
+ imp->imp_obd->obd_name, target_len, target_start,
+ libcfs_nid2str(imp->imp_connection->c_peer.nid));
} else {
- LCONSOLE_ERROR_MSG(0x166, "%s: Connection to "
- "%.*s (at %s) was lost; in progress "
- "operations using this service will fail\n",
- imp->imp_obd->obd_name,
- target_len, target_start,
- libcfs_nid2str(imp->imp_connection->c_peer.nid));
+ LCONSOLE_ERROR_MSG(0x166, "%s: Connection to %.*s (at %s) was lost; in progress operations using this service will fail\n",
+ imp->imp_obd->obd_name,
+ target_len, target_start,
+ libcfs_nid2str(imp->imp_connection->c_peer.nid));
}
IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_DISCON);
spin_unlock(&imp->imp_lock);
@@ -328,8 +324,8 @@ void ptlrpc_invalidate_import(struct obd_import *imp)
* sluggish nets). Let's check this. If there
* is no inflight and unregistering != 0, this
* is bug. */
- LASSERTF(count == 0, "Some RPCs are still "
- "unregistering: %d\n", count);
+ LASSERTF(count == 0, "Some RPCs are still unregistering: %d\n",
+ count);
/* Let's save one loop as soon as inflight have
* dropped to zero. No new inflights possible at
@@ -353,12 +349,11 @@ void ptlrpc_invalidate_import(struct obd_import *imp)
"still on delayed list");
}
- CERROR("%s: RPCs in \"%s\" phase found (%d). "
- "Network is sluggish? Waiting them "
- "to error out.\n", cli_tgt,
+ CERROR("%s: RPCs in \"%s\" phase found (%d). Network is sluggish? Waiting them to error out.\n",
+ cli_tgt,
ptlrpc_phase2str(RQ_PHASE_UNREGISTERING),
atomic_read(&imp->
- imp_unregistering));
+ imp_unregistering));
}
spin_unlock(&imp->imp_lock);
}
@@ -413,8 +408,7 @@ void ptlrpc_fail_import(struct obd_import *imp, __u32 conn_cnt)
if (ptlrpc_set_import_discon(imp, conn_cnt)) {
if (!imp->imp_replayable) {
- CDEBUG(D_HA, "import %s@%s for %s not replayable, "
- "auto-deactivating\n",
+ CDEBUG(D_HA, "import %s@%s for %s not replayable, auto-deactivating\n",
obd2cli_tgt(imp->imp_obd),
imp->imp_connection->c_remote_uuid.uuid,
imp->imp_obd->obd_name);
@@ -541,8 +535,8 @@ static int import_select_connection(struct obd_import *imp)
at_reset(at, CONNECTION_SWITCH_MAX);
}
LASSERT(imp_conn->oic_last_attempt);
- CDEBUG(D_HA, "%s: tried all connections, increasing latency "
- "to %ds\n", imp->imp_obd->obd_name, at_get(at));
+ CDEBUG(D_HA, "%s: tried all connections, increasing latency to %ds\n",
+ imp->imp_obd->obd_name, at_get(at));
}
imp_conn->oic_last_attempt = cfs_time_current_64();
@@ -564,8 +558,7 @@ static int import_select_connection(struct obd_import *imp)
deuuidify(obd2cli_tgt(imp->imp_obd), NULL,
&target_start, &target_len);
- CDEBUG(D_HA, "%s: Connection changing to"
- " %.*s (at %s)\n",
+ CDEBUG(D_HA, "%s: Connection changing to %.*s (at %s)\n",
imp->imp_obd->obd_name,
target_len, target_start,
libcfs_nid2str(imp_conn->oic_conn->c_peer.nid));
@@ -935,14 +928,13 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
lustre_msg_get_handle(
request->rq_repmsg)->cookie);
} else {
- LCONSOLE_WARN("Evicted from %s (at %s) "
- "after server handle changed from %#llx to %#llx\n",
+ LCONSOLE_WARN("Evicted from %s (at %s) after server handle changed from %#llx to %#llx\n",
obd2cli_tgt(imp->imp_obd),
imp->imp_connection-> \
c_remote_uuid.uuid,
imp->imp_remote_handle.cookie,
lustre_msg_get_handle(
- request->rq_repmsg)->cookie);
+ request->rq_repmsg)->cookie);
}
@@ -962,8 +954,8 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
}
if (imp->imp_invalid) {
- CDEBUG(D_HA, "%s: reconnected but import is invalid; "
- "marking evicted\n", imp->imp_obd->obd_name);
+ CDEBUG(D_HA, "%s: reconnected but import is invalid; marking evicted\n",
+ imp->imp_obd->obd_name);
IMPORT_SET_STATE(imp, LUSTRE_IMP_EVICTED);
} else if (MSG_CONNECT_RECOVERING & msg_flags) {
CDEBUG(D_HA, "%s: reconnected to %s during replay\n",
@@ -985,8 +977,8 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
imp->imp_last_replay_transno = 0;
IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY);
} else {
- DEBUG_REQ(D_HA, request, "%s: evicting (reconnect/recover flags"
- " not set: %x)", imp->imp_obd->obd_name, msg_flags);
+ DEBUG_REQ(D_HA, request, "%s: evicting (reconnect/recover flags not set: %x)",
+ imp->imp_obd->obd_name, msg_flags);
imp->imp_remote_handle =
*lustre_msg_get_handle(request->rq_repmsg);
IMPORT_SET_STATE(imp, LUSTRE_IMP_EVICTED);
@@ -994,17 +986,13 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
/* Sanity checks for a reconnected import. */
if (!(imp->imp_replayable) != !(msg_flags & MSG_CONNECT_REPLAYABLE)) {
- CERROR("imp_replayable flag does not match server "
- "after reconnect. We should LBUG right here.\n");
+ CERROR("imp_replayable flag does not match server after reconnect. We should LBUG right here.\n");
}
if (lustre_msg_get_last_committed(request->rq_repmsg) > 0 &&
lustre_msg_get_last_committed(request->rq_repmsg) <
aa->pcaa_peer_committed) {
- CERROR("%s went back in time (transno %lld"
- " was previously committed, server now claims %lld"
- ")! See https://bugzilla.lustre.org/show_bug.cgi?"
- "id=9646\n",
+ CERROR("%s went back in time (transno %lld was previously committed, server now claims %lld)! See https://bugzilla.lustre.org/show_bug.cgi?id=9646\n",
obd2cli_tgt(imp->imp_obd), aa->pcaa_peer_committed,
lustre_msg_get_last_committed(request->rq_repmsg));
}
@@ -1013,8 +1001,7 @@ finish:
rc = ptlrpc_import_recovery_state_machine(imp);
if (rc != 0) {
if (rc == -ENOTCONN) {
- CDEBUG(D_HA, "evicted/aborted by %s@%s during recovery;"
- "invalidating and reconnecting\n",
+ CDEBUG(D_HA, "evicted/aborted by %s@%s during recovery; invalidating and reconnecting\n",
obd2cli_tgt(imp->imp_obd),
imp->imp_connection->c_remote_uuid.uuid);
ptlrpc_connect_import(imp);
@@ -1034,9 +1021,7 @@ finish:
if ((imp->imp_connect_flags_orig & OBD_CONNECT_IBITS) &&
!(ocd->ocd_connect_flags & OBD_CONNECT_IBITS)) {
- LCONSOLE_WARN("%s: MDS %s does not support ibits "
- "lock, either very old or invalid: "
- "requested %llx, replied %llx\n",
+ LCONSOLE_WARN("%s: MDS %s does not support ibits lock, either very old or invalid: requested %llx, replied %llx\n",
imp->imp_obd->obd_name,
imp->imp_connection->c_remote_uuid.uuid,
imp->imp_connect_flags_orig,
@@ -1052,13 +1037,12 @@ finish:
LUSTRE_VERSION_OFFSET_WARN)) {
/* Sigh, some compilers do not like #ifdef in the middle
of macro arguments */
- const char *older = "older. Consider upgrading server "
- "or downgrading client";
- const char *newer = "newer than client version. "
- "Consider upgrading client";
+ const char *older = "older. Consider upgrading server or downgrading client"
+ ;
+ const char *newer = "newer than client version. Consider upgrading client"
+ ;
- LCONSOLE_WARN("Server %s version (%d.%d.%d.%d) "
- "is much %s (%s)\n",
+ LCONSOLE_WARN("Server %s version (%d.%d.%d.%d) is much %s (%s)\n",
obd2cli_tgt(imp->imp_obd),
OBD_OCD_VERSION_MAJOR(ocd->ocd_version),
OBD_OCD_VERSION_MINOR(ocd->ocd_version),
@@ -1095,10 +1079,7 @@ finish:
* the checksum types it doesn't support */
if ((ocd->ocd_cksum_types &
cksum_types_supported_client()) == 0) {
- LCONSOLE_WARN("The negotiation of the checksum "
- "algorithm to use with server %s "
- "failed (%x/%x), disabling "
- "checksums\n",
+ LCONSOLE_WARN("The negotiation of the checksum algorithm to use with server %s failed (%x/%x), disabling checksums\n",
obd2cli_tgt(imp->imp_obd),
ocd->ocd_cksum_types,
cksum_types_supported_client());
@@ -1191,17 +1172,13 @@ out:
* connection from liblustre clients, so we
* should never see this from VFS context
*/
- LCONSOLE_ERROR_MSG(0x16a, "Server %s version "
- "(%d.%d.%d.%d)"
- " refused connection from this client "
- "with an incompatible version (%s). "
- "Client must be recompiled\n",
- obd2cli_tgt(imp->imp_obd),
- OBD_OCD_VERSION_MAJOR(ocd->ocd_version),
- OBD_OCD_VERSION_MINOR(ocd->ocd_version),
- OBD_OCD_VERSION_PATCH(ocd->ocd_version),
- OBD_OCD_VERSION_FIX(ocd->ocd_version),
- LUSTRE_VERSION_STRING);
+ LCONSOLE_ERROR_MSG(0x16a, "Server %s version (%d.%d.%d.%d) refused connection from this client with an incompatible version (%s). Client must be recompiled\n",
+ obd2cli_tgt(imp->imp_obd),
+ OBD_OCD_VERSION_MAJOR(ocd->ocd_version),
+ OBD_OCD_VERSION_MINOR(ocd->ocd_version),
+ OBD_OCD_VERSION_PATCH(ocd->ocd_version),
+ OBD_OCD_VERSION_FIX(ocd->ocd_version),
+ LUSTRE_VERSION_STRING);
ptlrpc_deactivate_import(imp);
IMPORT_SET_STATE(imp, LUSTRE_IMP_CLOSED);
}
@@ -1237,8 +1214,7 @@ static int completed_replay_interpret(const struct lu_env *env,
"%s: version recovery fails, reconnecting\n",
req->rq_import->imp_obd->obd_name);
} else {
- CDEBUG(D_HA, "%s: LAST_REPLAY message error: %d, "
- "reconnecting\n",
+ CDEBUG(D_HA, "%s: LAST_REPLAY message error: %d, reconnecting\n",
req->rq_import->imp_obd->obd_name,
req->rq_status);
}
@@ -1343,9 +1319,7 @@ int ptlrpc_import_recovery_state_machine(struct obd_import *imp)
/* Don't care about MGC eviction */
if (strcmp(imp->imp_obd->obd_type->typ_name,
LUSTRE_MGC_NAME) != 0) {
- LCONSOLE_ERROR_MSG(0x167, "%s: This client was evicted "
- "by %.*s; in progress operations "
- "using this service will fail.\n",
+ LCONSOLE_ERROR_MSG(0x167, "%s: This client was evicted by %.*s; in progress operations using this service will fail.\n",
imp->imp_obd->obd_name, target_len,
target_start);
}
@@ -1455,8 +1429,7 @@ int ptlrpc_disconnect_import(struct obd_import *imp, int noclose)
break;
default:
rc = -EINVAL;
- CERROR("%s: don't know how to disconnect from %s "
- "(connect_op %d): rc = %d\n",
+ CERROR("%s: don't know how to disconnect from %s (connect_op %d): rc = %d\n",
imp->imp_obd->obd_name, obd2cli_tgt(imp->imp_obd),
imp->imp_connect_op, rc);
return rc;
@@ -1607,8 +1580,8 @@ int at_measured(struct adaptive_timeout *at, unsigned int val)
at->at_current = max(at->at_current, at_min);
if (at->at_current != old)
- CDEBUG(D_OTHER, "AT %p change: old=%u new=%u delta=%d "
- "(val=%u) hist %u %u %u %u\n", at,
+ CDEBUG(D_OTHER, "AT %p change: old=%u new=%u delta=%d (val=%u) hist %u %u %u %u\n",
+ at,
old, at->at_current, at->at_current - old, val,
at->at_hist[0], at->at_hist[1], at->at_hist[2],
at->at_hist[3]);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/layout.c b/drivers/staging/lustre/lustre/ptlrpc/layout.c
index 5b8337187b5..dc5ceb55d00 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/layout.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/layout.c
@@ -980,18 +980,7 @@ EXPORT_SYMBOL(RMF_CONN);
struct req_msg_field RMF_CONNECT_DATA =
DEFINE_MSGF("cdata",
RMF_F_NO_SIZE_CHECK /* we allow extra space for interop */,
-#if LUSTRE_VERSION_CODE > OBD_OCD_VERSION(2, 7, 50, 0)
sizeof(struct obd_connect_data),
-#else
-/* For interoperability with 1.8 and 2.0 clients/servers.
- * The RPC verification code allows larger RPC buffers, but not
- * smaller buffers. Until we no longer need to keep compatibility
- * with older servers/clients we can only check that the buffer
- * size is at least as large as obd_connect_data_v1. That is not
- * not in itself harmful, since the chance of just corrupting this
- * field is low. See JIRA LU-16 for details. */
- sizeof(struct obd_connect_data_v1),
-#endif
lustre_swab_connect, NULL);
EXPORT_SYMBOL(RMF_CONNECT_DATA);
@@ -1897,8 +1886,8 @@ swabber_dumper_helper(struct req_capsule *pill,
swabber(value);
ptlrpc_buf_set_swabbed(pill->rc_req, inout, offset);
if (dump) {
- CDEBUG(D_RPCTRACE, "Dump of swabbed field %s "
- "follows\n", field->rmf_name);
+ CDEBUG(D_RPCTRACE, "Dump of swabbed field %s follows\n",
+ field->rmf_name);
field->rmf_dumper(value);
}
@@ -1914,8 +1903,7 @@ swabber_dumper_helper(struct req_capsule *pill,
i < n;
i++, p += field->rmf_size) {
if (dump) {
- CDEBUG(D_RPCTRACE, "Dump of %sarray field %s, "
- "element %d follows\n",
+ CDEBUG(D_RPCTRACE, "Dump of %sarray field %s, element %d follows\n",
do_swab ? "unswabbed " : "", field->rmf_name, i);
field->rmf_dumper(p);
}
@@ -1923,8 +1911,8 @@ swabber_dumper_helper(struct req_capsule *pill,
continue;
swabber(p);
if (dump) {
- CDEBUG(D_RPCTRACE, "Dump of swabbed array field %s, "
- "element %d follows\n", field->rmf_name, i);
+ CDEBUG(D_RPCTRACE, "Dump of swabbed array field %s, element %d follows\n",
+ field->rmf_name, i);
field->rmf_dumper(value);
}
}
@@ -1983,8 +1971,7 @@ static void *__req_capsule_get(struct req_capsule *pill,
*/
len = lustre_msg_buflen(msg, offset);
if ((len % field->rmf_size) != 0) {
- CERROR("%s: array field size mismatch "
- "%d modulo %d != 0 (%d)\n",
+ CERROR("%s: array field size mismatch %d modulo %d != 0 (%d)\n",
field->rmf_name, len, field->rmf_size, loc);
return NULL;
}
@@ -1997,8 +1984,7 @@ static void *__req_capsule_get(struct req_capsule *pill,
if (value == NULL) {
DEBUG_REQ(D_ERROR, pill->rc_req,
- "Wrong buffer for field `%s' (%d of %d) "
- "in format `%s': %d vs. %d (%s)\n",
+ "Wrong buffer for field `%s' (%d of %d) in format `%s': %d vs. %d (%s)\n",
field->rmf_name, offset, lustre_msg_bufcount(msg),
fmt->rf_name, lustre_msg_buflen(msg, offset), len,
rcl_names[loc]);
@@ -2013,7 +1999,7 @@ static void *__req_capsule_get(struct req_capsule *pill,
/**
* Dump a request and/or reply
*/
-void __req_capsule_dump(struct req_capsule *pill, enum req_location loc)
+static void __req_capsule_dump(struct req_capsule *pill, enum req_location loc)
{
const struct req_format *fmt;
const struct req_msg_field *field;
@@ -2031,8 +2017,8 @@ void __req_capsule_dump(struct req_capsule *pill, enum req_location loc)
* have a specific dumper
*/
len = req_capsule_get_size(pill, field, loc);
- CDEBUG(D_RPCTRACE, "Field %s has no dumper function;"
- "field size is %d\n", field->rmf_name, len);
+ CDEBUG(D_RPCTRACE, "Field %s has no dumper function; field size is %d\n",
+ field->rmf_name, len);
} else {
/* It's the dumping side-effect that we're interested in */
(void) __req_capsule_get(pill, field, loc, NULL, 1);
@@ -2184,8 +2170,7 @@ void req_capsule_set_size(struct req_capsule *pill,
(size > 0)) {
if ((field->rmf_flags & RMF_F_STRUCT_ARRAY) &&
(size % field->rmf_size != 0)) {
- CERROR("%s: array field size mismatch "
- "%d %% %d != 0 (%d)\n",
+ CERROR("%s: array field size mismatch %d %% %d != 0 (%d)\n",
field->rmf_name, size, field->rmf_size, loc);
LBUG();
} else if (!(field->rmf_flags & RMF_F_STRUCT_ARRAY) &&
diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
index 56f825fbb17..e9baf5bbee3 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
@@ -334,8 +334,7 @@ static int llog_client_read_header(const struct lu_env *env,
llh_hdr->lrh_type, LLOG_HDR_MAGIC);
rc = -EIO;
} else if (llh_hdr->lrh_len != LLOG_CHUNK_SIZE) {
- CERROR("incorrectly sized log header: %#x "
- "(expecting %#x)\n",
+ CERROR("incorrectly sized log header: %#x (expecting %#x)\n",
llh_hdr->lrh_len, LLOG_CHUNK_SIZE);
CERROR("you may need to re-run lconf --write_conf.\n");
rc = -EIO;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
index 87b9764a4f1..4011e0050fc 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
@@ -175,7 +175,7 @@ const char *ll_opcode2str(__u32 opcode)
return ll_rpc_opcode_table[offset].opname;
}
-const char* ll_eopcode2str(__u32 opcode)
+const char *ll_eopcode2str(__u32 opcode)
{
LASSERT(ll_eopcode_table[opcode].opcode == opcode);
return ll_eopcode_table[opcode].opname;
@@ -694,8 +694,7 @@ default_queue:
if (queue == PTLRPC_NRS_QUEUE_HP && !nrs_svc_has_hp(svc)) {
rc = -ENODEV;
goto out;
- }
- else if (queue == PTLRPC_NRS_QUEUE_BOTH && !nrs_svc_has_hp(svc))
+ } else if (queue == PTLRPC_NRS_QUEUE_BOTH && !nrs_svc_has_hp(svc))
queue = PTLRPC_NRS_QUEUE_REG;
/**
@@ -746,8 +745,7 @@ ptlrpc_lprocfs_svc_req_history_seek(struct ptlrpc_service_part *svcpt,
svcpt->scp_service->srv_name, svcpt->scp_cpt,
srhi->srhi_seq, srhi->srhi_req->rq_history_seq);
LASSERTF(!list_empty(&svcpt->scp_hist_reqs),
- "%s:%d: seek offset %llu, request seq %llu, "
- "last culled %llu\n",
+ "%s:%d: seek offset %llu, request seq %llu, last culled %llu\n",
svcpt->scp_service->srv_name, svcpt->scp_cpt,
seq, srhi->srhi_seq, svcpt->scp_hist_seq_culled);
e = &srhi->srhi_req->rq_history_list;
@@ -814,8 +812,8 @@ ptlrpc_lprocfs_svc_req_history_start(struct seq_file *s, loff_t *pos)
int i;
if (sizeof(loff_t) != sizeof(__u64)) { /* can't support */
- CWARN("Failed to read request history because size of loff_t "
- "%d can't match size of u64\n", (int)sizeof(loff_t));
+ CWARN("Failed to read request history because size of loff_t %d can't match size of u64\n",
+ (int)sizeof(loff_t));
return NULL;
}
@@ -1298,14 +1296,12 @@ int lprocfs_wr_import(struct file *file, const char *buffer,
if (*endptr) {
CERROR("config: wrong instance # %s\n", ptr);
} else if (inst != imp->imp_connect_data.ocd_instance) {
- CDEBUG(D_INFO, "IR: %s is connecting to an obsoleted "
- "target(%u/%u), reconnecting...\n",
+ CDEBUG(D_INFO, "IR: %s is connecting to an obsoleted target(%u/%u), reconnecting...\n",
imp->imp_obd->obd_name,
imp->imp_connect_data.ocd_instance, inst);
do_reconn = 1;
} else {
- CDEBUG(D_INFO, "IR: %s has already been connecting to "
- "new target(%u)\n",
+ CDEBUG(D_INFO, "IR: %s has already been connecting to new target(%u)\n",
imp->imp_obd->obd_name, inst);
}
}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
index c1e8aa4d5ec..f715e9a8b99 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
@@ -224,8 +224,8 @@ int ptlrpc_register_bulk(struct ptlrpc_request *req)
total_md - desc->bd_md_count);
spin_unlock(&desc->bd_lock);
- CDEBUG(D_NET, "Setup %u bulk %s buffers: %u pages %u bytes, "
- "xid x%#llx-%#llx, portal %u\n", desc->bd_md_count,
+ CDEBUG(D_NET, "Setup %u bulk %s buffers: %u pages %u bytes, xid x%#llx-%#llx, portal %u\n",
+ desc->bd_md_count,
desc->bd_type == BULK_GET_SOURCE ? "get-source" : "put-sink",
desc->bd_iov_count, desc->bd_nob,
desc->bd_last_xid, req->rq_xid, desc->bd_portal);
@@ -337,8 +337,7 @@ static void ptlrpc_at_set_reply(struct ptlrpc_request *req, int flags)
if (req->rq_reqmsg &&
!(lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
- CDEBUG(D_ADAPTTO, "No early reply support: flags=%#x "
- "req_flags=%#x magic=%d:%x/%x len=%d\n",
+ CDEBUG(D_ADAPTTO, "No early reply support: flags=%#x req_flags=%#x magic=%d:%x/%x len=%d\n",
flags, lustre_msg_get_flags(req->rq_reqmsg),
lustre_msg_is_v1(req->rq_reqmsg),
lustre_msg_get_magic(req->rq_reqmsg),
diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs.c b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
index 181301bd208..d5fd7215c72 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/nrs.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
@@ -770,8 +770,8 @@ static int nrs_policy_register(struct ptlrpc_nrs *nrs,
tmp = nrs_policy_find_locked(nrs, policy->pol_desc->pd_name);
if (tmp != NULL) {
- CERROR("NRS policy %s has been registered, can't register it "
- "for %s\n", policy->pol_desc->pd_name,
+ CERROR("NRS policy %s has been registered, can't register it for %s\n",
+ policy->pol_desc->pd_name,
svcpt->scp_service->srv_name);
nrs_policy_put_locked(tmp);
@@ -882,8 +882,7 @@ static int nrs_register_policies_locked(struct ptlrpc_nrs *nrs)
if (nrs_policy_compatible(svc, desc)) {
rc = nrs_policy_register(nrs, desc);
if (rc != 0) {
- CERROR("Failed to register NRS policy %s for "
- "partition %d of service %s: %d\n",
+ CERROR("Failed to register NRS policy %s for partition %d of service %s: %d\n",
desc->pd_name, svcpt->scp_cpt,
svc->srv_name, rc);
/**
@@ -1082,8 +1081,7 @@ again:
if (rc == -ENOENT) {
rc = 0;
} else if (rc != 0) {
- CERROR("Failed to unregister NRS policy %s for "
- "partition %d of service %s: %d\n",
+ CERROR("Failed to unregister NRS policy %s for partition %d of service %s: %d\n",
desc->pd_name, svcpt->scp_cpt,
svcpt->scp_service->srv_name, rc);
return rc;
@@ -1145,18 +1143,15 @@ int ptlrpc_nrs_policy_register(struct ptlrpc_nrs_pol_conf *conf)
if ((conf->nc_flags & PTLRPC_NRS_FL_REG_EXTERN) &&
(conf->nc_flags & (PTLRPC_NRS_FL_FALLBACK |
PTLRPC_NRS_FL_REG_START))) {
- CERROR("NRS: failing to register policy %s. Please check "
- "policy flags; external policies cannot act as fallback "
- "policies, or be started immediately upon registration "
- "without interaction with lprocfs\n", conf->nc_name);
+ CERROR("NRS: failing to register policy %s. Please check policy flags; external policies cannot act as fallback policies, or be started immediately upon registration without interaction with lprocfs\n",
+ conf->nc_name);
return -EINVAL;
}
mutex_lock(&nrs_core.nrs_mutex);
if (nrs_policy_find_desc_locked(conf->nc_name) != NULL) {
- CERROR("NRS: failing to register policy %s which has already "
- "been registered with NRS core!\n",
+ CERROR("NRS: failing to register policy %s which has already been registered with NRS core!\n",
conf->nc_name);
rc = -EEXIST;
goto fail;
@@ -1209,8 +1204,7 @@ again:
nrs = nrs_svcpt2nrs(svcpt, hp);
rc = nrs_policy_register(nrs, desc);
if (rc != 0) {
- CERROR("Failed to register NRS policy %s for "
- "partition %d of service %s: %d\n",
+ CERROR("Failed to register NRS policy %s for partition %d of service %s: %d\n",
desc->pd_name, svcpt->scp_cpt,
svcpt->scp_service->srv_name, rc);
@@ -1281,8 +1275,7 @@ int ptlrpc_nrs_policy_unregister(struct ptlrpc_nrs_pol_conf *conf)
LASSERT(conf != NULL);
if (conf->nc_flags & PTLRPC_NRS_FL_FALLBACK) {
- CERROR("Unable to unregister a fallback policy, unless the "
- "PTLRPC service is stopping.\n");
+ CERROR("Unable to unregister a fallback policy, unless the PTLRPC service is stopping.\n");
return -EPERM;
}
@@ -1292,8 +1285,7 @@ int ptlrpc_nrs_policy_unregister(struct ptlrpc_nrs_pol_conf *conf)
desc = nrs_policy_find_desc_locked(conf->nc_name);
if (desc == NULL) {
- CERROR("Failing to unregister NRS policy %s which has "
- "not been registered with NRS core!\n",
+ CERROR("Failing to unregister NRS policy %s which has not been registered with NRS core!\n",
conf->nc_name);
rc = -ENOENT;
goto not_exist;
@@ -1304,9 +1296,8 @@ int ptlrpc_nrs_policy_unregister(struct ptlrpc_nrs_pol_conf *conf)
rc = nrs_policy_unregister_locked(desc);
if (rc < 0) {
if (rc == -EBUSY)
- CERROR("Please first stop policy %s on all service "
- "partitions and then retry to unregister the "
- "policy.\n", conf->nc_name);
+ CERROR("Please first stop policy %s on all service partitions and then retry to unregister the policy.\n",
+ conf->nc_name);
goto fail;
}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
index 50556db15f7..2f45f765783 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
@@ -412,8 +412,8 @@ void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, int n, int min_size)
buflen = m->lm_buflens[n];
if (unlikely(buflen < min_size)) {
- CERROR("msg %p buffer[%d] size %d too small "
- "(required %d, opc=%d)\n", m, n, buflen, min_size,
+ CERROR("msg %p buffer[%d] size %d too small (required %d, opc=%d)\n",
+ m, n, buflen, min_size,
n == MSG_PTLRPC_BODY_OFF ? -1 : lustre_msg_get_opc(m));
return NULL;
}
@@ -749,21 +749,19 @@ char *lustre_msg_string(struct lustre_msg *m, int index, int max_len)
slen = strnlen(str, blen);
if (slen == blen) { /* not NULL terminated */
- CERROR("can't unpack non-NULL terminated string in "
- "msg %p buffer[%d] len %d\n", m, index, blen);
+ CERROR("can't unpack non-NULL terminated string in msg %p buffer[%d] len %d\n",
+ m, index, blen);
return NULL;
}
if (max_len == 0) {
if (slen != blen - 1) {
- CERROR("can't unpack short string in msg %p "
- "buffer[%d] len %d: strlen %d\n",
+ CERROR("can't unpack short string in msg %p buffer[%d] len %d: strlen %d\n",
m, index, blen, slen);
return NULL;
}
} else if (slen > max_len) {
- CERROR("can't unpack oversized string in msg %p "
- "buffer[%d] len %d strlen %d: max %d expected\n",
+ CERROR("can't unpack oversized string in msg %p buffer[%d] len %d strlen %d: max %d expected\n",
m, index, blen, slen, max_len);
return NULL;
}
@@ -1313,43 +1311,17 @@ __u32 lustre_msg_get_cksum(struct lustre_msg *msg)
}
}
-#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0)
-/*
- * In 1.6 and 1.8 the checksum was computed only on struct ptlrpc_body as
- * it was in 1.6 (88 bytes, smaller than the full size in 1.8). It makes
- * more sense to compute the checksum on the full ptlrpc_body, regardless
- * of what size it is, but in order to keep interoperability with 1.8 we
- * can optionally also checksum only the first 88 bytes (caller decides). */
-# define ptlrpc_body_cksum_size_compat18 88
-
-__u32 lustre_msg_calc_cksum(struct lustre_msg *msg, int compat18)
-#else
-# warning "remove checksum compatibility support for b1_8"
__u32 lustre_msg_calc_cksum(struct lustre_msg *msg)
-#endif
{
switch (msg->lm_magic) {
case LUSTRE_MSG_MAGIC_V2: {
struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
-#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0)
- __u32 crc;
- unsigned int hsize = 4;
- __u32 len = compat18 ? ptlrpc_body_cksum_size_compat18 :
- lustre_msg_buflen(msg, MSG_PTLRPC_BODY_OFF);
- LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
- cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32, (unsigned char *)pb,
- len, NULL, 0, (unsigned char *)&crc,
- &hsize);
- return crc;
-#else
-# warning "remove checksum compatibility support for b1_8"
__u32 crc;
unsigned int hsize = 4;
cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32, (unsigned char *)pb,
lustre_msg_buflen(msg, MSG_PTLRPC_BODY_OFF),
NULL, 0, (unsigned char *)&crc, &hsize);
return crc;
-#endif
}
default:
CERROR("incorrect message magic: %08x\n", msg->lm_magic);
@@ -2284,8 +2256,8 @@ void lustre_swab_quota_body(struct quota_body *b)
void dump_ioo(struct obd_ioobj *ioo)
{
CDEBUG(D_RPCTRACE,
- "obd_ioobj: ioo_oid="DOSTID", ioo_max_brw=%#x, "
- "ioo_bufct=%d\n", POSTID(&ioo->ioo_oid), ioo->ioo_max_brw,
+ "obd_ioobj: ioo_oid=" DOSTID ", ioo_max_brw=%#x, ioo_bufct=%d\n",
+ POSTID(&ioo->ioo_oid), ioo->ioo_max_brw,
ioo->ioo_bufcnt);
}
EXPORT_SYMBOL(dump_ioo);
@@ -2356,8 +2328,7 @@ void dump_obdo(struct obdo *oa)
CDEBUG(D_RPCTRACE, "obdo: o_handle = %lld\n",
oa->o_handle.cookie);
if (valid & OBD_MD_FLCOOKIE)
- CDEBUG(D_RPCTRACE, "obdo: o_lcookie = "
- "(llog_cookie dumping not yet implemented)\n");
+ CDEBUG(D_RPCTRACE, "obdo: o_lcookie = (llog_cookie dumping not yet implemented)\n");
}
EXPORT_SYMBOL(dump_obdo);
@@ -2421,17 +2392,15 @@ void _debug_req(struct ptlrpc_request *req,
va_start(args, fmt);
libcfs_debug_vmsg2(msgdata, fmt, args,
- " req@%p x%llu/t%lld(%lld) o%d->%s@%s:%d/%d"
- " lens %d/%d e %d to %d dl "CFS_TIME_T" ref %d "
- "fl "REQ_FLAGS_FMT"/%x/%x rc %d/%d\n",
+ " req@%p x%llu/t%lld(%lld) o%d->%s@%s:%d/%d lens %d/%d e %d to %d dl " CFS_TIME_T " ref %d fl " REQ_FLAGS_FMT "/%x/%x rc %d/%d\n",
req, req->rq_xid, req->rq_transno,
req_ok ? lustre_msg_get_transno(req->rq_reqmsg) : 0,
req_ok ? lustre_msg_get_opc(req->rq_reqmsg) : -1,
req->rq_import ?
- req->rq_import->imp_obd->obd_name :
- req->rq_export ?
- req->rq_export->exp_client_uuid.uuid :
- "<?>",
+ req->rq_import->imp_obd->obd_name :
+ req->rq_export ?
+ req->rq_export->exp_client_uuid.uuid :
+ "<?>",
libcfs_nid2str(nid),
req->rq_request_portal, req->rq_reply_portal,
req->rq_reqlen, req->rq_replen,
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pinger.c b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
index 20341b27a06..340d98a6413 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pinger.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
@@ -206,8 +206,7 @@ static void ptlrpc_pinger_process_import(struct obd_import *imp,
spin_unlock(&imp->imp_lock);
- CDEBUG(level == LUSTRE_IMP_FULL ? D_INFO : D_HA, "%s->%s: level %s/%u "
- "force %u force_next %u deactive %u pingable %u suppress %u\n",
+ CDEBUG(level == LUSTRE_IMP_FULL ? D_INFO : D_HA, "%s->%s: level %s/%u force %u force_next %u deactive %u pingable %u suppress %u\n",
imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd),
ptlrpc_import_state_name(level), level, force, force_next,
imp->imp_deactive, imp->imp_pingable, suppress);
@@ -220,8 +219,7 @@ static void ptlrpc_pinger_process_import(struct obd_import *imp,
} else if (level != LUSTRE_IMP_FULL ||
imp->imp_obd->obd_no_recov ||
imp_is_deactive(imp)) {
- CDEBUG(D_HA, "%s->%s: not pinging (in recovery "
- "or recovery disabled: %s)\n",
+ CDEBUG(D_HA, "%s->%s: not pinging (in recovery or recovery disabled: %s)\n",
imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd),
ptlrpc_import_state_name(level));
if (force) {
@@ -334,11 +332,7 @@ int ptlrpc_start_pinger(void)
thread_is_running(&pinger_thread), &lwi);
if (suppress_pings)
- CWARN("Pings will be suppressed at the request of the "
- "administrator. The configuration shall meet the "
- "additional requirements described in the manual. "
- "(Search for the \"suppress_pings\" kernel module "
- "parameter.)\n");
+ CWARN("Pings will be suppressed at the request of the administrator. The configuration shall meet the additional requirements described in the manual. (Search for the \"suppress_pings\" kernel module parameter.)\n");
return 0;
}
@@ -428,7 +422,7 @@ EXPORT_SYMBOL(ptlrpc_pinger_del_import);
* Register a timeout callback to the pinger list, and the callback will
* be called when timeout happens.
*/
-struct timeout_item* ptlrpc_new_timeout(int time, enum timeout_event event,
+struct timeout_item *ptlrpc_new_timeout(int time, enum timeout_event event,
timeout_cb_t cb, void *data)
{
struct timeout_item *ti;
@@ -448,7 +442,7 @@ struct timeout_item* ptlrpc_new_timeout(int time, enum timeout_event event,
}
/**
- * Register timeout event on the the pinger thread.
+ * Register timeout event on the pinger thread.
* Note: the timeout list is an sorted list with increased timeout value.
*/
static struct timeout_item*
@@ -623,11 +617,7 @@ static int ping_evictor_main(void *arg)
if (expire_time > exp->exp_last_request_time) {
class_export_get(exp);
spin_unlock(&obd->obd_dev_lock);
- LCONSOLE_WARN("%s: haven't heard from client %s"
- " (at %s) in %ld seconds. I think"
- " it's dead, and I am evicting"
- " it. exp %p, cur %ld expire %ld"
- " last %ld\n",
+ LCONSOLE_WARN("%s: haven't heard from client %s (at %s) in %ld seconds. I think it's dead, and I am evicting it. exp %p, cur %ld expire %ld last %ld\n",
obd->obd_name,
obd_uuid2str(&exp->exp_client_uuid),
obd_export_nid2str(exp),
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
index 357ea9f8bd5..cbcc541cac4 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
@@ -356,10 +356,9 @@ static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc)
if (atomic_read(&ps->set_new_count)) {
rc = ptlrpcd_steal_rqset(set, ps);
if (rc > 0)
- CDEBUG(D_RPCTRACE, "transfer %d"
- " async RPCs [%d->%d]\n",
- rc, partner->pc_index,
- pc->pc_index);
+ CDEBUG(D_RPCTRACE, "transfer %d async RPCs [%d->%d]\n",
+ rc, partner->pc_index,
+ pc->pc_index);
}
ptlrpc_reqset_put(ps);
} while (rc == 0 && pc->pc_cursor != first);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/recover.c b/drivers/staging/lustre/lustre/ptlrpc/recover.c
index e1bc77b83ff..7b1d7294733 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/recover.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/recover.c
@@ -237,8 +237,7 @@ void ptlrpc_request_handle_notconn(struct ptlrpc_request *failed_req)
if (ptlrpc_set_import_discon(imp,
lustre_msg_get_conn_cnt(failed_req->rq_reqmsg))) {
if (!imp->imp_replayable) {
- CDEBUG(D_HA, "import %s@%s for %s not replayable, "
- "auto-deactivating\n",
+ CDEBUG(D_HA, "import %s@%s for %s not replayable, auto-deactivating\n",
obd2cli_tgt(imp->imp_obd),
imp->imp_connection->c_remote_uuid.uuid,
imp->imp_obd->obd_name);
@@ -274,8 +273,8 @@ int ptlrpc_set_import_active(struct obd_import *imp, int active)
/* When deactivating, mark import invalid, and abort in-flight
* requests. */
if (!active) {
- LCONSOLE_WARN("setting import %s INACTIVE by administrator "
- "request\n", obd2cli_tgt(imp->imp_obd));
+ LCONSOLE_WARN("setting import %s INACTIVE by administrator request\n",
+ obd2cli_tgt(imp->imp_obd));
/* set before invalidate to avoid messages about imp_inval
* set without imp_deactive in ptlrpc_import_delay_req */
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec.c b/drivers/staging/lustre/lustre/ptlrpc/sec.c
index 4ce76852e6e..21e9dc9d558 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec.c
@@ -209,7 +209,7 @@ EXPORT_SYMBOL(sptlrpc_flavor2name_bulk);
char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize)
{
- snprintf(buf, bufsize, "%s", sptlrpc_flavor2name_base(sf->sf_rpc));
+ strlcpy(buf, sptlrpc_flavor2name_base(sf->sf_rpc), bufsize);
/*
* currently we don't support customized bulk specification for
@@ -220,10 +220,9 @@ char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize)
bspec[0] = '-';
sptlrpc_flavor2name_bulk(sf, &bspec[1], sizeof(bspec) - 1);
- strncat(buf, bspec, bufsize);
+ strlcat(buf, bspec, bufsize);
}
- buf[bufsize - 1] = '\0';
return buf;
}
EXPORT_SYMBOL(sptlrpc_flavor2name);
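
[Editor's note] The sptlrpc_flavor2name() hunk above swaps snprintf()/strncat() for strlcpy()/strlcat(). The practical difference: strncat()'s size argument bounds how many bytes are appended, not the total destination size, so passing bufsize could overrun the buffer, whereas strlcat() takes the full destination size and always NUL-terminates, which also makes the manual buf[bufsize - 1] = '\0' redundant. A hedged sketch of the resulting shape; the helper name and parameters are invented for illustration:

	#include <linux/string.h>	/* strlcpy(), strlcat() in the kernel */

	/* Illustrative only - not the real sptlrpc_flavor2name() signature. */
	static char *flavor_name(char *buf, size_t bufsize,
				 const char *base, const char *bulk_spec)
	{
		strlcpy(buf, base, bufsize);		/* always NUL-terminated */
		if (bulk_spec)
			strlcat(buf, bulk_spec, bufsize); /* bounded by total size */
		return buf;
	}
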
@@ -457,8 +456,8 @@ int sptlrpc_req_ctx_switch(struct ptlrpc_request *req,
LASSERT(req->rq_reqlen);
LASSERT(req->rq_replen);
- CDEBUG(D_SEC, "req %p: switch ctx %p(%u->%s) -> %p(%u->%s), "
- "switch sec %p(%s) -> %p(%s)\n", req,
+ CDEBUG(D_SEC, "req %p: switch ctx %p(%u->%s) -> %p(%u->%s), switch sec %p(%s) -> %p(%s)\n",
+ req,
oldctx, oldctx->cc_vcred.vc_uid, sec2target_str(oldctx->cc_sec),
newctx, newctx->cc_vcred.vc_uid, sec2target_str(newctx->cc_sec),
oldctx->cc_sec, oldctx->cc_sec->ps_policy->sp_name,
@@ -1842,8 +1841,8 @@ int sptlrpc_target_export_check(struct obd_export *exp,
req->rq_svc_ctx,
&flavor);
} else {
- CDEBUG(D_SEC, "exp %p (%x|%x|%x): is current flavor, "
- "install rvs ctx\n", exp, exp->exp_flvr.sf_rpc,
+ CDEBUG(D_SEC, "exp %p (%x|%x|%x): is current flavor, install rvs ctx\n",
+ exp, exp->exp_flvr.sf_rpc,
exp->exp_flvr_old[0].sf_rpc,
exp->exp_flvr_old[1].sf_rpc);
spin_unlock(&exp->exp_lock);
@@ -1856,13 +1855,12 @@ int sptlrpc_target_export_check(struct obd_export *exp,
if (exp->exp_flvr_expire[0]) {
if (exp->exp_flvr_expire[0] >= get_seconds()) {
if (flavor_allowed(&exp->exp_flvr_old[0], req)) {
- CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the "
- "middle one ("CFS_DURATION_T")\n", exp,
+ CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the middle one (" CFS_DURATION_T ")\n", exp,
exp->exp_flvr.sf_rpc,
exp->exp_flvr_old[0].sf_rpc,
exp->exp_flvr_old[1].sf_rpc,
exp->exp_flvr_expire[0] -
- get_seconds());
+ get_seconds());
spin_unlock(&exp->exp_lock);
return 0;
}
@@ -1881,13 +1879,13 @@ int sptlrpc_target_export_check(struct obd_export *exp,
if (exp->exp_flvr_changed == 0 && exp->exp_flvr_expire[1]) {
if (exp->exp_flvr_expire[1] >= get_seconds()) {
if (flavor_allowed(&exp->exp_flvr_old[1], req)) {
- CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the "
- "oldest one ("CFS_DURATION_T")\n", exp,
+ CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the oldest one (" CFS_DURATION_T ")\n",
+ exp,
exp->exp_flvr.sf_rpc,
exp->exp_flvr_old[0].sf_rpc,
exp->exp_flvr_old[1].sf_rpc,
exp->exp_flvr_expire[1] -
- get_seconds());
+ get_seconds());
spin_unlock(&exp->exp_lock);
return 0;
}
@@ -1907,8 +1905,7 @@ int sptlrpc_target_export_check(struct obd_export *exp,
spin_unlock(&exp->exp_lock);
- CWARN("exp %p(%s): req %p (%u|%u|%u|%u|%u|%u) with "
- "unauthorized flavor %x, expect %x|%x(%+ld)|%x(%+ld)\n",
+ CWARN("exp %p(%s): req %p (%u|%u|%u|%u|%u|%u) with unauthorized flavor %x, expect %x|%x(%+ld)|%x(%+ld)\n",
exp, exp->exp_obd->obd_name,
req, req->rq_auth_gss, req->rq_ctx_init, req->rq_ctx_fini,
req->rq_auth_usr_root, req->rq_auth_usr_mdt, req->rq_auth_usr_ost,
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
index cc68a1cf24e..0dabd83fd46 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
@@ -772,8 +772,7 @@ void sptlrpc_enc_pool_fini(void)
if (page_pools.epp_st_access > 0) {
CDEBUG(D_SEC,
- "max pages %lu, grows %u, grow fails %u, shrinks %u, "
- "access %lu, missing %lu, max qlen %u, max wait "
+ "max pages %lu, grows %u, grow fails %u, shrinks %u, access %lu, missing %lu, max qlen %u, max wait "
CFS_TIME_T"/%d\n",
page_pools.epp_st_max_pages, page_pools.epp_st_grows,
page_pools.epp_st_grow_fails,
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_null.c b/drivers/staging/lustre/lustre/ptlrpc/sec_null.c
index 099cec3b669..4e132435b45 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_null.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_null.c
@@ -101,16 +101,7 @@ int null_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
if (req->rq_early) {
cksums = lustre_msg_get_cksum(req->rq_repdata);
-#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0)
- if (lustre_msghdr_get_flags(req->rq_reqmsg) &
- MSGHDR_CKSUM_INCOMPAT18)
- cksumc = lustre_msg_calc_cksum(req->rq_repmsg, 0);
- else
- cksumc = lustre_msg_calc_cksum(req->rq_repmsg, 1);
-#else
-# warning "remove checksum compatibility support for b1_8"
cksumc = lustre_msg_calc_cksum(req->rq_repmsg);
-#endif
if (cksumc != cksums) {
CDEBUG(D_SEC,
"early reply checksum mismatch: %08x != %08x\n",
@@ -371,16 +362,7 @@ int null_authorize(struct ptlrpc_request *req)
} else {
__u32 cksum;
-#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0)
- if (lustre_msghdr_get_flags(req->rq_reqmsg) &
- MSGHDR_CKSUM_INCOMPAT18)
- cksum = lustre_msg_calc_cksum(rs->rs_repbuf, 0);
- else
- cksum = lustre_msg_calc_cksum(rs->rs_repbuf, 1);
-#else
-# warning "remove checksum compatibility support for b1_8"
cksum = lustre_msg_calc_cksum(rs->rs_repbuf);
-#endif
lustre_msg_set_cksum(rs->rs_repbuf, cksum);
req->rq_reply_off = 0;
}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
index 3d72b810c45..a79cd53010a 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
@@ -938,8 +938,8 @@ int plain_svc_wrap_bulk(struct ptlrpc_request *req,
rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
tokenv);
if (rc) {
- CERROR("bulk read: server failed to compute "
- "checksum: %d\n", rc);
+ CERROR("bulk read: server failed to compute checksum: %d\n",
+ rc);
} else {
if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))
corrupt_bulk_data(desc);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
index a8df8a79233..635b12b22ce 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/service.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
@@ -580,8 +580,7 @@ ptlrpc_server_nthreads_check(struct ptlrpc_service *svc,
svc->srv_nthrs_cpt_init = init;
if (nthrs * svc->srv_ncpts > tc->tc_nthrs_max) {
- CDEBUG(D_OTHER, "%s: This service may have more threads (%d) "
- "than the given soft limit (%d)\n",
+ CDEBUG(D_OTHER, "%s: This service may have more threads (%d) than the given soft limit (%d)\n",
svc->srv_name, nthrs * svc->srv_ncpts,
tc->tc_nthrs_max);
}
@@ -1251,8 +1250,8 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
/* deadline is when the client expects us to reply, margin is the
difference between clients' and servers' expectations */
DEBUG_REQ(D_ADAPTTO, req,
- "%ssending early reply (deadline %+lds, margin %+lds) for "
- "%d+%d", AT_OFF ? "AT off - not " : "",
+ "%ssending early reply (deadline %+lds, margin %+lds) for %d+%d",
+ AT_OFF ? "AT off - not " : "",
olddl, olddl - at_get(&svcpt->scp_at_estimate),
at_get(&svcpt->scp_at_estimate), at_extra);
@@ -1260,17 +1259,15 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
return 0;
if (olddl < 0) {
- DEBUG_REQ(D_WARNING, req, "Already past deadline (%+lds), "
- "not sending early reply. Consider increasing "
- "at_early_margin (%d)?", olddl, at_early_margin);
+ DEBUG_REQ(D_WARNING, req, "Already past deadline (%+lds), not sending early reply. Consider increasing at_early_margin (%d)?",
+ olddl, at_early_margin);
/* Return an error so we're not re-added to the timed list. */
return -ETIMEDOUT;
}
if (!(lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
- DEBUG_REQ(D_INFO, req, "Wanted to ask client for more time, "
- "but no AT support");
+ DEBUG_REQ(D_INFO, req, "Wanted to ask client for more time, but no AT support");
return -ENOSYS;
}
@@ -1296,8 +1293,7 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
* we may be past adaptive_max */
if (req->rq_deadline >= req->rq_arrival_time.tv_sec +
at_get(&svcpt->scp_at_estimate)) {
- DEBUG_REQ(D_WARNING, req, "Couldn't add any time "
- "(%ld/%ld), not sending early reply\n",
+ DEBUG_REQ(D_WARNING, req, "Couldn't add any time (%ld/%ld), not sending early reply\n",
olddl, req->rq_arrival_time.tv_sec +
at_get(&svcpt->scp_at_estimate) -
get_seconds());
@@ -1329,8 +1325,7 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
LASSERT(atomic_read(&req->rq_refcount));
/** if it is last refcount then early reply isn't needed */
if (atomic_read(&req->rq_refcount) == 1) {
- DEBUG_REQ(D_ADAPTTO, reqcopy, "Normal reply already sent out, "
- "abort sending early reply\n");
+ DEBUG_REQ(D_ADAPTTO, reqcopy, "Normal reply already sent out, abort sending early reply\n");
rc = -EINVAL;
goto out;
}
@@ -1454,16 +1449,14 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt)
spin_unlock(&svcpt->scp_at_lock);
- CDEBUG(D_ADAPTTO, "timeout in %+ds, asking for %d secs on %d early "
- "replies\n", first, at_extra, counter);
+ CDEBUG(D_ADAPTTO, "timeout in %+ds, asking for %d secs on %d early replies\n",
+ first, at_extra, counter);
if (first < 0) {
/* We're already past request deadlines before we even get a
chance to send early replies */
- LCONSOLE_WARN("%s: This server is not able to keep up with "
- "request traffic (cpu-bound).\n",
+ LCONSOLE_WARN("%s: This server is not able to keep up with request traffic (cpu-bound).\n",
svcpt->scp_service->srv_name);
- CWARN("earlyQ=%d reqQ=%d recA=%d, svcEst=%d, "
- "delay="CFS_DURATION_T"(jiff)\n",
+ CWARN("earlyQ=%d reqQ=%d recA=%d, svcEst=%d, delay=" CFS_DURATION_T "(jiff)\n",
counter, svcpt->scp_nreqs_incoming,
svcpt->scp_nreqs_active,
at_get(&svcpt->scp_at_estimate), delay);
@@ -1825,8 +1818,7 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt,
if (rc == 0) {
rc = sptlrpc_target_export_check(req->rq_export, req);
if (rc)
- DEBUG_REQ(D_ERROR, req, "DROPPING req with "
- "illegal security flavor,");
+ DEBUG_REQ(D_ERROR, req, "DROPPING req with illegal security flavor,");
}
if (rc)
@@ -1942,18 +1934,17 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
/* Discard requests queued for longer than the deadline.
The deadline is increased if we send an early reply. */
if (get_seconds() > request->rq_deadline) {
- DEBUG_REQ(D_ERROR, request, "Dropping timed-out request from %s"
- ": deadline "CFS_DURATION_T":"CFS_DURATION_T"s ago\n",
+ DEBUG_REQ(D_ERROR, request, "Dropping timed-out request from %s: deadline " CFS_DURATION_T ":" CFS_DURATION_T "s ago\n",
libcfs_id2str(request->rq_peer),
cfs_time_sub(request->rq_deadline,
- request->rq_arrival_time.tv_sec),
+ request->rq_arrival_time.tv_sec),
cfs_time_sub(get_seconds(),
- request->rq_deadline));
+ request->rq_deadline));
goto put_conn;
}
- CDEBUG(D_RPCTRACE, "Handling RPC pname:cluuid+ref:pid:xid:nid:opc "
- "%s:%s+%d:%d:x%llu:%s:%d\n", current_comm(),
+ CDEBUG(D_RPCTRACE, "Handling RPC pname:cluuid+ref:pid:xid:nid:opc %s:%s+%d:%d:x%llu:%s:%d\n",
+ current_comm(),
(request->rq_export ?
(char *)request->rq_export->exp_client_uuid.uuid : "0"),
(request->rq_export ?
@@ -1986,26 +1977,24 @@ put_conn:
do_gettimeofday(&work_end);
timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
- CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:nid:opc "
- "%s:%s+%d:%d:x%llu:%s:%d Request processed in "
- "%ldus (%ldus total) trans %llu rc %d/%d\n",
- current_comm(),
- (request->rq_export ?
- (char *)request->rq_export->exp_client_uuid.uuid : "0"),
- (request->rq_export ?
- atomic_read(&request->rq_export->exp_refcount) : -99),
- lustre_msg_get_status(request->rq_reqmsg),
- request->rq_xid,
- libcfs_id2str(request->rq_peer),
- lustre_msg_get_opc(request->rq_reqmsg),
- timediff,
- cfs_timeval_sub(&work_end, &request->rq_arrival_time, NULL),
- (request->rq_repmsg ?
- lustre_msg_get_transno(request->rq_repmsg) :
- request->rq_transno),
- request->rq_status,
- (request->rq_repmsg ?
- lustre_msg_get_status(request->rq_repmsg) : -999));
+ CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:nid:opc %s:%s+%d:%d:x%llu:%s:%d Request processed in %ldus (%ldus total) trans %llu rc %d/%d\n",
+ current_comm(),
+ (request->rq_export ?
+ (char *)request->rq_export->exp_client_uuid.uuid : "0"),
+ (request->rq_export ?
+ atomic_read(&request->rq_export->exp_refcount) : -99),
+ lustre_msg_get_status(request->rq_reqmsg),
+ request->rq_xid,
+ libcfs_id2str(request->rq_peer),
+ lustre_msg_get_opc(request->rq_reqmsg),
+ timediff,
+ cfs_timeval_sub(&work_end, &request->rq_arrival_time, NULL),
+ (request->rq_repmsg ?
+ lustre_msg_get_transno(request->rq_repmsg) :
+ request->rq_transno),
+ request->rq_status,
+ (request->rq_repmsg ?
+ lustre_msg_get_status(request->rq_repmsg) : -999));
if (likely(svc->srv_stats != NULL && request->rq_reqmsg != NULL)) {
__u32 op = lustre_msg_get_opc(request->rq_reqmsg);
int opc = opcode_offset(op);
@@ -2557,8 +2546,8 @@ static int ptlrpc_start_hr_threads(void)
if (!IS_ERR_VALUE(rc))
continue;
- CERROR("Reply handling thread %d:%d Failed on starting: "
- "rc = %d\n", i, j, rc);
+ CERROR("Reply handling thread %d:%d Failed on starting: rc = %d\n",
+ i, j, rc);
ptlrpc_stop_hr_threads();
return rc;
}
@@ -2920,8 +2909,7 @@ ptlrpc_service_unlink_rqbd(struct ptlrpc_service *svc)
rc = l_wait_event(svcpt->scp_waitq,
svcpt->scp_nrqbds_posted == 0, &lwi);
if (rc == -ETIMEDOUT) {
- CWARN("Service %s waiting for "
- "request buffers\n",
+ CWARN("Service %s waiting for request buffers\n",
svcpt->scp_service->srv_name);
}
spin_lock(&svcpt->scp_lock);
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index 96498b7fc20..2a054a99d43 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -27,12 +27,18 @@ source "drivers/staging/media/davinci_vpfe/Kconfig"
source "drivers/staging/media/dt3155v4l/Kconfig"
+source "drivers/staging/media/tlg2300/Kconfig"
+
source "drivers/staging/media/mn88472/Kconfig"
source "drivers/staging/media/mn88473/Kconfig"
source "drivers/staging/media/omap4iss/Kconfig"
+source "drivers/staging/media/parport/Kconfig"
+
+source "drivers/staging/media/vino/Kconfig"
+
# Keep LIRC at the end, as it has sub-menus
source "drivers/staging/media/lirc/Kconfig"
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index 30fb352fc4a..412b2840839 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -6,4 +6,7 @@ obj-$(CONFIG_VIDEO_DM365_VPFE) += davinci_vpfe/
obj-$(CONFIG_VIDEO_OMAP4) += omap4iss/
obj-$(CONFIG_DVB_MN88472) += mn88472/
obj-$(CONFIG_DVB_MN88473) += mn88473/
+obj-y += parport/
+obj-$(CONFIG_VIDEO_TLG2300) += tlg2300/
+obj-y += vino/
diff --git a/drivers/staging/media/cxd2099/cxd2099.c b/drivers/staging/media/cxd2099/cxd2099.c
index 73e7b2c9e4a..657ea480c6e 100644
--- a/drivers/staging/media/cxd2099/cxd2099.c
+++ b/drivers/staging/media/cxd2099/cxd2099.c
@@ -527,7 +527,7 @@ static int slot_reset(struct dvb_ca_en50221 *ca, int slot)
u8 val;
#endif
for (i = 0; i < 100; i++) {
- msleep(10);
+ usleep_range(10000, 11000);
#if 0
read_reg(ci, 0x06, &val);
dev_info(&ci->i2c->dev, "%d:%02x\n", i, val);
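On the msleep() to usleep_range() change above: for short delays (roughly under 20 ms) msleep() is jiffy-based and can oversleep considerably, while usleep_range() uses hrtimers and lets the scheduler coalesce wakeups within the given window. A hedged sketch of the polling idiom; device_ready() is a hypothetical readiness check, not from this driver:

	/* usleep_range(min, max) from linux/delay.h sleeps between min and max
	 * microseconds; the slack lets the kernel batch timer wakeups.
	 */
	for (i = 0; i < 100; i++) {
		usleep_range(10000, 11000);	/* ~10 ms, instead of msleep(10) */
		if (device_ready(ci))		/* hypothetical check */
			break;
	}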
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe.h b/drivers/staging/media/davinci_vpfe/dm365_ipipe.h
index cf4204603eb..d81b29e1930 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipe.h
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe.h
@@ -120,8 +120,8 @@ struct vpfe_ipipe_device {
enum ipipe_input_entity input;
unsigned int output;
struct v4l2_ctrl_handler ctrls;
- void *__iomem base_addr;
- void *__iomem isp5_base_addr;
+ void __iomem *base_addr;
+ void __iomem *isp5_base_addr;
struct ipipe_module_params config;
};
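The "void *__iomem" to "void __iomem *" changes in this and the following davinci_vpfe files fix the placement of the sparse address-space annotation: __iomem qualifies the pointed-to MMIO memory, so it belongs between the base type and the "*". A small sketch of the corrected usage; the resource and register offset are placeholders:

	/* __iomem marks a pointer into device (MMIO) space; sparse warns if it
	 * is dereferenced directly or mixed with ordinary pointers.
	 * Needs linux/io.h for ioremap()/readl()/writel()/iounmap().
	 */
	static int probe_sketch(struct resource *res)
	{
		void __iomem *base = ioremap(res->start, resource_size(res));
		u32 v;

		if (!base)
			return -ENOMEM;
		v = readl(base + 0x10);		/* 0x10 is a placeholder offset */
		writel(v | 0x1, base + 0x10);
		iounmap(base);
		return 0;
	}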
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
index 6461de1a61f..2a3a56b88de 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
@@ -24,7 +24,7 @@
#define IPIPE_MODE_CONTINUOUS 0
#define IPIPE_MODE_SINGLE_SHOT 1
-static void ipipe_clock_enable(void *__iomem base_addr)
+static void ipipe_clock_enable(void __iomem *base_addr)
{
/* enable IPIPE MMR for register write access */
regw_ip(base_addr, IPIPE_GCK_MMR_DEFAULT, IPIPE_GCK_MMR);
@@ -34,7 +34,7 @@ static void ipipe_clock_enable(void *__iomem base_addr)
}
static void
-rsz_set_common_params(void *__iomem rsz_base, struct resizer_params *params)
+rsz_set_common_params(void __iomem *rsz_base, struct resizer_params *params)
{
struct rsz_common_params *rsz_common = &params->rsz_common;
u32 val;
@@ -66,7 +66,7 @@ rsz_set_common_params(void *__iomem rsz_base, struct resizer_params *params)
}
static void
-rsz_set_rsz_regs(void *__iomem rsz_base, unsigned int rsz_id,
+rsz_set_rsz_regs(void __iomem *rsz_base, unsigned int rsz_id,
struct resizer_params *params)
{
struct resizer_scale_param *rsc_params;
@@ -171,7 +171,7 @@ rsz_set_rsz_regs(void *__iomem rsz_base, unsigned int rsz_id,
/*set the registers of either RSZ0 or RSZ1 */
static void
-ipipe_setup_resizer(void *__iomem rsz_base, struct resizer_params *params)
+ipipe_setup_resizer(void __iomem *rsz_base, struct resizer_params *params)
{
/* enable MMR gate to write to Resizer */
regw_rsz(rsz_base, 1, RSZ_GCK_MMR);
@@ -302,8 +302,8 @@ int config_rsz_hw(struct vpfe_resizer_device *resizer,
struct resizer_params *config)
{
struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
- void *__iomem ipipe_base = vpfe_dev->vpfe_ipipe.base_addr;
- void *__iomem rsz_base = vpfe_dev->vpfe_resizer.base_addr;
+ void __iomem *ipipe_base = vpfe_dev->vpfe_ipipe.base_addr;
+ void __iomem *rsz_base = vpfe_dev->vpfe_resizer.base_addr;
/* enable VPSS clock */
vpss_enable_clock(VPSS_IPIPE_CLOCK, 1);
@@ -315,7 +315,7 @@ int config_rsz_hw(struct vpfe_resizer_device *resizer,
}
static void
-rsz_set_y_address(void *__iomem rsz_base, unsigned int address,
+rsz_set_y_address(void __iomem *rsz_base, unsigned int address,
unsigned int offset)
{
u32 val;
@@ -330,7 +330,7 @@ rsz_set_y_address(void *__iomem rsz_base, unsigned int address,
}
static void
-rsz_set_c_address(void *__iomem rsz_base, unsigned int address,
+rsz_set_c_address(void __iomem *rsz_base, unsigned int address,
unsigned int offset)
{
u32 val;
@@ -352,7 +352,7 @@ rsz_set_c_address(void *__iomem rsz_base, unsigned int address,
* @address: the address to set
*/
int
-resizer_set_outaddr(void *__iomem rsz_base, struct resizer_params *params,
+resizer_set_outaddr(void __iomem *rsz_base, struct resizer_params *params,
int resize_no, unsigned int address)
{
struct resizer_scale_param *rsc_param;
@@ -411,7 +411,7 @@ resizer_set_outaddr(void *__iomem rsz_base, struct resizer_params *params,
}
void
-ipipe_set_lutdpc_regs(void *__iomem base_addr, void *__iomem isp5_base_addr,
+ipipe_set_lutdpc_regs(void __iomem *base_addr, void __iomem *isp5_base_addr,
struct vpfe_ipipe_lutdpc *dpc)
{
u32 max_tbl_size = LUT_DPC_MAX_SIZE >> 1;
@@ -446,7 +446,7 @@ ipipe_set_lutdpc_regs(void *__iomem base_addr, void *__iomem isp5_base_addr,
}
static void
-set_dpc_thresholds(void *__iomem base_addr,
+set_dpc_thresholds(void __iomem *base_addr,
struct vpfe_ipipe_otfdpc_2_0_cfg *dpc_thr)
{
regw_ip(base_addr, dpc_thr->corr_thr.r & OTFDPC_DPC2_THR_MASK,
@@ -467,7 +467,7 @@ set_dpc_thresholds(void *__iomem base_addr,
DPC_OTF_2D_THR_B);
}
-void ipipe_set_otfdpc_regs(void *__iomem base_addr,
+void ipipe_set_otfdpc_regs(void __iomem *base_addr,
struct vpfe_ipipe_otfdpc *otfdpc)
{
struct vpfe_ipipe_otfdpc_2_0_cfg *dpc_2_0 = &otfdpc->alg_cfg.dpc_2_0;
@@ -523,7 +523,7 @@ void ipipe_set_otfdpc_regs(void *__iomem base_addr,
/* 2D Noise filter */
void
-ipipe_set_d2f_regs(void *__iomem base_addr, unsigned int id,
+ipipe_set_d2f_regs(void __iomem *base_addr, unsigned int id,
struct vpfe_ipipe_nf *noise_filter)
{
@@ -571,7 +571,7 @@ ipipe_set_d2f_regs(void *__iomem base_addr, unsigned int id,
(((decimal & 0x1f) | ((integer & 0x7) << 5)))
/* Green Imbalance Correction */
-void ipipe_set_gic_regs(void *__iomem base_addr, struct vpfe_ipipe_gic *gic)
+void ipipe_set_gic_regs(void __iomem *base_addr, struct vpfe_ipipe_gic *gic)
{
u32 val;
@@ -609,7 +609,7 @@ void ipipe_set_gic_regs(void *__iomem base_addr, struct vpfe_ipipe_gic *gic)
#define IPIPE_U13Q9(decimal, integer) \
(((decimal & 0x1ff) | ((integer & 0xf) << 9)))
/* White balance */
-void ipipe_set_wb_regs(void *__iomem base_addr, struct vpfe_ipipe_wb *wb)
+void ipipe_set_wb_regs(void __iomem *base_addr, struct vpfe_ipipe_wb *wb)
{
u32 val;
@@ -635,7 +635,7 @@ void ipipe_set_wb_regs(void *__iomem base_addr, struct vpfe_ipipe_wb *wb)
}
/* CFA */
-void ipipe_set_cfa_regs(void *__iomem base_addr, struct vpfe_ipipe_cfa *cfa)
+void ipipe_set_cfa_regs(void __iomem *base_addr, struct vpfe_ipipe_cfa *cfa)
{
ipipe_clock_enable(base_addr);
@@ -671,7 +671,7 @@ void ipipe_set_cfa_regs(void *__iomem base_addr, struct vpfe_ipipe_cfa *cfa)
}
void
-ipipe_set_rgb2rgb_regs(void *__iomem base_addr, unsigned int id,
+ipipe_set_rgb2rgb_regs(void __iomem *base_addr, unsigned int id,
struct vpfe_ipipe_rgb2rgb *rgb)
{
u32 offset_mask = RGB2RGB_1_OFST_MASK;
@@ -724,7 +724,7 @@ ipipe_set_rgb2rgb_regs(void *__iomem base_addr, unsigned int id,
}
static void
-ipipe_update_gamma_tbl(void *__iomem isp5_base_addr,
+ipipe_update_gamma_tbl(void __iomem *isp5_base_addr,
struct vpfe_ipipe_gamma_entry *table, int size, u32 addr)
{
int count;
@@ -738,7 +738,7 @@ ipipe_update_gamma_tbl(void *__iomem isp5_base_addr,
}
void
-ipipe_set_gamma_regs(void *__iomem base_addr, void *__iomem isp5_base_addr,
+ipipe_set_gamma_regs(void __iomem *base_addr, void __iomem *isp5_base_addr,
struct vpfe_ipipe_gamma *gamma)
{
int table_size;
@@ -770,7 +770,7 @@ ipipe_set_gamma_regs(void *__iomem base_addr, void *__iomem isp5_base_addr,
}
void
-ipipe_set_3d_lut_regs(void *__iomem base_addr, void *__iomem isp5_base_addr,
+ipipe_set_3d_lut_regs(void __iomem *base_addr, void __iomem *isp5_base_addr,
struct vpfe_ipipe_3d_lut *lut_3d)
{
struct vpfe_ipipe_3d_lut_entry *tbl;
@@ -819,7 +819,7 @@ ipipe_set_3d_lut_regs(void *__iomem base_addr, void *__iomem isp5_base_addr,
/* Lumina adjustments */
void
-ipipe_set_lum_adj_regs(void *__iomem base_addr, struct ipipe_lum_adj *lum_adj)
+ipipe_set_lum_adj_regs(void __iomem *base_addr, struct ipipe_lum_adj *lum_adj)
{
u32 val;
@@ -834,7 +834,7 @@ ipipe_set_lum_adj_regs(void *__iomem base_addr, struct ipipe_lum_adj *lum_adj)
#define IPIPE_S12Q8(decimal, integer) \
(((decimal & 0xff) | ((integer & 0xf) << 8)))
-void ipipe_set_rgb2ycbcr_regs(void *__iomem base_addr,
+void ipipe_set_rgb2ycbcr_regs(void __iomem *base_addr,
struct vpfe_ipipe_rgb2yuv *yuv)
{
u32 val;
@@ -866,7 +866,7 @@ void ipipe_set_rgb2ycbcr_regs(void *__iomem base_addr,
/* YUV 422 conversion */
void
-ipipe_set_yuv422_conv_regs(void *__iomem base_addr,
+ipipe_set_yuv422_conv_regs(void __iomem *base_addr,
struct vpfe_ipipe_yuv422_conv *conv)
{
u32 val;
@@ -879,7 +879,7 @@ ipipe_set_yuv422_conv_regs(void *__iomem base_addr,
}
void
-ipipe_set_gbce_regs(void *__iomem base_addr, void *__iomem isp5_base_addr,
+ipipe_set_gbce_regs(void __iomem *base_addr, void __iomem *isp5_base_addr,
struct vpfe_ipipe_gbce *gbce)
{
unsigned int count;
@@ -906,7 +906,7 @@ ipipe_set_gbce_regs(void *__iomem base_addr, void *__iomem isp5_base_addr,
}
void
-ipipe_set_ee_regs(void *__iomem base_addr, void *__iomem isp5_base_addr,
+ipipe_set_ee_regs(void __iomem *base_addr, void __iomem *isp5_base_addr,
struct vpfe_ipipe_yee *ee)
{
unsigned int count;
@@ -950,7 +950,7 @@ ipipe_set_ee_regs(void *__iomem base_addr, void *__iomem isp5_base_addr,
}
/* Chromatic Artifact Correction. CAR */
-static void ipipe_set_mf(void *__iomem base_addr)
+static void ipipe_set_mf(void __iomem *base_addr)
{
/* typ to dynamic switch */
regw_ip(base_addr, VPFE_IPIPE_CAR_DYN_SWITCH, CAR_TYP);
@@ -959,7 +959,7 @@ static void ipipe_set_mf(void *__iomem base_addr)
}
static void
-ipipe_set_gain_ctrl(void *__iomem base_addr, struct vpfe_ipipe_car *car)
+ipipe_set_gain_ctrl(void __iomem *base_addr, struct vpfe_ipipe_car *car)
{
regw_ip(base_addr, VPFE_IPIPE_CAR_CHR_GAIN_CTRL, CAR_TYP);
regw_ip(base_addr, car->hpf, CAR_HPF_TYP);
@@ -975,7 +975,7 @@ ipipe_set_gain_ctrl(void *__iomem base_addr, struct vpfe_ipipe_car *car)
CAR_GN2_MIN);
}
-void ipipe_set_car_regs(void *__iomem base_addr, struct vpfe_ipipe_car *car)
+void ipipe_set_car_regs(void __iomem *base_addr, struct vpfe_ipipe_car *car)
{
u32 val;
@@ -1010,7 +1010,7 @@ void ipipe_set_car_regs(void *__iomem base_addr, struct vpfe_ipipe_car *car)
}
/* Chromatic Gain Suppression */
-void ipipe_set_cgs_regs(void *__iomem base_addr, struct vpfe_ipipe_cgs *cgs)
+void ipipe_set_cgs_regs(void __iomem *base_addr, struct vpfe_ipipe_cgs *cgs)
{
ipipe_clock_enable(base_addr);
regw_ip(base_addr, cgs->en, CGS_EN);
@@ -1025,12 +1025,12 @@ void ipipe_set_cgs_regs(void *__iomem base_addr, struct vpfe_ipipe_cgs *cgs)
regw_ip(base_addr, cgs->h_min, CGS_GN1_H_MIN);
}
-void rsz_src_enable(void *__iomem rsz_base, int enable)
+void rsz_src_enable(void __iomem *rsz_base, int enable)
{
regw_rsz(rsz_base, enable, RSZ_SRC_EN);
}
-int rsz_enable(void *__iomem rsz_base, int rsz_id, int enable)
+int rsz_enable(void __iomem *rsz_base, int rsz_id, int enable)
{
if (rsz_id == RSZ_A) {
regw_rsz(rsz_base, enable, RSZ_EN_A);
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.h b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.h
index 81176fb9d16..2bf2f7a6917 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.h
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.h
@@ -490,29 +490,29 @@
#define RSZ_RGB_TYP_SHIFT 0
#define RSZ_RGB_ALPHA_MASK 0xff
-static inline u32 regr_ip(void *__iomem addr, u32 offset)
+static inline u32 regr_ip(void __iomem *addr, u32 offset)
{
return readl(addr + offset);
}
-static inline void regw_ip(void *__iomem addr, u32 val, u32 offset)
+static inline void regw_ip(void __iomem *addr, u32 val, u32 offset)
{
writel(val, addr + offset);
}
-static inline u32 w_ip_table(void *__iomem addr, u32 val, u32 offset)
+static inline u32 w_ip_table(void __iomem *addr, u32 val, u32 offset)
{
writel(val, addr + offset);
return val;
}
-static inline u32 regr_rsz(void *__iomem addr, u32 offset)
+static inline u32 regr_rsz(void __iomem *addr, u32 offset)
{
return readl(addr + offset);
}
-static inline u32 regw_rsz(void *__iomem addr, u32 val, u32 offset)
+static inline u32 regw_rsz(void __iomem *addr, u32 val, u32 offset)
{
writel(val, addr + offset);
@@ -520,39 +520,39 @@ static inline u32 regw_rsz(void *__iomem addr, u32 val, u32 offset)
}
int config_ipipe_hw(struct vpfe_ipipe_device *ipipe);
-int resizer_set_outaddr(void *__iomem rsz_base, struct resizer_params *params,
+int resizer_set_outaddr(void __iomem *rsz_base, struct resizer_params *params,
int resize_no, unsigned int address);
-int rsz_enable(void *__iomem rsz_base, int rsz_id, int enable);
-void rsz_src_enable(void *__iomem rsz_base, int enable);
+int rsz_enable(void __iomem *rsz_base, int rsz_id, int enable);
+void rsz_src_enable(void __iomem *rsz_base, int enable);
void rsz_set_in_pix_format(unsigned char y_c);
int config_rsz_hw(struct vpfe_resizer_device *resizer,
struct resizer_params *config);
-void ipipe_set_d2f_regs(void *__iomem base_addr, unsigned int id,
+void ipipe_set_d2f_regs(void __iomem *base_addr, unsigned int id,
struct vpfe_ipipe_nf *noise_filter);
-void ipipe_set_rgb2rgb_regs(void *__iomem base_addr, unsigned int id,
+void ipipe_set_rgb2rgb_regs(void __iomem *base_addr, unsigned int id,
struct vpfe_ipipe_rgb2rgb *rgb);
-void ipipe_set_yuv422_conv_regs(void *__iomem base_addr,
+void ipipe_set_yuv422_conv_regs(void __iomem *base_addr,
struct vpfe_ipipe_yuv422_conv *conv);
-void ipipe_set_lum_adj_regs(void *__iomem base_addr,
+void ipipe_set_lum_adj_regs(void __iomem *base_addr,
struct ipipe_lum_adj *lum_adj);
-void ipipe_set_rgb2ycbcr_regs(void *__iomem base_addr,
+void ipipe_set_rgb2ycbcr_regs(void __iomem *base_addr,
struct vpfe_ipipe_rgb2yuv *yuv);
-void ipipe_set_lutdpc_regs(void *__iomem base_addr,
- void *__iomem isp5_base_addr, struct vpfe_ipipe_lutdpc *lutdpc);
-void ipipe_set_otfdpc_regs(void *__iomem base_addr,
+void ipipe_set_lutdpc_regs(void __iomem *base_addr,
+ void __iomem *isp5_base_addr, struct vpfe_ipipe_lutdpc *lutdpc);
+void ipipe_set_otfdpc_regs(void __iomem *base_addr,
struct vpfe_ipipe_otfdpc *otfdpc);
-void ipipe_set_3d_lut_regs(void *__iomem base_addr,
- void *__iomem isp5_base_addr, struct vpfe_ipipe_3d_lut *lut_3d);
-void ipipe_set_gamma_regs(void *__iomem base_addr,
- void *__iomem isp5_base_addr, struct vpfe_ipipe_gamma *gamma);
-void ipipe_set_ee_regs(void *__iomem base_addr,
- void *__iomem isp5_base_addr, struct vpfe_ipipe_yee *ee);
-void ipipe_set_gbce_regs(void *__iomem base_addr,
- void *__iomem isp5_base_addr, struct vpfe_ipipe_gbce *gbce);
-void ipipe_set_gic_regs(void *__iomem base_addr, struct vpfe_ipipe_gic *gic);
-void ipipe_set_cfa_regs(void *__iomem base_addr, struct vpfe_ipipe_cfa *cfa);
-void ipipe_set_car_regs(void *__iomem base_addr, struct vpfe_ipipe_car *car);
-void ipipe_set_cgs_regs(void *__iomem base_addr, struct vpfe_ipipe_cgs *cgs);
-void ipipe_set_wb_regs(void *__iomem base_addr, struct vpfe_ipipe_wb *wb);
+void ipipe_set_3d_lut_regs(void __iomem *base_addr,
+ void __iomem *isp5_base_addr, struct vpfe_ipipe_3d_lut *lut_3d);
+void ipipe_set_gamma_regs(void __iomem *base_addr,
+ void __iomem *isp5_base_addr, struct vpfe_ipipe_gamma *gamma);
+void ipipe_set_ee_regs(void __iomem *base_addr,
+ void __iomem *isp5_base_addr, struct vpfe_ipipe_yee *ee);
+void ipipe_set_gbce_regs(void __iomem *base_addr,
+ void __iomem *isp5_base_addr, struct vpfe_ipipe_gbce *gbce);
+void ipipe_set_gic_regs(void __iomem *base_addr, struct vpfe_ipipe_gic *gic);
+void ipipe_set_cfa_regs(void __iomem *base_addr, struct vpfe_ipipe_cfa *cfa);
+void ipipe_set_car_regs(void __iomem *base_addr, struct vpfe_ipipe_car *car);
+void ipipe_set_cgs_regs(void __iomem *base_addr, struct vpfe_ipipe_cgs *cgs);
+void ipipe_set_wb_regs(void __iomem *base_addr, struct vpfe_ipipe_wb *wb);
#endif /* _DAVINCI_VPFE_DM365_IPIPE_HW_H */
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
index a86f16ff581..87d42e18377 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
@@ -421,7 +421,7 @@ static int
ipipeif_get_config(struct v4l2_subdev *sd, void __user *arg)
{
struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
- struct ipipeif_params *config = (struct ipipeif_params *)arg;
+ struct ipipeif_params *config = arg;
struct device *dev = ipipeif->subdev.v4l2_dev->dev;
if (!arg) {
@@ -462,7 +462,7 @@ ipipeif_get_config(struct v4l2_subdev *sd, void __user *arg)
static long ipipeif_ioctl(struct v4l2_subdev *sd,
unsigned int cmd, void *arg)
{
- struct ipipeif_params *config = (struct ipipeif_params *)arg;
+ struct ipipeif_params *config = arg;
int ret = -ENOIOCTLCMD;
switch (cmd) {
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.h b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.h
index 608701fc5fe..cea3d61335a 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.h
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.h
@@ -134,7 +134,7 @@ struct vpfe_ipipeif_device {
unsigned int output;
struct vpfe_video_device video_in;
struct v4l2_ctrl_handler ctrls;
- void *__iomem ipipeif_base_addr;
+ void __iomem *ipipeif_base_addr;
struct ipipeif_params config;
int dpcm_predictor;
int gain;
diff --git a/drivers/staging/media/davinci_vpfe/dm365_isif.c b/drivers/staging/media/davinci_vpfe/dm365_isif.c
index fa26f63831b..0ba0bf2c1cf 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_isif.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_isif.c
@@ -70,17 +70,17 @@ static const u32 isif_srggb_pattern =
ISIF_COLPTN_Gb_G << ISIF_CCOLP_CP15_4 |
ISIF_COLPTN_B_Mg << ISIF_CCOLP_CP17_6;
-static inline u32 isif_read(void *__iomem base_addr, u32 offset)
+static inline u32 isif_read(void __iomem *base_addr, u32 offset)
{
return readl(base_addr + offset);
}
-static inline void isif_write(void *__iomem base_addr, u32 val, u32 offset)
+static inline void isif_write(void __iomem *base_addr, u32 val, u32 offset)
{
writel(val, base_addr + offset);
}
-static inline u32 isif_merge(void *__iomem base_addr, u32 mask, u32 val,
+static inline u32 isif_merge(void __iomem *base_addr, u32 mask, u32 val,
u32 offset)
{
u32 new_val = (isif_read(base_addr, offset) & ~mask) | (val & mask);
@@ -646,7 +646,7 @@ static void isif_config_gain_offset(struct vpfe_isif_device *isif)
{
struct vpfe_isif_gain_offsets_adj *gain_off_ptr =
&isif->isif_cfg.bayer.config_params.gain_offset;
- void *__iomem base = isif->isif_cfg.base_addr;
+ void __iomem *base = isif->isif_cfg.base_addr;
u32 val;
val = ((gain_off_ptr->gain_sdram_en & 1) << GAIN_SDRAM_EN_SHIFT) |
@@ -1602,6 +1602,7 @@ isif_pad_get_crop(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
if (crop->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_rect *rect;
+
rect = v4l2_subdev_get_try_crop(fh, ISIF_PAD_SINK);
memcpy(&crop->rect, rect, sizeof(*rect));
} else {
@@ -1991,7 +1992,7 @@ int vpfe_isif_init(struct vpfe_isif_device *isif, struct platform_device *pdev)
struct media_entity *me = &sd->entity;
static resource_size_t res_len;
struct resource *res;
- void *__iomem addr;
+ void __iomem *addr;
int status;
int i = 0;
diff --git a/drivers/staging/media/davinci_vpfe/dm365_isif.h b/drivers/staging/media/davinci_vpfe/dm365_isif.h
index 473fd2cfe35..89e814e9c0d 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_isif.h
+++ b/drivers/staging/media/davinci_vpfe/dm365_isif.h
@@ -159,9 +159,9 @@ struct isif_oper_config {
struct isif_params_raw bayer;
enum isif_data_pack data_pack;
struct isif_gain_values isif_gain_params;
- void *__iomem base_addr;
- void *__iomem linear_tbl0_addr;
- void *__iomem linear_tbl1_addr;
+ void __iomem *base_addr;
+ void __iomem *linear_tbl0_addr;
+ void __iomem *linear_tbl1_addr;
};
#define ISIF_PAD_SINK 0
diff --git a/drivers/staging/media/davinci_vpfe/dm365_resizer.c b/drivers/staging/media/davinci_vpfe/dm365_resizer.c
index e0b29c8ca22..75e70e14b72 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_resizer.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_resizer.c
@@ -149,7 +149,7 @@ configure_resizer_out_params(struct vpfe_resizer_device *resizer, int index,
param->rsz_en[index] = DISABLE;
return;
}
- output = (struct vpfe_rsz_output_spec *)output_spec;
+ output = output_spec;
param->rsz_en[index] = ENABLE;
if (partial) {
param->rsz_rsc_param[index].h_flip = output->h_flip;
@@ -633,7 +633,7 @@ resizer_calculate_normal_f_div_param(struct device *dev, int input_width,
if (!(val % 2)) {
h1 = val;
} else {
- val = (input_width << 7);
+ val = input_width << 7;
val -= rsz >> 1;
val /= rsz << 1;
val <<= 1;
@@ -1218,12 +1218,12 @@ static long resizer_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
switch (cmd) {
case VIDIOC_VPFE_RSZ_S_CONFIG:
- user_config = (struct vpfe_rsz_config *)arg;
+ user_config = arg;
ret = resizer_set_configuration(resizer, user_config);
break;
case VIDIOC_VPFE_RSZ_G_CONFIG:
- user_config = (struct vpfe_rsz_config *)arg;
+ user_config = arg;
if (!user_config->config) {
dev_err(dev, "error in VIDIOC_VPFE_RSZ_G_CONFIG\n");
return -EINVAL;
diff --git a/drivers/staging/media/davinci_vpfe/dm365_resizer.h b/drivers/staging/media/davinci_vpfe/dm365_resizer.h
index 59a79422b91..93b0f44030a 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_resizer.h
+++ b/drivers/staging/media/davinci_vpfe/dm365_resizer.h
@@ -228,7 +228,7 @@ struct vpfe_resizer_device {
struct dm365_resizer_device resizer_a;
struct dm365_resizer_device resizer_b;
struct resizer_params config;
- void *__iomem base_addr;
+ void __iomem *base_addr;
};
int vpfe_resizer_init(struct vpfe_resizer_device *vpfe_rsz,
diff --git a/drivers/staging/media/lirc/lirc_bt829.c b/drivers/staging/media/lirc/lirc_bt829.c
index 4c806ba4132..44f56554717 100644
--- a/drivers/staging/media/lirc/lirc_bt829.c
+++ b/drivers/staging/media/lirc/lirc_bt829.c
@@ -56,11 +56,6 @@ static unsigned char do_get_bits(void);
#define DRIVER_NAME "lirc_bt829"
static bool debug;
-#define dprintk(fmt, args...) \
- do { \
- if (debug) \
- printk(KERN_DEBUG DRIVER_NAME ": "fmt, ## args); \
- } while (0)
static int atir_minor;
static phys_addr_t pci_addr_phys;
@@ -101,7 +96,7 @@ static int atir_add_to_buf(void *data, struct lirc_buffer *buf)
status = poll_main();
key = (status >> 8) & 0xFF;
if (status & 0xFF) {
- dprintk("reading key %02X\n", key);
+ dev_dbg(atir_driver.dev, "reading key %02X\n", key);
lirc_buffer_write(buf, &key);
return 0;
}
@@ -110,13 +105,13 @@ static int atir_add_to_buf(void *data, struct lirc_buffer *buf)
static int atir_set_use_inc(void *data)
{
- dprintk("driver is opened\n");
+ dev_dbg(atir_driver.dev, "driver is opened\n");
return 0;
}
static void atir_set_use_dec(void *data)
{
- dprintk("driver is closed\n");
+ dev_dbg(atir_driver.dev, "driver is closed\n");
}
int init_module(void)
@@ -154,7 +149,8 @@ int init_module(void)
rc = atir_minor;
goto err_unmap;
}
- dprintk("driver is registered on minor %d\n", atir_minor);
+ dev_dbg(atir_driver.dev, "driver is registered on minor %d\n",
+ atir_minor);
return 0;
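The lirc changes in this and the following drivers replace private dprintk()/zilog_*() macros with the standard logging helpers. dev_dbg()/pr_debug() hook into dynamic debug, so per-callsite runtime control replaces the driver's own "debug" module parameter. A rough sketch of the substitution; "dev" and "key" stand in for whatever the call site has available:

	/* Before: custom macro gated on a module parameter, e.g.
	 *   #define dprintk(fmt, args...) \
	 *	do { if (debug) printk(KERN_DEBUG DRIVER_NAME ": " fmt, ## args); } while (0)
	 *
	 * After: standard helpers (linux/device.h, linux/printk.h), enabled at
	 * run time via dynamic debug, e.g.
	 *   echo 'module lirc_bt829 +p' > /sys/kernel/debug/dynamic_debug/control
	 */
	dev_dbg(dev, "reading key %02X\n", key);	/* when a struct device is at hand */
	pr_debug("Buffer overrun.\n");			/* when it is not */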
diff --git a/drivers/staging/media/lirc/lirc_imon.c b/drivers/staging/media/lirc/lirc_imon.c
index 232edd5b174..9ce7d9990e3 100644
--- a/drivers/staging/media/lirc/lirc_imon.c
+++ b/drivers/staging/media/lirc/lirc_imon.c
@@ -495,7 +495,7 @@ static int ir_open(void *data)
/* prevent races with disconnect */
mutex_lock(&driver_lock);
- context = (struct imon_context *)data;
+ context = data;
/* initial IR protocol decode variables */
context->rx.count = 0;
@@ -516,7 +516,7 @@ static void ir_close(void *data)
{
struct imon_context *context;
- context = (struct imon_context *)data;
+ context = data;
if (!context) {
pr_err("%s: no context for device\n", __func__);
return;
@@ -572,29 +572,6 @@ static void submit_data(struct imon_context *context)
wake_up(&context->driver->rbuf->wait_poll);
}
-static inline int tv2int(const struct timeval *a, const struct timeval *b)
-{
- int usecs = 0;
- int sec = 0;
-
- if (b->tv_usec > a->tv_usec) {
- usecs = 1000000;
- sec--;
- }
-
- usecs += a->tv_usec - b->tv_usec;
-
- sec += a->tv_sec - b->tv_sec;
- sec *= 1000;
- usecs /= 1000;
- sec += usecs;
-
- if (sec < 0)
- sec = 1000;
-
- return sec;
-}
-
/**
* Process the incoming packet
*/
diff --git a/drivers/staging/media/lirc/lirc_sasem.c b/drivers/staging/media/lirc/lirc_sasem.c
index 2f0463eb988..4a268200cbf 100644
--- a/drivers/staging/media/lirc/lirc_sasem.c
+++ b/drivers/staging/media/lirc/lirc_sasem.c
@@ -488,7 +488,7 @@ static int ir_open(void *data)
/* prevent races with disconnect */
mutex_lock(&disconnect_lock);
- context = (struct sasem_context *) data;
+ context = data;
mutex_lock(&context->ctx_lock);
@@ -530,7 +530,7 @@ static void ir_close(void *data)
{
struct sasem_context *context;
- context = (struct sasem_context *)data;
+ context = data;
if (!context) {
pr_err("%s: no context for device\n", __func__);
return;
diff --git a/drivers/staging/media/lirc/lirc_sir.c b/drivers/staging/media/lirc/lirc_sir.c
index e961b521821..39f4733fb1e 100644
--- a/drivers/staging/media/lirc/lirc_sir.c
+++ b/drivers/staging/media/lirc/lirc_sir.c
@@ -140,12 +140,6 @@ static int rx_buf[RBUF_LEN];
static unsigned int rx_tail, rx_head;
static bool debug;
-#define dprintk(fmt, args...) \
- do { \
- if (debug) \
- printk(KERN_DEBUG LIRC_DRIVER_NAME ": " \
- fmt, ## args); \
- } while (0)
/* SECTION: Prototypes */
@@ -322,7 +316,7 @@ static void add_read_queue(int flag, unsigned long val)
unsigned int new_rx_tail;
int newval;
- dprintk("add flag %d with val %lu\n", flag, val);
+ pr_debug("add flag %d with val %lu\n", flag, val);
newval = val & PULSE_MASK;
@@ -342,7 +336,7 @@ static void add_read_queue(int flag, unsigned long val)
}
new_rx_tail = (rx_tail + 1) & (RBUF_LEN - 1);
if (new_rx_tail == rx_head) {
- dprintk("Buffer overrun.\n");
+ pr_debug("Buffer overrun.\n");
return;
}
rx_buf[rx_tail] = newval;
@@ -439,7 +433,8 @@ static void sir_timeout(unsigned long data)
outb(UART_FCR_CLEAR_RCVR, io + UART_FCR);
/* determine 'virtual' pulse end: */
pulse_end = delta(&last_tv, &last_intr_tv);
- dprintk("timeout add %d for %lu usec\n", last_value, pulse_end);
+ dev_dbg(driver.dev, "timeout add %d for %lu usec\n",
+ last_value, pulse_end);
add_read_queue(last_value, pulse_end);
last_value = 0;
last_tv = last_intr_tv;
@@ -479,14 +474,15 @@ static irqreturn_t sir_interrupt(int irq, void *dev_id)
do_gettimeofday(&curr_tv);
deltv = delta(&last_tv, &curr_tv);
deltintrtv = delta(&last_intr_tv, &curr_tv);
- dprintk("t %lu, d %d\n", deltintrtv, (int)data);
+ dev_dbg(driver.dev, "t %lu, d %d\n",
+ deltintrtv, (int)data);
/*
* if nothing came in last X cycles,
* it was gap
*/
if (deltintrtv > TIME_CONST * threshold) {
if (last_value) {
- dprintk("GAP\n");
+ dev_dbg(driver.dev, "GAP\n");
/* simulate signal change */
add_read_queue(last_value,
deltv -
diff --git a/drivers/staging/media/lirc/lirc_zilog.c b/drivers/staging/media/lirc/lirc_zilog.c
index 1e15d2cab72..cc872fb4ca6 100644
--- a/drivers/staging/media/lirc/lirc_zilog.c
+++ b/drivers/staging/media/lirc/lirc_zilog.c
@@ -152,23 +152,12 @@ struct tx_data_struct {
static struct tx_data_struct *tx_data;
static struct mutex tx_data_lock;
-#define zilog_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, \
- ## args)
-#define zilog_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)
-#define zilog_info(s, args...) printk(KERN_INFO KBUILD_MODNAME ": " s, ## args)
/* module parameters */
static bool debug; /* debug output */
static bool tx_only; /* only handle the IR Tx function */
static int minor = -1; /* minor number */
-#define dprintk(fmt, args...) \
- do { \
- if (debug) \
- printk(KERN_DEBUG KBUILD_MODNAME ": " fmt, \
- ## args); \
- } while (0)
-
/* struct IR reference counting */
static struct IR *get_ir_device(struct IR *ir, bool ir_devices_lock_held)
@@ -333,7 +322,7 @@ static int add_to_buf(struct IR *ir)
struct IR_tx *tx;
if (lirc_buffer_full(rbuf)) {
- dprintk("buffer overflow\n");
+ dev_dbg(ir->l.dev, "buffer overflow\n");
return -EOVERFLOW;
}
@@ -379,16 +368,17 @@ static int add_to_buf(struct IR *ir)
*/
ret = i2c_master_send(rx->c, sendbuf, 1);
if (ret != 1) {
- zilog_error("i2c_master_send failed with %d\n", ret);
+ dev_err(ir->l.dev, "i2c_master_send failed with %d\n",
+ ret);
if (failures >= 3) {
mutex_unlock(&ir->ir_lock);
- zilog_error("unable to read from the IR chip "
+ dev_err(ir->l.dev, "unable to read from the IR chip "
"after 3 resets, giving up\n");
break;
}
/* Looks like the chip crashed, reset it */
- zilog_error("polling the IR receiver chip failed, "
+ dev_err(ir->l.dev, "polling the IR receiver chip failed, "
"trying reset\n");
set_current_state(TASK_UNINTERRUPTIBLE);
@@ -415,13 +405,14 @@ static int add_to_buf(struct IR *ir)
ret = i2c_master_recv(rx->c, keybuf, sizeof(keybuf));
mutex_unlock(&ir->ir_lock);
if (ret != sizeof(keybuf)) {
- zilog_error("i2c_master_recv failed with %d -- "
+ dev_err(ir->l.dev, "i2c_master_recv failed with %d -- "
"keeping last read buffer\n", ret);
} else {
rx->b[0] = keybuf[3];
rx->b[1] = keybuf[4];
rx->b[2] = keybuf[5];
- dprintk("key (0x%02x/0x%02x)\n", rx->b[0], rx->b[1]);
+ dev_dbg(ir->l.dev, "key (0x%02x/0x%02x)\n",
+ rx->b[0], rx->b[1]);
}
/* key pressed ? */
@@ -472,7 +463,7 @@ static int lirc_thread(void *arg)
struct IR *ir = arg;
struct lirc_buffer *rbuf = ir->l.rbuf;
- dprintk("poll thread started\n");
+ dev_dbg(ir->l.dev, "poll thread started\n");
while (!kthread_should_stop()) {
set_current_state(TASK_INTERRUPTIBLE);
@@ -500,7 +491,7 @@ static int lirc_thread(void *arg)
wake_up_interruptible(&rbuf->wait_poll);
}
- dprintk("poll thread ended\n");
+ dev_dbg(ir->l.dev, "poll thread ended\n");
return 0;
}
@@ -644,7 +635,7 @@ static int get_key_data(unsigned char *buf,
return -EPROTO;
corrupt:
- zilog_error("firmware is corrupt\n");
+ pr_err("firmware is corrupt\n");
return -EFAULT;
}
@@ -662,10 +653,11 @@ static int send_data_block(struct IR_tx *tx, unsigned char *data_block)
buf[0] = (unsigned char)(i + 1);
for (j = 0; j < tosend; ++j)
buf[1 + j] = data_block[i + j];
- dprintk("%*ph", 5, buf);
+ dev_dbg(tx->ir->l.dev, "%*ph", 5, buf);
ret = i2c_master_send(tx->c, buf, tosend + 1);
if (ret != tosend + 1) {
- zilog_error("i2c_master_send failed with %d\n", ret);
+ dev_err(tx->ir->l.dev, "i2c_master_send failed with %d\n",
+ ret);
return ret < 0 ? ret : -EFAULT;
}
i += tosend;
@@ -689,7 +681,7 @@ static int send_boot_data(struct IR_tx *tx)
buf[1] = 0x20;
ret = i2c_master_send(tx->c, buf, 2);
if (ret != 2) {
- zilog_error("i2c_master_send failed with %d\n", ret);
+ dev_err(tx->ir->l.dev, "i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
@@ -706,21 +698,22 @@ static int send_boot_data(struct IR_tx *tx)
}
if (ret != 1) {
- zilog_error("i2c_master_send failed with %d\n", ret);
+ dev_err(tx->ir->l.dev, "i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
/* Here comes the firmware version... (hopefully) */
ret = i2c_master_recv(tx->c, buf, 4);
if (ret != 4) {
- zilog_error("i2c_master_recv failed with %d\n", ret);
+ dev_err(tx->ir->l.dev, "i2c_master_recv failed with %d\n", ret);
return 0;
}
if ((buf[0] != 0x80) && (buf[0] != 0xa0)) {
- zilog_error("unexpected IR TX init response: %02x\n", buf[0]);
+ dev_err(tx->ir->l.dev, "unexpected IR TX init response: %02x\n",
+ buf[0]);
return 0;
}
- zilog_notify("Zilog/Hauppauge IR blaster firmware version "
+ dev_notice(tx->ir->l.dev, "Zilog/Hauppauge IR blaster firmware version "
"%d.%d.%d loaded\n", buf[1], buf[2], buf[3]);
return 0;
@@ -736,7 +729,7 @@ static void fw_unload_locked(void)
vfree(tx_data);
tx_data = NULL;
- dprintk("successfully unloaded IR blaster firmware\n");
+ pr_debug("successfully unloaded IR blaster firmware\n");
}
}
@@ -766,17 +759,16 @@ static int fw_load(struct IR_tx *tx)
/* Request codeset data file */
ret = request_firmware(&fw_entry, "haup-ir-blaster.bin", tx->ir->l.dev);
if (ret != 0) {
- zilog_error("firmware haup-ir-blaster.bin not available (%d)\n",
+ dev_err(tx->ir->l.dev, "firmware haup-ir-blaster.bin not available (%d)\n",
ret);
ret = ret < 0 ? ret : -EFAULT;
goto out;
}
- dprintk("firmware of size %zu loaded\n", fw_entry->size);
+ dev_dbg(tx->ir->l.dev, "firmware of size %zu loaded\n", fw_entry->size);
/* Parse the file */
tx_data = vmalloc(sizeof(*tx_data));
if (tx_data == NULL) {
- zilog_error("out of memory\n");
release_firmware(fw_entry);
ret = -ENOMEM;
goto out;
@@ -786,7 +778,6 @@ static int fw_load(struct IR_tx *tx)
/* Copy the data so hotplug doesn't get confused and timeout */
tx_data->datap = vmalloc(fw_entry->size);
if (tx_data->datap == NULL) {
- zilog_error("out of memory\n");
release_firmware(fw_entry);
vfree(tx_data);
ret = -ENOMEM;
@@ -801,7 +792,7 @@ static int fw_load(struct IR_tx *tx)
if (!read_uint8(&data, tx_data->endp, &version))
goto corrupt;
if (version != 1) {
- zilog_error("unsupported code set file version (%u, expected"
+ dev_err(tx->ir->l.dev, "unsupported code set file version (%u, expected"
"1) -- please upgrade to a newer driver",
version);
fw_unload_locked();
@@ -818,7 +809,8 @@ static int fw_load(struct IR_tx *tx)
&tx_data->num_code_sets))
goto corrupt;
- dprintk("%u IR blaster codesets loaded\n", tx_data->num_code_sets);
+ dev_dbg(tx->ir->l.dev, "%u IR blaster codesets loaded\n",
+ tx_data->num_code_sets);
tx_data->code_sets = vmalloc(
tx_data->num_code_sets * sizeof(char *));
@@ -882,7 +874,7 @@ static int fw_load(struct IR_tx *tx)
goto out;
corrupt:
- zilog_error("firmware is corrupt\n");
+ dev_err(tx->ir->l.dev, "firmware is corrupt\n");
fw_unload_locked();
ret = -EFAULT;
@@ -902,9 +894,9 @@ static ssize_t read(struct file *filep, char __user *outbuf, size_t n,
unsigned int m;
DECLARE_WAITQUEUE(wait, current);
- dprintk("read called\n");
+ dev_dbg(ir->l.dev, "read called\n");
if (n % rbuf->chunk_size) {
- dprintk("read result = -EINVAL\n");
+ dev_dbg(ir->l.dev, "read result = -EINVAL\n");
return -EINVAL;
}
@@ -948,7 +940,7 @@ static ssize_t read(struct file *filep, char __user *outbuf, size_t n,
unsigned char buf[MAX_XFER_SIZE];
if (rbuf->chunk_size > sizeof(buf)) {
- zilog_error("chunk_size is too big (%d)!\n",
+ dev_err(ir->l.dev, "chunk_size is too big (%d)!\n",
rbuf->chunk_size);
ret = -EINVAL;
break;
@@ -962,7 +954,7 @@ static ssize_t read(struct file *filep, char __user *outbuf, size_t n,
retries++;
}
if (retries >= 5) {
- zilog_error("Buffer read failed!\n");
+ dev_err(ir->l.dev, "Buffer read failed!\n");
ret = -EIO;
}
}
@@ -972,7 +964,8 @@ static ssize_t read(struct file *filep, char __user *outbuf, size_t n,
put_ir_rx(rx, false);
set_current_state(TASK_RUNNING);
- dprintk("read result = %d (%s)\n", ret, ret ? "Error" : "OK");
+ dev_dbg(ir->l.dev, "read result = %d (%s)\n",
+ ret, ret ? "Error" : "OK");
return ret ? ret : written;
}
@@ -988,7 +981,7 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
ret = get_key_data(data_block, code, key);
if (ret == -EPROTO) {
- zilog_error("failed to get data for code %u, key %u -- check "
+ dev_err(tx->ir->l.dev, "failed to get data for code %u, key %u -- check "
"lircd.conf entries\n", code, key);
return ret;
} else if (ret != 0)
@@ -1004,7 +997,7 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
buf[1] = 0x40;
ret = i2c_master_send(tx->c, buf, 2);
if (ret != 2) {
- zilog_error("i2c_master_send failed with %d\n", ret);
+ dev_err(tx->ir->l.dev, "i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
@@ -1017,18 +1010,18 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
}
if (ret != 1) {
- zilog_error("i2c_master_send failed with %d\n", ret);
+ dev_err(tx->ir->l.dev, "i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
/* Send finished download? */
ret = i2c_master_recv(tx->c, buf, 1);
if (ret != 1) {
- zilog_error("i2c_master_recv failed with %d\n", ret);
+ dev_err(tx->ir->l.dev, "i2c_master_recv failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
if (buf[0] != 0xA0) {
- zilog_error("unexpected IR TX response #1: %02x\n",
+ dev_err(tx->ir->l.dev, "unexpected IR TX response #1: %02x\n",
buf[0]);
return -EFAULT;
}
@@ -1038,7 +1031,7 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
buf[1] = 0x80;
ret = i2c_master_send(tx->c, buf, 2);
if (ret != 2) {
- zilog_error("i2c_master_send failed with %d\n", ret);
+ dev_err(tx->ir->l.dev, "i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
@@ -1048,7 +1041,7 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
* going to skip this whole mess and say we're done on the HD PVR
*/
if (!tx->post_tx_ready_poll) {
- dprintk("sent code %u, key %u\n", code, key);
+ dev_dbg(tx->ir->l.dev, "sent code %u, key %u\n", code, key);
return 0;
}
@@ -1064,11 +1057,11 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
ret = i2c_master_send(tx->c, buf, 1);
if (ret == 1)
break;
- dprintk("NAK expected: i2c_master_send "
+ dev_dbg(tx->ir->l.dev, "NAK expected: i2c_master_send "
"failed with %d (try %d)\n", ret, i+1);
}
if (ret != 1) {
- zilog_error("IR TX chip never got ready: last i2c_master_send "
+ dev_err(tx->ir->l.dev, "IR TX chip never got ready: last i2c_master_send "
"failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
@@ -1076,16 +1069,17 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
/* Seems to be an 'ok' response */
i = i2c_master_recv(tx->c, buf, 1);
if (i != 1) {
- zilog_error("i2c_master_recv failed with %d\n", ret);
+ dev_err(tx->ir->l.dev, "i2c_master_recv failed with %d\n", ret);
return -EFAULT;
}
if (buf[0] != 0x80) {
- zilog_error("unexpected IR TX response #2: %02x\n", buf[0]);
+ dev_err(tx->ir->l.dev, "unexpected IR TX response #2: %02x\n",
+ buf[0]);
return -EFAULT;
}
/* Oh good, it worked */
- dprintk("sent code %u, key %u\n", code, key);
+ dev_dbg(tx->ir->l.dev, "sent code %u, key %u\n", code, key);
return 0;
}
@@ -1171,11 +1165,11 @@ static ssize_t write(struct file *filep, const char __user *buf, size_t n,
*/
if (ret != 0) {
/* Looks like the chip crashed, reset it */
- zilog_error("sending to the IR transmitter chip "
+ dev_err(tx->ir->l.dev, "sending to the IR transmitter chip "
"failed, trying reset\n");
if (failures >= 3) {
- zilog_error("unable to send to the IR chip "
+ dev_err(tx->ir->l.dev, "unable to send to the IR chip "
"after 3 resets, giving up\n");
mutex_unlock(&ir->ir_lock);
mutex_unlock(&tx->client_lock);
@@ -1210,7 +1204,7 @@ static unsigned int poll(struct file *filep, poll_table *wait)
struct lirc_buffer *rbuf = ir->l.rbuf;
unsigned int ret;
- dprintk("poll called\n");
+ dev_dbg(ir->l.dev, "poll called\n");
rx = get_ir_rx(ir);
if (rx == NULL) {
@@ -1218,7 +1212,7 @@ static unsigned int poll(struct file *filep, poll_table *wait)
* Revisit this, if our poll function ever reports writeable
* status for Tx
*/
- dprintk("poll result = POLLERR\n");
+ dev_dbg(ir->l.dev, "poll result = POLLERR\n");
return POLLERR;
}
@@ -1231,7 +1225,8 @@ static unsigned int poll(struct file *filep, poll_table *wait)
/* Indicate what ops could happen immediately without blocking */
ret = lirc_buffer_empty(rbuf) ? 0 : (POLLIN|POLLRDNORM);
- dprintk("poll result = %s\n", ret ? "POLLIN|POLLRDNORM" : "none");
+ dev_dbg(ir->l.dev, "poll result = %s\n",
+ ret ? "POLLIN|POLLRDNORM" : "none");
return ret;
}
@@ -1338,7 +1333,7 @@ static int close(struct inode *node, struct file *filep)
struct IR *ir = filep->private_data;
if (ir == NULL) {
- zilog_error("close: no private_data attached to the file!\n");
+ dev_err(ir->l.dev, "close: no private_data attached to the file!\n");
return -ENODEV;
}
@@ -1450,7 +1445,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
int ret;
bool tx_probe = false;
- dprintk("%s: %s on i2c-%d (%s), client addr=0x%02x\n",
+ dev_dbg(&client->dev, "%s: %s on i2c-%d (%s), client addr=0x%02x\n",
__func__, id->name, adap->nr, adap->name, client->addr);
/*
@@ -1463,7 +1458,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
else if (tx_only) /* module option */
return -ENXIO;
- zilog_info("probing IR %s on %s (i2c-%d)\n",
+ pr_info("probing IR %s on %s (i2c-%d)\n",
tx_probe ? "Tx" : "Rx", adap->name, adap->nr);
mutex_lock(&ir_devices_lock);
@@ -1545,7 +1540,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
/* Proceed only if the Rx client is also ready or not needed */
if (rx == NULL && !tx_only) {
- zilog_info("probe of IR Tx on %s (i2c-%d) done. Waiting"
+ dev_info(tx->ir->l.dev, "probe of IR Tx on %s (i2c-%d) done. Waiting"
" on IR Rx.\n", adap->name, adap->nr);
goto out_ok;
}
@@ -1584,7 +1579,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
"zilog-rx-i2c-%d", adap->nr);
if (IS_ERR(rx->task)) {
ret = PTR_ERR(rx->task);
- zilog_error("%s: could not start IR Rx polling thread"
+ dev_err(tx->ir->l.dev, "%s: could not start IR Rx polling thread"
"\n", __func__);
/* Failed kthread, so put back the ir ref */
put_ir_device(ir, true);
@@ -1597,7 +1592,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
/* Proceed only if the Tx client is also ready */
if (tx == NULL) {
- zilog_info("probe of IR Rx on %s (i2c-%d) done. Waiting"
+ pr_info("probe of IR Rx on %s (i2c-%d) done. Waiting"
" on IR Tx.\n", adap->name, adap->nr);
goto out_ok;
}
@@ -1607,12 +1602,12 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
ir->l.minor = minor; /* module option: user requested minor number */
ir->l.minor = lirc_register_driver(&ir->l);
if (ir->l.minor < 0 || ir->l.minor >= MAX_IRCTL_DEVICES) {
- zilog_error("%s: \"minor\" must be between 0 and %d (%d)!\n",
+ dev_err(tx->ir->l.dev, "%s: \"minor\" must be between 0 and %d (%d)!\n",
__func__, MAX_IRCTL_DEVICES-1, ir->l.minor);
ret = -EBADRQC;
goto out_put_xx;
}
- zilog_info("IR unit on %s (i2c-%d) registered as lirc%d and ready\n",
+ dev_info(ir->l.dev, "IR unit on %s (i2c-%d) registered as lirc%d and ready\n",
adap->name, adap->nr, ir->l.minor);
out_ok:
@@ -1621,7 +1616,7 @@ out_ok:
if (tx != NULL)
put_ir_tx(tx, true);
put_ir_device(ir, true);
- zilog_info("probe of IR %s on %s (i2c-%d) done\n",
+ dev_info(ir->l.dev, "probe of IR %s on %s (i2c-%d) done\n",
tx_probe ? "Tx" : "Rx", adap->name, adap->nr);
mutex_unlock(&ir_devices_lock);
return 0;
@@ -1634,7 +1629,7 @@ out_put_xx:
out_put_ir:
put_ir_device(ir, true);
out_no_ir:
- zilog_error("%s: probing IR %s on %s (i2c-%d) failed with %d\n",
+ dev_err(&client->dev, "%s: probing IR %s on %s (i2c-%d) failed with %d\n",
__func__, tx_probe ? "Tx" : "Rx", adap->name, adap->nr,
ret);
mutex_unlock(&ir_devices_lock);
@@ -1645,7 +1640,7 @@ static int __init zilog_init(void)
{
int ret;
- zilog_notify("Zilog/Hauppauge IR driver initializing\n");
+ pr_notice("Zilog/Hauppauge IR driver initializing\n");
mutex_init(&tx_data_lock);
@@ -1653,9 +1648,9 @@ static int __init zilog_init(void)
ret = i2c_add_driver(&driver);
if (ret)
- zilog_error("initialization failed\n");
+ pr_err("initialization failed\n");
else
- zilog_notify("initialization complete\n");
+ pr_notice("initialization complete\n");
return ret;
}
@@ -1665,7 +1660,7 @@ static void __exit zilog_exit(void)
i2c_del_driver(&driver);
/* if loaded */
fw_unload();
- zilog_notify("Zilog/Hauppauge IR driver unloaded\n");
+ pr_notice("Zilog/Hauppauge IR driver unloaded\n");
}
module_init(zilog_init);
diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c
index 96b14b326e0..cc1dfadd91e 100644
--- a/drivers/staging/media/omap4iss/iss.c
+++ b/drivers/staging/media/omap4iss/iss.c
@@ -1357,10 +1357,8 @@ static int iss_probe(struct platform_device *pdev)
return -EINVAL;
iss = devm_kzalloc(&pdev->dev, sizeof(*iss), GFP_KERNEL);
- if (!iss) {
- dev_err(&pdev->dev, "Could not allocate memory\n");
+ if (!iss)
return -ENOMEM;
- }
mutex_init(&iss->iss_mutex);
diff --git a/drivers/staging/media/omap4iss/iss_csi2.c b/drivers/staging/media/omap4iss/iss_csi2.c
index 7dbf68cd356..21971c675b8 100644
--- a/drivers/staging/media/omap4iss/iss_csi2.c
+++ b/drivers/staging/media/omap4iss/iss_csi2.c
@@ -1231,7 +1231,7 @@ static int csi2_init_entities(struct iss_csi2_device *csi2, const char *subname)
v4l2_subdev_init(sd, &csi2_ops);
sd->internal_ops = &csi2_internal_ops;
- sprintf(name, "CSI2%s", subname);
+ snprintf(name, sizeof(name), "CSI2%s", subname);
snprintf(sd->name, sizeof(sd->name), "OMAP4 ISS %s", name);
sd->grp_id = 1 << 16; /* group ID for iss subdevs */
diff --git a/drivers/media/parport/Kconfig b/drivers/staging/media/parport/Kconfig
index 948c981d9f0..15974efdba1 100644
--- a/drivers/media/parport/Kconfig
+++ b/drivers/staging/media/parport/Kconfig
@@ -7,18 +7,22 @@ menuconfig MEDIA_PARPORT_SUPPORT
if MEDIA_PARPORT_SUPPORT
config VIDEO_BWQCAM
- tristate "Quickcam BW Video For Linux"
+ tristate "Quickcam BW Video For Linux (Deprecated)"
depends on PARPORT && VIDEO_V4L2
select VIDEOBUF2_VMALLOC
help
Say Y here if you have the black and white version of the QuickCam
camera. See the next option for the color version.
+ This driver is deprecated and will be removed soon. If you have
+ hardware for this and you want to work on this driver, then contact
+ the linux-media mailing list.
+
To compile this driver as a module, choose M here: the
module will be called bw-qcam.
config VIDEO_CQCAM
- tristate "QuickCam Colour Video For Linux"
+ tristate "QuickCam Colour Video For Linux (Deprecated)"
depends on PARPORT && VIDEO_V4L2
help
This is the video4linux driver for the colour version of the
@@ -28,18 +32,26 @@ config VIDEO_CQCAM
as a module (c-qcam).
Read <file:Documentation/video4linux/CQcam.txt> for more information.
+ This driver is deprecated and will be removed soon. If you have
+ hardware for this and you want to work on this driver, then contact
+ the linux-media mailing list.
+
config VIDEO_PMS
- tristate "Mediavision Pro Movie Studio Video For Linux"
+ tristate "Mediavision Pro Movie Studio Video For Linux (Deprecated)"
depends on ISA && VIDEO_V4L2
help
Say Y if you have the ISA Mediavision Pro Movie Studio
capture card.
+ This driver is deprecated and will be removed soon. If you have
+ hardware for this and you want to work on this driver, then contact
+ the linux-media mailing list.
+
To compile this driver as a module, choose M here: the
module will be called pms.
config VIDEO_W9966
- tristate "W9966CF Webcam (FlyCam Supra and others) Video For Linux"
+ tristate "W9966CF Webcam (FlyCam Supra and others) Video For Linux (Deprecated)"
depends on PARPORT_1284 && PARPORT && VIDEO_V4L2
help
Video4linux driver for Winbond's w9966 based Webcams.
@@ -50,4 +62,8 @@ config VIDEO_W9966
Check out <file:Documentation/video4linux/w9966.txt> for more
information.
+
+ This driver is deprecated and will be removed soon. If you have
+ hardware for this and you want to work on this driver, then contact
+ the linux-media mailing list.
endif
diff --git a/drivers/media/parport/Makefile b/drivers/staging/media/parport/Makefile
index 4eea06d7af5..4eea06d7af5 100644
--- a/drivers/media/parport/Makefile
+++ b/drivers/staging/media/parport/Makefile
diff --git a/drivers/media/parport/bw-qcam.c b/drivers/staging/media/parport/bw-qcam.c
index 67b9da1dc43..67b9da1dc43 100644
--- a/drivers/media/parport/bw-qcam.c
+++ b/drivers/staging/media/parport/bw-qcam.c
diff --git a/drivers/media/parport/c-qcam.c b/drivers/staging/media/parport/c-qcam.c
index b9010bd3ed3..b9010bd3ed3 100644
--- a/drivers/media/parport/c-qcam.c
+++ b/drivers/staging/media/parport/c-qcam.c
diff --git a/drivers/media/parport/pms.c b/drivers/staging/media/parport/pms.c
index e6b497528ce..e6b497528ce 100644
--- a/drivers/media/parport/pms.c
+++ b/drivers/staging/media/parport/pms.c
diff --git a/drivers/media/parport/w9966.c b/drivers/staging/media/parport/w9966.c
index f7502f3a6a3..f7502f3a6a3 100644
--- a/drivers/media/parport/w9966.c
+++ b/drivers/staging/media/parport/w9966.c
diff --git a/drivers/media/usb/tlg2300/Kconfig b/drivers/staging/media/tlg2300/Kconfig
index 645d915267e..81784c6f7b8 100644
--- a/drivers/media/usb/tlg2300/Kconfig
+++ b/drivers/staging/media/tlg2300/Kconfig
@@ -1,5 +1,5 @@
config VIDEO_TLG2300
- tristate "Telegent TLG2300 USB video capture support"
+ tristate "Telegent TLG2300 USB video capture support (Deprecated)"
depends on VIDEO_DEV && I2C && SND && DVB_CORE
select VIDEO_TUNER
select VIDEO_TVEEPROM
@@ -12,5 +12,9 @@ config VIDEO_TLG2300
This is a video4linux driver for Telegent tlg2300 based TV cards.
The driver supports V4L2, DVB-T and radio.
+ This driver is deprecated and will be removed soon. If you have
+ hardware for this and you want to work on this driver, then contact
+ the linux-media mailing list.
+
To compile this driver as a module, choose M here: the
module will be called poseidon
diff --git a/drivers/media/usb/tlg2300/Makefile b/drivers/staging/media/tlg2300/Makefile
index 137f8e38cde..137f8e38cde 100644
--- a/drivers/media/usb/tlg2300/Makefile
+++ b/drivers/staging/media/tlg2300/Makefile
diff --git a/drivers/media/usb/tlg2300/pd-alsa.c b/drivers/staging/media/tlg2300/pd-alsa.c
index dd8fe100590..dd8fe100590 100644
--- a/drivers/media/usb/tlg2300/pd-alsa.c
+++ b/drivers/staging/media/tlg2300/pd-alsa.c
diff --git a/drivers/media/usb/tlg2300/pd-common.h b/drivers/staging/media/tlg2300/pd-common.h
index 9e23ad32d2f..9e23ad32d2f 100644
--- a/drivers/media/usb/tlg2300/pd-common.h
+++ b/drivers/staging/media/tlg2300/pd-common.h
diff --git a/drivers/media/usb/tlg2300/pd-dvb.c b/drivers/staging/media/tlg2300/pd-dvb.c
index ca4994a5190..ca4994a5190 100644
--- a/drivers/media/usb/tlg2300/pd-dvb.c
+++ b/drivers/staging/media/tlg2300/pd-dvb.c
diff --git a/drivers/media/usb/tlg2300/pd-main.c b/drivers/staging/media/tlg2300/pd-main.c
index b31f4791b8f..b31f4791b8f 100644
--- a/drivers/media/usb/tlg2300/pd-main.c
+++ b/drivers/staging/media/tlg2300/pd-main.c
diff --git a/drivers/media/usb/tlg2300/pd-radio.c b/drivers/staging/media/tlg2300/pd-radio.c
index b391194a840..b391194a840 100644
--- a/drivers/media/usb/tlg2300/pd-radio.c
+++ b/drivers/staging/media/tlg2300/pd-radio.c
diff --git a/drivers/media/usb/tlg2300/pd-video.c b/drivers/staging/media/tlg2300/pd-video.c
index 8cd7f02fcf9..8cd7f02fcf9 100644
--- a/drivers/media/usb/tlg2300/pd-video.c
+++ b/drivers/staging/media/tlg2300/pd-video.c
diff --git a/drivers/media/usb/tlg2300/vendorcmds.h b/drivers/staging/media/tlg2300/vendorcmds.h
index ba6f4ae3b2c..ba6f4ae3b2c 100644
--- a/drivers/media/usb/tlg2300/vendorcmds.h
+++ b/drivers/staging/media/tlg2300/vendorcmds.h
diff --git a/drivers/staging/media/vino/Kconfig b/drivers/staging/media/vino/Kconfig
new file mode 100644
index 00000000000..03700dadafd
--- /dev/null
+++ b/drivers/staging/media/vino/Kconfig
@@ -0,0 +1,24 @@
+config VIDEO_VINO
+ tristate "SGI Vino Video For Linux (Deprecated)"
+ depends on I2C && SGI_IP22 && VIDEO_V4L2
+ select VIDEO_SAA7191 if MEDIA_SUBDRV_AUTOSELECT
+ help
+ Say Y here to build in support for the Vino video input system found
+ on SGI Indy machines.
+
+ This driver is deprecated and will be removed soon. If you have
+ hardware for this and you want to work on this driver, then contact
+ the linux-media mailing list.
+
+config VIDEO_SAA7191
+ tristate "Philips SAA7191 video decoder (Deprecated)"
+ depends on VIDEO_V4L2 && I2C
+ ---help---
+ Support for the Philips SAA7191 video decoder.
+
+ This driver is deprecated and will be removed soon. If you have
+ hardware for this and you want to work on this driver, then contact
+ the linux-media mailing list.
+
+ To compile this driver as a module, choose M here: the
+ module will be called saa7191.
diff --git a/drivers/staging/media/vino/Makefile b/drivers/staging/media/vino/Makefile
new file mode 100644
index 00000000000..914c2513687
--- /dev/null
+++ b/drivers/staging/media/vino/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_VIDEO_VINO) += indycam.o
+obj-$(CONFIG_VIDEO_VINO) += vino.o
+obj-$(CONFIG_VIDEO_SAA7191) += saa7191.o
diff --git a/drivers/media/platform/indycam.c b/drivers/staging/media/vino/indycam.c
index f1d192bbcb4..f1d192bbcb4 100644
--- a/drivers/media/platform/indycam.c
+++ b/drivers/staging/media/vino/indycam.c
diff --git a/drivers/media/platform/indycam.h b/drivers/staging/media/vino/indycam.h
index 881f21c474c..881f21c474c 100644
--- a/drivers/media/platform/indycam.h
+++ b/drivers/staging/media/vino/indycam.h
diff --git a/drivers/media/i2c/saa7191.c b/drivers/staging/media/vino/saa7191.c
index 8e9699268a6..8e9699268a6 100644
--- a/drivers/media/i2c/saa7191.c
+++ b/drivers/staging/media/vino/saa7191.c
diff --git a/drivers/media/i2c/saa7191.h b/drivers/staging/media/vino/saa7191.h
index 803c74d6066..803c74d6066 100644
--- a/drivers/media/i2c/saa7191.h
+++ b/drivers/staging/media/vino/saa7191.h
diff --git a/drivers/media/platform/vino.c b/drivers/staging/media/vino/vino.c
index 2c85357f774..2c85357f774 100644
--- a/drivers/media/platform/vino.c
+++ b/drivers/staging/media/vino/vino.c
diff --git a/drivers/media/platform/vino.h b/drivers/staging/media/vino/vino.h
index de2d615ae7c..de2d615ae7c 100644
--- a/drivers/media/platform/vino.h
+++ b/drivers/staging/media/vino/vino.h
diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c
index 2f8eaf768bf..6b8b108c4e6 100644
--- a/drivers/staging/octeon-usb/octeon-hcd.c
+++ b/drivers/staging/octeon-usb/octeon-hcd.c
@@ -456,6 +456,16 @@ struct octeon_temp_buffer {
u8 data[0];
};
+static inline struct octeon_hcd *cvmx_usb_to_octeon(struct cvmx_usb_state *p)
+{
+ return container_of(p, struct octeon_hcd, usb);
+}
+
+static inline struct usb_hcd *octeon_to_hcd(struct octeon_hcd *p)
+{
+ return container_of((void *)p, struct usb_hcd, hcd_priv);
+}
+
/**
* octeon_alloc_temp_buffer - allocate a temporary buffer for USB transfer
* (if needed)
@@ -640,8 +650,7 @@ static inline int __cvmx_usb_get_data_pid(struct cvmx_usb_pipe *pipe)
{
if (pipe->pid_toggle)
return 2; /* Data1 */
- else
- return 0; /* Data0 */
+ return 0; /* Data0 */
}
/**
@@ -744,7 +753,7 @@ static int cvmx_usb_initialize(struct cvmx_usb_state *usb,
* such that USB is as close as possible to 125Mhz
*/
{
- int divisor = (octeon_get_clock_rate()+125000000-1)/125000000;
+ int divisor = DIV_ROUND_UP(octeon_get_clock_rate(), 125000000);
/* Lower than 4 doesn't seem to work properly */
if (divisor < 4)
divisor = 4;
@@ -1328,7 +1337,8 @@ static void __cvmx_usb_poll_rx_fifo(struct cvmx_usb_state *usb)
/* Loop writing the FIFO data for this packet into memory */
while (bytes > 0) {
- *ptr++ = __cvmx_usb_read_csr32(usb, USB_FIFO_ADDRESS(channel, usb->index));
+ *ptr++ = __cvmx_usb_read_csr32(usb,
+ USB_FIFO_ADDRESS(channel, usb->index));
bytes -= 4;
}
CVMX_SYNCW;
@@ -1479,7 +1489,8 @@ static void __cvmx_usb_fill_tx_fifo(struct cvmx_usb_state *usb, int channel)
fifo = &usb->nonperiodic;
fifo->entry[fifo->head].channel = channel;
- fifo->entry[fifo->head].address = __cvmx_usb_read_csr64(usb, CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) + channel*8);
+ fifo->entry[fifo->head].address = __cvmx_usb_read_csr64(usb,
+ CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) + channel*8);
fifo->entry[fifo->head].size = (usbc_hctsiz.s.xfersize+3)>>2;
fifo->head++;
if (fifo->head > MAX_CHANNELS)
@@ -1501,6 +1512,9 @@ static void __cvmx_usb_start_channel_control(struct cvmx_usb_state *usb,
int channel,
struct cvmx_usb_pipe *pipe)
{
+ struct octeon_hcd *priv = cvmx_usb_to_octeon(usb);
+ struct usb_hcd *hcd = octeon_to_hcd(priv);
+ struct device *dev = hcd->self.controller;
struct cvmx_usb_transaction *transaction =
list_first_entry(&pipe->transactions, typeof(*transaction),
node);
@@ -1517,7 +1531,7 @@ static void __cvmx_usb_start_channel_control(struct cvmx_usb_state *usb,
switch (transaction->stage) {
case CVMX_USB_STAGE_NON_CONTROL:
case CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE:
- cvmx_dprintf("%s: ERROR - Non control stage\n", __func__);
+ dev_err(dev, "%s: ERROR - Non control stage\n", __func__);
break;
case CVMX_USB_STAGE_SETUP:
usbc_hctsiz.s.pid = 3; /* Setup */
@@ -1607,8 +1621,8 @@ static void __cvmx_usb_start_channel_control(struct cvmx_usb_state *usb,
* Calculate the number of packets to transfer. If the length is zero
* we still need to transfer one packet
*/
- packets_to_transfer = (bytes_to_transfer + pipe->max_packet - 1) /
- pipe->max_packet;
+ packets_to_transfer = DIV_ROUND_UP(bytes_to_transfer,
+ pipe->max_packet);
if (packets_to_transfer == 0)
packets_to_transfer = 1;
else if ((packets_to_transfer > 1) &&
@@ -1700,7 +1714,9 @@ static void __cvmx_usb_start_channel(struct cvmx_usb_state *usb,
usbc_hcintmsk.s.stallmsk = 1;
usbc_hcintmsk.s.xfercomplmsk = 1;
}
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), usbc_hcintmsk.u32);
+ __cvmx_usb_write_csr32(usb,
+ CVMX_USBCX_HCINTMSKX(channel, usb->index),
+ usbc_hcintmsk.u32);
/* Enable the channel interrupt to propagate */
usbc_haintmsk.u32 = __cvmx_usb_read_csr32(usb,
@@ -1853,8 +1869,7 @@ static void __cvmx_usb_start_channel(struct cvmx_usb_state *usb,
* zero we still need to transfer one packet
*/
packets_to_transfer =
- (bytes_to_transfer + pipe->max_packet - 1) /
- pipe->max_packet;
+ DIV_ROUND_UP(bytes_to_transfer, pipe->max_packet);
if (packets_to_transfer == 0)
packets_to_transfer = 1;
else if ((packets_to_transfer > 1) &&
@@ -2110,16 +2125,6 @@ done:
union cvmx_usbcx_gintmsk, sofmsk, need_sof);
}
-static inline struct octeon_hcd *cvmx_usb_to_octeon(struct cvmx_usb_state *p)
-{
- return container_of(p, struct octeon_hcd, usb);
-}
-
-static inline struct usb_hcd *octeon_to_hcd(struct octeon_hcd *p)
-{
- return container_of((void *)p, struct usb_hcd, hcd_priv);
-}
-
static void octeon_usb_urb_complete_callback(struct cvmx_usb_state *usb,
enum cvmx_usb_complete status,
struct cvmx_usb_pipe *pipe,
@@ -2583,6 +2588,9 @@ static int cvmx_usb_get_frame_number(struct cvmx_usb_state *usb)
*/
static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
{
+ struct octeon_hcd *priv = cvmx_usb_to_octeon(usb);
+ struct usb_hcd *hcd = octeon_to_hcd(priv);
+ struct device *dev = hcd->self.controller;
union cvmx_usbcx_hcintx usbc_hcint;
union cvmx_usbcx_hctsizx usbc_hctsiz;
union cvmx_usbcx_hccharx usbc_hcchar;
@@ -2640,8 +2648,8 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
* Channel halt isn't needed.
*/
} else {
- cvmx_dprintf("USB%d: Channel %d interrupt without halt\n",
- usb->index, channel);
+ dev_err(dev, "USB%d: Channel %d interrupt without halt\n",
+ usb->index, channel);
return 0;
}
}
@@ -2881,9 +2889,11 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
struct usb_ctrlrequest *header =
cvmx_phys_to_ptr(transaction->control_header);
if (header->wLength)
- transaction->stage = CVMX_USB_STAGE_DATA;
+ transaction->stage =
+ CVMX_USB_STAGE_DATA;
else
- transaction->stage = CVMX_USB_STAGE_STATUS;
+ transaction->stage =
+ CVMX_USB_STAGE_STATUS;
}
break;
case CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE:
@@ -2891,9 +2901,11 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
struct usb_ctrlrequest *header =
cvmx_phys_to_ptr(transaction->control_header);
if (header->wLength)
- transaction->stage = CVMX_USB_STAGE_DATA;
+ transaction->stage =
+ CVMX_USB_STAGE_DATA;
else
- transaction->stage = CVMX_USB_STAGE_STATUS;
+ transaction->stage =
+ CVMX_USB_STAGE_STATUS;
}
break;
case CVMX_USB_STAGE_DATA:
@@ -3015,7 +3027,8 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
* is complete, the pipe sleeps until the next
* schedule interval
*/
- if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) {
+ if (pipe->transfer_dir ==
+ CVMX_USB_DIRECTION_OUT) {
/*
* If no space left or this wasn't a max
* size packet then this transfer is
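
Two patterns recur in the octeon-hcd changes above: open-coded round-up division becomes DIV_ROUND_UP(), and the container_of() helpers mapping a cvmx_usb_state back to its octeon_hcd and usb_hcd are moved earlier so the channel-control and poll paths can resolve a struct device for dev_err(). The DIV_ROUND_UP() identity can be checked stand-alone (the values below are made up):

    #include <stdio.h>

    /* Same definition the kernel uses, valid for non-negative integer operands. */
    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned long bytes = 1000, max_packet = 512;
            unsigned long open_coded = (bytes + max_packet - 1) / max_packet;

            printf("%lu %lu\n", open_coded, DIV_ROUND_UP(bytes, max_packet));   /* 2 2 */
            return 0;
    }
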
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index b2b6c3cd2be..fcbe836aa99 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -61,66 +61,7 @@
#include <asm/octeon/cvmx-gmxx-defs.h>
-struct cvm_napi_wrapper {
- struct napi_struct napi;
-} ____cacheline_aligned_in_smp;
-
-static struct cvm_napi_wrapper cvm_oct_napi[NR_CPUS] __cacheline_aligned_in_smp;
-
-struct cvm_oct_core_state {
- int baseline_cores;
- /*
- * The number of additional cores that could be processing
- * input packets.
- */
- atomic_t available_cores;
- cpumask_t cpu_state;
-} ____cacheline_aligned_in_smp;
-
-static struct cvm_oct_core_state core_state __cacheline_aligned_in_smp;
-
-static int cvm_irq_cpu;
-
-static void cvm_oct_enable_napi(void *_)
-{
- int cpu = smp_processor_id();
- napi_schedule(&cvm_oct_napi[cpu].napi);
-}
-
-static void cvm_oct_enable_one_cpu(void)
-{
- int v;
- int cpu;
-
- /* Check to see if more CPUs are available for receive processing... */
- v = atomic_sub_if_positive(1, &core_state.available_cores);
- if (v < 0)
- return;
-
- /* ... if a CPU is available, Turn on NAPI polling for that CPU. */
- for_each_online_cpu(cpu) {
- if (!cpu_test_and_set(cpu, core_state.cpu_state)) {
- v = smp_call_function_single(cpu, cvm_oct_enable_napi,
- NULL, 0);
- if (v)
- panic("Can't enable NAPI.");
- break;
- }
- }
-}
-
-static void cvm_oct_no_more_work(void)
-{
- int cpu = smp_processor_id();
-
- if (cpu == cvm_irq_cpu) {
- enable_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group);
- return;
- }
-
- cpu_clear(cpu, core_state.cpu_state);
- atomic_add(1, &core_state.available_cores);
-}
+static struct napi_struct cvm_oct_napi;
/**
* cvm_oct_do_interrupt - interrupt handler.
@@ -132,8 +73,7 @@ static irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id)
{
/* Disable the IRQ and start napi_poll. */
disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);
- cvm_irq_cpu = smp_processor_id();
- cvm_oct_enable_napi(NULL);
+ napi_schedule(&cvm_oct_napi);
return IRQ_HANDLED;
}
@@ -186,13 +126,15 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
if (*ptr == 0xd5) {
/*
- printk_ratelimited("Port %d received 0xd5 preamble\n", work->ipprt);
+ printk_ratelimited("Port %d received 0xd5 preamble\n",
+ work->ipprt);
*/
work->packet_ptr.s.addr += i + 1;
work->len -= i + 5;
} else if ((*ptr & 0xf) == 0xd) {
/*
- printk_ratelimited("Port %d received 0x?d preamble\n", work->ipprt);
+ printk_ratelimited("Port %d received 0x?d preamble\n",
+ work->ipprt);
*/
work->packet_ptr.s.addr += i;
work->len -= i + 4;
@@ -278,28 +220,15 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
break;
}
- pskb = (struct sk_buff **)(cvm_oct_get_buffer_ptr(work->packet_ptr) - sizeof(void *));
+ pskb = (struct sk_buff **)(cvm_oct_get_buffer_ptr(work->packet_ptr) -
+ sizeof(void *));
prefetch(pskb);
if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) {
- cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
+ cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH,
+ CVMX_POW_NO_WAIT);
did_work_request = 1;
}
-
- if (rx_count == 0) {
- /*
- * First time through, see if there is enough
- * work waiting to merit waking another
- * CPU.
- */
- union cvmx_pow_wq_int_cntx counts;
- int backlog;
- int cores_in_use = core_state.baseline_cores - atomic_read(&core_state.available_cores);
- counts.u64 = cvmx_read_csr(CVMX_POW_WQ_INT_CNTX(pow_receive_group));
- backlog = counts.s.iq_cnt + counts.s.ds_cnt;
- if (backlog > budget * cores_in_use && napi != NULL)
- cvm_oct_enable_one_cpu();
- }
rx_count++;
skb_in_hw = USE_SKBUFFS_IN_HW && work->word2.s.bufs == 1;
@@ -322,7 +251,8 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
* buffer.
*/
if (likely(skb_in_hw)) {
- skb->data = skb->head + work->packet_ptr.s.addr - cvmx_ptr_to_phys(skb->head);
+ skb->data = skb->head + work->packet_ptr.s.addr -
+ cvmx_ptr_to_phys(skb->head);
prefetch(skb->data);
skb->len = work->len;
skb_set_tail_pointer(skb, skb->len);
@@ -359,7 +289,8 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
/* No packet buffers to free */
} else {
int segments = work->word2.s.bufs;
- union cvmx_buf_ptr segment_ptr = work->packet_ptr;
+ union cvmx_buf_ptr segment_ptr =
+ work->packet_ptr;
int len = work->len;
while (segments--) {
@@ -375,8 +306,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
* one: int segment_size =
* segment_ptr.s.size;
*/
- int segment_size = CVMX_FPA_PACKET_POOL_SIZE -
- (segment_ptr.s.addr - (((segment_ptr.s.addr >> 7) - segment_ptr.s.back) << 7));
+ int segment_size =
+ CVMX_FPA_PACKET_POOL_SIZE -
+ (segment_ptr.s.addr -
+ (((segment_ptr.s.addr >> 7) -
+ segment_ptr.s.back) << 7));
/*
* Don't copy more than what
* is left in the packet.
@@ -407,8 +341,10 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
skb->protocol = eth_type_trans(skb, dev);
skb->dev = dev;
- if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc ||
- work->word2.s.L4_error || !work->word2.s.tcp_or_udp))
+ if (unlikely(work->word2.s.not_IP ||
+ work->word2.s.IP_exc ||
+ work->word2.s.L4_error ||
+ !work->word2.s.tcp_or_udp))
skb->ip_summed = CHECKSUM_NONE;
else
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -416,11 +352,15 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
/* Increment RX stats for virtual ports */
if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
#ifdef CONFIG_64BIT
- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
+ atomic64_add(1,
+ (atomic64_t *)&priv->stats.rx_packets);
+ atomic64_add(skb->len,
+ (atomic64_t *)&priv->stats.rx_bytes);
#else
- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
+ atomic_add(1,
+ (atomic_t *)&priv->stats.rx_packets);
+ atomic_add(skb->len,
+ (atomic_t *)&priv->stats.rx_bytes);
#endif
}
netif_receive_skb(skb);
@@ -431,9 +371,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
dev->name);
*/
#ifdef CONFIG_64BIT
- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
+ atomic64_add(1,
+ (atomic64_t *)&priv->stats.rx_dropped);
#else
- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
+ atomic_add(1,
+ (atomic_t *)&priv->stats.rx_dropped);
#endif
dev_kfree_skb_irq(skb);
}
@@ -476,7 +418,7 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
if (rx_count < budget && napi != NULL) {
/* No more work */
napi_complete(napi);
- cvm_oct_no_more_work();
+ enable_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group);
}
return rx_count;
}
@@ -511,18 +453,10 @@ void cvm_oct_rx_initialize(void)
if (NULL == dev_for_napi)
panic("No net_devices were allocated.");
- if (max_rx_cpus >= 1 && max_rx_cpus < num_online_cpus())
- atomic_set(&core_state.available_cores, max_rx_cpus);
- else
- atomic_set(&core_state.available_cores, num_online_cpus());
- core_state.baseline_cores = atomic_read(&core_state.available_cores);
-
- core_state.cpu_state = CPU_MASK_NONE;
- for_each_possible_cpu(i) {
- netif_napi_add(dev_for_napi, &cvm_oct_napi[i].napi,
- cvm_oct_napi_poll, rx_napi_weight);
- napi_enable(&cvm_oct_napi[i].napi);
- }
+ netif_napi_add(dev_for_napi, &cvm_oct_napi, cvm_oct_napi_poll,
+ rx_napi_weight);
+ napi_enable(&cvm_oct_napi);
+
/* Register an IRQ handler to receive POW interrupts */
i = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group,
cvm_oct_do_interrupt, 0, "Ethernet", cvm_oct_device);
@@ -543,15 +477,11 @@ void cvm_oct_rx_initialize(void)
int_pc.s.pc_thr = 5;
cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
-
- /* Scheduld NAPI now. This will indirectly enable interrupts. */
- cvm_oct_enable_one_cpu();
+ /* Schedule NAPI now. This will indirectly enable the interrupt. */
+ napi_schedule(&cvm_oct_napi);
}
void cvm_oct_rx_shutdown(void)
{
- int i;
- /* Shutdown all of the NAPIs */
- for_each_possible_cpu(i)
- netif_napi_del(&cvm_oct_napi[i].napi);
+ netif_napi_del(&cvm_oct_napi);
}
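
The ethernet-rx rewrite above collapses the per-CPU NAPI instances into a single napi_struct: the interrupt handler only disables the IRQ and schedules NAPI, and the poll function re-enables the IRQ once it runs out of work. A compressed sketch of that shape, using hypothetical names and an assumed IRQ number rather than the driver's own:

    static struct napi_struct example_napi;

    static irqreturn_t example_interrupt(int irq, void *dev_id)
    {
            disable_irq_nosync(irq);          /* stop the interrupt source  */
            napi_schedule(&example_napi);     /* hand the work to NAPI poll */
            return IRQ_HANDLED;
    }

    static int example_poll(struct napi_struct *napi, int budget)
    {
            int rx_count = 0;

            /* receive packets here, incrementing rx_count up to 'budget' */

            if (rx_count < budget) {
                    napi_complete(napi);      /* no more work for now       */
                    enable_irq(EXAMPLE_IRQ);  /* assumed interrupt number   */
            }
            return rx_count;
    }
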
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 4e54d854021..b7a7854d3f7 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -77,6 +77,7 @@ static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0);
static inline int32_t cvm_oct_adjust_skb_to_free(int32_t skb_to_free, int fau)
{
int32_t undo;
+
undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free +
MAX_SKB_TO_FREE;
if (undo > 0)
@@ -89,6 +90,7 @@ static inline int32_t cvm_oct_adjust_skb_to_free(int32_t skb_to_free, int fau)
static void cvm_oct_kick_tx_poll_watchdog(void)
{
union cvmx_ciu_timx ciu_timx;
+
ciu_timx.u64 = 0;
ciu_timx.s.one_shot = 1;
ciu_timx.s.len = cvm_oct_tx_poll_interval;
@@ -118,9 +120,11 @@ static void cvm_oct_free_tx_skbs(struct net_device *dev)
total_freed += skb_to_free;
if (skb_to_free > 0) {
struct sk_buff *to_free_list = NULL;
+
spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
while (skb_to_free > 0) {
struct sk_buff *t;
+
t = __skb_dequeue(&priv->tx_free_list[qos]);
t->next = to_free_list;
to_free_list = t;
@@ -131,6 +135,7 @@ static void cvm_oct_free_tx_skbs(struct net_device *dev)
/* Do the actual freeing outside of the lock. */
while (to_free_list) {
struct sk_buff *t = to_free_list;
+
to_free_list = to_free_list->next;
dev_kfree_skb_any(t);
}
@@ -258,6 +263,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
if (gmx_prt_cfg.s.duplex == 0) {
int add_bytes = 64 - skb->len;
+
if ((skb_tail_pointer(skb) + add_bytes) <=
skb_end_pointer(skb))
memset(__skb_put(skb, add_bytes), 0,
@@ -289,6 +295,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i;
+
hw_buffer.s.addr = XKPHYS_TO_PHYS(
(u64)(page_address(fs->page.p) +
fs->page_offset));
@@ -495,6 +502,7 @@ skip_xmit:
while (skb_to_free > 0) {
struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]);
+
t->next = to_free_list;
to_free_list = t;
skb_to_free--;
@@ -505,6 +513,7 @@ skip_xmit:
/* Do the actual freeing outside of the lock. */
while (to_free_list) {
struct sk_buff *t = to_free_list;
+
to_free_list = to_free_list->next;
dev_kfree_skb_any(t);
}
@@ -550,6 +559,7 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
/* Get a work queue entry */
cvmx_wqe_t *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
+
if (unlikely(work == NULL)) {
printk_ratelimited("%s: Failed to allocate a work queue entry\n",
dev->name);
@@ -713,6 +723,7 @@ static void cvm_oct_tx_do_cleanup(unsigned long arg)
for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
if (cvm_oct_device[port]) {
struct net_device *dev = cvm_oct_device[port];
+
cvm_oct_free_tx_skbs(dev);
}
}
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index af24294d946..ee321496dcd 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -98,12 +98,6 @@ MODULE_PARM_DESC(pow_send_list, "\n"
"\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
"\tusing the pow_send_group.");
-int max_rx_cpus = -1;
-module_param(max_rx_cpus, int, 0444);
-MODULE_PARM_DESC(max_rx_cpus, "\n"
- "\t\tThe maximum number of CPUs to use for packet reception.\n"
- "\t\tUse -1 to use all available CPUs.");
-
int rx_napi_weight = 32;
module_param(rx_napi_weight, int, 0444);
MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");
@@ -452,7 +446,7 @@ int cvm_oct_common_init(struct net_device *dev)
mac = of_get_mac_address(priv->of_node);
if (mac)
- memcpy(dev->dev_addr, mac, ETH_ALEN);
+ ether_addr_copy(dev->dev_addr, mac);
else
eth_hw_addr_random(dev);
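
Beyond dropping the max_rx_cpus parameter, the ethernet.c hunk above switches the 6-byte MAC copy from memcpy() to ether_addr_copy(). A small sketch of the same call pattern, with a hypothetical helper name:

    #include <linux/etherdevice.h>

    static void example_assign_mac(struct net_device *dev, const u8 *mac)
    {
            if (mac)
                    ether_addr_copy(dev->dev_addr, mac);   /* copies ETH_ALEN (6) bytes */
            else
                    eth_hw_addr_random(dev);               /* otherwise use a random MAC */
    }
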
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index d0e32111991..f48dc766fad 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -99,7 +99,6 @@ extern struct workqueue_struct *cvm_oct_poll_queue;
extern atomic_t cvm_oct_poll_queue_stopping;
extern u64 cvm_oct_tx_poll_interval;
-extern int max_rx_cpus;
extern int rx_napi_weight;
#endif
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
index eb83b28b8cd..6a9a8815477 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -682,8 +682,7 @@ static int dcon_remove(struct i2c_client *client)
free_irq(DCON_IRQ, dcon);
- if (dcon->bl_dev)
- backlight_device_unregister(dcon->bl_dev);
+ backlight_device_unregister(dcon->bl_dev);
if (dcon_device != NULL)
platform_device_unregister(dcon_device);
diff --git a/drivers/staging/ozwpan/ozhcd.c b/drivers/staging/ozwpan/ozhcd.c
index 347b8b1ffa2..8543bb29a13 100644
--- a/drivers/staging/ozwpan/ozhcd.c
+++ b/drivers/staging/ozwpan/ozhcd.c
@@ -353,8 +353,7 @@ static void oz_complete_urb(struct usb_hcd *hcd, struct urb *urb,
}
spin_lock(&g_tasklet_lock);
spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
- if (cancel_urbl)
- oz_free_urb_link(cancel_urbl);
+ oz_free_urb_link(cancel_urbl);
}
/*
@@ -522,8 +521,7 @@ static int oz_dequeue_ep_urb(struct oz_port *port, u8 ep_addr, int in_dir,
}
}
spin_unlock_bh(&port->ozhcd->hcd_lock);
- if (urbl)
- oz_free_urb_link(urbl);
+ oz_free_urb_link(urbl);
return urbl ? 0 : -EIDRM;
}
@@ -729,7 +727,7 @@ void oz_hcd_pd_reset(void *hpd, void *hport)
{
/* Cleanup the current configuration and report reset to the core.
*/
- struct oz_port *port = (struct oz_port *)hport;
+ struct oz_port *port = hport;
struct oz_hcd *ozhcd = port->ozhcd;
oz_dbg(ON, "PD Reset\n");
@@ -748,7 +746,7 @@ void oz_hcd_pd_reset(void *hpd, void *hport)
void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc,
int length, int offset, int total_size)
{
- struct oz_port *port = (struct oz_port *)hport;
+ struct oz_port *port = hport;
struct urb *urb;
int err = 0;
@@ -888,7 +886,7 @@ static void oz_hcd_complete_set_interface(struct oz_port *port, struct urb *urb,
void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode, const u8 *data,
int data_len)
{
- struct oz_port *port = (struct oz_port *)hport;
+ struct oz_port *port = hport;
struct urb *urb;
struct usb_ctrlrequest *setup;
struct usb_hcd *hcd = port->ozhcd->hcd;
@@ -1035,7 +1033,7 @@ static inline int oz_usb_get_frame_number(void)
int oz_hcd_heartbeat(void *hport)
{
int rc = 0;
- struct oz_port *port = (struct oz_port *)hport;
+ struct oz_port *port = hport;
struct oz_hcd *ozhcd = port->ozhcd;
struct oz_urb_link *urbl, *n;
LIST_HEAD(xfr_list);
@@ -1913,7 +1911,7 @@ static void oz_get_hub_descriptor(struct usb_hcd *hcd,
memset(desc, 0, sizeof(*desc));
desc->bDescriptorType = 0x29;
desc->bDescLength = 9;
- desc->wHubCharacteristics = (__force __u16)cpu_to_le16(0x0001);
+ desc->wHubCharacteristics = cpu_to_le16(0x0001);
desc->bNbrPorts = OZ_NB_PORTS;
}
@@ -2031,11 +2029,11 @@ static int oz_clear_port_feature(struct usb_hcd *hcd, u16 wvalue, u16 windex)
break;
case USB_PORT_FEAT_C_CONNECTION:
oz_dbg(HUB, "USB_PORT_FEAT_C_CONNECTION\n");
- clear_bits = (USB_PORT_STAT_C_CONNECTION << 16);
+ clear_bits = USB_PORT_STAT_C_CONNECTION << 16;
break;
case USB_PORT_FEAT_C_ENABLE:
oz_dbg(HUB, "USB_PORT_FEAT_C_ENABLE\n");
- clear_bits = (USB_PORT_STAT_C_ENABLE << 16);
+ clear_bits = USB_PORT_STAT_C_ENABLE << 16;
break;
case USB_PORT_FEAT_C_SUSPEND:
oz_dbg(HUB, "USB_PORT_FEAT_C_SUSPEND\n");
@@ -2045,7 +2043,7 @@ static int oz_clear_port_feature(struct usb_hcd *hcd, u16 wvalue, u16 windex)
break;
case USB_PORT_FEAT_C_RESET:
oz_dbg(HUB, "USB_PORT_FEAT_C_RESET\n");
- clear_bits = (USB_PORT_FEAT_C_RESET << 16);
+ clear_bits = USB_PORT_FEAT_C_RESET << 16;
break;
case USB_PORT_FEAT_TEST:
oz_dbg(HUB, "USB_PORT_FEAT_TEST\n");
diff --git a/drivers/staging/ozwpan/ozusbsvc1.c b/drivers/staging/ozwpan/ozusbsvc1.c
index be7ee01c50a..d434d8c6fff 100644
--- a/drivers/staging/ozwpan/ozusbsvc1.c
+++ b/drivers/staging/ozwpan/ozusbsvc1.c
@@ -56,7 +56,7 @@ static int oz_usb_submit_elt(struct oz_elt_buf *eb, struct oz_elt_info *ei,
int oz_usb_get_desc_req(void *hpd, u8 req_id, u8 req_type, u8 desc_type,
u8 index, __le16 windex, int offset, int len)
{
- struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+ struct oz_usb_ctx *usb_ctx = hpd;
struct oz_pd *pd = usb_ctx->pd;
struct oz_elt *elt;
struct oz_get_desc_req *body;
@@ -92,7 +92,7 @@ int oz_usb_get_desc_req(void *hpd, u8 req_id, u8 req_type, u8 desc_type,
*/
static int oz_usb_set_config_req(void *hpd, u8 req_id, u8 index)
{
- struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+ struct oz_usb_ctx *usb_ctx = hpd;
struct oz_pd *pd = usb_ctx->pd;
struct oz_elt *elt;
struct oz_elt_buf *eb = &pd->elt_buff;
@@ -115,7 +115,7 @@ static int oz_usb_set_config_req(void *hpd, u8 req_id, u8 index)
*/
static int oz_usb_set_interface_req(void *hpd, u8 req_id, u8 index, u8 alt)
{
- struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+ struct oz_usb_ctx *usb_ctx = hpd;
struct oz_pd *pd = usb_ctx->pd;
struct oz_elt *elt;
struct oz_elt_buf *eb = &pd->elt_buff;
@@ -140,7 +140,7 @@ static int oz_usb_set_interface_req(void *hpd, u8 req_id, u8 index, u8 alt)
static int oz_usb_set_clear_feature_req(void *hpd, u8 req_id, u8 type,
u8 recipient, u8 index, __le16 feature)
{
- struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+ struct oz_usb_ctx *usb_ctx = hpd;
struct oz_pd *pd = usb_ctx->pd;
struct oz_elt *elt;
struct oz_elt_buf *eb = &pd->elt_buff;
@@ -166,7 +166,7 @@ static int oz_usb_set_clear_feature_req(void *hpd, u8 req_id, u8 type,
static int oz_usb_vendor_class_req(void *hpd, u8 req_id, u8 req_type,
u8 request, __le16 value, __le16 index, const u8 *data, int data_len)
{
- struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+ struct oz_usb_ctx *usb_ctx = hpd;
struct oz_pd *pd = usb_ctx->pd;
struct oz_elt *elt;
struct oz_elt_buf *eb = &pd->elt_buff;
@@ -244,7 +244,7 @@ int oz_usb_control_req(void *hpd, u8 req_id, struct usb_ctrlrequest *setup,
*/
int oz_usb_send_isoc(void *hpd, u8 ep_num, struct urb *urb)
{
- struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+ struct oz_usb_ctx *usb_ctx = hpd;
struct oz_pd *pd = usb_ctx->pd;
struct oz_elt_buf *eb;
int i;
diff --git a/drivers/staging/panel/TODO b/drivers/staging/panel/TODO
index a4be749bcdf..2db3f994b63 100644
--- a/drivers/staging/panel/TODO
+++ b/drivers/staging/panel/TODO
@@ -1,6 +1,5 @@
TODO:
- checkpatch.pl cleanups
- - Lindent
- review major/minor usages
- review userspace api
- see if all of this could be easier done in userspace instead.
diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
index 6d1a32097d3..98325b7b446 100644
--- a/drivers/staging/panel/panel.c
+++ b/drivers/staging/panel/panel.c
@@ -133,6 +133,8 @@
#define LCD_ESCAPE_LEN 24 /* max chars for LCD escape command */
#define LCD_ESCAPE_CHAR 27 /* use char 27 for escape command */
+#define NOT_SET -1
+
/* macros to simplify use of the parallel port */
#define r_ctr(x) (parport_read_control((x)->port))
#define r_dtr(x) (parport_read_data((x)->port))
@@ -210,6 +212,10 @@ static pmask_t phys_prev;
static char inputs_stable;
/* these variables are specific to the keypad */
+static struct {
+ bool enabled;
+} keypad;
+
static char keypad_buffer[KEYPAD_BUFFER];
static int keypad_buflen;
static int keypad_start;
@@ -217,17 +223,50 @@ static char keypressed;
static wait_queue_head_t keypad_read_wait;
/* lcd-specific variables */
-
-/* contains the LCD config state */
-static unsigned long int lcd_flags;
-/* contains the LCD X offset */
-static unsigned long int lcd_addr_x;
-/* contains the LCD Y offset */
-static unsigned long int lcd_addr_y;
-/* current escape sequence, 0 terminated */
-static char lcd_escape[LCD_ESCAPE_LEN + 1];
-/* not in escape state. >=0 = escape cmd len */
-static int lcd_escape_len = -1;
+static struct {
+ bool enabled;
+ bool initialized;
+ bool must_clear;
+
+ /* TODO: use bool here? */
+ char left_shift;
+
+ int height;
+ int width;
+ int bwidth;
+ int hwidth;
+ int charset;
+ int proto;
+ int light_tempo;
+
+ /* TODO: use union here? */
+ struct {
+ int e;
+ int rs;
+ int rw;
+ int cl;
+ int da;
+ int bl;
+ } pins;
+
+ /* contains the LCD config state */
+ unsigned long int flags;
+
+ /* Contains the LCD X and Y offset */
+ struct {
+ unsigned long int x;
+ unsigned long int y;
+ } addr;
+
+ /* Current escape sequence and its length, or -1 if outside */
+ struct {
+ char buf[LCD_ESCAPE_LEN + 1];
+ int len;
+ } esc_seq;
+} lcd;
+
+/* Needed only for init */
+static int selected_lcd_type = NOT_SET;
/*
* Bit masks to convert LCD signals to parallel port outputs.
@@ -302,14 +341,15 @@ static unsigned char lcd_bits[LCD_PORTS][LCD_BITS][BIT_STATES];
/*
* Construct custom config from the kernel's configuration
*/
-#define DEFAULT_PROFILE PANEL_PROFILE_LARGE
#define DEFAULT_PARPORT 0
-#define DEFAULT_LCD LCD_TYPE_OLD
-#define DEFAULT_KEYPAD KEYPAD_TYPE_OLD
+#define DEFAULT_PROFILE PANEL_PROFILE_LARGE
+#define DEFAULT_KEYPAD_TYPE KEYPAD_TYPE_OLD
+#define DEFAULT_LCD_TYPE LCD_TYPE_OLD
+#define DEFAULT_LCD_HEIGHT 2
#define DEFAULT_LCD_WIDTH 40
#define DEFAULT_LCD_BWIDTH 40
#define DEFAULT_LCD_HWIDTH 64
-#define DEFAULT_LCD_HEIGHT 2
+#define DEFAULT_LCD_CHARSET LCD_CHARSET_NORMAL
#define DEFAULT_LCD_PROTO LCD_PROTO_PARALLEL
#define DEFAULT_LCD_PIN_E PIN_AUTOLF
@@ -318,27 +358,31 @@ static unsigned char lcd_bits[LCD_PORTS][LCD_BITS][BIT_STATES];
#define DEFAULT_LCD_PIN_SCL PIN_STROBE
#define DEFAULT_LCD_PIN_SDA PIN_D0
#define DEFAULT_LCD_PIN_BL PIN_NOT_SET
-#define DEFAULT_LCD_CHARSET LCD_CHARSET_NORMAL
-
-#ifdef CONFIG_PANEL_PROFILE
-#undef DEFAULT_PROFILE
-#define DEFAULT_PROFILE CONFIG_PANEL_PROFILE
-#endif
#ifdef CONFIG_PANEL_PARPORT
#undef DEFAULT_PARPORT
#define DEFAULT_PARPORT CONFIG_PANEL_PARPORT
#endif
+#ifdef CONFIG_PANEL_PROFILE
+#undef DEFAULT_PROFILE
+#define DEFAULT_PROFILE CONFIG_PANEL_PROFILE
+#endif
+
#if DEFAULT_PROFILE == 0 /* custom */
#ifdef CONFIG_PANEL_KEYPAD
-#undef DEFAULT_KEYPAD
-#define DEFAULT_KEYPAD CONFIG_PANEL_KEYPAD
+#undef DEFAULT_KEYPAD_TYPE
+#define DEFAULT_KEYPAD_TYPE CONFIG_PANEL_KEYPAD
#endif
#ifdef CONFIG_PANEL_LCD
-#undef DEFAULT_LCD
-#define DEFAULT_LCD CONFIG_PANEL_LCD
+#undef DEFAULT_LCD_TYPE
+#define DEFAULT_LCD_TYPE CONFIG_PANEL_LCD
+#endif
+
+#ifdef CONFIG_PANEL_LCD_HEIGHT
+#undef DEFAULT_LCD_HEIGHT
+#define DEFAULT_LCD_HEIGHT CONFIG_PANEL_LCD_HEIGHT
#endif
#ifdef CONFIG_PANEL_LCD_WIDTH
@@ -356,9 +400,9 @@ static unsigned char lcd_bits[LCD_PORTS][LCD_BITS][BIT_STATES];
#define DEFAULT_LCD_HWIDTH CONFIG_PANEL_LCD_HWIDTH
#endif
-#ifdef CONFIG_PANEL_LCD_HEIGHT
-#undef DEFAULT_LCD_HEIGHT
-#define DEFAULT_LCD_HEIGHT CONFIG_PANEL_LCD_HEIGHT
+#ifdef CONFIG_PANEL_LCD_CHARSET
+#undef DEFAULT_LCD_CHARSET
+#define DEFAULT_LCD_CHARSET CONFIG_PANEL_LCD_CHARSET
#endif
#ifdef CONFIG_PANEL_LCD_PROTO
@@ -396,25 +440,18 @@ static unsigned char lcd_bits[LCD_PORTS][LCD_BITS][BIT_STATES];
#define DEFAULT_LCD_PIN_BL CONFIG_PANEL_LCD_PIN_BL
#endif
-#ifdef CONFIG_PANEL_LCD_CHARSET
-#undef DEFAULT_LCD_CHARSET
-#define DEFAULT_LCD_CHARSET CONFIG_PANEL_LCD_CHARSET
-#endif
-
#endif /* DEFAULT_PROFILE == 0 */
/* global variables */
-static int keypad_open_cnt; /* #times opened */
-static int lcd_open_cnt; /* #times opened */
+
+/* Device single-open policy control */
+static atomic_t lcd_available = ATOMIC_INIT(1);
+static atomic_t keypad_available = ATOMIC_INIT(1);
+
static struct pardevice *pprt;
-static int lcd_initialized;
static int keypad_initialized;
-static int light_tempo;
-
-static char lcd_must_clear;
-static char lcd_left_shift;
static char init_in_progress;
static void (*lcd_write_cmd)(int);
@@ -426,59 +463,51 @@ static struct timer_list scan_timer;
MODULE_DESCRIPTION("Generic parallel port LCD/Keypad driver");
-static int parport = -1;
+static int parport = DEFAULT_PARPORT;
module_param(parport, int, 0000);
MODULE_PARM_DESC(parport, "Parallel port index (0=lpt1, 1=lpt2, ...)");
-static int lcd_height = -1;
+static int profile = DEFAULT_PROFILE;
+module_param(profile, int, 0000);
+MODULE_PARM_DESC(profile,
+ "1=16x2 old kp; 2=serial 16x2, new kp; 3=16x2 hantronix; "
+ "4=16x2 nexcom; default=40x2, old kp");
+
+static int keypad_type = NOT_SET;
+module_param(keypad_type, int, 0000);
+MODULE_PARM_DESC(keypad_type,
+ "Keypad type: 0=none, 1=old 6 keys, 2=new 6+1 keys, 3=nexcom 4 keys");
+
+static int lcd_type = NOT_SET;
+module_param(lcd_type, int, 0000);
+MODULE_PARM_DESC(lcd_type,
+ "LCD type: 0=none, 1=old //, 2=serial ks0074, 3=hantronix //, 4=nexcom //, 5=compiled-in");
+
+static int lcd_height = NOT_SET;
module_param(lcd_height, int, 0000);
MODULE_PARM_DESC(lcd_height, "Number of lines on the LCD");
-static int lcd_width = -1;
+static int lcd_width = NOT_SET;
module_param(lcd_width, int, 0000);
MODULE_PARM_DESC(lcd_width, "Number of columns on the LCD");
-static int lcd_bwidth = -1; /* internal buffer width (usually 40) */
+static int lcd_bwidth = NOT_SET; /* internal buffer width (usually 40) */
module_param(lcd_bwidth, int, 0000);
MODULE_PARM_DESC(lcd_bwidth, "Internal LCD line width (40)");
-static int lcd_hwidth = -1; /* hardware buffer width (usually 64) */
+static int lcd_hwidth = NOT_SET; /* hardware buffer width (usually 64) */
module_param(lcd_hwidth, int, 0000);
MODULE_PARM_DESC(lcd_hwidth, "LCD line hardware address (64)");
-static int lcd_enabled = -1;
-module_param(lcd_enabled, int, 0000);
-MODULE_PARM_DESC(lcd_enabled, "Deprecated option, use lcd_type instead");
-
-static int keypad_enabled = -1;
-module_param(keypad_enabled, int, 0000);
-MODULE_PARM_DESC(keypad_enabled, "Deprecated option, use keypad_type instead");
-
-static int lcd_type = -1;
-module_param(lcd_type, int, 0000);
-MODULE_PARM_DESC(lcd_type,
- "LCD type: 0=none, 1=old //, 2=serial ks0074, 3=hantronix //, 4=nexcom //, 5=compiled-in");
+static int lcd_charset = NOT_SET;
+module_param(lcd_charset, int, 0000);
+MODULE_PARM_DESC(lcd_charset, "LCD character set: 0=standard, 1=KS0074");
-static int lcd_proto = -1;
+static int lcd_proto = NOT_SET;
module_param(lcd_proto, int, 0000);
MODULE_PARM_DESC(lcd_proto,
"LCD communication: 0=parallel (//), 1=serial, 2=TI LCD Interface");
-static int lcd_charset = -1;
-module_param(lcd_charset, int, 0000);
-MODULE_PARM_DESC(lcd_charset, "LCD character set: 0=standard, 1=KS0074");
-
-static int keypad_type = -1;
-module_param(keypad_type, int, 0000);
-MODULE_PARM_DESC(keypad_type,
- "Keypad type: 0=none, 1=old 6 keys, 2=new 6+1 keys, 3=nexcom 4 keys");
-
-static int profile = DEFAULT_PROFILE;
-module_param(profile, int, 0000);
-MODULE_PARM_DESC(profile,
- "1=16x2 old kp; 2=serial 16x2, new kp; 3=16x2 hantronix; "
- "4=16x2 nexcom; default=40x2, old kp");
-
/*
* These are the parallel port pins the LCD control signals are connected to.
* Set this to 0 if the signal is not used. Set it to its opposite value
@@ -503,20 +532,31 @@ module_param(lcd_rw_pin, int, 0000);
MODULE_PARM_DESC(lcd_rw_pin,
"# of the // port pin connected to LCD 'RW' signal, with polarity (-17..17)");
-static int lcd_bl_pin = PIN_NOT_SET;
-module_param(lcd_bl_pin, int, 0000);
-MODULE_PARM_DESC(lcd_bl_pin,
- "# of the // port pin connected to LCD backlight, with polarity (-17..17)");
+static int lcd_cl_pin = PIN_NOT_SET;
+module_param(lcd_cl_pin, int, 0000);
+MODULE_PARM_DESC(lcd_cl_pin,
+ "# of the // port pin connected to serial LCD 'SCL' signal, with polarity (-17..17)");
static int lcd_da_pin = PIN_NOT_SET;
module_param(lcd_da_pin, int, 0000);
MODULE_PARM_DESC(lcd_da_pin,
"# of the // port pin connected to serial LCD 'SDA' signal, with polarity (-17..17)");
-static int lcd_cl_pin = PIN_NOT_SET;
-module_param(lcd_cl_pin, int, 0000);
-MODULE_PARM_DESC(lcd_cl_pin,
- "# of the // port pin connected to serial LCD 'SCL' signal, with polarity (-17..17)");
+static int lcd_bl_pin = PIN_NOT_SET;
+module_param(lcd_bl_pin, int, 0000);
+MODULE_PARM_DESC(lcd_bl_pin,
+ "# of the // port pin connected to LCD backlight, with polarity (-17..17)");
+
+/* Deprecated module parameters - consider not using them anymore */
+
+static int lcd_enabled = NOT_SET;
+module_param(lcd_enabled, int, 0000);
+MODULE_PARM_DESC(lcd_enabled, "Deprecated option, use lcd_type instead");
+
+static int keypad_enabled = NOT_SET;
+module_param(keypad_enabled, int, 0000);
+MODULE_PARM_DESC(keypad_enabled, "Deprecated option, use keypad_type instead");
+
static const unsigned char *lcd_char_conv;
@@ -748,7 +788,7 @@ static void lcd_send_serial(int byte)
/* turn the backlight on or off */
static void lcd_backlight(int on)
{
- if (lcd_bl_pin == PIN_NONE)
+ if (lcd.pins.bl == PIN_NONE)
return;
/* The backlight is activated by setting the AUTOFEED line to +5V */
@@ -847,23 +887,23 @@ static void lcd_write_data_tilcd(int data)
static void lcd_gotoxy(void)
{
lcd_write_cmd(0x80 /* set DDRAM address */
- | (lcd_addr_y ? lcd_hwidth : 0)
+ | (lcd.addr.y ? lcd.hwidth : 0)
/* we force the cursor to stay at the end of the
line if it wants to go farther */
- | ((lcd_addr_x < lcd_bwidth) ? lcd_addr_x &
- (lcd_hwidth - 1) : lcd_bwidth - 1));
+ | ((lcd.addr.x < lcd.bwidth) ? lcd.addr.x &
+ (lcd.hwidth - 1) : lcd.bwidth - 1));
}
static void lcd_print(char c)
{
- if (lcd_addr_x < lcd_bwidth) {
+ if (lcd.addr.x < lcd.bwidth) {
if (lcd_char_conv != NULL)
c = lcd_char_conv[(unsigned char)c];
lcd_write_data(c);
- lcd_addr_x++;
+ lcd.addr.x++;
}
/* prevents the cursor from wrapping onto the next line */
- if (lcd_addr_x == lcd_bwidth)
+ if (lcd.addr.x == lcd.bwidth)
lcd_gotoxy();
}
@@ -872,12 +912,12 @@ static void lcd_clear_fast_s(void)
{
int pos;
- lcd_addr_x = 0;
- lcd_addr_y = 0;
+ lcd.addr.x = 0;
+ lcd.addr.y = 0;
lcd_gotoxy();
spin_lock_irq(&pprt_lock);
- for (pos = 0; pos < lcd_height * lcd_hwidth; pos++) {
+ for (pos = 0; pos < lcd.height * lcd.hwidth; pos++) {
lcd_send_serial(0x5F); /* R/W=W, RS=1 */
lcd_send_serial(' ' & 0x0F);
lcd_send_serial((' ' >> 4) & 0x0F);
@@ -885,8 +925,8 @@ static void lcd_clear_fast_s(void)
}
spin_unlock_irq(&pprt_lock);
- lcd_addr_x = 0;
- lcd_addr_y = 0;
+ lcd.addr.x = 0;
+ lcd.addr.y = 0;
lcd_gotoxy();
}
@@ -895,12 +935,12 @@ static void lcd_clear_fast_p8(void)
{
int pos;
- lcd_addr_x = 0;
- lcd_addr_y = 0;
+ lcd.addr.x = 0;
+ lcd.addr.y = 0;
lcd_gotoxy();
spin_lock_irq(&pprt_lock);
- for (pos = 0; pos < lcd_height * lcd_hwidth; pos++) {
+ for (pos = 0; pos < lcd.height * lcd.hwidth; pos++) {
/* present the data to the data port */
w_dtr(pprt, ' ');
@@ -923,8 +963,8 @@ static void lcd_clear_fast_p8(void)
}
spin_unlock_irq(&pprt_lock);
- lcd_addr_x = 0;
- lcd_addr_y = 0;
+ lcd.addr.x = 0;
+ lcd.addr.y = 0;
lcd_gotoxy();
}
@@ -933,12 +973,12 @@ static void lcd_clear_fast_tilcd(void)
{
int pos;
- lcd_addr_x = 0;
- lcd_addr_y = 0;
+ lcd.addr.x = 0;
+ lcd.addr.y = 0;
lcd_gotoxy();
spin_lock_irq(&pprt_lock);
- for (pos = 0; pos < lcd_height * lcd_hwidth; pos++) {
+ for (pos = 0; pos < lcd.height * lcd.hwidth; pos++) {
/* present the data to the data port */
w_dtr(pprt, ' ');
udelay(60);
@@ -946,8 +986,8 @@ static void lcd_clear_fast_tilcd(void)
spin_unlock_irq(&pprt_lock);
- lcd_addr_x = 0;
- lcd_addr_y = 0;
+ lcd.addr.x = 0;
+ lcd.addr.y = 0;
lcd_gotoxy();
}
@@ -955,15 +995,15 @@ static void lcd_clear_fast_tilcd(void)
static void lcd_clear_display(void)
{
lcd_write_cmd(0x01); /* clear display */
- lcd_addr_x = 0;
- lcd_addr_y = 0;
+ lcd.addr.x = 0;
+ lcd.addr.y = 0;
/* we must wait a few milliseconds (15) */
long_sleep(15);
}
static void lcd_init_display(void)
{
- lcd_flags = ((lcd_height > 1) ? LCD_FLAG_N : 0)
+ lcd.flags = ((lcd.height > 1) ? LCD_FLAG_N : 0)
| LCD_FLAG_D | LCD_FLAG_C | LCD_FLAG_B;
long_sleep(20); /* wait 20 ms after power-up for the paranoid */
@@ -976,8 +1016,8 @@ static void lcd_init_display(void)
long_sleep(10);
lcd_write_cmd(0x30 /* set font height and lines number */
- | ((lcd_flags & LCD_FLAG_F) ? 4 : 0)
- | ((lcd_flags & LCD_FLAG_N) ? 8 : 0)
+ | ((lcd.flags & LCD_FLAG_F) ? 4 : 0)
+ | ((lcd.flags & LCD_FLAG_N) ? 8 : 0)
);
long_sleep(10);
@@ -985,12 +1025,12 @@ static void lcd_init_display(void)
long_sleep(10);
lcd_write_cmd(0x08 /* set display mode */
- | ((lcd_flags & LCD_FLAG_D) ? 4 : 0)
- | ((lcd_flags & LCD_FLAG_C) ? 2 : 0)
- | ((lcd_flags & LCD_FLAG_B) ? 1 : 0)
+ | ((lcd.flags & LCD_FLAG_D) ? 4 : 0)
+ | ((lcd.flags & LCD_FLAG_C) ? 2 : 0)
+ | ((lcd.flags & LCD_FLAG_B) ? 1 : 0)
);
- lcd_backlight((lcd_flags & LCD_FLAG_L) ? 1 : 0);
+ lcd_backlight((lcd.flags & LCD_FLAG_L) ? 1 : 0);
long_sleep(10);
@@ -1013,100 +1053,101 @@ static inline int handle_lcd_special_code(void)
int processed = 0;
- char *esc = lcd_escape + 2;
- int oldflags = lcd_flags;
+ char *esc = lcd.esc_seq.buf + 2;
+ int oldflags = lcd.flags;
/* check for display mode flags */
switch (*esc) {
case 'D': /* Display ON */
- lcd_flags |= LCD_FLAG_D;
+ lcd.flags |= LCD_FLAG_D;
processed = 1;
break;
case 'd': /* Display OFF */
- lcd_flags &= ~LCD_FLAG_D;
+ lcd.flags &= ~LCD_FLAG_D;
processed = 1;
break;
case 'C': /* Cursor ON */
- lcd_flags |= LCD_FLAG_C;
+ lcd.flags |= LCD_FLAG_C;
processed = 1;
break;
case 'c': /* Cursor OFF */
- lcd_flags &= ~LCD_FLAG_C;
+ lcd.flags &= ~LCD_FLAG_C;
processed = 1;
break;
case 'B': /* Blink ON */
- lcd_flags |= LCD_FLAG_B;
+ lcd.flags |= LCD_FLAG_B;
processed = 1;
break;
case 'b': /* Blink OFF */
- lcd_flags &= ~LCD_FLAG_B;
+ lcd.flags &= ~LCD_FLAG_B;
processed = 1;
break;
case '+': /* Back light ON */
- lcd_flags |= LCD_FLAG_L;
+ lcd.flags |= LCD_FLAG_L;
processed = 1;
break;
case '-': /* Back light OFF */
- lcd_flags &= ~LCD_FLAG_L;
+ lcd.flags &= ~LCD_FLAG_L;
processed = 1;
break;
case '*':
/* flash back light using the keypad timer */
if (scan_timer.function != NULL) {
- if (light_tempo == 0 && ((lcd_flags & LCD_FLAG_L) == 0))
+ if (lcd.light_tempo == 0
+ && ((lcd.flags & LCD_FLAG_L) == 0))
lcd_backlight(1);
- light_tempo = FLASH_LIGHT_TEMPO;
+ lcd.light_tempo = FLASH_LIGHT_TEMPO;
}
processed = 1;
break;
case 'f': /* Small Font */
- lcd_flags &= ~LCD_FLAG_F;
+ lcd.flags &= ~LCD_FLAG_F;
processed = 1;
break;
case 'F': /* Large Font */
- lcd_flags |= LCD_FLAG_F;
+ lcd.flags |= LCD_FLAG_F;
processed = 1;
break;
case 'n': /* One Line */
- lcd_flags &= ~LCD_FLAG_N;
+ lcd.flags &= ~LCD_FLAG_N;
processed = 1;
break;
case 'N': /* Two Lines */
- lcd_flags |= LCD_FLAG_N;
+ lcd.flags |= LCD_FLAG_N;
break;
case 'l': /* Shift Cursor Left */
- if (lcd_addr_x > 0) {
+ if (lcd.addr.x > 0) {
/* back one char if not at end of line */
- if (lcd_addr_x < lcd_bwidth)
+ if (lcd.addr.x < lcd.bwidth)
lcd_write_cmd(0x10);
- lcd_addr_x--;
+ lcd.addr.x--;
}
processed = 1;
break;
case 'r': /* shift cursor right */
- if (lcd_addr_x < lcd_width) {
+ if (lcd.addr.x < lcd.width) {
/* allow the cursor to pass the end of the line */
- if (lcd_addr_x <
- (lcd_bwidth - 1))
+ if (lcd.addr.x <
+ (lcd.bwidth - 1))
lcd_write_cmd(0x14);
- lcd_addr_x++;
+ lcd.addr.x++;
}
processed = 1;
break;
case 'L': /* shift display left */
- lcd_left_shift++;
+ lcd.left_shift++;
lcd_write_cmd(0x18);
processed = 1;
break;
case 'R': /* shift display right */
- lcd_left_shift--;
+ lcd.left_shift--;
lcd_write_cmd(0x1C);
processed = 1;
break;
case 'k': { /* kill end of line */
int x;
- for (x = lcd_addr_x; x < lcd_bwidth; x++)
+ for (x = lcd.addr.x; x < lcd.bwidth; x++)
lcd_write_data(' ');
/* restore cursor position */
@@ -1116,7 +1157,7 @@ static inline int handle_lcd_special_code(void)
}
case 'I': /* reinitialize display */
lcd_init_display();
- lcd_left_shift = 0;
+ lcd.left_shift = 0;
processed = 1;
break;
case 'G': {
@@ -1187,11 +1228,11 @@ static inline int handle_lcd_special_code(void)
while (*esc) {
if (*esc == 'x') {
esc++;
- if (kstrtoul(esc, 10, &lcd_addr_x) < 0)
+ if (kstrtoul(esc, 10, &lcd.addr.x) < 0)
break;
} else if (*esc == 'y') {
esc++;
- if (kstrtoul(esc, 10, &lcd_addr_y) < 0)
+ if (kstrtoul(esc, 10, &lcd.addr.y) < 0)
break;
} else {
break;
@@ -1204,25 +1245,25 @@ static inline int handle_lcd_special_code(void)
}
/* Check whether one flag was changed */
- if (oldflags != lcd_flags) {
+ if (oldflags != lcd.flags) {
/* check whether one of B,C,D flags were changed */
- if ((oldflags ^ lcd_flags) &
+ if ((oldflags ^ lcd.flags) &
(LCD_FLAG_B | LCD_FLAG_C | LCD_FLAG_D))
/* set display mode */
lcd_write_cmd(0x08
- | ((lcd_flags & LCD_FLAG_D) ? 4 : 0)
- | ((lcd_flags & LCD_FLAG_C) ? 2 : 0)
- | ((lcd_flags & LCD_FLAG_B) ? 1 : 0));
+ | ((lcd.flags & LCD_FLAG_D) ? 4 : 0)
+ | ((lcd.flags & LCD_FLAG_C) ? 2 : 0)
+ | ((lcd.flags & LCD_FLAG_B) ? 1 : 0));
/* check whether one of F,N flags was changed */
- else if ((oldflags ^ lcd_flags) & (LCD_FLAG_F | LCD_FLAG_N))
+ else if ((oldflags ^ lcd.flags) & (LCD_FLAG_F | LCD_FLAG_N))
lcd_write_cmd(0x30
- | ((lcd_flags & LCD_FLAG_F) ? 4 : 0)
- | ((lcd_flags & LCD_FLAG_N) ? 8 : 0));
+ | ((lcd.flags & LCD_FLAG_F) ? 4 : 0)
+ | ((lcd.flags & LCD_FLAG_N) ? 8 : 0));
/* check whether L flag was changed */
- else if ((oldflags ^ lcd_flags) & (LCD_FLAG_L)) {
- if (lcd_flags & (LCD_FLAG_L))
+ else if ((oldflags ^ lcd.flags) & (LCD_FLAG_L)) {
+ if (lcd.flags & (LCD_FLAG_L))
lcd_backlight(1);
- else if (light_tempo == 0)
+ else if (lcd.light_tempo == 0)
/* switch off the light only when the tempo
lighting is gone */
lcd_backlight(0);
@@ -1235,29 +1276,29 @@ static inline int handle_lcd_special_code(void)
static void lcd_write_char(char c)
{
/* first, we'll test if we're in escape mode */
- if ((c != '\n') && lcd_escape_len >= 0) {
+ if ((c != '\n') && lcd.esc_seq.len >= 0) {
/* yes, let's add this char to the buffer */
- lcd_escape[lcd_escape_len++] = c;
- lcd_escape[lcd_escape_len] = 0;
+ lcd.esc_seq.buf[lcd.esc_seq.len++] = c;
+ lcd.esc_seq.buf[lcd.esc_seq.len] = 0;
} else {
/* aborts any previous escape sequence */
- lcd_escape_len = -1;
+ lcd.esc_seq.len = -1;
switch (c) {
case LCD_ESCAPE_CHAR:
/* start of an escape sequence */
- lcd_escape_len = 0;
- lcd_escape[lcd_escape_len] = 0;
+ lcd.esc_seq.len = 0;
+ lcd.esc_seq.buf[lcd.esc_seq.len] = 0;
break;
case '\b':
/* go back one char and clear it */
- if (lcd_addr_x > 0) {
+ if (lcd.addr.x > 0) {
/* check if we're not at the
end of the line */
- if (lcd_addr_x < lcd_bwidth)
+ if (lcd.addr.x < lcd.bwidth)
/* back one char */
lcd_write_cmd(0x10);
- lcd_addr_x--;
+ lcd.addr.x--;
}
/* replace with a space */
lcd_write_data(' ');
@@ -1271,15 +1312,15 @@ static void lcd_write_char(char c)
case '\n':
/* flush the remainder of the current line and
go to the beginning of the next line */
- for (; lcd_addr_x < lcd_bwidth; lcd_addr_x++)
+ for (; lcd.addr.x < lcd.bwidth; lcd.addr.x++)
lcd_write_data(' ');
- lcd_addr_x = 0;
- lcd_addr_y = (lcd_addr_y + 1) % lcd_height;
+ lcd.addr.x = 0;
+ lcd.addr.y = (lcd.addr.y + 1) % lcd.height;
lcd_gotoxy();
break;
case '\r':
/* go to the beginning of the same line */
- lcd_addr_x = 0;
+ lcd.addr.x = 0;
lcd_gotoxy();
break;
case '\t':
@@ -1295,32 +1336,32 @@ static void lcd_write_char(char c)
/* now we'll see if we're in an escape mode and if the current
escape sequence can be understood. */
- if (lcd_escape_len >= 2) {
+ if (lcd.esc_seq.len >= 2) {
int processed = 0;
- if (!strcmp(lcd_escape, "[2J")) {
+ if (!strcmp(lcd.esc_seq.buf, "[2J")) {
/* clear the display */
lcd_clear_fast();
processed = 1;
- } else if (!strcmp(lcd_escape, "[H")) {
+ } else if (!strcmp(lcd.esc_seq.buf, "[H")) {
/* cursor to home */
- lcd_addr_x = 0;
- lcd_addr_y = 0;
+ lcd.addr.x = 0;
+ lcd.addr.y = 0;
lcd_gotoxy();
processed = 1;
}
/* codes starting with ^[[L */
- else if ((lcd_escape_len >= 3) &&
- (lcd_escape[0] == '[') &&
- (lcd_escape[1] == 'L')) {
+ else if ((lcd.esc_seq.len >= 3) &&
+ (lcd.esc_seq.buf[0] == '[') &&
+ (lcd.esc_seq.buf[1] == 'L')) {
processed = handle_lcd_special_code();
}
/* LCD special escape codes */
/* flush the escape sequence if it's been processed
or if it is getting too long. */
- if (processed || (lcd_escape_len >= LCD_ESCAPE_LEN))
- lcd_escape_len = -1;
+ if (processed || (lcd.esc_seq.len >= LCD_ESCAPE_LEN))
+ lcd.esc_seq.len = -1;
} /* escape codes */
}
@@ -1347,23 +1388,22 @@ static ssize_t lcd_write(struct file *file,
static int lcd_open(struct inode *inode, struct file *file)
{
- if (lcd_open_cnt)
+ if (!atomic_dec_and_test(&lcd_available))
return -EBUSY; /* open only once at a time */
if (file->f_mode & FMODE_READ) /* device is write-only */
return -EPERM;
- if (lcd_must_clear) {
+ if (lcd.must_clear) {
lcd_clear_display();
- lcd_must_clear = 0;
+ lcd.must_clear = false;
}
- lcd_open_cnt++;
return nonseekable_open(inode, file);
}
static int lcd_release(struct inode *inode, struct file *file)
{
- lcd_open_cnt--;
+ atomic_inc(&lcd_available);
return 0;
}
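
The lcd_open()/lcd_release() hunks above replace an open-count integer with an 'available' atomic implementing a single-open policy. A minimal sketch of the idiom with hypothetical names; unlike the hunk above, this sketch also undoes the decrement when the open is refused, which keeps the counter balanced:

    static atomic_t example_available = ATOMIC_INIT(1);

    static int example_open(struct inode *inode, struct file *file)
    {
            /* The first opener takes the counter from 1 to 0; later openers fail. */
            if (!atomic_dec_and_test(&example_available)) {
                    atomic_inc(&example_available);   /* give back our decrement */
                    return -EBUSY;
            }
            return nonseekable_open(inode, file);
    }

    static int example_release(struct inode *inode, struct file *file)
    {
            atomic_inc(&example_available);
            return 0;
    }
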
@@ -1375,9 +1415,9 @@ static const struct file_operations lcd_fops = {
};
static struct miscdevice lcd_dev = {
- LCD_MINOR,
- "lcd",
- &lcd_fops
+ .minor = LCD_MINOR,
+ .name = "lcd",
+ .fops = &lcd_fops,
};
/* public function usable from the kernel for any purpose */
@@ -1386,7 +1426,7 @@ static void panel_lcd_print(const char *s)
const char *tmp = s;
int count = strlen(s);
- if (lcd_enabled && lcd_initialized) {
+ if (lcd.enabled && lcd.initialized) {
for (; count-- > 0; tmp++) {
if (!in_interrupt() && (((count + 1) & 0x1f) == 0))
/* let's be a little nice with other processes
@@ -1401,183 +1441,173 @@ static void panel_lcd_print(const char *s)
/* initialize the LCD driver */
static void lcd_init(void)
{
- switch (lcd_type) {
+ switch (selected_lcd_type) {
case LCD_TYPE_OLD:
/* parallel mode, 8 bits */
- if (lcd_proto < 0)
- lcd_proto = LCD_PROTO_PARALLEL;
- if (lcd_charset < 0)
- lcd_charset = LCD_CHARSET_NORMAL;
- if (lcd_e_pin == PIN_NOT_SET)
- lcd_e_pin = PIN_STROBE;
- if (lcd_rs_pin == PIN_NOT_SET)
- lcd_rs_pin = PIN_AUTOLF;
-
- if (lcd_width < 0)
- lcd_width = 40;
- if (lcd_bwidth < 0)
- lcd_bwidth = 40;
- if (lcd_hwidth < 0)
- lcd_hwidth = 64;
- if (lcd_height < 0)
- lcd_height = 2;
+ lcd.proto = LCD_PROTO_PARALLEL;
+ lcd.charset = LCD_CHARSET_NORMAL;
+ lcd.pins.e = PIN_STROBE;
+ lcd.pins.rs = PIN_AUTOLF;
+
+ lcd.width = 40;
+ lcd.bwidth = 40;
+ lcd.hwidth = 64;
+ lcd.height = 2;
break;
case LCD_TYPE_KS0074:
/* serial mode, ks0074 */
- if (lcd_proto < 0)
- lcd_proto = LCD_PROTO_SERIAL;
- if (lcd_charset < 0)
- lcd_charset = LCD_CHARSET_KS0074;
- if (lcd_bl_pin == PIN_NOT_SET)
- lcd_bl_pin = PIN_AUTOLF;
- if (lcd_cl_pin == PIN_NOT_SET)
- lcd_cl_pin = PIN_STROBE;
- if (lcd_da_pin == PIN_NOT_SET)
- lcd_da_pin = PIN_D0;
-
- if (lcd_width < 0)
- lcd_width = 16;
- if (lcd_bwidth < 0)
- lcd_bwidth = 40;
- if (lcd_hwidth < 0)
- lcd_hwidth = 16;
- if (lcd_height < 0)
- lcd_height = 2;
+ lcd.proto = LCD_PROTO_SERIAL;
+ lcd.charset = LCD_CHARSET_KS0074;
+ lcd.pins.bl = PIN_AUTOLF;
+ lcd.pins.cl = PIN_STROBE;
+ lcd.pins.da = PIN_D0;
+
+ lcd.width = 16;
+ lcd.bwidth = 40;
+ lcd.hwidth = 16;
+ lcd.height = 2;
break;
case LCD_TYPE_NEXCOM:
/* parallel mode, 8 bits, generic */
- if (lcd_proto < 0)
- lcd_proto = LCD_PROTO_PARALLEL;
- if (lcd_charset < 0)
- lcd_charset = LCD_CHARSET_NORMAL;
- if (lcd_e_pin == PIN_NOT_SET)
- lcd_e_pin = PIN_AUTOLF;
- if (lcd_rs_pin == PIN_NOT_SET)
- lcd_rs_pin = PIN_SELECP;
- if (lcd_rw_pin == PIN_NOT_SET)
- lcd_rw_pin = PIN_INITP;
-
- if (lcd_width < 0)
- lcd_width = 16;
- if (lcd_bwidth < 0)
- lcd_bwidth = 40;
- if (lcd_hwidth < 0)
- lcd_hwidth = 64;
- if (lcd_height < 0)
- lcd_height = 2;
+ lcd.proto = LCD_PROTO_PARALLEL;
+ lcd.charset = LCD_CHARSET_NORMAL;
+ lcd.pins.e = PIN_AUTOLF;
+ lcd.pins.rs = PIN_SELECP;
+ lcd.pins.rw = PIN_INITP;
+
+ lcd.width = 16;
+ lcd.bwidth = 40;
+ lcd.hwidth = 64;
+ lcd.height = 2;
break;
case LCD_TYPE_CUSTOM:
/* customer-defined */
- if (lcd_proto < 0)
- lcd_proto = DEFAULT_LCD_PROTO;
- if (lcd_charset < 0)
- lcd_charset = DEFAULT_LCD_CHARSET;
+ lcd.proto = DEFAULT_LCD_PROTO;
+ lcd.charset = DEFAULT_LCD_CHARSET;
/* default geometry will be set later */
break;
case LCD_TYPE_HANTRONIX:
/* parallel mode, 8 bits, hantronix-like */
default:
- if (lcd_proto < 0)
- lcd_proto = LCD_PROTO_PARALLEL;
- if (lcd_charset < 0)
- lcd_charset = LCD_CHARSET_NORMAL;
- if (lcd_e_pin == PIN_NOT_SET)
- lcd_e_pin = PIN_STROBE;
- if (lcd_rs_pin == PIN_NOT_SET)
- lcd_rs_pin = PIN_SELECP;
-
- if (lcd_width < 0)
- lcd_width = 16;
- if (lcd_bwidth < 0)
- lcd_bwidth = 40;
- if (lcd_hwidth < 0)
- lcd_hwidth = 64;
- if (lcd_height < 0)
- lcd_height = 2;
+ lcd.proto = LCD_PROTO_PARALLEL;
+ lcd.charset = LCD_CHARSET_NORMAL;
+ lcd.pins.e = PIN_STROBE;
+ lcd.pins.rs = PIN_SELECP;
+
+ lcd.width = 16;
+ lcd.bwidth = 40;
+ lcd.hwidth = 64;
+ lcd.height = 2;
break;
}
+ /* Overwrite with module params set on loading */
+ if (lcd_height != NOT_SET)
+ lcd.height = lcd_height;
+ if (lcd_width != NOT_SET)
+ lcd.width = lcd_width;
+ if (lcd_bwidth != NOT_SET)
+ lcd.bwidth = lcd_bwidth;
+ if (lcd_hwidth != NOT_SET)
+ lcd.hwidth = lcd_hwidth;
+ if (lcd_charset != NOT_SET)
+ lcd.charset = lcd_charset;
+ if (lcd_proto != NOT_SET)
+ lcd.proto = lcd_proto;
+ if (lcd_e_pin != PIN_NOT_SET)
+ lcd.pins.e = lcd_e_pin;
+ if (lcd_rs_pin != PIN_NOT_SET)
+ lcd.pins.rs = lcd_rs_pin;
+ if (lcd_rw_pin != PIN_NOT_SET)
+ lcd.pins.rw = lcd_rw_pin;
+ if (lcd_cl_pin != PIN_NOT_SET)
+ lcd.pins.cl = lcd_cl_pin;
+ if (lcd_da_pin != PIN_NOT_SET)
+ lcd.pins.da = lcd_da_pin;
+ if (lcd_bl_pin != PIN_NOT_SET)
+ lcd.pins.bl = lcd_bl_pin;
+
/* this is used to catch wrong and default values */
- if (lcd_width <= 0)
- lcd_width = DEFAULT_LCD_WIDTH;
- if (lcd_bwidth <= 0)
- lcd_bwidth = DEFAULT_LCD_BWIDTH;
- if (lcd_hwidth <= 0)
- lcd_hwidth = DEFAULT_LCD_HWIDTH;
- if (lcd_height <= 0)
- lcd_height = DEFAULT_LCD_HEIGHT;
-
- if (lcd_proto == LCD_PROTO_SERIAL) { /* SERIAL */
+ if (lcd.width <= 0)
+ lcd.width = DEFAULT_LCD_WIDTH;
+ if (lcd.bwidth <= 0)
+ lcd.bwidth = DEFAULT_LCD_BWIDTH;
+ if (lcd.hwidth <= 0)
+ lcd.hwidth = DEFAULT_LCD_HWIDTH;
+ if (lcd.height <= 0)
+ lcd.height = DEFAULT_LCD_HEIGHT;
+
+ if (lcd.proto == LCD_PROTO_SERIAL) { /* SERIAL */
lcd_write_cmd = lcd_write_cmd_s;
lcd_write_data = lcd_write_data_s;
lcd_clear_fast = lcd_clear_fast_s;
- if (lcd_cl_pin == PIN_NOT_SET)
- lcd_cl_pin = DEFAULT_LCD_PIN_SCL;
- if (lcd_da_pin == PIN_NOT_SET)
- lcd_da_pin = DEFAULT_LCD_PIN_SDA;
+ if (lcd.pins.cl == PIN_NOT_SET)
+ lcd.pins.cl = DEFAULT_LCD_PIN_SCL;
+ if (lcd.pins.da == PIN_NOT_SET)
+ lcd.pins.da = DEFAULT_LCD_PIN_SDA;
- } else if (lcd_proto == LCD_PROTO_PARALLEL) { /* PARALLEL */
+ } else if (lcd.proto == LCD_PROTO_PARALLEL) { /* PARALLEL */
lcd_write_cmd = lcd_write_cmd_p8;
lcd_write_data = lcd_write_data_p8;
lcd_clear_fast = lcd_clear_fast_p8;
- if (lcd_e_pin == PIN_NOT_SET)
- lcd_e_pin = DEFAULT_LCD_PIN_E;
- if (lcd_rs_pin == PIN_NOT_SET)
- lcd_rs_pin = DEFAULT_LCD_PIN_RS;
- if (lcd_rw_pin == PIN_NOT_SET)
- lcd_rw_pin = DEFAULT_LCD_PIN_RW;
+ if (lcd.pins.e == PIN_NOT_SET)
+ lcd.pins.e = DEFAULT_LCD_PIN_E;
+ if (lcd.pins.rs == PIN_NOT_SET)
+ lcd.pins.rs = DEFAULT_LCD_PIN_RS;
+ if (lcd.pins.rw == PIN_NOT_SET)
+ lcd.pins.rw = DEFAULT_LCD_PIN_RW;
} else {
lcd_write_cmd = lcd_write_cmd_tilcd;
lcd_write_data = lcd_write_data_tilcd;
lcd_clear_fast = lcd_clear_fast_tilcd;
}
- if (lcd_bl_pin == PIN_NOT_SET)
- lcd_bl_pin = DEFAULT_LCD_PIN_BL;
-
- if (lcd_e_pin == PIN_NOT_SET)
- lcd_e_pin = PIN_NONE;
- if (lcd_rs_pin == PIN_NOT_SET)
- lcd_rs_pin = PIN_NONE;
- if (lcd_rw_pin == PIN_NOT_SET)
- lcd_rw_pin = PIN_NONE;
- if (lcd_bl_pin == PIN_NOT_SET)
- lcd_bl_pin = PIN_NONE;
- if (lcd_cl_pin == PIN_NOT_SET)
- lcd_cl_pin = PIN_NONE;
- if (lcd_da_pin == PIN_NOT_SET)
- lcd_da_pin = PIN_NONE;
-
- if (lcd_charset < 0)
- lcd_charset = DEFAULT_LCD_CHARSET;
-
- if (lcd_charset == LCD_CHARSET_KS0074)
+ if (lcd.pins.bl == PIN_NOT_SET)
+ lcd.pins.bl = DEFAULT_LCD_PIN_BL;
+
+ if (lcd.pins.e == PIN_NOT_SET)
+ lcd.pins.e = PIN_NONE;
+ if (lcd.pins.rs == PIN_NOT_SET)
+ lcd.pins.rs = PIN_NONE;
+ if (lcd.pins.rw == PIN_NOT_SET)
+ lcd.pins.rw = PIN_NONE;
+ if (lcd.pins.bl == PIN_NOT_SET)
+ lcd.pins.bl = PIN_NONE;
+ if (lcd.pins.cl == PIN_NOT_SET)
+ lcd.pins.cl = PIN_NONE;
+ if (lcd.pins.da == PIN_NOT_SET)
+ lcd.pins.da = PIN_NONE;
+
+ if (lcd.charset == NOT_SET)
+ lcd.charset = DEFAULT_LCD_CHARSET;
+
+ if (lcd.charset == LCD_CHARSET_KS0074)
lcd_char_conv = lcd_char_conv_ks0074;
else
lcd_char_conv = NULL;
- if (lcd_bl_pin != PIN_NONE)
+ if (lcd.pins.bl != PIN_NONE)
init_scan_timer();
- pin_to_bits(lcd_e_pin, lcd_bits[LCD_PORT_D][LCD_BIT_E],
+ pin_to_bits(lcd.pins.e, lcd_bits[LCD_PORT_D][LCD_BIT_E],
lcd_bits[LCD_PORT_C][LCD_BIT_E]);
- pin_to_bits(lcd_rs_pin, lcd_bits[LCD_PORT_D][LCD_BIT_RS],
+ pin_to_bits(lcd.pins.rs, lcd_bits[LCD_PORT_D][LCD_BIT_RS],
lcd_bits[LCD_PORT_C][LCD_BIT_RS]);
- pin_to_bits(lcd_rw_pin, lcd_bits[LCD_PORT_D][LCD_BIT_RW],
+ pin_to_bits(lcd.pins.rw, lcd_bits[LCD_PORT_D][LCD_BIT_RW],
lcd_bits[LCD_PORT_C][LCD_BIT_RW]);
- pin_to_bits(lcd_bl_pin, lcd_bits[LCD_PORT_D][LCD_BIT_BL],
+ pin_to_bits(lcd.pins.bl, lcd_bits[LCD_PORT_D][LCD_BIT_BL],
lcd_bits[LCD_PORT_C][LCD_BIT_BL]);
- pin_to_bits(lcd_cl_pin, lcd_bits[LCD_PORT_D][LCD_BIT_CL],
+ pin_to_bits(lcd.pins.cl, lcd_bits[LCD_PORT_D][LCD_BIT_CL],
lcd_bits[LCD_PORT_C][LCD_BIT_CL]);
- pin_to_bits(lcd_da_pin, lcd_bits[LCD_PORT_D][LCD_BIT_DA],
+ pin_to_bits(lcd.pins.da, lcd_bits[LCD_PORT_D][LCD_BIT_DA],
lcd_bits[LCD_PORT_C][LCD_BIT_DA]);
/* before this line, we must NOT send anything to the display.
* Since lcd_init_display() needs to write data, we have to
 * mark the LCD as initialized just before. */
- lcd_initialized = 1;
+ lcd.initialized = true;
lcd_init_display();
/* display a short message */
@@ -1589,10 +1619,10 @@ static void lcd_init(void)
panel_lcd_print("\x1b[Lc\x1b[Lb\x1b[L*Linux-" UTS_RELEASE "\nPanel-"
PANEL_VERSION);
#endif
- lcd_addr_x = 0;
- lcd_addr_y = 0;
+ lcd.addr.x = 0;
+ lcd.addr.y = 0;
/* clear the display on the next device opening */
- lcd_must_clear = 1;
+ lcd.must_clear = true;
lcd_gotoxy();
}
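
lcd_init() above now applies the per-type defaults unconditionally and then lets any module parameter that was explicitly given at load time override them, using NOT_SET as the "not given" sentinel. A compact sketch of that idiom, with illustrative field and parameter names:

#define NOT_SET -1

static int height_param = NOT_SET;      /* filled in by module_param() at load time */
static int width_param = NOT_SET;

static void apply_geometry(int *height, int *width)
{
        *height = 2;                    /* per-type default */
        *width = 40;

        if (height_param != NOT_SET)    /* an explicit load-time value wins */
                *height = height_param;
        if (width_param != NOT_SET)
                *width = width_param;
}
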
@@ -1627,20 +1657,19 @@ static ssize_t keypad_read(struct file *file,
static int keypad_open(struct inode *inode, struct file *file)
{
- if (keypad_open_cnt)
+ if (!atomic_dec_and_test(&keypad_available))
return -EBUSY; /* open only once at a time */
if (file->f_mode & FMODE_WRITE) /* device is read-only */
return -EPERM;
keypad_buflen = 0; /* flush the buffer on opening */
- keypad_open_cnt++;
return 0;
}
static int keypad_release(struct inode *inode, struct file *file)
{
- keypad_open_cnt--;
+ atomic_inc(&keypad_available);
return 0;
}
@@ -1652,9 +1681,9 @@ static const struct file_operations keypad_fops = {
};
static struct miscdevice keypad_dev = {
- KEYPAD_MINOR,
- "keypad",
- &keypad_fops
+ .minor = KEYPAD_MINOR,
+ .name = "keypad",
+ .fops = &keypad_fops,
};
static void keypad_send_key(const char *string, int max_len)
@@ -1663,7 +1692,7 @@ static void keypad_send_key(const char *string, int max_len)
return;
/* send the key to the device only if a process is attached to it. */
- if (keypad_open_cnt > 0) {
+ if (!atomic_read(&keypad_available)) {
while (max_len-- && keypad_buflen < KEYPAD_BUFFER && *string) {
keypad_buffer[(keypad_start + keypad_buflen++) %
KEYPAD_BUFFER] = *string++;
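
keypad_send_key() above only queues keys while a reader actually holds the device open (the availability token reads 0) and appends them to a fixed-size circular buffer indexed modulo KEYPAD_BUFFER. A small stand-alone sketch of that enqueue, with hypothetical names:

#define BUF_SIZE 64

static char ring[BUF_SIZE];
static int ring_start;  /* index of the oldest queued byte */
static int ring_len;    /* number of queued bytes */

/* append as much of 's' as fits; anything beyond the capacity is dropped */
static void ring_enqueue(const char *s, int max_len)
{
        while (max_len-- && ring_len < BUF_SIZE && *s)
                ring[(ring_start + ring_len++) % BUF_SIZE] = *s++;
}
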
@@ -1917,7 +1946,7 @@ static void panel_process_inputs(void)
static void panel_scan_timer(void)
{
- if (keypad_enabled && keypad_initialized) {
+ if (keypad.enabled && keypad_initialized) {
if (spin_trylock_irq(&pprt_lock)) {
phys_scan_contacts();
@@ -1929,14 +1958,16 @@ static void panel_scan_timer(void)
panel_process_inputs();
}
- if (lcd_enabled && lcd_initialized) {
+ if (lcd.enabled && lcd.initialized) {
if (keypressed) {
- if (light_tempo == 0 && ((lcd_flags & LCD_FLAG_L) == 0))
+ if (lcd.light_tempo == 0
+ && ((lcd.flags & LCD_FLAG_L) == 0))
lcd_backlight(1);
- light_tempo = FLASH_LIGHT_TEMPO;
- } else if (light_tempo > 0) {
- light_tempo--;
- if (light_tempo == 0 && ((lcd_flags & LCD_FLAG_L) == 0))
+ lcd.light_tempo = FLASH_LIGHT_TEMPO;
+ } else if (lcd.light_tempo > 0) {
+ lcd.light_tempo--;
+ if (lcd.light_tempo == 0
+ && ((lcd.flags & LCD_FLAG_L) == 0))
lcd_backlight(0);
}
}
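
The panel_scan_timer() hunk above implements a simple flash timeout: a key press switches the backlight on and arms lcd.light_tempo, and each following scan tick decrements it until the light is switched back off, unless LCD_FLAG_L keeps it on permanently. A tiny sketch of that countdown pattern; backlight_set() and the other names are made up for illustration:

#define FLASH_TICKS 4

static int tempo;       /* remaining ticks of forced backlight */

static void backlight_set(int on)
{
        (void)on;       /* placeholder: drive the real backlight pin here */
}

/* called once per scan tick */
static void flash_tick(int key_pressed, int always_on)
{
        if (key_pressed) {
                if (tempo == 0 && !always_on)
                        backlight_set(1);
                tempo = FLASH_TICKS;
        } else if (tempo > 0) {
                tempo--;
                if (tempo == 0 && !always_on)
                        backlight_set(0);
        }
}
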
@@ -2108,7 +2139,7 @@ static void keypad_init(void)
static int panel_notify_sys(struct notifier_block *this, unsigned long code,
void *unused)
{
- if (lcd_enabled && lcd_initialized) {
+ if (lcd.enabled && lcd.initialized) {
switch (code) {
case SYS_DOWN:
panel_lcd_print
@@ -2164,13 +2195,13 @@ static void panel_attach(struct parport *port)
/* must init LCD first, just in case an IRQ from the keypad is
* generated at keypad init
*/
- if (lcd_enabled) {
+ if (lcd.enabled) {
lcd_init();
if (misc_register(&lcd_dev))
goto err_unreg_device;
}
- if (keypad_enabled) {
+ if (keypad.enabled) {
keypad_init();
if (misc_register(&keypad_dev))
goto err_lcd_unreg;
@@ -2178,7 +2209,7 @@ static void panel_attach(struct parport *port)
return;
err_lcd_unreg:
- if (lcd_enabled)
+ if (lcd.enabled)
misc_deregister(&lcd_dev);
err_unreg_device:
parport_unregister_device(pprt);
@@ -2196,14 +2227,14 @@ static void panel_detach(struct parport *port)
return;
}
- if (keypad_enabled && keypad_initialized) {
+ if (keypad.enabled && keypad_initialized) {
misc_deregister(&keypad_dev);
keypad_initialized = 0;
}
- if (lcd_enabled && lcd_initialized) {
+ if (lcd.enabled && lcd.initialized) {
misc_deregister(&lcd_dev);
- lcd_initialized = 0;
+ lcd.initialized = false;
}
parport_release(pprt);
@@ -2218,72 +2249,89 @@ static struct parport_driver panel_driver = {
};
/* init function */
-static int panel_init(void)
+static int __init panel_init_module(void)
{
- /* for backwards compatibility */
- if (keypad_type < 0)
- keypad_type = keypad_enabled;
-
- if (lcd_type < 0)
- lcd_type = lcd_enabled;
-
- if (parport < 0)
- parport = DEFAULT_PARPORT;
+ int selected_keypad_type = NOT_SET;
/* take care of an eventual profile */
switch (profile) {
case PANEL_PROFILE_CUSTOM:
/* custom profile */
- if (keypad_type < 0)
- keypad_type = DEFAULT_KEYPAD;
- if (lcd_type < 0)
- lcd_type = DEFAULT_LCD;
+ selected_keypad_type = DEFAULT_KEYPAD_TYPE;
+ selected_lcd_type = DEFAULT_LCD_TYPE;
break;
case PANEL_PROFILE_OLD:
/* 8 bits, 2*16, old keypad */
- if (keypad_type < 0)
- keypad_type = KEYPAD_TYPE_OLD;
- if (lcd_type < 0)
- lcd_type = LCD_TYPE_OLD;
- if (lcd_width < 0)
+ selected_keypad_type = KEYPAD_TYPE_OLD;
+ selected_lcd_type = LCD_TYPE_OLD;
+
+ /* TODO: These two are a little hacky, sort it out later */
+ if (lcd_width == NOT_SET)
lcd_width = 16;
- if (lcd_hwidth < 0)
+ if (lcd_hwidth == NOT_SET)
lcd_hwidth = 16;
break;
case PANEL_PROFILE_NEW:
/* serial, 2*16, new keypad */
- if (keypad_type < 0)
- keypad_type = KEYPAD_TYPE_NEW;
- if (lcd_type < 0)
- lcd_type = LCD_TYPE_KS0074;
+ selected_keypad_type = KEYPAD_TYPE_NEW;
+ selected_lcd_type = LCD_TYPE_KS0074;
break;
case PANEL_PROFILE_HANTRONIX:
/* 8 bits, 2*16 hantronix-like, no keypad */
- if (keypad_type < 0)
- keypad_type = KEYPAD_TYPE_NONE;
- if (lcd_type < 0)
- lcd_type = LCD_TYPE_HANTRONIX;
+ selected_keypad_type = KEYPAD_TYPE_NONE;
+ selected_lcd_type = LCD_TYPE_HANTRONIX;
break;
case PANEL_PROFILE_NEXCOM:
/* generic 8 bits, 2*16, nexcom keypad, e.g. Nexcom. */
- if (keypad_type < 0)
- keypad_type = KEYPAD_TYPE_NEXCOM;
- if (lcd_type < 0)
- lcd_type = LCD_TYPE_NEXCOM;
+ selected_keypad_type = KEYPAD_TYPE_NEXCOM;
+ selected_lcd_type = LCD_TYPE_NEXCOM;
break;
case PANEL_PROFILE_LARGE:
/* 8 bits, 2*40, old keypad */
- if (keypad_type < 0)
- keypad_type = KEYPAD_TYPE_OLD;
- if (lcd_type < 0)
- lcd_type = LCD_TYPE_OLD;
+ selected_keypad_type = KEYPAD_TYPE_OLD;
+ selected_lcd_type = LCD_TYPE_OLD;
break;
}
- lcd_enabled = (lcd_type > 0);
- keypad_enabled = (keypad_type > 0);
+ /*
+ * Init lcd struct with load-time values to preserve exact current
+ * functionality (at least for now).
+ */
+ lcd.height = lcd_height;
+ lcd.width = lcd_width;
+ lcd.bwidth = lcd_bwidth;
+ lcd.hwidth = lcd_hwidth;
+ lcd.charset = lcd_charset;
+ lcd.proto = lcd_proto;
+ lcd.pins.e = lcd_e_pin;
+ lcd.pins.rs = lcd_rs_pin;
+ lcd.pins.rw = lcd_rw_pin;
+ lcd.pins.cl = lcd_cl_pin;
+ lcd.pins.da = lcd_da_pin;
+ lcd.pins.bl = lcd_bl_pin;
+
+ /* Leave it for now, just in case */
+ lcd.esc_seq.len = -1;
+
+ /*
+ * Overwrite selection with module param values (both keypad and lcd),
+ * where the deprecated params have lower prio.
+ */
+ if (keypad_enabled != NOT_SET)
+ selected_keypad_type = keypad_enabled;
+ if (keypad_type != NOT_SET)
+ selected_keypad_type = keypad_type;
- switch (keypad_type) {
+ keypad.enabled = (selected_keypad_type > 0);
+
+ if (lcd_enabled != NOT_SET)
+ selected_lcd_type = lcd_enabled;
+ if (lcd_type != NOT_SET)
+ selected_lcd_type = lcd_type;
+
+ lcd.enabled = (selected_lcd_type > 0);
+
+ switch (selected_keypad_type) {
case KEYPAD_TYPE_OLD:
keypad_profile = old_keypad_profile;
break;
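
panel_init_module() above resolves the keypad and LCD types in three layers: the profile picks a default, the deprecated keypad_enabled/lcd_enabled parameters may override it, and the newer keypad_type/lcd_type parameters override both, simply because they are tested last. A sketch of that precedence chain (the function and variable names are illustrative, not part of the patch):

#define NOT_SET -1

static int type_deprecated = NOT_SET;   /* old, still-accepted module parameter */
static int type_new = NOT_SET;          /* preferred module parameter */

static int resolve_type(int profile_default)
{
        int selected = profile_default;

        if (type_deprecated != NOT_SET) /* lower priority: tested first */
                selected = type_deprecated;
        if (type_new != NOT_SET)        /* higher priority: tested last, wins */
                selected = type_new;

        return selected;
}
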
@@ -2306,7 +2354,7 @@ static int panel_init(void)
return -EIO;
}
- if (!lcd_enabled && !keypad_enabled) {
+ if (!lcd.enabled && !keypad.enabled) {
/* no device enabled, let's release the parport */
if (pprt) {
parport_release(pprt);
@@ -2333,11 +2381,6 @@ static int panel_init(void)
return 0;
}
-static int __init panel_init_module(void)
-{
- return panel_init();
-}
-
static void __exit panel_cleanup_module(void)
{
unregister_reboot_notifier(&panel_notifier);
@@ -2346,16 +2389,16 @@ static void __exit panel_cleanup_module(void)
del_timer_sync(&scan_timer);
if (pprt != NULL) {
- if (keypad_enabled) {
+ if (keypad.enabled) {
misc_deregister(&keypad_dev);
keypad_initialized = 0;
}
- if (lcd_enabled) {
+ if (lcd.enabled) {
panel_lcd_print("\x0cLCD driver " PANEL_VERSION
"\nunloaded.\x1b[Lc\x1b[Lb\x1b[L-");
misc_deregister(&lcd_dev);
- lcd_initialized = 0;
+ lcd.initialized = false;
}
/* TODO: free all input signals */
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index 9224e029ef2..d61842ed673 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -888,7 +888,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
pbss_network->Rssi = 0;
- memcpy(pbss_network->MacAddress, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ ether_addr_copy(pbss_network->MacAddress, myid(&(padapter->eeprompriv)));
/* beacon interval */
p = rtw_get_beacon_interval_from_ie(ie);/* 8: TimeStamp, 2: Beacon Interval 2:Capability */
@@ -1164,7 +1164,7 @@ int rtw_acl_add_sta(struct adapter *padapter, u8 *addr)
if (!paclnode->valid) {
INIT_LIST_HEAD(&paclnode->list);
- memcpy(paclnode->addr, addr, ETH_ALEN);
+ ether_addr_copy(paclnode->addr, addr);
paclnode->valid = true;
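
The rtw_ap.c hunks above swap open-coded memcpy of MAC addresses for ether_addr_copy(), the helper from <linux/etherdevice.h> meant for 6-byte Ethernet addresses; it assumes both buffers are at least 2-byte aligned, which lets it use wider stores on some architectures. A hedged usage sketch (copy_peer_addr is a made-up wrapper):

#include <linux/etherdevice.h>

static void copy_peer_addr(u8 dst[ETH_ALEN], const u8 src[ETH_ALEN])
{
        /* equivalent to memcpy(dst, src, ETH_ALEN) for aligned buffers */
        ether_addr_copy(dst, src);
}
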
@@ -1186,7 +1186,6 @@ int rtw_acl_add_sta(struct adapter *padapter, u8 *addr)
int rtw_acl_remove_sta(struct adapter *padapter, u8 *addr)
{
struct list_head *plist, *phead;
- int ret = 0;
struct rtw_wlan_acl_node *paclnode;
struct sta_priv *pstapriv = &padapter->stapriv;
struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
@@ -1217,7 +1216,7 @@ int rtw_acl_remove_sta(struct adapter *padapter, u8 *addr)
spin_unlock_bh(&(pacl_node_q->lock));
DBG_88E("%s, acl_num =%d\n", __func__, pacl_list->num);
- return ret;
+ return 0;
}
static void update_bcn_fixed_ie(struct adapter *padapter)
@@ -1753,7 +1752,6 @@ u8 ap_free_sta(struct adapter *padapter, struct sta_info *psta,
int rtw_ap_inform_ch_switch(struct adapter *padapter, u8 new_ch, u8 ch_offset)
{
struct list_head *phead, *plist;
- int ret = 0;
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -1761,7 +1759,7 @@ int rtw_ap_inform_ch_switch(struct adapter *padapter, u8 new_ch, u8 ch_offset)
u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
if ((pmlmeinfo->state&0x03) != WIFI_FW_AP_STATE)
- return ret;
+ return 0;
DBG_88E(FUNC_NDEV_FMT" with ch:%u, offset:%u\n",
FUNC_NDEV_ARG(padapter->pnetdev), new_ch, ch_offset);
@@ -1782,13 +1780,12 @@ int rtw_ap_inform_ch_switch(struct adapter *padapter, u8 new_ch, u8 ch_offset)
issue_action_spct_ch_switch(padapter, bc_addr, new_ch, ch_offset);
- return ret;
+ return 0;
}
int rtw_sta_flush(struct adapter *padapter)
{
struct list_head *phead, *plist;
- int ret = 0;
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -1798,7 +1795,7 @@ int rtw_sta_flush(struct adapter *padapter)
DBG_88E(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(padapter->pnetdev));
if ((pmlmeinfo->state&0x03) != WIFI_FW_AP_STATE)
- return ret;
+ return 0;
spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
@@ -1822,7 +1819,7 @@ int rtw_sta_flush(struct adapter *padapter)
associated_clients_update(padapter, true);
- return ret;
+ return 0;
}
/* called > TSR LEVEL for USB or SDIO Interface*/
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index eddef9cd2e1..4b434624495 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -167,7 +167,7 @@ int rtw_cmd_thread(void *context)
struct cmd_obj *pcmd;
u8 (*cmd_hdl)(struct adapter *padapter, u8 *pbuf);
void (*pcmd_callback)(struct adapter *dev, struct cmd_obj *pcmd);
- struct adapter *padapter = (struct adapter *)context;
+ struct adapter *padapter = context;
struct cmd_priv *pcmdpriv = &(padapter->cmdpriv);
allow_signal(SIGTERM);
@@ -433,8 +433,7 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
psecnetwork = (struct wlan_bssid_ex *)&psecuritypriv->sec_bss;
if (psecnetwork == NULL) {
- if (pcmd != NULL)
- kfree(pcmd);
+ kfree(pcmd);
res = _FAIL;
@@ -456,7 +455,7 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
psecnetwork->IELength = 0;
/* Added by Albert 2009/02/18 */
- /* If the the driver wants to use the bssid to create the connection. */
+ /* If the driver wants to use the bssid to create the connection. */
/* If not, we have to copy the connecting AP's MAC address to it so that */
/* the driver just has the bssid information for PMKIDList searching. */
@@ -638,7 +637,7 @@ u8 rtw_setstakey_cmd(struct adapter *padapter, u8 *psta, u8 unicast_key)
ether_addr_copy(psetstakey_para->addr, sta->hwaddr);
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
- psetstakey_para->algorithm = (unsigned char) psecuritypriv->dot11PrivacyAlgrthm;
+ psetstakey_para->algorithm = (unsigned char)psecuritypriv->dot11PrivacyAlgrthm;
else
GET_ENCRY_ALGO(psecuritypriv, sta, psetstakey_para->algorithm, false);
diff --git a/drivers/staging/rtl8188eu/core/rtw_debug.c b/drivers/staging/rtl8188eu/core/rtw_debug.c
index 1f72f7d8097..bc3fe10ff24 100644
--- a/drivers/staging/rtl8188eu/core/rtw_debug.c
+++ b/drivers/staging/rtl8188eu/core/rtw_debug.c
@@ -45,7 +45,7 @@ int proc_get_write_reg(char *page, char **start,
int proc_set_write_reg(struct file *file, const char __user *buffer,
unsigned long count, void *data)
{
- struct net_device *dev = (struct net_device *)data;
+ struct net_device *dev = data;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
char tmp[32];
u32 addr, val, len;
@@ -577,7 +577,7 @@ int proc_get_rx_signal(char *page, char **start,
int proc_set_rx_signal(struct file *file, const char __user *buffer,
unsigned long count, void *data)
{
- struct net_device *dev = (struct net_device *)data;
+ struct net_device *dev = data;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
char tmp[32];
u32 is_signal_dbg;
@@ -627,7 +627,7 @@ int proc_get_ht_enable(char *page, char **start,
int proc_set_ht_enable(struct file *file, const char __user *buffer,
unsigned long count, void *data)
{
- struct net_device *dev = (struct net_device *)data;
+ struct net_device *dev = data;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
struct registry_priv *pregpriv = &padapter->registrypriv;
char tmp[32];
@@ -669,7 +669,7 @@ int proc_get_cbw40_enable(char *page, char **start,
int proc_set_cbw40_enable(struct file *file, const char __user *buffer,
unsigned long count, void *data)
{
- struct net_device *dev = (struct net_device *)data;
+ struct net_device *dev = data;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
struct registry_priv *pregpriv = &padapter->registrypriv;
char tmp[32];
@@ -710,7 +710,7 @@ int proc_get_ampdu_enable(char *page, char **start,
int proc_set_ampdu_enable(struct file *file, const char __user *buffer,
unsigned long count, void *data)
{
- struct net_device *dev = (struct net_device *)data;
+ struct net_device *dev = data;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
struct registry_priv *pregpriv = &padapter->registrypriv;
char tmp[32];
@@ -771,7 +771,7 @@ int proc_get_rx_stbc(char *page, char **start,
int proc_set_rx_stbc(struct file *file, const char __user *buffer,
unsigned long count, void *data)
{
- struct net_device *dev = (struct net_device *)data;
+ struct net_device *dev = data;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
struct registry_priv *pregpriv = &padapter->registrypriv;
char tmp[32];
@@ -800,7 +800,7 @@ int proc_get_rssi_disp(char *page, char **start,
int proc_set_rssi_disp(struct file *file, const char __user *buffer,
unsigned long count, void *data)
{
- struct net_device *dev = (struct net_device *)data;
+ struct net_device *dev = data;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
char tmp[32];
u32 enable = 0;
diff --git a/drivers/staging/rtl8188eu/core/rtw_efuse.c b/drivers/staging/rtl8188eu/core/rtw_efuse.c
index 7006088d1ad..8816d116a8b 100644
--- a/drivers/staging/rtl8188eu/core/rtw_efuse.c
+++ b/drivers/staging/rtl8188eu/core/rtw_efuse.c
@@ -106,13 +106,13 @@ efuse_phymap_to_logical(u8 *phymap, u16 _offset, u16 _size_byte, u8 *pbuf)
efuseTbl = kzalloc(EFUSE_MAP_LEN_88E, GFP_KERNEL);
if (efuseTbl == NULL) {
DBG_88E("%s: alloc efuseTbl fail!\n", __func__);
- goto exit;
+ return;
}
eFuseWord = (u16 **)rtw_malloc2d(EFUSE_MAX_SECTION_88E, EFUSE_MAX_WORD_UNIT, sizeof(u16));
if (eFuseWord == NULL) {
DBG_88E("%s: alloc eFuseWord fail!\n", __func__);
- goto exit;
+ goto eFuseWord_failed;
}
/* 0. Refresh efuse init map as all oxFF. */
@@ -210,10 +210,10 @@ efuse_phymap_to_logical(u8 *phymap, u16 _offset, u16 _size_byte, u8 *pbuf)
/* */
exit:
- kfree(efuseTbl);
+ kfree(eFuseWord);
- if (eFuseWord)
- kfree(eFuseWord);
+eFuseWord_failed:
+ kfree(efuseTbl);
}
static void efuse_read_phymap_from_txpktbuf(
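
The efuse_phymap_to_logical() fix above reorders the cleanup so each buffer is freed exactly once: failure to allocate the first buffer returns directly, failure on the second jumps to a label that frees only the first, and the freeing order mirrors the allocation order in reverse. A generic sketch of that unwind idiom, with illustrative names and sizes:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

static int demo_alloc_two(void)
{
        u8 *a, *b;
        int ret = -ENOMEM;

        a = kzalloc(128, GFP_KERNEL);
        if (!a)
                return -ENOMEM;         /* nothing to undo yet */

        b = kzalloc(256, GFP_KERNEL);
        if (!b)
                goto free_a;            /* undo only the first allocation */

        /* ... use a and b ... */
        ret = 0;

        kfree(b);
free_a:
        kfree(a);
        return ret;
}
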
@@ -250,7 +250,7 @@ static void efuse_read_phymap_from_txpktbuf(
while (!(reg_0x143 = usb_read8(adapter, REG_TXPKTBUF_DBG)) &&
(passing_time = rtw_get_passing_time_ms(start)) < 1000) {
DBG_88E("%s polling reg_0x143:0x%02x, reg_0x106:0x%02x\n", __func__, reg_0x143, usb_read8(adapter, 0x106));
- msleep(1);
+ usleep_range(1000, 2000);
}
lo32 = usb_read32(adapter, REG_PKTBUF_DBG_DATA_L);
@@ -322,7 +322,6 @@ void efuse_ReadEFuse(struct adapter *Adapter, u8 efuseType, u16 _offset, u16 _si
iol_read_efuse(Adapter, 0, _offset, _size_byte, pbuf);
iol_mode_enable(Adapter, 0);
}
- return;
}
/* Do not support BT */
@@ -332,56 +331,56 @@ void EFUSE_GetEfuseDefinition(struct adapter *pAdapter, u8 efuseType, u8 type, v
case TYPE_EFUSE_MAX_SECTION:
{
u8 *pMax_section;
- pMax_section = (u8 *)pOut;
+ pMax_section = pOut;
*pMax_section = EFUSE_MAX_SECTION_88E;
}
break;
case TYPE_EFUSE_REAL_CONTENT_LEN:
{
u16 *pu2Tmp;
- pu2Tmp = (u16 *)pOut;
+ pu2Tmp = pOut;
*pu2Tmp = EFUSE_REAL_CONTENT_LEN_88E;
}
break;
case TYPE_EFUSE_CONTENT_LEN_BANK:
{
u16 *pu2Tmp;
- pu2Tmp = (u16 *)pOut;
+ pu2Tmp = pOut;
*pu2Tmp = EFUSE_REAL_CONTENT_LEN_88E;
}
break;
case TYPE_AVAILABLE_EFUSE_BYTES_BANK:
{
u16 *pu2Tmp;
- pu2Tmp = (u16 *)pOut;
+ pu2Tmp = pOut;
*pu2Tmp = (u16)(EFUSE_REAL_CONTENT_LEN_88E-EFUSE_OOB_PROTECT_BYTES_88E);
}
break;
case TYPE_AVAILABLE_EFUSE_BYTES_TOTAL:
{
u16 *pu2Tmp;
- pu2Tmp = (u16 *)pOut;
+ pu2Tmp = pOut;
*pu2Tmp = (u16)(EFUSE_REAL_CONTENT_LEN_88E-EFUSE_OOB_PROTECT_BYTES_88E);
}
break;
case TYPE_EFUSE_MAP_LEN:
{
u16 *pu2Tmp;
- pu2Tmp = (u16 *)pOut;
+ pu2Tmp = pOut;
*pu2Tmp = (u16)EFUSE_MAP_LEN_88E;
}
break;
case TYPE_EFUSE_PROTECT_BYTES_BANK:
{
u8 *pu1Tmp;
- pu1Tmp = (u8 *)pOut;
+ pu1Tmp = pOut;
*pu1Tmp = (u8)(EFUSE_OOB_PROTECT_BYTES_88E);
}
break;
default:
{
u8 *pu1Tmp;
- pu1Tmp = (u8 *)pOut;
+ pu1Tmp = pOut;
*pu1Tmp = 0;
}
break;
@@ -638,10 +637,9 @@ static bool hal_EfusePgPacketWrite2ByteHeader(struct adapter *pAdapter, u8 efuse
if ((tmp_header & 0x0F) == 0x0F) { /* word_en PG fail */
if (repeatcnt++ > EFUSE_REPEAT_THRESHOLD_) {
return false;
- } else {
- efuse_addr++;
- continue;
}
+ efuse_addr++;
+ continue;
} else if (pg_header != tmp_header) { /* offset PG fail */
struct pgpkt fixPkt;
fixPkt.offset = ((pg_header_temp & 0xE0) >> 5) | ((tmp_header & 0xF0) >> 1);
@@ -708,14 +706,13 @@ static bool hal_EfusePgPacketWriteData(struct adapter *pAdapter, u8 efuseType, u
if (badworden == 0x0F) {
/* write ok */
return true;
- } else {
- /* reorganize other pg packet */
- PgWriteSuccess = Efuse_PgPacketWrite(pAdapter, pTargetPkt->offset, badworden, pTargetPkt->data);
- if (!PgWriteSuccess)
- return false;
- else
- return true;
}
+ /* reorganize other pg packet */
+ PgWriteSuccess = Efuse_PgPacketWrite(pAdapter, pTargetPkt->offset, badworden, pTargetPkt->data);
+ if (!PgWriteSuccess)
+ return false;
+ else
+ return true;
}
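
Several rtw_efuse.c hunks above drop else branches that follow a return (a pattern checkpatch flags) and simply outdent the remaining code. A trivial sketch of the transformation; demo_pg_write() and write_all_words() are made-up stand-ins for the real helpers:

#include <linux/types.h>

static bool write_all_words(u8 badworden)
{
        return badworden == 0;  /* placeholder for the real rewrite path */
}

static bool demo_pg_write(u8 badworden)
{
        if (badworden == 0x0F)
                return true;    /* every word programmed, nothing left to redo */

        /* no else needed: the early return above already handled that case */
        return write_all_words(badworden);
}
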
static bool
diff --git a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
index 755d3effd0a..f2c3ca79c0c 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
@@ -159,7 +159,7 @@ u8 *rtw_set_ie
return pbuf + len + 2;
}
-inline u8 *rtw_set_ie_ch_switch (u8 *buf, u32 *buf_len, u8 ch_switch_mode,
+inline u8 *rtw_set_ie_ch_switch(u8 *buf, u32 *buf_len, u8 ch_switch_mode,
u8 new_ch, u8 ch_switch_cnt)
{
u8 ie_data[3];
@@ -870,7 +870,7 @@ static int rtw_ieee802_11_parse_vendor_specific(u8 *pos, uint elen,
if (elen < 4) {
if (show_errors) {
DBG_88E("short vendor specific information element ignored (len=%lu)\n",
- (unsigned long) elen);
+ (unsigned long)elen);
}
return -1;
}
@@ -890,7 +890,7 @@ static int rtw_ieee802_11_parse_vendor_specific(u8 *pos, uint elen,
case WME_OUI_TYPE: /* this is a Wi-Fi WME info. element */
if (elen < 5) {
DBG_88E("short WME information element ignored (len=%lu)\n",
- (unsigned long) elen);
+ (unsigned long)elen);
return -1;
}
switch (pos[4]) {
@@ -905,7 +905,7 @@ static int rtw_ieee802_11_parse_vendor_specific(u8 *pos, uint elen,
break;
default:
DBG_88E("unknown WME information element ignored (subtype=%d len=%lu)\n",
- pos[4], (unsigned long) elen);
+ pos[4], (unsigned long)elen);
return -1;
}
break;
@@ -916,7 +916,7 @@ static int rtw_ieee802_11_parse_vendor_specific(u8 *pos, uint elen,
break;
default:
DBG_88E("Unknown Microsoft information element ignored (type=%d len=%lu)\n",
- pos[3], (unsigned long) elen);
+ pos[3], (unsigned long)elen);
return -1;
}
break;
@@ -929,13 +929,13 @@ static int rtw_ieee802_11_parse_vendor_specific(u8 *pos, uint elen,
break;
default:
DBG_88E("Unknown Broadcom information element ignored (type=%d len=%lu)\n",
- pos[3], (unsigned long) elen);
+ pos[3], (unsigned long)elen);
return -1;
}
break;
default:
DBG_88E("unknown vendor specific information element ignored (vendor OUI %02x:%02x:%02x len=%lu)\n",
- pos[0], pos[1], pos[2], (unsigned long) elen);
+ pos[0], pos[1], pos[2], (unsigned long)elen);
return -1;
}
return 0;
@@ -969,7 +969,7 @@ enum parse_res rtw_ieee802_11_parse_elems(u8 *start, uint len,
if (elen > left) {
if (show_errors) {
DBG_88E("IEEE 802.11 element parse failed (id=%d elen=%d left=%lu)\n",
- id, elen, (unsigned long) left);
+ id, elen, (unsigned long)left);
}
return ParseFailed;
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c b/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
index fc280ce57d2..2faf6b2e812 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
@@ -98,7 +98,6 @@ u8 rtw_do_join(struct adapter *padapter)
pibss = padapter->registrypriv.dev_network.MacAddress;
- memset(&pdev_network->Ssid, 0, sizeof(struct ndis_802_11_ssid));
memcpy(&pdev_network->Ssid, &pmlmepriv->assoc_ssid, sizeof(struct ndis_802_11_ssid));
rtw_update_registrypriv_dev_network(padapter);
diff --git a/drivers/staging/rtl8188eu/core/rtw_led.c b/drivers/staging/rtl8188eu/core/rtw_led.c
index 384be22052e..1b8264b978d 100644
--- a/drivers/staging/rtl8188eu/core/rtw_led.c
+++ b/drivers/staging/rtl8188eu/core/rtw_led.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
*
******************************************************************************/
@@ -28,7 +24,7 @@
/* */
void BlinkTimerCallback(void *data)
{
- struct LED_871x *pLed = (struct LED_871x *)data;
+ struct LED_871x *pLed = data;
struct adapter *padapter = pLed->padapter;
if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped))
@@ -228,7 +224,8 @@ static void SwLedBlink1(struct LED_871x *pLed)
pLed->bLedWPSBlinkInProgress = false;
} else {
pLed->BlinkingLedState = RTW_LED_OFF;
- _set_timer(&(pLed->BlinkTimer), LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA);
+ _set_timer(&(pLed->BlinkTimer),
+ LED_BLINK_WPS_SUCCESS_INTERVAL_ALPHA);
}
break;
default:
@@ -392,7 +389,8 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
pLed->CurrLedState = LED_BLINK_WPS_STOP;
if (pLed->bLedOn) {
pLed->BlinkingLedState = RTW_LED_OFF;
- _set_timer(&(pLed->BlinkTimer), LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA);
+ _set_timer(&(pLed->BlinkTimer),
+ LED_BLINK_WPS_SUCCESS_INTERVAL_ALPHA);
} else {
pLed->BlinkingLedState = RTW_LED_ON;
_set_timer(&(pLed->BlinkTimer), 0);
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
index 149c271e966..d4632da50c1 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
@@ -674,7 +674,6 @@ void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("switching to adhoc master\n"));
- memset(&pdev_network->Ssid, 0, sizeof(struct ndis_802_11_ssid));
memcpy(&pdev_network->Ssid, &pmlmepriv->assoc_ssid, sizeof(struct ndis_802_11_ssid));
rtw_update_registrypriv_dev_network(adapter);
@@ -1334,7 +1333,6 @@ void rtw_stadel_event_callback(struct adapter *adapter, u8 *pbuf)
memcpy(pdev_network, &tgt_network->network, get_wlan_bssid_ex_sz(&tgt_network->network));
- memset(&pdev_network->Ssid, 0, sizeof(struct ndis_802_11_ssid));
memcpy(&pdev_network->Ssid, &pmlmepriv->assoc_ssid, sizeof(struct ndis_802_11_ssid));
rtw_update_registrypriv_dev_network(adapter);
@@ -1364,7 +1362,7 @@ void rtw_cpwm_event_callback(struct adapter *padapter, u8 *pbuf)
*/
void _rtw_join_timeout_handler (void *function_context)
{
- struct adapter *adapter = (struct adapter *)function_context;
+ struct adapter *adapter = function_context;
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
int do_join_r;
@@ -1406,7 +1404,7 @@ void _rtw_join_timeout_handler (void *function_context)
*/
void rtw_scan_timeout_handler (void *function_context)
{
- struct adapter *adapter = (struct adapter *)function_context;
+ struct adapter *adapter = function_context;
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
DBG_88E(FUNC_ADPT_FMT" fw_state=%x\n", FUNC_ADPT_ARG(adapter), get_fwstate(pmlmepriv));
@@ -1437,7 +1435,7 @@ void rtw_dynamic_check_timer_handlder(void *function_context)
struct registry_priv *pregistrypriv = &adapter->registrypriv;
if (!adapter)
- goto exit;
+ return;
if (!adapter->hw_init_completed)
goto exit;
if ((adapter->bDriverStopped) || (adapter->bSurpriseRemoved))
@@ -2117,7 +2115,7 @@ void rtw_issue_addbareq_cmd(struct adapter *padapter, struct xmit_frame *pxmitfr
if (0 == issued) {
DBG_88E("rtw_issue_addbareq_cmd, p=%d\n", priority);
psta->htpriv.candidate_tid_bitmap |= BIT((u8)priority);
- rtw_addbareq_cmd(padapter, (u8) priority, pattrib->ra);
+ rtw_addbareq_cmd(padapter, (u8)priority, pattrib->ra);
}
}
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index 70b1bc3e0e6..e4b7ee4c99d 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -227,7 +227,7 @@ static void init_mlme_ext_priv_value(struct adapter *padapter)
pmlmeext->cur_channel = padapter->registrypriv.channel;
pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_20;
pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
- pmlmeext->oper_channel = pmlmeext->cur_channel ;
+ pmlmeext->oper_channel = pmlmeext->cur_channel;
pmlmeext->oper_bwmode = pmlmeext->cur_bwmode;
pmlmeext->oper_ch_offset = pmlmeext->cur_ch_offset;
pmlmeext->retry = 0;
@@ -371,7 +371,6 @@ static u8 init_channel_set(struct adapter *padapter, u8 ChannelPlan, struct rt_c
int init_mlme_ext_priv(struct adapter *padapter)
{
- int res = _SUCCESS;
struct registry_priv *pregistrypriv = &padapter->registrypriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
@@ -397,7 +396,7 @@ int init_mlme_ext_priv(struct adapter *padapter)
pmlmeext->active_keep_alive_check = true;
- return res;
+ return _SUCCESS;
}
void free_mlme_ext_priv(struct mlme_ext_priv *pmlmeext)
@@ -945,7 +944,7 @@ unsigned int OnAssocReq(struct adapter *padapter, struct recv_frame *precv_frame
}
pstat = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
- if (pstat == (struct sta_info *)NULL) {
+ if (pstat == NULL) {
status = _RSON_CLS2_;
goto asoc_class2_error;
}
@@ -1554,7 +1553,6 @@ unsigned int OnAtim(struct adapter *padapter, struct recv_frame *precv_frame)
unsigned int on_action_spct(struct adapter *padapter, struct recv_frame *precv_frame)
{
- unsigned int ret = _FAIL;
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
u8 *pframe = precv_frame->rx_data;
@@ -1587,7 +1585,7 @@ unsigned int on_action_spct(struct adapter *padapter, struct recv_frame *precv_f
}
exit:
- return ret;
+ return _FAIL;
}
unsigned int OnAction_qos(struct adapter *padapter, struct recv_frame *precv_frame)
@@ -2000,7 +1998,7 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
DBG_88E("%s, alloc mgnt frame fail\n", __func__);
return;
}
-#if defined (CONFIG_88EU_AP_MODE)
+#if defined(CONFIG_88EU_AP_MODE)
spin_lock_bh(&pmlmepriv->bcn_update_lock);
#endif /* if defined (CONFIG_88EU_AP_MODE) */
@@ -2027,7 +2025,7 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
SetFrameSubType(pframe, WIFI_BEACON);
pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof (struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE) {
int len_diff;
@@ -2042,8 +2040,8 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
);
pframe += (cur_network->IELength+len_diff);
pattrib->pktlen += (cur_network->IELength+len_diff);
- wps_ie = rtw_get_wps_ie(pmgntframe->buf_addr+TXDESC_OFFSET+sizeof (struct rtw_ieee80211_hdr_3addr)+_BEACON_IE_OFFSET_,
- pattrib->pktlen-sizeof (struct rtw_ieee80211_hdr_3addr)-_BEACON_IE_OFFSET_, NULL, &wps_ielen);
+ wps_ie = rtw_get_wps_ie(pmgntframe->buf_addr+TXDESC_OFFSET+sizeof(struct rtw_ieee80211_hdr_3addr)+_BEACON_IE_OFFSET_,
+ pattrib->pktlen-sizeof(struct rtw_ieee80211_hdr_3addr)-_BEACON_IE_OFFSET_, NULL, &wps_ielen);
if (wps_ie && wps_ielen > 0)
rtw_get_wps_attr_content(wps_ie, wps_ielen, WPS_ATTR_SELECTED_REGISTRAR, (u8 *)(&sr), NULL);
if (sr != 0)
@@ -2101,7 +2099,7 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
/* todo:HT for adhoc */
_issue_bcn:
-#if defined (CONFIG_88EU_AP_MODE)
+#if defined(CONFIG_88EU_AP_MODE)
pmlmepriv->update_bcn = false;
spin_unlock_bh(&pmlmepriv->bcn_update_lock);
@@ -2130,7 +2128,7 @@ void issue_probersp(struct adapter *padapter, unsigned char *da, u8 is_valid_p2p
__le16 *fctrl;
unsigned char *mac, *bssid;
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
-#if defined (CONFIG_88EU_AP_MODE)
+#if defined(CONFIG_88EU_AP_MODE)
u8 *pwps_ie;
uint wps_ielen;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -2323,8 +2321,8 @@ static int _issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *ps
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_PROBEREQ);
- pframe += sizeof (struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof (struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
if (pssid)
pframe = rtw_set_ie(pframe, _SSID_IE_, pssid->SsidLength, pssid->Ssid, &(pattrib->pktlen));
@@ -3209,7 +3207,7 @@ exit:
return ret;
}
-void issue_action_spct_ch_switch (struct adapter *padapter, u8 *ra, u8 new_ch, u8 ch_offset)
+void issue_action_spct_ch_switch(struct adapter *padapter, u8 *ra, u8 new_ch, u8 ch_offset)
{
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
@@ -3260,7 +3258,7 @@ void issue_action_spct_ch_switch (struct adapter *padapter, u8 *ra, u8 new_ch, u
pframe = rtw_set_fixed_ie(pframe, 1, &(action), &(pattrib->pktlen));
}
- pframe = rtw_set_ie_ch_switch (pframe, &(pattrib->pktlen), 0, new_ch, 0);
+ pframe = rtw_set_ie_ch_switch(pframe, &(pattrib->pktlen), 0, new_ch, 0);
pframe = rtw_set_ie_secondary_ch_offset(pframe, &(pattrib->pktlen),
hal_ch_offset_to_secondary_ch_offset(ch_offset));
@@ -4835,7 +4833,7 @@ void linked_status_chk(struct adapter *padapter)
void survey_timer_hdl(void *function_context)
{
- struct adapter *padapter = (struct adapter *)function_context;
+ struct adapter *padapter = function_context;
struct cmd_obj *ph2c;
struct sitesurvey_parm *psurveyPara;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
@@ -4912,7 +4910,7 @@ void link_timer_hdl(void *function_context)
void addba_timer_hdl(void *function_context)
{
- struct sta_info *psta = (struct sta_info *)function_context;
+ struct sta_info *psta = function_context;
struct ht_priv *phtpriv;
if (!psta)
diff --git a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
index 27ed83cca19..df463a29b64 100644
--- a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
@@ -279,12 +279,11 @@ void rtw_ps_processor(struct adapter *padapter)
exit:
rtw_set_pwr_state_check_timer(&padapter->pwrctrlpriv);
pwrpriv->ps_processing = false;
- return;
}
static void pwr_state_check_handler(void *FunctionContext)
{
- struct adapter *padapter = (struct adapter *)FunctionContext;
+ struct adapter *padapter = FunctionContext;
rtw_ps_cmd(padapter);
}
@@ -527,7 +526,7 @@ void rtw_init_pwrctrl_priv(struct adapter *padapter)
pwrctrlpriv->LpsIdleCount = 0;
if (padapter->registrypriv.mp_mode == 1)
- pwrctrlpriv->power_mgnt = PS_MODE_ACTIVE ;
+ pwrctrlpriv->power_mgnt = PS_MODE_ACTIVE;
else
pwrctrlpriv->power_mgnt = padapter->registrypriv.power_mgnt;/* PS_MODE_MIN; */
pwrctrlpriv->bLeisurePs = (PS_MODE_ACTIVE != pwrctrlpriv->power_mgnt) ? true : false;
@@ -577,7 +576,7 @@ int _rtw_pwr_wakeup(struct adapter *padapter, u32 ips_deffer_ms, const char *cal
if (pwrpriv->ps_processing) {
DBG_88E("%s wait ps_processing...\n", __func__);
while (pwrpriv->ps_processing && rtw_get_passing_time_ms(start) <= 3000)
- msleep(10);
+ usleep_range(1000, 3000);
if (pwrpriv->ps_processing)
DBG_88E("%s wait ps_processing timeout\n", __func__);
else
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
index 4d56dbad2a7..bd79e9e7105 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -46,7 +46,7 @@ void rtw_signal_stat_timer_hdl(RTW_TIMER_HDL_ARGS);
void _rtw_init_sta_recv_priv(struct sta_recv_priv *psta_recvpriv)
{
- memset((u8 *)psta_recvpriv, 0, sizeof (struct sta_recv_priv));
+ memset((u8 *)psta_recvpriv, 0, sizeof(struct sta_recv_priv));
spin_lock_init(&psta_recvpriv->lock);
@@ -109,7 +109,7 @@ exit:
return res;
}
-void _rtw_free_recv_priv (struct recv_priv *precvpriv)
+void _rtw_free_recv_priv(struct recv_priv *precvpriv)
{
struct adapter *padapter = precvpriv->adapter;
@@ -124,7 +124,7 @@ void _rtw_free_recv_priv (struct recv_priv *precvpriv)
}
-struct recv_frame *_rtw_alloc_recvframe (struct __queue *pfree_recv_queue)
+struct recv_frame *_rtw_alloc_recvframe(struct __queue *pfree_recv_queue)
{
struct recv_frame *hdr;
struct list_head *plist, *phead;
@@ -797,7 +797,7 @@ exit:
return ret;
}
-static int ap2sta_data_frame (
+static int ap2sta_data_frame(
struct adapter *adapter,
struct recv_frame *precv_frame,
struct sta_info **psta)
@@ -1266,7 +1266,7 @@ static int validate_recv_frame(struct adapter *adapter,
u8 bDumpRxPkt;
struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
u8 *ptr = precv_frame->rx_data;
- u8 ver = (unsigned char) (*ptr)&0x3;
+ u8 ver = (unsigned char)(*ptr)&0x3;
struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
@@ -1373,7 +1373,6 @@ static int wlanhdr_to_ethhdr(struct recv_frame *precvframe)
u8 *psnap_type;
struct ieee80211_snap_hdr *psnap;
- int ret = _SUCCESS;
struct adapter *adapter = precvframe->adapter;
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
u8 *ptr = precvframe->rx_data;
@@ -1428,7 +1427,7 @@ static int wlanhdr_to_ethhdr(struct recv_frame *precvframe)
memcpy(ptr+12, &be_tmp, 2);
}
- return ret;
+ return _SUCCESS;
}
/* perform defrag */
@@ -1624,7 +1623,6 @@ static int amsdu_to_msdu(struct adapter *padapter, struct recv_frame *prframe)
struct sk_buff *sub_skb, *subframes[MAX_SUBFRAME_COUNT];
struct recv_priv *precvpriv = &padapter->recvpriv;
struct __queue *pfree_recv_queue = &(precvpriv->free_recv_queue);
- int ret = _SUCCESS;
nr_subframes = 0;
pattrib = &prframe->attrib;
@@ -1728,7 +1726,7 @@ exit:
prframe->len = 0;
rtw_free_recvframe(prframe, pfree_recv_queue);/* free this recv_frame */
- return ret;
+ return _SUCCESS;
}
static int check_indicate_seq(struct recv_reorder_ctrl *preorder_ctrl, u16 seq_num)
@@ -1949,7 +1947,7 @@ _err_exit:
void rtw_reordering_ctrl_timeout_handler(void *pcontext)
{
- struct recv_reorder_ctrl *preorder_ctrl = (struct recv_reorder_ctrl *)pcontext;
+ struct recv_reorder_ctrl *preorder_ctrl = pcontext;
struct adapter *padapter = preorder_ctrl->padapter;
struct __queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
@@ -1981,7 +1979,7 @@ static int process_recv_indicatepkts(struct adapter *padapter,
}
}
} else { /* B/G mode */
- retval = wlanhdr_to_ethhdr (prframe);
+ retval = wlanhdr_to_ethhdr(prframe);
if (retval != _SUCCESS) {
RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("wlanhdr_to_ethhdr: drop pkt\n"));
return retval;
diff --git a/drivers/staging/rtl8188eu/core/rtw_security.c b/drivers/staging/rtl8188eu/core/rtw_security.c
index f9096a512da..bd8d60a230e 100644
--- a/drivers/staging/rtl8188eu/core/rtw_security.c
+++ b/drivers/staging/rtl8188eu/core/rtw_security.c
@@ -189,7 +189,7 @@ void rtw_wep_encrypt(struct adapter *padapter, u8 *pxmitframe)
arcfour_encrypt(&mycontext, payload+length, crc, 4);
pframe += pxmitpriv->frag_len;
- pframe = (u8 *) round_up((size_t)(pframe), 4);
+ pframe = (u8 *)round_up((size_t)(pframe), 4);
}
}
}
@@ -258,7 +258,7 @@ static void secmicputuint32(u8 *p, u32 val)
{
long i;
for (i = 0; i < 4; i++) {
- *p++ = (u8) (val & 0xff);
+ *p++ = (u8)(val & 0xff);
val >>= 8;
}
}
@@ -621,14 +621,14 @@ u32 rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe)
arcfour_encrypt(&mycontext, payload, payload, length);
arcfour_encrypt(&mycontext, payload+length, crc, 4);
} else {
- length = pxmitpriv->frag_len-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len ;
+ length = pxmitpriv->frag_len-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len;
*((__le32 *)crc) = getcrc32(payload, length);/* modified by Amy*/
arcfour_init(&mycontext, rc4key, 16);
arcfour_encrypt(&mycontext, payload, payload, length);
arcfour_encrypt(&mycontext, payload+length, crc, 4);
pframe += pxmitpriv->frag_len;
- pframe = (u8 *) round_up((size_t)(pframe), 4);
+ pframe = (u8 *)round_up((size_t)(pframe), 4);
}
}
} else {
@@ -953,8 +953,8 @@ static void construct_mic_iv(u8 *mic_iv, int qc_exists, int a4_exists, u8 *mpdu,
mic_iv[i] = mpdu[i + 8]; /* mic_iv[2:7] = A2[0:5] = mpdu[10:15] */
for (i = 8; i < 14; i++)
mic_iv[i] = pn_vector[13 - i]; /* mic_iv[8:13] = PN[5:0] */
- mic_iv[14] = (unsigned char) (payload_length / 256);
- mic_iv[15] = (unsigned char) (payload_length % 256);
+ mic_iv[14] = (unsigned char)(payload_length / 256);
+ mic_iv[15] = (unsigned char)(payload_length % 256);
}
/************************************************/
@@ -1045,8 +1045,8 @@ static void construct_ctr_preload(u8 *ctr_preload, int a4_exists, int qc_exists,
ctr_preload[i] = mpdu[i + 8]; /* ctr_preload[2:7] = A2[0:5] = mpdu[10:15] */
for (i = 8; i < 14; i++)
ctr_preload[i] = pn_vector[13 - i]; /* ctr_preload[8:13] = PN[5:0] */
- ctr_preload[14] = (unsigned char) (c / 256); /* Ctr */
- ctr_preload[15] = (unsigned char) (c % 256);
+ ctr_preload[14] = (unsigned char)(c / 256); /* Ctr */
+ ctr_preload[15] = (unsigned char)(c % 256);
}
/************************************/
@@ -1219,7 +1219,7 @@ u32 rtw_aes_encrypt(struct adapter *padapter, u8 *pxmitframe)
pframe = ((struct xmit_frame *)pxmitframe)->buf_addr + hw_hdr_offset;
/* 4 start to encrypt each fragment */
- if ((pattrib->encrypt == _AES_)) {
+ if (pattrib->encrypt == _AES_) {
if (pattrib->psta)
stainfo = pattrib->psta;
else
@@ -1238,11 +1238,11 @@ u32 rtw_aes_encrypt(struct adapter *padapter, u8 *pxmitframe)
aes_cipher(prwskey, pattrib->hdrlen, pframe, length);
} else{
- length = pxmitpriv->frag_len-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len ;
+ length = pxmitpriv->frag_len-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len;
aes_cipher(prwskey, pattrib->hdrlen, pframe, length);
pframe += pxmitpriv->frag_len;
- pframe = (u8 *) round_up((size_t)(pframe), 8);
+ pframe = (u8 *)round_up((size_t)(pframe), 8);
}
}
} else{
@@ -1460,7 +1460,7 @@ u32 rtw_aes_decrypt(struct adapter *padapter, u8 *precvframe)
u32 res = _SUCCESS;
pframe = (unsigned char *)((struct recv_frame *)precvframe)->rx_data;
/* 4 start to encrypt each fragment */
- if ((prxattrib->encrypt == _AES_)) {
+ if (prxattrib->encrypt == _AES_) {
stainfo = rtw_get_stainfo(&padapter->stapriv, &prxattrib->ta[0]);
if (stainfo != NULL) {
RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_aes_decrypt: stainfo!= NULL!!!\n"));
diff --git a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
index e1dc8fa82d3..dc9d0ddf6b3 100644
--- a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
@@ -29,7 +29,7 @@
static void _rtw_init_stainfo(struct sta_info *psta)
{
- memset((u8 *)psta, 0, sizeof (struct sta_info));
+ memset((u8 *)psta, 0, sizeof(struct sta_info));
spin_lock_init(&psta->lock);
INIT_LIST_HEAD(&psta->list);
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index d300369977f..324c1a7fd0b 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -1394,7 +1394,6 @@ unsigned char check_assoc_AP(u8 *pframe, uint len)
DBG_88E("link to Artheros AP\n");
return HT_IOT_PEER_ATHEROS;
} else if ((!memcmp(pIE->data, BROADCOM_OUI1, 3)) ||
- (!memcmp(pIE->data, BROADCOM_OUI2, 3)) ||
(!memcmp(pIE->data, BROADCOM_OUI2, 3))) {
DBG_88E("link to Broadcom AP\n");
return HT_IOT_PEER_BROADCOM;
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index 639ace06a3d..7a71df16746 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -37,7 +37,7 @@ static void _init_txservq(struct tx_servq *ptxservq)
void _rtw_init_sta_xmit_priv(struct sta_xmit_priv *psta_xmitpriv)
{
- memset((unsigned char *)psta_xmitpriv, 0, sizeof (struct sta_xmit_priv));
+ memset((unsigned char *)psta_xmitpriv, 0, sizeof(struct sta_xmit_priv));
spin_lock_init(&psta_xmitpriv->lock);
_init_txservq(&psta_xmitpriv->be_q);
_init_txservq(&psta_xmitpriv->bk_q);
@@ -223,7 +223,7 @@ exit:
return res;
}
-void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv)
+void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv)
{
int i;
struct adapter *padapter = pxmitpriv->adapter;
@@ -691,7 +691,7 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
payload = pframe;
for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
- payload = (u8 *) round_up((size_t)(payload), 4);
+ payload = (u8 *)round_up((size_t)(payload), 4);
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
("=== curfragnum=%d, pframe = 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x,!!!\n",
curfragnum, *payload, *(payload+1),
@@ -772,7 +772,7 @@ static s32 xmitframe_swencrypt(struct adapter *padapter, struct xmit_frame *pxmi
return _SUCCESS;
}
-s32 rtw_make_wlanhdr (struct adapter *padapter , u8 *hdr, struct pkt_attrib *pattrib)
+s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattrib)
{
u16 *qc;
@@ -1025,8 +1025,7 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct
/* adding icv, if necessary... */
if (pattrib->iv_len) {
- if (psta != NULL) {
- switch (pattrib->encrypt) {
+ switch (pattrib->encrypt) {
case _WEP40_:
case _WEP104_:
WEP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
@@ -1043,7 +1042,6 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct
else
AES_IV(pattrib->iv, psta->dot11txpn, 0);
break;
- }
}
memcpy(pframe, pattrib->iv, pattrib->iv_len);
@@ -1098,7 +1096,7 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct
addr = (size_t)(pframe);
- mem_start = (unsigned char *) round_up(addr, 4) + hw_hdr_offset;
+ mem_start = (unsigned char *)round_up(addr, 4) + hw_hdr_offset;
memcpy(mem_start, pbuf_start + hw_hdr_offset, pattrib->hdrlen);
}
diff --git a/drivers/staging/rtl8188eu/hal/bb_cfg.c b/drivers/staging/rtl8188eu/hal/bb_cfg.c
index 80e8cc92c10..1e963bf9e48 100644
--- a/drivers/staging/rtl8188eu/hal/bb_cfg.c
+++ b/drivers/staging/rtl8188eu/hal/bb_cfg.c
@@ -173,7 +173,7 @@ static bool set_baseband_agc_config(struct adapter *adapt)
u32 v1 = array[i];
u32 v2 = array[i+1];
- if (v1 < 0xCDCDCDCD){
+ if (v1 < 0xCDCDCDCD) {
phy_set_bb_reg(adapt, v1, bMaskDWord, v2);
udelay(1);
}
@@ -552,7 +552,7 @@ static void store_pwrindex_offset(struct adapter *Adapter, u32 regaddr, u32 bitm
}
}
-static void rtl_addr_delay(struct adapter *adapt, u32 addr, u32 bit_mask ,u32 data)
+static void rtl_addr_delay(struct adapter *adapt, u32 addr, u32 bit_mask, u32 data)
{
if (addr == 0xfe) {
msleep(50);
diff --git a/drivers/staging/rtl8188eu/hal/fw.c b/drivers/staging/rtl8188eu/hal/fw.c
index 17b7f375054..3b2875481fc 100644
--- a/drivers/staging/rtl8188eu/hal/fw.c
+++ b/drivers/staging/rtl8188eu/hal/fw.c
@@ -84,7 +84,7 @@ static void _rtl88e_fw_block_write(struct adapter *adapt,
static void _rtl88e_fill_dummy(u8 *pfwbuf, u32 *pfwlen)
{
u32 fwlen = *pfwlen;
- u8 remain = (u8) (fwlen % 4);
+ u8 remain = (u8)(fwlen % 4);
remain = (remain == 0) ? 0 : (4 - remain);
@@ -101,7 +101,7 @@ static void _rtl88e_fw_page_write(struct adapter *adapt,
u32 page, const u8 *buffer, u32 size)
{
u8 value8;
- u8 u8page = (u8) (page & 0x07);
+ u8 u8page = (u8)(page & 0x07);
value8 = (usb_read8(adapt, REG_MCUFWDL + 2) & 0xF8) | u8page;
@@ -193,13 +193,13 @@ int rtl88eu_download_fw(struct adapter *adapt)
u32 fwsize;
int err;
- if (request_firmware(&fw, fw_name, device)){
+ if (request_firmware(&fw, fw_name, device)) {
dev_err(device, "Firmware %s not available\n", fw_name);
return -ENOENT;
}
if (fw->size > FW_8188E_SIZE) {
- dev_err(device,"Firmware size exceed 0x%X. Check it.\n",
+ dev_err(device, "Firmware size exceed 0x%X. Check it.\n",
FW_8188E_SIZE);
return -1;
}
diff --git a/drivers/staging/rtl8188eu/hal/hal_intf.c b/drivers/staging/rtl8188eu/hal/hal_intf.c
index 538a0f65d09..4bdbed28774 100644
--- a/drivers/staging/rtl8188eu/hal/hal_intf.c
+++ b/drivers/staging/rtl8188eu/hal/hal_intf.c
@@ -275,13 +275,6 @@ void rtw_hal_write_rfreg(struct adapter *adapt, enum rf_radio_path rfpath,
bitmask, data);
}
-s32 rtw_hal_interrupt_handler(struct adapter *adapt)
-{
- if (adapt->HalFunc.interrupt_handler)
- return adapt->HalFunc.interrupt_handler(adapt);
- return _FAIL;
-}
-
void rtw_hal_set_bwmode(struct adapter *adapt,
enum ht_channel_width bandwidth, u8 offset)
{
@@ -329,15 +322,6 @@ void rtw_hal_sreset_init(struct adapter *adapt)
adapt->HalFunc.sreset_init_value(adapt);
}
-u8 rtw_hal_sreset_get_wifi_status(struct adapter *adapt)
-{
- u8 status = 0;
-
- if (adapt->HalFunc.sreset_get_wifi_status)
- status = adapt->HalFunc.sreset_get_wifi_status(adapt);
- return status;
-}
-
void rtw_hal_notch_filter(struct adapter *adapter, bool enable)
{
if (adapter->HalFunc.hal_notch_filter)
diff --git a/drivers/staging/rtl8188eu/hal/mac_cfg.c b/drivers/staging/rtl8188eu/hal/mac_cfg.c
index c0e7fa93805..febc83a5adb 100644
--- a/drivers/staging/rtl8188eu/hal/mac_cfg.c
+++ b/drivers/staging/rtl8188eu/hal/mac_cfg.c
@@ -127,7 +127,7 @@ bool rtl88eu_phy_mac_config(struct adapter *adapt)
ptrarray = array_MAC_REG_8188E;
for (i = 0; i < arraylength; i = i + 2)
- usb_write8(adapt, ptrarray[i], (u8) ptrarray[i + 1]);
+ usb_write8(adapt, ptrarray[i], (u8)ptrarray[i + 1]);
usb_write8(adapt, REG_MAX_AGGR_NUM, MAX_AGGR_NUM);
return true;
diff --git a/drivers/staging/rtl8188eu/hal/odm.c b/drivers/staging/rtl8188eu/hal/odm.c
index e4df83710ca..9873998011d 100644
--- a/drivers/staging/rtl8188eu/hal/odm.c
+++ b/drivers/staging/rtl8188eu/hal/odm.c
@@ -437,8 +437,8 @@ void odm_CommonInfoSelfInit(struct odm_dm_struct *pDM_Odm)
{
struct adapter *adapter = pDM_Odm->Adapter;
- pDM_Odm->bCckHighPower = (bool) phy_query_bb_reg(adapter, 0x824, BIT9);
- pDM_Odm->RFPathRxEnable = (u8) phy_query_bb_reg(adapter, 0xc04, 0x0F);
+ pDM_Odm->bCckHighPower = (bool)phy_query_bb_reg(adapter, 0x824, BIT9);
+ pDM_Odm->RFPathRxEnable = (u8)phy_query_bb_reg(adapter, 0xc04, 0x0F);
ODM_InitDebugSetting(pDM_Odm);
}
@@ -529,7 +529,7 @@ void odm_DIGInit(struct odm_dm_struct *pDM_Odm)
struct adapter *adapter = pDM_Odm->Adapter;
struct rtw_dig *pDM_DigTable = &pDM_Odm->DM_DigTable;
- pDM_DigTable->CurIGValue = (u8) phy_query_bb_reg(adapter, ODM_REG_IGI_A_11N, ODM_BIT_IGI_11N);
+ pDM_DigTable->CurIGValue = (u8)phy_query_bb_reg(adapter, ODM_REG_IGI_A_11N, ODM_BIT_IGI_11N);
pDM_DigTable->RssiLowThresh = DM_DIG_THRESH_LOW;
pDM_DigTable->RssiHighThresh = DM_DIG_THRESH_HIGH;
pDM_DigTable->FALowThresh = DM_false_ALARM_THRESH_LOW;
@@ -620,7 +620,7 @@ void odm_DIG(struct odm_dm_struct *pDM_Odm)
} else if (pDM_Odm->SupportAbility & ODM_BB_ANT_DIV) {
/* 1 Lower Bound for 88E AntDiv */
if (pDM_Odm->AntDivType == CG_TRX_HW_ANTDIV) {
- DIG_Dynamic_MIN = (u8) pDM_DigTable->AntDiv_RSSI_max;
+ DIG_Dynamic_MIN = (u8)pDM_DigTable->AntDiv_RSSI_max;
ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
("odm_DIG(): pDM_DigTable->AntDiv_RSSI_max=%d\n",
pDM_DigTable->AntDiv_RSSI_max));
diff --git a/drivers/staging/rtl8188eu/hal/odm_HWConfig.c b/drivers/staging/rtl8188eu/hal/odm_HWConfig.c
index 4e4e21936e7..29f87dffbad 100644
--- a/drivers/staging/rtl8188eu/hal/odm_HWConfig.c
+++ b/drivers/staging/rtl8188eu/hal/odm_HWConfig.c
@@ -118,7 +118,7 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
cck_highpwr = dm_odm->bCckHighPower;
- cck_agc_rpt = pPhyStaRpt->cck_agc_rpt_ofdm_cfosho_a ;
+ cck_agc_rpt = pPhyStaRpt->cck_agc_rpt_ofdm_cfosho_a;
/* 2011.11.28 LukeLee: 88E use different LNA & VGA gain table */
/* The RSSI formula should be modified according to the gain table */
diff --git a/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c b/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c
index 5342af778eb..d3c6873925b 100644
--- a/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c
+++ b/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c
@@ -278,7 +278,7 @@ static void rtl88eu_dm_hw_ant_div(struct odm_dm_struct *dm_odm)
struct rtw_dig *dig_table = &dm_odm->DM_DigTable;
struct sta_info *entry;
u32 i, min_rssi = 0xFF, ant_div_max_rssi = 0, max_rssi = 0;
- u32 local_min_rssi,local_max_rssi;
+ u32 local_min_rssi, local_max_rssi;
u32 main_rssi, aux_rssi;
u8 RxIdleAnt = 0, target_ant = 7;
diff --git a/drivers/staging/rtl8188eu/hal/phy.c b/drivers/staging/rtl8188eu/hal/phy.c
index c4f7f358a81..3f663fe151b 100644
--- a/drivers/staging/rtl8188eu/hal/phy.c
+++ b/drivers/staging/rtl8188eu/hal/phy.c
@@ -478,7 +478,7 @@ void rtl88eu_dm_txpower_tracking_callback_thermalmeter(struct adapter *adapt)
/* 2.4G, decrease power */
{0, 0, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11},
/* 2.4G, increase power */
- {0, 0, -1, -2, -3, -4,-4, -4, -4, -5, -7, -8,-9, -9, -10},
+ {0, 0, -1, -2, -3, -4, -4, -4, -4, -5, -7, -8, -9, -9, -10},
};
u8 thermal_mapping[2][index_mapping_NUM_88E] = {
/* 2.4G, decrease power */
diff --git a/drivers/staging/rtl8188eu/hal/rf.c b/drivers/staging/rtl8188eu/hal/rf.c
index c2fac34c813..eea4c8a6022 100644
--- a/drivers/staging/rtl8188eu/hal/rf.c
+++ b/drivers/staging/rtl8188eu/hal/rf.c
@@ -131,7 +131,7 @@ void rtl88eu_phy_rf6052_set_cck_txpower(struct adapter *adapt, u8 *powerlevel)
/* powerbase1 for HT MCS rates */
static void getpowerbase88e(struct adapter *adapt, u8 *pwr_level_ofdm,
u8 *pwr_level_bw20, u8 *pwr_level_bw40,
- u8 channel,u32 *ofdmbase, u32 *mcs_base)
+ u8 channel, u32 *ofdmbase, u32 *mcs_base)
{
struct hal_data_8188e *hal_data = GET_HAL_DATA(adapt);
u32 powerbase0, powerbase1;
diff --git a/drivers/staging/rtl8188eu/hal/rf_cfg.c b/drivers/staging/rtl8188eu/hal/rf_cfg.c
index ddc2f55fe13..5dc11cae2ef 100644
--- a/drivers/staging/rtl8188eu/hal/rf_cfg.c
+++ b/drivers/staging/rtl8188eu/hal/rf_cfg.c
@@ -164,7 +164,7 @@ do { \
#define B3WIREDATALENGTH 0x800
#define BRFSI_RFENV 0x10
-static void rtl_rfreg_delay(struct adapter *adapt, enum rf_radio_path rfpath,u32 addr, u32 mask, u32 data)
+static void rtl_rfreg_delay(struct adapter *adapt, enum rf_radio_path rfpath, u32 addr, u32 mask, u32 data)
{
if (addr == 0xfe) {
mdelay(50);
@@ -190,7 +190,7 @@ static void rtl8188e_config_rf_reg(struct adapter *adapt,
u32 content = 0x1000; /*RF Content: radio_a_txt*/
u32 maskforphyset = (u32)(content & 0xE000);
- rtl_rfreg_delay(adapt, RF90_PATH_A, addr| maskforphyset,
+ rtl_rfreg_delay(adapt, RF90_PATH_A, addr | maskforphyset,
RFREG_OFFSET_MASK,
data);
}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
index 023a3d84ee8..7f30dea1b53 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
@@ -150,11 +150,9 @@ u8 rtl8188e_set_raid_cmd(struct adapter *adapt, u32 mask)
struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
if (haldata->fw_ractrl) {
- __le32 lmask;
memset(buf, 0, 3);
- lmask = cpu_to_le32(mask);
- memcpy(buf, &lmask, 3);
+ put_unaligned_le32(mask, buf);
FillH2CCmd_88E(adapt, H2C_DM_MACID_CFG, 3, buf);
} else {
@@ -254,7 +252,7 @@ void rtl8188e_set_FwMediaStatus_cmd(struct adapter *adapt, __le16 mstatus_rpt)
{
u8 opmode, macid;
u16 mst_rpt = le16_to_cpu(mstatus_rpt);
- opmode = (u8) mst_rpt;
+ opmode = (u8)mst_rpt;
macid = (u8)(mst_rpt >> 8);
DBG_88E("### %s: MStatus=%x MACID=%d\n", __func__, opmode, macid);
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
index dab4c337a86..01566210bbd 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
@@ -155,6 +155,8 @@ void rtl8188e_HalDmWatchDog(struct adapter *Adapter)
bool fw_ps_awake = true;
u8 hw_init_completed = false;
struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
+ struct mlme_priv *pmlmepriv = NULL;
+ u8 bLinked = false;
hw_init_completed = Adapter->hw_init_completed;
@@ -170,22 +172,20 @@ void rtl8188e_HalDmWatchDog(struct adapter *Adapter)
fw_ps_awake = false;
/* ODM */
- if (hw_init_completed) {
- struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
- u8 bLinked = false;
-
- if ((check_fwstate(pmlmepriv, WIFI_AP_STATE)) ||
- (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE))) {
- if (Adapter->stapriv.asoc_sta_count > 2)
- bLinked = true;
- } else {/* Station mode */
- if (check_fwstate(pmlmepriv, _FW_LINKED))
- bLinked = true;
- }
-
- ODM_CmnInfoUpdate(&hal_data->odmpriv, ODM_CMNINFO_LINK, bLinked);
- ODM_DMWatchdog(&hal_data->odmpriv);
+ pmlmepriv = &Adapter->mlmepriv;
+
+ if ((check_fwstate(pmlmepriv, WIFI_AP_STATE)) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE |
+ WIFI_ADHOC_MASTER_STATE))) {
+ if (Adapter->stapriv.asoc_sta_count > 2)
+ bLinked = true;
+ } else {/* Station mode */
+ if (check_fwstate(pmlmepriv, _FW_LINKED))
+ bLinked = true;
}
+
+ ODM_CmnInfoUpdate(&hal_data->odmpriv, ODM_CMNINFO_LINK, bLinked);
+ ODM_DMWatchdog(&hal_data->odmpriv);
skip_dm:
/* Check GPIO to determine current RF on/off and Pbc status. */
/* Check Hardware Radio ON/OFF or not */
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
index d6fe5e6aa4f..7d460eaafa3 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
@@ -181,7 +181,8 @@ static void rtl8188e_SetHalODMVar(struct adapter *Adapter, enum hal_odm_variable
switch (eVariable) {
case HAL_ODM_STA_INFO:
{
- struct sta_info *psta = (struct sta_info *)pValue1;
+ struct sta_info *psta = pValue1;
+
if (bSet) {
DBG_88E("### Set STA_(%d) info\n", psta->mac_id);
ODM_CmnInfoPtrArrayHook(podmpriv, ODM_CMNINFO_STA_STATUS, psta->mac_id, psta);
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c b/drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c
index 7a4f754d86d..a6ba53b488e 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c
@@ -25,7 +25,7 @@
void dump_txrpt_ccx_88e(void *buf)
{
- struct txrpt_ccx_88e *txrpt_ccx = (struct txrpt_ccx_88e *)buf;
+ struct txrpt_ccx_88e *txrpt_ccx = buf;
DBG_88E("%s:\n"
"tag1:%u, pkt_num:%u, txdma_underflow:%u, int_bt:%u, int_tri:%u, int_ccx:%u\n"
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
index be9eede6931..594c1da9db2 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
@@ -399,7 +399,7 @@ static s32 rtw_dump_xframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
mem_addr += w_sz;
- mem_addr = (u8 *) round_up((size_t)mem_addr, 4);
+ mem_addr = (u8 *)round_up((size_t)mem_addr, 4);
}
rtw_free_xmitframe(pxmitpriv, pxmitframe);
diff --git a/drivers/staging/rtl8188eu/hal/usb_halinit.c b/drivers/staging/rtl8188eu/hal/usb_halinit.c
index caf2ca3a47e..14650e91c78 100644
--- a/drivers/staging/rtl8188eu/hal/usb_halinit.c
+++ b/drivers/staging/rtl8188eu/hal/usb_halinit.c
@@ -1673,7 +1673,7 @@ static void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
pRegToSet = RegToSet_Normal; /* 0xb972a841; */
FactorToSet = *((u8 *)val);
if (FactorToSet <= 3) {
- FactorToSet = (1<<(FactorToSet + 2));
+ FactorToSet = 1 << (FactorToSet + 2);
if (FactorToSet > 0xf)
FactorToSet = 0xf;
@@ -2012,7 +2012,7 @@ static u8 SetHalDefVar8188EUsb(struct adapter *Adapter, enum hal_def_variable eV
u8 bRSSIDump = *((u8 *)pValue);
struct odm_dm_struct *dm_ocm = &(haldata->odmpriv);
if (bRSSIDump)
- dm_ocm->DebugComponents = ODM_COMP_DIG|ODM_COMP_FA_CNT ;
+ dm_ocm->DebugComponents = ODM_COMP_DIG|ODM_COMP_FA_CNT;
else
dm_ocm->DebugComponents = 0;
}
diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
index 9191993dd3f..3b476d80f64 100644
--- a/drivers/staging/rtl8188eu/include/hal_intf.h
+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
@@ -304,8 +304,6 @@ void rtw_hal_write_rfreg(struct adapter *padapter,
enum rf_radio_path eRFPath, u32 RegAddr,
u32 BitMask, u32 Data);
-s32 rtw_hal_interrupt_handler(struct adapter *padapter);
-
void rtw_hal_set_bwmode(struct adapter *padapter,
enum ht_channel_width Bandwidth, u8 Offset);
void rtw_hal_set_chan(struct adapter *padapter, u8 channel);
@@ -317,7 +315,6 @@ void rtw_hal_antdiv_rssi_compared(struct adapter *padapter,
struct wlan_bssid_ex *src);
void rtw_hal_sreset_init(struct adapter *padapter);
-u8 rtw_hal_sreset_get_wifi_status(struct adapter *padapter);
void rtw_hal_notch_filter(struct adapter *adapter, bool enable);
void rtw_hal_reset_security_engine(struct adapter *adapter);
diff --git a/drivers/staging/rtl8188eu/include/ieee80211_ext.h b/drivers/staging/rtl8188eu/include/ieee80211_ext.h
index 1052d1817a9..15e53d380ad 100644
--- a/drivers/staging/rtl8188eu/include/ieee80211_ext.h
+++ b/drivers/staging/rtl8188eu/include/ieee80211_ext.h
@@ -103,24 +103,24 @@ struct wme_parameter_element {
#define WPA_PUT_LE16(a, val) \
do { \
- (a)[1] = ((u16) (val)) >> 8; \
- (a)[0] = ((u16) (val)) & 0xff; \
+ (a)[1] = ((u16)(val)) >> 8; \
+ (a)[0] = ((u16)(val)) & 0xff; \
} while (0)
#define WPA_PUT_BE32(a, val) \
do { \
- (a)[0] = (u8) ((((u32) (val)) >> 24) & 0xff); \
- (a)[1] = (u8) ((((u32) (val)) >> 16) & 0xff); \
- (a)[2] = (u8) ((((u32) (val)) >> 8) & 0xff); \
- (a)[3] = (u8) (((u32) (val)) & 0xff); \
+ (a)[0] = (u8)((((u32) (val)) >> 24) & 0xff); \
+ (a)[1] = (u8)((((u32) (val)) >> 16) & 0xff); \
+ (a)[2] = (u8)((((u32) (val)) >> 8) & 0xff); \
+ (a)[3] = (u8)(((u32) (val)) & 0xff); \
} while (0)
#define WPA_PUT_LE32(a, val) \
do { \
- (a)[3] = (u8) ((((u32) (val)) >> 24) & 0xff); \
- (a)[2] = (u8) ((((u32) (val)) >> 16) & 0xff); \
- (a)[1] = (u8) ((((u32) (val)) >> 8) & 0xff); \
- (a)[0] = (u8) (((u32) (val)) & 0xff); \
+ (a)[3] = (u8)((((u32) (val)) >> 24) & 0xff); \
+ (a)[2] = (u8)((((u32) (val)) >> 16) & 0xff); \
+ (a)[1] = (u8)((((u32) (val)) >> 8) & 0xff); \
+ (a)[0] = (u8)(((u32) (val)) & 0xff); \
} while (0)
#define RSN_SELECTOR_PUT(a, val) WPA_PUT_BE32((u8 *)(a), (val))
diff --git a/drivers/staging/rtl8188eu/include/odm_debug.h b/drivers/staging/rtl8188eu/include/odm_debug.h
index db7b44e16c4..914f831a5b7 100644
--- a/drivers/staging/rtl8188eu/include/odm_debug.h
+++ b/drivers/staging/rtl8188eu/include/odm_debug.h
@@ -83,9 +83,8 @@
#define ODM_COMP_INIT BIT31
/*------------------------Export Marco Definition---------------------------*/
-#define DbgPrint pr_info
#define RT_PRINTK(fmt, args...) \
- DbgPrint("%s(): " fmt, __func__, ## args);
+ pr_info("%s(): " fmt, __func__, ## args);
#ifndef ASSERT
#define ASSERT(expr)
@@ -94,40 +93,18 @@
#define ODM_RT_TRACE(pDM_Odm, comp, level, fmt) \
if (((comp) & pDM_Odm->DebugComponents) && \
(level <= pDM_Odm->DebugLevel)) { \
- DbgPrint("[ODM-8188E] "); \
- RT_PRINTK fmt; \
- }
-
-#define ODM_RT_TRACE_F(pDM_Odm, comp, level, fmt) \
- if (((comp) & pDM_Odm->DebugComponents) && \
- (level <= pDM_Odm->DebugLevel)) { \
+ pr_info("[ODM-8188E] "); \
RT_PRINTK fmt; \
}
#define ODM_RT_ASSERT(pDM_Odm, expr, fmt) \
if (!(expr)) { \
- DbgPrint("Assertion failed! %s at ......\n", #expr); \
- DbgPrint(" ......%s,%s,line=%d\n", __FILE__, \
+ pr_info("Assertion failed! %s at ......\n", #expr); \
+ pr_info(" ......%s,%s,line=%d\n", __FILE__, \
__func__, __LINE__); \
RT_PRINTK fmt; \
ASSERT(false); \
}
-#define ODM_dbg_enter() { DbgPrint("==> %s\n", __func__); }
-#define ODM_dbg_exit() { DbgPrint("<== %s\n", __func__); }
-#define ODM_dbg_trace(str) { DbgPrint("%s:%s\n", __func__, str); }
-
-#define ODM_PRINT_ADDR(pDM_Odm, comp, level, title_str, ptr) \
- if (((comp) & pDM_Odm->DebugComponents) && \
- (level <= pDM_Odm->DebugLevel)) { \
- int __i; \
- u8 *__ptr = (u8 *)ptr; \
- DbgPrint("[ODM] "); \
- DbgPrint(title_str); \
- DbgPrint(" "); \
- for (__i = 0; __i < 6; __i++) \
- DbgPrint("%02X%s", __ptr[__i], (__i == 5)?"":"-");\
- DbgPrint("\n"); \
- }
void ODM_InitDebugSetting(struct odm_dm_struct *pDM_Odm);
diff --git a/drivers/staging/rtl8188eu/include/osdep_service.h b/drivers/staging/rtl8188eu/include/osdep_service.h
index fed9c86890b..82f58f87656 100644
--- a/drivers/staging/rtl8188eu/include/osdep_service.h
+++ b/drivers/staging/rtl8188eu/include/osdep_service.h
@@ -182,8 +182,8 @@ u64 rtw_modular64(u64 x, u64 y);
/* Macros for handling unaligned memory accesses */
-#define RTW_GET_BE24(a) ((((u32) (a)[0]) << 16) | (((u32) (a)[1]) << 8) | \
- ((u32) (a)[2]))
+#define RTW_GET_BE24(a) ((((u32)(a)[0]) << 16) | (((u32) (a)[1]) << 8) | \
+ ((u32)(a)[2]))
void rtw_buf_free(u8 **buf, u32 *buf_len);
void rtw_buf_update(u8 **buf, u32 *buf_len, u8 *src, u32 src_len);
diff --git a/drivers/staging/rtl8188eu/include/rtw_debug.h b/drivers/staging/rtl8188eu/include/rtw_debug.h
index a38616e3cad..971bf457f32 100644
--- a/drivers/staging/rtl8188eu/include/rtw_debug.h
+++ b/drivers/staging/rtl8188eu/include/rtw_debug.h
@@ -106,7 +106,7 @@ extern u32 GlobalDebugLevel;
u8 *ptr = (u8 *)_hexdata; \
pr_info("%s", DRIVER_PREFIX); \
pr_info(_titlestring); \
- for (__i = 0; __i < (int)_hexdatalen; __i++ ) { \
+ for (__i = 0; __i < (int)_hexdatalen; __i++) { \
pr_info("%02X%s", ptr[__i], \
(((__i + 1) % 4) == 0) ? \
" " : " "); \
diff --git a/drivers/staging/rtl8188eu/include/rtw_led.h b/drivers/staging/rtl8188eu/include/rtw_led.h
index c5194b620da..23f0cfe312f 100644
--- a/drivers/staging/rtl8188eu/include/rtw_led.h
+++ b/drivers/staging/rtl8188eu/include/rtw_led.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
*
******************************************************************************/
#ifndef __RTW_LED_H_
@@ -27,7 +23,7 @@
#define LED_BLINK_LINK_INTERVAL_ALPHA 500 /* 500 */
#define LED_BLINK_SCAN_INTERVAL_ALPHA 180 /* 150 */
#define LED_BLINK_FASTER_INTERVAL_ALPHA 50
-#define LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA 5000
+#define LED_BLINK_WPS_SUCCESS_INTERVAL_ALPHA 5000
enum LED_CTL_MODE {
LED_CTL_POWER_ON,
@@ -92,7 +88,7 @@ struct LED_871x {
void LedControl8188eu(struct adapter *padapter, enum LED_CTL_MODE LedAction);
-struct led_priv{
+struct led_priv {
/* add for led control */
struct LED_871x SwLed0;
u8 bRegUseLed;
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
index d699ca19ef1..8d72ccf5f2a 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
@@ -448,7 +448,7 @@ struct mlme_ext_priv {
int init_mlme_ext_priv(struct adapter *adapter);
int init_hw_mlme_ext(struct adapter *padapter);
-void free_mlme_ext_priv (struct mlme_ext_priv *pmlmeext);
+void free_mlme_ext_priv(struct mlme_ext_priv *pmlmeext);
extern void init_mlme_ext_timer(struct adapter *padapter);
extern void init_addba_retry_timer(struct adapter *adapt, struct sta_info *sta);
extern struct xmit_frame *alloc_mgtxmitframe(struct xmit_priv *pxmitpriv);
@@ -646,8 +646,8 @@ void mlmeext_sta_add_event_callback(struct adapter *padapter,
void linked_status_chk(struct adapter *padapter);
-void survey_timer_hdl (void *function_context);
-void link_timer_hdl (void *funtion_context);
+void survey_timer_hdl(void *function_context);
+void link_timer_hdl(void *funtion_context);
void addba_timer_hdl(void *function_context);
#define set_survey_timer(mlmeext, ms) \
@@ -708,15 +708,15 @@ u8 tdls_hdl(struct adapter *padapter, unsigned char *pbuf);
#ifdef _RTW_CMD_C_
static struct cmd_hdl wlancmds[] = {
- GEN_MLME_EXT_HANDLER(sizeof (struct wlan_bssid_ex), join_cmd_hdl)
- GEN_MLME_EXT_HANDLER(sizeof (struct disconnect_parm), disconnect_hdl)
- GEN_MLME_EXT_HANDLER(sizeof (struct wlan_bssid_ex), createbss_hdl)
- GEN_MLME_EXT_HANDLER(sizeof (struct setopmode_parm), setopmode_hdl)
- GEN_MLME_EXT_HANDLER(sizeof (struct sitesurvey_parm), sitesurvey_cmd_hdl)
- GEN_MLME_EXT_HANDLER(sizeof (struct setauth_parm), setauth_hdl)
- GEN_MLME_EXT_HANDLER(sizeof (struct setkey_parm), setkey_hdl)
- GEN_MLME_EXT_HANDLER(sizeof (struct set_stakey_parm), set_stakey_hdl)
- GEN_MLME_EXT_HANDLER(sizeof (struct set_assocsta_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct wlan_bssid_ex), join_cmd_hdl)
+ GEN_MLME_EXT_HANDLER(sizeof(struct disconnect_parm), disconnect_hdl)
+ GEN_MLME_EXT_HANDLER(sizeof(struct wlan_bssid_ex), createbss_hdl)
+ GEN_MLME_EXT_HANDLER(sizeof(struct setopmode_parm), setopmode_hdl)
+ GEN_MLME_EXT_HANDLER(sizeof(struct sitesurvey_parm), sitesurvey_cmd_hdl)
+ GEN_MLME_EXT_HANDLER(sizeof(struct setauth_parm), setauth_hdl)
+ GEN_MLME_EXT_HANDLER(sizeof(struct setkey_parm), setkey_hdl)
+ GEN_MLME_EXT_HANDLER(sizeof(struct set_stakey_parm), set_stakey_hdl)
+ GEN_MLME_EXT_HANDLER(sizeof(struct set_assocsta_parm), NULL)
GEN_MLME_EXT_HANDLER(sizeof(struct addBaReq_parm), add_ba_hdl)
GEN_MLME_EXT_HANDLER(sizeof(struct set_ch_parm), set_ch_hdl)
GEN_MLME_EXT_HANDLER(sizeof(struct wlan_bssid_ex), tx_beacon_hdl)
@@ -787,7 +787,7 @@ static struct fwevent wlanevents[] = {
{0, NULL},
{0, NULL},
{0, &rtw_survey_event_callback}, /*8*/
- {sizeof (struct surveydone_event), &rtw_surveydone_event_callback},/*9*/
+ {sizeof(struct surveydone_event), &rtw_surveydone_event_callback},/*9*/
{0, &rtw_joinbss_event_callback}, /*10*/
{sizeof(struct stassoc_event), &rtw_stassoc_event_callback},
{sizeof(struct stadel_event), &rtw_stadel_event_callback},
diff --git a/drivers/staging/rtl8188eu/include/wifi.h b/drivers/staging/rtl8188eu/include/wifi.h
index a88ebf41bba..8dbdfafd52b 100644
--- a/drivers/staging/rtl8188eu/include/wifi.h
+++ b/drivers/staging/rtl8188eu/include/wifi.h
@@ -766,27 +766,27 @@ enum ht_cap_ampdu_factor {
#define OP_MODE_20MHZ_HT_STA_ASSOCED 2
#define OP_MODE_MIXED 3
-#define HT_INFO_HT_PARAM_SECONDARY_CHNL_OFF_MASK ((u8) BIT(0) | BIT(1))
-#define HT_INFO_HT_PARAM_SECONDARY_CHNL_ABOVE ((u8) BIT(0))
-#define HT_INFO_HT_PARAM_SECONDARY_CHNL_BELOW ((u8) BIT(0) | BIT(1))
-#define HT_INFO_HT_PARAM_REC_TRANS_CHNL_WIDTH ((u8) BIT(2))
-#define HT_INFO_HT_PARAM_RIFS_MODE ((u8) BIT(3))
-#define HT_INFO_HT_PARAM_CTRL_ACCESS_ONLY ((u8) BIT(4))
-#define HT_INFO_HT_PARAM_SRV_INTERVAL_GRANULARITY ((u8) BIT(5))
+#define HT_INFO_HT_PARAM_SECONDARY_CHNL_OFF_MASK ((u8)BIT(0) | BIT(1))
+#define HT_INFO_HT_PARAM_SECONDARY_CHNL_ABOVE ((u8)BIT(0))
+#define HT_INFO_HT_PARAM_SECONDARY_CHNL_BELOW ((u8)BIT(0) | BIT(1))
+#define HT_INFO_HT_PARAM_REC_TRANS_CHNL_WIDTH ((u8)BIT(2))
+#define HT_INFO_HT_PARAM_RIFS_MODE ((u8)BIT(3))
+#define HT_INFO_HT_PARAM_CTRL_ACCESS_ONLY ((u8)BIT(4))
+#define HT_INFO_HT_PARAM_SRV_INTERVAL_GRANULARITY ((u8)BIT(5))
#define HT_INFO_OPERATION_MODE_OP_MODE_MASK \
- ((u16) (0x0001 | 0x0002))
+ ((u16)(0x0001 | 0x0002))
#define HT_INFO_OPERATION_MODE_OP_MODE_OFFSET 0
-#define HT_INFO_OPERATION_MODE_NON_GF_DEVS_PRESENT ((u8) BIT(2))
-#define HT_INFO_OPERATION_MODE_TRANSMIT_BURST_LIMIT ((u8) BIT(3))
-#define HT_INFO_OPERATION_MODE_NON_HT_STA_PRESENT ((u8) BIT(4))
-
-#define HT_INFO_STBC_PARAM_DUAL_BEACON ((u16) BIT(6))
-#define HT_INFO_STBC_PARAM_DUAL_STBC_PROTECT ((u16) BIT(7))
-#define HT_INFO_STBC_PARAM_SECONDARY_BC ((u16) BIT(8))
-#define HT_INFO_STBC_PARAM_LSIG_TXOP_PROTECT_ALLOWED ((u16) BIT(9))
-#define HT_INFO_STBC_PARAM_PCO_ACTIVE ((u16) BIT(10))
-#define HT_INFO_STBC_PARAM_PCO_PHASE ((u16) BIT(11))
+#define HT_INFO_OPERATION_MODE_NON_GF_DEVS_PRESENT ((u8)BIT(2))
+#define HT_INFO_OPERATION_MODE_TRANSMIT_BURST_LIMIT ((u8)BIT(3))
+#define HT_INFO_OPERATION_MODE_NON_HT_STA_PRESENT ((u8)BIT(4))
+
+#define HT_INFO_STBC_PARAM_DUAL_BEACON ((u16)BIT(6))
+#define HT_INFO_STBC_PARAM_DUAL_STBC_PROTECT ((u16)BIT(7))
+#define HT_INFO_STBC_PARAM_SECONDARY_BC ((u16)BIT(8))
+#define HT_INFO_STBC_PARAM_LSIG_TXOP_PROTECT_ALLOWED ((u16)BIT(9))
+#define HT_INFO_STBC_PARAM_PCO_ACTIVE ((u16)BIT(10))
+#define HT_INFO_STBC_PARAM_PCO_PHASE ((u16)BIT(11))
/* ===============WPS Section=============== */
/* For WPSv1.0 */
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index d598fec4abb..24a8f5ac96e 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -361,7 +361,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
param->u.crypt.err = 0;
param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0';
- if (param_len < (u32) ((u8 *)param->u.crypt.key - (u8 *)param) + param->u.crypt.key_len) {
+ if (param_len < (u32)((u8 *)param->u.crypt.key - (u8 *)param) + param->u.crypt.key_len) {
ret = -EINVAL;
goto exit;
}
@@ -512,14 +512,12 @@ static int rtw_set_wpa_ie(struct adapter *padapter, char *pie, unsigned short ie
}
if (ielen) {
- buf = kzalloc(ielen, GFP_KERNEL);
+ buf = kmemdup(pie, ielen, GFP_KERNEL);
if (buf == NULL) {
ret = -ENOMEM;
goto exit;
}
- memcpy(buf, pie, ielen);
-
/* dump */
{
int i;
@@ -1136,7 +1134,8 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
struct iw_scan_req *req = (struct iw_scan_req *)extra;
if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
- int len = min((int)req->essid_len, IW_ESSID_MAX_SIZE);
+ int len = min_t(int, req->essid_len,
+ IW_ESSID_MAX_SIZE);
memcpy(ssid[0].Ssid, req->essid, len);
ssid[0].SsidLength = len;
@@ -1417,7 +1416,7 @@ static int rtw_wx_set_rate(struct net_device *dev,
struct iw_request_info *a,
union iwreq_data *wrqu, char *extra)
{
- int i, ret = 0;
+ int i;
u8 datarates[NumRates];
u32 target_rate = wrqu->bitrate.value;
u32 fixed = wrqu->bitrate.fixed;
@@ -1490,7 +1489,7 @@ set_rate:
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("datarate_inx =%d\n", datarates[i]));
}
- return ret;
+ return 0;
}
static int rtw_wx_get_rate(struct net_device *dev,
@@ -2699,10 +2698,8 @@ static int rtw_set_wps_beacon(struct net_device *dev, struct ieee_param *param,
ie_len = len-12-2;/* 12 = param header, 2:no packed */
- if (pmlmepriv->wps_beacon_ie) {
- kfree(pmlmepriv->wps_beacon_ie);
- pmlmepriv->wps_beacon_ie = NULL;
- }
+ kfree(pmlmepriv->wps_beacon_ie);
+ pmlmepriv->wps_beacon_ie = NULL;
if (ie_len > 0) {
pmlmepriv->wps_beacon_ie = rtw_malloc(ie_len);
@@ -2736,10 +2733,8 @@ static int rtw_set_wps_probe_resp(struct net_device *dev, struct ieee_param *par
ie_len = len-12-2;/* 12 = param header, 2:no packed */
- if (pmlmepriv->wps_probe_resp_ie) {
- kfree(pmlmepriv->wps_probe_resp_ie);
- pmlmepriv->wps_probe_resp_ie = NULL;
- }
+ kfree(pmlmepriv->wps_probe_resp_ie);
+ pmlmepriv->wps_probe_resp_ie = NULL;
if (ie_len > 0) {
pmlmepriv->wps_probe_resp_ie = rtw_malloc(ie_len);
@@ -2768,10 +2763,8 @@ static int rtw_set_wps_assoc_resp(struct net_device *dev, struct ieee_param *par
ie_len = len-12-2;/* 12 = param header, 2:no packed */
- if (pmlmepriv->wps_assoc_resp_ie) {
- kfree(pmlmepriv->wps_assoc_resp_ie);
- pmlmepriv->wps_assoc_resp_ie = NULL;
- }
+ kfree(pmlmepriv->wps_assoc_resp_ie);
+ pmlmepriv->wps_assoc_resp_ie = NULL;
if (ie_len > 0) {
pmlmepriv->wps_assoc_resp_ie = rtw_malloc(ie_len);
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index 08a80f759b8..88a909c9e45 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -512,7 +512,6 @@ void rtw_proc_remove_one(struct net_device *dev)
static uint loadparam(struct adapter *padapter, struct net_device *pnetdev)
{
- uint status = _SUCCESS;
struct registry_priv *registry_par = &padapter->registrypriv;
@@ -527,7 +526,7 @@ static uint loadparam(struct adapter *padapter, struct net_device *pnetdev)
registry_par->channel = (u8)rtw_channel;
registry_par->wireless_mode = (u8)rtw_wireless_mode;
- registry_par->vrtl_carrier_sense = (u8)rtw_vrtl_carrier_sense ;
+ registry_par->vrtl_carrier_sense = (u8)rtw_vrtl_carrier_sense;
registry_par->vcs_type = (u8)rtw_vcs_type;
registry_par->rts_thresh = (u16)rtw_rts_thresh;
registry_par->frag_thresh = (u16)rtw_frag_thresh;
@@ -582,7 +581,7 @@ static uint loadparam(struct adapter *padapter, struct net_device *pnetdev)
snprintf(registry_par->ifname, 16, "%s", ifname);
snprintf(registry_par->if2name, 16, "%s", if2name);
registry_par->notch_filter = (u8)rtw_notch_filter;
- return status;
+ return _SUCCESS;
}
static int rtw_net_set_mac_address(struct net_device *pnetdev, void *p)
@@ -760,7 +759,6 @@ void rtw_stop_drv_threads(struct adapter *padapter)
static u8 rtw_init_default_value(struct adapter *padapter)
{
- u8 ret = _SUCCESS;
struct registry_priv *pregistrypriv = &padapter->registrypriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -803,12 +801,11 @@ static u8 rtw_init_default_value(struct adapter *padapter)
padapter->bWritePortCancel = false;
padapter->bRxRSSIDisplay = 0;
padapter->bNotifyChannelChange = 0;
- return ret;
+ return _SUCCESS;
}
u8 rtw_reset_drv_sw(struct adapter *padapter)
{
- u8 ret8 = _SUCCESS;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
@@ -833,7 +830,7 @@ u8 rtw_reset_drv_sw(struct adapter *padapter)
rtw_set_signal_stat_timer(&padapter->recvpriv);
- return ret8;
+ return _SUCCESS;
}
u8 rtw_init_drv_sw(struct adapter *padapter)
diff --git a/drivers/staging/rtl8188eu/os_dep/osdep_service.c b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
index 8af4a8d24cc..abcb3a8589e 100644
--- a/drivers/staging/rtl8188eu/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
@@ -51,7 +51,7 @@ void *rtw_malloc2d(int h, int w, int size)
{
int j;
- void **a = (void **)kzalloc(h*sizeof(void *) + h*w*size, GFP_KERNEL);
+ void **a = kzalloc(h*sizeof(void *) + h*w*size, GFP_KERNEL);
if (a == NULL) {
pr_info("%s: alloc memory fail!\n", __func__);
return NULL;
diff --git a/drivers/staging/rtl8188eu/os_dep/rtw_android.c b/drivers/staging/rtl8188eu/os_dep/rtw_android.c
index d9d55d12fd5..99ce077007f 100644
--- a/drivers/staging/rtl8188eu/os_dep/rtw_android.c
+++ b/drivers/staging/rtl8188eu/os_dep/rtw_android.c
@@ -148,36 +148,21 @@ static int rtw_android_set_block(struct net_device *net, char *command,
int rtw_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd)
{
int ret = 0;
- char *command = NULL;
+ char *command;
int cmd_num;
int bytes_written = 0;
struct android_wifi_priv_cmd priv_cmd;
- if (!ifr->ifr_data) {
- ret = -EINVAL;
- goto exit;
- }
- if (copy_from_user(&priv_cmd, ifr->ifr_data,
- sizeof(struct android_wifi_priv_cmd))) {
- ret = -EFAULT;
- goto exit;
- }
- command = kmalloc(priv_cmd.total_len, GFP_KERNEL);
- if (!command) {
- DBG_88E("%s: failed to allocate memory\n", __func__);
- ret = -ENOMEM;
- goto exit;
- }
- if (!access_ok(VERIFY_READ, priv_cmd.buf, priv_cmd.total_len)) {
- DBG_88E("%s: failed to access memory\n", __func__);
- ret = -EFAULT;
- goto exit;
- }
- if (copy_from_user(command, (char __user *)priv_cmd.buf,
- priv_cmd.total_len)) {
- ret = -EFAULT;
- goto exit;
- }
+ if (!ifr->ifr_data)
+ return -EINVAL;
+ if (copy_from_user(&priv_cmd, ifr->ifr_data, sizeof(priv_cmd)))
+ return -EFAULT;
+ if (priv_cmd.total_len < 1)
+ return -EINVAL;
+ command = memdup_user(priv_cmd.buf, priv_cmd.total_len);
+ if (IS_ERR(command))
+ return PTR_ERR(command);
+ command[priv_cmd.total_len - 1] = 0;
DBG_88E("%s: Android private cmd \"%s\" on %s\n",
__func__, command, ifr->ifr_name);
cmd_num = rtw_android_cmdstr_to_num(command);
@@ -191,7 +176,7 @@ int rtw_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd)
DBG_88E("%s: Ignore private cmd \"%s\" - iface %s is down\n",
__func__, command, ifr->ifr_name);
ret = 0;
- goto exit;
+ goto free;
}
switch (cmd_num) {
case ANDROID_WIFI_CMD_STOP:
@@ -279,7 +264,7 @@ response:
} else {
ret = bytes_written;
}
-exit:
+free:
kfree(command);
return ret;
}
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 2f87150a21b..bee39c2278f 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -63,7 +63,6 @@ static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf)
struct usb_config_descriptor *pconf_desc;
struct usb_host_interface *phost_iface;
struct usb_interface_descriptor *piface_desc;
- struct usb_host_endpoint *phost_endp;
struct usb_endpoint_descriptor *pendp_desc;
struct usb_device *pusbd;
@@ -92,24 +91,22 @@ static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf)
for (i = 0; i < pdvobjpriv->nr_endpoint; i++) {
int ep_num;
- phost_endp = phost_iface->endpoint + i;
-
- if (phost_endp) {
- pendp_desc = &phost_endp->desc;
- ep_num = usb_endpoint_num(pendp_desc);
-
- if (usb_endpoint_is_bulk_in(pendp_desc)) {
- pdvobjpriv->RtInPipe[pdvobjpriv->RtNumInPipes] = ep_num;
- pdvobjpriv->RtNumInPipes++;
- } else if (usb_endpoint_is_int_in(pendp_desc)) {
- pdvobjpriv->RtInPipe[pdvobjpriv->RtNumInPipes] = ep_num;
- pdvobjpriv->RtNumInPipes++;
- } else if (usb_endpoint_is_bulk_out(pendp_desc)) {
- pdvobjpriv->RtOutPipe[pdvobjpriv->RtNumOutPipes] = ep_num;
- pdvobjpriv->RtNumOutPipes++;
- }
- pdvobjpriv->ep_num[i] = ep_num;
+ pendp_desc = &phost_iface->endpoint[i].desc;
+
+ ep_num = usb_endpoint_num(pendp_desc);
+
+ if (usb_endpoint_is_bulk_in(pendp_desc)) {
+ pdvobjpriv->RtInPipe[pdvobjpriv->RtNumInPipes] = ep_num;
+ pdvobjpriv->RtNumInPipes++;
+ } else if (usb_endpoint_is_int_in(pendp_desc)) {
+ pdvobjpriv->RtInPipe[pdvobjpriv->RtNumInPipes] = ep_num;
+ pdvobjpriv->RtNumInPipes++;
+ } else if (usb_endpoint_is_bulk_out(pendp_desc)) {
+ pdvobjpriv->RtOutPipe[pdvobjpriv->RtNumOutPipes] =
+ ep_num;
+ pdvobjpriv->RtNumOutPipes++;
}
+ pdvobjpriv->ep_num[i] = ep_num;
}
if (pusbd->speed == USB_SPEED_HIGH)
@@ -557,8 +554,6 @@ static void rtw_dev_remove(struct usb_interface *pusb_intf)
RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("-dev_remove()\n"));
DBG_88E("-r871xu_dev_remove, done\n");
-
- return;
}
static struct usb_driver rtl8188e_usb_drv = {
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
index ba1e178fb51..d2efa9dfc8c 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
@@ -160,10 +160,10 @@ static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb)
switch (haldata->UsbRxAggMode) {
case USB_RX_AGG_DMA:
case USB_RX_AGG_MIX:
- pkt_offset = (u16) round_up(pkt_offset, 128);
+ pkt_offset = (u16)round_up(pkt_offset, 128);
break;
case USB_RX_AGG_USB:
- pkt_offset = (u16) round_up(pkt_offset, 4);
+ pkt_offset = (u16)round_up(pkt_offset, 4);
break;
case USB_RX_AGG_DISABLE:
default:
@@ -843,7 +843,7 @@ void usb_write_port_cancel(struct adapter *padapter)
void rtl8188eu_recv_tasklet(void *priv)
{
struct sk_buff *pskb;
- struct adapter *adapt = (struct adapter *)priv;
+ struct adapter *adapt = priv;
struct recv_priv *precvpriv = &adapt->recvpriv;
while (NULL != (pskb = skb_dequeue(&precvpriv->rx_skb_queue))) {
@@ -862,7 +862,7 @@ void rtl8188eu_recv_tasklet(void *priv)
void rtl8188eu_xmit_tasklet(void *priv)
{
int ret = false;
- struct adapter *adapt = (struct adapter *)priv;
+ struct adapter *adapt = priv;
struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
if (check_fwstate(&adapt->mlmepriv, _FW_UNDER_SURVEY))
diff --git a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
index 0ce47b07ef8..5acf9a9ddde 100644
--- a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
@@ -46,7 +46,7 @@ void _rtw_open_pktfile(struct sk_buff *pktptr, struct pkt_file *pfile)
}
-uint _rtw_pktfile_read (struct pkt_file *pfile, u8 *rmem, uint rlen)
+uint _rtw_pktfile_read(struct pkt_file *pfile, u8 *rmem, uint rlen)
{
uint len = 0;
@@ -66,13 +66,7 @@ uint _rtw_pktfile_read (struct pkt_file *pfile, u8 *rmem, uint rlen)
int rtw_endofpktfile(struct pkt_file *pfile)
{
-
- if (pfile->pkt_len == 0) {
- return true;
- }
-
-
- return false;
+ return pfile->pkt_len == 0;
}
int rtw_os_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *pxmitbuf, u32 alloc_sz)
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index 0ffed2d06b5..552d943b176 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -1287,7 +1287,7 @@ void rtl8192_tx_fill_desc(struct net_device *dev, struct tx_desc *pdesc,
void rtl8192_tx_fill_cmd_desc(struct net_device *dev,
struct tx_desc_cmd *entry,
- struct cb_desc *cb_desc, struct sk_buff* skb)
+ struct cb_desc *cb_desc, struct sk_buff *skb)
{
struct r8192_priv *priv = rtllib_priv(dev);
dma_addr_t mapping = pci_map_single(priv->pdev, skb->data, skb->len,
@@ -1302,7 +1302,7 @@ void rtl8192_tx_fill_cmd_desc(struct net_device *dev,
if (cb_desc->bCmdOrInit == DESC_PACKET_TYPE_INIT) {
entry->CmdInit = DESC_PACKET_TYPE_INIT;
} else {
- struct tx_desc * entry_tmp = (struct tx_desc *)entry;
+ struct tx_desc *entry_tmp = (struct tx_desc *)entry;
entry_tmp->CmdInit = DESC_PACKET_TYPE_NORMAL;
entry_tmp->Offset = sizeof(struct tx_fwinfo_8190pci) + 8;
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
index b6ce8c3b2f8..885315cac3a 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
@@ -1449,13 +1449,11 @@ static void dm_CCKTxPowerAdjust_TSSI(struct net_device *dev, bool bInCH14)
(priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[1]<<8)) ;
rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
- TempVal = 0;
TempVal = (u32)(priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[2] +
(priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[3]<<8) +
(priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[4]<<16)+
(priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[5]<<24));
rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
- TempVal = 0;
TempVal = (u32)(priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[6] +
(priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[7]<<8)) ;
@@ -1465,13 +1463,11 @@ static void dm_CCKTxPowerAdjust_TSSI(struct net_device *dev, bool bInCH14)
(priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[1]<<8)) ;
rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
- TempVal = 0;
TempVal = (u32)(priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[2] +
(priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[3]<<8) +
(priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[4]<<16)+
(priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[5]<<24));
rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
- TempVal = 0;
TempVal = (u32)(priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[6] +
(priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[7]<<8)) ;
@@ -1493,7 +1489,6 @@ static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev, bool bInCH
rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
RT_TRACE(COMP_POWER_TRACKING, "CCK not chnl 14, reg 0x%x = 0x%x\n",
rCCK0_TxFilter1, TempVal);
- TempVal = 0;
TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][2] +
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][3]<<8) +
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][4]<<16)+
@@ -1501,7 +1496,6 @@ static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev, bool bInCH
rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
RT_TRACE(COMP_POWER_TRACKING, "CCK not chnl 14, reg 0x%x = 0x%x\n",
rCCK0_TxFilter2, TempVal);
- TempVal = 0;
TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][6] +
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][7]<<8) ;
@@ -1515,7 +1509,6 @@ static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev, bool bInCH
rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n",
rCCK0_TxFilter1, TempVal);
- TempVal = 0;
TempVal = CCKSwingTable_Ch14[priv->CCK_index][2] +
(CCKSwingTable_Ch14[priv->CCK_index][3]<<8) +
(CCKSwingTable_Ch14[priv->CCK_index][4]<<16)+
@@ -1523,7 +1516,6 @@ static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev, bool bInCH
rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n",
rCCK0_TxFilter2, TempVal);
- TempVal = 0;
TempVal = CCKSwingTable_Ch14[priv->CCK_index][6] +
(CCKSwingTable_Ch14[priv->CCK_index][7]<<8) ;
diff --git a/drivers/staging/rtl8192e/rtl819x_BAProc.c b/drivers/staging/rtl8192e/rtl819x_BAProc.c
index 6da57847a53..0415e02b4ef 100644
--- a/drivers/staging/rtl8192e/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_BAProc.c
@@ -16,6 +16,8 @@
* Contact Information:
* wlanfae <wlanfae@realtek.com>
******************************************************************************/
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
#include "rtllib.h"
#include "rtl819x_BA.h"
@@ -79,7 +81,6 @@ static struct sk_buff *rtllib_ADDBA(struct rtllib_device *ieee, u8 *Dst,
struct sk_buff *skb = NULL;
struct rtllib_hdr_3addr *BAReq = NULL;
u8 *tag = NULL;
- u16 tmp = 0;
u16 len = ieee->tx_headroom + 9;
RTLLIB_DEBUG(RTLLIB_DL_TRACE | RTLLIB_DL_BA, "========>%s(), frame(%d)"
@@ -115,15 +116,15 @@ static struct sk_buff *rtllib_ADDBA(struct rtllib_device *ieee, u8 *Dst,
if (ACT_ADDBARSP == type) {
RT_TRACE(COMP_DBG, "====>to send ADDBARSP\n");
- tmp = StatusCode;
- memcpy(tag, (u8 *)&tmp, 2);
+
+ put_unaligned_le16(StatusCode, tag);
tag += 2;
}
- tmp = pBA->BaParamSet.shortData;
- memcpy(tag, (u8 *)&tmp, 2);
+
+ put_unaligned_le16(pBA->BaParamSet.shortData, tag);
tag += 2;
- tmp = pBA->BaTimeoutValue;
- memcpy(tag, (u8 *)&tmp, 2);
+
+ put_unaligned_le16(pBA->BaTimeoutValue, tag);
tag += 2;
if (ACT_ADDBAREQ == type) {
@@ -143,7 +144,6 @@ static struct sk_buff *rtllib_DELBA(struct rtllib_device *ieee, u8 *dst,
struct sk_buff *skb = NULL;
struct rtllib_hdr_3addr *Delba = NULL;
u8 *tag = NULL;
- u16 tmp = 0;
u16 len = 6 + ieee->tx_headroom;
if (net_ratelimit())
@@ -178,11 +178,11 @@ static struct sk_buff *rtllib_DELBA(struct rtllib_device *ieee, u8 *dst,
*tag++ = ACT_CAT_BA;
*tag++ = ACT_DELBA;
- tmp = DelbaParamSet.shortData;
- memcpy(tag, (u8 *)&tmp, 2);
+
+ put_unaligned_le16(DelbaParamSet.shortData, tag);
tag += 2;
- tmp = ReasonCode;
- memcpy(tag, (u8 *)&tmp, 2);
+
+ put_unaligned_le16(ReasonCode, tag);
tag += 2;
RTLLIB_DEBUG_DATA(RTLLIB_DL_DATA|RTLLIB_DL_BA, skb->data, skb->len);
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
index 2d82f8993ea..cef2dc27103 100644
--- a/drivers/staging/rtl8192e/rtllib.h
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -2762,8 +2762,6 @@ extern void rtllib_stop_scan(struct rtllib_device *ieee);
extern bool rtllib_act_scanning(struct rtllib_device *ieee, bool sync_scan);
extern void rtllib_stop_scan_syncro(struct rtllib_device *ieee);
extern void rtllib_start_scan_syncro(struct rtllib_device *ieee, u8 is_mesh);
-extern inline struct sk_buff *rtllib_probe_req(struct rtllib_device *ieee);
-extern u8 MgntQuery_MgntFrameTxRate(struct rtllib_device *ieee);
extern void rtllib_sta_ps_send_null_frame(struct rtllib_device *ieee,
short pwr);
extern void rtllib_sta_wakeup(struct rtllib_device *ieee, short nl);
@@ -2944,12 +2942,12 @@ void rtllib_softmac_scan_syncro(struct rtllib_device *ieee, u8 is_mesh);
extern const long rtllib_wlan_frequencies[];
-extern inline void rtllib_increment_scans(struct rtllib_device *ieee)
+static inline void rtllib_increment_scans(struct rtllib_device *ieee)
{
ieee->scans++;
}
-extern inline int rtllib_get_scans(struct rtllib_device *ieee)
+static inline int rtllib_get_scans(struct rtllib_device *ieee)
{
return ieee->scans;
}
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index 1c2014fd8d8..cf11b042b93 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -1415,10 +1415,6 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
return 1;
rx_dropped:
- if (rxb != NULL) {
- kfree(rxb);
- rxb = NULL;
- }
ieee->stats.rx_dropped++;
/* Returning 0 indicates to caller that we have not handled the SKB--
@@ -2691,7 +2687,7 @@ void rtllib_rx_mgt(struct rtllib_device *ieee,
struct sk_buff *skb,
struct rtllib_rx_stats *stats)
{
- struct rtllib_hdr_4addr *header = (struct rtllib_hdr_4addr *)skb->data ;
+ struct rtllib_hdr_4addr *header = (struct rtllib_hdr_4addr *)skb->data;
if ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) !=
RTLLIB_STYPE_PROBE_RESP) &&
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index abb6729ae27..d992a754e72 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -193,7 +193,7 @@ MgntQuery_TxRateExcludeCCKRates(struct rtllib_device *ieee)
return QueryRate;
}
-u8 MgntQuery_MgntFrameTxRate(struct rtllib_device *ieee)
+static u8 MgntQuery_MgntFrameTxRate(struct rtllib_device *ieee)
{
struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
u8 rate;
@@ -343,7 +343,7 @@ inline void softmac_ps_mgmt_xmit(struct sk_buff *skb,
}
}
-inline struct sk_buff *rtllib_probe_req(struct rtllib_device *ieee)
+static inline struct sk_buff *rtllib_probe_req(struct rtllib_device *ieee)
{
unsigned int len, rate_len;
u8 *tag;
@@ -1311,7 +1311,7 @@ inline struct sk_buff *rtllib_association_req(struct rtllib_network *beacon,
}
if (beacon->bCkipSupported) {
- static u8 AironetIeOui[] = {0x00, 0x01, 0x66};
+ static const u8 AironetIeOui[] = {0x00, 0x01, 0x66};
u8 CcxAironetBuf[30];
struct octet_string osCcxAironetIE;
@@ -1331,10 +1331,11 @@ inline struct sk_buff *rtllib_association_req(struct rtllib_network *beacon,
}
if (beacon->bCcxRmEnable) {
- static u8 CcxRmCapBuf[] = {0x00, 0x40, 0x96, 0x01, 0x01, 0x00};
+ static const u8 CcxRmCapBuf[] = {0x00, 0x40, 0x96, 0x01, 0x01,
+ 0x00};
struct octet_string osCcxRmCap;
- osCcxRmCap.Octet = CcxRmCapBuf;
+ osCcxRmCap.Octet = (u8 *) CcxRmCapBuf;
osCcxRmCap.Length = sizeof(CcxRmCapBuf);
tag = skb_put(skb, ccxrm_ie_len);
*tag++ = MFIE_TYPE_GENERIC;
@@ -3167,6 +3168,7 @@ void rtllib_softmac_free(struct rtllib_device *ieee)
cancel_delayed_work(&ieee->associate_retry_wq);
destroy_workqueue(ieee->wq);
up(&ieee->wx_sem);
+ tasklet_kill(&ieee->ps_task);
}
/********************************************************
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index 73de9e9669f..d401dbf4c7c 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -713,8 +713,8 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
while(!list_empty(&pTS->RxPendingPktList)) {
IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): start RREORDER indicate\n",__func__);
pReorderEntry = (PRX_REORDER_ENTRY)list_entry(pTS->RxPendingPktList.prev,RX_REORDER_ENTRY,List);
- if( SN_LESS(pReorderEntry->SeqNum, pTS->RxIndicateSeq) ||
- SN_EQUAL(pReorderEntry->SeqNum, pTS->RxIndicateSeq))
+ if (SN_LESS(pReorderEntry->SeqNum, pTS->RxIndicateSeq) ||
+ SN_EQUAL(pReorderEntry->SeqNum, pTS->RxIndicateSeq))
{
/* This protect buffer from overflow. */
if(index >= REORDER_WIN_SIZE) {
@@ -800,9 +800,8 @@ static u8 parse_subframe(struct sk_buff *skb,
// Null packet, don't indicate it to upper layer
ChkLength = LLCOffset;/* + (Frame_WEP(frame)!=0 ?Adapter->MgntInfo.SecurityInfo.EncryptionHeadOverhead:0);*/
- if( skb->len <= ChkLength ) {
+ if (skb->len <= ChkLength)
return 0;
- }
skb_pull(skb, LLCOffset);
@@ -1035,10 +1034,9 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
{
// IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): pRxTS->RxLastFragNum is %d,frag is %d,pRxTS->RxLastSeqNum is %d,seq is %d\n",__func__,pRxTS->RxLastFragNum,frag,pRxTS->RxLastSeqNum,WLAN_GET_SEQ_SEQ(sc));
- if( (fc & (1<<11)) &&
- (frag == pRxTS->RxLastFragNum) &&
- (WLAN_GET_SEQ_SEQ(sc) == pRxTS->RxLastSeqNum) )
- {
+ if ((fc & (1<<11)) &&
+ (frag == pRxTS->RxLastFragNum) &&
+ (WLAN_GET_SEQ_SEQ(sc) == pRxTS->RxLastSeqNum)) {
goto rx_dropped;
}
else
@@ -2456,7 +2454,7 @@ static inline void ieee80211_process_probe_response(
// then wireless adapter should do active scan from ch1~11 and
// passive scan from ch12~14
- if( !IsLegalChannel(ieee, network.channel) )
+ if (!IsLegalChannel(ieee, network.channel))
return;
if(ieee->bGlobalDomain)
{
@@ -2465,8 +2463,7 @@ static inline void ieee80211_process_probe_response(
// Case 1: Country code
if(IS_COUNTRY_IE_VALID(ieee) )
{
- if( !IsLegalChannel(ieee, network.channel) )
- {
+ if (!IsLegalChannel(ieee, network.channel)) {
printk("GetScanInfo(): For Country code, filter probe response at channel(%d).\n", network.channel);
return;
}
@@ -2487,8 +2484,7 @@ static inline void ieee80211_process_probe_response(
// Case 1: Country code
if(IS_COUNTRY_IE_VALID(ieee) )
{
- if( !IsLegalChannel(ieee, network.channel) )
- {
+ if (!IsLegalChannel(ieee, network.channel)) {
printk("GetScanInfo(): For Country code, filter beacon at channel(%d).\n",network.channel);
return;
}
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index a85bb232be9..d1471877e19 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -411,7 +411,7 @@ static void ieee80211_send_probe(struct ieee80211_device *ieee)
}
}
-void ieee80211_send_probe_requests(struct ieee80211_device *ieee)
+static void ieee80211_send_probe_requests(struct ieee80211_device *ieee)
{
if (ieee->active_scan && (ieee->softmac_features & IEEE_SOFTMAC_PROBERQ)){
ieee80211_send_probe(ieee);
@@ -517,7 +517,7 @@ static void ieee80211_softmac_scan_wq(struct work_struct *work)
goto out;
ieee->set_chan(ieee->dev, ieee->current_network.channel);
if(channel_map[ieee->current_network.channel] == 1)
- ieee80211_send_probe_requests(ieee);
+ ieee80211_send_probe_requests(ieee);
queue_delayed_work(ieee->wq, &ieee->softmac_scan_wq, IEEE80211_SOFTMAC_SCAN_TIME);
@@ -804,12 +804,11 @@ static struct sk_buff *ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *d
*(tag++) = ieee->current_network.channel;
if(atim_len){
- u16 val16;
*(tag++) = MFIE_TYPE_IBSS_SET;
*(tag++) = 2;
- //*((u16*)(tag)) = cpu_to_le16(ieee->current_network.atim_window);
- val16 = cpu_to_le16(ieee->current_network.atim_window);
- memcpy((u8 *)tag, (u8 *)&val16, 2);
+
+ put_unaligned_le16(ieee->current_network.atim_window,
+ (u8 *)tag);
tag+=2;
}
@@ -1043,10 +1042,9 @@ inline struct sk_buff *ieee80211_association_req(struct ieee80211_network *beaco
{
ccxrm_ie_len = 6+2;
}
- if( beacon->BssCcxVerNumber >= 2 )
- {
+ if (beacon->BssCcxVerNumber >= 2)
cxvernum_ie_len = 5+2;
- }
+
#ifdef THOMAS_TURBO
len = sizeof(struct ieee80211_assoc_request_frame)+ 2
+ beacon->ssid_len//essid tagged val
@@ -1103,7 +1101,7 @@ inline struct sk_buff *ieee80211_association_req(struct ieee80211_network *beaco
if(ieee->short_slot)
hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT);
if (wmm_info_len) //QOS
- hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_QOS);
+ hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_QOS);
hdr->listen_interval = 0xa; //FIXME
@@ -1118,8 +1116,7 @@ inline struct sk_buff *ieee80211_association_req(struct ieee80211_network *beaco
ieee80211_MFIE_Brate(ieee, &tag);
ieee80211_MFIE_Grate(ieee, &tag);
// For CCX 1 S13, CKIP. Added by Annie, 2006-08-14.
- if( beacon->bCkipSupported )
- {
+ if (beacon->bCkipSupported) {
static u8 AironetIeOui[] = {0x00, 0x01, 0x66}; // "4500-client"
u8 CcxAironetBuf[30];
OCTET_STRING osCcxAironetIE;
@@ -1158,8 +1155,7 @@ inline struct sk_buff *ieee80211_association_req(struct ieee80211_network *beaco
tag += osCcxRmCap.Length;
}
- if( beacon->BssCcxVerNumber >= 2 )
- {
+ if (beacon->BssCcxVerNumber >= 2) {
u8 CcxVerNumBuf[] = {0x00, 0x40, 0x96, 0x03, 0x00};
OCTET_STRING osCcxVerNum;
CcxVerNumBuf[4] = beacon->BssCcxVerNumber;
@@ -1533,7 +1529,7 @@ void ieee80211_softmac_check_all_nets(struct ieee80211_device *ieee)
break;
if (ieee->scan_age == 0 || time_after(target->last_scanned + ieee->scan_age, jiffies))
- ieee80211_softmac_new_net(ieee, target);
+ ieee80211_softmac_new_net(ieee, target);
}
spin_unlock_irqrestore(&ieee->lock, flags);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
index 82ea533a0cf..644368df634 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
@@ -374,7 +374,7 @@ int ieee80211_wx_set_scan(struct ieee80211_device *ieee, struct iw_request_info
goto out;
}
- if ( ieee->state == IEEE80211_LINKED){
+ if (ieee->state == IEEE80211_LINKED) {
queue_work(ieee->wq, &ieee->wx_sync_scan_wq);
/* intentionally forget to up sem */
return 0;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
index 57bef219687..fca73c7c9fb 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
@@ -529,8 +529,7 @@ static void ieee80211_query_protectionmode(struct ieee80211_device *ieee,
}
}
// For test , CTS replace with RTS
- if( 0 )
- {
+ if (0) {
tcb_desc->bCTSEnable = true;
tcb_desc->rts_rate = MGN_24M;
tcb_desc->bRTSEnable = true;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
index 68f5ede8663..ae1b3cf2866 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
@@ -172,7 +172,7 @@ static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
iwe.cmd = IWEVCUSTOM;
iwe.u.data.length = p - custom;
if (iwe.u.data.length)
- start = iwe_stream_add_point(info, start, stop, &iwe, custom);
+ start = iwe_stream_add_point(info, start, stop, &iwe, custom);
/* Add quality statistics */
/* TODO: Fix these values... */
iwe.cmd = IWEVQUAL;
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
index 51552d42d66..cd196cec0dd 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
@@ -110,7 +110,7 @@ static struct sk_buff *ieee80211_ADDBA(struct ieee80211_device *ieee, u8 *Dst, P
struct sk_buff *skb = NULL;
struct ieee80211_hdr_3addr *BAReq = NULL;
u8 *tag = NULL;
- u16 tmp = 0;
+ __le16 tmp = 0;
u16 len = ieee->tx_headroom + 9;
//category(1) + action field(1) + Dialog Token(1) + BA Parameter Set(2) + BA Timeout Value(2) + BA Start SeqCtrl(2)(or StatusCode(2))
IEEE80211_DEBUG(IEEE80211_DL_TRACE | IEEE80211_DL_BA, "========>%s(), frame(%d) sentd to:%pM, ieee->dev:%p\n", __func__, type, Dst, ieee->dev);
@@ -196,7 +196,7 @@ static struct sk_buff *ieee80211_DELBA(
struct sk_buff *skb = NULL;
struct ieee80211_hdr_3addr *Delba = NULL;
u8 *tag = NULL;
- u16 tmp = 0;
+ __le16 tmp = 0;
//len = head len + DELBA Parameter Set(2) + Reason Code(2)
u16 len = 6 + ieee->tx_headroom;
@@ -342,8 +342,7 @@ int ieee80211_rx_ADDBAReq(struct ieee80211_device *ieee, struct sk_buff *skb)
PSEQUENCE_CONTROL pBaStartSeqCtrl = NULL;
PRX_TS_RECORD pTS = NULL;
- if (skb->len < sizeof( struct ieee80211_hdr_3addr) + 9)
- {
+ if (skb->len < sizeof(struct ieee80211_hdr_3addr) + 9) {
IEEE80211_DEBUG(IEEE80211_DL_ERR,
" Invalid skb len in BAREQ(%d / %zu)\n",
skb->len,
@@ -444,8 +443,7 @@ int ieee80211_rx_ADDBARsp(struct ieee80211_device *ieee, struct sk_buff *skb)
PBA_PARAM_SET pBaParamSet = NULL;
u16 ReasonCode;
- if (skb->len < sizeof( struct ieee80211_hdr_3addr) + 9)
- {
+ if (skb->len < sizeof(struct ieee80211_hdr_3addr) + 9) {
IEEE80211_DEBUG(IEEE80211_DL_ERR,
" Invalid skb len in BARSP(%d / %zu)\n",
skb->len,
@@ -463,10 +461,9 @@ int ieee80211_rx_ADDBARsp(struct ieee80211_device *ieee, struct sk_buff *skb)
// Check the capability
// Since we can always receive A-MPDU, we just check if it is under HT mode.
- if( ieee->current_network.qos_data.active == 0 ||
- ieee->pHTInfo->bCurrentHTSupport == false ||
- ieee->pHTInfo->bCurrentAMPDUEnable == false )
- {
+ if (ieee->current_network.qos_data.active == 0 ||
+ ieee->pHTInfo->bCurrentHTSupport == false ||
+ ieee->pHTInfo->bCurrentAMPDUEnable == false) {
IEEE80211_DEBUG(IEEE80211_DL_ERR, "reject to ADDBA_RSP as some capability is not ready(%d, %d, %d)\n",ieee->current_network.qos_data.active, ieee->pHTInfo->bCurrentHTSupport, ieee->pHTInfo->bCurrentAMPDUEnable);
ReasonCode = DELBA_REASON_UNKNOWN_BA;
goto OnADDBARsp_Reject;
@@ -577,8 +574,7 @@ int ieee80211_rx_DELBA(struct ieee80211_device *ieee, struct sk_buff *skb)
u16 *pReasonCode = NULL;
u8 *dst = NULL;
- if (skb->len < sizeof( struct ieee80211_hdr_3addr) + 6)
- {
+ if (skb->len < sizeof(struct ieee80211_hdr_3addr) + 6) {
IEEE80211_DEBUG(IEEE80211_DL_ERR,
" Invalid skb len in DELBA(%d / %zu)\n",
skb->len,
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
index 1ea2cd39267..e60d926a397 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
@@ -602,8 +602,7 @@ void HTConstructCapabilityElement(struct ieee80211_device *ieee, u8 *posHTCap, u
// TODO: Nedd to take care of this part
IEEE80211_DEBUG(IEEE80211_DL_HT, "TX HT cap/info ele BW=%d MaxAMSDUSize:%d DssCCk:%d\n", pCapELE->ChlWidth, pCapELE->MaxAMSDUSize, pCapELE->DssCCk);
- if( IsEncrypt)
- {
+ if (IsEncrypt) {
pCapELE->MPDUDensity = 7; // 8us
pCapELE->MaxRxAMPDUFactor = 2; // 2 is for 32 K and 3 is 64K
}
@@ -951,8 +950,7 @@ void HTOnAssocRsp(struct ieee80211_device *ieee)
static u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33}; // For 11n EWC definition, 2007.07.17, by Emily
static u8 EWC11NHTInfo[] = {0x00, 0x90, 0x4c, 0x34}; // For 11n EWC definition, 2007.07.17, by Emily
- if( pHTInfo->bCurrentHTSupport == false )
- {
+ if (pHTInfo->bCurrentHTSupport == false) {
IEEE80211_DEBUG(IEEE80211_DL_ERR, "<=== HTOnAssocRsp(): HT_DISABLE\n");
return;
}
@@ -1043,7 +1041,7 @@ void HTOnAssocRsp(struct ieee80211_device *ieee)
// Replace MPDU factor declared in original association response frame format. 2007.08.20 by Emily
if (ieee->current_network.bssht.bdRT2RTAggregation)
{
- if( ieee->pairwise_key_type != KEY_TYPE_NA)
+ if (ieee->pairwise_key_type != KEY_TYPE_NA)
// Realtek may set 32k in security mode and 64k for others
pHTInfo->CurrentAMPDUFactor = pPeerHTCap->MaxRxAMPDUFactor;
else
@@ -1332,8 +1330,7 @@ u8 HTCCheck(struct ieee80211_device *ieee, u8 *pFrame)
{
if(ieee->pHTInfo->bCurrentHTSupport)
{
- if( (IsQoSDataFrame(pFrame) && Frame_Order(pFrame)) == 1)
- {
+ if ((IsQoSDataFrame(pFrame) && Frame_Order(pFrame)) == 1) {
IEEE80211_DEBUG(IEEE80211_DL_HT, "HT CONTROL FILED EXIST!!\n");
return true;
}
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 929ac29197c..e031a253e2a 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -1791,8 +1791,8 @@ static void rtl8192_link_change(struct net_device *dev)
}
static struct ieee80211_qos_parameters def_qos_parameters = {
- {3, 3, 3, 3},/* cw_min */
- {7, 7, 7, 7},/* cw_max */
+ {cpu_to_le16(3), cpu_to_le16(3), cpu_to_le16(3), cpu_to_le16(3)},
+ {cpu_to_le16(7), cpu_to_le16(7), cpu_to_le16(7), cpu_to_le16(7)},
{2, 2, 2, 2},/* aifs */
{0, 0, 0, 0},/* flags */
{0, 0, 0, 0} /* tx_op_limit */
@@ -1821,8 +1821,11 @@ static void rtl8192_qos_activate(struct work_struct *work)
struct net_device *dev = priv->ieee80211->dev;
struct ieee80211_qos_parameters *qos_parameters = &priv->ieee80211->current_network.qos_data.parameters;
u8 mode = priv->ieee80211->current_network.mode;
- u8 u1bAIFS;
+ u32 u1bAIFS;
u32 u4bAcParam;
+ u32 op_limit;
+ u32 cw_max;
+ u32 cw_min;
int i;
mutex_lock(&priv->mutex);
@@ -1835,11 +1838,14 @@ static void rtl8192_qos_activate(struct work_struct *work)
for (i = 0; i < QOS_QUEUE_NUM; i++) {
//Mode G/A: slotTimeTimer = 9; Mode B: 20
u1bAIFS = qos_parameters->aifs[i] * ((mode&(IEEE_G|IEEE_N_24G)) ? 9 : 20) + aSifsTime;
- u4bAcParam = ((((u32)(qos_parameters->tx_op_limit[i]))<< AC_PARAM_TXOP_LIMIT_OFFSET)|
- (((u32)(qos_parameters->cw_max[i]))<< AC_PARAM_ECW_MAX_OFFSET)|
- (((u32)(qos_parameters->cw_min[i]))<< AC_PARAM_ECW_MIN_OFFSET)|
- ((u32)u1bAIFS << AC_PARAM_AIFS_OFFSET));
-
+ u1bAIFS <<= AC_PARAM_AIFS_OFFSET;
+ op_limit = (u32)le16_to_cpu(qos_parameters->tx_op_limit[i]);
+ op_limit <<= AC_PARAM_TXOP_LIMIT_OFFSET;
+ cw_max = (u32)le16_to_cpu(qos_parameters->cw_max[i]);
+ cw_max <<= AC_PARAM_ECW_MAX_OFFSET;
+ cw_min = (u32)le16_to_cpu(qos_parameters->cw_min[i]);
+ cw_min <<= AC_PARAM_ECW_MIN_OFFSET;
+ u4bAcParam = op_limit | cw_max | cw_min | u1bAIFS;
write_nic_dword(dev, WDCAPARA_ADD[i], u4bAcParam);
}
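
The rtl8192_qos_activate() change above follows the usual pattern for __le16 fields: initialise them with cpu_to_le16() and convert with le16_to_cpu() before doing host-order arithmetic, so the shifted values come out right on both little- and big-endian hosts. A minimal sketch of that packing step, assuming <linux/types.h> and the AC_PARAM_* offsets used in the hunk; the struct below is illustrative, not the driver's:

struct ac_params_sketch {
        __le16 cw_min;          /* kept little-endian, as handed to the hardware */
        __le16 cw_max;
        __le16 tx_op_limit;
};

static u32 pack_ac_param(const struct ac_params_sketch *p, u32 aifs_slots)
{
        /* convert each __le16 to host order before shifting it into place */
        u32 val = aifs_slots << AC_PARAM_AIFS_OFFSET;

        val |= (u32)le16_to_cpu(p->tx_op_limit) << AC_PARAM_TXOP_LIMIT_OFFSET;
        val |= (u32)le16_to_cpu(p->cw_max) << AC_PARAM_ECW_MAX_OFFSET;
        val |= (u32)le16_to_cpu(p->cw_min) << AC_PARAM_ECW_MIN_OFFSET;
        return val;
}
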
diff --git a/drivers/staging/rtl8192u/r8192U_dm.c b/drivers/staging/rtl8192u/r8192U_dm.c
index b0b66fba563..936565d4601 100644
--- a/drivers/staging/rtl8192u/r8192U_dm.c
+++ b/drivers/staging/rtl8192u/r8192U_dm.c
@@ -1476,14 +1476,12 @@ static void dm_CCKTxPowerAdjust_TSSI(struct net_device *dev, bool bInCH14)
rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
//Write 0xa24 ~ 0xa27
- TempVal = 0;
TempVal = priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[2] +
(priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[3]<<8) +
(priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[4]<<16)+
(priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[5]<<24);
rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
//Write 0xa28 0xa29
- TempVal = 0;
TempVal = priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[6] +
(priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[7]<<8) ;
@@ -1496,14 +1494,12 @@ static void dm_CCKTxPowerAdjust_TSSI(struct net_device *dev, bool bInCH14)
rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
//Write 0xa24 ~ 0xa27
- TempVal = 0;
TempVal = priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[2] +
(priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[3]<<8) +
(priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[4]<<16)+
(priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[5]<<24);
rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
//Write 0xa28 0xa29
- TempVal = 0;
TempVal = priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[6] +
(priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[7]<<8) ;
@@ -1528,7 +1524,6 @@ static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev, bool bInCH
RT_TRACE(COMP_POWER_TRACKING, "CCK not chnl 14, reg 0x%x = 0x%x\n",
rCCK0_TxFilter1, TempVal);
//Write 0xa24 ~ 0xa27
- TempVal = 0;
TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][2] +
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][3]<<8) +
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][4]<<16)+
@@ -1537,7 +1532,6 @@ static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev, bool bInCH
RT_TRACE(COMP_POWER_TRACKING, "CCK not chnl 14, reg 0x%x = 0x%x\n",
rCCK0_TxFilter2, TempVal);
//Write 0xa28 0xa29
- TempVal = 0;
TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][6] +
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][7]<<8) ;
@@ -1556,7 +1550,6 @@ static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev, bool bInCH
RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n",
rCCK0_TxFilter1, TempVal);
//Write 0xa24 ~ 0xa27
- TempVal = 0;
TempVal = CCKSwingTable_Ch14[priv->CCK_index][2] +
(CCKSwingTable_Ch14[priv->CCK_index][3]<<8) +
(CCKSwingTable_Ch14[priv->CCK_index][4]<<16)+
@@ -1565,7 +1558,6 @@ static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev, bool bInCH
RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n",
rCCK0_TxFilter2, TempVal);
//Write 0xa28 0xa29
- TempVal = 0;
TempVal = CCKSwingTable_Ch14[priv->CCK_index][6] +
(CCKSwingTable_Ch14[priv->CCK_index][7]<<8) ;
@@ -1810,85 +1802,6 @@ void dm_change_dynamic_initgain_thresh(struct net_device *dev, u32 dm_type,
}
} /* DM_ChangeDynamicInitGainThresh */
-void
-dm_change_rxpath_selection_setting(
- struct net_device *dev,
- s32 DM_Type,
- s32 DM_Value)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- prate_adaptive pRA = (prate_adaptive)&(priv->rate_adaptive);
-
-
- if(DM_Type == 0)
- {
- if(DM_Value > 1)
- DM_Value = 1;
- DM_RxPathSelTable.Enable = (u8)DM_Value;
- }
- else if(DM_Type == 1)
- {
- if(DM_Value > 1)
- DM_Value = 1;
- DM_RxPathSelTable.DbgMode = (u8)DM_Value;
- }
- else if(DM_Type == 2)
- {
- if(DM_Value > 40)
- DM_Value = 40;
- DM_RxPathSelTable.SS_TH_low = (u8)DM_Value;
- }
- else if(DM_Type == 3)
- {
- if(DM_Value > 25)
- DM_Value = 25;
- DM_RxPathSelTable.diff_TH = (u8)DM_Value;
- }
- else if(DM_Type == 4)
- {
- if(DM_Value >= CCK_Rx_Version_MAX)
- DM_Value = CCK_Rx_Version_1;
- DM_RxPathSelTable.cck_method= (u8)DM_Value;
- }
- else if(DM_Type == 10)
- {
- if(DM_Value > 100)
- DM_Value = 50;
- DM_RxPathSelTable.rf_rssi[0] = (u8)DM_Value;
- }
- else if(DM_Type == 11)
- {
- if(DM_Value > 100)
- DM_Value = 50;
- DM_RxPathSelTable.rf_rssi[1] = (u8)DM_Value;
- }
- else if(DM_Type == 12)
- {
- if(DM_Value > 100)
- DM_Value = 50;
- DM_RxPathSelTable.rf_rssi[2] = (u8)DM_Value;
- }
- else if(DM_Type == 13)
- {
- if(DM_Value > 100)
- DM_Value = 50;
- DM_RxPathSelTable.rf_rssi[3] = (u8)DM_Value;
- }
- else if(DM_Type == 20)
- {
- if(DM_Value > 1)
- DM_Value = 1;
- pRA->ping_rssi_enable = (u8)DM_Value;
- }
- else if(DM_Type == 21)
- {
- if(DM_Value > 30)
- DM_Value = 30;
- pRA->ping_rssi_thresh_for_ra = DM_Value;
- }
-}
-
-
/*-----------------------------------------------------------------------------
* Function: dm_dig_init()
*
@@ -3554,7 +3467,6 @@ static void dm_check_txrateandretrycount(struct net_device *dev)
static void dm_send_rssi_tofw(struct net_device *dev)
{
- DCMD_TXCMD_T tx_cmd;
struct r8192_priv *priv = ieee80211_priv(dev);
// If we test chariot, we should stop the TX command ?
@@ -3562,9 +3474,6 @@ static void dm_send_rssi_tofw(struct net_device *dev)
// 0x1e0(byte) to notify driver.
write_nic_byte(dev, DRIVER_RSSI, (u8)priv->undecorated_smoothed_pwdb);
return;
- tx_cmd.Op = TXCMD_SET_RX_RSSI;
- tx_cmd.Length = 4;
- tx_cmd.Value = priv->undecorated_smoothed_pwdb;
}
/*---------------------------Define function prototype------------------------*/
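
Most of the removals in the r8192U_dm.c hunks above are dead stores: assigning TempVal = 0 immediately before an unconditional assignment, or filling a tx_cmd structure after a return, can never be observed, so dropping the code changes no behaviour. A tiny sketch of the cleaned-up shape, with made-up names:

static u32 build_reg_value(const u8 *tbl)
{
        u32 val;

        /* assign directly; a "val = 0;" before this line would be a dead store */
        val = tbl[0] | (tbl[1] << 8) | (tbl[2] << 16) | ((u32)tbl[3] << 24);
        return val;
}
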
diff --git a/drivers/staging/rtl8192u/r8192U_wx.c b/drivers/staging/rtl8192u/r8192U_wx.c
index 28f60d2dbe5..361d2d0c3df 100644
--- a/drivers/staging/rtl8192u/r8192U_wx.c
+++ b/drivers/staging/rtl8192u/r8192U_wx.c
@@ -171,7 +171,6 @@ static int r8192_wx_set_crcmon(struct net_device *dev,
struct r8192_priv *priv = ieee80211_priv(dev);
int *parms = (int *)extra;
int enable = (parms[0] > 0);
- short prev = priv->crcmon;
down(&priv->wx_sem);
@@ -183,11 +182,6 @@ static int r8192_wx_set_crcmon(struct net_device *dev,
DMESG("bad CRC in monitor mode are %s",
priv->crcmon ? "accepted" : "rejected");
- if (prev != priv->crcmon && priv->up) {
- /* rtl8180_down(dev); */
- /* rtl8180_up(dev); */
- }
-
up(&priv->wx_sem);
return 0;
diff --git a/drivers/staging/rtl8192u/r819xU_firmware.c b/drivers/staging/rtl8192u/r819xU_firmware.c
index f66ad8a0dfe..c230be290ab 100644
--- a/drivers/staging/rtl8192u/r819xU_firmware.c
+++ b/drivers/staging/rtl8192u/r819xU_firmware.c
@@ -45,6 +45,7 @@ static bool fw_download_code(struct net_device *dev, u8 *code_virtual_address,
unsigned char *seg_ptr;
cb_desc *tcb_desc;
u8 bLastIniPkt;
+ u8 index;
firmware_init_param(dev);
//Fragmentation might be required
@@ -78,18 +79,19 @@ static bool fw_download_code(struct net_device *dev, u8 *code_virtual_address,
* Transform from little endian to big endian
* and pending zero
*/
- for (i=0; i < frag_length; i+=4) {
- *seg_ptr++ = ((i+0)<frag_length)?code_virtual_address[i+3]:0;
- *seg_ptr++ = ((i+1)<frag_length)?code_virtual_address[i+2]:0;
- *seg_ptr++ = ((i+2)<frag_length)?code_virtual_address[i+1]:0;
- *seg_ptr++ = ((i+3)<frag_length)?code_virtual_address[i+0]:0;
+ for (i = 0; i < frag_length; i += 4) {
+ *seg_ptr++ = ((i+0) < frag_length) ? code_virtual_address[i+3] : 0;
+ *seg_ptr++ = ((i+1) < frag_length) ? code_virtual_address[i+2] : 0;
+ *seg_ptr++ = ((i+2) < frag_length) ? code_virtual_address[i+1] : 0;
+ *seg_ptr++ = ((i+3) < frag_length) ? code_virtual_address[i+0] : 0;
}
- tcb_desc->txbuf_size= (u16)i;
+ tcb_desc->txbuf_size = (u16)i;
skb_put(skb, i);
- if (!priv->ieee80211->check_nic_enough_desc(dev,tcb_desc->queue_index)||
- (!skb_queue_empty(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index]))||\
- (priv->ieee80211->queue_stop)) {
+ index = tcb_desc->queue_index;
+ if (!priv->ieee80211->check_nic_enough_desc(dev, index) ||
+ (!skb_queue_empty(&priv->ieee80211->skb_waitQ[index])) ||
+ (priv->ieee80211->queue_stop)) {
RT_TRACE(COMP_FIRMWARE,"=====================================================> tx full!\n");
skb_queue_tail(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index], skb);
} else {
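
The fragment-copy loop above writes each 32-bit word of the firmware image in reversed byte order and zero-pads the last partial word. A stand-alone sketch of that transform, with the bounds check applied to every source byte so it never reads past len; the destination is assumed to have room for the length rounded up to a multiple of four, and the names are illustrative:

static u32 copy_words_byteswapped(u8 *dst, const u8 *src, u32 len)
{
        u32 i;

        for (i = 0; i < len; i += 4) {
                /* reverse the byte order of each 4-byte group, padding with zeros */
                *dst++ = (i + 3 < len) ? src[i + 3] : 0;
                *dst++ = (i + 2 < len) ? src[i + 2] : 0;
                *dst++ = (i + 1 < len) ? src[i + 1] : 0;
                *dst++ = src[i];        /* i < len is guaranteed by the loop condition */
        }
        return i;                       /* padded length, a multiple of 4 */
}
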
diff --git a/drivers/staging/rtl8192u/r819xU_firmware.h b/drivers/staging/rtl8192u/r819xU_firmware.h
index c48c884aa1a..cfa222350a9 100644
--- a/drivers/staging/rtl8192u/r819xU_firmware.h
+++ b/drivers/staging/rtl8192u/r819xU_firmware.h
@@ -12,15 +12,15 @@
#define GET_COMMAND_PACKET_FRAG_THRESHOLD(v) (4*(v/4) - 8 - USB_HWDESC_HEADER_LEN)
//#endif
-typedef enum _firmware_init_step{
+typedef enum _firmware_init_step {
FW_INIT_STEP0_BOOT = 0,
FW_INIT_STEP1_MAIN = 1,
FW_INIT_STEP2_DATA = 2,
-}firmware_init_step_e;
+} firmware_init_step_e;
-typedef enum _opt_rst_type{
+typedef enum _opt_rst_type {
OPT_SYSTEM_RESET = 0,
OPT_FIRMWARE_RESET = 1,
-}opt_rst_type_e;
+} opt_rst_type_e;
#endif
diff --git a/drivers/staging/rtl8192u/r819xU_phy.c b/drivers/staging/rtl8192u/r819xU_phy.c
index e9c15fe8ded..058960251ba 100644
--- a/drivers/staging/rtl8192u/r819xU_phy.c
+++ b/drivers/staging/rtl8192u/r819xU_phy.c
@@ -1463,6 +1463,7 @@ void rtl8192_SwChnl_WorkItem(struct net_device *dev)
u8 rtl8192_phy_SwChnl(struct net_device *dev, u8 channel)
{
struct r8192_priv *priv = ieee80211_priv(dev);
+
RT_TRACE(COMP_CH, "%s(), SwChnlInProgress: %d\n", __func__,
priv->SwChnlInProgress);
if (!priv->up)
diff --git a/drivers/staging/rtl8712/hal_init.c b/drivers/staging/rtl8712/hal_init.c
index 1d6ade05fa1..324da34383e 100644
--- a/drivers/staging/rtl8712/hal_init.c
+++ b/drivers/staging/rtl8712/hal_init.c
@@ -86,7 +86,7 @@ static u32 rtl871x_open_fw(struct _adapter *padapter, const u8 **ppmappedfw)
(int)padapter->fw->size);
return 0;
}
- *ppmappedfw = (u8 *)((*praw)->data);
+ *ppmappedfw = (*praw)->data;
return (*praw)->size;
}
@@ -136,15 +136,10 @@ static void update_fwhdr(struct fw_hdr *pfwhdr, const u8 *pmappedfw)
static u8 chk_fwhdr(struct fw_hdr *pfwhdr, u32 ulfilelength)
{
u32 fwhdrsz, fw_sz;
- u8 intf, rfconf;
/* check signature */
if ((pfwhdr->signature != 0x8712) && (pfwhdr->signature != 0x8192))
return _FAIL;
- /* check interface */
- intf = (u8)((pfwhdr->version&0x3000) >> 12);
- /* check rf_conf */
- rfconf = (u8)((pfwhdr->version&0xC000) >> 14);
/* check fw_priv_sze & sizeof(struct fw_priv) */
if (pfwhdr->fw_priv_sz != sizeof(struct fw_priv))
return _FAIL;
@@ -162,7 +157,7 @@ static u8 rtl8712_dl_fw(struct _adapter *padapter)
sint i;
u8 tmp8, tmp8_a;
u16 tmp16;
- u32 maxlen = 0, tmp32; /* for compare usage */
+ u32 maxlen = 0; /* for compare usage */
uint dump_imem_sz, imem_sz, dump_emem_sz, emem_sz; /* max = 49152; */
struct fw_hdr fwhdr;
u32 ulfilelength; /* FW file size */
@@ -262,7 +257,7 @@ static u8 rtl8712_dl_fw(struct _adapter *padapter)
if (tmp8_a != (tmp8|BIT(2)))
goto exit_fail;
- tmp32 = r8712_read32(padapter, TCR);
+ r8712_read32(padapter, TCR);
/* 4.polling IMEM Ready */
i = 100;
diff --git a/drivers/staging/rtl8712/ieee80211.c b/drivers/staging/rtl8712/ieee80211.c
index fe9459e483c..57868085ce5 100644
--- a/drivers/staging/rtl8712/ieee80211.c
+++ b/drivers/staging/rtl8712/ieee80211.c
@@ -124,11 +124,10 @@ u8 *r8712_get_ie(u8 *pbuf, sint index, sint *len, sint limit)
if (*p == index) {
*len = *(p + 1);
return p;
- } else {
- tmp = *(p + 1);
- p += (tmp + 2);
- i += (tmp + 2);
}
+ tmp = *(p + 1);
+ p += (tmp + 2);
+ i += (tmp + 2);
if (i >= limit)
break;
}
@@ -237,10 +236,9 @@ unsigned char *r8712_get_wpa_ie(unsigned char *pie, int *wpa_ie_len, int limit)
goto check_next_ie;
*wpa_ie_len = *(pbuf + 1);
return pbuf;
- } else {
- *wpa_ie_len = 0;
- return NULL;
}
+ *wpa_ie_len = 0;
+ return NULL;
check_next_ie:
limit = limit - (pbuf - pie) - 2 - len;
if (limit <= 0)
@@ -370,13 +368,12 @@ int r8712_parse_wpa2_ie(u8 *rsn_ie, int rsn_ie_len, int *group_cipher,
int r8712_get_sec_ie(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len,
u8 *wpa_ie, u16 *wpa_len)
{
- u8 authmode, sec_idx;
+ u8 authmode;
u8 wpa_oui[4] = {0x0, 0x50, 0xf2, 0x01};
uint cnt;
/*Search required WPA or WPA2 IE and copy to sec_ie[ ]*/
cnt = (_TIMESTAMP_ + _BEACON_ITERVAL_ + _CAPABILITY_);
- sec_idx = 0;
while (cnt < in_len) {
authmode = in_ie[cnt];
if ((authmode == _WPA_IE_ID_) &&
@@ -414,7 +411,7 @@ int r8712_get_wps_ie(u8 *in_ie, uint in_len, u8 *wps_ie, uint *wps_ielen)
cnt += in_ie[cnt+1]+2;
match = true;
break;
- } else
+ }
cnt += in_ie[cnt+1]+2; /* goto next */
}
return match;
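
Several of the rtl8712 hunks apply the same rule: when an if branch ends in return, break or goto, the trailing else only adds an indentation level and can be dropped. A minimal sketch of the resulting shape, walking information elements the way r8712_get_ie() does; the function and buffer names are illustrative:

static const u8 *find_ie_sketch(const u8 *buf, u8 id, int limit)
{
        int i = 0;

        while (i + 1 < limit) {
                if (buf[i] == id)
                        return &buf[i];         /* early return ... */

                i += 2 + buf[i + 1];            /* ... so no else is needed */
        }
        return NULL;
}
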
diff --git a/drivers/staging/rtl8712/osdep_service.h b/drivers/staging/rtl8712/osdep_service.h
index d7a357b8d2d..5153ad9c2c7 100644
--- a/drivers/staging/rtl8712/osdep_service.h
+++ b/drivers/staging/rtl8712/osdep_service.h
@@ -88,17 +88,13 @@ static inline u32 _down_sema(struct semaphore *sema)
{
if (down_interruptible(sema))
return _FAIL;
- else
- return _SUCCESS;
+ return _SUCCESS;
}
static inline u32 end_of_queue_search(struct list_head *head,
struct list_head *plist)
{
- if (head == plist)
- return true;
- else
- return false;
+ return (head == plist);
}
static inline void sleep_schedulable(int ms)
diff --git a/drivers/staging/rtl8712/recv_linux.c b/drivers/staging/rtl8712/recv_linux.c
index 495ee1205e0..0631f363825 100644
--- a/drivers/staging/rtl8712/recv_linux.c
+++ b/drivers/staging/rtl8712/recv_linux.c
@@ -146,7 +146,7 @@ void r8712_os_read_port(struct _adapter *padapter, struct recv_buf *precvbuf)
dev_kfree_skb_any(precvbuf->pskb);
precvbuf->pskb = NULL;
precvbuf->reuse = false;
- if (precvbuf->irp_pending == false)
+ if (!precvbuf->irp_pending)
r8712_read_port(padapter, precvpriv->ff_hwaddr, 0,
(unsigned char *)precvbuf);
}
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c
index 720e8a15db9..62e53cc1d8b 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.c
+++ b/drivers/staging/rtl8712/rtl8712_cmd.c
@@ -157,10 +157,8 @@ static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf)
{
u32 val;
void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
- struct readBB_parm *prdbbparm;
struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
- prdbbparm = (struct readBB_parm *)pcmd->parmbuf;
if (pcmd->rsp && pcmd->rspsz > 0)
memcpy(pcmd->rsp, (u8 *)&val, pcmd->rspsz);
pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
@@ -174,10 +172,8 @@ static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf)
static u8 write_bbreg_hdl(struct _adapter *padapter, u8 *pbuf)
{
void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
- struct writeBB_parm *pwritebbparm;
struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
- pwritebbparm = (struct writeBB_parm *)pcmd->parmbuf;
pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
if (pcmd_callback == NULL)
r8712_free_cmd_obj(pcmd);
@@ -190,10 +186,8 @@ static u8 read_rfreg_hdl(struct _adapter *padapter, u8 *pbuf)
{
u32 val;
void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
- struct readRF_parm *prdrfparm;
struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
- prdrfparm = (struct readRF_parm *)pcmd->parmbuf;
if (pcmd->rsp && pcmd->rspsz > 0)
memcpy(pcmd->rsp, (u8 *)&val, pcmd->rspsz);
pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
@@ -207,10 +201,8 @@ static u8 read_rfreg_hdl(struct _adapter *padapter, u8 *pbuf)
static u8 write_rfreg_hdl(struct _adapter *padapter, u8 *pbuf)
{
void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
- struct writeRF_parm *pwriterfparm;
struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
- pwriterfparm = (struct writeRF_parm *)pcmd->parmbuf;
pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
if (pcmd_callback == NULL)
r8712_free_cmd_obj(pcmd);
@@ -222,9 +214,7 @@ static u8 write_rfreg_hdl(struct _adapter *padapter, u8 *pbuf)
static u8 sys_suspend_hdl(struct _adapter *padapter, u8 *pbuf)
{
struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
- struct usb_suspend_parm *psetusbsuspend;
- psetusbsuspend = (struct usb_suspend_parm *)pcmd->parmbuf;
r8712_free_cmd_obj(pcmd);
return H2C_SUCCESS;
}
@@ -320,7 +310,7 @@ void r8712_fw_cmd_data(struct _adapter *pAdapter, u32 *value, u8 flag)
int r8712_cmd_thread(void *context)
{
struct cmd_obj *pcmd;
- unsigned int cmdsz, wr_sz, *pcmdbuf, *prspbuf;
+ unsigned int cmdsz, wr_sz, *pcmdbuf;
struct tx_desc *pdesc;
void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
struct _adapter *padapter = (struct _adapter *)context;
@@ -342,7 +332,6 @@ _next:
continue;
}
pcmdbuf = (unsigned int *)pcmdpriv->cmd_buf;
- prspbuf = (unsigned int *)pcmdpriv->rsp_buf;
pdesc = (struct tx_desc *)pcmdbuf;
memset(pdesc, 0, TXDESC_SIZE);
pcmd = cmd_hdl_filter(padapter, pcmd);
diff --git a/drivers/staging/rtl8712/rtl8712_efuse.c b/drivers/staging/rtl8712/rtl8712_efuse.c
index c9eeb4270ab..d9571699949 100644
--- a/drivers/staging/rtl8712/rtl8712_efuse.c
+++ b/drivers/staging/rtl8712/rtl8712_efuse.c
@@ -219,13 +219,12 @@ u16 r8712_efuse_get_current_size(struct _adapter *padapter)
{
int bContinual = true;
u16 efuse_addr = 0;
- u8 hoffset = 0, hworden = 0;
+ u8 hworden = 0;
u8 efuse_data, word_cnts = 0;
while (bContinual && efuse_one_byte_read(padapter, efuse_addr,
&efuse_data) && (efuse_addr < efuse_available_max_size)) {
if (efuse_data != 0xFF) {
- hoffset = (efuse_data >> 4) & 0x0F;
hworden = efuse_data & 0x0F;
word_cnts = calculate_word_cnts(hworden);
/* read next header */
@@ -414,19 +413,18 @@ u8 r8712_efuse_pg_packet_write(struct _adapter *padapter, const u8 offset,
bResult = false;
}
break;
- } else { /* write header fail */
- bResult = false;
- if (0xFF == efuse_data)
- return bResult; /* nothing damaged. */
- /* call rescue procedure */
- if (fix_header(padapter, efuse_data, efuse_addr) ==
- false)
- return false; /* rescue fail */
-
- if (++repeat_times > _REPEAT_THRESHOLD_) /* fail */
- break;
- /* otherwise, take another risk... */
}
+ /* write header fail */
+ bResult = false;
+ if (0xFF == efuse_data)
+ return bResult; /* nothing damaged. */
+ /* call rescue procedure */
+ if (!fix_header(padapter, efuse_data, efuse_addr))
+ return false; /* rescue fail */
+
+ if (++repeat_times > _REPEAT_THRESHOLD_) /* fail */
+ break;
+ /* otherwise, take another risk... */
}
return bResult;
}
@@ -541,15 +539,16 @@ u8 r8712_efuse_map_write(struct _adapter *padapter, u16 addr, u16 cnts,
}
idx++;
break;
- } else {
- if ((data[idx] != pktdata[i]) || (data[idx+1] !=
- pktdata[i+1])) {
- word_en &= ~BIT(i >> 1);
- newdata[j++] = data[idx];
- newdata[j++] = data[idx + 1];
- }
- idx += 2;
}
+
+ if ((data[idx] != pktdata[i]) || (data[idx+1] !=
+ pktdata[i+1])) {
+ word_en &= ~BIT(i >> 1);
+ newdata[j++] = data[idx];
+ newdata[j++] = data[idx + 1];
+ }
+ idx += 2;
+
if (idx == cnts)
break;
}
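
The rewritten block above is a bounded write-and-verify sequence: write a header byte, read it back, and if it did not stick either give up (the cell is still blank, so nothing is damaged) or run a rescue step, retrying only a limited number of times. A compact sketch of that control flow; the efuse_* helpers and the retry limit are illustrative stand-ins, not the driver's API:

#define HDR_RETRY_MAX 3

static bool efuse_write_byte(u16 addr, u8 val);         /* stand-in */
static bool efuse_read_byte(u16 addr, u8 *val);         /* stand-in */
static bool efuse_rescue(u8 bad_hdr, u16 addr);         /* stand-in */

static bool write_header_with_retry(u16 addr, u8 hdr)
{
        u8 readback;
        int tries = 0;

        for (;;) {
                if (!efuse_write_byte(addr, hdr) ||
                    !efuse_read_byte(addr, &readback))
                        return false;
                if (readback == hdr)
                        return true;            /* header stuck: success */
                if (readback == 0xFF)
                        return false;           /* nothing written, nothing damaged */
                if (!efuse_rescue(readback, addr))
                        return false;           /* rescue procedure failed */
                if (++tries > HDR_RETRY_MAX)
                        return false;           /* bounded retries exhausted */
        }
}
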
diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
index b2780620926..cd8b444b255 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.c
+++ b/drivers/staging/rtl8712/rtl8712_recv.c
@@ -158,8 +158,6 @@ int r8712_free_recvframe(union recv_frame *precvframe,
static void update_recvframe_attrib_from_recvstat(struct rx_pkt_attrib *pattrib,
struct recv_stat *prxstat)
{
- u32 *pphy_info;
- struct phy_stat *pphy_stat;
u16 drvinfo_sz = 0;
drvinfo_sz = (le32_to_cpu(prxstat->rxdw0)&0x000f0000)>>16;
@@ -189,10 +187,6 @@ static void update_recvframe_attrib_from_recvstat(struct rx_pkt_attrib *pattrib,
/*Offset 16*/
/*Offset 20*/
/*phy_info*/
- if (drvinfo_sz) {
- pphy_stat = (struct phy_stat *)(prxstat+1);
- pphy_info = (u32 *)prxstat+1;
- }
}
/*perform defrag*/
@@ -200,7 +194,7 @@ static union recv_frame *recvframe_defrag(struct _adapter *adapter,
struct __queue *defrag_q)
{
struct list_head *plist, *phead;
- u8 *data, wlanhdr_offset;
+ u8 wlanhdr_offset;
u8 curfragnum;
struct recv_frame_hdr *pfhdr, *pnfhdr;
union recv_frame *prframe, *pnextrframe;
@@ -223,7 +217,6 @@ static union recv_frame *recvframe_defrag(struct _adapter *adapter,
curfragnum++;
plist = &defrag_q->queue;
plist = plist->next;
- data = get_recvframe_data(prframe);
while (end_of_queue_search(phead, plist) == false) {
pnextrframe = LIST_CONTAINOR(plist, union recv_frame, u);
pnfhdr = &pnextrframe->u.hdr;
@@ -436,13 +429,11 @@ void r8712_rxcmd_event_hdl(struct _adapter *padapter, void *prxcmdbuf)
{
uint voffset;
u8 *poffset;
- u16 pkt_len, cmd_len, drvinfo_sz;
- u8 eid, cmd_seq;
+ u16 cmd_len, drvinfo_sz;
struct recv_stat *prxstat;
poffset = (u8 *)prxcmdbuf;
voffset = *(uint *)poffset;
- pkt_len = le32_to_cpu(voffset) & 0x00003fff;
prxstat = (struct recv_stat *)prxcmdbuf;
drvinfo_sz = ((le32_to_cpu(prxstat->rxdw0) & 0x000f0000) >> 16);
drvinfo_sz = drvinfo_sz << 3;
@@ -450,8 +441,6 @@ void r8712_rxcmd_event_hdl(struct _adapter *padapter, void *prxcmdbuf)
do {
voffset = *(uint *)poffset;
cmd_len = (u16)(le32_to_cpu(voffset) & 0xffff);
- cmd_seq = (u8)((le32_to_cpu(voffset) >> 24) & 0x7f);
- eid = (u8)((le32_to_cpu(voffset) >> 16) & 0xff);
r8712_event_handle(padapter, (uint *)poffset);
poffset += (cmd_len + 8);/*8 bytes alignment*/
} while (le32_to_cpu(voffset) & BIT(31));
@@ -533,11 +522,10 @@ int r8712_recv_indicatepkts_in_order(struct _adapter *padapter,
if (bforced == true) {
if (list_empty(phead))
return true;
- else {
- prframe = LIST_CONTAINOR(plist, union recv_frame, u);
- pattrib = &prframe->u.hdr.attrib;
- preorder_ctrl->indicate_seq = pattrib->seq_num;
- }
+
+ prframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ pattrib = &prframe->u.hdr.attrib;
+ preorder_ctrl->indicate_seq = pattrib->seq_num;
}
/* Prepare indication list and indication.
* Check if there is any packet need indicate. */
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c
index d9c1561e327..fe5e315319f 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.c
+++ b/drivers/staging/rtl8712/rtl871x_cmd.c
@@ -205,12 +205,12 @@ void r8712_free_cmd_obj(struct cmd_obj *pcmd)
{
if ((pcmd->cmdcode != _JoinBss_CMD_) &&
(pcmd->cmdcode != _CreateBss_CMD_))
- kfree((unsigned char *)pcmd->parmbuf);
+ kfree(pcmd->parmbuf);
if (pcmd->rsp != NULL) {
if (pcmd->rspsz != 0)
- kfree((unsigned char *)pcmd->rsp);
+ kfree(pcmd->rsp);
}
- kfree((unsigned char *)pcmd);
+ kfree(pcmd);
}
/*
@@ -232,7 +232,7 @@ u8 r8712_sitesurvey_cmd(struct _adapter *padapter,
return _FAIL;
psurveyPara = kmalloc(sizeof(*psurveyPara), GFP_ATOMIC);
if (psurveyPara == NULL) {
- kfree((unsigned char *) ph2c);
+ kfree(ph2c);
return _FAIL;
}
init_h2fwcmd_w_parm_no_rsp(ph2c, psurveyPara,
@@ -264,7 +264,7 @@ u8 r8712_setdatarate_cmd(struct _adapter *padapter, u8 *rateset)
return _FAIL;
pbsetdataratepara = kmalloc(sizeof(*pbsetdataratepara), GFP_ATOMIC);
if (pbsetdataratepara == NULL) {
- kfree((u8 *) ph2c);
+ kfree(ph2c);
return _FAIL;
}
init_h2fwcmd_w_parm_no_rsp(ph2c, pbsetdataratepara,
@@ -286,7 +286,7 @@ u8 r8712_set_chplan_cmd(struct _adapter *padapter, int chplan)
return _FAIL;
psetchplanpara = kmalloc(sizeof(*psetchplanpara), GFP_ATOMIC);
if (psetchplanpara == NULL) {
- kfree((u8 *) ph2c);
+ kfree(ph2c);
return _FAIL;
}
init_h2fwcmd_w_parm_no_rsp(ph2c, psetchplanpara,
@@ -307,7 +307,7 @@ u8 r8712_setbasicrate_cmd(struct _adapter *padapter, u8 *rateset)
return _FAIL;
pssetbasicratepara = kmalloc(sizeof(*pssetbasicratepara), GFP_ATOMIC);
if (pssetbasicratepara == NULL) {
- kfree((u8 *) ph2c);
+ kfree(ph2c);
return _FAIL;
}
init_h2fwcmd_w_parm_no_rsp(ph2c, pssetbasicratepara,
@@ -329,7 +329,7 @@ u8 r8712_setptm_cmd(struct _adapter *padapter, u8 type)
return _FAIL;
pwriteptmparm = kmalloc(sizeof(*pwriteptmparm), GFP_ATOMIC);
if (pwriteptmparm == NULL) {
- kfree((u8 *) ph2c);
+ kfree(ph2c);
return _FAIL;
}
init_h2fwcmd_w_parm_no_rsp(ph2c, pwriteptmparm, GEN_CMD_CODE(_SetPT));
@@ -349,7 +349,7 @@ u8 r8712_setfwdig_cmd(struct _adapter *padapter, u8 type)
return _FAIL;
pwriteptmparm = kmalloc(sizeof(*pwriteptmparm), GFP_ATOMIC);
if (pwriteptmparm == NULL) {
- kfree((u8 *) ph2c);
+ kfree(ph2c);
return _FAIL;
}
init_h2fwcmd_w_parm_no_rsp(ph2c, pwriteptmparm, GEN_CMD_CODE(_SetDIG));
@@ -369,7 +369,7 @@ u8 r8712_setfwra_cmd(struct _adapter *padapter, u8 type)
return _FAIL;
pwriteptmparm = kmalloc(sizeof(*pwriteptmparm), GFP_ATOMIC);
if (pwriteptmparm == NULL) {
- kfree((u8 *) ph2c);
+ kfree(ph2c);
return _FAIL;
}
init_h2fwcmd_w_parm_no_rsp(ph2c, pwriteptmparm, GEN_CMD_CODE(_SetRA));
@@ -389,7 +389,7 @@ u8 r8712_setrfreg_cmd(struct _adapter *padapter, u8 offset, u32 val)
return _FAIL;
pwriterfparm = kmalloc(sizeof(*pwriterfparm), GFP_ATOMIC);
if (pwriterfparm == NULL) {
- kfree((u8 *) ph2c);
+ kfree(ph2c);
return _FAIL;
}
init_h2fwcmd_w_parm_no_rsp(ph2c, pwriterfparm, GEN_CMD_CODE(_SetRFReg));
@@ -410,7 +410,7 @@ u8 r8712_getrfreg_cmd(struct _adapter *padapter, u8 offset, u8 *pval)
return _FAIL;
prdrfparm = kmalloc(sizeof(*prdrfparm), GFP_ATOMIC);
if (prdrfparm == NULL) {
- kfree((u8 *) ph2c);
+ kfree(ph2c);
return _FAIL;
}
INIT_LIST_HEAD(&ph2c->list);
@@ -470,7 +470,6 @@ u8 r8712_createbss_cmd(struct _adapter *padapter)
u8 r8712_joinbss_cmd(struct _adapter *padapter, struct wlan_network *pnetwork)
{
- u8 *auth;
uint t_len = 0;
struct ndis_wlan_bssid_ex *psecnetwork;
struct cmd_obj *pcmd;
@@ -517,7 +516,6 @@ u8 r8712_joinbss_cmd(struct _adapter *padapter, struct wlan_network *pnetwork)
return _FAIL;
}
memcpy(psecnetwork, &pnetwork->network, t_len);
- auth = &psecuritypriv->authenticator_ie[0];
psecuritypriv->authenticator_ie[0] = (unsigned char)
psecnetwork->IELength;
if ((psecnetwork->IELength-12) < (256 - 1))
@@ -527,7 +525,7 @@ u8 r8712_joinbss_cmd(struct _adapter *padapter, struct wlan_network *pnetwork)
memcpy(&psecuritypriv->authenticator_ie[1],
&psecnetwork->IEs[12], (256-1));
psecnetwork->IELength = 0;
- /* If the the driver wants to use the bssid to create the connection.
+ /* If the driver wants to use the bssid to create the connection.
* If not, we copy the connecting AP's MAC address to it so that
* the driver just has the bssid information for PMKIDList searching.
*/
@@ -626,7 +624,7 @@ u8 r8712_disassoc_cmd(struct _adapter *padapter) /* for sta_mode */
return _FAIL;
pdisconnect = kmalloc(sizeof(*pdisconnect), GFP_ATOMIC);
if (pdisconnect == NULL) {
- kfree((u8 *)pdisconnect_cmd);
+ kfree(pdisconnect_cmd);
return _FAIL;
}
init_h2fwcmd_w_parm_no_rsp(pdisconnect_cmd, pdisconnect,
@@ -648,7 +646,7 @@ u8 r8712_setopmode_cmd(struct _adapter *padapter,
return _FAIL;
psetop = kmalloc(sizeof(*psetop), GFP_ATOMIC);
if (psetop == NULL) {
- kfree((u8 *) ph2c);
+ kfree(ph2c);
return _FAIL;
}
init_h2fwcmd_w_parm_no_rsp(ph2c, psetop, _SetOpMode_CMD_);
@@ -672,13 +670,13 @@ u8 r8712_setstakey_cmd(struct _adapter *padapter, u8 *psta, u8 unicast_key)
return _FAIL;
psetstakey_para = kmalloc(sizeof(*psetstakey_para), GFP_ATOMIC);
if (psetstakey_para == NULL) {
- kfree((u8 *) ph2c);
+ kfree(ph2c);
return _FAIL;
}
psetstakey_rsp = kmalloc(sizeof(*psetstakey_rsp), GFP_ATOMIC);
if (psetstakey_rsp == NULL) {
- kfree((u8 *) ph2c);
- kfree((u8 *) psetstakey_para);
+ kfree(ph2c);
+ kfree(psetstakey_para);
return _FAIL;
}
init_h2fwcmd_w_parm_no_rsp(ph2c, psetstakey_para, _SetStaKey_CMD_);
@@ -712,7 +710,7 @@ u8 r8712_setrfintfs_cmd(struct _adapter *padapter, u8 mode)
return _FAIL;
psetrfintfsparm = kmalloc(sizeof(*psetrfintfsparm), GFP_ATOMIC);
if (psetrfintfsparm == NULL) {
- kfree((unsigned char *) ph2c);
+ kfree(ph2c);
return _FAIL;
}
init_h2fwcmd_w_parm_no_rsp(ph2c, psetrfintfsparm,
@@ -734,7 +732,7 @@ u8 r8712_setrttbl_cmd(struct _adapter *padapter,
return _FAIL;
psetrttblparm = kmalloc(sizeof(*psetrttblparm), GFP_ATOMIC);
if (psetrttblparm == NULL) {
- kfree((unsigned char *)ph2c);
+ kfree(ph2c);
return _FAIL;
}
init_h2fwcmd_w_parm_no_rsp(ph2c, psetrttblparm,
@@ -755,7 +753,7 @@ u8 r8712_gettssi_cmd(struct _adapter *padapter, u8 offset, u8 *pval)
return _FAIL;
prdtssiparm = kmalloc(sizeof(*prdtssiparm), GFP_ATOMIC);
if (prdtssiparm == NULL) {
- kfree((unsigned char *) ph2c);
+ kfree(ph2c);
return _FAIL;
}
INIT_LIST_HEAD(&ph2c->list);
@@ -781,7 +779,7 @@ u8 r8712_setMacAddr_cmd(struct _adapter *padapter, u8 *mac_addr)
return _FAIL;
psetMacAddr_para = kmalloc(sizeof(*psetMacAddr_para), GFP_ATOMIC);
if (psetMacAddr_para == NULL) {
- kfree((u8 *) ph2c);
+ kfree(ph2c);
return _FAIL;
}
init_h2fwcmd_w_parm_no_rsp(ph2c, psetMacAddr_para,
@@ -803,13 +801,13 @@ u8 r8712_setassocsta_cmd(struct _adapter *padapter, u8 *mac_addr)
return _FAIL;
psetassocsta_para = kmalloc(sizeof(*psetassocsta_para), GFP_ATOMIC);
if (psetassocsta_para == NULL) {
- kfree((u8 *) ph2c);
+ kfree(ph2c);
return _FAIL;
}
psetassocsta_rsp = kmalloc(sizeof(*psetassocsta_rsp), GFP_ATOMIC);
if (psetassocsta_rsp == NULL) {
- kfree((u8 *)ph2c);
- kfree((u8 *)psetassocsta_para);
+ kfree(ph2c);
+ kfree(psetassocsta_para);
return _FAIL;
}
init_h2fwcmd_w_parm_no_rsp(ph2c, psetassocsta_para, _SetAssocSta_CMD_);
@@ -831,7 +829,7 @@ u8 r8712_addbareq_cmd(struct _adapter *padapter, u8 tid)
return _FAIL;
paddbareq_parm = kmalloc(sizeof(*paddbareq_parm), GFP_ATOMIC);
if (paddbareq_parm == NULL) {
- kfree((unsigned char *)ph2c);
+ kfree(ph2c);
return _FAIL;
}
paddbareq_parm->tid = tid;
@@ -852,7 +850,7 @@ u8 r8712_wdg_wk_cmd(struct _adapter *padapter)
return _FAIL;
pdrvintcmd_param = kmalloc(sizeof(*pdrvintcmd_param), GFP_ATOMIC);
if (pdrvintcmd_param == NULL) {
- kfree((unsigned char *)ph2c);
+ kfree(ph2c);
return _FAIL;
}
pdrvintcmd_param->i_cid = WDG_WK_CID;
@@ -1026,7 +1024,7 @@ u8 r8712_disconnectCtrlEx_cmd(struct _adapter *adapter, u32 enableDrvCtrl,
return _FAIL;
param = kzalloc(sizeof(*param), GFP_ATOMIC);
if (param == NULL) {
- kfree((unsigned char *) ph2c);
+ kfree(ph2c);
return _FAIL;
}
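
The long run of kfree() changes above rests on one fact: kfree() takes a void *, so casting its argument to (u8 *) or (unsigned char *) adds nothing. The usual two-allocation error path then reads like this sketch, assuming a cmd_obj with a parmbuf member as the calls above suggest:

static int alloc_cmd_with_parm(struct cmd_obj **out, size_t parm_sz)
{
        struct cmd_obj *cmd;
        void *parm;

        cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
        if (!cmd)
                return -ENOMEM;

        parm = kmalloc(parm_sz, GFP_ATOMIC);
        if (!parm) {
                kfree(cmd);             /* no cast needed: kfree() takes void * */
                return -ENOMEM;
        }

        cmd->parmbuf = parm;
        *out = cmd;
        return 0;
}
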
diff --git a/drivers/staging/rtl8712/rtl871x_io.c b/drivers/staging/rtl8712/rtl871x_io.c
index d7b63aedead..e4e5b13cb92 100644
--- a/drivers/staging/rtl8712/rtl871x_io.c
+++ b/drivers/staging/rtl8712/rtl871x_io.c
@@ -93,7 +93,7 @@ static uint register_intf_hdl(u8 *dev, struct intf_hdl *pintfhdl)
pintfhdl->intf_option = 0;
pintfhdl->adapter = dev;
pintfhdl->intf_dev = (u8 *)&(adapter->dvobjpriv);
- if (_init_intf_hdl(adapter, pintfhdl) == false)
+ if (!_init_intf_hdl(adapter, pintfhdl))
goto register_intf_hdl_fail;
return _SUCCESS;
register_intf_hdl_fail:
@@ -142,7 +142,7 @@ uint r8712_alloc_io_queue(struct _adapter *adapter)
alloc_io_queue_fail:
if (pio_queue) {
kfree(pio_queue->pallocated_free_ioreqs_buf);
- kfree((u8 *)pio_queue);
+ kfree(pio_queue);
}
adapter->pio_queue = NULL;
return _FAIL;
@@ -156,6 +156,6 @@ void r8712_free_io_queue(struct _adapter *adapter)
kfree(pio_queue->pallocated_free_ioreqs_buf);
adapter->pio_queue = NULL;
unregister_intf_hdl(&pio_queue->intf);
- kfree((u8 *)pio_queue);
+ kfree(pio_queue);
}
}
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index 8e42ce06e5d..73b7d864ccb 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -170,7 +170,7 @@ static inline char *translate_scan(struct _adapter *padapter,
s8 *p;
u32 i = 0, ht_ielen = 0;
u16 cap, ht_cap = false, mcs_rate;
- u8 rssi, bw_40MHz = 0, short_GI = 0;
+ u8 rssi;
if ((pnetwork->network.Configuration.DSConfig < 1) ||
(pnetwork->network.Configuration.DSConfig > 14)) {
@@ -197,10 +197,6 @@ static inline char *translate_scan(struct _adapter *padapter,
ht_cap = true;
pht_capie = (struct ieee80211_ht_cap *)(p + 2);
memcpy(&mcs_rate , pht_capie->supp_mcs_set, 2);
- bw_40MHz = (pht_capie->cap_info&IEEE80211_HT_CAP_SUP_WIDTH)
- ? 1 : 0;
- short_GI = (pht_capie->cap_info&(IEEE80211_HT_CAP_SGI_20 |
- IEEE80211_HT_CAP_SGI_40)) ? 1 : 0;
}
/* Add the protocol name */
iwe.cmd = SIOCGIWNAME;
@@ -287,12 +283,10 @@ static inline char *translate_scan(struct _adapter *padapter,
u8 wpa_ie[255], rsn_ie[255];
u16 wpa_len = 0, rsn_len = 0;
int n;
- sint out_len = 0;
- out_len = r8712_get_sec_ie(pnetwork->network.IEs,
- pnetwork->network.
- IELength, rsn_ie, &rsn_len,
- wpa_ie, &wpa_len);
+ r8712_get_sec_ie(pnetwork->network.IEs,
+ pnetwork->network.IELength, rsn_ie, &rsn_len,
+ wpa_ie, &wpa_len);
if (wpa_len > 0) {
memset(buf, 0, MAX_WPA_IE_LEN);
n = sprintf(buf, "wpa_ie=");
@@ -505,14 +499,14 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
}
}
exit:
- kfree((u8 *)pwep);
+ kfree(pwep);
return ret;
}
static int r871x_set_wpa_ie(struct _adapter *padapter, char *pie,
unsigned short ielen)
{
- u8 *buf = NULL, *pos = NULL;
+ u8 *buf = NULL;
int group_cipher = 0, pairwise_cipher = 0;
int ret = 0;
@@ -522,7 +516,6 @@ static int r871x_set_wpa_ie(struct _adapter *padapter, char *pie,
buf = kmemdup(pie, ielen, GFP_ATOMIC);
if (buf == NULL)
return -ENOMEM;
- pos = buf;
if (ielen < RSN_HEADER_LEN) {
ret = -EINVAL;
goto exit;
@@ -1133,13 +1126,11 @@ static int r871x_wx_set_mlme(struct net_device *dev,
union iwreq_data *wrqu, char *extra)
{
int ret = 0;
- u16 reason;
struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
struct iw_mlme *mlme = (struct iw_mlme *) extra;
if (mlme == NULL)
return -1;
- reason = cpu_to_le16(mlme->reason_code);
switch (mlme->cmd) {
case IW_MLME_DEAUTH:
if (!r8712_set_802_11_disassociate(padapter))
@@ -2216,7 +2207,7 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
}
if (ret == 0 && copy_to_user(p->pointer, param, p->length))
ret = -EFAULT;
- kfree((u8 *)param);
+ kfree(param);
return ret;
}
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_set.c b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
index 9d47eb47283..6318a0e65e6 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_set.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
@@ -77,7 +77,7 @@ static u8 do_join(struct _adapter *padapter)
/* when set_ssid/set_bssid for do_join(), but scanning queue
* is empty we try to issue sitesurvey firstly
*/
- if (pmlmepriv->sitesurveyctrl.traffic_busy == false)
+ if (!pmlmepriv->sitesurveyctrl.traffic_busy)
r8712_sitesurvey_cmd(padapter, &pmlmepriv->assoc_ssid);
return true;
} else {
@@ -143,8 +143,7 @@ u8 r8712_set_802_11_bssid(struct _adapter *padapter, u8 *bssid)
_FW_LINKED|WIFI_ADHOC_MASTER_STATE) == true) {
if (!memcmp(&pmlmepriv->cur_network.network.MacAddress, bssid,
ETH_ALEN)) {
- if (check_fwstate(pmlmepriv,
- WIFI_STATION_STATE) == false)
+ if (!check_fwstate(pmlmepriv, WIFI_STATION_STATE))
goto _Abort_Set_BSSID; /* driver is in
* WIFI_ADHOC_MASTER_STATE */
} else {
@@ -177,7 +176,7 @@ void r8712_set_802_11_ssid(struct _adapter *padapter,
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_network *pnetwork = &pmlmepriv->cur_network;
- if (padapter->hw_init_completed == false)
+ if (!padapter->hw_init_completed)
return;
spin_lock_irqsave(&pmlmepriv->lock, irqL);
if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY|_FW_UNDER_LINKING)) {
@@ -188,10 +187,9 @@ void r8712_set_802_11_ssid(struct _adapter *padapter,
if ((pmlmepriv->assoc_ssid.SsidLength == ssid->SsidLength) &&
(!memcmp(&pmlmepriv->assoc_ssid.Ssid, ssid->Ssid,
ssid->SsidLength))) {
- if ((check_fwstate(pmlmepriv,
- WIFI_STATION_STATE) == false)) {
- if (r8712_is_same_ibss(padapter,
- pnetwork) == false) {
+ if (!check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
+ if (!r8712_is_same_ibss(padapter,
+ pnetwork)) {
/* if in WIFI_ADHOC_MASTER_STATE or
* WIFI_ADHOC_STATE, create bss or
* rejoin again
@@ -227,7 +225,7 @@ void r8712_set_802_11_ssid(struct _adapter *padapter,
}
if (padapter->securitypriv.btkip_countermeasure == true)
goto _Abort_Set_SSID;
- if (validate_ssid(ssid) == false)
+ if (!validate_ssid(ssid))
goto _Abort_Set_SSID;
memcpy(&pmlmepriv->assoc_ssid, ssid, sizeof(struct ndis_802_11_ssid));
pmlmepriv->assoc_by_bssid = false;
@@ -308,10 +306,10 @@ u8 r8712_set_802_11_bssid_list_scan(struct _adapter *padapter)
unsigned long irqL;
u8 ret = true;
- if (padapter == NULL)
+ if (!padapter)
return false;
pmlmepriv = &padapter->mlmepriv;
- if (padapter->hw_init_completed == false)
+ if (!padapter->hw_init_completed)
return false;
spin_lock_irqsave(&pmlmepriv->lock, irqL);
if ((check_fwstate(pmlmepriv, _FW_UNDER_SURVEY|_FW_UNDER_LINKING)) ||
@@ -345,13 +343,9 @@ u8 r8712_set_802_11_authentication_mode(struct _adapter *padapter,
u8 r8712_set_802_11_add_wep(struct _adapter *padapter,
struct NDIS_802_11_WEP *wep)
{
- u8 bdefaultkey;
- u8 btransmitkey;
sint keyid;
struct security_priv *psecuritypriv = &padapter->securitypriv;
- bdefaultkey = (wep->KeyIndex & 0x40000000) > 0 ? false : true;
- btransmitkey = (wep->KeyIndex & 0x80000000) > 0 ? true : false;
keyid = wep->KeyIndex & 0x3fffffff;
if (keyid >= WEP_KEYS)
return false;
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
index 00f2e0fc4ac..b7462e8145d 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.c
+++ b/drivers/staging/rtl8712/rtl871x_mlme.c
@@ -923,7 +923,7 @@ void r8712_joinbss_event_callback(struct _adapter *adapter, u8 *pbuf)
ignore_joinbss_callback:
spin_unlock_irqrestore(&pmlmepriv->lock, irqL);
if (sizeof(struct list_head) == 4 * sizeof(u32))
- kfree((u8 *)pnetwork);
+ kfree(pnetwork);
}
void r8712_stassoc_event_callback(struct _adapter *adapter, u8 *pbuf)
@@ -1218,7 +1218,7 @@ sint r8712_set_auth(struct _adapter *adapter,
psetauthparm = kzalloc(sizeof(*psetauthparm), GFP_ATOMIC);
if (psetauthparm == NULL) {
- kfree((unsigned char *)pcmd);
+ kfree(pcmd);
return _FAIL;
}
psetauthparm->mode = (u8)psecuritypriv->AuthAlgrthm;
@@ -1372,7 +1372,7 @@ static int SecIsInPMKIDList(struct _adapter *Adapter, u8 *bssid)
sint r8712_restruct_sec_ie(struct _adapter *adapter, u8 *in_ie,
u8 *out_ie, uint in_len)
{
- u8 authmode = 0, securitytype, match;
+ u8 authmode = 0, match;
u8 sec_ie[255], uncst_oui[4], bkup_ie[255];
u8 wpa_oui[4] = {0x0, 0x50, 0xf2, 0x01};
uint ielength, cnt, remove_cnt;
@@ -1399,21 +1399,17 @@ sint r8712_restruct_sec_ie(struct _adapter *adapter, u8 *in_ie,
switch (ndissecuritytype) {
case Ndis802_11Encryption1Enabled:
case Ndis802_11Encryption1KeyAbsent:
- securitytype = _WEP40_;
uncst_oui[3] = 0x1;
break;
case Ndis802_11Encryption2Enabled:
case Ndis802_11Encryption2KeyAbsent:
- securitytype = _TKIP_;
uncst_oui[3] = 0x2;
break;
case Ndis802_11Encryption3Enabled:
case Ndis802_11Encryption3KeyAbsent:
- securitytype = _AES_;
uncst_oui[3] = 0x4;
break;
default:
- securitytype = _NO_PRIVACY_;
break;
}
/*Search required WPA or WPA2 IE and copy to sec_ie[] */
@@ -1705,7 +1701,7 @@ unsigned int r8712_restructure_ht_ie(struct _adapter *padapter, u8 *in_ie,
u8 *out_ie, uint in_len, uint *pout_len)
{
u32 ielen, out_len;
- unsigned char *p, *pframe;
+ unsigned char *p;
struct ieee80211_ht_cap ht_capie;
unsigned char WMM_IE[] = {0x00, 0x50, 0xf2, 0x02, 0x00, 0x01, 0x00};
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -1717,10 +1713,8 @@ unsigned int r8712_restructure_ht_ie(struct _adapter *padapter, u8 *in_ie,
if (p && (ielen > 0)) {
if (pqospriv->qos_option == 0) {
out_len = *pout_len;
- pframe = r8712_set_ie(out_ie+out_len,
- _VENDOR_SPECIFIC_IE_,
- _WMM_IE_Length_,
- WMM_IE, pout_len);
+ r8712_set_ie(out_ie+out_len, _VENDOR_SPECIFIC_IE_,
+ _WMM_IE_Length_, WMM_IE, pout_len);
pqospriv->qos_option = 1;
}
out_len = *pout_len;
@@ -1733,9 +1727,9 @@ unsigned int r8712_restructure_ht_ie(struct _adapter *padapter, u8 *in_ie,
IEEE80211_HT_CAP_DSSSCCK40;
ht_capie.ampdu_params_info = (IEEE80211_HT_CAP_AMPDU_FACTOR &
0x03) | (IEEE80211_HT_CAP_AMPDU_DENSITY & 0x00);
- pframe = r8712_set_ie(out_ie+out_len, _HT_CAPABILITY_IE_,
- sizeof(struct ieee80211_ht_cap),
- (unsigned char *)&ht_capie, pout_len);
+ r8712_set_ie(out_ie+out_len, _HT_CAPABILITY_IE_,
+ sizeof(struct ieee80211_ht_cap),
+ (unsigned char *)&ht_capie, pout_len);
phtpriv->ht_option = 1;
}
return phtpriv->ht_option;
@@ -1748,7 +1742,6 @@ static void update_ht_cap(struct _adapter *padapter, u8 *pie, uint ie_len)
int i, len;
struct sta_info *bmc_sta, *psta;
struct ieee80211_ht_cap *pht_capie;
- struct ieee80211_ht_addt_info *pht_addtinfo;
struct recv_reorder_ctrl *preorder_ctrl;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct ht_priv *phtpriv = &pmlmepriv->htpriv;
@@ -1801,8 +1794,6 @@ static void update_ht_cap(struct _adapter *padapter, u8 *pie, uint ie_len)
p = r8712_get_ie(pie + sizeof(struct NDIS_802_11_FIXED_IEs),
_HT_ADD_INFO_IE_, &len,
ie_len-sizeof(struct NDIS_802_11_FIXED_IEs));
- if (p && len > 0)
- pht_addtinfo = (struct ieee80211_ht_addt_info *)(p + 2);
}
void r8712_issue_addbareq_cmd(struct _adapter *padapter, int priority)
diff --git a/drivers/staging/rtl8712/rtl871x_mp.c b/drivers/staging/rtl8712/rtl871x_mp.c
index 2ec660ad78a..3d913b9701b 100644
--- a/drivers/staging/rtl8712/rtl871x_mp.c
+++ b/drivers/staging/rtl8712/rtl871x_mp.c
@@ -56,7 +56,7 @@ static int init_mp_priv(struct mp_priv *pmp_priv)
pmp_priv->pallocated_mp_xmitframe_buf = kmalloc(NR_MP_XMITFRAME *
sizeof(struct mp_xmit_frame) + 4,
GFP_ATOMIC);
- if (pmp_priv->pallocated_mp_xmitframe_buf == NULL) {
+ if (!pmp_priv->pallocated_mp_xmitframe_buf) {
res = _FAIL;
goto _exit_init_mp_priv;
}
@@ -173,7 +173,7 @@ u8 r8712_bb_reg_write(struct _adapter *pAdapter, u16 offset, u32 value)
oldValue = r8712_bb_reg_read(pAdapter, iocmd.value);
oldValue &= (0xFFFFFFFF >> ((4 - shift) * 8));
value = oldValue | (newValue << (shift * 8));
- if (fw_iocmd_write(pAdapter, iocmd, value) == false)
+ if (!fw_iocmd_write(pAdapter, iocmd, value))
return false;
iocmd.value += 4;
oldValue = r8712_bb_reg_read(pAdapter, iocmd.value);
diff --git a/drivers/staging/rtl8712/rtl871x_mp_ioctl.c b/drivers/staging/rtl8712/rtl871x_mp_ioctl.c
index 9827ff8143b..a16f15e9199 100644
--- a/drivers/staging/rtl8712/rtl871x_mp_ioctl.c
+++ b/drivers/staging/rtl8712/rtl871x_mp_ioctl.c
@@ -1431,11 +1431,8 @@ unsigned int mp_ioctl_xmit_packet_hdl(struct oid_par_priv *poid_par_priv)
/*-------------------------------------------------------------------------*/
uint oid_rt_set_power_down_hdl(struct oid_par_priv *poid_par_priv)
{
- u8 bpwrup;
-
if (poid_par_priv->type_of_oid != SET_OID)
return RNDIS_STATUS_NOT_ACCEPTED;
- bpwrup = *(u8 *)poid_par_priv->information_buf;
/*CALL the power_down function*/
return RNDIS_STATUS_SUCCESS;
}
diff --git a/drivers/staging/rtl8712/rtl871x_pwrctrl.c b/drivers/staging/rtl8712/rtl871x_pwrctrl.c
index 51dcf5555b6..ed2844d2b02 100644
--- a/drivers/staging/rtl8712/rtl871x_pwrctrl.c
+++ b/drivers/staging/rtl8712/rtl871x_pwrctrl.c
@@ -156,11 +156,9 @@ static void rpwm_workitem_callback(struct work_struct *work)
struct pwrctrl_priv, rpwm_workitem);
struct _adapter *padapter = container_of(pwrpriv,
struct _adapter, pwrctrlpriv);
- u8 cpwm = pwrpriv->cpwm;
-
if (pwrpriv->cpwm != pwrpriv->rpwm) {
_enter_pwrlock(&pwrpriv->lock);
- cpwm = r8712_read8(padapter, SDIO_HCPWM);
+ r8712_read8(padapter, SDIO_HCPWM);
pwrpriv->rpwm_retry = 1;
r8712_set_rpwm(padapter, pwrpriv->rpwm);
up(&pwrpriv->lock);
diff --git a/drivers/staging/rtl8712/rtl871x_recv.c b/drivers/staging/rtl8712/rtl871x_recv.c
index 9b99a71670f..06f15f81c4d 100644
--- a/drivers/staging/rtl8712/rtl871x_recv.c
+++ b/drivers/staging/rtl8712/rtl871x_recv.c
@@ -601,7 +601,7 @@ sint r8712_wlanhdr_to_ethhdr(union recv_frame *precvframe)
{
/*remove the wlanhdr and add the eth_hdr*/
sint rmv_len;
- u16 eth_type, len;
+ u16 len;
u8 bsnaphdr;
u8 *psnap_type;
struct ieee80211_snap_hdr *psnap;
@@ -635,7 +635,6 @@ sint r8712_wlanhdr_to_ethhdr(union recv_frame *precvframe)
ptr += rmv_len;
*ptr = 0x87;
*(ptr+1) = 0x12;
- eth_type = 0x8712;
/* append rx status for mp test packets */
ptr = recvframe_pull(precvframe, (rmv_len -
sizeof(struct ethhdr) + 2) - 24);
@@ -658,27 +657,11 @@ s32 r8712_recv_entry(union recv_frame *precvframe)
{
struct _adapter *padapter;
struct recv_priv *precvpriv;
- struct mlme_priv *pmlmepriv;
- struct recv_stat *prxstat;
- struct dvobj_priv *pdev;
- u8 *phead, *pdata, *ptail, *pend;
- struct __queue *pfree_recv_queue, *ppending_recv_queue;
s32 ret = _SUCCESS;
- struct intf_hdl *pintfhdl;
padapter = precvframe->u.hdr.adapter;
- pintfhdl = &padapter->pio_queue->intf;
- pmlmepriv = &padapter->mlmepriv;
precvpriv = &(padapter->recvpriv);
- pdev = &padapter->dvobjpriv;
- pfree_recv_queue = &(precvpriv->free_recv_queue);
- ppending_recv_queue = &(precvpriv->recv_pending_queue);
- phead = precvframe->u.hdr.rx_head;
- pdata = precvframe->u.hdr.rx_data;
- ptail = precvframe->u.hdr.rx_tail;
- pend = precvframe->u.hdr.rx_end;
- prxstat = (struct recv_stat *)phead;
padapter->ledpriv.LedControlHandler(padapter, LED_CTL_RX);
diff --git a/drivers/staging/rtl8712/rtl871x_recv.h b/drivers/staging/rtl8712/rtl871x_recv.h
index 92ca8997e5b..77487bb9d3c 100644
--- a/drivers/staging/rtl8712/rtl871x_recv.h
+++ b/drivers/staging/rtl8712/rtl871x_recv.h
@@ -170,11 +170,8 @@ static inline u8 *recvframe_put(union recv_frame *precvframe, sint sz)
/* used for append sz bytes from ptr to rx_tail, update rx_tail and
* return the updated rx_tail to the caller
* after putting, rx_tail must be still larger than rx_end. */
- unsigned char *prev_rx_tail;
-
if (precvframe == NULL)
return NULL;
- prev_rx_tail = precvframe->u.hdr.rx_tail;
precvframe->u.hdr.rx_tail += sz;
if (precvframe->u.hdr.rx_tail > precvframe->u.hdr.rx_end) {
precvframe->u.hdr.rx_tail -= sz;
diff --git a/drivers/staging/rtl8712/rtl871x_security.c b/drivers/staging/rtl8712/rtl871x_security.c
index 8faf22bb7c9..c653ad6854b 100644
--- a/drivers/staging/rtl8712/rtl871x_security.c
+++ b/drivers/staging/rtl8712/rtl871x_security.c
@@ -578,7 +578,7 @@ u32 r8712_tkip_encrypt(struct _adapter *padapter, u8 *pxmitframe)
u8 ttkey[16];
u8 crc[4];
struct arc4context mycontext;
- u32 curfragnum, length, prwskeylen;
+ u32 curfragnum, length;
u8 *pframe, *payload, *iv, *prwskey;
union pn48 txpn;
@@ -600,7 +600,6 @@ u32 r8712_tkip_encrypt(struct _adapter *padapter, u8 *pxmitframe)
&pattrib->ra[0]);
if (stainfo != NULL) {
prwskey = &stainfo->x_UncstKey.skey[0];
- prwskeylen = 16;
for (curfragnum = 0; curfragnum < pattrib->nr_frags;
curfragnum++) {
iv = pframe + pattrib->hdrlen;
@@ -655,7 +654,7 @@ u32 r8712_tkip_decrypt(struct _adapter *padapter, u8 *precvframe)
u8 ttkey[16];
u8 crc[4];
struct arc4context mycontext;
- u32 length, prwskeylen;
+ u32 length;
u8 *pframe, *payload, *iv, *prwskey, idx = 0;
union pn48 txpn;
struct sta_info *stainfo;
@@ -683,7 +682,6 @@ u32 r8712_tkip_decrypt(struct _adapter *padapter, u8 *precvframe)
return _FAIL;
} else
prwskey = &stainfo->x_UncstKey.skey[0];
- prwskeylen = 16;
GET_TKIP_PN(iv, txpn);
pnl = (u16)(txpn.val);
pnh = (u32)(txpn.val >> 16);
@@ -1154,7 +1152,6 @@ u32 r8712_aes_encrypt(struct _adapter *padapter, u8 *pxmitframe)
{ /* exclude ICV */
/* Intermediate Buffers */
sint curfragnum, length;
- u32 prwskeylen;
u8 *pframe, *prwskey;
struct sta_info *stainfo;
struct pkt_attrib *pattrib = &((struct xmit_frame *)
@@ -1174,7 +1171,6 @@ u32 r8712_aes_encrypt(struct _adapter *padapter, u8 *pxmitframe)
&pattrib->ra[0]);
if (stainfo != NULL) {
prwskey = &stainfo->x_UncstKey.skey[0];
- prwskeylen = 16;
for (curfragnum = 0; curfragnum < pattrib->nr_frags;
curfragnum++) {
if ((curfragnum + 1) == pattrib->nr_frags) {
@@ -1363,7 +1359,6 @@ u32 r8712_aes_decrypt(struct _adapter *padapter, u8 *precvframe)
{ /* exclude ICV */
/* Intermediate Buffers */
sint length;
- u32 prwskeylen;
u8 *pframe, *prwskey, *iv, idx;
struct sta_info *stainfo;
struct rx_pkt_attrib *prxattrib = &((union recv_frame *)
@@ -1387,7 +1382,6 @@ u32 r8712_aes_decrypt(struct _adapter *padapter, u8 *precvframe)
} else
prwskey = &stainfo->x_UncstKey.skey[0];
- prwskeylen = 16;
length = ((union recv_frame *)precvframe)->
u.hdr.len-prxattrib->hdrlen-prxattrib->iv_len;
aes_decipher(prwskey, prxattrib->hdrlen, pframe,
diff --git a/drivers/staging/rtl8712/rtl871x_sta_mgt.c b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
index e769bb5c5fb..4c9b98e8210 100644
--- a/drivers/staging/rtl8712/rtl871x_sta_mgt.c
+++ b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
@@ -79,13 +79,11 @@ static void mfree_all_stainfo(struct sta_priv *pstapriv)
{
unsigned long irqL;
struct list_head *plist, *phead;
- struct sta_info *psta = NULL;
spin_lock_irqsave(&pstapriv->sta_hash_lock, irqL);
phead = &pstapriv->free_sta_queue.queue;
plist = phead->next;
while ((end_of_queue_search(phead, plist)) == false) {
- psta = LIST_CONTAINOR(plist, struct sta_info, list);
plist = plist->next;
}
@@ -109,7 +107,6 @@ u32 _r8712_free_sta_priv(struct sta_priv *pstapriv)
struct sta_info *r8712_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
{
- uint tmp_aid;
s32 index;
struct list_head *phash_list;
struct sta_info *psta;
@@ -127,7 +124,6 @@ struct sta_info *r8712_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
psta = LIST_CONTAINOR(pfree_sta_queue->queue.next,
struct sta_info, list);
list_del_init(&(psta->list));
- tmp_aid = psta->aid;
_init_stainfo(psta);
memcpy(psta->hwaddr, hwaddr, ETH_ALEN);
index = wifi_mac_hash(hwaddr);
@@ -267,15 +263,10 @@ struct sta_info *r8712_get_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
void r8712_init_bcmc_stainfo(struct _adapter *padapter)
{
- struct sta_info *psta;
- struct tx_servq *ptxservq;
unsigned char bcast_addr[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct sta_priv *pstapriv = &padapter->stapriv;
- psta = r8712_alloc_stainfo(pstapriv, bcast_addr);
- if (psta == NULL)
- return;
- ptxservq = &(psta->sta_xmitpriv.be_q);
+ r8712_alloc_stainfo(pstapriv, bcast_addr);
}
struct sta_info *r8712_get_bcmc_stainfo(struct _adapter *padapter)
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
index f49acaf0407..62a377e7fdc 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.c
+++ b/drivers/staging/rtl8712/rtl871x_xmit.c
@@ -184,7 +184,6 @@ void _free_xmit_priv(struct xmit_priv *pxmitpriv)
sint r8712_update_attrib(struct _adapter *padapter, _pkt *pkt,
struct pkt_attrib *pattrib)
{
- uint i;
struct pkt_file pktfile;
struct sta_info *psta = NULL;
struct ethhdr etherhdr;
@@ -199,7 +198,7 @@ sint r8712_update_attrib(struct _adapter *padapter, _pkt *pkt,
_r8712_open_pktfile(pkt, &pktfile);
- i = _r8712_pktfile_read(&pktfile, (unsigned char *)&etherhdr, ETH_HLEN);
+ _r8712_pktfile_read(&pktfile, (unsigned char *)&etherhdr, ETH_HLEN);
pattrib->ether_type = ntohs(etherhdr.h_proto);
@@ -236,7 +235,7 @@ sint r8712_update_attrib(struct _adapter *padapter, _pkt *pkt,
/* for mp storing the txcmd per packet,
* according to the info of txcmd to update pattrib */
/*get MP_TXDESC_SIZE bytes txcmd per packet*/
- i = _r8712_pktfile_read(&pktfile, (u8 *)&txdesc, TXDESC_SIZE);
+ _r8712_pktfile_read(&pktfile, (u8 *)&txdesc, TXDESC_SIZE);
memcpy(pattrib->ra, pattrib->dst, ETH_ALEN);
memcpy(pattrib->ta, pattrib->src, ETH_ALEN);
pattrib->pctrl = 1;
@@ -347,7 +346,7 @@ sint r8712_update_attrib(struct _adapter *padapter, _pkt *pkt,
static sint xmitframe_addmic(struct _adapter *padapter,
struct xmit_frame *pxmitframe)
{
- u32 curfragnum, length, datalen;
+ u32 curfragnum, length;
u8 *pframe, *payload, mic[8];
struct mic_data micdata;
struct sta_info *stainfo;
@@ -369,7 +368,6 @@ static sint xmitframe_addmic(struct _adapter *padapter,
u8 null_key[16] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0};
- datalen = pattrib->pktlen - pattrib->hdrlen;
pframe = pxmitframe->buf_addr + TXDESC_OFFSET;
if (bmcst) {
if (!memcmp(psecuritypriv->XGrptxmickey
@@ -486,7 +484,7 @@ static sint make_wlanhdr(struct _adapter *padapter , u8 *hdr,
memset(hdr, 0, WLANHDR_OFFSET);
SetFrameSubType(fctrl, pattrib->subtype);
if (pattrib->subtype & WIFI_DATA_TYPE) {
- if ((check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true)) {
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) {
/* to_ds = 1, fr_ds = 0; */
SetToDs(fctrl);
memcpy(pwlanhdr->addr1, get_bssid(pmlmepriv),
@@ -825,16 +823,13 @@ void r8712_free_xmitframe(struct xmit_priv *pxmitpriv,
unsigned long irqL;
struct __queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue;
struct _adapter *padapter = pxmitpriv->adapter;
- struct sk_buff *pndis_pkt = NULL;
if (pxmitframe == NULL)
return;
spin_lock_irqsave(&pfree_xmit_queue->lock, irqL);
list_del_init(&pxmitframe->list);
- if (pxmitframe->pkt) {
- pndis_pkt = pxmitframe->pkt;
+ if (pxmitframe->pkt)
pxmitframe->pkt = NULL;
- }
list_add_tail(&pxmitframe->list, &pfree_xmit_queue->queue);
pxmitpriv->free_xmitframe_cnt++;
spin_unlock_irqrestore(&pfree_xmit_queue->lock, irqL);
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index a3d733b145e..7d0d1719b13 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -255,9 +255,6 @@ static struct drv_priv drvpriv = {
static uint r8712_usb_dvobj_init(struct _adapter *padapter)
{
uint status = _SUCCESS;
- struct usb_device_descriptor *pdev_desc;
- struct usb_host_config *phost_conf;
- struct usb_config_descriptor *pconf_desc;
struct usb_host_interface *phost_iface;
struct usb_interface_descriptor *piface_desc;
struct dvobj_priv *pdvobjpriv = &padapter->dvobjpriv;
@@ -265,9 +262,6 @@ static uint r8712_usb_dvobj_init(struct _adapter *padapter)
pdvobjpriv->padapter = padapter;
padapter->EepromAddressSize = 6;
- pdev_desc = &pusbd->descriptor;
- phost_conf = pusbd->actconfig;
- pconf_desc = &phost_conf->desc;
phost_iface = &pintf->altsetting[0];
piface_desc = &phost_iface->desc;
pdvobjpriv->nr_endpoint = piface_desc->bNumEndpoints;
@@ -292,13 +286,13 @@ static void r8712_usb_dvobj_deinit(struct _adapter *padapter)
void rtl871x_intf_stop(struct _adapter *padapter)
{
/*disable_hw_interrupt*/
- if (padapter->bSurpriseRemoved == false) {
+ if (!padapter->bSurpriseRemoved) {
/*device still exists, so driver can do i/o operation
* TODO: */
}
/* cancel in irp */
- if (padapter->dvobjpriv.inirp_deinit != NULL)
+ if (padapter->dvobjpriv.inirp_deinit)
padapter->dvobjpriv.inirp_deinit(padapter);
/* cancel out irp */
r8712_usb_write_port_cancel(padapter);
@@ -318,7 +312,7 @@ void r871x_dev_unload(struct _adapter *padapter)
r8712_stop_drv_threads(padapter);
/*s5.*/
- if (padapter->bSurpriseRemoved == false) {
+ if (!padapter->bSurpriseRemoved) {
padapter->hw_init_completed = false;
rtl8712_hal_deinit(padapter);
}
@@ -402,7 +396,7 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
/* step 3.
* initialize the dvobj_priv
*/
- if (padapter->dvobj_init == NULL)
+ if (!padapter->dvobj_init)
goto error;
else {
status = padapter->dvobj_init(padapter);
@@ -568,7 +562,7 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
((mac[0] == 0x00) && (mac[1] == 0x00) &&
(mac[2] == 0x00) && (mac[3] == 0x00) &&
(mac[4] == 0x00) && (mac[5] == 0x00)) ||
- (AutoloadFail == false)) {
+ (!AutoloadFail)) {
mac[0] = 0x00;
mac[1] = 0xe0;
mac[2] = 0x4c;
diff --git a/drivers/staging/rtl8712/usb_ops_linux.c b/drivers/staging/rtl8712/usb_ops_linux.c
index e89d2b07fcb..c3a4e3f26b4 100644
--- a/drivers/staging/rtl8712/usb_ops_linux.c
+++ b/drivers/staging/rtl8712/usb_ops_linux.c
@@ -168,7 +168,6 @@ static void usb_write_mem_complete(struct urb *purb)
void r8712_usb_write_mem(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *wmem)
{
unsigned int pipe;
- int status;
struct _adapter *padapter = (struct _adapter *)pintfhdl->adapter;
struct intf_priv *pintfpriv = pintfhdl->pintfpriv;
struct io_queue *pio_queue = (struct io_queue *)padapter->pio_queue;
@@ -186,7 +185,7 @@ void r8712_usb_write_mem(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *wmem)
usb_fill_bulk_urb(piorw_urb, pusbd, pipe,
wmem, cnt, usb_write_mem_complete,
pio_queue);
- status = usb_submit_urb(piorw_urb, GFP_ATOMIC);
+ usb_submit_urb(piorw_urb, GFP_ATOMIC);
_down_sema(&pintfpriv->io_retevt);
}
@@ -267,7 +266,7 @@ u32 r8712_usb_read_port(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *rmem)
if (adapter->bDriverStopped || adapter->bSurpriseRemoved ||
adapter->pwrctrlpriv.pnp_bstop_trx)
return _FAIL;
- if ((precvbuf->reuse == false) || (precvbuf->pskb == NULL)) {
+ if (!precvbuf->reuse || !precvbuf->pskb) {
precvbuf->pskb = skb_dequeue(&precvpriv->free_recv_skb_queue);
if (NULL != precvbuf->pskb)
precvbuf->reuse = true;
@@ -275,10 +274,10 @@ u32 r8712_usb_read_port(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *rmem)
if (precvbuf != NULL) {
r8712_init_recvbuf(adapter, precvbuf);
/* re-assign for linux based on skb */
- if ((precvbuf->reuse == false) || (precvbuf->pskb == NULL)) {
+ if (!precvbuf->reuse || !precvbuf->pskb) {
precvbuf->pskb = netdev_alloc_skb(adapter->pnetdev,
MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ);
- if (precvbuf->pskb == NULL)
+ if (!precvbuf->pskb)
return _FAIL;
tmpaddr = (addr_t)precvbuf->pskb->data;
alignment = tmpaddr & (RECVBUFF_ALIGN_SZ-1);
diff --git a/drivers/staging/rtl8712/xmit_linux.c b/drivers/staging/rtl8712/xmit_linux.c
index 0ac9130faf6..039b598152b 100644
--- a/drivers/staging/rtl8712/xmit_linux.c
+++ b/drivers/staging/rtl8712/xmit_linux.c
@@ -79,7 +79,6 @@ sint r8712_endofpktfile(struct pkt_file *pfile)
void r8712_set_qos(struct pkt_file *ppktfile, struct pkt_attrib *pattrib)
{
- int i;
struct ethhdr etherhdr;
struct iphdr ip_hdr;
u16 UserPriority = 0;
@@ -89,8 +88,7 @@ void r8712_set_qos(struct pkt_file *ppktfile, struct pkt_attrib *pattrib)
/* get UserPriority from IP hdr*/
if (pattrib->ether_type == 0x0800) {
- i = _r8712_pktfile_read(ppktfile, (u8 *)&ip_hdr,
- sizeof(ip_hdr));
+ _r8712_pktfile_read(ppktfile, (u8 *)&ip_hdr, sizeof(ip_hdr));
/*UserPriority = (ntohs(ip_hdr.tos) >> 5) & 0x3 ;*/
UserPriority = ip_hdr.tos >> 5;
} else {
diff --git a/drivers/staging/rtl8723au/Makefile b/drivers/staging/rtl8723au/Makefile
index a9aae216363..3e8989018a8 100644
--- a/drivers/staging/rtl8723au/Makefile
+++ b/drivers/staging/rtl8723au/Makefile
@@ -2,7 +2,6 @@ r8723au-y := \
core/rtw_cmd.o \
core/rtw_efuse.o \
core/rtw_ieee80211.o \
- core/rtw_led.o \
core/rtw_mlme.o \
core/rtw_mlme_ext.o \
core/rtw_pwrctrl.o \
@@ -33,8 +32,6 @@ r8723au-y := \
hal/rtl8723a_rf6052.o \
hal/rtl8723a_rxdesc.o \
hal/rtl8723a_sreset.o \
- hal/rtl8723a_xmit.o \
- hal/rtl8723au_led.o \
hal/rtl8723au_recv.o \
hal/rtl8723au_xmit.o \
hal/usb_halinit.o \
diff --git a/drivers/staging/rtl8723au/core/rtw_ap.c b/drivers/staging/rtl8723au/core/rtw_ap.c
index 6b4092f05da..e394d12c36b 100644
--- a/drivers/staging/rtl8723au/core/rtw_ap.c
+++ b/drivers/staging/rtl8723au/core/rtw_ap.c
@@ -231,12 +231,10 @@ void expire_timeout_chk23a(struct rtw_adapter *padapter)
psta->expire_to--;
}
- if (psta->expire_to <= 0)
- {
+ if (psta->expire_to <= 0) {
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- if (padapter->registrypriv.wifi_spec == 1)
- {
+ if (padapter->registrypriv.wifi_spec == 1) {
psta->expire_to = pstapriv->expire_to;
continue;
}
@@ -308,15 +306,12 @@ void expire_timeout_chk23a(struct rtw_adapter *padapter)
ret = issue_nulldata23a(padapter, psta->hwaddr, 0, 3, 50);
psta->keep_alive_trycnt++;
- if (ret == _SUCCESS)
- {
+ if (ret == _SUCCESS) {
DBG_8723A("asoc check, sta(" MAC_FMT ") is alive\n", MAC_ARG(psta->hwaddr));
psta->expire_to = pstapriv->expire_to;
psta->keep_alive_trycnt = 0;
continue;
- }
- else if (psta->keep_alive_trycnt <= 3)
- {
+ } else if (psta->keep_alive_trycnt <= 3) {
DBG_8723A("ack check for asoc expire, keep_alive_trycnt =%d\n", psta->keep_alive_trycnt);
psta->expire_to = 1;
continue;
@@ -363,8 +358,7 @@ void add_RATid23a(struct rtw_adapter *padapter, struct sta_info *psta, u8 rssi_l
return;
/* b/g mode ra_bitmap */
- for (i = 0; i < sizeof(psta->bssrateset); i++)
- {
+ for (i = 0; i < sizeof(psta->bssrateset); i++) {
if (psta->bssrateset[i])
tx_ra_bitmap |= rtw_get_bit_value_from_ieee_value23a(psta->bssrateset[i]&0x7f);
}
@@ -406,8 +400,7 @@ void add_RATid23a(struct rtw_adapter *padapter, struct sta_info *psta, u8 rssi_l
raid = networktype_to_raid23a(sta_band);
init_rate = get_highest_rate_idx23a(tx_ra_bitmap&0x0fffffff)&0x3f;
- if (psta->aid < NUM_STA)
- {
+ if (psta->aid < NUM_STA) {
u8 arg = 0;
arg = psta->mac_id&0x1f;
@@ -436,11 +429,8 @@ void add_RATid23a(struct rtw_adapter *padapter, struct sta_info *psta, u8 rssi_l
psta->raid = raid;
psta->init_rate = init_rate;
- }
- else
- {
+ } else
DBG_8723A("station aid %d exceed the max number\n", psta->aid);
- }
}
static void update_bmc_sta(struct rtw_adapter *padapter)
@@ -453,8 +443,7 @@ static void update_bmc_sta(struct rtw_adapter *padapter)
struct wlan_bssid_ex *pcur_network = &pmlmepriv->cur_network.network;
struct sta_info *psta = rtw_get_bcmc_stainfo23a(padapter);
- if (psta)
- {
+ if (psta) {
psta->aid = 0;/* default set to 0 */
psta->mac_id = psta->aid + 1;
@@ -474,8 +463,7 @@ static void update_bmc_sta(struct rtw_adapter *padapter)
psta->bssratelen = supportRateNum;
/* b/g mode ra_bitmap */
- for (i = 0; i < supportRateNum; i++)
- {
+ for (i = 0; i < supportRateNum; i++) {
if (psta->bssrateset[i])
tx_ra_bitmap |= rtw_get_bit_value_from_ieee_value23a(psta->bssrateset[i]&0x7f);
}
@@ -522,11 +510,8 @@ static void update_bmc_sta(struct rtw_adapter *padapter)
psta->state = _FW_LINKED;
spin_unlock_bh(&psta->lock);
- }
- else
- {
+ } else
DBG_8723A("add_RATid23a_bmc_sta error!\n");
- }
}
/* notes: */
@@ -561,8 +546,7 @@ void update_sta_info23a_apmode23a(struct rtw_adapter *padapter, struct sta_info
/* ERP */
VCS_update23a(padapter, psta);
/* HT related cap */
- if (phtpriv_sta->ht_option)
- {
+ if (phtpriv_sta->ht_option) {
/* check if sta supports rx ampdu */
phtpriv_sta->ampdu_enable = phtpriv_ap->ampdu_enable;
@@ -580,9 +564,7 @@ void update_sta_info23a_apmode23a(struct rtw_adapter *padapter, struct sta_info
psta->qos_option = true;
- }
- else
- {
+ } else {
phtpriv_sta->ampdu_enable = false;
phtpriv_sta->sgi = false;
@@ -654,7 +636,7 @@ static void start_bss_network(struct rtw_adapter *padapter, u8 *pbuf)
bcn_interval = (u16)pnetwork->beacon_interval;
cur_channel = pnetwork->DSConfig;
- cur_bwmode = HT_CHANNEL_WIDTH_20;;
+ cur_bwmode = HT_CHANNEL_WIDTH_20;
cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
/* check if there is wps ie, */
@@ -1122,7 +1104,6 @@ int rtw_acl_remove_sta23a(struct rtw_adapter *padapter, u8 *addr)
struct sta_priv *pstapriv = &padapter->stapriv;
struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
struct rtw_queue *pacl_node_q = &pacl_list->acl_node_q;
- int ret = 0;
DBG_8723A("%s(acl_num =%d) = %pM\n", __func__, pacl_list->num, addr);
@@ -1148,7 +1129,7 @@ int rtw_acl_remove_sta23a(struct rtw_adapter *padapter, u8 *addr)
DBG_8723A("%s, acl_num =%d\n", __func__, pacl_list->num);
- return ret;
+ return 0;
}
static void update_bcn_fixed_ie(struct rtw_adapter *padapter)
@@ -1217,8 +1198,6 @@ static void update_bcn_wmm_ie(struct rtw_adapter *padapter)
static void update_bcn_wps_ie(struct rtw_adapter *padapter)
{
DBG_8723A("%s\n", __func__);
-
- return;
}
static void update_bcn_p2p_ie(struct rtw_adapter *padapter)
@@ -1261,8 +1240,7 @@ void update_beacon23a(struct rtw_adapter *padapter, u8 ie_id, u8 *oui, u8 tx)
spin_lock_bh(&pmlmepriv->bcn_update_lock);
- switch (ie_id)
- {
+ switch (ie_id) {
case 0xFF:
/* 8: TimeStamp, 2: Beacon Interval 2:Capability */
update_bcn_fixed_ie(padapter);
@@ -1389,8 +1367,7 @@ static int rtw_ht_operation_update(struct rtw_adapter *padapter)
void associated_clients_update23a(struct rtw_adapter *padapter, u8 updated)
{
/* update associated stations cap. */
- if (updated == true)
- {
+ if (updated == true) {
struct list_head *phead, *plist, *ptmp;
struct sta_info *psta;
struct sta_priv *pstapriv = &padapter->stapriv;
@@ -1416,34 +1393,27 @@ void bss_cap_update_on_sta_join23a(struct rtw_adapter *padapter, struct sta_info
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- if (!(psta->flags & WLAN_STA_SHORT_PREAMBLE))
- {
- if (!psta->no_short_preamble_set)
- {
+ if (!(psta->flags & WLAN_STA_SHORT_PREAMBLE)) {
+ if (!psta->no_short_preamble_set) {
psta->no_short_preamble_set = 1;
pmlmepriv->num_sta_no_short_preamble++;
if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
- (pmlmepriv->num_sta_no_short_preamble == 1))
- {
+ (pmlmepriv->num_sta_no_short_preamble == 1)) {
beacon_updated = true;
update_beacon23a(padapter, 0xFF, NULL, true);
}
}
- }
- else
- {
- if (psta->no_short_preamble_set)
- {
+ } else {
+ if (psta->no_short_preamble_set) {
psta->no_short_preamble_set = 0;
pmlmepriv->num_sta_no_short_preamble--;
if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
- (pmlmepriv->num_sta_no_short_preamble == 0))
- {
+ (pmlmepriv->num_sta_no_short_preamble == 0)) {
beacon_updated = true;
update_beacon23a(padapter, 0xFF, NULL, true);
}
@@ -1451,32 +1421,25 @@ void bss_cap_update_on_sta_join23a(struct rtw_adapter *padapter, struct sta_info
}
}
- if (psta->flags & WLAN_STA_NONERP)
- {
- if (!psta->nonerp_set)
- {
+ if (psta->flags & WLAN_STA_NONERP) {
+ if (!psta->nonerp_set) {
psta->nonerp_set = 1;
pmlmepriv->num_sta_non_erp++;
- if (pmlmepriv->num_sta_non_erp == 1)
- {
+ if (pmlmepriv->num_sta_non_erp == 1) {
beacon_updated = true;
update_beacon23a(padapter, WLAN_EID_ERP_INFO, NULL, true);
}
}
- }
- else
- {
- if (psta->nonerp_set)
- {
+ } else {
+ if (psta->nonerp_set) {
psta->nonerp_set = 0;
pmlmepriv->num_sta_non_erp--;
- if (pmlmepriv->num_sta_non_erp == 0)
- {
+ if (pmlmepriv->num_sta_non_erp == 0) {
beacon_updated = true;
update_beacon23a(padapter, WLAN_EID_ERP_INFO, NULL, true);
}
@@ -1484,42 +1447,34 @@ void bss_cap_update_on_sta_join23a(struct rtw_adapter *padapter, struct sta_info
}
- if (!(psta->capability & WLAN_CAPABILITY_SHORT_SLOT_TIME))
- {
- if (!psta->no_short_slot_time_set)
- {
+ if (!(psta->capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)) {
+ if (!psta->no_short_slot_time_set) {
psta->no_short_slot_time_set = 1;
pmlmepriv->num_sta_no_short_slot_time++;
if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
- (pmlmepriv->num_sta_no_short_slot_time == 1))
- {
+ (pmlmepriv->num_sta_no_short_slot_time == 1)) {
beacon_updated = true;
update_beacon23a(padapter, 0xFF, NULL, true);
}
}
- }
- else
- {
- if (psta->no_short_slot_time_set)
- {
+ } else {
+ if (psta->no_short_slot_time_set) {
psta->no_short_slot_time_set = 0;
pmlmepriv->num_sta_no_short_slot_time--;
if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
- (pmlmepriv->num_sta_no_short_slot_time == 0))
- {
+ (pmlmepriv->num_sta_no_short_slot_time == 0)) {
beacon_updated = true;
update_beacon23a(padapter, 0xFF, NULL, true);
}
}
}
- if (psta->flags & WLAN_STA_HT)
- {
+ if (psta->flags & WLAN_STA_HT) {
u16 ht_capab = le16_to_cpu(psta->htpriv.ht_cap.cap_info);
DBG_8723A("HT: STA " MAC_FMT " HT Capabilities "
@@ -1552,9 +1507,7 @@ void bss_cap_update_on_sta_join23a(struct rtw_adapter *padapter, struct sta_info
pmlmepriv->num_sta_ht_20mhz);
}
- }
- else
- {
+ } else {
if (!psta->no_ht_set) {
psta->no_ht_set = 1;
pmlmepriv->num_sta_no_ht++;
@@ -1567,8 +1520,7 @@ void bss_cap_update_on_sta_join23a(struct rtw_adapter *padapter, struct sta_info
}
}
- if (rtw_ht_operation_update(padapter) > 0)
- {
+ if (rtw_ht_operation_update(padapter) > 0) {
update_beacon23a(padapter, WLAN_EID_HT_CAPABILITY, NULL, false);
update_beacon23a(padapter, WLAN_EID_HT_OPERATION, NULL, true);
}
@@ -1592,8 +1544,7 @@ u8 bss_cap_update_on_sta_leave23a(struct rtw_adapter *padapter, struct sta_info
psta->no_short_preamble_set = 0;
pmlmepriv->num_sta_no_short_preamble--;
if (pmlmeext->cur_wireless_mode > WIRELESS_11B
- && pmlmepriv->num_sta_no_short_preamble == 0)
- {
+ && pmlmepriv->num_sta_no_short_preamble == 0) {
beacon_updated = true;
update_beacon23a(padapter, 0xFF, NULL, true);
}
@@ -1602,8 +1553,7 @@ u8 bss_cap_update_on_sta_leave23a(struct rtw_adapter *padapter, struct sta_info
if (psta->nonerp_set) {
psta->nonerp_set = 0;
pmlmepriv->num_sta_non_erp--;
- if (pmlmepriv->num_sta_non_erp == 0)
- {
+ if (pmlmepriv->num_sta_non_erp == 0) {
beacon_updated = true;
update_beacon23a(padapter, WLAN_EID_ERP_INFO,
NULL, true);
@@ -1614,8 +1564,7 @@ u8 bss_cap_update_on_sta_leave23a(struct rtw_adapter *padapter, struct sta_info
psta->no_short_slot_time_set = 0;
pmlmepriv->num_sta_no_short_slot_time--;
if (pmlmeext->cur_wireless_mode > WIRELESS_11B
- && pmlmepriv->num_sta_no_short_slot_time == 0)
- {
+ && pmlmepriv->num_sta_no_short_slot_time == 0) {
beacon_updated = true;
update_beacon23a(padapter, 0xFF, NULL, true);
}
@@ -1636,8 +1585,7 @@ u8 bss_cap_update_on_sta_leave23a(struct rtw_adapter *padapter, struct sta_info
pmlmepriv->num_sta_ht_20mhz--;
}
- if (rtw_ht_operation_update(padapter) > 0)
- {
+ if (rtw_ht_operation_update(padapter) > 0) {
update_beacon23a(padapter, WLAN_EID_HT_CAPABILITY, NULL, false);
update_beacon23a(padapter, WLAN_EID_HT_OPERATION, NULL, true);
}
@@ -1657,8 +1605,7 @@ u8 ap_free_sta23a(struct rtw_adapter *padapter, struct sta_info *psta, bool acti
if (!psta)
return beacon_updated;
- if (active == true)
- {
+ if (active) {
/* tear down Rx AMPDU */
send_delba23a(padapter, 0, psta->hwaddr);/* recipient */
@@ -1698,7 +1645,6 @@ u8 ap_free_sta23a(struct rtw_adapter *padapter, struct sta_info *psta, bool acti
int rtw_ap_inform_ch_switch23a (struct rtw_adapter *padapter, u8 new_ch, u8 ch_offset)
{
struct list_head *phead, *plist;
- int ret = 0;
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -1706,7 +1652,7 @@ int rtw_ap_inform_ch_switch23a (struct rtw_adapter *padapter, u8 new_ch, u8 ch_o
u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
if ((pmlmeinfo->state&0x03) != MSR_AP)
- return ret;
+ return 0;
DBG_8723A("%s(%s): with ch:%u, offset:%u\n", __func__,
padapter->pnetdev->name, new_ch, ch_offset);
@@ -1724,13 +1670,12 @@ int rtw_ap_inform_ch_switch23a (struct rtw_adapter *padapter, u8 new_ch, u8 ch_o
issue_action_spct_ch_switch23a (padapter, bc_addr, new_ch, ch_offset);
- return ret;
+ return 0;
}
int rtw_sta_flush23a(struct rtw_adapter *padapter)
{
struct list_head *phead, *plist, *ptmp;
- int ret = 0;
struct sta_info *psta;
struct sta_priv *pstapriv = &padapter->stapriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -1743,7 +1688,7 @@ int rtw_sta_flush23a(struct rtw_adapter *padapter)
DBG_8723A("%s(%s)\n", __func__, padapter->pnetdev->name);
if ((pmlmeinfo->state&0x03) != MSR_AP)
- return ret;
+ return 0;
spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
@@ -1769,7 +1714,7 @@ int rtw_sta_flush23a(struct rtw_adapter *padapter)
associated_clients_update23a(padapter, true);
- return ret;
+ return 0;
}
/* called > TSR LEVEL for USB or SDIO Interface*/
@@ -1788,13 +1733,10 @@ void sta_info_update23a(struct rtw_adapter *padapter, struct sta_info *psta)
psta->qos_option = 0;
/* update 802.11n ht cap. */
- if (WLAN_STA_HT&flags)
- {
+ if (WLAN_STA_HT&flags) {
psta->htpriv.ht_option = true;
psta->qos_option = 1;
- }
- else
- {
+ } else {
psta->htpriv.ht_option = false;
}
@@ -1807,8 +1749,7 @@ void sta_info_update23a(struct rtw_adapter *padapter, struct sta_info *psta)
/* called >= TSR LEVEL for USB or SDIO Interface*/
void ap_sta_info_defer_update23a(struct rtw_adapter *padapter, struct sta_info *psta)
{
- if (psta->state & _FW_LINKED)
- {
+ if (psta->state & _FW_LINKED) {
/* add ratid */
add_RATid23a(padapter, psta, 0);/* DM_RATR_STA_INIT */
}
@@ -1819,7 +1760,7 @@ void rtw_ap_restore_network(struct rtw_adapter *padapter)
{
struct mlme_priv *mlmepriv = &padapter->mlmepriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct sta_priv * pstapriv = &padapter->stapriv;
+ struct sta_priv *pstapriv = &padapter->stapriv;
struct sta_info *psta;
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct list_head *phead, *plist, *ptmp;
diff --git a/drivers/staging/rtl8723au/core/rtw_cmd.c b/drivers/staging/rtl8723au/core/rtw_cmd.c
index 4eaa50297b9..60e0ded8ae0 100644
--- a/drivers/staging/rtl8723au/core/rtw_cmd.c
+++ b/drivers/staging/rtl8723au/core/rtw_cmd.c
@@ -36,25 +36,25 @@ static struct cmd_hdl wlancmds[] = {
GEN_MLME_EXT_HANDLER(0, NULL)
GEN_MLME_EXT_HANDLER(0, NULL)
GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(sizeof (struct wlan_bssid_ex), join_cmd_hdl23a) /*14*/
- GEN_MLME_EXT_HANDLER(sizeof (struct disconnect_parm), disconnect_hdl23a)
- GEN_MLME_EXT_HANDLER(sizeof (struct wlan_bssid_ex), createbss_hdl23a)
- GEN_MLME_EXT_HANDLER(sizeof (struct setopmode_parm), setopmode_hdl23a)
- GEN_MLME_EXT_HANDLER(sizeof (struct sitesurvey_parm), sitesurvey_cmd_hdl23a) /*18*/
- GEN_MLME_EXT_HANDLER(sizeof (struct setauth_parm), setauth_hdl23a)
- GEN_MLME_EXT_HANDLER(sizeof (struct setkey_parm), setkey_hdl23a) /*20*/
- GEN_MLME_EXT_HANDLER(sizeof (struct set_stakey_parm), set_stakey_hdl23a)
- GEN_MLME_EXT_HANDLER(sizeof (struct set_assocsta_parm), NULL)
- GEN_MLME_EXT_HANDLER(sizeof (struct del_assocsta_parm), NULL)
- GEN_MLME_EXT_HANDLER(sizeof (struct setstapwrstate_parm), NULL)
- GEN_MLME_EXT_HANDLER(sizeof (struct setbasicrate_parm), NULL)
- GEN_MLME_EXT_HANDLER(sizeof (struct getbasicrate_parm), NULL)
- GEN_MLME_EXT_HANDLER(sizeof (struct setdatarate_parm), NULL)
- GEN_MLME_EXT_HANDLER(sizeof (struct getdatarate_parm), NULL)
- GEN_MLME_EXT_HANDLER(sizeof (struct setphyinfo_parm), NULL)
- GEN_MLME_EXT_HANDLER(sizeof (struct getphyinfo_parm), NULL) /*30*/
- GEN_MLME_EXT_HANDLER(sizeof (struct setphy_parm), NULL)
- GEN_MLME_EXT_HANDLER(sizeof (struct getphy_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct wlan_bssid_ex), join_cmd_hdl23a) /*14*/
+ GEN_MLME_EXT_HANDLER(sizeof(struct disconnect_parm), disconnect_hdl23a)
+ GEN_MLME_EXT_HANDLER(sizeof(struct wlan_bssid_ex), createbss_hdl23a)
+ GEN_MLME_EXT_HANDLER(sizeof(struct setopmode_parm), setopmode_hdl23a)
+ GEN_MLME_EXT_HANDLER(sizeof(struct sitesurvey_parm), sitesurvey_cmd_hdl23a) /*18*/
+ GEN_MLME_EXT_HANDLER(sizeof(struct setauth_parm), setauth_hdl23a)
+ GEN_MLME_EXT_HANDLER(sizeof(struct setkey_parm), setkey_hdl23a) /*20*/
+ GEN_MLME_EXT_HANDLER(sizeof(struct set_stakey_parm), set_stakey_hdl23a)
+ GEN_MLME_EXT_HANDLER(sizeof(struct set_assocsta_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct del_assocsta_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct setstapwrstate_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct setbasicrate_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct getbasicrate_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct setdatarate_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct getdatarate_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct setphyinfo_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct getphyinfo_parm), NULL) /*30*/
+ GEN_MLME_EXT_HANDLER(sizeof(struct setphy_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct getphy_parm), NULL)
GEN_MLME_EXT_HANDLER(0, NULL)
GEN_MLME_EXT_HANDLER(0, NULL)
GEN_MLME_EXT_HANDLER(0, NULL)
@@ -359,6 +359,7 @@ int rtw_sitesurvey_cmd23a(struct rtw_adapter *padapter,
/* prepare ssid list */
if (ssid) {
int i;
+
for (i = 0; i < ssid_num && i < RTW_SSID_SCAN_AMOUNT; i++) {
if (ssid[i].ssid_len) {
memcpy(&psurveyPara->ssid[i], &ssid[i],
@@ -371,6 +372,7 @@ int rtw_sitesurvey_cmd23a(struct rtw_adapter *padapter,
/* prepare channel list */
if (ch) {
int i;
+
for (i = 0; i < ch_num && i < RTW_CHANNEL_SCAN_AMOUNT; i++) {
if (ch[i].hw_value &&
!(ch[i].flags & IEEE80211_CHAN_DISABLED)) {
@@ -389,8 +391,6 @@ int rtw_sitesurvey_cmd23a(struct rtw_adapter *padapter,
mod_timer(&pmlmepriv->scan_to_timer, jiffies +
msecs_to_jiffies(SCANNING_TIMEOUT));
- rtw_led_control(padapter, LED_CTL_SITE_SURVEY);
-
pmlmepriv->scan_interval = SCAN_INTERVAL;/* 30*2 sec = 60sec */
} else
_clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
@@ -415,8 +415,6 @@ int rtw_createbss_cmd23a(struct rtw_adapter *padapter)
pdev_network = &padapter->registrypriv.dev_network;
- rtw_led_control(padapter, LED_CTL_START_TO_LINK);
-
if (pmlmepriv->assoc_ssid.ssid_len == 0) {
RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_,
(" createbss for Any SSid:%s\n",
@@ -465,8 +463,6 @@ int rtw_joinbss_cmd23a(struct rtw_adapter *padapter,
ifmode = pnetwork->network.ifmode;
- rtw_led_control(padapter, LED_CTL_START_TO_LINK);
-
if (pmlmepriv->assoc_ssid.ssid_len == 0) {
RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_,
("+Join cmd: Any SSid\n"));
@@ -599,7 +595,7 @@ exit:
return res;
}
-int rtw_disassoc_cmd23a(struct rtw_adapter*padapter, u32 deauth_timeout_ms,
+int rtw_disassoc_cmd23a(struct rtw_adapter *padapter, u32 deauth_timeout_ms,
bool enqueue)
{
struct cmd_obj *cmdobj = NULL;
@@ -719,6 +715,7 @@ int rtw_setstakey_cmd23a(struct rtw_adapter *padapter, u8 *psta, u8 unicast_key)
memcpy(&psetstakey_para->key, &sta->dot118021x_UncstKey, 16);
} else {
int idx = psecuritypriv->dot118021XGrpKeyid;
+
memcpy(&psetstakey_para->key,
&psecuritypriv->dot118021XGrpKey[idx].skey, 16);
}
@@ -786,7 +783,7 @@ exit:
return res;
}
-int rtw_addbareq_cmd23a(struct rtw_adapter*padapter, u8 tid, u8 *addr)
+int rtw_addbareq_cmd23a(struct rtw_adapter *padapter, u8 tid, u8 *addr)
{
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
struct cmd_obj *ph2c;
@@ -822,7 +819,7 @@ exit:
return res;
}
-int rtw_dynamic_chk_wk_cmd23a(struct rtw_adapter*padapter)
+int rtw_dynamic_chk_wk_cmd23a(struct rtw_adapter *padapter)
{
struct cmd_obj *ph2c;
struct drvextra_cmd_parm *pdrvextra_cmd_parm;
@@ -859,7 +856,7 @@ exit:
* This is only ever called from on_action_spct23a_ch_switch () which isn't
* called from anywhere itself
*/
-int rtw_set_ch_cmd23a(struct rtw_adapter*padapter, u8 ch, u8 bw, u8 ch_offset,
+int rtw_set_ch_cmd23a(struct rtw_adapter *padapter, u8 ch, u8 bw, u8 ch_offset,
u8 enqueue)
{
struct cmd_obj *pcmdobj;
@@ -919,34 +916,34 @@ static void traffic_status_watchdog(struct rtw_adapter *padapter)
u8 bHigherBusyTxTraffic = false;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
int BusyThreshold = 100;
+ struct rt_link_detect *ldi = &pmlmepriv->LinkDetectInfo;
+
/* */
/* Determine if our traffic is busy now */
/* */
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
if (rtl8723a_BT_coexist(padapter))
BusyThreshold = 50;
- else if (pmlmepriv->LinkDetectInfo.bBusyTraffic)
+ else if (ldi->bBusyTraffic)
BusyThreshold = 75;
/* if we raise bBusyTraffic in last watchdog, using
lower threshold. */
- if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod > BusyThreshold ||
- pmlmepriv->LinkDetectInfo.NumTxOkInPeriod > BusyThreshold) {
+ if (ldi->NumRxOkInPeriod > BusyThreshold ||
+ ldi->NumTxOkInPeriod > BusyThreshold) {
bBusyTraffic = true;
- if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod >
- pmlmepriv->LinkDetectInfo.NumTxOkInPeriod)
+ if (ldi->NumRxOkInPeriod > ldi->NumTxOkInPeriod)
bRxBusyTraffic = true;
else
bTxBusyTraffic = true;
}
/* Higher Tx/Rx data. */
- if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod > 4000 ||
- pmlmepriv->LinkDetectInfo.NumTxOkInPeriod > 4000) {
+ if (ldi->NumRxOkInPeriod > 4000 ||
+ ldi->NumTxOkInPeriod > 4000) {
bHigherBusyTraffic = true;
- if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod >
- pmlmepriv->LinkDetectInfo.NumTxOkInPeriod)
+ if (ldi->NumRxOkInPeriod > ldi->NumTxOkInPeriod)
bHigherBusyRxTraffic = true;
else
bHigherBusyTxTraffic = true;
@@ -955,9 +952,9 @@ static void traffic_status_watchdog(struct rtw_adapter *padapter)
if (!rtl8723a_BT_coexist(padapter) ||
!rtl8723a_BT_using_antenna_1(padapter)) {
/* check traffic for powersaving. */
- if (((pmlmepriv->LinkDetectInfo.NumRxUnicastOkInPeriod +
- pmlmepriv->LinkDetectInfo.NumTxOkInPeriod) > 8) ||
- pmlmepriv->LinkDetectInfo.NumRxUnicastOkInPeriod >2)
+ if (((ldi->NumRxUnicastOkInPeriod +
+ ldi->NumTxOkInPeriod) > 8) ||
+ ldi->NumRxUnicastOkInPeriod > 2)
bEnterPS = false;
else
bEnterPS = true;
@@ -971,15 +968,15 @@ static void traffic_status_watchdog(struct rtw_adapter *padapter)
} else
LPS_Leave23a(padapter);
- pmlmepriv->LinkDetectInfo.NumRxOkInPeriod = 0;
- pmlmepriv->LinkDetectInfo.NumTxOkInPeriod = 0;
- pmlmepriv->LinkDetectInfo.NumRxUnicastOkInPeriod = 0;
- pmlmepriv->LinkDetectInfo.bBusyTraffic = bBusyTraffic;
- pmlmepriv->LinkDetectInfo.bTxBusyTraffic = bTxBusyTraffic;
- pmlmepriv->LinkDetectInfo.bRxBusyTraffic = bRxBusyTraffic;
- pmlmepriv->LinkDetectInfo.bHigherBusyTraffic = bHigherBusyTraffic;
- pmlmepriv->LinkDetectInfo.bHigherBusyRxTraffic = bHigherBusyRxTraffic;
- pmlmepriv->LinkDetectInfo.bHigherBusyTxTraffic = bHigherBusyTxTraffic;
+ ldi->NumRxOkInPeriod = 0;
+ ldi->NumTxOkInPeriod = 0;
+ ldi->NumRxUnicastOkInPeriod = 0;
+ ldi->bBusyTraffic = bBusyTraffic;
+ ldi->bTxBusyTraffic = bTxBusyTraffic;
+ ldi->bRxBusyTraffic = bRxBusyTraffic;
+ ldi->bHigherBusyTraffic = bHigherBusyTraffic;
+ ldi->bHigherBusyRxTraffic = bHigherBusyRxTraffic;
+ ldi->bHigherBusyTxTraffic = bHigherBusyTxTraffic;
}
static void dynamic_chk_wk_hdl(struct rtw_adapter *padapter, u8 *pbuf, int sz)
@@ -1017,46 +1014,45 @@ static void lps_ctrl_wk_hdl(struct rtw_adapter *padapter, u8 lps_ctrl_type)
check_fwstate(pmlmepriv, WIFI_ADHOC_STATE))
return;
- switch (lps_ctrl_type)
- {
- case LPS_CTRL_SCAN:
- rtl8723a_BT_wifiscan_notify(padapter, true);
- if (!rtl8723a_BT_using_antenna_1(padapter)) {
- if (check_fwstate(pmlmepriv, _FW_LINKED))
- LPS_Leave23a(padapter);
+ switch (lps_ctrl_type) {
+ case LPS_CTRL_SCAN:
+ rtl8723a_BT_wifiscan_notify(padapter, true);
+ if (!rtl8723a_BT_using_antenna_1(padapter)) {
+ if (check_fwstate(pmlmepriv, _FW_LINKED))
+ LPS_Leave23a(padapter);
}
- break;
- case LPS_CTRL_JOINBSS:
+ break;
+ case LPS_CTRL_JOINBSS:
+ LPS_Leave23a(padapter);
+ break;
+ case LPS_CTRL_CONNECT:
+ mstatus = 1;/* connect */
+ /* Reset LPS Setting */
+ padapter->pwrctrlpriv.LpsIdleCount = 0;
+ rtl8723a_set_FwJoinBssReport_cmd(padapter, 1);
+ rtl8723a_BT_mediastatus_notify(padapter, mstatus);
+ break;
+ case LPS_CTRL_DISCONNECT:
+ mstatus = 0;/* disconnect */
+ rtl8723a_BT_mediastatus_notify(padapter, mstatus);
+ if (!rtl8723a_BT_using_antenna_1(padapter))
LPS_Leave23a(padapter);
- break;
- case LPS_CTRL_CONNECT:
- mstatus = 1;/* connect */
- /* Reset LPS Setting */
- padapter->pwrctrlpriv.LpsIdleCount = 0;
- rtl8723a_set_FwJoinBssReport_cmd(padapter, 1);
- rtl8723a_BT_mediastatus_notify(padapter, mstatus);
- break;
- case LPS_CTRL_DISCONNECT:
- mstatus = 0;/* disconnect */
- rtl8723a_BT_mediastatus_notify(padapter, mstatus);
- if (!rtl8723a_BT_using_antenna_1(padapter))
- LPS_Leave23a(padapter);
- rtl8723a_set_FwJoinBssReport_cmd(padapter, 0);
- break;
- case LPS_CTRL_SPECIAL_PACKET:
- pwrpriv->DelayLPSLastTimeStamp = jiffies;
- rtl8723a_BT_specialpacket_notify(padapter);
- if (!rtl8723a_BT_using_antenna_1(padapter))
- LPS_Leave23a(padapter);
- break;
- case LPS_CTRL_LEAVE:
- rtl8723a_BT_lps_leave(padapter);
- if (!rtl8723a_BT_using_antenna_1(padapter))
- LPS_Leave23a(padapter);
- break;
+ rtl8723a_set_FwJoinBssReport_cmd(padapter, 0);
+ break;
+ case LPS_CTRL_SPECIAL_PACKET:
+ pwrpriv->DelayLPSLastTimeStamp = jiffies;
+ rtl8723a_BT_specialpacket_notify(padapter);
+ if (!rtl8723a_BT_using_antenna_1(padapter))
+ LPS_Leave23a(padapter);
+ break;
+ case LPS_CTRL_LEAVE:
+ rtl8723a_BT_lps_leave(padapter);
+ if (!rtl8723a_BT_using_antenna_1(padapter))
+ LPS_Leave23a(padapter);
+ break;
- default:
- break;
+ default:
+ break;
}
}
@@ -1098,7 +1094,7 @@ exit:
return res;
}
-int rtw_ps_cmd23a(struct rtw_adapter*padapter)
+int rtw_ps_cmd23a(struct rtw_adapter *padapter)
{
struct cmd_obj *ppscmd;
struct drvextra_cmd_parm *pdrvextra_cmd_parm;
@@ -1147,12 +1143,12 @@ static void rtw_chk_hi_queue_hdl(struct rtw_adapter *padapter)
val = rtl8723a_chk_hi_queue_empty(padapter);
- while (val == false) {
+ while (!val) {
msleep(100);
cnt++;
- if (cnt>10)
+ if (cnt > 10)
break;
val = rtl8723a_chk_hi_queue_empty(padapter);
@@ -1168,7 +1164,7 @@ static void rtw_chk_hi_queue_hdl(struct rtw_adapter *padapter)
}
}
-int rtw_chk_hi_queue_cmd23a(struct rtw_adapter*padapter)
+int rtw_chk_hi_queue_cmd23a(struct rtw_adapter *padapter)
{
struct cmd_obj *ph2c;
struct drvextra_cmd_parm *pdrvextra_cmd_parm;
@@ -1305,8 +1301,7 @@ int rtw_drvextra_cmd_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf)
pdrvextra_cmd = (struct drvextra_cmd_parm *)pbuf;
- switch (pdrvextra_cmd->ec_id)
- {
+ switch (pdrvextra_cmd->ec_id) {
case DYNAMIC_CHK_WK_CID:
dynamic_chk_wk_hdl(padapter, pdrvextra_cmd->pbuf,
pdrvextra_cmd->type_size);
diff --git a/drivers/staging/rtl8723au/core/rtw_efuse.c b/drivers/staging/rtl8723au/core/rtw_efuse.c
index 9f6ce7d071c..81960e788f8 100644
--- a/drivers/staging/rtl8723au/core/rtw_efuse.c
+++ b/drivers/staging/rtl8723au/core/rtw_efuse.c
@@ -117,12 +117,7 @@ Efuse_GetCurrentSize23a(struct rtw_adapter *pAdapter, u8 efuseType)
u8
Efuse_CalculateWordCnts23a(u8 word_en)
{
- u8 word_cnts = 0;
- if (!(word_en & BIT(0))) word_cnts++; /* 0 : write enable */
- if (!(word_en & BIT(1))) word_cnts++;
- if (!(word_en & BIT(2))) word_cnts++;
- if (!(word_en & BIT(3))) word_cnts++;
- return word_cnts;
+ return hweight8((~word_en) & 0xf);
}
/* */
@@ -181,7 +176,7 @@ EFUSE_GetEfuseDefinition23a(struct rtw_adapter *pAdapter, u8 efuseType,
switch (type) {
case TYPE_EFUSE_MAX_SECTION:
- pMax_section = (u8 *) pOut;
+ pMax_section = pOut;
if (efuseType == EFUSE_WIFI)
*pMax_section = EFUSE_MAX_SECTION_8723A;
@@ -190,7 +185,7 @@ EFUSE_GetEfuseDefinition23a(struct rtw_adapter *pAdapter, u8 efuseType,
break;
case TYPE_EFUSE_REAL_CONTENT_LEN:
- pu2Tmp = (u16 *) pOut;
+ pu2Tmp = pOut;
if (efuseType == EFUSE_WIFI)
*pu2Tmp = EFUSE_REAL_CONTENT_LEN_8723A;
@@ -199,7 +194,7 @@ EFUSE_GetEfuseDefinition23a(struct rtw_adapter *pAdapter, u8 efuseType,
break;
case TYPE_AVAILABLE_EFUSE_BYTES_BANK:
- pu2Tmp = (u16 *) pOut;
+ pu2Tmp = pOut;
if (efuseType == EFUSE_WIFI)
*pu2Tmp = (EFUSE_REAL_CONTENT_LEN_8723A -
@@ -210,7 +205,7 @@ EFUSE_GetEfuseDefinition23a(struct rtw_adapter *pAdapter, u8 efuseType,
break;
case TYPE_AVAILABLE_EFUSE_BYTES_TOTAL:
- pu2Tmp = (u16 *) pOut;
+ pu2Tmp = pOut;
if (efuseType == EFUSE_WIFI)
*pu2Tmp = (EFUSE_REAL_CONTENT_LEN_8723A -
@@ -221,7 +216,7 @@ EFUSE_GetEfuseDefinition23a(struct rtw_adapter *pAdapter, u8 efuseType,
break;
case TYPE_EFUSE_MAP_LEN:
- pu2Tmp = (u16 *) pOut;
+ pu2Tmp = pOut;
if (efuseType == EFUSE_WIFI)
*pu2Tmp = EFUSE_MAP_LEN_8723A;
@@ -230,7 +225,7 @@ EFUSE_GetEfuseDefinition23a(struct rtw_adapter *pAdapter, u8 efuseType,
break;
case TYPE_EFUSE_PROTECT_BYTES_BANK:
- pu1Tmp = (u8 *) pOut;
+ pu1Tmp = pOut;
if (efuseType == EFUSE_WIFI)
*pu1Tmp = EFUSE_OOB_PROTECT_BYTES;
@@ -239,7 +234,7 @@ EFUSE_GetEfuseDefinition23a(struct rtw_adapter *pAdapter, u8 efuseType,
break;
case TYPE_EFUSE_CONTENT_LEN_BANK:
- pu2Tmp = (u16 *) pOut;
+ pu2Tmp = pOut;
if (efuseType == EFUSE_WIFI)
*pu2Tmp = EFUSE_REAL_CONTENT_LEN_8723A;
@@ -248,7 +243,7 @@ EFUSE_GetEfuseDefinition23a(struct rtw_adapter *pAdapter, u8 efuseType,
break;
default:
- pu1Tmp = (u8 *) pOut;
+ pu1Tmp = pOut;
*pu1Tmp = 0;
break;
}
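
The Efuse_CalculateWordCnts23a hunk above replaces a per-bit count of cleared word-enable bits with hweight8((~word_en) & 0xf). A minimal userspace sketch of that equivalence, using __builtin_popcount as a stand-in for the kernel's hweight8 (the 4-bit mask and the "cleared bit means word enabled" convention are taken from the hunk; the helper names below are illustrative only):

#include <assert.h>
#include <stdio.h>

/* Stand-in for the kernel's hweight8(): number of set bits in a byte. */
static unsigned int hweight8_sketch(unsigned char x)
{
	return (unsigned int)__builtin_popcount(x);
}

/* Loop form removed by the patch: a cleared bit in word_en counts as one word. */
static unsigned char word_cnts_loop(unsigned char word_en)
{
	unsigned char cnt = 0, i;

	for (i = 0; i < 4; i++)
		if (!(word_en & (1u << i)))
			cnt++;
	return cnt;
}

int main(void)
{
	unsigned int word_en;

	/* Both forms agree for every possible low-nibble value. */
	for (word_en = 0; word_en < 16; word_en++)
		assert(word_cnts_loop(word_en) ==
		       hweight8_sketch((~word_en) & 0xf));

	printf("loop and popcount forms agree for all 16 word_en values\n");
	return 0;
}
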
diff --git a/drivers/staging/rtl8723au/core/rtw_ieee80211.c b/drivers/staging/rtl8723au/core/rtw_ieee80211.c
index 6274cb397c9..bbbcfc8257d 100644
--- a/drivers/staging/rtl8723au/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8723au/core/rtw_ieee80211.c
@@ -69,6 +69,7 @@ int rtw_get_bit_value_from_ieee_value23a(u8 val)
{2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108, 0};
int i = 0;
+
while (dot11_rate_table[i] != 0) {
if (dot11_rate_table[i] == val)
return BIT(i);
@@ -301,8 +302,7 @@ void rtw_set_supported_rate23a(u8 *SupportedRates, uint mode)
memset(SupportedRates, 0, NDIS_802_11_LENGTH_RATES_EX);
- switch (mode)
- {
+ switch (mode) {
case WIRELESS_11B:
memcpy(SupportedRates, WIFI_CCKRATES, IEEE80211_CCK_RATE_LEN);
break;
diff --git a/drivers/staging/rtl8723au/core/rtw_led.c b/drivers/staging/rtl8723au/core/rtw_led.c
deleted file mode 100644
index 989cda29a57..00000000000
--- a/drivers/staging/rtl8723au/core/rtw_led.c
+++ /dev/null
@@ -1,1893 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-
-#include <drv_types.h>
-#include <rtl8723a_led.h>
-
-/* */
-/* Description: */
-/* Callback function of LED BlinkTimer, */
-/* it just schedules to corresponding BlinkWorkItem/led_blink_hdl23a */
-/* */
-static void BlinkTimerCallback(unsigned long data)
-{
- struct led_8723a *pLed = (struct led_8723a *)data;
- struct rtw_adapter *padapter = pLed->padapter;
-
- /* DBG_8723A("%s\n", __func__); */
-
- if ((padapter->bSurpriseRemoved == true) || (padapter->bDriverStopped == true))
- {
- /* DBG_8723A("%s bSurpriseRemoved:%d, bDriverStopped:%d\n", __func__, padapter->bSurpriseRemoved, padapter->bDriverStopped); */
- return;
- }
- schedule_work(&pLed->BlinkWorkItem);
-}
-
-/* */
-/* Description: */
-/* Callback function of LED BlinkWorkItem. */
-/* We dispatch acture LED blink action according to LedStrategy. */
-/* */
-void BlinkWorkItemCallback23a(struct work_struct *work)
-{
- struct led_8723a *pLed = container_of(work, struct led_8723a, BlinkWorkItem);
- BlinkHandler23a(pLed);
-}
-
-/* */
-/* Description: */
-/* Reset status of led_8723a object. */
-/* */
-void ResetLedStatus23a(struct led_8723a * pLed) {
-
- pLed->CurrLedState = RTW_LED_OFF; /* Current LED state. */
- pLed->bLedOn = false; /* true if LED is ON, false if LED is OFF. */
-
- pLed->bLedBlinkInProgress = false; /* true if it is blinking, false o.w.. */
- pLed->bLedWPSBlinkInProgress = false;
-
- pLed->BlinkTimes = 0; /* Number of times to toggle led state for blinking. */
- pLed->BlinkingLedState = LED_UNKNOWN; /* Next state for blinking, either RTW_LED_ON or RTW_LED_OFF are. */
-
- pLed->bLedNoLinkBlinkInProgress = false;
- pLed->bLedLinkBlinkInProgress = false;
- pLed->bLedStartToLinkBlinkInProgress = false;
- pLed->bLedScanBlinkInProgress = false;
-}
-
- /* */
-/* Description: */
-/* Initialize an led_8723a object. */
-/* */
-void
-InitLed871x23a(struct rtw_adapter *padapter, struct led_8723a *pLed, enum led_pin_8723a LedPin)
-{
- pLed->padapter = padapter;
- pLed->LedPin = LedPin;
-
- ResetLedStatus23a(pLed);
-
- setup_timer(&pLed->BlinkTimer, BlinkTimerCallback, (unsigned long)pLed);
-
- INIT_WORK(&pLed->BlinkWorkItem, BlinkWorkItemCallback23a);
-}
-
-/* */
-/* Description: */
-/* DeInitialize an led_8723a object. */
-/* */
-void
-DeInitLed871x23a(struct led_8723a *pLed)
-{
- cancel_work_sync(&pLed->BlinkWorkItem);
- del_timer_sync(&pLed->BlinkTimer);
- ResetLedStatus23a(pLed);
-}
-
-/* Description: */
-/* Implementation of LED blinking behavior. */
-/* It toggle off LED and schedule corresponding timer if necessary. */
-
-static void SwLedBlink(struct led_8723a *pLed)
-{
- struct rtw_adapter *padapter = pLed->padapter;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- u8 bStopBlinking = false;
-
- /* Change LED according to BlinkingLedState specified. */
- if (pLed->BlinkingLedState == RTW_LED_ON) {
- SwLedOn23a(padapter, pLed);
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn on\n", pLed->BlinkTimes));
- } else {
- SwLedOff23a(padapter, pLed);
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn off\n", pLed->BlinkTimes));
- }
-
- /* Determine if we shall change LED state again. */
- pLed->BlinkTimes--;
- switch (pLed->CurrLedState) {
-
- case LED_BLINK_NORMAL:
- if (pLed->BlinkTimes == 0)
- bStopBlinking = true;
- break;
- case LED_BLINK_StartToBlink:
- if (check_fwstate(pmlmepriv, _FW_LINKED) &&
- check_fwstate(pmlmepriv, WIFI_STATION_STATE))
- bStopBlinking = true;
- if (check_fwstate(pmlmepriv, _FW_LINKED) &&
- (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) ||
- check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)))
- bStopBlinking = true;
- else if (pLed->BlinkTimes == 0)
- bStopBlinking = true;
- break;
- case LED_BLINK_WPS:
- if (pLed->BlinkTimes == 0)
- bStopBlinking = true;
- break;
- default:
- bStopBlinking = true;
- break;
- }
-
- if (bStopBlinking) {
- if ((check_fwstate(pmlmepriv, _FW_LINKED)) && !pLed->bLedOn)
- SwLedOn23a(padapter, pLed);
- else if ((check_fwstate(pmlmepriv, _FW_LINKED)) && pLed->bLedOn)
- SwLedOff23a(padapter, pLed);
-
- pLed->BlinkTimes = 0;
- pLed->bLedBlinkInProgress = false;
- } else {
- /* Assign LED state to toggle. */
- if (pLed->BlinkingLedState == RTW_LED_ON)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
-
- /* Schedule a timer to toggle LED state. */
- switch (pLed->CurrLedState) {
- case LED_BLINK_NORMAL:
- mod_timer(&pLed->BlinkTimer, jiffies +
- msecs_to_jiffies(LED_BLINK_NORMAL_INTERVAL));
- break;
- case LED_BLINK_SLOWLY:
- case LED_BLINK_StartToBlink:
- mod_timer(&pLed->BlinkTimer, jiffies +
- msecs_to_jiffies(LED_BLINK_SLOWLY_INTERVAL));
- break;
- case LED_BLINK_WPS:
- mod_timer(&pLed->BlinkTimer, jiffies +
- msecs_to_jiffies(LED_BLINK_LONG_INTERVAL));
- break;
- default:
- mod_timer(&pLed->BlinkTimer, jiffies +
- msecs_to_jiffies(LED_BLINK_SLOWLY_INTERVAL));
- break;
- }
- }
-}
-
-static void SwLedBlink1(struct led_8723a *pLed)
-{
- struct rtw_adapter *padapter = pLed->padapter;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- unsigned long delay = 0;
- u8 bStopBlinking = false;
-
- /* Change LED according to BlinkingLedState specified. */
- if (pLed->BlinkingLedState == RTW_LED_ON) {
- SwLedOn23a(padapter, pLed);
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_,
- ("Blinktimes (%d): turn on\n", pLed->BlinkTimes));
- } else {
- SwLedOff23a(padapter, pLed);
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_,
- ("Blinktimes (%d): turn off\n", pLed->BlinkTimes));
- }
-
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
- SwLedOff23a(padapter, pLed);
- ResetLedStatus23a(pLed);
- return;
- }
- switch (pLed->CurrLedState) {
- case LED_BLINK_SLOWLY:
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_NO_LINK_INTERVAL_ALPHA;
- break;
- case LED_BLINK_NORMAL:
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_LINK_INTERVAL_ALPHA;
- break;
- case LED_BLINK_SCAN:
- pLed->BlinkTimes--;
- if (pLed->BlinkTimes == 0)
- bStopBlinking = true;
- if (bStopBlinking) {
- if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- pLed->bLedLinkBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_NORMAL;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_LINK_INTERVAL_ALPHA;
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
- } else {
- pLed->bLedNoLinkBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SLOWLY;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_NO_LINK_INTERVAL_ALPHA;
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
- }
- pLed->bLedScanBlinkInProgress = false;
- } else {
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_SCAN_INTERVAL_ALPHA;
- }
- break;
- case LED_BLINK_TXRX:
- pLed->BlinkTimes--;
- if (pLed->BlinkTimes == 0)
- bStopBlinking = true;
- if (bStopBlinking) {
- if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- pLed->bLedLinkBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_NORMAL;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_LINK_INTERVAL_ALPHA;
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
- } else {
- pLed->bLedNoLinkBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SLOWLY;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_NO_LINK_INTERVAL_ALPHA;
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
- }
- pLed->BlinkTimes = 0;
- pLed->bLedBlinkInProgress = false;
- } else {
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_FASTER_INTERVAL_ALPHA;
- }
- break;
- case LED_BLINK_WPS:
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_SCAN_INTERVAL_ALPHA;
- break;
- case LED_BLINK_WPS_STOP: /* WPS success */
- if (pLed->BlinkingLedState == RTW_LED_ON)
- bStopBlinking = false;
- else
- bStopBlinking = true;
- if (bStopBlinking) {
- pLed->bLedLinkBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_NORMAL;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_LINK_INTERVAL_ALPHA;
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
-
- pLed->bLedWPSBlinkInProgress = false;
- } else {
- pLed->BlinkingLedState = RTW_LED_OFF;
- delay = LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA;
- }
- break;
- default:
- break;
- }
- if (delay)
- mod_timer(&pLed->BlinkTimer, jiffies + msecs_to_jiffies(delay));
-}
-
-static void SwLedBlink2(struct led_8723a *pLed)
-{
- struct rtw_adapter *padapter = pLed->padapter;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- u8 bStopBlinking = false;
-
- /* Change LED according to BlinkingLedState specified. */
- if (pLed->BlinkingLedState == RTW_LED_ON) {
- SwLedOn23a(padapter, pLed);
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_,
- ("Blinktimes (%d): turn on\n", pLed->BlinkTimes));
- } else {
- SwLedOff23a(padapter, pLed);
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_,
- ("Blinktimes (%d): turn off\n", pLed->BlinkTimes));
- }
- switch (pLed->CurrLedState) {
- case LED_BLINK_SCAN:
- pLed->BlinkTimes--;
- if (pLed->BlinkTimes == 0)
- bStopBlinking = true;
- if (bStopBlinking) {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
- SwLedOff23a(padapter, pLed);
- } else if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- pLed->CurrLedState = RTW_LED_ON;
- pLed->BlinkingLedState = RTW_LED_ON;
- SwLedOn23a(padapter, pLed);
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_,
- ("stop scan blink CurrLedState %d\n",
- pLed->CurrLedState));
- } else {
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
- SwLedOff23a(padapter, pLed);
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_,
- ("stop scan blink CurrLedState %d\n",
- pLed->CurrLedState));
- }
- pLed->bLedScanBlinkInProgress = false;
- } else {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
- SwLedOff23a(padapter, pLed);
- } else {
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- mod_timer(&pLed->BlinkTimer,
- jiffies + msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
- }
- }
- break;
- case LED_BLINK_TXRX:
- pLed->BlinkTimes--;
- if (pLed->BlinkTimes == 0)
- bStopBlinking = true;
- if (bStopBlinking) {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
- SwLedOff23a(padapter, pLed);
- } else if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- pLed->CurrLedState = RTW_LED_ON;
- pLed->BlinkingLedState = RTW_LED_ON;
- SwLedOn23a(padapter, pLed);
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_,
- ("stop CurrLedState %d\n", pLed->CurrLedState));
-
- } else {
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
- SwLedOff23a(padapter, pLed);
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_,
- ("stop CurrLedState %d\n", pLed->CurrLedState));
- }
- pLed->bLedBlinkInProgress = false;
- } else {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
- SwLedOff23a(padapter, pLed);
- } else {
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- mod_timer(&pLed->BlinkTimer,
- jiffies + msecs_to_jiffies(LED_BLINK_FASTER_INTERVAL_ALPHA));
- }
- }
- break;
- default:
- break;
- }
-}
-
-static void SwLedBlink3(struct led_8723a *pLed)
-{
- struct rtw_adapter *padapter = pLed->padapter;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- u8 bStopBlinking = false;
-
- /* Change LED according to BlinkingLedState specified. */
- if (pLed->BlinkingLedState == RTW_LED_ON)
- {
- SwLedOn23a(padapter, pLed);
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn on\n", pLed->BlinkTimes));
- }
- else
- {
- if (pLed->CurrLedState != LED_BLINK_WPS_STOP)
- SwLedOff23a(padapter, pLed);
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn off\n", pLed->BlinkTimes));
- }
-
- switch (pLed->CurrLedState)
- {
- case LED_BLINK_SCAN:
- pLed->BlinkTimes--;
- if (pLed->BlinkTimes == 0)
- {
- bStopBlinking = true;
- }
-
- if (bStopBlinking)
- {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on)
- {
- SwLedOff23a(padapter, pLed);
- }
- else if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- pLed->CurrLedState = RTW_LED_ON;
- pLed->BlinkingLedState = RTW_LED_ON;
- if (!pLed->bLedOn)
- SwLedOn23a(padapter, pLed);
-
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
- } else {
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
- if (pLed->bLedOn)
- SwLedOff23a(padapter, pLed);
-
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
- }
- pLed->bLedScanBlinkInProgress = false;
- }
- else
- {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on)
- {
- SwLedOff23a(padapter, pLed);
- }
- else
- {
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- mod_timer(&pLed->BlinkTimer,
- jiffies + msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
- }
- }
- break;
-
- case LED_BLINK_TXRX:
- pLed->BlinkTimes--;
- if (pLed->BlinkTimes == 0)
- {
- bStopBlinking = true;
- }
- if (bStopBlinking)
- {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on)
- {
- SwLedOff23a(padapter, pLed);
- } else if (check_fwstate(pmlmepriv,
- _FW_LINKED)) {
- pLed->CurrLedState = RTW_LED_ON;
- pLed->BlinkingLedState = RTW_LED_ON;
-
- if (!pLed->bLedOn)
- SwLedOn23a(padapter, pLed);
-
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
- } else {
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
-
- if (pLed->bLedOn)
- SwLedOff23a(padapter, pLed);
-
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
- }
- pLed->bLedBlinkInProgress = false;
- }
- else
- {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on)
- {
- SwLedOff23a(padapter, pLed);
- }
- else
- {
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- mod_timer(&pLed->BlinkTimer,
- jiffies + msecs_to_jiffies(LED_BLINK_FASTER_INTERVAL_ALPHA));
- }
- }
- break;
-
- case LED_BLINK_WPS:
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- mod_timer(&pLed->BlinkTimer, jiffies +
- msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
- break;
-
- case LED_BLINK_WPS_STOP: /* WPS success */
- if (pLed->BlinkingLedState == RTW_LED_ON)
- {
- pLed->BlinkingLedState = RTW_LED_OFF;
- mod_timer(&pLed->BlinkTimer, jiffies +
- msecs_to_jiffies(LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA));
- bStopBlinking = false;
- } else {
- bStopBlinking = true;
- }
-
- if (bStopBlinking)
- {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on)
- {
- SwLedOff23a(padapter, pLed);
- }
- else
- {
- pLed->CurrLedState = RTW_LED_ON;
- pLed->BlinkingLedState = RTW_LED_ON;
- SwLedOn23a(padapter, pLed);
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
- }
- pLed->bLedWPSBlinkInProgress = false;
- }
- break;
-
- default:
- break;
- }
-}
-
-static void SwLedBlink4(struct led_8723a *pLed)
-{
- struct rtw_adapter *padapter = pLed->padapter;
- struct led_priv *ledpriv = &padapter->ledpriv;
- struct led_8723a *pLed1 = &ledpriv->SwLed1;
- u8 bStopBlinking = false;
- unsigned long delay = 0;
-
- /* Change LED according to BlinkingLedState specified. */
- if (pLed->BlinkingLedState == RTW_LED_ON)
- {
- SwLedOn23a(padapter, pLed);
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn on\n", pLed->BlinkTimes));
- } else {
- SwLedOff23a(padapter, pLed);
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn off\n", pLed->BlinkTimes));
- }
-
- if (!pLed1->bLedWPSBlinkInProgress && pLed1->BlinkingLedState == LED_UNKNOWN)
- {
- pLed1->BlinkingLedState = RTW_LED_OFF;
- pLed1->CurrLedState = RTW_LED_OFF;
- SwLedOff23a(padapter, pLed1);
- }
-
- switch (pLed->CurrLedState)
- {
- case LED_BLINK_SLOWLY:
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_NO_LINK_INTERVAL_ALPHA;
- break;
-
- case LED_BLINK_StartToBlink:
- if (pLed->bLedOn) {
- pLed->BlinkingLedState = RTW_LED_OFF;
- delay = LED_BLINK_SLOWLY_INTERVAL;
- } else {
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_NORMAL_INTERVAL;
- }
- break;
-
- case LED_BLINK_SCAN:
- pLed->BlinkTimes--;
- if (pLed->BlinkTimes == 0) {
- bStopBlinking = false;
- }
-
- if (bStopBlinking) {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
- SwLedOff23a(padapter, pLed);
- } else {
- pLed->bLedNoLinkBlinkInProgress = false;
- pLed->CurrLedState = LED_BLINK_SLOWLY;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_NO_LINK_INTERVAL_ALPHA;
- }
- pLed->bLedScanBlinkInProgress = false;
- } else {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
- SwLedOff23a(padapter, pLed);
- } else {
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_SCAN_INTERVAL_ALPHA;
- }
- }
- break;
-
- case LED_BLINK_TXRX:
- pLed->BlinkTimes--;
- if (pLed->BlinkTimes == 0) {
- bStopBlinking = true;
- }
- if (bStopBlinking) {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
- SwLedOff23a(padapter, pLed);
- } else {
- pLed->bLedNoLinkBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SLOWLY;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_NO_LINK_INTERVAL_ALPHA;
- }
- pLed->bLedBlinkInProgress = false;
- } else {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
- SwLedOff23a(padapter, pLed);
- } else {
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_FASTER_INTERVAL_ALPHA;
- }
- }
- break;
-
- case LED_BLINK_WPS:
- if (pLed->bLedOn) {
- pLed->BlinkingLedState = RTW_LED_OFF;
- delay = LED_BLINK_SLOWLY_INTERVAL;
- } else {
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_NORMAL_INTERVAL;
- }
- break;
-
- case LED_BLINK_WPS_STOP: /* WPS authentication fail */
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
-
- delay = LED_BLINK_NORMAL_INTERVAL;
- break;
-
- case LED_BLINK_WPS_STOP_OVERLAP: /* WPS session overlap */
- pLed->BlinkTimes--;
- if (pLed->BlinkTimes == 0) {
- if (pLed->bLedOn) {
- pLed->BlinkTimes = 1;
- } else {
- bStopBlinking = true;
- }
- }
-
- if (bStopBlinking) {
- pLed->BlinkTimes = 10;
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_LINK_INTERVAL_ALPHA;
- } else {
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
-
- delay = LED_BLINK_NORMAL_INTERVAL;
- }
- break;
-
- default:
- break;
- }
- if (delay)
- mod_timer(&pLed->BlinkTimer, jiffies + msecs_to_jiffies(delay));
-
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("SwLedBlink4 CurrLedState %d\n", pLed->CurrLedState));
-}
-
-static void SwLedBlink5(struct led_8723a *pLed)
-{
- struct rtw_adapter *padapter = pLed->padapter;
- u8 bStopBlinking = false;
- unsigned long delay = 0;
-
- /* Change LED according to BlinkingLedState specified. */
- if (pLed->BlinkingLedState == RTW_LED_ON) {
- SwLedOn23a(padapter, pLed);
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn on\n", pLed->BlinkTimes));
- } else {
- SwLedOff23a(padapter, pLed);
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn off\n", pLed->BlinkTimes));
- }
-
- switch (pLed->CurrLedState)
- {
- case LED_BLINK_SCAN:
- pLed->BlinkTimes--;
- if (pLed->BlinkTimes == 0) {
- bStopBlinking = true;
- }
-
- if (bStopBlinking) {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
- if (pLed->bLedOn)
- SwLedOff23a(padapter, pLed);
- } else {
- pLed->CurrLedState = RTW_LED_ON;
- pLed->BlinkingLedState = RTW_LED_ON;
- if (!pLed->bLedOn)
- delay = LED_BLINK_FASTER_INTERVAL_ALPHA;
- }
-
- pLed->bLedScanBlinkInProgress = false;
- } else {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
- SwLedOff23a(padapter, pLed);
- } else {
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_SCAN_INTERVAL_ALPHA;
- }
- }
- break;
-
- case LED_BLINK_TXRX:
- pLed->BlinkTimes--;
- if (pLed->BlinkTimes == 0) {
- bStopBlinking = true;
- }
-
- if (bStopBlinking) {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
- if (pLed->bLedOn)
- SwLedOff23a(padapter, pLed);
- } else {
- pLed->CurrLedState = RTW_LED_ON;
- pLed->BlinkingLedState = RTW_LED_ON;
- if (!pLed->bLedOn)
- delay = LED_BLINK_FASTER_INTERVAL_ALPHA;
- }
-
- pLed->bLedBlinkInProgress = false;
- } else {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
- SwLedOff23a(padapter, pLed);
- } else {
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_FASTER_INTERVAL_ALPHA;
- }
- }
- break;
-
- default:
- break;
- }
-
- if (delay)
- mod_timer(&pLed->BlinkTimer, jiffies + msecs_to_jiffies(delay));
-
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("SwLedBlink5 CurrLedState %d\n", pLed->CurrLedState));
-}
-
-static void SwLedBlink6(struct led_8723a *pLed)
-{
- struct rtw_adapter *padapter = pLed->padapter;
-
- /* Change LED according to BlinkingLedState specified. */
- if (pLed->BlinkingLedState == RTW_LED_ON) {
- SwLedOn23a(padapter, pLed);
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn on\n", pLed->BlinkTimes));
- } else {
- SwLedOff23a(padapter, pLed);
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn off\n", pLed->BlinkTimes));
- }
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("<==== blink6\n"));
-}
-
-/* ALPHA, added by chiyoko, 20090106 */
-static void
-SwLedControlMode1(struct rtw_adapter *padapter, enum led_ctl_mode LedAction)
-{
- struct led_priv *ledpriv = &padapter->ledpriv;
- struct led_8723a *pLed = &ledpriv->SwLed0;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- long delay = -1;
-
- switch (LedAction)
- {
- case LED_CTL_POWER_ON:
- case LED_CTL_START_TO_LINK:
- case LED_CTL_NO_LINK:
- if (pLed->bLedNoLinkBlinkInProgress == false) {
- if (pLed->CurrLedState == LED_BLINK_SCAN ||
- IS_LED_WPS_BLINKING(pLed)) {
- return;
- }
- if (pLed->bLedLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedLinkBlinkInProgress = false;
- }
- if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
-
- pLed->bLedNoLinkBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SLOWLY;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_NO_LINK_INTERVAL_ALPHA;
- }
- break;
-
- case LED_CTL_LINK:
- if (pLed->bLedLinkBlinkInProgress == false) {
- if (pLed->CurrLedState == LED_BLINK_SCAN ||
- IS_LED_WPS_BLINKING(pLed)) {
- return;
- }
- if (pLed->bLedNoLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedNoLinkBlinkInProgress = false;
- }
- if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- pLed->bLedLinkBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_NORMAL;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_LINK_INTERVAL_ALPHA;
- }
- break;
-
- case LED_CTL_SITE_SURVEY:
- if (pmlmepriv->LinkDetectInfo.bBusyTraffic &&
- check_fwstate(pmlmepriv, _FW_LINKED))
- ;
- else if (pLed->bLedScanBlinkInProgress == false) {
- if (IS_LED_WPS_BLINKING(pLed))
- return;
-
- if (pLed->bLedNoLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedNoLinkBlinkInProgress = false;
- }
- if (pLed->bLedLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedLinkBlinkInProgress = false;
- }
- if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- pLed->bLedScanBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SCAN;
- pLed->BlinkTimes = 24;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_SCAN_INTERVAL_ALPHA;
- }
- break;
-
- case LED_CTL_TX:
- case LED_CTL_RX:
- if (pLed->bLedBlinkInProgress == false) {
- if (pLed->CurrLedState == LED_BLINK_SCAN ||
- IS_LED_WPS_BLINKING(pLed)) {
- return;
- }
- if (pLed->bLedNoLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedNoLinkBlinkInProgress = false;
- }
- if (pLed->bLedLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedLinkBlinkInProgress = false;
- }
- pLed->bLedBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_TXRX;
- pLed->BlinkTimes = 2;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_FASTER_INTERVAL_ALPHA;
- }
- break;
-
- case LED_CTL_START_WPS: /* wait until xinpin finish */
- case LED_CTL_START_WPS_BOTTON:
- if (pLed->bLedWPSBlinkInProgress == false) {
- if (pLed->bLedNoLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedNoLinkBlinkInProgress = false;
- }
- if (pLed->bLedLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedLinkBlinkInProgress = false;
- }
- if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- if (pLed->bLedScanBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedScanBlinkInProgress = false;
- }
- pLed->bLedWPSBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_WPS;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_SCAN_INTERVAL_ALPHA;
- }
- break;
-
- case LED_CTL_STOP_WPS:
- if (pLed->bLedNoLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedNoLinkBlinkInProgress = false;
- }
- if (pLed->bLedLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedLinkBlinkInProgress = false;
- }
- if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- if (pLed->bLedScanBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedScanBlinkInProgress = false;
- }
- if (pLed->bLedWPSBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- } else {
- pLed->bLedWPSBlinkInProgress = true;
- }
-
- pLed->CurrLedState = LED_BLINK_WPS_STOP;
- if (pLed->bLedOn) {
- pLed->BlinkingLedState = RTW_LED_OFF;
- delay = LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA;
- } else {
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = 0;
- }
- break;
-
- case LED_CTL_STOP_WPS_FAIL:
- if (pLed->bLedWPSBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedWPSBlinkInProgress = false;
- }
-
- pLed->bLedNoLinkBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SLOWLY;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_NO_LINK_INTERVAL_ALPHA;
- break;
-
- case LED_CTL_POWER_OFF:
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
- if (pLed->bLedNoLinkBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedNoLinkBlinkInProgress = false;
- }
- if (pLed->bLedLinkBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedLinkBlinkInProgress = false;
- }
- if (pLed->bLedBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- if (pLed->bLedWPSBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedWPSBlinkInProgress = false;
- }
- if (pLed->bLedScanBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedScanBlinkInProgress = false;
- }
-
- SwLedOff23a(padapter, pLed);
- break;
-
- default:
- break;
-
- }
-
- if (delay != -1)
- mod_timer(&pLed->BlinkTimer, jiffies + msecs_to_jiffies(delay));
-
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Led %d\n", pLed->CurrLedState));
-}
-
- /* Arcadyan/Sitecom, added by chiyoko, 20090216 */
-static void
-SwLedControlMode2(struct rtw_adapter *padapter, enum led_ctl_mode LedAction)
-{
- struct led_priv *ledpriv = &padapter->ledpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct led_8723a *pLed = &ledpriv->SwLed0;
- long delay = -1;
-
- switch (LedAction) {
- case LED_CTL_SITE_SURVEY:
- if (pmlmepriv->LinkDetectInfo.bBusyTraffic)
- ;
- else if (pLed->bLedScanBlinkInProgress == false) {
- if (IS_LED_WPS_BLINKING(pLed))
- return;
-
- if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- pLed->bLedScanBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SCAN;
- pLed->BlinkTimes = 24;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_SCAN_INTERVAL_ALPHA;
- }
- break;
- case LED_CTL_TX:
- case LED_CTL_RX:
- if (pLed->bLedBlinkInProgress == false &&
- check_fwstate(pmlmepriv, _FW_LINKED)) {
- if (pLed->CurrLedState == LED_BLINK_SCAN ||
- IS_LED_WPS_BLINKING(pLed)) {
- return;
- }
-
- pLed->bLedBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_TXRX;
- pLed->BlinkTimes = 2;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_FASTER_INTERVAL_ALPHA;
- }
- break;
- case LED_CTL_LINK:
- pLed->CurrLedState = RTW_LED_ON;
- pLed->BlinkingLedState = RTW_LED_ON;
- if (pLed->bLedBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- if (pLed->bLedScanBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedScanBlinkInProgress = false;
- }
-
- delay = 0;
- break;
- case LED_CTL_START_WPS: /* wait until xinpin finish */
- case LED_CTL_START_WPS_BOTTON:
- if (pLed->bLedWPSBlinkInProgress == false) {
- if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- if (pLed->bLedScanBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedScanBlinkInProgress = false;
- }
- pLed->bLedWPSBlinkInProgress = true;
- pLed->CurrLedState = RTW_LED_ON;
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = 0;
- }
- break;
- case LED_CTL_STOP_WPS:
- pLed->bLedWPSBlinkInProgress = false;
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
- SwLedOff23a(padapter, pLed);
- } else {
- pLed->CurrLedState = RTW_LED_ON;
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = 0;
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
- }
- break;
- case LED_CTL_STOP_WPS_FAIL:
- pLed->bLedWPSBlinkInProgress = false;
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
- SwLedOff23a(padapter, pLed);
- } else {
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
- delay = 0;
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
- }
- break;
- case LED_CTL_START_TO_LINK:
- case LED_CTL_NO_LINK:
- if (!IS_LED_BLINKING(pLed))
- {
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
- delay = 0;
- }
- break;
- case LED_CTL_POWER_OFF:
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
- if (pLed->bLedBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- if (pLed->bLedScanBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedScanBlinkInProgress = false;
- }
- if (pLed->bLedWPSBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedWPSBlinkInProgress = false;
- }
-
- delay = 0;
- break;
- default:
- break;
-
- }
-
- if (delay != -1)
- mod_timer(&pLed->BlinkTimer, jiffies + msecs_to_jiffies(delay));
-
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
-}
-
- /* COREGA, added by chiyoko, 20090316 */
-static void
-SwLedControlMode3(struct rtw_adapter *padapter, enum led_ctl_mode LedAction)
-{
- struct led_priv *ledpriv = &padapter->ledpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct led_8723a *pLed = &ledpriv->SwLed0;
- long delay = -1;
-
- switch (LedAction)
- {
- case LED_CTL_SITE_SURVEY:
- if (pmlmepriv->LinkDetectInfo.bBusyTraffic)
- ;
- else if (pLed->bLedScanBlinkInProgress == false) {
- if (IS_LED_WPS_BLINKING(pLed))
- return;
-
- if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- pLed->bLedScanBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SCAN;
- pLed->BlinkTimes = 24;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_SCAN_INTERVAL_ALPHA;
- }
- break;
-
- case LED_CTL_TX:
- case LED_CTL_RX:
- if (pLed->bLedBlinkInProgress == false &&
- check_fwstate(pmlmepriv, _FW_LINKED)) {
- if (pLed->CurrLedState == LED_BLINK_SCAN ||
- IS_LED_WPS_BLINKING(pLed)) {
- return;
- }
-
- pLed->bLedBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_TXRX;
- pLed->BlinkTimes = 2;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_FASTER_INTERVAL_ALPHA;
- }
- break;
-
- case LED_CTL_LINK:
- if (IS_LED_WPS_BLINKING(pLed))
- return;
-
- pLed->CurrLedState = RTW_LED_ON;
- pLed->BlinkingLedState = RTW_LED_ON;
- if (pLed->bLedBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- if (pLed->bLedScanBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedScanBlinkInProgress = false;
- }
-
- delay = 0;
- break;
-
- case LED_CTL_START_WPS: /* wait until xinpin finish */
- case LED_CTL_START_WPS_BOTTON:
- if (pLed->bLedWPSBlinkInProgress == false) {
- if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- if (pLed->bLedScanBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedScanBlinkInProgress = false;
- }
- pLed->bLedWPSBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_WPS;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_SCAN_INTERVAL_ALPHA;
- }
- break;
-
- case LED_CTL_STOP_WPS:
- if (pLed->bLedWPSBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedWPSBlinkInProgress = false;
- } else {
- pLed->bLedWPSBlinkInProgress = true;
- }
-
- pLed->CurrLedState = LED_BLINK_WPS_STOP;
- if (pLed->bLedOn) {
- pLed->BlinkingLedState = RTW_LED_OFF;
- delay = LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA;
- } else {
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = 0;
- }
-
- break;
-
- case LED_CTL_STOP_WPS_FAIL:
- if (pLed->bLedWPSBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedWPSBlinkInProgress = false;
- }
-
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
- delay = 0;
- break;
-
- case LED_CTL_START_TO_LINK:
- case LED_CTL_NO_LINK:
- if (!IS_LED_BLINKING(pLed))
- {
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
- delay = 0;
- }
- break;
-
- case LED_CTL_POWER_OFF:
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
- if (pLed->bLedBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- if (pLed->bLedScanBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedScanBlinkInProgress = false;
- }
- if (pLed->bLedWPSBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedWPSBlinkInProgress = false;
- }
-
- delay = 0;
- break;
-
- default:
- break;
-
- }
-
- if (delay != -1)
- mod_timer(&pLed->BlinkTimer, jiffies + msecs_to_jiffies(delay));
-
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
-}
-
- /* Edimax-Belkin, added by chiyoko, 20090413 */
-static void
-SwLedControlMode4(struct rtw_adapter *padapter, enum led_ctl_mode LedAction)
-{
- struct led_priv *ledpriv = &padapter->ledpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct led_8723a *pLed = &ledpriv->SwLed0;
- struct led_8723a *pLed1 = &ledpriv->SwLed1;
-
- switch (LedAction)
- {
- case LED_CTL_START_TO_LINK:
- if (pLed1->bLedWPSBlinkInProgress) {
- pLed1->bLedWPSBlinkInProgress = false;
- del_timer_sync(&pLed1->BlinkTimer);
-
- pLed1->BlinkingLedState = RTW_LED_OFF;
- pLed1->CurrLedState = RTW_LED_OFF;
-
- if (pLed1->bLedOn)
- mod_timer(&pLed->BlinkTimer, jiffies);
- }
-
- if (pLed->bLedStartToLinkBlinkInProgress == false) {
- if (pLed->CurrLedState == LED_BLINK_SCAN ||
- IS_LED_WPS_BLINKING(pLed)) {
- return;
- }
- if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- if (pLed->bLedNoLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedNoLinkBlinkInProgress = false;
- }
-
- pLed->bLedStartToLinkBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_StartToBlink;
- if (pLed->bLedOn) {
- pLed->BlinkingLedState = RTW_LED_OFF;
- mod_timer(&pLed->BlinkTimer,
- jiffies + msecs_to_jiffies(LED_BLINK_SLOWLY_INTERVAL));
- } else {
- pLed->BlinkingLedState = RTW_LED_ON;
- mod_timer(&pLed->BlinkTimer,
- jiffies + msecs_to_jiffies(LED_BLINK_NORMAL_INTERVAL));
- }
- }
- break;
-
- case LED_CTL_LINK:
- case LED_CTL_NO_LINK:
- /* LED1 settings */
- if (LedAction == LED_CTL_LINK) {
- if (pLed1->bLedWPSBlinkInProgress) {
- pLed1->bLedWPSBlinkInProgress = false;
- del_timer_sync(&pLed1->BlinkTimer);
-
- pLed1->BlinkingLedState = RTW_LED_OFF;
- pLed1->CurrLedState = RTW_LED_OFF;
-
- if (pLed1->bLedOn)
- mod_timer(&pLed->BlinkTimer, jiffies);
- }
- }
-
- if (pLed->bLedNoLinkBlinkInProgress == false) {
- if (pLed->CurrLedState == LED_BLINK_SCAN ||
- IS_LED_WPS_BLINKING(pLed)) {
- return;
- }
- if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
-
- pLed->bLedNoLinkBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SLOWLY;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- mod_timer(&pLed->BlinkTimer, jiffies +
- msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
- }
- break;
-
- case LED_CTL_SITE_SURVEY:
- if (pmlmepriv->LinkDetectInfo.bBusyTraffic &&
- check_fwstate(pmlmepriv, _FW_LINKED))
- ;
- else if (pLed->bLedScanBlinkInProgress == false) {
- if (IS_LED_WPS_BLINKING(pLed))
- return;
-
- if (pLed->bLedNoLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedNoLinkBlinkInProgress = false;
- }
- if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- pLed->bLedScanBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SCAN;
- pLed->BlinkTimes = 24;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- mod_timer(&pLed->BlinkTimer, jiffies +
- msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
- }
- break;
-
- case LED_CTL_TX:
- case LED_CTL_RX:
- if (pLed->bLedBlinkInProgress == false) {
- if (pLed->CurrLedState == LED_BLINK_SCAN ||
- IS_LED_WPS_BLINKING(pLed)) {
- return;
- }
- if (pLed->bLedNoLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedNoLinkBlinkInProgress = false;
- }
- pLed->bLedBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_TXRX;
- pLed->BlinkTimes = 2;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- mod_timer(&pLed->BlinkTimer, jiffies +
- msecs_to_jiffies(LED_BLINK_FASTER_INTERVAL_ALPHA));
- }
- break;
-
- case LED_CTL_START_WPS: /* wait until xinpin finish */
- case LED_CTL_START_WPS_BOTTON:
- if (pLed1->bLedWPSBlinkInProgress) {
- pLed1->bLedWPSBlinkInProgress = false;
- del_timer_sync(&pLed1->BlinkTimer);
-
- pLed1->BlinkingLedState = RTW_LED_OFF;
- pLed1->CurrLedState = RTW_LED_OFF;
-
- if (pLed1->bLedOn)
- mod_timer(&pLed->BlinkTimer, jiffies);
- }
-
- if (pLed->bLedWPSBlinkInProgress == false) {
- if (pLed->bLedNoLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedNoLinkBlinkInProgress = false;
- }
- if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- if (pLed->bLedScanBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedScanBlinkInProgress = false;
- }
- pLed->bLedWPSBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_WPS;
- if (pLed->bLedOn)
- {
- pLed->BlinkingLedState = RTW_LED_OFF;
- mod_timer(&pLed->BlinkTimer, jiffies +
- msecs_to_jiffies(LED_BLINK_SLOWLY_INTERVAL));
- } else {
- pLed->BlinkingLedState = RTW_LED_ON;
- mod_timer(&pLed->BlinkTimer, jiffies +
- msecs_to_jiffies(LED_BLINK_NORMAL_INTERVAL));
- }
- }
- break;
-
- case LED_CTL_STOP_WPS: /* WPS connect success */
- if (pLed->bLedWPSBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedWPSBlinkInProgress = false;
- }
-
- pLed->bLedNoLinkBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SLOWLY;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- mod_timer(&pLed->BlinkTimer, jiffies +
- msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
- break;
-
- case LED_CTL_STOP_WPS_FAIL: /* WPS authentication fail */
- if (pLed->bLedWPSBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedWPSBlinkInProgress = false;
- }
-
- pLed->bLedNoLinkBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SLOWLY;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- mod_timer(&pLed->BlinkTimer, jiffies +
- msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
-
- /* LED1 settings */
- if (pLed1->bLedWPSBlinkInProgress)
- del_timer_sync(&pLed1->BlinkTimer);
- else
- pLed1->bLedWPSBlinkInProgress = true;
-
- pLed1->CurrLedState = LED_BLINK_WPS_STOP;
- if (pLed1->bLedOn)
- pLed1->BlinkingLedState = RTW_LED_OFF;
- else
- pLed1->BlinkingLedState = RTW_LED_ON;
- mod_timer(&pLed->BlinkTimer, jiffies +
- msecs_to_jiffies(LED_BLINK_NORMAL_INTERVAL));
-
- break;
-
- case LED_CTL_STOP_WPS_FAIL_OVERLAP: /* WPS session overlap */
- if (pLed->bLedWPSBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedWPSBlinkInProgress = false;
- }
-
- pLed->bLedNoLinkBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SLOWLY;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- mod_timer(&pLed->BlinkTimer, jiffies +
- msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
-
- /* LED1 settings */
- if (pLed1->bLedWPSBlinkInProgress)
- del_timer_sync(&pLed1->BlinkTimer);
- else
- pLed1->bLedWPSBlinkInProgress = true;
-
- pLed1->CurrLedState = LED_BLINK_WPS_STOP_OVERLAP;
- pLed1->BlinkTimes = 10;
- if (pLed1->bLedOn)
- pLed1->BlinkingLedState = RTW_LED_OFF;
- else
- pLed1->BlinkingLedState = RTW_LED_ON;
- mod_timer(&pLed->BlinkTimer, jiffies +
- msecs_to_jiffies(LED_BLINK_NORMAL_INTERVAL));
-
- break;
-
- case LED_CTL_POWER_OFF:
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
-
- if (pLed->bLedNoLinkBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedNoLinkBlinkInProgress = false;
- }
- if (pLed->bLedLinkBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedLinkBlinkInProgress = false;
- }
- if (pLed->bLedBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- if (pLed->bLedWPSBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedWPSBlinkInProgress = false;
- }
- if (pLed->bLedScanBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedScanBlinkInProgress = false;
- }
- if (pLed->bLedStartToLinkBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedStartToLinkBlinkInProgress = false;
- }
-
- if (pLed1->bLedWPSBlinkInProgress) {
- del_timer_sync(&pLed1->BlinkTimer);
- pLed1->bLedWPSBlinkInProgress = false;
- }
-
- pLed1->BlinkingLedState = LED_UNKNOWN;
- SwLedOff23a(padapter, pLed);
- SwLedOff23a(padapter, pLed1);
- break;
-
- default:
- break;
-
- }
-
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Led %d\n", pLed->CurrLedState));
-}
-
- /* Sercomm-Belkin, added by chiyoko, 20090415 */
-static void
-SwLedControlMode5(struct rtw_adapter *padapter, enum led_ctl_mode LedAction)
-{
- struct led_priv *ledpriv = &padapter->ledpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct led_8723a *pLed = &ledpriv->SwLed0;
-
- switch (LedAction)
- {
- case LED_CTL_POWER_ON:
- case LED_CTL_NO_LINK:
- case LED_CTL_LINK: /* solid blue */
- pLed->CurrLedState = RTW_LED_ON;
- pLed->BlinkingLedState = RTW_LED_ON;
-
- mod_timer(&pLed->BlinkTimer, jiffies);
- break;
-
- case LED_CTL_SITE_SURVEY:
- if (pmlmepriv->LinkDetectInfo.bBusyTraffic &&
- check_fwstate(pmlmepriv, _FW_LINKED))
- ;
- else if (pLed->bLedScanBlinkInProgress == false)
- {
- if (pLed->bLedBlinkInProgress == true)
- {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- pLed->bLedScanBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SCAN;
- pLed->BlinkTimes = 24;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- mod_timer(&pLed->BlinkTimer, jiffies +
- msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
- }
- break;
-
- case LED_CTL_TX:
- case LED_CTL_RX:
- if (pLed->bLedBlinkInProgress == false) {
- if (pLed->CurrLedState == LED_BLINK_SCAN) {
- return;
- }
- pLed->bLedBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_TXRX;
- pLed->BlinkTimes = 2;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- mod_timer(&pLed->BlinkTimer, jiffies +
- msecs_to_jiffies(LED_BLINK_FASTER_INTERVAL_ALPHA));
- }
- break;
-
- case LED_CTL_POWER_OFF:
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
-
- if (pLed->bLedBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
-
- SwLedOff23a(padapter, pLed);
- break;
-
- default:
- break;
-
- }
-
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Led %d\n", pLed->CurrLedState));
-}
-
- /* WNC-Corega, added by chiyoko, 20090902 */
-static void SwLedControlMode6(struct rtw_adapter *padapter,
- enum led_ctl_mode LedAction)
-{
- struct led_priv *ledpriv = &padapter->ledpriv;
- struct led_8723a *pLed0 = &ledpriv->SwLed0;
-
- switch (LedAction) {
- case LED_CTL_POWER_ON:
- case LED_CTL_LINK:
- case LED_CTL_NO_LINK:
- del_timer_sync(&pLed0->BlinkTimer);
- pLed0->CurrLedState = RTW_LED_ON;
- pLed0->BlinkingLedState = RTW_LED_ON;
- mod_timer(&pLed0->BlinkTimer, jiffies);
- break;
- case LED_CTL_POWER_OFF:
- SwLedOff23a(padapter, pLed0);
- break;
- default:
- break;
- }
-
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("ledcontrol 6 Led %d\n", pLed0->CurrLedState));
-}
-
-/* */
-/* Description: */
-/* Handler function for LED blinking. */
-/* We dispatch the actual LED blink action according to LedStrategy. */
-/* */
-void BlinkHandler23a(struct led_8723a *pLed)
-{
- struct rtw_adapter *padapter = pLed->padapter;
- struct led_priv *ledpriv = &padapter->ledpriv;
-
- /* DBG_8723A("%s (%s:%d)\n", __func__, current->comm, current->pid); */
-
- if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped))
- return;
-
- switch (ledpriv->LedStrategy) {
- case SW_LED_MODE0:
- SwLedBlink(pLed);
- break;
- case SW_LED_MODE1:
- SwLedBlink1(pLed);
- break;
- case SW_LED_MODE2:
- SwLedBlink2(pLed);
- break;
- case SW_LED_MODE3:
- SwLedBlink3(pLed);
- break;
- case SW_LED_MODE4:
- SwLedBlink4(pLed);
- break;
- case SW_LED_MODE5:
- SwLedBlink5(pLed);
- break;
- case SW_LED_MODE6:
- SwLedBlink6(pLed);
- break;
- default:
- break;
- }
-}
-
-void
-LedControl871x23a(struct rtw_adapter *padapter, enum led_ctl_mode LedAction) {
- struct led_priv *ledpriv = &padapter->ledpriv;
-
- if ((padapter->bSurpriseRemoved == true) ||
- (padapter->bDriverStopped == true) ||
- (padapter->hw_init_completed == false)) {
- return;
- }
-
- if (ledpriv->bRegUseLed == false)
- return;
-
- /* if (!priv->up) */
- /* return; */
-
- /* if (priv->bInHctTest) */
- /* return; */
-
- if ((padapter->pwrctrlpriv.rf_pwrstate != rf_on &&
- padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) &&
- (LedAction == LED_CTL_TX || LedAction == LED_CTL_RX ||
- LedAction == LED_CTL_SITE_SURVEY ||
- LedAction == LED_CTL_LINK ||
- LedAction == LED_CTL_NO_LINK ||
- LedAction == LED_CTL_POWER_ON)) {
- return;
- }
-
- switch (ledpriv->LedStrategy) {
- case SW_LED_MODE0:
- break;
- case SW_LED_MODE1:
- SwLedControlMode1(padapter, LedAction);
- break;
- case SW_LED_MODE2:
- SwLedControlMode2(padapter, LedAction);
- break;
- case SW_LED_MODE3:
- SwLedControlMode3(padapter, LedAction);
- break;
- case SW_LED_MODE4:
- SwLedControlMode4(padapter, LedAction);
- break;
- case SW_LED_MODE5:
- SwLedControlMode5(padapter, LedAction);
- break;
- case SW_LED_MODE6:
- SwLedControlMode6(padapter, LedAction);
- break;
- default:
- break;
- }
-
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("LedStrategy:%d, LedAction %d\n", ledpriv->LedStrategy, LedAction));
-}
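
The LED control modes removed above all follow the same re-arm pattern: cancel any competing blink with del_timer_sync(), record the new CurrLedState, pick the next BlinkingLedState as the opposite of the LED's current bLedOn state, and restart BlinkTimer with the chosen interval. A minimal sketch of that pattern, assuming the driver's struct led_8723a and RTW_LED_* states (the helper name is invented for illustration):

#include <linux/timer.h>
#include <linux/jiffies.h>

/* Sketch only: mirrors the common tail of the SwLedControlMode*() cases. */
static void led_schedule_blink(struct led_8723a *pLed, unsigned long delay_ms)
{
	/* Toggle relative to the LED's current physical state... */
	pLed->BlinkingLedState = pLed->bLedOn ? RTW_LED_OFF : RTW_LED_ON;
	/* ...and let the blink timer handler apply it after delay_ms. */
	mod_timer(&pLed->BlinkTimer, jiffies + msecs_to_jiffies(delay_ms));
}
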
diff --git a/drivers/staging/rtl8723au/core/rtw_mlme.c b/drivers/staging/rtl8723au/core/rtw_mlme.c
index 1f6006439bb..7299ef0a2e5 100644
--- a/drivers/staging/rtl8723au/core/rtw_mlme.c
+++ b/drivers/staging/rtl8723au/core/rtw_mlme.c
@@ -50,7 +50,6 @@ static void rtw_init_mlme_timer(struct rtw_adapter *padapter)
int rtw_init_mlme_priv23a(struct rtw_adapter *padapter)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- int res = _SUCCESS;
pmlmepriv->nic_hdl = padapter;
@@ -68,7 +67,7 @@ int rtw_init_mlme_priv23a(struct rtw_adapter *padapter)
rtw_clear_scan_deny(padapter);
rtw_init_mlme_timer(padapter);
- return res;
+ return _SUCCESS;
}
#ifdef CONFIG_8723AU_AP_MODE
@@ -110,7 +109,6 @@ struct wlan_network *rtw_alloc_network(struct mlme_priv *pmlmepriv, gfp_t gfp)
pnetwork->network_type = 0;
pnetwork->fixed = false;
pnetwork->last_scanned = jiffies;
- pnetwork->aid = 0;
pnetwork->join_res = 0;
}
@@ -218,8 +216,6 @@ void rtw_generate_random_ibss23a(u8 *pibss)
pibss[3] = curtime & 0xff;/* p[0]; */
pibss[4] = (curtime >> 8) & 0xff;/* p[1]; */
pibss[5] = (curtime >> 16) & 0xff;/* p[2]; */
-
- return;
}
void rtw_set_roaming(struct rtw_adapter *adapter, u8 to_roaming)
@@ -356,12 +352,12 @@ rtw_get_oldest_wlan_network23a(struct rtw_queue *scanned_queue)
void update_network23a(struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src,
struct rtw_adapter *padapter, bool update_ie)
{
- u8 ss_ori = dst->PhyInfo.SignalStrength;
- u8 sq_ori = dst->PhyInfo.SignalQuality;
+ u8 ss_ori = dst->SignalStrength;
+ u8 sq_ori = dst->SignalQuality;
long rssi_ori = dst->Rssi;
- u8 ss_smp = src->PhyInfo.SignalStrength;
- u8 sq_smp = src->PhyInfo.SignalQuality;
+ u8 ss_smp = src->SignalStrength;
+ u8 sq_smp = src->SignalQuality;
long rssi_smp = src->Rssi;
u8 ss_final;
@@ -389,16 +385,16 @@ void update_network23a(struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src,
rssi_final = rssi_ori;
} else {
if (sq_smp != 101) { /* from the right channel */
- ss_final = ((u32)src->PhyInfo.SignalStrength +
- (u32)dst->PhyInfo.SignalStrength * 4) / 5;
- sq_final = ((u32)src->PhyInfo.SignalQuality +
- (u32)dst->PhyInfo.SignalQuality * 4) / 5;
+ ss_final = ((u32)src->SignalStrength +
+ (u32)dst->SignalStrength * 4) / 5;
+ sq_final = ((u32)src->SignalQuality +
+ (u32)dst->SignalQuality * 4) / 5;
rssi_final = src->Rssi+dst->Rssi * 4 / 5;
} else {
/* bss info not receiving from the right channel, use
the original RX signal infos */
- ss_final = dst->PhyInfo.SignalStrength;
- sq_final = dst->PhyInfo.SignalQuality;
+ ss_final = dst->SignalStrength;
+ sq_final = dst->SignalQuality;
rssi_final = dst->Rssi;
}
@@ -407,14 +403,13 @@ void update_network23a(struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src,
if (update_ie)
memcpy(dst, src, get_wlan_bssid_ex_sz(src));
- dst->PhyInfo.SignalStrength = ss_final;
- dst->PhyInfo.SignalQuality = sq_final;
+ dst->SignalStrength = ss_final;
+ dst->SignalQuality = sq_final;
dst->Rssi = rssi_final;
DBG_8723A("%s %s(%pM), SignalStrength:%u, SignalQuality:%u, "
"RawRSSI:%ld\n", __func__, dst->Ssid.ssid, dst->MacAddress,
- dst->PhyInfo.SignalStrength,
- dst->PhyInfo.SignalQuality, dst->Rssi);
+ dst->SignalStrength, dst->SignalQuality, dst->Rssi);
}
static void update_current_network(struct rtw_adapter *adapter,
@@ -487,12 +482,11 @@ static void rtw_update_scanned_network(struct rtw_adapter *adapter,
pnetwork->last_scanned = jiffies;
pnetwork->network_type = 0;
- pnetwork->aid = 0;
pnetwork->join_res = 0;
/* bss info not receiving from the right channel */
- if (pnetwork->network.PhyInfo.SignalQuality == 101)
- pnetwork->network.PhyInfo.SignalQuality = 0;
+ if (pnetwork->network.SignalQuality == 101)
+ pnetwork->network.SignalQuality = 0;
} else {
/*
* we have an entry and we are going to update it. But
@@ -579,8 +573,6 @@ void rtw_atimdone_event_callback23a(struct rtw_adapter *adapter, const u8 *pbuf)
{
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
("receive atimdone_evet\n"));
-
- return;
}
void rtw_survey_event_cb23a(struct rtw_adapter *adapter, const u8 *pbuf)
@@ -650,8 +642,6 @@ exit:
kfree(survey->bss);
survey->bss = NULL;
-
- return;
}
void
@@ -825,8 +815,6 @@ void rtw_indicate_connect23a(struct rtw_adapter *padapter)
if (!check_fwstate(&padapter->mlmepriv, _FW_LINKED)) {
set_fwstate(pmlmepriv, _FW_LINKED);
- rtw_led_control(padapter, LED_CTL_LINK);
-
rtw_cfg80211_indicate_connect(padapter);
netif_carrier_on(padapter->pnetdev);
@@ -871,10 +859,7 @@ void rtw_indicate_disconnect23a(struct rtw_adapter *padapter)
_clr_fwstate_(pmlmepriv, _FW_LINKED);
- rtw_led_control(padapter, LED_CTL_NO_LINK);
-
rtw_clear_scan_deny(padapter);
-
}
rtw_lps_ctrl_wk_cmd23a(padapter, LPS_CTRL_DISCONNECT, 1);
@@ -1028,22 +1013,18 @@ rtw_joinbss_update_network23a(struct rtw_adapter *padapter,
cur_network->network.beacon_interval =
ptarget_wlan->network.beacon_interval;
cur_network->network.tsf = ptarget_wlan->network.tsf;
- cur_network->aid = pnetwork->join_res;
rtw_set_signal_stat_timer(&padapter->recvpriv);
padapter->recvpriv.signal_strength =
- ptarget_wlan->network.PhyInfo.SignalStrength;
- padapter->recvpriv.signal_qual =
- ptarget_wlan->network.PhyInfo.SignalQuality;
+ ptarget_wlan->network.SignalStrength;
+ padapter->recvpriv.signal_qual = ptarget_wlan->network.SignalQuality;
/*
* the ptarget_wlan->network.Rssi is raw data, we use
- * ptarget_wlan->network.PhyInfo.SignalStrength instead (has scaled)
+ * ptarget_wlan->network.SignalStrength instead (has scaled)
*/
- padapter->recvpriv.rssi = translate_percentage_to_dbm(
- ptarget_wlan->network.PhyInfo.SignalStrength);
- DBG_8723A("%s signal_strength:%3u, rssi:%3d, signal_qual:%3u\n",
+ DBG_8723A("%s signal_strength:%3u, signal_qual:%3u\n",
__func__, padapter->recvpriv.signal_strength,
- padapter->recvpriv.rssi, padapter->recvpriv.signal_qual);
+ padapter->recvpriv.signal_qual);
rtw_set_signal_stat_timer(&padapter->recvpriv);
/* update fw_state will clr _FW_UNDER_LINKING here indirectly */
@@ -1132,7 +1113,7 @@ void rtw_joinbss_event_prehandle23a(struct rtw_adapter *adapter, u8 *pbuf)
if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) {
/* s1. find ptarget_wlan */
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- if (the_same_macaddr == true) {
+ if (the_same_macaddr) {
ptarget_wlan = rtw_find_network23a(&pmlmepriv->scanned_queue, cur_network->network.MacAddress);
} else {
pcur_wlan = rtw_find_network23a(&pmlmepriv->scanned_queue, cur_network->network.MacAddress);
@@ -1515,18 +1496,21 @@ out:
inline bool rtw_is_scan_deny(struct rtw_adapter *adapter)
{
struct mlme_priv *mlmepriv = &adapter->mlmepriv;
+
return (atomic_read(&mlmepriv->set_scan_deny) != 0) ? true : false;
}
void rtw_clear_scan_deny(struct rtw_adapter *adapter)
{
struct mlme_priv *mlmepriv = &adapter->mlmepriv;
+
atomic_set(&mlmepriv->set_scan_deny, 0);
}
void rtw_set_scan_deny_timer_hdl(unsigned long data)
{
struct rtw_adapter *adapter = (struct rtw_adapter *)data;
+
rtw_clear_scan_deny(adapter);
}
@@ -1540,7 +1524,8 @@ void rtw_set_scan_deny(struct rtw_adapter *adapter, u32 ms)
}
#if defined(IEEE80211_SCAN_RESULT_EXPIRE)
-#define RTW_SCAN_RESULT_EXPIRE IEEE80211_SCAN_RESULT_EXPIRE/HZ*1000 -1000 /* 3000 -1000 */
+#define RTW_SCAN_RESULT_EXPIRE \
+ ((IEEE80211_SCAN_RESULT_EXPIRE / (HZ*1000)) - 1000) /* 3000 -1000 */
#else
#define RTW_SCAN_RESULT_EXPIRE 2000
#endif
@@ -1766,7 +1751,7 @@ exit:
return ret;
}
-int rtw_set_auth23a(struct rtw_adapter * adapter,
+int rtw_set_auth23a(struct rtw_adapter *adapter,
struct security_priv *psecuritypriv)
{
struct cmd_obj *pcmd;
@@ -2151,6 +2136,7 @@ bool rtw_restructure_ht_ie23a(struct rtw_adapter *padapter, u8 *in_ie,
if (p && p[1] > 0) {
u32 rx_packet_offset, max_recvbuf_sz;
+
if (pmlmepriv->qos_option == 0) {
out_len = *pout_len;
pframe = rtw_set_ie23a(out_ie + out_len,
@@ -2165,9 +2151,9 @@ bool rtw_restructure_ht_ie23a(struct rtw_adapter *padapter, u8 *in_ie,
memset(&ht_capie, 0, sizeof(struct ieee80211_ht_cap));
- ht_capie.cap_info = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ ht_capie.cap_info = cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 |
- IEEE80211_HT_CAP_TX_STBC | IEEE80211_HT_CAP_DSSSCCK40;
+ IEEE80211_HT_CAP_TX_STBC | IEEE80211_HT_CAP_DSSSCCK40);
GetHalDefVar8192CUsb(padapter, HAL_DEF_RX_PACKET_OFFSET,
&rx_packet_offset);
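
One change in the hunk above wraps the HT capability bits in cpu_to_le16(): struct ieee80211_ht_cap is an on-air structure, so its cap_info member is __le16 and the host-order IEEE80211_HT_CAP_* flags must be converted before being stored. A minimal sketch of the pattern (the helper name is illustrative only):

#include <linux/ieee80211.h>

static void fill_ht_cap_info(struct ieee80211_ht_cap *ht_capie)
{
	/* cap_info is little-endian on the air, whatever the host order is. */
	ht_capie->cap_info = cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
					 IEEE80211_HT_CAP_SGI_20 |
					 IEEE80211_HT_CAP_SGI_40 |
					 IEEE80211_HT_CAP_TX_STBC |
					 IEEE80211_HT_CAP_DSSSCCK40);
}
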
diff --git a/drivers/staging/rtl8723au/core/rtw_mlme_ext.c b/drivers/staging/rtl8723au/core/rtw_mlme_ext.c
index 3eb77de17e3..0e0f73c86e5 100644
--- a/drivers/staging/rtl8723au/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723au/core/rtw_mlme_ext.c
@@ -331,6 +331,7 @@ rtw_update_TSF(struct mlme_ext_priv *pmlmeext, struct ieee80211_mgmt *mgmt)
int rtw_ch_set_search_ch23a(struct rt_channel_info *ch_set, const u32 ch)
{
int i;
+
for (i = 0; ch_set[i]. ChannelNum != 0; i++) {
if (ch == ch_set[i].ChannelNum)
break;
@@ -566,7 +567,6 @@ static u8 init_channel_set(struct rtw_adapter *padapter, u8 cplan,
int init_mlme_ext_priv23a(struct rtw_adapter *padapter)
{
- int res = _SUCCESS;
struct registry_priv *pregistrypriv = &padapter->registrypriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -593,7 +593,7 @@ int init_mlme_ext_priv23a(struct rtw_adapter *padapter)
pmlmeext->mlmeext_init = true;
pmlmeext->active_keep_alive_check = true;
- return res;
+ return _SUCCESS;
}
void free_mlme_ext_priv23a (struct mlme_ext_priv *pmlmeext)
@@ -680,8 +680,7 @@ void mgt_dispatcher23a(struct rtw_adapter *padapter,
}
#ifdef CONFIG_8723AU_AP_MODE
- switch (stype)
- {
+ switch (stype) {
case IEEE80211_STYPE_AUTH:
if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
ptable->func = &OnAuth23a;
@@ -1572,6 +1571,7 @@ OnAssocReq23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame)
pstat->uapsd_bk = 0;
if (pmlmepriv->qos_option) {
const u8 *end = pos + left;
+
p = pos;
for (;;) {
@@ -2335,12 +2335,9 @@ static int update_hidden_ssid(u8 *ies, u32 ies_len, u8 hidden_ssid_mode)
__func__, hidden_ssid_mode, ssid_ie, ssid_len_ori); */
if (ssid_ie && ssid_len_ori > 0) {
- switch (hidden_ssid_mode)
- {
+ switch (hidden_ssid_mode) {
case 1:
next_ie = ssid_ie + 2 + ssid_len_ori;
- remain_len = 0;
-
remain_len = ies_len -(next_ie-ies);
ssid_ie[1] = 0;
@@ -2599,7 +2596,9 @@ static void issue_probersp(struct rtw_adapter *padapter, unsigned char *da,
if (ssid_ie && cur_network->Ssid.ssid_len) {
uint remainder_ielen;
u8 *remainder_ie;
+
remainder_ie = ssid_ie + 2;
+
remainder_ielen = pframe - remainder_ie;
DBG_8723A_LEVEL(_drv_warning_, "%s(%s): "
@@ -2862,6 +2861,7 @@ static void issue_auth(struct rtw_adapter *padapter, struct sta_info *psta,
if (psta) { /* for AP mode */
#ifdef CONFIG_8723AU_AP_MODE
unsigned short val16;
+
ether_addr_copy(mgmt->da, psta->hwaddr);
ether_addr_copy(mgmt->sa, myid(&padapter->eeprompriv));
ether_addr_copy(mgmt->bssid, myid(&padapter->eeprompriv));
@@ -3306,6 +3306,7 @@ static void issue_assocreq(struct rtw_adapter *padapter)
!memcmp(p + 2, WMM_OUI23A, 4) ||
!memcmp(p + 2, WPS_OUI23A, 4)) {
u8 plen = p[1];
+
if (!padapter->registrypriv.wifi_spec) {
/* Commented by Kurt 20110629 */
/* In some older APs, WPS handshake */
@@ -3997,7 +3998,7 @@ int send_beacon23a(struct rtw_adapter *padapter)
yield();
bxmitok = rtl8723a_get_bcn_valid(padapter);
poll++;
- } while ((poll % 10) != 0 && bxmitok == false &&
+ } while ((poll % 10) != 0 && !bxmitok &&
!padapter->bSurpriseRemoved &&
!padapter->bDriverStopped);
@@ -4070,6 +4071,7 @@ static void rtw_site_survey(struct rtw_adapter *padapter)
if (ScanType == SCAN_ACTIVE) /* obey the channel plan setting... */
{
int i;
+
for (i = 0;i<RTW_SSID_SCAN_AMOUNT;i++) {
if (pmlmeext->sitesurvey_res.ssid[i].ssid_len) {
/* todo: to issue two probe req??? */
@@ -4197,9 +4199,9 @@ static struct wlan_bssid_ex *collect_bss_info(struct rtw_adapter *padapter,
/* get the signal strength */
/* in dBM.raw data */
bssid->Rssi = precv_frame->attrib.phy_info.RecvSignalPower;
- bssid->PhyInfo.SignalQuality =
+ bssid->SignalQuality =
precv_frame->attrib.phy_info.SignalQuality;/* in percentage */
- bssid->PhyInfo.SignalStrength =
+ bssid->SignalStrength =
precv_frame->attrib.phy_info.SignalStrength;/* in percentage */
/* checking SSID */
@@ -4293,6 +4295,7 @@ static struct wlan_bssid_ex *collect_bss_info(struct rtw_adapter *padapter,
bssid->IELength);
if (p && p[1] > 0) {
struct ieee80211_ht_cap *pHT_caps;
+
pHT_caps = (struct ieee80211_ht_cap *)(p + 2);
if (pHT_caps->cap_info &
@@ -4305,7 +4308,7 @@ static struct wlan_bssid_ex *collect_bss_info(struct rtw_adapter *padapter,
/* mark bss info receiving from nearby channel as SignalQuality 101 */
if (bssid->DSConfig != rtw_get_oper_ch23a(padapter))
- bssid->PhyInfo.SignalQuality = 101;
+ bssid->SignalQuality = 101;
return bssid;
fail:
@@ -4319,6 +4322,7 @@ static void start_create_ibss(struct rtw_adapter *padapter)
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
+
pmlmeext->cur_channel = (u8)pnetwork->DSConfig;
pmlmeinfo->bcn_interval = pnetwork->beacon_interval;
@@ -4354,9 +4358,7 @@ static void start_create_ibss(struct rtw_adapter *padapter)
report_join_res23a(padapter, 1);
pmlmeinfo->state |= WIFI_FW_ASSOC_SUCCESS;
}
- }
- else
- {
+ } else {
DBG_8723A("%s: invalid cap:%x\n", __func__, caps);
return;
}
@@ -4414,9 +4416,7 @@ static void start_clnt_join(struct rtw_adapter *padapter)
pmlmeinfo->state = MSR_ADHOC;
report_join_res23a(padapter, 1);
- }
- else
- {
+ } else {
/* DBG_8723A("marc: invalid cap:%x\n", caps); */
return;
}
@@ -4480,16 +4480,12 @@ int receive_disconnect23a(struct rtw_adapter *padapter,
DBG_8723A("%s\n", __func__);
- if ((pmlmeinfo->state&0x03) == MSR_INFRA)
- {
- if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS)
- {
+ if ((pmlmeinfo->state&0x03) == MSR_INFRA) {
+ if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) {
pmlmeinfo->state = MSR_NOLINK;
report_del_sta_event23a(padapter, MacAddr, reason);
- }
- else if (pmlmeinfo->state & WIFI_FW_LINKING_STATE)
- {
+ } else if (pmlmeinfo->state & WIFI_FW_LINKING_STATE) {
pmlmeinfo->state = MSR_NOLINK;
report_join_res23a(padapter, -2);
}
@@ -4866,7 +4862,7 @@ void report_join_res23a(struct rtw_adapter *padapter, int res)
pjoinbss_evt = (struct joinbss_event*)(pevtcmd + sizeof(struct C2HEvent_Header));
memcpy((unsigned char *)&pjoinbss_evt->network.network,
&pmlmeinfo->network, sizeof(struct wlan_bssid_ex));
- pjoinbss_evt->network.join_res = pjoinbss_evt->network.aid = res;
+ pjoinbss_evt->network.join_res = res;
DBG_8723A("report_join_res23a(%d)\n", res);
@@ -4995,8 +4991,7 @@ void update_sta_info23a(struct rtw_adapter *padapter, struct sta_info *psta)
VCS_update23a(padapter, psta);
/* HT */
- if (pmlmepriv->htpriv.ht_option)
- {
+ if (pmlmepriv->htpriv.ht_option) {
psta->htpriv.ht_option = true;
psta->htpriv.ampdu_enable = pmlmepriv->htpriv.ampdu_enable;
@@ -5006,9 +5001,7 @@ void update_sta_info23a(struct rtw_adapter *padapter, struct sta_info *psta)
psta->qos_option = true;
- }
- else
- {
+ } else {
psta->htpriv.ht_option = false;
psta->htpriv.ampdu_enable = false;
@@ -5050,12 +5043,10 @@ void mlmeext_joinbss_event_callback23a(struct rtw_adapter *padapter,
goto exit_mlmeext_joinbss_event_callback23a;
}
- if ((pmlmeinfo->state&0x03) == MSR_ADHOC)
- {
+ if ((pmlmeinfo->state&0x03) == MSR_ADHOC) {
/* for bc/mc */
psta_bmc = rtw_get_bcmc_stainfo23a(padapter);
- if (psta_bmc)
- {
+ if (psta_bmc) {
pmlmeinfo->FW_sta_info[psta_bmc->mac_id].psta = psta_bmc;
update_bmc_sta_support_rate23a(padapter, psta_bmc->mac_id);
Update_RA_Entry23a(padapter, psta_bmc);
@@ -5086,8 +5077,7 @@ void mlmeext_joinbss_event_callback23a(struct rtw_adapter *padapter,
set_channel_bwmode23a(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
psta = rtw_get_stainfo23a(pstapriv, cur_network->MacAddress);
- if (psta) /* only for infra. mode */
- {
+ if (psta) { /* only for infra. mode */
pmlmeinfo->FW_sta_info[psta->mac_id].psta = psta;
/* DBG_8723A("set_sta_rate23a\n"); */
@@ -5123,8 +5113,7 @@ void mlmeext_sta_add_event_callback23a(struct rtw_adapter *padapter,
if ((pmlmeinfo->state & 0x03) == MSR_ADHOC) {
/* adhoc master or sta_count>1 */
- if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS)
- {
+ if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) {
/* nothing to do */
} else { /* adhoc client */
/* correcting TSF */
diff --git a/drivers/staging/rtl8723au/core/rtw_pwrctrl.c b/drivers/staging/rtl8723au/core/rtw_pwrctrl.c
index 1b2af7381d8..e2d51afe522 100644
--- a/drivers/staging/rtl8723au/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8723au/core/rtw_pwrctrl.c
@@ -159,7 +159,7 @@ void rtw_ps_processor23a(struct rtw_adapter *padapter)
if (pwrpriv->ips_mode_req == IPS_NONE)
goto exit;
- if (rtw_pwr_unassociated_idle(padapter) == false)
+ if (!rtw_pwr_unassociated_idle(padapter))
goto exit;
if (pwrpriv->rf_pwrstate == rf_on &&
@@ -172,12 +172,12 @@ void rtw_ps_processor23a(struct rtw_adapter *padapter)
exit:
rtw_set_pwr_state_check_timer(&padapter->pwrctrlpriv);
pwrpriv->ps_processing = false;
- return;
}
static void pwr_state_check_handler(unsigned long data)
{
struct rtw_adapter *padapter = (struct rtw_adapter *)data;
+
rtw_ps_cmd23a(padapter);
}
@@ -338,8 +338,7 @@ s32 LPS_RF_ON_check23a(struct rtw_adapter *padapter, u32 delay_ms)
start_time = jiffies;
end_time = start_time + msecs_to_jiffies(delay_ms);
- while (1)
- {
+ while (1) {
bAwake = rtl8723a_get_fwlps_rf_on(padapter);
if (bAwake == true)
break;
@@ -470,6 +469,7 @@ void rtw_free_pwrctrl_priv(struct rtw_adapter *adapter)
inline void rtw_set_ips_deny23a(struct rtw_adapter *padapter, u32 ms)
{
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+
pwrpriv->ips_deny_time = jiffies + msecs_to_jiffies(ms);
}
diff --git a/drivers/staging/rtl8723au/core/rtw_recv.c b/drivers/staging/rtl8723au/core/rtw_recv.c
index 5bc7734d9a7..559dddee264 100644
--- a/drivers/staging/rtl8723au/core/rtw_recv.c
+++ b/drivers/staging/rtl8723au/core/rtw_recv.c
@@ -215,6 +215,7 @@ u32 rtw_free_uc_swdec_pending_queue23a(struct rtw_adapter *adapter)
{
u32 cnt = 0;
struct recv_frame *pending_frame;
+
while ((pending_frame = rtw_alloc_recvframe23a(&adapter->recvpriv.uc_swdec_pending_queue))) {
rtw_free_recvframe23a(pending_frame);
DBG_8723A("%s: dequeue uc_swdec_pending_queue\n", __func__);
@@ -239,6 +240,7 @@ int rtw_enqueue_recvbuf23a_to_head(struct recv_buf *precvbuf, struct rtw_queue *
int rtw_enqueue_recvbuf23a(struct recv_buf *precvbuf, struct rtw_queue *queue)
{
unsigned long irqL;
+
spin_lock_irqsave(&queue->lock, irqL);
list_del_init(&precvbuf->list);
@@ -364,6 +366,7 @@ int recvframe_chkmic(struct rtw_adapter *adapter,
if (bmic_err == true) {
int i;
+
RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
("\n *(pframemic-8)-*(pframemic-1) ="
"0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:"
@@ -483,6 +486,7 @@ struct recv_frame *decryptor(struct rtw_adapter *padapter,
if (prxattrib->encrypt > 0) {
u8 *iv = precv_frame->pkt->data + prxattrib->hdrlen;
+
prxattrib->key_index = (((iv[3]) >> 6) & 0x3);
if (prxattrib->key_index > WEP_KEYS) {
@@ -564,59 +568,27 @@ static struct recv_frame *portctrl(struct rtw_adapter *adapter,
("########portctrl:adapter->securitypriv.dot11AuthAlgrthm ="
"%d\n", adapter->securitypriv.dot11AuthAlgrthm));
+ prtnframe = precv_frame;
+
if (auth_alg == dot11AuthAlgrthm_8021X) {
/* get ether_type */
ptr = pfhdr->pkt->data + pfhdr->attrib.hdrlen;
ether_type = (ptr[6] << 8) | ptr[7];
- if ((psta != NULL) && (psta->ieee8021x_blocked)) {
+ if (psta && psta->ieee8021x_blocked) {
/* blocked */
/* only accept EAPOL frame */
RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
("########portctrl:psta->ieee8021x_blocked =="
"1\n"));
- if (ether_type == eapol_type) {
- prtnframe = precv_frame;
- } else {
+ if (ether_type != eapol_type) {
/* free this frame */
rtw_free_recvframe23a(precv_frame);
prtnframe = NULL;
}
- } else {
- /* allowed */
- /* check decryption status, and decrypt the frame if needed */
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
- ("########portctrl:psta->ieee8021x_blocked =="
- "0\n"));
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
- ("portctrl:precv_frame->hdr.attrib.privacy ="
- "%x\n", precv_frame->attrib.privacy));
-
- if (pattrib->bdecrypted == 0) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
- ("portctrl:prxstat->decrypted =%x\n",
- pattrib->bdecrypted));
- }
-
- prtnframe = precv_frame;
- /* check is the EAPOL frame or not (Rekey) */
- if (ether_type == eapol_type) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
- ("########portctrl:ether_type == "
- "0x888e\n"));
- /* check Rekey */
-
- prtnframe = precv_frame;
- } else {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
- ("########portctrl:ether_type = 0x%04x"
- "\n", ether_type));
- }
}
- } else {
- prtnframe = precv_frame;
}
return prtnframe;
@@ -1066,6 +1038,7 @@ int sta2ap_data_frame(struct rtw_adapter *adapter,
}
} else {
u8 *myhwaddr = myid(&adapter->eeprompriv);
+
if (!ether_addr_equal(pattrib->ra, myhwaddr)) {
ret = RTW_RX_HANDLED;
goto exit;
@@ -1405,8 +1378,7 @@ static int validate_recv_data_frame(struct rtw_adapter *adapter,
RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
("\n pattrib->encrypt =%d\n", pattrib->encrypt));
- switch (pattrib->encrypt)
- {
+ switch (pattrib->encrypt) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
pattrib->iv_len = IEEE80211_WEP_IV_LEN;
@@ -1505,8 +1477,7 @@ static int validate_recv_frame(struct rtw_adapter *adapter,
if (unlikely(bDumpRxPkt == 1))
dump_rx_pkt(skb, type, bDumpRxPkt);
- switch (type)
- {
+ switch (type) {
case IEEE80211_FTYPE_MGMT:
retval = validate_recv_mgnt_frame(adapter, precv_frame);
if (retval == _FAIL) {
@@ -1524,7 +1495,6 @@ static int validate_recv_frame(struct rtw_adapter *adapter,
retval = _FAIL; /* only data frame return _SUCCESS */
break;
case IEEE80211_FTYPE_DATA:
- rtw_led_control(adapter, LED_CTL_RX);
pattrib->qos = (subtype & IEEE80211_STYPE_QOS_DATA) ? 1 : 0;
retval = validate_recv_data_frame(adapter, precv_frame);
if (retval == _FAIL) {
@@ -1551,8 +1521,6 @@ static int wlanhdr_to_ethhdr (struct recv_frame *precvframe)
u16 eth_type, len, hdrlen;
u8 bsnaphdr;
u8 *psnap;
-
- int ret = _SUCCESS;
struct rtw_adapter *adapter = precvframe->adapter;
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
@@ -1613,7 +1581,7 @@ static int wlanhdr_to_ethhdr (struct recv_frame *precvframe)
}
- return ret;
+ return _SUCCESS;
}
/* perform defrag */
@@ -1691,7 +1659,7 @@ struct recv_frame *recvframe_defrag(struct rtw_adapter *adapter,
skb_put(skb, pnfhdr->pkt->len);
prframe->attrib.icv_len = pnfhdr->attrib.icv_len;
- };
+ }
/* free the defrag_q queue and return the prframe */
rtw_free_recvframe23a_queue(defrag_q);
@@ -2177,8 +2145,7 @@ int process_recv_indicatepkts(struct rtw_adapter *padapter,
return retval;
}
}
- } else /* B/G mode */
- {
+ } else { /* B/G mode */
retval = wlanhdr_to_ethhdr(prframe);
if (retval != _SUCCESS) {
RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
@@ -2238,8 +2205,6 @@ static int recv_func_posthandle(struct rtw_adapter *padapter,
struct recv_priv *precvpriv = &padapter->recvpriv;
/* DATA FRAME */
- rtw_led_control(padapter, LED_CTL_RX);
-
prframe = decryptor(padapter, prframe);
if (prframe == NULL) {
RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
@@ -2349,66 +2314,52 @@ void rtw_signal_stat_timer_hdl23a(unsigned long data)
u8 _alpha = 3; /* this value is based on converging_constant = 5000 */
/* and sampling_interval = 1000 */
- if (adapter->recvpriv.is_signal_dbg) {
- /* update the user specific value, signal_strength_dbg, */
- /* to signal_strength, rssi */
- adapter->recvpriv.signal_strength =
- adapter->recvpriv.signal_strength_dbg;
- adapter->recvpriv.rssi =
- (s8)translate_percentage_to_dbm((u8)adapter->recvpriv.signal_strength_dbg);
- } else {
- if (recvpriv->signal_strength_data.update_req == 0) {
- /* update_req is clear, means we got rx */
- avg_signal_strength =
- recvpriv->signal_strength_data.avg_val;
- num_signal_strength =
- recvpriv->signal_strength_data.total_num;
- /* after avg_vals are acquired, we can re-stat */
- /* the signal values */
- recvpriv->signal_strength_data.update_req = 1;
- }
+ if (recvpriv->signal_strength_data.update_req == 0) {
+ /* update_req is clear, means we got rx */
+ avg_signal_strength = recvpriv->signal_strength_data.avg_val;
+ num_signal_strength = recvpriv->signal_strength_data.total_num;
+ /* after avg_vals are acquired, we can re-stat */
+ /* the signal values */
+ recvpriv->signal_strength_data.update_req = 1;
+ }
+
+ if (recvpriv->signal_qual_data.update_req == 0) {
+ /* update_req is clear, means we got rx */
+ avg_signal_qual = recvpriv->signal_qual_data.avg_val;
+ num_signal_qual = recvpriv->signal_qual_data.total_num;
+ /* after avg_vals are acquired, we can re-stat */
+ /*the signal values */
+ recvpriv->signal_qual_data.update_req = 1;
+ }
+
+ /* update value of signal_strength, rssi, signal_qual */
+ if (!check_fwstate(&adapter->mlmepriv, _FW_UNDER_SURVEY)) {
+ tmp_s = (avg_signal_strength + (_alpha - 1) *
+ recvpriv->signal_strength);
+ if (tmp_s %_alpha)
+ tmp_s = tmp_s / _alpha + 1;
+ else
+ tmp_s = tmp_s / _alpha;
+ if (tmp_s > 100)
+ tmp_s = 100;
- if (recvpriv->signal_qual_data.update_req == 0) {
- /* update_req is clear, means we got rx */
- avg_signal_qual = recvpriv->signal_qual_data.avg_val;
- num_signal_qual = recvpriv->signal_qual_data.total_num;
- /* after avg_vals are acquired, we can re-stat */
- /*the signal values */
- recvpriv->signal_qual_data.update_req = 1;
- }
+ tmp_q = avg_signal_qual + (_alpha - 1) * recvpriv->signal_qual;
+ if (tmp_q %_alpha)
+ tmp_q = tmp_q / _alpha + 1;
+ else
+ tmp_q = tmp_q / _alpha;
+ if (tmp_q > 100)
+ tmp_q = 100;
- /* update value of signal_strength, rssi, signal_qual */
- if (!check_fwstate(&adapter->mlmepriv, _FW_UNDER_SURVEY)) {
- tmp_s = (avg_signal_strength + (_alpha - 1) *
- recvpriv->signal_strength);
- if (tmp_s %_alpha)
- tmp_s = tmp_s / _alpha + 1;
- else
- tmp_s = tmp_s / _alpha;
- if (tmp_s > 100)
- tmp_s = 100;
-
- tmp_q = (avg_signal_qual + (_alpha - 1) *
- recvpriv->signal_qual);
- if (tmp_q %_alpha)
- tmp_q = tmp_q / _alpha + 1;
- else
- tmp_q = tmp_q / _alpha;
- if (tmp_q > 100)
- tmp_q = 100;
-
- recvpriv->signal_strength = tmp_s;
- recvpriv->rssi = (s8)translate_percentage_to_dbm(tmp_s);
- recvpriv->signal_qual = tmp_q;
-
- DBG_8723A("%s signal_strength:%3u, rssi:%3d, "
- "signal_qual:%3u, num_signal_strength:%u, "
- "num_signal_qual:%u\n",
- __func__, recvpriv->signal_strength,
- recvpriv->rssi, recvpriv->signal_qual,
- num_signal_strength, num_signal_qual
- );
- }
+ recvpriv->signal_strength = tmp_s;
+ recvpriv->signal_qual = tmp_q;
+
+ DBG_8723A("%s signal_strength:%3u, signal_qual:%3u, "
+ "num_signal_strength:%u, num_signal_qual:%u\n",
+ __func__, recvpriv->signal_strength,
+ recvpriv->signal_qual, num_signal_strength,
+ num_signal_qual);
}
+
rtw_set_signal_stat_timer(recvpriv);
}
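
The reworked rtw_signal_stat_timer_hdl23a() above keeps the driver's smoothing of signal_strength and signal_qual: the fresh average gets weight 1 and the running value weight _alpha - 1, the division rounds up on any remainder, and the result is clamped to 100. A minimal standalone sketch of that computation (the helper name and parameters are illustrative):

#include <linux/types.h>

static u32 smooth_signal(u32 fresh_avg, u32 running, u32 alpha)
{
	u32 tmp = fresh_avg + (alpha - 1) * running;

	/* Round up on a remainder, as the timer handler above does. */
	tmp = (tmp % alpha) ? tmp / alpha + 1 : tmp / alpha;
	return tmp > 100 ? 100 : tmp;
}

/* e.g. with _alpha == 3: smooth_signal(90, 60, 3) == (90 + 2 * 60) / 3 == 70 */
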
diff --git a/drivers/staging/rtl8723au/core/rtw_security.c b/drivers/staging/rtl8723au/core/rtw_security.c
index 76371ae6937..715a47414bd 100644
--- a/drivers/staging/rtl8723au/core/rtw_security.c
+++ b/drivers/staging/rtl8723au/core/rtw_security.c
@@ -23,19 +23,18 @@
#define CRC32_POLY 0x04c11db7
-struct arc4context
-{
+struct arc4context {
u32 x;
u32 y;
u8 state[256];
};
-static void arcfour_init(struct arc4context *parc4ctx, u8 * key, u32 key_len)
+static void arcfour_init(struct arc4context *parc4ctx, u8 *key, u32 key_len)
{
u32 t, u;
u32 keyindex;
u32 stateindex;
- u8 * state;
+ u8 *state;
u32 counter;
state = parc4ctx->state;
@@ -45,8 +44,7 @@ static void arcfour_init(struct arc4context *parc4ctx, u8 * key, u32 key_len)
state[counter] = (u8)counter;
keyindex = 0;
stateindex = 0;
- for (counter = 0; counter < 256; counter++)
- {
+ for (counter = 0; counter < 256; counter++) {
t = state[counter];
stateindex = (stateindex + key[keyindex] + t) & 0xff;
u = state[stateindex];
@@ -62,7 +60,7 @@ static u32 arcfour_byte( struct arc4context *parc4ctx)
u32 x;
u32 y;
u32 sx, sy;
- u8 * state;
+ u8 *state;
state = parc4ctx->state;
x = (parc4ctx->x + 1) & 0xff;
@@ -78,8 +76,8 @@ static u32 arcfour_byte( struct arc4context *parc4ctx)
}
static void arcfour_encrypt( struct arc4context *parc4ctx,
- u8 * dest,
- u8 * src,
+ u8 *dest,
+ u8 *src,
u32 len)
{
u32 i;
@@ -114,8 +112,7 @@ static void crc32_init(void)
c = 0x12340000;
- for (i = 0; i < 256; ++i)
- {
+ for (i = 0; i < 256; ++i) {
k = crc32_reverseBit((u8)i);
for (c = ((u32)k) << 24, j = 8; j > 0; --j) {
c = c & 0x80000000 ? (c << 1) ^ CRC32_POLY : (c << 1);
@@ -221,7 +218,7 @@ void rtw_wep_decrypt23a(struct rtw_adapter *padapter,
u8 keyindex;
struct rx_pkt_attrib *prxattrib = &precvframe->attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct sk_buff * skb = precvframe->pkt;
+ struct sk_buff *skb = precvframe->pkt;
pframe = skb->data;
@@ -260,33 +257,29 @@ void rtw_wep_decrypt23a(struct rtw_adapter *padapter,
crc[1], payload[length - 3],
crc[0], payload[length - 4]));
}
-
- return;
}
/* 3 ===== TKIP related ===== */
-static u32 secmicgetuint32(u8 * p)
+static u32 secmicgetuint32(u8 *p)
/* Convert from Byte[] to u32 in a portable way */
{
s32 i;
u32 res = 0;
- for (i = 0; i<4; i++)
- {
+ for (i = 0; i<4; i++) {
res |= ((u32)(*p++)) << (8*i);
}
return res;
}
-static void secmicputuint32(u8 * p, u32 val)
+static void secmicputuint32(u8 *p, u32 val)
/* Convert from long to Byte[] in a portable way */
{
long i;
- for (i = 0; i<4; i++)
- {
+ for (i = 0; i<4; i++) {
*p++ = (u8) (val & 0xff);
val >>= 8;
}
@@ -304,7 +297,7 @@ static void secmicclear(struct mic_data *pmicdata)
}
-void rtw_secmicsetkey23a(struct mic_data *pmicdata, u8 * key)
+void rtw_secmicsetkey23a(struct mic_data *pmicdata, u8 *key)
{
/* Set the key */
@@ -322,8 +315,7 @@ void rtw_secmicappend23abyte23a(struct mic_data *pmicdata, u8 b)
pmicdata->M |= ((unsigned long)b) << (8*pmicdata->nBytesInM);
pmicdata->nBytesInM++;
/* Process the word if it is full. */
- if (pmicdata->nBytesInM >= 4)
- {
+ if (pmicdata->nBytesInM >= 4) {
pmicdata->L ^= pmicdata->M;
pmicdata->R ^= ROL32(pmicdata->L, 17);
pmicdata->L += pmicdata->R;
@@ -340,19 +332,18 @@ void rtw_secmicappend23abyte23a(struct mic_data *pmicdata, u8 b)
}
-void rtw_secmicappend23a(struct mic_data *pmicdata, u8 * src, u32 nbytes)
+void rtw_secmicappend23a(struct mic_data *pmicdata, u8 *src, u32 nbytes)
{
/* This is simple */
- while(nbytes > 0)
- {
+ while(nbytes > 0) {
rtw_secmicappend23abyte23a(pmicdata, *src++);
nbytes--;
}
}
-void rtw_secgetmic23a(struct mic_data *pmicdata, u8 * dst)
+void rtw_secgetmic23a(struct mic_data *pmicdata, u8 *dst)
{
/* Append the minimum padding */
@@ -362,8 +353,7 @@ void rtw_secgetmic23a(struct mic_data *pmicdata, u8 * dst)
rtw_secmicappend23abyte23a(pmicdata, 0);
rtw_secmicappend23abyte23a(pmicdata, 0);
/* and then zeroes until the length is a multiple of 4 */
- while(pmicdata->nBytesInM != 0)
- {
+ while(pmicdata->nBytesInM != 0) {
rtw_secmicappend23abyte23a(pmicdata, 0);
}
/* The appendByte function has already computed the result. */
@@ -374,7 +364,8 @@ void rtw_secgetmic23a(struct mic_data *pmicdata, u8 * dst)
}
-void rtw_seccalctkipmic23a(u8 * key, u8 *header, u8 *data, u32 data_len, u8 *mic_code, u8 pri)
+void rtw_seccalctkipmic23a(u8 *key, u8 *header, u8 *data, u32 data_len,
+ u8 *mic_code, u8 pri)
{
struct mic_data micdata;
@@ -531,8 +522,8 @@ static void phase1(u16 *p1k, const u8 *tk, const u8 *ta, u32 iv32)
/* Now compute an unbalanced Feistel cipher with 80-bit block */
/* size on the 80-bit block P1K[], using the 128-bit key TK[] */
- for (i = 0; i < PHASE1_LOOP_CNT ;i++)
- { /* Each add operation here is mod 2**16 */
+ for (i = 0; i < PHASE1_LOOP_CNT ;i++) {
+ /* Each add operation here is mod 2**16 */
p1k[0] += _S_(p1k[4] ^ TK16((i&1)+0));
p1k[1] += _S_(p1k[0] ^ TK16((i&1)+2));
p1k[2] += _S_(p1k[1] ^ TK16((i&1)+4));
@@ -602,8 +593,7 @@ static void phase2(u8 *rc4key, const u8 *tk, const u16 *p1k, u16 iv16)
rc4key[3] = Lo8((PPK[5] ^ TK16(0)) >> 1);
/* Copy 96 bits of PPK[0..5] to RC4KEY[4..15] (little-endian) */
- for (i = 0;i<6;i++)
- {
+ for (i = 0;i<6;i++) {
rc4key[4+2*i] = Lo8(PPK[i]);
rc4key[5+2*i] = Hi8(PPK[i]);
}
@@ -649,8 +639,7 @@ int rtw_tkip_encrypt23a(struct rtw_adapter *padapter,
if (stainfo!= NULL) {
- if (!(stainfo->state &_FW_LINKED))
- {
+ if (!(stainfo->state &_FW_LINKED)) {
DBG_8723A("%s, psta->state(0x%x) != _FW_LINKED\n", __func__, stainfo->state);
return _FAIL;
}
@@ -728,7 +717,7 @@ int rtw_tkip_decrypt23a(struct rtw_adapter *padapter,
struct sta_info *stainfo;
struct rx_pkt_attrib *prxattrib = &precvframe->attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct sk_buff * skb = precvframe->pkt;
+ struct sk_buff *skb = precvframe->pkt;
int res = _SUCCESS;
pframe = skb->data;
@@ -887,8 +876,7 @@ static void byte_sub(u8 *in, u8 *out)
{
int i;
- for (i = 0; i< 16; i++)
- {
+ for (i = 0; i< 16; i++) {
out[i] = sbox(in[i]);
}
@@ -928,8 +916,7 @@ static void mix_column(u8 *in, u8 *out)
u8 temp[4];
u8 tempb[4];
- for (i = 0 ; i<4; i++)
- {
+ for (i = 0 ; i<4; i++) {
if ((in[i] & 0x80) == 0x80)
add1b[i] = 0x1b;
else
@@ -951,11 +938,9 @@ static void mix_column(u8 *in, u8 *out)
andf7[2] = in[2] & 0x7f;
andf7[3] = in[3] & 0x7f;
- for (i = 3; i>0; i--) /* logical shift left 1 bit */
- {
+ for (i = 3; i>0; i--) { /* logical shift left 1 bit */
andf7[i] = andf7[i] << 1;
- if ((andf7[i-1] & 0x80) == 0x80)
- {
+ if ((andf7[i-1] & 0x80) == 0x80) {
andf7[i] = (andf7[i] | 0x01);
}
}
@@ -988,21 +973,15 @@ static void aes128k128d(u8 *key, u8 *data, u8 *ciphertext)
for (i = 0; i<16; i++) round_key[i] = key[i];
- for (round = 0; round < 11; round++)
- {
- if (round == 0)
- {
+ for (round = 0; round < 11; round++) {
+ if (round == 0) {
xor_128(round_key, data, ciphertext);
next_key(round_key, round);
- }
- else if (round == 10)
- {
+ } else if (round == 10) {
byte_sub(ciphertext, intermediatea);
shift_row(intermediatea, intermediateb);
xor_128(intermediateb, round_key, ciphertext);
- }
- else /* 1 - 9 */
- {
+ } else { /* 1 - 9 */
byte_sub(ciphertext, intermediatea);
shift_row(intermediatea, intermediateb);
mix_column(&intermediateb[0], &intermediatea[0]);
@@ -1088,20 +1067,17 @@ static void construct_mic_header2(u8 *mic_header2, u8 *mpdu, int a4_exists,
mic_header2[6] = 0x00;
mic_header2[7] = 0x00; /* mpdu[23]; */
- if (!qc_exists && a4_exists)
- {
+ if (!qc_exists && a4_exists) {
for (i = 0;i<6;i++) mic_header2[8+i] = mpdu[24+i]; /* A4 */
}
- if (qc_exists && !a4_exists)
- {
+ if (qc_exists && !a4_exists) {
mic_header2[8] = mpdu[24] & 0x0f; /* mute bits 15 - 4 */
mic_header2[9] = mpdu[25] & 0x00;
}
- if (qc_exists && a4_exists)
- {
+ if (qc_exists && a4_exists) {
for (i = 0;i<6;i++) mic_header2[8+i] = mpdu[24+i]; /* A4 */
mic_header2[14] = mpdu[30] & 0x0f;
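The secmicgetuint32()/secmicputuint32() helpers reflowed in the hunks above still marshal the TKIP MIC words one byte at a time. As a hedged aside (not part of this patch), the kernel's unaligned little-endian accessors express the same conversion directly; a minimal sketch, assuming the callers only ever want host-order u32 values:

#include <asm/unaligned.h>

/* Hypothetical one-line equivalents of the open-coded loops above. */
static u32 secmicgetuint32(const u8 *p)
{
	return get_unaligned_le32(p);	/* p[0] is the least significant byte */
}

static void secmicputuint32(u8 *p, u32 val)
{
	put_unaligned_le32(val, p);	/* store val little-endian, low byte first */
}

Either spelling yields the same byte order on little- and big-endian hosts; only the open-coded loop goes away.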
diff --git a/drivers/staging/rtl8723au/core/rtw_sreset.c b/drivers/staging/rtl8723au/core/rtw_sreset.c
index 58ed980795a..29a29d92a6a 100644
--- a/drivers/staging/rtl8723au/core/rtw_sreset.c
+++ b/drivers/staging/rtl8723au/core/rtw_sreset.c
@@ -107,7 +107,7 @@ static void sreset_restore_network_station(struct rtw_adapter *padapter)
mlmeext_joinbss_event_callback23a(padapter, 1);
/* restore Sequence No. */
- rtl8723au_write8(padapter, 0x4dc, padapter->xmitpriv.nqos_ssn);
+ rtl8723au_write8(padapter, REG_NQOS_SEQ, padapter->xmitpriv.nqos_ssn);
sreset_restore_security_station(padapter);
}
diff --git a/drivers/staging/rtl8723au/core/rtw_wlan_util.c b/drivers/staging/rtl8723au/core/rtw_wlan_util.c
index 09c44a55d4a..69d9e0f17fd 100644
--- a/drivers/staging/rtl8723au/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8723au/core/rtw_wlan_util.c
@@ -608,8 +608,6 @@ void WMMOnAssocRsp23a(struct rtw_adapter *padapter)
DBG_8723A("wmm_para_seq(%d): %d\n", i,
pxmitpriv->wmm_para_seq[i]);
}
-
- return;
}
static void bwmode_update_check(struct rtw_adapter *padapter, const u8 *p)
@@ -750,7 +748,6 @@ void HT_caps_handler23a(struct rtw_adapter *padapter, const u8 *p)
else
cap->mcs.rx_mask[i] &= MCS_rate_2R23A[i];
}
- return;
}
void HT_info_handler23a(struct rtw_adapter *padapter, const u8 *p)
@@ -771,7 +768,6 @@ void HT_info_handler23a(struct rtw_adapter *padapter, const u8 *p)
pmlmeinfo->HT_info_enable = 1;
memcpy(&pmlmeinfo->HT_info, p + 2, p[1]);
- return;
}
void HTOnAssocRsp23a(struct rtw_adapter *padapter)
@@ -833,7 +829,7 @@ void VCS_update23a(struct rtw_adapter *padapter, struct sta_info *psta)
psta->cts2self = 0;
break;
case 1: /* on */
- if (pregpriv->vcs_type == 1) { /* 1:RTS/CTS 2:CTS to self */
+ if (pregpriv->vcs_type == RTS_CTS) {
psta->rtsen = 1;
psta->cts2self = 0;
} else {
@@ -844,7 +840,7 @@ void VCS_update23a(struct rtw_adapter *padapter, struct sta_info *psta)
case 2: /* auto */
default:
if (pmlmeinfo->ERP_enable && pmlmeinfo->ERP_IE & BIT(1)) {
- if (pregpriv->vcs_type == 1) {
+ if (pregpriv->vcs_type == RTS_CTS) {
psta->rtsen = 1;
psta->cts2self = 0;
} else {
@@ -870,7 +866,7 @@ int rtw_check_bcn_info23a(struct rtw_adapter *Adapter,
int pie_len, ssid_len, privacy;
const u8 *p, *ssid;
- if (is_client_associated_to_ap23a(Adapter) == false)
+ if (!is_client_associated_to_ap23a(Adapter))
return _SUCCESS;
if (unlikely(!ieee80211_is_beacon(mgmt->frame_control))) {
@@ -1080,7 +1076,7 @@ bool is_ap_in_tkip23a(struct rtw_adapter *padapter)
return false;
}
-bool should_forbid_n_rate23a(struct rtw_adapter * padapter)
+bool should_forbid_n_rate23a(struct rtw_adapter *padapter)
{
u32 i;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -1153,6 +1149,7 @@ bool is_ap_in_wep23a(struct rtw_adapter *padapter)
static int wifirate2_ratetbl_inx23a(unsigned char rate)
{
int inx = 0;
+
rate = rate & 0x7f;
switch (rate) {
@@ -1311,6 +1308,7 @@ unsigned char check_assoc_AP23a(u8 *pframe, uint len)
u8 epigram_vendor_flag;
u8 ralink_vendor_flag;
const u8 *p;
+
epigram_vendor_flag = 0;
ralink_vendor_flag = 0;
@@ -1324,7 +1322,6 @@ unsigned char check_assoc_AP23a(u8 *pframe, uint len)
DBG_8723A("link to Artheros AP\n");
return HT_IOT_PEER_ATHEROS;
} else if (!memcmp(p + 2, BROADCOM_OUI1, 3) ||
- !memcmp(p + 2, BROADCOM_OUI2, 3) ||
!memcmp(p + 2, BROADCOM_OUI2, 3)) {
DBG_8723A("link to Broadcom AP\n");
return HT_IOT_PEER_BROADCOM;
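The VCS_update23a() hunks above swap the magic value 1 for the RTS_CTS enumerator. That is a pure rename only if the enumerators carry the values implied by the comment being removed ("1:RTS/CTS 2:CTS to self"); a minimal sketch of the assumed mapping and the per-station effect:

/* Enumerator values assumed from the removed comment; check the driver's
 * headers before relying on them. */
enum { NONE_VCS = 0, RTS_CTS = 1, CTS_TO_SELF = 2 };

static void apply_vcs(struct sta_info *psta, int vcs_type)
{
	if (vcs_type == RTS_CTS) {		/* formerly "vcs_type == 1" */
		psta->rtsen = 1;
		psta->cts2self = 0;
	} else {				/* CTS-to-self */
		psta->rtsen = 0;
		psta->cts2self = 1;
	}
}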
diff --git a/drivers/staging/rtl8723au/core/rtw_xmit.c b/drivers/staging/rtl8723au/core/rtw_xmit.c
index 7a8038156ce..7a5e6bf0d1a 100644
--- a/drivers/staging/rtl8723au/core/rtw_xmit.c
+++ b/drivers/staging/rtl8723au/core/rtw_xmit.c
@@ -22,9 +22,6 @@
#include <usb_ops.h>
#include <rtl8723a_xmit.h>
-static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
-static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
-
static void _init_txservq(struct tx_servq *ptxservq)
{
@@ -180,16 +177,8 @@ int _rtw_init_xmit_priv23a(struct xmit_priv *pxmitpriv,
for (i = 0; i < 4; i ++)
pxmitpriv->wmm_para_seq[i] = i;
- pxmitpriv->txirp_cnt = 1;
-
sema_init(&pxmitpriv->tx_retevt, 0);
- /* per AC pending irp */
- pxmitpriv->beq_cnt = 0;
- pxmitpriv->bkq_cnt = 0;
- pxmitpriv->viq_cnt = 0;
- pxmitpriv->voq_cnt = 0;
-
pxmitpriv->ack_tx = false;
mutex_init(&pxmitpriv->ack_tx_mutex);
rtw_sctx_init23a(&pxmitpriv->ack_tx_ops, 0);
@@ -315,6 +304,7 @@ static void update_attrib_vcs_info(struct rtw_adapter *padapter, struct xmit_fra
/* check HT op mode */
if (pattrib->ht_en) {
u8 HTOpMode = pmlmeinfo->HT_protection;
+
if ((pmlmeext->cur_bwmode && (HTOpMode == 2 || HTOpMode == 3)) ||
(!pmlmeext->cur_bwmode && HTOpMode == 3)) {
pattrib->vcs_mode = RTS_CTS;
@@ -464,6 +454,7 @@ static int update_attrib(struct rtw_adapter *padapter,
if (pattrib->pktlen > 282 + 24) {
if (pattrib->ether_type == ETH_P_IP) {/* IP header */
u8 *pframe = skb->data;
+
pframe += ETH_HLEN;
if ((pframe[21] == 68 && pframe[23] == 67) ||
@@ -1048,21 +1039,23 @@ s32 rtw_txframes_sta_ac_pending23a(struct rtw_adapter *padapter,
return ptxservq->qcnt;
}
-/*
- * Calculate wlan 802.11 packet MAX size from pkt_attrib
- * This function doesn't consider fragment case
+/* Logical Link Control(LLC) SubNetwork Attachment Point(SNAP) header
+ * IEEE LLC/SNAP header contains 8 octets
+ * First 3 octets comprise the LLC portion
+ * SNAP portion, 5 octets, is divided into two fields:
+ * Organizationally Unique Identifier(OUI), 3 octets,
+ * type, defined by that organization, 2 octets.
*/
-u32 rtw_calculate_wlan_pkt_size_by_attribue23a(struct pkt_attrib *pattrib)
+static int rtw_put_snap(u8 *data, u16 h_proto)
{
- u32 len = 0;
-
- len = pattrib->hdrlen + pattrib->iv_len; /* WLAN Header and IV */
- len += SNAP_SIZE + sizeof(u16); /* LLC */
- len += pattrib->pktlen;
- if (pattrib->encrypt == WLAN_CIPHER_SUITE_TKIP) len += 8; /* MIC */
- len += ((pattrib->bswenc) ? pattrib->icv_len : 0); /* ICV */
+ if (h_proto == ETH_P_IPX || h_proto == ETH_P_AARP)
+ ether_addr_copy(data, bridge_tunnel_header);
+ else
+ ether_addr_copy(data, rfc1042_header);
- return len;
+ data += ETH_ALEN;
+ put_unaligned_be16(h_proto, data);
+ return ETH_ALEN + sizeof(u16);
}
/*
@@ -1188,7 +1181,7 @@ int rtw_xmitframe_coalesce23a(struct rtw_adapter *padapter, struct sk_buff *skb,
mpdu_len -= pattrib->iv_len;
}
if (frg_inx == 0) {
- llc_sz = rtw_put_snap23a(pframe, pattrib->ether_type);
+ llc_sz = rtw_put_snap(pframe, pattrib->ether_type);
pframe += llc_sz;
mpdu_len -= llc_sz;
}
@@ -1258,34 +1251,6 @@ exit:
return res;
}
-/* Logical Link Control(LLC) SubNetwork Attachment Point(SNAP) header
- * IEEE LLC/SNAP header contains 8 octets
- * First 3 octets comprise the LLC portion
- * SNAP portion, 5 octets, is divided into two fields:
- * Organizationally Unique Identifier(OUI), 3 octets,
- * type, defined by that organization, 2 octets.
- */
-s32 rtw_put_snap23a(u8 *data, u16 h_proto)
-{
- struct ieee80211_snap_hdr *snap;
- u8 *oui;
-
- snap = (struct ieee80211_snap_hdr *)data;
- snap->dsap = 0xaa;
- snap->ssap = 0xaa;
- snap->ctrl = 0x03;
-
- if (h_proto == 0x8137 || h_proto == 0x80f3)
- oui = P802_1H_OUI;
- else
- oui = RFC1042_OUI;
- snap->oui[0] = oui[0];
- snap->oui[1] = oui[1];
- snap->oui[2] = oui[2];
- *(u16 *)(data + SNAP_SIZE) = htons(h_proto);
- return SNAP_SIZE + sizeof(u16);
-}
-
void rtw_update_protection23a(struct rtw_adapter *padapter, u8 *ie, uint ie_len)
{
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -1293,7 +1258,7 @@ void rtw_update_protection23a(struct rtw_adapter *padapter, u8 *ie, uint ie_len)
uint protection;
const u8 *p;
- switch (pxmitpriv->vcs_setting) {
+ switch (pregistrypriv->vrtl_carrier_sense) {
case DISABLE_VCS:
pxmitpriv->vcs = NONE_VCS;
break;
@@ -1326,7 +1291,7 @@ void rtw_count_tx_stats23a(struct rtw_adapter *padapter, struct xmit_frame *pxmi
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- if ((pxmitframe->frame_tag&0x0f) == DATA_FRAMETAG) {
+ if (pxmitframe->frame_tag == DATA_FRAMETAG) {
pxmitpriv->tx_bytes += sz;
pmlmepriv->LinkDetectInfo.NumTxOkInPeriod++;
@@ -1893,18 +1858,6 @@ u32 rtw_get_ff_hwaddr23a(struct xmit_frame *pxmitframe)
return addr;
}
-static void do_queue_select(struct rtw_adapter *padapter, struct pkt_attrib *pattrib)
-{
- u8 qsel;
-
- qsel = pattrib->priority;
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
- ("### do_queue_select priority =%d , qsel = %d\n",
- pattrib->priority, qsel));
-
- pattrib->qsel = qsel;
-}
-
/*
* The main transmit(tx) entry
*
@@ -1936,9 +1889,7 @@ int rtw_xmit23a(struct rtw_adapter *padapter, struct sk_buff *skb)
}
pxmitframe->pkt = skb;
- rtw_led_control(padapter, LED_CTL_TX);
-
- do_queue_select(padapter, &pxmitframe->attrib);
+ pxmitframe->attrib.qsel = pxmitframe->attrib.priority;
#ifdef CONFIG_8723AU_AP_MODE
spin_lock_bh(&pxmitpriv->lock);
@@ -2411,11 +2362,6 @@ void rtw23a_sctx_done_err(struct submit_ctx **sctx, int status)
}
}
-void rtw_sctx_done23a(struct submit_ctx **sctx)
-{
- rtw23a_sctx_done_err(sctx, RTW_SCTX_DONE_SUCCESS);
-}
-
int rtw_ack_tx_wait23a(struct xmit_priv *pxmitpriv, u32 timeout_ms)
{
struct submit_ctx *pack_tx_ops = &pxmitpriv->ack_tx_ops;
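Most of the rtw_xmit.c churn above replaces the driver-local rtw_put_snap23a() (a hand-built struct ieee80211_snap_hdr plus private OUI arrays) with a static rtw_put_snap() that copies the shared rfc1042_header / bridge_tunnel_header templates declared in linux/ieee80211.h. The LLC+OUI prefix is six octets, which is why the ETH_ALEN-sized ether_addr_copy() fits it exactly. A minimal sketch of the resulting 8-octet header, with the ethertypes taken from the removed 0x8137/0x80f3 constants:

/* Sketch only; rtw_put_snap() is the new static helper in the hunk above. */
static void snap_layout_example(void)
{
	u8 snap[8];
	int len;

	/* IPv4: AA AA 03 | 00 00 00 (RFC 1042 OUI) | 08 00 (ETH_P_IP) */
	len = rtw_put_snap(snap, ETH_P_IP);	/* len == 8 */

	/* IPX (0x8137) and AppleTalk AARP (0x80f3) take the bridge-tunnel
	 * OUI 00 00 F8 instead, matching the removed magic numbers. */
	len = rtw_put_snap(snap, ETH_P_IPX);	/* AA AA 03 | 00 00 F8 | 81 37 */
	(void)len;
}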
diff --git a/drivers/staging/rtl8723au/hal/HalDMOutSrc8723A_CE.c b/drivers/staging/rtl8723au/hal/HalDMOutSrc8723A_CE.c
index 4b41bc4ce1e..179a1ba0302 100644
--- a/drivers/staging/rtl8723au/hal/HalDMOutSrc8723A_CE.c
+++ b/drivers/staging/rtl8723au/hal/HalDMOutSrc8723A_CE.c
@@ -612,7 +612,7 @@ static void _PHY_PathADDAOn(struct rtw_adapter *pAdapter, u32 *ADDAReg, bool isP
u32 i;
pathOn = isPathAOn ? 0x04db25a4 : 0x0b1b25a4;
- if (false == is2T) {
+ if (!is2T) {
pathOn = 0x0bdb25a0;
PHY_SetBBReg(pAdapter, ADDAReg[0], bMaskDWord, 0x0b1b25a0);
} else {
diff --git a/drivers/staging/rtl8723au/hal/hal_com.c b/drivers/staging/rtl8723au/hal/hal_com.c
index bf919f6e412..bf4cae20bd1 100644
--- a/drivers/staging/rtl8723au/hal/hal_com.c
+++ b/drivers/staging/rtl8723au/hal/hal_com.c
@@ -463,7 +463,7 @@ void rtl8723a_set_ampdu_factor(struct rtw_adapter *padapter, u8 FactorToSet)
MaxAggNum = 0xF;
if (FactorToSet <= 3) {
- FactorToSet = (1 << (FactorToSet + 2));
+ FactorToSet = 1 << (FactorToSet + 2);
if (FactorToSet > MaxAggNum)
FactorToSet = MaxAggNum;
@@ -727,7 +727,7 @@ void rtl8723a_fifo_cleanup(struct rtw_adapter *padapter)
rtl8723au_write8(padapter, REG_TXPAUSE, 0xff);
/* keep sn */
- padapter->xmitpriv.nqos_ssn = rtl8723au_read16(padapter, REG_NQOS_SEQ);
+ padapter->xmitpriv.nqos_ssn = rtl8723au_read8(padapter, REG_NQOS_SEQ);
if (pwrpriv->bkeepfwalive != true) {
u32 v32;
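In the rtl8723a_set_ampdu_factor() hunk above, dropping the outer parentheses changes nothing arithmetically: factors 0..3 still map to 2^(factor+2) MPDUs and are then clamped to MaxAggNum. A minimal sketch of that mapping (helper name hypothetical):

/* Sketch of the mapping performed in the hunk above, valid for factor 0..3
 * as guarded by the original code. */
static u8 ampdu_factor_to_agg_num(u8 factor)
{
	const u8 max_agg_num = 0xF;	/* cap used by the driver */
	u8 n = 1 << (factor + 2);	/* 4, 8, 16, 32 for factor 0..3 */

	return n > max_agg_num ? max_agg_num : n;
}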
diff --git a/drivers/staging/rtl8723au/hal/odm_HWConfig.c b/drivers/staging/rtl8723au/hal/odm_HWConfig.c
index 29d844d66ca..fb3cc872f20 100644
--- a/drivers/staging/rtl8723au/hal/odm_HWConfig.c
+++ b/drivers/staging/rtl8723au/hal/odm_HWConfig.c
@@ -391,20 +391,11 @@ static void odm_Process_RSSIForDM(struct dm_odm_t *pDM_Odm,
}
}
-/* Endianness before calling this API */
-static void ODM_PhyStatusQuery23a_92CSeries(struct dm_odm_t *pDM_Odm,
- struct phy_info *pPhyInfo,
- u8 *pPhyStatus,
- struct odm_packet_info *pPktinfo)
+void ODM_PhyStatusQuery23a(struct dm_odm_t *pDM_Odm, struct phy_info *pPhyInfo,
+ u8 *pPhyStatus, struct odm_packet_info *pPktinfo)
{
odm_RxPhyStatus92CSeries_Parsing(pDM_Odm, pPhyInfo,
pPhyStatus, pPktinfo);
odm_Process_RSSIForDM(pDM_Odm, pPhyInfo, pPktinfo);
}
-
-void ODM_PhyStatusQuery23a(struct dm_odm_t *pDM_Odm, struct phy_info *pPhyInfo,
- u8 *pPhyStatus, struct odm_packet_info *pPktinfo)
-{
- ODM_PhyStatusQuery23a_92CSeries(pDM_Odm, pPhyInfo, pPhyStatus, pPktinfo);
-}
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_bt-coexist.c b/drivers/staging/rtl8723au/hal/rtl8723a_bt-coexist.c
index 9054a987f06..86a83975f4f 100644
--- a/drivers/staging/rtl8723au/hal/rtl8723a_bt-coexist.c
+++ b/drivers/staging/rtl8723au/hal/rtl8723a_bt-coexist.c
@@ -340,7 +340,7 @@ static u8 bthci_GetAssocInfo(struct rtw_adapter *padapter, u8 EntryNum)
tempBuf, TotalLen-BaseMemoryShift);
pAmpAsoc = (struct amp_assoc_structure *)tempBuf;
- pAmpAsoc->Length = le16_to_cpu(pAmpAsoc->Length);
+ le16_to_cpus(&pAmpAsoc->Length);
BaseMemoryShift += 3 + pAmpAsoc->Length;
RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("TypeID = 0x%x, ", pAmpAsoc->TypeID));
@@ -1759,18 +1759,6 @@ static enum hci_status bthci_CmdReadConnectionAcceptTimeout(struct rtw_adapter *
return status;
}
-/* 7.3.3 */
-static enum hci_status
-bthci_CmdSetEventFilter(
- struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd
- )
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
-
- return status;
-}
-
/* 7.3.14 */
static enum hci_status
bthci_CmdWriteConnectionAcceptTimeout(
@@ -2982,19 +2970,12 @@ bthci_CmdReadLinkQuality(
return status;
}
-static enum hci_status bthci_CmdReadRSSI(struct rtw_adapter *padapter)
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- return status;
-}
-
static enum hci_status
bthci_CmdCreateLogicalLink(
struct rtw_adapter *padapter,
struct packet_irp_hcicmd_data *pHciCmd
)
{
- enum hci_status status = HCI_STATUS_SUCCESS;
struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
struct bt_dgb *pBtDbg = &pBTInfo->BtDbg;
@@ -3003,7 +2984,7 @@ bthci_CmdCreateLogicalLink(
bthci_BuildLogicalLink(padapter, pHciCmd,
HCI_CREATE_LOGICAL_LINK);
- return status;
+ return HCI_STATUS_SUCCESS;
}
static enum hci_status
@@ -3012,7 +2993,6 @@ bthci_CmdAcceptLogicalLink(
struct packet_irp_hcicmd_data *pHciCmd
)
{
- enum hci_status status = HCI_STATUS_SUCCESS;
struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
struct bt_dgb *pBtDbg = &pBTInfo->BtDbg;
@@ -3021,7 +3001,7 @@ bthci_CmdAcceptLogicalLink(
bthci_BuildLogicalLink(padapter, pHciCmd,
HCI_ACCEPT_LOGICAL_LINK);
- return status;
+ return HCI_STATUS_SUCCESS;
}
static enum hci_status
@@ -4138,15 +4118,6 @@ bthci_CmdHostBufferSize(struct rtw_adapter *padapter,
}
static enum hci_status
-bthci_CmdHostNumberOfCompletedPackets(struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd)
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
-
- return status;
-}
-
-static enum hci_status
bthci_UnknownCMD(struct rtw_adapter *padapter, struct packet_irp_hcicmd_data *pHciCmd)
{
enum hci_status status = HCI_STATUS_UNKNOW_HCI_CMD;
@@ -4219,7 +4190,6 @@ bthci_HandleOGFSetEventMaskCMD(struct rtw_adapter *padapter,
break;
case HCI_SET_EVENT_FILTER:
RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_SET_EVENT_FILTER\n"));
- status = bthci_CmdSetEventFilter(padapter, pHciCmd);
break;
case HCI_WRITE_CONNECTION_ACCEPT_TIMEOUT:
RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_WRITE_CONNECTION_ACCEPT_TIMEOUT\n"));
@@ -4235,7 +4205,6 @@ bthci_HandleOGFSetEventMaskCMD(struct rtw_adapter *padapter,
break;
case HCI_HOST_NUMBER_OF_COMPLETED_PACKETS:
RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_HOST_NUMBER_OF_COMPLETED_PACKETS\n"));
- status = bthci_CmdHostNumberOfCompletedPackets(padapter, pHciCmd);
break;
case HCI_READ_LINK_SUPERVISION_TIMEOUT:
RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_READ_LINK_SUPERVISION_TIMEOUT\n"));
@@ -4323,7 +4292,6 @@ bthci_HandleOGFStatusParameters(struct rtw_adapter *padapter,
break;
case HCI_READ_RSSI:
RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_READ_RSSI\n"));
- status = bthci_CmdReadRSSI(padapter);
break;
case HCI_READ_LOCAL_AMP_INFO:
RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_READ_LOCAL_AMP_INFO\n"));
@@ -10671,7 +10639,7 @@ void BTDM_BBBackOffLevel(struct rtw_adapter *padapter, u8 type)
void BTDM_FWCoexAllOff(struct rtw_adapter *padapter)
{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
RTPRINT(FBT, BT_TRACE, ("BTDM_FWCoexAllOff()\n"));
if (pHalData->bt_coexist.bFWCoexistAllOff)
@@ -10685,7 +10653,7 @@ void BTDM_FWCoexAllOff(struct rtw_adapter *padapter)
void BTDM_SWCoexAllOff(struct rtw_adapter *padapter)
{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
RTPRINT(FBT, BT_TRACE, ("BTDM_SWCoexAllOff()\n"));
if (pHalData->bt_coexist.bSWCoexistAllOff)
@@ -10698,7 +10666,7 @@ void BTDM_SWCoexAllOff(struct rtw_adapter *padapter)
void BTDM_HWCoexAllOff(struct rtw_adapter *padapter)
{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
RTPRINT(FBT, BT_TRACE, ("BTDM_HWCoexAllOff()\n"));
if (pHalData->bt_coexist.bHWCoexistAllOff)
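In bthci_GetAssocInfo() above, the assignment through le16_to_cpu() becomes the in-place le16_to_cpus() helper; both leave pAmpAsoc->Length in host byte order, swapping on big-endian hosts and doing nothing on little-endian ones. A minimal sketch of the same pattern on a hypothetical wire buffer:

#include <linux/string.h>
#include <asm/byteorder.h>

/* Hypothetical helper: pull a little-endian u16 field out of a wire buffer. */
static u16 read_le16_field(const void *wire)
{
	u16 len;

	memcpy(&len, wire, sizeof(len));	/* len now holds the raw LE bytes */
	le16_to_cpus(&len);			/* swap in place on big-endian hosts */
	return len;
}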
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_cmd.c b/drivers/staging/rtl8723au/hal/rtl8723a_cmd.c
index 271c33d6ca5..7b56411cc3c 100644
--- a/drivers/staging/rtl8723au/hal/rtl8723a_cmd.c
+++ b/drivers/staging/rtl8723au/hal/rtl8723a_cmd.c
@@ -115,19 +115,16 @@ exit:
int rtl8723a_set_rssi_cmd(struct rtw_adapter *padapter, u8 *param)
{
- int res = _SUCCESS;
-
*((u32 *)param) = cpu_to_le32(*((u32 *)param));
FillH2CCmd(padapter, RSSI_SETTING_EID, 3, param);
- return res;
+ return _SUCCESS;
}
int rtl8723a_set_raid_cmd(struct rtw_adapter *padapter, u32 mask, u8 arg)
{
u8 buf[5];
- int res = _SUCCESS;
memset(buf, 0, 5);
mask = cpu_to_le32(mask);
@@ -136,7 +133,7 @@ int rtl8723a_set_raid_cmd(struct rtw_adapter *padapter, u32 mask, u8 arg)
FillH2CCmd(padapter, MACID_CONFIG_EID, 5, buf);
- return res;
+ return _SUCCESS;
}
/* bitmap[0:27] = tx_rate_bitmap */
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c b/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
index 8523908d5e5..a5eadd4e258 100644
--- a/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
+++ b/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
@@ -47,29 +47,19 @@ static void _FWDownloadEnable(struct rtw_adapter *padapter, bool enable)
}
}
-static int _BlockWrite(struct rtw_adapter *padapter, void *buffer, u32 buffSize)
-{
- int ret;
-
- if (buffSize > MAX_PAGE_SIZE)
- return _FAIL;
-
- ret = rtl8723au_writeN(padapter, FW_8723A_START_ADDRESS,
- buffSize, buffer);
-
- return ret;
-}
-
static int
_PageWrite(struct rtw_adapter *padapter, u32 page, void *buffer, u32 size)
{
u8 value8;
u8 u8Page = (u8) (page & 0x07);
+ if (size > MAX_PAGE_SIZE)
+ return _FAIL;
+
value8 = (rtl8723au_read8(padapter, REG_MCUFWDL + 2) & 0xF8) | u8Page;
rtl8723au_write8(padapter, REG_MCUFWDL + 2, value8);
- return _BlockWrite(padapter, buffer, size);
+ return rtl8723au_writeN(padapter, FW_8723A_START_ADDRESS, size, buffer);
}
static int _WriteFW(struct rtw_adapter *padapter, void *buffer, u32 size)
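With _BlockWrite() folded into _PageWrite() above, the MAX_PAGE_SIZE check now sits next to the page-select write and the rtl8723au_writeN() call it guards. A hedged sketch of how a caller such as _WriteFW() is assumed to drive it, chunking the firmware image into MAX_PAGE_SIZE pages (the real loop may differ):

/* Illustrative only; not the actual _WriteFW() implementation. */
static int write_fw_sketch(struct rtw_adapter *padapter, u8 *img, u32 size)
{
	u32 page = 0, offset = 0;
	int ret = _SUCCESS;

	while (offset < size) {
		u32 chunk = min_t(u32, size - offset, MAX_PAGE_SIZE);

		ret = _PageWrite(padapter, page++, img + offset, chunk);
		if (ret != _SUCCESS)
			break;
		offset += chunk;
	}
	return ret;
}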
@@ -743,84 +733,6 @@ u16 rtl8723a_EfuseGetCurrentSize_BT(struct rtw_adapter *padapter)
return retU2;
}
-bool
-rtl8723a_EfusePgPacketRead(struct rtw_adapter *padapter, u8 offset, u8 *data)
-{
- u8 efuse_data, word_cnts = 0;
- u16 efuse_addr = 0;
- u8 hoffset = 0, hworden = 0;
- u8 i;
- u8 max_section = 0;
- s32 ret;
-
- if (data == NULL)
- return false;
-
- EFUSE_GetEfuseDefinition23a(padapter, EFUSE_WIFI, TYPE_EFUSE_MAX_SECTION,
- &max_section);
- if (offset > max_section) {
- DBG_8723A("%s: Packet offset(%d) is illegal(>%d)!\n",
- __func__, offset, max_section);
- return false;
- }
-
- memset(data, 0xFF, PGPKT_DATA_SIZE);
- ret = true;
-
- /* */
- /* <Roger_TODO> Efuse has been pre-programmed dummy 5Bytes at the
- end of Efuse by CP. */
- /* Skip dummy parts to prevent unexpected data read from Efuse. */
- /* By pass right now. 2009.02.19. */
- /* */
- while (AVAILABLE_EFUSE_ADDR(efuse_addr)) {
- if (efuse_OneByteRead23a(padapter, efuse_addr++, &efuse_data) ==
- _FAIL) {
- ret = false;
- break;
- }
-
- if (efuse_data == 0xFF)
- break;
-
- if (EXT_HEADER(efuse_data)) {
- hoffset = GET_HDR_OFFSET_2_0(efuse_data);
- efuse_OneByteRead23a(padapter, efuse_addr++, &efuse_data);
- if (ALL_WORDS_DISABLED(efuse_data)) {
- DBG_8723A("%s: Error!! All words disabled!\n",
- __func__);
- continue;
- }
-
- hoffset |= ((efuse_data & 0xF0) >> 1);
- hworden = efuse_data & 0x0F;
- } else {
- hoffset = (efuse_data >> 4) & 0x0F;
- hworden = efuse_data & 0x0F;
- }
-
- if (hoffset == offset) {
- for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
- /* Check word enable condition in the section */
- if (!(hworden & (0x01 << i))) {
- ReadEFuseByte23a(padapter, efuse_addr++,
- &efuse_data);
- data[i * 2] = efuse_data;
-
- ReadEFuseByte23a(padapter, efuse_addr++,
- &efuse_data);
- data[(i * 2) + 1] = efuse_data;
- }
- }
- } else {
- word_cnts = Efuse_CalculateWordCnts23a(hworden);
- efuse_addr += word_cnts * 2;
- }
- }
-
- return ret;
-}
-
void rtl8723a_read_chip_version(struct rtw_adapter *padapter)
{
u32 value32;
@@ -1126,6 +1038,21 @@ exit:
return ret;
}
+void handle_txrpt_ccx_8723a(struct rtw_adapter *adapter, void *buf)
+{
+ struct txrpt_ccx_8723a *txrpt_ccx = buf;
+ struct submit_ctx *pack_tx_ops = &adapter->xmitpriv.ack_tx_ops;
+
+ if (txrpt_ccx->int_ccx && adapter->xmitpriv.ack_tx) {
+ if (txrpt_ccx->pkt_ok)
+ rtw23a_sctx_done_err(&pack_tx_ops,
+ RTW_SCTX_DONE_SUCCESS);
+ else
+ rtw23a_sctx_done_err(&pack_tx_ops,
+ RTW_SCTX_DONE_CCX_PKT_FAIL);
+ }
+}
+
void rtl8723a_InitAntenna_Selection(struct rtw_adapter *padapter)
{
u8 val;
@@ -1326,18 +1253,17 @@ c. APSD_CTRL 0x600[7:0] = 0x40
d. SYS_FUNC_EN 0x02[7:0] = 0x16 reset BB state machine
e. SYS_FUNC_EN 0x02[7:0] = 0x14 reset BB state machine
***************************************/
- u8 eRFPath = 0, value8 = 0;
+ u8 value8;
rtl8723au_write8(padapter, REG_TXPAUSE, 0xFF);
- PHY_SetRFReg(padapter, (enum RF_RADIO_PATH) eRFPath, 0x0, bMaskByte0, 0x0);
+ PHY_SetRFReg(padapter, RF_PATH_A, 0x0, bMaskByte0, 0x0);
- value8 |= APSDOFF;
+ value8 = APSDOFF;
rtl8723au_write8(padapter, REG_APSD_CTRL, value8); /* 0x40 */
/* Set BB reset at first */
- value8 = 0;
- value8 |= (FEN_USBD | FEN_USBA | FEN_BB_GLB_RSTn);
+ value8 = FEN_USBD | FEN_USBA | FEN_BB_GLB_RSTn;
rtl8723au_write8(padapter, REG_SYS_FUNC_EN, value8); /* 0x16 */
/* Set global reset. */
@@ -1350,11 +1276,6 @@ e. SYS_FUNC_EN 0x02[7:0] = 0x14 reset BB state machine
/* RT_TRACE(COMP_INIT, DBG_LOUD, ("======> RF off and reset BB.\n")); */
}
-static void _DisableRFAFEAndResetBB(struct rtw_adapter *padapter)
-{
- _DisableRFAFEAndResetBB8192C(padapter);
-}
-
static void _ResetDigitalProcedure1_92C(struct rtw_adapter *padapter,
bool bWithoutHWSM)
{
@@ -1368,18 +1289,18 @@ static void _ResetDigitalProcedure1_92C(struct rtw_adapter *padapter,
i. SYS_FUNC_EN 0x02[10]= 1 enable MCU register,
(8051 enable)
******************************/
- u16 valu16 = 0;
+ u16 valu16;
rtl8723au_write8(padapter, REG_MCUFWDL, 0);
valu16 = rtl8723au_read16(padapter, REG_SYS_FUNC_EN);
/* reset MCU , 8051 */
rtl8723au_write16(padapter, REG_SYS_FUNC_EN,
- valu16 & (~FEN_CPUEN));
+ valu16 & ~FEN_CPUEN);
valu16 = rtl8723au_read16(padapter, REG_SYS_FUNC_EN) & 0x0FFF;
/* reset MAC */
rtl8723au_write16(padapter, REG_SYS_FUNC_EN,
- valu16 | (FEN_HWPDN | FEN_ELDR));
+ valu16 | FEN_HWPDN | FEN_ELDR);
valu16 = rtl8723au_read16(padapter, REG_SYS_FUNC_EN);
/* enable MCU , 8051 */
@@ -1387,43 +1308,41 @@ static void _ResetDigitalProcedure1_92C(struct rtw_adapter *padapter,
valu16 | FEN_CPUEN);
} else {
u8 retry_cnts = 0;
+ u8 val8;
+
+ val8 = rtl8723au_read8(padapter, REG_MCUFWDL);
/* 2010/08/12 MH For USB SS, we can not stop 8051 when we
are trying to enter IPS/HW&SW radio off. For
S3/S4/S5/Disable, we can stop 8051 because */
/* we will init FW when power on again. */
/* If we want to SS mode, we can not reset 8051. */
- if (rtl8723au_read8(padapter, REG_MCUFWDL) & BIT(1)) {
+ if ((val8 & BIT(1)) && padapter->bFWReady) {
/* IF fw in RAM code, do reset */
- if (padapter->bFWReady) {
- /* 2010/08/25 MH Accordign to RD alfred's
- suggestion, we need to disable other */
- /* HRCV INT to influence 8051 reset. */
- rtl8723au_write8(padapter, REG_FWIMR, 0x20);
- /* 2011/02/15 MH According to Alex's
- suggestion, close mask to prevent
- incorrect FW write operation. */
- rtl8723au_write8(padapter, REG_FTIMR, 0x00);
- rtl8723au_write8(padapter, REG_FSIMR, 0x00);
-
- /* 8051 reset by self */
- rtl8723au_write8(padapter, REG_HMETFR + 3,
- 0x20);
-
- while ((retry_cnts++ < 100) &&
- (FEN_CPUEN &
- rtl8723au_read16(padapter,
- REG_SYS_FUNC_EN))) {
- udelay(50); /* us */
- }
+ /* 2010/08/25 MH Accordign to RD alfred's
+ suggestion, we need to disable other */
+ /* HRCV INT to influence 8051 reset. */
+ rtl8723au_write8(padapter, REG_FWIMR, 0x20);
+ /* 2011/02/15 MH According to Alex's
+ suggestion, close mask to prevent
+ incorrect FW write operation. */
+ rtl8723au_write8(padapter, REG_FTIMR, 0x00);
+ rtl8723au_write8(padapter, REG_FSIMR, 0x00);
+
+ /* 8051 reset by self */
+ rtl8723au_write8(padapter, REG_HMETFR + 3, 0x20);
+
+ while ((retry_cnts++ < 100) &&
+ (rtl8723au_read16(padapter, REG_SYS_FUNC_EN) &
+ FEN_CPUEN)) {
+ udelay(50); /* us */
+ }
- if (retry_cnts >= 100) {
- /* Reset MAC and Enable 8051 */
- rtl8723au_write8(padapter,
- REG_SYS_FUNC_EN + 1,
- 0x50);
- mdelay(10);
- }
+ if (retry_cnts >= 100) {
+ /* Reset MAC and Enable 8051 */
+ rtl8723au_write8(padapter,
+ REG_SYS_FUNC_EN + 1, 0x50);
+ mdelay(10);
}
}
/* Reset MAC and Enable 8051 */
@@ -1450,12 +1369,6 @@ static void _ResetDigitalProcedure1_92C(struct rtw_adapter *padapter,
}
}
-static void _ResetDigitalProcedure1(struct rtw_adapter *padapter,
- bool bWithoutHWSM)
-{
- _ResetDigitalProcedure1_92C(padapter, bWithoutHWSM);
-}
-
static void _ResetDigitalProcedure2(struct rtw_adapter *padapter)
{
/*****************************
@@ -1472,8 +1385,8 @@ m. SYS_ISO_CTRL 0x01[7:0] = 0x83 isolated ELDR to PON
static void _DisableAnalog(struct rtw_adapter *padapter, bool bWithoutHWSM)
{
struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- u16 value16 = 0;
- u8 value8 = 0;
+ u16 value16;
+ u8 value8;
if (bWithoutHWSM) {
/*****************************
@@ -1487,7 +1400,7 @@ static void _DisableAnalog(struct rtw_adapter *padapter, bool bWithoutHWSM)
/* rtl8723au_write8(padapter, REG_LDOV12D_CTRL, 0x54); */
value8 = rtl8723au_read8(padapter, REG_LDOV12D_CTRL);
- value8 &= (~LDV12_EN);
+ value8 &= ~LDV12_EN;
rtl8723au_write8(padapter, REG_LDOV12D_CTRL, value8);
/* RT_TRACE(COMP_INIT, DBG_LOUD,
(" REG_LDOV12D_CTRL Reg0x21:0x%02x.\n", value8)); */
@@ -1509,9 +1422,9 @@ static void _DisableAnalog(struct rtw_adapter *padapter, bool bWithoutHWSM)
use HW to shut down 8051 automatically. */
/* Becasue suspend operatione need the asistance of 8051
to wait for 3ms. */
- value16 |= (APDM_HOST | AFSM_HSUS | PFM_ALDN);
+ value16 = APDM_HOST | AFSM_HSUS | PFM_ALDN;
} else {
- value16 |= (APDM_HOST | AFSM_HSUS | PFM_ALDN);
+ value16 = APDM_HOST | AFSM_HSUS | PFM_ALDN;
}
rtl8723au_write16(padapter, REG_APS_FSMCO, value16); /* 0x4802 */
@@ -1522,16 +1435,14 @@ static void _DisableAnalog(struct rtw_adapter *padapter, bool bWithoutHWSM)
/* HW Auto state machine */
int CardDisableHWSM(struct rtw_adapter *padapter, u8 resetMCU)
{
- int rtStatus = _SUCCESS;
-
if (padapter->bSurpriseRemoved) {
- return rtStatus;
+ return _SUCCESS;
}
/* RF Off Sequence ==== */
- _DisableRFAFEAndResetBB(padapter);
+ _DisableRFAFEAndResetBB8192C(padapter);
/* ==== Reset digital sequence ====== */
- _ResetDigitalProcedure1(padapter, false);
+ _ResetDigitalProcedure1_92C(padapter, false);
/* ==== Pull GPIO PIN to balance level and LED control ====== */
_DisableGPIO(padapter);
@@ -1542,25 +1453,21 @@ int CardDisableHWSM(struct rtw_adapter *padapter, u8 resetMCU)
RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
("======> Card disable finished.\n"));
- return rtStatus;
+ return _SUCCESS;
}
/* without HW Auto state machine */
int CardDisableWithoutHWSM(struct rtw_adapter *padapter)
{
- int rtStatus = _SUCCESS;
-
- /* RT_TRACE(COMP_INIT, DBG_LOUD,
- ("======> Card Disable Without HWSM .\n")); */
if (padapter->bSurpriseRemoved) {
- return rtStatus;
+ return _SUCCESS;
}
/* RF Off Sequence ==== */
- _DisableRFAFEAndResetBB(padapter);
+ _DisableRFAFEAndResetBB8192C(padapter);
/* ==== Reset digital sequence ====== */
- _ResetDigitalProcedure1(padapter, true);
+ _ResetDigitalProcedure1_92C(padapter, true);
/* ==== Pull GPIO PIN to balance level and LED control ====== */
_DisableGPIO(padapter);
@@ -1573,29 +1480,27 @@ int CardDisableWithoutHWSM(struct rtw_adapter *padapter)
/* RT_TRACE(COMP_INIT, DBG_LOUD,
("<====== Card Disable Without HWSM .\n")); */
- return rtStatus;
+ return _SUCCESS;
}
void Hal_InitPGData(struct rtw_adapter *padapter, u8 *PROMContent)
{
struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
- if (false == pEEPROM->bautoload_fail_flag) { /* autoload OK. */
+ if (!pEEPROM->bautoload_fail_flag) { /* autoload OK. */
if (!pEEPROM->EepromOrEfuse) {
/* Read EFUSE real map to shadow. */
EFUSE_ShadowMapUpdate23a(padapter, EFUSE_WIFI);
- memcpy((void *)PROMContent,
- (void *)pEEPROM->efuse_eeprom_data,
+ memcpy(PROMContent, pEEPROM->efuse_eeprom_data,
HWSET_MAX_SIZE);
}
- } else { /* autoload fail */
+ } else {
RT_TRACE(_module_hci_hal_init_c_, _drv_notice_,
("AutoLoad Fail reported from CR9346!!\n"));
-/* pHalData->AutoloadFailFlag = true; */
/* update to default value 0xFF */
- if (false == pEEPROM->EepromOrEfuse)
+ if (!pEEPROM->EepromOrEfuse)
EFUSE_ShadowMapUpdate23a(padapter, EFUSE_WIFI);
- memcpy((void *)PROMContent, (void *)pEEPROM->efuse_eeprom_data,
+ memcpy(PROMContent, pEEPROM->efuse_eeprom_data,
HWSET_MAX_SIZE);
}
}
@@ -1945,13 +1850,13 @@ Hal_EfuseParseThermalMeter_8723A(struct rtw_adapter *padapter,
/* */
/* ThermalMeter from EEPROM */
/* */
- if (AutoloadFail == false)
+ if (!AutoloadFail)
pHalData->EEPROMThermalMeter =
PROMContent[EEPROM_THERMAL_METER_8723A];
else
pHalData->EEPROMThermalMeter = EEPROM_Default_ThermalMeter;
- if ((pHalData->EEPROMThermalMeter == 0xff) || (AutoloadFail == true)) {
+ if ((pHalData->EEPROMThermalMeter == 0xff) || AutoloadFail) {
pHalData->bAPKThermalMeterIgnore = true;
pHalData->EEPROMThermalMeter = EEPROM_Default_ThermalMeter;
}
@@ -1960,10 +1865,6 @@ Hal_EfuseParseThermalMeter_8723A(struct rtw_adapter *padapter,
pHalData->EEPROMThermalMeter);
}
-void Hal_InitChannelPlan23a(struct rtw_adapter *padapter)
-{
-}
-
static void rtl8723a_cal_txdesc_chksum(struct tx_desc *ptxdesc)
{
u16 *usPtr = (u16 *) ptxdesc;
@@ -1981,254 +1882,6 @@ static void rtl8723a_cal_txdesc_chksum(struct tx_desc *ptxdesc)
ptxdesc->txdw7 |= cpu_to_le32(checksum & 0x0000ffff);
}
-static void fill_txdesc_sectype(struct pkt_attrib *pattrib,
- struct txdesc_8723a *ptxdesc)
-{
- if ((pattrib->encrypt > 0) && !pattrib->bswenc) {
- switch (pattrib->encrypt) {
- /* SEC_TYPE */
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- case WLAN_CIPHER_SUITE_TKIP:
- ptxdesc->sectype = 1;
- break;
-
- case WLAN_CIPHER_SUITE_CCMP:
- ptxdesc->sectype = 3;
- break;
-
- case 0:
- default:
- break;
- }
- }
-}
-
-static void fill_txdesc_vcs(struct pkt_attrib *pattrib,
- struct txdesc_8723a *ptxdesc)
-{
- /* DBG_8723A("cvs_mode =%d\n", pattrib->vcs_mode); */
-
- switch (pattrib->vcs_mode) {
- case RTS_CTS:
- ptxdesc->rtsen = 1;
- break;
-
- case CTS_TO_SELF:
- ptxdesc->cts2self = 1;
- break;
-
- case NONE_VCS:
- default:
- break;
- }
-
- if (pattrib->vcs_mode) {
- ptxdesc->hw_rts_en = 1; /* ENABLE HW RTS */
-
- /* Set RTS BW */
- if (pattrib->ht_en) {
- if (pattrib->bwmode & HT_CHANNEL_WIDTH_40)
- ptxdesc->rts_bw = 1;
-
- switch (pattrib->ch_offset) {
- case HAL_PRIME_CHNL_OFFSET_DONT_CARE:
- ptxdesc->rts_sc = 0;
- break;
-
- case HAL_PRIME_CHNL_OFFSET_LOWER:
- ptxdesc->rts_sc = 1;
- break;
-
- case HAL_PRIME_CHNL_OFFSET_UPPER:
- ptxdesc->rts_sc = 2;
- break;
-
- default:
- ptxdesc->rts_sc = 3; /* Duplicate */
- break;
- }
- }
- }
-}
-
-static void fill_txdesc_phy(struct pkt_attrib *pattrib,
- struct txdesc_8723a *ptxdesc)
-{
- if (pattrib->ht_en) {
- if (pattrib->bwmode & HT_CHANNEL_WIDTH_40)
- ptxdesc->data_bw = 1;
-
- switch (pattrib->ch_offset) {
- case HAL_PRIME_CHNL_OFFSET_DONT_CARE:
- ptxdesc->data_sc = 0;
- break;
-
- case HAL_PRIME_CHNL_OFFSET_LOWER:
- ptxdesc->data_sc = 1;
- break;
-
- case HAL_PRIME_CHNL_OFFSET_UPPER:
- ptxdesc->data_sc = 2;
- break;
-
- default:
- ptxdesc->data_sc = 3; /* Duplicate */
- break;
- }
- }
-}
-
-static void rtl8723a_fill_default_txdesc(struct xmit_frame *pxmitframe,
- u8 *pbuf)
-{
- struct rtw_adapter *padapter;
- struct hal_data_8723a *pHalData;
- struct dm_priv *pdmpriv;
- struct mlme_ext_priv *pmlmeext;
- struct mlme_ext_info *pmlmeinfo;
- struct pkt_attrib *pattrib;
- struct txdesc_8723a *ptxdesc;
- s32 bmcst;
-
- padapter = pxmitframe->padapter;
- pHalData = GET_HAL_DATA(padapter);
- pdmpriv = &pHalData->dmpriv;
- pmlmeext = &padapter->mlmeextpriv;
- pmlmeinfo = &pmlmeext->mlmext_info;
-
- pattrib = &pxmitframe->attrib;
- bmcst = is_multicast_ether_addr(pattrib->ra);
-
- ptxdesc = (struct txdesc_8723a *)pbuf;
-
- if (pxmitframe->frame_tag == DATA_FRAMETAG) {
- ptxdesc->macid = pattrib->mac_id; /* CAM_ID(MAC_ID) */
-
- if (pattrib->ampdu_en == true)
- ptxdesc->agg_en = 1; /* AGG EN */
- else
- ptxdesc->bk = 1; /* AGG BK */
-
- ptxdesc->qsel = pattrib->qsel;
- ptxdesc->rate_id = pattrib->raid;
-
- fill_txdesc_sectype(pattrib, ptxdesc);
-
- ptxdesc->seq = pattrib->seqnum;
-
- if ((pattrib->ether_type != 0x888e) &&
- (pattrib->ether_type != 0x0806) &&
- (pattrib->dhcp_pkt != 1)) {
- /* Non EAP & ARP & DHCP type data packet */
-
- fill_txdesc_vcs(pattrib, ptxdesc);
- fill_txdesc_phy(pattrib, ptxdesc);
-
- ptxdesc->rtsrate = 8; /* RTS Rate = 24M */
- ptxdesc->data_ratefb_lmt = 0x1F;
- ptxdesc->rts_ratefb_lmt = 0xF;
-
- /* use REG_INIDATA_RATE_SEL value */
- ptxdesc->datarate =
- pdmpriv->INIDATA_RATE[pattrib->mac_id];
-
- } else {
- /* EAP data packet and ARP packet. */
- /* Use the 1M data rate to send the EAP/ARP packet. */
- /* This will maybe make the handshake smooth. */
-
- ptxdesc->bk = 1; /* AGG BK */
- ptxdesc->userate = 1; /* driver uses rate */
- if (pmlmeinfo->preamble_mode == PREAMBLE_SHORT)
- ptxdesc->data_short = 1;
- ptxdesc->datarate = MRateToHwRate23a(pmlmeext->tx_rate);
- }
- } else if (pxmitframe->frame_tag == MGNT_FRAMETAG) {
-/* RT_TRACE(_module_hal_xmit_c_, _drv_notice_,
- ("%s: MGNT_FRAMETAG\n", __func__)); */
-
- ptxdesc->macid = pattrib->mac_id; /* CAM_ID(MAC_ID) */
- ptxdesc->qsel = pattrib->qsel;
- ptxdesc->rate_id = pattrib->raid; /* Rate ID */
- ptxdesc->seq = pattrib->seqnum;
- ptxdesc->userate = 1; /* driver uses rate, 1M */
- ptxdesc->rty_lmt_en = 1; /* retry limit enable */
- ptxdesc->data_rt_lmt = 6; /* retry limit = 6 */
-
- /* CCX-TXRPT ack for xmit mgmt frames. */
- if (pxmitframe->ack_report)
- ptxdesc->ccx = 1;
-
- ptxdesc->datarate = MRateToHwRate23a(pmlmeext->tx_rate);
- } else if (pxmitframe->frame_tag == TXAGG_FRAMETAG) {
- RT_TRACE(_module_hal_xmit_c_, _drv_warning_,
- ("%s: TXAGG_FRAMETAG\n", __func__));
- } else {
- RT_TRACE(_module_hal_xmit_c_, _drv_warning_,
- ("%s: frame_tag = 0x%x\n", __func__,
- pxmitframe->frame_tag));
-
- ptxdesc->macid = 4; /* CAM_ID(MAC_ID) */
- ptxdesc->rate_id = 6; /* Rate ID */
- ptxdesc->seq = pattrib->seqnum;
- ptxdesc->userate = 1; /* driver uses rate */
- ptxdesc->datarate = MRateToHwRate23a(pmlmeext->tx_rate);
- }
-
- ptxdesc->pktlen = pattrib->last_txcmdsz;
- ptxdesc->offset = TXDESC_SIZE + OFFSET_SZ;
- if (bmcst)
- ptxdesc->bmc = 1;
- ptxdesc->ls = 1;
- ptxdesc->fs = 1;
- ptxdesc->own = 1;
-
- /* 2009.11.05. tynli_test. Suggested by SD4 Filen for FW LPS. */
- /* (1) The sequence number of each non-Qos frame / broadcast /
- * multicast / mgnt frame should be controled by Hw because Fw
- * will also send null data which we cannot control when Fw LPS enable.
- * --> default enable non-Qos data sequense number.
- 2010.06.23. by tynli. */
- /* (2) Enable HW SEQ control for beacon packet,
- * because we use Hw beacon. */
- /* (3) Use HW Qos SEQ to control the seq num of Ext port
- * non-Qos packets. */
- /* 2010.06.23. Added by tynli. */
- if (!pattrib->qos_en) {
- /* Hw set sequence number */
- ptxdesc->hwseq_en = 1; /* HWSEQ_EN */
- ptxdesc->hwseq_sel = 0; /* HWSEQ_SEL */
- }
-}
-
-/*
- * Description:
- *
- * Parameters:
- * pxmitframe xmitframe
- * pbuf where to fill tx desc
- */
-void rtl8723a_update_txdesc(struct xmit_frame *pxmitframe, u8 *pbuf)
-{
- struct tx_desc *pdesc;
-
- pdesc = (struct tx_desc *)pbuf;
- memset(pdesc, 0, sizeof(struct tx_desc));
-
- rtl8723a_fill_default_txdesc(pxmitframe, pbuf);
-
- pdesc->txdw0 = cpu_to_le32(pdesc->txdw0);
- pdesc->txdw1 = cpu_to_le32(pdesc->txdw1);
- pdesc->txdw2 = cpu_to_le32(pdesc->txdw2);
- pdesc->txdw3 = cpu_to_le32(pdesc->txdw3);
- pdesc->txdw4 = cpu_to_le32(pdesc->txdw4);
- pdesc->txdw5 = cpu_to_le32(pdesc->txdw5);
- pdesc->txdw6 = cpu_to_le32(pdesc->txdw6);
- pdesc->txdw7 = cpu_to_le32(pdesc->txdw7);
- rtl8723a_cal_txdesc_chksum(pdesc);
-}
-
/*
* Description: In normal chip, we should send some packet to Hw which
* will be used by Fw in FW LPS mode. The function is to fill the Tx
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_phycfg.c b/drivers/staging/rtl8723au/hal/rtl8723a_phycfg.c
index 3d4d7ec2750..88e91cd8ebb 100644
--- a/drivers/staging/rtl8723au/hal/rtl8723a_phycfg.c
+++ b/drivers/staging/rtl8723au/hal/rtl8723a_phycfg.c
@@ -418,7 +418,6 @@ PHY_SetRFReg(struct rtw_adapter *Adapter, enum RF_RADIO_PATH eRFPath,
*---------------------------------------------------------------------------*/
int PHY_MACConfig8723A(struct rtw_adapter *Adapter)
{
- int rtStatus = _SUCCESS;
struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
bool is92C = IS_92C_SERIAL(pHalData->VersionID);
@@ -433,7 +432,7 @@ int PHY_MACConfig8723A(struct rtw_adapter *Adapter)
if (is92C && (BOARD_USB_DONGLE == pHalData->BoardType))
rtl8723au_write8(Adapter, 0x40, 0x04);
- return rtStatus;
+ return _SUCCESS;
}
/**
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_rf6052.c b/drivers/staging/rtl8723au/hal/rtl8723a_rf6052.c
index 2dc0886e5f9..1aad4384471 100644
--- a/drivers/staging/rtl8723au/hal/rtl8723a_rf6052.c
+++ b/drivers/staging/rtl8723au/hal/rtl8723a_rf6052.c
@@ -42,21 +42,6 @@
#include <rtl8723a_hal.h>
#include <usb_ops_linux.h>
-/*---------------------------Define Local Constant---------------------------*/
-/* Define local structure for debug!!!!! */
-struct rf_shadow_compare_map {
- /* Shadow register value */
- u32 Value;
- /* Compare or not flag */
- u8 Compare;
- /* Record If it had ever modified unpredicted */
- u8 ErrorOrNot;
- /* Recorver Flag */
- u8 Recorver;
- /* */
- u8 Driver_Write;
-};
-
/*-----------------------------------------------------------------------------
* Function: PHY_RF6052SetBandwidth()
*
@@ -71,20 +56,23 @@ struct rf_shadow_compare_map {
*
* Note: For RF type 0222D
*---------------------------------------------------------------------------*/
-void rtl8723a_phy_rf6052set_bw(
- struct rtw_adapter *Adapter,
- enum ht_channel_width Bandwidth) /* 20M or 40M */
+void rtl8723a_phy_rf6052set_bw(struct rtw_adapter *Adapter,
+ enum ht_channel_width Bandwidth) /* 20M or 40M */
{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
switch (Bandwidth) {
case HT_CHANNEL_WIDTH_20:
- pHalData->RfRegChnlVal[0] = ((pHalData->RfRegChnlVal[0] & 0xfffff3ff) | 0x0400);
- PHY_SetRFReg(Adapter, RF_PATH_A, RF_CHNLBW, bRFRegOffsetMask, pHalData->RfRegChnlVal[0]);
+ pHalData->RfRegChnlVal[0] =
+ (pHalData->RfRegChnlVal[0] & 0xfffff3ff) | 0x0400;
+ PHY_SetRFReg(Adapter, RF_PATH_A, RF_CHNLBW, bRFRegOffsetMask,
+ pHalData->RfRegChnlVal[0]);
break;
case HT_CHANNEL_WIDTH_40:
- pHalData->RfRegChnlVal[0] = ((pHalData->RfRegChnlVal[0] & 0xfffff3ff));
- PHY_SetRFReg(Adapter, RF_PATH_A, RF_CHNLBW, bRFRegOffsetMask, pHalData->RfRegChnlVal[0]);
+ pHalData->RfRegChnlVal[0] =
+ (pHalData->RfRegChnlVal[0] & 0xfffff3ff);
+ PHY_SetRFReg(Adapter, RF_PATH_A, RF_CHNLBW, bRFRegOffsetMask,
+ pHalData->RfRegChnlVal[0]);
break;
default:
break;
@@ -108,7 +96,8 @@ void rtl8723a_phy_rf6052set_bw(
*
*---------------------------------------------------------------------------*/
-void rtl823a_phy_rf6052setccktxpower(struct rtw_adapter *Adapter, u8 *pPowerlevel)
+void rtl823a_phy_rf6052setccktxpower(struct rtw_adapter *Adapter,
+ u8 *pPowerlevel)
{
struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
struct dm_priv *pdmpriv = &pHalData->dmpriv;
@@ -118,7 +107,8 @@ void rtl823a_phy_rf6052setccktxpower(struct rtw_adapter *Adapter, u8 *pPowerleve
u8 idx1, idx2;
u8 *ptr;
- /* According to SD3 eechou's suggestion, we need to disable turbo scan for RU. */
+ /* According to SD3 eechou's suggestion, we need to disable
+ turbo scan for RU. */
/* Otherwise, external PA will be broken if power index > 0x20. */
if (pHalData->EEPROMRegulatory != 0 || pHalData->ExternalPA)
TurboScanOff = true;
@@ -131,29 +121,37 @@ void rtl823a_phy_rf6052setccktxpower(struct rtw_adapter *Adapter, u8 *pPowerleve
if (TurboScanOff) {
for (idx1 = RF_PATH_A; idx1 <= RF_PATH_B; idx1++) {
- TxAGC[idx1] =
- pPowerlevel[idx1] | (pPowerlevel[idx1]<<8) |
- (pPowerlevel[idx1]<<16) | (pPowerlevel[idx1]<<24);
- /* 2010/10/18 MH For external PA module. We need to limit power index to be less than 0x20. */
+ TxAGC[idx1] = pPowerlevel[idx1] |
+ (pPowerlevel[idx1] << 8) |
+ (pPowerlevel[idx1] << 16) |
+ (pPowerlevel[idx1] << 24);
+ /* 2010/10/18 MH For external PA module.
+ We need to limit power index to be less
+ than 0x20. */
if (TxAGC[idx1] > 0x20 && pHalData->ExternalPA)
TxAGC[idx1] = 0x20;
}
}
} else {
-/* 20100427 Joseph: Driver dynamic Tx power shall not affect Tx power. It shall be determined by power training mechanism. */
-/* Currently, we cannot fully disable driver dynamic tx power mechanism because it is referenced by BT coexist mechanism. */
-/* In the future, two mechanism shall be separated from each other and maintained independantly. Thanks for Lanhsin's reminder. */
+/* 20100427 Joseph: Driver dynamic Tx power shall not affect Tx
+ * power. It shall be determined by power training mechanism. */
+/* Currently, we cannot fully disable driver dynamic tx power
+ * mechanism because it is referenced by BT coexist mechanism. */
+/* In the future, two mechanism shall be separated from each other
+ * and maintained independantly. Thanks for Lanhsin's reminder. */
if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level1) {
TxAGC[RF_PATH_A] = 0x10101010;
TxAGC[RF_PATH_B] = 0x10101010;
- } else if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level2) {
+ } else if (pdmpriv->DynamicTxHighPowerLvl ==
+ TxHighPwrLevel_Level2) {
TxAGC[RF_PATH_A] = 0x00000000;
TxAGC[RF_PATH_B] = 0x00000000;
} else {
for (idx1 = RF_PATH_A; idx1 <= RF_PATH_B; idx1++) {
- TxAGC[idx1] =
- pPowerlevel[idx1] | (pPowerlevel[idx1]<<8) |
- (pPowerlevel[idx1]<<16) | (pPowerlevel[idx1]<<24);
+ TxAGC[idx1] = pPowerlevel[idx1] |
+ (pPowerlevel[idx1] << 8) |
+ (pPowerlevel[idx1] << 16) |
+ (pPowerlevel[idx1] << 24);
}
if (pHalData->EEPROMRegulatory == 0) {
@@ -178,29 +176,24 @@ void rtl823a_phy_rf6052setccktxpower(struct rtw_adapter *Adapter, u8 *pPowerleve
}
/* rf-A cck tx power */
- tmpval = TxAGC[RF_PATH_A]&0xff;
+ tmpval = TxAGC[RF_PATH_A] & 0xff;
PHY_SetBBReg(Adapter, rTxAGC_A_CCK1_Mcs32, bMaskByte1, tmpval);
- tmpval = TxAGC[RF_PATH_A]>>8;
+ tmpval = TxAGC[RF_PATH_A] >> 8;
PHY_SetBBReg(Adapter, rTxAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);
/* rf-B cck tx power */
- tmpval = TxAGC[RF_PATH_B]>>24;
+ tmpval = TxAGC[RF_PATH_B] >> 24;
PHY_SetBBReg(Adapter, rTxAGC_B_CCK11_A_CCK2_11, bMaskByte0, tmpval);
- tmpval = TxAGC[RF_PATH_B]&0x00ffffff;
+ tmpval = TxAGC[RF_PATH_B] & 0x00ffffff;
PHY_SetBBReg(Adapter, rTxAGC_B_CCK1_55_Mcs32, 0xffffff00, tmpval);
} /* PHY_RF6052SetCckTxPower */
/* powerbase0 for OFDM rates */
/* powerbase1 for HT MCS rates */
-static void getPowerBase(
- struct rtw_adapter *Adapter,
- u8 *pPowerLevel,
- u8 Channel,
- u32 *OfdmBase,
- u32 *MCSBase
- )
+static void getPowerBase(struct rtw_adapter *Adapter, u8 *pPowerLevel,
+ u8 Channel, u32 *OfdmBase, u32 *MCSBase)
{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
u32 powerBase0, powerBase1;
u8 Legacy_pwrdiff = 0;
s8 HT20_pwrdiff = 0;
@@ -211,8 +204,9 @@ static void getPowerBase(
Legacy_pwrdiff = pHalData->TxPwrLegacyHtDiff[i][Channel-1];
powerBase0 = powerlevel[i] + Legacy_pwrdiff;
- powerBase0 = (powerBase0<<24) | (powerBase0<<16) | (powerBase0<<8) | powerBase0;
- *(OfdmBase+i) = powerBase0;
+ powerBase0 = powerBase0 << 24 | powerBase0 << 16 |
+ powerBase0 << 8 | powerBase0;
+ *(OfdmBase + i) = powerBase0;
}
for (i = 0; i < 2; i++) {
@@ -222,36 +216,35 @@ static void getPowerBase(
powerlevel[i] += HT20_pwrdiff;
}
powerBase1 = powerlevel[i];
- powerBase1 = (powerBase1<<24) | (powerBase1<<16) | (powerBase1<<8) | powerBase1;
- *(MCSBase+i) = powerBase1;
+ powerBase1 = powerBase1 << 24 | powerBase1 << 16 |
+ powerBase1 << 8 | powerBase1;
+ *(MCSBase + i) = powerBase1;
}
}
-static void getTxPowerWriteValByRegulatory(
- struct rtw_adapter *Adapter,
- u8 Channel,
- u8 index,
- u32 *powerBase0,
- u32 *powerBase1,
- u32 *pOutWriteVal
- )
+static void
+getTxPowerWriteValByRegulatory(struct rtw_adapter *Adapter, u8 Channel,
+ u8 index, u32 *powerBase0, u32 *powerBase1,
+ u32 *pOutWriteVal)
{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
struct dm_priv *pdmpriv = &pHalData->dmpriv;
- u8 i, chnlGroup = 0, pwr_diff_limit[4];
- u32 writeVal, customer_limit, rf;
+ u8 i, chnlGroup = 0, pwr_diff_limit[4];
+ u32 writeVal, customer_limit, rf;
/* Index 0 & 1 = legacy OFDM, 2-5 = HT_MCS rate */
for (rf = 0; rf < 2; rf++) {
switch (pHalData->EEPROMRegulatory) {
case 0: /* Realtek better performance */
- /* increase power diff defined by Realtek for large power */
+ /* increase power diff defined by Realtek for
+ * large power */
chnlGroup = 0;
writeVal = pHalData->MCSTxPowerLevelOriginalOffset[chnlGroup][index+(rf?8:0)] +
((index < 2) ? powerBase0[rf] : powerBase1[rf]);
break;
case 1: /* Realtek regulatory */
- /* increase power diff defined by Realtek for regulatory */
+ /* increase power diff defined by Realtek for
+ * regulatory */
if (pHalData->pwrGroupCnt == 1)
chnlGroup = 0;
if (pHalData->pwrGroupCnt >= 3) {
@@ -262,17 +255,20 @@ static void getTxPowerWriteValByRegulatory(
else if (Channel > 9)
chnlGroup = 2;
- if (pHalData->CurrentChannelBW == HT_CHANNEL_WIDTH_20)
+ if (pHalData->CurrentChannelBW ==
+ HT_CHANNEL_WIDTH_20)
chnlGroup++;
else
chnlGroup += 4;
}
writeVal = pHalData->MCSTxPowerLevelOriginalOffset[chnlGroup][index+(rf?8:0)] +
- ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+ ((index < 2) ? powerBase0[rf] :
+ powerBase1[rf]);
break;
case 2: /* Better regulatory */
- /* don't increase any power diff */
- writeVal = ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+ /* don't increase any power diff */
+ writeVal = ((index < 2) ? powerBase0[rf] :
+ powerBase1[rf]);
break;
case 3: /* Customer defined power diff. */
chnlGroup = 0;
@@ -299,28 +295,34 @@ static void getTxPowerWriteValByRegulatory(
break;
}
-/* 20100427 Joseph: Driver dynamic Tx power shall not affect Tx power. It shall be determined by power training mechanism. */
-/* Currently, we cannot fully disable driver dynamic tx power mechanism because it is referenced by BT coexist mechanism. */
-/* In the future, two mechanism shall be separated from each other and maintained independantly. Thanks for Lanhsin's reminder. */
+/* 20100427 Joseph: Driver dynamic Tx power shall not affect Tx power.
+ It shall be determined by power training mechanism. */
+/* Currently, we cannot fully disable driver dynamic tx power mechanism
+ because it is referenced by BT coexist mechanism. */
+/* In the future, two mechanism shall be separated from each other and
+ maintained independantly. Thanks for Lanhsin's reminder. */
if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level1)
writeVal = 0x14141414;
- else if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level2)
+ else if (pdmpriv->DynamicTxHighPowerLvl ==
+ TxHighPwrLevel_Level2)
writeVal = 0x00000000;
- /* 20100628 Joseph: High power mode for BT-Coexist mechanism. */
- /* This mechanism is only applied when Driver-Highpower-Mechanism is OFF. */
+ /* 20100628 Joseph: High power mode for BT-Coexist mechanism. */
+ /* This mechanism is only applied when
+ Driver-Highpower-Mechanism is OFF. */
if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_BT1)
writeVal = writeVal - 0x06060606;
else if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_BT2)
writeVal = writeVal;
- *(pOutWriteVal+rf) = writeVal;
+ *(pOutWriteVal + rf) = writeVal;
}
}
-static void writeOFDMPowerReg(struct rtw_adapter *Adapter, u8 index, u32 *pValue)
+static void writeOFDMPowerReg(struct rtw_adapter *Adapter, u8 index,
+ u32 *pValue)
{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
u16 RegOffset_A[6] = {
rTxAGC_A_Rate18_06, rTxAGC_A_Rate54_24,
rTxAGC_A_Mcs03_Mcs00, rTxAGC_A_Mcs07_Mcs04,
@@ -338,12 +340,13 @@ static void writeOFDMPowerReg(struct rtw_adapter *Adapter, u8 index, u32 *pValue
for (rf = 0; rf < 2; rf++) {
writeVal = pValue[rf];
for (i = 0; i < 4; i++) {
- pwr_val[i] = (u8)((writeVal & (0x7f<<(i*8)))>>(i*8));
- if (pwr_val[i] > RF6052_MAX_TX_PWR)
+ pwr_val[i] = (u8)((writeVal &
+ (0x7f << (i * 8))) >> (i * 8));
+ if (pwr_val[i] > RF6052_MAX_TX_PWR)
pwr_val[i] = RF6052_MAX_TX_PWR;
}
- writeVal = (pwr_val[3]<<24) | (pwr_val[2]<<16) |
- (pwr_val[1]<<8) | pwr_val[0];
+ writeVal = pwr_val[3] << 24 | pwr_val[2] << 16 |
+ pwr_val[1] << 8 | pwr_val[0];
if (rf == 0)
RegOffset = RegOffset_A[index];
@@ -352,7 +355,8 @@ static void writeOFDMPowerReg(struct rtw_adapter *Adapter, u8 index, u32 *pValue
PHY_SetBBReg(Adapter, RegOffset, bMaskDWord, writeVal);
- /* 201005115 Joseph: Set Tx Power diff for Tx power training mechanism. */
+ /* 201005115 Joseph: Set Tx Power diff for Tx power
+ training mechanism. */
if (((pHalData->rf_type == RF_2T2R) &&
(RegOffset == rTxAGC_A_Mcs15_Mcs12 ||
RegOffset == rTxAGC_B_Mcs15_Mcs12)) ||
@@ -360,15 +364,19 @@ static void writeOFDMPowerReg(struct rtw_adapter *Adapter, u8 index, u32 *pValue
(RegOffset == rTxAGC_A_Mcs07_Mcs04 ||
RegOffset == rTxAGC_B_Mcs07_Mcs04))) {
writeVal = pwr_val[3];
- if (RegOffset == rTxAGC_A_Mcs15_Mcs12 || RegOffset == rTxAGC_A_Mcs07_Mcs04)
+ if (RegOffset == rTxAGC_A_Mcs15_Mcs12 ||
+ RegOffset == rTxAGC_A_Mcs07_Mcs04)
RegOffset = 0xc90;
- if (RegOffset == rTxAGC_B_Mcs15_Mcs12 || RegOffset == rTxAGC_B_Mcs07_Mcs04)
+ if (RegOffset == rTxAGC_B_Mcs15_Mcs12 ||
+ RegOffset == rTxAGC_B_Mcs07_Mcs04)
RegOffset = 0xc98;
for (i = 0; i < 3; i++) {
if (i != 2)
- writeVal = (writeVal > 8) ? (writeVal-8) : 0;
+ writeVal = (writeVal > 8) ?
+ (writeVal - 8) : 0;
else
- writeVal = (writeVal > 6) ? (writeVal-6) : 0;
+ writeVal = (writeVal > 6) ?
+ (writeVal - 6) : 0;
rtl8723au_write8(Adapter, RegOffset + i,
(u8)writeVal);
}
@@ -379,8 +387,9 @@ static void writeOFDMPowerReg(struct rtw_adapter *Adapter, u8 index, u32 *pValue
* Function: PHY_RF6052SetOFDMTxPower
*
* Overview: For legacy and HY OFDM, we must read EEPROM TX power index for
- * different channel and read original value in TX power register area from
- * 0xe00. We increase offset and original value to be correct tx pwr.
+ * different channel and read original value in TX power
+ * register area from 0xe00. We increase offset and
+ * original value to be correct tx pwr.
*
* Input: NONE
*
@@ -389,20 +398,23 @@ static void writeOFDMPowerReg(struct rtw_adapter *Adapter, u8 index, u32 *pValue
* Return: NONE
*
* Revised History:
- * When Who Remark
- * 11/05/2008 MHC Simulate 8192 series method.
- * 01/06/2009 MHC 1. Prevent Path B tx power overflow or underflow dure to
- * A/B pwr difference or legacy/HT pwr diff.
- * 2. We concern with path B legacy/HT OFDM difference.
- * 01/22/2009 MHC Support new EPRO format from SD3.
+ * When Remark
+ * 11/05/2008 MHC Simulate 8192 series method.
+ * 01/06/2009 MHC 1. Prevent Path B tx power overflow or
+ * underflow dure to A/B pwr difference or
+ * legacy/HT pwr diff.
+ * 2. We concern with path B legacy/HT OFDM difference.
+ * 01/22/2009 MHC Support new EPRO format from SD3.
*
*---------------------------------------------------------------------------*/
-void rtl8723a_PHY_RF6052SetOFDMTxPower(struct rtw_adapter *Adapter, u8 *pPowerLevel, u8 Channel)
+void rtl8723a_PHY_RF6052SetOFDMTxPower(struct rtw_adapter *Adapter,
+ u8 *pPowerLevel, u8 Channel)
{
u32 writeVal[2], powerBase0[2], powerBase1[2];
u8 index = 0;
- getPowerBase(Adapter, pPowerLevel, Channel, &powerBase0[0], &powerBase1[0]);
+ getPowerBase(Adapter, pPowerLevel, Channel,
+ &powerBase0[0], &powerBase1[0]);
for (index = 0; index < 6; index++) {
getTxPowerWriteValByRegulatory(Adapter, Channel, index,
@@ -416,7 +428,7 @@ static int phy_RF6052_Config_ParaFile(struct rtw_adapter *Adapter)
{
u32 u4RegValue = 0;
u8 eRFPath;
- struct bb_reg_define *pPhyReg;
+ struct bb_reg_define *pPhyReg;
int rtStatus = _SUCCESS;
struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
@@ -430,15 +442,17 @@ static int phy_RF6052_Config_ParaFile(struct rtw_adapter *Adapter)
/*----Store original RFENV control type----*/
switch (eRFPath) {
case RF_PATH_A:
- u4RegValue = PHY_QueryBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV);
+ u4RegValue = PHY_QueryBBReg(Adapter, pPhyReg->rfintfs,
+ bRFSI_RFENV);
break;
case RF_PATH_B:
- u4RegValue = PHY_QueryBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV<<16);
+ u4RegValue = PHY_QueryBBReg(Adapter, pPhyReg->rfintfs,
+ bRFSI_RFENV << 16);
break;
}
/*----Set RF_ENV enable----*/
- PHY_SetBBReg(Adapter, pPhyReg->rfintfe, bRFSI_RFENV<<16, 0x1);
+ PHY_SetBBReg(Adapter, pPhyReg->rfintfe, bRFSI_RFENV << 16, 0x1);
udelay(1);/* PlatformStallExecution(1); */
/*----Set RF_ENV output high----*/
@@ -446,10 +460,12 @@ static int phy_RF6052_Config_ParaFile(struct rtw_adapter *Adapter)
udelay(1);/* PlatformStallExecution(1); */
/* Set bit number of Address and Data for RF register */
- PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, b3WireAddressLength, 0x0); /* Set 1 to 4 bits for 8255 */
+ PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, b3WireAddressLength,
+ 0x0); /* Set 1 to 4 bits for 8255 */
udelay(1);/* PlatformStallExecution(1); */
- PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, b3WireDataLength, 0x0); /* Set 0 to 12 bits for 8255 */
+ PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, b3WireDataLength,
+ 0x0); /* Set 0 to 12 bits for 8255 */
udelay(1);/* PlatformStallExecution(1); */
/*----Initialize RF fom connfiguration file----*/
@@ -464,15 +480,16 @@ static int phy_RF6052_Config_ParaFile(struct rtw_adapter *Adapter)
/*----Restore RFENV control type----*/;
switch (eRFPath) {
case RF_PATH_A:
- PHY_SetBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV, u4RegValue);
+ PHY_SetBBReg(Adapter, pPhyReg->rfintfs,
+ bRFSI_RFENV, u4RegValue);
break;
case RF_PATH_B:
- PHY_SetBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV<<16, u4RegValue);
+ PHY_SetBBReg(Adapter, pPhyReg->rfintfs,
+ bRFSI_RFENV << 16, u4RegValue);
break;
}
if (rtStatus != _SUCCESS) {
- /* RT_TRACE(COMP_FPGA, DBG_LOUD, ("phy_RF6052_Config_ParaFile():Radio[%d] Fail!!", eRFPath)); */
goto phy_RF6052_Config_ParaFile_Fail;
}
}
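
The rf6052 hunks above are line-wrapping only; the save/program/restore dance around the RF parameter load is unchanged. As a rough sketch of that pattern (PHY_QueryBBReg/PHY_SetBBReg and bRFSI_RFENV are the driver's own symbols; the loader name below is hypothetical):

    /* Per-path RF init: save RFENV, enable it, load the RF parameter
     * file over the 3-wire interface, then put RFENV back.
     */
    static int rf6052_config_one_path(struct rtw_adapter *adapt,
                                      struct bb_reg_define *reg, bool path_b)
    {
            u32 mask = path_b ? bRFSI_RFENV << 16 : bRFSI_RFENV;
            u32 saved = PHY_QueryBBReg(adapt, reg->rfintfs, mask);
            int ret;

            PHY_SetBBReg(adapt, reg->rfintfe, bRFSI_RFENV << 16, 0x1);
            udelay(1);
            ret = load_rf_parafile(adapt, path_b);      /* hypothetical loader */
            PHY_SetBBReg(adapt, reg->rfintfs, mask, saved);
            return ret;
    }
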
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_xmit.c b/drivers/staging/rtl8723au/hal/rtl8723a_xmit.c
deleted file mode 100644
index 6ea2f9efef6..00000000000
--- a/drivers/staging/rtl8723au/hal/rtl8723a_xmit.c
+++ /dev/null
@@ -1,31 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#define _RTL8723A_XMIT_C_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-#include <rtl8723a_hal.h>
-
-void handle_txrpt_ccx_8723a(struct rtw_adapter *adapter, void *buf)
-{
- struct txrpt_ccx_8723a *txrpt_ccx = buf;
-
- if (txrpt_ccx->int_ccx) {
- if (txrpt_ccx->pkt_ok)
- rtw_ack_tx_done23a(&adapter->xmitpriv, RTW_SCTX_DONE_SUCCESS);
- else
- rtw_ack_tx_done23a(&adapter->xmitpriv, RTW_SCTX_DONE_CCX_PKT_FAIL);
- }
-}
diff --git a/drivers/staging/rtl8723au/hal/rtl8723au_led.c b/drivers/staging/rtl8723au/hal/rtl8723au_led.c
deleted file mode 100644
index b946636af9b..00000000000
--- a/drivers/staging/rtl8723au/hal/rtl8723au_led.c
+++ /dev/null
@@ -1,124 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-
-#include "drv_types.h"
-#include "rtl8723a_hal.h"
-#include "rtl8723a_led.h"
-#include "usb_ops_linux.h"
-
-/* */
-/* LED object. */
-/* */
-
-/* */
-/* Prototype of protected function. */
-/* */
-
-/* */
-/* LED_819xUsb routines. */
-/* */
-
-/* Description: */
-/* Turn on LED according to LedPin specified. */
-void SwLedOn23a(struct rtw_adapter *padapter, struct led_8723a *pLed)
-{
- u8 LedCfg = 0;
-
- if ((padapter->bSurpriseRemoved == true) || (padapter->bDriverStopped == true))
- return;
- switch (pLed->LedPin) {
- case LED_PIN_GPIO0:
- break;
- case LED_PIN_LED0:
- /* SW control led0 on. */
- rtl8723au_write8(padapter, REG_LEDCFG0,
- (LedCfg&0xf0)|BIT(5)|BIT(6));
- break;
- case LED_PIN_LED1:
- /* SW control led1 on. */
- rtl8723au_write8(padapter, REG_LEDCFG1, (LedCfg&0x00)|BIT(6));
- break;
- case LED_PIN_LED2:
- LedCfg = rtl8723au_read8(padapter, REG_LEDCFG2);
- /* SW control led1 on. */
- rtl8723au_write8(padapter, REG_LEDCFG2, (LedCfg&0x80)|BIT(5));
- break;
- default:
- break;
- }
- pLed->bLedOn = true;
-}
-
-/* Description: */
-/* Turn off LED according to LedPin specified. */
-void SwLedOff23a(struct rtw_adapter *padapter, struct led_8723a *pLed)
-{
- u8 LedCfg = 0;
- /* struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter); */
-
- if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped))
- goto exit;
-
- switch (pLed->LedPin) {
- case LED_PIN_GPIO0:
- break;
- case LED_PIN_LED0:
- /* SW control led0 on. */
- rtl8723au_write8(padapter, REG_LEDCFG0,
- (LedCfg&0xf0)|BIT(5)|BIT(6));
- break;
- case LED_PIN_LED1:
- /* SW control led1 on. */
- rtl8723au_write8(padapter, REG_LEDCFG1,
- (LedCfg&0x00)|BIT(5)|BIT(6));
- break;
- case LED_PIN_LED2:
- LedCfg = rtl8723au_read8(padapter, REG_LEDCFG2);
- /* SW control led1 on. */
- rtl8723au_write8(padapter, REG_LEDCFG2,
- (LedCfg&0x80)|BIT(3)|BIT(5));
- break;
- default:
- break;
- }
-exit:
- pLed->bLedOn = false;
-}
-
-/* Interface to manipulate LED objects. */
-
-/* Description: */
-/* Initialize all LED_871x objects. */
-void
-rtl8723au_InitSwLeds(struct rtw_adapter *padapter)
-{
- struct led_priv *pledpriv = &padapter->ledpriv;
-
- pledpriv->LedControlHandler = LedControl871x23a;
- /* 8723as-vau wifi used led2 */
- InitLed871x23a(padapter, &pledpriv->SwLed0, LED_PIN_LED2);
-
-/* InitLed871x23a(padapter,&pledpriv->SwLed1, LED_PIN_LED2); */
-}
-
-/* Description: */
-/* DeInitialize all LED_819xUsb objects. */
-void
-rtl8723au_DeInitSwLeds(struct rtw_adapter *padapter)
-{
- struct led_priv *ledpriv = &padapter->ledpriv;
-
- DeInitLed871x23a(&ledpriv->SwLed0);
-}
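
The deleted SW LED code above is a plain read-modify-write of the LEDCFG registers. For reference, the LED2 case it implemented boils down to the following (rtl8723au_read8/write8 are the accessors used throughout this patch; the helper name is made up):

    /* Drive LED2 via the software-control bits in REG_LEDCFG2. */
    static void led2_set(struct rtw_adapter *padapter, bool on)
    {
            u8 cfg = rtl8723au_read8(padapter, REG_LEDCFG2);

            if (on)
                    cfg = (cfg & 0x80) | BIT(5);            /* LED on  */
            else
                    cfg = (cfg & 0x80) | BIT(3) | BIT(5);   /* LED off */

            rtl8723au_write8(padapter, REG_LEDCFG2, cfg);
    }
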
diff --git a/drivers/staging/rtl8723au/hal/rtl8723au_xmit.c b/drivers/staging/rtl8723au/hal/rtl8723au_xmit.c
index a67850fe6e5..6070510bb47 100644
--- a/drivers/staging/rtl8723au/hal/rtl8723au_xmit.c
+++ b/drivers/staging/rtl8723au/hal/rtl8723au_xmit.c
@@ -21,18 +21,6 @@
/* include <rtl8192c_hal.h> */
#include <rtl8723a_hal.h>
-static void do_queue_select(struct rtw_adapter *padapter, struct pkt_attrib *pattrib)
-{
- u8 qsel;
-
- qsel = pattrib->priority;
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
- ("### do_queue_select priority =%d , qsel = %d\n",
- pattrib->priority, qsel));
-
- pattrib->qsel = qsel;
-}
-
static int urb_zero_packet_chk(struct rtw_adapter *padapter, int sz)
{
int blnSetTxDescOffset;
@@ -163,7 +151,7 @@ static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bag
memset(ptxdesc, 0, sizeof(struct tx_desc));
- if ((pxmitframe->frame_tag&0x0f) == DATA_FRAMETAG) {
+ if (pxmitframe->frame_tag == DATA_FRAMETAG) {
/* offset 4 */
ptxdesc->txdw1 |= cpu_to_le32(pattrib->mac_id&0x1f);
@@ -215,7 +203,7 @@ static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bag
ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate23a(pmlmeext->tx_rate));
}
- } else if ((pxmitframe->frame_tag&0x0f) == MGNT_FRAMETAG) {
+ } else if (pxmitframe->frame_tag == MGNT_FRAMETAG) {
/* offset 4 */
ptxdesc->txdw1 |= cpu_to_le32(pattrib->mac_id&0x1f);
@@ -240,10 +228,11 @@ static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bag
ptxdesc->txdw5 |= cpu_to_le32(0x00180000);/* retry limit = 6 */
ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate23a(pmlmeext->tx_rate));
- } else if ((pxmitframe->frame_tag&0x0f) == TXAGG_FRAMETAG) {
+ } else if (pxmitframe->frame_tag == TXAGG_FRAMETAG) {
DBG_8723A("pxmitframe->frame_tag == TXAGG_FRAMETAG\n");
} else {
- DBG_8723A("pxmitframe->frame_tag = %d\n", pxmitframe->frame_tag);
+ DBG_8723A("pxmitframe->frame_tag = %d\n",
+ pxmitframe->frame_tag);
/* offset 4 */
ptxdesc->txdw1 |= cpu_to_le32((4)&0x1f);/* CAM_ID(MAC_ID) */
@@ -306,10 +295,10 @@ static int rtw_dump_xframe(struct rtw_adapter *padapter,
struct pkt_attrib *pattrib = &pxmitframe->attrib;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- if ((pxmitframe->frame_tag == DATA_FRAMETAG) &&
- (pxmitframe->attrib.ether_type != 0x0806) &&
- (pxmitframe->attrib.ether_type != 0x888e) &&
- (pxmitframe->attrib.dhcp_pkt != 1))
+ if (pxmitframe->frame_tag == DATA_FRAMETAG &&
+ pxmitframe->attrib.ether_type != ETH_P_ARP &&
+ pxmitframe->attrib.ether_type != ETH_P_PAE &&
+ pxmitframe->attrib.dhcp_pkt != 1)
rtw_issue_addbareq_cmd23a(padapter, pxmitframe);
mem_addr = pxmitframe->buf_addr;
@@ -392,7 +381,7 @@ bool rtl8723au_xmitframe_complete(struct rtw_adapter *padapter,
pxmitbuf->priv_data = pxmitframe;
- if ((pxmitframe->frame_tag&0x0f) == DATA_FRAMETAG) {
+ if (pxmitframe->frame_tag == DATA_FRAMETAG) {
if (pxmitframe->attrib.priority <= 15)/* TID0~15 */
res = rtw_xmitframe_coalesce23a(padapter, pxmitframe->pkt, pxmitframe);
@@ -440,7 +429,7 @@ bool rtl8723au_hal_xmit(struct rtw_adapter *padapter,
struct pkt_attrib *pattrib = &pxmitframe->attrib;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- do_queue_select(padapter, pattrib);
+ pattrib->qsel = pattrib->priority;
spin_lock_bh(&pxmitpriv->lock);
#ifdef CONFIG_8723AU_AP_MODE
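
The rtl8723au_xmit.c change swaps the bare ether-type constants for the standard definitions from <linux/if_ether.h>, so the ADDBA-request filter documents itself. The resulting check, annotated:

    #include <linux/if_ether.h>    /* ETH_P_ARP (0x0806), ETH_P_PAE (0x888E) */

    /* Start a block-ack session only for normal data traffic; leave
     * ARP, EAPOL and DHCP frames alone.
     */
    if (pxmitframe->frame_tag == DATA_FRAMETAG &&
        pxmitframe->attrib.ether_type != ETH_P_ARP &&
        pxmitframe->attrib.ether_type != ETH_P_PAE &&
        pxmitframe->attrib.dhcp_pkt != 1)
            rtw_issue_addbareq_cmd23a(padapter, pxmitframe);
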
diff --git a/drivers/staging/rtl8723au/hal/usb_halinit.c b/drivers/staging/rtl8723au/hal/usb_halinit.c
index a5de03d45aa..febe5cedef8 100644
--- a/drivers/staging/rtl8723au/hal/usb_halinit.c
+++ b/drivers/staging/rtl8723au/hal/usb_halinit.c
@@ -21,11 +21,13 @@
#include <HalPwrSeqCmd.h>
#include <Hal8723PwrSeq.h>
#include <rtl8723a_hal.h>
-#include <rtl8723a_led.h>
#include <linux/ieee80211.h>
#include <usb_ops.h>
+static void phy_SsPwrSwitch92CU(struct rtw_adapter *Adapter,
+ enum rt_rf_power_state eRFPowerState);
+
static void
_ConfigChipOutEP(struct rtw_adapter *pAdapter, u8 NumOutPipe)
{
@@ -61,42 +63,28 @@ _ConfigChipOutEP(struct rtw_adapter *pAdapter, u8 NumOutPipe)
(u32)NumOutPipe, (u32)pHalData->OutEpNumber)); */
}
-static bool rtl8723au_set_queue_pipe_mapping(struct rtw_adapter *pAdapter,
- u8 NumInPipe, u8 NumOutPipe)
+bool rtl8723au_chip_configure(struct rtw_adapter *padapter)
{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(pAdapter);
- bool result = false;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
+ u8 NumInPipe = pdvobjpriv->RtNumInPipes;
+ u8 NumOutPipe = pdvobjpriv->RtNumOutPipes;
- _ConfigChipOutEP(pAdapter, NumOutPipe);
+ _ConfigChipOutEP(padapter, NumOutPipe);
/* Normal chip with one IN and one OUT doesn't have interrupt IN EP. */
if (pHalData->OutEpNumber == 1) {
if (NumInPipe != 1)
- return result;
+ return false;
}
- result = Hal_MappingOutPipe23a(pAdapter, NumOutPipe);
-
- return result;
-}
-
-void rtl8723au_chip_configure(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
-
- pHalData->interfaceIndex = pdvobjpriv->InterfaceNumber;
-
- rtl8723au_set_queue_pipe_mapping(padapter,
- pdvobjpriv->RtNumInPipes,
- pdvobjpriv->RtNumOutPipes);
+ return Hal_MappingOutPipe23a(padapter, NumOutPipe);
}
static int _InitPowerOn(struct rtw_adapter *padapter)
{
- int status = _SUCCESS;
- u16 value16 = 0;
- u8 value8 = 0;
+ u16 value16;
+ u8 value8;
/* RSV_CTRL 0x1C[7:0] = 0x00
unlock ISO/CLK/Power control register */
@@ -123,7 +111,7 @@ static int _InitPowerOn(struct rtw_adapter *padapter)
/* for Efuse PG, suggest by Jackie 2011.11.23 */
PHY_SetBBReg(padapter, REG_EFUSE_CTRL, BIT(28)|BIT(29)|BIT(30), 0x06);
- return status;
+ return _SUCCESS;
}
/* Shall USB interface init this? */
@@ -313,7 +301,7 @@ static void _InitNormalChipThreeOutEpPriority(struct rtw_adapter *Adapter)
_InitNormalChipRegPriority(Adapter, beQ, bkQ, viQ, voQ, mgtQ, hiQ);
}
-static void _InitNormalChipQueuePriority(struct rtw_adapter *Adapter)
+static void _InitQueuePriority(struct rtw_adapter *Adapter)
{
struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
@@ -333,11 +321,6 @@ static void _InitNormalChipQueuePriority(struct rtw_adapter *Adapter)
}
}
-static void _InitQueuePriority(struct rtw_adapter *Adapter)
-{
- _InitNormalChipQueuePriority(Adapter);
-}
-
static void _InitTransferPageSize(struct rtw_adapter *Adapter)
{
/* Tx page size is always 128. */
@@ -442,18 +425,6 @@ static void _InitEDCA(struct rtw_adapter *Adapter)
rtl8723au_write32(Adapter, REG_EDCA_VO_PARAM, 0x002FA226);
}
-static void _InitHWLed(struct rtw_adapter *Adapter)
-{
- struct led_priv *pledpriv = &Adapter->ledpriv;
-
- if (pledpriv->LedStrategy != HW_LED)
- return;
-
-/* HW led control */
-/* to do .... */
-/* must consider cases of antenna diversity/ commbo card/solo card/mini card */
-}
-
static void _InitRDGSetting(struct rtw_adapter *Adapter)
{
rtl8723au_write8(Adapter, REG_RD_CTRL, 0xFF);
@@ -480,7 +451,7 @@ static void _InitRFType(struct rtw_adapter *Adapter)
pHalData->rf_chip = RF_6052;
- if (is92CU == false) {
+ if (!is92CU) {
pHalData->rf_type = RF_1T1R;
DBG_8723A("Set RF Chip ID to RF_6052 and RF type to 1T1R.\n");
return;
@@ -527,23 +498,22 @@ enum rt_rf_power_state RfOnOffDetect23a(struct rtw_adapter *pAdapter)
return rfpowerstate;
}
-void _ps_open_RF23a(struct rtw_adapter *padapter);
-
int rtl8723au_hal_init(struct rtw_adapter *Adapter)
{
- u8 val8 = 0;
- u32 boundary;
- int status = _SUCCESS;
struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
struct pwrctrl_priv *pwrctrlpriv = &Adapter->pwrctrlpriv;
struct registry_priv *pregistrypriv = &Adapter->registrypriv;
+ u8 val8 = 0;
+ u32 boundary;
+ int status = _SUCCESS;
+ bool mac_on;
unsigned long init_start_time = jiffies;
Adapter->hw_init_completed = false;
if (Adapter->pwrctrlpriv.bkeepfwalive) {
- _ps_open_RF23a(Adapter);
+ phy_SsPwrSwitch92CU(Adapter, rf_on);
if (pHalData->bIQKInitialized) {
rtl8723a_phy_iq_calibrate(Adapter, true);
@@ -566,9 +536,9 @@ int rtl8723au_hal_init(struct rtw_adapter *Adapter)
/* 0x100 value of first mac is 0xEA while 0x100 value of secondary
is 0x00 */
if (val8 == 0xEA) {
- pHalData->bMACFuncEnable = false;
+ mac_on = false;
} else {
- pHalData->bMACFuncEnable = true;
+ mac_on = true;
RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
("%s: MAC has already power on\n", __func__));
}
@@ -587,7 +557,7 @@ int rtl8723au_hal_init(struct rtw_adapter *Adapter)
boundary = WMM_NORMAL_TX_PAGE_BOUNDARY;
}
- if (!pHalData->bMACFuncEnable) {
+ if (!mac_on) {
status = InitLLTTable23a(Adapter, boundary);
if (status == _FAIL) {
RT_TRACE(_module_hci_hal_init_c_, _drv_err_,
@@ -673,7 +643,7 @@ int rtl8723au_hal_init(struct rtw_adapter *Adapter)
pHalData->RfRegChnlVal[0] = PHY_QueryRFReg(Adapter, (enum RF_RADIO_PATH)0, RF_CHNLBW, bRFRegOffsetMask);
pHalData->RfRegChnlVal[1] = PHY_QueryRFReg(Adapter, (enum RF_RADIO_PATH)1, RF_CHNLBW, bRFRegOffsetMask);
- if (!pHalData->bMACFuncEnable) {
+ if (!mac_on) {
_InitQueueReservedPage(Adapter);
_InitTxBufferBoundary(Adapter);
}
@@ -694,8 +664,6 @@ int rtl8723au_hal_init(struct rtw_adapter *Adapter)
_InitRetryFunction(Adapter);
rtl8723a_InitBeaconParameters(Adapter);
- _InitHWLed(Adapter);
-
_BBTurnOnBlock(Adapter);
/* NicIFSetMacAddress(padapter, padapter->PermanentAddress); */
@@ -806,266 +774,108 @@ exit:
}
static void phy_SsPwrSwitch92CU(struct rtw_adapter *Adapter,
- enum rt_rf_power_state eRFPowerState,
- int bRegSSPwrLvl)
+ enum rt_rf_power_state eRFPowerState)
{
struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
- u8 value8;
- u8 bytetmp;
+ u8 sps0;
+
+ sps0 = rtl8723au_read8(Adapter, REG_SPS0_CTRL);
switch (eRFPowerState) {
case rf_on:
- if (bRegSSPwrLvl == 1) {
- /* 1. Enable MAC Clock. Can not be enabled now. */
- /* WriteXBYTE(REG_SYS_CLKR+1,
- ReadXBYTE(REG_SYS_CLKR+1) | BIT(3)); */
-
- /* 2. Force PWM, Enable SPS18_LDO_Marco_Block */
- rtl8723au_write8(Adapter, REG_SPS0_CTRL,
- rtl8723au_read8(Adapter, REG_SPS0_CTRL) |
- BIT(0) | BIT(3));
-
- /* 3. restore BB, AFE control register. */
- /* RF */
- if (pHalData->rf_type == RF_2T2R)
- PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter,
- 0x380038, 1);
- else
- PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter,
- 0x38, 1);
- PHY_SetBBReg(Adapter, rOFDM0_TRxPathEnable, 0xf0, 1);
- PHY_SetBBReg(Adapter, rFPGA0_RFMOD, BIT(1), 0);
-
- /* AFE */
- if (pHalData->rf_type == RF_2T2R)
- PHY_SetBBReg(Adapter, rRx_Wait_CCA, bMaskDWord,
- 0x63DB25A0);
- else if (pHalData->rf_type == RF_1T1R)
- PHY_SetBBReg(Adapter, rRx_Wait_CCA, bMaskDWord,
- 0x631B25A0);
-
- /* 4. issue 3-wire command that RF set to Rx idle
- mode. This is used to re-write the RX idle mode. */
- /* We can only prvide a usual value instead and then
- HW will modify the value by itself. */
- PHY_SetRFReg(Adapter, RF_PATH_A, 0,
- bRFRegOffsetMask, 0x32D95);
- if (pHalData->rf_type == RF_2T2R) {
- PHY_SetRFReg(Adapter, RF_PATH_B, 0,
- bRFRegOffsetMask, 0x32D95);
- }
- } else { /* Level 2 or others. */
- /* h. AFE_PLL_CTRL 0x28[7:0] = 0x80
- disable AFE PLL */
- rtl8723au_write8(Adapter, REG_AFE_PLL_CTRL, 0x81);
-
- /* i. AFE_XTAL_CTRL 0x24[15:0] = 0x880F
- gated AFE DIG_CLOCK */
- rtl8723au_write16(Adapter, REG_AFE_XTAL_CTRL, 0x800F);
- mdelay(1);
-
- /* 2. Force PWM, Enable SPS18_LDO_Marco_Block */
- rtl8723au_write8(Adapter, REG_SPS0_CTRL,
- rtl8723au_read8(Adapter, REG_SPS0_CTRL) |
- BIT(0) | BIT(3));
-
- /* 3. restore BB, AFE control register. */
- /* RF */
- if (pHalData->rf_type == RF_2T2R)
- PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter,
- 0x380038, 1);
- else
- PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter,
- 0x38, 1);
- PHY_SetBBReg(Adapter, rOFDM0_TRxPathEnable, 0xf0, 1);
- PHY_SetBBReg(Adapter, rFPGA0_RFMOD, BIT(1), 0);
-
- /* AFE */
- if (pHalData->rf_type == RF_2T2R)
- PHY_SetBBReg(Adapter, rRx_Wait_CCA,
- bMaskDWord, 0x63DB25A0);
- else if (pHalData->rf_type == RF_1T1R)
- PHY_SetBBReg(Adapter, rRx_Wait_CCA,
- bMaskDWord, 0x631B25A0);
-
- /* 4. issue 3-wire command that RF set to Rx idle
- mode. This is used to re-write the RX idle mode. */
- /* We can only prvide a usual value instead and
- then HW will modify the value by itself. */
- PHY_SetRFReg(Adapter, RF_PATH_A, 0,
+ /* 1. Enable MAC Clock. Can not be enabled now. */
+ /* WriteXBYTE(REG_SYS_CLKR+1,
+ ReadXBYTE(REG_SYS_CLKR+1) | BIT(3)); */
+
+ /* 2. Force PWM, Enable SPS18_LDO_Marco_Block */
+ rtl8723au_write8(Adapter, REG_SPS0_CTRL,
+ sps0 | BIT(0) | BIT(3));
+
+ /* 3. restore BB, AFE control register. */
+ /* RF */
+ if (pHalData->rf_type == RF_2T2R)
+ PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter,
+ 0x380038, 1);
+ else
+ PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter,
+ 0x38, 1);
+ PHY_SetBBReg(Adapter, rOFDM0_TRxPathEnable, 0xf0, 1);
+ PHY_SetBBReg(Adapter, rFPGA0_RFMOD, BIT(1), 0);
+
+ /* AFE */
+ if (pHalData->rf_type == RF_2T2R)
+ PHY_SetBBReg(Adapter, rRx_Wait_CCA, bMaskDWord,
+ 0x63DB25A0);
+ else if (pHalData->rf_type == RF_1T1R)
+ PHY_SetBBReg(Adapter, rRx_Wait_CCA, bMaskDWord,
+ 0x631B25A0);
+
+ /* 4. issue 3-wire command that RF set to Rx idle
+ mode. This is used to re-write the RX idle mode. */
+ /* We can only provide a usual value instead and then
+ HW will modify the value by itself. */
+ PHY_SetRFReg(Adapter, RF_PATH_A, 0, bRFRegOffsetMask, 0x32D95);
+ if (pHalData->rf_type == RF_2T2R) {
+ PHY_SetRFReg(Adapter, RF_PATH_B, 0,
bRFRegOffsetMask, 0x32D95);
- if (pHalData->rf_type == RF_2T2R) {
- PHY_SetRFReg(Adapter, RF_PATH_B, 0,
- bRFRegOffsetMask, 0x32D95);
- }
-
- /* 5. gated MAC Clock */
- bytetmp = rtl8723au_read8(Adapter, REG_APSD_CTRL);
- rtl8723au_write8(Adapter, REG_APSD_CTRL,
- bytetmp & ~BIT(6));
-
- mdelay(10);
-
- /* Set BB reset at first */
- /* 0x16 */
- rtl8723au_write8(Adapter, REG_SYS_FUNC_EN, 0x17);
-
- /* Enable TX */
- rtl8723au_write8(Adapter, REG_TXPAUSE, 0x0);
}
break;
case rf_sleep:
case rf_off:
- value8 = rtl8723au_read8(Adapter, REG_SPS0_CTRL);
if (IS_81xxC_VENDOR_UMC_B_CUT(pHalData->VersionID))
- value8 &= ~BIT(0);
+ sps0 &= ~BIT(0);
else
- value8 &= ~(BIT(0) | BIT(3));
- if (bRegSSPwrLvl == 1) {
- RT_TRACE(_module_hal_init_c_, _drv_err_, ("SS LVL1\n"));
- /* Disable RF and BB only for SelectSuspend. */
-
- /* 1. Set BB/RF to shutdown. */
- /* (1) Reg878[5:3]= 0 RF rx_code for
- preamble power saving */
- /* (2)Reg878[21:19]= 0 Turn off RF-B */
- /* (3) RegC04[7:4]= 0 Turn off all paths
- for packet detection */
- /* (4) Reg800[1] = 1 enable preamble power
- saving */
- Adapter->pwrctrlpriv.PS_BBRegBackup[PSBBREG_RF0] =
- PHY_QueryBBReg(Adapter, rFPGA0_XAB_RFParameter,
- bMaskDWord);
- Adapter->pwrctrlpriv.PS_BBRegBackup[PSBBREG_RF1] =
- PHY_QueryBBReg(Adapter, rOFDM0_TRxPathEnable,
- bMaskDWord);
- Adapter->pwrctrlpriv.PS_BBRegBackup[PSBBREG_RF2] =
- PHY_QueryBBReg(Adapter, rFPGA0_RFMOD,
- bMaskDWord);
- if (pHalData->rf_type == RF_2T2R) {
- PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter,
- 0x380038, 0);
- } else if (pHalData->rf_type == RF_1T1R) {
- PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter,
- 0x38, 0);
- }
- PHY_SetBBReg(Adapter, rOFDM0_TRxPathEnable, 0xf0, 0);
- PHY_SetBBReg(Adapter, rFPGA0_RFMOD, BIT(1), 1);
-
- /* 2 .AFE control register to power down. bit[30:22] */
- Adapter->pwrctrlpriv.PS_BBRegBackup[PSBBREG_AFE0] =
- PHY_QueryBBReg(Adapter, rRx_Wait_CCA,
- bMaskDWord);
- if (pHalData->rf_type == RF_2T2R)
- PHY_SetBBReg(Adapter, rRx_Wait_CCA, bMaskDWord,
- 0x00DB25A0);
- else if (pHalData->rf_type == RF_1T1R)
- PHY_SetBBReg(Adapter, rRx_Wait_CCA, bMaskDWord,
- 0x001B25A0);
-
- /* 3. issue 3-wire command that RF set to power down.*/
- PHY_SetRFReg(Adapter, RF_PATH_A, 0, bRFRegOffsetMask, 0);
- if (pHalData->rf_type == RF_2T2R)
- PHY_SetRFReg(Adapter, RF_PATH_B, 0,
- bRFRegOffsetMask, 0);
-
- /* 4. Force PFM , disable SPS18_LDO_Marco_Block */
- rtl8723au_write8(Adapter, REG_SPS0_CTRL, value8);
- } else { /* Level 2 or others. */
- RT_TRACE(_module_hal_init_c_, _drv_err_, ("SS LVL2\n"));
- {
- u8 eRFPath = RF_PATH_A, value8 = 0;
- rtl8723au_write8(Adapter, REG_TXPAUSE, 0xFF);
- PHY_SetRFReg(Adapter,
- (enum RF_RADIO_PATH)eRFPath,
- 0x0, bMaskByte0, 0x0);
- value8 |= APSDOFF;
- /* 0x40 */
- rtl8723au_write8(Adapter, REG_APSD_CTRL,
- value8);
-
- /* After switch APSD, we need to delay
- for stability */
- mdelay(10);
-
- /* Set BB reset at first */
- value8 = 0;
- value8 |= (FEN_USBD | FEN_USBA |
- FEN_BB_GLB_RSTn);
- /* 0x16 */
- rtl8723au_write8(Adapter, REG_SYS_FUNC_EN,
- value8);
- }
-
- /* Disable RF and BB only for SelectSuspend. */
-
- /* 1. Set BB/RF to shutdown. */
- /* (1) Reg878[5:3]= 0 RF rx_code for
- preamble power saving */
- /* (2)Reg878[21:19]= 0 Turn off RF-B */
- /* (3) RegC04[7:4]= 0 Turn off all paths for
- packet detection */
- /* (4) Reg800[1] = 1 enable preamble power
- saving */
- Adapter->pwrctrlpriv.PS_BBRegBackup[PSBBREG_RF0] =
- PHY_QueryBBReg(Adapter, rFPGA0_XAB_RFParameter,
- bMaskDWord);
- Adapter->pwrctrlpriv.PS_BBRegBackup[PSBBREG_RF1] =
- PHY_QueryBBReg(Adapter, rOFDM0_TRxPathEnable,
- bMaskDWord);
- Adapter->pwrctrlpriv.PS_BBRegBackup[PSBBREG_RF2] =
- PHY_QueryBBReg(Adapter, rFPGA0_RFMOD,
- bMaskDWord);
- if (pHalData->rf_type == RF_2T2R)
- PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter,
- 0x380038, 0);
- else if (pHalData->rf_type == RF_1T1R)
- PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter,
- 0x38, 0);
- PHY_SetBBReg(Adapter, rOFDM0_TRxPathEnable, 0xf0, 0);
- PHY_SetBBReg(Adapter, rFPGA0_RFMOD, BIT(1), 1);
-
- /* 2 .AFE control register to power down. bit[30:22] */
- Adapter->pwrctrlpriv.PS_BBRegBackup[PSBBREG_AFE0] =
- PHY_QueryBBReg(Adapter, rRx_Wait_CCA,
- bMaskDWord);
- if (pHalData->rf_type == RF_2T2R)
- PHY_SetBBReg(Adapter, rRx_Wait_CCA, bMaskDWord,
- 0x00DB25A0);
- else if (pHalData->rf_type == RF_1T1R)
- PHY_SetBBReg(Adapter, rRx_Wait_CCA, bMaskDWord,
- 0x001B25A0);
-
- /* 3. issue 3-wire command that RF set to power down. */
- PHY_SetRFReg(Adapter, RF_PATH_A, 0, bRFRegOffsetMask, 0);
- if (pHalData->rf_type == RF_2T2R)
- PHY_SetRFReg(Adapter, RF_PATH_B, 0,
- bRFRegOffsetMask, 0);
-
- /* 4. Force PFM , disable SPS18_LDO_Marco_Block */
- rtl8723au_write8(Adapter, REG_SPS0_CTRL, value8);
-
- /* 2010/10/13 MH/Isaachsu exchange sequence. */
- /* h. AFE_PLL_CTRL 0x28[7:0] = 0x80
- disable AFE PLL */
- rtl8723au_write8(Adapter, REG_AFE_PLL_CTRL, 0x80);
- mdelay(1);
-
- /* i. AFE_XTAL_CTRL 0x24[15:0] = 0x880F
- gated AFE DIG_CLOCK */
- rtl8723au_write16(Adapter, REG_AFE_XTAL_CTRL, 0xA80F);
+ sps0 &= ~(BIT(0) | BIT(3));
+
+ RT_TRACE(_module_hal_init_c_, _drv_err_, ("SS LVL1\n"));
+ /* Disable RF and BB only for SelectSuspend. */
+
+ /* 1. Set BB/RF to shutdown. */
+ /* (1) Reg878[5:3]= 0 RF rx_code for
+ preamble power saving */
+ /* (2)Reg878[21:19]= 0 Turn off RF-B */
+ /* (3) RegC04[7:4]= 0 Turn off all paths
+ for packet detection */
+ /* (4) Reg800[1] = 1 enable preamble power saving */
+ Adapter->pwrctrlpriv.PS_BBRegBackup[PSBBREG_RF0] =
+ PHY_QueryBBReg(Adapter, rFPGA0_XAB_RFParameter,
+ bMaskDWord);
+ Adapter->pwrctrlpriv.PS_BBRegBackup[PSBBREG_RF1] =
+ PHY_QueryBBReg(Adapter, rOFDM0_TRxPathEnable,
+ bMaskDWord);
+ Adapter->pwrctrlpriv.PS_BBRegBackup[PSBBREG_RF2] =
+ PHY_QueryBBReg(Adapter, rFPGA0_RFMOD, bMaskDWord);
+ if (pHalData->rf_type == RF_2T2R) {
+ PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter,
+ 0x380038, 0);
+ } else if (pHalData->rf_type == RF_1T1R) {
+ PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter, 0x38, 0);
}
+ PHY_SetBBReg(Adapter, rOFDM0_TRxPathEnable, 0xf0, 0);
+ PHY_SetBBReg(Adapter, rFPGA0_RFMOD, BIT(1), 1);
+
+ /* 2 .AFE control register to power down. bit[30:22] */
+ Adapter->pwrctrlpriv.PS_BBRegBackup[PSBBREG_AFE0] =
+ PHY_QueryBBReg(Adapter, rRx_Wait_CCA, bMaskDWord);
+ if (pHalData->rf_type == RF_2T2R)
+ PHY_SetBBReg(Adapter, rRx_Wait_CCA, bMaskDWord,
+ 0x00DB25A0);
+ else if (pHalData->rf_type == RF_1T1R)
+ PHY_SetBBReg(Adapter, rRx_Wait_CCA, bMaskDWord,
+ 0x001B25A0);
+
+ /* 3. issue 3-wire command that RF set to power down.*/
+ PHY_SetRFReg(Adapter, RF_PATH_A, 0, bRFRegOffsetMask, 0);
+ if (pHalData->rf_type == RF_2T2R)
+ PHY_SetRFReg(Adapter, RF_PATH_B, 0,
+ bRFRegOffsetMask, 0);
+
+ /* 4. Force PFM , disable SPS18_LDO_Marco_Block */
+ rtl8723au_write8(Adapter, REG_SPS0_CTRL, sps0);
break;
default:
break;
}
-
-} /* phy_PowerSwitch92CU */
-
-void _ps_open_RF23a(struct rtw_adapter *padapter)
-{
- /* here call with bRegSSPwrLvl 1, bRegSSPwrLvl 2 needs to be verified */
- phy_SsPwrSwitch92CU(padapter, rf_on, 1);
}
static void CardDisableRTL8723U(struct rtw_adapter *Adapter)
@@ -1142,8 +952,7 @@ int rtl8723au_inirp_init(struct rtw_adapter *Adapter)
/* issue Rx irp to receive data */
precvbuf = (struct recv_buf *)precvpriv->precv_buf;
for (i = 0; i < NR_RECVBUFF; i++) {
- if (rtl8723au_read_port(Adapter, RECV_BULK_IN_ADDR, 0,
- precvbuf) == _FAIL) {
+ if (rtl8723au_read_port(Adapter, 0, precvbuf) == _FAIL) {
RT_TRACE(_module_hci_hal_init_c_, _drv_err_,
("usb_rx_init: usb_read_port error\n"));
status = _FAIL;
@@ -1151,9 +960,9 @@ int rtl8723au_inirp_init(struct rtw_adapter *Adapter)
}
precvbuf++;
}
- if (rtl8723au_read_interrupt(Adapter, RECV_INT_IN_ADDR) == _FAIL) {
+ if (rtl8723au_read_interrupt(Adapter) == _FAIL) {
RT_TRACE(_module_hci_hal_init_c_, _drv_err_,
- ("usb_rx_init: usb_read_interrupt error\n"));
+ ("%s: usb_read_interrupt error\n", __func__));
status = _FAIL;
}
pHalData->IntrMask[0] = rtl8723au_read32(Adapter, REG_USB_HIMR);
@@ -1209,14 +1018,6 @@ static void _ReadBoardType(struct rtw_adapter *Adapter, u8 *PROMContent,
pHalData->ExternalPA = 1;
}
-static void _ReadLEDSetting(struct rtw_adapter *Adapter, u8 *PROMContent,
- bool AutoloadFail)
-{
- struct led_priv *pledpriv = &Adapter->ledpriv;
-
- pledpriv->LedStrategy = HW_LED;
-}
-
static void Hal_EfuseParseMACAddr_8723AU(struct rtw_adapter *padapter,
u8 *hwinfo, bool AutoLoadFail)
{
@@ -1263,7 +1064,6 @@ static void readAdapterInfo(struct rtw_adapter *padapter)
pEEPROM->bautoload_fail_flag);
Hal_EfuseParseThermalMeter_8723A(padapter, hwinfo,
pEEPROM->bautoload_fail_flag);
- _ReadLEDSetting(padapter, hwinfo, pEEPROM->bautoload_fail_flag);
/* _ReadRFSetting(Adapter, PROMContent, pEEPROM->bautoload_fail_flag); */
/* _ReadPSSetting(Adapter, PROMContent, pEEPROM->bautoload_fail_flag); */
Hal_EfuseParseAntennaDiversity(padapter, hwinfo,
@@ -1276,10 +1076,6 @@ static void readAdapterInfo(struct rtw_adapter *padapter)
pEEPROM->bautoload_fail_flag);
Hal_EfuseParseXtal_8723A(padapter, hwinfo,
pEEPROM->bautoload_fail_flag);
- /* */
- /* The following part initialize some vars by PG info. */
- /* */
- Hal_InitChannelPlan23a(padapter);
/* hal_CustomizedBehavior_8723U(Adapter); */
@@ -1311,13 +1107,6 @@ static void _ReadRFType(struct rtw_adapter *Adapter)
pHalData->rf_chip = RF_6052;
}
-static void _ReadSilmComboMode(struct rtw_adapter *Adapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
-
- pHalData->SlimComboDbg = false; /* Default is not debug mode. */
-}
-
/* */
/* Description: */
/* We should set Efuse cell selection to WiFi cell in default. */
@@ -1350,10 +1139,6 @@ void rtl8723a_read_adapter_info(struct rtw_adapter *Adapter)
_ReadRFType(Adapter);/* rf_chip -> _InitRFType() */
_ReadPROMContent(Adapter);
- /* 2010/10/25 MH THe function must be called after
- borad_type & IC-Version recognize. */
- _ReadSilmComboMode(Adapter);
-
/* MSG_8723A("%s()(done), rf_chip = 0x%x, rf_type = 0x%x\n",
__func__, pHalData->rf_chip, pHalData->rf_type); */
@@ -1417,17 +1202,17 @@ int GetHalDefVar8192CUsb(struct rtw_adapter *Adapter,
void rtl8723a_update_ramask(struct rtw_adapter *padapter,
u32 mac_id, u8 rssi_level)
{
- u8 init_rate = 0;
- u8 networkType, raid;
- u32 mask, rate_bitmap;
- u8 shortGIrate = false;
- int supportRateNum = 0;
struct sta_info *psta;
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct dm_priv *pdmpriv = &pHalData->dmpriv;
+ struct FW_Sta_Info *fw_sta;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
+ u8 init_rate, networkType, raid;
+ u32 mask, rate_bitmap;
+ u8 shortGIrate = false;
+ int supportRateNum;
if (mac_id >= NUM_STA) /* CAM_SIZE */
return;
@@ -1456,8 +1241,8 @@ void rtl8723a_update_ramask(struct rtw_adapter *padapter,
break;
case 1:/* for broadcast/multicast */
- supportRateNum = rtw_get_rateset_len23a(
- pmlmeinfo->FW_sta_info[mac_id].SupportedRates);
+ fw_sta = &pmlmeinfo->FW_sta_info[mac_id];
+ supportRateNum = rtw_get_rateset_len23a(fw_sta->SupportedRates);
if (pmlmeext->cur_wireless_mode & WIRELESS_11B)
networkType = WIRELESS_11B;
else
@@ -1469,23 +1254,22 @@ void rtl8723a_update_ramask(struct rtw_adapter *padapter,
break;
default: /* for each sta in IBSS */
- supportRateNum = rtw_get_rateset_len23a(
- pmlmeinfo->FW_sta_info[mac_id].SupportedRates);
+ fw_sta = &pmlmeinfo->FW_sta_info[mac_id];
+ supportRateNum = rtw_get_rateset_len23a(fw_sta->SupportedRates);
networkType = judge_network_type23a(padapter,
- pmlmeinfo->FW_sta_info[mac_id].SupportedRates,
- supportRateNum) & 0xf;
+ fw_sta->SupportedRates,
+ supportRateNum) & 0xf;
/* pmlmeext->cur_wireless_mode = networkType; */
raid = networktype_to_raid23a(networkType);
mask = update_supported_rate23a(cur_network->SupportedRates,
- supportRateNum);
+ supportRateNum);
/* todo: support HT in IBSS */
break;
}
/* mask &= 0x0fffffff; */
- rate_bitmap = 0x0fffffff;
rate_bitmap = ODM_Get_Rate_Bitmap23a(pHalData, mac_id, mask,
rssi_level);
DBG_8723A("%s => mac_id:%d, networkType:0x%02x, "
@@ -1493,15 +1277,14 @@ void rtl8723a_update_ramask(struct rtw_adapter *padapter,
__func__, mac_id, networkType, mask, rssi_level, rate_bitmap);
mask &= rate_bitmap;
- mask |= ((raid<<28)&0xf0000000);
+ mask |= ((raid << 28) & 0xf0000000);
- init_rate = get_highest_rate_idx23a(mask)&0x3f;
+ init_rate = get_highest_rate_idx23a(mask) & 0x3f;
if (pHalData->fw_ractrl == true) {
u8 arg = 0;
- /* arg = (cam_idx-4)&0x1f;MACID */
- arg = mac_id&0x1f;/* MACID */
+ arg = mac_id & 0x1f;/* MACID */
arg |= BIT(7);
@@ -1516,7 +1299,7 @@ void rtl8723a_update_ramask(struct rtw_adapter *padapter,
if (shortGIrate == true)
init_rate |= BIT(6);
- rtl8723au_write8(padapter, (REG_INIDATA_RATE_SEL+mac_id),
+ rtl8723au_write8(padapter, (REG_INIDATA_RATE_SEL + mac_id),
init_rate);
}
diff --git a/drivers/staging/rtl8723au/hal/usb_ops_linux.c b/drivers/staging/rtl8723au/hal/usb_ops_linux.c
index c1b04c13c39..a6d16adce10 100644
--- a/drivers/staging/rtl8723au/hal/usb_ops_linux.c
+++ b/drivers/staging/rtl8723au/hal/usb_ops_linux.c
@@ -317,7 +317,7 @@ urb_submit:
}
}
-int rtl8723au_read_interrupt(struct rtw_adapter *adapter, u32 addr)
+int rtl8723au_read_interrupt(struct rtw_adapter *adapter)
{
int err;
unsigned int pipe;
@@ -545,8 +545,7 @@ static void usb_read_port_complete(struct urb *purb)
("usb_read_port_complete: (purb->actual_"
"length > MAX_RECVBUF_SZ) || (purb->actual_"
"length < RXDESC_SIZE)\n"));
- rtl8723au_read_port(padapter, RECV_BULK_IN_ADDR, 0,
- precvbuf);
+ rtl8723au_read_port(padapter, 0, precvbuf);
DBG_8723A("%s()-%d: RX Warning!\n",
__func__, __LINE__);
} else {
@@ -561,8 +560,7 @@ static void usb_read_port_complete(struct urb *purb)
tasklet_schedule(&precvpriv->recv_tasklet);
precvbuf->pskb = NULL;
- rtl8723au_read_port(padapter, RECV_BULK_IN_ADDR, 0,
- precvbuf);
+ rtl8723au_read_port(padapter, 0, precvbuf);
}
} else {
RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
@@ -596,8 +594,7 @@ static void usb_read_port_complete(struct urb *purb)
break;
case -EPROTO:
case -EOVERFLOW:
- rtl8723au_read_port(padapter, RECV_BULK_IN_ADDR, 0,
- precvbuf);
+ rtl8723au_read_port(padapter, 0, precvbuf);
break;
case -EINPROGRESS:
DBG_8723A("ERROR: URB IS IN PROGRESS!\n");
@@ -608,18 +605,18 @@ static void usb_read_port_complete(struct urb *purb)
}
}
-int rtl8723au_read_port(struct rtw_adapter *adapter, u32 addr, u32 cnt,
+int rtl8723au_read_port(struct rtw_adapter *adapter, u32 cnt,
struct recv_buf *precvbuf)
{
+ struct urb *purb;
+ struct dvobj_priv *pdvobj = adapter_to_dvobj(adapter);
+ struct recv_priv *precvpriv = &adapter->recvpriv;
+ struct usb_device *pusbd = pdvobj->pusbdev;
int err;
unsigned int pipe;
unsigned long tmpaddr;
unsigned long alignment;
int ret = _SUCCESS;
- struct urb *purb;
- struct dvobj_priv *pdvobj = adapter_to_dvobj(adapter);
- struct recv_priv *precvpriv = &adapter->recvpriv;
- struct usb_device *pusbd = pdvobj->pusbdev;
if (adapter->bDriverStopped || adapter->bSurpriseRemoved) {
RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
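
rtl8723au_read_port() and rtl8723au_read_interrupt() drop their never-used address arguments (the matching RECV_BULK_IN_ADDR/RECV_INT_IN_ADDR defines go away in rtl8723a_recv.h further below). A call site after the change simply re-arms against the implied bulk-in pipe:

    /* Re-arm one receive buffer; the endpoint is fixed inside read_port now. */
    if (rtl8723au_read_port(padapter, 0, precvbuf) == _FAIL)
            DBG_8723A("%s: bulk-in resubmit failed\n", __func__);
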
diff --git a/drivers/staging/rtl8723au/include/Hal8723UHWImg_CE.h b/drivers/staging/rtl8723au/include/Hal8723UHWImg_CE.h
index bbeaab48057..c834b3a738d 100644
--- a/drivers/staging/rtl8723au/include/Hal8723UHWImg_CE.h
+++ b/drivers/staging/rtl8723au/include/Hal8723UHWImg_CE.h
@@ -19,7 +19,7 @@ extern u8 Rtl8723UFwUMCBCutImgArrayWithoutBT[Rtl8723UUMCBCutImgArrayWithoutBTLen
extern const u8 Rtl8723SFwUMCBCutMPImgArray[Rtl8723SUMCBCutMPImgArrayLength];
#define Rtl8723EBTImgArrayLength 15276
-extern u8 Rtl8723EFwBTImgArray[Rtl8723EBTImgArrayLength] ;
+extern u8 Rtl8723EFwBTImgArray[Rtl8723EBTImgArrayLength];
#define Rtl8723UPHY_REG_Array_PGLength 336
extern u32 Rtl8723UPHY_REG_Array_PG[Rtl8723UPHY_REG_Array_PGLength];
diff --git a/drivers/staging/rtl8723au/include/drv_types.h b/drivers/staging/rtl8723au/include/drv_types.h
index 9870f87bdc7..e83463aeb9b 100644
--- a/drivers/staging/rtl8723au/include/drv_types.h
+++ b/drivers/staging/rtl8723au/include/drv_types.h
@@ -51,7 +51,6 @@ enum _NIC_VERSION {
#include <rtw_debug.h>
#include <rtw_rf.h>
#include <rtw_event.h>
-#include <rtw_led.h>
#include <rtw_mlme_ext.h>
#include <rtw_ap.h>
@@ -228,7 +227,6 @@ struct rtw_adapter {
struct registry_priv registrypriv;
struct pwrctrl_priv pwrctrlpriv;
struct eeprom_priv eeprompriv;
- struct led_priv ledpriv;
u32 setband;
diff --git a/drivers/staging/rtl8723au/include/odm_debug.h b/drivers/staging/rtl8723au/include/odm_debug.h
index 4d935a33ccb..83be5bab9e0 100644
--- a/drivers/staging/rtl8723au/include/odm_debug.h
+++ b/drivers/staging/rtl8723au/include/odm_debug.h
@@ -91,8 +91,7 @@
#define ODM_COMP_INIT BIT(31)
/*------------------------Export Macro Definition---------------------------*/
- #define DbgPrint printk
- #define RT_PRINTK(fmt, args...) DbgPrint("%s(): " fmt, __func__, ## args);
+ #define RT_PRINTK(fmt, args...) printk("%s(): " fmt, __func__, ## args);
#ifndef ASSERT
#define ASSERT(expr)
@@ -101,38 +100,17 @@
#define ODM_RT_TRACE(pDM_Odm, comp, level, fmt) \
if(((comp) & pDM_Odm->DebugComponents) && (level <= pDM_Odm->DebugLevel)) \
{ \
- DbgPrint("[ODM-8723A] "); \
- RT_PRINTK fmt; \
- }
-
-#define ODM_RT_TRACE_F(pDM_Odm, comp, level, fmt) \
- if(((comp) & pDM_Odm->DebugComponents) && (level <= pDM_Odm->DebugLevel)) \
- { \
+ printk("[ODM-8723A] "); \
RT_PRINTK fmt; \
}
#define ODM_RT_ASSERT(pDM_Odm, expr, fmt) \
if(!(expr)) { \
- DbgPrint("Assertion failed! %s at ......\n", #expr); \
- DbgPrint(" ......%s,%s,line=%d\n", __FILE__, __func__, __LINE__);\
+ printk("Assertion failed! %s at ......\n", #expr); \
+ printk(" ......%s,%s,line=%d\n", __FILE__, __func__, __LINE__);\
RT_PRINTK fmt; \
ASSERT(false); \
}
-#define ODM_dbg_enter() { DbgPrint("==> %s\n", __func__); }
-#define ODM_dbg_exit() { DbgPrint("<== %s\n", __func__); }
-#define ODM_dbg_trace(str) { DbgPrint("%s:%s\n", __func__, str); }
-
-#define ODM_PRINT_ADDR(pDM_Odm, comp, level, title_str, ptr) \
- if(((comp) & pDM_Odm->DebugComponents) && (level <= pDM_Odm->DebugLevel){ \
- int __i; \
- u8 * __ptr = (u8 *)ptr; \
- DbgPrint("[ODM] "); \
- DbgPrint(title_str); \
- DbgPrint(" "); \
- for (__i=0; __i < 6; __i++) \
- DbgPrint("%02X%s", __ptr[__i], (__i == 5) ? "" : "-"); \
- DbgPrint("\n"); \
- }
void ODM_InitDebugSetting23a(struct dm_odm_t *pDM_Odm);
diff --git a/drivers/staging/rtl8723au/include/rtl8723a_dm.h b/drivers/staging/rtl8723au/include/rtl8723a_dm.h
index 18112225e53..bf236e8e47a 100644
--- a/drivers/staging/rtl8723au/include/rtl8723a_dm.h
+++ b/drivers/staging/rtl8723au/include/rtl8723a_dm.h
@@ -37,8 +37,7 @@ enum{
#define IQK_BB_REG_NUM 9
#define HP_THERMAL_NUM 8
/* duplicate code,will move to ODM ######### */
-struct dm_priv
-{
+struct dm_priv {
u32 InitODMFlag;
/* Upper and Lower Signal threshold for Rate Adaptive*/
diff --git a/drivers/staging/rtl8723au/include/rtl8723a_hal.h b/drivers/staging/rtl8723au/include/rtl8723a_hal.h
index ee203a572cb..e14633678b5 100644
--- a/drivers/staging/rtl8723au/include/rtl8723a_hal.h
+++ b/drivers/staging/rtl8723au/include/rtl8723a_hal.h
@@ -372,16 +372,9 @@ struct hal_data_8723a {
/* 2010/08/09 MH Add CU power down mode. */
u8 pwrdown;
- /* Add for dual MAC 0--Mac0 1--Mac1 */
- u32 interfaceIndex;
-
u8 OutEpQueueSel;
u8 OutEpNumber;
- /* 2010/11/22 MH Add for slim combo debug mode selective. */
- /* This is used for fix the drawback of CU TSMC-A/UMC-A cut. HW auto suspend ability. Close BT clock. */
- bool SlimComboDbg;
-
/* */
/* Add For EEPROM Efuse switch and Efuse Shadow map Setting */
/* */
@@ -405,8 +398,6 @@ struct hal_data_8723a {
* 2011/02/23 MH Add for 8723 mylti function definition. The define should be moved to an */
/* independent file in the future. */
- bool bMACFuncEnable;
-
/* Interrupt related register information. */
u32 IntArray[2];
u32 IntrMask[2];
@@ -518,8 +509,6 @@ void Hal_EfuseParseRateIndicationOption(struct rtw_adapter *padapter, u8 *hwinfo
void Hal_EfuseParseXtal_8723A(struct rtw_adapter *pAdapter, u8 *hwinfo, u8 AutoLoadFail);
void Hal_EfuseParseThermalMeter_8723A(struct rtw_adapter *padapter, u8 *hwinfo, bool AutoLoadFail);
-void Hal_InitChannelPlan23a(struct rtw_adapter *padapter);
-
/* register */
void SetBcnCtrlReg23a(struct rtw_adapter *padapter, u8 SetBits, u8 ClearBits);
void rtl8723a_InitBeaconParameters(struct rtw_adapter *padapter);
diff --git a/drivers/staging/rtl8723au/include/rtl8723a_led.h b/drivers/staging/rtl8723au/include/rtl8723a_led.h
deleted file mode 100644
index 1623d186feb..00000000000
--- a/drivers/staging/rtl8723au/include/rtl8723a_led.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef __RTL8723A_LED_H__
-#define __RTL8723A_LED_H__
-
-#include <osdep_service.h>
-#include <drv_types.h>
-
-
-/* */
-/* Interface to manipulate LED objects. */
-/* */
-void rtl8723au_InitSwLeds(struct rtw_adapter *padapter);
-void rtl8723au_DeInitSwLeds(struct rtw_adapter *padapter);
-void SwLedOn23a(struct rtw_adapter *padapter, struct led_8723a * pLed);
-void SwLedOff23a(struct rtw_adapter *padapter, struct led_8723a * pLed);
-
-#endif
diff --git a/drivers/staging/rtl8723au/include/rtl8723a_recv.h b/drivers/staging/rtl8723au/include/rtl8723a_recv.h
index 885d4d3859f..0177bbc1c1c 100644
--- a/drivers/staging/rtl8723au/include/rtl8723a_recv.h
+++ b/drivers/staging/rtl8723au/include/rtl8723a_recv.h
@@ -28,15 +28,11 @@
#define MAX_RECVBUF_SZ 15360 /* 15k < 16k */
-#define RECV_BULK_IN_ADDR 0x80
-#define RECV_INT_IN_ADDR 0x81
-
#define PHY_RSSI_SLID_WIN_MAX 100
#define PHY_LINKQUALITY_SLID_WIN_MAX 20
-struct phy_stat
-{
+struct phy_stat {
unsigned int phydw0;
unsigned int phydw1;
unsigned int phydw2;
diff --git a/drivers/staging/rtl8723au/include/rtl8723a_xmit.h b/drivers/staging/rtl8723au/include/rtl8723a_xmit.h
index 815560c6e1d..7db29f40ab7 100644
--- a/drivers/staging/rtl8723au/include/rtl8723a_xmit.h
+++ b/drivers/staging/rtl8723au/include/rtl8723a_xmit.h
@@ -212,7 +212,6 @@ struct txrpt_ccx_8723a {
#define txrpt_ccx_qtime_8723a(txrpt_ccx) ((txrpt_ccx)->ccx_qtime0+((txrpt_ccx)->ccx_qtime1<<8))
void handle_txrpt_ccx_8723a(struct rtw_adapter *adapter, void *buf);
-void rtl8723a_update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem);
void rtl8723a_fill_fake_txdesc(struct rtw_adapter *padapter, u8 *pDesc, u32 BufferLen, u8 IsPsPoll, u8 IsBTQosNull);
int rtl8723au_hal_xmitframe_enqueue(struct rtw_adapter *padapter, struct xmit_frame *pxmitframe);
diff --git a/drivers/staging/rtl8723au/include/rtw_cmd.h b/drivers/staging/rtl8723au/include/rtw_cmd.h
index ef67068a5fe..71044107d13 100644
--- a/drivers/staging/rtl8723au/include/rtw_cmd.h
+++ b/drivers/staging/rtl8723au/include/rtw_cmd.h
@@ -17,7 +17,6 @@
#include <wlan_bssdef.h>
#include <rtw_rf.h>
-#include <rtw_led.h>
#define C2H_MEM_SZ (16*1024)
@@ -450,8 +449,7 @@ struct getrfintfs_parm {
u8 rfintfs;
};
-struct Tx_Beacon_param
-{
+struct Tx_Beacon_param {
struct wlan_bssid_ex network;
};
diff --git a/drivers/staging/rtl8723au/include/rtw_ht.h b/drivers/staging/rtl8723au/include/rtw_ht.h
index cfc947daf08..780eb894411 100644
--- a/drivers/staging/rtl8723au/include/rtw_ht.h
+++ b/drivers/staging/rtl8723au/include/rtw_ht.h
@@ -19,8 +19,7 @@
#include "linux/ieee80211.h"
#include "wifi.h"
-struct ht_priv
-{
+struct ht_priv {
bool ht_option;
bool ampdu_enable;/* for enable Tx A-MPDU */
/* u8 baddbareq_issued[16]; */
diff --git a/drivers/staging/rtl8723au/include/rtw_led.h b/drivers/staging/rtl8723au/include/rtw_led.h
deleted file mode 100644
index c071da587ef..00000000000
--- a/drivers/staging/rtl8723au/include/rtw_led.h
+++ /dev/null
@@ -1,181 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef __RTW_LED_H_
-#define __RTW_LED_H_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-
-#define MSECS(t) (HZ * ((t) / 1000) + (HZ * ((t) % 1000)) / 1000)
-
-#define LED_BLINK_NORMAL_INTERVAL 100
-#define LED_BLINK_SLOWLY_INTERVAL 200
-#define LED_BLINK_LONG_INTERVAL 400
-
-#define LED_BLINK_NO_LINK_INTERVAL_ALPHA 1000
-#define LED_BLINK_LINK_INTERVAL_ALPHA 500 /* 500 */
-#define LED_BLINK_SCAN_INTERVAL_ALPHA 180 /* 150 */
-#define LED_BLINK_FASTER_INTERVAL_ALPHA 50
-#define LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA 5000
-
-#define LED_BLINK_NORMAL_INTERVAL_NETTRONIX 100
-#define LED_BLINK_SLOWLY_INTERVAL_NETTRONIX 2000
-
-#define LED_BLINK_SLOWLY_INTERVAL_PORNET 1000
-#define LED_BLINK_NORMAL_INTERVAL_PORNET 100
-
-#define LED_BLINK_FAST_INTERVAL_BITLAND 30
-
-/* 060403, rcnjko: Customized for AzWave. */
-#define LED_CM2_BLINK_ON_INTERVAL 250
-#define LED_CM2_BLINK_OFF_INTERVAL 4750
-
-#define LED_CM8_BLINK_INTERVAL 500 /* for QMI */
-#define LED_CM8_BLINK_OFF_INTERVAL 3750 /* for QMI */
-
-/* 080124, lanhsin: Customized for RunTop */
-#define LED_RunTop_BLINK_INTERVAL 300
-
-/* 060421, rcnjko: Customized for Sercomm Printer Server case. */
-#define LED_CM3_BLINK_INTERVAL 1500
-
-enum led_ctl_mode {
- LED_CTL_POWER_ON = 1,
- LED_CTL_LINK = 2,
- LED_CTL_NO_LINK = 3,
- LED_CTL_TX = 4,
- LED_CTL_RX = 5,
- LED_CTL_SITE_SURVEY = 6,
- LED_CTL_POWER_OFF = 7,
- LED_CTL_START_TO_LINK = 8,
- LED_CTL_START_WPS = 9,
- LED_CTL_STOP_WPS = 10,
- LED_CTL_START_WPS_BOTTON = 11, /* added for runtop */
- LED_CTL_STOP_WPS_FAIL = 12, /* added for ALPHA */
- LED_CTL_STOP_WPS_FAIL_OVERLAP = 13, /* added for BELKIN */
- LED_CTL_CONNECTION_NO_TRANSFER = 14,
-};
-
-enum led_state_872x {
- LED_UNKNOWN = 0,
- RTW_LED_ON = 1,
- RTW_LED_OFF = 2,
- LED_BLINK_NORMAL = 3,
- LED_BLINK_SLOWLY = 4,
- LED_BLINK_POWER_ON = 5,
- LED_BLINK_SCAN = 6, /* LED is blinking during scanning period, the # of times to blink is depend on time for scanning. */
- LED_BLINK_NO_LINK = 7, /* LED is blinking during no link state. */
- LED_BLINK_StartToBlink = 8,/* Customzied for Sercomm Printer Server case */
- LED_BLINK_TXRX = 9,
- LED_BLINK_WPS = 10, /* LED is blinkg during WPS communication */
- LED_BLINK_WPS_STOP = 11, /* for ALPHA */
- LED_BLINK_WPS_STOP_OVERLAP = 12, /* for BELKIN */
- LED_BLINK_RUNTOP = 13, /* Customized for RunTop */
- LED_BLINK_CAMEO = 14,
- LED_BLINK_XAVI = 15,
- LED_BLINK_ALWAYS_ON = 16,
-};
-
-enum led_pin_8723a {
- LED_PIN_NULL = 0,
- LED_PIN_LED0 = 1,
- LED_PIN_LED1 = 2,
- LED_PIN_LED2 = 3,
- LED_PIN_GPIO0 = 4,
-};
-
-struct led_8723a {
- struct rtw_adapter *padapter;
-
- enum led_pin_8723a LedPin; /* Identify how to implement this SW led. */
- enum led_state_872x CurrLedState; /* Current LED state. */
- enum led_state_872x BlinkingLedState; /* Next state for blinking, either RTW_LED_ON or RTW_LED_OFF are. */
-
- u8 bLedOn; /* true if LED is ON, false if LED is OFF. */
-
- u8 bLedBlinkInProgress; /* true if it is blinking, false o.w.. */
-
- u8 bLedWPSBlinkInProgress;
-
- u32 BlinkTimes; /* Number of times to toggle led state for blinking. */
-
- struct timer_list BlinkTimer; /* Timer object for led blinking. */
-
- u8 bSWLedCtrl;
-
- /* ALPHA, added by chiyoko, 20090106 */
- u8 bLedNoLinkBlinkInProgress;
- u8 bLedLinkBlinkInProgress;
- u8 bLedStartToLinkBlinkInProgress;
- u8 bLedScanBlinkInProgress;
-
- struct work_struct BlinkWorkItem; /* Workitem used by BlinkTimer to manipulate H/W to blink LED. */
-};
-
-#define IS_LED_WPS_BLINKING(_LED_871x) (((struct led_8723a *)_LED_871x)->CurrLedState==LED_BLINK_WPS \
- || ((struct led_8723a *)_LED_871x)->CurrLedState==LED_BLINK_WPS_STOP \
- || ((struct led_8723a *)_LED_871x)->bLedWPSBlinkInProgress)
-
-#define IS_LED_BLINKING(_LED_871x) (((struct led_8723a *)_LED_871x)->bLedWPSBlinkInProgress \
- ||((struct led_8723a *)_LED_871x)->bLedScanBlinkInProgress)
-
-/* */
-/* LED customization. */
-/* */
-
-enum led_strategy_8723a {
- SW_LED_MODE0 = 0, /* SW control 1 LED via GPIO0. It is default option. */
- SW_LED_MODE1= 1, /* 2 LEDs, through LED0 and LED1. For ALPHA. */
- SW_LED_MODE2 = 2, /* SW control 1 LED via GPIO0, customized for AzWave 8187 minicard. */
- SW_LED_MODE3 = 3, /* SW control 1 LED via GPIO0, customized for Sercomm Printer Server case. */
- SW_LED_MODE4 = 4, /* for Edimax / Belkin */
- SW_LED_MODE5 = 5, /* for Sercomm / Belkin */
- SW_LED_MODE6 = 6, /* for 88CU minicard, porting from ce SW_LED_MODE7 */
- HW_LED = 50, /* HW control 2 LEDs, LED0 and LED1 (there are 4 different control modes, see MAC.CONFIG1 for details.) */
- LED_ST_NONE = 99,
-};
-
-void LedControl871x23a(struct rtw_adapter *padapter, enum led_ctl_mode LedAction);
-
-struct led_priv{
- /* add for led controll */
- struct led_8723a SwLed0;
- struct led_8723a SwLed1;
- enum led_strategy_8723a LedStrategy;
- u8 bRegUseLed;
- void (*LedControlHandler)(struct rtw_adapter *padapter, enum led_ctl_mode LedAction);
- /* add for led controll */
-};
-
-#define rtw_led_control(adapter, LedAction)
-
-void BlinkWorkItemCallback23a(struct work_struct *work);
-
-void ResetLedStatus23a(struct led_8723a *pLed);
-
-void
-InitLed871x23a(
- struct rtw_adapter *padapter,
- struct led_8723a *pLed,
- enum led_pin_8723a LedPin
-);
-
-void
-DeInitLed871x23a(struct led_8723a *pLed);
-
-/* hal... */
-void BlinkHandler23a(struct led_8723a *pLed);
-
-#endif /* __RTW_LED_H_ */
diff --git a/drivers/staging/rtl8723au/include/rtw_mlme.h b/drivers/staging/rtl8723au/include/rtw_mlme.h
index 2ff01eb8fc0..a6751f13833 100644
--- a/drivers/staging/rtl8723au/include/rtw_mlme.h
+++ b/drivers/staging/rtl8723au/include/rtw_mlme.h
@@ -270,7 +270,7 @@ static inline void _clr_fwstate_(struct mlme_priv *pmlmepriv, int state)
static inline void clr_fwstate(struct mlme_priv *pmlmepriv, int state)
{
spin_lock_bh(&pmlmepriv->lock);
- if (check_fwstate(pmlmepriv, state) == true)
+ if (check_fwstate(pmlmepriv, state))
pmlmepriv->fw_state ^= state;
spin_unlock_bh(&pmlmepriv->lock);
}
diff --git a/drivers/staging/rtl8723au/include/rtw_mlme_ext.h b/drivers/staging/rtl8723au/include/rtw_mlme_ext.h
index 97c3c4478f2..51dba1fa4c5 100644
--- a/drivers/staging/rtl8723au/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8723au/include/rtw_mlme_ext.h
@@ -270,8 +270,7 @@ struct action_handler {
int (*func)(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
};
-struct ss_res
-{
+struct ss_res {
int state;
int bss_cnt;
int channel_idx;
@@ -318,8 +317,7 @@ struct FW_Sta_Info {
* 5. ... and so on, till survey done.
*/
-struct mlme_ext_info
-{
+struct mlme_ext_info {
u32 state;
u32 reauth_count;
u32 reassoc_count;
diff --git a/drivers/staging/rtl8723au/include/rtw_recv.h b/drivers/staging/rtl8723au/include/rtw_recv.h
index f846bb5e7ab..dc784be3ddd 100644
--- a/drivers/staging/rtl8723au/include/rtw_recv.h
+++ b/drivers/staging/rtl8723au/include/rtw_recv.h
@@ -200,9 +200,6 @@ struct recv_priv {
u8 *precv_buf;
/* For display the phy informatiom */
- u8 is_signal_dbg; /* for debug */
- u8 signal_strength_dbg; /* for debug */
- s8 rssi;
s8 rxpwdb;
u8 signal_strength;
u8 signal_qual;
diff --git a/drivers/staging/rtl8723au/include/rtw_xmit.h b/drivers/staging/rtl8723au/include/rtw_xmit.h
index 32a84417032..2b7d6d08238 100644
--- a/drivers/staging/rtl8723au/include/rtw_xmit.h
+++ b/drivers/staging/rtl8723au/include/rtw_xmit.h
@@ -196,7 +196,6 @@ enum {
void rtw_sctx_init23a(struct submit_ctx *sctx, int timeout_ms);
int rtw_sctx_wait23a(struct submit_ctx *sctx);
void rtw23a_sctx_done_err(struct submit_ctx **sctx, int status);
-void rtw_sctx_done23a(struct submit_ctx **sctx);
struct xmit_buf {
struct list_head list, list2;
@@ -295,10 +294,6 @@ struct xmit_priv {
struct rtw_adapter *adapter;
- u8 vcs_setting;
- u8 vcs;
- u8 vcs_type;
-
u64 tx_bytes;
u64 tx_pkts;
u64 tx_drop;
@@ -307,6 +302,8 @@ struct xmit_priv {
struct hw_xmit *hwxmits;
u8 hwxmit_entry;
+ u8 vcs;
+ u8 nqos_ssn;
u8 wmm_para_seq[4];/* sequence for wmm ac parameter strength from
* large to small. it's value is 0->vo, 1->vi,
@@ -314,14 +311,8 @@ struct xmit_priv {
*/
struct semaphore tx_retevt;/* all tx return event; */
- u8 txirp_cnt;/* */
struct tasklet_struct xmit_tasklet;
- /* per AC pending irp */
- int beq_cnt;
- int bkq_cnt;
- int viq_cnt;
- int voq_cnt;
struct rtw_queue free_xmitbuf_queue;
struct list_head xmitbuf_list; /* track buffers for cleanup */
@@ -332,7 +323,6 @@ struct xmit_priv {
struct list_head xmitextbuf_list; /* track buffers for cleanup */
uint free_xmit_extbuf_cnt;
- u16 nqos_ssn;
int ack_tx;
struct mutex ack_tx_mutex;
struct submit_ctx ack_tx_ops;
@@ -349,7 +339,6 @@ s32 rtw_free_xmitbuf23a(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf);
void rtw_count_tx_stats23a(struct rtw_adapter *padapter,
struct xmit_frame *pxmitframe, int sz);
void rtw_update_protection23a(struct rtw_adapter *padapter, u8 *ie, uint ie_len);
-s32 rtw_put_snap23a(u8 *data, u16 h_proto);
struct xmit_frame *rtw_alloc_xmitframe23a_ext(struct xmit_priv *pxmitpriv);
struct xmit_frame *rtw_alloc_xmitframe23a_once(struct xmit_priv *pxmitpriv);
s32 rtw_free_xmitframe23a(struct xmit_priv *pxmitpriv,
@@ -363,8 +352,6 @@ struct xmit_frame *rtw_dequeue_xframe23a(struct xmit_priv *pxmitpriv,
struct hw_xmit *phwxmit_i, int entry);
s32 rtw_xmit23a_classifier(struct rtw_adapter *padapter,
struct xmit_frame *pxmitframe);
-u32 rtw_calculate_wlan_pkt_size_by_attribue23a(struct pkt_attrib *pattrib);
-#define rtw_wlan_pkt_size(f) rtw_calculate_wlan_pkt_size_by_attribue23a(&f->attrib)
s32 rtw_xmitframe_coalesce23a(struct rtw_adapter *padapter, struct sk_buff *pkt,
struct xmit_frame *pxmitframe);
s32 _rtw_init_hw_txqueue(struct hw_txqueue *phw_txqueue, u8 ac_tag);
@@ -391,7 +378,6 @@ void xmit_delivery_enabled_frames23a(struct rtw_adapter *padapter,
u8 qos_acm23a(u8 acm_mask, u8 priority);
u32 rtw_get_ff_hwaddr23a(struct xmit_frame *pxmitframe);
int rtw_ack_tx_wait23a(struct xmit_priv *pxmitpriv, u32 timeout_ms);
-void rtw_ack_tx_done23a(struct xmit_priv *pxmitpriv, int status);
/* include after declaring struct xmit_buf, in order to avoid warning */
#include <xmit_osdep.h>
diff --git a/drivers/staging/rtl8723au/include/usb_ops.h b/drivers/staging/rtl8723au/include/usb_ops.h
index ade8bc71572..ff11e13b24a 100644
--- a/drivers/staging/rtl8723au/include/usb_ops.h
+++ b/drivers/staging/rtl8723au/include/usb_ops.h
@@ -63,6 +63,6 @@ static inline void rtw_reset_continual_urb_error(struct dvobj_priv *dvobj)
atomic_set(&dvobj->continual_urb_error, 0);
}
-void rtl8723au_chip_configure(struct rtw_adapter *padapter);
+bool rtl8723au_chip_configure(struct rtw_adapter *padapter);
#endif /* __USB_OPS_H_ */
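
With rtl8723au_chip_configure() now returning bool, an endpoint/queue mapping failure can propagate to the caller; a hypothetical probe-path check would look like:

    /* Abort HAL bring-up if the OUT-EP to queue mapping cannot be built. */
    if (!rtl8723au_chip_configure(padapter)) {
            DBG_8723A("%s: chip_configure failed\n", __func__);
            return _FAIL;
    }
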
diff --git a/drivers/staging/rtl8723au/include/usb_ops_linux.h b/drivers/staging/rtl8723au/include/usb_ops_linux.h
index bf68bbb41f9..af2f14b8b36 100644
--- a/drivers/staging/rtl8723au/include/usb_ops_linux.h
+++ b/drivers/staging/rtl8723au/include/usb_ops_linux.h
@@ -21,13 +21,13 @@
#define MAX_USBCTRL_VENDORREQ_TIMES 10
-int rtl8723au_read_port(struct rtw_adapter *adapter, u32 addr, u32 cnt,
+int rtl8723au_read_port(struct rtw_adapter *adapter, u32 cnt,
struct recv_buf *precvbuf);
void rtl8723au_read_port_cancel(struct rtw_adapter *padapter);
int rtl8723au_write_port(struct rtw_adapter *padapter, u32 addr, u32 cnt,
struct xmit_buf *pxmitbuf);
void rtl8723au_write_port_cancel(struct rtw_adapter *padapter);
-int rtl8723au_read_interrupt(struct rtw_adapter *adapter, u32 addr);
+int rtl8723au_read_interrupt(struct rtw_adapter *adapter);
u8 rtl8723au_read8(struct rtw_adapter *padapter, u16 addr);
u16 rtl8723au_read16(struct rtw_adapter *padapter, u16 addr);
diff --git a/drivers/staging/rtl8723au/include/wlan_bssdef.h b/drivers/staging/rtl8723au/include/wlan_bssdef.h
index 96e8074a7c1..95b32e15a4d 100644
--- a/drivers/staging/rtl8723au/include/wlan_bssdef.h
+++ b/drivers/staging/rtl8723au/include/wlan_bssdef.h
@@ -57,23 +57,6 @@ enum {
Ndis802_11Encryption3KeyAbsent,
};
-/* Key mapping keys require a BSSID */
-struct ndis_802_11_key {
- u32 Length; /* Length of this structure */
- u32 KeyIndex;
- u32 KeyLength; /* length of key in bytes */
- unsigned char BSSID[6];
- unsigned long long KeyRSC;
- u8 KeyMaterial[32]; /* variable length depending on above field */
-};
-
-struct wlan_phy_info {
- u8 SignalStrength;/* in percentage) */
- u8 SignalQuality;/* in percentage) */
- u8 Optimum_antenna; /* for Antenna diversity */
- u8 Reserved_0;
-};
-
struct wlan_bcn_info {
/* these infor get from rtw_get_encrypt_info when
* * translate scan to UI */
@@ -99,7 +82,8 @@ struct wlan_bssid_ex {
u32 DSConfig; /* Frequency, units are kHz */
enum nl80211_iftype ifmode;
unsigned char SupportedRates[NDIS_802_11_LENGTH_RATES_EX];
- struct wlan_phy_info PhyInfo;
+ u8 SignalStrength;/* in percentage */
+ u8 SignalQuality;/* in percentage */
u32 IELength;
u8 IEs[MAX_IE_SZ]; /* timestamp, beacon interval, and capability info*/
} __packed;
@@ -115,7 +99,6 @@ struct wlan_network {
/* set to fixed when not to be removed as site-surveying */
int fixed;
unsigned long last_scanned; /* timestamp for the network */
- int aid; /* will only be valid when a BSS is joined. */
int join_res;
struct wlan_bssid_ex network; /* must be the last item */
struct wlan_bcn_info BcnInfo;
diff --git a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
index 3d26955da72..82a8c06ab34 100644
--- a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
@@ -275,7 +275,8 @@ static int rtw_cfg80211_inform_bss(struct rtw_adapter *padapter,
&pnetwork->network)) {
notify_signal = 100 * translate_percentage_to_dbm(padapter->recvpriv.signal_strength); /* dbm */
} else {
- notify_signal = 100 * translate_percentage_to_dbm(pnetwork->network.PhyInfo.SignalStrength); /* dbm */
+ notify_signal = 100 * translate_percentage_to_dbm(
+ pnetwork->network.SignalStrength); /* dbm */
}
bss = cfg80211_inform_bss(wiphy, notify_channel,
@@ -471,7 +472,6 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, u8 key_index,
int set_tx, const u8 *sta_addr,
struct key_params *keyparms)
{
- int ret = 0;
int key_len;
struct sta_info *psta = NULL, *pbcmc_sta = NULL;
struct rtw_adapter *padapter = netdev_priv(dev);
@@ -708,7 +708,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, u8 key_index,
exit:
- return ret;
+ return 0;
}
#endif
@@ -850,7 +850,6 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, u8 key_index,
dot11PrivacyAlgrthm;
}
}
- } else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) { /* adhoc mode */
}
}
@@ -2364,7 +2363,6 @@ void rtw_cfg80211_indicate_sta_assoc(struct rtw_adapter *padapter,
ie_offset = offsetof(struct ieee80211_mgmt,
u.reassoc_req.variable);
- sinfo.filled = 0;
sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
sinfo.assoc_req_ies = pmgmt_frame + ie_offset;
sinfo.assoc_req_ies_len = frame_len - ie_offset;
@@ -2432,20 +2430,16 @@ void rtw_cfg80211_indicate_sta_disassoc(struct rtw_adapter *padapter,
static int rtw_cfg80211_monitor_if_open(struct net_device *ndev)
{
- int ret = 0;
-
DBG_8723A("%s\n", __func__);
- return ret;
+ return 0;
}
static int rtw_cfg80211_monitor_if_close(struct net_device *ndev)
{
- int ret = 0;
-
DBG_8723A("%s\n", __func__);
- return ret;
+ return 0;
}
static int rtw_cfg80211_monitor_if_xmit_entry(struct sk_buff *skb,
@@ -2574,11 +2568,9 @@ fail:
static int
rtw_cfg80211_monitor_if_set_mac_address(struct net_device *ndev, void *addr)
{
- int ret = 0;
-
DBG_8723A("%s\n", __func__);
- return ret;
+ return 0;
}
static const struct net_device_ops rtw_cfg80211_monitor_if_ops = {
diff --git a/drivers/staging/rtl8723au/os_dep/os_intfs.c b/drivers/staging/rtl8723au/os_dep/os_intfs.c
index b34eaec9dd4..9966d16342b 100644
--- a/drivers/staging/rtl8723au/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723au/os_dep/os_intfs.c
@@ -175,7 +175,6 @@ static int netdev_close(struct net_device *pnetdev);
static int loadparam(struct rtw_adapter *padapter, struct net_device *pnetdev)
{
struct registry_priv *registry_par = &padapter->registrypriv;
- int status = _SUCCESS;
GlobalDebugLevel23A = rtw_debug;
registry_par->chip_version = (u8)rtw_chip_version;
@@ -234,7 +233,7 @@ static int loadparam(struct rtw_adapter *padapter, struct net_device *pnetdev)
snprintf(registry_par->if2name, 16, "%s", if2name);
registry_par->notch_filter = (u8)rtw_notch_filter;
registry_par->regulatory_tid = (u8)rtw_regulatory_id;
- return status;
+ return _SUCCESS;
}
static int rtw_net_set_mac_address(struct net_device *pnetdev, void *p)
@@ -384,12 +383,9 @@ static int rtw_init_default_value(struct rtw_adapter *padapter)
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct security_priv *psecuritypriv = &padapter->securitypriv;
- int ret = _SUCCESS;
/* xmit_priv */
- pxmitpriv->vcs_setting = pregistrypriv->vrtl_carrier_sense;
pxmitpriv->vcs = pregistrypriv->vcs_type;
- pxmitpriv->vcs_type = pregistrypriv->vcs_type;
/* pxmitpriv->rts_thresh = pregistrypriv->rts_thresh; */
pxmitpriv->frag_len = pregistrypriv->frag_thresh;
@@ -425,7 +421,7 @@ static int rtw_init_default_value(struct rtw_adapter *padapter)
/* misc. */
padapter->bReadPortCancel = false;
padapter->bWritePortCancel = false;
- return ret;
+ return _SUCCESS;
}
int rtw_reset_drv_sw23a(struct rtw_adapter *padapter)
@@ -546,9 +542,6 @@ void rtw_cancel_all_timer23a(struct rtw_adapter *padapter)
RT_TRACE(_module_os_intfs_c_, _drv_info_,
("%s:cancel dynamic_chk_timer!\n", __func__));
- RT_TRACE(_module_os_intfs_c_, _drv_info_,
- ("%s:cancel DeInitSwLeds!\n", __func__));
-
del_timer_sync(&padapter->pwrctrlpriv.pwr_state_check_timer);
del_timer_sync(&padapter->mlmepriv.set_scan_deny_timer);
@@ -685,8 +678,6 @@ int netdev_open23a(struct net_device *pnetdev)
rtw_cfg80211_init_wiphy(padapter);
- rtw_led_control(padapter, LED_CTL_NO_LINK);
-
padapter->bup = true;
}
padapter->net_closed = false;
@@ -768,8 +759,6 @@ int rtw_ips_pwr_up23a(struct rtw_adapter *padapter)
result = ips_netdrv_open(padapter);
- rtw_led_control(padapter, LED_CTL_NO_LINK);
-
DBG_8723A("<=== rtw_ips_pwr_up23a.............. in %dms\n",
jiffies_to_msecs(jiffies - start_time));
return result;
@@ -784,8 +773,6 @@ void rtw_ips_pwr_down23a(struct rtw_adapter *padapter)
padapter->bCardDisableWOHSM = true;
padapter->net_closed = true;
- rtw_led_control(padapter, LED_CTL_POWER_OFF);
-
rtw_ips_dev_unload23a(padapter);
padapter->bCardDisableWOHSM = false;
DBG_8723A("<=== rtw_ips_pwr_down23a..................... in %dms\n",
@@ -844,8 +831,6 @@ static int netdev_close(struct net_device *pnetdev)
rtw_free_assoc_resources23a(padapter, 1);
/* s2-4. */
rtw_free_network_queue23a(padapter);
- /* Close LED */
- rtw_led_control(padapter, LED_CTL_POWER_OFF);
}
rtw_scan_abort23a(padapter);
diff --git a/drivers/staging/rtl8723au/os_dep/usb_intf.c b/drivers/staging/rtl8723au/os_dep/usb_intf.c
index 865743ecd85..373a617ace5 100644
--- a/drivers/staging/rtl8723au/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8723au/os_dep/usb_intf.c
@@ -59,21 +59,6 @@ static struct usb_driver rtl8723a_usb_drv = {
static struct usb_driver *usb_drv = &rtl8723a_usb_drv;
-static inline int RT_usb_endpoint_is_bulk_in(const struct usb_endpoint_descriptor *epd)
-{
- return usb_endpoint_xfer_bulk(epd) && usb_endpoint_dir_in(epd);
-}
-
-static inline int RT_usb_endpoint_is_bulk_out(const struct usb_endpoint_descriptor *epd)
-{
- return usb_endpoint_xfer_bulk(epd) && usb_endpoint_dir_out(epd);
-}
-
-static inline int RT_usb_endpoint_is_int_in(const struct usb_endpoint_descriptor *epd)
-{
- return usb_endpoint_xfer_int(epd) && usb_endpoint_dir_in(epd);
-}
-
static int rtw_init_intf_priv(struct dvobj_priv *dvobj)
{
mutex_init(&dvobj->usb_vendor_req_mutex);
@@ -143,21 +128,21 @@ static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf)
le16_to_cpu(pendp_desc->wMaxPacketSize));
DBG_8723A("bInterval =%x\n", pendp_desc->bInterval);
- if (RT_usb_endpoint_is_bulk_in(pendp_desc)) {
- DBG_8723A("RT_usb_endpoint_is_bulk_in = %x\n",
+ if (usb_endpoint_is_bulk_in(pendp_desc)) {
+ DBG_8723A("usb_endpoint_is_bulk_in = %x\n",
usb_endpoint_num(pendp_desc));
pdvobjpriv->RtInPipe[pdvobjpriv->RtNumInPipes] =
usb_endpoint_num(pendp_desc);
pdvobjpriv->RtNumInPipes++;
- } else if (RT_usb_endpoint_is_int_in(pendp_desc)) {
- DBG_8723A("RT_usb_endpoint_is_int_in = %x, Interval = %x\n",
+ } else if (usb_endpoint_is_int_in(pendp_desc)) {
+ DBG_8723A("usb_endpoint_is_int_in = %x, Interval = %x\n",
usb_endpoint_num(pendp_desc),
pendp_desc->bInterval);
pdvobjpriv->RtInPipe[pdvobjpriv->RtNumInPipes] =
usb_endpoint_num(pendp_desc);
pdvobjpriv->RtNumInPipes++;
- } else if (RT_usb_endpoint_is_bulk_out(pendp_desc)) {
- DBG_8723A("RT_usb_endpoint_is_bulk_out = %x\n",
+ } else if (usb_endpoint_is_bulk_out(pendp_desc)) {
+ DBG_8723A("usb_endpoint_is_bulk_out = %x\n",
usb_endpoint_num(pendp_desc));
pdvobjpriv->RtOutPipe[pdvobjpriv->RtNumOutPipes] =
usb_endpoint_num(pendp_desc);
@@ -257,6 +242,7 @@ void rtl8723a_usb_intf_stop(struct rtw_adapter *padapter)
static void rtw_dev_unload(struct rtw_adapter *padapter)
{
+ struct submit_ctx *pack_tx_ops = &padapter->xmitpriv.ack_tx_ops;
RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+rtw_dev_unload\n"));
if (padapter->bup) {
@@ -264,8 +250,8 @@ static void rtw_dev_unload(struct rtw_adapter *padapter)
padapter->bDriverStopped = true;
if (padapter->xmitpriv.ack_tx)
- rtw_ack_tx_done23a(&padapter->xmitpriv,
- RTW_SCTX_DONE_DRV_STOP);
+ rtw23a_sctx_done_err(&pack_tx_ops,
+ RTW_SCTX_DONE_DRV_STOP);
/* s3. */
rtl8723a_usb_intf_stop(padapter);
@@ -322,8 +308,6 @@ int rtw_hw_suspend23a(struct rtw_adapter *padapter)
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
_clr_fwstate_(pmlmepriv, _FW_LINKED);
- rtw_led_control(padapter, LED_CTL_NO_LINK);
-
rtw_os_indicate_disconnect23a(padapter);
/* donnot enqueue cmd */
@@ -546,7 +530,8 @@ static struct rtw_adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
rtl8723a_read_chip_version(padapter);
/* step usb endpoint mapping */
- rtl8723au_chip_configure(padapter);
+ if (!rtl8723au_chip_configure(padapter))
+ goto free_hal_data;
/* step read efuse/eeprom data and get mac_addr */
rtl8723a_read_adapter_info(padapter);
diff --git a/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c b/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c
index a3349ac57ba..3e19b3b2c1c 100644
--- a/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c
+++ b/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c
@@ -18,13 +18,6 @@
#include <usb_ops_linux.h>
#include <rtw_sreset.h>
-struct zero_bulkout_context {
- void *pbuf;
- void *purb;
- void *pirp;
- void *padapter;
-};
-
void rtl8723au_read_port_cancel(struct rtw_adapter *padapter)
{
struct recv_buf *precvbuf;
@@ -53,18 +46,6 @@ static void usb_write_port23a_complete(struct urb *purb)
unsigned long irqL;
switch (pxmitbuf->flags) {
- case VO_QUEUE_INX:
- pxmitpriv->voq_cnt--;
- break;
- case VI_QUEUE_INX:
- pxmitpriv->viq_cnt--;
- break;
- case BE_QUEUE_INX:
- pxmitpriv->beq_cnt--;
- break;
- case BK_QUEUE_INX:
- pxmitpriv->bkq_cnt--;
- break;
case HIGH_QUEUE_INX:
#ifdef CONFIG_8723AU_AP_MODE
rtw_chk_hi_queue_cmd23a(padapter);
@@ -166,19 +147,15 @@ int rtl8723au_write_port(struct rtw_adapter *padapter, u32 addr, u32 cnt,
switch (addr) {
case VO_QUEUE_INX:
- pxmitpriv->voq_cnt++;
pxmitbuf->flags = VO_QUEUE_INX;
break;
case VI_QUEUE_INX:
- pxmitpriv->viq_cnt++;
pxmitbuf->flags = VI_QUEUE_INX;
break;
case BE_QUEUE_INX:
- pxmitpriv->beq_cnt++;
pxmitbuf->flags = BE_QUEUE_INX;
break;
case BK_QUEUE_INX:
- pxmitpriv->bkq_cnt++;
pxmitbuf->flags = BK_QUEUE_INX;
break;
case HIGH_QUEUE_INX:
diff --git a/drivers/staging/rts5208/ms.c b/drivers/staging/rts5208/ms.c
index 228e48339b9..b4612fb615f 100644
--- a/drivers/staging/rts5208/ms.c
+++ b/drivers/staging/rts5208/ms.c
@@ -2599,9 +2599,9 @@ static int mspro_rw_multi_sector(struct scsi_cmnd *srb,
if (count > sector_cnt) {
if (mode_2k)
- ms_card->seq_mode |= MODE_2K_SEQ;
+ ms_card->seq_mode = MODE_2K_SEQ;
else
- ms_card->seq_mode |= MODE_512_SEQ;
+ ms_card->seq_mode = MODE_512_SEQ;
}
} else {
count = sector_cnt;
diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
index 2d2527c3aea..c74f1b8108f 100644
--- a/drivers/staging/rts5208/rtsx.c
+++ b/drivers/staging/rts5208/rtsx.c
@@ -418,7 +418,7 @@ static void rtsx_shutdown(struct pci_dev *pci)
static int rtsx_control_thread(void *__dev)
{
- struct rtsx_dev *dev = (struct rtsx_dev *)__dev;
+ struct rtsx_dev *dev = __dev;
struct rtsx_chip *chip = dev->chip;
struct Scsi_Host *host = rtsx_to_host(dev);
@@ -527,7 +527,7 @@ SkipForAbort:
static int rtsx_polling_thread(void *__dev)
{
- struct rtsx_dev *dev = (struct rtsx_dev *)__dev;
+ struct rtsx_dev *dev = __dev;
struct rtsx_chip *chip = dev->chip;
struct sd_info *sd_card = &(chip->sd_card);
struct xd_info *xd_card = &(chip->xd_card);
diff --git a/drivers/staging/rts5208/rtsx_chip.c b/drivers/staging/rts5208/rtsx_chip.c
index a7ade8b4e7f..9593d813293 100644
--- a/drivers/staging/rts5208/rtsx_chip.c
+++ b/drivers/staging/rts5208/rtsx_chip.c
@@ -126,10 +126,11 @@ static int rtsx_pre_handle_sdio_old(struct rtsx_chip *chip)
if (chip->ignore_sd && CHK_SDIO_EXIST(chip)) {
if (chip->asic_code) {
RTSX_WRITE_REG(chip, CARD_PULL_CTL5, 0xFF,
- MS_INS_PU | SD_WP_PU | SD_CD_PU | SD_CMD_PU);
+ MS_INS_PU | SD_WP_PU |
+ SD_CD_PU | SD_CMD_PU);
} else {
RTSX_WRITE_REG(chip, FPGA_PULL_CTL, 0xFF,
- FPGA_SD_PULL_CTL_EN);
+ FPGA_SD_PULL_CTL_EN);
}
RTSX_WRITE_REG(chip, CARD_SHARE_MODE, 0xFF, CARD_SHARE_48_SD);
@@ -137,7 +138,7 @@ static int rtsx_pre_handle_sdio_old(struct rtsx_chip *chip)
RTSX_WRITE_REG(chip, 0xFF2C, 0x01, 0x01);
RTSX_WRITE_REG(chip, SDIO_CTRL, 0xFF,
- SDIO_BUS_CTRL | SDIO_CD_CTRL);
+ SDIO_BUS_CTRL | SDIO_CD_CTRL);
chip->sd_int = 1;
chip->sd_io = 1;
@@ -201,7 +202,7 @@ static int rtsx_pre_handle_sdio_new(struct rtsx_chip *chip)
TRACE_RET(chip, STATUS_FAIL);
} else {
RTSX_WRITE_REG(chip, FPGA_PULL_CTL,
- FPGA_SD_PULL_CTL_BIT | 0x20, 0);
+ FPGA_SD_PULL_CTL_BIT | 0x20, 0);
}
retval = card_share_mode(chip, SD_CARD);
if (retval != STATUS_SUCCESS)
@@ -226,6 +227,87 @@ static int rtsx_pre_handle_sdio_new(struct rtsx_chip *chip)
}
#endif
+static int rtsx_reset_aspm(struct rtsx_chip *chip)
+{
+ int ret;
+
+ if (chip->dynamic_aspm) {
+ if (!CHK_SDIO_EXIST(chip) || !CHECK_PID(chip, 0x5288))
+ return STATUS_SUCCESS;
+
+ ret = rtsx_write_cfg_dw(chip, 2, 0xC0, 0xFF,
+ chip->aspm_l0s_l1_en);
+ if (ret != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+ }
+
+ if (CHECK_PID(chip, 0x5208))
+ RTSX_WRITE_REG(chip, ASPM_FORCE_CTL, 0xFF, 0x3F);
+ ret = rtsx_write_config_byte(chip, LCTLR, chip->aspm_l0s_l1_en);
+ if (ret != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ chip->aspm_level[0] = chip->aspm_l0s_l1_en;
+ if (CHK_SDIO_EXIST(chip)) {
+ chip->aspm_level[1] = chip->aspm_l0s_l1_en;
+ ret = rtsx_write_cfg_dw(chip, CHECK_PID(chip, 0x5288) ? 2 : 1,
+ 0xC0, 0xFF, chip->aspm_l0s_l1_en);
+ if (ret != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ chip->aspm_enabled = 1;
+
+ return STATUS_SUCCESS;
+}
+
+static int rtsx_enable_pcie_intr(struct rtsx_chip *chip)
+{
+ int ret;
+
+ if (!chip->asic_code || !CHECK_PID(chip, 0x5208)) {
+ rtsx_enable_bus_int(chip);
+ return STATUS_SUCCESS;
+ }
+
+ if (chip->phy_debug_mode) {
+ RTSX_WRITE_REG(chip, CDRESUMECTL, 0x77, 0);
+ rtsx_disable_bus_int(chip);
+ } else {
+ rtsx_enable_bus_int(chip);
+ }
+
+ if (chip->ic_version >= IC_VER_D) {
+ u16 reg;
+
+ ret = rtsx_read_phy_register(chip, 0x00, &reg);
+ if (ret != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ reg &= 0xFE7F;
+ reg |= 0x80;
+ ret = rtsx_write_phy_register(chip, 0x00, reg);
+ if (ret != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ret = rtsx_read_phy_register(chip, 0x1C, &reg);
+ if (ret != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ reg &= 0xFFF7;
+ ret = rtsx_write_phy_register(chip, 0x1C, reg);
+ if (ret != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (chip->driver_first_load && (chip->ic_version < IC_VER_C))
+ rtsx_calibration(chip);
+
+ return STATUS_SUCCESS;
+}
+
int rtsx_reset_chip(struct rtsx_chip *chip)
{
int retval;
@@ -268,7 +350,7 @@ int rtsx_reset_chip(struct rtsx_chip *chip)
#ifdef LED_AUTO_BLINK
RTSX_WRITE_REG(chip, CARD_AUTO_BLINK, 0xFF,
- LED_BLINK_SPEED | BLINK_EN | LED_GPIO0);
+ LED_BLINK_SPEED | BLINK_EN | LED_GPIO0);
#endif
if (chip->asic_code) {
@@ -288,39 +370,9 @@ int rtsx_reset_chip(struct rtsx_chip *chip)
/* Enable ASPM */
if (chip->aspm_l0s_l1_en) {
- if (chip->dynamic_aspm) {
- if (CHK_SDIO_EXIST(chip)) {
- if (CHECK_PID(chip, 0x5288)) {
- retval = rtsx_write_cfg_dw(chip, 2, 0xC0, 0xFF, chip->aspm_l0s_l1_en);
- if (retval != STATUS_SUCCESS)
- TRACE_RET(chip, STATUS_FAIL);
- }
- }
- } else {
- if (CHECK_PID(chip, 0x5208))
- RTSX_WRITE_REG(chip, ASPM_FORCE_CTL,
- 0xFF, 0x3F);
-
- retval = rtsx_write_config_byte(chip, LCTLR,
- chip->aspm_l0s_l1_en);
- if (retval != STATUS_SUCCESS)
- TRACE_RET(chip, STATUS_FAIL);
-
- chip->aspm_level[0] = chip->aspm_l0s_l1_en;
- if (CHK_SDIO_EXIST(chip)) {
- chip->aspm_level[1] = chip->aspm_l0s_l1_en;
- if (CHECK_PID(chip, 0x5288))
- retval = rtsx_write_cfg_dw(chip, 2, 0xC0, 0xFF, chip->aspm_l0s_l1_en);
- else
- retval = rtsx_write_cfg_dw(chip, 1, 0xC0, 0xFF, chip->aspm_l0s_l1_en);
-
- if (retval != STATUS_SUCCESS)
- TRACE_RET(chip, STATUS_FAIL);
-
- }
-
- chip->aspm_enabled = 1;
- }
+ retval = rtsx_reset_aspm(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
} else {
if (chip->asic_code && CHECK_PID(chip, 0x5208)) {
retval = rtsx_write_phy_register(chip, 0x07, 0x0129);
@@ -338,91 +390,38 @@ int rtsx_reset_chip(struct rtsx_chip *chip)
TRACE_RET(chip, STATUS_FAIL);
if (CHK_SDIO_EXIST(chip)) {
- if (CHECK_PID(chip, 0x5288))
- retval = rtsx_write_cfg_dw(chip, 2, 0xC0,
- 0xFF00, 0x0100);
- else
- retval = rtsx_write_cfg_dw(chip, 1, 0xC0,
- 0xFF00, 0x0100);
+ retval = rtsx_write_cfg_dw(chip,
+ CHECK_PID(chip, 0x5288) ? 2 : 1,
+ 0xC0, 0xFF00, 0x0100);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, STATUS_FAIL);
-
}
- if (CHECK_PID(chip, 0x5288)) {
- if (!CHK_SDIO_EXIST(chip)) {
- retval = rtsx_write_cfg_dw(chip, 2, 0xC0, 0xFFFF,
- 0x0103);
- if (retval != STATUS_SUCCESS)
- TRACE_RET(chip, STATUS_FAIL);
-
- retval = rtsx_write_cfg_dw(chip, 2, 0x84, 0xFF, 0x03);
- if (retval != STATUS_SUCCESS)
- TRACE_RET(chip, STATUS_FAIL);
+ if (CHECK_PID(chip, 0x5288) && !CHK_SDIO_EXIST(chip)) {
+ retval = rtsx_write_cfg_dw(chip, 2, 0xC0, 0xFFFF, 0x0103);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
- }
+ retval = rtsx_write_cfg_dw(chip, 2, 0x84, 0xFF, 0x03);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
}
RTSX_WRITE_REG(chip, IRQSTAT0, LINK_RDY_INT, LINK_RDY_INT);
RTSX_WRITE_REG(chip, PERST_GLITCH_WIDTH, 0xFF, 0x80);
- /* Enable PCIE interrupt */
- if (chip->asic_code) {
- if (CHECK_PID(chip, 0x5208)) {
- if (chip->phy_debug_mode) {
- RTSX_WRITE_REG(chip, CDRESUMECTL, 0x77, 0);
- rtsx_disable_bus_int(chip);
- } else {
- rtsx_enable_bus_int(chip);
- }
-
- if (chip->ic_version >= IC_VER_D) {
- u16 reg;
-
- retval = rtsx_read_phy_register(chip, 0x00,
- &reg);
- if (retval != STATUS_SUCCESS)
- TRACE_RET(chip, STATUS_FAIL);
-
- reg &= 0xFE7F;
- reg |= 0x80;
- retval = rtsx_write_phy_register(chip, 0x00,
- reg);
- if (retval != STATUS_SUCCESS)
- TRACE_RET(chip, STATUS_FAIL);
-
- retval = rtsx_read_phy_register(chip, 0x1C,
- &reg);
- if (retval != STATUS_SUCCESS)
- TRACE_RET(chip, STATUS_FAIL);
-
- reg &= 0xFFF7;
- retval = rtsx_write_phy_register(chip, 0x1C,
- reg);
- if (retval != STATUS_SUCCESS)
- TRACE_RET(chip, STATUS_FAIL);
-
- }
-
- if (chip->driver_first_load &&
- (chip->ic_version < IC_VER_C))
- rtsx_calibration(chip);
-
- } else {
- rtsx_enable_bus_int(chip);
- }
- } else {
- rtsx_enable_bus_int(chip);
- }
+ retval = rtsx_enable_pcie_intr(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
chip->need_reset = 0;
chip->int_reg = rtsx_readl(chip, RTSX_BIPR);
if (chip->hw_bypass_sd)
- goto NextCard;
+ goto nextcard;
dev_dbg(rtsx_dev(chip), "In %s, chip->int_reg = 0x%x\n", __func__,
chip->int_reg);
if (chip->int_reg & SD_EXIST) {
@@ -443,10 +442,10 @@ int rtsx_reset_chip(struct rtsx_chip *chip)
} else {
chip->sd_io = 0;
RTSX_WRITE_REG(chip, SDIO_CTRL, SDIO_BUS_CTRL | SDIO_CD_CTRL,
- 0);
+ 0);
}
-NextCard:
+nextcard:
if (chip->int_reg & XD_EXIST)
chip->need_reset |= XD_CARD;
if (chip->int_reg & MS_EXIST)
@@ -484,10 +483,10 @@ NextCard:
if (chip->ft2_fast_mode) {
RTSX_WRITE_REG(chip, CARD_PWR_CTL, 0xFF,
- MS_PARTIAL_POWER_ON | SD_PARTIAL_POWER_ON);
+ MS_PARTIAL_POWER_ON | SD_PARTIAL_POWER_ON);
udelay(chip->pmos_pwr_on_interval);
RTSX_WRITE_REG(chip, CARD_PWR_CTL, 0xFF,
- MS_POWER_ON | SD_POWER_ON);
+ MS_POWER_ON | SD_POWER_ON);
wait_timeout(200);
}
@@ -540,10 +539,7 @@ static int rts5208_init(struct rtsx_chip *chip)
RTSX_WRITE_REG(chip, CLK_SEL, 0x03, 0x03);
RTSX_READ_REG(chip, CLK_SEL, &val);
- if (val == 0)
- chip->asic_code = 1;
- else
- chip->asic_code = 0;
+ chip->asic_code = val == 0 ? 1 : 0;
if (chip->asic_code) {
retval = rtsx_read_phy_register(chip, 0x1C, &reg);
@@ -553,10 +549,7 @@ static int rts5208_init(struct rtsx_chip *chip)
dev_dbg(rtsx_dev(chip), "Value of phy register 0x1C is 0x%x\n",
reg);
chip->ic_version = (reg >> 4) & 0x07;
- if (reg & PHY_DEBUG_MODE)
- chip->phy_debug_mode = 1;
- else
- chip->phy_debug_mode = 0;
+ chip->phy_debug_mode = reg & PHY_DEBUG_MODE ? 1 : 0;
} else {
RTSX_READ_REG(chip, 0xFE80, &val);
@@ -566,16 +559,10 @@ static int rts5208_init(struct rtsx_chip *chip)
RTSX_READ_REG(chip, PDINFO, &val);
dev_dbg(rtsx_dev(chip), "PDINFO: 0x%x\n", val);
- if (val & AUX_PWR_DETECTED)
- chip->aux_pwr_exist = 1;
- else
- chip->aux_pwr_exist = 0;
+ chip->aux_pwr_exist = val & AUX_PWR_DETECTED ? 1 : 0;
RTSX_READ_REG(chip, 0xFE50, &val);
- if (val & 0x01)
- chip->hw_bypass_sd = 1;
- else
- chip->hw_bypass_sd = 0;
+ chip->hw_bypass_sd = val & 0x01 ? 1 : 0;
rtsx_read_config_byte(chip, 0x0E, &val);
if (val & 0x80)
@@ -585,10 +572,7 @@ static int rts5208_init(struct rtsx_chip *chip)
if (chip->use_hw_setting) {
RTSX_READ_REG(chip, CHANGE_LINK_STATE, &val);
- if (val & 0x80)
- chip->auto_delink_en = 1;
- else
- chip->auto_delink_en = 0;
+ chip->auto_delink_en = val & 0x80 ? 1 : 0;
}
return STATUS_SUCCESS;
@@ -602,33 +586,21 @@ static int rts5288_init(struct rtsx_chip *chip)
RTSX_WRITE_REG(chip, CLK_SEL, 0x03, 0x03);
RTSX_READ_REG(chip, CLK_SEL, &val);
- if (val == 0)
- chip->asic_code = 1;
- else
- chip->asic_code = 0;
+ chip->asic_code = val == 0 ? 1 : 0;
chip->ic_version = 0;
chip->phy_debug_mode = 0;
RTSX_READ_REG(chip, PDINFO, &val);
dev_dbg(rtsx_dev(chip), "PDINFO: 0x%x\n", val);
- if (val & AUX_PWR_DETECTED)
- chip->aux_pwr_exist = 1;
- else
- chip->aux_pwr_exist = 0;
+ chip->aux_pwr_exist = val & AUX_PWR_DETECTED ? 1 : 0;
RTSX_READ_REG(chip, CARD_SHARE_MODE, &val);
dev_dbg(rtsx_dev(chip), "CARD_SHARE_MODE: 0x%x\n", val);
- if (val & 0x04)
- chip->baro_pkg = QFN;
- else
- chip->baro_pkg = LQFP;
+ chip->baro_pkg = val & 0x04 ? QFN : LQFP;
RTSX_READ_REG(chip, 0xFE5A, &val);
- if (val & 0x10)
- chip->hw_bypass_sd = 1;
- else
- chip->hw_bypass_sd = 0;
+ chip->hw_bypass_sd = val & 0x10 ? 1 : 0;
retval = rtsx_read_cfg_dw(chip, 0, 0x718, &lval);
if (retval != STATUS_SUCCESS)
@@ -643,16 +615,12 @@ static int rts5288_init(struct rtsx_chip *chip)
if (chip->use_hw_setting) {
RTSX_READ_REG(chip, CHANGE_LINK_STATE, &val);
- if (val & 0x80)
- chip->auto_delink_en = 1;
- else
- chip->auto_delink_en = 0;
+ chip->auto_delink_en = val & 0x80 ? 1 : 0;
if (CHECK_BARO_PKG(chip, LQFP))
chip->lun_mode = SD_MS_1LUN;
else
chip->lun_mode = DEFAULT_SINGLE;
-
}
return STATUS_SUCCESS;
@@ -660,9 +628,9 @@ static int rts5288_init(struct rtsx_chip *chip)
int rtsx_init_chip(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
- struct xd_info *xd_card = &(chip->xd_card);
- struct ms_info *ms_card = &(chip->ms_card);
+ struct sd_info *sd_card = &chip->sd_card;
+ struct xd_info *xd_card = &chip->xd_card;
+ struct ms_info *ms_card = &chip->ms_card;
int retval;
unsigned int i;
@@ -740,7 +708,6 @@ int rtsx_init_chip(struct rtsx_chip *chip)
retval = rts5288_init(chip);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, STATUS_FAIL);
-
}
if (chip->ss_en == 2)
@@ -842,7 +809,6 @@ static void rtsx_monitor_aspm_config(struct rtsx_chip *chip)
} else {
if (reg0 & 0x03)
maybe_support_aspm = 1;
-
}
if (reg_changed) {
@@ -859,15 +825,15 @@ static void rtsx_monitor_aspm_config(struct rtsx_chip *chip)
chip->sdio_aspm = 0;
}
rtsx_write_register(chip, ASPM_FORCE_CTL, 0xFF,
- 0x30 | chip->aspm_level[0] |
- (chip->aspm_level[1] << 2));
+ 0x30 | chip->aspm_level[0] |
+ (chip->aspm_level[1] << 2));
}
}
void rtsx_polling_func(struct rtsx_chip *chip)
{
#ifdef SUPPORT_SD_LOCK
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
#endif
int ss_allowed;
@@ -875,7 +841,7 @@ void rtsx_polling_func(struct rtsx_chip *chip)
return;
if (rtsx_chk_stat(chip, RTSX_STAT_DELINK))
- goto Delink_Stage;
+ goto delink_stage;
if (chip->polling_config) {
u8 val;
@@ -888,7 +854,7 @@ void rtsx_polling_func(struct rtsx_chip *chip)
#ifdef SUPPORT_OCP
if (chip->ocp_int) {
- rtsx_read_register(chip, OCPSTAT, &(chip->ocp_stat));
+ rtsx_read_register(chip, OCPSTAT, &chip->ocp_stat);
if (chip->card_exist & SD_CARD)
sd_power_off_card3v3(chip);
@@ -932,7 +898,6 @@ void rtsx_polling_func(struct rtsx_chip *chip)
rtsx_read_cfg_dw(chip, 1, 0x04, &val);
if (val & 0x07)
ss_allowed = 0;
-
}
}
} else {
@@ -958,7 +923,7 @@ void rtsx_polling_func(struct rtsx_chip *chip)
#ifdef SUPPORT_SDIO_ASPM
if (CHK_SDIO_EXIST(chip) && !CHK_SDIO_IGNORED(chip) &&
- chip->aspm_l0s_l1_en && chip->dynamic_aspm) {
+ chip->aspm_l0s_l1_en && chip->dynamic_aspm) {
if (chip->sd_io) {
dynamic_configure_sdio_aspm(chip);
} else {
@@ -966,7 +931,8 @@ void rtsx_polling_func(struct rtsx_chip *chip)
dev_dbg(rtsx_dev(chip), "SDIO enter ASPM!\n");
rtsx_write_register(chip,
ASPM_FORCE_CTL, 0xFC,
- 0x30 | (chip->aspm_level[1] << 2));
+ 0x30 |
+ (chip->aspm_level[1] << 2));
chip->sdio_aspm = 1;
}
}
@@ -988,9 +954,10 @@ void rtsx_polling_func(struct rtsx_chip *chip)
turn_off_led(chip, LED_GPIO);
- if (chip->auto_power_down && !chip->card_ready && !chip->sd_io)
- rtsx_force_power_down(chip, SSC_PDCTL | OC_PDCTL);
-
+ if (chip->auto_power_down && !chip->card_ready &&
+ !chip->sd_io)
+ rtsx_force_power_down(chip,
+ SSC_PDCTL | OC_PDCTL);
}
}
@@ -1013,7 +980,6 @@ void rtsx_polling_func(struct rtsx_chip *chip)
break;
}
-
#ifdef SUPPORT_OCP
if (CHECK_LUN_MODE(chip, SD_MS_2LUN)) {
if (chip->ocp_stat &
@@ -1024,7 +990,7 @@ void rtsx_polling_func(struct rtsx_chip *chip)
if (chip->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
if (chip->card_exist & SD_CARD) {
rtsx_write_register(chip, CARD_OE, SD_OUTPUT_EN,
- 0);
+ 0);
card_power_off(chip, SD_CARD);
chip->card_fail |= SD_CARD;
}
@@ -1032,7 +998,7 @@ void rtsx_polling_func(struct rtsx_chip *chip)
if (chip->ocp_stat & (MS_OC_NOW | MS_OC_EVER)) {
if (chip->card_exist & MS_CARD) {
rtsx_write_register(chip, CARD_OE, MS_OUTPUT_EN,
- 0);
+ 0);
card_power_off(chip, MS_CARD);
chip->card_fail |= MS_CARD;
}
@@ -1043,15 +1009,15 @@ void rtsx_polling_func(struct rtsx_chip *chip)
chip->ocp_stat);
if (chip->card_exist & SD_CARD) {
rtsx_write_register(chip, CARD_OE, SD_OUTPUT_EN,
- 0);
+ 0);
chip->card_fail |= SD_CARD;
} else if (chip->card_exist & MS_CARD) {
rtsx_write_register(chip, CARD_OE, MS_OUTPUT_EN,
- 0);
+ 0);
chip->card_fail |= MS_CARD;
} else if (chip->card_exist & XD_CARD) {
rtsx_write_register(chip, CARD_OE, XD_OUTPUT_EN,
- 0);
+ 0);
chip->card_fail |= XD_CARD;
}
card_power_off(chip, SD_CARD);
@@ -1059,9 +1025,9 @@ void rtsx_polling_func(struct rtsx_chip *chip)
}
#endif
-Delink_Stage:
+delink_stage:
if (chip->auto_delink_en && chip->auto_delink_allowed &&
- !chip->card_ready && !chip->card_ejected && !chip->sd_io) {
+ !chip->card_ready && !chip->card_ejected && !chip->sd_io) {
int enter_L1 = chip->auto_delink_in_L1 && (
chip->aspm_l0s_l1_en || chip->ss_en);
int delink_stage1_cnt = chip->delink_stage1_step;
@@ -1081,27 +1047,33 @@ Delink_Stage:
dev_dbg(rtsx_dev(chip), "False card inserted, do force delink\n");
if (enter_L1)
- rtsx_write_register(chip, HOST_SLEEP_STATE, 0x03, 1);
+ rtsx_write_register(chip,
+ HOST_SLEEP_STATE,
+ 0x03, 1);
rtsx_write_register(chip,
- CHANGE_LINK_STATE, 0x0A,
- 0x0A);
+ CHANGE_LINK_STATE,
+ 0x0A, 0x0A);
if (enter_L1)
rtsx_enter_L1(chip);
- chip->auto_delink_cnt = delink_stage3_cnt + 1;
+ chip->auto_delink_cnt =
+ delink_stage3_cnt + 1;
} else {
dev_dbg(rtsx_dev(chip), "No card inserted, do delink\n");
if (enter_L1)
- rtsx_write_register(chip, HOST_SLEEP_STATE, 0x03, 1);
+ rtsx_write_register(chip,
+ HOST_SLEEP_STATE,
+ 0x03, 1);
- rtsx_write_register(chip, CHANGE_LINK_STATE, 0x02, 0x02);
+ rtsx_write_register(chip,
+ CHANGE_LINK_STATE,
+ 0x02, 0x02);
if (enter_L1)
rtsx_enter_L1(chip);
-
}
}
@@ -1115,7 +1087,7 @@ Delink_Stage:
rtsx_set_phy_reg_bit(chip, 0x1C, 2);
rtsx_write_register(chip, CHANGE_LINK_STATE,
- 0x0A, 0x0A);
+ 0x0A, 0x0A);
}
chip->auto_delink_cnt++;
@@ -1219,7 +1191,7 @@ int rtsx_read_register(struct rtsx_chip *chip, u16 addr, u8 *data)
}
int rtsx_write_cfg_dw(struct rtsx_chip *chip, u8 func_no, u16 addr, u32 mask,
- u32 val)
+ u32 val)
{
u8 mode = 0, tmp;
int i;
@@ -1279,7 +1251,7 @@ int rtsx_read_cfg_dw(struct rtsx_chip *chip, u8 func_no, u16 addr, u32 *val)
}
int rtsx_write_cfg_seq(struct rtsx_chip *chip, u8 func, u16 addr, u8 *buf,
- int len)
+ int len)
{
u32 *data, *mask;
u16 offset = addr % 4;
@@ -1324,7 +1296,7 @@ int rtsx_write_cfg_seq(struct rtsx_chip *chip, u8 func, u16 addr, u8 *buf,
for (i = 0; i < dw_len; i++) {
retval = rtsx_write_cfg_dw(chip, func, aligned_addr + i * 4,
- mask[i], data[i]);
+ mask[i], data[i]);
if (retval != STATUS_SUCCESS) {
vfree(data);
vfree(mask);
@@ -1339,7 +1311,7 @@ int rtsx_write_cfg_seq(struct rtsx_chip *chip, u8 func, u16 addr, u8 *buf,
}
int rtsx_read_cfg_seq(struct rtsx_chip *chip, u8 func, u16 addr, u8 *buf,
- int len)
+ int len)
{
u32 *data;
u16 offset = addr % 4;
@@ -1360,7 +1332,7 @@ int rtsx_read_cfg_seq(struct rtsx_chip *chip, u8 func, u16 addr, u8 *buf,
for (i = 0; i < dw_len; i++) {
retval = rtsx_read_cfg_dw(chip, func, aligned_addr + i * 4,
- data + i);
+ data + i);
if (retval != STATUS_SUCCESS) {
vfree(data);
TRACE_RET(chip, STATUS_FAIL);
@@ -1522,7 +1494,7 @@ int rtsx_set_phy_reg_bit(struct rtsx_chip *chip, u8 reg, u8 bit)
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, STATUS_FAIL);
- if (0 == (value & (1 << bit))) {
+ if ((value & (1 << bit)) == 0) {
value |= (1 << bit);
retval = rtsx_write_phy_register(chip, reg, value);
if (retval != STATUS_SUCCESS)
@@ -1595,12 +1567,9 @@ void rtsx_enter_ss(struct rtsx_chip *chip)
rtsx_force_power_down(chip, SSC_PDCTL | OC_PDCTL);
}
- if (CHK_SDIO_EXIST(chip)) {
- if (CHECK_PID(chip, 0x5288))
- rtsx_write_cfg_dw(chip, 2, 0xC0, 0xFF00, 0x0100);
- else
- rtsx_write_cfg_dw(chip, 1, 0xC0, 0xFF00, 0x0100);
- }
+ if (CHK_SDIO_EXIST(chip))
+ rtsx_write_cfg_dw(chip, CHECK_PID(chip, 0x5288) ? 2 : 1,
+ 0xC0, 0xFF00, 0x0100);
if (chip->auto_delink_en) {
rtsx_write_register(chip, HOST_SLEEP_STATE, 0x01, 0x01);
@@ -1666,7 +1635,7 @@ int rtsx_pre_handle_interrupt(struct rtsx_chip *chip)
chip->int_reg = rtsx_readl(chip, RTSX_BIPR);
if (((chip->int_reg & int_enable) == 0) ||
- (chip->int_reg == 0xFFFFFFFF))
+ (chip->int_reg == 0xFFFFFFFF))
return STATUS_FAIL;
status = chip->int_reg &= (int_enable | 0x7FFFFF);
@@ -1676,12 +1645,12 @@ int rtsx_pre_handle_interrupt(struct rtsx_chip *chip)
if (status & SD_INT) {
if (status & SD_EXIST) {
- set_bit(SD_NR, &(chip->need_reset));
+ set_bit(SD_NR, &chip->need_reset);
} else {
- set_bit(SD_NR, &(chip->need_release));
+ set_bit(SD_NR, &chip->need_release);
chip->sd_reset_counter = 0;
chip->sd_show_cnt = 0;
- clear_bit(SD_NR, &(chip->need_reset));
+ clear_bit(SD_NR, &chip->need_reset);
}
} else {
/* If multi-luns, it's possible that
@@ -1691,35 +1660,35 @@ int rtsx_pre_handle_interrupt(struct rtsx_chip *chip)
all existed cards should be reset.
*/
if (exit_ss && (status & SD_EXIST))
- set_bit(SD_NR, &(chip->need_reinit));
+ set_bit(SD_NR, &chip->need_reinit);
}
if (!CHECK_PID(chip, 0x5288) || CHECK_BARO_PKG(chip, QFN)) {
if (status & XD_INT) {
if (status & XD_EXIST) {
- set_bit(XD_NR, &(chip->need_reset));
+ set_bit(XD_NR, &chip->need_reset);
} else {
- set_bit(XD_NR, &(chip->need_release));
+ set_bit(XD_NR, &chip->need_release);
chip->xd_reset_counter = 0;
chip->xd_show_cnt = 0;
- clear_bit(XD_NR, &(chip->need_reset));
+ clear_bit(XD_NR, &chip->need_reset);
}
} else {
if (exit_ss && (status & XD_EXIST))
- set_bit(XD_NR, &(chip->need_reinit));
+ set_bit(XD_NR, &chip->need_reinit);
}
}
if (status & MS_INT) {
if (status & MS_EXIST) {
- set_bit(MS_NR, &(chip->need_reset));
+ set_bit(MS_NR, &chip->need_reset);
} else {
- set_bit(MS_NR, &(chip->need_release));
+ set_bit(MS_NR, &chip->need_release);
chip->ms_reset_counter = 0;
chip->ms_show_cnt = 0;
- clear_bit(MS_NR, &(chip->need_reset));
+ clear_bit(MS_NR, &chip->need_reset);
}
} else {
if (exit_ss && (status & MS_EXIST))
- set_bit(MS_NR, &(chip->need_reinit));
+ set_bit(MS_NR, &chip->need_reinit);
}
}
@@ -1727,10 +1696,8 @@ int rtsx_pre_handle_interrupt(struct rtsx_chip *chip)
chip->ocp_int = ocp_int & status;
#endif
- if (chip->sd_io) {
- if (chip->int_reg & DATA_DONE_INT)
- chip->int_reg &= ~(u32)DATA_DONE_INT;
- }
+ if (chip->sd_io && (chip->int_reg & DATA_DONE_INT))
+ chip->int_reg &= ~(u32)DATA_DONE_INT;
return STATUS_SUCCESS;
}
@@ -1774,14 +1741,14 @@ void rtsx_do_before_power_down(struct rtsx_chip *chip, int pm_stat)
if (pm_stat == PM_S1) {
dev_dbg(rtsx_dev(chip), "Host enter S1\n");
rtsx_write_register(chip, HOST_SLEEP_STATE, 0x03,
- HOST_ENTER_S1);
+ HOST_ENTER_S1);
} else if (pm_stat == PM_S3) {
if (chip->s3_pwr_off_delay > 0)
wait_timeout(chip->s3_pwr_off_delay);
dev_dbg(rtsx_dev(chip), "Host enter S3\n");
rtsx_write_register(chip, HOST_SLEEP_STATE, 0x03,
- HOST_ENTER_S3);
+ HOST_ENTER_S3);
}
if (chip->do_delink_before_power_down && chip->auto_delink_en)
@@ -1796,31 +1763,25 @@ void rtsx_do_before_power_down(struct rtsx_chip *chip, int pm_stat)
void rtsx_enable_aspm(struct rtsx_chip *chip)
{
- if (chip->aspm_l0s_l1_en && chip->dynamic_aspm) {
- if (!chip->aspm_enabled) {
- dev_dbg(rtsx_dev(chip), "Try to enable ASPM\n");
- chip->aspm_enabled = 1;
+ if (chip->aspm_l0s_l1_en && chip->dynamic_aspm && !chip->aspm_enabled) {
+ dev_dbg(rtsx_dev(chip), "Try to enable ASPM\n");
+ chip->aspm_enabled = 1;
- if (chip->asic_code && CHECK_PID(chip, 0x5208))
- rtsx_write_phy_register(chip, 0x07, 0);
- if (CHECK_PID(chip, 0x5208)) {
- rtsx_write_register(chip, ASPM_FORCE_CTL, 0xF3,
- 0x30 | chip->aspm_level[0]);
- } else {
- rtsx_write_config_byte(chip, LCTLR,
- chip->aspm_l0s_l1_en);
- }
+ if (chip->asic_code && CHECK_PID(chip, 0x5208))
+ rtsx_write_phy_register(chip, 0x07, 0);
+ if (CHECK_PID(chip, 0x5208)) {
+ rtsx_write_register(chip, ASPM_FORCE_CTL, 0xF3,
+ 0x30 | chip->aspm_level[0]);
+ } else {
+ rtsx_write_config_byte(chip, LCTLR,
+ chip->aspm_l0s_l1_en);
+ }
- if (CHK_SDIO_EXIST(chip)) {
- u16 val = chip->aspm_l0s_l1_en | 0x0100;
+ if (CHK_SDIO_EXIST(chip)) {
+ u16 val = chip->aspm_l0s_l1_en | 0x0100;
- if (CHECK_PID(chip, 0x5288))
- rtsx_write_cfg_dw(chip, 2, 0xC0,
- 0xFFFF, val);
- else
- rtsx_write_cfg_dw(chip, 1, 0xC0,
- 0xFFFF, val);
- }
+ rtsx_write_cfg_dw(chip, CHECK_PID(chip, 0x5288) ? 2 : 1,
+ 0xC0, 0xFFFF, val);
}
}
}
@@ -1830,21 +1791,19 @@ void rtsx_disable_aspm(struct rtsx_chip *chip)
if (CHECK_PID(chip, 0x5208))
rtsx_monitor_aspm_config(chip);
- if (chip->aspm_l0s_l1_en && chip->dynamic_aspm) {
- if (chip->aspm_enabled) {
- dev_dbg(rtsx_dev(chip), "Try to disable ASPM\n");
- chip->aspm_enabled = 0;
+ if (chip->aspm_l0s_l1_en && chip->dynamic_aspm && chip->aspm_enabled) {
+ dev_dbg(rtsx_dev(chip), "Try to disable ASPM\n");
+ chip->aspm_enabled = 0;
- if (chip->asic_code && CHECK_PID(chip, 0x5208))
- rtsx_write_phy_register(chip, 0x07, 0x0129);
- if (CHECK_PID(chip, 0x5208))
- rtsx_write_register(chip, ASPM_FORCE_CTL,
- 0xF3, 0x30);
- else
- rtsx_write_config_byte(chip, LCTLR, 0x00);
+ if (chip->asic_code && CHECK_PID(chip, 0x5208))
+ rtsx_write_phy_register(chip, 0x07, 0x0129);
+ if (CHECK_PID(chip, 0x5208))
+ rtsx_write_register(chip, ASPM_FORCE_CTL,
+ 0xF3, 0x30);
+ else
+ rtsx_write_config_byte(chip, LCTLR, 0x00);
- wait_timeout(1);
- }
+ wait_timeout(1);
}
}
@@ -1907,7 +1866,7 @@ int rtsx_write_ppbuf(struct rtsx_chip *chip, u8 *buf, int buf_len)
for (j = 0; j < 256; j++) {
rtsx_add_cmd(chip, WRITE_REG_CMD, reg_addr++, 0xFF,
- *ptr);
+ *ptr);
ptr++;
}
@@ -1921,7 +1880,7 @@ int rtsx_write_ppbuf(struct rtsx_chip *chip, u8 *buf, int buf_len)
for (j = 0; j < buf_len%256; j++) {
rtsx_add_cmd(chip, WRITE_REG_CMD, reg_addr++, 0xFF,
- *ptr);
+ *ptr);
ptr++;
}
diff --git a/drivers/staging/rts5208/rtsx_scsi.c b/drivers/staging/rts5208/rtsx_scsi.c
index bbbf7968a0b..11610826acf 100644
--- a/drivers/staging/rts5208/rtsx_scsi.c
+++ b/drivers/staging/rts5208/rtsx_scsi.c
@@ -584,9 +584,8 @@ static int start_stop_unit(struct scsi_cmnd *srb, struct rtsx_chip *chip)
case MAKE_MEDIUM_READY:
case LOAD_MEDIUM:
- if (check_card_ready(chip, lun)) {
+ if (check_card_ready(chip, lun))
return TRANSPORT_GOOD;
- }
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
TRACE_RET(chip, TRANSPORT_FAILED);
diff --git a/drivers/staging/rts5208/rtsx_transport.c b/drivers/staging/rts5208/rtsx_transport.c
index 0a67dca72df..756a9687c29 100644
--- a/drivers/staging/rts5208/rtsx_transport.c
+++ b/drivers/staging/rts5208/rtsx_transport.c
@@ -539,7 +539,7 @@ static int rtsx_transfer_sglist_adma(struct rtsx_chip *chip, u8 card,
if (i == buf_cnt / (HOST_SG_TBL_BUF_LEN / 8))
sg_cnt = buf_cnt % (HOST_SG_TBL_BUF_LEN / 8);
else
- sg_cnt = (HOST_SG_TBL_BUF_LEN / 8);
+ sg_cnt = HOST_SG_TBL_BUF_LEN / 8;
chip->sgi = 0;
for (j = 0; j < sg_cnt; j++) {
@@ -728,15 +728,13 @@ int rtsx_transfer_data_partial(struct rtsx_chip *chip, u8 card,
if (rtsx_chk_stat(chip, RTSX_STAT_ABORT))
return -EIO;
- if (use_sg) {
+ if (use_sg)
err = rtsx_transfer_sglist_adma_partial(chip, card,
(struct scatterlist *)buf, use_sg,
index, offset, (int)len, dma_dir, timeout);
- } else {
+ else
err = rtsx_transfer_buf(chip, card,
buf, len, dma_dir, timeout);
- }
-
if (err < 0) {
if (RTSX_TST_DELINK(chip)) {
RTSX_CLR_DELINK(chip);
diff --git a/drivers/staging/rts5208/rtsx_transport.h b/drivers/staging/rts5208/rtsx_transport.h
index b4b11237277..899bc2079db 100644
--- a/drivers/staging/rts5208/rtsx_transport.h
+++ b/drivers/staging/rts5208/rtsx_transport.h
@@ -46,7 +46,7 @@ void rtsx_add_cmd(struct rtsx_chip *chip,
void rtsx_send_cmd_no_wait(struct rtsx_chip *chip);
int rtsx_send_cmd(struct rtsx_chip *chip, u8 card, int timeout);
-extern inline u8 *rtsx_get_cmd_data(struct rtsx_chip *chip)
+static inline u8 *rtsx_get_cmd_data(struct rtsx_chip *chip)
{
#ifdef CMD_USING_SG
return (u8 *)(chip->host_sg_tbl_ptr);
diff --git a/drivers/staging/skein/Kconfig b/drivers/staging/skein/Kconfig
index b9172bfcdc1..012a8233376 100644
--- a/drivers/staging/skein/Kconfig
+++ b/drivers/staging/skein/Kconfig
@@ -1,8 +1,8 @@
config CRYPTO_SKEIN
- bool "Skein digest algorithm"
+ tristate "Skein digest algorithm"
depends on (X86 || UML_X86) && 64BIT && CRYPTO
- select CRYPTO_THREEFISH
select CRYPTO_HASH
+ select CRYPTO_ALGAPI
help
Skein secure hash algorithm is one of 5 finalists from the NIST SHA3
competition.
@@ -12,21 +12,5 @@ config CRYPTO_SKEIN
http://www.skein-hash.info/sites/default/files/skein1.3.pdf
- for more information. This module depends on the threefish block
- cipher module.
-
-config CRYPTO_THREEFISH
- bool "Threefish tweakable block cipher"
- depends on (X86 || UML_X86) && 64BIT && CRYPTO
- select CRYPTO_ALGAPI
- help
- Threefish cipher algorithm is the tweakable block cipher underneath
- the Skein family of secure hash algorithms. Skein is one of 5
- finalists from the NIST SHA3 competition.
-
- Skein is optimized for modern, 64bit processors and is highly
- customizable. See:
-
- http://www.skein-hash.info/sites/default/files/skein1.3.pdf
-
- for more information.
+ for more information. This module also contains the threefish block
+ cipher algorithm.
diff --git a/drivers/staging/skein/Makefile b/drivers/staging/skein/Makefile
index a14aaddd829..b7f947fb98f 100644
--- a/drivers/staging/skein/Makefile
+++ b/drivers/staging/skein/Makefile
@@ -1,9 +1,10 @@
#
# Makefile for the skein secure hash algorithm
#
-obj-$(CONFIG_CRYPTO_SKEIN) += skein.o \
- skein_api.o \
- skein_block.o
-
-obj-$(CONFIG_CRYPTO_THREEFISH) += threefish_block.o \
- threefish_api.o
+obj-$(CONFIG_CRYPTO_SKEIN) += skein.o
+skein-y := skein_base.o \
+ skein_api.o \
+ skein_block.o \
+ threefish_block.o \
+ threefish_api.o \
+ skein_generic.o
diff --git a/drivers/staging/skein/skein_api.c b/drivers/staging/skein/skein_api.c
index 6e700eefc00..5bfce076f7c 100644
--- a/drivers/staging/skein/skein_api.c
+++ b/drivers/staging/skein/skein_api.c
@@ -31,7 +31,7 @@ int skein_ctx_prepare(struct skein_ctx *ctx, enum skein_size size)
{
skein_assert_ret(ctx && size, SKEIN_FAIL);
- memset(ctx , 0, sizeof(struct skein_ctx));
+ memset(ctx, 0, sizeof(struct skein_ctx));
ctx->skein_size = size;
return SKEIN_SUCCESS;
diff --git a/drivers/staging/skein/skein_api.h b/drivers/staging/skein/skein_api.h
index e02fa19d945..171b8754954 100644
--- a/drivers/staging/skein/skein_api.h
+++ b/drivers/staging/skein/skein_api.h
@@ -79,7 +79,7 @@ OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/types.h>
-#include "skein.h"
+#include "skein_base.h"
/**
* Which Skein size to use
diff --git a/drivers/staging/skein/skein.c b/drivers/staging/skein/skein_base.c
index 8cc83587b1f..7e700a6b578 100644
--- a/drivers/staging/skein/skein.c
+++ b/drivers/staging/skein/skein_base.c
@@ -8,10 +8,9 @@
**
************************************************************************/
-#define SKEIN_PORT_CODE /* instantiate any code in skein_port.h */
-
#include <linux/string.h> /* get the memcpy/memset functions */
-#include "skein.h" /* get the Skein API definitions */
+#include <linux/export.h>
+#include "skein_base.h" /* get the Skein API definitions */
#include "skein_iv.h" /* get precomputed IVs */
#include "skein_block.h"
@@ -125,8 +124,6 @@ int skein_256_init_ext(struct skein_256_ctx *ctx, size_t hash_bit_len,
/* tree hash config info (or SKEIN_CFG_TREE_INFO_SEQUENTIAL) */
cfg.w[2] = skein_swap64(tree_info);
- skein_show_key(256, &ctx->h, key, key_bytes);
-
/* compute the initial chaining values from config block */
skein_256_process_block(ctx, cfg.b, 1, SKEIN_CFG_STR_LEN);
@@ -233,8 +230,6 @@ int skein_256_final(struct skein_256_ctx *ctx, u8 *hash_val)
/* "output" the ctr mode bytes */
skein_put64_lsb_first(hash_val+i*SKEIN_256_BLOCK_BYTES, ctx->x,
n);
- skein_show_final(256, &ctx->h, n,
- hash_val+i*SKEIN_256_BLOCK_BYTES);
/* restore the counter mode key for next time */
memcpy(ctx->x, x, sizeof(x));
}
@@ -354,8 +349,6 @@ int skein_512_init_ext(struct skein_512_ctx *ctx, size_t hash_bit_len,
/* tree hash config info (or SKEIN_CFG_TREE_INFO_SEQUENTIAL) */
cfg.w[2] = skein_swap64(tree_info);
- skein_show_key(512, &ctx->h, key, key_bytes);
-
/* compute the initial chaining values from config block */
skein_512_process_block(ctx, cfg.b, 1, SKEIN_CFG_STR_LEN);
@@ -462,8 +455,6 @@ int skein_512_final(struct skein_512_ctx *ctx, u8 *hash_val)
/* "output" the ctr mode bytes */
skein_put64_lsb_first(hash_val+i*SKEIN_512_BLOCK_BYTES, ctx->x,
n);
- skein_show_final(512, &ctx->h, n,
- hash_val+i*SKEIN_512_BLOCK_BYTES);
/* restore the counter mode key for next time */
memcpy(ctx->x, x, sizeof(x));
}
@@ -578,8 +569,6 @@ int skein_1024_init_ext(struct skein_1024_ctx *ctx, size_t hash_bit_len,
/* tree hash config info (or SKEIN_CFG_TREE_INFO_SEQUENTIAL) */
cfg.w[2] = skein_swap64(tree_info);
- skein_show_key(1024, &ctx->h, key, key_bytes);
-
/* compute the initial chaining values from config block */
skein_1024_process_block(ctx, cfg.b, 1, SKEIN_CFG_STR_LEN);
@@ -686,8 +675,6 @@ int skein_1024_final(struct skein_1024_ctx *ctx, u8 *hash_val)
/* "output" the ctr mode bytes */
skein_put64_lsb_first(hash_val+i*SKEIN_1024_BLOCK_BYTES, ctx->x,
n);
- skein_show_final(1024, &ctx->h, n,
- hash_val+i*SKEIN_1024_BLOCK_BYTES);
/* restore the counter mode key for next time */
memcpy(ctx->x, x, sizeof(x));
}
@@ -795,8 +782,6 @@ int skein_256_output(struct skein_256_ctx *ctx, u8 *hash_val)
/* "output" the ctr mode bytes */
skein_put64_lsb_first(hash_val+i*SKEIN_256_BLOCK_BYTES, ctx->x,
n);
- skein_show_final(256, &ctx->h, n,
- hash_val+i*SKEIN_256_BLOCK_BYTES);
/* restore the counter mode key for next time */
memcpy(ctx->x, x, sizeof(x));
}
@@ -834,8 +819,6 @@ int skein_512_output(struct skein_512_ctx *ctx, u8 *hash_val)
/* "output" the ctr mode bytes */
skein_put64_lsb_first(hash_val+i*SKEIN_512_BLOCK_BYTES, ctx->x,
n);
- skein_show_final(256, &ctx->h, n,
- hash_val+i*SKEIN_512_BLOCK_BYTES);
/* restore the counter mode key for next time */
memcpy(ctx->x, x, sizeof(x));
}
@@ -873,8 +856,6 @@ int skein_1024_output(struct skein_1024_ctx *ctx, u8 *hash_val)
/* "output" the ctr mode bytes */
skein_put64_lsb_first(hash_val+i*SKEIN_1024_BLOCK_BYTES, ctx->x,
n);
- skein_show_final(256, &ctx->h, n,
- hash_val+i*SKEIN_1024_BLOCK_BYTES);
/* restore the counter mode key for next time */
memcpy(ctx->x, x, sizeof(x));
}
diff --git a/drivers/staging/skein/skein.h b/drivers/staging/skein/skein_base.h
index e6669f196e5..3c7f8ad3627 100644
--- a/drivers/staging/skein/skein.h
+++ b/drivers/staging/skein/skein_base.h
@@ -15,10 +15,6 @@
**
** The "default" note explains what happens when the switch is not defined.
**
-** SKEIN_DEBUG -- make callouts from inside Skein code
-** to examine/display intermediate values.
-** [default: no callouts (no overhead)]
-**
** SKEIN_ERR_CHECK -- how error checking is handled inside Skein
** code. If not defined, most error checking
** is disabled (for performance). Otherwise,
@@ -28,9 +24,10 @@
**
***************************************************************************/
-#ifndef rotl_64
-#define rotl_64(x, N) (((x) << (N)) | ((x) >> (64-(N))))
-#endif
+/* Skein digest sizes for crypto api */
+#define SKEIN256_DIGEST_BIT_SIZE 256
+#define SKEIN512_DIGEST_BIT_SIZE 512
+#define SKEIN1024_DIGEST_BIT_SIZE 1024
/* below two prototype assume we are handed aligned data */
#define skein_put64_lsb_first(dst08, src64, b_cnt) memcpy(dst08, src64, b_cnt)
@@ -44,12 +41,12 @@ enum {
SKEIN_BAD_HASHLEN = 2
};
-#define SKEIN_MODIFIER_WORDS (2) /* number of modifier (tweak) words */
+#define SKEIN_MODIFIER_WORDS 2 /* number of modifier (tweak) words */
-#define SKEIN_256_STATE_WORDS (4)
-#define SKEIN_512_STATE_WORDS (8)
-#define SKEIN_1024_STATE_WORDS (16)
-#define SKEIN_MAX_STATE_WORDS (16)
+#define SKEIN_256_STATE_WORDS 4
+#define SKEIN_512_STATE_WORDS 8
+#define SKEIN_1024_STATE_WORDS 16
+#define SKEIN_MAX_STATE_WORDS 16
#define SKEIN_256_STATE_BYTES (8*SKEIN_256_STATE_WORDS)
#define SKEIN_512_STATE_BYTES (8*SKEIN_512_STATE_WORDS)
@@ -87,6 +84,11 @@ struct skein_1024_ctx { /* 1024-bit Skein hash context structure */
u8 b[SKEIN_1024_BLOCK_BYTES]; /* partial block buf (8-byte aligned) */
};
+static inline u64 rotl_64(u64 x, u8 N)
+{
+ return (x << N) | (x >> (64 - N));
+}
+
/* Skein APIs for (incremental) "straight hashing" */
int skein_256_init(struct skein_256_ctx *ctx, size_t hash_bit_len);
int skein_512_init(struct skein_512_ctx *ctx, size_t hash_bit_len);
@@ -273,19 +275,6 @@ int skein_1024_output(struct skein_1024_ctx *ctx, u8 *hash_val);
(hdr).tweak[1] |= SKEIN_T1_TREE_LEVEL(height); \
}
-/*****************************************************************
-** "Internal" Skein definitions for debugging and error checking
-******************************************************************/
-#ifdef SKEIN_DEBUG /* examine/display intermediate values? */
-#include "skein_debug.h"
-#else /* default is no callouts */
-#define skein_show_block(bits, ctx, x, blk_ptr, w_ptr, ks_event_ptr, ks_odd_ptr)
-#define skein_show_round(bits, ctx, r, x)
-#define skein_show_r_ptr(bits, ctx, r, x_ptr)
-#define skein_show_final(bits, ctx, cnt, out_ptr)
-#define skein_show_key(bits, ctx, key, key_bytes)
-#endif
-
/* ignore all asserts, for performance */
#define skein_assert_ret(x, ret_code)
#define skein_assert(x)
diff --git a/drivers/staging/skein/skein_block.c b/drivers/staging/skein/skein_block.c
index 616364faf92..66261ab25c8 100644
--- a/drivers/staging/skein/skein_block.c
+++ b/drivers/staging/skein/skein_block.c
@@ -15,7 +15,7 @@
************************************************************************/
#include <linux/string.h>
-#include "skein.h"
+#include "skein_base.h"
#include "skein_block.h"
#ifndef SKEIN_USE_ASM
@@ -26,32 +26,27 @@
#define SKEIN_LOOP 001 /* default: unroll 256 and 512, but not 1024 */
#endif
-#define BLK_BITS (WCNT*64) /* some useful definitions for code here */
+#define BLK_BITS (WCNT * 64) /* some useful definitions for code here */
#define KW_TWK_BASE (0)
#define KW_KEY_BASE (3)
#define ks (kw + KW_KEY_BASE)
#define ts (kw + KW_TWK_BASE)
#ifdef SKEIN_DEBUG
-#define debug_save_tweak(ctx) { \
- ctx->h.tweak[0] = ts[0]; ctx->h.tweak[1] = ts[1]; }
+#define debug_save_tweak(ctx) \
+{ \
+ ctx->h.tweak[0] = ts[0]; \
+ ctx->h.tweak[1] = ts[1]; \
+}
#else
#define debug_save_tweak(ctx)
#endif
-/***************************** SKEIN_256 ******************************/
#if !(SKEIN_USE_ASM & 256)
-void skein_256_process_block(struct skein_256_ctx *ctx, const u8 *blk_ptr,
- size_t blk_cnt, size_t byte_cnt_add)
- { /* do it in C */
- enum {
- WCNT = SKEIN_256_STATE_WORDS
- };
#undef RCNT
-#define RCNT (SKEIN_256_ROUNDS_TOTAL/8)
-
+#define RCNT (SKEIN_256_ROUNDS_TOTAL / 8)
#ifdef SKEIN_LOOP /* configure how much to unroll the loop */
-#define SKEIN_UNROLL_256 (((SKEIN_LOOP)/100)%10)
+#define SKEIN_UNROLL_256 (((SKEIN_LOOP) / 100) % 10)
#else
#define SKEIN_UNROLL_256 (0)
#endif
@@ -60,17 +55,329 @@ void skein_256_process_block(struct skein_256_ctx *ctx, const u8 *blk_ptr,
#if (RCNT % SKEIN_UNROLL_256)
#error "Invalid SKEIN_UNROLL_256" /* sanity check on unroll count */
#endif
- size_t r;
- u64 kw[WCNT+4+RCNT*2]; /* key schedule: chaining vars + tweak + "rot"*/
+#endif
+#define ROUND256(p0, p1, p2, p3, ROT, r_num) \
+do { \
+ X##p0 += X##p1; \
+ X##p1 = rotl_64(X##p1, ROT##_0); \
+ X##p1 ^= X##p0; \
+ X##p2 += X##p3; \
+ X##p3 = rotl_64(X##p3, ROT##_1); \
+ X##p3 ^= X##p2; \
+} while (0)
+
+#if SKEIN_UNROLL_256 == 0
+#define R256(p0, p1, p2, p3, ROT, r_num) /* fully unrolled */ \
+do { \
+ ROUND256(p0, p1, p2, p3, ROT, r_num); \
+} while (0)
+
+#define I256(R) \
+do { \
+ /* inject the key schedule value */ \
+ X0 += ks[((R) + 1) % 5]; \
+ X1 += ks[((R) + 2) % 5] + ts[((R) + 1) % 3]; \
+ X2 += ks[((R) + 3) % 5] + ts[((R) + 2) % 3]; \
+ X3 += ks[((R) + 4) % 5] + (R) + 1; \
+} while (0)
#else
- u64 kw[WCNT+4]; /* key schedule words : chaining vars + tweak */
+/* looping version */
+#define R256(p0, p1, p2, p3, ROT, r_num) \
+do { \
+ ROUND256(p0, p1, p2, p3, ROT, r_num); \
+} while (0)
+
+#define I256(R) \
+do { \
+ /* inject the key schedule value */ \
+ X0 += ks[r + (R) + 0]; \
+ X1 += ks[r + (R) + 1] + ts[r + (R) + 0]; \
+ X2 += ks[r + (R) + 2] + ts[r + (R) + 1]; \
+ X3 += ks[r + (R) + 3] + r + (R); \
+ /* rotate key schedule */ \
+ ks[r + (R) + 4] = ks[r + (R) - 1]; \
+ ts[r + (R) + 2] = ts[r + (R) - 1]; \
+} while (0)
+#endif
+#define R256_8_ROUNDS(R) \
+do { \
+ R256(0, 1, 2, 3, R_256_0, 8 * (R) + 1); \
+ R256(0, 3, 2, 1, R_256_1, 8 * (R) + 2); \
+ R256(0, 1, 2, 3, R_256_2, 8 * (R) + 3); \
+ R256(0, 3, 2, 1, R_256_3, 8 * (R) + 4); \
+ I256(2 * (R)); \
+ R256(0, 1, 2, 3, R_256_4, 8 * (R) + 5); \
+ R256(0, 3, 2, 1, R_256_5, 8 * (R) + 6); \
+ R256(0, 1, 2, 3, R_256_6, 8 * (R) + 7); \
+ R256(0, 3, 2, 1, R_256_7, 8 * (R) + 8); \
+ I256(2 * (R) + 1); \
+} while (0)
+
+#define R256_UNROLL_R(NN) \
+ ((SKEIN_UNROLL_256 == 0 && \
+ SKEIN_256_ROUNDS_TOTAL / 8 > (NN)) || \
+ (SKEIN_UNROLL_256 > (NN)))
+
+#if (SKEIN_UNROLL_256 > 14)
+#error "need more unrolling in skein_256_process_block"
+#endif
+#endif
+
+#if !(SKEIN_USE_ASM & 512)
+#undef RCNT
+#define RCNT (SKEIN_512_ROUNDS_TOTAL/8)
+
+#ifdef SKEIN_LOOP /* configure how much to unroll the loop */
+#define SKEIN_UNROLL_512 (((SKEIN_LOOP)/10)%10)
+#else
+#define SKEIN_UNROLL_512 (0)
+#endif
+
+#if SKEIN_UNROLL_512
+#if (RCNT % SKEIN_UNROLL_512)
+#error "Invalid SKEIN_UNROLL_512" /* sanity check on unroll count */
+#endif
+#endif
+#define ROUND512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) \
+do { \
+ X##p0 += X##p1; \
+ X##p1 = rotl_64(X##p1, ROT##_0); \
+ X##p1 ^= X##p0; \
+ X##p2 += X##p3; \
+ X##p3 = rotl_64(X##p3, ROT##_1); \
+ X##p3 ^= X##p2; \
+ X##p4 += X##p5; \
+ X##p5 = rotl_64(X##p5, ROT##_2); \
+ X##p5 ^= X##p4; \
+ X##p6 += X##p7; \
+ X##p7 = rotl_64(X##p7, ROT##_3); \
+ X##p7 ^= X##p6; \
+} while (0)
+
+#if SKEIN_UNROLL_512 == 0
+#define R512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) /* unrolled */ \
+do { \
+ ROUND512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num); \
+} while (0)
+
+#define I512(R) \
+do { \
+ /* inject the key schedule value */ \
+ X0 += ks[((R) + 1) % 9]; \
+ X1 += ks[((R) + 2) % 9]; \
+ X2 += ks[((R) + 3) % 9]; \
+ X3 += ks[((R) + 4) % 9]; \
+ X4 += ks[((R) + 5) % 9]; \
+ X5 += ks[((R) + 6) % 9] + ts[((R) + 1) % 3]; \
+ X6 += ks[((R) + 7) % 9] + ts[((R) + 2) % 3]; \
+ X7 += ks[((R) + 8) % 9] + (R) + 1; \
+} while (0)
+
+#else /* looping version */
+#define R512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) \
+do { \
+ ROUND512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num); \
+} while (0)
+
+#define I512(R) \
+do { \
+ /* inject the key schedule value */ \
+ X0 += ks[r + (R) + 0]; \
+ X1 += ks[r + (R) + 1]; \
+ X2 += ks[r + (R) + 2]; \
+ X3 += ks[r + (R) + 3]; \
+ X4 += ks[r + (R) + 4]; \
+ X5 += ks[r + (R) + 5] + ts[r + (R) + 0]; \
+ X6 += ks[r + (R) + 6] + ts[r + (R) + 1]; \
+ X7 += ks[r + (R) + 7] + r + (R); \
+ /* rotate key schedule */ \
+ ks[r + (R) + 8] = ks[r + (R) - 1]; \
+ ts[r + (R) + 2] = ts[r + (R) - 1]; \
+} while (0)
+#endif /* end of looped code definitions */
+#define R512_8_ROUNDS(R) /* do 8 full rounds */ \
+do { \
+ R512(0, 1, 2, 3, 4, 5, 6, 7, R_512_0, 8 * (R) + 1); \
+ R512(2, 1, 4, 7, 6, 5, 0, 3, R_512_1, 8 * (R) + 2); \
+ R512(4, 1, 6, 3, 0, 5, 2, 7, R_512_2, 8 * (R) + 3); \
+ R512(6, 1, 0, 7, 2, 5, 4, 3, R_512_3, 8 * (R) + 4); \
+ I512(2 * (R)); \
+ R512(0, 1, 2, 3, 4, 5, 6, 7, R_512_4, 8 * (R) + 5); \
+ R512(2, 1, 4, 7, 6, 5, 0, 3, R_512_5, 8 * (R) + 6); \
+ R512(4, 1, 6, 3, 0, 5, 2, 7, R_512_6, 8 * (R) + 7); \
+ R512(6, 1, 0, 7, 2, 5, 4, 3, R_512_7, 8 * (R) + 8); \
+ I512(2 * (R) + 1); /* and key injection */ \
+} while (0)
+#define R512_UNROLL_R(NN) \
+ ((SKEIN_UNROLL_512 == 0 && \
+ SKEIN_512_ROUNDS_TOTAL/8 > (NN)) || \
+ (SKEIN_UNROLL_512 > (NN)))
+
+#if (SKEIN_UNROLL_512 > 14)
+#error "need more unrolling in skein_512_process_block"
+#endif
+#endif
+
+#if !(SKEIN_USE_ASM & 1024)
+#undef RCNT
+#define RCNT (SKEIN_1024_ROUNDS_TOTAL/8)
+#ifdef SKEIN_LOOP /* configure how much to unroll the loop */
+#define SKEIN_UNROLL_1024 ((SKEIN_LOOP)%10)
+#else
+#define SKEIN_UNROLL_1024 (0)
+#endif
+
+#if (SKEIN_UNROLL_1024 != 0)
+#if (RCNT % SKEIN_UNROLL_1024)
+#error "Invalid SKEIN_UNROLL_1024" /* sanity check on unroll count */
+#endif
+#endif
+#define ROUND1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, \
+ pF, ROT, r_num) \
+do { \
+ X##p0 += X##p1; \
+ X##p1 = rotl_64(X##p1, ROT##_0); \
+ X##p1 ^= X##p0; \
+ X##p2 += X##p3; \
+ X##p3 = rotl_64(X##p3, ROT##_1); \
+ X##p3 ^= X##p2; \
+ X##p4 += X##p5; \
+ X##p5 = rotl_64(X##p5, ROT##_2); \
+ X##p5 ^= X##p4; \
+ X##p6 += X##p7; \
+ X##p7 = rotl_64(X##p7, ROT##_3); \
+ X##p7 ^= X##p6; \
+ X##p8 += X##p9; \
+ X##p9 = rotl_64(X##p9, ROT##_4); \
+ X##p9 ^= X##p8; \
+ X##pA += X##pB; \
+ X##pB = rotl_64(X##pB, ROT##_5); \
+ X##pB ^= X##pA; \
+ X##pC += X##pD; \
+ X##pD = rotl_64(X##pD, ROT##_6); \
+ X##pD ^= X##pC; \
+ X##pE += X##pF; \
+ X##pF = rotl_64(X##pF, ROT##_7); \
+ X##pF ^= X##pE; \
+} while (0)
+
+#if SKEIN_UNROLL_1024 == 0
+#define R1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, pF, \
+ ROT, rn) \
+do { \
+ ROUND1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, \
+ pF, ROT, rn); \
+} while (0)
+
+#define I1024(R) \
+do { \
+ /* inject the key schedule value */ \
+ X00 += ks[((R) + 1) % 17]; \
+ X01 += ks[((R) + 2) % 17]; \
+ X02 += ks[((R) + 3) % 17]; \
+ X03 += ks[((R) + 4) % 17]; \
+ X04 += ks[((R) + 5) % 17]; \
+ X05 += ks[((R) + 6) % 17]; \
+ X06 += ks[((R) + 7) % 17]; \
+ X07 += ks[((R) + 8) % 17]; \
+ X08 += ks[((R) + 9) % 17]; \
+ X09 += ks[((R) + 10) % 17]; \
+ X10 += ks[((R) + 11) % 17]; \
+ X11 += ks[((R) + 12) % 17]; \
+ X12 += ks[((R) + 13) % 17]; \
+ X13 += ks[((R) + 14) % 17] + ts[((R) + 1) % 3]; \
+ X14 += ks[((R) + 15) % 17] + ts[((R) + 2) % 3]; \
+ X15 += ks[((R) + 16) % 17] + (R) + 1; \
+} while (0)
+#else /* looping version */
+#define R1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, pF, \
+ ROT, rn) \
+do { \
+ ROUND1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, \
+ pF, ROT, rn); \
+} while (0)
+
+#define I1024(R) \
+do { \
+ /* inject the key schedule value */ \
+ X00 += ks[r + (R) + 0]; \
+ X01 += ks[r + (R) + 1]; \
+ X02 += ks[r + (R) + 2]; \
+ X03 += ks[r + (R) + 3]; \
+ X04 += ks[r + (R) + 4]; \
+ X05 += ks[r + (R) + 5]; \
+ X06 += ks[r + (R) + 6]; \
+ X07 += ks[r + (R) + 7]; \
+ X08 += ks[r + (R) + 8]; \
+ X09 += ks[r + (R) + 9]; \
+ X10 += ks[r + (R) + 10]; \
+ X11 += ks[r + (R) + 11]; \
+ X12 += ks[r + (R) + 12]; \
+ X13 += ks[r + (R) + 13] + ts[r + (R) + 0]; \
+ X14 += ks[r + (R) + 14] + ts[r + (R) + 1]; \
+ X15 += ks[r + (R) + 15] + r + (R); \
+ /* rotate key schedule */ \
+ ks[r + (R) + 16] = ks[r + (R) - 1]; \
+ ts[r + (R) + 2] = ts[r + (R) - 1]; \
+} while (0)
+
+#endif
+#define R1024_8_ROUNDS(R) \
+do { \
+ R1024(00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, 13, 14, 15, \
+ R1024_0, 8*(R) + 1); \
+ R1024(00, 09, 02, 13, 06, 11, 04, 15, 10, 07, 12, 03, 14, 05, 08, 01, \
+ R1024_1, 8*(R) + 2); \
+ R1024(00, 07, 02, 05, 04, 03, 06, 01, 12, 15, 14, 13, 08, 11, 10, 09, \
+ R1024_2, 8*(R) + 3); \
+ R1024(00, 15, 02, 11, 06, 13, 04, 09, 14, 01, 08, 05, 10, 03, 12, 07, \
+ R1024_3, 8*(R) + 4); \
+ I1024(2*(R)); \
+ R1024(00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, 13, 14, 15, \
+ R1024_4, 8*(R) + 5); \
+ R1024(00, 09, 02, 13, 06, 11, 04, 15, 10, 07, 12, 03, 14, 05, 08, 01, \
+ R1024_5, 8*(R) + 6); \
+ R1024(00, 07, 02, 05, 04, 03, 06, 01, 12, 15, 14, 13, 08, 11, 10, 09, \
+ R1024_6, 8*(R) + 7); \
+ R1024(00, 15, 02, 11, 06, 13, 04, 09, 14, 01, 08, 05, 10, 03, 12, 07, \
+ R1024_7, 8*(R) + 8); \
+ I1024(2*(R)+1); \
+} while (0)
+
+#define R1024_UNROLL_R(NN) \
+ ((SKEIN_UNROLL_1024 == 0 && \
+ SKEIN_1024_ROUNDS_TOTAL/8 > (NN)) || \
+ (SKEIN_UNROLL_1024 > (NN)))
+
+#if (SKEIN_UNROLL_1024 > 14)
+#error "need more unrolling in Skein_1024_Process_Block"
+#endif
+#endif
+
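A minimal sketch (not part of the patch itself) of the Threefish MIX step that the ROUND1024 macro above, and its ROUND256/ROUND512 counterparts, expand into many times over: a 64-bit add, a rotate of the odd word, then an XOR back into it. rol64() is the generic helper from <linux/bitops.h>; the wrapper name skein_mix() is hypothetical.

    #include <linux/bitops.h>
    #include <linux/types.h>

    /* one MIX step: x0 += x1; x1 = rotl64(x1, rot) ^ x0; */
    static inline void skein_mix(u64 *x0, u64 *x1, u8 rot)
    {
            *x0 += *x1;
            *x1 = rol64(*x1, rot) ^ *x0;
    }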
+/***************************** SKEIN_256 ******************************/
+#if !(SKEIN_USE_ASM & 256)
+void skein_256_process_block(struct skein_256_ctx *ctx, const u8 *blk_ptr,
+ size_t blk_cnt, size_t byte_cnt_add)
+{ /* do it in C */
+ enum {
+ WCNT = SKEIN_256_STATE_WORDS
+ };
+ size_t r;
+#if SKEIN_UNROLL_256
+ /* key schedule: chaining vars + tweak + "rot" */
+ u64 kw[WCNT+4+RCNT*2];
+#else
+ /* key schedule words : chaining vars + tweak */
+ u64 kw[WCNT+4];
#endif
u64 X0, X1, X2, X3; /* local copy of context vars, for speed */
u64 w[WCNT]; /* local copy of input block */
#ifdef SKEIN_DEBUG
const u64 *X_ptr[4]; /* use for debugging (help cc put Xn in regs) */
- X_ptr[0] = &X0; X_ptr[1] = &X1; X_ptr[2] = &X2; X_ptr[3] = &X3;
+ X_ptr[0] = &X0;
+ X_ptr[1] = &X1;
+ X_ptr[2] = &X2;
+ X_ptr[3] = &X3;
#endif
skein_assert(blk_cnt != 0); /* never call with blk_cnt == 0! */
ts[0] = ctx->h.tweak[0];
@@ -94,132 +401,62 @@ void skein_256_process_block(struct skein_256_ctx *ctx, const u8 *blk_ptr,
/* get input block in little-endian format */
skein_get64_lsb_first(w, blk_ptr, WCNT);
debug_save_tweak(ctx);
- skein_show_block(BLK_BITS, &ctx->h, ctx->x, blk_ptr, w, ks, ts);
- X0 = w[0] + ks[0]; /* do the first full key injection */
+ /* do the first full key injection */
+ X0 = w[0] + ks[0];
X1 = w[1] + ks[1] + ts[0];
X2 = w[2] + ks[2] + ts[1];
X3 = w[3] + ks[3];
- /* show starting state values */
- skein_show_r_ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INITIAL,
- x_ptr);
-
blk_ptr += SKEIN_256_BLOCK_BYTES;
/* run the rounds */
-
-#define ROUND256(p0, p1, p2, p3, ROT, r_num) \
-do { \
- X##p0 += X##p1; X##p1 = rotl_64(X##p1, ROT##_0); X##p1 ^= X##p0; \
- X##p2 += X##p3; X##p3 = rotl_64(X##p3, ROT##_1); X##p3 ^= X##p2; \
-} while (0)
-
-#if SKEIN_UNROLL_256 == 0
-#define R256(p0, p1, p2, p3, ROT, r_num) /* fully unrolled */ \
-do { \
- ROUND256(p0, p1, p2, p3, ROT, r_num); \
- skein_show_r_ptr(BLK_BITS, &ctx->h, r_num, X_ptr); \
-} while (0)
-
-#define I256(R) \
-do { \
- /* inject the key schedule value */ \
- X0 += ks[((R)+1) % 5]; \
- X1 += ks[((R)+2) % 5] + ts[((R)+1) % 3]; \
- X2 += ks[((R)+3) % 5] + ts[((R)+2) % 3]; \
- X3 += ks[((R)+4) % 5] + (R)+1; \
- skein_show_r_ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INJECT, X_ptr); \
-} while (0)
-#else /* looping version */
-#define R256(p0, p1, p2, p3, ROT, r_num) \
-do { \
- ROUND256(p0, p1, p2, p3, ROT, r_num); \
- skein_show_r_ptr(BLK_BITS, &ctx->h, 4 * (r - 1) + r_num, X_ptr); \
-} while (0)
-
-#define I256(R) \
-do { \
- /* inject the key schedule value */ \
- X0 += ks[r+(R)+0]; \
- X1 += ks[r+(R)+1] + ts[r+(R)+0]; \
- X2 += ks[r+(R)+2] + ts[r+(R)+1]; \
- X3 += ks[r+(R)+3] + r+(R); \
- /* rotate key schedule */ \
- ks[r + (R) + 4] = ks[r + (R) - 1]; \
- ts[r + (R) + 2] = ts[r + (R) - 1]; \
- skein_show_r_ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INJECT, X_ptr); \
-} while (0)
-
- for (r = 1; r < 2 * RCNT; r += 2 * SKEIN_UNROLL_256)
+ for (r = 1;
+ r < (SKEIN_UNROLL_256 ? 2 * RCNT : 2);
+ r += (SKEIN_UNROLL_256 ? 2 * SKEIN_UNROLL_256 : 1)) {
+ R256_8_ROUNDS(0);
+#if R256_UNROLL_R(1)
+ R256_8_ROUNDS(1);
+#endif
+#if R256_UNROLL_R(2)
+ R256_8_ROUNDS(2);
+#endif
+#if R256_UNROLL_R(3)
+ R256_8_ROUNDS(3);
+#endif
+#if R256_UNROLL_R(4)
+ R256_8_ROUNDS(4);
+#endif
+#if R256_UNROLL_R(5)
+ R256_8_ROUNDS(5);
+#endif
+#if R256_UNROLL_R(6)
+ R256_8_ROUNDS(6);
+#endif
+#if R256_UNROLL_R(7)
+ R256_8_ROUNDS(7);
+#endif
+#if R256_UNROLL_R(8)
+ R256_8_ROUNDS(8);
+#endif
+#if R256_UNROLL_R(9)
+ R256_8_ROUNDS(9);
+#endif
+#if R256_UNROLL_R(10)
+ R256_8_ROUNDS(10);
+#endif
+#if R256_UNROLL_R(11)
+ R256_8_ROUNDS(11);
+#endif
+#if R256_UNROLL_R(12)
+ R256_8_ROUNDS(12);
+#endif
+#if R256_UNROLL_R(13)
+ R256_8_ROUNDS(13);
+#endif
+#if R256_UNROLL_R(14)
+ R256_8_ROUNDS(14);
#endif
- {
-#define R256_8_ROUNDS(R) \
-do { \
- R256(0, 1, 2, 3, R_256_0, 8 * (R) + 1); \
- R256(0, 3, 2, 1, R_256_1, 8 * (R) + 2); \
- R256(0, 1, 2, 3, R_256_2, 8 * (R) + 3); \
- R256(0, 3, 2, 1, R_256_3, 8 * (R) + 4); \
- I256(2 * (R)); \
- R256(0, 1, 2, 3, R_256_4, 8 * (R) + 5); \
- R256(0, 3, 2, 1, R_256_5, 8 * (R) + 6); \
- R256(0, 1, 2, 3, R_256_6, 8 * (R) + 7); \
- R256(0, 3, 2, 1, R_256_7, 8 * (R) + 8); \
- I256(2 * (R) + 1); \
-} while (0)
-
- R256_8_ROUNDS(0);
-
-#define R256_UNROLL_R(NN) \
- ((SKEIN_UNROLL_256 == 0 && \
- SKEIN_256_ROUNDS_TOTAL/8 > (NN)) || \
- (SKEIN_UNROLL_256 > (NN)))
-
- #if R256_UNROLL_R(1)
- R256_8_ROUNDS(1);
- #endif
- #if R256_UNROLL_R(2)
- R256_8_ROUNDS(2);
- #endif
- #if R256_UNROLL_R(3)
- R256_8_ROUNDS(3);
- #endif
- #if R256_UNROLL_R(4)
- R256_8_ROUNDS(4);
- #endif
- #if R256_UNROLL_R(5)
- R256_8_ROUNDS(5);
- #endif
- #if R256_UNROLL_R(6)
- R256_8_ROUNDS(6);
- #endif
- #if R256_UNROLL_R(7)
- R256_8_ROUNDS(7);
- #endif
- #if R256_UNROLL_R(8)
- R256_8_ROUNDS(8);
- #endif
- #if R256_UNROLL_R(9)
- R256_8_ROUNDS(9);
- #endif
- #if R256_UNROLL_R(10)
- R256_8_ROUNDS(10);
- #endif
- #if R256_UNROLL_R(11)
- R256_8_ROUNDS(11);
- #endif
- #if R256_UNROLL_R(12)
- R256_8_ROUNDS(12);
- #endif
- #if R256_UNROLL_R(13)
- R256_8_ROUNDS(13);
- #endif
- #if R256_UNROLL_R(14)
- R256_8_ROUNDS(14);
- #endif
- #if (SKEIN_UNROLL_256 > 14)
-#error "need more unrolling in skein_256_process_block"
- #endif
}
/* do the final "feedforward" xor, update context chaining */
ctx->x[0] = X0 ^ w[0];
@@ -227,8 +464,6 @@ do { \
ctx->x[2] = X2 ^ w[2];
ctx->x[3] = X3 ^ w[3];
- skein_show_round(BLK_BITS, &ctx->h, SKEIN_RND_FEED_FWD, ctx->x);
-
ts[1] &= ~SKEIN_T1_FLAG_FIRST;
} while (--blk_cnt);
ctx->h.tweak[0] = ts[0];
@@ -256,20 +491,8 @@ void skein_512_process_block(struct skein_512_ctx *ctx, const u8 *blk_ptr,
enum {
WCNT = SKEIN_512_STATE_WORDS
};
-#undef RCNT
-#define RCNT (SKEIN_512_ROUNDS_TOTAL/8)
-
-#ifdef SKEIN_LOOP /* configure how much to unroll the loop */
-#define SKEIN_UNROLL_512 (((SKEIN_LOOP)/10)%10)
-#else
-#define SKEIN_UNROLL_512 (0)
-#endif
-
-#if SKEIN_UNROLL_512
-#if (RCNT % SKEIN_UNROLL_512)
-#error "Invalid SKEIN_UNROLL_512" /* sanity check on unroll count */
-#endif
size_t r;
+#if SKEIN_UNROLL_512
u64 kw[WCNT+4+RCNT*2]; /* key sched: chaining vars + tweak + "rot"*/
#else
u64 kw[WCNT+4]; /* key schedule words : chaining vars + tweak */
@@ -279,8 +502,14 @@ void skein_512_process_block(struct skein_512_ctx *ctx, const u8 *blk_ptr,
#ifdef SKEIN_DEBUG
const u64 *X_ptr[8]; /* use for debugging (help cc put Xn in regs) */
- X_ptr[0] = &X0; X_ptr[1] = &X1; X_ptr[2] = &X2; X_ptr[3] = &X3;
- X_ptr[4] = &X4; X_ptr[5] = &X5; X_ptr[6] = &X6; X_ptr[7] = &X7;
+ X_ptr[0] = &X0;
+ X_ptr[1] = &X1;
+ X_ptr[2] = &X2;
+ X_ptr[3] = &X3;
+ X_ptr[4] = &X4;
+ X_ptr[5] = &X5;
+ X_ptr[6] = &X6;
+ X_ptr[7] = &X7;
#endif
skein_assert(blk_cnt != 0); /* never call with blk_cnt == 0! */
@@ -310,143 +539,68 @@ void skein_512_process_block(struct skein_512_ctx *ctx, const u8 *blk_ptr,
/* get input block in little-endian format */
skein_get64_lsb_first(w, blk_ptr, WCNT);
debug_save_tweak(ctx);
- skein_show_block(BLK_BITS, &ctx->h, ctx->x, blk_ptr, w, ks, ts);
- X0 = w[0] + ks[0]; /* do the first full key injection */
- X1 = w[1] + ks[1];
- X2 = w[2] + ks[2];
- X3 = w[3] + ks[3];
- X4 = w[4] + ks[4];
- X5 = w[5] + ks[5] + ts[0];
- X6 = w[6] + ks[6] + ts[1];
- X7 = w[7] + ks[7];
+ /* do the first full key injection */
+ X0 = w[0] + ks[0];
+ X1 = w[1] + ks[1];
+ X2 = w[2] + ks[2];
+ X3 = w[3] + ks[3];
+ X4 = w[4] + ks[4];
+ X5 = w[5] + ks[5] + ts[0];
+ X6 = w[6] + ks[6] + ts[1];
+ X7 = w[7] + ks[7];
blk_ptr += SKEIN_512_BLOCK_BYTES;
- skein_show_r_ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INITIAL,
- X_ptr);
/* run the rounds */
-#define ROUND512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) \
-do { \
- X##p0 += X##p1; X##p1 = rotl_64(X##p1, ROT##_0); X##p1 ^= X##p0; \
- X##p2 += X##p3; X##p3 = rotl_64(X##p3, ROT##_1); X##p3 ^= X##p2; \
- X##p4 += X##p5; X##p5 = rotl_64(X##p5, ROT##_2); X##p5 ^= X##p4; \
- X##p6 += X##p7; X##p7 = rotl_64(X##p7, ROT##_3); X##p7 ^= X##p6; \
-} while (0)
-
-#if SKEIN_UNROLL_512 == 0
-#define R512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) /* unrolled */ \
-do { \
- ROUND512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) \
- skein_show_r_ptr(BLK_BITS, &ctx->h, r_num, X_ptr); \
-} while (0)
-
-#define I512(R) \
-do { \
- /* inject the key schedule value */ \
- X0 += ks[((R) + 1) % 9]; \
- X1 += ks[((R) + 2) % 9]; \
- X2 += ks[((R) + 3) % 9]; \
- X3 += ks[((R) + 4) % 9]; \
- X4 += ks[((R) + 5) % 9]; \
- X5 += ks[((R) + 6) % 9] + ts[((R) + 1) % 3]; \
- X6 += ks[((R) + 7) % 9] + ts[((R) + 2) % 3]; \
- X7 += ks[((R) + 8) % 9] + (R) + 1; \
- skein_show_r_ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INJECT, X_ptr); \
-} while (0)
-#else /* looping version */
-#define R512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) \
-do { \
- ROUND512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num); \
- skein_show_r_ptr(BLK_BITS, &ctx->h, 4 * (r - 1) + r_num, X_ptr); \
-} while (0)
-
-#define I512(R) \
-do { \
- /* inject the key schedule value */ \
- X0 += ks[r + (R) + 0]; \
- X1 += ks[r + (R) + 1]; \
- X2 += ks[r + (R) + 2]; \
- X3 += ks[r + (R) + 3]; \
- X4 += ks[r + (R) + 4]; \
- X5 += ks[r + (R) + 5] + ts[r + (R) + 0]; \
- X6 += ks[r + (R) + 6] + ts[r + (R) + 1]; \
- X7 += ks[r + (R) + 7] + r + (R); \
- /* rotate key schedule */ \
- ks[r + (R) + 8] = ks[r + (R) - 1]; \
- ts[r + (R) + 2] = ts[r + (R) - 1]; \
- skein_show_r_ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INJECT, X_ptr); \
-} while (0)
-
- for (r = 1; r < 2 * RCNT; r += 2 * SKEIN_UNROLL_512)
-#endif /* end of looped code definitions */
- {
-#define R512_8_ROUNDS(R) /* do 8 full rounds */ \
-do { \
- R512(0, 1, 2, 3, 4, 5, 6, 7, R_512_0, 8 * (R) + 1); \
- R512(2, 1, 4, 7, 6, 5, 0, 3, R_512_1, 8 * (R) + 2); \
- R512(4, 1, 6, 3, 0, 5, 2, 7, R_512_2, 8 * (R) + 3); \
- R512(6, 1, 0, 7, 2, 5, 4, 3, R_512_3, 8 * (R) + 4); \
- I512(2 * (R)); \
- R512(0, 1, 2, 3, 4, 5, 6, 7, R_512_4, 8 * (R) + 5); \
- R512(2, 1, 4, 7, 6, 5, 0, 3, R_512_5, 8 * (R) + 6); \
- R512(4, 1, 6, 3, 0, 5, 2, 7, R_512_6, 8 * (R) + 7); \
- R512(6, 1, 0, 7, 2, 5, 4, 3, R_512_7, 8 * (R) + 8); \
- I512(2 * (R) + 1); /* and key injection */ \
-} while (0)
+ for (r = 1;
+ r < (SKEIN_UNROLL_512 ? 2 * RCNT : 2);
+ r += (SKEIN_UNROLL_512 ? 2 * SKEIN_UNROLL_512 : 1)) {
R512_8_ROUNDS(0);
-#define R512_UNROLL_R(NN) \
- ((SKEIN_UNROLL_512 == 0 && \
- SKEIN_512_ROUNDS_TOTAL/8 > (NN)) || \
- (SKEIN_UNROLL_512 > (NN)))
-
- #if R512_UNROLL_R(1)
+#if R512_UNROLL_R(1)
R512_8_ROUNDS(1);
- #endif
- #if R512_UNROLL_R(2)
+#endif
+#if R512_UNROLL_R(2)
R512_8_ROUNDS(2);
- #endif
- #if R512_UNROLL_R(3)
+#endif
+#if R512_UNROLL_R(3)
R512_8_ROUNDS(3);
- #endif
- #if R512_UNROLL_R(4)
+#endif
+#if R512_UNROLL_R(4)
R512_8_ROUNDS(4);
- #endif
- #if R512_UNROLL_R(5)
+#endif
+#if R512_UNROLL_R(5)
R512_8_ROUNDS(5);
- #endif
- #if R512_UNROLL_R(6)
+#endif
+#if R512_UNROLL_R(6)
R512_8_ROUNDS(6);
- #endif
- #if R512_UNROLL_R(7)
+#endif
+#if R512_UNROLL_R(7)
R512_8_ROUNDS(7);
- #endif
- #if R512_UNROLL_R(8)
+#endif
+#if R512_UNROLL_R(8)
R512_8_ROUNDS(8);
- #endif
- #if R512_UNROLL_R(9)
+#endif
+#if R512_UNROLL_R(9)
R512_8_ROUNDS(9);
- #endif
- #if R512_UNROLL_R(10)
+#endif
+#if R512_UNROLL_R(10)
R512_8_ROUNDS(10);
- #endif
- #if R512_UNROLL_R(11)
+#endif
+#if R512_UNROLL_R(11)
R512_8_ROUNDS(11);
- #endif
- #if R512_UNROLL_R(12)
+#endif
+#if R512_UNROLL_R(12)
R512_8_ROUNDS(12);
- #endif
- #if R512_UNROLL_R(13)
+#endif
+#if R512_UNROLL_R(13)
R512_8_ROUNDS(13);
- #endif
- #if R512_UNROLL_R(14)
+#endif
+#if R512_UNROLL_R(14)
R512_8_ROUNDS(14);
- #endif
- #if (SKEIN_UNROLL_512 > 14)
-#error "need more unrolling in skein_512_process_block"
- #endif
+#endif
}
/* do the final "feedforward" xor, update context chaining */
@@ -458,7 +612,6 @@ do { \
ctx->x[5] = X5 ^ w[5];
ctx->x[6] = X6 ^ w[6];
ctx->x[7] = X7 ^ w[7];
- skein_show_round(BLK_BITS, &ctx->h, SKEIN_RND_FEED_FWD, ctx->x);
ts[1] &= ~SKEIN_T1_FLAG_FIRST;
} while (--blk_cnt);
@@ -487,20 +640,8 @@ void skein_1024_process_block(struct skein_1024_ctx *ctx, const u8 *blk_ptr,
enum {
WCNT = SKEIN_1024_STATE_WORDS
};
-#undef RCNT
-#define RCNT (SKEIN_1024_ROUNDS_TOTAL/8)
-
-#ifdef SKEIN_LOOP /* configure how much to unroll the loop */
-#define SKEIN_UNROLL_1024 ((SKEIN_LOOP)%10)
-#else
-#define SKEIN_UNROLL_1024 (0)
-#endif
-
-#if (SKEIN_UNROLL_1024 != 0)
-#if (RCNT % SKEIN_UNROLL_1024)
-#error "Invalid SKEIN_UNROLL_1024" /* sanity check on unroll count */
-#endif
size_t r;
+#if (SKEIN_UNROLL_1024 != 0)
u64 kw[WCNT+4+RCNT*2]; /* key sched: chaining vars + tweak + "rot" */
#else
u64 kw[WCNT+4]; /* key schedule words : chaining vars + tweak */
@@ -510,16 +651,6 @@ void skein_1024_process_block(struct skein_1024_ctx *ctx, const u8 *blk_ptr,
u64 X00, X01, X02, X03, X04, X05, X06, X07,
X08, X09, X10, X11, X12, X13, X14, X15;
u64 w[WCNT]; /* local copy of input block */
-#ifdef SKEIN_DEBUG
- const u64 *X_ptr[16]; /* use for debugging (help cc put Xn in regs) */
-
- X_ptr[0] = &X00; X_ptr[1] = &X01; X_ptr[2] = &X02;
- X_ptr[3] = &X03; X_ptr[4] = &X04; X_ptr[5] = &X05;
- X_ptr[6] = &X06; X_ptr[7] = &X07; X_ptr[8] = &X08;
- X_ptr[9] = &X09; X_ptr[10] = &X10; X_ptr[11] = &X11;
- X_ptr[12] = &X12; X_ptr[13] = &X13; X_ptr[14] = &X14;
- X_ptr[15] = &X15;
-#endif
skein_assert(blk_cnt != 0); /* never call with blk_cnt == 0! */
ts[0] = ctx->h.tweak[0];
@@ -548,192 +679,81 @@ void skein_1024_process_block(struct skein_1024_ctx *ctx, const u8 *blk_ptr,
ks[13] = ctx->x[13];
ks[14] = ctx->x[14];
ks[15] = ctx->x[15];
- ks[16] = ks[0] ^ ks[1] ^ ks[2] ^ ks[3] ^
- ks[4] ^ ks[5] ^ ks[6] ^ ks[7] ^
- ks[8] ^ ks[9] ^ ks[10] ^ ks[11] ^
+ ks[16] = ks[0] ^ ks[1] ^ ks[2] ^ ks[3] ^
+ ks[4] ^ ks[5] ^ ks[6] ^ ks[7] ^
+ ks[8] ^ ks[9] ^ ks[10] ^ ks[11] ^
ks[12] ^ ks[13] ^ ks[14] ^ ks[15] ^ SKEIN_KS_PARITY;
- ts[2] = ts[0] ^ ts[1];
+ ts[2] = ts[0] ^ ts[1];
/* get input block in little-endian format */
skein_get64_lsb_first(w, blk_ptr, WCNT);
debug_save_tweak(ctx);
- skein_show_block(BLK_BITS, &ctx->h, ctx->x, blk_ptr, w, ks, ts);
-
- X00 = w[0] + ks[0]; /* do the first full key injection */
- X01 = w[1] + ks[1];
- X02 = w[2] + ks[2];
- X03 = w[3] + ks[3];
- X04 = w[4] + ks[4];
- X05 = w[5] + ks[5];
- X06 = w[6] + ks[6];
- X07 = w[7] + ks[7];
- X08 = w[8] + ks[8];
- X09 = w[9] + ks[9];
- X10 = w[10] + ks[10];
- X11 = w[11] + ks[11];
- X12 = w[12] + ks[12];
- X13 = w[13] + ks[13] + ts[0];
- X14 = w[14] + ks[14] + ts[1];
- X15 = w[15] + ks[15];
-
- skein_show_r_ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INITIAL,
- X_ptr);
-
-#define ROUND1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, \
- pF, ROT, r_num) \
-do { \
- X##p0 += X##p1; X##p1 = rotl_64(X##p1, ROT##_0); X##p1 ^= X##p0; \
- X##p2 += X##p3; X##p3 = rotl_64(X##p3, ROT##_1); X##p3 ^= X##p2; \
- X##p4 += X##p5; X##p5 = rotl_64(X##p5, ROT##_2); X##p5 ^= X##p4; \
- X##p6 += X##p7; X##p7 = rotl_64(X##p7, ROT##_3); X##p7 ^= X##p6; \
- X##p8 += X##p9; X##p9 = rotl_64(X##p9, ROT##_4); X##p9 ^= X##p8; \
- X##pA += X##pB; X##pB = rotl_64(X##pB, ROT##_5); X##pB ^= X##pA; \
- X##pC += X##pD; X##pD = rotl_64(X##pD, ROT##_6); X##pD ^= X##pC; \
- X##pE += X##pF; X##pF = rotl_64(X##pF, ROT##_7); X##pF ^= X##pE; \
-} while (0)
-
-#if SKEIN_UNROLL_1024 == 0
-#define R1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, pF, \
- ROT, rn) \
-do { \
- ROUND1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, \
- pF, ROT, rn); \
- skein_show_r_ptr(BLK_BITS, &ctx->h, rn, X_ptr); \
-} while (0)
-
-#define I1024(R) \
-do { \
- /* inject the key schedule value */ \
- X00 += ks[((R) + 1) % 17]; \
- X01 += ks[((R) + 2) % 17]; \
- X02 += ks[((R) + 3) % 17]; \
- X03 += ks[((R) + 4) % 17]; \
- X04 += ks[((R) + 5) % 17]; \
- X05 += ks[((R) + 6) % 17]; \
- X06 += ks[((R) + 7) % 17]; \
- X07 += ks[((R) + 8) % 17]; \
- X08 += ks[((R) + 9) % 17]; \
- X09 += ks[((R) + 10) % 17]; \
- X10 += ks[((R) + 11) % 17]; \
- X11 += ks[((R) + 12) % 17]; \
- X12 += ks[((R) + 13) % 17]; \
- X13 += ks[((R) + 14) % 17] + ts[((R) + 1) % 3]; \
- X14 += ks[((R) + 15) % 17] + ts[((R) + 2) % 3]; \
- X15 += ks[((R) + 16) % 17] + (R) + 1; \
- skein_show_r_ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INJECT, X_ptr); \
-} while (0)
-#else /* looping version */
-#define R1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, pF, \
- ROT, rn) \
-do { \
- ROUND1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, \
- pF, ROT, rn); \
- skein_show_r_ptr(BLK_BITS, &ctx->h, 4 * (r - 1) + rn, X_ptr); \
-} while (0)
-
-#define I1024(R) \
-do { \
- /* inject the key schedule value */ \
- X00 += ks[r + (R) + 0]; \
- X01 += ks[r + (R) + 1]; \
- X02 += ks[r + (R) + 2]; \
- X03 += ks[r + (R) + 3]; \
- X04 += ks[r + (R) + 4]; \
- X05 += ks[r + (R) + 5]; \
- X06 += ks[r + (R) + 6]; \
- X07 += ks[r + (R) + 7]; \
- X08 += ks[r + (R) + 8]; \
- X09 += ks[r + (R) + 9]; \
- X10 += ks[r + (R) + 10]; \
- X11 += ks[r + (R) + 11]; \
- X12 += ks[r + (R) + 12]; \
- X13 += ks[r + (R) + 13] + ts[r + (R) + 0]; \
- X14 += ks[r + (R) + 14] + ts[r + (R) + 1]; \
- X15 += ks[r + (R) + 15] + r + (R); \
- /* rotate key schedule */ \
- ks[r + (R) + 16] = ks[r + (R) - 1]; \
- ts[r + (R) + 2] = ts[r + (R) - 1]; \
- skein_show_r_ptr(BLK_BITSi, &ctx->h, SKEIN_RND_KEY_INJECT, X_ptr); \
-} while (0)
-
- for (r = 1; r <= 2 * RCNT; r += 2 * SKEIN_UNROLL_1024)
-#endif
- {
-#define R1024_8_ROUNDS(R) \
-do { \
- R1024(00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, 13, 14, 15, \
- R1024_0, 8*(R) + 1); \
- R1024(00, 09, 02, 13, 06, 11, 04, 15, 10, 07, 12, 03, 14, 05, 08, 01, \
- R1024_1, 8*(R) + 2); \
- R1024(00, 07, 02, 05, 04, 03, 06, 01, 12, 15, 14, 13, 08, 11, 10, 09, \
- R1024_2, 8*(R) + 3); \
- R1024(00, 15, 02, 11, 06, 13, 04, 09, 14, 01, 08, 05, 10, 03, 12, 07, \
- R1024_3, 8*(R) + 4); \
- I1024(2*(R)); \
- R1024(00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, 13, 14, 15, \
- R1024_4, 8*(R) + 5); \
- R1024(00, 09, 02, 13, 06, 11, 04, 15, 10, 07, 12, 03, 14, 05, 08, 01, \
- R1024_5, 8*(R) + 6); \
- R1024(00, 07, 02, 05, 04, 03, 06, 01, 12, 15, 14, 13, 08, 11, 10, 09, \
- R1024_6, 8*(R) + 7); \
- R1024(00, 15, 02, 11, 06, 13, 04, 09, 14, 01, 08, 05, 10, 03, 12, 07, \
- R1024_7, 8*(R) + 8); \
- I1024(2*(R)+1); \
-} while (0)
+ /* do the first full key injection */
+ X00 = w[0] + ks[0];
+ X01 = w[1] + ks[1];
+ X02 = w[2] + ks[2];
+ X03 = w[3] + ks[3];
+ X04 = w[4] + ks[4];
+ X05 = w[5] + ks[5];
+ X06 = w[6] + ks[6];
+ X07 = w[7] + ks[7];
+ X08 = w[8] + ks[8];
+ X09 = w[9] + ks[9];
+ X10 = w[10] + ks[10];
+ X11 = w[11] + ks[11];
+ X12 = w[12] + ks[12];
+ X13 = w[13] + ks[13] + ts[0];
+ X14 = w[14] + ks[14] + ts[1];
+ X15 = w[15] + ks[15];
+
+ for (r = 1;
+ r < (SKEIN_UNROLL_1024 ? 2 * RCNT : 2);
+ r += (SKEIN_UNROLL_1024 ? 2 * SKEIN_UNROLL_1024 : 1)) {
R1024_8_ROUNDS(0);
-
-#define R1024_UNROLL_R(NN) \
- ((SKEIN_UNROLL_1024 == 0 && \
- SKEIN_1024_ROUNDS_TOTAL/8 > (NN)) || \
- (SKEIN_UNROLL_1024 > (NN)))
-
- #if R1024_UNROLL_R(1)
+#if R1024_UNROLL_R(1)
R1024_8_ROUNDS(1);
- #endif
- #if R1024_UNROLL_R(2)
+#endif
+#if R1024_UNROLL_R(2)
R1024_8_ROUNDS(2);
- #endif
- #if R1024_UNROLL_R(3)
+#endif
+#if R1024_UNROLL_R(3)
R1024_8_ROUNDS(3);
- #endif
- #if R1024_UNROLL_R(4)
+#endif
+#if R1024_UNROLL_R(4)
R1024_8_ROUNDS(4);
- #endif
- #if R1024_UNROLL_R(5)
+#endif
+#if R1024_UNROLL_R(5)
R1024_8_ROUNDS(5);
- #endif
- #if R1024_UNROLL_R(6)
+#endif
+#if R1024_UNROLL_R(6)
R1024_8_ROUNDS(6);
- #endif
- #if R1024_UNROLL_R(7)
+#endif
+#if R1024_UNROLL_R(7)
R1024_8_ROUNDS(7);
- #endif
- #if R1024_UNROLL_R(8)
+#endif
+#if R1024_UNROLL_R(8)
R1024_8_ROUNDS(8);
- #endif
- #if R1024_UNROLL_R(9)
+#endif
+#if R1024_UNROLL_R(9)
R1024_8_ROUNDS(9);
- #endif
- #if R1024_UNROLL_R(10)
+#endif
+#if R1024_UNROLL_R(10)
R1024_8_ROUNDS(10);
- #endif
- #if R1024_UNROLL_R(11)
+#endif
+#if R1024_UNROLL_R(11)
R1024_8_ROUNDS(11);
- #endif
- #if R1024_UNROLL_R(12)
+#endif
+#if R1024_UNROLL_R(12)
R1024_8_ROUNDS(12);
- #endif
- #if R1024_UNROLL_R(13)
+#endif
+#if R1024_UNROLL_R(13)
R1024_8_ROUNDS(13);
- #endif
- #if R1024_UNROLL_R(14)
+#endif
+#if R1024_UNROLL_R(14)
R1024_8_ROUNDS(14);
- #endif
-#if (SKEIN_UNROLL_1024 > 14)
-#error "need more unrolling in Skein_1024_Process_Block"
- #endif
+#endif
}
/* do the final "feedforward" xor, update context chaining */
@@ -754,8 +774,6 @@ do { \
ctx->x[14] = X14 ^ w[14];
ctx->x[15] = X15 ^ w[15];
- skein_show_round(BLK_BITS, &ctx->h, SKEIN_RND_FEED_FWD, ctx->x);
-
ts[1] &= ~SKEIN_T1_FLAG_FIRST;
blk_ptr += SKEIN_1024_BLOCK_BYTES;
} while (--blk_cnt);
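A small sketch (not part of this patch) of how the merged round loops above behave. With the bound written as (UNROLL ? 2 * RCNT : 2) and the step as (UNROLL ? 2 * UNROLL : 1), the body executes exactly once in the fully-unrolled case (UNROLL == 0, where the R*_UNROLL_R(NN) preprocessor tests emit all RCNT passes inline) and RCNT/UNROLL times otherwise. The helper name below is hypothetical.

    /* counts how many times the merged loop body executes */
    static unsigned int round_loop_iterations(unsigned int rcnt, unsigned int unroll)
    {
            unsigned int r, n = 0;

            for (r = 1;
                 r < (unroll ? 2 * rcnt : 2);
                 r += (unroll ? 2 * unroll : 1))
                    n++;
            return n;       /* 1 when unroll == 0, rcnt / unroll otherwise */
    }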
diff --git a/drivers/staging/skein/skein_block.h b/drivers/staging/skein/skein_block.h
index bd7bdc35df2..9d40f4a5267 100644
--- a/drivers/staging/skein/skein_block.h
+++ b/drivers/staging/skein/skein_block.h
@@ -10,7 +10,7 @@
#ifndef _SKEIN_BLOCK_H_
#define _SKEIN_BLOCK_H_
-#include "skein.h" /* get the Skein API definitions */
+#include "skein_base.h" /* get the Skein API definitions */
void skein_256_process_block(struct skein_256_ctx *ctx, const u8 *blk_ptr,
size_t blk_cnt, size_t byte_cnt_add);
diff --git a/drivers/staging/skein/skein_generic.c b/drivers/staging/skein/skein_generic.c
new file mode 100644
index 00000000000..85bd7d0168b
--- /dev/null
+++ b/drivers/staging/skein/skein_generic.c
@@ -0,0 +1,216 @@
+/*
+ * Cryptographic API.
+ *
+ * Skein256 Hash Algorithm.
+ *
+ * Derived from cryptoapi implementation, adapted for in-place
+ * scatterlist interface.
+ *
+ * Copyright (c) Eric Rost <eric.rost@mybabylon.net>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <crypto/internal/hash.h>
+#include "skein_base.h"
+
+static int skein256_init(struct shash_desc *desc)
+{
+ return skein_256_init((struct skein_256_ctx *) shash_desc_ctx(desc),
+ SKEIN256_DIGEST_BIT_SIZE);
+}
+
+static int skein256_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ return skein_256_update((struct skein_256_ctx *)shash_desc_ctx(desc),
+ data, len);
+}
+
+static int skein256_final(struct shash_desc *desc, u8 *out)
+{
+ return skein_256_final((struct skein_256_ctx *)shash_desc_ctx(desc),
+ out);
+}
+
+static int skein256_export(struct shash_desc *desc, void *out)
+{
+ struct skein_256_ctx *sctx = shash_desc_ctx(desc);
+
+ memcpy(out, sctx, sizeof(*sctx));
+ return 0;
+}
+
+static int skein256_import(struct shash_desc *desc, const void *in)
+{
+ struct skein_256_ctx *sctx = shash_desc_ctx(desc);
+
+ memcpy(sctx, in, sizeof(*sctx));
+ return 0;
+}
+
+static int skein512_init(struct shash_desc *desc)
+{
+ return skein_512_init((struct skein_512_ctx *)shash_desc_ctx(desc),
+ SKEIN512_DIGEST_BIT_SIZE);
+}
+
+static int skein512_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ return skein_512_update((struct skein_512_ctx *)shash_desc_ctx(desc),
+ data, len);
+}
+
+static int skein512_final(struct shash_desc *desc, u8 *out)
+{
+ return skein_512_final((struct skein_512_ctx *)shash_desc_ctx(desc),
+ out);
+}
+
+static int skein512_export(struct shash_desc *desc, void *out)
+{
+ struct skein_512_ctx *sctx = shash_desc_ctx(desc);
+
+ memcpy(out, sctx, sizeof(*sctx));
+ return 0;
+}
+
+static int skein512_import(struct shash_desc *desc, const void *in)
+{
+ struct skein_512_ctx *sctx = shash_desc_ctx(desc);
+
+ memcpy(sctx, in, sizeof(*sctx));
+ return 0;
+}
+
+static int skein1024_init(struct shash_desc *desc)
+{
+ return skein_1024_init((struct skein_1024_ctx *)shash_desc_ctx(desc),
+ SKEIN1024_DIGEST_BIT_SIZE);
+}
+
+static int skein1024_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ return skein_1024_update((struct skein_1024_ctx *)shash_desc_ctx(desc),
+ data, len);
+}
+
+static int skein1024_final(struct shash_desc *desc, u8 *out)
+{
+ return skein_1024_final((struct skein_1024_ctx *)shash_desc_ctx(desc),
+ out);
+}
+
+static int skein1024_export(struct shash_desc *desc, void *out)
+{
+ struct skein_1024_ctx *sctx = shash_desc_ctx(desc);
+
+ memcpy(out, sctx, sizeof(*sctx));
+ return 0;
+}
+
+static int skein1024_import(struct shash_desc *desc, const void *in)
+{
+ struct skein_1024_ctx *sctx = shash_desc_ctx(desc);
+
+ memcpy(sctx, in, sizeof(*sctx));
+ return 0;
+}
+
+static struct shash_alg alg256 = {
+ .digestsize = (SKEIN256_DIGEST_BIT_SIZE / 8),
+ .init = skein256_init,
+ .update = skein256_update,
+ .final = skein256_final,
+ .export = skein256_export,
+ .import = skein256_import,
+ .descsize = sizeof(struct skein_256_ctx),
+ .statesize = sizeof(struct skein_256_ctx),
+ .base = {
+ .cra_name = "skein256",
+ .cra_driver_name = "skein",
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SKEIN_256_BLOCK_BYTES,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+static struct shash_alg alg512 = {
+ .digestsize = (SKEIN512_DIGEST_BIT_SIZE / 8),
+ .init = skein512_init,
+ .update = skein512_update,
+ .final = skein512_final,
+ .export = skein512_export,
+ .import = skein512_import,
+ .descsize = sizeof(struct skein_512_ctx),
+ .statesize = sizeof(struct skein_512_ctx),
+ .base = {
+ .cra_name = "skein512",
+ .cra_driver_name = "skein",
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SKEIN_512_BLOCK_BYTES,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+static struct shash_alg alg1024 = {
+ .digestsize = (SKEIN1024_DIGEST_BIT_SIZE / 8),
+ .init = skein1024_init,
+ .update = skein1024_update,
+ .final = skein1024_final,
+ .export = skein1024_export,
+ .import = skein1024_import,
+ .descsize = sizeof(struct skein_1024_ctx),
+ .statesize = sizeof(struct skein_1024_ctx),
+ .base = {
+ .cra_name = "skein1024",
+ .cra_driver_name = "skein",
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SKEIN_1024_BLOCK_BYTES,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+static int __init skein_generic_init(void)
+{
+ if (crypto_register_shash(&alg256))
+ goto out;
+ if (crypto_register_shash(&alg512))
+ goto unreg256;
+ if (crypto_register_shash(&alg1024))
+ goto unreg512;
+
+ return 0;
+
+unreg512:
+ crypto_unregister_shash(&alg512);
+unreg256:
+ crypto_unregister_shash(&alg256);
+out:
+ return -1;
+}
+
+static void __exit skein_generic_fini(void)
+{
+ crypto_unregister_shash(&alg256);
+ crypto_unregister_shash(&alg512);
+ crypto_unregister_shash(&alg1024);
+}
+
+module_init(skein_generic_init);
+module_exit(skein_generic_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Skein Hash Algorithm");
+
+MODULE_ALIAS("skein");
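Once the three shash_alg structures above are registered, other kernel code can reach them by their cra_name ("skein256", "skein512", "skein1024") through the generic crypto API. A minimal caller-side sketch, not part of this patch; the function name and the assumption that the caller passes a 32-byte output buffer are illustrative only.

    #include <crypto/hash.h>
    #include <linux/err.h>
    #include <linux/slab.h>

    static int skein256_digest_example(const u8 *data, unsigned int len, u8 *out)
    {
            struct crypto_shash *tfm;
            struct shash_desc *desc;
            int ret;

            tfm = crypto_alloc_shash("skein256", 0, 0);     /* cra_name from alg256 */
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            /* the descriptor carries the per-request state (struct skein_256_ctx) */
            desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
            if (!desc) {
                    crypto_free_shash(tfm);
                    return -ENOMEM;
            }
            desc->tfm = tfm;

            ret = crypto_shash_digest(desc, data, len, out); /* init + update + final */

            kfree(desc);
            crypto_free_shash(tfm);
            return ret;
    }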
diff --git a/drivers/staging/skein/skein_iv.h b/drivers/staging/skein/skein_iv.h
index d9dc1d5ed55..8a06314d0ed 100644
--- a/drivers/staging/skein/skein_iv.h
+++ b/drivers/staging/skein/skein_iv.h
@@ -1,7 +1,7 @@
#ifndef _SKEIN_IV_H_
#define _SKEIN_IV_H_
-#include "skein.h" /* get Skein macros and types */
+#include "skein_base.h" /* get Skein macros and types */
/*
***************** Pre-computed Skein IVs *******************
diff --git a/drivers/staging/skein/threefish_api.h b/drivers/staging/skein/threefish_api.h
index 8d5ddf8b3a9..8e0a0b77ecc 100644
--- a/drivers/staging/skein/threefish_api.h
+++ b/drivers/staging/skein/threefish_api.h
@@ -29,7 +29,7 @@
*/
#include <linux/types.h>
-#include "skein.h"
+#include "skein_base.h"
#define KEY_SCHEDULE_CONST 0x1BD11BDAA9FC1A22L
diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c
index 56ca3b6c144..42d62ef56cb 100644
--- a/drivers/staging/slicoss/slicoss.c
+++ b/drivers/staging/slicoss/slicoss.c
@@ -498,12 +498,14 @@ static int slic_card_download(struct adapter *adapter)
slic_reg32_write(&slic_regs->slic_wcs,
baseaddress + codeaddr, FLUSH);
/* Write out instruction to low addr */
- slic_reg32_write(&slic_regs->slic_wcs, instruction, FLUSH);
+ slic_reg32_write(&slic_regs->slic_wcs,
+ instruction, FLUSH);
instruction = *(u32 *)(fw->data + index);
index += 4;
/* Write out instruction to high addr */
- slic_reg32_write(&slic_regs->slic_wcs, instruction, FLUSH);
+ slic_reg32_write(&slic_regs->slic_wcs,
+ instruction, FLUSH);
instruction = *(u32 *)(fw->data + index);
index += 4;
}
@@ -596,8 +598,7 @@ static void slic_mac_address_config(struct adapter *adapter)
u32 value2;
__iomem struct slic_regs *slic_regs = adapter->slic_regs;
- value = *(u32 *) &adapter->currmacaddr[2];
- value = ntohl(value);
+ value = ntohl(*(__be32 *) &adapter->currmacaddr[2]);
slic_reg32_write(&slic_regs->slic_wraddral, value, FLUSH);
slic_reg32_write(&slic_regs->slic_wraddrbl, value, FLUSH);
@@ -1533,14 +1534,18 @@ retry_rcvqfill:
dev_err(dev, "%s: LOW 32bits PHYSICAL ADDRESS == 0\n",
__func__);
dev_err(dev, "skb[%p] PROBLEM\n", skb);
- dev_err(dev, " skbdata[%p]\n", skb->data);
+ dev_err(dev, " skbdata[%p]\n",
+ skb->data);
dev_err(dev, " skblen[%x]\n", skb->len);
dev_err(dev, " paddr[%p]\n", paddr);
dev_err(dev, " paddrl[%x]\n", paddrl);
dev_err(dev, " paddrh[%x]\n", paddrh);
- dev_err(dev, " rcvq->head[%p]\n", rcvq->head);
- dev_err(dev, " rcvq->tail[%p]\n", rcvq->tail);
- dev_err(dev, " rcvq->count[%x]\n", rcvq->count);
+ dev_err(dev, " rcvq->head[%p]\n",
+ rcvq->head);
+ dev_err(dev, " rcvq->tail[%p]\n",
+ rcvq->tail);
+ dev_err(dev, " rcvq->count[%x]\n",
+ rcvq->count);
dev_err(dev, "SKIP THIS SKB!!!!!!!!\n");
goto retry_rcvqfill;
}
@@ -1549,14 +1554,18 @@ retry_rcvqfill:
dev_err(dev, "%s: LOW 32bits PHYSICAL ADDRESS == 0\n",
__func__);
dev_err(dev, "skb[%p] PROBLEM\n", skb);
- dev_err(dev, " skbdata[%p]\n", skb->data);
+ dev_err(dev, " skbdata[%p]\n",
+ skb->data);
dev_err(dev, " skblen[%x]\n", skb->len);
dev_err(dev, " paddr[%p]\n", paddr);
dev_err(dev, " paddrl[%x]\n", paddrl);
dev_err(dev, " paddrh[%x]\n", paddrh);
- dev_err(dev, " rcvq->head[%p]\n", rcvq->head);
- dev_err(dev, " rcvq->tail[%p]\n", rcvq->tail);
- dev_err(dev, " rcvq->count[%x]\n", rcvq->count);
+ dev_err(dev, " rcvq->head[%p]\n",
+ rcvq->head);
+ dev_err(dev, " rcvq->tail[%p]\n",
+ rcvq->tail);
+ dev_err(dev, " rcvq->count[%x]\n",
+ rcvq->count);
dev_err(dev, "GIVE TO CARD ANYWAY\n");
}
#endif
@@ -1612,7 +1621,7 @@ static int slic_rcvqueue_init(struct adapter *adapter)
rcvq->size = SLIC_RCVQ_ENTRIES;
rcvq->errors = 0;
rcvq->count = 0;
- i = (SLIC_RCVQ_ENTRIES / SLIC_RCVQ_FILLENTRIES);
+ i = SLIC_RCVQ_ENTRIES / SLIC_RCVQ_FILLENTRIES;
count = 0;
while (i) {
count += slic_rcvqueue_fill(adapter);
@@ -1788,7 +1797,7 @@ static int slic_mcast_add_list(struct adapter *adapter, char *address)
if (mcaddr == NULL)
return 1;
- memcpy(mcaddr->address, address, ETH_ALEN);
+ ether_addr_copy(mcaddr->address, address);
mcaddr->next = adapter->mcastaddrs;
adapter->mcastaddrs = mcaddr;
@@ -1885,7 +1894,8 @@ static void slic_xmit_fail(struct adapter *adapter,
break;
case XMIT_FAIL_HOSTCMD_FAIL:
dev_err(&adapter->netdev->dev,
- "xmit_start skb[%p] type[%x] No host commands available\n", skb, skb->pkt_type);
+ "xmit_start skb[%p] type[%x] No host commands available\n",
+ skb, skb->pkt_type);
break;
}
}
@@ -2097,7 +2107,8 @@ static void slic_interrupt_card_up(u32 isr, struct adapter *adapter,
}
} else if (isr & ISR_XDROP) {
dev_err(&dev->dev,
- "isr & ISR_ERR [%x] ISR_XDROP\n", isr);
+ "isr & ISR_ERR [%x] ISR_XDROP\n",
+ isr);
} else {
dev_err(&dev->dev,
"isr & ISR_ERR [%x]\n",
@@ -2341,7 +2352,8 @@ static int slic_if_init(struct adapter *adapter)
SLIC_GET_ADDR_LOW(&pshmem->isr), FLUSH);
#else
slic_reg32_write(&slic_regs->slic_addr_upper, 0, DONT_FLUSH);
- slic_reg32_write(&slic_regs->slic_isp, (u32)&pshmem->isr, FLUSH);
+ slic_reg32_write(&slic_regs->slic_isp, (u32)&pshmem->isr,
+ FLUSH);
#endif
spin_unlock_irqrestore(&adapter->bit64reglock.lock,
adapter->bit64reglock.flags);
diff --git a/drivers/staging/speakup/kobjects.c b/drivers/staging/speakup/kobjects.c
index bcc7f62654f..b12c76de60b 100644
--- a/drivers/staging/speakup/kobjects.c
+++ b/drivers/staging/speakup/kobjects.c
@@ -81,7 +81,7 @@ static ssize_t chars_chartab_show(struct kobject *kobj,
static void report_char_chartab_status(int reset, int received, int used,
int rejected, int do_characters)
{
- char *object_type[] = {
+ static char const *object_type[] = {
"character class entries",
"character descriptions",
};
@@ -809,10 +809,10 @@ static ssize_t message_store_helper(const char *buf, size_t count,
if (msg_stored == -ENOMEM)
reset = 1;
break;
- } else {
- used++;
}
+ used++;
+
cp = linefeed + 1;
}
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 3f30a1b6e72..e9f0c150d24 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -695,7 +695,7 @@ static void say_next_word(struct vc_data *vc)
static void spell_word(struct vc_data *vc)
{
- static char *delay_str[] = { "", ",", ".", ". .", ". . ." };
+ static char const *delay_str[] = { "", ",", ".", ". .", ". . ." };
char *cp = buf, *str_cap = spk_str_caps_stop;
char *cp1, *last_cap = spk_str_caps_stop;
u_char ch;
diff --git a/drivers/staging/speakup/speakup_dtlk.c b/drivers/staging/speakup/speakup_dtlk.c
index d7d51527389..4e059ea78d4 100644
--- a/drivers/staging/speakup/speakup_dtlk.c
+++ b/drivers/staging/speakup/speakup_dtlk.c
@@ -231,7 +231,7 @@ static void do_catch_up(struct spk_synth *synth)
if (ch == '\n')
ch = PROCSPEECH;
spk_out(ch);
- if ((jiffies >= jiff_max) && (ch == SPACE)) {
+ if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) {
spk_out(PROCSPEECH);
spin_lock_irqsave(&speakup_info.spinlock, flags);
delay_time_val = delay_time->u.n.value;
diff --git a/drivers/staging/speakup/speakup_keypc.c b/drivers/staging/speakup/speakup_keypc.c
index 4ed38898a17..cef20fdda64 100644
--- a/drivers/staging/speakup/speakup_keypc.c
+++ b/drivers/staging/speakup/speakup_keypc.c
@@ -229,7 +229,7 @@ spin_lock_irqsave(&speakup_info.spinlock, flags);
ch = PROCSPEECH;
outb_p(ch, synth_port);
SWAIT;
- if ((jiffies >= jiff_max) && (ch == SPACE)) {
+ if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) {
timeout = 1000;
while (synth_writable())
if (--timeout <= 0)
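The two speakup hunks above replace the open-coded "jiffies >= jiff_max" test with time_after_eq(), which stays correct when the jiffies counter wraps around. A rough sketch of what the <linux/jiffies.h> helper reduces to (type checking omitted), shown only to make the wraparound-safe arithmetic explicit; the macro name is a placeholder.

    /* true when a is at or after b, even if the counter has wrapped */
    #define example_time_after_eq(a, b)     ((long)((a) - (b)) >= 0)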
diff --git a/drivers/staging/unisys/channels/channel.c b/drivers/staging/unisys/channels/channel.c
index b4bdee4b575..74cc4d6b515 100644
--- a/drivers/staging/unisys/channels/channel.c
+++ b/drivers/staging/unisys/channels/channel.c
@@ -43,45 +43,45 @@
* Return value:
* 1 if the insertion succeeds, 0 if the queue was full.
*/
-unsigned char
-visor_signal_insert(CHANNEL_HEADER __iomem *pChannel, u32 Queue, void *pSignal)
+unsigned char spar_signal_insert(struct channel_header __iomem *ch, u32 queue,
+ void *sig)
{
void __iomem *psignal;
unsigned int head, tail, nof;
- SIGNAL_QUEUE_HEADER __iomem *pqhdr =
- (SIGNAL_QUEUE_HEADER __iomem *)
- ((char __iomem *) pChannel + readq(&pChannel->oChannelSpace))
- + Queue;
+ struct signal_queue_header __iomem *pqhdr =
+ (struct signal_queue_header __iomem *)
+ ((char __iomem *)ch + readq(&ch->ch_space_offset))
+ + queue;
/* capture current head and tail */
- head = readl(&pqhdr->Head);
- tail = readl(&pqhdr->Tail);
+ head = readl(&pqhdr->head);
+ tail = readl(&pqhdr->tail);
/* queue is full if (head + 1) % n equals tail */
- if (((head + 1) % readl(&pqhdr->MaxSignalSlots)) == tail) {
- nof = readq(&pqhdr->NumOverflows) + 1;
- writeq(nof, &pqhdr->NumOverflows);
+ if (((head + 1) % readl(&pqhdr->max_slots)) == tail) {
+ nof = readq(&pqhdr->num_overflows) + 1;
+ writeq(nof, &pqhdr->num_overflows);
return 0;
}
/* increment the head index */
- head = (head + 1) % readl(&pqhdr->MaxSignalSlots);
+ head = (head + 1) % readl(&pqhdr->max_slots);
/* copy signal to the head location from the area pointed to
* by pSignal
*/
- psignal = (char __iomem *)pqhdr + readq(&pqhdr->oSignalBase) +
- (head * readl(&pqhdr->SignalSize));
- memcpy_toio(psignal, pSignal, readl(&pqhdr->SignalSize));
+ psignal = (char __iomem *)pqhdr + readq(&pqhdr->sig_base_offset) +
+ (head * readl(&pqhdr->signal_size));
+ memcpy_toio(psignal, sig, readl(&pqhdr->signal_size));
mb(); /* channel synch */
- writel(head, &pqhdr->Head);
+ writel(head, &pqhdr->head);
- writeq(readq(&pqhdr->NumSignalsSent) + 1, &pqhdr->NumSignalsSent);
+ writeq(readq(&pqhdr->num_sent) + 1, &pqhdr->num_sent);
return 1;
}
-EXPORT_SYMBOL_GPL(visor_signal_insert);
+EXPORT_SYMBOL_GPL(spar_signal_insert);
/*
* Routine Description:
@@ -102,40 +102,40 @@ EXPORT_SYMBOL_GPL(visor_signal_insert);
* 1 if the removal succeeds, 0 if the queue was empty.
*/
unsigned char
-visor_signal_remove(CHANNEL_HEADER __iomem *pChannel, u32 Queue, void *pSignal)
+spar_signal_remove(struct channel_header __iomem *ch, u32 queue, void *sig)
{
void __iomem *psource;
unsigned int head, tail;
- SIGNAL_QUEUE_HEADER __iomem *pqhdr =
- (SIGNAL_QUEUE_HEADER __iomem *) ((char __iomem *) pChannel +
- readq(&pChannel->oChannelSpace)) + Queue;
+ struct signal_queue_header __iomem *pqhdr =
+ (struct signal_queue_header __iomem *)((char __iomem *)ch +
+ readq(&ch->ch_space_offset)) + queue;
/* capture current head and tail */
- head = readl(&pqhdr->Head);
- tail = readl(&pqhdr->Tail);
+ head = readl(&pqhdr->head);
+ tail = readl(&pqhdr->tail);
/* queue is empty if the head index equals the tail index */
if (head == tail) {
- writeq(readq(&pqhdr->NumEmptyCnt) + 1, &pqhdr->NumEmptyCnt);
+ writeq(readq(&pqhdr->num_empty) + 1, &pqhdr->num_empty);
return 0;
}
/* advance past the 'empty' front slot */
- tail = (tail + 1) % readl(&pqhdr->MaxSignalSlots);
+ tail = (tail + 1) % readl(&pqhdr->max_slots);
/* copy signal from tail location to the area pointed to by pSignal */
- psource = (char __iomem *) pqhdr + readq(&pqhdr->oSignalBase) +
- (tail * readl(&pqhdr->SignalSize));
- memcpy_fromio(pSignal, psource, readl(&pqhdr->SignalSize));
+ psource = (char __iomem *)pqhdr + readq(&pqhdr->sig_base_offset) +
+ (tail * readl(&pqhdr->signal_size));
+ memcpy_fromio(sig, psource, readl(&pqhdr->signal_size));
mb(); /* channel synch */
- writel(tail, &pqhdr->Tail);
+ writel(tail, &pqhdr->tail);
- writeq(readq(&pqhdr->NumSignalsReceived) + 1,
- &pqhdr->NumSignalsReceived);
+ writeq(readq(&pqhdr->num_received) + 1,
+ &pqhdr->num_received);
return 1;
}
-EXPORT_SYMBOL_GPL(visor_signal_remove);
+EXPORT_SYMBOL_GPL(spar_signal_remove);
/*
* Routine Description:
@@ -156,18 +156,18 @@ EXPORT_SYMBOL_GPL(visor_signal_remove);
* Return value:
* # of signals copied.
*/
-unsigned int
-SignalRemoveAll(pCHANNEL_HEADER pChannel, u32 Queue, void *pSignal)
+unsigned int spar_signal_remove_all(struct channel_header *ch, u32 queue,
+ void *sig)
{
void *psource;
- unsigned int head, tail, signalCount = 0;
- pSIGNAL_QUEUE_HEADER pqhdr =
- (pSIGNAL_QUEUE_HEADER) ((char *) pChannel +
- pChannel->oChannelSpace) + Queue;
+ unsigned int head, tail, count = 0;
+ struct signal_queue_header *pqhdr =
+ (struct signal_queue_header *)((char *)ch +
+ ch->ch_space_offset) + queue;
/* capture current head and tail */
- head = pqhdr->Head;
- tail = pqhdr->Tail;
+ head = pqhdr->head;
+ tail = pqhdr->tail;
/* queue is empty if the head index equals the tail index */
if (head == tail)
@@ -175,25 +175,25 @@ SignalRemoveAll(pCHANNEL_HEADER pChannel, u32 Queue, void *pSignal)
while (head != tail) {
/* advance past the 'empty' front slot */
- tail = (tail + 1) % pqhdr->MaxSignalSlots;
+ tail = (tail + 1) % pqhdr->max_slots;
/* copy signal from tail location to the area pointed
* to by pSignal
*/
psource =
- (char *) pqhdr + pqhdr->oSignalBase +
- (tail * pqhdr->SignalSize);
- memcpy((char *) pSignal + (pqhdr->SignalSize * signalCount),
- psource, pqhdr->SignalSize);
+ (char *)pqhdr + pqhdr->sig_base_offset +
+ (tail * pqhdr->signal_size);
+ memcpy((char *)sig + (pqhdr->signal_size * count),
+ psource, pqhdr->signal_size);
mb(); /* channel synch */
- pqhdr->Tail = tail;
+ pqhdr->tail = tail;
- signalCount++;
- pqhdr->NumSignalsReceived++;
+ count++;
+ pqhdr->num_received++;
}
- return signalCount;
+ return count;
}
/*
@@ -207,13 +207,13 @@ SignalRemoveAll(pCHANNEL_HEADER pChannel, u32 Queue, void *pSignal)
* Return value:
* 1 if the signal queue is empty, 0 otherwise.
*/
-unsigned char
-visor_signalqueue_empty(CHANNEL_HEADER __iomem *pChannel, u32 Queue)
+unsigned char spar_signalqueue_empty(struct channel_header __iomem *ch,
+ u32 queue)
{
- SIGNAL_QUEUE_HEADER __iomem *pqhdr =
- (SIGNAL_QUEUE_HEADER __iomem *) ((char __iomem *) pChannel +
- readq(&pChannel->oChannelSpace)) + Queue;
- return readl(&pqhdr->Head) == readl(&pqhdr->Tail);
+ struct signal_queue_header __iomem *pqhdr =
+ (struct signal_queue_header __iomem *)((char __iomem *)ch +
+ readq(&ch->ch_space_offset)) + queue;
+ return readl(&pqhdr->head) == readl(&pqhdr->tail);
}
-EXPORT_SYMBOL_GPL(visor_signalqueue_empty);
+EXPORT_SYMBOL_GPL(spar_signalqueue_empty);
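A standalone sketch (not part of this patch) of the head/tail arithmetic used by spar_signal_insert() and spar_signal_remove() above: the queue keeps one slot permanently empty, so it is full when advancing the head would land on the tail, and empty when head equals tail. Structure and function names here are hypothetical.

    struct ring {
            unsigned int head;      /* last slot written (maps to pqhdr->head) */
            unsigned int tail;      /* last slot consumed (maps to pqhdr->tail) */
            unsigned int slots;     /* maps to pqhdr->max_slots */
    };

    static int ring_full(const struct ring *r)
    {
            return ((r->head + 1) % r->slots) == r->tail;   /* insert would overflow */
    }

    static int ring_empty(const struct ring *r)
    {
            return r->head == r->tail;                      /* nothing to remove */
    }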
diff --git a/drivers/staging/unisys/channels/chanstub.c b/drivers/staging/unisys/channels/chanstub.c
index d54c5d635a9..b6fd126f16f 100644
--- a/drivers/staging/unisys/channels/chanstub.c
+++ b/drivers/staging/unisys/channels/chanstub.c
@@ -42,26 +42,26 @@ channel_mod_exit(void)
}
unsigned char
-SignalInsert_withLock(CHANNEL_HEADER __iomem *pChannel, u32 Queue,
+SignalInsert_withLock(struct channel_header __iomem *pChannel, u32 Queue,
void *pSignal, spinlock_t *lock)
{
unsigned char result;
unsigned long flags;
spin_lock_irqsave(lock, flags);
- result = visor_signal_insert(pChannel, Queue, pSignal);
+ result = spar_signal_insert(pChannel, Queue, pSignal);
spin_unlock_irqrestore(lock, flags);
return result;
}
unsigned char
-SignalRemove_withLock(CHANNEL_HEADER __iomem *pChannel, u32 Queue,
+SignalRemove_withLock(struct channel_header __iomem *pChannel, u32 Queue,
void *pSignal, spinlock_t *lock)
{
unsigned char result;
spin_lock(lock);
- result = visor_signal_remove(pChannel, Queue, pSignal);
+ result = spar_signal_remove(pChannel, Queue, pSignal);
spin_unlock(lock);
return result;
}
diff --git a/drivers/staging/unisys/channels/chanstub.h b/drivers/staging/unisys/channels/chanstub.h
index d08e2c69d2a..1531759a1b3 100644
--- a/drivers/staging/unisys/channels/chanstub.h
+++ b/drivers/staging/unisys/channels/chanstub.h
@@ -15,9 +15,9 @@
#ifndef __CHANSTUB_H__
#define __CHANSTUB_H__
-unsigned char SignalInsert_withLock(CHANNEL_HEADER __iomem *pChannel, u32 Queue,
- void *pSignal, spinlock_t *lock);
-unsigned char SignalRemove_withLock(CHANNEL_HEADER __iomem *pChannel, u32 Queue,
- void *pSignal, spinlock_t *lock);
+unsigned char SignalInsert_withLock(struct channel_header __iomem *pChannel,
+ u32 Queue, void *pSignal, spinlock_t *lock);
+unsigned char SignalRemove_withLock(struct channel_header __iomem *pChannel,
+ u32 Queue, void *pSignal, spinlock_t *lock);
#endif
diff --git a/drivers/staging/unisys/common-spar/include/channels/channel.h b/drivers/staging/unisys/common-spar/include/channels/channel.h
index c25dfbf7f6b..6fb6e5b3dda 100644
--- a/drivers/staging/unisys/common-spar/include/channels/channel.h
+++ b/drivers/staging/unisys/common-spar/include/channels/channel.h
@@ -50,43 +50,12 @@
#define ULTRA_CHANNEL_PROTOCOL_SIGNATURE SIGNATURE_32('E', 'C', 'N', 'L')
-#define CHANNEL_GUID_MISMATCH(chType, chName, field, expected, actual, fil, \
- lin, logCtx) \
- do { \
- pr_err("Channel mismatch on channel=%s(%pUL) field=%s expected=%pUL actual=%pUL @%s:%d\n", \
- chName, &chType, field, \
- &expected, &actual, \
- fil, lin); \
- } while (0)
-#define CHANNEL_U32_MISMATCH(chType, chName, field, expected, actual, fil, \
- lin, logCtx) \
- do { \
- pr_err("Channel mismatch on channel=%s(%pUL) field=%s expected=0x%-8.8lx actual=0x%-8.8lx @%s:%d\n", \
- chName, &chType, field, \
- (unsigned long)expected, (unsigned long)actual, \
- fil, lin); \
- } while (0)
-
-#define CHANNEL_U64_MISMATCH(chType, chName, field, expected, actual, fil, \
- lin, logCtx) \
- do { \
- pr_err("Channel mismatch on channel=%s(%pUL) field=%s expected=0x%-8.8Lx actual=0x%-8.8Lx @%s:%d\n", \
- chName, &chType, field, \
- (unsigned long long)expected, \
- (unsigned long long)actual, \
- fil, lin); \
- } while (0)
-
-#define UltraLogEvent(logCtx, EventId, Severity, SubsystemMask, pFunctionName, \
- LineNumber, Str, args...) \
- pr_info(Str, ## args)
-
-typedef enum {
+enum channel_serverstate {
CHANNELSRV_UNINITIALIZED = 0, /* channel is in an undefined state */
CHANNELSRV_READY = 1 /* channel has been initialized by server */
-} CHANNEL_SERVERSTATE;
+};
-typedef enum {
+enum channel_clientstate {
CHANNELCLI_DETACHED = 0,
CHANNELCLI_DISABLED = 1, /* client can see channel but is NOT
* allowed to use it unless given TBD
@@ -100,32 +69,32 @@ typedef enum {
* using channel */
CHANNELCLI_OWNED = 5 /* "no worries" state - client can
* access channel anytime */
-} CHANNEL_CLIENTSTATE;
+};
+
static inline const u8 *
ULTRA_CHANNELCLI_STRING(u32 v)
{
switch (v) {
case CHANNELCLI_DETACHED:
- return (const u8 *) ("DETACHED");
+ return (const u8 *)("DETACHED");
case CHANNELCLI_DISABLED:
- return (const u8 *) ("DISABLED");
+ return (const u8 *)("DISABLED");
case CHANNELCLI_ATTACHING:
- return (const u8 *) ("ATTACHING");
+ return (const u8 *)("ATTACHING");
case CHANNELCLI_ATTACHED:
- return (const u8 *) ("ATTACHED");
+ return (const u8 *)("ATTACHED");
case CHANNELCLI_BUSY:
- return (const u8 *) ("BUSY");
+ return (const u8 *)("BUSY");
case CHANNELCLI_OWNED:
- return (const u8 *) ("OWNED");
+ return (const u8 *)("OWNED");
default:
break;
}
- return (const u8 *) ("?");
+ return (const u8 *)("?");
}
-#define ULTRA_CHANNELSRV_IS_READY(x) ((x) == CHANNELSRV_READY)
-#define ULTRA_CHANNEL_SERVER_READY(pChannel) \
- (ULTRA_CHANNELSRV_IS_READY(readl(&(pChannel)->SrvState)))
+#define SPAR_CHANNEL_SERVER_READY(ch) \
+ (readl(&(ch)->srv_state) == CHANNELSRV_READY)
#define ULTRA_VALID_CHANNELCLI_TRANSITION(o, n) \
(((((o) == CHANNELCLI_DETACHED) && ((n) == CHANNELCLI_DISABLED)) || \
@@ -145,60 +114,41 @@ ULTRA_CHANNELCLI_STRING(u32 v)
(((o) == CHANNELCLI_BUSY) && ((n) == CHANNELCLI_OWNED)) || (0)) \
? (1) : (0))
-#define ULTRA_CHANNEL_CLIENT_CHK_TRANSITION(old, new, chanId, logCtx, \
+#define SPAR_CHANNEL_CLIENT_CHK_TRANSITION(old, new, id, log, \
file, line) \
do { \
if (!ULTRA_VALID_CHANNELCLI_TRANSITION(old, new)) \
- UltraLogEvent(logCtx, \
- CHANNELSTATE_DIAG_EVENTID_TRANSITERR, \
- CHANNELSTATE_DIAG_SEVERITY, \
- CHANNELSTATE_DIAG_SUBSYS, \
- __func__, __LINE__, \
- "%s Channel StateTransition INVALID! (%s) %s(%d)-->%s(%d) @%s:%d\n", \
- chanId, "CliState<x>", \
- ULTRA_CHANNELCLI_STRING(old), \
- old, \
- ULTRA_CHANNELCLI_STRING(new), \
- new, \
- PathName_Last_N_Nodes((u8 *)file, 4), \
- line); \
+ pr_info("%s Channel StateTransition INVALID! (%s) %s(%d)-->%s(%d) @%s:%d\n", \
+ id, "CliState<x>", \
+ ULTRA_CHANNELCLI_STRING(old), \
+ old, \
+ ULTRA_CHANNELCLI_STRING(new), \
+ new, \
+ pathname_last_n_nodes((u8 *)file, 4), \
+ line); \
} while (0)
-#define ULTRA_CHANNEL_CLIENT_TRANSITION(pChan, chanId, \
- newstate, logCtx) \
+#define SPAR_CHANNEL_CLIENT_TRANSITION(ch, id, newstate, log) \
do { \
- ULTRA_CHANNEL_CLIENT_CHK_TRANSITION( \
- readl(&(((CHANNEL_HEADER __iomem *) \
- (pChan))->CliStateOS)), \
- newstate, \
- chanId, logCtx, __FILE__, __LINE__); \
- UltraLogEvent(logCtx, CHANNELSTATE_DIAG_EVENTID_TRANSITOK, \
- CHANNELSTATE_DIAG_SEVERITY, \
- CHANNELSTATE_DIAG_SUBSYS, \
- __func__, __LINE__, \
- "%s Channel StateTransition (%s) %s(%d)-->%s(%d) @%s:%d\n", \
- chanId, "CliStateOS", \
- ULTRA_CHANNELCLI_STRING( \
- readl(&((CHANNEL_HEADER __iomem *) \
- (pChan))->CliStateOS)), \
- readl(&((CHANNEL_HEADER __iomem *) \
- (pChan))->CliStateOS), \
- ULTRA_CHANNELCLI_STRING(newstate), \
- newstate, \
- PathName_Last_N_Nodes(__FILE__, 4), __LINE__); \
- writel(newstate, &((CHANNEL_HEADER __iomem *) \
- (pChan))->CliStateOS); \
+ SPAR_CHANNEL_CLIENT_CHK_TRANSITION( \
+ readl(&(((struct channel_header __iomem *)\
+ (ch))->cli_state_os)), \
+ newstate, id, log, __FILE__, __LINE__); \
+ pr_info("%s Channel StateTransition (%s) %s(%d)-->%s(%d) @%s:%d\n", \
+ id, "CliStateOS", \
+ ULTRA_CHANNELCLI_STRING( \
+ readl(&((struct channel_header __iomem *)\
+ (ch))->cli_state_os)), \
+ readl(&((struct channel_header __iomem *)\
+ (ch))->cli_state_os), \
+ ULTRA_CHANNELCLI_STRING(newstate), \
+ newstate, \
+ pathname_last_n_nodes(__FILE__, 4), __LINE__); \
+ writel(newstate, &((struct channel_header __iomem *)\
+ (ch))->cli_state_os); \
mb(); /* required for channel synch */ \
} while (0)
-#define ULTRA_CHANNEL_CLIENT_ACQUIRE_OS(pChan, chanId, logCtx) \
- ULTRA_channel_client_acquire_os(pChan, chanId, logCtx, \
- (char *)__FILE__, __LINE__, \
- (char *)__func__)
-#define ULTRA_CHANNEL_CLIENT_RELEASE_OS(pChan, chanId, logCtx) \
- ULTRA_channel_client_release_os(pChan, chanId, logCtx, \
- (char *)__FILE__, __LINE__, (char *)__func__)
-
/* Values for ULTRA_CHANNEL_PROTOCOL.CliErrorBoot: */
/* throttling invalid boot channel statetransition error due to client
* disabled */
@@ -239,98 +189,98 @@ ULTRA_CHANNELCLI_STRING(u32 v)
#pragma pack(push, 1) /* both GCC and VC now allow this pragma */
/* Common Channel Header */
-typedef struct _CHANNEL_HEADER {
- u64 Signature; /* Signature */
- u32 LegacyState; /* DEPRECATED - being replaced by */
+struct channel_header {
+ u64 signature; /* Signature */
+ u32 legacy_state; /* DEPRECATED - being replaced by */
/* / SrvState, CliStateBoot, and CliStateOS below */
- u32 HeaderSize; /* sizeof(CHANNEL_HEADER) */
- u64 Size; /* Total size of this channel in bytes */
- u64 Features; /* Flags to modify behavior */
- uuid_le Type; /* Channel type: data, bus, control, etc. */
- u64 PartitionHandle; /* ID of guest partition */
- u64 Handle; /* Device number of this channel in client */
- u64 oChannelSpace; /* Offset in bytes to channel specific area */
- u32 VersionId; /* CHANNEL_HEADER Version ID */
- u32 PartitionIndex; /* Index of guest partition */
- uuid_le ZoneGuid; /* Guid of Channel's zone */
- u32 oClientString; /* offset from channel header to
+ u32 header_size; /* sizeof(struct channel_header) */
+ u64 size; /* Total size of this channel in bytes */
+ u64 features; /* Flags to modify behavior */
+ uuid_le chtype; /* Channel type: data, bus, control, etc. */
+ u64 partition_handle; /* ID of guest partition */
+ u64 handle; /* Device number of this channel in client */
+ u64 ch_space_offset; /* Offset in bytes to channel specific area */
+ u32 version_id; /* struct channel_header Version ID */
+ u32 partition_index; /* Index of guest partition */
+ uuid_le zone_uuid; /* Guid of Channel's zone */
+ u32 cli_str_offset; /* offset from channel header to
* nul-terminated ClientString (0 if
* ClientString not present) */
- u32 CliStateBoot; /* CHANNEL_CLIENTSTATE of pre-boot
+ u32 cli_state_boot; /* CHANNEL_CLIENTSTATE of pre-boot
* EFI client of this channel */
- u32 CmdStateCli; /* CHANNEL_COMMANDSTATE (overloaded in
+ u32 cmd_state_cli; /* CHANNEL_COMMANDSTATE (overloaded in
* Windows drivers, see ServerStateUp,
* ServerStateDown, etc) */
- u32 CliStateOS; /* CHANNEL_CLIENTSTATE of Guest OS
+ u32 cli_state_os; /* CHANNEL_CLIENTSTATE of Guest OS
* client of this channel */
- u32 ChannelCharacteristics; /* CHANNEL_CHARACTERISTIC_<xxx> */
- u32 CmdStateSrv; /* CHANNEL_COMMANDSTATE (overloaded in
+ u32 ch_characteristic; /* CHANNEL_CHARACTERISTIC_<xxx> */
+ u32 cmd_state_srv; /* CHANNEL_COMMANDSTATE (overloaded in
* Windows drivers, see ServerStateUp,
* ServerStateDown, etc) */
- u32 SrvState; /* CHANNEL_SERVERSTATE */
- u8 CliErrorBoot; /* bits to indicate err states for
+ u32 srv_state; /* CHANNEL_SERVERSTATE */
+ u8 cli_error_boot; /* bits to indicate err states for
* boot clients, so err messages can
* be throttled */
- u8 CliErrorOS; /* bits to indicate err states for OS
+ u8 cli_error_os; /* bits to indicate err states for OS
* clients, so err messages can be
* throttled */
- u8 Filler[1]; /* Pad out to 128 byte cacheline */
+ u8 filler[1]; /* Pad out to 128 byte cacheline */
/* Please add all new single-byte values below here */
- u8 RecoverChannel;
-} CHANNEL_HEADER, *pCHANNEL_HEADER, ULTRA_CHANNEL_PROTOCOL;
+ u8 recover_channel;
+};
#define ULTRA_CHANNEL_ENABLE_INTS (0x1ULL << 0)
/* Subheader for the Signal Type variation of the Common Channel */
-typedef struct _SIGNAL_QUEUE_HEADER {
+struct signal_queue_header {
/* 1st cache line */
- u32 VersionId; /* SIGNAL_QUEUE_HEADER Version ID */
- u32 Type; /* Queue type: storage, network */
- u64 Size; /* Total size of this queue in bytes */
- u64 oSignalBase; /* Offset to signal queue area */
- u64 FeatureFlags; /* Flags to modify behavior */
- u64 NumSignalsSent; /* Total # of signals placed in this queue */
- u64 NumOverflows; /* Total # of inserts failed due to
+ u32 version; /* SIGNAL_QUEUE_HEADER Version ID */
+ u32 chtype; /* Queue type: storage, network */
+ u64 size; /* Total size of this queue in bytes */
+ u64 sig_base_offset; /* Offset to signal queue area */
+ u64 features; /* Flags to modify behavior */
+ u64 num_sent; /* Total # of signals placed in this queue */
+ u64 num_overflows; /* Total # of inserts failed due to
* full queue */
- u32 SignalSize; /* Total size of a signal for this queue */
- u32 MaxSignalSlots; /* Max # of slots in queue, 1 slot is
+ u32 signal_size; /* Total size of a signal for this queue */
+ u32 max_slots; /* Max # of slots in queue, 1 slot is
* always empty */
- u32 MaxSignals; /* Max # of signals in queue
+ u32 max_signals; /* Max # of signals in queue
* (MaxSignalSlots-1) */
- u32 Head; /* Queue head signal # */
+ u32 head; /* Queue head signal # */
/* 2nd cache line */
- u64 NumSignalsReceived; /* Total # of signals removed from this queue */
- u32 Tail; /* Queue tail signal # (on separate
+ u64 num_received; /* Total # of signals removed from this queue */
+ u32 tail; /* Queue tail signal # (on separate
* cache line) */
- u32 Reserved1; /* Reserved field */
- u64 Reserved2; /* Resrved field */
- u64 ClientQueue;
- u64 NumInterruptsReceived; /* Total # of Interrupts received. This
+ u32 reserved1; /* Reserved field */
+ u64 reserved2; /* Reserved field */
+ u64 client_queue;
+ u64 num_irq_received; /* Total # of Interrupts received. This
* is incremented by the ISR in the
* guest windows driver */
- u64 NumEmptyCnt; /* Number of times that visor_signal_remove
+ u64 num_empty; /* Number of times that visor_signal_remove
* is called and returned Empty
* Status. */
- u32 ErrorFlags; /* Error bits set during SignalReinit
+ u32 errorflags; /* Error bits set during SignalReinit
* to denote trouble with client's
* fields */
- u8 Filler[12]; /* Pad out to 64 byte cacheline */
-} SIGNAL_QUEUE_HEADER, *pSIGNAL_QUEUE_HEADER;
+ u8 filler[12]; /* Pad out to 64 byte cacheline */
+};
#pragma pack(pop)
-#define SignalInit(chan, QHDRFLD, QDATAFLD, QDATATYPE, ver, typ) \
+#define spar_signal_init(chan, QHDRFLD, QDATAFLD, QDATATYPE, ver, typ) \
do { \
memset(&chan->QHDRFLD, 0, sizeof(chan->QHDRFLD)); \
- chan->QHDRFLD.VersionId = ver; \
- chan->QHDRFLD.Type = typ; \
- chan->QHDRFLD.Size = sizeof(chan->QDATAFLD); \
- chan->QHDRFLD.SignalSize = sizeof(QDATATYPE); \
- chan->QHDRFLD.oSignalBase = (u64)(chan->QDATAFLD)- \
+ chan->QHDRFLD.version = ver; \
+ chan->QHDRFLD.chtype = typ; \
+ chan->QHDRFLD.size = sizeof(chan->QDATAFLD); \
+ chan->QHDRFLD.signal_size = sizeof(QDATATYPE); \
+ chan->QHDRFLD.sig_base_offset = (u64)(chan->QDATAFLD)- \
(u64)(&chan->QHDRFLD); \
- chan->QHDRFLD.MaxSignalSlots = \
+ chan->QHDRFLD.max_slots = \
sizeof(chan->QDATAFLD)/sizeof(QDATATYPE); \
- chan->QHDRFLD.MaxSignals = chan->QHDRFLD.MaxSignalSlots-1; \
+ chan->QHDRFLD.max_signals = chan->QHDRFLD.max_slots-1; \
} while (0)
/* Generic function useful for validating any type of channel when it is
@@ -339,64 +289,62 @@ typedef struct _SIGNAL_QUEUE_HEADER {
* is used to pass the EFI_DIAG_CAPTURE_PROTOCOL needed to log messages.
*/
static inline int
-ULTRA_check_channel_client(void __iomem *pChannel,
- uuid_le expectedTypeGuid,
- char *channelName,
- u64 expectedMinBytes,
- u32 expectedVersionId,
- u64 expectedSignature,
- char *fileName, int lineNumber, void *logCtx)
+spar_check_channel_client(void __iomem *ch,
+ uuid_le expected_uuid,
+ char *chname,
+ u64 expected_min_bytes,
+ u32 expected_version,
+ u64 expected_signature)
{
- if (uuid_le_cmp(expectedTypeGuid, NULL_UUID_LE) != 0) {
+ if (uuid_le_cmp(expected_uuid, NULL_UUID_LE) != 0) {
uuid_le guid;
memcpy_fromio(&guid,
- &((CHANNEL_HEADER __iomem *)(pChannel))->Type,
+ &((struct channel_header __iomem *)(ch))->chtype,
sizeof(guid));
/* caller wants us to verify type GUID */
- if (uuid_le_cmp(guid, expectedTypeGuid) != 0) {
- CHANNEL_GUID_MISMATCH(expectedTypeGuid, channelName,
- "type", expectedTypeGuid,
- guid, fileName,
- lineNumber, logCtx);
+ if (uuid_le_cmp(guid, expected_uuid) != 0) {
+ pr_err("Channel mismatch on channel=%s(%pUL) field=type expected=%pUL actual=%pUL\n",
+ chname, &expected_uuid,
+ &expected_uuid, &guid);
return 0;
}
}
- if (expectedMinBytes > 0) /* caller wants us to verify
+ if (expected_min_bytes > 0) { /* caller wants us to verify
* channel size */
- if (readq(&((CHANNEL_HEADER __iomem *)
- (pChannel))->Size) < expectedMinBytes) {
- CHANNEL_U64_MISMATCH(expectedTypeGuid, channelName,
- "size", expectedMinBytes,
- readq(&((CHANNEL_HEADER __iomem *)
- (pChannel))->Size),
- fileName,
- lineNumber, logCtx);
+ unsigned long long bytes =
+ readq(&((struct channel_header __iomem *)
+ (ch))->size);
+ if (bytes < expected_min_bytes) {
+ pr_err("Channel mismatch on channel=%s(%pUL) field=size expected=0x%-8.8Lx actual=0x%-8.8Lx\n",
+ chname, &expected_uuid,
+ (unsigned long long)expected_min_bytes, bytes);
return 0;
}
- if (expectedVersionId > 0) /* caller wants us to verify
+ }
+ if (expected_version > 0) { /* caller wants us to verify
* channel version */
- if (readl(&((CHANNEL_HEADER __iomem *) (pChannel))->VersionId)
- != expectedVersionId) {
- CHANNEL_U32_MISMATCH(expectedTypeGuid, channelName,
- "version", expectedVersionId,
- readl(&((CHANNEL_HEADER __iomem *)
- (pChannel))->VersionId),
- fileName, lineNumber, logCtx);
+ unsigned long ver = readl(&((struct channel_header __iomem *)
+ (ch))->version_id);
+ if (ver != expected_version) {
+ pr_err("Channel mismatch on channel=%s(%pUL) field=version expected=0x%-8.8lx actual=0x%-8.8lx\n",
+ chname, &expected_uuid,
+ (unsigned long)expected_version, ver);
return 0;
}
- if (expectedSignature > 0) /* caller wants us to verify
+ }
+ if (expected_signature > 0) { /* caller wants us to verify
* channel signature */
- if (readq(&((CHANNEL_HEADER __iomem *) (pChannel))->Signature)
- != expectedSignature) {
- CHANNEL_U64_MISMATCH(expectedTypeGuid, channelName,
- "signature", expectedSignature,
- readq(&((CHANNEL_HEADER __iomem *)
- (pChannel))->Signature),
- fileName,
- lineNumber, logCtx);
+ unsigned long long sig =
+ readq(&((struct channel_header __iomem *)
+ (ch))->signature);
+ if (sig != expected_signature) {
+ pr_err("Channel mismatch on channel=%s(%pUL) field=signature expected=0x%-8.8llx actual=0x%-8.8llx\n",
+ chname, &expected_uuid,
+ expected_signature, sig);
return 0;
}
+ }
return 1;
}
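A hedged calling sketch for the new helper (DEMO_VERSION and DEMO_SIGNATURE are placeholders; spar_vhba_channel_protocol_uuid is defined in channel_guid.h later in this patch). Passing 0 for the size, version or signature arguments, or NULL_UUID_LE for the uuid, skips that particular check.

static int demo_validate_vhba(u64 channel_physaddr, u64 channel_bytes)
{
	void __iomem *ch = ioremap(channel_physaddr, channel_bytes);
	int ok;

	if (!ch)
		return -ENOMEM;
	ok = spar_check_channel_client(ch, spar_vhba_channel_protocol_uuid,
				       "vhba", channel_bytes,
				       DEMO_VERSION, DEMO_SIGNATURE);
	iounmap(ch);
	return ok ? 0 : -EINVAL;
}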
@@ -405,19 +353,16 @@ ULTRA_check_channel_client(void __iomem *pChannel,
* Note that <logCtx> is only needed for callers in the EFI environment, and
* is used to pass the EFI_DIAG_CAPTURE_PROTOCOL needed to log messages.
*/
-static inline int
-ULTRA_check_channel_server(uuid_le typeGuid,
- char *channelName,
- u64 expectedMinBytes,
- u64 actualBytes,
- char *fileName, int lineNumber, void *logCtx)
+static inline int spar_check_channel_server(uuid_le typeuuid, char *name,
+ u64 expected_min_bytes,
+ u64 actual_bytes)
{
- if (expectedMinBytes > 0) /* caller wants us to verify
+ if (expected_min_bytes > 0) /* caller wants us to verify
* channel size */
- if (actualBytes < expectedMinBytes) {
- CHANNEL_U64_MISMATCH(typeGuid, channelName, "size",
- expectedMinBytes, actualBytes,
- fileName, lineNumber, logCtx);
+ if (actual_bytes < expected_min_bytes) {
+ pr_err("Channel mismatch on channel=%s(%pUL) field=size expected=0x%-8.8llx actual=0x%-8.8llx\n",
+ name, &typeuuid, expected_min_bytes,
+ actual_bytes);
return 0;
}
return 1;
@@ -430,7 +375,7 @@ ULTRA_check_channel_server(uuid_le typeGuid,
* in it, the return pointer will be to the beginning of the string.
*/
static inline u8 *
-PathName_Last_N_Nodes(u8 *s, unsigned int n)
+pathname_last_n_nodes(u8 *s, unsigned int n)
{
u8 *p = s;
unsigned int node_count = 0;
@@ -455,59 +400,43 @@ PathName_Last_N_Nodes(u8 *s, unsigned int n)
}
static inline int
-ULTRA_channel_client_acquire_os(void __iomem *pChannel, u8 *chanId,
- void *logCtx, char *file, int line, char *func)
+spar_channel_client_acquire_os(void __iomem *ch, u8 *id)
{
- CHANNEL_HEADER __iomem *pChan = pChannel;
+ struct channel_header __iomem *hdr = ch;
- if (readl(&pChan->CliStateOS) == CHANNELCLI_DISABLED) {
- if ((readb(&pChan->CliErrorOS)
+ if (readl(&hdr->cli_state_os) == CHANNELCLI_DISABLED) {
+ if ((readb(&hdr->cli_error_os)
& ULTRA_CLIERROROS_THROTTLEMSG_DISABLED) == 0) {
/* we are NOT throttling this message */
- writeb(readb(&pChan->CliErrorOS) |
+ writeb(readb(&hdr->cli_error_os) |
ULTRA_CLIERROROS_THROTTLEMSG_DISABLED,
- &pChan->CliErrorOS);
+ &hdr->cli_error_os);
/* throttle until acquire successful */
- UltraLogEvent(logCtx,
- CHANNELSTATE_DIAG_EVENTID_TRANSITERR,
- CHANNELSTATE_DIAG_SEVERITY,
- CHANNELSTATE_DIAG_SUBSYS, func, line,
- "%s Channel StateTransition INVALID! - acquire failed because OS client DISABLED @%s:%d\n",
- chanId, PathName_Last_N_Nodes(
- (u8 *) file, 4), line);
+ pr_info("%s Channel StateTransition INVALID! - acquire failed because OS client DISABLED\n",
+ id);
}
return 0;
}
- if ((readl(&pChan->CliStateOS) != CHANNELCLI_OWNED)
- && (readl(&pChan->CliStateBoot) == CHANNELCLI_DISABLED)) {
+ if ((readl(&hdr->cli_state_os) != CHANNELCLI_OWNED) &&
+ (readl(&hdr->cli_state_boot) == CHANNELCLI_DISABLED)) {
/* Our competitor is DISABLED, so we can transition to OWNED */
- UltraLogEvent(logCtx, CHANNELSTATE_DIAG_EVENTID_TRANSITOK,
- CHANNELSTATE_DIAG_SEVERITY,
- CHANNELSTATE_DIAG_SUBSYS, func, line,
- "%s Channel StateTransition (%s) %s(%d)-->%s(%d) @%s:%d\n",
- chanId, "CliStateOS",
- ULTRA_CHANNELCLI_STRING(
- readl(&pChan->CliStateOS)),
- readl(&pChan->CliStateOS),
- ULTRA_CHANNELCLI_STRING(CHANNELCLI_OWNED),
- CHANNELCLI_OWNED,
- PathName_Last_N_Nodes((u8 *) file, 4), line);
- writel(CHANNELCLI_OWNED, &pChan->CliStateOS);
+ pr_info("%s Channel StateTransition (%s) %s(%d)-->%s(%d)\n",
+ id, "cli_state_os",
+ ULTRA_CHANNELCLI_STRING(readl(&hdr->cli_state_os)),
+ readl(&hdr->cli_state_os),
+ ULTRA_CHANNELCLI_STRING(CHANNELCLI_OWNED),
+ CHANNELCLI_OWNED);
+ writel(CHANNELCLI_OWNED, &hdr->cli_state_os);
mb(); /* required for channel synch */
}
- if (readl(&pChan->CliStateOS) == CHANNELCLI_OWNED) {
- if (readb(&pChan->CliErrorOS) != 0) {
+ if (readl(&hdr->cli_state_os) == CHANNELCLI_OWNED) {
+ if (readb(&hdr->cli_error_os) != 0) {
/* we are in an error msg throttling state;
* come out of it */
- UltraLogEvent(logCtx,
- CHANNELSTATE_DIAG_EVENTID_TRANSITOK,
- CHANNELSTATE_DIAG_SEVERITY,
- CHANNELSTATE_DIAG_SUBSYS, func, line,
- "%s Channel OS client acquire now successful @%s:%d\n",
- chanId, PathName_Last_N_Nodes((u8 *) file,
- 4), line);
- writeb(0, &pChan->CliErrorOS);
+ pr_info("%s Channel OS client acquire now successful\n",
+ id);
+ writeb(0, &hdr->cli_error_os);
}
return 1;
}
@@ -515,95 +444,67 @@ ULTRA_channel_client_acquire_os(void __iomem *pChannel, u8 *chanId,
/* We have to do it the "hard way". We transition to BUSY,
* and can use the channel iff our competitor has not also
* transitioned to BUSY. */
- if (readl(&pChan->CliStateOS) != CHANNELCLI_ATTACHED) {
- if ((readb(&pChan->CliErrorOS)
+ if (readl(&hdr->cli_state_os) != CHANNELCLI_ATTACHED) {
+ if ((readb(&hdr->cli_error_os)
& ULTRA_CLIERROROS_THROTTLEMSG_NOTATTACHED) == 0) {
/* we are NOT throttling this message */
- writeb(readb(&pChan->CliErrorOS) |
+ writeb(readb(&hdr->cli_error_os) |
ULTRA_CLIERROROS_THROTTLEMSG_NOTATTACHED,
- &pChan->CliErrorOS);
+ &hdr->cli_error_os);
/* throttle until acquire successful */
- UltraLogEvent(logCtx,
- CHANNELSTATE_DIAG_EVENTID_TRANSITERR,
- CHANNELSTATE_DIAG_SEVERITY,
- CHANNELSTATE_DIAG_SUBSYS, func, line,
- "%s Channel StateTransition INVALID! - acquire failed because OS client NOT ATTACHED (state=%s(%d)) @%s:%d\n",
- chanId,
- ULTRA_CHANNELCLI_STRING(
- readl(&pChan->CliStateOS)),
- readl(&pChan->CliStateOS),
- PathName_Last_N_Nodes((u8 *) file, 4),
- line);
+ pr_info("%s Channel StateTransition INVALID! - acquire failed because OS client NOT ATTACHED (state=%s(%d))\n",
+ id, ULTRA_CHANNELCLI_STRING(
+ readl(&hdr->cli_state_os)),
+ readl(&hdr->cli_state_os));
}
return 0;
}
- writel(CHANNELCLI_BUSY, &pChan->CliStateOS);
+ writel(CHANNELCLI_BUSY, &hdr->cli_state_os);
mb(); /* required for channel synch */
- if (readl(&pChan->CliStateBoot) == CHANNELCLI_BUSY) {
- if ((readb(&pChan->CliErrorOS)
+ if (readl(&hdr->cli_state_boot) == CHANNELCLI_BUSY) {
+ if ((readb(&hdr->cli_error_os)
& ULTRA_CLIERROROS_THROTTLEMSG_BUSY) == 0) {
/* we are NOT throttling this message */
- writeb(readb(&pChan->CliErrorOS) |
+ writeb(readb(&hdr->cli_error_os) |
ULTRA_CLIERROROS_THROTTLEMSG_BUSY,
- &pChan->CliErrorOS);
+ &hdr->cli_error_os);
/* throttle until acquire successful */
- UltraLogEvent(logCtx,
- CHANNELSTATE_DIAG_EVENTID_TRANSITBUSY,
- CHANNELSTATE_DIAG_SEVERITY,
- CHANNELSTATE_DIAG_SUBSYS, func, line,
- "%s Channel StateTransition failed - host OS acquire failed because boot BUSY @%s:%d\n",
- chanId, PathName_Last_N_Nodes((u8 *) file,
- 4), line);
+ pr_info("%s Channel StateTransition failed - host OS acquire failed because boot BUSY\n",
+ id);
}
/* reset busy */
- writel(CHANNELCLI_ATTACHED, &pChan->CliStateOS);
+ writel(CHANNELCLI_ATTACHED, &hdr->cli_state_os);
mb(); /* required for channel synch */
return 0;
}
- if (readb(&pChan->CliErrorOS) != 0) {
+ if (readb(&hdr->cli_error_os) != 0) {
/* we are in an error msg throttling state; come out of it */
- UltraLogEvent(logCtx, CHANNELSTATE_DIAG_EVENTID_TRANSITOK,
- CHANNELSTATE_DIAG_SEVERITY,
- CHANNELSTATE_DIAG_SUBSYS, func, line,
- "%s Channel OS client acquire now successful @%s:%d\n",
- chanId, PathName_Last_N_Nodes((u8 *) file, 4),
- line);
- writeb(0, &pChan->CliErrorOS);
+ pr_info("%s Channel OS client acquire now successful\n", id);
+ writeb(0, &hdr->cli_error_os);
}
return 1;
}
static inline void
-ULTRA_channel_client_release_os(void __iomem *pChannel, u8 *chanId,
- void *logCtx, char *file, int line, char *func)
+spar_channel_client_release_os(void __iomem *ch, u8 *id)
{
- CHANNEL_HEADER __iomem *pChan = pChannel;
+ struct channel_header __iomem *hdr = ch;
- if (readb(&pChan->CliErrorOS) != 0) {
+ if (readb(&hdr->cli_error_os) != 0) {
/* we are in an error msg throttling state; come out of it */
- UltraLogEvent(logCtx, CHANNELSTATE_DIAG_EVENTID_TRANSITOK,
- CHANNELSTATE_DIAG_SEVERITY,
- CHANNELSTATE_DIAG_SUBSYS, func, line,
- "%s Channel OS client error state cleared @%s:%d\n",
- chanId, PathName_Last_N_Nodes((u8 *) file, 4),
- line);
- writeb(0, &pChan->CliErrorOS);
+ pr_info("%s Channel OS client error state cleared\n", id);
+ writeb(0, &hdr->cli_error_os);
}
- if (readl(&pChan->CliStateOS) == CHANNELCLI_OWNED)
+ if (readl(&hdr->cli_state_os) == CHANNELCLI_OWNED)
return;
- if (readl(&pChan->CliStateOS) != CHANNELCLI_BUSY) {
- UltraLogEvent(logCtx, CHANNELSTATE_DIAG_EVENTID_TRANSITERR,
- CHANNELSTATE_DIAG_SEVERITY,
- CHANNELSTATE_DIAG_SUBSYS, func, line,
- "%s Channel StateTransition INVALID! - release failed because OS client NOT BUSY (state=%s(%d)) @%s:%d\n",
- chanId,
- ULTRA_CHANNELCLI_STRING(
- readl(&pChan->CliStateOS)),
- readl(&pChan->CliStateOS),
- PathName_Last_N_Nodes((u8 *) file, 4), line);
+ if (readl(&hdr->cli_state_os) != CHANNELCLI_BUSY) {
+ pr_info("%s Channel StateTransition INVALID! - release failed because OS client NOT BUSY (state=%s(%d))\n",
+ id, ULTRA_CHANNELCLI_STRING(
+ readl(&hdr->cli_state_os)),
+ readl(&hdr->cli_state_os));
/* return; */
}
- writel(CHANNELCLI_ATTACHED, &pChan->CliStateOS); /* release busy */
+ writel(CHANNELCLI_ATTACHED, &hdr->cli_state_os); /* release busy */
}
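The two helpers above are meant to bracket channel access; a minimal pairing sketch (the "demo" id string is only used in the log messages):

static void demo_poll_channel(void __iomem *ch)
{
	u8 *id = (u8 *)"demo";

	if (!spar_channel_client_acquire_os(ch, id))
		return;	/* DISABLED, not attached, or boot client is BUSY */
	/* ... it is now safe to touch the channel's signal queues ... */
	spar_channel_client_release_os(ch, id);
}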
/*
@@ -625,8 +526,8 @@ ULTRA_channel_client_release_os(void __iomem *pChannel, u8 *chanId,
* full.
*/
-unsigned char visor_signal_insert(CHANNEL_HEADER __iomem *pChannel, u32 Queue,
- void *pSignal);
+unsigned char spar_signal_insert(struct channel_header __iomem *ch, u32 queue,
+ void *sig);
/*
* Routine Description:
@@ -647,8 +548,8 @@ unsigned char visor_signal_insert(CHANNEL_HEADER __iomem *pChannel, u32 Queue,
* empty.
*/
-unsigned char visor_signal_remove(CHANNEL_HEADER __iomem *pChannel, u32 Queue,
- void *pSignal);
+unsigned char spar_signal_remove(struct channel_header __iomem *ch, u32 queue,
+ void *sig);
/*
* Routine Description:
@@ -669,8 +570,8 @@ unsigned char visor_signal_remove(CHANNEL_HEADER __iomem *pChannel, u32 Queue,
* Return value:
* # of signals copied.
*/
-unsigned int SignalRemoveAll(pCHANNEL_HEADER pChannel, u32 Queue,
- void *pSignal);
+unsigned int spar_signal_remove_all(struct channel_header *ch, u32 queue,
+ void *sig);
/*
* Routine Description:
@@ -683,7 +584,7 @@ unsigned int SignalRemoveAll(pCHANNEL_HEADER pChannel, u32 Queue,
* Return value:
* 1 if the signal queue is empty, 0 otherwise.
*/
-unsigned char visor_signalqueue_empty(CHANNEL_HEADER __iomem *pChannel,
- u32 Queue);
+unsigned char spar_signalqueue_empty(struct channel_header __iomem *ch,
+ u32 queue);
#endif
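A consumer-side sketch of the renamed signal routines, assuming a queue index and signal layout agreed with the peer (the demo_* names are illustrative):

static void demo_drain_queue(struct channel_header __iomem *ch, u32 queue)
{
	u8 sig[64];	/* must match the queue's signal_size */

	/* spar_signal_remove() returns 0 once the queue is empty */
	while (spar_signal_remove(ch, queue, sig))
		; /* hand the copied signal off to the real consumer here */
}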
diff --git a/drivers/staging/unisys/common-spar/include/channels/channel_guid.h b/drivers/staging/unisys/common-spar/include/channels/channel_guid.h
index 63c67ca4c9e..706363fc3e9 100644
--- a/drivers/staging/unisys/common-spar/include/channels/channel_guid.h
+++ b/drivers/staging/unisys/common-spar/include/channels/channel_guid.h
@@ -20,45 +20,42 @@
/* Used in IOChannel
* {414815ed-c58c-11da-95a9-00e08161165f}
*/
-#define ULTRA_VHBA_CHANNEL_PROTOCOL_GUID \
+#define SPAR_VHBA_CHANNEL_PROTOCOL_UUID \
UUID_LE(0x414815ed, 0xc58c, 0x11da, \
0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f)
-static const uuid_le UltraVhbaChannelProtocolGuid =
- ULTRA_VHBA_CHANNEL_PROTOCOL_GUID;
+static const uuid_le spar_vhba_channel_protocol_uuid =
+ SPAR_VHBA_CHANNEL_PROTOCOL_UUID;
/* Used in IOChannel
* {8cd5994d-c58e-11da-95a9-00e08161165f}
*/
-#define ULTRA_VNIC_CHANNEL_PROTOCOL_GUID \
+#define SPAR_VNIC_CHANNEL_PROTOCOL_UUID \
UUID_LE(0x8cd5994d, 0xc58e, 0x11da, \
0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f)
-static const uuid_le UltraVnicChannelProtocolGuid =
- ULTRA_VNIC_CHANNEL_PROTOCOL_GUID;
+static const uuid_le spar_vnic_channel_protocol_uuid =
+ SPAR_VNIC_CHANNEL_PROTOCOL_UUID;
/* Used in IOChannel
* {72120008-4AAB-11DC-8530-444553544200}
*/
-#define ULTRA_SIOVM_GUID \
+#define SPAR_SIOVM_UUID \
UUID_LE(0x72120008, 0x4AAB, 0x11DC, \
0x85, 0x30, 0x44, 0x45, 0x53, 0x54, 0x42, 0x00)
-static const uuid_le UltraSIOVMGuid = ULTRA_SIOVM_GUID;
-
+static const uuid_le spar_siovm_uuid = SPAR_SIOVM_UUID;
/* Used in visornoop/visornoop_main.c
* {5b52c5ac-e5f5-4d42-8dff-429eaecd221f}
*/
-#define ULTRA_CONTROLDIRECTOR_CHANNEL_PROTOCOL_GUID \
+#define SPAR_CONTROLDIRECTOR_CHANNEL_PROTOCOL_UUID \
UUID_LE(0x5b52c5ac, 0xe5f5, 0x4d42, \
0x8d, 0xff, 0x42, 0x9e, 0xae, 0xcd, 0x22, 0x1f)
-static const uuid_le UltraControlDirectorChannelProtocolGuid =
- ULTRA_CONTROLDIRECTOR_CHANNEL_PROTOCOL_GUID;
+static const uuid_le spar_controldirector_channel_protocol_uuid =
+ SPAR_CONTROLDIRECTOR_CHANNEL_PROTOCOL_UUID;
/* Used in visorchipset/visorchipset_main.c
* {B4E79625-AEDE-4EAA-9E11-D3EDDCD4504C}
*/
-#define ULTRA_DIAG_POOL_CHANNEL_PROTOCOL_GUID \
+#define SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID \
UUID_LE(0xb4e79625, 0xaede, 0x4eaa, \
0x9e, 0x11, 0xd3, 0xed, 0xdc, 0xd4, 0x50, 0x4c)
-
-
diff --git a/drivers/staging/unisys/common-spar/include/channels/controlframework.h b/drivers/staging/unisys/common-spar/include/channels/controlframework.h
index fd4726e754e..33d9caf337c 100644
--- a/drivers/staging/unisys/common-spar/include/channels/controlframework.h
+++ b/drivers/staging/unisys/common-spar/include/channels/controlframework.h
@@ -28,50 +28,35 @@
#include <linux/types.h>
#include "channel.h"
-#define ULTRA_MEMORY_COUNT_Ki 1024
-
-/* Scale order 0 is one 32-bit (4-byte) word (in 64 or 128-bit
- * architecture potentially 64 or 128-bit word) */
-#define ULTRA_MEMORY_PAGE_WORD 4
-
-/* Define Ki scale page to be traditional 4KB page */
-#define ULTRA_MEMORY_PAGE_Ki (ULTRA_MEMORY_PAGE_WORD * ULTRA_MEMORY_COUNT_Ki)
-typedef struct _ULTRA_SEGMENT_STATE {
- u16 Enabled:1; /* Bit 0: May enter other states */
- u16 Active:1; /* Bit 1: Assigned to active partition */
- u16 Alive:1; /* Bit 2: Configure message sent to
+struct spar_segment_state {
+ u16 enabled:1; /* Bit 0: May enter other states */
+ u16 active:1; /* Bit 1: Assigned to active partition */
+ u16 alive:1; /* Bit 2: Configure message sent to
* service/server */
- u16 Revoked:1; /* Bit 3: similar to partition state
+ u16 revoked:1; /* Bit 3: similar to partition state
* ShuttingDown */
- u16 Allocated:1; /* Bit 4: memory (device/port number)
+ u16 allocated:1; /* Bit 4: memory (device/port number)
* has been selected by Command */
- u16 Known:1; /* Bit 5: has been introduced to the
+ u16 known:1; /* Bit 5: has been introduced to the
* service/guest partition */
- u16 Ready:1; /* Bit 6: service/Guest partition has
+ u16 ready:1; /* Bit 6: service/Guest partition has
* responded to introduction */
- u16 Operating:1; /* Bit 7: resource is configured and
+ u16 operating:1; /* Bit 7: resource is configured and
* operating */
/* Note: don't use high bit unless we need to switch to ushort
* which is non-compliant */
-} ULTRA_SEGMENT_STATE;
-static const ULTRA_SEGMENT_STATE SegmentStateRunning = {
+};
+
+static const struct spar_segment_state segment_state_running = {
1, 1, 1, 0, 1, 1, 1, 1
};
-static const ULTRA_SEGMENT_STATE SegmentStatePaused = {
+
+static const struct spar_segment_state segment_state_paused = {
1, 1, 1, 0, 1, 1, 1, 0
};
-static const ULTRA_SEGMENT_STATE SegmentStateStandby = {
+
+static const struct spar_segment_state segment_state_standby = {
1, 1, 0, 0, 1, 1, 1, 0
};
-typedef union {
- u64 Full;
- struct {
- u8 Major; /* will be 1 for the first release and
- * increment thereafter */
- u8 Minor;
- u16 Maintenance;
- u32 Revision; /* Subversion revision */
- } Part;
-} ULTRA_COMPONENT_VERSION;
#endif /* _CONTROL_FRAMEWORK_H_ not defined */
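As a reading aid for the positional initializers above: segment_state_running differs from segment_state_paused only in the operating bit, and segment_state_standby additionally clears alive. A caller would typically test the bit-fields directly, e.g. (sketch):

static bool demo_segment_is_usable(struct spar_segment_state s)
{
	return s.enabled && s.operating && !s.revoked;
}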
diff --git a/drivers/staging/unisys/common-spar/include/channels/controlvmchannel.h b/drivers/staging/unisys/common-spar/include/channels/controlvmchannel.h
index d08c198e0de..a66db7968d6 100644
--- a/drivers/staging/unisys/common-spar/include/channels/controlvmchannel.h
+++ b/drivers/staging/unisys/common-spar/include/channels/controlvmchannel.h
@@ -27,12 +27,12 @@ enum { INVALID_GUEST_FIRMWARE, SAMPLE_GUEST_FIRMWARE,
};
/* {2B3C2D10-7EF5-4ad8-B966-3448B7386B3D} */
-#define ULTRA_CONTROLVM_CHANNEL_PROTOCOL_GUID \
+#define SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID \
UUID_LE(0x2b3c2d10, 0x7ef5, 0x4ad8, \
0xb9, 0x66, 0x34, 0x48, 0xb7, 0x38, 0x6b, 0x3d)
-static const uuid_le UltraControlvmChannelProtocolGuid =
- ULTRA_CONTROLVM_CHANNEL_PROTOCOL_GUID;
+static const uuid_le spar_controlvm_channel_protocol_uuid =
+ SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;
#define ULTRA_CONTROLVM_CHANNEL_PROTOCOL_SIGNATURE \
ULTRA_CHANNEL_PROTOCOL_SIGNATURE
@@ -45,19 +45,13 @@ static const uuid_le UltraControlvmChannelProtocolGuid =
* channel struct withOUT needing to increment this. */
#define ULTRA_CONTROLVM_CHANNEL_PROTOCOL_VERSIONID 1
-#define ULTRA_CONTROLVM_CHANNEL_OK_CLIENT(pChannel, logCtx) \
- (ULTRA_check_channel_client(pChannel, \
- UltraControlvmChannelProtocolGuid, \
+#define SPAR_CONTROLVM_CHANNEL_OK_CLIENT(ch) \
+ spar_check_channel_client(ch, \
+ spar_controlvm_channel_protocol_uuid, \
"controlvm", \
- sizeof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL), \
+ sizeof(struct spar_controlvm_channel_protocol), \
ULTRA_CONTROLVM_CHANNEL_PROTOCOL_VERSIONID, \
- ULTRA_CONTROLVM_CHANNEL_PROTOCOL_SIGNATURE, \
- __FILE__, __LINE__, logCtx))
-#define ULTRA_CONTROLVM_CHANNEL_OK_SERVER(actualBytes, logCtx) \
- (ULTRA_check_channel_server(UltraControlvmChannelProtocolGuid, \
- "controlvm", \
- sizeof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL), \
- actualBytes, __FILE__, __LINE__, logCtx))
+ ULTRA_CONTROLVM_CHANNEL_PROTOCOL_SIGNATURE)
#define MY_DEVICE_INDEX 0
#define MAX_MACDATA_LEN 8 /* number of bytes for MAC address in config packet */
@@ -88,7 +82,7 @@ static const uuid_le UltraControlvmChannelProtocolGuid =
* - issued on the EventQueue queue (q #2) in the ControlVm channel
* - responded to on the EventAckQueue queue (q #3) in the ControlVm channel
*/
-typedef enum {
+enum controlvm_id {
CONTROLVM_INVALID = 0,
/* SWITCH commands required Parameter: SwitchNumber */
/* BUS commands required Parameter: BusNumber */
@@ -117,9 +111,9 @@ typedef enum {
CONTROLVM_CHIPSET_READY = 0x304, /* CP --> SP */
CONTROLVM_CHIPSET_SELFTEST = 0x305, /* CP --> SP */
-} CONTROLVM_ID;
+};
-struct InterruptInfo {
+struct irq_info {
/**< specifies interrupt info. It is used to send interrupts
* for this channel. The peer at the end of this channel
* who has registered an interrupt (using recv fields
@@ -128,495 +122,390 @@ struct InterruptInfo {
* interrupt. Currently this is used by IOPart-SP to wake
* up GP when Data Channel transitions from empty to
* non-empty.*/
- u64 sendInterruptHandle;
+ u64 send_irq_handle;
/**< specifies interrupt handle. It is used to retrieve the
* corresponding interrupt pin from Monitor; and the
* interrupt pin is used to connect to the corresponding
- * intrrupt. Used by IOPart-GP only. */
- u64 recvInterruptHandle;
+ * interrupt. Used by IOPart-GP only. */
+ u64 recv_irq_handle;
/**< specifies interrupt vector. It, interrupt pin, and shared are
* used to connect to the corresponding interrupt. Used by
* IOPart-GP only. */
- u32 recvInterruptVector;
+ u32 recv_irq_vector;
/**< specifies if the recvInterrupt is shared. It, interrupt pin
* and vector are used to connect to 0 = not shared; 1 = shared.
* the corresponding interrupt. Used by IOPart-GP only. */
- u8 recvInterruptShared;
+ u8 recv_irq_shared;
u8 reserved[3]; /* Natural alignment purposes */
};
-struct PciId {
- u16 Domain;
- u8 Bus;
- u8 Slot;
- u8 Func;
- u8 Reserved[3]; /* Natural alignment purposes */
-};
-
-struct PciConfigHdr {
- u16 VendorId;
- u16 SubSysVendor;
- u16 DeviceId;
- u16 SubSysDevice;
- u32 ClassCode;
- u32 Reserved; /* Natural alignment purposes */
-};
-
-struct ScsiId {
- u32 Bus;
- u32 Target;
- u32 Lun;
- u32 Host; /* Command should ignore this for *
- * DiskArrival/RemovalEvents */
-};
-
-struct WWID {
- u32 wwid1;
- u32 wwid2;
-};
-
-struct virtDiskInfo {
- u32 switchNo; /* defined by SWITCH_CREATE */
- u32 externalPortNo; /* 0 for SAS RAID provided (external)
- * virtual disks, 1 for virtual disk
- * images, 2 for gold disk images */
- u16 VirtualDiskIndex; /* Index of disk descriptor in the
- * VirtualDisk segment associated with
- * externalPortNo */
- u16 Reserved1;
- u32 Reserved2;
+struct pci_id {
+ u16 domain;
+ u8 bus;
+ u8 slot;
+ u8 func;
+ u8 reserved[3]; /* Natural alignment purposes */
};
-typedef enum {
- CONTROLVM_ACTION_NONE = 0,
- CONTROLVM_ACTION_SET_RESTORE = 0x05E7,
- CONTROLVM_ACTION_CLEAR_RESTORE = 0x0C18,
- CONTROLVM_ACTION_RESTORING = 0x08E5,
- CONTROLVM_ACTION_RESTORE_BUSY = 0x0999,
- CONTROLVM_ACTION_CLEAR_NVRAM = 0xB01
-} CONTROLVM_ACTION;
-
-typedef enum _ULTRA_TOOL_ACTIONS {
- /* enumeration that defines intended action */
- ULTRA_TOOL_ACTION_NONE = 0, /* normal boot of boot disk */
- ULTRA_TOOL_ACTION_INSTALL = 1, /* install source disk(s) to boot
- * disk */
- ULTRA_TOOL_ACTION_CAPTURE = 2, /* capture boot disk to target disk(s)
- * as 'gold image' */
- ULTRA_TOOL_ACTION_REPAIR = 3, /* use source disk(s) to repair
- * installation on boot disk */
- ULTRA_TOOL_ACTION_CLEAN = 4, /* 'scrub' virtual disk before
- * releasing back to storage pool */
- ULTRA_TOOL_ACTION_UPGRADE = 5, /* upgrade to use content of images
- * referenced from newer blueprint */
- ULTRA_TOOL_ACTION_DIAG = 6, /* use tool to invoke diagnostic script
- * provided by blueprint */
- ULTRA_TOOL_ACTION_FAILED = 7, /* used when tool fails installation
- and cannot continue */
- ULTRA_TOOL_ACTION_COUNT = 8
-} ULTRA_TOOL_ACTIONS;
-
-typedef struct _ULTRA_EFI_SPAR_INDICATION {
- u64 BootToFirmwareUI:1; /* Bit 0: Stop in uefi ui */
- u64 ClearNvram:1; /* Bit 1: Clear NVRAM */
- u64 ClearCmos:1; /* Bit 2: Clear CMOS */
- u64 BootToTool:1; /* Bit 3: Run install tool */
+struct efi_spar_indication {
+ u64 boot_to_fw_ui:1; /* Bit 0: Stop in uefi ui */
+ u64 clear_nvram:1; /* Bit 1: Clear NVRAM */
+ u64 clear_cmos:1; /* Bit 2: Clear CMOS */
+ u64 boot_to_tool:1; /* Bit 3: Run install tool */
/* remaining bits are available */
-} ULTRA_EFI_SPAR_INDICATION;
+};
-typedef enum {
+enum ultra_chipset_feature {
ULTRA_CHIPSET_FEATURE_REPLY = 0x00000001,
ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG = 0x00000002,
ULTRA_CHIPSET_FEATURE_PCIVBUS = 0x00000004
-} ULTRA_CHIPSET_FEATURE;
+};
/** This is the common structure that is at the beginning of every
* ControlVm message (both commands and responses) in any ControlVm
* queue. Commands are easily distinguished from responses by
* looking at the flags.response field.
*/
-typedef struct _CONTROLVM_MESSAGE_HEADER {
- u32 Id; /* See CONTROLVM_ID. */
+struct controlvm_message_header {
+ u32 id; /* See CONTROLVM_ID. */
/* For requests, indicates the message type. */
/* For responses, indicates the type of message we are responding to. */
- u32 MessageSize; /* Includes size of this struct + size
+ u32 message_size; /* Includes size of this struct + size
* of message */
- u32 SegmentIndex; /* Index of segment containing Vm
+ u32 segment_index; /* Index of segment containing Vm
* message/information */
- u32 CompletionStatus; /* Error status code or result of
+ u32 completion_status; /* Error status code or result of
* message completion */
struct {
u32 failed:1; /**< =1 in a response to * signify
* failure */
- u32 responseExpected:1; /**< =1 in all messages that expect a
+ u32 response_expected:1; /**< =1 in all messages that expect a
* response (Control ignores this
* bit) */
u32 server:1; /**< =1 in all bus & device-related
* messages where the message
* receiver is to act as the bus or
* device server */
- u32 testMessage:1; /**< =1 for testing use only
+ u32 test_message:1; /**< =1 for testing use only
* (Control and Command ignore this
* bit) */
- u32 partialCompletion:1; /**< =1 if there are forthcoming
+ u32 partial_completion:1; /**< =1 if there are forthcoming
* responses/acks associated
* with this message */
u32 preserve:1; /**< =1 this is to let us know to
* preserve channel contents
* (for running guests)*/
- u32 writerInDiag:1; /**< =1 the DiagWriter is active in the
+ u32 writer_in_diag:1; /**< =1 the DiagWriter is active in the
* Diagnostic Partition*/
-
- /* remaining bits in this 32-bit word are available */
- } Flags;
- u32 Reserved; /* Natural alignment */
- u64 MessageHandle; /* Identifies the particular message instance,
+ } flags;
+ u32 reserved; /* Natural alignment */
+ u64 message_handle; /* Identifies the particular message instance,
* and is used to match particular */
/* request instances with the corresponding response instance. */
- u64 PayloadVmOffset; /* Offset of payload area from start of this
+ u64 payload_vm_offset; /* Offset of payload area from start of this
* instance of ControlVm segment */
- u32 PayloadMaxBytes; /* Maximum bytes allocated in payload
+ u32 payload_max_bytes; /* Maximum bytes allocated in payload
* area of ControlVm segment */
- u32 PayloadBytes; /* Actual number of bytes of payload
+ u32 payload_bytes; /* Actual number of bytes of payload
* area to copy between IO/Command; */
/* if non-zero, there is a payload to copy. */
-} CONTROLVM_MESSAGE_HEADER;
-
-typedef struct _CONTROLVM_PACKET_DEVICE_CREATE {
- u32 busNo; /**< bus # (0..n-1) from the msg receiver's
- * perspective */
+};
- /* Control uses header SegmentIndex field to access bus number... */
- u32 devNo; /**< bus-relative (0..n-1) device number */
- u64 channelAddr; /**< Guest physical address of the channel, which
- * can be dereferenced by the receiver
- * of this ControlVm command */
- u64 channelBytes; /**< specifies size of the channel in bytes */
- uuid_le dataTypeGuid;/**< specifies format of data in channel */
- uuid_le devInstGuid; /**< instance guid for the device */
- struct InterruptInfo intr; /**< specifies interrupt information */
-} CONTROLVM_PACKET_DEVICE_CREATE; /* for CONTROLVM_DEVICE_CREATE */
-
-typedef struct _CONTROLVM_PACKET_DEVICE_CONFIGURE {
- u32 busNo; /**< bus # (0..n-1) from the msg
+struct controlvm_packet_device_create {
+ u32 bus_no; /* bus # (0..n-1) from the msg receiver's end */
+ u32 dev_no; /* bus-relative (0..n-1) device number */
+ u64 channel_addr; /* Guest physical address of the channel, which
+ * can be dereferenced by the receiver of this
+ * ControlVm command */
+ u64 channel_bytes; /* specifies size of the channel in bytes */
+ uuid_le data_type_uuid; /* specifies format of data in channel */
+ uuid_le dev_inst_uuid; /* instance guid for the device */
+ struct irq_info intr; /* specifies interrupt information */
+}; /* for CONTROLVM_DEVICE_CREATE */
+
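A sketch of filling the new packet layout (all values are illustrative; spar_vhba_channel_protocol_uuid comes from channel_guid.h in this patch):

static void demo_fill_device_create(struct controlvm_packet_device_create *pkt)
{
	memset(pkt, 0, sizeof(*pkt));	/* leaves dev_inst_uuid and intr zeroed */
	pkt->bus_no = 0;
	pkt->dev_no = 2;
	pkt->channel_addr = 0x100000000ULL;	/* guest-physical, example only */
	pkt->channel_bytes = 0x200000;
	pkt->data_type_uuid = spar_vhba_channel_protocol_uuid;
}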
+struct controlvm_packet_device_configure {
+ u32 bus_no; /**< bus # (0..n-1) from the msg
* receiver's perspective */
/* Control uses header SegmentIndex field to access bus number... */
- u32 devNo; /**< bus-relative (0..n-1) device number */
-} CONTROLVM_PACKET_DEVICE_CONFIGURE; /* for CONTROLVM_DEVICE_CONFIGURE */
+ u32 dev_no; /**< bus-relative (0..n-1) device number */
+}; /* for CONTROLVM_DEVICE_CONFIGURE */
-typedef struct _CONTROLVM_MESSAGE_DEVICE_CREATE {
- CONTROLVM_MESSAGE_HEADER Header;
- CONTROLVM_PACKET_DEVICE_CREATE Packet;
-} CONTROLVM_MESSAGE_DEVICE_CREATE; /* total 128 bytes */
+struct controlvm_message_device_create {
+ struct controlvm_message_header header;
+ struct controlvm_packet_device_create packet;
+}; /* total 128 bytes */
-typedef struct _CONTROLVM_MESSAGE_DEVICE_CONFIGURE {
- CONTROLVM_MESSAGE_HEADER Header;
- CONTROLVM_PACKET_DEVICE_CONFIGURE Packet;
-} CONTROLVM_MESSAGE_DEVICE_CONFIGURE; /* total 56 bytes */
+struct controlvm_message_device_configure {
+ struct controlvm_message_header header;
+ struct controlvm_packet_device_configure packet;
+}; /* total 56 bytes */
/* This is the format for a message in any ControlVm queue. */
-typedef struct _CONTROLVM_MESSAGE_PACKET {
+struct controlvm_message_packet {
union {
-
- /* BEGIN Request messages */
struct {
- u32 busNo; /*< bus # (0..n-1) from the msg
- * receiver's perspective */
-
- /* Control uses header SegmentIndex field to access bus number... */
- u32 deviceCount; /*< indicates the max number of
- * devices on this bus */
- u64 channelAddr; /*< Guest physical address of the
- * channel, which can be
- * dereferenced by the receiver
- * of this ControlVm command */
- u64 channelBytes; /*< size of the channel in bytes */
- uuid_le busDataTypeGuid;/*< indicates format of data in
- bus channel */
- uuid_le busInstGuid; /*< instance guid for the bus */
- } createBus; /* for CONTROLVM_BUS_CREATE */
+ u32 bus_no; /* bus # (0..n-1) from the msg
+ * receiver's perspective */
+ u32 dev_count; /* indicates the max number of
+ * devices on this bus */
+ u64 channel_addr; /* Guest physical address of
+ * the channel, which can be
+ * dereferenced by the receiver
+ * of this ControlVm command */
+ u64 channel_bytes; /* size of the channel */
+ uuid_le bus_data_type_uuid; /* indicates format of
+ * data in bus channel*/
+ uuid_le bus_inst_uuid; /* instance uuid for the bus */
+ } create_bus; /* for CONTROLVM_BUS_CREATE */
struct {
- u32 busNo; /*< bus # (0..n-1) from the msg
- * receiver's perspective */
-
- /* Control uses header SegmentIndex field to access bus number... */
+ u32 bus_no; /* bus # (0..n-1) from the msg
+ * receiver's perspective */
u32 reserved; /* Natural alignment purposes */
- } destroyBus; /* for CONTROLVM_BUS_DESTROY */
+ } destroy_bus; /* for CONTROLVM_BUS_DESTROY */
struct {
- u32 busNo; /*< bus # (0..n-1) from the
- * msg receiver's
- * perspective */
-
- /* Control uses header SegmentIndex field to access bus number... */
- u32 reserved1; /* for alignment purposes */
- u64 guestHandle; /* This is used to convert
- * guest physical address to real
- * physical address for DMA, for ex. */
- u64 recvBusInterruptHandle;/*< specifies interrupt
- * info. It is used by SP to register
- * to receive interrupts from the CP.
- * This interrupt is used for bus
- * level notifications. The
- * corresponding
- * sendBusInterruptHandle is kept in
- * CP. */
- } configureBus; /* for CONTROLVM_BUS_CONFIGURE */
-
+ u32 bus_no; /* bus # (0..n-1) from the receiver's
+ * perspective */
+ u32 reserved1; /* for alignment purposes */
+ u64 guest_handle; /* This is used to convert
+ * guest physical address to
+ * physical address */
+ u64 recv_bus_irq_handle;
+ /* specifies interrupt info. It is used by SP
+ * to register to receive interrupts from the
+ * CP. This interrupt is used for bus level
+ * notifications. The corresponding
+ * sendBusInterruptHandle is kept in CP. */
+ } configure_bus; /* for CONTROLVM_BUS_CONFIGURE */
/* for CONTROLVM_DEVICE_CREATE */
- CONTROLVM_PACKET_DEVICE_CREATE createDevice;
+ struct controlvm_packet_device_create create_device;
struct {
- u32 busNo; /*< bus # (0..n-1) from the msg
- * receiver's perspective */
-
- /* Control uses header SegmentIndex field to access bus number... */
- u32 devNo; /*< bus-relative (0..n-1) device
- * number */
- } destroyDevice; /* for CONTROLVM_DEVICE_DESTROY */
-
+ u32 bus_no; /* bus # (0..n-1) from the msg
+ * receiver's perspective */
+ u32 dev_no; /* bus-relative (0..n-1) device # */
+ } destroy_device; /* for CONTROLVM_DEVICE_DESTROY */
/* for CONTROLVM_DEVICE_CONFIGURE */
- CONTROLVM_PACKET_DEVICE_CONFIGURE configureDevice;
+ struct controlvm_packet_device_configure configure_device;
struct {
- u32 busNo; /*< bus # (0..n-1) from the msg
- * receiver's perspective */
-
- /* Control uses header SegmentIndex field to access bus number... */
- u32 devNo; /*< bus-relative (0..n-1) device
- * number */
- } reconfigureDevice; /* for CONTROLVM_DEVICE_RECONFIGURE */
+ u32 bus_no; /* bus # (0..n-1) from the msg
+ * receiver's perspective */
+ u32 dev_no; /* bus-relative (0..n-1) device # */
+ } reconfigure_device; /* for CONTROLVM_DEVICE_RECONFIGURE */
struct {
- u32 busNo;
- ULTRA_SEGMENT_STATE state;
+ u32 bus_no;
+ struct spar_segment_state state;
u8 reserved[2]; /* Natural alignment purposes */
- } busChangeState; /* for CONTROLVM_BUS_CHANGESTATE */
+ } bus_change_state; /* for CONTROLVM_BUS_CHANGESTATE */
struct {
- u32 busNo;
- u32 devNo;
- ULTRA_SEGMENT_STATE state;
+ u32 bus_no;
+ u32 dev_no;
+ struct spar_segment_state state;
struct {
- u32 physicalDevice:1; /* =1 if message is for
+ u32 phys_device:1; /* =1 if message is for
* a physical device */
- /* remaining bits in this 32-bit word are available */
} flags;
u8 reserved[2]; /* Natural alignment purposes */
- } deviceChangeState; /* for CONTROLVM_DEVICE_CHANGESTATE */
+ } device_change_state; /* for CONTROLVM_DEVICE_CHANGESTATE */
struct {
- u32 busNo;
- u32 devNo;
- ULTRA_SEGMENT_STATE state;
+ u32 bus_no;
+ u32 dev_no;
+ struct spar_segment_state state;
u8 reserved[6]; /* Natural alignment purposes */
- } deviceChangeStateEvent; /* for CONTROLVM_DEVICE_CHANGESTATE_EVENT */
+ } device_change_state_event;
+ /* for CONTROLVM_DEVICE_CHANGESTATE_EVENT */
struct {
- u32 busCount; /*< indicates the max number of busses */
- u32 switchCount; /*< indicates the max number of
- * switches (applicable for service
- * partition only) */
- ULTRA_CHIPSET_FEATURE features;
- u32 platformNumber; /* Platform Number */
- } initChipset; /* for CONTROLVM_CHIPSET_INIT */
+ u32 bus_count; /* indicates the max number of busses */
+ u32 switch_count; /* indicates the max number of
+ * switches (service partition only) */
+ enum ultra_chipset_feature features;
+ u32 platform_number; /* Platform Number */
+ } init_chipset; /* for CONTROLVM_CHIPSET_INIT */
struct {
- u32 Options; /*< reserved */
- u32 Test; /*< bit 0 set to run embedded selftest */
- } chipsetSelftest; /* for CONTROLVM_CHIPSET_SELFTEST */
-
- /* END Request messages */
-
- /* BEGIN Response messages */
-
- /* END Response messages */
-
- /* BEGIN Event messages */
+ u32 options; /* reserved */
+ u32 test; /* bit 0 set to run embedded selftest */
+ } chipset_selftest; /* for CONTROLVM_CHIPSET_SELFTEST */
+ u64 addr; /* a physical address of something, that can be
+ * dereferenced by the receiver of this
+ * ControlVm command (depends on command id) */
+ u64 handle; /* a handle of something (depends on command
+ * id) */
+ };
+};
- /* END Event messages */
+/* All messages in any ControlVm queue have this layout. */
+struct controlvm_message {
+ struct controlvm_message_header hdr;
+ struct controlvm_message_packet cmd;
+};
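A receiver normally switches on hdr.id and picks the matching union member out of cmd; a minimal dispatch sketch (CONTROLVM_DEVICE_CREATE is one of the enum controlvm_id values elided by the hunk context above):

static void demo_handle_request(struct controlvm_message *msg)
{
	switch (msg->hdr.id) {
	case CONTROLVM_DEVICE_CREATE:
		/* payload is msg->cmd.create_device */
		break;
	case CONTROLVM_CHIPSET_SELFTEST:
		/* bit 0 of msg->cmd.chipset_selftest.test requests the selftest */
		break;
	default:
		break;
	}
}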
- /* BEGIN Ack messages */
+struct device_map {
+ GUEST_PHYSICAL_ADDRESS device_channel_address;
+ u64 device_channel_size;
+ u32 ca_index;
+ u32 reserved; /* natural alignment */
+ u64 reserved2; /* Align structure on 32-byte boundary */
+};
- /* END Ack messages */
- u64 addr; /*< a physical address of something, that
- * can be dereferenced by the receiver of
- * this ControlVm command (depends on
- * command id) */
- u64 handle; /*< a handle of something (depends on
- * command id) */
- };
-} CONTROLVM_MESSAGE_PACKET;
+struct guest_devices {
+ struct device_map video_channel;
+ struct device_map keyboard_channel;
+ struct device_map network_channel;
+ struct device_map storage_channel;
+ struct device_map console_channel;
+ u32 partition_index;
+ u32 pad;
+};
-/* All messages in any ControlVm queue have this layout. */
-typedef struct _CONTROLVM_MESSAGE {
- CONTROLVM_MESSAGE_HEADER hdr;
- CONTROLVM_MESSAGE_PACKET cmd;
-} CONTROLVM_MESSAGE;
-
-typedef struct _DEVICE_MAP {
- GUEST_PHYSICAL_ADDRESS DeviceChannelAddress;
- u64 DeviceChannelSize;
- u32 CA_Index;
- u32 Reserved; /* natural alignment */
- u64 Reserved2; /* Align structure on 32-byte boundary */
-} DEVICE_MAP;
-
-typedef struct _GUEST_DEVICES {
- DEVICE_MAP VideoChannel;
- DEVICE_MAP KeyboardChannel;
- DEVICE_MAP NetworkChannel;
- DEVICE_MAP StorageChannel;
- DEVICE_MAP ConsoleChannel;
- u32 PartitionIndex;
- u32 Pad;
-} GUEST_DEVICES;
-
-typedef struct _ULTRA_CONTROLVM_CHANNEL_PROTOCOL {
- CHANNEL_HEADER Header;
- GUEST_PHYSICAL_ADDRESS gpControlVm; /* guest physical address of
+struct spar_controlvm_channel_protocol {
+ struct channel_header header;
+ GUEST_PHYSICAL_ADDRESS gp_controlvm; /* guest physical address of
* this channel */
- GUEST_PHYSICAL_ADDRESS gpPartitionTables; /* guest physical address of
- * partition tables */
- GUEST_PHYSICAL_ADDRESS gpDiagGuest; /* guest physical address of
+ GUEST_PHYSICAL_ADDRESS gp_partition_tables;/* guest physical address of
+ * partition tables */
+ GUEST_PHYSICAL_ADDRESS gp_diag_guest; /* guest physical address of
* diagnostic channel */
- GUEST_PHYSICAL_ADDRESS gpBootRomDisk; /* guest phys addr of (read
+ GUEST_PHYSICAL_ADDRESS gp_boot_romdisk;/* guest phys addr of (read
* only) Boot ROM disk */
- GUEST_PHYSICAL_ADDRESS gpBootRamDisk; /* guest phys addr of writable
+ GUEST_PHYSICAL_ADDRESS gp_boot_ramdisk;/* guest phys addr of writable
* Boot RAM disk */
- GUEST_PHYSICAL_ADDRESS gpAcpiTable; /* guest phys addr of acpi
+ GUEST_PHYSICAL_ADDRESS gp_acpi_table; /* guest phys addr of acpi
* table */
- GUEST_PHYSICAL_ADDRESS gpControlChannel; /* guest phys addr of control
- * channel */
- GUEST_PHYSICAL_ADDRESS gpDiagRomDisk; /* guest phys addr of diagnostic
+ GUEST_PHYSICAL_ADDRESS gp_control_channel;/* guest phys addr of control
+ * channel */
+ GUEST_PHYSICAL_ADDRESS gp_diag_romdisk;/* guest phys addr of diagnostic
* ROM disk */
- GUEST_PHYSICAL_ADDRESS gpNvram; /* guest phys addr of NVRAM
+ GUEST_PHYSICAL_ADDRESS gp_nvram; /* guest phys addr of NVRAM
* channel */
- u64 RequestPayloadOffset; /* Offset to request payload area */
- u64 EventPayloadOffset; /* Offset to event payload area */
- u32 RequestPayloadBytes; /* Bytes available in request payload
+ u64 request_payload_offset; /* Offset to request payload area */
+ u64 event_payload_offset; /* Offset to event payload area */
+ u32 request_payload_bytes; /* Bytes available in request payload
* area */
- u32 EventPayloadBytes; /* Bytes available in event payload area */
- u32 ControlChannelBytes;
- u32 NvramChannelBytes; /* Bytes in PartitionNvram segment */
- u32 MessageBytes; /* sizeof(CONTROLVM_MESSAGE) */
- u32 MessageCount; /* CONTROLVM_MESSAGE_MAX */
- GUEST_PHYSICAL_ADDRESS gpSmbiosTable; /* guest phys addr of SMBIOS
+ u32 event_payload_bytes;/* Bytes available in event payload area */
+ u32 control_channel_bytes;
+ u32 nvram_channel_bytes; /* Bytes in PartitionNvram segment */
+ u32 message_bytes; /* sizeof(CONTROLVM_MESSAGE) */
+ u32 message_count; /* CONTROLVM_MESSAGE_MAX */
+ GUEST_PHYSICAL_ADDRESS gp_smbios_table;/* guest phys addr of SMBIOS
* tables */
- GUEST_PHYSICAL_ADDRESS gpPhysicalSmbiosTable; /* guest phys addr of
- * SMBIOS table */
+ GUEST_PHYSICAL_ADDRESS gp_physical_smbios_table;/* guest phys addr of
+ * SMBIOS table */
/* ULTRA_MAX_GUESTS_PER_SERVICE */
- GUEST_DEVICES gpObsoleteGuestDevices[16];
+ struct guest_devices gp_obsolete_guest_devices[16];
/* guest physical address of EFI firmware image base */
- GUEST_PHYSICAL_ADDRESS VirtualGuestFirmwareImageBase;
+ GUEST_PHYSICAL_ADDRESS virtual_guest_firmware_image_base;
/* guest physical address of EFI firmware entry point */
- GUEST_PHYSICAL_ADDRESS VirtualGuestFirmwareEntryPoint;
+ GUEST_PHYSICAL_ADDRESS virtual_guest_firmware_entry_point;
/* guest EFI firmware image size */
- u64 VirtualGuestFirmwareImageSize;
+ u64 virtual_guest_firmware_image_size;
/* GPA = 1MB where EFI firmware image is copied to */
- GUEST_PHYSICAL_ADDRESS VirtualGuestFirmwareBootBase;
- GUEST_PHYSICAL_ADDRESS VirtualGuestImageBase;
- GUEST_PHYSICAL_ADDRESS VirtualGuestImageSize;
- u64 PrototypeControlChannelOffset;
- GUEST_PHYSICAL_ADDRESS VirtualGuestPartitionHandle;
+ GUEST_PHYSICAL_ADDRESS virtual_guest_firmware_boot_base;
+ GUEST_PHYSICAL_ADDRESS virtual_guest_image_base;
+ GUEST_PHYSICAL_ADDRESS virtual_guest_image_size;
+ u64 prototype_control_channel_offset;
+ GUEST_PHYSICAL_ADDRESS virtual_guest_partition_handle;
- u16 RestoreAction; /* Restore Action field to restore the guest
+ u16 restore_action; /* Restore Action field to restore the guest
* partition */
- u16 DumpAction; /* For Windows guests it shows if the visordisk
+ u16 dump_action; /* For Windows guests it shows if the visordisk
* is running in dump mode */
- u16 NvramFailCount;
- u16 SavedCrashMsgCount; /* = CONTROLVM_CRASHMSG_MAX */
- u32 SavedCrashMsgOffset; /* Offset to request payload area needed
+ u16 nvram_fail_count;
+ u16 saved_crash_message_count; /* = CONTROLVM_CRASHMSG_MAX */
+ u32 saved_crash_message_offset; /* Offset to request payload area needed
* for crash dump */
- u32 InstallationError; /* Type of error encountered during
+ u32 installation_error; /* Type of error encountered during
* installation */
- u32 InstallationTextId; /* Id of string to display */
- u16 InstallationRemainingSteps; /* Number of remaining installation
- * steps (for progress bars) */
- u8 ToolAction; /* ULTRA_TOOL_ACTIONS Installation Action
+ u32 installation_text_id; /* Id of string to display */
+ u16 installation_remaining_steps;/* Number of remaining installation
+ * steps (for progress bars) */
+ u8 tool_action; /* ULTRA_TOOL_ACTIONS Installation Action
* field */
- u8 Reserved; /* alignment */
- ULTRA_EFI_SPAR_INDICATION EfiSparIndication;
- ULTRA_EFI_SPAR_INDICATION EfiSparIndicationSupported;
- u32 SPReserved;
- u8 Reserved2[28]; /* Force signals to begin on 128-byte cache
+ u8 reserved; /* alignment */
+ struct efi_spar_indication efi_spar_ind;
+ struct efi_spar_indication efi_spar_ind_supported;
+ u32 sp_reserved;
+ u8 reserved2[28]; /* Force signals to begin on 128-byte cache
* line */
- SIGNAL_QUEUE_HEADER RequestQueue; /* Service or guest partition
- * uses this queue to send
- * requests to Control */
- SIGNAL_QUEUE_HEADER ResponseQueue; /* Control uses this queue to
- * respond to service or guest
- * partition requests */
- SIGNAL_QUEUE_HEADER EventQueue; /* Control uses this queue to
+ struct signal_queue_header request_queue;/* Service or guest partition
+ * uses this queue to send
+ * requests to Control */
+ struct signal_queue_header response_queue;/* Control uses this queue to
+ * respond to service or guest
+ * partition requests */
+ struct signal_queue_header event_queue; /* Control uses this queue to
* send events to service or
* guest partition */
- SIGNAL_QUEUE_HEADER EventAckQueue; /* Service or guest partition
- * uses this queue to ack
- * Control events */
+ struct signal_queue_header event_ack_queue;/* Service or guest partition
+ * uses this queue to ack
+ * Control events */
/* Request fixed-size message pool - does not include payload */
- CONTROLVM_MESSAGE RequestMsg[CONTROLVM_MESSAGE_MAX];
+ struct controlvm_message request_msg[CONTROLVM_MESSAGE_MAX];
/* Response fixed-size message pool - does not include payload */
- CONTROLVM_MESSAGE ResponseMsg[CONTROLVM_MESSAGE_MAX];
+ struct controlvm_message response_msg[CONTROLVM_MESSAGE_MAX];
/* Event fixed-size message pool - does not include payload */
- CONTROLVM_MESSAGE EventMsg[CONTROLVM_MESSAGE_MAX];
+ struct controlvm_message event_msg[CONTROLVM_MESSAGE_MAX];
/* Ack fixed-size message pool - does not include payload */
- CONTROLVM_MESSAGE EventAckMsg[CONTROLVM_MESSAGE_MAX];
+ struct controlvm_message event_ack_msg[CONTROLVM_MESSAGE_MAX];
/* Message stored during IOVM creation to be reused after crash */
- CONTROLVM_MESSAGE SavedCrashMsg[CONTROLVM_CRASHMSG_MAX];
-} ULTRA_CONTROLVM_CHANNEL_PROTOCOL;
+ struct controlvm_message saved_crash_msg[CONTROLVM_CRASHMSG_MAX];
+};
/* Offsets for VM channel attributes... */
#define VM_CH_REQ_QUEUE_OFFSET \
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, RequestQueue)
+ offsetof(struct spar_controlvm_channel_protocol, request_queue)
#define VM_CH_RESP_QUEUE_OFFSET \
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, ResponseQueue)
+ offsetof(struct spar_controlvm_channel_protocol, response_queue)
#define VM_CH_EVENT_QUEUE_OFFSET \
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, EventQueue)
+ offsetof(struct spar_controlvm_channel_protocol, event_queue)
#define VM_CH_ACK_QUEUE_OFFSET \
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, EventAckQueue)
+ offsetof(struct spar_controlvm_channel_protocol, event_ack_queue)
#define VM_CH_REQ_MSG_OFFSET \
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, RequestMsg)
+ offsetof(struct spar_controlvm_channel_protocol, request_msg)
#define VM_CH_RESP_MSG_OFFSET \
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, ResponseMsg)
+ offsetof(struct spar_controlvm_channel_protocol, response_msg)
#define VM_CH_EVENT_MSG_OFFSET \
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, EventMsg)
+ offsetof(struct spar_controlvm_channel_protocol, event_msg)
#define VM_CH_ACK_MSG_OFFSET \
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, EventAckMsg)
+ offsetof(struct spar_controlvm_channel_protocol, event_ack_msg)
#define VM_CH_CRASH_MSG_OFFSET \
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, SavedCrashMsg)
+ offsetof(struct spar_controlvm_channel_protocol, saved_crash_msg)
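These offsets simply mirror the member layout of spar_controlvm_channel_protocol; a sketch of reaching the request queue of a mapped channel through the macro (equivalent to &chan->request_queue):

static struct signal_queue_header __iomem *
demo_request_queue(struct spar_controlvm_channel_protocol __iomem *chan)
{
	return (struct signal_queue_header __iomem *)
		((u8 __iomem *)chan + VM_CH_REQ_QUEUE_OFFSET);
}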
/* The following header will be located at the beginning of PayloadVmOffset for
- * various ControlVm commands. The receiver of a ControlVm command with a
- * PayloadVmOffset will dereference this address and then use ConnectionOffset,
- * InitiatorOffset, and TargetOffset to get the location of UTF-8 formatted
- * strings that can be parsed to obtain command-specific information. The value
- * of TotalLength should equal PayloadBytes. The format of the strings at
- * PayloadVmOffset will take different forms depending on the message. See the
- * following Wiki page for more information:
- * https://ustr-linux-1.na.uis.unisys.com/spar/index.php/ControlVm_Parameters_Area
+ * various ControlVm commands. The receiver of a ControlVm command with a
+ * PayloadVmOffset will dereference this address and then use connection_offset,
+ * initiator_offset, and target_offset to get the location of UTF-8 formatted
+ * strings that can be parsed to obtain command-specific information. The value
+ * of total_length should equal PayloadBytes. The format of the strings at
+ * PayloadVmOffset will take different forms depending on the message.
*/
-typedef struct _ULTRA_CONTROLVM_PARAMETERS_HEADER {
- u32 TotalLength;
- u32 HeaderLength;
- u32 ConnectionOffset;
- u32 ConnectionLength;
- u32 InitiatorOffset;
- u32 InitiatorLength;
- u32 TargetOffset;
- u32 TargetLength;
- u32 ClientOffset;
- u32 ClientLength;
- u32 NameOffset;
- u32 NameLength;
- uuid_le Id;
- u32 Revision;
- u32 Reserved; /* Natural alignment */
-} ULTRA_CONTROLVM_PARAMETERS_HEADER;
+struct spar_controlvm_parameters_header {
+ u32 total_length;
+ u32 header_length;
+ u32 connection_offset;
+ u32 connection_length;
+ u32 initiator_offset;
+ u32 initiator_length;
+ u32 target_offset;
+ u32 target_length;
+ u32 client_offset;
+ u32 client_length;
+ u32 name_offset;
+ u32 name_length;
+ uuid_le id;
+ u32 revision;
+ u32 reserved; /* Natural alignment */
+};
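A parsing sketch for the parameters area; it assumes the offsets are relative to the start of the area and that payload_bytes bytes starting at PayloadVmOffset have already been copied into buf (both are assumptions, not spelled out by the header):

static char *demo_initiator_string(void *buf)
{
	struct spar_controlvm_parameters_header *phdr = buf;

	/* initiator_length bounds the UTF-8 string found at this offset */
	return (char *)buf + phdr->initiator_offset;
}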
#endif /* __CONTROLVMCHANNEL_H__ */
diff --git a/drivers/staging/unisys/common-spar/include/channels/diagchannel.h b/drivers/staging/unisys/common-spar/include/channels/diagchannel.h
index 9912e51b89b..e8fb8678a8e 100644
--- a/drivers/staging/unisys/common-spar/include/channels/diagchannel.h
+++ b/drivers/staging/unisys/common-spar/include/channels/diagchannel.h
@@ -37,12 +37,12 @@
#include "channel.h"
/* {EEA7A573-DB82-447c-8716-EFBEAAAE4858} */
-#define ULTRA_DIAG_CHANNEL_PROTOCOL_GUID \
+#define SPAR_DIAG_CHANNEL_PROTOCOL_UUID \
UUID_LE(0xeea7a573, 0xdb82, 0x447c, \
0x87, 0x16, 0xef, 0xbe, 0xaa, 0xae, 0x48, 0x58)
-static const uuid_le UltraDiagChannelProtocolGuid =
- ULTRA_DIAG_CHANNEL_PROTOCOL_GUID;
+static const uuid_le spar_diag_channel_protocol_uuid =
+ SPAR_DIAG_CHANNEL_PROTOCOL_UUID;
/* {E850F968-3263-4484-8CA5-2A35D087A5A8} */
#define ULTRA_DIAG_ROOT_CHANNEL_PROTOCOL_GUID \
@@ -58,19 +58,20 @@ static const uuid_le UltraDiagChannelProtocolGuid =
* increment this. */
#define ULTRA_DIAG_CHANNEL_PROTOCOL_VERSIONID 2
-#define ULTRA_DIAG_CHANNEL_OK_CLIENT(pChannel, logCtx) \
- (ULTRA_check_channel_client(pChannel, \
- UltraDiagChannelProtocolGuid, \
- "diag", \
- sizeof(ULTRA_DIAG_CHANNEL_PROTOCOL), \
- ULTRA_DIAG_CHANNEL_PROTOCOL_VERSIONID, \
- ULTRA_DIAG_CHANNEL_PROTOCOL_SIGNATURE, \
- __FILE__, __LINE__, logCtx))
-#define ULTRA_DIAG_CHANNEL_OK_SERVER(actualBytes, logCtx) \
- (ULTRA_check_channel_server(UltraDiagChannelProtocolGuid, \
- "diag", \
- sizeof(ULTRA_DIAG_CHANNEL_PROTOCOL), \
- actualBytes, __FILE__, __LINE__, logCtx))
+#define SPAR_DIAG_CHANNEL_OK_CLIENT(ch)\
+ (spar_check_channel_client(ch,\
+ spar_diag_channel_protocol_uuid,\
+ "diag",\
+ sizeof(struct spar_diag_channel_protocol),\
+ ULTRA_DIAG_CHANNEL_PROTOCOL_VERSIONID,\
+ ULTRA_DIAG_CHANNEL_PROTOCOL_SIGNATURE))
+
+#define SPAR_DIAG_CHANNEL_OK_SERVER(bytes)\
+ (spar_check_channel_server(spar_diag_channel_protocol_uuid,\
+ "diag",\
+ sizeof(struct spar_diag_channel_protocol),\
+ bytes))
+
#define MAX_MODULE_NAME_SIZE 128 /* Maximum length of module name... */
#define MAX_ADDITIONAL_INFO_SIZE 256 /* Maximum length of any additional info
* accompanying event... */
@@ -105,21 +106,21 @@ static const uuid_le UltraDiagChannelProtocolGuid =
/* Copied from EFI's EFI_TIME struct in efidef.h. EFI headers are not allowed
* in some of the Supervisor areas, such as Monitor, so it has been "ported" here
* for use in diagnostic event timestamps... */
-typedef struct _DIAG_EFI_TIME {
- u16 Year; /* 1998 - 20XX */
- u8 Month; /* 1 - 12 */
- u8 Day; /* 1 - 31 */
- u8 Hour; /* 0 - 23 */
- u8 Minute; /* 0 - 59 */
- u8 Second; /* 0 - 59 */
- u8 Pad1;
- u32 Nanosecond; /* 0 - 999, 999, 999 */
- s16 TimeZone; /* -1440 to 1440 or 2047 */
- u8 Daylight;
- u8 Pad2;
-} DIAG_EFI_TIME;
-
-typedef enum {
+struct diag_efi_time {
+ u16 year; /* 1998 - 20XX */
+ u8 month; /* 1 - 12 */
+ u8 day; /* 1 - 31 */
+ u8 hour; /* 0 - 23 */
+ u8 minute; /* 0 - 59 */
+ u8 second; /* 0 - 59 */
+ u8 pad1;
+ u32 nanosecond; /* 0 - 999, 999, 999 */
+ s16 timezone; /* -1440 to 1440 or 2047 */
+ u8 daylight;
+ u8 pad2;
+};
+
+enum spar_component_types {
ULTRA_COMPONENT_GUEST = 0,
ULTRA_COMPONENT_MONITOR = 0x01,
ULTRA_COMPONENT_CCM = 0x02, /* Common Control module */
@@ -144,9 +145,9 @@ typedef enum {
ULTRA_COMPONENT_PSERVICES = 0x17,
ULTRA_COMPONENT_PDIAG = 0x18
/* RESERVED 0x18 - 0x1F */
-} ULTRA_COMPONENT_TYPES;
+};
-/* Structure: DIAG_CHANNEL_EVENT Purpose: Contains attributes that make up an
+/* Structure: diag_channel_event Purpose: Contains attributes that make up an
* event to be written to the DIAG_CHANNEL memory. Attributes: EventId: Id of
* the diagnostic event to write to memory. Severity: Severity of the event
* (Error, Info, etc). ModuleName: Module/file name where event originated.
@@ -155,40 +156,40 @@ typedef enum {
* Reserved: Padding to align structure on a 64-byte cache line boundary.
* AdditionalInfo: Array of characters for additional event info (may be
* empty). */
-typedef struct _DIAG_CHANNEL_EVENT {
- u32 EventId;
- u32 Severity;
- u8 ModuleName[MAX_MODULE_NAME_SIZE];
- u32 LineNumber;
- DIAG_EFI_TIME Timestamp; /* Size = 16 bytes */
- u32 PartitionNumber; /* Filled in by Diag Switch as pool blocks are
+struct diag_channel_event {
+ u32 event_id;
+ u32 severity;
+ u8 module_name[MAX_MODULE_NAME_SIZE];
+ u32 line_number;
+ struct diag_efi_time timestamp; /* Size = 16 bytes */
+ u32 partition_number; /* Filled in by Diag Switch as pool blocks are
* filled */
- u16 VirtualProcessorNumber;
- u16 LogicalProcessorNumber;
- u8 ComponentType; /* ULTRA_COMPONENT_TYPES */
- u8 Subsystem;
- u16 Reserved0; /* pad to u64 alignment */
- u32 BlockNumber; /* filled in by DiagSwitch as pool blocks are
+ u16 vcpu_number;
+ u16 lcpu_number;
+ u8 component_type; /* ULTRA_COMPONENT_TYPES */
+ u8 subsystem;
+ u16 reserved0; /* pad to u64 alignment */
+ u32 block_no; /* filled in by DiagSwitch as pool blocks are
* filled */
- u32 BlockNumberHigh;
- u32 EventNumber; /* filled in by DiagSwitch as pool blocks are
+ u32 block_no_high;
+ u32 event_no; /* filled in by DiagSwitch as pool blocks are
* filled */
- u32 EventNumberHigh;
+ u32 event_no_high;
- /* The BlockNumber and EventNumber fields are set only by DiagSwitch
+ /* The block_no and event_no fields are set only by DiagSwitch
* and referenced only by WinDiagDisplay formatting tool as
* additional diagnostic information. Other tools including
* WinDiagDisplay currently ignore these 'Reserved' bytes. */
- u8 Reserved[8];
- u8 AdditionalInfo[MAX_ADDITIONAL_INFO_SIZE];
+ u8 reserved[8];
+ u8 additional_info[MAX_ADDITIONAL_INFO_SIZE];
- /* NOTE: Changesto DIAG_CHANNEL_EVENT generally need to be reflected in
+ /* NOTE: Changes to diag_channel_event generally need to be reflected in
* existing copies *
* - for AppOS at
* GuestLinux/visordiag_early/supervisor_diagchannel.h *
* - for WinDiagDisplay at
* EFI/Ultra/Tools/WinDiagDisplay/WinDiagDisplay/diagstruct.h */
-} DIAG_CHANNEL_EVENT;
+};
/* Levels of severity for diagnostic events, in order from lowest severity to
* highest (i.e. fatal errors are the most severe, and should always be logged,
@@ -201,7 +202,8 @@ typedef struct _DIAG_CHANNEL_EVENT {
* they are valid for controlling the amount of event data. This enum is also
* defined in DotNet\sParFramework\ControlFramework\ControlFramework.cs. If a
* change is made to this enum, they should also be reflected in that file. */
-typedef enum { DIAG_SEVERITY_ENUM_BEGIN = 0,
+enum diag_severity {
+ DIAG_SEVERITY_ENUM_BEGIN = 0,
DIAG_SEVERITY_OVERRIDE = DIAG_SEVERITY_ENUM_BEGIN,
DIAG_SEVERITY_VERBOSE = DIAG_SEVERITY_OVERRIDE, /* 0 */
DIAG_SEVERITY_INFO = DIAG_SEVERITY_VERBOSE + 1, /* 1 */
@@ -212,7 +214,7 @@ typedef enum { DIAG_SEVERITY_ENUM_BEGIN = 0,
DIAG_SEVERITY_ENUM_END = DIAG_SEVERITY_SHUTOFF, /* 5 */
DIAG_SEVERITY_NONFATAL_ERR = DIAG_SEVERITY_ERR,
DIAG_SEVERITY_FATAL_ERR = DIAG_SEVERITY_PRINT
-} DIAG_SEVERITY;
+};
/* Event Cause enums
*
@@ -233,26 +235,24 @@ typedef enum { DIAG_SEVERITY_ENUM_BEGIN = 0,
* If a change is made to this enum, they should also be reflected in that
* file. */
-
-
/* A cause value "DIAG_CAUSE_FILE_XFER" together with a severity value of
* "DIAG_SEVERITY_PRINT" (=4), is used for transferring text or binary file to
* the Diag partition. This cause-severity combination will be used by Logger
* DiagSwitch to segregate events into block types. The files are transferred in
-* 256 byte chunks maximum, in the AdditionalInfo field of the DIAG_CHANNEL_EVENT
+* 256 byte chunks maximum, in the AdditionalInfo field of the diag_channel_event
* structure. In the file transfer mode, some event fields will have different
* meaning: EventId specifies the file offset, severity specifies the block type,
* ModuleName specifies the filename, LineNumber specifies the number of valid
* data bytes in an event and AdditionalInfo contains up to 256 bytes of data. */
/* The Diag DiagWriter appends event blocks to events.raw as today, and for data
- * blocks uses DIAG_CHANNEL_EVENT
+ * blocks uses diag_channel_event
* PartitionNumber to extract and append 'AdditionalInfo' to filename (specified
* by ModuleName). */
/* The Dell PDiag uses this new mechanism to stash DSET .zip onto the
* 'diagnostic' virtual disk. */
-typedef enum {
+enum diag_cause {
DIAG_CAUSE_UNKNOWN = 0,
DIAG_CAUSE_UNKNOWN_DEBUG = DIAG_CAUSE_UNKNOWN + 1, /* 1 */
DIAG_CAUSE_DEBUG = DIAG_CAUSE_UNKNOWN_DEBUG + 1, /* 2 */
@@ -264,7 +264,7 @@ typedef enum {
DIAG_CAUSE_INTERNAL_ERROR = DIAG_CAUSE_INVALID_REQUEST + 1, /* 8 */
DIAG_CAUSE_FILE_XFER = DIAG_CAUSE_INTERNAL_ERROR + 1, /* 9 */
DIAG_CAUSE_ENUM_END = DIAG_CAUSE_FILE_XFER /* 9 */
-} DIAG_CAUSE;
+};
/* Event Cause category defined into the byte 2 of Severity */
#define CAUSE_DEBUG (DIAG_CAUSE_DEBUG << CAUSE_SHIFT_AMT)
@@ -344,7 +344,7 @@ typedef enum {
#define CAUSE_FILE_XFER_SEVERITY_PRINT \
(CAUSE_FILE_XFER | DIAG_SEVERITY_PRINT)
-/* Structure: DIAG_CHANNEL_PROTOCOL_HEADER
+/* Structure: diag_channel_protocol_header
*
* Purpose: Contains attributes that make up the header specific to the
* DIAG_CHANNEL area.
@@ -362,12 +362,12 @@ typedef enum {
* whether events are logged. Any event's severity for a
* particular subsystem below this level will be discarded.
*/
-typedef struct _DIAG_CHANNEL_PROTOCOL_HEADER {
- volatile u32 DiagLock;
- u8 IsChannelInitialized;
- u8 Reserved[3];
- u8 SubsystemSeverityFilter[64];
-} DIAG_CHANNEL_PROTOCOL_HEADER;
+struct diag_channel_protocol_header {
+ u32 diag_lock;
+ u8 channel_initialized;
+ u8 reserved[3];
+ u8 subsystem_severity_filter[64];
+};
/* The Diagram for the Diagnostic Channel: */
/* ----------------------- */
@@ -375,19 +375,20 @@ typedef struct _DIAG_CHANNEL_PROTOCOL_HEADER {
/* ----------------------- */
/* | Signal Queue Header | Defined by SIGNAL_QUEUE_HEADER */
/* ----------------------- */
-/* | DiagChannel Header | Defined by DIAG_CHANNEL_PROTOCOL_HEADER */
+/* | DiagChannel Header | Defined by diag_channel_protocol_header */
/* ----------------------- */
-/* | Channel Event Info | Defined by (DIAG_CHANNEL_EVENT * MAX_EVENTS) */
+/* | Channel Event Info | Defined by diag_channel_event*MAX_EVENTS */
/* ----------------------- */
/* | Reserved | Reserved (pad out to 4MB) */
/* ----------------------- */
/* Offsets/sizes for diagnostic channel attributes... */
-#define DIAG_CH_QUEUE_HEADER_OFFSET (sizeof(ULTRA_CHANNEL_PROTOCOL))
-#define DIAG_CH_QUEUE_HEADER_SIZE (sizeof(SIGNAL_QUEUE_HEADER))
+#define DIAG_CH_QUEUE_HEADER_OFFSET (sizeof(struct channel_header))
+#define DIAG_CH_QUEUE_HEADER_SIZE (sizeof(struct signal_queue_header))
#define DIAG_CH_PROTOCOL_HEADER_OFFSET \
(DIAG_CH_QUEUE_HEADER_OFFSET + DIAG_CH_QUEUE_HEADER_SIZE)
-#define DIAG_CH_PROTOCOL_HEADER_SIZE (sizeof(DIAG_CHANNEL_PROTOCOL_HEADER))
+#define DIAG_CH_PROTOCOL_HEADER_SIZE \
+ (sizeof(struct diag_channel_protocol_header))
#define DIAG_CH_EVENT_OFFSET \
(DIAG_CH_PROTOCOL_HEADER_OFFSET + DIAG_CH_PROTOCOL_HEADER_SIZE)
#define DIAG_CH_SIZE (4096 * 1024)
@@ -397,7 +398,7 @@ typedef struct _DIAG_CHANNEL_PROTOCOL_HEADER {
#define DIAG_CH_LRG_SIZE (2 * DIAG_CH_SIZE) /* 8 MB */
/*
- * Structure: ULTRA_DIAG_CHANNEL_PROTOCOL
+ * Structure: spar_diag_channel_protocol
*
* Purpose: Contains attributes that make up the DIAG_CHANNEL memory.
*
@@ -409,19 +410,18 @@ typedef struct _DIAG_CHANNEL_PROTOCOL_HEADER {
* store event.
*
* DiagChannelHeader: Diagnostic channel header info (see
- * DIAG_CHANNEL_PROTOCOL_HEADER comments).
+ * diag_channel_protocol_header comments).
*
* Events: Area where diagnostic events (up to MAX_EVENTS) are written.
*
* Reserved: Reserved area to allow for correct channel size padding.
*/
-typedef struct _ULTRA_DIAG_CHANNEL_PROTOCOL {
- ULTRA_CHANNEL_PROTOCOL CommonChannelHeader;
- SIGNAL_QUEUE_HEADER QueueHeader;
- DIAG_CHANNEL_PROTOCOL_HEADER DiagChannelHeader;
- DIAG_CHANNEL_EVENT Events[(DIAG_CH_SIZE - DIAG_CH_EVENT_OFFSET) /
- sizeof(DIAG_CHANNEL_EVENT)];
-}
-ULTRA_DIAG_CHANNEL_PROTOCOL;
+struct spar_diag_channel_protocol {
+ struct channel_header common_channel_header;
+ struct signal_queue_header queue_header;
+ struct diag_channel_protocol_header diag_channel_header;
+ struct diag_channel_event events[(DIAG_CH_SIZE - DIAG_CH_EVENT_OFFSET) /
+ sizeof(struct diag_channel_event)];
+};
#endif
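To make the offset arithmetic in the renamed DIAG_CH_* macros concrete, here is a small stand-alone sketch of how DIAG_CH_EVENT_OFFSET and the events[] bound in struct spar_diag_channel_protocol are derived. The sizes assumed for struct channel_header, struct signal_queue_header and struct diag_channel_event are placeholders (the real definitions live in channel.h and are not part of this hunk), so treat it as an illustration of the layout math only, not the driver's actual values.

#include <stdio.h>

/* Placeholder sizes; the real structs live in channel.h and diagchannel.h. */
#define SIZEOF_CHANNEL_HEADER		128	/* assumed */
#define SIZEOF_SIGNAL_QUEUE_HEADER	 64	/* assumed */
#define SIZEOF_DIAG_PROTOCOL_HEADER	 72	/* 4 + 1 + 3 + 64, per the struct above */
#define SIZEOF_DIAG_CHANNEL_EVENT	256	/* assumed */

#define DIAG_CH_SIZE (4096 * 1024)		/* 4 MB, as in the header */

int main(void)
{
	unsigned long queue_hdr_off = SIZEOF_CHANNEL_HEADER;
	unsigned long proto_hdr_off = queue_hdr_off + SIZEOF_SIGNAL_QUEUE_HEADER;
	unsigned long event_off = proto_hdr_off + SIZEOF_DIAG_PROTOCOL_HEADER;

	/* Mirrors the events[] bound:
	 * (DIAG_CH_SIZE - DIAG_CH_EVENT_OFFSET) / sizeof(struct diag_channel_event)
	 */
	unsigned long max_events =
		(DIAG_CH_SIZE - event_off) / SIZEOF_DIAG_CHANNEL_EVENT;

	printf("event area starts at offset %lu and holds %lu events\n",
	       event_off, max_events);
	return 0;
}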
diff --git a/drivers/staging/unisys/common-spar/include/channels/iochannel.h b/drivers/staging/unisys/common-spar/include/channels/iochannel.h
index b1dd73d1f42..eb7efe484f6 100644
--- a/drivers/staging/unisys/common-spar/include/channels/iochannel.h
+++ b/drivers/staging/unisys/common-spar/include/channels/iochannel.h
@@ -8,7 +8,6 @@
* this file. Note: Everything is OS-independent because this file is
* used by Windows, Linux and possibly EFI drivers. */
-
/*
* Communication flow between the IOPart and GuestPart uses the channel headers
* channel state. The following states are currently being used:
@@ -60,42 +59,22 @@
#define ULTRA_VNIC_CHANNEL_PROTOCOL_VERSIONID 2
#define ULTRA_VSWITCH_CHANNEL_PROTOCOL_VERSIONID 1
-#define ULTRA_VHBA_CHANNEL_OK_CLIENT(pChannel, logCtx) \
- (ULTRA_check_channel_client(pChannel, UltraVhbaChannelProtocolGuid, \
- "vhba", MIN_IO_CHANNEL_SIZE, \
- ULTRA_VHBA_CHANNEL_PROTOCOL_VERSIONID, \
- ULTRA_VHBA_CHANNEL_PROTOCOL_SIGNATURE, \
- __FILE__, __LINE__, logCtx))
-#define ULTRA_VHBA_CHANNEL_OK_SERVER(actualBytes, logCtx) \
- (ULTRA_check_channel_server(UltraVhbaChannelProtocolGuid, \
- "vhba", MIN_IO_CHANNEL_SIZE, actualBytes, \
- __FILE__, __LINE__, logCtx))
-#define ULTRA_VNIC_CHANNEL_OK_CLIENT(pChannel, logCtx) \
- (ULTRA_check_channel_client(pChannel, UltraVnicChannelProtocolGuid, \
- "vnic", MIN_IO_CHANNEL_SIZE, \
- ULTRA_VNIC_CHANNEL_PROTOCOL_VERSIONID, \
- ULTRA_VNIC_CHANNEL_PROTOCOL_SIGNATURE, \
- __FILE__, __LINE__, logCtx))
-#define ULTRA_VNIC_CHANNEL_OK_SERVER(actualBytes, logCtx) \
- (ULTRA_check_channel_server(UltraVnicChannelProtocolGuid, \
- "vnic", MIN_IO_CHANNEL_SIZE, actualBytes, \
- __FILE__, __LINE__, logCtx))
-#define ULTRA_VSWITCH_CHANNEL_OK_CLIENT(pChannel, logCtx) \
- (ULTRA_check_channel_client(pChannel, UltraVswitchChannelProtocolGuid, \
- "vswitch", MIN_IO_CHANNEL_SIZE, \
- ULTRA_VSWITCH_CHANNEL_PROTOCOL_VERSIONID, \
- ULTRA_VSWITCH_CHANNEL_PROTOCOL_SIGNATURE, \
- __FILE__, __LINE__, logCtx))
-#define ULTRA_VSWITCH_CHANNEL_OK_SERVER(actualBytes, logCtx) \
- (ULTRA_check_channel_server(UltraVswitchChannelProtocolGuid, \
- "vswitch", MIN_IO_CHANNEL_SIZE, \
- actualBytes, \
- __FILE__, __LINE__, logCtx))
+#define SPAR_VHBA_CHANNEL_OK_CLIENT(ch) \
+ (spar_check_channel_client(ch, spar_vhba_channel_protocol_uuid, \
+ "vhba", MIN_IO_CHANNEL_SIZE, \
+ ULTRA_VHBA_CHANNEL_PROTOCOL_VERSIONID, \
+ ULTRA_VHBA_CHANNEL_PROTOCOL_SIGNATURE))
+
+#define SPAR_VNIC_CHANNEL_OK_CLIENT(ch) \
+ (spar_check_channel_client(ch, spar_vnic_channel_protocol_uuid, \
+ "vnic", MIN_IO_CHANNEL_SIZE, \
+ ULTRA_VNIC_CHANNEL_PROTOCOL_VERSIONID, \
+ ULTRA_VNIC_CHANNEL_PROTOCOL_SIGNATURE))
+
/*
* Everything necessary to handle SCSI & NIC traffic between Guest Partition and
* IO Partition is defined below. */
-
/*
* Defines and enums.
*/
@@ -172,8 +151,9 @@
* SCSI Host value */
/* various types of network packets that can be sent in cmdrsp */
-typedef enum { NET_RCV_POST = 0, /* submit buffer to hold receiving
- * incoming packet */
+enum net_types {
+ NET_RCV_POST = 0, /* submit buffer to hold receiving
+ * incoming packet */
/* virtnic -> uisnic */
NET_RCV, /* incoming packet received */
/* uisnic -> virtpci */
@@ -195,7 +175,7 @@ typedef enum { NET_RCV_POST = 0, /* submit buffer to hold receiving
* its MAC addr */
NET_MACADDR_ACK, /* MAC address */
-} NET_TYPES;
+};
#define ETH_HEADER_SIZE 14 /* size of ethernet header */
@@ -211,19 +191,23 @@ typedef enum { NET_RCV_POST = 0, /* submit buffer to hold receiving
#define MAX_MACADDR_LEN 6 /* number of bytes in MAC address */
#endif /* MAX_MACADDR_LEN */
-#define ETH_IS_LOCALLY_ADMINISTERED(Address) \
- (((u8 *) (Address))[0] & ((u8) 0x02))
+#define ETH_IS_LOCALLY_ADMINISTERED(address) \
+ (((u8 *)(address))[0] & ((u8)0x02))
#define NIC_VENDOR_ID 0x0008000B
/* various types of scsi task mgmt commands */
-typedef enum { TASK_MGMT_ABORT_TASK =
- 1, TASK_MGMT_BUS_RESET, TASK_MGMT_LUN_RESET,
- TASK_MGMT_TARGET_RESET,
-} TASK_MGMT_TYPES;
+enum task_mgmt_types {
+ TASK_MGMT_ABORT_TASK = 1,
+ TASK_MGMT_BUS_RESET,
+ TASK_MGMT_LUN_RESET,
+ TASK_MGMT_TARGET_RESET,
+};
/* various types of vdisk mgmt commands */
-typedef enum { VDISK_MGMT_ACQUIRE = 1, VDISK_MGMT_RELEASE,
-} VDISK_MGMT_TYPES;
+enum vdisk_mgmt_types {
+ VDISK_MGMT_ACQUIRE = 1,
+ VDISK_MGMT_RELEASE,
+};
/* this is used in the vdest field */
#define VDEST_ALL 0xFFFF
@@ -242,7 +226,6 @@ typedef enum { VDISK_MGMT_ACQUIRE = 1, VDISK_MGMT_RELEASE,
/*
* structs with pragma pack */
-
/* ///////////// BEGIN PRAGMA PACK PUSH 1 ///////////////////////// */
/* ///////////// ONLY STRUCT TYPE SHOULD BE BELOW */
@@ -377,16 +360,16 @@ struct uiscmdrsp_scsi {
do { \
memset(buf, 0, \
MINNUM(len, \
- (unsigned int) NO_DISK_INQUIRY_RESULT_LEN)); \
- buf[2] = (u8) SCSI_SPC2_VER; \
+ (unsigned int)NO_DISK_INQUIRY_RESULT_LEN)); \
+ buf[2] = (u8)SCSI_SPC2_VER; \
if (lun == 0) { \
- buf[0] = (u8) lun0notpresent; \
- buf[3] = (u8) DEV_HISUPPORT; \
+ buf[0] = (u8)lun0notpresent; \
+ buf[3] = (u8)DEV_HISUPPORT; \
} else \
- buf[0] = (u8) notpresent; \
- buf[4] = (u8) ( \
+ buf[0] = (u8)notpresent; \
+ buf[4] = (u8)( \
MINNUM(len, \
- (unsigned int) NO_DISK_INQUIRY_RESULT_LEN) - 5); \
+ (unsigned int)NO_DISK_INQUIRY_RESULT_LEN) - 5);\
if (len >= NO_DISK_INQUIRY_RESULT_LEN) { \
buf[8] = 'D'; \
buf[9] = 'E'; \
@@ -410,12 +393,10 @@ struct uiscmdrsp_scsi {
} \
} while (0)
-
/*
* Struct & Defines to support sense information.
*/
-
/* The following struct is returned in sensebuf field in uiscmdrsp_scsi. It is
* initialized in exactly the manner that is recommended in Windows (hence the
* odd values).
@@ -429,21 +410,21 @@ struct uiscmdrsp_scsi {
* AdditionalSenseLength contains will be sizeof(sense_data)-8=10.
*/
struct sense_data {
- u8 ErrorCode:7;
- u8 Valid:1;
- u8 SegmentNumber;
- u8 SenseKey:4;
- u8 Reserved:1;
- u8 IncorrectLength:1;
- u8 EndOfMedia:1;
- u8 FileMark:1;
- u8 Information[4];
- u8 AdditionalSenseLength;
- u8 CommandSpecificInformation[4];
- u8 AdditionalSenseCode;
- u8 AdditionalSenseCodeQualifier;
- u8 FieldReplaceableUnitCode;
- u8 SenseKeySpecific[3];
+ u8 errorcode:7;
+ u8 valid:1;
+ u8 segment_number;
+ u8 sense_key:4;
+ u8 reserved:1;
+ u8 incorrect_length:1;
+ u8 end_of_media:1;
+ u8 file_mark:1;
+ u8 information[4];
+ u8 additional_sense_length;
+ u8 command_specific_information[4];
+ u8 additional_sense_code;
+ u8 additional_sense_code_qualifier;
+ u8 fru_code;
+ u8 sense_key_specific[3];
};
/* some SCSI ADSENSE codes */
@@ -484,7 +465,6 @@ struct net_pkt_xmt {
* each fragment */
char ethhdr[ETH_HEADER_SIZE]; /* the ethernet header */
struct {
-
/* these are needed for csum at uisnic end */
u8 valid; /* 1 = rest of this struct is valid - else
* ignore */
@@ -528,13 +508,12 @@ struct net_pkt_rcvpost {
* to be describable */
struct phys_info frag; /* physical page information for the
* single fragment 2K rcv buf */
- u64 UniqueNum; /* This is used to make sure that
+ u64 unique_num; /* This is used to make sure that
* receive posts are returned to */
/* the Adapter which sent them originally. */
};
struct net_pkt_rcv {
-
/* the number of receive buffers that can be chained */
/* is based on max mtu and size of each rcv buf */
u32 rcv_done_len; /* length of received data */
@@ -544,8 +523,8 @@ struct net_pkt_rcv {
* that must be chained; */
/* each entry is a receive buffer provided by NET_RCV_POST. */
/* NOTE: first rcvbuf in the chain will also be provided in net.buf. */
- u64 UniqueNum;
- u32 RcvsDroppedDelta;
+ u64 unique_num;
+ u32 rcvs_dropped_delta;
};
struct net_pkt_enbdis {
@@ -560,7 +539,7 @@ struct net_pkt_macaddr {
/* cmd rsp packet used for VNIC network traffic */
struct uiscmdrsp_net {
- NET_TYPES type;
+ enum net_types type;
void *buf;
union {
struct net_pkt_xmt xmt; /* used for NET_XMIT */
@@ -576,7 +555,7 @@ struct uiscmdrsp_net {
};
struct uiscmdrsp_scsitaskmgmt {
- TASK_MGMT_TYPES tasktype;
+ enum task_mgmt_types tasktype;
/* the type of task */
struct uisscsi_dest vdest;
@@ -594,7 +573,7 @@ struct uiscmdrsp_scsitaskmgmt {
* For windows guests, this is a pointer to a location that a waiting
* thread is testing to see if the taskmgmt command has completed.
* When the rsp is received by guest, the thread receiving the
- * response uses this to notify the the thread waiting for taskmgmt
+ * response uses this to notify the thread waiting for taskmgmt
* command completion. Its value is preserved by iopart & returned
* as is in the task mgmt rsp. */
void *notifyresult;
@@ -615,7 +594,7 @@ struct uiscmdrsp_scsitaskmgmt {
/* Note that the vHba pointer is not used by the Client/Guest side. */
struct uiscmdrsp_disknotify {
u8 add; /* 0-remove, 1-add */
- void *vHba; /* Pointer to vhba_info for channel info to
+ void *v_hba; /* Pointer to vhba_info for channel info to
* route msg */
u32 channel, id, lun; /* SCSI Path of Disk to be added or removed */
};
@@ -623,7 +602,7 @@ struct uiscmdrsp_disknotify {
/* The following is used by virthba/vSCSI to send the Acquire/Release commands
* to the IOVM. */
struct uiscmdrsp_vdiskmgmt {
- VDISK_MGMT_TYPES vdisktype;
+ enum vdisk_mgmt_types vdisktype;
/* the type of task */
struct uisscsi_dest vdest;
@@ -641,7 +620,7 @@ struct uiscmdrsp_vdiskmgmt {
* For windows guests, this is a pointer to a location that a waiting
* thread is testing to see if the taskmgmt command has completed.
* When the rsp is received by guest, the thread receiving the
- * response uses this to notify the the thread waiting for taskmgmt
+ * response uses this to notify the thread waiting for taskmgmt
* command completion. Its value is preserved by iopart & returned
* as is in the task mgmt rsp. */
void *notifyresult;
@@ -683,11 +662,11 @@ struct uiscmdrsp {
/* This is just the header of the IO channel. It is assumed that directly after
* this header there is a large region of memory which contains the command and
-* response queues as specified in cmdQ and rspQ SIGNAL_QUEUE_HEADERS. */
-typedef struct _ULTRA_IO_CHANNEL_PROTOCOL {
- CHANNEL_HEADER ChannelHeader;
- SIGNAL_QUEUE_HEADER cmdQ;
- SIGNAL_QUEUE_HEADER rspQ;
+* response queues as specified in cmd_q and rsp_q SIGNAL_QUEUE_HEADERS. */
+struct spar_io_channel_protocol {
+ struct channel_header channel_header;
+ struct signal_queue_header cmd_q;
+ struct signal_queue_header rsp_q;
union {
struct {
struct vhba_wwnn wwnn; /* 8 bytes */
@@ -697,14 +676,14 @@ typedef struct _ULTRA_IO_CHANNEL_PROTOCOL {
u8 macaddr[MAX_MACADDR_LEN]; /* 6 bytes */
u32 num_rcv_bufs; /* 4 */
u32 mtu; /* 4 */
- uuid_le zoneGuid; /* 16 */
+ uuid_le zone_uuid; /* 16 */
} vnic; /* total 30 */
};
#define MAX_CLIENTSTRING_LEN 1024
- u8 clientString[MAX_CLIENTSTRING_LEN]; /* NULL terminated - so holds
+ u8 client_string[MAX_CLIENTSTRING_LEN];/* NULL terminated - so holds
* max - 1 bytes */
-} ULTRA_IO_CHANNEL_PROTOCOL;
+};
#pragma pack(pop)
/* ///////////// END PRAGMA PACK PUSH 1 /////////////////////////// */
@@ -733,144 +712,17 @@ typedef struct _ULTRA_IO_CHANNEL_PROTOCOL {
* INLINE functions for initializing and accessing I/O data channels
*/
-
-#define NUMSIGNALS(x, q) (((ULTRA_IO_CHANNEL_PROTOCOL *)(x))->q.MaxSignalSlots)
-#define SIZEOF_PROTOCOL (COVER(sizeof(ULTRA_IO_CHANNEL_PROTOCOL), 64))
+#define SIZEOF_PROTOCOL (COVER(sizeof(struct spar_io_channel_protocol), 64))
#define SIZEOF_CMDRSP (COVER(sizeof(struct uiscmdrsp), 64))
-#define IO_CHANNEL_SIZE(x) COVER(SIZEOF_PROTOCOL + \
- (NUMSIGNALS(x, cmdQ) + \
- NUMSIGNALS(x, rspQ)) * SIZEOF_CMDRSP, 4096)
#define MIN_IO_CHANNEL_SIZE COVER(SIZEOF_PROTOCOL + \
2 * MIN_NUMSIGNALS * SIZEOF_CMDRSP, 4096)
-#ifdef __GNUC__
-/* These defines should only ever be used in service partitons */
-/* because they rely on the size of uiscmdrsp */
-#define QSLOTSFROMBYTES(bytes) (((bytes-SIZEOF_PROTOCOL)/2)/SIZEOF_CMDRSP)
-#define QSIZEFROMBYTES(bytes) (QSLOTSFROMBYTES(bytes)*SIZEOF_CMDRSP)
-#define SignalQInit(x) \
- do { \
- x->cmdQ.Size = QSIZEFROMBYTES(x->ChannelHeader.Size); \
- x->cmdQ.oSignalBase = SIZEOF_PROTOCOL - \
- offsetof(ULTRA_IO_CHANNEL_PROTOCOL, cmdQ); \
- x->cmdQ.SignalSize = SIZEOF_CMDRSP; \
- x->cmdQ.MaxSignalSlots = \
- QSLOTSFROMBYTES(x->ChannelHeader.Size); \
- x->cmdQ.MaxSignals = x->cmdQ.MaxSignalSlots - 1; \
- x->rspQ.Size = QSIZEFROMBYTES(x->ChannelHeader.Size); \
- x->rspQ.oSignalBase = \
- (SIZEOF_PROTOCOL + x->cmdQ.Size) - \
- offsetof(ULTRA_IO_CHANNEL_PROTOCOL, rspQ); \
- x->rspQ.SignalSize = SIZEOF_CMDRSP; \
- x->rspQ.MaxSignalSlots = \
- QSLOTSFROMBYTES(x->ChannelHeader.Size); \
- x->rspQ.MaxSignals = x->rspQ.MaxSignalSlots - 1; \
- x->ChannelHeader.oChannelSpace = \
- offsetof(ULTRA_IO_CHANNEL_PROTOCOL, cmdQ); \
- } while (0)
-
-#define INIT_CLIENTSTRING(chan, type, clientStr, clientStrLen) \
- do { \
- if (clientStr) { \
- chan->ChannelHeader.oClientString = \
- offsetof(type, clientString); \
- memcpy(chan->clientString, clientStr, \
- MINNUM(clientStrLen, \
- (u32) (MAX_CLIENTSTRING_LEN - 1))); \
- chan->clientString[MINNUM(clientStrLen, \
- (u32) (MAX_CLIENTSTRING_LEN \
- - 1))] \
- = '\0'; \
- } \
- else \
- if (clientStrLen > 0) \
- return 0; \
- } while (0)
-
-
-#define ULTRA_IO_CHANNEL_SERVER_READY(x, chanId, logCtx) \
- ULTRA_CHANNEL_SERVER_TRANSITION(x, chanId, SrvState, CHANNELSRV_READY, \
- logCtx)
-
-#define ULTRA_IO_CHANNEL_SERVER_NOTREADY(x, chanId, logCtx) \
- ULTRA_CHANNEL_SERVER_TRANSITION(x, chanId, SrvState, \
- CHANNELSRV_UNINITIALIZED, logCtx)
-
-static inline int ULTRA_VHBA_init_channel(ULTRA_IO_CHANNEL_PROTOCOL *x,
- struct vhba_wwnn *wwnn,
- struct vhba_config_max *max,
- unsigned char *clientStr,
- u32 clientStrLen, u64 bytes) {
- memset(x, 0, sizeof(ULTRA_IO_CHANNEL_PROTOCOL));
- x->ChannelHeader.VersionId = ULTRA_VHBA_CHANNEL_PROTOCOL_VERSIONID;
- x->ChannelHeader.Signature = ULTRA_VHBA_CHANNEL_PROTOCOL_SIGNATURE;
- x->ChannelHeader.SrvState = CHANNELSRV_UNINITIALIZED;
- x->ChannelHeader.HeaderSize = sizeof(x->ChannelHeader);
- x->ChannelHeader.Size = COVER(bytes, 4096);
- x->ChannelHeader.Type = UltraVhbaChannelProtocolGuid;
- x->ChannelHeader.ZoneGuid = NULL_UUID_LE;
- x->vhba.wwnn = *wwnn;
- x->vhba.max = *max;
- INIT_CLIENTSTRING(x, ULTRA_IO_CHANNEL_PROTOCOL, clientStr,
- clientStrLen);
- SignalQInit(x);
- if ((x->cmdQ.MaxSignalSlots > MAX_NUMSIGNALS) ||
- (x->rspQ.MaxSignalSlots > MAX_NUMSIGNALS)) {
- return 0;
- }
- if ((x->cmdQ.MaxSignalSlots < MIN_NUMSIGNALS) ||
- (x->rspQ.MaxSignalSlots < MIN_NUMSIGNALS)) {
- return 0;
- }
- return 1;
-}
-
-static inline void ULTRA_VHBA_set_max(ULTRA_IO_CHANNEL_PROTOCOL *x,
- struct vhba_config_max *max) {
- x->vhba.max = *max;
-}
-
-static inline int ULTRA_VNIC_init_channel(ULTRA_IO_CHANNEL_PROTOCOL *x,
- unsigned char *macaddr,
- u32 num_rcv_bufs, u32 mtu,
- uuid_le zoneGuid,
- unsigned char *clientStr,
- u32 clientStrLen,
- u64 bytes) {
- memset(x, 0, sizeof(ULTRA_IO_CHANNEL_PROTOCOL));
- x->ChannelHeader.VersionId = ULTRA_VNIC_CHANNEL_PROTOCOL_VERSIONID;
- x->ChannelHeader.Signature = ULTRA_VNIC_CHANNEL_PROTOCOL_SIGNATURE;
- x->ChannelHeader.SrvState = CHANNELSRV_UNINITIALIZED;
- x->ChannelHeader.HeaderSize = sizeof(x->ChannelHeader);
- x->ChannelHeader.Size = COVER(bytes, 4096);
- x->ChannelHeader.Type = UltraVnicChannelProtocolGuid;
- x->ChannelHeader.ZoneGuid = NULL_UUID_LE;
- memcpy(x->vnic.macaddr, macaddr, MAX_MACADDR_LEN);
- x->vnic.num_rcv_bufs = num_rcv_bufs;
- x->vnic.mtu = mtu;
- x->vnic.zoneGuid = zoneGuid;
- INIT_CLIENTSTRING(x, ULTRA_IO_CHANNEL_PROTOCOL, clientStr,
- clientStrLen);
- SignalQInit(x);
- if ((x->cmdQ.MaxSignalSlots > MAX_NUMSIGNALS) ||
- (x->rspQ.MaxSignalSlots > MAX_NUMSIGNALS)) {
- return 0;
- }
- if ((x->cmdQ.MaxSignalSlots < MIN_NUMSIGNALS) ||
- (x->rspQ.MaxSignalSlots < MIN_NUMSIGNALS)) {
- return 0;
- }
- return 1;
-}
-
-#endif /* __GNUC__ */
/*
* INLINE function for expanding a guest's pfn-off-size into multiple 4K page
* pfn-off-size entries.
*/
-
/* we deal with 4K page sizes when it comes to passing page information
* between */
/* Guest and IOPartition. */
@@ -900,13 +752,12 @@ add_physinfo_entries(u32 inp_pfn, /* input - specifies the pfn to be used
firstlen = PI_PAGE_SIZE - inp_off;
if (inp_len <= firstlen) {
-
/* the input entry spans only one page - add as is */
if (index >= max_pi_arr_entries)
return 0;
pi_arr[index].pi_pfn = inp_pfn;
- pi_arr[index].pi_off = (u16) inp_off;
- pi_arr[index].pi_len = (u16) inp_len;
+ pi_arr[index].pi_off = (u16)inp_off;
+ pi_arr[index].pi_len = (u16)inp_len;
return index + 1;
}
@@ -924,9 +775,8 @@ add_physinfo_entries(u32 inp_pfn, /* input - specifies the pfn to be used
else {
pi_arr[index + i].pi_off = 0;
pi_arr[index + i].pi_len =
- (u16) MINNUM(len, (u32) PI_PAGE_SIZE);
+ (u16)MINNUM(len, (u32)PI_PAGE_SIZE);
}
-
}
return index + i;
}
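The add_physinfo_entries() helper whose tail is reformatted above splits one guest pfn/offset/length triple into per-4K-page entries, as its comment describes. The stand-alone sketch below re-implements the same splitting idea with a local struct so the arithmetic can be checked in isolation; struct phys_entry and split_into_pages() are illustrative stand-ins, not the driver's types or code.

#include <stdio.h>

#define PI_PAGE_SIZE 0x1000u

struct phys_entry {		/* illustrative stand-in for struct phys_info */
	unsigned int pfn;
	unsigned short off;
	unsigned short len;
};

/* Split (pfn, off, len) into per-page entries; returns the count written. */
static int split_into_pages(unsigned int pfn, unsigned int off,
			    unsigned int len, struct phys_entry *out, int max)
{
	unsigned int firstlen = PI_PAGE_SIZE - off;
	int n = 0;

	if (len <= firstlen) {		/* fits entirely in the first page */
		if (max < 1)
			return 0;
		out[0].pfn = pfn;
		out[0].off = off;
		out[0].len = len;
		return 1;
	}

	/* first (partial) page */
	out[n].pfn = pfn;
	out[n].off = off;
	out[n].len = firstlen;
	len -= firstlen;
	n++;

	/* remaining pages start at offset 0 within each page */
	while (len > 0 && n < max) {
		out[n].pfn = ++pfn;
		out[n].off = 0;
		out[n].len = len < PI_PAGE_SIZE ? len : PI_PAGE_SIZE;
		len -= out[n].len;
		n++;
	}
	return n;
}

int main(void)
{
	struct phys_entry e[8];
	int i, n = split_into_pages(100, 0x0f00, 0x2300, e, 8);

	for (i = 0; i < n; i++)
		printf("pfn=%u off=0x%x len=0x%x\n", e[i].pfn, e[i].off, e[i].len);
	return 0;
}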
diff --git a/drivers/staging/unisys/common-spar/include/channels/vbuschannel.h b/drivers/staging/unisys/common-spar/include/channels/vbuschannel.h
index 1231c454176..2c42ce16e0c 100644
--- a/drivers/staging/unisys/common-spar/include/channels/vbuschannel.h
+++ b/drivers/staging/unisys/common-spar/include/channels/vbuschannel.h
@@ -28,65 +28,61 @@
#include "channel.h"
/* {193b331b-c58f-11da-95a9-00e08161165f} */
-#define ULTRA_VBUS_CHANNEL_PROTOCOL_GUID \
+#define SPAR_VBUS_CHANNEL_PROTOCOL_UUID \
UUID_LE(0x193b331b, 0xc58f, 0x11da, \
0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f)
-static const uuid_le UltraVbusChannelProtocolGuid =
- ULTRA_VBUS_CHANNEL_PROTOCOL_GUID;
+static const uuid_le spar_vbus_channel_protocol_uuid =
+ SPAR_VBUS_CHANNEL_PROTOCOL_UUID;
-#define ULTRA_VBUS_CHANNEL_PROTOCOL_SIGNATURE ULTRA_CHANNEL_PROTOCOL_SIGNATURE
+#define SPAR_VBUS_CHANNEL_PROTOCOL_SIGNATURE ULTRA_CHANNEL_PROTOCOL_SIGNATURE
/* Must increment this whenever you insert or delete fields within this channel
* struct. Also increment whenever you change the meaning of fields within this
* channel struct so as to break pre-existing software. Note that you can
* usually add fields to the END of the channel struct withOUT needing to
* increment this. */
-#define ULTRA_VBUS_CHANNEL_PROTOCOL_VERSIONID 1
+#define SPAR_VBUS_CHANNEL_PROTOCOL_VERSIONID 1
-#define ULTRA_VBUS_CHANNEL_OK_CLIENT(pChannel, logCtx) \
- (ULTRA_check_channel_client(pChannel, \
- UltraVbusChannelProtocolGuid, \
- "vbus", \
- sizeof(ULTRA_VBUS_CHANNEL_PROTOCOL), \
- ULTRA_VBUS_CHANNEL_PROTOCOL_VERSIONID, \
- ULTRA_VBUS_CHANNEL_PROTOCOL_SIGNATURE, \
- __FILE__, __LINE__, logCtx))
-
-#define ULTRA_VBUS_CHANNEL_OK_SERVER(actualBytes, logCtx) \
- (ULTRA_check_channel_server(UltraVbusChannelProtocolGuid, \
- "vbus", \
- sizeof(ULTRA_VBUS_CHANNEL_PROTOCOL), \
- actualBytes, \
- __FILE__, __LINE__, logCtx))
+#define SPAR_VBUS_CHANNEL_OK_CLIENT(ch) \
+ spar_check_channel_client(ch, \
+ spar_vbus_channel_protocol_uuid, \
+ "vbus", \
+ sizeof(struct spar_vbus_channel_protocol),\
+ SPAR_VBUS_CHANNEL_PROTOCOL_VERSIONID, \
+ SPAR_VBUS_CHANNEL_PROTOCOL_SIGNATURE)
+#define SPAR_VBUS_CHANNEL_OK_SERVER(actual_bytes) \
+ (spar_check_channel_server(spar_vbus_channel_protocol_uuid, \
+ "vbus", \
+ sizeof(struct ultra_vbus_channel_protocol),\
+ actual_bytes))
#pragma pack(push, 1) /* both GCC and VC now allow this pragma */
-typedef struct _ULTRA_VBUS_HEADERINFO {
- u32 structBytes; /* size of this struct in bytes */
- u32 deviceInfoStructBytes; /* sizeof(ULTRA_VBUS_DEVICEINFO) */
- u32 devInfoCount; /* num of items in DevInfo member */
+struct spar_vbus_headerinfo {
+ u32 struct_bytes; /* size of this struct in bytes */
+ u32 device_info_struct_bytes; /* sizeof(ULTRA_VBUS_DEVICEINFO) */
+ u32 dev_info_count; /* num of items in DevInfo member */
/* (this is the allocated size) */
- u32 chpInfoByteOffset; /* byte offset from beginning of this struct */
- /* to the the ChpInfo struct (below) */
- u32 busInfoByteOffset; /* byte offset from beginning of this struct */
- /* to the the BusInfo struct (below) */
- u32 devInfoByteOffset; /* byte offset from beginning of this struct */
- /* to the the DevInfo array (below) */
+ u32 chp_info_offset; /* byte offset from beginning of this struct */
+ /* to the ChpInfo struct (below) */
+ u32 bus_info_offset; /* byte offset from beginning of this struct */
+ /* to the BusInfo struct (below) */
+ u32 dev_info_offset; /* byte offset from beginning of this struct */
+ /* to the DevInfo array (below) */
u8 reserved[104];
-} ULTRA_VBUS_HEADERINFO;
+};
-typedef struct _ULTRA_VBUS_CHANNEL_PROTOCOL {
- ULTRA_CHANNEL_PROTOCOL ChannelHeader; /* initialized by server */
- ULTRA_VBUS_HEADERINFO HdrInfo; /* initialized by server */
+struct spar_vbus_channel_protocol {
+ struct channel_header channel_header; /* initialized by server */
+ struct spar_vbus_headerinfo hdr_info; /* initialized by server */
/* the remainder of this channel is filled in by the client */
- ULTRA_VBUS_DEVICEINFO ChpInfo; /* describes client chipset device and
- * driver */
- ULTRA_VBUS_DEVICEINFO BusInfo; /* describes client bus device and
- * driver */
- ULTRA_VBUS_DEVICEINFO DevInfo[0]; /* describes client device and
- * driver for */
- /* each device on the bus */
-} ULTRA_VBUS_CHANNEL_PROTOCOL;
+ struct ultra_vbus_deviceinfo chp_info;
+ /* describes client chipset device and driver */
+ struct ultra_vbus_deviceinfo bus_info;
+ /* describes client bus device and driver */
+ struct ultra_vbus_deviceinfo dev_info[0];
+ /* describes client device and driver for each device on the bus */
+};
#define VBUS_CH_SIZE_EXACT(MAXDEVICES) \
(sizeof(ULTRA_VBUS_CHANNEL_PROTOCOL) + ((MAXDEVICES) * \
diff --git a/drivers/staging/unisys/common-spar/include/vbusdeviceinfo.h b/drivers/staging/unisys/common-spar/include/vbusdeviceinfo.h
index 3bbdc2bb7eb..9b6d3e69355 100644
--- a/drivers/staging/unisys/common-spar/include/vbusdeviceinfo.h
+++ b/drivers/staging/unisys/common-spar/include/vbusdeviceinfo.h
@@ -25,13 +25,13 @@
* It is filled in by the client side to provide info about the device
* and driver from the client's perspective.
*/
-typedef struct _ULTRA_VBUS_DEVICEINFO {
- u8 devType[16]; /* short string identifying the device type */
- u8 drvName[16]; /* driver .sys file name */
- u8 infoStrings[96]; /* sequence of tab-delimited id strings: */
+struct ultra_vbus_deviceinfo {
+ u8 devtype[16]; /* short string identifying the device type */
+ u8 drvname[16]; /* driver .sys file name */
+ u8 infostrs[96]; /* sequence of tab-delimited id strings: */
/* <DRIVER_REV> <DRIVER_VERTAG> <DRIVER_COMPILETIME> */
u8 reserved[128]; /* pad size to 256 bytes */
-} ULTRA_VBUS_DEVICEINFO;
+};
#pragma pack(pop)
@@ -63,8 +63,9 @@ vbuschannel_sanitize_buffer(char *p, int remain, char *src, int srcmax)
p++;
remain--;
chars++;
- } else if (p == NULL)
+ } else if (p == NULL) {
chars++;
+ }
nonprintable_streak = 0;
}
if (remain > 0) {
@@ -72,10 +73,12 @@ vbuschannel_sanitize_buffer(char *p, int remain, char *src, int srcmax)
p++;
remain--;
chars++;
- } else if (p == NULL)
+ } else if (p == NULL) {
chars++;
- } else
+ }
+ } else {
nonprintable_streak = 1;
+ }
src++;
srcmax--;
}
@@ -115,7 +118,7 @@ vbuschannel_itoa(char *p, int remain, int num)
}
/* form a backwards decimal ascii string in <s> */
while (num > 0) {
- if (digits >= (int) sizeof(s))
+ if (digits >= (int)sizeof(s))
return 0;
s[digits++] = (num % 10) + '0';
num = num / 10;
@@ -147,15 +150,15 @@ vbuschannel_itoa(char *p, int remain, int num)
* Returns the number of bytes written to <p>.
*/
static inline int
-vbuschannel_devinfo_to_string(ULTRA_VBUS_DEVICEINFO *devinfo,
- char *p, int remain, int devix)
+vbuschannel_devinfo_to_string(struct ultra_vbus_deviceinfo *devinfo,
+ char *p, int remain, int devix)
{
char *psrc;
int nsrc, x, i, pad;
int chars = 0;
- psrc = &(devinfo->devType[0]);
- nsrc = sizeof(devinfo->devType);
+ psrc = &devinfo->devtype[0];
+ nsrc = sizeof(devinfo->devtype);
if (vbuschannel_sanitize_buffer(NULL, 0, psrc, nsrc) <= 0)
return 0;
@@ -184,8 +187,8 @@ vbuschannel_devinfo_to_string(ULTRA_VBUS_DEVICEINFO *devinfo,
VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
/* emit driver name */
- psrc = &(devinfo->drvName[0]);
- nsrc = sizeof(devinfo->drvName);
+ psrc = &devinfo->drvname[0];
+ nsrc = sizeof(devinfo->drvname);
x = vbuschannel_sanitize_buffer(p, remain, psrc, nsrc);
p += x;
remain -= x;
@@ -196,8 +199,8 @@ vbuschannel_devinfo_to_string(ULTRA_VBUS_DEVICEINFO *devinfo,
VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
/* emit strings */
- psrc = &(devinfo->infoStrings[0]);
- nsrc = sizeof(devinfo->infoStrings);
+ psrc = &devinfo->infostrs[0];
+ nsrc = sizeof(devinfo->infostrs);
x = vbuschannel_sanitize_buffer(p, remain, psrc, nsrc);
p += x;
remain -= x;
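vbuschannel_sanitize_buffer(), used twice per field in vbuschannel_devinfo_to_string() above, works in two modes: called with p == NULL it only counts the characters it would emit, and called with a real buffer it writes them, collapsing each run of non-printable bytes. Below is a simplified stand-alone version of that measure-then-write pattern; sanitize() is an illustration of the idea (here a streak becomes a single space), not the kernel helper itself.

#include <stdio.h>
#include <ctype.h>

/* Copy printable chars from src (at most srcmax bytes) into p (at most
 * remain bytes); a run of non-printable bytes becomes one space.  With
 * p == NULL nothing is written and only the would-be length is returned. */
static int sanitize(char *p, int remain, const char *src, int srcmax)
{
	int chars = 0, streak = 0;

	while (srcmax > 0 && *src) {
		if (isprint((unsigned char)*src)) {
			if (streak && (p == NULL || remain > 0)) {
				if (p) { *p++ = ' '; remain--; }
				chars++;	/* one space for the streak */
			}
			streak = 0;
			if (p == NULL || remain > 0) {
				if (p) { *p++ = *src; remain--; }
				chars++;
			}
		} else {
			streak = 1;
		}
		src++;
		srcmax--;
	}
	return chars;
}

int main(void)
{
	const char raw[] = "virthba\x01\x02 driver";
	char buf[64];
	int need = sanitize(NULL, 0, raw, sizeof(raw));	/* measure */
	int got = sanitize(buf, sizeof(buf) - 1, raw, sizeof(raw));

	buf[got] = '\0';
	printf("need=%d wrote=%d text=\"%s\"\n", need, got, buf);
	return 0;
}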
diff --git a/drivers/staging/unisys/common-spar/include/vmcallinterface.h b/drivers/staging/unisys/common-spar/include/vmcallinterface.h
index 0b5b5626af5..78333719c49 100644
--- a/drivers/staging/unisys/common-spar/include/vmcallinterface.h
+++ b/drivers/staging/unisys/common-spar/include/vmcallinterface.h
@@ -33,7 +33,7 @@
/* define subsystem number for AppOS, used in uislib driver */
#define MDS_APPOS 0x4000000000000000L /* subsystem = 62 - AppOS */
-typedef enum { /* VMCALL identification tuples */
+enum vmcall_monitor_interface_method_tuple { /* VMCALL identification tuples */
/* Note: when a new VMCALL is added:
* - the 1st 2 hex digits correspond to one of the
* VMCALL_MONITOR_INTERFACE types and
@@ -66,7 +66,7 @@ typedef enum { /* VMCALL identification tuples */
* ULTRA_SERVICE_CAPABILITY_TIME
* capable guest to make
* VMCALL */
-} VMCALL_MONITOR_INTERFACE_METHOD_TUPLE;
+};
#define VMCALL_SUCCESS 0
#define VMCALL_SUCCESSFUL(result) (result == 0)
@@ -76,12 +76,12 @@ typedef enum { /* VMCALL identification tuples */
__unisys_vmcall_gnuc(tuple, reg_ebx, reg_ecx)
#define unisys_extended_vmcall(tuple, reg_ebx, reg_ecx, reg_edx) \
__unisys_extended_vmcall_gnuc(tuple, reg_ebx, reg_ecx, reg_edx)
-#define ISSUE_IO_VMCALL(InterfaceMethod, param, result) \
- (result = unisys_vmcall(InterfaceMethod, (param) & 0xFFFFFFFF, \
+#define ISSUE_IO_VMCALL(method, param, result) \
+ (result = unisys_vmcall(method, (param) & 0xFFFFFFFF, \
(param) >> 32))
-#define ISSUE_IO_EXTENDED_VMCALL(InterfaceMethod, param1, param2, \
+#define ISSUE_IO_EXTENDED_VMCALL(method, param1, param2, \
param3, result) \
- (result = unisys_extended_vmcall(InterfaceMethod, param1, \
+ (result = unisys_extended_vmcall(method, param1, \
param2, param3))
/* The following uses VMCALL_POST_CODE_LOGEVENT interface but is currently
@@ -107,21 +107,20 @@ struct phys_info {
#pragma pack(pop)
/* ///////////// END PRAGMA PACK PUSH 1 /////////////////////////// */
-typedef struct phys_info IO_DATA_STRUCTURE;
/* ///////////// BEGIN PRAGMA PACK PUSH 1 ///////////////////////// */
/* ///////////// ONLY STRUCT TYPE SHOULD BE BELOW */
#pragma pack(push, 1)
/* Parameters to VMCALL_IO_CONTROLVM_ADDR interface */
-typedef struct _VMCALL_IO_CONTROLVM_ADDR_PARAMS {
+struct vmcall_io_controlvm_addr_params {
/* The Guest-relative physical address of the ControlVm channel.
* This VMCall fills this in with the appropriate address. */
- u64 ChannelAddress; /* contents provided by this VMCALL (OUT) */
+ u64 address; /* contents provided by this VMCALL (OUT) */
/* the size of the ControlVm channel in bytes. This VMCall fills this
* in with the appropriate value. */
- u32 ChannelBytes; /* contents provided by this VMCALL (OUT) */
- u8 Unused[4]; /* Unused Bytes in the 64-Bit Aligned Struct */
-} VMCALL_IO_CONTROLVM_ADDR_PARAMS;
+ u32 channel_bytes; /* contents provided by this VMCALL (OUT) */
+ u8 unused[4]; /* Unused Bytes in the 64-Bit Aligned Struct */
+};
#pragma pack(pop)
/* ///////////// END PRAGMA PACK PUSH 1 /////////////////////////// */
@@ -130,11 +129,11 @@ typedef struct _VMCALL_IO_CONTROLVM_ADDR_PARAMS {
/* ///////////// ONLY STRUCT TYPE SHOULD BE BELOW */
#pragma pack(push, 1)
/* Parameters to VMCALL_IO_DIAG_ADDR interface */
-typedef struct _VMCALL_IO_DIAG_ADDR_PARAMS {
+struct vmcall_io_diag_addr_params {
/* The Guest-relative physical address of the diagnostic channel.
* This VMCall fills this in with the appropriate address. */
- u64 ChannelAddress; /* contents provided by this VMCALL (OUT) */
-} VMCALL_IO_DIAG_ADDR_PARAMS;
+ u64 address; /* contents provided by this VMCALL (OUT) */
+};
#pragma pack(pop)
/* ///////////// END PRAGMA PACK PUSH 1 /////////////////////////// */
@@ -143,25 +142,25 @@ typedef struct _VMCALL_IO_DIAG_ADDR_PARAMS {
/* ///////////// ONLY STRUCT TYPE SHOULD BE BELOW */
#pragma pack(push, 1)
/* Parameters to VMCALL_IO_VISORSERIAL_ADDR interface */
-typedef struct _VMCALL_IO_VISORSERIAL_ADDR_PARAMS {
+struct vmcall_io_visorserial_addr_params {
/* The Guest-relative physical address of the serial console
* channel. This VMCall fills this in with the appropriate
* address. */
- u64 ChannelAddress; /* contents provided by this VMCALL (OUT) */
-} VMCALL_IO_VISORSERIAL_ADDR_PARAMS;
+ u64 address; /* contents provided by this VMCALL (OUT) */
+};
#pragma pack(pop)
/* ///////////// END PRAGMA PACK PUSH 1 /////////////////////////// */
/* Parameters to VMCALL_CHANNEL_MISMATCH interface */
-typedef struct _VMCALL_CHANNEL_VERSION_MISMATCH_PARAMS {
- u8 ChannelName[32]; /* Null terminated string giving name of channel
+struct vmcall_channel_version_mismatch_params {
+ u8 chname[32]; /* Null terminated string giving name of channel
* (IN) */
- u8 ItemName[32]; /* Null terminated string giving name of
+ u8 item_name[32]; /* Null terminated string giving name of
* mismatched item (IN) */
- u32 SourceLineNumber; /* line# where invoked. (IN) */
- u8 SourceFileName[36]; /* source code where invoked - Null terminated
+ u32 line_no; /* line# where invoked. (IN) */
+ u8 file_name[36]; /* source code where invoked - Null terminated
* string (IN) */
-} VMCALL_CHANNEL_VERSION_MISMATCH_PARAMS;
+};
#endif /* __IOMONINTF_H__ */
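The reworked ISSUE_IO_VMCALL() macro above splits its 64-bit parameter (typically a guest-physical address obtained with virt_to_phys()) across the two 32-bit VMCALL registers: the low half goes in ebx and the high half in ecx. The fragment below sketches that convention in user space with a stub in place of the real hypercall; fake_vmcall() and the tuple value are made up for the demonstration.

#include <stdio.h>
#include <stdint.h>

/* Stub standing in for __unisys_vmcall_gnuc(); it just prints its inputs. */
static int fake_vmcall(unsigned int tuple, unsigned int ebx, unsigned int ecx)
{
	printf("vmcall tuple=0x%x ebx=0x%08x ecx=0x%08x\n", tuple, ebx, ecx);
	return 0;			/* VMCALL_SUCCESS */
}

/* Mirrors ISSUE_IO_VMCALL(method, param, result): low 32 bits of the
 * parameter in ebx, high 32 bits in ecx. */
#define ISSUE_IO_VMCALL_SKETCH(method, param, result) \
	((result) = fake_vmcall((method), \
				(unsigned int)((param) & 0xFFFFFFFF), \
				(unsigned int)((param) >> 32)))

int main(void)
{
	uint64_t phys = 0x1234567890abcdefULL;	/* e.g. virt_to_phys(&params) */
	int result;

	ISSUE_IO_VMCALL_SKETCH(0x0701, phys, result);	/* arbitrary tuple */
	return result;
}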
diff --git a/drivers/staging/unisys/include/timskmod.h b/drivers/staging/unisys/include/timskmod.h
index b14494ff6c1..cff7983dab8 100644
--- a/drivers/staging/unisys/include/timskmod.h
+++ b/drivers/staging/unisys/include/timskmod.h
@@ -31,7 +31,6 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
-#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
@@ -71,7 +70,6 @@
/** Try to evaluate the provided expression, and do a RETINT(x) iff
* the expression evaluates to < 0.
- * @param x the expression to try
*/
#define ASSERT(cond) \
do { if (!(cond)) \
@@ -89,11 +87,6 @@
(void *)(p2) = SWAPPOINTERS_TEMP; \
} while (0)
-/**
- * @addtogroup driverlogging
- * @{
- */
-
#define PRINTKDRV(fmt, args...) LOGINF(fmt, ## args)
#define TBDDRV(fmt, args...) LOGERR(fmt, ## args)
#define HUHDRV(fmt, args...) LOGERR(fmt, ## args)
@@ -114,8 +107,6 @@
#define INFODEVX(devno, fmt, args...) LOGINFDEVX(devno, fmt, ## args)
#define DEBUGDEV(devname, fmt, args...) DBGINFDEV(devname, fmt, ## args)
-/* @} */
-
/** Verifies the consistency of your PRIVATEDEVICEDATA structure using
* conventional "signature" fields:
* <p>
@@ -139,7 +130,7 @@
((fd)->sig2 == fd))
/** Sleep for an indicated number of seconds (for use in kernel mode).
- * @param x the number of seconds to sleep.
+ * x - the number of seconds to sleep.
*/
#define SLEEP(x) \
do { current->state = TASK_INTERRUPTIBLE; \
@@ -147,17 +138,13 @@
} while (0)
/** Sleep for an indicated number of jiffies (for use in kernel mode).
- * @param x the number of jiffies to sleep.
+ * x - the number of jiffies to sleep.
*/
#define SLEEPJIFFIES(x) \
do { current->state = TASK_INTERRUPTIBLE; \
schedule_timeout(x); \
} while (0)
-#ifndef max
-#define max(a, b) (((a) > (b)) ? (a) : (b))
-#endif
-
static inline struct cdev *cdev_alloc_init(struct module *owner,
const struct file_operations *fops)
{
diff --git a/drivers/staging/unisys/include/uisqueue.h b/drivers/staging/unisys/include/uisqueue.h
index 5178270b98d..25b6181d78a 100644
--- a/drivers/staging/unisys/include/uisqueue.h
+++ b/drivers/staging/unisys/include/uisqueue.h
@@ -34,8 +34,7 @@
#include "controlvmcompletionstatus.h"
struct uisqueue_info {
-
- CHANNEL_HEADER __iomem *chan;
+ struct channel_header __iomem *chan;
/* channel containing queues in which scsi commands &
* responses are queued
*/
@@ -48,8 +47,8 @@ struct uisqueue_info {
u64 non_empty_wakeup_cnt;
struct {
- SIGNAL_QUEUE_HEADER reserved1; /* */
- SIGNAL_QUEUE_HEADER reserved2; /* */
+ struct signal_queue_header reserved1; /* */
+ struct signal_queue_header reserved2; /* */
} safe_uis_queue;
unsigned int (*send_int_if_needed)(struct uisqueue_info *info,
unsigned int whichcqueue,
@@ -119,7 +118,7 @@ struct extport_info {
*/
struct switch_info *swtch;
- struct PciId pci_id;
+ struct pci_id pci_id;
char name[MAX_NAME_SIZE_UISQUEUE];
union {
struct vhba_wwnn wwnn;
@@ -133,7 +132,7 @@ struct device_info {
u64 channel_bytes;
uuid_le channel_uuid;
uuid_le instance_uuid;
- struct InterruptInfo intr;
+ struct irq_info intr;
struct switch_info *swtch;
char devid[30]; /* "vbus<busno>:dev<devno>" */
u16 polling;
@@ -149,30 +148,27 @@ struct device_info {
unsigned long long last_on_list_cnt;
};
-typedef enum {
+enum switch_type {
RECOVERY_LAN = 1,
IB_LAN = 2
-} SWITCH_TYPE;
+};
struct bus_info {
- u32 busNo, deviceCount;
+ u32 bus_no, device_count;
struct device_info **device;
- u64 guestHandle, recvBusInterruptHandle;
- uuid_le busInstGuid;
- ULTRA_VBUS_CHANNEL_PROTOCOL __iomem *pBusChannel;
- int busChannelBytes;
+ u64 guest_handle, recv_bus_irq_handle;
+ uuid_le bus_inst_uuid;
+ struct ultra_vbus_channel_protocol __iomem *bus_channel;
+ int bus_channel_bytes;
struct proc_dir_entry *proc_dir; /* proc/uislib/vbus/<x> */
struct proc_dir_entry *proc_info; /* proc/uislib/vbus/<x>/info */
char name[25];
- char partitionName[99];
+ char partition_name[99];
struct bus_info *next;
- u8 localVnic; /* 1 if local vnic created internally
+ u8 local_vnic; /* 1 if local vnic created internally
* by IOVM; 0 otherwise... */
};
-#define DEDICATED_SWITCH(s) ((s->extPortCount == 1) && \
- (s->intPortCount == 1))
-
struct sn_list_entry {
struct uisscsi_dest pdest; /* scsi bus, target, lun for
* phys disk */
@@ -183,23 +179,12 @@ struct sn_list_entry {
struct sn_list_entry *next;
};
-struct network_policy {
- u32 promiscuous:1;
- u32 macassign:1;
- u32 peerforwarding:1;
- u32 nonotify:1;
- u32 standby:1;
- u32 callhome:2;
- char ip_addr[30];
-};
-
/*
* IO messages sent to UisnicControlChanFunc & UissdControlChanFunc by
* code that processes the ControlVm channel messages.
*/
-
-typedef enum {
+enum iopart_msg_type {
IOPART_ADD_VNIC,
IOPART_DEL_VNIC,
IOPART_DEL_ALL_VNICS,
@@ -219,7 +204,7 @@ typedef enum {
IOPART_RESUME_VDISK,
IOPART_ADD_DEVICE, /* add generic device */
IOPART_DEL_DEVICE, /* del generic device */
-} IOPART_MSG_TYPE;
+};
struct add_virt_iopart {
void *chanptr; /* pointer to data channel */
@@ -228,7 +213,7 @@ struct add_virt_iopart {
* for DMA, for ex. */
u64 recv_bus_irq_handle; /* used to register to receive
* bus level interrupts. */
- struct InterruptInfo intr; /* contains recv & send
+ struct irq_info intr; /* contains recv & send
* interrupt info */
/* recvInterruptHandle is used to register to receive
* interrupts on the data channel. Used by GuestLinux/Windows
@@ -259,21 +244,15 @@ struct add_vdisk_iopart {
struct uisscsi_dest pdest; /* scsi bus, target, lun for phys disk */
u8 sernum[MAX_SERIAL_NUM]; /* serial num of physical disk */
u32 serlen; /* length of serial num */
- u32 bus_no;
- u32 dev_no;
};
struct del_vdisk_iopart {
void *chanptr; /* pointer to data channel */
struct uisscsi_dest vdest; /* scsi bus, target, lun for virt disk */
- u32 bus_no;
- u32 dev_no;
};
struct del_virt_iopart {
void *chanptr; /* pointer to data channel */
- u32 bus_no;
- u32 dev_no;
};
struct det_virt_iopart { /* detach internal port */
@@ -297,8 +276,7 @@ struct del_switch_iopart { /* destroy switch */
};
struct io_msgs {
-
- IOPART_MSG_TYPE msgtype;
+ enum iopart_msg_type msgtype;
/* additional params needed by some messages */
union {
@@ -329,7 +307,7 @@ struct io_msgs {
* the ControlVm channel messages.
*/
-typedef enum {
+enum guestpart_msg_type {
GUEST_ADD_VBUS,
GUEST_ADD_VHBA,
GUEST_ADD_VNIC,
@@ -344,15 +322,15 @@ typedef enum {
GUEST_PAUSE_VNIC,
GUEST_RESUME_VHBA,
GUEST_RESUME_VNIC
-} GUESTPART_MSG_TYPE;
+};
struct add_vbus_guestpart {
void __iomem *chanptr; /* pointer to data channel for bus -
* NOT YET USED */
- u32 busNo; /* bus number to be created/deleted */
- u32 deviceCount; /* max num of devices on bus */
- uuid_le busTypeGuid; /* indicates type of bus */
- uuid_le busInstGuid; /* instance guid for device */
+ u32 bus_no; /* bus number to be created/deleted */
+ u32 dev_count; /* max num of devices on bus */
+ uuid_le bus_uuid; /* indicates type of bus */
+ uuid_le instance_uuid; /* instance guid for device */
};
struct del_vbus_guestpart {
@@ -367,7 +345,7 @@ struct add_virt_guestpart {
u32 bus_no; /* bus number for the operation */
u32 device_no; /* number of device on the bus */
uuid_le instance_uuid; /* instance guid for device */
- struct InterruptInfo intr; /* recv/send interrupt info */
+ struct irq_info intr; /* recv/send interrupt info */
/* recvInterruptHandle contains info needed in order to
* register to receive interrupts on the data channel.
* sendInterruptHandle contains handle which is provided to
@@ -394,8 +372,7 @@ struct init_chipset_guestpart {
};
struct guest_msgs {
-
- GUESTPART_MSG_TYPE msgtype;
+ enum guestpart_msg_type msgtype;
/* additional params needed by messages */
union {
diff --git a/drivers/staging/unisys/include/uisutils.h b/drivers/staging/unisys/include/uisutils.h
index 74e7cf65502..7414220676d 100644
--- a/drivers/staging/unisys/include/uisutils.h
+++ b/drivers/staging/unisys/include/uisutils.h
@@ -26,6 +26,7 @@
#include <linux/sched.h>
#include <linux/gfp.h>
#include <linux/uuid.h>
+#include <linux/if_ether.h>
#include "vmcallinterface.h"
#include "channel.h"
@@ -43,39 +44,38 @@
/* global function pointers that act as callback functions into
* uisnicmod, uissdmod, and virtpcimod
*/
-extern int (*UisnicControlChanFunc)(struct io_msgs *);
-extern int (*UissdControlChanFunc)(struct io_msgs *);
-extern int (*VirtControlChanFunc)(struct guest_msgs *);
+extern int (*uisnic_control_chan_func)(struct io_msgs *);
+extern int (*uissd_control_chan_func)(struct io_msgs *);
+extern int (*virt_control_chan_func)(struct guest_msgs *);
/* Return values of above callback functions: */
#define CCF_ERROR 0 /* completed and failed */
#define CCF_OK 1 /* completed successfully */
#define CCF_PENDING 2 /* operation still pending */
-extern atomic_t UisUtils_Registered_Services;
+extern atomic_t uisutils_registered_services;
-typedef unsigned int MACARRAY[MAX_MACADDR_LEN];
-typedef struct ReqHandlerInfo_struct {
- uuid_le switchTypeGuid;
+struct req_handler_info {
+ uuid_le switch_uuid;
int (*controlfunc)(struct io_msgs *);
unsigned long min_channel_bytes;
- int (*Server_Channel_Ok)(unsigned long channelBytes);
- int (*Server_Channel_Init)
- (void *x, unsigned char *clientStr, u32 clientStrLen, u64 bytes);
+ int (*server_channel_ok)(unsigned long channel_bytes);
+ int (*server_channel_init)(void *x, unsigned char *client_str,
+ u32 client_str_len, u64 bytes);
char switch_type_name[99];
struct list_head list_link; /* links into ReqHandlerInfo_list */
-} ReqHandlerInfo_t;
+};
-ReqHandlerInfo_t *ReqHandlerAdd(uuid_le switchTypeGuid,
+struct req_handler_info *req_handler_add(uuid_le switch_uuid,
const char *switch_type_name,
int (*controlfunc)(struct io_msgs *),
unsigned long min_channel_bytes,
- int (*Server_Channel_Ok)(unsigned long
- channelBytes),
- int (*Server_Channel_Init)
- (void *x, unsigned char *clientStr,
- u32 clientStrLen, u64 bytes));
-ReqHandlerInfo_t *ReqHandlerFind(uuid_le switchTypeGuid);
-int ReqHandlerDel(uuid_le switchTypeGuid);
+ int (*svr_channel_ok)(unsigned long
+ channel_bytes),
+ int (*svr_channel_init)(void *x,
+ unsigned char *client_str,
+ u32 client_str_len, u64 bytes));
+struct req_handler_info *req_handler_find(uuid_le switch_uuid);
+int req_handler_del(uuid_le switch_uuid);
#define uislib_ioremap_cache(addr, size) \
dbg_ioremap_cache(addr, size, __FILE__, __LINE__)
@@ -114,52 +114,49 @@ int uisutil_add_proc_line_ex(int *total, char **buffer, int *buffer_remaining,
char *format, ...);
int uisctrl_register_req_handler(int type, void *fptr,
- ULTRA_VBUS_DEVICEINFO *chipset_driver_info);
-int uisctrl_register_req_handler_ex(uuid_le switchTypeGuid,
- const char *switch_type_name,
- int (*fptr)(struct io_msgs *),
- unsigned long min_channel_bytes,
- int (*Server_Channel_Ok)(unsigned long
- channelBytes),
- int (*Server_Channel_Init)
- (void *x, unsigned char *clientStr,
- u32 clientStrLen, u64 bytes),
- ULTRA_VBUS_DEVICEINFO *chipset_DriverInfo);
-
-int uisctrl_unregister_req_handler_ex(uuid_le switchTypeGuid);
+ struct ultra_vbus_deviceinfo *chipset_driver_info);
+int uisctrl_register_req_handler_ex(uuid_le switch_guid,
+ const char *switch_type_name,
+ int (*fptr)(struct io_msgs *),
+ unsigned long min_channel_bytes,
+ int (*svr_channel_ok)(unsigned long
+ channel_bytes),
+ int (*svr_channel_init)(void *x,
+ unsigned char *client_str,
+ u32 client_str_len,
+ u64 bytes),
+ struct ultra_vbus_deviceinfo *chipset_driver_info);
+
+int uisctrl_unregister_req_handler_ex(uuid_le switch_uuid);
unsigned char *util_map_virt(struct phys_info *sg);
void util_unmap_virt(struct phys_info *sg);
unsigned char *util_map_virt_atomic(struct phys_info *sg);
void util_unmap_virt_atomic(void *buf);
-int uislib_server_inject_add_vnic(u32 switchNo, u32 BusNo, u32 numIntPorts,
- u32 numExtPorts, MACARRAY pmac[],
- pCHANNEL_HEADER **chan);
-void uislib_server_inject_del_vnic(u32 switchNo, u32 busNo, u32 numIntPorts,
- u32 numExtPorts);
-int uislib_client_inject_add_bus(u32 busNo, uuid_le instGuid,
- u64 channelAddr, ulong nChannelBytes);
-int uislib_client_inject_del_bus(u32 busNo);
-
-int uislib_client_inject_add_vhba(u32 busNo, u32 devNo,
+int uislib_client_inject_add_bus(u32 bus_no, uuid_le inst_uuid,
+ u64 channel_addr, ulong n_channel_bytes);
+int uislib_client_inject_del_bus(u32 bus_no);
+
+int uislib_client_inject_add_vhba(u32 bus_no, u32 dev_no,
u64 phys_chan_addr, u32 chan_bytes,
- int is_test_addr, uuid_le instGuid,
- struct InterruptInfo *intr);
-int uislib_client_inject_pause_vhba(u32 busNo, u32 devNo);
-int uislib_client_inject_resume_vhba(u32 busNo, u32 devNo);
-int uislib_client_inject_del_vhba(u32 busNo, u32 devNo);
-int uislib_client_inject_add_vnic(u32 busNo, u32 devNo,
+ int is_test_addr, uuid_le inst_uuid,
+ struct irq_info *intr);
+int uislib_client_inject_pause_vhba(u32 bus_no, u32 dev_no);
+int uislib_client_inject_resume_vhba(u32 bus_no, u32 dev_no);
+int uislib_client_inject_del_vhba(u32 bus_no, u32 dev_no);
+int uislib_client_inject_add_vnic(u32 bus_no, u32 dev_no,
u64 phys_chan_addr, u32 chan_bytes,
- int is_test_addr, uuid_le instGuid,
- struct InterruptInfo *intr);
-int uislib_client_inject_pause_vnic(u32 busNo, u32 devNo);
-int uislib_client_inject_resume_vnic(u32 busNo, u32 devNo);
-int uislib_client_inject_del_vnic(u32 busNo, u32 devNo);
+ int is_test_addr, uuid_le inst_uuid,
+ struct irq_info *intr);
+int uislib_client_inject_pause_vnic(u32 bus_no, u32 dev_no);
+int uislib_client_inject_resume_vnic(u32 bus_no, u32 dev_no);
+int uislib_client_inject_del_vnic(u32 bus_no, u32 dev_no);
#ifdef STORAGE_CHANNEL
u64 uislib_storage_channel(int client_id);
#endif
int uislib_get_owned_pdest(struct uisscsi_dest *pdest);
-int uislib_send_event(CONTROLVM_ID id, CONTROLVM_MESSAGE_PACKET *event);
+int uislib_send_event(enum controlvm_id id,
+ struct controlvm_message_packet *event);
/* structure used by vhba & vnic to keep track of queue & thread info */
struct chaninfo {
@@ -182,11 +179,14 @@ struct chaninfo {
set_current_state(TASK_INTERRUPTIBLE); \
schedule_timeout(msecs_to_jiffies(x)); \
}
+
#define UIS_THREAD_WAIT_USEC(x) { \
set_current_state(TASK_INTERRUPTIBLE); \
schedule_timeout(usecs_to_jiffies(x)); \
}
+
#define UIS_THREAD_WAIT UIS_THREAD_WAIT_MSEC(5)
+
#define UIS_THREAD_WAIT_SEC(x) { \
set_current_state(TASK_INTERRUPTIBLE); \
schedule_timeout((x)*HZ); \
@@ -224,42 +224,42 @@ unsigned int uisutil_copy_fragsinfo_from_skb(unsigned char *calling_ctx,
static inline unsigned int
issue_vmcall_io_controlvm_addr(u64 *control_addr, u32 *control_bytes)
{
- VMCALL_IO_CONTROLVM_ADDR_PARAMS params;
+ struct vmcall_io_controlvm_addr_params params;
int result = VMCALL_SUCCESS;
u64 physaddr;
physaddr = virt_to_phys(&params);
ISSUE_IO_VMCALL(VMCALL_IO_CONTROLVM_ADDR, physaddr, result);
if (VMCALL_SUCCESSFUL(result)) {
- *control_addr = params.ChannelAddress;
- *control_bytes = params.ChannelBytes;
+ *control_addr = params.address;
+ *control_bytes = params.channel_bytes;
}
return result;
}
static inline unsigned int issue_vmcall_io_diag_addr(u64 *diag_channel_addr)
{
- VMCALL_IO_DIAG_ADDR_PARAMS params;
+ struct vmcall_io_diag_addr_params params;
int result = VMCALL_SUCCESS;
u64 physaddr;
physaddr = virt_to_phys(&params);
ISSUE_IO_VMCALL(VMCALL_IO_DIAG_ADDR, physaddr, result);
if (VMCALL_SUCCESSFUL(result))
- *diag_channel_addr = params.ChannelAddress;
+ *diag_channel_addr = params.address;
return result;
}
static inline unsigned int issue_vmcall_io_visorserial_addr(u64 *channel_addr)
{
- VMCALL_IO_VISORSERIAL_ADDR_PARAMS params;
+ struct vmcall_io_visorserial_addr_params params;
int result = VMCALL_SUCCESS;
u64 physaddr;
physaddr = virt_to_phys(&params);
ISSUE_IO_VMCALL(VMCALL_IO_VISORSERIAL_ADDR, physaddr, result);
if (VMCALL_SUCCESSFUL(result))
- *channel_addr = params.ChannelAddress;
+ *channel_addr = params.address;
return result;
}
@@ -273,17 +273,8 @@ static inline s64 issue_vmcall_query_guest_virtual_time_offset(void)
return result;
}
-static inline s64 issue_vmcall_measurement_do_nothing(void)
-{
- u64 result = VMCALL_SUCCESS;
- u64 physaddr = 0;
-
- ISSUE_IO_VMCALL(VMCALL_MEASUREMENT_DO_NOTHING, physaddr, result);
- return result;
-}
-
struct log_info_t {
- volatile unsigned long long last_cycles;
+ unsigned long long last_cycles;
unsigned long long delta_sum[64];
unsigned long long delta_cnt[64];
unsigned long long max_delta[64];
@@ -302,44 +293,29 @@ static inline unsigned int issue_vmcall_channel_mismatch(const char *chname,
const char *item_name, u32 line_no,
const char *path_n_fn)
{
- VMCALL_CHANNEL_VERSION_MISMATCH_PARAMS params;
+ struct vmcall_channel_version_mismatch_params params;
int result = VMCALL_SUCCESS;
u64 physaddr;
char *last_slash = NULL;
- strlcpy(params.ChannelName, chname,
- lengthof(VMCALL_CHANNEL_VERSION_MISMATCH_PARAMS, ChannelName));
- strlcpy(params.ItemName, item_name,
- lengthof(VMCALL_CHANNEL_VERSION_MISMATCH_PARAMS, ItemName));
- params.SourceLineNumber = line_no;
+ strlcpy(params.chname, chname, sizeof(params.chname));
+ strlcpy(params.item_name, item_name, sizeof(params.item_name));
+ params.line_no = line_no;
last_slash = strrchr(path_n_fn, '/');
if (last_slash != NULL) {
last_slash++;
- strlcpy(params.SourceFileName, last_slash,
- lengthof(VMCALL_CHANNEL_VERSION_MISMATCH_PARAMS,
- SourceFileName));
+ strlcpy(params.file_name, last_slash, sizeof(params.file_name));
} else
- strlcpy(params.SourceFileName,
+ strlcpy(params.file_name,
"Cannot determine source filename",
- lengthof(VMCALL_CHANNEL_VERSION_MISMATCH_PARAMS,
- SourceFileName));
+ sizeof(params.file_name));
physaddr = virt_to_phys(&params);
ISSUE_IO_VMCALL(VMCALL_CHANNEL_VERSION_MISMATCH, physaddr, result);
return result;
}
-static inline unsigned int issue_vmcall_fatal(void)
-{
- int result = VMCALL_SUCCESS;
- u64 physaddr = 0;
-
- ISSUE_IO_VMCALL(VMCALL_GENERIC_SURRENDER_QUANTUM_FOREVER, physaddr,
- result);
- return result;
-}
-
#define UIS_DAEMONIZE(nam)
void *uislib_cache_alloc(struct kmem_cache *cur_pool, char *fn, int ln);
#define UISCACHEALLOC(cur_pool) uislib_cache_alloc(cur_pool, __FILE__, __LINE__)
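One detail of the issue_vmcall_channel_mismatch() change above: the strlcpy() bounds move from the old lengthof(TYPE, member) helper to plain sizeof(params.member). For array members the two are equivalent, because sizeof applied to a struct's array member yields the full array size. A stand-alone check of that equivalence, using a stand-in struct and a typical definition of the old helper (the driver's own definition may differ):

#include <stdio.h>

struct mismatch_params {	/* stand-in with the same member sizes */
	char chname[32];
	char item_name[32];
	unsigned int line_no;
	char file_name[36];
};

/* a typical definition of the old helper: size of one member of a type */
#define lengthof(TYPE, member) (sizeof(((TYPE *)0)->member))

int main(void)
{
	struct mismatch_params params;

	printf("lengthof=%zu sizeof=%zu\n",
	       lengthof(struct mismatch_params, file_name),
	       sizeof(params.file_name));	/* both print 36 */
	return 0;
}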
diff --git a/drivers/staging/unisys/include/vbushelper.h b/drivers/staging/unisys/include/vbushelper.h
index 1bde549ec0d..84abe5f99f5 100644
--- a/drivers/staging/unisys/include/vbushelper.h
+++ b/drivers/staging/unisys/include/vbushelper.h
@@ -26,19 +26,19 @@
#define TARGET_HOSTNAME "linuxguest"
static inline void bus_device_info_init(
- ULTRA_VBUS_DEVICEINFO * bus_device_info_ptr,
+ struct ultra_vbus_deviceinfo *bus_device_info_ptr,
const char *dev_type, const char *drv_name,
const char *ver, const char *ver_tag)
{
- memset(bus_device_info_ptr, 0, sizeof(ULTRA_VBUS_DEVICEINFO));
- snprintf(bus_device_info_ptr->devType,
- sizeof(bus_device_info_ptr->devType),
+ memset(bus_device_info_ptr, 0, sizeof(struct ultra_vbus_deviceinfo));
+ snprintf(bus_device_info_ptr->devtype,
+ sizeof(bus_device_info_ptr->devtype),
"%s", (dev_type) ? dev_type : "unknownType");
- snprintf(bus_device_info_ptr->drvName,
- sizeof(bus_device_info_ptr->drvName),
+ snprintf(bus_device_info_ptr->drvname,
+ sizeof(bus_device_info_ptr->drvname),
"%s", (drv_name) ? drv_name : "unknownDriver");
- snprintf(bus_device_info_ptr->infoStrings,
- sizeof(bus_device_info_ptr->infoStrings), "%s\t%s\t%s",
+ snprintf(bus_device_info_ptr->infostrs,
+ sizeof(bus_device_info_ptr->infostrs), "%s\t%s\t%s",
(ver) ? ver : "unknownVer",
(ver_tag) ? ver_tag : "unknownVerTag",
TARGET_HOSTNAME);
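bus_device_info_init() above just formats three NUL-terminated strings into the fixed-size fields of struct ultra_vbus_deviceinfo, substituting "unknown..." placeholders when a value is missing. The stand-alone sketch below mimics it with a local copy of the struct so the snprintf() truncation on the 16/16/96-byte fields is easy to observe; struct devinfo and fill_devinfo() are illustrative names, not the driver's.

#include <stdio.h>
#include <string.h>

struct devinfo {		/* mirrors ultra_vbus_deviceinfo's field sizes */
	char devtype[16];
	char drvname[16];
	char infostrs[96];
};

static void fill_devinfo(struct devinfo *di, const char *dev_type,
			 const char *drv_name, const char *ver,
			 const char *ver_tag)
{
	memset(di, 0, sizeof(*di));
	snprintf(di->devtype, sizeof(di->devtype), "%s",
		 dev_type ? dev_type : "unknownType");
	snprintf(di->drvname, sizeof(di->drvname), "%s",
		 drv_name ? drv_name : "unknownDriver");
	/* tab-delimited: <version> <version tag> <hostname> */
	snprintf(di->infostrs, sizeof(di->infostrs), "%s\t%s\t%s",
		 ver ? ver : "unknownVer",
		 ver_tag ? ver_tag : "unknownVerTag",
		 "linuxguest");		/* TARGET_HOSTNAME in the header */
}

int main(void)
{
	struct devinfo di;

	fill_devinfo(&di, "vhba", "a-rather-long-driver-name.sys", "1.0", NULL);
	printf("devtype=%s drvname=%s\ninfostrs=%s\n",
	       di.devtype, di.drvname, di.infostrs);
	return 0;
}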
diff --git a/drivers/staging/unisys/uislib/uislib.c b/drivers/staging/unisys/uislib/uislib.c
index 706f1c0c2c6..7c87452a9f1 100644
--- a/drivers/staging/unisys/uislib/uislib.c
+++ b/drivers/staging/unisys/uislib/uislib.c
@@ -57,7 +57,7 @@
#define __MYFILE__ "uislib.c"
/* global function pointers that act as callback functions into virtpcimod */
-int (*VirtControlChanFunc)(struct guest_msgs *);
+int (*virt_control_chan_func)(struct guest_msgs *);
static int ProcReadBufferValid;
static char *ProcReadBuffer; /* Note this MUST be global,
@@ -121,12 +121,12 @@ static const struct file_operations debugfs_info_fops = {
};
static void
-init_msg_header(CONTROLVM_MESSAGE *msg, u32 id, uint rsp, uint svr)
+init_msg_header(struct controlvm_message *msg, u32 id, uint rsp, uint svr)
{
- memset(msg, 0, sizeof(CONTROLVM_MESSAGE));
- msg->hdr.Id = id;
- msg->hdr.Flags.responseExpected = rsp;
- msg->hdr.Flags.server = svr;
+ memset(msg, 0, sizeof(struct controlvm_message));
+ msg->hdr.id = id;
+ msg->hdr.flags.response_expected = rsp;
+ msg->hdr.flags.server = svr;
}
static __iomem void *
@@ -142,7 +142,7 @@ init_vbus_channel(u64 channelAddr, u32 channelBytes)
rc = NULL;
goto Away;
}
- if (!ULTRA_VBUS_CHANNEL_OK_CLIENT(pChan, NULL)) {
+ if (!SPAR_VBUS_CHANNEL_OK_CLIENT(pChan)) {
ERRDRV("%s channel cannot be used", __func__);
uislib_iounmap(pChan);
rc = NULL;
@@ -154,7 +154,7 @@ Away:
}
static int
-create_bus(CONTROLVM_MESSAGE *msg, char *buf)
+create_bus(struct controlvm_message *msg, char *buf)
{
u32 busNo, deviceCount;
struct bus_info *tmp, *bus;
@@ -168,8 +168,8 @@ create_bus(CONTROLVM_MESSAGE *msg, char *buf)
return CONTROLVM_RESP_ERROR_MAX_BUSES;
}
- busNo = msg->cmd.createBus.busNo;
- deviceCount = msg->cmd.createBus.deviceCount;
+ busNo = msg->cmd.create_bus.bus_no;
+ deviceCount = msg->cmd.create_bus.dev_count;
POSTCODE_LINUX_4(BUS_CREATE_ENTRY_PC, busNo, deviceCount,
POSTCODE_SEVERITY_INFO);
@@ -188,25 +188,25 @@ create_bus(CONTROLVM_MESSAGE *msg, char *buf)
/* Currently by default, the bus Number is the GuestHandle.
* Configure Bus message can override this.
*/
- if (msg->hdr.Flags.testMessage) {
+ if (msg->hdr.flags.test_message) {
/* This implies we're the IOVM so set guest handle to 0... */
- bus->guestHandle = 0;
- bus->busNo = busNo;
- bus->localVnic = 1;
+ bus->guest_handle = 0;
+ bus->bus_no = busNo;
+ bus->local_vnic = 1;
} else
- bus->busNo = bus->guestHandle = busNo;
- sprintf(bus->name, "%d", (int) bus->busNo);
- bus->deviceCount = deviceCount;
+ bus->bus_no = bus->guest_handle = busNo;
+ sprintf(bus->name, "%d", (int) bus->bus_no);
+ bus->device_count = deviceCount;
bus->device =
(struct device_info **) ((char *) bus + sizeof(struct bus_info));
- bus->busInstGuid = msg->cmd.createBus.busInstGuid;
- bus->busChannelBytes = 0;
- bus->pBusChannel = NULL;
+ bus->bus_inst_uuid = msg->cmd.create_bus.bus_inst_uuid;
+ bus->bus_channel_bytes = 0;
+ bus->bus_channel = NULL;
/* add bus to our bus list - but check for duplicates first */
read_lock(&BusListLock);
for (tmp = BusListHead; tmp; tmp = tmp->next) {
- if (tmp->busNo == bus->busNo)
+ if (tmp->bus_no == bus->bus_no)
break;
}
read_unlock(&BusListLock);
@@ -215,39 +215,39 @@ create_bus(CONTROLVM_MESSAGE *msg, char *buf)
* reject add
*/
LOGERR("CONTROLVM_BUS_CREATE Failed: bus %d already exists.\n",
- bus->busNo);
- POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus->busNo,
+ bus->bus_no);
+ POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus->bus_no,
POSTCODE_SEVERITY_ERR);
kfree(bus);
return CONTROLVM_RESP_ERROR_ALREADY_DONE;
}
- if ((msg->cmd.createBus.channelAddr != 0)
- && (msg->cmd.createBus.channelBytes != 0)) {
- bus->busChannelBytes = msg->cmd.createBus.channelBytes;
- bus->pBusChannel =
- init_vbus_channel(msg->cmd.createBus.channelAddr,
- msg->cmd.createBus.channelBytes);
+ if ((msg->cmd.create_bus.channel_addr != 0)
+ && (msg->cmd.create_bus.channel_bytes != 0)) {
+ bus->bus_channel_bytes = msg->cmd.create_bus.channel_bytes;
+ bus->bus_channel =
+ init_vbus_channel(msg->cmd.create_bus.channel_addr,
+ msg->cmd.create_bus.channel_bytes);
}
/* the msg is bound for virtpci; send guest_msgs struct to callback */
- if (!msg->hdr.Flags.server) {
+ if (!msg->hdr.flags.server) {
struct guest_msgs cmd;
cmd.msgtype = GUEST_ADD_VBUS;
- cmd.add_vbus.busNo = busNo;
- cmd.add_vbus.chanptr = bus->pBusChannel;
- cmd.add_vbus.deviceCount = deviceCount;
- cmd.add_vbus.busTypeGuid = msg->cmd.createBus.busDataTypeGuid;
- cmd.add_vbus.busInstGuid = msg->cmd.createBus.busInstGuid;
- if (!VirtControlChanFunc) {
+ cmd.add_vbus.bus_no = busNo;
+ cmd.add_vbus.chanptr = bus->bus_channel;
+ cmd.add_vbus.dev_count = deviceCount;
+ cmd.add_vbus.bus_uuid = msg->cmd.create_bus.bus_data_type_uuid;
+ cmd.add_vbus.instance_uuid = msg->cmd.create_bus.bus_inst_uuid;
+ if (!virt_control_chan_func) {
LOGERR("CONTROLVM_BUS_CREATE Failed: virtpci callback not registered.");
- POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus->busNo,
+ POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus->bus_no,
POSTCODE_SEVERITY_ERR);
kfree(bus);
return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
}
- if (!VirtControlChanFunc(&cmd)) {
+ if (!virt_control_chan_func(&cmd)) {
LOGERR("CONTROLVM_BUS_CREATE Failed: virtpci GUEST_ADD_VBUS returned error.");
- POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus->busNo,
+ POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus->bus_no,
POSTCODE_SEVERITY_ERR);
kfree(bus);
return
@@ -266,26 +266,26 @@ create_bus(CONTROLVM_MESSAGE *msg, char *buf)
BusListCount++;
write_unlock(&BusListLock);
- POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus->busNo,
+ POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus->bus_no,
POSTCODE_SEVERITY_INFO);
return CONTROLVM_RESP_SUCCESS;
}
static int
-destroy_bus(CONTROLVM_MESSAGE *msg, char *buf)
+destroy_bus(struct controlvm_message *msg, char *buf)
{
int i;
struct bus_info *bus, *prev = NULL;
struct guest_msgs cmd;
u32 busNo;
- busNo = msg->cmd.destroyBus.busNo;
+ busNo = msg->cmd.destroy_bus.bus_no;
read_lock(&BusListLock);
bus = BusListHead;
while (bus) {
- if (bus->busNo == busNo)
+ if (bus->bus_no == busNo)
break;
prev = bus;
bus = bus->next;
@@ -299,7 +299,7 @@ destroy_bus(CONTROLVM_MESSAGE *msg, char *buf)
}
/* verify that this bus has no devices. */
- for (i = 0; i < bus->deviceCount; i++) {
+ for (i = 0; i < bus->device_count; i++) {
if (bus->device[i] != NULL) {
LOGERR("CONTROLVM_BUS_DESTROY Failed: device %i attached to bus %d.",
i, busNo);
@@ -309,18 +309,18 @@ destroy_bus(CONTROLVM_MESSAGE *msg, char *buf)
}
read_unlock(&BusListLock);
- if (msg->hdr.Flags.server)
+ if (msg->hdr.flags.server)
goto remove;
/* client messages require us to call the virtpci callback associated
with this bus. */
cmd.msgtype = GUEST_DEL_VBUS;
cmd.del_vbus.bus_no = busNo;
- if (!VirtControlChanFunc) {
+ if (!virt_control_chan_func) {
LOGERR("CONTROLVM_BUS_DESTROY Failed: virtpci callback not registered.");
return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
}
- if (!VirtControlChanFunc(&cmd)) {
+ if (!virt_control_chan_func(&cmd)) {
LOGERR("CONTROLVM_BUS_DESTROY Failed: virtpci GUEST_DEL_VBUS returned error.");
return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
}
@@ -335,9 +335,9 @@ remove:
BusListCount--;
write_unlock(&BusListLock);
- if (bus->pBusChannel) {
- uislib_iounmap(bus->pBusChannel);
- bus->pBusChannel = NULL;
+ if (bus->bus_channel) {
+ uislib_iounmap(bus->bus_channel);
+ bus->bus_channel = NULL;
}
kfree(bus);
@@ -345,17 +345,17 @@ remove:
}
static int
-create_device(CONTROLVM_MESSAGE *msg, char *buf)
+create_device(struct controlvm_message *msg, char *buf)
{
struct device_info *dev;
struct bus_info *bus;
u32 busNo, devNo;
int result = CONTROLVM_RESP_SUCCESS;
u64 minSize = MIN_IO_CHANNEL_SIZE;
- ReqHandlerInfo_t *pReqHandler;
+ struct req_handler_info *pReqHandler;
- busNo = msg->cmd.createDevice.busNo;
- devNo = msg->cmd.createDevice.devNo;
+ busNo = msg->cmd.create_device.bus_no;
+ devNo = msg->cmd.create_device.dev_no;
POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, devNo, busNo,
POSTCODE_SEVERITY_INFO);
@@ -368,26 +368,26 @@ create_device(CONTROLVM_MESSAGE *msg, char *buf)
return CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
}
- dev->channel_uuid = msg->cmd.createDevice.dataTypeGuid;
- dev->intr = msg->cmd.createDevice.intr;
- dev->channel_addr = msg->cmd.createDevice.channelAddr;
+ dev->channel_uuid = msg->cmd.create_device.data_type_uuid;
+ dev->intr = msg->cmd.create_device.intr;
+ dev->channel_addr = msg->cmd.create_device.channel_addr;
dev->bus_no = busNo;
dev->dev_no = devNo;
sema_init(&dev->interrupt_callback_lock, 1); /* unlocked */
sprintf(dev->devid, "vbus%u:dev%u", (unsigned) busNo, (unsigned) devNo);
/* map the channel memory for the device. */
- if (msg->hdr.Flags.testMessage)
+ if (msg->hdr.flags.test_message)
dev->chanptr = (void __iomem *)__va(dev->channel_addr);
else {
- pReqHandler = ReqHandlerFind(dev->channel_uuid);
+ pReqHandler = req_handler_find(dev->channel_uuid);
if (pReqHandler)
/* generic service handler registered for this
* channel
*/
minSize = pReqHandler->min_channel_bytes;
- if (minSize > msg->cmd.createDevice.channelBytes) {
+ if (minSize > msg->cmd.create_device.channel_bytes) {
LOGERR("CONTROLVM_DEVICE_CREATE Failed: channel size is too small, channel size:0x%lx, required size:0x%lx",
- (ulong) msg->cmd.createDevice.channelBytes,
+ (ulong) msg->cmd.create_device.channel_bytes,
(ulong) minSize);
POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
POSTCODE_SEVERITY_ERR);
@@ -396,27 +396,27 @@ create_device(CONTROLVM_MESSAGE *msg, char *buf)
}
dev->chanptr =
uislib_ioremap_cache(dev->channel_addr,
- msg->cmd.createDevice.channelBytes);
+ msg->cmd.create_device.channel_bytes);
if (!dev->chanptr) {
LOGERR("CONTROLVM_DEVICE_CREATE Failed: ioremap_cache of channelAddr:%Lx for channelBytes:%llu failed",
dev->channel_addr,
- msg->cmd.createDevice.channelBytes);
+ msg->cmd.create_device.channel_bytes);
result = CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
POSTCODE_SEVERITY_ERR);
goto Away;
}
}
- dev->instance_uuid = msg->cmd.createDevice.devInstGuid;
- dev->channel_bytes = msg->cmd.createDevice.channelBytes;
+ dev->instance_uuid = msg->cmd.create_device.dev_inst_uuid;
+ dev->channel_bytes = msg->cmd.create_device.channel_bytes;
read_lock(&BusListLock);
for (bus = BusListHead; bus; bus = bus->next) {
- if (bus->busNo == busNo) {
+ if (bus->bus_no == busNo) {
/* make sure the device number is valid */
- if (devNo >= bus->deviceCount) {
+ if (devNo >= bus->device_count) {
LOGERR("CONTROLVM_DEVICE_CREATE Failed: device (%d) >= deviceCount (%d).",
- devNo, bus->deviceCount);
+ devNo, bus->device_count);
result = CONTROLVM_RESP_ERROR_MAX_DEVICES;
POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC,
devNo, busNo,
@@ -439,17 +439,18 @@ create_device(CONTROLVM_MESSAGE *msg, char *buf)
/* the msg is bound for virtpci; send
* guest_msgs struct to callback
*/
- if (!msg->hdr.Flags.server) {
+ if (!msg->hdr.flags.server) {
struct guest_msgs cmd;
if (!uuid_le_cmp(dev->channel_uuid,
- UltraVhbaChannelProtocolGuid)) {
- wait_for_valid_guid(&((CHANNEL_HEADER
- __iomem *) (dev->
+ spar_vhba_channel_protocol_uuid)) {
+ wait_for_valid_guid(&((
+ struct channel_header
+ __iomem *) (dev->
chanptr))->
- Type);
- if (!ULTRA_VHBA_CHANNEL_OK_CLIENT
- (dev->chanptr, NULL)) {
+ chtype);
+ if (!SPAR_VHBA_CHANNEL_OK_CLIENT
+ (dev->chanptr)) {
LOGERR("CONTROLVM_DEVICE_CREATE Failed:[CLIENT]VHBA dev %d chan invalid.",
devNo);
POSTCODE_LINUX_4
@@ -468,13 +469,14 @@ create_device(CONTROLVM_MESSAGE *msg, char *buf)
cmd.add_vhba.intr = dev->intr;
} else
if (!uuid_le_cmp(dev->channel_uuid,
- UltraVnicChannelProtocolGuid)) {
- wait_for_valid_guid(&((CHANNEL_HEADER
- __iomem *) (dev->
+ spar_vnic_channel_protocol_uuid)) {
+ wait_for_valid_guid(&((
+ struct channel_header
+ __iomem *) (dev->
chanptr))->
- Type);
- if (!ULTRA_VNIC_CHANNEL_OK_CLIENT
- (dev->chanptr, NULL)) {
+ chtype);
+ if (!SPAR_VNIC_CHANNEL_OK_CLIENT
+ (dev->chanptr)) {
LOGERR("CONTROLVM_DEVICE_CREATE Failed: VNIC[CLIENT] dev %d chan invalid.",
devNo);
POSTCODE_LINUX_4
@@ -500,7 +502,7 @@ create_device(CONTROLVM_MESSAGE *msg, char *buf)
goto Away;
}
- if (!VirtControlChanFunc) {
+ if (!virt_control_chan_func) {
LOGERR("CONTROLVM_DEVICE_CREATE Failed: virtpci callback not registered.");
POSTCODE_LINUX_4
(DEVICE_CREATE_FAILURE_PC, devNo,
@@ -509,7 +511,7 @@ create_device(CONTROLVM_MESSAGE *msg, char *buf)
goto Away;
}
- if (!VirtControlChanFunc(&cmd)) {
+ if (!virt_control_chan_func(&cmd)) {
LOGERR("CONTROLVM_DEVICE_CREATE Failed: virtpci GUEST_ADD_[VHBA||VNIC] returned error.");
POSTCODE_LINUX_4
(DEVICE_CREATE_FAILURE_PC, devNo,
@@ -532,7 +534,7 @@ create_device(CONTROLVM_MESSAGE *msg, char *buf)
result = CONTROLVM_RESP_ERROR_BUS_INVALID;
Away:
- if (!msg->hdr.Flags.testMessage) {
+ if (!msg->hdr.flags.test_message) {
uislib_iounmap(dev->chanptr);
dev->chanptr = NULL;
}
@@ -542,7 +544,7 @@ Away:
}
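The test_message flag selects how create_device() maps the channel memory; condensed from the hunks above, with error handling omitted:

    if (msg->hdr.flags.test_message)
        /* test messages hand us an address that is already mapped */
        dev->chanptr = (void __iomem *)__va(dev->channel_addr);
    else
        /* real messages carry a physical address that must be ioremap()ed */
        dev->chanptr = uislib_ioremap_cache(dev->channel_addr,
                                            msg->cmd.create_device.channel_bytes);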
static int
-pause_device(CONTROLVM_MESSAGE *msg)
+pause_device(struct controlvm_message *msg)
{
u32 busNo, devNo;
struct bus_info *bus;
@@ -550,16 +552,16 @@ pause_device(CONTROLVM_MESSAGE *msg)
struct guest_msgs cmd;
int retval = CONTROLVM_RESP_SUCCESS;
- busNo = msg->cmd.deviceChangeState.busNo;
- devNo = msg->cmd.deviceChangeState.devNo;
+ busNo = msg->cmd.device_change_state.bus_no;
+ devNo = msg->cmd.device_change_state.dev_no;
read_lock(&BusListLock);
for (bus = BusListHead; bus; bus = bus->next) {
- if (bus->busNo == busNo) {
+ if (bus->bus_no == busNo) {
/* make sure the device number is valid */
- if (devNo >= bus->deviceCount) {
+ if (devNo >= bus->device_count) {
LOGERR("CONTROLVM_DEVICE_CHANGESTATE:pause Failed: device(%d) >= deviceCount(%d).",
- devNo, bus->deviceCount);
+ devNo, bus->device_count);
retval = CONTROLVM_RESP_ERROR_DEVICE_INVALID;
} else {
/* make sure this device exists */
@@ -585,22 +587,22 @@ pause_device(CONTROLVM_MESSAGE *msg)
* guest_msgs struct to callback
*/
if (!uuid_le_cmp(dev->channel_uuid,
- UltraVhbaChannelProtocolGuid)) {
+ spar_vhba_channel_protocol_uuid)) {
cmd.msgtype = GUEST_PAUSE_VHBA;
cmd.pause_vhba.chanptr = dev->chanptr;
} else if (!uuid_le_cmp(dev->channel_uuid,
- UltraVnicChannelProtocolGuid)) {
+ spar_vnic_channel_protocol_uuid)) {
cmd.msgtype = GUEST_PAUSE_VNIC;
cmd.pause_vnic.chanptr = dev->chanptr;
} else {
LOGERR("CONTROLVM_DEVICE_CHANGESTATE:pause Failed: unknown channelTypeGuid.\n");
return CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN;
}
- if (!VirtControlChanFunc) {
+ if (!virt_control_chan_func) {
LOGERR("CONTROLVM_DEVICE_CHANGESTATE Failed: virtpci callback not registered.");
return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
}
- if (!VirtControlChanFunc(&cmd)) {
+ if (!virt_control_chan_func(&cmd)) {
LOGERR("CONTROLVM_DEVICE_CHANGESTATE:pause Failed: virtpci GUEST_PAUSE_[VHBA||VNIC] returned error.");
return
CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
@@ -610,7 +612,7 @@ pause_device(CONTROLVM_MESSAGE *msg)
}
static int
-resume_device(CONTROLVM_MESSAGE *msg)
+resume_device(struct controlvm_message *msg)
{
u32 busNo, devNo;
struct bus_info *bus;
@@ -618,16 +620,16 @@ resume_device(CONTROLVM_MESSAGE *msg)
struct guest_msgs cmd;
int retval = CONTROLVM_RESP_SUCCESS;
- busNo = msg->cmd.deviceChangeState.busNo;
- devNo = msg->cmd.deviceChangeState.devNo;
+ busNo = msg->cmd.device_change_state.bus_no;
+ devNo = msg->cmd.device_change_state.dev_no;
read_lock(&BusListLock);
for (bus = BusListHead; bus; bus = bus->next) {
- if (bus->busNo == busNo) {
+ if (bus->bus_no == busNo) {
/* make sure the device number is valid */
- if (devNo >= bus->deviceCount) {
+ if (devNo >= bus->device_count) {
LOGERR("CONTROLVM_DEVICE_CHANGESTATE:resume Failed: device(%d) >= deviceCount(%d).",
- devNo, bus->deviceCount);
+ devNo, bus->device_count);
retval = CONTROLVM_RESP_ERROR_DEVICE_INVALID;
} else {
/* make sure this device exists */
@@ -654,22 +656,22 @@ resume_device(CONTROLVM_MESSAGE *msg)
*/
if (retval == CONTROLVM_RESP_SUCCESS) {
if (!uuid_le_cmp(dev->channel_uuid,
- UltraVhbaChannelProtocolGuid)) {
+ spar_vhba_channel_protocol_uuid)) {
cmd.msgtype = GUEST_RESUME_VHBA;
cmd.resume_vhba.chanptr = dev->chanptr;
} else if (!uuid_le_cmp(dev->channel_uuid,
- UltraVnicChannelProtocolGuid)) {
+ spar_vnic_channel_protocol_uuid)) {
cmd.msgtype = GUEST_RESUME_VNIC;
cmd.resume_vnic.chanptr = dev->chanptr;
} else {
LOGERR("CONTROLVM_DEVICE_CHANGESTATE:resume Failed: unknown channelTypeGuid.\n");
return CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN;
}
- if (!VirtControlChanFunc) {
+ if (!virt_control_chan_func) {
LOGERR("CONTROLVM_DEVICE_CHANGESTATE Failed: virtpci callback not registered.");
return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
}
- if (!VirtControlChanFunc(&cmd)) {
+ if (!virt_control_chan_func(&cmd)) {
LOGERR("CONTROLVM_DEVICE_CHANGESTATE:resume Failed: virtpci GUEST_RESUME_[VHBA||VNIC] returned error.");
return
CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
@@ -679,7 +681,7 @@ resume_device(CONTROLVM_MESSAGE *msg)
}
static int
-destroy_device(CONTROLVM_MESSAGE *msg, char *buf)
+destroy_device(struct controlvm_message *msg, char *buf)
{
u32 busNo, devNo;
struct bus_info *bus;
@@ -687,17 +689,17 @@ destroy_device(CONTROLVM_MESSAGE *msg, char *buf)
struct guest_msgs cmd;
int retval = CONTROLVM_RESP_SUCCESS;
- busNo = msg->cmd.destroyDevice.busNo;
- devNo = msg->cmd.destroyDevice.devNo;
+ busNo = msg->cmd.destroy_device.bus_no;
+ devNo = msg->cmd.destroy_device.dev_no;
read_lock(&BusListLock);
LOGINF("destroy_device called for busNo=%u, devNo=%u", busNo, devNo);
for (bus = BusListHead; bus; bus = bus->next) {
- if (bus->busNo == busNo) {
+ if (bus->bus_no == busNo) {
/* make sure the device number is valid */
- if (devNo >= bus->deviceCount) {
+ if (devNo >= bus->device_count) {
LOGERR("CONTROLVM_DEVICE_DESTORY Failed: device(%d) >= deviceCount(%d).",
- devNo, bus->deviceCount);
+ devNo, bus->device_count);
retval = CONTROLVM_RESP_ERROR_DEVICE_INVALID;
} else {
/* make sure this device exists */
@@ -724,11 +726,11 @@ destroy_device(CONTROLVM_MESSAGE *msg, char *buf)
* guest_msgs struct to callback
*/
if (!uuid_le_cmp(dev->channel_uuid,
- UltraVhbaChannelProtocolGuid)) {
+ spar_vhba_channel_protocol_uuid)) {
cmd.msgtype = GUEST_DEL_VHBA;
cmd.del_vhba.chanptr = dev->chanptr;
} else if (!uuid_le_cmp(dev->channel_uuid,
- UltraVnicChannelProtocolGuid)) {
+ spar_vnic_channel_protocol_uuid)) {
cmd.msgtype = GUEST_DEL_VNIC;
cmd.del_vnic.chanptr = dev->chanptr;
} else {
@@ -736,12 +738,12 @@ destroy_device(CONTROLVM_MESSAGE *msg, char *buf)
return
CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN;
}
- if (!VirtControlChanFunc) {
+ if (!virt_control_chan_func) {
LOGERR("CONTROLVM_DEVICE_DESTORY Failed: virtpci callback not registered.");
return
CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
}
- if (!VirtControlChanFunc(&cmd)) {
+ if (!virt_control_chan_func(&cmd)) {
LOGERR("CONTROLVM_DEVICE_DESTROY Failed: virtpci GUEST_DEL_[VHBA||VNIC] returned error.");
return
CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
@@ -756,7 +758,7 @@ destroy_device(CONTROLVM_MESSAGE *msg, char *buf)
uislib_disable_channel_interrupts(busNo, devNo);
}
/* unmap the channel memory for the device. */
- if (!msg->hdr.Flags.testMessage) {
+ if (!msg->hdr.flags.test_message) {
LOGINF("destroy_device, doing iounmap");
uislib_iounmap(dev->chanptr);
}
@@ -767,23 +769,23 @@ destroy_device(CONTROLVM_MESSAGE *msg, char *buf)
}
static int
-init_chipset(CONTROLVM_MESSAGE *msg, char *buf)
+init_chipset(struct controlvm_message *msg, char *buf)
{
POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
- MaxBusCount = msg->cmd.initChipset.busCount;
- PlatformNumber = msg->cmd.initChipset.platformNumber;
+ MaxBusCount = msg->cmd.init_chipset.bus_count;
+ PlatformNumber = msg->cmd.init_chipset.platform_number;
PhysicalDataChan = 0;
/* We need to make sure we have our functions registered
* before processing messages. If we are a test vehicle the
- * testMessage for init_chipset will be set. We can ignore the
+ * test_message for init_chipset will be set. We can ignore the
* waits for the callbacks, since this will be manually entered
- * from a user. If no testMessage is set, we will wait for the
+ * from a user. If no test_message is set, we will wait for the
* functions.
*/
- if (!msg->hdr.Flags.testMessage)
- WAIT_ON_CALLBACK(VirtControlChanFunc);
+ if (!msg->hdr.flags.test_message)
+ WAIT_ON_CALLBACK(virt_control_chan_func);
chipset_inited = 1;
POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
@@ -794,10 +796,10 @@ init_chipset(CONTROLVM_MESSAGE *msg, char *buf)
static int
delete_bus_glue(u32 busNo)
{
- CONTROLVM_MESSAGE msg;
+ struct controlvm_message msg;
init_msg_header(&msg, CONTROLVM_BUS_DESTROY, 0, 0);
- msg.cmd.destroyBus.busNo = busNo;
+ msg.cmd.destroy_bus.bus_no = busNo;
if (destroy_bus(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
LOGERR("destroy_bus failed. busNo=0x%x\n", busNo);
return 0;
@@ -808,11 +810,11 @@ delete_bus_glue(u32 busNo)
static int
delete_device_glue(u32 busNo, u32 devNo)
{
- CONTROLVM_MESSAGE msg;
+ struct controlvm_message msg;
init_msg_header(&msg, CONTROLVM_DEVICE_DESTROY, 0, 0);
- msg.cmd.destroyDevice.busNo = busNo;
- msg.cmd.destroyDevice.devNo = devNo;
+ msg.cmd.destroy_device.bus_no = busNo;
+ msg.cmd.destroy_device.dev_no = devNo;
if (destroy_device(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
LOGERR("destroy_device failed. busNo=0x%x devNo=0x%x\n", busNo,
devNo);
@@ -822,14 +824,14 @@ delete_device_glue(u32 busNo, u32 devNo)
}
int
-uislib_client_inject_add_bus(u32 busNo, uuid_le instGuid,
- u64 channelAddr, ulong nChannelBytes)
+uislib_client_inject_add_bus(u32 bus_no, uuid_le inst_uuid,
+ u64 channel_addr, ulong n_channel_bytes)
{
- CONTROLVM_MESSAGE msg;
+ struct controlvm_message msg;
- LOGINF("enter busNo=0x%x\n", busNo);
+ LOGINF("enter busNo=0x%x\n", bus_no);
/* step 0: init the chipset */
- POSTCODE_LINUX_3(CHIPSET_INIT_ENTRY_PC, busNo, POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX_3(CHIPSET_INIT_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);
if (!chipset_inited) {
/* step: initialize the chipset */
@@ -841,31 +843,32 @@ uislib_client_inject_add_bus(u32 busNo, uuid_le instGuid,
* after number 4, then the add_vnic will fail, and the
* ultraboot will fail.
*/
- msg.cmd.initChipset.busCount = 23;
- msg.cmd.initChipset.switchCount = 0;
+ msg.cmd.init_chipset.bus_count = 23;
+ msg.cmd.init_chipset.switch_count = 0;
if (init_chipset(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
LOGERR("init_chipset failed.\n");
return 0;
}
LOGINF("chipset initialized\n");
- POSTCODE_LINUX_3(CHIPSET_INIT_EXIT_PC, busNo,
+ POSTCODE_LINUX_3(CHIPSET_INIT_EXIT_PC, bus_no,
POSTCODE_SEVERITY_INFO);
}
/* step 1: create a bus */
- POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, busNo, POSTCODE_SEVERITY_WARNING);
+ POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no,
+ POSTCODE_SEVERITY_WARNING);
init_msg_header(&msg, CONTROLVM_BUS_CREATE, 0, 0);
- msg.cmd.createBus.busNo = busNo;
- msg.cmd.createBus.deviceCount = 23; /* devNo+1; */
- msg.cmd.createBus.channelAddr = channelAddr;
- msg.cmd.createBus.channelBytes = nChannelBytes;
+ msg.cmd.create_bus.bus_no = bus_no;
+ msg.cmd.create_bus.dev_count = 23; /* devNo+1; */
+ msg.cmd.create_bus.channel_addr = channel_addr;
+ msg.cmd.create_bus.channel_bytes = n_channel_bytes;
if (create_bus(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
LOGERR("create_bus failed.\n");
- POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, busNo,
+ POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
POSTCODE_SEVERITY_ERR);
return 0;
}
- POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, busNo, POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);
return 1;
}
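A hypothetical guest-side caller would chain the injection helpers roughly as follows; the bus/device numbers, channel address, size, and UUID are illustrative placeholders only:

    /* create bus 5, then put VHBA device 0 on it (no irq_info supplied) */
    if (!uislib_client_inject_add_bus(5, inst_uuid, chan_addr, chan_bytes))
        return -ENODEV;
    if (!uislib_client_inject_add_vhba(5, 0, chan_addr, chan_bytes,
                                       0 /* !is_test_addr */, inst_uuid, NULL))
        return -ENODEV;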
@@ -873,26 +876,26 @@ EXPORT_SYMBOL_GPL(uislib_client_inject_add_bus);
int
-uislib_client_inject_del_bus(u32 busNo)
+uislib_client_inject_del_bus(u32 bus_no)
{
- return delete_bus_glue(busNo);
+ return delete_bus_glue(bus_no);
}
EXPORT_SYMBOL_GPL(uislib_client_inject_del_bus);
int
-uislib_client_inject_pause_vhba(u32 busNo, u32 devNo)
+uislib_client_inject_pause_vhba(u32 bus_no, u32 dev_no)
{
- CONTROLVM_MESSAGE msg;
+ struct controlvm_message msg;
int rc;
init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0);
- msg.cmd.deviceChangeState.busNo = busNo;
- msg.cmd.deviceChangeState.devNo = devNo;
- msg.cmd.deviceChangeState.state = SegmentStateStandby;
+ msg.cmd.device_change_state.bus_no = bus_no;
+ msg.cmd.device_change_state.dev_no = dev_no;
+ msg.cmd.device_change_state.state = segment_state_standby;
rc = pause_device(&msg);
if (rc != CONTROLVM_RESP_SUCCESS) {
LOGERR("VHBA pause_device failed. busNo=0x%x devNo=0x%x\n",
- busNo, devNo);
+ bus_no, dev_no);
return rc;
}
return 0;
@@ -900,19 +903,19 @@ uislib_client_inject_pause_vhba(u32 busNo, u32 devNo)
EXPORT_SYMBOL_GPL(uislib_client_inject_pause_vhba);
int
-uislib_client_inject_resume_vhba(u32 busNo, u32 devNo)
+uislib_client_inject_resume_vhba(u32 bus_no, u32 dev_no)
{
- CONTROLVM_MESSAGE msg;
+ struct controlvm_message msg;
int rc;
init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0);
- msg.cmd.deviceChangeState.busNo = busNo;
- msg.cmd.deviceChangeState.devNo = devNo;
- msg.cmd.deviceChangeState.state = SegmentStateRunning;
+ msg.cmd.device_change_state.bus_no = bus_no;
+ msg.cmd.device_change_state.dev_no = dev_no;
+ msg.cmd.device_change_state.state = segment_state_running;
rc = resume_device(&msg);
if (rc != CONTROLVM_RESP_SUCCESS) {
LOGERR("VHBA resume_device failed. busNo=0x%x devNo=0x%x\n",
- busNo, devNo);
+ bus_no, dev_no);
return rc;
}
return 0;
@@ -921,19 +924,19 @@ uislib_client_inject_resume_vhba(u32 busNo, u32 devNo)
EXPORT_SYMBOL_GPL(uislib_client_inject_resume_vhba);
int
-uislib_client_inject_add_vhba(u32 busNo, u32 devNo,
+uislib_client_inject_add_vhba(u32 bus_no, u32 dev_no,
u64 phys_chan_addr, u32 chan_bytes,
- int is_test_addr, uuid_le instGuid,
- struct InterruptInfo *intr)
+ int is_test_addr, uuid_le inst_uuid,
+ struct irq_info *intr)
{
- CONTROLVM_MESSAGE msg;
+ struct controlvm_message msg;
- LOGINF(" enter busNo=0x%x devNo=0x%x\n", busNo, devNo);
+ LOGINF(" enter busNo=0x%x devNo=0x%x\n", bus_no, dev_no);
/* chipset init'ed with bus bus has been previously created -
* Verify it still exists step 2: create the VHBA device on the
* bus
*/
- POSTCODE_LINUX_4(VHBA_CREATE_ENTRY_PC, devNo, busNo,
+ POSTCODE_LINUX_4(VHBA_CREATE_ENTRY_PC, dev_no, bus_no,
POSTCODE_SEVERITY_INFO);
init_msg_header(&msg, CONTROLVM_DEVICE_CREATE, 0, 0);
@@ -941,16 +944,16 @@ uislib_client_inject_add_vhba(u32 busNo, u32 devNo,
/* signify that the physical channel address does NOT
* need to be ioremap()ed
*/
- msg.hdr.Flags.testMessage = 1;
- msg.cmd.createDevice.busNo = busNo;
- msg.cmd.createDevice.devNo = devNo;
- msg.cmd.createDevice.devInstGuid = instGuid;
+ msg.hdr.flags.test_message = 1;
+ msg.cmd.create_device.bus_no = bus_no;
+ msg.cmd.create_device.dev_no = dev_no;
+ msg.cmd.create_device.dev_inst_uuid = inst_uuid;
if (intr)
- msg.cmd.createDevice.intr = *intr;
+ msg.cmd.create_device.intr = *intr;
else
- memset(&msg.cmd.createDevice.intr, 0,
- sizeof(struct InterruptInfo));
- msg.cmd.createDevice.channelAddr = phys_chan_addr;
+ memset(&msg.cmd.create_device.intr, 0,
+ sizeof(struct irq_info));
+ msg.cmd.create_device.channel_addr = phys_chan_addr;
if (chan_bytes < MIN_IO_CHANNEL_SIZE) {
LOGERR("wrong channel size.chan_bytes = 0x%x IO_CHANNEL_SIZE= 0x%x\n",
chan_bytes, (unsigned int) MIN_IO_CHANNEL_SIZE);
@@ -958,41 +961,41 @@ uislib_client_inject_add_vhba(u32 busNo, u32 devNo,
MIN_IO_CHANNEL_SIZE, POSTCODE_SEVERITY_ERR);
return 0;
}
- msg.cmd.createDevice.channelBytes = chan_bytes;
- msg.cmd.createDevice.dataTypeGuid = UltraVhbaChannelProtocolGuid;
+ msg.cmd.create_device.channel_bytes = chan_bytes;
+ msg.cmd.create_device.data_type_uuid = spar_vhba_channel_protocol_uuid;
if (create_device(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
LOGERR("VHBA create_device failed.\n");
- POSTCODE_LINUX_4(VHBA_CREATE_FAILURE_PC, devNo, busNo,
+ POSTCODE_LINUX_4(VHBA_CREATE_FAILURE_PC, dev_no, bus_no,
POSTCODE_SEVERITY_ERR);
return 0;
}
- POSTCODE_LINUX_4(VHBA_CREATE_SUCCESS_PC, devNo, busNo,
+ POSTCODE_LINUX_4(VHBA_CREATE_SUCCESS_PC, dev_no, bus_no,
POSTCODE_SEVERITY_INFO);
return 1;
}
EXPORT_SYMBOL_GPL(uislib_client_inject_add_vhba);
int
-uislib_client_inject_del_vhba(u32 busNo, u32 devNo)
+uislib_client_inject_del_vhba(u32 bus_no, u32 dev_no)
{
- return delete_device_glue(busNo, devNo);
+ return delete_device_glue(bus_no, dev_no);
}
EXPORT_SYMBOL_GPL(uislib_client_inject_del_vhba);
int
-uislib_client_inject_add_vnic(u32 busNo, u32 devNo,
+uislib_client_inject_add_vnic(u32 bus_no, u32 dev_no,
u64 phys_chan_addr, u32 chan_bytes,
- int is_test_addr, uuid_le instGuid,
- struct InterruptInfo *intr)
+ int is_test_addr, uuid_le inst_uuid,
+ struct irq_info *intr)
{
- CONTROLVM_MESSAGE msg;
+ struct controlvm_message msg;
- LOGINF(" enter busNo=0x%x devNo=0x%x\n", busNo, devNo);
+ LOGINF(" enter busNo=0x%x devNo=0x%x\n", bus_no, dev_no);
/* chipset init'ed with bus bus has been previously created -
* Verify it still exists step 2: create the VNIC device on the
* bus
*/
- POSTCODE_LINUX_4(VNIC_CREATE_ENTRY_PC, devNo, busNo,
+ POSTCODE_LINUX_4(VNIC_CREATE_ENTRY_PC, dev_no, bus_no,
POSTCODE_SEVERITY_INFO);
init_msg_header(&msg, CONTROLVM_DEVICE_CREATE, 0, 0);
@@ -1000,16 +1003,16 @@ uislib_client_inject_add_vnic(u32 busNo, u32 devNo,
/* signify that the physical channel address does NOT
* need to be ioremap()ed
*/
- msg.hdr.Flags.testMessage = 1;
- msg.cmd.createDevice.busNo = busNo;
- msg.cmd.createDevice.devNo = devNo;
- msg.cmd.createDevice.devInstGuid = instGuid;
+ msg.hdr.flags.test_message = 1;
+ msg.cmd.create_device.bus_no = bus_no;
+ msg.cmd.create_device.dev_no = dev_no;
+ msg.cmd.create_device.dev_inst_uuid = inst_uuid;
if (intr)
- msg.cmd.createDevice.intr = *intr;
+ msg.cmd.create_device.intr = *intr;
else
- memset(&msg.cmd.createDevice.intr, 0,
- sizeof(struct InterruptInfo));
- msg.cmd.createDevice.channelAddr = phys_chan_addr;
+ memset(&msg.cmd.create_device.intr, 0,
+ sizeof(struct irq_info));
+ msg.cmd.create_device.channel_addr = phys_chan_addr;
if (chan_bytes < MIN_IO_CHANNEL_SIZE) {
LOGERR("wrong channel size.chan_bytes = 0x%x IO_CHANNEL_SIZE= 0x%x\n",
chan_bytes, (unsigned int) MIN_IO_CHANNEL_SIZE);
@@ -1017,35 +1020,35 @@ uislib_client_inject_add_vnic(u32 busNo, u32 devNo,
MIN_IO_CHANNEL_SIZE, POSTCODE_SEVERITY_ERR);
return 0;
}
- msg.cmd.createDevice.channelBytes = chan_bytes;
- msg.cmd.createDevice.dataTypeGuid = UltraVnicChannelProtocolGuid;
+ msg.cmd.create_device.channel_bytes = chan_bytes;
+ msg.cmd.create_device.data_type_uuid = spar_vnic_channel_protocol_uuid;
if (create_device(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
LOGERR("VNIC create_device failed.\n");
- POSTCODE_LINUX_4(VNIC_CREATE_FAILURE_PC, devNo, busNo,
+ POSTCODE_LINUX_4(VNIC_CREATE_FAILURE_PC, dev_no, bus_no,
POSTCODE_SEVERITY_ERR);
return 0;
}
- POSTCODE_LINUX_4(VNIC_CREATE_SUCCESS_PC, devNo, busNo,
+ POSTCODE_LINUX_4(VNIC_CREATE_SUCCESS_PC, dev_no, bus_no,
POSTCODE_SEVERITY_INFO);
return 1;
}
EXPORT_SYMBOL_GPL(uislib_client_inject_add_vnic);
int
-uislib_client_inject_pause_vnic(u32 busNo, u32 devNo)
+uislib_client_inject_pause_vnic(u32 bus_no, u32 dev_no)
{
- CONTROLVM_MESSAGE msg;
+ struct controlvm_message msg;
int rc;
init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0);
- msg.cmd.deviceChangeState.busNo = busNo;
- msg.cmd.deviceChangeState.devNo = devNo;
- msg.cmd.deviceChangeState.state = SegmentStateStandby;
+ msg.cmd.device_change_state.bus_no = bus_no;
+ msg.cmd.device_change_state.dev_no = dev_no;
+ msg.cmd.device_change_state.state = segment_state_standby;
rc = pause_device(&msg);
if (rc != CONTROLVM_RESP_SUCCESS) {
LOGERR("VNIC pause_device failed. busNo=0x%x devNo=0x%x\n",
- busNo, devNo);
+ bus_no, dev_no);
return -1;
}
return 0;
@@ -1053,19 +1056,19 @@ uislib_client_inject_pause_vnic(u32 busNo, u32 devNo)
EXPORT_SYMBOL_GPL(uislib_client_inject_pause_vnic);
int
-uislib_client_inject_resume_vnic(u32 busNo, u32 devNo)
+uislib_client_inject_resume_vnic(u32 bus_no, u32 dev_no)
{
- CONTROLVM_MESSAGE msg;
+ struct controlvm_message msg;
int rc;
init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0);
- msg.cmd.deviceChangeState.busNo = busNo;
- msg.cmd.deviceChangeState.devNo = devNo;
- msg.cmd.deviceChangeState.state = SegmentStateRunning;
+ msg.cmd.device_change_state.bus_no = bus_no;
+ msg.cmd.device_change_state.dev_no = dev_no;
+ msg.cmd.device_change_state.state = segment_state_running;
rc = resume_device(&msg);
if (rc != CONTROLVM_RESP_SUCCESS) {
LOGERR("VNIC resume_device failed. busNo=0x%x devNo=0x%x\n",
- busNo, devNo);
+ bus_no, dev_no);
return -1;
}
return 0;
@@ -1074,88 +1077,12 @@ uislib_client_inject_resume_vnic(u32 busNo, u32 devNo)
EXPORT_SYMBOL_GPL(uislib_client_inject_resume_vnic);
int
-uislib_client_inject_del_vnic(u32 busNo, u32 devNo)
+uislib_client_inject_del_vnic(u32 bus_no, u32 dev_no)
{
- return delete_device_glue(busNo, devNo);
+ return delete_device_glue(bus_no, dev_no);
}
EXPORT_SYMBOL_GPL(uislib_client_inject_del_vnic);
-static int
-uislib_client_add_vnic(u32 busNo)
-{
- BOOL busCreated = FALSE;
- int devNo = 0; /* Default to 0, since only one device
- * will be created for this bus... */
- CONTROLVM_MESSAGE msg;
-
- init_msg_header(&msg, CONTROLVM_BUS_CREATE, 0, 0);
- msg.hdr.Flags.testMessage = 1;
- msg.cmd.createBus.busNo = busNo;
- msg.cmd.createBus.deviceCount = 4;
- msg.cmd.createBus.channelAddr = 0;
- msg.cmd.createBus.channelBytes = 0;
- if (create_bus(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
- LOGERR("client create_bus failed");
- return 0;
- }
- busCreated = TRUE;
-
- init_msg_header(&msg, CONTROLVM_DEVICE_CREATE, 0, 0);
- msg.hdr.Flags.testMessage = 1;
- msg.cmd.createDevice.busNo = busNo;
- msg.cmd.createDevice.devNo = devNo;
- msg.cmd.createDevice.devInstGuid = NULL_UUID_LE;
- memset(&msg.cmd.createDevice.intr, 0, sizeof(struct InterruptInfo));
- msg.cmd.createDevice.channelAddr = PhysicalDataChan;
- msg.cmd.createDevice.channelBytes = MIN_IO_CHANNEL_SIZE;
- msg.cmd.createDevice.dataTypeGuid = UltraVnicChannelProtocolGuid;
- if (create_device(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
- LOGERR("client create_device failed");
- goto AwayCleanup;
- }
-
- return 1;
-
-AwayCleanup:
- if (busCreated) {
- init_msg_header(&msg, CONTROLVM_BUS_DESTROY, 0, 0);
- msg.hdr.Flags.testMessage = 1;
- msg.cmd.destroyBus.busNo = busNo;
- if (destroy_bus(&msg, NULL) != CONTROLVM_RESP_SUCCESS)
- LOGERR("client destroy_bus failed.\n");
- }
-
- return 0;
-} /* end uislib_client_add_vnic */
-EXPORT_SYMBOL_GPL(uislib_client_add_vnic);
-
-static int
-uislib_client_delete_vnic(u32 busNo)
-{
- int devNo = 0; /* Default to 0, since only one device
- * will be created for this bus... */
- CONTROLVM_MESSAGE msg;
-
- init_msg_header(&msg, CONTROLVM_DEVICE_DESTROY, 0, 0);
- msg.hdr.Flags.testMessage = 1;
- msg.cmd.destroyDevice.busNo = busNo;
- msg.cmd.destroyDevice.devNo = devNo;
- if (destroy_device(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
- /* Don't error exit - try to see if bus can be destroyed... */
- LOGERR("client destroy_device failed.\n");
- }
-
- init_msg_header(&msg, CONTROLVM_BUS_DESTROY, 0, 0);
- msg.hdr.Flags.testMessage = 1;
- msg.cmd.destroyBus.busNo = busNo;
- if (destroy_bus(&msg, NULL) != CONTROLVM_RESP_SUCCESS)
- LOGERR("client destroy_bus failed.\n");
-
- return 1;
-}
-EXPORT_SYMBOL_GPL(uislib_client_delete_vnic);
-/* end client_delete_vnic */
-
void *
uislib_cache_alloc(struct kmem_cache *cur_pool, char *fn, int ln)
{
@@ -1206,17 +1133,17 @@ info_debugfs_read_helper(char **buff, int *buff_len)
for (bus = BusListHead; bus; bus = bus->next) {
if (PLINE(" bus=0x%p, busNo=%d, deviceCount=%d\n",
- bus, bus->busNo, bus->deviceCount) < 0)
+ bus, bus->bus_no, bus->device_count) < 0)
goto err_done_unlock;
if (PLINE(" Devices:\n") < 0)
goto err_done_unlock;
- for (i = 0; i < bus->deviceCount; i++) {
+ for (i = 0; i < bus->device_count; i++) {
if (bus->device[i]) {
if (PLINE(" busNo %d, device[%i]: 0x%p, chanptr=0x%p, swtch=0x%p\n",
- bus->busNo, i, bus->device[i],
+ bus->bus_no, i, bus->device[i],
bus->device[i]->chanptr,
bus->device[i]->swtch) < 0)
goto err_done_unlock;
@@ -1232,7 +1159,7 @@ info_debugfs_read_helper(char **buff, int *buff_len)
read_unlock(&BusListLock);
if (PLINE("UisUtils_Registered_Services: %d\n",
- atomic_read(&UisUtils_Registered_Services)) < 0)
+ atomic_read(&uisutils_registered_services)) < 0)
goto err_done;
if (PLINE("cycles_before_wait %llu wait_cycles:%llu\n",
cycles_before_wait, wait_cycles) < 0)
@@ -1294,9 +1221,9 @@ find_dev(u32 busNo, u32 devNo)
read_lock(&BusListLock);
for (bus = BusListHead; bus; bus = bus->next) {
- if (bus->busNo == busNo) {
+ if (bus->bus_no == busNo) {
/* make sure the device number is valid */
- if (devNo >= bus->deviceCount) {
+ if (devNo >= bus->device_count) {
LOGERR("%s bad busNo, devNo=%d,%d",
__func__,
(int) (busNo), (int) (devNo));
@@ -1560,13 +1487,13 @@ uislib_mod_init(void)
LOGINF("sizeof(uiscmdrsp_net):%lu\n",
(ulong) sizeof(struct uiscmdrsp_net));
LOGINF("sizeof(CONTROLVM_MESSAGE):%lu bytes\n",
- (ulong) sizeof(CONTROLVM_MESSAGE));
- LOGINF("sizeof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL):%lu bytes\n",
- (ulong) sizeof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL));
+ (ulong) sizeof(struct controlvm_message));
+ LOGINF("sizeof(struct spar_controlvm_channel_protocol):%lu bytes\n",
+ (ulong) sizeof(struct spar_controlvm_channel_protocol));
LOGINF("sizeof(CHANNEL_HEADER):%lu bytes\n",
- (ulong) sizeof(CHANNEL_HEADER));
- LOGINF("sizeof(ULTRA_IO_CHANNEL_PROTOCOL):%lu bytes\n",
- (ulong) sizeof(ULTRA_IO_CHANNEL_PROTOCOL));
+ (ulong) sizeof(struct channel_header));
+ LOGINF("sizeof(struct spar_io_channel_protocol):%lu bytes\n",
+ (ulong) sizeof(struct spar_io_channel_protocol));
LOGINF("SIZEOF_CMDRSP:%lu bytes\n", SIZEOF_CMDRSP);
LOGINF("SIZEOF_PROTOCOL:%lu bytes\n", SIZEOF_PROTOCOL);
@@ -1574,7 +1501,7 @@ uislib_mod_init(void)
BusListHead = NULL;
BusListCount = MaxBusCount = 0;
rwlock_init(&BusListLock);
- VirtControlChanFunc = NULL;
+ virt_control_chan_func = NULL;
/* Issue VMCALL_GET_CONTROLVM_ADDR to get CtrlChanPhysAddr and
* then map this physical address to a virtual address. */
diff --git a/drivers/staging/unisys/uislib/uisqueue.c b/drivers/staging/unisys/uislib/uisqueue.c
index 44208841bd5..f9f8442d58c 100644
--- a/drivers/staging/unisys/uislib/uisqueue.c
+++ b/drivers/staging/unisys/uislib/uisqueue.c
@@ -81,13 +81,13 @@ do_locked_client_insert(struct uisqueue_info *queueinfo,
u8 rc = 0;
spin_lock_irqsave(lock, flags);
- if (!ULTRA_CHANNEL_CLIENT_ACQUIRE_OS(queueinfo->chan, channelId, NULL))
+ if (!spar_channel_client_acquire_os(queueinfo->chan, channelId))
goto unlock;
- if (visor_signal_insert(queueinfo->chan, whichqueue, pSignal)) {
+ if (spar_signal_insert(queueinfo->chan, whichqueue, pSignal)) {
queueinfo->packets_sent++;
rc = 1;
}
- ULTRA_CHANNEL_CLIENT_RELEASE_OS(queueinfo->chan, channelId, NULL);
+ spar_channel_client_release_os(queueinfo->chan, channelId);
unlock:
spin_unlock_irqrestore((spinlock_t *)lock, flags);
return rc;
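do_locked_client_insert() keeps the usual acquire/insert/release discipline around the signal queue; stripped to its essentials (the channel id string is a placeholder):

    if (spar_channel_client_acquire_os(queueinfo->chan, "mydev")) {
        if (spar_signal_insert(queueinfo->chan, whichqueue, pSignal))
            queueinfo->packets_sent++;
        spar_channel_client_release_os(queueinfo->chan, "mydev");
    }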
@@ -125,7 +125,7 @@ int
uisqueue_get_cmdrsp(struct uisqueue_info *queueinfo,
void *cmdrsp, unsigned int whichqueue)
{
- if (!visor_signal_remove(queueinfo->chan, whichqueue, cmdrsp))
+ if (!spar_signal_remove(queueinfo->chan, whichqueue, cmdrsp))
return 0;
queueinfo->packets_received++;
diff --git a/drivers/staging/unisys/uislib/uisutils.c b/drivers/staging/unisys/uislib/uisutils.c
index 8ff6d26ff00..4a5b8677392 100644
--- a/drivers/staging/unisys/uislib/uisutils.c
+++ b/drivers/staging/unisys/uislib/uisutils.c
@@ -25,9 +25,7 @@
#include "uisutils.h"
#include "version.h"
#include "vbushelper.h"
-#include <linux/uuid.h>
#include <linux/skbuff.h>
-#include <linux/uuid.h>
#ifdef CONFIG_HIGHMEM
#include <linux/highmem.h>
#endif
@@ -39,7 +37,7 @@
#define __MYFILE__ "uisutils.c"
/* exports */
-atomic_t UisUtils_Registered_Services = ATOMIC_INIT(0);
+atomic_t uisutils_registered_services = ATOMIC_INIT(0);
/* num registrations via
* uisctrl_register_req_handler() or
* uisctrl_register_req_handler_ex() */
@@ -75,20 +73,20 @@ EXPORT_SYMBOL_GPL(uisutil_add_proc_line_ex);
int
uisctrl_register_req_handler(int type, void *fptr,
- ULTRA_VBUS_DEVICEINFO *chipset_driver_info)
+ struct ultra_vbus_deviceinfo *chipset_driver_info)
{
LOGINF("type = %d, fptr = 0x%p.\n", type, fptr);
switch (type) {
case 2:
if (fptr) {
- if (!VirtControlChanFunc)
- atomic_inc(&UisUtils_Registered_Services);
- VirtControlChanFunc = fptr;
+ if (!virt_control_chan_func)
+ atomic_inc(&uisutils_registered_services);
+ virt_control_chan_func = fptr;
} else {
- if (VirtControlChanFunc)
- atomic_dec(&UisUtils_Registered_Services);
- VirtControlChanFunc = NULL;
+ if (virt_control_chan_func)
+ atomic_dec(&uisutils_registered_services);
+ virt_control_chan_func = NULL;
}
break;
@@ -105,76 +103,75 @@ uisctrl_register_req_handler(int type, void *fptr,
EXPORT_SYMBOL_GPL(uisctrl_register_req_handler);
int
-uisctrl_register_req_handler_ex(uuid_le switchTypeGuid,
- const char *switch_type_name,
- int (*controlfunc)(struct io_msgs *),
- unsigned long min_channel_bytes,
- int (*Server_Channel_Ok)(unsigned long
- channelBytes),
- int (*Server_Channel_Init)
- (void *x, unsigned char *clientStr,
- u32 clientStrLen, u64 bytes),
- ULTRA_VBUS_DEVICEINFO *chipset_DriverInfo)
+uisctrl_register_req_handler_ex(uuid_le switch_uuid,
+ const char *switch_type_name,
+ int (*controlfunc)(struct io_msgs *),
+ unsigned long min_channel_bytes,
+ int (*server_channel_ok)(unsigned long channel_bytes),
+ int (*server_channel_init)(void *x,
+ unsigned char *client_str,
+ u32 client_str_len, u64 bytes),
+ struct ultra_vbus_deviceinfo *chipset_driver_info)
{
- ReqHandlerInfo_t *pReqHandlerInfo;
+ struct req_handler_info *pReqHandlerInfo;
int rc = 0; /* assume failure */
LOGINF("type=%pUL, controlfunc=0x%p.\n",
- &switchTypeGuid, controlfunc);
+ &switch_uuid, controlfunc);
if (!controlfunc) {
- LOGERR("%pUL: controlfunc must be supplied\n", &switchTypeGuid);
+ LOGERR("%pUL: controlfunc must be supplied\n", &switch_uuid);
goto Away;
}
- if (!Server_Channel_Ok) {
+ if (!server_channel_ok) {
LOGERR("%pUL: Server_Channel_Ok must be supplied\n",
- &switchTypeGuid);
+ &switch_uuid);
goto Away;
}
- if (!Server_Channel_Init) {
+ if (!server_channel_init) {
LOGERR("%pUL: Server_Channel_Init must be supplied\n",
- &switchTypeGuid);
+ &switch_uuid);
goto Away;
}
- pReqHandlerInfo = ReqHandlerAdd(switchTypeGuid,
+ pReqHandlerInfo = req_handler_add(switch_uuid,
switch_type_name,
controlfunc,
min_channel_bytes,
- Server_Channel_Ok, Server_Channel_Init);
+ server_channel_ok, server_channel_init);
if (!pReqHandlerInfo) {
- LOGERR("failed to add %pUL to server list\n", &switchTypeGuid);
+ LOGERR("failed to add %pUL to server list\n", &switch_uuid);
goto Away;
}
- atomic_inc(&UisUtils_Registered_Services);
+ atomic_inc(&uisutils_registered_services);
rc = 1; /* success */
Away:
if (rc) {
- if (chipset_DriverInfo)
- bus_device_info_init(chipset_DriverInfo, "chipset",
+ if (chipset_driver_info)
+ bus_device_info_init(chipset_driver_info, "chipset",
"uislib", VERSION, NULL);
} else
- LOGERR("failed to register type %pUL.\n", &switchTypeGuid);
+ LOGERR("failed to register type %pUL.\n", &switch_uuid);
return rc;
}
EXPORT_SYMBOL_GPL(uisctrl_register_req_handler_ex);
int
-uisctrl_unregister_req_handler_ex(uuid_le switchTypeGuid)
+uisctrl_unregister_req_handler_ex(uuid_le switch_uuid)
{
int rc = 0; /* assume failure */
- LOGINF("type=%pUL.\n", &switchTypeGuid);
- if (ReqHandlerDel(switchTypeGuid) < 0) {
+ LOGINF("type=%pUL.\n", &switch_uuid);
+ if (req_handler_del(switch_uuid) < 0) {
LOGERR("failed to remove %pUL from server list\n",
- &switchTypeGuid);
+ &switch_uuid);
goto Away;
}
- atomic_dec(&UisUtils_Registered_Services);
+ atomic_dec(&uisutils_registered_services);
rc = 1; /* success */
Away:
if (!rc)
- LOGERR("failed to unregister type %pUL.\n", &switchTypeGuid);
+ LOGERR("failed to unregister type %pUL.\n", &switch_uuid);
return rc;
}
EXPORT_SYMBOL_GPL(uisctrl_unregister_req_handler_ex);
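Taken together, a hedged sketch of registering and later unregistering a handler with the renamed API; the switch UUID, name, callbacks, and driver-info struct are placeholders, but the prototypes follow the parameters shown above:

    static int my_control(struct io_msgs *msg) { return 1; }
    static int my_chan_ok(unsigned long channel_bytes) { return 1; }
    static int my_chan_init(void *x, unsigned char *client_str,
                            u32 client_str_len, u64 bytes) { return 1; }

    uisctrl_register_req_handler_ex(my_switch_uuid, "myswitch", my_control,
                                    MIN_IO_CHANNEL_SIZE, my_chan_ok,
                                    my_chan_init, &my_driver_info);
    uisctrl_unregister_req_handler_ex(my_switch_uuid);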
@@ -275,11 +272,11 @@ dolist: if (skb_shinfo(skb)->frag_list) {
}
EXPORT_SYMBOL_GPL(uisutil_copy_fragsinfo_from_skb);
-static LIST_HEAD(ReqHandlerInfo_list); /* list of ReqHandlerInfo_t */
+static LIST_HEAD(ReqHandlerInfo_list); /* list of struct req_handler_info */
static DEFINE_SPINLOCK(ReqHandlerInfo_list_lock);
-ReqHandlerInfo_t *
-ReqHandlerAdd(uuid_le switchTypeGuid,
+struct req_handler_info *
+req_handler_add(uuid_le switch_uuid,
const char *switch_type_name,
int (*controlfunc)(struct io_msgs *),
unsigned long min_channel_bytes,
@@ -287,16 +284,16 @@ ReqHandlerAdd(uuid_le switchTypeGuid,
int (*Server_Channel_Init)
(void *x, unsigned char *clientStr, u32 clientStrLen, u64 bytes))
{
- ReqHandlerInfo_t *rc = NULL;
+ struct req_handler_info *rc = NULL;
rc = kzalloc(sizeof(*rc), GFP_ATOMIC);
if (!rc)
return NULL;
- rc->switchTypeGuid = switchTypeGuid;
+ rc->switch_uuid = switch_uuid;
rc->controlfunc = controlfunc;
rc->min_channel_bytes = min_channel_bytes;
- rc->Server_Channel_Ok = Server_Channel_Ok;
- rc->Server_Channel_Init = Server_Channel_Init;
+ rc->server_channel_ok = Server_Channel_Ok;
+ rc->server_channel_init = Server_Channel_Init;
if (switch_type_name)
strncpy(rc->switch_type_name, switch_type_name,
sizeof(rc->switch_type_name) - 1);
@@ -307,16 +304,16 @@ ReqHandlerAdd(uuid_le switchTypeGuid,
return rc;
}
-ReqHandlerInfo_t *
-ReqHandlerFind(uuid_le switchTypeGuid)
+struct req_handler_info *
+req_handler_find(uuid_le switch_uuid)
{
struct list_head *lelt, *tmp;
- ReqHandlerInfo_t *entry = NULL;
+ struct req_handler_info *entry = NULL;
spin_lock(&ReqHandlerInfo_list_lock);
list_for_each_safe(lelt, tmp, &ReqHandlerInfo_list) {
- entry = list_entry(lelt, ReqHandlerInfo_t, list_link);
- if (uuid_le_cmp(entry->switchTypeGuid, switchTypeGuid) == 0) {
+ entry = list_entry(lelt, struct req_handler_info, list_link);
+ if (uuid_le_cmp(entry->switch_uuid, switch_uuid) == 0) {
spin_unlock(&ReqHandlerInfo_list_lock);
return entry;
}
@@ -326,16 +323,16 @@ ReqHandlerFind(uuid_le switchTypeGuid)
}
int
-ReqHandlerDel(uuid_le switchTypeGuid)
+req_handler_del(uuid_le switch_uuid)
{
struct list_head *lelt, *tmp;
- ReqHandlerInfo_t *entry = NULL;
+ struct req_handler_info *entry = NULL;
int rc = -1;
spin_lock(&ReqHandlerInfo_list_lock);
list_for_each_safe(lelt, tmp, &ReqHandlerInfo_list) {
- entry = list_entry(lelt, ReqHandlerInfo_t, list_link);
- if (uuid_le_cmp(entry->switchTypeGuid, switchTypeGuid) == 0) {
+ entry = list_entry(lelt, struct req_handler_info, list_link);
+ if (uuid_le_cmp(entry->switch_uuid, switch_uuid) == 0) {
list_del(lelt);
kfree(entry);
rc++;
diff --git a/drivers/staging/unisys/virthba/virthba.c b/drivers/staging/unisys/virthba/virthba.c
index 938e2c82c1a..d7a629b5f11 100644
--- a/drivers/staging/unisys/virthba/virthba.c
+++ b/drivers/staging/unisys/virthba/virthba.c
@@ -101,7 +101,6 @@ static DEF_SCSI_QCMD(virthba_queue_command)
#define virthba_queue_command virthba_queue_command_lck
#endif
-
static int virthba_slave_alloc(struct scsi_device *scsidev);
static int virthba_slave_configure(struct scsi_device *scsidev);
static void virthba_slave_destroy(struct scsi_device *scsidev);
@@ -172,7 +171,7 @@ struct virthba_info {
struct virtpci_dev *virtpcidev;
struct list_head dev_info_list;
struct chaninfo chinfo;
- struct InterruptInfo intr; /* use recvInterrupt info to receive
+ struct irq_info intr; /* use recvInterrupt info to receive
interrupts when IOs complete */
int interrupt_vector;
struct scsipending pending[MAX_PENDING_REQUESTS]; /* Tracks the requests
@@ -420,8 +419,8 @@ static irqreturn_t
virthba_ISR(int irq, void *dev_id)
{
struct virthba_info *virthbainfo = (struct virthba_info *) dev_id;
- CHANNEL_HEADER __iomem *pChannelHeader;
- SIGNAL_QUEUE_HEADER __iomem *pqhdr;
+ struct channel_header __iomem *pChannelHeader;
+ struct signal_queue_header __iomem *pqhdr;
u64 mask;
unsigned long long rc1;
@@ -429,24 +428,24 @@ virthba_ISR(int irq, void *dev_id)
return IRQ_NONE;
virthbainfo->interrupts_rcvd++;
pChannelHeader = virthbainfo->chinfo.queueinfo->chan;
- if (((readq(&pChannelHeader->Features)
+ if (((readq(&pChannelHeader->features)
& ULTRA_IO_IOVM_IS_OK_WITH_DRIVER_DISABLING_INTS) != 0)
- && ((readq(&pChannelHeader->Features) &
+ && ((readq(&pChannelHeader->features) &
ULTRA_IO_DRIVER_DISABLES_INTS) !=
0)) {
virthbainfo->interrupts_disabled++;
mask = ~ULTRA_CHANNEL_ENABLE_INTS;
rc1 = uisqueue_interlocked_and(virthbainfo->flags_addr, mask);
}
- if (visor_signalqueue_empty(pChannelHeader, IOCHAN_FROM_IOPART)) {
+ if (spar_signalqueue_empty(pChannelHeader, IOCHAN_FROM_IOPART)) {
virthbainfo->interrupts_notme++;
return IRQ_NONE;
}
- pqhdr = (SIGNAL_QUEUE_HEADER __iomem *)
+ pqhdr = (struct signal_queue_header __iomem *)
((char __iomem *) pChannelHeader +
- readq(&pChannelHeader->oChannelSpace)) + IOCHAN_FROM_IOPART;
- writeq(readq(&pqhdr->NumInterruptsReceived) + 1,
- &pqhdr->NumInterruptsReceived);
+ readq(&pChannelHeader->ch_space_offset)) + IOCHAN_FROM_IOPART;
+ writeq(readq(&pqhdr->num_irq_received) + 1,
+ &pqhdr->num_irq_received);
atomic_set(&virthbainfo->interrupt_rcvd, 1);
wake_up_interruptible(&virthbainfo->rsp_queue);
return IRQ_HANDLED;
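The ISR locates its inbound signal queue header from the channel header at run time; condensed here with the renamed fields:

    /* queue headers start ch_space_offset bytes past the channel header */
    pqhdr = (struct signal_queue_header __iomem *)
            ((char __iomem *)pChannelHeader +
             readq(&pChannelHeader->ch_space_offset)) + IOCHAN_FROM_IOPART;
    /* count the interrupt, then wake the response thread */
    writeq(readq(&pqhdr->num_irq_received) + 1, &pqhdr->num_irq_received);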
@@ -461,17 +460,17 @@ virthba_probe(struct virtpci_dev *virtpcidev, const struct pci_device_id *id)
int rsp;
int i;
irq_handler_t handler = virthba_ISR;
- CHANNEL_HEADER __iomem *pChannelHeader;
- SIGNAL_QUEUE_HEADER __iomem *pqhdr;
+ struct channel_header __iomem *pChannelHeader;
+ struct signal_queue_header __iomem *pqhdr;
u64 mask;
LOGVER("entering virthba_probe...\n");
- LOGVER("virtpcidev busNo<<%d>>devNo<<%d>>", virtpcidev->busNo,
- virtpcidev->deviceNo);
+ LOGVER("virtpcidev bus_no<<%d>>devNo<<%d>>", virtpcidev->bus_no,
+ virtpcidev->device_no);
LOGINF("entering virthba_probe...\n");
- LOGINF("virtpcidev busNo<<%d>>devNo<<%d>>", virtpcidev->busNo,
- virtpcidev->deviceNo);
+ LOGINF("virtpcidev bus_no<<%d>>devNo<<%d>>", virtpcidev->bus_no,
+ virtpcidev->device_no);
POSTCODE_LINUX_2(VHBA_PROBE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
/* call scsi_host_alloc to register a scsi host adapter
* instance - this virthba that has just been created is an
@@ -578,18 +577,18 @@ virthba_probe(struct virtpci_dev *virtpcidev, const struct pci_device_id *id)
INIT_WORK(&virthbainfo->serverdown_completion,
virthba_serverdown_complete);
- writeq(readq(&virthbainfo->chinfo.queueinfo->chan->Features) |
+ writeq(readq(&virthbainfo->chinfo.queueinfo->chan->features) |
ULTRA_IO_CHANNEL_IS_POLLING,
- &virthbainfo->chinfo.queueinfo->chan->Features);
+ &virthbainfo->chinfo.queueinfo->chan->features);
/* start thread that will receive scsicmnd responses */
DBGINF("starting rsp thread -- queueinfo: 0x%p, threadinfo: 0x%p.\n",
virthbainfo->chinfo.queueinfo, &virthbainfo->chinfo.threadinfo);
pChannelHeader = virthbainfo->chinfo.queueinfo->chan;
- pqhdr = (SIGNAL_QUEUE_HEADER __iomem *)
+ pqhdr = (struct signal_queue_header __iomem *)
((char __iomem *)pChannelHeader +
- readq(&pChannelHeader->oChannelSpace)) + IOCHAN_FROM_IOPART;
- virthbainfo->flags_addr = &pqhdr->FeatureFlags;
+ readq(&pChannelHeader->ch_space_offset)) + IOCHAN_FROM_IOPART;
+ virthbainfo->flags_addr = &pqhdr->features;
if (!uisthread_start(&virthbainfo->chinfo.threadinfo,
process_incoming_rsps,
@@ -603,16 +602,16 @@ virthba_probe(struct virtpci_dev *virtpcidev, const struct pci_device_id *id)
return -ENODEV;
}
LOGINF("sendInterruptHandle=0x%16llX",
- virthbainfo->intr.sendInterruptHandle);
+ virthbainfo->intr.send_irq_handle);
LOGINF("recvInterruptHandle=0x%16llX",
- virthbainfo->intr.recvInterruptHandle);
+ virthbainfo->intr.recv_irq_handle);
LOGINF("recvInterruptVector=0x%8X",
- virthbainfo->intr.recvInterruptVector);
+ virthbainfo->intr.recv_irq_vector);
LOGINF("recvInterruptShared=0x%2X",
- virthbainfo->intr.recvInterruptShared);
+ virthbainfo->intr.recv_irq_shared);
LOGINF("scsihost.hostt->name=%s", scsihost->hostt->name);
virthbainfo->interrupt_vector =
- virthbainfo->intr.recvInterruptHandle & INTERRUPT_VECTOR_MASK;
+ virthbainfo->intr.recv_irq_handle & INTERRUPT_VECTOR_MASK;
rsp = request_irq(virthbainfo->interrupt_vector, handler, IRQF_SHARED,
scsihost->hostt->name, virthbainfo);
if (rsp != 0) {
@@ -622,7 +621,7 @@ virthba_probe(struct virtpci_dev *virtpcidev, const struct pci_device_id *id)
POSTCODE_LINUX_2(VHBA_PROBE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
} else {
u64 __iomem *Features_addr =
- &virthbainfo->chinfo.queueinfo->chan->Features;
+ &virthbainfo->chinfo.queueinfo->chan->features;
LOGERR("request_irq(%d) uislib_virthba_ISR request succeeded\n",
virthbainfo->interrupt_vector);
mask = ~(ULTRA_IO_CHANNEL_IS_POLLING |
@@ -649,8 +648,8 @@ virthba_remove(struct virtpci_dev *virtpcidev)
struct Scsi_Host *scsihost =
(struct Scsi_Host *) virtpcidev->scsi.scsihost;
- LOGINF("virtpcidev busNo<<%d>>devNo<<%d>>", virtpcidev->busNo,
- virtpcidev->deviceNo);
+ LOGINF("virtpcidev bus_no<<%d>>devNo<<%d>>", virtpcidev->bus_no,
+ virtpcidev->device_no);
virthbainfo = (struct virthba_info *) scsihost->hostdata;
if (virthbainfo->interrupt_vector != -1)
free_irq(virthbainfo->interrupt_vector, virthbainfo);
@@ -674,7 +673,7 @@ virthba_remove(struct virtpci_dev *virtpcidev)
}
static int
-forward_vdiskmgmt_command(VDISK_MGMT_TYPES vdiskcmdtype,
+forward_vdiskmgmt_command(enum vdisk_mgmt_types vdiskcmdtype,
struct Scsi_Host *scsihost,
struct uisscsi_dest *vdest)
{
@@ -738,7 +737,8 @@ forward_vdiskmgmt_command(VDISK_MGMT_TYPES vdiskcmdtype,
/*****************************************************/
static int
-forward_taskmgmt_command(TASK_MGMT_TYPES tasktype, struct scsi_device *scsidev)
+forward_taskmgmt_command(enum task_mgmt_types tasktype,
+ struct scsi_device *scsidev)
{
struct uiscmdrsp *cmdrsp;
struct virthba_info *virthbainfo =
@@ -1142,9 +1142,9 @@ do_scsi_linuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
scsicmd, cmdrsp->scsi.cmnd[0],
scsidev->host->host_no, scsidev->id,
scsidev->channel, scsidev->lun,
- cmdrsp->scsi.linuxstat, sd->Valid, sd->SenseKey,
- sd->AdditionalSenseCode,
- sd->AdditionalSenseCodeQualifier);
+ cmdrsp->scsi.linuxstat, sd->valid, sd->sense_key,
+ sd->additional_sense_code,
+ sd->additional_sense_code_qualifier);
if (atomic_read(&vdisk->error_count) ==
VIRTHBA_ERROR_COUNT) {
LOGERR("Throtling SCSICMD errors disk <%d:%d:%d:%llu>\n",
@@ -1276,8 +1276,8 @@ drain_queue(struct virthba_info *virthbainfo, struct chaninfo *dc,
while (1) {
spin_lock_irqsave(&virthbainfo->chinfo.insertlock, flags);
- if (!ULTRA_CHANNEL_CLIENT_ACQUIRE_OS(dc->queueinfo->chan,
- "vhba", NULL)) {
+ if (!spar_channel_client_acquire_os(dc->queueinfo->chan,
+ "vhba")) {
spin_unlock_irqrestore(&virthbainfo->chinfo.insertlock,
flags);
virthbainfo->acquire_failed_cnt++;
@@ -1285,8 +1285,7 @@ drain_queue(struct virthba_info *virthbainfo, struct chaninfo *dc,
}
qrslt = uisqueue_get_cmdrsp(dc->queueinfo, cmdrsp,
IOCHAN_FROM_IOPART);
- ULTRA_CHANNEL_CLIENT_RELEASE_OS(dc->queueinfo->chan,
- "vhba", NULL);
+ spar_channel_client_release_os(dc->queueinfo->chan, "vhba");
spin_unlock_irqrestore(&virthbainfo->chinfo.insertlock, flags);
if (qrslt == 0)
break;
@@ -1310,7 +1309,7 @@ drain_queue(struct virthba_info *virthbainfo, struct chaninfo *dc,
* a Client/Guest Partition. Let's be
* safe and set it to NULL now. Do
* not use it here! */
- cmdrsp->disknotify.vHba = NULL;
+ cmdrsp->disknotify.v_hba = NULL;
process_disk_notify(shost, cmdrsp);
} else if (cmdrsp->cmdtype == CMD_VDISKMGMT_TYPE) {
if (!del_scsipending_entry(virthbainfo,
@@ -1323,7 +1322,6 @@ drain_queue(struct virthba_info *virthbainfo, struct chaninfo *dc,
}
}
-
/* main function for the thread that waits for scsi commands to arrive
* in a specified queue
*/
@@ -1453,7 +1451,7 @@ static ssize_t enable_ints_write(struct file *file,
if (VirtHbasOpen[i].virthbainfo != NULL) {
virthbainfo = VirtHbasOpen[i].virthbainfo;
Features_addr =
- &virthbainfo->chinfo.queueinfo->chan->Features;
+ &virthbainfo->chinfo.queueinfo->chan->features;
if (new_value == 1) {
mask = ~(ULTRA_IO_CHANNEL_IS_POLLING |
ULTRA_IO_DRIVER_DISABLES_INTS);
@@ -1482,8 +1480,8 @@ virthba_serverup(struct virtpci_dev *virtpcidev)
(struct virthba_info *) ((struct Scsi_Host *) virtpcidev->scsi.
scsihost)->hostdata;
- DBGINF("virtpcidev busNo<<%d>>devNo<<%d>>", virtpcidev->busNo,
- virtpcidev->deviceNo);
+ DBGINF("virtpcidev bus_no<<%d>>devNo<<%d>>", virtpcidev->bus_no,
+ virtpcidev->device_no);
if (!virthbainfo->serverdown) {
DBGINF("Server up message received while server is already up.\n");
@@ -1498,9 +1496,9 @@ virthba_serverup(struct virtpci_dev *virtpcidev)
/* Must transition channel to ATTACHED state BEFORE we
* can start using the device again
*/
- ULTRA_CHANNEL_CLIENT_TRANSITION(virthbainfo->chinfo.queueinfo->chan,
- dev_name(&virtpcidev->generic_dev),
- CHANNELCLI_ATTACHED, NULL);
+ SPAR_CHANNEL_CLIENT_TRANSITION(virthbainfo->chinfo.queueinfo->chan,
+ dev_name(&virtpcidev->generic_dev),
+ CHANNELCLI_ATTACHED, NULL);
/* Start Processing the IOVM Response Queue Again */
if (!uisthread_start(&virthbainfo->chinfo.threadinfo,
@@ -1573,13 +1571,13 @@ virthba_serverdown_complete(struct work_struct *work)
virtpcidev = virthbainfo->virtpcidev;
- DBGINF("virtpcidev busNo<<%d>>devNo<<%d>>", virtpcidev->busNo,
- virtpcidev->deviceNo);
+ DBGINF("virtpcidev bus_no<<%d>>devNo<<%d>>", virtpcidev->bus_no,
+ virtpcidev->device_no);
virthbainfo->serverdown = true;
virthbainfo->serverchangingstate = false;
/* Return the ServerDown response to Command */
- visorchipset_device_pause_response(virtpcidev->busNo,
- virtpcidev->deviceNo, 0);
+ visorchipset_device_pause_response(virtpcidev->bus_no,
+ virtpcidev->device_no, 0);
}
/* As per VirtpciFunc returns 1 for success and 0 for failure */
@@ -1591,8 +1589,8 @@ virthba_serverdown(struct virtpci_dev *virtpcidev, u32 state)
scsihost)->hostdata;
DBGINF("virthba_serverdown");
- DBGINF("virtpcidev busNo<<%d>>devNo<<%d>>", virtpcidev->busNo,
- virtpcidev->deviceNo);
+ DBGINF("virtpcidev bus_no<<%d>>devNo<<%d>>", virtpcidev->bus_no,
+ virtpcidev->device_no);
if (!virthbainfo->serverdown && !virthbainfo->serverchangingstate) {
virthbainfo->serverchangingstate = true;
diff --git a/drivers/staging/unisys/virthba/virthba.h b/drivers/staging/unisys/virthba/virthba.h
index d4b809b0c7b..59901668d4f 100644
--- a/drivers/staging/unisys/virthba/virthba.h
+++ b/drivers/staging/unisys/virthba/virthba.h
@@ -19,13 +19,9 @@
* Unisys Virtual HBA driver header
*/
-
-
#ifndef __VIRTHBA_H__
#define __VIRTHBA_H__
-
#define VIRTHBA_VERSION "01.00"
-
#endif /* __VIRTHBA_H__ */
diff --git a/drivers/staging/unisys/virtpci/virtpci.c b/drivers/staging/unisys/virtpci/virtpci.c
index ee9f8260cd1..39b828dce50 100644
--- a/drivers/staging/unisys/virtpci/virtpci.c
+++ b/drivers/staging/unisys/virtpci/virtpci.c
@@ -50,6 +50,7 @@ struct driver_private {
struct module_kobject *mkobj;
struct device_driver *driver;
};
+
#define to_driver(obj) container_of(obj, struct driver_private, kobj)
/* bus_id went away in 2.6.30 - the size was 20 bytes, so we'll define
@@ -109,7 +110,7 @@ static int virtpci_device_probe(struct device *dev);
static int virtpci_device_remove(struct device *dev);
static ssize_t info_debugfs_read(struct file *file, char __user *buf,
- size_t len, loff_t *offset);
+ size_t len, loff_t *offset);
static const struct file_operations debugfs_info_fops = {
.read = info_debugfs_read,
@@ -137,7 +138,7 @@ static struct device virtpci_rootbus_device = {
};
/* filled in with info about parent chipset driver when we register with it */
-static ULTRA_VBUS_DEVICEINFO Chipset_DriverInfo;
+static struct ultra_vbus_deviceinfo chipset_driver_info;
static const struct sysfs_ops virtpci_driver_sysfs_ops = {
.show = virtpci_driver_attr_show,
@@ -148,11 +149,11 @@ static struct kobj_type virtpci_driver_kobj_type = {
.sysfs_ops = &virtpci_driver_sysfs_ops,
};
-static struct virtpci_dev *VpcidevListHead;
-static DEFINE_RWLOCK(VpcidevListLock);
+static struct virtpci_dev *vpcidev_list_head;
+static DEFINE_RWLOCK(vpcidev_list_lock);
/* filled in with info about this driver, wrt it servicing client busses */
-static ULTRA_VBUS_DEVICEINFO Bus_DriverInfo;
+static struct ultra_vbus_deviceinfo bus_driver_info;
/*****************************************************/
/* debugfs entries */
@@ -171,13 +172,12 @@ struct virtpci_busdev {
/*****************************************************/
static inline
-int WAIT_FOR_IO_CHANNEL(ULTRA_IO_CHANNEL_PROTOCOL __iomem *chanptr)
+int WAIT_FOR_IO_CHANNEL(struct spar_io_channel_protocol __iomem *chanptr)
{
int count = 120;
while (count > 0) {
-
- if (ULTRA_CHANNEL_SERVER_READY(&chanptr->ChannelHeader))
+ if (SPAR_CHANNEL_SERVER_READY(&chanptr->channel_header))
return 1;
UIS_THREAD_WAIT_SEC(1);
count--;
@@ -186,8 +186,8 @@ int WAIT_FOR_IO_CHANNEL(ULTRA_IO_CHANNEL_PROTOCOL __iomem *chanptr)
}
/* Write the contents of <info> to the ULTRA_VBUS_CHANNEL_PROTOCOL.ChpInfo. */
-static int write_vbus_chpInfo(ULTRA_VBUS_CHANNEL_PROTOCOL *chan,
- ULTRA_VBUS_DEVICEINFO *info)
+static int write_vbus_chp_info(struct spar_vbus_channel_protocol *chan,
+ struct ultra_vbus_deviceinfo *info)
{
int off;
@@ -195,18 +195,18 @@ static int write_vbus_chpInfo(ULTRA_VBUS_CHANNEL_PROTOCOL *chan,
LOGERR("vbus channel not present");
return -1;
}
- off = sizeof(ULTRA_CHANNEL_PROTOCOL) + chan->HdrInfo.chpInfoByteOffset;
- if (chan->HdrInfo.chpInfoByteOffset == 0) {
- LOGERR("vbus channel not used, because chpInfoByteOffset == 0");
+ off = sizeof(struct channel_header) + chan->hdr_info.chp_info_offset;
+ if (chan->hdr_info.chp_info_offset == 0) {
+ LOGERR("vbus channel not used, because chp_info_offset == 0");
return -1;
}
- memcpy(((u8 *) (chan)) + off, info, sizeof(*info));
+ memcpy(((u8 *)(chan)) + off, info, sizeof(*info));
return 0;
}
/* Write the contents of <info> to the ULTRA_VBUS_CHANNEL_PROTOCOL.BusInfo. */
-static int write_vbus_busInfo(ULTRA_VBUS_CHANNEL_PROTOCOL *chan,
- ULTRA_VBUS_DEVICEINFO *info)
+static int write_vbus_bus_info(struct spar_vbus_channel_protocol *chan,
+ struct ultra_vbus_deviceinfo *info)
{
int off;
@@ -214,12 +214,12 @@ static int write_vbus_busInfo(ULTRA_VBUS_CHANNEL_PROTOCOL *chan,
LOGERR("vbus channel not present");
return -1;
}
- off = sizeof(ULTRA_CHANNEL_PROTOCOL) + chan->HdrInfo.busInfoByteOffset;
- if (chan->HdrInfo.busInfoByteOffset == 0) {
- LOGERR("vbus channel not used, because busInfoByteOffset == 0");
+ off = sizeof(struct channel_header) + chan->hdr_info.bus_info_offset;
+ if (chan->hdr_info.bus_info_offset == 0) {
+ LOGERR("vbus channel not used, because bus_info_offset == 0");
return -1;
}
- memcpy(((u8 *) (chan)) + off, info, sizeof(*info));
+ memcpy(((u8 *)(chan)) + off, info, sizeof(*info));
return 0;
}
@@ -227,8 +227,8 @@ static int write_vbus_busInfo(ULTRA_VBUS_CHANNEL_PROTOCOL *chan,
* ULTRA_VBUS_CHANNEL_PROTOCOL.DevInfo[<devix>].
*/
static int
-write_vbus_devInfo(ULTRA_VBUS_CHANNEL_PROTOCOL *chan,
- ULTRA_VBUS_DEVICEINFO *info, int devix)
+write_vbus_dev_info(struct spar_vbus_channel_protocol *chan,
+ struct ultra_vbus_deviceinfo *info, int devix)
{
int off;
@@ -237,14 +237,14 @@ write_vbus_devInfo(ULTRA_VBUS_CHANNEL_PROTOCOL *chan,
return -1;
}
off =
- (sizeof(ULTRA_CHANNEL_PROTOCOL) +
- chan->HdrInfo.devInfoByteOffset) +
- (chan->HdrInfo.deviceInfoStructBytes * devix);
- if (chan->HdrInfo.devInfoByteOffset == 0) {
- LOGERR("vbus channel not used, because devInfoByteOffset == 0");
+ (sizeof(struct channel_header) +
+ chan->hdr_info.dev_info_offset) +
+ (chan->hdr_info.device_info_struct_bytes * devix);
+ if (chan->hdr_info.dev_info_offset == 0) {
+ LOGERR("vbus channel not used, because dev_info_offset == 0");
return -1;
}
- memcpy(((u8 *) (chan)) + off, info, sizeof(*info));
+ memcpy(((u8 *)(chan)) + off, info, sizeof(*info));
return 0;
}
@@ -256,13 +256,13 @@ static int add_vbus(struct add_vbus_guestpart *addparams)
int ret;
struct device *vbus;
- vbus = kzalloc(sizeof(struct device), GFP_ATOMIC);
+ vbus = kzalloc(sizeof(*vbus), GFP_ATOMIC);
POSTCODE_LINUX_2(VPCI_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
if (!vbus)
return 0;
- dev_set_name(vbus, "vbus%d", addparams->busNo);
+ dev_set_name(vbus, "vbus%d", addparams->bus_no);
vbus->release = virtpci_bus_release;
vbus->parent = &virtpci_rootbus_device; /* root bus is parent */
vbus->bus = &virtpci_bus_type; /* bus type */
@@ -279,11 +279,12 @@ static int add_vbus(struct add_vbus_guestpart *addparams)
POSTCODE_LINUX_2(VPCI_CREATE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
return 0;
}
- write_vbus_chpInfo(vbus->platform_data /* chanptr */ ,
- &Chipset_DriverInfo);
- write_vbus_busInfo(vbus->platform_data /* chanptr */ , &Bus_DriverInfo);
+ write_vbus_chp_info(vbus->platform_data /* chanptr */ ,
+ &chipset_driver_info);
+ write_vbus_bus_info(vbus->platform_data /* chanptr */ ,
+ &bus_driver_info);
LOGINF("Added vbus %d; device %s created successfully\n",
- addparams->busNo, BUS_ID(vbus));
+ addparams->bus_no, BUS_ID(vbus));
POSTCODE_LINUX_2(VPCI_CREATE_EXIT_PC, POSTCODE_SEVERITY_INFO);
return 1;
}
@@ -293,26 +294,15 @@ static int add_vbus(struct add_vbus_guestpart *addparams)
*/
#define GET_SCSIADAPINFO_FROM_CHANPTR(chanptr) { \
memcpy_fromio(&scsi.wwnn, \
- &((ULTRA_IO_CHANNEL_PROTOCOL __iomem *) \
+ &((struct spar_io_channel_protocol __iomem *) \
chanptr)->vhba.wwnn, \
sizeof(struct vhba_wwnn)); \
memcpy_fromio(&scsi.max, \
- &((ULTRA_IO_CHANNEL_PROTOCOL __iomem *) \
+ &((struct spar_io_channel_protocol __iomem *) \
chanptr)->vhba.max, \
sizeof(struct vhba_config_max)); \
}
-/* find bus device with the busid that matches - match_busid matches bus_id */
-#define GET_BUS_DEV(busno) { \
- sprintf(busid, "vbus%d", busno); \
- vbus = bus_find_device(&virtpci_bus_type, NULL, \
- (void *)busid, match_busid); \
- if (!vbus) { \
- LOGERR("**** FAILED to find vbus %s\n", busid); \
- return 0; \
- } \
-}
-
/* adds a vhba
* returns 0 failure, 1 success,
*/
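The GET_BUS_DEV() macro deleted above hid both a sprintf() and a function return inside a macro body, which is why the following hunks open-code the lookup at each call site. A minimal alternative, sketched here only for illustration (the helper name find_vbus() is hypothetical; virtpci_bus_type and match_busid() are the driver's existing symbols), would keep the lookup in one place while leaving the error handling to the caller:

static struct device *find_vbus(u32 bus_no)
{
	char busid[BUS_ID_SIZE];

	/* build the "vbusN" name and let the driver core walk the bus */
	sprintf(busid, "vbus%d", bus_no);
	return bus_find_device(&virtpci_bus_type, NULL,
			       (void *)busid, match_busid);
}

Each caller would then test the returned pointer for NULL and log its own failure, exactly as the open-coded versions below do.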
@@ -325,7 +315,7 @@ static int add_vhba(struct add_virt_guestpart *addparams)
POSTCODE_LINUX_2(VPCI_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
if (!WAIT_FOR_IO_CHANNEL
- ((ULTRA_IO_CHANNEL_PROTOCOL __iomem *) addparams->chanptr)) {
+ ((struct spar_io_channel_protocol __iomem *)addparams->chanptr)) {
LOGERR("Timed out. Channel not ready\n");
POSTCODE_LINUX_2(VPCI_CREATE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
return 0;
@@ -333,7 +323,14 @@ static int add_vhba(struct add_virt_guestpart *addparams)
GET_SCSIADAPINFO_FROM_CHANPTR(addparams->chanptr);
- GET_BUS_DEV(addparams->bus_no);
+ /* find bus device with the busid that matches match_busid */
+ sprintf(busid, "vbus%d", addparams->bus_no);
+ vbus = bus_find_device(&virtpci_bus_type, NULL,
+ (void *)busid, match_busid);
+ if (!vbus) {
+ LOGERR("**** FAILED to find vbus %s\n", busid);
+ return 0;
+ }
LOGINF("Adding vhba wwnn:%x:%x config:%d-%d-%d-%d chanptr:%p\n",
scsi.wwnn.wwnn1, scsi.wwnn.wwnn2,
@@ -347,7 +344,6 @@ static int add_vhba(struct add_virt_guestpart *addparams)
POSTCODE_SEVERITY_INFO);
}
return i;
-
}
/* for CHANSOCK macaddr is AUTO-GENERATED; for normal channels,
@@ -355,17 +351,17 @@ static int add_vhba(struct add_virt_guestpart *addparams)
*/
#define GET_NETADAPINFO_FROM_CHANPTR(chanptr) { \
memcpy_fromio(net.mac_addr, \
- ((ULTRA_IO_CHANNEL_PROTOCOL __iomem *) \
- chanptr)->vnic.macaddr, \
+ ((struct spar_io_channel_protocol __iomem *) \
+ chanptr)->vnic.macaddr, \
MAX_MACADDR_LEN); \
net.num_rcv_bufs = \
- readl(&((ULTRA_IO_CHANNEL_PROTOCOL __iomem *) \
- chanptr)->vnic.num_rcv_bufs); \
- net.mtu = readl(&((ULTRA_IO_CHANNEL_PROTOCOL __iomem *) \
- chanptr)->vnic.mtu); \
- memcpy_fromio(&net.zoneGuid, \
- &((ULTRA_IO_CHANNEL_PROTOCOL __iomem *) \
- chanptr)->vnic.zoneGuid, \
+ readl(&((struct spar_io_channel_protocol __iomem *)\
+ chanptr)->vnic.num_rcv_bufs); \
+ net.mtu = readl(&((struct spar_io_channel_protocol __iomem *) \
+ chanptr)->vnic.mtu); \
+ memcpy_fromio(&net.zone_uuid, \
+ &((struct spar_io_channel_protocol __iomem *)\
+ chanptr)->vnic.zone_uuid, \
sizeof(uuid_le)); \
}
@@ -382,7 +378,7 @@ add_vnic(struct add_virt_guestpart *addparams)
POSTCODE_LINUX_2(VPCI_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
if (!WAIT_FOR_IO_CHANNEL
- ((ULTRA_IO_CHANNEL_PROTOCOL __iomem *) addparams->chanptr)) {
+ ((struct spar_io_channel_protocol __iomem *)addparams->chanptr)) {
LOGERR("Timed out, channel not ready\n");
POSTCODE_LINUX_2(VPCI_CREATE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
return 0;
@@ -390,12 +386,19 @@ add_vnic(struct add_virt_guestpart *addparams)
GET_NETADAPINFO_FROM_CHANPTR(addparams->chanptr);
- GET_BUS_DEV(addparams->bus_no);
+ /* find bus device with the busid that matches match_busid */
+ sprintf(busid, "vbus%d", addparams->bus_no);
+ vbus = bus_find_device(&virtpci_bus_type, NULL,
+ (void *)busid, match_busid);
+ if (!vbus) {
+ LOGERR("**** FAILED to find vbus %s\n", busid);
+ return 0;
+ }
LOGINF("Adding vnic macaddr:%02x:%02x:%02x:%02x:%02x:%02x rcvbufs:%d mtu:%d chanptr:%p%pUL\n",
- net.mac_addr[0], net.mac_addr[1], net.mac_addr[2], net.mac_addr[3],
- net.mac_addr[4], net.mac_addr[5], net.num_rcv_bufs, net.mtu,
- addparams->chanptr, &net.zoneGuid);
+ net.mac_addr[0], net.mac_addr[1], net.mac_addr[2],
+ net.mac_addr[3], net.mac_addr[4], net.mac_addr[5],
+ net.num_rcv_bufs, net.mtu, addparams->chanptr, &net.zone_uuid);
i = virtpci_device_add(vbus, VIRTNIC_TYPE, addparams, NULL, &net);
if (i) {
LOGINF("Added vnic macaddr:%02x:%02x:%02x:%02x:%02x:%02x\n",
@@ -417,7 +420,15 @@ delete_vbus(struct del_vbus_guestpart *delparams)
struct device *vbus;
unsigned char busid[BUS_ID_SIZE];
- GET_BUS_DEV(delparams->bus_no);
+ /* find bus device with the busid that matches match_busid */
+ sprintf(busid, "vbus%d", delparams->bus_no);
+ vbus = bus_find_device(&virtpci_bus_type, NULL,
+ (void *)busid, match_busid);
+ if (!vbus) {
+ LOGERR("**** FAILED to find vbus %s\n", busid);
+ return 0;
+ }
+
/* ensure that bus has no devices? -- TBD */
LOGINF("Deleting %s\n", BUS_ID(vbus));
if (delete_vbus_device(vbus, NULL))
@@ -430,9 +441,9 @@ static int
delete_vbus_device(struct device *vbus, void *data)
{
int checkforroot = (data != NULL);
- struct device *pDev = &virtpci_rootbus_device;
+ struct device *dev = &virtpci_rootbus_device;
- if ((checkforroot) && match_busid(vbus, (void *) BUS_ID(pDev))) {
+ if ((checkforroot) && match_busid(vbus, (void *)BUS_ID(dev))) {
/* skip it - don't delete root bus */
LOGINF("skipping root bus\n");
return 0; /* pretend no error */
@@ -590,10 +601,10 @@ static void delete_all(void)
struct virtpci_dev *tmpvpcidev, *nextvpcidev;
/* delete the entire vhba/vnic list in one shot */
- write_lock_irqsave(&VpcidevListLock, flags);
- tmpvpcidev = VpcidevListHead;
- VpcidevListHead = NULL;
- write_unlock_irqrestore(&VpcidevListLock, flags);
+ write_lock_irqsave(&vpcidev_list_lock, flags);
+ tmpvpcidev = vpcidev_list_head;
+ vpcidev_list_head = NULL;
+ write_unlock_irqrestore(&vpcidev_list_lock, flags);
/* delete one vhba/vnic at a time */
while (tmpvpcidev) {
@@ -607,24 +618,32 @@ static void delete_all(void)
/* now delete each vbus */
if (bus_for_each_dev
- (&virtpci_bus_type, NULL, (void *) 1, delete_vbus_device))
+ (&virtpci_bus_type, NULL, (void *)1, delete_vbus_device))
LOGERR("delete of all vbus failed\n");
}
/* deletes all vnics or vhbas
* returns 0 failure, 1 success,
*/
-static int delete_all_virt(VIRTPCI_DEV_TYPE devtype, struct del_vbus_guestpart *delparams)
+static int delete_all_virt(enum virtpci_dev_type devtype,
+ struct del_vbus_guestpart *delparams)
{
int i;
unsigned char busid[BUS_ID_SIZE];
struct device *vbus;
- GET_BUS_DEV(delparams->bus_no);
+ /* find bus device with the busid that matches match_busid */
+ sprintf(busid, "vbus%d", delparams->bus_no);
+ vbus = bus_find_device(&virtpci_bus_type, NULL,
+ (void *)busid, match_busid);
+ if (!vbus) {
+ LOGERR("**** FAILED to find vbus %s\n", busid);
+ return 0;
+ }
if ((devtype != VIRTHBA_TYPE) && (devtype != VIRTNIC_TYPE)) {
LOGERR("**** FAILED to delete all devices; devtype:%d not vhba:%d or vnic:%d\n",
- devtype, VIRTHBA_TYPE, VIRTNIC_TYPE);
+ devtype, VIRTHBA_TYPE, VIRTNIC_TYPE);
return 0;
}
@@ -694,10 +713,10 @@ virtpci_match_device(const struct pci_device_id *ids,
{
while (ids->vendor || ids->subvendor || ids->class_mask) {
DBGINF("ids->vendor:%x dev->vendor:%x ids->device:%x dev->device:%x\n",
- ids->vendor, dev->vendor, ids->device, dev->device);
+ ids->vendor, dev->vendor, ids->device, dev->device);
- if ((ids->vendor == dev->vendor)
- && (ids->device == dev->device))
+ if ((ids->vendor == dev->vendor) &&
+ (ids->device == dev->device))
return ids;
ids++;
@@ -752,15 +771,15 @@ static int virtpci_device_resume(struct device *dev)
/* For a child device just created on a client bus, fill in
* information about the driver that is controlling this device into
- * the the appropriate slot within the vbus channel of the bus
+ * the appropriate slot within the vbus channel of the bus
* instance.
*/
-static void fix_vbus_devInfo(struct device *dev, int devNo, int devType,
- struct virtpci_driver *virtpcidrv)
+static void fix_vbus_dev_info(struct device *dev, int dev_no, int dev_type,
+ struct virtpci_driver *virtpcidrv)
{
struct device *vbus;
- void *pChan;
- ULTRA_VBUS_DEVICEINFO devInfo;
+ void *chan;
+ struct ultra_vbus_deviceinfo dev_info;
const char *stype;
if (!dev) {
@@ -776,12 +795,12 @@ static void fix_vbus_devInfo(struct device *dev, int devNo, int devType,
LOGERR("%s dev has no parent bus", __func__);
return;
}
- pChan = vbus->platform_data;
- if (!pChan) {
+ chan = vbus->platform_data;
+ if (!chan) {
LOGERR("%s dev bus has no channel", __func__);
return;
}
- switch (devType) {
+ switch (dev_type) {
case PCI_DEVICE_ID_VIRTHBA:
stype = "vHBA";
break;
@@ -792,17 +811,17 @@ static void fix_vbus_devInfo(struct device *dev, int devNo, int devType,
stype = "unknown";
break;
}
- bus_device_info_init(&devInfo, stype,
- virtpcidrv->name,
- virtpcidrv->version,
- virtpcidrv->vertag);
- write_vbus_devInfo(pChan, &devInfo, devNo);
+ bus_device_info_init(&dev_info, stype,
+ virtpcidrv->name,
+ virtpcidrv->version,
+ virtpcidrv->vertag);
+ write_vbus_dev_info(chan, &dev_info, dev_no);
/* Re-write bus+chipset info, because it is possible that this
* was previously written by our good counterpart, visorbus.
*/
- write_vbus_chpInfo(pChan, &Chipset_DriverInfo);
- write_vbus_busInfo(pChan, &Bus_DriverInfo);
+ write_vbus_chp_info(chan, &chipset_driver_info);
+ write_vbus_bus_info(chan, &bus_driver_info);
}
/* This function is called to query the existence of a specific device
@@ -842,13 +861,14 @@ static int virtpci_device_probe(struct device *dev)
*/
error = virtpcidrv->probe(virtpcidev, id);
if (!error) {
- fix_vbus_devInfo(dev, virtpcidev->deviceNo,
- virtpcidev->device, virtpcidrv);
+ fix_vbus_dev_info(dev, virtpcidev->device_no,
+ virtpcidev->device, virtpcidrv);
virtpcidev->mydriver = virtpcidrv;
POSTCODE_LINUX_2(VPCI_PROBE_EXIT_PC,
POSTCODE_SEVERITY_INFO);
- } else
+ } else {
put_device(dev);
+ }
}
POSTCODE_LINUX_2(VPCI_PROBE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
return error; /* -ENODEV for probe failure */
@@ -896,17 +916,20 @@ static void virtpci_bus_release(struct device *dev)
/* Adapter functions */
/*****************************************************/
+/* scsi is expected to be NULL for VNIC add
+ * net is expected to be NULL for VHBA add
+ */
static int virtpci_device_add(struct device *parentbus, int devtype,
struct add_virt_guestpart *addparams,
- struct scsi_adap_info *scsi, /* NULL for VNIC add */
- struct net_adap_info *net /* NULL for VHBA add */)
+ struct scsi_adap_info *scsi,
+ struct net_adap_info *net)
{
struct virtpci_dev *virtpcidev = NULL;
struct virtpci_dev *tmpvpcidev = NULL, *prev;
unsigned long flags;
int ret;
- ULTRA_IO_CHANNEL_PROTOCOL __iomem *pIoChan = NULL;
- struct device *pDev;
+ struct spar_io_channel_protocol __iomem *io_chan = NULL;
+ struct device *dev;
LOGINF("virtpci_device_add parentbus:%p chanptr:%p\n", parentbus,
addparams->chanptr);
@@ -915,14 +938,14 @@ static int virtpci_device_add(struct device *parentbus, int devtype,
if ((devtype != VIRTHBA_TYPE) && (devtype != VIRTNIC_TYPE)) {
LOGERR("**** FAILED to add device; devtype:%d not vhba:%d or vnic:%d\n",
- devtype, VIRTHBA_TYPE, VIRTNIC_TYPE);
+ devtype, VIRTHBA_TYPE, VIRTNIC_TYPE);
POSTCODE_LINUX_3(VPCI_CREATE_FAILURE_PC, devtype,
POSTCODE_SEVERITY_ERR);
return 0;
}
/* add a Virtual Device */
- virtpcidev = kzalloc(sizeof(struct virtpci_dev), GFP_ATOMIC);
+ virtpcidev = kzalloc(sizeof(*virtpcidev), GFP_ATOMIC);
if (virtpcidev == NULL) {
LOGERR("can't add device - malloc FALLED\n");
POSTCODE_LINUX_2(MALLOC_FAILURE_PC, POSTCODE_SEVERITY_ERR);
@@ -939,14 +962,14 @@ static int virtpci_device_add(struct device *parentbus, int devtype,
virtpcidev->net = *net;
}
virtpcidev->vendor = PCI_VENDOR_ID_UNISYS;
- virtpcidev->busNo = addparams->bus_no;
- virtpcidev->deviceNo = addparams->device_no;
+ virtpcidev->bus_no = addparams->bus_no;
+ virtpcidev->device_no = addparams->device_no;
virtpcidev->queueinfo.chan = addparams->chanptr;
virtpcidev->queueinfo.send_int_if_needed = NULL;
/* Set up safe queue... */
- pIoChan = (ULTRA_IO_CHANNEL_PROTOCOL __iomem *)
+ io_chan = (struct spar_io_channel_protocol __iomem *)
virtpcidev->queueinfo.chan;
virtpcidev->intr = addparams->intr;
@@ -962,8 +985,8 @@ static int virtpci_device_add(struct device *parentbus, int devtype,
/* add the vhba/vnic to virtpci device list - but check for
* duplicate wwnn/macaddr first
*/
- write_lock_irqsave(&VpcidevListLock, flags);
- for (tmpvpcidev = VpcidevListHead; tmpvpcidev;
+ write_lock_irqsave(&vpcidev_list_lock, flags);
+ for (tmpvpcidev = vpcidev_list_head; tmpvpcidev;
tmpvpcidev = tmpvpcidev->next) {
if (devtype == VIRTHBA_TYPE) {
if ((tmpvpcidev->scsi.wwnn.wwnn1 == scsi->wwnn.wwnn1) &&
@@ -984,7 +1007,7 @@ static int virtpci_device_add(struct device *parentbus, int devtype,
/* found a vhba/vnic already in the list with same
* wwnn or macaddr - reject add
*/
- write_unlock_irqrestore(&VpcidevListLock, flags);
+ write_unlock_irqrestore(&vpcidev_list_lock, flags);
kfree(virtpcidev);
LOGERR("**** FAILED vhba/vnic already exists in the list\n");
POSTCODE_LINUX_2(VPCI_CREATE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
@@ -992,26 +1015,26 @@ static int virtpci_device_add(struct device *parentbus, int devtype,
}
/* add it at the head */
- if (!VpcidevListHead)
- VpcidevListHead = virtpcidev;
- else {
+ if (!vpcidev_list_head) {
+ vpcidev_list_head = virtpcidev;
+ } else {
/* insert virtpcidev at the head of our linked list of
* vpcidevs
*/
- virtpcidev->next = VpcidevListHead;
- VpcidevListHead = virtpcidev;
+ virtpcidev->next = vpcidev_list_head;
+ vpcidev_list_head = virtpcidev;
}
- write_unlock_irqrestore(&VpcidevListLock, flags);
+ write_unlock_irqrestore(&vpcidev_list_lock, flags);
/* Must transition channel to ATTACHED state BEFORE
* registering the device, because polling of the channel
* queues can begin at any time after device_register().
*/
- pDev = &virtpcidev->generic_dev;
- ULTRA_CHANNEL_CLIENT_TRANSITION(addparams->chanptr,
- BUS_ID(pDev),
- CHANNELCLI_ATTACHED, NULL);
+ dev = &virtpcidev->generic_dev;
+ SPAR_CHANNEL_CLIENT_TRANSITION(addparams->chanptr,
+ BUS_ID(dev),
+ CHANNELCLI_ATTACHED, NULL);
/* don't register until device has been added to
* list. Otherwise, a device_unregister from this function can
@@ -1031,24 +1054,24 @@ static int virtpci_device_add(struct device *parentbus, int devtype,
*/
if (ret) {
LOGERR("device_register returned %d\n", ret);
- pDev = &virtpcidev->generic_dev;
- ULTRA_CHANNEL_CLIENT_TRANSITION(addparams->chanptr,
- BUS_ID(pDev),
- CHANNELCLI_DETACHED, NULL);
+ dev = &virtpcidev->generic_dev;
+ SPAR_CHANNEL_CLIENT_TRANSITION(addparams->chanptr,
+ BUS_ID(dev),
+ CHANNELCLI_DETACHED, NULL);
/* remove virtpcidev, the one we just added, from the list */
- write_lock_irqsave(&VpcidevListLock, flags);
- for (tmpvpcidev = VpcidevListHead, prev = NULL;
+ write_lock_irqsave(&vpcidev_list_lock, flags);
+ for (tmpvpcidev = vpcidev_list_head, prev = NULL;
tmpvpcidev;
prev = tmpvpcidev, tmpvpcidev = tmpvpcidev->next) {
if (tmpvpcidev == virtpcidev) {
if (prev)
prev->next = tmpvpcidev->next;
else
- VpcidevListHead = tmpvpcidev->next;
+ vpcidev_list_head = tmpvpcidev->next;
break;
}
}
- write_unlock_irqrestore(&VpcidevListLock, flags);
+ write_unlock_irqrestore(&vpcidev_list_lock, flags);
kfree(virtpcidev);
return 0;
}
@@ -1080,9 +1103,9 @@ static int virtpci_device_serverdown(struct device *parentbus,
}
/* find the vhba or vnic in virtpci device list */
- write_lock_irqsave(&VpcidevListLock, flags);
+ write_lock_irqsave(&vpcidev_list_lock, flags);
- for (tmpvpcidev = VpcidevListHead, prevvpcidev = NULL;
+ for (tmpvpcidev = vpcidev_list_head, prevvpcidev = NULL;
(tmpvpcidev && !found);
prevvpcidev = tmpvpcidev, tmpvpcidev = tmpvpcidev->next) {
if (tmpvpcidev->devtype != devtype)
@@ -1110,7 +1133,7 @@ static int virtpci_device_serverdown(struct device *parentbus,
vpcidriver = tmpvpcidev->mydriver;
rc = vpcidriver->suspend(tmpvpcidev, 0);
}
- write_unlock_irqrestore(&VpcidevListLock, flags);
+ write_unlock_irqrestore(&vpcidev_list_lock, flags);
if (!found) {
LOGERR("**** FAILED to find vhba/vnic in the list\n");
@@ -1139,9 +1162,9 @@ static int virtpci_device_serverup(struct device *parentbus,
}
/* find the vhba or vnic in virtpci device list */
- write_lock_irqsave(&VpcidevListLock, flags);
+ write_lock_irqsave(&vpcidev_list_lock, flags);
- for (tmpvpcidev = VpcidevListHead, prevvpcidev = NULL;
+ for (tmpvpcidev = vpcidev_list_head, prevvpcidev = NULL;
(tmpvpcidev && !found);
prevvpcidev = tmpvpcidev, tmpvpcidev = tmpvpcidev->next) {
if (tmpvpcidev->devtype != devtype)
@@ -1172,12 +1195,13 @@ static int virtpci_device_serverup(struct device *parentbus,
* ever have a bus that contains NO devices, since we
* would never even get here in that case.
*/
- fix_vbus_devInfo(&tmpvpcidev->generic_dev, tmpvpcidev->deviceNo,
- tmpvpcidev->device, vpcidriver);
+ fix_vbus_dev_info(&tmpvpcidev->generic_dev,
+ tmpvpcidev->device_no,
+ tmpvpcidev->device, vpcidriver);
rc = vpcidriver->resume(tmpvpcidev);
}
- write_unlock_irqrestore(&VpcidevListLock, flags);
+ write_unlock_irqrestore(&vpcidev_list_lock, flags);
if (!found) {
LOGERR("**** FAILED to find vhba/vnic in the list\n");
@@ -1218,8 +1242,8 @@ static int virtpci_device_del(struct device *parentbus,
* device_unregister after we release the lock; otherwise we
* encounter "schedule while atomic"
*/
- write_lock_irqsave(&VpcidevListLock, flags);
- for (tmpvpcidev = VpcidevListHead, prevvpcidev = NULL; tmpvpcidev;) {
+ write_lock_irqsave(&vpcidev_list_lock, flags);
+ for (tmpvpcidev = vpcidev_list_head, prevvpcidev = NULL; tmpvpcidev;) {
if (tmpvpcidev->devtype != devtype)
DEL_CONTINUE;
@@ -1253,7 +1277,7 @@ static int virtpci_device_del(struct device *parentbus,
/* not at head */
prevvpcidev->next = tmpvpcidev->next;
else
- VpcidevListHead = tmpvpcidev->next;
+ vpcidev_list_head = tmpvpcidev->next;
/* add it to our deletelist */
tmpvpcidev->next = dellist;
@@ -1268,9 +1292,9 @@ static int virtpci_device_del(struct device *parentbus,
if (prevvpcidev)
tmpvpcidev = prevvpcidev->next;
else
- tmpvpcidev = VpcidevListHead;
+ tmpvpcidev = vpcidev_list_head;
}
- write_unlock_irqrestore(&VpcidevListLock, flags);
+ write_unlock_irqrestore(&vpcidev_list_lock, flags);
if (!all && (count == 0)) {
LOGERR("**** FAILED to find vhba/vnic in the list\n");
@@ -1425,7 +1449,7 @@ static int print_vbus(struct device *vbus, void *data)
}
static ssize_t info_debugfs_read(struct file *file, char __user *buf,
- size_t len, loff_t *offset)
+ size_t len, loff_t *offset)
{
ssize_t bytes_read = 0;
int str_pos = 0;
@@ -1446,18 +1470,19 @@ static ssize_t info_debugfs_read(struct file *file, char __user *buf,
printparam.buf = vbuf;
printparam.len = &len;
if (bus_for_each_dev(&virtpci_bus_type, NULL,
- (void *) &printparam, print_vbus))
+ (void *)&printparam, print_vbus))
LOGERR("Failed to find bus\n");
str_pos += scnprintf(vbuf + str_pos, len - str_pos,
"\n Virtual PCI devices\n");
- read_lock_irqsave(&VpcidevListLock, flags);
- tmpvpcidev = VpcidevListHead;
+ read_lock_irqsave(&vpcidev_list_lock, flags);
+ tmpvpcidev = vpcidev_list_head;
while (tmpvpcidev) {
if (tmpvpcidev->devtype == VIRTHBA_TYPE) {
str_pos += scnprintf(vbuf + str_pos, len - str_pos,
"[%d:%d] VHba:%08x:%08x max-config:%d-%d-%d-%d",
- tmpvpcidev->busNo, tmpvpcidev->deviceNo,
+ tmpvpcidev->bus_no,
+ tmpvpcidev->device_no,
tmpvpcidev->scsi.wwnn.wwnn1,
tmpvpcidev->scsi.wwnn.wwnn2,
tmpvpcidev->scsi.max.max_channel,
@@ -1467,7 +1492,8 @@ static ssize_t info_debugfs_read(struct file *file, char __user *buf,
} else {
str_pos += scnprintf(vbuf + str_pos, len - str_pos,
"[%d:%d] VNic:%02x:%02x:%02x:%02x:%02x:%02x num_rcv_bufs:%d mtu:%d",
- tmpvpcidev->busNo, tmpvpcidev->deviceNo,
+ tmpvpcidev->bus_no,
+ tmpvpcidev->device_no,
tmpvpcidev->net.mac_addr[0],
tmpvpcidev->net.mac_addr[1],
tmpvpcidev->net.mac_addr[2],
@@ -1482,7 +1508,7 @@ static ssize_t info_debugfs_read(struct file *file, char __user *buf,
tmpvpcidev->queueinfo.chan);
tmpvpcidev = tmpvpcidev->next;
}
- read_unlock_irqrestore(&VpcidevListLock, flags);
+ read_unlock_irqrestore(&vpcidev_list_lock, flags);
str_pos += scnprintf(vbuf + str_pos, len - str_pos, "\n");
bytes_read = simple_read_from_buffer(buf, len, offset, vbuf, str_pos);
@@ -1498,7 +1524,6 @@ static int __init virtpci_mod_init(void)
{
int ret;
-
if (!unisys_spar_platform)
return -ENODEV;
@@ -1515,8 +1540,8 @@ static int __init virtpci_mod_init(void)
return ret;
}
DBGINF("bus_register successful\n");
- bus_device_info_init(&Bus_DriverInfo, "clientbus", "virtpci",
- VERSION, NULL);
+ bus_device_info_init(&bus_driver_info, "clientbus", "virtpci",
+ VERSION, NULL);
/* create a root bus used to parent all the virtpci buses. */
ret = device_register(&virtpci_rootbus_device);
@@ -1529,8 +1554,8 @@ static int __init virtpci_mod_init(void)
}
DBGINF("device_register successful ret:%x\n", ret);
- if (!uisctrl_register_req_handler(2, (void *) &virtpci_ctrlchan_func,
- &Chipset_DriverInfo)) {
+ if (!uisctrl_register_req_handler(2, (void *)&virtpci_ctrlchan_func,
+ &chipset_driver_info)) {
LOGERR("uisctrl_register_req_handler ****FAILED.\n");
POSTCODE_LINUX_2(VPCI_CREATE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
device_unregister(&virtpci_rootbus_device);
@@ -1539,11 +1564,11 @@ static int __init virtpci_mod_init(void)
}
LOGINF("successfully registered virtpci_ctrlchan_func (0x%p) as callback.\n",
- (void *) &virtpci_ctrlchan_func);
+ (void *)&virtpci_ctrlchan_func);
/* create debugfs directory and info file inside. */
virtpci_debugfs_dir = debugfs_create_dir("virtpci", NULL);
debugfs_create_file("info", S_IRUSR, virtpci_debugfs_dir,
- NULL, &debugfs_info_fops);
+ NULL, &debugfs_info_fops);
LOGINF("Leaving\n");
POSTCODE_LINUX_2(VPCI_CREATE_EXIT_PC, POSTCODE_SEVERITY_INFO);
return 0;
@@ -1561,7 +1586,6 @@ static void __exit virtpci_mod_exit(void)
bus_unregister(&virtpci_bus_type);
debugfs_remove_recursive(virtpci_debugfs_dir);
LOGINF("Leaving\n");
-
}
module_init(virtpci_mod_init);
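Most of the churn in virtpci.c is the rename of VpcidevListHead/VpcidevListLock to vpcidev_list_head/vpcidev_list_lock; the locking discipline itself is unchanged: writers take the rwlock around head insertion and unlinking, and the debugfs dump takes it shared. A stripped-down sketch of that discipline (the node and function names are illustrative, not driver symbols):

struct node {
	struct node *next;
};

static struct node *list_head;
static DEFINE_RWLOCK(list_lock);

static void list_push(struct node *n)
{
	unsigned long flags;

	write_lock_irqsave(&list_lock, flags);
	n->next = list_head;		/* insert at the head */
	list_head = n;
	write_unlock_irqrestore(&list_lock, flags);
}

static void list_unlink(struct node *n)
{
	struct node *cur, *prev = NULL;
	unsigned long flags;

	write_lock_irqsave(&list_lock, flags);
	for (cur = list_head; cur; prev = cur, cur = cur->next) {
		if (cur != n)
			continue;
		if (prev)
			prev->next = cur->next;	/* splice out of the middle */
		else
			list_head = cur->next;	/* removing the head itself */
		break;
	}
	write_unlock_irqrestore(&list_lock, flags);
}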
diff --git a/drivers/staging/unisys/virtpci/virtpci.h b/drivers/staging/unisys/virtpci/virtpci.h
index 6e26956c79f..9d85f55e816 100644
--- a/drivers/staging/unisys/virtpci/virtpci.h
+++ b/drivers/staging/unisys/virtpci/virtpci.h
@@ -42,25 +42,25 @@ struct net_adap_info {
u8 mac_addr[MAX_MACADDR_LEN];
int num_rcv_bufs;
unsigned mtu;
- uuid_le zoneGuid;
+ uuid_le zone_uuid;
};
-typedef enum {
+enum virtpci_dev_type {
VIRTHBA_TYPE = 0,
VIRTNIC_TYPE = 1,
VIRTBUS_TYPE = 6,
-} VIRTPCI_DEV_TYPE;
+};
struct virtpci_dev {
- VIRTPCI_DEV_TYPE devtype; /* indicates type of the
+ enum virtpci_dev_type devtype; /* indicates type of the
* virtual pci device */
struct virtpci_driver *mydriver; /* which driver has allocated
* this device */
unsigned short vendor; /* vendor id for device */
unsigned short device; /* device id for device */
- u32 busNo; /* number of bus on which device exists */
- u32 deviceNo; /* device's number on the bus */
- struct InterruptInfo intr; /* interrupt info */
+ u32 bus_no; /* number of bus on which device exists */
+ u32 device_no; /* device's number on the bus */
+ struct irq_info intr; /* interrupt info */
struct device generic_dev; /* generic device */
union {
struct scsi_adap_info scsi;
@@ -80,15 +80,15 @@ struct virtpci_driver {
const struct pci_device_id *id_table; /* must be non-NULL for probe
* to be called */
int (*probe)(struct virtpci_dev *dev,
- const struct pci_device_id *id); /* device inserted */
+ const struct pci_device_id *id); /* device inserted */
void (*remove)(struct virtpci_dev *dev); /* Device removed (NULL if
* not a hot-plug capable
* driver) */
int (*suspend)(struct virtpci_dev *dev,
- u32 state); /* Device suspended */
+ u32 state); /* Device suspended */
int (*resume)(struct virtpci_dev *dev); /* Device woken up */
int (*enable_wake)(struct virtpci_dev *dev,
- u32 state, int enable); /* Enable wake event */
+ u32 state, int enable); /* Enable wake event */
struct device_driver core_driver; /* VIRTPCI core fills this in */
};
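struct virtpci_dev embeds its struct device as generic_dev, which is the pointer the driver core hands back to virtpci_device_probe() and the other callbacks; recovering the wrapper is the usual container_of() step. A small sketch of that idiom (the helper name to_virtpci_dev() is invented here for illustration, not part of the patch):

#include <linux/kernel.h>	/* container_of() */

static inline struct virtpci_dev *to_virtpci_dev(struct device *dev)
{
	/* recover the enclosing virtpci_dev from its embedded device */
	return container_of(dev, struct virtpci_dev, generic_dev);
}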
diff --git a/drivers/staging/unisys/visorchannel/globals.h b/drivers/staging/unisys/visorchannel/globals.h
index 07653b8dea7..581ed83fe6d 100644
--- a/drivers/staging/unisys/visorchannel/globals.h
+++ b/drivers/staging/unisys/visorchannel/globals.h
@@ -25,5 +25,4 @@
#define MYDRVNAME "visorchannel"
-
#endif
diff --git a/drivers/staging/unisys/visorchannel/visorchannel.h b/drivers/staging/unisys/visorchannel/visorchannel.h
index 9a4d7b6755d..5061edff959 100644
--- a/drivers/staging/unisys/visorchannel/visorchannel.h
+++ b/drivers/staging/unisys/visorchannel/visorchannel.h
@@ -66,7 +66,7 @@ char *visorchannel_id(VISORCHANNEL *channel, char *s);
char *visorchannel_zoneid(VISORCHANNEL *channel, char *s);
u64 visorchannel_get_clientpartition(VISORCHANNEL *channel);
uuid_le visorchannel_get_uuid(VISORCHANNEL *channel);
-MEMREGION *visorchannel_get_memregion(VISORCHANNEL *channel);
+struct memregion *visorchannel_get_memregion(VISORCHANNEL *channel);
char *visorchannel_uuid_id(uuid_le *guid, char *s);
void visorchannel_debug(VISORCHANNEL *channel, int nQueues,
struct seq_file *seq, u32 off);
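The header change above only swaps the typedef'd MEMREGION for a plain struct memregion in the prototype. Because callers never dereference the pointer, a forward declaration is all visorchannel.h needs; a hedged sketch of that pattern (the stash_region() caller is illustrative):

struct memregion;		/* opaque: the definition lives with the memregion code */

struct memregion *visorchannel_get_memregion(VISORCHANNEL *channel);

/* A caller can pass the handle around without knowing its layout. */
static void stash_region(VISORCHANNEL *chan, struct memregion **out)
{
	*out = visorchannel_get_memregion(chan);	/* may be NULL */
}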
diff --git a/drivers/staging/unisys/visorchannel/visorchannel_funcs.c b/drivers/staging/unisys/visorchannel/visorchannel_funcs.c
index 01a44c55350..36559d5fa67 100644
--- a/drivers/staging/unisys/visorchannel/visorchannel_funcs.c
+++ b/drivers/staging/unisys/visorchannel/visorchannel_funcs.c
@@ -29,8 +29,8 @@
#define MYDRVNAME "visorchannel"
struct VISORCHANNEL_Tag {
- MEMREGION *memregion; /* from visor_memregion_create() */
- CHANNEL_HEADER chan_hdr;
+ struct memregion *memregion; /* from visor_memregion_create() */
+ struct channel_header chan_hdr;
uuid_le guid;
ulong size;
BOOL needs_lock;
@@ -38,10 +38,10 @@ struct VISORCHANNEL_Tag {
spinlock_t remove_lock;
struct {
- SIGNAL_QUEUE_HEADER req_queue;
- SIGNAL_QUEUE_HEADER rsp_queue;
- SIGNAL_QUEUE_HEADER event_queue;
- SIGNAL_QUEUE_HEADER ack_queue;
+ struct signal_queue_header req_queue;
+ struct signal_queue_header rsp_queue;
+ struct signal_queue_header event_queue;
+ struct signal_queue_header ack_queue;
} safe_uis_queue;
};
@@ -60,7 +60,7 @@ visorchannel_create_guts(HOSTADDRESS physaddr, ulong channelBytes,
if (p == NULL) {
ERRDRV("allocation failed: (status=0)\n");
rc = NULL;
- goto Away;
+ goto cleanup;
}
p->memregion = NULL;
p->needs_lock = needs_lock;
@@ -70,39 +70,39 @@ visorchannel_create_guts(HOSTADDRESS physaddr, ulong channelBytes,
/* prepare chan_hdr (abstraction to read/write channel memory) */
if (parent == NULL)
p->memregion =
- visor_memregion_create(physaddr, sizeof(CHANNEL_HEADER));
+ visor_memregion_create(physaddr,
+ sizeof(struct channel_header));
else
p->memregion =
visor_memregion_create_overlapped(parent->memregion,
- off,
- sizeof(CHANNEL_HEADER));
+ off, sizeof(struct channel_header));
if (p->memregion == NULL) {
ERRDRV("visor_memregion_create failed failed: (status=0)\n");
rc = NULL;
- goto Away;
+ goto cleanup;
}
if (visor_memregion_read(p->memregion, 0, &p->chan_hdr,
- sizeof(CHANNEL_HEADER)) < 0) {
+ sizeof(struct channel_header)) < 0) {
ERRDRV("visor_memregion_read failed: (status=0)\n");
rc = NULL;
- goto Away;
+ goto cleanup;
}
if (channelBytes == 0)
/* we had better be a CLIENT of this channel */
- channelBytes = (ulong) p->chan_hdr.Size;
+ channelBytes = (ulong)p->chan_hdr.size;
if (uuid_le_cmp(guid, NULL_UUID_LE) == 0)
/* we had better be a CLIENT of this channel */
- guid = p->chan_hdr.Type;
+ guid = p->chan_hdr.chtype;
if (visor_memregion_resize(p->memregion, channelBytes) < 0) {
ERRDRV("visor_memregion_resize failed: (status=0)\n");
rc = NULL;
- goto Away;
+ goto cleanup;
}
p->size = channelBytes;
p->guid = guid;
rc = p;
-Away:
+cleanup:
if (rc == NULL) {
if (p != NULL) {
@@ -194,14 +194,14 @@ EXPORT_SYMBOL_GPL(visorchannel_id);
char *
visorchannel_zoneid(VISORCHANNEL *channel, char *s)
{
- return visorchannel_uuid_id(&channel->chan_hdr.ZoneGuid, s);
+ return visorchannel_uuid_id(&channel->chan_hdr.zone_uuid, s);
}
EXPORT_SYMBOL_GPL(visorchannel_zoneid);
HOSTADDRESS
visorchannel_get_clientpartition(VISORCHANNEL *channel)
{
- return channel->chan_hdr.PartitionHandle;
+ return channel->chan_hdr.partition_handle;
}
EXPORT_SYMBOL_GPL(visorchannel_get_clientpartition);
@@ -212,7 +212,7 @@ visorchannel_get_uuid(VISORCHANNEL *channel)
}
EXPORT_SYMBOL_GPL(visorchannel_get_uuid);
-MEMREGION *
+struct memregion *
visorchannel_get_memregion(VISORCHANNEL *channel)
{
return channel->memregion;
@@ -225,8 +225,11 @@ visorchannel_read(VISORCHANNEL *channel, ulong offset,
{
int rc = visor_memregion_read(channel->memregion, offset,
local, nbytes);
- if ((rc >= 0) && (offset == 0) && (nbytes >= sizeof(CHANNEL_HEADER)))
- memcpy(&channel->chan_hdr, local, sizeof(CHANNEL_HEADER));
+ if ((rc >= 0) && (offset == 0) &&
+ (nbytes >= sizeof(struct channel_header))) {
+ memcpy(&channel->chan_hdr, local,
+ sizeof(struct channel_header));
+ }
return rc;
}
EXPORT_SYMBOL_GPL(visorchannel_read);
@@ -235,8 +238,9 @@ int
visorchannel_write(VISORCHANNEL *channel, ulong offset,
void *local, ulong nbytes)
{
- if (offset == 0 && nbytes >= sizeof(CHANNEL_HEADER))
- memcpy(&channel->chan_hdr, local, sizeof(CHANNEL_HEADER));
+ if (offset == 0 && nbytes >= sizeof(struct channel_header))
+ memcpy(&channel->chan_hdr, local,
+ sizeof(struct channel_header));
return visor_memregion_write(channel->memregion, offset, local, nbytes);
}
EXPORT_SYMBOL_GPL(visorchannel_write);
@@ -251,7 +255,7 @@ visorchannel_clear(VISORCHANNEL *channel, ulong offset, u8 ch, ulong nbytes)
if (buf == NULL) {
ERRDRV("%s failed memory allocation", __func__);
- goto Away;
+ goto cleanup;
}
memset(buf, ch, bufsize);
while (nbytes > 0) {
@@ -264,14 +268,14 @@ visorchannel_clear(VISORCHANNEL *channel, ulong offset, u8 ch, ulong nbytes)
buf, thisbytes);
if (x < 0) {
rc = x;
- goto Away;
+ goto cleanup;
}
written += thisbytes;
nbytes -= thisbytes;
}
rc = 0;
-Away:
+cleanup:
if (buf != NULL) {
vfree(buf);
buf = NULL;
@@ -283,7 +287,7 @@ EXPORT_SYMBOL_GPL(visorchannel_clear);
void __iomem *
visorchannel_get_header(VISORCHANNEL *channel)
{
- return (void __iomem *) &(channel->chan_hdr);
+ return (void __iomem *)&channel->chan_hdr;
}
EXPORT_SYMBOL_GPL(visorchannel_get_header);
@@ -291,14 +295,15 @@ EXPORT_SYMBOL_GPL(visorchannel_get_header);
* channel header
*/
#define SIG_QUEUE_OFFSET(chan_hdr, q) \
- ((chan_hdr)->oChannelSpace + ((q) * sizeof(SIGNAL_QUEUE_HEADER)))
+ ((chan_hdr)->ch_space_offset + \
+ ((q) * sizeof(struct signal_queue_header)))
/** Return offset of a specific queue entry (data) from the beginning of a
* channel header
*/
#define SIG_DATA_OFFSET(chan_hdr, q, sig_hdr, slot) \
- (SIG_QUEUE_OFFSET(chan_hdr, q) + (sig_hdr)->oSignalBase + \
- ((slot) * (sig_hdr)->SignalSize))
+ (SIG_QUEUE_OFFSET(chan_hdr, q) + (sig_hdr)->sig_base_offset + \
+ ((slot) * (sig_hdr)->signal_size))
/** Write the contents of a specific field within a SIGNAL_QUEUE_HEADER back
* into host memory
@@ -306,39 +311,42 @@ EXPORT_SYMBOL_GPL(visorchannel_get_header);
#define SIG_WRITE_FIELD(channel, queue, sig_hdr, FIELD) \
(visor_memregion_write(channel->memregion, \
SIG_QUEUE_OFFSET(&channel->chan_hdr, queue)+ \
- offsetof(SIGNAL_QUEUE_HEADER, FIELD), \
+ offsetof(struct signal_queue_header, FIELD),\
&((sig_hdr)->FIELD), \
sizeof((sig_hdr)->FIELD)) >= 0)
static BOOL
sig_read_header(VISORCHANNEL *channel, u32 queue,
- SIGNAL_QUEUE_HEADER *sig_hdr)
+ struct signal_queue_header *sig_hdr)
{
BOOL rc = FALSE;
- if (channel->chan_hdr.oChannelSpace < sizeof(CHANNEL_HEADER)) {
+ if (channel->chan_hdr.ch_space_offset < sizeof(struct channel_header)) {
ERRDRV("oChannelSpace too small: (status=%d)\n", rc);
- goto Away;
+ goto cleanup;
}
/* Read the appropriate SIGNAL_QUEUE_HEADER into local memory. */
if (visor_memregion_read(channel->memregion,
SIG_QUEUE_OFFSET(&channel->chan_hdr, queue),
- sig_hdr, sizeof(SIGNAL_QUEUE_HEADER)) < 0) {
+ sig_hdr,
+ sizeof(struct signal_queue_header)) < 0) {
ERRDRV("queue=%d SIG_QUEUE_OFFSET=%d",
queue, (int)SIG_QUEUE_OFFSET(&channel->chan_hdr, queue));
- ERRDRV("visor_memregion_read of signal queue failed: (status=%d)\n", rc);
- goto Away;
+ ERRDRV("visor_memregion_read of signal queue failed: (status=%d)\n",
+ rc);
+ goto cleanup;
}
rc = TRUE;
-Away:
+cleanup:
return rc;
}
static BOOL
sig_do_data(VISORCHANNEL *channel, u32 queue,
- SIGNAL_QUEUE_HEADER *sig_hdr, u32 slot, void *data, BOOL is_write)
+ struct signal_queue_header *sig_hdr, u32 slot, void *data,
+ BOOL is_write)
{
BOOL rc = FALSE;
int signal_data_offset = SIG_DATA_OFFSET(&channel->chan_hdr, queue,
@@ -346,53 +354,55 @@ sig_do_data(VISORCHANNEL *channel, u32 queue,
if (is_write) {
if (visor_memregion_write(channel->memregion,
signal_data_offset,
- data, sig_hdr->SignalSize) < 0) {
- ERRDRV("visor_memregion_write of signal data failed: (status=%d)\n", rc);
- goto Away;
+ data, sig_hdr->signal_size) < 0) {
+ ERRDRV("visor_memregion_write of signal data failed: (status=%d)\n",
+ rc);
+ goto cleanup;
}
} else {
if (visor_memregion_read(channel->memregion, signal_data_offset,
- data, sig_hdr->SignalSize) < 0) {
- ERRDRV("visor_memregion_read of signal data failed: (status=%d)\n", rc);
- goto Away;
+ data, sig_hdr->signal_size) < 0) {
+ ERRDRV("visor_memregion_read of signal data failed: (status=%d)\n",
+ rc);
+ goto cleanup;
}
}
rc = TRUE;
-Away:
+cleanup:
return rc;
}
static inline BOOL
sig_read_data(VISORCHANNEL *channel, u32 queue,
- SIGNAL_QUEUE_HEADER *sig_hdr, u32 slot, void *data)
+ struct signal_queue_header *sig_hdr, u32 slot, void *data)
{
return sig_do_data(channel, queue, sig_hdr, slot, data, FALSE);
}
static inline BOOL
sig_write_data(VISORCHANNEL *channel, u32 queue,
- SIGNAL_QUEUE_HEADER *sig_hdr, u32 slot, void *data)
+ struct signal_queue_header *sig_hdr, u32 slot, void *data)
{
return sig_do_data(channel, queue, sig_hdr, slot, data, TRUE);
}
static inline unsigned char
-safe_sig_queue_validate(pSIGNAL_QUEUE_HEADER psafe_sqh,
- pSIGNAL_QUEUE_HEADER punsafe_sqh,
+safe_sig_queue_validate(struct signal_queue_header *psafe_sqh,
+ struct signal_queue_header *punsafe_sqh,
u32 *phead, u32 *ptail)
{
- if ((*phead >= psafe_sqh->MaxSignalSlots)
- || (*ptail >= psafe_sqh->MaxSignalSlots)) {
+ if ((*phead >= psafe_sqh->max_slots) ||
+ (*ptail >= psafe_sqh->max_slots)) {
/* Choose 0 or max, maybe based on current tail value */
*phead = 0;
*ptail = 0;
/* Sync with client as necessary */
- punsafe_sqh->Head = *phead;
- punsafe_sqh->Tail = *ptail;
+ punsafe_sqh->head = *phead;
+ punsafe_sqh->tail = *ptail;
ERRDRV("safe_sig_queue_validate: head = 0x%x, tail = 0x%x, MaxSlots = 0x%x",
- *phead, *ptail, psafe_sqh->MaxSignalSlots);
+ *phead, *ptail, psafe_sqh->max_slots);
return 0;
}
return 1;
@@ -402,41 +412,42 @@ BOOL
visorchannel_signalremove(VISORCHANNEL *channel, u32 queue, void *msg)
{
BOOL rc = FALSE;
- SIGNAL_QUEUE_HEADER sig_hdr;
+ struct signal_queue_header sig_hdr;
if (channel->needs_lock)
spin_lock(&channel->remove_lock);
if (!sig_read_header(channel, queue, &sig_hdr)) {
rc = FALSE;
- goto Away;
+ goto cleanup;
}
- if (sig_hdr.Head == sig_hdr.Tail) {
+ if (sig_hdr.head == sig_hdr.tail) {
rc = FALSE; /* no signals to remove */
- goto Away;
+ goto cleanup;
}
- sig_hdr.Tail = (sig_hdr.Tail + 1) % sig_hdr.MaxSignalSlots;
- if (!sig_read_data(channel, queue, &sig_hdr, sig_hdr.Tail, msg)) {
+ sig_hdr.tail = (sig_hdr.tail + 1) % sig_hdr.max_slots;
+ if (!sig_read_data(channel, queue, &sig_hdr, sig_hdr.tail, msg)) {
ERRDRV("sig_read_data failed: (status=%d)\n", rc);
- goto Away;
+ goto cleanup;
}
- sig_hdr.NumSignalsReceived++;
+ sig_hdr.num_received++;
/* For each data field in SIGNAL_QUEUE_HEADER that was modified,
* update host memory.
*/
mb(); /* required for channel synch */
- if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, Tail)) {
+ if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, tail)) {
ERRDRV("visor_memregion_write of Tail failed: (status=%d)\n",
rc);
- goto Away;
+ goto cleanup;
}
- if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, NumSignalsReceived)) {
- ERRDRV("visor_memregion_write of NumSignalsReceived failed: (status=%d)\n", rc);
- goto Away;
+ if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_received)) {
+ ERRDRV("visor_memregion_write of NumSignalsReceived failed: (status=%d)\n",
+ rc);
+ goto cleanup;
}
rc = TRUE;
-Away:
+cleanup:
if (channel->needs_lock)
spin_unlock(&channel->remove_lock);
@@ -448,48 +459,50 @@ BOOL
visorchannel_signalinsert(VISORCHANNEL *channel, u32 queue, void *msg)
{
BOOL rc = FALSE;
- SIGNAL_QUEUE_HEADER sig_hdr;
+ struct signal_queue_header sig_hdr;
if (channel->needs_lock)
spin_lock(&channel->insert_lock);
if (!sig_read_header(channel, queue, &sig_hdr)) {
rc = FALSE;
- goto Away;
+ goto cleanup;
}
- sig_hdr.Head = ((sig_hdr.Head + 1) % sig_hdr.MaxSignalSlots);
- if (sig_hdr.Head == sig_hdr.Tail) {
- sig_hdr.NumOverflows++;
- if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, NumOverflows)) {
- ERRDRV("visor_memregion_write of NumOverflows failed: (status=%d)\n", rc);
- goto Away;
+ sig_hdr.head = ((sig_hdr.head + 1) % sig_hdr.max_slots);
+ if (sig_hdr.head == sig_hdr.tail) {
+ sig_hdr.num_overflows++;
+ if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_overflows)) {
+ ERRDRV("visor_memregion_write of NumOverflows failed: (status=%d)\n",
+ rc);
+ goto cleanup;
}
rc = FALSE;
- goto Away;
+ goto cleanup;
}
- if (!sig_write_data(channel, queue, &sig_hdr, sig_hdr.Head, msg)) {
+ if (!sig_write_data(channel, queue, &sig_hdr, sig_hdr.head, msg)) {
ERRDRV("sig_write_data failed: (status=%d)\n", rc);
- goto Away;
+ goto cleanup;
}
- sig_hdr.NumSignalsSent++;
+ sig_hdr.num_sent++;
/* For each data field in SIGNAL_QUEUE_HEADER that was modified,
* update host memory.
*/
mb(); /* required for channel synch */
- if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, Head)) {
+ if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, head)) {
ERRDRV("visor_memregion_write of Head failed: (status=%d)\n",
rc);
- goto Away;
+ goto cleanup;
}
- if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, NumSignalsSent)) {
- ERRDRV("visor_memregion_write of NumSignalsSent failed: (status=%d)\n", rc);
- goto Away;
+ if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_sent)) {
+ ERRDRV("visor_memregion_write of NumSignalsSent failed: (status=%d)\n",
+ rc);
+ goto cleanup;
}
rc = TRUE;
-Away:
+cleanup:
if (channel->needs_lock)
spin_unlock(&channel->insert_lock);
@@ -497,59 +510,58 @@ Away:
}
EXPORT_SYMBOL_GPL(visorchannel_signalinsert);
-
int
visorchannel_signalqueue_slots_avail(VISORCHANNEL *channel, u32 queue)
{
- SIGNAL_QUEUE_HEADER sig_hdr;
+ struct signal_queue_header sig_hdr;
u32 slots_avail, slots_used;
u32 head, tail;
if (!sig_read_header(channel, queue, &sig_hdr))
return 0;
- head = sig_hdr.Head;
- tail = sig_hdr.Tail;
+ head = sig_hdr.head;
+ tail = sig_hdr.tail;
if (head < tail)
- head = head + sig_hdr.MaxSignalSlots;
+ head = head + sig_hdr.max_slots;
slots_used = (head - tail);
- slots_avail = sig_hdr.MaxSignals - slots_used;
- return (int) slots_avail;
+ slots_avail = sig_hdr.max_signals - slots_used;
+ return (int)slots_avail;
}
EXPORT_SYMBOL_GPL(visorchannel_signalqueue_slots_avail);
int
visorchannel_signalqueue_max_slots(VISORCHANNEL *channel, u32 queue)
{
- SIGNAL_QUEUE_HEADER sig_hdr;
+ struct signal_queue_header sig_hdr;
if (!sig_read_header(channel, queue, &sig_hdr))
return 0;
- return (int) sig_hdr.MaxSignals;
+ return (int)sig_hdr.max_signals;
}
EXPORT_SYMBOL_GPL(visorchannel_signalqueue_max_slots);
static void
-sigqueue_debug(SIGNAL_QUEUE_HEADER *q, int which, struct seq_file *seq)
+sigqueue_debug(struct signal_queue_header *q, int which, struct seq_file *seq)
{
seq_printf(seq, "Signal Queue #%d\n", which);
- seq_printf(seq, " VersionId = %lu\n", (ulong) q->VersionId);
- seq_printf(seq, " Type = %lu\n", (ulong) q->Type);
+ seq_printf(seq, " VersionId = %lu\n", (ulong)q->version);
+ seq_printf(seq, " Type = %lu\n", (ulong)q->chtype);
seq_printf(seq, " oSignalBase = %llu\n",
- (long long) q->oSignalBase);
- seq_printf(seq, " SignalSize = %lu\n", (ulong) q->SignalSize);
+ (long long)q->sig_base_offset);
+ seq_printf(seq, " SignalSize = %lu\n", (ulong)q->signal_size);
seq_printf(seq, " MaxSignalSlots = %lu\n",
- (ulong) q->MaxSignalSlots);
- seq_printf(seq, " MaxSignals = %lu\n", (ulong) q->MaxSignals);
+ (ulong)q->max_slots);
+ seq_printf(seq, " MaxSignals = %lu\n", (ulong)q->max_signals);
seq_printf(seq, " FeatureFlags = %-16.16Lx\n",
- (long long) q->FeatureFlags);
+ (long long)q->features);
seq_printf(seq, " NumSignalsSent = %llu\n",
- (long long) q->NumSignalsSent);
+ (long long)q->num_sent);
seq_printf(seq, " NumSignalsReceived = %llu\n",
- (long long) q->NumSignalsReceived);
+ (long long)q->num_received);
seq_printf(seq, " NumOverflows = %llu\n",
- (long long) q->NumOverflows);
- seq_printf(seq, " Head = %lu\n", (ulong) q->Head);
- seq_printf(seq, " Tail = %lu\n", (ulong) q->Tail);
+ (long long)q->num_overflows);
+ seq_printf(seq, " Head = %lu\n", (ulong)q->head);
+ seq_printf(seq, " Tail = %lu\n", (ulong)q->tail);
}
void
@@ -558,9 +570,9 @@ visorchannel_debug(VISORCHANNEL *channel, int nQueues,
{
HOSTADDRESS addr = 0;
ulong nbytes = 0, nbytes_region = 0;
- MEMREGION *memregion = NULL;
- CHANNEL_HEADER hdr;
- CHANNEL_HEADER *phdr = &hdr;
+ struct memregion *memregion = NULL;
+ struct channel_header hdr;
+ struct channel_header *phdr = &hdr;
int i = 0;
int errcode = 0;
@@ -576,7 +588,7 @@ visorchannel_debug(VISORCHANNEL *channel, int nQueues,
addr = visor_memregion_get_physaddr(memregion);
nbytes_region = visor_memregion_get_nbytes(memregion);
errcode = visorchannel_read(channel, off,
- phdr, sizeof(CHANNEL_HEADER));
+ phdr, sizeof(struct channel_header));
if (errcode < 0) {
seq_printf(seq,
"Read of channel header failed with errcode=%d)\n",
@@ -584,39 +596,41 @@ visorchannel_debug(VISORCHANNEL *channel, int nQueues,
if (off == 0) {
phdr = &channel->chan_hdr;
seq_puts(seq, "(following data may be stale)\n");
- } else
+ } else {
return;
+ }
}
- nbytes = (ulong) (phdr->Size);
+ nbytes = (ulong)(phdr->size);
seq_printf(seq, "--- Begin channel @0x%-16.16Lx for 0x%lx bytes (region=0x%lx bytes) ---\n",
addr + off, nbytes, nbytes_region);
- seq_printf(seq, "Type = %pUL\n", &phdr->Type);
- seq_printf(seq, "ZoneGuid = %pUL\n", &phdr->ZoneGuid);
+ seq_printf(seq, "Type = %pUL\n", &phdr->chtype);
+ seq_printf(seq, "ZoneGuid = %pUL\n", &phdr->zone_uuid);
seq_printf(seq, "Signature = 0x%-16.16Lx\n",
- (long long) phdr->Signature);
- seq_printf(seq, "LegacyState = %lu\n", (ulong) phdr->LegacyState);
- seq_printf(seq, "SrvState = %lu\n", (ulong) phdr->SrvState);
- seq_printf(seq, "CliStateBoot = %lu\n", (ulong) phdr->CliStateBoot);
- seq_printf(seq, "CliStateOS = %lu\n", (ulong) phdr->CliStateOS);
- seq_printf(seq, "HeaderSize = %lu\n", (ulong) phdr->HeaderSize);
- seq_printf(seq, "Size = %llu\n", (long long) phdr->Size);
+ (long long)phdr->signature);
+ seq_printf(seq, "LegacyState = %lu\n", (ulong)phdr->legacy_state);
+ seq_printf(seq, "SrvState = %lu\n", (ulong)phdr->srv_state);
+ seq_printf(seq, "CliStateBoot = %lu\n", (ulong)phdr->cli_state_boot);
+ seq_printf(seq, "CliStateOS = %lu\n", (ulong)phdr->cli_state_os);
+ seq_printf(seq, "HeaderSize = %lu\n", (ulong)phdr->header_size);
+ seq_printf(seq, "Size = %llu\n", (long long)phdr->size);
seq_printf(seq, "Features = 0x%-16.16llx\n",
- (long long) phdr->Features);
+ (long long)phdr->features);
seq_printf(seq, "PartitionHandle = 0x%-16.16llx\n",
- (long long) phdr->PartitionHandle);
+ (long long)phdr->partition_handle);
seq_printf(seq, "Handle = 0x%-16.16llx\n",
- (long long) phdr->Handle);
- seq_printf(seq, "VersionId = %lu\n", (ulong) phdr->VersionId);
+ (long long)phdr->handle);
+ seq_printf(seq, "VersionId = %lu\n", (ulong)phdr->version_id);
seq_printf(seq, "oChannelSpace = %llu\n",
- (long long) phdr->oChannelSpace);
- if ((phdr->oChannelSpace == 0) || (errcode < 0))
+ (long long)phdr->ch_space_offset);
+ if ((phdr->ch_space_offset == 0) || (errcode < 0))
;
else
for (i = 0; i < nQueues; i++) {
- SIGNAL_QUEUE_HEADER q;
+ struct signal_queue_header q;
errcode = visorchannel_read(channel,
- off + phdr->oChannelSpace +
+ off +
+ phdr->ch_space_offset +
(i * sizeof(q)),
&q, sizeof(q));
if (errcode < 0) {
@@ -643,15 +657,17 @@ visorchannel_dump_section(VISORCHANNEL *chan, char *s,
fmtbufsize = 100 * COVQ(len, 16);
buf = kmalloc(len, GFP_KERNEL|__GFP_NORETRY);
+ if (!buf)
+ return;
fmtbuf = kmalloc(fmtbufsize, GFP_KERNEL|__GFP_NORETRY);
- if (buf == NULL || fmtbuf == NULL)
- goto Away;
+ if (!fmtbuf)
+ goto fmt_failed;
errcode = visorchannel_read(chan, off, buf, len);
if (errcode < 0) {
ERRDRV("%s failed to read %s from channel errcode=%d",
s, __func__, errcode);
- goto Away;
+ goto read_failed;
}
seq_printf(seq, "channel %s:\n", s);
tbuf = buf;
@@ -663,14 +679,9 @@ visorchannel_dump_section(VISORCHANNEL *chan, char *s,
len -= 16;
}
-Away:
- if (buf != NULL) {
- kfree(buf);
- buf = NULL;
- }
- if (fmtbuf != NULL) {
- kfree(fmtbuf);
- fmtbuf = NULL;
- }
+read_failed:
+ kfree(fmtbuf);
+fmt_failed:
+ kfree(buf);
}
EXPORT_SYMBOL_GPL(visorchannel_dump_section);
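The signalinsert/signalremove hunks above rename the queue header fields to head, tail and max_slots but keep the same circular-buffer arithmetic: the producer advances head modulo max_slots and treats head == tail as an overflow, the consumer advances tail modulo max_slots and treats head == tail as empty. The sketch below condenses that logic, checking for full before committing the new head rather than advancing first as the driver does; types and names are simplified and not driver symbols:

struct ring {
	unsigned int head;
	unsigned int tail;
	unsigned int max_slots;
};

static int ring_insert(struct ring *r)
{
	unsigned int next = (r->head + 1) % r->max_slots;

	if (next == r->tail)
		return 0;	/* full: the driver bumps num_overflows here */
	r->head = next;		/* slot r->head now holds the new signal */
	return 1;
}

static int ring_remove(struct ring *r)
{
	if (r->head == r->tail)
		return 0;	/* empty: nothing to remove */
	r->tail = (r->tail + 1) % r->max_slots;	/* slot r->tail is consumed */
	return 1;
}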
diff --git a/drivers/staging/unisys/visorchipset/file.c b/drivers/staging/unisys/visorchipset/file.c
index 3321764069d..373fa36b711 100644
--- a/drivers/staging/unisys/visorchipset/file.c
+++ b/drivers/staging/unisys/visorchipset/file.c
@@ -155,9 +155,9 @@ visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
return -ENXIO;
}
visorchannel_read(*PControlVm_channel,
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
- gpControlChannel), &addr,
- sizeof(addr));
+ offsetof(struct spar_controlvm_channel_protocol,
+ gp_control_channel),
+ &addr, sizeof(addr));
if (addr == 0) {
ERRDRV("%s control channel address is 0", __func__);
return -ENXIO;
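The hunk above reads a single member (gp_control_channel) out of the controlvm channel by computing its offset with offsetof(). A generic wrapper for that idiom might look like the following sketch; the macro name is made up for illustration, while visorchannel_read() is the existing accessor:

#include <linux/stddef.h>	/* offsetof() */

/* Read one member of a channel-resident structure into *dest.
 * Evaluates to the visorchannel_read() return value (< 0 on error).
 */
#define READ_CHANNEL_FIELD(chan, type, member, dest)			\
	visorchannel_read((chan), offsetof(type, member),		\
			  (dest), sizeof(*(dest)))

With such a helper, the call in visorchipset_mmap() would read as READ_CHANNEL_FIELD(*PControlVm_channel, struct spar_controlvm_channel_protocol, gp_control_channel, &addr).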
diff --git a/drivers/staging/unisys/visorchipset/parser.c b/drivers/staging/unisys/visorchipset/parser.c
index 661aaae9b15..9edbd3bbd18 100644
--- a/drivers/staging/unisys/visorchipset/parser.c
+++ b/drivers/staging/unisys/visorchipset/parser.c
@@ -47,8 +47,8 @@ parser_init_guts(u64 addr, u32 bytes, BOOL isLocal,
int allocbytes = sizeof(PARSER_CONTEXT) + bytes;
PARSER_CONTEXT *rc = NULL;
PARSER_CONTEXT *ctx = NULL;
- MEMREGION *rgn = NULL;
- ULTRA_CONTROLVM_PARAMETERS_HEADER *phdr = NULL;
+ struct memregion *rgn = NULL;
+ struct spar_controlvm_parameters_header *phdr = NULL;
if (tryAgain)
*tryAgain = FALSE;
@@ -110,27 +110,29 @@ parser_init_guts(u64 addr, u32 bytes, BOOL isLocal,
rc = ctx;
goto Away;
}
- phdr = (ULTRA_CONTROLVM_PARAMETERS_HEADER *) (ctx->data);
- if (phdr->TotalLength != bytes) {
+ phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
+ if (phdr->total_length != bytes) {
ERRDRV("%s - bad total length %lu (should be %lu)",
__func__,
- (ulong) (phdr->TotalLength), (ulong) (bytes));
+ (ulong) (phdr->total_length), (ulong) (bytes));
rc = NULL;
goto Away;
}
- if (phdr->TotalLength < phdr->HeaderLength) {
+ if (phdr->total_length < phdr->header_length) {
ERRDRV("%s - total length < header length (%lu < %lu)",
__func__,
- (ulong) (phdr->TotalLength),
- (ulong) (phdr->HeaderLength));
+ (ulong) (phdr->total_length),
+ (ulong) (phdr->header_length));
rc = NULL;
goto Away;
}
- if (phdr->HeaderLength < sizeof(ULTRA_CONTROLVM_PARAMETERS_HEADER)) {
+ if (phdr->header_length <
+ sizeof(struct spar_controlvm_parameters_header)) {
ERRDRV("%s - header is too small (%lu < %lu)",
__func__,
- (ulong) (phdr->HeaderLength),
- (ulong) (sizeof(ULTRA_CONTROLVM_PARAMETERS_HEADER)));
+ (ulong) (phdr->header_length),
+ (ulong)(sizeof(
+ struct spar_controlvm_parameters_header)));
rc = NULL;
goto Away;
}
@@ -159,7 +161,7 @@ parser_init(u64 addr, u32 bytes, BOOL isLocal, BOOL *tryAgain)
}
/* Call this instead of parser_init() if the payload area consists of just
- * a sequence of bytes, rather than a ULTRA_CONTROLVM_PARAMETERS_HEADER
+ * a sequence of bytes, rather than a struct spar_controlvm_parameters_header
* structures. Afterwards, you can call parser_simpleString_get() or
* parser_byteStream_get() to obtain the data.
*/
@@ -196,44 +198,44 @@ parser_byteStream_get(PARSER_CONTEXT *ctx, ulong *nbytes)
uuid_le
parser_id_get(PARSER_CONTEXT *ctx)
{
- ULTRA_CONTROLVM_PARAMETERS_HEADER *phdr = NULL;
+ struct spar_controlvm_parameters_header *phdr = NULL;
if (ctx == NULL) {
ERRDRV("%s (%s:%d) - no context",
__func__, __FILE__, __LINE__);
return NULL_UUID_LE;
}
- phdr = (ULTRA_CONTROLVM_PARAMETERS_HEADER *) (ctx->data);
- return phdr->Id;
+ phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
+ return phdr->id;
}
void
parser_param_start(PARSER_CONTEXT *ctx, PARSER_WHICH_STRING which_string)
{
- ULTRA_CONTROLVM_PARAMETERS_HEADER *phdr = NULL;
+ struct spar_controlvm_parameters_header *phdr = NULL;
if (ctx == NULL) {
ERRDRV("%s (%s:%d) - no context",
__func__, __FILE__, __LINE__);
goto Away;
}
- phdr = (ULTRA_CONTROLVM_PARAMETERS_HEADER *) (ctx->data);
+ phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
switch (which_string) {
case PARSERSTRING_INITIATOR:
- ctx->curr = ctx->data + phdr->InitiatorOffset;
- ctx->bytes_remaining = phdr->InitiatorLength;
+ ctx->curr = ctx->data + phdr->initiator_offset;
+ ctx->bytes_remaining = phdr->initiator_length;
break;
case PARSERSTRING_TARGET:
- ctx->curr = ctx->data + phdr->TargetOffset;
- ctx->bytes_remaining = phdr->TargetLength;
+ ctx->curr = ctx->data + phdr->target_offset;
+ ctx->bytes_remaining = phdr->target_length;
break;
case PARSERSTRING_CONNECTION:
- ctx->curr = ctx->data + phdr->ConnectionOffset;
- ctx->bytes_remaining = phdr->ConnectionLength;
+ ctx->curr = ctx->data + phdr->connection_offset;
+ ctx->bytes_remaining = phdr->connection_length;
break;
case PARSERSTRING_NAME:
- ctx->curr = ctx->data + phdr->NameOffset;
- ctx->bytes_remaining = phdr->NameLength;
+ ctx->curr = ctx->data + phdr->name_offset;
+ ctx->bytes_remaining = phdr->name_length;
break;
default:
ERRDRV("%s - bad which_string %d", __func__, which_string);
diff --git a/drivers/staging/unisys/visorchipset/testing.h b/drivers/staging/unisys/visorchipset/testing.h
index 015d502cbb1..573aa8b5ba6 100644
--- a/drivers/staging/unisys/visorchipset/testing.h
+++ b/drivers/staging/unisys/visorchipset/testing.h
@@ -23,8 +23,9 @@
#include "globals.h"
#include "controlvmchannel.h"
-void test_produce_test_message(CONTROLVM_MESSAGE *msg, int isLocalTestAddr);
-BOOL test_consume_test_message(CONTROLVM_MESSAGE *msg);
+void test_produce_test_message(struct controlvm_message *msg,
+ int isLocalTestAddr);
+BOOL test_consume_test_message(struct controlvm_message *msg);
void test_manufacture_vnic_client_add(void *p);
void test_manufacture_vnic_client_add_phys(HOSTADDRESS addr);
void test_manufacture_preamble_messages(void);
diff --git a/drivers/staging/unisys/visorchipset/visorchipset.h b/drivers/staging/unisys/visorchipset/visorchipset.h
index 2bf2e2f368e..46dad63fa2c 100644
--- a/drivers/staging/unisys/visorchipset/visorchipset.h
+++ b/drivers/staging/unisys/visorchipset/visorchipset.h
@@ -31,85 +31,85 @@
/** Describes the state from the perspective of which controlvm messages have
* been received for a bus or device.
*/
-typedef struct {
+struct visorchipset_state {
u32 created:1;
u32 attached:1;
u32 configured:1;
u32 running:1;
/* Add new fields above. */
/* Remaining bits in this 32-bit word are unused. */
-} VISORCHIPSET_STATE;
+};
-typedef enum {
+enum visorchipset_addresstype {
/** address is guest physical, but outside of the physical memory
* region that is controlled by the running OS (this is the normal
* address type for Supervisor channels)
*/
- ADDRTYPE_localPhysical,
+ ADDRTYPE_LOCALPHYSICAL,
/** address is guest physical, and withIN the confines of the
* physical memory controlled by the running OS.
*/
- ADDRTYPE_localTest,
-} VISORCHIPSET_ADDRESSTYPE;
+ ADDRTYPE_LOCALTEST,
+};
-typedef enum {
- CRASH_dev,
- CRASH_bus,
-} CRASH_OBJ_TYPE;
+enum crash_obj_type {
+ CRASH_DEV,
+ CRASH_BUS,
+};
/** Attributes for a particular Supervisor channel.
*/
-typedef struct {
- VISORCHIPSET_ADDRESSTYPE addrType;
- HOSTADDRESS channelAddr;
- struct InterruptInfo intr;
- u64 nChannelBytes;
- uuid_le channelTypeGuid;
- uuid_le channelInstGuid;
+struct visorchipset_channel_info {
+ enum visorchipset_addresstype addr_type;
+ HOSTADDRESS channel_addr;
+ struct irq_info intr;
+ u64 n_channel_bytes;
+ uuid_le channel_type_uuid;
+ uuid_le channel_inst_uuid;
-} VISORCHIPSET_CHANNEL_INFO;
+};
/** Attributes for a particular Supervisor device.
* Any visorchipset client can query these attributes using
* visorchipset_get_client_device_info() or
* visorchipset_get_server_device_info().
*/
-typedef struct {
+struct visorchipset_device_info {
struct list_head entry;
- u32 busNo;
- u32 devNo;
- uuid_le devInstGuid;
- VISORCHIPSET_STATE state;
- VISORCHIPSET_CHANNEL_INFO chanInfo;
- u32 Reserved1; /* CONTROLVM_ID */
- u64 Reserved2;
- u32 switchNo; /* when devState.attached==1 */
- u32 internalPortNo; /* when devState.attached==1 */
- CONTROLVM_MESSAGE_HEADER pendingMsgHdr; /* CONTROLVM_MESSAGE */
+ u32 bus_no;
+ u32 dev_no;
+ uuid_le dev_inst_uuid;
+ struct visorchipset_state state;
+ struct visorchipset_channel_info chan_info;
+ u32 reserved1; /* control_vm_id */
+ u64 reserved2;
+ u32 switch_no; /* when devState.attached==1 */
+ u32 internal_port_no; /* when devState.attached==1 */
+ struct controlvm_message_header pending_msg_hdr;/* CONTROLVM_MESSAGE */
/** For private use by the bus driver */
void *bus_driver_context;
-} VISORCHIPSET_DEVICE_INFO;
+};
-static inline VISORCHIPSET_DEVICE_INFO *
-finddevice(struct list_head *list, u32 busNo, u32 devNo)
+static inline struct visorchipset_device_info *finddevice(
+ struct list_head *list, u32 bus_no, u32 dev_no)
{
- VISORCHIPSET_DEVICE_INFO *p;
+ struct visorchipset_device_info *p;
list_for_each_entry(p, list, entry) {
- if (p->busNo == busNo && p->devNo == devNo)
+ if (p->bus_no == bus_no && p->dev_no == dev_no)
return p;
}
return NULL;
}
-static inline void delbusdevices(struct list_head *list, u32 busNo)
+static inline void delbusdevices(struct list_head *list, u32 bus_no)
{
- VISORCHIPSET_DEVICE_INFO *p, *tmp;
+ struct visorchipset_device_info *p, *tmp;
list_for_each_entry_safe(p, tmp, list, entry) {
- if (p->busNo == busNo) {
+ if (p->bus_no == bus_no) {
list_del(&p->entry);
kfree(p);
}
@@ -122,37 +122,37 @@ static inline void delbusdevices(struct list_head *list, u32 busNo)
* Any visorchipset client can query these attributes using
* visorchipset_get_client_bus_info() or visorchipset_get_bus_info().
*/
-typedef struct {
+struct visorchipset_bus_info {
struct list_head entry;
- u32 busNo;
- VISORCHIPSET_STATE state;
- VISORCHIPSET_CHANNEL_INFO chanInfo;
- uuid_le partitionGuid;
- u64 partitionHandle;
+ u32 bus_no;
+ struct visorchipset_state state;
+ struct visorchipset_channel_info chan_info;
+ uuid_le partition_uuid;
+ u64 partition_handle;
u8 *name; /* UTF8 */
u8 *description; /* UTF8 */
- u64 Reserved1;
- u32 Reserved2;
- MYPROCOBJECT *procObject;
+ u64 reserved1;
+ u32 reserved2;
+ MYPROCOBJECT *proc_object;
struct {
u32 server:1;
/* Add new fields above. */
/* Remaining bits in this 32-bit word are unused. */
} flags;
- CONTROLVM_MESSAGE_HEADER pendingMsgHdr; /* CONTROLVM MsgHdr */
+ struct controlvm_message_header pending_msg_hdr;/* CONTROLVM MsgHdr */
/** For private use by the bus driver */
void *bus_driver_context;
- u64 devNo;
+ u64 dev_no;
-} VISORCHIPSET_BUS_INFO;
+};
-static inline VISORCHIPSET_BUS_INFO *
-findbus(struct list_head *list, u32 busNo)
+static inline struct visorchipset_bus_info *
+findbus(struct list_head *list, u32 bus_no)
{
- VISORCHIPSET_BUS_INFO *p;
+ struct visorchipset_bus_info *p;
list_for_each_entry(p, list, entry) {
- if (p->busNo == busNo)
+ if (p->bus_no == bus_no)
return p;
}
return NULL;
@@ -160,75 +160,73 @@ findbus(struct list_head *list, u32 busNo)
/** Attributes for a particular Supervisor switch.
*/
-typedef struct {
- u32 switchNo;
- VISORCHIPSET_STATE state;
- uuid_le switchTypeGuid;
- u8 *authService1;
- u8 *authService2;
- u8 *authService3;
- u8 *securityContext;
- u64 Reserved;
- u32 Reserved2; /* CONTROLVM_ID */
+struct visorchipset_switch_info {
+ u32 switch_no;
+ struct visorchipset_state state;
+ uuid_le switch_type_uuid;
+ u8 *authservice1;
+ u8 *authservice2;
+ u8 *authservice3;
+ u8 *security_context;
+ u64 reserved;
+ u32 reserved2; /* control_vm_id */
struct device dev;
BOOL dev_exists;
- CONTROLVM_MESSAGE_HEADER pendingMsgHdr;
-
-} VISORCHIPSET_SWITCH_INFO;
+ struct controlvm_message_header pending_msg_hdr;
+};
/** Attributes for a particular Supervisor external port, which is connected
* to a specific switch.
*/
-typedef struct {
- u32 switchNo;
- u32 externalPortNo;
- VISORCHIPSET_STATE state;
- uuid_le networkZoneGuid;
- int pdPort;
+struct visorchipset_externalport_info {
+ u32 switch_no;
+ u32 external_port_no;
+ struct visorchipset_state state;
+ uuid_le network_zone_uuid;
+ int pd_port;
u8 *ip;
- u8 *ipNetmask;
- u8 *ipBroadcast;
- u8 *ipNetwork;
- u8 *ipGateway;
- u8 *ipDNS;
- u64 Reserved1;
- u32 Reserved2; /* CONTROLVM_ID */
+ u8 *ip_netmask;
+ u8 *ip_broadcast;
+ u8 *ip_network;
+ u8 *ip_gateway;
+ u8 *ip_dns;
+ u64 reserved1;
+ u32 reserved2; /* control_vm_id */
struct device dev;
BOOL dev_exists;
- CONTROLVM_MESSAGE_HEADER pendingMsgHdr;
-
-} VISORCHIPSET_EXTERNALPORT_INFO;
+ struct controlvm_message_header pending_msg_hdr;
+};
/** Attributes for a particular Supervisor internal port, which is how a
* device connects to a particular switch.
*/
-typedef struct {
- u32 switchNo;
- u32 internalPortNo;
- VISORCHIPSET_STATE state;
- u32 busNo; /* valid only when state.attached == 1 */
- u32 devNo; /* valid only when state.attached == 1 */
- u64 Reserved1;
- u32 Reserved2; /* CONTROLVM_ID */
- CONTROLVM_MESSAGE_HEADER pendingMsgHdr;
- MYPROCOBJECT *procObject;
-
-} VISORCHIPSET_INTERNALPORT_INFO;
+struct visorchipset_internalport_info {
+ u32 switch_no;
+ u32 internal_port_no;
+ struct visorchipset_state state;
+ u32 bus_no; /* valid only when state.attached == 1 */
+ u32 dev_no; /* valid only when state.attached == 1 */
+ u64 reserved1;
+ u32 reserved2; /* CONTROLVM_ID */
+ struct controlvm_message_header pending_msg_hdr;
+ MYPROCOBJECT *proc_object;
+
+};
/* These functions will be called from within visorchipset when certain
* events happen. (The implementation of these functions is outside of
* visorchipset.)
*/
-typedef struct {
- void (*bus_create)(ulong busNo);
- void (*bus_destroy)(ulong busNo);
- void (*device_create)(ulong busNo, ulong devNo);
- void (*device_destroy)(ulong busNo, ulong devNo);
- void (*device_pause)(ulong busNo, ulong devNo);
- void (*device_resume)(ulong busNo, ulong devNo);
- int (*get_channel_info)(uuid_le typeGuid, ulong *minSize,
- ulong *maxSize);
-} VISORCHIPSET_BUSDEV_NOTIFIERS;
+struct visorchipset_busdev_notifiers {
+ void (*bus_create)(ulong bus_no);
+ void (*bus_destroy)(ulong bus_no);
+ void (*device_create)(ulong bus_no, ulong dev_no);
+ void (*device_destroy)(ulong bus_no, ulong dev_no);
+ void (*device_pause)(ulong bus_no, ulong dev_no);
+ void (*device_resume)(ulong bus_no, ulong dev_no);
+ int (*get_channel_info)(uuid_le type_uuid, ulong *min_size,
+ ulong *max_size);
+};
/* These functions live inside visorchipset, and will be called to indicate
* responses to specific events (by code outside of visorchipset).
@@ -236,14 +234,14 @@ typedef struct {
* 0 = it worked
* -1 = it failed
*/
-typedef struct {
- void (*bus_create)(ulong busNo, int response);
- void (*bus_destroy)(ulong busNo, int response);
- void (*device_create)(ulong busNo, ulong devNo, int response);
- void (*device_destroy)(ulong busNo, ulong devNo, int response);
- void (*device_pause)(ulong busNo, ulong devNo, int response);
- void (*device_resume)(ulong busNo, ulong devNo, int response);
-} VISORCHIPSET_BUSDEV_RESPONDERS;
+struct visorchipset_busdev_responders {
+ void (*bus_create)(ulong bus_no, int response);
+ void (*bus_destroy)(ulong bus_no, int response);
+ void (*device_create)(ulong bus_no, ulong dev_no, int response);
+ void (*device_destroy)(ulong bus_no, ulong dev_no, int response);
+ void (*device_pause)(ulong bus_no, ulong dev_no, int response);
+ void (*device_resume)(ulong bus_no, ulong dev_no, int response);
+};
/** Register functions (in the bus driver) to get called by visorchipset
* whenever a bus or device appears for which this service partition is
@@ -252,9 +250,10 @@ typedef struct {
* responses.
*/
void
-visorchipset_register_busdev_client(VISORCHIPSET_BUSDEV_NOTIFIERS *notifiers,
- VISORCHIPSET_BUSDEV_RESPONDERS *responders,
- ULTRA_VBUS_DEVICEINFO *driverInfo);
+visorchipset_register_busdev_client(
+ struct visorchipset_busdev_notifiers *notifiers,
+ struct visorchipset_busdev_responders *responders,
+ struct ultra_vbus_deviceinfo *driver_info);
/** Register functions (in the bus driver) to get called by visorchipset
* whenever a bus or device appears for which this service partition is
@@ -263,47 +262,31 @@ visorchipset_register_busdev_client(VISORCHIPSET_BUSDEV_NOTIFIERS *notifiers,
* responses.
*/
void
-visorchipset_register_busdev_server(VISORCHIPSET_BUSDEV_NOTIFIERS *notifiers,
- VISORCHIPSET_BUSDEV_RESPONDERS *responders,
- ULTRA_VBUS_DEVICEINFO *driverInfo);
+visorchipset_register_busdev_server(
+ struct visorchipset_busdev_notifiers *notifiers,
+ struct visorchipset_busdev_responders *responders,
+ struct ultra_vbus_deviceinfo *driver_info);
-typedef void (*SPARREPORTEVENT_COMPLETE_FUNC) (CONTROLVM_MESSAGE *msg,
+typedef void (*SPARREPORTEVENT_COMPLETE_FUNC) (struct controlvm_message *msg,
int status);
-void visorchipset_device_pause_response(ulong busNo, ulong devNo, int response);
+void visorchipset_device_pause_response(ulong bus_no, ulong dev_no,
+ int response);
-BOOL visorchipset_get_bus_info(ulong busNo, VISORCHIPSET_BUS_INFO *busInfo);
-BOOL visorchipset_get_device_info(ulong busNo, ulong devNo,
- VISORCHIPSET_DEVICE_INFO *devInfo);
-BOOL visorchipset_get_switch_info(ulong switchNo,
- VISORCHIPSET_SWITCH_INFO *switchInfo);
-BOOL visorchipset_get_externalport_info(ulong switchNo, ulong externalPortNo,
- VISORCHIPSET_EXTERNALPORT_INFO
- *externalPortInfo);
-BOOL visorchipset_set_bus_context(ulong busNo, void *context);
-BOOL visorchipset_set_device_context(ulong busNo, ulong devNo, void *context);
+BOOL visorchipset_get_bus_info(ulong bus_no,
+ struct visorchipset_bus_info *bus_info);
+BOOL visorchipset_get_device_info(ulong bus_no, ulong dev_no,
+ struct visorchipset_device_info *dev_info);
+BOOL visorchipset_set_bus_context(ulong bus_no, void *context);
+BOOL visorchipset_set_device_context(ulong bus_no, ulong dev_no, void *context);
int visorchipset_chipset_ready(void);
int visorchipset_chipset_selftest(void);
int visorchipset_chipset_notready(void);
-void visorchipset_controlvm_respond_reportEvent(CONTROLVM_MESSAGE *msg,
- void *payload);
-void visorchipset_save_message(CONTROLVM_MESSAGE *msg, CRASH_OBJ_TYPE type);
+void visorchipset_save_message(struct controlvm_message *msg,
+ enum crash_obj_type type);
void *visorchipset_cache_alloc(struct kmem_cache *pool,
BOOL ok_to_block, char *fn, int ln);
void visorchipset_cache_free(struct kmem_cache *pool, void *p,
char *fn, int ln);
-#if defined(TRANSMITFILE_DEBUG) || defined(DEBUG)
-#define DBG_GETFILE_PAYLOAD(msg, controlvm_header) \
- LOGINF(msg, \
- (ulong)controlvm_header.PayloadVmOffset, \
- (ulong)controlvm_header.PayloadMaxBytes)
-#define DBG_GETFILE(fmt, ...) LOGINF(fmt, ##__VA_ARGS__)
-#define DBG_PUTFILE(fmt, ...) LOGINF(fmt, ##__VA_ARGS__)
-#else
-#define DBG_GETFILE_PAYLOAD(msg, controlvm_header)
-#define DBG_GETFILE(fmt, ...)
-#define DBG_PUTFILE(fmt, ...)
-#endif
-
#endif
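
The visorchipset.h conversion above keeps the register/notify contract intact: a bus driver fills in a struct visorchipset_busdev_notifiers, passes it to visorchipset_register_busdev_client() (or the _server() variant), and receives the struct visorchipset_busdev_responders it must invoke to complete each request (0 = success, -1 = failure, per the comment above). A minimal client-side sketch, assuming only the declarations shown in the hunks above; the my_* names are illustrative, not from the patch:

	#include "visorchipset.h"

	static struct visorchipset_busdev_responders my_responders;

	static void my_bus_create(ulong bus_no)
	{
		/* probe/attach the new bus here, then signal completion
		 * through the responder handed back at registration time
		 */
		my_responders.bus_create(bus_no, 0);
	}

	static void my_bus_destroy(ulong bus_no)
	{
		my_responders.bus_destroy(bus_no, 0);
	}

	static struct visorchipset_busdev_notifiers my_notifiers = {
		.bus_create = my_bus_create,
		.bus_destroy = my_bus_destroy,
	};

	static void my_busdriver_register(void)
	{
		struct ultra_vbus_deviceinfo driver_info;

		/* visorchipset copies its responder table into my_responders
		 * and fills driver_info via bus_device_info_init()
		 */
		visorchipset_register_busdev_client(&my_notifiers,
						    &my_responders,
						    &driver_info);
	}

After registration, visorchipset invokes the notifier callbacks as buses and devices appear, and the callbacks report completion through the responder functions, which call back into bus_responder()/device_responder() as seen in the visorchipset_main.c hunks below.
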
diff --git a/drivers/staging/unisys/visorchipset/visorchipset_main.c b/drivers/staging/unisys/visorchipset/visorchipset_main.c
index e5df39554a1..7e6be32cf7b 100644
--- a/drivers/staging/unisys/visorchipset/visorchipset_main.c
+++ b/drivers/staging/unisys/visorchipset/visorchipset_main.c
@@ -72,26 +72,28 @@ static struct workqueue_struct *Periodic_controlvm_workqueue;
static DEFINE_SEMAPHORE(NotifierLock);
typedef struct {
- CONTROLVM_MESSAGE message;
+ struct controlvm_message message;
unsigned int crc;
} MESSAGE_ENVELOPE;
-static CONTROLVM_MESSAGE_HEADER g_DiagMsgHdr;
-static CONTROLVM_MESSAGE_HEADER g_ChipSetMsgHdr;
-static CONTROLVM_MESSAGE_HEADER g_DelDumpMsgHdr;
+static struct controlvm_message_header g_DiagMsgHdr;
+static struct controlvm_message_header g_ChipSetMsgHdr;
+static struct controlvm_message_header g_DelDumpMsgHdr;
static const uuid_le UltraDiagPoolChannelProtocolGuid =
- ULTRA_DIAG_POOL_CHANNEL_PROTOCOL_GUID;
+ SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID;
/* 0xffffff is an invalid Bus/Device number */
static ulong g_diagpoolBusNo = 0xffffff;
static ulong g_diagpoolDevNo = 0xffffff;
-static CONTROLVM_MESSAGE_PACKET g_DeviceChangeStatePacket;
+static struct controlvm_message_packet g_DeviceChangeStatePacket;
/* Only VNIC and VHBA channels are sent to visorclientbus (aka
* "visorhackbus")
*/
#define FOR_VISORHACKBUS(channel_type_guid) \
- (((uuid_le_cmp(channel_type_guid, UltraVnicChannelProtocolGuid) == 0)\
- || (uuid_le_cmp(channel_type_guid, UltraVhbaChannelProtocolGuid) == 0)))
+ (((uuid_le_cmp(channel_type_guid,\
+ spar_vnic_channel_protocol_uuid) == 0)\
+ || (uuid_le_cmp(channel_type_guid,\
+ spar_vhba_channel_protocol_uuid) == 0)))
#define FOR_VISORBUS(channel_type_guid) (!(FOR_VISORHACKBUS(channel_type_guid)))
#define is_diagpool_channel(channel_type_guid) \
@@ -112,12 +114,12 @@ typedef struct {
/* Manages the request payload in the controlvm channel */
static CONTROLVM_PAYLOAD_INFO ControlVm_payload_info;
-static pCHANNEL_HEADER Test_Vnic_channel;
+static struct channel_header *Test_Vnic_channel;
typedef struct {
- CONTROLVM_MESSAGE_HEADER Dumpcapture_header;
- CONTROLVM_MESSAGE_HEADER Gettextdump_header;
- CONTROLVM_MESSAGE_HEADER Dumpcomplete_header;
+ struct controlvm_message_header Dumpcapture_header;
+ struct controlvm_message_header Gettextdump_header;
+ struct controlvm_message_header Dumpcomplete_header;
BOOL Gettextdump_outstanding;
u32 crc32;
ulong length;
@@ -134,7 +136,7 @@ static LIVEDUMP_INFO LiveDump_info;
* this scenario, we simply stash the controlvm message, then attempt to
* process it again the next time controlvm_periodic_work() runs.
*/
-static CONTROLVM_MESSAGE ControlVm_Pending_Msg;
+static struct controlvm_message ControlVm_Pending_Msg;
static BOOL ControlVm_Pending_Msg_Valid = FALSE;
/* Pool of struct putfile_buffer_entry, for keeping track of pending (incoming)
@@ -180,7 +182,7 @@ struct putfile_request {
u64 sig; /* PUTFILE_REQUEST_SIG */
/* header from original TransmitFile request */
- CONTROLVM_MESSAGE_HEADER controlvm_header;
+ struct controlvm_message_header controlvm_header;
u64 file_request_number; /* from original TransmitFile request */
/* link to next struct putfile_request */
@@ -218,7 +220,7 @@ struct parahotplug_request {
struct list_head list;
int id;
unsigned long expiration;
- CONTROLVM_MESSAGE msg;
+ struct controlvm_message msg;
};
static LIST_HEAD(Parahotplug_request_list);
@@ -228,8 +230,8 @@ static void parahotplug_process_list(void);
/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
* CONTROLVM_REPORTEVENT.
*/
-static VISORCHIPSET_BUSDEV_NOTIFIERS BusDev_Server_Notifiers;
-static VISORCHIPSET_BUSDEV_NOTIFIERS BusDev_Client_Notifiers;
+static struct visorchipset_busdev_notifiers BusDev_Server_Notifiers;
+static struct visorchipset_busdev_notifiers BusDev_Client_Notifiers;
static void bus_create_response(ulong busNo, int response);
static void bus_destroy_response(ulong busNo, int response);
@@ -237,7 +239,7 @@ static void device_create_response(ulong busNo, ulong devNo, int response);
static void device_destroy_response(ulong busNo, ulong devNo, int response);
static void device_resume_response(ulong busNo, ulong devNo, int response);
-static VISORCHIPSET_BUSDEV_RESPONDERS BusDev_Responders = {
+static struct visorchipset_busdev_responders BusDev_Responders = {
.bus_create = bus_create_response,
.bus_destroy = bus_destroy_response,
.device_create = device_create_response,
@@ -342,13 +344,14 @@ static struct platform_device Visorchipset_platform_device = {
};
/* Function prototypes */
-static void controlvm_respond(CONTROLVM_MESSAGE_HEADER *msgHdr, int response);
-static void controlvm_respond_chipset_init(CONTROLVM_MESSAGE_HEADER *msgHdr,
- int response,
- ULTRA_CHIPSET_FEATURE features);
-static void controlvm_respond_physdev_changestate(CONTROLVM_MESSAGE_HEADER *
- msgHdr, int response,
- ULTRA_SEGMENT_STATE state);
+static void controlvm_respond(struct controlvm_message_header *msgHdr,
+ int response);
+static void controlvm_respond_chipset_init(
+ struct controlvm_message_header *msgHdr, int response,
+ enum ultra_chipset_feature features);
+static void controlvm_respond_physdev_changestate(
+ struct controlvm_message_header *msgHdr, int response,
+ struct spar_segment_state state);
static ssize_t toolaction_show(struct device *dev,
struct device_attribute *attr,
@@ -357,8 +360,8 @@ static ssize_t toolaction_show(struct device *dev,
u8 toolAction;
visorchannel_read(ControlVm_channel,
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
- ToolAction), &toolAction, sizeof(u8));
+ offsetof(struct spar_controlvm_channel_protocol,
+ tool_action), &toolAction, sizeof(u8));
return scnprintf(buf, PAGE_SIZE, "%u\n", toolAction);
}
@@ -373,7 +376,7 @@ static ssize_t toolaction_store(struct device *dev,
return -EINVAL;
ret = visorchannel_write(ControlVm_channel,
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, ToolAction),
+ offsetof(struct spar_controlvm_channel_protocol, tool_action),
&toolAction, sizeof(u8));
if (ret)
@@ -385,14 +388,14 @@ static ssize_t boottotool_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- ULTRA_EFI_SPAR_INDICATION efiSparIndication;
+ struct efi_spar_indication efiSparIndication;
visorchannel_read(ControlVm_channel,
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
- EfiSparIndication), &efiSparIndication,
- sizeof(ULTRA_EFI_SPAR_INDICATION));
+ offsetof(struct spar_controlvm_channel_protocol,
+ efi_spar_ind), &efiSparIndication,
+ sizeof(struct efi_spar_indication));
return scnprintf(buf, PAGE_SIZE, "%u\n",
- efiSparIndication.BootToTool);
+ efiSparIndication.boot_to_tool);
}
static ssize_t boottotool_store(struct device *dev,
@@ -400,17 +403,17 @@ static ssize_t boottotool_store(struct device *dev,
const char *buf, size_t count)
{
int val, ret;
- ULTRA_EFI_SPAR_INDICATION efiSparIndication;
+ struct efi_spar_indication efiSparIndication;
if (kstrtoint(buf, 10, &val) != 0)
return -EINVAL;
- efiSparIndication.BootToTool = val;
+ efiSparIndication.boot_to_tool = val;
ret = visorchannel_write(ControlVm_channel,
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
- EfiSparIndication),
+ offsetof(struct spar_controlvm_channel_protocol,
+ efi_spar_ind),
&(efiSparIndication),
- sizeof(ULTRA_EFI_SPAR_INDICATION));
+ sizeof(struct efi_spar_indication));
if (ret)
return ret;
@@ -423,7 +426,7 @@ static ssize_t error_show(struct device *dev, struct device_attribute *attr,
u32 error;
visorchannel_read(ControlVm_channel, offsetof(
- ULTRA_CONTROLVM_CHANNEL_PROTOCOL, InstallationError),
+ struct spar_controlvm_channel_protocol, installation_error),
&error, sizeof(u32));
return scnprintf(buf, PAGE_SIZE, "%i\n", error);
}
@@ -438,8 +441,8 @@ static ssize_t error_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
ret = visorchannel_write(ControlVm_channel,
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
- InstallationError),
+ offsetof(struct spar_controlvm_channel_protocol,
+ installation_error),
&error, sizeof(u32));
if (ret)
return ret;
@@ -452,7 +455,7 @@ static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
u32 textId;
visorchannel_read(ControlVm_channel, offsetof(
- ULTRA_CONTROLVM_CHANNEL_PROTOCOL, InstallationTextId),
+ struct spar_controlvm_channel_protocol, installation_text_id),
&textId, sizeof(u32));
return scnprintf(buf, PAGE_SIZE, "%i\n", textId);
}
@@ -467,8 +470,8 @@ static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
ret = visorchannel_write(ControlVm_channel,
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
- InstallationTextId),
+ offsetof(struct spar_controlvm_channel_protocol,
+ installation_text_id),
&textId, sizeof(u32));
if (ret)
return ret;
@@ -482,8 +485,8 @@ static ssize_t remaining_steps_show(struct device *dev,
u16 remainingSteps;
visorchannel_read(ControlVm_channel,
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
- InstallationRemainingSteps),
+ offsetof(struct spar_controlvm_channel_protocol,
+ installation_remaining_steps),
&remainingSteps,
sizeof(u16));
return scnprintf(buf, PAGE_SIZE, "%hu\n", remainingSteps);
@@ -499,8 +502,8 @@ static ssize_t remaining_steps_store(struct device *dev,
return -EINVAL;
ret = visorchannel_write(ControlVm_channel,
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
- InstallationRemainingSteps),
+ offsetof(struct spar_controlvm_channel_protocol,
+ installation_remaining_steps),
&remainingSteps, sizeof(u16));
if (ret)
return ret;
@@ -539,11 +542,11 @@ testUnicode(void)
static void
busInfo_clear(void *v)
{
- VISORCHIPSET_BUS_INFO *p = (VISORCHIPSET_BUS_INFO *) (v);
+ struct visorchipset_bus_info *p = (struct visorchipset_bus_info *) (v);
- if (p->procObject) {
- visor_proc_DestroyObject(p->procObject);
- p->procObject = NULL;
+ if (p->proc_object) {
+ visor_proc_DestroyObject(p->proc_object);
+ p->proc_object = NULL;
}
kfree(p->name);
p->name = NULL;
@@ -552,16 +555,17 @@ busInfo_clear(void *v)
p->description = NULL;
p->state.created = 0;
- memset(p, 0, sizeof(VISORCHIPSET_BUS_INFO));
+ memset(p, 0, sizeof(struct visorchipset_bus_info));
}
static void
devInfo_clear(void *v)
{
- VISORCHIPSET_DEVICE_INFO *p = (VISORCHIPSET_DEVICE_INFO *) (v);
+ struct visorchipset_device_info *p =
+ (struct visorchipset_device_info *)(v);
p->state.created = 0;
- memset(p, 0, sizeof(VISORCHIPSET_DEVICE_INFO));
+ memset(p, 0, sizeof(struct visorchipset_device_info));
}
static u8
@@ -585,9 +589,10 @@ clear_chipset_events(void)
}
void
-visorchipset_register_busdev_server(VISORCHIPSET_BUSDEV_NOTIFIERS *notifiers,
- VISORCHIPSET_BUSDEV_RESPONDERS *responders,
- ULTRA_VBUS_DEVICEINFO *driverInfo)
+visorchipset_register_busdev_server(
+ struct visorchipset_busdev_notifiers *notifiers,
+ struct visorchipset_busdev_responders *responders,
+ struct ultra_vbus_deviceinfo *driver_info)
{
down(&NotifierLock);
if (notifiers == NULL) {
@@ -600,8 +605,8 @@ visorchipset_register_busdev_server(VISORCHIPSET_BUSDEV_NOTIFIERS *notifiers,
}
if (responders)
*responders = BusDev_Responders;
- if (driverInfo)
- bus_device_info_init(driverInfo, "chipset", "visorchipset",
+ if (driver_info)
+ bus_device_info_init(driver_info, "chipset", "visorchipset",
VERSION, NULL);
up(&NotifierLock);
@@ -609,9 +614,10 @@ visorchipset_register_busdev_server(VISORCHIPSET_BUSDEV_NOTIFIERS *notifiers,
EXPORT_SYMBOL_GPL(visorchipset_register_busdev_server);
void
-visorchipset_register_busdev_client(VISORCHIPSET_BUSDEV_NOTIFIERS *notifiers,
- VISORCHIPSET_BUSDEV_RESPONDERS *responders,
- ULTRA_VBUS_DEVICEINFO *driverInfo)
+visorchipset_register_busdev_client(
+ struct visorchipset_busdev_notifiers *notifiers,
+ struct visorchipset_busdev_responders *responders,
+ struct ultra_vbus_deviceinfo *driver_info)
{
down(&NotifierLock);
if (notifiers == NULL) {
@@ -624,9 +630,9 @@ visorchipset_register_busdev_client(VISORCHIPSET_BUSDEV_NOTIFIERS *notifiers,
}
if (responders)
*responders = BusDev_Responders;
- if (driverInfo)
- bus_device_info_init(driverInfo, "chipset(bolts)", "visorchipset",
- VERSION, NULL);
+ if (driver_info)
+ bus_device_info_init(driver_info, "chipset(bolts)",
+ "visorchipset", VERSION, NULL);
up(&NotifierLock);
}
EXPORT_SYMBOL_GPL(visorchipset_register_busdev_client);
@@ -634,8 +640,8 @@ EXPORT_SYMBOL_GPL(visorchipset_register_busdev_client);
static void
cleanup_controlvm_structures(void)
{
- VISORCHIPSET_BUS_INFO *bi, *tmp_bi;
- VISORCHIPSET_DEVICE_INFO *di, *tmp_di;
+ struct visorchipset_bus_info *bi, *tmp_bi;
+ struct visorchipset_device_info *di, *tmp_di;
list_for_each_entry_safe(bi, tmp_bi, &BusInfoList, entry) {
busInfo_clear(bi);
@@ -651,10 +657,10 @@ cleanup_controlvm_structures(void)
}
static void
-chipset_init(CONTROLVM_MESSAGE *inmsg)
+chipset_init(struct controlvm_message *inmsg)
{
static int chipset_inited;
- ULTRA_CHIPSET_FEATURE features = 0;
+ enum ultra_chipset_feature features = 0;
int rc = CONTROLVM_RESP_SUCCESS;
POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
@@ -669,7 +675,7 @@ chipset_init(CONTROLVM_MESSAGE *inmsg)
/* Set features to indicate we support parahotplug (if Command
* also supports it). */
features =
- inmsg->cmd.initChipset.
+ inmsg->cmd.init_chipset.
features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
/* Set the "reply" bit so Command knows this is a
@@ -679,42 +685,42 @@ chipset_init(CONTROLVM_MESSAGE *inmsg)
Away:
if (rc < 0)
cleanup_controlvm_structures();
- if (inmsg->hdr.Flags.responseExpected)
+ if (inmsg->hdr.flags.response_expected)
controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
}
static void
-controlvm_init_response(CONTROLVM_MESSAGE *msg,
- CONTROLVM_MESSAGE_HEADER *msgHdr, int response)
-{
- memset(msg, 0, sizeof(CONTROLVM_MESSAGE));
- memcpy(&msg->hdr, msgHdr, sizeof(CONTROLVM_MESSAGE_HEADER));
- msg->hdr.PayloadBytes = 0;
- msg->hdr.PayloadVmOffset = 0;
- msg->hdr.PayloadMaxBytes = 0;
+controlvm_init_response(struct controlvm_message *msg,
+ struct controlvm_message_header *msgHdr, int response)
+{
+ memset(msg, 0, sizeof(struct controlvm_message));
+ memcpy(&msg->hdr, msgHdr, sizeof(struct controlvm_message_header));
+ msg->hdr.payload_bytes = 0;
+ msg->hdr.payload_vm_offset = 0;
+ msg->hdr.payload_max_bytes = 0;
if (response < 0) {
- msg->hdr.Flags.failed = 1;
- msg->hdr.CompletionStatus = (u32) (-response);
+ msg->hdr.flags.failed = 1;
+ msg->hdr.completion_status = (u32) (-response);
}
}
static void
-controlvm_respond(CONTROLVM_MESSAGE_HEADER *msgHdr, int response)
+controlvm_respond(struct controlvm_message_header *msgHdr, int response)
{
- CONTROLVM_MESSAGE outmsg;
+ struct controlvm_message outmsg;
controlvm_init_response(&outmsg, msgHdr, response);
/* For DiagPool channel DEVICE_CHANGESTATE, we need to send
* back the deviceChangeState structure in the packet. */
- if (msgHdr->Id == CONTROLVM_DEVICE_CHANGESTATE
- && g_DeviceChangeStatePacket.deviceChangeState.busNo ==
+ if (msgHdr->id == CONTROLVM_DEVICE_CHANGESTATE
+ && g_DeviceChangeStatePacket.device_change_state.bus_no ==
g_diagpoolBusNo
- && g_DeviceChangeStatePacket.deviceChangeState.devNo ==
+ && g_DeviceChangeStatePacket.device_change_state.dev_no ==
g_diagpoolDevNo)
outmsg.cmd = g_DeviceChangeStatePacket;
- if (outmsg.hdr.Flags.testMessage == 1) {
+ if (outmsg.hdr.flags.test_message == 1) {
LOGINF("%s controlvm_msg=0x%x response=%d for test message",
- __func__, outmsg.hdr.Id, response);
+ __func__, outmsg.hdr.id, response);
return;
}
if (!visorchannel_signalinsert(ControlVm_channel,
@@ -725,13 +731,14 @@ controlvm_respond(CONTROLVM_MESSAGE_HEADER *msgHdr, int response)
}
static void
-controlvm_respond_chipset_init(CONTROLVM_MESSAGE_HEADER *msgHdr, int response,
- ULTRA_CHIPSET_FEATURE features)
+controlvm_respond_chipset_init(struct controlvm_message_header *msgHdr,
+ int response,
+ enum ultra_chipset_feature features)
{
- CONTROLVM_MESSAGE outmsg;
+ struct controlvm_message outmsg;
controlvm_init_response(&outmsg, msgHdr, response);
- outmsg.cmd.initChipset.features = features;
+ outmsg.cmd.init_chipset.features = features;
if (!visorchannel_signalinsert(ControlVm_channel,
CONTROLVM_QUEUE_REQUEST, &outmsg)) {
LOGERR("signalinsert failed!");
@@ -739,15 +746,15 @@ controlvm_respond_chipset_init(CONTROLVM_MESSAGE_HEADER *msgHdr, int response,
}
}
-static void
-controlvm_respond_physdev_changestate(CONTROLVM_MESSAGE_HEADER *msgHdr,
- int response, ULTRA_SEGMENT_STATE state)
+static void controlvm_respond_physdev_changestate(
+ struct controlvm_message_header *msgHdr, int response,
+ struct spar_segment_state state)
{
- CONTROLVM_MESSAGE outmsg;
+ struct controlvm_message outmsg;
controlvm_init_response(&outmsg, msgHdr, response);
- outmsg.cmd.deviceChangeState.state = state;
- outmsg.cmd.deviceChangeState.flags.physicalDevice = 1;
+ outmsg.cmd.device_change_state.state = state;
+ outmsg.cmd.device_change_state.flags.phys_device = 1;
if (!visorchannel_signalinsert(ControlVm_channel,
CONTROLVM_QUEUE_REQUEST, &outmsg)) {
LOGERR("signalinsert failed!");
@@ -756,15 +763,16 @@ controlvm_respond_physdev_changestate(CONTROLVM_MESSAGE_HEADER *msgHdr,
}
void
-visorchipset_save_message(CONTROLVM_MESSAGE *msg, CRASH_OBJ_TYPE type)
+visorchipset_save_message(struct controlvm_message *msg,
+ enum crash_obj_type type)
{
u32 localSavedCrashMsgOffset;
u16 localSavedCrashMsgCount;
/* get saved message count */
if (visorchannel_read(ControlVm_channel,
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
- SavedCrashMsgCount),
+ offsetof(struct spar_controlvm_channel_protocol,
+ saved_crash_message_count),
&localSavedCrashMsgCount, sizeof(u16)) < 0) {
LOGERR("failed to get Saved Message Count");
POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
@@ -783,8 +791,8 @@ visorchipset_save_message(CONTROLVM_MESSAGE *msg, CRASH_OBJ_TYPE type)
/* get saved crash message offset */
if (visorchannel_read(ControlVm_channel,
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
- SavedCrashMsgOffset),
+ offsetof(struct spar_controlvm_channel_protocol,
+ saved_crash_message_offset),
&localSavedCrashMsgOffset, sizeof(u32)) < 0) {
LOGERR("failed to get Saved Message Offset");
POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
@@ -792,10 +800,11 @@ visorchipset_save_message(CONTROLVM_MESSAGE *msg, CRASH_OBJ_TYPE type)
return;
}
- if (type == CRASH_bus) {
+ if (type == CRASH_BUS) {
if (visorchannel_write(ControlVm_channel,
localSavedCrashMsgOffset,
- msg, sizeof(CONTROLVM_MESSAGE)) < 0) {
+ msg,
+ sizeof(struct controlvm_message)) < 0) {
LOGERR("SAVE_MSG_BUS_FAILURE: Failed to write CrashCreateBusMsg!");
POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
POSTCODE_SEVERITY_ERR);
@@ -804,8 +813,8 @@ visorchipset_save_message(CONTROLVM_MESSAGE *msg, CRASH_OBJ_TYPE type)
} else {
if (visorchannel_write(ControlVm_channel,
localSavedCrashMsgOffset +
- sizeof(CONTROLVM_MESSAGE), msg,
- sizeof(CONTROLVM_MESSAGE)) < 0) {
+ sizeof(struct controlvm_message), msg,
+ sizeof(struct controlvm_message)) < 0) {
LOGERR("SAVE_MSG_DEV_FAILURE: Failed to write CrashCreateDevMsg!");
POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
POSTCODE_SEVERITY_ERR);
@@ -816,9 +825,9 @@ visorchipset_save_message(CONTROLVM_MESSAGE *msg, CRASH_OBJ_TYPE type)
EXPORT_SYMBOL_GPL(visorchipset_save_message);
static void
-bus_responder(CONTROLVM_ID cmdId, ulong busNo, int response)
+bus_responder(enum controlvm_id cmdId, ulong busNo, int response)
{
- VISORCHIPSET_BUS_INFO *p = NULL;
+ struct visorchipset_bus_info *p = NULL;
BOOL need_clear = FALSE;
p = findbus(&BusInfoList, busNo);
@@ -838,16 +847,16 @@ bus_responder(CONTROLVM_ID cmdId, ulong busNo, int response)
need_clear = TRUE;
}
- if (p->pendingMsgHdr.Id == CONTROLVM_INVALID) {
+ if (p->pending_msg_hdr.id == CONTROLVM_INVALID) {
LOGERR("bus_responder no pending msg");
return; /* no controlvm response needed */
}
- if (p->pendingMsgHdr.Id != (u32) cmdId) {
- LOGERR("expected=%d, found=%d", cmdId, p->pendingMsgHdr.Id);
+ if (p->pending_msg_hdr.id != (u32) cmdId) {
+ LOGERR("expected=%d, found=%d", cmdId, p->pending_msg_hdr.id);
return;
}
- controlvm_respond(&p->pendingMsgHdr, response);
- p->pendingMsgHdr.Id = CONTROLVM_INVALID;
+ controlvm_respond(&p->pending_msg_hdr, response);
+ p->pending_msg_hdr.id = CONTROLVM_INVALID;
if (need_clear) {
busInfo_clear(p);
delbusdevices(&DevInfoList, busNo);
@@ -855,32 +864,32 @@ bus_responder(CONTROLVM_ID cmdId, ulong busNo, int response)
}
static void
-device_changestate_responder(CONTROLVM_ID cmdId,
+device_changestate_responder(enum controlvm_id cmdId,
ulong busNo, ulong devNo, int response,
- ULTRA_SEGMENT_STATE responseState)
+ struct spar_segment_state responseState)
{
- VISORCHIPSET_DEVICE_INFO *p = NULL;
- CONTROLVM_MESSAGE outmsg;
+ struct visorchipset_device_info *p = NULL;
+ struct controlvm_message outmsg;
p = finddevice(&DevInfoList, busNo, devNo);
if (!p) {
LOGERR("internal error; busNo=%lu, devNo=%lu", busNo, devNo);
return;
}
- if (p->pendingMsgHdr.Id == CONTROLVM_INVALID) {
+ if (p->pending_msg_hdr.id == CONTROLVM_INVALID) {
LOGERR("device_responder no pending msg");
return; /* no controlvm response needed */
}
- if (p->pendingMsgHdr.Id != cmdId) {
- LOGERR("expected=%d, found=%d", cmdId, p->pendingMsgHdr.Id);
+ if (p->pending_msg_hdr.id != cmdId) {
+ LOGERR("expected=%d, found=%d", cmdId, p->pending_msg_hdr.id);
return;
}
- controlvm_init_response(&outmsg, &p->pendingMsgHdr, response);
+ controlvm_init_response(&outmsg, &p->pending_msg_hdr, response);
- outmsg.cmd.deviceChangeState.busNo = busNo;
- outmsg.cmd.deviceChangeState.devNo = devNo;
- outmsg.cmd.deviceChangeState.state = responseState;
+ outmsg.cmd.device_change_state.bus_no = busNo;
+ outmsg.cmd.device_change_state.dev_no = devNo;
+ outmsg.cmd.device_change_state.state = responseState;
if (!visorchannel_signalinsert(ControlVm_channel,
CONTROLVM_QUEUE_REQUEST, &outmsg)) {
@@ -888,13 +897,14 @@ device_changestate_responder(CONTROLVM_ID cmdId,
return;
}
- p->pendingMsgHdr.Id = CONTROLVM_INVALID;
+ p->pending_msg_hdr.id = CONTROLVM_INVALID;
}
static void
-device_responder(CONTROLVM_ID cmdId, ulong busNo, ulong devNo, int response)
+device_responder(enum controlvm_id cmdId, ulong busNo, ulong devNo,
+ int response)
{
- VISORCHIPSET_DEVICE_INFO *p = NULL;
+ struct visorchipset_device_info *p = NULL;
BOOL need_clear = FALSE;
p = finddevice(&DevInfoList, busNo, devNo);
@@ -909,38 +919,38 @@ device_responder(CONTROLVM_ID cmdId, ulong busNo, ulong devNo, int response)
need_clear = TRUE;
}
- if (p->pendingMsgHdr.Id == CONTROLVM_INVALID) {
+ if (p->pending_msg_hdr.id == CONTROLVM_INVALID) {
LOGERR("device_responder no pending msg");
return; /* no controlvm response needed */
}
- if (p->pendingMsgHdr.Id != (u32) cmdId) {
- LOGERR("expected=%d, found=%d", cmdId, p->pendingMsgHdr.Id);
+ if (p->pending_msg_hdr.id != (u32) cmdId) {
+ LOGERR("expected=%d, found=%d", cmdId, p->pending_msg_hdr.id);
return;
}
- controlvm_respond(&p->pendingMsgHdr, response);
- p->pendingMsgHdr.Id = CONTROLVM_INVALID;
+ controlvm_respond(&p->pending_msg_hdr, response);
+ p->pending_msg_hdr.id = CONTROLVM_INVALID;
if (need_clear)
devInfo_clear(p);
}
static void
bus_epilog(u32 busNo,
- u32 cmd, CONTROLVM_MESSAGE_HEADER *msgHdr,
+ u32 cmd, struct controlvm_message_header *msgHdr,
int response, BOOL needResponse)
{
BOOL notified = FALSE;
- VISORCHIPSET_BUS_INFO *pBusInfo = findbus(&BusInfoList, busNo);
+ struct visorchipset_bus_info *pBusInfo = findbus(&BusInfoList, busNo);
if (!pBusInfo) {
LOGERR("HUH? bad busNo=%d", busNo);
return;
}
if (needResponse) {
- memcpy(&pBusInfo->pendingMsgHdr, msgHdr,
- sizeof(CONTROLVM_MESSAGE_HEADER));
+ memcpy(&pBusInfo->pending_msg_hdr, msgHdr,
+ sizeof(struct controlvm_message_header));
} else
- pBusInfo->pendingMsgHdr.Id = CONTROLVM_INVALID;
+ pBusInfo->pending_msg_hdr.id = CONTROLVM_INVALID;
down(&NotifierLock);
if (response == CONTROLVM_RESP_SUCCESS) {
@@ -981,7 +991,7 @@ bus_epilog(u32 busNo,
}
if (notified)
/* The callback function just called above is responsible
- * for calling the appropriate VISORCHIPSET_BUSDEV_RESPONDERS
+ * for calling the appropriate visorchipset_busdev_responders
* function, which will call bus_responder()
*/
;
@@ -991,14 +1001,14 @@ bus_epilog(u32 busNo,
}
static void
-device_epilog(u32 busNo, u32 devNo, ULTRA_SEGMENT_STATE state, u32 cmd,
- CONTROLVM_MESSAGE_HEADER *msgHdr, int response,
+device_epilog(u32 busNo, u32 devNo, struct spar_segment_state state, u32 cmd,
+ struct controlvm_message_header *msgHdr, int response,
BOOL needResponse, BOOL for_visorbus)
{
- VISORCHIPSET_BUSDEV_NOTIFIERS *notifiers = NULL;
+ struct visorchipset_busdev_notifiers *notifiers = NULL;
BOOL notified = FALSE;
- VISORCHIPSET_DEVICE_INFO *pDevInfo =
+ struct visorchipset_device_info *pDevInfo =
finddevice(&DevInfoList, busNo, devNo);
char *envp[] = {
"SPARSP_DIAGPOOL_PAUSED_STATE = 1",
@@ -1014,10 +1024,10 @@ device_epilog(u32 busNo, u32 devNo, ULTRA_SEGMENT_STATE state, u32 cmd,
else
notifiers = &BusDev_Client_Notifiers;
if (needResponse) {
- memcpy(&pDevInfo->pendingMsgHdr, msgHdr,
- sizeof(CONTROLVM_MESSAGE_HEADER));
+ memcpy(&pDevInfo->pending_msg_hdr, msgHdr,
+ sizeof(struct controlvm_message_header));
} else
- pDevInfo->pendingMsgHdr.Id = CONTROLVM_INVALID;
+ pDevInfo->pending_msg_hdr.id = CONTROLVM_INVALID;
down(&NotifierLock);
if (response >= 0) {
@@ -1030,8 +1040,9 @@ device_epilog(u32 busNo, u32 devNo, ULTRA_SEGMENT_STATE state, u32 cmd,
break;
case CONTROLVM_DEVICE_CHANGESTATE:
/* ServerReady / ServerRunning / SegmentStateRunning */
- if (state.Alive == SegmentStateRunning.Alive &&
- state.Operating == SegmentStateRunning.Operating) {
+ if (state.alive == segment_state_running.alive &&
+ state.operating ==
+ segment_state_running.operating) {
if (notifiers->device_resume) {
(*notifiers->device_resume) (busNo,
devNo);
@@ -1039,9 +1050,9 @@ device_epilog(u32 busNo, u32 devNo, ULTRA_SEGMENT_STATE state, u32 cmd,
}
}
/* ServerNotReady / ServerLost / SegmentStateStandby */
- else if (state.Alive == SegmentStateStandby.Alive &&
- state.Operating ==
- SegmentStateStandby.Operating) {
+ else if (state.alive == segment_state_standby.alive &&
+ state.operating ==
+ segment_state_standby.operating) {
/* technically this is standby case
* where server is lost
*/
@@ -1050,9 +1061,9 @@ device_epilog(u32 busNo, u32 devNo, ULTRA_SEGMENT_STATE state, u32 cmd,
devNo);
notified = TRUE;
}
- } else if (state.Alive == SegmentStatePaused.Alive &&
- state.Operating ==
- SegmentStatePaused.Operating) {
+ } else if (state.alive == segment_state_paused.alive &&
+ state.operating ==
+ segment_state_paused.operating) {
/* this is lite pause where channel is
* still valid just 'pause' of it
*/
@@ -1079,7 +1090,7 @@ device_epilog(u32 busNo, u32 devNo, ULTRA_SEGMENT_STATE state, u32 cmd,
}
if (notified)
/* The callback function just called above is responsible
- * for calling the appropriate VISORCHIPSET_BUSDEV_RESPONDERS
+ * for calling the appropriate visorchipset_busdev_responders
* function, which will call device_responder()
*/
;
@@ -1089,12 +1100,12 @@ device_epilog(u32 busNo, u32 devNo, ULTRA_SEGMENT_STATE state, u32 cmd,
}
static void
-bus_create(CONTROLVM_MESSAGE *inmsg)
+bus_create(struct controlvm_message *inmsg)
{
- CONTROLVM_MESSAGE_PACKET *cmd = &inmsg->cmd;
- ulong busNo = cmd->createBus.busNo;
+ struct controlvm_message_packet *cmd = &inmsg->cmd;
+ ulong busNo = cmd->create_bus.bus_no;
int rc = CONTROLVM_RESP_SUCCESS;
- VISORCHIPSET_BUS_INFO *pBusInfo = NULL;
+ struct visorchipset_bus_info *pBusInfo = NULL;
pBusInfo = findbus(&BusInfoList, busNo);
@@ -1106,7 +1117,7 @@ bus_create(CONTROLVM_MESSAGE *inmsg)
rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
goto Away;
}
- pBusInfo = kzalloc(sizeof(VISORCHIPSET_BUS_INFO), GFP_KERNEL);
+ pBusInfo = kzalloc(sizeof(struct visorchipset_bus_info), GFP_KERNEL);
if (pBusInfo == NULL) {
LOGERR("CONTROLVM_BUS_CREATE Failed: bus %lu kzalloc failed",
busNo);
@@ -1117,21 +1128,22 @@ bus_create(CONTROLVM_MESSAGE *inmsg)
}
INIT_LIST_HEAD(&pBusInfo->entry);
- pBusInfo->busNo = busNo;
- pBusInfo->devNo = cmd->createBus.deviceCount;
+ pBusInfo->bus_no = busNo;
+ pBusInfo->dev_no = cmd->create_bus.dev_count;
POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, busNo, POSTCODE_SEVERITY_INFO);
- if (inmsg->hdr.Flags.testMessage == 1)
- pBusInfo->chanInfo.addrType = ADDRTYPE_localTest;
+ if (inmsg->hdr.flags.test_message == 1)
+ pBusInfo->chan_info.addr_type = ADDRTYPE_LOCALTEST;
else
- pBusInfo->chanInfo.addrType = ADDRTYPE_localPhysical;
+ pBusInfo->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
- pBusInfo->flags.server = inmsg->hdr.Flags.server;
- pBusInfo->chanInfo.channelAddr = cmd->createBus.channelAddr;
- pBusInfo->chanInfo.nChannelBytes = cmd->createBus.channelBytes;
- pBusInfo->chanInfo.channelTypeGuid = cmd->createBus.busDataTypeGuid;
- pBusInfo->chanInfo.channelInstGuid = cmd->createBus.busInstGuid;
+ pBusInfo->flags.server = inmsg->hdr.flags.server;
+ pBusInfo->chan_info.channel_addr = cmd->create_bus.channel_addr;
+ pBusInfo->chan_info.n_channel_bytes = cmd->create_bus.channel_bytes;
+ pBusInfo->chan_info.channel_type_uuid =
+ cmd->create_bus.bus_data_type_uuid;
+ pBusInfo->chan_info.channel_inst_uuid = cmd->create_bus.bus_inst_uuid;
list_add(&pBusInfo->entry, &BusInfoList);
@@ -1139,15 +1151,15 @@ bus_create(CONTROLVM_MESSAGE *inmsg)
Away:
bus_epilog(busNo, CONTROLVM_BUS_CREATE, &inmsg->hdr,
- rc, inmsg->hdr.Flags.responseExpected == 1);
+ rc, inmsg->hdr.flags.response_expected == 1);
}
static void
-bus_destroy(CONTROLVM_MESSAGE *inmsg)
+bus_destroy(struct controlvm_message *inmsg)
{
- CONTROLVM_MESSAGE_PACKET *cmd = &inmsg->cmd;
- ulong busNo = cmd->destroyBus.busNo;
- VISORCHIPSET_BUS_INFO *pBusInfo;
+ struct controlvm_message_packet *cmd = &inmsg->cmd;
+ ulong busNo = cmd->destroy_bus.bus_no;
+ struct visorchipset_bus_info *pBusInfo;
int rc = CONTROLVM_RESP_SUCCESS;
pBusInfo = findbus(&BusInfoList, busNo);
@@ -1165,19 +1177,19 @@ bus_destroy(CONTROLVM_MESSAGE *inmsg)
Away:
bus_epilog(busNo, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
- rc, inmsg->hdr.Flags.responseExpected == 1);
+ rc, inmsg->hdr.flags.response_expected == 1);
}
static void
-bus_configure(CONTROLVM_MESSAGE *inmsg, PARSER_CONTEXT *parser_ctx)
+bus_configure(struct controlvm_message *inmsg, PARSER_CONTEXT *parser_ctx)
{
- CONTROLVM_MESSAGE_PACKET *cmd = &inmsg->cmd;
- ulong busNo = cmd->configureBus.busNo;
- VISORCHIPSET_BUS_INFO *pBusInfo = NULL;
+ struct controlvm_message_packet *cmd = &inmsg->cmd;
+ ulong busNo = cmd->configure_bus.bus_no;
+ struct visorchipset_bus_info *pBusInfo = NULL;
int rc = CONTROLVM_RESP_SUCCESS;
char s[99];
- busNo = cmd->configureBus.busNo;
+ busNo = cmd->configure_bus.bus_no;
POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, busNo, POSTCODE_SEVERITY_INFO);
pBusInfo = findbus(&BusInfoList, busNo);
@@ -1198,35 +1210,35 @@ bus_configure(CONTROLVM_MESSAGE *inmsg, PARSER_CONTEXT *parser_ctx)
goto Away;
}
/* TBD - add this check to other commands also... */
- if (pBusInfo->pendingMsgHdr.Id != CONTROLVM_INVALID) {
+ if (pBusInfo->pending_msg_hdr.id != CONTROLVM_INVALID) {
LOGERR("CONTROLVM_BUS_CONFIGURE Failed: bus %lu MsgId=%u outstanding",
- busNo, (uint) pBusInfo->pendingMsgHdr.Id);
+ busNo, (uint) pBusInfo->pending_msg_hdr.id);
POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
POSTCODE_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
goto Away;
}
- pBusInfo->partitionHandle = cmd->configureBus.guestHandle;
- pBusInfo->partitionGuid = parser_id_get(parser_ctx);
+ pBusInfo->partition_handle = cmd->configure_bus.guest_handle;
+ pBusInfo->partition_uuid = parser_id_get(parser_ctx);
parser_param_start(parser_ctx, PARSERSTRING_NAME);
pBusInfo->name = parser_string_get(parser_ctx);
- visorchannel_uuid_id(&pBusInfo->partitionGuid, s);
+ visorchannel_uuid_id(&pBusInfo->partition_uuid, s);
POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, busNo, POSTCODE_SEVERITY_INFO);
Away:
bus_epilog(busNo, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
- rc, inmsg->hdr.Flags.responseExpected == 1);
+ rc, inmsg->hdr.flags.response_expected == 1);
}
static void
-my_device_create(CONTROLVM_MESSAGE *inmsg)
+my_device_create(struct controlvm_message *inmsg)
{
- CONTROLVM_MESSAGE_PACKET *cmd = &inmsg->cmd;
- ulong busNo = cmd->createDevice.busNo;
- ulong devNo = cmd->createDevice.devNo;
- VISORCHIPSET_DEVICE_INFO *pDevInfo = NULL;
- VISORCHIPSET_BUS_INFO *pBusInfo = NULL;
+ struct controlvm_message_packet *cmd = &inmsg->cmd;
+ ulong busNo = cmd->create_device.bus_no;
+ ulong devNo = cmd->create_device.dev_no;
+ struct visorchipset_device_info *pDevInfo = NULL;
+ struct visorchipset_bus_info *pBusInfo = NULL;
int rc = CONTROLVM_RESP_SUCCESS;
pDevInfo = finddevice(&DevInfoList, busNo, devNo);
@@ -1255,7 +1267,7 @@ my_device_create(CONTROLVM_MESSAGE *inmsg)
rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
goto Away;
}
- pDevInfo = kzalloc(sizeof(VISORCHIPSET_DEVICE_INFO), GFP_KERNEL);
+ pDevInfo = kzalloc(sizeof(struct visorchipset_device_info), GFP_KERNEL);
if (pDevInfo == NULL) {
LOGERR("CONTROLVM_DEVICE_CREATE Failed: busNo=%lu, devNo=%lu kmaloc failed",
busNo, devNo);
@@ -1266,45 +1278,47 @@ my_device_create(CONTROLVM_MESSAGE *inmsg)
}
INIT_LIST_HEAD(&pDevInfo->entry);
- pDevInfo->busNo = busNo;
- pDevInfo->devNo = devNo;
- pDevInfo->devInstGuid = cmd->createDevice.devInstGuid;
+ pDevInfo->bus_no = busNo;
+ pDevInfo->dev_no = devNo;
+ pDevInfo->dev_inst_uuid = cmd->create_device.dev_inst_uuid;
POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, devNo, busNo,
POSTCODE_SEVERITY_INFO);
- if (inmsg->hdr.Flags.testMessage == 1)
- pDevInfo->chanInfo.addrType = ADDRTYPE_localTest;
+ if (inmsg->hdr.flags.test_message == 1)
+ pDevInfo->chan_info.addr_type = ADDRTYPE_LOCALTEST;
else
- pDevInfo->chanInfo.addrType = ADDRTYPE_localPhysical;
- pDevInfo->chanInfo.channelAddr = cmd->createDevice.channelAddr;
- pDevInfo->chanInfo.nChannelBytes = cmd->createDevice.channelBytes;
- pDevInfo->chanInfo.channelTypeGuid = cmd->createDevice.dataTypeGuid;
- pDevInfo->chanInfo.intr = cmd->createDevice.intr;
+ pDevInfo->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
+ pDevInfo->chan_info.channel_addr = cmd->create_device.channel_addr;
+ pDevInfo->chan_info.n_channel_bytes = cmd->create_device.channel_bytes;
+ pDevInfo->chan_info.channel_type_uuid =
+ cmd->create_device.data_type_uuid;
+ pDevInfo->chan_info.intr = cmd->create_device.intr;
list_add(&pDevInfo->entry, &DevInfoList);
POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, devNo, busNo,
POSTCODE_SEVERITY_INFO);
Away:
/* get the bus and devNo for DiagPool channel */
- if (is_diagpool_channel(pDevInfo->chanInfo.channelTypeGuid)) {
+ if (pDevInfo &&
+ is_diagpool_channel(pDevInfo->chan_info.channel_type_uuid)) {
g_diagpoolBusNo = busNo;
g_diagpoolDevNo = devNo;
LOGINF("CONTROLVM_DEVICE_CREATE for DiagPool channel: busNo=%lu, devNo=%lu",
g_diagpoolBusNo, g_diagpoolDevNo);
}
- device_epilog(busNo, devNo, SegmentStateRunning,
+ device_epilog(busNo, devNo, segment_state_running,
CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
- inmsg->hdr.Flags.responseExpected == 1,
- FOR_VISORBUS(pDevInfo->chanInfo.channelTypeGuid));
+ inmsg->hdr.flags.response_expected == 1,
+ FOR_VISORBUS(pDevInfo->chan_info.channel_type_uuid));
}
static void
-my_device_changestate(CONTROLVM_MESSAGE *inmsg)
+my_device_changestate(struct controlvm_message *inmsg)
{
- CONTROLVM_MESSAGE_PACKET *cmd = &inmsg->cmd;
- ulong busNo = cmd->deviceChangeState.busNo;
- ulong devNo = cmd->deviceChangeState.devNo;
- ULTRA_SEGMENT_STATE state = cmd->deviceChangeState.state;
- VISORCHIPSET_DEVICE_INFO *pDevInfo = NULL;
+ struct controlvm_message_packet *cmd = &inmsg->cmd;
+ ulong busNo = cmd->device_change_state.bus_no;
+ ulong devNo = cmd->device_change_state.dev_no;
+ struct spar_segment_state state = cmd->device_change_state.state;
+ struct visorchipset_device_info *pDevInfo = NULL;
int rc = CONTROLVM_RESP_SUCCESS;
pDevInfo = finddevice(&DevInfoList, busNo, devNo);
@@ -1327,17 +1341,18 @@ Away:
if ((rc >= CONTROLVM_RESP_SUCCESS) && pDevInfo)
device_epilog(busNo, devNo, state, CONTROLVM_DEVICE_CHANGESTATE,
&inmsg->hdr, rc,
- inmsg->hdr.Flags.responseExpected == 1,
- FOR_VISORBUS(pDevInfo->chanInfo.channelTypeGuid));
+ inmsg->hdr.flags.response_expected == 1,
+ FOR_VISORBUS(
+ pDevInfo->chan_info.channel_type_uuid));
}
static void
-my_device_destroy(CONTROLVM_MESSAGE *inmsg)
+my_device_destroy(struct controlvm_message *inmsg)
{
- CONTROLVM_MESSAGE_PACKET *cmd = &inmsg->cmd;
- ulong busNo = cmd->destroyDevice.busNo;
- ulong devNo = cmd->destroyDevice.devNo;
- VISORCHIPSET_DEVICE_INFO *pDevInfo = NULL;
+ struct controlvm_message_packet *cmd = &inmsg->cmd;
+ ulong busNo = cmd->destroy_device.bus_no;
+ ulong devNo = cmd->destroy_device.dev_no;
+ struct visorchipset_device_info *pDevInfo = NULL;
int rc = CONTROLVM_RESP_SUCCESS;
pDevInfo = finddevice(&DevInfoList, busNo, devNo);
@@ -1355,10 +1370,11 @@ my_device_destroy(CONTROLVM_MESSAGE *inmsg)
Away:
if ((rc >= CONTROLVM_RESP_SUCCESS) && pDevInfo)
- device_epilog(busNo, devNo, SegmentStateRunning,
+ device_epilog(busNo, devNo, segment_state_running,
CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
- inmsg->hdr.Flags.responseExpected == 1,
- FOR_VISORBUS(pDevInfo->chanInfo.channelTypeGuid));
+ inmsg->hdr.flags.response_expected == 1,
+ FOR_VISORBUS(
+ pDevInfo->chan_info.channel_type_uuid));
}
/* When provided with the physical address of the controlvm channel
@@ -1382,7 +1398,7 @@ initialize_controlvm_payload_info(HOSTADDRESS phys_addr, u64 offset, u32 bytes,
}
memset(info, 0, sizeof(CONTROLVM_PAYLOAD_INFO));
if ((offset == 0) || (bytes == 0)) {
- LOGERR("CONTROLVM_PAYLOAD_INIT Failed: RequestPayloadOffset=%llu RequestPayloadBytes=%llu!",
+ LOGERR("CONTROLVM_PAYLOAD_INIT Failed: request_payload_offset=%llu request_payload_bytes=%llu!",
(u64) offset, (u64) bytes);
rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
goto Away;
@@ -1429,8 +1445,8 @@ initialize_controlvm_payload(void)
u32 payloadBytes = 0;
if (visorchannel_read(ControlVm_channel,
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
- RequestPayloadOffset),
+ offsetof(struct spar_controlvm_channel_protocol,
+ request_payload_offset),
&payloadOffset, sizeof(payloadOffset)) < 0) {
LOGERR("CONTROLVM_PAYLOAD_INIT Failed to read controlvm channel!");
POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
@@ -1438,8 +1454,8 @@ initialize_controlvm_payload(void)
return;
}
if (visorchannel_read(ControlVm_channel,
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
- RequestPayloadBytes),
+ offsetof(struct spar_controlvm_channel_protocol,
+ request_payload_bytes),
&payloadBytes, sizeof(payloadBytes)) < 0) {
LOGERR("CONTROLVM_PAYLOAD_INIT Failed to read controlvm channel!");
POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
@@ -1487,15 +1503,15 @@ visorchipset_chipset_notready(void)
EXPORT_SYMBOL_GPL(visorchipset_chipset_notready);
static void
-chipset_ready(CONTROLVM_MESSAGE_HEADER *msgHdr)
+chipset_ready(struct controlvm_message_header *msgHdr)
{
int rc = visorchipset_chipset_ready();
if (rc != CONTROLVM_RESP_SUCCESS)
rc = -rc;
- if (msgHdr->Flags.responseExpected && !visorchipset_holdchipsetready)
+ if (msgHdr->flags.response_expected && !visorchipset_holdchipsetready)
controlvm_respond(msgHdr, rc);
- if (msgHdr->Flags.responseExpected && visorchipset_holdchipsetready) {
+ if (msgHdr->flags.response_expected && visorchipset_holdchipsetready) {
/* Send CHIPSET_READY response when all modules have been loaded
* and disks mounted for the partition
*/
@@ -1505,24 +1521,24 @@ chipset_ready(CONTROLVM_MESSAGE_HEADER *msgHdr)
}
static void
-chipset_selftest(CONTROLVM_MESSAGE_HEADER *msgHdr)
+chipset_selftest(struct controlvm_message_header *msgHdr)
{
int rc = visorchipset_chipset_selftest();
if (rc != CONTROLVM_RESP_SUCCESS)
rc = -rc;
- if (msgHdr->Flags.responseExpected)
+ if (msgHdr->flags.response_expected)
controlvm_respond(msgHdr, rc);
}
static void
-chipset_notready(CONTROLVM_MESSAGE_HEADER *msgHdr)
+chipset_notready(struct controlvm_message_header *msgHdr)
{
int rc = visorchipset_chipset_notready();
if (rc != CONTROLVM_RESP_SUCCESS)
rc = -rc;
- if (msgHdr->Flags.responseExpected)
+ if (msgHdr->flags.response_expected)
controlvm_respond(msgHdr, rc);
}
@@ -1530,13 +1546,14 @@ chipset_notready(CONTROLVM_MESSAGE_HEADER *msgHdr)
* CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
*/
static BOOL
-read_controlvm_event(CONTROLVM_MESSAGE *msg)
+read_controlvm_event(struct controlvm_message *msg)
{
if (visorchannel_signalremove(ControlVm_channel,
CONTROLVM_QUEUE_EVENT, msg)) {
/* got a message */
- if (msg->hdr.Flags.testMessage == 1) {
- LOGERR("ignoring bad CONTROLVM_QUEUE_EVENT msg with controlvm_msg_id=0x%x because Flags.testMessage is nonsensical (=1)", msg->hdr.Id);
+ if (msg->hdr.flags.test_message == 1) {
+ LOGERR("ignoring bad CONTROLVM_QUEUE_EVENT msg with controlvm_msg_id=0x%x because Flags.testMessage is nonsensical (=1)",
+ msg->hdr.id);
return FALSE;
}
return TRUE;
@@ -1586,7 +1603,7 @@ parahotplug_next_expiration(void)
* CONTROLVM_MESSAGE that we can stick on a list
*/
static struct parahotplug_request *
-parahotplug_request_create(CONTROLVM_MESSAGE *msg)
+parahotplug_request_create(struct controlvm_message *msg)
{
struct parahotplug_request *req =
kmalloc(sizeof(struct parahotplug_request),
@@ -1618,7 +1635,7 @@ parahotplug_request_destroy(struct parahotplug_request *req)
static void
parahotplug_request_kickoff(struct parahotplug_request *req)
{
- CONTROLVM_MESSAGE_PACKET *cmd = &req->msg.cmd;
+ struct controlvm_message_packet *cmd = &req->msg.cmd;
char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
env_func[40];
char *envp[] = {
@@ -1628,18 +1645,19 @@ parahotplug_request_kickoff(struct parahotplug_request *req)
sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
- cmd->deviceChangeState.state.Active);
+ cmd->device_change_state.state.active);
sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
- cmd->deviceChangeState.busNo);
+ cmd->device_change_state.bus_no);
sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
- cmd->deviceChangeState.devNo >> 3);
+ cmd->device_change_state.dev_no >> 3);
sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
- cmd->deviceChangeState.devNo & 0x7);
+ cmd->device_change_state.dev_no & 0x7);
LOGINF("parahotplug_request_kickoff: state=%d, bdf=%d/%d/%d, id=%u\n",
- cmd->deviceChangeState.state.Active,
- cmd->deviceChangeState.busNo, cmd->deviceChangeState.devNo >> 3,
- cmd->deviceChangeState.devNo & 7, req->id);
+ cmd->device_change_state.state.active,
+ cmd->device_change_state.bus_no,
+ cmd->device_change_state.dev_no >> 3,
+ cmd->device_change_state.dev_no & 7, req->id);
kobject_uevent_env(&Visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
envp);
@@ -1662,11 +1680,11 @@ parahotplug_process_list(void)
list_entry(pos, struct parahotplug_request, list);
if (time_after_eq(jiffies, req->expiration)) {
list_del(pos);
- if (req->msg.hdr.Flags.responseExpected)
+ if (req->msg.hdr.flags.response_expected)
controlvm_respond_physdev_changestate(
&req->msg.hdr,
CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
- req->msg.cmd.deviceChangeState.state);
+ req->msg.cmd.device_change_state.state);
parahotplug_request_destroy(req);
}
}
@@ -1697,11 +1715,11 @@ parahotplug_request_complete(int id, u16 active)
*/
list_del(pos);
spin_unlock(&Parahotplug_request_list_lock);
- req->msg.cmd.deviceChangeState.state.Active = active;
- if (req->msg.hdr.Flags.responseExpected)
+ req->msg.cmd.device_change_state.state.active = active;
+ if (req->msg.hdr.flags.response_expected)
controlvm_respond_physdev_changestate(
&req->msg.hdr, CONTROLVM_RESP_SUCCESS,
- req->msg.cmd.deviceChangeState.state);
+ req->msg.cmd.device_change_state.state);
parahotplug_request_destroy(req);
return 0;
}
@@ -1715,7 +1733,7 @@ parahotplug_request_complete(int id, u16 active)
* Enables or disables a PCI device by kicking off a udev script
*/
static void
-parahotplug_process_message(CONTROLVM_MESSAGE *inmsg)
+parahotplug_process_message(struct controlvm_message *inmsg)
{
struct parahotplug_request *req;
@@ -1726,7 +1744,7 @@ parahotplug_process_message(CONTROLVM_MESSAGE *inmsg)
return;
}
- if (inmsg->cmd.deviceChangeState.state.Active) {
+ if (inmsg->cmd.device_change_state.state.active) {
/* For enable messages, just respond with success
* right away. This is a bit of a hack, but there are
* issues with the early enable messages we get (with
@@ -1738,9 +1756,8 @@ parahotplug_process_message(CONTROLVM_MESSAGE *inmsg)
*/
parahotplug_request_kickoff(req);
controlvm_respond_physdev_changestate(&inmsg->hdr,
- CONTROLVM_RESP_SUCCESS,
- inmsg->cmd.
- deviceChangeState.state);
+ CONTROLVM_RESP_SUCCESS, inmsg->cmd.
+ device_change_state.state);
parahotplug_request_destroy(req);
} else {
/* For disable messages, add the request to the
@@ -1768,23 +1785,23 @@ parahotplug_process_message(CONTROLVM_MESSAGE *inmsg)
* either successfully or with an error.
*/
static BOOL
-handle_command(CONTROLVM_MESSAGE inmsg, HOSTADDRESS channel_addr)
+handle_command(struct controlvm_message inmsg, HOSTADDRESS channel_addr)
{
- CONTROLVM_MESSAGE_PACKET *cmd = &inmsg.cmd;
+ struct controlvm_message_packet *cmd = &inmsg.cmd;
u64 parametersAddr = 0;
u32 parametersBytes = 0;
PARSER_CONTEXT *parser_ctx = NULL;
BOOL isLocalAddr = FALSE;
- CONTROLVM_MESSAGE ackmsg;
+ struct controlvm_message ackmsg;
/* create parsing context if necessary */
- isLocalAddr = (inmsg.hdr.Flags.testMessage == 1);
+ isLocalAddr = (inmsg.hdr.flags.test_message == 1);
if (channel_addr == 0) {
LOGERR("HUH? channel_addr is 0!");
return TRUE;
}
- parametersAddr = channel_addr + inmsg.hdr.PayloadVmOffset;
- parametersBytes = inmsg.hdr.PayloadBytes;
+ parametersAddr = channel_addr + inmsg.hdr.payload_vm_offset;
+ parametersBytes = inmsg.hdr.payload_bytes;
/* Parameter and channel addresses within test messages actually lie
* within our OS-controlled memory. We need to know that, because it
@@ -1802,7 +1819,7 @@ handle_command(CONTROLVM_MESSAGE inmsg, HOSTADDRESS channel_addr)
return FALSE;
}
LOGWRN("parsing failed");
- LOGWRN("inmsg.hdr.Id=0x%lx", (ulong) inmsg.hdr.Id);
+ LOGWRN("inmsg.hdr.Id=0x%lx", (ulong) inmsg.hdr.id);
LOGWRN("parametersAddr=0x%llx", (u64) parametersAddr);
LOGWRN("parametersBytes=%lu", (ulong) parametersBytes);
LOGWRN("isLocalAddr=%d", isLocalAddr);
@@ -1818,45 +1835,45 @@ handle_command(CONTROLVM_MESSAGE inmsg, HOSTADDRESS channel_addr)
(ControlVm_channel, CONTROLVM_QUEUE_ACK, &ackmsg)))
LOGWRN("failed to send ACK failed");
}
- switch (inmsg.hdr.Id) {
+ switch (inmsg.hdr.id) {
case CONTROLVM_CHIPSET_INIT:
LOGINF("CHIPSET_INIT(#busses=%lu,#switches=%lu)",
- (ulong) inmsg.cmd.initChipset.busCount,
- (ulong) inmsg.cmd.initChipset.switchCount);
+ (ulong) inmsg.cmd.init_chipset.bus_count,
+ (ulong) inmsg.cmd.init_chipset.switch_count);
chipset_init(&inmsg);
break;
case CONTROLVM_BUS_CREATE:
LOGINF("BUS_CREATE(%lu,#devs=%lu)",
- (ulong) cmd->createBus.busNo,
- (ulong) cmd->createBus.deviceCount);
+ (ulong) cmd->create_bus.bus_no,
+ (ulong) cmd->create_bus.dev_count);
bus_create(&inmsg);
break;
case CONTROLVM_BUS_DESTROY:
- LOGINF("BUS_DESTROY(%lu)", (ulong) cmd->destroyBus.busNo);
+ LOGINF("BUS_DESTROY(%lu)", (ulong) cmd->destroy_bus.bus_no);
bus_destroy(&inmsg);
break;
case CONTROLVM_BUS_CONFIGURE:
- LOGINF("BUS_CONFIGURE(%lu)", (ulong) cmd->configureBus.busNo);
+ LOGINF("BUS_CONFIGURE(%lu)", (ulong) cmd->configure_bus.bus_no);
bus_configure(&inmsg, parser_ctx);
break;
case CONTROLVM_DEVICE_CREATE:
LOGINF("DEVICE_CREATE(%lu,%lu)",
- (ulong) cmd->createDevice.busNo,
- (ulong) cmd->createDevice.devNo);
+ (ulong) cmd->create_device.bus_no,
+ (ulong) cmd->create_device.dev_no);
my_device_create(&inmsg);
break;
case CONTROLVM_DEVICE_CHANGESTATE:
- if (cmd->deviceChangeState.flags.physicalDevice) {
+ if (cmd->device_change_state.flags.phys_device) {
LOGINF("DEVICE_CHANGESTATE for physical device (%lu,%lu, active=%lu)",
- (ulong) cmd->deviceChangeState.busNo,
- (ulong) cmd->deviceChangeState.devNo,
- (ulong) cmd->deviceChangeState.state.Active);
+ (ulong) cmd->device_change_state.bus_no,
+ (ulong) cmd->device_change_state.dev_no,
+ (ulong) cmd->device_change_state.state.active);
parahotplug_process_message(&inmsg);
} else {
LOGINF("DEVICE_CHANGESTATE for virtual device (%lu,%lu, state.Alive=0x%lx)",
- (ulong) cmd->deviceChangeState.busNo,
- (ulong) cmd->deviceChangeState.devNo,
- (ulong) cmd->deviceChangeState.state.Alive);
+ (ulong) cmd->device_change_state.bus_no,
+ (ulong) cmd->device_change_state.dev_no,
+ (ulong) cmd->device_change_state.state.alive);
/* save the hdr and cmd structures for later use */
/* when sending back the response to Command */
my_device_changestate(&inmsg);
@@ -1867,16 +1884,16 @@ handle_command(CONTROLVM_MESSAGE inmsg, HOSTADDRESS channel_addr)
break;
case CONTROLVM_DEVICE_DESTROY:
LOGINF("DEVICE_DESTROY(%lu,%lu)",
- (ulong) cmd->destroyDevice.busNo,
- (ulong) cmd->destroyDevice.devNo);
+ (ulong) cmd->destroy_device.bus_no,
+ (ulong) cmd->destroy_device.dev_no);
my_device_destroy(&inmsg);
break;
case CONTROLVM_DEVICE_CONFIGURE:
LOGINF("DEVICE_CONFIGURE(%lu,%lu)",
- (ulong) cmd->configureDevice.busNo,
- (ulong) cmd->configureDevice.devNo);
+ (ulong) cmd->configure_device.bus_no,
+ (ulong) cmd->configure_device.dev_no);
/* no op for now, just send a respond that we passed */
- if (inmsg.hdr.Flags.responseExpected)
+ if (inmsg.hdr.flags.response_expected)
controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
break;
case CONTROLVM_CHIPSET_READY:
@@ -1892,8 +1909,8 @@ handle_command(CONTROLVM_MESSAGE inmsg, HOSTADDRESS channel_addr)
chipset_notready(&inmsg.hdr);
break;
default:
- LOGERR("unrecognized controlvm cmd=%d", (int) inmsg.hdr.Id);
- if (inmsg.hdr.Flags.responseExpected)
+ LOGERR("unrecognized controlvm cmd=%d", (int) inmsg.hdr.id);
+ if (inmsg.hdr.flags.response_expected)
controlvm_respond(&inmsg.hdr,
-CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
break;
@@ -1923,8 +1940,7 @@ static HOSTADDRESS controlvm_get_channel_address(void)
static void
controlvm_periodic_work(struct work_struct *work)
{
- VISORCHIPSET_CHANNEL_INFO chanInfo;
- CONTROLVM_MESSAGE inmsg;
+ struct controlvm_message inmsg;
BOOL gotACommand = FALSE;
BOOL handle_command_failed = FALSE;
static u64 Poll_Count;
@@ -1938,8 +1954,6 @@ controlvm_periodic_work(struct work_struct *work)
if (visorchipset_clientregwait && !clientregistered)
goto Away;
- memset(&chanInfo, 0, sizeof(VISORCHIPSET_CHANNEL_INFO));
-
Poll_Count++;
if (Poll_Count >= 250)
; /* keep going */
@@ -1950,24 +1964,24 @@ controlvm_periodic_work(struct work_struct *work)
* should be sent
*/
if (visorchipset_holdchipsetready
- && (g_ChipSetMsgHdr.Id != CONTROLVM_INVALID)) {
+ && (g_ChipSetMsgHdr.id != CONTROLVM_INVALID)) {
if (check_chipset_events() == 1) {
LOGINF("Sending CHIPSET_READY response");
controlvm_respond(&g_ChipSetMsgHdr, 0);
clear_chipset_events();
memset(&g_ChipSetMsgHdr, 0,
- sizeof(CONTROLVM_MESSAGE_HEADER));
+ sizeof(struct controlvm_message_header));
}
}
while (visorchannel_signalremove(ControlVm_channel,
CONTROLVM_QUEUE_RESPONSE,
&inmsg)) {
- if (inmsg.hdr.PayloadMaxBytes != 0) {
+ if (inmsg.hdr.payload_max_bytes != 0) {
LOGERR("Payload of size %lu returned @%lu with unexpected message id %d.",
- (ulong) inmsg.hdr.PayloadMaxBytes,
- (ulong) inmsg.hdr.PayloadVmOffset,
- inmsg.hdr.Id);
+ (ulong) inmsg.hdr.payload_max_bytes,
+ (ulong) inmsg.hdr.payload_vm_offset,
+ inmsg.hdr.id);
}
}
if (!gotACommand) {
@@ -2033,9 +2047,9 @@ static void
setup_crash_devices_work_queue(struct work_struct *work)
{
- CONTROLVM_MESSAGE localCrashCreateBusMsg;
- CONTROLVM_MESSAGE localCrashCreateDevMsg;
- CONTROLVM_MESSAGE msg;
+ struct controlvm_message localCrashCreateBusMsg;
+ struct controlvm_message localCrashCreateDevMsg;
+ struct controlvm_message msg;
u32 localSavedCrashMsgOffset;
u16 localSavedCrashMsgCount;
@@ -2052,16 +2066,16 @@ setup_crash_devices_work_queue(struct work_struct *work)
POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
/* send init chipset msg */
- msg.hdr.Id = CONTROLVM_CHIPSET_INIT;
- msg.cmd.initChipset.busCount = 23;
- msg.cmd.initChipset.switchCount = 0;
+ msg.hdr.id = CONTROLVM_CHIPSET_INIT;
+ msg.cmd.init_chipset.bus_count = 23;
+ msg.cmd.init_chipset.switch_count = 0;
chipset_init(&msg);
/* get saved message count */
if (visorchannel_read(ControlVm_channel,
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
- SavedCrashMsgCount),
+ offsetof(struct spar_controlvm_channel_protocol,
+ saved_crash_message_count),
&localSavedCrashMsgCount, sizeof(u16)) < 0) {
LOGERR("failed to get Saved Message Count");
POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
@@ -2080,8 +2094,8 @@ setup_crash_devices_work_queue(struct work_struct *work)
/* get saved crash message offset */
if (visorchannel_read(ControlVm_channel,
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
- SavedCrashMsgOffset),
+ offsetof(struct spar_controlvm_channel_protocol,
+ saved_crash_message_offset),
&localSavedCrashMsgOffset, sizeof(u32)) < 0) {
LOGERR("failed to get Saved Message Offset");
POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
@@ -2093,7 +2107,7 @@ setup_crash_devices_work_queue(struct work_struct *work)
if (visorchannel_read(ControlVm_channel,
localSavedCrashMsgOffset,
&localCrashCreateBusMsg,
- sizeof(CONTROLVM_MESSAGE)) < 0) {
+ sizeof(struct controlvm_message)) < 0) {
LOGERR("CRASH_DEV_RD_BUS_FAIULRE: Failed to read CrashCreateBusMsg!");
POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
POSTCODE_SEVERITY_ERR);
@@ -2103,9 +2117,9 @@ setup_crash_devices_work_queue(struct work_struct *work)
/* read create device message for storage device */
if (visorchannel_read(ControlVm_channel,
localSavedCrashMsgOffset +
- sizeof(CONTROLVM_MESSAGE),
+ sizeof(struct controlvm_message),
&localCrashCreateDevMsg,
- sizeof(CONTROLVM_MESSAGE)) < 0) {
+ sizeof(struct controlvm_message)) < 0) {
LOGERR("CRASH_DEV_RD_DEV_FAIULRE: Failed to read CrashCreateDevMsg!");
POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
POSTCODE_SEVERITY_ERR);
@@ -2113,7 +2127,7 @@ setup_crash_devices_work_queue(struct work_struct *work)
}
/* reuse IOVM create bus message */
- if (localCrashCreateBusMsg.cmd.createBus.channelAddr != 0)
+ if (localCrashCreateBusMsg.cmd.create_bus.channel_addr != 0)
bus_create(&localCrashCreateBusMsg);
else {
LOGERR("CrashCreateBusMsg is null, no dump will be taken");
@@ -2123,7 +2137,7 @@ setup_crash_devices_work_queue(struct work_struct *work)
}
/* reuse create device message for storage device */
- if (localCrashCreateDevMsg.cmd.createDevice.channelAddr != 0)
+ if (localCrashCreateDevMsg.cmd.create_device.channel_addr != 0)
my_device_create(&localCrashCreateDevMsg);
else {
LOGERR("CrashCreateDevMsg is null, no dump will be taken");
@@ -2168,12 +2182,12 @@ device_destroy_response(ulong busNo, ulong devNo, int response)
}
void
-visorchipset_device_pause_response(ulong busNo, ulong devNo, int response)
+visorchipset_device_pause_response(ulong bus_no, ulong dev_no, int response)
{
device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
- busNo, devNo, response,
- SegmentStateStandby);
+ bus_no, dev_no, response,
+ segment_state_standby);
}
EXPORT_SYMBOL_GPL(visorchipset_device_pause_response);
@@ -2182,30 +2196,30 @@ device_resume_response(ulong busNo, ulong devNo, int response)
{
device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
busNo, devNo, response,
- SegmentStateRunning);
+ segment_state_running);
}
BOOL
-visorchipset_get_bus_info(ulong busNo, VISORCHIPSET_BUS_INFO *busInfo)
+visorchipset_get_bus_info(ulong bus_no, struct visorchipset_bus_info *bus_info)
{
- void *p = findbus(&BusInfoList, busNo);
+ void *p = findbus(&BusInfoList, bus_no);
if (!p) {
- LOGERR("(%lu) failed", busNo);
+ LOGERR("(%lu) failed", bus_no);
return FALSE;
}
- memcpy(busInfo, p, sizeof(VISORCHIPSET_BUS_INFO));
+ memcpy(bus_info, p, sizeof(struct visorchipset_bus_info));
return TRUE;
}
EXPORT_SYMBOL_GPL(visorchipset_get_bus_info);
BOOL
-visorchipset_set_bus_context(ulong busNo, void *context)
+visorchipset_set_bus_context(ulong bus_no, void *context)
{
- VISORCHIPSET_BUS_INFO *p = findbus(&BusInfoList, busNo);
+ struct visorchipset_bus_info *p = findbus(&BusInfoList, bus_no);
if (!p) {
- LOGERR("(%lu) failed", busNo);
+ LOGERR("(%lu) failed", bus_no);
return FALSE;
}
p->bus_driver_context = context;
@@ -2214,27 +2228,28 @@ visorchipset_set_bus_context(ulong busNo, void *context)
EXPORT_SYMBOL_GPL(visorchipset_set_bus_context);
BOOL
-visorchipset_get_device_info(ulong busNo, ulong devNo,
- VISORCHIPSET_DEVICE_INFO *devInfo)
+visorchipset_get_device_info(ulong bus_no, ulong dev_no,
+ struct visorchipset_device_info *dev_info)
{
- void *p = finddevice(&DevInfoList, busNo, devNo);
+ void *p = finddevice(&DevInfoList, bus_no, dev_no);
if (!p) {
- LOGERR("(%lu,%lu) failed", busNo, devNo);
+ LOGERR("(%lu,%lu) failed", bus_no, dev_no);
return FALSE;
}
- memcpy(devInfo, p, sizeof(VISORCHIPSET_DEVICE_INFO));
+ memcpy(dev_info, p, sizeof(struct visorchipset_device_info));
return TRUE;
}
EXPORT_SYMBOL_GPL(visorchipset_get_device_info);
BOOL
-visorchipset_set_device_context(ulong busNo, ulong devNo, void *context)
+visorchipset_set_device_context(ulong bus_no, ulong dev_no, void *context)
{
- VISORCHIPSET_DEVICE_INFO *p = finddevice(&DevInfoList, busNo, devNo);
+ struct visorchipset_device_info *p =
+ finddevice(&DevInfoList, bus_no, dev_no);
if (!p) {
- LOGERR("(%lu,%lu) failed", busNo, devNo);
+ LOGERR("(%lu,%lu) failed", bus_no, dev_no);
return FALSE;
}
p->bus_driver_context = context;
@@ -2377,11 +2392,10 @@ visorchipset_init(void)
ControlVm_channel =
visorchannel_create_with_lock
(addr,
- sizeof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL),
- UltraControlvmChannelProtocolGuid);
- if (ULTRA_CONTROLVM_CHANNEL_OK_CLIENT
- (visorchannel_get_header(ControlVm_channel),
- NULL)) {
+ sizeof(struct spar_controlvm_channel_protocol),
+ spar_controlvm_channel_protocol_uuid);
+ if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
+ visorchannel_get_header(ControlVm_channel))) {
LOGINF("Channel %s (ControlVm) discovered",
visorchannel_id(ControlVm_channel, s));
initialize_controlvm_payload();
@@ -2404,11 +2418,11 @@ visorchipset_init(void)
goto Away;
}
- memset(&g_DiagMsgHdr, 0, sizeof(CONTROLVM_MESSAGE_HEADER));
+ memset(&g_DiagMsgHdr, 0, sizeof(struct controlvm_message_header));
- memset(&g_ChipSetMsgHdr, 0, sizeof(CONTROLVM_MESSAGE_HEADER));
+ memset(&g_ChipSetMsgHdr, 0, sizeof(struct controlvm_message_header));
- memset(&g_DelDumpMsgHdr, 0, sizeof(CONTROLVM_MESSAGE_HEADER));
+ memset(&g_DelDumpMsgHdr, 0, sizeof(struct controlvm_message_header));
Putfile_buffer_list_pool =
kmem_cache_create(Putfile_buffer_list_pool_name,
@@ -2497,11 +2511,11 @@ visorchipset_exit(void)
cleanup_controlvm_structures();
- memset(&g_DiagMsgHdr, 0, sizeof(CONTROLVM_MESSAGE_HEADER));
+ memset(&g_DiagMsgHdr, 0, sizeof(struct controlvm_message_header));
- memset(&g_ChipSetMsgHdr, 0, sizeof(CONTROLVM_MESSAGE_HEADER));
+ memset(&g_ChipSetMsgHdr, 0, sizeof(struct controlvm_message_header));
- memset(&g_DelDumpMsgHdr, 0, sizeof(CONTROLVM_MESSAGE_HEADER));
+ memset(&g_DelDumpMsgHdr, 0, sizeof(struct controlvm_message_header));
LOGINF("Channel %s (ControlVm) disconnected",
visorchannel_id(ControlVm_channel, s));
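
The visorchipset hunks above swap the Windows-style CamelCase typedefs (CONTROLVM_MESSAGE, CONTROLVM_MESSAGE_HEADER, VISORCHIPSET_BUS_INFO, ...) for plain struct tags with snake_case members, so every use site now reads sizeof(struct controlvm_message) and inmsg.hdr.payload_bytes. A minimal before/after sketch of that pattern; the member list and the u32 widths here are an illustrative subset, not the full channel definition:

/* Before: typedef'd CamelCase type, CamelCase members. */
typedef struct _CONTROLVM_MESSAGE_HEADER {
	u32 Id;
	u32 PayloadVmOffset;
	u32 PayloadBytes;
} CONTROLVM_MESSAGE_HEADER;

/* After: plain struct tag, snake_case members; declarations and
 * sizeof() both spell out "struct controlvm_message_header".
 */
struct controlvm_message_header {
	u32 id;
	u32 payload_vm_offset;
	u32 payload_bytes;
};
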
diff --git a/drivers/staging/unisys/visorutil/charqueue.c b/drivers/staging/unisys/visorutil/charqueue.c
index 22241c7b4f7..1ce7003c3a9 100644
--- a/drivers/staging/unisys/visorutil/charqueue.c
+++ b/drivers/staging/unisys/visorutil/charqueue.c
@@ -25,9 +25,7 @@
#define IS_EMPTY(charqueue) (charqueue->head == charqueue->tail)
-
-
-struct CHARQUEUE_Tag {
+struct charqueue {
int alloc_size;
int nslots;
spinlock_t lock;
@@ -35,12 +33,10 @@ struct CHARQUEUE_Tag {
unsigned char buf[0];
};
-
-
-CHARQUEUE *visor_charqueue_create(ulong nslots)
+struct charqueue *visor_charqueue_create(ulong nslots)
{
- int alloc_size = sizeof(CHARQUEUE) + nslots + 1;
- CHARQUEUE *cq = kmalloc(alloc_size, GFP_KERNEL|__GFP_NORETRY);
+ int alloc_size = sizeof(struct charqueue) + nslots + 1;
+ struct charqueue *cq = kmalloc(alloc_size, GFP_KERNEL|__GFP_NORETRY);
if (cq == NULL) {
ERRDRV("visor_charqueue_create allocation failed (alloc_size=%d)",
@@ -49,15 +45,14 @@ CHARQUEUE *visor_charqueue_create(ulong nslots)
}
cq->alloc_size = alloc_size;
cq->nslots = nslots;
- cq->head = cq->tail = 0;
+ cq->head = 0;
+ cq->tail = 0;
spin_lock_init(&cq->lock);
return cq;
}
EXPORT_SYMBOL_GPL(visor_charqueue_create);
-
-
-void visor_charqueue_enqueue(CHARQUEUE *charqueue, unsigned char c)
+void visor_charqueue_enqueue(struct charqueue *charqueue, unsigned char c)
{
int alloc_slots = charqueue->nslots+1; /* 1 slot is always empty */
@@ -71,9 +66,7 @@ void visor_charqueue_enqueue(CHARQUEUE *charqueue, unsigned char c)
}
EXPORT_SYMBOL_GPL(visor_charqueue_enqueue);
-
-
-BOOL visor_charqueue_is_empty(CHARQUEUE *charqueue)
+BOOL visor_charqueue_is_empty(struct charqueue *charqueue)
{
BOOL b;
@@ -84,9 +77,7 @@ BOOL visor_charqueue_is_empty(CHARQUEUE *charqueue)
}
EXPORT_SYMBOL_GPL(visor_charqueue_is_empty);
-
-
-static int charqueue_dequeue_1(CHARQUEUE *charqueue)
+static int charqueue_dequeue_1(struct charqueue *charqueue)
{
int alloc_slots = charqueue->nslots + 1; /* 1 slot is always empty */
@@ -96,9 +87,7 @@ static int charqueue_dequeue_1(CHARQUEUE *charqueue)
return charqueue->buf[charqueue->tail];
}
-
-
-int charqueue_dequeue(CHARQUEUE *charqueue)
+int charqueue_dequeue(struct charqueue *charqueue)
{
int rc;
@@ -108,9 +97,8 @@ int charqueue_dequeue(CHARQUEUE *charqueue)
return rc;
}
-
-
-int visor_charqueue_dequeue_n(CHARQUEUE *charqueue, unsigned char *buf, int n)
+int visor_charqueue_dequeue_n(struct charqueue *charqueue, unsigned char *buf,
+ int n)
{
int rc, counter = 0, c;
@@ -132,9 +120,7 @@ int visor_charqueue_dequeue_n(CHARQUEUE *charqueue, unsigned char *buf, int n)
}
EXPORT_SYMBOL_GPL(visor_charqueue_dequeue_n);
-
-
-void visor_charqueue_destroy(CHARQUEUE *charqueue)
+void visor_charqueue_destroy(struct charqueue *charqueue)
{
if (charqueue == NULL)
return;
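
The charqueue.c changes above are cosmetic; the underlying ring buffer is unchanged: storage is nslots + 1 bytes with one slot always left empty, so head == tail can only mean "empty" (the IS_EMPTY macro) and a full queue is detected one slot early. A standalone sketch of that invariant, with the index roles simplified relative to the driver:

#include <stdio.h>

#define NSLOTS 4			/* usable slots; storage is NSLOTS + 1 */

static unsigned char buf[NSLOTS + 1];	/* one slot always left empty */
static int head, tail;

static int is_empty(void) { return head == tail; }
static int is_full(void)  { return (head + 1) % (NSLOTS + 1) == tail; }

static void enqueue(unsigned char c)
{
	if (is_full())
		return;			/* this sketch drops the byte when full */
	buf[head] = c;
	head = (head + 1) % (NSLOTS + 1);
}

static int dequeue(void)
{
	int c;

	if (is_empty())
		return -1;
	c = buf[tail];
	tail = (tail + 1) % (NSLOTS + 1);
	return c;
}

int main(void)
{
	int c1, c2;

	enqueue('a');
	enqueue('b');
	c1 = dequeue();
	c2 = dequeue();
	printf("%c %c empty=%d\n", c1, c2, is_empty());
	return 0;
}
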
diff --git a/drivers/staging/unisys/visorutil/charqueue.h b/drivers/staging/unisys/visorutil/charqueue.h
index d6f16587a36..56c1f79a54b 100644
--- a/drivers/staging/unisys/visorutil/charqueue.h
+++ b/drivers/staging/unisys/visorutil/charqueue.h
@@ -21,17 +21,18 @@
#include "uniklog.h"
#include "timskmod.h"
-/* CHARQUEUE is an opaque structure to users.
+/* struct charqueue is an opaque structure to users.
* Fields are declared only in the implementation .c files.
*/
-typedef struct CHARQUEUE_Tag CHARQUEUE;
+struct charqueue;
-CHARQUEUE *visor_charqueue_create(ulong nslots);
-void visor_charqueue_enqueue(CHARQUEUE *charqueue, unsigned char c);
-int charqueue_dequeue(CHARQUEUE *charqueue);
-int visor_charqueue_dequeue_n(CHARQUEUE *charqueue, unsigned char *buf, int n);
-BOOL visor_charqueue_is_empty(CHARQUEUE *charqueue);
-void visor_charqueue_destroy(CHARQUEUE *charqueue);
+struct charqueue *visor_charqueue_create(ulong nslots);
+void visor_charqueue_enqueue(struct charqueue *charqueue, unsigned char c);
+int charqueue_dequeue(struct charqueue *charqueue);
+int visor_charqueue_dequeue_n(struct charqueue *charqueue, unsigned char *buf,
+ int n);
+BOOL visor_charqueue_is_empty(struct charqueue *charqueue);
+void visor_charqueue_destroy(struct charqueue *charqueue);
#endif
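
The header keeps struct charqueue opaque: with the typedef gone, only the bare forward declaration "struct charqueue;" is exported and the members stay private to charqueue.c. A minimal sketch of the same pattern outside the kernel (the names queue/queue_create are generic placeholders, not the visorutil API):

/* queue.h -- public side: forward declaration only, members hidden. */
struct queue;
struct queue *queue_create(unsigned long nslots);
void queue_destroy(struct queue *q);

/* queue.c -- the only translation unit that sees the members. */
#include <stdlib.h>

struct queue {
	int nslots;
	int head, tail;
	unsigned char buf[];		/* flexible array, nslots + 1 bytes */
};

struct queue *queue_create(unsigned long nslots)
{
	struct queue *q = calloc(1, sizeof(*q) + nslots + 1);

	if (!q)
		return NULL;
	q->nslots = (int)nslots;
	return q;
}

void queue_destroy(struct queue *q)
{
	free(q);
}
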
diff --git a/drivers/staging/unisys/visorutil/easyproc.c b/drivers/staging/unisys/visorutil/easyproc.c
index 3b388494e2a..40f1ae9a155 100644
--- a/drivers/staging/unisys/visorutil/easyproc.c
+++ b/drivers/staging/unisys/visorutil/easyproc.c
@@ -254,9 +254,9 @@ void visor_easyproc_CreateDeviceProperty(struct easyproc_device_info *p,
}
strcpy(px->property_name, property_name);
if (px->procEntry == NULL) {
- ERRDEVX(p->devno, "failed to register /proc/%s/device/%d/%s entry",
- p->pdriver->ProcId, p->devno, property_name
- );
+ ERRDEVX(p->devno,
+ "failed to register /proc/%s/device/%d/%s entry",
+ p->pdriver->ProcId, p->devno, property_name);
return;
}
px->show_device_property_info = show_property_info;
diff --git a/drivers/staging/unisys/visorutil/memregion.h b/drivers/staging/unisys/visorutil/memregion.h
index f4a65d2fcf0..0c3eebcf6d5 100644
--- a/drivers/staging/unisys/visorutil/memregion.h
+++ b/drivers/staging/unisys/visorutil/memregion.h
@@ -20,24 +20,24 @@
#include "timskmod.h"
-/* MEMREGION is an opaque structure to users.
+/* struct memregion is an opaque structure to users.
* Fields are declared only in the implementation .c files.
*/
-typedef struct MEMREGION_Tag MEMREGION;
+struct memregion;
-MEMREGION *visor_memregion_create(HOSTADDRESS physaddr, ulong nbytes);
-MEMREGION *visor_memregion_create_overlapped(MEMREGION *parent,
- ulong offset, ulong nbytes);
-int visor_memregion_resize(MEMREGION *memregion, ulong newsize);
-int visor_memregion_read(MEMREGION *memregion,
- ulong offset, void *dest, ulong nbytes);
-int visor_memregion_write(MEMREGION *memregion,
+struct memregion *visor_memregion_create(HOSTADDRESS physaddr, ulong nbytes);
+struct memregion *visor_memregion_create_overlapped(struct memregion *parent,
+ ulong offset, ulong nbytes);
+int visor_memregion_resize(struct memregion *memregion, ulong newsize);
+int visor_memregion_read(struct memregion *memregion,
+ ulong offset, void *dest, ulong nbytes);
+int visor_memregion_write(struct memregion *memregion,
ulong offset, void *src, ulong nbytes);
-void visor_memregion_destroy(MEMREGION *memregion);
-HOSTADDRESS visor_memregion_get_physaddr(MEMREGION *memregion);
-ulong visor_memregion_get_nbytes(MEMREGION *memregion);
-void memregion_dump(MEMREGION *memregion, char *s,
+void visor_memregion_destroy(struct memregion *memregion);
+HOSTADDRESS visor_memregion_get_physaddr(struct memregion *memregion);
+ulong visor_memregion_get_nbytes(struct memregion *memregion);
+void memregion_dump(struct memregion *memregion, char *s,
ulong off, ulong len, struct seq_file *seq);
-void __iomem *visor_memregion_get_pointer(MEMREGION *memregion);
+void __iomem *visor_memregion_get_pointer(struct memregion *memregion);
#endif
diff --git a/drivers/staging/unisys/visorutil/memregion_direct.c b/drivers/staging/unisys/visorutil/memregion_direct.c
index 65bc07b947d..33522cc8c22 100644
--- a/drivers/staging/unisys/visorutil/memregion_direct.c
+++ b/drivers/staging/unisys/visorutil/memregion_direct.c
@@ -26,7 +26,7 @@
#define MYDRVNAME "memregion"
-struct MEMREGION_Tag {
+struct memregion {
HOSTADDRESS physaddr;
ulong nbytes;
void __iomem *mapped;
@@ -34,15 +34,15 @@ struct MEMREGION_Tag {
BOOL overlapped;
};
-static BOOL mapit(MEMREGION *memregion);
-static void unmapit(MEMREGION *memregion);
+static BOOL mapit(struct memregion *memregion);
+static void unmapit(struct memregion *memregion);
-MEMREGION *
+struct memregion *
visor_memregion_create(HOSTADDRESS physaddr, ulong nbytes)
{
- MEMREGION *rc = NULL;
- MEMREGION *memregion = kzalloc(sizeof(MEMREGION),
- GFP_KERNEL | __GFP_NORETRY);
+ struct memregion *rc = NULL;
+ struct memregion *memregion = kzalloc(sizeof(*memregion),
+ GFP_KERNEL | __GFP_NORETRY);
if (memregion == NULL) {
ERRDRV("visor_memregion_create allocation failed");
return NULL;
@@ -52,24 +52,23 @@ visor_memregion_create(HOSTADDRESS physaddr, ulong nbytes)
memregion->overlapped = FALSE;
if (!mapit(memregion)) {
rc = NULL;
- goto Away;
+ goto cleanup;
}
rc = memregion;
-Away:
+cleanup:
if (rc == NULL) {
- if (memregion != NULL) {
- visor_memregion_destroy(memregion);
- memregion = NULL;
- }
+ visor_memregion_destroy(memregion);
+ memregion = NULL;
}
return rc;
}
EXPORT_SYMBOL_GPL(visor_memregion_create);
-MEMREGION *
-visor_memregion_create_overlapped(MEMREGION *parent, ulong offset, ulong nbytes)
+struct memregion *
+visor_memregion_create_overlapped(struct memregion *parent, ulong offset,
+ ulong nbytes)
{
- MEMREGION *memregion = NULL;
+ struct memregion *memregion = NULL;
if (parent == NULL) {
ERRDRV("%s parent is NULL", __func__);
@@ -85,7 +84,7 @@ visor_memregion_create_overlapped(MEMREGION *parent, ulong offset, ulong nbytes)
__func__, offset, nbytes);
return NULL;
}
- memregion = kzalloc(sizeof(MEMREGION), GFP_KERNEL|__GFP_NORETRY);
+ memregion = kzalloc(sizeof(*memregion), GFP_KERNEL|__GFP_NORETRY);
if (memregion == NULL) {
ERRDRV("%s allocation failed", __func__);
return NULL;
@@ -93,23 +92,23 @@ visor_memregion_create_overlapped(MEMREGION *parent, ulong offset, ulong nbytes)
memregion->physaddr = parent->physaddr + offset;
memregion->nbytes = nbytes;
- memregion->mapped = ((u8 __iomem *) (parent->mapped)) + offset;
+ memregion->mapped = ((u8 __iomem *)(parent->mapped)) + offset;
memregion->requested = FALSE;
memregion->overlapped = TRUE;
return memregion;
}
EXPORT_SYMBOL_GPL(visor_memregion_create_overlapped);
-
static BOOL
-mapit(MEMREGION *memregion)
+mapit(struct memregion *memregion)
{
- ulong physaddr = (ulong) (memregion->physaddr);
+ ulong physaddr = (ulong)(memregion->physaddr);
ulong nbytes = memregion->nbytes;
memregion->requested = FALSE;
if (!request_mem_region(physaddr, nbytes, MYDRVNAME))
- ERRDRV("cannot reserve channel memory @0x%lx for 0x%lx-- no big deal", physaddr, nbytes);
+ ERRDRV("cannot reserve channel memory @0x%lx for 0x%lx-- no big deal",
+ physaddr, nbytes);
else
memregion->requested = TRUE;
memregion->mapped = ioremap_cache(physaddr, nbytes);
@@ -122,42 +121,42 @@ mapit(MEMREGION *memregion)
}
static void
-unmapit(MEMREGION *memregion)
+unmapit(struct memregion *memregion)
{
if (memregion->mapped != NULL) {
iounmap(memregion->mapped);
memregion->mapped = NULL;
}
if (memregion->requested) {
- release_mem_region((ulong) (memregion->physaddr),
+ release_mem_region((ulong)(memregion->physaddr),
memregion->nbytes);
memregion->requested = FALSE;
}
}
HOSTADDRESS
-visor_memregion_get_physaddr(MEMREGION *memregion)
+visor_memregion_get_physaddr(struct memregion *memregion)
{
return memregion->physaddr;
}
EXPORT_SYMBOL_GPL(visor_memregion_get_physaddr);
ulong
-visor_memregion_get_nbytes(MEMREGION *memregion)
+visor_memregion_get_nbytes(struct memregion *memregion)
{
return memregion->nbytes;
}
EXPORT_SYMBOL_GPL(visor_memregion_get_nbytes);
void __iomem *
-visor_memregion_get_pointer(MEMREGION *memregion)
+visor_memregion_get_pointer(struct memregion *memregion)
{
return memregion->mapped;
}
EXPORT_SYMBOL_GPL(visor_memregion_get_pointer);
int
-visor_memregion_resize(MEMREGION *memregion, ulong newsize)
+visor_memregion_resize(struct memregion *memregion, ulong newsize)
{
if (newsize == memregion->nbytes)
return 0;
@@ -176,10 +175,9 @@ visor_memregion_resize(MEMREGION *memregion, ulong newsize)
}
EXPORT_SYMBOL_GPL(visor_memregion_resize);
-
static int
memregion_readwrite(BOOL is_write,
- MEMREGION *memregion, ulong offset,
+ struct memregion *memregion, ulong offset,
void *local, ulong nbytes)
{
if (offset + nbytes > memregion->nbytes) {
@@ -195,7 +193,7 @@ memregion_readwrite(BOOL is_write,
}
int
-visor_memregion_read(MEMREGION *memregion, ulong offset, void *dest,
+visor_memregion_read(struct memregion *memregion, ulong offset, void *dest,
ulong nbytes)
{
return memregion_readwrite(FALSE, memregion, offset, dest, nbytes);
@@ -203,7 +201,7 @@ visor_memregion_read(MEMREGION *memregion, ulong offset, void *dest,
EXPORT_SYMBOL_GPL(visor_memregion_read);
int
-visor_memregion_write(MEMREGION *memregion, ulong offset, void *src,
+visor_memregion_write(struct memregion *memregion, ulong offset, void *src,
ulong nbytes)
{
return memregion_readwrite(TRUE, memregion, offset, src, nbytes);
@@ -211,7 +209,7 @@ visor_memregion_write(MEMREGION *memregion, ulong offset, void *src,
EXPORT_SYMBOL_GPL(visor_memregion_write);
void
-visor_memregion_destroy(MEMREGION *memregion)
+visor_memregion_destroy(struct memregion *memregion)
{
if (memregion == NULL)
return;
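
Two idioms recur in the memregion hunks: kzalloc(sizeof(*memregion), ...) instead of kzalloc(sizeof(MEMREGION), ...), so the request tracks the pointee type automatically, and the error label renamed from Away to cleanup with the now-redundant NULL re-check dropped. A hedged sketch of the resulting shape, using an abbreviated stand-in struct rather than the real struct memregion:

#include <linux/slab.h>
#include <linux/io.h>

struct memregion_example {		/* abbreviated illustration only */
	unsigned long physaddr;
	unsigned long nbytes;
	void __iomem *mapped;
};

static struct memregion_example *example_create(unsigned long physaddr,
						unsigned long nbytes)
{
	/* sizeof(*mr): the allocation size follows the pointee type even
	 * if the struct is later renamed or the pointer's type changes.
	 */
	struct memregion_example *mr = kzalloc(sizeof(*mr),
					       GFP_KERNEL | __GFP_NORETRY);

	if (!mr)
		return NULL;

	mr->physaddr = physaddr;
	mr->nbytes = nbytes;
	mr->mapped = ioremap_cache(physaddr, nbytes);
	if (!mr->mapped)
		goto cleanup;

	return mr;

cleanup:
	kfree(mr);	/* mr is known non-NULL on this path; no extra check */
	return NULL;
}
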
diff --git a/drivers/staging/unisys/visorutil/periodic_work.c b/drivers/staging/unisys/visorutil/periodic_work.c
index 3dd1c04d0e1..0908bf92940 100644
--- a/drivers/staging/unisys/visorutil/periodic_work.c
+++ b/drivers/staging/unisys/visorutil/periodic_work.c
@@ -25,8 +25,6 @@
#define MYDRVNAME "periodic_work"
-
-
struct periodic_work {
rwlock_t lock;
struct delayed_work work;
@@ -39,8 +37,6 @@ struct periodic_work {
const char *devnam;
};
-
-
static void periodic_work_func(struct work_struct *work)
{
struct periodic_work *pw;
@@ -49,8 +45,6 @@ static void periodic_work_func(struct work_struct *work)
(*pw->workfunc)(pw->workfuncarg);
}
-
-
struct periodic_work *visor_periodic_work_create(ulong jiffy_interval,
struct workqueue_struct *workqueue,
void (*workfunc)(void *),
@@ -73,16 +67,12 @@ struct periodic_work *visor_periodic_work_create(ulong jiffy_interval,
}
EXPORT_SYMBOL_GPL(visor_periodic_work_create);
-
-
void visor_periodic_work_destroy(struct periodic_work *pw)
{
kfree(pw);
}
EXPORT_SYMBOL_GPL(visor_periodic_work_destroy);
-
-
/** Call this from your periodic work worker function to schedule the next
* call.
* If this function returns FALSE, there was a failure and the
@@ -112,8 +102,6 @@ unlock:
}
EXPORT_SYMBOL_GPL(visor_periodic_work_nextperiod);
-
-
/** This function returns TRUE iff new periodic work was actually started.
* If this function returns FALSE, then no work was started
* (either because it was already started, or because of a failure).
@@ -145,13 +133,9 @@ BOOL visor_periodic_work_start(struct periodic_work *pw)
unlock:
write_unlock(&pw->lock);
return rc;
-
}
EXPORT_SYMBOL_GPL(visor_periodic_work_start);
-
-
-
/** This function returns TRUE iff your call actually stopped the periodic
* work.
*
@@ -223,8 +207,9 @@ BOOL visor_periodic_work_stop(struct periodic_work *pw)
*/
SLEEPJIFFIES(10);
write_lock(&pw->lock);
- } else
+ } else {
pw->want_to_stop = FALSE;
+ }
}
write_unlock(&pw->lock);
return stopped_something;
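
The "} else {" changes in periodic_work.c (and in visorkmodutils.c further down) apply the kernel coding-style rule that when one branch of an if/else uses braces, the other branch is braced as well, even if it holds a single statement. A self-contained illustration of the rule; the function and variable names are invented for the example:

#include <errno.h>
#include <stdbool.h>

static bool platform_detected;

static int example_init(bool detected)
{
	if (detected) {
		/* multi-statement branch: braces required regardless */
		platform_detected = true;
		return 0;
	} else {
		/* single statement, but braced to match the other branch */
		return -ENODEV;
	}
}
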
diff --git a/drivers/staging/unisys/visorutil/procobjecttree.c b/drivers/staging/unisys/visorutil/procobjecttree.c
index c476036f738..195772d22c9 100644
--- a/drivers/staging/unisys/visorutil/procobjecttree.c
+++ b/drivers/staging/unisys/visorutil/procobjecttree.c
@@ -320,19 +320,18 @@ void visor_proc_DestroyObject(MYPROCOBJECT *obj)
kfree(obj->procDirProperties);
obj->procDirProperties = NULL;
}
- if (obj->procDirPropertyContexts != NULL) {
- kfree(obj->procDirPropertyContexts);
- obj->procDirPropertyContexts = NULL;
- }
+
+ kfree(obj->procDirPropertyContexts);
+ obj->procDirPropertyContexts = NULL;
+
if (obj->procDir != NULL) {
if (obj->name != NULL)
remove_proc_entry(obj->name, type->procDir);
obj->procDir = NULL;
}
- if (obj->name != NULL) {
- kfree(obj->name);
- obj->name = NULL;
- }
+
+ kfree(obj->name);
+ obj->name = NULL;
kfree(obj);
}
EXPORT_SYMBOL_GPL(visor_proc_DestroyObject);
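
The procobjecttree.c hunks drop the "if (x != NULL)" guards around kfree(): kfree(NULL) is defined to be a no-op, so the checks only added nesting. A short sketch of the simplified teardown, with an illustrative struct standing in for MYPROCOBJECT:

#include <linux/slab.h>

struct example_obj {			/* illustrative only */
	char *name;
	void **property_contexts;
};

static void example_destroy(struct example_obj *obj)
{
	if (!obj)
		return;

	/* kfree(NULL) is a no-op, so per-member NULL checks are unnecessary. */
	kfree(obj->property_contexts);
	obj->property_contexts = NULL;

	kfree(obj->name);
	obj->name = NULL;

	kfree(obj);
}
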
diff --git a/drivers/staging/unisys/visorutil/visorkmodutils.c b/drivers/staging/unisys/visorutil/visorkmodutils.c
index d6815f9e133..556e2642d2d 100644
--- a/drivers/staging/unisys/visorutil/visorkmodutils.c
+++ b/drivers/staging/unisys/visorutil/visorkmodutils.c
@@ -36,18 +36,7 @@
int unisys_spar_platform;
EXPORT_SYMBOL_GPL(unisys_spar_platform);
-/** Callers to interfaces that set __GFP_NORETRY flag below
- * must check for a NULL (error) result as we are telling the
- * kernel interface that it is okay to fail.
- */
-
-void *kmalloc_kernel(size_t siz)
-{
- return kmalloc(siz, GFP_KERNEL | __GFP_NORETRY);
-}
-
-static __init uint32_t
-visorutil_spar_detect(void)
+static __init uint32_t visorutil_spar_detect(void)
{
unsigned int eax, ebx, ecx, edx;
@@ -57,22 +46,19 @@ visorutil_spar_detect(void)
return (ebx == UNISYS_SPAR_ID_EBX) &&
(ecx == UNISYS_SPAR_ID_ECX) &&
(edx == UNISYS_SPAR_ID_EDX);
- } else
+ } else {
return 0;
-
+ }
}
-
-
-
-static __init int
-visorutil_mod_init(void)
+static __init int visorutil_mod_init(void)
{
if (visorutil_spar_detect()) {
unisys_spar_platform = TRUE;
return 0;
- } else
+ } else {
return -ENODEV;
+ }
}
static __exit void
diff --git a/drivers/staging/vme/devices/Kconfig b/drivers/staging/vme/devices/Kconfig
index 8e8bbb1dcd9..1d2ff0cc41f 100644
--- a/drivers/staging/vme/devices/Kconfig
+++ b/drivers/staging/vme/devices/Kconfig
@@ -8,6 +8,9 @@ config VME_USER
VME windows in a manner at least semi-compatible with the interface
provided with the original driver at <http://www.vmelinux.org/>.
+ To compile this driver as a module, choose M here. The module will
+ be called vme_user. If unsure, say N.
+
config VME_PIO2
tristate "GE PIO2 VME"
depends on STAGING && GPIOLIB
diff --git a/drivers/staging/vme/devices/vme_pio2_gpio.c b/drivers/staging/vme/devices/vme_pio2_gpio.c
index c64776f7180..da34d5529f5 100644
--- a/drivers/staging/vme/devices/vme_pio2_gpio.c
+++ b/drivers/staging/vme/devices/vme_pio2_gpio.c
@@ -191,11 +191,11 @@ int pio2_gpio_init(struct pio2_card *card)
int retval = 0;
char *label;
- label = kmalloc(PIO2_NUM_CHANNELS, GFP_KERNEL);
+ label = kasprintf(GFP_KERNEL,
+ "%s@%s", driver_name, dev_name(&card->vdev->dev));
if (label == NULL)
return -ENOMEM;
- sprintf(label, "%s@%s", driver_name, dev_name(&card->vdev->dev));
card->gc.label = label;
card->gc.ngpio = PIO2_NUM_CHANNELS;
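
The vme_pio2_gpio change replaces a fixed-size kmalloc() followed by sprintf() with a single kasprintf() call, which measures the formatted string, allocates exactly enough, and returns NULL on failure, so the label can never outgrow its buffer. A hedged sketch of the same pattern (the helper name is invented for illustration; the result is an ordinary kmalloc'd string, released later with kfree()):

#include <linux/slab.h>

/* Build "drivername@devname" in one step; returns NULL on allocation failure. */
static char *pio2_make_label(const char *driver_name, const char *devname)
{
	return kasprintf(GFP_KERNEL, "%s@%s", driver_name, devname);
}

The caller checks the result for NULL before assigning it to gc.label, exactly as the hunk above does.
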
diff --git a/drivers/staging/vt6655/80211hdr.h b/drivers/staging/vt6655/80211hdr.h
deleted file mode 100644
index 36e14ec5056..00000000000
--- a/drivers/staging/vt6655/80211hdr.h
+++ /dev/null
@@ -1,318 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: 80211hdr.h
- *
- * Purpose: 802.11 MAC headers related pre-defines and macros.
- *
- *
- * Author: Lyndon Chen
- *
- * Date: Apr 8, 2002
- *
- */
-
-#ifndef __80211HDR_H__
-#define __80211HDR_H__
-
-#include "ttype.h"
-
-/* bit type */
-#define BIT0 0x00000001
-#define BIT1 0x00000002
-#define BIT2 0x00000004
-#define BIT3 0x00000008
-#define BIT4 0x00000010
-#define BIT5 0x00000020
-#define BIT6 0x00000040
-#define BIT7 0x00000080
-#define BIT8 0x00000100
-#define BIT9 0x00000200
-#define BIT10 0x00000400
-#define BIT11 0x00000800
-#define BIT12 0x00001000
-#define BIT13 0x00002000
-#define BIT14 0x00004000
-#define BIT15 0x00008000
-#define BIT16 0x00010000
-#define BIT17 0x00020000
-#define BIT18 0x00040000
-#define BIT19 0x00080000
-#define BIT20 0x00100000
-#define BIT21 0x00200000
-#define BIT22 0x00400000
-#define BIT23 0x00800000
-#define BIT24 0x01000000
-#define BIT25 0x02000000
-#define BIT26 0x04000000
-#define BIT27 0x08000000
-#define BIT28 0x10000000
-#define BIT29 0x20000000
-#define BIT30 0x40000000
-#define BIT31 0x80000000
-
-/* 802.11 frame related, defined as 802.11 spec */
-#define WLAN_ADDR_LEN 6
-#define WLAN_CRC_LEN 4
-#define WLAN_CRC32_LEN 4
-#define WLAN_FCS_LEN 4
-#define WLAN_BSSID_LEN 6
-#define WLAN_BSS_TS_LEN 8
-#define WLAN_HDR_ADDR2_LEN 16
-#define WLAN_HDR_ADDR3_LEN 24
-#define WLAN_HDR_ADDR4_LEN 30
-#define WLAN_IEHDR_LEN 2
-#define WLAN_SSID_MAXLEN 32
-#define WLAN_RATES_MAXLEN 16
-#define WLAN_RATES_MAXLEN_11B 4
-#define WLAN_RSN_MAXLEN 32
-#define WLAN_DATA_MAXLEN 2312
-#define WLAN_A3FR_MAXLEN (WLAN_HDR_ADDR3_LEN + WLAN_DATA_MAXLEN + \
- WLAN_CRC_LEN)
-
-#define WLAN_BEACON_FR_MAXLEN WLAN_A3FR_MAXLEN
-#define WLAN_ATIM_FR_MAXLEN (WLAN_HDR_ADDR3_LEN + 0)
-#define WLAN_NULLDATA_FR_MAXLEN (WLAN_HDR_ADDR3_LEN + 0)
-#define WLAN_DISASSOC_FR_MAXLEN (WLAN_HDR_ADDR3_LEN + 2)
-#define WLAN_ASSOCREQ_FR_MAXLEN WLAN_A3FR_MAXLEN
-#define WLAN_ASSOCRESP_FR_MAXLEN WLAN_A3FR_MAXLEN
-#define WLAN_REASSOCREQ_FR_MAXLEN WLAN_A3FR_MAXLEN
-#define WLAN_REASSOCRESP_FR_MAXLEN WLAN_A3FR_MAXLEN
-#define WLAN_PROBEREQ_FR_MAXLEN WLAN_A3FR_MAXLEN
-#define WLAN_PROBERESP_FR_MAXLEN WLAN_A3FR_MAXLEN
-#define WLAN_AUTHEN_FR_MAXLEN WLAN_A3FR_MAXLEN
-#define WLAN_DEAUTHEN_FR_MAXLEN (WLAN_HDR_ADDR3_LEN + 2)
-
-#define WLAN_WEP_NKEYS 4
-#define WLAN_WEP40_KEYLEN 5
-#define WLAN_WEP104_KEYLEN 13
-#define WLAN_WEP232_KEYLEN 29
-#define WLAN_WEPMAX_KEYLEN 32
-#define WLAN_CHALLENGE_IE_MAXLEN 255
-#define WLAN_CHALLENGE_IE_LEN 130
-#define WLAN_CHALLENGE_LEN 128
-#define WLAN_WEP_IV_LEN 4
-#define WLAN_WEP_ICV_LEN 4
-#define WLAN_FRAGS_MAX 16
-
-/* Frame Type */
-#define WLAN_TYPE_MGR 0x00
-#define WLAN_TYPE_CTL 0x01
-#define WLAN_TYPE_DATA 0x02
-
-#define WLAN_FTYPE_MGMT 0x00
-#define WLAN_FTYPE_CTL 0x01
-#define WLAN_FTYPE_DATA 0x02
-
-/* Frame Subtypes */
-#define WLAN_FSTYPE_ASSOCREQ 0x00
-#define WLAN_FSTYPE_ASSOCRESP 0x01
-#define WLAN_FSTYPE_REASSOCREQ 0x02
-#define WLAN_FSTYPE_REASSOCRESP 0x03
-#define WLAN_FSTYPE_PROBEREQ 0x04
-#define WLAN_FSTYPE_PROBERESP 0x05
-#define WLAN_FSTYPE_BEACON 0x08
-#define WLAN_FSTYPE_ATIM 0x09
-#define WLAN_FSTYPE_DISASSOC 0x0a
-#define WLAN_FSTYPE_AUTHEN 0x0b
-#define WLAN_FSTYPE_DEAUTHEN 0x0c
-#define WLAN_FSTYPE_ACTION 0x0d
-
-/* Control */
-#define WLAN_FSTYPE_PSPOLL 0x0a
-#define WLAN_FSTYPE_RTS 0x0b
-#define WLAN_FSTYPE_CTS 0x0c
-#define WLAN_FSTYPE_ACK 0x0d
-#define WLAN_FSTYPE_CFEND 0x0e
-#define WLAN_FSTYPE_CFENDCFACK 0x0f
-
-/* Data */
-#define WLAN_FSTYPE_DATAONLY 0x00
-#define WLAN_FSTYPE_DATA_CFACK 0x01
-#define WLAN_FSTYPE_DATA_CFPOLL 0x02
-#define WLAN_FSTYPE_DATA_CFACK_CFPOLL 0x03
-#define WLAN_FSTYPE_NULL 0x04
-#define WLAN_FSTYPE_CFACK 0x05
-#define WLAN_FSTYPE_CFPOLL 0x06
-#define WLAN_FSTYPE_CFACK_CFPOLL 0x07
-
-#ifdef __BIG_ENDIAN
-
-/* GET & SET Frame Control bit */
-#define WLAN_GET_FC_PRVER(n) (((unsigned short)(n) >> 8) & (BIT0 | BIT1))
-#define WLAN_GET_FC_FTYPE(n) ((((unsigned short)(n) >> 8) & (BIT2 | BIT3)) >> 2)
-#define WLAN_GET_FC_FSTYPE(n) ((((unsigned short)(n) >> 8) & (BIT4|BIT5|BIT6|BIT7)) >> 4)
-#define WLAN_GET_FC_TODS(n) ((((unsigned short)(n) << 8) & (BIT8)) >> 8)
-#define WLAN_GET_FC_FROMDS(n) ((((unsigned short)(n) << 8) & (BIT9)) >> 9)
-#define WLAN_GET_FC_MOREFRAG(n) ((((unsigned short)(n) << 8) & (BIT10)) >> 10)
-#define WLAN_GET_FC_RETRY(n) ((((unsigned short)(n) << 8) & (BIT11)) >> 11)
-#define WLAN_GET_FC_PWRMGT(n) ((((unsigned short)(n) << 8) & (BIT12)) >> 12)
-#define WLAN_GET_FC_MOREDATA(n) ((((unsigned short)(n) << 8) & (BIT13)) >> 13)
-#define WLAN_GET_FC_ISWEP(n) ((((unsigned short)(n) << 8) & (BIT14)) >> 14)
-#define WLAN_GET_FC_ORDER(n) ((((unsigned short)(n) << 8) & (BIT15)) >> 15)
-
-/* Sequence Field bit */
-#define WLAN_GET_SEQ_FRGNUM(n) (((unsigned short)(n) >> 8) & (BIT0|BIT1|BIT2|BIT3))
-#define WLAN_GET_SEQ_SEQNUM(n) ((((unsigned short)(n) >> 8) & (~(BIT0|BIT1|BIT2|BIT3))) >> 4)
-
-/* Capability Field bit */
-#define WLAN_GET_CAP_INFO_ESS(n) (((n) >> 8) & BIT0)
-#define WLAN_GET_CAP_INFO_IBSS(n) ((((n) >> 8) & BIT1) >> 1)
-#define WLAN_GET_CAP_INFO_CFPOLLABLE(n) ((((n) >> 8) & BIT2) >> 2)
-#define WLAN_GET_CAP_INFO_CFPOLLREQ(n) ((((n) >> 8) & BIT3) >> 3)
-#define WLAN_GET_CAP_INFO_PRIVACY(n) ((((n) >> 8) & BIT4) >> 4)
-#define WLAN_GET_CAP_INFO_SHORTPREAMBLE(n) ((((n) >> 8) & BIT5) >> 5)
-#define WLAN_GET_CAP_INFO_PBCC(n) ((((n) >> 8) & BIT6) >> 6)
-#define WLAN_GET_CAP_INFO_AGILITY(n) ((((n) >> 8) & BIT7) >> 7)
-#define WLAN_GET_CAP_INFO_SPECTRUMMNG(n) ((((n)) & BIT8) >> 10)
-#define WLAN_GET_CAP_INFO_SHORTSLOTTIME(n) ((((n)) & BIT10) >> 10)
-#define WLAN_GET_CAP_INFO_DSSSOFDM(n) ((((n)) & BIT13) >> 13)
-#define WLAN_GET_CAP_INFO_GRPACK(n) ((((n)) & BIT14) >> 14)
-
-#else
-
-/* GET & SET Frame Control bit */
-#define WLAN_GET_FC_PRVER(n) (((unsigned short)(n)) & (BIT0 | BIT1))
-#define WLAN_GET_FC_FTYPE(n) ((((unsigned short)(n)) & (BIT2 | BIT3)) >> 2)
-#define WLAN_GET_FC_FSTYPE(n) ((((unsigned short)(n)) & (BIT4|BIT5|BIT6|BIT7)) >> 4)
-#define WLAN_GET_FC_TODS(n) ((((unsigned short)(n)) & (BIT8)) >> 8)
-#define WLAN_GET_FC_FROMDS(n) ((((unsigned short)(n)) & (BIT9)) >> 9)
-#define WLAN_GET_FC_MOREFRAG(n) ((((unsigned short)(n)) & (BIT10)) >> 10)
-#define WLAN_GET_FC_RETRY(n) ((((unsigned short)(n)) & (BIT11)) >> 11)
-#define WLAN_GET_FC_PWRMGT(n) ((((unsigned short)(n)) & (BIT12)) >> 12)
-#define WLAN_GET_FC_MOREDATA(n) ((((unsigned short)(n)) & (BIT13)) >> 13)
-#define WLAN_GET_FC_ISWEP(n) ((((unsigned short)(n)) & (BIT14)) >> 14)
-#define WLAN_GET_FC_ORDER(n) ((((unsigned short)(n)) & (BIT15)) >> 15)
-
-/* Sequence Field bit */
-#define WLAN_GET_SEQ_FRGNUM(n) (((unsigned short)(n)) & (BIT0|BIT1|BIT2|BIT3))
-#define WLAN_GET_SEQ_SEQNUM(n) ((((unsigned short)(n)) & (~(BIT0|BIT1|BIT2|BIT3))) >> 4)
-
-/* Capability Field bit */
-#define WLAN_GET_CAP_INFO_ESS(n) ((n) & BIT0)
-#define WLAN_GET_CAP_INFO_IBSS(n) (((n) & BIT1) >> 1)
-#define WLAN_GET_CAP_INFO_CFPOLLABLE(n) (((n) & BIT2) >> 2)
-#define WLAN_GET_CAP_INFO_CFPOLLREQ(n) (((n) & BIT3) >> 3)
-#define WLAN_GET_CAP_INFO_PRIVACY(n) (((n) & BIT4) >> 4)
-#define WLAN_GET_CAP_INFO_SHORTPREAMBLE(n) (((n) & BIT5) >> 5)
-#define WLAN_GET_CAP_INFO_PBCC(n) (((n) & BIT6) >> 6)
-#define WLAN_GET_CAP_INFO_AGILITY(n) (((n) & BIT7) >> 7)
-#define WLAN_GET_CAP_INFO_SPECTRUMMNG(n) (((n) & BIT8) >> 10)
-#define WLAN_GET_CAP_INFO_SHORTSLOTTIME(n) (((n) & BIT10) >> 10)
-#define WLAN_GET_CAP_INFO_DSSSOFDM(n) (((n) & BIT13) >> 13)
-#define WLAN_GET_CAP_INFO_GRPACK(n) (((n) & BIT14) >> 14)
-
-#endif /*#ifdef __BIG_ENDIAN */
-
-#define WLAN_SET_CAP_INFO_ESS(n) (n)
-#define WLAN_SET_CAP_INFO_IBSS(n) ((n) << 1)
-#define WLAN_SET_CAP_INFO_CFPOLLABLE(n) ((n) << 2)
-#define WLAN_SET_CAP_INFO_CFPOLLREQ(n) ((n) << 3)
-#define WLAN_SET_CAP_INFO_PRIVACY(n) ((n) << 4)
-#define WLAN_SET_CAP_INFO_SHORTPREAMBLE(n) ((n) << 5)
-#define WLAN_SET_CAP_INFO_SPECTRUMMNG(n) ((n) << 8)
-#define WLAN_SET_CAP_INFO_PBCC(n) ((n) << 6)
-#define WLAN_SET_CAP_INFO_AGILITY(n) ((n) << 7)
-#define WLAN_SET_CAP_INFO_SHORTSLOTTIME(n) ((n) << 10)
-#define WLAN_SET_CAP_INFO_DSSSOFDM(n) ((n) << 13)
-#define WLAN_SET_CAP_INFO_GRPACK(n) ((n) << 14)
-
-#define WLAN_SET_FC_PRVER(n) ((unsigned short)(n))
-#define WLAN_SET_FC_FTYPE(n) (((unsigned short)(n)) << 2)
-#define WLAN_SET_FC_FSTYPE(n) (((unsigned short)(n)) << 4)
-#define WLAN_SET_FC_TODS(n) (((unsigned short)(n)) << 8)
-#define WLAN_SET_FC_FROMDS(n) (((unsigned short)(n)) << 9)
-#define WLAN_SET_FC_MOREFRAG(n) (((unsigned short)(n)) << 10)
-#define WLAN_SET_FC_RETRY(n) (((unsigned short)(n)) << 11)
-#define WLAN_SET_FC_PWRMGT(n) (((unsigned short)(n)) << 12)
-#define WLAN_SET_FC_MOREDATA(n) (((unsigned short)(n)) << 13)
-#define WLAN_SET_FC_ISWEP(n) (((unsigned short)(n)) << 14)
-#define WLAN_SET_FC_ORDER(n) (((unsigned short)(n)) << 15)
-
-#define WLAN_SET_SEQ_FRGNUM(n) ((unsigned short)(n))
-#define WLAN_SET_SEQ_SEQNUM(n) (((unsigned short)(n)) << 4)
-
-/* ERP Field bit */
-
-#define WLAN_GET_ERP_NONERP_PRESENT(n) ((n) & BIT0)
-#define WLAN_GET_ERP_USE_PROTECTION(n) (((n) & BIT1) >> 1)
-#define WLAN_GET_ERP_BARKER_MODE(n) (((n) & BIT2) >> 2)
-
-#define WLAN_SET_ERP_NONERP_PRESENT(n) (n)
-#define WLAN_SET_ERP_USE_PROTECTION(n) ((n) << 1)
-#define WLAN_SET_ERP_BARKER_MODE(n) ((n) << 2)
-
-/* Support & Basic Rates field */
-#define WLAN_MGMT_IS_BASICRATE(b) ((b) & BIT7)
-#define WLAN_MGMT_GET_RATE(b) ((b) & ~BIT7)
-
-/* TIM field */
-#define WLAN_MGMT_IS_MULTICAST_TIM(b) ((b) & BIT0)
-#define WLAN_MGMT_GET_TIM_OFFSET(b) (((b) & ~BIT0) >> 1)
-
-/* 3-Addr & 4-Addr */
-#define WLAN_HDR_A3_DATA_PTR(p) (((unsigned char *)(p)) + WLAN_HDR_ADDR3_LEN)
-#define WLAN_HDR_A4_DATA_PTR(p) (((unsigned char *)(p)) + WLAN_HDR_ADDR4_LEN)
-
-/* IEEE ADDR */
-#define IEEE_ADDR_UNIVERSAL 0x02
-#define IEEE_ADDR_GROUP 0x01
-
-typedef struct {
- unsigned char abyAddr[6];
-} IEEE_ADDR, *PIEEE_ADDR;
-
-/* 802.11 Header Format */
-
-typedef struct tagWLAN_80211HDR_A2 {
- unsigned short wFrameCtl;
- unsigned short wDurationID;
- unsigned char abyAddr1[WLAN_ADDR_LEN];
- unsigned char abyAddr2[WLAN_ADDR_LEN];
-} __attribute__ ((__packed__))
-WLAN_80211HDR_A2, *PWLAN_80211HDR_A2;
-
-typedef struct tagWLAN_80211HDR_A3 {
- unsigned short wFrameCtl;
- unsigned short wDurationID;
- unsigned char abyAddr1[WLAN_ADDR_LEN];
- unsigned char abyAddr2[WLAN_ADDR_LEN];
- unsigned char abyAddr3[WLAN_ADDR_LEN];
- unsigned short wSeqCtl;
-} __attribute__ ((__packed__))
-WLAN_80211HDR_A3, *PWLAN_80211HDR_A3;
-
-typedef struct tagWLAN_80211HDR_A4 {
- unsigned short wFrameCtl;
- unsigned short wDurationID;
- unsigned char abyAddr1[WLAN_ADDR_LEN];
- unsigned char abyAddr2[WLAN_ADDR_LEN];
- unsigned char abyAddr3[WLAN_ADDR_LEN];
- unsigned short wSeqCtl;
- unsigned char abyAddr4[WLAN_ADDR_LEN];
-} __attribute__ ((__packed__))
-WLAN_80211HDR_A4, *PWLAN_80211HDR_A4;
-
-typedef union tagUWLAN_80211HDR {
- WLAN_80211HDR_A2 sA2;
- WLAN_80211HDR_A3 sA3;
- WLAN_80211HDR_A4 sA4;
-} UWLAN_80211HDR, *PUWLAN_80211HDR;
-
-#endif /* __80211HDR_H__ */
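
The deleted 80211hdr.h expressed 802.11 frame-control parsing as shift-and-mask macros, with separate big- and little-endian variants of the same bit positions. A standalone sketch of the little-endian extraction, using the bit layout from the definitions above (protocol version in bits 0-1, type in bits 2-3, subtype in bits 4-7):

#include <stdio.h>

#define FC_GET_PRVER(fc)	((fc) & 0x0003)
#define FC_GET_FTYPE(fc)	(((fc) & 0x000c) >> 2)
#define FC_GET_FSTYPE(fc)	(((fc) & 0x00f0) >> 4)

int main(void)
{
	unsigned short fc = 0x0080;	/* type 0 (mgmt), subtype 8 (beacon) */

	printf("ver=%d type=%d subtype=%d\n",
	       FC_GET_PRVER(fc), FC_GET_FTYPE(fc), FC_GET_FSTYPE(fc));
	return 0;
}
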
diff --git a/drivers/staging/vt6655/80211mgr.c b/drivers/staging/vt6655/80211mgr.c
deleted file mode 100644
index 7d2c6472ec9..00000000000
--- a/drivers/staging/vt6655/80211mgr.c
+++ /dev/null
@@ -1,1019 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: 80211mgr.c
- *
- * Purpose: Handles the 802.11 management support functions
- *
- * Author: Lyndon Chen
- *
- * Date: May 8, 2002
- *
- * Functions:
- * vMgrEncodeBeacon - Encode the Beacon frame
- * vMgrDecodeBeacon - Decode the Beacon frame
- * vMgrEncodeIBSSATIM - Encode the IBSS ATIM frame
- * vMgrDecodeIBSSATIM - Decode the IBSS ATIM frame
- * vMgrEncodeDisassociation - Encode the Disassociation frame
- * vMgrDecodeDisassociation - Decode the Disassociation frame
- * vMgrEncodeAssocRequest - Encode the Association request frame
- * vMgrDecodeAssocRequest - Decode the Association request frame
- * vMgrEncodeAssocResponse - Encode the Association response frame
- * vMgrDecodeAssocResponse - Decode the Association response frame
- * vMgrEncodeReAssocRequest - Encode the ReAssociation request frame
- * vMgrDecodeReAssocRequest - Decode the ReAssociation request frame
- * vMgrEncodeProbeRequest - Encode the Probe request frame
- * vMgrDecodeProbeRequest - Decode the Probe request frame
- * vMgrEncodeProbeResponse - Encode the Probe response frame
- * vMgrDecodeProbeResponse - Decode the Probe response frame
- * vMgrEncodeAuthen - Encode the Authentication frame
- * vMgrDecodeAuthen - Decode the Authentication frame
- * vMgrEncodeDeauthen - Encode the DeAuthentication frame
- * vMgrDecodeDeauthen - Decode the DeAuthentication frame
- * vMgrEncodeReassocResponse - Encode the Reassociation response frame
- * vMgrDecodeReassocResponse - Decode the Reassociation response frame
- *
- * Revision History:
- *
- */
-
-#include "tmacro.h"
-#include "tether.h"
-#include "80211mgr.h"
-#include "80211hdr.h"
-#include "device.h"
-#include "wpa.h"
-
-/*--------------------- Static Definitions -------------------------*/
-
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Functions --------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-/*+
- *
- * Routine Description:
- * Encode Beacon frame body offset
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrEncodeBeacon(
- PWLAN_FR_BEACON pFrame
-)
-{
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
-
- /* Fixed Fields */
- pFrame->pqwTimestamp = (__le64 *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_BEACON_OFF_TS);
- pFrame->pwBeaconInterval = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_BEACON_OFF_BCN_INT);
- pFrame->pwCapInfo = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_BEACON_OFF_CAPINFO);
-
- pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_BEACON_OFF_SSID;
-}
-
-/*+
- *
- * Routine Description:
- * Decode Beacon frame body offset
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrDecodeBeacon(
- PWLAN_FR_BEACON pFrame
-)
-{
- PWLAN_IE pItem;
-
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
-
- /* Fixed Fields */
- pFrame->pqwTimestamp = (__le64 *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_BEACON_OFF_TS);
- pFrame->pwBeaconInterval = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_BEACON_OFF_BCN_INT);
- pFrame->pwCapInfo = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_BEACON_OFF_CAPINFO);
-
- /* Information elements */
- pItem = (PWLAN_IE)((unsigned char *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))) +
- WLAN_BEACON_OFF_SSID);
- while (((unsigned char *)pItem) < (pFrame->pBuf + pFrame->len)) {
- switch (pItem->byElementID) {
- case WLAN_EID_SSID:
- if (pFrame->pSSID == NULL)
- pFrame->pSSID = (PWLAN_IE_SSID)pItem;
- break;
- case WLAN_EID_SUPP_RATES:
- if (pFrame->pSuppRates == NULL)
- pFrame->pSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
- break;
- case WLAN_EID_FH_PARMS:
- /* pFrame->pFHParms = (PWLAN_IE_FH_PARMS)pItem; */
- break;
- case WLAN_EID_DS_PARMS:
- if (pFrame->pDSParms == NULL)
- pFrame->pDSParms = (PWLAN_IE_DS_PARMS)pItem;
- break;
- case WLAN_EID_CF_PARMS:
- if (pFrame->pCFParms == NULL)
- pFrame->pCFParms = (PWLAN_IE_CF_PARMS)pItem;
- break;
- case WLAN_EID_IBSS_PARMS:
- if (pFrame->pIBSSParms == NULL)
- pFrame->pIBSSParms = (PWLAN_IE_IBSS_PARMS)pItem;
- break;
- case WLAN_EID_TIM:
- if (pFrame->pTIM == NULL)
- pFrame->pTIM = (PWLAN_IE_TIM)pItem;
- break;
-
- case WLAN_EID_RSN:
- if (pFrame->pRSN == NULL)
- pFrame->pRSN = (PWLAN_IE_RSN)pItem;
- break;
- case WLAN_EID_RSN_WPA:
- if (pFrame->pRSNWPA == NULL) {
- if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == true)
- pFrame->pRSNWPA =
- (PWLAN_IE_RSN_EXT)pItem;
- }
- break;
-
- case WLAN_EID_ERP:
- if (pFrame->pERP == NULL)
- pFrame->pERP = (PWLAN_IE_ERP)pItem;
- break;
- case WLAN_EID_EXTSUPP_RATES:
- if (pFrame->pExtSuppRates == NULL)
- pFrame->pExtSuppRates =
- (PWLAN_IE_SUPP_RATES)pItem;
- break;
-
- case WLAN_EID_COUNTRY: /* 7 */
- if (pFrame->pIE_Country == NULL)
- pFrame->pIE_Country = (PWLAN_IE_COUNTRY)pItem;
- break;
-
- case WLAN_EID_PWR_CONSTRAINT: /* 32 */
- if (pFrame->pIE_PowerConstraint == NULL)
- pFrame->pIE_PowerConstraint =
- (PWLAN_IE_PW_CONST)pItem;
- break;
-
- case WLAN_EID_CH_SWITCH: /* 37 */
- if (pFrame->pIE_CHSW == NULL)
- pFrame->pIE_CHSW = (PWLAN_IE_CH_SW)pItem;
- break;
-
- case WLAN_EID_QUIET: /* 40 */
- if (pFrame->pIE_Quiet == NULL)
- pFrame->pIE_Quiet = (PWLAN_IE_QUIET)pItem;
- break;
-
- case WLAN_EID_IBSS_DFS:
- if (pFrame->pIE_IBSSDFS == NULL)
- pFrame->pIE_IBSSDFS = (PWLAN_IE_IBSS_DFS)pItem;
- break;
-
- default:
- pr_debug("Unrecognized EID=%dd in beacon decode\n",
- pItem->byElementID);
- break;
-
- }
- pItem = (PWLAN_IE)(((unsigned char *)pItem) + 2 + pItem->len);
- }
-}
-
-/*+
- *
- * Routine Description:
- * Encode IBSS ATIM
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrEncodeIBSSATIM(
- PWLAN_FR_IBSSATIM pFrame
-)
-{
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- pFrame->len = WLAN_HDR_ADDR3_LEN;
-}
-
-/*+
- *
- * Routine Description:
- * Decode IBSS ATIM
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrDecodeIBSSATIM(
- PWLAN_FR_IBSSATIM pFrame
-)
-{
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
-}
-
-/*+
- *
- * Routine Description:
- * Encode Disassociation
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrEncodeDisassociation(
- PWLAN_FR_DISASSOC pFrame
-)
-{
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
-
- /* Fixed Fields */
- pFrame->pwReason = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_DISASSOC_OFF_REASON);
- pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_DISASSOC_OFF_REASON +
- sizeof(*(pFrame->pwReason));
-}
-
-/*+
- *
- * Routine Description:
- * Decode Disassociation
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrDecodeDisassociation(
- PWLAN_FR_DISASSOC pFrame
-)
-{
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
-
- /* Fixed Fields */
- pFrame->pwReason = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_DISASSOC_OFF_REASON);
-}
-
-/*+
- *
- * Routine Description:
- * Encode Association Request
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrEncodeAssocRequest(
- PWLAN_FR_ASSOCREQ pFrame
-)
-{
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- /* Fixed Fields */
- pFrame->pwCapInfo = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_ASSOCREQ_OFF_CAP_INFO);
- pFrame->pwListenInterval = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_ASSOCREQ_OFF_LISTEN_INT);
- pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_ASSOCREQ_OFF_LISTEN_INT +
- sizeof(*(pFrame->pwListenInterval));
-}
-
-/*+
- *
- * Routine Description: (AP)
- * Decode Association Request
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrDecodeAssocRequest(
- PWLAN_FR_ASSOCREQ pFrame
-)
-{
- PWLAN_IE pItem;
-
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- /* Fixed Fields */
- pFrame->pwCapInfo = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_ASSOCREQ_OFF_CAP_INFO);
- pFrame->pwListenInterval = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_ASSOCREQ_OFF_LISTEN_INT);
-
- /* Information elements */
- pItem = (PWLAN_IE)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_ASSOCREQ_OFF_SSID);
-
- while (((unsigned char *)pItem) < (pFrame->pBuf + pFrame->len)) {
- switch (pItem->byElementID) {
- case WLAN_EID_SSID:
- if (pFrame->pSSID == NULL)
- pFrame->pSSID = (PWLAN_IE_SSID)pItem;
- break;
- case WLAN_EID_SUPP_RATES:
- if (pFrame->pSuppRates == NULL)
- pFrame->pSuppRates =
- (PWLAN_IE_SUPP_RATES)pItem;
- break;
-
- case WLAN_EID_RSN:
- if (pFrame->pRSN == NULL)
- pFrame->pRSN = (PWLAN_IE_RSN)pItem;
- break;
- case WLAN_EID_RSN_WPA:
- if (pFrame->pRSNWPA == NULL) {
- if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == true)
- pFrame->pRSNWPA =
- (PWLAN_IE_RSN_EXT)pItem;
- }
- break;
- case WLAN_EID_EXTSUPP_RATES:
- if (pFrame->pExtSuppRates == NULL)
- pFrame->pExtSuppRates =
- (PWLAN_IE_SUPP_RATES)pItem;
- break;
-
- default:
- pr_debug("Unrecognized EID=%dd in assocreq decode\n",
- pItem->byElementID);
- break;
- }
- pItem = (PWLAN_IE)(((unsigned char *)pItem) + 2 + pItem->len);
- }
-}
-
-/*+
- *
- * Routine Description: (AP)
- * Encode Association Response
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrEncodeAssocResponse(
- PWLAN_FR_ASSOCRESP pFrame
-)
-{
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
-
- /* Fixed Fields */
- pFrame->pwCapInfo = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_ASSOCRESP_OFF_CAP_INFO);
- pFrame->pwStatus = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_ASSOCRESP_OFF_STATUS);
- pFrame->pwAid = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_ASSOCRESP_OFF_AID);
- pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_ASSOCRESP_OFF_AID +
- sizeof(*(pFrame->pwAid));
-}
-
-/*+
- *
- * Routine Description:
- * Decode Association Response
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrDecodeAssocResponse(
- PWLAN_FR_ASSOCRESP pFrame
-)
-{
- PWLAN_IE pItem;
-
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
-
- /* Fixed Fields */
- pFrame->pwCapInfo = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_ASSOCRESP_OFF_CAP_INFO);
- pFrame->pwStatus = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_ASSOCRESP_OFF_STATUS);
- pFrame->pwAid = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_ASSOCRESP_OFF_AID);
-
- /* Information elements */
- pFrame->pSuppRates = (PWLAN_IE_SUPP_RATES)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_ASSOCRESP_OFF_SUPP_RATES);
-
- pItem = (PWLAN_IE)(pFrame->pSuppRates);
- pItem = (PWLAN_IE)(((unsigned char *)pItem) + 2 + pItem->len);
-
- if ((((unsigned char *)pItem) < (pFrame->pBuf + pFrame->len)) &&
- (pItem->byElementID == WLAN_EID_EXTSUPP_RATES)) {
- pFrame->pExtSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
- pr_debug("pFrame->pExtSuppRates=[%p]\n", pItem);
- } else {
- pFrame->pExtSuppRates = NULL;
- }
-}
-
-/*+
- *
- * Routine Description:
- * Encode Reassociation Request
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrEncodeReassocRequest(
- PWLAN_FR_REASSOCREQ pFrame
-)
-{
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
-
- /* Fixed Fields */
- pFrame->pwCapInfo = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_REASSOCREQ_OFF_CAP_INFO);
- pFrame->pwListenInterval = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_REASSOCREQ_OFF_LISTEN_INT);
- pFrame->pAddrCurrAP = (PIEEE_ADDR)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_REASSOCREQ_OFF_CURR_AP);
- pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_REASSOCREQ_OFF_CURR_AP +
- sizeof(*(pFrame->pAddrCurrAP));
-}
-
-/*+
- *
- * Routine Description: (AP)
- * Decode Reassociation Request
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrDecodeReassocRequest(
- PWLAN_FR_REASSOCREQ pFrame
-)
-{
- PWLAN_IE pItem;
-
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
-
- /* Fixed Fields */
- pFrame->pwCapInfo = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_REASSOCREQ_OFF_CAP_INFO);
- pFrame->pwListenInterval = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_REASSOCREQ_OFF_LISTEN_INT);
- pFrame->pAddrCurrAP = (PIEEE_ADDR)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_REASSOCREQ_OFF_CURR_AP);
-
- /* Information elements */
- pItem = (PWLAN_IE)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_REASSOCREQ_OFF_SSID);
-
- while (((unsigned char *)pItem) < (pFrame->pBuf + pFrame->len)) {
- switch (pItem->byElementID) {
- case WLAN_EID_SSID:
- if (pFrame->pSSID == NULL)
- pFrame->pSSID = (PWLAN_IE_SSID)pItem;
- break;
- case WLAN_EID_SUPP_RATES:
- if (pFrame->pSuppRates == NULL)
- pFrame->pSuppRates =
- (PWLAN_IE_SUPP_RATES)pItem;
- break;
-
- case WLAN_EID_RSN:
- if (pFrame->pRSN == NULL)
- pFrame->pRSN = (PWLAN_IE_RSN)pItem;
- break;
- case WLAN_EID_RSN_WPA:
- if (pFrame->pRSNWPA == NULL) {
- if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == true)
- pFrame->pRSNWPA =
- (PWLAN_IE_RSN_EXT)pItem;
- }
- break;
-
- case WLAN_EID_EXTSUPP_RATES:
- if (pFrame->pExtSuppRates == NULL)
- pFrame->pExtSuppRates =
- (PWLAN_IE_SUPP_RATES)pItem;
- break;
- default:
- pr_debug("Unrecognized EID=%dd in reassocreq decode\n",
- pItem->byElementID);
- break;
- }
- pItem = (PWLAN_IE)(((unsigned char *)pItem) + 2 + pItem->len);
- }
-}
-
-/*+
- *
- * Routine Description:
- * Encode Probe Request
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrEncodeProbeRequest(
- PWLAN_FR_PROBEREQ pFrame
-)
-{
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- pFrame->len = WLAN_HDR_ADDR3_LEN;
-}
-
-/*+
- *
- * Routine Description:
- * Decode Probe Request
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrDecodeProbeRequest(
- PWLAN_FR_PROBEREQ pFrame
-)
-{
- PWLAN_IE pItem;
-
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
-
- /* Information elements */
- pItem = (PWLAN_IE)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)));
-
- while (((unsigned char *)pItem) < (pFrame->pBuf + pFrame->len)) {
- switch (pItem->byElementID) {
- case WLAN_EID_SSID:
- if (pFrame->pSSID == NULL)
- pFrame->pSSID = (PWLAN_IE_SSID)pItem;
- break;
-
- case WLAN_EID_SUPP_RATES:
- if (pFrame->pSuppRates == NULL)
- pFrame->pSuppRates =
- (PWLAN_IE_SUPP_RATES)pItem;
- break;
-
- case WLAN_EID_EXTSUPP_RATES:
- if (pFrame->pExtSuppRates == NULL)
- pFrame->pExtSuppRates =
- (PWLAN_IE_SUPP_RATES)pItem;
- break;
-
- default:
- pr_debug("Bad EID=%dd in probereq\n",
- pItem->byElementID);
- break;
- }
-
- pItem = (PWLAN_IE)(((unsigned char *)pItem) + 2 + pItem->len);
- }
-}
-
-/*+
- *
- * Routine Description:
- * Encode Probe Response
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrEncodeProbeResponse(
- PWLAN_FR_PROBERESP pFrame
-)
-{
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
-
- /* Fixed Fields */
- pFrame->pqwTimestamp = (__le64 *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_PROBERESP_OFF_TS);
- pFrame->pwBeaconInterval = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_PROBERESP_OFF_BCN_INT);
- pFrame->pwCapInfo = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_PROBERESP_OFF_CAP_INFO);
-
- pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_PROBERESP_OFF_CAP_INFO +
- sizeof(*(pFrame->pwCapInfo));
-}
-
-/*+
- *
- * Routine Description:
- * Decode Probe Response
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrDecodeProbeResponse(
- PWLAN_FR_PROBERESP pFrame
-)
-{
- PWLAN_IE pItem;
-
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
-
- /* Fixed Fields */
- pFrame->pqwTimestamp = (__le64 *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_PROBERESP_OFF_TS);
- pFrame->pwBeaconInterval = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_PROBERESP_OFF_BCN_INT);
- pFrame->pwCapInfo = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_PROBERESP_OFF_CAP_INFO);
-
- /* Information elements */
- pItem = (PWLAN_IE)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_PROBERESP_OFF_SSID);
-
- while (((unsigned char *)pItem) < (pFrame->pBuf + pFrame->len)) {
- switch (pItem->byElementID) {
- case WLAN_EID_SSID:
- if (pFrame->pSSID == NULL)
- pFrame->pSSID = (PWLAN_IE_SSID)pItem;
- break;
- case WLAN_EID_SUPP_RATES:
- if (pFrame->pSuppRates == NULL)
- pFrame->pSuppRates =
- (PWLAN_IE_SUPP_RATES)pItem;
- break;
- case WLAN_EID_FH_PARMS:
- break;
- case WLAN_EID_DS_PARMS:
- if (pFrame->pDSParms == NULL)
- pFrame->pDSParms = (PWLAN_IE_DS_PARMS)pItem;
- break;
- case WLAN_EID_CF_PARMS:
- if (pFrame->pCFParms == NULL)
- pFrame->pCFParms = (PWLAN_IE_CF_PARMS)pItem;
- break;
- case WLAN_EID_IBSS_PARMS:
- if (pFrame->pIBSSParms == NULL)
- pFrame->pIBSSParms =
- (PWLAN_IE_IBSS_PARMS)pItem;
- break;
-
- case WLAN_EID_RSN:
- if (pFrame->pRSN == NULL)
- pFrame->pRSN = (PWLAN_IE_RSN)pItem;
- break;
- case WLAN_EID_RSN_WPA:
- if (pFrame->pRSNWPA == NULL) {
- if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == true)
- pFrame->pRSNWPA =
- (PWLAN_IE_RSN_EXT)pItem;
- }
- break;
- case WLAN_EID_ERP:
- if (pFrame->pERP == NULL)
- pFrame->pERP = (PWLAN_IE_ERP)pItem;
- break;
- case WLAN_EID_EXTSUPP_RATES:
- if (pFrame->pExtSuppRates == NULL)
- pFrame->pExtSuppRates =
- (PWLAN_IE_SUPP_RATES)pItem;
- break;
-
- case WLAN_EID_COUNTRY: /* 7 */
- if (pFrame->pIE_Country == NULL)
- pFrame->pIE_Country = (PWLAN_IE_COUNTRY)pItem;
- break;
-
- case WLAN_EID_PWR_CONSTRAINT: /* 32 */
- if (pFrame->pIE_PowerConstraint == NULL)
- pFrame->pIE_PowerConstraint =
- (PWLAN_IE_PW_CONST)pItem;
- break;
-
- case WLAN_EID_CH_SWITCH: /* 37 */
- if (pFrame->pIE_CHSW == NULL)
- pFrame->pIE_CHSW = (PWLAN_IE_CH_SW)pItem;
- break;
-
- case WLAN_EID_QUIET: /* 40 */
- if (pFrame->pIE_Quiet == NULL)
- pFrame->pIE_Quiet = (PWLAN_IE_QUIET)pItem;
- break;
-
- case WLAN_EID_IBSS_DFS:
- if (pFrame->pIE_IBSSDFS == NULL)
- pFrame->pIE_IBSSDFS = (PWLAN_IE_IBSS_DFS)pItem;
- break;
-
- default:
- pr_debug("Bad EID=%dd in proberesp\n",
- pItem->byElementID);
- break;
- }
-
- pItem = (PWLAN_IE)(((unsigned char *)pItem) + 2 + pItem->len);
- }
-}
-
-/*+
- *
- * Routine Description:
- * Encode Authentication frame
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrEncodeAuthen(
- PWLAN_FR_AUTHEN pFrame
-)
-{
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
-
- /* Fixed Fields */
- pFrame->pwAuthAlgorithm = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_AUTHEN_OFF_AUTH_ALG);
- pFrame->pwAuthSequence = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_AUTHEN_OFF_AUTH_SEQ);
- pFrame->pwStatus = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_AUTHEN_OFF_STATUS);
- pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_AUTHEN_OFF_STATUS +
- sizeof(*(pFrame->pwStatus));
-}
-
-/*+
- *
- * Routine Description:
- * Decode Authentication
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrDecodeAuthen(
- PWLAN_FR_AUTHEN pFrame
-)
-{
- PWLAN_IE pItem;
-
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
-
- /* Fixed Fields */
- pFrame->pwAuthAlgorithm = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_AUTHEN_OFF_AUTH_ALG);
- pFrame->pwAuthSequence = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_AUTHEN_OFF_AUTH_SEQ);
- pFrame->pwStatus = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_AUTHEN_OFF_STATUS);
-
- /* Information elements */
- pItem = (PWLAN_IE)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_AUTHEN_OFF_CHALLENGE);
-
- if (((unsigned char *)pItem) < (pFrame->pBuf + pFrame->len) &&
- pItem->byElementID == WLAN_EID_CHALLENGE)
- pFrame->pChallenge = (PWLAN_IE_CHALLENGE)pItem;
-}
-
-/*+
- *
- * Routine Description:
- * Encode Authentication
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrEncodeDeauthen(
- PWLAN_FR_DEAUTHEN pFrame
-)
-{
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
-
- /* Fixed Fields */
- pFrame->pwReason = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_DEAUTHEN_OFF_REASON);
- pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_DEAUTHEN_OFF_REASON +
- sizeof(*(pFrame->pwReason));
-}
-
-/*+
- *
- * Routine Description:
- * Decode Deauthentication
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrDecodeDeauthen(
- PWLAN_FR_DEAUTHEN pFrame
-)
-{
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
-
- /* Fixed Fields */
- pFrame->pwReason = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_DEAUTHEN_OFF_REASON);
-}
-
-/*+
- *
- * Routine Description: (AP)
- * Encode Reassociation Response
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrEncodeReassocResponse(
- PWLAN_FR_REASSOCRESP pFrame
-)
-{
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
-
- /* Fixed Fields */
- pFrame->pwCapInfo = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_REASSOCRESP_OFF_CAP_INFO);
- pFrame->pwStatus = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_REASSOCRESP_OFF_STATUS);
- pFrame->pwAid = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_REASSOCRESP_OFF_AID);
-
- pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_REASSOCRESP_OFF_AID +
- sizeof(*(pFrame->pwAid));
-}
-
-/*+
- *
- * Routine Description:
- * Decode Reassociation Response
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrDecodeReassocResponse(
- PWLAN_FR_REASSOCRESP pFrame
-)
-{
- PWLAN_IE pItem;
-
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
-
- /* Fixed Fields */
- pFrame->pwCapInfo = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_REASSOCRESP_OFF_CAP_INFO);
- pFrame->pwStatus = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_REASSOCRESP_OFF_STATUS);
- pFrame->pwAid = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_REASSOCRESP_OFF_AID);
-
- /* Information elements */
- pFrame->pSuppRates = (PWLAN_IE_SUPP_RATES)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_REASSOCRESP_OFF_SUPP_RATES);
-
- pItem = (PWLAN_IE)(pFrame->pSuppRates);
- pItem = (PWLAN_IE)(((unsigned char *)pItem) + 2 + pItem->len);
-
- if ((((unsigned char *)pItem) < (pFrame->pBuf + pFrame->len)) &&
- (pItem->byElementID == WLAN_EID_EXTSUPP_RATES)) {
- pFrame->pExtSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
- }
-}
diff --git a/drivers/staging/vt6655/80211mgr.h b/drivers/staging/vt6655/80211mgr.h
deleted file mode 100644
index d462a8af087..00000000000
--- a/drivers/staging/vt6655/80211mgr.h
+++ /dev/null
@@ -1,725 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: 80211mgr.h
- *
- * Purpose: 802.11 management frames pre-defines.
- *
- *
- * Author: Lyndon Chen
- *
- * Date: May 8, 2002
- *
- */
-
-#ifndef __80211MGR_H__
-#define __80211MGR_H__
-
-#include <linux/types.h>
-#include "linux/ieee80211.h"
-
-#include "ttype.h"
-#include "80211hdr.h"
-
-#define WLAN_MIN_ARRAY 1
-
-/* Information Element ID value */
-#define WLAN_EID_FH_PARMS 2
-#define WLAN_EID_DS_PARMS 3
-#define WLAN_EID_CF_PARMS 4
-#define WLAN_EID_IBSS_PARMS 6
-#define WLAN_EID_TPC_REQ 34
-#define WLAN_EID_TPC_REP 35
-#define WLAN_EID_SUPP_CH 36
-#define WLAN_EID_CH_SWITCH 37
-#define WLAN_EID_MEASURE_REQ 38
-#define WLAN_EID_MEASURE_REP 39
-#define WLAN_EID_QUIET 40
-#define WLAN_EID_IBSS_DFS 41
-#define WLAN_EID_ERP 42
-/* reference 802.11i 7.3.2 table 20 */
-#define WLAN_EID_EXTSUPP_RATES 50
-/* reference WiFi WPA spec. */
-#define WLAN_EID_RSN_WPA 221
-
-#define WLAN_EID_ERP_NONERP_PRESENT 0x01
-#define WLAN_EID_ERP_USE_PROTECTION 0x02
-#define WLAN_EID_ERP_BARKER_MODE 0x04
-
-/* Reason Codes */
-#define WLAN_MGMT_REASON_RSVD 0
-#define WLAN_MGMT_REASON_UNSPEC 1
-#define WLAN_MGMT_REASON_PRIOR_AUTH_INVALID 2
-#define WLAN_MGMT_REASON_DEAUTH_LEAVING 3
-#define WLAN_MGMT_REASON_DISASSOC_INACTIVE 4
-#define WLAN_MGMT_REASON_DISASSOC_AP_BUSY 5
-#define WLAN_MGMT_REASON_CLASS2_NONAUTH 6
-#define WLAN_MGMT_REASON_CLASS3_NONASSOC 7
-#define WLAN_MGMT_REASON_DISASSOC_STA_HASLEFT 8
-#define WLAN_MGMT_REASON_CANT_ASSOC_NONAUTH 9
-#define WLAN_MGMT_REASON_DISASSOC_PWR_CAP_UNACCEPT 10
-#define WLAN_MGMT_REASON_DISASSOC_SUPP_CH_UNACCEPT 11
-#define WLAN_MGMT_REASON_INVALID_IE 13
-#define WLAN_MGMT_REASON_MIC_FAILURE 14
-#define WLAN_MGMT_REASON_4WAY_HANDSHAKE_TIMEOUT 15
-#define WLAN_MGMT_REASON_GRPKEY_UPDATE_TIMEOUT 16
-#define WLAN_MGMT_REASON_4WAY_INFO_DIFFERENT 17
-#define WLAN_MGMT_REASON_MULTCAST_CIPHER_INVALID 18
-#define WLAN_MGMT_REASON_UNCAST_CIPHER_INVALID 19
-#define WLAN_MGMT_REASON_AKMP_INVALID 20
-#define WLAN_MGMT_REASON_RSNE_UNSUPPORTED 21
-#define WLAN_MGMT_REASON_RSNE_CAP_INVALID 22
-#define WLAN_MGMT_REASON_80211X_AUTH_FAILED 23
-
-/* Status Codes */
-#define WLAN_MGMT_STATUS_SUCCESS 0
-#define WLAN_MGMT_STATUS_UNSPEC_FAILURE 1
-#define WLAN_MGMT_STATUS_CAPS_UNSUPPORTED 10
-#define WLAN_MGMT_STATUS_REASSOC_NO_ASSOC 11
-#define WLAN_MGMT_STATUS_ASSOC_DENIED_UNSPEC 12
-#define WLAN_MGMT_STATUS_UNSUPPORTED_AUTHALG 13
-#define WLAN_MGMT_STATUS_RX_AUTH_NOSEQ 14
-#define WLAN_MGMT_STATUS_CHALLENGE_FAIL 15
-#define WLAN_MGMT_STATUS_AUTH_TIMEOUT 16
-#define WLAN_MGMT_STATUS_ASSOC_DENIED_BUSY 17
-#define WLAN_MGMT_STATUS_ASSOC_DENIED_RATES 18
-#define WLAN_MGMT_STATUS_ASSOC_DENIED_SHORTPREAMBLE 19
-#define WLAN_MGMT_STATUS_ASSOC_DENIED_PBCC 20
-#define WLAN_MGMT_STATUS_ASSOC_DENIED_AGILITY 21
-
-/* reference 802.11h 7.3.1.9 */
-#define WLAN_MGMT_STATUS_ASSOC_REJECT_BCS_SPECTRUM_MNG 22
-#define WLAN_MGMT_STATUS_ASSOC_REJECT_BCS_PWR_CAP 23
-#define WLAN_MGMT_STATUS_ASSOC_REJECT_BCS_SUPP_CH 24
-/* reference 802.11g 7.3.1.9 */
-#define WLAN_MGMT_STATUS_SHORTSLOTTIME_UNSUPPORTED 25
-#define WLAN_MGMT_STATUS_DSSSOFDM_UNSUPPORTED 26
-/* reference 802.11i 3.7.1.9 table 19 */
-#define WLAN_MGMT_STATUS_INVALID_IE 40
-#define WLAN_MGMT_STATUS_GROUP_CIPHER_INVALID 41
-#define WLAN_MGMT_STATUS_PAIRWISE_CIPHER_INVALID 42
-#define WLAN_MGMT_STATUS_AKMP_INVALID 43
-#define WLAN_MGMT_STATUS_UNSUPPORT_RSN_IE_VER 44
-#define WLAN_MGMT_STATUS_INVALID_RSN_IE_CAP 45
-#define WLAN_MGMT_STATUS_CIPHER_REJECT 46
-
-/* Auth Algorithm */
-#define WLAN_AUTH_ALG_OPENSYSTEM 0
-#define WLAN_AUTH_ALG_SHAREDKEY 1
-
-/* Management Frame Field Offsets */
-/* Note: Not all fields are listed because of variable lengths. */
-/* Note: These offsets are from the start of the frame data */
-
-#define WLAN_BEACON_OFF_TS 0
-#define WLAN_BEACON_OFF_BCN_INT 8
-#define WLAN_BEACON_OFF_CAPINFO 10
-#define WLAN_BEACON_OFF_SSID 12
-
-#define WLAN_DISASSOC_OFF_REASON 0
-
-#define WLAN_ASSOCREQ_OFF_CAP_INFO 0
-#define WLAN_ASSOCREQ_OFF_LISTEN_INT 2
-#define WLAN_ASSOCREQ_OFF_SSID 4
-
-#define WLAN_ASSOCRESP_OFF_CAP_INFO 0
-#define WLAN_ASSOCRESP_OFF_STATUS 2
-#define WLAN_ASSOCRESP_OFF_AID 4
-#define WLAN_ASSOCRESP_OFF_SUPP_RATES 6
-
-#define WLAN_REASSOCREQ_OFF_CAP_INFO 0
-#define WLAN_REASSOCREQ_OFF_LISTEN_INT 2
-#define WLAN_REASSOCREQ_OFF_CURR_AP 4
-#define WLAN_REASSOCREQ_OFF_SSID 10
-
-#define WLAN_REASSOCRESP_OFF_CAP_INFO 0
-#define WLAN_REASSOCRESP_OFF_STATUS 2
-#define WLAN_REASSOCRESP_OFF_AID 4
-#define WLAN_REASSOCRESP_OFF_SUPP_RATES 6
-
-#define WLAN_PROBEREQ_OFF_SSID 0
-
-#define WLAN_PROBERESP_OFF_TS 0
-#define WLAN_PROBERESP_OFF_BCN_INT 8
-#define WLAN_PROBERESP_OFF_CAP_INFO 10
-#define WLAN_PROBERESP_OFF_SSID 12
-
-#define WLAN_AUTHEN_OFF_AUTH_ALG 0
-#define WLAN_AUTHEN_OFF_AUTH_SEQ 2
-#define WLAN_AUTHEN_OFF_STATUS 4
-#define WLAN_AUTHEN_OFF_CHALLENGE 6
-
-#define WLAN_DEAUTHEN_OFF_REASON 0
-
-/* Cipher Suite Selectors defined in 802.11i */
-#define WLAN_11i_CSS_USE_GROUP 0
-#define WLAN_11i_CSS_WEP40 1
-#define WLAN_11i_CSS_TKIP 2
-#define WLAN_11i_CSS_CCMP 4
-#define WLAN_11i_CSS_WEP104 5
-#define WLAN_11i_CSS_UNKNOWN 255
-
-/* Authentication and Key Management Suite Selectors defined in 802.11i */
-#define WLAN_11i_AKMSS_802_1X 1
-#define WLAN_11i_AKMSS_PSK 2
-#define WLAN_11i_AKMSS_UNKNOWN 255
-
-/* Measurement type definitions reference ieee 802.11h Table 20b */
-#define MEASURE_TYPE_BASIC 0
-#define MEASURE_TYPE_CCA 1
-#define MEASURE_TYPE_RPI 2
-
-/* Measurement request mode definitions reference ieee 802.11h Figure 46h */
-#define MEASURE_MODE_ENABLE 0x02
-#define MEASURE_MODE_REQ 0x04
-#define MEASURE_MODE_REP 0x08
-
-/* Measurement report mode definitions reference ieee 802.11h Figure 46m */
-#define MEASURE_MODE_LATE 0x01
-#define MEASURE_MODE_INCAPABLE 0x02
-#define MEASURE_MODE_REFUSED 0x04
-
-/* Information Element Types */
-
-#pragma pack(1)
-typedef struct tagWLAN_IE {
- unsigned char byElementID;
- unsigned char len;
-} __attribute__ ((__packed__))
-WLAN_IE, *PWLAN_IE;
-
-/* Service Set Identity (SSID) */
-#pragma pack(1)
-typedef struct tagWLAN_IE_SSID {
- unsigned char byElementID;
- unsigned char len;
- unsigned char abySSID[1];
-} __attribute__ ((__packed__))
-WLAN_IE_SSID, *PWLAN_IE_SSID;
-
-/* Supported Rates */
-#pragma pack(1)
-typedef struct tagWLAN_IE_SUPP_RATES {
- unsigned char byElementID;
- unsigned char len;
- unsigned char abyRates[1];
-} __attribute__ ((__packed__))
-WLAN_IE_SUPP_RATES, *PWLAN_IE_SUPP_RATES;
-
-/* FH Parameter Set */
-#pragma pack(1)
-typedef struct _WLAN_IE_FH_PARMS {
- unsigned char byElementID;
- unsigned char len;
- unsigned short wDwellTime;
- unsigned char byHopSet;
- unsigned char byHopPattern;
- unsigned char byHopIndex;
-} WLAN_IE_FH_PARMS, *PWLAN_IE_FH_PARMS;
-
-/* DS Parameter Set */
-#pragma pack(1)
-typedef struct tagWLAN_IE_DS_PARMS {
- unsigned char byElementID;
- unsigned char len;
- unsigned char byCurrChannel;
-} __attribute__ ((__packed__))
-WLAN_IE_DS_PARMS, *PWLAN_IE_DS_PARMS;
-
-/* CF Parameter Set */
-#pragma pack(1)
-typedef struct tagWLAN_IE_CF_PARMS {
- unsigned char byElementID;
- unsigned char len;
- unsigned char byCFPCount;
- unsigned char byCFPPeriod;
- unsigned short wCFPMaxDuration;
- unsigned short wCFPDurRemaining;
-} __attribute__ ((__packed__))
-WLAN_IE_CF_PARMS, *PWLAN_IE_CF_PARMS;
-
-/* TIM */
-#pragma pack(1)
-typedef struct tagWLAN_IE_TIM {
- unsigned char byElementID;
- unsigned char len;
- unsigned char byDTIMCount;
- unsigned char byDTIMPeriod;
- unsigned char byBitMapCtl;
- unsigned char byVirtBitMap[1];
-} __attribute__ ((__packed__))
-WLAN_IE_TIM, *PWLAN_IE_TIM;
-
-/* IBSS Parameter Set */
-#pragma pack(1)
-typedef struct tagWLAN_IE_IBSS_PARMS {
- unsigned char byElementID;
- unsigned char len;
- unsigned short wATIMWindow;
-} __attribute__ ((__packed__))
-WLAN_IE_IBSS_PARMS, *PWLAN_IE_IBSS_PARMS;
-
-/* Challenge Text */
-#pragma pack(1)
-typedef struct tagWLAN_IE_CHALLENGE {
- unsigned char byElementID;
- unsigned char len;
- unsigned char abyChallenge[1];
-} __attribute__ ((__packed__))
-WLAN_IE_CHALLENGE, *PWLAN_IE_CHALLENGE;
-
-#pragma pack(1)
-typedef struct tagWLAN_IE_RSN_EXT {
- unsigned char byElementID;
- unsigned char len;
- unsigned char abyOUI[4];
- unsigned short wVersion;
- unsigned char abyMulticast[4];
- unsigned short wPKCount;
- struct {
- unsigned char abyOUI[4];
- } PKSList[1]; /* the rest is variable so need to */
- /* overlay ieauth structure */
-} WLAN_IE_RSN_EXT, *PWLAN_IE_RSN_EXT;
-
-#pragma pack(1)
-typedef struct tagWLAN_IE_RSN_AUTH {
- unsigned short wAuthCount;
- struct {
- unsigned char abyOUI[4];
- } AuthKSList[1];
-} WLAN_IE_RSN_AUTH, *PWLAN_IE_RSN_AUTH;
-
-/* RSN Identity */
-#pragma pack(1)
-typedef struct tagWLAN_IE_RSN {
- unsigned char byElementID;
- unsigned char len;
- unsigned short wVersion;
- unsigned char abyRSN[WLAN_MIN_ARRAY];
-} WLAN_IE_RSN, *PWLAN_IE_RSN;
-
-/* ERP */
-#pragma pack(1)
-typedef struct tagWLAN_IE_ERP {
- unsigned char byElementID;
- unsigned char len;
- unsigned char byContext;
-} __attribute__ ((__packed__))
-WLAN_IE_ERP, *PWLAN_IE_ERP;
-
-#pragma pack(1)
-typedef struct _MEASEURE_REQ {
- unsigned char byChannel;
- unsigned char abyStartTime[8];
- unsigned char abyDuration[2];
-} MEASEURE_REQ, *PMEASEURE_REQ,
- MEASEURE_REQ_BASIC, *PMEASEURE_REQ_BASIC,
- MEASEURE_REQ_CCA, *PMEASEURE_REQ_CCA,
- MEASEURE_REQ_RPI, *PMEASEURE_REQ_RPI;
-
-typedef struct _MEASEURE_REP_BASIC {
- unsigned char byChannel;
- unsigned char abyStartTime[8];
- unsigned char abyDuration[2];
- unsigned char byMap;
-} MEASEURE_REP_BASIC, *PMEASEURE_REP_BASIC;
-
-typedef struct _MEASEURE_REP_CCA {
- unsigned char byChannel;
- unsigned char abyStartTime[8];
- unsigned char abyDuration[2];
- unsigned char byCCABusyFraction;
-} MEASEURE_REP_CCA, *PMEASEURE_REP_CCA;
-
-typedef struct _MEASEURE_REP_RPI {
- unsigned char byChannel;
- unsigned char abyStartTime[8];
- unsigned char abyDuration[2];
- unsigned char abyRPIdensity[8];
-} MEASEURE_REP_RPI, *PMEASEURE_REP_RPI;
-
-typedef union _MEASEURE_REP {
- MEASEURE_REP_BASIC sBasic;
- MEASEURE_REP_CCA sCCA;
- MEASEURE_REP_RPI sRPI;
-} MEASEURE_REP, *PMEASEURE_REP;
-
-typedef struct _WLAN_IE_MEASURE_REQ {
- unsigned char byElementID;
- unsigned char len;
- unsigned char byToken;
- unsigned char byMode;
- unsigned char byType;
- MEASEURE_REQ sReq;
-} WLAN_IE_MEASURE_REQ, *PWLAN_IE_MEASURE_REQ;
-
-typedef struct _WLAN_IE_MEASURE_REP {
- unsigned char byElementID;
- unsigned char len;
- unsigned char byToken;
- unsigned char byMode;
- unsigned char byType;
- MEASEURE_REP sRep;
-} WLAN_IE_MEASURE_REP, *PWLAN_IE_MEASURE_REP;
-
-typedef struct _WLAN_IE_CH_SW {
- unsigned char byElementID;
- unsigned char len;
- unsigned char byMode;
- unsigned char byChannel;
- unsigned char byCount;
-} WLAN_IE_CH_SW, *PWLAN_IE_CH_SW;
-
-typedef struct _WLAN_IE_QUIET {
- unsigned char byElementID;
- unsigned char len;
- unsigned char byQuietCount;
- unsigned char byQuietPeriod;
- unsigned char abyQuietDuration[2];
- unsigned char abyQuietOffset[2];
-} WLAN_IE_QUIET, *PWLAN_IE_QUIET;
-
-typedef struct _WLAN_IE_COUNTRY {
- unsigned char byElementID;
- unsigned char len;
- unsigned char abyCountryString[3];
- unsigned char abyCountryInfo[3];
-} WLAN_IE_COUNTRY, *PWLAN_IE_COUNTRY;
-
-typedef struct _WLAN_IE_PW_CONST {
- unsigned char byElementID;
- unsigned char len;
- unsigned char byPower;
-} WLAN_IE_PW_CONST, *PWLAN_IE_PW_CONST;
-
-typedef struct _WLAN_IE_PW_CAP {
- unsigned char byElementID;
- unsigned char len;
- unsigned char byMinPower;
- unsigned char byMaxPower;
-} WLAN_IE_PW_CAP, *PWLAN_IE_PW_CAP;
-
-typedef struct _WLAN_IE_SUPP_CH {
- unsigned char byElementID;
- unsigned char len;
- unsigned char abyChannelTuple[2];
-} WLAN_IE_SUPP_CH, *PWLAN_IE_SUPP_CH;
-
-typedef struct _WLAN_IE_TPC_REQ {
- unsigned char byElementID;
- unsigned char len;
-} WLAN_IE_TPC_REQ, *PWLAN_IE_TPC_REQ;
-
-typedef struct _WLAN_IE_TPC_REP {
- unsigned char byElementID;
- unsigned char len;
- unsigned char byTxPower;
- unsigned char byLinkMargin;
-} WLAN_IE_TPC_REP, *PWLAN_IE_TPC_REP;
-
-typedef struct _WLAN_IE_IBSS_DFS {
- unsigned char byElementID;
- unsigned char len;
- unsigned char abyDFSOwner[6];
- unsigned char byDFSRecovery;
- unsigned char abyChannelMap[2];
-} WLAN_IE_IBSS_DFS, *PWLAN_IE_IBSS_DFS;
-
-#pragma pack()
-
-/* Frame Types */
-/* prototype structure, all mgmt frame types will start with these members */
-typedef struct tagWLAN_FR_MGMT {
- unsigned int uType;
- unsigned int len;
- unsigned char *pBuf;
- PUWLAN_80211HDR pHdr;
-} WLAN_FR_MGMT, *PWLAN_FR_MGMT;
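/*
 * Illustrative note (not in the original header): because every tagWLAN_FR_*
 * type below begins with these same four members, a frame structure can be
 * inspected generically through a PWLAN_FR_MGMT cast.  A hypothetical helper:
 */
static inline unsigned int wlan_fr_frame_len(void *pFrame)
{
	return ((PWLAN_FR_MGMT)pFrame)->len;	/* common-prefix access */
}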
-
-/* Beacon frame */
-typedef struct tagWLAN_FR_BEACON {
- unsigned int uType;
- unsigned int len;
- unsigned char *pBuf;
- PUWLAN_80211HDR pHdr;
- __le64 *pqwTimestamp;
- unsigned short *pwBeaconInterval;
- unsigned short *pwCapInfo;
- PWLAN_IE_SSID pSSID;
- PWLAN_IE_SUPP_RATES pSuppRates;
- PWLAN_IE_DS_PARMS pDSParms;
- PWLAN_IE_CF_PARMS pCFParms;
- PWLAN_IE_TIM pTIM;
- PWLAN_IE_IBSS_PARMS pIBSSParms;
- PWLAN_IE_RSN pRSN;
- PWLAN_IE_RSN_EXT pRSNWPA;
- PWLAN_IE_ERP pERP;
- PWLAN_IE_SUPP_RATES pExtSuppRates;
- PWLAN_IE_COUNTRY pIE_Country;
- PWLAN_IE_PW_CONST pIE_PowerConstraint;
- PWLAN_IE_CH_SW pIE_CHSW;
- PWLAN_IE_IBSS_DFS pIE_IBSSDFS;
- PWLAN_IE_QUIET pIE_Quiet;
-} WLAN_FR_BEACON, *PWLAN_FR_BEACON;
-
-/* IBSS ATIM frame */
-typedef struct tagWLAN_FR_IBSSATIM {
- unsigned int uType;
- unsigned int len;
- unsigned char *pBuf;
- PUWLAN_80211HDR pHdr;
-} WLAN_FR_IBSSATIM, *PWLAN_FR_IBSSATIM;
-
-/* Disassociation */
-typedef struct tagWLAN_FR_DISASSOC {
- unsigned int uType;
- unsigned int len;
- unsigned char *pBuf;
- PUWLAN_80211HDR pHdr;
- unsigned short *pwReason;
-} WLAN_FR_DISASSOC, *PWLAN_FR_DISASSOC;
-
-/* Association Request */
-typedef struct tagWLAN_FR_ASSOCREQ {
- unsigned int uType;
- unsigned int len;
- unsigned char *pBuf;
- PUWLAN_80211HDR pHdr;
- unsigned short *pwCapInfo;
- unsigned short *pwListenInterval;
- PWLAN_IE_SSID pSSID;
- PWLAN_IE_SUPP_RATES pSuppRates;
- PWLAN_IE_RSN pRSN;
- PWLAN_IE_RSN_EXT pRSNWPA;
- PWLAN_IE_SUPP_RATES pExtSuppRates;
- PWLAN_IE_PW_CAP pCurrPowerCap;
- PWLAN_IE_SUPP_CH pCurrSuppCh;
-} WLAN_FR_ASSOCREQ, *PWLAN_FR_ASSOCREQ;
-
-/* Association Response */
-typedef struct tagWLAN_FR_ASSOCRESP {
- unsigned int uType;
- unsigned int len;
- unsigned char *pBuf;
- PUWLAN_80211HDR pHdr;
- unsigned short *pwCapInfo;
- unsigned short *pwStatus;
- unsigned short *pwAid;
- PWLAN_IE_SUPP_RATES pSuppRates;
- PWLAN_IE_SUPP_RATES pExtSuppRates;
-} WLAN_FR_ASSOCRESP, *PWLAN_FR_ASSOCRESP;
-
-/* Reassociation Request */
-typedef struct tagWLAN_FR_REASSOCREQ {
- unsigned int uType;
- unsigned int len;
- unsigned char *pBuf;
- PUWLAN_80211HDR pHdr;
- unsigned short *pwCapInfo;
- unsigned short *pwListenInterval;
- PIEEE_ADDR pAddrCurrAP;
- PWLAN_IE_SSID pSSID;
- PWLAN_IE_SUPP_RATES pSuppRates;
- PWLAN_IE_RSN pRSN;
- PWLAN_IE_RSN_EXT pRSNWPA;
- PWLAN_IE_SUPP_RATES pExtSuppRates;
-} WLAN_FR_REASSOCREQ, *PWLAN_FR_REASSOCREQ;
-
-/* Reassociation Response */
-typedef struct tagWLAN_FR_REASSOCRESP {
- unsigned int uType;
- unsigned int len;
- unsigned char *pBuf;
- PUWLAN_80211HDR pHdr;
- unsigned short *pwCapInfo;
- unsigned short *pwStatus;
- unsigned short *pwAid;
- PWLAN_IE_SUPP_RATES pSuppRates;
- PWLAN_IE_SUPP_RATES pExtSuppRates;
-} WLAN_FR_REASSOCRESP, *PWLAN_FR_REASSOCRESP;
-
-/* Probe Request */
-typedef struct tagWLAN_FR_PROBEREQ {
- unsigned int uType;
- unsigned int len;
- unsigned char *pBuf;
- PUWLAN_80211HDR pHdr;
- PWLAN_IE_SSID pSSID;
- PWLAN_IE_SUPP_RATES pSuppRates;
- PWLAN_IE_SUPP_RATES pExtSuppRates;
-} WLAN_FR_PROBEREQ, *PWLAN_FR_PROBEREQ;
-
-/* Probe Response */
-typedef struct tagWLAN_FR_PROBERESP {
- unsigned int uType;
- unsigned int len;
- unsigned char *pBuf;
- PUWLAN_80211HDR pHdr;
- __le64 *pqwTimestamp;
- unsigned short *pwBeaconInterval;
- unsigned short *pwCapInfo;
- PWLAN_IE_SSID pSSID;
- PWLAN_IE_SUPP_RATES pSuppRates;
- PWLAN_IE_DS_PARMS pDSParms;
- PWLAN_IE_CF_PARMS pCFParms;
- PWLAN_IE_IBSS_PARMS pIBSSParms;
- PWLAN_IE_RSN pRSN;
- PWLAN_IE_RSN_EXT pRSNWPA;
- PWLAN_IE_ERP pERP;
- PWLAN_IE_SUPP_RATES pExtSuppRates;
- PWLAN_IE_COUNTRY pIE_Country;
- PWLAN_IE_PW_CONST pIE_PowerConstraint;
- PWLAN_IE_CH_SW pIE_CHSW;
- PWLAN_IE_IBSS_DFS pIE_IBSSDFS;
- PWLAN_IE_QUIET pIE_Quiet;
-} WLAN_FR_PROBERESP, *PWLAN_FR_PROBERESP;
-
-/* Authentication */
-typedef struct tagWLAN_FR_AUTHEN {
- unsigned int uType;
- unsigned int len;
- unsigned char *pBuf;
- PUWLAN_80211HDR pHdr;
- unsigned short *pwAuthAlgorithm;
- unsigned short *pwAuthSequence;
- unsigned short *pwStatus;
- PWLAN_IE_CHALLENGE pChallenge;
-} WLAN_FR_AUTHEN, *PWLAN_FR_AUTHEN;
-
-/* Deauthentication */
-typedef struct tagWLAN_FR_DEAUTHEN {
- unsigned int uType;
- unsigned int len;
- unsigned char *pBuf;
- PUWLAN_80211HDR pHdr;
- unsigned short *pwReason;
-} WLAN_FR_DEAUTHEN, *PWLAN_FR_DEAUTHEN;
-
-void
-vMgrEncodeBeacon(
- PWLAN_FR_BEACON pFrame
-);
-
-void
-vMgrDecodeBeacon(
- PWLAN_FR_BEACON pFrame
-);
-
-void
-vMgrEncodeIBSSATIM(
- PWLAN_FR_IBSSATIM pFrame
-);
-
-void
-vMgrDecodeIBSSATIM(
- PWLAN_FR_IBSSATIM pFrame
-);
-
-void
-vMgrEncodeDisassociation(
- PWLAN_FR_DISASSOC pFrame
-);
-
-void
-vMgrDecodeDisassociation(
- PWLAN_FR_DISASSOC pFrame
-);
-
-void
-vMgrEncodeAssocRequest(
- PWLAN_FR_ASSOCREQ pFrame
-);
-
-void
-vMgrDecodeAssocRequest(
- PWLAN_FR_ASSOCREQ pFrame
-);
-
-void
-vMgrEncodeAssocResponse(
- PWLAN_FR_ASSOCRESP pFrame
-);
-
-void
-vMgrDecodeAssocResponse(
- PWLAN_FR_ASSOCRESP pFrame
-);
-
-void
-vMgrEncodeReassocRequest(
- PWLAN_FR_REASSOCREQ pFrame
-);
-
-void
-vMgrDecodeReassocRequest(
- PWLAN_FR_REASSOCREQ pFrame
-);
-
-void
-vMgrEncodeProbeRequest(
- PWLAN_FR_PROBEREQ pFrame
-);
-
-void
-vMgrDecodeProbeRequest(
- PWLAN_FR_PROBEREQ pFrame
-);
-
-void
-vMgrEncodeProbeResponse(
- PWLAN_FR_PROBERESP pFrame
-);
-
-void
-vMgrDecodeProbeResponse(
- PWLAN_FR_PROBERESP pFrame
-);
-
-void
-vMgrEncodeAuthen(
- PWLAN_FR_AUTHEN pFrame
-);
-
-void
-vMgrDecodeAuthen(
- PWLAN_FR_AUTHEN pFrame
-);
-
-void
-vMgrEncodeDeauthen(
- PWLAN_FR_DEAUTHEN pFrame
-);
-
-void
-vMgrDecodeDeauthen(
- PWLAN_FR_DEAUTHEN pFrame
-);
-
-void
-vMgrEncodeReassocResponse(
- PWLAN_FR_REASSOCRESP pFrame
-);
-
-void
-vMgrDecodeReassocResponse(
- PWLAN_FR_REASSOCRESP pFrame
-);
-
-#endif/* __80211MGR_H__ */
diff --git a/drivers/staging/vt6655/IEEE11h.c b/drivers/staging/vt6655/IEEE11h.c
deleted file mode 100644
index 180a27cc74d..00000000000
--- a/drivers/staging/vt6655/IEEE11h.c
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * Copyright (c) 1996, 2005 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: IEEE11h.c
- *
- * Purpose:
- *
- * Functions:
- *
- * Revision History:
- *
- * Author: Yiching Chen
- *
- * Date: Mar. 31, 2005
- *
- */
-
-#include "ttype.h"
-#include "tmacro.h"
-#include "tether.h"
-#include "IEEE11h.h"
-#include "device.h"
-#include "wmgr.h"
-#include "rxtx.h"
-#include "channel.h"
-
-/*--------------------- Static Definitions -------------------------*/
-
-#pragma pack(1)
-
-typedef struct _WLAN_FRAME_ACTION {
- WLAN_80211HDR_A3 Header;
- unsigned char byCategory;
- unsigned char byAction;
- unsigned char abyVars[1];
-} WLAN_FRAME_ACTION, *PWLAN_FRAME_ACTION;
-
-typedef struct _WLAN_FRAME_MSRREQ {
- WLAN_80211HDR_A3 Header;
- unsigned char byCategory;
- unsigned char byAction;
- unsigned char byDialogToken;
- WLAN_IE_MEASURE_REQ sMSRReqEIDs[1];
-} WLAN_FRAME_MSRREQ, *PWLAN_FRAME_MSRREQ;
-
-typedef struct _WLAN_FRAME_MSRREP {
- WLAN_80211HDR_A3 Header;
- unsigned char byCategory;
- unsigned char byAction;
- unsigned char byDialogToken;
- WLAN_IE_MEASURE_REP sMSRRepEIDs[1];
-} WLAN_FRAME_MSRREP, *PWLAN_FRAME_MSRREP;
-
-typedef struct _WLAN_FRAME_TPCREQ {
- WLAN_80211HDR_A3 Header;
- unsigned char byCategory;
- unsigned char byAction;
- unsigned char byDialogToken;
- WLAN_IE_TPC_REQ sTPCReqEIDs;
-} WLAN_FRAME_TPCREQ, *PWLAN_FRAME_TPCREQ;
-
-typedef struct _WLAN_FRAME_TPCREP {
- WLAN_80211HDR_A3 Header;
- unsigned char byCategory;
- unsigned char byAction;
- unsigned char byDialogToken;
- WLAN_IE_TPC_REP sTPCRepEIDs;
-} WLAN_FRAME_TPCREP, *PWLAN_FRAME_TPCREP;
-
-#pragma pack()
-
-/* action field reference ieee 802.11h Table 20e */
-#define ACTION_MSRREQ 0
-#define ACTION_MSRREP 1
-#define ACTION_TPCREQ 2
-#define ACTION_TPCREP 3
-#define ACTION_CHSW 4
-
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Variables --------------------------*/
-
-/*--------------------- Static Functions --------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-bool IEEE11hbMSRRepTx(void *pMgmtHandle)
-{
- PSMgmtObject pMgmt = (PSMgmtObject) pMgmtHandle;
- PWLAN_FRAME_MSRREP pMSRRep = (PWLAN_FRAME_MSRREP)
- (pMgmt->abyCurrentMSRRep + sizeof(STxMgmtPacket));
- size_t uLength = 0;
- PSTxMgmtPacket pTxPacket = NULL;
-
- pTxPacket = (PSTxMgmtPacket)pMgmt->abyCurrentMSRRep;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_A3FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket +
- sizeof(STxMgmtPacket));
-
- pMSRRep->Header.wFrameCtl = (WLAN_SET_FC_FTYPE(WLAN_FTYPE_MGMT) |
- WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_ACTION)
-);
-
- memcpy(pMSRRep->Header.abyAddr1, ((PWLAN_FRAME_MSRREQ)
- (pMgmt->abyCurrentMSRReq))->Header.abyAddr2, WLAN_ADDR_LEN);
- memcpy(pMSRRep->Header.abyAddr2,
- CARDpGetCurrentAddress(pMgmt->pAdapter), WLAN_ADDR_LEN);
- memcpy(pMSRRep->Header.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
-
- pMSRRep->byCategory = 0;
- pMSRRep->byAction = 1;
- pMSRRep->byDialogToken = ((PWLAN_FRAME_MSRREQ)
- (pMgmt->abyCurrentMSRReq))->byDialogToken;
-
- uLength = pMgmt->uLengthOfRepEIDs + offsetof(WLAN_FRAME_MSRREP,
- sMSRRepEIDs);
-
- pTxPacket->cbMPDULen = uLength;
- pTxPacket->cbPayloadLen = uLength - WLAN_HDR_ADDR3_LEN;
- if (csMgmt_xmit(pMgmt->pAdapter, pTxPacket) != CMD_STATUS_PENDING)
- return false;
- return true;
-}
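/*
 * Illustrative note (assumption, not part of the original source): the
 * literal byCategory = 0 written above is the 802.11h spectrum-management
 * action category, and byAction = 1 corresponds to ACTION_MSRREP defined
 * earlier in this file, so the same two assignments could also be written
 * with the named constant:
 *
 *	pMSRRep->byCategory = 0;		  (spectrum management)
 *	pMSRRep->byAction   = ACTION_MSRREP;	  (measurement report)
 */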
diff --git a/drivers/staging/vt6655/IEEE11h.h b/drivers/staging/vt6655/IEEE11h.h
deleted file mode 100644
index 551922022b1..00000000000
--- a/drivers/staging/vt6655/IEEE11h.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 1996, 2005 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: IEEE11h.h
- *
- * Purpose: Defines the macros, types, and functions for dealing
- * with IEEE 802.11h.
- *
- * Author: Yiching Chen
- *
- * Date: Mar. 31, 2005
- *
- */
-
-#ifndef __IEEE11h_H__
-#define __IEEE11h_H__
-
-#include "ttype.h"
-#include "80211hdr.h"
-#include "80211mgr.h"
-
-bool IEEE11hbMSRRepTx(
- void *pMgmtHandle
-);
-
-#endif // __IEEE11h_H__
diff --git a/drivers/staging/vt6655/Kconfig b/drivers/staging/vt6655/Kconfig
index c3ba693a8ca..77cfc708c51 100644
--- a/drivers/staging/vt6655/Kconfig
+++ b/drivers/staging/vt6655/Kconfig
@@ -1,8 +1,6 @@
config VT6655
tristate "VIA Technologies VT6655 support"
- depends on PCI && WLAN && m
- select WIRELESS_EXT
- select WEXT_PRIV
+ depends on PCI && MAC80211 && m
---help---
This is a vendor-written driver for VIA VT6655.
diff --git a/drivers/staging/vt6655/Makefile b/drivers/staging/vt6655/Makefile
index f7544a6cb63..115b951bf0d 100644
--- a/drivers/staging/vt6655/Makefile
+++ b/drivers/staging/vt6655/Makefile
@@ -7,33 +7,12 @@ vt6655_stage-y += device_main.o \
channel.o \
mac.o \
baseband.o \
- wctl.o \
- 80211mgr.o \
- wcmd.o \
- wmgr.o \
- bssdb.o \
rxtx.o \
dpc.o \
power.o \
- datarate.o \
srom.o \
mib.o \
- rc4.o \
- tether.o \
- tcrc.o \
- ioctl.o \
- hostap.o \
- wpa.o \
key.o \
- tkip.o \
- michael.o \
- wroute.o \
- rf.o \
- iwctl.o \
- wpactl.o \
- wpa2.o \
- aes_ccmp.o \
- vntwifi.o \
- IEEE11h.o
+ rf.o
obj-$(CONFIG_VT6655) += vt6655_stage.o
diff --git a/drivers/staging/vt6655/aes_ccmp.c b/drivers/staging/vt6655/aes_ccmp.c
deleted file mode 100644
index 1dfcfcb3c69..00000000000
--- a/drivers/staging/vt6655/aes_ccmp.c
+++ /dev/null
@@ -1,374 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: aes_ccmp.c
- *
- * Purpose: AES_CCMP decryption
- *
- * Author: Warren Hsu
- *
- * Date: Feb 15, 2005
- *
- * Functions:
- * AESbGenCCMP - Parsing RX-packet
- *
- *
- * Revision History:
- *
- */
-
-#include "device.h"
-#include "80211hdr.h"
-#include "aes_ccmp.h"
-
-/*--------------------- Static Definitions -------------------------*/
-
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Variables --------------------------*/
-
-/*
- * SBOX Table
- */
-
-static unsigned char sbox_table[256] = {
- 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
- 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
- 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
- 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
- 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
- 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
- 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
- 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
- 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
- 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
- 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
- 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
- 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
- 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
- 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
- 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
-};
-
-static unsigned char dot2_table[256] = {
- 0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
- 0x20, 0x22, 0x24, 0x26, 0x28, 0x2a, 0x2c, 0x2e, 0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e,
- 0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e, 0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
- 0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e, 0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,
- 0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e, 0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
- 0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae, 0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,
- 0xc0, 0xc2, 0xc4, 0xc6, 0xc8, 0xca, 0xcc, 0xce, 0xd0, 0xd2, 0xd4, 0xd6, 0xd8, 0xda, 0xdc, 0xde,
- 0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee, 0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
- 0x1b, 0x19, 0x1f, 0x1d, 0x13, 0x11, 0x17, 0x15, 0x0b, 0x09, 0x0f, 0x0d, 0x03, 0x01, 0x07, 0x05,
- 0x3b, 0x39, 0x3f, 0x3d, 0x33, 0x31, 0x37, 0x35, 0x2b, 0x29, 0x2f, 0x2d, 0x23, 0x21, 0x27, 0x25,
- 0x5b, 0x59, 0x5f, 0x5d, 0x53, 0x51, 0x57, 0x55, 0x4b, 0x49, 0x4f, 0x4d, 0x43, 0x41, 0x47, 0x45,
- 0x7b, 0x79, 0x7f, 0x7d, 0x73, 0x71, 0x77, 0x75, 0x6b, 0x69, 0x6f, 0x6d, 0x63, 0x61, 0x67, 0x65,
- 0x9b, 0x99, 0x9f, 0x9d, 0x93, 0x91, 0x97, 0x95, 0x8b, 0x89, 0x8f, 0x8d, 0x83, 0x81, 0x87, 0x85,
- 0xbb, 0xb9, 0xbf, 0xbd, 0xb3, 0xb1, 0xb7, 0xb5, 0xab, 0xa9, 0xaf, 0xad, 0xa3, 0xa1, 0xa7, 0xa5,
- 0xdb, 0xd9, 0xdf, 0xdd, 0xd3, 0xd1, 0xd7, 0xd5, 0xcb, 0xc9, 0xcf, 0xcd, 0xc3, 0xc1, 0xc7, 0xc5,
- 0xfb, 0xf9, 0xff, 0xfd, 0xf3, 0xf1, 0xf7, 0xf5, 0xeb, 0xe9, 0xef, 0xed, 0xe3, 0xe1, 0xe7, 0xe5
-};
-
-static unsigned char dot3_table[256] = {
- 0x00, 0x03, 0x06, 0x05, 0x0c, 0x0f, 0x0a, 0x09, 0x18, 0x1b, 0x1e, 0x1d, 0x14, 0x17, 0x12, 0x11,
- 0x30, 0x33, 0x36, 0x35, 0x3c, 0x3f, 0x3a, 0x39, 0x28, 0x2b, 0x2e, 0x2d, 0x24, 0x27, 0x22, 0x21,
- 0x60, 0x63, 0x66, 0x65, 0x6c, 0x6f, 0x6a, 0x69, 0x78, 0x7b, 0x7e, 0x7d, 0x74, 0x77, 0x72, 0x71,
- 0x50, 0x53, 0x56, 0x55, 0x5c, 0x5f, 0x5a, 0x59, 0x48, 0x4b, 0x4e, 0x4d, 0x44, 0x47, 0x42, 0x41,
- 0xc0, 0xc3, 0xc6, 0xc5, 0xcc, 0xcf, 0xca, 0xc9, 0xd8, 0xdb, 0xde, 0xdd, 0xd4, 0xd7, 0xd2, 0xd1,
- 0xf0, 0xf3, 0xf6, 0xf5, 0xfc, 0xff, 0xfa, 0xf9, 0xe8, 0xeb, 0xee, 0xed, 0xe4, 0xe7, 0xe2, 0xe1,
- 0xa0, 0xa3, 0xa6, 0xa5, 0xac, 0xaf, 0xaa, 0xa9, 0xb8, 0xbb, 0xbe, 0xbd, 0xb4, 0xb7, 0xb2, 0xb1,
- 0x90, 0x93, 0x96, 0x95, 0x9c, 0x9f, 0x9a, 0x99, 0x88, 0x8b, 0x8e, 0x8d, 0x84, 0x87, 0x82, 0x81,
- 0x9b, 0x98, 0x9d, 0x9e, 0x97, 0x94, 0x91, 0x92, 0x83, 0x80, 0x85, 0x86, 0x8f, 0x8c, 0x89, 0x8a,
- 0xab, 0xa8, 0xad, 0xae, 0xa7, 0xa4, 0xa1, 0xa2, 0xb3, 0xb0, 0xb5, 0xb6, 0xbf, 0xbc, 0xb9, 0xba,
- 0xfb, 0xf8, 0xfd, 0xfe, 0xf7, 0xf4, 0xf1, 0xf2, 0xe3, 0xe0, 0xe5, 0xe6, 0xef, 0xec, 0xe9, 0xea,
- 0xcb, 0xc8, 0xcd, 0xce, 0xc7, 0xc4, 0xc1, 0xc2, 0xd3, 0xd0, 0xd5, 0xd6, 0xdf, 0xdc, 0xd9, 0xda,
- 0x5b, 0x58, 0x5d, 0x5e, 0x57, 0x54, 0x51, 0x52, 0x43, 0x40, 0x45, 0x46, 0x4f, 0x4c, 0x49, 0x4a,
- 0x6b, 0x68, 0x6d, 0x6e, 0x67, 0x64, 0x61, 0x62, 0x73, 0x70, 0x75, 0x76, 0x7f, 0x7c, 0x79, 0x7a,
- 0x3b, 0x38, 0x3d, 0x3e, 0x37, 0x34, 0x31, 0x32, 0x23, 0x20, 0x25, 0x26, 0x2f, 0x2c, 0x29, 0x2a,
- 0x0b, 0x08, 0x0d, 0x0e, 0x07, 0x04, 0x01, 0x02, 0x13, 0x10, 0x15, 0x16, 0x1f, 0x1c, 0x19, 0x1a
-};
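/*
 * Illustrative note (not in the original file): dot2_table and dot3_table
 * are lookup tables for multiplication by 2 and 3 in GF(2^8) with the AES
 * reduction polynomial 0x11b; that is, dot2_table[x] == xtime(x) and
 * dot3_table[x] == xtime(x) ^ x, where xtime() is the usual helper:
 */
static inline unsigned char xtime(unsigned char x)
{
	return (unsigned char)((x << 1) ^ ((x & 0x80) ? 0x1b : 0x00));
}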
-
-/*--------------------- Static Functions --------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-static void xor_128(unsigned char *a, unsigned char *b, unsigned char *out)
-{
- unsigned long *dwPtrA = (unsigned long *)a;
- unsigned long *dwPtrB = (unsigned long *)b;
- unsigned long *dwPtrOut = (unsigned long *)out;
-
- (*dwPtrOut++) = (*dwPtrA++) ^ (*dwPtrB++);
- (*dwPtrOut++) = (*dwPtrA++) ^ (*dwPtrB++);
- (*dwPtrOut++) = (*dwPtrA++) ^ (*dwPtrB++);
- (*dwPtrOut++) = (*dwPtrA++) ^ (*dwPtrB++);
-}
-
-static void xor_32(unsigned char *a, unsigned char *b, unsigned char *out)
-{
- unsigned long *dwPtrA = (unsigned long *)a;
- unsigned long *dwPtrB = (unsigned long *)b;
- unsigned long *dwPtrOut = (unsigned long *)out;
-
- (*dwPtrOut++) = (*dwPtrA++) ^ (*dwPtrB++);
-}
-
-static void AddRoundKey(unsigned char *key, int round)
-{
- unsigned char sbox_key[4];
- unsigned char rcon_table[10] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36};
-
- sbox_key[0] = sbox_table[key[13]];
- sbox_key[1] = sbox_table[key[14]];
- sbox_key[2] = sbox_table[key[15]];
- sbox_key[3] = sbox_table[key[12]];
-
- key[0] = key[0] ^ rcon_table[round];
- xor_32(&key[0], sbox_key, &key[0]);
-
- xor_32(&key[4], &key[0], &key[4]);
- xor_32(&key[8], &key[4], &key[8]);
- xor_32(&key[12], &key[8], &key[12]);
-}
-
-static void SubBytes(unsigned char *in, unsigned char *out)
-{
- int i;
-
- for (i = 0; i < 16; i++)
- out[i] = sbox_table[in[i]];
-}
-
-static void ShiftRows(unsigned char *in, unsigned char *out)
-{
- out[0] = in[0];
- out[1] = in[5];
- out[2] = in[10];
- out[3] = in[15];
- out[4] = in[4];
- out[5] = in[9];
- out[6] = in[14];
- out[7] = in[3];
- out[8] = in[8];
- out[9] = in[13];
- out[10] = in[2];
- out[11] = in[7];
- out[12] = in[12];
- out[13] = in[1];
- out[14] = in[6];
- out[15] = in[11];
-}
-
-static void MixColumns(unsigned char *in, unsigned char *out)
-{
- out[0] = dot2_table[in[0]] ^ dot3_table[in[1]] ^ in[2] ^ in[3];
- out[1] = in[0] ^ dot2_table[in[1]] ^ dot3_table[in[2]] ^ in[3];
- out[2] = in[0] ^ in[1] ^ dot2_table[in[2]] ^ dot3_table[in[3]];
- out[3] = dot3_table[in[0]] ^ in[1] ^ in[2] ^ dot2_table[in[3]];
-}
-
-static void AESv128(unsigned char *key, unsigned char *data, unsigned char *ciphertext)
-{
- int i;
- int round;
- unsigned char TmpdataA[16];
- unsigned char TmpdataB[16];
- unsigned char abyRoundKey[16];
-
- for (i = 0; i < 16; i++)
- abyRoundKey[i] = key[i];
-
- for (round = 0; round < 11; round++) {
- if (round == 0) {
- xor_128(abyRoundKey, data, ciphertext);
- AddRoundKey(abyRoundKey, round);
- } else if (round == 10) {
- SubBytes(ciphertext, TmpdataA);
- ShiftRows(TmpdataA, TmpdataB);
- xor_128(TmpdataB, abyRoundKey, ciphertext);
- } else /* round 1 ~ 9 */{
- SubBytes(ciphertext, TmpdataA);
- ShiftRows(TmpdataA, TmpdataB);
- MixColumns(&TmpdataB[0], &TmpdataA[0]);
- MixColumns(&TmpdataB[4], &TmpdataA[4]);
- MixColumns(&TmpdataB[8], &TmpdataA[8]);
- MixColumns(&TmpdataB[12], &TmpdataA[12]);
- xor_128(TmpdataA, abyRoundKey, ciphertext);
- AddRoundKey(abyRoundKey, round);
- }
- }
-}
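/*
 * Note (illustrative, not part of the original file): despite its name,
 * AddRoundKey() above implements the on-the-fly AES-128 key expansion (it
 * derives the next round key in place), while the cipher's actual
 * add-round-key step is performed by the xor_128() calls inside AESv128().
 */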
-
-/*
- * Description: AES decryption
- *
- * Parameters:
- * In:
- * pbyRxKey - The key used to decrypt
- * pbyFrame - Starting address of packet header
- * wFrameSize - Total packet size including CRC
- * Out:
- * none
- *
- * Return Value: MIC compare result
- *
- */
-bool AESbGenCCMP(unsigned char *pbyRxKey, unsigned char *pbyFrame, unsigned short wFrameSize)
-{
- unsigned char abyNonce[13];
- unsigned char MIC_IV[16];
- unsigned char MIC_HDR1[16];
- unsigned char MIC_HDR2[16];
- unsigned char abyMIC[16];
- unsigned char abyCTRPLD[16];
- unsigned char abyTmp[16];
- unsigned char abyPlainText[16];
- unsigned char abyLastCipher[16];
-
- PS802_11Header pMACHeader = (PS802_11Header) pbyFrame;
- unsigned char *pbyIV;
- unsigned char *pbyPayload;
- unsigned short wHLen = 22;
- unsigned short wPayloadSize = wFrameSize - 8 - 8 - 4 - WLAN_HDR_ADDR3_LEN;/* 8 is IV, 8 is MIC, 4 is CRC */
- bool bA4 = false;
- unsigned char byTmp;
- unsigned short wCnt;
- int ii, jj, kk;
-
- pbyIV = pbyFrame + WLAN_HDR_ADDR3_LEN;
- if (WLAN_GET_FC_TODS(*(unsigned short *)pbyFrame) &&
- WLAN_GET_FC_FROMDS(*(unsigned short *)pbyFrame)) {
- bA4 = true;
- pbyIV += 6; /* 6 is 802.11 address4 */
- wHLen += 6;
- wPayloadSize -= 6;
- }
- pbyPayload = pbyIV + 8; /* IV-length */
-
- abyNonce[0] = 0x00; /* now is 0, if Qos here will be priority */
- memcpy(&(abyNonce[1]), pMACHeader->abyAddr2, ETH_ALEN);
- abyNonce[7] = pbyIV[7];
- abyNonce[8] = pbyIV[6];
- abyNonce[9] = pbyIV[5];
- abyNonce[10] = pbyIV[4];
- abyNonce[11] = pbyIV[1];
- abyNonce[12] = pbyIV[0];
-
- /* MIC_IV */
- MIC_IV[0] = 0x59;
- memcpy(&(MIC_IV[1]), &(abyNonce[0]), 13);
- MIC_IV[14] = (unsigned char)(wPayloadSize >> 8);
- MIC_IV[15] = (unsigned char)(wPayloadSize & 0xff);
-
- /* MIC_HDR1 */
- MIC_HDR1[0] = (unsigned char)(wHLen >> 8);
- MIC_HDR1[1] = (unsigned char)(wHLen & 0xff);
- byTmp = (unsigned char)(pMACHeader->wFrameCtl & 0xff);
- MIC_HDR1[2] = byTmp & 0x8f;
- byTmp = (unsigned char)(pMACHeader->wFrameCtl >> 8);
- byTmp &= 0x87;
- MIC_HDR1[3] = byTmp | 0x40;
- memcpy(&(MIC_HDR1[4]), pMACHeader->abyAddr1, ETH_ALEN);
- memcpy(&(MIC_HDR1[10]), pMACHeader->abyAddr2, ETH_ALEN);
-
- /* MIC_HDR2 */
- memcpy(&(MIC_HDR2[0]), pMACHeader->abyAddr3, ETH_ALEN);
- byTmp = (unsigned char)(pMACHeader->wSeqCtl & 0xff);
- MIC_HDR2[6] = byTmp & 0x0f;
- MIC_HDR2[7] = 0;
- if (bA4) {
- memcpy(&(MIC_HDR2[8]), pMACHeader->abyAddr4, ETH_ALEN);
- } else {
- MIC_HDR2[8] = 0x00;
- MIC_HDR2[9] = 0x00;
- MIC_HDR2[10] = 0x00;
- MIC_HDR2[11] = 0x00;
- MIC_HDR2[12] = 0x00;
- MIC_HDR2[13] = 0x00;
- }
- MIC_HDR2[14] = 0x00;
- MIC_HDR2[15] = 0x00;
-
- /* CCMP */
- AESv128(pbyRxKey, MIC_IV, abyMIC);
- for (kk = 0; kk < 16; kk++)
- abyTmp[kk] = MIC_HDR1[kk] ^ abyMIC[kk];
- AESv128(pbyRxKey, abyTmp, abyMIC);
- for (kk = 0; kk < 16; kk++)
- abyTmp[kk] = MIC_HDR2[kk] ^ abyMIC[kk];
- AESv128(pbyRxKey, abyTmp, abyMIC);
-
- wCnt = 1;
- abyCTRPLD[0] = 0x01;
- memcpy(&(abyCTRPLD[1]), &(abyNonce[0]), 13);
-
- for (jj = wPayloadSize; jj > 16; jj = jj - 16) {
- abyCTRPLD[14] = (unsigned char)(wCnt >> 8);
- abyCTRPLD[15] = (unsigned char)(wCnt & 0xff);
-
- AESv128(pbyRxKey, abyCTRPLD, abyTmp);
-
- for (kk = 0; kk < 16; kk++)
- abyPlainText[kk] = abyTmp[kk] ^ pbyPayload[kk];
- for (kk = 0; kk < 16; kk++)
- abyTmp[kk] = abyMIC[kk] ^ abyPlainText[kk];
- AESv128(pbyRxKey, abyTmp, abyMIC);
-
- memcpy(pbyPayload, abyPlainText, 16);
- wCnt++;
- pbyPayload += 16;
- } /* for wPayloadSize */
-
- /* last payload */
- memcpy(&(abyLastCipher[0]), pbyPayload, jj);
- for (ii = jj; ii < 16; ii++)
- abyLastCipher[ii] = 0x00;
-
- abyCTRPLD[14] = (unsigned char)(wCnt >> 8);
- abyCTRPLD[15] = (unsigned char)(wCnt & 0xff);
-
- AESv128(pbyRxKey, abyCTRPLD, abyTmp);
- for (kk = 0; kk < 16; kk++)
- abyPlainText[kk] = abyTmp[kk] ^ abyLastCipher[kk];
- memcpy(pbyPayload, abyPlainText, jj);
- pbyPayload += jj;
-
- /* for MIC calculation */
- for (ii = jj; ii < 16; ii++)
- abyPlainText[ii] = 0x00;
- for (kk = 0; kk < 16; kk++)
- abyTmp[kk] = abyMIC[kk] ^ abyPlainText[kk];
- AESv128(pbyRxKey, abyTmp, abyMIC);
-
- /* =>above is the calculate MIC */
- /* -------------------------------------------- */
-
- wCnt = 0;
- abyCTRPLD[14] = (unsigned char)(wCnt >> 8);
- abyCTRPLD[15] = (unsigned char)(wCnt & 0xff);
- AESv128(pbyRxKey, abyCTRPLD, abyTmp);
- for (kk = 0; kk < 8; kk++)
- abyTmp[kk] = abyTmp[kk] ^ pbyPayload[kk];
- /* =>above is the dec-MIC from packet */
- /* -------------------------------------------- */
-
- return !memcmp(abyMIC, abyTmp, 8);
-	return !memcmp(abyMIC, abyTmp, 8);
-}
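/*
 * Illustrative sketch (not from the original source): AESbGenCCMP() above
 * builds each 16-byte counter-mode block as a 0x01 flags byte, the 13-byte
 * nonce, and a big-endian 2-byte counter, with counter 0 reserved for
 * recovering the packet MIC and counters 1..n keystreaming the payload.
 * A hypothetical helper expressing that layout (assumes <linux/string.h>):
 */
static void ccmp_ctr_block(const unsigned char *nonce13,
			   unsigned short counter, unsigned char block[16])
{
	block[0] = 0x01;			/* flags: 2-byte counter field */
	memcpy(&block[1], nonce13, 13);		/* priority, A2, packet number */
	block[14] = (unsigned char)(counter >> 8);
	block[15] = (unsigned char)(counter & 0xff);
}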
diff --git a/drivers/staging/vt6655/aes_ccmp.h b/drivers/staging/vt6655/aes_ccmp.h
deleted file mode 100644
index fe0c506205d..00000000000
--- a/drivers/staging/vt6655/aes_ccmp.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: aes_ccmp.h
- *
- * Purpose: AES_CCMP Decryption
- *
- * Author: Warren Hsu
- *
- * Date: Feb 15, 2005
- *
- */
-
-#ifndef __AES_H__
-#define __AES_H__
-
-#include "ttype.h"
-
-bool AESbGenCCMP(unsigned char *pbyRxKey, unsigned char *pbyFrame, unsigned short wFrameSize);
-
-#endif /* __AES_H__ */
diff --git a/drivers/staging/vt6655/baseband.c b/drivers/staging/vt6655/baseband.c
index de54923e886..86c72ba0a0c 100644
--- a/drivers/staging/vt6655/baseband.c
+++ b/drivers/staging/vt6655/baseband.c
@@ -30,12 +30,7 @@
* BBvCaculateParameter - Caculate PhyLength, PhyService and Phy Signal parameter for baseband Tx
* BBbReadEmbedded - Embedded read baseband register via MAC
* BBbWriteEmbedded - Embedded write baseband register via MAC
- * BBbIsRegBitsOn - Test if baseband register bits on
- * BBbIsRegBitsOff - Test if baseband register bits off
* BBbVT3253Init - VIA VT3253 baseband chip init code
- * BBvReadAllRegs - Read All Baseband Registers
- * BBvLoopbackOn - Turn on BaseBand Loopback mode
- * BBvLoopbackOff - Turn off BaseBand Loopback mode
*
* Revision History:
* 06-10-2003 Bryan YC Fan: Re-write codes to support VT3253 spec.
@@ -50,7 +45,6 @@
*/
#include "tmacro.h"
-#include "tether.h"
#include "mac.h"
#include "baseband.h"
#include "srom.h"
@@ -1708,39 +1702,39 @@ static const unsigned short awcFrameTime[MAX_RATE] = {
static
unsigned long
-s_ulGetRatio(struct vnt_private *pDevice);
+s_ulGetRatio(struct vnt_private *priv);
static
void
s_vChangeAntenna(
- struct vnt_private *pDevice
+ struct vnt_private *priv
);
static
void
s_vChangeAntenna(
- struct vnt_private *pDevice
+ struct vnt_private *priv
)
{
- if (pDevice->dwRxAntennaSel == 0) {
- pDevice->dwRxAntennaSel = 1;
- if (pDevice->bTxRxAntInv == true)
- BBvSetRxAntennaMode(pDevice->PortOffset, ANT_A);
+ if (priv->dwRxAntennaSel == 0) {
+ priv->dwRxAntennaSel = 1;
+ if (priv->bTxRxAntInv == true)
+ BBvSetRxAntennaMode(priv, ANT_A);
else
- BBvSetRxAntennaMode(pDevice->PortOffset, ANT_B);
+ BBvSetRxAntennaMode(priv, ANT_B);
} else {
- pDevice->dwRxAntennaSel = 0;
- if (pDevice->bTxRxAntInv == true)
- BBvSetRxAntennaMode(pDevice->PortOffset, ANT_B);
+ priv->dwRxAntennaSel = 0;
+ if (priv->bTxRxAntInv == true)
+ BBvSetRxAntennaMode(priv, ANT_B);
else
- BBvSetRxAntennaMode(pDevice->PortOffset, ANT_A);
+ BBvSetRxAntennaMode(priv, ANT_A);
}
- if (pDevice->dwTxAntennaSel == 0) {
- pDevice->dwTxAntennaSel = 1;
- BBvSetTxAntennaMode(pDevice->PortOffset, ANT_B);
+ if (priv->dwTxAntennaSel == 0) {
+ priv->dwTxAntennaSel = 1;
+ BBvSetTxAntennaMode(priv, ANT_B);
} else {
- pDevice->dwTxAntennaSel = 0;
- BBvSetTxAntennaMode(pDevice->PortOffset, ANT_A);
+ priv->dwTxAntennaSel = 0;
+ BBvSetTxAntennaMode(priv, ANT_A);
}
}
@@ -1792,18 +1786,17 @@ BBuGetFrameTime(
uFrameTime++;
return uPreamble + uFrameTime;
- } else {
- uFrameTime = (cbFrameLength * 8 + 22) / uRate; /* ???????? */
- uTmp = ((uFrameTime * uRate) - 22) / 8;
- if (cbFrameLength != uTmp)
- uFrameTime++;
+ }
+ uFrameTime = (cbFrameLength * 8 + 22) / uRate; /* ???????? */
+ uTmp = ((uFrameTime * uRate) - 22) / 8;
+ if (cbFrameLength != uTmp)
+ uFrameTime++;
- uFrameTime = uFrameTime * 4; /* ??????? */
- if (byPktType != PK_TYPE_11A)
- uFrameTime += 6; /* ?????? */
+ uFrameTime = uFrameTime * 4; /* ??????? */
+ if (byPktType != PK_TYPE_11A)
+ uFrameTime += 6; /* ?????? */
- return 20 + uFrameTime; /* ?????? */
- }
+ return 20 + uFrameTime; /* ?????? */
}
/*
@@ -1968,8 +1961,10 @@ void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length,
* Return Value: true if succeeded; false if failed.
*
*/
-bool BBbReadEmbedded(void __iomem *dwIoBase, unsigned char byBBAddr, unsigned char *pbyData)
+bool BBbReadEmbedded(struct vnt_private *priv,
+ unsigned char byBBAddr, unsigned char *pbyData)
{
+ void __iomem *dwIoBase = priv->PortOffset;
unsigned short ww;
unsigned char byValue;
@@ -2010,8 +2005,10 @@ bool BBbReadEmbedded(void __iomem *dwIoBase, unsigned char byBBAddr, unsigned ch
* Return Value: true if succeeded; false if failed.
*
*/
-bool BBbWriteEmbedded(void __iomem *dwIoBase, unsigned char byBBAddr, unsigned char byData)
+bool BBbWriteEmbedded(struct vnt_private *priv,
+ unsigned char byBBAddr, unsigned char byData)
{
+ void __iomem *dwIoBase = priv->PortOffset;
unsigned short ww;
unsigned char byValue;
@@ -2038,50 +2035,6 @@ bool BBbWriteEmbedded(void __iomem *dwIoBase, unsigned char byBBAddr, unsigned c
}
/*
- * Description: Test if all bits are set for the Baseband register
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * byBBAddr - address of register in Baseband
- * byTestBits - TestBits
- * Out:
- * none
- *
- * Return Value: true if all TestBits are set; false otherwise.
- *
- */
-bool BBbIsRegBitsOn(void __iomem *dwIoBase, unsigned char byBBAddr, unsigned char byTestBits)
-{
- unsigned char byOrgData;
-
- BBbReadEmbedded(dwIoBase, byBBAddr, &byOrgData);
- return (byOrgData & byTestBits) == byTestBits;
-}
-
-/*
- * Description: Test if all bits are clear for the Baseband register
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * byBBAddr - address of register in Baseband
- * byTestBits - TestBits
- * Out:
- * none
- *
- * Return Value: true if all TestBits are clear; false otherwise.
- *
- */
-bool BBbIsRegBitsOff(void __iomem *dwIoBase, unsigned char byBBAddr, unsigned char byTestBits)
-{
- unsigned char byOrgData;
-
- BBbReadEmbedded(dwIoBase, byBBAddr, &byOrgData);
- return (byOrgData & byTestBits) == 0;
-}
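/*
 * Illustrative sketch (assumption, not part of this patch): a caller that
 * still needs the removed bit tests can perform the same check through the
 * new priv-based BBbReadEmbedded(), for example:
 */
static bool bb_reg_bits_on(struct vnt_private *priv,
			   unsigned char reg, unsigned char bits)
{
	unsigned char val = 0;

	BBbReadEmbedded(priv, reg, &val);
	return (val & bits) == bits;
}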
-
-/*
* Description: VIA VT3253 Baseband chip init function
*
* Parameters:
@@ -2096,126 +2049,126 @@ bool BBbIsRegBitsOff(void __iomem *dwIoBase, unsigned char byBBAddr, unsigned ch
*
*/
-bool BBbVT3253Init(struct vnt_private *pDevice)
+bool BBbVT3253Init(struct vnt_private *priv)
{
bool bResult = true;
int ii;
- void __iomem *dwIoBase = pDevice->PortOffset;
- unsigned char byRFType = pDevice->byRFType;
- unsigned char byLocalID = pDevice->byLocalID;
+ void __iomem *dwIoBase = priv->PortOffset;
+ unsigned char byRFType = priv->byRFType;
+ unsigned char byLocalID = priv->byLocalID;
if (byRFType == RF_RFMD2959) {
if (byLocalID <= REV_ID_VT3253_A1) {
for (ii = 0; ii < CB_VT3253_INIT_FOR_RFMD; ii++)
- bResult &= BBbWriteEmbedded(dwIoBase, byVT3253InitTab_RFMD[ii][0], byVT3253InitTab_RFMD[ii][1]);
+ bResult &= BBbWriteEmbedded(priv, byVT3253InitTab_RFMD[ii][0], byVT3253InitTab_RFMD[ii][1]);
} else {
for (ii = 0; ii < CB_VT3253B0_INIT_FOR_RFMD; ii++)
- bResult &= BBbWriteEmbedded(dwIoBase, byVT3253B0_RFMD[ii][0], byVT3253B0_RFMD[ii][1]);
+ bResult &= BBbWriteEmbedded(priv, byVT3253B0_RFMD[ii][0], byVT3253B0_RFMD[ii][1]);
for (ii = 0; ii < CB_VT3253B0_AGC_FOR_RFMD2959; ii++)
- bResult &= BBbWriteEmbedded(dwIoBase, byVT3253B0_AGC4_RFMD2959[ii][0], byVT3253B0_AGC4_RFMD2959[ii][1]);
+ bResult &= BBbWriteEmbedded(priv, byVT3253B0_AGC4_RFMD2959[ii][0], byVT3253B0_AGC4_RFMD2959[ii][1]);
VNSvOutPortD(dwIoBase + MAC_REG_ITRTMSET, 0x23);
- MACvRegBitsOn(dwIoBase, MAC_REG_PAPEDELAY, BIT0);
+ MACvRegBitsOn(dwIoBase, MAC_REG_PAPEDELAY, BIT(0));
}
- pDevice->abyBBVGA[0] = 0x18;
- pDevice->abyBBVGA[1] = 0x0A;
- pDevice->abyBBVGA[2] = 0x0;
- pDevice->abyBBVGA[3] = 0x0;
- pDevice->ldBmThreshold[0] = -70;
- pDevice->ldBmThreshold[1] = -50;
- pDevice->ldBmThreshold[2] = 0;
- pDevice->ldBmThreshold[3] = 0;
+ priv->abyBBVGA[0] = 0x18;
+ priv->abyBBVGA[1] = 0x0A;
+ priv->abyBBVGA[2] = 0x0;
+ priv->abyBBVGA[3] = 0x0;
+ priv->ldBmThreshold[0] = -70;
+ priv->ldBmThreshold[1] = -50;
+ priv->ldBmThreshold[2] = 0;
+ priv->ldBmThreshold[3] = 0;
} else if ((byRFType == RF_AIROHA) || (byRFType == RF_AL2230S)) {
for (ii = 0; ii < CB_VT3253B0_INIT_FOR_AIROHA2230; ii++)
- bResult &= BBbWriteEmbedded(dwIoBase, byVT3253B0_AIROHA2230[ii][0], byVT3253B0_AIROHA2230[ii][1]);
+ bResult &= BBbWriteEmbedded(priv, byVT3253B0_AIROHA2230[ii][0], byVT3253B0_AIROHA2230[ii][1]);
for (ii = 0; ii < CB_VT3253B0_AGC; ii++)
- bResult &= BBbWriteEmbedded(dwIoBase, byVT3253B0_AGC[ii][0], byVT3253B0_AGC[ii][1]);
-
- pDevice->abyBBVGA[0] = 0x1C;
- pDevice->abyBBVGA[1] = 0x10;
- pDevice->abyBBVGA[2] = 0x0;
- pDevice->abyBBVGA[3] = 0x0;
- pDevice->ldBmThreshold[0] = -70;
- pDevice->ldBmThreshold[1] = -48;
- pDevice->ldBmThreshold[2] = 0;
- pDevice->ldBmThreshold[3] = 0;
+ bResult &= BBbWriteEmbedded(priv, byVT3253B0_AGC[ii][0], byVT3253B0_AGC[ii][1]);
+
+ priv->abyBBVGA[0] = 0x1C;
+ priv->abyBBVGA[1] = 0x10;
+ priv->abyBBVGA[2] = 0x0;
+ priv->abyBBVGA[3] = 0x0;
+ priv->ldBmThreshold[0] = -70;
+ priv->ldBmThreshold[1] = -48;
+ priv->ldBmThreshold[2] = 0;
+ priv->ldBmThreshold[3] = 0;
} else if (byRFType == RF_UW2451) {
for (ii = 0; ii < CB_VT3253B0_INIT_FOR_UW2451; ii++)
- bResult &= BBbWriteEmbedded(dwIoBase, byVT3253B0_UW2451[ii][0], byVT3253B0_UW2451[ii][1]);
+ bResult &= BBbWriteEmbedded(priv, byVT3253B0_UW2451[ii][0], byVT3253B0_UW2451[ii][1]);
for (ii = 0; ii < CB_VT3253B0_AGC; ii++)
- bResult &= BBbWriteEmbedded(dwIoBase, byVT3253B0_AGC[ii][0], byVT3253B0_AGC[ii][1]);
+ bResult &= BBbWriteEmbedded(priv, byVT3253B0_AGC[ii][0], byVT3253B0_AGC[ii][1]);
VNSvOutPortB(dwIoBase + MAC_REG_ITRTMSET, 0x23);
- MACvRegBitsOn(dwIoBase, MAC_REG_PAPEDELAY, BIT0);
-
- pDevice->abyBBVGA[0] = 0x14;
- pDevice->abyBBVGA[1] = 0x0A;
- pDevice->abyBBVGA[2] = 0x0;
- pDevice->abyBBVGA[3] = 0x0;
- pDevice->ldBmThreshold[0] = -60;
- pDevice->ldBmThreshold[1] = -50;
- pDevice->ldBmThreshold[2] = 0;
- pDevice->ldBmThreshold[3] = 0;
+ MACvRegBitsOn(dwIoBase, MAC_REG_PAPEDELAY, BIT(0));
+
+ priv->abyBBVGA[0] = 0x14;
+ priv->abyBBVGA[1] = 0x0A;
+ priv->abyBBVGA[2] = 0x0;
+ priv->abyBBVGA[3] = 0x0;
+ priv->ldBmThreshold[0] = -60;
+ priv->ldBmThreshold[1] = -50;
+ priv->ldBmThreshold[2] = 0;
+ priv->ldBmThreshold[3] = 0;
} else if (byRFType == RF_UW2452) {
for (ii = 0; ii < CB_VT3253B0_INIT_FOR_UW2451; ii++)
- bResult &= BBbWriteEmbedded(dwIoBase, byVT3253B0_UW2451[ii][0], byVT3253B0_UW2451[ii][1]);
+ bResult &= BBbWriteEmbedded(priv, byVT3253B0_UW2451[ii][0], byVT3253B0_UW2451[ii][1]);
/* Init ANT B select,TX Config CR09 = 0x61->0x45, 0x45->0x41(VC1/VC2 define, make the ANT_A, ANT_B inverted) */
/*bResult &= BBbWriteEmbedded(dwIoBase,0x09,0x41);*/
/* Init ANT B select,RX Config CR10 = 0x28->0x2A, 0x2A->0x28(VC1/VC2 define, make the ANT_A, ANT_B inverted) */
/*bResult &= BBbWriteEmbedded(dwIoBase,0x0a,0x28);*/
/* Select VC1/VC2, CR215 = 0x02->0x06 */
- bResult &= BBbWriteEmbedded(dwIoBase, 0xd7, 0x06);
+ bResult &= BBbWriteEmbedded(priv, 0xd7, 0x06);
/* {{RobertYu:20050125, request by Jack */
- bResult &= BBbWriteEmbedded(dwIoBase, 0x90, 0x20);
- bResult &= BBbWriteEmbedded(dwIoBase, 0x97, 0xeb);
+ bResult &= BBbWriteEmbedded(priv, 0x90, 0x20);
+ bResult &= BBbWriteEmbedded(priv, 0x97, 0xeb);
/* }} */
/* {{RobertYu:20050221, request by Jack */
- bResult &= BBbWriteEmbedded(dwIoBase, 0xa6, 0x00);
- bResult &= BBbWriteEmbedded(dwIoBase, 0xa8, 0x30);
+ bResult &= BBbWriteEmbedded(priv, 0xa6, 0x00);
+ bResult &= BBbWriteEmbedded(priv, 0xa8, 0x30);
/* }} */
- bResult &= BBbWriteEmbedded(dwIoBase, 0xb0, 0x58);
+ bResult &= BBbWriteEmbedded(priv, 0xb0, 0x58);
for (ii = 0; ii < CB_VT3253B0_AGC; ii++)
- bResult &= BBbWriteEmbedded(dwIoBase, byVT3253B0_AGC[ii][0], byVT3253B0_AGC[ii][1]);
-
- pDevice->abyBBVGA[0] = 0x14;
- pDevice->abyBBVGA[1] = 0x0A;
- pDevice->abyBBVGA[2] = 0x0;
- pDevice->abyBBVGA[3] = 0x0;
- pDevice->ldBmThreshold[0] = -60;
- pDevice->ldBmThreshold[1] = -50;
- pDevice->ldBmThreshold[2] = 0;
- pDevice->ldBmThreshold[3] = 0;
+ bResult &= BBbWriteEmbedded(priv, byVT3253B0_AGC[ii][0], byVT3253B0_AGC[ii][1]);
+
+ priv->abyBBVGA[0] = 0x14;
+ priv->abyBBVGA[1] = 0x0A;
+ priv->abyBBVGA[2] = 0x0;
+ priv->abyBBVGA[3] = 0x0;
+ priv->ldBmThreshold[0] = -60;
+ priv->ldBmThreshold[1] = -50;
+ priv->ldBmThreshold[2] = 0;
+ priv->ldBmThreshold[3] = 0;
/* }} RobertYu */
} else if (byRFType == RF_VT3226) {
for (ii = 0; ii < CB_VT3253B0_INIT_FOR_AIROHA2230; ii++)
- bResult &= BBbWriteEmbedded(dwIoBase, byVT3253B0_AIROHA2230[ii][0], byVT3253B0_AIROHA2230[ii][1]);
+ bResult &= BBbWriteEmbedded(priv, byVT3253B0_AIROHA2230[ii][0], byVT3253B0_AIROHA2230[ii][1]);
for (ii = 0; ii < CB_VT3253B0_AGC; ii++)
- bResult &= BBbWriteEmbedded(dwIoBase, byVT3253B0_AGC[ii][0], byVT3253B0_AGC[ii][1]);
-
- pDevice->abyBBVGA[0] = 0x1C;
- pDevice->abyBBVGA[1] = 0x10;
- pDevice->abyBBVGA[2] = 0x0;
- pDevice->abyBBVGA[3] = 0x0;
- pDevice->ldBmThreshold[0] = -70;
- pDevice->ldBmThreshold[1] = -48;
- pDevice->ldBmThreshold[2] = 0;
- pDevice->ldBmThreshold[3] = 0;
+ bResult &= BBbWriteEmbedded(priv, byVT3253B0_AGC[ii][0], byVT3253B0_AGC[ii][1]);
+
+ priv->abyBBVGA[0] = 0x1C;
+ priv->abyBBVGA[1] = 0x10;
+ priv->abyBBVGA[2] = 0x0;
+ priv->abyBBVGA[3] = 0x0;
+ priv->ldBmThreshold[0] = -70;
+ priv->ldBmThreshold[1] = -48;
+ priv->ldBmThreshold[2] = 0;
+ priv->ldBmThreshold[3] = 0;
/* Fix VT3226 DFC system timing issue */
MACvSetRFLE_LatchBase(dwIoBase);
/* {{ RobertYu: 20050104 */
} else if (byRFType == RF_AIROHA7230) {
for (ii = 0; ii < CB_VT3253B0_INIT_FOR_AIROHA2230; ii++)
- bResult &= BBbWriteEmbedded(dwIoBase, byVT3253B0_AIROHA2230[ii][0], byVT3253B0_AIROHA2230[ii][1]);
+ bResult &= BBbWriteEmbedded(priv, byVT3253B0_AIROHA2230[ii][0], byVT3253B0_AIROHA2230[ii][1]);
/* {{ RobertYu:20050223, request by JerryChung */
@@ -2228,150 +2181,37 @@ bool BBbVT3253Init(struct vnt_private *pDevice)
/* }} */
for (ii = 0; ii < CB_VT3253B0_AGC; ii++)
- bResult &= BBbWriteEmbedded(dwIoBase, byVT3253B0_AGC[ii][0], byVT3253B0_AGC[ii][1]);
-
- pDevice->abyBBVGA[0] = 0x1C;
- pDevice->abyBBVGA[1] = 0x10;
- pDevice->abyBBVGA[2] = 0x0;
- pDevice->abyBBVGA[3] = 0x0;
- pDevice->ldBmThreshold[0] = -70;
- pDevice->ldBmThreshold[1] = -48;
- pDevice->ldBmThreshold[2] = 0;
- pDevice->ldBmThreshold[3] = 0;
+ bResult &= BBbWriteEmbedded(priv, byVT3253B0_AGC[ii][0], byVT3253B0_AGC[ii][1]);
+
+ priv->abyBBVGA[0] = 0x1C;
+ priv->abyBBVGA[1] = 0x10;
+ priv->abyBBVGA[2] = 0x0;
+ priv->abyBBVGA[3] = 0x0;
+ priv->ldBmThreshold[0] = -70;
+ priv->ldBmThreshold[1] = -48;
+ priv->ldBmThreshold[2] = 0;
+ priv->ldBmThreshold[3] = 0;
/* }} RobertYu */
} else {
/* No VGA Table now */
- pDevice->bUpdateBBVGA = false;
- pDevice->abyBBVGA[0] = 0x1C;
+ priv->bUpdateBBVGA = false;
+ priv->abyBBVGA[0] = 0x1C;
}
if (byLocalID > REV_ID_VT3253_A1) {
- BBbWriteEmbedded(dwIoBase, 0x04, 0x7F);
- BBbWriteEmbedded(dwIoBase, 0x0D, 0x01);
+ BBbWriteEmbedded(priv, 0x04, 0x7F);
+ BBbWriteEmbedded(priv, 0x0D, 0x01);
}
return bResult;
}
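
A note on the conversion pattern used throughout this hunk: rather than threading the raw void __iomem * base through every baseband helper, callers now pass the device's vnt_private and each helper derives the MMIO base itself. A minimal sketch of that shape follows, assuming only the PortOffset member visible above; the structure name, register offsets, and write sequence are illustrative, not the literal BBbWriteEmbedded body:

#include <linux/io.h>
#include <linux/types.h>

struct example_priv {
	void __iomem *PortOffset;	/* mirrors vnt_private::PortOffset */
};

/* Hedged sketch: the helper looks up the I/O base from the private data,
 * so callers no longer pass dwIoBase explicitly. */
static void example_bb_poke(struct example_priv *priv, u8 addr, u8 data)
{
	void __iomem *iobase = priv->PortOffset;

	iowrite8(data, iobase + 0x02);	/* offsets are made up for illustration */
	iowrite8(addr, iobase + 0x03);
}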
/*
- * Description: Read All Baseband Registers
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * pbyBBRegs - Point to struct that stores Baseband Registers
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void BBvReadAllRegs(void __iomem *dwIoBase, unsigned char *pbyBBRegs)
-{
- int ii;
- unsigned char byBase = 1;
-
- for (ii = 0; ii < BB_MAX_CONTEXT_SIZE; ii++) {
- BBbReadEmbedded(dwIoBase, (unsigned char)(ii*byBase), pbyBBRegs);
- pbyBBRegs += byBase;
- }
-}
-
-/*
- * Description: Turn on BaseBand Loopback mode
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * bCCK - If CCK is set
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-
-void BBvLoopbackOn(struct vnt_private *pDevice)
-{
- unsigned char byData;
- void __iomem *dwIoBase = pDevice->PortOffset;
-
- /* CR C9 = 0x00 */
- BBbReadEmbedded(dwIoBase, 0xC9, &pDevice->byBBCRc9); /* CR201 */
- BBbWriteEmbedded(dwIoBase, 0xC9, 0);
- BBbReadEmbedded(dwIoBase, 0x4D, &pDevice->byBBCR4d); /* CR77 */
- BBbWriteEmbedded(dwIoBase, 0x4D, 0x90);
-
- /* CR 88 = 0x02(CCK), 0x03(OFDM) */
- BBbReadEmbedded(dwIoBase, 0x88, &pDevice->byBBCR88); /* CR136 */
-
- if (pDevice->uConnectionRate <= RATE_11M) { /* CCK */
- /* Enable internal digital loopback: CR33 |= 0000 0001 */
- BBbReadEmbedded(dwIoBase, 0x21, &byData); /* CR33 */
- BBbWriteEmbedded(dwIoBase, 0x21, (unsigned char)(byData | 0x01)); /* CR33 */
- /* CR154 = 0x00 */
- BBbWriteEmbedded(dwIoBase, 0x9A, 0); /* CR154 */
-
- BBbWriteEmbedded(dwIoBase, 0x88, 0x02); /* CR239 */
- } else { /* OFDM */
- /* Enable internal digital loopback:CR154 |= 0000 0001 */
- BBbReadEmbedded(dwIoBase, 0x9A, &byData); /* CR154 */
- BBbWriteEmbedded(dwIoBase, 0x9A, (unsigned char)(byData | 0x01)); /* CR154 */
- /* CR33 = 0x00 */
- BBbWriteEmbedded(dwIoBase, 0x21, 0); /* CR33 */
-
- BBbWriteEmbedded(dwIoBase, 0x88, 0x03); /* CR239 */
- }
-
- /* CR14 = 0x00 */
- BBbWriteEmbedded(dwIoBase, 0x0E, 0); /* CR14 */
-
- /* Disable TX_IQUN */
- BBbReadEmbedded(pDevice->PortOffset, 0x09, &pDevice->byBBCR09);
- BBbWriteEmbedded(pDevice->PortOffset, 0x09, (unsigned char)(pDevice->byBBCR09 & 0xDE));
-}
-
-/*
- * Description: Turn off BaseBand Loopback mode
- *
- * Parameters:
- * In:
- * pDevice - Device Structure
- *
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void BBvLoopbackOff(struct vnt_private *pDevice)
-{
- unsigned char byData;
- void __iomem *dwIoBase = pDevice->PortOffset;
-
- BBbWriteEmbedded(dwIoBase, 0xC9, pDevice->byBBCRc9); /* CR201 */
- BBbWriteEmbedded(dwIoBase, 0x88, pDevice->byBBCR88); /* CR136 */
- BBbWriteEmbedded(dwIoBase, 0x09, pDevice->byBBCR09); /* CR136 */
- BBbWriteEmbedded(dwIoBase, 0x4D, pDevice->byBBCR4d); /* CR77 */
-
- if (pDevice->uConnectionRate <= RATE_11M) { /* CCK */
- /* Set the CR33 Bit2 to disable internal Loopback. */
- BBbReadEmbedded(dwIoBase, 0x21, &byData);/* CR33 */
- BBbWriteEmbedded(dwIoBase, 0x21, (unsigned char)(byData & 0xFE)); /* CR33 */
- } else { /* OFDM */
- BBbReadEmbedded(dwIoBase, 0x9A, &byData); /* CR154 */
- BBbWriteEmbedded(dwIoBase, 0x9A, (unsigned char)(byData & 0xFE)); /* CR154 */
- }
- BBbReadEmbedded(dwIoBase, 0x0E, &byData); /* CR14 */
- BBbWriteEmbedded(dwIoBase, 0x0E, (unsigned char)(byData | 0x80)); /* CR14 */
-}
-
-/*
* Description: Set ShortSlotTime mode
*
* Parameters:
* In:
- * pDevice - Device Structure
+ * priv - Device Structure
* Out:
* none
*
@@ -2379,42 +2219,42 @@ void BBvLoopbackOff(struct vnt_private *pDevice)
*
*/
void
-BBvSetShortSlotTime(struct vnt_private *pDevice)
+BBvSetShortSlotTime(struct vnt_private *priv)
{
unsigned char byBBRxConf = 0;
unsigned char byBBVGA = 0;
- BBbReadEmbedded(pDevice->PortOffset, 0x0A, &byBBRxConf); /* CR10 */
+ BBbReadEmbedded(priv, 0x0A, &byBBRxConf); /* CR10 */
- if (pDevice->bShortSlotTime)
+ if (priv->bShortSlotTime)
byBBRxConf &= 0xDF; /* 1101 1111 */
else
byBBRxConf |= 0x20; /* 0010 0000 */
/* patch for 3253B0 Baseband with Cardbus module */
- BBbReadEmbedded(pDevice->PortOffset, 0xE7, &byBBVGA);
- if (byBBVGA == pDevice->abyBBVGA[0])
+ BBbReadEmbedded(priv, 0xE7, &byBBVGA);
+ if (byBBVGA == priv->abyBBVGA[0])
byBBRxConf |= 0x20; /* 0010 0000 */
- BBbWriteEmbedded(pDevice->PortOffset, 0x0A, byBBRxConf); /* CR10 */
+ BBbWriteEmbedded(priv, 0x0A, byBBRxConf); /* CR10 */
}
-void BBvSetVGAGainOffset(struct vnt_private *pDevice, unsigned char byData)
+void BBvSetVGAGainOffset(struct vnt_private *priv, unsigned char byData)
{
unsigned char byBBRxConf = 0;
- BBbWriteEmbedded(pDevice->PortOffset, 0xE7, byData);
+ BBbWriteEmbedded(priv, 0xE7, byData);
- BBbReadEmbedded(pDevice->PortOffset, 0x0A, &byBBRxConf); /* CR10 */
+ BBbReadEmbedded(priv, 0x0A, &byBBRxConf); /* CR10 */
/* patch for 3253B0 Baseband with Cardbus module */
- if (byData == pDevice->abyBBVGA[0])
+ if (byData == priv->abyBBVGA[0])
byBBRxConf |= 0x20; /* 0010 0000 */
- else if (pDevice->bShortSlotTime)
+ else if (priv->bShortSlotTime)
byBBRxConf &= 0xDF; /* 1101 1111 */
else
byBBRxConf |= 0x20; /* 0010 0000 */
- pDevice->byBBVGACurrent = byData;
- BBbWriteEmbedded(pDevice->PortOffset, 0x0A, byBBRxConf); /* CR10 */
+ priv->byBBVGACurrent = byData;
+ BBbWriteEmbedded(priv, 0x0A, byBBRxConf); /* CR10 */
}
/*
@@ -2430,12 +2270,12 @@ void BBvSetVGAGainOffset(struct vnt_private *pDevice, unsigned char byData)
*
*/
void
-BBvSoftwareReset(void __iomem *dwIoBase)
+BBvSoftwareReset(struct vnt_private *priv)
{
- BBbWriteEmbedded(dwIoBase, 0x50, 0x40);
- BBbWriteEmbedded(dwIoBase, 0x50, 0);
- BBbWriteEmbedded(dwIoBase, 0x9C, 0x01);
- BBbWriteEmbedded(dwIoBase, 0x9C, 0);
+ BBbWriteEmbedded(priv, 0x50, 0x40);
+ BBbWriteEmbedded(priv, 0x50, 0);
+ BBbWriteEmbedded(priv, 0x9C, 0x01);
+ BBbWriteEmbedded(priv, 0x9C, 0);
}
/*
@@ -2451,13 +2291,13 @@ BBvSoftwareReset(void __iomem *dwIoBase)
*
*/
void
-BBvPowerSaveModeON(void __iomem *dwIoBase)
+BBvPowerSaveModeON(struct vnt_private *priv)
{
unsigned char byOrgData;
- BBbReadEmbedded(dwIoBase, 0x0D, &byOrgData);
- byOrgData |= BIT0;
- BBbWriteEmbedded(dwIoBase, 0x0D, byOrgData);
+ BBbReadEmbedded(priv, 0x0D, &byOrgData);
+ byOrgData |= BIT(0);
+ BBbWriteEmbedded(priv, 0x0D, byOrgData);
}
/*
@@ -2473,13 +2313,13 @@ BBvPowerSaveModeON(void __iomem *dwIoBase)
*
*/
void
-BBvPowerSaveModeOFF(void __iomem *dwIoBase)
+BBvPowerSaveModeOFF(struct vnt_private *priv)
{
unsigned char byOrgData;
- BBbReadEmbedded(dwIoBase, 0x0D, &byOrgData);
- byOrgData &= ~(BIT0);
- BBbWriteEmbedded(dwIoBase, 0x0D, byOrgData);
+ BBbReadEmbedded(priv, 0x0D, &byOrgData);
+ byOrgData &= ~(BIT(0));
+ BBbWriteEmbedded(priv, 0x0D, byOrgData);
}
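
The BIT0 constant used by the old power-save helpers is a driver-local define; BIT(0) comes from <linux/bitops.h> and expands to (1UL << 0). A small, self-contained illustration of the set/clear idiom these two functions rely on (plain C, not driver code):

#include <linux/bitops.h>
#include <linux/types.h>

static u8 example_ps_on(u8 reg)
{
	return reg | BIT(0);	/* set bit 0: power-save on */
}

static u8 example_ps_off(u8 reg)
{
	return reg & ~BIT(0);	/* clear bit 0: power-save off */
}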
/*
@@ -2487,7 +2327,7 @@ BBvPowerSaveModeOFF(void __iomem *dwIoBase)
*
* Parameters:
* In:
- * pDevice - Device Structure
+ * priv - Device Structure
* byAntennaMode - Antenna Mode
* Out:
* none
@@ -2497,11 +2337,11 @@ BBvPowerSaveModeOFF(void __iomem *dwIoBase)
*/
void
-BBvSetTxAntennaMode(void __iomem *dwIoBase, unsigned char byAntennaMode)
+BBvSetTxAntennaMode(struct vnt_private *priv, unsigned char byAntennaMode)
{
unsigned char byBBTxConf;
- BBbReadEmbedded(dwIoBase, 0x09, &byBBTxConf); /* CR09 */
+ BBbReadEmbedded(priv, 0x09, &byBBTxConf); /* CR09 */
if (byAntennaMode == ANT_DIVERSITY) {
/* bit 1 is diversity */
byBBTxConf |= 0x02;
@@ -2512,7 +2352,7 @@ BBvSetTxAntennaMode(void __iomem *dwIoBase, unsigned char byAntennaMode)
byBBTxConf &= 0xFD; /* 1111 1101 */
byBBTxConf |= 0x04;
}
- BBbWriteEmbedded(dwIoBase, 0x09, byBBTxConf); /* CR09 */
+ BBbWriteEmbedded(priv, 0x09, byBBTxConf); /* CR09 */
}
/*
@@ -2520,7 +2360,7 @@ BBvSetTxAntennaMode(void __iomem *dwIoBase, unsigned char byAntennaMode)
*
* Parameters:
* In:
- * pDevice - Device Structure
+ * priv - Device Structure
* byAntennaMode - Antenna Mode
* Out:
* none
@@ -2530,11 +2370,11 @@ BBvSetTxAntennaMode(void __iomem *dwIoBase, unsigned char byAntennaMode)
*/
void
-BBvSetRxAntennaMode(void __iomem *dwIoBase, unsigned char byAntennaMode)
+BBvSetRxAntennaMode(struct vnt_private *priv, unsigned char byAntennaMode)
{
unsigned char byBBRxConf;
- BBbReadEmbedded(dwIoBase, 0x0A, &byBBRxConf); /* CR10 */
+ BBbReadEmbedded(priv, 0x0A, &byBBRxConf); /* CR10 */
if (byAntennaMode == ANT_DIVERSITY) {
byBBRxConf |= 0x01;
@@ -2544,7 +2384,7 @@ BBvSetRxAntennaMode(void __iomem *dwIoBase, unsigned char byAntennaMode)
byBBRxConf &= 0xFE; /* 1111 1110 */
byBBRxConf |= 0x02;
}
- BBbWriteEmbedded(dwIoBase, 0x0A, byBBRxConf); /* CR10 */
+ BBbWriteEmbedded(priv, 0x0A, byBBRxConf); /* CR10 */
}
/*
@@ -2552,7 +2392,7 @@ BBvSetRxAntennaMode(void __iomem *dwIoBase, unsigned char byAntennaMode)
*
* Parameters:
* In:
- * pDevice - Device Structure
+ * priv - Device Structure
* Out:
* none
*
@@ -2560,109 +2400,109 @@ BBvSetRxAntennaMode(void __iomem *dwIoBase, unsigned char byAntennaMode)
*
*/
void
-BBvSetDeepSleep(void __iomem *dwIoBase, unsigned char byLocalID)
+BBvSetDeepSleep(struct vnt_private *priv, unsigned char byLocalID)
{
- BBbWriteEmbedded(dwIoBase, 0x0C, 0x17); /* CR12 */
- BBbWriteEmbedded(dwIoBase, 0x0D, 0xB9); /* CR13 */
+ BBbWriteEmbedded(priv, 0x0C, 0x17); /* CR12 */
+ BBbWriteEmbedded(priv, 0x0D, 0xB9); /* CR13 */
}
void
-BBvExitDeepSleep(void __iomem *dwIoBase, unsigned char byLocalID)
+BBvExitDeepSleep(struct vnt_private *priv, unsigned char byLocalID)
{
- BBbWriteEmbedded(dwIoBase, 0x0C, 0x00); /* CR12 */
- BBbWriteEmbedded(dwIoBase, 0x0D, 0x01); /* CR13 */
+ BBbWriteEmbedded(priv, 0x0C, 0x00); /* CR12 */
+ BBbWriteEmbedded(priv, 0x0D, 0x01); /* CR13 */
}
static
unsigned long
-s_ulGetRatio(struct vnt_private *pDevice)
+s_ulGetRatio(struct vnt_private *priv)
{
unsigned long ulRatio = 0;
unsigned long ulMaxPacket;
unsigned long ulPacketNum;
/* This is a thousand-ratio */
- ulMaxPacket = pDevice->uNumSQ3[RATE_54M];
- if (pDevice->uNumSQ3[RATE_54M] != 0) {
- ulPacketNum = pDevice->uNumSQ3[RATE_54M];
- ulRatio = (ulPacketNum * 1000 / pDevice->uDiversityCnt);
+ ulMaxPacket = priv->uNumSQ3[RATE_54M];
+ if (priv->uNumSQ3[RATE_54M] != 0) {
+ ulPacketNum = priv->uNumSQ3[RATE_54M];
+ ulRatio = (ulPacketNum * 1000 / priv->uDiversityCnt);
ulRatio += TOP_RATE_54M;
}
- if (pDevice->uNumSQ3[RATE_48M] > ulMaxPacket) {
- ulPacketNum = pDevice->uNumSQ3[RATE_54M] + pDevice->uNumSQ3[RATE_48M];
- ulRatio = (ulPacketNum * 1000 / pDevice->uDiversityCnt);
+ if (priv->uNumSQ3[RATE_48M] > ulMaxPacket) {
+ ulPacketNum = priv->uNumSQ3[RATE_54M] + priv->uNumSQ3[RATE_48M];
+ ulRatio = (ulPacketNum * 1000 / priv->uDiversityCnt);
ulRatio += TOP_RATE_48M;
- ulMaxPacket = pDevice->uNumSQ3[RATE_48M];
+ ulMaxPacket = priv->uNumSQ3[RATE_48M];
}
- if (pDevice->uNumSQ3[RATE_36M] > ulMaxPacket) {
- ulPacketNum = pDevice->uNumSQ3[RATE_54M] + pDevice->uNumSQ3[RATE_48M] +
- pDevice->uNumSQ3[RATE_36M];
- ulRatio = (ulPacketNum * 1000 / pDevice->uDiversityCnt);
+ if (priv->uNumSQ3[RATE_36M] > ulMaxPacket) {
+ ulPacketNum = priv->uNumSQ3[RATE_54M] + priv->uNumSQ3[RATE_48M] +
+ priv->uNumSQ3[RATE_36M];
+ ulRatio = (ulPacketNum * 1000 / priv->uDiversityCnt);
ulRatio += TOP_RATE_36M;
- ulMaxPacket = pDevice->uNumSQ3[RATE_36M];
+ ulMaxPacket = priv->uNumSQ3[RATE_36M];
}
- if (pDevice->uNumSQ3[RATE_24M] > ulMaxPacket) {
- ulPacketNum = pDevice->uNumSQ3[RATE_54M] + pDevice->uNumSQ3[RATE_48M] +
- pDevice->uNumSQ3[RATE_36M] + pDevice->uNumSQ3[RATE_24M];
- ulRatio = (ulPacketNum * 1000 / pDevice->uDiversityCnt);
+ if (priv->uNumSQ3[RATE_24M] > ulMaxPacket) {
+ ulPacketNum = priv->uNumSQ3[RATE_54M] + priv->uNumSQ3[RATE_48M] +
+ priv->uNumSQ3[RATE_36M] + priv->uNumSQ3[RATE_24M];
+ ulRatio = (ulPacketNum * 1000 / priv->uDiversityCnt);
ulRatio += TOP_RATE_24M;
- ulMaxPacket = pDevice->uNumSQ3[RATE_24M];
+ ulMaxPacket = priv->uNumSQ3[RATE_24M];
}
- if (pDevice->uNumSQ3[RATE_18M] > ulMaxPacket) {
- ulPacketNum = pDevice->uNumSQ3[RATE_54M] + pDevice->uNumSQ3[RATE_48M] +
- pDevice->uNumSQ3[RATE_36M] + pDevice->uNumSQ3[RATE_24M] +
- pDevice->uNumSQ3[RATE_18M];
- ulRatio = (ulPacketNum * 1000 / pDevice->uDiversityCnt);
+ if (priv->uNumSQ3[RATE_18M] > ulMaxPacket) {
+ ulPacketNum = priv->uNumSQ3[RATE_54M] + priv->uNumSQ3[RATE_48M] +
+ priv->uNumSQ3[RATE_36M] + priv->uNumSQ3[RATE_24M] +
+ priv->uNumSQ3[RATE_18M];
+ ulRatio = (ulPacketNum * 1000 / priv->uDiversityCnt);
ulRatio += TOP_RATE_18M;
- ulMaxPacket = pDevice->uNumSQ3[RATE_18M];
+ ulMaxPacket = priv->uNumSQ3[RATE_18M];
}
- if (pDevice->uNumSQ3[RATE_12M] > ulMaxPacket) {
- ulPacketNum = pDevice->uNumSQ3[RATE_54M] + pDevice->uNumSQ3[RATE_48M] +
- pDevice->uNumSQ3[RATE_36M] + pDevice->uNumSQ3[RATE_24M] +
- pDevice->uNumSQ3[RATE_18M] + pDevice->uNumSQ3[RATE_12M];
- ulRatio = (ulPacketNum * 1000 / pDevice->uDiversityCnt);
+ if (priv->uNumSQ3[RATE_12M] > ulMaxPacket) {
+ ulPacketNum = priv->uNumSQ3[RATE_54M] + priv->uNumSQ3[RATE_48M] +
+ priv->uNumSQ3[RATE_36M] + priv->uNumSQ3[RATE_24M] +
+ priv->uNumSQ3[RATE_18M] + priv->uNumSQ3[RATE_12M];
+ ulRatio = (ulPacketNum * 1000 / priv->uDiversityCnt);
ulRatio += TOP_RATE_12M;
- ulMaxPacket = pDevice->uNumSQ3[RATE_12M];
+ ulMaxPacket = priv->uNumSQ3[RATE_12M];
}
- if (pDevice->uNumSQ3[RATE_11M] > ulMaxPacket) {
- ulPacketNum = pDevice->uDiversityCnt - pDevice->uNumSQ3[RATE_1M] -
- pDevice->uNumSQ3[RATE_2M] - pDevice->uNumSQ3[RATE_5M] -
- pDevice->uNumSQ3[RATE_6M] - pDevice->uNumSQ3[RATE_9M];
- ulRatio = (ulPacketNum * 1000 / pDevice->uDiversityCnt);
+ if (priv->uNumSQ3[RATE_11M] > ulMaxPacket) {
+ ulPacketNum = priv->uDiversityCnt - priv->uNumSQ3[RATE_1M] -
+ priv->uNumSQ3[RATE_2M] - priv->uNumSQ3[RATE_5M] -
+ priv->uNumSQ3[RATE_6M] - priv->uNumSQ3[RATE_9M];
+ ulRatio = (ulPacketNum * 1000 / priv->uDiversityCnt);
ulRatio += TOP_RATE_11M;
- ulMaxPacket = pDevice->uNumSQ3[RATE_11M];
+ ulMaxPacket = priv->uNumSQ3[RATE_11M];
}
- if (pDevice->uNumSQ3[RATE_9M] > ulMaxPacket) {
- ulPacketNum = pDevice->uDiversityCnt - pDevice->uNumSQ3[RATE_1M] -
- pDevice->uNumSQ3[RATE_2M] - pDevice->uNumSQ3[RATE_5M] -
- pDevice->uNumSQ3[RATE_6M];
- ulRatio = (ulPacketNum * 1000 / pDevice->uDiversityCnt);
+ if (priv->uNumSQ3[RATE_9M] > ulMaxPacket) {
+ ulPacketNum = priv->uDiversityCnt - priv->uNumSQ3[RATE_1M] -
+ priv->uNumSQ3[RATE_2M] - priv->uNumSQ3[RATE_5M] -
+ priv->uNumSQ3[RATE_6M];
+ ulRatio = (ulPacketNum * 1000 / priv->uDiversityCnt);
ulRatio += TOP_RATE_9M;
- ulMaxPacket = pDevice->uNumSQ3[RATE_9M];
+ ulMaxPacket = priv->uNumSQ3[RATE_9M];
}
- if (pDevice->uNumSQ3[RATE_6M] > ulMaxPacket) {
- ulPacketNum = pDevice->uDiversityCnt - pDevice->uNumSQ3[RATE_1M] -
- pDevice->uNumSQ3[RATE_2M] - pDevice->uNumSQ3[RATE_5M];
- ulRatio = (ulPacketNum * 1000 / pDevice->uDiversityCnt);
+ if (priv->uNumSQ3[RATE_6M] > ulMaxPacket) {
+ ulPacketNum = priv->uDiversityCnt - priv->uNumSQ3[RATE_1M] -
+ priv->uNumSQ3[RATE_2M] - priv->uNumSQ3[RATE_5M];
+ ulRatio = (ulPacketNum * 1000 / priv->uDiversityCnt);
ulRatio += TOP_RATE_6M;
- ulMaxPacket = pDevice->uNumSQ3[RATE_6M];
+ ulMaxPacket = priv->uNumSQ3[RATE_6M];
}
- if (pDevice->uNumSQ3[RATE_5M] > ulMaxPacket) {
- ulPacketNum = pDevice->uDiversityCnt - pDevice->uNumSQ3[RATE_1M] -
- pDevice->uNumSQ3[RATE_2M];
- ulRatio = (ulPacketNum * 1000 / pDevice->uDiversityCnt);
+ if (priv->uNumSQ3[RATE_5M] > ulMaxPacket) {
+ ulPacketNum = priv->uDiversityCnt - priv->uNumSQ3[RATE_1M] -
+ priv->uNumSQ3[RATE_2M];
+ ulRatio = (ulPacketNum * 1000 / priv->uDiversityCnt);
ulRatio += TOP_RATE_55M;
- ulMaxPacket = pDevice->uNumSQ3[RATE_5M];
+ ulMaxPacket = priv->uNumSQ3[RATE_5M];
}
- if (pDevice->uNumSQ3[RATE_2M] > ulMaxPacket) {
- ulPacketNum = pDevice->uDiversityCnt - pDevice->uNumSQ3[RATE_1M];
- ulRatio = (ulPacketNum * 1000 / pDevice->uDiversityCnt);
+ if (priv->uNumSQ3[RATE_2M] > ulMaxPacket) {
+ ulPacketNum = priv->uDiversityCnt - priv->uNumSQ3[RATE_1M];
+ ulRatio = (ulPacketNum * 1000 / priv->uDiversityCnt);
ulRatio += TOP_RATE_2M;
- ulMaxPacket = pDevice->uNumSQ3[RATE_2M];
+ ulMaxPacket = priv->uNumSQ3[RATE_2M];
}
- if (pDevice->uNumSQ3[RATE_1M] > ulMaxPacket) {
- ulPacketNum = pDevice->uDiversityCnt;
- ulRatio = (ulPacketNum * 1000 / pDevice->uDiversityCnt);
+ if (priv->uNumSQ3[RATE_1M] > ulMaxPacket) {
+ ulPacketNum = priv->uDiversityCnt;
+ ulRatio = (ulPacketNum * 1000 / priv->uDiversityCnt);
ulRatio += TOP_RATE_1M;
}
@@ -2670,13 +2510,13 @@ s_ulGetRatio(struct vnt_private *pDevice)
}
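
For readers following the renames above, the "thousand ratio" computed by s_ulGetRatio is the per-mille share of received samples at or above the best-performing rate, plus a per-rate TOP_RATE_* bias so a higher rate bucket always outranks a lower one. A hedged sketch of the core step (the weight argument stands in for the TOP_RATE_* constants defined elsewhere in the driver; the function name is illustrative):

/* Hedged sketch of one ratio step: per-mille share of samples at or above a
 * given rate, biased by a rate-specific weight. */
static unsigned long example_rate_ratio(unsigned long at_or_above,
					unsigned long total_samples,
					unsigned long rate_weight)
{
	if (total_samples == 0)
		return 0;

	return at_or_above * 1000 / total_samples + rate_weight;
}

For instance, 40 of 100 samples at 48M or better gives 400 plus the 48M weight, which then competes against the other rate buckets.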
void
-BBvClearAntDivSQ3Value(struct vnt_private *pDevice)
+BBvClearAntDivSQ3Value(struct vnt_private *priv)
{
unsigned int ii;
- pDevice->uDiversityCnt = 0;
+ priv->uDiversityCnt = 0;
for (ii = 0; ii < MAX_RATE; ii++)
- pDevice->uNumSQ3[ii] = 0;
+ priv->uNumSQ3[ii] = 0;
}
/*
@@ -2684,7 +2524,7 @@ BBvClearAntDivSQ3Value(struct vnt_private *pDevice)
*
* Parameters:
* In:
- * pDevice - Device Structure
+ * priv - Device Structure
* byRSR - RSR from received packet
* bySQ3 - SQ3 value from received packet
* Out:
@@ -2694,75 +2534,75 @@ BBvClearAntDivSQ3Value(struct vnt_private *pDevice)
*
*/
-void BBvAntennaDiversity(struct vnt_private *pDevice,
+void BBvAntennaDiversity(struct vnt_private *priv,
unsigned char byRxRate, unsigned char bySQ3)
{
- if ((byRxRate >= MAX_RATE) || (pDevice->wAntDiversityMaxRate >= MAX_RATE))
+ if ((byRxRate >= MAX_RATE) || (priv->wAntDiversityMaxRate >= MAX_RATE))
return;
- pDevice->uDiversityCnt++;
+ priv->uDiversityCnt++;
- pDevice->uNumSQ3[byRxRate]++;
+ priv->uNumSQ3[byRxRate]++;
- if (pDevice->byAntennaState == 0) {
- if (pDevice->uDiversityCnt > pDevice->ulDiversityNValue) {
+ if (priv->byAntennaState == 0) {
+ if (priv->uDiversityCnt > priv->ulDiversityNValue) {
pr_debug("ulDiversityNValue=[%d],54M-[%d]\n",
- (int)pDevice->ulDiversityNValue,
- (int)pDevice->uNumSQ3[(int)pDevice->wAntDiversityMaxRate]);
+ (int)priv->ulDiversityNValue,
+ (int)priv->uNumSQ3[(int)priv->wAntDiversityMaxRate]);
- if (pDevice->uNumSQ3[pDevice->wAntDiversityMaxRate] < pDevice->uDiversityCnt/2) {
- pDevice->ulRatio_State0 = s_ulGetRatio(pDevice);
+ if (priv->uNumSQ3[priv->wAntDiversityMaxRate] < priv->uDiversityCnt/2) {
+ priv->ulRatio_State0 = s_ulGetRatio(priv);
pr_debug("SQ3_State0, rate = [%08x]\n",
- (int)pDevice->ulRatio_State0);
+ (int)priv->ulRatio_State0);
- if (pDevice->byTMax == 0)
+ if (priv->byTMax == 0)
return;
pr_debug("1.[%08x], uNumSQ3[%d]=%d, %d\n",
- (int)pDevice->ulRatio_State0,
- (int)pDevice->wAntDiversityMaxRate,
- (int)pDevice->uNumSQ3[(int)pDevice->wAntDiversityMaxRate],
- (int)pDevice->uDiversityCnt);
-
- s_vChangeAntenna(pDevice);
- pDevice->byAntennaState = 1;
- del_timer(&pDevice->TimerSQ3Tmax3);
- del_timer(&pDevice->TimerSQ3Tmax2);
- pDevice->TimerSQ3Tmax1.expires = RUN_AT(pDevice->byTMax * HZ);
- add_timer(&pDevice->TimerSQ3Tmax1);
+ (int)priv->ulRatio_State0,
+ (int)priv->wAntDiversityMaxRate,
+ (int)priv->uNumSQ3[(int)priv->wAntDiversityMaxRate],
+ (int)priv->uDiversityCnt);
+
+ s_vChangeAntenna(priv);
+ priv->byAntennaState = 1;
+ del_timer(&priv->TimerSQ3Tmax3);
+ del_timer(&priv->TimerSQ3Tmax2);
+ priv->TimerSQ3Tmax1.expires = RUN_AT(priv->byTMax * HZ);
+ add_timer(&priv->TimerSQ3Tmax1);
} else {
- pDevice->TimerSQ3Tmax3.expires = RUN_AT(pDevice->byTMax3 * HZ);
- add_timer(&pDevice->TimerSQ3Tmax3);
+ priv->TimerSQ3Tmax3.expires = RUN_AT(priv->byTMax3 * HZ);
+ add_timer(&priv->TimerSQ3Tmax3);
}
- BBvClearAntDivSQ3Value(pDevice);
+ BBvClearAntDivSQ3Value(priv);
}
} else { /* byAntennaState == 1 */
- if (pDevice->uDiversityCnt > pDevice->ulDiversityMValue) {
- del_timer(&pDevice->TimerSQ3Tmax1);
+ if (priv->uDiversityCnt > priv->ulDiversityMValue) {
+ del_timer(&priv->TimerSQ3Tmax1);
- pDevice->ulRatio_State1 = s_ulGetRatio(pDevice);
+ priv->ulRatio_State1 = s_ulGetRatio(priv);
pr_debug("RX:SQ3_State1, rate0 = %08x,rate1 = %08x\n",
- (int)pDevice->ulRatio_State0,
- (int)pDevice->ulRatio_State1);
+ (int)priv->ulRatio_State0,
+ (int)priv->ulRatio_State1);
- if (pDevice->ulRatio_State1 < pDevice->ulRatio_State0) {
+ if (priv->ulRatio_State1 < priv->ulRatio_State0) {
pr_debug("2.[%08x][%08x], uNumSQ3[%d]=%d, %d\n",
- (int)pDevice->ulRatio_State0,
- (int)pDevice->ulRatio_State1,
- (int)pDevice->wAntDiversityMaxRate,
- (int)pDevice->uNumSQ3[(int)pDevice->wAntDiversityMaxRate],
- (int)pDevice->uDiversityCnt);
-
- s_vChangeAntenna(pDevice);
- pDevice->TimerSQ3Tmax3.expires = RUN_AT(pDevice->byTMax3 * HZ);
- pDevice->TimerSQ3Tmax2.expires = RUN_AT(pDevice->byTMax2 * HZ);
- add_timer(&pDevice->TimerSQ3Tmax3);
- add_timer(&pDevice->TimerSQ3Tmax2);
+ (int)priv->ulRatio_State0,
+ (int)priv->ulRatio_State1,
+ (int)priv->wAntDiversityMaxRate,
+ (int)priv->uNumSQ3[(int)priv->wAntDiversityMaxRate],
+ (int)priv->uDiversityCnt);
+
+ s_vChangeAntenna(priv);
+ priv->TimerSQ3Tmax3.expires = RUN_AT(priv->byTMax3 * HZ);
+ priv->TimerSQ3Tmax2.expires = RUN_AT(priv->byTMax2 * HZ);
+ add_timer(&priv->TimerSQ3Tmax3);
+ add_timer(&priv->TimerSQ3Tmax2);
}
- pDevice->byAntennaState = 0;
- BBvClearAntDivSQ3Value(pDevice);
+ priv->byAntennaState = 0;
+ BBvClearAntDivSQ3Value(priv);
}
} /* byAntennaState */
}
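
For orientation while reading the renamed fields: the diversity logic above is a two-state probe. In state 0 it collects a window of samples; if fewer than half arrived at the preferred rate it records ulRatio_State0, switches antennas, and enters state 1. In state 1 it records ulRatio_State1 after a second window and switches back whenever the probe antenna scored worse. A hedged sketch of just those two decisions (names and helpers are illustrative, not driver code):

static bool example_should_probe(unsigned long samples, unsigned long window,
				 unsigned long best_rate_hits)
{
	/* state 0: probe the other antenna only when fewer than half of a
	 * full sample window arrived at the preferred rate */
	return samples > window && best_rate_hits < samples / 2;
}

static bool example_should_revert(unsigned long ratio_state0,
				  unsigned long ratio_state1)
{
	/* state 1: revert when the probe antenna performed worse */
	return ratio_state1 < ratio_state0;
}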
@@ -2783,28 +2623,30 @@ void BBvAntennaDiversity(struct vnt_private *pDevice,
void
TimerSQ3CallBack(
- void *hDeviceContext
+ unsigned long data
)
{
- struct vnt_private *pDevice = hDeviceContext;
+ struct vnt_private *priv = (struct vnt_private *)data;
+ unsigned long flags;
pr_debug("TimerSQ3CallBack...\n");
- spin_lock_irq(&pDevice->lock);
+
+ spin_lock_irqsave(&priv->lock, flags);
pr_debug("3.[%08x][%08x], %d\n",
- (int)pDevice->ulRatio_State0, (int)pDevice->ulRatio_State1,
- (int)pDevice->uDiversityCnt);
+ (int)priv->ulRatio_State0, (int)priv->ulRatio_State1,
+ (int)priv->uDiversityCnt);
- s_vChangeAntenna(pDevice);
- pDevice->byAntennaState = 0;
- BBvClearAntDivSQ3Value(pDevice);
+ s_vChangeAntenna(priv);
+ priv->byAntennaState = 0;
+ BBvClearAntDivSQ3Value(priv);
- pDevice->TimerSQ3Tmax3.expires = RUN_AT(pDevice->byTMax3 * HZ);
- pDevice->TimerSQ3Tmax2.expires = RUN_AT(pDevice->byTMax2 * HZ);
- add_timer(&pDevice->TimerSQ3Tmax3);
- add_timer(&pDevice->TimerSQ3Tmax2);
+ priv->TimerSQ3Tmax3.expires = RUN_AT(priv->byTMax3 * HZ);
+ priv->TimerSQ3Tmax2.expires = RUN_AT(priv->byTMax2 * HZ);
+ add_timer(&priv->TimerSQ3Tmax3);
+ add_timer(&priv->TimerSQ3Tmax2);
- spin_unlock_irq(&pDevice->lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
}
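
The callback signature change above (and the matching one in TimerState1CallBack below) follows the classic struct timer_list API of this kernel generation, where the handler receives an unsigned long cookie rather than a void pointer; the private structure is passed through that cookie at setup time. A hedged sketch of the setup side, assuming the timer fields visible in this file; the init helper name is illustrative, and mod_timer is used for brevity where the driver sets .expires and calls add_timer directly:

#include <linux/timer.h>
#include <linux/jiffies.h>

/* Hedged sketch: how the priv pointer reaches a callback that takes
 * 'unsigned long data' with the pre-4.15 timer API. */
static void example_arm_sq3_timer(struct vnt_private *priv)
{
	setup_timer(&priv->TimerSQ3Tmax1, TimerSQ3CallBack,
		    (unsigned long)priv);
	mod_timer(&priv->TimerSQ3Tmax1, jiffies + priv->byTMax * HZ);
}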
/*+
@@ -2827,43 +2669,46 @@ TimerSQ3CallBack(
void
TimerState1CallBack(
- void *hDeviceContext
+ unsigned long data
)
{
- struct vnt_private *pDevice = hDeviceContext;
+ struct vnt_private *priv = (struct vnt_private *)data;
+ unsigned long flags;
pr_debug("TimerState1CallBack...\n");
- spin_lock_irq(&pDevice->lock);
- if (pDevice->uDiversityCnt < pDevice->ulDiversityMValue/100) {
- s_vChangeAntenna(pDevice);
- pDevice->TimerSQ3Tmax3.expires = RUN_AT(pDevice->byTMax3 * HZ);
- pDevice->TimerSQ3Tmax2.expires = RUN_AT(pDevice->byTMax2 * HZ);
- add_timer(&pDevice->TimerSQ3Tmax3);
- add_timer(&pDevice->TimerSQ3Tmax2);
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (priv->uDiversityCnt < priv->ulDiversityMValue/100) {
+ s_vChangeAntenna(priv);
+ priv->TimerSQ3Tmax3.expires = RUN_AT(priv->byTMax3 * HZ);
+ priv->TimerSQ3Tmax2.expires = RUN_AT(priv->byTMax2 * HZ);
+ add_timer(&priv->TimerSQ3Tmax3);
+ add_timer(&priv->TimerSQ3Tmax2);
} else {
- pDevice->ulRatio_State1 = s_ulGetRatio(pDevice);
+ priv->ulRatio_State1 = s_ulGetRatio(priv);
pr_debug("SQ3_State1, rate0 = %08x,rate1 = %08x\n",
- (int)pDevice->ulRatio_State0,
- (int)pDevice->ulRatio_State1);
+ (int)priv->ulRatio_State0,
+ (int)priv->ulRatio_State1);
- if (pDevice->ulRatio_State1 < pDevice->ulRatio_State0) {
+ if (priv->ulRatio_State1 < priv->ulRatio_State0) {
pr_debug("2.[%08x][%08x], uNumSQ3[%d]=%d, %d\n",
- (int)pDevice->ulRatio_State0,
- (int)pDevice->ulRatio_State1,
- (int)pDevice->wAntDiversityMaxRate,
- (int)pDevice->uNumSQ3[(int)pDevice->wAntDiversityMaxRate],
- (int)pDevice->uDiversityCnt);
-
- s_vChangeAntenna(pDevice);
-
- pDevice->TimerSQ3Tmax3.expires = RUN_AT(pDevice->byTMax3 * HZ);
- pDevice->TimerSQ3Tmax2.expires = RUN_AT(pDevice->byTMax2 * HZ);
- add_timer(&pDevice->TimerSQ3Tmax3);
- add_timer(&pDevice->TimerSQ3Tmax2);
+ (int)priv->ulRatio_State0,
+ (int)priv->ulRatio_State1,
+ (int)priv->wAntDiversityMaxRate,
+ (int)priv->uNumSQ3[(int)priv->wAntDiversityMaxRate],
+ (int)priv->uDiversityCnt);
+
+ s_vChangeAntenna(priv);
+
+ priv->TimerSQ3Tmax3.expires = RUN_AT(priv->byTMax3 * HZ);
+ priv->TimerSQ3Tmax2.expires = RUN_AT(priv->byTMax2 * HZ);
+ add_timer(&priv->TimerSQ3Tmax3);
+ add_timer(&priv->TimerSQ3Tmax2);
}
}
- pDevice->byAntennaState = 0;
- BBvClearAntDivSQ3Value(pDevice);
- spin_unlock_irq(&pDevice->lock);
+ priv->byAntennaState = 0;
+ BBvClearAntDivSQ3Value(priv);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
}
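
The switch from spin_lock_irq()/spin_unlock_irq() to the irqsave variants in both timer callbacks is the usual defensive choice: the save/restore pair preserves the caller's interrupt state instead of unconditionally re-enabling interrupts on unlock. A minimal illustration of the idiom, not driver code:

#include <linux/spinlock.h>

static void example_locked_update(spinlock_t *lock, int *counter)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);		/* disables IRQs, remembers prior state */
	(*counter)++;
	spin_unlock_irqrestore(lock, flags);	/* restores exactly what was saved */
}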
diff --git a/drivers/staging/vt6655/baseband.h b/drivers/staging/vt6655/baseband.h
index 31f2255519c..d9f6d63e4ab 100644
--- a/drivers/staging/vt6655/baseband.h
+++ b/drivers/staging/vt6655/baseband.h
@@ -30,8 +30,6 @@
#ifndef __BASEBAND_H__
#define __BASEBAND_H__
-#include "ttype.h"
-#include "tether.h"
#include "device.h"
/*
@@ -79,42 +77,37 @@ BBuGetFrameTime(
void vnt_get_phy_field(struct vnt_private *, u32 frame_length,
u16 tx_rate, u8 pkt_type, struct vnt_phy_field *);
-bool BBbReadEmbedded(void __iomem *dwIoBase, unsigned char byBBAddr, unsigned char *pbyData);
-bool BBbWriteEmbedded(void __iomem *dwIoBase, unsigned char byBBAddr, unsigned char byData);
+bool BBbReadEmbedded(struct vnt_private *, unsigned char byBBAddr, unsigned char *pbyData);
+bool BBbWriteEmbedded(struct vnt_private *, unsigned char byBBAddr, unsigned char byData);
-void BBvReadAllRegs(void __iomem *dwIoBase, unsigned char *pbyBBRegs);
-void BBvLoopbackOn(struct vnt_private *pDevice);
-void BBvLoopbackOff(struct vnt_private *pDevice);
-void BBvSetShortSlotTime(struct vnt_private *pDevice);
-bool BBbIsRegBitsOn(void __iomem *dwIoBase, unsigned char byBBAddr, unsigned char byTestBits);
-bool BBbIsRegBitsOff(void __iomem *dwIoBase, unsigned char byBBAddr, unsigned char byTestBits);
-void BBvSetVGAGainOffset(struct vnt_private *pDevice, unsigned char byData);
+void BBvSetShortSlotTime(struct vnt_private *);
+void BBvSetVGAGainOffset(struct vnt_private *, unsigned char byData);
/* VT3253 Baseband */
-bool BBbVT3253Init(struct vnt_private *pDevice);
-void BBvSoftwareReset(void __iomem *dwIoBase);
-void BBvPowerSaveModeON(void __iomem *dwIoBase);
-void BBvPowerSaveModeOFF(void __iomem *dwIoBase);
-void BBvSetTxAntennaMode(void __iomem *dwIoBase, unsigned char byAntennaMode);
-void BBvSetRxAntennaMode(void __iomem *dwIoBase, unsigned char byAntennaMode);
-void BBvSetDeepSleep(void __iomem *dwIoBase, unsigned char byLocalID);
-void BBvExitDeepSleep(void __iomem *dwIoBase, unsigned char byLocalID);
+bool BBbVT3253Init(struct vnt_private *);
+void BBvSoftwareReset(struct vnt_private *);
+void BBvPowerSaveModeON(struct vnt_private *);
+void BBvPowerSaveModeOFF(struct vnt_private *);
+void BBvSetTxAntennaMode(struct vnt_private *, unsigned char byAntennaMode);
+void BBvSetRxAntennaMode(struct vnt_private *, unsigned char byAntennaMode);
+void BBvSetDeepSleep(struct vnt_private *, unsigned char byLocalID);
+void BBvExitDeepSleep(struct vnt_private *, unsigned char byLocalID);
/* timer for antenna diversity */
void
TimerSQ3CallBack(
- void *hDeviceContext
+ unsigned long
);
void
TimerState1CallBack(
- void *hDeviceContext
+ unsigned long
);
-void BBvAntennaDiversity(struct vnt_private *pDevice,
+void BBvAntennaDiversity(struct vnt_private *,
unsigned char byRxRate, unsigned char bySQ3);
void
-BBvClearAntDivSQ3Value(struct vnt_private *pDevice);
+BBvClearAntDivSQ3Value(struct vnt_private *);
#endif /* __BASEBAND_H__ */
diff --git a/drivers/staging/vt6655/bssdb.c b/drivers/staging/vt6655/bssdb.c
deleted file mode 100644
index 996d3302ce3..00000000000
--- a/drivers/staging/vt6655/bssdb.c
+++ /dev/null
@@ -1,1512 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: bssdb.c
- *
- * Purpose: Handles the Basic Service Set & Node Database functions
- *
- * Functions:
- * BSSpSearchBSSList - Search known BSS list for Desired SSID or BSSID
- * BSSvClearBSSList - Clear BSS List
- * BSSbInsertToBSSList - Insert a BSS set into known BSS list
- * BSSbUpdateToBSSList - Update BSS set in known BSS list
- * BSSDBbIsSTAInNodeDB - Search Node DB table to find the index of matched DstAddr
- * BSSvCreateOneNode - Allocate an Node for Node DB
- * BSSvUpdateAPNode - Update AP Node content in Index 0 of KnownNodeDB
- * BSSvSecondCallBack - One second timer callback function to update Node DB info & AP link status
- * BSSvUpdateNodeTxCounter - Update Tx attempts and Tx failure counters in Node DB for auto fallback rate control
- *
- * Revision History:
- *
- * Author: Lyndon Chen
- *
- * Date: July 17, 2002
- *
- */
-
-#include "ttype.h"
-#include "tmacro.h"
-#include "tether.h"
-#include "device.h"
-#include "80211hdr.h"
-#include "bssdb.h"
-#include "wmgr.h"
-#include "datarate.h"
-#include "desc.h"
-#include "wcmd.h"
-#include "wpa.h"
-#include "baseband.h"
-#include "rf.h"
-#include "card.h"
-#include "channel.h"
-#include "mac.h"
-#include "wpa2.h"
-#include "iowpa.h"
-
-/*--------------------- Static Definitions -------------------------*/
-
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Variables --------------------------*/
-static const unsigned short awHWRetry0[5][5] = {
- {RATE_18M, RATE_18M, RATE_12M, RATE_12M, RATE_12M},
- {RATE_24M, RATE_24M, RATE_18M, RATE_12M, RATE_12M},
- {RATE_36M, RATE_36M, RATE_24M, RATE_18M, RATE_18M},
- {RATE_48M, RATE_48M, RATE_36M, RATE_24M, RATE_24M},
- {RATE_54M, RATE_54M, RATE_48M, RATE_36M, RATE_36M}
-};
-static const unsigned short awHWRetry1[5][5] = {
- {RATE_18M, RATE_18M, RATE_12M, RATE_6M, RATE_6M},
- {RATE_24M, RATE_24M, RATE_18M, RATE_6M, RATE_6M},
- {RATE_36M, RATE_36M, RATE_24M, RATE_12M, RATE_12M},
- {RATE_48M, RATE_48M, RATE_24M, RATE_12M, RATE_12M},
- {RATE_54M, RATE_54M, RATE_36M, RATE_18M, RATE_18M}
-};
-
-/*--------------------- Static Functions --------------------------*/
-
-void s_vCheckSensitivity(
- void *hDeviceContext
-);
-
-#ifdef Calcu_LinkQual
-void s_uCalculateLinkQual(
- void *hDeviceContext
-);
-#endif
-
-void s_vCheckPreEDThreshold(
- void *hDeviceContext
-);
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-/*+
- *
- * Routine Description:
- * Search known BSS list for Desired SSID or BSSID.
- *
- * Return Value:
- * PTR to KnownBSS or NULL
- *
- -*/
-
-PKnownBSS
-BSSpSearchBSSList(
- void *hDeviceContext,
- unsigned char *pbyDesireBSSID,
- unsigned char *pbyDesireSSID,
- CARD_PHY_TYPE ePhyType
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- unsigned char *pbyBSSID = NULL;
- PWLAN_IE_SSID pSSID = NULL;
- PKnownBSS pCurrBSS = NULL;
- PKnownBSS pSelect = NULL;
- unsigned char ZeroBSSID[WLAN_BSSID_LEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
- unsigned int ii = 0;
-
- if (pbyDesireBSSID != NULL) {
- pr_debug("BSSpSearchBSSList BSSID[%pM]\n", pbyDesireBSSID);
- if ((!is_broadcast_ether_addr(pbyDesireBSSID)) &&
- (memcmp(pbyDesireBSSID, ZeroBSSID, 6) != 0))
- pbyBSSID = pbyDesireBSSID;
- }
- if (pbyDesireSSID != NULL) {
- if (((PWLAN_IE_SSID)pbyDesireSSID)->len != 0)
- pSSID = (PWLAN_IE_SSID) pbyDesireSSID;
- }
-
- if (pbyBSSID != NULL) {
- /* match BSSID first */
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- pCurrBSS = &(pMgmt->sBSSList[ii]);
- if (!pDevice->bLinkPass)
- pCurrBSS->bSelected = false;
- if ((pCurrBSS->bActive) &&
- (!pCurrBSS->bSelected)) {
- if (ether_addr_equal(pCurrBSS->abyBSSID,
- pbyBSSID)) {
- if (pSSID != NULL) {
- /* compare ssid */
- if (!memcmp(pSSID->abySSID,
- ((PWLAN_IE_SSID)pCurrBSS->abySSID)->abySSID,
- pSSID->len)) {
- if ((pMgmt->eConfigMode == WMAC_CONFIG_AUTO) ||
- ((pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) && WLAN_GET_CAP_INFO_IBSS(pCurrBSS->wCapInfo)) ||
- ((pMgmt->eConfigMode == WMAC_CONFIG_ESS_STA) && WLAN_GET_CAP_INFO_ESS(pCurrBSS->wCapInfo))
-) {
- pCurrBSS->bSelected = true;
- return pCurrBSS;
- }
- }
- } else {
- if ((pMgmt->eConfigMode == WMAC_CONFIG_AUTO) ||
- ((pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) && WLAN_GET_CAP_INFO_IBSS(pCurrBSS->wCapInfo)) ||
- ((pMgmt->eConfigMode == WMAC_CONFIG_ESS_STA) && WLAN_GET_CAP_INFO_ESS(pCurrBSS->wCapInfo))
-) {
- pCurrBSS->bSelected = true;
- return pCurrBSS;
- }
- }
- }
- }
- }
- } else {
- /* ignore BSSID */
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- pCurrBSS = &(pMgmt->sBSSList[ii]);
- /* 2007-0721-01<Add>by MikeLiu */
- pCurrBSS->bSelected = false;
- if (pCurrBSS->bActive) {
- if (pSSID != NULL) {
- /* matched SSID */
- if (!!memcmp(pSSID->abySSID,
- ((PWLAN_IE_SSID)pCurrBSS->abySSID)->abySSID,
- pSSID->len) ||
- (pSSID->len != ((PWLAN_IE_SSID)pCurrBSS->abySSID)->len)) {
- /* SSID not match skip this BSS */
- continue;
- }
- }
- if (((pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) && WLAN_GET_CAP_INFO_ESS(pCurrBSS->wCapInfo)) ||
- ((pMgmt->eConfigMode == WMAC_CONFIG_ESS_STA) && WLAN_GET_CAP_INFO_IBSS(pCurrBSS->wCapInfo))
-) {
- /* Type not match skip this BSS */
- pr_debug("BSS type mismatch.... Config[%d] BSS[0x%04x]\n",
- pMgmt->eConfigMode,
- pCurrBSS->wCapInfo);
- continue;
- }
-
- if (ePhyType != PHY_TYPE_AUTO) {
- if (((ePhyType == PHY_TYPE_11A) && (PHY_TYPE_11A != pCurrBSS->eNetworkTypeInUse)) ||
- ((ePhyType != PHY_TYPE_11A) && (PHY_TYPE_11A == pCurrBSS->eNetworkTypeInUse))) {
- /* PhyType not match skip this BSS */
- pr_debug("Physical type mismatch.... ePhyType[%d] BSS[%d]\n",
- ePhyType,
- pCurrBSS->eNetworkTypeInUse);
- continue;
- }
- }
-
- if (pSelect == NULL) {
- pSelect = pCurrBSS;
- } else {
- /* compare RSSI, select signal strong one */
- if (pCurrBSS->uRSSI < pSelect->uRSSI)
- pSelect = pCurrBSS;
- }
- }
- }
- if (pSelect != NULL) {
- pSelect->bSelected = true;
- return pSelect;
- }
- }
- return NULL;
-}
-
-/*+
- *
- * Routine Description:
- * Clear BSS List
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-BSSvClearBSSList(
- void *hDeviceContext,
- bool bKeepCurrBSSID
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- unsigned int ii;
-
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- if (bKeepCurrBSSID) {
- if (pMgmt->sBSSList[ii].bActive &&
- ether_addr_equal(pMgmt->sBSSList[ii].abyBSSID,
- pMgmt->abyCurrBSSID)) {
- continue;
- }
- }
-
- if ((pMgmt->sBSSList[ii].bActive) && (pMgmt->sBSSList[ii].uClearCount < BSS_CLEAR_COUNT)) {
- pMgmt->sBSSList[ii].uClearCount++;
- continue;
- }
-
- pMgmt->sBSSList[ii].bActive = false;
- memset(&pMgmt->sBSSList[ii], 0, sizeof(KnownBSS));
- }
- BSSvClearAnyBSSJoinRecord(pDevice);
-}
-
-/*+
- *
- * Routine Description:
- * search BSS list by BSSID & SSID if matched
- *
- * Return Value:
- * true if found.
- *
- -*/
-PKnownBSS
-BSSpAddrIsInBSSList(
- void *hDeviceContext,
- unsigned char *abyBSSID,
- PWLAN_IE_SSID pSSID
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- PKnownBSS pBSSList = NULL;
- unsigned int ii;
-
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- pBSSList = &(pMgmt->sBSSList[ii]);
- if (pBSSList->bActive) {
- if (ether_addr_equal(pBSSList->abyBSSID, abyBSSID)) {
- if (pSSID->len == ((PWLAN_IE_SSID)pBSSList->abySSID)->len) {
- if (memcmp(pSSID->abySSID,
- ((PWLAN_IE_SSID)pBSSList->abySSID)->abySSID,
- pSSID->len) == 0)
- return pBSSList;
- }
- }
- }
- }
-
- return NULL;
-};
-
-/*+
- *
- * Routine Description:
- * Insert a BSS set into known BSS list
- *
- * Return Value:
- * true if success.
- *
- -*/
-
-bool
-BSSbInsertToBSSList(
- void *hDeviceContext,
- unsigned char *abyBSSIDAddr,
- __le64 qwTimestamp,
- unsigned short wBeaconInterval,
- unsigned short wCapInfo,
- unsigned char byCurrChannel,
- PWLAN_IE_SSID pSSID,
- PWLAN_IE_SUPP_RATES pSuppRates,
- PWLAN_IE_SUPP_RATES pExtSuppRates,
- PERPObject psERP,
- PWLAN_IE_RSN pRSN,
- PWLAN_IE_RSN_EXT pRSNWPA,
- PWLAN_IE_COUNTRY pIE_Country,
- PWLAN_IE_QUIET pIE_Quiet,
- unsigned int uIELength,
- unsigned char *pbyIEs,
- void *pRxPacketContext
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- PSRxMgmtPacket pRxPacket = (PSRxMgmtPacket)pRxPacketContext;
- PKnownBSS pBSSList = NULL;
- unsigned int ii;
- bool bParsingQuiet = false;
- PWLAN_IE_QUIET pQuiet = NULL;
-
- pBSSList = (PKnownBSS)&(pMgmt->sBSSList[0]);
-
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- pBSSList = (PKnownBSS)&(pMgmt->sBSSList[ii]);
- if (!pBSSList->bActive)
- break;
- }
-
- if (ii == MAX_BSS_NUM) {
- pr_debug("Get free KnowBSS node failed\n");
- return false;
- }
- /* save the BSS info */
- pBSSList->bActive = true;
- memcpy(pBSSList->abyBSSID, abyBSSIDAddr, WLAN_BSSID_LEN);
- pBSSList->qwBSSTimestamp = le64_to_cpu(qwTimestamp);
- pBSSList->wBeaconInterval = cpu_to_le16(wBeaconInterval);
- pBSSList->wCapInfo = cpu_to_le16(wCapInfo);
- pBSSList->uClearCount = 0;
-
- if (pSSID->len > WLAN_SSID_MAXLEN)
- pSSID->len = WLAN_SSID_MAXLEN;
- memcpy(pBSSList->abySSID, pSSID, pSSID->len + WLAN_IEHDR_LEN);
-
- pBSSList->uChannel = byCurrChannel;
-
- if (pSuppRates->len > WLAN_RATES_MAXLEN)
- pSuppRates->len = WLAN_RATES_MAXLEN;
- memcpy(pBSSList->abySuppRates, pSuppRates, pSuppRates->len + WLAN_IEHDR_LEN);
-
- if (pExtSuppRates != NULL) {
- if (pExtSuppRates->len > WLAN_RATES_MAXLEN)
- pExtSuppRates->len = WLAN_RATES_MAXLEN;
- memcpy(pBSSList->abyExtSuppRates, pExtSuppRates, pExtSuppRates->len + WLAN_IEHDR_LEN);
- pr_debug("BSSbInsertToBSSList: pExtSuppRates->len = %d\n",
- pExtSuppRates->len);
-
- } else {
- memset(pBSSList->abyExtSuppRates, 0, WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1);
- }
- pBSSList->sERP.byERP = psERP->byERP;
- pBSSList->sERP.bERPExist = psERP->bERPExist;
-
- /* check if BSS is 802.11a/b/g */
- if (pBSSList->uChannel > CB_MAX_CHANNEL_24G) {
- pBSSList->eNetworkTypeInUse = PHY_TYPE_11A;
- } else {
- if (pBSSList->sERP.bERPExist)
- pBSSList->eNetworkTypeInUse = PHY_TYPE_11G;
- else
- pBSSList->eNetworkTypeInUse = PHY_TYPE_11B;
- }
-
- pBSSList->byRxRate = pRxPacket->byRxRate;
- pBSSList->qwLocalTSF = pRxPacket->qwLocalTSF;
- pBSSList->uRSSI = pRxPacket->uRSSI;
- pBSSList->bySQ = pRxPacket->bySQ;
-
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
- (pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
- /* assoc with BSS */
- if (pBSSList == pMgmt->pCurrBSS)
- bParsingQuiet = true;
- }
-
- WPA_ClearRSN(pBSSList);
-
- if (pRSNWPA != NULL) {
- unsigned int uLen = pRSNWPA->len + 2;
-
- if (uLen <= (uIELength - (unsigned int)((unsigned char *)pRSNWPA - pbyIEs))) {
- pBSSList->wWPALen = uLen;
- memcpy(pBSSList->byWPAIE, pRSNWPA, uLen);
- WPA_ParseRSN(pBSSList, pRSNWPA);
- }
- }
-
- WPA2_ClearRSN(pBSSList);
-
- if (pRSN != NULL) {
- unsigned int uLen = pRSN->len + 2;
-
- if (uLen <= (uIELength - (unsigned int)((unsigned char *)pRSN - pbyIEs))) {
- pBSSList->wRSNLen = uLen;
- memcpy(pBSSList->byRSNIE, pRSN, uLen);
- WPA2vParseRSN(pBSSList, pRSN);
- }
- }
-
- if ((pMgmt->eAuthenMode == WMAC_AUTH_WPA2) || pBSSList->bWPA2Valid) {
- PSKeyItem pTransmitKey = NULL;
- bool bIs802_1x = false;
-
- for (ii = 0; ii < pBSSList->wAKMSSAuthCount; ii++) {
- if (pBSSList->abyAKMSSAuthType[ii] == WLAN_11i_AKMSS_802_1X) {
- bIs802_1x = true;
- break;
- }
- }
- if (bIs802_1x && (pSSID->len == ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->len) &&
- (!memcmp(pSSID->abySSID, ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->abySSID, pSSID->len))) {
- bAdd_PMKID_Candidate((void *)pDevice, pBSSList->abyBSSID, &pBSSList->sRSNCapObj);
-
- if (pDevice->bLinkPass && (pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
- if (KeybGetTransmitKey(&(pDevice->sKey), pDevice->abyBSSID, PAIRWISE_KEY, &pTransmitKey) ||
- KeybGetTransmitKey(&(pDevice->sKey), pDevice->abyBSSID, GROUP_KEY, &pTransmitKey)) {
- pDevice->gsPMKIDCandidate.StatusType = Ndis802_11StatusType_PMKID_CandidateList;
- pDevice->gsPMKIDCandidate.Version = 1;
-
- }
-
- }
- }
- }
-
- if (pDevice->bUpdateBBVGA) {
- /* monitor if RSSI is too strong */
- pBSSList->byRSSIStatCnt = 0;
- RFvRSSITodBm(pDevice, (unsigned char)(pRxPacket->uRSSI), &pBSSList->ldBmMAX);
- pBSSList->ldBmAverage[0] = pBSSList->ldBmMAX;
- for (ii = 1; ii < RSSI_STAT_COUNT; ii++)
- pBSSList->ldBmAverage[ii] = 0;
- }
-
- if ((pIE_Country != NULL) && pMgmt->b11hEnable) {
- set_country_info(pMgmt->pAdapter, pBSSList->eNetworkTypeInUse,
- pIE_Country);
- }
-
- if (bParsingQuiet && (pIE_Quiet != NULL)) {
- if ((((PWLAN_IE_QUIET)pIE_Quiet)->len == 8) &&
- (((PWLAN_IE_QUIET)pIE_Quiet)->byQuietCount != 0)) {
- /* valid EID */
- if (pQuiet == NULL) {
- pQuiet = (PWLAN_IE_QUIET)pIE_Quiet;
- CARDbSetQuiet(pMgmt->pAdapter,
- true,
- pQuiet->byQuietCount,
- pQuiet->byQuietPeriod,
- *((unsigned short *)pQuiet->abyQuietDuration),
- *((unsigned short *)pQuiet->abyQuietOffset)
-);
- } else {
- pQuiet = (PWLAN_IE_QUIET)pIE_Quiet;
- CARDbSetQuiet(pMgmt->pAdapter,
- false,
- pQuiet->byQuietCount,
- pQuiet->byQuietPeriod,
- *((unsigned short *)pQuiet->abyQuietDuration),
- *((unsigned short *)pQuiet->abyQuietOffset)
- );
- }
- }
- }
-
- if (bParsingQuiet && (pQuiet != NULL))
- CARDbStartQuiet(pMgmt->pAdapter);
-
- pBSSList->uIELength = uIELength;
- if (pBSSList->uIELength > WLAN_BEACON_FR_MAXLEN)
- pBSSList->uIELength = WLAN_BEACON_FR_MAXLEN;
- memcpy(pBSSList->abyIEs, pbyIEs, pBSSList->uIELength);
-
- return true;
-}
-
-/*+
- *
- * Routine Description:
- * Update BSS set in known BSS list
- *
- * Return Value:
- * true if success.
- *
- -*/
-/* TODO: input structure modify */
-
-bool
-BSSbUpdateToBSSList(
- void *hDeviceContext,
- __le64 qwTimestamp,
- unsigned short wBeaconInterval,
- unsigned short wCapInfo,
- unsigned char byCurrChannel,
- bool bChannelHit,
- PWLAN_IE_SSID pSSID,
- PWLAN_IE_SUPP_RATES pSuppRates,
- PWLAN_IE_SUPP_RATES pExtSuppRates,
- PERPObject psERP,
- PWLAN_IE_RSN pRSN,
- PWLAN_IE_RSN_EXT pRSNWPA,
- PWLAN_IE_COUNTRY pIE_Country,
- PWLAN_IE_QUIET pIE_Quiet,
- PKnownBSS pBSSList,
- unsigned int uIELength,
- unsigned char *pbyIEs,
- void *pRxPacketContext
-)
-{
- int ii;
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- PSRxMgmtPacket pRxPacket = (PSRxMgmtPacket)pRxPacketContext;
- long ldBm;
- bool bParsingQuiet = false;
- PWLAN_IE_QUIET pQuiet = NULL;
-
- if (pBSSList == NULL)
- return false;
-
- pBSSList->qwBSSTimestamp = le64_to_cpu(qwTimestamp);
- pBSSList->wBeaconInterval = cpu_to_le16(wBeaconInterval);
- pBSSList->wCapInfo = cpu_to_le16(wCapInfo);
- pBSSList->uClearCount = 0;
- pBSSList->uChannel = byCurrChannel;
-
- if (pSSID->len > WLAN_SSID_MAXLEN)
- pSSID->len = WLAN_SSID_MAXLEN;
-
- if ((pSSID->len != 0) && (pSSID->abySSID[0] != 0))
- memcpy(pBSSList->abySSID, pSSID, pSSID->len + WLAN_IEHDR_LEN);
- memcpy(pBSSList->abySuppRates, pSuppRates, pSuppRates->len + WLAN_IEHDR_LEN);
-
- if (pExtSuppRates != NULL)
- memcpy(pBSSList->abyExtSuppRates, pExtSuppRates, pExtSuppRates->len + WLAN_IEHDR_LEN);
- else
- memset(pBSSList->abyExtSuppRates, 0, WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1);
- pBSSList->sERP.byERP = psERP->byERP;
- pBSSList->sERP.bERPExist = psERP->bERPExist;
-
- /* check if BSS is 802.11a/b/g */
- if (pBSSList->uChannel > CB_MAX_CHANNEL_24G) {
- pBSSList->eNetworkTypeInUse = PHY_TYPE_11A;
- } else {
- if (pBSSList->sERP.bERPExist)
- pBSSList->eNetworkTypeInUse = PHY_TYPE_11G;
- else
- pBSSList->eNetworkTypeInUse = PHY_TYPE_11B;
- }
-
- pBSSList->byRxRate = pRxPacket->byRxRate;
- pBSSList->qwLocalTSF = pRxPacket->qwLocalTSF;
- if (bChannelHit)
- pBSSList->uRSSI = pRxPacket->uRSSI;
- pBSSList->bySQ = pRxPacket->bySQ;
-
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
- (pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
- /* assoc with BSS */
- if (pBSSList == pMgmt->pCurrBSS)
- bParsingQuiet = true;
- }
-
- WPA_ClearRSN(pBSSList); /* mike update */
-
- if (pRSNWPA != NULL) {
- unsigned int uLen = pRSNWPA->len + 2;
-
- if (uLen <= (uIELength - (unsigned int)((unsigned char *)pRSNWPA - pbyIEs))) {
- pBSSList->wWPALen = uLen;
- memcpy(pBSSList->byWPAIE, pRSNWPA, uLen);
- WPA_ParseRSN(pBSSList, pRSNWPA);
- }
- }
-
- WPA2_ClearRSN(pBSSList); /* mike update */
-
- if (pRSN != NULL) {
- unsigned int uLen = pRSN->len + 2;
-
- if (uLen <= (uIELength - (unsigned int)((unsigned char *)pRSN - pbyIEs))) {
- pBSSList->wRSNLen = uLen;
- memcpy(pBSSList->byRSNIE, pRSN, uLen);
- WPA2vParseRSN(pBSSList, pRSN);
- }
- }
-
- if (pRxPacket->uRSSI != 0) {
- RFvRSSITodBm(pDevice, (unsigned char)(pRxPacket->uRSSI), &ldBm);
- /* monitor if RSSI is too strong */
- pBSSList->byRSSIStatCnt++;
- pBSSList->byRSSIStatCnt %= RSSI_STAT_COUNT;
- pBSSList->ldBmAverage[pBSSList->byRSSIStatCnt] = ldBm;
- for (ii = 0; ii < RSSI_STAT_COUNT; ii++) {
- if (pBSSList->ldBmAverage[ii] != 0)
- pBSSList->ldBmMAX = max(pBSSList->ldBmAverage[ii], ldBm);
- }
- }
-
- if ((pIE_Country != NULL) && pMgmt->b11hEnable) {
- set_country_info(pMgmt->pAdapter, pBSSList->eNetworkTypeInUse,
- pIE_Country);
- }
-
- if (bParsingQuiet && (pIE_Quiet != NULL)) {
- if ((((PWLAN_IE_QUIET)pIE_Quiet)->len == 8) &&
- (((PWLAN_IE_QUIET)pIE_Quiet)->byQuietCount != 0)) {
- /* valid EID */
- if (pQuiet == NULL) {
- pQuiet = (PWLAN_IE_QUIET)pIE_Quiet;
- CARDbSetQuiet(pMgmt->pAdapter,
- true,
- pQuiet->byQuietCount,
- pQuiet->byQuietPeriod,
- *((unsigned short *)pQuiet->abyQuietDuration),
- *((unsigned short *)pQuiet->abyQuietOffset)
-);
- } else {
- pQuiet = (PWLAN_IE_QUIET)pIE_Quiet;
- CARDbSetQuiet(pMgmt->pAdapter,
- false,
- pQuiet->byQuietCount,
- pQuiet->byQuietPeriod,
- *((unsigned short *)pQuiet->abyQuietDuration),
- *((unsigned short *)pQuiet->abyQuietOffset)
- );
- }
- }
- }
-
- if (bParsingQuiet && (pQuiet != NULL))
- CARDbStartQuiet(pMgmt->pAdapter);
-
- pBSSList->uIELength = uIELength;
- if (pBSSList->uIELength > WLAN_BEACON_FR_MAXLEN)
- pBSSList->uIELength = WLAN_BEACON_FR_MAXLEN;
- memcpy(pBSSList->abyIEs, pbyIEs, pBSSList->uIELength);
-
- return true;
-}
-
-/*+
- *
- * Routine Description:
- * Search Node DB table to find the index of matched DstAddr
- *
- * Return Value:
- * None
- *
- -*/
-
-bool
-BSSDBbIsSTAInNodeDB(void *pMgmtObject, unsigned char *abyDstAddr,
- unsigned int *puNodeIndex)
-{
- PSMgmtObject pMgmt = (PSMgmtObject) pMgmtObject;
- unsigned int ii;
-
- /* Index = 0 reserved for AP Node */
- for (ii = 1; ii < (MAX_NODE_NUM + 1); ii++) {
- if (pMgmt->sNodeDBTable[ii].bActive) {
- if (ether_addr_equal(abyDstAddr,
- pMgmt->sNodeDBTable[ii].abyMACAddr)) {
- *puNodeIndex = ii;
- return true;
- }
- }
- }
-
- return false;
-};
-
-/*+
- *
- * Routine Description:
- * Find an empty node and allocate it; if there is no empty node,
- * then use the most inactive one.
- *
- * Return Value:
- * None
- *
- -*/
-void
-BSSvCreateOneNode(void *hDeviceContext, unsigned int *puNodeIndex)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- unsigned int ii;
- unsigned int BigestCount = 0;
- unsigned int SelectIndex;
- struct sk_buff *skb;
- /*
- * Index = 0 reserved for AP Node (In STA mode)
- * Index = 0 reserved for Broadcast/MultiCast (In AP mode)
- */
- SelectIndex = 1;
- for (ii = 1; ii < (MAX_NODE_NUM + 1); ii++) {
- if (pMgmt->sNodeDBTable[ii].bActive) {
- if (pMgmt->sNodeDBTable[ii].uInActiveCount > BigestCount) {
- BigestCount = pMgmt->sNodeDBTable[ii].uInActiveCount;
- SelectIndex = ii;
- }
- } else {
- break;
- }
- }
-
- /* if no free node was found, replace the one whose uInActiveCount is largest */
- if (ii == (MAX_NODE_NUM + 1)) {
- *puNodeIndex = SelectIndex;
- pr_info("Replace inactive node = %d\n", SelectIndex);
- /* clear ps buffer */
- if (pMgmt->sNodeDBTable[*puNodeIndex].sTxPSQueue.next != NULL) {
- while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[*puNodeIndex].sTxPSQueue)) != NULL)
- dev_kfree_skb(skb);
- }
- } else {
- *puNodeIndex = ii;
- }
-
- memset(&pMgmt->sNodeDBTable[*puNodeIndex], 0, sizeof(KnownNodeDB));
- pMgmt->sNodeDBTable[*puNodeIndex].bActive = true;
- pMgmt->sNodeDBTable[*puNodeIndex].uRatePollTimeout = FALLBACK_POLL_SECOND;
- /* for AP mode PS queue */
- skb_queue_head_init(&pMgmt->sNodeDBTable[*puNodeIndex].sTxPSQueue);
- pMgmt->sNodeDBTable[*puNodeIndex].byAuthSequence = 0;
- pMgmt->sNodeDBTable[*puNodeIndex].wEnQueueCnt = 0;
- pr_debug("Create node index = %d\n", ii);
- return;
-};
-
-/*+
- *
- * Routine Description:
- * Remove Node by NodeIndex
- *
- *
- * Return Value:
- * None
- *
- -*/
-void
-BSSvRemoveOneNode(
- void *hDeviceContext,
- unsigned int uNodeIndex
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- unsigned char byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
- struct sk_buff *skb;
-
- while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[uNodeIndex].sTxPSQueue)) != NULL)
- dev_kfree_skb(skb);
- /* clear context */
- memset(&pMgmt->sNodeDBTable[uNodeIndex], 0, sizeof(KnownNodeDB));
- /* clear tx bit map */
- pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[uNodeIndex].wAID >> 3] &= ~byMask[pMgmt->sNodeDBTable[uNodeIndex].wAID & 7];
-
- return;
-};
-/*+
- *
- * Routine Description:
- * Update AP Node content in Index 0 of KnownNodeDB
- *
- *
- * Return Value:
- * None
- *
- -*/
-
-void
-BSSvUpdateAPNode(
- void *hDeviceContext,
- unsigned short *pwCapInfo,
- PWLAN_IE_SUPP_RATES pSuppRates,
- PWLAN_IE_SUPP_RATES pExtSuppRates
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- unsigned int uRateLen = WLAN_RATES_MAXLEN;
-
- memset(&pMgmt->sNodeDBTable[0], 0, sizeof(KnownNodeDB));
-
- pMgmt->sNodeDBTable[0].bActive = true;
- if (pDevice->eCurrentPHYType == PHY_TYPE_11B)
- uRateLen = WLAN_RATES_MAXLEN_11B;
- pMgmt->abyCurrSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)pSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- uRateLen);
- pMgmt->abyCurrExtSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)pExtSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates,
- uRateLen);
- RATEvParseMaxRate((void *)pDevice,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates,
- true,
- &(pMgmt->sNodeDBTable[0].wMaxBasicRate),
- &(pMgmt->sNodeDBTable[0].wMaxSuppRate),
- &(pMgmt->sNodeDBTable[0].wSuppRate),
- &(pMgmt->sNodeDBTable[0].byTopCCKBasicRate),
- &(pMgmt->sNodeDBTable[0].byTopOFDMBasicRate)
-);
- memcpy(pMgmt->sNodeDBTable[0].abyMACAddr, pMgmt->abyCurrBSSID, WLAN_ADDR_LEN);
- pMgmt->sNodeDBTable[0].wTxDataRate = pMgmt->sNodeDBTable[0].wMaxSuppRate;
- pMgmt->sNodeDBTable[0].bShortPreamble = WLAN_GET_CAP_INFO_SHORTPREAMBLE(*pwCapInfo);
- pMgmt->sNodeDBTable[0].uRatePollTimeout = FALLBACK_POLL_SECOND;
- netdev_dbg(pDevice->dev, "BSSvUpdateAPNode:MaxSuppRate is %d\n",
- pMgmt->sNodeDBTable[0].wMaxSuppRate);
- /* auto rate fallback function initiation */
- pr_debug("pMgmt->sNodeDBTable[0].wTxDataRate = %d\n",
- pMgmt->sNodeDBTable[0].wTxDataRate);
-};
-
-/*+
- *
- * Routine Description:
- * Add Multicast Node content in Index 0 of KnownNodeDB
- *
- *
- * Return Value:
- * None
- *
- -*/
-
-void
-BSSvAddMulticastNode(
- void *hDeviceContext
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
-
- if (!pDevice->bEnableHostWEP)
- memset(&pMgmt->sNodeDBTable[0], 0, sizeof(KnownNodeDB));
- memset(pMgmt->sNodeDBTable[0].abyMACAddr, 0xff, WLAN_ADDR_LEN);
- pMgmt->sNodeDBTable[0].bActive = true;
- pMgmt->sNodeDBTable[0].bPSEnable = false;
- skb_queue_head_init(&pMgmt->sNodeDBTable[0].sTxPSQueue);
- RATEvParseMaxRate((void *)pDevice,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates,
- true,
- &(pMgmt->sNodeDBTable[0].wMaxBasicRate),
- &(pMgmt->sNodeDBTable[0].wMaxSuppRate),
- &(pMgmt->sNodeDBTable[0].wSuppRate),
- &(pMgmt->sNodeDBTable[0].byTopCCKBasicRate),
- &(pMgmt->sNodeDBTable[0].byTopOFDMBasicRate)
-);
- pMgmt->sNodeDBTable[0].wTxDataRate = pMgmt->sNodeDBTable[0].wMaxBasicRate;
- netdev_dbg(pDevice->dev,
- "BSSvAddMultiCastNode:pMgmt->sNodeDBTable[0].wTxDataRate is %d\n",
- pMgmt->sNodeDBTable[0].wTxDataRate);
- pMgmt->sNodeDBTable[0].uRatePollTimeout = FALLBACK_POLL_SECOND;
-};
-
-/*+
- *
- * Routine Description:
- *
- *
- * Second call back function to update Node DB info & AP link status
- *
- *
- * Return Value:
- * none.
- *
- -*/
-void
-BSSvSecondCallBack(
- void *hDeviceContext
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- unsigned int ii;
- PWLAN_IE_SSID pItemSSID, pCurrSSID;
- unsigned int uSleepySTACnt = 0;
- unsigned int uNonShortSlotSTACnt = 0;
- unsigned int uLongPreambleSTACnt = 0;
- viawget_wpa_header *wpahdr; /* DavidWang */
-
- spin_lock_irq(&pDevice->lock);
-
- pDevice->uAssocCount = 0;
-
- pDevice->byERPFlag &=
- ~(WLAN_SET_ERP_BARKER_MODE(1) | WLAN_SET_ERP_NONERP_PRESENT(1));
-
- if (pDevice->wUseProtectCntDown > 0) {
- pDevice->wUseProtectCntDown--;
- } else {
- /* disable protect mode */
- pDevice->byERPFlag &= ~(WLAN_SET_ERP_USE_PROTECTION(1));
- }
-
- if (pDevice->eCommandState == WLAN_ASSOCIATE_WAIT) {
- pDevice->byReAssocCount++;
- /* 10 sec timeout */
- if ((pDevice->byReAssocCount > 10) && (!pDevice->bLinkPass)) {
- netdev_info(pDevice->dev, "Re-association timeout!!!\n");
- pDevice->byReAssocCount = 0;
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- {
- union iwreq_data wrqu;
-
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- PRINT_K("wireless_send_event--->SIOCGIWAP(disassociated)\n");
- wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL);
- }
-#endif
- } else if (pDevice->bLinkPass)
- pDevice->byReAssocCount = 0;
- }
-
-#ifdef Calcu_LinkQual
- s_uCalculateLinkQual((void *)pDevice);
-#endif
-
- for (ii = 0; ii < (MAX_NODE_NUM + 1); ii++) {
- if (pMgmt->sNodeDBTable[ii].bActive) {
- /* increase in-activity counter */
- pMgmt->sNodeDBTable[ii].uInActiveCount++;
-
- if (ii > 0) {
- if (pMgmt->sNodeDBTable[ii].uInActiveCount > MAX_INACTIVE_COUNT) {
- BSSvRemoveOneNode(pDevice, ii);
- pr_debug("Inactive timeout [%d] sec, STA index = [%d] remove\n",
- MAX_INACTIVE_COUNT, ii);
- continue;
- }
-
- if (pMgmt->sNodeDBTable[ii].eNodeState >= NODE_ASSOC) {
- pDevice->uAssocCount++;
-
- /* check if Non ERP exist */
- if (pMgmt->sNodeDBTable[ii].uInActiveCount < ERP_RECOVER_COUNT) {
- if (!pMgmt->sNodeDBTable[ii].bShortPreamble) {
- pDevice->byERPFlag |= WLAN_SET_ERP_BARKER_MODE(1);
- uLongPreambleSTACnt++;
- }
- if (!pMgmt->sNodeDBTable[ii].bERPExist) {
- pDevice->byERPFlag |= WLAN_SET_ERP_NONERP_PRESENT(1);
- pDevice->byERPFlag |= WLAN_SET_ERP_USE_PROTECTION(1);
- }
- if (!pMgmt->sNodeDBTable[ii].bShortSlotTime)
- uNonShortSlotSTACnt++;
- }
- }
-
- /* check if any STA in PS mode */
- if (pMgmt->sNodeDBTable[ii].bPSEnable)
- uSleepySTACnt++;
-
- }
-
- /* rate fallback check */
- if (!pDevice->bFixRate) {
- if (ii > 0) {
- /* ii = 0 for multicast node (AP & Adhoc) */
- RATEvTxRateFallBack((void *)pDevice, &(pMgmt->sNodeDBTable[ii]));
- } else {
- /* ii = 0 reserved for unicast AP node (Infra STA) */
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_STA)
- netdev_dbg(pDevice->dev,
- "SecondCallback:Before:TxDataRate is %d\n",
- pMgmt->sNodeDBTable[0].wTxDataRate);
- RATEvTxRateFallBack((void *)pDevice, &(pMgmt->sNodeDBTable[ii]));
- netdev_dbg(pDevice->dev,
- "SecondCallback:After:TxDataRate is %d\n",
- pMgmt->sNodeDBTable[0].wTxDataRate);
-
- }
-
- }
-
- /* check if pending PS queue */
- if (pMgmt->sNodeDBTable[ii].wEnQueueCnt != 0) {
- pr_debug("Index= %d, Queue = %d pending\n",
- ii,
- pMgmt->sNodeDBTable[ii].wEnQueueCnt);
- if ((ii > 0) && (pMgmt->sNodeDBTable[ii].wEnQueueCnt > 15)) {
- BSSvRemoveOneNode(pDevice, ii);
- pr_info("Pending many queues PS STA Index = %d remove\n",
- ii);
- continue;
- }
- }
- }
-
- }
-
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && (pDevice->eCurrentPHYType == PHY_TYPE_11G)) {
- /* on/off protect mode */
- if (WLAN_GET_ERP_USE_PROTECTION(pDevice->byERPFlag)) {
- if (!pDevice->bProtectMode) {
- MACvEnableProtectMD(pDevice->PortOffset);
- pDevice->bProtectMode = true;
- }
- } else {
- if (pDevice->bProtectMode) {
- MACvDisableProtectMD(pDevice->PortOffset);
- pDevice->bProtectMode = false;
- }
- }
- /* on/off short slot time */
-
- if (uNonShortSlotSTACnt > 0) {
- if (pDevice->bShortSlotTime) {
- pDevice->bShortSlotTime = false;
- BBvSetShortSlotTime(pDevice);
- vUpdateIFS((void *)pDevice);
- }
- } else {
- if (!pDevice->bShortSlotTime) {
- pDevice->bShortSlotTime = true;
- BBvSetShortSlotTime(pDevice);
- vUpdateIFS((void *)pDevice);
- }
- }
-
- /* on/off barker long preamble mode */
-
- if (uLongPreambleSTACnt > 0) {
- if (!pDevice->bBarkerPreambleMd) {
- MACvEnableBarkerPreambleMd(pDevice->PortOffset);
- pDevice->bBarkerPreambleMd = true;
- }
- } else {
- if (pDevice->bBarkerPreambleMd) {
- MACvDisableBarkerPreambleMd(pDevice->PortOffset);
- pDevice->bBarkerPreambleMd = false;
- }
- }
-
- }
-
- /* check if any STA in PS mode, enable DTIM multicast deliver */
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- if (uSleepySTACnt > 0)
- pMgmt->sNodeDBTable[0].bPSEnable = true;
- else
- pMgmt->sNodeDBTable[0].bPSEnable = false;
- }
-
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
- pCurrSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
-
- if ((pMgmt->eCurrMode == WMAC_MODE_STANDBY) ||
- (pMgmt->eCurrMode == WMAC_MODE_ESS_STA)) {
- /* assoc with BSS */
- if (pMgmt->sNodeDBTable[0].bActive) {
- if (pDevice->bUpdateBBVGA)
- s_vCheckPreEDThreshold((void *)pDevice);
-
- if ((pMgmt->sNodeDBTable[0].uInActiveCount >= (LOST_BEACON_COUNT/2)) &&
- (pDevice->byBBVGACurrent != pDevice->abyBBVGA[0])) {
- pDevice->byBBVGANew = pDevice->abyBBVGA[0];
- bScheduleCommand((void *)pDevice, WLAN_CMD_CHANGE_BBSENSITIVITY, NULL);
- }
-
- if (pMgmt->sNodeDBTable[0].uInActiveCount >= LOST_BEACON_COUNT) {
- pMgmt->sNodeDBTable[0].bActive = false;
- pMgmt->eCurrMode = WMAC_MODE_STANDBY;
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- netif_stop_queue(pDevice->dev);
- pDevice->bLinkPass = false;
- pDevice->bRoaming = true;
- pr_info("Lost AP beacon [%d] sec, disconnected !\n",
- pMgmt->sNodeDBTable[0].uInActiveCount);
- if ((pDevice->bWPADEVUp) && (pDevice->skb != NULL)) {
- wpahdr = (viawget_wpa_header *)pDevice->skb->data;
- wpahdr->type = VIAWGET_DISASSOC_MSG;
- wpahdr->resp_ie_len = 0;
- wpahdr->req_ie_len = 0;
- skb_put(pDevice->skb, sizeof(viawget_wpa_header));
- pDevice->skb->dev = pDevice->wpadev;
- skb_reset_mac_header(pDevice->skb);
- pDevice->skb->pkt_type = PACKET_HOST;
- pDevice->skb->protocol = htons(ETH_P_802_2);
- memset(pDevice->skb->cb, 0, sizeof(pDevice->skb->cb));
- netif_rx(pDevice->skb);
- pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
- }
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- {
- union iwreq_data wrqu;
-
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- PRINT_K("wireless_send_event--->SIOCGIWAP(disassociated)\n");
- wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL);
- }
-#endif
- }
- } else if (pItemSSID->len != 0) {
- if (pDevice->uAutoReConnectTime < 10) {
- pDevice->uAutoReConnectTime++;
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- /*
- * network manager support need not do
- * Roaming scan???
- */
- if (pDevice->bWPASuppWextEnabled)
- pDevice->uAutoReConnectTime = 0;
-#endif
- } else {
- /*
- * mike use old encryption status
- * for wpa reauthentication
- */
- if (pDevice->bWPADEVUp)
- pDevice->eEncryptionStatus = pDevice->eOldEncryptionStatus;
-
- pr_debug("Roaming ...\n");
- BSSvClearBSSList((void *)pDevice, pDevice->bLinkPass);
- pMgmt->eScanType = WMAC_SCAN_ACTIVE;
- bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
- bScheduleCommand((void *)pDevice, WLAN_CMD_SSID, pMgmt->abyDesireSSID);
- pDevice->uAutoReConnectTime = 0;
- }
- }
- }
-
- if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
- /* if adhoc has started with a NULL-string ESSID, rescan */
- if ((pMgmt->eCurrState == WMAC_STATE_STARTED) && (pCurrSSID->len == 0)) {
- if (pDevice->uAutoReConnectTime < 10) {
- pDevice->uAutoReConnectTime++;
- } else {
- pr_info("Adhoc re-scanning ...\n");
- pMgmt->eScanType = WMAC_SCAN_ACTIVE;
- bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, NULL);
- bScheduleCommand((void *)pDevice, WLAN_CMD_SSID, NULL);
- pDevice->uAutoReConnectTime = 0;
- }
- }
- if (pMgmt->eCurrState == WMAC_STATE_JOINTED) {
- if (pDevice->bUpdateBBVGA)
- s_vCheckPreEDThreshold((void *)pDevice);
- if (pMgmt->sNodeDBTable[0].uInActiveCount >= ADHOC_LOST_BEACON_COUNT) {
- pr_info("Lost other STA beacon [%d] sec, started !\n",
- pMgmt->sNodeDBTable[0].uInActiveCount);
- pMgmt->sNodeDBTable[0].uInActiveCount = 0;
- pMgmt->eCurrState = WMAC_STATE_STARTED;
- netif_stop_queue(pDevice->dev);
- pDevice->bLinkPass = false;
- }
- }
- }
-
- spin_unlock_irq(&pDevice->lock);
-
- pMgmt->sTimerSecondCallback.expires = RUN_AT(HZ);
- add_timer(&pMgmt->sTimerSecondCallback);
-}
-
-/*+
- *
- * Routine Description:
- *
- *
- * Update Tx attempts, Tx failure counter in Node DB
- *
- *
- * Return Value:
- * none.
- *
- -*/
-
-void
-BSSvUpdateNodeTxCounter(
- void *hDeviceContext,
- unsigned char byTsr0,
- unsigned char byTsr1,
- unsigned char *pbyBuffer,
- unsigned int uFIFOHeaderSize
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- unsigned int uNodeIndex = 0;
- unsigned char byTxRetry = (byTsr0 & TSR0_NCR);
- PSTxBufHead pTxBufHead;
- PS802_11Header pMACHeader;
- unsigned short wRate;
- unsigned short wFallBackRate = RATE_1M;
- unsigned char byFallBack;
- unsigned int ii;
-
- pTxBufHead = (PSTxBufHead) pbyBuffer;
- if (pTxBufHead->wFIFOCtl & FIFOCTL_AUTO_FB_0)
- byFallBack = AUTO_FB_0;
- else if (pTxBufHead->wFIFOCtl & FIFOCTL_AUTO_FB_1)
- byFallBack = AUTO_FB_1;
- else
- byFallBack = AUTO_FB_NONE;
- wRate = pTxBufHead->wReserved;
-
- /* Only Unicast using support rates */
- if (pTxBufHead->wFIFOCtl & FIFOCTL_NEEDACK) {
- pr_debug("wRate %04X, byTsr0 %02X, byTsr1 %02X\n",
- wRate, byTsr0, byTsr1);
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_STA) {
- pMgmt->sNodeDBTable[0].uTxAttempts += 1;
- if ((byTsr1 & TSR1_TERR) == 0) {
- /* transmit success, TxAttempts at least plus one */
- pMgmt->sNodeDBTable[0].uTxOk[MAX_RATE]++;
- if ((byFallBack == AUTO_FB_NONE) ||
- (wRate < RATE_18M)) {
- wFallBackRate = wRate;
- } else if (byFallBack == AUTO_FB_0) {
- if (byTxRetry < 5)
- wFallBackRate = awHWRetry0[wRate-RATE_18M][byTxRetry];
- else
- wFallBackRate = awHWRetry0[wRate-RATE_18M][4];
- } else if (byFallBack == AUTO_FB_1) {
- if (byTxRetry < 5)
- wFallBackRate = awHWRetry1[wRate-RATE_18M][byTxRetry];
- else
- wFallBackRate = awHWRetry1[wRate-RATE_18M][4];
- }
- pMgmt->sNodeDBTable[0].uTxOk[wFallBackRate]++;
- } else {
- pMgmt->sNodeDBTable[0].uTxFailures++;
- }
- pMgmt->sNodeDBTable[0].uTxRetry += byTxRetry;
- if (byTxRetry != 0) {
- pMgmt->sNodeDBTable[0].uTxFail[MAX_RATE] += byTxRetry;
- if ((byFallBack == AUTO_FB_NONE) ||
- (wRate < RATE_18M)) {
- pMgmt->sNodeDBTable[0].uTxFail[wRate] += byTxRetry;
- } else if (byFallBack == AUTO_FB_0) {
- for (ii = 0; ii < byTxRetry; ii++) {
- if (ii < 5)
- wFallBackRate = awHWRetry0[wRate-RATE_18M][ii];
- else
- wFallBackRate = awHWRetry0[wRate-RATE_18M][4];
- pMgmt->sNodeDBTable[0].uTxFail[wFallBackRate]++;
- }
- } else if (byFallBack == AUTO_FB_1) {
- for (ii = 0; ii < byTxRetry; ii++) {
- if (ii < 5)
- wFallBackRate = awHWRetry1[wRate-RATE_18M][ii];
- else
- wFallBackRate = awHWRetry1[wRate-RATE_18M][4];
- pMgmt->sNodeDBTable[0].uTxFail[wFallBackRate]++;
- }
- }
- }
- }
-
- if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) ||
- (pMgmt->eCurrMode == WMAC_MODE_ESS_AP)) {
- pMACHeader = (PS802_11Header)(pbyBuffer + uFIFOHeaderSize);
-
- if (BSSDBbIsSTAInNodeDB((void *)pMgmt, &(pMACHeader->abyAddr1[0]), &uNodeIndex)) {
- pMgmt->sNodeDBTable[uNodeIndex].uTxAttempts += 1;
- if ((byTsr1 & TSR1_TERR) == 0) {
- /* transmit success, TxAttempts at least plus one */
- pMgmt->sNodeDBTable[uNodeIndex].uTxOk[MAX_RATE]++;
- if ((byFallBack == AUTO_FB_NONE) ||
- (wRate < RATE_18M)) {
- wFallBackRate = wRate;
- } else if (byFallBack == AUTO_FB_0) {
- if (byTxRetry < 5)
- wFallBackRate = awHWRetry0[wRate-RATE_18M][byTxRetry];
- else
- wFallBackRate = awHWRetry0[wRate-RATE_18M][4];
- } else if (byFallBack == AUTO_FB_1) {
- if (byTxRetry < 5)
- wFallBackRate = awHWRetry1[wRate-RATE_18M][byTxRetry];
- else
- wFallBackRate = awHWRetry1[wRate-RATE_18M][4];
- }
- pMgmt->sNodeDBTable[uNodeIndex].uTxOk[wFallBackRate]++;
- } else {
- pMgmt->sNodeDBTable[uNodeIndex].uTxFailures++;
- }
- pMgmt->sNodeDBTable[uNodeIndex].uTxRetry += byTxRetry;
- if (byTxRetry != 0) {
- pMgmt->sNodeDBTable[uNodeIndex].uTxFail[MAX_RATE] += byTxRetry;
- if ((byFallBack == AUTO_FB_NONE) ||
- (wRate < RATE_18M)) {
- pMgmt->sNodeDBTable[uNodeIndex].uTxFail[wRate] += byTxRetry;
- } else if (byFallBack == AUTO_FB_0) {
- for (ii = 0; ii < byTxRetry; ii++) {
- if (ii < 5)
- wFallBackRate = awHWRetry0[wRate - RATE_18M][ii];
- else
- wFallBackRate = awHWRetry0[wRate - RATE_18M][4];
- pMgmt->sNodeDBTable[uNodeIndex].uTxFail[wFallBackRate]++;
- }
- } else if (byFallBack == AUTO_FB_1) {
- for (ii = 0; ii < byTxRetry; ii++) {
- if (ii < 5)
- wFallBackRate = awHWRetry1[wRate-RATE_18M][ii];
- else
- wFallBackRate = awHWRetry1[wRate-RATE_18M][4];
- pMgmt->sNodeDBTable[uNodeIndex].uTxFail[wFallBackRate]++;
- }
- }
- }
- }
- }
- }
-}
-
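When one of the hardware auto fallback modes (AUTO_FB_0 or AUTO_FB_1) is active, BSSvUpdateNodeTxCounter above resolves the rate a frame finally went out at by indexing a per-rate retry table and clamping the retry count to the last column. A minimal sketch of that lookup, assuming invented rate indices and placeholder table contents rather than the driver's awHWRetry0/awHWRetry1 data:

/* Assumed rate indices, for the example only. */
enum { EX_RATE_18M = 6, EX_RATE_54M = 11 };

/* Placeholder rows: rate in use after 0, 1, 2, 3, or 4-plus retries. */
static const unsigned short ex_hw_retry[EX_RATE_54M - EX_RATE_18M + 1][5] = {
	{ EX_RATE_18M, EX_RATE_18M, EX_RATE_18M, EX_RATE_18M, EX_RATE_18M },
	/* remaining rows left zero in this sketch */
};

static unsigned short ex_resolve_tx_rate(unsigned short rate,
					 unsigned char retries, int auto_fb)
{
	if (!auto_fb || rate < EX_RATE_18M)
		return rate;	/* hardware fallback only applies from 18M up */
	if (retries > 4)
		retries = 4;	/* the table tracks at most four retries */
	return ex_hw_retry[rate - EX_RATE_18M][retries];
}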
-/*+
- *
- * Routine Description:
- * Clear Nodes & skb in DB Table
- *
- *
- * Parameters:
- * In:
- * hDeviceContext - The adapter context.
- * uStartIndex - starting index
- * Out:
- * none
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-BSSvClearNodeDBTable(
- void *hDeviceContext,
- unsigned int uStartIndex
-)
-
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- struct sk_buff *skb;
- unsigned int ii;
-
- for (ii = uStartIndex; ii < (MAX_NODE_NUM + 1); ii++) {
- if (pMgmt->sNodeDBTable[ii].bActive) {
- /* check if sTxPSQueue has been initialized */
- if (pMgmt->sNodeDBTable[ii].sTxPSQueue.next != NULL) {
- while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) != NULL) {
- pr_debug("PS skb != NULL %d\n", ii);
- dev_kfree_skb(skb);
- }
- }
- memset(&pMgmt->sNodeDBTable[ii], 0, sizeof(KnownNodeDB));
- }
- }
-
- return;
-};
-
-void s_vCheckSensitivity(
- void *hDeviceContext
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PKnownBSS pBSSList = NULL;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- int ii;
-
- if ((pDevice->byLocalID <= REV_ID_VT3253_A1) && (pDevice->byRFType == RF_RFMD2959) &&
- (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)) {
- return;
- }
-
- if ((pMgmt->eCurrState == WMAC_STATE_ASSOC) ||
- ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED))) {
- pBSSList = BSSpAddrIsInBSSList(pDevice, pMgmt->abyCurrBSSID, (PWLAN_IE_SSID)pMgmt->abyCurrSSID);
- if (pBSSList != NULL) {
- /* Update BB Reg if RSSI is too strong */
- long LocalldBmAverage = 0;
- long uNumofdBm = 0;
-
- for (ii = 0; ii < RSSI_STAT_COUNT; ii++) {
- if (pBSSList->ldBmAverage[ii] != 0) {
- uNumofdBm++;
- LocalldBmAverage += pBSSList->ldBmAverage[ii];
- }
- }
- if (uNumofdBm > 0) {
- LocalldBmAverage = LocalldBmAverage/uNumofdBm;
- for (ii = 0; ii < BB_VGA_LEVEL; ii++) {
- pr_debug("LocalldBmAverage:%ld, %ld %02x\n",
- LocalldBmAverage,
- pDevice->ldBmThreshold[ii],
- pDevice->abyBBVGA[ii]);
- if (LocalldBmAverage < pDevice->ldBmThreshold[ii]) {
- pDevice->byBBVGANew = pDevice->abyBBVGA[ii];
- break;
- }
- }
- if (pDevice->byBBVGANew != pDevice->byBBVGACurrent) {
- pDevice->uBBVGADiffCount++;
- if (pDevice->uBBVGADiffCount >= BB_VGA_CHANGE_THRESHOLD)
- bScheduleCommand((void *)pDevice, WLAN_CMD_CHANGE_BBSENSITIVITY, NULL);
- } else {
- pDevice->uBBVGADiffCount = 0;
- }
- }
- }
- }
-}
-
-void
-BSSvClearAnyBSSJoinRecord(
- void *hDeviceContext
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- unsigned int ii;
-
- for (ii = 0; ii < MAX_BSS_NUM; ii++)
- pMgmt->sBSSList[ii].bSelected = false;
-}
-
-#ifdef Calcu_LinkQual
-void s_uCalculateLinkQual(
- void *hDeviceContext
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- unsigned long TxOkRatio, TxCnt;
- unsigned long RxOkRatio, RxCnt;
- unsigned long RssiRatio;
- long ldBm;
-
- TxCnt = pDevice->scStatistic.TxNoRetryOkCount +
- pDevice->scStatistic.TxRetryOkCount +
- pDevice->scStatistic.TxFailCount;
- RxCnt = pDevice->scStatistic.RxFcsErrCnt +
- pDevice->scStatistic.RxOkCnt;
- TxOkRatio = (TxCnt < 6) ? 4000 : ((pDevice->scStatistic.TxNoRetryOkCount * 4000) / TxCnt);
- RxOkRatio = (RxCnt < 6) ? 2000 : ((pDevice->scStatistic.RxOkCnt * 2000) / RxCnt);
- /* decide link quality */
- if (!pDevice->bLinkPass) {
- pDevice->scStatistic.LinkQuality = 0;
- pDevice->scStatistic.SignalStren = 0;
- } else {
- RFvRSSITodBm(pDevice, (unsigned char)(pDevice->uCurrRSSI), &ldBm);
- if (-ldBm < 50)
- RssiRatio = 4000;
- else if (-ldBm > 90)
- RssiRatio = 0;
- else
- RssiRatio = (40-(-ldBm-50))*4000/40;
- pDevice->scStatistic.SignalStren = RssiRatio/40;
- pDevice->scStatistic.LinkQuality = (RssiRatio+TxOkRatio+RxOkRatio)/100;
- }
- pDevice->scStatistic.RxFcsErrCnt = 0;
- pDevice->scStatistic.RxOkCnt = 0;
- pDevice->scStatistic.TxFailCount = 0;
- pDevice->scStatistic.TxNoRetryOkCount = 0;
- pDevice->scStatistic.TxRetryOkCount = 0;
-}
-#endif
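The figure computed by s_uCalculateLinkQual above is a weighted sum of three ratios: TX success scaled to 0..4000, RX success scaled to 0..2000, and RSSI scaled to 0..4000, divided down to a 0..100 link quality. A standalone sketch of the same arithmetic, with invented names and not taken from the driver:

static void example_link_quality(unsigned long tx_ok, unsigned long tx_total,
				 unsigned long rx_ok, unsigned long rx_total,
				 long dbm,
				 unsigned long *signal, unsigned long *quality)
{
	unsigned long tx_ratio, rx_ratio, rssi_ratio;

	/* With too few samples, assume the best ratio instead of dividing. */
	tx_ratio = (tx_total < 6) ? 4000 : tx_ok * 4000 / tx_total;
	rx_ratio = (rx_total < 6) ? 2000 : rx_ok * 2000 / rx_total;

	/* Stronger than -50 dBm maps to 4000, weaker than -90 dBm to 0. */
	if (dbm >= -50)
		rssi_ratio = 4000;
	else if (dbm <= -90)
		rssi_ratio = 0;
	else
		rssi_ratio = (unsigned long)(dbm + 90) * 4000 / 40;

	*signal = rssi_ratio / 40;				/* 0..100 */
	*quality = (rssi_ratio + tx_ratio + rx_ratio) / 100;	/* 0..100 */
}

In the removed function the statistic counters are zeroed after each pass, so the result tracks only the most recent sampling interval.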
-
-void s_vCheckPreEDThreshold(
- void *hDeviceContext
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PKnownBSS pBSSList = NULL;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
-
- if ((pMgmt->eCurrState == WMAC_STATE_ASSOC) ||
- ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED))) {
- pBSSList = BSSpAddrIsInBSSList(pDevice, pMgmt->abyCurrBSSID, (PWLAN_IE_SSID)pMgmt->abyCurrSSID);
- if (pBSSList != NULL)
- pDevice->byBBPreEDRSSI = (unsigned char) (~(pBSSList->ldBmAverRange) + 1);
- }
-}
diff --git a/drivers/staging/vt6655/bssdb.h b/drivers/staging/vt6655/bssdb.h
deleted file mode 100644
index 5d4dd28b622..00000000000
--- a/drivers/staging/vt6655/bssdb.h
+++ /dev/null
@@ -1,326 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: bssdb.h
- *
- * Purpose: Handles the Basic Service Set & Node Database functions
- *
- * Author: Lyndon Chen
- *
- * Date: July 16, 2002
- *
- */
-
-#ifndef __BSSDB_H__
-#define __BSSDB_H__
-
-#include <linux/skbuff.h>
-#include "80211hdr.h"
-#include "80211mgr.h"
-#include "card.h"
-
-#define MAX_NODE_NUM 64
-#define MAX_BSS_NUM 42
-#define LOST_BEACON_COUNT 10 // 10 sec, XP defined
-#define MAX_PS_TX_BUF 32 // sta max power saving tx buf
-#define ADHOC_LOST_BEACON_COUNT 30 // 30 sec, beacon lost for adhoc only
-#define MAX_INACTIVE_COUNT 300 // 300 sec, inactive STA node refresh
-
-#define USE_PROTECT_PERIOD 10 // 10 sec, Use protect mode check period
-#define ERP_RECOVER_COUNT 30 // 30 sec, ERP support callback check
-#define BSS_CLEAR_COUNT 1
-
-#define RSSI_STAT_COUNT 10
-#define MAX_CHECK_RSSI_COUNT 8
-
-// STA dwflags
-#define WLAN_STA_AUTH BIT0
-#define WLAN_STA_ASSOC BIT1
-#define WLAN_STA_PS BIT2
-#define WLAN_STA_TIM BIT3
-// permanent; do not remove entry on expiration
-#define WLAN_STA_PERM BIT4
-// If 802.1X is used, this flag is
-// controlling whether STA is authorized to
-// send and receive non-IEEE 802.1X frames
-#define WLAN_STA_AUTHORIZED BIT5
-
-#define MAX_RATE 12
-
-#define MAX_WPA_IE_LEN 64
-
-//
-// IEEE 802.11 Structures and definitions
-//
-
-typedef enum _NDIS_802_11_NETWORK_TYPE {
- Ndis802_11FH,
- Ndis802_11DS,
- Ndis802_11OFDM5,
- Ndis802_11OFDM24,
- Ndis802_11NetworkTypeMax // not a real type, defined as an upper bound
-} NDIS_802_11_NETWORK_TYPE, *PNDIS_802_11_NETWORK_TYPE;
-
-typedef struct tagSERPObject {
- bool bERPExist;
- unsigned char byERP;
-} ERPObject, *PERPObject;
-
-typedef struct tagSRSNCapObject {
- bool bRSNCapExist;
- unsigned short wRSNCap;
-} SRSNCapObject, *PSRSNCapObject;
-
-// BSS info(AP)
-#pragma pack(1)
-typedef struct tagKnownBSS {
- bool bActive;
- unsigned char abyBSSID[WLAN_BSSID_LEN];
- unsigned int uChannel;
- unsigned char abySuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
- unsigned char abyExtSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
- unsigned int uRSSI;
- unsigned char bySQ;
- unsigned short wBeaconInterval;
- unsigned short wCapInfo;
- unsigned char abySSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
- unsigned char byRxRate;
-
- unsigned char byRSSIStatCnt;
- long ldBmMAX;
- long ldBmAverage[RSSI_STAT_COUNT];
- long ldBmAverRange;
- bool bSelected;
-
- bool bWPAValid;
- unsigned char byGKType;
- unsigned char abyPKType[4];
- unsigned short wPKCount;
- unsigned char abyAuthType[4];
- unsigned short wAuthCount;
- unsigned char byDefaultK_as_PK;
- unsigned char byReplayIdx;
-
- bool bWPA2Valid;
- unsigned char byCSSGK;
- unsigned short wCSSPKCount;
- unsigned char abyCSSPK[4];
- unsigned short wAKMSSAuthCount;
- unsigned char abyAKMSSAuthType[4];
-
- unsigned char byWPAIE[MAX_WPA_IE_LEN];
- unsigned char byRSNIE[MAX_WPA_IE_LEN];
- unsigned short wWPALen;
- unsigned short wRSNLen;
-
- unsigned int uClearCount;
- unsigned int uIELength;
- u64 qwBSSTimestamp;
- u64 qwLocalTSF;
-
- CARD_PHY_TYPE eNetworkTypeInUse;
-
- ERPObject sERP;
- SRSNCapObject sRSNCapObj;
- unsigned char abyIEs[1024];
-} __attribute__ ((__packed__))
-KnownBSS , *PKnownBSS;
-
-#pragma pack()
-
-typedef enum tagNODE_STATE {
- NODE_FREE,
- NODE_AGED,
- NODE_KNOWN,
- NODE_AUTH,
- NODE_ASSOC
-} NODE_STATE, *PNODE_STATE;
-
-// STA node info
-typedef struct tagKnownNodeDB {
- bool bActive;
- unsigned char abyMACAddr[WLAN_ADDR_LEN];
- unsigned char abyCurrSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN];
- unsigned char abyCurrExtSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN];
- unsigned short wTxDataRate;
- bool bShortPreamble;
- bool bERPExist;
- bool bShortSlotTime;
- unsigned int uInActiveCount;
- unsigned short wMaxBasicRate; //Get from byTopOFDMBasicRate or byTopCCKBasicRate, depending on the packet type
- unsigned short wMaxSuppRate; //Records the highest supported rate taken from the SuppRates IE and ExtSuppRates IE in the Beacon
- unsigned short wSuppRate;
- unsigned char byTopOFDMBasicRate;//Records the highest basic rate in OFDM mode
- unsigned char byTopCCKBasicRate; //Records the highest basic rate in CCK mode
-
- // For AP mode
- struct sk_buff_head sTxPSQueue;
- unsigned short wCapInfo;
- unsigned short wListenInterval;
- unsigned short wAID;
- NODE_STATE eNodeState;
- bool bPSEnable;
- bool bRxPSPoll;
- unsigned char byAuthSequence;
- unsigned long ulLastRxJiffer;
- unsigned char bySuppRate;
- unsigned long dwFlags;
- unsigned short wEnQueueCnt;
-
- bool bOnFly;
- unsigned long long KeyRSC;
- unsigned char byKeyIndex;
- unsigned long dwKeyIndex;
- unsigned char byCipherSuite;
- unsigned long dwTSC47_16;
- unsigned short wTSC15_0;
- unsigned int uWepKeyLength;
- unsigned char abyWepKey[WLAN_WEPMAX_KEYLEN];
- // Auto rate fallback vars
- bool bIsInFallback;
- unsigned int uAverageRSSI;
- unsigned int uRateRecoveryTimeout;
- unsigned int uRatePollTimeout;
- unsigned int uTxFailures;
- unsigned int uTxAttempts;
-
- unsigned int uTxRetry;
- unsigned int uFailureRatio;
- unsigned int uRetryRatio;
- unsigned int uTxOk[MAX_RATE+1];
- unsigned int uTxFail[MAX_RATE+1];
- unsigned int uTimeCount;
-} KnownNodeDB, *PKnownNodeDB;
-
-PKnownBSS
-BSSpSearchBSSList(
- void *hDeviceContext,
- unsigned char *pbyDesireBSSID,
- unsigned char *pbyDesireSSID,
- CARD_PHY_TYPE ePhyType
-);
-
-PKnownBSS
-BSSpAddrIsInBSSList(
- void *hDeviceContext,
- unsigned char *abyBSSID,
- PWLAN_IE_SSID pSSID
-);
-
-void
-BSSvClearBSSList(
- void *hDeviceContext,
- bool bKeepCurrBSSID
-);
-
-bool
-BSSbInsertToBSSList(
- void *hDeviceContext,
- unsigned char *abyBSSIDAddr,
- __le64 qwTimestamp,
- unsigned short wBeaconInterval,
- unsigned short wCapInfo,
- unsigned char byCurrChannel,
- PWLAN_IE_SSID pSSID,
- PWLAN_IE_SUPP_RATES pSuppRates,
- PWLAN_IE_SUPP_RATES pExtSuppRates,
- PERPObject psERP,
- PWLAN_IE_RSN pRSN,
- PWLAN_IE_RSN_EXT pRSNWPA,
- PWLAN_IE_COUNTRY pIE_Country,
- PWLAN_IE_QUIET pIE_Quiet,
- unsigned int uIELength,
- unsigned char *pbyIEs,
- void *pRxPacketContext
-);
-
-bool
-BSSbUpdateToBSSList(
- void *hDeviceContext,
- __le64 qwTimestamp,
- unsigned short wBeaconInterval,
- unsigned short wCapInfo,
- unsigned char byCurrChannel,
- bool bChannelHit,
- PWLAN_IE_SSID pSSID,
- PWLAN_IE_SUPP_RATES pSuppRates,
- PWLAN_IE_SUPP_RATES pExtSuppRates,
- PERPObject psERP,
- PWLAN_IE_RSN pRSN,
- PWLAN_IE_RSN_EXT pRSNWPA,
- PWLAN_IE_COUNTRY pIE_Country,
- PWLAN_IE_QUIET pIE_Quiet,
- PKnownBSS pBSSList,
- unsigned int uIELength,
- unsigned char *pbyIEs,
- void *pRxPacketContext
-);
-
-bool
-BSSDBbIsSTAInNodeDB(void *hDeviceContext, unsigned char *abyDstAddr,
- unsigned int *puNodeIndex);
-
-void
-BSSvCreateOneNode(void *hDeviceContext, unsigned int *puNodeIndex);
-
-void
-BSSvUpdateAPNode(
- void *hDeviceContext,
- unsigned short *pwCapInfo,
- PWLAN_IE_SUPP_RATES pItemRates,
- PWLAN_IE_SUPP_RATES pExtSuppRates
-);
-
-void
-BSSvSecondCallBack(
- void *hDeviceContext
-);
-
-void
-BSSvUpdateNodeTxCounter(
- void *hDeviceContext,
- unsigned char byTsr0,
- unsigned char byTsr1,
- unsigned char *pbyBuffer,
- unsigned int uFIFOHeaderSize
-);
-
-void
-BSSvRemoveOneNode(
- void *hDeviceContext,
- unsigned int uNodeIndex
-);
-
-void
-BSSvAddMulticastNode(
- void *hDeviceContext
-);
-
-void
-BSSvClearNodeDBTable(
- void *hDeviceContext,
- unsigned int uStartIndex
-);
-
-void
-BSSvClearAnyBSSJoinRecord(
- void *hDeviceContext
-);
-
-#endif //__BSSDB_H__
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index 5a6950264bd..a0796405c30 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -21,7 +21,6 @@
* Functions:
 * s_vSafeResetTx - Reset Tx
* CARDvSetRSPINF - Set RSPINF
- * vUpdateIFS - Update slotTime,SIFS,DIFS, and EIFS
* CARDvUpdateBasicTopRate - Update BasicTopRate
* CARDbAddBasicRate - Add to BasicRateSet
* CARDbIsOFDMinBasicRate - Check if any OFDM rate is in BasicRateSet
@@ -34,8 +33,6 @@
* CARDvUpdateNextTBTT - Sync. NIC Beacon time
* CARDbRadioPowerOff - Turn Off NIC Radio Power
* CARDbRadioPowerOn - Turn On NIC Radio Power
- * CARDbSetWEPMode - Set NIC Wep mode
- * CARDbSetTxPower - Set NIC tx power
*
* Revision History:
* 06-10-2003 Bryan YC Fan: Re-write codes to support VT3253 spec.
@@ -50,38 +47,24 @@
#include "mac.h"
#include "desc.h"
#include "rf.h"
-#include "vntwifi.h"
#include "power.h"
-#include "key.h"
-#include "rc4.h"
-#include "country.h"
-#include "channel.h"
/*--------------------- Static Definitions -------------------------*/
-#define C_SIFS_A 16 // micro sec.
+#define C_SIFS_A 16 /* micro sec. */
#define C_SIFS_BG 10
-#define C_EIFS 80 // micro sec.
+#define C_EIFS 80 /* micro sec. */
-#define C_SLOT_SHORT 9 // micro sec.
+#define C_SLOT_SHORT 9 /* micro sec. */
#define C_SLOT_LONG 20
-#define C_CWMIN_A 15 // slot time
+#define C_CWMIN_A 15 /* slot time */
#define C_CWMIN_B 31
-#define C_CWMAX 1023 // slot time
+#define C_CWMAX 1023 /* slot time */
-#define WAIT_BEACON_TX_DOWN_TMO 3 // Times
-
-//1M, 2M, 5M, 11M, 18M, 24M, 36M, 54M
-static unsigned char abyDefaultSuppRatesG[] = {WLAN_EID_SUPP_RATES, 8, 0x02, 0x04, 0x0B, 0x16, 0x24, 0x30, 0x48, 0x6C};
-//6M, 9M, 12M, 48M
-static unsigned char abyDefaultExtSuppRatesG[] = {WLAN_EID_EXTSUPP_RATES, 4, 0x0C, 0x12, 0x18, 0x60};
-//6M, 9M, 12M, 18M, 24M, 36M, 48M, 54M
-static unsigned char abyDefaultSuppRatesA[] = {WLAN_EID_SUPP_RATES, 8, 0x0C, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C};
-//1M, 2M, 5M, 11M,
-static unsigned char abyDefaultSuppRatesB[] = {WLAN_EID_SUPP_RATES, 4, 0x02, 0x04, 0x0B, 0x16};
+#define WAIT_BEACON_TX_DOWN_TMO 3 /* Times */
/*--------------------- Static Variables --------------------------*/
@@ -94,7 +77,7 @@ static
void
s_vCalculateOFDMRParameter(
unsigned char byRate,
- CARD_PHY_TYPE ePHYType,
+ u8 bb_type,
unsigned char *pbyTxRate,
unsigned char *pbyRsvTime
);
@@ -113,20 +96,19 @@ s_vCalculateOFDMRParameter(
* pbyRsvTime - pointer to RSPINF RsvTime field
*
* Return Value: none
- *
*/
static
void
s_vCalculateOFDMRParameter(
unsigned char byRate,
- CARD_PHY_TYPE ePHYType,
+ u8 bb_type,
unsigned char *pbyTxRate,
unsigned char *pbyRsvTime
)
{
switch (byRate) {
case RATE_6M:
- if (ePHYType == PHY_TYPE_11A) {//5GHZ
+ if (bb_type == BB_TYPE_11A) { /* 5GHZ */
*pbyTxRate = 0x9B;
*pbyRsvTime = 44;
} else {
@@ -136,7 +118,7 @@ s_vCalculateOFDMRParameter(
break;
case RATE_9M:
- if (ePHYType == PHY_TYPE_11A) {//5GHZ
+ if (bb_type == BB_TYPE_11A) { /* 5GHZ */
*pbyTxRate = 0x9F;
*pbyRsvTime = 36;
} else {
@@ -146,7 +128,7 @@ s_vCalculateOFDMRParameter(
break;
case RATE_12M:
- if (ePHYType == PHY_TYPE_11A) {//5GHZ
+ if (bb_type == BB_TYPE_11A) { /* 5GHZ */
*pbyTxRate = 0x9A;
*pbyRsvTime = 32;
} else {
@@ -156,7 +138,7 @@ s_vCalculateOFDMRParameter(
break;
case RATE_18M:
- if (ePHYType == PHY_TYPE_11A) {//5GHZ
+ if (bb_type == BB_TYPE_11A) { /* 5GHZ */
*pbyTxRate = 0x9E;
*pbyRsvTime = 28;
} else {
@@ -166,7 +148,7 @@ s_vCalculateOFDMRParameter(
break;
case RATE_36M:
- if (ePHYType == PHY_TYPE_11A) {//5GHZ
+ if (bb_type == BB_TYPE_11A) { /* 5GHZ */
*pbyTxRate = 0x9D;
*pbyRsvTime = 24;
} else {
@@ -176,7 +158,7 @@ s_vCalculateOFDMRParameter(
break;
case RATE_48M:
- if (ePHYType == PHY_TYPE_11A) {//5GHZ
+ if (bb_type == BB_TYPE_11A) { /* 5GHZ */
*pbyTxRate = 0x98;
*pbyRsvTime = 24;
} else {
@@ -186,7 +168,7 @@ s_vCalculateOFDMRParameter(
break;
case RATE_54M:
- if (ePHYType == PHY_TYPE_11A) {//5GHZ
+ if (bb_type == BB_TYPE_11A) { /* 5GHZ */
*pbyTxRate = 0x9C;
*pbyRsvTime = 24;
} else {
@@ -197,7 +179,7 @@ s_vCalculateOFDMRParameter(
case RATE_24M:
default:
- if (ePHYType == PHY_TYPE_11A) {//5GHZ
+ if (bb_type == BB_TYPE_11A) { /* 5GHZ */
*pbyTxRate = 0x99;
*pbyRsvTime = 28;
} else {
@@ -208,167 +190,9 @@ s_vCalculateOFDMRParameter(
}
}
-/*
- * Description: Set RSPINF
- *
- * Parameters:
- * In:
- * pDevice - The adapter to be set
- * Out:
- * none
- *
- * Return Value: None.
- *
- */
-static
-void
-s_vSetRSPINF(struct vnt_private *pDevice, CARD_PHY_TYPE ePHYType,
- void *pvSupportRateIEs, void *pvExtSupportRateIEs)
-{
- union vnt_phy_field_swap phy;
- unsigned char byTxRate = 0, byRsvTime = 0; // For OFDM
-
- //Set to Page1
- MACvSelectPage1(pDevice->PortOffset);
-
- /* RSPINF_b_1 */
- vnt_get_phy_field(pDevice,
- 14,
- VNTWIFIbyGetACKTxRate(RATE_1M, pvSupportRateIEs, pvExtSupportRateIEs),
- PK_TYPE_11B,
- &phy.field_read);
-
- /* swap over to get correct write order */
- swap(phy.swap[0], phy.swap[1]);
-
- VNSvOutPortD(pDevice->PortOffset + MAC_REG_RSPINF_B_1, phy.field_write);
-
- /* RSPINF_b_2 */
- vnt_get_phy_field(pDevice, 14,
- VNTWIFIbyGetACKTxRate(RATE_2M, pvSupportRateIEs, pvExtSupportRateIEs),
- PK_TYPE_11B, &phy.field_read);
-
- swap(phy.swap[0], phy.swap[1]);
-
- VNSvOutPortD(pDevice->PortOffset + MAC_REG_RSPINF_B_2, phy.field_write);
-
- /* RSPINF_b_5 */
- vnt_get_phy_field(pDevice, 14,
- VNTWIFIbyGetACKTxRate(RATE_5M, pvSupportRateIEs, pvExtSupportRateIEs),
- PK_TYPE_11B, &phy.field_read);
-
- swap(phy.swap[0], phy.swap[1]);
-
- VNSvOutPortD(pDevice->PortOffset + MAC_REG_RSPINF_B_5, phy.field_write);
-
- /* RSPINF_b_11 */
- vnt_get_phy_field(pDevice, 14,
- VNTWIFIbyGetACKTxRate(RATE_11M, pvSupportRateIEs, pvExtSupportRateIEs),
- PK_TYPE_11B, &phy.field_read);
-
- swap(phy.swap[0], phy.swap[1]);
-
- VNSvOutPortD(pDevice->PortOffset + MAC_REG_RSPINF_B_11, phy.field_write);
-
- //RSPINF_a_6
- s_vCalculateOFDMRParameter(RATE_6M,
- ePHYType,
- &byTxRate,
- &byRsvTime);
- VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_6, MAKEWORD(byTxRate, byRsvTime));
- //RSPINF_a_9
- s_vCalculateOFDMRParameter(RATE_9M,
- ePHYType,
- &byTxRate,
- &byRsvTime);
- VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_9, MAKEWORD(byTxRate, byRsvTime));
- //RSPINF_a_12
- s_vCalculateOFDMRParameter(RATE_12M,
- ePHYType,
- &byTxRate,
- &byRsvTime);
- VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_12, MAKEWORD(byTxRate, byRsvTime));
- //RSPINF_a_18
- s_vCalculateOFDMRParameter(RATE_18M,
- ePHYType,
- &byTxRate,
- &byRsvTime);
- VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_18, MAKEWORD(byTxRate, byRsvTime));
- //RSPINF_a_24
- s_vCalculateOFDMRParameter(RATE_24M,
- ePHYType,
- &byTxRate,
- &byRsvTime);
- VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_24, MAKEWORD(byTxRate, byRsvTime));
- //RSPINF_a_36
- s_vCalculateOFDMRParameter(
- VNTWIFIbyGetACKTxRate(RATE_36M, pvSupportRateIEs, pvExtSupportRateIEs),
- ePHYType,
- &byTxRate,
- &byRsvTime);
- VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_36, MAKEWORD(byTxRate, byRsvTime));
- //RSPINF_a_48
- s_vCalculateOFDMRParameter(
- VNTWIFIbyGetACKTxRate(RATE_48M, pvSupportRateIEs, pvExtSupportRateIEs),
- ePHYType,
- &byTxRate,
- &byRsvTime);
- VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_48, MAKEWORD(byTxRate, byRsvTime));
- //RSPINF_a_54
- s_vCalculateOFDMRParameter(
- VNTWIFIbyGetACKTxRate(RATE_54M, pvSupportRateIEs, pvExtSupportRateIEs),
- ePHYType,
- &byTxRate,
- &byRsvTime);
- VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_54, MAKEWORD(byTxRate, byRsvTime));
- //RSPINF_a_72
- VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_72, MAKEWORD(byTxRate, byRsvTime));
- //Set to Page0
- MACvSelectPage0(pDevice->PortOffset);
-}
-
/*--------------------- Export Functions --------------------------*/
/*
- * Description: Get Card short preamble option value
- *
- * Parameters:
- * In:
- * pDevice - The adapter to be set
- * Out:
- * none
- *
- * Return Value: true if short preamble; otherwise false
- *
- */
-bool CARDbIsShortPreamble(struct vnt_private *pDevice)
-{
-
- if (pDevice->byPreambleType == 0)
- return false;
-
- return true;
-}
-
-/*
- * Description: Get Card short slot time option value
- *
- * Parameters:
- * In:
- * pDevice - The adapter to be set
- * Out:
- * none
- *
- * Return Value: true if short slot time; otherwise false
- *
- */
-bool CARDbIsShorSlotTime(struct vnt_private *pDevice)
-{
-
- return pDevice->bShortSlotTime;
-}
-
-/*
* Description: Update IFS
*
* Parameters:
@@ -378,138 +202,118 @@ bool CARDbIsShorSlotTime(struct vnt_private *pDevice)
* none
*
* Return Value: None.
- *
*/
-bool CARDbSetPhyParameter(struct vnt_private *pDevice, CARD_PHY_TYPE ePHYType,
- unsigned short wCapInfo, unsigned char byERPField,
- void *pvSupportRateIEs, void *pvExtSupportRateIEs)
+bool CARDbSetPhyParameter(struct vnt_private *pDevice, u8 bb_type)
{
unsigned char byCWMaxMin = 0;
unsigned char bySlot = 0;
unsigned char bySIFS = 0;
unsigned char byDIFS = 0;
unsigned char byData;
- PWLAN_IE_SUPP_RATES pSupportRates = (PWLAN_IE_SUPP_RATES) pvSupportRateIEs;
- PWLAN_IE_SUPP_RATES pExtSupportRates = (PWLAN_IE_SUPP_RATES) pvExtSupportRateIEs;
-
- //Set SIFS, DIFS, EIFS, SlotTime, CwMin
- if (ePHYType == PHY_TYPE_11A) {
- if (pSupportRates == NULL)
- pSupportRates = (PWLAN_IE_SUPP_RATES) abyDefaultSuppRatesA;
+ int i;
+ /* Set SIFS, DIFS, EIFS, SlotTime, CwMin */
+ if (bb_type == BB_TYPE_11A) {
if (pDevice->byRFType == RF_AIROHA7230) {
- // AL7230 use single PAPE and connect to PAPE_2.4G
+ /* AL7230 use single PAPE and connect to PAPE_2.4G */
MACvSetBBType(pDevice->PortOffset, BB_TYPE_11G);
pDevice->abyBBVGA[0] = 0x20;
pDevice->abyBBVGA[2] = 0x10;
pDevice->abyBBVGA[3] = 0x10;
- BBbReadEmbedded(pDevice->PortOffset, 0xE7, &byData);
+ BBbReadEmbedded(pDevice, 0xE7, &byData);
if (byData == 0x1C)
- BBbWriteEmbedded(pDevice->PortOffset, 0xE7, pDevice->abyBBVGA[0]);
+ BBbWriteEmbedded(pDevice, 0xE7, pDevice->abyBBVGA[0]);
} else if (pDevice->byRFType == RF_UW2452) {
MACvSetBBType(pDevice->PortOffset, BB_TYPE_11A);
pDevice->abyBBVGA[0] = 0x18;
- BBbReadEmbedded(pDevice->PortOffset, 0xE7, &byData);
+ BBbReadEmbedded(pDevice, 0xE7, &byData);
if (byData == 0x14) {
- BBbWriteEmbedded(pDevice->PortOffset, 0xE7, pDevice->abyBBVGA[0]);
- BBbWriteEmbedded(pDevice->PortOffset, 0xE1, 0x57);
+ BBbWriteEmbedded(pDevice, 0xE7, pDevice->abyBBVGA[0]);
+ BBbWriteEmbedded(pDevice, 0xE1, 0x57);
}
} else {
MACvSetBBType(pDevice->PortOffset, BB_TYPE_11A);
}
- BBbWriteEmbedded(pDevice->PortOffset, 0x88, 0x03);
+ BBbWriteEmbedded(pDevice, 0x88, 0x03);
bySlot = C_SLOT_SHORT;
bySIFS = C_SIFS_A;
byDIFS = C_SIFS_A + 2*C_SLOT_SHORT;
byCWMaxMin = 0xA4;
- } else if (ePHYType == PHY_TYPE_11B) {
- if (pSupportRates == NULL)
- pSupportRates = (PWLAN_IE_SUPP_RATES) abyDefaultSuppRatesB;
-
+ } else if (bb_type == BB_TYPE_11B) {
MACvSetBBType(pDevice->PortOffset, BB_TYPE_11B);
if (pDevice->byRFType == RF_AIROHA7230) {
pDevice->abyBBVGA[0] = 0x1C;
pDevice->abyBBVGA[2] = 0x00;
pDevice->abyBBVGA[3] = 0x00;
- BBbReadEmbedded(pDevice->PortOffset, 0xE7, &byData);
+ BBbReadEmbedded(pDevice, 0xE7, &byData);
if (byData == 0x20)
- BBbWriteEmbedded(pDevice->PortOffset, 0xE7, pDevice->abyBBVGA[0]);
+ BBbWriteEmbedded(pDevice, 0xE7, pDevice->abyBBVGA[0]);
} else if (pDevice->byRFType == RF_UW2452) {
pDevice->abyBBVGA[0] = 0x14;
- BBbReadEmbedded(pDevice->PortOffset, 0xE7, &byData);
+ BBbReadEmbedded(pDevice, 0xE7, &byData);
if (byData == 0x18) {
- BBbWriteEmbedded(pDevice->PortOffset, 0xE7, pDevice->abyBBVGA[0]);
- BBbWriteEmbedded(pDevice->PortOffset, 0xE1, 0xD3);
+ BBbWriteEmbedded(pDevice, 0xE7, pDevice->abyBBVGA[0]);
+ BBbWriteEmbedded(pDevice, 0xE1, 0xD3);
}
}
- BBbWriteEmbedded(pDevice->PortOffset, 0x88, 0x02);
+ BBbWriteEmbedded(pDevice, 0x88, 0x02);
bySlot = C_SLOT_LONG;
bySIFS = C_SIFS_BG;
byDIFS = C_SIFS_BG + 2*C_SLOT_LONG;
byCWMaxMin = 0xA5;
- } else {// PK_TYPE_11GA & PK_TYPE_11GB
- if (pSupportRates == NULL) {
- pSupportRates = (PWLAN_IE_SUPP_RATES) abyDefaultSuppRatesG;
- pExtSupportRates = (PWLAN_IE_SUPP_RATES) abyDefaultExtSuppRatesG;
- }
+ } else { /* PK_TYPE_11GA & PK_TYPE_11GB */
MACvSetBBType(pDevice->PortOffset, BB_TYPE_11G);
if (pDevice->byRFType == RF_AIROHA7230) {
pDevice->abyBBVGA[0] = 0x1C;
pDevice->abyBBVGA[2] = 0x00;
pDevice->abyBBVGA[3] = 0x00;
- BBbReadEmbedded(pDevice->PortOffset, 0xE7, &byData);
+ BBbReadEmbedded(pDevice, 0xE7, &byData);
if (byData == 0x20)
- BBbWriteEmbedded(pDevice->PortOffset, 0xE7, pDevice->abyBBVGA[0]);
+ BBbWriteEmbedded(pDevice, 0xE7, pDevice->abyBBVGA[0]);
} else if (pDevice->byRFType == RF_UW2452) {
pDevice->abyBBVGA[0] = 0x14;
- BBbReadEmbedded(pDevice->PortOffset, 0xE7, &byData);
+ BBbReadEmbedded(pDevice, 0xE7, &byData);
if (byData == 0x18) {
- BBbWriteEmbedded(pDevice->PortOffset, 0xE7, pDevice->abyBBVGA[0]);
- BBbWriteEmbedded(pDevice->PortOffset, 0xE1, 0xD3);
+ BBbWriteEmbedded(pDevice, 0xE7, pDevice->abyBBVGA[0]);
+ BBbWriteEmbedded(pDevice, 0xE1, 0xD3);
}
}
- BBbWriteEmbedded(pDevice->PortOffset, 0x88, 0x08);
+ BBbWriteEmbedded(pDevice, 0x88, 0x08);
bySIFS = C_SIFS_BG;
- if (VNTWIFIbIsShortSlotTime(wCapInfo)) {
+
+ if (pDevice->bShortSlotTime) {
bySlot = C_SLOT_SHORT;
byDIFS = C_SIFS_BG + 2*C_SLOT_SHORT;
} else {
bySlot = C_SLOT_LONG;
byDIFS = C_SIFS_BG + 2*C_SLOT_LONG;
}
- if (VNTWIFIbyGetMaxSupportRate(pSupportRates, pExtSupportRates) > RATE_11M)
- byCWMaxMin = 0xA4;
- else
- byCWMaxMin = 0xA5;
-
- if (pDevice->bProtectMode != VNTWIFIbIsProtectMode(byERPField)) {
- pDevice->bProtectMode = VNTWIFIbIsProtectMode(byERPField);
- if (pDevice->bProtectMode)
- MACvEnableProtectMD(pDevice->PortOffset);
- else
- MACvDisableProtectMD(pDevice->PortOffset);
- }
- if (pDevice->bBarkerPreambleMd != VNTWIFIbIsBarkerMode(byERPField)) {
- pDevice->bBarkerPreambleMd = VNTWIFIbIsBarkerMode(byERPField);
- if (pDevice->bBarkerPreambleMd)
- MACvEnableBarkerPreambleMd(pDevice->PortOffset);
- else
- MACvDisableBarkerPreambleMd(pDevice->PortOffset);
+ byCWMaxMin = 0xa4;
+
+ for (i = RATE_54M; i >= RATE_6M; i--) {
+ if (pDevice->basic_rates & ((u32)(0x1 << i))) {
+ byCWMaxMin |= 0x1;
+ break;
+ }
}
}
if (pDevice->byRFType == RF_RFMD2959) {
- // bcs TX_PE will reserve 3 us
- // hardware's processing time here is 2 us.
+ /*
+ * because TX_PE will reserve 3 us; the hardware's processing
+ * time here is 2 us.
+ */
bySIFS -= 3;
byDIFS -= 3;
- //{{ RobertYu: 20041202
- //// TX_PE will reserve 3 us for MAX2829 A mode only, it is for better TX throughput
- //// MAC will need 2 us to process, so the SIFS, DIFS can be shorter by 2 us.
+ /*
+ * TX_PE will reserve 3 us for MAX2829 A mode only, it is for
+ * better TX throughput; MAC will need 2 us to process, so the
+ * SIFS, DIFS can be shorter by 2 us.
+ */
}
if (pDevice->bySIFS != bySIFS) {
@@ -527,10 +331,6 @@ bool CARDbSetPhyParameter(struct vnt_private *pDevice, CARD_PHY_TYPE ePHYType,
if (pDevice->bySlot != bySlot) {
pDevice->bySlot = bySlot;
VNSvOutPortB(pDevice->PortOffset + MAC_REG_SLOT, pDevice->bySlot);
- if (pDevice->bySlot == C_SLOT_SHORT)
- pDevice->bShortSlotTime = true;
- else
- pDevice->bShortSlotTime = false;
BBvSetShortSlotTime(pDevice);
}
@@ -538,14 +338,11 @@ bool CARDbSetPhyParameter(struct vnt_private *pDevice, CARD_PHY_TYPE ePHYType,
pDevice->byCWMaxMin = byCWMaxMin;
VNSvOutPortB(pDevice->PortOffset + MAC_REG_CWMAXMIN0, pDevice->byCWMaxMin);
}
- if (VNTWIFIbIsShortPreamble(wCapInfo))
- pDevice->byPreambleType = pDevice->byShortPreamble;
- else
- pDevice->byPreambleType = 0;
- s_vSetRSPINF(pDevice, ePHYType, pSupportRates, pExtSupportRates);
- pDevice->eCurrentPHYType = ePHYType;
- // set for NDIS OID_802_11SUPPORTED_RATES
+ pDevice->byPacketType = CARDbyGetPktType(pDevice);
+
+ CARDvSetRSPINF(pDevice, bb_type);
+
return true;
}
@@ -563,7 +360,6 @@ bool CARDbSetPhyParameter(struct vnt_private *pDevice, CARD_PHY_TYPE ePHYType,
* none
*
* Return Value: none
- *
*/
bool CARDbUpdateTSF(struct vnt_private *pDevice, unsigned char byRxRate,
u64 qwBSSTimestamp, u64 qwLocalTSF)
@@ -572,8 +368,7 @@ bool CARDbUpdateTSF(struct vnt_private *pDevice, unsigned char byRxRate,
if (qwBSSTimestamp != qwLocalTSF) {
qwTSFOffset = CARDqGetTSFOffset(byRxRate, qwBSSTimestamp, qwLocalTSF);
- // adjust TSF
- // HW's TSF add TSF Offset reg
+ /* adjust TSF, HW's TSF add TSF Offset reg */
VNSvOutPortD(pDevice->PortOffset + MAC_REG_TSFOFST, (u32)qwTSFOffset);
VNSvOutPortD(pDevice->PortOffset + MAC_REG_TSFOFST + 4, (u32)(qwTSFOffset >> 32));
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_TFTCTL, TFTCTL_TSFSYNCEN);
@@ -593,21 +388,20 @@ bool CARDbUpdateTSF(struct vnt_private *pDevice, unsigned char byRxRate,
* none
*
* Return Value: true if succeed; otherwise false
- *
*/
bool CARDbSetBeaconPeriod(struct vnt_private *pDevice,
unsigned short wBeaconInterval)
{
u64 qwNextTBTT = 0;
- CARDbGetCurrentTSF(pDevice->PortOffset, &qwNextTBTT); //Get Local TSF counter
+ CARDbGetCurrentTSF(pDevice, &qwNextTBTT); /* Get Local TSF counter */
qwNextTBTT = CARDqGetNextTBTT(qwNextTBTT, wBeaconInterval);
- // set HW beacon interval
+ /* set HW beacon interval */
VNSvOutPortW(pDevice->PortOffset + MAC_REG_BI, wBeaconInterval);
pDevice->wBeaconInterval = wBeaconInterval;
- // Set NextTBTT
+ /* Set NextTBTT */
VNSvOutPortD(pDevice->PortOffset + MAC_REG_NEXTTBTT, (u32)qwNextTBTT);
VNSvOutPortD(pDevice->PortOffset + MAC_REG_NEXTTBTT + 4, (u32)(qwNextTBTT >> 32));
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
@@ -616,225 +410,6 @@ bool CARDbSetBeaconPeriod(struct vnt_private *pDevice,
}
/*
- * Description: Card Stop Hardware Tx
- *
- * Parameters:
- * In:
- * pDeviceHandler - The adapter to be set
- * ePktType - Packet type to stop
- * Out:
- * none
- *
- * Return Value: true if all data packet complete; otherwise false.
- *
- */
-bool CARDbStopTxPacket(struct vnt_private *pDevice, CARD_PKT_TYPE ePktType)
-{
-
- if (ePktType == PKT_TYPE_802_11_ALL) {
- pDevice->bStopBeacon = true;
- pDevice->bStopTx0Pkt = true;
- pDevice->bStopDataPkt = true;
- } else if (ePktType == PKT_TYPE_802_11_BCN) {
- pDevice->bStopBeacon = true;
- } else if (ePktType == PKT_TYPE_802_11_MNG) {
- pDevice->bStopTx0Pkt = true;
- } else if (ePktType == PKT_TYPE_802_11_DATA) {
- pDevice->bStopDataPkt = true;
- }
-
- if (pDevice->bStopBeacon == true) {
- if (pDevice->bIsBeaconBufReadySet == true) {
- if (pDevice->cbBeaconBufReadySetCnt < WAIT_BEACON_TX_DOWN_TMO) {
- pDevice->cbBeaconBufReadySetCnt++;
- return false;
- }
- }
- pDevice->bIsBeaconBufReadySet = false;
- pDevice->cbBeaconBufReadySetCnt = 0;
- MACvRegBitsOff(pDevice->PortOffset, MAC_REG_TCR, TCR_AUTOBCNTX);
- }
- // wait all TD0 complete
- if (pDevice->bStopTx0Pkt == true) {
- if (pDevice->iTDUsed[TYPE_TXDMA0] != 0)
- return false;
- }
- // wait all Data TD complete
- if (pDevice->bStopDataPkt == true) {
- if (pDevice->iTDUsed[TYPE_AC0DMA] != 0)
- return false;
- }
-
- return true;
-}
-
-/*
- * Description: Card Start Hardware Tx
- *
- * Parameters:
- * In:
- * pDeviceHandler - The adapter to be set
- * ePktType - Packet type to start
- * Out:
- * none
- *
- * Return Value: true if success; false if failed.
- *
- */
-bool CARDbStartTxPacket(struct vnt_private *pDevice, CARD_PKT_TYPE ePktType)
-{
-
- if (ePktType == PKT_TYPE_802_11_ALL) {
- pDevice->bStopBeacon = false;
- pDevice->bStopTx0Pkt = false;
- pDevice->bStopDataPkt = false;
- } else if (ePktType == PKT_TYPE_802_11_BCN) {
- pDevice->bStopBeacon = false;
- } else if (ePktType == PKT_TYPE_802_11_MNG) {
- pDevice->bStopTx0Pkt = false;
- } else if (ePktType == PKT_TYPE_802_11_DATA) {
- pDevice->bStopDataPkt = false;
- }
-
- if ((pDevice->bStopBeacon == false) &&
- (pDevice->bBeaconBufReady == true) &&
- (pDevice->op_mode == NL80211_IFTYPE_ADHOC)) {
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_TCR, TCR_AUTOBCNTX);
- }
-
- return true;
-}
-
-/*
- * Description: Card Set BSSID value
- *
- * Parameters:
- * In:
- * pDeviceHandler - The adapter to be set
- * pbyBSSID - pointer to BSSID field
- * bAdhoc - flag to indicate IBSS
- * Out:
- * none
- *
- * Return Value: true if success; false if failed.
- *
- */
-bool CARDbSetBSSID(struct vnt_private *pDevice,
- unsigned char *pbyBSSID, enum nl80211_iftype op_mode)
-{
-
- MACvWriteBSSIDAddress(pDevice->PortOffset, pbyBSSID);
- memcpy(pDevice->abyBSSID, pbyBSSID, WLAN_BSSID_LEN);
- if (op_mode == NL80211_IFTYPE_ADHOC)
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_HOSTCR, HOSTCR_ADHOC);
- else
- MACvRegBitsOff(pDevice->PortOffset, MAC_REG_HOSTCR, HOSTCR_ADHOC);
-
- if (op_mode == NL80211_IFTYPE_AP)
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_HOSTCR, HOSTCR_AP);
- else
- MACvRegBitsOff(pDevice->PortOffset, MAC_REG_HOSTCR, HOSTCR_AP);
-
- if (op_mode == NL80211_IFTYPE_UNSPECIFIED) {
- MACvRegBitsOff(pDevice->PortOffset, MAC_REG_RCR, RCR_BSSID);
- pDevice->bBSSIDFilter = false;
- pDevice->byRxMode &= ~RCR_BSSID;
- pr_debug("wcmd: rx_mode = %x\n", pDevice->byRxMode);
- } else {
- if (is_zero_ether_addr(pDevice->abyBSSID) == false) {
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_RCR, RCR_BSSID);
- pDevice->bBSSIDFilter = true;
- pDevice->byRxMode |= RCR_BSSID;
- }
- pr_debug("wmgr: rx_mode = %x\n", pDevice->byRxMode);
- }
- // Adopt BSS state in Adapter Device Object
- pDevice->op_mode = op_mode;
- return true;
-}
-
-/*
- * Description: Card indicate status
- *
- * Parameters:
- * In:
- * pDeviceHandler - The adapter to be set
- * eStatus - Status
- * Out:
- * none
- *
- * Return Value: true if success; false if failed.
- *
- */
-
-/*
- * Description: Save Assoc info. contain in assoc. response frame
- *
- * Parameters:
- * In:
- * pDevice - The adapter to be set
- * wCapabilityInfo - Capability information
- * wStatus - Status code
- * wAID - Assoc. ID
- * uLen - Length of IEs
- * pbyIEs - pointer to IEs
- * Out:
- * none
- *
- * Return Value: true if succeed; otherwise false
- *
- */
-bool CARDbSetTxDataRate(
- struct vnt_private *pDevice,
- unsigned short wDataRate
-)
-{
-
- pDevice->wCurrentRate = wDataRate;
- return true;
-}
-
-/*+
- *
- * Routine Description:
- * Consider to power down when no more packets to tx or rx.
- *
- * Parameters:
- * In:
- * pDevice - The adapter to be set
- * Out:
- * none
- *
- * Return Value: true if power down success; otherwise false
- *
- -*/
-bool
-CARDbPowerDown(
- struct vnt_private *pDevice
-)
-{
- unsigned int uIdx;
-
- // check if already in Doze mode
- if (MACbIsRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PS))
- return true;
-
- // Force PSEN on
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PSEN);
-
- // check if all TD are empty,
-
- for (uIdx = 0; uIdx < TYPE_MAXTD; uIdx++) {
- if (pDevice->iTDUsed[uIdx] != 0)
- return false;
- }
-
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_GO2DOZE);
- pr_debug("Go to Doze ZZZZZZZZZZZZZZZ\n");
- return true;
-}
-
-/*
* Description: Turn off Radio power
*
* Parameters:
@@ -844,7 +419,6 @@ CARDbPowerDown(
* none
*
* Return Value: true if success; otherwise false
- *
*/
bool CARDbRadioPowerOff(struct vnt_private *pDevice)
{
@@ -861,7 +435,7 @@ bool CARDbRadioPowerOff(struct vnt_private *pDevice)
case RF_AIROHA:
case RF_AL2230S:
- case RF_AIROHA7230: //RobertYu:20050104
+ case RF_AIROHA7230:
MACvWordRegBitsOff(pDevice->PortOffset, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE2);
MACvWordRegBitsOff(pDevice->PortOffset, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
break;
@@ -870,12 +444,11 @@ bool CARDbRadioPowerOff(struct vnt_private *pDevice)
MACvRegBitsOff(pDevice->PortOffset, MAC_REG_HOSTCR, HOSTCR_RXON);
- BBvSetDeepSleep(pDevice->PortOffset, pDevice->byLocalID);
+ BBvSetDeepSleep(pDevice, pDevice->byLocalID);
pDevice->bRadioOff = true;
- //2007-0409-03,<Add> by chester
pr_debug("chester power off\n");
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_GPIOCTL0, LED_ACTSET); //LED issue
+ MACvRegBitsOn(pDevice->PortOffset, MAC_REG_GPIOCTL0, LED_ACTSET); /* LED issue */
return bResult;
}
@@ -889,7 +462,6 @@ bool CARDbRadioPowerOff(struct vnt_private *pDevice)
* none
*
* Return Value: true if success; otherwise false
- *
*/
bool CARDbRadioPowerOn(struct vnt_private *pDevice)
{
@@ -907,7 +479,7 @@ bool CARDbRadioPowerOn(struct vnt_private *pDevice)
pr_debug("chester pbRadioOff\n");
return true; }
- BBvExitDeepSleep(pDevice->PortOffset, pDevice->byLocalID);
+ BBvExitDeepSleep(pDevice, pDevice->byLocalID);
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_HOSTCR, HOSTCR_RXON);
@@ -919,7 +491,7 @@ bool CARDbRadioPowerOn(struct vnt_private *pDevice)
case RF_AIROHA:
case RF_AL2230S:
- case RF_AIROHA7230: //RobertYu:20050104
+ case RF_AIROHA7230:
MACvWordRegBitsOn(pDevice->PortOffset, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE2 |
SOFTPWRCTL_SWPE3));
break;
@@ -927,493 +499,11 @@ bool CARDbRadioPowerOn(struct vnt_private *pDevice)
}
pDevice->bRadioOff = false;
-// 2007-0409-03,<Add> by chester
pr_debug("chester power on\n");
- MACvRegBitsOff(pDevice->PortOffset, MAC_REG_GPIOCTL0, LED_ACTSET); //LED issue
+ MACvRegBitsOff(pDevice->PortOffset, MAC_REG_GPIOCTL0, LED_ACTSET); /* LED issue */
return bResult;
}
-bool CARDbRemoveKey(struct vnt_private *pDevice, unsigned char *pbyBSSID)
-{
-
- KeybRemoveAllKey(&(pDevice->sKey), pbyBSSID, pDevice->PortOffset);
- return true;
-}
-
-/*
- *
- * Description:
- * Add BSSID in PMKID Candidate list.
- *
- * Parameters:
- * In:
- * hDeviceContext - device structure point
- * pbyBSSID - BSSID address for adding
- * wRSNCap - BSS's RSN capability
- * Out:
- * none
- *
- * Return Value: none.
- *
- -*/
-bool
-CARDbAdd_PMKID_Candidate(
- struct vnt_private *pDevice,
- unsigned char *pbyBSSID,
- bool bRSNCapExist,
- unsigned short wRSNCap
-)
-{
- struct pmkid_candidate *pCandidateList;
- unsigned int ii = 0;
-
- pr_debug("bAdd_PMKID_Candidate START: (%d)\n",
- (int)pDevice->gsPMKIDCandidate.NumCandidates);
-
- if (pDevice->gsPMKIDCandidate.NumCandidates >= MAX_PMKIDLIST) {
- pr_debug("vFlush_PMKID_Candidate: 3\n");
- memset(&pDevice->gsPMKIDCandidate, 0, sizeof(SPMKIDCandidateEvent));
- }
-
- for (ii = 0; ii < 6; ii++)
- pr_debug("%02X ", *(pbyBSSID + ii));
-
- pr_debug("\n");
-
- // Update Old Candidate
- for (ii = 0; ii < pDevice->gsPMKIDCandidate.NumCandidates; ii++) {
- pCandidateList = &pDevice->gsPMKIDCandidate.CandidateList[ii];
- if (!memcmp(pCandidateList->BSSID, pbyBSSID, ETH_ALEN)) {
- if (bRSNCapExist && (wRSNCap & BIT0))
- pCandidateList->Flags |= NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED;
- else
- pCandidateList->Flags &= ~(NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED);
-
- return true;
- }
- }
-
- // New Candidate
- pCandidateList = &pDevice->gsPMKIDCandidate.CandidateList[pDevice->gsPMKIDCandidate.NumCandidates];
- if (bRSNCapExist && (wRSNCap & BIT0))
- pCandidateList->Flags |= NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED;
- else
- pCandidateList->Flags &= ~(NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED);
-
- memcpy(pCandidateList->BSSID, pbyBSSID, ETH_ALEN);
- pDevice->gsPMKIDCandidate.NumCandidates++;
- pr_debug("NumCandidates:%d\n",
- (int)pDevice->gsPMKIDCandidate.NumCandidates);
- return true;
-}
-
-void *
-CARDpGetCurrentAddress(
- struct vnt_private *pDevice
-)
-{
-
- return pDevice->abyCurrentNetAddr;
-}
-
-/*
- *
- * Description:
- * Start Spectrum Measure defined in 802.11h
- *
- * Parameters:
- * In:
- * hDeviceContext - device structure point
- * Out:
- * none
- *
- * Return Value: none.
- *
- -*/
-bool
-CARDbStartMeasure(
- struct vnt_private *pDevice,
- void *pvMeasureEIDs,
- unsigned int uNumOfMeasureEIDs
-)
-{
- PWLAN_IE_MEASURE_REQ pEID = (PWLAN_IE_MEASURE_REQ) pvMeasureEIDs;
- u64 qwCurrTSF;
- u64 qwStartTSF;
- bool bExpired = true;
- unsigned short wDuration = 0;
-
- if ((pEID == NULL) ||
- (uNumOfMeasureEIDs == 0)) {
- return true;
- }
- CARDbGetCurrentTSF(pDevice->PortOffset, &qwCurrTSF);
- if (pDevice->bMeasureInProgress == true) {
- pDevice->bMeasureInProgress = false;
- VNSvOutPortB(pDevice->PortOffset + MAC_REG_RCR, pDevice->byOrgRCR);
- MACvSelectPage1(pDevice->PortOffset);
- VNSvOutPortD(pDevice->PortOffset + MAC_REG_MAR0, pDevice->dwOrgMAR0);
- VNSvOutPortD(pDevice->PortOffset + MAC_REG_MAR4, pDevice->dwOrgMAR4);
- // clear measure control
- MACvRegBitsOff(pDevice->PortOffset, MAC_REG_MSRCTL, MSRCTL_EN);
- MACvSelectPage0(pDevice->PortOffset);
- set_channel(pDevice, pDevice->byOrgChannel);
- MACvSelectPage1(pDevice->PortOffset);
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL+1, MSRCTL1_TXPAUSE);
- MACvSelectPage0(pDevice->PortOffset);
- }
- pDevice->uNumOfMeasureEIDs = uNumOfMeasureEIDs;
-
- do {
- pDevice->pCurrMeasureEID = pEID;
- pEID++;
- pDevice->uNumOfMeasureEIDs--;
-
- if (pDevice->byLocalID > REV_ID_VT3253_B1) {
- qwStartTSF = *((u64 *)(pDevice->pCurrMeasureEID->sReq.abyStartTime));
- wDuration = *((unsigned short *)(pDevice->pCurrMeasureEID->sReq.abyDuration));
- wDuration += 1; // 1 TU for channel switching
-
- if (qwStartTSF == 0) {
- // start immediately by setting start TSF == current TSF + 2 TU
- qwStartTSF = qwCurrTSF + 2048;
-
- bExpired = false;
- break;
- } else {
- // start at the requested start TSF - 1 TU (for channel switching)
- qwStartTSF -= 1024;
- }
-
- if (qwCurrTSF < qwStartTSF) {
- bExpired = false;
- break;
- }
- VNTWIFIbMeasureReport(pDevice->pMgmt,
- false,
- pDevice->pCurrMeasureEID,
- MEASURE_MODE_LATE,
- pDevice->byBasicMap,
- pDevice->byCCAFraction,
- pDevice->abyRPIs
- );
- } else {
- // hardware does not support measurement
- VNTWIFIbMeasureReport(pDevice->pMgmt,
- false,
- pDevice->pCurrMeasureEID,
- MEASURE_MODE_INCAPABLE,
- pDevice->byBasicMap,
- pDevice->byCCAFraction,
- pDevice->abyRPIs
- );
- }
- } while (pDevice->uNumOfMeasureEIDs != 0);
-
- if (!bExpired) {
- MACvSelectPage1(pDevice->PortOffset);
- VNSvOutPortD(pDevice->PortOffset + MAC_REG_MSRSTART, (u32)qwStartTSF);
- VNSvOutPortD(pDevice->PortOffset + MAC_REG_MSRSTART + 4, (u32)(qwStartTSF >> 32));
- VNSvOutPortW(pDevice->PortOffset + MAC_REG_MSRDURATION, wDuration);
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL, MSRCTL_EN);
- MACvSelectPage0(pDevice->PortOffset);
- } else {
- // all measurement start times have expired; complete the action
- VNTWIFIbMeasureReport(pDevice->pMgmt,
- true,
- NULL,
- 0,
- pDevice->byBasicMap,
- pDevice->byCCAFraction,
- pDevice->abyRPIs
- );
- }
- return true;
-}
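For reference, the TSF arithmetic above works in microseconds while 802.11 expresses times in Time Units (1 TU = 1024 us), which is where the literals 2048 (2 TU) and 1024 (1 TU) come from. A minimal standalone sketch of that conversion, with hypothetical values and names not taken from the driver:

/* Minimal sketch (not driver code): TSF values are microseconds, a TU is
 * 1024 us, so "current TSF + 2 TU" and "start TSF - 1 TU" are simple adds. */
#include <stdint.h>
#include <stdio.h>

#define TU_US 1024ULL /* one 802.11 Time Unit in microseconds */

int main(void)
{
	uint64_t curr_tsf = 123456789ULL; /* hypothetical current TSF */
	uint64_t start_tsf = 0;           /* 0 means "start immediately" */

	if (start_tsf == 0)
		start_tsf = curr_tsf + 2 * TU_US; /* start 2 TU from now */
	else
		start_tsf -= TU_US;               /* 1 TU early, for channel switching */

	printf("measurement start TSF: %llu us\n", (unsigned long long)start_tsf);
	return 0;
}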
-
-/*
- *
- * Description:
- * Do Channel Switch defined in 802.11h
- *
- * Parameters:
- * In:
- * pDevice - device structure pointer
- * Out:
- * none
- *
- * Return Value: true if the channel switch was started successfully.
- *
- -*/
-bool
-CARDbChannelSwitch(
- struct vnt_private *pDevice,
- unsigned char byMode,
- unsigned char byNewChannel,
- unsigned char byCount
-)
-{
- bool bResult = true;
-
- if (byCount == 0) {
- bResult = set_channel(pDevice, byNewChannel);
- VNTWIFIbChannelSwitch(pDevice->pMgmt, byNewChannel);
- MACvSelectPage1(pDevice->PortOffset);
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL+1, MSRCTL1_TXPAUSE);
- MACvSelectPage0(pDevice->PortOffset);
- return bResult;
- }
- pDevice->byChannelSwitchCount = byCount;
- pDevice->byNewChannel = byNewChannel;
- pDevice->bChannelSwitch = true;
- if (byMode == 1)
- bResult = CARDbStopTxPacket(pDevice, PKT_TYPE_802_11_ALL);
-
- return bResult;
-}
-
-/*
- *
- * Description:
- * Handle Quiet EID defined in 802.11h
- *
- * Parameters:
- * In:
- * pDevice - device structure pointer
- * Out:
- * none
- *
- * Return Value: always true.
- *
- -*/
-bool
-CARDbSetQuiet(
- struct vnt_private *pDevice,
- bool bResetQuiet,
- unsigned char byQuietCount,
- unsigned char byQuietPeriod,
- unsigned short wQuietDuration,
- unsigned short wQuietOffset
-)
-{
- unsigned int ii = 0;
-
- if (bResetQuiet) {
- MACvRegBitsOff(pDevice->PortOffset, MAC_REG_MSRCTL, (MSRCTL_QUIETTXCHK | MSRCTL_QUIETEN));
- for (ii = 0; ii < MAX_QUIET_COUNT; ii++)
- pDevice->sQuiet[ii].bEnable = false;
-
- pDevice->uQuietEnqueue = 0;
- pDevice->bEnableFirstQuiet = false;
- pDevice->bQuietEnable = false;
- pDevice->byQuietStartCount = byQuietCount;
- }
- if (pDevice->sQuiet[pDevice->uQuietEnqueue].bEnable == false) {
- pDevice->sQuiet[pDevice->uQuietEnqueue].bEnable = true;
- pDevice->sQuiet[pDevice->uQuietEnqueue].byPeriod = byQuietPeriod;
- pDevice->sQuiet[pDevice->uQuietEnqueue].wDuration = wQuietDuration;
- pDevice->sQuiet[pDevice->uQuietEnqueue].dwStartTime = (unsigned long) byQuietCount;
- pDevice->sQuiet[pDevice->uQuietEnqueue].dwStartTime *= pDevice->wBeaconInterval;
- pDevice->sQuiet[pDevice->uQuietEnqueue].dwStartTime += wQuietOffset;
- pDevice->uQuietEnqueue++;
- pDevice->uQuietEnqueue %= MAX_QUIET_COUNT;
- if (pDevice->byQuietStartCount < byQuietCount)
- pDevice->byQuietStartCount = byQuietCount;
- }
- return true;
-}
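The start time queued above is simply the quiet count in beacon intervals plus the quiet offset, all expressed in TU. A minimal sketch with assumed example values (not driver code):

/* Minimal sketch: quiet start time = count * beacon interval + offset, in TU. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t quiet_count = 3;        /* beacons until the quiet period starts */
	uint16_t beacon_interval = 100; /* TU, a typical beacon interval */
	uint16_t quiet_offset = 20;     /* TU after that beacon */

	uint32_t start_time = (uint32_t)quiet_count * beacon_interval + quiet_offset;

	printf("quiet period starts %u TU from now\n", start_time);
	return 0;
}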
-
-/*
- *
- * Description:
- * Do Quiet. Called either by the ISR (after start) or by
- * VNTWIFI (before start), so no spinlock is needed.
- *
- * Parameters:
- * In:
- * pDevice - device structure pointer
- * Out:
- * none
- *
- * Return Value: true if a quiet period was scheduled; false if the next quiet period has already expired.
- *
- -*/
-bool
-CARDbStartQuiet(
- struct vnt_private *pDevice
-)
-{
- unsigned int ii = 0;
- unsigned long dwStartTime = 0xFFFFFFFF;
- unsigned int uCurrentQuietIndex = 0;
- unsigned long dwNextTime = 0;
- unsigned long dwGap = 0;
- unsigned long dwDuration = 0;
-
- for (ii = 0; ii < MAX_QUIET_COUNT; ii++) {
- if ((pDevice->sQuiet[ii].bEnable == true) &&
- (dwStartTime > pDevice->sQuiet[ii].dwStartTime)) {
- dwStartTime = pDevice->sQuiet[ii].dwStartTime;
- uCurrentQuietIndex = ii;
- }
- }
- if (dwStartTime == 0xFFFFFFFF) {
- // no more quiet
- pDevice->bQuietEnable = false;
- MACvRegBitsOff(pDevice->PortOffset, MAC_REG_MSRCTL, (MSRCTL_QUIETTXCHK | MSRCTL_QUIETEN));
- } else {
- if (pDevice->bQuietEnable == false) {
- // first quiet
- pDevice->byQuietStartCount--;
- dwNextTime = pDevice->sQuiet[uCurrentQuietIndex].dwStartTime;
- dwNextTime %= pDevice->wBeaconInterval;
- MACvSelectPage1(pDevice->PortOffset);
- VNSvOutPortW(pDevice->PortOffset + MAC_REG_QUIETINIT, (unsigned short) dwNextTime);
- VNSvOutPortW(pDevice->PortOffset + MAC_REG_QUIETDUR, (unsigned short) pDevice->sQuiet[uCurrentQuietIndex].wDuration);
- if (pDevice->byQuietStartCount == 0) {
- pDevice->bEnableFirstQuiet = false;
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL, (MSRCTL_QUIETTXCHK | MSRCTL_QUIETEN));
- } else {
- pDevice->bEnableFirstQuiet = true;
- }
- MACvSelectPage0(pDevice->PortOffset);
- } else {
- if (pDevice->dwCurrentQuietEndTime > pDevice->sQuiet[uCurrentQuietIndex].dwStartTime) {
- // overlap with previous Quiet
- dwGap = pDevice->dwCurrentQuietEndTime - pDevice->sQuiet[uCurrentQuietIndex].dwStartTime;
- if (dwGap >= pDevice->sQuiet[uCurrentQuietIndex].wDuration) {
- // return false to indicate next quiet expired, should call this function again
- return false;
- }
- dwDuration = pDevice->sQuiet[uCurrentQuietIndex].wDuration - dwGap;
- dwGap = 0;
- } else {
- dwGap = pDevice->sQuiet[uCurrentQuietIndex].dwStartTime - pDevice->dwCurrentQuietEndTime;
- dwDuration = pDevice->sQuiet[uCurrentQuietIndex].wDuration;
- }
- // set GAP and Next duration
- MACvSelectPage1(pDevice->PortOffset);
- VNSvOutPortW(pDevice->PortOffset + MAC_REG_QUIETGAP, (unsigned short) dwGap);
- VNSvOutPortW(pDevice->PortOffset + MAC_REG_QUIETDUR, (unsigned short) dwDuration);
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL, MSRCTL_QUIETRPT);
- MACvSelectPage0(pDevice->PortOffset);
- }
- pDevice->bQuietEnable = true;
- pDevice->dwCurrentQuietEndTime = pDevice->sQuiet[uCurrentQuietIndex].dwStartTime;
- pDevice->dwCurrentQuietEndTime += pDevice->sQuiet[uCurrentQuietIndex].wDuration;
- if (pDevice->sQuiet[uCurrentQuietIndex].byPeriod == 0) {
- // not periodic; disable the current quiet element
- pDevice->sQuiet[uCurrentQuietIndex].bEnable = false;
- } else {
- // set next period start time
- dwNextTime = (unsigned long) pDevice->sQuiet[uCurrentQuietIndex].byPeriod;
- dwNextTime *= pDevice->wBeaconInterval;
- pDevice->sQuiet[uCurrentQuietIndex].dwStartTime = dwNextTime;
- }
- if (pDevice->dwCurrentQuietEndTime > 0x80010000) {
- // decrement all times to avoid wrap-around
- for (ii = 0; ii < MAX_QUIET_COUNT; ii++) {
- if (pDevice->sQuiet[ii].bEnable == true)
- pDevice->sQuiet[ii].dwStartTime -= 0x80000000;
-
- }
- pDevice->dwCurrentQuietEndTime -= 0x80000000;
- }
- }
- return true;
-}
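The final block above rebases all pending start times once the bookkeeping counters grow past 0x80010000, so unsigned comparisons keep their ordering instead of wrapping. A minimal sketch with hypothetical values:

/* Minimal sketch of the wrap-around avoidance (assumed 32-bit TU counters):
 * once the end time exceeds the threshold, every tracked time is rebased by
 * the same amount, preserving relative ordering. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t end_time = 0x80012345;   /* hypothetical current quiet end time */
	uint32_t start_time = 0x80011000; /* hypothetical pending start time */

	if (end_time > 0x80010000) {
		start_time -= 0x80000000;
		end_time -= 0x80000000;
	}

	printf("rebased: start=0x%08X end=0x%08X\n", start_time, end_time);
	return 0;
}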
-
-/*
- *
- * Description:
- * Set Local Power Constraint
- *
- * Parameters:
- * In:
- * pDevice - device structure pointer
- * Out:
- * none
- *
- * Return Value: none.
- *
- -*/
-void
-CARDvSetPowerConstraint(
- struct vnt_private *pDevice,
- unsigned char byChannel,
- char byPower
-)
-{
-
- if (byChannel > CB_MAX_CHANNEL_24G) {
- if (pDevice->bCountryInfo5G == true)
- pDevice->abyLocalPwr[byChannel] = pDevice->abyRegPwr[byChannel] - byPower;
-
- } else {
- if (pDevice->bCountryInfo24G == true)
- pDevice->abyLocalPwr[byChannel] = pDevice->abyRegPwr[byChannel] - byPower;
-
- }
-}
-
-/*
- *
- * Description:
- * Get the min/max Tx power capability for the current channel
- *
- * Parameters:
- * In:
- * pDevice - device structure pointer
- * Out:
- * none
- *
- * Return Value: none.
- *
- -*/
-void
-CARDvGetPowerCapability(
- struct vnt_private *pDevice,
- unsigned char *pbyMinPower,
- unsigned char *pbyMaxPower
-)
-{
- unsigned char byDec = 0;
-
- *pbyMaxPower = pDevice->abyOFDMDefaultPwr[pDevice->byCurrentCh];
- byDec = pDevice->abyOFDMPwrTbl[pDevice->byCurrentCh];
- if (pDevice->byRFType == RF_UW2452) {
- byDec *= 3;
- byDec >>= 1;
- } else {
- byDec <<= 1;
- }
- *pbyMinPower = pDevice->abyOFDMDefaultPwr[pDevice->byCurrentCh] - byDec;
-}
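The min/max derivation above scales the per-channel OFDM table entry by 3/2 for the UW2452 RF and by 2 for other RF types, then subtracts it from the channel's default power. A small standalone sketch with hypothetical values (not the driver's tables):

/* Minimal sketch of the power-capability math above. */
#include <stdio.h>

int main(void)
{
	unsigned char default_pwr = 17; /* hypothetical abyOFDMDefaultPwr[ch] */
	unsigned char tbl_pwr = 4;      /* hypothetical abyOFDMPwrTbl[ch] */
	int is_uw2452 = 1;              /* RF_UW2452 uses the 3/2 scaling */

	unsigned char dec = is_uw2452 ? (tbl_pwr * 3) >> 1 : tbl_pwr << 1;
	unsigned char min_pwr = default_pwr - dec;

	printf("max %u dBm, min %u dBm\n", (unsigned)default_pwr, (unsigned)min_pwr);
	return 0;
}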
-
-/*
- *
- * Description:
- * Get Current Tx Power
- *
- * Parameters:
- * In:
- * pDevice - device structure pointer
- * Out:
- * none
- *
- * Return Value: current Tx power in dBm.
- *
- */
-char
-CARDbyGetTransmitPower(
- struct vnt_private *pDevice
-)
-{
-
- return pDevice->byCurPwrdBm;
-}
-
-//xxx
void
CARDvSafeResetTx(
struct vnt_private *pDevice
@@ -1422,7 +512,7 @@ CARDvSafeResetTx(
unsigned int uu;
PSTxDesc pCurrTD;
- // initialize TD index
+ /* initialize TD index */
pDevice->apTailTD[0] = pDevice->apCurrTD[0] = &(pDevice->apTD0Rings[0]);
pDevice->apTailTD[1] = pDevice->apCurrTD[1] = &(pDevice->apTD1Rings[0]);
@@ -1432,28 +522,27 @@ CARDvSafeResetTx(
for (uu = 0; uu < pDevice->sOpts.nTxDescs[0]; uu++) {
pCurrTD = &(pDevice->apTD0Rings[uu]);
pCurrTD->m_td0TD0.f1Owner = OWNED_BY_HOST;
- // init all Tx Packet pointer to NULL
+ /* init all Tx Packet pointer to NULL */
}
for (uu = 0; uu < pDevice->sOpts.nTxDescs[1]; uu++) {
pCurrTD = &(pDevice->apTD1Rings[uu]);
pCurrTD->m_td0TD0.f1Owner = OWNED_BY_HOST;
- // init all Tx Packet pointer to NULL
+ /* init all Tx Packet pointer to NULL */
}
- // set MAC TD pointer
+ /* set MAC TD pointer */
MACvSetCurrTXDescAddr(TYPE_TXDMA0, pDevice->PortOffset,
(pDevice->td0_pool_dma));
MACvSetCurrTXDescAddr(TYPE_AC0DMA, pDevice->PortOffset,
(pDevice->td1_pool_dma));
- // set MAC Beacon TX pointer
+ /* set MAC Beacon TX pointer */
MACvSetCurrBCNTxDescAddr(pDevice->PortOffset,
(pDevice->tx_beacon_dma));
}
-/*+
- *
+/*
* Description:
* Reset Rx
*
@@ -1464,8 +553,7 @@ CARDvSafeResetTx(
* none
*
* Return Value: none
- *
- -*/
+ */
void
CARDvSafeResetRx(
struct vnt_private *pDevice
@@ -1474,11 +562,11 @@ CARDvSafeResetRx(
unsigned int uu;
PSRxDesc pDesc;
- // initialize RD index
+ /* initialize RD index */
pDevice->pCurrRD[0] = &(pDevice->aRD0Ring[0]);
pDevice->pCurrRD[1] = &(pDevice->aRD1Ring[0]);
- // init state, all RD is chip's
+ /* init state, all RD is chip's */
for (uu = 0; uu < pDevice->sOpts.nRxDescs0; uu++) {
pDesc = &(pDevice->aRD0Ring[uu]);
pDesc->m_rd0RD0.wResCount = (unsigned short)(pDevice->rx_buf_sz);
@@ -1486,7 +574,7 @@ CARDvSafeResetRx(
pDesc->m_rd1RD1.wReqCount = (unsigned short)(pDevice->rx_buf_sz);
}
- // init state, all RD is chip's
+ /* init state, all RD is chip's */
for (uu = 0; uu < pDevice->sOpts.nRxDescs1; uu++) {
pDesc = &(pDevice->aRD1Ring[uu]);
pDesc->m_rd0RD0.wResCount = (unsigned short)(pDevice->rx_buf_sz);
@@ -1494,13 +582,10 @@ CARDvSafeResetRx(
pDesc->m_rd1RD1.wReqCount = (unsigned short)(pDevice->rx_buf_sz);
}
- pDevice->cbDFCB = CB_MAX_RX_FRAG;
- pDevice->cbFreeDFCB = pDevice->cbDFCB;
-
- // set perPkt mode
+ /* set perPkt mode */
MACvRx0PerPktMode(pDevice->PortOffset);
MACvRx1PerPktMode(pDevice->PortOffset);
- // set MAC RD pointer
+ /* set MAC RD pointer */
MACvSetCurrRx0DescAddr(pDevice->PortOffset,
pDevice->rd0_pool_dma);
@@ -1519,7 +604,6 @@ CARDvSafeResetRx(
* none
*
* Return Value: response Control frame rate
- *
*/
static unsigned short CARDwGetCCKControlRate(struct vnt_private *pDevice,
unsigned short wRateIdx)
@@ -1527,7 +611,7 @@ static unsigned short CARDwGetCCKControlRate(struct vnt_private *pDevice,
unsigned int ui = (unsigned int) wRateIdx;
while (ui > RATE_1M) {
- if (pDevice->wBasicRate & ((unsigned short)1 << ui))
+ if (pDevice->basic_rates & ((u32)0x1 << ui))
return (unsigned short)ui;
ui--;
@@ -1546,14 +630,13 @@ static unsigned short CARDwGetCCKControlRate(struct vnt_private *pDevice,
* none
*
* Return Value: response Control frame rate
- *
*/
static unsigned short CARDwGetOFDMControlRate(struct vnt_private *pDevice,
unsigned short wRateIdx)
{
unsigned int ui = (unsigned int) wRateIdx;
- pr_debug("BASIC RATE: %X\n", pDevice->wBasicRate);
+ pr_debug("BASIC RATE: %X\n", pDevice->basic_rates);
if (!CARDbIsOFDMinBasicRate((void *)pDevice)) {
pr_debug("CARDwGetOFDMControlRate:(NO OFDM) %d\n", wRateIdx);
@@ -1562,7 +645,7 @@ static unsigned short CARDwGetOFDMControlRate(struct vnt_private *pDevice,
return wRateIdx;
}
while (ui > RATE_11M) {
- if (pDevice->wBasicRate & ((unsigned short)1 << ui)) {
+ if (pDevice->basic_rates & ((u32)0x1 << ui)) {
pr_debug("CARDwGetOFDMControlRate : %d\n", ui);
return (unsigned short)ui;
}
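Both control-rate helpers above treat the basic-rate set as a bitmask indexed by rate, walking down from the requested index until a set bit is found. A minimal sketch of that bit walk, with assumed rate indices rather than the driver's real definitions:

/* Minimal sketch (assumed rate indices): "is rate X basic?" is one bit test,
 * and the fallback rate is found by walking the index downwards. */
#include <stdint.h>
#include <stdio.h>

enum { RATE_1M, RATE_2M, RATE_5M, RATE_11M, RATE_6M, RATE_9M, RATE_12M };

int main(void)
{
	uint32_t basic_rates = (1U << RATE_1M) | (1U << RATE_2M) | (1U << RATE_11M);

	/* find the highest basic rate at or below the requested index */
	unsigned int rate = RATE_12M;
	while (rate > RATE_1M && !(basic_rates & (1U << rate)))
		rate--;

	printf("fallback control rate index: %u\n", rate);
	return 0;
}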
@@ -1582,14 +665,13 @@ static unsigned short CARDwGetOFDMControlRate(struct vnt_private *pDevice,
* none
*
* Return Value: None.
- *
*/
-void CARDvSetRSPINF(struct vnt_private *pDevice, CARD_PHY_TYPE ePHYType)
+void CARDvSetRSPINF(struct vnt_private *pDevice, u8 bb_type)
{
union vnt_phy_field_swap phy;
- unsigned char byTxRate, byRsvTime; //For OFDM
+ unsigned char byTxRate, byRsvTime; /* For OFDM */
- //Set to Page1
+ /* Set to Page1 */
MACvSelectPage1(pDevice->PortOffset);
/* RSPINF_b_1 */
@@ -1629,136 +711,72 @@ void CARDvSetRSPINF(struct vnt_private *pDevice, CARD_PHY_TYPE ePHYType)
VNSvOutPortD(pDevice->PortOffset + MAC_REG_RSPINF_B_11, phy.field_write);
- //RSPINF_a_6
+ /* RSPINF_a_6 */
s_vCalculateOFDMRParameter(RATE_6M,
- ePHYType,
+ bb_type,
&byTxRate,
&byRsvTime);
VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_6, MAKEWORD(byTxRate, byRsvTime));
- //RSPINF_a_9
+ /* RSPINF_a_9 */
s_vCalculateOFDMRParameter(RATE_9M,
- ePHYType,
+ bb_type,
&byTxRate,
&byRsvTime);
VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_9, MAKEWORD(byTxRate, byRsvTime));
- //RSPINF_a_12
+ /* RSPINF_a_12 */
s_vCalculateOFDMRParameter(RATE_12M,
- ePHYType,
+ bb_type,
&byTxRate,
&byRsvTime);
VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_12, MAKEWORD(byTxRate, byRsvTime));
- //RSPINF_a_18
+ /* RSPINF_a_18 */
s_vCalculateOFDMRParameter(RATE_18M,
- ePHYType,
+ bb_type,
&byTxRate,
&byRsvTime);
VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_18, MAKEWORD(byTxRate, byRsvTime));
- //RSPINF_a_24
+ /* RSPINF_a_24 */
s_vCalculateOFDMRParameter(RATE_24M,
- ePHYType,
+ bb_type,
&byTxRate,
&byRsvTime);
VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_24, MAKEWORD(byTxRate, byRsvTime));
- //RSPINF_a_36
+ /* RSPINF_a_36 */
s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)pDevice, RATE_36M),
- ePHYType,
+ bb_type,
&byTxRate,
&byRsvTime);
VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_36, MAKEWORD(byTxRate, byRsvTime));
- //RSPINF_a_48
+ /* RSPINF_a_48 */
s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)pDevice, RATE_48M),
- ePHYType,
+ bb_type,
&byTxRate,
&byRsvTime);
VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_48, MAKEWORD(byTxRate, byRsvTime));
- //RSPINF_a_54
+ /* RSPINF_a_54 */
s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)pDevice, RATE_54M),
- ePHYType,
+ bb_type,
&byTxRate,
&byRsvTime);
VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_54, MAKEWORD(byTxRate, byRsvTime));
-
- //RSPINF_a_72
+ /* RSPINF_a_72 */
s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)pDevice, RATE_54M),
- ePHYType,
+ bb_type,
&byTxRate,
&byRsvTime);
VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_72, MAKEWORD(byTxRate, byRsvTime));
- //Set to Page0
+ /* Set to Page0 */
MACvSelectPage0(pDevice->PortOffset);
}
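Each RSPINF_A_* write above packs the OFDM tx-rate code and the reserve time into one 16-bit word. A minimal sketch assuming the usual MAKEWORD convention (low byte first), which is what the driver's macro appears to follow:

/* Minimal sketch (assumed macro and values, not the driver's definitions). */
#include <stdint.h>
#include <stdio.h>

#define MAKEWORD(lo, hi) \
	((uint16_t)(((uint8_t)(lo)) | (((uint16_t)((uint8_t)(hi))) << 8)))

int main(void)
{
	uint8_t tx_rate = 0x9B;  /* hypothetical OFDM rate code */
	uint8_t rsv_time = 0x20; /* hypothetical reserve time */

	printf("RSPINF word: 0x%04X\n", MAKEWORD(tx_rate, rsv_time));
	return 0;
}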
-/*
- * Description: Update IFS
- *
- * Parameters:
- * In:
- * pDevice - The adapter to be set
- * Out:
- * none
- *
- * Return Value: None.
- *
- */
-void vUpdateIFS(struct vnt_private *pDevice)
-{
- /* Set SIFS, DIFS, EIFS, SlotTime, CwMin */
-
- unsigned char byMaxMin = 0;
-
- if (pDevice->byPacketType == PK_TYPE_11A) {//0000 0000 0000 0000,11a
- pDevice->uSlot = C_SLOT_SHORT;
- pDevice->uSIFS = C_SIFS_A;
- pDevice->uDIFS = C_SIFS_A + 2*C_SLOT_SHORT;
- pDevice->uCwMin = C_CWMIN_A;
- byMaxMin = 4;
- } else if (pDevice->byPacketType == PK_TYPE_11B) {//0000 0001 0000 0000,11b
- pDevice->uSlot = C_SLOT_LONG;
- pDevice->uSIFS = C_SIFS_BG;
- pDevice->uDIFS = C_SIFS_BG + 2*C_SLOT_LONG;
- pDevice->uCwMin = C_CWMIN_B;
- byMaxMin = 5;
- } else { // PK_TYPE_11GA & PK_TYPE_11GB
- pDevice->uSIFS = C_SIFS_BG;
- if (pDevice->bShortSlotTime)
- pDevice->uSlot = C_SLOT_SHORT;
- else
- pDevice->uSlot = C_SLOT_LONG;
-
- pDevice->uDIFS = C_SIFS_BG + 2*pDevice->uSlot;
- if (pDevice->wBasicRate & 0x0150) { //0000 0001 0101 0000,24M,12M,6M
- pDevice->uCwMin = C_CWMIN_A;
- byMaxMin = 4;
- } else {
- pDevice->uCwMin = C_CWMIN_B;
- byMaxMin = 5;
- }
- }
-
- pDevice->uCwMax = C_CWMAX;
- pDevice->uEIFS = C_EIFS;
- if (pDevice->byRFType == RF_RFMD2959) {
- // because TX_PE will reserve 3 us
- VNSvOutPortB(pDevice->PortOffset + MAC_REG_SIFS, (unsigned char)(pDevice->uSIFS - 3));
- VNSvOutPortB(pDevice->PortOffset + MAC_REG_DIFS, (unsigned char)(pDevice->uDIFS - 3));
- } else {
- VNSvOutPortB(pDevice->PortOffset + MAC_REG_SIFS, (unsigned char)pDevice->uSIFS);
- VNSvOutPortB(pDevice->PortOffset + MAC_REG_DIFS, (unsigned char)pDevice->uDIFS);
- }
- VNSvOutPortB(pDevice->PortOffset + MAC_REG_EIFS, (unsigned char)pDevice->uEIFS);
- VNSvOutPortB(pDevice->PortOffset + MAC_REG_SLOT, (unsigned char)pDevice->uSlot);
- byMaxMin |= 0xA0;//1010 0000,C_CWMAX = 1023
- VNSvOutPortB(pDevice->PortOffset + MAC_REG_CWMAXMIN0, (unsigned char)byMaxMin);
-}
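The removed vUpdateIFS() derives DIFS from SIFS and the slot time as DIFS = SIFS + 2 * slot. A tiny sketch using the usual 802.11a/b constants (assumed here, not the driver's C_SIFS_*/C_SLOT_* values):

/* Minimal sketch of the IFS arithmetic: DIFS = SIFS + 2 * slot time. */
#include <stdio.h>

int main(void)
{
	unsigned int sifs_a = 16, slot_short = 9;  /* 802.11a, microseconds */
	unsigned int sifs_bg = 10, slot_long = 20; /* 802.11b, microseconds */

	printf("11a DIFS = %u us\n", sifs_a + 2 * slot_short); /* 34 us */
	printf("11b DIFS = %u us\n", sifs_bg + 2 * slot_long); /* 50 us */
	return 0;
}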
-
void CARDvUpdateBasicTopRate(struct vnt_private *pDevice)
{
unsigned char byTopOFDM = RATE_24M, byTopCCK = RATE_1M;
unsigned char ii;
- //Determines the highest basic rate.
+ /* Determines the highest basic rate. */
for (ii = RATE_54M; ii >= RATE_6M; ii--) {
- if ((pDevice->wBasicRate) & ((unsigned short)(1<<ii))) {
+ if ((pDevice->basic_rates) & ((u32)(1 << ii))) {
byTopOFDM = ii;
break;
}
@@ -1766,7 +784,7 @@ void CARDvUpdateBasicTopRate(struct vnt_private *pDevice)
pDevice->byTopOFDMBasicRate = byTopOFDM;
for (ii = RATE_11M;; ii--) {
- if ((pDevice->wBasicRate) & ((unsigned short)(1<<ii))) {
+ if ((pDevice->basic_rates) & ((u32)(1 << ii))) {
byTopCCK = ii;
break;
}
@@ -1776,24 +794,12 @@ void CARDvUpdateBasicTopRate(struct vnt_private *pDevice)
pDevice->byTopCCKBasicRate = byTopCCK;
}
-bool CARDbAddBasicRate(struct vnt_private *pDevice, unsigned short wRateIdx)
-{
- unsigned short wRate = (unsigned short)(1<<wRateIdx);
-
- pDevice->wBasicRate |= wRate;
-
- //Determines the highest basic rate.
- CARDvUpdateBasicTopRate((void *)pDevice);
-
- return true;
-}
-
bool CARDbIsOFDMinBasicRate(struct vnt_private *pDevice)
{
int ii;
for (ii = RATE_54M; ii >= RATE_6M; ii--) {
- if ((pDevice->wBasicRate) & ((unsigned short)(1 << ii)))
+ if ((pDevice->basic_rates) & ((u32)(1 << ii)))
return true;
}
return false;
@@ -1821,10 +827,11 @@ unsigned char CARDbyGetPktType(struct vnt_private *pDevice)
* none
*
* Return Value: none
- *
*/
-void CARDvSetLoopbackMode(void __iomem *dwIoBase, unsigned short wLoopbackMode)
+void CARDvSetLoopbackMode(struct vnt_private *priv, unsigned short wLoopbackMode)
{
+ void __iomem *dwIoBase = priv->PortOffset;
+
switch (wLoopbackMode) {
case CARD_LB_NONE:
case CARD_LB_MAC:
@@ -1834,9 +841,9 @@ void CARDvSetLoopbackMode(void __iomem *dwIoBase, unsigned short wLoopbackMode)
ASSERT(false);
break;
}
- // set MAC loopback
+ /* set MAC loopback */
MACvSetLoopbackMode(dwIoBase, LOBYTE(wLoopbackMode));
- // set Baseband loopback
+ /* set Baseband loopback */
}
/*
@@ -1849,12 +856,11 @@ void CARDvSetLoopbackMode(void __iomem *dwIoBase, unsigned short wLoopbackMode)
* none
*
* Return Value: none
- *
*/
bool CARDbSoftwareReset(struct vnt_private *pDevice)
{
- // reset MAC
+ /* reset MAC */
if (!MACbSafeSoftwareReset(pDevice->PortOffset))
return false;
@@ -1874,7 +880,6 @@ bool CARDbSoftwareReset(struct vnt_private *pDevice)
* none
*
* Return Value: TSF Offset value
- *
*/
u64 CARDqGetTSFOffset(unsigned char byRxRate, u64 qwTSF1, u64 qwTSF2)
{
@@ -1901,10 +906,10 @@ u64 CARDqGetTSFOffset(unsigned char byRxRate, u64 qwTSF1, u64 qwTSF2)
* qwCurrTSF - Current TSF counter
*
* Return Value: true if success; otherwise false
- *
*/
-bool CARDbGetCurrentTSF(void __iomem *dwIoBase, u64 *pqwCurrTSF)
+bool CARDbGetCurrentTSF(struct vnt_private *priv, u64 *pqwCurrTSF)
{
+ void __iomem *dwIoBase = priv->PortOffset;
unsigned short ww;
unsigned char byData;
@@ -1934,17 +939,12 @@ bool CARDbGetCurrentTSF(void __iomem *dwIoBase, u64 *pqwCurrTSF)
* qwCurrTSF - Current TSF counter
*
* Return Value: TSF value of next Beacon
- *
*/
u64 CARDqGetNextTBTT(u64 qwTSF, unsigned short wBeaconInterval)
{
u32 beacon_int;
beacon_int = wBeaconInterval * 1024;
-
- /* Next TBTT =
- * ((local_current_TSF / beacon_interval) + 1) * beacon_interval
- */
if (beacon_int) {
do_div(qwTSF, beacon_int);
qwTSF += 1;
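The removed comment spelled out the formula this function still implements: next TBTT = ((current TSF / beacon interval) + 1) * beacon interval, with the interval converted from TU to microseconds first. A minimal standalone sketch of the same arithmetic (plain division instead of do_div):

/* Minimal sketch of the next-TBTT calculation, with hypothetical values. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t tsf = 1234567ULL; /* hypothetical local TSF, microseconds */
	uint16_t beacon_tu = 100;  /* beacon interval in TU */
	uint32_t beacon_us = (uint32_t)beacon_tu * 1024;

	uint64_t next_tbtt = (tsf / beacon_us + 1) * beacon_us;

	printf("next TBTT at TSF %llu us\n", (unsigned long long)next_tbtt);
	return 0;
}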
@@ -1966,16 +966,16 @@ u64 CARDqGetNextTBTT(u64 qwTSF, unsigned short wBeaconInterval)
* none
*
* Return Value: none
- *
*/
-void CARDvSetFirstNextTBTT(void __iomem *dwIoBase, unsigned short wBeaconInterval)
+void CARDvSetFirstNextTBTT(struct vnt_private *priv, unsigned short wBeaconInterval)
{
+ void __iomem *dwIoBase = priv->PortOffset;
u64 qwNextTBTT = 0;
- CARDbGetCurrentTSF(dwIoBase, &qwNextTBTT); //Get Local TSF counter
+ CARDbGetCurrentTSF(priv, &qwNextTBTT); /* Get Local TSF counter */
qwNextTBTT = CARDqGetNextTBTT(qwNextTBTT, wBeaconInterval);
- // Set NextTBTT
+ /* Set NextTBTT */
VNSvOutPortD(dwIoBase + MAC_REG_NEXTTBTT, (u32)qwNextTBTT);
VNSvOutPortD(dwIoBase + MAC_REG_NEXTTBTT + 4, (u32)(qwNextTBTT >> 32));
MACvRegBitsOn(dwIoBase, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
@@ -1994,12 +994,13 @@ void CARDvSetFirstNextTBTT(void __iomem *dwIoBase, unsigned short wBeaconInterva
* none
*
* Return Value: none
- *
*/
-void CARDvUpdateNextTBTT(void __iomem *dwIoBase, u64 qwTSF, unsigned short wBeaconInterval)
+void CARDvUpdateNextTBTT(struct vnt_private *priv, u64 qwTSF, unsigned short wBeaconInterval)
{
+ void __iomem *dwIoBase = priv->PortOffset;
+
qwTSF = CARDqGetNextTBTT(qwTSF, wBeaconInterval);
- // Set NextTBTT
+ /* Set NextTBTT */
VNSvOutPortD(dwIoBase + MAC_REG_NEXTTBTT, (u32)qwTSF);
VNSvOutPortD(dwIoBase + MAC_REG_NEXTTBTT + 4, (u32)(qwTSF >> 32));
MACvRegBitsOn(dwIoBase, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
diff --git a/drivers/staging/vt6655/card.h b/drivers/staging/vt6655/card.h
index 96f5b6c46e8..2dfc4195227 100644
--- a/drivers/staging/vt6655/card.h
+++ b/drivers/staging/vt6655/card.h
@@ -29,35 +29,28 @@
#ifndef __CARD_H__
#define __CARD_H__
-#include "ttype.h"
#include <linux/types.h>
#include <linux/nl80211.h>
-//
-// Loopback mode
-//
-// LOBYTE is MAC LB mode, HIBYTE is MII LB mode
+/*
+ * Loopback mode
+ *
+ * LOBYTE is MAC LB mode, HIBYTE is MII LB mode
+ */
#define CARD_LB_NONE MAKEWORD(MAC_LB_NONE, 0)
-#define CARD_LB_MAC MAKEWORD(MAC_LB_INTERNAL, 0) // PHY must ISO, avoid MAC loopback packet go out
+#define CARD_LB_MAC MAKEWORD(MAC_LB_INTERNAL, 0) /* PHY must ISO, avoid MAC loopback packet go out */
#define CARD_LB_PHY MAKEWORD(MAC_LB_EXT, 0)
-#define DEFAULT_MSDU_LIFETIME 512 // ms
-#define DEFAULT_MSDU_LIFETIME_RES_64us 8000 // 64us
+#define DEFAULT_MSDU_LIFETIME 512 /* ms */
+#define DEFAULT_MSDU_LIFETIME_RES_64us 8000 /* 64us */
-#define DEFAULT_MGN_LIFETIME 8 // ms
-#define DEFAULT_MGN_LIFETIME_RES_64us 125 // 64us
+#define DEFAULT_MGN_LIFETIME 8 /* ms */
+#define DEFAULT_MGN_LIFETIME_RES_64us 125 /* 64us */
#define CB_MAX_CHANNEL_24G 14
#define CB_MAX_CHANNEL_5G 42
#define CB_MAX_CHANNEL (CB_MAX_CHANNEL_24G+CB_MAX_CHANNEL_5G)
-typedef enum _CARD_PHY_TYPE {
- PHY_TYPE_AUTO,
- PHY_TYPE_11B,
- PHY_TYPE_11G,
- PHY_TYPE_11A
-} CARD_PHY_TYPE, *PCARD_PHY_TYPE;
-
typedef enum _CARD_PKT_TYPE {
PKT_TYPE_802_11_BCN,
PKT_TYPE_802_11_MNG,
@@ -73,103 +66,24 @@ typedef enum _CARD_STATUS_TYPE {
struct vnt_private;
-void CARDvSetRSPINF(struct vnt_private *, CARD_PHY_TYPE ePHYType);
-void vUpdateIFS(struct vnt_private *);
+void CARDvSetRSPINF(struct vnt_private *, u8);
void CARDvUpdateBasicTopRate(struct vnt_private *);
-bool CARDbAddBasicRate(struct vnt_private *, unsigned short wRateIdx);
bool CARDbIsOFDMinBasicRate(struct vnt_private *);
-void CARDvSetLoopbackMode(void __iomem *dwIoBase, unsigned short wLoopbackMode);
+void CARDvSetLoopbackMode(struct vnt_private *, unsigned short wLoopbackMode);
bool CARDbSoftwareReset(struct vnt_private *);
-void CARDvSetFirstNextTBTT(void __iomem *dwIoBase, unsigned short wBeaconInterval);
-void CARDvUpdateNextTBTT(void __iomem *dwIoBase, u64 qwTSF, unsigned short wBeaconInterval);
-bool CARDbGetCurrentTSF(void __iomem *dwIoBase, u64 *pqwCurrTSF);
+void CARDvSetFirstNextTBTT(struct vnt_private *, unsigned short wBeaconInterval);
+void CARDvUpdateNextTBTT(struct vnt_private *, u64 qwTSF, unsigned short wBeaconInterval);
+bool CARDbGetCurrentTSF(struct vnt_private *, u64 *pqwCurrTSF);
u64 CARDqGetNextTBTT(u64 qwTSF, unsigned short wBeaconInterval);
u64 CARDqGetTSFOffset(unsigned char byRxRate, u64 qwTSF1, u64 qwTSF2);
-bool CARDbSetTxPower(struct vnt_private *, unsigned long ulTxPower);
unsigned char CARDbyGetPktType(struct vnt_private *);
void CARDvSafeResetTx(struct vnt_private *);
void CARDvSafeResetRx(struct vnt_private *);
bool CARDbRadioPowerOff(struct vnt_private *);
bool CARDbRadioPowerOn(struct vnt_private *);
-bool CARDbIsShortPreamble(struct vnt_private *);
-bool CARDbIsShorSlotTime(struct vnt_private *);
-bool CARDbSetPhyParameter(struct vnt_private *, CARD_PHY_TYPE ePHYType,
- unsigned short wCapInfo, unsigned char byERPField,
- void *pvSupportRateIEs, void *pvExtSupportRateIEs);
+bool CARDbSetPhyParameter(struct vnt_private *, u8);
bool CARDbUpdateTSF(struct vnt_private *, unsigned char byRxRate,
u64 qwBSSTimestamp, u64 qwLocalTSF);
-bool CARDbStopTxPacket(struct vnt_private *, CARD_PKT_TYPE ePktType);
-bool CARDbStartTxPacket(struct vnt_private *, CARD_PKT_TYPE ePktType);
bool CARDbSetBeaconPeriod(struct vnt_private *, unsigned short wBeaconInterval);
-bool CARDbSetBSSID(struct vnt_private *,
- unsigned char *pbyBSSID, enum nl80211_iftype);
-
-bool CARDbPowerDown(struct vnt_private *);
-
-bool CARDbSetTxDataRate(struct vnt_private *, unsigned short wDataRate);
-
-bool CARDbRemoveKey(struct vnt_private *, unsigned char *pbyBSSID);
-
-bool
-CARDbAdd_PMKID_Candidate(
- struct vnt_private *,
- unsigned char *pbyBSSID,
- bool bRSNCapExist,
- unsigned short wRSNCap
-);
-
-void *
-CARDpGetCurrentAddress(
- struct vnt_private *
-);
-
-bool
-CARDbStartMeasure(
- struct vnt_private *,
- void *pvMeasureEIDs,
- unsigned int uNumOfMeasureEIDs
-);
-
-bool
-CARDbChannelSwitch(
- struct vnt_private *,
- unsigned char byMode,
- unsigned char byNewChannel,
- unsigned char byCount
-);
-
-bool
-CARDbSetQuiet(
- struct vnt_private *,
- bool bResetQuiet,
- unsigned char byQuietCount,
- unsigned char byQuietPeriod,
- unsigned short wQuietDuration,
- unsigned short wQuietOffset
-);
-
-bool
-CARDbStartQuiet(
- struct vnt_private *
-);
-
-void
-CARDvSetPowerConstraint(
- struct vnt_private *,
- unsigned char byChannel,
- char byPower
-);
-
-void
-CARDvGetPowerCapability(
- struct vnt_private *,
- unsigned char *pbyMinPower,
- unsigned char *pbyMaxPower
-);
-
-char
-CARDbyGetTransmitPower(
- struct vnt_private *
-);
-#endif // __CARD_H__
+#endif /* __CARD_H__ */
diff --git a/drivers/staging/vt6655/channel.c b/drivers/staging/vt6655/channel.c
index 4ce964ba14b..c8f739dd346 100644
--- a/drivers/staging/vt6655/channel.c
+++ b/drivers/staging/vt6655/channel.c
@@ -21,494 +21,148 @@
*/
#include "baseband.h"
-#include "country.h"
#include "channel.h"
#include "device.h"
#include "rf.h"
-/*--------------------- Static Definitions -------------------------*/
-
-#define CARD_MAX_CHANNEL_TBL 56
-
-/*--------------------- Static Variables --------------------------*/
-
-static SChannelTblElement sChannelTbl[CARD_MAX_CHANNEL_TBL + 1] =
-{
- {0, 0, false, 0},
- {1, 2412, true, 0},
- {2, 2417, true, 0},
- {3, 2422, true, 0},
- {4, 2427, true, 0},
- {5, 2432, true, 0},
- {6, 2437, true, 0},
- {7, 2442, true, 0},
- {8, 2447, true, 0},
- {9, 2452, true, 0},
- {10, 2457, true, 0},
- {11, 2462, true, 0},
- {12, 2467, true, 0},
- {13, 2472, true, 0},
- {14, 2484, true, 0},
- {183, 4915, true, 0},
- {184, 4920, true, 0},
- {185, 4925, true, 0},
- {187, 4935, true, 0},
- {188, 4940, true, 0},
- {189, 4945, true, 0},
- {192, 4960, true, 0},
- {196, 4980, true, 0},
- {7, 5035, true, 0},
- {8, 5040, true, 0},
- {9, 5045, true, 0},
- {11, 5055, true, 0},
- {12, 5060, true, 0},
- {16, 5080, true, 0},
- {34, 5170, true, 0},
- {36, 5180, true, 0},
- {38, 5190, true, 0},
- {40, 5200, true, 0},
- {42, 5210, true, 0},
- {44, 5220, true, 0},
- {46, 5230, true, 0},
- {48, 5240, true, 0},
- {52, 5260, true, 0},
- {56, 5280, true, 0},
- {60, 5300, true, 0},
- {64, 5320, true, 0},
- {100, 5500, true, 0},
- {104, 5520, true, 0},
- {108, 5540, true, 0},
- {112, 5560, true, 0},
- {116, 5580, true, 0},
- {120, 5600, true, 0},
- {124, 5620, true, 0},
- {128, 5640, true, 0},
- {132, 5660, true, 0},
- {136, 5680, true, 0},
- {140, 5700, true, 0},
- {149, 5745, true, 0},
- {153, 5765, true, 0},
- {157, 5785, true, 0},
- {161, 5805, true, 0},
- {165, 5825, true, 0}
+static struct ieee80211_rate vnt_rates_bg[] = {
+ { .bitrate = 10, .hw_value = RATE_1M },
+ { .bitrate = 20, .hw_value = RATE_2M },
+ { .bitrate = 55, .hw_value = RATE_5M },
+ { .bitrate = 110, .hw_value = RATE_11M },
+ { .bitrate = 60, .hw_value = RATE_6M },
+ { .bitrate = 90, .hw_value = RATE_9M },
+ { .bitrate = 120, .hw_value = RATE_12M },
+ { .bitrate = 180, .hw_value = RATE_18M },
+ { .bitrate = 240, .hw_value = RATE_24M },
+ { .bitrate = 360, .hw_value = RATE_36M },
+ { .bitrate = 480, .hw_value = RATE_48M },
+ { .bitrate = 540, .hw_value = RATE_54M },
};
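In mac80211's struct ieee80211_rate the .bitrate field is in units of 100 kbit/s, so the table above maps 10 to 1 Mbit/s, 55 to 5.5 Mbit/s, and 540 to 54 Mbit/s, while .hw_value keeps the driver's own rate index. A tiny sketch of that unit convention:

/* Minimal sketch: print the table's bitrate values in Mbit/s. */
#include <stdio.h>

int main(void)
{
	unsigned int bitrates[] = { 10, 20, 55, 110, 60, 90, 120,
				    180, 240, 360, 480, 540 };
	unsigned int i;

	for (i = 0; i < sizeof(bitrates) / sizeof(bitrates[0]); i++)
		printf("%u.%u Mbit/s\n", bitrates[i] / 10, bitrates[i] % 10);
	return 0;
}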
-/************************************************************************
- * The Radar regulation rules for each country
- ************************************************************************/
-static struct
-{
- unsigned char byChannelCountryCode; /* The country code */
- char chCountryCode[2];
- unsigned char bChannelIdxList[CB_MAX_CHANNEL]; /* Available channels Index */
- unsigned char byPower[CB_MAX_CHANNEL];
-} ChannelRuleTab[] =
-{
-/************************************************************************
- * This table is based on Athero driver rules
- ************************************************************************/
-/* Country Available channels, ended with 0 */
-/* 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 */
- {CCODE_FCC, {'U' , 'S'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
- {CCODE_TELEC, {'J' , 'P'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 23, 0, 0, 23, 0, 23, 23, 0, 23, 0, 0, 23, 23, 23, 0, 23, 0, 23, 0, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_ETSI, {'E' , 'U'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
- {CCODE_RESV3, {' ' , ' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_RESV4, {' ' , ' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_RESV5, {' ' , ' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_RESV6, {' ' , ' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_RESV7, {' ' , ' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_RESV8, {' ' , ' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_RESV9, {' ' , ' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_RESVa, {' ' , ' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_RESVb, {' ' , ' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_RESVc, {' ' , ' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_RESVd, {' ' , ' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_RESVe, {' ' , ' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_ALLBAND, {' ' , ' '}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_ALBANIA, {'A' , 'L'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_ALGERIA, {'D' , 'Z'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_ARGENTINA, {'A' , 'R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 17, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 0} },
- {CCODE_ARMENIA, {'A' , 'M'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 18, 18, 18, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_AUSTRALIA, {'A' , 'U'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 0, 23, 0, 23, 0, 23, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
- {CCODE_AUSTRIA, {'A' , 'T'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 0, 15, 0, 15, 0, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_AZERBAIJAN, {'A' , 'Z'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 18, 18, 18, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_BAHRAIN, {'B' , 'H'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_BELARUS, {'B' , 'Y'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_BELGIUM, {'B' , 'E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_BELIZE, {'B' , 'Z'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
- {CCODE_BOLIVIA, {'B' , 'O'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
- {CCODE_BRAZIL, {'B' , 'R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_BRUNEI_DARUSSALAM, {'B' , 'N'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
- {CCODE_BULGARIA, {'B' , 'G'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 0, 23, 0, 23, 0, 23, 23, 23, 0, 0, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 0, 0, 0, 0, 0} },
- {CCODE_CANADA, {'C' , 'A'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
- {CCODE_CHILE, {'C' , 'L'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 17, 17, 17, 17} },
- {CCODE_CHINA, {'C' , 'N'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
- {CCODE_COLOMBIA, {'C' , 'O'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
- {CCODE_COSTA_RICA, {'C' , 'R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_CROATIA, {'H' , 'R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_CYPRUS, {'C' , 'Y'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
- {CCODE_CZECH, {'C' , 'Z'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_DENMARK, {'D' , 'K'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
- {CCODE_DOMINICAN_REPUBLIC, {'D' , 'O'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
- {CCODE_ECUADOR, {'E' , 'C'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_EGYPT, {'E' , 'G'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_EL_SALVADOR, {'S' , 'V'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_ESTONIA, {'E' , 'E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
- {CCODE_FINLAND, {'F' , 'I'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
- {CCODE_FRANCE, {'F' , 'R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_GERMANY, {'D' , 'E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
- {CCODE_GREECE, {'G' , 'R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_GEORGIA, {'G' , 'E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 18, 18, 18, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_GUATEMALA, {'G' , 'T'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
- {CCODE_HONDURAS, {'H' , 'N'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_HONG_KONG, {'H' , 'K'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 0, 23, 0, 23, 0, 23, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
- {CCODE_HUNGARY, {'H' , 'U'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_ICELAND, {'I' , 'S'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
- {CCODE_INDIA, {'I' , 'N'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_INDONESIA, {'I' , 'D'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_IRAN, {'I' , 'R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
- {CCODE_IRELAND, {'I' , 'E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
- {CCODE_ITALY, {'I' , 'T'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
- {CCODE_ISRAEL, {'I' , 'L'}, { 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_JAPAN, {'J' , 'P'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 0, 23, 0, 23, 0, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_JORDAN, {'J' , 'O'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_KAZAKHSTAN, {'K' , 'Z'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_KUWAIT, {'K' , 'W'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_LATVIA, {'L' , 'V'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_LEBANON, {'L' , 'B'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_LEICHTENSTEIN, {'L' , 'I'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_LITHUANIA, {'L' , 'T'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
- {CCODE_LUXEMBURG, {'L' , 'U'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
- {CCODE_MACAU, {'M' , 'O'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 0, 23, 0, 23, 0, 23, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
- {CCODE_MACEDONIA, {'M' , 'K'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_MALTA, {'M' , 'T'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0}
- , { 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 16, 0, 16, 0, 16, 16, 16, 16, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 16, 16, 16, 0} },
- {CCODE_MALAYSIA, {'M' , 'Y'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_MEXICO, {'M' , 'X'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
- {CCODE_MONACO, {'M' , 'C'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 18, 18, 18, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_MOROCCO, {'M' , 'A'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_NETHERLANDS, {'N' , 'L'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
- {CCODE_NEW_ZEALAND, {'N' , 'Z'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 0, 23, 0, 23, 0, 23, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
- {CCODE_NORTH_KOREA, {'K' , 'P'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 23, 23, 23, 0} },
- {CCODE_NORWAY, {'N' , 'O'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
- {CCODE_OMAN, {'O' , 'M'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_PAKISTAN, {'P' , 'K'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_PANAMA, {'P' , 'A'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
- {CCODE_PERU, {'P' , 'E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_PHILIPPINES, {'P' , 'H'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
- {CCODE_POLAND, {'P' , 'L'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
- {CCODE_PORTUGAL, {'P' , 'T'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
- {CCODE_PUERTO_RICO, {'P' , 'R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
- {CCODE_QATAR, {'Q' , 'A'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_ROMANIA, {'R' , 'O'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_RUSSIA, {'R' , 'U'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_SAUDI_ARABIA, {'S' , 'A'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_SINGAPORE, {'S' , 'G'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 20, 20, 20, 20} },
- {CCODE_SLOVAKIA, {'S' , 'K'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0}
- , { 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 16, 0, 16, 0, 16, 16, 16, 16, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 16, 16, 16, 0} },
- {CCODE_SLOVENIA, {'S' , 'I'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
- {CCODE_SOUTH_AFRICA, {'Z' , 'A'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
- {CCODE_SOUTH_KOREA, {'K' , 'R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 23, 23, 23, 0} },
- {CCODE_SPAIN, {'E' , 'S'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0}
- , { 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 16, 0, 16, 0, 16, 16, 16, 16, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 16, 16, 16, 0} },
- {CCODE_SWEDEN, {'S' , 'E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
- {CCODE_SWITZERLAND, {'C' , 'H'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_SYRIA, {'S' , 'Y'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_TAIWAN, {'T' , 'W'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 17, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 0} },
- {CCODE_THAILAND, {'T' , 'H'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 23, 23, 23, 0} },
- {CCODE_TRINIDAD_TOBAGO, {'T' , 'T'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 18, 18, 18, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_TUNISIA, {'T' , 'N'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_TURKEY, {'T' , 'R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_UK, {'G' , 'B'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
- {CCODE_UKRAINE, {'U' , 'A'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_UNITED_ARAB_EMIRATES, {'A' , 'E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_UNITED_STATES, {'U' , 'S'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
- {CCODE_URUGUAY, {'U' , 'Y'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 23, 23, 23, 0} },
- {CCODE_UZBEKISTAN, {'U' , 'Z'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_VENEZUELA, {'V' , 'E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 23, 23, 23, 0} },
- {CCODE_VIETNAM, {'V' , 'N'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_YEMEN, {'Y' , 'E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_ZIMBABWE, {'Z' , 'W'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_JAPAN_W52_W53, {'J' , 'J'}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_MAX, {'U' , 'N'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }
-/* 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 */
+static struct ieee80211_rate vnt_rates_a[] = {
+ { .bitrate = 60, .hw_value = RATE_6M },
+ { .bitrate = 90, .hw_value = RATE_9M },
+ { .bitrate = 120, .hw_value = RATE_12M },
+ { .bitrate = 180, .hw_value = RATE_18M },
+ { .bitrate = 240, .hw_value = RATE_24M },
+ { .bitrate = 360, .hw_value = RATE_36M },
+ { .bitrate = 480, .hw_value = RATE_48M },
+ { .bitrate = 540, .hw_value = RATE_54M },
};
-/*--------------------- Export Functions --------------------------*/
-
-/**
- * is_channel_valid() - Is Country Channel Valid
- * @ChanneIndex: defined as VT3253 MAC channel:
- * 1 = 2.4G channel 1
- * 2 = 2.4G channel 2
- * ...
- * 14 = 2.4G channel 14
- * 15 = 4.9G channel 183
- * 16 = 4.9G channel 184
- * .....
- * Output: true if the specified 5GHz band is allowed to be used,
- * false otherwise.
- * 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196 (Value:15 ~ 22)
- *
- * 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64,
- * 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56)
- */
-
-bool is_channel_valid(unsigned int ChannelIndex)
-{
- bool bValid;
-
- bValid = false;
- /*
- * If Channel Index is invalid, return invalid
- */
- if ((ChannelIndex > CB_MAX_CHANNEL) ||
- (ChannelIndex == 0)) {
- bValid = false;
- goto exit;
- }
+static struct ieee80211_channel vnt_channels_2ghz[] = {
+ { .center_freq = 2412, .hw_value = 1 },
+ { .center_freq = 2417, .hw_value = 2 },
+ { .center_freq = 2422, .hw_value = 3 },
+ { .center_freq = 2427, .hw_value = 4 },
+ { .center_freq = 2432, .hw_value = 5 },
+ { .center_freq = 2437, .hw_value = 6 },
+ { .center_freq = 2442, .hw_value = 7 },
+ { .center_freq = 2447, .hw_value = 8 },
+ { .center_freq = 2452, .hw_value = 9 },
+ { .center_freq = 2457, .hw_value = 10 },
+ { .center_freq = 2462, .hw_value = 11 },
+ { .center_freq = 2467, .hw_value = 12 },
+ { .center_freq = 2472, .hw_value = 13 },
+ { .center_freq = 2484, .hw_value = 14 }
+};
- bValid = sChannelTbl[ChannelIndex].bValid;
+static struct ieee80211_channel vnt_channels_5ghz[] = {
+ { .center_freq = 4915, .hw_value = 15 },
+ { .center_freq = 4920, .hw_value = 16 },
+ { .center_freq = 4925, .hw_value = 17 },
+ { .center_freq = 4935, .hw_value = 18 },
+ { .center_freq = 4940, .hw_value = 19 },
+ { .center_freq = 4945, .hw_value = 20 },
+ { .center_freq = 4960, .hw_value = 21 },
+ { .center_freq = 4980, .hw_value = 22 },
+ { .center_freq = 5035, .hw_value = 23 },
+ { .center_freq = 5040, .hw_value = 24 },
+ { .center_freq = 5045, .hw_value = 25 },
+ { .center_freq = 5055, .hw_value = 26 },
+ { .center_freq = 5060, .hw_value = 27 },
+ { .center_freq = 5080, .hw_value = 28 },
+ { .center_freq = 5170, .hw_value = 29 },
+ { .center_freq = 5180, .hw_value = 30 },
+ { .center_freq = 5190, .hw_value = 31 },
+ { .center_freq = 5200, .hw_value = 32 },
+ { .center_freq = 5210, .hw_value = 33 },
+ { .center_freq = 5220, .hw_value = 34 },
+ { .center_freq = 5230, .hw_value = 35 },
+ { .center_freq = 5240, .hw_value = 36 },
+ { .center_freq = 5260, .hw_value = 37 },
+ { .center_freq = 5280, .hw_value = 38 },
+ { .center_freq = 5300, .hw_value = 39 },
+ { .center_freq = 5320, .hw_value = 40 },
+ { .center_freq = 5500, .hw_value = 41 },
+ { .center_freq = 5520, .hw_value = 42 },
+ { .center_freq = 5540, .hw_value = 43 },
+ { .center_freq = 5560, .hw_value = 44 },
+ { .center_freq = 5580, .hw_value = 45 },
+ { .center_freq = 5600, .hw_value = 46 },
+ { .center_freq = 5620, .hw_value = 47 },
+ { .center_freq = 5640, .hw_value = 48 },
+ { .center_freq = 5660, .hw_value = 49 },
+ { .center_freq = 5680, .hw_value = 50 },
+ { .center_freq = 5700, .hw_value = 51 },
+ { .center_freq = 5745, .hw_value = 52 },
+ { .center_freq = 5765, .hw_value = 53 },
+ { .center_freq = 5785, .hw_value = 54 },
+ { .center_freq = 5805, .hw_value = 55 },
+ { .center_freq = 5825, .hw_value = 56 }
+};
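Reviewer note: the hw_value entries keep the old VT3253 channel indexing described in the removed is_channel_valid() kernel-doc above (1-14 for 2.4 GHz, 15 upward for the 4.9/5 GHz channels), while center_freq now carries the real frequency in MHz. As a purely illustrative consistency check, not part of the patch and assuming <net/mac80211.h> is already included for the new tables, cfg80211's generic conversion reproduces the same values (4.9 GHz channels 182-196 map to 4000 + 5 * ch, the rest of the 5 GHz band to 5000 + 5 * ch):

	/* Illustrative only: spot-check two table entries; the helper name is hypothetical. */
	static void __maybe_unused vnt_check_channel_table(void)
	{
		WARN_ON(ieee80211_channel_to_frequency(183, IEEE80211_BAND_5GHZ) != 4915);	/* hw_value 15 */
		WARN_ON(ieee80211_channel_to_frequency(36, IEEE80211_BAND_5GHZ) != 5180);	/* hw_value 30 */
	}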
-exit:
- return bValid;
-}
+static struct ieee80211_supported_band vnt_supported_2ghz_band = {
+ .channels = vnt_channels_2ghz,
+ .n_channels = ARRAY_SIZE(vnt_channels_2ghz),
+ .bitrates = vnt_rates_bg,
+ .n_bitrates = ARRAY_SIZE(vnt_rates_bg),
+};
-/**
- * channel_get_list() - Get Available Channel List for a given country
- * @CountryCode: The country code defined in country.h
- *
- * Output:
- * pbyChannelTable: (QWORD *) correspondent bit mask
- * of available channels
- * 0x0000000000000001 means channel 1 is supported
- * 0x0000000000000003 means channel 1,2 are supported
- * 0x000000000000000F means channel 1,2,..15 are supported
- */
+static struct ieee80211_supported_band vnt_supported_5ghz_band = {
+ .channels = vnt_channels_5ghz,
+ .n_channels = ARRAY_SIZE(vnt_channels_5ghz),
+ .bitrates = vnt_rates_a,
+ .n_bitrates = ARRAY_SIZE(vnt_rates_a),
+};
-bool channel_get_list(unsigned int uCountryCodeIdx, unsigned char *pbyChannelTable)
+void vnt_init_bands(struct vnt_private *priv)
{
- if (uCountryCodeIdx >= CCODE_MAX)
- return false;
-
- memcpy(pbyChannelTable, ChannelRuleTab[uCountryCodeIdx].bChannelIdxList, CB_MAX_CHANNEL);
-
- return true;
-}
+ struct ieee80211_channel *ch;
+ int i;
-void init_channel_table(void *pDeviceHandler)
-{
- struct vnt_private *pDevice = pDeviceHandler;
- bool bMultiBand = false;
- unsigned int ii;
+ switch (priv->byRFType) {
+ case RF_AIROHA7230:
+ case RF_UW2452:
+ case RF_NOTHING:
+ default:
+ ch = vnt_channels_5ghz;
- for (ii = 1; ii <= CARD_MAX_CHANNEL_TBL; ii++)
- sChannelTbl[ii].bValid = false;
+ for (i = 0; i < ARRAY_SIZE(vnt_channels_5ghz); i++) {
+ ch[i].max_power = 0x3f;
+ ch[i].flags = IEEE80211_CHAN_NO_HT40;
+ }
- switch (pDevice->byRFType) {
+ priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+ &vnt_supported_5ghz_band;
+ /* fallthrough */
case RF_RFMD2959:
case RF_AIROHA:
case RF_AL2230S:
case RF_UW2451:
case RF_VT3226:
- bMultiBand = false;
- break;
- case RF_AIROHA7230:
- case RF_UW2452:
- case RF_NOTHING:
- default:
- bMultiBand = true;
- break;
- }
+ ch = vnt_channels_2ghz;
- if ((pDevice->dwDiagRefCount != 0) || pDevice->b11hEnable) {
- if (bMultiBand) {
- for (ii = 0; ii < CARD_MAX_CHANNEL_TBL; ii++) {
- sChannelTbl[ii + 1].bValid = true;
- pDevice->abyRegPwr[ii + 1] = pDevice->abyOFDMDefaultPwr[ii + 1];
- pDevice->abyLocalPwr[ii + 1] = pDevice->abyOFDMDefaultPwr[ii + 1];
- }
- for (ii = 0; ii < CHANNEL_MAX_24G; ii++) {
- pDevice->abyRegPwr[ii + 1] = pDevice->abyCCKDefaultPwr[ii + 1];
- pDevice->abyLocalPwr[ii + 1] = pDevice->abyCCKDefaultPwr[ii + 1];
- }
- } else {
- for (ii = 0; ii < CHANNEL_MAX_24G; ii++) {
- //2008-8-4 <add> by chester
- if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] != 0) {
- sChannelTbl[ii + 1].bValid = true;
- pDevice->abyRegPwr[ii + 1] = pDevice->abyCCKDefaultPwr[ii + 1];
- pDevice->abyLocalPwr[ii + 1] = pDevice->abyCCKDefaultPwr[ii + 1];
- }
- }
+ for (i = 0; i < ARRAY_SIZE(vnt_channels_2ghz); i++) {
+ ch[i].max_power = 0x3f;
+ ch[i].flags = IEEE80211_CHAN_NO_HT40;
}
- } else if (pDevice->byZoneType <= CCODE_MAX) {
- if (bMultiBand) {
- for (ii = 0; ii < CARD_MAX_CHANNEL_TBL; ii++) {
- if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] != 0) {
- sChannelTbl[ii + 1].bValid = true;
- pDevice->abyRegPwr[ii + 1] = ChannelRuleTab[pDevice->byZoneType].byPower[ii];
- pDevice->abyLocalPwr[ii + 1] = ChannelRuleTab[pDevice->byZoneType].byPower[ii];
- }
- }
- } else {
- for (ii = 0; ii < CHANNEL_MAX_24G; ii++) {
- if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] != 0) {
- sChannelTbl[ii + 1].bValid = true;
- pDevice->abyRegPwr[ii + 1] = ChannelRuleTab[pDevice->byZoneType].byPower[ii];
- pDevice->abyLocalPwr[ii + 1] = ChannelRuleTab[pDevice->byZoneType].byPower[ii];
- }
- }
- }
- }
- pr_info("Zone=[%d][%c][%c]!!\n",
- pDevice->byZoneType,
- ChannelRuleTab[pDevice->byZoneType].chCountryCode[0],
- ChannelRuleTab[pDevice->byZoneType].chCountryCode[1]);
-
- for (ii = 0; ii < CARD_MAX_CHANNEL_TBL; ii++) {
- if (pDevice->abyRegPwr[ii + 1] == 0)
- pDevice->abyRegPwr[ii + 1] = pDevice->abyOFDMDefaultPwr[ii + 1];
- if (pDevice->abyLocalPwr[ii + 1] == 0)
- pDevice->abyLocalPwr[ii + 1] = pDevice->abyOFDMDefaultPwr[ii + 1];
- }
-}
-
-unsigned char get_channel_mapping(void *pDeviceHandler, unsigned char byChannelNumber, CARD_PHY_TYPE ePhyType)
-{
- unsigned int ii;
-
- if ((ePhyType == PHY_TYPE_11B) || (ePhyType == PHY_TYPE_11G))
- return byChannelNumber;
-
- for (ii = (CB_MAX_CHANNEL_24G + 1); ii <= CB_MAX_CHANNEL;) {
- if (sChannelTbl[ii].byChannelNumber == byChannelNumber)
- return (unsigned char) ii;
- ii++;
+ priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ &vnt_supported_2ghz_band;
+ break;
}
- return 0;
-}
-
-unsigned char get_channel_number(void *pDeviceHandler, unsigned char byChannelIndex)
-{
- return sChannelTbl[byChannelIndex].byChannelNumber;
}
/**
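Reviewer note: the table-driven bValid/init_channel_table machinery above is replaced by plain mac80211 band descriptors, selected per RF type in vnt_init_bands(). A rough sketch of the expected call site, which this patch does not show: the probe path would populate the bands before handing the hw to mac80211. Only vnt_init_bands() and priv->hw come from the diff; the wrapper below is hypothetical and omits error handling and the rest of the probe sequence.

	/* Hypothetical probe-time usage of the new band setup (sketch only). */
	static int vnt_register_hw(struct vnt_private *priv)
	{
		vnt_init_bands(priv);		/* fills wiphy->bands[] for the fitted RF */

		return ieee80211_register_hw(priv->hw);
	}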
@@ -528,37 +182,27 @@ bool set_channel(void *pDeviceHandler, unsigned int uConnectionChannel)
if (pDevice->byCurrentCh == uConnectionChannel)
return bResult;
- if (!sChannelTbl[uConnectionChannel].bValid)
- return false;
-
- if ((uConnectionChannel > CB_MAX_CHANNEL_24G) &&
- (pDevice->eCurrentPHYType != PHY_TYPE_11A)) {
- CARDbSetPhyParameter(pDevice, PHY_TYPE_11A, 0, 0, NULL, NULL);
- } else if ((uConnectionChannel <= CB_MAX_CHANNEL_24G) &&
- (pDevice->eCurrentPHYType == PHY_TYPE_11A)) {
- CARDbSetPhyParameter(pDevice, PHY_TYPE_11G, 0, 0, NULL, NULL);
- }
- // clear NAV
+ /* clear NAV */
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MACCR, MACCR_CLRNAV);
- //{{ RobertYu: 20041202
- //// TX_PE will reserve 3 us for MAX2829 A mode only, it is for better TX throughput
+ /* TX_PE will reserve 3 us for MAX2829 A mode only, it is for better TX throughput */
if (pDevice->byRFType == RF_AIROHA7230)
- RFbAL7230SelectChannelPostProcess(pDevice->PortOffset, pDevice->byCurrentCh, (unsigned char)uConnectionChannel);
- //}} RobertYu
+ RFbAL7230SelectChannelPostProcess(pDevice, pDevice->byCurrentCh,
+ (unsigned char)uConnectionChannel);
pDevice->byCurrentCh = (unsigned char)uConnectionChannel;
- bResult &= RFbSelectChannel(pDevice->PortOffset, pDevice->byRFType, (unsigned char)uConnectionChannel);
+ bResult &= RFbSelectChannel(pDevice, pDevice->byRFType,
+ (unsigned char)uConnectionChannel);
- // Init Synthesizer Table
+ /* Init Synthesizer Table */
if (pDevice->bEnablePSMode)
- RFvWriteWakeProgSyn(pDevice->PortOffset, pDevice->byRFType, uConnectionChannel);
+ RFvWriteWakeProgSyn(pDevice, pDevice->byRFType, uConnectionChannel);
- BBvSoftwareReset(pDevice->PortOffset);
+ BBvSoftwareReset(pDevice);
if (pDevice->byLocalID > REV_ID_VT3253_B1) {
- // set HW default power register
+ /* set HW default power register */
MACvSelectPage1(pDevice->PortOffset);
RFbSetPower(pDevice, RATE_1M, pDevice->byCurrentCh);
VNSvOutPortB(pDevice->PortOffset + MAC_REG_PWRCCK, pDevice->byCurPwr);
@@ -567,242 +211,10 @@ bool set_channel(void *pDeviceHandler, unsigned int uConnectionChannel)
MACvSelectPage0(pDevice->PortOffset);
}
- if (pDevice->eCurrentPHYType == PHY_TYPE_11B)
+ if (pDevice->byBBType == BB_TYPE_11B)
RFbSetPower(pDevice, RATE_1M, pDevice->byCurrentCh);
else
RFbSetPower(pDevice, RATE_6M, pDevice->byCurrentCh);
return bResult;
}
-
-/**
- * set_country_info() - Set Channel Info of Country
- *
- * Return Value: none.
- *
- */
-
-void set_country_info(void *pDeviceHandler, CARD_PHY_TYPE ePHYType, void *pIE)
-{
- struct vnt_private *pDevice = pDeviceHandler;
- unsigned int ii = 0;
- unsigned int uu = 0;
- unsigned int step = 0;
- unsigned int uNumOfCountryInfo = 0;
- unsigned char byCh = 0;
- PWLAN_IE_COUNTRY pIE_Country = (PWLAN_IE_COUNTRY) pIE;
-
- uNumOfCountryInfo = (pIE_Country->len - 3);
- uNumOfCountryInfo /= 3;
-
- if (ePHYType == PHY_TYPE_11A) {
- pDevice->bCountryInfo5G = true;
- for (ii = CB_MAX_CHANNEL_24G + 1; ii <= CARD_MAX_CHANNEL_TBL; ii++)
- sChannelTbl[ii].bValid = false;
-
- step = 4;
- } else {
- pDevice->bCountryInfo24G = true;
- for (ii = 1; ii <= CB_MAX_CHANNEL_24G; ii++)
- sChannelTbl[ii].bValid = false;
-
- step = 1;
- }
- pDevice->abyCountryCode[0] = pIE_Country->abyCountryString[0];
- pDevice->abyCountryCode[1] = pIE_Country->abyCountryString[1];
- pDevice->abyCountryCode[2] = pIE_Country->abyCountryString[2];
-
- for (ii = 0; ii < uNumOfCountryInfo; ii++) {
- for (uu = 0; uu < pIE_Country->abyCountryInfo[ii*3+1]; uu++) {
- byCh = get_channel_mapping(pDevice, (unsigned char)(pIE_Country->abyCountryInfo[ii*3]+step*uu), ePHYType);
- sChannelTbl[byCh].bValid = true;
- pDevice->abyRegPwr[byCh] = pIE_Country->abyCountryInfo[ii*3+2];
- }
- }
-}
-
-/**
- *
- * set_support_channels() - Set Support Channels IE defined in 802.11h
- *
- * @hDeviceContext: device structure point
- *
- * Return Value: none.
- *
- */
-
-unsigned char set_support_channels(void *pDeviceHandler, unsigned char *pbyIEs)
-{
- struct vnt_private *pDevice = pDeviceHandler;
- unsigned int ii;
- unsigned char byCount;
- PWLAN_IE_SUPP_CH pIE = (PWLAN_IE_SUPP_CH) pbyIEs;
- unsigned char *pbyChTupple;
- unsigned char byLen = 0;
-
- pIE->byElementID = WLAN_EID_SUPP_CH;
- pIE->len = 0;
- pbyChTupple = pIE->abyChannelTuple;
- byLen = 2;
- // lower band
- byCount = 0;
- if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[28] == true) {
- for (ii = 28; ii < 36; ii += 2) {
- if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] == true)
- byCount++;
- }
-
- *pbyChTupple++ = 34;
- *pbyChTupple++ = byCount;
- byLen += 2;
- } else if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[29] == true) {
- for (ii = 29; ii < 36; ii += 2) {
- if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] == true)
- byCount++;
- }
-
- *pbyChTupple++ = 36;
- *pbyChTupple++ = byCount;
- byLen += 2;
- }
- // middle band
- byCount = 0;
- if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[36] == true) {
- for (ii = 36; ii < 40; ii++) {
- if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] == true)
- byCount++;
- }
-
- *pbyChTupple++ = 52;
- *pbyChTupple++ = byCount;
- byLen += 2;
- }
- // higher band
- byCount = 0;
- if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[40] == true) {
- for (ii = 40; ii < 51; ii++) {
- if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] == true)
- byCount++;
- }
-
- *pbyChTupple++ = 100;
- *pbyChTupple++ = byCount;
- byLen += 2;
- } else if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[51] == true) {
- for (ii = 51; ii < 56; ii++) {
- if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] == true)
- byCount++;
- }
-
- *pbyChTupple++ = 149;
- *pbyChTupple++ = byCount;
- byLen += 2;
- }
- pIE->len += (byLen - 2);
- return byLen;
-}
-
-void set_country_IE(void *pDeviceHandler, void *pIE)
-{
- struct vnt_private *pDevice = pDeviceHandler;
- unsigned int ii;
- PWLAN_IE_COUNTRY pIECountry = (PWLAN_IE_COUNTRY) pIE;
-
- pIECountry->byElementID = WLAN_EID_COUNTRY;
- pIECountry->len = 0;
- pIECountry->abyCountryString[0] = ChannelRuleTab[pDevice->byZoneType].chCountryCode[0];
- pIECountry->abyCountryString[1] = ChannelRuleTab[pDevice->byZoneType].chCountryCode[1];
- pIECountry->abyCountryString[2] = ' ';
- for (ii = CB_MAX_CHANNEL_24G; ii < CB_MAX_CHANNEL; ii++) {
- if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] != 0) {
- pIECountry->abyCountryInfo[pIECountry->len++] = sChannelTbl[ii + 1].byChannelNumber;
- pIECountry->abyCountryInfo[pIECountry->len++] = 1;
- pIECountry->abyCountryInfo[pIECountry->len++] = ChannelRuleTab[pDevice->byZoneType].byPower[ii];
- }
- }
- pIECountry->len += 3;
-}
-
-bool get_channel_map_info(void *pDeviceHandler, unsigned int uChannelIndex,
- unsigned char *pbyChannelNumber, unsigned char *pbyMap)
-{
- if (uChannelIndex > CB_MAX_CHANNEL)
- return false;
-
- *pbyChannelNumber = sChannelTbl[uChannelIndex].byChannelNumber;
- *pbyMap = sChannelTbl[uChannelIndex].byMAP;
- return sChannelTbl[uChannelIndex].bValid;
-}
-
-void set_channel_map_info(void *pDeviceHandler, unsigned int uChannelIndex,
- unsigned char byMap)
-{
- if (uChannelIndex > CB_MAX_CHANNEL)
- return;
-
- sChannelTbl[uChannelIndex].byMAP |= byMap;
-}
-
-void clear_channel_map_info(void *pDeviceHandler)
-{
- unsigned int ii = 0;
-
- for (ii = 1; ii <= CB_MAX_CHANNEL; ii++)
- sChannelTbl[ii].byMAP = 0;
-}
-
-unsigned char auto_channel_select(void *pDeviceHandler, CARD_PHY_TYPE ePHYType)
-{
- unsigned int ii = 0;
- unsigned char byOptionChannel = 0;
- int aiWeight[CB_MAX_CHANNEL_24G + 1] = {-1000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
-
- if (ePHYType == PHY_TYPE_11A) {
- for (ii = CB_MAX_CHANNEL_24G + 1; ii <= CB_MAX_CHANNEL; ii++) {
- if (sChannelTbl[ii].bValid) {
- if (byOptionChannel == 0)
- byOptionChannel = (unsigned char) ii;
-
- if (sChannelTbl[ii].byMAP == 0)
- return (unsigned char) ii;
- else if (!(sChannelTbl[ii].byMAP & 0x08))
- byOptionChannel = (unsigned char) ii;
- }
- }
- } else {
- byOptionChannel = 0;
- for (ii = 1; ii <= CB_MAX_CHANNEL_24G; ii++) {
- if (sChannelTbl[ii].bValid) {
- if (sChannelTbl[ii].byMAP == 0) {
- aiWeight[ii] += 100;
- } else if (sChannelTbl[ii].byMAP & 0x01) {
- if (ii > 3)
- aiWeight[ii - 3] -= 10;
-
- if (ii > 2)
- aiWeight[ii - 2] -= 20;
-
- if (ii > 1)
- aiWeight[ii - 1] -= 40;
-
- aiWeight[ii] -= 80;
- if (ii < CB_MAX_CHANNEL_24G)
- aiWeight[ii + 1] -= 40;
-
- if (ii < (CB_MAX_CHANNEL_24G - 1))
- aiWeight[ii+2] -= 20;
-
- if (ii < (CB_MAX_CHANNEL_24G - 2))
- aiWeight[ii+3] -= 10;
- }
- }
- }
- for (ii = 1; ii <= CB_MAX_CHANNEL_24G; ii++) {
- if (sChannelTbl[ii].bValid &&
- (aiWeight[ii] > aiWeight[byOptionChannel])) {
- byOptionChannel = (unsigned char) ii;
- }
- }
- }
- return byOptionChannel;
-}
diff --git a/drivers/staging/vt6655/channel.h b/drivers/staging/vt6655/channel.h
index 4f44c8a3d3c..4f4264e2346 100644
--- a/drivers/staging/vt6655/channel.h
+++ b/drivers/staging/vt6655/channel.h
@@ -23,30 +23,10 @@
#ifndef _CHANNEL_H_
#define _CHANNEL_H_
-#include "ttype.h"
#include "card.h"
-typedef struct tagSChannelTblElement {
- unsigned char byChannelNumber;
- unsigned int uFrequency;
- bool bValid;
- unsigned char byMAP;
-} SChannelTblElement, *PSChannelTblElement;
+void vnt_init_bands(struct vnt_private *);
-bool is_channel_valid(unsigned int CountryCode);
-void init_channel_table(void *pDeviceHandler);
-unsigned char get_channel_mapping(void *pDeviceHandler, unsigned char byChannelNumber, CARD_PHY_TYPE ePhyType);
-bool channel_get_list(unsigned int uCountryCodeIdx, unsigned char *pbyChannelTable);
-unsigned char get_channel_number(void *pDeviceHandler, unsigned char byChannelIndex);
bool set_channel(void *pDeviceHandler, unsigned int uConnectionChannel);
-void set_country_info(void *pDeviceHandler, CARD_PHY_TYPE ePHYType, void *pIE);
-unsigned char set_support_channels(void *pDeviceHandler, unsigned char *pbyIEs);
-void set_country_IE(void *pDeviceHandler, void *pIE);
-bool get_channel_map_info(void *pDeviceHandler, unsigned int uChannelIndex,
- unsigned char *pbyChannelNumber, unsigned char *pbyMap);
-void set_channel_map_info(void *pDeviceHandler, unsigned int uChannelIndex,
- unsigned char byMap);
-void clear_channel_map_info(void *pDeviceHandler);
-unsigned char auto_channel_select(void *pDeviceHandler, CARD_PHY_TYPE ePHYType);
#endif /* _CHANNEL_H_ */
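Reviewer note: channel.h now exports only vnt_init_bands() and set_channel(). With the validity table gone, set_channel() no longer filters channels itself; mac80211 is expected to hand the driver only channels exposed in the band tables. A hedged sketch of how the remaining set_channel() entry point would typically be driven from a mac80211 ->config callback — the callback is not part of this patch, and the field accesses assume the standard struct ieee80211_conf layout:

	/* Sketch only: tune the radio when mac80211 changes the channel. */
	static int vnt_config(struct ieee80211_hw *hw, u32 changed)
	{
		struct vnt_private *priv = hw->priv;

		if (changed & IEEE80211_CONF_CHANGE_CHANNEL)
			set_channel(priv, hw->conf.chandef.chan->hw_value);

		return 0;
	}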
diff --git a/drivers/staging/vt6655/country.h b/drivers/staging/vt6655/country.h
deleted file mode 100644
index 2365fb13b03..00000000000
--- a/drivers/staging/vt6655/country.h
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: country.h
- *
- * Purpose: Country Code information
- *
- * Author: Lucas Lin
- *
- * Date: Dec 23, 2004
- *
- */
-
-#ifndef __COUNTRY_H__
-#define __COUNTRY_H__
-
-#include "ttype.h"
-
-/************************************************************************
- * The definition here should be complied with the INF country order
- * Please check with VNWL.inf/VNWL64.inf/VNWL*.inf
- ************************************************************************/
-typedef enum _COUNTRY_CODE {
- CCODE_FCC = 0,
- CCODE_TELEC,
- CCODE_ETSI,
- CCODE_RESV3,
- CCODE_RESV4,
- CCODE_RESV5,
- CCODE_RESV6,
- CCODE_RESV7,
- CCODE_RESV8,
- CCODE_RESV9,
- CCODE_RESVa,
- CCODE_RESVb,
- CCODE_RESVc,
- CCODE_RESVd,
- CCODE_RESVe,
- CCODE_ALLBAND,
- CCODE_ALBANIA,
- CCODE_ALGERIA,
- CCODE_ARGENTINA,
- CCODE_ARMENIA,
- CCODE_AUSTRALIA,
- CCODE_AUSTRIA,
- CCODE_AZERBAIJAN,
- CCODE_BAHRAIN,
- CCODE_BELARUS,
- CCODE_BELGIUM,
- CCODE_BELIZE,
- CCODE_BOLIVIA,
- CCODE_BRAZIL,
- CCODE_BRUNEI_DARUSSALAM,
- CCODE_BULGARIA,
- CCODE_CANADA,
- CCODE_CHILE,
- CCODE_CHINA,
- CCODE_COLOMBIA,
- CCODE_COSTA_RICA,
- CCODE_CROATIA,
- CCODE_CYPRUS,
- CCODE_CZECH,
- CCODE_DENMARK,
- CCODE_DOMINICAN_REPUBLIC,
- CCODE_ECUADOR,
- CCODE_EGYPT,
- CCODE_EL_SALVADOR,
- CCODE_ESTONIA,
- CCODE_FINLAND,
- CCODE_FRANCE,
- CCODE_GERMANY,
- CCODE_GREECE,
- CCODE_GEORGIA,
- CCODE_GUATEMALA,
- CCODE_HONDURAS,
- CCODE_HONG_KONG,
- CCODE_HUNGARY,
- CCODE_ICELAND,
- CCODE_INDIA,
- CCODE_INDONESIA,
- CCODE_IRAN,
- CCODE_IRELAND,
- CCODE_ITALY,
- CCODE_ISRAEL,
- CCODE_JAPAN,
- CCODE_JORDAN,
- CCODE_KAZAKHSTAN,
- CCODE_KUWAIT,
- CCODE_LATVIA,
- CCODE_LEBANON,
- CCODE_LEICHTENSTEIN,
- CCODE_LITHUANIA,
- CCODE_LUXEMBURG,
- CCODE_MACAU,
- CCODE_MACEDONIA,
- CCODE_MALTA,
- CCODE_MALAYSIA,
- CCODE_MEXICO,
- CCODE_MONACO,
- CCODE_MOROCCO,
- CCODE_NETHERLANDS,
- CCODE_NEW_ZEALAND,
- CCODE_NORTH_KOREA,
- CCODE_NORWAY,
- CCODE_OMAN,
- CCODE_PAKISTAN,
- CCODE_PANAMA,
- CCODE_PERU,
- CCODE_PHILIPPINES,
- CCODE_POLAND,
- CCODE_PORTUGAL,
- CCODE_PUERTO_RICO,
- CCODE_QATAR,
- CCODE_ROMANIA,
- CCODE_RUSSIA,
- CCODE_SAUDI_ARABIA,
- CCODE_SINGAPORE,
- CCODE_SLOVAKIA,
- CCODE_SLOVENIA,
- CCODE_SOUTH_AFRICA,
- CCODE_SOUTH_KOREA,
- CCODE_SPAIN,
- CCODE_SWEDEN,
- CCODE_SWITZERLAND,
- CCODE_SYRIA,
- CCODE_TAIWAN,
- CCODE_THAILAND,
- CCODE_TRINIDAD_TOBAGO,
- CCODE_TUNISIA,
- CCODE_TURKEY,
- CCODE_UK,
- CCODE_UKRAINE,
- CCODE_UNITED_ARAB_EMIRATES,
- CCODE_UNITED_STATES,
- CCODE_URUGUAY,
- CCODE_UZBEKISTAN,
- CCODE_VENEZUELA,
- CCODE_VIETNAM,
- CCODE_YEMEN,
- CCODE_ZIMBABWE,
- CCODE_JAPAN_W52_W53,
- CCODE_MAX
-} COUNTRY_CODE;
-
-#endif /* __COUNTRY_H__ */
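Reviewer note: with country.h deleted, the driver no longer carries its own per-country tables; channel restrictions fall to the cfg80211 regulatory core. If the hardware still reports a programmed country code, a driver in this position would typically forward it as a regulatory hint, along the lines of the sketch below. This call is an assumption about the follow-up conversion, not something the patch adds, and the helper name is made up for illustration.

	/* Hypothetical: forward an EEPROM country code ("US", "JP", ...) to cfg80211. */
	static int vnt_apply_country(struct vnt_private *priv, const char *alpha2)
	{
		return regulatory_hint(priv->hw->wiphy, alpha2);
	}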
diff --git a/drivers/staging/vt6655/datarate.c b/drivers/staging/vt6655/datarate.c
deleted file mode 100644
index 52907a4fae9..00000000000
--- a/drivers/staging/vt6655/datarate.c
+++ /dev/null
@@ -1,410 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: datarate.c
- *
- * Purpose: Handles the auto fallback & data rates functions
- *
- * Author: Lyndon Chen
- *
- * Date: July 17, 2002
- *
- * Functions:
- * RATEvParseMaxRate - Parsing the highest basic & support rate in rate field of frame
- * RATEvTxRateFallBack - Rate fallback Algorithm Implementaion
- * RATEuSetIE- Set rate IE field.
- *
- * Revision History:
- *
- */
-
-#include "ttype.h"
-#include "tmacro.h"
-#include "mac.h"
-#include "80211mgr.h"
-#include "bssdb.h"
-#include "datarate.h"
-#include "card.h"
-#include "baseband.h"
-#include "srom.h"
-
-/*--------------------- Static Definitions -------------------------*/
-
-/*--------------------- Static Classes ----------------------------*/
-
-extern unsigned short TxRate_iwconfig; /* 2008-5-8 <add> by chester */
-/*--------------------- Static Variables --------------------------*/
-static const unsigned char acbyIERate[MAX_RATE] = {
-0x02, 0x04, 0x0B, 0x16, 0x0C, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C
-};
-
-#define AUTORATE_TXOK_CNT 0x0400
-#define AUTORATE_TXFAIL_CNT 0x0064
-#define AUTORATE_TIMEOUT 10
-
-/*--------------------- Static Functions --------------------------*/
-
-void s_vResetCounter(
- PKnownNodeDB psNodeDBTable
-);
-
-void
-s_vResetCounter(
- PKnownNodeDB psNodeDBTable
-)
-{
- unsigned char ii;
-
- /* clear statistic counter for auto_rate */
- for (ii = 0; ii <= MAX_RATE; ii++) {
- psNodeDBTable->uTxOk[ii] = 0;
- psNodeDBTable->uTxFail[ii] = 0;
- }
-}
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-/*+
- *
- * Description:
- * Get RateIdx from the value in SuppRates IE or ExtSuppRates IE
- *
- * Parameters:
- * In:
- * unsigned char - Rate value in SuppRates IE or ExtSuppRates IE
- * Out:
- * none
- *
- * Return Value: RateIdx
- *
- -*/
-unsigned char
-DATARATEbyGetRateIdx(
- unsigned char byRate
-)
-{
- unsigned char ii;
-
- /* Erase basicRate flag. */
- byRate = byRate & 0x7F;/* 0111 1111 */
-
- for (ii = 0; ii < MAX_RATE; ii++) {
- if (acbyIERate[ii] == byRate)
- return ii;
- }
- return 0;
-}
-
-/*+
- *
- * Routine Description:
- * Rate fallback Algorithm Implementation
- *
- * Parameters:
- * In:
- * pDevice - Pointer to the adapter
- * psNodeDBTable - Pointer to Node Data Base
- * Out:
- * none
- *
- * Return Value: none
- *
- -*/
-#define AUTORATE_TXCNT_THRESHOLD 20
-#define AUTORATE_INC_THRESHOLD 30
-
-/*+
- *
- * Description:
- * Get RateIdx from the value in SuppRates IE or ExtSuppRates IE
- *
- * Parameters:
- * In:
- * unsigned char - Rate value in SuppRates IE or ExtSuppRates IE
- * Out:
- * none
- *
- * Return Value: RateIdx
- *
- -*/
-unsigned short
-wGetRateIdx(
- unsigned char byRate
-)
-{
- unsigned short ii;
-
- /* Erase basicRate flag. */
- byRate = byRate & 0x7F;/* 0111 1111 */
-
- for (ii = 0; ii < MAX_RATE; ii++) {
- if (acbyIERate[ii] == byRate)
- return ii;
- }
-
- return 0;
-}
-
-/*+
- *
- * Description:
- * Parsing the highest basic & support rate in rate field of frame.
- *
- * Parameters:
- * In:
- * pDevice - Pointer to the adapter
- * pItemRates - Pointer to Rate field defined in 802.11 spec.
- * pItemExtRates - Pointer to Extended Rate field defined in 802.11 spec.
- * Out:
- * pwMaxBasicRate - Maximum Basic Rate
- * pwMaxSuppRate - Maximum Supported Rate
- * pbyTopCCKRate - Maximum Basic Rate in CCK mode
- * pbyTopOFDMRate - Maximum Basic Rate in OFDM mode
- *
- * Return Value: none
- *
- -*/
-void
-RATEvParseMaxRate(
- void *pDeviceHandler,
- PWLAN_IE_SUPP_RATES pItemRates,
- PWLAN_IE_SUPP_RATES pItemExtRates,
- bool bUpdateBasicRate,
- unsigned short *pwMaxBasicRate,
- unsigned short *pwMaxSuppRate,
- unsigned short *pwSuppRate,
- unsigned char *pbyTopCCKRate,
- unsigned char *pbyTopOFDMRate
-)
-{
- struct vnt_private *pDevice = pDeviceHandler;
- unsigned int ii;
- unsigned char byHighSuppRate = 0;
- unsigned char byRate = 0;
- unsigned short wOldBasicRate = pDevice->wBasicRate;
- unsigned int uRateLen;
-
- if (pItemRates == NULL)
- return;
-
- *pwSuppRate = 0;
- uRateLen = pItemRates->len;
-
- pr_debug("ParseMaxRate Len: %d\n", uRateLen);
- if (pDevice->eCurrentPHYType != PHY_TYPE_11B) {
- if (uRateLen > WLAN_RATES_MAXLEN)
- uRateLen = WLAN_RATES_MAXLEN;
- } else {
- if (uRateLen > WLAN_RATES_MAXLEN_11B)
- uRateLen = WLAN_RATES_MAXLEN_11B;
- }
-
- for (ii = 0; ii < uRateLen; ii++) {
- byRate = (unsigned char)(pItemRates->abyRates[ii]);
- if (WLAN_MGMT_IS_BASICRATE(byRate) && bUpdateBasicRate) {
- /* Add to basic rate set, update pDevice->byTopCCKBasicRate and pDevice->byTopOFDMBasicRate */
- CARDbAddBasicRate((void *)pDevice, wGetRateIdx(byRate));
- pr_debug("ParseMaxRate AddBasicRate: %d\n",
- wGetRateIdx(byRate));
- }
- byRate = (unsigned char)(pItemRates->abyRates[ii]&0x7F);
- if (byHighSuppRate == 0)
- byHighSuppRate = byRate;
- if (byRate > byHighSuppRate)
- byHighSuppRate = byRate;
- *pwSuppRate |= (1<<wGetRateIdx(byRate));
- }
- if ((pItemExtRates != NULL) && (pItemExtRates->byElementID == WLAN_EID_EXTSUPP_RATES) &&
- (pDevice->eCurrentPHYType != PHY_TYPE_11B)) {
- unsigned int uExtRateLen = pItemExtRates->len;
-
- if (uExtRateLen > WLAN_RATES_MAXLEN)
- uExtRateLen = WLAN_RATES_MAXLEN;
-
- for (ii = 0; ii < uExtRateLen; ii++) {
- byRate = (unsigned char)(pItemExtRates->abyRates[ii]);
- /* select highest basic rate */
- if (WLAN_MGMT_IS_BASICRATE(pItemExtRates->abyRates[ii])) {
- /* Add to basic rate set, update pDevice->byTopCCKBasicRate and pDevice->byTopOFDMBasicRate */
- CARDbAddBasicRate((void *)pDevice, wGetRateIdx(byRate));
- pr_debug("ParseMaxRate AddBasicRate: %d\n",
- wGetRateIdx(byRate));
- }
- byRate = (unsigned char)(pItemExtRates->abyRates[ii]&0x7F);
- if (byHighSuppRate == 0)
- byHighSuppRate = byRate;
- if (byRate > byHighSuppRate)
- byHighSuppRate = byRate;
- *pwSuppRate |= (1<<wGetRateIdx(byRate));
- }
- }
-
- if ((pDevice->byPacketType == PK_TYPE_11GB) && CARDbIsOFDMinBasicRate((void *)pDevice))
- pDevice->byPacketType = PK_TYPE_11GA;
-
- *pbyTopCCKRate = pDevice->byTopCCKBasicRate;
- *pbyTopOFDMRate = pDevice->byTopOFDMBasicRate;
- *pwMaxSuppRate = wGetRateIdx(byHighSuppRate);
- if ((pDevice->byPacketType == PK_TYPE_11B) || (pDevice->byPacketType == PK_TYPE_11GB))
- *pwMaxBasicRate = pDevice->byTopCCKBasicRate;
- else
- *pwMaxBasicRate = pDevice->byTopOFDMBasicRate;
- if (wOldBasicRate != pDevice->wBasicRate)
- CARDvSetRSPINF((void *)pDevice, pDevice->eCurrentPHYType);
-
- pr_debug("Exit ParseMaxRate\n");
-}
-
-/*+
- *
- * Routine Description:
- * Rate fallback Algorithm Implementaion
- *
- * Parameters:
- * In:
- * pDevice - Pointer to the adapter
- * psNodeDBTable - Pointer to Node Data Base
- * Out:
- * none
- *
- * Return Value: none
- *
- -*/
-#define AUTORATE_TXCNT_THRESHOLD 20
-#define AUTORATE_INC_THRESHOLD 30
-
-void
-RATEvTxRateFallBack(
- void *pDeviceHandler,
- PKnownNodeDB psNodeDBTable
-)
-{
- struct vnt_private *pDevice = pDeviceHandler;
- unsigned short wIdxDownRate = 0;
- unsigned int ii;
- bool bAutoRate[MAX_RATE] = {true, true, true, true, false, false, true, true, true, true, true, true};
- unsigned long dwThroughputTbl[MAX_RATE] = {10, 20, 55, 110, 60, 90, 120, 180, 240, 360, 480, 540};
- unsigned long dwThroughput = 0;
- unsigned short wIdxUpRate = 0;
- unsigned long dwTxDiff = 0;
-
- if (pDevice->pMgmt->eScanState != WMAC_NO_SCANNING)
- /* Don't do Fallback when scanning Channel */
- return;
-
- psNodeDBTable->uTimeCount++;
-
- if (psNodeDBTable->uTxFail[MAX_RATE] > psNodeDBTable->uTxOk[MAX_RATE])
- dwTxDiff = psNodeDBTable->uTxFail[MAX_RATE] - psNodeDBTable->uTxOk[MAX_RATE];
-
- if ((psNodeDBTable->uTxOk[MAX_RATE] < AUTORATE_TXOK_CNT) &&
- (dwTxDiff < AUTORATE_TXFAIL_CNT) &&
- (psNodeDBTable->uTimeCount < AUTORATE_TIMEOUT)) {
- return;
- }
-
- if (psNodeDBTable->uTimeCount >= AUTORATE_TIMEOUT)
- psNodeDBTable->uTimeCount = 0;
-
- for (ii = 0; ii < MAX_RATE; ii++) {
- if (psNodeDBTable->wSuppRate & (0x0001<<ii)) {
- if (bAutoRate[ii])
- wIdxUpRate = (unsigned short) ii;
-
- } else {
- bAutoRate[ii] = false;
- }
- }
-
- for (ii = 0; ii <= psNodeDBTable->wTxDataRate; ii++) {
- if ((psNodeDBTable->uTxOk[ii] != 0) ||
- (psNodeDBTable->uTxFail[ii] != 0)) {
- dwThroughputTbl[ii] *= psNodeDBTable->uTxOk[ii];
- if (ii < RATE_11M)
- psNodeDBTable->uTxFail[ii] *= 4;
-
- dwThroughputTbl[ii] /= (psNodeDBTable->uTxOk[ii] + psNodeDBTable->uTxFail[ii]);
- }
- }
- dwThroughput = dwThroughputTbl[psNodeDBTable->wTxDataRate];
-
- wIdxDownRate = psNodeDBTable->wTxDataRate;
- for (ii = psNodeDBTable->wTxDataRate; ii > 0;) {
- ii--;
- if ((dwThroughputTbl[ii] > dwThroughput) && bAutoRate[ii]) {
- dwThroughput = dwThroughputTbl[ii];
- wIdxDownRate = (unsigned short) ii;
- }
- }
- psNodeDBTable->wTxDataRate = wIdxDownRate;
- if (psNodeDBTable->uTxOk[MAX_RATE]) {
- if (psNodeDBTable->uTxOk[MAX_RATE] >
- (psNodeDBTable->uTxFail[MAX_RATE] * 4)) {
- psNodeDBTable->wTxDataRate = wIdxUpRate;
- }
- } else {
- /* adhoc, if uTxOk =0 & uTxFail = 0 */
- if (psNodeDBTable->uTxFail[MAX_RATE] == 0)
- psNodeDBTable->wTxDataRate = wIdxUpRate;
- }
-
- /* 2008-5-8 <add> by chester */
- TxRate_iwconfig = psNodeDBTable->wTxDataRate;
- s_vResetCounter(psNodeDBTable);
-}
-
-/*+
- *
- * Description:
- * This routine is used to assemble available Rate IE.
- *
- * Parameters:
- * In:
- * pDevice
- * Out:
- *
- * Return Value: None
- *
- -*/
-unsigned char
-RATEuSetIE(
- PWLAN_IE_SUPP_RATES pSrcRates,
- PWLAN_IE_SUPP_RATES pDstRates,
- unsigned int uRateLen
-)
-{
- unsigned int ii, uu, uRateCnt = 0;
-
- if ((pSrcRates == NULL) || (pDstRates == NULL))
- return 0;
-
- if (pSrcRates->len == 0)
- return 0;
-
- for (ii = 0; ii < uRateLen; ii++) {
- for (uu = 0; uu < pSrcRates->len; uu++) {
- if ((pSrcRates->abyRates[uu] & 0x7F) == acbyIERate[ii]) {
- pDstRates->abyRates[uRateCnt++] = pSrcRates->abyRates[uu];
- break;
- }
- }
- }
- return (unsigned char)uRateCnt;
-}
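Reviewer note: dropping datarate.c removes the driver's home-grown auto-fallback (RATEvTxRateFallBack) and rate-IE helpers; under mac80211 the rate-control algorithm (minstrel by default) picks the transmit rate and the driver only consumes it in its tx path. A minimal sketch of that consumption, assuming the usual tx_info layout — none of these helper names are introduced by this patch, and RATE_1M is reused from the driver's existing rate constants:

	/* Sketch only: read the rate chosen by mac80211's rate control for a frame. */
	static u16 vnt_tx_rate_for_skb(struct ieee80211_hw *hw, struct sk_buff *skb)
	{
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		struct ieee80211_rate *rate = ieee80211_get_tx_rate(hw, info);

		/* hw_value holds the driver rate code (RATE_6M ... RATE_54M) set in the
		 * band tables; fall back to 1M if rate control gave no valid index.
		 */
		return rate ? rate->hw_value : RATE_1M;
	}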
diff --git a/drivers/staging/vt6655/datarate.h b/drivers/staging/vt6655/datarate.h
deleted file mode 100644
index 0509c4fd2a4..00000000000
--- a/drivers/staging/vt6655/datarate.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: datarate.h
- *
- * Purpose: Handles the auto fallback & data rates functions
- *
- * Author: Lyndon Chen
- *
- * Date: July 16, 2002
- *
- */
-#ifndef __DATARATE_H__
-#define __DATARATE_H__
-
-#define FALLBACK_PKT_COLLECT_TR_H 50
-#define FALLBACK_PKT_COLLECT_TR_L 10
-#define FALLBACK_POLL_SECOND 5
-#define FALLBACK_RECOVER_SECOND 30
-#define FALLBACK_THRESHOLD 15
-#define UPGRADE_THRESHOLD 5
-#define UPGRADE_CNT_THRD 3
-#define RETRY_TIMES_THRD_H 2
-#define RETRY_TIMES_THRD_L 1
-
-void
-RATEvParseMaxRate(
- void *pDeviceHandler,
- PWLAN_IE_SUPP_RATES pItemRates,
- PWLAN_IE_SUPP_RATES pItemExtRates,
- bool bUpdateBasicRate,
- unsigned short *pwMaxBasicRate,
- unsigned short *pwMaxSuppRate,
- unsigned short *pwSuppRate,
- unsigned char *pbyTopCCKRate,
- unsigned char *pbyTopOFDMRate
-);
-
-void
-RATEvTxRateFallBack(
- void *pDeviceHandler,
- PKnownNodeDB psNodeDBTable
-);
-
-unsigned char
-RATEuSetIE(
- PWLAN_IE_SUPP_RATES pSrcRates,
- PWLAN_IE_SUPP_RATES pDstRates,
- unsigned int uRateLen
-);
-
-unsigned short
-wGetRateIdx(
- unsigned char byRate
-);
-
-unsigned char
-DATARATEbyGetRateIdx(
- unsigned char byRate
-);
-
-#endif //__DATARATE_H__
diff --git a/drivers/staging/vt6655/desc.h b/drivers/staging/vt6655/desc.h
index 5a2bbd2047d..758eeb2afd5 100644
--- a/drivers/staging/vt6655/desc.h
+++ b/drivers/staging/vt6655/desc.h
@@ -34,44 +34,34 @@
#include <linux/types.h>
#include <linux/mm.h>
#include "linux/ieee80211.h"
-#include "ttype.h"
-#include "tether.h"
#define B_OWNED_BY_CHIP 1
#define B_OWNED_BY_HOST 0
-//
-// Bits in the RSR register
-//
+/* Bits in the RSR register */
#define RSR_ADDRBROAD 0x80
#define RSR_ADDRMULTI 0x40
#define RSR_ADDRUNI 0x00
#define RSR_IVLDTYP 0x20
-#define RSR_IVLDLEN 0x10 // invalid len (> 2312 byte)
+#define RSR_IVLDLEN 0x10 /* invalid len (> 2312 byte) */
#define RSR_BSSIDOK 0x08
#define RSR_CRCOK 0x04
#define RSR_BCNSSIDOK 0x02
#define RSR_ADDROK 0x01
-//
-// Bits in the new RSR register
-//
+/* Bits in the new RSR register */
#define NEWRSR_DECRYPTOK 0x10
#define NEWRSR_CFPIND 0x08
#define NEWRSR_HWUTSF 0x04
#define NEWRSR_BCNHITAID 0x02
#define NEWRSR_BCNHITAID0 0x01
-//
-// Bits in the TSR0 register
-//
+/* Bits in the TSR0 register */
#define TSR0_PWRSTS1_2 0xC0
#define TSR0_PWRSTS7 0x20
#define TSR0_NCR 0x1F
-//
-// Bits in the TSR1 register
-//
+/* Bits in the TSR1 register */
#define TSR1_TERR 0x80
#define TSR1_PWRSTS4_6 0x70
#define TSR1_RETRYTMO 0x08
@@ -79,16 +69,14 @@
#define TSR1_PWRSTS3 0x02
#define ACK_DATA 0x01
-//
-// Bits in the TCR register
-//
-#define EDMSDU 0x04 // end of sdu
-#define TCR_EDP 0x02 // end of packet
-#define TCR_STP 0x01 // start of packet
+/* Bits in the TCR register */
+#define EDMSDU 0x04 /* end of sdu */
+#define TCR_EDP 0x02 /* end of packet */
+#define TCR_STP 0x01 /* start of packet */
-// max transmit or receive buffer size
+/* max transmit or receive buffer size */
#define CB_MAX_BUF_SIZE 2900U
- // NOTE: must be multiple of 4
+ /* NOTE: must be multiple of 4 */
#define CB_MAX_TX_BUF_SIZE CB_MAX_BUF_SIZE
#define CB_MAX_RX_BUF_SIZE_NORMAL CB_MAX_BUF_SIZE
@@ -100,18 +88,21 @@
#define CB_MIN_TX_DESC 16
#define CB_MAX_RECEIVED_PACKETS 16
- // limit our receive routine to indicating
- // this many at a time for 2 reasons:
- // 1. driver flow control to protocol layer
- // 2. limit the time used in ISR routine
+ /*
+ * limit our receive routine to indicating
+ * this many at a time for 2 reasons:
+ * 1. driver flow control to protocol layer
+ * 2. limit the time used in ISR routine
+ */
#define CB_EXTRA_RD_NUM 32
#define CB_RD_NUM 32
#define CB_TD_NUM 32
-// max number of physical segments
-// in a single NDIS packet. Above this threshold, the packet
-// is copied into a single physically contiguous buffer
+/*
+ * max number of physical segments in a single NDIS packet. Above this
+ * threshold, the packet is copied into a single physically contiguous buffer
+ */
#define CB_MAX_SEGMENT 4
#define CB_MIN_MAP_REG_NUM 4
@@ -119,42 +110,13 @@
#define CB_PROTOCOL_RESERVED_SECTION 16
-// if retrys excess 15 times , tx will abort, and
-// if tx fifo underflow, tx will fail
-// we should try to resend it
+/*
+ * if retries exceed 15 times, tx will abort, and if the tx fifo underflows,
+ * tx will fail; we should try to resend it
+ */
#define CB_MAX_TX_ABORT_RETRY 3
-#ifdef __BIG_ENDIAN
-
-// WMAC definition FIFO Control
-#define FIFOCTL_AUTO_FB_1 0x0010
-#define FIFOCTL_AUTO_FB_0 0x0008
-#define FIFOCTL_GRPACK 0x0004
-#define FIFOCTL_11GA 0x0003
-#define FIFOCTL_11GB 0x0002
-#define FIFOCTL_11B 0x0001
-#define FIFOCTL_11A 0x0000
-#define FIFOCTL_RTS 0x8000
-#define FIFOCTL_ISDMA0 0x4000
-#define FIFOCTL_GENINT 0x2000
-#define FIFOCTL_TMOEN 0x1000
-#define FIFOCTL_LRETRY 0x0800
-#define FIFOCTL_CRCDIS 0x0400
-#define FIFOCTL_NEEDACK 0x0200
-#define FIFOCTL_LHEAD 0x0100
-
-//WMAC definition Frag Control
-#define FRAGCTL_AES 0x0003
-#define FRAGCTL_TKIP 0x0002
-#define FRAGCTL_LEGACY 0x0001
-#define FRAGCTL_NONENCRYPT 0x0000
-#define FRAGCTL_ENDFRAG 0x0300
-#define FRAGCTL_MIDFRAG 0x0200
-#define FRAGCTL_STAFRAG 0x0100
-#define FRAGCTL_NONFRAG 0x0000
-
-#else
-
+/* WMAC definition FIFO Control */
#define FIFOCTL_AUTO_FB_1 0x1000
#define FIFOCTL_AUTO_FB_0 0x0800
#define FIFOCTL_GRPACK 0x0400
@@ -171,7 +133,7 @@
#define FIFOCTL_NEEDACK 0x0002
#define FIFOCTL_LHEAD 0x0001
-//WMAC definition Frag Control
+/* WMAC definition Frag Control */
#define FRAGCTL_AES 0x0300
#define FRAGCTL_TKIP 0x0200
#define FRAGCTL_LEGACY 0x0100
@@ -181,8 +143,6 @@
#define FRAGCTL_STAFRAG 0x0001
#define FRAGCTL_NONFRAG 0x0000
-#endif
-
#define TYPE_TXDMA0 0
#define TYPE_AC0DMA 1
#define TYPE_ATIMDMA 2
@@ -195,14 +155,17 @@
#define TYPE_RXDMA1 1
#define TYPE_MAXRD 2
-// TD_INFO flags control bit
-#define TD_FLAGS_NETIF_SKB 0x01 // check if need release skb
-#define TD_FLAGS_PRIV_SKB 0x02 // check if called from private skb(hostap)
-#define TD_FLAGS_PS_RETRY 0x04 // check if PS STA frame re-transmit
+/* TD_INFO flags control bit */
+#define TD_FLAGS_NETIF_SKB 0x01 /* check if need release skb */
+#define TD_FLAGS_PRIV_SKB 0x02 /* check if called from private skb (hostap) */
+#define TD_FLAGS_PS_RETRY 0x04 /* check if PS STA frame re-transmit */
-// ref_sk_buff is used for mapping the skb structure between pre-built driver-obj & running kernel.
-// Since different kernel version (2.4x) may change skb structure, i.e. pre-built driver-obj
-// may link to older skb that leads error.
+/*
+ * ref_sk_buff is used for mapping the skb structure between pre-built
+ * driver-obj & running kernel. Since different kernel version (2.4x) may
+ * change skb structure, i.e. pre-built driver-obj may link to older skb that
+ * leads error.
+ */
typedef struct tagDEVICE_RD_INFO {
struct sk_buff *skb;
@@ -242,9 +205,7 @@ typedef struct tagRDES1 {
} __attribute__ ((__packed__))
SRDES1;
-//
-// Rx descriptor
-//
+/* Rx descriptor */
typedef struct tagSRxDesc {
volatile SRDES0 m_rd0RD0;
volatile SRDES1 m_rd1RD1;
@@ -292,6 +253,7 @@ typedef struct tagTDES1 {
STDES1;
typedef struct tagDEVICE_TD_INFO {
+ void *mic_hdr;
struct sk_buff *skb;
unsigned char *buf;
dma_addr_t skb_dma;
@@ -302,9 +264,7 @@ typedef struct tagDEVICE_TD_INFO {
unsigned char byFlags;
} DEVICE_TD_INFO, *PDEVICE_TD_INFO;
-//
-// transmit descriptor
-//
+/* transmit descriptor */
typedef struct tagSTxDesc {
volatile STDES0 m_td0TD0;
volatile STDES1 m_td1TD1;
@@ -319,8 +279,8 @@ typedef const STxDesc *PCSTxDesc;
typedef struct tagSTxSyncDesc {
volatile STDES0 m_td0TD0;
volatile STDES1 m_td1TD1;
- volatile u32 buff_addr; // pointer to logical buffer
- volatile u32 next_desc; // pointer to next logical descriptor
+ volatile u32 buff_addr; /* pointer to logical buffer */
+ volatile u32 next_desc; /* pointer to next logical descriptor */
volatile unsigned short m_wFIFOCtl;
volatile unsigned short m_wTimeStamp;
struct tagSTxSyncDesc *next __aligned(8);
@@ -329,9 +289,7 @@ typedef struct tagSTxSyncDesc {
STxSyncDesc, *PSTxSyncDesc;
typedef const STxSyncDesc *PCSTxSyncDesc;
-//
-// RsvTime buffer header
-//
+/* RsvTime buffer header */
typedef struct tagSRrvTime_atim {
unsigned short wCTSTxRrvTime_ba;
unsigned short wTxRrvTime_a;
@@ -352,9 +310,7 @@ union vnt_phy_field_swap {
u32 field_write;
};
-//
-// Tx FIFO header
-//
+/* Tx FIFO header */
typedef struct tagSTxBufHead {
u32 adwTxKey[4];
unsigned short wFIFOCtl;
@@ -392,4 +348,4 @@ typedef struct tagSKeyEntry {
} __attribute__ ((__packed__))
SKeyEntry;
-#endif // __DESC_H__
+#endif /* __DESC_H__ */
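With the #ifdef __BIG_ENDIAN variants of the FIFOCTL/FRAGCTL bits removed, desc.h keeps a single little-endian bit layout, so the byte-order conversion is expected wherever the FIFO control word is written or read (the device_main.c hunk below already reads fifo_ctl through le16_to_cpu()). A minimal sketch of composing such a word under that assumption; the structure and field names here are illustrative, not the driver's actual vnt_tx_fifo_head definition:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical TX FIFO head with an explicitly little-endian control word. */
struct example_tx_fifo_head {
	__le16 fifo_ctl;
};

static void example_fill_fifo_ctl(struct example_tx_fifo_head *head,
				  bool need_ack, bool auto_fb)
{
	u16 ctl = FIFOCTL_GENINT;	/* always request a TX interrupt */

	if (need_ack)
		ctl |= FIFOCTL_NEEDACK;
	if (auto_fb)
		ctl |= FIFOCTL_AUTO_FB_0;

	/* Store in device (little-endian) byte order regardless of host CPU. */
	head->fifo_ctl = cpu_to_le16(ctl);
}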
diff --git a/drivers/staging/vt6655/device.h b/drivers/staging/vt6655/device.h
index ddd356aa7ea..83efbfb57c7 100644
--- a/drivers/staging/vt6655/device.h
+++ b/drivers/staging/vt6655/device.h
@@ -50,40 +50,47 @@
#include <linux/io.h>
#include <linux/if.h>
#include <linux/crc32.h>
-//#include <linux/config.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
#include <linux/inetdevice.h>
#include <linux/reboot.h>
#include <linux/ethtool.h>
/* Include Wireless Extension definition and check version - Jean II */
+#include <net/mac80211.h>
#include <linux/wireless.h>
-#include <net/iw_handler.h> // New driver API
+#include <net/iw_handler.h> /* New driver API */
-//2008-0409-07, <Add> by Einsn Liu
#ifndef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
#define WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
#endif
-//
-// device specific
-//
+/* device specific */
#include "device_cfg.h"
-#include "ttype.h"
-#include "80211hdr.h"
-#include "tether.h"
-#include "wmgr.h"
-#include "wcmd.h"
+#include "card.h"
#include "mib.h"
#include "srom.h"
-#include "rc4.h"
#include "desc.h"
#include "key.h"
#include "mac.h"
/*--------------------- Export Definitions -------------------------*/
+#define RATE_1M 0
+#define RATE_2M 1
+#define RATE_5M 2
+#define RATE_11M 3
+#define RATE_6M 4
+#define RATE_9M 5
+#define RATE_12M 6
+#define RATE_18M 7
+#define RATE_24M 8
+#define RATE_36M 9
+#define RATE_48M 10
+#define RATE_54M 11
+#define RATE_AUTO 12
+#define MAX_RATE 12
+
#define MAC_MAX_CONTEXT_REG (256+128)
#define MAX_MULTICAST_ADDRESS_NUM 32
@@ -112,7 +119,7 @@
#define FB_RATE0 0
#define FB_RATE1 1
-// Antenna Mode
+/* Antenna Mode */
#define ANT_A 0
#define ANT_B 1
#define ANT_DIVERSITY 2
@@ -129,120 +136,28 @@
#define RUN_AT(x) (jiffies+(x))
#endif
-// DMA related
+#define MAKE_BEACON_RESERVED 10 /* (us) */
+
+/* DMA related */
#define RESERV_AC0DMA 4
-// BUILD OBJ mode
+/* BUILD OBJ mode */
#define AVAIL_TD(p, q) ((p)->sOpts.nTxDescs[(q)] - ((p)->iTDUsed[(q)]))
#define NUM 64
-#define PRIVATE_Message 0
-
-/*--------------------- Export Types ------------------------------*/
-
-#define PRINT_K(p, args...) \
-do { \
- if (PRIVATE_Message) \
- printk(p, ##args); \
-} while (0)
+/* 0:11A 1:11B 2:11G */
+#define BB_TYPE_11A 0
+#define BB_TYPE_11B 1
+#define BB_TYPE_11G 2
-//0:11A 1:11B 2:11G
-typedef enum _VIA_BB_TYPE
-{
- BB_TYPE_11A = 0,
- BB_TYPE_11B,
- BB_TYPE_11G
-} VIA_BB_TYPE, *PVIA_BB_TYPE;
-
-//0:11a,1:11b,2:11gb(only CCK in BasicRate),3:11ga(OFDM in Basic Rate)
-typedef enum _VIA_PKT_TYPE
-{
- PK_TYPE_11A = 0,
- PK_TYPE_11B,
- PK_TYPE_11GB,
- PK_TYPE_11GA
-} VIA_PKT_TYPE, *PVIA_PKT_TYPE;
-
-typedef enum __device_msg_level {
- MSG_LEVEL_ERR = 0, //Errors that will cause abnormal operation.
- MSG_LEVEL_NOTICE = 1, //Some errors need users to be notified.
- MSG_LEVEL_INFO = 2, //Normal message.
- MSG_LEVEL_VERBOSE = 3, //Will report all trival errors.
- MSG_LEVEL_DEBUG = 4 //Only for debug purpose.
-} DEVICE_MSG_LEVEL, *PDEVICE_MSG_LEVEL;
-
-//++ NDIS related
-
-#define MAX_BSSIDINFO_4_PMKID 16
-#define MAX_PMKIDLIST 5
-//Flags for PMKID Candidate list structure
-#define NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED 0x01
-
-// PMKID Structures
-typedef unsigned char NDIS_802_11_PMKID_VALUE[16];
-
-typedef enum _NDIS_802_11_WEP_STATUS {
- Ndis802_11WEPEnabled,
- Ndis802_11Encryption1Enabled = Ndis802_11WEPEnabled,
- Ndis802_11WEPDisabled,
- Ndis802_11EncryptionDisabled = Ndis802_11WEPDisabled,
- Ndis802_11WEPKeyAbsent,
- Ndis802_11Encryption1KeyAbsent = Ndis802_11WEPKeyAbsent,
- Ndis802_11WEPNotSupported,
- Ndis802_11EncryptionNotSupported = Ndis802_11WEPNotSupported,
- Ndis802_11Encryption2Enabled,
- Ndis802_11Encryption2KeyAbsent,
- Ndis802_11Encryption3Enabled,
- Ndis802_11Encryption3KeyAbsent
-} NDIS_802_11_WEP_STATUS, *PNDIS_802_11_WEP_STATUS,
- NDIS_802_11_ENCRYPTION_STATUS, *PNDIS_802_11_ENCRYPTION_STATUS;
-
-typedef enum _NDIS_802_11_STATUS_TYPE {
- Ndis802_11StatusType_Authentication,
- Ndis802_11StatusType_MediaStreamMode,
- Ndis802_11StatusType_PMKID_CandidateList,
- Ndis802_11StatusTypeMax // not a real type, defined as an upper bound
-} NDIS_802_11_STATUS_TYPE, *PNDIS_802_11_STATUS_TYPE;
-
-//Added new types for PMKID Candidate lists.
-struct pmkid_candidate {
- NDIS_802_11_MAC_ADDRESS BSSID;
- unsigned long Flags;
-};
+/* 0:11a, 1:11b, 2:11gb (only CCK in BasicRate), 3:11ga (OFDM in BasicRate) */
+#define PK_TYPE_11A 0
+#define PK_TYPE_11B 1
+#define PK_TYPE_11GB 2
+#define PK_TYPE_11GA 3
-typedef struct _BSSID_INFO {
- NDIS_802_11_MAC_ADDRESS BSSID;
- NDIS_802_11_PMKID_VALUE PMKID;
-} BSSID_INFO, *PBSSID_INFO;
-
-typedef struct tagSPMKID {
- unsigned long Length;
- unsigned long BSSIDInfoCount;
- BSSID_INFO BSSIDInfo[MAX_BSSIDINFO_4_PMKID];
-} SPMKID, *PSPMKID;
-
-typedef struct tagSPMKIDCandidateEvent {
- NDIS_802_11_STATUS_TYPE StatusType;
- unsigned long Version; // Version of the structure
- unsigned long NumCandidates; // No. of pmkid candidates
- struct pmkid_candidate CandidateList[MAX_PMKIDLIST];
-} SPMKIDCandidateEvent, *PSPMKIDCandidateEvent;
-
-//--
-
-//++ 802.11h related
-#define MAX_QUIET_COUNT 8
-
-typedef struct tagSQuietControl {
- bool bEnable;
- unsigned long dwStartTime;
- unsigned char byPeriod;
- unsigned short wDuration;
-} SQuietControl, *PSQuietControl;
-
-//--
typedef struct __chip_info_tbl {
CHIP_TYPE chip_id;
char *name;
@@ -256,34 +171,7 @@ typedef enum {
OWNED_BY_NIC = 1
} DEVICE_OWNER_TYPE, *PDEVICE_OWNER_TYPE;
-// The receive duplicate detection cache entry
-typedef struct tagSCacheEntry {
- unsigned short wFmSequence;
- unsigned char abyAddr2[ETH_ALEN];
-} SCacheEntry, *PSCacheEntry;
-
-typedef struct tagSCache {
-/* The receive cache is updated circularly. The next entry to be written is
- * indexed by the "InPtr".
- */
- unsigned int uInPtr; // Place to use next
- SCacheEntry asCacheEntry[DUPLICATE_RX_CACHE_LENGTH];
-} SCache, *PSCache;
-
-#define CB_MAX_RX_FRAG 64
-// DeFragment Control Block, used for collecting fragments prior to reassembly
-typedef struct tagSDeFragControlBlock {
- unsigned short wSequence;
- unsigned short wFragNum;
- unsigned char abyAddr2[ETH_ALEN];
- unsigned int uLifetime;
- struct sk_buff *skb;
- unsigned char *pbyRxBuffer;
- unsigned int cbFrameLength;
- bool bInUse;
-} SDeFragControlBlock, *PSDeFragControlBlock;
-
-//flags for options
+/* flags for options */
#define DEVICE_FLAGS_IP_ALIGN 0x00000001UL
#define DEVICE_FLAGS_PREAMBLE_TYPE 0x00000002UL
#define DEVICE_FLAGS_OP_MODE 0x00000004UL
@@ -291,15 +179,15 @@ typedef struct tagSDeFragControlBlock {
#define DEVICE_FLAGS_80211h_MODE 0x00000010UL
#define DEVICE_FLAGS_DiversityANT 0x00000020UL
-//flags for driver status
+/* flags for driver status */
#define DEVICE_FLAGS_OPENED 0x00010000UL
#define DEVICE_FLAGS_WOL_ENABLED 0x00080000UL
-//flags for capabilities
+/* flags for capabilities */
#define DEVICE_FLAGS_TX_ALIGN 0x01000000UL
#define DEVICE_FLAGS_HAVE_CAM 0x02000000UL
#define DEVICE_FLAGS_FLOW_CTRL 0x04000000UL
-//flags for MII status
+/* flags for MII status */
#define DEVICE_LINK_FAIL 0x00000001UL
#define DEVICE_SPEED_10 0x00000002UL
#define DEVICE_SPEED_100 0x00000004UL
@@ -307,18 +195,14 @@ typedef struct tagSDeFragControlBlock {
#define DEVICE_DUPLEX_FULL 0x00000010UL
#define DEVICE_AUTONEG_ENABLE 0x00000020UL
#define DEVICE_FORCED_BY_EEPROM 0x00000040UL
-//for device_set_media_duplex
+/* for device_set_media_duplex */
#define DEVICE_LINK_CHANGE 0x00000001UL
typedef struct __device_opt {
- int nRxDescs0; //Number of RX descriptors0
- int nRxDescs1; //Number of RX descriptors1
- int nTxDescs[2]; //Number of TX descriptors 0, 1
- int int_works; //interrupt limits
- int rts_thresh; //rts threshold
- int frag_thresh;
- int data_rate;
- int channel_num;
+ int nRxDescs0; /* Number of RX descriptors0 */
+ int nRxDescs1; /* Number of RX descriptors1 */
+ int nTxDescs[2]; /* Number of TX descriptors 0, 1 */
+ int int_works; /* interrupt limits */
int short_retry;
int long_retry;
int bbp_type;
@@ -327,11 +211,16 @@ typedef struct __device_opt {
struct vnt_private {
struct pci_dev *pcid;
-
-// netdev
- struct net_device *dev;
-
-//dma addr, rx/tx pool
+ /* mac80211 */
+ struct ieee80211_hw *hw;
+ struct ieee80211_vif *vif;
+ unsigned long key_entry_inuse;
+ u32 basic_rates;
+ u16 current_aid;
+ int mc_list_count;
+ u8 mac_hw;
+
+/* dma addr, rx/tx pool */
dma_addr_t pool_dma;
dma_addr_t rd0_pool_dma;
dma_addr_t rd1_pool_dma;
@@ -356,9 +245,12 @@ struct vnt_private {
u32 io_size;
unsigned char byRevId;
+ unsigned char byRxMode;
unsigned short SubSystemID;
unsigned short SubVendorID;
+ spinlock_t lock;
+
int nTxQueues;
volatile int iTDUsed[TYPE_MAXTD];
@@ -371,30 +263,18 @@ struct vnt_private {
volatile PSRxDesc aRD0Ring;
volatile PSRxDesc aRD1Ring;
volatile PSRxDesc pCurrRD[TYPE_MAXRD];
- SCache sDupRxCache;
-
- SDeFragControlBlock sRxDFCB[CB_MAX_RX_FRAG];
- unsigned int cbDFCB;
- unsigned int cbFreeDFCB;
- unsigned int uCurrentDFCBIdx;
OPTIONS sOpts;
u32 flags;
u32 rx_buf_sz;
+ u8 rx_rate;
int multicast_limit;
- unsigned char byRxMode;
-
- spinlock_t lock;
-
- pid_t MLMEThr_pid;
- struct completion notify;
- struct semaphore mlme_semaphore;
u32 rx_bytes;
- // Version control
+ /* Version control */
unsigned char byLocalID;
unsigned char byRFType;
@@ -402,20 +282,15 @@ struct vnt_private {
unsigned char byZoneType;
bool bZoneRegExist;
unsigned char byOriginalZonetype;
- unsigned char abyMacContext[MAC_MAX_CONTEXT_REG];
- bool bLinkPass; // link status: OK or fail
- unsigned char abyCurrentNetAddr[ETH_ALEN];
- // Adapter statistics
+ unsigned char abyCurrentNetAddr[ETH_ALEN] __aligned(2);
+ bool bLinkPass; /* link status: OK or fail */
+
+ /* Adapter statistics */
SStatCounter scStatistic;
- // 802.11 counter
+ /* 802.11 counter */
SDot11Counters s802_11Counter;
- // 802.11 management
- PSMgmtObject pMgmt;
- SMgmtObject sMgmtObj;
-
- // 802.11 MAC specific
unsigned int uCurrRSSI;
unsigned char byCurrSQ;
@@ -427,22 +302,25 @@ struct vnt_private {
bool bTxRxAntInv;
unsigned char *pbyTmpBuff;
- unsigned int uSIFS; //Current SIFS
- unsigned int uDIFS; //Current DIFS
- unsigned int uEIFS; //Current EIFS
- unsigned int uSlot; //Current SlotTime
- unsigned int uCwMin; //Current CwMin
- unsigned int uCwMax; //CwMax is fixed on 1023.
- // PHY parameter
+ unsigned int uSIFS; /* Current SIFS */
+ unsigned int uDIFS; /* Current DIFS */
+ unsigned int uEIFS; /* Current EIFS */
+ unsigned int uSlot; /* Current SlotTime */
+ unsigned int uCwMin; /* Current CwMin */
+ unsigned int uCwMax; /* CwMax is fixed at 1023. */
+ /* PHY parameter */
unsigned char bySIFS;
unsigned char byDIFS;
unsigned char byEIFS;
unsigned char bySlot;
unsigned char byCWMaxMin;
- CARD_PHY_TYPE eCurrentPHYType;
- VIA_BB_TYPE byBBType; //0: 11A, 1:11B, 2:11G
- VIA_PKT_TYPE byPacketType; //0:11a,1:11b,2:11gb(only CCK in BasicRate),3:11ga(OFDM in Basic Rate)
+ u8 byBBType; /* 0:11A, 1:11B, 2:11G */
+ u8 byPacketType; /*
+ * 0:11a,1:11b,2:11gb (only CCK
+ * in BasicRate), 3:11ga (OFDM in
+ * Basic Rate)
+ */
unsigned short wBasicRate;
unsigned char byACKRate;
unsigned char byTopOFDMBasicRate;
@@ -450,28 +328,16 @@ struct vnt_private {
unsigned char byMinChannel;
unsigned char byMaxChannel;
- unsigned int uConnectionRate;
unsigned char byPreambleType;
unsigned char byShortPreamble;
unsigned short wCurrentRate;
- unsigned short wRTSThreshold;
- unsigned short wFragmentationThreshold;
unsigned char byShortRetryLimit;
unsigned char byLongRetryLimit;
enum nl80211_iftype op_mode;
- unsigned char byOpMode;
bool bBSSIDFilter;
unsigned short wMaxTransmitMSDULifetime;
- unsigned char abyBSSID[ETH_ALEN];
- unsigned char abyDesireBSSID[ETH_ALEN];
- unsigned short wACKDuration; // update while speed change
- unsigned short wRTSTransmitLen; // update while speed change
- unsigned char byRTSServiceField; // update while speed change
- unsigned char byRTSSignalField; // update while speed change
-
- unsigned long dwMaxReceiveLifetime; // dot11MaxReceiveLifetime
bool bEncryptionEnable;
bool bLongHeader;
@@ -480,24 +346,20 @@ struct vnt_private {
bool bNonERPPresent;
bool bBarkerPreambleMd;
- unsigned char byERPFlag;
- unsigned short wUseProtectCntDown;
-
bool bRadioControlOff;
bool bRadioOff;
bool bEnablePSMode;
unsigned short wListenInterval;
bool bPWBitOn;
- WMAC_POWER_MODE ePSMode;
- // GPIO Radio Control
+ /* GPIO Radio Control */
unsigned char byRadioCtl;
unsigned char byGPIO;
bool bHWRadioOff;
bool bPrvActive4RadioOFF;
bool bGPIOBlockRead;
- // Beacon related
+ /* Beacon related */
unsigned short wSeqCounter;
unsigned short wBCNBufLen;
bool bBeaconBufReady;
@@ -506,71 +368,12 @@ struct vnt_private {
unsigned int cbBeaconBufReadySetCnt;
bool bFixRate;
unsigned char byCurrentCh;
- unsigned int uScanTime;
-
- CMD_STATE eCommandState;
-
- CMD_CODE eCommand;
- bool bBeaconTx;
-
- bool bStopBeacon;
- bool bStopDataPkt;
- bool bStopTx0Pkt;
- unsigned int uAutoReConnectTime;
-
- // 802.11 counter
-
- CMD_ITEM eCmdQueue[CMD_Q_SIZE];
- unsigned int uCmdDequeueIdx;
- unsigned int uCmdEnqueueIdx;
- unsigned int cbFreeCmdQueue;
- bool bCmdRunning;
- bool bCmdClear;
-
- bool bRoaming;
- //WOW
- unsigned char abyIPAddr[4];
-
- unsigned long ulTxPower;
- NDIS_802_11_WEP_STATUS eEncryptionStatus;
- bool bTransmitKey;
-//2007-0925-01<Add>by MikeLiu
-//mike add :save old Encryption
- NDIS_802_11_WEP_STATUS eOldEncryptionStatus;
-
- SKeyManagement sKey;
- unsigned long dwIVCounter;
-
- u64 qwPacketNumber; /* For CCMP and TKIP as TSC(6 bytes) */
- unsigned int uCurrentWEPMode;
-
- RC4Ext SBox;
- unsigned char abyPRNG[WLAN_WEPMAX_KEYLEN+3];
- unsigned char byKeyIndex;
- unsigned int uKeyLength;
- unsigned char abyKey[WLAN_WEP232_KEYLEN];
bool bAES;
- unsigned char byCntMeasure;
-
- // for AP mode
- unsigned int uAssocCount;
- bool bMoreData;
-
- // QoS
- bool bGrpAckPolicy;
-
- // for OID_802_11_ASSOCIATION_INFORMATION
- bool bAssocInfoSet;
unsigned char byAutoFBCtrl;
- bool bTxMICFail;
- bool bRxMICFail;
-
- unsigned int uRATEIdx;
-
- // For Update BaseBand VGA Gain Offset
+ /* For Update BaseBand VGA Gain Offset */
bool bUpdateBBVGA;
unsigned int uBBVGADiffCount;
unsigned char byBBVGANew;
@@ -581,24 +384,12 @@ struct vnt_private {
unsigned char byBBPreEDRSSI;
unsigned char byBBPreEDIndex;
- bool bRadioCmd;
unsigned long dwDiagRefCount;
- // For FOE Tuning
+ /* For FOE Tuning */
unsigned char byFOETuning;
- // For Auto Power Tunning
-
- unsigned char byAutoPwrTunning;
- short sPSetPointCCK;
- short sPSetPointOFDMG;
- short sPSetPointOFDMA;
- long lPFormulaOffset;
- short sPThreshold;
- char cAdjustStep;
- char cMinTxAGC;
-
- // For RF Power table
+ /* For RF Power table */
unsigned char byCCKPwr;
unsigned char byOFDMPwrG;
unsigned char byCurPwr;
@@ -610,27 +401,12 @@ struct vnt_private {
char abyRegPwr[CB_MAX_CHANNEL+1];
char abyLocalPwr[CB_MAX_CHANNEL+1];
- // BaseBand Loopback Use
+ /* BaseBand Loopback Use */
unsigned char byBBCR4d;
unsigned char byBBCRc9;
unsigned char byBBCR88;
unsigned char byBBCR09;
- // command timer
- struct timer_list sTimerCommand;
- struct timer_list sTimerTxData;
- unsigned long nTxDataTimeCout;
- bool fTxDataInSleep;
- bool IsTxDataTrigger;
-
-#ifdef WPA_SM_Transtatus
- bool fWPA_Authened; //is WPA/WPA-PSK or WPA2/WPA2-PSK authen??
-#endif
- unsigned char byReAssocCount; //mike add:re-association retry times!
- unsigned char byLinkWaitCount;
-
- unsigned char abyNodeName[17];
-
bool bDiversityRegCtlON;
bool bDiversityEnable;
unsigned long ulDiversityNValue;
@@ -640,13 +416,13 @@ struct vnt_private {
unsigned char byTMax3;
unsigned long ulSQ3TH;
-// ANT diversity
+ /* ANT diversity */
unsigned long uDiversityCnt;
unsigned char byAntennaState;
unsigned long ulRatio_State0;
unsigned long ulRatio_State1;
- //SQ3 functions for antenna diversity
+ /* SQ3 functions for antenna diversity */
struct timer_list TimerSQ3Tmax1;
struct timer_list TimerSQ3Tmax2;
struct timer_list TimerSQ3Tmax3;
@@ -654,86 +430,11 @@ struct vnt_private {
unsigned long uNumSQ3[MAX_RATE];
unsigned short wAntDiversityMaxRate;
- SEthernetHeader sTxEthHeader;
- SEthernetHeader sRxEthHeader;
- unsigned char abyBroadcastAddr[ETH_ALEN];
- unsigned char abySNAP_RFC1042[ETH_ALEN];
- unsigned char abySNAP_Bridgetunnel[ETH_ALEN];
- unsigned char abyEEPROM[EEP_MAX_CONTEXT_SIZE]; //unsigned long alignment
- // Pre-Authentication & PMK cache
- SPMKID gsPMKID;
- SPMKIDCandidateEvent gsPMKIDCandidate;
-
- // for 802.11h
- bool b11hEnable;
- unsigned char abyCountryCode[3];
- // for 802.11h DFS
- unsigned int uNumOfMeasureEIDs;
- PWLAN_IE_MEASURE_REQ pCurrMeasureEID;
- bool bMeasureInProgress;
- unsigned char byOrgChannel;
- unsigned char byOrgRCR;
- unsigned long dwOrgMAR0;
- unsigned long dwOrgMAR4;
- unsigned char byBasicMap;
- unsigned char byCCAFraction;
- unsigned char abyRPIs[8];
- unsigned long dwRPIs[8];
- bool bChannelSwitch;
- unsigned char byNewChannel;
- unsigned char byChannelSwitchCount;
- bool bQuietEnable;
- bool bEnableFirstQuiet;
- unsigned char byQuietStartCount;
- unsigned int uQuietEnqueue;
- unsigned long dwCurrentQuietEndTime;
- SQuietControl sQuiet[MAX_QUIET_COUNT];
- // for 802.11h TPC
- bool bCountryInfo5G;
- bool bCountryInfo24G;
+ unsigned char abyEEPROM[EEP_MAX_CONTEXT_SIZE]; /* unsigned long alignment */
unsigned short wBeaconInterval;
-
- //WPA supplicant deamon
- struct net_device *wpadev;
- bool bWPADEVUp;
- struct sk_buff *skb;
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- unsigned int bwextcount;
- bool bWPASuppWextEnabled;
-#endif
-
- //--
-#ifdef HOSTAP
- // user space daemon: hostapd, is used for HOSTAP
- bool bEnableHostapd;
- bool bEnable8021x;
- bool bEnableHostWEP;
- struct net_device *apdev;
- int (*tx_80211)(struct sk_buff *skb, struct net_device *dev);
-#endif
- unsigned int uChannel;
- bool bMACSuspend;
-
- struct iw_statistics wstats; // wireless stats
- bool bCommit;
};
-static inline bool device_get_ip(struct vnt_private *pInfo)
-{
- struct in_device *in_dev = (struct in_device *)pInfo->dev->ip_ptr;
- struct in_ifaddr *ifa;
-
- if (in_dev != NULL) {
- ifa = (struct in_ifaddr *)in_dev->ifa_list;
- if (ifa != NULL) {
- memcpy(pInfo->abyIPAddr, &ifa->ifa_address, 4);
- return true;
- }
- }
- return false;
-}
-
static inline PDEVICE_RD_INFO alloc_rd_info(void)
{
return kzalloc(sizeof(DEVICE_RD_INFO), GFP_ATOMIC);
@@ -743,13 +444,4 @@ static inline PDEVICE_TD_INFO alloc_td_info(void)
{
return kzalloc(sizeof(DEVICE_TD_INFO), GFP_ATOMIC);
}
-
-/*--------------------- Export Functions --------------------------*/
-
-bool device_dma0_xmit(struct vnt_private *pDevice,
- struct sk_buff *skb, unsigned int uNodeIndex);
-bool device_alloc_frag_buf(struct vnt_private *pDevice,
- PSDeFragControlBlock pDeF);
-int Config_FileOperation(struct vnt_private *pDevice,
- bool fwrite, unsigned char *Parameter);
#endif
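The RATE_1M..RATE_54M indices added to device.h double as mac80211 hw_value identifiers, which is what lets vnt_int_report_rate() in device_main.c map a hardware rate back to a tx-status index. A hedged sketch of how the CCK portion of such a rate table could look; the table the driver actually registers is outside this hunk:

#include <net/mac80211.h>

/* Illustrative CCK entries keyed by the RATE_* indices above;
 * bitrate is in 100 kbps units, as mac80211 expects. */
static const struct ieee80211_rate example_cck_rates[] = {
	{ .bitrate = 10,  .hw_value = RATE_1M },
	{ .bitrate = 20,  .hw_value = RATE_2M,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,  .hw_value = RATE_5M,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110, .hw_value = RATE_11M,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
};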
diff --git a/drivers/staging/vt6655/device_cfg.h b/drivers/staging/vt6655/device_cfg.h
index 7221824e4f2..a4a8a8489e0 100644
--- a/drivers/staging/vt6655/device_cfg.h
+++ b/drivers/staging/vt6655/device_cfg.h
@@ -29,8 +29,6 @@
#include <linux/types.h>
-#include "ttype.h"
-
typedef
struct _version {
unsigned char major;
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 54e16f40d8e..83e4162c009 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -32,27 +32,16 @@
* device_free_info - device structure resource free function
* device_get_pci_info - get allocated pci io/mem resource
* device_print_info - print out resource
- * device_open - allocate dma/descripter resource & initial mac/bbp function
- * device_xmit - asynchrous data tx function
* device_intr - interrupt handle function
- * device_set_multi - set mac filter
- * device_ioctl - ioctl entry
- * device_close - shutdown mac/bbp & free dma/descripter resource
* device_rx_srv - rx service function
- * device_receive_frame - rx data function
* device_alloc_rx_buf - rx buffer pre-allocated function
- * device_alloc_frag_buf - rx fragement pre-allocated function
* device_free_tx_buf - free tx buffer function
- * device_free_frag_buf- free de-fragement buffer
- * device_dma0_tx_80211- tx 802.11 frame via dma0
- * device_dma0_xmit- tx PS bufferred frame via dma0
* device_init_rd0_ring- initial rd dma0 ring
* device_init_rd1_ring- initial rd dma1 ring
* device_init_td0_ring- initial tx dma0 ring buffer
* device_init_td1_ring- initial tx dma1 ring buffer
* device_init_registers- initial MAC & BBP & RF internal registers.
* device_init_rings- initial tx/rx ring buffer
- * device_init_defrag_cb- initial & allocate de-fragement buffer.
* device_free_rings- free all allocated ring buffer
* device_tx_srv- tx interrupt service function
*
@@ -66,24 +55,10 @@
#include "channel.h"
#include "baseband.h"
#include "mac.h"
-#include "tether.h"
-#include "wmgr.h"
-#include "wctl.h"
#include "power.h"
-#include "wcmd.h"
-#include "iocmd.h"
-#include "tcrc.h"
#include "rxtx.h"
-#include "wroute.h"
-#include "bssdb.h"
-#include "hostap.h"
-#include "wpactl.h"
-#include "ioctl.h"
-#include "iwctl.h"
#include "dpc.h"
-#include "datarate.h"
#include "rf.h"
-#include "iowpa.h"
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/slab.h>
@@ -118,89 +93,16 @@ DEVICE_PARAM(TxDescriptors0, "Number of transmit descriptors0");
#define TX_DESC_DEF1 64
DEVICE_PARAM(TxDescriptors1, "Number of transmit descriptors1");
-#define IP_ALIG_DEF 0
-/* IP_byte_align[] is used for IP header unsigned long byte aligned
- 0: indicate the IP header won't be unsigned long byte aligned.(Default) .
- 1: indicate the IP header will be unsigned long byte aligned.
- In some environment, the IP header should be unsigned long byte aligned,
- or the packet will be droped when we receive it. (eg: IPVS)
-*/
-DEVICE_PARAM(IP_byte_align, "Enable IP header dword aligned");
-
#define INT_WORKS_DEF 20
#define INT_WORKS_MIN 10
#define INT_WORKS_MAX 64
DEVICE_PARAM(int_works, "Number of packets per interrupt services");
-#define CHANNEL_MIN 1
-#define CHANNEL_MAX 14
-#define CHANNEL_DEF 6
-
-DEVICE_PARAM(Channel, "Channel number");
-
-/* PreambleType[] is the preamble length used for transmit.
- 0: indicate allows long preamble type
- 1: indicate allows short preamble type
-*/
-
-#define PREAMBLE_TYPE_DEF 1
-
-DEVICE_PARAM(PreambleType, "Preamble Type");
-
-#define RTS_THRESH_MIN 512
-#define RTS_THRESH_MAX 2347
#define RTS_THRESH_DEF 2347
-DEVICE_PARAM(RTSThreshold, "RTS threshold");
-
-#define FRAG_THRESH_MIN 256
-#define FRAG_THRESH_MAX 2346
#define FRAG_THRESH_DEF 2346
-DEVICE_PARAM(FragThreshold, "Fragmentation threshold");
-
-#define DATA_RATE_MIN 0
-#define DATA_RATE_MAX 13
-#define DATA_RATE_DEF 13
-/* datarate[] index
- 0: indicate 1 Mbps 0x02
- 1: indicate 2 Mbps 0x04
- 2: indicate 5.5 Mbps 0x0B
- 3: indicate 11 Mbps 0x16
- 4: indicate 6 Mbps 0x0c
- 5: indicate 9 Mbps 0x12
- 6: indicate 12 Mbps 0x18
- 7: indicate 18 Mbps 0x24
- 8: indicate 24 Mbps 0x30
- 9: indicate 36 Mbps 0x48
- 10: indicate 48 Mbps 0x60
- 11: indicate 54 Mbps 0x6c
- 12: indicate 72 Mbps 0x90
- 13: indicate auto rate
-*/
-
-DEVICE_PARAM(ConnectionRate, "Connection data rate");
-
-#define OP_MODE_DEF 0
-
-DEVICE_PARAM(OPMode, "Infrastruct, adhoc, AP mode ");
-
-/* OpMode[] is used for transmit.
- 0: indicate infrastruct mode used
- 1: indicate adhoc mode used
- 2: indicate AP mode used
-*/
-
-/* PSMode[]
- 0: indicate disable power saving mode
- 1: indicate enable power saving mode
-*/
-
-#define PS_MODE_DEF 0
-
-DEVICE_PARAM(PSMode, "Power saving mode");
-
#define SHORT_RETRY_MIN 0
#define SHORT_RETRY_MAX 31
#define SHORT_RETRY_DEF 8
@@ -224,20 +126,6 @@ DEVICE_PARAM(LongRetryLimit, "long frame retry limits");
DEVICE_PARAM(BasebandType, "baseband type");
-/* 80211hEnable[]
- 0: indicate disable 802.11h
- 1: indicate enable 802.11h
-*/
-
-#define X80211h_MODE_DEF 0
-
-DEVICE_PARAM(b80211hEnable, "802.11h mode");
-
-/* 80211hEnable[]
- 0: indicate disable 802.11h
- 1: indicate enable 802.11h
-*/
-
#define DIVERSITY_ANT_DEF 0
DEVICE_PARAM(bDiversityANTEnable, "ANT diversity mode");
@@ -265,17 +153,10 @@ static void device_free_info(struct vnt_private *pDevice);
static bool device_get_pci_info(struct vnt_private *, struct pci_dev *pcid);
static void device_print_info(struct vnt_private *pDevice);
static void device_init_diversity_timer(struct vnt_private *pDevice);
-static int device_open(struct net_device *dev);
-static int device_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t device_intr(int irq, void *dev_instance);
-static void device_set_multi(struct net_device *dev);
-static int device_close(struct net_device *dev);
-static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
#ifdef CONFIG_PM
static int device_notify_reboot(struct notifier_block *, unsigned long event, void *ptr);
-static int viawget_suspend(struct pci_dev *pcid, pm_message_t state);
-static int viawget_resume(struct pci_dev *pcid);
static struct notifier_block device_notifier = {
.notifier_call = device_notify_reboot,
.next = NULL,
@@ -285,15 +166,9 @@ static struct notifier_block device_notifier = {
static void device_init_rd0_ring(struct vnt_private *pDevice);
static void device_init_rd1_ring(struct vnt_private *pDevice);
-static void device_init_defrag_cb(struct vnt_private *pDevice);
static void device_init_td0_ring(struct vnt_private *pDevice);
static void device_init_td1_ring(struct vnt_private *pDevice);
-static int device_dma0_tx_80211(struct sk_buff *skb, struct net_device *dev);
-//2008-0714<Add>by Mike Liu
-static bool device_release_WPADEV(struct vnt_private *pDevice);
-
-static int ethtool_ioctl(struct net_device *dev, void __user *useraddr);
static int device_rx_srv(struct vnt_private *pDevice, unsigned int uIdx);
static int device_tx_srv(struct vnt_private *pDevice, unsigned int uIdx);
static bool device_alloc_rx_buf(struct vnt_private *pDevice, PSRxDesc pDesc);
@@ -304,9 +179,6 @@ static void device_free_td1_ring(struct vnt_private *pDevice);
static void device_free_rd0_ring(struct vnt_private *pDevice);
static void device_free_rd1_ring(struct vnt_private *pDevice);
static void device_free_rings(struct vnt_private *pDevice);
-static void device_free_frag_buf(struct vnt_private *pDevice);
-static int Config_FileGetParameter(unsigned char *string,
- unsigned char *dest, unsigned char *source);
/*--------------------- Export Variables --------------------------*/
@@ -339,124 +211,50 @@ static void device_get_options(struct vnt_private *pDevice)
pOpts->nRxDescs1 = RX_DESC_DEF1;
pOpts->nTxDescs[0] = TX_DESC_DEF0;
pOpts->nTxDescs[1] = TX_DESC_DEF1;
- pOpts->flags |= DEVICE_FLAGS_IP_ALIGN;
pOpts->int_works = INT_WORKS_DEF;
- pOpts->rts_thresh = RTS_THRESH_DEF;
- pOpts->frag_thresh = FRAG_THRESH_DEF;
- pOpts->data_rate = DATA_RATE_DEF;
- pOpts->channel_num = CHANNEL_DEF;
- pOpts->flags |= DEVICE_FLAGS_PREAMBLE_TYPE;
- pOpts->flags |= DEVICE_FLAGS_OP_MODE;
pOpts->short_retry = SHORT_RETRY_DEF;
pOpts->long_retry = LONG_RETRY_DEF;
pOpts->bbp_type = BBP_TYPE_DEF;
- pOpts->flags |= DEVICE_FLAGS_80211h_MODE;
pOpts->flags |= DEVICE_FLAGS_DiversityANT;
}
static void
device_set_options(struct vnt_private *pDevice)
{
- unsigned char abyBroadcastAddr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- unsigned char abySNAP_RFC1042[ETH_ALEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00};
- unsigned char abySNAP_Bridgetunnel[ETH_ALEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0xF8};
-
- memcpy(pDevice->abyBroadcastAddr, abyBroadcastAddr, ETH_ALEN);
- memcpy(pDevice->abySNAP_RFC1042, abySNAP_RFC1042, ETH_ALEN);
- memcpy(pDevice->abySNAP_Bridgetunnel, abySNAP_Bridgetunnel, ETH_ALEN);
-
- pDevice->uChannel = pDevice->sOpts.channel_num;
- pDevice->wRTSThreshold = pDevice->sOpts.rts_thresh;
- pDevice->wFragmentationThreshold = pDevice->sOpts.frag_thresh;
pDevice->byShortRetryLimit = pDevice->sOpts.short_retry;
pDevice->byLongRetryLimit = pDevice->sOpts.long_retry;
- pDevice->wMaxTransmitMSDULifetime = DEFAULT_MSDU_LIFETIME;
- pDevice->byShortPreamble = (pDevice->sOpts.flags & DEVICE_FLAGS_PREAMBLE_TYPE) ? 1 : 0;
- pDevice->byOpMode = (pDevice->sOpts.flags & DEVICE_FLAGS_OP_MODE) ? 1 : 0;
- pDevice->ePSMode = (pDevice->sOpts.flags & DEVICE_FLAGS_PS_MODE) ? 1 : 0;
- pDevice->b11hEnable = (pDevice->sOpts.flags & DEVICE_FLAGS_80211h_MODE) ? 1 : 0;
pDevice->bDiversityRegCtlON = (pDevice->sOpts.flags & DEVICE_FLAGS_DiversityANT) ? 1 : 0;
- pDevice->uConnectionRate = pDevice->sOpts.data_rate;
- if (pDevice->uConnectionRate < RATE_AUTO)
- pDevice->bFixRate = true;
pDevice->byBBType = pDevice->sOpts.bbp_type;
- pDevice->byPacketType = (VIA_PKT_TYPE)pDevice->byBBType;
+ pDevice->byPacketType = pDevice->byBBType;
pDevice->byAutoFBCtrl = AUTO_FB_0;
pDevice->bUpdateBBVGA = true;
- pDevice->byFOETuning = 0;
pDevice->byPreambleType = 0;
- pr_debug(" uChannel= %d\n", (int)pDevice->uChannel);
- pr_debug(" byOpMode= %d\n", (int)pDevice->byOpMode);
- pr_debug(" ePSMode= %d\n", (int)pDevice->ePSMode);
- pr_debug(" wRTSThreshold= %d\n", (int)pDevice->wRTSThreshold);
pr_debug(" byShortRetryLimit= %d\n", (int)pDevice->byShortRetryLimit);
pr_debug(" byLongRetryLimit= %d\n", (int)pDevice->byLongRetryLimit);
pr_debug(" byPreambleType= %d\n", (int)pDevice->byPreambleType);
pr_debug(" byShortPreamble= %d\n", (int)pDevice->byShortPreamble);
- pr_debug(" uConnectionRate= %d\n", (int)pDevice->uConnectionRate);
pr_debug(" byBBType= %d\n", (int)pDevice->byBBType);
- pr_debug(" pDevice->b11hEnable= %d\n", (int)pDevice->b11hEnable);
pr_debug(" pDevice->bDiversityRegCtlON= %d\n",
(int)pDevice->bDiversityRegCtlON);
}
-static void s_vCompleteCurrentMeasure(struct vnt_private *pDevice,
- unsigned char byResult)
-{
- unsigned int ii;
- unsigned long dwDuration = 0;
- unsigned char byRPI0 = 0;
-
- for (ii = 1; ii < 8; ii++) {
- pDevice->dwRPIs[ii] *= 255;
- dwDuration |= *((unsigned short *)(pDevice->pCurrMeasureEID->sReq.abyDuration));
- dwDuration <<= 10;
- pDevice->dwRPIs[ii] /= dwDuration;
- pDevice->abyRPIs[ii] = (unsigned char)pDevice->dwRPIs[ii];
- byRPI0 += pDevice->abyRPIs[ii];
- }
- pDevice->abyRPIs[0] = (0xFF - byRPI0);
-
- if (pDevice->uNumOfMeasureEIDs == 0) {
- VNTWIFIbMeasureReport(pDevice->pMgmt,
- true,
- pDevice->pCurrMeasureEID,
- byResult,
- pDevice->byBasicMap,
- pDevice->byCCAFraction,
- pDevice->abyRPIs
- );
- } else {
- VNTWIFIbMeasureReport(pDevice->pMgmt,
- false,
- pDevice->pCurrMeasureEID,
- byResult,
- pDevice->byBasicMap,
- pDevice->byCCAFraction,
- pDevice->abyRPIs
- );
- CARDbStartMeasure(pDevice, pDevice->pCurrMeasureEID++, pDevice->uNumOfMeasureEIDs);
- }
-}
-
//
// Initialisation of MAC & BBP registers
//
static void device_init_registers(struct vnt_private *pDevice)
{
+ unsigned long flags;
unsigned int ii;
unsigned char byValue;
unsigned char byValue1;
unsigned char byCCKPwrdBm = 0;
unsigned char byOFDMPwrdBm = 0;
- int zonetype = 0;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
MACbShutdown(pDevice->PortOffset);
- BBvSoftwareReset(pDevice->PortOffset);
+ BBvSoftwareReset(pDevice);
/* Do MACbSoftwareReset in MACvInitialize */
MACbSoftwareReset(pDevice->PortOffset);
@@ -481,11 +279,11 @@ static void device_init_registers(struct vnt_private *pDevice)
/* Get Local ID */
VNSvInPortB(pDevice->PortOffset + MAC_REG_LOCALID, &pDevice->byLocalID);
- spin_lock_irq(&pDevice->lock);
+ spin_lock_irqsave(&pDevice->lock, flags);
SROMvReadAllContents(pDevice->PortOffset, pDevice->abyEEPROM);
- spin_unlock_irq(&pDevice->lock);
+ spin_unlock_irqrestore(&pDevice->lock, flags);
/* Get Channel range */
pDevice->byMinChannel = 1;
@@ -558,41 +356,6 @@ static void device_init_registers(struct vnt_private *pDevice)
/* zonetype initial */
pDevice->byOriginalZonetype = pDevice->abyEEPROM[EEP_OFS_ZONETYPE];
- zonetype = Config_FileOperation(pDevice, false, NULL);
-
- if (zonetype >= 0) {
- if ((zonetype == 0) &&
- (pDevice->abyEEPROM[EEP_OFS_ZONETYPE] != 0x00)) {
- /* for USA */
- pDevice->abyEEPROM[EEP_OFS_ZONETYPE] = 0;
- pDevice->abyEEPROM[EEP_OFS_MAXCHANNEL] = 0x0B;
-
- pr_debug("Init Zone Type :USA\n");
- } else if ((zonetype == 1) &&
- (pDevice->abyEEPROM[EEP_OFS_ZONETYPE] != 0x01)) {
- /* for Japan */
- pDevice->abyEEPROM[EEP_OFS_ZONETYPE] = 0x01;
- pDevice->abyEEPROM[EEP_OFS_MAXCHANNEL] = 0x0D;
- } else if ((zonetype == 2) &&
- (pDevice->abyEEPROM[EEP_OFS_ZONETYPE] != 0x02)) {
- /* for Europe */
- pDevice->abyEEPROM[EEP_OFS_ZONETYPE] = 0x02;
- pDevice->abyEEPROM[EEP_OFS_MAXCHANNEL] = 0x0D;
-
- pr_debug("Init Zone Type :Europe\n");
- } else {
- if (zonetype != pDevice->abyEEPROM[EEP_OFS_ZONETYPE])
- pr_debug("zonetype in file[%02x] mismatch with in EEPROM[%02x]\n",
- zonetype,
- pDevice->abyEEPROM[EEP_OFS_ZONETYPE]);
- else
- pr_debug("Read Zonetype file success,use default zonetype setting[%02x]\n",
- zonetype);
- }
- } else {
- pr_debug("Read Zonetype file fail,use default zonetype setting[%02x]\n",
- SROMbyReadEmbedded(pDevice->PortOffset, EEP_OFS_ZONETYPE));
- }
/* Get RFType */
pDevice->byRFType = SROMbyReadEmbedded(pDevice->PortOffset, EEP_OFS_RFTYPE);
@@ -636,14 +399,9 @@ static void device_init_registers(struct vnt_private *pDevice)
}
/* recover 12,13 ,14channel for EUROPE by 11 channel */
- if (((pDevice->abyEEPROM[EEP_OFS_ZONETYPE] == ZoneType_Japan) ||
- (pDevice->abyEEPROM[EEP_OFS_ZONETYPE] == ZoneType_Europe)) &&
- (pDevice->byOriginalZonetype == ZoneType_USA)) {
- for (ii = 11; ii < 14; ii++) {
- pDevice->abyCCKPwrTbl[ii] = pDevice->abyCCKPwrTbl[10];
- pDevice->abyOFDMPwrTbl[ii] = pDevice->abyOFDMPwrTbl[10];
-
- }
+ for (ii = 11; ii < 14; ii++) {
+ pDevice->abyCCKPwrTbl[ii] = pDevice->abyCCKPwrTbl[10];
+ pDevice->abyOFDMPwrTbl[ii] = pDevice->abyOFDMPwrTbl[10];
}
/* Load OFDM A Power Table */
@@ -657,8 +415,6 @@ static void device_init_registers(struct vnt_private *pDevice)
(unsigned char)(ii + EEP_OFS_OFDMA_PWR_dBm));
}
- init_channel_table((void *)pDevice);
-
if (pDevice->byLocalID > REV_ID_VT3253_B1) {
MACvSelectPage1(pDevice->PortOffset);
@@ -690,21 +446,12 @@ static void device_init_registers(struct vnt_private *pDevice)
BBvSetVGAGainOffset(pDevice, pDevice->abyBBVGA[0]);
}
- BBvSetRxAntennaMode(pDevice->PortOffset, pDevice->byRxAntennaMode);
- BBvSetTxAntennaMode(pDevice->PortOffset, pDevice->byTxAntennaMode);
-
- pDevice->byCurrentCh = 0;
+ BBvSetRxAntennaMode(pDevice, pDevice->byRxAntennaMode);
+ BBvSetTxAntennaMode(pDevice, pDevice->byTxAntennaMode);
/* Set BB and packet type at the same time. */
/* Set Short Slot Time, xIFS, and RSPINF. */
- if (pDevice->uConnectionRate == RATE_AUTO)
- pDevice->wCurrentRate = RATE_54M;
- else
- pDevice->wCurrentRate = (unsigned short)pDevice->uConnectionRate;
-
- /* default G Mode */
- VNTWIFIbConfigPhyMode(pDevice->pMgmt, PHY_TYPE_11G);
- VNTWIFIbConfigPhyMode(pDevice->pMgmt, PHY_TYPE_AUTO);
+ pDevice->wCurrentRate = RATE_54M;
pDevice->bRadioOff = false;
@@ -726,8 +473,6 @@ static void device_init_registers(struct vnt_private *pDevice)
if (pDevice->bHWRadioOff || pDevice->bRadioControlOff)
CARDbRadioPowerOff(pDevice);
- pMgmt->eScanType = WMAC_SCAN_PASSIVE;
-
/* get Permanent network address */
SROMvReadEtherAddress(pDevice->PortOffset, pDevice->abyCurrentNetAddr);
pr_debug("Network address = %pM\n", pDevice->abyCurrentNetAddr);
@@ -740,223 +485,39 @@ static void device_init_registers(struct vnt_private *pDevice)
if (pDevice->byLocalID <= REV_ID_VT3253_A1)
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_RCR, RCR_WPAERR);
- pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
-
/* Turn On Rx DMA */
MACvReceive0(pDevice->PortOffset);
MACvReceive1(pDevice->PortOffset);
/* start the adapter */
MACvStart(pDevice->PortOffset);
-
- netif_stop_queue(pDevice->dev);
}
static void device_init_diversity_timer(struct vnt_private *pDevice)
{
init_timer(&pDevice->TimerSQ3Tmax1);
pDevice->TimerSQ3Tmax1.data = (unsigned long) pDevice;
- pDevice->TimerSQ3Tmax1.function = (TimerFunction)TimerSQ3CallBack;
+ pDevice->TimerSQ3Tmax1.function = TimerSQ3CallBack;
pDevice->TimerSQ3Tmax1.expires = RUN_AT(HZ);
init_timer(&pDevice->TimerSQ3Tmax2);
pDevice->TimerSQ3Tmax2.data = (unsigned long) pDevice;
- pDevice->TimerSQ3Tmax2.function = (TimerFunction)TimerSQ3CallBack;
+ pDevice->TimerSQ3Tmax2.function = TimerSQ3CallBack;
pDevice->TimerSQ3Tmax2.expires = RUN_AT(HZ);
init_timer(&pDevice->TimerSQ3Tmax3);
pDevice->TimerSQ3Tmax3.data = (unsigned long) pDevice;
- pDevice->TimerSQ3Tmax3.function = (TimerFunction)TimerState1CallBack;
+ pDevice->TimerSQ3Tmax3.function = TimerState1CallBack;
pDevice->TimerSQ3Tmax3.expires = RUN_AT(HZ);
}
-static bool device_release_WPADEV(struct vnt_private *pDevice)
-{
- viawget_wpa_header *wpahdr;
- int ii = 0;
-
- //send device close to wpa_supplicnat layer
- if (pDevice->bWPADEVUp) {
- wpahdr = (viawget_wpa_header *)pDevice->skb->data;
- wpahdr->type = VIAWGET_DEVICECLOSE_MSG;
- wpahdr->resp_ie_len = 0;
- wpahdr->req_ie_len = 0;
- skb_put(pDevice->skb, sizeof(viawget_wpa_header));
- pDevice->skb->dev = pDevice->wpadev;
- skb_reset_mac_header(pDevice->skb);
- pDevice->skb->pkt_type = PACKET_HOST;
- pDevice->skb->protocol = htons(ETH_P_802_2);
- memset(pDevice->skb->cb, 0, sizeof(pDevice->skb->cb));
- netif_rx(pDevice->skb);
- pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
-
- while (pDevice->bWPADEVUp) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ / 20); //wait 50ms
- ii++;
- if (ii > 20)
- break;
- }
- }
- return true;
-}
-
-static const struct net_device_ops device_netdev_ops = {
- .ndo_open = device_open,
- .ndo_stop = device_close,
- .ndo_do_ioctl = device_ioctl,
- .ndo_start_xmit = device_xmit,
- .ndo_set_rx_mode = device_set_multi,
-};
-
-static int
-vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
-{
- static bool bFirst = true;
- struct net_device *dev = NULL;
- PCHIP_INFO pChip_info = (PCHIP_INFO)ent->driver_data;
- struct vnt_private *pDevice;
- int rc;
-
- dev = alloc_etherdev(sizeof(*pDevice));
-
- pDevice = netdev_priv(dev);
-
- if (dev == NULL) {
- pr_err(DEVICE_NAME ": allocate net device failed\n");
- return -ENOMEM;
- }
-
- // Chain it all together
- SET_NETDEV_DEV(dev, &pcid->dev);
-
- if (bFirst) {
- pr_notice("%s Ver. %s\n", DEVICE_FULL_DRV_NAM, DEVICE_VERSION);
- pr_notice("Copyright (c) 2003 VIA Networking Technologies, Inc.\n");
- bFirst = false;
- }
-
- vt6655_init_info(pcid, &pDevice, pChip_info);
- pDevice->dev = dev;
-
- if (pci_enable_device(pcid)) {
- device_free_info(pDevice);
- return -ENODEV;
- }
- dev->irq = pcid->irq;
-
-#ifdef DEBUG
- pr_debug("Before get pci_info memaddr is %x\n", pDevice->memaddr);
-#endif
- if (!device_get_pci_info(pDevice, pcid)) {
- pr_err(DEVICE_NAME ": Failed to find PCI device.\n");
- device_free_info(pDevice);
- return -ENODEV;
- }
-
-#if 1
-
-#ifdef DEBUG
-
- pr_debug("after get pci_info memaddr is %x, io addr is %x,io_size is %d\n", pDevice->memaddr, pDevice->ioaddr, pDevice->io_size);
- {
- int i;
- u32 bar, len;
- u32 address[] = {
- PCI_BASE_ADDRESS_0,
- PCI_BASE_ADDRESS_1,
- PCI_BASE_ADDRESS_2,
- PCI_BASE_ADDRESS_3,
- PCI_BASE_ADDRESS_4,
- PCI_BASE_ADDRESS_5,
- 0};
- for (i = 0; address[i]; i++) {
- pci_read_config_dword(pcid, address[i], &bar);
- pr_debug("bar %d is %x\n", i, bar);
- if (!bar) {
- pr_debug("bar %d not implemented\n", i);
- continue;
- }
- if (bar & PCI_BASE_ADDRESS_SPACE_IO) {
- /* This is IO */
-
- len = bar & (PCI_BASE_ADDRESS_IO_MASK & 0xFFFF);
- len = len & ~(len - 1);
-
- pr_debug("IO space: len in IO %x, BAR %d\n", len, i);
- } else {
- len = bar & 0xFFFFFFF0;
- len = ~len + 1;
-
- pr_debug("len in MEM %x, BAR %d\n", len, i);
- }
- }
- }
-#endif
-
-#endif
-
- pDevice->PortOffset = ioremap(pDevice->memaddr & PCI_BASE_ADDRESS_MEM_MASK, pDevice->io_size);
-
- if (pDevice->PortOffset == NULL) {
- pr_err(DEVICE_NAME ": Failed to IO remapping ..\n");
- device_free_info(pDevice);
- return -ENODEV;
- }
-
- rc = pci_request_regions(pcid, DEVICE_NAME);
- if (rc) {
- pr_err(DEVICE_NAME ": Failed to find PCI device\n");
- device_free_info(pDevice);
- return -ENODEV;
- }
-
- dev->base_addr = pDevice->ioaddr;
- // do reset
- if (!MACbSoftwareReset(pDevice->PortOffset)) {
- pr_err(DEVICE_NAME ": Failed to access MAC hardware..\n");
- device_free_info(pDevice);
- return -ENODEV;
- }
- // initial to reload eeprom
- MACvInitialize(pDevice->PortOffset);
- MACvReadEtherAddress(pDevice->PortOffset, dev->dev_addr);
-
- device_get_options(pDevice);
- device_set_options(pDevice);
- //Mask out the options cannot be set to the chip
- pDevice->sOpts.flags &= pChip_info->flags;
-
- //Enable the chip specified capabilities
- pDevice->flags = pDevice->sOpts.flags | (pChip_info->flags & 0xFF000000UL);
- pDevice->tx_80211 = device_dma0_tx_80211;
- pDevice->sMgmtObj.pAdapter = (void *)pDevice;
- pDevice->pMgmt = &(pDevice->sMgmtObj);
-
- dev->irq = pcid->irq;
- dev->netdev_ops = &device_netdev_ops;
-
- dev->wireless_handlers = (struct iw_handler_def *)&iwctl_handler_def;
-
- rc = register_netdev(dev);
- if (rc) {
- pr_err(DEVICE_NAME " Failed to register netdev\n");
- device_free_info(pDevice);
- return -ENODEV;
- }
- device_print_info(pDevice);
- pci_set_drvdata(pcid, pDevice);
- return 0;
-}
-
static void device_print_info(struct vnt_private *pDevice)
{
- struct net_device *dev = pDevice->dev;
+ dev_info(&pDevice->pcid->dev, "%s\n", get_chip_name(pDevice->chip_id));
- pr_info("%s: %s\n", dev->name, get_chip_name(pDevice->chip_id));
- pr_info("%s: MAC=%pM IO=0x%lx Mem=0x%lx IRQ=%d\n",
- dev->name, dev->dev_addr, (unsigned long)pDevice->ioaddr,
- (unsigned long)pDevice->PortOffset, pDevice->dev->irq);
+ dev_info(&pDevice->pcid->dev, "MAC=%pM IO=0x%lx Mem=0x%lx IRQ=%d\n",
+ pDevice->abyCurrentNetAddr, (unsigned long)pDevice->ioaddr,
+ (unsigned long)pDevice->PortOffset, pDevice->pcid->irq);
}
static void vt6655_init_info(struct pci_dev *pcid,
@@ -1003,31 +564,20 @@ static bool device_get_pci_info(struct vnt_private *pDevice,
static void device_free_info(struct vnt_private *pDevice)
{
- struct net_device *dev = pDevice->dev;
-
- ASSERT(pDevice);
-//2008-0714-01<Add>by chester
- device_release_WPADEV(pDevice);
-
-//2008-07-21-01<Add>by MikeLiu
-//unregister wpadev
- if (wpa_set_wpadev(pDevice, 0) != 0)
- pr_err("unregister wpadev fail?\n");
+ if (!pDevice)
+ return;
-#ifdef HOSTAP
- if (dev)
- vt6655_hostap_set_hostapd(pDevice, 0, 0);
-#endif
- if (dev)
- unregister_netdev(dev);
+ if (pDevice->mac_hw)
+ ieee80211_unregister_hw(pDevice->hw);
if (pDevice->PortOffset)
iounmap(pDevice->PortOffset);
if (pDevice->pcid)
pci_release_regions(pDevice->pcid);
- if (dev)
- free_netdev(dev);
+
+ if (pDevice->hw)
+ ieee80211_free_hw(pDevice->hw);
}
static bool device_init_rings(struct vnt_private *pDevice)
@@ -1176,21 +726,6 @@ static void device_init_rd1_ring(struct vnt_private *pDevice)
pDevice->pCurrRD[1] = &(pDevice->aRD1Ring[0]);
}
-static void device_init_defrag_cb(struct vnt_private *pDevice)
-{
- int i;
- PSDeFragControlBlock pDeF;
-
- /* Init the fragment ctl entries */
- for (i = 0; i < CB_MAX_RX_FRAG; i++) {
- pDeF = &(pDevice->sRxDFCB[i]);
- if (!device_alloc_frag_buf(pDevice, pDeF))
- dev_err(&pDevice->pcid->dev, "can not alloc frag bufs\n");
- }
- pDevice->cbDFCB = CB_MAX_RX_FRAG;
- pDevice->cbFreeDFCB = pDevice->cbDFCB;
-}
-
static void device_free_rd0_ring(struct vnt_private *pDevice)
{
int i;
@@ -1204,7 +739,7 @@ static void device_free_rd0_ring(struct vnt_private *pDevice)
dev_kfree_skb(pRDInfo->skb);
- kfree((void *)pDesc->pRDInfo);
+ kfree(pDesc->pRDInfo);
}
}
@@ -1221,21 +756,7 @@ static void device_free_rd1_ring(struct vnt_private *pDevice)
dev_kfree_skb(pRDInfo->skb);
- kfree((void *)pDesc->pRDInfo);
- }
-}
-
-static void device_free_frag_buf(struct vnt_private *pDevice)
-{
- PSDeFragControlBlock pDeF;
- int i;
-
- for (i = 0; i < CB_MAX_RX_FRAG; i++) {
- pDeF = &(pDevice->sRxDFCB[i]);
-
- if (pDeF->skb)
- dev_kfree_skb(pDeF->skb);
-
+ kfree(pDesc->pRDInfo);
}
}
@@ -1305,7 +826,7 @@ static void device_free_td0_ring(struct vnt_private *pDevice)
if (pTDInfo->skb)
dev_kfree_skb(pTDInfo->skb);
- kfree((void *)pDesc->pTDInfo);
+ kfree(pDesc->pTDInfo);
}
}
@@ -1324,7 +845,7 @@ static void device_free_td1_ring(struct vnt_private *pDevice)
if (pTDInfo->skb)
dev_kfree_skb(pTDInfo->skb);
- kfree((void *)pDesc->pTDInfo);
+ kfree(pDesc->pTDInfo);
}
}
@@ -1340,7 +861,7 @@ static int device_rx_srv(struct vnt_private *pDevice, unsigned int uIdx)
pRD = pRD->next) {
if (works++ > 15)
break;
- if (device_receive_frame(pDevice, pRD)) {
+ if (vnt_receive_frame(pDevice, pRD)) {
if (!device_alloc_rx_buf(pDevice, pRD)) {
dev_err(&pDevice->pcid->dev,
"can not allocate rx buf\n");
@@ -1348,7 +869,6 @@ static int device_rx_srv(struct vnt_private *pDevice, unsigned int uIdx)
}
}
pRD->m_rd0RD0.f1Owner = OWNED_BY_NIC;
- pDevice->dev->last_rx = jiffies;
}
pDevice->pCurrRD[uIdx] = pRD;
@@ -1364,9 +884,12 @@ static bool device_alloc_rx_buf(struct vnt_private *pDevice, PSRxDesc pRD)
if (pRDInfo->skb == NULL)
return false;
ASSERT(pRDInfo->skb);
- pRDInfo->skb->dev = pDevice->dev;
- pRDInfo->skb_dma = pci_map_single(pDevice->pcid, skb_tail_pointer(pRDInfo->skb),
- pDevice->rx_buf_sz, PCI_DMA_FROMDEVICE);
+
+ pRDInfo->skb_dma =
+ pci_map_single(pDevice->pcid,
+ skb_put(pRDInfo->skb, skb_tailroom(pRDInfo->skb)),
+ pDevice->rx_buf_sz, PCI_DMA_FROMDEVICE);
+
*((unsigned int *)&(pRD->m_rd0RD0)) = 0; /* FIX cast */
pRD->m_rd0RD0.wResCount = cpu_to_le16(pDevice->rx_buf_sz);
@@ -1377,31 +900,84 @@ static bool device_alloc_rx_buf(struct vnt_private *pDevice, PSRxDesc pRD)
return true;
}
-bool device_alloc_frag_buf(struct vnt_private *pDevice,
- PSDeFragControlBlock pDeF)
+static const u8 fallback_rate0[5][5] = {
+ {RATE_18M, RATE_18M, RATE_12M, RATE_12M, RATE_12M},
+ {RATE_24M, RATE_24M, RATE_18M, RATE_12M, RATE_12M},
+ {RATE_36M, RATE_36M, RATE_24M, RATE_18M, RATE_18M},
+ {RATE_48M, RATE_48M, RATE_36M, RATE_24M, RATE_24M},
+ {RATE_54M, RATE_54M, RATE_48M, RATE_36M, RATE_36M}
+};
+
+static const u8 fallback_rate1[5][5] = {
+ {RATE_18M, RATE_18M, RATE_12M, RATE_6M, RATE_6M},
+ {RATE_24M, RATE_24M, RATE_18M, RATE_6M, RATE_6M},
+ {RATE_36M, RATE_36M, RATE_24M, RATE_12M, RATE_12M},
+ {RATE_48M, RATE_48M, RATE_24M, RATE_12M, RATE_12M},
+ {RATE_54M, RATE_54M, RATE_36M, RATE_18M, RATE_18M}
+};
+
+static int vnt_int_report_rate(struct vnt_private *priv,
+ PDEVICE_TD_INFO context, u8 tsr0, u8 tsr1)
{
- pDeF->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
- if (pDeF->skb == NULL)
- return false;
- ASSERT(pDeF->skb);
- pDeF->skb->dev = pDevice->dev;
+ struct vnt_tx_fifo_head *fifo_head;
+ struct ieee80211_tx_info *info;
+ struct ieee80211_rate *rate;
+ u16 fb_option;
+ u8 tx_retry = (tsr0 & TSR0_NCR);
+ s8 idx;
+
+ if (!context)
+ return -ENOMEM;
- return true;
+ if (!context->skb)
+ return -EINVAL;
+
+ fifo_head = (struct vnt_tx_fifo_head *)context->buf;
+ fb_option = (le16_to_cpu(fifo_head->fifo_ctl) &
+ (FIFOCTL_AUTO_FB_0 | FIFOCTL_AUTO_FB_1));
+
+ info = IEEE80211_SKB_CB(context->skb);
+ idx = info->control.rates[0].idx;
+
+ if (fb_option && !(tsr1 & TSR1_TERR)) {
+ u8 tx_rate;
+ u8 retry = tx_retry;
+
+ rate = ieee80211_get_tx_rate(priv->hw, info);
+ tx_rate = rate->hw_value - RATE_18M;
+
+ if (retry > 4)
+ retry = 4;
+
+ if (fb_option & FIFOCTL_AUTO_FB_0)
+ tx_rate = fallback_rate0[tx_rate][retry];
+ else if (fb_option & FIFOCTL_AUTO_FB_1)
+ tx_rate = fallback_rate1[tx_rate][retry];
+
+ if (info->band == IEEE80211_BAND_5GHZ)
+ idx = tx_rate - RATE_6M;
+ else
+ idx = tx_rate;
+ }
+
+ ieee80211_tx_info_clear_status(info);
+
+ info->status.rates[0].count = tx_retry;
+
+ if (!(tsr1 & TSR1_TERR)) {
+ info->status.rates[0].idx = idx;
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ }
+
+ return 0;
}
static int device_tx_srv(struct vnt_private *pDevice, unsigned int uIdx)
{
PSTxDesc pTD;
- bool bFull = false;
int works = 0;
unsigned char byTsr0;
unsigned char byTsr1;
- unsigned int uFrameSize, uFIFOHeaderSize;
- PSTxBufHead pTxBufHead;
- struct net_device_stats *pStats = &pDevice->dev->stats;
- struct sk_buff *skb;
- unsigned int uNodeIndex;
- PSMgmtObject pMgmt = pDevice->pMgmt;
for (pTD = pDevice->apTailTD[uIdx]; pDevice->iTDUsed[uIdx] > 0; pTD = pTD->next) {
if (pTD->m_td0TD0.f1Owner == OWNED_BY_NIC)
@@ -1415,22 +991,8 @@ static int device_tx_srv(struct vnt_private *pDevice, unsigned int uIdx)
//Only the status of first TD in the chain is correct
if (pTD->m_td1TD1.byTCR & TCR_STP) {
if ((pTD->pTDInfo->byFlags & TD_FLAGS_NETIF_SKB) != 0) {
- uFIFOHeaderSize = pTD->pTDInfo->dwHeaderLength;
- uFrameSize = pTD->pTDInfo->dwReqCount - uFIFOHeaderSize;
- pTxBufHead = (PSTxBufHead) (pTD->pTDInfo->buf);
- // Update the statistics based on the Transmit status
- // now, we DONT check TSR0_CDH
-
- STAvUpdateTDStatCounter(&pDevice->scStatistic,
- byTsr0, byTsr1,
- (unsigned char *)(pTD->pTDInfo->buf + uFIFOHeaderSize),
- uFrameSize, uIdx);
-
- BSSvUpdateNodeTxCounter(pDevice,
- byTsr0, byTsr1,
- (unsigned char *)(pTD->pTDInfo->buf),
- uFIFOHeaderSize
- );
+
+ vnt_int_report_rate(pDevice, pTD->pTDInfo, byTsr0, byTsr1);
if (!(byTsr1 & TSR1_TERR)) {
if (byTsr0 != 0) {
@@ -1438,28 +1000,9 @@ static int device_tx_srv(struct vnt_private *pDevice, unsigned int uIdx)
(int)uIdx, byTsr1,
byTsr0);
}
- if ((pTxBufHead->wFragCtl & FRAGCTL_ENDFRAG) != FRAGCTL_NONFRAG)
- pDevice->s802_11Counter.TransmittedFragmentCount++;
-
- pStats->tx_packets++;
- pStats->tx_bytes += pTD->pTDInfo->skb->len;
} else {
pr_debug(" Tx[%d] dropped & tsr1[%02X] tsr0[%02X]\n",
(int)uIdx, byTsr1, byTsr0);
- pStats->tx_errors++;
- pStats->tx_dropped++;
- }
- }
-
- if ((pTD->pTDInfo->byFlags & TD_FLAGS_PRIV_SKB) != 0) {
- if (pDevice->bEnableHostapd) {
- pr_debug("tx call back netif..\n");
- skb = pTD->pTDInfo->skb;
- skb->dev = pDevice->apdev;
- skb_reset_mac_header(skb);
- skb->pkt_type = PACKET_OTHERHOST;
- memset(skb->cb, 0, sizeof(skb->cb));
- netif_rx(skb);
}
}
@@ -1468,49 +1011,12 @@ static int device_tx_srv(struct vnt_private *pDevice, unsigned int uIdx)
pr_debug(" Tx[%d] fail has error. tsr1[%02X] tsr0[%02X]\n",
(int)uIdx, byTsr1, byTsr0);
}
-
-
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) &&
- (pTD->pTDInfo->byFlags & TD_FLAGS_NETIF_SKB)) {
- unsigned short wAID;
- unsigned char byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
-
- skb = pTD->pTDInfo->skb;
- if (BSSDBbIsSTAInNodeDB(pMgmt, (unsigned char *)(skb->data), &uNodeIndex)) {
- if (pMgmt->sNodeDBTable[uNodeIndex].bPSEnable) {
- skb_queue_tail(&pMgmt->sNodeDBTable[uNodeIndex].sTxPSQueue, skb);
- pMgmt->sNodeDBTable[uNodeIndex].wEnQueueCnt++;
- // set tx map
- wAID = pMgmt->sNodeDBTable[uNodeIndex].wAID;
- pMgmt->abyPSTxMap[wAID >> 3] |= byMask[wAID & 7];
- pTD->pTDInfo->byFlags &= ~(TD_FLAGS_NETIF_SKB);
- pr_debug("tx_srv:tx fail re-queue sta index= %d, QueCnt= %d\n",
- (int)uNodeIndex,
- pMgmt->sNodeDBTable[uNodeIndex].wEnQueueCnt);
- pStats->tx_errors--;
- pStats->tx_dropped--;
- }
- }
- }
}
device_free_tx_buf(pDevice, pTD);
pDevice->iTDUsed[uIdx]--;
}
}
- if (uIdx == TYPE_AC0DMA) {
- // RESERV_AC0DMA reserved for relay
-
- if (AVAIL_TD(pDevice, uIdx) < RESERV_AC0DMA) {
- bFull = true;
- pr_debug(" AC0DMA is Full = %d\n",
- pDevice->iTDUsed[uIdx]);
- }
- if (netif_queue_stopped(pDevice->dev) && !bFull)
- netif_wake_queue(pDevice->dev);
-
- }
-
pDevice->apTailTD[uIdx] = pTD;
return works;
@@ -1521,10 +1027,6 @@ static void device_error(struct vnt_private *pDevice, unsigned short status)
if (status & ISR_FETALERR) {
dev_err(&pDevice->pcid->dev, "Hardware fatal error\n");
- netif_stop_queue(pDevice->dev);
- del_timer(&pDevice->sTimerCommand);
- del_timer(&(pDevice->pMgmt->sTimerSecondCallback));
- pDevice->bCmdRunning = false;
MACbShutdown(pDevice->PortOffset);
return;
}
@@ -1541,7 +1043,9 @@ static void device_free_tx_buf(struct vnt_private *pDevice, PSTxDesc pDesc)
PCI_DMA_TODEVICE);
}
- if ((pTDInfo->byFlags & TD_FLAGS_NETIF_SKB) != 0)
+ if (pTDInfo->byFlags & TD_FLAGS_NETIF_SKB)
+ ieee80211_tx_status_irqsafe(pDevice->hw, skb);
+ else
dev_kfree_skb_irq(skb);
pTDInfo->skb_dma = 0;
@@ -1549,671 +1053,13 @@ static void device_free_tx_buf(struct vnt_private *pDevice, PSTxDesc pDesc)
pTDInfo->byFlags = 0;
}
-static int device_open(struct net_device *dev)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- int i;
-#ifdef WPA_SM_Transtatus
- extern SWPAResult wpa_Result;
-#endif
-
- pDevice->rx_buf_sz = PKT_BUF_SZ;
- if (!device_init_rings(pDevice))
- return -ENOMEM;
-
-//2008-5-13 <add> by chester
- i = request_irq(pDevice->pcid->irq, &device_intr, IRQF_SHARED, dev->name, dev);
- if (i)
- return i;
-
-#ifdef WPA_SM_Transtatus
- memset(wpa_Result.ifname, 0, sizeof(wpa_Result.ifname));
- wpa_Result.proto = 0;
- wpa_Result.key_mgmt = 0;
- wpa_Result.eap_type = 0;
- wpa_Result.authenticated = false;
- pDevice->fWPA_Authened = false;
-#endif
- pr_debug("call device init rd0 ring\n");
- device_init_rd0_ring(pDevice);
- device_init_rd1_ring(pDevice);
- device_init_defrag_cb(pDevice);
- device_init_td0_ring(pDevice);
- device_init_td1_ring(pDevice);
-
- if (pDevice->bDiversityRegCtlON)
- device_init_diversity_timer(pDevice);
-
- vMgrObjectInit(pDevice);
- vMgrTimerInit(pDevice);
-
- pr_debug("call device_init_registers\n");
- device_init_registers(pDevice);
-
- MACvReadEtherAddress(pDevice->PortOffset, pDevice->abyCurrentNetAddr);
- memcpy(pDevice->pMgmt->abyMACAddr, pDevice->abyCurrentNetAddr, ETH_ALEN);
- device_set_multi(pDevice->dev);
-
- // Init for Key Management
- KeyvInitTable(&pDevice->sKey, pDevice->PortOffset);
- add_timer(&(pDevice->pMgmt->sTimerSecondCallback));
-
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- pDevice->bwextcount = 0;
- pDevice->bWPASuppWextEnabled = false;
-#endif
- pDevice->byReAssocCount = 0;
- pDevice->bWPADEVUp = false;
- // Patch: if WEP key already set by iwconfig but device not yet open
- if (pDevice->bEncryptionEnable && pDevice->bTransmitKey) {
- KeybSetDefaultKey(&(pDevice->sKey),
- (unsigned long)(pDevice->byKeyIndex | (1 << 31)),
- pDevice->uKeyLength,
- NULL,
- pDevice->abyKey,
- KEY_CTL_WEP,
- pDevice->PortOffset,
- pDevice->byLocalID
- );
- pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
- }
-
- pr_debug("call MACvIntEnable\n");
- MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
-
- if (pDevice->pMgmt->eConfigMode == WMAC_CONFIG_AP) {
- bScheduleCommand((void *)pDevice, WLAN_CMD_RUN_AP, NULL);
- } else {
- bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, NULL);
- bScheduleCommand((void *)pDevice, WLAN_CMD_SSID, NULL);
- }
- pDevice->flags |= DEVICE_FLAGS_OPENED;
-
- pr_debug("device_open success..\n");
- return 0;
-}
-
-static int device_close(struct net_device *dev)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = pDevice->pMgmt;
-//2007-1121-02<Add>by EinsnLiu
- if (pDevice->bLinkPass) {
- bScheduleCommand((void *)pDevice, WLAN_CMD_DISASSOCIATE, NULL);
- mdelay(30);
- }
-
- del_timer(&pDevice->sTimerTxData);
- del_timer(&pDevice->sTimerCommand);
- del_timer(&pMgmt->sTimerSecondCallback);
- if (pDevice->bDiversityRegCtlON) {
- del_timer(&pDevice->TimerSQ3Tmax1);
- del_timer(&pDevice->TimerSQ3Tmax2);
- del_timer(&pDevice->TimerSQ3Tmax3);
- }
-
- netif_stop_queue(dev);
- pDevice->bCmdRunning = false;
- MACbShutdown(pDevice->PortOffset);
- MACbSoftwareReset(pDevice->PortOffset);
- CARDbRadioPowerOff(pDevice);
-
- pDevice->bLinkPass = false;
- memset(pMgmt->abyCurrBSSID, 0, 6);
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- device_free_td0_ring(pDevice);
- device_free_td1_ring(pDevice);
- device_free_rd0_ring(pDevice);
- device_free_rd1_ring(pDevice);
- device_free_frag_buf(pDevice);
- device_free_rings(pDevice);
- BSSvClearNodeDBTable(pDevice, 0);
- free_irq(dev->irq, dev);
- pDevice->flags &= (~DEVICE_FLAGS_OPENED);
- //2008-0714-01<Add>by chester
- device_release_WPADEV(pDevice);
-
- pr_debug("device_close..\n");
- return 0;
-}
-
-static int device_dma0_tx_80211(struct sk_buff *skb, struct net_device *dev)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- unsigned char *pbMPDU;
- unsigned int cbMPDULen = 0;
-
- pr_debug("device_dma0_tx_80211\n");
- spin_lock_irq(&pDevice->lock);
-
- if (AVAIL_TD(pDevice, TYPE_TXDMA0) <= 0) {
- pr_debug("device_dma0_tx_80211, td0 <=0\n");
- dev_kfree_skb_irq(skb);
- spin_unlock_irq(&pDevice->lock);
- return 0;
- }
-
- if (pDevice->bStopTx0Pkt) {
- dev_kfree_skb_irq(skb);
- spin_unlock_irq(&pDevice->lock);
- return 0;
- }
-
- cbMPDULen = skb->len;
- pbMPDU = skb->data;
-
- vDMA0_tx_80211(pDevice, skb, pbMPDU, cbMPDULen);
-
- spin_unlock_irq(&pDevice->lock);
-
- return 0;
-}
-
-bool device_dma0_xmit(struct vnt_private *pDevice,
- struct sk_buff *skb, unsigned int uNodeIndex)
-{
- PSMgmtObject pMgmt = pDevice->pMgmt;
- PSTxDesc pHeadTD, pLastTD;
- unsigned int cbFrameBodySize;
- unsigned int uMACfragNum;
- unsigned char byPktType;
- bool bNeedEncryption = false;
- PSKeyItem pTransmitKey = NULL;
- unsigned int cbHeaderSize;
- unsigned int ii;
- SKeyItem STempKey;
-
- if (pDevice->bStopTx0Pkt) {
- dev_kfree_skb_irq(skb);
- return false;
- }
-
- if (AVAIL_TD(pDevice, TYPE_TXDMA0) <= 0) {
- dev_kfree_skb_irq(skb);
- pr_debug("device_dma0_xmit, td0 <=0\n");
- return false;
- }
-
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- if (pDevice->uAssocCount == 0) {
- dev_kfree_skb_irq(skb);
- pr_debug("device_dma0_xmit, assocCount = 0\n");
- return false;
- }
- }
-
- pHeadTD = pDevice->apCurrTD[TYPE_TXDMA0];
-
- pHeadTD->m_td1TD1.byTCR = (TCR_EDP|TCR_STP);
-
- memcpy(pDevice->sTxEthHeader.abyDstAddr, (unsigned char *)(skb->data), ETH_HLEN);
- cbFrameBodySize = skb->len - ETH_HLEN;
-
- // 802.1H
- if (ntohs(pDevice->sTxEthHeader.wType) > ETH_DATA_LEN)
- cbFrameBodySize += 8;
-
- uMACfragNum = cbGetFragCount(pDevice, pTransmitKey, cbFrameBodySize, &pDevice->sTxEthHeader);
-
- if (uMACfragNum > AVAIL_TD(pDevice, TYPE_TXDMA0)) {
- dev_kfree_skb_irq(skb);
- return false;
- }
- byPktType = (unsigned char)pDevice->byPacketType;
-
- if (pDevice->bFixRate) {
- if (pDevice->eCurrentPHYType == PHY_TYPE_11B) {
- if (pDevice->uConnectionRate >= RATE_11M)
- pDevice->wCurrentRate = RATE_11M;
- else
- pDevice->wCurrentRate = (unsigned short)pDevice->uConnectionRate;
- } else {
- if (pDevice->uConnectionRate >= RATE_54M)
- pDevice->wCurrentRate = RATE_54M;
- else
- pDevice->wCurrentRate = (unsigned short)pDevice->uConnectionRate;
- }
- } else {
- pDevice->wCurrentRate = pDevice->pMgmt->sNodeDBTable[uNodeIndex].wTxDataRate;
- }
-
- //preamble type
- if (pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble)
- pDevice->byPreambleType = pDevice->byShortPreamble;
- else
- pDevice->byPreambleType = PREAMBLE_LONG;
-
- pr_debug("dma0: pDevice->wCurrentRate = %d\n", pDevice->wCurrentRate);
-
- if (pDevice->wCurrentRate <= RATE_11M) {
- byPktType = PK_TYPE_11B;
- } else if (pDevice->eCurrentPHYType == PHY_TYPE_11A) {
- byPktType = PK_TYPE_11A;
- } else {
- if (pDevice->bProtectMode)
- byPktType = PK_TYPE_11GB;
- else
- byPktType = PK_TYPE_11GA;
- }
-
- if (pDevice->bEncryptionEnable)
- bNeedEncryption = true;
-
- if (pDevice->bEnableHostWEP) {
- pTransmitKey = &STempKey;
- pTransmitKey->byCipherSuite = pMgmt->sNodeDBTable[uNodeIndex].byCipherSuite;
- pTransmitKey->dwKeyIndex = pMgmt->sNodeDBTable[uNodeIndex].dwKeyIndex;
- pTransmitKey->uKeyLength = pMgmt->sNodeDBTable[uNodeIndex].uWepKeyLength;
- pTransmitKey->dwTSC47_16 = pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16;
- pTransmitKey->wTSC15_0 = pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0;
- memcpy(pTransmitKey->abyKey,
- &pMgmt->sNodeDBTable[uNodeIndex].abyWepKey[0],
- pTransmitKey->uKeyLength
- );
- }
- vGenerateFIFOHeader(pDevice, byPktType, pDevice->pbyTmpBuff, bNeedEncryption,
- cbFrameBodySize, TYPE_TXDMA0, pHeadTD,
- &pDevice->sTxEthHeader, (unsigned char *)skb->data, pTransmitKey, uNodeIndex,
- &uMACfragNum,
- &cbHeaderSize
- );
-
- if (MACbIsRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PS)) {
- // Disable PS
- MACbPSWakeup(pDevice->PortOffset);
- }
-
- pDevice->bPWBitOn = false;
-
- pLastTD = pHeadTD;
- for (ii = 0; ii < uMACfragNum; ii++) {
- // Poll Transmit the adapter
- wmb();
- pHeadTD->m_td0TD0.f1Owner = OWNED_BY_NIC;
- wmb();
- if (ii == (uMACfragNum - 1))
- pLastTD = pHeadTD;
- pHeadTD = pHeadTD->next;
- }
-
- // Save the information needed by the tx interrupt handler
- // to complete the Send request
- pLastTD->pTDInfo->skb = skb;
- pLastTD->pTDInfo->byFlags = 0;
- pLastTD->pTDInfo->byFlags |= TD_FLAGS_NETIF_SKB;
-
- pDevice->apCurrTD[TYPE_TXDMA0] = pHeadTD;
-
- MACvTransmit0(pDevice->PortOffset);
-
- return true;
-}
-
-//TYPE_AC0DMA data tx
-static int device_xmit(struct sk_buff *skb, struct net_device *dev) {
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = pDevice->pMgmt;
- PSTxDesc pHeadTD, pLastTD;
- unsigned int uNodeIndex = 0;
- unsigned char byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
- unsigned short wAID;
- unsigned int uMACfragNum = 1;
- unsigned int cbFrameBodySize;
- unsigned char byPktType;
- unsigned int cbHeaderSize;
- bool bNeedEncryption = false;
- PSKeyItem pTransmitKey = NULL;
- SKeyItem STempKey;
- unsigned int ii;
- bool bTKIP_UseGTK = false;
- bool bNeedDeAuth = false;
- unsigned char *pbyBSSID;
- bool bNodeExist = false;
-
- spin_lock_irq(&pDevice->lock);
- if (!pDevice->bLinkPass) {
- dev_kfree_skb_irq(skb);
- spin_unlock_irq(&pDevice->lock);
- return 0;
- }
-
- if (pDevice->bStopDataPkt) {
- dev_kfree_skb_irq(skb);
- spin_unlock_irq(&pDevice->lock);
- return 0;
- }
-
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- if (pDevice->uAssocCount == 0) {
- dev_kfree_skb_irq(skb);
- spin_unlock_irq(&pDevice->lock);
- return 0;
- }
- if (is_multicast_ether_addr((unsigned char *)(skb->data))) {
- uNodeIndex = 0;
- bNodeExist = true;
- if (pMgmt->sNodeDBTable[0].bPSEnable) {
- skb_queue_tail(&(pMgmt->sNodeDBTable[0].sTxPSQueue), skb);
- pMgmt->sNodeDBTable[0].wEnQueueCnt++;
- // set tx map
- pMgmt->abyPSTxMap[0] |= byMask[0];
- spin_unlock_irq(&pDevice->lock);
- return 0;
- }
- } else {
- if (BSSDBbIsSTAInNodeDB(pMgmt, (unsigned char *)(skb->data), &uNodeIndex)) {
- if (pMgmt->sNodeDBTable[uNodeIndex].bPSEnable) {
- skb_queue_tail(&pMgmt->sNodeDBTable[uNodeIndex].sTxPSQueue, skb);
- pMgmt->sNodeDBTable[uNodeIndex].wEnQueueCnt++;
- // set tx map
- wAID = pMgmt->sNodeDBTable[uNodeIndex].wAID;
- pMgmt->abyPSTxMap[wAID >> 3] |= byMask[wAID & 7];
- pr_debug("Set:pMgmt->abyPSTxMap[%d]= %d\n",
- (wAID >> 3),
- pMgmt->abyPSTxMap[wAID >> 3]);
- spin_unlock_irq(&pDevice->lock);
- return 0;
- }
-
- if (pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble)
- pDevice->byPreambleType = pDevice->byShortPreamble;
- else
- pDevice->byPreambleType = PREAMBLE_LONG;
-
- bNodeExist = true;
-
- }
- }
-
- if (!bNodeExist) {
- pr_debug("Unknown STA not found in node DB\n");
- dev_kfree_skb_irq(skb);
- spin_unlock_irq(&pDevice->lock);
- return 0;
- }
- }
-
- pHeadTD = pDevice->apCurrTD[TYPE_AC0DMA];
-
- pHeadTD->m_td1TD1.byTCR = (TCR_EDP|TCR_STP);
-
- memcpy(pDevice->sTxEthHeader.abyDstAddr, (unsigned char *)(skb->data), ETH_HLEN);
- cbFrameBodySize = skb->len - ETH_HLEN;
- // 802.1H
- if (ntohs(pDevice->sTxEthHeader.wType) > ETH_DATA_LEN)
- cbFrameBodySize += 8;
-
- if (pDevice->bEncryptionEnable) {
- bNeedEncryption = true;
- // get Transmit key
- do {
- if ((pDevice->pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
- (pDevice->pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
- pbyBSSID = pDevice->abyBSSID;
- // get pairwise key
- if (KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, PAIRWISE_KEY, &pTransmitKey) == false) {
- // get group key
- if (KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == true) {
- bTKIP_UseGTK = true;
- pr_debug("Get GTK\n");
- break;
- }
- } else {
- pr_debug("Get PTK\n");
- break;
- }
- } else if (pDevice->pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
- pbyBSSID = pDevice->sTxEthHeader.abyDstAddr; //TO_DS = 0 and FROM_DS = 0 --> 802.11 MAC Address1
- pr_debug("IBSS Serach Key:\n");
- for (ii = 0; ii < 6; ii++)
- pr_debug("%x\n", *(pbyBSSID+ii));
- pr_debug("\n");
-
- // get pairwise key
- if (KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, PAIRWISE_KEY, &pTransmitKey) == true)
- break;
- }
- // get group key
- pbyBSSID = pDevice->abyBroadcastAddr;
- if (KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == false) {
- pTransmitKey = NULL;
- if (pDevice->pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)
- pr_debug("IBSS and KEY is NULL. [%d]\n",
- pDevice->pMgmt->eCurrMode);
- else
- pr_debug("NOT IBSS and KEY is NULL. [%d]\n",
- pDevice->pMgmt->eCurrMode);
- } else {
- bTKIP_UseGTK = true;
- pr_debug("Get GTK\n");
- }
- } while (false);
- }
-
- if (pDevice->bEnableHostWEP) {
- pr_debug("acdma0: STA index %d\n", uNodeIndex);
- if (pDevice->bEncryptionEnable) {
- pTransmitKey = &STempKey;
- pTransmitKey->byCipherSuite = pMgmt->sNodeDBTable[uNodeIndex].byCipherSuite;
- pTransmitKey->dwKeyIndex = pMgmt->sNodeDBTable[uNodeIndex].dwKeyIndex;
- pTransmitKey->uKeyLength = pMgmt->sNodeDBTable[uNodeIndex].uWepKeyLength;
- pTransmitKey->dwTSC47_16 = pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16;
- pTransmitKey->wTSC15_0 = pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0;
- memcpy(pTransmitKey->abyKey,
- &pMgmt->sNodeDBTable[uNodeIndex].abyWepKey[0],
- pTransmitKey->uKeyLength
- );
- }
- }
-
- uMACfragNum = cbGetFragCount(pDevice, pTransmitKey, cbFrameBodySize, &pDevice->sTxEthHeader);
-
- if (uMACfragNum > AVAIL_TD(pDevice, TYPE_AC0DMA)) {
- pr_debug("uMACfragNum > AVAIL_TD(TYPE_AC0DMA) = %d\n",
- uMACfragNum);
- dev_kfree_skb_irq(skb);
- spin_unlock_irq(&pDevice->lock);
- return 0;
- }
-
- if (pTransmitKey != NULL) {
- if ((pTransmitKey->byCipherSuite == KEY_CTL_WEP) &&
- (pTransmitKey->uKeyLength == WLAN_WEP232_KEYLEN)) {
- uMACfragNum = 1; //WEP256 doesn't support fragment
- }
- }
-
- byPktType = (unsigned char)pDevice->byPacketType;
-
- if (pDevice->bFixRate) {
- if (pDevice->eCurrentPHYType == PHY_TYPE_11B) {
- if (pDevice->uConnectionRate >= RATE_11M)
- pDevice->wCurrentRate = RATE_11M;
- else
- pDevice->wCurrentRate = (unsigned short)pDevice->uConnectionRate;
- } else {
- if ((pDevice->eCurrentPHYType == PHY_TYPE_11A) &&
- (pDevice->uConnectionRate <= RATE_6M)) {
- pDevice->wCurrentRate = RATE_6M;
- } else {
- if (pDevice->uConnectionRate >= RATE_54M)
- pDevice->wCurrentRate = RATE_54M;
- else
- pDevice->wCurrentRate = (unsigned short)pDevice->uConnectionRate;
-
- }
- }
- pDevice->byACKRate = (unsigned char) pDevice->wCurrentRate;
- pDevice->byTopCCKBasicRate = RATE_1M;
- pDevice->byTopOFDMBasicRate = RATE_6M;
- } else {
- //auto rate
- if (pDevice->sTxEthHeader.wType == TYPE_PKT_802_1x) {
- if (pDevice->eCurrentPHYType != PHY_TYPE_11A) {
- pDevice->wCurrentRate = RATE_1M;
- pDevice->byACKRate = RATE_1M;
- pDevice->byTopCCKBasicRate = RATE_1M;
- pDevice->byTopOFDMBasicRate = RATE_6M;
- } else {
- pDevice->wCurrentRate = RATE_6M;
- pDevice->byACKRate = RATE_6M;
- pDevice->byTopCCKBasicRate = RATE_1M;
- pDevice->byTopOFDMBasicRate = RATE_6M;
- }
- } else {
- VNTWIFIvGetTxRate(pDevice->pMgmt,
- pDevice->sTxEthHeader.abyDstAddr,
- &(pDevice->wCurrentRate),
- &(pDevice->byACKRate),
- &(pDevice->byTopCCKBasicRate),
- &(pDevice->byTopOFDMBasicRate));
-
- }
- }
-
-
- if (pDevice->wCurrentRate <= RATE_11M) {
- byPktType = PK_TYPE_11B;
- } else if (pDevice->eCurrentPHYType == PHY_TYPE_11A) {
- byPktType = PK_TYPE_11A;
- } else {
- if (pDevice->bProtectMode)
- byPktType = PK_TYPE_11GB;
- else
- byPktType = PK_TYPE_11GA;
- }
-
- if (bNeedEncryption) {
- pr_debug("ntohs Pkt Type=%04x\n",
- ntohs(pDevice->sTxEthHeader.wType));
- if ((pDevice->sTxEthHeader.wType) == TYPE_PKT_802_1x) {
- bNeedEncryption = false;
- pr_debug("Pkt Type=%04x\n",
- (pDevice->sTxEthHeader.wType));
- if ((pDevice->pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (pDevice->pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
- if (pTransmitKey == NULL) {
- pr_debug("Don't Find TX KEY\n");
- } else {
- if (bTKIP_UseGTK) {
- pr_debug("error: KEY is GTK!!~~\n");
- } else {
- pr_debug("Find PTK [%lX]\n",
- pTransmitKey->dwKeyIndex);
- bNeedEncryption = true;
- }
- }
- }
-
- if (pDevice->byCntMeasure == 2) {
- bNeedDeAuth = true;
- pDevice->s802_11Counter.TKIPCounterMeasuresInvoked++;
- }
-
- if (pDevice->bEnableHostWEP) {
- if ((uNodeIndex != 0) &&
- (pMgmt->sNodeDBTable[uNodeIndex].dwKeyIndex & PAIRWISE_KEY)) {
- pr_debug("Find PTK [%lX]\n",
- pTransmitKey->dwKeyIndex);
- bNeedEncryption = true;
- }
- }
- } else {
- if (pTransmitKey == NULL) {
- pr_debug("return no tx key\n");
- dev_kfree_skb_irq(skb);
- spin_unlock_irq(&pDevice->lock);
- return 0;
- }
- }
- }
-
- vGenerateFIFOHeader(pDevice, byPktType, pDevice->pbyTmpBuff, bNeedEncryption,
- cbFrameBodySize, TYPE_AC0DMA, pHeadTD,
- &pDevice->sTxEthHeader, (unsigned char *)skb->data, pTransmitKey, uNodeIndex,
- &uMACfragNum,
- &cbHeaderSize
- );
-
- if (MACbIsRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PS)) {
- // Disable PS
- MACbPSWakeup(pDevice->PortOffset);
- }
- pDevice->bPWBitOn = false;
-
- pLastTD = pHeadTD;
- for (ii = 0; ii < uMACfragNum; ii++) {
- // Poll Transmit the adapter
- wmb();
- pHeadTD->m_td0TD0.f1Owner = OWNED_BY_NIC;
- wmb();
- if (ii == uMACfragNum - 1)
- pLastTD = pHeadTD;
- pHeadTD = pHeadTD->next;
- }
-
- // Save the information needed by the tx interrupt handler
- // to complete the Send request
- pLastTD->pTDInfo->skb = skb;
- pLastTD->pTDInfo->byFlags = 0;
- pLastTD->pTDInfo->byFlags |= TD_FLAGS_NETIF_SKB;
- pDevice->nTxDataTimeCout = 0; //2008-8-21 chester <add> for send null packet
-
- if (AVAIL_TD(pDevice, TYPE_AC0DMA) <= 1)
- netif_stop_queue(dev);
-
- pDevice->apCurrTD[TYPE_AC0DMA] = pHeadTD;
-
- if (pDevice->bFixRate)
- pr_debug("FixRate:Rate is %d,TxPower is %d\n", pDevice->wCurrentRate, pDevice->byCurPwr);
-
- {
- unsigned char Protocol_Version; //802.1x Authentication
- unsigned char Packet_Type; //802.1x Authentication
- unsigned char Descriptor_type;
- unsigned short Key_info;
- bool bTxeapol_key = false;
-
- Protocol_Version = skb->data[ETH_HLEN];
- Packet_Type = skb->data[ETH_HLEN+1];
- Descriptor_type = skb->data[ETH_HLEN+1+1+2];
- Key_info = (skb->data[ETH_HLEN+1+1+2+1] << 8)|(skb->data[ETH_HLEN+1+1+2+2]);
- if (pDevice->sTxEthHeader.wType == TYPE_PKT_802_1x) {
- if (((Protocol_Version == 1) || (Protocol_Version == 2)) &&
- (Packet_Type == 3)) { //802.1x OR eapol-key challenge frame transfer
- bTxeapol_key = true;
- if ((Descriptor_type == 254) || (Descriptor_type == 2)) { //WPA or RSN
- if (!(Key_info & BIT3) && //group-key challenge
- (Key_info & BIT8) && (Key_info & BIT9)) { //send 2/2 key
- pDevice->fWPA_Authened = true;
- if (Descriptor_type == 254)
- pr_debug("WPA ");
- else
- pr_debug("WPA2 ");
- pr_debug("Authentication completed!!\n");
- }
- }
- }
- }
- }
-
- MACvTransmitAC0(pDevice->PortOffset);
-
- dev->trans_start = jiffies;
-
- spin_unlock_irq(&pDevice->lock);
- return 0;
-}
-
static irqreturn_t device_intr(int irq, void *dev_instance)
{
- struct net_device *dev = dev_instance;
- struct vnt_private *pDevice = netdev_priv(dev);
+ struct vnt_private *pDevice = dev_instance;
int max_count = 0;
unsigned long dwMIBCounter = 0;
- PSMgmtObject pMgmt = pDevice->pMgmt;
unsigned char byOrgPageSel = 0;
int handled = 0;
- unsigned char byData = 0;
int ii = 0;
unsigned long flags;
@@ -2256,96 +1102,13 @@ static irqreturn_t device_intr(int irq, void *dev_instance)
device_error(pDevice, pDevice->dwIsr);
}
- if (pDevice->byLocalID > REV_ID_VT3253_B1) {
- if (pDevice->dwIsr & ISR_MEASURESTART) {
- // 802.11h measure start
- pDevice->byOrgChannel = pDevice->byCurrentCh;
- VNSvInPortB(pDevice->PortOffset + MAC_REG_RCR, &(pDevice->byOrgRCR));
- VNSvOutPortB(pDevice->PortOffset + MAC_REG_RCR, (RCR_RXALLTYPE | RCR_UNICAST | RCR_BROADCAST | RCR_MULTICAST | RCR_WPAERR));
- MACvSelectPage1(pDevice->PortOffset);
- VNSvInPortD(pDevice->PortOffset + MAC_REG_MAR0, &(pDevice->dwOrgMAR0));
- VNSvInPortD(pDevice->PortOffset + MAC_REG_MAR4, &(pDevice->dwOrgMAR4));
- MACvSelectPage0(pDevice->PortOffset);
- //xxxx
- if (set_channel(pDevice, pDevice->pCurrMeasureEID->sReq.byChannel)) {
- pDevice->bMeasureInProgress = true;
- MACvSelectPage1(pDevice->PortOffset);
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL, MSRCTL_READY);
- MACvSelectPage0(pDevice->PortOffset);
- pDevice->byBasicMap = 0;
- pDevice->byCCAFraction = 0;
- for (ii = 0; ii < 8; ii++)
- pDevice->dwRPIs[ii] = 0;
-
- } else {
- // can not measure because set channel fail
- // clear measure control
- MACvRegBitsOff(pDevice->PortOffset, MAC_REG_MSRCTL, MSRCTL_EN);
- s_vCompleteCurrentMeasure(pDevice, MEASURE_MODE_INCAPABLE);
- MACvSelectPage1(pDevice->PortOffset);
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL+1, MSRCTL1_TXPAUSE);
- MACvSelectPage0(pDevice->PortOffset);
- }
- }
- if (pDevice->dwIsr & ISR_MEASUREEND) {
- // 802.11h measure end
- pDevice->bMeasureInProgress = false;
- VNSvOutPortB(pDevice->PortOffset + MAC_REG_RCR, pDevice->byOrgRCR);
- MACvSelectPage1(pDevice->PortOffset);
- VNSvOutPortD(pDevice->PortOffset + MAC_REG_MAR0, pDevice->dwOrgMAR0);
- VNSvOutPortD(pDevice->PortOffset + MAC_REG_MAR4, pDevice->dwOrgMAR4);
- VNSvInPortB(pDevice->PortOffset + MAC_REG_MSRBBSTS, &byData);
- pDevice->byBasicMap |= (byData >> 4);
- VNSvInPortB(pDevice->PortOffset + MAC_REG_CCAFRACTION, &pDevice->byCCAFraction);
- VNSvInPortB(pDevice->PortOffset + MAC_REG_MSRCTL, &byData);
- // clear measure control
- MACvRegBitsOff(pDevice->PortOffset, MAC_REG_MSRCTL, MSRCTL_EN);
- MACvSelectPage0(pDevice->PortOffset);
- set_channel(pDevice, pDevice->byOrgChannel);
- MACvSelectPage1(pDevice->PortOffset);
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL+1, MSRCTL1_TXPAUSE);
- MACvSelectPage0(pDevice->PortOffset);
- if (byData & MSRCTL_FINISH) {
- // measure success
- s_vCompleteCurrentMeasure(pDevice, 0);
- } else {
- // can not measure because not ready before end of measure time
- s_vCompleteCurrentMeasure(pDevice, MEASURE_MODE_LATE);
- }
- }
- if (pDevice->dwIsr & ISR_QUIETSTART) {
- do {
- ;
- } while (!CARDbStartQuiet(pDevice));
- }
- }
-
if (pDevice->dwIsr & ISR_TBTT) {
- if (pDevice->bEnableFirstQuiet) {
- pDevice->byQuietStartCount--;
- if (pDevice->byQuietStartCount == 0) {
- pDevice->bEnableFirstQuiet = false;
- MACvSelectPage1(pDevice->PortOffset);
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL, (MSRCTL_QUIETTXCHK | MSRCTL_QUIETEN));
- MACvSelectPage0(pDevice->PortOffset);
- }
- }
- if (pDevice->bChannelSwitch &&
- (pDevice->op_mode == NL80211_IFTYPE_STATION)) {
- pDevice->byChannelSwitchCount--;
- if (pDevice->byChannelSwitchCount == 0) {
- pDevice->bChannelSwitch = false;
- set_channel(pDevice, pDevice->byNewChannel);
- VNTWIFIbChannelSwitch(pDevice->pMgmt, pDevice->byNewChannel);
- MACvSelectPage1(pDevice->PortOffset);
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL+1, MSRCTL1_TXPAUSE);
- MACvSelectPage0(pDevice->PortOffset);
- CARDbStartTxPacket(pDevice, PKT_TYPE_802_11_ALL);
-
- }
- }
- if (pDevice->op_mode != NL80211_IFTYPE_ADHOC) {
- if ((pDevice->bUpdateBBVGA) && pDevice->bLinkPass && (pDevice->uCurrRSSI != 0)) {
+ if (pDevice->vif &&
+ pDevice->op_mode != NL80211_IFTYPE_ADHOC) {
+ if (pDevice->bUpdateBBVGA &&
+ !(pDevice->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) &&
+ pDevice->vif->bss_conf.assoc &&
+ pDevice->uCurrRSSI) {
long ldBm;
RFvRSSITodBm(pDevice, (unsigned char) pDevice->uCurrRSSI, &ldBm);
@@ -2384,10 +1147,11 @@ static irqreturn_t device_intr(int irq, void *dev_instance)
if (pDevice->bEnablePSMode)
PSbIsNextTBTTWakeUp((void *)pDevice);
- if ((pDevice->op_mode == NL80211_IFTYPE_AP) ||
- (pDevice->op_mode == NL80211_IFTYPE_ADHOC)) {
+ if ((pDevice->op_mode == NL80211_IFTYPE_AP ||
+ pDevice->op_mode == NL80211_IFTYPE_ADHOC) &&
+ pDevice->vif->bss_conf.enable_beacon) {
MACvOneShotTimer1MicroSec(pDevice->PortOffset,
- (pMgmt->wIBSSBeaconPeriod - MAKE_BEACON_RESERVED) << 10);
+ (pDevice->vif->bss_conf.beacon_int - MAKE_BEACON_RESERVED) << 10);
}
/* TODO: adhoc PS mode */
@@ -2400,34 +1164,7 @@ static irqreturn_t device_intr(int irq, void *dev_instance)
pDevice->cbBeaconBufReadySetCnt = 0;
}
- if (pDevice->op_mode == NL80211_IFTYPE_AP) {
- if (pMgmt->byDTIMCount > 0) {
- pMgmt->byDTIMCount--;
- pMgmt->sNodeDBTable[0].bRxPSPoll = false;
- } else {
- if (pMgmt->byDTIMCount == 0) {
- // check if mutltcast tx bufferring
- pMgmt->byDTIMCount = pMgmt->byDTIMPeriod - 1;
- pMgmt->sNodeDBTable[0].bRxPSPoll = true;
- bScheduleCommand((void *)pDevice, WLAN_CMD_RX_PSPOLL, NULL);
- }
- }
- }
pDevice->bBeaconSent = true;
-
- if (pDevice->bChannelSwitch) {
- pDevice->byChannelSwitchCount--;
- if (pDevice->byChannelSwitchCount == 0) {
- pDevice->bChannelSwitch = false;
- set_channel(pDevice, pDevice->byNewChannel);
- VNTWIFIbChannelSwitch(pDevice->pMgmt, pDevice->byNewChannel);
- MACvSelectPage1(pDevice->PortOffset);
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL+1, MSRCTL1_TXPAUSE);
- MACvSelectPage0(pDevice->PortOffset);
- CARDbStartTxPacket(pDevice, PKT_TYPE_802_11_ALL);
- }
- }
-
}
if (pDevice->dwIsr & ISR_RXDMA0)
@@ -2443,14 +1180,18 @@ static irqreturn_t device_intr(int irq, void *dev_instance)
max_count += device_tx_srv(pDevice, TYPE_AC0DMA);
if (pDevice->dwIsr & ISR_SOFTTIMER1) {
- if (pDevice->op_mode == NL80211_IFTYPE_AP) {
- if (pDevice->bShortSlotTime)
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTSLOTTIME(1);
- else
- pMgmt->wCurrCapInfo &= ~(WLAN_SET_CAP_INFO_SHORTSLOTTIME(1));
+ if (pDevice->vif) {
+ if (pDevice->vif->bss_conf.enable_beacon)
+ vnt_beacon_make(pDevice, pDevice->vif);
}
- bMgrPrepareBeaconToSend(pDevice, pMgmt);
- pDevice->byCntMeasure = 0;
+ }
+
+ /* If both buffers are available, wake the queue */
+ if (pDevice->vif) {
+ if (AVAIL_TD(pDevice, TYPE_TXDMA0) &&
+ AVAIL_TD(pDevice, TYPE_AC0DMA) &&
+ ieee80211_queue_stopped(pDevice->hw, 0))
+ ieee80211_wake_queues(pDevice->hw);
}
MACvReadISR(pDevice->PortOffset, &pDevice->dwIsr);
@@ -2472,550 +1213,655 @@ static irqreturn_t device_intr(int irq, void *dev_instance)
return IRQ_RETVAL(handled);
}
-//2008-8-4 <add> by chester
-static int Config_FileGetParameter(unsigned char *string,
- unsigned char *dest, unsigned char *source)
+static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
{
- unsigned char buf1[100];
- int source_len = strlen(source);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ PSTxDesc head_td;
+ u32 dma_idx = TYPE_AC0DMA;
+ unsigned long flags;
- memset(buf1, 0, 100);
- strcat(buf1, string);
- strcat(buf1, "=");
- source += strlen(buf1);
+ spin_lock_irqsave(&priv->lock, flags);
- memcpy(dest, source, source_len - strlen(buf1));
- return true;
-}
+ if (!ieee80211_is_data(hdr->frame_control))
+ dma_idx = TYPE_TXDMA0;
-int Config_FileOperation(struct vnt_private *pDevice,
- bool fwrite, unsigned char *Parameter)
-{
- unsigned char *buffer = kmalloc(1024, GFP_KERNEL);
- unsigned char tmpbuffer[20];
- struct file *file;
- int result = 0;
-
- if (!buffer) {
- pr_err("allocate mem for file fail?\n");
- return -1;
- }
- file = filp_open(CONFIG_PATH, O_RDONLY, 0);
- if (IS_ERR(file)) {
- kfree(buffer);
- pr_err("Config_FileOperation:open file fail?\n");
- return -1;
+ if (AVAIL_TD(priv, dma_idx) < 1) {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return -ENOMEM;
}
- if (kernel_read(file, 0, buffer, 1024) < 0) {
- pr_err("read file error?\n");
- result = -1;
- goto error1;
- }
+ head_td = priv->apCurrTD[dma_idx];
- if (Config_FileGetParameter("ZONETYPE", tmpbuffer, buffer) != true) {
- pr_err("get parameter error?\n");
- result = -1;
- goto error1;
- }
+ head_td->m_td1TD1.byTCR = (TCR_EDP|TCR_STP);
- if (memcmp(tmpbuffer, "USA", 3) == 0) {
- result = ZoneType_USA;
- } else if (memcmp(tmpbuffer, "JAPAN", 5) == 0) {
- result = ZoneType_Japan;
- } else if (memcmp(tmpbuffer, "EUROPE", 5) == 0) {
- result = ZoneType_Europe;
- } else {
- result = -1;
- pr_err("Unknown Zonetype[%s]?\n", tmpbuffer);
- }
+ head_td->pTDInfo->skb = skb;
+
+ priv->iTDUsed[dma_idx]++;
+
+ /* Take ownership */
+ wmb();
+ head_td->m_td0TD0.f1Owner = OWNED_BY_NIC;
+
+ /* get Next */
+ wmb();
+ priv->apCurrTD[dma_idx] = head_td->next;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ vnt_generate_fifo_header(priv, dma_idx, head_td, skb);
+
+ if (MACbIsRegBitsOn(priv->PortOffset, MAC_REG_PSCTL, PSCTL_PS))
+ MACbPSWakeup(priv->PortOffset);
+
+ spin_lock_irqsave(&priv->lock, flags);
-error1:
- kfree(buffer);
- fput(file);
- return result;
+ priv->bPWBitOn = false;
+
+ head_td->pTDInfo->byFlags = TD_FLAGS_NETIF_SKB;
+
+ if (dma_idx == TYPE_AC0DMA)
+ MACvTransmitAC0(priv->PortOffset);
+ else
+ MACvTransmit0(priv->PortOffset);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
}
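A note on the ordering in vnt_tx_packet() above: the two wmb() calls publish the descriptor to the hardware in two steps, first making the descriptor stores visible before f1Owner flips to OWNED_BY_NIC, then ordering that flip against advancing apCurrTD. A stripped-down sketch of the intended publish pattern (a sketch, not a copy of the function above; demo_publish_td is a hypothetical name):

	/* Sketch: publish a filled TX descriptor to the NIC in order. */
	static void demo_publish_td(struct vnt_private *priv, u32 dma_idx,
				    PSTxDesc td, struct sk_buff *skb)
	{
		td->pTDInfo->skb = skb;			/* 1. fill descriptor state  */

		wmb();					/* 2. stores visible first   */
		td->m_td0TD0.f1Owner = OWNED_BY_NIC;	/*    then hand to hardware  */

		wmb();					/* 3. flip ordered before... */
		priv->apCurrTD[dma_idx] = td->next;	/*    ...advancing the ring  */
	}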
-static void device_set_multi(struct net_device *dev) {
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = pDevice->pMgmt;
- u32 mc_filter[2];
- struct netdev_hw_addr *ha;
+static void vnt_tx_80211(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct vnt_private *priv = hw->priv;
- VNSvInPortB(pDevice->PortOffset + MAC_REG_RCR, &(pDevice->byRxMode));
+ ieee80211_stop_queues(hw);
- if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
- pr_notice("%s: Promiscuous mode enabled\n", dev->name);
- /* Unconditionally log net taps. */
- pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST|RCR_UNICAST);
- } else if ((netdev_mc_count(dev) > pDevice->multicast_limit)
- || (dev->flags & IFF_ALLMULTI)) {
- MACvSelectPage1(pDevice->PortOffset);
- VNSvOutPortD(pDevice->PortOffset + MAC_REG_MAR0, 0xffffffff);
- VNSvOutPortD(pDevice->PortOffset + MAC_REG_MAR0 + 4, 0xffffffff);
- MACvSelectPage0(pDevice->PortOffset);
- pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST);
- } else {
- memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(ha, dev) {
- int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
+ if (vnt_tx_packet(priv, skb)) {
+ ieee80211_free_txskb(hw, skb);
- mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31));
- }
- MACvSelectPage1(pDevice->PortOffset);
- VNSvOutPortD(pDevice->PortOffset + MAC_REG_MAR0, mc_filter[0]);
- VNSvOutPortD(pDevice->PortOffset + MAC_REG_MAR0 + 4, mc_filter[1]);
- MACvSelectPage0(pDevice->PortOffset);
- pDevice->byRxMode &= ~(RCR_UNICAST);
- pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST);
+ ieee80211_wake_queues(hw);
}
+}
+
+static int vnt_start(struct ieee80211_hw *hw)
+{
+ struct vnt_private *priv = hw->priv;
+ int ret;
- if (pMgmt->eConfigMode == WMAC_CONFIG_AP) {
- // If AP mode, don't enable RCR_UNICAST. Since hw only compare addr1 with local mac.
- pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST);
- pDevice->byRxMode &= ~(RCR_UNICAST);
+ priv->rx_buf_sz = PKT_BUF_SZ;
+ if (!device_init_rings(priv))
+ return -ENOMEM;
+
+ ret = request_irq(priv->pcid->irq, &device_intr,
+ IRQF_SHARED, "vt6655", priv);
+ if (ret) {
+ dev_dbg(&priv->pcid->dev, "failed to start irq\n");
+ return ret;
}
- VNSvOutPortB(pDevice->PortOffset + MAC_REG_RCR, pDevice->byRxMode);
- pr_debug("pDevice->byRxMode = %x\n", pDevice->byRxMode);
+ dev_dbg(&priv->pcid->dev, "call device init rd0 ring\n");
+ device_init_rd0_ring(priv);
+ device_init_rd1_ring(priv);
+ device_init_td0_ring(priv);
+ device_init_td1_ring(priv);
+
+ device_init_registers(priv);
+
+ dev_dbg(&priv->pcid->dev, "call MACvIntEnable\n");
+ MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
+
+ ieee80211_wake_queues(hw);
+
+ return 0;
}
-static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static void vnt_stop(struct ieee80211_hw *hw)
{
- struct vnt_private *pDevice = netdev_priv(dev);
- struct iwreq *wrq = (struct iwreq *)rq;
- int rc = 0;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- PSCmdRequest pReq;
-
- if (pMgmt == NULL) {
- rc = -EFAULT;
- return rc;
- }
+ struct vnt_private *priv = hw->priv;
- switch (cmd) {
- case SIOCGIWNAME:
- rc = iwctl_giwname(dev, NULL, (char *)&(wrq->u.name), NULL);
- break;
+ ieee80211_stop_queues(hw);
+
+ MACbShutdown(priv->PortOffset);
+ MACbSoftwareReset(priv->PortOffset);
+ CARDbRadioPowerOff(priv);
+
+ device_free_td0_ring(priv);
+ device_free_td1_ring(priv);
+ device_free_rd0_ring(priv);
+ device_free_rd1_ring(priv);
+ device_free_rings(priv);
+
+ free_irq(priv->pcid->irq, priv);
+}
+
+static int vnt_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct vnt_private *priv = hw->priv;
+
+ priv->vif = vif;
- case SIOCGIWNWID: //0x8b03 support
- rc = -EOPNOTSUPP;
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ if (priv->bDiversityRegCtlON)
+ device_init_diversity_timer(priv);
break;
+ case NL80211_IFTYPE_ADHOC:
+ MACvRegBitsOff(priv->PortOffset, MAC_REG_RCR, RCR_UNICAST);
+
+ MACvRegBitsOn(priv->PortOffset, MAC_REG_HOSTCR, HOSTCR_ADHOC);
- // Set frequency/channel
- case SIOCSIWFREQ:
- rc = iwctl_siwfreq(dev, NULL, &(wrq->u.freq), NULL);
break;
+ case NL80211_IFTYPE_AP:
+ MACvRegBitsOff(priv->PortOffset, MAC_REG_RCR, RCR_UNICAST);
+
+ MACvRegBitsOn(priv->PortOffset, MAC_REG_HOSTCR, HOSTCR_AP);
- // Get frequency/channel
- case SIOCGIWFREQ:
- rc = iwctl_giwfreq(dev, NULL, &(wrq->u.freq), NULL);
break;
+ default:
+ return -EOPNOTSUPP;
+ }
- // Set desired network name (ESSID)
- case SIOCSIWESSID:
+ priv->op_mode = vif->type;
- {
- char essid[IW_ESSID_MAX_SIZE+1];
+ return 0;
+}
- if (wrq->u.essid.length > IW_ESSID_MAX_SIZE) {
- rc = -E2BIG;
- break;
- }
- if (copy_from_user(essid, wrq->u.essid.pointer,
- wrq->u.essid.length)) {
- rc = -EFAULT;
- break;
+static void vnt_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct vnt_private *priv = hw->priv;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ if (priv->bDiversityRegCtlON) {
+ del_timer(&priv->TimerSQ3Tmax1);
+ del_timer(&priv->TimerSQ3Tmax2);
+ del_timer(&priv->TimerSQ3Tmax3);
}
- rc = iwctl_siwessid(dev, NULL,
- &(wrq->u.essid), essid);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ MACvRegBitsOff(priv->PortOffset, MAC_REG_TCR, TCR_AUTOBCNTX);
+ MACvRegBitsOff(priv->PortOffset,
+ MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
+ MACvRegBitsOff(priv->PortOffset, MAC_REG_HOSTCR, HOSTCR_ADHOC);
+ break;
+ case NL80211_IFTYPE_AP:
+ MACvRegBitsOff(priv->PortOffset, MAC_REG_TCR, TCR_AUTOBCNTX);
+ MACvRegBitsOff(priv->PortOffset,
+ MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
+ MACvRegBitsOff(priv->PortOffset, MAC_REG_HOSTCR, HOSTCR_AP);
+ break;
+ default:
+ break;
}
- break;
- // Get current network name (ESSID)
- case SIOCGIWESSID:
+ priv->op_mode = NL80211_IFTYPE_UNSPECIFIED;
+}
- {
- char essid[IW_ESSID_MAX_SIZE+1];
-
- if (wrq->u.essid.pointer)
- rc = iwctl_giwessid(dev, NULL,
- &(wrq->u.essid), essid);
- if (copy_to_user(wrq->u.essid.pointer,
- essid,
- wrq->u.essid.length))
- rc = -EFAULT;
+
+static int vnt_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct vnt_private *priv = hw->priv;
+ struct ieee80211_conf *conf = &hw->conf;
+ u8 bb_type;
+
+ if (changed & IEEE80211_CONF_CHANGE_PS) {
+ if (conf->flags & IEEE80211_CONF_PS)
+ PSvEnablePowerSaving(priv, conf->listen_interval);
+ else
+ PSvDisablePowerSaving(priv);
}
- break;
- case SIOCSIWAP:
+ if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) ||
+ (conf->flags & IEEE80211_CONF_OFFCHANNEL)) {
+ set_channel(priv, conf->chandef.chan->hw_value);
- rc = iwctl_siwap(dev, NULL, &(wrq->u.ap_addr), NULL);
- break;
+ if (conf->chandef.chan->band == IEEE80211_BAND_5GHZ)
+ bb_type = BB_TYPE_11A;
+ else
+ bb_type = BB_TYPE_11G;
- // Get current Access Point (BSSID)
- case SIOCGIWAP:
- rc = iwctl_giwap(dev, NULL, &(wrq->u.ap_addr), NULL);
- break;
+ if (priv->byBBType != bb_type) {
+ priv->byBBType = bb_type;
- // Set desired station name
- case SIOCSIWNICKN:
- pr_debug(" SIOCSIWNICKN\n");
- rc = -EOPNOTSUPP;
- break;
+ CARDbSetPhyParameter(priv, priv->byBBType);
+ }
+ }
- // Get current station name
- case SIOCGIWNICKN:
- pr_debug(" SIOCGIWNICKN\n");
- rc = -EOPNOTSUPP;
- break;
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
+ if (priv->byBBType == BB_TYPE_11B)
+ priv->wCurrentRate = RATE_1M;
+ else
+ priv->wCurrentRate = RATE_54M;
- // Set the desired bit-rate
- case SIOCSIWRATE:
- rc = iwctl_siwrate(dev, NULL, &(wrq->u.bitrate), NULL);
- break;
+ RFbSetPower(priv, priv->wCurrentRate,
+ conf->chandef.chan->hw_value);
+ }
+
+ return 0;
+}
- // Get the current bit-rate
- case SIOCGIWRATE:
+static void vnt_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, struct ieee80211_bss_conf *conf,
+ u32 changed)
+{
+ struct vnt_private *priv = hw->priv;
- rc = iwctl_giwrate(dev, NULL, &(wrq->u.bitrate), NULL);
- break;
+ priv->current_aid = conf->aid;
- // Set the desired RTS threshold
- case SIOCSIWRTS:
+ if (changed & BSS_CHANGED_BSSID)
+ MACvWriteBSSIDAddress(priv->PortOffset, (u8 *)conf->bssid);
- rc = iwctl_siwrts(dev, NULL, &(wrq->u.rts), NULL);
- break;
+ if (changed & BSS_CHANGED_BASIC_RATES) {
+ priv->basic_rates = conf->basic_rates;
- // Get the current RTS threshold
- case SIOCGIWRTS:
+ CARDvUpdateBasicTopRate(priv);
- rc = iwctl_giwrts(dev, NULL, &(wrq->u.rts), NULL);
- break;
+ dev_dbg(&priv->pcid->dev,
+ "basic rates %x\n", conf->basic_rates);
+ }
- // Set the desired fragmentation threshold
- case SIOCSIWFRAG:
+ if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+ if (conf->use_short_preamble) {
+ MACvEnableBarkerPreambleMd(priv->PortOffset);
+ priv->byPreambleType = true;
+ } else {
+ MACvDisableBarkerPreambleMd(priv->PortOffset);
+ priv->byPreambleType = false;
+ }
+ }
- rc = iwctl_siwfrag(dev, NULL, &(wrq->u.frag), NULL);
- break;
+ if (changed & BSS_CHANGED_ERP_CTS_PROT) {
+ if (conf->use_cts_prot)
+ MACvEnableProtectMD(priv->PortOffset);
+ else
+ MACvDisableProtectMD(priv->PortOffset);
+ }
- // Get the current fragmentation threshold
- case SIOCGIWFRAG:
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ if (conf->use_short_slot)
+ priv->bShortSlotTime = true;
+ else
+ priv->bShortSlotTime = false;
- rc = iwctl_giwfrag(dev, NULL, &(wrq->u.frag), NULL);
- break;
+ CARDbSetPhyParameter(priv, priv->byBBType);
+ BBvSetVGAGainOffset(priv, priv->abyBBVGA[0]);
+ }
- // Set mode of operation
- case SIOCSIWMODE:
- rc = iwctl_siwmode(dev, NULL, &(wrq->u.mode), NULL);
- break;
+ if (changed & BSS_CHANGED_TXPOWER)
+ RFbSetPower(priv, priv->wCurrentRate,
+ conf->chandef.chan->hw_value);
- // Get mode of operation
- case SIOCGIWMODE:
- rc = iwctl_giwmode(dev, NULL, &(wrq->u.mode), NULL);
- break;
+ if (changed & BSS_CHANGED_BEACON_ENABLED) {
+ dev_dbg(&priv->pcid->dev,
+ "Beacon enable %d\n", conf->enable_beacon);
- // Set WEP keys and mode
- case SIOCSIWENCODE: {
- char abyKey[WLAN_WEP232_KEYLEN];
+ if (conf->enable_beacon) {
+ vnt_beacon_enable(priv, vif, conf);
- if (wrq->u.encoding.pointer) {
- if (wrq->u.encoding.length > WLAN_WEP232_KEYLEN) {
- rc = -E2BIG;
- break;
- }
- memset(abyKey, 0, WLAN_WEP232_KEYLEN);
- if (copy_from_user(abyKey,
- wrq->u.encoding.pointer,
- wrq->u.encoding.length)) {
- rc = -EFAULT;
- break;
- }
- } else if (wrq->u.encoding.length != 0) {
- rc = -EINVAL;
- break;
+ MACvRegBitsOn(priv->PortOffset, MAC_REG_TCR, TCR_AUTOBCNTX);
+ } else {
+ MACvRegBitsOff(priv->PortOffset, MAC_REG_TCR, TCR_AUTOBCNTX);
}
- rc = iwctl_siwencode(dev, NULL, &(wrq->u.encoding), abyKey);
}
- break;
- // Get the WEP keys and mode
- case SIOCGIWENCODE:
+ if (changed & BSS_CHANGED_ASSOC && priv->op_mode != NL80211_IFTYPE_AP) {
+ if (conf->assoc) {
+ CARDbUpdateTSF(priv, conf->beacon_rate->hw_value,
+ conf->sync_device_ts, conf->sync_tsf);
- if (!capable(CAP_NET_ADMIN)) {
- rc = -EPERM;
- break;
+ CARDbSetBeaconPeriod(priv, conf->beacon_int);
+
+ CARDvSetFirstNextTBTT(priv, conf->beacon_int);
+ } else {
+ VNSvOutPortB(priv->PortOffset + MAC_REG_TFTCTL,
+ TFTCTL_TSFCNTRST);
+ VNSvOutPortB(priv->PortOffset + MAC_REG_TFTCTL,
+ TFTCTL_TSFCNTREN);
}
- {
- char abyKey[WLAN_WEP232_KEYLEN];
+ }
+}
- rc = iwctl_giwencode(dev, NULL, &(wrq->u.encoding), abyKey);
- if (rc != 0)
- break;
- if (wrq->u.encoding.pointer) {
- if (copy_to_user(wrq->u.encoding.pointer,
- abyKey,
- wrq->u.encoding.length))
- rc = -EFAULT;
+static u64 vnt_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list)
+{
+ struct vnt_private *priv = hw->priv;
+ struct netdev_hw_addr *ha;
+ u64 mc_filter = 0;
+ u32 bit_nr = 0;
+
+ netdev_hw_addr_list_for_each(ha, mc_list) {
+ bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
+
+ mc_filter |= 1ULL << (bit_nr & 0x3f);
+ }
+
+ priv->mc_list_count = mc_list->count;
+
+ return mc_filter;
+}
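The value returned above is a 64-bit hash filter: ether_crc() runs over the 6-byte address and the top six bits of the CRC select one of 64 filter bits; vnt_configure() below then writes the low and high 32-bit halves into MAR0 and MAR0+4. A small sketch of how a single address maps to its bit (demo_mc_bit is a hypothetical name):

	/* Sketch: which filter bit a given multicast address occupies. */
	static u64 demo_mc_bit(const u8 *addr)
	{
		u32 bit_nr = ether_crc(ETH_ALEN, addr) >> 26;	/* top 6 CRC bits */

		return 1ULL << (bit_nr & 0x3f);			/* bit 0..63 */
	}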
+
+static void vnt_configure(struct ieee80211_hw *hw,
+ unsigned int changed_flags, unsigned int *total_flags, u64 multicast)
+{
+ struct vnt_private *priv = hw->priv;
+ u8 rx_mode = 0;
+
+ *total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_PROMISC_IN_BSS |
+ FIF_BCN_PRBRESP_PROMISC;
+
+ VNSvInPortB(priv->PortOffset + MAC_REG_RCR, &rx_mode);
+
+ dev_dbg(&priv->pcid->dev, "rx mode in = %x\n", rx_mode);
+
+ if (changed_flags & FIF_PROMISC_IN_BSS) {
+ /* unconditionally log net taps */
+ if (*total_flags & FIF_PROMISC_IN_BSS)
+ rx_mode |= RCR_UNICAST;
+ else
+ rx_mode &= ~RCR_UNICAST;
+ }
+
+ if (changed_flags & FIF_ALLMULTI) {
+ if (*total_flags & FIF_ALLMULTI) {
+ if (priv->mc_list_count > 2) {
+ MACvSelectPage1(priv->PortOffset);
+
+ VNSvOutPortD(priv->PortOffset +
+ MAC_REG_MAR0, 0xffffffff);
+ VNSvOutPortD(priv->PortOffset +
+ MAC_REG_MAR0 + 4, 0xffffffff);
+
+ MACvSelectPage0(priv->PortOffset);
+ } else {
+ MACvSelectPage1(priv->PortOffset);
+
+ VNSvOutPortD(priv->PortOffset +
+ MAC_REG_MAR0, (u32)multicast);
+ VNSvOutPortD(priv->PortOffset +
+ MAC_REG_MAR0 + 4,
+ (u32)(multicast >> 32));
+
+ MACvSelectPage0(priv->PortOffset);
}
+
+ rx_mode |= RCR_MULTICAST | RCR_BROADCAST;
+ } else {
+ rx_mode &= ~(RCR_MULTICAST | RCR_BROADCAST);
}
- break;
+ }
- // Get the current Tx-Power
- case SIOCGIWTXPOW:
- pr_debug(" SIOCGIWTXPOW\n");
- rc = -EOPNOTSUPP;
- break;
+ if (changed_flags & (FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC)) {
+ rx_mode |= RCR_MULTICAST | RCR_BROADCAST;
- case SIOCSIWTXPOW:
- pr_debug(" SIOCSIWTXPOW\n");
- rc = -EOPNOTSUPP;
- break;
+ if (*total_flags & (FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC))
+ rx_mode &= ~RCR_BSSID;
+ else
+ rx_mode |= RCR_BSSID;
+ }
- case SIOCSIWRETRY:
+ VNSvOutPortB(priv->PortOffset + MAC_REG_RCR, rx_mode);
- rc = iwctl_siwretry(dev, NULL, &(wrq->u.retry), NULL);
- break;
+ dev_dbg(&priv->pcid->dev, "rx mode out= %x\n", rx_mode);
+}
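On the filter callback just above: mac80211 passes the union of all requested FIF_* flags and expects the driver to clear anything it cannot honour from *total_flags before touching the hardware; the honoured flags are then mapped onto RCR bits. A compact sketch of that mapping, ignoring the MAR register handling shown above (demo_rcr_from_fif is a hypothetical name):

	/* Sketch: the flag-to-RCR mapping vnt_configure() implements. */
	static u8 demo_rcr_from_fif(unsigned int flags, u8 rx_mode)
	{
		if (flags & FIF_PROMISC_IN_BSS)
			rx_mode |= RCR_UNICAST;
		if (flags & FIF_ALLMULTI)
			rx_mode |= RCR_MULTICAST | RCR_BROADCAST;
		if (flags & (FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC))
			rx_mode &= ~RCR_BSSID;		/* accept any BSSID */
		return rx_mode;
	}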
- case SIOCGIWRETRY:
+static int vnt_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct vnt_private *priv = hw->priv;
- rc = iwctl_giwretry(dev, NULL, &(wrq->u.retry), NULL);
+ switch (cmd) {
+ case SET_KEY:
+ if (vnt_set_keys(hw, sta, vif, key))
+ return -EOPNOTSUPP;
break;
+ case DISABLE_KEY:
+ if (test_bit(key->hw_key_idx, &priv->key_entry_inuse))
+ clear_bit(key->hw_key_idx, &priv->key_entry_inuse);
+ default:
+ break;
+ }
- // Get range of parameters
- case SIOCGIWRANGE:
+ return 0;
+}
- {
- struct iw_range range;
+static u64 vnt_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct vnt_private *priv = hw->priv;
+ u64 tsf;
- rc = iwctl_giwrange(dev, NULL, &(wrq->u.data), (char *)&range);
- if (copy_to_user(wrq->u.data.pointer, &range, sizeof(struct iw_range)))
- rc = -EFAULT;
- }
+ CARDbGetCurrentTSF(priv, &tsf);
- break;
+ return tsf;
+}
- case SIOCGIWPOWER:
+static void vnt_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u64 tsf)
+{
+ struct vnt_private *priv = hw->priv;
- rc = iwctl_giwpower(dev, NULL, &(wrq->u.power), NULL);
- break;
+ CARDvUpdateNextTBTT(priv, tsf, vif->bss_conf.beacon_int);
+}
- case SIOCSIWPOWER:
+static void vnt_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct vnt_private *priv = hw->priv;
- rc = iwctl_siwpower(dev, NULL, &(wrq->u.power), NULL);
- break;
+ /* reset TSF counter */
+ VNSvOutPortB(priv->PortOffset + MAC_REG_TFTCTL, TFTCTL_TSFCNTRST);
+}
- case SIOCGIWSENS:
+static const struct ieee80211_ops vnt_mac_ops = {
+ .tx = vnt_tx_80211,
+ .start = vnt_start,
+ .stop = vnt_stop,
+ .add_interface = vnt_add_interface,
+ .remove_interface = vnt_remove_interface,
+ .config = vnt_config,
+ .bss_info_changed = vnt_bss_info_changed,
+ .prepare_multicast = vnt_prepare_multicast,
+ .configure_filter = vnt_configure,
+ .set_key = vnt_set_key,
+ .get_tsf = vnt_get_tsf,
+ .set_tsf = vnt_set_tsf,
+ .reset_tsf = vnt_reset_tsf,
+};
- rc = iwctl_giwsens(dev, NULL, &(wrq->u.sens), NULL);
- break;
+int vnt_init(struct vnt_private *priv)
+{
+ SET_IEEE80211_PERM_ADDR(priv->hw, priv->abyCurrentNetAddr);
- case SIOCSIWSENS:
- pr_debug(" SIOCSIWSENS\n");
- rc = -EOPNOTSUPP;
- break;
+ vnt_init_bands(priv);
- case SIOCGIWAPLIST: {
- char buffer[IW_MAX_AP * (sizeof(struct sockaddr) + sizeof(struct iw_quality))];
-
- if (wrq->u.data.pointer) {
- rc = iwctl_giwaplist(dev, NULL, &(wrq->u.data), buffer);
- if (rc == 0) {
- if (copy_to_user(wrq->u.data.pointer,
- buffer,
- (wrq->u.data.length * (sizeof(struct sockaddr) + sizeof(struct iw_quality)))
- ))
- rc = -EFAULT;
- }
- }
- }
- break;
+ if (ieee80211_register_hw(priv->hw))
+ return -ENODEV;
-#ifdef WIRELESS_SPY
- // Set the spy list
- case SIOCSIWSPY:
+ priv->mac_hw = true;
- pr_debug(" SIOCSIWSPY\n");
- rc = -EOPNOTSUPP;
- break;
+ CARDbRadioPowerOff(priv);
- // Get the spy list
- case SIOCGIWSPY:
+ return 0;
+}
- pr_debug(" SIOCGIWSPY\n");
- rc = -EOPNOTSUPP;
- break;
+static int
+vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
+{
+ PCHIP_INFO pChip_info = (PCHIP_INFO)ent->driver_data;
+ struct vnt_private *priv;
+ struct ieee80211_hw *hw;
+ struct wiphy *wiphy;
+ int rc;
-#endif // WIRELESS_SPY
+ dev_notice(&pcid->dev,
+ "%s Ver. %s\n", DEVICE_FULL_DRV_NAM, DEVICE_VERSION);
- case SIOCGIWPRIV:
- pr_debug(" SIOCGIWPRIV\n");
- rc = -EOPNOTSUPP;
- break;
+ dev_notice(&pcid->dev,
+ "Copyright (c) 2003 VIA Networking Technologies, Inc.\n");
-//2008-0409-07, <Add> by Einsn Liu
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- case SIOCSIWAUTH:
- pr_debug(" SIOCSIWAUTH\n");
- rc = iwctl_siwauth(dev, NULL, &(wrq->u.param), NULL);
- break;
+ hw = ieee80211_alloc_hw(sizeof(*priv), &vnt_mac_ops);
+ if (!hw) {
+ dev_err(&pcid->dev, "could not register ieee80211_hw\n");
+ return -ENOMEM;
+ }
- case SIOCGIWAUTH:
- pr_debug(" SIOCGIWAUTH\n");
- rc = iwctl_giwauth(dev, NULL, &(wrq->u.param), NULL);
- break;
+ priv = hw->priv;
- case SIOCSIWGENIE:
- pr_debug(" SIOCSIWGENIE\n");
- rc = iwctl_siwgenie(dev, NULL, &(wrq->u.data), wrq->u.data.pointer);
- break;
+ vt6655_init_info(pcid, &priv, pChip_info);
- case SIOCGIWGENIE:
- pr_debug(" SIOCGIWGENIE\n");
- rc = iwctl_giwgenie(dev, NULL, &(wrq->u.data), wrq->u.data.pointer);
- break;
+ priv->hw = hw;
- case SIOCSIWENCODEEXT: {
- char extra[sizeof(struct iw_encode_ext)+MAX_KEY_LEN+1];
+ SET_IEEE80211_DEV(priv->hw, &pcid->dev);
- pr_debug(" SIOCSIWENCODEEXT\n");
- if (wrq->u.encoding.pointer) {
- memset(extra, 0, sizeof(struct iw_encode_ext)+MAX_KEY_LEN + 1);
- if (wrq->u.encoding.length > (sizeof(struct iw_encode_ext) + MAX_KEY_LEN)) {
- rc = -E2BIG;
- break;
- }
- if (copy_from_user(extra, wrq->u.encoding.pointer, wrq->u.encoding.length)) {
- rc = -EFAULT;
- break;
- }
- } else if (wrq->u.encoding.length != 0) {
- rc = -EINVAL;
- break;
- }
- rc = iwctl_siwencodeext(dev, NULL, &(wrq->u.encoding), extra);
+ if (pci_enable_device(pcid)) {
+ device_free_info(priv);
+ return -ENODEV;
}
- break;
- case SIOCGIWENCODEEXT:
- pr_debug(" SIOCGIWENCODEEXT\n");
- rc = iwctl_giwencodeext(dev, NULL, &(wrq->u.encoding), NULL);
- break;
+ dev_dbg(&pcid->dev,
+ "Before get pci_info memaddr is %x\n", priv->memaddr);
- case SIOCSIWMLME:
- pr_debug(" SIOCSIWMLME\n");
- rc = iwctl_siwmlme(dev, NULL, &(wrq->u.data), wrq->u.data.pointer);
- break;
+ if (!device_get_pci_info(priv, pcid)) {
+ dev_err(&pcid->dev, ": Failed to find PCI device.\n");
+ device_free_info(priv);
+ return -ENODEV;
+ }
-#endif // #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
-//End Add -- //2008-0409-07, <Add> by Einsn Liu
+#ifdef DEBUG
+ dev_dbg(&pcid->dev,
+ "after get pci_info memaddr is %x, io addr is %x,io_size is %d\n",
+ priv->memaddr, priv->ioaddr, priv->io_size);
+ {
+ int i;
+ u32 bar, len;
+ u32 address[] = {
+ PCI_BASE_ADDRESS_0,
+ PCI_BASE_ADDRESS_1,
+ PCI_BASE_ADDRESS_2,
+ PCI_BASE_ADDRESS_3,
+ PCI_BASE_ADDRESS_4,
+ PCI_BASE_ADDRESS_5,
+ 0};
+ for (i = 0; address[i]; i++) {
+ pci_read_config_dword(pcid, address[i], &bar);
- case IOCTL_CMD_TEST:
+ dev_dbg(&pcid->dev, "bar %d is %x\n", i, bar);
- if (!(pDevice->flags & DEVICE_FLAGS_OPENED)) {
- rc = -EFAULT;
- break;
- } else {
- rc = 0;
- }
- pReq = (PSCmdRequest)rq;
- pReq->wResult = MAGIC_CODE;
- break;
+ if (!bar) {
+ dev_dbg(&pcid->dev,
+ "bar %d not implemented\n", i);
+ continue;
+ }
- case IOCTL_CMD_SET:
+ if (bar & PCI_BASE_ADDRESS_SPACE_IO) {
+ /* This is IO */
-#ifdef SndEvt_ToAPI
- if ((((PSCmdRequest)rq)->wCmdCode != WLAN_CMD_SET_EVT) &&
- !(pDevice->flags & DEVICE_FLAGS_OPENED))
-#else
- if (!(pDevice->flags & DEVICE_FLAGS_OPENED) &&
- (((PSCmdRequest)rq)->wCmdCode != WLAN_CMD_SET_WPA))
-#endif
- {
- rc = -EFAULT;
- break;
+ len = bar & (PCI_BASE_ADDRESS_IO_MASK & 0xffff);
+ len = len & ~(len - 1);
+
+ dev_dbg(&pcid->dev,
+ "IO space: len in IO %x, BAR %d\n",
+ len, i);
} else {
- rc = 0;
+ len = bar & 0xfffffff0;
+ len = ~len + 1;
+
+ dev_dbg(&pcid->dev,
+ "len in MEM %x, BAR %d\n", len, i);
}
+ }
+ }
+#endif
- if (test_and_set_bit(0, (void *)&(pMgmt->uCmdBusy)))
- return -EBUSY;
+ priv->PortOffset = ioremap(priv->memaddr & PCI_BASE_ADDRESS_MEM_MASK,
+ priv->io_size);
+ if (!priv->PortOffset) {
+ dev_err(&pcid->dev, ": Failed to IO remapping ..\n");
+ device_free_info(priv);
+ return -ENODEV;
+ }
- rc = private_ioctl(pDevice, rq);
- clear_bit(0, (void *)&(pMgmt->uCmdBusy));
- break;
+ rc = pci_request_regions(pcid, DEVICE_NAME);
+ if (rc) {
+ dev_err(&pcid->dev, ": Failed to find PCI device\n");
+ device_free_info(priv);
+ return -ENODEV;
+ }
- case IOCTL_CMD_HOSTAPD:
+ /* do reset */
+ if (!MACbSoftwareReset(priv->PortOffset)) {
+ dev_err(&pcid->dev, ": Failed to access MAC hardware..\n");
+ device_free_info(priv);
+ return -ENODEV;
+ }
+ /* initialize the MAC so the eeprom contents are reloaded */
+ MACvInitialize(priv->PortOffset);
+ MACvReadEtherAddress(priv->PortOffset, priv->abyCurrentNetAddr);
- rc = vt6655_hostap_ioctl(pDevice, &wrq->u.data);
- break;
+ device_get_options(priv);
+ device_set_options(priv);
+ /* Mask out the options that cannot be set on the chip */
+ priv->sOpts.flags &= pChip_info->flags;
- case IOCTL_CMD_WPA:
+ /* Enable the chip specified capabilities */
+ priv->flags = priv->sOpts.flags | (pChip_info->flags & 0xff000000UL);
- rc = wpa_ioctl(pDevice, &wrq->u.data);
- break;
+ wiphy = priv->hw->wiphy;
- case SIOCETHTOOL:
- return ethtool_ioctl(dev, rq->ifr_data);
- // All other calls are currently unsupported
+ wiphy->frag_threshold = FRAG_THRESH_DEF;
+ wiphy->rts_threshold = RTS_THRESH_DEF;
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP);
- default:
- rc = -EOPNOTSUPP;
- pr_debug("Ioctl command not support..%x\n", cmd);
+ priv->hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS |
+ IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_TIMING_BEACON_ONLY;
- }
+ priv->hw->max_signal = 100;
- if (pDevice->bCommit) {
- if (pMgmt->eConfigMode == WMAC_CONFIG_AP) {
- netif_stop_queue(pDevice->dev);
- spin_lock_irq(&pDevice->lock);
- bScheduleCommand((void *)pDevice, WLAN_CMD_RUN_AP, NULL);
- spin_unlock_irq(&pDevice->lock);
- } else {
- pr_debug("Commit the settings\n");
- spin_lock_irq(&pDevice->lock);
- pDevice->bLinkPass = false;
- memset(pMgmt->abyCurrBSSID, 0, 6);
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- netif_stop_queue(pDevice->dev);
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- pMgmt->eScanType = WMAC_SCAN_ACTIVE;
- if (!pDevice->bWPASuppWextEnabled)
-#endif
- bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
- bScheduleCommand((void *)pDevice, WLAN_CMD_SSID, NULL);
- spin_unlock_irq(&pDevice->lock);
- }
- pDevice->bCommit = false;
- }
+ if (vnt_init(priv))
+ return -ENODEV;
- return rc;
+ device_print_info(priv);
+ pci_set_drvdata(pcid, priv);
+
+ return 0;
}
-static int ethtool_ioctl(struct net_device *dev, void __user *useraddr)
+/*------------------------------------------------------------------*/
+
+#ifdef CONFIG_PM
+static int vt6655_suspend(struct pci_dev *pcid, pm_message_t state)
{
- u32 ethcmd;
+ struct vnt_private *priv = pci_get_drvdata(pcid);
+ unsigned long flags;
- if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
- return -EFAULT;
+ spin_lock_irqsave(&priv->lock, flags);
- switch (ethcmd) {
- case ETHTOOL_GDRVINFO: {
- struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
+ pci_save_state(pcid);
- strncpy(info.driver, DEVICE_NAME, sizeof(info.driver)-1);
- strncpy(info.version, DEVICE_VERSION, sizeof(info.version)-1);
- if (copy_to_user(useraddr, &info, sizeof(info)))
- return -EFAULT;
- return 0;
- }
+ MACbShutdown(priv->PortOffset);
- }
+ pci_disable_device(pcid);
+ pci_set_power_state(pcid, pci_choose_state(pcid, state));
+
+ spin_unlock_irqrestore(&priv->lock, flags);
- return -EOPNOTSUPP;
+ return 0;
}
-/*------------------------------------------------------------------*/
+static int vt6655_resume(struct pci_dev *pcid)
+{
+
+ pci_set_power_state(pcid, PCI_D0);
+ pci_enable_wake(pcid, PCI_D0, 0);
+ pci_restore_state(pcid);
+
+ return 0;
+}
+#endif
MODULE_DEVICE_TABLE(pci, vt6655_pci_id_table);
@@ -3025,8 +1871,8 @@ static struct pci_driver device_driver = {
.probe = vt6655_probe,
.remove = vt6655_remove,
#ifdef CONFIG_PM
- .suspend = viawget_suspend,
- .resume = viawget_resume,
+ .suspend = vt6655_suspend,
+ .resume = vt6655_resume,
#endif
};
@@ -3067,75 +1913,10 @@ device_notify_reboot(struct notifier_block *nb, unsigned long event, void *p)
for_each_pci_dev(pdev) {
if (pci_dev_driver(pdev) == &device_driver) {
if (pci_get_drvdata(pdev))
- viawget_suspend(pdev, PMSG_HIBERNATE);
+ vt6655_suspend(pdev, PMSG_HIBERNATE);
}
}
}
return NOTIFY_DONE;
}
-
-static int
-viawget_suspend(struct pci_dev *pcid, pm_message_t state)
-{
- int power_status; // to silence the compiler
-
- struct vnt_private *pDevice = pci_get_drvdata(pcid);
- PSMgmtObject pMgmt = pDevice->pMgmt;
-
- netif_stop_queue(pDevice->dev);
- spin_lock_irq(&pDevice->lock);
- pci_save_state(pcid);
- del_timer(&pDevice->sTimerCommand);
- del_timer(&pMgmt->sTimerSecondCallback);
- pDevice->cbFreeCmdQueue = CMD_Q_SIZE;
- pDevice->uCmdDequeueIdx = 0;
- pDevice->uCmdEnqueueIdx = 0;
- pDevice->bCmdRunning = false;
- MACbShutdown(pDevice->PortOffset);
- MACvSaveContext(pDevice->PortOffset, pDevice->abyMacContext);
- pDevice->bLinkPass = false;
- memset(pMgmt->abyCurrBSSID, 0, 6);
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- pci_disable_device(pcid);
- power_status = pci_set_power_state(pcid, pci_choose_state(pcid, state));
- spin_unlock_irq(&pDevice->lock);
- return 0;
-}
-
-static int
-viawget_resume(struct pci_dev *pcid)
-{
- struct vnt_private *pDevice = pci_get_drvdata(pcid);
- PSMgmtObject pMgmt = pDevice->pMgmt;
- int power_status; // to silence the compiler
-
- power_status = pci_set_power_state(pcid, PCI_D0);
- power_status = pci_enable_wake(pcid, PCI_D0, 0);
- pci_restore_state(pcid);
- if (netif_running(pDevice->dev)) {
- spin_lock_irq(&pDevice->lock);
- MACvRestoreContext(pDevice->PortOffset, pDevice->abyMacContext);
- device_init_registers(pDevice);
- if (pMgmt->sNodeDBTable[0].bActive) { // Assoc with BSS
- pMgmt->sNodeDBTable[0].bActive = false;
- pDevice->bLinkPass = false;
- if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
- // In Adhoc, BSS state set back to started.
- pMgmt->eCurrState = WMAC_STATE_STARTED;
- } else {
- pMgmt->eCurrMode = WMAC_MODE_STANDBY;
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- }
- }
- init_timer(&pMgmt->sTimerSecondCallback);
- init_timer(&pDevice->sTimerCommand);
- MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
- BSSvClearBSSList((void *)pDevice, pDevice->bLinkPass);
- bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, NULL);
- bScheduleCommand((void *)pDevice, WLAN_CMD_SSID, NULL);
- spin_unlock_irq(&pDevice->lock);
- }
- return 0;
-}
-
#endif
diff --git a/drivers/staging/vt6655/dpc.c b/drivers/staging/vt6655/dpc.c
index 8515b8c8080..977683cb739 100644
--- a/drivers/staging/vt6655/dpc.c
+++ b/drivers/staging/vt6655/dpc.c
@@ -25,1289 +25,135 @@
* Date: May 20, 2003
*
* Functions:
- * device_receive_frame - Rcv 802.11 frame function
- * s_bAPModeRxCtl- AP Rcv frame filer Ctl.
- * s_bAPModeRxData- AP Rcv data frame handle
- * s_bHandleRxEncryption- Rcv decrypted data via on-fly
- * s_bHostWepRxEncryption- Rcv encrypted data via host
- * s_byGetRateIdx- get rate index
- * s_vGetDASA- get data offset
- * s_vProcessRxMACHeader- Rcv 802.11 and translate to 802.3
*
* Revision History:
*
*/
#include "device.h"
-#include "rxtx.h"
-#include "tether.h"
-#include "card.h"
-#include "bssdb.h"
-#include "mac.h"
#include "baseband.h"
-#include "michael.h"
-#include "tkip.h"
-#include "tcrc.h"
-#include "wctl.h"
-#include "wroute.h"
-#include "hostap.h"
#include "rf.h"
-#include "iowpa.h"
-#include "aes_ccmp.h"
#include "dpc.h"
-/*--------------------- Static Definitions -------------------------*/
-
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Variables --------------------------*/
-static const unsigned char acbyRxRate[MAX_RATE] =
-{2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108};
-
-/*--------------------- Static Functions --------------------------*/
-
-/*--------------------- Static Definitions -------------------------*/
-
-/*--------------------- Static Functions --------------------------*/
-
-static unsigned char s_byGetRateIdx(unsigned char byRate);
-
-static void
-s_vGetDASA(unsigned char *pbyRxBufferAddr, unsigned int *pcbHeaderSize,
- PSEthernetHeader psEthHeader);
-
-static void
-s_vProcessRxMACHeader(struct vnt_private *pDevice, unsigned char *pbyRxBufferAddr,
- unsigned int cbPacketSize, bool bIsWEP, bool bExtIV,
- unsigned int *pcbHeadSize);
-
-static bool s_bAPModeRxCtl(
- struct vnt_private *pDevice,
- unsigned char *pbyFrame,
- int iSANodeIndex
-);
-
-static bool s_bAPModeRxData(
- struct vnt_private *pDevice,
- struct sk_buff *skb,
- unsigned int FrameSize,
- unsigned int cbHeaderOffset,
- int iSANodeIndex,
- int iDANodeIndex
-);
-
-static bool s_bHandleRxEncryption(
- struct vnt_private *pDevice,
- unsigned char *pbyFrame,
- unsigned int FrameSize,
- unsigned char *pbyRsr,
- unsigned char *pbyNewRsr,
- PSKeyItem *pKeyOut,
- bool *pbExtIV,
- unsigned short *pwRxTSC15_0,
- unsigned long *pdwRxTSC47_16
-);
-
-static bool s_bHostWepRxEncryption(
-
- struct vnt_private *pDevice,
- unsigned char *pbyFrame,
- unsigned int FrameSize,
- unsigned char *pbyRsr,
- bool bOnFly,
- PSKeyItem pKey,
- unsigned char *pbyNewRsr,
- bool *pbExtIV,
- unsigned short *pwRxTSC15_0,
- unsigned long *pdwRxTSC47_16
-
-);
-
-/*--------------------- Export Variables --------------------------*/
-
-/*+
- *
- * Description:
- * Translate Rcv 802.11 header to 802.3 header with Rx buffer
- *
- * Parameters:
- * In:
- * pDevice
- * dwRxBufferAddr - Address of Rcv Buffer
- * cbPacketSize - Rcv Packet size
- * bIsWEP - If Rcv with WEP
- * Out:
- * pcbHeaderSize - 802.11 header size
- *
- * Return Value: None
- *
- -*/
-static void
-s_vProcessRxMACHeader(struct vnt_private *pDevice,
- unsigned char *pbyRxBufferAddr,
- unsigned int cbPacketSize, bool bIsWEP, bool bExtIV,
- unsigned int *pcbHeadSize)
+static bool vnt_rx_data(struct vnt_private *priv, struct sk_buff *skb,
+ u16 bytes_received)
{
- unsigned char *pbyRxBuffer;
- unsigned int cbHeaderSize = 0;
- unsigned short *pwType;
- PS802_11Header pMACHeader;
- int ii;
-
- pMACHeader = (PS802_11Header) (pbyRxBufferAddr + cbHeaderSize);
-
- s_vGetDASA((unsigned char *)pMACHeader, &cbHeaderSize, &pDevice->sRxEthHeader);
-
- if (bIsWEP) {
- if (bExtIV) {
- // strip IV&ExtIV , add 8 byte
- cbHeaderSize += (WLAN_HDR_ADDR3_LEN + 8);
- } else {
- // strip IV , add 4 byte
- cbHeaderSize += (WLAN_HDR_ADDR3_LEN + 4);
- }
- } else {
- cbHeaderSize += WLAN_HDR_ADDR3_LEN;
- }
-
- pbyRxBuffer = (unsigned char *)(pbyRxBufferAddr + cbHeaderSize);
- if (ether_addr_equal(pbyRxBuffer, pDevice->abySNAP_Bridgetunnel)) {
- cbHeaderSize += 6;
- } else if (ether_addr_equal(pbyRxBuffer, pDevice->abySNAP_RFC1042)) {
- cbHeaderSize += 6;
- pwType = (unsigned short *)(pbyRxBufferAddr + cbHeaderSize);
- if ((*pwType != TYPE_PKT_IPX) && (*pwType != cpu_to_le16(0xF380))) {
- } else {
- cbHeaderSize -= 8;
- pwType = (unsigned short *)(pbyRxBufferAddr + cbHeaderSize);
- if (bIsWEP) {
- if (bExtIV)
- *pwType = htons(cbPacketSize - WLAN_HDR_ADDR3_LEN - 8); // 8 is IV&ExtIV
- else
- *pwType = htons(cbPacketSize - WLAN_HDR_ADDR3_LEN - 4); // 4 is IV
-
- } else {
- *pwType = htons(cbPacketSize - WLAN_HDR_ADDR3_LEN);
- }
- }
- } else {
- cbHeaderSize -= 2;
- pwType = (unsigned short *)(pbyRxBufferAddr + cbHeaderSize);
- if (bIsWEP) {
- if (bExtIV)
- *pwType = htons(cbPacketSize - WLAN_HDR_ADDR3_LEN - 8); // 8 is IV&ExtIV
- else
- *pwType = htons(cbPacketSize - WLAN_HDR_ADDR3_LEN - 4); // 4 is IV
-
- } else {
- *pwType = htons(cbPacketSize - WLAN_HDR_ADDR3_LEN);
- }
- }
-
- cbHeaderSize -= (ETH_ALEN * 2);
- pbyRxBuffer = (unsigned char *)(pbyRxBufferAddr + cbHeaderSize);
- for (ii = 0; ii < ETH_ALEN; ii++)
- *pbyRxBuffer++ = pDevice->sRxEthHeader.abyDstAddr[ii];
- for (ii = 0; ii < ETH_ALEN; ii++)
- *pbyRxBuffer++ = pDevice->sRxEthHeader.abySrcAddr[ii];
-
- *pcbHeadSize = cbHeaderSize;
-}
-
-static unsigned char s_byGetRateIdx(unsigned char byRate)
-{
- unsigned char byRateIdx;
-
- for (byRateIdx = 0; byRateIdx < MAX_RATE; byRateIdx++) {
- if (acbyRxRate[byRateIdx % MAX_RATE] == byRate)
- return byRateIdx;
- }
-
- return 0;
-}
-
-static void
-s_vGetDASA(unsigned char *pbyRxBufferAddr, unsigned int *pcbHeaderSize,
- PSEthernetHeader psEthHeader)
-{
- unsigned int cbHeaderSize = 0;
- PS802_11Header pMACHeader;
- int ii;
-
- pMACHeader = (PS802_11Header) (pbyRxBufferAddr + cbHeaderSize);
-
- if ((pMACHeader->wFrameCtl & FC_TODS) == 0) {
- if (pMACHeader->wFrameCtl & FC_FROMDS) {
- for (ii = 0; ii < ETH_ALEN; ii++) {
- psEthHeader->abyDstAddr[ii] = pMACHeader->abyAddr1[ii];
- psEthHeader->abySrcAddr[ii] = pMACHeader->abyAddr3[ii];
- }
- } else {
- // IBSS mode
- for (ii = 0; ii < ETH_ALEN; ii++) {
- psEthHeader->abyDstAddr[ii] = pMACHeader->abyAddr1[ii];
- psEthHeader->abySrcAddr[ii] = pMACHeader->abyAddr2[ii];
- }
- }
- } else {
- // Is AP mode..
- if (pMACHeader->wFrameCtl & FC_FROMDS) {
- for (ii = 0; ii < ETH_ALEN; ii++) {
- psEthHeader->abyDstAddr[ii] = pMACHeader->abyAddr3[ii];
- psEthHeader->abySrcAddr[ii] = pMACHeader->abyAddr4[ii];
- cbHeaderSize += 6;
- }
- } else {
- for (ii = 0; ii < ETH_ALEN; ii++) {
- psEthHeader->abyDstAddr[ii] = pMACHeader->abyAddr3[ii];
- psEthHeader->abySrcAddr[ii] = pMACHeader->abyAddr2[ii];
- }
- }
- }
- *pcbHeaderSize = cbHeaderSize;
-}
-
-bool
-device_receive_frame(
- struct vnt_private *pDevice,
- PSRxDesc pCurrRD
-)
-{
- PDEVICE_RD_INFO pRDInfo = pCurrRD->pRDInfo;
- struct net_device_stats *pStats = &pDevice->dev->stats;
- struct sk_buff *skb;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- PSRxMgmtPacket pRxPacket = &(pDevice->pMgmt->sRxPacket);
- PS802_11Header p802_11Header;
- unsigned char *pbyRsr;
- unsigned char *pbyNewRsr;
- unsigned char *pbyRSSI;
- __le64 *pqwTSFTime;
- unsigned short *pwFrameSize;
- unsigned char *pbyFrame;
- bool bDeFragRx = false;
- bool bIsWEP = false;
- unsigned int cbHeaderOffset;
- unsigned int FrameSize;
- unsigned short wEtherType = 0;
- int iSANodeIndex = -1;
- int iDANodeIndex = -1;
- unsigned int ii;
- unsigned int cbIVOffset;
- bool bExtIV = false;
- unsigned char *pbyRxSts;
- unsigned char *pbyRxRate;
- unsigned char *pbySQ;
- unsigned int cbHeaderSize;
- PSKeyItem pKey = NULL;
- unsigned short wRxTSC15_0 = 0;
- unsigned long dwRxTSC47_16 = 0;
- SKeyItem STempKey;
- // 802.11h RPI
- unsigned long dwDuration = 0;
- long ldBm = 0;
- long ldBmThreshold = 0;
- PS802_11Header pMACHeader;
- bool bRxeapol_key = false;
-
- skb = pRDInfo->skb;
-
- pci_unmap_single(pDevice->pcid, pRDInfo->skb_dma,
- pDevice->rx_buf_sz, PCI_DMA_FROMDEVICE);
-
- pwFrameSize = (unsigned short *)(skb->data + 2);
- FrameSize = cpu_to_le16(pCurrRD->m_rd1RD1.wReqCount) - cpu_to_le16(pCurrRD->m_rd0RD0.wResCount);
-
- // Max: 2312Payload + 30HD +4CRC + 2Padding + 4Len + 8TSF + 4RSR
- // Min (ACK): 10HD +4CRC + 2Padding + 4Len + 8TSF + 4RSR
- if ((FrameSize > 2364) || (FrameSize <= 32)) {
- // Frame Size error drop this packet.
- pr_debug("---------- WRONG Length 1\n");
- return false;
- }
-
- pbyRxSts = (unsigned char *)(skb->data);
- pbyRxRate = (unsigned char *)(skb->data + 1);
- pbyRsr = (unsigned char *)(skb->data + FrameSize - 1);
- pbyRSSI = (unsigned char *)(skb->data + FrameSize - 2);
- pbyNewRsr = (unsigned char *)(skb->data + FrameSize - 3);
- pbySQ = (unsigned char *)(skb->data + FrameSize - 4);
- pqwTSFTime = (__le64 *)(skb->data + FrameSize - 12);
- pbyFrame = (unsigned char *)(skb->data + 4);
-
- // get packet size
- FrameSize = cpu_to_le16(*pwFrameSize);
-
- if ((FrameSize > 2346)|(FrameSize < 14)) { // Max: 2312Payload + 30HD +4CRC
- // Min: 14 bytes ACK
- pr_debug("---------- WRONG Length 2\n");
- return false;
- }
-
- // update receive statistic counter
- STAvUpdateRDStatCounter(&pDevice->scStatistic,
- *pbyRsr,
- *pbyNewRsr,
- *pbyRxRate,
- pbyFrame,
- FrameSize);
-
- pMACHeader = (PS802_11Header)((unsigned char *)(skb->data) + 8);
-
- if (pDevice->bMeasureInProgress) {
- if ((*pbyRsr & RSR_CRCOK) != 0)
- pDevice->byBasicMap |= 0x01;
-
- dwDuration = (FrameSize << 4);
- dwDuration /= acbyRxRate[*pbyRxRate%MAX_RATE];
- if (*pbyRxRate <= RATE_11M) {
- if (*pbyRxSts & 0x01) {
- // long preamble
- dwDuration += 192;
- } else {
- // short preamble
- dwDuration += 96;
- }
- } else {
- dwDuration += 16;
- }
- RFvRSSITodBm(pDevice, *pbyRSSI, &ldBm);
- ldBmThreshold = -57;
- for (ii = 7; ii > 0;) {
- if (ldBm > ldBmThreshold)
- break;
-
- ldBmThreshold -= 5;
- ii--;
- }
- pDevice->dwRPIs[ii] += dwDuration;
- return false;
- }
-
- if (!is_multicast_ether_addr(pbyFrame)) {
- if (WCTLbIsDuplicate(&(pDevice->sDupRxCache), (PS802_11Header)(skb->data + 4))) {
- pDevice->s802_11Counter.FrameDuplicateCount++;
- return false;
- }
- }
-
- // Use for TKIP MIC
- s_vGetDASA(skb->data+4, &cbHeaderSize, &pDevice->sRxEthHeader);
-
- // filter packet send from myself
- if (ether_addr_equal(pDevice->sRxEthHeader.abySrcAddr,
- pDevice->abyCurrentNetAddr))
- return false;
-
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) || (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)) {
- if (IS_CTL_PSPOLL(pbyFrame) || !IS_TYPE_CONTROL(pbyFrame)) {
- p802_11Header = (PS802_11Header)(pbyFrame);
- // get SA NodeIndex
- if (BSSDBbIsSTAInNodeDB(pMgmt, (unsigned char *)(p802_11Header->abyAddr2), &iSANodeIndex)) {
- pMgmt->sNodeDBTable[iSANodeIndex].ulLastRxJiffer = jiffies;
- pMgmt->sNodeDBTable[iSANodeIndex].uInActiveCount = 0;
- }
- }
- }
-
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- if (s_bAPModeRxCtl(pDevice, pbyFrame, iSANodeIndex))
- return false;
- }
-
- if (IS_FC_WEP(pbyFrame)) {
- bool bRxDecryOK = false;
-
- pr_debug("rx WEP pkt\n");
- bIsWEP = true;
- if ((pDevice->bEnableHostWEP) && (iSANodeIndex >= 0)) {
- pKey = &STempKey;
- pKey->byCipherSuite = pMgmt->sNodeDBTable[iSANodeIndex].byCipherSuite;
- pKey->dwKeyIndex = pMgmt->sNodeDBTable[iSANodeIndex].dwKeyIndex;
- pKey->uKeyLength = pMgmt->sNodeDBTable[iSANodeIndex].uWepKeyLength;
- pKey->dwTSC47_16 = pMgmt->sNodeDBTable[iSANodeIndex].dwTSC47_16;
- pKey->wTSC15_0 = pMgmt->sNodeDBTable[iSANodeIndex].wTSC15_0;
- memcpy(pKey->abyKey,
- &pMgmt->sNodeDBTable[iSANodeIndex].abyWepKey[0],
- pKey->uKeyLength
-);
-
- bRxDecryOK = s_bHostWepRxEncryption(pDevice,
- pbyFrame,
- FrameSize,
- pbyRsr,
- pMgmt->sNodeDBTable[iSANodeIndex].bOnFly,
- pKey,
- pbyNewRsr,
- &bExtIV,
- &wRxTSC15_0,
- &dwRxTSC47_16);
- } else {
- bRxDecryOK = s_bHandleRxEncryption(pDevice,
- pbyFrame,
- FrameSize,
- pbyRsr,
- pbyNewRsr,
- &pKey,
- &bExtIV,
- &wRxTSC15_0,
- &dwRxTSC47_16);
- }
-
- if (bRxDecryOK) {
- if ((*pbyNewRsr & NEWRSR_DECRYPTOK) == 0) {
- pr_debug("ICV Fail\n");
- if ((pDevice->pMgmt->eAuthenMode == WMAC_AUTH_WPA) ||
- (pDevice->pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK) ||
- (pDevice->pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) ||
- (pDevice->pMgmt->eAuthenMode == WMAC_AUTH_WPA2) ||
- (pDevice->pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) {
- if ((pKey != NULL) && (pKey->byCipherSuite == KEY_CTL_TKIP))
- pDevice->s802_11Counter.TKIPICVErrors++;
- else if ((pKey != NULL) && (pKey->byCipherSuite == KEY_CTL_CCMP))
- pDevice->s802_11Counter.CCMPDecryptErrors++;
- }
- return false;
- }
- } else {
- pr_debug("WEP Func Fail\n");
- return false;
- }
- if ((pKey != NULL) && (pKey->byCipherSuite == KEY_CTL_CCMP))
- FrameSize -= 8; // Message Integrity Code
- else
- FrameSize -= 4; // 4 is ICV
- }
-
- //
- // RX OK
- //
- //remove the CRC length
- FrameSize -= ETH_FCS_LEN;
-
- if ((!(*pbyRsr & (RSR_ADDRBROAD | RSR_ADDRMULTI))) && // unicast address
- (IS_FRAGMENT_PKT((skb->data+4)))
-) {
- // defragment
- bDeFragRx = WCTLbHandleFragment(pDevice, (PS802_11Header)(skb->data+4), FrameSize, bIsWEP, bExtIV);
- pDevice->s802_11Counter.ReceivedFragmentCount++;
- if (bDeFragRx) {
- // defrag complete
- skb = pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].skb;
- FrameSize = pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].cbFrameLength;
-
- } else {
- return false;
- }
- }
-
-// Management & Control frame Handle
- if ((IS_TYPE_DATA((skb->data+4))) == false) {
- // Handle Control & Manage Frame
-
- if (IS_TYPE_MGMT((skb->data+4))) {
- unsigned char *pbyData1;
- unsigned char *pbyData2;
-
- pRxPacket->p80211Header = (PUWLAN_80211HDR)(skb->data+4);
- pRxPacket->cbMPDULen = FrameSize;
- pRxPacket->uRSSI = *pbyRSSI;
- pRxPacket->bySQ = *pbySQ;
- pRxPacket->qwLocalTSF = le64_to_cpu(*pqwTSFTime);
- if (bIsWEP) {
- // strip IV
- pbyData1 = WLAN_HDR_A3_DATA_PTR(skb->data+4);
- pbyData2 = WLAN_HDR_A3_DATA_PTR(skb->data+4) + 4;
- for (ii = 0; ii < (FrameSize - 4); ii++) {
- *pbyData1 = *pbyData2;
- pbyData1++;
- pbyData2++;
- }
- }
- pRxPacket->byRxRate = s_byGetRateIdx(*pbyRxRate);
- pRxPacket->byRxChannel = (*pbyRxSts) >> 2;
-
- vMgrRxManagePacket((void *)pDevice, pDevice->pMgmt, pRxPacket);
-
- // hostap Deamon handle 802.11 management
- if (pDevice->bEnableHostapd) {
- skb->dev = pDevice->apdev;
- skb->data += 4;
- skb->tail += 4;
- skb_put(skb, FrameSize);
- skb_reset_mac_header(skb);
- skb->pkt_type = PACKET_OTHERHOST;
- skb->protocol = htons(ETH_P_802_2);
- memset(skb->cb, 0, sizeof(skb->cb));
- netif_rx(skb);
- return true;
- }
- }
-
+ struct ieee80211_hw *hw = priv->hw;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_rx_status rx_status = { 0 };
+ struct ieee80211_hdr *hdr;
+ __le16 fc;
+ u8 *rsr, *new_rsr, *rssi;
+ __le64 *tsf_time;
+ u16 frame_size;
+ int ii, r;
+ u8 *rx_sts, *rx_rate, *sq;
+ u8 *skb_data;
+ u8 rate_idx = 0;
+ u8 rate[MAX_RATE] = {2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108};
+ long rx_dbm;
+
+ /* [31:16] RcvByteCount (does not include the 4-byte status) */
+ frame_size = le16_to_cpu(*((__le16 *)(skb->data + 2)));
+ if (frame_size > 2346 || frame_size < 14) {
+ dev_dbg(&priv->pcid->dev, "------- WRONG Length 1\n");
return false;
- } else {
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- //In AP mode, hw only check addr1(BSSID or RA) if equal to local MAC.
- if (!(*pbyRsr & RSR_BSSIDOK)) {
- if (bDeFragRx) {
- if (!device_alloc_frag_buf(pDevice, &pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx])) {
- pr_err("%s: can not alloc more frag bufs\n",
- pDevice->dev->name);
- }
- }
- return false;
- }
- } else {
- // discard DATA packet while not associate || BSSID error
- if (!pDevice->bLinkPass || !(*pbyRsr & RSR_BSSIDOK)) {
- if (bDeFragRx) {
- if (!device_alloc_frag_buf(pDevice, &pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx])) {
- pr_err("%s: can not alloc more frag bufs\n",
- pDevice->dev->name);
- }
- }
- return false;
- }
- //mike add:station mode check eapol-key challenge--->
- {
- unsigned char Protocol_Version; //802.1x Authentication
- unsigned char Packet_Type; //802.1x Authentication
-
- if (bIsWEP)
- cbIVOffset = 8;
- else
- cbIVOffset = 0;
- wEtherType = (skb->data[cbIVOffset + 8 + 24 + 6] << 8) |
- skb->data[cbIVOffset + 8 + 24 + 6 + 1];
- Protocol_Version = skb->data[cbIVOffset + 8 + 24 + 6 + 1 + 1];
- Packet_Type = skb->data[cbIVOffset + 8 + 24 + 6 + 1 + 1 + 1];
- if (wEtherType == ETH_P_PAE) { //Protocol Type in LLC-Header
- if (((Protocol_Version == 1) || (Protocol_Version == 2)) &&
- (Packet_Type == 3)) { //802.1x OR eapol-key challenge frame receive
- bRxeapol_key = true;
- }
- }
- }
- //mike add:station mode check eapol-key challenge<---
- }
- }
-
-// Data frame Handle
-
- if (pDevice->bEnablePSMode) {
- if (!IS_FC_MOREDATA((skb->data+4))) {
- if (pDevice->pMgmt->bInTIMWake == true)
- pDevice->pMgmt->bInTIMWake = false;
- }
- }
-
- // Now it only supports 802.11g Infrastructure Mode, and support rate must up to 54 Mbps
- if (pDevice->bDiversityEnable && (FrameSize > 50) &&
- (pDevice->op_mode == NL80211_IFTYPE_STATION) &&
- pDevice->bLinkPass) {
- BBvAntennaDiversity(pDevice, s_byGetRateIdx(*pbyRxRate), 0);
- }
-
- if (pDevice->byLocalID != REV_ID_VT3253_B1)
- pDevice->uCurrRSSI = *pbyRSSI;
-
- pDevice->byCurrSQ = *pbySQ;
-
- if ((*pbyRSSI != 0) &&
- (pMgmt->pCurrBSS != NULL)) {
- RFvRSSITodBm(pDevice, *pbyRSSI, &ldBm);
- // Monitor if RSSI is too strong.
- pMgmt->pCurrBSS->byRSSIStatCnt++;
- pMgmt->pCurrBSS->byRSSIStatCnt %= RSSI_STAT_COUNT;
- pMgmt->pCurrBSS->ldBmAverage[pMgmt->pCurrBSS->byRSSIStatCnt] = ldBm;
- for (ii = 0; ii < RSSI_STAT_COUNT; ii++)
- if (pMgmt->pCurrBSS->ldBmAverage[ii] != 0)
- pMgmt->pCurrBSS->ldBmMAX = max(pMgmt->pCurrBSS->ldBmAverage[ii], ldBm);
-
}
- // -----------------------------------------------
+ skb_data = (u8 *)skb->data;
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && pDevice->bEnable8021x) {
- unsigned char abyMacHdr[24];
+ rx_sts = skb_data;
+ rx_rate = skb_data + 1;
- // Only 802.1x packet incoming allowed
- if (bIsWEP)
- cbIVOffset = 8;
- else
- cbIVOffset = 0;
- wEtherType = (skb->data[cbIVOffset + 4 + 24 + 6] << 8) |
- skb->data[cbIVOffset + 4 + 24 + 6 + 1];
+ sband = hw->wiphy->bands[hw->conf.chandef.chan->band];
- pr_debug("wEtherType = %04x\n", wEtherType);
- if (wEtherType == ETH_P_PAE) {
- skb->dev = pDevice->apdev;
-
- if (bIsWEP) {
- // strip IV header(8)
- memcpy(&abyMacHdr[0], (skb->data + 4), 24);
- memcpy((skb->data + 4 + cbIVOffset), &abyMacHdr[0], 24);
- }
- skb->data += (cbIVOffset + 4);
- skb->tail += (cbIVOffset + 4);
- skb_put(skb, FrameSize);
- skb_reset_mac_header(skb);
-
- skb->pkt_type = PACKET_OTHERHOST;
- skb->protocol = htons(ETH_P_802_2);
- memset(skb->cb, 0, sizeof(skb->cb));
- netif_rx(skb);
- return true;
-
- }
- // check if 802.1x authorized
- if (!(pMgmt->sNodeDBTable[iSANodeIndex].dwFlags & WLAN_STA_AUTHORIZED))
- return false;
+ for (r = RATE_1M; r < MAX_RATE; r++) {
+ if (*rx_rate == rate[r])
+ break;
}
- if ((pKey != NULL) && (pKey->byCipherSuite == KEY_CTL_TKIP)) {
- if (bIsWEP)
- FrameSize -= 8; //MIC
- }
-
- //--------------------------------------------------------------------------------
- // Soft MIC
- if ((pKey != NULL) && (pKey->byCipherSuite == KEY_CTL_TKIP)) {
- if (bIsWEP) {
- __le32 *pdwMIC_L;
- __le32 *pdwMIC_R;
- __le32 dwMIC_Priority;
- __le32 dwMICKey0 = 0, dwMICKey1 = 0;
- u32 dwLocalMIC_L = 0;
- u32 dwLocalMIC_R = 0;
- viawget_wpa_header *wpahdr;
-
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- dwMICKey0 = cpu_to_le32(*(u32 *)(&pKey->abyKey[24]));
- dwMICKey1 = cpu_to_le32(*(u32 *)(&pKey->abyKey[28]));
- } else {
- if (pDevice->pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) {
- dwMICKey0 = cpu_to_le32(*(u32 *)(&pKey->abyKey[16]));
- dwMICKey1 = cpu_to_le32(*(u32 *)(&pKey->abyKey[20]));
- } else if ((pKey->dwKeyIndex & BIT28) == 0) {
- dwMICKey0 = cpu_to_le32(*(u32 *)(&pKey->abyKey[16]));
- dwMICKey1 = cpu_to_le32(*(u32 *)(&pKey->abyKey[20]));
- } else {
- dwMICKey0 = cpu_to_le32(*(u32 *)(&pKey->abyKey[24]));
- dwMICKey1 = cpu_to_le32(*(u32 *)(&pKey->abyKey[28]));
- }
- }
-
- MIC_vInit(dwMICKey0, dwMICKey1);
- MIC_vAppend((unsigned char *)&(pDevice->sRxEthHeader.abyDstAddr[0]), 12);
- dwMIC_Priority = 0;
- MIC_vAppend((unsigned char *)&dwMIC_Priority, 4);
- // 4 is Rcv buffer header, 24 is MAC Header, and 8 is IV and Ext IV.
- MIC_vAppend((unsigned char *)(skb->data + 4 + WLAN_HDR_ADDR3_LEN + 8),
- FrameSize - WLAN_HDR_ADDR3_LEN - 8);
- MIC_vGetMIC(&dwLocalMIC_L, &dwLocalMIC_R);
- MIC_vUnInit();
-
- pdwMIC_L = (__le32 *)(skb->data + 4 + FrameSize);
- pdwMIC_R = (__le32 *)(skb->data + 4 + FrameSize + 4);
-
- if ((le32_to_cpu(*pdwMIC_L) != dwLocalMIC_L) ||
- (le32_to_cpu(*pdwMIC_R) != dwLocalMIC_R) ||
- pDevice->bRxMICFail) {
- pr_debug("MIC comparison is fail!\n");
- pDevice->bRxMICFail = false;
- pDevice->s802_11Counter.TKIPLocalMICFailures++;
- if (bDeFragRx) {
- if (!device_alloc_frag_buf(pDevice, &pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx])) {
- pr_err("%s: can not alloc more frag bufs\n",
- pDevice->dev->name);
- }
- }
- //2008-0409-07, <Add> by Einsn Liu
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- //send event to wpa_supplicant
- {
- union iwreq_data wrqu;
- struct iw_michaelmicfailure ev;
- int keyidx = pbyFrame[cbHeaderSize+3] >> 6; //top two-bits
-
- memset(&ev, 0, sizeof(ev));
- ev.flags = keyidx & IW_MICFAILURE_KEY_ID;
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
- (pMgmt->eCurrState == WMAC_STATE_ASSOC) &&
- (*pbyRsr & (RSR_ADDRBROAD | RSR_ADDRMULTI)) == 0) {
- ev.flags |= IW_MICFAILURE_PAIRWISE;
- } else {
- ev.flags |= IW_MICFAILURE_GROUP;
- }
-
- ev.src_addr.sa_family = ARPHRD_ETHER;
- memcpy(ev.src_addr.sa_data, pMACHeader->abyAddr2, ETH_ALEN);
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length = sizeof(ev);
- wireless_send_event(pDevice->dev, IWEVMICHAELMICFAILURE, &wrqu, (char *)&ev);
-
- }
-#endif
+ priv->rx_rate = r;
- if ((pDevice->bWPADEVUp) && (pDevice->skb != NULL)) {
- wpahdr = (viawget_wpa_header *)pDevice->skb->data;
- if ((pDevice->pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
- (pDevice->pMgmt->eCurrState == WMAC_STATE_ASSOC) &&
- (*pbyRsr & (RSR_ADDRBROAD | RSR_ADDRMULTI)) == 0) {
- wpahdr->type = VIAWGET_PTK_MIC_MSG;
- } else {
- wpahdr->type = VIAWGET_GTK_MIC_MSG;
- }
- wpahdr->resp_ie_len = 0;
- wpahdr->req_ie_len = 0;
- skb_put(pDevice->skb, sizeof(viawget_wpa_header));
- pDevice->skb->dev = pDevice->wpadev;
- skb_reset_mac_header(pDevice->skb);
- pDevice->skb->pkt_type = PACKET_HOST;
- pDevice->skb->protocol = htons(ETH_P_802_2);
- memset(pDevice->skb->cb, 0, sizeof(pDevice->skb->cb));
- netif_rx(pDevice->skb);
- pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
- }
-
- return false;
-
- }
- }
- } //---end of SOFT MIC-----------------------------------------------------------------------
-
- // ++++++++++ Reply Counter Check +++++++++++++
-
- if ((pKey != NULL) && ((pKey->byCipherSuite == KEY_CTL_TKIP) ||
- (pKey->byCipherSuite == KEY_CTL_CCMP))) {
- if (bIsWEP) {
- unsigned short wLocalTSC15_0 = 0;
- unsigned long dwLocalTSC47_16 = 0;
- unsigned long long RSC = 0;
- // endian issues
- RSC = *((unsigned long long *)&(pKey->KeyRSC));
- wLocalTSC15_0 = (unsigned short)RSC;
- dwLocalTSC47_16 = (unsigned long)(RSC>>16);
-
- RSC = dwRxTSC47_16;
- RSC <<= 16;
- RSC += wRxTSC15_0;
- pKey->KeyRSC = RSC;
-
- if ((pDevice->sMgmtObj.eCurrMode == WMAC_MODE_ESS_STA) &&
- (pDevice->sMgmtObj.eCurrState == WMAC_STATE_ASSOC)) {
- // check RSC
- if ((wRxTSC15_0 < wLocalTSC15_0) &&
- (dwRxTSC47_16 <= dwLocalTSC47_16) &&
- !((dwRxTSC47_16 == 0) && (dwLocalTSC47_16 == 0xFFFFFFFF))) {
- pr_debug("TSC is illegal~~!\n ");
- if (pKey->byCipherSuite == KEY_CTL_TKIP)
- pDevice->s802_11Counter.TKIPReplays++;
- else
- pDevice->s802_11Counter.CCMPReplays++;
-
- if (bDeFragRx) {
- if (!device_alloc_frag_buf(pDevice, &pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx])) {
- pr_err("%s: can not alloc more frag bufs\n",
- pDevice->dev->name);
- }
- }
- return false;
- }
- }
- }
- } // ----- End of Reply Counter Check --------------------------
-
- s_vProcessRxMACHeader(pDevice, (unsigned char *)(skb->data+4), FrameSize, bIsWEP, bExtIV, &cbHeaderOffset);
- FrameSize -= cbHeaderOffset;
- cbHeaderOffset += 4; // 4 is Rcv buffer header
-
- // Null data, framesize = 14
- if (FrameSize < 15)
- return false;
-
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- if (!s_bAPModeRxData(pDevice,
- skb,
- FrameSize,
- cbHeaderOffset,
- iSANodeIndex,
- iDANodeIndex
-)) {
- if (bDeFragRx) {
- if (!device_alloc_frag_buf(pDevice, &pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx])) {
- pr_err("%s: can not alloc more frag bufs\n",
- pDevice->dev->name);
- }
- }
- return false;
+ for (ii = 0; ii < sband->n_bitrates; ii++) {
+ if (sband->bitrates[ii].hw_value == r) {
+ rate_idx = ii;
+ break;
}
}
- skb->data += cbHeaderOffset;
- skb->tail += cbHeaderOffset;
- skb_put(skb, FrameSize);
- skb->protocol = eth_type_trans(skb, skb->dev);
-
- //drop frame not met IEEE 802.3
-
- skb->ip_summed = CHECKSUM_NONE;
- pStats->rx_bytes += skb->len;
- pStats->rx_packets++;
- netif_rx(skb);
-
- if (bDeFragRx) {
- if (!device_alloc_frag_buf(pDevice, &pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx])) {
- pr_err("%s: can not alloc more frag bufs\n",
- pDevice->dev->name);
- }
+ if (ii == sband->n_bitrates) {
+ dev_dbg(&priv->pcid->dev, "Wrong RxRate %x\n", *rx_rate);
return false;
}
- return true;
-}
-
-static bool s_bAPModeRxCtl(
- struct vnt_private *pDevice,
- unsigned char *pbyFrame,
- int iSANodeIndex
-)
-{
- PS802_11Header p802_11Header;
- CMD_STATUS Status;
- PSMgmtObject pMgmt = pDevice->pMgmt;
-
- if (IS_CTL_PSPOLL(pbyFrame) || !IS_TYPE_CONTROL(pbyFrame)) {
- p802_11Header = (PS802_11Header)(pbyFrame);
- if (!IS_TYPE_MGMT(pbyFrame)) {
- // Data & PS-Poll packet
- // check frame class
- if (iSANodeIndex > 0) {
- // frame class 3 fliter & checking
- if (pMgmt->sNodeDBTable[iSANodeIndex].eNodeState < NODE_AUTH) {
- // send deauth notification
- // reason = (6) class 2 received from nonauth sta
- vMgrDeAuthenBeginSta(pDevice,
- pMgmt,
- (unsigned char *)(p802_11Header->abyAddr2),
- (WLAN_MGMT_REASON_CLASS2_NONAUTH),
- &Status
-);
- pr_debug("dpc: send vMgrDeAuthenBeginSta 1\n");
- return true;
- }
- if (pMgmt->sNodeDBTable[iSANodeIndex].eNodeState < NODE_ASSOC) {
- // send deassoc notification
- // reason = (7) class 3 received from nonassoc sta
- vMgrDisassocBeginSta(pDevice,
- pMgmt,
- (unsigned char *)(p802_11Header->abyAddr2),
- (WLAN_MGMT_REASON_CLASS3_NONASSOC),
- &Status
-);
- pr_debug("dpc: send vMgrDisassocBeginSta 2\n");
- return true;
- }
-
- if (pMgmt->sNodeDBTable[iSANodeIndex].bPSEnable) {
- // delcare received ps-poll event
- if (IS_CTL_PSPOLL(pbyFrame)) {
- pMgmt->sNodeDBTable[iSANodeIndex].bRxPSPoll = true;
- bScheduleCommand((void *)pDevice, WLAN_CMD_RX_PSPOLL, NULL);
- pr_debug("dpc: WLAN_CMD_RX_PSPOLL 1\n");
- } else {
- // check Data PS state
- // if PW bit off, send out all PS bufferring packets.
- if (!IS_FC_POWERMGT(pbyFrame)) {
- pMgmt->sNodeDBTable[iSANodeIndex].bPSEnable = false;
- pMgmt->sNodeDBTable[iSANodeIndex].bRxPSPoll = true;
- bScheduleCommand((void *)pDevice, WLAN_CMD_RX_PSPOLL, NULL);
- pr_debug("dpc: WLAN_CMD_RX_PSPOLL 2\n");
- }
- }
- } else {
- if (IS_FC_POWERMGT(pbyFrame)) {
- pMgmt->sNodeDBTable[iSANodeIndex].bPSEnable = true;
- // Once if STA in PS state, enable multicast bufferring
- pMgmt->sNodeDBTable[0].bPSEnable = true;
- } else {
- // clear all pending PS frame.
- if (pMgmt->sNodeDBTable[iSANodeIndex].wEnQueueCnt > 0) {
- pMgmt->sNodeDBTable[iSANodeIndex].bPSEnable = false;
- pMgmt->sNodeDBTable[iSANodeIndex].bRxPSPoll = true;
- bScheduleCommand((void *)pDevice, WLAN_CMD_RX_PSPOLL, NULL);
- pr_debug("dpc: WLAN_CMD_RX_PSPOLL 3\n");
-
- }
- }
- }
- } else {
- vMgrDeAuthenBeginSta(pDevice,
- pMgmt,
- (unsigned char *)(p802_11Header->abyAddr2),
- (WLAN_MGMT_REASON_CLASS2_NONAUTH),
- &Status
-);
- pr_debug("dpc: send vMgrDeAuthenBeginSta 3\n");
- pr_debug("BSSID:%pM\n",
- p802_11Header->abyAddr3);
- pr_debug("ADDR2:%pM\n",
- p802_11Header->abyAddr2);
- pr_debug("ADDR1:%pM\n",
- p802_11Header->abyAddr1);
- pr_debug("dpc: wFrameCtl= %x\n",
- p802_11Header->wFrameCtl);
- VNSvInPortB(pDevice->PortOffset + MAC_REG_RCR, &(pDevice->byRxMode));
- pr_debug("dpc:pDevice->byRxMode = %x\n",
- pDevice->byRxMode);
- return true;
- }
- }
- }
- return false;
-}
-
-static bool s_bHandleRxEncryption(
- struct vnt_private *pDevice,
- unsigned char *pbyFrame,
- unsigned int FrameSize,
- unsigned char *pbyRsr,
- unsigned char *pbyNewRsr,
- PSKeyItem *pKeyOut,
- bool *pbExtIV,
- unsigned short *pwRxTSC15_0,
- unsigned long *pdwRxTSC47_16
-)
-{
- unsigned int PayloadLen = FrameSize;
- unsigned char *pbyIV;
- unsigned char byKeyIdx;
- PSKeyItem pKey = NULL;
- unsigned char byDecMode = KEY_CTL_WEP;
- PSMgmtObject pMgmt = pDevice->pMgmt;
+ tsf_time = (__le64 *)(skb_data + bytes_received - 12);
+ sq = skb_data + bytes_received - 4;
+ new_rsr = skb_data + bytes_received - 3;
+ rssi = skb_data + bytes_received - 2;
+ rsr = skb_data + bytes_received - 1;
- *pwRxTSC15_0 = 0;
- *pdwRxTSC47_16 = 0;
+ RFvRSSITodBm(priv, *rssi, &rx_dbm);
- pbyIV = pbyFrame + WLAN_HDR_ADDR3_LEN;
- if (WLAN_GET_FC_TODS(*(unsigned short *)pbyFrame) &&
- WLAN_GET_FC_FROMDS(*(unsigned short *)pbyFrame)) {
- pbyIV += 6; // 6 is 802.11 address4
- PayloadLen -= 6;
- }
- byKeyIdx = (*(pbyIV+3) & 0xc0);
- byKeyIdx >>= 6;
- pr_debug("\nKeyIdx: %d\n", byKeyIdx);
+ priv->byBBPreEDRSSI = (u8)rx_dbm + 1;
+ priv->uCurrRSSI = *rssi;
- if ((pMgmt->eAuthenMode == WMAC_AUTH_WPA) ||
- (pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK) ||
- (pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) ||
- (pMgmt->eAuthenMode == WMAC_AUTH_WPA2) ||
- (pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) {
- if (((*pbyRsr & (RSR_ADDRBROAD | RSR_ADDRMULTI)) == 0) &&
- (pDevice->pMgmt->byCSSPK != KEY_CTL_NONE)) {
- // unicast pkt use pairwise key
- pr_debug("unicast pkt\n");
- if (KeybGetKey(&(pDevice->sKey), pDevice->abyBSSID, 0xFFFFFFFF, &pKey) == true) {
- if (pDevice->pMgmt->byCSSPK == KEY_CTL_TKIP)
- byDecMode = KEY_CTL_TKIP;
- else if (pDevice->pMgmt->byCSSPK == KEY_CTL_CCMP)
- byDecMode = KEY_CTL_CCMP;
- }
- pr_debug("unicast pkt: %d, %p\n", byDecMode, pKey);
- } else {
- // use group key
- KeybGetKey(&(pDevice->sKey), pDevice->abyBSSID, byKeyIdx, &pKey);
- if (pDevice->pMgmt->byCSSGK == KEY_CTL_TKIP)
- byDecMode = KEY_CTL_TKIP;
- else if (pDevice->pMgmt->byCSSGK == KEY_CTL_CCMP)
- byDecMode = KEY_CTL_CCMP;
- pr_debug("group pkt: %d, %d, %p\n",
- byKeyIdx, byDecMode, pKey);
- }
- }
- // our WEP only support Default Key
- if (pKey == NULL) {
- // use default group key
- KeybGetKey(&(pDevice->sKey), pDevice->abyBroadcastAddr, byKeyIdx, &pKey);
- if (pDevice->pMgmt->byCSSGK == KEY_CTL_TKIP)
- byDecMode = KEY_CTL_TKIP;
- else if (pDevice->pMgmt->byCSSGK == KEY_CTL_CCMP)
- byDecMode = KEY_CTL_CCMP;
- }
- *pKeyOut = pKey;
+ skb_pull(skb, 4);
+ skb_trim(skb, frame_size);
- pr_debug("AES:%d %d %d\n",
- pDevice->pMgmt->byCSSPK, pDevice->pMgmt->byCSSGK, byDecMode);
+ rx_status.mactime = le64_to_cpu(*tsf_time);
+ rx_status.band = hw->conf.chandef.chan->band;
+ rx_status.signal = rx_dbm;
+ rx_status.flag = 0;
+ rx_status.freq = hw->conf.chandef.chan->center_freq;
- if (pKey == NULL) {
- pr_debug("pKey == NULL\n");
+ hdr = (struct ieee80211_hdr *)(skb->data);
+ fc = hdr->frame_control;
- return false;
- }
- if (byDecMode != pKey->byCipherSuite) {
+ rx_status.rate_idx = rate_idx;
- *pKeyOut = NULL;
- return false;
+ if (ieee80211_has_protected(fc)) {
+ if (priv->byLocalID > REV_ID_VT3253_A1)
+ rx_status.flag = RX_FLAG_DECRYPTED;
}
- if (byDecMode == KEY_CTL_WEP) {
- // handle WEP
- if ((pDevice->byLocalID <= REV_ID_VT3253_A1) ||
- (((PSKeyTable)(pKey->pvKeyTable))->bSoftWEP == true)) {
- // Software WEP
- // 1. 3253A
- // 2. WEP 256
-
- PayloadLen -= (WLAN_HDR_ADDR3_LEN + 4 + 4); // 24 is 802.11 header,4 is IV, 4 is crc
- memcpy(pDevice->abyPRNG, pbyIV, 3);
- memcpy(pDevice->abyPRNG + 3, pKey->abyKey, pKey->uKeyLength);
- rc4_init(&pDevice->SBox, pDevice->abyPRNG, pKey->uKeyLength + 3);
- rc4_encrypt(&pDevice->SBox, pbyIV+4, pbyIV+4, PayloadLen);
-
- if (ETHbIsBufferCrc32Ok(pbyIV+4, PayloadLen))
- *pbyNewRsr |= NEWRSR_DECRYPTOK;
-
- }
- } else if ((byDecMode == KEY_CTL_TKIP) ||
- (byDecMode == KEY_CTL_CCMP)) {
- // TKIP/AES
-
- PayloadLen -= (WLAN_HDR_ADDR3_LEN + 8 + 4); // 24 is 802.11 header, 8 is IV&ExtIV, 4 is crc
- *pdwRxTSC47_16 = cpu_to_le32(*(unsigned long *)(pbyIV + 4));
- pr_debug("ExtIV: %lx\n", *pdwRxTSC47_16);
- if (byDecMode == KEY_CTL_TKIP)
- *pwRxTSC15_0 = cpu_to_le16(MAKEWORD(*(pbyIV + 2), *pbyIV));
- else
- *pwRxTSC15_0 = cpu_to_le16(*(unsigned short *)pbyIV);
-
- pr_debug("TSC0_15: %x\n", *pwRxTSC15_0);
-
- if ((byDecMode == KEY_CTL_TKIP) &&
- (pDevice->byLocalID <= REV_ID_VT3253_A1)) {
- // Software TKIP
- // 1. 3253 A
- PS802_11Header pMACHeader = (PS802_11Header)(pbyFrame);
-
- TKIPvMixKey(pKey->abyKey, pMACHeader->abyAddr2, *pwRxTSC15_0, *pdwRxTSC47_16, pDevice->abyPRNG);
- rc4_init(&pDevice->SBox, pDevice->abyPRNG, TKIP_KEY_LEN);
- rc4_encrypt(&pDevice->SBox, pbyIV+8, pbyIV+8, PayloadLen);
- if (ETHbIsBufferCrc32Ok(pbyIV+8, PayloadLen)) {
- *pbyNewRsr |= NEWRSR_DECRYPTOK;
- pr_debug("ICV OK!\n");
- } else {
- pr_debug("ICV FAIL!!!\n");
- pr_debug("PayloadLen = %d\n", PayloadLen);
- }
- }
- }// end of TKIP/AES
-
- if ((*(pbyIV+3) & 0x20) != 0)
- *pbExtIV = true;
- return true;
-}
-
-static bool s_bHostWepRxEncryption(
- struct vnt_private *pDevice,
- unsigned char *pbyFrame,
- unsigned int FrameSize,
- unsigned char *pbyRsr,
- bool bOnFly,
- PSKeyItem pKey,
- unsigned char *pbyNewRsr,
- bool *pbExtIV,
- unsigned short *pwRxTSC15_0,
- unsigned long *pdwRxTSC47_16
-)
-{
- unsigned int PayloadLen = FrameSize;
- unsigned char *pbyIV;
- unsigned char byKeyIdx;
- unsigned char byDecMode = KEY_CTL_WEP;
- PS802_11Header pMACHeader;
- *pwRxTSC15_0 = 0;
- *pdwRxTSC47_16 = 0;
-
- pbyIV = pbyFrame + WLAN_HDR_ADDR3_LEN;
- if (WLAN_GET_FC_TODS(*(unsigned short *)pbyFrame) &&
- WLAN_GET_FC_FROMDS(*(unsigned short *)pbyFrame)) {
- pbyIV += 6; // 6 is 802.11 address4
- PayloadLen -= 6;
+ if (priv->vif && priv->bDiversityEnable) {
+ if (ieee80211_is_data(fc) &&
+ (frame_size > 50) && priv->vif->bss_conf.assoc)
+ BBvAntennaDiversity(priv, priv->rx_rate, 0);
}
- byKeyIdx = (*(pbyIV+3) & 0xc0);
- byKeyIdx >>= 6;
- pr_debug("\nKeyIdx: %d\n", byKeyIdx);
-
- if (pDevice->pMgmt->byCSSGK == KEY_CTL_TKIP)
- byDecMode = KEY_CTL_TKIP;
- else if (pDevice->pMgmt->byCSSGK == KEY_CTL_CCMP)
- byDecMode = KEY_CTL_CCMP;
-
- pr_debug("AES:%d %d %d\n",
- pDevice->pMgmt->byCSSPK, pDevice->pMgmt->byCSSGK, byDecMode);
-
- if (byDecMode != pKey->byCipherSuite)
- return false;
-
- if (byDecMode == KEY_CTL_WEP) {
- // handle WEP
- pr_debug("byDecMode == KEY_CTL_WEP\n");
-
- if ((pDevice->byLocalID <= REV_ID_VT3253_A1) ||
- (((PSKeyTable)(pKey->pvKeyTable))->bSoftWEP == true) ||
- !bOnFly) {
- // Software WEP
- // 1. 3253A
- // 2. WEP 256
- // 3. NotOnFly
- PayloadLen -= (WLAN_HDR_ADDR3_LEN + 4 + 4); // 24 is 802.11 header,4 is IV, 4 is crc
- memcpy(pDevice->abyPRNG, pbyIV, 3);
- memcpy(pDevice->abyPRNG + 3, pKey->abyKey, pKey->uKeyLength);
- rc4_init(&pDevice->SBox, pDevice->abyPRNG, pKey->uKeyLength + 3);
- rc4_encrypt(&pDevice->SBox, pbyIV+4, pbyIV+4, PayloadLen);
+ memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
- if (ETHbIsBufferCrc32Ok(pbyIV+4, PayloadLen))
- *pbyNewRsr |= NEWRSR_DECRYPTOK;
+ ieee80211_rx_irqsafe(priv->hw, skb);
- }
- } else if ((byDecMode == KEY_CTL_TKIP) ||
- (byDecMode == KEY_CTL_CCMP)) {
- // TKIP/AES
-
- PayloadLen -= (WLAN_HDR_ADDR3_LEN + 8 + 4); // 24 is 802.11 header, 8 is IV&ExtIV, 4 is crc
- *pdwRxTSC47_16 = cpu_to_le32(*(unsigned long *)(pbyIV + 4));
- pr_debug("ExtIV: %lx\n", *pdwRxTSC47_16);
-
- if (byDecMode == KEY_CTL_TKIP)
- *pwRxTSC15_0 = cpu_to_le16(MAKEWORD(*(pbyIV+2), *pbyIV));
- else
- *pwRxTSC15_0 = cpu_to_le16(*(unsigned short *)pbyIV);
-
- pr_debug("TSC0_15: %x\n", *pwRxTSC15_0);
-
- if (byDecMode == KEY_CTL_TKIP) {
- if ((pDevice->byLocalID <= REV_ID_VT3253_A1) || !bOnFly) {
- // Software TKIP
- // 1. 3253 A
- // 2. NotOnFly
- pr_debug("soft KEY_CTL_TKIP\n");
- pMACHeader = (PS802_11Header)(pbyFrame);
- TKIPvMixKey(pKey->abyKey, pMACHeader->abyAddr2, *pwRxTSC15_0, *pdwRxTSC47_16, pDevice->abyPRNG);
- rc4_init(&pDevice->SBox, pDevice->abyPRNG, TKIP_KEY_LEN);
- rc4_encrypt(&pDevice->SBox, pbyIV+8, pbyIV+8, PayloadLen);
- if (ETHbIsBufferCrc32Ok(pbyIV+8, PayloadLen)) {
- *pbyNewRsr |= NEWRSR_DECRYPTOK;
- pr_debug("ICV OK!\n");
- } else {
- pr_debug("ICV FAIL!!!\n");
- pr_debug("PayloadLen = %d\n",
- PayloadLen);
- }
- }
- }
-
- if (byDecMode == KEY_CTL_CCMP) {
- if (!bOnFly) {
- // Software CCMP
- // NotOnFly
- pr_debug("soft KEY_CTL_CCMP\n");
- if (AESbGenCCMP(pKey->abyKey, pbyFrame, FrameSize)) {
- *pbyNewRsr |= NEWRSR_DECRYPTOK;
- pr_debug("CCMP MIC compare OK!\n");
- } else {
- pr_debug("CCMP MIC fail!\n");
- }
- }
- }
-
- }// end of TKIP/AES
-
- if ((*(pbyIV+3) & 0x20) != 0)
- *pbExtIV = true;
return true;
}
-static bool s_bAPModeRxData(
- struct vnt_private *pDevice,
- struct sk_buff *skb,
- unsigned int FrameSize,
- unsigned int cbHeaderOffset,
- int iSANodeIndex,
- int iDANodeIndex
-)
+bool vnt_receive_frame(struct vnt_private *priv, PSRxDesc curr_rd)
{
- PSMgmtObject pMgmt = pDevice->pMgmt;
- bool bRelayAndForward = false;
- bool bRelayOnly = false;
- unsigned char byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
- unsigned short wAID;
-
- struct sk_buff *skbcpy = NULL;
+ PDEVICE_RD_INFO rd_info = curr_rd->pRDInfo;
+ struct sk_buff *skb;
+ u16 frame_size;
- if (FrameSize > CB_MAX_BUF_SIZE)
- return false;
- // check DA
- if (is_multicast_ether_addr((unsigned char *)(skb->data+cbHeaderOffset))) {
- if (pMgmt->sNodeDBTable[0].bPSEnable) {
- skbcpy = dev_alloc_skb((int)pDevice->rx_buf_sz);
+ skb = rd_info->skb;
- // if any node in PS mode, buffer packet until DTIM.
- if (skbcpy == NULL) {
- pr_info("relay multicast no skb available\n");
- } else {
- skbcpy->dev = pDevice->dev;
- skbcpy->len = FrameSize;
- memcpy(skbcpy->data, skb->data+cbHeaderOffset, FrameSize);
- skb_queue_tail(&(pMgmt->sNodeDBTable[0].sTxPSQueue), skbcpy);
+ pci_unmap_single(priv->pcid, rd_info->skb_dma,
+ priv->rx_buf_sz, PCI_DMA_FROMDEVICE);
- pMgmt->sNodeDBTable[0].wEnQueueCnt++;
- // set tx map
- pMgmt->abyPSTxMap[0] |= byMask[0];
- }
- } else {
- bRelayAndForward = true;
- }
- } else {
- // check if relay
- if (BSSDBbIsSTAInNodeDB(pMgmt, (unsigned char *)(skb->data+cbHeaderOffset), &iDANodeIndex)) {
- if (pMgmt->sNodeDBTable[iDANodeIndex].eNodeState >= NODE_ASSOC) {
- if (pMgmt->sNodeDBTable[iDANodeIndex].bPSEnable) {
- // queue this skb until next PS tx, and then release.
+ frame_size = le16_to_cpu(curr_rd->m_rd1RD1.wReqCount)
+ - le16_to_cpu(curr_rd->m_rd0RD0.wResCount);
- skb->data += cbHeaderOffset;
- skb->tail += cbHeaderOffset;
- skb_put(skb, FrameSize);
- skb_queue_tail(&pMgmt->sNodeDBTable[iDANodeIndex].sTxPSQueue, skb);
- pMgmt->sNodeDBTable[iDANodeIndex].wEnQueueCnt++;
- wAID = pMgmt->sNodeDBTable[iDANodeIndex].wAID;
- pMgmt->abyPSTxMap[wAID >> 3] |= byMask[wAID & 7];
- pr_debug("relay: index= %d, pMgmt->abyPSTxMap[%d]= %d\n",
- iDANodeIndex, (wAID >> 3),
- pMgmt->abyPSTxMap[wAID >> 3]);
- return true;
- } else {
- bRelayOnly = true;
- }
- }
- }
+ if ((frame_size > 2364) || (frame_size < 33)) {
+ /* Frame size error, drop this packet. */
+ dev_dbg(&priv->pcid->dev, "Wrong frame size %d\n", frame_size);
+ dev_kfree_skb_irq(skb);
+ return true;
}
- if (bRelayOnly || bRelayAndForward) {
- // relay this packet right now
- if (bRelayAndForward)
- iDANodeIndex = 0;
+ if (vnt_rx_data(priv, skb, frame_size))
+ return true;
- if ((pDevice->uAssocCount > 1) && (iDANodeIndex >= 0))
- ROUTEbRelay(pDevice, (unsigned char *)(skb->data + cbHeaderOffset), FrameSize, (unsigned int)iDANodeIndex);
-
- if (bRelayOnly)
- return false;
- }
- // none associate, don't forward
- if (pDevice->uAssocCount == 0)
- return false;
+ dev_kfree_skb_irq(skb);
return true;
}
diff --git a/drivers/staging/vt6655/dpc.h b/drivers/staging/vt6655/dpc.h
index a068b846b1b..ad495719a25 100644
--- a/drivers/staging/vt6655/dpc.h
+++ b/drivers/staging/vt6655/dpc.h
@@ -29,14 +29,8 @@
#ifndef __DPC_H__
#define __DPC_H__
-#include "ttype.h"
#include "device.h"
-#include "wcmd.h"
-bool
-device_receive_frame(
- struct vnt_private *,
- PSRxDesc pCurrRD
-);
+bool vnt_receive_frame(struct vnt_private *priv, PSRxDesc curr_rd);
-#endif // __RXTX_H__
+#endif /* __DPC_H__ */
diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
deleted file mode 100644
index ae0dade229d..00000000000
--- a/drivers/staging/vt6655/hostap.c
+++ /dev/null
@@ -1,765 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: hostap.c
- *
- * Purpose: handle hostap deamon ioctl input/out functions
- *
- * Author: Lyndon Chen
- *
- * Date: Oct. 20, 2003
- *
- * Functions:
- *
- * Revision History:
- *
- */
-
-#include "hostap.h"
-#include "iocmd.h"
-#include "mac.h"
-#include "card.h"
-#include "baseband.h"
-#include "wpactl.h"
-#include "key.h"
-
-#define VIAWGET_HOSTAPD_MAX_BUF_SIZE 1024
-#define HOSTAP_CRYPT_FLAG_SET_TX_KEY BIT0
-#define HOSTAP_CRYPT_ERR_UNKNOWN_ADDR 3
-#define HOSTAP_CRYPT_ERR_KEY_SET_FAILED 5
-
-/*--------------------- Static Definitions -------------------------*/
-
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Functions --------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*
- * Description:
- * register net_device (AP) for hostap deamon
- *
- * Parameters:
- * In:
- * pDevice -
- * rtnl_locked -
- * Out:
- *
- * Return Value:
- *
- */
-
-static int hostap_enable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
-{
- struct vnt_private *apdev_priv;
- struct net_device *dev = pDevice->dev;
- int ret;
- const struct net_device_ops apdev_netdev_ops = {
- .ndo_start_xmit = pDevice->tx_80211,
- };
-
- pr_debug("%s: Enabling hostapd mode\n", dev->name);
-
- pDevice->apdev = alloc_etherdev(sizeof(*apdev_priv));
- if (pDevice->apdev == NULL)
- return -ENOMEM;
-
- apdev_priv = netdev_priv(pDevice->apdev);
- *apdev_priv = *pDevice;
- eth_hw_addr_inherit(pDevice->apdev, dev);
-
- pDevice->apdev->netdev_ops = &apdev_netdev_ops;
-
- pDevice->apdev->type = ARPHRD_IEEE80211;
-
- pDevice->apdev->base_addr = dev->base_addr;
- pDevice->apdev->irq = dev->irq;
- pDevice->apdev->mem_start = dev->mem_start;
- pDevice->apdev->mem_end = dev->mem_end;
- sprintf(pDevice->apdev->name, "%sap", dev->name);
- if (rtnl_locked)
- ret = register_netdevice(pDevice->apdev);
- else
- ret = register_netdev(pDevice->apdev);
- if (ret) {
- pr_debug("%s: register_netdevice(AP) failed!\n",
- dev->name);
- free_netdev(pDevice->apdev);
- pDevice->apdev = NULL;
- return -1;
- }
-
- pr_debug("%s: Registered netdevice %s for AP management\n",
- dev->name, pDevice->apdev->name);
-
- KeyvInitTable(&pDevice->sKey, pDevice->PortOffset);
-
- return 0;
-}
-
-/*
- * Description:
- * unregister net_device(AP)
- *
- * Parameters:
- * In:
- * pDevice -
- * rtnl_locked -
- * Out:
- *
- * Return Value:
- *
- */
-
-static int hostap_disable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
-{
- pr_debug("%s: disabling hostapd mode\n", pDevice->dev->name);
-
- if (pDevice->apdev && pDevice->apdev->name && pDevice->apdev->name[0]) {
- if (rtnl_locked)
- unregister_netdevice(pDevice->apdev);
- else
- unregister_netdev(pDevice->apdev);
- pr_debug("%s: Netdevice %s unregistered\n",
- pDevice->dev->name, pDevice->apdev->name);
- }
- if (pDevice->apdev)
- free_netdev(pDevice->apdev);
- pDevice->apdev = NULL;
- pDevice->bEnable8021x = false;
- pDevice->bEnableHostWEP = false;
- pDevice->bEncryptionEnable = false;
-
-/* 4.2007-0118-03,<Add> by EinsnLiu */
-/* execute some clear work */
- pDevice->pMgmt->byCSSPK = KEY_CTL_NONE;
- pDevice->pMgmt->byCSSGK = KEY_CTL_NONE;
- KeyvInitTable(&pDevice->sKey, pDevice->PortOffset);
-
- return 0;
-}
-
-/*
- * Description:
- * Set enable/disable hostapd mode
- *
- * Parameters:
- * In:
- * pDevice -
- * rtnl_locked -
- * Out:
- *
- * Return Value:
- *
- */
-
-int vt6655_hostap_set_hostapd(struct vnt_private *pDevice,
- int val, int rtnl_locked)
-{
- if (val < 0 || val > 1)
- return -EINVAL;
-
- if (pDevice->bEnableHostapd == val)
- return 0;
-
- pDevice->bEnableHostapd = val;
-
- if (val)
- return hostap_enable_hostapd(pDevice, rtnl_locked);
- else
- return hostap_disable_hostapd(pDevice, rtnl_locked);
-}
-
-/*
- * Description:
- * remove station function supported for hostap deamon
- *
- * Parameters:
- * In:
- * pDevice -
- * param -
- * Out:
- *
- * Return Value:
- *
- */
-static int hostap_remove_sta(struct vnt_private *pDevice,
- struct viawget_hostapd_param *param)
-{
- unsigned int uNodeIndex;
-
- if (BSSDBbIsSTAInNodeDB(pDevice->pMgmt, param->sta_addr, &uNodeIndex))
- BSSvRemoveOneNode(pDevice, uNodeIndex);
- else
- return -ENOENT;
-
- return 0;
-}
-
-/*
- * Description:
- * add a station from hostap deamon
- *
- * Parameters:
- * In:
- * pDevice -
- * param -
- * Out:
- *
- * Return Value:
- *
- */
-static int hostap_add_sta(struct vnt_private *pDevice,
- struct viawget_hostapd_param *param)
-{
- PSMgmtObject pMgmt = pDevice->pMgmt;
- unsigned int uNodeIndex;
-
- if (!BSSDBbIsSTAInNodeDB(pMgmt, param->sta_addr, &uNodeIndex))
- BSSvCreateOneNode(pDevice, &uNodeIndex);
-
- memcpy(pMgmt->sNodeDBTable[uNodeIndex].abyMACAddr, param->sta_addr, WLAN_ADDR_LEN);
- pMgmt->sNodeDBTable[uNodeIndex].eNodeState = NODE_ASSOC;
- pMgmt->sNodeDBTable[uNodeIndex].wCapInfo = param->u.add_sta.capability;
-/* TODO listenInterval */
- pMgmt->sNodeDBTable[uNodeIndex].bPSEnable = false;
- pMgmt->sNodeDBTable[uNodeIndex].bySuppRate = param->u.add_sta.tx_supp_rates;
-
- /* set max tx rate */
- pMgmt->sNodeDBTable[uNodeIndex].wTxDataRate =
- pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate;
- /* set max basic rate */
- pMgmt->sNodeDBTable[uNodeIndex].wMaxBasicRate = RATE_2M;
- /* Todo: check sta preamble, if ap can't support, set status code */
- pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble =
- WLAN_GET_CAP_INFO_SHORTPREAMBLE(pMgmt->sNodeDBTable[uNodeIndex].wCapInfo);
-
- pMgmt->sNodeDBTable[uNodeIndex].wAID = (unsigned short)param->u.add_sta.aid;
-
- pMgmt->sNodeDBTable[uNodeIndex].ulLastRxJiffer = jiffies;
-
- pr_debug("Add STA AID= %d\n", pMgmt->sNodeDBTable[uNodeIndex].wAID);
- pr_debug("MAC=%pM\n", param->sta_addr);
- pr_debug("Max Support rate = %d\n",
- pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate);
-
- return 0;
-}
-
-/*
- * Description:
- * get station info
- *
- * Parameters:
- * In:
- * pDevice -
- * param -
- * Out:
- *
- * Return Value:
- *
- */
-
-static int hostap_get_info_sta(struct vnt_private *pDevice,
- struct viawget_hostapd_param *param)
-{
- PSMgmtObject pMgmt = pDevice->pMgmt;
- unsigned int uNodeIndex;
-
- if (BSSDBbIsSTAInNodeDB(pMgmt, param->sta_addr, &uNodeIndex)) {
- param->u.get_info_sta.inactive_sec =
- (jiffies - pMgmt->sNodeDBTable[uNodeIndex].ulLastRxJiffer) / HZ;
- } else {
- return -ENOENT;
- }
-
- return 0;
-}
-
-/*
- * Description:
- * set station flag
- *
- * Parameters:
- * In:
- * pDevice -
- * param -
- * Out:
- *
- * Return Value:
- *
- */
-static int hostap_set_flags_sta(struct vnt_private *pDevice,
- struct viawget_hostapd_param *param)
-{
- PSMgmtObject pMgmt = pDevice->pMgmt;
- unsigned int uNodeIndex;
-
- if (BSSDBbIsSTAInNodeDB(pMgmt, param->sta_addr, &uNodeIndex)) {
- pMgmt->sNodeDBTable[uNodeIndex].dwFlags |= param->u.set_flags_sta.flags_or;
- pMgmt->sNodeDBTable[uNodeIndex].dwFlags &= param->u.set_flags_sta.flags_and;
- pr_debug(" dwFlags = %x\n",
- (unsigned int)pMgmt->sNodeDBTable[uNodeIndex].dwFlags);
- } else {
- return -ENOENT;
- }
-
- return 0;
-}
-
-/*
- * Description:
- * set generic element (wpa ie)
- *
- * Parameters:
- * In:
- * pDevice -
- * param -
- * Out:
- *
- * Return Value:
- *
- */
-static int hostap_set_generic_element(struct vnt_private *pDevice,
- struct viawget_hostapd_param *param)
-{
- PSMgmtObject pMgmt = pDevice->pMgmt;
-
- if (param->u.generic_elem.len > sizeof(pMgmt->abyWPAIE))
- return -EINVAL;
-
- memcpy(pMgmt->abyWPAIE,
- param->u.generic_elem.data,
- param->u.generic_elem.len
- );
-
- pMgmt->wWPAIELen = param->u.generic_elem.len;
-
- pr_debug("pMgmt->wWPAIELen = %d\n", pMgmt->wWPAIELen);
-
- /* disable wpa */
- if (pMgmt->wWPAIELen == 0) {
- pMgmt->eAuthenMode = WMAC_AUTH_OPEN;
- pr_debug(" No WPAIE, Disable WPA\n");
- } else {
- /* enable wpa */
- if ((pMgmt->abyWPAIE[0] == WLAN_EID_RSN_WPA) ||
- (pMgmt->abyWPAIE[0] == WLAN_EID_RSN)) {
- pMgmt->eAuthenMode = WMAC_AUTH_WPANONE;
- pr_debug("Set WPAIE enable WPA\n");
- } else
- return -EINVAL;
- }
-
- return 0;
-}
-
-/*
- * Description:
- * flush station nodes table.
- *
- * Parameters:
- * In:
- * pDevice -
- * Out:
- *
- * Return Value:
- *
- */
-
-static void hostap_flush_sta(struct vnt_private *pDevice)
-{
- /* reserved node index =0 for multicast node. */
- BSSvClearNodeDBTable(pDevice, 1);
- pDevice->uAssocCount = 0;
-}
-
-/*
- * Description:
- * set each stations encryption key
- *
- * Parameters:
- * In:
- * pDevice -
- * param -
- * Out:
- *
- * Return Value:
- *
- */
-static int hostap_set_encryption(struct vnt_private *pDevice,
- struct viawget_hostapd_param *param,
- int param_len)
-{
- PSMgmtObject pMgmt = pDevice->pMgmt;
- unsigned long dwKeyIndex = 0;
- unsigned char abyKey[MAX_KEY_LEN];
- unsigned char abySeq[MAX_KEY_LEN];
- u64 KeyRSC;
- unsigned char byKeyDecMode = KEY_CTL_WEP;
- int iNodeIndex = -1;
- int ii;
- bool bKeyTableFull = false;
- unsigned short wKeyCtl = 0;
-
- param->u.crypt.err = 0;
-
- if (param->u.crypt.alg > WPA_ALG_CCMP)
- return -EINVAL;
-
- if ((param->u.crypt.idx > 3) || (param->u.crypt.key_len > MAX_KEY_LEN)) {
- param->u.crypt.err = HOSTAP_CRYPT_ERR_KEY_SET_FAILED;
- pr_debug(" HOSTAP_CRYPT_ERR_KEY_SET_FAILED\n");
- return -EINVAL;
- }
-
- if (is_broadcast_ether_addr(param->sta_addr)) {
- if (param->u.crypt.idx >= MAX_GROUP_KEY)
- return -EINVAL;
- iNodeIndex = 0;
-
- } else {
- if (BSSDBbIsSTAInNodeDB(pMgmt, param->sta_addr, &iNodeIndex) == false) {
- param->u.crypt.err = HOSTAP_CRYPT_ERR_UNKNOWN_ADDR;
- pr_debug(" HOSTAP_CRYPT_ERR_UNKNOWN_ADDR\n");
- return -EINVAL;
- }
- }
- pr_debug(" hostap_set_encryption: sta_index %d\n", iNodeIndex);
- pr_debug(" hostap_set_encryption: alg %d\n", param->u.crypt.alg);
-
- if (param->u.crypt.alg == WPA_ALG_NONE) {
- if (pMgmt->sNodeDBTable[iNodeIndex].bOnFly) {
- if (!KeybRemoveKey(&(pDevice->sKey),
- param->sta_addr,
- pMgmt->sNodeDBTable[iNodeIndex].dwKeyIndex,
- pDevice->PortOffset)) {
- pr_debug("KeybRemoveKey fail\n");
- }
- pMgmt->sNodeDBTable[iNodeIndex].bOnFly = false;
- }
- pMgmt->sNodeDBTable[iNodeIndex].byKeyIndex = 0;
- pMgmt->sNodeDBTable[iNodeIndex].dwKeyIndex = 0;
- pMgmt->sNodeDBTable[iNodeIndex].uWepKeyLength = 0;
- pMgmt->sNodeDBTable[iNodeIndex].KeyRSC = 0;
- pMgmt->sNodeDBTable[iNodeIndex].dwTSC47_16 = 0;
- pMgmt->sNodeDBTable[iNodeIndex].wTSC15_0 = 0;
- pMgmt->sNodeDBTable[iNodeIndex].byCipherSuite = 0;
- memset(&pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[0],
- 0,
- MAX_KEY_LEN
-);
-
- return 0;
- }
-
- memcpy(abyKey, param->u.crypt.key, param->u.crypt.key_len);
- /* copy to node key tbl */
- pMgmt->sNodeDBTable[iNodeIndex].byKeyIndex = param->u.crypt.idx;
- pMgmt->sNodeDBTable[iNodeIndex].uWepKeyLength = param->u.crypt.key_len;
- memcpy(&pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[0],
- param->u.crypt.key,
- param->u.crypt.key_len
-);
-
- dwKeyIndex = (unsigned long)(param->u.crypt.idx);
- if (param->u.crypt.flags & HOSTAP_CRYPT_FLAG_SET_TX_KEY) {
- pDevice->byKeyIndex = (unsigned char)dwKeyIndex;
- pDevice->bTransmitKey = true;
- dwKeyIndex |= (1 << 31);
- }
-
- if (param->u.crypt.alg == WPA_ALG_WEP) {
- if ((pDevice->bEnable8021x == false) || (iNodeIndex == 0)) {
- KeybSetDefaultKey(&(pDevice->sKey),
- dwKeyIndex & ~(BIT30 | USE_KEYRSC),
- param->u.crypt.key_len,
- NULL,
- abyKey,
- KEY_CTL_WEP,
- pDevice->PortOffset,
- pDevice->byLocalID);
-
- } else {
- /* 8021x enable, individual key */
- dwKeyIndex |= (1 << 30); /* set pairwise key */
- if (KeybSetKey(&(pDevice->sKey),
- &param->sta_addr[0],
- dwKeyIndex & ~(USE_KEYRSC),
- param->u.crypt.key_len,
- (u64 *) &KeyRSC,
- (unsigned char *)abyKey,
- KEY_CTL_WEP,
- pDevice->PortOffset,
- pDevice->byLocalID)) {
- pMgmt->sNodeDBTable[iNodeIndex].bOnFly = true;
-
- } else {
- /* Key Table Full */
- pMgmt->sNodeDBTable[iNodeIndex].bOnFly = false;
- bKeyTableFull = true;
- }
- }
- pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
- pDevice->bEncryptionEnable = true;
- pMgmt->byCSSPK = KEY_CTL_WEP;
- pMgmt->byCSSGK = KEY_CTL_WEP;
- pMgmt->sNodeDBTable[iNodeIndex].byCipherSuite = KEY_CTL_WEP;
- pMgmt->sNodeDBTable[iNodeIndex].dwKeyIndex = dwKeyIndex;
- return 0;
- }
-
- if (param->u.crypt.seq) {
- memcpy(&abySeq, param->u.crypt.seq, 8);
- for (ii = 0; ii < 8; ii++)
- KeyRSC |= (u64)abySeq[ii] << (ii * 8);
-
- dwKeyIndex |= 1 << 29;
- pMgmt->sNodeDBTable[iNodeIndex].KeyRSC = KeyRSC;
- }
-
- if (param->u.crypt.alg == WPA_ALG_TKIP) {
- if (param->u.crypt.key_len != MAX_KEY_LEN)
- return -EINVAL;
- pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled;
- byKeyDecMode = KEY_CTL_TKIP;
- pMgmt->byCSSPK = KEY_CTL_TKIP;
- pMgmt->byCSSGK = KEY_CTL_TKIP;
- }
-
- if (param->u.crypt.alg == WPA_ALG_CCMP) {
- if ((param->u.crypt.key_len != AES_KEY_LEN) ||
- (pDevice->byLocalID <= REV_ID_VT3253_A1))
- return -EINVAL;
- pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled;
- byKeyDecMode = KEY_CTL_CCMP;
- pMgmt->byCSSPK = KEY_CTL_CCMP;
- pMgmt->byCSSGK = KEY_CTL_CCMP;
- }
-
- if (iNodeIndex == 0) {
- KeybSetDefaultKey(&(pDevice->sKey),
- dwKeyIndex,
- param->u.crypt.key_len,
- (u64 *) &KeyRSC,
- abyKey,
- byKeyDecMode,
- pDevice->PortOffset,
- pDevice->byLocalID);
- pMgmt->sNodeDBTable[iNodeIndex].bOnFly = true;
-
- } else {
- dwKeyIndex |= (1 << 30); /* set pairwise key */
- if (KeybSetKey(&(pDevice->sKey),
- &param->sta_addr[0],
- dwKeyIndex,
- param->u.crypt.key_len,
- (u64 *) &KeyRSC,
- (unsigned char *)abyKey,
- byKeyDecMode,
- pDevice->PortOffset,
- pDevice->byLocalID)) {
- pMgmt->sNodeDBTable[iNodeIndex].bOnFly = true;
-
- } else {
- /* Key Table Full */
- pMgmt->sNodeDBTable[iNodeIndex].bOnFly = false;
- bKeyTableFull = true;
- pr_debug(" Key Table Full\n");
- }
-
- }
-
- if (bKeyTableFull) {
- wKeyCtl &= 0x7F00; /* clear all key control filed */
- wKeyCtl |= (byKeyDecMode << 4);
- wKeyCtl |= (byKeyDecMode);
- wKeyCtl |= 0x0044; /* use group key for all address */
- wKeyCtl |= 0x4000; /* disable KeyTable[MAX_KEY_TABLE-1] on-fly to genernate rx int */
- MACvSetDefaultKeyCtl(pDevice->PortOffset, wKeyCtl, MAX_KEY_TABLE-1, pDevice->byLocalID);
- }
-
- pr_debug(" Set key sta_index= %d\n", iNodeIndex);
- pr_debug(" tx_index=%d len=%d\n",
- param->u.crypt.idx, param->u.crypt.key_len);
- pr_debug(" key=%x-%x-%x-%x-%x-xxxxx\n",
- pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[0],
- pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[1],
- pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[2],
- pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[3],
- pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[4]);
-
- /* set wep key */
- pDevice->bEncryptionEnable = true;
- pMgmt->sNodeDBTable[iNodeIndex].byCipherSuite = byKeyDecMode;
- pMgmt->sNodeDBTable[iNodeIndex].dwKeyIndex = dwKeyIndex;
- pMgmt->sNodeDBTable[iNodeIndex].dwTSC47_16 = 0;
- pMgmt->sNodeDBTable[iNodeIndex].wTSC15_0 = 0;
-
- return 0;
-}
-
-/*
- * Description:
- * get each stations encryption key
- *
- * Parameters:
- * In:
- * pDevice -
- * param -
- * Out:
- *
- * Return Value:
- *
- */
-static int hostap_get_encryption(struct vnt_private *pDevice,
- struct viawget_hostapd_param *param,
- int param_len)
-{
- PSMgmtObject pMgmt = pDevice->pMgmt;
- int ii;
- int iNodeIndex = 0;
-
- param->u.crypt.err = 0;
-
- if (is_broadcast_ether_addr(param->sta_addr)) {
- iNodeIndex = 0;
- } else {
- if (BSSDBbIsSTAInNodeDB(pMgmt, param->sta_addr, &iNodeIndex) == false) {
- param->u.crypt.err = HOSTAP_CRYPT_ERR_UNKNOWN_ADDR;
- pr_debug("hostap_get_encryption: HOSTAP_CRYPT_ERR_UNKNOWN_ADDR\n");
- return -EINVAL;
- }
- }
- pr_debug("hostap_get_encryption: %d\n", iNodeIndex);
- memset(param->u.crypt.seq, 0, 8);
- for (ii = 0; ii < 8; ii++)
- param->u.crypt.seq[ii] = (unsigned char)pMgmt->sNodeDBTable[iNodeIndex].KeyRSC >> (ii * 8);
-
- return 0;
-}
-
-/*
- * Description:
- * vt6655_hostap_ioctl: main ioctl handler for the hostapd daemon.
- *
- * Parameters:
- * In:
- * pDevice -
- * iw_point -
- * Out:
- *
- * Return Value:
- *
- */
-int vt6655_hostap_ioctl(struct vnt_private *pDevice, struct iw_point *p)
-{
- struct viawget_hostapd_param *param;
- int ret = 0;
- int ap_ioctl = 0;
-
- if (p->length < sizeof(struct viawget_hostapd_param) ||
- p->length > VIAWGET_HOSTAPD_MAX_BUF_SIZE || !p->pointer)
- return -EINVAL;
-
- param = kmalloc((int)p->length, GFP_KERNEL);
- if (param == NULL)
- return -ENOMEM;
-
- if (copy_from_user(param, p->pointer, p->length)) {
- ret = -EFAULT;
- goto out;
- }
-
- switch (param->cmd) {
- case VIAWGET_HOSTAPD_SET_ENCRYPTION:
- pr_debug("VIAWGET_HOSTAPD_SET_ENCRYPTION\n");
- spin_lock_irq(&pDevice->lock);
- ret = hostap_set_encryption(pDevice, param, p->length);
- spin_unlock_irq(&pDevice->lock);
- break;
- case VIAWGET_HOSTAPD_GET_ENCRYPTION:
- pr_debug("VIAWGET_HOSTAPD_GET_ENCRYPTION\n");
- spin_lock_irq(&pDevice->lock);
- ret = hostap_get_encryption(pDevice, param, p->length);
- spin_unlock_irq(&pDevice->lock);
- break;
- case VIAWGET_HOSTAPD_SET_ASSOC_AP_ADDR:
- pr_debug("VIAWGET_HOSTAPD_SET_ASSOC_AP_ADDR\n");
- ret = -EOPNOTSUPP;
- goto out;
- case VIAWGET_HOSTAPD_FLUSH:
- pr_debug("VIAWGET_HOSTAPD_FLUSH\n");
- spin_lock_irq(&pDevice->lock);
- hostap_flush_sta(pDevice);
- spin_unlock_irq(&pDevice->lock);
- break;
- case VIAWGET_HOSTAPD_ADD_STA:
- pr_debug("VIAWGET_HOSTAPD_ADD_STA\n");
- spin_lock_irq(&pDevice->lock);
- ret = hostap_add_sta(pDevice, param);
- spin_unlock_irq(&pDevice->lock);
- break;
- case VIAWGET_HOSTAPD_REMOVE_STA:
- pr_debug("VIAWGET_HOSTAPD_REMOVE_STA\n");
- spin_lock_irq(&pDevice->lock);
- ret = hostap_remove_sta(pDevice, param);
- spin_unlock_irq(&pDevice->lock);
- break;
- case VIAWGET_HOSTAPD_GET_INFO_STA:
- pr_debug("VIAWGET_HOSTAPD_GET_INFO_STA\n");
- ret = hostap_get_info_sta(pDevice, param);
- ap_ioctl = 1;
- break;
- case VIAWGET_HOSTAPD_SET_FLAGS_STA:
- pr_debug("VIAWGET_HOSTAPD_SET_FLAGS_STA\n");
- ret = hostap_set_flags_sta(pDevice, param);
- break;
- case VIAWGET_HOSTAPD_MLME:
- pr_debug("VIAWGET_HOSTAPD_MLME\n");
- ret = -EOPNOTSUPP;
- goto out;
- case VIAWGET_HOSTAPD_SET_GENERIC_ELEMENT:
- pr_debug("VIAWGET_HOSTAPD_SET_GENERIC_ELEMENT\n");
- ret = hostap_set_generic_element(pDevice, param);
- break;
- case VIAWGET_HOSTAPD_SCAN_REQ:
- pr_debug("VIAWGET_HOSTAPD_SCAN_REQ\n");
- ret = -EOPNOTSUPP;
- goto out;
- case VIAWGET_HOSTAPD_STA_CLEAR_STATS:
- pr_debug("VIAWGET_HOSTAPD_STA_CLEAR_STATS\n");
- ret = -EOPNOTSUPP;
- goto out;
- default:
- pr_debug("vt6655_hostap_ioctl: unknown cmd=%d\n",
- (int)param->cmd);
- ret = -EOPNOTSUPP;
- goto out;
- }
-
- if ((ret == 0) && ap_ioctl) {
- if (copy_to_user(p->pointer, param, p->length))
- ret = -EFAULT;
- }
-
-out:
- kfree(param);
- return ret;
-}
diff --git a/drivers/staging/vt6655/hostap.h b/drivers/staging/vt6655/hostap.h
deleted file mode 100644
index 17df4e403fc..00000000000
--- a/drivers/staging/vt6655/hostap.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: hostap.h
- *
- * Purpose:
- *
- * Author: Lyndon Chen
- *
- * Date: May 21, 2003
- *
- */
-
-#ifndef __HOSTAP_H__
-#define __HOSTAP_H__
-
-#include "device.h"
-
-#define WLAN_RATE_1M BIT0
-#define WLAN_RATE_2M BIT1
-#define WLAN_RATE_5M5 BIT2
-#define WLAN_RATE_11M BIT3
-#define WLAN_RATE_6M BIT4
-#define WLAN_RATE_9M BIT5
-#define WLAN_RATE_12M BIT6
-#define WLAN_RATE_18M BIT7
-#define WLAN_RATE_24M BIT8
-#define WLAN_RATE_36M BIT9
-#define WLAN_RATE_48M BIT10
-#define WLAN_RATE_54M BIT11
-
-#ifndef ETH_P_PAE
-#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */
-#endif /* ETH_P_PAE */
-
-#ifndef ARPHRD_IEEE80211
-#define ARPHRD_IEEE80211 801
-#endif
-
-int vt6655_hostap_set_hostapd(struct vnt_private *, int val, int rtnl_locked);
-int vt6655_hostap_ioctl(struct vnt_private *, struct iw_point *p);
-
-#endif // __HOSTAP_H__
diff --git a/drivers/staging/vt6655/iocmd.h b/drivers/staging/vt6655/iocmd.h
deleted file mode 100644
index a665cfd8a48..00000000000
--- a/drivers/staging/vt6655/iocmd.h
+++ /dev/null
@@ -1,408 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: iocmd.h
- *
- * Purpose: Handles the viawget ioctl private interface functions
- *
- * Author: Lyndon Chen
- *
- * Date: May 8, 2002
- *
- */
-
-#ifndef __IOCMD_H__
-#define __IOCMD_H__
-
-#include "ttype.h"
-
-// ioctl Command code
-#define MAGIC_CODE 0x3142
-#define IOCTL_CMD_TEST (SIOCDEVPRIVATE + 0)
-#define IOCTL_CMD_SET (SIOCDEVPRIVATE + 1)
-#define IOCTL_CMD_HOSTAPD (SIOCDEVPRIVATE + 2)
-#define IOCTL_CMD_WPA (SIOCDEVPRIVATE + 3)
-
-typedef enum tagWMAC_CMD {
- WLAN_CMD_BSS_SCAN,
- WLAN_CMD_BSS_JOIN,
- WLAN_CMD_DISASSOC,
- WLAN_CMD_SET_WEP,
- WLAN_CMD_GET_LINK,
- WLAN_CMD_GET_LISTLEN,
- WLAN_CMD_GET_LIST,
- WLAN_CMD_GET_MIB,
- WLAN_CMD_GET_STAT,
- WLAN_CMD_STOP_MAC,
- WLAN_CMD_START_MAC,
- WLAN_CMD_AP_START,
- WLAN_CMD_SET_HOSTAPD,
- WLAN_CMD_SET_HOSTAPD_STA,
- WLAN_CMD_SET_802_1X,
- WLAN_CMD_SET_HOST_WEP,
- WLAN_CMD_SET_WPA,
- WLAN_CMD_GET_NODE_CNT,
- WLAN_CMD_ZONETYPE_SET,
- WLAN_CMD_GET_NODE_LIST
-} WMAC_CMD, *PWMAC_CMD;
-
-typedef enum tagWZONETYPE {
- ZoneType_USA = 0,
- ZoneType_Japan = 1,
- ZoneType_Europe = 2
-} WZONETYPE;
-
-#define ADHOC 0
-#define INFRA 1
-#define BOTH 2
-#define AP 3
-
-#define ADHOC_STARTED 1
-#define ADHOC_JOINTED 2
-
-#define PHY80211a 0
-#define PHY80211b 1
-#define PHY80211g 2
-
-#define SSID_ID 0
-#define SSID_MAXLEN 32
-#define BSSID_LEN 6
-#define WEP_NKEYS 4
-#define WEP_KEYMAXLEN 29
-#define WEP_40BIT_LEN 5
-#define WEP_104BIT_LEN 13
-#define WEP_232BIT_LEN 16
-
-// Ioctl interface structure
-// Command structure
-//
-#pragma pack(1)
-typedef struct tagSCmdRequest {
- u8 name[16];
- void __user *data;
- u16 wResult;
- u16 wCmdCode;
-} SCmdRequest, *PSCmdRequest;
-
-//
-// Scan
-//
-
-typedef struct tagSCmdScan {
- u8 ssid[SSID_MAXLEN + 2];
-} SCmdScan, *PSCmdScan;
-
-//
-// BSS Join
-//
-
-typedef struct tagSCmdBSSJoin {
- u16 wBSSType;
- u16 wBBPType;
- u8 ssid[SSID_MAXLEN + 2];
- u32 uChannel;
- bool bPSEnable;
- bool bShareKeyAuth;
-} SCmdBSSJoin, *PSCmdBSSJoin;
-
-//
-// Zonetype Setting
-//
-
-typedef struct tagSCmdZoneTypeSet {
- bool bWrite;
- WZONETYPE ZoneType;
-} SCmdZoneTypeSet, *PSCmdZoneTypeSet;
-
-#ifdef WPA_SM_Transtatus
-typedef struct tagSWPAResult {
- char ifname[100];
- u8 proto;
- u8 key_mgmt;
- u8 eap_type;
- bool authenticated;
-} SWPAResult, *PSWPAResult;
-#endif
-
-typedef struct tagSCmdStartAP {
- u16 wBSSType;
- u16 wBBPType;
- u8 ssid[SSID_MAXLEN + 2];
- u32 uChannel;
- u32 uBeaconInt;
- bool bShareKeyAuth;
- u8 byBasicRate;
-} SCmdStartAP, *PSCmdStartAP;
-
-typedef struct tagSCmdSetWEP {
- bool bEnableWep;
- u8 byKeyIndex;
- u8 abyWepKey[WEP_NKEYS][WEP_KEYMAXLEN];
- bool bWepKeyAvailable[WEP_NKEYS];
- u32 auWepKeyLength[WEP_NKEYS];
-} SCmdSetWEP, *PSCmdSetWEP;
-
-typedef struct tagSBSSIDItem {
- u32 uChannel;
- u8 abyBSSID[BSSID_LEN];
- u8 abySSID[SSID_MAXLEN + 1];
- u8 byNetType;
- u16 wBeaconInterval;
- u16 wCapInfo; // for address of byNetType at align 4
-
- bool bWEPOn;
- u32 uRSSI;
-} SBSSIDItem;
-
-typedef struct tagSBSSIDList {
- u32 uItem;
- SBSSIDItem sBSSIDList[0];
-} SBSSIDList, *PSBSSIDList;
-
-typedef struct tagSCmdLinkStatus {
- bool bLink;
- u16 wBSSType;
- u8 byState;
- u8 abyBSSID[BSSID_LEN];
- u8 abySSID[SSID_MAXLEN + 2];
- u32 uChannel;
- u32 uLinkRate;
-} SCmdLinkStatus, *PSCmdLinkStatus;
-
-//
-// 802.11 counter
-//
-typedef struct tagSDot11MIBCount {
- u32 TransmittedFragmentCount;
- u32 MulticastTransmittedFrameCount;
- u32 FailedCount;
- u32 RetryCount;
- u32 MultipleRetryCount;
- u32 RTSSuccessCount;
- u32 RTSFailureCount;
- u32 ACKFailureCount;
- u32 FrameDuplicateCount;
- u32 ReceivedFragmentCount;
- u32 MulticastReceivedFrameCount;
- u32 FCSErrorCount;
-} SDot11MIBCount, *PSDot11MIBCount;
-
-//
-// statistic counter
-//
-typedef struct tagSStatMIBCount {
- //
- // ISR status count
- //
- u32 dwIsrTx0OK;
- u32 dwIsrTx1OK;
- u32 dwIsrBeaconTxOK;
- u32 dwIsrRxOK;
- u32 dwIsrTBTTInt;
- u32 dwIsrSTIMERInt;
- u32 dwIsrUnrecoverableError;
- u32 dwIsrSoftInterrupt;
- u32 dwIsrRxNoBuf;
-
- u32 dwIsrUnknown;
-
- // RSR status count
- //
- u32 dwRsrFrmAlgnErr;
- u32 dwRsrErr;
- u32 dwRsrCRCErr;
- u32 dwRsrCRCOk;
- u32 dwRsrBSSIDOk;
- u32 dwRsrADDROk;
- u32 dwRsrICVOk;
- u32 dwNewRsrShortPreamble;
- u32 dwRsrLong;
- u32 dwRsrRunt;
-
- u32 dwRsrRxControl;
- u32 dwRsrRxData;
- u32 dwRsrRxManage;
-
- u32 dwRsrRxPacket;
- u32 dwRsrRxOctet;
- u32 dwRsrBroadcast;
- u32 dwRsrMulticast;
- u32 dwRsrDirected;
- // 64-bit OID
- u32 ullRsrOK;
-
- // for some optional OIDs (64 bits) and DMI support
- u32 ullRxBroadcastBytes;
- u32 ullRxMulticastBytes;
- u32 ullRxDirectedBytes;
- u32 ullRxBroadcastFrames;
- u32 ullRxMulticastFrames;
- u32 ullRxDirectedFrames;
-
- u32 dwRsrRxFragment;
- u32 dwRsrRxFrmLen64;
- u32 dwRsrRxFrmLen65_127;
- u32 dwRsrRxFrmLen128_255;
- u32 dwRsrRxFrmLen256_511;
- u32 dwRsrRxFrmLen512_1023;
- u32 dwRsrRxFrmLen1024_1518;
-
- // TSR0,1 status count
- //
- u32 dwTsrTotalRetry[2]; // total collision retry count
- u32 dwTsrOnceRetry[2]; // packets that collided exactly once
- u32 dwTsrMoreThanOnceRetry[2]; // packets that collided more than once
- u32 dwTsrRetry[2]; // packets that collided at least once,
- // that is (dwTsrOnceCollision0 + dwTsrMoreThanOnceCollision0)
- u32 dwTsrACKData[2];
- u32 dwTsrErr[2];
- u32 dwAllTsrOK[2];
- u32 dwTsrRetryTimeout[2];
- u32 dwTsrTransmitTimeout[2];
-
- u32 dwTsrTxPacket[2];
- u32 dwTsrTxOctet[2];
- u32 dwTsrBroadcast[2];
- u32 dwTsrMulticast[2];
- u32 dwTsrDirected[2];
-
- // RD/TD count
- u32 dwCntRxFrmLength;
- u32 dwCntTxBufLength;
-
- u8 abyCntRxPattern[16];
- u8 abyCntTxPattern[16];
-
- // Software check....
- u32 dwCntRxDataErr; // rx buffer data software compare CRC err count
- u32 dwCntDecryptErr; // rx buffer data software decrypt err count
- u32 dwCntRxICVErr; // rx buffer data software ICV err count
- u32 idxRxErrorDesc; // index for rx data error RD
-
- // 64-bit OID
- u32 ullTsrOK[2];
-
- // for some optional OIDs (64 bits) and DMI support
- u32 ullTxBroadcastFrames[2];
- u32 ullTxMulticastFrames[2];
- u32 ullTxDirectedFrames[2];
- u32 ullTxBroadcastBytes[2];
- u32 ullTxMulticastBytes[2];
- u32 ullTxDirectedBytes[2];
-} SStatMIBCount, *PSStatMIBCount;
-
-typedef struct tagSNodeItem {
- // STA info
- u16 wAID;
- u8 abyMACAddr[6];
- u16 wTxDataRate;
- u16 wInActiveCount;
- u16 wEnQueueCnt;
- u16 wFlags;
- bool bPWBitOn;
- u8 byKeyIndex;
- u16 wWepKeyLength;
- u8 abyWepKey[WEP_KEYMAXLEN];
- // Auto rate fallback vars
- bool bIsInFallback;
- u32 uTxFailures;
- u32 uTxAttempts;
- u16 wFailureRatio;
-} SNodeItem;
-
-typedef struct tagSNodeList {
- u32 uItem;
- SNodeItem sNodeList[0];
-} SNodeList, *PSNodeList;
-
-typedef struct tagSCmdValue {
- u32 dwValue;
-} SCmdValue, *PSCmdValue;
-
-//
-// hostapd & viawget ioctl related
-//
-
-enum {
- VIAWGET_HOSTAPD_FLUSH = 1,
- VIAWGET_HOSTAPD_ADD_STA = 2,
- VIAWGET_HOSTAPD_REMOVE_STA = 3,
- VIAWGET_HOSTAPD_GET_INFO_STA = 4,
- VIAWGET_HOSTAPD_SET_ENCRYPTION = 5,
- VIAWGET_HOSTAPD_GET_ENCRYPTION = 6,
- VIAWGET_HOSTAPD_SET_FLAGS_STA = 7,
- VIAWGET_HOSTAPD_SET_ASSOC_AP_ADDR = 8,
- VIAWGET_HOSTAPD_SET_GENERIC_ELEMENT = 9,
- VIAWGET_HOSTAPD_MLME = 10,
- VIAWGET_HOSTAPD_SCAN_REQ = 11,
- VIAWGET_HOSTAPD_STA_CLEAR_STATS = 12,
-};
-
-#define VIAWGET_HOSTAPD_GENERIC_ELEMENT_HDR_LEN \
- ((int)(&((struct viawget_hostapd_param *)0)->u.generic_elem.data))
-
-// Maximum length for algorithm names (-1 for nul termination) used in ioctl()
-
-struct viawget_hostapd_param {
- u32 cmd;
- u8 sta_addr[6];
- union {
- struct {
- u16 aid;
- u16 capability;
- u8 tx_supp_rates;
- } add_sta;
- struct {
- u32 inactive_sec;
- } get_info_sta;
- struct {
- u8 alg;
- u32 flags;
- u32 err;
- u8 idx;
- u8 seq[8];
- u16 key_len;
- u8 key[0];
- } crypt;
- struct {
- u32 flags_and;
- u32 flags_or;
- } set_flags_sta;
- struct {
- u16 rid;
- u16 len;
- u8 data[0];
- } rid;
- struct {
- u8 len;
- u8 data[0];
- } generic_elem;
- struct {
- u16 cmd;
- u16 reason_code;
- } mlme;
- struct {
- u8 ssid_len;
- u8 ssid[32];
- } scan_req;
- } u;
-};
-
-#pragma pack()
-
-#endif //__IOCMD_H__
diff --git a/drivers/staging/vt6655/ioctl.c b/drivers/staging/vt6655/ioctl.c
deleted file mode 100644
index 970e80d92fb..00000000000
--- a/drivers/staging/vt6655/ioctl.c
+++ /dev/null
@@ -1,658 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: ioctl.c
- *
- * Purpose: private ioctl functions
- *
- * Author: Lyndon Chen
- *
- * Date: August 20, 2003
- *
- * Functions:
- *
- * Revision History:
- *
- */
-
-#include "ioctl.h"
-#include "iocmd.h"
-#include "mac.h"
-#include "card.h"
-#include "hostap.h"
-#include "wpactl.h"
-#include "rf.h"
-
-#ifdef WPA_SM_Transtatus
-SWPAResult wpa_Result;
-#endif
-
-int private_ioctl(struct vnt_private *pDevice, struct ifreq *rq)
-{
- PSCmdRequest pReq = (PSCmdRequest)rq;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- int result = 0;
- PWLAN_IE_SSID pItemSSID;
- SCmdBSSJoin sJoinCmd;
- SCmdZoneTypeSet sZoneTypeCmd;
- SCmdScan sScanCmd;
- SCmdStartAP sStartAPCmd;
- SCmdSetWEP sWEPCmd;
- SCmdValue sValue;
- SBSSIDList sList;
- SNodeList sNodeList;
- PSBSSIDList pList;
- PSNodeList pNodeList;
- unsigned int cbListCount;
- PKnownBSS pBSS;
- PKnownNodeDB pNode;
- unsigned int ii, jj;
- unsigned char abySuppRates[] = {WLAN_EID_SUPP_RATES, 4, 0x02, 0x04, 0x0B, 0x16};
- unsigned char abyNullAddr[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
- unsigned long dwKeyIndex = 0;
- unsigned char abyScanSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
- long ldBm;
-
- pReq->wResult = 0;
-
- switch (pReq->wCmdCode) {
- case WLAN_CMD_BSS_SCAN:
- pr_debug("WLAN_CMD_BSS_SCAN..begin\n");
- if (copy_from_user(&sScanCmd, pReq->data, sizeof(SCmdScan))) {
- result = -EFAULT;
- break;
- }
-
- pItemSSID = (PWLAN_IE_SSID)sScanCmd.ssid;
- if (pItemSSID->len > WLAN_SSID_MAXLEN + 1)
- return -EINVAL;
- if (pItemSSID->len != 0) {
- memset(abyScanSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- memcpy(abyScanSSID, pItemSSID, pItemSSID->len + WLAN_IEHDR_LEN);
- }
-
- if (pDevice->bMACSuspend == true) {
- if (pDevice->bRadioOff == true)
- CARDbRadioPowerOn(pDevice);
- vMgrTimerInit(pDevice);
- MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
- add_timer(&pMgmt->sTimerSecondCallback);
- pDevice->bMACSuspend = false;
- }
- spin_lock_irq(&pDevice->lock);
- if (memcmp(pMgmt->abyCurrBSSID, &abyNullAddr[0], 6) == 0)
- BSSvClearBSSList((void *)pDevice, false);
- else
- BSSvClearBSSList((void *)pDevice, pDevice->bLinkPass);
-
- if (pItemSSID->len != 0)
- bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, abyScanSSID);
- else
- bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, NULL);
- spin_unlock_irq(&pDevice->lock);
- break;
-
- case WLAN_CMD_ZONETYPE_SET:
- /* mike add: not supported */
- result = -EOPNOTSUPP;
- break;
-
- if (copy_from_user(&sZoneTypeCmd, pReq->data, sizeof(SCmdZoneTypeSet))) {
- result = -EFAULT;
- break;
- }
-
- if (sZoneTypeCmd.bWrite == true) {
- /* write zonetype */
- if (sZoneTypeCmd.ZoneType == ZoneType_USA) {
- /* set to USA */
- pr_debug("set_ZoneType:USA\n");
- } else if (sZoneTypeCmd.ZoneType == ZoneType_Japan) {
- /* set to Japan */
- pr_debug("set_ZoneType:Japan\n");
- } else if (sZoneTypeCmd.ZoneType == ZoneType_Europe) {
- /* set to Europe */
- pr_debug("set_ZoneType:Europe\n");
- }
- } else {
- /* read zonetype */
- unsigned char zonetype = 0;
-
- if (zonetype == 0x00) { /* USA */
- sZoneTypeCmd.ZoneType = ZoneType_USA;
- } else if (zonetype == 0x01) { /* Japan */
- sZoneTypeCmd.ZoneType = ZoneType_Japan;
- } else if (zonetype == 0x02) { /* Europe */
- sZoneTypeCmd.ZoneType = ZoneType_Europe;
- } else { /* Unknown ZoneType */
- pr_err("Error:ZoneType[%x] Unknown ???\n", zonetype);
- result = -EFAULT;
- break;
- }
- if (copy_to_user(pReq->data, &sZoneTypeCmd, sizeof(SCmdZoneTypeSet))) {
- result = -EFAULT;
- break;
- }
- }
- break;
-
- case WLAN_CMD_BSS_JOIN:
- if (pDevice->bMACSuspend == true) {
- if (pDevice->bRadioOff == true)
- CARDbRadioPowerOn(pDevice);
- vMgrTimerInit(pDevice);
- MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
- add_timer(&pMgmt->sTimerSecondCallback);
- pDevice->bMACSuspend = false;
- }
-
- if (copy_from_user(&sJoinCmd, pReq->data, sizeof(SCmdBSSJoin))) {
- result = -EFAULT;
- break;
- }
-
- pItemSSID = (PWLAN_IE_SSID)sJoinCmd.ssid;
- if (pItemSSID->len > WLAN_SSID_MAXLEN + 1)
- return -EINVAL;
- memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- memcpy(pMgmt->abyDesireSSID, pItemSSID, pItemSSID->len + WLAN_IEHDR_LEN);
- if (sJoinCmd.wBSSType == ADHOC) {
- pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA;
- pr_debug("ioctl set to adhoc mode\n");
- } else {
- pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA;
- pr_debug("ioctl set to STA mode\n");
- }
- if (sJoinCmd.bPSEnable == true) {
- pDevice->ePSMode = WMAC_POWER_FAST;
- pMgmt->wListenInterval = 2;
- pr_debug("Power Saving On\n");
- } else {
- pDevice->ePSMode = WMAC_POWER_CAM;
- pMgmt->wListenInterval = 1;
- pr_debug("Power Saving Off\n");
- }
-
- if (sJoinCmd.bShareKeyAuth == true) {
- pMgmt->bShareKeyAlgorithm = true;
- pr_debug("Share Key\n");
- } else {
- pMgmt->bShareKeyAlgorithm = false;
- pr_debug("Open System\n");
- }
- pDevice->uChannel = sJoinCmd.uChannel;
- netif_stop_queue(pDevice->dev);
- spin_lock_irq(&pDevice->lock);
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
- bScheduleCommand((void *)pDevice, WLAN_CMD_SSID, NULL);
- spin_unlock_irq(&pDevice->lock);
- break;
-
- case WLAN_CMD_SET_WEP:
- pr_debug("WLAN_CMD_SET_WEP Key\n");
- memset(&sWEPCmd, 0, sizeof(SCmdSetWEP));
- if (copy_from_user(&sWEPCmd, pReq->data, sizeof(SCmdSetWEP))) {
- result = -EFAULT;
- break;
- }
- if (sWEPCmd.bEnableWep != true) {
- pDevice->bEncryptionEnable = false;
- pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
- MACvDisableDefaultKey(pDevice->PortOffset);
- pr_debug("WEP function disable\n");
- break;
- }
-
- for (ii = 0; ii < WLAN_WEP_NKEYS; ii++) {
- if (sWEPCmd.bWepKeyAvailable[ii]) {
- if (ii == sWEPCmd.byKeyIndex)
- dwKeyIndex = ii | (1 << 31);
- else
- dwKeyIndex = ii;
-
- KeybSetDefaultKey(&(pDevice->sKey),
- dwKeyIndex,
- sWEPCmd.auWepKeyLength[ii],
- NULL,
- (unsigned char *)&sWEPCmd.abyWepKey[ii][0],
- KEY_CTL_WEP,
- pDevice->PortOffset,
- pDevice->byLocalID);
- }
- }
- pDevice->byKeyIndex = sWEPCmd.byKeyIndex;
- pDevice->bTransmitKey = true;
- pDevice->bEncryptionEnable = true;
- pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
- break;
-
- case WLAN_CMD_GET_LINK: {
- SCmdLinkStatus sLinkStatus;
-
- pr_debug("WLAN_CMD_GET_LINK status\n");
-
- memset(&sLinkStatus, 0, sizeof(sLinkStatus));
-
- if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)
- sLinkStatus.wBSSType = ADHOC;
- else
- sLinkStatus.wBSSType = INFRA;
-
- if (pMgmt->eCurrState == WMAC_STATE_JOINTED)
- sLinkStatus.byState = ADHOC_JOINTED;
- else
- sLinkStatus.byState = ADHOC_STARTED;
-
- sLinkStatus.uChannel = pMgmt->uCurrChannel;
- if (pDevice->bLinkPass == true) {
- sLinkStatus.bLink = true;
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
- memcpy(sLinkStatus.abySSID, pItemSSID->abySSID, pItemSSID->len);
- memcpy(sLinkStatus.abyBSSID, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
- sLinkStatus.uLinkRate = pMgmt->sNodeDBTable[0].wTxDataRate;
- pr_debug(" Link Success!\n");
- } else {
- sLinkStatus.bLink = false;
- sLinkStatus.uLinkRate = 0;
- }
- if (copy_to_user(pReq->data, &sLinkStatus, sizeof(SCmdLinkStatus))) {
- result = -EFAULT;
- break;
- }
- break;
- }
- case WLAN_CMD_GET_LISTLEN:
- cbListCount = 0;
- pBSS = &(pMgmt->sBSSList[0]);
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- pBSS = &(pMgmt->sBSSList[ii]);
- if (!pBSS->bActive)
- continue;
- cbListCount++;
- }
- sList.uItem = cbListCount;
- if (copy_to_user(pReq->data, &sList, sizeof(SBSSIDList))) {
- result = -EFAULT;
- break;
- }
- pReq->wResult = 0;
- break;
-
- case WLAN_CMD_GET_LIST:
- if (copy_from_user(&sList, pReq->data, sizeof(SBSSIDList))) {
- result = -EFAULT;
- break;
- }
- if (sList.uItem > (ULONG_MAX - sizeof(SBSSIDList)) / sizeof(SBSSIDItem)) {
- result = -EINVAL;
- break;
- }
- pList = (PSBSSIDList)kmalloc(sizeof(SBSSIDList) + (sList.uItem * sizeof(SBSSIDItem)),
- GFP_ATOMIC);
- if (pList == NULL) {
- result = -ENOMEM;
- break;
- }
- pList->uItem = sList.uItem;
- pBSS = &(pMgmt->sBSSList[0]);
- for (ii = 0, jj = 0; jj < MAX_BSS_NUM; jj++) {
- pBSS = &(pMgmt->sBSSList[jj]);
- if (pBSS->bActive) {
- pList->sBSSIDList[ii].uChannel = pBSS->uChannel;
- pList->sBSSIDList[ii].wBeaconInterval = pBSS->wBeaconInterval;
- pList->sBSSIDList[ii].wCapInfo = pBSS->wCapInfo;
- RFvRSSITodBm(pDevice, (unsigned char)(pBSS->uRSSI), &ldBm);
- pList->sBSSIDList[ii].uRSSI = (unsigned int)ldBm;
- memcpy(pList->sBSSIDList[ii].abyBSSID, pBSS->abyBSSID, WLAN_BSSID_LEN);
- pItemSSID = (PWLAN_IE_SSID)pBSS->abySSID;
- memset(pList->sBSSIDList[ii].abySSID, 0, WLAN_SSID_MAXLEN + 1);
- memcpy(pList->sBSSIDList[ii].abySSID, pItemSSID->abySSID, pItemSSID->len);
- if (WLAN_GET_CAP_INFO_ESS(pBSS->wCapInfo))
- pList->sBSSIDList[ii].byNetType = INFRA;
- else
- pList->sBSSIDList[ii].byNetType = ADHOC;
-
- if (WLAN_GET_CAP_INFO_PRIVACY(pBSS->wCapInfo))
- pList->sBSSIDList[ii].bWEPOn = true;
- else
- pList->sBSSIDList[ii].bWEPOn = false;
-
- ii++;
- if (ii >= pList->uItem)
- break;
- }
- }
-
- if (copy_to_user(pReq->data, pList, sizeof(SBSSIDList) + (sList.uItem * sizeof(SBSSIDItem)))) {
- result = -EFAULT;
- break;
- }
- kfree(pList);
- pReq->wResult = 0;
- break;
-
- case WLAN_CMD_GET_MIB:
- if (copy_to_user(pReq->data, &(pDevice->s802_11Counter), sizeof(SDot11MIBCount))) {
- result = -EFAULT;
- break;
- }
- break;
-
- case WLAN_CMD_GET_STAT:
- if (copy_to_user(pReq->data, &(pDevice->scStatistic), sizeof(SStatCounter))) {
- result = -EFAULT;
- break;
- }
- break;
-
- case WLAN_CMD_STOP_MAC:
- pr_debug("WLAN_CMD_STOP_MAC\n");
- netif_stop_queue(pDevice->dev);
-
- spin_lock_irq(&pDevice->lock);
- if (pDevice->bRadioOff == false)
- CARDbRadioPowerOff(pDevice);
-
- pDevice->bLinkPass = false;
- memset(pMgmt->abyCurrBSSID, 0, 6);
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- del_timer(&pDevice->sTimerCommand);
- del_timer(&pMgmt->sTimerSecondCallback);
- pDevice->bCmdRunning = false;
- pDevice->bMACSuspend = true;
- MACvIntDisable(pDevice->PortOffset);
- spin_unlock_irq(&pDevice->lock);
- break;
-
- case WLAN_CMD_START_MAC:
- pr_debug("WLAN_CMD_START_MAC\n");
-
- if (pDevice->bMACSuspend == true) {
- if (pDevice->bRadioOff == true)
- CARDbRadioPowerOn(pDevice);
- vMgrTimerInit(pDevice);
- MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
- add_timer(&pMgmt->sTimerSecondCallback);
- pDevice->bMACSuspend = false;
- }
- break;
-
- case WLAN_CMD_SET_HOSTAPD:
- pr_debug("WLAN_CMD_SET_HOSTAPD\n");
-
- if (copy_from_user(&sValue, pReq->data, sizeof(SCmdValue))) {
- result = -EFAULT;
- break;
- }
- if (sValue.dwValue == 1) {
- if (vt6655_hostap_set_hostapd(pDevice, 1, 1) == 0) {
- pr_debug("Enable HOSTAP\n");
- } else {
- result = -EFAULT;
- break;
- }
- } else {
- vt6655_hostap_set_hostapd(pDevice, 0, 1);
- pr_debug("Disable HOSTAP\n");
- }
- break;
-
- case WLAN_CMD_SET_HOSTAPD_STA:
- pr_debug("WLAN_CMD_SET_HOSTAPD_STA\n");
- break;
-
- case WLAN_CMD_SET_802_1X:
- pr_debug("WLAN_CMD_SET_802_1X\n");
- if (copy_from_user(&sValue, pReq->data, sizeof(SCmdValue))) {
- result = -EFAULT;
- break;
- }
-
- if (sValue.dwValue == 1) {
- pDevice->bEnable8021x = true;
- pr_debug("Enable 802.1x\n");
- } else {
- pDevice->bEnable8021x = false;
- pr_debug("Disable 802.1x\n");
- }
- break;
-
- case WLAN_CMD_SET_HOST_WEP:
- pr_debug("WLAN_CMD_SET_HOST_WEP\n");
- if (copy_from_user(&sValue, pReq->data, sizeof(SCmdValue))) {
- result = -EFAULT;
- break;
- }
-
- if (sValue.dwValue == 1) {
- pDevice->bEnableHostWEP = true;
- pr_debug("Enable HostWEP\n");
- } else {
- pDevice->bEnableHostWEP = false;
- pr_debug("Disable HostWEP\n");
- }
- break;
-
- case WLAN_CMD_SET_WPA:
- pr_debug("WLAN_CMD_SET_WPA\n");
-
- if (copy_from_user(&sValue, pReq->data, sizeof(SCmdValue))) {
- result = -EFAULT;
- break;
- }
- if (sValue.dwValue == 1) {
- pr_debug("up wpadev\n");
- eth_hw_addr_inherit(pDevice->wpadev, pDevice->dev);
- pDevice->bWPADEVUp = true;
- } else {
- pr_debug("close wpadev\n");
- pDevice->bWPADEVUp = false;
- }
- break;
-
- case WLAN_CMD_AP_START:
- pr_debug("WLAN_CMD_AP_START\n");
- if (pDevice->bRadioOff == true) {
- CARDbRadioPowerOn(pDevice);
- vMgrTimerInit(pDevice);
- MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
- add_timer(&pMgmt->sTimerSecondCallback);
- }
- if (copy_from_user(&sStartAPCmd, pReq->data, sizeof(SCmdStartAP))) {
- result = -EFAULT;
- break;
- }
-
- if (sStartAPCmd.wBSSType == AP) {
- pMgmt->eConfigMode = WMAC_CONFIG_AP;
- pr_debug("ioctl set to AP mode\n");
- } else {
- pr_debug("ioctl BSS type not set to AP mode\n");
- result = -EFAULT;
- break;
- }
-
- if (sStartAPCmd.wBBPType == PHY80211g)
- pMgmt->byAPBBType = PHY_TYPE_11G;
- else if (sStartAPCmd.wBBPType == PHY80211a)
- pMgmt->byAPBBType = PHY_TYPE_11A;
- else
- pMgmt->byAPBBType = PHY_TYPE_11B;
-
- pItemSSID = (PWLAN_IE_SSID)sStartAPCmd.ssid;
- if (pItemSSID->len > WLAN_SSID_MAXLEN + 1)
- return -EINVAL;
- memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- memcpy(pMgmt->abyDesireSSID, pItemSSID, pItemSSID->len + WLAN_IEHDR_LEN);
-
- if ((sStartAPCmd.uChannel > 0) && (sStartAPCmd.uChannel <= 14))
- pDevice->uChannel = sStartAPCmd.uChannel;
-
- if ((sStartAPCmd.uBeaconInt >= 20) && (sStartAPCmd.uBeaconInt <= 1000))
- pMgmt->wIBSSBeaconPeriod = sStartAPCmd.uBeaconInt;
- else
- pMgmt->wIBSSBeaconPeriod = 100;
-
- if (sStartAPCmd.bShareKeyAuth == true) {
- pMgmt->bShareKeyAlgorithm = true;
- pr_debug("Share Key\n");
- } else {
- pMgmt->bShareKeyAlgorithm = false;
- pr_debug("Open System\n");
- }
- memcpy(pMgmt->abyIBSSSuppRates, abySuppRates, 6);
-
- if (sStartAPCmd.byBasicRate & BIT3) {
- pMgmt->abyIBSSSuppRates[2] |= BIT7;
- pMgmt->abyIBSSSuppRates[3] |= BIT7;
- pMgmt->abyIBSSSuppRates[4] |= BIT7;
- pMgmt->abyIBSSSuppRates[5] |= BIT7;
- } else if (sStartAPCmd.byBasicRate & BIT2) {
- pMgmt->abyIBSSSuppRates[2] |= BIT7;
- pMgmt->abyIBSSSuppRates[3] |= BIT7;
- pMgmt->abyIBSSSuppRates[4] |= BIT7;
- } else if (sStartAPCmd.byBasicRate & BIT1) {
- pMgmt->abyIBSSSuppRates[2] |= BIT7;
- pMgmt->abyIBSSSuppRates[3] |= BIT7;
- } else if (sStartAPCmd.byBasicRate & BIT0) {
- pMgmt->abyIBSSSuppRates[2] |= BIT7;
- } else {
- /* default 1,2M */
- pMgmt->abyIBSSSuppRates[2] |= BIT7;
- pMgmt->abyIBSSSuppRates[3] |= BIT7;
- }
-
- pr_debug("Support Rate= %*ph\n",
- 4, pMgmt->abyIBSSSuppRates + 2);
-
- netif_stop_queue(pDevice->dev);
- spin_lock_irq(&pDevice->lock);
- bScheduleCommand((void *)pDevice, WLAN_CMD_RUN_AP, NULL);
- spin_unlock_irq(&pDevice->lock);
- break;
-
- case WLAN_CMD_GET_NODE_CNT:
- cbListCount = 0;
- pNode = &(pMgmt->sNodeDBTable[0]);
- for (ii = 0; ii < (MAX_NODE_NUM + 1); ii++) {
- pNode = &(pMgmt->sNodeDBTable[ii]);
- if (!pNode->bActive)
- continue;
- cbListCount++;
- }
-
- sNodeList.uItem = cbListCount;
- if (copy_to_user(pReq->data, &sNodeList, sizeof(SNodeList))) {
- result = -EFAULT;
- break;
- }
- pReq->wResult = 0;
- break;
-
- case WLAN_CMD_GET_NODE_LIST:
- if (copy_from_user(&sNodeList, pReq->data, sizeof(SNodeList))) {
- result = -EFAULT;
- break;
- }
- if (sNodeList.uItem > (ULONG_MAX - sizeof(SNodeList)) / sizeof(SNodeItem)) {
- result = -EINVAL;
- break;
- }
- pNodeList = (PSNodeList)kmalloc(sizeof(SNodeList) + (sNodeList.uItem * sizeof(SNodeItem)),
- GFP_ATOMIC);
- if (pNodeList == NULL) {
- result = -ENOMEM;
- break;
- }
- pNodeList->uItem = sNodeList.uItem;
- pNode = &(pMgmt->sNodeDBTable[0]);
- for (ii = 0, jj = 0; ii < (MAX_NODE_NUM + 1); ii++) {
- pNode = &(pMgmt->sNodeDBTable[ii]);
- if (pNode->bActive) {
- pNodeList->sNodeList[jj].wAID = pNode->wAID;
- memcpy(pNodeList->sNodeList[jj].abyMACAddr, pNode->abyMACAddr, WLAN_ADDR_LEN);
- pNodeList->sNodeList[jj].wTxDataRate = pNode->wTxDataRate;
- pNodeList->sNodeList[jj].wInActiveCount = (unsigned short)pNode->uInActiveCount;
- pNodeList->sNodeList[jj].wEnQueueCnt = (unsigned short)pNode->wEnQueueCnt;
- pNodeList->sNodeList[jj].wFlags = (unsigned short)pNode->dwFlags;
- pNodeList->sNodeList[jj].bPWBitOn = pNode->bPSEnable;
- pNodeList->sNodeList[jj].byKeyIndex = pNode->byKeyIndex;
- pNodeList->sNodeList[jj].wWepKeyLength = pNode->uWepKeyLength;
- memcpy(&(pNodeList->sNodeList[jj].abyWepKey[0]), &(pNode->abyWepKey[0]), WEP_KEYMAXLEN);
- pr_debug("key= %2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
- pNodeList->sNodeList[jj].abyWepKey[0],
- pNodeList->sNodeList[jj].abyWepKey[1],
- pNodeList->sNodeList[jj].abyWepKey[2],
- pNodeList->sNodeList[jj].abyWepKey[3],
- pNodeList->sNodeList[jj].abyWepKey[4]);
- pNodeList->sNodeList[jj].bIsInFallback = pNode->bIsInFallback;
- pNodeList->sNodeList[jj].uTxFailures = pNode->uTxFailures;
- pNodeList->sNodeList[jj].uTxAttempts = pNode->uTxAttempts;
- pNodeList->sNodeList[jj].wFailureRatio = (unsigned short)pNode->uFailureRatio;
- jj++;
- if (jj >= pNodeList->uItem)
- break;
- }
- }
- if (copy_to_user(pReq->data, pNodeList, sizeof(SNodeList) + (sNodeList.uItem * sizeof(SNodeItem)))) {
- result = -EFAULT;
- break;
- }
- kfree(pNodeList);
- pReq->wResult = 0;
- break;
-
-#ifdef WPA_SM_Transtatus
- case 0xFF:
- memset(wpa_Result.ifname, 0, sizeof(wpa_Result.ifname));
- wpa_Result.proto = 0;
- wpa_Result.key_mgmt = 0;
- wpa_Result.eap_type = 0;
- wpa_Result.authenticated = false;
- pDevice->fWPA_Authened = false;
- if (copy_from_user(&wpa_Result, pReq->data, sizeof(wpa_Result))) {
- result = -EFAULT;
- break;
- }
-
- if (wpa_Result.authenticated == true) {
-#ifdef SndEvt_ToAPI
- {
- union iwreq_data wrqu;
-
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
-
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.flags = RT_WPACONNECTED_EVENT_FLAG;
- wrqu.data.length = pItemSSID->len;
- wireless_send_event(pDevice->dev, IWEVCUSTOM, &wrqu, pItemSSID->abySSID);
- }
-#endif
- pDevice->fWPA_Authened = true; /* authenticated successfully, per wpa_Result.authenticated */
- }
- pReq->wResult = 0;
- break;
-#endif
-
- default:
- pr_debug("Private command not support..\n");
- }
-
- return result;
-}
diff --git a/drivers/staging/vt6655/ioctl.h b/drivers/staging/vt6655/ioctl.h
deleted file mode 100644
index 2dc5a5743e8..00000000000
--- a/drivers/staging/vt6655/ioctl.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: ioctl.h
- *
- * Purpose: private ioctl interface declarations
- *
- * Author: Lyndon Chen
- *
- * Date: May 21, 2003
- *
- */
-
-#ifndef __IOCTL_H__
-#define __IOCTL_H__
-
-#include "device.h"
-
-int private_ioctl(struct vnt_private *, struct ifreq *rq);
-
-#endif // __IOCTL_H__
diff --git a/drivers/staging/vt6655/iowpa.h b/drivers/staging/vt6655/iowpa.h
deleted file mode 100644
index fe4b22ed49f..00000000000
--- a/drivers/staging/vt6655/iowpa.h
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: iowpa.h
- *
- * Purpose: Handles wpa supplicant ioctl interface
- *
- * Author: Lyndon Chen
- *
- * Date: May 8, 2002
- *
- */
-
-#ifndef __IOWPA_H__
-#define __IOWPA_H__
-
-#define WPA_IE_LEN 64
-
-//WPA related
-
-enum {
- VIAWGET_SET_WPA = 1,
- VIAWGET_SET_KEY = 2,
- VIAWGET_SET_SCAN = 3,
- VIAWGET_GET_SCAN = 4,
- VIAWGET_GET_SSID = 5,
- VIAWGET_GET_BSSID = 6,
- VIAWGET_SET_DROP_UNENCRYPT = 7,
- VIAWGET_SET_DEAUTHENTICATE = 8,
- VIAWGET_SET_ASSOCIATE = 9,
- VIAWGET_SET_DISASSOCIATE = 10
-};
-
-enum {
- VIAWGET_ASSOC_MSG = 1,
- VIAWGET_DISASSOC_MSG = 2,
- VIAWGET_PTK_MIC_MSG = 3,
- VIAWGET_GTK_MIC_MSG = 4,
- VIAWGET_CCKM_ROAM_MSG = 5,
- VIAWGET_DEVICECLOSE_MSG = 6
-};
-
-#pragma pack(1)
-typedef struct viawget_wpa_header {
- u8 type;
- u16 req_ie_len;
- u16 resp_ie_len;
-} viawget_wpa_header;
-
-struct viawget_wpa_param {
- u32 cmd;
- u8 addr[6];
- union {
- struct {
- u8 len;
- u8 data[0];
- } generic_elem;
-
- struct {
- u8 bssid[6];
- u8 ssid[32];
- u8 ssid_len;
- u8 __user *wpa_ie;
- u16 wpa_ie_len;
- int pairwise_suite;
- int group_suite;
- int key_mgmt_suite;
- int auth_alg;
- int mode;
-
- } wpa_associate;
-
- struct {
- int alg_name;
- u16 key_index;
- u16 set_tx;
- u8 *seq;
- u16 seq_len;
- u8 *key;
- u16 key_len;
- } wpa_key;
-
- struct {
- u8 ssid_len;
- u8 ssid[32];
- } scan_req;
-
- struct {
- u16 scan_count;
- u8 __user *buf;
- } scan_results;
-
- } u;
-};
-
-#pragma pack(1)
-struct viawget_scan_result {
- u8 bssid[6];
- u8 ssid[32];
- u16 ssid_len;
- u8 wpa_ie[WPA_IE_LEN];
- u16 wpa_ie_len;
- u8 rsn_ie[WPA_IE_LEN];
- u16 rsn_ie_len;
- int freq; // MHz
- int caps; // e.g. privacy
- int qual; // signal quality
- int noise;
- int level;
- int maxrate;
-};
-
-#pragma pack()
-
-#endif //__IOWPA_H__
diff --git a/drivers/staging/vt6655/iwctl.c b/drivers/staging/vt6655/iwctl.c
deleted file mode 100644
index 14a62bdae27..00000000000
--- a/drivers/staging/vt6655/iwctl.c
+++ /dev/null
@@ -1,1937 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: iwctl.c
- *
- * Purpose: wireless ext & ioctl functions
- *
- * Author: Lyndon Chen
- *
- * Date: July 5, 2006
- *
- * Functions:
- *
- * Revision History:
- *
- */
-
-#include "device.h"
-#include "ioctl.h"
-#include "iocmd.h"
-#include "iwctl.h"
-#include "mac.h"
-#include "card.h"
-#include "hostap.h"
-#include "power.h"
-#include "rf.h"
-
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
-#include "iowpa.h"
-#include "wpactl.h"
-#endif
-
-#include <net/iw_handler.h>
-extern unsigned short TxRate_iwconfig;//2008-5-8 <add> by chester
-
-/*--------------------- Static Definitions -------------------------*/
-
-//2008-0409-07, <Add> by Einsn Liu
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
-#define SUPPORTED_WIRELESS_EXT 18
-#else
-#define SUPPORTED_WIRELESS_EXT 17
-#endif
-
-static const long frequency_list[] = {
- 2412, 2417, 2422, 2427, 2432, 2437, 2442, 2447, 2452, 2457, 2462, 2467, 2472, 2484,
- 4915, 4920, 4925, 4935, 4940, 4945, 4960, 4980,
- 5035, 5040, 5045, 5055, 5060, 5080, 5170, 5180, 5190, 5200, 5210, 5220, 5230, 5240,
- 5260, 5280, 5300, 5320, 5500, 5520, 5540, 5560, 5580, 5600, 5620, 5640, 5660, 5680,
- 5700, 5745, 5765, 5785, 5805, 5825
-};
-
-/*--------------------- Static Classes ----------------------------*/
-/*--------------------- Static Variables --------------------------*/
-/*--------------------- Static Functions --------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-struct iw_statistics *iwctl_get_wireless_stats(struct net_device *dev)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- long ldBm;
-
- pDevice->wstats.status = pDevice->op_mode;
-#ifdef Calcu_LinkQual
- if (pDevice->scStatistic.LinkQuality > 100)
- pDevice->scStatistic.LinkQuality = 100;
- pDevice->wstats.qual.qual = (unsigned char)pDevice->scStatistic.LinkQuality;
-#else
- pDevice->wstats.qual.qual = pDevice->byCurrSQ;
-#endif
- RFvRSSITodBm(pDevice, (unsigned char)(pDevice->uCurrRSSI), &ldBm);
- pDevice->wstats.qual.level = ldBm;
- pDevice->wstats.qual.noise = 0;
- pDevice->wstats.qual.updated = 1;
- pDevice->wstats.discard.nwid = 0;
- pDevice->wstats.discard.code = 0;
- pDevice->wstats.discard.fragment = 0;
- pDevice->wstats.discard.retries = (unsigned long)pDevice->scStatistic.dwTsrErr;
- pDevice->wstats.discard.misc = 0;
- pDevice->wstats.miss.beacon = 0;
-
- return &pDevice->wstats;
-}
-
-/*------------------------------------------------------------------*/
-
-static int iwctl_commit(struct net_device *dev,
- struct iw_request_info *info,
- void *wrq,
- char *extra)
-{
- pr_debug(" SIOCSIWCOMMIT\n");
-
- return 0;
-}
-/*
- * Wireless Handler : get protocol name
- */
-
-int iwctl_giwname(struct net_device *dev,
- struct iw_request_info *info,
- char *wrq,
- char *extra)
-{
- strcpy(wrq, "802.11-a/b/g");
- return 0;
-}
-
-/*
- * Wireless Handler : set scan
- */
-
-static int iwctl_siwscan(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- struct iw_scan_req *req = (struct iw_scan_req *)extra;
- unsigned char abyScanSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
- PWLAN_IE_SSID pItemSSID = NULL;
-
- pr_debug(" SIOCSIWSCAN\n");
-
- if (pDevice->byReAssocCount > 0) { //reject scan when re-associating!
-//send scan event to wpa_Supplicant
- union iwreq_data wrqu;
-
- PRINT_K("wireless_send_event--->SIOCGIWSCAN(scan done)\n");
- memset(&wrqu, 0, sizeof(wrqu));
- wireless_send_event(pDevice->dev, SIOCGIWSCAN, &wrqu, NULL);
- return 0;
- }
-
- spin_lock_irq(&pDevice->lock);
- BSSvClearBSSList((void *)pDevice, pDevice->bLinkPass);
-
-//mike add: active scan OR passive scan OR desire_ssid scan
- if (wrq->length == sizeof(struct iw_scan_req)) {
- if (wrq->flags & IW_SCAN_THIS_ESSID) { //desire_ssid scan
- memset(abyScanSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- pItemSSID = (PWLAN_IE_SSID)abyScanSSID;
- pItemSSID->byElementID = WLAN_EID_SSID;
- memcpy(pItemSSID->abySSID, req->essid, (int)req->essid_len);
- if (pItemSSID->abySSID[req->essid_len - 1] == '\0') {
- if (req->essid_len > 0)
- pItemSSID->len = req->essid_len - 1;
- } else
- pItemSSID->len = req->essid_len;
- pMgmt->eScanType = WMAC_SCAN_PASSIVE;
- PRINT_K("SIOCSIWSCAN:[desired_ssid=%s,len=%d]\n", ((PWLAN_IE_SSID)abyScanSSID)->abySSID,
- ((PWLAN_IE_SSID)abyScanSSID)->len);
- bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, abyScanSSID);
- spin_unlock_irq(&pDevice->lock);
-
- return 0;
- } else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) { //passive scan
- pMgmt->eScanType = WMAC_SCAN_PASSIVE;
- }
- } else { //active scan
- pMgmt->eScanType = WMAC_SCAN_ACTIVE;
- }
-
- pMgmt->eScanType = WMAC_SCAN_PASSIVE;
- bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, NULL);
- spin_unlock_irq(&pDevice->lock);
-
- return 0;
-}
-
-/*
- * Wireless Handler : get scan results
- */
-
-static int iwctl_giwscan(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char *extra)
-{
- int ii, jj, kk;
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- PKnownBSS pBSS;
- PWLAN_IE_SSID pItemSSID;
- PWLAN_IE_SUPP_RATES pSuppRates, pExtSuppRates;
- char *current_ev = extra;
- char *end_buf = extra + IW_SCAN_MAX_DATA;
- char *current_val = NULL;
- struct iw_event iwe;
- long ldBm;
- char buf[MAX_WPA_IE_LEN * 2 + 30];
-
- pr_debug(" SIOCGIWSCAN\n");
-
- if (pMgmt->eScanState == WMAC_IS_SCANNING) {
- // In scanning..
- return -EAGAIN;
- }
- pBSS = &(pMgmt->sBSSList[0]);
- for (ii = 0, jj = 0; jj < MAX_BSS_NUM; jj++) {
- if (current_ev >= end_buf)
- break;
- pBSS = &(pMgmt->sBSSList[jj]);
- if (pBSS->bActive) {
- //ADD mac address
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = SIOCGIWAP;
- iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
- memcpy(iwe.u.ap_addr.sa_data, pBSS->abyBSSID, WLAN_BSSID_LEN);
- current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, IW_EV_ADDR_LEN);
- //ADD ssid
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = SIOCGIWESSID;
- pItemSSID = (PWLAN_IE_SSID)pBSS->abySSID;
- iwe.u.data.length = pItemSSID->len;
- iwe.u.data.flags = 1;
- current_ev = iwe_stream_add_point(info, current_ev, end_buf, &iwe, pItemSSID->abySSID);
- //ADD mode
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = SIOCGIWMODE;
- if (WLAN_GET_CAP_INFO_ESS(pBSS->wCapInfo))
- iwe.u.mode = IW_MODE_INFRA;
- else
- iwe.u.mode = IW_MODE_ADHOC;
-
- iwe.len = IW_EV_UINT_LEN;
- current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, IW_EV_UINT_LEN);
- //ADD frequency
- pSuppRates = (PWLAN_IE_SUPP_RATES)pBSS->abySuppRates;
- pExtSuppRates = (PWLAN_IE_SUPP_RATES)pBSS->abyExtSuppRates;
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = SIOCGIWFREQ;
- iwe.u.freq.m = pBSS->uChannel;
- iwe.u.freq.e = 0;
- iwe.u.freq.i = 0;
- current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, IW_EV_FREQ_LEN);
- //2008-0409-04, <Add> by Einsn Liu
- {
- int f = (int)pBSS->uChannel - 1;
-
- if (f < 0)
- f = 0;
- iwe.u.freq.m = frequency_list[f] * 100000;
- iwe.u.freq.e = 1;
- }
- current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, IW_EV_FREQ_LEN);
- //ADD quality
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = IWEVQUAL;
- RFvRSSITodBm(pDevice, (unsigned char)(pBSS->uRSSI), &ldBm);
- iwe.u.qual.level = ldBm;
- iwe.u.qual.noise = 0;
-//2008-0409-01, <Add> by Einsn Liu
- if (-ldBm < 50)
- iwe.u.qual.qual = 100;
- else if (-ldBm > 90)
- iwe.u.qual.qual = 0;
- else
- iwe.u.qual.qual = (40 - (-ldBm - 50)) * 100 / 40;
-
- iwe.u.qual.updated = 7;
-
- current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, IW_EV_QUAL_LEN);
-
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = SIOCGIWENCODE;
- iwe.u.data.length = 0;
- if (WLAN_GET_CAP_INFO_PRIVACY(pBSS->wCapInfo))
- iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
- else
- iwe.u.data.flags = IW_ENCODE_DISABLED;
-
- current_ev = iwe_stream_add_point(info, current_ev, end_buf, &iwe, pItemSSID->abySSID);
-
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = SIOCGIWRATE;
- iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
- current_val = current_ev + IW_EV_LCP_LEN;
-
- for (kk = 0; kk < 12; kk++) {
- if (pSuppRates->abyRates[kk] == 0)
- break;
- // Bit rate given in 500 kb/s units (+ 0x80)
- iwe.u.bitrate.value = ((pSuppRates->abyRates[kk] & 0x7f) * 500000);
- current_val = iwe_stream_add_value(info, current_ev, current_val, end_buf, &iwe, IW_EV_PARAM_LEN);
- }
- for (kk = 0; kk < 8; kk++) {
- if (pExtSuppRates->abyRates[kk] == 0)
- break;
- // Bit rate given in 500 kb/s units (+ 0x80)
- iwe.u.bitrate.value = ((pExtSuppRates->abyRates[kk] & 0x7f) * 500000);
- current_val = iwe_stream_add_value(info, current_ev, current_val, end_buf, &iwe, IW_EV_PARAM_LEN);
- }
-
- if ((current_val - current_ev) > IW_EV_LCP_LEN)
- current_ev = current_val;
-
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = IWEVCUSTOM;
- sprintf(buf, "bcn_int=%d", pBSS->wBeaconInterval);
- iwe.u.data.length = strlen(buf);
- current_ev = iwe_stream_add_point(info, current_ev, end_buf, &iwe, buf);
-
- if ((pBSS->wWPALen > 0) && (pBSS->wWPALen <= MAX_WPA_IE_LEN)) {
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = IWEVGENIE;
- iwe.u.data.length = pBSS->wWPALen;
- current_ev = iwe_stream_add_point(info, current_ev, end_buf, &iwe, pBSS->byWPAIE);
- }
-
- if ((pBSS->wRSNLen > 0) && (pBSS->wRSNLen <= MAX_WPA_IE_LEN)) {
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = IWEVGENIE;
- iwe.u.data.length = pBSS->wRSNLen;
- current_ev = iwe_stream_add_point(info, current_ev, end_buf, &iwe, pBSS->byRSNIE);
- }
-
- }
- }// for
-
- wrq->length = current_ev - extra;
- return 0;
-}
-
-/*
- * Wireless Handler : set frequency or channel
- */
-
-int iwctl_siwfreq(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_freq *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- int rc = 0;
-
- pr_debug(" SIOCSIWFREQ\n");
-
- // If setting by frequency, convert to a channel
- if ((wrq->e == 1) &&
- (wrq->m >= (int) 2.412e8) &&
- (wrq->m <= (int) 2.487e8)) {
- int f = wrq->m / 100000;
- int c = 0;
-
- while ((c < 14) && (f != frequency_list[c]))
- c++;
- wrq->e = 0;
- wrq->m = c + 1;
- }
- // Setting by channel number
- if ((wrq->m > 14) || (wrq->e > 0))
- rc = -EOPNOTSUPP;
- else {
- int channel = wrq->m;
-
- if ((channel < 1) || (channel > 14)) {
- pr_debug("%s: New channel value of %d is invalid!\n",
- dev->name, wrq->m);
- rc = -EINVAL;
- } else {
- // Yes ! We can set it !!!
- pr_debug(" Set to channel = %d\n", channel);
- pDevice->uChannel = channel;
- //2007-0207-04,<Add> by EinsnLiu
- //Make change effect at once
- pDevice->bCommit = true;
- }
- }
-
- return rc;
-}
-
-/*
- * Wireless Handler : get frequency or channel
- */
-
-int iwctl_giwfreq(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_freq *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
-
- pr_debug(" SIOCGIWFREQ\n");
-
-#ifdef WEXT_USECHANNELS
- wrq->m = (int)pMgmt->uCurrChannel;
- wrq->e = 0;
-#else
- {
- int f = (int)pMgmt->uCurrChannel - 1;
-
- if (f < 0)
- f = 0;
- wrq->m = frequency_list[f] * 100000;
- wrq->e = 1;
- }
-#endif
-
- return 0;
-}
-
-/*
- * Wireless Handler : set operation mode
- */
-
-int iwctl_siwmode(struct net_device *dev,
- struct iw_request_info *info,
- __u32 *wmode,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- int rc = 0;
-
- pr_debug(" SIOCSIWMODE\n");
-
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP && pDevice->bEnableHostapd) {
- pr_debug("Can't set operation mode, hostapd is running\n");
- return rc;
- }
-
- switch (*wmode) {
- case IW_MODE_ADHOC:
- if (pMgmt->eConfigMode != WMAC_CONFIG_IBSS_STA) {
- pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA;
- if (pDevice->flags & DEVICE_FLAGS_OPENED)
- pDevice->bCommit = true;
-
- }
- pr_debug("set mode to ad-hoc\n");
- break;
- case IW_MODE_AUTO:
- case IW_MODE_INFRA:
- if (pMgmt->eConfigMode != WMAC_CONFIG_ESS_STA) {
- pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA;
- if (pDevice->flags & DEVICE_FLAGS_OPENED)
- pDevice->bCommit = true;
-
- }
- pr_debug("set mode to infrastructure\n");
- break;
- case IW_MODE_MASTER:
-
- pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA;
- rc = -EOPNOTSUPP;
- break;
-
- if (pMgmt->eConfigMode != WMAC_CONFIG_AP) {
- pMgmt->eConfigMode = WMAC_CONFIG_AP;
- if (pDevice->flags & DEVICE_FLAGS_OPENED)
- pDevice->bCommit = true;
-
- }
- pr_debug("set mode to Access Point\n");
- break;
-
- case IW_MODE_REPEAT:
- pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA;
- rc = -EOPNOTSUPP;
- break;
- default:
- rc = -EINVAL;
- }
-
- return rc;
-}
-
-/*
- * Wireless Handler : get operation mode
- */
-
-int iwctl_giwmode(struct net_device *dev,
- struct iw_request_info *info,
- __u32 *wmode,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
-
- pr_debug(" SIOCGIWMODE\n");
- // If not managed, assume it's ad-hoc
- switch (pMgmt->eConfigMode) {
- case WMAC_CONFIG_ESS_STA:
- *wmode = IW_MODE_INFRA;
- break;
- case WMAC_CONFIG_IBSS_STA:
- *wmode = IW_MODE_ADHOC;
- break;
- case WMAC_CONFIG_AUTO:
- *wmode = IW_MODE_INFRA;
- break;
- case WMAC_CONFIG_AP:
- *wmode = IW_MODE_MASTER;
- break;
- default:
- *wmode = IW_MODE_ADHOC;
- }
-
- return 0;
-}
-
-/*
- * Wireless Handler : get capability range
- */
-
-int iwctl_giwrange(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char *extra)
-{
- struct iw_range *range = (struct iw_range *)extra;
- int i, k;
- unsigned char abySupportedRates[13] = {0x02, 0x04, 0x0B, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C, 0x90};
-
- pr_debug(" SIOCGIWRANGE\n");
- if (wrq->pointer) {
- wrq->length = sizeof(struct iw_range);
- memset(range, 0, sizeof(struct iw_range));
- range->min_nwid = 0x0000;
- range->max_nwid = 0x0000;
- range->num_channels = 14;
- // Should be based on cap_rid.country to give only
- // what the current card supports
- k = 0;
- for (i = 0; i < 14; i++) {
- range->freq[k].i = i + 1; // List index
- range->freq[k].m = frequency_list[i] * 100000;
- range->freq[k++].e = 1; // Values in table in MHz -> * 10^5 * 10
- }
- range->num_frequency = k;
- // Hum... Should put the right values there
-#ifdef Calcu_LinkQual
- range->max_qual.qual = 100;
-#else
- range->max_qual.qual = 255;
-#endif
- range->max_qual.level = 0;
- range->max_qual.noise = 0;
- range->sensitivity = 255;
-
- for (i = 0; i < 13; i++) {
- range->bitrate[i] = abySupportedRates[i] * 500000;
- if (range->bitrate[i] == 0)
- break;
- }
- range->num_bitrates = i;
-
- // Set an indication of the max TCP throughput
- // in bit/s that we can expect using this interface.
- // May be use for QoS stuff... Jean II
- if (i > 2)
- range->throughput = 5 * 1000 * 1000;
- else
- range->throughput = 1.5 * 1000 * 1000;
-
- range->min_rts = 0;
- range->max_rts = 2312;
- range->min_frag = 256;
- range->max_frag = 2312;
-
- // the encoding capabilities
- range->num_encoding_sizes = 3;
- // 64(40) bits WEP
- range->encoding_size[0] = 5;
- // 128(104) bits WEP
- range->encoding_size[1] = 13;
- // 256 bits for WPA-PSK
- range->encoding_size[2] = 32;
- // 4 keys are allowed
- range->max_encoding_tokens = 4;
-
- range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
- IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
-
- range->min_pmp = 0;
- range->max_pmp = 1000000;// 1 secs
- range->min_pmt = 0;
- range->max_pmt = 1000000;// 1 secs
- range->pmp_flags = IW_POWER_PERIOD;
- range->pmt_flags = IW_POWER_TIMEOUT;
- range->pm_capa = IW_POWER_PERIOD | IW_POWER_TIMEOUT | IW_POWER_ALL_R;
-
- // Transmit Power - values are in mW
-
- range->txpower[0] = 100;
- range->num_txpower = 1;
- range->txpower_capa = IW_TXPOW_MWATT;
- range->we_version_source = SUPPORTED_WIRELESS_EXT;
- range->we_version_compiled = WIRELESS_EXT;
- range->retry_capa = IW_RETRY_LIMIT | IW_RETRY_LIFETIME;
- range->retry_flags = IW_RETRY_LIMIT;
- range->r_time_flags = IW_RETRY_LIFETIME;
- range->min_retry = 1;
- range->max_retry = 65535;
- range->min_r_time = 1024;
- range->max_r_time = 65535 * 1024;
- // Experimental measurements - boundary 11/5.5 Mb/s
- // Note : with or without the (local->rssi), results
- // are somewhat different. - Jean II
- range->avg_qual.qual = 6;
- range->avg_qual.level = 176; // -80 dBm
- range->avg_qual.noise = 0;
- }
-
- return 0;
-}
-
-/*
- * Wireless Handler : set ap mac address
- */
-
-int iwctl_siwap(struct net_device *dev,
- struct iw_request_info *info,
- struct sockaddr *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- int rc = 0;
- unsigned char ZeroBSSID[WLAN_BSSID_LEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
-
- pr_debug(" SIOCSIWAP\n");
- if (pMgmt->eScanState == WMAC_IS_SCANNING) {
- // In scanning..
- pr_debug("SIOCSIWAP(??)-->In scanning..\n");
- }
- if (wrq->sa_family != ARPHRD_ETHER)
- rc = -EINVAL;
- else {
- memcpy(pMgmt->abyDesireBSSID, wrq->sa_data, 6);
- //2008-0409-05, <Add> by Einsn Liu
- if ((pDevice->bLinkPass == true) &&
- (memcmp(pMgmt->abyDesireBSSID, pMgmt->abyCurrBSSID, 6) == 0)) {
- return rc;
- }
- //mike :add
- if ((is_broadcast_ether_addr(pMgmt->abyDesireBSSID)) ||
- (memcmp(pMgmt->abyDesireBSSID, ZeroBSSID, 6) == 0)) {
- PRINT_K("SIOCSIWAP:invalid desired BSSID return!\n");
- return rc;
- }
- //mike add: if the desired AP uses a hidden ssid (there are two identical BSSIDs in the list),
- // then ignore it, because we don't know which one to connect to
- {
- unsigned int ii, uSameBssidNum = 0;
-
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- if (pMgmt->sBSSList[ii].bActive &&
- ether_addr_equal(pMgmt->sBSSList[ii].abyBSSID,
- pMgmt->abyDesireBSSID)) {
- uSameBssidNum++;
- }
- }
- if (uSameBssidNum >= 2) { //hit: desired AP is in hidden ssid mode!!!
- PRINT_K("SIOCSIWAP:ignore for desired AP in hidden mode\n");
- return rc;
- }
- }
-
- if (pDevice->flags & DEVICE_FLAGS_OPENED)
- pDevice->bCommit = true;
-
- }
- return rc;
-}
-
-/*
- * Wireless Handler : get ap mac address
- */
-
-int iwctl_giwap(struct net_device *dev,
- struct iw_request_info *info,
- struct sockaddr *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
-
- pr_debug(" SIOCGIWAP\n");
-
- memcpy(wrq->sa_data, pMgmt->abyCurrBSSID, 6);
- //2008-0410,<Modify> by Einsn Liu
- if ((pDevice->bLinkPass == false) && (pMgmt->eCurrMode != WMAC_MODE_ESS_AP))
- memset(wrq->sa_data, 0, 6);
-
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP)
- memcpy(wrq->sa_data, pMgmt->abyCurrBSSID, 6);
-
- wrq->sa_family = ARPHRD_ETHER;
-
- return 0;
-}
-
-/*
- * Wireless Handler : get ap list
- */
-
-int iwctl_giwaplist(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char *extra)
-{
- int ii, jj, rc = 0;
- struct sockaddr *sock = NULL;
- struct sockaddr *s = NULL;
- struct iw_quality *qual = NULL;
- struct iw_quality *q = NULL;
- PKnownBSS pBSS = NULL;
-
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
-
- pr_debug(" SIOCGIWAPLIST\n");
-
- if (!capable(CAP_NET_ADMIN)) {
- rc = -EPERM;
- goto exit;
- }
-
- if (!wrq->pointer)
- goto exit;
-
- sock = kmalloc_array(IW_MAX_AP, sizeof(struct sockaddr), GFP_KERNEL);
- if (!sock) {
- rc = -ENOMEM;
- goto exit;
- }
-
- qual = kmalloc_array(IW_MAX_AP, sizeof(struct iw_quality), GFP_KERNEL);
- if (!qual) {
- rc = -ENOMEM;
- goto exit;
- }
-
- for (ii = 0, jj = 0; ii < MAX_BSS_NUM; ii++) {
- pBSS = &(pMgmt->sBSSList[ii]);
-
- if (!pBSS->bActive)
- continue;
- if (jj >= IW_MAX_AP)
- break;
-
- s = &sock[jj];
- q = &qual[jj];
-
- memcpy(s->sa_data, pBSS->abyBSSID, 6);
- s->sa_family = ARPHRD_ETHER;
- q->level = pBSS->uRSSI;
- q->qual = 0;
- q->noise = 0;
- q->updated = 2;
- jj++;
- }
-
- wrq->flags = 1; /* Should be define'd */
- wrq->length = jj;
- memcpy(extra, sock, sizeof(struct sockaddr) * jj);
- memcpy(extra + sizeof(struct sockaddr) * jj,
- qual,
- sizeof(struct iw_quality) * jj);
-exit:
- kfree(sock);
- kfree(qual);
- return rc;
-}
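
/* Reply layout for SIOCGIWAPLIST: the extra buffer carries jj struct sockaddr
 * entries (one per active BSS) followed immediately by jj struct iw_quality
 * entries, and wrq->length tells userspace how many of each were filled in. */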
-
-/*
- * Wireless Handler : set essid
- */
-
-int iwctl_siwessid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- PWLAN_IE_SSID pItemSSID;
- //2008-0409-05, <Add> by Einsn Liu
- unsigned char len;
-
- pr_debug(" SIOCSIWESSID\n");
- pDevice->fWPA_Authened = false;
- if (pMgmt->eScanState == WMAC_IS_SCANNING) {
- // In scanning..
- pr_debug("SIOCSIWESSID(??)-->In scanning..\n");
- }
- // Check if we asked for `any'
- if (wrq->flags == 0) {
- // Just send an empty SSID list
- memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- memset(pMgmt->abyDesireBSSID, 0xFF, 6);
- PRINT_K("set essid to 'any'\n");
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- return 0;
-#endif
- } else {
- // Set the SSID
- memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
- pItemSSID->byElementID = WLAN_EID_SSID;
-
- memcpy(pItemSSID->abySSID, extra, wrq->length);
- if (pItemSSID->abySSID[wrq->length - 1] == '\0') {
- if (wrq->length > 0)
- pItemSSID->len = wrq->length - 1;
- } else
- pItemSSID->len = wrq->length;
- pr_debug("set essid to %s\n", pItemSSID->abySSID);
- //2008-0409-05, <Add> by Einsn Liu
- len = (pItemSSID->len > ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->len) ? pItemSSID->len : ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->len;
- if ((pDevice->bLinkPass == true) &&
- (memcmp(pItemSSID->abySSID, ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->abySSID, len) == 0))
- return 0;
-
- //mike:need clear desiredBSSID
- if (pItemSSID->len == 0) {
- memset(pMgmt->abyDesireBSSID, 0xFF, 6);
- return 0;
- }
-
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
-		//WEXT will issue another SIWAP command to link with the desired AP,
-		//so there is no need to associate here
- if (pDevice->bWPASuppWextEnabled == true) {
- /*******search if in hidden ssid mode ****/
- {
- PKnownBSS pCurr = NULL;
- unsigned char abyTmpDesireSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
- unsigned int ii, uSameBssidNum = 0;
-
- memcpy(abyTmpDesireSSID, pMgmt->abyDesireSSID, sizeof(abyTmpDesireSSID));
- pCurr = BSSpSearchBSSList(pDevice,
- NULL,
- abyTmpDesireSSID,
- pMgmt->eConfigPHYMode
-);
-
- if (pCurr == NULL) {
- PRINT_K("SIOCSIWESSID:hidden ssid site survey before associate.......\n");
- vResetCommandTimer((void *)pDevice);
- pMgmt->eScanType = WMAC_SCAN_ACTIVE;
- bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
- bScheduleCommand((void *)pDevice, WLAN_CMD_SSID, pMgmt->abyDesireSSID);
-				} else { //mike: find out whether the desired SSID belongs to a hidden-SSID AP
-					// by checking whether two identical BSSIDs exist in the list
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- if (pMgmt->sBSSList[ii].bActive &&
- ether_addr_equal(pMgmt->sBSSList[ii].abyBSSID,
- pCurr->abyBSSID)) {
- uSameBssidNum++;
- }
- }
- if (uSameBssidNum >= 2) { //hit: desired AP is in hidden ssid mode!!!
- pr_debug("SIOCSIWESSID:hidden ssid directly associate.......\n");
- vResetCommandTimer((void *)pDevice);
- pMgmt->eScanType = WMAC_SCAN_PASSIVE; //this scan type,you'll submit scan result!
- bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
- bScheduleCommand((void *)pDevice, WLAN_CMD_SSID, pMgmt->abyDesireSSID);
- }
- }
- }
- return 0;
- }
-#endif
-
- pr_debug("set essid = %s\n", pItemSSID->abySSID);
- }
-
- if (pDevice->flags & DEVICE_FLAGS_OPENED)
- pDevice->bCommit = true;
-
- return 0;
-}
-
-/*
- * Wireless Handler : get essid
- */
-
-int iwctl_giwessid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- PWLAN_IE_SSID pItemSSID;
-
- pr_debug(" SIOCGIWESSID\n");
-
- // Note : if wrq->u.data.flags != 0, we should
- // get the relevant SSID from the SSID list...
-
- // Get the current SSID
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
- memcpy(extra, pItemSSID->abySSID , pItemSSID->len);
- extra[pItemSSID->len] = '\0';
- wrq->length = pItemSSID->len + 1;
- //2008-0409-03, <Add> by Einsn Liu
- wrq->length = pItemSSID->len;
- wrq->flags = 1; // active
-
- return 0;
-}
-
-/*
- * Wireless Handler : set data rate
- */
-
-int iwctl_siwrate(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- int rc = 0;
- u8 brate = 0;
- int i;
- unsigned char abySupportedRates[13] = {0x02, 0x04, 0x0B, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C, 0x90};
-
- pr_debug(" SIOCSIWRATE\n");
- if (!(pDevice->flags & DEVICE_FLAGS_OPENED)) {
- rc = -EINVAL;
- return rc;
- }
-
- // First : get a valid bit rate value
-
- // Which type of value
- if ((wrq->value < 13) &&
- (wrq->value >= 0)) {
- // Setting by rate index
- // Find value in the magic rate table
- brate = wrq->value;
- } else {
- // Setting by frequency value
- u8 normvalue = (u8) (wrq->value/500000);
-
- // Check if rate is valid
- for (i = 0; i < 13; i++) {
- if (normvalue == abySupportedRates[i]) {
- brate = i;
- break;
- }
- }
- }
- // -1 designed the max rate (mostly auto mode)
- if (wrq->value == -1) {
- // Get the highest available rate
- for (i = 0; i < 13; i++) {
- if (abySupportedRates[i] == 0)
- break;
- }
- if (i != 0)
- brate = i - 1;
-
- }
- // Check that it is valid
- // brate is index of abySupportedRates[]
- if (brate > 13) {
- rc = -EINVAL;
- return rc;
- }
-
- // Now, check if we want a fixed or auto value
- if (wrq->fixed != 0) {
- // Fixed mode
- // One rate, fixed
- pr_debug("Rate Fix\n");
- pDevice->bFixRate = true;
- if ((pDevice->byBBType == BB_TYPE_11B) && (brate > 3)) {
- pDevice->uConnectionRate = 3;
- } else {
- pDevice->uConnectionRate = brate;
- pr_debug("Fixed to Rate %d\n",
- pDevice->uConnectionRate);
- }
-
- } else {
- pDevice->bFixRate = false;
- pDevice->uConnectionRate = 13;
- pr_debug("auto rate:connection_rate is 13\n");
- }
-
- return rc;
-}
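
/* Rate handling note: abySupportedRates[] holds 802.11 rate codes in units of
 * 500 kb/s (0x02 = 1 Mb/s, 0x16 = 11 Mb/s, 0x6C = 54 Mb/s), which is why
 * iwctl_giwrate() below reports wrq->value = brate * 500000. */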
-
-/*
- * Wireless Handler : get data rate
- */
-
-int iwctl_giwrate(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
-//2007-0118-05,<Mark> by EinsnLiu
-//Mark the unnecessary sentences.
-// PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
-
- pr_debug(" SIOCGIWRATE\n");
- {
- unsigned char abySupportedRates[13] = {0x02, 0x04, 0x0B, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C, 0x90};
- int brate = 0;
-//2008-5-8 <modify> by chester
- if (pDevice->bLinkPass) {
- if (pDevice->bFixRate == true) {
- if (pDevice->uConnectionRate < 13) {
- brate = abySupportedRates[pDevice->uConnectionRate];
- } else {
- if (pDevice->byBBType == BB_TYPE_11B)
- brate = 0x16;
- if (pDevice->byBBType == BB_TYPE_11G)
- brate = 0x6C;
- if (pDevice->byBBType == BB_TYPE_11A)
- brate = 0x6C;
- }
- } else {
- brate = abySupportedRates[TxRate_iwconfig];
- }
- } else brate = 0;
-
- wrq->value = brate * 500000;
- // If more than one rate, set auto
- if (pDevice->bFixRate == true)
- wrq->fixed = true;
- }
-
- return 0;
-}
-
-/*
- * Wireless Handler : set rts threshold
- */
-
-int iwctl_siwrts(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- int rc = 0;
-
- pr_debug(" SIOCSIWRTS\n");
-
- {
- int rthr = wrq->value;
-
- if (wrq->disabled)
- rthr = 2312;
-
- if ((rthr < 0) || (rthr > 2312))
- rc = -EINVAL;
- else
- pDevice->wRTSThreshold = rthr;
- }
-
- return 0;
-}
-
-/*
- * Wireless Handler : get rts
- */
-
-int iwctl_giwrts(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
-
- pr_debug(" SIOCGIWRTS\n");
- wrq->value = pDevice->wRTSThreshold;
- wrq->disabled = (wrq->value >= 2312);
- wrq->fixed = 1;
-
- return 0;
-}
-
-/*
- * Wireless Handler : set fragment threshold
- */
-
-int iwctl_siwfrag(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- int rc = 0;
- int fthr = wrq->value;
-
- pr_debug(" SIOCSIWFRAG\n");
-
- if (wrq->disabled)
- fthr = 2312;
- if ((fthr < 256) || (fthr > 2312)) {
- rc = -EINVAL;
- } else {
- fthr &= ~0x1; // Get an even value
- pDevice->wFragmentationThreshold = (u16)fthr;
- }
-
- return rc;
-}
-
-/*
- * Wireless Handler : get fragment threshold
- */
-
-int iwctl_giwfrag(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
-
- pr_debug(" SIOCGIWFRAG\n");
- wrq->value = pDevice->wFragmentationThreshold;
- wrq->disabled = (wrq->value >= 2312);
- wrq->fixed = 1;
-
- return 0;
-}
-
-/*
- * Wireless Handler : set retry threshold
- */
-int iwctl_siwretry(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- int rc = 0;
-
- pr_debug(" SIOCSIWRETRY\n");
-
- if (wrq->disabled) {
- rc = -EINVAL;
- return rc;
- }
-
- if (wrq->flags & IW_RETRY_LIMIT) {
- if (wrq->flags & IW_RETRY_MAX)
- pDevice->byLongRetryLimit = wrq->value;
- else if (wrq->flags & IW_RETRY_MIN)
- pDevice->byShortRetryLimit = wrq->value;
- else {
- // No modifier : set both
- pDevice->byShortRetryLimit = wrq->value;
- pDevice->byLongRetryLimit = wrq->value;
- }
- }
- if (wrq->flags & IW_RETRY_LIFETIME)
- pDevice->wMaxTransmitMSDULifetime = wrq->value;
-
- return rc;
-}
-
-/*
- * Wireless Handler : get retry threshold
- */
-int iwctl_giwretry(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
-
- pr_debug(" SIOCGIWRETRY\n");
- wrq->disabled = 0; // Can't be disabled
-
- // Note : by default, display the min retry number
- if ((wrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
- wrq->flags = IW_RETRY_LIFETIME;
- wrq->value = (int)pDevice->wMaxTransmitMSDULifetime; //ms
- } else if ((wrq->flags & IW_RETRY_MAX)) {
- wrq->flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
- wrq->value = (int)pDevice->byLongRetryLimit;
- } else {
- wrq->flags = IW_RETRY_LIMIT;
- wrq->value = (int)pDevice->byShortRetryLimit;
- if ((int)pDevice->byShortRetryLimit != (int)pDevice->byLongRetryLimit)
- wrq->flags |= IW_RETRY_MIN;
- }
-
- return 0;
-}
-
-/*
- * Wireless Handler : set encode mode
- */
-int iwctl_siwencode(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- unsigned long dwKeyIndex = (unsigned long)(wrq->flags & IW_ENCODE_INDEX);
- int ii, uu, rc = 0;
- int index = (wrq->flags & IW_ENCODE_INDEX);
-
-//2007-0207-07,<Modify> by EinsnLiu
-//There were some problems when using the iwconfig encode/key command to set the WEP key,
-//so this function was largely rewritten.
-//It now supports (assuming the wireless interface is named eth0):
-//iwconfig eth0 key [1] 1122334455 open  /*set the key string for index 1 and make index 1 the driver's in-use key index*/
-//iwconfig eth0 key [3]                  /*set the driver's in-use key index to 3; the key string is unchanged*/
-//iwconfig eth0 key 1122334455           /*set the key string for the driver's current in-use index*/
-//iwconfig eth0 key restricted           /*enable shared-key authentication*/
-
- PSKeyTable pkeytab;
-
- pr_debug(" SIOCSIWENCODE\n");
-
- if ((wrq->flags & IW_ENCODE_DISABLED) == 0) {
- //Not disable encryption
-
- if (dwKeyIndex > WLAN_WEP_NKEYS) {
- rc = -EINVAL;
- return rc;
- }
-
- if (dwKeyIndex < 1 && ((wrq->flags & IW_ENCODE_NOKEY) == 0)) {//set default key
- if (pDevice->byKeyIndex < WLAN_WEP_NKEYS)
- dwKeyIndex = pDevice->byKeyIndex;
- else
- dwKeyIndex = 0;
- } else {
- dwKeyIndex--;
- }
-
- // Check the size of the key
- if (wrq->length > WLAN_WEP232_KEYLEN) {
- rc = -EINVAL;
- return rc;
- }
-
- if (wrq->length > 0) {//have key
-
- if (wrq->length == WLAN_WEP232_KEYLEN) {
- pr_debug("Set 232 bit wep key\n");
- } else if (wrq->length == WLAN_WEP104_KEYLEN) {
- pr_debug("Set 104 bit wep key\n");
- } else if (wrq->length == WLAN_WEP40_KEYLEN) {
- pr_debug("Set 40 bit wep key, index= %d\n",
- (int)dwKeyIndex);
- } else {//no support length
- rc = -EINVAL;
- return rc;
- }
- memset(pDevice->abyKey, 0, WLAN_WEP232_KEYLEN);
- memcpy(pDevice->abyKey, extra, wrq->length);
-
- pr_debug("abyKey: ");
- for (ii = 0; ii < wrq->length; ii++)
- pr_debug("%02x ", pDevice->abyKey[ii]);
-
- if (pDevice->flags & DEVICE_FLAGS_OPENED) {
- spin_lock_irq(&pDevice->lock);
- KeybSetDefaultKey(&(pDevice->sKey),
- (unsigned long)(dwKeyIndex | (1 << 31)),
- wrq->length,
- NULL,
- pDevice->abyKey,
- KEY_CTL_WEP,
- pDevice->PortOffset,
- pDevice->byLocalID
-);
- spin_unlock_irq(&pDevice->lock);
- }
- pDevice->byKeyIndex = (unsigned char)dwKeyIndex;
- pDevice->uKeyLength = wrq->length;
- pDevice->bTransmitKey = true;
- pDevice->bEncryptionEnable = true;
- pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
-
- } else if (index > 0) {
-			//a request with length 0 only changes the default transmit key index;
-			//verify that the newly selected key actually has a non-zero length
- if (pDevice->bEncryptionEnable == false) {
- rc = -EINVAL;
- return rc;
- }
- pr_debug("Just set Default key Index:\n");
- pkeytab = &(pDevice->sKey.KeyTable[MAX_KEY_TABLE - 1]);
- if (pkeytab->GroupKey[(unsigned char)dwKeyIndex].uKeyLength == 0) {
- pr_debug("Default key len is 0\n");
- rc = -EINVAL;
- return rc;
- }
- pDevice->byKeyIndex = (unsigned char)dwKeyIndex;
- pkeytab->dwGTKeyIndex = dwKeyIndex | (1 << 31);
- pkeytab->GroupKey[(unsigned char)dwKeyIndex].dwKeyIndex = dwKeyIndex | (1 << 31);
- }
-
- } else {//disable the key
- pr_debug("Disable WEP function\n");
- if (pDevice->bEncryptionEnable == false)
- return 0;
- pMgmt->bShareKeyAlgorithm = false;
- pDevice->bEncryptionEnable = false;
- pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
- if (pDevice->flags & DEVICE_FLAGS_OPENED) {
- spin_lock_irq(&pDevice->lock);
- for (uu = 0; uu < MAX_KEY_TABLE; uu++)
- MACvDisableKeyEntry(pDevice->PortOffset, uu);
- spin_unlock_irq(&pDevice->lock);
- }
- }
-//End Modify,Einsn
-
- if (wrq->flags & IW_ENCODE_RESTRICTED) {
- pr_debug("Enable WEP & ShareKey System\n");
- pMgmt->bShareKeyAlgorithm = true;
- }
- if (wrq->flags & IW_ENCODE_OPEN) {
- pr_debug("Enable WEP & Open System\n");
- pMgmt->bShareKeyAlgorithm = false;
- }
- return rc;
-}
-
-int iwctl_giwencode(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- char abyKey[WLAN_WEP232_KEYLEN];
-
- unsigned int index = (unsigned int)(wrq->flags & IW_ENCODE_INDEX);
- PSKeyItem pKey = NULL;
-
- pr_debug(" SIOCGIWENCODE\n");
-
- if (index > WLAN_WEP_NKEYS)
- return -EINVAL;
-
- if (index < 1) {//get default key
- if (pDevice->byKeyIndex < WLAN_WEP_NKEYS)
- index = pDevice->byKeyIndex;
- else
- index = 0;
- } else {
- index--;
- }
-
- memset(abyKey, 0, WLAN_WEP232_KEYLEN);
- // Check encryption mode
- wrq->flags = IW_ENCODE_NOKEY;
- // Is WEP enabled ???
- if (pDevice->bEncryptionEnable)
- wrq->flags |= IW_ENCODE_ENABLED;
- else
- wrq->flags |= IW_ENCODE_DISABLED;
-
- if (pMgmt->bShareKeyAlgorithm)
- wrq->flags |= IW_ENCODE_RESTRICTED;
- else
- wrq->flags |= IW_ENCODE_OPEN;
- wrq->length = 0;
-
- if ((index == 0) && (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled ||
- pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled)) {//get wpa pairwise key
- if (KeybGetKey(&(pDevice->sKey), pMgmt->abyCurrBSSID, 0xffffffff, &pKey)) {
- wrq->length = pKey->uKeyLength;
- memcpy(abyKey, pKey->abyKey, pKey->uKeyLength);
- memcpy(extra, abyKey, WLAN_WEP232_KEYLEN);
- }
- } else if (KeybGetKey(&(pDevice->sKey), pDevice->abyBroadcastAddr, (unsigned char)index , &pKey)) {
- wrq->length = pKey->uKeyLength;
- memcpy(abyKey, pKey->abyKey, pKey->uKeyLength);
- memcpy(extra, abyKey, WLAN_WEP232_KEYLEN);
- }
-
- wrq->flags |= index+1;
-
- return 0;
-}
-
-/*
- * Wireless Handler : set power mode
- */
-int iwctl_siwpower(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- int rc = 0;
-
- pr_debug(" SIOCSIWPOWER\n");
-
- if (!(pDevice->flags & DEVICE_FLAGS_OPENED)) {
- rc = -EINVAL;
- return rc;
- }
-
- if (wrq->disabled) {
- pDevice->ePSMode = WMAC_POWER_CAM;
- PSvDisablePowerSaving(pDevice);
- return rc;
- }
- if ((wrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
- pDevice->ePSMode = WMAC_POWER_FAST;
- PSvEnablePowerSaving((void *)pDevice, pMgmt->wListenInterval);
-
- } else if ((wrq->flags & IW_POWER_TYPE) == IW_POWER_PERIOD) {
- pDevice->ePSMode = WMAC_POWER_FAST;
- PSvEnablePowerSaving((void *)pDevice, pMgmt->wListenInterval);
- }
- switch (wrq->flags & IW_POWER_MODE) {
- case IW_POWER_UNICAST_R:
- pr_debug(" SIOCSIWPOWER: IW_POWER_UNICAST_R\n");
- rc = -EINVAL;
- break;
- case IW_POWER_ALL_R:
- pr_debug(" SIOCSIWPOWER: IW_POWER_ALL_R\n");
- rc = -EINVAL;
- case IW_POWER_ON:
- pr_debug(" SIOCSIWPOWER: IW_POWER_ON\n");
- break;
- default:
- rc = -EINVAL;
- }
-
- return rc;
-}
-
-/*
- * Wireless Handler : get power mode
- */
-int iwctl_giwpower(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- int mode = pDevice->ePSMode;
-
- pr_debug(" SIOCGIWPOWER\n");
-
- wrq->disabled = (mode == WMAC_POWER_CAM);
- if (wrq->disabled)
- return 0;
-
- if ((wrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
- wrq->value = (int)((pMgmt->wListenInterval * pMgmt->wCurrBeaconPeriod) << 10);
- wrq->flags = IW_POWER_TIMEOUT;
- } else {
- wrq->value = (int)((pMgmt->wListenInterval * pMgmt->wCurrBeaconPeriod) << 10);
- wrq->flags = IW_POWER_PERIOD;
- }
- wrq->flags |= IW_POWER_ALL_R;
-
- return 0;
-}
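
/* The (wListenInterval * wCurrBeaconPeriod) << 10 expression above assumes the
 * product counts 802.11 time units (1 TU = 1024 microseconds), so the shifted
 * value handed back to userspace is in microseconds. */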
-
-/*
- * Wireless Handler : get Sensitivity
- */
-int iwctl_giwsens(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- long ldBm;
-
- pr_debug(" SIOCGIWSENS\n");
- if (pDevice->bLinkPass == true) {
- RFvRSSITodBm(pDevice, (unsigned char)(pDevice->uCurrRSSI), &ldBm);
- wrq->value = ldBm;
- } else {
- wrq->value = 0;
- }
- wrq->disabled = (wrq->value == 0);
- wrq->fixed = 1;
-
- return 0;
-}
-
-//2008-0409-07, <Add> by Einsn Liu
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
-
-int iwctl_siwauth(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- int ret = 0;
- static int wpa_version = 0; //must be static to save the last value,einsn liu
- static int pairwise = 0;
-
- pr_debug(" SIOCSIWAUTH\n");
- switch (wrq->flags & IW_AUTH_INDEX) {
- case IW_AUTH_WPA_VERSION:
- wpa_version = wrq->value;
- if (wrq->value == IW_AUTH_WPA_VERSION_DISABLED)
- PRINT_K("iwctl_siwauth:set WPADEV to disable at 1??????\n");
- else if (wrq->value == IW_AUTH_WPA_VERSION_WPA)
- PRINT_K("iwctl_siwauth:set WPADEV to WPA1******\n");
- else
- PRINT_K("iwctl_siwauth:set WPADEV to WPA2******\n");
-
- break;
- case IW_AUTH_CIPHER_PAIRWISE:
- pairwise = wrq->value;
- if (pairwise == IW_AUTH_CIPHER_CCMP)
- pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled;
- else if (pairwise == IW_AUTH_CIPHER_TKIP)
- pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled;
- else if (pairwise == IW_AUTH_CIPHER_WEP40 || pairwise == IW_AUTH_CIPHER_WEP104)
- pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
- else if (pairwise == IW_AUTH_CIPHER_NONE)
- ; /* do nothing,einsn liu */
- else
- pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
-
- break;
- case IW_AUTH_CIPHER_GROUP:
- if (wpa_version == IW_AUTH_WPA_VERSION_DISABLED)
- break;
- if (pairwise == IW_AUTH_CIPHER_NONE) {
- if (wrq->value == IW_AUTH_CIPHER_CCMP)
- pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled;
- else
- pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled;
- }
- break;
- case IW_AUTH_KEY_MGMT:
-
- if (wpa_version == IW_AUTH_WPA_VERSION_WPA2) {
- if (wrq->value == IW_AUTH_KEY_MGMT_PSK)
- pMgmt->eAuthenMode = WMAC_AUTH_WPA2PSK;
- else
- pMgmt->eAuthenMode = WMAC_AUTH_WPA2;
- } else if (wpa_version == IW_AUTH_WPA_VERSION_WPA) {
- if (wrq->value == 0)
- pMgmt->eAuthenMode = WMAC_AUTH_WPANONE;
- else if (wrq->value == IW_AUTH_KEY_MGMT_PSK)
- pMgmt->eAuthenMode = WMAC_AUTH_WPAPSK;
- else
- pMgmt->eAuthenMode = WMAC_AUTH_WPA;
- }
-
- break;
- case IW_AUTH_TKIP_COUNTERMEASURES:
- break; /* FIXME */
- case IW_AUTH_DROP_UNENCRYPTED:
- break;
- case IW_AUTH_80211_AUTH_ALG:
- if (wrq->value == IW_AUTH_ALG_OPEN_SYSTEM)
- pMgmt->bShareKeyAlgorithm = false;
- else if (wrq->value == IW_AUTH_ALG_SHARED_KEY)
- pMgmt->bShareKeyAlgorithm = true;
-
- break;
- case IW_AUTH_WPA_ENABLED:
- break;
- case IW_AUTH_RX_UNENCRYPTED_EAPOL:
- break;
- case IW_AUTH_ROAMING_CONTROL:
- ret = -EOPNOTSUPP;
- break;
- case IW_AUTH_PRIVACY_INVOKED:
- pDevice->bEncryptionEnable = !!wrq->value;
- if (pDevice->bEncryptionEnable == false) {
- wpa_version = 0;
- pairwise = 0;
- pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
- pMgmt->bShareKeyAlgorithm = false;
- pMgmt->eAuthenMode = false;
- }
-
- break;
- default:
- ret = -EOPNOTSUPP;
- break;
- }
-
- return ret;
-}
-
-int iwctl_giwauth(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra)
-{
- return -EOPNOTSUPP;
-}
-
-int iwctl_siwgenie(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char __user *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- int ret = 0;
- char length;
-
- if (wrq->length) {
- if (wrq->length < 2)
- return -EINVAL;
-
- ret = get_user(length, extra + 1);
- if (ret)
- return ret;
-
- if (length + 2 != wrq->length)
- return -EINVAL;
-
- if (wrq->length > MAX_WPA_IE_LEN) {
- ret = -ENOMEM;
- goto out;
- }
- memset(pMgmt->abyWPAIE, 0, MAX_WPA_IE_LEN);
- if (copy_from_user(pMgmt->abyWPAIE, extra, wrq->length)) {
- ret = -EFAULT;
- goto out;
- }
- pMgmt->wWPAIELen = wrq->length;
- } else {
- memset(pMgmt->abyWPAIE, 0, MAX_WPA_IE_LEN);
- pMgmt->wWPAIELen = 0;
- }
-
-out://not completely ...not necessary in wpa_supplicant 0.5.8
- return ret;
-}
-
-int iwctl_giwgenie(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char __user *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- int ret = 0;
- int space = wrq->length;
-
- wrq->length = 0;
- if (pMgmt->wWPAIELen > 0) {
- wrq->length = pMgmt->wWPAIELen;
- if (pMgmt->wWPAIELen <= space) {
- if (copy_to_user(extra, pMgmt->abyWPAIE, pMgmt->wWPAIELen))
- ret = -EFAULT;
-
- } else {
- ret = -E2BIG;
- }
- }
-
- return ret;
-}
-
-int iwctl_siwencodeext(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
- struct viawget_wpa_param *param = NULL;
-//original member
- enum wpa_alg alg_name;
- u8 addr[6];
- int key_idx, set_tx = 0;
- u8 seq[IW_ENCODE_SEQ_MAX_SIZE];
- u8 key[64];
- size_t seq_len = 0, key_len = 0;
-
- u8 key_array[64];
- int ret = 0;
-
- PRINT_K("SIOCSIWENCODEEXT......\n");
-
- param = kzalloc(sizeof(*param), GFP_KERNEL);
- if (param == NULL)
- return -ENOMEM;
-
-//recover alg_name
- switch (ext->alg) {
- case IW_ENCODE_ALG_NONE:
- alg_name = WPA_ALG_NONE;
- break;
- case IW_ENCODE_ALG_WEP:
- alg_name = WPA_ALG_WEP;
- break;
- case IW_ENCODE_ALG_TKIP:
- alg_name = WPA_ALG_TKIP;
- break;
- case IW_ENCODE_ALG_CCMP:
- alg_name = WPA_ALG_CCMP;
- break;
- default:
- PRINT_K("Unknown alg = %d\n", ext->alg);
- ret = -ENOMEM;
- goto error;
- }
-//recover addr
- memcpy(addr, ext->addr.sa_data, ETH_ALEN);
-//recover key_idx
- key_idx = (wrq->flags&IW_ENCODE_INDEX) - 1;
-//recover set_tx
- if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)
- set_tx = 1;
-//recover seq,seq_len
- if (ext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) {
- seq_len = IW_ENCODE_SEQ_MAX_SIZE;
- memcpy(seq, ext->rx_seq, seq_len);
- }
-//recover key,key_len
- if (ext->key_len) {
- key_len = ext->key_len;
- memcpy(key, &ext->key[0], key_len);
- }
-
- memset(key_array, 0, 64);
- if (key_len > 0) {
- memcpy(key_array, key, key_len);
- if (key_len == 32) {
-			// note: the two 8-byte halves are stored in swapped order below
- memcpy(&key_array[16], &key[24], 8);
- memcpy(&key_array[24], &key[16], 8);
- }
- }
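/* For 32-byte TKIP material the two trailing 8-byte halves (the Michael MIC
 * transmit and receive keys) are swapped above, on the assumption that the
 * driver's key table expects them in the opposite order from the one the
 * supplicant supplies. */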
-
-/**************Translate iw_encode_ext to viawget_wpa_param****************/
- memcpy(param->addr, addr, ETH_ALEN);
- param->u.wpa_key.alg_name = (int)alg_name;
- param->u.wpa_key.set_tx = set_tx;
- param->u.wpa_key.key_index = key_idx;
- param->u.wpa_key.key_len = key_len;
- param->u.wpa_key.key = (u8 *)key_array;
- param->u.wpa_key.seq = (u8 *)seq;
- param->u.wpa_key.seq_len = seq_len;
-
-//****count key-clear requests to decide whether the current action comes from Network Manager
-//****(a crude heuristic, but there is no better signal available here)
- if (param->u.wpa_key.alg_name == WPA_ALG_NONE) {
- if (param->u.wpa_key.key_index == 0)
- pDevice->bwextcount++;
-
- if ((pDevice->bwextcount == 1) && (param->u.wpa_key.key_index == 1))
- pDevice->bwextcount++;
-
- if ((pDevice->bwextcount == 2) && (param->u.wpa_key.key_index == 2))
- pDevice->bwextcount++;
-
- if ((pDevice->bwextcount == 3) && (param->u.wpa_key.key_index == 3))
- pDevice->bwextcount++;
-
- }
- if (pDevice->bwextcount == 4) {
- pr_debug("SIOCSIWENCODEEXT:Enable WPA WEXT SUPPORT!!!!!\n");
- pDevice->bwextcount = 0;
- pDevice->bWPASuppWextEnabled = true;
- }
-//******
-
- spin_lock_irq(&pDevice->lock);
- ret = wpa_set_keys(pDevice, param, true);
- spin_unlock_irq(&pDevice->lock);
-
-error:
- kfree(param);
- return ret;
-}
-
-int iwctl_giwencodeext(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char *extra)
-{
- return -EOPNOTSUPP;
-}
-
-int iwctl_siwmlme(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char __user *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- struct iw_mlme mime;
-
- int ret = 0;
-
- ret = copy_from_user(&mime, extra, sizeof(mime));
- if (ret)
- return -EFAULT;
-
- if (memcmp(pMgmt->abyCurrBSSID, mime.addr.sa_data, ETH_ALEN)) {
- ret = -EINVAL;
- return ret;
- }
- switch (mime.cmd) {
- case IW_MLME_DEAUTH:
- //this command seems to be not complete,please test it --einsnliu
- //bScheduleCommand((void *) pDevice, WLAN_CMD_DEAUTH, (unsigned char *)&reason);
- break;
- case IW_MLME_DISASSOC:
- if (pDevice->bLinkPass == true) {
- pr_debug("iwctl_siwmlme--->send DISASSOCIATE\n");
- //clear related flags
- memset(pMgmt->abyDesireBSSID, 0xFF, 6);
- KeyvInitTable(&pDevice->sKey, pDevice->PortOffset);
- bScheduleCommand((void *)pDevice, WLAN_CMD_DISASSOCIATE, NULL);
- }
- break;
- default:
- ret = -EOPNOTSUPP;
- }
-
- return ret;
-}
-
-#endif
-
-/*------------------------------------------------------------------*/
-/*
- * Structures to export the Wireless Handlers
- */
-
-static const iw_handler iwctl_handler[] =
-{
- (iw_handler) iwctl_commit, // SIOCSIWCOMMIT
- (iw_handler) NULL, // SIOCGIWNAME
- (iw_handler) NULL, // SIOCSIWNWID
- (iw_handler) NULL, // SIOCGIWNWID
- (iw_handler) NULL, // SIOCSIWFREQ
- (iw_handler) NULL, // SIOCGIWFREQ
- (iw_handler) NULL, // SIOCSIWMODE
- (iw_handler) NULL, // SIOCGIWMODE
- (iw_handler) NULL, // SIOCSIWSENS
- (iw_handler) NULL, // SIOCGIWSENS
- (iw_handler) NULL, // SIOCSIWRANGE
- (iw_handler) iwctl_giwrange, // SIOCGIWRANGE
- (iw_handler) NULL, // SIOCSIWPRIV
- (iw_handler) NULL, // SIOCGIWPRIV
- (iw_handler) NULL, // SIOCSIWSTATS
- (iw_handler) NULL, // SIOCGIWSTATS
- (iw_handler) NULL, // SIOCSIWSPY
- (iw_handler) NULL, // SIOCGIWSPY
- (iw_handler) NULL, // -- hole --
- (iw_handler) NULL, // -- hole --
- (iw_handler) NULL, // SIOCSIWAP
- (iw_handler) NULL, // SIOCGIWAP
- (iw_handler) NULL, // -- hole -- 0x16
- (iw_handler) NULL, // SIOCGIWAPLIST
- (iw_handler) iwctl_siwscan, // SIOCSIWSCAN
- (iw_handler) iwctl_giwscan, // SIOCGIWSCAN
- (iw_handler) NULL, // SIOCSIWESSID
- (iw_handler) NULL, // SIOCGIWESSID
- (iw_handler) NULL, // SIOCSIWNICKN
- (iw_handler) NULL, // SIOCGIWNICKN
- (iw_handler) NULL, // -- hole --
- (iw_handler) NULL, // -- hole --
- (iw_handler) NULL, // SIOCSIWRATE 0x20
- (iw_handler) NULL, // SIOCGIWRATE
- (iw_handler) NULL, // SIOCSIWRTS
- (iw_handler) NULL, // SIOCGIWRTS
- (iw_handler) NULL, // SIOCSIWFRAG
- (iw_handler) NULL, // SIOCGIWFRAG
- (iw_handler) NULL, // SIOCSIWTXPOW
- (iw_handler) NULL, // SIOCGIWTXPOW
- (iw_handler) NULL, // SIOCSIWRETRY
- (iw_handler) NULL, // SIOCGIWRETRY
- (iw_handler) NULL, // SIOCSIWENCODE
- (iw_handler) NULL, // SIOCGIWENCODE
- (iw_handler) NULL, // SIOCSIWPOWER
- (iw_handler) NULL, // SIOCGIWPOWER
-
-//2008-0409-07, <Add> by Einsn Liu
- (iw_handler) NULL, // -- hole --
- (iw_handler) NULL, // -- hole --
- (iw_handler) NULL, // SIOCSIWGENIE
- (iw_handler) NULL, // SIOCGIWGENIE
- (iw_handler) NULL, // SIOCSIWAUTH
- (iw_handler) NULL, // SIOCGIWAUTH
- (iw_handler) NULL, // SIOCSIWENCODEEXT
- (iw_handler) NULL, // SIOCGIWENCODEEXT
- (iw_handler) NULL, // SIOCSIWPMKSA
- (iw_handler) NULL, // -- hole --
-};
-
-static const iw_handler iwctl_private_handler[] =
-{
- NULL, // SIOCIWFIRSTPRIV
-};
-
-struct iw_priv_args iwctl_private_args[] = {
- { IOCTL_CMD_SET,
- IW_PRIV_TYPE_CHAR | 1024, 0,
- "set"},
-};
-
-const struct iw_handler_def iwctl_handler_def =
-{
- .get_wireless_stats = &iwctl_get_wireless_stats,
- .num_standard = sizeof(iwctl_handler)/sizeof(iw_handler),
- .num_private = 0,
- .num_private_args = 0,
- .standard = (iw_handler *)iwctl_handler,
- .private = NULL,
- .private_args = NULL,
-};
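
The table above, together with iwctl_handler_def, was the driver's entire wireless-extensions surface. For context on what is being removed, here is a minimal sketch of how such a table is conventionally attached to the net_device so the WEXT core can dispatch ioctls to it; the helper name is hypothetical and the wireless_handlers field only exists when CONFIG_WIRELESS_EXT is enabled:

#include <linux/netdevice.h>
#include <net/iw_handler.h>

/* Hypothetical helper: hook the exported handler table into the net_device
 * so the wireless-extensions core can route the SIOCxIWxxx ioctls here. */
static void vt6655_attach_wext(struct net_device *dev)
{
	dev->wireless_handlers = &iwctl_handler_def;
}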
diff --git a/drivers/staging/vt6655/iwctl.h b/drivers/staging/vt6655/iwctl.h
deleted file mode 100644
index 7dd63102182..00000000000
--- a/drivers/staging/vt6655/iwctl.h
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: iwctl.h
- *
- * Purpose:
- *
- * Author: Lyndon Chen
- *
- * Date: May 21, 2004
- *
- */
-
-#ifndef __IWCTL_H__
-#define __IWCTL_H__
-
-#include "device.h"
-
-/*--------------------- Export Definitions -------------------------*/
-
-/*--------------------- Export Classes ----------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-struct iw_statistics *iwctl_get_wireless_stats(struct net_device *dev);
-
-int iwctl_siwap(struct net_device *dev,
- struct iw_request_info *info,
- struct sockaddr *wrq,
- char *extra);
-
-int iwctl_giwrange(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char *extra);
-
-int iwctl_giwmode(struct net_device *dev,
- struct iw_request_info *info,
- __u32 *wmode,
- char *extra);
-
-int iwctl_siwmode(struct net_device *dev,
- struct iw_request_info *info,
- __u32 *wmode,
- char *extra);
-
-int iwctl_giwfreq(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_freq *wrq,
- char *extra);
-
-int iwctl_siwfreq(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_freq *wrq,
- char *extra);
-
-int iwctl_giwname(struct net_device *dev,
- struct iw_request_info *info,
- char *wrq,
- char *extra);
-
-int iwctl_giwsens(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra);
-
-int iwctl_giwap(struct net_device *dev,
- struct iw_request_info *info,
- struct sockaddr *wrq,
- char *extra);
-
-int iwctl_giwaplist(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char *extra);
-
-int iwctl_siwessid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char *extra);
-
-int iwctl_giwessid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char *extra);
-
-int iwctl_siwrate(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra);
-
-int iwctl_giwrate(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra);
-
-int iwctl_siwrts(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra);
-
-int iwctl_giwrts(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra);
-
-int iwctl_siwfrag(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra);
-
-int iwctl_giwfrag(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra);
-
-int iwctl_siwretry(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra);
-
-int iwctl_giwretry(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra);
-
-int iwctl_siwencode(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char *extra);
-
-int iwctl_giwencode(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char *extra);
-
-int iwctl_siwpower(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra);
-
-int iwctl_giwpower(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra);
-
-//2008-0409-07, <Add> by Einsn Liu
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
-int iwctl_siwauth(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra);
-
-int iwctl_giwauth(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra);
-
-int iwctl_siwgenie(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char __user *extra);
-
-int iwctl_giwgenie(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char __user *extra);
-
-int iwctl_siwencodeext(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char *extra);
-
-int iwctl_giwencodeext(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char *extra);
-
-int iwctl_siwmlme(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char __user *extra);
-#endif // #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
-//End Add -- //2008-0409-07, <Add> by Einsn Liu
-
-extern const struct iw_handler_def iwctl_handler_def;
-extern struct iw_priv_args iwctl_private_args[];
-
-#endif // __IWCTL_H__
diff --git a/drivers/staging/vt6655/key.c b/drivers/staging/vt6655/key.c
index 211afae306c..f2b3fea9053 100644
--- a/drivers/staging/vt6655/key.c
+++ b/drivers/staging/vt6655/key.c
@@ -25,770 +25,144 @@
*
* Date: May 29, 2003
*
- * Functions:
- * KeyvInitTable - Init Key management table
- * KeybGetKey - Get Key from table
- * KeybSetKey - Set Key to table
- * KeybRemoveKey - Remove Key from table
- * KeybGetTransmitKey - Get Transmit Key from table
- *
- * Revision History:
- *
*/
#include "tmacro.h"
#include "key.h"
#include "mac.h"
-/*--------------------- Static Definitions -------------------------*/
-
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Functions --------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Static Definitions -------------------------*/
-
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Variables --------------------------*/
-
-/*--------------------- Static Functions --------------------------*/
-static void
-s_vCheckKeyTableValid(PSKeyManagement pTable, void __iomem *dwIoBase)
-{
- int i;
-
- for (i = 0; i < MAX_KEY_TABLE; i++) {
- if (pTable->KeyTable[i].bInUse &&
- !pTable->KeyTable[i].PairwiseKey.bKeyValid &&
- !pTable->KeyTable[i].GroupKey[0].bKeyValid &&
- !pTable->KeyTable[i].GroupKey[1].bKeyValid &&
- !pTable->KeyTable[i].GroupKey[2].bKeyValid &&
- !pTable->KeyTable[i].GroupKey[3].bKeyValid) {
- pTable->KeyTable[i].bInUse = false;
- pTable->KeyTable[i].wKeyCtl = 0;
- pTable->KeyTable[i].bSoftWEP = false;
- MACvDisableKeyEntry(dwIoBase, i);
- }
- }
-}
-
-/*--------------------- Export Functions --------------------------*/
-
-/*
- * Description: Init Key management table
- *
- * Parameters:
- * In:
- * pTable - Pointer to Key table
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void KeyvInitTable(PSKeyManagement pTable, void __iomem *dwIoBase)
-{
- int i;
- int jj;
-
- for (i = 0; i < MAX_KEY_TABLE; i++) {
- pTable->KeyTable[i].bInUse = false;
- pTable->KeyTable[i].PairwiseKey.bKeyValid = false;
- pTable->KeyTable[i].PairwiseKey.pvKeyTable = (void *)&pTable->KeyTable[i];
- for (jj = 0; jj < MAX_GROUP_KEY; jj++) {
- pTable->KeyTable[i].GroupKey[jj].bKeyValid = false;
- pTable->KeyTable[i].GroupKey[jj].pvKeyTable = (void *)&pTable->KeyTable[i];
- }
- pTable->KeyTable[i].wKeyCtl = 0;
- pTable->KeyTable[i].dwGTKeyIndex = 0;
- pTable->KeyTable[i].bSoftWEP = false;
- MACvDisableKeyEntry(dwIoBase, i);
- }
-}
-
-/*
- * Description: Get Key from table
- *
- * Parameters:
- * In:
- * pTable - Pointer to Key table
- * pbyBSSID - BSSID of Key
- * dwKeyIndex - Key Index (0xFFFFFFFF means pairwise key)
- * Out:
- * pKey - Key return
- *
- * Return Value: true if found otherwise false
- *
- */
-bool KeybGetKey(
- PSKeyManagement pTable,
- unsigned char *pbyBSSID,
- unsigned long dwKeyIndex,
- PSKeyItem *pKey
-)
-{
- int i;
-
- pr_debug("KeybGetKey()\n");
-
- *pKey = NULL;
- for (i = 0; i < MAX_KEY_TABLE; i++) {
- if (pTable->KeyTable[i].bInUse &&
- ether_addr_equal(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
- if (dwKeyIndex == 0xFFFFFFFF) {
- if (pTable->KeyTable[i].PairwiseKey.bKeyValid) {
- *pKey = &(pTable->KeyTable[i].PairwiseKey);
- return true;
- } else {
- return false;
- }
- } else if (dwKeyIndex < MAX_GROUP_KEY) {
- if (pTable->KeyTable[i].GroupKey[dwKeyIndex].bKeyValid) {
- *pKey = &(pTable->KeyTable[i].GroupKey[dwKeyIndex]);
- return true;
- } else {
- return false;
- }
- } else {
- return false;
- }
- }
- }
- return false;
-}
-
-/*
- * Description: Set Key to table
- *
- * Parameters:
- * In:
- * pTable - Pointer to Key table
- * pbyBSSID - BSSID of Key
- * dwKeyIndex - Key index (reference to NDIS DDK)
- * uKeyLength - Key length
- * KeyRSC - Key RSC
- * pbyKey - Pointer to key
- * Out:
- * none
- *
- * Return Value: true if success otherwise false
- *
- */
-bool KeybSetKey(
- PSKeyManagement pTable,
- unsigned char *pbyBSSID,
- unsigned long dwKeyIndex,
- unsigned long uKeyLength,
- u64 *pKeyRSC,
- unsigned char *pbyKey,
- unsigned char byKeyDecMode,
- void __iomem *dwIoBase,
- unsigned char byLocalID
-)
-{
- int i, j;
- unsigned int ii;
- PSKeyItem pKey;
- unsigned int uKeyIdx;
-
- pr_debug("Enter KeybSetKey: %lX\n", dwKeyIndex);
-
- j = (MAX_KEY_TABLE-1);
- for (i = 0; i < (MAX_KEY_TABLE - 1); i++) {
- if (!pTable->KeyTable[i].bInUse && (j == (MAX_KEY_TABLE-1))) {
- // found empty table
- j = i;
- }
- if (pTable->KeyTable[i].bInUse &&
- ether_addr_equal(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
- // found table already exist
- if ((dwKeyIndex & PAIRWISE_KEY) != 0) {
- // Pairwise key
- pKey = &(pTable->KeyTable[i].PairwiseKey);
- pTable->KeyTable[i].wKeyCtl &= 0xFFF0; // clear pairwise key control filed
- pTable->KeyTable[i].wKeyCtl |= byKeyDecMode;
- uKeyIdx = 4; // use HW key entry 4 for pairwise key
- } else {
- // Group key
- if ((dwKeyIndex & 0x000000FF) >= MAX_GROUP_KEY)
- return false;
- pKey = &(pTable->KeyTable[i].GroupKey[dwKeyIndex & 0x000000FF]);
- if ((dwKeyIndex & TRANSMIT_KEY) != 0) {
- // Group transmit key
- pTable->KeyTable[i].dwGTKeyIndex = dwKeyIndex;
- pr_debug("Group transmit key(R)[%lX]: %d\n",
- pTable->KeyTable[i].dwGTKeyIndex, i);
- }
- pTable->KeyTable[i].wKeyCtl &= 0xFF0F; // clear group key control filed
- pTable->KeyTable[i].wKeyCtl |= (byKeyDecMode << 4);
- pTable->KeyTable[i].wKeyCtl |= 0x0040; // use group key for group address
- uKeyIdx = (dwKeyIndex & 0x000000FF);
- }
- pTable->KeyTable[i].wKeyCtl |= 0x8000; // enable on-fly
-
- pKey->bKeyValid = true;
- pKey->uKeyLength = uKeyLength;
- pKey->dwKeyIndex = dwKeyIndex;
- pKey->byCipherSuite = byKeyDecMode;
- memcpy(pKey->abyKey, pbyKey, uKeyLength);
- if (byKeyDecMode == KEY_CTL_WEP) {
- if (uKeyLength == WLAN_WEP40_KEYLEN)
- pKey->abyKey[15] &= 0x7F;
- if (uKeyLength == WLAN_WEP104_KEYLEN)
- pKey->abyKey[15] |= 0x80;
- }
- MACvSetKeyEntry(dwIoBase, pTable->KeyTable[i].wKeyCtl, i, uKeyIdx, pbyBSSID, (u32 *)pKey->abyKey, byLocalID);
-
- if ((dwKeyIndex & USE_KEYRSC) == 0) {
- // RSC set by NIC
- pKey->KeyRSC = 0;
- } else {
- pKey->KeyRSC = *pKeyRSC;
- }
- pKey->dwTSC47_16 = 0;
- pKey->wTSC15_0 = 0;
-
- pr_debug("KeybSetKey(R):\n");
- pr_debug("pKey->bKeyValid: %d\n ", pKey->bKeyValid);
- pr_debug("pKey->abyKey: ");
- for (ii = 0; ii < pKey->uKeyLength; ii++)
- pr_debug("%02x ", pKey->abyKey[ii]);
-
- pr_debug("\n");
-
- pr_debug("pKey->dwTSC47_16: %lx\n ", pKey->dwTSC47_16);
- pr_debug("pKey->wTSC15_0: %x\n ", pKey->wTSC15_0);
- pr_debug("pKey->dwKeyIndex: %lx\n ", pKey->dwKeyIndex);
-
- return true;
- }
- }
- if (j < (MAX_KEY_TABLE-1)) {
- memcpy(pTable->KeyTable[j].abyBSSID, pbyBSSID, ETH_ALEN);
- pTable->KeyTable[j].bInUse = true;
- if ((dwKeyIndex & PAIRWISE_KEY) != 0) {
- // Pairwise key
- pKey = &(pTable->KeyTable[j].PairwiseKey);
- pTable->KeyTable[j].wKeyCtl &= 0xFFF0; // clear pairwise key control filed
- pTable->KeyTable[j].wKeyCtl |= byKeyDecMode;
- uKeyIdx = 4; // use HW key entry 4 for pairwise key
- } else {
- // Group key
- if ((dwKeyIndex & 0x000000FF) >= MAX_GROUP_KEY)
- return false;
- pKey = &(pTable->KeyTable[j].GroupKey[dwKeyIndex & 0x000000FF]);
- if ((dwKeyIndex & TRANSMIT_KEY) != 0) {
- // Group transmit key
- pTable->KeyTable[j].dwGTKeyIndex = dwKeyIndex;
- pr_debug("Group transmit key(N)[%lX]: %d\n",
- pTable->KeyTable[j].dwGTKeyIndex, j);
- }
- pTable->KeyTable[j].wKeyCtl &= 0xFF0F; // clear group key control filed
- pTable->KeyTable[j].wKeyCtl |= (byKeyDecMode << 4);
- pTable->KeyTable[j].wKeyCtl |= 0x0040; // use group key for group address
- uKeyIdx = (dwKeyIndex & 0x000000FF);
- }
- pTable->KeyTable[j].wKeyCtl |= 0x8000; // enable on-fly
-
- pKey->bKeyValid = true;
- pKey->uKeyLength = uKeyLength;
- pKey->dwKeyIndex = dwKeyIndex;
- pKey->byCipherSuite = byKeyDecMode;
- memcpy(pKey->abyKey, pbyKey, uKeyLength);
- if (byKeyDecMode == KEY_CTL_WEP) {
- if (uKeyLength == WLAN_WEP40_KEYLEN)
- pKey->abyKey[15] &= 0x7F;
- if (uKeyLength == WLAN_WEP104_KEYLEN)
- pKey->abyKey[15] |= 0x80;
- }
- MACvSetKeyEntry(dwIoBase, pTable->KeyTable[j].wKeyCtl, j, uKeyIdx, pbyBSSID, (u32 *)pKey->abyKey, byLocalID);
-
- if ((dwKeyIndex & USE_KEYRSC) == 0) {
- // RSC set by NIC
- pKey->KeyRSC = 0;
- } else {
- pKey->KeyRSC = *pKeyRSC;
- }
- pKey->dwTSC47_16 = 0;
- pKey->wTSC15_0 = 0;
-
- pr_debug("KeybSetKey(N):\n");
- pr_debug("pKey->bKeyValid: %d\n ", pKey->bKeyValid);
- pr_debug("pKey->uKeyLength: %d\n ", (int)pKey->uKeyLength);
- pr_debug("pKey->abyKey: ");
- for (ii = 0; ii < pKey->uKeyLength; ii++)
- pr_debug("%02x ", pKey->abyKey[ii]);
-
- pr_debug("\n");
-
- pr_debug("pKey->dwTSC47_16: %lx\n ", pKey->dwTSC47_16);
- pr_debug("pKey->wTSC15_0: %x\n ", pKey->wTSC15_0);
- pr_debug("pKey->dwKeyIndex: %lx\n ", pKey->dwKeyIndex);
-
- return true;
- }
- return false;
-}
-
-/*
- * Description: Remove Key from table
- *
- * Parameters:
- * In:
- * pTable - Pointer to Key table
- * pbyBSSID - BSSID of Key
- * dwKeyIndex - Key Index (reference to NDIS DDK)
- * Out:
- * none
- *
- * Return Value: true if success otherwise false
- *
- */
-bool KeybRemoveKey(
- PSKeyManagement pTable,
- unsigned char *pbyBSSID,
- unsigned long dwKeyIndex,
- void __iomem *dwIoBase
-)
-{
- int i;
-
- if (is_broadcast_ether_addr(pbyBSSID)) {
- // delete all keys
- if ((dwKeyIndex & PAIRWISE_KEY) != 0) {
- for (i = 0; i < MAX_KEY_TABLE; i++)
- pTable->KeyTable[i].PairwiseKey.bKeyValid = false;
-
- s_vCheckKeyTableValid(pTable, dwIoBase);
- return true;
- } else if ((dwKeyIndex & 0x000000FF) < MAX_GROUP_KEY) {
- for (i = 0; i < MAX_KEY_TABLE; i++) {
- pTable->KeyTable[i].GroupKey[dwKeyIndex & 0x000000FF].bKeyValid = false;
- if ((dwKeyIndex & 0x7FFFFFFF) == (pTable->KeyTable[i].dwGTKeyIndex & 0x7FFFFFFF)) {
- // remove Group transmit key
- pTable->KeyTable[i].dwGTKeyIndex = 0;
- }
- }
- s_vCheckKeyTableValid(pTable, dwIoBase);
- return true;
- } else {
- return false;
- }
- }
-
- for (i = 0; i < MAX_KEY_TABLE; i++) {
- if (pTable->KeyTable[i].bInUse &&
- ether_addr_equal(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
- if ((dwKeyIndex & PAIRWISE_KEY) != 0) {
- pTable->KeyTable[i].PairwiseKey.bKeyValid = false;
- s_vCheckKeyTableValid(pTable, dwIoBase);
- return true;
- } else if ((dwKeyIndex & 0x000000FF) < MAX_GROUP_KEY) {
- pTable->KeyTable[i].GroupKey[dwKeyIndex & 0x000000FF].bKeyValid = false;
- if ((dwKeyIndex & 0x7FFFFFFF) == (pTable->KeyTable[i].dwGTKeyIndex & 0x7FFFFFFF)) {
- // remove Group transmit key
- pTable->KeyTable[i].dwGTKeyIndex = 0;
- }
- s_vCheckKeyTableValid(pTable, dwIoBase);
- return true;
- } else {
- return false;
- }
- }
- }
- return false;
-}
-
-/*
- * Description: Remove Key from table
- *
- * Parameters:
- * In:
- * pTable - Pointer to Key table
- * pbyBSSID - BSSID of Key
- * Out:
- * none
- *
- * Return Value: true if success otherwise false
- *
- */
-bool KeybRemoveAllKey(
- PSKeyManagement pTable,
- unsigned char *pbyBSSID,
- void __iomem *dwIoBase
-)
+int vnt_key_init_table(struct vnt_private *priv)
{
- int i, u;
+ u32 i;
- for (i = 0; i < MAX_KEY_TABLE; i++) {
- if (pTable->KeyTable[i].bInUse &&
- ether_addr_equal(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
- pTable->KeyTable[i].PairwiseKey.bKeyValid = false;
- for (u = 0; u < MAX_GROUP_KEY; u++)
- pTable->KeyTable[i].GroupKey[u].bKeyValid = false;
+ for (i = 0; i < MAX_KEY_TABLE; i++)
+ MACvDisableKeyEntry(priv->PortOffset, i);
- pTable->KeyTable[i].dwGTKeyIndex = 0;
- s_vCheckKeyTableValid(pTable, dwIoBase);
- return true;
- }
- }
- return false;
+ return 0;
}
-/*
- * Description: Remove WEP Key from table
- *
- * Parameters:
- * In:
- * pTable - Pointer to Key table
- * Out:
- * none
- *
- * Return Value: true if success otherwise false
- *
- */
-void KeyvRemoveWEPKey(
- PSKeyManagement pTable,
- unsigned long dwKeyIndex,
- void __iomem *dwIoBase
-)
+static int vnt_set_keymode(struct ieee80211_hw *hw, u8 *mac_addr,
+ struct ieee80211_key_conf *key, u32 key_type, u32 mode,
+ bool onfly_latch)
{
- if ((dwKeyIndex & 0x000000FF) < MAX_GROUP_KEY) {
- if (pTable->KeyTable[MAX_KEY_TABLE-1].bInUse) {
- if (pTable->KeyTable[MAX_KEY_TABLE-1].GroupKey[dwKeyIndex & 0x000000FF].byCipherSuite == KEY_CTL_WEP) {
- pTable->KeyTable[MAX_KEY_TABLE-1].GroupKey[dwKeyIndex & 0x000000FF].bKeyValid = false;
- if ((dwKeyIndex & 0x7FFFFFFF) == (pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex & 0x7FFFFFFF)) {
- // remove Group transmit key
- pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex = 0;
- }
+ struct vnt_private *priv = hw->priv;
+ u8 broadcast[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ u16 key_mode = 0;
+ u32 entry = 0;
+ u8 *bssid;
+ u8 key_inx = key->keyidx;
+ u8 i;
+
+ if (mac_addr)
+ bssid = mac_addr;
+ else
+ bssid = &broadcast[0];
+
+ if (key_type != VNT_KEY_DEFAULTKEY) {
+ for (i = 0; i < (MAX_KEY_TABLE - 1); i++) {
+ if (!test_bit(i, &priv->key_entry_inuse)) {
+ set_bit(i, &priv->key_entry_inuse);
+
+ key->hw_key_idx = i;
+ entry = key->hw_key_idx;
+ break;
}
}
- s_vCheckKeyTableValid(pTable, dwIoBase);
}
-}
-void KeyvRemoveAllWEPKey(
- PSKeyManagement pTable,
- void __iomem *dwIoBase
-)
-{
- int i;
-
- for (i = 0; i < MAX_GROUP_KEY; i++)
- KeyvRemoveWEPKey(pTable, i, dwIoBase);
+ switch (key_type) {
+ /* fallthrough */
+ case VNT_KEY_DEFAULTKEY:
+ /* default key last entry */
+ entry = MAX_KEY_TABLE - 1;
+ key->hw_key_idx = entry;
+ case VNT_KEY_ALLGROUP:
+ key_mode |= VNT_KEY_ALLGROUP;
+ if (onfly_latch)
+ key_mode |= VNT_KEY_ONFLY_ALL;
+ case VNT_KEY_GROUP_ADDRESS:
+ key_mode |= mode;
+ case VNT_KEY_GROUP:
+ key_mode |= (mode << 4);
+ key_mode |= VNT_KEY_GROUP;
+ break;
+ case VNT_KEY_PAIRWISE:
+ key_mode |= mode;
+ key_inx = 4;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (onfly_latch)
+ key_mode |= VNT_KEY_ONFLY;
+
+ if (mode == KEY_CTL_WEP) {
+ if (key->keylen == WLAN_KEY_LEN_WEP40)
+ key->key[15] &= 0x7f;
+ if (key->keylen == WLAN_KEY_LEN_WEP104)
+ key->key[15] |= 0x80;
+ }
+
+ MACvSetKeyEntry(priv->PortOffset, key_mode, entry, key_inx,
+ bssid, (u32 *)key->key, priv->byLocalID);
+
+ return 0;
}
-/*
- * Description: Get Transmit Key from table
- *
- * Parameters:
- * In:
- * pTable - Pointer to Key table
- * pbyBSSID - BSSID of Key
- * Out:
- * pKey - Key return
- *
- * Return Value: true if found otherwise false
- *
- */
-bool KeybGetTransmitKey(
- PSKeyManagement pTable,
- unsigned char *pbyBSSID,
- unsigned long dwKeyType,
- PSKeyItem *pKey
-)
+int vnt_set_keys(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+ struct ieee80211_vif *vif, struct ieee80211_key_conf *key)
{
- int i, ii;
-
- *pKey = NULL;
- for (i = 0; i < MAX_KEY_TABLE; i++) {
- if (pTable->KeyTable[i].bInUse &&
- ether_addr_equal(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
- if (dwKeyType == PAIRWISE_KEY) {
- if (pTable->KeyTable[i].PairwiseKey.bKeyValid) {
- *pKey = &(pTable->KeyTable[i].PairwiseKey);
-
- pr_debug("KeybGetTransmitKey:");
- pr_debug("PAIRWISE_KEY: KeyTable.abyBSSID: ");
- for (ii = 0; ii < 6; ii++)
- pr_debug("%x ",
- pTable->KeyTable[i].abyBSSID[ii]);
-
- pr_debug("\n");
+ struct ieee80211_bss_conf *conf = &vif->bss_conf;
+ struct vnt_private *priv = hw->priv;
+ u8 *mac_addr = NULL;
+ u8 key_dec_mode = 0;
+ int ret = 0;
+ u32 u;
- return true;
- } else {
- pr_debug("PairwiseKey.bKeyValid == false\n");
- return false;
- }
- } // End of Type == PAIRWISE
- else {
- if (pTable->KeyTable[i].dwGTKeyIndex == 0) {
- pr_debug("ERROR: dwGTKeyIndex == 0 !!!\n");
- return false;
- }
- if (pTable->KeyTable[i].GroupKey[(pTable->KeyTable[i].dwGTKeyIndex&0x000000FF)].bKeyValid) {
- *pKey = &(pTable->KeyTable[i].GroupKey[(pTable->KeyTable[i].dwGTKeyIndex&0x000000FF)]);
-
- pr_debug("KeybGetTransmitKey:");
- pr_debug("GROUP_KEY: KeyTable.abyBSSID\n");
- for (ii = 0; ii < 6; ii++)
- pr_debug("%x ",
- pTable->KeyTable[i].abyBSSID[ii]);
-
- pr_debug("\n");
- pr_debug("dwGTKeyIndex: %lX\n",
- pTable->KeyTable[i].dwGTKeyIndex);
-
- return true;
- } else {
- pr_debug("GroupKey.bKeyValid == false\n");
- return false;
- }
- } // End of Type = GROUP
- } // BSSID match
- }
- pr_debug("ERROR: NO Match BSSID !!! ");
- for (ii = 0; ii < 6; ii++)
- pr_debug("%02x ", *(pbyBSSID+ii));
-
- pr_debug("\n");
- return false;
-}
-
-/*
- * Description: Check Pairewise Key
- *
- * Parameters:
- * In:
- * pTable - Pointer to Key table
- * Out:
- * none
- *
- * Return Value: true if found otherwise false
- *
- */
-bool KeybCheckPairewiseKey(
- PSKeyManagement pTable,
- PSKeyItem *pKey
-)
-{
- int i;
+ if (sta)
+ mac_addr = &sta->addr[0];
- *pKey = NULL;
- for (i = 0; i < MAX_KEY_TABLE; i++) {
- if (pTable->KeyTable[i].bInUse &&
- pTable->KeyTable[i].PairwiseKey.bKeyValid) {
- *pKey = &(pTable->KeyTable[i].PairwiseKey);
- return true;
- }
- }
- return false;
-}
+ switch (key->cipher) {
+ case 0:
+ for (u = 0 ; u < MAX_KEY_TABLE; u++)
+ MACvDisableKeyEntry(priv->PortOffset, u);
+ return ret;
-/*
- * Description: Set Key to table
- *
- * Parameters:
- * In:
- * pTable - Pointer to Key table
- * dwKeyIndex - Key index (reference to NDIS DDK)
- * uKeyLength - Key length
- * KeyRSC - Key RSC
- * pbyKey - Pointer to key
- * Out:
- * none
- *
- * Return Value: true if success otherwise false
- *
- */
-bool KeybSetDefaultKey(
- PSKeyManagement pTable,
- unsigned long dwKeyIndex,
- unsigned long uKeyLength,
- u64 *pKeyRSC,
- unsigned char *pbyKey,
- unsigned char byKeyDecMode,
- void __iomem *dwIoBase,
- unsigned char byLocalID
-)
-{
- unsigned int ii;
- PSKeyItem pKey;
- unsigned int uKeyIdx;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ for (u = 0; u < MAX_KEY_TABLE; u++)
+ MACvDisableKeyEntry(priv->PortOffset, u);
- pr_debug("Enter KeybSetDefaultKey: %1x, %d\n",
- (int)dwKeyIndex, (int)uKeyLength);
+ vnt_set_keymode(hw, mac_addr,
+ key, VNT_KEY_DEFAULTKEY, KEY_CTL_WEP, true);
- if ((dwKeyIndex & PAIRWISE_KEY) != 0) // Pairwise key
- return false;
- else if ((dwKeyIndex & 0x000000FF) >= MAX_GROUP_KEY)
- return false;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
- if (uKeyLength > MAX_KEY_LEN)
- return false;
+ return ret;
+ case WLAN_CIPHER_SUITE_TKIP:
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
- pTable->KeyTable[MAX_KEY_TABLE - 1].bInUse = true;
- for (ii = 0; ii < ETH_ALEN; ii++)
- pTable->KeyTable[MAX_KEY_TABLE - 1].abyBSSID[ii] = 0xFF;
+ key_dec_mode = KEY_CTL_TKIP;
- // Group key
- pKey = &(pTable->KeyTable[MAX_KEY_TABLE - 1].GroupKey[dwKeyIndex & 0x000000FF]);
- if ((dwKeyIndex & TRANSMIT_KEY) != 0) {
- // Group transmit key
- pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex = dwKeyIndex;
- pr_debug("Group transmit key(R)[%lX]: %d\n",
- pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex,
- MAX_KEY_TABLE-1);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ key_dec_mode = KEY_CTL_CCMP;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
}
- pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl &= 0x7F00; // clear all key control filed
- pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl |= (byKeyDecMode << 4);
- pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl |= (byKeyDecMode);
- pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl |= 0x0044; // use group key for all address
- uKeyIdx = (dwKeyIndex & 0x000000FF);
- if ((uKeyLength == WLAN_WEP232_KEYLEN) &&
- (byKeyDecMode == KEY_CTL_WEP)) {
- pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl |= 0x4000; // disable on-fly disable address match
- pTable->KeyTable[MAX_KEY_TABLE-1].bSoftWEP = true;
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+ vnt_set_keymode(hw, mac_addr,
+ key, VNT_KEY_PAIRWISE, key_dec_mode, true);
} else {
- if (!pTable->KeyTable[MAX_KEY_TABLE-1].bSoftWEP)
- pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl |= 0xC000; // enable on-fly disable address match
- }
+ vnt_set_keymode(hw, mac_addr,
+ key, VNT_KEY_DEFAULTKEY, key_dec_mode, true);
- pKey->bKeyValid = true;
- pKey->uKeyLength = uKeyLength;
- pKey->dwKeyIndex = dwKeyIndex;
- pKey->byCipherSuite = byKeyDecMode;
- memcpy(pKey->abyKey, pbyKey, uKeyLength);
- if (byKeyDecMode == KEY_CTL_WEP) {
- if (uKeyLength == WLAN_WEP40_KEYLEN)
- pKey->abyKey[15] &= 0x7F;
- if (uKeyLength == WLAN_WEP104_KEYLEN)
- pKey->abyKey[15] |= 0x80;
- }
- MACvSetKeyEntry(dwIoBase, pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl, MAX_KEY_TABLE-1, uKeyIdx, pTable->KeyTable[MAX_KEY_TABLE-1].abyBSSID, (u32 *)pKey->abyKey, byLocalID);
-
- if ((dwKeyIndex & USE_KEYRSC) == 0) {
- // RSC set by NIC
- pKey->KeyRSC = 0;
- } else {
- pKey->KeyRSC = *pKeyRSC;
+ vnt_set_keymode(hw, (u8 *)conf->bssid,
+ key, VNT_KEY_GROUP_ADDRESS, key_dec_mode, true);
}
- pKey->dwTSC47_16 = 0;
- pKey->wTSC15_0 = 0;
-
- pr_debug("KeybSetKey(R):\n");
- pr_debug("pKey->bKeyValid: %d\n", pKey->bKeyValid);
- pr_debug("pKey->uKeyLength: %d\n", (int)pKey->uKeyLength);
- pr_debug("pKey->abyKey:\n");
- for (ii = 0; ii < pKey->uKeyLength; ii++)
- pr_debug("%x", pKey->abyKey[ii]);
-
- pr_debug("\n");
-
- pr_debug("pKey->dwTSC47_16: %lx\n", pKey->dwTSC47_16);
- pr_debug("pKey->wTSC15_0: %x\n", pKey->wTSC15_0);
- pr_debug("pKey->dwKeyIndex: %lx\n", pKey->dwKeyIndex);
-
- return true;
-}
-/*
- * Description: Set Key to table
- *
- * Parameters:
- * In:
- * pTable - Pointer to Key table
- * dwKeyIndex - Key index (reference to NDIS DDK)
- * uKeyLength - Key length
- * KeyRSC - Key RSC
- * pbyKey - Pointer to key
- * Out:
- * none
- *
- * Return Value: true if success otherwise false
- *
- */
-bool KeybSetAllGroupKey(
- PSKeyManagement pTable,
- unsigned long dwKeyIndex,
- unsigned long uKeyLength,
- u64 *pKeyRSC,
- unsigned char *pbyKey,
- unsigned char byKeyDecMode,
- void __iomem *dwIoBase,
- unsigned char byLocalID
-)
-{
- int i;
- unsigned int ii;
- PSKeyItem pKey;
- unsigned int uKeyIdx;
-
- pr_debug("Enter KeybSetAllGroupKey: %lX\n", dwKeyIndex);
-
- if ((dwKeyIndex & PAIRWISE_KEY) != 0) // Pairwise key
- return false;
- else if ((dwKeyIndex & 0x000000FF) >= MAX_GROUP_KEY)
- return false;
-
- for (i = 0; i < MAX_KEY_TABLE - 1; i++) {
- if (pTable->KeyTable[i].bInUse) {
- // found table already exist
- // Group key
- pKey = &(pTable->KeyTable[i].GroupKey[dwKeyIndex & 0x000000FF]);
- if ((dwKeyIndex & TRANSMIT_KEY) != 0) {
- // Group transmit key
- pTable->KeyTable[i].dwGTKeyIndex = dwKeyIndex;
- pr_debug("Group transmit key(R)[%lX]: %d\n",
- pTable->KeyTable[i].dwGTKeyIndex, i);
-
- }
- pTable->KeyTable[i].wKeyCtl &= 0xFF0F; // clear group key control filed
- pTable->KeyTable[i].wKeyCtl |= (byKeyDecMode << 4);
- pTable->KeyTable[i].wKeyCtl |= 0x0040; // use group key for group address
- uKeyIdx = (dwKeyIndex & 0x000000FF);
-
- pTable->KeyTable[i].wKeyCtl |= 0x8000; // enable on-fly
-
- pKey->bKeyValid = true;
- pKey->uKeyLength = uKeyLength;
- pKey->dwKeyIndex = dwKeyIndex;
- pKey->byCipherSuite = byKeyDecMode;
- memcpy(pKey->abyKey, pbyKey, uKeyLength);
- if (byKeyDecMode == KEY_CTL_WEP) {
- if (uKeyLength == WLAN_WEP40_KEYLEN)
- pKey->abyKey[15] &= 0x7F;
- if (uKeyLength == WLAN_WEP104_KEYLEN)
- pKey->abyKey[15] |= 0x80;
- }
- MACvSetKeyEntry(dwIoBase, pTable->KeyTable[i].wKeyCtl, i, uKeyIdx, pTable->KeyTable[i].abyBSSID, (u32 *)pKey->abyKey, byLocalID);
-
- if ((dwKeyIndex & USE_KEYRSC) == 0) {
- // RSC set by NIC
- pKey->KeyRSC = 0;
- } else {
- pKey->KeyRSC = *pKeyRSC;
- }
- pKey->dwTSC47_16 = 0;
- pKey->wTSC15_0 = 0;
-
- pr_debug("KeybSetKey(R):\n");
- pr_debug("pKey->bKeyValid: %d\n ", pKey->bKeyValid);
- pr_debug("pKey->uKeyLength: %d\n ",
- (int)pKey->uKeyLength);
- pr_debug("pKey->abyKey: ");
- for (ii = 0; ii < pKey->uKeyLength; ii++)
- pr_debug("%02x ", pKey->abyKey[ii]);
-
- pr_debug("\n");
-
- } // (pTable->KeyTable[i].bInUse == true)
- }
- return true;
+ return 0;
}
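
The rewritten vnt_set_keys() above targets the mac80211 key API instead of the old NDIS-style key table. Its caller, the driver's ieee80211_ops::set_key handler, is not part of this hunk; the sketch below only illustrates the usual wiring, with vnt_op_set_key as a hypothetical name and hw->priv assumed to be the driver's struct vnt_private.

	/* Hypothetical mac80211 glue -- not part of this patch.  Assumes the
	 * standard ieee80211_ops::set_key signature. */
	static int vnt_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
				  struct ieee80211_vif *vif,
				  struct ieee80211_sta *sta,
				  struct ieee80211_key_conf *key)
	{
		struct vnt_private *priv = hw->priv;

		switch (cmd) {
		case SET_KEY:
			/* Program the on-chip key table via the new helper
			 * (note the hw, sta, vif, key argument order). */
			return vnt_set_keys(hw, sta, vif, key);
		case DISABLE_KEY:
			/* Drop the hardware entry assigned to this key. */
			MACvDisableKeyEntry(priv->PortOffset, key->hw_key_idx);
			return 0;
		default:
			return -EOPNOTSUPP;
		}
	}
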
diff --git a/drivers/staging/vt6655/key.h b/drivers/staging/vt6655/key.h
index 44efe18315a..c01d4afb6ab 100644
--- a/drivers/staging/vt6655/key.h
+++ b/drivers/staging/vt6655/key.h
@@ -30,9 +30,7 @@
#ifndef __KEY_H__
#define __KEY_H__
-#include "ttype.h"
-#include "tether.h"
-#include "80211mgr.h"
+#include <net/mac80211.h>
/*--------------------- Export Definitions -------------------------*/
#define MAX_GROUP_KEY 4
@@ -53,124 +51,19 @@
#define KEY_CTL_CCMP 0x03
#define KEY_CTL_INVALID 0xFF
-typedef struct tagSKeyItem {
- bool bKeyValid;
- unsigned long uKeyLength;
- unsigned char abyKey[MAX_KEY_LEN];
- u64 KeyRSC;
- unsigned long dwTSC47_16;
- unsigned short wTSC15_0;
- unsigned char byCipherSuite;
- unsigned char byReserved0;
- unsigned long dwKeyIndex;
- void *pvKeyTable;
-} SKeyItem, *PSKeyItem; //64
+#define VNT_KEY_DEFAULTKEY 0x1
+#define VNT_KEY_GROUP_ADDRESS 0x2
+#define VNT_KEY_ALLGROUP 0x4
+#define VNT_KEY_GROUP 0x40
+#define VNT_KEY_PAIRWISE 0x00
+#define VNT_KEY_ONFLY 0x8000
+#define VNT_KEY_ONFLY_ALL 0x4000
-typedef struct tagSKeyTable {
- unsigned char abyBSSID[ETH_ALEN]; //6
- unsigned char byReserved0[2]; //8
- SKeyItem PairwiseKey;
- SKeyItem GroupKey[MAX_GROUP_KEY]; //64*5 = 320, 320+8=328
- unsigned long dwGTKeyIndex; // GroupTransmitKey Index
- bool bInUse;
- //2006-1116-01,<Modify> by NomadZhao
- bool bSoftWEP;
- unsigned short wKeyCtl; // for address of wKeyCtl at align 4
+struct vnt_private;
- unsigned char byReserved1[6];
-} SKeyTable, *PSKeyTable; //348
+int vnt_key_init_table(struct vnt_private *);
-typedef struct tagSKeyManagement {
- SKeyTable KeyTable[MAX_KEY_TABLE];
-} SKeyManagement, *PSKeyManagement;
-
-/*--------------------- Export Types ------------------------------*/
-
-/*--------------------- Export Macros ------------------------------*/
-
-/*--------------------- Export Classes ----------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-void KeyvInitTable(PSKeyManagement pTable, void __iomem *dwIoBase);
-
-bool KeybGetKey(
- PSKeyManagement pTable,
- unsigned char *pbyBSSID,
- unsigned long dwKeyIndex,
- PSKeyItem *pKey
-);
-
-bool KeybSetKey(
- PSKeyManagement pTable,
- unsigned char *pbyBSSID,
- unsigned long dwKeyIndex,
- unsigned long uKeyLength,
- u64 *pKeyRSC,
- unsigned char *pbyKey,
- unsigned char byKeyDecMode,
- void __iomem *dwIoBase,
- unsigned char byLocalID
-);
-
-bool KeybSetDefaultKey(
- PSKeyManagement pTable,
- unsigned long dwKeyIndex,
- unsigned long uKeyLength,
- u64 *pKeyRSC,
- unsigned char *pbyKey,
- unsigned char byKeyDecMode,
- void __iomem *dwIoBase,
- unsigned char byLocalID
-);
-
-bool KeybRemoveKey(
- PSKeyManagement pTable,
- unsigned char *pbyBSSID,
- unsigned long dwKeyIndex,
- void __iomem *dwIoBase
-);
-
-bool KeybGetTransmitKey(
- PSKeyManagement pTable,
- unsigned char *pbyBSSID,
- unsigned long dwKeyType,
- PSKeyItem *pKey
-);
-
-bool KeybCheckPairewiseKey(
- PSKeyManagement pTable,
- PSKeyItem *pKey
-);
-
-bool KeybRemoveAllKey(
- PSKeyManagement pTable,
- unsigned char *pbyBSSID,
- void __iomem *dwIoBase
-);
-
-void KeyvRemoveWEPKey(
- PSKeyManagement pTable,
- unsigned long dwKeyIndex,
- void __iomem *dwIoBase
-);
-
-void KeyvRemoveAllWEPKey(
- PSKeyManagement pTable,
- void __iomem *dwIoBase
-);
-
-bool KeybSetAllGroupKey(
- PSKeyManagement pTable,
- unsigned long dwKeyIndex,
- unsigned long uKeyLength,
- u64 *pKeyRSC,
- unsigned char *pbyKey,
- unsigned char byKeyDecMode,
- void __iomem *dwIoBase,
- unsigned char byLocalID
-);
+int vnt_set_keys(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+ struct ieee80211_vif *vif, struct ieee80211_key_conf *key);
#endif // __KEY_H__
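
vnt_key_init_table() is only declared in this header; its body lands elsewhere in the series. Judging from the WEP path above, which walks the whole hardware table with MACvDisableKeyEntry(), a minimal implementation would simply invalidate every entry at init time. This is a sketch under that assumption, not the patch's actual code:

	int vnt_key_init_table(struct vnt_private *priv)
	{
		u32 i;

		/* Invalidate every hardware key entry before use. */
		for (i = 0; i < MAX_KEY_TABLE; i++)
			MACvDisableKeyEntry(priv->PortOffset, i);

		return 0;
	}
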
diff --git a/drivers/staging/vt6655/mac.c b/drivers/staging/vt6655/mac.c
index e3b0b7f7ca8..8f0d652fea7 100644
--- a/drivers/staging/vt6655/mac.c
+++ b/drivers/staging/vt6655/mac.c
@@ -26,30 +26,15 @@
* Date: May 21, 1996
*
* Functions:
- * MACvReadAllRegs - Read All MAC Registers to buffer
* MACbIsRegBitsOn - Test if All test Bits On
* MACbIsRegBitsOff - Test if All test Bits Off
* MACbIsIntDisable - Test if MAC interrupt disable
- * MACbyReadMultiAddr - Read Multicast Address Mask Pattern
- * MACvWriteMultiAddr - Write Multicast Address Mask Pattern
- * MACvSetMultiAddrByHash - Set Multicast Address Mask by Hash value
- * MACvResetMultiAddrByHash - Clear Multicast Address Mask by Hash value
- * MACvSetRxThreshold - Set Rx Threshold value
- * MACvGetRxThreshold - Get Rx Threshold value
- * MACvSetTxThreshold - Set Tx Threshold value
- * MACvGetTxThreshold - Get Tx Threshold value
- * MACvSetDmaLength - Set Dma Length value
- * MACvGetDmaLength - Get Dma Length value
* MACvSetShortRetryLimit - Set 802.11 Short Retry limit
* MACvGetShortRetryLimit - Get 802.11 Short Retry limit
* MACvSetLongRetryLimit - Set 802.11 Long Retry limit
- * MACvGetLongRetryLimit - Get 802.11 Long Retry limit
* MACvSetLoopbackMode - Set MAC Loopback Mode
- * MACbIsInLoopbackMode - Test if MAC in Loopback mode
- * MACvSetPacketFilter - Set MAC Address Filter
* MACvSaveContext - Save Context of MAC Registers
* MACvRestoreContext - Restore Context of MAC Registers
- * MACbCompareContext - Compare if values of MAC Registers same as Context
* MACbSoftwareReset - Software Reset MAC
* MACbSafeRxOff - Turn Off MAC Rx
* MACbSafeTxOff - Turn Off MAC Tx
@@ -69,54 +54,8 @@
*/
#include "tmacro.h"
-#include "tether.h"
#include "mac.h"
-unsigned short TxRate_iwconfig;//2008-5-8 <add> by chester
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Variables --------------------------*/
-
-/*--------------------- Static Functions --------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-/*
- * Description:
- * Read All MAC Registers to buffer
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- * Out:
- * pbyMacRegs - buffer to read
- *
- * Return Value: none
- *
- */
-void MACvReadAllRegs(void __iomem *dwIoBase, unsigned char *pbyMacRegs)
-{
- int ii;
-
- // read page0 register
- for (ii = 0; ii < MAC_MAX_CONTEXT_SIZE_PAGE0; ii++) {
- VNSvInPortB(dwIoBase + ii, pbyMacRegs);
- pbyMacRegs++;
- }
-
- MACvSelectPage1(dwIoBase);
-
- // read page1 register
- for (ii = 0; ii < MAC_MAX_CONTEXT_SIZE_PAGE1; ii++) {
- VNSvInPortB(dwIoBase + ii, pbyMacRegs);
- pbyMacRegs++;
- }
-
- MACvSelectPage0(dwIoBase);
-}
-
/*
* Description:
* Test if all test bits on
@@ -189,252 +128,6 @@ bool MACbIsIntDisable(void __iomem *dwIoBase)
/*
* Description:
- * Read MAC Multicast Address Mask
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- * uByteidx - Index of Mask
- * Out:
- * none
- *
- * Return Value: Mask Value read
- *
- */
-unsigned char MACbyReadMultiAddr(void __iomem *dwIoBase, unsigned int uByteIdx)
-{
- unsigned char byData;
-
- MACvSelectPage1(dwIoBase);
- VNSvInPortB(dwIoBase + MAC_REG_MAR0 + uByteIdx, &byData);
- MACvSelectPage0(dwIoBase);
- return byData;
-}
-
-/*
- * Description:
- * Write MAC Multicast Address Mask
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- * uByteidx - Index of Mask
- * byData - Mask Value to write
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void MACvWriteMultiAddr(void __iomem *dwIoBase, unsigned int uByteIdx, unsigned char byData)
-{
- MACvSelectPage1(dwIoBase);
- VNSvOutPortB(dwIoBase + MAC_REG_MAR0 + uByteIdx, byData);
- MACvSelectPage0(dwIoBase);
-}
-
-/*
- * Description:
- * Set this hash index into multicast address register bit
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- * byHashIdx - Hash index to set
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void MACvSetMultiAddrByHash(void __iomem *dwIoBase, unsigned char byHashIdx)
-{
- unsigned int uByteIdx;
- unsigned char byBitMask;
- unsigned char byOrgValue;
-
- // calculate byte position
- uByteIdx = byHashIdx / 8;
- ASSERT(uByteIdx < 8);
- // calculate bit position
- byBitMask = 1;
- byBitMask <<= (byHashIdx % 8);
- // turn on the bit
- byOrgValue = MACbyReadMultiAddr(dwIoBase, uByteIdx);
- MACvWriteMultiAddr(dwIoBase, uByteIdx, (unsigned char)(byOrgValue | byBitMask));
-}
-
-/*
- * Description:
- * Reset this hash index into multicast address register bit
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- * byHashIdx - Hash index to clear
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void MACvResetMultiAddrByHash(void __iomem *dwIoBase, unsigned char byHashIdx)
-{
- unsigned int uByteIdx;
- unsigned char byBitMask;
- unsigned char byOrgValue;
-
- // calculate byte position
- uByteIdx = byHashIdx / 8;
- ASSERT(uByteIdx < 8);
- // calculate bit position
- byBitMask = 1;
- byBitMask <<= (byHashIdx % 8);
- // turn off the bit
- byOrgValue = MACbyReadMultiAddr(dwIoBase, uByteIdx);
- MACvWriteMultiAddr(dwIoBase, uByteIdx, (unsigned char)(byOrgValue & (~byBitMask)));
-}
-
-/*
- * Description:
- * Set Rx Threshold
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- * byThreshold - Threshold Value
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void MACvSetRxThreshold(void __iomem *dwIoBase, unsigned char byThreshold)
-{
- unsigned char byOrgValue;
-
- ASSERT(byThreshold < 4);
-
- // set FCR0
- VNSvInPortB(dwIoBase + MAC_REG_FCR0, &byOrgValue);
- byOrgValue = (byOrgValue & 0xCF) | (byThreshold << 4);
- VNSvOutPortB(dwIoBase + MAC_REG_FCR0, byOrgValue);
-}
-
-/*
- * Description:
- * Get Rx Threshold
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- * Out:
- * pbyThreshold- Threshold Value Get
- *
- * Return Value: none
- *
- */
-void MACvGetRxThreshold(void __iomem *dwIoBase, unsigned char *pbyThreshold)
-{
- // get FCR0
- VNSvInPortB(dwIoBase + MAC_REG_FCR0, pbyThreshold);
- *pbyThreshold = (*pbyThreshold >> 4) & 0x03;
-}
-
-/*
- * Description:
- * Set Tx Threshold
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- * byThreshold - Threshold Value
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void MACvSetTxThreshold(void __iomem *dwIoBase, unsigned char byThreshold)
-{
- unsigned char byOrgValue;
-
- ASSERT(byThreshold < 4);
-
- // set FCR0
- VNSvInPortB(dwIoBase + MAC_REG_FCR0, &byOrgValue);
- byOrgValue = (byOrgValue & 0xF3) | (byThreshold << 2);
- VNSvOutPortB(dwIoBase + MAC_REG_FCR0, byOrgValue);
-}
-
-/*
- * Description:
- * Get Tx Threshold
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- * Out:
- * pbyThreshold- Threshold Value Get
- *
- * Return Value: none
- *
- */
-void MACvGetTxThreshold(void __iomem *dwIoBase, unsigned char *pbyThreshold)
-{
- // get FCR0
- VNSvInPortB(dwIoBase + MAC_REG_FCR0, pbyThreshold);
- *pbyThreshold = (*pbyThreshold >> 2) & 0x03;
-}
-
-/*
- * Description:
- * Set Dma Length
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- * byDmaLength - Dma Length Value
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void MACvSetDmaLength(void __iomem *dwIoBase, unsigned char byDmaLength)
-{
- unsigned char byOrgValue;
-
- ASSERT(byDmaLength < 4);
-
- // set FCR0
- VNSvInPortB(dwIoBase + MAC_REG_FCR0, &byOrgValue);
- byOrgValue = (byOrgValue & 0xFC) | byDmaLength;
- VNSvOutPortB(dwIoBase + MAC_REG_FCR0, byOrgValue);
-}
-
-/*
- * Description:
- * Get Dma Length
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- * Out:
- * pbyDmaLength- Dma Length Value Get
- *
- * Return Value: none
- *
- */
-void MACvGetDmaLength(void __iomem *dwIoBase, unsigned char *pbyDmaLength)
-{
- // get FCR0
- VNSvInPortB(dwIoBase + MAC_REG_FCR0, pbyDmaLength);
- *pbyDmaLength &= 0x03;
-}
-
-/*
- * Description:
* Set 802.11 Short Retry Limit
*
* Parameters:
@@ -494,25 +187,6 @@ void MACvSetLongRetryLimit(void __iomem *dwIoBase, unsigned char byRetryLimit)
/*
* Description:
- * Get 802.11 Long Retry Limit
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- * Out:
- * pbyRetryLimit - Retry Limit Get
- *
- * Return Value: none
- *
- */
-void MACvGetLongRetryLimit(void __iomem *dwIoBase, unsigned char *pbyRetryLimit)
-{
- // get LRT
- VNSvInPortB(dwIoBase + MAC_REG_LRT, pbyRetryLimit);
-}
-
-/*
- * Description:
* Set MAC Loopback mode
*
* Parameters:
@@ -540,89 +214,6 @@ void MACvSetLoopbackMode(void __iomem *dwIoBase, unsigned char byLoopbackMode)
/*
* Description:
- * Test if MAC in Loopback mode
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- * Out:
- * none
- *
- * Return Value: true if in Loopback mode; otherwise false
- *
- */
-bool MACbIsInLoopbackMode(void __iomem *dwIoBase)
-{
- unsigned char byOrgValue;
-
- VNSvInPortB(dwIoBase + MAC_REG_TEST, &byOrgValue);
- if (byOrgValue & (TEST_LBINT | TEST_LBEXT))
- return true;
- return false;
-}
-
-/*
- * Description:
- * Set MAC Address filter
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- * wFilterType - Filter Type
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void MACvSetPacketFilter(void __iomem *dwIoBase, unsigned short wFilterType)
-{
- unsigned char byOldRCR;
- unsigned char byNewRCR = 0;
-
- // if only in DIRECTED mode, multicast-address will set to zero,
- // but if other mode exist (e.g. PROMISCUOUS), multicast-address
- // will be open
- if (wFilterType & PKT_TYPE_DIRECTED) {
- // set multicast address to accept none
- MACvSelectPage1(dwIoBase);
- VNSvOutPortD(dwIoBase + MAC_REG_MAR0, 0L);
- VNSvOutPortD(dwIoBase + MAC_REG_MAR0 + sizeof(unsigned long), 0L);
- MACvSelectPage0(dwIoBase);
- }
-
- if (wFilterType & (PKT_TYPE_PROMISCUOUS | PKT_TYPE_ALL_MULTICAST)) {
- // set multicast address to accept all
- MACvSelectPage1(dwIoBase);
- VNSvOutPortD(dwIoBase + MAC_REG_MAR0, 0xFFFFFFFFL);
- VNSvOutPortD(dwIoBase + MAC_REG_MAR0 + sizeof(unsigned long), 0xFFFFFFFFL);
- MACvSelectPage0(dwIoBase);
- }
-
- if (wFilterType & PKT_TYPE_PROMISCUOUS) {
- byNewRCR |= (RCR_RXALLTYPE | RCR_UNICAST | RCR_MULTICAST | RCR_BROADCAST);
-
- byNewRCR &= ~RCR_BSSID;
- }
-
- if (wFilterType & (PKT_TYPE_ALL_MULTICAST | PKT_TYPE_MULTICAST))
- byNewRCR |= RCR_MULTICAST;
-
- if (wFilterType & PKT_TYPE_BROADCAST)
- byNewRCR |= RCR_BROADCAST;
-
- if (wFilterType & PKT_TYPE_ERROR_CRC)
- byNewRCR |= RCR_ERRCRC;
-
- VNSvInPortB(dwIoBase + MAC_REG_RCR, &byOldRCR);
- if (byNewRCR != byOldRCR) {
- // Modify the Receive Command Register
- VNSvOutPortB(dwIoBase + MAC_REG_RCR, byNewRCR);
- }
-}
-
-/*
- * Description:
* Save MAC registers to context buffer
*
* Parameters:
@@ -702,47 +293,6 @@ void MACvRestoreContext(void __iomem *dwIoBase, unsigned char *pbyCxtBuf)
/*
* Description:
- * Compare if MAC registers same as context buffer
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- * pbyCxtBuf - Context buffer
- * Out:
- * none
- *
- * Return Value: true if all values are the same; otherwise false
- *
- */
-bool MACbCompareContext(void __iomem *dwIoBase, unsigned char *pbyCxtBuf)
-{
- unsigned long dwData;
-
- // compare MAC context to determine if this is a power lost init,
- // return true for power remaining init, return false for power lost init
-
- // compare CURR_RX_DESC_ADDR, CURR_TX_DESC_ADDR
- VNSvInPortD(dwIoBase + MAC_REG_TXDMAPTR0, &dwData);
- if (dwData != *(unsigned long *)(pbyCxtBuf + MAC_REG_TXDMAPTR0))
- return false;
-
- VNSvInPortD(dwIoBase + MAC_REG_AC0DMAPTR, &dwData);
- if (dwData != *(unsigned long *)(pbyCxtBuf + MAC_REG_AC0DMAPTR))
- return false;
-
- VNSvInPortD(dwIoBase + MAC_REG_RXDMAPTR0, &dwData);
- if (dwData != *(unsigned long *)(pbyCxtBuf + MAC_REG_RXDMAPTR0))
- return false;
-
- VNSvInPortD(dwIoBase + MAC_REG_RXDMAPTR1, &dwData);
- if (dwData != *(unsigned long *)(pbyCxtBuf + MAC_REG_RXDMAPTR1))
- return false;
-
- return true;
-}
-
-/*
- * Description:
* Software Reset MAC
*
* Parameters:
@@ -1018,11 +568,6 @@ void MACvInitialize(void __iomem *dwIoBase)
VNSvOutPortB(dwIoBase + MAC_REG_TFTCTL, TFTCTL_TSFCNTRST);
// enable TSF counter
VNSvOutPortB(dwIoBase + MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
-
- // set packet filter
- // receive directed and broadcast address
-
- MACvSetPacketFilter(dwIoBase, PKT_TYPE_DIRECTED | PKT_TYPE_BROADCAST);
}
/*
@@ -1234,27 +779,6 @@ void MACvTimer0MicroSDelay(void __iomem *dwIoBase, unsigned int uDelay)
* Return Value: none
*
*/
-void MACvOneShotTimer0MicroSec(void __iomem *dwIoBase, unsigned int uDelayTime)
-{
- VNSvOutPortB(dwIoBase + MAC_REG_TMCTL0, 0);
- VNSvOutPortD(dwIoBase + MAC_REG_TMDATA0, uDelayTime);
- VNSvOutPortB(dwIoBase + MAC_REG_TMCTL0, (TMCTL_TMD | TMCTL_TE));
-}
-
-/*
- * Description:
- * Micro Second One shot timer via MAC
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- * uDelay - Delay time
- * Out:
- * none
- *
- * Return Value: none
- *
- */
void MACvOneShotTimer1MicroSec(void __iomem *dwIoBase, unsigned int uDelayTime)
{
VNSvOutPortB(dwIoBase + MAC_REG_TMCTL1, 0);
@@ -1271,102 +795,6 @@ void MACvSetMISCFifo(void __iomem *dwIoBase, unsigned short wOffset, unsigned lo
VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
}
-bool MACbTxDMAOff(void __iomem *dwIoBase, unsigned int idx)
-{
- unsigned char byData;
- unsigned int ww = 0;
-
- if (idx == TYPE_TXDMA0) {
- VNSvOutPortB(dwIoBase + MAC_REG_TXDMACTL0+2, DMACTL_RUN);
- for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortB(dwIoBase + MAC_REG_TXDMACTL0, &byData);
- if (!(byData & DMACTL_RUN))
- break;
- }
- } else if (idx == TYPE_AC0DMA) {
- VNSvOutPortB(dwIoBase + MAC_REG_AC0DMACTL+2, DMACTL_RUN);
- for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortB(dwIoBase + MAC_REG_AC0DMACTL, &byData);
- if (!(byData & DMACTL_RUN))
- break;
- }
- }
- if (ww == W_MAX_TIMEOUT) {
- DBG_PORT80(0x29);
- pr_debug(" DBG_PORT80(0x29)\n");
- return false;
- }
- return true;
-}
-
-void MACvClearBusSusInd(void __iomem *dwIoBase)
-{
- unsigned long dwOrgValue;
- unsigned int ww;
- // check if BcnSusInd enabled
- VNSvInPortD(dwIoBase + MAC_REG_ENCFG , &dwOrgValue);
- if (!(dwOrgValue & EnCFG_BcnSusInd))
- return;
- //Set BcnSusClr
- dwOrgValue = dwOrgValue | EnCFG_BcnSusClr;
- VNSvOutPortD(dwIoBase + MAC_REG_ENCFG, dwOrgValue);
- for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortD(dwIoBase + MAC_REG_ENCFG , &dwOrgValue);
- if (!(dwOrgValue & EnCFG_BcnSusInd))
- break;
- }
- if (ww == W_MAX_TIMEOUT) {
- DBG_PORT80(0x33);
- pr_debug(" DBG_PORT80(0x33)\n");
- }
-}
-
-void MACvEnableBusSusEn(void __iomem *dwIoBase)
-{
- unsigned char byOrgValue;
- unsigned long dwOrgValue;
- unsigned int ww;
- // check if BcnSusInd enabled
- VNSvInPortB(dwIoBase + MAC_REG_CFG , &byOrgValue);
-
- //Set BcnSusEn
- byOrgValue = byOrgValue | CFG_BCNSUSEN;
- VNSvOutPortB(dwIoBase + MAC_REG_ENCFG, byOrgValue);
- for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortD(dwIoBase + MAC_REG_ENCFG , &dwOrgValue);
- if (dwOrgValue & EnCFG_BcnSusInd)
- break;
- }
- if (ww == W_MAX_TIMEOUT) {
- DBG_PORT80(0x34);
- pr_debug(" DBG_PORT80(0x34)\n");
- }
-}
-
-bool MACbFlushSYNCFifo(void __iomem *dwIoBase)
-{
- unsigned char byOrgValue;
- unsigned int ww;
- // Read MACCR
- VNSvInPortB(dwIoBase + MAC_REG_MACCR , &byOrgValue);
-
- // Set SYNCFLUSH
- byOrgValue = byOrgValue | MACCR_SYNCFLUSH;
- VNSvOutPortB(dwIoBase + MAC_REG_MACCR, byOrgValue);
-
- // Check if SyncFlushOK
- for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortB(dwIoBase + MAC_REG_MACCR , &byOrgValue);
- if (byOrgValue & MACCR_SYNCFLUSHOK)
- break;
- }
- if (ww == W_MAX_TIMEOUT) {
- DBG_PORT80(0x35);
- pr_debug(" DBG_PORT80(0x33)\n");
- }
- return true;
-}
-
bool MACbPSWakeup(void __iomem *dwIoBase)
{
unsigned char byOrgValue;
@@ -1484,211 +912,3 @@ void MACvDisableKeyEntry(void __iomem *dwIoBase, unsigned int uEntryIdx)
VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, 0);
VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
}
-
-/*
- * Description:
- * Set the default Key (KeyEntry[10]) by MISCFIFO
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- *
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-
-void MACvSetDefaultKeyEntry(void __iomem *dwIoBase, unsigned int uKeyLen,
- unsigned int uKeyIdx, unsigned long *pdwKey, unsigned char byLocalID)
-{
- unsigned short wOffset;
- unsigned long dwData;
- int ii;
-
- if (byLocalID <= 1)
- return;
-
- pr_debug("MACvSetDefaultKeyEntry\n");
- wOffset = MISCFIFO_KEYETRY0;
- wOffset += (10 * MISCFIFO_KEYENTRYSIZE);
-
- wOffset++;
- wOffset++;
- wOffset += (uKeyIdx * 4);
- // always push 128 bits
- for (ii = 0; ii < 3; ii++) {
- pr_debug("(%d) wOffset: %d, Data: %lX\n",
- ii, wOffset+ii, *pdwKey);
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset+ii);
- VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, *pdwKey++);
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
- }
- dwData = *pdwKey;
- if (uKeyLen == WLAN_WEP104_KEYLEN)
- dwData |= 0x80000000;
-
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset+3);
- VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
- pr_debug("End. wOffset: %d, Data: %lX\n", wOffset+3, dwData);
-}
-
-/*
- * Description:
- * Enable default Key (KeyEntry[10]) by MISCFIFO
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- *
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-/*
- void MACvEnableDefaultKey(void __iomem *dwIoBase, unsigned char byLocalID)
- {
- unsigned short wOffset;
- unsigned long dwData;
-
- if (byLocalID <= 1)
- return;
-
- wOffset = MISCFIFO_KEYETRY0;
- wOffset += (10 * MISCFIFO_KEYENTRYSIZE);
-
- dwData = 0xC0440000;
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
- VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
- pr_debug("MACvEnableDefaultKey: wOffset: %d, Data: %lX\n", wOffset, dwData);
-
- }
-*/
-
-/*
- * Description:
- * Disable default Key (KeyEntry[10]) by MISCFIFO
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- *
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void MACvDisableDefaultKey(void __iomem *dwIoBase)
-{
- unsigned short wOffset;
- unsigned long dwData;
-
- wOffset = MISCFIFO_KEYETRY0;
- wOffset += (10 * MISCFIFO_KEYENTRYSIZE);
-
- dwData = 0x0;
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
- VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
- pr_debug("MACvDisableDefaultKey: wOffset: %d, Data: %lX\n",
- wOffset, dwData);
-}
-
-/*
- * Description:
- * Set the default TKIP Group Key (KeyEntry[10]) by MISCFIFO
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- *
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void MACvSetDefaultTKIPKeyEntry(void __iomem *dwIoBase, unsigned int uKeyLen,
- unsigned int uKeyIdx, unsigned long *pdwKey, unsigned char byLocalID)
-{
- unsigned short wOffset;
- unsigned long dwData;
- int ii;
-
- if (byLocalID <= 1)
- return;
-
- pr_debug("MACvSetDefaultTKIPKeyEntry\n");
- wOffset = MISCFIFO_KEYETRY0;
- // Kyle test : change offset from 10 -> 0
- wOffset += (10 * MISCFIFO_KEYENTRYSIZE);
-
- dwData = 0xC0660000;
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
- VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
- wOffset++;
-
- dwData = 0;
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
- VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
- wOffset++;
-
- wOffset += (uKeyIdx * 4);
- pr_debug("1. wOffset: %d, Data: %lX, idx:%d\n",
- wOffset, *pdwKey, uKeyIdx);
- // always push 128 bits
- for (ii = 0; ii < 4; ii++) {
- pr_debug("2.(%d) wOffset: %d, Data: %lX\n",
- ii, wOffset+ii, *pdwKey);
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset+ii);
- VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, *pdwKey++);
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
- }
-}
-
-/*
- * Description:
- * Set the Key Control by MISCFIFO
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- *
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-
-void MACvSetDefaultKeyCtl(void __iomem *dwIoBase, unsigned short wKeyCtl, unsigned int uEntryIdx, unsigned char byLocalID)
-{
- unsigned short wOffset;
- unsigned long dwData;
-
- if (byLocalID <= 1)
- return;
-
- pr_debug("MACvSetKeyEntry\n");
- wOffset = MISCFIFO_KEYETRY0;
- wOffset += (uEntryIdx * MISCFIFO_KEYENTRYSIZE);
-
- dwData = 0;
- dwData |= wKeyCtl;
- dwData <<= 16;
- dwData |= 0xffff;
- pr_debug("1. wOffset: %d, Data: %lX, KeyCtl:%X\n",
- wOffset, dwData, wKeyCtl);
-
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
- VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
-}
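
MACvSetPacketFilter() and the multicast-mask helpers removed above lose their last callers once the NDIS-style configuration path goes away; under mac80211 the equivalent receive-filter policy normally lives in the driver's configure_filter callback. The vt6655 callback is not in this hunk, so the following is only a sketch of the usual shape, with vnt_configure as a hypothetical name and the RCR bit mapping taken from the removed code:

	static void vnt_configure(struct ieee80211_hw *hw,
				  unsigned int changed_flags,
				  unsigned int *total_flags, u64 multicast)
	{
		struct vnt_private *priv = hw->priv;
		u8 rcr = RCR_UNICAST | RCR_BROADCAST | RCR_BSSID;

		/* Map the filter flags mac80211 requests onto RCR bits; a full
		 * version would also program the MAR0/MAR1 hash registers the
		 * way the removed MACvSetMultiAddrByHash() did. */
		if (*total_flags & FIF_ALLMULTI)
			rcr |= RCR_MULTICAST;
		if (*total_flags & FIF_OTHER_BSS)
			rcr &= ~RCR_BSSID;

		VNSvOutPortB(priv->PortOffset + MAC_REG_RCR, rcr);

		*total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS |
				FIF_BCN_PRBRESP_PROMISC;
	}
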
diff --git a/drivers/staging/vt6655/mac.h b/drivers/staging/vt6655/mac.h
index 0bf93759b6a..e1e7e10435f 100644
--- a/drivers/staging/vt6655/mac.h
+++ b/drivers/staging/vt6655/mac.h
@@ -34,7 +34,6 @@
#ifndef __MAC_H__
#define __MAC_H__
-#include "ttype.h"
#include "tmacro.h"
#include "upc.h"
@@ -575,17 +574,6 @@
#define MAC_LB_INTERNAL 0x01 //
#define MAC_LB_NONE 0x00 //
-// Ethernet address filter type
-#define PKT_TYPE_NONE 0x00 // turn off receiver
-#define PKT_TYPE_ALL_MULTICAST 0x80
-#define PKT_TYPE_PROMISCUOUS 0x40
-#define PKT_TYPE_DIRECTED 0x20 // obsolete, directed address is always accepted
-#define PKT_TYPE_BROADCAST 0x10
-#define PKT_TYPE_MULTICAST 0x08
-#define PKT_TYPE_ERROR_WPA 0x04
-#define PKT_TYPE_ERROR_CRC 0x02
-#define PKT_TYPE_BSSID 0x01
-
#define Default_BI 0x200
// MiscFIFO Offset
@@ -965,48 +953,20 @@ do { \
#define MACvSetRFLE_LatchBase(dwIoBase) \
MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_RFLEOPT)
-/*--------------------- Export Classes ----------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-extern unsigned short TxRate_iwconfig;//2008-5-8 <add> by chester
-void MACvReadAllRegs(void __iomem *dwIoBase, unsigned char *pbyMacRegs);
-
bool MACbIsRegBitsOn(void __iomem *dwIoBase, unsigned char byRegOfs, unsigned char byTestBits);
bool MACbIsRegBitsOff(void __iomem *dwIoBase, unsigned char byRegOfs, unsigned char byTestBits);
bool MACbIsIntDisable(void __iomem *dwIoBase);
-unsigned char MACbyReadMultiAddr(void __iomem *dwIoBase, unsigned int uByteIdx);
-void MACvWriteMultiAddr(void __iomem *dwIoBase, unsigned int uByteIdx, unsigned char byData);
-void MACvSetMultiAddrByHash(void __iomem *dwIoBase, unsigned char byHashIdx);
-void MACvResetMultiAddrByHash(void __iomem *dwIoBase, unsigned char byHashIdx);
-
-void MACvSetRxThreshold(void __iomem *dwIoBase, unsigned char byThreshold);
-void MACvGetRxThreshold(void __iomem *dwIoBase, unsigned char *pbyThreshold);
-
-void MACvSetTxThreshold(void __iomem *dwIoBase, unsigned char byThreshold);
-void MACvGetTxThreshold(void __iomem *dwIoBase, unsigned char *pbyThreshold);
-
-void MACvSetDmaLength(void __iomem *dwIoBase, unsigned char byDmaLength);
-void MACvGetDmaLength(void __iomem *dwIoBase, unsigned char *pbyDmaLength);
-
void MACvSetShortRetryLimit(void __iomem *dwIoBase, unsigned char byRetryLimit);
-void MACvGetShortRetryLimit(void __iomem *dwIoBase, unsigned char *pbyRetryLimit);
void MACvSetLongRetryLimit(void __iomem *dwIoBase, unsigned char byRetryLimit);
void MACvGetLongRetryLimit(void __iomem *dwIoBase, unsigned char *pbyRetryLimit);
void MACvSetLoopbackMode(void __iomem *dwIoBase, unsigned char byLoopbackMode);
-bool MACbIsInLoopbackMode(void __iomem *dwIoBase);
-
-void MACvSetPacketFilter(void __iomem *dwIoBase, unsigned short wFilterType);
void MACvSaveContext(void __iomem *dwIoBase, unsigned char *pbyCxtBuf);
void MACvRestoreContext(void __iomem *dwIoBase, unsigned char *pbyCxtBuf);
-bool MACbCompareContext(void __iomem *dwIoBase, unsigned char *pbyCxtBuf);
bool MACbSoftwareReset(void __iomem *dwIoBase);
bool MACbSafeSoftwareReset(void __iomem *dwIoBase);
@@ -1023,27 +983,14 @@ void MACvSetCurrAC0DescAddrEx(void __iomem *dwIoBase, unsigned long dwCurrDescAd
void MACvSetCurrSyncDescAddrEx(void __iomem *dwIoBase, unsigned long dwCurrDescAddr);
void MACvSetCurrATIMDescAddrEx(void __iomem *dwIoBase, unsigned long dwCurrDescAddr);
void MACvTimer0MicroSDelay(void __iomem *dwIoBase, unsigned int uDelay);
-void MACvOneShotTimer0MicroSec(void __iomem *dwIoBase, unsigned int uDelayTime);
void MACvOneShotTimer1MicroSec(void __iomem *dwIoBase, unsigned int uDelayTime);
void MACvSetMISCFifo(void __iomem *dwIoBase, unsigned short wOffset, unsigned long dwData);
-bool MACbTxDMAOff(void __iomem *dwIoBase, unsigned int idx);
-
-void MACvClearBusSusInd(void __iomem *dwIoBase);
-void MACvEnableBusSusEn(void __iomem *dwIoBase);
-
-bool MACbFlushSYNCFifo(void __iomem *dwIoBase);
bool MACbPSWakeup(void __iomem *dwIoBase);
void MACvSetKeyEntry(void __iomem *dwIoBase, unsigned short wKeyCtl, unsigned int uEntryIdx,
unsigned int uKeyIdx, unsigned char *pbyAddr, u32 *pdwKey, unsigned char byLocalID);
void MACvDisableKeyEntry(void __iomem *dwIoBase, unsigned int uEntryIdx);
-void MACvSetDefaultKeyEntry(void __iomem *dwIoBase, unsigned int uKeyLen,
- unsigned int uKeyIdx, unsigned long *pdwKey, unsigned char byLocalID);
-void MACvDisableDefaultKey(void __iomem *dwIoBase);
-void MACvSetDefaultTKIPKeyEntry(void __iomem *dwIoBase, unsigned int uKeyLen,
- unsigned int uKeyIdx, unsigned long *pdwKey, unsigned char byLocalID);
-void MACvSetDefaultKeyCtl(void __iomem *dwIoBase, unsigned short wKeyCtl, unsigned int uEntryIdx, unsigned char byLocalID);
#endif // __MAC_H__
diff --git a/drivers/staging/vt6655/mib.c b/drivers/staging/vt6655/mib.c
index 111c0187708..d2f351d19ff 100644
--- a/drivers/staging/vt6655/mib.c
+++ b/drivers/staging/vt6655/mib.c
@@ -25,24 +25,15 @@
* Date: May 21, 1996
*
* Functions:
- * STAvClearAllCounter - Clear All MIB Counter
* STAvUpdateIstStatCounter - Update ISR statistic counter
- * STAvUpdateRDStatCounter - Update Rx statistic counter
- * STAvUpdateRDStatCounterEx - Update Rx statistic counter and copy rcv data
- * STAvUpdateTDStatCounter - Update Tx statistic counter
- * STAvUpdateTDStatCounterEx - Update Tx statistic counter and copy tx data
* STAvUpdate802_11Counter - Update 802.11 mib counter
*
* Revision History:
*
*/
-#include "upc.h"
#include "mac.h"
-#include "tether.h"
#include "mib.h"
-#include "wctl.h"
-#include "baseband.h"
/*--------------------- Static Classes ----------------------------*/
@@ -55,24 +46,6 @@
/*--------------------- Export Functions --------------------------*/
/*
- * Description: Clear All Statistic Counter
- *
- * Parameters:
- * In:
- * pStatistic - Pointer to Statistic Counter Data Structure
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void STAvClearAllCounter(PSStatCounter pStatistic)
-{
- // set memory to zero
- memset(pStatistic, 0, sizeof(SStatCounter));
-}
-
-/*
* Description: Update Isr Statistic Counter
*
* Parameters:
@@ -139,373 +112,6 @@ void STAvUpdateIsrStatCounter(PSStatCounter pStatistic, unsigned long dwIsr)
}
/*
- * Description: Update Rx Statistic Counter
- *
- * Parameters:
- * In:
- * pStatistic - Pointer to Statistic Counter Data Structure
- * byRSR - Rx Status
- * byNewRSR - Rx Status
- * pbyBuffer - Rx Buffer
- * cbFrameLength - Rx Length
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void STAvUpdateRDStatCounter(PSStatCounter pStatistic,
- unsigned char byRSR, unsigned char byNewRSR, unsigned char byRxRate,
- unsigned char *pbyBuffer, unsigned int cbFrameLength)
-{
- //need change
- PS802_11Header pHeader = (PS802_11Header)pbyBuffer;
-
- if (byRSR & RSR_ADDROK)
- pStatistic->dwRsrADDROk++;
- if (byRSR & RSR_CRCOK) {
- pStatistic->dwRsrCRCOk++;
-
- pStatistic->ullRsrOK++;
-
- if (cbFrameLength >= ETH_ALEN) {
- // update counters in case of successful transmit
- if (byRSR & RSR_ADDRBROAD) {
- pStatistic->ullRxBroadcastFrames++;
- pStatistic->ullRxBroadcastBytes += (unsigned long long) cbFrameLength;
- } else if (byRSR & RSR_ADDRMULTI) {
- pStatistic->ullRxMulticastFrames++;
- pStatistic->ullRxMulticastBytes += (unsigned long long) cbFrameLength;
- } else {
- pStatistic->ullRxDirectedFrames++;
- pStatistic->ullRxDirectedBytes += (unsigned long long) cbFrameLength;
- }
- }
- }
-
- if (byRxRate == 22) {
- pStatistic->CustomStat.ullRsr11M++;
- if (byRSR & RSR_CRCOK)
- pStatistic->CustomStat.ullRsr11MCRCOk++;
-
- pr_debug("11M: ALL[%d], OK[%d]:[%02x]\n",
- (int)pStatistic->CustomStat.ullRsr11M,
- (int)pStatistic->CustomStat.ullRsr11MCRCOk, byRSR);
- } else if (byRxRate == 11) {
- pStatistic->CustomStat.ullRsr5M++;
- if (byRSR & RSR_CRCOK)
- pStatistic->CustomStat.ullRsr5MCRCOk++;
-
- pr_debug(" 5M: ALL[%d], OK[%d]:[%02x]\n",
- (int)pStatistic->CustomStat.ullRsr5M,
- (int)pStatistic->CustomStat.ullRsr5MCRCOk, byRSR);
- } else if (byRxRate == 4) {
- pStatistic->CustomStat.ullRsr2M++;
- if (byRSR & RSR_CRCOK)
- pStatistic->CustomStat.ullRsr2MCRCOk++;
-
- pr_debug(" 2M: ALL[%d], OK[%d]:[%02x]\n",
- (int)pStatistic->CustomStat.ullRsr2M,
- (int)pStatistic->CustomStat.ullRsr2MCRCOk, byRSR);
- } else if (byRxRate == 2) {
- pStatistic->CustomStat.ullRsr1M++;
- if (byRSR & RSR_CRCOK)
- pStatistic->CustomStat.ullRsr1MCRCOk++;
-
- pr_debug(" 1M: ALL[%d], OK[%d]:[%02x]\n",
- (int)pStatistic->CustomStat.ullRsr1M,
- (int)pStatistic->CustomStat.ullRsr1MCRCOk, byRSR);
- } else if (byRxRate == 12) {
- pStatistic->CustomStat.ullRsr6M++;
- if (byRSR & RSR_CRCOK)
- pStatistic->CustomStat.ullRsr6MCRCOk++;
-
- pr_debug(" 6M: ALL[%d], OK[%d]\n",
- (int)pStatistic->CustomStat.ullRsr6M,
- (int)pStatistic->CustomStat.ullRsr6MCRCOk);
- } else if (byRxRate == 18) {
- pStatistic->CustomStat.ullRsr9M++;
- if (byRSR & RSR_CRCOK)
- pStatistic->CustomStat.ullRsr9MCRCOk++;
-
- pr_debug(" 9M: ALL[%d], OK[%d]\n",
- (int)pStatistic->CustomStat.ullRsr9M,
- (int)pStatistic->CustomStat.ullRsr9MCRCOk);
- } else if (byRxRate == 24) {
- pStatistic->CustomStat.ullRsr12M++;
- if (byRSR & RSR_CRCOK)
- pStatistic->CustomStat.ullRsr12MCRCOk++;
-
- pr_debug("12M: ALL[%d], OK[%d]\n",
- (int)pStatistic->CustomStat.ullRsr12M,
- (int)pStatistic->CustomStat.ullRsr12MCRCOk);
- } else if (byRxRate == 36) {
- pStatistic->CustomStat.ullRsr18M++;
- if (byRSR & RSR_CRCOK)
- pStatistic->CustomStat.ullRsr18MCRCOk++;
-
- pr_debug("18M: ALL[%d], OK[%d]\n",
- (int)pStatistic->CustomStat.ullRsr18M,
- (int)pStatistic->CustomStat.ullRsr18MCRCOk);
- } else if (byRxRate == 48) {
- pStatistic->CustomStat.ullRsr24M++;
- if (byRSR & RSR_CRCOK)
- pStatistic->CustomStat.ullRsr24MCRCOk++;
-
- pr_debug("24M: ALL[%d], OK[%d]\n",
- (int)pStatistic->CustomStat.ullRsr24M,
- (int)pStatistic->CustomStat.ullRsr24MCRCOk);
- } else if (byRxRate == 72) {
- pStatistic->CustomStat.ullRsr36M++;
- if (byRSR & RSR_CRCOK)
- pStatistic->CustomStat.ullRsr36MCRCOk++;
-
- pr_debug("36M: ALL[%d], OK[%d]\n",
- (int)pStatistic->CustomStat.ullRsr36M,
- (int)pStatistic->CustomStat.ullRsr36MCRCOk);
- } else if (byRxRate == 96) {
- pStatistic->CustomStat.ullRsr48M++;
- if (byRSR & RSR_CRCOK)
- pStatistic->CustomStat.ullRsr48MCRCOk++;
-
- pr_debug("48M: ALL[%d], OK[%d]\n",
- (int)pStatistic->CustomStat.ullRsr48M,
- (int)pStatistic->CustomStat.ullRsr48MCRCOk);
- } else if (byRxRate == 108) {
- pStatistic->CustomStat.ullRsr54M++;
- if (byRSR & RSR_CRCOK)
- pStatistic->CustomStat.ullRsr54MCRCOk++;
-
- pr_debug("54M: ALL[%d], OK[%d]\n",
- (int)pStatistic->CustomStat.ullRsr54M,
- (int)pStatistic->CustomStat.ullRsr54MCRCOk);
- } else {
- pr_debug("Unknown: Total[%d], CRCOK[%d]\n",
- (int)pStatistic->dwRsrRxPacket+1,
- (int)pStatistic->dwRsrCRCOk);
- }
-
- if (byRSR & RSR_BSSIDOK)
- pStatistic->dwRsrBSSIDOk++;
-
- if (byRSR & RSR_BCNSSIDOK)
- pStatistic->dwRsrBCNSSIDOk++;
- if (byRSR & RSR_IVLDLEN) //invalid len (> 2312 byte)
- pStatistic->dwRsrLENErr++;
- if (byRSR & RSR_IVLDTYP) //invalid packet type
- pStatistic->dwRsrTYPErr++;
- if (byRSR & (RSR_IVLDTYP | RSR_IVLDLEN))
- pStatistic->dwRsrErr++;
-
- if (byNewRSR & NEWRSR_DECRYPTOK)
- pStatistic->dwNewRsrDECRYPTOK++;
- if (byNewRSR & NEWRSR_CFPIND)
- pStatistic->dwNewRsrCFP++;
- if (byNewRSR & NEWRSR_HWUTSF)
- pStatistic->dwNewRsrUTSF++;
- if (byNewRSR & NEWRSR_BCNHITAID)
- pStatistic->dwNewRsrHITAID++;
- if (byNewRSR & NEWRSR_BCNHITAID0)
- pStatistic->dwNewRsrHITAID0++;
-
- // increase rx packet count
- pStatistic->dwRsrRxPacket++;
- pStatistic->dwRsrRxOctet += cbFrameLength;
-
- if (IS_TYPE_DATA(pbyBuffer))
- pStatistic->dwRsrRxData++;
- else if (IS_TYPE_MGMT(pbyBuffer))
- pStatistic->dwRsrRxManage++;
- else if (IS_TYPE_CONTROL(pbyBuffer))
- pStatistic->dwRsrRxControl++;
-
- if (byRSR & RSR_ADDRBROAD)
- pStatistic->dwRsrBroadcast++;
- else if (byRSR & RSR_ADDRMULTI)
- pStatistic->dwRsrMulticast++;
- else
- pStatistic->dwRsrDirected++;
-
- if (WLAN_GET_FC_MOREFRAG(pHeader->wFrameCtl))
- pStatistic->dwRsrRxFragment++;
-
- if (cbFrameLength < ETH_ZLEN + 4)
- pStatistic->dwRsrRunt++;
- else if (cbFrameLength == ETH_ZLEN + 4)
- pStatistic->dwRsrRxFrmLen64++;
- else if ((65 <= cbFrameLength) && (cbFrameLength <= 127))
- pStatistic->dwRsrRxFrmLen65_127++;
- else if ((128 <= cbFrameLength) && (cbFrameLength <= 255))
- pStatistic->dwRsrRxFrmLen128_255++;
- else if ((256 <= cbFrameLength) && (cbFrameLength <= 511))
- pStatistic->dwRsrRxFrmLen256_511++;
- else if ((512 <= cbFrameLength) && (cbFrameLength <= 1023))
- pStatistic->dwRsrRxFrmLen512_1023++;
- else if ((1024 <= cbFrameLength) && (cbFrameLength <= ETH_FRAME_LEN + 4))
- pStatistic->dwRsrRxFrmLen1024_1518++;
- else if (cbFrameLength > ETH_FRAME_LEN + 4)
- pStatistic->dwRsrLong++;
-}
-
-/*
- * Description: Update Rx Statistic Counter and copy Rx buffer
- *
- * Parameters:
- * In:
- * pStatistic - Pointer to Statistic Counter Data Structure
- * byRSR - Rx Status
- * byNewRSR - Rx Status
- * pbyBuffer - Rx Buffer
- * cbFrameLength - Rx Length
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-
-void
-STAvUpdateRDStatCounterEx(
- PSStatCounter pStatistic,
- unsigned char byRSR,
- unsigned char byNewRSR,
- unsigned char byRxRate,
- unsigned char *pbyBuffer,
- unsigned int cbFrameLength
-)
-{
- STAvUpdateRDStatCounter(
- pStatistic,
- byRSR,
- byNewRSR,
- byRxRate,
- pbyBuffer,
- cbFrameLength
-);
-
- // rx length
- pStatistic->dwCntRxFrmLength = cbFrameLength;
- // rx pattern, we just see 10 bytes for sample
- memcpy(pStatistic->abyCntRxPattern, (unsigned char *)pbyBuffer, 10);
-}
-
-/*
- * Description: Update Tx Statistic Counter
- *
- * Parameters:
- * In:
- * pStatistic - Pointer to Statistic Counter Data Structure
- * byTSR0 - Tx Status
- * byTSR1 - Tx Status
- * pbyBuffer - Tx Buffer
- * cbFrameLength - Tx Length
- * uIdx - Index of Tx DMA
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void
-STAvUpdateTDStatCounter(
- PSStatCounter pStatistic,
- unsigned char byTSR0,
- unsigned char byTSR1,
- unsigned char *pbyBuffer,
- unsigned int cbFrameLength,
- unsigned int uIdx
-)
-{
- PWLAN_80211HDR_A4 pHeader;
- unsigned char *pbyDestAddr;
- unsigned char byTSR0_NCR = byTSR0 & TSR0_NCR;
-
- pHeader = (PWLAN_80211HDR_A4) pbyBuffer;
- if (WLAN_GET_FC_TODS(pHeader->wFrameCtl) == 0)
- pbyDestAddr = &(pHeader->abyAddr1[0]);
- else
- pbyDestAddr = &(pHeader->abyAddr3[0]);
-
- // increase tx packet count
- pStatistic->dwTsrTxPacket[uIdx]++;
- pStatistic->dwTsrTxOctet[uIdx] += cbFrameLength;
-
- if (byTSR0_NCR != 0) {
- pStatistic->dwTsrRetry[uIdx]++;
- pStatistic->dwTsrTotalRetry[uIdx] += byTSR0_NCR;
-
- if (byTSR0_NCR == 1)
- pStatistic->dwTsrOnceRetry[uIdx]++;
- else
- pStatistic->dwTsrMoreThanOnceRetry[uIdx]++;
- }
-
- if ((byTSR1&(TSR1_TERR|TSR1_RETRYTMO|TSR1_TMO|ACK_DATA)) == 0) {
- pStatistic->ullTsrOK[uIdx]++;
- pStatistic->CustomStat.ullTsrAllOK =
- (pStatistic->ullTsrOK[TYPE_AC0DMA] + pStatistic->ullTsrOK[TYPE_TXDMA0]);
- // update counters in case that successful transmit
- if (is_broadcast_ether_addr(pbyDestAddr)) {
- pStatistic->ullTxBroadcastFrames[uIdx]++;
- pStatistic->ullTxBroadcastBytes[uIdx] += (unsigned long long) cbFrameLength;
- } else if (is_multicast_ether_addr(pbyDestAddr)) {
- pStatistic->ullTxMulticastFrames[uIdx]++;
- pStatistic->ullTxMulticastBytes[uIdx] += (unsigned long long) cbFrameLength;
- } else {
- pStatistic->ullTxDirectedFrames[uIdx]++;
- pStatistic->ullTxDirectedBytes[uIdx] += (unsigned long long) cbFrameLength;
- }
- } else {
- if (byTSR1 & TSR1_TERR)
- pStatistic->dwTsrErr[uIdx]++;
- if (byTSR1 & TSR1_RETRYTMO)
- pStatistic->dwTsrRetryTimeout[uIdx]++;
- if (byTSR1 & TSR1_TMO)
- pStatistic->dwTsrTransmitTimeout[uIdx]++;
- if (byTSR1 & ACK_DATA)
- pStatistic->dwTsrACKData[uIdx]++;
- }
-
- if (is_broadcast_ether_addr(pbyDestAddr))
- pStatistic->dwTsrBroadcast[uIdx]++;
- else if (is_multicast_ether_addr(pbyDestAddr))
- pStatistic->dwTsrMulticast[uIdx]++;
- else
- pStatistic->dwTsrDirected[uIdx]++;
-}
-
-/*
- * Description: Update Tx Statistic Counter and copy Tx buffer
- *
- * Parameters:
- * In:
- * pStatistic - Pointer to Statistic Counter Data Structure
- * pbyBuffer - Tx Buffer
- * cbFrameLength - Tx Length
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void
-STAvUpdateTDStatCounterEx(
- PSStatCounter pStatistic,
- unsigned char *pbyBuffer,
- unsigned long cbFrameLength
-)
-{
- unsigned int uPktLength;
-
- uPktLength = (unsigned int)cbFrameLength;
-
- // tx length
- pStatistic->dwCntTxBufLength = uPktLength;
- // tx pattern, we just see 16 bytes for sample
- memcpy(pStatistic->abyCntTxPattern, pbyBuffer, 16);
-}
-
-/*
* Description: Update 802.11 mib counter
*
* Parameters:
@@ -526,37 +132,8 @@ STAvUpdate802_11Counter(
unsigned long dwCounter
)
{
- p802_11Counter->MulticastTransmittedFrameCount = (unsigned long long) (pStatistic->dwTsrBroadcast[TYPE_AC0DMA] +
- pStatistic->dwTsrBroadcast[TYPE_TXDMA0] +
- pStatistic->dwTsrMulticast[TYPE_AC0DMA] +
- pStatistic->dwTsrMulticast[TYPE_TXDMA0]);
- p802_11Counter->FailedCount = (unsigned long long) (pStatistic->dwTsrErr[TYPE_AC0DMA] + pStatistic->dwTsrErr[TYPE_TXDMA0]);
- p802_11Counter->RetryCount = (unsigned long long) (pStatistic->dwTsrRetry[TYPE_AC0DMA] + pStatistic->dwTsrRetry[TYPE_TXDMA0]);
- p802_11Counter->MultipleRetryCount = (unsigned long long) (pStatistic->dwTsrMoreThanOnceRetry[TYPE_AC0DMA] +
- pStatistic->dwTsrMoreThanOnceRetry[TYPE_TXDMA0]);
p802_11Counter->RTSSuccessCount += (unsigned long long) (dwCounter & 0x000000ff);
p802_11Counter->RTSFailureCount += (unsigned long long) ((dwCounter & 0x0000ff00) >> 8);
p802_11Counter->ACKFailureCount += (unsigned long long) ((dwCounter & 0x00ff0000) >> 16);
p802_11Counter->FCSErrorCount += (unsigned long long) ((dwCounter & 0xff000000) >> 24);
- p802_11Counter->MulticastReceivedFrameCount = (unsigned long long) (pStatistic->dwRsrBroadcast +
- pStatistic->dwRsrMulticast);
-}
-
-/*
- * Description: Clear 802.11 mib counter
- *
- * Parameters:
- * In:
- * p802_11Counter - Pointer to 802.11 mib counter
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void
-STAvClear802_11Counter(PSDot11Counters p802_11Counter)
-{
- // set memory to zero
- memset(p802_11Counter, 0, sizeof(SDot11Counters));
}
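
What survives of STAvUpdate802_11Counter() treats dwCounter as four packed 8-bit hardware counters (RTS success, RTS failure, ACK failure, FCS error, in ascending byte order). A typical call site would latch the packed register once per interrupt and accumulate it; in this sketch MAC_REG_MIBCNTR and the two device-context field names are assumptions, not taken from this hunk:

	unsigned long mib_counter;

	/* Read the packed 8/8/8/8 MIB counter and fold it into the totals. */
	VNSvInPortD(priv->PortOffset + MAC_REG_MIBCNTR, &mib_counter);
	STAvUpdate802_11Counter(&priv->s802_11Counter, &priv->scStatistic,
				mib_counter);
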
diff --git a/drivers/staging/vt6655/mib.h b/drivers/staging/vt6655/mib.h
index 732bddaf5b9..5cb59b8a1c7 100644
--- a/drivers/staging/vt6655/mib.h
+++ b/drivers/staging/vt6655/mib.h
@@ -29,8 +29,6 @@
#ifndef __MIB_H__
#define __MIB_H__
-#include "ttype.h"
-#include "tether.h"
#include "desc.h"
//
@@ -38,136 +36,16 @@
//
typedef struct tagSDot11Counters {
- unsigned long Length;
- unsigned long long TransmittedFragmentCount;
- unsigned long long MulticastTransmittedFrameCount;
- unsigned long long FailedCount;
- unsigned long long RetryCount;
- unsigned long long MultipleRetryCount;
unsigned long long RTSSuccessCount;
unsigned long long RTSFailureCount;
unsigned long long ACKFailureCount;
- unsigned long long FrameDuplicateCount;
- unsigned long long ReceivedFragmentCount;
- unsigned long long MulticastReceivedFrameCount;
unsigned long long FCSErrorCount;
- unsigned long long TKIPLocalMICFailures;
- unsigned long long TKIPRemoteMICFailures;
- unsigned long long TKIPICVErrors;
- unsigned long long TKIPCounterMeasuresInvoked;
- unsigned long long TKIPReplays;
- unsigned long long CCMPFormatErrors;
- unsigned long long CCMPReplays;
- unsigned long long CCMPDecryptErrors;
- unsigned long long FourWayHandshakeFailures;
} SDot11Counters, *PSDot11Counters;
//
-// MIB2 counter
-//
-typedef struct tagSMib2Counter {
- long ifIndex;
- char ifDescr[256];
- long ifType;
- long ifMtu;
- unsigned long ifSpeed;
- unsigned char ifPhysAddress[ETH_ALEN];
- long ifAdminStatus;
- long ifOperStatus;
- unsigned long ifLastChange;
- unsigned long ifInOctets;
- unsigned long ifInUcastPkts;
- unsigned long ifInNUcastPkts;
- unsigned long ifInDiscards;
- unsigned long ifInErrors;
- unsigned long ifInUnknownProtos;
- unsigned long ifOutOctets;
- unsigned long ifOutUcastPkts;
- unsigned long ifOutNUcastPkts;
- unsigned long ifOutDiscards;
- unsigned long ifOutErrors;
- unsigned long ifOutQLen;
- unsigned long ifSpecific;
-} SMib2Counter, *PSMib2Counter;
-
-// Value in the ifType entry
-#define WIRELESSLANIEEE80211b 6
-
-// Value in the ifAdminStatus/ifOperStatus entry
-#define UP 1
-#define DOWN 2
-#define TESTING 3
-
-//
-// RMON counter
-//
-typedef struct tagSRmonCounter {
- long etherStatsIndex;
- unsigned long etherStatsDataSource;
- unsigned long etherStatsDropEvents;
- unsigned long etherStatsOctets;
- unsigned long etherStatsPkts;
- unsigned long etherStatsBroadcastPkts;
- unsigned long etherStatsMulticastPkts;
- unsigned long etherStatsCRCAlignErrors;
- unsigned long etherStatsUndersizePkts;
- unsigned long etherStatsOversizePkts;
- unsigned long etherStatsFragments;
- unsigned long etherStatsJabbers;
- unsigned long etherStatsCollisions;
- unsigned long etherStatsPkt64Octets;
- unsigned long etherStatsPkt65to127Octets;
- unsigned long etherStatsPkt128to255Octets;
- unsigned long etherStatsPkt256to511Octets;
- unsigned long etherStatsPkt512to1023Octets;
- unsigned long etherStatsPkt1024to1518Octets;
- unsigned long etherStatsOwners;
- unsigned long etherStatsStatus;
-} SRmonCounter, *PSRmonCounter;
-
-//
-// Custom counter
-//
-typedef struct tagSCustomCounters {
- unsigned long Length;
-
- unsigned long long ullTsrAllOK;
-
- unsigned long long ullRsr11M;
- unsigned long long ullRsr5M;
- unsigned long long ullRsr2M;
- unsigned long long ullRsr1M;
-
- unsigned long long ullRsr11MCRCOk;
- unsigned long long ullRsr5MCRCOk;
- unsigned long long ullRsr2MCRCOk;
- unsigned long long ullRsr1MCRCOk;
-
- unsigned long long ullRsr54M;
- unsigned long long ullRsr48M;
- unsigned long long ullRsr36M;
- unsigned long long ullRsr24M;
- unsigned long long ullRsr18M;
- unsigned long long ullRsr12M;
- unsigned long long ullRsr9M;
- unsigned long long ullRsr6M;
-
- unsigned long long ullRsr54MCRCOk;
- unsigned long long ullRsr48MCRCOk;
- unsigned long long ullRsr36MCRCOk;
- unsigned long long ullRsr24MCRCOk;
- unsigned long long ullRsr18MCRCOk;
- unsigned long long ullRsr12MCRCOk;
- unsigned long long ullRsr9MCRCOk;
- unsigned long long ullRsr6MCRCOk;
-} SCustomCounters, *PSCustomCounters;
-
-//
// Custom counter
//
typedef struct tagSISRCounters {
- unsigned long Length;
-
unsigned long dwIsrTx0OK;
unsigned long dwIsrAC0TxOK;
unsigned long dwIsrBeaconTxOK;
@@ -183,161 +61,22 @@ typedef struct tagSISRCounters {
unsigned long dwIsrUnknown;
unsigned long dwIsrRx1OK;
- unsigned long dwIsrATIMTxOK;
- unsigned long dwIsrSYNCTxOK;
- unsigned long dwIsrCFPEnd;
- unsigned long dwIsrATIMEnd;
- unsigned long dwIsrSYNCFlushOK;
unsigned long dwIsrSTIMER1Int;
} SISRCounters, *PSISRCounters;
-// Value in the etherStatsStatus entry
-#define VALID 1
-#define CREATE_REQUEST 2
-#define UNDER_CREATION 3
-#define INVALID 4
-
//
// statistic counter
//
typedef struct tagSStatCounter {
- // RSR status count
- //
- unsigned long dwRsrFrmAlgnErr;
- unsigned long dwRsrErr;
- unsigned long dwRsrCRCErr;
- unsigned long dwRsrCRCOk;
- unsigned long dwRsrBSSIDOk;
- unsigned long dwRsrADDROk;
- unsigned long dwRsrBCNSSIDOk;
- unsigned long dwRsrLENErr;
- unsigned long dwRsrTYPErr;
-
- unsigned long dwNewRsrDECRYPTOK;
- unsigned long dwNewRsrCFP;
- unsigned long dwNewRsrUTSF;
- unsigned long dwNewRsrHITAID;
- unsigned long dwNewRsrHITAID0;
-
- unsigned long dwRsrLong;
- unsigned long dwRsrRunt;
-
- unsigned long dwRsrRxControl;
- unsigned long dwRsrRxData;
- unsigned long dwRsrRxManage;
-
- unsigned long dwRsrRxPacket;
- unsigned long dwRsrRxOctet;
- unsigned long dwRsrBroadcast;
- unsigned long dwRsrMulticast;
- unsigned long dwRsrDirected;
- // 64-bit OID
- unsigned long long ullRsrOK;
-
- // for some optional OIDs (64 bits) and DMI support
- unsigned long long ullRxBroadcastBytes;
- unsigned long long ullRxMulticastBytes;
- unsigned long long ullRxDirectedBytes;
- unsigned long long ullRxBroadcastFrames;
- unsigned long long ullRxMulticastFrames;
- unsigned long long ullRxDirectedFrames;
-
- unsigned long dwRsrRxFragment;
- unsigned long dwRsrRxFrmLen64;
- unsigned long dwRsrRxFrmLen65_127;
- unsigned long dwRsrRxFrmLen128_255;
- unsigned long dwRsrRxFrmLen256_511;
- unsigned long dwRsrRxFrmLen512_1023;
- unsigned long dwRsrRxFrmLen1024_1518;
-
- // TSR status count
- //
- unsigned long dwTsrTotalRetry[TYPE_MAXTD]; // total collision retry count
- unsigned long dwTsrOnceRetry[TYPE_MAXTD]; // this packet only occur one collision
- unsigned long dwTsrMoreThanOnceRetry[TYPE_MAXTD]; // this packet occur more than one collision
- unsigned long dwTsrRetry[TYPE_MAXTD]; // this packet has ever occur collision,
- // that is (dwTsrOnceCollision0 + dwTsrMoreThanOnceCollision0)
- unsigned long dwTsrACKData[TYPE_MAXTD];
- unsigned long dwTsrErr[TYPE_MAXTD];
- unsigned long dwAllTsrOK[TYPE_MAXTD];
- unsigned long dwTsrRetryTimeout[TYPE_MAXTD];
- unsigned long dwTsrTransmitTimeout[TYPE_MAXTD];
-
- unsigned long dwTsrTxPacket[TYPE_MAXTD];
- unsigned long dwTsrTxOctet[TYPE_MAXTD];
- unsigned long dwTsrBroadcast[TYPE_MAXTD];
- unsigned long dwTsrMulticast[TYPE_MAXTD];
- unsigned long dwTsrDirected[TYPE_MAXTD];
-
- // RD/TD count
- unsigned long dwCntRxFrmLength;
- unsigned long dwCntTxBufLength;
-
- unsigned char abyCntRxPattern[16];
- unsigned char abyCntTxPattern[16];
-
- // Software check....
- unsigned long dwCntRxDataErr; // rx buffer data software compare CRC err count
- unsigned long dwCntDecryptErr; // rx buffer data software compare CRC err count
- unsigned long dwCntRxICVErr; // rx buffer data software compare CRC err count
- unsigned int idxRxErrorDesc[TYPE_MAXRD]; // index for rx data error RD
-
- // 64-bit OID
- unsigned long long ullTsrOK[TYPE_MAXTD];
-
- // for some optional OIDs (64 bits) and DMI support
- unsigned long long ullTxBroadcastFrames[TYPE_MAXTD];
- unsigned long long ullTxMulticastFrames[TYPE_MAXTD];
- unsigned long long ullTxDirectedFrames[TYPE_MAXTD];
- unsigned long long ullTxBroadcastBytes[TYPE_MAXTD];
- unsigned long long ullTxMulticastBytes[TYPE_MAXTD];
- unsigned long long ullTxDirectedBytes[TYPE_MAXTD];
-
SISRCounters ISRStat;
-
- SCustomCounters CustomStat;
-
-#ifdef Calcu_LinkQual
- //Tx count:
- unsigned long TxNoRetryOkCount;
- unsigned long TxRetryOkCount;
- unsigned long TxFailCount;
- //Rx count:
- unsigned long RxOkCnt;
- unsigned long RxFcsErrCnt;
- //statistic
- unsigned long SignalStren;
- unsigned long LinkQuality;
-#endif
} SStatCounter, *PSStatCounter;
-void STAvClearAllCounter(PSStatCounter pStatistic);
-
void STAvUpdateIsrStatCounter(PSStatCounter pStatistic, unsigned long dwIsr);
-void STAvUpdateRDStatCounter(PSStatCounter pStatistic,
- unsigned char byRSR, unsigned char byNewRSR, unsigned char byRxRate,
- unsigned char *pbyBuffer, unsigned int cbFrameLength);
-
-void STAvUpdateRDStatCounterEx(PSStatCounter pStatistic,
- unsigned char byRSR, unsigned char byNewRsr, unsigned char byRxRate,
- unsigned char *pbyBuffer, unsigned int cbFrameLength);
-
-void STAvUpdateTDStatCounter(PSStatCounter pStatistic, unsigned char byTSR0, unsigned char byTSR1,
- unsigned char *pbyBuffer, unsigned int cbFrameLength, unsigned int uIdx);
-
-void STAvUpdateTDStatCounterEx(
- PSStatCounter pStatistic,
- unsigned char *pbyBuffer,
- unsigned long cbFrameLength
-);
-
void STAvUpdate802_11Counter(
PSDot11Counters p802_11Counter,
PSStatCounter pStatistic,
unsigned long dwCounter
);
-void STAvClear802_11Counter(PSDot11Counters p802_11Counter);
-
#endif // __MIB_H__
diff --git a/drivers/staging/vt6655/michael.c b/drivers/staging/vt6655/michael.c
deleted file mode 100644
index edee48777aa..00000000000
--- a/drivers/staging/vt6655/michael.c
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: michael.cpp
- *
- * Purpose: The implementation of LIST data structure.
- *
- * Author: Kyle Hsu
- *
- * Date: Sep 4, 2002
- *
- * Functions:
- * s_dwGetUINT32 - Convert from unsigned char [] to unsigned long in a portable way
- * s_vPutUINT32 - Convert from unsigned long to unsigned char [] in a portable way
- * s_vClear - Reset the state to the empty message.
- * s_vSetKey - Set the key.
- * MIC_vInit - Set the key.
- * s_vAppendByte - Append the byte to our word-sized buffer.
- * MIC_vAppend - call s_vAppendByte.
- * MIC_vGetMIC - Append the minimum padding and call s_vAppendByte.
- *
- * Revision History:
- *
- */
-
-#include "tmacro.h"
-#include "michael.h"
-
-/*--------------------- Static Definitions -------------------------*/
-
-/*--------------------- Static Variables --------------------------*/
-
-/*--------------------- Static Functions --------------------------*/
-
-static void s_vClear(void); // Clear the internal message,
-// resets the object to the state just after construction.
-static void s_vSetKey(u32 dwK0, u32 dwK1);
-static void s_vAppendByte(unsigned char b); // Add a single byte to the internal message
-
-/*--------------------- Export Variables --------------------------*/
-static u32 L, R; /* Current state */
-
-static u32 K0, K1; /* Key */
-static u32 M; /* Message accumulator (single word) */
-static unsigned int nBytesInM; // # bytes in M
-
-/*--------------------- Export Functions --------------------------*/
-
-static void s_vClear(void)
-{
- // Reset the state to the empty message.
- L = K0;
- R = K1;
- nBytesInM = 0;
- M = 0;
-}
-
-static void s_vSetKey(u32 dwK0, u32 dwK1)
-{
- // Set the key
- K0 = dwK0;
- K1 = dwK1;
- // and reset the message
- s_vClear();
-}
-
-static void s_vAppendByte(unsigned char b)
-{
- // Append the byte to our word-sized buffer
- M |= b << (8*nBytesInM);
- nBytesInM++;
- // Process the word if it is full.
- if (nBytesInM >= 4) {
- L ^= M;
- R ^= ROL32(L, 17);
- L += R;
- R ^= ((L & 0xff00ff00) >> 8) | ((L & 0x00ff00ff) << 8);
- L += R;
- R ^= ROL32(L, 3);
- L += R;
- R ^= ROR32(L, 2);
- L += R;
- // Clear the buffer
- M = 0;
- nBytesInM = 0;
- }
-}
-
-void MIC_vInit(u32 dwK0, u32 dwK1)
-{
- // Set the key
- s_vSetKey(dwK0, dwK1);
-}
-
-void MIC_vUnInit(void)
-{
- // Wipe the key material
- K0 = 0;
- K1 = 0;
-
- // And the other fields as well.
- //Note that this sets (L,R) to (K0,K1) which is just fine.
- s_vClear();
-}
-
-void MIC_vAppend(unsigned char *src, unsigned int nBytes)
-{
- // This is simple
- while (nBytes > 0) {
- s_vAppendByte(*src++);
- nBytes--;
- }
-}
-
-void MIC_vGetMIC(u32 *pdwL, u32 *pdwR)
-{
- // Append the minimum padding
- s_vAppendByte(0x5a);
- s_vAppendByte(0);
- s_vAppendByte(0);
- s_vAppendByte(0);
- s_vAppendByte(0);
- // and then zeroes until the length is a multiple of 4
- while (nBytesInM != 0)
- s_vAppendByte(0);
-
- // The s_vAppendByte function has already computed the result.
- *pdwL = L;
- *pdwR = R;
- // Reset to the empty message.
- s_vClear();
-}
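The file removed above implemented the Michael message integrity code used by TKIP; after the mac80211 conversion the core stack takes care of the TKIP MIC, so the driver-local copy goes away. A minimal sketch of how the removed helpers fit together, with placeholder key words and a placeholder buffer:

	u32 mic_l, mic_r;
	u32 k0 = 0x01234567;		/* placeholder: first 32-bit key word */
	u32 k1 = 0x89abcdef;		/* placeholder: second 32-bit key word */
	unsigned char msdu[64] = {0};	/* placeholder message */

	MIC_vInit(k0, k1);		/* load the key and reset the state */
	MIC_vAppend(msdu, sizeof(msdu));	/* feed message bytes, mixed 32 bits at a time */
	MIC_vGetMIC(&mic_l, &mic_r);	/* pad, read the 64-bit MIC, reset the state */
	MIC_vUnInit();			/* wipe the key material */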
diff --git a/drivers/staging/vt6655/michael.h b/drivers/staging/vt6655/michael.h
deleted file mode 100644
index 86cb140e308..00000000000
--- a/drivers/staging/vt6655/michael.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: Michael.h
- *
- * Purpose: Reference implementation for Michael
- * written by Niels Ferguson
- *
- * Author: Kyle Hsu
- *
- * Date: Jan 2, 2003
- *
- */
-
-#ifndef __MICHAEL_H__
-#define __MICHAEL_H__
-
-#include <linux/types.h>
-
-void MIC_vInit(u32 dwK0, u32 dwK1);
-
-void MIC_vUnInit(void);
-
-/* Append bytes to the message to be MICed */
-void MIC_vAppend(unsigned char *src, unsigned int nBytes);
-
-/* Get the MIC result. Destination should accept 8 bytes of result. */
-/* This also resets the message to empty. */
-void MIC_vGetMIC(u32 *pdwL, u32 *pdwR);
-
-/* Rotation functions on 32 bit values */
-#define ROL32(A, n) \
- (((A) << (n)) | (((A)>>(32-(n))) & ((1UL << (n)) - 1)))
-#define ROR32(A, n) ROL32((A), 32-(n))
-
-#endif /*__MICHAEL_H__ */
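The rotation macros removed with this header duplicate rol32() and ror32() from <linux/bitops.h>; any remaining caller can use the in-tree helpers instead (value below is a placeholder u32):

	#include <linux/bitops.h>

	u32 rotated = rol32(value, 17);	/* same result as the removed ROL32(value, 17) */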
diff --git a/drivers/staging/vt6655/power.c b/drivers/staging/vt6655/power.c
index 08241b91777..e826f07e91c 100644
--- a/drivers/staging/vt6655/power.c
+++ b/drivers/staging/vt6655/power.c
@@ -37,13 +37,9 @@
*
*/
-#include "ttype.h"
#include "mac.h"
#include "device.h"
-#include "wmgr.h"
#include "power.h"
-#include "wcmd.h"
-#include "rxtx.h"
#include "card.h"
/*--------------------- Static Definitions -------------------------*/
@@ -73,8 +69,7 @@ PSvEnablePowerSaving(
)
{
struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- unsigned short wAID = pMgmt->wCurrAID | BIT14 | BIT15;
+ u16 wAID = pDevice->current_aid | BIT(14) | BIT(15);
// set period of power up before TBTT
VNSvOutPortW(pDevice->PortOffset + MAC_REG_PWBT, C_PWBT);
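The wAID computed above is the association ID with bits 14 and 15 forced on, which is how 802.11 encodes an AID in a Duration/ID field (the same value a PS-Poll carries). A short sketch of the encoding and of recovering the plain AID:

	u16 duration_id = pDevice->current_aid | BIT(14) | BIT(15);	/* AID with both MSBs set */
	u16 aid = duration_id & ~(BIT(14) | BIT(15));			/* plain AID again */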
@@ -83,7 +78,9 @@ PSvEnablePowerSaving(
VNSvOutPortW(pDevice->PortOffset + MAC_REG_AIDATIM, wAID);
} else {
// set ATIM Window
+#if 0 /* TODO atim window */
MACvWriteATIMW(pDevice->PortOffset, pMgmt->wCurrATIMWindow);
+#endif
}
// Set AutoSleep
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCFG, PSCFG_AUTOSLEEP);
@@ -95,22 +92,15 @@ PSvEnablePowerSaving(
MACvRegBitsOff(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_ALBCN);
// first time set listen next beacon
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_LNBCN);
- pMgmt->wCountToWakeUp = wListenInterval;
} else {
// always listen beacon
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_ALBCN);
- pMgmt->wCountToWakeUp = 0;
}
// enable power saving hw function
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PSEN);
pDevice->bEnablePSMode = true;
- /* We don't send null pkt in ad hoc mode since beacon will handle this. */
- if (pDevice->op_mode != NL80211_IFTYPE_ADHOC &&
- pDevice->op_mode == NL80211_IFTYPE_STATION)
- PSbSendNullPacket(pDevice);
-
pDevice->bPWBitOn = true;
pr_debug("PS:Power Saving Mode Enable...\n");
}
@@ -143,182 +133,9 @@ PSvDisablePowerSaving(
pDevice->bEnablePSMode = false;
- if (pDevice->op_mode == NL80211_IFTYPE_STATION)
- PSbSendNullPacket(pDevice);
-
pDevice->bPWBitOn = false;
}
-/*+
- *
- * Routine Description:
- * Consider to power down when no more packets to tx or rx.
- *
- * Return Value:
- * true, if power down success
- * false, if fail
- -*/
-
-bool
-PSbConsiderPowerDown(
- void *hDeviceContext,
- bool bCheckRxDMA,
- bool bCheckCountToWakeUp
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- unsigned int uIdx;
-
- // check if already in Doze mode
- if (MACbIsRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PS))
- return true;
-
- if (pMgmt->eCurrMode != WMAC_MODE_IBSS_STA) {
- // check if in TIM wake period
- if (pMgmt->bInTIMWake)
- return false;
- }
-
- // check scan state
- if (pDevice->bCmdRunning)
- return false;
-
- // Force PSEN on
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PSEN);
-
- // check if all TD are empty,
- for (uIdx = 0; uIdx < TYPE_MAXTD; uIdx++) {
- if (pDevice->iTDUsed[uIdx] != 0)
- return false;
- }
-
- // check if rx isr is clear
- if (bCheckRxDMA &&
- ((pDevice->dwIsr & ISR_RXDMA0) != 0) &&
- ((pDevice->dwIsr & ISR_RXDMA1) != 0)) {
- return false;
- }
-
- if (pMgmt->eCurrMode != WMAC_MODE_IBSS_STA) {
- if (bCheckCountToWakeUp &&
- (pMgmt->wCountToWakeUp == 0 || pMgmt->wCountToWakeUp == 1)) {
- return false;
- }
- }
-
- // no Tx, no Rx isr, now go to Doze
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_GO2DOZE);
- pr_debug("Go to Doze ZZZZZZZZZZZZZZZ\n");
- return true;
-}
-
-/*+
- *
- * Routine Description:
- * Send PS-POLL packet
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-PSvSendPSPOLL(
- void *hDeviceContext
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- PSTxMgmtPacket pTxPacket = NULL;
-
- memset(pMgmt->pbyPSPacketPool, 0, sizeof(STxMgmtPacket) + WLAN_HDR_ADDR2_LEN);
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyPSPacketPool;
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
- pTxPacket->p80211Header->sA2.wFrameCtl = cpu_to_le16(
- (
- WLAN_SET_FC_FTYPE(WLAN_TYPE_CTL) |
- WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_PSPOLL) |
- WLAN_SET_FC_PWRMGT(0)
-));
- pTxPacket->p80211Header->sA2.wDurationID = pMgmt->wCurrAID | BIT14 | BIT15;
- memcpy(pTxPacket->p80211Header->sA2.abyAddr1, pMgmt->abyCurrBSSID, WLAN_ADDR_LEN);
- memcpy(pTxPacket->p80211Header->sA2.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
- pTxPacket->cbMPDULen = WLAN_HDR_ADDR2_LEN;
- pTxPacket->cbPayloadLen = 0;
- // send the frame
- if (csMgmt_xmit(pDevice, pTxPacket) != CMD_STATUS_PENDING)
- pr_debug("Send PS-Poll packet failed..\n");
-}
-
-/*+
- *
- * Routine Description:
- * Send NULL packet to AP for notification power state of STA
- *
- * Return Value:
- * None.
- *
- -*/
-bool
-PSbSendNullPacket(
- void *hDeviceContext
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSTxMgmtPacket pTxPacket = NULL;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- unsigned int uIdx;
-
- if (!pDevice->bLinkPass)
- return false;
-
- if (!pDevice->bEnablePSMode && !pDevice->fTxDataInSleep)
- return false;
-
- if (pDevice->bEnablePSMode) {
- for (uIdx = 0; uIdx < TYPE_MAXTD; uIdx++) {
- if (pDevice->iTDUsed[uIdx] != 0)
- return false;
- }
- }
-
- memset(pMgmt->pbyPSPacketPool, 0, sizeof(STxMgmtPacket) + WLAN_NULLDATA_FR_MAXLEN);
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyPSPacketPool;
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
-
- if (pDevice->bEnablePSMode) {
- pTxPacket->p80211Header->sA3.wFrameCtl = cpu_to_le16(
- (
- WLAN_SET_FC_FTYPE(WLAN_TYPE_DATA) |
- WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_NULL) |
- WLAN_SET_FC_PWRMGT(1)
-));
- } else {
- pTxPacket->p80211Header->sA3.wFrameCtl = cpu_to_le16(
- (
- WLAN_SET_FC_FTYPE(WLAN_TYPE_DATA) |
- WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_NULL) |
- WLAN_SET_FC_PWRMGT(0)
-));
- }
-
- if (pMgmt->eCurrMode != WMAC_MODE_IBSS_STA)
- pTxPacket->p80211Header->sA3.wFrameCtl |= cpu_to_le16((unsigned short)WLAN_SET_FC_TODS(1));
-
- memcpy(pTxPacket->p80211Header->sA3.abyAddr1, pMgmt->abyCurrBSSID, WLAN_ADDR_LEN);
- memcpy(pTxPacket->p80211Header->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
- memcpy(pTxPacket->p80211Header->sA3.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
- pTxPacket->cbMPDULen = WLAN_HDR_ADDR3_LEN;
- pTxPacket->cbPayloadLen = 0;
- // send the frame
- if (csMgmt_xmit(pDevice, pTxPacket) != CMD_STATUS_PENDING) {
- pr_debug("Send Null Packet failed !\n");
- return false;
- }
-
- return true;
-}
/*+
*
@@ -336,21 +153,14 @@ PSbIsNextTBTTWakeUp(
)
{
struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
+ struct ieee80211_hw *hw = pDevice->hw;
+ struct ieee80211_conf *conf = &hw->conf;
bool bWakeUp = false;
- if (pMgmt->wListenInterval >= 2) {
- if (pMgmt->wCountToWakeUp == 0)
- pMgmt->wCountToWakeUp = pMgmt->wListenInterval;
-
- pMgmt->wCountToWakeUp--;
-
- if (pMgmt->wCountToWakeUp == 1) {
- // Turn on wake up to listen next beacon
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_LNBCN);
- bWakeUp = true;
- }
-
+ if (conf->listen_interval == 1) {
+ /* Turn on wake up to listen next beacon */
+ MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_LNBCN);
+ bWakeUp = true;
}
return bWakeUp;
diff --git a/drivers/staging/vt6655/power.h b/drivers/staging/vt6655/power.h
index 936f171a6b1..1083341b2a4 100644
--- a/drivers/staging/vt6655/power.h
+++ b/drivers/staging/vt6655/power.h
@@ -33,13 +33,6 @@
#define PS_FAST_INTERVAL 1 // Fast power saving listen interval
#define PS_MAX_INTERVAL 4 // MAX power saving listen interval
-bool
-PSbConsiderPowerDown(
- void *hDeviceContext,
- bool bCheckRxDMA,
- bool bCheckCountToWakeUp
-);
-
void
PSvDisablePowerSaving(
void *hDeviceContext
@@ -51,15 +44,6 @@ PSvEnablePowerSaving(
unsigned short wListenInterval
);
-void
-PSvSendPSPOLL(
- void *hDeviceContext
-);
-
-bool
-PSbSendNullPacket(
- void *hDeviceContext
-);
bool
PSbIsNextTBTTWakeUp(
diff --git a/drivers/staging/vt6655/rc4.c b/drivers/staging/vt6655/rc4.c
deleted file mode 100644
index b7819bc702d..00000000000
--- a/drivers/staging/vt6655/rc4.c
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: rc4.c
- *
- * Purpose:
- *
- * Functions:
- *
- * Revision History:
- *
- * Author: Kyle Hsu
- *
- * Date: Sep 4, 2002
- *
- */
-
-#include "rc4.h"
-
-void rc4_init(PRC4Ext pRC4, unsigned char *pbyKey, unsigned int cbKey_len)
-{
- unsigned int ust1, ust2;
- unsigned int keyindex;
- unsigned int stateindex;
- unsigned char *pbyst;
- unsigned int idx;
-
- pbyst = pRC4->abystate;
- pRC4->ux = 0;
- pRC4->uy = 0;
- for (idx = 0; idx < 256; idx++)
- pbyst[idx] = (unsigned char)idx;
- keyindex = 0;
- stateindex = 0;
- for (idx = 0; idx < 256; idx++) {
- ust1 = pbyst[idx];
- stateindex = (stateindex + pbyKey[keyindex] + ust1) & 0xff;
- ust2 = pbyst[stateindex];
- pbyst[stateindex] = (unsigned char)ust1;
- pbyst[idx] = (unsigned char)ust2;
- if (++keyindex >= cbKey_len)
- keyindex = 0;
- }
-}
-
-unsigned int rc4_byte(PRC4Ext pRC4)
-{
- unsigned int ux;
- unsigned int uy;
- unsigned int ustx, usty;
- unsigned char *pbyst;
-
- pbyst = pRC4->abystate;
- ux = (pRC4->ux + 1) & 0xff;
- ustx = pbyst[ux];
- uy = (ustx + pRC4->uy) & 0xff;
- usty = pbyst[uy];
- pRC4->ux = ux;
- pRC4->uy = uy;
- pbyst[uy] = (unsigned char)ustx;
- pbyst[ux] = (unsigned char)usty;
-
- return pbyst[(ustx + usty) & 0xff];
-}
-
-void rc4_encrypt(PRC4Ext pRC4, unsigned char *pbyDest,
- unsigned char *pbySrc, unsigned int cbData_len)
-{
- unsigned int ii;
-
- for (ii = 0; ii < cbData_len; ii++)
- pbyDest[ii] = (unsigned char)(pbySrc[ii] ^ rc4_byte(pRC4));
-}
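rc4.c provided the stream cipher behind the removed WEP/TKIP software-encryption path; after the mac80211 conversion the driver no longer encrypts frames itself. A minimal sketch of how the removed API was driven, with placeholder key and payload buffers (for WEP the RC4 key was the 3-byte IV followed by the WEP key):

	RC4Ext ctx;
	unsigned char key[16];		/* placeholder: IV followed by the per-packet key */
	unsigned char payload[128];	/* placeholder: payload followed by the 4-byte ICV */

	rc4_init(&ctx, key, sizeof(key));
	rc4_encrypt(&ctx, payload, payload, sizeof(payload));	/* in-place XOR with the keystream */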
diff --git a/drivers/staging/vt6655/rc4.h b/drivers/staging/vt6655/rc4.h
deleted file mode 100644
index 74b2eed9bce..00000000000
--- a/drivers/staging/vt6655/rc4.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * File: rc4.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Purpose:
- *
- * Functions:
- *
- * Revision History:
- *
- * Author: Kyle Hsu
- *
- * Date: Sep 4, 2002
- *
- */
-
-#ifndef __RC4_H__
-#define __RC4_H__
-
-#include "ttype.h"
-
-/*--------------------- Export Definitions -------------------------*/
-/*--------------------- Export Types ------------------------------*/
-typedef struct {
- unsigned int ux;
- unsigned int uy;
- unsigned char abystate[256];
-} RC4Ext, *PRC4Ext;
-
-void rc4_init(PRC4Ext pRC4, unsigned char *pbyKey, unsigned int cbKey_len);
-unsigned int rc4_byte(PRC4Ext pRC4);
-void rc4_encrypt(PRC4Ext pRC4, unsigned char *pbyDest, unsigned char *pbySrc, unsigned int cbData_len);
-
-#endif //__RC4_H__
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
index e505af91bfd..32ef99341e2 100644
--- a/drivers/staging/vt6655/rf.c
+++ b/drivers/staging/vt6655/rf.c
@@ -29,6 +29,8 @@
* IFRFbWriteEmbedded - Embedded write RF register via MAC
*
* Revision History:
+ * RobertYu 2005
+ * chester 2008
*
*/
@@ -37,8 +39,6 @@
#include "rf.h"
#include "baseband.h"
-/*--------------------- Static Definitions -------------------------*/
-
#define BY_AL2230_REG_LEN 23 //24bit
#define CB_AL2230_INIT_SEQ 15
#define SWITCH_CHANNEL_DELAY_AL2230 200 //us
@@ -49,10 +49,6 @@
#define SWITCH_CHANNEL_DELAY_AL7230 200 //us
#define AL7230_PWR_IDX_LEN 64
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Variables --------------------------*/
-
static const unsigned long dwAL2230InitTable[CB_AL2230_INIT_SEQ] = {
0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, //
0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, //
@@ -172,7 +168,6 @@ static unsigned long dwAL2230PowerTable[AL2230_PWR_IDX_LEN] = {
0x0407F900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW
};
-//{{ RobertYu:20050104
// 40MHz reference frequency
// Need to Pull PLLON(PE3) low when writing channel registers through 3-wire.
static const unsigned long dwAL7230InitTable[CB_AL7230_INIT_SEQ] = {
@@ -408,9 +403,6 @@ static const unsigned long dwAL7230ChannelTable2[CB_MAX_CHANNEL] = {
0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 161, Tf = 5805MHz (55)
0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW // channel = 165, Tf = 5825MHz (56)
};
-//}} RobertYu
-
-/*--------------------- Static Functions --------------------------*/
/*
* Description: AIROHA IFRF chip init function
@@ -424,137 +416,81 @@ static const unsigned long dwAL7230ChannelTable2[CB_MAX_CHANNEL] = {
* Return Value: true if succeeded; false if failed.
*
*/
-static bool s_bAL7230Init(void __iomem *dwIoBase)
+static bool s_bAL7230Init(struct vnt_private *priv)
{
+ void __iomem *dwIoBase = priv->PortOffset;
int ii;
bool bResult;
bResult = true;
- //3-wire control for normal mode
+ /* 3-wire control for normal mode */
VNSvOutPortB(dwIoBase + MAC_REG_SOFTPWRCTL, 0);
MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPECTI |
SOFTPWRCTL_TXPEINV));
- BBvPowerSaveModeOFF(dwIoBase); //RobertYu:20050106, have DC value for Calibration
+ BBvPowerSaveModeOFF(priv); /* RobertYu:20050106, have DC value for Calibration */
for (ii = 0; ii < CB_AL7230_INIT_SEQ; ii++)
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTable[ii]);
+ bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[ii]);
- // PLL On
+ /* PLL On */
MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
- //Calibration
+ /* Calibration */
MACvTimer0MicroSDelay(dwIoBase, 150);//150us
- bResult &= IFRFbWriteEmbedded(dwIoBase, (0x9ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW)); //TXDCOC:active, RCK:disable
+ /* TXDCOC:active, RCK:disable */
+ bResult &= IFRFbWriteEmbedded(priv, (0x9ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW));
MACvTimer0MicroSDelay(dwIoBase, 30);//30us
- bResult &= IFRFbWriteEmbedded(dwIoBase, (0x3ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW)); //TXDCOC:disable, RCK:active
+ /* TXDCOC:disable, RCK:active */
+ bResult &= IFRFbWriteEmbedded(priv, (0x3ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW));
MACvTimer0MicroSDelay(dwIoBase, 30);//30us
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTable[CB_AL7230_INIT_SEQ-1]); //TXDCOC:disable, RCK:disable
+ /* TXDCOC:disable, RCK:disable */
+ bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[CB_AL7230_INIT_SEQ-1]);
MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 |
SOFTPWRCTL_SWPE2 |
SOFTPWRCTL_SWPECTI |
SOFTPWRCTL_TXPEINV));
- BBvPowerSaveModeON(dwIoBase); // RobertYu:20050106
+ BBvPowerSaveModeON(priv); /* RobertYu:20050106 */
- // PE1: TX_ON, PE2: RX_ON, PE3: PLLON
- //3-wire control for power saving mode
+ /* PE1: TX_ON, PE2: RX_ON, PE3: PLLON */
+ /* 3-wire control for power saving mode */
VNSvOutPortB(dwIoBase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); //1100 0000
return bResult;
}
-// Need to Pull PLLON low when writing channel registers through 3-wire interface
-static bool s_bAL7230SelectChannel(void __iomem *dwIoBase, unsigned char byChannel)
+/* Need to Pull PLLON low when writing channel registers through
+ * 3-wire interface */
+static bool s_bAL7230SelectChannel(struct vnt_private *priv, unsigned char byChannel)
{
+ void __iomem *dwIoBase = priv->PortOffset;
bool bResult;
bResult = true;
- // PLLON Off
+ /* PLLON Off */
MACvWordRegBitsOff(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230ChannelTable0[byChannel - 1]); //Reg0
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230ChannelTable1[byChannel - 1]); //Reg1
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230ChannelTable2[byChannel - 1]); //Reg4
+ bResult &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable0[byChannel - 1]);
+ bResult &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable1[byChannel - 1]);
+ bResult &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable2[byChannel - 1]);
- // PLLOn On
+ /* PLLOn On */
MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
- // Set Channel[7] = 0 to tell H/W channel is changing now.
+ /* Set Channel[7] = 0 to tell H/W channel is changing now. */
VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel & 0x7F));
MACvTimer0MicroSDelay(dwIoBase, SWITCH_CHANNEL_DELAY_AL7230);
- // Set Channel[7] = 1 to tell H/W channel change is done.
+ /* Set Channel[7] = 1 to tell H/W channel change is done. */
VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel | 0x80));
return bResult;
}
/*
- * Description: Select channel with UW2452 chip
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * uChannel - Channel number
- * Out:
- * none
- *
- * Return Value: true if succeeded; false if failed.
- *
- */
-
-//{{ RobertYu: 20041210
-/*
- * Description: UW2452 IFRF chip init function
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * Out:
- * none
- *
- * Return Value: true if succeeded; false if failed.
- *
- */
-
-//}} RobertYu
-////////////////////////////////////////////////////////////////////////////////
-
-/*
- * Description: VT3226 IFRF chip init function
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * Out:
- * none
- *
- * Return Value: true if succeeded; false if failed.
- *
- */
-
-/*
- * Description: Select channel with VT3226 chip
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * uChannel - Channel number
- * Out:
- * none
- *
- * Return Value: true if succeeded; false if failed.
- *
- */
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-/*
* Description: Write to IF/RF, by embedded programming
*
* Parameters:
@@ -567,14 +503,15 @@ static bool s_bAL7230SelectChannel(void __iomem *dwIoBase, unsigned char byChann
* Return Value: true if succeeded; false if failed.
*
*/
-bool IFRFbWriteEmbedded(void __iomem *dwIoBase, unsigned long dwData)
+bool IFRFbWriteEmbedded(struct vnt_private *priv, unsigned long dwData)
{
+ void __iomem *dwIoBase = priv->PortOffset;
unsigned short ww;
unsigned long dwValue;
VNSvOutPortD(dwIoBase + MAC_REG_IFREGCTL, dwData);
- // W_MAX_TIMEOUT is the timeout period
+ /* W_MAX_TIMEOUT is the timeout period */
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
VNSvInPortD(dwIoBase + MAC_REG_IFREGCTL, &dwValue);
if (dwValue & IFREGCTL_DONE)
@@ -588,33 +525,6 @@ bool IFRFbWriteEmbedded(void __iomem *dwIoBase, unsigned long dwData)
}
/*
- * Description: RFMD RF2959 IFRF chip init function
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * Out:
- * none
- *
- * Return Value: true if succeeded; false if failed.
- *
- */
-
-/*
- * Description: Select channel with RFMD 2959 chip
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * uChannel - Channel number
- * Out:
- * none
- *
- * Return Value: true if succeeded; false if failed.
- *
- */
-
-/*
* Description: AIROHA IFRF chip init function
*
* Parameters:
@@ -626,113 +536,70 @@ bool IFRFbWriteEmbedded(void __iomem *dwIoBase, unsigned long dwData)
* Return Value: true if succeeded; false if failed.
*
*/
-static bool RFbAL2230Init(void __iomem *dwIoBase)
+static bool RFbAL2230Init(struct vnt_private *priv)
{
+ void __iomem *dwIoBase = priv->PortOffset;
int ii;
bool bResult;
bResult = true;
- //3-wire control for normal mode
+ /* 3-wire control for normal mode */
VNSvOutPortB(dwIoBase + MAC_REG_SOFTPWRCTL, 0);
MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPECTI |
SOFTPWRCTL_TXPEINV));
-//2008-8-21 chester <add>
- // PLL Off
-
+ /* PLL Off */
MACvWordRegBitsOff(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
- //patch abnormal AL2230 frequency output
-//2008-8-21 chester <add>
- IFRFbWriteEmbedded(dwIoBase, (0x07168700+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW));
+ /* patch abnormal AL2230 frequency output */
+ IFRFbWriteEmbedded(priv, (0x07168700+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW));
for (ii = 0; ii < CB_AL2230_INIT_SEQ; ii++)
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL2230InitTable[ii]);
-//2008-8-21 chester <add>
+ bResult &= IFRFbWriteEmbedded(priv, dwAL2230InitTable[ii]);
MACvTimer0MicroSDelay(dwIoBase, 30); //delay 30 us
- // PLL On
+ /* PLL On */
MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
MACvTimer0MicroSDelay(dwIoBase, 150);//150us
- bResult &= IFRFbWriteEmbedded(dwIoBase, (0x00d80f00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW));
+ bResult &= IFRFbWriteEmbedded(priv, (0x00d80f00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW));
MACvTimer0MicroSDelay(dwIoBase, 30);//30us
- bResult &= IFRFbWriteEmbedded(dwIoBase, (0x00780f00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW));
+ bResult &= IFRFbWriteEmbedded(priv, (0x00780f00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW));
MACvTimer0MicroSDelay(dwIoBase, 30);//30us
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL2230InitTable[CB_AL2230_INIT_SEQ-1]);
+ bResult &= IFRFbWriteEmbedded(priv, dwAL2230InitTable[CB_AL2230_INIT_SEQ-1]);
MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 |
SOFTPWRCTL_SWPE2 |
SOFTPWRCTL_SWPECTI |
SOFTPWRCTL_TXPEINV));
- //3-wire control for power saving mode
+ /* 3-wire control for power saving mode */
VNSvOutPortB(dwIoBase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); //1100 0000
return bResult;
}
-static bool RFbAL2230SelectChannel(void __iomem *dwIoBase, unsigned char byChannel)
+static bool RFbAL2230SelectChannel(struct vnt_private *priv, unsigned char byChannel)
{
+ void __iomem *dwIoBase = priv->PortOffset;
bool bResult;
bResult = true;
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL2230ChannelTable0[byChannel - 1]);
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL2230ChannelTable1[byChannel - 1]);
+ bResult &= IFRFbWriteEmbedded(priv, dwAL2230ChannelTable0[byChannel - 1]);
+ bResult &= IFRFbWriteEmbedded(priv, dwAL2230ChannelTable1[byChannel - 1]);
- // Set Channel[7] = 0 to tell H/W channel is changing now.
+ /* Set Channel[7] = 0 to tell H/W channel is changing now. */
VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel & 0x7F));
MACvTimer0MicroSDelay(dwIoBase, SWITCH_CHANNEL_DELAY_AL2230);
- // Set Channel[7] = 1 to tell H/W channel change is done.
+ /* Set Channel[7] = 1 to tell H/W channel change is done. */
VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel | 0x80));
return bResult;
}
/*
- * Description: UW2451 IFRF chip init function
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * Out:
- * none
- *
- * Return Value: true if succeeded; false if failed.
- *
- */
-
-/*
- * Description: Select channel with UW2451 chip
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * uChannel - Channel number
- * Out:
- * none
- *
- * Return Value: true if succeeded; false if failed.
- *
- */
-
-/*
- * Description: Set sleep mode to UW2451 chip
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * uChannel - Channel number
- * Out:
- * none
- *
- * Return Value: true if succeeded; false if failed.
- *
- */
-
-/*
* Description: RF init function
*
* Parameters:
@@ -746,20 +613,20 @@ static bool RFbAL2230SelectChannel(void __iomem *dwIoBase, unsigned char byChann
*
*/
bool RFbInit(
- struct vnt_private *pDevice
+ struct vnt_private *priv
)
{
bool bResult = true;
- switch (pDevice->byRFType) {
+ switch (priv->byRFType) {
case RF_AIROHA:
case RF_AL2230S:
- pDevice->byMaxPwrLevel = AL2230_PWR_IDX_LEN;
- bResult = RFbAL2230Init(pDevice->PortOffset);
+ priv->byMaxPwrLevel = AL2230_PWR_IDX_LEN;
+ bResult = RFbAL2230Init(priv);
break;
case RF_AIROHA7230:
- pDevice->byMaxPwrLevel = AL7230_PWR_IDX_LEN;
- bResult = s_bAL7230Init(pDevice->PortOffset);
+ priv->byMaxPwrLevel = AL7230_PWR_IDX_LEN;
+ bResult = s_bAL7230Init(priv);
break;
case RF_NOTHING:
bResult = true;
@@ -784,18 +651,18 @@ bool RFbInit(
* Return Value: true if succeeded; false if failed.
*
*/
-bool RFbSelectChannel(void __iomem *dwIoBase, unsigned char byRFType, unsigned char byChannel)
+bool RFbSelectChannel(struct vnt_private *priv, unsigned char byRFType, unsigned char byChannel)
{
bool bResult = true;
switch (byRFType) {
case RF_AIROHA:
case RF_AL2230S:
- bResult = RFbAL2230SelectChannel(dwIoBase, byChannel);
+ bResult = RFbAL2230SelectChannel(priv, byChannel);
break;
//{{ RobertYu: 20050104
case RF_AIROHA7230:
- bResult = s_bAL7230SelectChannel(dwIoBase, byChannel);
+ bResult = s_bAL7230SelectChannel(priv, byChannel);
break;
//}} RobertYu
case RF_NOTHING:
@@ -820,8 +687,9 @@ bool RFbSelectChannel(void __iomem *dwIoBase, unsigned char byRFType, unsigned c
* Return Value: None.
*
*/
-bool RFvWriteWakeProgSyn(void __iomem *dwIoBase, unsigned char byRFType, unsigned int uChannel)
+bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char byRFType, unsigned int uChannel)
{
+ void __iomem *dwIoBase = priv->PortOffset;
int ii;
unsigned char byInitCount = 0;
unsigned char bySleepCount = 0;
@@ -834,7 +702,8 @@ bool RFvWriteWakeProgSyn(void __iomem *dwIoBase, unsigned char byRFType, unsigne
if (uChannel > CB_MAX_CHANNEL_24G)
return false;
- byInitCount = CB_AL2230_INIT_SEQ + 2; // Init Reg + Channel Reg (2)
+ /* Init Reg + Channel Reg (2) */
+ byInitCount = CB_AL2230_INIT_SEQ + 2;
bySleepCount = 0;
if (byInitCount > (MISCFIFO_SYNDATASIZE - bySleepCount))
return false;
@@ -847,10 +716,10 @@ bool RFvWriteWakeProgSyn(void __iomem *dwIoBase, unsigned char byRFType, unsigne
MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL2230ChannelTable1[uChannel-1]);
break;
- //{{ RobertYu: 20050104
- // Need to check, PLLON need to be low for channel setting
+ /* Need to check: PLLON needs to be low for channel setting */
case RF_AIROHA7230:
- byInitCount = CB_AL7230_INIT_SEQ + 3; // Init Reg + Channel Reg (3)
+ /* Init Reg + Channel Reg (3) */
+ byInitCount = CB_AL7230_INIT_SEQ + 3;
bySleepCount = 0;
if (byInitCount > (MISCFIFO_SYNDATASIZE - bySleepCount))
return false;
@@ -869,7 +738,6 @@ bool RFvWriteWakeProgSyn(void __iomem *dwIoBase, unsigned char byRFType, unsigne
ii++;
MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230ChannelTable2[uChannel-1]);
break;
- //}} RobertYu
case RF_NOTHING:
return true;
@@ -897,7 +765,7 @@ bool RFvWriteWakeProgSyn(void __iomem *dwIoBase, unsigned char byRFType, unsigne
*
*/
bool RFbSetPower(
- struct vnt_private *pDevice,
+ struct vnt_private *priv,
unsigned int uRATE,
unsigned int uCH
)
@@ -907,7 +775,7 @@ bool RFbSetPower(
unsigned char byDec = 0;
unsigned char byPwrdBm = 0;
- if (pDevice->dwDiagRefCount != 0)
+ if (priv->dwDiagRefCount != 0)
return true;
if ((uCH < 1) || (uCH > CB_MAX_CHANNEL))
@@ -918,22 +786,22 @@ bool RFbSetPower(
case RATE_2M:
case RATE_5M:
case RATE_11M:
- byPwr = pDevice->abyCCKPwrTbl[uCH];
- byPwrdBm = pDevice->abyCCKDefaultPwr[uCH];
+ byPwr = priv->abyCCKPwrTbl[uCH];
+ byPwrdBm = priv->abyCCKDefaultPwr[uCH];
break;
case RATE_6M:
case RATE_9M:
case RATE_18M:
- byPwr = pDevice->abyOFDMPwrTbl[uCH];
- if (pDevice->byRFType == RF_UW2452)
+ byPwr = priv->abyOFDMPwrTbl[uCH];
+ if (priv->byRFType == RF_UW2452)
byDec = byPwr + 14;
else
byDec = byPwr + 10;
- if (byDec >= pDevice->byMaxPwrLevel)
- byDec = pDevice->byMaxPwrLevel-1;
+ if (byDec >= priv->byMaxPwrLevel)
+ byDec = priv->byMaxPwrLevel-1;
- if (pDevice->byRFType == RF_UW2452) {
+ if (priv->byRFType == RF_UW2452) {
byPwrdBm = byDec - byPwr;
byPwrdBm /= 3;
} else {
@@ -941,24 +809,24 @@ bool RFbSetPower(
byPwrdBm >>= 1;
}
- byPwrdBm += pDevice->abyOFDMDefaultPwr[uCH];
+ byPwrdBm += priv->abyOFDMDefaultPwr[uCH];
byPwr = byDec;
break;
case RATE_24M:
case RATE_36M:
case RATE_48M:
case RATE_54M:
- byPwr = pDevice->abyOFDMPwrTbl[uCH];
- byPwrdBm = pDevice->abyOFDMDefaultPwr[uCH];
+ byPwr = priv->abyOFDMPwrTbl[uCH];
+ byPwrdBm = priv->abyOFDMDefaultPwr[uCH];
break;
}
- if (pDevice->byCurPwr == byPwr)
+ if (priv->byCurPwr == byPwr)
return true;
- bResult = RFbRawSetPower(pDevice, byPwr, uRATE);
+ bResult = RFbRawSetPower(priv, byPwr, uRATE);
if (bResult)
- pDevice->byCurPwr = byPwr;
+ priv->byCurPwr = byPwr;
return bResult;
}
@@ -978,7 +846,7 @@ bool RFbSetPower(
*/
bool RFbRawSetPower(
- struct vnt_private *pDevice,
+ struct vnt_private *priv,
unsigned char byPwr,
unsigned int uRATE
)
@@ -986,37 +854,38 @@ bool RFbRawSetPower(
bool bResult = true;
unsigned long dwMax7230Pwr = 0;
- if (byPwr >= pDevice->byMaxPwrLevel)
+ if (byPwr >= priv->byMaxPwrLevel)
return false;
- switch (pDevice->byRFType) {
+ switch (priv->byRFType) {
case RF_AIROHA:
- bResult &= IFRFbWriteEmbedded(pDevice->PortOffset, dwAL2230PowerTable[byPwr]);
+ bResult &= IFRFbWriteEmbedded(priv, dwAL2230PowerTable[byPwr]);
if (uRATE <= RATE_11M)
- bResult &= IFRFbWriteEmbedded(pDevice->PortOffset, 0x0001B400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
+ bResult &= IFRFbWriteEmbedded(priv, 0x0001B400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
else
- bResult &= IFRFbWriteEmbedded(pDevice->PortOffset, 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
+ bResult &= IFRFbWriteEmbedded(priv, 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
break;
case RF_AL2230S:
- bResult &= IFRFbWriteEmbedded(pDevice->PortOffset, dwAL2230PowerTable[byPwr]);
+ bResult &= IFRFbWriteEmbedded(priv, dwAL2230PowerTable[byPwr]);
if (uRATE <= RATE_11M) {
- bResult &= IFRFbWriteEmbedded(pDevice->PortOffset, 0x040C1400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
- bResult &= IFRFbWriteEmbedded(pDevice->PortOffset, 0x00299B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
+ bResult &= IFRFbWriteEmbedded(priv, 0x040C1400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
+ bResult &= IFRFbWriteEmbedded(priv, 0x00299B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
} else {
- bResult &= IFRFbWriteEmbedded(pDevice->PortOffset, 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
- bResult &= IFRFbWriteEmbedded(pDevice->PortOffset, 0x00099B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
+ bResult &= IFRFbWriteEmbedded(priv, 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
+ bResult &= IFRFbWriteEmbedded(priv, 0x00099B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
}
break;
case RF_AIROHA7230:
- // 0x080F1B00 for 3 wire control TxGain(D10) and 0x31 as TX Gain value
+ /* 0x080F1B00 for 3 wire control TxGain(D10)
+ * and 0x31 as TX Gain value */
dwMax7230Pwr = 0x080C0B00 | ((byPwr) << 12) |
(BY_AL7230_REG_LEN << 3) | IFREGCTL_REGW;
- bResult &= IFRFbWriteEmbedded(pDevice->PortOffset, dwMax7230Pwr);
+ bResult &= IFRFbWriteEmbedded(priv, dwMax7230Pwr);
break;
default:
@@ -1032,7 +901,7 @@ bool RFbRawSetPower(
*
* Parameters:
* In:
- * pDevice - The adapter to be translated
+ * priv - The adapter to be translated
* byCurrRSSI - RSSI to be translated
* Out:
* pdwdbm - Translated dbm number
@@ -1042,7 +911,7 @@ bool RFbRawSetPower(
-*/
void
RFvRSSITodBm(
- struct vnt_private *pDevice,
+ struct vnt_private *priv,
unsigned char byCurrRSSI,
long *pldBm
)
@@ -1052,10 +921,10 @@ RFvRSSITodBm(
long a = 0;
unsigned char abyAIROHARF[4] = {0, 18, 0, 40};
- switch (pDevice->byRFType) {
+ switch (priv->byRFType) {
case RF_AIROHA:
case RF_AL2230S:
- case RF_AIROHA7230: //RobertYu: 20040104
+ case RF_AIROHA7230:
a = abyAIROHARF[byIdx];
break;
default:
@@ -1065,42 +934,38 @@ RFvRSSITodBm(
*pldBm = -1 * (a + b * 2);
}
-////////////////////////////////////////////////////////////////////////////////
-//{{ RobertYu: 20050104
-
-// Post processing for the 11b/g and 11a.
-// for save time on changing Reg2,3,5,7,10,12,15
-bool RFbAL7230SelectChannelPostProcess(void __iomem *dwIoBase, unsigned char byOldChannel, unsigned char byNewChannel)
+/* Post processing for 11b/g and 11a.
+ * Saves time when changing Reg2,3,5,7,10,12,15 */
+bool RFbAL7230SelectChannelPostProcess(struct vnt_private *priv,
+ unsigned char byOldChannel,
+ unsigned char byNewChannel)
{
bool bResult;
bResult = true;
- // if change between 11 b/g and 11a need to update the following register
- // Channel Index 1~14
-
+ /* If changing between 11b/g and 11a, the following registers
+ * need to be updated (channel index 1~14). */
if ((byOldChannel <= CB_MAX_CHANNEL_24G) && (byNewChannel > CB_MAX_CHANNEL_24G)) {
- // Change from 2.4G to 5G
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTableAMode[2]); //Reg2
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTableAMode[3]); //Reg3
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTableAMode[5]); //Reg5
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTableAMode[7]); //Reg7
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTableAMode[10]);//Reg10
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTableAMode[12]);//Reg12
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTableAMode[15]);//Reg15
+ /* Change from 2.4G to 5G [Reg] */
+ bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[2]);
+ bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[3]);
+ bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[5]);
+ bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[7]);
+ bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[10]);
+ bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[12]);
+ bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[15]);
} else if ((byOldChannel > CB_MAX_CHANNEL_24G) && (byNewChannel <= CB_MAX_CHANNEL_24G)) {
- // change from 5G to 2.4G
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTable[2]); //Reg2
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTable[3]); //Reg3
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTable[5]); //Reg5
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTable[7]); //Reg7
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTable[10]);//Reg10
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTable[12]);//Reg12
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTable[15]);//Reg15
+ /* Change from 5G to 2.4G [Reg] */
+ bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[2]);
+ bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[3]);
+ bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[5]);
+ bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[7]);
+ bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[10]);
+ bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[12]);
+ bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[15]);
}
return bResult;
}
-
-//}} RobertYu
-////////////////////////////////////////////////////////////////////////////////
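The pattern applied throughout rf.c is to pass struct vnt_private * instead of the raw __iomem base and to derive PortOffset locally wherever MAC registers are still written. A hypothetical helper (not part of the patch) showing the new calling convention:

	static bool example_rf_write(struct vnt_private *priv, unsigned long data)
	{
		void __iomem *iobase = priv->PortOffset;	/* register base derived from priv */

		/* MAC register helpers keep taking the raw base... */
		MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
		/* ...while the RF write path is now chained through priv. */
		return IFRFbWriteEmbedded(priv, data);
	}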
diff --git a/drivers/staging/vt6655/rf.h b/drivers/staging/vt6655/rf.h
index be4ef88b766..8a6e2cfedaa 100644
--- a/drivers/staging/vt6655/rf.h
+++ b/drivers/staging/vt6655/rf.h
@@ -30,7 +30,6 @@
#ifndef __RF_H__
#define __RF_H__
-#include "ttype.h"
#include "device.h"
/*--------------------- Export Definitions -------------------------*/
@@ -74,12 +73,12 @@
/*--------------------- Export Functions --------------------------*/
-bool IFRFbWriteEmbedded(void __iomem *dwIoBase, unsigned long dwData);
-bool RFbSelectChannel(void __iomem *dwIoBase, unsigned char byRFType, unsigned char byChannel);
+bool IFRFbWriteEmbedded(struct vnt_private *, unsigned long dwData);
+bool RFbSelectChannel(struct vnt_private *, unsigned char byRFType, unsigned char byChannel);
bool RFbInit(
struct vnt_private *
);
-bool RFvWriteWakeProgSyn(void __iomem *dwIoBase, unsigned char byRFType, unsigned int uChannel);
+bool RFvWriteWakeProgSyn(struct vnt_private *, unsigned char byRFType, unsigned int uChannel);
bool RFbSetPower(struct vnt_private *, unsigned int uRATE, unsigned int uCH);
bool RFbRawSetPower(
struct vnt_private *,
@@ -95,7 +94,7 @@ RFvRSSITodBm(
);
//{{ RobertYu: 20050104
-bool RFbAL7230SelectChannelPostProcess(void __iomem *dwIoBase, unsigned char byOldChannel, unsigned char byNewChannel);
+bool RFbAL7230SelectChannelPostProcess(struct vnt_private *, unsigned char byOldChannel, unsigned char byNewChannel);
//}} RobertYu
#endif // __RF_H__
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index 7a183f55e7e..61c39dd7ad0 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -50,17 +50,9 @@
#include "device.h"
#include "rxtx.h"
-#include "tether.h"
#include "card.h"
-#include "bssdb.h"
#include "mac.h"
#include "baseband.h"
-#include "michael.h"
-#include "tkip.h"
-#include "tcrc.h"
-#include "wctl.h"
-#include "wroute.h"
-#include "hostap.h"
#include "rf.h"
/*--------------------- Static Definitions -------------------------*/
@@ -105,19 +97,6 @@ static const unsigned short wFB_Opt1[2][5] = {
#define DATADUR_A_F1 13
/*--------------------- Static Functions --------------------------*/
-
-static
-void
-s_vFillTxKey(
- struct vnt_private *pDevice,
- unsigned char *pbyBuf,
- unsigned char *pbyIVHead,
- PSKeyItem pTransmitKey,
- unsigned char *pbyHdrBuf,
- unsigned short wPayloadLen,
- unsigned char *pMICHDR
-);
-
static
void
s_vFillRTSHead(
@@ -127,7 +106,7 @@ s_vFillRTSHead(
unsigned int cbFrameLength,
bool bNeedAck,
bool bDisCRC,
- PSEthernetHeader psEthHeader,
+ struct ieee80211_hdr *hdr,
unsigned short wCurrentRate,
unsigned char byFBOption
);
@@ -144,26 +123,15 @@ s_vGenerateTxParameter(
unsigned int cbFrameSize,
bool bNeedACK,
unsigned int uDMAIdx,
- PSEthernetHeader psEthHeader,
+ void *psEthHeader,
unsigned short wCurrentRate
);
-static void s_vFillFragParameter(
- struct vnt_private *pDevice,
- unsigned char *pbyBuffer,
- unsigned int uTxType,
- void *pvtdCurr,
- unsigned short wFragType,
- unsigned int cbReqCount
-);
-
static unsigned int
s_cbFillTxBufHead(struct vnt_private *pDevice, unsigned char byPktType,
- unsigned char *pbyTxBufferAddr, unsigned int cbFrameBodySize,
+ unsigned char *pbyTxBufferAddr,
unsigned int uDMAIdx, PSTxDesc pHeadTD,
- PSEthernetHeader psEthHeader, unsigned char *pPacket,
- bool bNeedEncrypt, PSKeyItem pTransmitKey,
- unsigned int uNodeIndex, unsigned int *puMACfragNum);
+ unsigned int uNodeIndex);
static
__le16
@@ -178,165 +146,12 @@ s_uFillDataHead(
unsigned int cbLastFragmentSize,
unsigned int uMACfragNum,
unsigned char byFBOption,
- unsigned short wCurrentRate
+ unsigned short wCurrentRate,
+ bool is_pspoll
);
/*--------------------- Export Variables --------------------------*/
-static
-void
-s_vFillTxKey(
- struct vnt_private *pDevice,
- unsigned char *pbyBuf,
- unsigned char *pbyIVHead,
- PSKeyItem pTransmitKey,
- unsigned char *pbyHdrBuf,
- unsigned short wPayloadLen,
- unsigned char *pMICHDR
-)
-{
- struct vnt_mic_hdr *mic_hdr = (struct vnt_mic_hdr *)pMICHDR;
- unsigned long *pdwIV = (unsigned long *)pbyIVHead;
- unsigned long *pdwExtIV = (unsigned long *)((unsigned char *)pbyIVHead+4);
- PS802_11Header pMACHeader = (PS802_11Header)pbyHdrBuf;
- unsigned long dwRevIVCounter;
- unsigned char byKeyIndex = 0;
-
- //Fill TXKEY
- if (pTransmitKey == NULL)
- return;
-
- dwRevIVCounter = cpu_to_le32(pDevice->dwIVCounter);
- *pdwIV = pDevice->dwIVCounter;
- byKeyIndex = pTransmitKey->dwKeyIndex & 0xf;
-
- if (pTransmitKey->byCipherSuite == KEY_CTL_WEP) {
- if (pTransmitKey->uKeyLength == WLAN_WEP232_KEYLEN) {
- memcpy(pDevice->abyPRNG, (unsigned char *)&(dwRevIVCounter), 3);
- memcpy(pDevice->abyPRNG+3, pTransmitKey->abyKey, pTransmitKey->uKeyLength);
- } else {
- memcpy(pbyBuf, (unsigned char *)&(dwRevIVCounter), 3);
- memcpy(pbyBuf+3, pTransmitKey->abyKey, pTransmitKey->uKeyLength);
- if (pTransmitKey->uKeyLength == WLAN_WEP40_KEYLEN) {
- memcpy(pbyBuf+8, (unsigned char *)&(dwRevIVCounter), 3);
- memcpy(pbyBuf+11, pTransmitKey->abyKey, pTransmitKey->uKeyLength);
- }
- memcpy(pDevice->abyPRNG, pbyBuf, 16);
- }
- // Append IV after Mac Header
- *pdwIV &= WEP_IV_MASK;//00000000 11111111 11111111 11111111
- *pdwIV |= (unsigned long)byKeyIndex << 30;
- *pdwIV = cpu_to_le32(*pdwIV);
- pDevice->dwIVCounter++;
- if (pDevice->dwIVCounter > WEP_IV_MASK)
- pDevice->dwIVCounter = 0;
-
- } else if (pTransmitKey->byCipherSuite == KEY_CTL_TKIP) {
- pTransmitKey->wTSC15_0++;
- if (pTransmitKey->wTSC15_0 == 0)
- pTransmitKey->dwTSC47_16++;
-
- TKIPvMixKey(pTransmitKey->abyKey, pDevice->abyCurrentNetAddr,
- pTransmitKey->wTSC15_0, pTransmitKey->dwTSC47_16, pDevice->abyPRNG);
- memcpy(pbyBuf, pDevice->abyPRNG, 16);
- // Make IV
- memcpy(pdwIV, pDevice->abyPRNG, 3);
-
- *(pbyIVHead+3) = (unsigned char)(((byKeyIndex << 6) & 0xc0) | 0x20); // 0x20 is ExtIV
- // Append IV&ExtIV after Mac Header
- *pdwExtIV = cpu_to_le32(pTransmitKey->dwTSC47_16);
- pr_debug("vFillTxKey()---- pdwExtIV: %lx\n", *pdwExtIV);
-
- } else if (pTransmitKey->byCipherSuite == KEY_CTL_CCMP) {
- pTransmitKey->wTSC15_0++;
- if (pTransmitKey->wTSC15_0 == 0)
- pTransmitKey->dwTSC47_16++;
-
- memcpy(pbyBuf, pTransmitKey->abyKey, 16);
-
- // Make IV
- *pdwIV = 0;
- *(pbyIVHead+3) = (unsigned char)(((byKeyIndex << 6) & 0xc0) | 0x20); // 0x20 is ExtIV
- *pdwIV |= cpu_to_le16((unsigned short)(pTransmitKey->wTSC15_0));
- //Append IV&ExtIV after Mac Header
- *pdwExtIV = cpu_to_le32(pTransmitKey->dwTSC47_16);
-
- /* MICHDR0 */
- mic_hdr->id = 0x59;
- mic_hdr->tx_priority = 0;
- memcpy(mic_hdr->mic_addr2, pMACHeader->abyAddr2, ETH_ALEN);
-
- /* ccmp pn big endian order */
- mic_hdr->ccmp_pn[0] = (u8)(pTransmitKey->dwTSC47_16 >> 24);
- mic_hdr->ccmp_pn[1] = (u8)(pTransmitKey->dwTSC47_16 >> 16);
- mic_hdr->ccmp_pn[2] = (u8)(pTransmitKey->dwTSC47_16 >> 8);
- mic_hdr->ccmp_pn[3] = (u8)pTransmitKey->dwTSC47_16;
- mic_hdr->ccmp_pn[4] = (u8)(pTransmitKey->wTSC15_0 >> 8);
- mic_hdr->ccmp_pn[5] = (u8)pTransmitKey->wTSC15_0;
-
- /* MICHDR1 */
- mic_hdr->payload_len = cpu_to_be16(wPayloadLen);
-
- if (pDevice->bLongHeader)
- mic_hdr->hlen = cpu_to_be16(28);
- else
- mic_hdr->hlen = cpu_to_be16(22);
-
- memcpy(mic_hdr->addr1, pMACHeader->abyAddr1, ETH_ALEN);
- memcpy(mic_hdr->addr2, pMACHeader->abyAddr2, ETH_ALEN);
-
- /* MICHDR2 */
- memcpy(mic_hdr->addr3, pMACHeader->abyAddr3, ETH_ALEN);
- mic_hdr->frame_control =
- cpu_to_le16(pMACHeader->wFrameCtl & 0xc78f);
- mic_hdr->seq_ctrl = cpu_to_le16(pMACHeader->wSeqCtl & 0xf);
-
- if (pDevice->bLongHeader)
- memcpy(mic_hdr->addr4, pMACHeader->abyAddr4, ETH_ALEN);
- }
-}
-
-static
-void
-s_vSWencryption(
- struct vnt_private *pDevice,
- PSKeyItem pTransmitKey,
- unsigned char *pbyPayloadHead,
- unsigned short wPayloadSize
-)
-{
- unsigned int cbICVlen = 4;
- unsigned long dwICV = 0xFFFFFFFFL;
- unsigned long *pdwICV;
-
- if (pTransmitKey == NULL)
- return;
-
- if (pTransmitKey->byCipherSuite == KEY_CTL_WEP) {
- //=======================================================================
- // Append ICV after payload
- dwICV = CRCdwGetCrc32Ex(pbyPayloadHead, wPayloadSize, dwICV);//ICV(Payload)
- pdwICV = (unsigned long *)(pbyPayloadHead + wPayloadSize);
- // finally, we must invert dwCRC to get the correct answer
- *pdwICV = cpu_to_le32(~dwICV);
- // RC4 encryption
- rc4_init(&pDevice->SBox, pDevice->abyPRNG, pTransmitKey->uKeyLength + 3);
- rc4_encrypt(&pDevice->SBox, pbyPayloadHead, pbyPayloadHead, wPayloadSize+cbICVlen);
- //=======================================================================
- } else if (pTransmitKey->byCipherSuite == KEY_CTL_TKIP) {
- //=======================================================================
- //Append ICV after payload
- dwICV = CRCdwGetCrc32Ex(pbyPayloadHead, wPayloadSize, dwICV);//ICV(Payload)
- pdwICV = (unsigned long *)(pbyPayloadHead + wPayloadSize);
- // finally, we must invert dwCRC to get the correct answer
- *pdwICV = cpu_to_le32(~dwICV);
- // RC4 encryption
- rc4_init(&pDevice->SBox, pDevice->abyPRNG, TKIP_KEY_LEN);
- rc4_encrypt(&pDevice->SBox, pbyPayloadHead, pbyPayloadHead, wPayloadSize+cbICVlen);
- //=======================================================================
- }
-}
-
static __le16 vnt_time_stamp_off(struct vnt_private *priv, u16 rate)
{
return cpu_to_le16(wTimeStampOff[priv->byPreambleType % 2]
@@ -683,7 +498,8 @@ s_uFillDataHead(
unsigned int cbLastFragmentSize,
unsigned int uMACfragNum,
unsigned char byFBOption,
- unsigned short wCurrentRate
+ unsigned short wCurrentRate,
+ bool is_pspoll
)
{
@@ -702,15 +518,24 @@ s_uFillDataHead(
pDevice->byTopCCKBasicRate,
PK_TYPE_11B, &buf->b);
- /* Get Duration and TimeStamp */
- buf->duration_a = cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength,
- byPktType, wCurrentRate, bNeedAck, uFragIdx,
- cbLastFragmentSize, uMACfragNum,
- byFBOption));
- buf->duration_b = cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameLength,
- PK_TYPE_11B, pDevice->byTopCCKBasicRate,
- bNeedAck, uFragIdx, cbLastFragmentSize,
- uMACfragNum, byFBOption));
+ if (is_pspoll) {
+ __le16 dur = cpu_to_le16(pDevice->current_aid | BIT(14) | BIT(15));
+
+ buf->duration_a = dur;
+ buf->duration_b = dur;
+ } else {
+ /* Get Duration and TimeStamp */
+ buf->duration_a =
+ cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength,
+ byPktType, wCurrentRate, bNeedAck, uFragIdx,
+ cbLastFragmentSize, uMACfragNum,
+ byFBOption));
+ buf->duration_b =
+ cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameLength,
+ PK_TYPE_11B, pDevice->byTopCCKBasicRate,
+ bNeedAck, uFragIdx, cbLastFragmentSize,
+ uMACfragNum, byFBOption));
+ }
buf->time_stamp_off_a = vnt_time_stamp_off(pDevice, wCurrentRate);
buf->time_stamp_off_b = vnt_time_stamp_off(pDevice, pDevice->byTopCCKBasicRate);
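For a PS-Poll the Duration/ID field is not a NAV duration at all; it carries the encoded AID, so the s_uGetDataDuration() result is deliberately bypassed. The new branch reduces to (sketch):

	if (is_pspoll)
		buf->duration_a = buf->duration_b =
			cpu_to_le16(pDevice->current_aid | BIT(14) | BIT(15));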
@@ -764,11 +589,18 @@ s_uFillDataHead(
vnt_get_phy_field(pDevice, cbFrameLength, wCurrentRate,
byPktType, &buf->ab);
- /* Get Duration and TimeStampOff */
- buf->duration = cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength, byPktType,
+ if (is_pspoll) {
+ __le16 dur = cpu_to_le16(pDevice->current_aid | BIT(14) | BIT(15));
+
+ buf->duration = dur;
+ } else {
+ /* Get Duration and TimeStampOff */
+ buf->duration =
+ cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx,
cbLastFragmentSize, uMACfragNum,
byFBOption));
+ }
buf->time_stamp_off = vnt_time_stamp_off(pDevice, wCurrentRate);
return buf->duration;
@@ -778,17 +610,27 @@ s_uFillDataHead(
/* Get SignalField, ServiceField & Length */
vnt_get_phy_field(pDevice, cbFrameLength, wCurrentRate,
byPktType, &buf->ab);
- /* Get Duration and TimeStampOff */
- buf->duration = cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameLength, byPktType,
+
+ if (is_pspoll) {
+ __le16 dur = cpu_to_le16(pDevice->current_aid | BIT(14) | BIT(15));
+
+ buf->duration = dur;
+ } else {
+ /* Get Duration and TimeStampOff */
+ buf->duration =
+ cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx,
cbLastFragmentSize, uMACfragNum,
byFBOption));
+ }
+
buf->time_stamp_off = vnt_time_stamp_off(pDevice, wCurrentRate);
return buf->duration;
}
return 0;
}
+
static
void
s_vFillRTSHead(
@@ -798,7 +640,7 @@ s_vFillRTSHead(
unsigned int cbFrameLength,
bool bNeedAck,
bool bDisCRC,
- PSEthernetHeader psEthHeader,
+ struct ieee80211_hdr *hdr,
unsigned short wCurrentRate,
unsigned char byFBOption
)
@@ -850,18 +692,8 @@ s_vFillRTSHead(
cpu_to_le16(IEEE80211_FTYPE_CTL |
IEEE80211_STYPE_RTS);
-
- if ((pDevice->op_mode == NL80211_IFTYPE_ADHOC) ||
- (pDevice->op_mode == NL80211_IFTYPE_AP)) {
- memcpy(&buf->data.ra, psEthHeader->abyDstAddr, ETH_ALEN);
- } else {
- memcpy(&buf->data.ra, pDevice->abyBSSID, ETH_ALEN);
- }
- if (pDevice->op_mode == NL80211_IFTYPE_AP)
- memcpy(&buf->data.ta, pDevice->abyBSSID, ETH_ALEN);
- else
- memcpy(&buf->data.ta, psEthHeader->abySrcAddr, ETH_ALEN);
-
+ ether_addr_copy(buf->data.ra, hdr->addr1);
+ ether_addr_copy(buf->data.ta, hdr->addr2);
} else {
struct vnt_rts_g_fb *buf = pvRTS;
/* Get SignalField, ServiceField & Length */
@@ -914,19 +746,8 @@ s_vFillRTSHead(
cpu_to_le16(IEEE80211_FTYPE_CTL |
IEEE80211_STYPE_RTS);
-
- if ((pDevice->op_mode == NL80211_IFTYPE_ADHOC) ||
- (pDevice->op_mode == NL80211_IFTYPE_AP)) {
- memcpy(&buf->data.ra, psEthHeader->abyDstAddr, ETH_ALEN);
- } else {
- memcpy(&buf->data.ra, pDevice->abyBSSID, ETH_ALEN);
- }
-
- if (pDevice->op_mode == NL80211_IFTYPE_AP)
- memcpy(&buf->data.ta, pDevice->abyBSSID, ETH_ALEN);
- else
- memcpy(&buf->data.ta, psEthHeader->abySrcAddr, ETH_ALEN);
-
+ ether_addr_copy(buf->data.ra, hdr->addr1);
+ ether_addr_copy(buf->data.ta, hdr->addr2);
} // if (byFBOption == AUTO_FB_NONE)
} else if (byPktType == PK_TYPE_11A) {
if (byFBOption == AUTO_FB_NONE) {
@@ -947,19 +768,8 @@ s_vFillRTSHead(
cpu_to_le16(IEEE80211_FTYPE_CTL |
IEEE80211_STYPE_RTS);
-
- if ((pDevice->op_mode == NL80211_IFTYPE_ADHOC) ||
- (pDevice->op_mode == NL80211_IFTYPE_AP)) {
- memcpy(&buf->data.ra, psEthHeader->abyDstAddr, ETH_ALEN);
- } else {
- memcpy(&buf->data.ra, pDevice->abyBSSID, ETH_ALEN);
- }
-
- if (pDevice->op_mode == NL80211_IFTYPE_AP)
- memcpy(&buf->data.ta, pDevice->abyBSSID, ETH_ALEN);
- else
- memcpy(&buf->data.ta, psEthHeader->abySrcAddr, ETH_ALEN);
-
+ ether_addr_copy(buf->data.ra, hdr->addr1);
+ ether_addr_copy(buf->data.ta, hdr->addr2);
} else {
struct vnt_rts_a_fb *buf = pvRTS;
/* Get SignalField, ServiceField & Length */
@@ -988,16 +798,8 @@ s_vFillRTSHead(
cpu_to_le16(IEEE80211_FTYPE_CTL |
IEEE80211_STYPE_RTS);
- if ((pDevice->op_mode == NL80211_IFTYPE_ADHOC) ||
- (pDevice->op_mode == NL80211_IFTYPE_AP)) {
- memcpy(&buf->data.ra, psEthHeader->abyDstAddr, ETH_ALEN);
- } else {
- memcpy(&buf->data.ra, pDevice->abyBSSID, ETH_ALEN);
- }
- if (pDevice->op_mode == NL80211_IFTYPE_AP)
- memcpy(&buf->data.ta, pDevice->abyBSSID, ETH_ALEN);
- else
- memcpy(&buf->data.ta, psEthHeader->abySrcAddr, ETH_ALEN);
+ ether_addr_copy(buf->data.ra, hdr->addr1);
+ ether_addr_copy(buf->data.ta, hdr->addr2);
}
} else if (byPktType == PK_TYPE_11B) {
struct vnt_rts_ab *buf = pvRTS;
@@ -1016,17 +818,8 @@ s_vFillRTSHead(
buf->data.frame_control =
cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS);
- if ((pDevice->op_mode == NL80211_IFTYPE_ADHOC) ||
- (pDevice->op_mode == NL80211_IFTYPE_AP)) {
- memcpy(&buf->data.ra, psEthHeader->abyDstAddr, ETH_ALEN);
- } else {
- memcpy(&buf->data.ra, pDevice->abyBSSID, ETH_ALEN);
- }
-
- if (pDevice->op_mode == NL80211_IFTYPE_AP)
- memcpy(&buf->data.ta, pDevice->abyBSSID, ETH_ALEN);
- else
- memcpy(&buf->data.ta, psEthHeader->abySrcAddr, ETH_ALEN);
+ ether_addr_copy(buf->data.ra, hdr->addr1);
+ ether_addr_copy(buf->data.ta, hdr->addr2);
}
}
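Because mac80211 now hands the driver a fully built 802.11 header, the RTS receiver and transmitter addresses come straight from addr1/addr2 of the protected frame, replacing the old op_mode-dependent choice between BSSID and DA/SA. A sketch assuming <linux/ieee80211.h> and <linux/etherdevice.h>:

	static void example_fill_rts_addrs(struct ieee80211_rts *rts,
					   const struct ieee80211_hdr *hdr)
	{
		ether_addr_copy(rts->ra, hdr->addr1);	/* RA of the frame being protected */
		ether_addr_copy(rts->ta, hdr->addr2);	/* TA of the frame being protected */
	}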
@@ -1093,7 +886,8 @@ s_vFillCTSHead(
buf->reserved2 = 0x0;
- memcpy(&buf->data.ra, pDevice->abyCurrentNetAddr, ETH_ALEN);
+ ether_addr_copy(buf->data.ra,
+ pDevice->abyCurrentNetAddr);
} else { //if (byFBOption != AUTO_FB_NONE && uDMAIdx != TYPE_ATIMDMA && uDMAIdx != TYPE_BEACONDMA)
struct vnt_cts *buf = pvCTS;
/* Get SignalField, ServiceField & Length */
@@ -1116,7 +910,8 @@ s_vFillCTSHead(
IEEE80211_STYPE_CTS);
buf->reserved2 = 0x0;
- memcpy(&buf->data.ra, pDevice->abyCurrentNetAddr, ETH_ALEN);
+ ether_addr_copy(buf->data.ra,
+ pDevice->abyCurrentNetAddr);
}
}
}
@@ -1156,11 +951,10 @@ s_vGenerateTxParameter(
unsigned int cbFrameSize,
bool bNeedACK,
unsigned int uDMAIdx,
- PSEthernetHeader psEthHeader,
+ void *psEthHeader,
unsigned short wCurrentRate
)
{
- unsigned int cbMACHdLen = WLAN_HDR_ADDR3_LEN; //24
unsigned short wFifoCtl;
bool bDisCRC = false;
unsigned char byFBOption = AUTO_FB_NONE;
@@ -1178,9 +972,6 @@ s_vGenerateTxParameter(
else if (wFifoCtl & FIFOCTL_AUTO_FB_1)
byFBOption = AUTO_FB_1;
- if (pDevice->bLongHeader)
- cbMACHdLen = WLAN_HDR_ADDR3_LEN + 6;
-
if (!pvRrvTime)
return;
@@ -1237,90 +1028,30 @@ s_vGenerateTxParameter(
}
}
-static
-void
-s_vFillFragParameter(
- struct vnt_private *pDevice,
- unsigned char *pbyBuffer,
- unsigned int uTxType,
- void *pvtdCurr,
- unsigned short wFragType,
- unsigned int cbReqCount
-)
-{
- PSTxBufHead pTxBufHead = (PSTxBufHead) pbyBuffer;
-
- if (uTxType == TYPE_SYNCDMA) {
- PSTxSyncDesc ptdCurr = (PSTxSyncDesc)pvtdCurr;
-
- //Set FIFOCtl & TimeStamp in TxSyncDesc
- ptdCurr->m_wFIFOCtl = pTxBufHead->wFIFOCtl;
- ptdCurr->m_wTimeStamp = pTxBufHead->wTimeStamp;
- //Set TSR1 & ReqCount in TxDescHead
- ptdCurr->m_td1TD1.wReqCount = cpu_to_le16((unsigned short)(cbReqCount));
- if (wFragType == FRAGCTL_ENDFRAG) //Last Fragmentation
- ptdCurr->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP | EDMSDU);
- else
- ptdCurr->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP);
- } else {
- PSTxDesc ptdCurr = (PSTxDesc)pvtdCurr;
- //Set TSR1 & ReqCount in TxDescHead
- ptdCurr->m_td1TD1.wReqCount = cpu_to_le16((unsigned short)(cbReqCount));
- if (wFragType == FRAGCTL_ENDFRAG) //Last Fragmentation
- ptdCurr->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP | EDMSDU);
- else
- ptdCurr->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP);
- }
-
- pTxBufHead->wFragCtl |= (unsigned short)wFragType;//0x0001; //0000 0000 0000 0001
-}
-
static unsigned int
s_cbFillTxBufHead(struct vnt_private *pDevice, unsigned char byPktType,
- unsigned char *pbyTxBufferAddr, unsigned int cbFrameBodySize,
+ unsigned char *pbyTxBufferAddr,
unsigned int uDMAIdx, PSTxDesc pHeadTD,
- PSEthernetHeader psEthHeader, unsigned char *pPacket,
- bool bNeedEncrypt, PSKeyItem pTransmitKey,
- unsigned int uNodeIndex, unsigned int *puMACfragNum)
+ unsigned int is_pspoll)
{
- unsigned int cbMACHdLen;
+ PDEVICE_TD_INFO td_info = pHeadTD->pTDInfo;
+ struct sk_buff *skb = td_info->skb;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct vnt_tx_fifo_head *tx_buffer_head =
+ (struct vnt_tx_fifo_head *)td_info->buf;
+ u16 fifo_ctl = le16_to_cpu(tx_buffer_head->fifo_ctl);
unsigned int cbFrameSize;
- unsigned int cbFragmentSize; //Hdr+(IV)+payoad+(MIC)+(ICV)+FCS
- unsigned int cbFragPayloadSize;
- unsigned int cbLastFragmentSize; //Hdr+(IV)+payoad+(MIC)+(ICV)+FCS
- unsigned int cbLastFragPayloadSize;
- unsigned int uFragIdx;
- unsigned char *pbyPayloadHead;
- unsigned char *pbyIVHead;
- unsigned char *pbyMacHdr;
- unsigned short wFragType; //00:Non-Frag, 01:Start, 10:Mid, 11:Last
__le16 uDuration;
unsigned char *pbyBuffer;
- unsigned int cbIVlen = 0;
- unsigned int cbICVlen = 0;
- unsigned int cbMIClen = 0;
- unsigned int cbFCSlen = 4;
- unsigned int cb802_1_H_len = 0;
unsigned int uLength = 0;
- unsigned int uTmpLen = 0;
unsigned int cbMICHDR = 0;
- u32 dwMICKey0, dwMICKey1;
- u32 dwMIC_Priority;
- u32 *pdwMIC_L;
- u32 *pdwMIC_R;
- u32 dwSafeMIC_L, dwSafeMIC_R; /* Fix "Last Frag Size" < "MIC length". */
- bool bMIC2Frag = false;
- unsigned int uMICFragLen = 0;
unsigned int uMACfragNum = 1;
unsigned int uPadding = 0;
unsigned int cbReqCount = 0;
-
- bool bNeedACK;
- bool bRTS;
- bool bIsAdhoc;
- unsigned char *pbyType;
+ bool bNeedACK = (bool)(fifo_ctl & FIFOCTL_NEEDACK);
+ bool bRTS = (bool)(fifo_ctl & FIFOCTL_RTS);
PSTxDesc ptdCurr;
- PSTxBufHead psTxBufHd = (PSTxBufHead) pbyTxBufferAddr;
unsigned int cbHeaderLength = 0;
void *pvRrvTime;
struct vnt_mic_hdr *pMICHDR;
@@ -1328,72 +1059,35 @@ s_cbFillTxBufHead(struct vnt_private *pDevice, unsigned char byPktType,
void *pvCTS;
void *pvTxDataHd;
unsigned short wTxBufSize; // FFinfo size
- unsigned int uTotalCopyLength = 0;
unsigned char byFBOption = AUTO_FB_NONE;
- bool bIsWEP256 = false;
- PSMgmtObject pMgmt = pDevice->pMgmt;
pvRrvTime = pMICHDR = pvRTS = pvCTS = pvTxDataHd = NULL;
- if ((pDevice->op_mode == NL80211_IFTYPE_ADHOC) ||
- (pDevice->op_mode == NL80211_IFTYPE_AP)) {
- if (is_multicast_ether_addr(&(psEthHeader->abyDstAddr[0])))
- bNeedACK = false;
- else
- bNeedACK = true;
- bIsAdhoc = true;
- } else {
- // MSDUs in Infra mode always need ACK
- bNeedACK = true;
- bIsAdhoc = false;
- }
+ cbFrameSize = skb->len + 4;
- if (pDevice->bLongHeader)
- cbMACHdLen = WLAN_HDR_ADDR3_LEN + 6;
- else
- cbMACHdLen = WLAN_HDR_ADDR3_LEN;
-
- if ((bNeedEncrypt == true) && (pTransmitKey != NULL)) {
- if (pTransmitKey->byCipherSuite == KEY_CTL_WEP) {
- cbIVlen = 4;
- cbICVlen = 4;
- if (pTransmitKey->uKeyLength == WLAN_WEP232_KEYLEN)
- bIsWEP256 = true;
- }
- if (pTransmitKey->byCipherSuite == KEY_CTL_TKIP) {
- cbIVlen = 8;//IV+ExtIV
- cbMIClen = 8;
- cbICVlen = 4;
- }
- if (pTransmitKey->byCipherSuite == KEY_CTL_CCMP) {
- cbIVlen = 8;//RSN Header
- cbICVlen = 8;//MIC
+ if (info->control.hw_key) {
+ switch (info->control.hw_key->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
cbMICHDR = sizeof(struct vnt_mic_hdr);
+ default:
+ break;
}
+
+ cbFrameSize += info->control.hw_key->icv_len;
+
if (pDevice->byLocalID > REV_ID_VT3253_A1) {
//MAC Header should be padding 0 to DW alignment.
- uPadding = 4 - (cbMACHdLen%4);
+ uPadding = 4 - (ieee80211_get_hdrlen_from_skb(skb) % 4);
uPadding %= 4;
}
}
- cbFrameSize = cbMACHdLen + cbIVlen + (cbFrameBodySize + cbMIClen) + cbICVlen + cbFCSlen;
-
- if ((bNeedACK == false) ||
- (cbFrameSize < pDevice->wRTSThreshold) ||
- ((cbFrameSize >= pDevice->wFragmentationThreshold) && (pDevice->wFragmentationThreshold <= pDevice->wRTSThreshold))
-) {
- bRTS = false;
- } else {
- bRTS = true;
- psTxBufHd->wFIFOCtl |= (FIFOCTL_RTS | FIFOCTL_LRETRY);
- }
//
// Use for AUTO FALL BACK
//
- if (psTxBufHd->wFIFOCtl & FIFOCTL_AUTO_FB_0)
+ if (fifo_ctl & FIFOCTL_AUTO_FB_0)
byFBOption = AUTO_FB_0;
- else if (psTxBufHd->wFIFOCtl & FIFOCTL_AUTO_FB_1)
+ else if (fifo_ctl & FIFOCTL_AUTO_FB_1)
byFBOption = AUTO_FB_1;
//////////////////////////////////////////////////////
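
Earlier in this hunk the MAC-header padding is no longer derived from a cached cbMACHdLen; it is recomputed from the skb with the same cfg80211 helper the patch uses. A small sketch of that alignment rule (hypothetical helper name):

#include <net/cfg80211.h>

static unsigned int example_dword_pad(const struct sk_buff *skb)
{
	unsigned int hdrlen = ieee80211_get_hdrlen_from_skb(skb);

	/* Pad the 802.11 header to the next 4-byte (DWORD) boundary
	 * before the IV; yields 0..3 bytes, as uPadding does above. */
	return (4 - (hdrlen % 4)) % 4;
}
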
@@ -1487,1477 +1181,345 @@ s_cbFillTxBufHead(struct vnt_private *pDevice, unsigned char byPktType,
}
} // Auto Fall Back
}
- memset((void *)(pbyTxBufferAddr + wTxBufSize), 0, (cbHeaderLength - wTxBufSize));
-
-//////////////////////////////////////////////////////////////////
- if ((bNeedEncrypt == true) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
- if (pDevice->pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) {
- dwMICKey0 = *(u32 *)(&pTransmitKey->abyKey[16]);
- dwMICKey1 = *(u32 *)(&pTransmitKey->abyKey[20]);
- } else if ((pTransmitKey->dwKeyIndex & AUTHENTICATOR_KEY) != 0) {
- dwMICKey0 = *(u32 *)(&pTransmitKey->abyKey[16]);
- dwMICKey1 = *(u32 *)(&pTransmitKey->abyKey[20]);
- } else {
- dwMICKey0 = *(u32 *)(&pTransmitKey->abyKey[24]);
- dwMICKey1 = *(u32 *)(&pTransmitKey->abyKey[28]);
- }
- // DO Software Michael
- MIC_vInit(dwMICKey0, dwMICKey1);
- MIC_vAppend((unsigned char *)&(psEthHeader->abyDstAddr[0]), 12);
- dwMIC_Priority = 0;
- MIC_vAppend((unsigned char *)&dwMIC_Priority, 4);
- pr_debug("MIC KEY: %X, %X\n", dwMICKey0, dwMICKey1);
- }
-
-///////////////////////////////////////////////////////////////////
-
- pbyMacHdr = (unsigned char *)(pbyTxBufferAddr + cbHeaderLength);
- pbyPayloadHead = (unsigned char *)(pbyMacHdr + cbMACHdLen + uPadding + cbIVlen);
- pbyIVHead = (unsigned char *)(pbyMacHdr + cbMACHdLen + uPadding);
-
- if ((cbFrameSize > pDevice->wFragmentationThreshold) && (bNeedACK == true) && (bIsWEP256 == false)) {
- // Fragmentation
- // FragThreshold = Fragment size(Hdr+(IV)+fragment payload+(MIC)+(ICV)+FCS)
- cbFragmentSize = pDevice->wFragmentationThreshold;
- cbFragPayloadSize = cbFragmentSize - cbMACHdLen - cbIVlen - cbICVlen - cbFCSlen;
- //FragNum = (FrameSize-(Hdr+FCS))/(Fragment Size -(Hrd+FCS)))
- uMACfragNum = (unsigned short) ((cbFrameBodySize + cbMIClen) / cbFragPayloadSize);
- cbLastFragPayloadSize = (cbFrameBodySize + cbMIClen) % cbFragPayloadSize;
- if (cbLastFragPayloadSize == 0)
- cbLastFragPayloadSize = cbFragPayloadSize;
- else
- uMACfragNum++;
-
- //[Hdr+(IV)+last fragment payload+(MIC)+(ICV)+FCS]
- cbLastFragmentSize = cbMACHdLen + cbLastFragPayloadSize + cbIVlen + cbICVlen + cbFCSlen;
-
- for (uFragIdx = 0; uFragIdx < uMACfragNum; uFragIdx++) {
- if (uFragIdx == 0) {
- //=========================
- // Start Fragmentation
- //=========================
- pr_debug("Start Fragmentation...\n");
- wFragType = FRAGCTL_STAFRAG;
-
- //Fill FIFO,RrvTime,RTS,and CTS
- s_vGenerateTxParameter(pDevice, byPktType, (void *)psTxBufHd, pvRrvTime, pvRTS, pvCTS,
- cbFragmentSize, bNeedACK, uDMAIdx, psEthHeader, pDevice->wCurrentRate);
- //Fill DataHead
- uDuration = s_uFillDataHead(pDevice, byPktType, pvTxDataHd, cbFragmentSize, uDMAIdx, bNeedACK,
- uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption, pDevice->wCurrentRate);
- // Generate TX MAC Header
- vGenerateMACHeader(pDevice, pbyMacHdr, uDuration, psEthHeader, bNeedEncrypt,
- wFragType, uDMAIdx, uFragIdx);
-
- if (bNeedEncrypt == true) {
- //Fill TXKEY
- s_vFillTxKey(pDevice, (unsigned char *)(psTxBufHd->adwTxKey), pbyIVHead, pTransmitKey,
- pbyMacHdr, (unsigned short)cbFragPayloadSize, (unsigned char *)pMICHDR);
- //Fill IV(ExtIV,RSNHDR)
- if (pDevice->bEnableHostWEP) {
- pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16 = pTransmitKey->dwTSC47_16;
- pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0 = pTransmitKey->wTSC15_0;
- }
- }
-
- // 802.1H
- if (ntohs(psEthHeader->wType) > ETH_DATA_LEN) {
- if ((psEthHeader->wType == TYPE_PKT_IPX) ||
- (psEthHeader->wType == cpu_to_le16(0xF380))) {
- memcpy((unsigned char *)(pbyPayloadHead), &pDevice->abySNAP_Bridgetunnel[0], 6);
- } else {
- memcpy((unsigned char *)(pbyPayloadHead), &pDevice->abySNAP_RFC1042[0], 6);
- }
- pbyType = (unsigned char *)(pbyPayloadHead + 6);
- memcpy(pbyType, &(psEthHeader->wType), sizeof(unsigned short));
- cb802_1_H_len = 8;
- }
-
- cbReqCount = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen + cbFragPayloadSize;
- //---------------------------
- // S/W or H/W Encryption
- //---------------------------
- pbyBuffer = (unsigned char *)pHeadTD->pTDInfo->buf;
-
- uLength = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen + cb802_1_H_len;
- //copy TxBufferHeader + MacHeader to desc
- memcpy(pbyBuffer, (void *)psTxBufHd, uLength);
-
- // Copy the Packet into a tx Buffer
- memcpy((pbyBuffer + uLength), (pPacket + 14), (cbFragPayloadSize - cb802_1_H_len));
-
- uTotalCopyLength += cbFragPayloadSize - cb802_1_H_len;
-
- if ((bNeedEncrypt == true) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
- pr_debug("Start MIC: %d\n",
- cbFragPayloadSize);
- MIC_vAppend((pbyBuffer + uLength - cb802_1_H_len), cbFragPayloadSize);
-
- }
-
- //---------------------------
- // S/W Encryption
- //---------------------------
- if ((pDevice->byLocalID <= REV_ID_VT3253_A1)) {
- if (bNeedEncrypt) {
- s_vSWencryption(pDevice, pTransmitKey, (pbyBuffer + uLength - cb802_1_H_len), (unsigned short)cbFragPayloadSize);
- cbReqCount += cbICVlen;
- }
- }
-
- ptdCurr = (PSTxDesc)pHeadTD;
- //--------------------
- //1.Set TSR1 & ReqCount in TxDescHead
- //2.Set FragCtl in TxBufferHead
- //3.Set Frame Control
- //4.Set Sequence Control
- //5.Get S/W generate FCS
- //--------------------
- s_vFillFragParameter(pDevice, pbyBuffer, uDMAIdx, (void *)ptdCurr, wFragType, cbReqCount);
-
- ptdCurr->pTDInfo->dwReqCount = cbReqCount - uPadding;
- ptdCurr->pTDInfo->dwHeaderLength = cbHeaderLength;
- ptdCurr->pTDInfo->skb_dma = ptdCurr->pTDInfo->buf_dma;
- ptdCurr->buff_addr = cpu_to_le32(ptdCurr->pTDInfo->skb_dma);
- pDevice->iTDUsed[uDMAIdx]++;
- pHeadTD = ptdCurr->next;
- } else if (uFragIdx == (uMACfragNum-1)) {
- //=========================
- // Last Fragmentation
- //=========================
- pr_debug("Last Fragmentation...\n");
-
- wFragType = FRAGCTL_ENDFRAG;
-
- //Fill FIFO,RrvTime,RTS,and CTS
- s_vGenerateTxParameter(pDevice, byPktType, (void *)psTxBufHd, pvRrvTime, pvRTS, pvCTS,
- cbLastFragmentSize, bNeedACK, uDMAIdx, psEthHeader, pDevice->wCurrentRate);
- //Fill DataHead
- uDuration = s_uFillDataHead(pDevice, byPktType, pvTxDataHd, cbLastFragmentSize, uDMAIdx, bNeedACK,
- uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption, pDevice->wCurrentRate);
-
- // Generate TX MAC Header
- vGenerateMACHeader(pDevice, pbyMacHdr, uDuration, psEthHeader, bNeedEncrypt,
- wFragType, uDMAIdx, uFragIdx);
-
- if (bNeedEncrypt == true) {
- //Fill TXKEY
- s_vFillTxKey(pDevice, (unsigned char *)(psTxBufHd->adwTxKey), pbyIVHead, pTransmitKey,
- pbyMacHdr, (unsigned short)cbLastFragPayloadSize, (unsigned char *)pMICHDR);
-
- if (pDevice->bEnableHostWEP) {
- pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16 = pTransmitKey->dwTSC47_16;
- pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0 = pTransmitKey->wTSC15_0;
- }
-
- }
-
- cbReqCount = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen + cbLastFragPayloadSize;
- //---------------------------
- // S/W or H/W Encryption
- //---------------------------
-
- pbyBuffer = (unsigned char *)pHeadTD->pTDInfo->buf;
-
- uLength = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen;
-
- //copy TxBufferHeader + MacHeader to desc
- memcpy(pbyBuffer, (void *)psTxBufHd, uLength);
-
- // Copy the Packet into a tx Buffer
- if (bMIC2Frag == false) {
- memcpy((pbyBuffer + uLength),
- (pPacket + 14 + uTotalCopyLength),
- (cbLastFragPayloadSize - cbMIClen)
-);
- //TODO check uTmpLen !
- uTmpLen = cbLastFragPayloadSize - cbMIClen;
-
- }
- if ((bNeedEncrypt == true) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
- pr_debug("LAST: uMICFragLen:%d, cbLastFragPayloadSize:%d, uTmpLen:%d\n",
- uMICFragLen,
- cbLastFragPayloadSize,
- uTmpLen);
-
- if (bMIC2Frag == false) {
- if (uTmpLen != 0)
- MIC_vAppend((pbyBuffer + uLength), uTmpLen);
- pdwMIC_L = (u32 *)(pbyBuffer + uLength + uTmpLen);
- pdwMIC_R = (u32 *)(pbyBuffer + uLength + uTmpLen + 4);
- MIC_vGetMIC(pdwMIC_L, pdwMIC_R);
- pr_debug("Last MIC:%X, %X\n",
- *pdwMIC_L, *pdwMIC_R);
- } else {
- if (uMICFragLen >= 4) {
- memcpy((pbyBuffer + uLength), ((unsigned char *)&dwSafeMIC_R + (uMICFragLen - 4)),
- (cbMIClen - uMICFragLen));
- pr_debug("LAST: uMICFragLen >= 4: %X, %d\n",
- *(unsigned char *)((unsigned char *)&dwSafeMIC_R + (uMICFragLen - 4)),
- (cbMIClen - uMICFragLen));
-
- } else {
- memcpy((pbyBuffer + uLength), ((unsigned char *)&dwSafeMIC_L + uMICFragLen),
- (4 - uMICFragLen));
- memcpy((pbyBuffer + uLength + (4 - uMICFragLen)), &dwSafeMIC_R, 4);
- pr_debug("LAST: uMICFragLen < 4: %X, %d\n",
- *(unsigned char *)((unsigned char *)&dwSafeMIC_R + uMICFragLen - 4),
- (cbMIClen - uMICFragLen));
- }
- }
- MIC_vUnInit();
- } else {
- ASSERT(uTmpLen == (cbLastFragPayloadSize - cbMIClen));
- }
-
- //---------------------------
- // S/W Encryption
- //---------------------------
- if ((pDevice->byLocalID <= REV_ID_VT3253_A1)) {
- if (bNeedEncrypt) {
- s_vSWencryption(pDevice, pTransmitKey, (pbyBuffer + uLength), (unsigned short)cbLastFragPayloadSize);
- cbReqCount += cbICVlen;
- }
- }
-
- ptdCurr = (PSTxDesc)pHeadTD;
-
- //--------------------
- //1.Set TSR1 & ReqCount in TxDescHead
- //2.Set FragCtl in TxBufferHead
- //3.Set Frame Control
- //4.Set Sequence Control
- //5.Get S/W generate FCS
- //--------------------
-
- s_vFillFragParameter(pDevice, pbyBuffer, uDMAIdx, (void *)ptdCurr, wFragType, cbReqCount);
-
- ptdCurr->pTDInfo->dwReqCount = cbReqCount - uPadding;
- ptdCurr->pTDInfo->dwHeaderLength = cbHeaderLength;
- ptdCurr->pTDInfo->skb_dma = ptdCurr->pTDInfo->buf_dma;
- ptdCurr->buff_addr = cpu_to_le32(ptdCurr->pTDInfo->skb_dma);
- pDevice->iTDUsed[uDMAIdx]++;
- pHeadTD = ptdCurr->next;
-
- } else {
- //=========================
- // Middle Fragmentation
- //=========================
- pr_debug("Middle Fragmentation...\n");
-
- wFragType = FRAGCTL_MIDFRAG;
-
- //Fill FIFO,RrvTime,RTS,and CTS
- s_vGenerateTxParameter(pDevice, byPktType, (void *)psTxBufHd, pvRrvTime, pvRTS, pvCTS,
- cbFragmentSize, bNeedACK, uDMAIdx, psEthHeader, pDevice->wCurrentRate);
- //Fill DataHead
- uDuration = s_uFillDataHead(pDevice, byPktType, pvTxDataHd, cbFragmentSize, uDMAIdx, bNeedACK,
- uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption, pDevice->wCurrentRate);
-
- // Generate TX MAC Header
- vGenerateMACHeader(pDevice, pbyMacHdr, uDuration, psEthHeader, bNeedEncrypt,
- wFragType, uDMAIdx, uFragIdx);
-
- if (bNeedEncrypt == true) {
- //Fill TXKEY
- s_vFillTxKey(pDevice, (unsigned char *)(psTxBufHd->adwTxKey), pbyIVHead, pTransmitKey,
- pbyMacHdr, (unsigned short)cbFragPayloadSize, (unsigned char *)pMICHDR);
-
- if (pDevice->bEnableHostWEP) {
- pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16 = pTransmitKey->dwTSC47_16;
- pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0 = pTransmitKey->wTSC15_0;
- }
- }
-
- cbReqCount = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen + cbFragPayloadSize;
- //---------------------------
- // S/W or H/W Encryption
- //---------------------------
-
- pbyBuffer = (unsigned char *)pHeadTD->pTDInfo->buf;
- uLength = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen;
-
- //copy TxBufferHeader + MacHeader to desc
- memcpy(pbyBuffer, (void *)psTxBufHd, uLength);
-
- // Copy the Packet into a tx Buffer
- memcpy((pbyBuffer + uLength),
- (pPacket + 14 + uTotalCopyLength),
- cbFragPayloadSize
-);
- uTmpLen = cbFragPayloadSize;
-
- uTotalCopyLength += uTmpLen;
-
- if ((bNeedEncrypt == true) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
- MIC_vAppend((pbyBuffer + uLength), uTmpLen);
-
- if (uTmpLen < cbFragPayloadSize) {
- bMIC2Frag = true;
- uMICFragLen = cbFragPayloadSize - uTmpLen;
- ASSERT(uMICFragLen < cbMIClen);
-
- pdwMIC_L = (u32 *)(pbyBuffer + uLength + uTmpLen);
- pdwMIC_R = (u32 *)(pbyBuffer + uLength + uTmpLen + 4);
- MIC_vGetMIC(pdwMIC_L, pdwMIC_R);
- dwSafeMIC_L = *pdwMIC_L;
- dwSafeMIC_R = *pdwMIC_R;
-
- pr_debug("MIDDLE: uMICFragLen:%d, cbFragPayloadSize:%d, uTmpLen:%d\n",
- uMICFragLen,
- cbFragPayloadSize,
- uTmpLen);
- pr_debug("Fill MIC in Middle frag [%d]\n",
- uMICFragLen);
- pr_debug("Get MIC:%X, %X\n",
- *pdwMIC_L, *pdwMIC_R);
- }
- pr_debug("Middle frag len: %d\n",
- uTmpLen);
-
- } else {
- ASSERT(uTmpLen == (cbFragPayloadSize));
- }
-
- if ((pDevice->byLocalID <= REV_ID_VT3253_A1)) {
- if (bNeedEncrypt) {
- s_vSWencryption(pDevice, pTransmitKey, (pbyBuffer + uLength), (unsigned short)cbFragPayloadSize);
- cbReqCount += cbICVlen;
- }
- }
-
- ptdCurr = (PSTxDesc)pHeadTD;
-
- //--------------------
- //1.Set TSR1 & ReqCount in TxDescHead
- //2.Set FragCtl in TxBufferHead
- //3.Set Frame Control
- //4.Set Sequence Control
- //5.Get S/W generate FCS
- //--------------------
-
- s_vFillFragParameter(pDevice, pbyBuffer, uDMAIdx, (void *)ptdCurr, wFragType, cbReqCount);
-
- ptdCurr->pTDInfo->dwReqCount = cbReqCount - uPadding;
- ptdCurr->pTDInfo->dwHeaderLength = cbHeaderLength;
- ptdCurr->pTDInfo->skb_dma = ptdCurr->pTDInfo->buf_dma;
- ptdCurr->buff_addr = cpu_to_le32(ptdCurr->pTDInfo->skb_dma);
- pDevice->iTDUsed[uDMAIdx]++;
- pHeadTD = ptdCurr->next;
- }
- } // for (uMACfragNum)
- } else {
- //=========================
- // No Fragmentation
- //=========================
- wFragType = FRAGCTL_NONFRAG;
-
- //Set FragCtl in TxBufferHead
- psTxBufHd->wFragCtl |= (unsigned short)wFragType;
-
- //Fill FIFO,RrvTime,RTS,and CTS
- s_vGenerateTxParameter(pDevice, byPktType, (void *)psTxBufHd, pvRrvTime, pvRTS, pvCTS,
- cbFrameSize, bNeedACK, uDMAIdx, psEthHeader, pDevice->wCurrentRate);
- //Fill DataHead
- uDuration = s_uFillDataHead(pDevice, byPktType, pvTxDataHd, cbFrameSize, uDMAIdx, bNeedACK,
- 0, 0, uMACfragNum, byFBOption, pDevice->wCurrentRate);
-
- // Generate TX MAC Header
- vGenerateMACHeader(pDevice, pbyMacHdr, uDuration, psEthHeader, bNeedEncrypt,
- wFragType, uDMAIdx, 0);
-
- if (bNeedEncrypt == true) {
- //Fill TXKEY
- s_vFillTxKey(pDevice, (unsigned char *)(psTxBufHd->adwTxKey), pbyIVHead, pTransmitKey,
- pbyMacHdr, (unsigned short)cbFrameBodySize, (unsigned char *)pMICHDR);
-
- if (pDevice->bEnableHostWEP) {
- pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16 = pTransmitKey->dwTSC47_16;
- pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0 = pTransmitKey->wTSC15_0;
- }
- }
-
- // 802.1H
- if (ntohs(psEthHeader->wType) > ETH_DATA_LEN) {
- if ((psEthHeader->wType == TYPE_PKT_IPX) ||
- (psEthHeader->wType == cpu_to_le16(0xF380))) {
- memcpy((unsigned char *)(pbyPayloadHead), &pDevice->abySNAP_Bridgetunnel[0], 6);
- } else {
- memcpy((unsigned char *)(pbyPayloadHead), &pDevice->abySNAP_RFC1042[0], 6);
- }
- pbyType = (unsigned char *)(pbyPayloadHead + 6);
- memcpy(pbyType, &(psEthHeader->wType), sizeof(unsigned short));
- cb802_1_H_len = 8;
- }
-
- cbReqCount = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen + (cbFrameBodySize + cbMIClen);
- //---------------------------
- // S/W or H/W Encryption
- //---------------------------
- pbyBuffer = (unsigned char *)pHeadTD->pTDInfo->buf;
- uLength = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen + cb802_1_H_len;
- //copy TxBufferHeader + MacHeader to desc
- memcpy(pbyBuffer, (void *)psTxBufHd, uLength);
+ td_info->mic_hdr = pMICHDR;
- // Copy the Packet into a tx Buffer
- memcpy((pbyBuffer + uLength),
- (pPacket + 14),
- cbFrameBodySize - cb802_1_H_len
-);
-
- if ((bNeedEncrypt == true) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
- pr_debug("Length:%d, %d\n",
- cbFrameBodySize - cb802_1_H_len, uLength);
-
- MIC_vAppend((pbyBuffer + uLength - cb802_1_H_len), cbFrameBodySize);
-
- pdwMIC_L = (u32 *)(pbyBuffer + uLength - cb802_1_H_len + cbFrameBodySize);
- pdwMIC_R = (u32 *)(pbyBuffer + uLength - cb802_1_H_len + cbFrameBodySize + 4);
-
- MIC_vGetMIC(pdwMIC_L, pdwMIC_R);
- MIC_vUnInit();
-
- if (pDevice->bTxMICFail == true) {
- *pdwMIC_L = 0;
- *pdwMIC_R = 0;
- pDevice->bTxMICFail = false;
- }
+ memset((void *)(pbyTxBufferAddr + wTxBufSize), 0, (cbHeaderLength - wTxBufSize));
- pr_debug("uLength: %d, %d\n", uLength, cbFrameBodySize);
- pr_debug("cbReqCount:%d, %d, %d, %d\n",
- cbReqCount, cbHeaderLength, uPadding, cbIVlen);
- pr_debug("MIC:%x, %x\n", *pdwMIC_L, *pdwMIC_R);
+	/* Fill FIFO, RrvTime, RTS, and CTS */
+ s_vGenerateTxParameter(pDevice, byPktType, tx_buffer_head, pvRrvTime, pvRTS, pvCTS,
+ cbFrameSize, bNeedACK, uDMAIdx, hdr, pDevice->wCurrentRate);
+ /* Fill DataHead */
+ uDuration = s_uFillDataHead(pDevice, byPktType, pvTxDataHd, cbFrameSize, uDMAIdx, bNeedACK,
+ 0, 0, uMACfragNum, byFBOption, pDevice->wCurrentRate, is_pspoll);
- }
+ hdr->duration_id = uDuration;
- if ((pDevice->byLocalID <= REV_ID_VT3253_A1)) {
- if (bNeedEncrypt) {
- s_vSWencryption(pDevice, pTransmitKey, (pbyBuffer + uLength - cb802_1_H_len),
- (unsigned short)(cbFrameBodySize + cbMIClen));
- cbReqCount += cbICVlen;
- }
- }
+ cbReqCount = cbHeaderLength + uPadding + skb->len;
+ pbyBuffer = (unsigned char *)pHeadTD->pTDInfo->buf;
+ uLength = cbHeaderLength + uPadding;
- ptdCurr = (PSTxDesc)pHeadTD;
+ /* Copy the Packet into a tx Buffer */
+ memcpy((pbyBuffer + uLength), skb->data, skb->len);
- ptdCurr->pTDInfo->dwReqCount = cbReqCount - uPadding;
- ptdCurr->pTDInfo->dwHeaderLength = cbHeaderLength;
- ptdCurr->pTDInfo->skb_dma = ptdCurr->pTDInfo->buf_dma;
- ptdCurr->buff_addr = cpu_to_le32(ptdCurr->pTDInfo->skb_dma);
- //Set TSR1 & ReqCount in TxDescHead
- ptdCurr->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP | EDMSDU);
- ptdCurr->m_td1TD1.wReqCount = cpu_to_le16((unsigned short)(cbReqCount));
+ ptdCurr = (PSTxDesc)pHeadTD;
- pDevice->iTDUsed[uDMAIdx]++;
-
- }
- *puMACfragNum = uMACfragNum;
+ ptdCurr->pTDInfo->dwReqCount = cbReqCount - uPadding;
+ ptdCurr->pTDInfo->dwHeaderLength = cbHeaderLength;
+ ptdCurr->pTDInfo->skb_dma = ptdCurr->pTDInfo->buf_dma;
+ ptdCurr->buff_addr = cpu_to_le32(ptdCurr->pTDInfo->skb_dma);
+ /* Set TSR1 & ReqCount in TxDescHead */
+ ptdCurr->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP | EDMSDU);
+ ptdCurr->m_td1TD1.wReqCount = cpu_to_le16((unsigned short)(cbReqCount));
return cbHeaderLength;
}
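
With fragmentation, the software MIC path and per-fragment descriptors removed, s_cbFillTxBufHead() only needs the ACK and RTS decisions that were already encoded in the FIFO control word by the caller. A hedged sketch of that readback, using the field and flag names that appear in the patch:

/* fifo_ctl is stored little-endian in the tx buffer head; convert once
 * and test the FIFOCTL_* bits instead of recomputing them from op_mode
 * and the RTS/fragmentation thresholds. Helper name is illustrative. */
static void example_read_fifo_flags(const struct vnt_tx_fifo_head *head,
				    bool *need_ack, bool *use_rts)
{
	u16 fifo_ctl = le16_to_cpu(head->fifo_ctl);

	*need_ack = fifo_ctl & FIFOCTL_NEEDACK;
	*use_rts = fifo_ctl & FIFOCTL_RTS;
}
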
-void
-vGenerateFIFOHeader(struct vnt_private *pDevice, unsigned char byPktType,
- unsigned char *pbyTxBufferAddr, bool bNeedEncrypt,
- unsigned int cbPayloadSize, unsigned int uDMAIdx,
- PSTxDesc pHeadTD, PSEthernetHeader psEthHeader, unsigned char *pPacket,
- PSKeyItem pTransmitKey, unsigned int uNodeIndex, unsigned int *puMACfragNum,
- unsigned int *pcbHeaderSize)
+static void vnt_fill_txkey(struct ieee80211_hdr *hdr, u8 *key_buffer,
+ struct ieee80211_key_conf *tx_key,
+ struct sk_buff *skb, u16 payload_len,
+ struct vnt_mic_hdr *mic_hdr)
{
- unsigned int wTxBufSize; // FFinfo size
- bool bNeedACK;
- bool bIsAdhoc;
- unsigned short cbMacHdLen;
- PSTxBufHead pTxBufHead = (PSTxBufHead) pbyTxBufferAddr;
-
- wTxBufSize = sizeof(STxBufHead);
-
- memset(pTxBufHead, 0, wTxBufSize);
- //Set FIFOCTL_NEEDACK
-
- if ((pDevice->op_mode == NL80211_IFTYPE_ADHOC) ||
- (pDevice->op_mode == NL80211_IFTYPE_AP)) {
- if (is_multicast_ether_addr(&(psEthHeader->abyDstAddr[0]))) {
- bNeedACK = false;
- pTxBufHead->wFIFOCtl = pTxBufHead->wFIFOCtl & (~FIFOCTL_NEEDACK);
- } else {
- bNeedACK = true;
- pTxBufHead->wFIFOCtl |= FIFOCTL_NEEDACK;
+ struct ieee80211_key_seq seq;
+ u8 *iv = ((u8 *)hdr + ieee80211_get_hdrlen_from_skb(skb));
+
+ /* strip header and icv len from payload */
+ payload_len -= ieee80211_get_hdrlen_from_skb(skb);
+ payload_len -= tx_key->icv_len;
+
+ switch (tx_key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ memcpy(key_buffer, iv, 3);
+ memcpy(key_buffer + 3, tx_key->key, tx_key->keylen);
+
+ if (tx_key->keylen == WLAN_KEY_LEN_WEP40) {
+ memcpy(key_buffer + 8, iv, 3);
+ memcpy(key_buffer + 11,
+ tx_key->key, WLAN_KEY_LEN_WEP40);
}
- bIsAdhoc = true;
- } else {
- // MSDUs in Infra mode always need ACK
- bNeedACK = true;
- pTxBufHead->wFIFOCtl |= FIFOCTL_NEEDACK;
- bIsAdhoc = false;
- }
-
- pTxBufHead->wFIFOCtl |= FIFOCTL_TMOEN;
- pTxBufHead->wTimeStamp = cpu_to_le16(DEFAULT_MSDU_LIFETIME_RES_64us);
-
- //Set FIFOCTL_LHEAD
- if (pDevice->bLongHeader)
- pTxBufHead->wFIFOCtl |= FIFOCTL_LHEAD;
- //Set FIFOCTL_GENINT
-
- pTxBufHead->wFIFOCtl |= FIFOCTL_GENINT;
-
- //Set FIFOCTL_ISDMA0
- if (TYPE_TXDMA0 == uDMAIdx)
- pTxBufHead->wFIFOCtl |= FIFOCTL_ISDMA0;
-
- //Set FRAGCTL_MACHDCNT
- if (pDevice->bLongHeader)
- cbMacHdLen = WLAN_HDR_ADDR3_LEN + 6;
- else
- cbMacHdLen = WLAN_HDR_ADDR3_LEN;
-
- pTxBufHead->wFragCtl |= cpu_to_le16((unsigned short)(cbMacHdLen << 10));
-
- //Set packet type
- if (byPktType == PK_TYPE_11A) //0000 0000 0000 0000
- ;
- else if (byPktType == PK_TYPE_11B) //0000 0001 0000 0000
- pTxBufHead->wFIFOCtl |= FIFOCTL_11B;
- else if (byPktType == PK_TYPE_11GB) //0000 0010 0000 0000
- pTxBufHead->wFIFOCtl |= FIFOCTL_11GB;
- else if (byPktType == PK_TYPE_11GA) //0000 0011 0000 0000
- pTxBufHead->wFIFOCtl |= FIFOCTL_11GA;
-
- //Set FIFOCTL_GrpAckPolicy
- if (pDevice->bGrpAckPolicy == true) //0000 0100 0000 0000
- pTxBufHead->wFIFOCtl |= FIFOCTL_GRPACK;
-
- //Set Auto Fallback Ctl
- if (pDevice->wCurrentRate >= RATE_18M) {
- if (pDevice->byAutoFBCtrl == AUTO_FB_0)
- pTxBufHead->wFIFOCtl |= FIFOCTL_AUTO_FB_0;
- else if (pDevice->byAutoFBCtrl == AUTO_FB_1)
- pTxBufHead->wFIFOCtl |= FIFOCTL_AUTO_FB_1;
- }
-
- //Set FRAGCTL_WEPTYP
- pDevice->bAES = false;
-
- //Set FRAGCTL_WEPTYP
- if (pDevice->byLocalID > REV_ID_VT3253_A1) {
- if ((bNeedEncrypt) && (pTransmitKey != NULL)) { //WEP enabled
- if (pTransmitKey->byCipherSuite == KEY_CTL_TKIP) {
- pTxBufHead->wFragCtl |= FRAGCTL_TKIP;
- } else if (pTransmitKey->byCipherSuite == KEY_CTL_WEP) { //WEP40 or WEP104
- if (pTransmitKey->uKeyLength != WLAN_WEP232_KEYLEN)
- pTxBufHead->wFragCtl |= FRAGCTL_LEGACY;
- } else if (pTransmitKey->byCipherSuite == KEY_CTL_CCMP) { //CCMP
- pTxBufHead->wFragCtl |= FRAGCTL_AES;
- }
- }
- }
-
- RFbSetPower(pDevice, pDevice->wCurrentRate, pDevice->byCurrentCh);
-
- pTxBufHead->byTxPower = pDevice->byCurPwr;
-
- *pcbHeaderSize = s_cbFillTxBufHead(pDevice, byPktType, pbyTxBufferAddr, cbPayloadSize,
- uDMAIdx, pHeadTD, psEthHeader, pPacket, bNeedEncrypt,
- pTransmitKey, uNodeIndex, puMACfragNum);
-}
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ ieee80211_get_tkip_p2k(tx_key, skb, key_buffer);
-/*+
- *
- * Description:
- * Translate 802.3 to 802.11 header
- *
- * Parameters:
- * In:
- * pDevice - Pointer to adapter
- * dwTxBufferAddr - Transmit Buffer
- * pPacket - Packet from upper layer
- * cbPacketSize - Transmit Data Length
- * Out:
- * pcbHeadSize - Header size of MAC&Baseband control and 802.11 Header
- * pcbAppendPayload - size of append payload for 802.1H translation
- *
- * Return Value: none
- *
- -*/
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
-void
-vGenerateMACHeader(
- struct vnt_private *pDevice,
- unsigned char *pbyBufferAddr,
- __le16 wDuration,
- PSEthernetHeader psEthHeader,
- bool bNeedEncrypt,
- unsigned short wFragType,
- unsigned int uDMAIdx,
- unsigned int uFragIdx
-)
-{
- PS802_11Header pMACHeader = (PS802_11Header)pbyBufferAddr;
+ if (!mic_hdr)
+ return;
- memset(pMACHeader, 0, (sizeof(S802_11Header)));
+ mic_hdr->id = 0x59;
+ mic_hdr->payload_len = cpu_to_be16(payload_len);
+ ether_addr_copy(mic_hdr->mic_addr2, hdr->addr2);
- if (uDMAIdx == TYPE_ATIMDMA)
- pMACHeader->wFrameCtl = TYPE_802_11_ATIM;
- else
- pMACHeader->wFrameCtl = TYPE_802_11_DATA;
+ ieee80211_get_key_tx_seq(tx_key, &seq);
- if (pDevice->op_mode == NL80211_IFTYPE_AP) {
- memcpy(&(pMACHeader->abyAddr1[0]), &(psEthHeader->abyDstAddr[0]), ETH_ALEN);
- memcpy(&(pMACHeader->abyAddr2[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
- memcpy(&(pMACHeader->abyAddr3[0]), &(psEthHeader->abySrcAddr[0]), ETH_ALEN);
- pMACHeader->wFrameCtl |= FC_FROMDS;
- } else {
- if (pDevice->op_mode == NL80211_IFTYPE_ADHOC) {
- memcpy(&(pMACHeader->abyAddr1[0]), &(psEthHeader->abyDstAddr[0]), ETH_ALEN);
- memcpy(&(pMACHeader->abyAddr2[0]), &(psEthHeader->abySrcAddr[0]), ETH_ALEN);
- memcpy(&(pMACHeader->abyAddr3[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
- } else {
- memcpy(&(pMACHeader->abyAddr3[0]), &(psEthHeader->abyDstAddr[0]), ETH_ALEN);
- memcpy(&(pMACHeader->abyAddr2[0]), &(psEthHeader->abySrcAddr[0]), ETH_ALEN);
- memcpy(&(pMACHeader->abyAddr1[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
- pMACHeader->wFrameCtl |= FC_TODS;
- }
- }
+ memcpy(mic_hdr->ccmp_pn, seq.ccmp.pn, IEEE80211_CCMP_PN_LEN);
- if (bNeedEncrypt)
- pMACHeader->wFrameCtl |= cpu_to_le16((unsigned short)WLAN_SET_FC_ISWEP(1));
+ if (ieee80211_has_a4(hdr->frame_control))
+ mic_hdr->hlen = cpu_to_be16(28);
+ else
+ mic_hdr->hlen = cpu_to_be16(22);
- pMACHeader->wDurationID = le16_to_cpu(wDuration);
+ ether_addr_copy(mic_hdr->addr1, hdr->addr1);
+ ether_addr_copy(mic_hdr->addr2, hdr->addr2);
+ ether_addr_copy(mic_hdr->addr3, hdr->addr3);
- if (pDevice->bLongHeader) {
- PWLAN_80211HDR_A4 pMACA4Header = (PWLAN_80211HDR_A4) pbyBufferAddr;
+ mic_hdr->frame_control = cpu_to_le16(
+ le16_to_cpu(hdr->frame_control) & 0xc78f);
+ mic_hdr->seq_ctrl = cpu_to_le16(
+ le16_to_cpu(hdr->seq_ctrl) & 0xf);
- pMACHeader->wFrameCtl |= (FC_TODS | FC_FROMDS);
- memcpy(pMACA4Header->abyAddr4, pDevice->abyBSSID, WLAN_ADDR_LEN);
- }
- pMACHeader->wSeqCtl = cpu_to_le16(pDevice->wSeqCounter << 4);
+ if (ieee80211_has_a4(hdr->frame_control))
+ ether_addr_copy(mic_hdr->addr4, hdr->addr4);
- //Set FragNumber in Sequence Control
- pMACHeader->wSeqCtl |= cpu_to_le16((unsigned short)uFragIdx);
+ memcpy(key_buffer, tx_key->key, WLAN_KEY_LEN_CCMP);
- if ((wFragType == FRAGCTL_ENDFRAG) || (wFragType == FRAGCTL_NONFRAG)) {
- pDevice->wSeqCounter++;
- if (pDevice->wSeqCounter > 0x0fff)
- pDevice->wSeqCounter = 0;
+ break;
+ default:
+ break;
}
-
- if ((wFragType == FRAGCTL_STAFRAG) || (wFragType == FRAGCTL_MIDFRAG)) //StartFrag or MidFrag
- pMACHeader->wFrameCtl |= FC_MOREFRAG;
}
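
In the CCMP branch of vnt_fill_txkey() the frame control and sequence control copied into the MIC header are masked, which appears to follow the CCMP AAD construction: the mutable Frame Control bits are cleared and only the fragment number of the Sequence Control survives. A sketch of just that masking step (helper name is illustrative; the masks are the ones used above):

static void example_ccmp_aad_fields(const struct ieee80211_hdr *hdr,
				    __le16 *fc_out, __le16 *sc_out)
{
	/* Keep type/DS/protected bits; clear Retry, Pwr Mgmt, More Data
	 * and the mutable subtype bits. */
	*fc_out = cpu_to_le16(le16_to_cpu(hdr->frame_control) & 0xc78f);
	/* Keep only the 4-bit fragment number. */
	*sc_out = cpu_to_le16(le16_to_cpu(hdr->seq_ctrl) & 0xf);
}
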
-CMD_STATUS csMgmt_xmit(struct vnt_private *pDevice, PSTxMgmtPacket pPacket)
+int vnt_generate_fifo_header(struct vnt_private *priv, u32 dma_idx,
+ PSTxDesc head_td, struct sk_buff *skb)
{
- PSTxDesc pFrstTD;
- unsigned char byPktType;
- unsigned char *pbyTxBufferAddr;
- void *pvRTS;
- struct vnt_cts *pCTS;
- void *pvTxDataHd;
- unsigned int uDuration;
- unsigned int cbReqCount;
- PS802_11Header pMACHeader;
- unsigned int cbHeaderSize;
- unsigned int cbFrameBodySize;
- bool bNeedACK;
- bool bIsPSPOLL = false;
- PSTxBufHead pTxBufHead;
- unsigned int cbFrameSize;
- unsigned int cbIVlen = 0;
- unsigned int cbICVlen = 0;
- unsigned int cbMIClen = 0;
- unsigned int cbFCSlen = 4;
- unsigned int uPadding = 0;
- unsigned short wTxBufSize;
- unsigned int cbMacHdLen;
- SEthernetHeader sEthHeader;
- void *pvRrvTime;
- void *pMICHDR;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- unsigned short wCurrentRate = RATE_1M;
-
- if (AVAIL_TD(pDevice, TYPE_TXDMA0) <= 0)
- return CMD_STATUS_RESOURCES;
-
- pFrstTD = pDevice->apCurrTD[TYPE_TXDMA0];
- pbyTxBufferAddr = (unsigned char *)pFrstTD->pTDInfo->buf;
- cbFrameBodySize = pPacket->cbPayloadLen;
- pTxBufHead = (PSTxBufHead) pbyTxBufferAddr;
- wTxBufSize = sizeof(STxBufHead);
- memset(pTxBufHead, 0, wTxBufSize);
-
- if (pDevice->eCurrentPHYType == PHY_TYPE_11A) {
- wCurrentRate = RATE_6M;
- byPktType = PK_TYPE_11A;
- } else {
- wCurrentRate = RATE_1M;
- byPktType = PK_TYPE_11B;
+ PDEVICE_TD_INFO td_info = head_td->pTDInfo;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_rate *tx_rate = &info->control.rates[0];
+ struct ieee80211_rate *rate;
+ struct ieee80211_key_conf *tx_key;
+ struct ieee80211_hdr *hdr;
+ struct vnt_tx_fifo_head *tx_buffer_head =
+ (struct vnt_tx_fifo_head *)td_info->buf;
+ u16 tx_body_size = skb->len, current_rate;
+ u8 pkt_type;
+ bool is_pspoll = false;
+
+ memset(tx_buffer_head, 0, sizeof(*tx_buffer_head));
+
+ hdr = (struct ieee80211_hdr *)(skb->data);
+
+ rate = ieee80211_get_tx_rate(priv->hw, info);
+
+ current_rate = rate->hw_value;
+ if (priv->wCurrentRate != current_rate &&
+ !(priv->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)) {
+ priv->wCurrentRate = current_rate;
+
+ RFbSetPower(priv, priv->wCurrentRate,
+ priv->hw->conf.chandef.chan->hw_value);
}
- // SetPower will cause error power TX state for OFDM Date packet in TX buffer.
- // 2004.11.11 Kyle -- Using OFDM power to tx MngPkt will decrease the connection capability.
- // And cmd timer will wait data pkt TX finish before scanning so it's OK
- // to set power here.
- if (pDevice->pMgmt->eScanState != WMAC_NO_SCANNING)
- RFbSetPower(pDevice, wCurrentRate, pDevice->byCurrentCh);
+ if (current_rate > RATE_11M)
+ pkt_type = (u8)priv->byPacketType;
else
- RFbSetPower(pDevice, wCurrentRate, pMgmt->uCurrChannel);
-
- pTxBufHead->byTxPower = pDevice->byCurPwr;
- //+++++++++++++++++++++ Patch VT3253 A1 performance +++++++++++++++++++++++++++
- if (pDevice->byFOETuning) {
- if ((pPacket->p80211Header->sA3.wFrameCtl & TYPE_DATE_NULL) == TYPE_DATE_NULL) {
- wCurrentRate = RATE_24M;
- byPktType = PK_TYPE_11GA;
- }
- }
-
- //Set packet type
- if (byPktType == PK_TYPE_11A) {//0000 0000 0000 0000
- pTxBufHead->wFIFOCtl = 0;
- } else if (byPktType == PK_TYPE_11B) {//0000 0001 0000 0000
- pTxBufHead->wFIFOCtl |= FIFOCTL_11B;
- } else if (byPktType == PK_TYPE_11GB) {//0000 0010 0000 0000
- pTxBufHead->wFIFOCtl |= FIFOCTL_11GB;
- } else if (byPktType == PK_TYPE_11GA) {//0000 0011 0000 0000
- pTxBufHead->wFIFOCtl |= FIFOCTL_11GA;
+ pkt_type = PK_TYPE_11B;
+
+	/* Set fifo controls */
+ if (pkt_type == PK_TYPE_11A)
+ tx_buffer_head->fifo_ctl = 0;
+ else if (pkt_type == PK_TYPE_11B)
+ tx_buffer_head->fifo_ctl = cpu_to_le16(FIFOCTL_11B);
+ else if (pkt_type == PK_TYPE_11GB)
+ tx_buffer_head->fifo_ctl = cpu_to_le16(FIFOCTL_11GB);
+ else if (pkt_type == PK_TYPE_11GA)
+ tx_buffer_head->fifo_ctl = cpu_to_le16(FIFOCTL_11GA);
+
+ /* generate interrupt */
+ tx_buffer_head->fifo_ctl |= cpu_to_le16(FIFOCTL_GENINT);
+
+ if (!ieee80211_is_data(hdr->frame_control)) {
+ tx_buffer_head->fifo_ctl |= cpu_to_le16(FIFOCTL_TMOEN);
+ tx_buffer_head->fifo_ctl |= cpu_to_le16(FIFOCTL_ISDMA0);
+ tx_buffer_head->time_stamp =
+ cpu_to_le16(DEFAULT_MGN_LIFETIME_RES_64us);
+ } else {
+ tx_buffer_head->time_stamp =
+ cpu_to_le16(DEFAULT_MSDU_LIFETIME_RES_64us);
}
- pTxBufHead->wFIFOCtl |= FIFOCTL_TMOEN;
- pTxBufHead->wTimeStamp = cpu_to_le16(DEFAULT_MGN_LIFETIME_RES_64us);
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+ tx_buffer_head->fifo_ctl |= cpu_to_le16(FIFOCTL_NEEDACK);
- if (is_multicast_ether_addr(&(pPacket->p80211Header->sA3.abyAddr1[0])))
- bNeedACK = false;
- else {
- bNeedACK = true;
- pTxBufHead->wFIFOCtl |= FIFOCTL_NEEDACK;
- }
+ if (ieee80211_has_retry(hdr->frame_control))
+ tx_buffer_head->fifo_ctl |= cpu_to_le16(FIFOCTL_LRETRY);
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) ||
- (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)) {
- pTxBufHead->wFIFOCtl |= FIFOCTL_LRETRY;
- }
+ if (tx_rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+ priv->byPreambleType = PREAMBLE_SHORT;
+ else
+ priv->byPreambleType = PREAMBLE_LONG;
- pTxBufHead->wFIFOCtl |= (FIFOCTL_GENINT | FIFOCTL_ISDMA0);
+ if (tx_rate->flags & IEEE80211_TX_RC_USE_RTS_CTS)
+ tx_buffer_head->fifo_ctl |= cpu_to_le16(FIFOCTL_RTS);
- if ((pPacket->p80211Header->sA4.wFrameCtl & TYPE_SUBTYPE_MASK) == TYPE_CTL_PSPOLL) {
- bIsPSPOLL = true;
- cbMacHdLen = WLAN_HDR_ADDR2_LEN;
- } else {
- cbMacHdLen = WLAN_HDR_ADDR3_LEN;
+ if (ieee80211_has_a4(hdr->frame_control)) {
+ tx_buffer_head->fifo_ctl |= cpu_to_le16(FIFOCTL_LHEAD);
+ priv->bLongHeader = true;
}
- //Set FRAGCTL_MACHDCNT
- pTxBufHead->wFragCtl |= cpu_to_le16((unsigned short)(cbMacHdLen << 10));
-
- // Notes:
- // Although spec says MMPDU can be fragmented; In most cases,
- // no one will send a MMPDU under fragmentation. With RTS may occur.
- pDevice->bAES = false; //Set FRAGCTL_WEPTYP
-
- if (WLAN_GET_FC_ISWEP(pPacket->p80211Header->sA4.wFrameCtl) != 0) {
- if (pDevice->eEncryptionStatus == Ndis802_11Encryption1Enabled) {
- cbIVlen = 4;
- cbICVlen = 4;
- pTxBufHead->wFragCtl |= FRAGCTL_LEGACY;
- } else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) {
- cbIVlen = 8;//IV+ExtIV
- cbMIClen = 8;
- cbICVlen = 4;
- pTxBufHead->wFragCtl |= FRAGCTL_TKIP;
- //We need to get seed here for filling TxKey entry.
- } else if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) {
- cbIVlen = 8;//RSN Header
- cbICVlen = 8;//MIC
- pTxBufHead->wFragCtl |= FRAGCTL_AES;
- pDevice->bAES = true;
+ if (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)
+ is_pspoll = true;
+
+ tx_buffer_head->frag_ctl =
+ cpu_to_le16(ieee80211_get_hdrlen_from_skb(skb) << 10);
+
+ if (info->control.hw_key) {
+ tx_key = info->control.hw_key;
+
+ switch (info->control.hw_key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ tx_buffer_head->frag_ctl |= cpu_to_le16(FRAGCTL_LEGACY);
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ tx_buffer_head->frag_ctl |= cpu_to_le16(FRAGCTL_TKIP);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ tx_buffer_head->frag_ctl |= cpu_to_le16(FRAGCTL_AES);
+ default:
+ break;
}
- //MAC Header should be padding 0 to DW alignment.
- uPadding = 4 - (cbMacHdLen%4);
- uPadding %= 4;
}
- cbFrameSize = cbMacHdLen + cbFrameBodySize + cbIVlen + cbMIClen + cbICVlen + cbFCSlen;
-
- //Set FIFOCTL_GrpAckPolicy
- if (pDevice->bGrpAckPolicy == true) //0000 0100 0000 0000
- pTxBufHead->wFIFOCtl |= FIFOCTL_GRPACK;
+ tx_buffer_head->current_rate = cpu_to_le16(current_rate);
- //the rest of pTxBufHead->wFragCtl:FragTyp will be set later in s_vFillFragParameter()
+ /* legacy rates TODO use ieee80211_tx_rate */
+ if (current_rate >= RATE_18M && ieee80211_is_data(hdr->frame_control)) {
+ if (priv->byAutoFBCtrl == AUTO_FB_0)
+ tx_buffer_head->fifo_ctl |=
+ cpu_to_le16(FIFOCTL_AUTO_FB_0);
+ else if (priv->byAutoFBCtrl == AUTO_FB_1)
+ tx_buffer_head->fifo_ctl |=
+ cpu_to_le16(FIFOCTL_AUTO_FB_1);
- //Set RrvTime/RTS/CTS Buffer
- if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {//802.11g packet
- pvRrvTime = (void *) (pbyTxBufferAddr + wTxBufSize);
- pMICHDR = NULL;
- pvRTS = NULL;
- pCTS = (struct vnt_cts *)(pbyTxBufferAddr + wTxBufSize +
- sizeof(struct vnt_rrv_time_cts));
- pvTxDataHd = (void *)(pbyTxBufferAddr + wTxBufSize +
- sizeof(struct vnt_rrv_time_cts) + sizeof(struct vnt_cts));
- cbHeaderSize = wTxBufSize + sizeof(struct vnt_rrv_time_cts) +
- sizeof(struct vnt_cts) + sizeof(struct vnt_tx_datahead_g);
- } else { // 802.11a/b packet
- pvRrvTime = (void *)(pbyTxBufferAddr + wTxBufSize);
- pMICHDR = NULL;
- pvRTS = NULL;
- pCTS = NULL;
- pvTxDataHd = (void *)(pbyTxBufferAddr + wTxBufSize +
- sizeof(struct vnt_rrv_time_ab));
- cbHeaderSize = wTxBufSize + sizeof(struct vnt_rrv_time_ab) +
- sizeof(struct vnt_tx_datahead_ab);
}
- memset((void *)(pbyTxBufferAddr + wTxBufSize), 0, (cbHeaderSize - wTxBufSize));
-
- memcpy(&(sEthHeader.abyDstAddr[0]), &(pPacket->p80211Header->sA3.abyAddr1[0]), ETH_ALEN);
- memcpy(&(sEthHeader.abySrcAddr[0]), &(pPacket->p80211Header->sA3.abyAddr2[0]), ETH_ALEN);
- //=========================
- // No Fragmentation
- //=========================
- pTxBufHead->wFragCtl |= (unsigned short)FRAGCTL_NONFRAG;
-
- //Fill FIFO,RrvTime,RTS,and CTS
- s_vGenerateTxParameter(pDevice, byPktType, pbyTxBufferAddr, pvRrvTime, pvRTS, pCTS,
- cbFrameSize, bNeedACK, TYPE_TXDMA0, &sEthHeader, wCurrentRate);
-
- //Fill DataHead
- uDuration = s_uFillDataHead(pDevice, byPktType, pvTxDataHd, cbFrameSize, TYPE_TXDMA0, bNeedACK,
- 0, 0, 1, AUTO_FB_NONE, wCurrentRate);
-
- pMACHeader = (PS802_11Header) (pbyTxBufferAddr + cbHeaderSize);
-
- cbReqCount = cbHeaderSize + cbMacHdLen + uPadding + cbIVlen + cbFrameBodySize;
-
- if (WLAN_GET_FC_ISWEP(pPacket->p80211Header->sA4.wFrameCtl) != 0) {
- unsigned char *pbyIVHead;
- unsigned char *pbyPayloadHead;
- unsigned char *pbyBSSID;
- PSKeyItem pTransmitKey = NULL;
-
- pbyIVHead = (unsigned char *)(pbyTxBufferAddr + cbHeaderSize + cbMacHdLen + uPadding);
- pbyPayloadHead = (unsigned char *)(pbyTxBufferAddr + cbHeaderSize + cbMacHdLen + uPadding + cbIVlen);
-
- //Fill TXKEY
- //Kyle: Need fix: TKIP and AES did't encrypt Mnt Packet.
- //s_vFillTxKey(pDevice, (unsigned char *)pTxBufHead->adwTxKey, NULL);
-
- //Fill IV(ExtIV,RSNHDR)
- //s_vFillPrePayload(pDevice, pbyIVHead, NULL);
- //---------------------------
- // S/W or H/W Encryption
- //---------------------------
- do {
- if ((pDevice->op_mode == NL80211_IFTYPE_STATION) &&
- (pDevice->bLinkPass == true)) {
- pbyBSSID = pDevice->abyBSSID;
- // get pairwise key
- if (KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, PAIRWISE_KEY, &pTransmitKey) == false) {
- // get group key
- if (KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == true) {
- pr_debug("Get GTK\n");
- break;
- }
- } else {
- pr_debug("Get PTK\n");
- break;
- }
- }
- // get group key
- pbyBSSID = pDevice->abyBroadcastAddr;
- if (KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == false) {
- pTransmitKey = NULL;
- pr_debug("KEY is NULL. OP Mode[%d]\n",
- pDevice->op_mode);
- } else {
- pr_debug("Get GTK\n");
- }
- } while (false);
- //Fill TXKEY
- s_vFillTxKey(pDevice, (unsigned char *)(pTxBufHead->adwTxKey), pbyIVHead, pTransmitKey,
- (unsigned char *)pMACHeader, (unsigned short)cbFrameBodySize, NULL);
-
- memcpy(pMACHeader, pPacket->p80211Header, cbMacHdLen);
- memcpy(pbyPayloadHead, ((unsigned char *)(pPacket->p80211Header) + cbMacHdLen),
- cbFrameBodySize);
- } else {
- // Copy the Packet into a tx Buffer
- memcpy(pMACHeader, pPacket->p80211Header, pPacket->cbMPDULen);
- }
+ tx_buffer_head->frag_ctl |= cpu_to_le16(FRAGCTL_NONFRAG);
- pMACHeader->wSeqCtl = cpu_to_le16(pDevice->wSeqCounter << 4);
- pDevice->wSeqCounter++;
- if (pDevice->wSeqCounter > 0x0fff)
- pDevice->wSeqCounter = 0;
-
- if (bIsPSPOLL) {
- // The MAC will automatically replace the Duration-field of MAC header by Duration-field
- // of FIFO control header.
- // This will cause AID-field of PS-POLL packet to be incorrect (Because PS-POLL's AID field is
- // in the same place of other packet's Duration-field).
- // And it will cause Cisco-AP to issue Disassociation-packet
- if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
- ((struct vnt_tx_datahead_g *)pvTxDataHd)->duration_a = cpu_to_le16(pPacket->p80211Header->sA2.wDurationID);
- ((struct vnt_tx_datahead_g *)pvTxDataHd)->duration_b = cpu_to_le16(pPacket->p80211Header->sA2.wDurationID);
- } else {
- ((struct vnt_tx_datahead_ab *)pvTxDataHd)->duration = cpu_to_le16(pPacket->p80211Header->sA2.wDurationID);
- }
- }
+ s_cbFillTxBufHead(priv, pkt_type, (u8 *)tx_buffer_head,
+ dma_idx, head_td, is_pspoll);
- // first TD is the only TD
- //Set TSR1 & ReqCount in TxDescHead
- pFrstTD->m_td1TD1.byTCR = (TCR_STP | TCR_EDP | EDMSDU);
- pFrstTD->pTDInfo->skb_dma = pFrstTD->pTDInfo->buf_dma;
- pFrstTD->m_td1TD1.wReqCount = cpu_to_le16((unsigned short)(cbReqCount));
- pFrstTD->buff_addr = cpu_to_le32(pFrstTD->pTDInfo->skb_dma);
- pFrstTD->pTDInfo->byFlags = 0;
-
- if (MACbIsRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PS)) {
- // Disable PS
- MACbPSWakeup(pDevice->PortOffset);
+ if (info->control.hw_key) {
+ tx_key = info->control.hw_key;
+ if (tx_key->keylen > 0)
+ vnt_fill_txkey(hdr, tx_buffer_head->tx_key,
+ tx_key, skb, tx_body_size, td_info->mic_hdr);
}
- pDevice->bPWBitOn = false;
-
- wmb();
- pFrstTD->m_td0TD0.f1Owner = OWNED_BY_NIC;
- wmb();
-
- pDevice->iTDUsed[TYPE_TXDMA0]++;
-
- if (AVAIL_TD(pDevice, TYPE_TXDMA0) <= 1)
- pr_debug(" available td0 <= 1\n");
-
- pDevice->apCurrTD[TYPE_TXDMA0] = pFrstTD->next;
-
- pDevice->nTxDataTimeCout = 0; //2008-8-21 chester <add> for send null packet
- // Poll Transmit the adapter
- MACvTransmit0(pDevice->PortOffset);
-
- return CMD_STATUS_PENDING;
+ return 0;
}
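
vnt_generate_fifo_header() replaces the old vGenerateFIFOHeader()/csMgmt_xmit() pair: mac80211 now hands the driver a fully formed 802.11 frame, so the function only derives FIFO and fragment control flags from the tx info and the header before s_cbFillTxBufHead() copies the skb into the DMA buffer. A hypothetical call-site sketch (the real caller lives in the device tx path and is not part of this hunk):

static int example_tx_one(struct vnt_private *priv, PSTxDesc td,
			  struct sk_buff *skb)
{
	int ret;

	/* Build the FIFO header and tx buffer for this frame. */
	ret = vnt_generate_fifo_header(priv, TYPE_TXDMA0, td, skb);
	if (ret)
		return ret;

	/* ...map the buffer and kick the MAC (MACvTransmit0()) as before... */
	return 0;
}
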
-CMD_STATUS csBeacon_xmit(struct vnt_private *pDevice, PSTxMgmtPacket pPacket)
+static int vnt_beacon_xmit(struct vnt_private *priv,
+ struct sk_buff *skb)
{
- unsigned char byPktType;
- unsigned char *pbyBuffer = (unsigned char *)pDevice->tx_beacon_bufs;
- unsigned int cbFrameSize = pPacket->cbMPDULen + WLAN_FCS_LEN;
- unsigned int cbHeaderSize = 0;
struct vnt_tx_short_buf_head *short_head =
- (struct vnt_tx_short_buf_head *)pbyBuffer;
- PS802_11Header pMACHeader;
- unsigned short wCurrentRate;
-
- memset(short_head, 0, sizeof(*short_head));
+ (struct vnt_tx_short_buf_head *)priv->tx_beacon_bufs;
+ struct ieee80211_mgmt *mgmt_hdr = (struct ieee80211_mgmt *)
+ (priv->tx_beacon_bufs + sizeof(*short_head));
+ struct ieee80211_tx_info *info;
+ u32 frame_size = skb->len + 4;
+ u16 current_rate;
- if (pDevice->eCurrentPHYType == PHY_TYPE_11A) {
- wCurrentRate = RATE_6M;
- byPktType = PK_TYPE_11A;
- } else {
- wCurrentRate = RATE_2M;
- byPktType = PK_TYPE_11B;
- }
+ memset(priv->tx_beacon_bufs, 0, sizeof(*short_head));
- //Set Preamble type always long
- pDevice->byPreambleType = PREAMBLE_LONG;
+ if (priv->byBBType == BB_TYPE_11A) {
+ current_rate = RATE_6M;
- /* Set FIFOCTL_GENINT */
- short_head->fifo_ctl |= cpu_to_le16(FIFOCTL_GENINT);
-
- /* Set packet type & Get Duration */
- if (byPktType == PK_TYPE_11A) {//0000 0000 0000 0000
- short_head->duration =
- cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_A,
- cbFrameSize, byPktType, wCurrentRate, false,
- 0, 0, 1, AUTO_FB_NONE));
- } else if (byPktType == PK_TYPE_11B) {//0000 0001 0000 0000
- short_head->fifo_ctl |= cpu_to_le16(FIFOCTL_11B);
+ /* Get SignalField,ServiceField,Length */
+ vnt_get_phy_field(priv, frame_size, current_rate,
+ PK_TYPE_11A, &short_head->ab);
+ /* Get Duration and TimeStampOff */
short_head->duration =
- cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_B,
- cbFrameSize, byPktType, wCurrentRate, false,
- 0, 0, 1, AUTO_FB_NONE));
- }
-
- vnt_get_phy_field(pDevice, cbFrameSize,
- wCurrentRate, byPktType, &short_head->ab);
-
- /* Get TimeStampOff */
- short_head->time_stamp_off = vnt_time_stamp_off(pDevice, wCurrentRate);
- cbHeaderSize = sizeof(struct vnt_tx_short_buf_head);
-
- //Generate Beacon Header
- pMACHeader = (PS802_11Header)(pbyBuffer + cbHeaderSize);
- memcpy(pMACHeader, pPacket->p80211Header, pPacket->cbMPDULen);
-
- pMACHeader->wDurationID = 0;
- pMACHeader->wSeqCtl = cpu_to_le16(pDevice->wSeqCounter << 4);
- pDevice->wSeqCounter++;
- if (pDevice->wSeqCounter > 0x0fff)
- pDevice->wSeqCounter = 0;
-
- // Set Beacon buffer length
- pDevice->wBCNBufLen = pPacket->cbMPDULen + cbHeaderSize;
-
- MACvSetCurrBCNTxDescAddr(pDevice->PortOffset, (pDevice->tx_beacon_dma));
-
- MACvSetCurrBCNLength(pDevice->PortOffset, pDevice->wBCNBufLen);
- // Set auto Transmit on
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_TCR, TCR_AUTOBCNTX);
- // Poll Transmit the adapter
- MACvTransmitBCN(pDevice->PortOffset);
-
- return CMD_STATUS_PENDING;
-}
-
-unsigned int
-cbGetFragCount(
- struct vnt_private *pDevice,
- PSKeyItem pTransmitKey,
- unsigned int cbFrameBodySize,
- PSEthernetHeader psEthHeader
-)
-{
- unsigned int cbMACHdLen;
- unsigned int cbFrameSize;
- unsigned int cbFragmentSize; //Hdr+(IV)+payoad+(MIC)+(ICV)+FCS
- unsigned int cbFragPayloadSize;
- unsigned int cbLastFragPayloadSize;
- unsigned int cbIVlen = 0;
- unsigned int cbICVlen = 0;
- unsigned int cbMIClen = 0;
- unsigned int cbFCSlen = 4;
- unsigned int uMACfragNum = 1;
- bool bNeedACK;
-
- if ((pDevice->op_mode == NL80211_IFTYPE_ADHOC) ||
- (pDevice->op_mode == NL80211_IFTYPE_AP)) {
- if (is_multicast_ether_addr(&(psEthHeader->abyDstAddr[0])))
- bNeedACK = false;
- else
- bNeedACK = true;
- } else {
- // MSDUs in Infra mode always need ACK
- bNeedACK = true;
- }
-
- if (pDevice->bLongHeader)
- cbMACHdLen = WLAN_HDR_ADDR3_LEN + 6;
- else
- cbMACHdLen = WLAN_HDR_ADDR3_LEN;
-
- if (pDevice->bEncryptionEnable == true) {
- if (pTransmitKey == NULL) {
- if ((pDevice->eEncryptionStatus == Ndis802_11Encryption1Enabled) ||
- (pDevice->pMgmt->eAuthenMode < WMAC_AUTH_WPA)) {
- cbIVlen = 4;
- cbICVlen = 4;
- } else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) {
- cbIVlen = 8;//IV+ExtIV
- cbMIClen = 8;
- cbICVlen = 4;
- } else if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) {
- cbIVlen = 8;//RSN Header
- cbICVlen = 8;//MIC
- }
- } else if (pTransmitKey->byCipherSuite == KEY_CTL_WEP) {
- cbIVlen = 4;
- cbICVlen = 4;
- } else if (pTransmitKey->byCipherSuite == KEY_CTL_TKIP) {
- cbIVlen = 8;//IV+ExtIV
- cbMIClen = 8;
- cbICVlen = 4;
- } else if (pTransmitKey->byCipherSuite == KEY_CTL_CCMP) {
- cbIVlen = 8;//RSN Header
- cbICVlen = 8;//MIC
- }
- }
-
- cbFrameSize = cbMACHdLen + cbIVlen + (cbFrameBodySize + cbMIClen) + cbICVlen + cbFCSlen;
-
- if ((cbFrameSize > pDevice->wFragmentationThreshold) && (bNeedACK == true)) {
- // Fragmentation
- cbFragmentSize = pDevice->wFragmentationThreshold;
- cbFragPayloadSize = cbFragmentSize - cbMACHdLen - cbIVlen - cbICVlen - cbFCSlen;
- uMACfragNum = (unsigned short) ((cbFrameBodySize + cbMIClen) / cbFragPayloadSize);
- cbLastFragPayloadSize = (cbFrameBodySize + cbMIClen) % cbFragPayloadSize;
- if (cbLastFragPayloadSize == 0)
- cbLastFragPayloadSize = cbFragPayloadSize;
- else
- uMACfragNum++;
- }
- return uMACfragNum;
-}
-
-void vDMA0_tx_80211(struct vnt_private *pDevice, struct sk_buff *skb,
- unsigned char *pbMPDU, unsigned int cbMPDULen)
-{
- PSTxDesc pFrstTD;
- unsigned char byPktType;
- unsigned char *pbyTxBufferAddr;
- void *pvRTS;
- void *pvCTS;
- void *pvTxDataHd;
- unsigned int uDuration;
- unsigned int cbReqCount;
- PS802_11Header pMACHeader;
- unsigned int cbHeaderSize;
- unsigned int cbFrameBodySize;
- bool bNeedACK;
- bool bIsPSPOLL = false;
- PSTxBufHead pTxBufHead;
- unsigned int cbFrameSize;
- unsigned int cbIVlen = 0;
- unsigned int cbICVlen = 0;
- unsigned int cbMIClen = 0;
- unsigned int cbFCSlen = 4;
- unsigned int uPadding = 0;
- unsigned int cbMICHDR = 0;
- unsigned int uLength = 0;
- u32 dwMICKey0, dwMICKey1;
- u32 dwMIC_Priority;
- u32 *pdwMIC_L;
- u32 *pdwMIC_R;
- unsigned short wTxBufSize;
- unsigned int cbMacHdLen;
- SEthernetHeader sEthHeader;
- void *pvRrvTime;
- void *pMICHDR;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- unsigned short wCurrentRate = RATE_1M;
- PUWLAN_80211HDR p80211Header;
- unsigned int uNodeIndex = 0;
- bool bNodeExist = false;
- SKeyItem STempKey;
- PSKeyItem pTransmitKey = NULL;
- unsigned char *pbyIVHead;
- unsigned char *pbyPayloadHead;
- unsigned char *pbyMacHdr;
-
- unsigned int cbExtSuppRate = 0;
-
- pvRrvTime = pMICHDR = pvRTS = pvCTS = pvTxDataHd = NULL;
-
- if (cbMPDULen <= WLAN_HDR_ADDR3_LEN)
- cbFrameBodySize = 0;
- else
- cbFrameBodySize = cbMPDULen - WLAN_HDR_ADDR3_LEN;
-
- p80211Header = (PUWLAN_80211HDR)pbMPDU;
+ cpu_to_le16((u16)s_uGetDataDuration(priv, DATADUR_B,
+ frame_size, PK_TYPE_11A, current_rate,
+ false, 0, 0, 1, AUTO_FB_NONE));
- pFrstTD = pDevice->apCurrTD[TYPE_TXDMA0];
- pbyTxBufferAddr = (unsigned char *)pFrstTD->pTDInfo->buf;
- pTxBufHead = (PSTxBufHead) pbyTxBufferAddr;
- wTxBufSize = sizeof(STxBufHead);
- memset(pTxBufHead, 0, wTxBufSize);
-
- if (pDevice->eCurrentPHYType == PHY_TYPE_11A) {
- wCurrentRate = RATE_6M;
- byPktType = PK_TYPE_11A;
+ short_head->time_stamp_off =
+ vnt_time_stamp_off(priv, current_rate);
} else {
- wCurrentRate = RATE_1M;
- byPktType = PK_TYPE_11B;
- }
-
- // SetPower will cause error power TX state for OFDM Date packet in TX buffer.
- // 2004.11.11 Kyle -- Using OFDM power to tx MngPkt will decrease the connection capability.
- // And cmd timer will wait data pkt TX to finish before scanning so it's OK
- // to set power here.
- if (pDevice->pMgmt->eScanState != WMAC_NO_SCANNING)
- RFbSetPower(pDevice, wCurrentRate, pDevice->byCurrentCh);
- else
- RFbSetPower(pDevice, wCurrentRate, pMgmt->uCurrChannel);
-
- pTxBufHead->byTxPower = pDevice->byCurPwr;
-
- //+++++++++++++++++++++ Patch VT3253 A1 performance +++++++++++++++++++++++++++
- if (pDevice->byFOETuning) {
- if ((p80211Header->sA3.wFrameCtl & TYPE_DATE_NULL) == TYPE_DATE_NULL) {
- wCurrentRate = RATE_24M;
- byPktType = PK_TYPE_11GA;
- }
- }
-
- pr_debug("vDMA0_tx_80211: p80211Header->sA3.wFrameCtl = %x\n",
- p80211Header->sA3.wFrameCtl);
-
- //Set packet type
- if (byPktType == PK_TYPE_11A) {//0000 0000 0000 0000
- pTxBufHead->wFIFOCtl = 0;
- } else if (byPktType == PK_TYPE_11B) {//0000 0001 0000 0000
- pTxBufHead->wFIFOCtl |= FIFOCTL_11B;
- } else if (byPktType == PK_TYPE_11GB) {//0000 0010 0000 0000
- pTxBufHead->wFIFOCtl |= FIFOCTL_11GB;
- } else if (byPktType == PK_TYPE_11GA) {//0000 0011 0000 0000
- pTxBufHead->wFIFOCtl |= FIFOCTL_11GA;
- }
-
- pTxBufHead->wFIFOCtl |= FIFOCTL_TMOEN;
- pTxBufHead->wTimeStamp = cpu_to_le16(DEFAULT_MGN_LIFETIME_RES_64us);
-
- if (is_multicast_ether_addr(&(p80211Header->sA3.abyAddr1[0]))) {
- bNeedACK = false;
- if (pDevice->bEnableHostWEP) {
- uNodeIndex = 0;
- bNodeExist = true;
- }
- } else {
- if (pDevice->bEnableHostWEP) {
- if (BSSDBbIsSTAInNodeDB(pDevice->pMgmt, (unsigned char *)(p80211Header->sA3.abyAddr1), &uNodeIndex))
- bNodeExist = true;
- }
- bNeedACK = true;
- pTxBufHead->wFIFOCtl |= FIFOCTL_NEEDACK;
- }
-
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) ||
- (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)) {
- pTxBufHead->wFIFOCtl |= FIFOCTL_LRETRY;
- }
-
- pTxBufHead->wFIFOCtl |= (FIFOCTL_GENINT | FIFOCTL_ISDMA0);
-
- if ((p80211Header->sA4.wFrameCtl & TYPE_SUBTYPE_MASK) == TYPE_CTL_PSPOLL) {
- bIsPSPOLL = true;
- cbMacHdLen = WLAN_HDR_ADDR2_LEN;
- } else {
- cbMacHdLen = WLAN_HDR_ADDR3_LEN;
- }
-
- // hostapd deamon ext support rate patch
- if (WLAN_GET_FC_FSTYPE(p80211Header->sA4.wFrameCtl) == WLAN_FSTYPE_ASSOCRESP) {
- if (((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates)->len != 0)
- cbExtSuppRate += ((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates)->len + WLAN_IEHDR_LEN;
+ current_rate = RATE_1M;
+ short_head->fifo_ctl |= cpu_to_le16(FIFOCTL_11B);
- if (((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates)->len != 0)
- cbExtSuppRate += ((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates)->len + WLAN_IEHDR_LEN;
+ /* Get SignalField,ServiceField,Length */
+ vnt_get_phy_field(priv, frame_size, current_rate,
+ PK_TYPE_11B, &short_head->ab);
- if (cbExtSuppRate > 0)
- cbFrameBodySize = WLAN_ASSOCRESP_OFF_SUPP_RATES;
- }
+ /* Get Duration and TimeStampOff */
+ short_head->duration =
+ cpu_to_le16((u16)s_uGetDataDuration(priv, DATADUR_B,
+ frame_size, PK_TYPE_11B, current_rate,
+ false, 0, 0, 1, AUTO_FB_NONE));
- //Set FRAGCTL_MACHDCNT
- pTxBufHead->wFragCtl |= cpu_to_le16((unsigned short)cbMacHdLen << 10);
-
- // Notes:
- // Although spec says MMPDU can be fragmented; In most cases,
- // no one will send a MMPDU under fragmentation. With RTS may occur.
- pDevice->bAES = false; //Set FRAGCTL_WEPTYP
-
- if (WLAN_GET_FC_ISWEP(p80211Header->sA4.wFrameCtl) != 0) {
- if (pDevice->eEncryptionStatus == Ndis802_11Encryption1Enabled) {
- cbIVlen = 4;
- cbICVlen = 4;
- pTxBufHead->wFragCtl |= FRAGCTL_LEGACY;
- } else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) {
- cbIVlen = 8;//IV+ExtIV
- cbMIClen = 8;
- cbICVlen = 4;
- pTxBufHead->wFragCtl |= FRAGCTL_TKIP;
- //We need to get seed here for filling TxKey entry.
- } else if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) {
- cbIVlen = 8;//RSN Header
- cbICVlen = 8;//MIC
- cbMICHDR = sizeof(struct vnt_mic_hdr);
- pTxBufHead->wFragCtl |= FRAGCTL_AES;
- pDevice->bAES = true;
- }
- //MAC Header should be padding 0 to DW alignment.
- uPadding = 4 - (cbMacHdLen%4);
- uPadding %= 4;
+ short_head->time_stamp_off =
+ vnt_time_stamp_off(priv, current_rate);
}
- cbFrameSize = cbMacHdLen + cbFrameBodySize + cbIVlen + cbMIClen + cbICVlen + cbFCSlen + cbExtSuppRate;
-
- //Set FIFOCTL_GrpAckPolicy
- if (pDevice->bGrpAckPolicy == true) //0000 0100 0000 0000
- pTxBufHead->wFIFOCtl |= FIFOCTL_GRPACK;
-
- //the rest of pTxBufHead->wFragCtl:FragTyp will be set later in s_vFillFragParameter()
+ short_head->fifo_ctl |= cpu_to_le16(FIFOCTL_GENINT);
- if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {//802.11g packet
+ /* Copy Beacon */
+ memcpy(mgmt_hdr, skb->data, skb->len);
- pvRrvTime = (void *)(pbyTxBufferAddr + wTxBufSize);
- pMICHDR = (struct vnt_mic_hdr *)(pbyTxBufferAddr + wTxBufSize +
- sizeof(struct vnt_rrv_time_cts));
- pvRTS = NULL;
- pvCTS = (struct vnt_cts *)(pbyTxBufferAddr + wTxBufSize +
- sizeof(struct vnt_rrv_time_cts) + cbMICHDR);
- pvTxDataHd = (void *)(pbyTxBufferAddr + wTxBufSize +
- sizeof(struct vnt_rrv_time_cts) + cbMICHDR + sizeof(struct vnt_cts));
- cbHeaderSize = wTxBufSize + sizeof(struct vnt_rrv_time_cts) +
- cbMICHDR + sizeof(struct vnt_cts) + sizeof(struct vnt_tx_datahead_g);
+ /* time stamp always 0 */
+ mgmt_hdr->u.beacon.timestamp = 0;
- } else {//802.11a/b packet
-
- pvRrvTime = (void *)(pbyTxBufferAddr + wTxBufSize);
- pMICHDR = (struct vnt_mic_hdr *) (pbyTxBufferAddr +
- wTxBufSize + sizeof(struct vnt_rrv_time_ab));
- pvRTS = NULL;
- pvCTS = NULL;
- pvTxDataHd = (void *)(pbyTxBufferAddr +
- wTxBufSize + sizeof(struct vnt_rrv_time_ab) + cbMICHDR);
- cbHeaderSize = wTxBufSize + sizeof(struct vnt_rrv_time_ab) +
- cbMICHDR + sizeof(struct vnt_tx_datahead_ab);
+ info = IEEE80211_SKB_CB(skb);
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)mgmt_hdr;
+ hdr->duration_id = 0;
+ hdr->seq_ctrl = cpu_to_le16(priv->wSeqCounter << 4);
}
- memset((void *)(pbyTxBufferAddr + wTxBufSize), 0, (cbHeaderSize - wTxBufSize));
- memcpy(&(sEthHeader.abyDstAddr[0]), &(p80211Header->sA3.abyAddr1[0]), ETH_ALEN);
- memcpy(&(sEthHeader.abySrcAddr[0]), &(p80211Header->sA3.abyAddr2[0]), ETH_ALEN);
- //=========================
- // No Fragmentation
- //=========================
- pTxBufHead->wFragCtl |= (unsigned short)FRAGCTL_NONFRAG;
-
- //Fill FIFO,RrvTime,RTS,and CTS
- s_vGenerateTxParameter(pDevice, byPktType, pbyTxBufferAddr, pvRrvTime, pvRTS, pvCTS,
- cbFrameSize, bNeedACK, TYPE_TXDMA0, &sEthHeader, wCurrentRate);
+ priv->wSeqCounter++;
+ if (priv->wSeqCounter > 0x0fff)
+ priv->wSeqCounter = 0;
- //Fill DataHead
- uDuration = s_uFillDataHead(pDevice, byPktType, pvTxDataHd, cbFrameSize, TYPE_TXDMA0, bNeedACK,
- 0, 0, 1, AUTO_FB_NONE, wCurrentRate);
+ priv->wBCNBufLen = sizeof(*short_head) + skb->len;
- pMACHeader = (PS802_11Header) (pbyTxBufferAddr + cbHeaderSize);
+ MACvSetCurrBCNTxDescAddr(priv->PortOffset, priv->tx_beacon_dma);
- cbReqCount = cbHeaderSize + cbMacHdLen + uPadding + cbIVlen + (cbFrameBodySize + cbMIClen) + cbExtSuppRate;
+ MACvSetCurrBCNLength(priv->PortOffset, priv->wBCNBufLen);
+ /* Set auto Transmit on */
+ MACvRegBitsOn(priv->PortOffset, MAC_REG_TCR, TCR_AUTOBCNTX);
+ /* Poll Transmit the adapter */
+ MACvTransmitBCN(priv->PortOffset);
- pbyMacHdr = (unsigned char *)(pbyTxBufferAddr + cbHeaderSize);
- pbyPayloadHead = (unsigned char *)(pbyMacHdr + cbMacHdLen + uPadding + cbIVlen);
- pbyIVHead = (unsigned char *)(pbyMacHdr + cbMacHdLen + uPadding);
+ return 0;
+}
- // Copy the Packet into a tx Buffer
- memcpy(pbyMacHdr, pbMPDU, cbMacHdLen);
+int vnt_beacon_make(struct vnt_private *priv, struct ieee80211_vif *vif)
+{
+ struct sk_buff *beacon;
-	// version set to 0, patch for hostapd daemon
- pMACHeader->wFrameCtl &= cpu_to_le16(0xfffc);
- memcpy(pbyPayloadHead, (pbMPDU + cbMacHdLen), cbFrameBodySize);
+ beacon = ieee80211_beacon_get(priv->hw, vif);
+ if (!beacon)
+ return -ENOMEM;
-	// replace supported rates, patch for hostapd daemon (only supports 11M)
- if (WLAN_GET_FC_FSTYPE(p80211Header->sA4.wFrameCtl) == WLAN_FSTYPE_ASSOCRESP) {
- if (cbExtSuppRate != 0) {
- if (((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates)->len != 0)
- memcpy((pbyPayloadHead + cbFrameBodySize),
- pMgmt->abyCurrSuppRates,
- ((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates)->len + WLAN_IEHDR_LEN
-);
- if (((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates)->len != 0)
- memcpy((pbyPayloadHead + cbFrameBodySize) + ((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates)->len + WLAN_IEHDR_LEN,
- pMgmt->abyCurrExtSuppRates,
- ((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates)->len + WLAN_IEHDR_LEN
-);
- }
+ if (vnt_beacon_xmit(priv, beacon)) {
+ ieee80211_free_txskb(priv->hw, beacon);
+ return -ENODEV;
}
- // Set wep
- if (WLAN_GET_FC_ISWEP(p80211Header->sA4.wFrameCtl) != 0) {
- if (pDevice->bEnableHostWEP) {
- pTransmitKey = &STempKey;
- pTransmitKey->byCipherSuite = pMgmt->sNodeDBTable[uNodeIndex].byCipherSuite;
- pTransmitKey->dwKeyIndex = pMgmt->sNodeDBTable[uNodeIndex].dwKeyIndex;
- pTransmitKey->uKeyLength = pMgmt->sNodeDBTable[uNodeIndex].uWepKeyLength;
- pTransmitKey->dwTSC47_16 = pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16;
- pTransmitKey->wTSC15_0 = pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0;
- memcpy(pTransmitKey->abyKey,
- &pMgmt->sNodeDBTable[uNodeIndex].abyWepKey[0],
- pTransmitKey->uKeyLength
-);
- }
-
- if ((pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
- dwMICKey0 = *(u32 *)(&pTransmitKey->abyKey[16]);
- dwMICKey1 = *(u32 *)(&pTransmitKey->abyKey[20]);
-
- // DO Software Michael
- MIC_vInit(dwMICKey0, dwMICKey1);
- MIC_vAppend((unsigned char *)&(sEthHeader.abyDstAddr[0]), 12);
- dwMIC_Priority = 0;
- MIC_vAppend((unsigned char *)&dwMIC_Priority, 4);
- pr_debug("DMA0_tx_8021:MIC KEY: %X, %X\n",
- dwMICKey0, dwMICKey1);
-
- uLength = cbHeaderSize + cbMacHdLen + uPadding + cbIVlen;
-
- MIC_vAppend((pbyTxBufferAddr + uLength), cbFrameBodySize);
-
- pdwMIC_L = (u32 *)(pbyTxBufferAddr + uLength + cbFrameBodySize);
- pdwMIC_R = (u32 *)(pbyTxBufferAddr + uLength + cbFrameBodySize + 4);
-
- MIC_vGetMIC(pdwMIC_L, pdwMIC_R);
- MIC_vUnInit();
-
- if (pDevice->bTxMICFail == true) {
- *pdwMIC_L = 0;
- *pdwMIC_R = 0;
- pDevice->bTxMICFail = false;
- }
-
- pr_debug("uLength: %d, %d\n", uLength, cbFrameBodySize);
- pr_debug("cbReqCount:%d, %d, %d, %d\n",
- cbReqCount, cbHeaderSize, uPadding, cbIVlen);
- pr_debug("MIC:%x, %x\n", *pdwMIC_L, *pdwMIC_R);
-
- }
-
- s_vFillTxKey(pDevice, (unsigned char *)(pTxBufHead->adwTxKey), pbyIVHead, pTransmitKey,
- pbyMacHdr, (unsigned short)cbFrameBodySize, (unsigned char *)pMICHDR);
-
- if (pDevice->bEnableHostWEP) {
- pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16 = pTransmitKey->dwTSC47_16;
- pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0 = pTransmitKey->wTSC15_0;
- }
-
- if ((pDevice->byLocalID <= REV_ID_VT3253_A1))
- s_vSWencryption(pDevice, pTransmitKey, pbyPayloadHead, (unsigned short)(cbFrameBodySize + cbMIClen));
- }
+ return 0;
+}
- pMACHeader->wSeqCtl = cpu_to_le16(pDevice->wSeqCounter << 4);
- pDevice->wSeqCounter++;
- if (pDevice->wSeqCounter > 0x0fff)
- pDevice->wSeqCounter = 0;
-
- if (bIsPSPOLL) {
-		// The MAC automatically replaces the Duration field of the MAC header
-		// with the Duration field of the FIFO control header.
-		// This corrupts the AID field of a PS-POLL packet (the PS-POLL AID field
-		// occupies the same position as the Duration field of other packets)
-		// and makes Cisco APs issue a Disassociation packet.
- if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
- ((struct vnt_tx_datahead_g *)pvTxDataHd)->duration_a = cpu_to_le16(p80211Header->sA2.wDurationID);
- ((struct vnt_tx_datahead_g *)pvTxDataHd)->duration_b = cpu_to_le16(p80211Header->sA2.wDurationID);
- } else {
- ((struct vnt_tx_datahead_ab *)pvTxDataHd)->duration = cpu_to_le16(p80211Header->sA2.wDurationID);
- }
- }
+int vnt_beacon_enable(struct vnt_private *priv, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *conf)
+{
+ int ret;
- // first TD is the only TD
- //Set TSR1 & ReqCount in TxDescHead
- pFrstTD->pTDInfo->skb = skb;
- pFrstTD->m_td1TD1.byTCR = (TCR_STP | TCR_EDP | EDMSDU);
- pFrstTD->pTDInfo->skb_dma = pFrstTD->pTDInfo->buf_dma;
- pFrstTD->m_td1TD1.wReqCount = cpu_to_le16(cbReqCount);
- pFrstTD->buff_addr = cpu_to_le32(pFrstTD->pTDInfo->skb_dma);
- pFrstTD->pTDInfo->byFlags = 0;
- pFrstTD->pTDInfo->byFlags |= TD_FLAGS_PRIV_SKB;
-
- if (MACbIsRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PS)) {
- // Disable PS
- MACbPSWakeup(pDevice->PortOffset);
- }
- pDevice->bPWBitOn = false;
+ VNSvOutPortB(priv->PortOffset + MAC_REG_TFTCTL, TFTCTL_TSFCNTRST);
- wmb();
- pFrstTD->m_td0TD0.f1Owner = OWNED_BY_NIC;
- wmb();
+ VNSvOutPortB(priv->PortOffset + MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
- pDevice->iTDUsed[TYPE_TXDMA0]++;
+ CARDvSetFirstNextTBTT(priv, conf->beacon_int);
- if (AVAIL_TD(pDevice, TYPE_TXDMA0) <= 1)
- pr_debug(" available td0 <= 1\n");
+ CARDbSetBeaconPeriod(priv, conf->beacon_int);
- pDevice->apCurrTD[TYPE_TXDMA0] = pFrstTD->next;
+ ret = vnt_beacon_make(priv, vif);
- // Poll Transmit the adapter
- MACvTransmit0(pDevice->PortOffset);
+ return ret;
}
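
Taken together, the new beacon entry points form a small chain: vnt_beacon_enable() resets and re-enables the TSF counter, programs the first TBTT and the beacon interval from conf->beacon_int, then calls vnt_beacon_make(), which pulls a beacon skb from mac80211 via ieee80211_beacon_get() and hands it to vnt_beacon_xmit(). A minimal sketch of how a mac80211 driver would typically drive this follows; the callback name and its placement in device_main.c are assumptions, not part of this hunk.

#include <net/mac80211.h>

#include "device.h"
#include "rxtx.h"

/* Hypothetical mac80211 call site -- illustrative only, not in the patch. */
static void vnt_bss_info_changed_sketch(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					struct ieee80211_bss_conf *conf,
					u32 changed)
{
	struct vnt_private *priv = hw->priv;

	/* Re-arm beaconing whenever mac80211 turns it on for this vif. */
	if ((changed & BSS_CHANGED_BEACON_ENABLED) && conf->enable_beacon)
		vnt_beacon_enable(priv, vif, conf);
}
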
diff --git a/drivers/staging/vt6655/rxtx.h b/drivers/staging/vt6655/rxtx.h
index 8ee62887dee..b9bd1639b13 100644
--- a/drivers/staging/vt6655/rxtx.h
+++ b/drivers/staging/vt6655/rxtx.h
@@ -29,9 +29,11 @@
#ifndef __RXTX_H__
#define __RXTX_H__
-#include "ttype.h"
#include "device.h"
-#include "wcmd.h"
+
+#define DEFAULT_MSDU_LIFETIME_RES_64us 8000 /* 64us */
+#define DEFAULT_MGN_LIFETIME_RES_64us 125 /* 64us */
+
/*--------------------- Export Definitions -------------------------*/
@@ -173,6 +175,14 @@ struct vnt_cts_fb {
u16 reserved2;
} __packed;
+struct vnt_tx_fifo_head {
+ u8 tx_key[WLAN_KEY_LEN_CCMP];
+ __le16 fifo_ctl;
+ __le16 time_stamp;
+ __le16 frag_ctl;
+ __le16 current_rate;
+} __packed;
+
struct vnt_tx_short_buf_head {
__le16 fifo_ctl;
u16 time_stamp;
@@ -181,38 +191,10 @@ struct vnt_tx_short_buf_head {
__le16 time_stamp_off;
} __packed;
-void
-vGenerateMACHeader(
- struct vnt_private *,
- unsigned char *pbyBufferAddr,
- unsigned short wDuration,
- PSEthernetHeader psEthHeader,
- bool bNeedEncrypt,
- unsigned short wFragType,
- unsigned int uDMAIdx,
- unsigned int uFragIdx
-);
-
-unsigned int
-cbGetFragCount(
- struct vnt_private *,
- PSKeyItem pTransmitKey,
- unsigned int cbFrameBodySize,
- PSEthernetHeader psEthHeader
-);
-
-void
-vGenerateFIFOHeader(struct vnt_private *, unsigned char byPktTyp,
- unsigned char *pbyTxBufferAddr, bool bNeedEncrypt,
- unsigned int cbPayloadSize, unsigned int uDMAIdx,
- PSTxDesc pHeadTD, PSEthernetHeader psEthHeader,
- unsigned char *pPacket, PSKeyItem pTransmitKey,
- unsigned int uNodeIndex, unsigned int *puMACfragNum,
- unsigned int *pcbHeaderSize);
-
-void vDMA0_tx_80211(struct vnt_private *, struct sk_buff *skb,
- unsigned char *pbMPDU, unsigned int cbMPDULen);
-CMD_STATUS csMgmt_xmit(struct vnt_private *, PSTxMgmtPacket pPacket);
-CMD_STATUS csBeacon_xmit(struct vnt_private *, PSTxMgmtPacket pPacket);
+int vnt_generate_fifo_header(struct vnt_private *, u32,
+ PSTxDesc head_td, struct sk_buff *);
+int vnt_beacon_make(struct vnt_private *, struct ieee80211_vif *);
+int vnt_beacon_enable(struct vnt_private *, struct ieee80211_vif *,
+ struct ieee80211_bss_conf *);
#endif // __RXTX_H__
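
For reference, the new struct vnt_tx_fifo_head packs a 16-byte key slot and four little-endian words into 24 bytes on the wire (assuming WLAN_KEY_LEN_CCMP is 16, as mac80211 defines it). A small compile-time check, illustrative only and not part of the patch:

#include <linux/bug.h>
#include <linux/ieee80211.h>

#include "rxtx.h"

/* Layout sanity check for the new FIFO head: __packed, so no padding. */
static inline void vnt_tx_fifo_head_layout_check(void)
{
	BUILD_BUG_ON(sizeof(struct vnt_tx_fifo_head) != WLAN_KEY_LEN_CCMP + 8);
}
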
diff --git a/drivers/staging/vt6655/srom.c b/drivers/staging/vt6655/srom.c
index 5396e5832c2..9ec49e653b6 100644
--- a/drivers/staging/vt6655/srom.c
+++ b/drivers/staging/vt6655/srom.c
@@ -44,7 +44,6 @@
#include "upc.h"
#include "tmacro.h"
-#include "tether.h"
#include "mac.h"
#include "srom.h"
@@ -108,144 +107,6 @@ unsigned char SROMbyReadEmbedded(void __iomem *dwIoBase, unsigned char byContntO
}
/*
- * Description: Write a byte to EEPROM, by MAC I2C
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * byContntOffset - address of EEPROM
- * wData - data to write
- * Out:
- * none
- *
- * Return Value: true if succeeded; false if failed.
- *
- */
-bool SROMbWriteEmbedded(void __iomem *dwIoBase, unsigned char byContntOffset, unsigned char byData)
-{
- unsigned short wDelay, wNoACK;
- unsigned char byWait;
-
- unsigned char byOrg;
-
- VNSvInPortB(dwIoBase + MAC_REG_I2MCFG, &byOrg);
- /* turn off hardware retry for getting NACK */
- VNSvOutPortB(dwIoBase + MAC_REG_I2MCFG, (byOrg & (~I2MCFG_NORETRY)));
- for (wNoACK = 0; wNoACK < W_MAX_I2CRETRY; wNoACK++) {
- VNSvOutPortB(dwIoBase + MAC_REG_I2MTGID, EEP_I2C_DEV_ID);
- VNSvOutPortB(dwIoBase + MAC_REG_I2MTGAD, byContntOffset);
- VNSvOutPortB(dwIoBase + MAC_REG_I2MDOPT, byData);
-
- /* issue write command */
- VNSvOutPortB(dwIoBase + MAC_REG_I2MCSR, I2MCSR_EEMW);
- /* wait DONE be set */
- for (wDelay = 0; wDelay < W_MAX_TIMEOUT; wDelay++) {
- VNSvInPortB(dwIoBase + MAC_REG_I2MCSR, &byWait);
- if (byWait & (I2MCSR_DONE | I2MCSR_NACK))
- break;
- PCAvDelayByIO(CB_DELAY_LOOP_WAIT);
- }
-
- if ((wDelay < W_MAX_TIMEOUT) &&
- (!(byWait & I2MCSR_NACK))) {
- break;
- }
- }
- if (wNoACK == W_MAX_I2CRETRY) {
- VNSvOutPortB(dwIoBase + MAC_REG_I2MCFG, byOrg);
- return false;
- }
- VNSvOutPortB(dwIoBase + MAC_REG_I2MCFG, byOrg);
- return true;
-}
-
-/*
- * Description: Turn bits on in eeprom
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * byContntOffset - address of EEPROM
- * byBits - bits to turn on
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void SROMvRegBitsOn(void __iomem *dwIoBase, unsigned char byContntOffset, unsigned char byBits)
-{
- unsigned char byOrgData;
-
- byOrgData = SROMbyReadEmbedded(dwIoBase, byContntOffset);
- SROMbWriteEmbedded(dwIoBase, byContntOffset, (unsigned char)(byOrgData | byBits));
-}
-
-/*
- * Description: Turn bits off in eeprom
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * byContntOffset - address of EEPROM
- * byBits - bits to turn off
- * Out:
- * none
- *
- */
-void SROMvRegBitsOff(void __iomem *dwIoBase, unsigned char byContntOffset, unsigned char byBits)
-{
- unsigned char byOrgData;
-
- byOrgData = SROMbyReadEmbedded(dwIoBase, byContntOffset);
- SROMbWriteEmbedded(dwIoBase, byContntOffset, (unsigned char)(byOrgData & (~byBits)));
-}
-
-/*
- * Description: Test if bits on in eeprom
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * byContntOffset - address of EEPROM
- * byTestBits - bits to test
- * Out:
- * none
- *
- * Return Value: true if all test bits on; otherwise false
- *
- */
-bool SROMbIsRegBitsOn(void __iomem *dwIoBase, unsigned char byContntOffset, unsigned char byTestBits)
-{
- unsigned char byOrgData;
-
- byOrgData = SROMbyReadEmbedded(dwIoBase, byContntOffset);
- return (byOrgData & byTestBits) == byTestBits;
-}
-
-/*
- * Description: Test if bits off in eeprom
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * byContntOffset - address of EEPROM
- * byTestBits - bits to test
- * Out:
- * none
- *
- * Return Value: true if all test bits off; otherwise false
- *
- */
-bool SROMbIsRegBitsOff(void __iomem *dwIoBase, unsigned char byContntOffset, unsigned char byTestBits)
-{
- unsigned char byOrgData;
-
- byOrgData = SROMbyReadEmbedded(dwIoBase, byContntOffset);
- return !(byOrgData & byTestBits);
-}
-
-/*
* Description: Read all contents of eeprom to buffer
*
* Parameters:
@@ -269,30 +130,6 @@ void SROMvReadAllContents(void __iomem *dwIoBase, unsigned char *pbyEepromRegs)
}
/*
- * Description: Write all contents of buffer to eeprom
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * pbyEepromRegs - EEPROM content Buffer
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void SROMvWriteAllContents(void __iomem *dwIoBase, unsigned char *pbyEepromRegs)
-{
- int ii;
-
- /* ii = Rom Address */
- for (ii = 0; ii < EEP_MAX_CONTEXT_SIZE; ii++) {
- SROMbWriteEmbedded(dwIoBase, (unsigned char)ii, *pbyEepromRegs);
- pbyEepromRegs++;
- }
-}
-
-/*
* Description: Read Ethernet Address from eeprom to buffer
*
* Parameters:
@@ -314,92 +151,3 @@ void SROMvReadEtherAddress(void __iomem *dwIoBase, unsigned char *pbyEtherAddres
pbyEtherAddress++;
}
}
-
-/*
- * Description: Write Ethernet Address from buffer to eeprom
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * pbyEtherAddress - Ethernet Address buffer
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void SROMvWriteEtherAddress(void __iomem *dwIoBase, unsigned char *pbyEtherAddress)
-{
- unsigned char ii;
-
- /* ii = Rom Address */
- for (ii = 0; ii < ETH_ALEN; ii++) {
- SROMbWriteEmbedded(dwIoBase, ii, *pbyEtherAddress);
- pbyEtherAddress++;
- }
-}
-
-/*
- * Description: Read Sub_VID and Sub_SysId from eeprom to buffer
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * Out:
- * pdwSubSysVenId - Sub_VID and Sub_SysId read
- *
- * Return Value: none
- *
- */
-void SROMvReadSubSysVenId(void __iomem *dwIoBase, unsigned long *pdwSubSysVenId)
-{
- unsigned char *pbyData;
-
- pbyData = (unsigned char *)pdwSubSysVenId;
- /* sub vendor */
- *pbyData = SROMbyReadEmbedded(dwIoBase, 6);
- *(pbyData+1) = SROMbyReadEmbedded(dwIoBase, 7);
- /* sub system */
- *(pbyData+2) = SROMbyReadEmbedded(dwIoBase, 8);
- *(pbyData+3) = SROMbyReadEmbedded(dwIoBase, 9);
-}
-
-/*
- * Description: Auto Load EEPROM to MAC register
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * Out:
- * none
- *
- * Return Value: true if success; otherwise false
- *
- */
-bool SROMbAutoLoad(void __iomem *dwIoBase)
-{
- unsigned char byWait;
- int ii;
-
- unsigned char byOrg;
-
- VNSvInPortB(dwIoBase + MAC_REG_I2MCFG, &byOrg);
- /* turn on hardware retry */
- VNSvOutPortB(dwIoBase + MAC_REG_I2MCFG, (byOrg | I2MCFG_NORETRY));
-
- MACvRegBitsOn(dwIoBase, MAC_REG_I2MCSR, I2MCSR_AUTOLD);
-
- /* ii = Rom Address */
- for (ii = 0; ii < EEP_MAX_CONTEXT_SIZE; ii++) {
- MACvTimer0MicroSDelay(dwIoBase, CB_EEPROM_READBYTE_WAIT);
- VNSvInPortB(dwIoBase + MAC_REG_I2MCSR, &byWait);
- if (!(byWait & I2MCSR_AUTOLD))
- break;
- }
-
- VNSvOutPortB(dwIoBase + MAC_REG_I2MCFG, byOrg);
-
- if (ii == EEP_MAX_CONTEXT_SIZE)
- return false;
- return true;
-}
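
Only the read-side SROM helpers survive this patch. A minimal usage sketch, assuming the usual probe-time call with the mapped register base (priv->PortOffset); the actual call site is not shown in this hunk:

#include <linux/etherdevice.h>

#include "device.h"
#include "srom.h"

/* Illustrative only: fetch the permanent station address from the EEPROM. */
static void vnt_read_mac_sketch(struct vnt_private *priv, u8 *mac_addr)
{
	SROMvReadEtherAddress(priv->PortOffset, mac_addr);
}
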
diff --git a/drivers/staging/vt6655/srom.h b/drivers/staging/vt6655/srom.h
index 3128e535bbd..7d3e3ef9f17 100644
--- a/drivers/staging/vt6655/srom.h
+++ b/drivers/staging/vt6655/srom.h
@@ -30,8 +30,6 @@
#ifndef __SROM_H__
#define __SROM_H__
-#include "ttype.h"
-
/*--------------------- Export Definitions -------------------------*/
#define EEP_MAX_CONTEXT_SIZE 256
@@ -91,40 +89,6 @@
/*--------------------- Export Types ------------------------------*/
-// AT24C02 eeprom contents
-// 2048 bits = 256 bytes = 128 words
-//
-typedef struct tagSSromReg {
- unsigned char abyPAR[6]; // 0x00 (unsigned short)
-
- unsigned short wSUB_VID; // 0x03 (unsigned short)
- unsigned short wSUB_SID;
-
- unsigned char byBCFG0; // 0x05 (unsigned short)
- unsigned char byBCFG1;
-
- unsigned char byFCR0; // 0x06 (unsigned short)
- unsigned char byFCR1;
- unsigned char byPMC0; // 0x07 (unsigned short)
- unsigned char byPMC1;
- unsigned char byMAXLAT; // 0x08 (unsigned short)
- unsigned char byMINGNT;
- unsigned char byCFG0; // 0x09 (unsigned short)
- unsigned char byCFG1;
- unsigned short wCISPTR; // 0x0A (unsigned short)
- unsigned short wRsv0; // 0x0B (unsigned short)
- unsigned short wRsv1; // 0x0C (unsigned short)
- unsigned char byBBPAIR; // 0x0D (unsigned short)
- unsigned char byRFTYPE;
- unsigned char byMinChannel; // 0x0E (unsigned short)
- unsigned char byMaxChannel;
- unsigned char bySignature; // 0x0F (unsigned short)
- unsigned char byCheckSum;
-
- unsigned char abyReserved0[96]; // 0x10 (unsigned short)
- unsigned char abyCIS[128]; // 0x80 (unsigned short)
-} SSromReg, *PSSromReg;
-
/*--------------------- Export Macros ------------------------------*/
/*--------------------- Export Classes ----------------------------*/
@@ -134,22 +98,9 @@ typedef struct tagSSromReg {
/*--------------------- Export Functions --------------------------*/
unsigned char SROMbyReadEmbedded(void __iomem *dwIoBase, unsigned char byContntOffset);
-bool SROMbWriteEmbedded(void __iomem *dwIoBase, unsigned char byContntOffset, unsigned char byData);
-
-void SROMvRegBitsOn(void __iomem *dwIoBase, unsigned char byContntOffset, unsigned char byBits);
-void SROMvRegBitsOff(void __iomem *dwIoBase, unsigned char byContntOffset, unsigned char byBits);
-
-bool SROMbIsRegBitsOn(void __iomem *dwIoBase, unsigned char byContntOffset, unsigned char byTestBits);
-bool SROMbIsRegBitsOff(void __iomem *dwIoBase, unsigned char byContntOffset, unsigned char byTestBits);
void SROMvReadAllContents(void __iomem *dwIoBase, unsigned char *pbyEepromRegs);
-void SROMvWriteAllContents(void __iomem *dwIoBase, unsigned char *pbyEepromRegs);
void SROMvReadEtherAddress(void __iomem *dwIoBase, unsigned char *pbyEtherAddress);
-void SROMvWriteEtherAddress(void __iomem *dwIoBase, unsigned char *pbyEtherAddress);
-
-void SROMvReadSubSysVenId(void __iomem *dwIoBase, unsigned long *pdwSubSysVenId);
-
-bool SROMbAutoLoad(void __iomem *dwIoBase);
#endif // __EEPROM_H__
diff --git a/drivers/staging/vt6655/tcrc.c b/drivers/staging/vt6655/tcrc.c
deleted file mode 100644
index ddc5efd040f..00000000000
--- a/drivers/staging/vt6655/tcrc.c
+++ /dev/null
@@ -1,191 +0,0 @@
-/*
- * Copyright (c) 2003 VIA Networking, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: tcrc.c
- *
- * Purpose: Implement functions to calculate CRC
- *
- * Author: Tevin Chen
- *
- * Date: May 21, 1996
- *
- * Functions:
- * CRCdwCrc32 -
- * CRCdwGetCrc32 -
- * CRCdwGetCrc32Ex -
- *
- * Revision History:
- *
- */
-
-#include "tcrc.h"
-
-/*--------------------- Static Definitions -------------------------*/
-
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Variables --------------------------*/
-
-/* 32-bit CRC table */
-static const unsigned long s_adwCrc32Table[256] = {
- 0x00000000L, 0x77073096L, 0xEE0E612CL, 0x990951BAL,
- 0x076DC419L, 0x706AF48FL, 0xE963A535L, 0x9E6495A3L,
- 0x0EDB8832L, 0x79DCB8A4L, 0xE0D5E91EL, 0x97D2D988L,
- 0x09B64C2BL, 0x7EB17CBDL, 0xE7B82D07L, 0x90BF1D91L,
- 0x1DB71064L, 0x6AB020F2L, 0xF3B97148L, 0x84BE41DEL,
- 0x1ADAD47DL, 0x6DDDE4EBL, 0xF4D4B551L, 0x83D385C7L,
- 0x136C9856L, 0x646BA8C0L, 0xFD62F97AL, 0x8A65C9ECL,
- 0x14015C4FL, 0x63066CD9L, 0xFA0F3D63L, 0x8D080DF5L,
- 0x3B6E20C8L, 0x4C69105EL, 0xD56041E4L, 0xA2677172L,
- 0x3C03E4D1L, 0x4B04D447L, 0xD20D85FDL, 0xA50AB56BL,
- 0x35B5A8FAL, 0x42B2986CL, 0xDBBBC9D6L, 0xACBCF940L,
- 0x32D86CE3L, 0x45DF5C75L, 0xDCD60DCFL, 0xABD13D59L,
- 0x26D930ACL, 0x51DE003AL, 0xC8D75180L, 0xBFD06116L,
- 0x21B4F4B5L, 0x56B3C423L, 0xCFBA9599L, 0xB8BDA50FL,
- 0x2802B89EL, 0x5F058808L, 0xC60CD9B2L, 0xB10BE924L,
- 0x2F6F7C87L, 0x58684C11L, 0xC1611DABL, 0xB6662D3DL,
- 0x76DC4190L, 0x01DB7106L, 0x98D220BCL, 0xEFD5102AL,
- 0x71B18589L, 0x06B6B51FL, 0x9FBFE4A5L, 0xE8B8D433L,
- 0x7807C9A2L, 0x0F00F934L, 0x9609A88EL, 0xE10E9818L,
- 0x7F6A0DBBL, 0x086D3D2DL, 0x91646C97L, 0xE6635C01L,
- 0x6B6B51F4L, 0x1C6C6162L, 0x856530D8L, 0xF262004EL,
- 0x6C0695EDL, 0x1B01A57BL, 0x8208F4C1L, 0xF50FC457L,
- 0x65B0D9C6L, 0x12B7E950L, 0x8BBEB8EAL, 0xFCB9887CL,
- 0x62DD1DDFL, 0x15DA2D49L, 0x8CD37CF3L, 0xFBD44C65L,
- 0x4DB26158L, 0x3AB551CEL, 0xA3BC0074L, 0xD4BB30E2L,
- 0x4ADFA541L, 0x3DD895D7L, 0xA4D1C46DL, 0xD3D6F4FBL,
- 0x4369E96AL, 0x346ED9FCL, 0xAD678846L, 0xDA60B8D0L,
- 0x44042D73L, 0x33031DE5L, 0xAA0A4C5FL, 0xDD0D7CC9L,
- 0x5005713CL, 0x270241AAL, 0xBE0B1010L, 0xC90C2086L,
- 0x5768B525L, 0x206F85B3L, 0xB966D409L, 0xCE61E49FL,
- 0x5EDEF90EL, 0x29D9C998L, 0xB0D09822L, 0xC7D7A8B4L,
- 0x59B33D17L, 0x2EB40D81L, 0xB7BD5C3BL, 0xC0BA6CADL,
- 0xEDB88320L, 0x9ABFB3B6L, 0x03B6E20CL, 0x74B1D29AL,
- 0xEAD54739L, 0x9DD277AFL, 0x04DB2615L, 0x73DC1683L,
- 0xE3630B12L, 0x94643B84L, 0x0D6D6A3EL, 0x7A6A5AA8L,
- 0xE40ECF0BL, 0x9309FF9DL, 0x0A00AE27L, 0x7D079EB1L,
- 0xF00F9344L, 0x8708A3D2L, 0x1E01F268L, 0x6906C2FEL,
- 0xF762575DL, 0x806567CBL, 0x196C3671L, 0x6E6B06E7L,
- 0xFED41B76L, 0x89D32BE0L, 0x10DA7A5AL, 0x67DD4ACCL,
- 0xF9B9DF6FL, 0x8EBEEFF9L, 0x17B7BE43L, 0x60B08ED5L,
- 0xD6D6A3E8L, 0xA1D1937EL, 0x38D8C2C4L, 0x4FDFF252L,
- 0xD1BB67F1L, 0xA6BC5767L, 0x3FB506DDL, 0x48B2364BL,
- 0xD80D2BDAL, 0xAF0A1B4CL, 0x36034AF6L, 0x41047A60L,
- 0xDF60EFC3L, 0xA867DF55L, 0x316E8EEFL, 0x4669BE79L,
- 0xCB61B38CL, 0xBC66831AL, 0x256FD2A0L, 0x5268E236L,
- 0xCC0C7795L, 0xBB0B4703L, 0x220216B9L, 0x5505262FL,
- 0xC5BA3BBEL, 0xB2BD0B28L, 0x2BB45A92L, 0x5CB36A04L,
- 0xC2D7FFA7L, 0xB5D0CF31L, 0x2CD99E8BL, 0x5BDEAE1DL,
- 0x9B64C2B0L, 0xEC63F226L, 0x756AA39CL, 0x026D930AL,
- 0x9C0906A9L, 0xEB0E363FL, 0x72076785L, 0x05005713L,
- 0x95BF4A82L, 0xE2B87A14L, 0x7BB12BAEL, 0x0CB61B38L,
- 0x92D28E9BL, 0xE5D5BE0DL, 0x7CDCEFB7L, 0x0BDBDF21L,
- 0x86D3D2D4L, 0xF1D4E242L, 0x68DDB3F8L, 0x1FDA836EL,
- 0x81BE16CDL, 0xF6B9265BL, 0x6FB077E1L, 0x18B74777L,
- 0x88085AE6L, 0xFF0F6A70L, 0x66063BCAL, 0x11010B5CL,
- 0x8F659EFFL, 0xF862AE69L, 0x616BFFD3L, 0x166CCF45L,
- 0xA00AE278L, 0xD70DD2EEL, 0x4E048354L, 0x3903B3C2L,
- 0xA7672661L, 0xD06016F7L, 0x4969474DL, 0x3E6E77DBL,
- 0xAED16A4AL, 0xD9D65ADCL, 0x40DF0B66L, 0x37D83BF0L,
- 0xA9BCAE53L, 0xDEBB9EC5L, 0x47B2CF7FL, 0x30B5FFE9L,
- 0xBDBDF21CL, 0xCABAC28AL, 0x53B39330L, 0x24B4A3A6L,
- 0xBAD03605L, 0xCDD70693L, 0x54DE5729L, 0x23D967BFL,
- 0xB3667A2EL, 0xC4614AB8L, 0x5D681B02L, 0x2A6F2B94L,
- 0xB40BBE37L, 0xC30C8EA1L, 0x5A05DF1BL, 0x2D02EF8DL
-};
-
-/*--------------------- Static Functions --------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*+
- *
- * Description:
- * Generate a CRC-32 from the data stream
- *
- * Parameters:
- * In:
- * pbyData - the data stream
- * cbByte - the length of the stream
- * dwCrcSeed - Seed for CRC32
- * Out:
- * none
- *
- * Return Value: CRC-32
- *
- -*/
-unsigned long CRCdwCrc32(unsigned char *pbyData, unsigned int cbByte, unsigned long dwCrcSeed)
-{
- unsigned long dwCrc;
-
- dwCrc = dwCrcSeed;
- while (cbByte--) {
- dwCrc = s_adwCrc32Table[(unsigned char)((dwCrc ^ (*pbyData)) & 0xFF)] ^ (dwCrc >> 8);
- pbyData++;
- }
-
- return dwCrc;
-}
-
-/*+
- *
- * Description:
- *	To test the CRC generator, input an 8-byte packet
- * -- 0xff 0xff 0xff 0xff 0x00 0x00 0x00 0x00
- * the generated CRC should be
- * -- 0xff 0xff 0xff 0xff
- *
- * Parameters:
- * In:
- * pbyData - the data stream
- * cbByte - the length of the stream
- * Out:
- * none
- *
- * Return Value: CRC-32
- *
- -*/
-unsigned long CRCdwGetCrc32(unsigned char *pbyData, unsigned int cbByte)
-{
- return ~CRCdwCrc32(pbyData, cbByte, 0xFFFFFFFFL);
-}
-
-/*+
- *
- * Description:
- *
- *	NOTE: CRCdwGetCrc32Ex() is iterative; its output is fed back
- *	as the seed argument of the next CRCdwGetCrc32Ex() call.
- *	The final result must therefore be inverted to obtain the
- *	correct CRC.
- *
- * Parameters:
- * In:
- * pbyData - the data stream
- * cbByte - the length of the stream
- * Out:
- * none
- *
- * Return Value: CRC-32
- *
- -*/
-unsigned long CRCdwGetCrc32Ex(unsigned char *pbyData, unsigned int cbByte, unsigned long dwPreCRC)
-{
- return CRCdwCrc32(pbyData, cbByte, dwPreCRC);
-}
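
The whole of the removed tcrc.c duplicates the reflected CRC-32 already provided by lib/crc32, which is presumably why it can go. A hedged equivalence sketch, assuming only <linux/crc32.h>; none of this code is part of the patch itself:

#include <linux/crc32.h>
#include <linux/types.h>

/*
 * Equivalences with the removed helpers (same polynomial, same
 * reflected table):
 *   CRCdwCrc32(p, len, seed)      == crc32_le(seed, p, len)
 *   CRCdwGetCrc32(p, len)         == ~crc32_le(~0U, p, len)
 *   CRCdwGetCrc32Ex(p, len, prev) == crc32_le(prev, p, len)
 */
static u32 vt6655_fcs32(const u8 *p, size_t len)
{
	return ~crc32_le(~0U, p, len);	/* standard Ethernet FCS */
}
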
diff --git a/drivers/staging/vt6655/tcrc.h b/drivers/staging/vt6655/tcrc.h
deleted file mode 100644
index 82b5ddafae5..00000000000
--- a/drivers/staging/vt6655/tcrc.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2003 VIA Networking, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: tcrc.h
- *
- * Purpose: Implement functions to calculate CRC
- *
- * Author: Tevin Chen
- *
- * Date: Jan. 28, 1997
- *
- */
-
-#ifndef __TCRC_H__
-#define __TCRC_H__
-
-#include "ttype.h"
-
-/*--------------------- Export Definitions -------------------------*/
-
-/*--------------------- Export Types ------------------------------*/
-
-/*--------------------- Export Macros ------------------------------*/
-
-/*--------------------- Export Classes ----------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-unsigned long CRCdwCrc32(unsigned char *pbyData, unsigned int cbByte, unsigned long dwCrcSeed);
-unsigned long CRCdwGetCrc32(unsigned char *pbyData, unsigned int cbByte);
-unsigned long CRCdwGetCrc32Ex(unsigned char *pbyData, unsigned int cbByte, unsigned long dwPreCRC);
-
-#endif // __TCRC_H__
diff --git a/drivers/staging/vt6655/tether.c b/drivers/staging/vt6655/tether.c
deleted file mode 100644
index 1e7d3e2115a..00000000000
--- a/drivers/staging/vt6655/tether.c
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Copyright (c) 2003 VIA Networking, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: tether.c
- *
- * Purpose:
- *
- * Author: Tevin Chen
- *
- * Date: May 21, 1996
- *
- * Functions:
- * ETHbyGetHashIndexByCrc32 - Calculate multicast hash value by CRC32
- *      ETHbIsBufferCrc32Ok - Check whether the CRC of the buffer is valid
- *
- * Revision History:
- *
- */
-
-#include "device.h"
-#include "tmacro.h"
-#include "tcrc.h"
-#include "tether.h"
-
-/*--------------------- Static Definitions -------------------------*/
-
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Variables --------------------------*/
-
-/*--------------------- Static Functions --------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*
- * Description: Calculate multicast hash value by CRC32
- *
- * Parameters:
- * In:
- * pbyMultiAddr - Multicast Address
- * Out:
- * none
- *
- * Return Value: Hash value
- *
- */
-unsigned char ETHbyGetHashIndexByCrc32(unsigned char *pbyMultiAddr)
-{
- int ii;
- unsigned char byTmpHash;
- unsigned char byHash = 0;
-
- // get the least 6-bits from CRC generator
- byTmpHash = (unsigned char)(CRCdwCrc32(pbyMultiAddr, ETH_ALEN,
- 0xFFFFFFFFL) & 0x3F);
- // reverse most bit to least bit
- for (ii = 0; ii < (sizeof(byTmpHash) * 8); ii++) {
- byHash <<= 1;
- if (byTmpHash & 0x01)
- byHash |= 1;
- byTmpHash >>= 1;
- }
-
- // adjust 6-bits to the right most
- return byHash >> 2;
-}
-
-/*
- * Description: Check whether the CRC of the buffer is valid
- *
- * Parameters:
- * In:
- * pbyBuffer - pointer of buffer (normally is rx buffer)
- * cbFrameLength - length of buffer, including CRC portion
- * Out:
- * none
- *
- * Return Value: true if ok; false if error.
- *
- */
-bool ETHbIsBufferCrc32Ok(unsigned char *pbyBuffer, unsigned int cbFrameLength)
-{
- unsigned long dwCRC;
-
- dwCRC = CRCdwGetCrc32(pbyBuffer, cbFrameLength - 4);
- if (cpu_to_le32(*((unsigned long *)(pbyBuffer + cbFrameLength - 4))) != dwCRC)
- return false;
-
- return true;
-}
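
For reference, the removed tether.c helpers can also be expressed with in-kernel primitives; the sketch below mirrors ETHbyGetHashIndexByCrc32() (reverse the low six bits of the little-endian CRC-32 of the address) and is illustrative rather than part of the patch:

#include <linux/bitrev.h>
#include <linux/crc32.h>
#include <linux/etherdevice.h>

/* Same result as the removed ETHbyGetHashIndexByCrc32(). */
static u8 vt6655_mc_hash(const u8 *mc_addr)
{
	u32 crc = crc32_le(~0U, mc_addr, ETH_ALEN);

	/* keep the low six CRC bits, bit-reversed */
	return bitrev8(crc & 0x3f) >> 2;
}
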
diff --git a/drivers/staging/vt6655/tether.h b/drivers/staging/vt6655/tether.h
deleted file mode 100644
index 94cc8830d8c..00000000000
--- a/drivers/staging/vt6655/tether.h
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: tether.h
- *
- * Purpose:
- *
- * Author: Tevin Chen
- *
- * Date: Jan. 28, 1997
- *
- */
-
-#ifndef __TETHER_H__
-#define __TETHER_H__
-
-#include <linux/etherdevice.h>
-#include "ttype.h"
-
-/*--------------------- Export Definitions -------------------------*/
-//
-// constants
-//
-#define U_ETHER_ADDR_STR_LEN (ETH_ALEN * 2 + 1)
-// Ethernet address string length
-
-#define MAX_LOOKAHEAD_SIZE ETH_FRAME_LEN
-
-#define U_MULTI_ADDR_LEN 8 // multicast address length
-
-#ifdef __BIG_ENDIAN
-
-#define TYPE_PKT_IP 0x0800 //
-#define TYPE_PKT_ARP 0x0806 //
-#define TYPE_PKT_RARP 0x8035 //
-#define TYPE_PKT_IPX 0x8137 //
-#define TYPE_PKT_802_1x 0x888e
-#define TYPE_PKT_PreAuth 0x88C7
-
-#define TYPE_PKT_PING_M_REQ 0x8011 // master request
-#define TYPE_PKT_PING_S_GNT 0x8022 // slave grant
-#define TYPE_PKT_PING_M 0x8077 // pingpong master packet
-#define TYPE_PKT_PING_S 0x8088 // pingpong slave packet
-#define TYPE_PKT_WOL_M_REQ 0x8033 // WOL waker request
-#define TYPE_PKT_WOL_S_GNT 0x8044 // WOL sleeper grant
-#define TYPE_MGMT_PROBE_RSP 0x5000
-#define TYPE_PKT_VNT_DIAG 0x8011 // Diag Pkt
-#define TYPE_PKT_VNT_PER 0x8888 // Diag PER Pkt
-//
-// wFrameCtl field in the S802_11Header
-//
-// NOTE....
-// in network byte order, high byte is going first
-#define FC_TODS 0x0001
-#define FC_FROMDS 0x0002
-#define FC_MOREFRAG 0x0004
-#define FC_RETRY 0x0008
-#define FC_POWERMGT 0x0010
-#define FC_MOREDATA 0x0020
-#define FC_WEP 0x0040
-#define TYPE_802_11_ATIM 0x9000
-
-#define TYPE_802_11_DATA 0x0800
-#define TYPE_802_11_CTL 0x0400
-#define TYPE_802_11_MGMT 0x0000
-#define TYPE_802_11_MASK 0x0C00
-#define TYPE_SUBTYPE_MASK 0xFC00
-#define TYPE_802_11_NODATA 0x4000
-#define TYPE_DATE_NULL 0x4800
-
-#define TYPE_CTL_PSPOLL 0xa400
-#define TYPE_CTL_RTS 0xb400
-#define TYPE_CTL_CTS 0xc400
-#define TYPE_CTL_ACK 0xd400
-
-#else //if LITTLE_ENDIAN
-//
-// wType field in the SEthernetHeader
-//
-// NOTE....
-// in network byte order, high byte is going first
-#define TYPE_PKT_IP 0x0008 //
-#define TYPE_PKT_ARP 0x0608 //
-#define TYPE_PKT_RARP 0x3580 //
-#define TYPE_PKT_IPX 0x3781 //
-
-#define TYPE_PKT_802_1x 0x8e88
-#define TYPE_PKT_PreAuth 0xC788
-
-#define TYPE_PKT_PING_M_REQ 0x1180 // master request
-#define TYPE_PKT_PING_S_GNT 0x2280 // slave grant
-#define TYPE_PKT_PING_M 0x7780 // pingpong master packet
-#define TYPE_PKT_PING_S 0x8880 // pingpong slave packet
-#define TYPE_PKT_WOL_M_REQ 0x3380 // WOL waker request
-#define TYPE_PKT_WOL_S_GNT 0x4480 // WOL sleeper grant
-#define TYPE_MGMT_PROBE_RSP 0x0050
-#define TYPE_PKT_VNT_DIAG 0x1180 // Diag Pkt
-#define TYPE_PKT_VNT_PER 0x8888 // Diag PER Pkt
-//
-// wFrameCtl field in the S802_11Header
-//
-// NOTE....
-// in network byte order, high byte is going first
-#define FC_TODS 0x0100
-#define FC_FROMDS 0x0200
-#define FC_MOREFRAG 0x0400
-#define FC_RETRY 0x0800
-#define FC_POWERMGT 0x1000
-#define FC_MOREDATA 0x2000
-#define FC_WEP 0x4000
-#define TYPE_802_11_ATIM 0x0090
-
-#define TYPE_802_11_DATA 0x0008
-#define TYPE_802_11_CTL 0x0004
-#define TYPE_802_11_MGMT 0x0000
-#define TYPE_802_11_MASK 0x000C
-#define TYPE_SUBTYPE_MASK 0x00FC
-#define TYPE_802_11_NODATA 0x0040
-#define TYPE_DATE_NULL 0x0048
-
-#define TYPE_CTL_PSPOLL 0x00a4
-#define TYPE_CTL_RTS 0x00b4
-#define TYPE_CTL_CTS 0x00c4
-#define TYPE_CTL_ACK 0x00d4
-
-#endif //#ifdef __BIG_ENDIAN
-
-#define WEP_IV_MASK 0x00FFFFFF
-
-/*--------------------- Export Types ------------------------------*/
-//
-// Ethernet packet
-//
-typedef struct tagSEthernetHeader {
- unsigned char abyDstAddr[ETH_ALEN];
- unsigned char abySrcAddr[ETH_ALEN];
- unsigned short wType;
-} __attribute__ ((__packed__))
-SEthernetHeader, *PSEthernetHeader;
-
-//
-// 802_3 packet
-//
-typedef struct tagS802_3Header {
- unsigned char abyDstAddr[ETH_ALEN];
- unsigned char abySrcAddr[ETH_ALEN];
- unsigned short wLen;
-} __attribute__ ((__packed__))
-S802_3Header, *PS802_3Header;
-
-//
-// 802_11 packet
-//
-typedef struct tagS802_11Header {
- unsigned short wFrameCtl;
- unsigned short wDurationID;
- unsigned char abyAddr1[ETH_ALEN];
- unsigned char abyAddr2[ETH_ALEN];
- unsigned char abyAddr3[ETH_ALEN];
- unsigned short wSeqCtl;
- unsigned char abyAddr4[ETH_ALEN];
-} __attribute__ ((__packed__))
-S802_11Header, *PS802_11Header;
-
-/*--------------------- Export Macros ------------------------------*/
-
-/*--------------------- Export Classes ----------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-unsigned char ETHbyGetHashIndexByCrc32(unsigned char *pbyMultiAddr);
-//unsigned char ETHbyGetHashIndexByCrc(unsigned char *pbyMultiAddr);
-bool ETHbIsBufferCrc32Ok(unsigned char *pbyBuffer, unsigned int cbFrameLength);
-
-#endif // __TETHER_H__
diff --git a/drivers/staging/vt6655/tkip.c b/drivers/staging/vt6655/tkip.c
deleted file mode 100644
index f758d021c60..00000000000
--- a/drivers/staging/vt6655/tkip.c
+++ /dev/null
@@ -1,268 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: tkip.c
- *
- * Purpose: Implement functions for 802.11i TKIP
- *
- * Author: Jerry Chen
- *
- * Date: Mar. 11, 2003
- *
- * Functions:
- * TKIPvMixKey - Get TKIP RC4 Key from TK,TA, and TSC
- *
- * Revision History:
- *
- */
-
-#include "tmacro.h"
-#include "tkip.h"
-
-/*--------------------- Static Definitions -------------------------*/
-
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Variables --------------------------*/
-
-/*--------------------- Static Functions --------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Static Definitions -------------------------*/
-
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Variables --------------------------*/
-
-/* The Sbox is reduced to 2 16-bit wide tables, each with 256 entries. */
-/* The 2nd table is the same as the 1st but with the upper and lower */
-/* bytes swapped. To allow an endian tolerant implementation, the byte */
-/* halves have been expressed independently here. */
-static const unsigned char TKIP_Sbox_Lower[256] = {
- 0xA5, 0x84, 0x99, 0x8D, 0x0D, 0xBD, 0xB1, 0x54,
- 0x50, 0x03, 0xA9, 0x7D, 0x19, 0x62, 0xE6, 0x9A,
- 0x45, 0x9D, 0x40, 0x87, 0x15, 0xEB, 0xC9, 0x0B,
- 0xEC, 0x67, 0xFD, 0xEA, 0xBF, 0xF7, 0x96, 0x5B,
- 0xC2, 0x1C, 0xAE, 0x6A, 0x5A, 0x41, 0x02, 0x4F,
- 0x5C, 0xF4, 0x34, 0x08, 0x93, 0x73, 0x53, 0x3F,
- 0x0C, 0x52, 0x65, 0x5E, 0x28, 0xA1, 0x0F, 0xB5,
- 0x09, 0x36, 0x9B, 0x3D, 0x26, 0x69, 0xCD, 0x9F,
- 0x1B, 0x9E, 0x74, 0x2E, 0x2D, 0xB2, 0xEE, 0xFB,
- 0xF6, 0x4D, 0x61, 0xCE, 0x7B, 0x3E, 0x71, 0x97,
- 0xF5, 0x68, 0x00, 0x2C, 0x60, 0x1F, 0xC8, 0xED,
- 0xBE, 0x46, 0xD9, 0x4B, 0xDE, 0xD4, 0xE8, 0x4A,
- 0x6B, 0x2A, 0xE5, 0x16, 0xC5, 0xD7, 0x55, 0x94,
- 0xCF, 0x10, 0x06, 0x81, 0xF0, 0x44, 0xBA, 0xE3,
- 0xF3, 0xFE, 0xC0, 0x8A, 0xAD, 0xBC, 0x48, 0x04,
- 0xDF, 0xC1, 0x75, 0x63, 0x30, 0x1A, 0x0E, 0x6D,
- 0x4C, 0x14, 0x35, 0x2F, 0xE1, 0xA2, 0xCC, 0x39,
- 0x57, 0xF2, 0x82, 0x47, 0xAC, 0xE7, 0x2B, 0x95,
- 0xA0, 0x98, 0xD1, 0x7F, 0x66, 0x7E, 0xAB, 0x83,
- 0xCA, 0x29, 0xD3, 0x3C, 0x79, 0xE2, 0x1D, 0x76,
- 0x3B, 0x56, 0x4E, 0x1E, 0xDB, 0x0A, 0x6C, 0xE4,
- 0x5D, 0x6E, 0xEF, 0xA6, 0xA8, 0xA4, 0x37, 0x8B,
- 0x32, 0x43, 0x59, 0xB7, 0x8C, 0x64, 0xD2, 0xE0,
- 0xB4, 0xFA, 0x07, 0x25, 0xAF, 0x8E, 0xE9, 0x18,
- 0xD5, 0x88, 0x6F, 0x72, 0x24, 0xF1, 0xC7, 0x51,
- 0x23, 0x7C, 0x9C, 0x21, 0xDD, 0xDC, 0x86, 0x85,
- 0x90, 0x42, 0xC4, 0xAA, 0xD8, 0x05, 0x01, 0x12,
- 0xA3, 0x5F, 0xF9, 0xD0, 0x91, 0x58, 0x27, 0xB9,
- 0x38, 0x13, 0xB3, 0x33, 0xBB, 0x70, 0x89, 0xA7,
- 0xB6, 0x22, 0x92, 0x20, 0x49, 0xFF, 0x78, 0x7A,
- 0x8F, 0xF8, 0x80, 0x17, 0xDA, 0x31, 0xC6, 0xB8,
- 0xC3, 0xB0, 0x77, 0x11, 0xCB, 0xFC, 0xD6, 0x3A
-};
-
-static const unsigned char TKIP_Sbox_Upper[256] = {
- 0xC6, 0xF8, 0xEE, 0xF6, 0xFF, 0xD6, 0xDE, 0x91,
- 0x60, 0x02, 0xCE, 0x56, 0xE7, 0xB5, 0x4D, 0xEC,
- 0x8F, 0x1F, 0x89, 0xFA, 0xEF, 0xB2, 0x8E, 0xFB,
- 0x41, 0xB3, 0x5F, 0x45, 0x23, 0x53, 0xE4, 0x9B,
- 0x75, 0xE1, 0x3D, 0x4C, 0x6C, 0x7E, 0xF5, 0x83,
- 0x68, 0x51, 0xD1, 0xF9, 0xE2, 0xAB, 0x62, 0x2A,
- 0x08, 0x95, 0x46, 0x9D, 0x30, 0x37, 0x0A, 0x2F,
- 0x0E, 0x24, 0x1B, 0xDF, 0xCD, 0x4E, 0x7F, 0xEA,
- 0x12, 0x1D, 0x58, 0x34, 0x36, 0xDC, 0xB4, 0x5B,
- 0xA4, 0x76, 0xB7, 0x7D, 0x52, 0xDD, 0x5E, 0x13,
- 0xA6, 0xB9, 0x00, 0xC1, 0x40, 0xE3, 0x79, 0xB6,
- 0xD4, 0x8D, 0x67, 0x72, 0x94, 0x98, 0xB0, 0x85,
- 0xBB, 0xC5, 0x4F, 0xED, 0x86, 0x9A, 0x66, 0x11,
- 0x8A, 0xE9, 0x04, 0xFE, 0xA0, 0x78, 0x25, 0x4B,
- 0xA2, 0x5D, 0x80, 0x05, 0x3F, 0x21, 0x70, 0xF1,
- 0x63, 0x77, 0xAF, 0x42, 0x20, 0xE5, 0xFD, 0xBF,
- 0x81, 0x18, 0x26, 0xC3, 0xBE, 0x35, 0x88, 0x2E,
- 0x93, 0x55, 0xFC, 0x7A, 0xC8, 0xBA, 0x32, 0xE6,
- 0xC0, 0x19, 0x9E, 0xA3, 0x44, 0x54, 0x3B, 0x0B,
- 0x8C, 0xC7, 0x6B, 0x28, 0xA7, 0xBC, 0x16, 0xAD,
- 0xDB, 0x64, 0x74, 0x14, 0x92, 0x0C, 0x48, 0xB8,
- 0x9F, 0xBD, 0x43, 0xC4, 0x39, 0x31, 0xD3, 0xF2,
- 0xD5, 0x8B, 0x6E, 0xDA, 0x01, 0xB1, 0x9C, 0x49,
- 0xD8, 0xAC, 0xF3, 0xCF, 0xCA, 0xF4, 0x47, 0x10,
- 0x6F, 0xF0, 0x4A, 0x5C, 0x38, 0x57, 0x73, 0x97,
- 0xCB, 0xA1, 0xE8, 0x3E, 0x96, 0x61, 0x0D, 0x0F,
- 0xE0, 0x7C, 0x71, 0xCC, 0x90, 0x06, 0xF7, 0x1C,
- 0xC2, 0x6A, 0xAE, 0x69, 0x17, 0x99, 0x3A, 0x27,
- 0xD9, 0xEB, 0x2B, 0x22, 0xD2, 0xA9, 0x07, 0x33,
- 0x2D, 0x3C, 0x15, 0xC9, 0x87, 0xAA, 0x50, 0xA5,
- 0x03, 0x59, 0x09, 0x1A, 0x65, 0xD7, 0x84, 0xD0,
- 0x82, 0x29, 0x5A, 0x1E, 0x7B, 0xA8, 0x6D, 0x2C
-};
-
-//STKIPKeyManagement sTKIPKeyTable[MAX_TKIP_KEY];
-
-/*--------------------- Static Functions --------------------------*/
-unsigned int tkip_sbox(unsigned int index);
-unsigned int rotr1(unsigned int a);
-
-/*--------------------- Export Variables --------------------------*/
-
-/************************************************************/
-/* tkip_sbox() */
-/* Returns a 16 bit value from a 64K entry table. The Table */
-/* is synthesized from two 256 entry byte wide tables. */
-/************************************************************/
-unsigned int tkip_sbox(unsigned int index)
-{
- unsigned int index_low;
- unsigned int index_high;
- unsigned int left, right;
-
- index_low = (index % 256);
- index_high = ((index >> 8) % 256);
-
- left = TKIP_Sbox_Lower[index_low] + (TKIP_Sbox_Upper[index_low] * 256);
- right = TKIP_Sbox_Upper[index_high] + (TKIP_Sbox_Lower[index_high] * 256);
-
- return left ^ right;
-};
-
-unsigned int rotr1(unsigned int a)
-{
- unsigned int b;
-
- if ((a & 0x01) == 0x01)
- b = (a >> 1) | 0x8000;
- else
- b = (a >> 1) & 0x7fff;
-
- b = b % 65536;
- return b;
-}
-
-/*
- * Description: Calculate RC4Key from TK, TA, and TSC
- *
- * Parameters:
- * In:
- * pbyTKey - TKey
- * pbyTA - TA
- * dwTSC - TSC
- * Out:
- * pbyRC4Key - RC4Key
- *
- * Return Value: none
- *
- */
-void TKIPvMixKey(
- unsigned char *pbyTKey,
- unsigned char *pbyTA,
- unsigned short wTSC15_0,
- unsigned long dwTSC47_16,
- unsigned char *pbyRC4Key
-)
-{
- unsigned int p1k[5];
- unsigned int tsc0, tsc1, tsc2;
- unsigned int ppk0, ppk1, ppk2, ppk3, ppk4, ppk5;
- unsigned long int pnl, pnh;
-
- int i, j;
-
- pnl = wTSC15_0;
- pnh = dwTSC47_16;
-
- tsc0 = (unsigned int)((pnh >> 16) % 65536); /* msb */
- tsc1 = (unsigned int)(pnh % 65536);
- tsc2 = (unsigned int)(pnl % 65536); /* lsb */
-
- /* Phase 1, step 1 */
- p1k[0] = tsc1;
- p1k[1] = tsc0;
- p1k[2] = (unsigned int)(pbyTA[0] + (pbyTA[1]*256));
- p1k[3] = (unsigned int)(pbyTA[2] + (pbyTA[3]*256));
- p1k[4] = (unsigned int)(pbyTA[4] + (pbyTA[5]*256));
-
- /* Phase 1, step 2 */
- for (i = 0; i < 8; i++) {
- j = 2 * (i & 1);
- p1k[0] = (p1k[0] + tkip_sbox((p1k[4] ^ ((256*pbyTKey[1+j]) + pbyTKey[j])) % 65536)) % 65536;
- p1k[1] = (p1k[1] + tkip_sbox((p1k[0] ^ ((256*pbyTKey[5+j]) + pbyTKey[4+j])) % 65536)) % 65536;
- p1k[2] = (p1k[2] + tkip_sbox((p1k[1] ^ ((256*pbyTKey[9+j]) + pbyTKey[8+j])) % 65536)) % 65536;
- p1k[3] = (p1k[3] + tkip_sbox((p1k[2] ^ ((256*pbyTKey[13+j]) + pbyTKey[12+j])) % 65536)) % 65536;
- p1k[4] = (p1k[4] + tkip_sbox((p1k[3] ^ (((256*pbyTKey[1+j]) + pbyTKey[j]))) % 65536)) % 65536;
- p1k[4] = (p1k[4] + i) % 65536;
- }
- /* Phase 2, Step 1 */
- ppk0 = p1k[0];
- ppk1 = p1k[1];
- ppk2 = p1k[2];
- ppk3 = p1k[3];
- ppk4 = p1k[4];
- ppk5 = (p1k[4] + tsc2) % 65536;
-
- /* Phase2, Step 2 */
- ppk0 = ppk0 + tkip_sbox((ppk5 ^ ((256*pbyTKey[1]) + pbyTKey[0])) % 65536);
- ppk1 = ppk1 + tkip_sbox((ppk0 ^ ((256*pbyTKey[3]) + pbyTKey[2])) % 65536);
- ppk2 = ppk2 + tkip_sbox((ppk1 ^ ((256*pbyTKey[5]) + pbyTKey[4])) % 65536);
- ppk3 = ppk3 + tkip_sbox((ppk2 ^ ((256*pbyTKey[7]) + pbyTKey[6])) % 65536);
- ppk4 = ppk4 + tkip_sbox((ppk3 ^ ((256*pbyTKey[9]) + pbyTKey[8])) % 65536);
- ppk5 = ppk5 + tkip_sbox((ppk4 ^ ((256*pbyTKey[11]) + pbyTKey[10])) % 65536);
-
- ppk0 = ppk0 + rotr1(ppk5 ^ ((256*pbyTKey[13]) + pbyTKey[12]));
- ppk1 = ppk1 + rotr1(ppk0 ^ ((256*pbyTKey[15]) + pbyTKey[14]));
- ppk2 = ppk2 + rotr1(ppk1);
- ppk3 = ppk3 + rotr1(ppk2);
- ppk4 = ppk4 + rotr1(ppk3);
- ppk5 = ppk5 + rotr1(ppk4);
-
- /* Phase 2, Step 3 */
- pbyRC4Key[0] = (tsc2 >> 8) % 256;
- pbyRC4Key[1] = (((tsc2 >> 8) % 256) | 0x20) & 0x7f;
- pbyRC4Key[2] = tsc2 % 256;
- pbyRC4Key[3] = ((ppk5 ^ ((256*pbyTKey[1]) + pbyTKey[0])) >> 1) % 256;
-
- pbyRC4Key[4] = ppk0 % 256;
- pbyRC4Key[5] = (ppk0 >> 8) % 256;
-
- pbyRC4Key[6] = ppk1 % 256;
- pbyRC4Key[7] = (ppk1 >> 8) % 256;
-
- pbyRC4Key[8] = ppk2 % 256;
- pbyRC4Key[9] = (ppk2 >> 8) % 256;
-
- pbyRC4Key[10] = ppk3 % 256;
- pbyRC4Key[11] = (ppk3 >> 8) % 256;
-
- pbyRC4Key[12] = ppk4 % 256;
- pbyRC4Key[13] = (ppk4 >> 8) % 256;
-
- pbyRC4Key[14] = ppk5 % 256;
- pbyRC4Key[15] = (ppk5 >> 8) % 256;
-}
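
One detail of the removed mixer worth noting: Phase 2, step 3 builds the first three RC4 key bytes (the per-packet WEP IV) purely from the low 16 bits of the TSC, with the middle byte masked into a range that avoids weak RC4 IVs; for wTSC15_0 == 0x0001 that gives 0x00, 0x20, 0x01. A standalone sketch of just that step, not part of the patch:

#include <linux/types.h>

/* Mirrors the tail of the removed TKIPvMixKey(): the per-packet WEP IV. */
static void tkip_iv_from_tsc16(u16 tsc16, u8 iv[3])
{
	iv[0] = (tsc16 >> 8) & 0xff;
	iv[1] = (((tsc16 >> 8) & 0xff) | 0x20) & 0x7f;	/* dodge weak RC4 IVs */
	iv[2] = tsc16 & 0xff;
}
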
diff --git a/drivers/staging/vt6655/tkip.h b/drivers/staging/vt6655/tkip.h
deleted file mode 100644
index 3b6357ac6de..00000000000
--- a/drivers/staging/vt6655/tkip.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: tkip.h
- *
- * Purpose: Implement functions for 802.11i TKIP
- *
- * Author: Jerry Chen
- *
- * Date: Mar. 11, 2003
- *
- */
-
-#ifndef __TKIP_H__
-#define __TKIP_H__
-
-#include "ttype.h"
-#include "tether.h"
-
-/*--------------------- Export Definitions -------------------------*/
-#define TKIP_KEY_LEN 16
-
-/*--------------------- Export Types ------------------------------*/
-
-/*--------------------- Export Macros ------------------------------*/
-
-/*--------------------- Export Classes ----------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-void TKIPvMixKey(
- unsigned char *pbyTKey,
- unsigned char *pbyTA,
- unsigned short wTSC15_0,
- unsigned long dwTSC47_16,
- unsigned char *pbyRC4Key
-);
-
-#endif // __TKIP_H__
diff --git a/drivers/staging/vt6655/tmacro.h b/drivers/staging/vt6655/tmacro.h
index 59c6e72f993..607b78f7a6a 100644
--- a/drivers/staging/vt6655/tmacro.h
+++ b/drivers/staging/vt6655/tmacro.h
@@ -29,8 +29,6 @@
#ifndef __TMACRO_H__
#define __TMACRO_H__
-#include "ttype.h"
-
/****** Common helper macros ***********************************************/
#if !defined(LOBYTE)
diff --git a/drivers/staging/vt6655/ttype.h b/drivers/staging/vt6655/ttype.h
deleted file mode 100644
index 747ef62ec9b..00000000000
--- a/drivers/staging/vt6655/ttype.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: ttype.h
- *
- * Purpose: define basic common types and macros
- *
- * Author: Tevin Chen
- *
- * Date: May 21, 1996
- *
- */
-
-#ifndef __TTYPE_H__
-#define __TTYPE_H__
-
-/******* Common definitions and typedefs ***********************************/
-
-#ifndef WPA_SM_Transtatus
-#define WPA_SM_Transtatus
-#endif
-
-#ifndef Calcu_LinkQual
-#define Calcu_LinkQual
-#endif
-
-#endif // __TTYPE_H__
diff --git a/drivers/staging/vt6655/upc.h b/drivers/staging/vt6655/upc.h
index c5c889cade2..c53703a772f 100644
--- a/drivers/staging/vt6655/upc.h
+++ b/drivers/staging/vt6655/upc.h
@@ -30,7 +30,6 @@
#define __UPC_H__
#include "device.h"
-#include "ttype.h"
/*--------------------- Export Definitions -------------------------*/
diff --git a/drivers/staging/vt6655/vntconfiguration.dat b/drivers/staging/vt6655/vntconfiguration.dat
deleted file mode 100644
index 0064ddce7c1..00000000000
--- a/drivers/staging/vt6655/vntconfiguration.dat
+++ /dev/null
@@ -1 +0,0 @@
-ZONETYPE=EUROPE \ No newline at end of file
diff --git a/drivers/staging/vt6655/vntwifi.c b/drivers/staging/vt6655/vntwifi.c
deleted file mode 100644
index 59f66fe4735..00000000000
--- a/drivers/staging/vt6655/vntwifi.c
+++ /dev/null
@@ -1,700 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: vntwifi.c
- *
- * Purpose: export functions for vntwifi lib
- *
- * Functions:
- *
- * Revision History:
- *
- * Author: Yiching Chen
- *
- * Date: feb. 2, 2005
- *
- */
-
-#include "vntwifi.h"
-#include "IEEE11h.h"
-#include "country.h"
-#include "device.h"
-#include "wmgr.h"
-#include "datarate.h"
-
-/*--------------------- Static Definitions -------------------------*/
-
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Variables --------------------------*/
-
-/*--------------------- Static Functions --------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-/*+
- *
- * Description:
- * Set Operation Mode
- *
- * Parameters:
- * In:
- * pMgmtHandle - pointer to management object
- * eOPMode - Operation Mode
- * Out:
- * none
- *
- * Return Value: none
- *
- -*/
-void
-VNTWIFIvSetOPMode(
- void *pMgmtHandle,
- WMAC_CONFIG_MODE eOPMode
-)
-{
- PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
-
- pMgmt->eConfigMode = eOPMode;
-}
-
-/*+
- *
- * Description:
- *      Set IBSS parameters
- *
- * Parameters:
- * In:
- * pMgmtHandle - pointer to management object
- * wBeaconPeriod - Beacon Period
- * wATIMWindow - ATIM window
- * uChannel - channel number
- * Out:
- * none
- *
- * Return Value: none
- *
- -*/
-void
-VNTWIFIvSetIBSSParameter(
- void *pMgmtHandle,
- unsigned short wBeaconPeriod,
- unsigned short wATIMWindow,
- unsigned int uChannel
-)
-{
- PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
-
- pMgmt->wIBSSBeaconPeriod = wBeaconPeriod;
- pMgmt->wIBSSATIMWindow = wATIMWindow;
- pMgmt->uIBSSChannel = uChannel;
-}
-
-/*+
- *
- * Description:
- * Get current SSID
- *
- * Parameters:
- * In:
- * pMgmtHandle - pointer to management object
- * Out:
- * none
- *
- * Return Value: current SSID pointer.
- *
- -*/
-PWLAN_IE_SSID
-VNTWIFIpGetCurrentSSID(
- void *pMgmtHandle
-)
-{
- PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
-
- return (PWLAN_IE_SSID) pMgmt->abyCurrSSID;
-}
-
-/*+
- *
- * Description:
- * Get current link channel
- *
- * Parameters:
- * In:
- * pMgmtHandle - pointer to management object
- * Out:
- * none
- *
- * Return Value: current Channel.
- *
- -*/
-unsigned int
-VNTWIFIpGetCurrentChannel(
- void *pMgmtHandle
-)
-{
- PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
-
- if (pMgmtHandle != NULL)
- return pMgmt->uCurrChannel;
-
- return 0;
-}
-
-/*+
- *
- * Description:
- * Get current Assoc ID
- *
- * Parameters:
- * In:
- * pMgmtHandle - pointer to management object
- * Out:
- * none
- *
- * Return Value: current Assoc ID
- *
- -*/
-unsigned short
-VNTWIFIwGetAssocID(
- void *pMgmtHandle
-)
-{
- PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
-
- return pMgmt->wCurrAID;
-}
-
-/*+
- *
- * Description:
- *      This routine returns the max supported rate of the IEs
- *
- * Parameters:
- * In:
- * pSupportRateIEs
- * pExtSupportRateIEs
- *
- * Out:
- *
- * Return Value: max support rate
- *
- -*/
-unsigned char
-VNTWIFIbyGetMaxSupportRate(
- PWLAN_IE_SUPP_RATES pSupportRateIEs,
- PWLAN_IE_SUPP_RATES pExtSupportRateIEs
-)
-{
- unsigned char byMaxSupportRate = RATE_1M;
- unsigned char bySupportRate = RATE_1M;
- unsigned int ii = 0;
-
- if (pSupportRateIEs) {
- for (ii = 0; ii < pSupportRateIEs->len; ii++) {
- bySupportRate = DATARATEbyGetRateIdx(pSupportRateIEs->abyRates[ii]);
- if (bySupportRate > byMaxSupportRate)
- byMaxSupportRate = bySupportRate;
-
- }
- }
- if (pExtSupportRateIEs) {
- for (ii = 0; ii < pExtSupportRateIEs->len; ii++) {
- bySupportRate = DATARATEbyGetRateIdx(pExtSupportRateIEs->abyRates[ii]);
- if (bySupportRate > byMaxSupportRate)
- byMaxSupportRate = bySupportRate;
-
- }
- }
-
- return byMaxSupportRate;
-}
-
-/*+
- *
- * Description:
- *      This routine returns the data rate of the ACK packet
- *
- * Parameters:
- * In:
- * byRxDataRate
- * pSupportRateIEs
- * pExtSupportRateIEs
- *
- * Out:
- *
- * Return Value: max support rate
- *
- -*/
-unsigned char
-VNTWIFIbyGetACKTxRate(
- unsigned char byRxDataRate,
- PWLAN_IE_SUPP_RATES pSupportRateIEs,
- PWLAN_IE_SUPP_RATES pExtSupportRateIEs
-)
-{
- unsigned char byMaxAckRate;
- unsigned char byBasicRate;
- unsigned int ii;
-
- if (byRxDataRate <= RATE_11M) {
- byMaxAckRate = RATE_1M;
- } else {
- /* 24M is mandatory for 802.11a and 802.11g */
- byMaxAckRate = RATE_24M;
- }
- if (pSupportRateIEs) {
- for (ii = 0; ii < pSupportRateIEs->len; ii++) {
- if (pSupportRateIEs->abyRates[ii] & 0x80) {
- byBasicRate = DATARATEbyGetRateIdx(pSupportRateIEs->abyRates[ii]);
- if ((byBasicRate <= byRxDataRate) &&
- (byBasicRate > byMaxAckRate)) {
- byMaxAckRate = byBasicRate;
- }
- }
- }
- }
- if (pExtSupportRateIEs) {
- for (ii = 0; ii < pExtSupportRateIEs->len; ii++) {
- if (pExtSupportRateIEs->abyRates[ii] & 0x80) {
- byBasicRate = DATARATEbyGetRateIdx(pExtSupportRateIEs->abyRates[ii]);
- if ((byBasicRate <= byRxDataRate) &&
- (byBasicRate > byMaxAckRate)) {
- byMaxAckRate = byBasicRate;
- }
- }
- }
- }
-
- return byMaxAckRate;
-}
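A minimal usage sketch of the routine above; the rate values and the pSupportRateIEs/pExtSupportRateIEs pointers stand in for the caller's own data and are not taken from the driver. It illustrates the rule in the header comment: the ACK comes back at the highest basic rate (IE octet with bit 0x80 set) that does not exceed the received data rate, falling back to 1M (CCK) or 24M (OFDM).

	unsigned char byAck;

	/* basic rates advertised: 1, 2, 5.5, 11 and 24 Mb/s */
	byAck = VNTWIFIbyGetACKTxRate(RATE_54M, pSupportRateIEs, pExtSupportRateIEs);
	/* -> RATE_24M, the highest basic rate not above 54M */
	byAck = VNTWIFIbyGetACKTxRate(RATE_2M, pSupportRateIEs, pExtSupportRateIEs);
	/* -> RATE_2M, since 2M is itself a basic rate */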
-
-/*+
- *
- * Description:
- * Set Authentication Mode
- *
- * Parameters:
- * In:
- * pMgmtHandle - pointer to management object
- * eAuthMode - Authentication mode
- * Out:
- * none
- *
- * Return Value: none
- *
- -*/
-void
-VNTWIFIvSetAuthenticationMode(
- void *pMgmtHandle,
- WMAC_AUTHENTICATION_MODE eAuthMode
-)
-{
- PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
-
- pMgmt->eAuthenMode = eAuthMode;
- if ((eAuthMode == WMAC_AUTH_SHAREKEY) ||
- (eAuthMode == WMAC_AUTH_AUTO)) {
- pMgmt->bShareKeyAlgorithm = true;
- } else {
- pMgmt->bShareKeyAlgorithm = false;
- }
-}
-
-/*+
- *
- * Description:
- * Set Encryption Mode
- *
- * Parameters:
- * In:
- * pMgmtHandle - pointer to management object
- * eEncryptionMode - Encryption mode
- * Out:
- * none
- *
- * Return Value: none
- *
- -*/
-void
-VNTWIFIvSetEncryptionMode(
- void *pMgmtHandle,
- WMAC_ENCRYPTION_MODE eEncryptionMode
-)
-{
- PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
-
- pMgmt->eEncryptionMode = eEncryptionMode;
- if ((eEncryptionMode == WMAC_ENCRYPTION_WEPEnabled) ||
- (eEncryptionMode == WMAC_ENCRYPTION_TKIPEnabled) ||
- (eEncryptionMode == WMAC_ENCRYPTION_AESEnabled)) {
- pMgmt->bPrivacyInvoked = true;
- } else {
- pMgmt->bPrivacyInvoked = false;
- }
-}
-
-bool
-VNTWIFIbConfigPhyMode(
- void *pMgmtHandle,
- CARD_PHY_TYPE ePhyType
-)
-{
- PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
-
- if ((ePhyType != PHY_TYPE_AUTO) &&
- (ePhyType != pMgmt->eCurrentPHYMode)) {
- if (CARDbSetPhyParameter(pMgmt->pAdapter, ePhyType, 0, 0, NULL, NULL) == true)
- pMgmt->eCurrentPHYMode = ePhyType;
- else
- return false;
- }
- pMgmt->eConfigPHYMode = ePhyType;
- return true;
-}
-
-void
-VNTWIFIbGetConfigPhyMode(
- void *pMgmtHandle,
- void *pePhyType
-)
-{
- PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
-
- if ((pMgmt != NULL) && (pePhyType != NULL))
- *(PCARD_PHY_TYPE)pePhyType = pMgmt->eConfigPHYMode;
-}
-
-/*+
- *
- * Description:
- * Clear BSS List Database except current assoc BSS
- *
- * Parameters:
- * In:
- * pMgmtHandle - Management Object structure
- * bLinkPass - Current Link status
- * Out:
- *
- * Return Value: None.
- *
- -*/
-
-/*+
- *
- * Description:
- * Query BSS List in management database
- *
- * Parameters:
- * In:
- * pMgmtHandle - Management Object structure
- * Out:
- * puBSSCount - BSS count
- * pvFirstBSS - pointer to first BSS
- *
- * Return Value: None.
- *
- -*/
-
-void
-VNTWIFIvQueryBSSList(void *pMgmtHandle, unsigned int *puBSSCount, void **pvFirstBSS)
-{
- unsigned int ii = 0;
- PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
- PKnownBSS pBSS = NULL;
- unsigned int uCount = 0;
-
- *pvFirstBSS = NULL;
-
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- pBSS = &(pMgmt->sBSSList[ii]);
- if (!pBSS->bActive)
- continue;
-
- if (*pvFirstBSS == NULL)
- *pvFirstBSS = &(pMgmt->sBSSList[ii]);
-
- uCount++;
- }
- *puBSSCount = uCount;
-}
-
-void
-VNTWIFIvGetNextBSS(
- void *pMgmtHandle,
- void *pvCurrentBSS,
- void **pvNextBSS
-)
-{
- PKnownBSS pBSS = (PKnownBSS) pvCurrentBSS;
- PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
-
- *pvNextBSS = NULL;
-
- while (*pvNextBSS == NULL) {
- pBSS++;
- if (pBSS > &(pMgmt->sBSSList[MAX_BSS_NUM]))
- return;
-
- if (pBSS->bActive == true) {
- *pvNextBSS = pBSS;
- return;
- }
- }
-}
-
-/*+
- *
- * Description:
- * Update Tx attempt and Tx failure counters in the node DB
- *
- * In:
- * Out:
- * none
- *
- * Return Value: none
- *
- -*/
-void
-VNTWIFIvUpdateNodeTxCounter(
- void *pMgmtHandle,
- unsigned char *pbyDestAddress,
- bool bTxOk,
- unsigned short wRate,
- unsigned char *pbyTxFailCount
-)
-{
- PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
- unsigned int uNodeIndex = 0;
- unsigned int ii;
-
- if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) ||
- (pMgmt->eCurrMode == WMAC_MODE_ESS_AP)) {
- if (BSSDBbIsSTAInNodeDB(pMgmt, pbyDestAddress, &uNodeIndex) == false)
- return;
- }
-
- pMgmt->sNodeDBTable[uNodeIndex].uTxAttempts++;
- if (bTxOk) {
- /* transmit succeeded: bump both the total and the per-rate OK counters */
- pMgmt->sNodeDBTable[uNodeIndex].uTxOk[MAX_RATE]++;
- pMgmt->sNodeDBTable[uNodeIndex].uTxOk[wRate]++;
- } else {
- pMgmt->sNodeDBTable[uNodeIndex].uTxFailures++;
- }
- pMgmt->sNodeDBTable[uNodeIndex].uTxRetry += pbyTxFailCount[MAX_RATE];
- for (ii = 0; ii < MAX_RATE; ii++)
- pMgmt->sNodeDBTable[uNodeIndex].uTxFail[ii] += pbyTxFailCount[ii];
-}
-
-void
-VNTWIFIvGetTxRate(
- void *pMgmtHandle,
- unsigned char *pbyDestAddress,
- unsigned short *pwTxDataRate,
- unsigned char *pbyACKRate,
- unsigned char *pbyCCKBasicRate,
- unsigned char *pbyOFDMBasicRate
-)
-{
- PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
- unsigned int uNodeIndex = 0;
- unsigned short wTxDataRate = RATE_1M;
- unsigned char byACKRate = RATE_1M;
- unsigned char byCCKBasicRate = RATE_1M;
- unsigned char byOFDMBasicRate = RATE_24M;
- PWLAN_IE_SUPP_RATES pSupportRateIEs = NULL;
- PWLAN_IE_SUPP_RATES pExtSupportRateIEs = NULL;
-
- if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) ||
- (pMgmt->eCurrMode == WMAC_MODE_ESS_AP)) {
- /* Adhoc Tx rate decided from node DB */
- if (BSSDBbIsSTAInNodeDB(pMgmt, pbyDestAddress, &uNodeIndex)) {
- wTxDataRate = (pMgmt->sNodeDBTable[uNodeIndex].wTxDataRate);
- pSupportRateIEs = (PWLAN_IE_SUPP_RATES) (pMgmt->sNodeDBTable[uNodeIndex].abyCurrSuppRates);
- pExtSupportRateIEs = (PWLAN_IE_SUPP_RATES) (pMgmt->sNodeDBTable[uNodeIndex].abyCurrExtSuppRates);
- } else {
- if (pMgmt->eCurrentPHYMode != PHY_TYPE_11A)
- wTxDataRate = RATE_2M;
- else
- wTxDataRate = RATE_24M;
-
- pSupportRateIEs = (PWLAN_IE_SUPP_RATES) pMgmt->abyCurrSuppRates;
- pExtSupportRateIEs = (PWLAN_IE_SUPP_RATES) pMgmt->abyCurrExtSuppRates;
- }
- } else { /* Infrastructure: rate decided from AP Node, index = 0 */
-
- wTxDataRate = (pMgmt->sNodeDBTable[0].wTxDataRate);
-
- pSupportRateIEs = (PWLAN_IE_SUPP_RATES) pMgmt->abyCurrSuppRates;
- pExtSupportRateIEs = (PWLAN_IE_SUPP_RATES) pMgmt->abyCurrExtSuppRates;
- }
- byACKRate = VNTWIFIbyGetACKTxRate((unsigned char) wTxDataRate,
- pSupportRateIEs,
- pExtSupportRateIEs
-);
- if (byACKRate > (unsigned char) wTxDataRate)
- byACKRate = (unsigned char) wTxDataRate;
-
- byCCKBasicRate = VNTWIFIbyGetACKTxRate(RATE_11M,
- pSupportRateIEs,
- pExtSupportRateIEs
-);
- byOFDMBasicRate = VNTWIFIbyGetACKTxRate(RATE_54M,
- pSupportRateIEs,
- pExtSupportRateIEs
-);
- *pwTxDataRate = wTxDataRate;
- *pbyACKRate = byACKRate;
- *pbyCCKBasicRate = byCCKBasicRate;
- *pbyOFDMBasicRate = byOFDMBasicRate;
-}
-
-unsigned char
-VNTWIFIbyGetKeyCypher(
- void *pMgmtHandle,
- bool bGroupKey
-)
-{
- PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
-
- if (bGroupKey)
- return pMgmt->byCSSGK;
- else
- return pMgmt->byCSSPK;
-}
-
-bool
-VNTWIFIbSetPMKIDCache(
- void *pMgmtObject,
- unsigned long ulCount,
- void *pPMKIDInfo
-)
-{
- PSMgmtObject pMgmt = (PSMgmtObject) pMgmtObject;
-
- if (ulCount > MAX_PMKID_CACHE)
- return false;
-
- pMgmt->gsPMKIDCache.BSSIDInfoCount = ulCount;
- memcpy(pMgmt->gsPMKIDCache.BSSIDInfo, pPMKIDInfo, (ulCount*sizeof(PMKIDInfo)));
- return true;
-}
-
-unsigned short
-VNTWIFIwGetMaxSupportRate(
- void *pMgmtObject
-)
-{
- unsigned short wRate = RATE_54M;
- PSMgmtObject pMgmt = (PSMgmtObject) pMgmtObject;
-
- for (wRate = RATE_54M; wRate > RATE_1M; wRate--) {
- if (pMgmt->sNodeDBTable[0].wSuppRate & (1<<wRate))
- return wRate;
- }
-
- if (pMgmt->eCurrentPHYMode == PHY_TYPE_11A)
- return RATE_6M;
- else
- return RATE_1M;
-}
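The loop above treats wSuppRate as a bitmap indexed by the RATE_* values from vntwifi.h; an illustrative reading with made-up numbers:

	/* a peer advertising 1, 2, 5.5 and 11 Mb/s would carry
	 *   wSuppRate = (1 << RATE_1M) | (1 << RATE_2M) |
	 *               (1 << RATE_5M) | (1 << RATE_11M)
	 * and the loop returns RATE_11M, the highest bit set above RATE_1M. */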
-
-void
-VNTWIFIvSet11h(
- void *pMgmtObject,
- bool b11hEnable
-)
-{
- PSMgmtObject pMgmt = (PSMgmtObject) pMgmtObject;
-
- pMgmt->b11hEnable = b11hEnable;
-}
-
-bool
-VNTWIFIbMeasureReport(
- void *pMgmtObject,
- bool bEndOfReport,
- void *pvMeasureEID,
- unsigned char byReportMode,
- unsigned char byBasicMap,
- unsigned char byCCAFraction,
- unsigned char *pbyRPIs
-)
-{
- PSMgmtObject pMgmt = (PSMgmtObject) pMgmtObject;
- unsigned char *pbyCurrentEID = (unsigned char *)(pMgmt->pCurrMeasureEIDRep);
-
- if ((pvMeasureEID != NULL) &&
- (pMgmt->uLengthOfRepEIDs < (WLAN_A3FR_MAXLEN - sizeof(MEASEURE_REP) - sizeof(WLAN_80211HDR_A3) - 3))
-) {
- pMgmt->pCurrMeasureEIDRep->byElementID = WLAN_EID_MEASURE_REP;
- pMgmt->pCurrMeasureEIDRep->len = 3;
- pMgmt->pCurrMeasureEIDRep->byToken = ((PWLAN_IE_MEASURE_REQ)pvMeasureEID)->byToken;
- pMgmt->pCurrMeasureEIDRep->byMode = byReportMode;
- pMgmt->pCurrMeasureEIDRep->byType = ((PWLAN_IE_MEASURE_REQ) pvMeasureEID)->byType;
- switch (pMgmt->pCurrMeasureEIDRep->byType) {
- case MEASURE_TYPE_BASIC:
- pMgmt->pCurrMeasureEIDRep->len += sizeof(MEASEURE_REP_BASIC);
- memcpy(&(pMgmt->pCurrMeasureEIDRep->sRep.sBasic),
- &(((PWLAN_IE_MEASURE_REQ) pvMeasureEID)->sReq),
- sizeof(MEASEURE_REQ));
- pMgmt->pCurrMeasureEIDRep->sRep.sBasic.byMap = byBasicMap;
- break;
- case MEASURE_TYPE_CCA:
- pMgmt->pCurrMeasureEIDRep->len += sizeof(MEASEURE_REP_CCA);
- memcpy(&(pMgmt->pCurrMeasureEIDRep->sRep.sCCA),
- &(((PWLAN_IE_MEASURE_REQ) pvMeasureEID)->sReq),
- sizeof(MEASEURE_REQ));
- pMgmt->pCurrMeasureEIDRep->sRep.sCCA.byCCABusyFraction = byCCAFraction;
- break;
- case MEASURE_TYPE_RPI:
- pMgmt->pCurrMeasureEIDRep->len += sizeof(MEASEURE_REP_RPI);
- memcpy(&(pMgmt->pCurrMeasureEIDRep->sRep.sRPI),
- &(((PWLAN_IE_MEASURE_REQ) pvMeasureEID)->sReq),
- sizeof(MEASEURE_REQ));
- memcpy(pMgmt->pCurrMeasureEIDRep->sRep.sRPI.abyRPIdensity, pbyRPIs, 8);
- break;
- default:
- break;
- }
- pbyCurrentEID += (2 + pMgmt->pCurrMeasureEIDRep->len);
- pMgmt->uLengthOfRepEIDs += (2 + pMgmt->pCurrMeasureEIDRep->len);
- pMgmt->pCurrMeasureEIDRep = (PWLAN_IE_MEASURE_REP) pbyCurrentEID;
- }
- if (bEndOfReport)
- IEEE11hbMSRRepTx(pMgmt);
-
- return true;
-}
-
-bool
-VNTWIFIbChannelSwitch(
- void *pMgmtObject,
- unsigned char byNewChannel
-)
-{
- PSMgmtObject pMgmt = (PSMgmtObject) pMgmtObject;
-
- pMgmt->uCurrChannel = byNewChannel;
- pMgmt->bSwitchChannel = false;
- return true;
-}
diff --git a/drivers/staging/vt6655/vntwifi.h b/drivers/staging/vt6655/vntwifi.h
deleted file mode 100644
index 880b8ab109b..00000000000
--- a/drivers/staging/vt6655/vntwifi.h
+++ /dev/null
@@ -1,273 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: vntwifi.h
- *
- * Purpose: export VNT Host WiFi library function
- *
- * Author: Yiching Chen
- *
- * Date: Jan 7, 2004
- *
- */
-
-#ifndef __VNTWIFI_H__
-#define __VNTWIFI_H__
-
-#include "ttype.h"
-#include "80211mgr.h"
-#include "card.h"
-#include "wpa2.h"
-
-/*--------------------- Export Definitions -------------------------*/
-#define RATE_1M 0
-#define RATE_2M 1
-#define RATE_5M 2
-#define RATE_11M 3
-#define RATE_6M 4
-#define RATE_9M 5
-#define RATE_12M 6
-#define RATE_18M 7
-#define RATE_24M 8
-#define RATE_36M 9
-#define RATE_48M 10
-#define RATE_54M 11
-#define RATE_AUTO 12
-#define MAX_RATE 12
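These RATE_* values are the driver's internal rate indexes (0 = 1 Mb/s up to 11 = 54 Mb/s), not the octets carried in the 802.11 Supported/Extended Supported Rates IEs: the IE octets encode the rate in 500 kb/s units with bit 0x80 marking a basic rate, and DATARATEbyGetRateIdx() elsewhere in the driver maps such an octet back to one of these indexes. A hedged decoding sketch, purely illustrative and not part of the driver:

/* decode a Supported Rates IE octet into kb/s (sketch only) */
static inline unsigned int rate_octet_to_kbps(unsigned char byRateOctet)
{
	return (byRateOctet & 0x7F) * 500;	/* e.g. 0x96 -> 11000 kb/s, a basic rate */
}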
-
-// key CipherSuite
-#define KEY_CTL_WEP 0x00
-#define KEY_CTL_NONE 0x01
-#define KEY_CTL_TKIP 0x02
-#define KEY_CTL_CCMP 0x03
-#define KEY_CTL_INVALID 0xFF
-
-#define CHANNEL_MAX_24G 14
-
-#define MAX_BSS_NUM 42
-
-// Pre-configured Authentication Mode (from XP)
-typedef enum tagWMAC_AUTHENTICATION_MODE {
- WMAC_AUTH_OPEN,
- WMAC_AUTH_SHAREKEY,
- WMAC_AUTH_AUTO,
- WMAC_AUTH_WPA,
- WMAC_AUTH_WPAPSK,
- WMAC_AUTH_WPANONE,
- WMAC_AUTH_WPA2,
- WMAC_AUTH_WPA2PSK,
- WMAC_AUTH_MAX // Not a real mode, defined as upper bound
-} WMAC_AUTHENTICATION_MODE, *PWMAC_AUTHENTICATION_MODE;
-
-typedef enum tagWMAC_ENCRYPTION_MODE {
- WMAC_ENCRYPTION_WEPEnabled,
- WMAC_ENCRYPTION_WEPDisabled,
- WMAC_ENCRYPTION_WEPKeyAbsent,
- WMAC_ENCRYPTION_WEPNotSupported,
- WMAC_ENCRYPTION_TKIPEnabled,
- WMAC_ENCRYPTION_TKIPKeyAbsent,
- WMAC_ENCRYPTION_AESEnabled,
- WMAC_ENCRYPTION_AESKeyAbsent
-} WMAC_ENCRYPTION_MODE, *PWMAC_ENCRYPTION_MODE;
-
-// Pre-configured Mode (from XP)
-
-typedef enum tagWMAC_CONFIG_MODE {
- WMAC_CONFIG_ESS_STA = 0,
- WMAC_CONFIG_IBSS_STA,
- WMAC_CONFIG_AUTO,
- WMAC_CONFIG_AP
-} WMAC_CONFIG_MODE, *PWMAC_CONFIG_MODE;
-
-typedef enum tagWMAC_POWER_MODE {
- WMAC_POWER_CAM,
- WMAC_POWER_FAST,
- WMAC_POWER_MAX
-} WMAC_POWER_MODE, *PWMAC_POWER_MODE;
-
-#define VNTWIFIbIsShortSlotTime(wCapInfo) \
- WLAN_GET_CAP_INFO_SHORTSLOTTIME(wCapInfo) \
-
-#define VNTWIFIbIsProtectMode(byERP) \
- ((byERP & WLAN_EID_ERP_USE_PROTECTION) != 0) \
-
-#define VNTWIFIbIsBarkerMode(byERP) \
- ((byERP & WLAN_EID_ERP_BARKER_MODE) != 0) \
-
-#define VNTWIFIbIsShortPreamble(wCapInfo) \
- WLAN_GET_CAP_INFO_SHORTPREAMBLE(wCapInfo) \
-
-#define VNTWIFIbIsEncryption(wCapInfo) \
- WLAN_GET_CAP_INFO_PRIVACY(wCapInfo) \
-
-#define VNTWIFIbIsESS(wCapInfo) \
- WLAN_GET_CAP_INFO_ESS(wCapInfo) \
-
-/*--------------------- Export Classes ----------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Types ------------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-void
-VNTWIFIvSetIBSSParameter(
- void *pMgmtHandle,
- unsigned short wBeaconPeriod,
- unsigned short wATIMWindow,
- unsigned int uChannel
-);
-
-void
-VNTWIFIvSetOPMode(
- void *pMgmtHandle,
- WMAC_CONFIG_MODE eOPMode
-);
-
-PWLAN_IE_SSID
-VNTWIFIpGetCurrentSSID(
- void *pMgmtHandle
-);
-
-unsigned int
-VNTWIFIpGetCurrentChannel(
- void *pMgmtHandle
-);
-
-unsigned short
-VNTWIFIwGetAssocID(
- void *pMgmtHandle
-);
-
-unsigned char
-VNTWIFIbyGetMaxSupportRate(
- PWLAN_IE_SUPP_RATES pSupportRateIEs,
- PWLAN_IE_SUPP_RATES pExtSupportRateIEs
-);
-
-unsigned char
-VNTWIFIbyGetACKTxRate(
- unsigned char byRxDataRate,
- PWLAN_IE_SUPP_RATES pSupportRateIEs,
- PWLAN_IE_SUPP_RATES pExtSupportRateIEs
-);
-
-void
-VNTWIFIvSetAuthenticationMode(
- void *pMgmtHandle,
- WMAC_AUTHENTICATION_MODE eAuthMode
-);
-
-void
-VNTWIFIvSetEncryptionMode(
- void *pMgmtHandle,
- WMAC_ENCRYPTION_MODE eEncryptionMode
-);
-
-bool
-VNTWIFIbConfigPhyMode(
- void *pMgmtHandle,
- CARD_PHY_TYPE ePhyType
-);
-
-void
-VNTWIFIbGetConfigPhyMode(
- void *pMgmtHandle,
- void *pePhyType
-);
-
-void
-VNTWIFIvQueryBSSList(void *pMgmtHandle, unsigned int *puBSSCount,
- void **pvFirstBSS);
-
-void
-VNTWIFIvGetNextBSS(
- void *pMgmtHandle,
- void *pvCurrentBSS,
- void **pvNextBSS
-);
-
-void
-VNTWIFIvUpdateNodeTxCounter(
- void *pMgmtHandle,
- unsigned char *pbyDestAddress,
- bool bTxOk,
- unsigned short wRate,
- unsigned char *pbyTxFailCount
-);
-
-void
-VNTWIFIvGetTxRate(
- void *pMgmtHandle,
- unsigned char *pbyDestAddress,
- unsigned short *pwTxDataRate,
- unsigned char *pbyACKRate,
- unsigned char *pbyCCKBasicRate,
- unsigned char *pbyOFDMBasicRate
-);
-
-unsigned char
-VNTWIFIbyGetKeyCypher(
- void *pMgmtHandle,
- bool bGroupKey
-);
-
-bool
-VNTWIFIbSetPMKIDCache(
- void *pMgmtObject,
- unsigned long ulCount,
- void *pPMKIDInfo
-);
-
-bool
-VNTWIFIbCommandRunning(
- void *pMgmtObject
-);
-
-unsigned short
-VNTWIFIwGetMaxSupportRate(
- void *pMgmtObject
-);
-
-// for 802.11h
-void
-VNTWIFIvSet11h(
- void *pMgmtObject,
- bool b11hEnable
-);
-
-bool
-VNTWIFIbMeasureReport(
- void *pMgmtObject,
- bool bEndOfReport,
- void *pvMeasureEID,
- unsigned char byReportMode,
- unsigned char byBasicMap,
- unsigned char byCCAFraction,
- unsigned char *pbyRPIs
-);
-
-bool
-VNTWIFIbChannelSwitch(
- void *pMgmtObject,
- unsigned char byNewChannel
-);
-
-#endif //__VNTWIFI_H__
diff --git a/drivers/staging/vt6655/wcmd.c b/drivers/staging/vt6655/wcmd.c
deleted file mode 100644
index 985e1b99362..00000000000
--- a/drivers/staging/vt6655/wcmd.c
+++ /dev/null
@@ -1,1023 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: wcmd.c
- *
- * Purpose: Handles the management command interface functions
- *
- * Author: Lyndon Chen
- *
- * Date: May 8, 2003
- *
- * Functions:
- * s_vProbeChannel - Actively probe the scan channel
- * s_MgrMakeProbeRequest - Build a ProbeRequest packet
- * CommandTimer - Timer function to handle commands
- * s_bCommandComplete - Command complete function
- * bScheduleCommand - Push a command onto the queue for the command scheduler to run
- * vCommandTimer - Command callback function
- * vCommandTimerWait - Arm the command callback timer
- * bClearBSSID_SCAN - Clear BSSID_SCAN cmd in CMD queue
- *
- * Revision History:
- *
- */
-
-#include "ttype.h"
-#include "tmacro.h"
-#include "device.h"
-#include "mac.h"
-#include "card.h"
-#include "80211hdr.h"
-#include "wcmd.h"
-#include "wmgr.h"
-#include "power.h"
-#include "wctl.h"
-#include "baseband.h"
-#include "rxtx.h"
-#include "rf.h"
-#include "iowpa.h"
-#include "channel.h"
-
-/*--------------------- Static Definitions -------------------------*/
-
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Functions --------------------------*/
-
-static
-void
-s_vProbeChannel(
- struct vnt_private *pDevice
-);
-
-static
-PSTxMgmtPacket
-s_MgrMakeProbeRequest(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- unsigned char *pScanBSSID,
- PWLAN_IE_SSID pSSID,
- PWLAN_IE_SUPP_RATES pCurrRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates
-);
-
-static
-bool
-s_bCommandComplete(
- struct vnt_private *pDevice
-);
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-/*
- * Description:
- * Stop AdHoc beacon during scan process
- *
- * Parameters:
- * In:
- * pDevice - Pointer to the adapter
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-static
-void
-vAdHocBeaconStop(struct vnt_private *pDevice)
-{
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- bool bStop;
-
- /*
- * temporarily stop Beacon packet for AdHoc Server
- * if all of the following conditions are met:
- * (1) STA is in AdHoc mode
- * (2) VT3253 is programmed as automatic Beacon Transmitting
- * (3) One of the following conditions is met
- * (3.1) AdHoc channel is in B/G band and the
- * current scan channel is in A band
- * or
- * (3.2) AdHoc channel is in A band
- */
- bStop = false;
- if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) &&
- (pMgmt->eCurrState >= WMAC_STATE_STARTED)) {
- if ((pMgmt->uIBSSChannel <= CB_MAX_CHANNEL_24G) &&
- (pMgmt->uScanChannel > CB_MAX_CHANNEL_24G)) {
- bStop = true;
- }
- if (pMgmt->uIBSSChannel > CB_MAX_CHANNEL_24G)
- bStop = true;
-
- }
-
- if (bStop)
- MACvRegBitsOff(pDevice->PortOffset, MAC_REG_TCR, TCR_AUTOBCNTX);
-} /* vAdHocBeaconStop */
-
-/*
- * Description:
- * Restart AdHoc beacon after scan process complete
- *
- * Parameters:
- * In:
- * pDevice - Pointer to the adapter
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-static
-void
-vAdHocBeaconRestart(struct vnt_private *pDevice)
-{
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
-
- /*
- * Restart Beacon packet for AdHoc Server
- * if all of the following conditions are met:
- * (1) STA is in AdHoc mode
- * (2) VT3253 is programmed as automatic Beacon Transmitting
- */
- if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) &&
- (pMgmt->eCurrState >= WMAC_STATE_STARTED)) {
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_TCR, TCR_AUTOBCNTX);
- }
-}
-
-/*+
- *
- * Routine Description:
- * Prepare and send probe request management frames.
- *
- *
- * Return Value:
- * none.
- *
- -*/
-
-static
-void
-s_vProbeChannel(
- struct vnt_private *pDevice
-)
-{
- //1M, 2M, 5.5M, 11M, 18M, 24M, 36M, 54M
- unsigned char abyCurrSuppRatesG[] = {WLAN_EID_SUPP_RATES, 8, 0x02, 0x04, 0x0B, 0x16, 0x24, 0x30, 0x48, 0x6C};
- //6M, 9M, 12M, 48M
- unsigned char abyCurrExtSuppRatesG[] = {WLAN_EID_EXTSUPP_RATES, 4, 0x0C, 0x12, 0x18, 0x60};
- //6M, 9M, 12M, 18M, 24M, 36M, 48M, 54M
- unsigned char abyCurrSuppRatesA[] = {WLAN_EID_SUPP_RATES, 8, 0x0C, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C};
- unsigned char abyCurrSuppRatesB[] = {WLAN_EID_SUPP_RATES, 4, 0x02, 0x04, 0x0B, 0x16};
- unsigned char *pbyRate;
- PSTxMgmtPacket pTxPacket;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- unsigned int ii;
-
- if (pDevice->eCurrentPHYType == PHY_TYPE_11A)
- pbyRate = &abyCurrSuppRatesA[0];
- else if (pDevice->eCurrentPHYType == PHY_TYPE_11B)
- pbyRate = &abyCurrSuppRatesB[0];
- else
- pbyRate = &abyCurrSuppRatesG[0];
-
- // build a probe request frame and send it
- pTxPacket = s_MgrMakeProbeRequest
- (
- pDevice,
- pMgmt,
- pMgmt->abyScanBSSID,
- (PWLAN_IE_SSID)pMgmt->abyScanSSID,
- (PWLAN_IE_SUPP_RATES)pbyRate,
- (PWLAN_IE_SUPP_RATES)abyCurrExtSuppRatesG
- );
-
- if (pTxPacket != NULL) {
- for (ii = 0; ii < 2; ii++) {
- if (csMgmt_xmit(pDevice, pTxPacket) != CMD_STATUS_PENDING)
- pr_debug("Probe request sending fail..\n");
- else
- pr_debug("Probe request is sending..\n");
- }
- }
-}
-
-/*+
- *
- * Routine Description:
- * Constructs a probe request frame
- *
- *
- * Return Value:
- * A ptr to Tx frame or NULL on allocation failure
- *
- -*/
-
-static PSTxMgmtPacket
-s_MgrMakeProbeRequest(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- unsigned char *pScanBSSID,
- PWLAN_IE_SSID pSSID,
- PWLAN_IE_SUPP_RATES pCurrRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates
-)
-{
- PSTxMgmtPacket pTxPacket = NULL;
- WLAN_FR_PROBEREQ sFrame;
-
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_PROBEREQ_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
- sFrame.pBuf = (unsigned char *)pTxPacket->p80211Header;
- sFrame.len = WLAN_PROBEREQ_FR_MAXLEN;
- vMgrEncodeProbeRequest(&sFrame);
- sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16(
- (
- WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) |
- WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_PROBEREQ)
-));
- memcpy(sFrame.pHdr->sA3.abyAddr1, pScanBSSID, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr3, pScanBSSID, WLAN_BSSID_LEN);
- // Copy the SSID, pSSID->len=0 indicate broadcast SSID
- sFrame.pSSID = (PWLAN_IE_SSID)(sFrame.pBuf + sFrame.len);
- sFrame.len += pSSID->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pSSID, pSSID, pSSID->len + WLAN_IEHDR_LEN);
- sFrame.pSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len);
- sFrame.len += pCurrRates->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pSuppRates, pCurrRates, pCurrRates->len + WLAN_IEHDR_LEN);
- // Copy the extension rate set
- if (pDevice->eCurrentPHYType == PHY_TYPE_11G) {
- sFrame.pExtSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len);
- sFrame.len += pCurrExtSuppRates->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pExtSuppRates, pCurrExtSuppRates, pCurrExtSuppRates->len + WLAN_IEHDR_LEN);
- }
- pTxPacket->cbMPDULen = sFrame.len;
- pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN;
-
- return pTxPacket;
-}
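A worked size check for the frame built above, assuming vMgrEncodeProbeRequest() leaves sFrame.len at the 24-byte address-3 header length (which the IE appends imply); the SSID and rate-set lengths below are illustrative, not taken from the driver:

	/* broadcast SSID (2 + 0) + 11g rates IE (2 + 8) + ext rates IE (2 + 4):
	 *   cbMPDULen    = 24 + 2 + 10 + 6 = 42
	 *   cbPayloadLen = 42 - WLAN_HDR_ADDR3_LEN = 18 */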
-
-void
-vCommandTimerWait(
- void *hDeviceContext,
- unsigned int MSecond
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
-
- init_timer(&pDevice->sTimerCommand);
- pDevice->sTimerCommand.data = (unsigned long) pDevice;
- pDevice->sTimerCommand.function = (TimerFunction)vCommandTimer;
- // RUN_AT :1 msec ~= (HZ/1024)
- pDevice->sTimerCommand.expires = (unsigned int)RUN_AT((MSecond * HZ) >> 10);
- add_timer(&pDevice->sTimerCommand);
-}
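A worked check of the RUN_AT expression above, assuming HZ = 1000 (an assumption, not stated in this file): the right shift by 10 divides by 1024 rather than 1000, which is what the "1 msec ~= (HZ/1024)" comment alludes to, so the programmed delay comes out roughly 2% short.

	/* MSecond = 100:  (100 * 1000) >> 10 = 97 jiffies,
	 * versus msecs_to_jiffies(100) = 100 jiffies at HZ = 1000 */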
-
-void
-vCommandTimer(
- void *hDeviceContext
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- PWLAN_IE_SSID pItemSSID;
- PWLAN_IE_SSID pItemSSIDCurr;
- CMD_STATUS Status;
- unsigned int ii;
- unsigned char byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
- struct sk_buff *skb;
-
- if (pDevice->dwDiagRefCount != 0)
- return;
- if (!pDevice->bCmdRunning)
- return;
-
- spin_lock_irq(&pDevice->lock);
-
- switch (pDevice->eCommandState) {
- case WLAN_CMD_SCAN_START:
-
- pDevice->byReAssocCount = 0;
- if (pDevice->bRadioOff) {
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
-
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- s_bCommandComplete(pDevice);
- CARDbSetBSSID(pMgmt->pAdapter, pMgmt->abyCurrBSSID, NL80211_IFTYPE_AP);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
-
- pr_debug("eCommandState= WLAN_CMD_SCAN_START\n");
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyScanSSID;
- // wait all Data TD complete
- if (pDevice->iTDUsed[TYPE_AC0DMA] != 0) {
- spin_unlock_irq(&pDevice->lock);
- vCommandTimerWait((void *)pDevice, 10);
- return;
- }
-
- if (pMgmt->uScanChannel == 0) {
- pMgmt->uScanChannel = pDevice->byMinChannel;
- // Set Baseband to be more sensitive.
-
- }
- if (pMgmt->uScanChannel > pDevice->byMaxChannel) {
- pMgmt->eScanState = WMAC_NO_SCANNING;
-
- // Set Baseband's sensitivity back.
- // Set channel back
- set_channel(pMgmt->pAdapter, pMgmt->uCurrChannel);
- pr_debug("Scanning, set back to channel: [%d]\n",
- pMgmt->uCurrChannel);
- if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)
- CARDbSetBSSID(pMgmt->pAdapter, pMgmt->abyCurrBSSID, NL80211_IFTYPE_ADHOC);
- else
- CARDbSetBSSID(pMgmt->pAdapter, pMgmt->abyCurrBSSID, NL80211_IFTYPE_STATION);
-
- vAdHocBeaconRestart(pDevice);
- s_bCommandComplete(pDevice);
-
- } else {
-//2008-8-4 <add> by chester
- if (!is_channel_valid(pMgmt->uScanChannel)) {
- pr_debug("Invalid channel pMgmt->uScanChannel = %d\n",
- pMgmt->uScanChannel);
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
- if (pMgmt->uScanChannel == pDevice->byMinChannel) {
- pMgmt->abyScanBSSID[0] = 0xFF;
- pMgmt->abyScanBSSID[1] = 0xFF;
- pMgmt->abyScanBSSID[2] = 0xFF;
- pMgmt->abyScanBSSID[3] = 0xFF;
- pMgmt->abyScanBSSID[4] = 0xFF;
- pMgmt->abyScanBSSID[5] = 0xFF;
- pItemSSID->byElementID = WLAN_EID_SSID;
- pMgmt->eScanState = WMAC_IS_SCANNING;
-
- }
-
- vAdHocBeaconStop(pDevice);
-
- if (set_channel(pMgmt->pAdapter, pMgmt->uScanChannel))
- pr_debug("SCAN Channel: %d\n",
- pMgmt->uScanChannel);
- else
- pr_debug("SET SCAN Channel Fail: %d\n",
- pMgmt->uScanChannel);
-
- CARDbSetBSSID(pMgmt->pAdapter, pMgmt->abyCurrBSSID, NL80211_IFTYPE_UNSPECIFIED);
- pMgmt->uScanChannel++;
-//2008-8-4 <modify> by chester
- if (!is_channel_valid(pMgmt->uScanChannel) &&
- pMgmt->uScanChannel <= pDevice->byMaxChannel) {
- pMgmt->uScanChannel = pDevice->byMaxChannel + 1;
- pMgmt->eCommandState = WLAN_CMD_SCAN_END;
-
- }
-
- if (!pMgmt->b11hEnable ||
- (pMgmt->uScanChannel < CB_MAX_CHANNEL_24G)) {
- s_vProbeChannel(pDevice);
- spin_unlock_irq(&pDevice->lock);
- vCommandTimerWait((void *)pDevice, WCMD_ACTIVE_SCAN_TIME);
- return;
- } else {
- spin_unlock_irq(&pDevice->lock);
- vCommandTimerWait((void *)pDevice, WCMD_PASSIVE_SCAN_TIME);
- return;
- }
-
- }
-
- break;
-
- case WLAN_CMD_SCAN_END:
-
- // Set Baseband's sensitivity back.
- // Set channel back
- set_channel(pMgmt->pAdapter, pMgmt->uCurrChannel);
- pr_debug("Scanning, set back to channel: [%d]\n",
- pMgmt->uCurrChannel);
- if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)
- CARDbSetBSSID(pMgmt->pAdapter, pMgmt->abyCurrBSSID, NL80211_IFTYPE_ADHOC);
- else
- CARDbSetBSSID(pMgmt->pAdapter, pMgmt->abyCurrBSSID, NL80211_IFTYPE_STATION);
-
- pMgmt->eScanState = WMAC_NO_SCANNING;
- vAdHocBeaconRestart(pDevice);
-//2008-0409-07, <Add> by Einsn Liu
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- if (pMgmt->eScanType == WMAC_SCAN_PASSIVE) {
- //send scan event to wpa_Supplicant
- union iwreq_data wrqu;
-
- memset(&wrqu, 0, sizeof(wrqu));
- wireless_send_event(pDevice->dev, SIOCGIWSCAN, &wrqu, NULL);
- }
-#endif
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_CMD_DISASSOCIATE_START:
- pDevice->byReAssocCount = 0;
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
- (pMgmt->eCurrState != WMAC_STATE_ASSOC)) {
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
- } else {
- pr_debug("Send Disassociation Packet..\n");
- // reason = 8 : disassoc because sta has left
- vMgrDisassocBeginSta((void *)pDevice, pMgmt, pMgmt->abyCurrBSSID, (8), &Status);
- pDevice->bLinkPass = false;
- // unlock command busy
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
- pItemSSID->len = 0;
- memset(pItemSSID->abySSID, 0, WLAN_SSID_MAXLEN);
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- pMgmt->sNodeDBTable[0].bActive = false;
- }
- netif_stop_queue(pDevice->dev);
- pDevice->eCommandState = WLAN_DISASSOCIATE_WAIT;
- // wait all Control TD complete
- if (pDevice->iTDUsed[TYPE_TXDMA0] != 0) {
- vCommandTimerWait((void *)pDevice, 10);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
- pr_debug(" CARDbRadioPowerOff\n");
- //2008-09-02 <mark> by chester
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_DISASSOCIATE_WAIT:
- // wait all Control TD complete
- if (pDevice->iTDUsed[TYPE_TXDMA0] != 0) {
- vCommandTimerWait((void *)pDevice, 10);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
-//2008-09-02 <mark> by chester
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_CMD_SSID_START:
- pDevice->byReAssocCount = 0;
- if (pDevice->bRadioOff) {
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
- pr_debug("chester-abyDesireSSID=%s\n", ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->abySSID);
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
- pItemSSIDCurr = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
- pr_debug(" cmd: desire ssid = %s\n", pItemSSID->abySSID);
- pr_debug(" cmd: curr ssid = %s\n", pItemSSIDCurr->abySSID);
-
- if (pMgmt->eCurrState == WMAC_STATE_ASSOC) {
- pr_debug(" Cmd pMgmt->eCurrState == WMAC_STATE_ASSOC\n");
- pr_debug(" pItemSSID->len =%d\n", pItemSSID->len);
- pr_debug(" pItemSSIDCurr->len = %d\n",
- pItemSSIDCurr->len);
- pr_debug(" desire ssid = %s\n", pItemSSID->abySSID);
- pr_debug(" curr ssid = %s\n", pItemSSIDCurr->abySSID);
- }
-
- if ((pMgmt->eCurrState == WMAC_STATE_ASSOC) ||
- ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED))) {
- if (pItemSSID->len == pItemSSIDCurr->len) {
- if (memcmp(pItemSSID->abySSID, pItemSSIDCurr->abySSID, pItemSSID->len) == 0) {
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
- }
-
- netif_stop_queue(pDevice->dev);
- pDevice->bLinkPass = false;
- }
- // set initial state
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- pMgmt->eCurrMode = WMAC_MODE_STANDBY;
- PSvDisablePowerSaving((void *)pDevice);
- BSSvClearNodeDBTable(pDevice, 0);
-
- vMgrJoinBSSBegin((void *)pDevice, &Status);
- // if Infra mode
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED)) {
- // Call mgr to begin the deauthentication
- // reason = (3) because sta has left ESS
- if (pMgmt->eCurrState >= WMAC_STATE_AUTH)
- vMgrDeAuthenBeginSta((void *)pDevice, pMgmt, pMgmt->abyCurrBSSID, (3), &Status);
-
- // Call mgr to begin the authentication
- vMgrAuthenBeginSta((void *)pDevice, pMgmt, &Status);
- if (Status == CMD_STATUS_SUCCESS) {
- pDevice->byLinkWaitCount = 0;
- pDevice->eCommandState = WLAN_AUTHENTICATE_WAIT;
- vCommandTimerWait((void *)pDevice, AUTHENTICATE_TIMEOUT);
- spin_unlock_irq(&pDevice->lock);
- pr_debug(" Set eCommandState = WLAN_AUTHENTICATE_WAIT\n");
- return;
- }
- }
- // if Adhoc mode
- else if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
- if (pMgmt->eCurrState == WMAC_STATE_JOINTED) {
- if (netif_queue_stopped(pDevice->dev))
- netif_wake_queue(pDevice->dev);
-
- pDevice->bLinkPass = true;
-
- pMgmt->sNodeDBTable[0].bActive = true;
- pMgmt->sNodeDBTable[0].uInActiveCount = 0;
- bClearBSSID_SCAN(pDevice);
- } else {
- // start own IBSS
- vMgrCreateOwnIBSS((void *)pDevice, &Status);
- if (Status != CMD_STATUS_SUCCESS)
- pr_debug(" WLAN_CMD_IBSS_CREATE fail !\n");
-
- BSSvAddMulticastNode(pDevice);
- }
- }
- // if SSID not found
- else if (pMgmt->eCurrMode == WMAC_MODE_STANDBY) {
- if (pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA ||
- pMgmt->eConfigMode == WMAC_CONFIG_AUTO) {
- // start own IBSS
- vMgrCreateOwnIBSS((void *)pDevice, &Status);
- if (Status != CMD_STATUS_SUCCESS)
- pr_debug(" WLAN_CMD_IBSS_CREATE fail !\n");
-
- BSSvAddMulticastNode(pDevice);
- if (netif_queue_stopped(pDevice->dev))
- netif_wake_queue(pDevice->dev);
-
- pDevice->bLinkPass = true;
- } else {
- pr_debug("Disconnect SSID none\n");
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- {
- union iwreq_data wrqu;
-
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- pr_debug("wireless_send_event--->SIOCGIWAP(disassociated:vMgrJoinBSSBegin Fail !!)\n");
- wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL);
- }
-#endif
-
- }
- }
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_AUTHENTICATE_WAIT:
- pr_debug("eCommandState == WLAN_AUTHENTICATE_WAIT\n");
- if (pMgmt->eCurrState == WMAC_STATE_AUTH) {
- // Call mgr to begin the association
- pDevice->byLinkWaitCount = 0;
- pr_debug("eCurrState == WMAC_STATE_AUTH\n");
- vMgrAssocBeginSta((void *)pDevice, pMgmt, &Status);
- if (Status == CMD_STATUS_SUCCESS) {
- pDevice->byLinkWaitCount = 0;
- pr_debug("eCommandState = WLAN_ASSOCIATE_WAIT\n");
- pDevice->eCommandState = WLAN_ASSOCIATE_WAIT;
- vCommandTimerWait((void *)pDevice, ASSOCIATE_TIMEOUT);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
- }
-
- else if (pMgmt->eCurrState < WMAC_STATE_AUTHPENDING) {
- pr_debug("WLAN_AUTHENTICATE_WAIT:Authen Fail???\n");
- } else if (pDevice->byLinkWaitCount <= 4) { //mike add:wait another 2 sec if authenticated_frame delay!
- pDevice->byLinkWaitCount++;
- pr_debug("WLAN_AUTHENTICATE_WAIT:wait %d times!!\n", pDevice->byLinkWaitCount);
- spin_unlock_irq(&pDevice->lock);
- vCommandTimerWait((void *)pDevice, AUTHENTICATE_TIMEOUT/2);
- return;
- }
- pDevice->byLinkWaitCount = 0;
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_ASSOCIATE_WAIT:
- if (pMgmt->eCurrState == WMAC_STATE_ASSOC) {
- pr_debug("eCurrState == WMAC_STATE_ASSOC\n");
- if (pDevice->ePSMode != WMAC_POWER_CAM)
- PSvEnablePowerSaving((void *)pDevice, pMgmt->wListenInterval);
-
- if (pMgmt->eAuthenMode >= WMAC_AUTH_WPA)
- KeybRemoveAllKey(&(pDevice->sKey), pDevice->abyBSSID, pDevice->PortOffset);
-
- pDevice->bLinkPass = true;
- pDevice->byLinkWaitCount = 0;
- pDevice->byReAssocCount = 0;
- bClearBSSID_SCAN(pDevice);
- if (pDevice->byFOETuning) {
- BBvSetFOE(pDevice->PortOffset);
- PSbSendNullPacket(pDevice);
- }
- if (netif_queue_stopped(pDevice->dev))
- netif_wake_queue(pDevice->dev);
-
- if (pDevice->IsTxDataTrigger) { //TxDataTimer is not triggered at the first time
- del_timer(&pDevice->sTimerTxData);
- init_timer(&pDevice->sTimerTxData);
- pDevice->sTimerTxData.data = (unsigned long) pDevice;
- pDevice->sTimerTxData.function = (TimerFunction)BSSvSecondTxData;
- pDevice->sTimerTxData.expires = RUN_AT(10*HZ); //10s callback
- pDevice->fTxDataInSleep = false;
- pDevice->nTxDataTimeCout = 0;
- }
-
- pDevice->IsTxDataTrigger = true;
- add_timer(&pDevice->sTimerTxData);
-
- } else if (pMgmt->eCurrState < WMAC_STATE_ASSOCPENDING) {
- printk("WLAN_ASSOCIATE_WAIT:Association Fail???\n");
- } else if (pDevice->byLinkWaitCount <= 4) { //mike add:wait another 2 sec if associated_frame delay!
- pDevice->byLinkWaitCount++;
- pr_debug("WLAN_ASSOCIATE_WAIT:wait %d times!!\n", pDevice->byLinkWaitCount);
- spin_unlock_irq(&pDevice->lock);
- vCommandTimerWait((void *)pDevice, ASSOCIATE_TIMEOUT/2);
- return;
- }
- pDevice->byLinkWaitCount = 0;
-
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_CMD_AP_MODE_START:
- pr_debug("eCommandState == WLAN_CMD_AP_MODE_START\n");
-
- if (pMgmt->eConfigMode == WMAC_CONFIG_AP) {
- del_timer(&pMgmt->sTimerSecondCallback);
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- pMgmt->eCurrMode = WMAC_MODE_STANDBY;
- pDevice->bLinkPass = false;
- if (pDevice->bEnableHostWEP)
- BSSvClearNodeDBTable(pDevice, 1);
- else
- BSSvClearNodeDBTable(pDevice, 0);
- pDevice->uAssocCount = 0;
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- pDevice->bFixRate = false;
-
- vMgrCreateOwnIBSS((void *)pDevice, &Status);
- if (Status != CMD_STATUS_SUCCESS)
- pr_debug(" vMgrCreateOwnIBSS fail !\n");
-
- // always turn off unicast bit
- MACvRegBitsOff(pDevice->PortOffset, MAC_REG_RCR, RCR_UNICAST);
- pDevice->byRxMode &= ~RCR_UNICAST;
- pr_debug("wcmd: rx_mode = %x\n", pDevice->byRxMode);
- BSSvAddMulticastNode(pDevice);
- if (netif_queue_stopped(pDevice->dev))
- netif_wake_queue(pDevice->dev);
-
- pDevice->bLinkPass = true;
- add_timer(&pMgmt->sTimerSecondCallback);
- }
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_CMD_TX_PSPACKET_START:
- // DTIM Multicast tx
- if (pMgmt->sNodeDBTable[0].bRxPSPoll) {
- while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[0].sTxPSQueue)) != NULL) {
- if (skb_queue_empty(&pMgmt->sNodeDBTable[0].sTxPSQueue)) {
- pMgmt->abyPSTxMap[0] &= ~byMask[0];
- pDevice->bMoreData = false;
- } else {
- pDevice->bMoreData = true;
- }
- if (!device_dma0_xmit(pDevice, skb, 0))
- pr_debug("Multicast ps tx fail\n");
-
- pMgmt->sNodeDBTable[0].wEnQueueCnt--;
- }
- }
-
- // PS nodes tx
- for (ii = 1; ii < (MAX_NODE_NUM + 1); ii++) {
- if (pMgmt->sNodeDBTable[ii].bActive &&
- pMgmt->sNodeDBTable[ii].bRxPSPoll) {
- pr_debug("Index=%d Enqueu Cnt= %d\n",
- ii,
- pMgmt->sNodeDBTable[ii].wEnQueueCnt);
- while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) != NULL) {
- if (skb_queue_empty(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) {
- // clear tx map
- pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[ii].wAID >> 3] &=
- ~byMask[pMgmt->sNodeDBTable[ii].wAID & 7];
- pDevice->bMoreData = false;
- } else {
- pDevice->bMoreData = true;
- }
- if (!device_dma0_xmit(pDevice, skb, ii))
- pr_debug("sta ps tx fail\n");
-
- pMgmt->sNodeDBTable[ii].wEnQueueCnt--;
- // check if sta ps enabled, and wait next pspoll.
- // if sta ps disable, then send all pending buffers.
- if (pMgmt->sNodeDBTable[ii].bPSEnable)
- break;
- }
- if (skb_queue_empty(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) {
- // clear tx map
- pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[ii].wAID >> 3] &=
- ~byMask[pMgmt->sNodeDBTable[ii].wAID & 7];
- pr_debug("Index=%d PS queue clear\n",
- ii);
- }
- pMgmt->sNodeDBTable[ii].bRxPSPoll = false;
- }
- }
-
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_CMD_RADIO_START:
- pr_debug("eCommandState == WLAN_CMD_RADIO_START\n");
- if (pDevice->bRadioCmd)
- CARDbRadioPowerOn(pDevice);
- else
- CARDbRadioPowerOff(pDevice);
-
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_CMD_CHECK_BBSENSITIVITY_CHANGE:
- // wait all TD complete
- if (pDevice->iTDUsed[TYPE_AC0DMA] != 0) {
- vCommandTimerWait((void *)pDevice, 10);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
- if (pDevice->iTDUsed[TYPE_TXDMA0] != 0) {
- vCommandTimerWait((void *)pDevice, 10);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
- pDevice->byBBVGACurrent = pDevice->byBBVGANew;
- BBvSetVGAGainOffset(pDevice, pDevice->byBBVGACurrent);
- pr_debug("SetVGAGainOffset %02X\n", pDevice->byBBVGACurrent);
- s_bCommandComplete(pDevice);
- break;
-
- default:
- s_bCommandComplete(pDevice);
- break;
-
- } //switch
- spin_unlock_irq(&pDevice->lock);
-}
-
-static
-bool
-s_bCommandComplete(
- struct vnt_private *pDevice
-)
-{
- PWLAN_IE_SSID pSSID;
- bool bRadioCmd = false;
- bool bForceSCAN = true;
- PSMgmtObject pMgmt = pDevice->pMgmt;
-
- pDevice->eCommandState = WLAN_CMD_IDLE;
- if (pDevice->cbFreeCmdQueue == CMD_Q_SIZE) {
- //Command Queue Empty
- pDevice->bCmdRunning = false;
- return true;
- } else {
- pDevice->eCommand = pDevice->eCmdQueue[pDevice->uCmdDequeueIdx].eCmd;
- pSSID = (PWLAN_IE_SSID)pDevice->eCmdQueue[pDevice->uCmdDequeueIdx].abyCmdDesireSSID;
- bRadioCmd = pDevice->eCmdQueue[pDevice->uCmdDequeueIdx].bRadioCmd;
- bForceSCAN = pDevice->eCmdQueue[pDevice->uCmdDequeueIdx].bForceSCAN;
- ADD_ONE_WITH_WRAP_AROUND(pDevice->uCmdDequeueIdx, CMD_Q_SIZE);
- pDevice->cbFreeCmdQueue++;
- pDevice->bCmdRunning = true;
- switch (pDevice->eCommand) {
- case WLAN_CMD_BSSID_SCAN:
- pr_debug("eCommandState= WLAN_CMD_BSSID_SCAN\n");
- pDevice->eCommandState = WLAN_CMD_SCAN_START;
- pMgmt->uScanChannel = 0;
- if (pSSID->len != 0)
- memcpy(pMgmt->abyScanSSID, pSSID, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- else
- memset(pMgmt->abyScanSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
-
- break;
- case WLAN_CMD_SSID:
- pDevice->eCommandState = WLAN_CMD_SSID_START;
- if (pSSID->len > WLAN_SSID_MAXLEN)
- pSSID->len = WLAN_SSID_MAXLEN;
- if (pSSID->len != 0)
- memcpy(pDevice->pMgmt->abyDesireSSID, pSSID, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- pr_debug("eCommandState= WLAN_CMD_SSID_START\n");
- break;
- case WLAN_CMD_DISASSOCIATE:
- pDevice->eCommandState = WLAN_CMD_DISASSOCIATE_START;
- break;
- case WLAN_CMD_RX_PSPOLL:
- pDevice->eCommandState = WLAN_CMD_TX_PSPACKET_START;
- break;
- case WLAN_CMD_RUN_AP:
- pDevice->eCommandState = WLAN_CMD_AP_MODE_START;
- break;
- case WLAN_CMD_RADIO:
- pDevice->eCommandState = WLAN_CMD_RADIO_START;
- pDevice->bRadioCmd = bRadioCmd;
- break;
- case WLAN_CMD_CHANGE_BBSENSITIVITY:
- pDevice->eCommandState = WLAN_CMD_CHECK_BBSENSITIVITY_CHANGE;
- break;
-
- default:
- break;
-
- }
-
- vCommandTimerWait((void *)pDevice, 0);
- }
-
- return true;
-}
-
-bool bScheduleCommand(
- void *hDeviceContext,
- CMD_CODE eCommand,
- unsigned char *pbyItem0
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
-
- if (pDevice->cbFreeCmdQueue == 0)
- return false;
-
- pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].eCmd = eCommand;
- pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].bForceSCAN = true;
- memset(pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].abyCmdDesireSSID, 0 , WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
-
- if (pbyItem0 != NULL) {
- switch (eCommand) {
- case WLAN_CMD_BSSID_SCAN:
- memcpy(pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].abyCmdDesireSSID,
- pbyItem0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].bForceSCAN = false;
- break;
-
- case WLAN_CMD_SSID:
- memcpy(pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].abyCmdDesireSSID,
- pbyItem0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- break;
-
- case WLAN_CMD_DISASSOCIATE:
- pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].bNeedRadioOFF = *((int *)pbyItem0);
- break;
-
- case WLAN_CMD_RX_PSPOLL:
- break;
-
- case WLAN_CMD_RADIO:
- pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].bRadioCmd = *((int *)pbyItem0);
- break;
-
- case WLAN_CMD_CHANGE_BBSENSITIVITY:
- pDevice->eCommandState = WLAN_CMD_CHECK_BBSENSITIVITY_CHANGE;
- break;
-
- default:
- break;
- }
- }
-
- ADD_ONE_WITH_WRAP_AROUND(pDevice->uCmdEnqueueIdx, CMD_Q_SIZE);
- pDevice->cbFreeCmdQueue--;
-
- if (!pDevice->bCmdRunning)
- s_bCommandComplete(pDevice);
-
- return true;
-}
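An illustrative call of the function above, not copied from the driver: queueing a broadcast scan. A NULL item leaves abyCmdDesireSSID zeroed, so s_bCommandComplete() copies an empty SSID and the probe request goes out with a wildcard (zero-length) SSID.

	if (!bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, NULL))
		pr_debug("command queue full, scan not scheduled\n");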
-
-/*
- * Description:
- * Clear BSSID_SCAN cmd in CMD Queue
- *
- * Parameters:
- * In:
- * hDeviceContext - Pointer to the adapter
- * Out:
- * none
- *
- * Return Value: true if success; otherwise false
- *
- */
-bool bClearBSSID_SCAN(
- void *hDeviceContext
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- unsigned int uCmdDequeueIdx = pDevice->uCmdDequeueIdx;
- unsigned int ii;
-
- if ((pDevice->cbFreeCmdQueue < CMD_Q_SIZE) && (uCmdDequeueIdx != pDevice->uCmdEnqueueIdx)) {
- for (ii = 0; ii < (CMD_Q_SIZE - pDevice->cbFreeCmdQueue); ii++) {
- if (pDevice->eCmdQueue[uCmdDequeueIdx].eCmd == WLAN_CMD_BSSID_SCAN)
- pDevice->eCmdQueue[uCmdDequeueIdx].eCmd = WLAN_CMD_IDLE;
- ADD_ONE_WITH_WRAP_AROUND(uCmdDequeueIdx, CMD_Q_SIZE);
- if (uCmdDequeueIdx == pDevice->uCmdEnqueueIdx)
- break;
- }
- }
- return true;
-}
-
-//mike add:reset command timer
-void
-vResetCommandTimer(
- void *hDeviceContext
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
-
- //delete timer
- del_timer(&pDevice->sTimerCommand);
- //init timer
- init_timer(&pDevice->sTimerCommand);
- pDevice->sTimerCommand.data = (unsigned long) pDevice;
- pDevice->sTimerCommand.function = (TimerFunction)vCommandTimer;
- pDevice->sTimerCommand.expires = RUN_AT(HZ);
- pDevice->cbFreeCmdQueue = CMD_Q_SIZE;
- pDevice->uCmdDequeueIdx = 0;
- pDevice->uCmdEnqueueIdx = 0;
- pDevice->eCommandState = WLAN_CMD_IDLE;
- pDevice->bCmdRunning = false;
- pDevice->bCmdClear = false;
-}
-
-void
-BSSvSecondTxData(
- void *hDeviceContext
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
-
- pDevice->nTxDataTimeCout++;
-
- if (pDevice->nTxDataTimeCout < 4) //don't tx data during the first 40 s (4 x 10 s callbacks)
- {
- pDevice->sTimerTxData.expires = RUN_AT(10*HZ); //10s callback
- add_timer(&pDevice->sTimerTxData);
- return;
- }
-
- spin_lock_irq(&pDevice->lock);
-
- /* open && sharekey linking */
- if ((pDevice->bLinkPass && (pMgmt->eAuthenMode < WMAC_AUTH_WPA)) ||
- pDevice->fWPA_Authened) { /* wpa linking */
- pDevice->fTxDataInSleep = true;
- PSbSendNullPacket(pDevice); /* send null packet */
- pDevice->fTxDataInSleep = false;
- }
-
- spin_unlock_irq(&pDevice->lock);
-
- pDevice->sTimerTxData.expires = RUN_AT(10*HZ); /* 10s callback */
- add_timer(&pDevice->sTimerTxData);
-}
diff --git a/drivers/staging/vt6655/wcmd.h b/drivers/staging/vt6655/wcmd.h
deleted file mode 100644
index 6ef04de69f3..00000000000
--- a/drivers/staging/vt6655/wcmd.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: wcmd.h
- *
- * Purpose: Handles the management command interface functions
- *
- * Author: Lyndon Chen
- *
- * Date: May 8, 2002
- *
- */
-
-#ifndef __WCMD_H__
-#define __WCMD_H__
-
-#include "ttype.h"
-#include "80211hdr.h"
-#include "80211mgr.h"
-
-#define AUTHENTICATE_TIMEOUT 1000
-#define ASSOCIATE_TIMEOUT 1000
-
-typedef enum tagCMD_CODE {
- WLAN_CMD_BSSID_SCAN,
- WLAN_CMD_SSID,
- WLAN_CMD_DISASSOCIATE,
- WLAN_CMD_DEAUTH,
- WLAN_CMD_RX_PSPOLL,
- WLAN_CMD_RADIO,
- WLAN_CMD_CHANGE_BBSENSITIVITY,
- WLAN_CMD_SETPOWER,
- WLAN_CMD_TBTT_WAKEUP,
- WLAN_CMD_BECON_SEND,
- WLAN_CMD_CHANGE_ANTENNA,
- WLAN_CMD_REMOVE_ALLKEY,
- WLAN_CMD_MAC_DISPOWERSAVING,
- WLAN_CMD_11H_CHSW,
- WLAN_CMD_RUN_AP
-} CMD_CODE, *PCMD_CODE;
-
-#define CMD_Q_SIZE 32
-
-typedef enum tagCMD_STATUS {
- CMD_STATUS_SUCCESS = 0,
- CMD_STATUS_FAILURE,
- CMD_STATUS_RESOURCES,
- CMD_STATUS_TIMEOUT,
- CMD_STATUS_PENDING
-} CMD_STATUS, *PCMD_STATUS;
-
-typedef struct tagCMD_ITEM {
- CMD_CODE eCmd;
- unsigned char abyCmdDesireSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
- bool bNeedRadioOFF;
- unsigned short wDeAuthenReason;
- bool bRadioCmd;
- bool bForceSCAN;
-} CMD_ITEM, *PCMD_ITEM;
-
-typedef enum tagCMD_STATE {
- WLAN_CMD_SCAN_START,
- WLAN_CMD_SCAN_END,
- WLAN_CMD_DISASSOCIATE_START,
- WLAN_CMD_SSID_START,
- WLAN_AUTHENTICATE_WAIT,
- WLAN_ASSOCIATE_WAIT,
- WLAN_DISASSOCIATE_WAIT,
- WLAN_CMD_TX_PSPACKET_START,
- WLAN_CMD_AP_MODE_START,
- WLAN_CMD_RADIO_START,
- WLAN_CMD_CHECK_BBSENSITIVITY_CHANGE,
- WLAN_CMD_IDLE
-} CMD_STATE, *PCMD_STATE;
-
-void
-vResetCommandTimer(
- void *hDeviceContext
-);
-
-void
-vCommandTimer(
- void *hDeviceContext
-);
-
-bool bClearBSSID_SCAN(
- void *hDeviceContext
-);
-
-bool
-bScheduleCommand(
- void *hDeviceContext,
- CMD_CODE eCommand,
- unsigned char *pbyItem0
-);
-
-void
-vCommandTimerWait(
- void *hDeviceContext,
- unsigned int MSecond
-);
-
-void
-BSSvSecondTxData(
- void *hDeviceContext
-);
-
-#endif //__WCMD_H__
diff --git a/drivers/staging/vt6655/wctl.c b/drivers/staging/vt6655/wctl.c
deleted file mode 100644
index 5a54d98d985..00000000000
--- a/drivers/staging/vt6655/wctl.c
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: wctl.c
- *
- * Purpose: handle WMAC duplicate filter & defragment
- *
- * Author: Jerry Chen
- *
- * Date: Jun. 27, 2002
- *
- * Functions:
- * WCTLbIsDuplicate - Test if duplicate packet
- * WCTLuSearchDFCB - Search DeFragment Control Database
- * WCTLuInsertDFCB - Insert DeFragment Control Database
- * WCTLbHandleFragment - Handle received fragment packet
- *
- * Revision History:
- *
- */
-
-#include "wctl.h"
-#include "device.h"
-#include "card.h"
-
-/*--------------------- Static Definitions -------------------------*/
-
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Variables --------------------------*/
-
-/*--------------------- Static Functions --------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*
- * Description:
- * Scan the Rx cache. Return true if the packet is a duplicate; otherwise
- * insert it into the receive cache and return false.
- *
- * Parameters:
- * In:
- * pCache - Receive packets history
- * pMACHeader - 802.11 MAC Header of received packet
- * Out:
- * none
- *
- * Return Value: true if packet duplicate; otherwise false
- *
- */
-
-bool WCTLbIsDuplicate(PSCache pCache, PS802_11Header pMACHeader)
-{
- unsigned int uIndex;
- unsigned int ii;
- PSCacheEntry pCacheEntry;
-
- if (IS_FC_RETRY(pMACHeader)) {
- uIndex = pCache->uInPtr;
- for (ii = 0; ii < DUPLICATE_RX_CACHE_LENGTH; ii++) {
- pCacheEntry = &(pCache->asCacheEntry[uIndex]);
- if ((pCacheEntry->wFmSequence == pMACHeader->wSeqCtl) &&
- ether_addr_equal(pCacheEntry->abyAddr2,
- pMACHeader->abyAddr2)) {
- /* Duplicate match */
- return true;
- }
- ADD_ONE_WITH_WRAP_AROUND(uIndex, DUPLICATE_RX_CACHE_LENGTH);
- }
- }
- /* Not found in cache - insert it */
- pCacheEntry = &pCache->asCacheEntry[pCache->uInPtr];
- pCacheEntry->wFmSequence = pMACHeader->wSeqCtl;
- memcpy(&(pCacheEntry->abyAddr2[0]), &(pMACHeader->abyAddr2[0]), ETH_ALEN);
- ADD_ONE_WITH_WRAP_AROUND(pCache->uInPtr, DUPLICATE_RX_CACHE_LENGTH);
- return false;
-}
-
-/*
- * Description:
- * Find whether the sequence number of a received fragment packet is already in the defragment database
- *
- * Parameters:
- * In:
- * pDevice - Pointer to adapter
- * pMACHeader - 802.11 MAC Header of received packet
- * Out:
- * none
- *
- * Return Value: index number in Defragment Database
- *
- */
-unsigned int WCTLuSearchDFCB(struct vnt_private *pDevice,
- PS802_11Header pMACHeader)
-{
- unsigned int ii;
-
- for (ii = 0; ii < pDevice->cbDFCB; ii++) {
- if (pDevice->sRxDFCB[ii].bInUse &&
- ether_addr_equal(pDevice->sRxDFCB[ii].abyAddr2,
- pMACHeader->abyAddr2)) {
- return ii;
- }
- }
- return pDevice->cbDFCB;
-}
-
-/*
- * Description:
- * Insert received fragment packet in Defragment Database
- *
- * Parameters:
- * In:
- * pDevice - Pointer to adapter
- * pMACHeader - 802.11 MAC Header of received packet
- * Out:
- * none
- *
- * Return Value: index number in Defragment Database
- *
- */
-unsigned int WCTLuInsertDFCB(struct vnt_private *pDevice, PS802_11Header pMACHeader)
-{
- unsigned int ii;
-
- if (pDevice->cbFreeDFCB == 0)
- return pDevice->cbDFCB;
- for (ii = 0; ii < pDevice->cbDFCB; ii++) {
- if (!pDevice->sRxDFCB[ii].bInUse) {
- pDevice->cbFreeDFCB--;
- pDevice->sRxDFCB[ii].uLifetime = pDevice->dwMaxReceiveLifetime;
- pDevice->sRxDFCB[ii].bInUse = true;
- pDevice->sRxDFCB[ii].wSequence = (pMACHeader->wSeqCtl >> 4);
- pDevice->sRxDFCB[ii].wFragNum = (pMACHeader->wSeqCtl & 0x000F);
- memcpy(&(pDevice->sRxDFCB[ii].abyAddr2[0]), &(pMACHeader->abyAddr2[0]), ETH_ALEN);
- return ii;
- }
- }
- return pDevice->cbDFCB;
-}
-
-/*
- * Description:
- * Handle received fragment packet
- *
- * Parameters:
- * In:
- * pDevice - Pointer to adapter
- * pMACHeader - 802.11 MAC Header of received packet
- * cbFrameLength - Frame length
- * bWEP - is WEP packet
- * Out:
- * none
- *
- * Return Value: true if it is a valid fragment packet and we have the resources to defragment it; otherwise false
- *
- */
-bool WCTLbHandleFragment(struct vnt_private *pDevice, PS802_11Header pMACHeader,
- unsigned int cbFrameLength, bool bWEP, bool bExtIV)
-{
- unsigned int uHeaderSize;
-
- if (bWEP) {
- uHeaderSize = 28;
- if (bExtIV)
- // ExtIV
- uHeaderSize += 4;
- } else {
- uHeaderSize = 24;
- }
-
- if (IS_FIRST_FRAGMENT_PKT(pMACHeader)) {
- pDevice->uCurrentDFCBIdx = WCTLuSearchDFCB(pDevice, pMACHeader);
- if (pDevice->uCurrentDFCBIdx < pDevice->cbDFCB) {
- // duplicate, we must flush previous DCB
- pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].uLifetime = pDevice->dwMaxReceiveLifetime;
- pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].wSequence = (pMACHeader->wSeqCtl >> 4);
- pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].wFragNum = (pMACHeader->wSeqCtl & 0x000F);
- } else {
- pDevice->uCurrentDFCBIdx = WCTLuInsertDFCB(pDevice, pMACHeader);
- if (pDevice->uCurrentDFCBIdx == pDevice->cbDFCB)
- return false;
- }
- // reserve 4 bytes to match the MAC Rx buffer
- pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].pbyRxBuffer = (unsigned char *)(pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].skb->data + 4);
- memcpy(pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].pbyRxBuffer, pMACHeader, cbFrameLength);
- pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].cbFrameLength = cbFrameLength;
- pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].pbyRxBuffer += cbFrameLength;
- pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].wFragNum++;
- return false;
- } else {
- pDevice->uCurrentDFCBIdx = WCTLuSearchDFCB(pDevice, pMACHeader);
- if (pDevice->uCurrentDFCBIdx != pDevice->cbDFCB) {
- if ((pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].wSequence == (pMACHeader->wSeqCtl >> 4)) &&
- (pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].wFragNum == (pMACHeader->wSeqCtl & 0x000F)) &&
- ((pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].cbFrameLength + cbFrameLength - uHeaderSize) < 2346)) {
- memcpy(pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].pbyRxBuffer, ((unsigned char *)(pMACHeader) + uHeaderSize), (cbFrameLength - uHeaderSize));
- pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].cbFrameLength += (cbFrameLength - uHeaderSize);
- pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].pbyRxBuffer += (cbFrameLength - uHeaderSize);
- pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].wFragNum++;
- } else {
- // seq error or frag # error flush DFCB
- pDevice->cbFreeDFCB++;
- pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].bInUse = false;
- return false;
- }
- } else {
- return false;
- }
- if (IS_LAST_FRAGMENT_PKT(pMACHeader)) {
- //enq defragcontrolblock
- pDevice->cbFreeDFCB++;
- pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].bInUse = false;
- return true;
- }
- return false;
- }
-}
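The header sizes assumed by the function above, spelled out for reference (a summary of the existing logic, not new behaviour): only the bytes past uHeaderSize of each follow-up fragment are appended to the defragment buffer, and reassembly is abandoned once the total would reach 2346 bytes, the classic maximum MPDU size.

	/* uHeaderSize:  plain data = 24,  WEP = 24 + 4 (IV) = 28,  WEP + ExtIV = 32 */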
diff --git a/drivers/staging/vt6655/wctl.h b/drivers/staging/vt6655/wctl.h
deleted file mode 100644
index f0995d86f71..00000000000
--- a/drivers/staging/vt6655/wctl.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: wctl.h
- *
- * Purpose:
- *
- * Author: Jerry Chen
- *
- * Date: Jun. 27, 2002
- *
- */
-
-#ifndef __WCTL_H__
-#define __WCTL_H__
-
-#include "ttype.h"
-#include "tether.h"
-#include "device.h"
-
-/*--------------------- Export Definitions -------------------------*/
-
-#define IS_TYPE_DATA(pMACHeader) \
- ((((PS802_11Header) pMACHeader)->wFrameCtl & TYPE_802_11_MASK) == TYPE_802_11_DATA)
-
-#define IS_TYPE_MGMT(pMACHeader) \
- ((((PS802_11Header) pMACHeader)->wFrameCtl & TYPE_802_11_MASK) == TYPE_802_11_MGMT)
-
-#define IS_TYPE_CONTROL(pMACHeader) \
- ((((PS802_11Header) pMACHeader)->wFrameCtl & TYPE_802_11_MASK) == TYPE_802_11_CTL)
-
-#define IS_FC_MOREDATA(pMACHeader) \
- ((((PS802_11Header) pMACHeader)->wFrameCtl & FC_MOREDATA) == FC_MOREDATA)
-
-#define IS_FC_POWERMGT(pMACHeader) \
- ((((PS802_11Header) pMACHeader)->wFrameCtl & FC_POWERMGT) == FC_POWERMGT)
-
-#define IS_FC_RETRY(pMACHeader) \
- ((((PS802_11Header) pMACHeader)->wFrameCtl & FC_RETRY) == FC_RETRY)
-
-#define IS_FC_WEP(pMACHeader) \
- ((((PS802_11Header) pMACHeader)->wFrameCtl & FC_WEP) == FC_WEP)
-
-#ifdef __BIG_ENDIAN
-
-#define IS_FRAGMENT_PKT(pMACHeader) \
- (((((PS802_11Header) pMACHeader)->wFrameCtl & FC_MOREFRAG) != 0) | \
- ((((PS802_11Header) pMACHeader)->wSeqCtl & 0x0F00) != 0))
-
-#define IS_FIRST_FRAGMENT_PKT(pMACHeader) \
- ((((PS802_11Header) pMACHeader)->wSeqCtl & 0x0F00) == 0)
-
-#else
-
-#define IS_FRAGMENT_PKT(pMACHeader) \
- (((((PS802_11Header) pMACHeader)->wFrameCtl & FC_MOREFRAG) != 0) | \
- ((((PS802_11Header) pMACHeader)->wSeqCtl & 0x000F) != 0))
-
-#define IS_FIRST_FRAGMENT_PKT(pMACHeader) \
- ((((PS802_11Header) pMACHeader)->wSeqCtl & 0x000F) == 0)
-
-#endif//#ifdef __BIG_ENDIAN
-
-#define IS_LAST_FRAGMENT_PKT(pMACHeader) \
- ((((PS802_11Header) pMACHeader)->wFrameCtl & FC_MOREFRAG) == 0)
-
-#define IS_CTL_PSPOLL(pMACHeader) \
- ((((PS802_11Header) pMACHeader)->wFrameCtl & TYPE_SUBTYPE_MASK) == TYPE_CTL_PSPOLL)
-
-#define ADD_ONE_WITH_WRAP_AROUND(uVar, uModulo) \
-do { \
- if ((uVar) >= ((uModulo) - 1)) \
- (uVar) = 0; \
- else \
- (uVar)++; \
-} while (0)
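A minimal usage sketch (not taken from the driver) of the wrap-around macro above, advancing a ring index through a fixed-size cache; the cache depth used here is a hypothetical placeholder.

#include <stdio.h>

#define CACHE_DEPTH 4	/* hypothetical cache depth */

#define ADD_ONE_WITH_WRAP_AROUND(uVar, uModulo) \
do { \
	if ((uVar) >= ((uModulo) - 1)) \
		(uVar) = 0; \
	else \
		(uVar)++; \
} while (0)

int main(void)
{
	unsigned int idx = 0;
	int i;

	for (i = 0; i < 6; i++) {
		printf("%u ", idx);	/* prints: 0 1 2 3 0 1 */
		ADD_ONE_WITH_WRAP_AROUND(idx, CACHE_DEPTH);
	}
	printf("\n");
	return 0;
}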
-
-/*--------------------- Export Classes ----------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-bool WCTLbIsDuplicate(PSCache pCache, PS802_11Header pMACHeader);
-bool WCTLbHandleFragment(struct vnt_private *, PS802_11Header pMACHeader,
- unsigned int cbFrameLength, bool bWEP, bool bExtIV);
-unsigned int WCTLuSearchDFCB(struct vnt_private *, PS802_11Header pMACHeader);
-unsigned int WCTLuInsertDFCB(struct vnt_private *, PS802_11Header pMACHeader);
-
-#endif // __WCTL_H__
diff --git a/drivers/staging/vt6655/wmgr.c b/drivers/staging/vt6655/wmgr.c
deleted file mode 100644
index c73c39d7adf..00000000000
--- a/drivers/staging/vt6655/wmgr.c
+++ /dev/null
@@ -1,4602 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: wmgr.c
- *
- * Purpose: Handles the 802.11 management functions
- *
- * Author: Lyndon Chen
- *
- * Date: May 8, 2002
- *
- * Functions:
- * nsMgrObjectInitial - Initialize Management Object data structure
- * vMgrObjectReset - Reset Management Object data structure
- * vMgrAssocBeginSta - Start associate function
- * vMgrReAssocBeginSta - Start reassociate function
- * vMgrDisassocBeginSta - Start disassociate function
- * s_vMgrRxAssocRequest - Handle Rcv associate_request
- * s_vMgrRxAssocResponse - Handle Rcv associate_response
- * vMgrAuthenBeginSta - Start authentication function
- * vMgrDeAuthenBeginSta - Start deauthentication function
- * s_vMgrRxAuthentication - Handle Rcv authentication
- * s_vMgrRxAuthenSequence_1 - Handle Rcv authentication sequence 1
- * s_vMgrRxAuthenSequence_2 - Handle Rcv authentication sequence 2
- * s_vMgrRxAuthenSequence_3 - Handle Rcv authentication sequence 3
- * s_vMgrRxAuthenSequence_4 - Handle Rcv authentication sequence 4
- * s_vMgrRxDisassociation - Handle Rcv disassociation
- * s_vMgrRxBeacon - Handle Rcv Beacon
- * vMgrCreateOwnIBSS - Create ad_hoc IBSS or AP BSS
- * vMgrJoinBSSBegin - Join BSS function
- * s_vMgrSynchBSS - Synch & adopt BSS parameters
- * s_MgrMakeBeacon - Create Beacon frame
- * s_MgrMakeProbeResponse - Create Probe Response frame
- * s_MgrMakeAssocRequest - Create Associate Request frame
- * s_MgrMakeReAssocRequest - Create ReAssociate Request frame
- * s_vMgrRxProbeResponse - Handle Rcv probe_response
- * s_vMgrRxProbeRequest - Handle Rcv probe_request
- * bMgrPrepareBeaconToSend - Prepare Beacon frame
- * s_vMgrLogStatus - Log 802.11 Status
- * vMgrRxManagePacket - Rcv management frame dispatch function
- * s_vMgrFormatTIM - Assemble the TIM field of the beacon
- * vMgrTimerInit - Initialize 1-sec and command callback functions
- *
- * Revision History:
- *
- */
-
-#include "tmacro.h"
-#include "desc.h"
-#include "device.h"
-#include "card.h"
-#include "channel.h"
-#include "80211hdr.h"
-#include "80211mgr.h"
-#include "wmgr.h"
-#include "wcmd.h"
-#include "mac.h"
-#include "bssdb.h"
-#include "power.h"
-#include "datarate.h"
-#include "baseband.h"
-#include "rxtx.h"
-#include "wpa.h"
-#include "rf.h"
-#include "iowpa.h"
-
-/*--------------------- Static Definitions -------------------------*/
-
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Functions --------------------------*/
-//2008-8-4 <add> by chester
-static bool ChannelExceedZoneType(
- struct vnt_private *pDevice,
- unsigned char byCurrChannel
-);
-
- // Association/disassociation functions
-static
-PSTxMgmtPacket
-s_MgrMakeAssocRequest(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- unsigned char *pDAddr,
- unsigned short wCurrCapInfo,
- unsigned short wListenInterval,
- PWLAN_IE_SSID pCurrSSID,
- PWLAN_IE_SUPP_RATES pCurrRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates
-);
-
-static
-void
-s_vMgrRxAssocRequest(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket,
- unsigned int uNodeIndex
-);
-
-static
-PSTxMgmtPacket
-s_MgrMakeReAssocRequest(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- unsigned char *pDAddr,
- unsigned short wCurrCapInfo,
- unsigned short wListenInterval,
- PWLAN_IE_SSID pCurrSSID,
- PWLAN_IE_SUPP_RATES pCurrRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates
-);
-
-static
-void
-s_vMgrRxAssocResponse(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket,
- bool bReAssocType
-);
-
-static
-void
-s_vMgrRxDisassociation(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket
-);
-
-// Authentication/deauthen functions
-static
-void
-s_vMgrRxAuthenSequence_1(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PWLAN_FR_AUTHEN pFrame
-);
-
-static
-void
-s_vMgrRxAuthenSequence_2(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PWLAN_FR_AUTHEN pFrame
-);
-
-static
-void
-s_vMgrRxAuthenSequence_3(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PWLAN_FR_AUTHEN pFrame
-);
-
-static
-void
-s_vMgrRxAuthenSequence_4(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PWLAN_FR_AUTHEN pFrame
-);
-
-static
-void
-s_vMgrRxAuthentication(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket
-);
-
-static
-void
-s_vMgrRxDeauthentication(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket
-);
-
-// Scan functions
-// probe request/response functions
-static
-void
-s_vMgrRxProbeRequest(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket
-);
-
-static
-void
-s_vMgrRxProbeResponse(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket
-);
-
-// beacon functions
-static
-void
-s_vMgrRxBeacon(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket,
- bool bInScan
-);
-
-static
-void
-s_vMgrFormatTIM(
- PSMgmtObject pMgmt,
- PWLAN_IE_TIM pTIM
-);
-
-static
-PSTxMgmtPacket
-s_MgrMakeBeacon(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- unsigned short wCurrCapInfo,
- unsigned short wCurrBeaconPeriod,
- unsigned int uCurrChannel,
- unsigned short wCurrATIMWinodw,
- PWLAN_IE_SSID pCurrSSID,
- unsigned char *pCurrBSSID,
- PWLAN_IE_SUPP_RATES pCurrSuppRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates
-);
-
-// Association response
-static
-PSTxMgmtPacket
-s_MgrMakeAssocResponse(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- unsigned short wCurrCapInfo,
- unsigned short wAssocStatus,
- unsigned short wAssocAID,
- unsigned char *pDstAddr,
- PWLAN_IE_SUPP_RATES pCurrSuppRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates
-);
-
-// ReAssociation response
-static
-PSTxMgmtPacket
-s_MgrMakeReAssocResponse(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- unsigned short wCurrCapInfo,
- unsigned short wAssocStatus,
- unsigned short wAssocAID,
- unsigned char *pDstAddr,
- PWLAN_IE_SUPP_RATES pCurrSuppRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates
-);
-
-// Probe response
-static
-PSTxMgmtPacket
-s_MgrMakeProbeResponse(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- unsigned short wCurrCapInfo,
- unsigned short wCurrBeaconPeriod,
- unsigned int uCurrChannel,
- unsigned short wCurrATIMWinodw,
- unsigned char *pDstAddr,
- PWLAN_IE_SSID pCurrSSID,
- unsigned char *pCurrBSSID,
- PWLAN_IE_SUPP_RATES pCurrSuppRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates,
- unsigned char byPHYType
-);
-
-// received status
-static
-void
-s_vMgrLogStatus(
- PSMgmtObject pMgmt,
- unsigned short wStatus
-);
-
-static
-void
-s_vMgrSynchBSS(
- struct vnt_private *pDevice,
- unsigned int uBSSMode,
- PKnownBSS pCurr,
- PCMD_STATUS pStatus
-);
-
-static bool
-s_bCipherMatch(
- PKnownBSS pBSSNode,
- NDIS_802_11_ENCRYPTION_STATUS EncStatus,
- unsigned char *pbyCCSPK,
- unsigned char *pbyCCSGK
-);
-
-static void Encyption_Rebuild(
- struct vnt_private *pDevice,
- PKnownBSS pCurr
-);
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-/*+
- *
- * Routine Description:
- * Allocates and initializes the Management object.
- *
- * Return Value:
- * Ndis_status.
- *
- -*/
-
-void
-vMgrObjectInit(
- void *hDeviceContext
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- int ii;
-
- pMgmt->pbyPSPacketPool = &pMgmt->byPSPacketPool[0];
- pMgmt->pbyMgmtPacketPool = &pMgmt->byMgmtPacketPool[0];
- pMgmt->uCurrChannel = pDevice->uChannel;
- for (ii = 0; ii < WLAN_BSSID_LEN; ii++)
- pMgmt->abyDesireBSSID[ii] = 0xFF;
-
- pMgmt->sAssocInfo.AssocInfo.Length = sizeof(NDIS_802_11_ASSOCIATION_INFORMATION);
- pMgmt->byCSSPK = KEY_CTL_NONE;
- pMgmt->byCSSGK = KEY_CTL_NONE;
- pMgmt->wIBSSBeaconPeriod = DEFAULT_IBSS_BI;
- BSSvClearBSSList((void *)pDevice, false);
-}
-
-/*+
- *
- * Routine Description:
- * Initializes timer object
- *
- * Return Value:
- * Ndis_status.
- *
- -*/
-
-void
-vMgrTimerInit(
- void *hDeviceContext
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
-
- init_timer(&pMgmt->sTimerSecondCallback);
- pMgmt->sTimerSecondCallback.data = (unsigned long) pDevice;
- pMgmt->sTimerSecondCallback.function = (TimerFunction)BSSvSecondCallBack;
- pMgmt->sTimerSecondCallback.expires = RUN_AT(HZ);
-
- init_timer(&pDevice->sTimerCommand);
- pDevice->sTimerCommand.data = (unsigned long) pDevice;
- pDevice->sTimerCommand.function = (TimerFunction)vCommandTimer;
- pDevice->sTimerCommand.expires = RUN_AT(HZ);
-
- init_timer(&pDevice->sTimerTxData);
- pDevice->sTimerTxData.data = (unsigned long) pDevice;
- pDevice->sTimerTxData.function = (TimerFunction)BSSvSecondTxData;
- pDevice->sTimerTxData.expires = RUN_AT(10*HZ); //10s callback
- pDevice->fTxDataInSleep = false;
- pDevice->IsTxDataTrigger = false;
- pDevice->nTxDataTimeCout = 0;
-
- pDevice->cbFreeCmdQueue = CMD_Q_SIZE;
- pDevice->uCmdDequeueIdx = 0;
- pDevice->uCmdEnqueueIdx = 0;
-}
-
-/*+
- *
- * Routine Description:
- * Reset the management object structure.
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrObjectReset(
- void *hDeviceContext
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
-
- pMgmt->eCurrMode = WMAC_MODE_STANDBY;
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- pDevice->bEnablePSMode = false;
- // TODO: timer
-}
-
-/*+
- *
- * Routine Description:
- * Start the station association procedure. Namely, send an
- * association request frame to the AP.
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrAssocBeginSta(
- void *hDeviceContext,
- PSMgmtObject pMgmt,
- PCMD_STATUS pStatus
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSTxMgmtPacket pTxPacket;
-
- pMgmt->wCurrCapInfo = 0;
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_ESS(1);
- if (pDevice->bEncryptionEnable)
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_PRIVACY(1);
-
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTPREAMBLE(1);
- if (pMgmt->wListenInterval == 0)
- pMgmt->wListenInterval = 1; // at least one.
-
- // ERP Phy (802.11g) should support short preamble.
- if (pMgmt->eCurrentPHYMode == PHY_TYPE_11G) {
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTPREAMBLE(1);
- if (CARDbIsShorSlotTime(pMgmt->pAdapter))
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTSLOTTIME(1);
- } else if (pMgmt->eCurrentPHYMode == PHY_TYPE_11B) {
- if (CARDbIsShortPreamble(pMgmt->pAdapter))
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTPREAMBLE(1);
- }
- if (pMgmt->b11hEnable)
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SPECTRUMMNG(1);
-
- /* build an assocreq frame and send it */
- pTxPacket = s_MgrMakeAssocRequest
- (
- pDevice,
- pMgmt,
- pMgmt->abyCurrBSSID,
- pMgmt->wCurrCapInfo,
- pMgmt->wListenInterval,
- (PWLAN_IE_SSID)pMgmt->abyCurrSSID,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates
-);
-
- if (pTxPacket != NULL) {
- /* send the frame */
- *pStatus = csMgmt_xmit(pDevice, pTxPacket);
- if (*pStatus == CMD_STATUS_PENDING) {
- pMgmt->eCurrState = WMAC_STATE_ASSOCPENDING;
- *pStatus = CMD_STATUS_SUCCESS;
- }
- } else {
- *pStatus = CMD_STATUS_RESOURCES;
- }
-}
-
-/*+
- *
- * Routine Description:
- * Start the station re-association procedure.
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrReAssocBeginSta(
- void *hDeviceContext,
- PSMgmtObject pMgmt,
- PCMD_STATUS pStatus
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSTxMgmtPacket pTxPacket;
-
- pMgmt->wCurrCapInfo = 0;
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_ESS(1);
- if (pDevice->bEncryptionEnable)
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_PRIVACY(1);
-
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTPREAMBLE(1);
-
- if (pMgmt->wListenInterval == 0)
- pMgmt->wListenInterval = 1; // at least one.
-
- // ERP Phy (802.11g) should support short preamble.
- if (pMgmt->eCurrentPHYMode == PHY_TYPE_11G) {
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTPREAMBLE(1);
- if (CARDbIsShorSlotTime(pMgmt->pAdapter))
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTSLOTTIME(1);
- } else if (pMgmt->eCurrentPHYMode == PHY_TYPE_11B) {
- if (CARDbIsShortPreamble(pMgmt->pAdapter))
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTPREAMBLE(1);
- }
-
- if (pMgmt->b11hEnable)
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SPECTRUMMNG(1);
-
- pTxPacket = s_MgrMakeReAssocRequest
- (
- pDevice,
- pMgmt,
- pMgmt->abyCurrBSSID,
- pMgmt->wCurrCapInfo,
- pMgmt->wListenInterval,
- (PWLAN_IE_SSID)pMgmt->abyCurrSSID,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates
-);
-
- if (pTxPacket != NULL) {
- /* send the frame */
- *pStatus = csMgmt_xmit(pDevice, pTxPacket);
- if (*pStatus != CMD_STATUS_PENDING)
- pr_debug("Mgt:Reassociation tx failed\n");
- else
- pr_debug("Mgt:Reassociation tx sending\n");
- }
-}
-
-/*+
- *
- * Routine Description:
- * Send a disassociation request frame to the AP.
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrDisassocBeginSta(
- void *hDeviceContext,
- PSMgmtObject pMgmt,
- unsigned char *abyDestAddress,
- unsigned short wReason,
- PCMD_STATUS pStatus
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSTxMgmtPacket pTxPacket = NULL;
- WLAN_FR_DISASSOC sFrame;
-
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_DISASSOC_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
-
- // Setup the sFrame structure
- sFrame.pBuf = (unsigned char *)pTxPacket->p80211Header;
- sFrame.len = WLAN_DISASSOC_FR_MAXLEN;
-
- // format fixed field frame structure
- vMgrEncodeDisassociation(&sFrame);
-
- // Setup the header
- sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16(
- (
- WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) |
- WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_DISASSOC)
-));
-
- memcpy(sFrame.pHdr->sA3.abyAddr1, abyDestAddress, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
-
- // Set reason code
- *(sFrame.pwReason) = cpu_to_le16(wReason);
- pTxPacket->cbMPDULen = sFrame.len;
- pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN;
-
- // send the frame
- *pStatus = csMgmt_xmit(pDevice, pTxPacket);
- if (*pStatus == CMD_STATUS_PENDING) {
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- *pStatus = CMD_STATUS_SUCCESS;
- }
-}
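As a side note, the frame builders above (and the other builders in this file) compute the payload length as the MPDU length minus the fixed 24-byte three-address 802.11 header. A minimal sketch of that arithmetic, using a hypothetical disassociation frame size, not code from the patch:

#include <stdio.h>

#define WLAN_HDR_ADDR3_LEN 24	/* FC(2) + Dur(2) + A1(6) + A2(6) + A3(6) + SeqCtl(2) */

int main(void)
{
	unsigned int mpdu_len = 26;	/* hypothetical disassoc frame: header + 2-byte reason code */
	unsigned int payload_len = mpdu_len - WLAN_HDR_ADDR3_LEN;

	printf("payload bytes = %u\n", payload_len);	/* prints 2 */
	return 0;
}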
-
-/*+
- *
- * Routine Description:(AP function)
- * Handle incoming station association request frames.
- *
- * Return Value:
- * None.
- *
- -*/
-
-static
-void
-s_vMgrRxAssocRequest(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket,
- unsigned int uNodeIndex
-)
-{
- WLAN_FR_ASSOCREQ sFrame;
- CMD_STATUS Status;
- PSTxMgmtPacket pTxPacket;
- unsigned short wAssocStatus = 0;
- unsigned short wAssocAID = 0;
- unsigned int uRateLen = WLAN_RATES_MAXLEN;
- unsigned char abyCurrSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
- unsigned char abyCurrExtSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
-
- if (pMgmt->eCurrMode != WMAC_MODE_ESS_AP)
- return;
- // node index not found
- if (!uNodeIndex)
- return;
-
- //check if node is authenticated
- //decode the frame
- memset(&sFrame, 0, sizeof(WLAN_FR_ASSOCREQ));
- memset(abyCurrSuppRates, 0, WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1);
- memset(abyCurrExtSuppRates, 0, WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1);
- sFrame.len = pRxPacket->cbMPDULen;
- sFrame.pBuf = (unsigned char *)pRxPacket->p80211Header;
-
- vMgrDecodeAssocRequest(&sFrame);
-
- if (pMgmt->sNodeDBTable[uNodeIndex].eNodeState >= NODE_AUTH) {
- pMgmt->sNodeDBTable[uNodeIndex].eNodeState = NODE_ASSOC;
- pMgmt->sNodeDBTable[uNodeIndex].wCapInfo = cpu_to_le16(*sFrame.pwCapInfo);
- pMgmt->sNodeDBTable[uNodeIndex].wListenInterval = cpu_to_le16(*sFrame.pwListenInterval);
- pMgmt->sNodeDBTable[uNodeIndex].bPSEnable =
- WLAN_GET_FC_PWRMGT(sFrame.pHdr->sA3.wFrameCtl) ? true : false;
- // Todo: check sta basic rate, if ap can't support, set status code
- if (pDevice->eCurrentPHYType == PHY_TYPE_11B)
- uRateLen = WLAN_RATES_MAXLEN_11B;
-
- abyCurrSuppRates[0] = WLAN_EID_SUPP_RATES;
- abyCurrSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)sFrame.pSuppRates,
- (PWLAN_IE_SUPP_RATES)abyCurrSuppRates,
- uRateLen);
- abyCurrExtSuppRates[0] = WLAN_EID_EXTSUPP_RATES;
- if (pDevice->eCurrentPHYType == PHY_TYPE_11G)
- abyCurrExtSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)sFrame.pExtSuppRates,
- (PWLAN_IE_SUPP_RATES)abyCurrExtSuppRates,
- uRateLen);
- else
- abyCurrExtSuppRates[1] = 0;
-
- RATEvParseMaxRate((void *)pDevice,
- (PWLAN_IE_SUPP_RATES)abyCurrSuppRates,
- (PWLAN_IE_SUPP_RATES)abyCurrExtSuppRates,
- false, // do not change our basic rate
- &(pMgmt->sNodeDBTable[uNodeIndex].wMaxBasicRate),
- &(pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate),
- &(pMgmt->sNodeDBTable[uNodeIndex].wSuppRate),
- &(pMgmt->sNodeDBTable[uNodeIndex].byTopCCKBasicRate),
- &(pMgmt->sNodeDBTable[uNodeIndex].byTopOFDMBasicRate)
-);
-
- // set max tx rate
- pMgmt->sNodeDBTable[uNodeIndex].wTxDataRate =
- pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate;
-
- pr_debug("RxAssocRequest:wTxDataRate is %d\n", pMgmt->sNodeDBTable[uNodeIndex].wTxDataRate);
-
- // Todo: check sta preamble, if ap can't support, set status code
- pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble =
- WLAN_GET_CAP_INFO_SHORTPREAMBLE(*sFrame.pwCapInfo);
- pMgmt->sNodeDBTable[uNodeIndex].bShortSlotTime =
- WLAN_GET_CAP_INFO_SHORTSLOTTIME(*sFrame.pwCapInfo);
- pMgmt->sNodeDBTable[uNodeIndex].wAID = (unsigned short)uNodeIndex;
- wAssocStatus = WLAN_MGMT_STATUS_SUCCESS;
- wAssocAID = (unsigned short)uNodeIndex;
- // check if ERP is supported
- if (pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate > RATE_11M)
- pMgmt->sNodeDBTable[uNodeIndex].bERPExist = true;
-
- if (pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate <= RATE_11M) {
- // B only STA join
- pDevice->bProtectMode = true;
- pDevice->bNonERPPresent = true;
- }
- if (!pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble)
- pDevice->bBarkerPreambleMd = true;
-
- pr_info("Associate AID= %d\n", wAssocAID);
- pr_info("MAC=%2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
- sFrame.pHdr->sA3.abyAddr2[0],
- sFrame.pHdr->sA3.abyAddr2[1],
- sFrame.pHdr->sA3.abyAddr2[2],
- sFrame.pHdr->sA3.abyAddr2[3],
- sFrame.pHdr->sA3.abyAddr2[4],
- sFrame.pHdr->sA3.abyAddr2[5]
- );
- pr_info("Max Support rate = %d\n",
- pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate);
- } else {
- /* TODO: received STA under state1 handle */
- return;
- }
-
- // assoc response reply..
- pTxPacket = s_MgrMakeAssocResponse
- (
- pDevice,
- pMgmt,
- pMgmt->wCurrCapInfo,
- wAssocStatus,
- wAssocAID,
- sFrame.pHdr->sA3.abyAddr2,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates
-);
- if (pTxPacket != NULL) {
- if (pDevice->bEnableHostapd)
- return;
-
- /* send the frame */
- Status = csMgmt_xmit(pDevice, pTxPacket);
- if (Status != CMD_STATUS_PENDING)
- pr_debug("Mgt:Assoc response tx failed\n");
- else
- pr_debug("Mgt:Assoc response tx sending..\n");
- }
-}
-
-/*+
- *
- * Description:(AP function)
- * Handle incoming station re-association request frames.
- *
- * Parameters:
- * In:
- * pMgmt - Management Object structure
- * pRxPacket - Received Packet
- * Out:
- * none
- *
- * Return Value: None.
- *
- -*/
-
-static
-void
-s_vMgrRxReAssocRequest(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket,
- unsigned int uNodeIndex
-)
-{
- WLAN_FR_REASSOCREQ sFrame;
- CMD_STATUS Status;
- PSTxMgmtPacket pTxPacket;
- unsigned short wAssocStatus = 0;
- unsigned short wAssocAID = 0;
- unsigned int uRateLen = WLAN_RATES_MAXLEN;
- unsigned char abyCurrSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
- unsigned char abyCurrExtSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
-
- if (pMgmt->eCurrMode != WMAC_MODE_ESS_AP)
- return;
- // node index not found
- if (!uNodeIndex)
- return;
- //check if node is authenticated
- //decode the frame
- memset(&sFrame, 0, sizeof(WLAN_FR_REASSOCREQ));
- sFrame.len = pRxPacket->cbMPDULen;
- sFrame.pBuf = (unsigned char *)pRxPacket->p80211Header;
- vMgrDecodeReassocRequest(&sFrame);
-
- if (pMgmt->sNodeDBTable[uNodeIndex].eNodeState >= NODE_AUTH) {
- pMgmt->sNodeDBTable[uNodeIndex].eNodeState = NODE_ASSOC;
- pMgmt->sNodeDBTable[uNodeIndex].wCapInfo = cpu_to_le16(*sFrame.pwCapInfo);
- pMgmt->sNodeDBTable[uNodeIndex].wListenInterval = cpu_to_le16(*sFrame.pwListenInterval);
- pMgmt->sNodeDBTable[uNodeIndex].bPSEnable =
- WLAN_GET_FC_PWRMGT(sFrame.pHdr->sA3.wFrameCtl) ? true : false;
- // Todo: check sta basic rate, if ap can't support, set status code
-
- if (pDevice->eCurrentPHYType == PHY_TYPE_11B)
- uRateLen = WLAN_RATES_MAXLEN_11B;
-
- abyCurrSuppRates[0] = WLAN_EID_SUPP_RATES;
- abyCurrSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)sFrame.pSuppRates,
- (PWLAN_IE_SUPP_RATES)abyCurrSuppRates,
- uRateLen);
- abyCurrExtSuppRates[0] = WLAN_EID_EXTSUPP_RATES;
- if (pDevice->eCurrentPHYType == PHY_TYPE_11G) {
- abyCurrExtSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)sFrame.pExtSuppRates,
- (PWLAN_IE_SUPP_RATES)abyCurrExtSuppRates,
- uRateLen);
- } else {
- abyCurrExtSuppRates[1] = 0;
- }
-
- RATEvParseMaxRate((void *)pDevice,
- (PWLAN_IE_SUPP_RATES)abyCurrSuppRates,
- (PWLAN_IE_SUPP_RATES)abyCurrExtSuppRates,
- false, // do not change our basic rate
- &(pMgmt->sNodeDBTable[uNodeIndex].wMaxBasicRate),
- &(pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate),
- &(pMgmt->sNodeDBTable[uNodeIndex].wSuppRate),
- &(pMgmt->sNodeDBTable[uNodeIndex].byTopCCKBasicRate),
- &(pMgmt->sNodeDBTable[uNodeIndex].byTopOFDMBasicRate)
-);
-
- // set max tx rate
- pMgmt->sNodeDBTable[uNodeIndex].wTxDataRate =
- pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate;
-
- pr_debug("RxReAssocRequest:TxDataRate is %d\n", pMgmt->sNodeDBTable[uNodeIndex].wTxDataRate);
-
- // Todo: check sta preamble, if ap can't support, set status code
- pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble =
- WLAN_GET_CAP_INFO_SHORTPREAMBLE(*sFrame.pwCapInfo);
- pMgmt->sNodeDBTable[uNodeIndex].bShortSlotTime =
- WLAN_GET_CAP_INFO_SHORTSLOTTIME(*sFrame.pwCapInfo);
- pMgmt->sNodeDBTable[uNodeIndex].wAID = (unsigned short)uNodeIndex;
- wAssocStatus = WLAN_MGMT_STATUS_SUCCESS;
- wAssocAID = (unsigned short)uNodeIndex;
-
- // check if ERP is supported
- if (pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate > RATE_11M)
- pMgmt->sNodeDBTable[uNodeIndex].bERPExist = true;
-
- if (pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate <= RATE_11M) {
- // B only STA join
- pDevice->bProtectMode = true;
- pDevice->bNonERPPresent = true;
- }
- if (!pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble)
- pDevice->bBarkerPreambleMd = true;
-
- pr_info("Rx ReAssociate AID= %d\n", wAssocAID);
- pr_info("MAC=%2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
- sFrame.pHdr->sA3.abyAddr2[0],
- sFrame.pHdr->sA3.abyAddr2[1],
- sFrame.pHdr->sA3.abyAddr2[2],
- sFrame.pHdr->sA3.abyAddr2[3],
- sFrame.pHdr->sA3.abyAddr2[4],
- sFrame.pHdr->sA3.abyAddr2[5]
- );
- pr_info("Max Support rate = %d\n",
- pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate);
-
- }
-
- // assoc response reply..
- pTxPacket = s_MgrMakeReAssocResponse
- (
- pDevice,
- pMgmt,
- pMgmt->wCurrCapInfo,
- wAssocStatus,
- wAssocAID,
- sFrame.pHdr->sA3.abyAddr2,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates
- );
-
- if (pTxPacket != NULL) {
- /* send the frame */
- if (pDevice->bEnableHostapd)
- return;
-
- Status = csMgmt_xmit(pDevice, pTxPacket);
- if (Status != CMD_STATUS_PENDING)
- pr_debug("Mgt:ReAssoc response tx failed\n");
- else
- pr_debug("Mgt:ReAssoc response tx sending..\n");
- }
-}
-
-/*+
- *
- * Routine Description:
- * Handle incoming association response frames.
- *
- * Return Value:
- * None.
- *
- -*/
-
-static
-void
-s_vMgrRxAssocResponse(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket,
- bool bReAssocType
-)
-{
- WLAN_FR_ASSOCRESP sFrame;
- PWLAN_IE_SSID pItemSSID;
- unsigned char *pbyIEs;
- viawget_wpa_header *wpahdr;
-
- if (pMgmt->eCurrState == WMAC_STATE_ASSOCPENDING ||
- pMgmt->eCurrState == WMAC_STATE_ASSOC) {
- sFrame.len = pRxPacket->cbMPDULen;
- sFrame.pBuf = (unsigned char *)pRxPacket->p80211Header;
- // decode the frame
- vMgrDecodeAssocResponse(&sFrame);
- if ((sFrame.pwCapInfo == NULL) ||
- (sFrame.pwStatus == NULL) ||
- (sFrame.pwAid == NULL) ||
- (sFrame.pSuppRates == NULL)) {
- DBG_PORT80(0xCC);
- return;
- }
-
- pMgmt->sAssocInfo.AssocInfo.ResponseFixedIEs.Capabilities = *(sFrame.pwCapInfo);
- pMgmt->sAssocInfo.AssocInfo.ResponseFixedIEs.StatusCode = *(sFrame.pwStatus);
- pMgmt->sAssocInfo.AssocInfo.ResponseFixedIEs.AssociationId = *(sFrame.pwAid);
- pMgmt->sAssocInfo.AssocInfo.AvailableResponseFixedIEs |= 0x07;
-
- pMgmt->sAssocInfo.AssocInfo.ResponseIELength = sFrame.len - 24 - 6;
- pMgmt->sAssocInfo.AssocInfo.OffsetResponseIEs = pMgmt->sAssocInfo.AssocInfo.OffsetRequestIEs + pMgmt->sAssocInfo.AssocInfo.RequestIELength;
- pbyIEs = pMgmt->sAssocInfo.abyIEs;
- pbyIEs += pMgmt->sAssocInfo.AssocInfo.RequestIELength;
- memcpy(pbyIEs, (sFrame.pBuf + 24 + 6), pMgmt->sAssocInfo.AssocInfo.ResponseIELength);
-
- // save values and set current BSS state
- if (cpu_to_le16((*(sFrame.pwStatus))) == WLAN_MGMT_STATUS_SUCCESS) {
- // set AID
- pMgmt->wCurrAID = cpu_to_le16((*(sFrame.pwAid)));
- if ((pMgmt->wCurrAID >> 14) != (BIT0 | BIT1))
- pr_debug("AID from AP, has two msb clear\n");
-
- pr_info("Association Successful, AID=%d\n",
- pMgmt->wCurrAID & ~(BIT14 | BIT15));
- pMgmt->eCurrState = WMAC_STATE_ASSOC;
- BSSvUpdateAPNode((void *)pDevice, sFrame.pwCapInfo, sFrame.pSuppRates, sFrame.pExtSuppRates);
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
- pr_info("Link with AP(SSID): %s\n", pItemSSID->abySSID);
- pDevice->bLinkPass = true;
- pDevice->uBBVGADiffCount = 0;
- if ((pDevice->bWPADEVUp) && (pDevice->skb != NULL)) {
- if (skb_tailroom(pDevice->skb) < (sizeof(viawget_wpa_header) + pMgmt->sAssocInfo.AssocInfo.ResponseIELength +
- pMgmt->sAssocInfo.AssocInfo.RequestIELength)) { //data room not enough
- dev_kfree_skb(pDevice->skb);
- pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
- }
- wpahdr = (viawget_wpa_header *)pDevice->skb->data;
- wpahdr->type = VIAWGET_ASSOC_MSG;
- wpahdr->resp_ie_len = pMgmt->sAssocInfo.AssocInfo.ResponseIELength;
- wpahdr->req_ie_len = pMgmt->sAssocInfo.AssocInfo.RequestIELength;
- memcpy(pDevice->skb->data + sizeof(viawget_wpa_header), pMgmt->sAssocInfo.abyIEs, wpahdr->req_ie_len);
- memcpy(pDevice->skb->data + sizeof(viawget_wpa_header) + wpahdr->req_ie_len,
- pbyIEs,
- wpahdr->resp_ie_len
-);
- skb_put(pDevice->skb, sizeof(viawget_wpa_header) + wpahdr->resp_ie_len + wpahdr->req_ie_len);
- pDevice->skb->dev = pDevice->wpadev;
- skb_reset_mac_header(pDevice->skb);
- pDevice->skb->pkt_type = PACKET_HOST;
- pDevice->skb->protocol = htons(ETH_P_802_2);
- memset(pDevice->skb->cb, 0, sizeof(pDevice->skb->cb));
- netif_rx(pDevice->skb);
- pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
- }
-
-//2008-0409-07, <Add> by Einsn Liu
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- {
- unsigned char buf[512];
- size_t len;
- union iwreq_data wrqu;
- int we_event;
-
- memset(buf, 0, 512);
-
- len = pMgmt->sAssocInfo.AssocInfo.RequestIELength;
- if (len) {
- memcpy(buf, pMgmt->sAssocInfo.abyIEs, len);
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length = len;
- we_event = IWEVASSOCREQIE;
- wireless_send_event(pDevice->dev, we_event, &wrqu, buf);
- }
-
- memset(buf, 0, 512);
- len = pMgmt->sAssocInfo.AssocInfo.ResponseIELength;
-
- if (len) {
- memcpy(buf, pbyIEs, len);
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length = len;
- we_event = IWEVASSOCRESPIE;
- wireless_send_event(pDevice->dev, we_event, &wrqu, buf);
- }
-
- memset(&wrqu, 0, sizeof(wrqu));
- memcpy(wrqu.ap_addr.sa_data, &pMgmt->abyCurrBSSID[0], ETH_ALEN);
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL);
- }
-#endif //#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
-//End Add -- //2008-0409-07, <Add> by Einsn Liu
- } else {
- if (bReAssocType) {
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- } else {
- // jump back to the auth state and indicate the error
- pMgmt->eCurrState = WMAC_STATE_AUTH;
- }
- s_vMgrLogStatus(pMgmt, cpu_to_le16((*(sFrame.pwStatus))));
- }
-
- }
-
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
-// need to clear flags related to NetworkManager
-
- pDevice->bwextcount = 0;
- pDevice->bWPASuppWextEnabled = false;
-#endif
-
- if (pMgmt->eCurrState == WMAC_STATE_ASSOC)
- timer_expire(pDevice->sTimerCommand, 0);
-}
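A minimal standalone sketch (not part of the patch) of the AID handling in the response handler above: 802.11 association responses transmit the AID with its two most significant bits set, and the code masks them off with ~(BIT14 | BIT15) before reporting it. The field value below is hypothetical.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t aid_field = 0xC005;	/* hypothetical AID field from an AP */
	uint16_t aid = aid_field & (uint16_t)~(0x4000 | 0x8000);	/* ~(BIT14 | BIT15) */

	if ((aid_field >> 14) != 0x3)
		printf("AID from AP has the two MSBs clear\n");	/* mirrors the pr_debug above */
	printf("AID=%u\n", aid);	/* prints AID=5 */
	return 0;
}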
-
-/*+
- *
- * Routine Description:
- * Start the station authentication procedure. Namely, send an
- * authentication frame to the AP.
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrAuthenBeginSta(
- void *hDeviceContext,
- PSMgmtObject pMgmt,
- PCMD_STATUS pStatus
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- WLAN_FR_AUTHEN sFrame;
- PSTxMgmtPacket pTxPacket = NULL;
-
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_AUTHEN_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
- sFrame.pBuf = (unsigned char *)pTxPacket->p80211Header;
- sFrame.len = WLAN_AUTHEN_FR_MAXLEN;
- vMgrEncodeAuthen(&sFrame);
- /* insert values */
- sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16(
- (
- WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) |
- WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_AUTHEN)
-));
- memcpy(sFrame.pHdr->sA3.abyAddr1, pMgmt->abyCurrBSSID, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
- if (pMgmt->bShareKeyAlgorithm)
- *(sFrame.pwAuthAlgorithm) = cpu_to_le16(WLAN_AUTH_ALG_SHAREDKEY);
- else
- *(sFrame.pwAuthAlgorithm) = cpu_to_le16(WLAN_AUTH_ALG_OPENSYSTEM);
-
- *(sFrame.pwAuthSequence) = cpu_to_le16(1);
- /* Adjust the length fields */
- pTxPacket->cbMPDULen = sFrame.len;
- pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN;
-
- *pStatus = csMgmt_xmit(pDevice, pTxPacket);
- if (*pStatus == CMD_STATUS_PENDING) {
- pMgmt->eCurrState = WMAC_STATE_AUTHPENDING;
- *pStatus = CMD_STATUS_SUCCESS;
- }
-}
-
-/*+
- *
- * Routine Description:
- * Start the station (AP) deauthentication procedure. Namely, send a
- * deauthentication frame to the AP or STA.
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrDeAuthenBeginSta(
- void *hDeviceContext,
- PSMgmtObject pMgmt,
- unsigned char *abyDestAddress,
- unsigned short wReason,
- PCMD_STATUS pStatus
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- WLAN_FR_DEAUTHEN sFrame;
- PSTxMgmtPacket pTxPacket = NULL;
-
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_DEAUTHEN_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
- sFrame.pBuf = (unsigned char *)pTxPacket->p80211Header;
- sFrame.len = WLAN_DEAUTHEN_FR_MAXLEN;
- vMgrEncodeDeauthen(&sFrame);
- /* insert values */
- sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16(
- (
- WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) |
- WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_DEAUTHEN)
-));
-
- memcpy(sFrame.pHdr->sA3.abyAddr1, abyDestAddress, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
-
- *(sFrame.pwReason) = cpu_to_le16(wReason); // deauthen. bcs left BSS
- /* Adjust the length fields */
- pTxPacket->cbMPDULen = sFrame.len;
- pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN;
-
- *pStatus = csMgmt_xmit(pDevice, pTxPacket);
- if (*pStatus == CMD_STATUS_PENDING)
- *pStatus = CMD_STATUS_SUCCESS;
-}
-
-/*+
- *
- * Routine Description:
- * Handle incoming authentication frames.
- *
- * Return Value:
- * None.
- *
- -*/
-
-static
-void
-s_vMgrRxAuthentication(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket
-)
-{
- WLAN_FR_AUTHEN sFrame;
-
- // we better be an AP or a STA in AUTHPENDING otherwise ignore
- if (!(pMgmt->eCurrMode == WMAC_MODE_ESS_AP ||
- pMgmt->eCurrState == WMAC_STATE_AUTHPENDING)) {
- return;
- }
-
- // decode the frame
- sFrame.len = pRxPacket->cbMPDULen;
- sFrame.pBuf = (unsigned char *)pRxPacket->p80211Header;
- vMgrDecodeAuthen(&sFrame);
- switch (cpu_to_le16((*(sFrame.pwAuthSequence)))) {
- case 1:
- //AP function
- s_vMgrRxAuthenSequence_1(pDevice, pMgmt, &sFrame);
- break;
- case 2:
- s_vMgrRxAuthenSequence_2(pDevice, pMgmt, &sFrame);
- break;
- case 3:
- //AP function
- s_vMgrRxAuthenSequence_3(pDevice, pMgmt, &sFrame);
- break;
- case 4:
- s_vMgrRxAuthenSequence_4(pDevice, pMgmt, &sFrame);
- break;
- default:
- pr_debug("Auth Sequence error, seq = %d\n",
- cpu_to_le16((*(sFrame.pwAuthSequence))));
- break;
- }
-}
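For orientation, the dispatcher above routes on the authentication sequence number; the sketch below (not from the driver) summarizes what each step carries in the 802.11 shared-key exchange handled by s_vMgrRxAuthenSequence_1 through _4 (open-system authentication only uses the first two steps).

#include <stdio.h>

static const char *auth_step[] = {
	[1] = "STA -> AP: request (algorithm, seq=1)",
	[2] = "AP -> STA: challenge text (seq=2)",
	[3] = "STA -> AP: challenge echoed under WEP (seq=3)",
	[4] = "AP -> STA: result status (seq=4)",
};

int main(void)
{
	int seq;

	for (seq = 1; seq <= 4; seq++)
		printf("sequence %d: %s\n", seq, auth_step[seq]);
	return 0;
}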
-
-/*+
- *
- * Routine Description:
- * Handles incoming authen frames with sequence 1. Currently
- * assumes we're an AP. So far, no one appears to use authentication
- * in Ad-Hoc mode.
- *
- * Return Value:
- * None.
- *
- -*/
-
-static
-void
-s_vMgrRxAuthenSequence_1(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PWLAN_FR_AUTHEN pFrame
-)
-{
- PSTxMgmtPacket pTxPacket = NULL;
- unsigned int uNodeIndex;
- WLAN_FR_AUTHEN sFrame;
- PSKeyItem pTransmitKey;
-
- // Insert a Node entry
- if (!BSSDBbIsSTAInNodeDB(pMgmt, pFrame->pHdr->sA3.abyAddr2, &uNodeIndex)) {
- BSSvCreateOneNode(pDevice, &uNodeIndex);
- memcpy(pMgmt->sNodeDBTable[uNodeIndex].abyMACAddr, pFrame->pHdr->sA3.abyAddr2,
- WLAN_ADDR_LEN);
- }
-
- if (pMgmt->bShareKeyAlgorithm) {
- pMgmt->sNodeDBTable[uNodeIndex].eNodeState = NODE_KNOWN;
- pMgmt->sNodeDBTable[uNodeIndex].byAuthSequence = 1;
- } else {
- pMgmt->sNodeDBTable[uNodeIndex].eNodeState = NODE_AUTH;
- }
-
- // send auth reply
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_AUTHEN_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
- sFrame.pBuf = (unsigned char *)pTxPacket->p80211Header;
- sFrame.len = WLAN_AUTHEN_FR_MAXLEN;
- // format buffer structure
- vMgrEncodeAuthen(&sFrame);
- // insert values
- sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16(
- (
- WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) |
- WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_AUTHEN)|
- WLAN_SET_FC_ISWEP(0)
-));
- memcpy(sFrame.pHdr->sA3.abyAddr1, pFrame->pHdr->sA3.abyAddr2, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
- *(sFrame.pwAuthAlgorithm) = *(pFrame->pwAuthAlgorithm);
- *(sFrame.pwAuthSequence) = cpu_to_le16(2);
-
- if (cpu_to_le16(*(pFrame->pwAuthAlgorithm)) == WLAN_AUTH_ALG_SHAREDKEY) {
- if (pMgmt->bShareKeyAlgorithm)
- *(sFrame.pwStatus) = cpu_to_le16(WLAN_MGMT_STATUS_SUCCESS);
- else
- *(sFrame.pwStatus) = cpu_to_le16(WLAN_MGMT_STATUS_UNSUPPORTED_AUTHALG);
- } else {
- if (pMgmt->bShareKeyAlgorithm)
- *(sFrame.pwStatus) = cpu_to_le16(WLAN_MGMT_STATUS_UNSUPPORTED_AUTHALG);
- else
- *(sFrame.pwStatus) = cpu_to_le16(WLAN_MGMT_STATUS_SUCCESS);
- }
-
- if (pMgmt->bShareKeyAlgorithm &&
- (cpu_to_le16(*(sFrame.pwStatus)) == WLAN_MGMT_STATUS_SUCCESS)) {
- sFrame.pChallenge = (PWLAN_IE_CHALLENGE)(sFrame.pBuf + sFrame.len);
- sFrame.len += WLAN_CHALLENGE_IE_LEN;
- sFrame.pChallenge->byElementID = WLAN_EID_CHALLENGE;
- sFrame.pChallenge->len = WLAN_CHALLENGE_LEN;
- memset(pMgmt->abyChallenge, 0, WLAN_CHALLENGE_LEN);
- // get group key
- if (KeybGetTransmitKey(&(pDevice->sKey), pDevice->abyBroadcastAddr, GROUP_KEY, &pTransmitKey) == true) {
- rc4_init(&pDevice->SBox, pDevice->abyPRNG, pTransmitKey->uKeyLength+3);
- rc4_encrypt(&pDevice->SBox, pMgmt->abyChallenge, pMgmt->abyChallenge, WLAN_CHALLENGE_LEN);
- }
- memcpy(sFrame.pChallenge->abyChallenge, pMgmt->abyChallenge , WLAN_CHALLENGE_LEN);
- }
-
- /* Adjust the length fields */
- pTxPacket->cbMPDULen = sFrame.len;
- pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN;
- // send the frame
- if (pDevice->bEnableHostapd)
- return;
-
- pr_debug("Mgt:Authreq_reply sequence_1 tx..\n");
- if (csMgmt_xmit(pDevice, pTxPacket) != CMD_STATUS_PENDING)
- pr_debug("Mgt:Authreq_reply sequence_1 tx failed\n");
-}
-
-/*+
- *
- * Routine Description:
- * Handles incoming auth frames with sequence number 2. Currently
- * assumes we're a station.
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-static
-void
-s_vMgrRxAuthenSequence_2(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PWLAN_FR_AUTHEN pFrame
-)
-{
- WLAN_FR_AUTHEN sFrame;
- PSTxMgmtPacket pTxPacket = NULL;
-
- switch (cpu_to_le16((*(pFrame->pwAuthAlgorithm)))) {
- case WLAN_AUTH_ALG_OPENSYSTEM:
- if (cpu_to_le16((*(pFrame->pwStatus))) == WLAN_MGMT_STATUS_SUCCESS) {
- pr_info("802.11 Authen (OPEN) Successful\n");
- pMgmt->eCurrState = WMAC_STATE_AUTH;
- timer_expire(pDevice->sTimerCommand, 0);
- } else {
- pr_info("802.11 Authen (OPEN) Failed\n");
- s_vMgrLogStatus(pMgmt, cpu_to_le16((*(pFrame->pwStatus))));
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- }
-
- break;
-
- case WLAN_AUTH_ALG_SHAREDKEY:
-
- if (cpu_to_le16((*(pFrame->pwStatus))) == WLAN_MGMT_STATUS_SUCCESS) {
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_AUTHEN_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
- sFrame.pBuf = (unsigned char *)pTxPacket->p80211Header;
- sFrame.len = WLAN_AUTHEN_FR_MAXLEN;
- // format buffer structure
- vMgrEncodeAuthen(&sFrame);
- // insert values
- sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16(
- (
- WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) |
- WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_AUTHEN)|
- WLAN_SET_FC_ISWEP(1)
-));
- memcpy(sFrame.pHdr->sA3.abyAddr1, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
- *(sFrame.pwAuthAlgorithm) = *(pFrame->pwAuthAlgorithm);
- *(sFrame.pwAuthSequence) = cpu_to_le16(3);
- *(sFrame.pwStatus) = cpu_to_le16(WLAN_MGMT_STATUS_SUCCESS);
- sFrame.pChallenge = (PWLAN_IE_CHALLENGE)(sFrame.pBuf + sFrame.len);
- sFrame.len += WLAN_CHALLENGE_IE_LEN;
- sFrame.pChallenge->byElementID = WLAN_EID_CHALLENGE;
- sFrame.pChallenge->len = WLAN_CHALLENGE_LEN;
- memcpy(sFrame.pChallenge->abyChallenge, pFrame->pChallenge->abyChallenge, WLAN_CHALLENGE_LEN);
- // Adjust the length fields
- pTxPacket->cbMPDULen = sFrame.len;
- pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN;
- // send the frame
- if (csMgmt_xmit(pDevice, pTxPacket) != CMD_STATUS_PENDING)
- pr_debug("Mgt:Auth_reply sequence_2 tx failed\n");
-
- pr_debug("Mgt:Auth_reply sequence_2 tx ...\n");
- } else {
- pr_debug("Mgt:rx Auth_reply sequence_2 status error ...\n");
- s_vMgrLogStatus(pMgmt, cpu_to_le16((*(pFrame->pwStatus))));
- }
- break;
- default:
- pr_debug("Mgt: rx auth.seq = 2 unknown AuthAlgorithm=%d\n",
- cpu_to_le16((*(pFrame->pwAuthAlgorithm))));
- break;
- }
-}
-
-/*+
- *
- * Routine Description:
- * Handles incoming authen frames with sequence 3. Currently
- * assumes we're an AP. This function assumes the frame has
- * already been successfully decrypted.
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-static
-void
-s_vMgrRxAuthenSequence_3(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PWLAN_FR_AUTHEN pFrame
-)
-{
- PSTxMgmtPacket pTxPacket = NULL;
- unsigned int uStatusCode = 0;
- unsigned int uNodeIndex = 0;
- WLAN_FR_AUTHEN sFrame;
-
- if (!WLAN_GET_FC_ISWEP(pFrame->pHdr->sA3.wFrameCtl)) {
- uStatusCode = WLAN_MGMT_STATUS_CHALLENGE_FAIL;
- goto reply;
- }
- if (BSSDBbIsSTAInNodeDB(pMgmt, pFrame->pHdr->sA3.abyAddr2, &uNodeIndex)) {
- if (pMgmt->sNodeDBTable[uNodeIndex].byAuthSequence != 1) {
- uStatusCode = WLAN_MGMT_STATUS_RX_AUTH_NOSEQ;
- goto reply;
- }
- if (memcmp(pMgmt->abyChallenge, pFrame->pChallenge->abyChallenge, WLAN_CHALLENGE_LEN) != 0) {
- uStatusCode = WLAN_MGMT_STATUS_CHALLENGE_FAIL;
- goto reply;
- }
- } else {
- uStatusCode = WLAN_MGMT_STATUS_UNSPEC_FAILURE;
- goto reply;
- }
-
- if (uNodeIndex) {
- pMgmt->sNodeDBTable[uNodeIndex].eNodeState = NODE_AUTH;
- pMgmt->sNodeDBTable[uNodeIndex].byAuthSequence = 0;
- }
- uStatusCode = WLAN_MGMT_STATUS_SUCCESS;
- pr_debug("Challenge text check ok..\n");
-
-reply:
- // send auth reply
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_AUTHEN_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
- sFrame.pBuf = (unsigned char *)pTxPacket->p80211Header;
- sFrame.len = WLAN_AUTHEN_FR_MAXLEN;
- // format buffer structure
- vMgrEncodeAuthen(&sFrame);
- /* insert values */
- sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16(
- (
- WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) |
- WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_AUTHEN)|
- WLAN_SET_FC_ISWEP(0)
-));
- memcpy(sFrame.pHdr->sA3.abyAddr1, pFrame->pHdr->sA3.abyAddr2, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
- *(sFrame.pwAuthAlgorithm) = *(pFrame->pwAuthAlgorithm);
- *(sFrame.pwAuthSequence) = cpu_to_le16(4);
- *(sFrame.pwStatus) = cpu_to_le16(uStatusCode);
-
- /* Adjust the length fields */
- pTxPacket->cbMPDULen = sFrame.len;
- pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN;
- // send the frame
- if (pDevice->bEnableHostapd)
- return;
-
- if (csMgmt_xmit(pDevice, pTxPacket) != CMD_STATUS_PENDING)
- pr_debug("Mgt:Authreq_reply sequence_4 tx failed\n");
-}
-
-/*+
- *
- * Routine Description:
- * Handles incoming authen frames with sequence 4
- *
- *
- * Return Value:
- * None.
- *
- -*/
-static
-void
-s_vMgrRxAuthenSequence_4(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PWLAN_FR_AUTHEN pFrame
-)
-{
- if (cpu_to_le16((*(pFrame->pwStatus))) == WLAN_MGMT_STATUS_SUCCESS) {
- pr_info("802.11 Authen (SHAREDKEY) Successful\n");
- pMgmt->eCurrState = WMAC_STATE_AUTH;
- timer_expire(pDevice->sTimerCommand, 0);
- } else{
- pr_info("802.11 Authen (SHAREDKEY) Failed\n");
- s_vMgrLogStatus(pMgmt, cpu_to_le16((*(pFrame->pwStatus))));
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- }
-}
-
-/*+
- *
- * Routine Description:
- * Handles incoming disassociation frames
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-static
-void
-s_vMgrRxDisassociation(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket
-)
-{
- WLAN_FR_DISASSOC sFrame;
- unsigned int uNodeIndex = 0;
- viawget_wpa_header *wpahdr;
-
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- // if is acting an AP..
- // a STA is leaving this BSS..
- sFrame.len = pRxPacket->cbMPDULen;
- sFrame.pBuf = (unsigned char *)pRxPacket->p80211Header;
- if (BSSDBbIsSTAInNodeDB(pMgmt, pRxPacket->p80211Header->sA3.abyAddr2, &uNodeIndex))
- BSSvRemoveOneNode(pDevice, uNodeIndex);
- else
- pr_debug("Rx disassoc, sta not found\n");
-
- } else if (pMgmt->eCurrMode == WMAC_MODE_ESS_STA) {
- sFrame.len = pRxPacket->cbMPDULen;
- sFrame.pBuf = (unsigned char *)pRxPacket->p80211Header;
- vMgrDecodeDisassociation(&sFrame);
- pr_info("AP disassociated me, reason=%d\n",
- cpu_to_le16(*(sFrame.pwReason)));
- // TODO: do something to let the upper layer know, or
- // try to send an associate packet again because of the inactivity timeout
- if ((pDevice->bWPADEVUp) && (pDevice->skb != NULL)) {
- wpahdr = (viawget_wpa_header *)pDevice->skb->data;
- wpahdr->type = VIAWGET_DISASSOC_MSG;
- wpahdr->resp_ie_len = 0;
- wpahdr->req_ie_len = 0;
- skb_put(pDevice->skb, sizeof(viawget_wpa_header));
- pDevice->skb->dev = pDevice->wpadev;
- skb_reset_mac_header(pDevice->skb);
-
- pDevice->skb->pkt_type = PACKET_HOST;
- pDevice->skb->protocol = htons(ETH_P_802_2);
- memset(pDevice->skb->cb, 0, sizeof(pDevice->skb->cb));
- netif_rx(pDevice->skb);
- pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
- }
-
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- {
- union iwreq_data wrqu;
-
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- pr_debug("wireless_send_event--->SIOCGIWAP(disassociated)\n");
- wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL);
- }
-#endif
- }
- /* else, ignore it */
-}
-
-/*+
- *
- * Routine Description:
- * Handles incoming deauthentication frames
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-static
-void
-s_vMgrRxDeauthentication(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket
-)
-{
- WLAN_FR_DEAUTHEN sFrame;
- unsigned int uNodeIndex = 0;
- viawget_wpa_header *wpahdr;
-
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- //Todo:
- // if is acting an AP..
- // a STA is leaving this BSS..
- sFrame.len = pRxPacket->cbMPDULen;
- sFrame.pBuf = (unsigned char *)pRxPacket->p80211Header;
- if (BSSDBbIsSTAInNodeDB(pMgmt, pRxPacket->p80211Header->sA3.abyAddr2, &uNodeIndex))
- BSSvRemoveOneNode(pDevice, uNodeIndex);
- else
- pr_info("Rx deauth, sta not found\n");
- } else {
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_STA) {
- sFrame.len = pRxPacket->cbMPDULen;
- sFrame.pBuf = (unsigned char *)pRxPacket->p80211Header;
- vMgrDecodeDeauthen(&sFrame);
- pr_info("AP deauthed me, reason=%d\n",
- cpu_to_le16((*(sFrame.pwReason))));
- // TODO: update BSS list for specific BSSID if pre-authentication case
- if (ether_addr_equal(sFrame.pHdr->sA3.abyAddr3,
- pMgmt->abyCurrBSSID)) {
- if (pMgmt->eCurrState >= WMAC_STATE_AUTHPENDING) {
- pMgmt->sNodeDBTable[0].bActive = false;
- pMgmt->eCurrMode = WMAC_MODE_STANDBY;
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- netif_stop_queue(pDevice->dev);
- pDevice->bLinkPass = false;
- }
- }
-
- if ((pDevice->bWPADEVUp) && (pDevice->skb != NULL)) {
- wpahdr = (viawget_wpa_header *)pDevice->skb->data;
- wpahdr->type = VIAWGET_DISASSOC_MSG;
- wpahdr->resp_ie_len = 0;
- wpahdr->req_ie_len = 0;
- skb_put(pDevice->skb, sizeof(viawget_wpa_header));
- pDevice->skb->dev = pDevice->wpadev;
- skb_reset_mac_header(pDevice->skb);
- pDevice->skb->pkt_type = PACKET_HOST;
- pDevice->skb->protocol = htons(ETH_P_802_2);
- memset(pDevice->skb->cb, 0, sizeof(pDevice->skb->cb));
- netif_rx(pDevice->skb);
- pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
- }
-
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- {
- union iwreq_data wrqu;
-
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- PRINT_K("wireless_send_event--->SIOCGIWAP(disauthen)\n");
- wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL);
- }
-#endif
-
- }
- /* else, ignore it. TODO: IBSS authentication service
- would be implemented here */
- }
-}
-
-//2008-8-4 <add> by chester
-/*+
- *
- * Routine Description:
- * Check whether the current channel exceeds the limits of the ZoneType:
- * USA: 1~11;
- * Japan: 1~13;
- * Europe: 1~13
- * Return Value:
- * True: exceed;
- * False: normal case
- -*/
-static bool
-ChannelExceedZoneType(
- struct vnt_private *pDevice,
- unsigned char byCurrChannel
-)
-{
- bool exceed = false;
-
- switch (pDevice->byZoneType) {
- case 0x00: //USA:1~11
- if ((byCurrChannel < 1) || (byCurrChannel > 11))
- exceed = true;
- break;
- case 0x01: //Japan:1~13
- case 0x02: //Europe:1~13
- if ((byCurrChannel < 1) || (byCurrChannel > 13))
- exceed = true;
- break;
- default: //reserve for other zonetype
- break;
- }
-
- return exceed;
-}
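The same regulatory check in table form, as a minimal sketch (not from the driver); the zone codes and channel ranges mirror the switch above, and unknown zones fall through as in-range just like the default case.

#include <stdbool.h>
#include <stdio.h>

struct zone_limit { unsigned char zone; unsigned char min_ch, max_ch; };

static const struct zone_limit limits[] = {
	{ 0x00, 1, 11 },	/* USA */
	{ 0x01, 1, 13 },	/* Japan */
	{ 0x02, 1, 13 },	/* Europe */
};

static bool channel_exceeds_zone(unsigned char zone, unsigned char ch)
{
	size_t i;

	for (i = 0; i < sizeof(limits) / sizeof(limits[0]); i++)
		if (limits[i].zone == zone)
			return ch < limits[i].min_ch || ch > limits[i].max_ch;
	return false;	/* unknown zone: treated as in range, like the default case above */
}

int main(void)
{
	printf("%d\n", channel_exceeds_zone(0x00, 12));	/* prints 1 */
	return 0;
}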
-
-/*+
- *
- * Routine Description:
- * Handles and analyzes incoming beacon frames.
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-static
-void
-s_vMgrRxBeacon(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket,
- bool bInScan
-)
-{
- PKnownBSS pBSSList;
- WLAN_FR_BEACON sFrame;
- u64 qwTSFOffset;
- bool bIsBSSIDEqual = false;
- bool bIsSSIDEqual = false;
- bool bTSFLargeDiff = false;
- bool bTSFOffsetPostive = false;
- bool bUpdateTSF = false;
- bool bIsAPBeacon = false;
- bool bIsChannelEqual = false;
- unsigned int uLocateByteIndex;
- unsigned char byTIMBitOn = 0;
- unsigned short wAIDNumber = 0;
- unsigned int uNodeIndex;
- u64 qwTimestamp, qwLocalTSF;
- u64 qwCurrTSF;
- unsigned short wStartIndex = 0;
- unsigned short wAIDIndex = 0;
- unsigned char byCurrChannel = pRxPacket->byRxChannel;
- ERPObject sERP;
- unsigned int uRateLen = WLAN_RATES_MAXLEN;
- bool bChannelHit = false;
- bool bUpdatePhyParameter = false;
- unsigned char byIEChannel = 0;
-
- memset(&sFrame, 0, sizeof(WLAN_FR_BEACON));
- sFrame.len = pRxPacket->cbMPDULen;
- sFrame.pBuf = (unsigned char *)pRxPacket->p80211Header;
-
- // decode the beacon frame
- vMgrDecodeBeacon(&sFrame);
-
- if ((sFrame.pwBeaconInterval == NULL) ||
- (sFrame.pwCapInfo == NULL) ||
- (sFrame.pSSID == NULL) ||
- (sFrame.pSuppRates == NULL)) {
- pr_debug("Rx beacon frame error\n");
- return;
- }
-
- if (sFrame.pDSParms != NULL) {
- if (byCurrChannel > CB_MAX_CHANNEL_24G) {
- // channel remapping to the 802.11a channel numbering
- byIEChannel = get_channel_mapping(pDevice, sFrame.pDSParms->byCurrChannel, PHY_TYPE_11A);
- } else {
- byIEChannel = sFrame.pDSParms->byCurrChannel;
- }
- if (byCurrChannel != byIEChannel) {
- // adjust channel info because we received adjacent-channel packets
- bChannelHit = false;
- byCurrChannel = byIEChannel;
- }
- } else {
- // no DS channel info
- bChannelHit = true;
- }
-//2008-0730-01<Add>by MikeLiu
- if (ChannelExceedZoneType(pDevice, byCurrChannel))
- return;
-
- if (sFrame.pERP != NULL) {
- sERP.byERP = sFrame.pERP->byContext;
- sERP.bERPExist = true;
-
- } else {
- sERP.bERPExist = false;
- sERP.byERP = 0;
- }
-
- pBSSList = BSSpAddrIsInBSSList((void *)pDevice, sFrame.pHdr->sA3.abyAddr3, sFrame.pSSID);
- if (pBSSList == NULL) {
- pr_debug("Beacon/insert: RxChannel = : %d\n", byCurrChannel);
- BSSbInsertToBSSList((void *)pDevice,
- sFrame.pHdr->sA3.abyAddr3,
- *sFrame.pqwTimestamp,
- *sFrame.pwBeaconInterval,
- *sFrame.pwCapInfo,
- byCurrChannel,
- sFrame.pSSID,
- sFrame.pSuppRates,
- sFrame.pExtSuppRates,
- &sERP,
- sFrame.pRSN,
- sFrame.pRSNWPA,
- sFrame.pIE_Country,
- sFrame.pIE_Quiet,
- sFrame.len - WLAN_HDR_ADDR3_LEN,
- sFrame.pHdr->sA4.abyAddr4, // payload of beacon
- (void *)pRxPacket
-);
- } else {
- BSSbUpdateToBSSList((void *)pDevice,
- *sFrame.pqwTimestamp,
- *sFrame.pwBeaconInterval,
- *sFrame.pwCapInfo,
- byCurrChannel,
- bChannelHit,
- sFrame.pSSID,
- sFrame.pSuppRates,
- sFrame.pExtSuppRates,
- &sERP,
- sFrame.pRSN,
- sFrame.pRSNWPA,
- sFrame.pIE_Country,
- sFrame.pIE_Quiet,
- pBSSList,
- sFrame.len - WLAN_HDR_ADDR3_LEN,
- sFrame.pHdr->sA4.abyAddr4, // payload of beacon
- (void *)pRxPacket
-);
-
- }
-
- if (bInScan)
- return;
-
- if (byCurrChannel == (unsigned char)pMgmt->uCurrChannel)
- bIsChannelEqual = true;
-
- if (bIsChannelEqual && (pMgmt->eCurrMode == WMAC_MODE_ESS_AP)) {
- // if rx beacon without ERP field
- if (sERP.bERPExist) {
- if (WLAN_GET_ERP_USE_PROTECTION(sERP.byERP)) {
- pDevice->byERPFlag |= WLAN_SET_ERP_USE_PROTECTION(1);
- pDevice->wUseProtectCntDown = USE_PROTECT_PERIOD;
- }
- } else {
- pDevice->byERPFlag |= WLAN_SET_ERP_USE_PROTECTION(1);
- pDevice->wUseProtectCntDown = USE_PROTECT_PERIOD;
- }
-
- if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
- if (!WLAN_GET_CAP_INFO_SHORTPREAMBLE(*sFrame.pwCapInfo))
- pDevice->byERPFlag |= WLAN_SET_ERP_BARKER_MODE(1);
- if (!sERP.bERPExist)
- pDevice->byERPFlag |= WLAN_SET_ERP_NONERP_PRESENT(1);
- }
-
- // set to MAC&BBP
- if (WLAN_GET_ERP_USE_PROTECTION(pDevice->byERPFlag)) {
- if (!pDevice->bProtectMode) {
- MACvEnableProtectMD(pDevice->PortOffset);
- pDevice->bProtectMode = true;
- }
- }
- }
-
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP)
- return;
-
- // check if BSSID the same
- if (memcmp(sFrame.pHdr->sA3.abyAddr3,
- pMgmt->abyCurrBSSID,
- WLAN_BSSID_LEN) == 0) {
- bIsBSSIDEqual = true;
-
-// 2008-05-21 <add> by Richardtai
- pDevice->uCurrRSSI = pRxPacket->uRSSI;
- pDevice->byCurrSQ = pRxPacket->bySQ;
-
- if (pMgmt->sNodeDBTable[0].uInActiveCount != 0)
- pMgmt->sNodeDBTable[0].uInActiveCount = 0;
- }
- // check if SSID the same
- if (sFrame.pSSID->len == ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->len) {
- if (memcmp(sFrame.pSSID->abySSID,
- ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->abySSID,
- sFrame.pSSID->len
-) == 0) {
- bIsSSIDEqual = true;
- }
- }
-
- if (WLAN_GET_CAP_INFO_ESS(*sFrame.pwCapInfo) &&
- bIsBSSIDEqual &&
- bIsSSIDEqual &&
- (pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
- (pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
- // add state check to prevent reconnect failure, since we keep receiving Beacons
-
- bIsAPBeacon = true;
-
- if (pBSSList != NULL) {
- // Compare PHY parameter setting
- if (pMgmt->wCurrCapInfo != pBSSList->wCapInfo) {
- bUpdatePhyParameter = true;
- pMgmt->wCurrCapInfo = pBSSList->wCapInfo;
- }
- if (sFrame.pERP != NULL) {
- if ((sFrame.pERP->byElementID == WLAN_EID_ERP) &&
- (pMgmt->byERPContext != sFrame.pERP->byContext)) {
- bUpdatePhyParameter = true;
- pMgmt->byERPContext = sFrame.pERP->byContext;
- }
- }
- //
- // Basic Rate Set may change dynamically
- //
- if (pBSSList->eNetworkTypeInUse == PHY_TYPE_11B)
- uRateLen = WLAN_RATES_MAXLEN_11B;
-
- pMgmt->abyCurrSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)pBSSList->abySuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- uRateLen);
- pMgmt->abyCurrExtSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)pBSSList->abyExtSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates,
- uRateLen);
- RATEvParseMaxRate((void *)pDevice,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates,
- true,
- &(pMgmt->sNodeDBTable[0].wMaxBasicRate),
- &(pMgmt->sNodeDBTable[0].wMaxSuppRate),
- &(pMgmt->sNodeDBTable[0].wSuppRate),
- &(pMgmt->sNodeDBTable[0].byTopCCKBasicRate),
- &(pMgmt->sNodeDBTable[0].byTopOFDMBasicRate)
- );
- if (bUpdatePhyParameter) {
- CARDbSetPhyParameter(pMgmt->pAdapter,
- pMgmt->eCurrentPHYMode,
- pMgmt->wCurrCapInfo,
- pMgmt->byERPContext,
- pMgmt->abyCurrSuppRates,
- pMgmt->abyCurrExtSuppRates
- );
- }
- if (sFrame.pIE_PowerConstraint != NULL) {
- CARDvSetPowerConstraint(pMgmt->pAdapter,
- (unsigned char) pBSSList->uChannel,
- sFrame.pIE_PowerConstraint->byPower
-);
- }
- if (sFrame.pIE_CHSW != NULL) {
- CARDbChannelSwitch(pMgmt->pAdapter,
- sFrame.pIE_CHSW->byMode,
- get_channel_mapping(pMgmt->pAdapter, sFrame.pIE_CHSW->byMode, pMgmt->eCurrentPHYMode),
- sFrame.pIE_CHSW->byCount
- );
-
- } else if (!bIsChannelEqual) {
- set_channel(pMgmt->pAdapter, pBSSList->uChannel);
- }
- }
- }
-
-// pr_debug("Beacon 2\n");
- // check if CF field exists
- if (WLAN_GET_CAP_INFO_ESS(*sFrame.pwCapInfo)) {
- if (sFrame.pCFParms->wCFPDurRemaining > 0) {
- // TODO: deal with CFP period to set NAV
- }
- }
-
- qwTimestamp = le64_to_cpu(*sFrame.pqwTimestamp);
- qwLocalTSF = pRxPacket->qwLocalTSF;
-
- // check whether the beacon TSF is larger or smaller than our local TSF
- if (qwTimestamp >= qwLocalTSF)
- bTSFOffsetPostive = true;
- else
- bTSFOffsetPostive = false;
-
- if (bTSFOffsetPostive)
- qwTSFOffset = CARDqGetTSFOffset(pRxPacket->byRxRate, (qwTimestamp), (qwLocalTSF));
- else
- qwTSFOffset = CARDqGetTSFOffset(pRxPacket->byRxRate, (qwLocalTSF), (qwTimestamp));
-
- if (qwTSFOffset > TRIVIAL_SYNC_DIFFERENCE)
- bTSFLargeDiff = true;
-
- // if infra mode
- if (bIsAPBeacon) {
- // Infra mode: the local TSF always follows the AP's TSF if the difference is large.
- if (bTSFLargeDiff)
- bUpdateTSF = true;
-
- if (pDevice->bEnablePSMode && (sFrame.pTIM != NULL)) {
- // deal with DTIM, analyze TIM
- pMgmt->bMulticastTIM = WLAN_MGMT_IS_MULTICAST_TIM(sFrame.pTIM->byBitMapCtl) ? true : false;
- pMgmt->byDTIMCount = sFrame.pTIM->byDTIMCount;
- pMgmt->byDTIMPeriod = sFrame.pTIM->byDTIMPeriod;
- wAIDNumber = pMgmt->wCurrAID & ~(BIT14|BIT15);
-
- // check whether this AID's bit is set in the TIM partial virtual bitmap (see the sketch after s_vMgrRxBeacon())
- // wStartIndex = N1
- wStartIndex = WLAN_MGMT_GET_TIM_OFFSET(sFrame.pTIM->byBitMapCtl) << 1;
- // AIDIndex = N2
- wAIDIndex = (wAIDNumber >> 3);
- if ((wAIDNumber > 0) && (wAIDIndex >= wStartIndex)) {
- uLocateByteIndex = wAIDIndex - wStartIndex;
- // len = byDTIMCount + byDTIMPeriod + byBitMapCtl + byVirtBitMap[0~250]
- if (sFrame.pTIM->len >= (uLocateByteIndex + 4)) {
- byTIMBitOn = (0x01) << ((wAIDNumber) % 8);
- pMgmt->bInTIM = sFrame.pTIM->byVirtBitMap[uLocateByteIndex] & byTIMBitOn ? true : false;
- } else {
- pMgmt->bInTIM = false;
- }
- } else {
- pMgmt->bInTIM = false;
- }
-
- if (pMgmt->bInTIM ||
- (pMgmt->bMulticastTIM && (pMgmt->byDTIMCount == 0))) {
- pMgmt->bInTIMWake = true;
- // send out ps-poll packet
-
- if (pMgmt->bInTIM)
- PSvSendPSPOLL(pDevice);
-
- } else {
- pMgmt->bInTIMWake = false;
- pr_debug("BCN: Not In TIM..\n");
- if (!pDevice->bPWBitOn) {
- pr_debug("BCN: Send Null Packet\n");
- if (PSbSendNullPacket(pDevice))
- pDevice->bPWBitOn = true;
- }
- if (PSbConsiderPowerDown(pDevice, false, false))
- pr_debug("BCN: Power down now...\n");
- }
-
- }
-
- }
- // if adhoc mode
- if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) && !bIsAPBeacon && bIsChannelEqual) {
- if (bIsBSSIDEqual) {
- // Use sNodeDBTable[0].uInActiveCount as IBSS beacons received count.
- if (pMgmt->sNodeDBTable[0].uInActiveCount != 0)
- pMgmt->sNodeDBTable[0].uInActiveCount = 0;
-
- // adhoc mode: TSF is updated only when the beacon TSF is larger than the local TSF
- if (bTSFLargeDiff && bTSFOffsetPostive &&
- (pMgmt->eCurrState == WMAC_STATE_JOINTED))
- bUpdateTSF = true;
-
- // During DPC, the spinlock is already held.
- if (BSSDBbIsSTAInNodeDB(pMgmt, sFrame.pHdr->sA3.abyAddr2, &uNodeIndex)) {
- // Update the STA. (Technically the Beacons of all the IBSS nodes
- // should be identical, but that's not happening in practice.)
- pMgmt->abyCurrSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)sFrame.pSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- WLAN_RATES_MAXLEN_11B);
- RATEvParseMaxRate((void *)pDevice,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- NULL,
- true,
- &(pMgmt->sNodeDBTable[uNodeIndex].wMaxBasicRate),
- &(pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate),
- &(pMgmt->sNodeDBTable[uNodeIndex].wSuppRate),
- &(pMgmt->sNodeDBTable[uNodeIndex].byTopCCKBasicRate),
- &(pMgmt->sNodeDBTable[uNodeIndex].byTopOFDMBasicRate)
- );
- pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble = WLAN_GET_CAP_INFO_SHORTPREAMBLE(*sFrame.pwCapInfo);
- pMgmt->sNodeDBTable[uNodeIndex].bShortSlotTime = WLAN_GET_CAP_INFO_SHORTSLOTTIME(*sFrame.pwCapInfo);
- pMgmt->sNodeDBTable[uNodeIndex].uInActiveCount = 0;
- } else {
- // TODO: initialize node content
- BSSvCreateOneNode(pDevice, &uNodeIndex);
-
- pMgmt->abyCurrSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)sFrame.pSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- WLAN_RATES_MAXLEN_11B);
- RATEvParseMaxRate((void *)pDevice,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- NULL,
- true,
- &(pMgmt->sNodeDBTable[uNodeIndex].wMaxBasicRate),
- &(pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate),
- &(pMgmt->sNodeDBTable[uNodeIndex].wSuppRate),
- &(pMgmt->sNodeDBTable[uNodeIndex].byTopCCKBasicRate),
- &(pMgmt->sNodeDBTable[uNodeIndex].byTopOFDMBasicRate)
- );
-
- memcpy(pMgmt->sNodeDBTable[uNodeIndex].abyMACAddr, sFrame.pHdr->sA3.abyAddr2, WLAN_ADDR_LEN);
- pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble = WLAN_GET_CAP_INFO_SHORTPREAMBLE(*sFrame.pwCapInfo);
- pMgmt->sNodeDBTable[uNodeIndex].wTxDataRate = pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate;
- {
- pr_debug("s_vMgrRxBeacon:TxDataRate is %d,Index is %d\n", pMgmt->sNodeDBTable[uNodeIndex].wTxDataRate, uNodeIndex);
- }
- }
-
- // if other stations have joined, indicate the connection to the upper layer
- if (pMgmt->eCurrState == WMAC_STATE_STARTED) {
- pr_debug("Current IBSS State: [Started]........to: [Jointed]\n");
- pMgmt->eCurrState = WMAC_STATE_JOINTED;
- pDevice->bLinkPass = true;
- if (netif_queue_stopped(pDevice->dev))
- netif_wake_queue(pDevice->dev);
-
- pMgmt->sNodeDBTable[0].bActive = true;
- pMgmt->sNodeDBTable[0].uInActiveCount = 0;
-
- }
- } else if (bIsSSIDEqual) {
- // Saw another adhoc STA with the same SSID but a different BSSID.
- // Adopt its parameters only when its TSF is larger than ours.
- if (bTSFLargeDiff && bTSFOffsetPostive) {
- // we don't support ATIM under adhoc mode
- // if (sFrame.pIBSSParms->wATIMWindow == 0) {
- // adopt these parameters
- // TODO: check sFrame cap if privacy on, and support rate syn
- memcpy(pMgmt->abyCurrBSSID, sFrame.pHdr->sA3.abyAddr3, WLAN_BSSID_LEN);
- memcpy(pDevice->abyBSSID, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
- pMgmt->wCurrATIMWindow = cpu_to_le16(sFrame.pIBSSParms->wATIMWindow);
- pMgmt->wCurrBeaconPeriod = cpu_to_le16(*sFrame.pwBeaconInterval);
- pMgmt->abyCurrSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)sFrame.pSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- WLAN_RATES_MAXLEN_11B);
- // set HW beacon interval and re-synchronizing....
- pr_debug("Rejoining to Other Adhoc group with same SSID........\n");
- VNSvOutPortW(pDevice->PortOffset + MAC_REG_BI, pMgmt->wCurrBeaconPeriod);
- CARDbUpdateTSF(pDevice, pRxPacket->byRxRate, qwTimestamp, qwLocalTSF);
- CARDvUpdateNextTBTT(pDevice->PortOffset, qwTimestamp, pMgmt->wCurrBeaconPeriod);
- // Turn off the BSSID filter to avoid filtering out other adhoc stations whose BSSID differs.
- MACvWriteBSSIDAddress(pDevice->PortOffset, pMgmt->abyCurrBSSID);
-
- CARDbSetPhyParameter(pMgmt->pAdapter,
- pMgmt->eCurrentPHYMode,
- pMgmt->wCurrCapInfo,
- pMgmt->byERPContext,
- pMgmt->abyCurrSuppRates,
- pMgmt->abyCurrExtSuppRates);
-
- // Prepare beacon frame
- bMgrPrepareBeaconToSend((void *)pDevice, pMgmt);
- }
- }
- }
- // endian issue ???
- // Update TSF
-if (bUpdateTSF) {
- CARDbGetCurrentTSF(pDevice->PortOffset, &qwCurrTSF);
- CARDbUpdateTSF(pDevice, pRxPacket->byRxRate, qwTimestamp, pRxPacket->qwLocalTSF);
- CARDbGetCurrentTSF(pDevice->PortOffset, &qwCurrTSF);
- CARDvUpdateNextTBTT(pDevice->PortOffset, qwTimestamp, pMgmt->wCurrBeaconPeriod);
- }
-}
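/*
 * Illustrative sketch only (not part of the removed driver code): how a
 * station locates its AID bit in the TIM partial virtual bitmap, mirroring
 * the WLAN_MGMT_GET_TIM_OFFSET()/byVirtBitMap handling in the beacon path
 * above.  The helper name and plain C types are assumptions for the example.
 */
#include <stdbool.h>
#include <stddef.h>

static bool aid_set_in_tim(unsigned short aid, unsigned char bitmap_ctl,
			   const unsigned char *virt_bitmap, size_t map_len)
{
	unsigned short n1 = bitmap_ctl & 0xFE;	/* N1: bitmap offset * 2, always even */
	unsigned short n2 = aid >> 3;		/* N2: byte holding this AID's bit */

	if (!aid || n2 < n1 || (size_t)(n2 - n1) >= map_len)
		return false;			/* AID 0 (multicast) is flagged separately */

	return (virt_bitmap[n2 - n1] >> (aid & 7)) & 1;
}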
-
-/*+
- *
- * Routine Description:
- * Instructs the hw to create a bss using the supplied
- * attributes. Note that this implementation only supports Ad-Hoc
- * BSS creation.
- *
- *
- * Return Value:
- * CMD_STATUS
- *
- -*/
-void
-vMgrCreateOwnIBSS(
- void *hDeviceContext,
- PCMD_STATUS pStatus
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- unsigned short wMaxBasicRate;
- unsigned short wMaxSuppRate;
- unsigned char byTopCCKBasicRate;
- unsigned char byTopOFDMBasicRate;
- u64 qwCurrTSF;
- unsigned int ii;
- unsigned char abyRATE[] = {0x82, 0x84, 0x8B, 0x96, 0x24, 0x30, 0x48, 0x6C, 0x0C, 0x12, 0x18, 0x60};
- unsigned char abyCCK_RATE[] = {0x82, 0x84, 0x8B, 0x96};
- unsigned char abyOFDM_RATE[] = {0x0C, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C};
- unsigned short wSuppRate;
-
- pr_debug("Create Basic Service Set .......\n");
-
- if (pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) {
- if ((pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) &&
- (pDevice->eEncryptionStatus != Ndis802_11Encryption2Enabled) &&
- (pDevice->eEncryptionStatus != Ndis802_11Encryption3Enabled)) {
- // encryption mode error
- *pStatus = CMD_STATUS_FAILURE;
- return;
- }
- }
-
- pMgmt->abyCurrSuppRates[0] = WLAN_EID_SUPP_RATES;
- pMgmt->abyCurrExtSuppRates[0] = WLAN_EID_EXTSUPP_RATES;
-
- if (pMgmt->eConfigMode == WMAC_CONFIG_AP) {
- pMgmt->eCurrentPHYMode = pMgmt->byAPBBType;
- } else {
- if (pDevice->byBBType == BB_TYPE_11G)
- pMgmt->eCurrentPHYMode = PHY_TYPE_11G;
- if (pDevice->byBBType == BB_TYPE_11B)
- pMgmt->eCurrentPHYMode = PHY_TYPE_11B;
- if (pDevice->byBBType == BB_TYPE_11A)
- pMgmt->eCurrentPHYMode = PHY_TYPE_11A;
- }
-
- if (pMgmt->eCurrentPHYMode != PHY_TYPE_11A) {
- pMgmt->abyCurrSuppRates[1] = WLAN_RATES_MAXLEN_11B;
- pMgmt->abyCurrExtSuppRates[1] = 0;
- for (ii = 0; ii < 4; ii++)
- pMgmt->abyCurrSuppRates[2+ii] = abyRATE[ii];
- } else {
- pMgmt->abyCurrSuppRates[1] = 8;
- pMgmt->abyCurrExtSuppRates[1] = 0;
- for (ii = 0; ii < 8; ii++)
- pMgmt->abyCurrSuppRates[2+ii] = abyRATE[ii];
- }
-
- if (pMgmt->eCurrentPHYMode == PHY_TYPE_11G) {
- pMgmt->abyCurrSuppRates[1] = 8;
- pMgmt->abyCurrExtSuppRates[1] = 4;
- for (ii = 0; ii < 4; ii++)
- pMgmt->abyCurrSuppRates[2+ii] = abyCCK_RATE[ii];
- for (ii = 4; ii < 8; ii++)
- pMgmt->abyCurrSuppRates[2+ii] = abyOFDM_RATE[ii-4];
- for (ii = 0; ii < 4; ii++)
- pMgmt->abyCurrExtSuppRates[2+ii] = abyOFDM_RATE[ii+4];
- }
-
- // Disable Protect Mode
- pDevice->bProtectMode = false;
- MACvDisableProtectMD(pDevice->PortOffset);
-
- pDevice->bBarkerPreambleMd = false;
- MACvDisableBarkerPreambleMd(pDevice->PortOffset);
-
- // Kyle Test 2003.11.04
-
- // set HW beacon interval
- if (pMgmt->wIBSSBeaconPeriod == 0)
- pMgmt->wIBSSBeaconPeriod = DEFAULT_IBSS_BI;
-
- CARDbGetCurrentTSF(pDevice->PortOffset, &qwCurrTSF);
- // clear TSF counter
- VNSvOutPortB(pDevice->PortOffset + MAC_REG_TFTCTL, TFTCTL_TSFCNTRST);
- // enable TSF counter
- VNSvOutPortB(pDevice->PortOffset + MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
-
- // set Next TBTT
- CARDvSetFirstNextTBTT(pDevice->PortOffset, pMgmt->wIBSSBeaconPeriod);
-
- pMgmt->uIBSSChannel = pDevice->uChannel;
-
- if (pMgmt->uIBSSChannel == 0)
- pMgmt->uIBSSChannel = DEFAULT_IBSS_CHANNEL;
-
- // set basic rate
-
- RATEvParseMaxRate((void *)pDevice, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates, true,
- &wMaxBasicRate, &wMaxSuppRate, &wSuppRate,
- &byTopCCKBasicRate, &byTopOFDMBasicRate);
-
- if (pMgmt->eConfigMode == WMAC_CONFIG_AP)
- pMgmt->eCurrMode = WMAC_MODE_ESS_AP;
-
- if (pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) {
- memcpy(pMgmt->abyIBSSDFSOwner, pDevice->abyCurrentNetAddr, 6);
- pMgmt->byIBSSDFSRecovery = 10;
- pMgmt->eCurrMode = WMAC_MODE_IBSS_STA;
- }
-
- // Adopt pre-configured IBSS vars to current vars
- pMgmt->eCurrState = WMAC_STATE_STARTED;
- pMgmt->wCurrBeaconPeriod = pMgmt->wIBSSBeaconPeriod;
- pMgmt->uCurrChannel = pMgmt->uIBSSChannel;
- pMgmt->wCurrATIMWindow = pMgmt->wIBSSATIMWindow;
- MACvWriteATIMW(pDevice->PortOffset, pMgmt->wCurrATIMWindow);
- pDevice->uCurrRSSI = 0;
- pDevice->byCurrSQ = 0;
- memset(pMgmt->abyCurrSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- memcpy(pMgmt->abyCurrSSID,
- pMgmt->abyDesireSSID,
- ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->len + WLAN_IEHDR_LEN
-);
-
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- // AP mode BSSID = MAC addr
- memcpy(pMgmt->abyCurrBSSID, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
- pr_info("AP beacon created BSSID:%pM\n",
- pMgmt->abyCurrBSSID);
- }
-
- if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
- // The selected BSSID must be randomized per spec 11.1.3
- pMgmt->abyCurrBSSID[5] = (u8) (qwCurrTSF & 0x000000ff);
- pMgmt->abyCurrBSSID[4] = (u8) ((qwCurrTSF & 0x0000ff00) >> 8);
- pMgmt->abyCurrBSSID[3] = (u8) ((qwCurrTSF & 0x00ff0000) >> 16);
- pMgmt->abyCurrBSSID[2] = (u8) ((qwCurrTSF & 0x00000ff0) >> 4);
- pMgmt->abyCurrBSSID[1] = (u8) ((qwCurrTSF & 0x000ff000) >> 12);
- pMgmt->abyCurrBSSID[0] = (u8) ((qwCurrTSF & 0x0ff00000) >> 20);
- pMgmt->abyCurrBSSID[5] ^= pMgmt->abyMACAddr[0];
- pMgmt->abyCurrBSSID[4] ^= pMgmt->abyMACAddr[1];
- pMgmt->abyCurrBSSID[3] ^= pMgmt->abyMACAddr[2];
- pMgmt->abyCurrBSSID[2] ^= pMgmt->abyMACAddr[3];
- pMgmt->abyCurrBSSID[1] ^= pMgmt->abyMACAddr[4];
- pMgmt->abyCurrBSSID[0] ^= pMgmt->abyMACAddr[5];
- pMgmt->abyCurrBSSID[0] &= ~IEEE_ADDR_GROUP;
- pMgmt->abyCurrBSSID[0] |= IEEE_ADDR_UNIVERSAL;
-
- pr_info("Adhoc beacon created bssid:%pM\n",
- pMgmt->abyCurrBSSID);
- }
-
- // Set Capability Info
- pMgmt->wCurrCapInfo = 0;
-
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_ESS(1);
- pMgmt->byDTIMPeriod = DEFAULT_DTIM_PERIOD;
- pMgmt->byDTIMCount = pMgmt->byDTIMPeriod - 1;
- }
-
- if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_IBSS(1);
-
- if (pDevice->bEncryptionEnable) {
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_PRIVACY(1);
- if (pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) {
- if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) {
- pMgmt->byCSSPK = KEY_CTL_CCMP;
- pMgmt->byCSSGK = KEY_CTL_CCMP;
- } else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) {
- pMgmt->byCSSPK = KEY_CTL_TKIP;
- pMgmt->byCSSGK = KEY_CTL_TKIP;
- } else {
- pMgmt->byCSSPK = KEY_CTL_NONE;
- pMgmt->byCSSGK = KEY_CTL_WEP;
- }
- } else {
- pMgmt->byCSSPK = KEY_CTL_WEP;
- pMgmt->byCSSGK = KEY_CTL_WEP;
- }
- }
-
- pMgmt->byERPContext = 0;
-
- if (pMgmt->eConfigMode == WMAC_CONFIG_AP) {
- CARDbSetBSSID(pMgmt->pAdapter, pMgmt->abyCurrBSSID, NL80211_IFTYPE_AP);
- } else {
- CARDbSetBSSID(pMgmt->pAdapter, pMgmt->abyCurrBSSID, NL80211_IFTYPE_ADHOC);
- }
-
- CARDbSetPhyParameter(pMgmt->pAdapter,
- pMgmt->eCurrentPHYMode,
- pMgmt->wCurrCapInfo,
- pMgmt->byERPContext,
- pMgmt->abyCurrSuppRates,
- pMgmt->abyCurrExtSuppRates
- );
-
- CARDbSetBeaconPeriod(pMgmt->pAdapter, pMgmt->wIBSSBeaconPeriod);
- // set channel and clear NAV
- set_channel(pMgmt->pAdapter, pMgmt->uIBSSChannel);
- pMgmt->uCurrChannel = pMgmt->uIBSSChannel;
-
- if (CARDbIsShortPreamble(pMgmt->pAdapter))
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTPREAMBLE(1);
- else
- pMgmt->wCurrCapInfo &= (~WLAN_SET_CAP_INFO_SHORTPREAMBLE(1));
-
- if (pMgmt->b11hEnable &&
- (pMgmt->eCurrentPHYMode == PHY_TYPE_11A)) {
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SPECTRUMMNG(1);
- } else {
- pMgmt->wCurrCapInfo &= (~WLAN_SET_CAP_INFO_SPECTRUMMNG(1));
- }
-
- pMgmt->eCurrState = WMAC_STATE_STARTED;
- // Prepare beacon to send
- if (bMgrPrepareBeaconToSend((void *)pDevice, pMgmt))
- *pStatus = CMD_STATUS_SUCCESS;
-}
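/*
 * Illustrative sketch only (simplified; not the driver's exact byte/shift
 * layout): building a random, locally administered IBSS BSSID by mixing the
 * current TSF into the station MAC address and then forcing the address
 * bits, as vMgrCreateOwnIBSS() does above.  The 0x01/0x02 masks stand in
 * for the IEEE_ADDR_GROUP/IEEE_ADDR_UNIVERSAL constants and are assumptions.
 */
static void make_ibss_bssid(unsigned char bssid[6],
			    const unsigned char mac[6],
			    unsigned long long tsf)
{
	int i;

	for (i = 0; i < 6; i++)		/* spread TSF bits across all six bytes */
		bssid[i] = mac[i] ^ (unsigned char)(tsf >> (4 * i));

	bssid[0] &= ~0x01;		/* individual (not group) address */
	bssid[0] |= 0x02;		/* locally administered */
}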
-
-/*+
- *
- * Routine Description:
- * Instructs the wmac to join a BSS using the supplied attributes.
- * The argument may be the BSSID or SSID; the rest of the
- * attributes are obtained from the scan result in the known BSS list.
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrJoinBSSBegin(
- void *hDeviceContext,
- PCMD_STATUS pStatus
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- PKnownBSS pCurr = NULL;
- unsigned int ii, uu;
- PWLAN_IE_SUPP_RATES pItemRates = NULL;
- PWLAN_IE_SUPP_RATES pItemExtRates = NULL;
- PWLAN_IE_SSID pItemSSID;
- unsigned int uRateLen = WLAN_RATES_MAXLEN;
- unsigned short wMaxBasicRate = RATE_1M;
- unsigned short wMaxSuppRate = RATE_1M;
- unsigned short wSuppRate;
- unsigned char byTopCCKBasicRate = RATE_1M;
- unsigned char byTopOFDMBasicRate = RATE_1M;
-
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- if (pMgmt->sBSSList[ii].bActive)
- break;
- }
-
- if (ii == MAX_BSS_NUM) {
- *pStatus = CMD_STATUS_RESOURCES;
- pr_info("BSS finding:BSS list is empty\n");
- return;
- }
-
- // Search the known BSS list for the preferred BSSID or SSID
-
- pCurr = BSSpSearchBSSList(pDevice,
- pMgmt->abyDesireBSSID,
- pMgmt->abyDesireSSID,
- pMgmt->eConfigPHYMode
-);
-
- if (pCurr == NULL) {
- *pStatus = CMD_STATUS_RESOURCES;
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
- pr_info("Scanning [%s] not found, disconnected !\n",
- pItemSSID->abySSID);
- return;
- }
-
- pr_info("AP(BSS) finding:Found a AP(BSS)..\n");
- if (WLAN_GET_CAP_INFO_ESS(cpu_to_le16(pCurr->wCapInfo))) {
- if ((pMgmt->eAuthenMode == WMAC_AUTH_WPA) || (pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK)) {
- // patch for CISCO migration mode
- }
-
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- Encyption_Rebuild(pDevice, pCurr);
-#endif
- // Infrastructure BSS
- s_vMgrSynchBSS(pDevice,
- WMAC_MODE_ESS_STA,
- pCurr,
- pStatus
-);
-
- if (*pStatus == CMD_STATUS_SUCCESS) {
- // Adopt this BSS state vars in Mgmt Object
- pMgmt->uCurrChannel = pCurr->uChannel;
-
- memset(pMgmt->abyCurrSuppRates, 0 , WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1);
- memset(pMgmt->abyCurrExtSuppRates, 0 , WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1);
-
- if (pCurr->eNetworkTypeInUse == PHY_TYPE_11B)
- uRateLen = WLAN_RATES_MAXLEN_11B;
-
- pItemRates = (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates;
- pItemExtRates = (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates;
-
- // Parse Support Rate IE
- pItemRates->byElementID = WLAN_EID_SUPP_RATES;
- pItemRates->len = RATEuSetIE((PWLAN_IE_SUPP_RATES)pCurr->abySuppRates,
- pItemRates,
- uRateLen);
-
- // Parse Extension Support Rate IE
- pItemExtRates->byElementID = WLAN_EID_EXTSUPP_RATES;
- pItemExtRates->len = RATEuSetIE((PWLAN_IE_SUPP_RATES)pCurr->abyExtSuppRates,
- pItemExtRates,
- uRateLen);
- // Stuff the Supported Rates IE up to 8 entries from the extended rates (see the sketch after vMgrJoinBSSBegin())
- if ((pItemExtRates->len > 0) && (pItemRates->len < 8)) {
- for (ii = 0; ii < (unsigned int)(8 - pItemRates->len);) {
- pItemRates->abyRates[pItemRates->len + ii] = pItemExtRates->abyRates[ii];
- ii++;
- if (pItemExtRates->len <= ii)
- break;
- }
- pItemRates->len += (unsigned char)ii;
- if (pItemExtRates->len - ii > 0) {
- pItemExtRates->len -= (unsigned char)ii;
- for (uu = 0; uu < pItemExtRates->len; uu++)
- pItemExtRates->abyRates[uu] = pItemExtRates->abyRates[uu + ii];
- } else {
- pItemExtRates->len = 0;
- }
- }
-
- RATEvParseMaxRate((void *)pDevice, pItemRates, pItemExtRates, true,
- &wMaxBasicRate, &wMaxSuppRate, &wSuppRate,
- &byTopCCKBasicRate, &byTopOFDMBasicRate);
-
- // TODO: deal with if wCapInfo the privacy is on, but station WEP is off
- // TODO: deal with if wCapInfo the PS-Pollable is on.
- pMgmt->wCurrBeaconPeriod = pCurr->wBeaconInterval;
- memset(pMgmt->abyCurrSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- memcpy(pMgmt->abyCurrBSSID, pCurr->abyBSSID, WLAN_BSSID_LEN);
- memcpy(pMgmt->abyCurrSSID, pCurr->abySSID, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
-
- pMgmt->eCurrMode = WMAC_MODE_ESS_STA;
-
- pMgmt->eCurrState = WMAC_STATE_JOINTED;
-
- // Add current BSS to Candidate list
- // This only works for a WPA2 BSS, and the WPA2 BSS check must be done beforehand.
- if (pMgmt->eAuthenMode == WMAC_AUTH_WPA2) {
- bool bResult = bAdd_PMKID_Candidate((void *)pDevice, pMgmt->abyCurrBSSID, &pCurr->sRSNCapObj);
-
- pr_debug("bAdd_PMKID_Candidate: 1(%d)\n",
- bResult);
- if (!bResult) {
- vFlush_PMKID_Candidate((void *)pDevice);
- pr_debug("vFlush_PMKID_Candidate: 4\n");
- bAdd_PMKID_Candidate((void *)pDevice, pMgmt->abyCurrBSSID, &pCurr->sRSNCapObj);
- }
- }
-
- // Preamble type auto-switch: if the AP advertises the short-preamble
- // capability, we can turn it on too.
-
- pr_debug("Join ESS\n");
-
- pr_debug("End of Join AP -- A/B/G Action\n");
- } else {
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- }
-
- } else {
- // ad-hoc mode BSS
- if (pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) {
- if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) {
- if (!WPA_SearchRSN(0, WPA_TKIP, pCurr)) {
- // encryption mode error
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- return;
- }
- } else if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) {
- if (!WPA_SearchRSN(0, WPA_AESCCMP, pCurr)) {
- // encryption mode error
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- return;
- }
- } else {
- // encryption mode error
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- return;
- }
- }
-
- s_vMgrSynchBSS(pDevice,
- WMAC_MODE_IBSS_STA,
- pCurr,
- pStatus
-);
-
- if (*pStatus == CMD_STATUS_SUCCESS) {
- // Adopt this BSS state vars in Mgmt Object
- // TODO: check whether the CapInfo privacy bit is on; we currently don't
- pMgmt->uCurrChannel = pCurr->uChannel;
-
- // Parse Support Rate IE
- pMgmt->abyCurrSuppRates[0] = WLAN_EID_SUPP_RATES;
- pMgmt->abyCurrSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)pCurr->abySuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- WLAN_RATES_MAXLEN_11B);
- // set basic rate
- RATEvParseMaxRate((void *)pDevice, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- NULL, true, &wMaxBasicRate, &wMaxSuppRate, &wSuppRate,
- &byTopCCKBasicRate, &byTopOFDMBasicRate);
-
- pMgmt->wCurrCapInfo = pCurr->wCapInfo;
- pMgmt->wCurrBeaconPeriod = pCurr->wBeaconInterval;
- memset(pMgmt->abyCurrSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN);
- memcpy(pMgmt->abyCurrBSSID, pCurr->abyBSSID, WLAN_BSSID_LEN);
- memcpy(pMgmt->abyCurrSSID, pCurr->abySSID, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN);
- MACvWriteATIMW(pDevice->PortOffset, pMgmt->wCurrATIMWindow);
- pMgmt->eCurrMode = WMAC_MODE_IBSS_STA;
-
- pMgmt->eCurrState = WMAC_STATE_STARTED;
-
- pr_debug("Join IBSS ok:%pM\n",
- pMgmt->abyCurrBSSID);
- // Preamble type auto-switch: if the AP advertises the short-preamble
- // capability and the registry setting is short preamble, we can turn it on too.
-
- // Prepare beacon
- bMgrPrepareBeaconToSend((void *)pDevice, pMgmt);
- } else {
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- }
- }
-}
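/*
 * Illustrative sketch only: the rate "stuffing" used in vMgrJoinBSSBegin()
 * above -- top the Supported Rates IE up to 8 entries from the Extended
 * Supported Rates IE, then drop the moved entries from the extended list.
 * The helper name and raw byte arrays are assumptions for the example.
 */
#include <string.h>

static void stuff_rates(unsigned char *rates, unsigned char *rates_len,
			unsigned char *ext, unsigned char *ext_len)
{
	unsigned char moved = 0;

	while (*rates_len + moved < 8 && moved < *ext_len) {
		rates[*rates_len + moved] = ext[moved];
		moved++;
	}

	*rates_len += moved;
	*ext_len -= moved;
	memmove(ext, ext + moved, *ext_len);	/* shift remaining extended rates down */
}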
-
-/*+
- *
- * Routine Description:
- * Set the HW to synchronize with a specific BSS from the known BSS list.
- *
- *
- * Return Value:
- * CMD_STATUS (returned via pStatus)
- *
- -*/
-static
-void
-s_vMgrSynchBSS(
- struct vnt_private *pDevice,
- unsigned int uBSSMode,
- PKnownBSS pCurr,
- PCMD_STATUS pStatus
-)
-{
- CARD_PHY_TYPE ePhyType = PHY_TYPE_11B;
- PSMgmtObject pMgmt = pDevice->pMgmt;
-
- //1M, 2M, 5.5M, 11M, 18M, 24M, 36M, 54M
- unsigned char abyCurrSuppRatesG[] = {WLAN_EID_SUPP_RATES, 8, 0x02, 0x04, 0x0B, 0x16, 0x24, 0x30, 0x48, 0x6C};
- unsigned char abyCurrExtSuppRatesG[] = {WLAN_EID_EXTSUPP_RATES, 4, 0x0C, 0x12, 0x18, 0x60};
- //6M, 9M, 12M, 18M, 24M, 36M, 48M, 54M
- unsigned char abyCurrSuppRatesA[] = {WLAN_EID_SUPP_RATES, 8, 0x0C, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C};
- unsigned char abyCurrSuppRatesB[] = {WLAN_EID_SUPP_RATES, 4, 0x02, 0x04, 0x0B, 0x16};
-
- *pStatus = CMD_STATUS_FAILURE;
-
- if (!s_bCipherMatch(pCurr,
- pDevice->eEncryptionStatus,
- &(pMgmt->byCSSPK),
- &(pMgmt->byCSSGK))) {
- pr_debug("s_bCipherMatch Fail .......\n");
- return;
- }
-
- pMgmt->pCurrBSS = pCurr;
-
- // if previous mode is IBSS.
- if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
- MACvRegBitsOff(pDevice->PortOffset, MAC_REG_BCNDMACTL, BEACON_READY);
- MACvRegBitsOff(pDevice->PortOffset, MAC_REG_TCR, TCR_AUTOBCNTX);
- }
-
- // Init the BSS information
- pDevice->bProtectMode = false;
- MACvDisableProtectMD(pDevice->PortOffset);
- pDevice->bBarkerPreambleMd = false;
- MACvDisableBarkerPreambleMd(pDevice->PortOffset);
- pDevice->bNonERPPresent = false;
- pDevice->byPreambleType = 0;
- pDevice->wBasicRate = 0;
- // Set Basic Rate
- CARDbAddBasicRate((void *)pDevice, RATE_1M);
- // calculate TSF offset
- // TSF Offset = Received Timestamp TSF - Marked Local's TSF
- CARDbUpdateTSF(pDevice, pCurr->byRxRate, pCurr->qwBSSTimestamp, pCurr->qwLocalTSF);
-
- CARDbSetBeaconPeriod(pDevice, pCurr->wBeaconInterval);
-
- // set Next TBTT
- // Next TBTT = ((local_current_TSF / beacon_interval) + 1) * beacon_interval
- CARDvSetFirstNextTBTT(pDevice->PortOffset, pCurr->wBeaconInterval);
-
- // set BSSID
- MACvWriteBSSIDAddress(pDevice->PortOffset, pCurr->abyBSSID);
-
- MACvReadBSSIDAddress(pDevice->PortOffset, pMgmt->abyCurrBSSID);
-
- pr_debug("Sync:set CurrBSSID address = %pM\n", pMgmt->abyCurrBSSID);
-
- if (pCurr->eNetworkTypeInUse == PHY_TYPE_11A) {
- if ((pMgmt->eConfigPHYMode == PHY_TYPE_11A) ||
- (pMgmt->eConfigPHYMode == PHY_TYPE_AUTO)) {
- ePhyType = PHY_TYPE_11A;
- } else {
- return;
- }
- } else if (pCurr->eNetworkTypeInUse == PHY_TYPE_11B) {
- if ((pMgmt->eConfigPHYMode == PHY_TYPE_11B) ||
- (pMgmt->eConfigPHYMode == PHY_TYPE_11G) ||
- (pMgmt->eConfigPHYMode == PHY_TYPE_AUTO)) {
- ePhyType = PHY_TYPE_11B;
- } else {
- return;
- }
- } else {
- if ((pMgmt->eConfigPHYMode == PHY_TYPE_11G) ||
- (pMgmt->eConfigPHYMode == PHY_TYPE_AUTO)) {
- ePhyType = PHY_TYPE_11G;
- } else if (pMgmt->eConfigPHYMode == PHY_TYPE_11B) {
- ePhyType = PHY_TYPE_11B;
- } else {
- return;
- }
- }
-
- if (ePhyType == PHY_TYPE_11A) {
- memcpy(pMgmt->abyCurrSuppRates, &abyCurrSuppRatesA[0], sizeof(abyCurrSuppRatesA));
- pMgmt->abyCurrExtSuppRates[1] = 0;
- } else if (ePhyType == PHY_TYPE_11B) {
- memcpy(pMgmt->abyCurrSuppRates, &abyCurrSuppRatesB[0], sizeof(abyCurrSuppRatesB));
- pMgmt->abyCurrExtSuppRates[1] = 0;
- } else {
- memcpy(pMgmt->abyCurrSuppRates, &abyCurrSuppRatesG[0], sizeof(abyCurrSuppRatesG));
- memcpy(pMgmt->abyCurrExtSuppRates, &abyCurrExtSuppRatesG[0], sizeof(abyCurrExtSuppRatesG));
- }
-
- if (WLAN_GET_CAP_INFO_ESS(pCurr->wCapInfo)) {
- CARDbSetBSSID(pMgmt->pAdapter, pCurr->abyBSSID, NL80211_IFTYPE_STATION);
- // Add current BSS to Candidate list
- // This only works for a WPA2 BSS, and the WPA2 BSS check must be done beforehand.
- if (pMgmt->eAuthenMode == WMAC_AUTH_WPA2)
- CARDbAdd_PMKID_Candidate(pMgmt->pAdapter, pMgmt->abyCurrBSSID, pCurr->sRSNCapObj.bRSNCapExist, pCurr->sRSNCapObj.wRSNCap);
- } else {
- CARDbSetBSSID(pMgmt->pAdapter, pCurr->abyBSSID, NL80211_IFTYPE_ADHOC);
- }
-
- if (!CARDbSetPhyParameter(pMgmt->pAdapter,
- ePhyType,
- pCurr->wCapInfo,
- pCurr->sERP.byERP,
- pMgmt->abyCurrSuppRates,
- pMgmt->abyCurrExtSuppRates)) {
- pr_debug("<----s_bSynchBSS Set Phy Mode Fail [%d]\n", ePhyType);
- return;
- }
- // set channel and clear NAV
- if (!set_channel(pMgmt->pAdapter, pCurr->uChannel)) {
- pr_debug("<----s_bSynchBSS Set Channel [%d]\n",
- pCurr->uChannel);
- return;
- }
-
- pMgmt->uCurrChannel = pCurr->uChannel;
- pMgmt->eCurrentPHYMode = ePhyType;
- pMgmt->byERPContext = pCurr->sERP.byERP;
- pr_debug("Sync:Set to channel = [%d]\n", (int)pCurr->uChannel);
-
- *pStatus = CMD_STATUS_SUCCESS;
-
- return;
-}
-
-//mike add: fix NetworkManager 0.7.0 hidden SSID mode with WPA encryption;
-// need to reset eAuthenMode and eEncryptionStatus
-static void Encyption_Rebuild(
- struct vnt_private *pDevice,
- PKnownBSS pCurr
-)
-{
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
-
- if ((pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK) || // NetworkManager 0.7.0 does not give the pairwise-key selection,
- (pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) { // so we need to re-select it according to the real pairwise-key info.
- if (pCurr->bWPAValid) { //WPA-PSK
- pMgmt->eAuthenMode = WMAC_AUTH_WPAPSK;
- if (pCurr->abyPKType[0] == WPA_TKIP) {
- pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled; //TKIP
- PRINT_K("Encyption_Rebuild--->ssid reset config to [WPAPSK-TKIP]\n");
- } else if (pCurr->abyPKType[0] == WPA_AESCCMP) {
- pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled; //AES
- PRINT_K("Encyption_Rebuild--->ssid reset config to [WPAPSK-AES]\n");
- }
- } else if (pCurr->bWPA2Valid) { //WPA2-PSK
- pMgmt->eAuthenMode = WMAC_AUTH_WPA2PSK;
- if (pCurr->abyCSSPK[0] == WLAN_11i_CSS_TKIP) {
- pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled; //TKIP
- PRINT_K("Encyption_Rebuild--->ssid reset config to [WPA2PSK-TKIP]\n");
- } else if (pCurr->abyCSSPK[0] == WLAN_11i_CSS_CCMP) {
- pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled; //AES
- PRINT_K("Encyption_Rebuild--->ssid reset config to [WPA2PSK-AES]\n");
- }
- }
- }
-}
-
-/*+
- *
- * Routine Description:
- * Format TIM field
- *
- *
- * Return Value:
- * void
- *
- -*/
-
-static
-void
-s_vMgrFormatTIM(
- PSMgmtObject pMgmt,
- PWLAN_IE_TIM pTIM
-)
-{
- unsigned char byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
- unsigned char byMap;
- unsigned int ii, jj;
- bool bStartFound = false;
- bool bMulticast = false;
- unsigned short wStartIndex = 0;
- unsigned short wEndIndex = 0;
-
- // Find size of partial virtual bitmap
- for (ii = 0; ii < (MAX_NODE_NUM + 1); ii++) {
- byMap = pMgmt->abyPSTxMap[ii];
- if (!ii) {
- // Mask out the broadcast bit which is indicated separately.
- bMulticast = (byMap & byMask[0]) != 0;
- if (bMulticast)
- pMgmt->sNodeDBTable[0].bRxPSPoll = true;
-
- byMap = 0;
- }
- if (byMap) {
- if (!bStartFound) {
- bStartFound = true;
- wStartIndex = ii;
- }
- wEndIndex = ii;
- }
- }
-
- // Round start index down to nearest even number
- wStartIndex &= ~BIT0;
-
- // Round end index up to nearest even number
- wEndIndex = ((wEndIndex + 1) & ~BIT0);
-
- // Size of element payload
-
- pTIM->len = 3 + (wEndIndex - wStartIndex) + 1;
-
- // Fill in the Fixed parts of the TIM
- pTIM->byDTIMCount = pMgmt->byDTIMCount;
- pTIM->byDTIMPeriod = pMgmt->byDTIMPeriod;
- pTIM->byBitMapCtl = (bMulticast ? TIM_MULTICAST_MASK : 0) |
- (((wStartIndex >> 1) << 1) & TIM_BITMAPOFFSET_MASK);
-
- // Append variable part of TIM
-
- for (ii = wStartIndex, jj = 0; ii <= wEndIndex; ii++, jj++)
- pTIM->byVirtBitMap[jj] = pMgmt->abyPSTxMap[ii];
-
- // AID 0 is not used.
- pTIM->byVirtBitMap[0] &= ~BIT0;
-}
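/*
 * Worked example (assumed input) for the length math above: if only AID 13
 * has buffered frames, abyPSTxMap[1] has bit 5 set (13 >> 3 == 1, 13 & 7 == 5).
 * The start index 1 rounds down to 0 and the end index 1 rounds up to 2, so
 * three partial-bitmap bytes are copied and pTIM->len = 3 + (2 - 0) + 1 = 6
 * (DTIM count, DTIM period, bitmap control, plus 3 bitmap bytes).
 */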
-
-/*+
- *
- * Routine Description:
- * Constructs a Beacon frame (Ad-hoc mode)
- *
- *
- * Return Value:
- * PTR to frame; or NULL on allocation failure
- *
- -*/
-
-static
-PSTxMgmtPacket
-s_MgrMakeBeacon(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- unsigned short wCurrCapInfo,
- unsigned short wCurrBeaconPeriod,
- unsigned int uCurrChannel,
- unsigned short wCurrATIMWinodw,
- PWLAN_IE_SSID pCurrSSID,
- unsigned char *pCurrBSSID,
- PWLAN_IE_SUPP_RATES pCurrSuppRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates
-)
-{
- PSTxMgmtPacket pTxPacket = NULL;
- WLAN_FR_BEACON sFrame;
- unsigned char abyBroadcastAddr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- unsigned char *pbyBuffer;
- unsigned int uLength = 0;
- PWLAN_IE_IBSS_DFS pIBSSDFS = NULL;
- unsigned int ii;
-
- // prepare beacon frame
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_BEACON_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
- // Setup the sFrame structure.
- sFrame.pBuf = (unsigned char *)pTxPacket->p80211Header;
- sFrame.len = WLAN_BEACON_FR_MAXLEN;
- vMgrEncodeBeacon(&sFrame);
- // Setup the header
- sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16(
- (
- WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) |
- WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_BEACON)
-));
-
- if (pDevice->bEnablePSMode)
- sFrame.pHdr->sA3.wFrameCtl |= cpu_to_le16((unsigned short)WLAN_SET_FC_PWRMGT(1));
-
- memcpy(sFrame.pHdr->sA3.abyAddr1, abyBroadcastAddr, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr3, pCurrBSSID, WLAN_BSSID_LEN);
- *sFrame.pwBeaconInterval = cpu_to_le16(wCurrBeaconPeriod);
- *sFrame.pwCapInfo = cpu_to_le16(wCurrCapInfo);
- // Copy SSID
- sFrame.pSSID = (PWLAN_IE_SSID)(sFrame.pBuf + sFrame.len);
- sFrame.len += ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pSSID,
- pCurrSSID,
- ((PWLAN_IE_SSID)pCurrSSID)->len + WLAN_IEHDR_LEN
-);
- // Copy the rate set
- sFrame.pSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len);
- sFrame.len += ((PWLAN_IE_SUPP_RATES)pCurrSuppRates)->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pSuppRates,
- pCurrSuppRates,
- ((PWLAN_IE_SUPP_RATES)pCurrSuppRates)->len + WLAN_IEHDR_LEN
-);
- // DS parameter
- if (pDevice->eCurrentPHYType != PHY_TYPE_11A) {
- sFrame.pDSParms = (PWLAN_IE_DS_PARMS)(sFrame.pBuf + sFrame.len);
- sFrame.len += (1) + WLAN_IEHDR_LEN;
- sFrame.pDSParms->byElementID = WLAN_EID_DS_PARMS;
- sFrame.pDSParms->len = 1;
- sFrame.pDSParms->byCurrChannel = (unsigned char)uCurrChannel;
- }
- // TIM field
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- sFrame.pTIM = (PWLAN_IE_TIM)(sFrame.pBuf + sFrame.len);
- sFrame.pTIM->byElementID = WLAN_EID_TIM;
- s_vMgrFormatTIM(pMgmt, sFrame.pTIM);
- sFrame.len += (WLAN_IEHDR_LEN + sFrame.pTIM->len);
- }
-
- if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
- // IBSS parameter
- sFrame.pIBSSParms = (PWLAN_IE_IBSS_PARMS)(sFrame.pBuf + sFrame.len);
- sFrame.len += (2) + WLAN_IEHDR_LEN;
- sFrame.pIBSSParms->byElementID = WLAN_EID_IBSS_PARMS;
- sFrame.pIBSSParms->len = 2;
- sFrame.pIBSSParms->wATIMWindow = wCurrATIMWinodw;
- if (pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) {
- /* RSN parameter */
- sFrame.pRSNWPA = (PWLAN_IE_RSN_EXT)(sFrame.pBuf + sFrame.len);
- sFrame.pRSNWPA->byElementID = WLAN_EID_RSN_WPA;
- sFrame.pRSNWPA->len = 12;
- sFrame.pRSNWPA->abyOUI[0] = 0x00;
- sFrame.pRSNWPA->abyOUI[1] = 0x50;
- sFrame.pRSNWPA->abyOUI[2] = 0xf2;
- sFrame.pRSNWPA->abyOUI[3] = 0x01;
- sFrame.pRSNWPA->wVersion = 1;
- sFrame.pRSNWPA->abyMulticast[0] = 0x00;
- sFrame.pRSNWPA->abyMulticast[1] = 0x50;
- sFrame.pRSNWPA->abyMulticast[2] = 0xf2;
- if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled)
- sFrame.pRSNWPA->abyMulticast[3] = 0x04;//AES
- else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled)
- sFrame.pRSNWPA->abyMulticast[3] = 0x02;//TKIP
- else if (pDevice->eEncryptionStatus == Ndis802_11Encryption1Enabled)
- sFrame.pRSNWPA->abyMulticast[3] = 0x01;//WEP40
- else
- sFrame.pRSNWPA->abyMulticast[3] = 0x00;//NONE
-
- // Pairwise Key Cipher Suite
- sFrame.pRSNWPA->wPKCount = 0;
- // Auth Key Management Suite
- *((unsigned short *)(sFrame.pBuf + sFrame.len + sFrame.pRSNWPA->len)) = 0;
- sFrame.pRSNWPA->len += 2;
-
- // RSN Capabilities
- *((unsigned short *)(sFrame.pBuf + sFrame.len + sFrame.pRSNWPA->len)) = 0;
- sFrame.pRSNWPA->len += 2;
- sFrame.len += sFrame.pRSNWPA->len + WLAN_IEHDR_LEN;
- }
- }
-
- if (pMgmt->b11hEnable && (pMgmt->eCurrentPHYMode == PHY_TYPE_11A)) {
- // Country IE
- pbyBuffer = (unsigned char *)(sFrame.pBuf + sFrame.len);
- set_country_IE(pMgmt->pAdapter, pbyBuffer);
- set_country_info(pMgmt->pAdapter, PHY_TYPE_11A, pbyBuffer);
- uLength += ((PWLAN_IE_COUNTRY) pbyBuffer)->len + WLAN_IEHDR_LEN;
- pbyBuffer += (((PWLAN_IE_COUNTRY) pbyBuffer)->len + WLAN_IEHDR_LEN);
- // Power Constrain IE
- ((PWLAN_IE_PW_CONST) pbyBuffer)->byElementID = WLAN_EID_PWR_CONSTRAINT;
- ((PWLAN_IE_PW_CONST) pbyBuffer)->len = 1;
- ((PWLAN_IE_PW_CONST) pbyBuffer)->byPower = 0;
- pbyBuffer += (1) + WLAN_IEHDR_LEN;
- uLength += (1) + WLAN_IEHDR_LEN;
- if (pMgmt->bSwitchChannel) {
- // Channel Switch IE
- ((PWLAN_IE_CH_SW) pbyBuffer)->byElementID = WLAN_EID_CH_SWITCH;
- ((PWLAN_IE_CH_SW) pbyBuffer)->len = 3;
- ((PWLAN_IE_CH_SW) pbyBuffer)->byMode = 1;
- ((PWLAN_IE_CH_SW) pbyBuffer)->byChannel = get_channel_number(pMgmt->pAdapter, pMgmt->byNewChannel);
- ((PWLAN_IE_CH_SW) pbyBuffer)->byCount = 0;
- pbyBuffer += (3) + WLAN_IEHDR_LEN;
- uLength += (3) + WLAN_IEHDR_LEN;
- }
- // TPC report
- ((PWLAN_IE_TPC_REP) pbyBuffer)->byElementID = WLAN_EID_TPC_REP;
- ((PWLAN_IE_TPC_REP) pbyBuffer)->len = 2;
- ((PWLAN_IE_TPC_REP) pbyBuffer)->byTxPower = CARDbyGetTransmitPower(pMgmt->pAdapter);
- ((PWLAN_IE_TPC_REP) pbyBuffer)->byLinkMargin = 0;
- pbyBuffer += (2) + WLAN_IEHDR_LEN;
- uLength += (2) + WLAN_IEHDR_LEN;
- // IBSS DFS
- if (pMgmt->eCurrMode != WMAC_MODE_ESS_AP) {
- pIBSSDFS = (PWLAN_IE_IBSS_DFS) pbyBuffer;
- pIBSSDFS->byElementID = WLAN_EID_IBSS_DFS;
- pIBSSDFS->len = 7;
- memcpy(pIBSSDFS->abyDFSOwner,
- pMgmt->abyIBSSDFSOwner,
- 6);
- pIBSSDFS->byDFSRecovery = pMgmt->byIBSSDFSRecovery;
- pbyBuffer += (7) + WLAN_IEHDR_LEN;
- uLength += (7) + WLAN_IEHDR_LEN;
- for (ii = CB_MAX_CHANNEL_24G+1; ii <= CB_MAX_CHANNEL; ii++) {
- if (get_channel_map_info(pMgmt->pAdapter, ii, pbyBuffer, pbyBuffer+1)) {
- pbyBuffer += 2;
- uLength += 2;
- pIBSSDFS->len += 2;
- }
- }
- }
- sFrame.len += uLength;
- }
-
- if (pMgmt->eCurrentPHYMode == PHY_TYPE_11G) {
- sFrame.pERP = (PWLAN_IE_ERP)(sFrame.pBuf + sFrame.len);
- sFrame.len += 1 + WLAN_IEHDR_LEN;
- sFrame.pERP->byElementID = WLAN_EID_ERP;
- sFrame.pERP->len = 1;
- sFrame.pERP->byContext = 0;
- if (pDevice->bProtectMode)
- sFrame.pERP->byContext |= WLAN_EID_ERP_USE_PROTECTION;
- if (pDevice->bNonERPPresent)
- sFrame.pERP->byContext |= WLAN_EID_ERP_NONERP_PRESENT;
- if (pDevice->bBarkerPreambleMd)
- sFrame.pERP->byContext |= WLAN_EID_ERP_BARKER_MODE;
- }
- if (((PWLAN_IE_SUPP_RATES)pCurrExtSuppRates)->len != 0) {
- sFrame.pExtSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len);
- sFrame.len += ((PWLAN_IE_SUPP_RATES)pCurrExtSuppRates)->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pExtSuppRates,
- pCurrExtSuppRates,
- ((PWLAN_IE_SUPP_RATES)pCurrExtSuppRates)->len + WLAN_IEHDR_LEN
-);
- }
- // hostapd wpa/wpa2 IE
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && pDevice->bEnableHostapd) {
- if (pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) {
- if (pMgmt->wWPAIELen != 0) {
- sFrame.pRSN = (PWLAN_IE_RSN)(sFrame.pBuf + sFrame.len);
- memcpy(sFrame.pRSN, pMgmt->abyWPAIE, pMgmt->wWPAIELen);
- sFrame.len += pMgmt->wWPAIELen;
- }
- }
- }
-
- /* Adjust the length fields */
- pTxPacket->cbMPDULen = sFrame.len;
- pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN;
-
- return pTxPacket;
-}
-
-/*+
- *
- * Routine Description:
- * Constructs a Probe Response frame
- *
- *
- * Return Value:
- * PTR to frame; or NULL on allocation failure
- *
- -*/
-
-static PSTxMgmtPacket
-s_MgrMakeProbeResponse(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- unsigned short wCurrCapInfo,
- unsigned short wCurrBeaconPeriod,
- unsigned int uCurrChannel,
- unsigned short wCurrATIMWinodw,
- unsigned char *pDstAddr,
- PWLAN_IE_SSID pCurrSSID,
- unsigned char *pCurrBSSID,
- PWLAN_IE_SUPP_RATES pCurrSuppRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates,
- unsigned char byPHYType
-)
-{
- PSTxMgmtPacket pTxPacket = NULL;
- WLAN_FR_PROBERESP sFrame;
- unsigned char *pbyBuffer;
- unsigned int uLength = 0;
- PWLAN_IE_IBSS_DFS pIBSSDFS = NULL;
- unsigned int ii;
-
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_PROBERESP_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
- // Setup the sFrame structure.
- sFrame.pBuf = (unsigned char *)pTxPacket->p80211Header;
- sFrame.len = WLAN_PROBERESP_FR_MAXLEN;
- vMgrEncodeProbeResponse(&sFrame);
- // Setup the header
- sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16(
- (
- WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) |
- WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_PROBERESP)
-));
- memcpy(sFrame.pHdr->sA3.abyAddr1, pDstAddr, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr3, pCurrBSSID, WLAN_BSSID_LEN);
- *sFrame.pwBeaconInterval = cpu_to_le16(wCurrBeaconPeriod);
- *sFrame.pwCapInfo = cpu_to_le16(wCurrCapInfo);
-
- if (byPHYType == BB_TYPE_11B)
- *sFrame.pwCapInfo &= cpu_to_le16((unsigned short)~(WLAN_SET_CAP_INFO_SHORTSLOTTIME(1)));
-
- // Copy SSID
- sFrame.pSSID = (PWLAN_IE_SSID)(sFrame.pBuf + sFrame.len);
- sFrame.len += ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pSSID,
- pCurrSSID,
- ((PWLAN_IE_SSID)pCurrSSID)->len + WLAN_IEHDR_LEN
-);
- // Copy the rate set
- sFrame.pSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len);
-
- sFrame.len += ((PWLAN_IE_SUPP_RATES)pCurrSuppRates)->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pSuppRates,
- pCurrSuppRates,
- ((PWLAN_IE_SUPP_RATES)pCurrSuppRates)->len + WLAN_IEHDR_LEN
-);
-
- // DS parameter
- if (pDevice->eCurrentPHYType != PHY_TYPE_11A) {
- sFrame.pDSParms = (PWLAN_IE_DS_PARMS)(sFrame.pBuf + sFrame.len);
- sFrame.len += (1) + WLAN_IEHDR_LEN;
- sFrame.pDSParms->byElementID = WLAN_EID_DS_PARMS;
- sFrame.pDSParms->len = 1;
- sFrame.pDSParms->byCurrChannel = (unsigned char)uCurrChannel;
- }
-
- if (pMgmt->eCurrMode != WMAC_MODE_ESS_AP) {
- // IBSS parameter
- sFrame.pIBSSParms = (PWLAN_IE_IBSS_PARMS)(sFrame.pBuf + sFrame.len);
- sFrame.len += (2) + WLAN_IEHDR_LEN;
- sFrame.pIBSSParms->byElementID = WLAN_EID_IBSS_PARMS;
- sFrame.pIBSSParms->len = 2;
- sFrame.pIBSSParms->wATIMWindow = 0;
- }
- if (pDevice->eCurrentPHYType == PHY_TYPE_11G) {
- sFrame.pERP = (PWLAN_IE_ERP)(sFrame.pBuf + sFrame.len);
- sFrame.len += 1 + WLAN_IEHDR_LEN;
- sFrame.pERP->byElementID = WLAN_EID_ERP;
- sFrame.pERP->len = 1;
- sFrame.pERP->byContext = 0;
- if (pDevice->bProtectMode)
- sFrame.pERP->byContext |= WLAN_EID_ERP_USE_PROTECTION;
- if (pDevice->bNonERPPresent)
- sFrame.pERP->byContext |= WLAN_EID_ERP_NONERP_PRESENT;
- if (pDevice->bBarkerPreambleMd)
- sFrame.pERP->byContext |= WLAN_EID_ERP_BARKER_MODE;
- }
-
- if (pMgmt->b11hEnable && (pMgmt->eCurrentPHYMode == PHY_TYPE_11A)) {
- // Country IE
- pbyBuffer = (unsigned char *)(sFrame.pBuf + sFrame.len);
- set_country_IE(pMgmt->pAdapter, pbyBuffer);
- set_country_info(pMgmt->pAdapter, PHY_TYPE_11A, pbyBuffer);
- uLength += ((PWLAN_IE_COUNTRY) pbyBuffer)->len + WLAN_IEHDR_LEN;
- pbyBuffer += (((PWLAN_IE_COUNTRY) pbyBuffer)->len + WLAN_IEHDR_LEN);
- // Power Constrain IE
- ((PWLAN_IE_PW_CONST) pbyBuffer)->byElementID = WLAN_EID_PWR_CONSTRAINT;
- ((PWLAN_IE_PW_CONST) pbyBuffer)->len = 1;
- ((PWLAN_IE_PW_CONST) pbyBuffer)->byPower = 0;
- pbyBuffer += (1) + WLAN_IEHDR_LEN;
- uLength += (1) + WLAN_IEHDR_LEN;
- if (pMgmt->bSwitchChannel) {
- // Channel Switch IE
- ((PWLAN_IE_CH_SW) pbyBuffer)->byElementID = WLAN_EID_CH_SWITCH;
- ((PWLAN_IE_CH_SW) pbyBuffer)->len = 3;
- ((PWLAN_IE_CH_SW) pbyBuffer)->byMode = 1;
- ((PWLAN_IE_CH_SW) pbyBuffer)->byChannel = get_channel_number(pMgmt->pAdapter, pMgmt->byNewChannel);
- ((PWLAN_IE_CH_SW) pbyBuffer)->byCount = 0;
- pbyBuffer += (3) + WLAN_IEHDR_LEN;
- uLength += (3) + WLAN_IEHDR_LEN;
- }
- // TPC report
- ((PWLAN_IE_TPC_REP) pbyBuffer)->byElementID = WLAN_EID_TPC_REP;
- ((PWLAN_IE_TPC_REP) pbyBuffer)->len = 2;
- ((PWLAN_IE_TPC_REP) pbyBuffer)->byTxPower = CARDbyGetTransmitPower(pMgmt->pAdapter);
- ((PWLAN_IE_TPC_REP) pbyBuffer)->byLinkMargin = 0;
- pbyBuffer += (2) + WLAN_IEHDR_LEN;
- uLength += (2) + WLAN_IEHDR_LEN;
- // IBSS DFS
- if (pMgmt->eCurrMode != WMAC_MODE_ESS_AP) {
- pIBSSDFS = (PWLAN_IE_IBSS_DFS) pbyBuffer;
- pIBSSDFS->byElementID = WLAN_EID_IBSS_DFS;
- pIBSSDFS->len = 7;
- memcpy(pIBSSDFS->abyDFSOwner,
- pMgmt->abyIBSSDFSOwner,
- 6);
- pIBSSDFS->byDFSRecovery = pMgmt->byIBSSDFSRecovery;
- pbyBuffer += (7) + WLAN_IEHDR_LEN;
- uLength += (7) + WLAN_IEHDR_LEN;
- for (ii = CB_MAX_CHANNEL_24G + 1; ii <= CB_MAX_CHANNEL; ii++) {
- if (get_channel_map_info(pMgmt->pAdapter, ii, pbyBuffer, pbyBuffer+1)) {
- pbyBuffer += 2;
- uLength += 2;
- pIBSSDFS->len += 2;
- }
- }
- }
- sFrame.len += uLength;
- }
-
- if (((PWLAN_IE_SUPP_RATES)pCurrExtSuppRates)->len != 0) {
- sFrame.pExtSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len);
- sFrame.len += ((PWLAN_IE_SUPP_RATES)pCurrExtSuppRates)->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pExtSuppRates,
- pCurrExtSuppRates,
- ((PWLAN_IE_SUPP_RATES)pCurrExtSuppRates)->len + WLAN_IEHDR_LEN
-);
- }
-
- // hostapd wpa/wpa2 IE
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && pDevice->bEnableHostapd) {
- if (pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) {
- if (pMgmt->wWPAIELen != 0) {
- sFrame.pRSN = (PWLAN_IE_RSN)(sFrame.pBuf + sFrame.len);
- memcpy(sFrame.pRSN, pMgmt->abyWPAIE, pMgmt->wWPAIELen);
- sFrame.len += pMgmt->wWPAIELen;
- }
- }
- }
-
- // Adjust the length fields
- pTxPacket->cbMPDULen = sFrame.len;
- pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN;
-
- return pTxPacket;
-}
-
-/*+
- *
- * Routine Description:
- * Constructs an association request frame
- *
- *
- * Return Value:
- * A ptr to frame or NULL on allocation failure
- *
- -*/
-
-static PSTxMgmtPacket
-s_MgrMakeAssocRequest(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- unsigned char *pDAddr,
- unsigned short wCurrCapInfo,
- unsigned short wListenInterval,
- PWLAN_IE_SSID pCurrSSID,
- PWLAN_IE_SUPP_RATES pCurrRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates
-)
-{
- PSTxMgmtPacket pTxPacket = NULL;
- WLAN_FR_ASSOCREQ sFrame;
- unsigned char *pbyIEs;
- unsigned char *pbyRSN;
-
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_ASSOCREQ_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
- // Setup the sFrame structure.
- sFrame.pBuf = (unsigned char *)pTxPacket->p80211Header;
- sFrame.len = WLAN_ASSOCREQ_FR_MAXLEN;
- // format fixed field frame structure
- vMgrEncodeAssocRequest(&sFrame);
- // Setup the header
- sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16(
- (
- WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) |
- WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_ASSOCREQ)
-));
- memcpy(sFrame.pHdr->sA3.abyAddr1, pDAddr, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
-
- // Set the capability and listen interval
- *(sFrame.pwCapInfo) = cpu_to_le16(wCurrCapInfo);
- *(sFrame.pwListenInterval) = cpu_to_le16(wListenInterval);
-
- // sFrame.len points to the end of the fixed fields
- sFrame.pSSID = (PWLAN_IE_SSID)(sFrame.pBuf + sFrame.len);
- sFrame.len += pCurrSSID->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pSSID, pCurrSSID, pCurrSSID->len + WLAN_IEHDR_LEN);
-
- pMgmt->sAssocInfo.AssocInfo.RequestIELength = pCurrSSID->len + WLAN_IEHDR_LEN;
- pMgmt->sAssocInfo.AssocInfo.OffsetRequestIEs = sizeof(NDIS_802_11_ASSOCIATION_INFORMATION);
- pbyIEs = pMgmt->sAssocInfo.abyIEs;
- memcpy(pbyIEs, pCurrSSID, pCurrSSID->len + WLAN_IEHDR_LEN);
- pbyIEs += pCurrSSID->len + WLAN_IEHDR_LEN;
-
- // Copy the rate set
- sFrame.pSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len);
- if ((pDevice->eCurrentPHYType == PHY_TYPE_11B) && (pCurrRates->len > 4))
- sFrame.len += 4 + WLAN_IEHDR_LEN;
- else
- sFrame.len += pCurrRates->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pSuppRates, pCurrRates, pCurrRates->len + WLAN_IEHDR_LEN);
-
- // Copy the extension rate set
- if ((pDevice->eCurrentPHYType == PHY_TYPE_11G) && (pCurrExtSuppRates->len > 0)) {
- sFrame.pExtSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len);
- sFrame.len += pCurrExtSuppRates->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pExtSuppRates, pCurrExtSuppRates, pCurrExtSuppRates->len + WLAN_IEHDR_LEN);
- }
-
- pMgmt->sAssocInfo.AssocInfo.RequestIELength += pCurrRates->len + WLAN_IEHDR_LEN;
- memcpy(pbyIEs, pCurrRates, pCurrRates->len + WLAN_IEHDR_LEN);
- pbyIEs += pCurrRates->len + WLAN_IEHDR_LEN;
-
- // for 802.11h
- if (pMgmt->b11hEnable) {
- if (sFrame.pCurrPowerCap == NULL) {
- sFrame.pCurrPowerCap = (PWLAN_IE_PW_CAP)(sFrame.pBuf + sFrame.len);
- sFrame.len += (2 + WLAN_IEHDR_LEN);
- sFrame.pCurrPowerCap->byElementID = WLAN_EID_PWR_CAPABILITY;
- sFrame.pCurrPowerCap->len = 2;
- CARDvGetPowerCapability(pMgmt->pAdapter,
- &(sFrame.pCurrPowerCap->byMinPower),
- &(sFrame.pCurrPowerCap->byMaxPower)
-);
- }
- if (sFrame.pCurrSuppCh == NULL) {
- sFrame.pCurrSuppCh = (PWLAN_IE_SUPP_CH)(sFrame.pBuf + sFrame.len);
- sFrame.len += set_support_channels(pMgmt->pAdapter, (unsigned char *)sFrame.pCurrSuppCh);
- }
- }
-
- if (((pMgmt->eAuthenMode == WMAC_AUTH_WPA) ||
- (pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK) ||
- (pMgmt->eAuthenMode == WMAC_AUTH_WPANONE)) &&
- (pMgmt->pCurrBSS != NULL)) {
- /* WPA IE */
- sFrame.pRSNWPA = (PWLAN_IE_RSN_EXT)(sFrame.pBuf + sFrame.len);
- sFrame.pRSNWPA->byElementID = WLAN_EID_RSN_WPA;
- sFrame.pRSNWPA->len = 16;
- sFrame.pRSNWPA->abyOUI[0] = 0x00;
- sFrame.pRSNWPA->abyOUI[1] = 0x50;
- sFrame.pRSNWPA->abyOUI[2] = 0xf2;
- sFrame.pRSNWPA->abyOUI[3] = 0x01;
- sFrame.pRSNWPA->wVersion = 1;
- //Group Key Cipher Suite
- sFrame.pRSNWPA->abyMulticast[0] = 0x00;
- sFrame.pRSNWPA->abyMulticast[1] = 0x50;
- sFrame.pRSNWPA->abyMulticast[2] = 0xf2;
- if (pMgmt->byCSSGK == KEY_CTL_WEP)
- sFrame.pRSNWPA->abyMulticast[3] = pMgmt->pCurrBSS->byGKType;
- else if (pMgmt->byCSSGK == KEY_CTL_TKIP)
- sFrame.pRSNWPA->abyMulticast[3] = WPA_TKIP;
- else if (pMgmt->byCSSGK == KEY_CTL_CCMP)
- sFrame.pRSNWPA->abyMulticast[3] = WPA_AESCCMP;
- else
- sFrame.pRSNWPA->abyMulticast[3] = WPA_NONE;
-
- // Pairwise Key Cipher Suite
- sFrame.pRSNWPA->wPKCount = 1;
- sFrame.pRSNWPA->PKSList[0].abyOUI[0] = 0x00;
- sFrame.pRSNWPA->PKSList[0].abyOUI[1] = 0x50;
- sFrame.pRSNWPA->PKSList[0].abyOUI[2] = 0xf2;
- if (pMgmt->byCSSPK == KEY_CTL_TKIP)
- sFrame.pRSNWPA->PKSList[0].abyOUI[3] = WPA_TKIP;
- else if (pMgmt->byCSSPK == KEY_CTL_CCMP)
- sFrame.pRSNWPA->PKSList[0].abyOUI[3] = WPA_AESCCMP;
- else
- sFrame.pRSNWPA->PKSList[0].abyOUI[3] = WPA_NONE;
-
- // Auth Key Management Suite
- pbyRSN = (unsigned char *)(sFrame.pBuf + sFrame.len + 2 + sFrame.pRSNWPA->len);
- *pbyRSN++ = 0x01;
- *pbyRSN++ = 0x00;
- *pbyRSN++ = 0x00;
-
- *pbyRSN++ = 0x50;
- *pbyRSN++ = 0xf2;
- if (pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK)
- *pbyRSN++ = WPA_AUTH_PSK;
- else if (pMgmt->eAuthenMode == WMAC_AUTH_WPA)
- *pbyRSN++ = WPA_AUTH_IEEE802_1X;
- else
- *pbyRSN++ = WPA_NONE;
-
- sFrame.pRSNWPA->len += 6;
-
- // RSN Capabilities
-
- *pbyRSN++ = 0x00;
- *pbyRSN++ = 0x00;
- sFrame.pRSNWPA->len += 2;
-
- sFrame.len += sFrame.pRSNWPA->len + WLAN_IEHDR_LEN;
- // copy to AssocInfo. for OID_802_11_ASSOCIATION_INFORMATION
- pMgmt->sAssocInfo.AssocInfo.RequestIELength += sFrame.pRSNWPA->len + WLAN_IEHDR_LEN;
- memcpy(pbyIEs, sFrame.pRSNWPA, sFrame.pRSNWPA->len + WLAN_IEHDR_LEN);
- pbyIEs += sFrame.pRSNWPA->len + WLAN_IEHDR_LEN;
-
- } else if (((pMgmt->eAuthenMode == WMAC_AUTH_WPA2) ||
- (pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) &&
- (pMgmt->pCurrBSS != NULL)) {
- unsigned int ii;
- unsigned short *pwPMKID;
-
- // WPA IE
- sFrame.pRSN = (PWLAN_IE_RSN)(sFrame.pBuf + sFrame.len);
- sFrame.pRSN->byElementID = WLAN_EID_RSN;
- sFrame.pRSN->len = 6; //Version(2)+GK(4)
- sFrame.pRSN->wVersion = 1;
- //Group Key Cipher Suite
- sFrame.pRSN->abyRSN[0] = 0x00;
- sFrame.pRSN->abyRSN[1] = 0x0F;
- sFrame.pRSN->abyRSN[2] = 0xAC;
- if (pMgmt->byCSSGK == KEY_CTL_WEP)
- sFrame.pRSN->abyRSN[3] = pMgmt->pCurrBSS->byCSSGK;
- else if (pMgmt->byCSSGK == KEY_CTL_TKIP)
- sFrame.pRSN->abyRSN[3] = WLAN_11i_CSS_TKIP;
- else if (pMgmt->byCSSGK == KEY_CTL_CCMP)
- sFrame.pRSN->abyRSN[3] = WLAN_11i_CSS_CCMP;
- else
- sFrame.pRSN->abyRSN[3] = WLAN_11i_CSS_UNKNOWN;
-
- // Pairwise Key Cipher Suite
- sFrame.pRSN->abyRSN[4] = 1;
- sFrame.pRSN->abyRSN[5] = 0;
- sFrame.pRSN->abyRSN[6] = 0x00;
- sFrame.pRSN->abyRSN[7] = 0x0F;
- sFrame.pRSN->abyRSN[8] = 0xAC;
- if (pMgmt->byCSSPK == KEY_CTL_TKIP)
- sFrame.pRSN->abyRSN[9] = WLAN_11i_CSS_TKIP;
- else if (pMgmt->byCSSPK == KEY_CTL_CCMP)
- sFrame.pRSN->abyRSN[9] = WLAN_11i_CSS_CCMP;
- else if (pMgmt->byCSSPK == KEY_CTL_NONE)
- sFrame.pRSN->abyRSN[9] = WLAN_11i_CSS_USE_GROUP;
- else
- sFrame.pRSN->abyRSN[9] = WLAN_11i_CSS_UNKNOWN;
-
- sFrame.pRSN->len += 6;
-
- // Auth Key Management Suite
- sFrame.pRSN->abyRSN[10] = 1;
- sFrame.pRSN->abyRSN[11] = 0;
- sFrame.pRSN->abyRSN[12] = 0x00;
- sFrame.pRSN->abyRSN[13] = 0x0F;
- sFrame.pRSN->abyRSN[14] = 0xAC;
- if (pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)
- sFrame.pRSN->abyRSN[15] = WLAN_11i_AKMSS_PSK;
- else if (pMgmt->eAuthenMode == WMAC_AUTH_WPA2)
- sFrame.pRSN->abyRSN[15] = WLAN_11i_AKMSS_802_1X;
- else
- sFrame.pRSN->abyRSN[15] = WLAN_11i_AKMSS_UNKNOWN;
-
- sFrame.pRSN->len += 6;
-
- // RSN Capabilities
- if (pMgmt->pCurrBSS->sRSNCapObj.bRSNCapExist) {
- memcpy(&sFrame.pRSN->abyRSN[16], &pMgmt->pCurrBSS->sRSNCapObj.wRSNCap, 2);
- } else {
- sFrame.pRSN->abyRSN[16] = 0;
- sFrame.pRSN->abyRSN[17] = 0;
- }
- sFrame.pRSN->len += 2;
-
- if ((pDevice->gsPMKID.BSSIDInfoCount > 0) && pDevice->bRoaming && (pMgmt->eAuthenMode == WMAC_AUTH_WPA2)) {
- // RSN PMKID
- pbyRSN = &sFrame.pRSN->abyRSN[18];
- pwPMKID = (unsigned short *)pbyRSN; // Point to PMKID count
- *pwPMKID = 0; // Initialize PMKID count
- pbyRSN += 2; // Point to PMKID list
- for (ii = 0; ii < pDevice->gsPMKID.BSSIDInfoCount; ii++) {
- if (!memcmp(&pDevice->gsPMKID.BSSIDInfo[ii].BSSID[0], pMgmt->abyCurrBSSID, ETH_ALEN)) {
- (*pwPMKID)++;
- memcpy(pbyRSN, pDevice->gsPMKID.BSSIDInfo[ii].PMKID, 16);
- pbyRSN += 16;
- }
- }
- if (*pwPMKID != 0)
- sFrame.pRSN->len += (2 + (*pwPMKID)*16);
- }
-
- sFrame.len += sFrame.pRSN->len + WLAN_IEHDR_LEN;
- // copy to AssocInfo. for OID_802_11_ASSOCIATION_INFORMATION
- pMgmt->sAssocInfo.AssocInfo.RequestIELength += sFrame.pRSN->len + WLAN_IEHDR_LEN;
- memcpy(pbyIEs, sFrame.pRSN, sFrame.pRSN->len + WLAN_IEHDR_LEN);
- pbyIEs += sFrame.pRSN->len + WLAN_IEHDR_LEN;
- }
-
- // Adjust the length fields
- pTxPacket->cbMPDULen = sFrame.len;
- pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN;
- return pTxPacket;
-}
-
-/*+
- *
- * Routine Description:
- * Constructs a re-association request frame
- *
- *
- * Return Value:
- * A ptr to frame or NULL on allocation failure
- *
- -*/
-
-static PSTxMgmtPacket
-s_MgrMakeReAssocRequest(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- unsigned char *pDAddr,
- unsigned short wCurrCapInfo,
- unsigned short wListenInterval,
- PWLAN_IE_SSID pCurrSSID,
- PWLAN_IE_SUPP_RATES pCurrRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates
-)
-{
- PSTxMgmtPacket pTxPacket = NULL;
- WLAN_FR_REASSOCREQ sFrame;
- unsigned char *pbyIEs;
- unsigned char *pbyRSN;
-
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_REASSOCREQ_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
- /* Setup the sFrame structure. */
- sFrame.pBuf = (unsigned char *)pTxPacket->p80211Header;
- sFrame.len = WLAN_REASSOCREQ_FR_MAXLEN;
-
- // format fixed field frame structure
- vMgrEncodeReassocRequest(&sFrame);
-
- /* Setup the header */
- sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16(
- (
- WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) |
- WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_REASSOCREQ)
-));
- memcpy(sFrame.pHdr->sA3.abyAddr1, pDAddr, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
-
- /* Set the capability and listen interval */
- *(sFrame.pwCapInfo) = cpu_to_le16(wCurrCapInfo);
- *(sFrame.pwListenInterval) = cpu_to_le16(wListenInterval);
-
- memcpy(sFrame.pAddrCurrAP, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
- /* Copy the SSID */
- /* sFrame.len points to the end of the fixed fields */
- sFrame.pSSID = (PWLAN_IE_SSID)(sFrame.pBuf + sFrame.len);
- sFrame.len += pCurrSSID->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pSSID, pCurrSSID, pCurrSSID->len + WLAN_IEHDR_LEN);
-
- pMgmt->sAssocInfo.AssocInfo.RequestIELength = pCurrSSID->len + WLAN_IEHDR_LEN;
- pMgmt->sAssocInfo.AssocInfo.OffsetRequestIEs = sizeof(NDIS_802_11_ASSOCIATION_INFORMATION);
- pbyIEs = pMgmt->sAssocInfo.abyIEs;
- memcpy(pbyIEs, pCurrSSID, pCurrSSID->len + WLAN_IEHDR_LEN);
- pbyIEs += pCurrSSID->len + WLAN_IEHDR_LEN;
-
- /* Copy the rate set */
- /* sFrame.len points to the end of the SSID */
- sFrame.pSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len);
- sFrame.len += pCurrRates->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pSuppRates, pCurrRates, pCurrRates->len + WLAN_IEHDR_LEN);
-
- // Copy the extension rate set
- if ((pMgmt->eCurrentPHYMode == PHY_TYPE_11G) && (pCurrExtSuppRates->len > 0)) {
- sFrame.pExtSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len);
- sFrame.len += pCurrExtSuppRates->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pExtSuppRates, pCurrExtSuppRates, pCurrExtSuppRates->len + WLAN_IEHDR_LEN);
- }
-
- pMgmt->sAssocInfo.AssocInfo.RequestIELength += pCurrRates->len + WLAN_IEHDR_LEN;
- memcpy(pbyIEs, pCurrRates, pCurrRates->len + WLAN_IEHDR_LEN);
- pbyIEs += pCurrRates->len + WLAN_IEHDR_LEN;
-
- if (((pMgmt->eAuthenMode == WMAC_AUTH_WPA) ||
- (pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK) ||
- (pMgmt->eAuthenMode == WMAC_AUTH_WPANONE)) &&
- (pMgmt->pCurrBSS != NULL)) {
- /* WPA IE */
- sFrame.pRSNWPA = (PWLAN_IE_RSN_EXT)(sFrame.pBuf + sFrame.len);
- sFrame.pRSNWPA->byElementID = WLAN_EID_RSN_WPA;
- sFrame.pRSNWPA->len = 16;
- sFrame.pRSNWPA->abyOUI[0] = 0x00;
- sFrame.pRSNWPA->abyOUI[1] = 0x50;
- sFrame.pRSNWPA->abyOUI[2] = 0xf2;
- sFrame.pRSNWPA->abyOUI[3] = 0x01;
- sFrame.pRSNWPA->wVersion = 1;
- //Group Key Cipher Suite
- sFrame.pRSNWPA->abyMulticast[0] = 0x00;
- sFrame.pRSNWPA->abyMulticast[1] = 0x50;
- sFrame.pRSNWPA->abyMulticast[2] = 0xf2;
- if (pMgmt->byCSSGK == KEY_CTL_WEP)
- sFrame.pRSNWPA->abyMulticast[3] = pMgmt->pCurrBSS->byGKType;
- else if (pMgmt->byCSSGK == KEY_CTL_TKIP)
- sFrame.pRSNWPA->abyMulticast[3] = WPA_TKIP;
- else if (pMgmt->byCSSGK == KEY_CTL_CCMP)
- sFrame.pRSNWPA->abyMulticast[3] = WPA_AESCCMP;
- else
- sFrame.pRSNWPA->abyMulticast[3] = WPA_NONE;
-
- // Pairwise Key Cipher Suite
- sFrame.pRSNWPA->wPKCount = 1;
- sFrame.pRSNWPA->PKSList[0].abyOUI[0] = 0x00;
- sFrame.pRSNWPA->PKSList[0].abyOUI[1] = 0x50;
- sFrame.pRSNWPA->PKSList[0].abyOUI[2] = 0xf2;
- if (pMgmt->byCSSPK == KEY_CTL_TKIP)
- sFrame.pRSNWPA->PKSList[0].abyOUI[3] = WPA_TKIP;
- else if (pMgmt->byCSSPK == KEY_CTL_CCMP)
- sFrame.pRSNWPA->PKSList[0].abyOUI[3] = WPA_AESCCMP;
- else
- sFrame.pRSNWPA->PKSList[0].abyOUI[3] = WPA_NONE;
-
- // Auth Key Management Suite
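-		/* sFrame.len still indexes the start of this WPA IE; skipping the
-		   2-byte IE header (element ID + length) plus the bytes written so
-		   far (pRSNWPA->len) leaves pbyRSN at the AKM suite count field. */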
- pbyRSN = (unsigned char *)(sFrame.pBuf + sFrame.len + 2 + sFrame.pRSNWPA->len);
- *pbyRSN++ = 0x01;
- *pbyRSN++ = 0x00;
- *pbyRSN++ = 0x00;
-
- *pbyRSN++ = 0x50;
- *pbyRSN++ = 0xf2;
- if (pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK)
- *pbyRSN++ = WPA_AUTH_PSK;
- else if (pMgmt->eAuthenMode == WMAC_AUTH_WPA)
- *pbyRSN++ = WPA_AUTH_IEEE802_1X;
- else
- *pbyRSN++ = WPA_NONE;
-
- sFrame.pRSNWPA->len += 6;
-
- // RSN Capabilities
- *pbyRSN++ = 0x00;
- *pbyRSN++ = 0x00;
- sFrame.pRSNWPA->len += 2;
-
- sFrame.len += sFrame.pRSNWPA->len + WLAN_IEHDR_LEN;
-		// copy to AssocInfo for OID_802_11_ASSOCIATION_INFORMATION
- pMgmt->sAssocInfo.AssocInfo.RequestIELength += sFrame.pRSNWPA->len + WLAN_IEHDR_LEN;
- memcpy(pbyIEs, sFrame.pRSNWPA, sFrame.pRSNWPA->len + WLAN_IEHDR_LEN);
- pbyIEs += sFrame.pRSNWPA->len + WLAN_IEHDR_LEN;
-
- } else if (((pMgmt->eAuthenMode == WMAC_AUTH_WPA2) ||
- (pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) &&
- (pMgmt->pCurrBSS != NULL)) {
- unsigned int ii;
- unsigned short *pwPMKID;
-
-		/* RSN (WPA2) IE */
- sFrame.pRSN = (PWLAN_IE_RSN)(sFrame.pBuf + sFrame.len);
- sFrame.pRSN->byElementID = WLAN_EID_RSN;
- sFrame.pRSN->len = 6; //Version(2)+GK(4)
- sFrame.pRSN->wVersion = 1;
- //Group Key Cipher Suite
- sFrame.pRSN->abyRSN[0] = 0x00;
- sFrame.pRSN->abyRSN[1] = 0x0F;
- sFrame.pRSN->abyRSN[2] = 0xAC;
- if (pMgmt->byCSSGK == KEY_CTL_WEP)
- sFrame.pRSN->abyRSN[3] = pMgmt->pCurrBSS->byCSSGK;
- else if (pMgmt->byCSSGK == KEY_CTL_TKIP)
- sFrame.pRSN->abyRSN[3] = WLAN_11i_CSS_TKIP;
- else if (pMgmt->byCSSGK == KEY_CTL_CCMP)
- sFrame.pRSN->abyRSN[3] = WLAN_11i_CSS_CCMP;
- else
- sFrame.pRSN->abyRSN[3] = WLAN_11i_CSS_UNKNOWN;
-
- // Pairwise Key Cipher Suite
- sFrame.pRSN->abyRSN[4] = 1;
- sFrame.pRSN->abyRSN[5] = 0;
- sFrame.pRSN->abyRSN[6] = 0x00;
- sFrame.pRSN->abyRSN[7] = 0x0F;
- sFrame.pRSN->abyRSN[8] = 0xAC;
- if (pMgmt->byCSSPK == KEY_CTL_TKIP)
- sFrame.pRSN->abyRSN[9] = WLAN_11i_CSS_TKIP;
- else if (pMgmt->byCSSPK == KEY_CTL_CCMP)
- sFrame.pRSN->abyRSN[9] = WLAN_11i_CSS_CCMP;
- else if (pMgmt->byCSSPK == KEY_CTL_NONE)
- sFrame.pRSN->abyRSN[9] = WLAN_11i_CSS_USE_GROUP;
- else
- sFrame.pRSN->abyRSN[9] = WLAN_11i_CSS_UNKNOWN;
-
- sFrame.pRSN->len += 6;
-
- // Auth Key Management Suite
- sFrame.pRSN->abyRSN[10] = 1;
- sFrame.pRSN->abyRSN[11] = 0;
- sFrame.pRSN->abyRSN[12] = 0x00;
- sFrame.pRSN->abyRSN[13] = 0x0F;
- sFrame.pRSN->abyRSN[14] = 0xAC;
- if (pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)
- sFrame.pRSN->abyRSN[15] = WLAN_11i_AKMSS_PSK;
- else if (pMgmt->eAuthenMode == WMAC_AUTH_WPA2)
- sFrame.pRSN->abyRSN[15] = WLAN_11i_AKMSS_802_1X;
- else
- sFrame.pRSN->abyRSN[15] = WLAN_11i_AKMSS_UNKNOWN;
-
- sFrame.pRSN->len += 6;
-
- // RSN Capabilities
- if (pMgmt->pCurrBSS->sRSNCapObj.bRSNCapExist) {
- memcpy(&sFrame.pRSN->abyRSN[16], &pMgmt->pCurrBSS->sRSNCapObj.wRSNCap, 2);
- } else {
- sFrame.pRSN->abyRSN[16] = 0;
- sFrame.pRSN->abyRSN[17] = 0;
- }
- sFrame.pRSN->len += 2;
-
- if ((pDevice->gsPMKID.BSSIDInfoCount > 0) && pDevice->bRoaming && (pMgmt->eAuthenMode == WMAC_AUTH_WPA2)) {
- // RSN PMKID
- pbyRSN = &sFrame.pRSN->abyRSN[18];
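-			/* abyRSN[18] follows GK suite(4) + PK count(2) + PK suite(4) +
-			   AKM count(2) + AKM suite(4) + RSN capabilities(2), i.e. the
-			   PMKID count field of the RSN IE. */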
- pwPMKID = (unsigned short *)pbyRSN; // Point to PMKID count
- *pwPMKID = 0; // Initialize PMKID count
- pbyRSN += 2; // Point to PMKID list
- for (ii = 0; ii < pDevice->gsPMKID.BSSIDInfoCount; ii++) {
- if (!memcmp(&pDevice->gsPMKID.BSSIDInfo[ii].BSSID[0], pMgmt->abyCurrBSSID, ETH_ALEN)) {
- (*pwPMKID)++;
- memcpy(pbyRSN, pDevice->gsPMKID.BSSIDInfo[ii].PMKID, 16);
- pbyRSN += 16;
- }
- }
-
- if (*pwPMKID != 0)
- sFrame.pRSN->len += (2 + (*pwPMKID) * 16);
- }
-
- sFrame.len += sFrame.pRSN->len + WLAN_IEHDR_LEN;
-		// copy to AssocInfo for OID_802_11_ASSOCIATION_INFORMATION
- pMgmt->sAssocInfo.AssocInfo.RequestIELength += sFrame.pRSN->len + WLAN_IEHDR_LEN;
- memcpy(pbyIEs, sFrame.pRSN, sFrame.pRSN->len + WLAN_IEHDR_LEN);
- pbyIEs += sFrame.pRSN->len + WLAN_IEHDR_LEN;
- }
-
- /* Adjust the length fields */
- pTxPacket->cbMPDULen = sFrame.len;
- pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN;
-
- return pTxPacket;
-}
-
-/*+
- *
- * Routine Description:
- * Constructs an assoc-response frame
- *
- *
- * Return Value:
- * PTR to frame; or NULL on allocation failure
- *
- -*/
-
-static PSTxMgmtPacket
-s_MgrMakeAssocResponse(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- unsigned short wCurrCapInfo,
- unsigned short wAssocStatus,
- unsigned short wAssocAID,
- unsigned char *pDstAddr,
- PWLAN_IE_SUPP_RATES pCurrSuppRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates
-)
-{
- PSTxMgmtPacket pTxPacket = NULL;
- WLAN_FR_ASSOCRESP sFrame;
-
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_ASSOCREQ_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
- // Setup the sFrame structure
- sFrame.pBuf = (unsigned char *)pTxPacket->p80211Header;
- sFrame.len = WLAN_REASSOCRESP_FR_MAXLEN;
- vMgrEncodeAssocResponse(&sFrame);
- // Setup the header
- sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16(
- (
- WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) |
- WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_ASSOCRESP)
-));
- memcpy(sFrame.pHdr->sA3.abyAddr1, pDstAddr, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
-
- *sFrame.pwCapInfo = cpu_to_le16(wCurrCapInfo);
- *sFrame.pwStatus = cpu_to_le16(wAssocStatus);
- *sFrame.pwAid = cpu_to_le16((unsigned short)(wAssocAID | BIT14 | BIT15));
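-	/* The two most significant bits of the AID field are set to 1, as
-	   required by the 802.11 AID encoding. */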
-
- // Copy the rate set
- sFrame.pSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len);
- sFrame.len += ((PWLAN_IE_SUPP_RATES)pCurrSuppRates)->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pSuppRates,
- pCurrSuppRates,
- ((PWLAN_IE_SUPP_RATES)pCurrSuppRates)->len + WLAN_IEHDR_LEN
-);
-
- if (((PWLAN_IE_SUPP_RATES)pCurrExtSuppRates)->len != 0) {
- sFrame.pExtSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len);
- sFrame.len += ((PWLAN_IE_SUPP_RATES)pCurrExtSuppRates)->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pExtSuppRates,
- pCurrExtSuppRates,
- ((PWLAN_IE_SUPP_RATES)pCurrExtSuppRates)->len + WLAN_IEHDR_LEN
-);
- }
-
- // Adjust the length fields
- pTxPacket->cbMPDULen = sFrame.len;
- pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN;
-
- return pTxPacket;
-}
-
-/*+
- *
- * Routine Description:
- *      Constructs a reassoc-response frame
- *
- *
- * Return Value:
- * PTR to frame; or NULL on allocation failure
- *
- -*/
-
-static PSTxMgmtPacket
-s_MgrMakeReAssocResponse(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- unsigned short wCurrCapInfo,
- unsigned short wAssocStatus,
- unsigned short wAssocAID,
- unsigned char *pDstAddr,
- PWLAN_IE_SUPP_RATES pCurrSuppRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates
-)
-{
- PSTxMgmtPacket pTxPacket = NULL;
- WLAN_FR_REASSOCRESP sFrame;
-
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_ASSOCREQ_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
- // Setup the sFrame structure
- sFrame.pBuf = (unsigned char *)pTxPacket->p80211Header;
- sFrame.len = WLAN_REASSOCRESP_FR_MAXLEN;
- vMgrEncodeReassocResponse(&sFrame);
- // Setup the header
- sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16(
- (
- WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) |
- WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_REASSOCRESP)
-));
- memcpy(sFrame.pHdr->sA3.abyAddr1, pDstAddr, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
-
- *sFrame.pwCapInfo = cpu_to_le16(wCurrCapInfo);
- *sFrame.pwStatus = cpu_to_le16(wAssocStatus);
- *sFrame.pwAid = cpu_to_le16((unsigned short)(wAssocAID | BIT14 | BIT15));
-
- // Copy the rate set
- sFrame.pSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len);
- sFrame.len += ((PWLAN_IE_SUPP_RATES)pCurrSuppRates)->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pSuppRates,
- pCurrSuppRates,
- ((PWLAN_IE_SUPP_RATES)pCurrSuppRates)->len + WLAN_IEHDR_LEN
-);
-
- if (((PWLAN_IE_SUPP_RATES)pCurrExtSuppRates)->len != 0) {
- sFrame.pExtSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len);
- sFrame.len += ((PWLAN_IE_SUPP_RATES)pCurrExtSuppRates)->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pExtSuppRates,
- pCurrExtSuppRates,
- ((PWLAN_IE_SUPP_RATES)pCurrExtSuppRates)->len + WLAN_IEHDR_LEN
-);
- }
-
- // Adjust the length fields
- pTxPacket->cbMPDULen = sFrame.len;
- pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN;
-
- return pTxPacket;
-}
-
-/*+
- *
- * Routine Description:
- * Handles probe response management frames.
- *
- *
- * Return Value:
- * none.
- *
- -*/
-
-static
-void
-s_vMgrRxProbeResponse(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket
-)
-{
- PKnownBSS pBSSList = NULL;
- WLAN_FR_PROBERESP sFrame;
- unsigned char byCurrChannel = pRxPacket->byRxChannel;
- ERPObject sERP;
- unsigned char byIEChannel = 0;
- bool bChannelHit = true;
-
- memset(&sFrame, 0, sizeof(WLAN_FR_PROBERESP));
- // decode the frame
- sFrame.len = pRxPacket->cbMPDULen;
- sFrame.pBuf = (unsigned char *)pRxPacket->p80211Header;
- vMgrDecodeProbeResponse(&sFrame);
-
- if ((sFrame.pqwTimestamp == NULL) ||
- (sFrame.pwBeaconInterval == NULL) ||
- (sFrame.pwCapInfo == NULL) ||
- (sFrame.pSSID == NULL) ||
- (sFrame.pSuppRates == NULL)) {
- pr_debug("Probe resp:Fail addr:[%p]\n",
- pRxPacket->p80211Header);
- DBG_PORT80(0xCC);
- return;
- }
-
- if (sFrame.pSSID->len == 0)
- pr_debug("Rx Probe resp: SSID len = 0\n");
-
- if (sFrame.pDSParms != NULL) {
- if (byCurrChannel > CB_MAX_CHANNEL_24G) {
-			// remap to the corresponding 802.11a channel number
- byIEChannel = get_channel_mapping(pMgmt->pAdapter, sFrame.pDSParms->byCurrChannel, PHY_TYPE_11A);
- } else {
- byIEChannel = sFrame.pDSParms->byCurrChannel;
- }
- if (byCurrChannel != byIEChannel) {
-			// adjust channel info because we received an adjacent-channel packet
- bChannelHit = false;
- byCurrChannel = byIEChannel;
- }
- } else {
- // no DS channel info
- bChannelHit = true;
- }
-
-//2008-0730-01<Add>by MikeLiu
- if (ChannelExceedZoneType(pDevice, byCurrChannel))
- return;
-
- if (sFrame.pERP != NULL) {
- sERP.byERP = sFrame.pERP->byContext;
- sERP.bERPExist = true;
- } else {
- sERP.bERPExist = false;
- sERP.byERP = 0;
- }
-
- // update or insert the bss
- pBSSList = BSSpAddrIsInBSSList((void *)pDevice, sFrame.pHdr->sA3.abyAddr3, sFrame.pSSID);
- if (pBSSList) {
- BSSbUpdateToBSSList((void *)pDevice,
- *sFrame.pqwTimestamp,
- *sFrame.pwBeaconInterval,
- *sFrame.pwCapInfo,
- byCurrChannel,
- bChannelHit,
- sFrame.pSSID,
- sFrame.pSuppRates,
- sFrame.pExtSuppRates,
- &sERP,
- sFrame.pRSN,
- sFrame.pRSNWPA,
- sFrame.pIE_Country,
- sFrame.pIE_Quiet,
- pBSSList,
- sFrame.len - WLAN_HDR_ADDR3_LEN,
-				    sFrame.pHdr->sA4.abyAddr4, // payload of probe response
- (void *)pRxPacket
-);
- } else {
- pr_debug("Probe resp/insert: RxChannel = : %d\n",
- byCurrChannel);
- BSSbInsertToBSSList((void *)pDevice,
- sFrame.pHdr->sA3.abyAddr3,
- *sFrame.pqwTimestamp,
- *sFrame.pwBeaconInterval,
- *sFrame.pwCapInfo,
- byCurrChannel,
- sFrame.pSSID,
- sFrame.pSuppRates,
- sFrame.pExtSuppRates,
- &sERP,
- sFrame.pRSN,
- sFrame.pRSNWPA,
- sFrame.pIE_Country,
- sFrame.pIE_Quiet,
- sFrame.len - WLAN_HDR_ADDR3_LEN,
-				    sFrame.pHdr->sA4.abyAddr4, // payload of probe response
- (void *)pRxPacket
-);
- }
-}
-
-/*+
- *
- * Routine Description: (AP) or (Ad-hoc STA)
- * Handles probe request management frames.
- *
- *
- * Return Value:
- * none.
- *
- -*/
-
-static
-void
-s_vMgrRxProbeRequest(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket
-)
-{
- WLAN_FR_PROBEREQ sFrame;
- CMD_STATUS Status;
- PSTxMgmtPacket pTxPacket;
- unsigned char byPHYType = BB_TYPE_11B;
-
-	// STA in Ad-hoc mode: if the latest TBTT beacon was transmitted
-	// successfully, the STA has to respond to this request.
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) ||
- ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) && pDevice->bBeaconSent)) {
- memset(&sFrame, 0, sizeof(WLAN_FR_PROBEREQ));
- // decode the frame
- sFrame.len = pRxPacket->cbMPDULen;
- sFrame.pBuf = (unsigned char *)pRxPacket->p80211Header;
- vMgrDecodeProbeRequest(&sFrame);
-
- if (sFrame.pSSID->len != 0) {
- if (sFrame.pSSID->len != ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->len)
- return;
- if (memcmp(sFrame.pSSID->abySSID,
- ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->abySSID,
- ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->len) != 0) {
- return;
- }
- }
-
- if ((sFrame.pSuppRates->len > 4) || (sFrame.pExtSuppRates != NULL))
- byPHYType = BB_TYPE_11G;
-
- // Probe response reply..
- pTxPacket = s_MgrMakeProbeResponse
- (
- pDevice,
- pMgmt,
- pMgmt->wCurrCapInfo,
- pMgmt->wCurrBeaconPeriod,
- pMgmt->uCurrChannel,
- 0,
- sFrame.pHdr->sA3.abyAddr2,
- (PWLAN_IE_SSID)pMgmt->abyCurrSSID,
- (unsigned char *)pMgmt->abyCurrBSSID,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates,
- byPHYType
-);
- if (pTxPacket != NULL) {
- /* send the frame */
- Status = csMgmt_xmit(pDevice, pTxPacket);
- if (Status != CMD_STATUS_PENDING)
- pr_debug("Mgt:Probe response tx failed\n");
- }
- }
-}
-
-/*+
- *
- * Routine Description:
- *
- * Entry point for the reception and handling of 802.11 management
- * frames. Makes a determination of the frame type and then calls
- * the appropriate function.
- *
- *
- * Return Value:
- * none.
- *
- -*/
-
-void
-vMgrRxManagePacket(
- void *hDeviceContext,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- bool bInScan = false;
- unsigned int uNodeIndex = 0;
- NODE_STATE eNodeState = 0;
- CMD_STATUS Status;
-
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- if (BSSDBbIsSTAInNodeDB(pMgmt, pRxPacket->p80211Header->sA3.abyAddr2, &uNodeIndex))
- eNodeState = pMgmt->sNodeDBTable[uNodeIndex].eNodeState;
- }
-
- switch (WLAN_GET_FC_FSTYPE((pRxPacket->p80211Header->sA3.wFrameCtl))) {
- case WLAN_FSTYPE_ASSOCREQ:
-		// Frame Class = 2
- pr_debug("rx assocreq\n");
- if (eNodeState < NODE_AUTH) {
- // send deauth notification
-			// reason = (6): class 2 frame received from a nonauthenticated STA
- vMgrDeAuthenBeginSta(pDevice,
- pMgmt,
- pRxPacket->p80211Header->sA3.abyAddr2,
- (6),
- &Status
-);
- pr_debug("wmgr: send vMgrDeAuthenBeginSta 1\n");
- } else {
- s_vMgrRxAssocRequest(pDevice, pMgmt, pRxPacket, uNodeIndex);
- }
- break;
-
- case WLAN_FSTYPE_ASSOCRESP:
-		// Frame Class = 2
- pr_debug("rx assocresp1\n");
- s_vMgrRxAssocResponse(pDevice, pMgmt, pRxPacket, false);
- pr_debug("rx assocresp2\n");
- break;
-
- case WLAN_FSTYPE_REASSOCREQ:
-		// Frame Class = 2
- pr_debug("rx reassocreq\n");
- // Todo: reassoc
- if (eNodeState < NODE_AUTH) {
- // send deauth notification
-			// reason = (6): class 2 frame received from a nonauthenticated STA
- vMgrDeAuthenBeginSta(pDevice,
- pMgmt,
- pRxPacket->p80211Header->sA3.abyAddr2,
- (6),
- &Status
-);
- pr_debug("wmgr: send vMgrDeAuthenBeginSta 2\n");
-
- }
- s_vMgrRxReAssocRequest(pDevice, pMgmt, pRxPacket, uNodeIndex);
- break;
-
- case WLAN_FSTYPE_REASSOCRESP:
-		// Frame Class = 2
- pr_debug("rx reassocresp\n");
- s_vMgrRxAssocResponse(pDevice, pMgmt, pRxPacket, true);
- break;
-
- case WLAN_FSTYPE_PROBEREQ:
-		// Frame Class = 0
- s_vMgrRxProbeRequest(pDevice, pMgmt, pRxPacket);
- break;
-
- case WLAN_FSTYPE_PROBERESP:
-		// Frame Class = 0
- pr_debug("rx proberesp\n");
-
- s_vMgrRxProbeResponse(pDevice, pMgmt, pRxPacket);
- break;
-
- case WLAN_FSTYPE_BEACON:
-		// Frame Class = 0
- if (pMgmt->eScanState != WMAC_NO_SCANNING)
- bInScan = true;
-
- s_vMgrRxBeacon(pDevice, pMgmt, pRxPacket, bInScan);
- break;
-
- case WLAN_FSTYPE_ATIM:
-		// Frame Class = 1
- pr_debug("rx atim\n");
- break;
-
- case WLAN_FSTYPE_DISASSOC:
-		// Frame Class = 2
- pr_debug("rx disassoc\n");
- if (eNodeState < NODE_AUTH) {
- // send deauth notification
-			// reason = (6): class 2 frame received from a nonauthenticated STA
- vMgrDeAuthenBeginSta(pDevice,
- pMgmt,
- pRxPacket->p80211Header->sA3.abyAddr2,
- (6),
- &Status
-);
- pr_debug("wmgr: send vMgrDeAuthenBeginSta 3\n");
- }
- s_vMgrRxDisassociation(pDevice, pMgmt, pRxPacket);
- break;
-
- case WLAN_FSTYPE_AUTHEN:
-		// Frame Class = 1
- pr_debug("rx authen\n");
- s_vMgrRxAuthentication(pDevice, pMgmt, pRxPacket);
- break;
-
- case WLAN_FSTYPE_DEAUTHEN:
-		// Frame Class = 1
- pr_debug("rx deauthen\n");
- s_vMgrRxDeauthentication(pDevice, pMgmt, pRxPacket);
- break;
-
- default:
- pr_debug("rx unknown mgmt\n");
- }
-}
-
-/*+
- *
- * Routine Description:
- *
- *
- * Prepare beacon to send
- *
- * Return Value:
- * true if success; false if failed.
- *
- -*/
-bool
-bMgrPrepareBeaconToSend(
- void *hDeviceContext,
- PSMgmtObject pMgmt
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSTxMgmtPacket pTxPacket;
-
- if (pDevice->bEncryptionEnable || pDevice->bEnable8021x)
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_PRIVACY(1);
- else
- pMgmt->wCurrCapInfo &= ~WLAN_SET_CAP_INFO_PRIVACY(1);
-
- pTxPacket = s_MgrMakeBeacon
- (
- pDevice,
- pMgmt,
- pMgmt->wCurrCapInfo,
- pMgmt->wCurrBeaconPeriod,
- pMgmt->uCurrChannel,
- pMgmt->wCurrATIMWindow,
- (PWLAN_IE_SSID)pMgmt->abyCurrSSID,
- (unsigned char *)pMgmt->abyCurrBSSID,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates
-);
-
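-	/* In Ad-hoc mode, don't transmit until an IBSS BSSID has actually
-	   been created. */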
- if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) &&
- (pMgmt->abyCurrBSSID[0] == 0))
- return false;
-
- csBeacon_xmit(pDevice, pTxPacket);
-
- return true;
-}
-
-/*+
- *
- * Routine Description:
- *
- * Log a warning message based on the contents of the Status
- * Code field of an 802.11 management frame. Defines are
- * derived from 802.11-1997 SPEC.
- *
- * Return Value:
- * none.
- *
- -*/
-static
-void
-s_vMgrLogStatus(
- PSMgmtObject pMgmt,
- unsigned short wStatus
-)
-{
- switch (wStatus) {
- case WLAN_MGMT_STATUS_UNSPEC_FAILURE:
- pr_info("Status code == Unspecified error\n");
- break;
- case WLAN_MGMT_STATUS_CAPS_UNSUPPORTED:
- pr_info("Status code == Can't support all requested capabilities\n");
- break;
- case WLAN_MGMT_STATUS_REASSOC_NO_ASSOC:
- pr_info("Status code == Reassoc denied, can't confirm original Association\n");
- break;
- case WLAN_MGMT_STATUS_ASSOC_DENIED_UNSPEC:
-		pr_info("Status code == Assoc denied, undefined in the spec\n");
- break;
- case WLAN_MGMT_STATUS_UNSUPPORTED_AUTHALG:
- pr_info("Status code == Peer doesn't support authen algorithm\n");
- break;
- case WLAN_MGMT_STATUS_RX_AUTH_NOSEQ:
- pr_info("Status code == Authen frame received out of sequence\n");
- break;
- case WLAN_MGMT_STATUS_CHALLENGE_FAIL:
- pr_info("Status code == Authen rejected, challenge failure\n");
- break;
- case WLAN_MGMT_STATUS_AUTH_TIMEOUT:
- pr_info("Status code == Authen rejected, timeout waiting for next frame\n");
- break;
- case WLAN_MGMT_STATUS_ASSOC_DENIED_BUSY:
- pr_info("Status code == Assoc denied, AP too busy\n");
- break;
- case WLAN_MGMT_STATUS_ASSOC_DENIED_RATES:
-		pr_info("Status code == Assoc denied, we don't have enough basic rates\n");
- break;
- case WLAN_MGMT_STATUS_ASSOC_DENIED_SHORTPREAMBLE:
- pr_info("Status code == Assoc denied, we do not support short preamble\n");
- break;
- case WLAN_MGMT_STATUS_ASSOC_DENIED_PBCC:
- pr_info("Status code == Assoc denied, we do not support PBCC\n");
- break;
- case WLAN_MGMT_STATUS_ASSOC_DENIED_AGILITY:
- pr_info("Status code == Assoc denied, we do not support channel agility\n");
- break;
- default:
- pr_info("Unknown status code %d\n", wStatus);
- break;
- }
-}
-
-/*
- *
- * Description:
- *      Add a BSSID to the PMKID candidate list.
- *
- * Parameters:
- * In:
- *      hDeviceContext - device structure pointer
- *      pbyBSSID - BSSID address to add
- *      psRSNCapObj - BSS's RSN capability object
- * Out:
- * none
- *
- * Return Value: true if the BSSID was added or updated; otherwise false.
- *
- -*/
-bool
-bAdd_PMKID_Candidate(
- void *hDeviceContext,
- unsigned char *pbyBSSID,
- PSRSNCapObject psRSNCapObj
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- struct pmkid_candidate *pCandidateList;
- unsigned int ii = 0;
-
- pr_debug("bAdd_PMKID_Candidate START: (%d)\n",
- (int)pDevice->gsPMKIDCandidate.NumCandidates);
-
- if ((pDevice == NULL) || (pbyBSSID == NULL) || (psRSNCapObj == NULL))
- return false;
-
- if (pDevice->gsPMKIDCandidate.NumCandidates >= MAX_PMKIDLIST)
- return false;
-
- // Update Old Candidate
- for (ii = 0; ii < pDevice->gsPMKIDCandidate.NumCandidates; ii++) {
- pCandidateList = &pDevice->gsPMKIDCandidate.CandidateList[ii];
- if (!memcmp(pCandidateList->BSSID, pbyBSSID, ETH_ALEN)) {
- if (psRSNCapObj->bRSNCapExist && (psRSNCapObj->wRSNCap & BIT0))
- pCandidateList->Flags |= NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED;
- else
- pCandidateList->Flags &= ~(NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED);
-
- return true;
- }
- }
-
- // New Candidate
- pCandidateList = &pDevice->gsPMKIDCandidate.CandidateList[pDevice->gsPMKIDCandidate.NumCandidates];
- if (psRSNCapObj->bRSNCapExist && (psRSNCapObj->wRSNCap & BIT0))
- pCandidateList->Flags |= NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED;
- else
- pCandidateList->Flags &= ~(NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED);
-
- memcpy(pCandidateList->BSSID, pbyBSSID, ETH_ALEN);
- pDevice->gsPMKIDCandidate.NumCandidates++;
- pr_debug("NumCandidates:%d\n",
- (int)pDevice->gsPMKIDCandidate.NumCandidates);
- return true;
-}
-
-/*
- *
- * Description:
- * Flush PMKID Candidate list.
- *
- * Parameters:
- * In:
- *      hDeviceContext - device structure pointer
- * Out:
- * none
- *
- * Return Value: none.
- *
- -*/
-void
-vFlush_PMKID_Candidate(
- void *hDeviceContext
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
-
- if (pDevice == NULL)
- return;
-
- memset(&pDevice->gsPMKIDCandidate, 0, sizeof(SPMKIDCandidateEvent));
-}
-
-static bool
-s_bCipherMatch(
- PKnownBSS pBSSNode,
- NDIS_802_11_ENCRYPTION_STATUS EncStatus,
- unsigned char *pbyCCSPK,
- unsigned char *pbyCCSGK
-)
-{
- unsigned char byMulticastCipher = KEY_CTL_INVALID;
- unsigned char byCipherMask = 0x00;
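-	/* byCipherMask collects the pairwise ciphers advertised by the BSS:
-	   bit 0x01 = WEP, 0x02 = TKIP, 0x04 = CCMP. */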
- int i;
-
- if (pBSSNode == NULL)
- return false;
-
- // check cap. of BSS
- if ((WLAN_GET_CAP_INFO_PRIVACY(pBSSNode->wCapInfo) != 0) &&
- (EncStatus == Ndis802_11Encryption1Enabled)) {
- // default is WEP only
- byMulticastCipher = KEY_CTL_WEP;
- }
-
- if ((WLAN_GET_CAP_INFO_PRIVACY(pBSSNode->wCapInfo) != 0) &&
- pBSSNode->bWPA2Valid &&
- //20080123-01,<Add> by Einsn Liu
- ((EncStatus == Ndis802_11Encryption3Enabled) || (EncStatus == Ndis802_11Encryption2Enabled))) {
- //WPA2
- // check Group Key Cipher
- if ((pBSSNode->byCSSGK == WLAN_11i_CSS_WEP40) ||
- (pBSSNode->byCSSGK == WLAN_11i_CSS_WEP104)) {
- byMulticastCipher = KEY_CTL_WEP;
- } else if (pBSSNode->byCSSGK == WLAN_11i_CSS_TKIP) {
- byMulticastCipher = KEY_CTL_TKIP;
- } else if (pBSSNode->byCSSGK == WLAN_11i_CSS_CCMP) {
- byMulticastCipher = KEY_CTL_CCMP;
- } else {
- byMulticastCipher = KEY_CTL_INVALID;
- }
-
- // check Pairwise Key Cipher
- for (i = 0; i < pBSSNode->wCSSPKCount; i++) {
- if ((pBSSNode->abyCSSPK[i] == WLAN_11i_CSS_WEP40) ||
- (pBSSNode->abyCSSPK[i] == WLAN_11i_CSS_WEP104)) {
-				// this should not happen as defined in 802.11i
- byCipherMask |= 0x01;
- } else if (pBSSNode->abyCSSPK[i] == WLAN_11i_CSS_TKIP) {
- byCipherMask |= 0x02;
- } else if (pBSSNode->abyCSSPK[i] == WLAN_11i_CSS_CCMP) {
- byCipherMask |= 0x04;
- } else if (pBSSNode->abyCSSPK[i] == WLAN_11i_CSS_USE_GROUP) {
-				// use group key only; ignore all others
- byCipherMask = 0;
- i = pBSSNode->wCSSPKCount;
- }
- }
-
- } else if ((WLAN_GET_CAP_INFO_PRIVACY(pBSSNode->wCapInfo) != 0) &&
- pBSSNode->bWPAValid &&
- ((EncStatus == Ndis802_11Encryption3Enabled) || (EncStatus == Ndis802_11Encryption2Enabled))) {
- //WPA
- // check Group Key Cipher
- if ((pBSSNode->byGKType == WPA_WEP40) ||
- (pBSSNode->byGKType == WPA_WEP104)) {
- byMulticastCipher = KEY_CTL_WEP;
- } else if (pBSSNode->byGKType == WPA_TKIP) {
- byMulticastCipher = KEY_CTL_TKIP;
- } else if (pBSSNode->byGKType == WPA_AESCCMP) {
- byMulticastCipher = KEY_CTL_CCMP;
- } else {
- byMulticastCipher = KEY_CTL_INVALID;
- }
-
- // check Pairwise Key Cipher
- for (i = 0; i < pBSSNode->wPKCount; i++) {
- if (pBSSNode->abyPKType[i] == WPA_TKIP) {
- byCipherMask |= 0x02;
- } else if (pBSSNode->abyPKType[i] == WPA_AESCCMP) {
- byCipherMask |= 0x04;
- } else if (pBSSNode->abyPKType[i] == WPA_NONE) {
-				// use group key only; ignore all others
- byCipherMask = 0;
- i = pBSSNode->wPKCount;
- }
- }
- }
-
- pr_debug("%d, %d, %d, %d, EncStatus:%d\n",
- byMulticastCipher, byCipherMask,
- pBSSNode->bWPAValid, pBSSNode->bWPA2Valid, EncStatus);
-
- // mask our cap. with BSS
- if (EncStatus == Ndis802_11Encryption1Enabled) {
-		// To support Cisco migration mode, don't care about the pairwise key cipher
- if ((byMulticastCipher == KEY_CTL_WEP) &&
- (byCipherMask == 0)) {
- *pbyCCSGK = KEY_CTL_WEP;
- *pbyCCSPK = KEY_CTL_NONE;
- return true;
- } else {
- return false;
- }
-
- } else if (EncStatus == Ndis802_11Encryption2Enabled) {
- if ((byMulticastCipher == KEY_CTL_TKIP) &&
- (byCipherMask == 0)) {
- *pbyCCSGK = KEY_CTL_TKIP;
- *pbyCCSPK = KEY_CTL_NONE;
- return true;
- } else if ((byMulticastCipher == KEY_CTL_WEP) &&
- ((byCipherMask & 0x02) != 0)) {
- *pbyCCSGK = KEY_CTL_WEP;
- *pbyCCSPK = KEY_CTL_TKIP;
- return true;
- } else if ((byMulticastCipher == KEY_CTL_TKIP) &&
- ((byCipherMask & 0x02) != 0)) {
- *pbyCCSGK = KEY_CTL_TKIP;
- *pbyCCSPK = KEY_CTL_TKIP;
- return true;
- } else {
- return false;
- }
- } else if (EncStatus == Ndis802_11Encryption3Enabled) {
- if ((byMulticastCipher == KEY_CTL_CCMP) &&
- (byCipherMask == 0)) {
-			// When CCMP is enabled, "Use group cipher suite" shall not be a valid option.
- return false;
- } else if ((byMulticastCipher == KEY_CTL_WEP) &&
- ((byCipherMask & 0x04) != 0)) {
- *pbyCCSGK = KEY_CTL_WEP;
- *pbyCCSPK = KEY_CTL_CCMP;
- return true;
- } else if ((byMulticastCipher == KEY_CTL_TKIP) &&
- ((byCipherMask & 0x04) != 0)) {
- *pbyCCSGK = KEY_CTL_TKIP;
- *pbyCCSPK = KEY_CTL_CCMP;
- return true;
- } else if ((byMulticastCipher == KEY_CTL_CCMP) &&
- ((byCipherMask & 0x04) != 0)) {
- *pbyCCSGK = KEY_CTL_CCMP;
- *pbyCCSPK = KEY_CTL_CCMP;
- return true;
- } else {
- return false;
- }
- }
- return true;
-}
diff --git a/drivers/staging/vt6655/wmgr.h b/drivers/staging/vt6655/wmgr.h
deleted file mode 100644
index ce939b30ac2..00000000000
--- a/drivers/staging/vt6655/wmgr.h
+++ /dev/null
@@ -1,420 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: wmgr.h
- *
- * Purpose:
- *
- * Author: lyndon chen
- *
- * Date: Jan 2, 2003
- *
- * Functions:
- *
- * Revision History:
- *
- */
-
-#ifndef __WMGR_H__
-#define __WMGR_H__
-
-#include "ttype.h"
-#include "80211mgr.h"
-#include "80211hdr.h"
-#include "wcmd.h"
-#include "bssdb.h"
-#include "wpa2.h"
-#include "vntwifi.h"
-#include "card.h"
-
-/*--------------------- Export Definitions -------------------------*/
-
-// Scan time
-#define PROBE_DELAY 100 // (us)
-#define SWITCH_CHANNEL_DELAY 200 // (us)
-#define WLAN_SCAN_MINITIME 25 // (ms)
-#define WLAN_SCAN_MAXTIME 100 // (ms)
-#define TRIVIAL_SYNC_DIFFERENCE 0 // (us)
-#define DEFAULT_IBSS_BI 100 // (ms)
-
-#define WCMD_ACTIVE_SCAN_TIME 50 //(ms)
-#define WCMD_PASSIVE_SCAN_TIME 100 //(ms)
-
-#define DEFAULT_MSDU_LIFETIME 512 // ms
-#define DEFAULT_MSDU_LIFETIME_RES_64us 8000 // 64us
-
-#define DEFAULT_MGN_LIFETIME 8 // ms
-#define DEFAULT_MGN_LIFETIME_RES_64us 125 // 64us
-
-#define MAKE_BEACON_RESERVED 10 //(us)
-
-#define TIM_MULTICAST_MASK 0x01
-#define TIM_BITMAPOFFSET_MASK 0xFE
-#define DEFAULT_DTIM_PERIOD 1
-
-#define AP_LONG_RETRY_LIMIT 4
-
-#define DEFAULT_IBSS_CHANNEL 6 //2.4G
-
-/*--------------------- Export Classes ----------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Types ------------------------------*/
-#define timer_expire(timer, next_tick) mod_timer(&timer, RUN_AT(next_tick))
-typedef void (*TimerFunction)(unsigned long);
-
-//+++ NDIS related
-
-typedef unsigned char NDIS_802_11_MAC_ADDRESS[6];
-typedef struct _NDIS_802_11_AI_REQFI {
- unsigned short Capabilities;
- unsigned short ListenInterval;
- NDIS_802_11_MAC_ADDRESS CurrentAPAddress;
-} NDIS_802_11_AI_REQFI, *PNDIS_802_11_AI_REQFI;
-
-typedef struct _NDIS_802_11_AI_RESFI {
- unsigned short Capabilities;
- unsigned short StatusCode;
- unsigned short AssociationId;
-} NDIS_802_11_AI_RESFI, *PNDIS_802_11_AI_RESFI;
-
-typedef struct _NDIS_802_11_ASSOCIATION_INFORMATION {
- unsigned long Length;
- unsigned short AvailableRequestFixedIEs;
- NDIS_802_11_AI_REQFI RequestFixedIEs;
- unsigned long RequestIELength;
- unsigned long OffsetRequestIEs;
- unsigned short AvailableResponseFixedIEs;
- NDIS_802_11_AI_RESFI ResponseFixedIEs;
- unsigned long ResponseIELength;
- unsigned long OffsetResponseIEs;
-} NDIS_802_11_ASSOCIATION_INFORMATION, *PNDIS_802_11_ASSOCIATION_INFORMATION;
-
-typedef struct tagSAssocInfo {
- NDIS_802_11_ASSOCIATION_INFORMATION AssocInfo;
- unsigned char abyIEs[WLAN_BEACON_FR_MAXLEN+WLAN_BEACON_FR_MAXLEN];
- // store ReqIEs set by OID_802_11_ASSOCIATION_INFORMATION
- unsigned long RequestIELength;
- unsigned char abyReqIEs[WLAN_BEACON_FR_MAXLEN];
-} SAssocInfo, *PSAssocInfo;
-//---
-
-typedef enum tagWMAC_SCAN_TYPE {
- WMAC_SCAN_ACTIVE,
- WMAC_SCAN_PASSIVE,
- WMAC_SCAN_HYBRID
-} WMAC_SCAN_TYPE, *PWMAC_SCAN_TYPE;
-
-typedef enum tagWMAC_SCAN_STATE {
- WMAC_NO_SCANNING,
- WMAC_IS_SCANNING,
- WMAC_IS_PROBEPENDING
-} WMAC_SCAN_STATE, *PWMAC_SCAN_STATE;
-
-// Notes:
-// Basic Service Set state explained as follows:
-// WMAC_STATE_IDLE : no BSS is selected (Adhoc or Infra)
-// WMAC_STATE_STARTED : no BSS is selected, start own IBSS (Adhoc only)
-// WMAC_STATE_JOINTED : BSS is selected and synchronized (Adhoc or Infra)
-// WMAC_STATE_AUTHPENDING : Authentication pending (Infra)
-// WMAC_STATE_AUTH : Authenticated (Infra)
-// WMAC_STATE_ASSOCPENDING : Association pending (Infra)
-// WMAC_STATE_ASSOC : Associated (Infra)
-
-typedef enum tagWMAC_BSS_STATE {
- WMAC_STATE_IDLE,
- WMAC_STATE_STARTED,
- WMAC_STATE_JOINTED,
- WMAC_STATE_AUTHPENDING,
- WMAC_STATE_AUTH,
- WMAC_STATE_ASSOCPENDING,
- WMAC_STATE_ASSOC
-} WMAC_BSS_STATE, *PWMAC_BSS_STATE;
-
-// WMAC selected running mode
-typedef enum tagWMAC_CURRENT_MODE {
- WMAC_MODE_STANDBY,
- WMAC_MODE_ESS_STA,
- WMAC_MODE_IBSS_STA,
- WMAC_MODE_ESS_AP
-} WMAC_CURRENT_MODE, *PWMAC_CURRENT_MODE;
-
-/*
- typedef enum tagWMAC_POWER_MODE {
- WMAC_POWER_CAM,
- WMAC_POWER_FAST,
- WMAC_POWER_MAX
-
- } WMAC_POWER_MODE, *PWMAC_POWER_MODE;
-*/
-
-// Tx Management Packet descriptor
-typedef struct tagSTxMgmtPacket {
- PUWLAN_80211HDR p80211Header;
- unsigned int cbMPDULen;
- unsigned int cbPayloadLen;
-} STxMgmtPacket, *PSTxMgmtPacket;
-
-// Rx Management Packet descriptor
-typedef struct tagSRxMgmtPacket {
- PUWLAN_80211HDR p80211Header;
- u64 qwLocalTSF;
- unsigned int cbMPDULen;
- unsigned int cbPayloadLen;
- unsigned int uRSSI;
- unsigned char bySQ;
- unsigned char byRxRate;
- unsigned char byRxChannel;
-} SRxMgmtPacket, *PSRxMgmtPacket;
-
-typedef struct tagSMgmtObject {
- void *pAdapter;
- // MAC address
- unsigned char abyMACAddr[WLAN_ADDR_LEN];
-
- // Configuration Mode
-	WMAC_CONFIG_MODE eConfigMode; // MAC pre-configured mode
- CARD_PHY_TYPE eCurrentPHYMode;
- CARD_PHY_TYPE eConfigPHYMode;
-
- // Operation state variables
- WMAC_CURRENT_MODE eCurrMode; // MAC current connection mode
- WMAC_BSS_STATE eCurrState; // MAC current BSS state
-
- PKnownBSS pCurrBSS;
- unsigned char byCSSGK;
- unsigned char byCSSPK;
-
- // Current state vars
- unsigned int uCurrChannel;
- unsigned char abyCurrSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
- unsigned char abyCurrExtSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
- unsigned char abyCurrSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
- unsigned char abyCurrBSSID[WLAN_BSSID_LEN];
- unsigned short wCurrCapInfo;
- unsigned short wCurrAID;
- unsigned short wCurrATIMWindow;
- unsigned short wCurrBeaconPeriod;
- bool bIsDS;
- unsigned char byERPContext;
-
- CMD_STATE eCommandState;
- unsigned int uScanChannel;
-
-	// Desired BSS to join vars
- unsigned char abyDesireSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
- unsigned char abyDesireBSSID[WLAN_BSSID_LEN];
-
- // Adhoc or AP configuration vars
- unsigned short wIBSSBeaconPeriod;
- unsigned short wIBSSATIMWindow;
- unsigned int uIBSSChannel;
- unsigned char abyIBSSSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
- unsigned char byAPBBType;
- unsigned char abyWPAIE[MAX_WPA_IE_LEN];
- unsigned short wWPAIELen;
-
- unsigned int uAssocCount;
- bool bMoreData;
-
- // Scan state vars
- WMAC_SCAN_STATE eScanState;
- WMAC_SCAN_TYPE eScanType;
- unsigned int uScanStartCh;
- unsigned int uScanEndCh;
- unsigned short wScanSteps;
- unsigned int uScanBSSType;
-	// Desired scan vars
- unsigned char abyScanSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
- unsigned char abyScanBSSID[WLAN_BSSID_LEN];
-
- // Privacy
- WMAC_AUTHENTICATION_MODE eAuthenMode;
- WMAC_ENCRYPTION_MODE eEncryptionMode;
- bool bShareKeyAlgorithm;
- unsigned char abyChallenge[WLAN_CHALLENGE_LEN];
- bool bPrivacyInvoked;
-
- // Received beacon state vars
- bool bInTIM;
- bool bMulticastTIM;
- unsigned char byDTIMCount;
- unsigned char byDTIMPeriod;
-
- // Power saving state vars
- WMAC_POWER_MODE ePSMode;
- unsigned short wListenInterval;
- unsigned short wCountToWakeUp;
- bool bInTIMWake;
- unsigned char *pbyPSPacketPool;
- unsigned char byPSPacketPool[sizeof(STxMgmtPacket) + WLAN_NULLDATA_FR_MAXLEN];
- bool bRxBeaconInTBTTWake;
- unsigned char abyPSTxMap[MAX_NODE_NUM + 1];
-
- // management command related
- unsigned int uCmdBusy;
- unsigned int uCmdHostAPBusy;
-
- // management packet pool
- unsigned char *pbyMgmtPacketPool;
- unsigned char byMgmtPacketPool[sizeof(STxMgmtPacket) + WLAN_A3FR_MAXLEN];
-
- // One second callback timer
- struct timer_list sTimerSecondCallback;
-
- // Temporarily Rx Mgmt Packet Descriptor
- SRxMgmtPacket sRxPacket;
-
- // link list of known bss's (scan results)
- KnownBSS sBSSList[MAX_BSS_NUM];
-
-	// table list of known nodes
-	// sNodeDBTable[0] is reserved for the AP under Infra mode
-	// sNodeDBTable[0] is reserved for Multicast under adhoc/AP mode
- KnownNodeDB sNodeDBTable[MAX_NODE_NUM + 1];
-
- // WPA2 PMKID Cache
- SPMKIDCache gsPMKIDCache;
- bool bRoaming;
-
- // rate fall back vars
-
- // associate info
- SAssocInfo sAssocInfo;
-
- // for 802.11h
- bool b11hEnable;
- bool bSwitchChannel;
- unsigned char byNewChannel;
- PWLAN_IE_MEASURE_REP pCurrMeasureEIDRep;
- unsigned int uLengthOfRepEIDs;
- unsigned char abyCurrentMSRReq[sizeof(STxMgmtPacket) + WLAN_A3FR_MAXLEN];
- unsigned char abyCurrentMSRRep[sizeof(STxMgmtPacket) + WLAN_A3FR_MAXLEN];
- unsigned char abyIECountry[WLAN_A3FR_MAXLEN];
- unsigned char abyIBSSDFSOwner[6];
- unsigned char byIBSSDFSRecovery;
-
- struct sk_buff skb;
-} SMgmtObject, *PSMgmtObject;
-
-/*--------------------- Export Macros ------------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-void
-vMgrObjectInit(
- void *hDeviceContext
-);
-
-void
-vMgrTimerInit(
- void *hDeviceContext
-);
-
-void
-vMgrObjectReset(
- void *hDeviceContext
-);
-
-void
-vMgrAssocBeginSta(
- void *hDeviceContext,
- PSMgmtObject pMgmt,
- PCMD_STATUS pStatus
-);
-
-void
-vMgrReAssocBeginSta(
- void *hDeviceContext,
- PSMgmtObject pMgmt,
- PCMD_STATUS pStatus
-);
-
-void
-vMgrDisassocBeginSta(
- void *hDeviceContext,
- PSMgmtObject pMgmt,
- unsigned char *abyDestAddress,
- unsigned short wReason,
- PCMD_STATUS pStatus
-);
-
-void
-vMgrAuthenBeginSta(
- void *hDeviceContext,
- PSMgmtObject pMgmt,
- PCMD_STATUS pStatus
-);
-
-void
-vMgrCreateOwnIBSS(
- void *hDeviceContext,
- PCMD_STATUS pStatus
-);
-
-void
-vMgrJoinBSSBegin(
- void *hDeviceContext,
- PCMD_STATUS pStatus
-);
-
-void
-vMgrRxManagePacket(
- void *hDeviceContext,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket
-);
-
-/*
- void
- vMgrScanBegin(
- void *hDeviceContext,
- PCMD_STATUS pStatus
-);
-*/
-
-void
-vMgrDeAuthenBeginSta(
- void *hDeviceContext,
- PSMgmtObject pMgmt,
- unsigned char *abyDestAddress,
- unsigned short wReason,
- PCMD_STATUS pStatus
-);
-
-bool
-bMgrPrepareBeaconToSend(
- void *hDeviceContext,
- PSMgmtObject pMgmt
-);
-
-bool
-bAdd_PMKID_Candidate(
- void *hDeviceContext,
- unsigned char *pbyBSSID,
- PSRSNCapObject psRSNCapObj
-);
-
-void
-vFlush_PMKID_Candidate(
- void *hDeviceContext
-);
-
-#endif // __WMGR_H__
diff --git a/drivers/staging/vt6655/wpa.c b/drivers/staging/vt6655/wpa.c
deleted file mode 100644
index 5d4eca8512a..00000000000
--- a/drivers/staging/vt6655/wpa.c
+++ /dev/null
@@ -1,300 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: wpa.c
- *
- * Purpose: Handles WPA (RSN) information element parsing
- *
- * Functions:
- * WPA_ParseRSN - Parse RSN IE.
- *
- * Revision History:
- *
- * Author: Kyle Hsu
- *
- * Date: July 14, 2003
- *
- */
-
-#include "ttype.h"
-#include "tmacro.h"
-#include "tether.h"
-#include "device.h"
-#include "80211hdr.h"
-#include "bssdb.h"
-#include "wmgr.h"
-#include "wpa.h"
-#include "80211mgr.h"
-
-/*--------------------- Static Variables --------------------------*/
-static const unsigned char abyOUI00[4] = { 0x00, 0x50, 0xf2, 0x00 };
-static const unsigned char abyOUI01[4] = { 0x00, 0x50, 0xf2, 0x01 };
-static const unsigned char abyOUI02[4] = { 0x00, 0x50, 0xf2, 0x02 };
-static const unsigned char abyOUI03[4] = { 0x00, 0x50, 0xf2, 0x03 };
-static const unsigned char abyOUI04[4] = { 0x00, 0x50, 0xf2, 0x04 };
-static const unsigned char abyOUI05[4] = { 0x00, 0x50, 0xf2, 0x05 };
-
-/*+
- *
- * Description:
- * Clear RSN information in BSSList.
- *
- * Parameters:
- * In:
- * pBSSList - BSS list.
- * Out:
- * none
- *
- * Return Value: none.
- *
- -*/
-
-void
-WPA_ClearRSN(
- PKnownBSS pBSSList
-)
-{
- int ii;
-
- pBSSList->byGKType = WPA_TKIP;
- for (ii = 0; ii < 4; ii++)
- pBSSList->abyPKType[ii] = WPA_TKIP;
- pBSSList->wPKCount = 0;
- for (ii = 0; ii < 4; ii++)
- pBSSList->abyAuthType[ii] = WPA_AUTH_IEEE802_1X;
- pBSSList->wAuthCount = 0;
- pBSSList->byDefaultK_as_PK = 0;
- pBSSList->byReplayIdx = 0;
- pBSSList->sRSNCapObj.bRSNCapExist = false;
- pBSSList->sRSNCapObj.wRSNCap = 0;
- pBSSList->bWPAValid = false;
-}
-
-/*+
- *
- * Description:
- * Parse RSN IE.
- *
- * Parameters:
- * In:
- * pBSSList - BSS list.
- * pRSN - Pointer to the RSN IE.
- * Out:
- * none
- *
- * Return Value: none.
- *
- -*/
-void
-WPA_ParseRSN(
- PKnownBSS pBSSList,
- PWLAN_IE_RSN_EXT pRSN
-)
-{
- PWLAN_IE_RSN_AUTH pIE_RSN_Auth = NULL;
- int i, j, m, n = 0;
- unsigned char *pbyCaps;
-
- WPA_ClearRSN(pBSSList);
-
- pr_debug("WPA_ParseRSN: [%d]\n", pRSN->len);
-
- // information element header makes sense
- if ((pRSN->len >= 6) // oui1(4)+ver(2)
- && (pRSN->byElementID == WLAN_EID_RSN_WPA) && !memcmp(pRSN->abyOUI, abyOUI01, 4)
- && (pRSN->wVersion == 1)) {
- pr_debug("Legal RSN\n");
- // update each variable if pRSN is long enough to contain the variable
- if (pRSN->len >= 10) {
- //OUI1(4)+ver(2)+GKSuite(4)
- if (!memcmp(pRSN->abyMulticast, abyOUI01, 4))
- pBSSList->byGKType = WPA_WEP40;
- else if (!memcmp(pRSN->abyMulticast, abyOUI02, 4))
- pBSSList->byGKType = WPA_TKIP;
- else if (!memcmp(pRSN->abyMulticast, abyOUI03, 4))
- pBSSList->byGKType = WPA_AESWRAP;
- else if (!memcmp(pRSN->abyMulticast, abyOUI04, 4))
- pBSSList->byGKType = WPA_AESCCMP;
- else if (!memcmp(pRSN->abyMulticast, abyOUI05, 4))
- pBSSList->byGKType = WPA_WEP104;
- else
- // any vendor checks here
- pBSSList->byGKType = WPA_NONE;
-
- pr_debug("byGKType: %x\n", pBSSList->byGKType);
- }
-
- if (pRSN->len >= 12) {
- //oui1(4)+ver(2)+GKS(4)+PKSCnt(2)
- j = 0;
- pr_debug("wPKCount: %d, sizeof(pBSSList->abyPKType): %zu\n",
- pRSN->wPKCount, sizeof(pBSSList->abyPKType));
- for (i = 0; (i < pRSN->wPKCount) && (j < ARRAY_SIZE(pBSSList->abyPKType)); i++) {
- if (pRSN->len >= 12+i*4+4) { //oui1(4)+ver(2)+GKS(4)+PKSCnt(2)+PKS(4*i)
- if (!memcmp(pRSN->PKSList[i].abyOUI, abyOUI00, 4))
- pBSSList->abyPKType[j++] = WPA_NONE;
- else if (!memcmp(pRSN->PKSList[i].abyOUI, abyOUI02, 4))
- pBSSList->abyPKType[j++] = WPA_TKIP;
- else if (!memcmp(pRSN->PKSList[i].abyOUI, abyOUI03, 4))
- pBSSList->abyPKType[j++] = WPA_AESWRAP;
- else if (!memcmp(pRSN->PKSList[i].abyOUI, abyOUI04, 4))
- pBSSList->abyPKType[j++] = WPA_AESCCMP;
- else
- // any vendor checks here
- ;
- } else
- break;
- }
- pBSSList->wPKCount = (unsigned short)j;
- pr_debug("wPKCount: %d\n", pBSSList->wPKCount);
- }
-
- m = pRSN->wPKCount;
- pr_debug("m: %d\n", m);
- pr_debug("14+m*4: %d\n", 14+m*4);
-
- if (pRSN->len >= 14+m*4) { //oui1(4)+ver(2)+GKS(4)+PKSCnt(2)+PKS(4*m)+AKC(2)
- // overlay IE_RSN_Auth structure into correct place
- pIE_RSN_Auth = (PWLAN_IE_RSN_AUTH) pRSN->PKSList[m].abyOUI;
- j = 0;
- pr_debug("wAuthCount: %d, sizeof(pBSSList->abyAuthType): %zu\n",
- pIE_RSN_Auth->wAuthCount,
- sizeof(pBSSList->abyAuthType));
- for (i = 0; (i < pIE_RSN_Auth->wAuthCount) && (j < ARRAY_SIZE(pBSSList->abyAuthType)); i++) {
- if (pRSN->len >= 14+4+(m+i)*4) { //oui1(4)+ver(2)+GKS(4)+PKSCnt(2)+PKS(4*m)+AKC(2)+AKS(4*i)
- if (!memcmp(pIE_RSN_Auth->AuthKSList[i].abyOUI, abyOUI01, 4))
- pBSSList->abyAuthType[j++] = WPA_AUTH_IEEE802_1X;
- else if (!memcmp(pIE_RSN_Auth->AuthKSList[i].abyOUI, abyOUI02, 4))
- pBSSList->abyAuthType[j++] = WPA_AUTH_PSK;
- else
- // any vendor checks here
- ;
- } else
- break;
-
- }
- if (j > 0)
- pBSSList->wAuthCount = (unsigned short)j;
- pr_debug("wAuthCount: %d\n", pBSSList->wAuthCount);
- }
-
- if (pIE_RSN_Auth != NULL) {
- n = pIE_RSN_Auth->wAuthCount;
-
- pr_debug("n: %d\n", n);
- pr_debug("14+4+(m+n)*4: %d\n", 14+4+(m+n)*4);
-
- if (pRSN->len+2 >= 14+4+(m+n)*4) { //oui1(4)+ver(2)+GKS(4)+PKSCnt(2)+PKS(4*m)+AKC(2)+AKS(4*n)+Cap(2)
- pbyCaps = (unsigned char *)pIE_RSN_Auth->AuthKSList[n].abyOUI;
- pBSSList->byDefaultK_as_PK = (*pbyCaps) & WPA_GROUPFLAG;
- pBSSList->byReplayIdx = 2 << ((*pbyCaps >> WPA_REPLAYBITSSHIFT) & WPA_REPLAYBITS);
- pBSSList->sRSNCapObj.bRSNCapExist = true;
- pBSSList->sRSNCapObj.wRSNCap = *(unsigned short *)pbyCaps;
- }
- }
- pBSSList->bWPAValid = true;
- }
-}
-
-/*+
- *
- * Description:
- * Search RSN information in BSSList.
- *
- * Parameters:
- * In:
- * byCmd - Search type
- * byEncrypt- Encrypt Type
- * pBSSList - BSS list
- * Out:
- * none
- *
- * Return Value: none.
- *
- -*/
-bool
-WPA_SearchRSN(
- unsigned char byCmd,
- unsigned char byEncrypt,
- PKnownBSS pBSSList
-)
-{
- int ii;
- unsigned char byPKType = WPA_NONE;
-
- if (!pBSSList->bWPAValid)
- return false;
-
- switch (byCmd) {
- case 0:
-
- if (byEncrypt != pBSSList->byGKType)
- return false;
-
- if (pBSSList->wPKCount > 0) {
- for (ii = 0; ii < pBSSList->wPKCount; ii++) {
- if (pBSSList->abyPKType[ii] == WPA_AESCCMP)
- byPKType = WPA_AESCCMP;
- else if ((pBSSList->abyPKType[ii] == WPA_TKIP) && (byPKType != WPA_AESCCMP))
- byPKType = WPA_TKIP;
- else if ((pBSSList->abyPKType[ii] == WPA_WEP40) && (byPKType != WPA_AESCCMP) && (byPKType != WPA_TKIP))
- byPKType = WPA_WEP40;
- else if ((pBSSList->abyPKType[ii] == WPA_WEP104) && (byPKType != WPA_AESCCMP) && (byPKType != WPA_TKIP))
- byPKType = WPA_WEP104;
- }
- if (byEncrypt != byPKType)
- return false;
- }
- return true;
-
- default:
- break;
- }
- return false;
-}
-
-/*+
- *
- * Description:
- * Check if RSN IE makes sense.
- *
- * Parameters:
- * In:
- * pRSN - Pointer to the RSN IE.
- * Out:
- * none
- *
- * Return Value: none.
- *
- -*/
-bool
-WPAb_Is_RSN(
- PWLAN_IE_RSN_EXT pRSN
-)
-{
- if (pRSN == NULL)
- return false;
-
- if ((pRSN->len >= 6) && // oui1(4)+ver(2)
- (pRSN->byElementID == WLAN_EID_RSN_WPA) && !memcmp(pRSN->abyOUI, abyOUI01, 4) &&
- (pRSN->wVersion == 1)) {
- return true;
- } else
- return false;
-}
diff --git a/drivers/staging/vt6655/wpa.h b/drivers/staging/vt6655/wpa.h
deleted file mode 100644
index 1d1918a1264..00000000000
--- a/drivers/staging/vt6655/wpa.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: wpa.h
- *
- * Purpose: Defines the macros, types, and functions for dealing
- *          with WPA information.
- *
- * Author: Kyle Hsu
- *
- * Date: Jul 14, 2003
- *
- */
-
-#ifndef __WPA_H__
-#define __WPA_H__
-
-#include "ttype.h"
-#include "80211hdr.h"
-
-/*--------------------- Export Definitions -------------------------*/
-
-#define WPA_NONE 0
-#define WPA_WEP40 1
-#define WPA_TKIP 2
-#define WPA_AESWRAP 3
-#define WPA_AESCCMP 4
-#define WPA_WEP104 5
-#define WPA_AUTH_IEEE802_1X 1
-#define WPA_AUTH_PSK 2
-
-#define WPA_GROUPFLAG 0x02
-#define WPA_REPLAYBITSSHIFT 2
-#define WPA_REPLAYBITS 0x03
-
-/*--------------------- Export Classes ----------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Types ------------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-void
-WPA_ClearRSN(
- PKnownBSS pBSSList
-);
-
-void
-WPA_ParseRSN(
- PKnownBSS pBSSList,
- PWLAN_IE_RSN_EXT pRSN
-);
-
-bool
-WPA_SearchRSN(
- unsigned char byCmd,
- unsigned char byEncrypt,
- PKnownBSS pBSSList
-);
-
-bool
-WPAb_Is_RSN(
- PWLAN_IE_RSN_EXT pRSN
-);
-
-#endif // __WPA_H__
diff --git a/drivers/staging/vt6655/wpa2.c b/drivers/staging/vt6655/wpa2.c
deleted file mode 100644
index bb335ef5117..00000000000
--- a/drivers/staging/vt6655/wpa2.c
+++ /dev/null
@@ -1,359 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: wpa2.c
- *
- * Purpose: Handles WPA2 (RSN) information element parsing and construction
- *
- * Functions:
- *
- * Revision History:
- *
- * Author: Yiching Chen
- *
- * Date: Oct. 4, 2004
- *
- */
-
-#include "wpa2.h"
-#include "device.h"
-#include "wmgr.h"
-
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Variables --------------------------*/
-
-static const unsigned char abyOUIGK[4] = { 0x00, 0x0F, 0xAC, 0x00 };
-static const unsigned char abyOUIWEP40[4] = { 0x00, 0x0F, 0xAC, 0x01 };
-static const unsigned char abyOUIWEP104[4] = { 0x00, 0x0F, 0xAC, 0x05 };
-static const unsigned char abyOUITKIP[4] = { 0x00, 0x0F, 0xAC, 0x02 };
-static const unsigned char abyOUICCMP[4] = { 0x00, 0x0F, 0xAC, 0x04 };
-
-static const unsigned char abyOUI8021X[4] = { 0x00, 0x0F, 0xAC, 0x01 };
-static const unsigned char abyOUIPSK[4] = { 0x00, 0x0F, 0xAC, 0x02 };
-
-/*--------------------- Static Functions --------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-/*+
- *
- * Description:
- * Clear RSN information in BSSList.
- *
- * Parameters:
- * In:
- * pBSSNode - BSS list.
- * Out:
- * none
- *
- * Return Value: none.
- *
- -*/
-void
-WPA2_ClearRSN(
- PKnownBSS pBSSNode
-)
-{
- int ii;
-
- pBSSNode->bWPA2Valid = false;
-
- pBSSNode->byCSSGK = WLAN_11i_CSS_CCMP;
- for (ii = 0; ii < 4; ii++)
- pBSSNode->abyCSSPK[ii] = WLAN_11i_CSS_CCMP;
- pBSSNode->wCSSPKCount = 1;
- for (ii = 0; ii < 4; ii++)
- pBSSNode->abyAKMSSAuthType[ii] = WLAN_11i_AKMSS_802_1X;
- pBSSNode->wAKMSSAuthCount = 1;
- pBSSNode->sRSNCapObj.bRSNCapExist = false;
- pBSSNode->sRSNCapObj.wRSNCap = 0;
-}
-
-/*+
- *
- * Description:
- * Parse RSN IE.
- *
- * Parameters:
- * In:
- * pBSSNode - BSS list.
- * pRSN - Pointer to the RSN IE.
- * Out:
- * none
- *
- * Return Value: none.
- *
- -*/
-void
-WPA2vParseRSN(
- PKnownBSS pBSSNode,
- PWLAN_IE_RSN pRSN
-)
-{
- int i, j;
- unsigned short m = 0, n = 0;
- unsigned char *pbyOUI;
- bool bUseGK = false;
-
- pr_debug("WPA2_ParseRSN: [%d]\n", pRSN->len);
-
- WPA2_ClearRSN(pBSSNode);
-
- if (pRSN->len == 2) { // ver(2)
- if ((pRSN->byElementID == WLAN_EID_RSN) && (pRSN->wVersion == 1))
- pBSSNode->bWPA2Valid = true;
-
- return;
- }
-
- if (pRSN->len < 6) { // ver(2) + GK(4)
- // invalid CSS, P802.11i/D10.0, p31
- return;
- }
-
- // information element header makes sense
- if ((pRSN->byElementID == WLAN_EID_RSN) &&
- (pRSN->wVersion == 1)) {
- pr_debug("Legal 802.11i RSN\n");
-
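-		/* abyRSN[] starts right after the 2-byte version field, so
-		   abyRSN[0..3] is the group cipher suite selector. */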
- pbyOUI = &(pRSN->abyRSN[0]);
- if (!memcmp(pbyOUI, abyOUIWEP40, 4))
- pBSSNode->byCSSGK = WLAN_11i_CSS_WEP40;
- else if (!memcmp(pbyOUI, abyOUITKIP, 4))
- pBSSNode->byCSSGK = WLAN_11i_CSS_TKIP;
- else if (!memcmp(pbyOUI, abyOUICCMP, 4))
- pBSSNode->byCSSGK = WLAN_11i_CSS_CCMP;
- else if (!memcmp(pbyOUI, abyOUIWEP104, 4))
- pBSSNode->byCSSGK = WLAN_11i_CSS_WEP104;
- else if (!memcmp(pbyOUI, abyOUIGK, 4)) {
- // invalid CSS, P802.11i/D10.0, p32
- return;
- } else
- // any vendor checks here
- pBSSNode->byCSSGK = WLAN_11i_CSS_UNKNOWN;
-
- pr_debug("802.11i CSS: %X\n", pBSSNode->byCSSGK);
-
- if (pRSN->len == 6) {
- pBSSNode->bWPA2Valid = true;
- return;
- }
-
- if (pRSN->len >= 8) { // ver(2) + GK(4) + PK count(2)
- pBSSNode->wCSSPKCount = *((unsigned short *)&(pRSN->abyRSN[4]));
- j = 0;
- pbyOUI = &(pRSN->abyRSN[6]);
-
- for (i = 0; (i < pBSSNode->wCSSPKCount) && (j < sizeof(pBSSNode->abyCSSPK)/sizeof(unsigned char)); i++) {
- if (pRSN->len >= 8+i*4+4) { // ver(2)+GK(4)+PKCnt(2)+PKS(4*i)
- if (!memcmp(pbyOUI, abyOUIGK, 4)) {
- pBSSNode->abyCSSPK[j++] = WLAN_11i_CSS_USE_GROUP;
- bUseGK = true;
- } else if (!memcmp(pbyOUI, abyOUIWEP40, 4)) {
-					// Invalid CSS, continue parsing
- } else if (!memcmp(pbyOUI, abyOUITKIP, 4)) {
- if (pBSSNode->byCSSGK != WLAN_11i_CSS_CCMP)
- pBSSNode->abyCSSPK[j++] = WLAN_11i_CSS_TKIP;
- else
-						; // Invalid CSS, continue parsing
- } else if (!memcmp(pbyOUI, abyOUICCMP, 4)) {
- pBSSNode->abyCSSPK[j++] = WLAN_11i_CSS_CCMP;
- } else if (!memcmp(pbyOUI, abyOUIWEP104, 4)) {
-					// Invalid CSS, continue parsing
- } else {
- // any vendor checks here
- pBSSNode->abyCSSPK[j++] = WLAN_11i_CSS_UNKNOWN;
- }
- pbyOUI += 4;
- pr_debug("abyCSSPK[%d]: %X\n",
- j-1, pBSSNode->abyCSSPK[j-1]);
- } else
- break;
- } //for
-
- if (bUseGK) {
- if (j != 1) {
-					// invalid CSS: this should be the only PK CSS.
- return;
- }
- if (pBSSNode->byCSSGK == WLAN_11i_CSS_CCMP) {
-					// invalid CSS: if the group cipher is CCMP, the PK can't be "use group cipher".
- return;
- }
- }
- if ((pBSSNode->wCSSPKCount != 0) && (j == 0)) {
- // invalid CSS, No valid PK.
- return;
- }
- pBSSNode->wCSSPKCount = (unsigned short)j;
- pr_debug("wCSSPKCount: %d\n", pBSSNode->wCSSPKCount);
- }
-
- m = *((unsigned short *)&(pRSN->abyRSN[4]));
-
- if (pRSN->len >= 10+m*4) { // ver(2) + GK(4) + PK count(2) + PKS(4*m) + AKMSS count(2)
- pBSSNode->wAKMSSAuthCount = *((unsigned short *)&(pRSN->abyRSN[6+4*m]));
- j = 0;
- pbyOUI = &(pRSN->abyRSN[8+4*m]);
- for (i = 0; (i < pBSSNode->wAKMSSAuthCount) && (j < sizeof(pBSSNode->abyAKMSSAuthType)/sizeof(unsigned char)); i++) {
- if (pRSN->len >= 10+(m+i)*4+4) { // ver(2)+GK(4)+PKCnt(2)+PKS(4*m)+AKMSS(2)+AKS(4*i)
- if (!memcmp(pbyOUI, abyOUI8021X, 4))
- pBSSNode->abyAKMSSAuthType[j++] = WLAN_11i_AKMSS_802_1X;
- else if (!memcmp(pbyOUI, abyOUIPSK, 4))
- pBSSNode->abyAKMSSAuthType[j++] = WLAN_11i_AKMSS_PSK;
- else
- // any vendor checks here
- pBSSNode->abyAKMSSAuthType[j++] = WLAN_11i_AKMSS_UNKNOWN;
- pr_debug("abyAKMSSAuthType[%d]: %X\n",
- j-1,
- pBSSNode->abyAKMSSAuthType[j-1]);
- } else
- break;
- }
- pBSSNode->wAKMSSAuthCount = (unsigned short)j;
- pr_debug("wAKMSSAuthCount: %d\n",
- pBSSNode->wAKMSSAuthCount);
-
- n = *((unsigned short *)&(pRSN->abyRSN[6+4*m]));
- if (pRSN->len >= 12 + 4 * m + 4 * n) { // ver(2)+GK(4)+PKCnt(2)+PKS(4*m)+AKMSSCnt(2)+AKMSS(4*n)+Cap(2)
- pBSSNode->sRSNCapObj.bRSNCapExist = true;
- pBSSNode->sRSNCapObj.wRSNCap = *((unsigned short *)&(pRSN->abyRSN[8+4*m+4*n]));
- }
- }
-		// ignore PMKID list because only (Re)Assoc requests carry this field
- pBSSNode->bWPA2Valid = true;
- }
-}
-
-/*+
- *
- * Description:
- * Set WPA IEs
- *
- * Parameters:
- * In:
- * pMgmtHandle - Pointer to management object
- * Out:
- * pRSNIEs - Pointer to the RSN IE to set.
- *
- * Return Value: length of IEs.
- *
- -*/
-unsigned int
-WPA2uSetIEs(
- void *pMgmtHandle,
- PWLAN_IE_RSN pRSNIEs
-)
-{
- PSMgmtObject pMgmt = (PSMgmtObject) pMgmtHandle;
- unsigned char *pbyBuffer = NULL;
- unsigned int ii = 0;
- unsigned short *pwPMKID = NULL;
-
- if (pRSNIEs == NULL)
- return 0;
-
- if (((pMgmt->eAuthenMode == WMAC_AUTH_WPA2) ||
- (pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) &&
- (pMgmt->pCurrBSS != NULL)) {
- /* WPA2 IE */
- pbyBuffer = (unsigned char *)pRSNIEs;
- pRSNIEs->byElementID = WLAN_EID_RSN;
- pRSNIEs->len = 6; //Version(2)+GK(4)
- pRSNIEs->wVersion = 1;
- //Group Key Cipher Suite
- pRSNIEs->abyRSN[0] = 0x00;
- pRSNIEs->abyRSN[1] = 0x0F;
- pRSNIEs->abyRSN[2] = 0xAC;
- if (pMgmt->byCSSGK == KEY_CTL_WEP)
- pRSNIEs->abyRSN[3] = pMgmt->pCurrBSS->byCSSGK;
- else if (pMgmt->byCSSGK == KEY_CTL_TKIP)
- pRSNIEs->abyRSN[3] = WLAN_11i_CSS_TKIP;
- else if (pMgmt->byCSSGK == KEY_CTL_CCMP)
- pRSNIEs->abyRSN[3] = WLAN_11i_CSS_CCMP;
- else
- pRSNIEs->abyRSN[3] = WLAN_11i_CSS_UNKNOWN;
-
- // Pairwise Key Cipher Suite
- pRSNIEs->abyRSN[4] = 1;
- pRSNIEs->abyRSN[5] = 0;
- pRSNIEs->abyRSN[6] = 0x00;
- pRSNIEs->abyRSN[7] = 0x0F;
- pRSNIEs->abyRSN[8] = 0xAC;
- if (pMgmt->byCSSPK == KEY_CTL_TKIP)
- pRSNIEs->abyRSN[9] = WLAN_11i_CSS_TKIP;
- else if (pMgmt->byCSSPK == KEY_CTL_CCMP)
- pRSNIEs->abyRSN[9] = WLAN_11i_CSS_CCMP;
- else if (pMgmt->byCSSPK == KEY_CTL_NONE)
- pRSNIEs->abyRSN[9] = WLAN_11i_CSS_USE_GROUP;
- else
- pRSNIEs->abyRSN[9] = WLAN_11i_CSS_UNKNOWN;
-
- pRSNIEs->len += 6;
-
- // Auth Key Management Suite
- pRSNIEs->abyRSN[10] = 1;
- pRSNIEs->abyRSN[11] = 0;
- pRSNIEs->abyRSN[12] = 0x00;
- pRSNIEs->abyRSN[13] = 0x0F;
- pRSNIEs->abyRSN[14] = 0xAC;
- if (pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)
- pRSNIEs->abyRSN[15] = WLAN_11i_AKMSS_PSK;
- else if (pMgmt->eAuthenMode == WMAC_AUTH_WPA2)
- pRSNIEs->abyRSN[15] = WLAN_11i_AKMSS_802_1X;
- else
- pRSNIEs->abyRSN[15] = WLAN_11i_AKMSS_UNKNOWN;
-
- pRSNIEs->len += 6;
-
- // RSN Capabilities
- if (pMgmt->pCurrBSS->sRSNCapObj.bRSNCapExist == true) {
- memcpy(&pRSNIEs->abyRSN[16], &pMgmt->pCurrBSS->sRSNCapObj.wRSNCap, 2);
- } else {
- pRSNIEs->abyRSN[16] = 0;
- pRSNIEs->abyRSN[17] = 0;
- }
- pRSNIEs->len += 2;
-
- if ((pMgmt->gsPMKIDCache.BSSIDInfoCount > 0) &&
- pMgmt->bRoaming &&
- (pMgmt->eAuthenMode == WMAC_AUTH_WPA2)) {
- // RSN PMKID
- pwPMKID = (unsigned short *)(&pRSNIEs->abyRSN[18]); // Point to PMKID count
- *pwPMKID = 0; // Initialize PMKID count
- pbyBuffer = &pRSNIEs->abyRSN[20]; // Point to PMKID list
- for (ii = 0; ii < pMgmt->gsPMKIDCache.BSSIDInfoCount; ii++) {
- if (!memcmp(&pMgmt->gsPMKIDCache.BSSIDInfo[ii].abyBSSID[0], pMgmt->abyCurrBSSID, ETH_ALEN)) {
- (*pwPMKID)++;
- memcpy(pbyBuffer, pMgmt->gsPMKIDCache.BSSIDInfo[ii].abyPMKID, 16);
- pbyBuffer += 16;
- }
- }
- if (*pwPMKID != 0)
- pRSNIEs->len += (2 + (*pwPMKID)*16);
- else
- pbyBuffer = &pRSNIEs->abyRSN[18];
- }
- return pRSNIEs->len + WLAN_IEHDR_LEN;
- }
- return 0;
-}
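
For reference, the removed WPA2vParseRSN() above walks the RSN (WPA2) information element at fixed byte offsets: version(2), group cipher suite(4), pairwise suite count(2), pairwise suite list(4*m), AKM suite count(2), AKM suite list(4*n), then the optional RSN capabilities(2). The standalone sketch below is not part of this patch; the struct and names are illustrative. It shows the same bounds-checked walk; multi-byte IE fields are little-endian per 802.11.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical summary of an RSN IE body (the bytes after element ID and length). */
struct rsn_info {
	uint16_t version;
	uint16_t pairwise_count;
	uint16_t akm_count;
	uint16_t capabilities;
	int has_capabilities;
};

/*
 * Offsets mirror the ver(2)+GK(4)+PKCnt(2)+PKS(4*m)+AKMCnt(2)+AKMS(4*n)+Cap(2)
 * layout the removed parser used.  Counts are reported as advertised; a caller
 * walking the suite lists must still clamp them against the remaining length,
 * as the removed loop did with its per-suite length checks.
 */
static int parse_rsn_ie(const uint8_t *body, size_t len, struct rsn_info *out)
{
	size_t off;

	memset(out, 0, sizeof(*out));
	if (len < 8)	/* ver(2) + group suite(4) + pairwise count(2) */
		return -1;
	out->version        = body[0] | (body[1] << 8);
	out->pairwise_count = body[6] | (body[7] << 8);

	off = 8 + 4u * out->pairwise_count;	/* skip the pairwise suite list */
	if (len < off + 2)			/* AKM count not present */
		return 0;
	out->akm_count = body[off] | (body[off + 1] << 8);

	off += 2 + 4u * out->akm_count;		/* skip the AKM suite list */
	if (len >= off + 2) {			/* optional RSN capabilities */
		out->capabilities = body[off] | (body[off + 1] << 8);
		out->has_capabilities = 1;
	}
	return 0;
}
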
diff --git a/drivers/staging/vt6655/wpa2.h b/drivers/staging/vt6655/wpa2.h
deleted file mode 100644
index 2d0bd2e515f..00000000000
--- a/drivers/staging/vt6655/wpa2.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: wpa2.h
- *
- * Purpose: Defines the macros, types, and functions for dealing
- *      with WPA2 information.
- *
- * Author: Yiching Chen
- *
- * Date: Oct. 4, 2004
- *
- */
-
-#ifndef __WPA2_H__
-#define __WPA2_H__
-
-#include "ttype.h"
-#include "80211mgr.h"
-#include "80211hdr.h"
-#include "bssdb.h"
-
-/*--------------------- Export Definitions -------------------------*/
-#define MAX_PMKID_CACHE 16
-
-typedef struct tagsPMKIDInfo {
- unsigned char abyBSSID[6];
- unsigned char abyPMKID[16];
-} PMKIDInfo, *PPMKIDInfo;
-
-typedef struct tagSPMKIDCache {
- unsigned long BSSIDInfoCount;
- PMKIDInfo BSSIDInfo[MAX_PMKID_CACHE];
-} SPMKIDCache, *PSPMKIDCache;
-
-/*--------------------- Export Classes ----------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Types ------------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-void
-WPA2_ClearRSN(
- PKnownBSS pBSSNode
-);
-
-void
-WPA2vParseRSN(
- PKnownBSS pBSSNode,
- PWLAN_IE_RSN pRSN
-);
-
-unsigned int
-WPA2uSetIEs(
- void *pMgmtHandle,
- PWLAN_IE_RSN pRSNIEs
-);
-
-#endif // __WPA2_H__
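
The PMKID cache this header declared is a fixed-size array searched linearly by BSSID; the roaming branch of the removed WPA2uSetIEs() does exactly that before appending the PMKID list to the RSN IE. A self-contained sketch of the lookup follows (illustrative names, not part of the patch).

#include <stdint.h>
#include <string.h>

#define MAX_PMKID_ENTRIES 16			/* cf. the removed MAX_PMKID_CACHE */

struct pmkid_entry {
	uint8_t bssid[6];
	uint8_t pmkid[16];
};

struct pmkid_cache {
	unsigned long count;
	struct pmkid_entry entry[MAX_PMKID_ENTRIES];
};

/* Return the cached PMKID for @bssid, or NULL if the cache has no entry. */
static const uint8_t *pmkid_lookup(const struct pmkid_cache *cache,
				   const uint8_t bssid[6])
{
	unsigned long i;

	for (i = 0; i < cache->count && i < MAX_PMKID_ENTRIES; i++) {
		if (!memcmp(cache->entry[i].bssid, bssid, 6))
			return cache->entry[i].pmkid;
	}
	return NULL;
}
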
diff --git a/drivers/staging/vt6655/wpactl.c b/drivers/staging/vt6655/wpactl.c
deleted file mode 100644
index dab1e807865..00000000000
--- a/drivers/staging/vt6655/wpactl.c
+++ /dev/null
@@ -1,896 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: wpactl.c
- *
- * Purpose: handle wpa supplicant ioctl input/output functions
- *
- * Author: Lyndon Chen
- *
- * Date: Oct. 20, 2003
- *
- * Functions:
- *
- * Revision History:
- *
- */
-
-#include "wpactl.h"
-#include "key.h"
-#include "mac.h"
-#include "device.h"
-#include "wmgr.h"
-#include "iocmd.h"
-#include "iowpa.h"
-#include "rf.h"
-
-/*--------------------- Static Definitions -------------------------*/
-
-#define VIAWGET_WPA_MAX_BUF_SIZE 1024
-
-static const int frequency_list[] = {
- 2412, 2417, 2422, 2427, 2432, 2437, 2442,
- 2447, 2452, 2457, 2462, 2467, 2472, 2484
-};
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Functions --------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-static void wpadev_setup(struct net_device *dev)
-{
- dev->type = ARPHRD_IEEE80211;
- dev->hard_header_len = ETH_HLEN;
- dev->mtu = 2048;
- dev->addr_len = ETH_ALEN;
- dev->tx_queue_len = 1000;
-
- memset(dev->broadcast, 0xFF, ETH_ALEN);
-
- dev->flags = IFF_BROADCAST|IFF_MULTICAST;
-}
-
-/*
- * Description:
- * register netdev for wpa supplicant daemon
- *
- * Parameters:
- * In:
- * pDevice -
- * enable -
- * Out:
- *
- * Return Value:
- *
- */
-
-static int wpa_init_wpadev(struct vnt_private *pDevice)
-{
- struct vnt_private *wpadev_priv;
- struct net_device *dev = pDevice->dev;
- int ret = 0;
-
- pDevice->wpadev = alloc_netdev(sizeof(*wpadev_priv), "vntwpa",
- NET_NAME_UNKNOWN, wpadev_setup);
- if (pDevice->wpadev == NULL)
- return -ENOMEM;
-
- wpadev_priv = netdev_priv(pDevice->wpadev);
- *wpadev_priv = *pDevice;
- eth_hw_addr_inherit(pDevice->wpadev, dev);
- pDevice->wpadev->base_addr = dev->base_addr;
- pDevice->wpadev->irq = dev->irq;
- pDevice->wpadev->mem_start = dev->mem_start;
- pDevice->wpadev->mem_end = dev->mem_end;
- ret = register_netdev(pDevice->wpadev);
- if (ret) {
- pr_debug("%s: register_netdev(WPA) failed!\n", dev->name);
- free_netdev(pDevice->wpadev);
- return -1;
- }
-
- if (pDevice->skb == NULL) {
- pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
- if (pDevice->skb == NULL)
- return -ENOMEM;
- }
-
- pr_debug("%s: Registered netdev %s for WPA management\n",
- dev->name, pDevice->wpadev->name);
-
- return 0;
-}
-
-/*
- * Description:
- * unregister net_device (wpadev)
- *
- * Parameters:
- * In:
- * pDevice -
- * Out:
- *
- * Return Value:
- *
- */
-
-static int wpa_release_wpadev(struct vnt_private *pDevice)
-{
- if (pDevice->skb) {
- dev_kfree_skb(pDevice->skb);
- pDevice->skb = NULL;
- }
-
- if (pDevice->wpadev) {
- pr_debug("%s: Netdevice %s unregistered\n",
- pDevice->dev->name, pDevice->wpadev->name);
- unregister_netdev(pDevice->wpadev);
- free_netdev(pDevice->wpadev);
- pDevice->wpadev = NULL;
- }
-
- return 0;
-}
-
-/*
- * Description:
- * Set enable/disable dev for wpa supplicant daemon
- *
- * Parameters:
- * In:
- * pDevice -
- * val -
- * Out:
- *
- * Return Value:
- *
- */
-
-int wpa_set_wpadev(struct vnt_private *pDevice, int val)
-{
- if (val)
- return wpa_init_wpadev(pDevice);
- else
- return wpa_release_wpadev(pDevice);
-}
-
-/*
- * Description:
- * Set WPA algorithm & keys
- *
- * Parameters:
- * In:
- * pDevice -
- * param -
- * Out:
- *
- * Return Value:
- *
- */
-
-int wpa_set_keys(struct vnt_private *pDevice, void *ctx,
- bool fcpfkernel) __must_hold(&pDevice->lock)
-{
- struct viawget_wpa_param *param = ctx;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- unsigned long dwKeyIndex = 0;
- unsigned char abyKey[MAX_KEY_LEN];
- unsigned char abySeq[MAX_KEY_LEN];
- u64 KeyRSC;
- unsigned char byKeyDecMode = KEY_CTL_WEP;
- int ret = 0;
- int uu, ii;
-
- if (param->u.wpa_key.alg_name > WPA_ALG_CCMP ||
- param->u.wpa_key.key_len > MAX_KEY_LEN ||
- param->u.wpa_key.seq_len > MAX_KEY_LEN)
- return -EINVAL;
-
- pr_debug("param->u.wpa_key.alg_name = %d\n", param->u.wpa_key.alg_name);
- if (param->u.wpa_key.alg_name == WPA_ALG_NONE) {
- pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
- pDevice->bEncryptionEnable = false;
- pDevice->byKeyIndex = 0;
- pDevice->bTransmitKey = false;
- KeyvRemoveAllWEPKey(&(pDevice->sKey), pDevice->PortOffset);
- for (uu = 0; uu < MAX_KEY_TABLE; uu++)
- MACvDisableKeyEntry(pDevice->PortOffset, uu);
-
- return ret;
- }
-
- if (param->u.wpa_key.key && fcpfkernel) {
- memcpy(&abyKey[0], param->u.wpa_key.key, param->u.wpa_key.key_len);
- } else {
- spin_unlock_irq(&pDevice->lock);
- if (param->u.wpa_key.key &&
- copy_from_user(&abyKey[0],
- (void __user *)param->u.wpa_key.key,
- param->u.wpa_key.key_len)) {
- spin_lock_irq(&pDevice->lock);
- return -EINVAL;
- }
- spin_lock_irq(&pDevice->lock);
- }
-
- dwKeyIndex = (unsigned long)(param->u.wpa_key.key_index);
-
- if (param->u.wpa_key.alg_name == WPA_ALG_WEP) {
- if (dwKeyIndex > 3) {
- return -EINVAL;
- } else {
- if (param->u.wpa_key.set_tx) {
- pDevice->byKeyIndex = (unsigned char)dwKeyIndex;
- pDevice->bTransmitKey = true;
- dwKeyIndex |= (1 << 31);
- }
- KeybSetDefaultKey(&(pDevice->sKey),
- dwKeyIndex & ~(BIT30 | USE_KEYRSC),
- param->u.wpa_key.key_len,
- NULL,
- abyKey,
- KEY_CTL_WEP,
- pDevice->PortOffset,
- pDevice->byLocalID);
-
- }
- pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
- pDevice->bEncryptionEnable = true;
- return ret;
- }
-
- if (param->u.wpa_key.seq && fcpfkernel) {
- memcpy(&abySeq[0], param->u.wpa_key.seq, param->u.wpa_key.seq_len);
- } else {
- spin_unlock_irq(&pDevice->lock);
- if (param->u.wpa_key.seq &&
- copy_from_user(&abySeq[0],
- (void __user *)param->u.wpa_key.seq,
- param->u.wpa_key.seq_len)) {
- spin_lock_irq(&pDevice->lock);
- return -EINVAL;
- }
- spin_lock_irq(&pDevice->lock);
- }
-
- if (param->u.wpa_key.seq_len > 0) {
- for (ii = 0; ii < param->u.wpa_key.seq_len; ii++) {
- if (ii < 4)
- KeyRSC |= (u64)(abySeq[ii] << (ii * 8));
- else
- KeyRSC |= (u64)(abySeq[ii] << ((ii-4) * 8));
- }
- dwKeyIndex |= 1 << 29;
- }
-
- if (param->u.wpa_key.key_index >= MAX_GROUP_KEY) {
- pr_debug("return dwKeyIndex > 3\n");
- return -EINVAL;
- }
-
- if (param->u.wpa_key.alg_name == WPA_ALG_TKIP)
- pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled;
-
- if (param->u.wpa_key.alg_name == WPA_ALG_CCMP)
- pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled;
-
- if (param->u.wpa_key.set_tx)
- dwKeyIndex |= (1 << 31);
-
- if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled)
- byKeyDecMode = KEY_CTL_CCMP;
- else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled)
- byKeyDecMode = KEY_CTL_TKIP;
- else
- byKeyDecMode = KEY_CTL_WEP;
-
- /* Fix HCT test that set 256 bits KEY and Ndis802_11Encryption3Enabled */
- if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) {
- if (param->u.wpa_key.key_len == MAX_KEY_LEN)
- byKeyDecMode = KEY_CTL_TKIP;
- else if (param->u.wpa_key.key_len == WLAN_WEP40_KEYLEN)
- byKeyDecMode = KEY_CTL_WEP;
- else if (param->u.wpa_key.key_len == WLAN_WEP104_KEYLEN)
- byKeyDecMode = KEY_CTL_WEP;
- } else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) {
- if (param->u.wpa_key.key_len == WLAN_WEP40_KEYLEN)
- byKeyDecMode = KEY_CTL_WEP;
- else if (param->u.wpa_key.key_len == WLAN_WEP104_KEYLEN)
- byKeyDecMode = KEY_CTL_WEP;
- }
-
- /* Check TKIP key length */
- if ((byKeyDecMode == KEY_CTL_TKIP) &&
- (param->u.wpa_key.key_len != MAX_KEY_LEN)) {
- /* TKIP Key must be 256 bits */
- pr_debug("return- TKIP Key must be 256 bits!\n");
- return -EINVAL;
- }
- /* Check AES key length */
- if ((byKeyDecMode == KEY_CTL_CCMP) &&
- (param->u.wpa_key.key_len != AES_KEY_LEN)) {
- /* AES Key must be 128 bits */
- return -EINVAL;
- }
-
- /* spin_lock_irq(&pDevice->lock); */
- if (is_broadcast_ether_addr(&param->addr[0]) || (param->addr == NULL)) {
- /* If is_broadcast_ether_addr, set the key as every key entry's group key. */
- pr_debug("Groupe Key Assign\n");
-
- if (KeybSetAllGroupKey(&(pDevice->sKey),
- dwKeyIndex,
- param->u.wpa_key.key_len,
- (u64 *) &KeyRSC,
- (unsigned char *)abyKey,
- byKeyDecMode,
- pDevice->PortOffset,
- pDevice->byLocalID) &&
- KeybSetDefaultKey(&(pDevice->sKey),
- dwKeyIndex,
- param->u.wpa_key.key_len,
- (u64 *) &KeyRSC,
- (unsigned char *)abyKey,
- byKeyDecMode,
- pDevice->PortOffset,
- pDevice->byLocalID)) {
- pr_debug("GROUP Key Assign\n");
-
- } else {
- return -EINVAL;
- }
-
- } else {
- pr_debug("Pairwise Key Assign\n");
- /* BSSID not 0xffffffffffff */
- /* Pairwise Key can't be WEP */
- if (byKeyDecMode == KEY_CTL_WEP) {
- pr_debug("Pairwise Key can't be WEP\n");
- return -EINVAL;
- }
-
- dwKeyIndex |= (1 << 30); /* set pairwise key */
- if (pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA)
- return -EINVAL;
-
- if (KeybSetKey(&(pDevice->sKey),
- &param->addr[0],
- dwKeyIndex,
- param->u.wpa_key.key_len,
- (u64 *) &KeyRSC,
- (unsigned char *)abyKey,
- byKeyDecMode,
- pDevice->PortOffset,
- pDevice->byLocalID)) {
- pr_debug("Pairwise Key Set\n");
-
- } else {
- /* Key Table Full */
- return -EINVAL;
- }
- } /* BSSID not 0xffffffffffff */
- if ((ret == 0) && ((param->u.wpa_key.set_tx) != 0)) {
- pDevice->byKeyIndex = (unsigned char)param->u.wpa_key.key_index;
- pDevice->bTransmitKey = true;
- }
- pDevice->bEncryptionEnable = true;
-
- return ret;
-}
-
-/*
- * Description:
- * enable wpa auth & mode
- *
- * Parameters:
- * In:
- * pDevice -
- * param -
- * Out:
- *
- * Return Value:
- *
- */
-
-static int wpa_set_wpa(struct vnt_private *pDevice,
- struct viawget_wpa_param *param)
-{
- PSMgmtObject pMgmt = pDevice->pMgmt;
-
- pMgmt->eAuthenMode = WMAC_AUTH_OPEN;
- pMgmt->bShareKeyAlgorithm = false;
-
- return 0;
-}
-
-/*
- * Description:
- * set disassociate
- *
- * Parameters:
- * In:
- * pDevice -
- * param -
- * Out:
- *
- * Return Value:
- *
- */
-
-static int wpa_set_disassociate(struct vnt_private *pDevice,
- struct viawget_wpa_param *param)
-{
- PSMgmtObject pMgmt = pDevice->pMgmt;
-
- spin_lock_irq(&pDevice->lock);
- if (pDevice->bLinkPass) {
- if (!memcmp(param->addr, pMgmt->abyCurrBSSID, 6))
- bScheduleCommand((void *)pDevice, WLAN_CMD_DISASSOCIATE, NULL);
- }
- spin_unlock_irq(&pDevice->lock);
-
- return 0;
-}
-
-/*
- * Description:
- * enable scan process
- *
- * Parameters:
- * In:
- * pDevice -
- * param -
- * Out:
- *
- * Return Value:
- *
- */
-
-static int wpa_set_scan(struct vnt_private *pDevice,
- struct viawget_wpa_param *param)
-{
- spin_lock_irq(&pDevice->lock);
- BSSvClearBSSList((void *)pDevice, pDevice->bLinkPass);
- bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, NULL);
- spin_unlock_irq(&pDevice->lock);
-
- return 0;
-}
-
-/*
- * Description:
- * get bssid
- *
- * Parameters:
- * In:
- * pDevice -
- * param -
- * Out:
- *
- * Return Value:
- *
- */
-
-static int wpa_get_bssid(struct vnt_private *pDevice,
- struct viawget_wpa_param *param)
-{
- PSMgmtObject pMgmt = pDevice->pMgmt;
-
- memcpy(param->u.wpa_associate.bssid, pMgmt->abyCurrBSSID , 6);
-
- return 0;
-}
-
-/*
- * Description:
- *      get ssid
- *
- * Parameters:
- * In:
- * pDevice -
- * param -
- * Out:
- *
- * Return Value:
- *
- */
-
-static int wpa_get_ssid(struct vnt_private *pDevice,
- struct viawget_wpa_param *param)
-{
- PSMgmtObject pMgmt = pDevice->pMgmt;
- PWLAN_IE_SSID pItemSSID;
-
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
-
- memcpy(param->u.wpa_associate.ssid, pItemSSID->abySSID , pItemSSID->len);
- param->u.wpa_associate.ssid_len = pItemSSID->len;
-
- return 0;
-}
-
-/*
- * Description:
- * get scan results
- *
- * Parameters:
- * In:
- * pDevice -
- * param -
- * Out:
- *
- * Return Value:
- *
- */
-
-static int wpa_get_scan(struct vnt_private *pDevice,
- struct viawget_wpa_param *param)
-{
- struct viawget_scan_result *scan_buf;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- PWLAN_IE_SSID pItemSSID;
- PKnownBSS pBSS;
- unsigned char *pBuf;
- int ret = 0;
- u16 count = 0;
- u16 ii, jj;
-#if 1
-
- unsigned char *ptempBSS;
-
- ptempBSS = kmalloc(sizeof(KnownBSS), GFP_ATOMIC);
-
- if (ptempBSS == NULL) {
- pr_err("bubble sort kmalloc memory fail@@@\n");
-
- ret = -ENOMEM;
-
- return ret;
-
- }
-
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- for (jj = 0; jj < MAX_BSS_NUM - ii - 1; jj++) {
- if ((pMgmt->sBSSList[jj].bActive != true) ||
-
- ((pMgmt->sBSSList[jj].uRSSI > pMgmt->sBSSList[jj + 1].uRSSI) && (pMgmt->sBSSList[jj + 1].bActive != false))) {
- memcpy(ptempBSS, &pMgmt->sBSSList[jj], sizeof(KnownBSS));
-
- memcpy(&pMgmt->sBSSList[jj], &pMgmt->sBSSList[jj + 1], sizeof(KnownBSS));
-
- memcpy(&pMgmt->sBSSList[jj + 1], ptempBSS, sizeof(KnownBSS));
-
- }
-
- }
-
- }
-
- kfree(ptempBSS);
-#endif
-
-//******mike:bubble sort by stronger RSSI*****//
-
- count = 0;
- pBSS = &(pMgmt->sBSSList[0]);
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- pBSS = &(pMgmt->sBSSList[ii]);
- if (!pBSS->bActive)
- continue;
- count++;
- }
-
- pBuf = kcalloc(count, sizeof(struct viawget_scan_result), GFP_ATOMIC);
-
- if (pBuf == NULL) {
- ret = -ENOMEM;
- return ret;
- }
- scan_buf = (struct viawget_scan_result *)pBuf;
- pBSS = &(pMgmt->sBSSList[0]);
- for (ii = 0, jj = 0; ii < MAX_BSS_NUM; ii++) {
- pBSS = &(pMgmt->sBSSList[ii]);
- if (pBSS->bActive) {
- if (jj >= count)
- break;
- memcpy(scan_buf->bssid, pBSS->abyBSSID, WLAN_BSSID_LEN);
- pItemSSID = (PWLAN_IE_SSID)pBSS->abySSID;
- memcpy(scan_buf->ssid, pItemSSID->abySSID, pItemSSID->len);
- scan_buf->ssid_len = pItemSSID->len;
- scan_buf->freq = frequency_list[pBSS->uChannel-1];
- scan_buf->caps = pBSS->wCapInfo;
-
- if (pBSS->wWPALen != 0) {
- scan_buf->wpa_ie_len = pBSS->wWPALen;
- memcpy(scan_buf->wpa_ie, pBSS->byWPAIE, pBSS->wWPALen);
- }
- if (pBSS->wRSNLen != 0) {
- scan_buf->rsn_ie_len = pBSS->wRSNLen;
- memcpy(scan_buf->rsn_ie, pBSS->byRSNIE, pBSS->wRSNLen);
- }
- scan_buf = (struct viawget_scan_result *)((unsigned char *)scan_buf + sizeof(struct viawget_scan_result));
- jj++;
- }
- }
-
- if (jj < count)
- count = jj;
-
- if (copy_to_user(param->u.scan_results.buf, pBuf, sizeof(struct viawget_scan_result) * count))
- ret = -EFAULT;
-
- param->u.scan_results.scan_count = count;
- pr_debug(" param->u.scan_results.scan_count = %d\n", count);
-
- kfree(pBuf);
- return ret;
-}
-
-/*
- * Description:
- * set associate with AP
- *
- * Parameters:
- * In:
- * pDevice -
- * param -
- * Out:
- *
- * Return Value:
- *
- */
-
-static int wpa_set_associate(struct vnt_private *pDevice,
- struct viawget_wpa_param *param)
-{
- PSMgmtObject pMgmt = pDevice->pMgmt;
- PWLAN_IE_SSID pItemSSID;
- unsigned char abyNullAddr[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
- unsigned char abyWPAIE[64];
- bool bWepEnabled = false;
-
- /* set key type & algorithm */
- pr_debug("pairwise_suite = %d\n",
- param->u.wpa_associate.pairwise_suite);
- pr_debug("group_suite = %d\n", param->u.wpa_associate.group_suite);
- pr_debug("key_mgmt_suite = %d\n",
- param->u.wpa_associate.key_mgmt_suite);
- pr_debug("auth_alg = %d\n", param->u.wpa_associate.auth_alg);
- pr_debug("mode = %d\n", param->u.wpa_associate.mode);
- pr_debug("wpa_ie_len = %d\n", param->u.wpa_associate.wpa_ie_len);
-
- if (param->u.wpa_associate.wpa_ie_len) {
- if (!param->u.wpa_associate.wpa_ie)
- return -EINVAL;
- if (param->u.wpa_associate.wpa_ie_len > sizeof(abyWPAIE))
- return -EINVAL;
- if (copy_from_user(&abyWPAIE[0], param->u.wpa_associate.wpa_ie, param->u.wpa_associate.wpa_ie_len))
- return -EFAULT;
- }
-
- if (param->u.wpa_associate.mode == 1)
- pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA;
- else
- pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA;
- /* set ssid */
- memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
- pItemSSID->byElementID = WLAN_EID_SSID;
- pItemSSID->len = param->u.wpa_associate.ssid_len;
- memcpy(pItemSSID->abySSID, param->u.wpa_associate.ssid, pItemSSID->len);
- /* set bssid */
- if (memcmp(param->u.wpa_associate.bssid, &abyNullAddr[0], 6) != 0)
- memcpy(pMgmt->abyDesireBSSID, param->u.wpa_associate.bssid, 6);
- else
- bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, pItemSSID->abySSID);
-
- if (param->u.wpa_associate.wpa_ie_len == 0) {
- if (param->u.wpa_associate.auth_alg & AUTH_ALG_SHARED_KEY)
- pMgmt->eAuthenMode = WMAC_AUTH_SHAREKEY;
- else
- pMgmt->eAuthenMode = WMAC_AUTH_OPEN;
- } else if (abyWPAIE[0] == RSN_INFO_ELEM) {
- if (param->u.wpa_associate.key_mgmt_suite == KEY_MGMT_PSK)
- pMgmt->eAuthenMode = WMAC_AUTH_WPA2PSK;
- else
- pMgmt->eAuthenMode = WMAC_AUTH_WPA2;
- } else {
- if (param->u.wpa_associate.key_mgmt_suite == KEY_MGMT_WPA_NONE)
- pMgmt->eAuthenMode = WMAC_AUTH_WPANONE;
- else if (param->u.wpa_associate.key_mgmt_suite == KEY_MGMT_PSK)
- pMgmt->eAuthenMode = WMAC_AUTH_WPAPSK;
- else
- pMgmt->eAuthenMode = WMAC_AUTH_WPA;
- }
-
- switch (param->u.wpa_associate.pairwise_suite) {
- case CIPHER_CCMP:
- pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled;
- break;
- case CIPHER_TKIP:
- pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled;
- break;
- case CIPHER_WEP40:
- case CIPHER_WEP104:
- pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
- bWepEnabled = true;
- break;
- case CIPHER_NONE:
- if (param->u.wpa_associate.group_suite == CIPHER_CCMP)
- pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled;
- else
- pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled;
- break;
- default:
- pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
- }
-
-//DavidWang add for WPA_supplicant support open/share mode
-
- if (pMgmt->eAuthenMode == WMAC_AUTH_SHAREKEY) {
- pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
- pMgmt->bShareKeyAlgorithm = true;
- } else if (pMgmt->eAuthenMode == WMAC_AUTH_OPEN) {
- if (!bWepEnabled) pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
- else pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
- }
-//mike save old encryption status
- pDevice->eOldEncryptionStatus = pDevice->eEncryptionStatus;
-
- if (pDevice->eEncryptionStatus != Ndis802_11EncryptionDisabled)
- pDevice->bEncryptionEnable = true;
- else
- pDevice->bEncryptionEnable = false;
- if (!((pMgmt->eAuthenMode == WMAC_AUTH_SHAREKEY) ||
- ((pMgmt->eAuthenMode == WMAC_AUTH_OPEN) && bWepEnabled))) //DavidWang //20080717-06,<Modify> by chester//Not to initial WEP
- KeyvInitTable(&pDevice->sKey, pDevice->PortOffset);
- spin_lock_irq(&pDevice->lock);
- pDevice->bLinkPass = false;
- memset(pMgmt->abyCurrBSSID, 0, 6);
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- netif_stop_queue(pDevice->dev);
- //20080701-02,<Add> by Mike Liu
-/****** search if ap_scan=2, which is an association request in hidden-SSID mode ******/
- {
- PKnownBSS pCurr = NULL;
-
- pCurr = BSSpSearchBSSList(pDevice,
- pMgmt->abyDesireBSSID,
- pMgmt->abyDesireSSID,
- pMgmt->eConfigPHYMode
-);
-
- if (pCurr == NULL) {
- pr_debug("wpa_set_associate---->hidden mode site survey before associate.......\n");
- bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
- }
- }
-/****************************************************************/
- bScheduleCommand((void *)pDevice, WLAN_CMD_SSID, NULL);
- spin_unlock_irq(&pDevice->lock);
-
- return 0;
-}
-
-/*
- * Description:
- * wpa_ioctl main function supported for wpa supplicant
- *
- * Parameters:
- * In:
- * pDevice -
- * iw_point -
- * Out:
- *
- * Return Value:
- *
- */
-
-int wpa_ioctl(struct vnt_private *pDevice, struct iw_point *p)
-{
- struct viawget_wpa_param *param;
- int ret = 0;
- int wpa_ioctl = 0;
-
- if (p->length < sizeof(struct viawget_wpa_param) ||
- p->length > VIAWGET_WPA_MAX_BUF_SIZE || !p->pointer)
- return -EINVAL;
-
- param = kmalloc((int)p->length, GFP_KERNEL);
- if (param == NULL)
- return -ENOMEM;
-
- if (copy_from_user(param, p->pointer, p->length)) {
- ret = -EFAULT;
- goto out;
- }
-
- switch (param->cmd) {
- case VIAWGET_SET_WPA:
- ret = wpa_set_wpa(pDevice, param);
- pr_debug("VIAWGET_SET_WPA\n");
- break;
-
- case VIAWGET_SET_KEY:
- pr_debug("VIAWGET_SET_KEY\n");
- spin_lock_irq(&pDevice->lock);
- ret = wpa_set_keys(pDevice, param, false);
- spin_unlock_irq(&pDevice->lock);
- break;
-
- case VIAWGET_SET_SCAN:
- pr_debug("VIAWGET_SET_SCAN\n");
- ret = wpa_set_scan(pDevice, param);
- break;
-
- case VIAWGET_GET_SCAN:
- pr_debug("VIAWGET_GET_SCAN\n");
- ret = wpa_get_scan(pDevice, param);
- wpa_ioctl = 1;
- break;
-
- case VIAWGET_GET_SSID:
- pr_debug("VIAWGET_GET_SSID\n");
- ret = wpa_get_ssid(pDevice, param);
- wpa_ioctl = 1;
- break;
-
- case VIAWGET_GET_BSSID:
- pr_debug("VIAWGET_GET_BSSID\n");
- ret = wpa_get_bssid(pDevice, param);
- wpa_ioctl = 1;
- break;
-
- case VIAWGET_SET_ASSOCIATE:
- pr_debug("VIAWGET_SET_ASSOCIATE\n");
- ret = wpa_set_associate(pDevice, param);
- break;
-
- case VIAWGET_SET_DISASSOCIATE:
- pr_debug("VIAWGET_SET_DISASSOCIATE\n");
- ret = wpa_set_disassociate(pDevice, param);
- break;
-
- case VIAWGET_SET_DROP_UNENCRYPT:
- pr_debug("VIAWGET_SET_DROP_UNENCRYPT\n");
- break;
-
- case VIAWGET_SET_DEAUTHENTICATE:
- pr_debug("VIAWGET_SET_DEAUTHENTICATE\n");
- break;
-
- default:
- pr_debug("wpa_ioctl: unknown cmd=%d\n",
- param->cmd);
- ret = -EOPNOTSUPP;
- goto out;
- }
-
- if ((ret == 0) && wpa_ioctl) {
- if (copy_to_user(p->pointer, param, p->length)) {
- ret = -EFAULT;
- goto out;
- }
- }
-
-out:
- kfree(param);
-
- return ret;
-}
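
The removed wpa_ioctl() above follows the common pattern for variable-length supplicant requests: validate the iw_point length against both a minimum and a hard cap, allocate a kernel copy, copy_from_user(), dispatch on the command, and copy the buffer back only for "get" commands. A condensed, hypothetical sketch of that pattern follows; the struct and constant names are illustrative and not from this driver.

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#define MY_WPA_MAX_BUF_SIZE 1024	/* illustrative cap, cf. VIAWGET_WPA_MAX_BUF_SIZE */

struct my_wpa_param {			/* hypothetical request header */
	u32 cmd;
	u8 data[];
};

static int my_wpa_ioctl(void __user *uptr, u16 length)
{
	struct my_wpa_param *param;
	bool copy_back = false;
	int ret = 0;

	/* Reject undersized, oversized or NULL requests before allocating. */
	if (!uptr || length < sizeof(*param) || length > MY_WPA_MAX_BUF_SIZE)
		return -EINVAL;

	param = kmalloc(length, GFP_KERNEL);
	if (!param)
		return -ENOMEM;

	if (copy_from_user(param, uptr, length)) {
		ret = -EFAULT;
		goto out;
	}

	switch (param->cmd) {
	case 1:				/* hypothetical "get" command */
		/* ... fill param->data with the result ... */
		copy_back = true;
		break;
	case 2:				/* hypothetical "set" command */
		/* ... apply the request ... */
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	/* Only "get" commands hand a result back to user space. */
	if (!ret && copy_back && copy_to_user(uptr, param, length))
		ret = -EFAULT;
out:
	kfree(param);
	return ret;
}
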
diff --git a/drivers/staging/vt6655/wpactl.h b/drivers/staging/vt6655/wpactl.h
deleted file mode 100644
index c1b4a729206..00000000000
--- a/drivers/staging/vt6655/wpactl.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: wpactl.h
- *
- * Purpose:
- *
- * Author: Lyndon Chen
- *
- * Date: March 1, 2005
- *
- */
-
-#ifndef __WPACTL_H__
-#define __WPACTL_H__
-
-#include "device.h"
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
-#include "iowpa.h"
-#endif
-
-/*--------------------- Export Definitions -------------------------*/
-
-//WPA related
-
-enum wpa_alg { WPA_ALG_NONE, WPA_ALG_WEP, WPA_ALG_TKIP, WPA_ALG_CCMP };
-enum wpa_cipher { CIPHER_NONE, CIPHER_WEP40, CIPHER_TKIP, CIPHER_CCMP,
- CIPHER_WEP104 };
-enum wpa_key_mgmt { KEY_MGMT_802_1X, KEY_MGMT_CCKM, KEY_MGMT_PSK, KEY_MGMT_NONE,
- KEY_MGMT_802_1X_NO_WPA, KEY_MGMT_WPA_NONE };
-
-#define AUTH_ALG_OPEN_SYSTEM 0x01
-#define AUTH_ALG_SHARED_KEY 0x02
-#define AUTH_ALG_LEAP 0x04
-
-#define GENERIC_INFO_ELEM 0xdd
-#define RSN_INFO_ELEM 0x30
-
-/*--------------------- Export Classes ----------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-int wpa_set_wpadev(struct vnt_private *, int val);
-int wpa_ioctl(struct vnt_private *, struct iw_point *p);
-int wpa_set_keys(struct vnt_private *, void *ctx, bool fcpfkernel);
-
-#endif // __WPACTL_H__
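
The cipher and key-management enums this header exported are what the removed wpa_set_associate() switched on when translating the supplicant's pairwise choice into the driver's NDIS-style encryption status. The small helper below restates that mapping; the status enum is declared locally for illustration and is not the driver's real definition.

enum wpa_cipher { CIPHER_NONE, CIPHER_WEP40, CIPHER_TKIP, CIPHER_CCMP,
		  CIPHER_WEP104 };

/* Illustrative stand-in for the NDIS-style status values the driver used. */
enum ndis_encryption_status {
	Ndis802_11EncryptionDisabled,
	Ndis802_11Encryption1Enabled,		/* WEP */
	Ndis802_11Encryption2Enabled,		/* TKIP */
	Ndis802_11Encryption3Enabled,		/* CCMP */
};

static enum ndis_encryption_status
map_pairwise_cipher(enum wpa_cipher pairwise, enum wpa_cipher group)
{
	switch (pairwise) {
	case CIPHER_CCMP:
		return Ndis802_11Encryption3Enabled;
	case CIPHER_TKIP:
		return Ndis802_11Encryption2Enabled;
	case CIPHER_WEP40:
	case CIPHER_WEP104:
		return Ndis802_11Encryption1Enabled;
	case CIPHER_NONE:
		/* Group-only crypto: fall back to the group cipher's level. */
		return group == CIPHER_CCMP ? Ndis802_11Encryption3Enabled
					    : Ndis802_11Encryption2Enabled;
	default:
		return Ndis802_11EncryptionDisabled;
	}
}
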
diff --git a/drivers/staging/vt6655/wroute.c b/drivers/staging/vt6655/wroute.c
deleted file mode 100644
index d1171fa3446..00000000000
--- a/drivers/staging/vt6655/wroute.c
+++ /dev/null
@@ -1,187 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: wroute.c
- *
- * Purpose: handle WMAC frame relay & filtering
- *
- * Author: Lyndon Chen
- *
- * Date: May 20, 2003
- *
- * Functions:
- * ROUTEbRelay - Relay packet
- *
- * Revision History:
- *
- */
-
-#include "mac.h"
-#include "tcrc.h"
-#include "rxtx.h"
-#include "wroute.h"
-#include "card.h"
-#include "baseband.h"
-
-/*--------------------- Static Definitions -------------------------*/
-
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Functions --------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*
- * Description:
- *      Relay packet. Return true if packet is copied to DMA1
- *
- * Parameters:
- * In:
- * pDevice -
- * pbySkbData - rx packet skb data
- * Out:
- * true, false
- *
- * Return Value: true if packet duplicate; otherwise false
- *
- */
-bool ROUTEbRelay(struct vnt_private *pDevice, unsigned char *pbySkbData,
- unsigned int uDataLen, unsigned int uNodeIndex)
-{
- PSMgmtObject pMgmt = pDevice->pMgmt;
- PSTxDesc pHeadTD, pLastTD;
- unsigned int cbFrameBodySize;
- unsigned int uMACfragNum;
- unsigned char byPktType;
- bool bNeedEncryption = false;
- SKeyItem STempKey;
- PSKeyItem pTransmitKey = NULL;
- unsigned int cbHeaderSize;
- unsigned int ii;
- unsigned char *pbyBSSID;
-
- if (AVAIL_TD(pDevice, TYPE_AC0DMA) <= 0) {
- pr_debug("Relay can't allocate TD1..\n");
- return false;
- }
-
- pHeadTD = pDevice->apCurrTD[TYPE_AC0DMA];
-
- pHeadTD->m_td1TD1.byTCR = (TCR_EDP | TCR_STP);
-
- memcpy(pDevice->sTxEthHeader.abyDstAddr, pbySkbData, ETH_HLEN);
-
- cbFrameBodySize = uDataLen - ETH_HLEN;
-
- if (ntohs(pDevice->sTxEthHeader.wType) > ETH_DATA_LEN)
- cbFrameBodySize += 8;
-
- if (pDevice->bEncryptionEnable == true) {
- bNeedEncryption = true;
-
- // get group key
- pbyBSSID = pDevice->abyBroadcastAddr;
- if (KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID,
- GROUP_KEY, &pTransmitKey) == false) {
- pTransmitKey = NULL;
- pr_debug("KEY is NULL. [%d]\n",
- pDevice->pMgmt->eCurrMode);
- } else {
- pr_debug("Get GTK\n");
- }
- }
-
- if (pDevice->bEnableHostWEP) {
- if (uNodeIndex < MAX_NODE_NUM + 1) {
- pTransmitKey = &STempKey;
- pTransmitKey->byCipherSuite = pMgmt->sNodeDBTable[uNodeIndex].byCipherSuite;
- pTransmitKey->dwKeyIndex = pMgmt->sNodeDBTable[uNodeIndex].dwKeyIndex;
- pTransmitKey->uKeyLength = pMgmt->sNodeDBTable[uNodeIndex].uWepKeyLength;
- pTransmitKey->dwTSC47_16 = pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16;
- pTransmitKey->wTSC15_0 = pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0;
- memcpy(pTransmitKey->abyKey,
- &pMgmt->sNodeDBTable[uNodeIndex].abyWepKey[0],
- pTransmitKey->uKeyLength);
- }
- }
-
- uMACfragNum = cbGetFragCount(pDevice, pTransmitKey,
- cbFrameBodySize, &pDevice->sTxEthHeader);
-
- if (uMACfragNum > AVAIL_TD(pDevice, TYPE_AC0DMA))
- return false;
-
- byPktType = pDevice->byPacketType;
-
- if (pDevice->bFixRate) {
- if (pDevice->eCurrentPHYType == PHY_TYPE_11B) {
- if (pDevice->uConnectionRate >= RATE_11M)
- pDevice->wCurrentRate = RATE_11M;
- else
- pDevice->wCurrentRate = pDevice->uConnectionRate;
- } else {
- if ((pDevice->eCurrentPHYType == PHY_TYPE_11A) &&
- (pDevice->uConnectionRate <= RATE_6M)) {
- pDevice->wCurrentRate = RATE_6M;
- } else {
- if (pDevice->uConnectionRate >= RATE_54M)
- pDevice->wCurrentRate = RATE_54M;
- else
- pDevice->wCurrentRate = pDevice->uConnectionRate;
- }
- }
- } else {
- pDevice->wCurrentRate = pDevice->pMgmt->sNodeDBTable[uNodeIndex].wTxDataRate;
- }
-
- if (pDevice->wCurrentRate <= RATE_11M)
- byPktType = PK_TYPE_11B;
-
- vGenerateFIFOHeader(pDevice, byPktType, pDevice->pbyTmpBuff,
- bNeedEncryption, cbFrameBodySize, TYPE_AC0DMA,
- pHeadTD, &pDevice->sTxEthHeader, pbySkbData,
- pTransmitKey, uNodeIndex, &uMACfragNum,
- &cbHeaderSize);
-
- if (MACbIsRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PS)) {
- // Disable PS
- MACbPSWakeup(pDevice->PortOffset);
- }
-
- pDevice->bPWBitOn = false;
-
- pLastTD = pHeadTD;
- for (ii = 0; ii < uMACfragNum; ii++) {
- // Poll Transmit the adapter
- wmb();
- pHeadTD->m_td0TD0.f1Owner = OWNED_BY_NIC;
- wmb();
- if (ii == (uMACfragNum - 1))
- pLastTD = pHeadTD;
- pHeadTD = pHeadTD->next;
- }
-
- pLastTD->pTDInfo->skb = NULL;
- pLastTD->pTDInfo->byFlags = 0;
-
- pDevice->apCurrTD[TYPE_AC0DMA] = pHeadTD;
-
- MACvTransmitAC0(pDevice->PortOffset);
-
- return true;
-}
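
The descriptor hand-off at the end of the removed ROUTEbRelay() is the standard DMA-ring ownership pattern: finish writing the descriptor, then use a write barrier so the device can never observe the ownership bit before the descriptor contents it guards. A generic, hypothetical sketch of that ordering follows; the descriptor layout is invented for illustration.

#include <linux/types.h>
#include <asm/barrier.h>

#define OWNED_BY_NIC	0x80000000u

struct my_tx_desc {			/* hypothetical ring descriptor */
	u32 flags;			/* ownership bit lives here */
	/* ... buffer address, length, ... */
	struct my_tx_desc *next;
};

static void hand_off_to_nic(struct my_tx_desc *desc, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		/*
		 * Order all earlier descriptor writes before the ownership
		 * flip, and the flip before whatever follows, mirroring the
		 * wmb()/set-owner/wmb() sequence in the removed relay path.
		 */
		wmb();
		desc->flags |= OWNED_BY_NIC;
		wmb();
		desc = desc->next;
	}
}
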
diff --git a/drivers/staging/vt6655/wroute.h b/drivers/staging/vt6655/wroute.h
deleted file mode 100644
index e59eec955ca..00000000000
--- a/drivers/staging/vt6655/wroute.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: wroute.h
- *
- * Purpose:
- *
- * Author: Lyndon Chen
- *
- * Date: May 21, 2003
- *
- */
-
-#ifndef __WROUTE_H__
-#define __WROUTE_H__
-
-#include "device.h"
-
-/*--------------------- Export Definitions -------------------------*/
-
-/*--------------------- Export Classes ----------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-bool ROUTEbRelay(struct vnt_private *pDevice, unsigned char *pbySkbData,
- unsigned int uDataLen, unsigned int uNodeIndex);
-
-#endif /* __WROUTE_H__ */
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index dbc311c3dc3..b95d5b1efcc 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -431,11 +431,8 @@ static bool vnt_alloc_bufs(struct vnt_private *priv)
for (ii = 0; ii < priv->num_tx_context; ii++) {
tx_context = kmalloc(sizeof(struct vnt_usb_send_context),
GFP_KERNEL);
- if (tx_context == NULL) {
- dev_err(&priv->usb->dev,
- "allocate tx usb context failed\n");
+ if (tx_context == NULL)
goto free_tx;
- }
priv->tx_context[ii] = tx_context;
tx_context->priv = priv;
@@ -471,10 +468,8 @@ static bool vnt_alloc_bufs(struct vnt_private *priv)
}
rcb->skb = dev_alloc_skb(priv->rx_buf_sz);
- if (rcb->skb == NULL) {
- dev_err(&priv->usb->dev, "Failed to alloc rx skb\n");
+ if (rcb->skb == NULL)
goto free_rx_tx;
- }
rcb->in_use = false;
@@ -491,7 +486,6 @@ static bool vnt_alloc_bufs(struct vnt_private *priv)
priv->int_buf.data_buf = kmalloc(MAX_INTERRUPT_SIZE, GFP_KERNEL);
if (priv->int_buf.data_buf == NULL) {
- dev_err(&priv->usb->dev, "Failed to alloc int buf\n");
usb_free_urb(priv->interrupt_urb);
goto free_rx_tx;
}
diff --git a/drivers/staging/wlan-ng/hfa384x.h b/drivers/staging/wlan-ng/hfa384x.h
index 1f2c78cc008..20d146b61ba 100644
--- a/drivers/staging/wlan-ng/hfa384x.h
+++ b/drivers/staging/wlan-ng/hfa384x.h
@@ -1376,6 +1376,7 @@ int hfa384x_drvr_setconfig(hfa384x_t *hw, u16 rid, void *buf, u16 len);
static inline int hfa384x_drvr_getconfig16(hfa384x_t *hw, u16 rid, void *val)
{
int result = 0;
+
result = hfa384x_drvr_getconfig(hw, rid, val, sizeof(u16));
if (result == 0)
*((u16 *) val) = le16_to_cpu(*((u16 *) val));
@@ -1385,6 +1386,7 @@ static inline int hfa384x_drvr_getconfig16(hfa384x_t *hw, u16 rid, void *val)
static inline int hfa384x_drvr_setconfig16(hfa384x_t *hw, u16 rid, u16 val)
{
u16 value = cpu_to_le16(val);
+
return hfa384x_drvr_setconfig(hw, rid, &value, sizeof(value));
}
@@ -1402,6 +1404,7 @@ static inline int
hfa384x_drvr_setconfig16_async(hfa384x_t *hw, u16 rid, u16 val)
{
u16 value = cpu_to_le16(val);
+
return hfa384x_drvr_setconfig_async(hw, rid, &value, sizeof(value),
NULL, NULL);
}
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index 898bde73c59..55d2f563e30 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -3583,12 +3583,8 @@ static void hfa384x_int_rxmonitor(wlandevice_t *wlandev,
}
skb = dev_alloc_skb(skblen);
- if (skb == NULL) {
- netdev_err(hw->wlandev->netdev,
- "alloc_skb failed trying to allocate %d bytes\n",
- skblen);
+ if (skb == NULL)
return;
- }
/* only prepend the prism header if in the right mode */
if ((wlandev->netdev->type == ARPHRD_IEEE80211_PRISM) &&
diff --git a/drivers/staging/wlan-ng/p80211conv.c b/drivers/staging/wlan-ng/p80211conv.c
index 3b5468c64fd..7eaaf9a6350 100644
--- a/drivers/staging/wlan-ng/p80211conv.c
+++ b/drivers/staging/wlan-ng/p80211conv.c
@@ -107,7 +107,7 @@ int skb_ether_to_p80211(wlandevice_t *wlandev, u32 ethconv,
struct p80211_metawep *p80211_wep)
{
- u16 fc;
+ __le16 fc;
u16 proto;
struct wlan_ethhdr e_hdr;
struct wlan_llc *e_llc;
diff --git a/drivers/staging/wlan-ng/p80211hdr.h b/drivers/staging/wlan-ng/p80211hdr.h
index 66b5e201d41..79d9b20b364 100644
--- a/drivers/staging/wlan-ng/p80211hdr.h
+++ b/drivers/staging/wlan-ng/p80211hdr.h
@@ -148,7 +148,7 @@
/* Generic 802.11 Header types */
struct p80211_hdr_a3 {
- u16 fc;
+ __le16 fc;
u16 dur;
u8 a1[ETH_ALEN];
u8 a2[ETH_ALEN];
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index 2dd9bf8a6e1..a9c1e0bafa6 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -358,7 +358,7 @@ static int p80211knetdev_hard_start_xmit(struct sk_buff *skb,
* and return success .
* TODO: we need a saner way to handle this
*/
- if (skb->protocol != ETH_P_80211_RAW) {
+ if (be16_to_cpu(skb->protocol) != ETH_P_80211_RAW) {
netif_start_queue(wlandev->netdev);
netdev_notice(netdev, "Tx attempt prior to association, frame dropped.\n");
netdev->stats.tx_dropped++;
@@ -369,7 +369,7 @@ static int p80211knetdev_hard_start_xmit(struct sk_buff *skb,
}
/* Check for raw transmits */
- if (skb->protocol == ETH_P_80211_RAW) {
+ if (be16_to_cpu(skb->protocol) == ETH_P_80211_RAW) {
if (!capable(CAP_NET_ADMIN)) {
result = 1;
goto failed;
diff --git a/drivers/staging/wlan-ng/prism2fw.c b/drivers/staging/wlan-ng/prism2fw.c
index 6c38f797d1a..9408644cc8b 100644
--- a/drivers/staging/wlan-ng/prism2fw.c
+++ b/drivers/staging/wlan-ng/prism2fw.c
@@ -238,7 +238,8 @@ static int prism2_fwtry(struct usb_device *udev, wlandevice_t *wlandev)
* 0 - success
* ~0 - failure
----------------------------------------------------------------*/
-static int prism2_fwapply(const struct ihex_binrec *rfptr, wlandevice_t *wlandev)
+static int prism2_fwapply(const struct ihex_binrec *rfptr,
+ wlandevice_t *wlandev)
{
signed int result = 0;
struct p80211msg_dot11req_mibget getmsg;
@@ -986,8 +987,8 @@ static int writeimage(wlandevice_t *wlandev, struct imgchunk *fchunk,
u32 currlen;
u32 currdaddr;
- rstmsg = kmalloc(sizeof(*rstmsg), GFP_KERNEL);
- rwrmsg = kmalloc(sizeof(*rwrmsg), GFP_KERNEL);
+ rstmsg = kzalloc(sizeof(*rstmsg), GFP_KERNEL);
+ rwrmsg = kzalloc(sizeof(*rwrmsg), GFP_KERNEL);
if (!rstmsg || !rwrmsg) {
kfree(rstmsg);
kfree(rwrmsg);
@@ -997,7 +998,6 @@ static int writeimage(wlandevice_t *wlandev, struct imgchunk *fchunk,
}
/* Initialize the messages */
- memset(rstmsg, 0, sizeof(*rstmsg));
strcpy(rstmsg->devname, wlandev->name);
rstmsg->msgcode = DIDmsg_p2req_ramdl_state;
rstmsg->msglen = sizeof(*rstmsg);
@@ -1011,7 +1011,6 @@ static int writeimage(wlandevice_t *wlandev, struct imgchunk *fchunk,
rstmsg->exeaddr.len = sizeof(u32);
rstmsg->resultcode.len = sizeof(u32);
- memset(rwrmsg, 0, sizeof(*rwrmsg));
strcpy(rwrmsg->devname, wlandev->name);
rwrmsg->msgcode = DIDmsg_p2req_ramdl_write;
rwrmsg->msglen = sizeof(*rwrmsg);
diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c
index be7778b5911..709d49e7f3c 100644
--- a/drivers/staging/xgifb/XGI_main_26.c
+++ b/drivers/staging/xgifb/XGI_main_26.c
@@ -2012,7 +2012,7 @@ static int xgifb_probe(struct pci_dev *pdev,
XGIfb_get_fix(&fb_info->fix, -1, fb_info);
fb_info->pseudo_palette = xgifb_info->pseudo_palette;
- fb_alloc_cmap(&fb_info->cmap, 256 , 0);
+ fb_alloc_cmap(&fb_info->cmap, 256, 0);
#ifdef CONFIG_MTRR
xgifb_info->mtrr = mtrr_add(xgifb_info->video_base,
diff --git a/drivers/staging/xgifb/vb_def.h b/drivers/staging/xgifb/vb_def.h
index 481eb174fdf..d9524a2e9ce 100644
--- a/drivers/staging/xgifb/vb_def.h
+++ b/drivers/staging/xgifb/vb_def.h
@@ -7,7 +7,6 @@
#define SupportCRT2in301C 0x0100 /* for 301C */
#define SetCHTVOverScan 0x8000
-#define Panel_320x480 0x07 /*fstn*/
#define PanelResInfo 0x1F /* CR36 Panel Type/LCDResInfo */
#define Panel_1024x768x75 0x22
#define Panel_1280x1024x75 0x23
diff --git a/drivers/staging/xgifb/vb_setmode.c b/drivers/staging/xgifb/vb_setmode.c
index d5f49d2a8db..1f6f699e238 100644
--- a/drivers/staging/xgifb/vb_setmode.c
+++ b/drivers/staging/xgifb/vb_setmode.c
@@ -4135,7 +4135,7 @@ static void XGI_SetGroup4(unsigned short ModeIdIndex,
tempax -= 1;
temp = (tempax & 0xFF00) >> 8;
- temp = ((temp & 0x0003) << 4);
+ temp = (temp & 0x0003) << 4;
xgifb_reg_set(pVBInfo->Part4Port, 0x1E, temp);
temp = (tempax & 0x00FF);
xgifb_reg_set(pVBInfo->Part4Port, 0x1D, temp);
diff --git a/drivers/staging/xgifb/vb_util.c b/drivers/staging/xgifb/vb_util.c
index 1b452f8b627..be3437ca339 100644
--- a/drivers/staging/xgifb/vb_util.c
+++ b/drivers/staging/xgifb/vb_util.c
@@ -9,11 +9,8 @@ void xgifb_reg_set(unsigned long port, u8 index, u8 data)
u8 xgifb_reg_get(unsigned long port, u8 index)
{
- u8 data;
-
outb(index, port);
- data = inb(port + 1);
- return data;
+ return inb(port + 1);
}
void xgifb_reg_and_or(unsigned long port, u8 index,
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 811d863f420..55f6774f706 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -609,6 +609,7 @@ static int __init iscsi_target_init_module(void)
return ret;
r2t_out:
+ iscsit_unregister_transport(&iscsi_target_transport);
kmem_cache_destroy(lio_r2t_cache);
ooo_out:
kmem_cache_destroy(lio_ooo_cache);
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 302eb3b7871..09a522bae22 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -790,7 +790,6 @@ struct iscsi_np {
void *np_context;
struct iscsit_transport *np_transport;
struct list_head np_list;
- struct iscsi_tpg_np *tpg_np;
} ____cacheline_aligned;
struct iscsi_tpg_np {
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 480f2e0ecc1..713c0c1877a 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -281,7 +281,6 @@ static int iscsi_login_zero_tsih_s1(
{
struct iscsi_session *sess = NULL;
struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
- enum target_prot_op sup_pro_ops;
int ret;
sess = kzalloc(sizeof(struct iscsi_session), GFP_KERNEL);
@@ -343,9 +342,8 @@ static int iscsi_login_zero_tsih_s1(
kfree(sess);
return -ENOMEM;
}
- sup_pro_ops = conn->conn_transport->iscsit_get_sup_prot_ops(conn);
- sess->se_sess = transport_init_session(sup_pro_ops);
+ sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
if (IS_ERR(sess->se_sess)) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
@@ -1161,6 +1159,7 @@ void iscsi_target_login_sess_out(struct iscsi_conn *conn,
}
kfree(conn->sess->sess_ops);
kfree(conn->sess);
+ conn->sess = NULL;
old_sess_out:
iscsi_stop_login_thread_timer(np);
@@ -1204,6 +1203,9 @@ old_sess_out:
conn->sock = NULL;
}
+ if (conn->conn_transport->iscsit_wait_conn)
+ conn->conn_transport->iscsit_wait_conn(conn);
+
if (conn->conn_transport->iscsit_free_conn)
conn->conn_transport->iscsit_free_conn(conn);
@@ -1364,6 +1366,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
}
login->zero_tsih = zero_tsih;
+ conn->sess->se_sess->sup_prot_ops =
+ conn->conn_transport->iscsit_get_sup_prot_ops(conn);
+
tpg = conn->tpg;
if (!tpg) {
pr_err("Unable to locate struct iscsi_conn->tpg\n");
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index c3cb5c15efd..9053a3c0c6e 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -501,7 +501,6 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
init_completion(&tpg_np->tpg_np_comp);
kref_init(&tpg_np->tpg_np_kref);
tpg_np->tpg_np = np;
- np->tpg_np = tpg_np;
tpg_np->tpg = tpg;
spin_lock(&tpg->tpg_np_lock);
diff --git a/drivers/target/iscsi/iscsi_target_transport.c b/drivers/target/iscsi/iscsi_target_transport.c
index 882728fac30..08217d62fb0 100644
--- a/drivers/target/iscsi/iscsi_target_transport.c
+++ b/drivers/target/iscsi/iscsi_target_transport.c
@@ -26,8 +26,7 @@ struct iscsit_transport *iscsit_get_transport(int type)
void iscsit_put_transport(struct iscsit_transport *t)
{
- if (t->owner)
- module_put(t->owner);
+ module_put(t->owner);
}
int iscsit_register_transport(struct iscsit_transport *t)
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 7c6a95bcb35..bcd88ec9979 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -1356,15 +1356,15 @@ static int iscsit_do_tx_data(
struct iscsi_conn *conn,
struct iscsi_data_count *count)
{
- int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len;
+ int ret, iov_len;
struct kvec *iov_p;
struct msghdr msg;
if (!conn || !conn->sock || !conn->conn_ops)
return -1;
- if (data <= 0) {
- pr_err("Data length is: %d\n", data);
+ if (count->data_length <= 0) {
+ pr_err("Data length is: %d\n", count->data_length);
return -1;
}
@@ -1373,20 +1373,16 @@ static int iscsit_do_tx_data(
iov_p = count->iov;
iov_len = count->iov_count;
- while (total_tx < data) {
- tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
- (data - total_tx));
- if (tx_loop <= 0) {
- pr_debug("tx_loop: %d total_tx %d\n",
- tx_loop, total_tx);
- return tx_loop;
- }
- total_tx += tx_loop;
- pr_debug("tx_loop: %d, total_tx: %d, data: %d\n",
- tx_loop, total_tx, data);
+ ret = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
+ count->data_length);
+ if (ret != count->data_length) {
+ pr_err("Unexpected ret: %d send data %d\n",
+ ret, count->data_length);
+ return -EPIPE;
}
+ pr_debug("ret: %d, sent data: %d\n", ret, count->data_length);
- return total_tx;
+ return ret;
}
int rx_data(
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index dda9a08c939..6b3c3295468 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -138,7 +138,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
goto out_done;
}
- tl_nexus = tl_hba->tl_nexus;
+ tl_nexus = tl_tpg->tl_nexus;
if (!tl_nexus) {
scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
" does not exist\n");
@@ -218,16 +218,26 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
* to struct scsi_device
*/
static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
- struct tcm_loop_nexus *tl_nexus,
int lun, int task, enum tcm_tmreq_table tmr)
{
struct se_cmd *se_cmd = NULL;
struct se_session *se_sess;
struct se_portal_group *se_tpg;
+ struct tcm_loop_nexus *tl_nexus;
struct tcm_loop_cmd *tl_cmd = NULL;
struct tcm_loop_tmr *tl_tmr = NULL;
int ret = TMR_FUNCTION_FAILED, rc;
+ /*
+ * Locate the tl_nexus and se_sess pointers
+ */
+ tl_nexus = tl_tpg->tl_nexus;
+ if (!tl_nexus) {
+ pr_err("Unable to perform device reset without"
+ " active I_T Nexus\n");
+ return ret;
+ }
+
tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
if (!tl_cmd) {
pr_err("Unable to allocate memory for tl_cmd\n");
@@ -243,7 +253,7 @@ static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
se_cmd = &tl_cmd->tl_se_cmd;
se_tpg = &tl_tpg->tl_se_tpg;
- se_sess = tl_nexus->se_sess;
+ se_sess = tl_tpg->tl_nexus->se_sess;
/*
* Initialize struct se_cmd descriptor from target_core_mod infrastructure
*/
@@ -288,7 +298,6 @@ release:
static int tcm_loop_abort_task(struct scsi_cmnd *sc)
{
struct tcm_loop_hba *tl_hba;
- struct tcm_loop_nexus *tl_nexus;
struct tcm_loop_tpg *tl_tpg;
int ret = FAILED;
@@ -296,21 +305,8 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc)
* Locate the tcm_loop_hba_t pointer
*/
tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
- /*
- * Locate the tl_nexus and se_sess pointers
- */
- tl_nexus = tl_hba->tl_nexus;
- if (!tl_nexus) {
- pr_err("Unable to perform device reset without"
- " active I_T Nexus\n");
- return FAILED;
- }
-
- /*
- * Locate the tl_tpg pointer from TargetID in sc->device->id
- */
tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
- ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
+ ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
sc->request->tag, TMR_ABORT_TASK);
return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}
@@ -322,7 +318,6 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc)
static int tcm_loop_device_reset(struct scsi_cmnd *sc)
{
struct tcm_loop_hba *tl_hba;
- struct tcm_loop_nexus *tl_nexus;
struct tcm_loop_tpg *tl_tpg;
int ret = FAILED;
@@ -330,20 +325,9 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
* Locate the tcm_loop_hba_t pointer
*/
tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
- /*
- * Locate the tl_nexus and se_sess pointers
- */
- tl_nexus = tl_hba->tl_nexus;
- if (!tl_nexus) {
- pr_err("Unable to perform device reset without"
- " active I_T Nexus\n");
- return FAILED;
- }
- /*
- * Locate the tl_tpg pointer from TargetID in sc->device->id
- */
tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
- ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
+
+ ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
0, TMR_LUN_RESET);
return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}
@@ -939,8 +923,8 @@ static int tcm_loop_make_nexus(
struct tcm_loop_nexus *tl_nexus;
int ret = -ENOMEM;
- if (tl_tpg->tl_hba->tl_nexus) {
- pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n");
+ if (tl_tpg->tl_nexus) {
+ pr_debug("tl_tpg->tl_nexus already exists\n");
return -EEXIST;
}
se_tpg = &tl_tpg->tl_se_tpg;
@@ -975,7 +959,7 @@ static int tcm_loop_make_nexus(
*/
__transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
tl_nexus->se_sess, tl_nexus);
- tl_tpg->tl_hba->tl_nexus = tl_nexus;
+ tl_tpg->tl_nexus = tl_nexus;
pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
name);
@@ -991,12 +975,8 @@ static int tcm_loop_drop_nexus(
{
struct se_session *se_sess;
struct tcm_loop_nexus *tl_nexus;
- struct tcm_loop_hba *tl_hba = tpg->tl_hba;
- if (!tl_hba)
- return -ENODEV;
-
- tl_nexus = tl_hba->tl_nexus;
+ tl_nexus = tpg->tl_nexus;
if (!tl_nexus)
return -ENODEV;
@@ -1012,13 +992,13 @@ static int tcm_loop_drop_nexus(
}
pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
- " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
+ " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba),
tl_nexus->se_sess->se_node_acl->initiatorname);
/*
* Release the SCSI I_T Nexus to the emulated SAS Target Port
*/
transport_deregister_session(tl_nexus->se_sess);
- tpg->tl_hba->tl_nexus = NULL;
+ tpg->tl_nexus = NULL;
kfree(tl_nexus);
return 0;
}
@@ -1034,7 +1014,7 @@ static ssize_t tcm_loop_tpg_show_nexus(
struct tcm_loop_nexus *tl_nexus;
ssize_t ret;
- tl_nexus = tl_tpg->tl_hba->tl_nexus;
+ tl_nexus = tl_tpg->tl_nexus;
if (!tl_nexus)
return -ENODEV;
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
index 54c59d0b660..6ae49f272ba 100644
--- a/drivers/target/loopback/tcm_loop.h
+++ b/drivers/target/loopback/tcm_loop.h
@@ -27,11 +27,6 @@ struct tcm_loop_tmr {
};
struct tcm_loop_nexus {
- int it_nexus_active;
- /*
- * Pointer to Linux/SCSI HBA from linux/include/scsi_host.h
- */
- struct scsi_host *sh;
/*
* Pointer to TCM session for I_T Nexus
*/
@@ -51,6 +46,7 @@ struct tcm_loop_tpg {
atomic_t tl_tpg_port_count;
struct se_portal_group tl_se_tpg;
struct tcm_loop_hba *tl_hba;
+ struct tcm_loop_nexus *tl_nexus;
};
struct tcm_loop_hba {
@@ -59,7 +55,6 @@ struct tcm_loop_hba {
struct se_hba_s *se_hba;
struct se_lun *tl_hba_lun;
struct se_port *tl_hba_lun_sep;
- struct tcm_loop_nexus *tl_nexus;
struct device dev;
struct Scsi_Host *sh;
struct tcm_loop_tpg tl_hba_tpgs[TL_TPGS_PER_HBA];
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 79f9296a08a..75d89adfccc 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -50,6 +50,19 @@
#include "target_core_rd.h"
#include "target_core_xcopy.h"
+#define TB_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \
+static void target_core_setup_##_name##_cit(struct se_subsystem_api *sa) \
+{ \
+ struct target_backend_cits *tbc = &sa->tb_cits; \
+ struct config_item_type *cit = &tbc->tb_##_name##_cit; \
+ \
+ cit->ct_item_ops = _item_ops; \
+ cit->ct_group_ops = _group_ops; \
+ cit->ct_attrs = _attrs; \
+ cit->ct_owner = sa->owner; \
+ pr_debug("Setup generic %s\n", __stringify(_name)); \
+}
+
extern struct t10_alua_lu_gp *default_lu_gp;
static LIST_HEAD(g_tf_list);
@@ -126,48 +139,57 @@ static struct config_group *target_core_register_fabric(
pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:"
" %s\n", group, name);
- /*
- * Below are some hardcoded request_module() calls to automatically
- * local fabric modules when the following is called:
- *
- * mkdir -p /sys/kernel/config/target/$MODULE_NAME
- *
- * Note that this does not limit which TCM fabric module can be
- * registered, but simply provids auto loading logic for modules with
- * mkdir(2) system calls with known TCM fabric modules.
- */
- if (!strncmp(name, "iscsi", 5)) {
+
+ tf = target_core_get_fabric(name);
+ if (!tf) {
+ pr_err("target_core_register_fabric() trying autoload for %s\n",
+ name);
+
/*
- * Automatically load the LIO Target fabric module when the
- * following is called:
+ * Below are some hardcoded request_module() calls to automatically
+		 * load local fabric modules when the following is called:
*
- * mkdir -p $CONFIGFS/target/iscsi
- */
- ret = request_module("iscsi_target_mod");
- if (ret < 0) {
- pr_err("request_module() failed for"
- " iscsi_target_mod.ko: %d\n", ret);
- return ERR_PTR(-EINVAL);
- }
- } else if (!strncmp(name, "loopback", 8)) {
- /*
- * Automatically load the tcm_loop fabric module when the
- * following is called:
+ * mkdir -p /sys/kernel/config/target/$MODULE_NAME
*
- * mkdir -p $CONFIGFS/target/loopback
+ * Note that this does not limit which TCM fabric module can be
+		 * registered, but simply provides auto loading logic for modules with
+ * mkdir(2) system calls with known TCM fabric modules.
*/
- ret = request_module("tcm_loop");
- if (ret < 0) {
- pr_err("request_module() failed for"
- " tcm_loop.ko: %d\n", ret);
- return ERR_PTR(-EINVAL);
+
+ if (!strncmp(name, "iscsi", 5)) {
+ /*
+ * Automatically load the LIO Target fabric module when the
+ * following is called:
+ *
+ * mkdir -p $CONFIGFS/target/iscsi
+ */
+ ret = request_module("iscsi_target_mod");
+ if (ret < 0) {
+ pr_err("request_module() failed for"
+ " iscsi_target_mod.ko: %d\n", ret);
+ return ERR_PTR(-EINVAL);
+ }
+ } else if (!strncmp(name, "loopback", 8)) {
+ /*
+ * Automatically load the tcm_loop fabric module when the
+ * following is called:
+ *
+ * mkdir -p $CONFIGFS/target/loopback
+ */
+ ret = request_module("tcm_loop");
+ if (ret < 0) {
+ pr_err("request_module() failed for"
+ " tcm_loop.ko: %d\n", ret);
+ return ERR_PTR(-EINVAL);
+ }
}
+
+ tf = target_core_get_fabric(name);
}
- tf = target_core_get_fabric(name);
if (!tf) {
pr_err("target_core_get_fabric() failed for %s\n",
- name);
+ name);
return ERR_PTR(-EINVAL);
}
pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:"
@@ -562,198 +584,21 @@ EXPORT_SYMBOL(target_fabric_configfs_deregister);
// Stop functions called by external Target Fabrics Modules
//############################################################################*/
-/* Start functions for struct config_item_type target_core_dev_attrib_cit */
-
-#define DEF_DEV_ATTRIB_SHOW(_name) \
-static ssize_t target_core_dev_show_attr_##_name( \
- struct se_dev_attrib *da, \
- char *page) \
-{ \
- return snprintf(page, PAGE_SIZE, "%u\n", \
- (u32)da->da_dev->dev_attrib._name); \
-}
-
-#define DEF_DEV_ATTRIB_STORE(_name) \
-static ssize_t target_core_dev_store_attr_##_name( \
- struct se_dev_attrib *da, \
- const char *page, \
- size_t count) \
-{ \
- unsigned long val; \
- int ret; \
- \
- ret = kstrtoul(page, 0, &val); \
- if (ret < 0) { \
- pr_err("kstrtoul() failed with" \
- " ret: %d\n", ret); \
- return -EINVAL; \
- } \
- ret = se_dev_set_##_name(da->da_dev, (u32)val); \
- \
- return (!ret) ? count : -EINVAL; \
-}
-
-#define DEF_DEV_ATTRIB(_name) \
-DEF_DEV_ATTRIB_SHOW(_name); \
-DEF_DEV_ATTRIB_STORE(_name);
-
-#define DEF_DEV_ATTRIB_RO(_name) \
-DEF_DEV_ATTRIB_SHOW(_name);
+/* Start functions for struct config_item_type tb_dev_attrib_cit */
CONFIGFS_EATTR_STRUCT(target_core_dev_attrib, se_dev_attrib);
-#define SE_DEV_ATTR(_name, _mode) \
-static struct target_core_dev_attrib_attribute \
- target_core_dev_attrib_##_name = \
- __CONFIGFS_EATTR(_name, _mode, \
- target_core_dev_show_attr_##_name, \
- target_core_dev_store_attr_##_name);
-
-#define SE_DEV_ATTR_RO(_name); \
-static struct target_core_dev_attrib_attribute \
- target_core_dev_attrib_##_name = \
- __CONFIGFS_EATTR_RO(_name, \
- target_core_dev_show_attr_##_name);
-
-DEF_DEV_ATTRIB(emulate_model_alias);
-SE_DEV_ATTR(emulate_model_alias, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(emulate_dpo);
-SE_DEV_ATTR(emulate_dpo, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(emulate_fua_write);
-SE_DEV_ATTR(emulate_fua_write, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(emulate_fua_read);
-SE_DEV_ATTR(emulate_fua_read, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(emulate_write_cache);
-SE_DEV_ATTR(emulate_write_cache, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(emulate_ua_intlck_ctrl);
-SE_DEV_ATTR(emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(emulate_tas);
-SE_DEV_ATTR(emulate_tas, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(emulate_tpu);
-SE_DEV_ATTR(emulate_tpu, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(emulate_tpws);
-SE_DEV_ATTR(emulate_tpws, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(emulate_caw);
-SE_DEV_ATTR(emulate_caw, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(emulate_3pc);
-SE_DEV_ATTR(emulate_3pc, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(pi_prot_type);
-SE_DEV_ATTR(pi_prot_type, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB_RO(hw_pi_prot_type);
-SE_DEV_ATTR_RO(hw_pi_prot_type);
-
-DEF_DEV_ATTRIB(pi_prot_format);
-SE_DEV_ATTR(pi_prot_format, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(enforce_pr_isids);
-SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(is_nonrot);
-SE_DEV_ATTR(is_nonrot, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(emulate_rest_reord);
-SE_DEV_ATTR(emulate_rest_reord, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(force_pr_aptpl);
-SE_DEV_ATTR(force_pr_aptpl, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB_RO(hw_block_size);
-SE_DEV_ATTR_RO(hw_block_size);
-
-DEF_DEV_ATTRIB(block_size);
-SE_DEV_ATTR(block_size, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB_RO(hw_max_sectors);
-SE_DEV_ATTR_RO(hw_max_sectors);
-
-DEF_DEV_ATTRIB(fabric_max_sectors);
-SE_DEV_ATTR(fabric_max_sectors, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(optimal_sectors);
-SE_DEV_ATTR(optimal_sectors, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB_RO(hw_queue_depth);
-SE_DEV_ATTR_RO(hw_queue_depth);
-
-DEF_DEV_ATTRIB(queue_depth);
-SE_DEV_ATTR(queue_depth, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(max_unmap_lba_count);
-SE_DEV_ATTR(max_unmap_lba_count, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(max_unmap_block_desc_count);
-SE_DEV_ATTR(max_unmap_block_desc_count, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(unmap_granularity);
-SE_DEV_ATTR(unmap_granularity, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(unmap_granularity_alignment);
-SE_DEV_ATTR(unmap_granularity_alignment, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(max_write_same_len);
-SE_DEV_ATTR(max_write_same_len, S_IRUGO | S_IWUSR);
-
CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group);
-static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
- &target_core_dev_attrib_emulate_model_alias.attr,
- &target_core_dev_attrib_emulate_dpo.attr,
- &target_core_dev_attrib_emulate_fua_write.attr,
- &target_core_dev_attrib_emulate_fua_read.attr,
- &target_core_dev_attrib_emulate_write_cache.attr,
- &target_core_dev_attrib_emulate_ua_intlck_ctrl.attr,
- &target_core_dev_attrib_emulate_tas.attr,
- &target_core_dev_attrib_emulate_tpu.attr,
- &target_core_dev_attrib_emulate_tpws.attr,
- &target_core_dev_attrib_emulate_caw.attr,
- &target_core_dev_attrib_emulate_3pc.attr,
- &target_core_dev_attrib_pi_prot_type.attr,
- &target_core_dev_attrib_hw_pi_prot_type.attr,
- &target_core_dev_attrib_pi_prot_format.attr,
- &target_core_dev_attrib_enforce_pr_isids.attr,
- &target_core_dev_attrib_force_pr_aptpl.attr,
- &target_core_dev_attrib_is_nonrot.attr,
- &target_core_dev_attrib_emulate_rest_reord.attr,
- &target_core_dev_attrib_hw_block_size.attr,
- &target_core_dev_attrib_block_size.attr,
- &target_core_dev_attrib_hw_max_sectors.attr,
- &target_core_dev_attrib_fabric_max_sectors.attr,
- &target_core_dev_attrib_optimal_sectors.attr,
- &target_core_dev_attrib_hw_queue_depth.attr,
- &target_core_dev_attrib_queue_depth.attr,
- &target_core_dev_attrib_max_unmap_lba_count.attr,
- &target_core_dev_attrib_max_unmap_block_desc_count.attr,
- &target_core_dev_attrib_unmap_granularity.attr,
- &target_core_dev_attrib_unmap_granularity_alignment.attr,
- &target_core_dev_attrib_max_write_same_len.attr,
- NULL,
-};
-
static struct configfs_item_operations target_core_dev_attrib_ops = {
.show_attribute = target_core_dev_attrib_attr_show,
.store_attribute = target_core_dev_attrib_attr_store,
};
-static struct config_item_type target_core_dev_attrib_cit = {
- .ct_item_ops = &target_core_dev_attrib_ops,
- .ct_attrs = target_core_dev_attrib_attrs,
- .ct_owner = THIS_MODULE,
-};
+TB_CIT_SETUP(dev_attrib, &target_core_dev_attrib_ops, NULL, NULL);
-/* End functions for struct config_item_type target_core_dev_attrib_cit */
+/* End functions for struct config_item_type tb_dev_attrib_cit */
-/* Start functions for struct config_item_type target_core_dev_wwn_cit */
+/* Start functions for struct config_item_type tb_dev_wwn_cit */
CONFIGFS_EATTR_STRUCT(target_core_dev_wwn, t10_wwn);
#define SE_DEV_WWN_ATTR(_name, _mode) \
@@ -984,15 +829,11 @@ static struct configfs_item_operations target_core_dev_wwn_ops = {
.store_attribute = target_core_dev_wwn_attr_store,
};
-static struct config_item_type target_core_dev_wwn_cit = {
- .ct_item_ops = &target_core_dev_wwn_ops,
- .ct_attrs = target_core_dev_wwn_attrs,
- .ct_owner = THIS_MODULE,
-};
+TB_CIT_SETUP(dev_wwn, &target_core_dev_wwn_ops, NULL, target_core_dev_wwn_attrs);
-/* End functions for struct config_item_type target_core_dev_wwn_cit */
+/* End functions for struct config_item_type tb_dev_wwn_cit */
-/* Start functions for struct config_item_type target_core_dev_pr_cit */
+/* Start functions for struct config_item_type tb_dev_pr_cit */
CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_device);
#define SE_DEV_PR_ATTR(_name, _mode) \
@@ -1453,15 +1294,11 @@ static struct configfs_item_operations target_core_dev_pr_ops = {
.store_attribute = target_core_dev_pr_attr_store,
};
-static struct config_item_type target_core_dev_pr_cit = {
- .ct_item_ops = &target_core_dev_pr_ops,
- .ct_attrs = target_core_dev_pr_attrs,
- .ct_owner = THIS_MODULE,
-};
+TB_CIT_SETUP(dev_pr, &target_core_dev_pr_ops, NULL, target_core_dev_pr_attrs);
-/* End functions for struct config_item_type target_core_dev_pr_cit */
+/* End functions for struct config_item_type tb_dev_pr_cit */
-/* Start functions for struct config_item_type target_core_dev_cit */
+/* Start functions for struct config_item_type tb_dev_cit */
static ssize_t target_core_show_dev_info(void *p, char *page)
{
@@ -1925,7 +1762,7 @@ static struct target_core_configfs_attribute target_core_attr_dev_lba_map = {
.store = target_core_store_dev_lba_map,
};
-static struct configfs_attribute *lio_core_dev_attrs[] = {
+static struct configfs_attribute *target_core_dev_attrs[] = {
&target_core_attr_dev_info.attr,
&target_core_attr_dev_control.attr,
&target_core_attr_dev_alias.attr,
@@ -1984,13 +1821,9 @@ static struct configfs_item_operations target_core_dev_item_ops = {
.store_attribute = target_core_dev_store,
};
-static struct config_item_type target_core_dev_cit = {
- .ct_item_ops = &target_core_dev_item_ops,
- .ct_attrs = lio_core_dev_attrs,
- .ct_owner = THIS_MODULE,
-};
+TB_CIT_SETUP(dev, &target_core_dev_item_ops, NULL, target_core_dev_attrs);
-/* End functions for struct config_item_type target_core_dev_cit */
+/* End functions for struct config_item_type tb_dev_cit */
/* Start functions for struct config_item_type target_core_alua_lu_gp_cit */
@@ -2670,7 +2503,7 @@ static struct config_item_type target_core_alua_tg_pt_gp_cit = {
/* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
-/* Start functions for struct config_item_type target_core_alua_tg_pt_gps_cit */
+/* Start functions for struct config_item_type tb_alua_tg_pt_gps_cit */
static struct config_group *target_core_alua_create_tg_pt_gp(
struct config_group *group,
@@ -2721,12 +2554,9 @@ static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
.drop_item = &target_core_alua_drop_tg_pt_gp,
};
-static struct config_item_type target_core_alua_tg_pt_gps_cit = {
- .ct_group_ops = &target_core_alua_tg_pt_gps_group_ops,
- .ct_owner = THIS_MODULE,
-};
+TB_CIT_SETUP(dev_alua_tg_pt_gps, NULL, &target_core_alua_tg_pt_gps_group_ops, NULL);
-/* End functions for struct config_item_type target_core_alua_tg_pt_gps_cit */
+/* End functions for struct config_item_type tb_alua_tg_pt_gps_cit */
/* Start functions for struct config_item_type target_core_alua_cit */
@@ -2744,7 +2574,7 @@ static struct config_item_type target_core_alua_cit = {
/* End functions for struct config_item_type target_core_alua_cit */
-/* Start functions for struct config_item_type target_core_stat_cit */
+/* Start functions for struct config_item_type tb_dev_stat_cit */
static struct config_group *target_core_stat_mkdir(
struct config_group *group,
@@ -2765,12 +2595,9 @@ static struct configfs_group_operations target_core_stat_group_ops = {
.drop_item = &target_core_stat_rmdir,
};
-static struct config_item_type target_core_stat_cit = {
- .ct_group_ops = &target_core_stat_group_ops,
- .ct_owner = THIS_MODULE,
-};
+TB_CIT_SETUP(dev_stat, NULL, &target_core_stat_group_ops, NULL);
-/* End functions for struct config_item_type target_core_stat_cit */
+/* End functions for struct config_item_type tb_dev_stat_cit */
/* Start functions for struct config_item_type target_core_hba_cit */
@@ -2806,17 +2633,17 @@ static struct config_group *target_core_make_subdev(
if (!dev_cg->default_groups)
goto out_free_device;
- config_group_init_type_name(dev_cg, name, &target_core_dev_cit);
+ config_group_init_type_name(dev_cg, name, &t->tb_cits.tb_dev_cit);
config_group_init_type_name(&dev->dev_attrib.da_group, "attrib",
- &target_core_dev_attrib_cit);
+ &t->tb_cits.tb_dev_attrib_cit);
config_group_init_type_name(&dev->dev_pr_group, "pr",
- &target_core_dev_pr_cit);
+ &t->tb_cits.tb_dev_pr_cit);
config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn",
- &target_core_dev_wwn_cit);
+ &t->tb_cits.tb_dev_wwn_cit);
config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group,
- "alua", &target_core_alua_tg_pt_gps_cit);
+ "alua", &t->tb_cits.tb_dev_alua_tg_pt_gps_cit);
config_group_init_type_name(&dev->dev_stat_grps.stat_group,
- "statistics", &target_core_stat_cit);
+ "statistics", &t->tb_cits.tb_dev_stat_cit);
dev_cg->default_groups[0] = &dev->dev_attrib.da_group;
dev_cg->default_groups[1] = &dev->dev_pr_group;
@@ -3110,6 +2937,17 @@ static struct config_item_type target_core_cit = {
/* Stop functions for struct config_item_type target_core_hba_cit */
+void target_core_setup_sub_cits(struct se_subsystem_api *sa)
+{
+ target_core_setup_dev_cit(sa);
+ target_core_setup_dev_attrib_cit(sa);
+ target_core_setup_dev_pr_cit(sa);
+ target_core_setup_dev_wwn_cit(sa);
+ target_core_setup_dev_alua_tg_pt_gps_cit(sa);
+ target_core_setup_dev_stat_cit(sa);
+}
+EXPORT_SYMBOL(target_core_setup_sub_cits);
+
static int __init target_core_init_configfs(void)
{
struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL;
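With target_core_setup_sub_cits() exported above, each backend wires up the generic cits from its module init and then overrides the device-attribute list with its own, as the fileio/iblock/pscsi/rd_mcp/tcmu hunks below do. A minimal sketch of that pattern (example_template and example_backend_dev_attrs are placeholder names, not symbols from this patch):

static int __init example_backend_module_init(void)
{
	struct target_backend_cits *tbc = &example_template.tb_cits;

	/* populate the six generic tb_*_cit config_item_types for this backend */
	target_core_setup_sub_cits(&example_template);
	/* point the attrib group at this backend's own attribute list */
	tbc->tb_dev_attrib_cit.ct_attrs = example_backend_dev_attrs;

	return transport_subsystem_register(&example_template);
}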
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index c45f9e907e4..7653cfb027a 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -659,6 +659,7 @@ int se_dev_set_max_unmap_lba_count(
dev, dev->dev_attrib.max_unmap_lba_count);
return 0;
}
+EXPORT_SYMBOL(se_dev_set_max_unmap_lba_count);
int se_dev_set_max_unmap_block_desc_count(
struct se_device *dev,
@@ -670,6 +671,7 @@ int se_dev_set_max_unmap_block_desc_count(
dev, dev->dev_attrib.max_unmap_block_desc_count);
return 0;
}
+EXPORT_SYMBOL(se_dev_set_max_unmap_block_desc_count);
int se_dev_set_unmap_granularity(
struct se_device *dev,
@@ -680,6 +682,7 @@ int se_dev_set_unmap_granularity(
dev, dev->dev_attrib.unmap_granularity);
return 0;
}
+EXPORT_SYMBOL(se_dev_set_unmap_granularity);
int se_dev_set_unmap_granularity_alignment(
struct se_device *dev,
@@ -690,6 +693,7 @@ int se_dev_set_unmap_granularity_alignment(
dev, dev->dev_attrib.unmap_granularity_alignment);
return 0;
}
+EXPORT_SYMBOL(se_dev_set_unmap_granularity_alignment);
int se_dev_set_max_write_same_len(
struct se_device *dev,
@@ -700,6 +704,7 @@ int se_dev_set_max_write_same_len(
dev, dev->dev_attrib.max_write_same_len);
return 0;
}
+EXPORT_SYMBOL(se_dev_set_max_write_same_len);
static void dev_set_t10_wwn_model_alias(struct se_device *dev)
{
@@ -738,6 +743,7 @@ int se_dev_set_emulate_model_alias(struct se_device *dev, int flag)
return 0;
}
+EXPORT_SYMBOL(se_dev_set_emulate_model_alias);
int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
@@ -753,6 +759,7 @@ int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
return 0;
}
+EXPORT_SYMBOL(se_dev_set_emulate_dpo);
int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
@@ -760,17 +767,12 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
-
- if (flag &&
- dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
- pr_err("emulate_fua_write not supported for pSCSI\n");
- return -EINVAL;
- }
dev->dev_attrib.emulate_fua_write = flag;
pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
dev, dev->dev_attrib.emulate_fua_write);
return 0;
}
+EXPORT_SYMBOL(se_dev_set_emulate_fua_write);
int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
@@ -786,6 +788,7 @@ int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
return 0;
}
+EXPORT_SYMBOL(se_dev_set_emulate_fua_read);
int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
@@ -794,11 +797,6 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
return -EINVAL;
}
if (flag &&
- dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
- pr_err("emulate_write_cache not supported for pSCSI\n");
- return -EINVAL;
- }
- if (flag &&
dev->transport->get_write_cache) {
pr_err("emulate_write_cache not supported for this device\n");
return -EINVAL;
@@ -809,6 +807,7 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
dev, dev->dev_attrib.emulate_write_cache);
return 0;
}
+EXPORT_SYMBOL(se_dev_set_emulate_write_cache);
int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
@@ -829,6 +828,7 @@ int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
return 0;
}
+EXPORT_SYMBOL(se_dev_set_emulate_ua_intlck_ctrl);
int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
@@ -849,6 +849,7 @@ int se_dev_set_emulate_tas(struct se_device *dev, int flag)
return 0;
}
+EXPORT_SYMBOL(se_dev_set_emulate_tas);
int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
@@ -870,6 +871,7 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
dev, flag);
return 0;
}
+EXPORT_SYMBOL(se_dev_set_emulate_tpu);
int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
@@ -891,6 +893,7 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
dev, flag);
return 0;
}
+EXPORT_SYMBOL(se_dev_set_emulate_tpws);
int se_dev_set_emulate_caw(struct se_device *dev, int flag)
{
@@ -904,6 +907,7 @@ int se_dev_set_emulate_caw(struct se_device *dev, int flag)
return 0;
}
+EXPORT_SYMBOL(se_dev_set_emulate_caw);
int se_dev_set_emulate_3pc(struct se_device *dev, int flag)
{
@@ -917,6 +921,7 @@ int se_dev_set_emulate_3pc(struct se_device *dev, int flag)
return 0;
}
+EXPORT_SYMBOL(se_dev_set_emulate_3pc);
int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
{
@@ -970,6 +975,7 @@ int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
return 0;
}
+EXPORT_SYMBOL(se_dev_set_pi_prot_type);
int se_dev_set_pi_prot_format(struct se_device *dev, int flag)
{
@@ -1005,6 +1011,7 @@ int se_dev_set_pi_prot_format(struct se_device *dev, int flag)
return 0;
}
+EXPORT_SYMBOL(se_dev_set_pi_prot_format);
int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
@@ -1017,6 +1024,7 @@ int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
(dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
return 0;
}
+EXPORT_SYMBOL(se_dev_set_enforce_pr_isids);
int se_dev_set_force_pr_aptpl(struct se_device *dev, int flag)
{
@@ -1034,6 +1042,7 @@ int se_dev_set_force_pr_aptpl(struct se_device *dev, int flag)
pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", dev, flag);
return 0;
}
+EXPORT_SYMBOL(se_dev_set_force_pr_aptpl);
int se_dev_set_is_nonrot(struct se_device *dev, int flag)
{
@@ -1046,6 +1055,7 @@ int se_dev_set_is_nonrot(struct se_device *dev, int flag)
dev, flag);
return 0;
}
+EXPORT_SYMBOL(se_dev_set_is_nonrot);
int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
{
@@ -1058,6 +1068,7 @@ int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
return 0;
}
+EXPORT_SYMBOL(se_dev_set_emulate_rest_reord);
/*
* Note, this can only be called on unexported SE Device Object.
@@ -1076,31 +1087,21 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
return -EINVAL;
}
- if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+ if (queue_depth > dev->dev_attrib.queue_depth) {
if (queue_depth > dev->dev_attrib.hw_queue_depth) {
- pr_err("dev[%p]: Passed queue_depth: %u"
- " exceeds TCM/SE_Device TCQ: %u\n",
- dev, queue_depth,
+ pr_err("dev[%p]: Passed queue_depth:"
+ " %u exceeds TCM/SE_Device MAX"
+ " TCQ: %u\n", dev, queue_depth,
dev->dev_attrib.hw_queue_depth);
return -EINVAL;
}
- } else {
- if (queue_depth > dev->dev_attrib.queue_depth) {
- if (queue_depth > dev->dev_attrib.hw_queue_depth) {
- pr_err("dev[%p]: Passed queue_depth:"
- " %u exceeds TCM/SE_Device MAX"
- " TCQ: %u\n", dev, queue_depth,
- dev->dev_attrib.hw_queue_depth);
- return -EINVAL;
- }
- }
}
-
dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth;
pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
dev, queue_depth);
return 0;
}
+EXPORT_SYMBOL(se_dev_set_queue_depth);
int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
{
@@ -1123,22 +1124,12 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
DA_STATUS_MAX_SECTORS_MIN);
return -EINVAL;
}
- if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
- if (fabric_max_sectors > dev->dev_attrib.hw_max_sectors) {
- pr_err("dev[%p]: Passed fabric_max_sectors: %u"
- " greater than TCM/SE_Device max_sectors:"
- " %u\n", dev, fabric_max_sectors,
- dev->dev_attrib.hw_max_sectors);
- return -EINVAL;
- }
- } else {
- if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
- pr_err("dev[%p]: Passed fabric_max_sectors: %u"
- " greater than DA_STATUS_MAX_SECTORS_MAX:"
- " %u\n", dev, fabric_max_sectors,
- DA_STATUS_MAX_SECTORS_MAX);
- return -EINVAL;
- }
+ if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
+ pr_err("dev[%p]: Passed fabric_max_sectors: %u"
+ " greater than DA_STATUS_MAX_SECTORS_MAX:"
+ " %u\n", dev, fabric_max_sectors,
+ DA_STATUS_MAX_SECTORS_MAX);
+ return -EINVAL;
}
/*
* Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
@@ -1155,6 +1146,7 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
dev, fabric_max_sectors);
return 0;
}
+EXPORT_SYMBOL(se_dev_set_fabric_max_sectors);
int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
@@ -1164,11 +1156,6 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
dev, dev->export_count);
return -EINVAL;
}
- if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
- pr_err("dev[%p]: Passed optimal_sectors cannot be"
- " changed for TCM/pSCSI\n", dev);
- return -EINVAL;
- }
if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
" greater than fabric_max_sectors: %u\n", dev,
@@ -1181,6 +1168,7 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
dev, optimal_sectors);
return 0;
}
+EXPORT_SYMBOL(se_dev_set_optimal_sectors);
int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
@@ -1201,13 +1189,6 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
return -EINVAL;
}
- if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
- pr_err("dev[%p]: Not allowed to change block_size for"
- " Physical Device, use for Linux/SCSI to change"
- " block_size for underlying hardware\n", dev);
- return -EINVAL;
- }
-
dev->dev_attrib.block_size = block_size;
pr_debug("dev[%p]: SE Device block_size changed to %u\n",
dev, block_size);
@@ -1218,6 +1199,7 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
return 0;
}
+EXPORT_SYMBOL(se_dev_set_block_size);
struct se_lun *core_dev_add_lun(
struct se_portal_group *tpg,
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 72c83d98662..c2aea099ea4 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -37,6 +37,7 @@
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
+#include <target/target_core_backend_configfs.h>
#include "target_core_file.h"
@@ -934,6 +935,42 @@ fd_parse_cdb(struct se_cmd *cmd)
return sbc_parse_cdb(cmd, &fd_sbc_ops);
}
+DEF_TB_DEFAULT_ATTRIBS(fileio);
+
+static struct configfs_attribute *fileio_backend_dev_attrs[] = {
+ &fileio_dev_attrib_emulate_model_alias.attr,
+ &fileio_dev_attrib_emulate_dpo.attr,
+ &fileio_dev_attrib_emulate_fua_write.attr,
+ &fileio_dev_attrib_emulate_fua_read.attr,
+ &fileio_dev_attrib_emulate_write_cache.attr,
+ &fileio_dev_attrib_emulate_ua_intlck_ctrl.attr,
+ &fileio_dev_attrib_emulate_tas.attr,
+ &fileio_dev_attrib_emulate_tpu.attr,
+ &fileio_dev_attrib_emulate_tpws.attr,
+ &fileio_dev_attrib_emulate_caw.attr,
+ &fileio_dev_attrib_emulate_3pc.attr,
+ &fileio_dev_attrib_pi_prot_type.attr,
+ &fileio_dev_attrib_hw_pi_prot_type.attr,
+ &fileio_dev_attrib_pi_prot_format.attr,
+ &fileio_dev_attrib_enforce_pr_isids.attr,
+ &fileio_dev_attrib_is_nonrot.attr,
+ &fileio_dev_attrib_emulate_rest_reord.attr,
+ &fileio_dev_attrib_force_pr_aptpl.attr,
+ &fileio_dev_attrib_hw_block_size.attr,
+ &fileio_dev_attrib_block_size.attr,
+ &fileio_dev_attrib_hw_max_sectors.attr,
+ &fileio_dev_attrib_fabric_max_sectors.attr,
+ &fileio_dev_attrib_optimal_sectors.attr,
+ &fileio_dev_attrib_hw_queue_depth.attr,
+ &fileio_dev_attrib_queue_depth.attr,
+ &fileio_dev_attrib_max_unmap_lba_count.attr,
+ &fileio_dev_attrib_max_unmap_block_desc_count.attr,
+ &fileio_dev_attrib_unmap_granularity.attr,
+ &fileio_dev_attrib_unmap_granularity_alignment.attr,
+ &fileio_dev_attrib_max_write_same_len.attr,
+ NULL,
+};
+
static struct se_subsystem_api fileio_template = {
.name = "fileio",
.inquiry_prod = "FILEIO",
@@ -957,6 +994,11 @@ static struct se_subsystem_api fileio_template = {
static int __init fileio_module_init(void)
{
+ struct target_backend_cits *tbc = &fileio_template.tb_cits;
+
+ target_core_setup_sub_cits(&fileio_template);
+ tbc->tb_dev_attrib_cit.ct_attrs = fileio_backend_dev_attrs;
+
return transport_subsystem_register(&fileio_template);
}
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
index a25051a37dd..ff95f95dcd1 100644
--- a/drivers/target/target_core_hba.c
+++ b/drivers/target/target_core_hba.c
@@ -36,6 +36,7 @@
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
+#include <target/target_core_configfs.h>
#include "target_core_internal.h"
@@ -137,8 +138,7 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
return hba;
out_module_put:
- if (hba->transport->owner)
- module_put(hba->transport->owner);
+ module_put(hba->transport->owner);
hba->transport = NULL;
out_free_hba:
kfree(hba);
@@ -159,8 +159,7 @@ core_delete_hba(struct se_hba *hba)
pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target"
" Core\n", hba->hba_id);
- if (hba->transport->owner)
- module_put(hba->transport->owner);
+ module_put(hba->transport->owner);
hba->transport = NULL;
kfree(hba);
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 7e6b857c6b3..3efff94fbd9 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -41,6 +41,7 @@
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
+#include <target/target_core_backend_configfs.h>
#include "target_core_iblock.h"
@@ -858,6 +859,42 @@ static bool iblock_get_write_cache(struct se_device *dev)
return q->flush_flags & REQ_FLUSH;
}
+DEF_TB_DEFAULT_ATTRIBS(iblock);
+
+static struct configfs_attribute *iblock_backend_dev_attrs[] = {
+ &iblock_dev_attrib_emulate_model_alias.attr,
+ &iblock_dev_attrib_emulate_dpo.attr,
+ &iblock_dev_attrib_emulate_fua_write.attr,
+ &iblock_dev_attrib_emulate_fua_read.attr,
+ &iblock_dev_attrib_emulate_write_cache.attr,
+ &iblock_dev_attrib_emulate_ua_intlck_ctrl.attr,
+ &iblock_dev_attrib_emulate_tas.attr,
+ &iblock_dev_attrib_emulate_tpu.attr,
+ &iblock_dev_attrib_emulate_tpws.attr,
+ &iblock_dev_attrib_emulate_caw.attr,
+ &iblock_dev_attrib_emulate_3pc.attr,
+ &iblock_dev_attrib_pi_prot_type.attr,
+ &iblock_dev_attrib_hw_pi_prot_type.attr,
+ &iblock_dev_attrib_pi_prot_format.attr,
+ &iblock_dev_attrib_enforce_pr_isids.attr,
+ &iblock_dev_attrib_is_nonrot.attr,
+ &iblock_dev_attrib_emulate_rest_reord.attr,
+ &iblock_dev_attrib_force_pr_aptpl.attr,
+ &iblock_dev_attrib_hw_block_size.attr,
+ &iblock_dev_attrib_block_size.attr,
+ &iblock_dev_attrib_hw_max_sectors.attr,
+ &iblock_dev_attrib_fabric_max_sectors.attr,
+ &iblock_dev_attrib_optimal_sectors.attr,
+ &iblock_dev_attrib_hw_queue_depth.attr,
+ &iblock_dev_attrib_queue_depth.attr,
+ &iblock_dev_attrib_max_unmap_lba_count.attr,
+ &iblock_dev_attrib_max_unmap_block_desc_count.attr,
+ &iblock_dev_attrib_unmap_granularity.attr,
+ &iblock_dev_attrib_unmap_granularity_alignment.attr,
+ &iblock_dev_attrib_max_write_same_len.attr,
+ NULL,
+};
+
static struct se_subsystem_api iblock_template = {
.name = "iblock",
.inquiry_prod = "IBLOCK",
@@ -883,6 +920,11 @@ static struct se_subsystem_api iblock_template = {
static int __init iblock_module_init(void)
{
+ struct target_backend_cits *tbc = &iblock_template.tb_cits;
+
+ target_core_setup_sub_cits(&iblock_template);
+ tbc->tb_dev_attrib_cit.ct_attrs = iblock_backend_dev_attrs;
+
return transport_subsystem_register(&iblock_template);
}
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index e31f42f369f..60381db9002 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -18,34 +18,6 @@ int core_dev_export(struct se_device *, struct se_portal_group *,
struct se_lun *);
void core_dev_unexport(struct se_device *, struct se_portal_group *,
struct se_lun *);
-int se_dev_set_task_timeout(struct se_device *, u32);
-int se_dev_set_max_unmap_lba_count(struct se_device *, u32);
-int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
-int se_dev_set_unmap_granularity(struct se_device *, u32);
-int se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
-int se_dev_set_max_write_same_len(struct se_device *, u32);
-int se_dev_set_emulate_model_alias(struct se_device *, int);
-int se_dev_set_emulate_dpo(struct se_device *, int);
-int se_dev_set_emulate_fua_write(struct se_device *, int);
-int se_dev_set_emulate_fua_read(struct se_device *, int);
-int se_dev_set_emulate_write_cache(struct se_device *, int);
-int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int);
-int se_dev_set_emulate_tas(struct se_device *, int);
-int se_dev_set_emulate_tpu(struct se_device *, int);
-int se_dev_set_emulate_tpws(struct se_device *, int);
-int se_dev_set_emulate_caw(struct se_device *, int);
-int se_dev_set_emulate_3pc(struct se_device *, int);
-int se_dev_set_pi_prot_type(struct se_device *, int);
-int se_dev_set_pi_prot_format(struct se_device *, int);
-int se_dev_set_enforce_pr_isids(struct se_device *, int);
-int se_dev_set_force_pr_aptpl(struct se_device *, int);
-int se_dev_set_is_nonrot(struct se_device *, int);
-int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
-int se_dev_set_queue_depth(struct se_device *, u32);
-int se_dev_set_max_sectors(struct se_device *, u32);
-int se_dev_set_fabric_max_sectors(struct se_device *, u32);
-int se_dev_set_optimal_sectors(struct se_device *, u32);
-int se_dev_set_block_size(struct se_device *, u32);
struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_device *, u32);
void core_dev_del_lun(struct se_portal_group *, struct se_lun *);
struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 4c261c33cf5..d56f2aaba9a 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -76,7 +76,7 @@ enum preempt_type {
};
static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *,
- struct t10_pr_registration *, int);
+ struct t10_pr_registration *, int, int);
static sense_reason_t
target_scsi2_reservation_check(struct se_cmd *cmd)
@@ -1177,7 +1177,7 @@ static int core_scsi3_check_implicit_release(
* service action with the SERVICE ACTION RESERVATION KEY
* field set to zero (see 5.7.11.3).
*/
- __core_scsi3_complete_pro_release(dev, nacl, pr_reg, 0);
+ __core_scsi3_complete_pro_release(dev, nacl, pr_reg, 0, 1);
ret = 1;
/*
* For 'All Registrants' reservation types, all existing
@@ -1219,7 +1219,8 @@ static void __core_scsi3_free_registration(
pr_reg->pr_reg_deve->def_pr_registered = 0;
pr_reg->pr_reg_deve->pr_res_key = 0;
- list_del(&pr_reg->pr_reg_list);
+ if (!list_empty(&pr_reg->pr_reg_list))
+ list_del(&pr_reg->pr_reg_list);
/*
* Caller accessing *pr_reg using core_scsi3_locate_pr_reg(),
* so call core_scsi3_put_pr_reg() to decrement our reference.
@@ -1271,6 +1272,7 @@ void core_scsi3_free_pr_reg_from_nacl(
{
struct t10_reservation *pr_tmpl = &dev->t10_pr;
struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
+ bool free_reg = false;
/*
* If the passed se_node_acl matches the reservation holder,
* release the reservation.
@@ -1278,13 +1280,18 @@ void core_scsi3_free_pr_reg_from_nacl(
spin_lock(&dev->dev_reservation_lock);
pr_res_holder = dev->dev_pr_res_holder;
if ((pr_res_holder != NULL) &&
- (pr_res_holder->pr_reg_nacl == nacl))
- __core_scsi3_complete_pro_release(dev, nacl, pr_res_holder, 0);
+ (pr_res_holder->pr_reg_nacl == nacl)) {
+ __core_scsi3_complete_pro_release(dev, nacl, pr_res_holder, 0, 1);
+ free_reg = true;
+ }
spin_unlock(&dev->dev_reservation_lock);
/*
* Release any registration associated with the struct se_node_acl.
*/
spin_lock(&pr_tmpl->registration_lock);
+ if (pr_res_holder && free_reg)
+ __core_scsi3_free_registration(dev, pr_res_holder, NULL, 0);
+
list_for_each_entry_safe(pr_reg, pr_reg_tmp,
&pr_tmpl->registration_list, pr_reg_list) {
@@ -1307,7 +1314,7 @@ void core_scsi3_free_all_registrations(
if (pr_res_holder != NULL) {
struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
__core_scsi3_complete_pro_release(dev, pr_res_nacl,
- pr_res_holder, 0);
+ pr_res_holder, 0, 0);
}
spin_unlock(&dev->dev_reservation_lock);
@@ -1429,14 +1436,12 @@ core_scsi3_decode_spec_i_port(
struct target_core_fabric_ops *tmp_tf_ops;
unsigned char *buf;
unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident;
- char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
+ char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN];
sense_reason_t ret;
u32 tpdl, tid_len = 0;
int dest_local_nexus;
u32 dest_rtpi = 0;
- memset(dest_iport, 0, 64);
-
local_se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
/*
* Allocate a struct pr_transport_id_holder and setup the
@@ -2105,13 +2110,13 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
/*
* sa_res_key=0 Unregister Reservation Key for registered I_T Nexus.
*/
- pr_holder = core_scsi3_check_implicit_release(
- cmd->se_dev, pr_reg);
+ type = pr_reg->pr_res_type;
+ pr_holder = core_scsi3_check_implicit_release(cmd->se_dev,
+ pr_reg);
if (pr_holder < 0) {
ret = TCM_RESERVATION_CONFLICT;
goto out;
}
- type = pr_reg->pr_res_type;
spin_lock(&pr_tmpl->registration_lock);
/*
@@ -2269,6 +2274,7 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
spin_lock(&dev->dev_reservation_lock);
pr_res_holder = dev->dev_pr_res_holder;
if (pr_res_holder) {
+ int pr_res_type = pr_res_holder->pr_res_type;
/*
* From spc4r17 Section 5.7.9: Reserving:
*
@@ -2279,7 +2285,9 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
* the logical unit, then the command shall be completed with
* RESERVATION CONFLICT status.
*/
- if (pr_res_holder != pr_reg) {
+ if ((pr_res_holder != pr_reg) &&
+ (pr_res_type != PR_TYPE_WRITE_EXCLUSIVE_ALLREG) &&
+ (pr_res_type != PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
pr_err("SPC-3 PR: Attempted RESERVE from"
" [%s]: %s while reservation already held by"
@@ -2385,23 +2393,59 @@ static void __core_scsi3_complete_pro_release(
struct se_device *dev,
struct se_node_acl *se_nacl,
struct t10_pr_registration *pr_reg,
- int explicit)
+ int explicit,
+ int unreg)
{
struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo;
char i_buf[PR_REG_ISID_ID_LEN];
+ int pr_res_type = 0, pr_res_scope = 0;
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
/*
* Go ahead and release the current PR reservation holder.
+ * If an All Registrants reservation is currently active and
+ * a unregister operation is requested, replace the current
+ * dev_pr_res_holder with another active registration.
*/
- dev->dev_pr_res_holder = NULL;
+ if (dev->dev_pr_res_holder) {
+ pr_res_type = dev->dev_pr_res_holder->pr_res_type;
+ pr_res_scope = dev->dev_pr_res_holder->pr_res_scope;
+ dev->dev_pr_res_holder->pr_res_type = 0;
+ dev->dev_pr_res_holder->pr_res_scope = 0;
+ dev->dev_pr_res_holder->pr_res_holder = 0;
+ dev->dev_pr_res_holder = NULL;
+ }
+ if (!unreg)
+ goto out;
- pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared"
- " reservation holder TYPE: %s ALL_TG_PT: %d\n",
- tfo->get_fabric_name(), (explicit) ? "explicit" : "implicit",
- core_scsi3_pr_dump_type(pr_reg->pr_res_type),
- (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
+ spin_lock(&dev->t10_pr.registration_lock);
+ list_del_init(&pr_reg->pr_reg_list);
+ /*
+ * If the I_T nexus is a reservation holder, the persistent reservation
+ * is of an all registrants type, and the I_T nexus is the last remaining
+ * registered I_T nexus, then the device server shall also release the
+ * persistent reservation.
+ */
+ if (!list_empty(&dev->t10_pr.registration_list) &&
+ ((pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+ (pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))) {
+ dev->dev_pr_res_holder =
+ list_entry(dev->t10_pr.registration_list.next,
+ struct t10_pr_registration, pr_reg_list);
+ dev->dev_pr_res_holder->pr_res_type = pr_res_type;
+ dev->dev_pr_res_holder->pr_res_scope = pr_res_scope;
+ dev->dev_pr_res_holder->pr_res_holder = 1;
+ }
+ spin_unlock(&dev->t10_pr.registration_lock);
+out:
+ if (!dev->dev_pr_res_holder) {
+ pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared"
+ " reservation holder TYPE: %s ALL_TG_PT: %d\n",
+ tfo->get_fabric_name(), (explicit) ? "explicit" :
+ "implicit", core_scsi3_pr_dump_type(pr_res_type),
+ (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
+ }
pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n",
tfo->get_fabric_name(), se_nacl->initiatorname,
i_buf);
@@ -2532,7 +2576,7 @@ core_scsi3_emulate_pro_release(struct se_cmd *cmd, int type, int scope,
* server shall not establish a unit attention condition.
*/
__core_scsi3_complete_pro_release(dev, se_sess->se_node_acl,
- pr_reg, 1);
+ pr_reg, 1, 0);
spin_unlock(&dev->dev_reservation_lock);
@@ -2620,7 +2664,7 @@ core_scsi3_emulate_pro_clear(struct se_cmd *cmd, u64 res_key)
if (pr_res_holder) {
struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
__core_scsi3_complete_pro_release(dev, pr_res_nacl,
- pr_res_holder, 0);
+ pr_res_holder, 0, 0);
}
spin_unlock(&dev->dev_reservation_lock);
/*
@@ -2679,7 +2723,7 @@ static void __core_scsi3_complete_pro_preempt(
*/
if (dev->dev_pr_res_holder)
__core_scsi3_complete_pro_release(dev, nacl,
- dev->dev_pr_res_holder, 0);
+ dev->dev_pr_res_holder, 0, 0);
dev->dev_pr_res_holder = pr_reg;
pr_reg->pr_res_holder = 1;
@@ -2924,8 +2968,8 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
*/
if (pr_reg_n != pr_res_holder)
__core_scsi3_complete_pro_release(dev,
- pr_res_holder->pr_reg_nacl,
- dev->dev_pr_res_holder, 0);
+ pr_res_holder->pr_reg_nacl,
+ dev->dev_pr_res_holder, 0, 0);
/*
* b) Remove the registrations for all I_T nexuses identified
* by the SERVICE ACTION RESERVATION KEY field, except the
@@ -3059,7 +3103,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
struct t10_reservation *pr_tmpl = &dev->t10_pr;
unsigned char *buf;
unsigned char *initiator_str;
- char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
+ char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN];
u32 tid_len, tmp_tid_len;
int new_reg = 0, type, scope, matching_iname;
sense_reason_t ret;
@@ -3071,7 +3115,6 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
- memset(dest_iport, 0, 64);
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
se_tpg = se_sess->se_tpg;
tf_ops = se_tpg->se_tpg_tfo;
@@ -3389,7 +3432,7 @@ after_iport_check:
* holder (i.e., the I_T nexus on which the
*/
__core_scsi3_complete_pro_release(dev, pr_res_nacl,
- dev->dev_pr_res_holder, 0);
+ dev->dev_pr_res_holder, 0, 0);
/*
* g) Move the persistent reservation to the specified I_T nexus using
* the same scope and type as the persistent reservation released in
@@ -3837,7 +3880,8 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
unsigned char *buf;
u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len;
u32 off = 8; /* off into first Full Status descriptor */
- int format_code = 0;
+ int format_code = 0, pr_res_type = 0, pr_res_scope = 0;
+ bool all_reg = false;
if (cmd->data_length < 8) {
pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
@@ -3854,6 +3898,19 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
buf[3] = (dev->t10_pr.pr_generation & 0xff);
+ spin_lock(&dev->dev_reservation_lock);
+ if (dev->dev_pr_res_holder) {
+ struct t10_pr_registration *pr_holder = dev->dev_pr_res_holder;
+
+ if (pr_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG ||
+ pr_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG) {
+ all_reg = true;
+ pr_res_type = pr_holder->pr_res_type;
+ pr_res_scope = pr_holder->pr_res_scope;
+ }
+ }
+ spin_unlock(&dev->dev_reservation_lock);
+
spin_lock(&pr_tmpl->registration_lock);
list_for_each_entry_safe(pr_reg, pr_reg_tmp,
&pr_tmpl->registration_list, pr_reg_list) {
@@ -3901,14 +3958,20 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
* reservation holder for PR_HOLDER bit.
*
* Also, if this registration is the reservation
- * holder, fill in SCOPE and TYPE in the next byte.
+ * holder or there is an All Registrants reservation
+ * active, fill in SCOPE and TYPE in the next byte.
*/
if (pr_reg->pr_res_holder) {
buf[off++] |= 0x01;
buf[off++] = (pr_reg->pr_res_scope & 0xf0) |
(pr_reg->pr_res_type & 0x0f);
- } else
+ } else if (all_reg) {
+ buf[off++] |= 0x01;
+ buf[off++] = (pr_res_scope & 0xf0) |
+ (pr_res_type & 0x0f);
+ } else {
off += 2;
+ }
off += 4; /* Skip over reserved area */
/*
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index caf03d5b3ca..1045dcd7bf6 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -44,6 +44,7 @@
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
+#include <target/target_core_backend_configfs.h>
#include "target_core_alua.h"
#include "target_core_pscsi.h"
@@ -1165,6 +1166,26 @@ static void pscsi_req_done(struct request *req, int uptodate)
kfree(pt);
}
+DEF_TB_DEV_ATTRIB_RO(pscsi, hw_pi_prot_type);
+TB_DEV_ATTR_RO(pscsi, hw_pi_prot_type);
+
+DEF_TB_DEV_ATTRIB_RO(pscsi, hw_block_size);
+TB_DEV_ATTR_RO(pscsi, hw_block_size);
+
+DEF_TB_DEV_ATTRIB_RO(pscsi, hw_max_sectors);
+TB_DEV_ATTR_RO(pscsi, hw_max_sectors);
+
+DEF_TB_DEV_ATTRIB_RO(pscsi, hw_queue_depth);
+TB_DEV_ATTR_RO(pscsi, hw_queue_depth);
+
+static struct configfs_attribute *pscsi_backend_dev_attrs[] = {
+ &pscsi_dev_attrib_hw_pi_prot_type.attr,
+ &pscsi_dev_attrib_hw_block_size.attr,
+ &pscsi_dev_attrib_hw_max_sectors.attr,
+ &pscsi_dev_attrib_hw_queue_depth.attr,
+ NULL,
+};
+
static struct se_subsystem_api pscsi_template = {
.name = "pscsi",
.owner = THIS_MODULE,
@@ -1185,6 +1206,11 @@ static struct se_subsystem_api pscsi_template = {
static int __init pscsi_module_init(void)
{
+ struct target_backend_cits *tbc = &pscsi_template.tb_cits;
+
+ target_core_setup_sub_cits(&pscsi_template);
+ tbc->tb_dev_attrib_cit.ct_attrs = pscsi_backend_dev_attrs;
+
return transport_subsystem_register(&pscsi_template);
}
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index b920db3388c..60ebd170a56 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -34,6 +34,7 @@
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
+#include <target/target_core_backend_configfs.h>
#include "target_core_rd.h"
@@ -632,6 +633,42 @@ rd_parse_cdb(struct se_cmd *cmd)
return sbc_parse_cdb(cmd, &rd_sbc_ops);
}
+DEF_TB_DEFAULT_ATTRIBS(rd_mcp);
+
+static struct configfs_attribute *rd_mcp_backend_dev_attrs[] = {
+ &rd_mcp_dev_attrib_emulate_model_alias.attr,
+ &rd_mcp_dev_attrib_emulate_dpo.attr,
+ &rd_mcp_dev_attrib_emulate_fua_write.attr,
+ &rd_mcp_dev_attrib_emulate_fua_read.attr,
+ &rd_mcp_dev_attrib_emulate_write_cache.attr,
+ &rd_mcp_dev_attrib_emulate_ua_intlck_ctrl.attr,
+ &rd_mcp_dev_attrib_emulate_tas.attr,
+ &rd_mcp_dev_attrib_emulate_tpu.attr,
+ &rd_mcp_dev_attrib_emulate_tpws.attr,
+ &rd_mcp_dev_attrib_emulate_caw.attr,
+ &rd_mcp_dev_attrib_emulate_3pc.attr,
+ &rd_mcp_dev_attrib_pi_prot_type.attr,
+ &rd_mcp_dev_attrib_hw_pi_prot_type.attr,
+ &rd_mcp_dev_attrib_pi_prot_format.attr,
+ &rd_mcp_dev_attrib_enforce_pr_isids.attr,
+ &rd_mcp_dev_attrib_is_nonrot.attr,
+ &rd_mcp_dev_attrib_emulate_rest_reord.attr,
+ &rd_mcp_dev_attrib_force_pr_aptpl.attr,
+ &rd_mcp_dev_attrib_hw_block_size.attr,
+ &rd_mcp_dev_attrib_block_size.attr,
+ &rd_mcp_dev_attrib_hw_max_sectors.attr,
+ &rd_mcp_dev_attrib_fabric_max_sectors.attr,
+ &rd_mcp_dev_attrib_optimal_sectors.attr,
+ &rd_mcp_dev_attrib_hw_queue_depth.attr,
+ &rd_mcp_dev_attrib_queue_depth.attr,
+ &rd_mcp_dev_attrib_max_unmap_lba_count.attr,
+ &rd_mcp_dev_attrib_max_unmap_block_desc_count.attr,
+ &rd_mcp_dev_attrib_unmap_granularity.attr,
+ &rd_mcp_dev_attrib_unmap_granularity_alignment.attr,
+ &rd_mcp_dev_attrib_max_write_same_len.attr,
+ NULL,
+};
+
static struct se_subsystem_api rd_mcp_template = {
.name = "rd_mcp",
.inquiry_prod = "RAMDISK-MCP",
@@ -653,8 +690,12 @@ static struct se_subsystem_api rd_mcp_template = {
int __init rd_module_init(void)
{
+ struct target_backend_cits *tbc = &rd_mcp_template.tb_cits;
int ret;
+ target_core_setup_sub_cits(&rd_mcp_template);
+ tbc->tb_dev_attrib_cit.ct_attrs = rd_mcp_backend_dev_attrs;
+
ret = transport_subsystem_register(&rd_mcp_template);
if (ret < 0) {
return ret;
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 9a1b314f648..8bfa61c9693 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -28,6 +28,8 @@
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_backend.h>
+#include <target/target_core_backend_configfs.h>
+
#include <linux/target_core_user.h>
/*
@@ -1092,6 +1094,42 @@ tcmu_parse_cdb(struct se_cmd *cmd)
return ret;
}
+DEF_TB_DEFAULT_ATTRIBS(tcmu);
+
+static struct configfs_attribute *tcmu_backend_dev_attrs[] = {
+ &tcmu_dev_attrib_emulate_model_alias.attr,
+ &tcmu_dev_attrib_emulate_dpo.attr,
+ &tcmu_dev_attrib_emulate_fua_write.attr,
+ &tcmu_dev_attrib_emulate_fua_read.attr,
+ &tcmu_dev_attrib_emulate_write_cache.attr,
+ &tcmu_dev_attrib_emulate_ua_intlck_ctrl.attr,
+ &tcmu_dev_attrib_emulate_tas.attr,
+ &tcmu_dev_attrib_emulate_tpu.attr,
+ &tcmu_dev_attrib_emulate_tpws.attr,
+ &tcmu_dev_attrib_emulate_caw.attr,
+ &tcmu_dev_attrib_emulate_3pc.attr,
+ &tcmu_dev_attrib_pi_prot_type.attr,
+ &tcmu_dev_attrib_hw_pi_prot_type.attr,
+ &tcmu_dev_attrib_pi_prot_format.attr,
+ &tcmu_dev_attrib_enforce_pr_isids.attr,
+ &tcmu_dev_attrib_is_nonrot.attr,
+ &tcmu_dev_attrib_emulate_rest_reord.attr,
+ &tcmu_dev_attrib_force_pr_aptpl.attr,
+ &tcmu_dev_attrib_hw_block_size.attr,
+ &tcmu_dev_attrib_block_size.attr,
+ &tcmu_dev_attrib_hw_max_sectors.attr,
+ &tcmu_dev_attrib_fabric_max_sectors.attr,
+ &tcmu_dev_attrib_optimal_sectors.attr,
+ &tcmu_dev_attrib_hw_queue_depth.attr,
+ &tcmu_dev_attrib_queue_depth.attr,
+ &tcmu_dev_attrib_max_unmap_lba_count.attr,
+ &tcmu_dev_attrib_max_unmap_block_desc_count.attr,
+ &tcmu_dev_attrib_unmap_granularity.attr,
+ &tcmu_dev_attrib_unmap_granularity_alignment.attr,
+ &tcmu_dev_attrib_max_write_same_len.attr,
+ NULL,
+};
+
static struct se_subsystem_api tcmu_template = {
.name = "user",
.inquiry_prod = "USER",
@@ -1112,6 +1150,7 @@ static struct se_subsystem_api tcmu_template = {
static int __init tcmu_module_init(void)
{
+ struct target_backend_cits *tbc = &tcmu_template.tb_cits;
int ret;
BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
@@ -1134,6 +1173,9 @@ static int __init tcmu_module_init(void)
goto out_unreg_device;
}
+ target_core_setup_sub_cits(&tcmu_template);
+ tbc->tb_dev_attrib_cit.ct_attrs = tcmu_backend_dev_attrs;
+
ret = transport_subsystem_register(&tcmu_template);
if (ret)
goto out_unreg_genl;
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index f554d25b439..af40db0df58 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -112,6 +112,18 @@ config CPU_THERMAL
If you want this support, you should say Y here.
+config CLOCK_THERMAL
+ bool "Generic clock cooling support"
+ depends on COMMON_CLK
+ depends on PM_OPP
+ help
+ This entry implements the generic clock cooling mechanism through
+ frequency clipping. Typically used to cool off co-processors. The
+ device that is configured to use this cooling mechanism will be
+ controlled to reduce clock frequency whenever temperature is high.
+
+ If you want this support, you should say Y here.
+
config THERMAL_EMULATION
bool "Thermal emulation mode support"
help
@@ -143,6 +155,16 @@ config SPEAR_THERMAL
Enable this to plug the SPEAr thermal sensor driver into the Linux
thermal framework.
+config ROCKCHIP_THERMAL
+ tristate "Rockchip thermal driver"
+ depends on ARCH_ROCKCHIP
+ depends on RESET_CONTROLLER
+ help
+ Rockchip thermal driver provides support for Temperature sensor
+ ADC (TS-ADC) found on Rockchip SoCs. It supports one critical
+ trip point. Cpufreq is used as the cooling device and will throttle
+	  CPUs when the temperature crosses the passive trip point.
+
config RCAR_THERMAL
tristate "Renesas R-Car thermal driver"
depends on ARCH_SHMOBILE || COMPILE_TEST
@@ -185,6 +207,16 @@ config ARMADA_THERMAL
Enable this option if you want to have support for thermal management
controller present in Armada 370 and Armada XP SoC.
+config TEGRA_SOCTHERM
+ tristate "Tegra SOCTHERM thermal management"
+ depends on ARCH_TEGRA
+ help
+ Enable this option for integrated thermal management support on NVIDIA
+ Tegra124 systems-on-chip. The driver supports four thermal zones
+ (CPU, GPU, MEM, PLLX). Cooling devices can be bound to the thermal
+ zones to manage temperatures. This option is also required for the
+ emergency thermal reset (thermtrip) feature to function.
+
config DB8500_CPUFREQ_COOLING
tristate "DB8500 cpufreq cooling"
depends on ARCH_U8500
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 39c4fe87da2..fa0dc486790 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -18,8 +18,12 @@ thermal_sys-$(CONFIG_THERMAL_GOV_USER_SPACE) += user_space.o
# cpufreq cooling
thermal_sys-$(CONFIG_CPU_THERMAL) += cpu_cooling.o
+# clock cooling
+thermal_sys-$(CONFIG_CLOCK_THERMAL) += clock_cooling.o
+
# platform thermal drivers
obj-$(CONFIG_SPEAR_THERMAL) += spear_thermal.o
+obj-$(CONFIG_ROCKCHIP_THERMAL) += rockchip_thermal.o
obj-$(CONFIG_RCAR_THERMAL) += rcar_thermal.o
obj-$(CONFIG_KIRKWOOD_THERMAL) += kirkwood_thermal.o
obj-y += samsung/
@@ -34,3 +38,4 @@ obj-$(CONFIG_INTEL_SOC_DTS_THERMAL) += intel_soc_dts_thermal.o
obj-$(CONFIG_TI_SOC_THERMAL) += ti-soc-thermal/
obj-$(CONFIG_INT340X_THERMAL) += int340x_thermal/
obj-$(CONFIG_ST_THERMAL) += st/
+obj-$(CONFIG_TEGRA_SOCTHERM) += tegra_soctherm.o
diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c
index eaaf59c98ba..c2556cf5186 100644
--- a/drivers/thermal/armada_thermal.c
+++ b/drivers/thermal/armada_thermal.c
@@ -35,10 +35,6 @@
#define PMU_TDC0_OTF_CAL_MASK (0x1 << 30)
#define PMU_TDC0_START_CAL_MASK (0x1 << 25)
-#define A375_Z1_CAL_RESET_LSB 0x8011e214
-#define A375_Z1_CAL_RESET_MSB 0x30a88019
-#define A375_Z1_WORKAROUND_BIT BIT(9)
-
#define A375_UNIT_CONTROL_SHIFT 27
#define A375_UNIT_CONTROL_MASK 0x7
#define A375_READOUT_INVERT BIT(15)
@@ -124,24 +120,12 @@ static void armada375_init_sensor(struct platform_device *pdev,
struct armada_thermal_priv *priv)
{
unsigned long reg;
- bool quirk_needed =
- !!of_device_is_compatible(pdev->dev.of_node,
- "marvell,armada375-z1-thermal");
-
- if (quirk_needed) {
- /* Ensure these registers have the default (reset) values */
- writel(A375_Z1_CAL_RESET_LSB, priv->control);
- writel(A375_Z1_CAL_RESET_MSB, priv->control + 0x4);
- }
reg = readl(priv->control + 4);
reg &= ~(A375_UNIT_CONTROL_MASK << A375_UNIT_CONTROL_SHIFT);
reg &= ~A375_READOUT_INVERT;
reg &= ~A375_HW_RESETn;
- if (quirk_needed)
- reg |= A375_Z1_WORKAROUND_BIT;
-
writel(reg, priv->control + 4);
mdelay(20);
@@ -260,10 +244,6 @@ static const struct of_device_id armada_thermal_id_table[] = {
.data = &armada375_data,
},
{
- .compatible = "marvell,armada375-z1-thermal",
- .data = &armada375_data,
- },
- {
.compatible = "marvell,armada380-thermal",
.data = &armada380_data,
},
diff --git a/drivers/thermal/clock_cooling.c b/drivers/thermal/clock_cooling.c
new file mode 100644
index 00000000000..1b4ff0f4c71
--- /dev/null
+++ b/drivers/thermal/clock_cooling.c
@@ -0,0 +1,485 @@
+/*
+ * drivers/thermal/clock_cooling.c
+ *
+ * Copyright (C) 2014 Eduardo Valentin <edubezval@gmail.com>
+ *
+ * Copyright (C) 2013 Texas Instruments Inc.
+ * Contact: Eduardo Valentin <eduardo.valentin@ti.com>
+ *
+ * Highly based on cpu_cooling.c.
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
+ * Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/idr.h>
+#include <linux/mutex.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+#include <linux/clock_cooling.h>
+
+/**
+ * struct clock_cooling_device - data for cooling device with clock
+ * @id: unique integer value corresponding to each clock_cooling_device
+ * registered.
+ * @dev: struct device pointer to the device being used to cool off using
+ * clock frequencies.
+ * @cdev: thermal_cooling_device pointer to keep track of the
+ * registered cooling device.
+ * @clk_rate_change_nb: reference to notifier block used to receive clock
+ * rate changes.
+ * @freq_table: frequency table used to keep track of available frequencies.
+ * @clock_state: integer value representing the current state of clock
+ * cooling devices.
+ * @clock_val: integer value representing the absolute value of the clipped
+ * frequency.
+ * @clk: struct clk reference used to enforce clock limits.
+ * @lock: mutex lock to protect this struct.
+ *
+ * This structure is required for keeping information of each
+ * clock_cooling_device registered. In order to prevent corruption of this a
+ * mutex @lock is used.
+ */
+struct clock_cooling_device {
+ int id;
+ struct device *dev;
+ struct thermal_cooling_device *cdev;
+ struct notifier_block clk_rate_change_nb;
+ struct cpufreq_frequency_table *freq_table;
+ unsigned long clock_state;
+ unsigned long clock_val;
+ struct clk *clk;
+ struct mutex lock; /* lock to protect the content of this struct */
+};
+#define to_clock_cooling_device(x) \
+ container_of(x, struct clock_cooling_device, clk_rate_change_nb)
+static DEFINE_IDR(clock_idr);
+static DEFINE_MUTEX(cooling_clock_lock);
+
+/**
+ * clock_cooling_get_idr - function to get a unique id.
+ * @id: int * value generated by this function.
+ *
+ * This function will populate @id with a unique
+ * id, using the idr API.
+ *
+ * Return: 0 on success, an error code on failure.
+ */
+static int clock_cooling_get_idr(int *id)
+{
+ int ret;
+
+ mutex_lock(&cooling_clock_lock);
+ ret = idr_alloc(&clock_idr, NULL, 0, 0, GFP_KERNEL);
+ mutex_unlock(&cooling_clock_lock);
+ if (unlikely(ret < 0))
+ return ret;
+ *id = ret;
+
+ return 0;
+}
+
+/**
+ * release_idr - function to free the unique id.
+ * @id: int value representing the unique id.
+ */
+static void release_idr(int id)
+{
+ mutex_lock(&cooling_clock_lock);
+ idr_remove(&clock_idr, id);
+ mutex_unlock(&cooling_clock_lock);
+}
+
+/* The code below defines the functions used when a clock acts as a cooling device */
+
+enum clock_cooling_property {
+ GET_LEVEL,
+ GET_FREQ,
+ GET_MAXL,
+};
+
+/**
+ * clock_cooling_get_property - fetch a property of interest for a given clock cooling device.
+ * @ccdev: clock cooling device reference
+ * @input: query parameter
+ * @output: query return
+ * @property: type of query (frequency, level, max level)
+ *
+ * This is the common function to
+ * 1. get maximum clock cooling states
+ * 2. translate frequency to cooling state
+ * 3. translate cooling state to frequency
+ * Note that the code may not be in its best shape,
+ * but it is written this way in order to:
+ * a) reduce duplicate code as most of the code can be shared.
+ * b) make sure the logic is consistent when translating between
+ * cooling states and frequencies.
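+ * For instance, with a hypothetical descending table of 996 MHz, 800 MHz
+ * and 500 MHz, GET_MAXL yields 2, GET_LEVEL maps 800 MHz to level 1 and
+ * GET_FREQ maps level 2 to 500 MHz.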
+ *
+ * Return: 0 on success, -EINVAL when invalid parameters are passed.
+ */
+static int clock_cooling_get_property(struct clock_cooling_device *ccdev,
+ unsigned long input,
+ unsigned long *output,
+ enum clock_cooling_property property)
+{
+ int i;
+ unsigned long max_level = 0, level = 0;
+ unsigned int freq = CPUFREQ_ENTRY_INVALID;
+ int descend = -1;
+ struct cpufreq_frequency_table *pos, *table = ccdev->freq_table;
+
+ if (!output)
+ return -EINVAL;
+
+ if (!table)
+ return -EINVAL;
+
+ cpufreq_for_each_valid_entry(pos, table) {
+ /* ignore duplicate entry */
+ if (freq == pos->frequency)
+ continue;
+
+ /* get the frequency order */
+ if (freq != CPUFREQ_ENTRY_INVALID && descend == -1)
+ descend = freq > pos->frequency;
+
+ freq = pos->frequency;
+ max_level++;
+ }
+
+ /* No valid cpu frequency entry */
+ if (max_level == 0)
+ return -EINVAL;
+
+ /* max_level is an index, not a counter */
+ max_level--;
+
+ /* get max level */
+ if (property == GET_MAXL) {
+ *output = max_level;
+ return 0;
+ }
+
+ if (property == GET_FREQ)
+ level = descend ? input : (max_level - input);
+
+ i = 0;
+ cpufreq_for_each_valid_entry(pos, table) {
+ /* ignore duplicate entry */
+ if (freq == pos->frequency)
+ continue;
+
+ /* now we have a valid frequency entry */
+ freq = pos->frequency;
+
+ if (property == GET_LEVEL && (unsigned int)input == freq) {
+ /* get level by frequency */
+ *output = descend ? i : (max_level - i);
+ return 0;
+ }
+ if (property == GET_FREQ && level == i) {
+ /* get frequency by level */
+ *output = freq;
+ return 0;
+ }
+ i++;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * clock_cooling_get_level - return the cooling level of given clock cooling.
+ * @cdev: reference to the thermal cooling device used as clock cooling device
+ * @freq: the frequency of interest
+ *
+ * This function will match the cooling level corresponding to the
+ * requested @freq and return it.
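+ * For example, with a frequency table sorted in descending order, the
+ * highest frequency maps to level 0 and the second-highest to level 1.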
+ *
+ * Return: The matched cooling level on success or THERMAL_CSTATE_INVALID
+ * otherwise.
+ */
+unsigned long clock_cooling_get_level(struct thermal_cooling_device *cdev,
+ unsigned long freq)
+{
+ struct clock_cooling_device *ccdev = cdev->devdata;
+ unsigned long val;
+
+ if (clock_cooling_get_property(ccdev, (unsigned long)freq, &val,
+ GET_LEVEL))
+ return THERMAL_CSTATE_INVALID;
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(clock_cooling_get_level);
+
+/**
+ * clock_cooling_get_frequency - get the absolute value of frequency from level.
+ * @ccdev: clock cooling device reference
+ * @level: cooling level
+ *
+ * This function matches a cooling level with a frequency. A cooling level
+ * equals a cooling state of the clock cooling device, so this function
+ * returns the frequency corresponding to the requested level,
+ * e.g. level=0 --> 1st MAX FREQ, level=1 --> 2nd MAX FREQ, etc.
+ *
+ * Return: 0 on error, the corresponding frequency otherwise.
+ */
+static unsigned long
+clock_cooling_get_frequency(struct clock_cooling_device *ccdev,
+ unsigned long level)
+{
+ int ret = 0;
+ unsigned long freq;
+
+ ret = clock_cooling_get_property(ccdev, level, &freq, GET_FREQ);
+ if (ret)
+ return 0;
+
+ return freq;
+}
+
+/**
+ * clock_cooling_apply - function to apply frequency clipping.
+ * @ccdev: clock_cooling_device pointer containing frequency clipping data.
+ * @cooling_state: value of the cooling state.
+ *
+ * Function used to make sure the clock layer is aware of current thermal
+ * limits. The limits are applied by lowering the clock rate whenever it is
+ * higher than the frequency corresponding to the requested cooling_state.
+ *
+ * Return: 0 on success, an error code otherwise (-EINVAL in case wrong
+ * cooling state).
+ */
+static int clock_cooling_apply(struct clock_cooling_device *ccdev,
+ unsigned long cooling_state)
+{
+ unsigned long clip_freq, cur_freq;
+ int ret = 0;
+
+ /* Here we write the clipping */
+ /* Check if the old cooling action is same as new cooling action */
+ if (ccdev->clock_state == cooling_state)
+ return 0;
+
+ clip_freq = clock_cooling_get_frequency(ccdev, cooling_state);
+ if (!clip_freq)
+ return -EINVAL;
+
+ cur_freq = clk_get_rate(ccdev->clk);
+
+ mutex_lock(&ccdev->lock);
+ ccdev->clock_state = cooling_state;
+ ccdev->clock_val = clip_freq;
+ /* enforce clock level */
+ if (cur_freq > clip_freq)
+ ret = clk_set_rate(ccdev->clk, clip_freq);
+ mutex_unlock(&ccdev->lock);
+
+ return ret;
+}
+
+/**
+ * clock_cooling_clock_notifier - notifier callback on clock rate changes.
+ * @nb: struct notifier_block * with callback info.
+ * @event: value showing the clock event for which this function is invoked.
+ * @data: callback-specific data
+ *
+ * Callback to hijack the notification on clock transitions.
+ * Every time there is a clock change, we intercept all pre-change events
+ * and block the transition in case the new rate would violate thermal limits.
+ *
+ * Return: NOTIFY_DONE (success) or NOTIFY_BAD (new_rate > thermal limit).
+ */
+static int clock_cooling_clock_notifier(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct clk_notifier_data *ndata = data;
+ struct clock_cooling_device *ccdev = to_clock_cooling_device(nb);
+
+ switch (event) {
+ case PRE_RATE_CHANGE:
+ /*
+ * checks on current state
+ * TODO: the current method is not the best we can find, as it
+ * may still allow voltage transitions, in case the DVFS
+ * layer is also hijacking clock pre-change notifications.
+ */
+ if (ndata->new_rate > ccdev->clock_val)
+ return NOTIFY_BAD;
+ /* fall through */
+ case POST_RATE_CHANGE:
+ case ABORT_RATE_CHANGE:
+ default:
+ return NOTIFY_DONE;
+ }
+}
+
+/* clock cooling device thermal callback functions are defined below */
+
+/**
+ * clock_cooling_get_max_state - callback function to get the max cooling state.
+ * @cdev: thermal cooling device pointer.
+ * @state: fill this variable with the max cooling state.
+ *
+ * Callback for the thermal cooling device to return the clock
+ * max cooling state.
+ *
+ * Return: 0 on success, an error code otherwise.
+ */
+static int clock_cooling_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct clock_cooling_device *ccdev = cdev->devdata;
+ unsigned long count = 0;
+ int ret;
+
+ ret = clock_cooling_get_property(ccdev, 0, &count, GET_MAXL);
+ if (!ret)
+ *state = count;
+
+ return ret;
+}
+
+/**
+ * clock_cooling_get_cur_state - function to get the current cooling state.
+ * @cdev: thermal cooling device pointer.
+ * @state: fill this variable with the current cooling state.
+ *
+ * Callback for the thermal cooling device to return the clock
+ * current cooling state.
+ *
+ * Return: 0 (success)
+ */
+static int clock_cooling_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct clock_cooling_device *ccdev = cdev->devdata;
+
+ *state = ccdev->clock_state;
+
+ return 0;
+}
+
+/**
+ * clock_cooling_set_cur_state - function to set the current cooling state.
+ * @cdev: thermal cooling device pointer.
+ * @state: set this variable to the current cooling state.
+ *
+ * Callback for the thermal cooling device to change the clock cooling
+ * current cooling state.
+ *
+ * Return: 0 on success, an error code otherwise.
+ */
+static int clock_cooling_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ struct clock_cooling_device *clock_device = cdev->devdata;
+
+ return clock_cooling_apply(clock_device, state);
+}
+
+/* Bind clock callbacks to thermal cooling device ops */
+static struct thermal_cooling_device_ops const clock_cooling_ops = {
+ .get_max_state = clock_cooling_get_max_state,
+ .get_cur_state = clock_cooling_get_cur_state,
+ .set_cur_state = clock_cooling_set_cur_state,
+};
+
+/**
+ * clock_cooling_register - function to create clock cooling device.
+ * @dev: struct device pointer to the device used as clock cooling device.
+ * @clock_name: string containing the clock used as cooling mechanism.
+ *
+ * This interface function registers the clock cooling device with the name
+ * "thermal-clock-%x". The cooling device is based on clock frequencies.
+ * The struct device is assumed to be capable of DVFS transitions.
+ * The OPP layer is used to fetch and fill the available frequencies for
+ * the referred device. The ordered frequency table is used to control
+ * the clock cooling device cooling states and to limit clock transitions
+ * based on the cooling state requested by the thermal framework.
+ *
+ * Return: a valid struct thermal_cooling_device pointer on success,
+ * on failure, it returns a corresponding ERR_PTR().
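+ *
+ * A minimal usage sketch (the clock name "foo" here is illustrative only):
+ *
+ *	cdev = clock_cooling_register(dev, "foo");
+ *	if (IS_ERR(cdev))
+ *		return PTR_ERR(cdev);
+ *	...
+ *	clock_cooling_unregister(cdev);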
+ */
+struct thermal_cooling_device *
+clock_cooling_register(struct device *dev, const char *clock_name)
+{
+ struct thermal_cooling_device *cdev;
+ struct clock_cooling_device *ccdev = NULL;
+ char dev_name[THERMAL_NAME_LENGTH];
+ int ret = 0;
+
+ ccdev = devm_kzalloc(dev, sizeof(*ccdev), GFP_KERNEL);
+ if (!ccdev)
+ return ERR_PTR(-ENOMEM);
+
+ ccdev->dev = dev;
+ ccdev->clk = devm_clk_get(dev, clock_name);
+ if (IS_ERR(ccdev->clk))
+ return ERR_CAST(ccdev->clk);
+
+ ret = clock_cooling_get_idr(&ccdev->id);
+ if (ret)
+ return ERR_PTR(-EINVAL);
+
+ snprintf(dev_name, sizeof(dev_name), "thermal-clock-%d", ccdev->id);
+
+ cdev = thermal_cooling_device_register(dev_name, ccdev,
+ &clock_cooling_ops);
+ if (IS_ERR(cdev)) {
+ release_idr(ccdev->id);
+ return ERR_PTR(-EINVAL);
+ }
+ ccdev->cdev = cdev;
+ ccdev->clk_rate_change_nb.notifier_call = clock_cooling_clock_notifier;
+
+ /* Assuming someone has already filled the opp table for this device */
+ ret = dev_pm_opp_init_cpufreq_table(dev, &ccdev->freq_table);
+ if (ret) {
+ release_idr(ccdev->id);
+ return ERR_PTR(ret);
+ }
+ ccdev->clock_state = 0;
+ ccdev->clock_val = clock_cooling_get_frequency(ccdev, 0);
+
+ clk_notifier_register(ccdev->clk, &ccdev->clk_rate_change_nb);
+
+ return cdev;
+}
+EXPORT_SYMBOL_GPL(clock_cooling_register);
+
+/**
+ * clock_cooling_unregister - function to remove clock cooling device.
+ * @cdev: thermal cooling device pointer.
+ *
+ * This interface function unregisters the "thermal-clock-%x" cooling device.
+ */
+void clock_cooling_unregister(struct thermal_cooling_device *cdev)
+{
+ struct clock_cooling_device *ccdev;
+
+ if (!cdev)
+ return;
+
+ ccdev = cdev->devdata;
+
+ clk_notifier_unregister(ccdev->clk, &ccdev->clk_rate_change_nb);
+ dev_pm_opp_free_cpufreq_table(ccdev->dev, &ccdev->freq_table);
+
+ thermal_cooling_device_unregister(ccdev->cdev);
+ release_idr(ccdev->id);
+}
+EXPORT_SYMBOL_GPL(clock_cooling_unregister);
diff --git a/drivers/thermal/int340x_thermal/acpi_thermal_rel.c b/drivers/thermal/int340x_thermal/acpi_thermal_rel.c
index 0d8db808f0a..e4e61b3fb11 100644
--- a/drivers/thermal/int340x_thermal/acpi_thermal_rel.c
+++ b/drivers/thermal/int340x_thermal/acpi_thermal_rel.c
@@ -131,6 +131,8 @@ int acpi_parse_trt(acpi_handle handle, int *trt_count, struct trt **trtp,
pr_warn("Failed to get target ACPI device\n");
}
+ result = 0;
+
*trtp = trts;
/* don't count bad entries */
*trt_count -= nr_bad_entries;
@@ -317,21 +319,21 @@ static long acpi_thermal_rel_ioctl(struct file *f, unsigned int cmd,
{
int ret = 0;
unsigned long length = 0;
- unsigned long count = 0;
+ int count = 0;
char __user *arg = (void __user *)__arg;
struct trt *trts;
struct art *arts;
switch (cmd) {
case ACPI_THERMAL_GET_TRT_COUNT:
- ret = acpi_parse_trt(acpi_thermal_rel_handle, (int *)&count,
+ ret = acpi_parse_trt(acpi_thermal_rel_handle, &count,
&trts, false);
kfree(trts);
if (!ret)
return put_user(count, (unsigned long __user *)__arg);
return ret;
case ACPI_THERMAL_GET_TRT_LEN:
- ret = acpi_parse_trt(acpi_thermal_rel_handle, (int *)&count,
+ ret = acpi_parse_trt(acpi_thermal_rel_handle, &count,
&trts, false);
kfree(trts);
length = count * sizeof(union trt_object);
@@ -341,14 +343,14 @@ static long acpi_thermal_rel_ioctl(struct file *f, unsigned int cmd,
case ACPI_THERMAL_GET_TRT:
return fill_trt(arg);
case ACPI_THERMAL_GET_ART_COUNT:
- ret = acpi_parse_art(acpi_thermal_rel_handle, (int *)&count,
+ ret = acpi_parse_art(acpi_thermal_rel_handle, &count,
&arts, false);
kfree(arts);
if (!ret)
return put_user(count, (unsigned long __user *)__arg);
return ret;
case ACPI_THERMAL_GET_ART_LEN:
- ret = acpi_parse_art(acpi_thermal_rel_handle, (int *)&count,
+ ret = acpi_parse_art(acpi_thermal_rel_handle, &count,
&arts, false);
kfree(arts);
length = count * sizeof(union art_object);
diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
index edc1cce117b..dcb306ea14a 100644
--- a/drivers/thermal/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
@@ -43,6 +43,74 @@ struct int3400_thermal_priv {
struct trt *trts;
u8 uuid_bitmap;
int rel_misc_dev_res;
+ int current_uuid_index;
+};
+
+static ssize_t available_uuids_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct int3400_thermal_priv *priv = platform_get_drvdata(pdev);
+ int i;
+ int length = 0;
+
+ for (i = 0; i < INT3400_THERMAL_MAXIMUM_UUID; i++) {
+ if (priv->uuid_bitmap & (1 << i))
+ if (PAGE_SIZE - length > 0)
+ length += snprintf(&buf[length],
+ PAGE_SIZE - length,
+ "%s\n",
+ int3400_thermal_uuids[i]);
+ }
+
+ return length;
+}
+
+static ssize_t current_uuid_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct int3400_thermal_priv *priv = platform_get_drvdata(pdev);
+
+ if (priv->uuid_bitmap & (1 << priv->current_uuid_index))
+ return sprintf(buf, "%s\n",
+ int3400_thermal_uuids[priv->current_uuid_index]);
+ else
+ return sprintf(buf, "INVALID\n");
+}
+
+static ssize_t current_uuid_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct int3400_thermal_priv *priv = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < INT3400_THERMAL_MAXIMUM_UUID; ++i) {
+ if ((priv->uuid_bitmap & (1 << i)) &&
+ !(strncmp(buf, int3400_thermal_uuids[i],
+ sizeof(int3400_thermal_uuids[i]) - 1))) {
+ priv->current_uuid_index = i;
+ return count;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static DEVICE_ATTR(current_uuid, 0644, current_uuid_show, current_uuid_store);
+static DEVICE_ATTR_RO(available_uuids);
+static struct attribute *uuid_attrs[] = {
+ &dev_attr_available_uuids.attr,
+ &dev_attr_current_uuid.attr,
+ NULL
+};
+
+static struct attribute_group uuid_attribute_group = {
+ .attrs = uuid_attrs,
+ .name = "uuids"
};
static int int3400_thermal_get_uuids(struct int3400_thermal_priv *priv)
@@ -160,9 +228,9 @@ static int int3400_thermal_set_mode(struct thermal_zone_device *thermal,
if (enable != priv->mode) {
priv->mode = enable;
- /* currently, only PASSIVE COOLING is supported */
result = int3400_thermal_run_osc(priv->adev->handle,
- INT3400_THERMAL_PASSIVE_1, enable);
+ priv->current_uuid_index,
+ enable);
}
return result;
}
@@ -223,7 +291,14 @@ static int int3400_thermal_probe(struct platform_device *pdev)
priv->rel_misc_dev_res = acpi_thermal_rel_misc_device_add(
priv->adev->handle);
+ result = sysfs_create_group(&pdev->dev.kobj, &uuid_attribute_group);
+ if (result)
+ goto free_zone;
+
return 0;
+
+free_zone:
+ thermal_zone_device_unregister(priv->thermal);
free_trt:
kfree(priv->trts);
free_art:
@@ -240,6 +315,7 @@ static int int3400_thermal_remove(struct platform_device *pdev)
if (!priv->rel_misc_dev_res)
acpi_thermal_rel_misc_device_remove(priv->adev->handle);
+ sysfs_remove_group(&pdev->dev.kobj, &uuid_attribute_group);
thermal_zone_device_unregister(priv->thermal);
kfree(priv->trts);
kfree(priv->arts);
diff --git a/drivers/thermal/int340x_thermal/int3403_thermal.c b/drivers/thermal/int340x_thermal/int3403_thermal.c
index 6e9fb62eb81..1bfa6a69e77 100644
--- a/drivers/thermal/int340x_thermal/int3403_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3403_thermal.c
@@ -293,8 +293,7 @@ static int int3403_sensor_add(struct int3403_priv *priv)
return 0;
err_free_obj:
- if (obj->tzone)
- thermal_zone_device_unregister(obj->tzone);
+ thermal_zone_device_unregister(obj->tzone);
return result;
}
@@ -471,7 +470,6 @@ static struct platform_driver int3403_driver = {
.remove = int3403_remove,
.driver = {
.name = "int3403 thermal",
- .owner = THIS_MODULE,
.acpi_match_table = int3403_device_ids,
},
};
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index 95cb7fc20e1..e98b4249187 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -435,7 +435,6 @@ static int clamp_thread(void *arg)
* allowed. thus jiffies are updated properly.
*/
preempt_disable();
- tick_nohz_idle_enter();
/* mwait until target jiffies is reached */
while (time_before(jiffies, target_jiffies)) {
unsigned long ecx = 1;
@@ -451,7 +450,6 @@ static int clamp_thread(void *arg)
start_critical_timings();
atomic_inc(&idle_wakeup_counter);
}
- tick_nohz_idle_exit();
preempt_enable();
}
del_timer_sync(&wakeup_timer);
@@ -689,6 +687,7 @@ static const struct x86_cpu_id intel_powerclamp_ids[] = {
{ X86_VENDOR_INTEL, 6, 0x3f},
{ X86_VENDOR_INTEL, 6, 0x45},
{ X86_VENDOR_INTEL, 6, 0x46},
+ { X86_VENDOR_INTEL, 6, 0x4c},
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
diff --git a/drivers/thermal/intel_soc_dts_thermal.c b/drivers/thermal/intel_soc_dts_thermal.c
index a6a0a18ec0a..5580f5b24eb 100644
--- a/drivers/thermal/intel_soc_dts_thermal.c
+++ b/drivers/thermal/intel_soc_dts_thermal.c
@@ -360,6 +360,9 @@ static void proc_thermal_interrupt(void)
u32 sticky_out;
int status;
u32 ptmc_out;
+ unsigned long flags;
+
+ spin_lock_irqsave(&intr_notify_lock, flags);
/* Clear APIC interrupt */
status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
@@ -378,21 +381,20 @@ static void proc_thermal_interrupt(void)
/* reset sticky bit */
status = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
SOC_DTS_OFFSET_PTTSS, sticky_out);
+ spin_unlock_irqrestore(&intr_notify_lock, flags);
+
for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
pr_debug("TZD update for zone %d\n", i);
thermal_zone_device_update(soc_dts[i]->tzone);
}
- }
+ } else
+ spin_unlock_irqrestore(&intr_notify_lock, flags);
}
static irqreturn_t soc_irq_thread_fn(int irq, void *dev_data)
{
- unsigned long flags;
-
- spin_lock_irqsave(&intr_notify_lock, flags);
proc_thermal_interrupt();
- spin_unlock_irqrestore(&intr_notify_lock, flags);
pr_debug("proc_thermal_interrupt\n");
return IRQ_HANDLED;
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 62143ba3100..e145b66df44 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -30,27 +30,13 @@
#include <linux/err.h>
#include <linux/export.h>
#include <linux/string.h>
+#include <linux/thermal.h>
#include "thermal_core.h"
/*** Private data structures to represent thermal device tree data ***/
/**
- * struct __thermal_trip - representation of a point in temperature domain
- * @np: pointer to struct device_node that this trip point was created from
- * @temperature: temperature value in miliCelsius
- * @hysteresis: relative hysteresis in miliCelsius
- * @type: trip point type
- */
-
-struct __thermal_trip {
- struct device_node *np;
- unsigned long int temperature;
- unsigned long int hysteresis;
- enum thermal_trip_type type;
-};
-
-/**
* struct __thermal_bind_param - a match between trip and cooling device
* @cooling_device: a pointer to identify the referred cooling device
* @trip_id: the trip point index
@@ -77,8 +63,7 @@ struct __thermal_bind_params {
* @num_tbps: number of thermal bind params
* @tbps: an array of thermal bind params (0..num_tbps - 1)
* @sensor_data: sensor private data used while reading temperature and trend
- * @get_temp: sensor callback to read temperature
- * @get_trend: sensor callback to read temperature trend
+ * @ops: set of callbacks to handle the thermal zone based on DT
*/
struct __thermal_zone {
@@ -88,7 +73,7 @@ struct __thermal_zone {
/* trip data */
int ntrips;
- struct __thermal_trip *trips;
+ struct thermal_trip *trips;
/* cooling binding data */
int num_tbps;
@@ -96,8 +81,7 @@ struct __thermal_zone {
/* sensor interface */
void *sensor_data;
- int (*get_temp)(void *, long *);
- int (*get_trend)(void *, long *);
+ const struct thermal_zone_of_device_ops *ops;
};
/*** DT thermal zone device callbacks ***/
@@ -107,10 +91,96 @@ static int of_thermal_get_temp(struct thermal_zone_device *tz,
{
struct __thermal_zone *data = tz->devdata;
- if (!data->get_temp)
+ if (!data->ops->get_temp)
return -EINVAL;
- return data->get_temp(data->sensor_data, temp);
+ return data->ops->get_temp(data->sensor_data, temp);
+}
+
+/**
+ * of_thermal_get_ntrips - function to export number of available trip
+ * points.
+ * @tz: pointer to a thermal zone
+ *
+ * This function is a globally visible wrapper to get number of trip points
+ * stored in the local struct __thermal_zone
+ *
+ * Return: number of available trip points, -ENODEV when data not available
+ */
+int of_thermal_get_ntrips(struct thermal_zone_device *tz)
+{
+ struct __thermal_zone *data = tz->devdata;
+
+ if (!data || IS_ERR(data))
+ return -ENODEV;
+
+ return data->ntrips;
+}
+EXPORT_SYMBOL_GPL(of_thermal_get_ntrips);
+
+/**
+ * of_thermal_is_trip_valid - function to check if trip point is valid
+ *
+ * @tz: pointer to a thermal zone
+ * @trip: trip point to evaluate
+ *
+ * This function is responsible for checking if passed trip point is valid
+ *
+ * Return: true if trip point is valid, false otherwise
+ */
+bool of_thermal_is_trip_valid(struct thermal_zone_device *tz, int trip)
+{
+ struct __thermal_zone *data = tz->devdata;
+
+ if (!data || trip >= data->ntrips || trip < 0)
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(of_thermal_is_trip_valid);
+
+/**
+ * of_thermal_get_trip_points - function to get access to the globally exported
+ * trip points
+ *
+ * @tz: pointer to a thermal zone
+ *
+ * This function provides a pointer to the trip points table
+ *
+ * Return: pointer to trip points table, NULL otherwise
+ */
+const struct thermal_trip * const
+of_thermal_get_trip_points(struct thermal_zone_device *tz)
+{
+ struct __thermal_zone *data = tz->devdata;
+
+ if (!data)
+ return NULL;
+
+ return data->trips;
+}
+EXPORT_SYMBOL_GPL(of_thermal_get_trip_points);
+
+/**
+ * of_thermal_set_emul_temp - function to set emulated temperature
+ *
+ * @tz: pointer to a thermal zone
+ * @temp: temperature to set
+ *
+ * This function gives the ability to set an emulated temperature value,
+ * which is handy for debugging
+ *
+ * Return: zero on success, error code otherwise
+ */
+static int of_thermal_set_emul_temp(struct thermal_zone_device *tz,
+ unsigned long temp)
+{
+ struct __thermal_zone *data = tz->devdata;
+
+ if (!data->ops || !data->ops->set_emul_temp)
+ return -EINVAL;
+
+ return data->ops->set_emul_temp(data->sensor_data, temp);
}
static int of_thermal_get_trend(struct thermal_zone_device *tz, int trip,
@@ -120,10 +190,10 @@ static int of_thermal_get_trend(struct thermal_zone_device *tz, int trip,
long dev_trend;
int r;
- if (!data->get_trend)
+ if (!data->ops->get_trend)
return -EINVAL;
- r = data->get_trend(data->sensor_data, &dev_trend);
+ r = data->ops->get_trend(data->sensor_data, &dev_trend);
if (r)
return r;
@@ -324,8 +394,7 @@ static struct thermal_zone_device_ops of_thermal_ops = {
static struct thermal_zone_device *
thermal_zone_of_add_sensor(struct device_node *zone,
struct device_node *sensor, void *data,
- int (*get_temp)(void *, long *),
- int (*get_trend)(void *, long *))
+ const struct thermal_zone_of_device_ops *ops)
{
struct thermal_zone_device *tzd;
struct __thermal_zone *tz;
@@ -336,13 +405,16 @@ thermal_zone_of_add_sensor(struct device_node *zone,
tz = tzd->devdata;
+ if (!ops)
+ return ERR_PTR(-EINVAL);
+
mutex_lock(&tzd->lock);
- tz->get_temp = get_temp;
- tz->get_trend = get_trend;
+ tz->ops = ops;
tz->sensor_data = data;
tzd->ops->get_temp = of_thermal_get_temp;
tzd->ops->get_trend = of_thermal_get_trend;
+ tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
mutex_unlock(&tzd->lock);
return tzd;
@@ -356,8 +428,7 @@ thermal_zone_of_add_sensor(struct device_node *zone,
* than one sensors
* @data: a private pointer (owned by the caller) that will be passed
* back, when a temperature reading is needed.
- * @get_temp: a pointer to a function that reads the sensor temperature.
- * @get_trend: a pointer to a function that reads the sensor temperature trend.
+ * @ops: struct thermal_zone_of_device_ops *. Must contain at least .get_temp.
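+ *
+ * A minimal sketch of a caller (the "foo" names are illustrative; the
+ * rockchip driver added below registers its sensors this way):
+ *
+ *	static const struct thermal_zone_of_device_ops foo_ops = {
+ *		.get_temp = foo_get_temp,
+ *	};
+ *	...
+ *	tzd = thermal_zone_of_sensor_register(dev, 0, foo_data, &foo_ops);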
*
* This function will search the list of thermal zones described in device
* tree and look for the zone that refer to the sensor device pointed by
@@ -382,9 +453,8 @@ thermal_zone_of_add_sensor(struct device_node *zone,
* check the return value with help of IS_ERR() helper.
*/
struct thermal_zone_device *
-thermal_zone_of_sensor_register(struct device *dev, int sensor_id,
- void *data, int (*get_temp)(void *, long *),
- int (*get_trend)(void *, long *))
+thermal_zone_of_sensor_register(struct device *dev, int sensor_id, void *data,
+ const struct thermal_zone_of_device_ops *ops)
{
struct device_node *np, *child, *sensor_np;
struct thermal_zone_device *tzd = ERR_PTR(-ENODEV);
@@ -426,9 +496,7 @@ thermal_zone_of_sensor_register(struct device *dev, int sensor_id,
if (sensor_specs.np == sensor_np && id == sensor_id) {
tzd = thermal_zone_of_add_sensor(child, sensor_np,
- data,
- get_temp,
- get_trend);
+ data, ops);
of_node_put(sensor_specs.np);
of_node_put(child);
goto exit;
@@ -475,9 +543,9 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
mutex_lock(&tzd->lock);
tzd->ops->get_temp = NULL;
tzd->ops->get_trend = NULL;
+ tzd->ops->set_emul_temp = NULL;
- tz->get_temp = NULL;
- tz->get_trend = NULL;
+ tz->ops = NULL;
tz->sensor_data = NULL;
mutex_unlock(&tzd->lock);
}
@@ -501,7 +569,7 @@ EXPORT_SYMBOL_GPL(thermal_zone_of_sensor_unregister);
*/
static int thermal_of_populate_bind_params(struct device_node *np,
struct __thermal_bind_params *__tbp,
- struct __thermal_trip *trips,
+ struct thermal_trip *trips,
int ntrips)
{
struct of_phandle_args cooling_spec;
@@ -604,7 +672,7 @@ static int thermal_of_get_trip_type(struct device_node *np,
* Return: 0 on success, proper error code otherwise
*/
static int thermal_of_populate_trip(struct device_node *np,
- struct __thermal_trip *trip)
+ struct thermal_trip *trip)
{
int prop;
int ret;
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
new file mode 100644
index 00000000000..1bcddfc60e9
--- /dev/null
+++ b/drivers/thermal/rockchip_thermal.c
@@ -0,0 +1,693 @@
+/*
+ * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/thermal.h>
+
+/**
+ * If the temperature remains high over a period of time, the resulting
+ * TSHUT is either routed to the CRU module, which resets the entire chip,
+ * or signalled via GPIO to the PMIC.
+ */
+enum tshut_mode {
+ TSHUT_MODE_CRU = 0,
+ TSHUT_MODE_GPIO,
+};
+
+/**
+ * The polarity of the temperature sensors' TSHUT signal.
+ * Bit 8 selects the TSHUT polarity:
+ * 0: active low, 1: active high
+ */
+enum tshut_polarity {
+ TSHUT_LOW_ACTIVE = 0,
+ TSHUT_HIGH_ACTIVE,
+};
+
+/**
+ * The system has three temperature sensors: channel 0 is reserved,
+ * channel 1 is for the CPU, and channel 2 is for the GPU.
+ */
+enum sensor_id {
+ SENSOR_CPU = 1,
+ SENSOR_GPU,
+};
+
+struct rockchip_tsadc_chip {
+ /* The hardware-controlled tshut property */
+ long tshut_temp;
+ enum tshut_mode tshut_mode;
+ enum tshut_polarity tshut_polarity;
+
+ /* Chip-wide methods */
+ void (*initialize)(void __iomem *reg, enum tshut_polarity p);
+ void (*irq_ack)(void __iomem *reg);
+ void (*control)(void __iomem *reg, bool on);
+
+ /* Per-sensor methods */
+ int (*get_temp)(int chn, void __iomem *reg, long *temp);
+ void (*set_tshut_temp)(int chn, void __iomem *reg, long temp);
+ void (*set_tshut_mode)(int chn, void __iomem *reg, enum tshut_mode m);
+};
+
+struct rockchip_thermal_sensor {
+ struct rockchip_thermal_data *thermal;
+ struct thermal_zone_device *tzd;
+ enum sensor_id id;
+};
+
+#define NUM_SENSORS 2 /* Ignore unused sensor 0 */
+
+struct rockchip_thermal_data {
+ const struct rockchip_tsadc_chip *chip;
+ struct platform_device *pdev;
+ struct reset_control *reset;
+
+ struct rockchip_thermal_sensor sensors[NUM_SENSORS];
+
+ struct clk *clk;
+ struct clk *pclk;
+
+ void __iomem *regs;
+
+ long tshut_temp;
+ enum tshut_mode tshut_mode;
+ enum tshut_polarity tshut_polarity;
+};
+
+/* TSADC V2 Sensor info define: */
+#define TSADCV2_AUTO_CON 0x04
+#define TSADCV2_INT_EN 0x08
+#define TSADCV2_INT_PD 0x0c
+#define TSADCV2_DATA(chn) (0x20 + (chn) * 0x04)
+#define TSADCV2_COMP_SHUT(chn) (0x40 + (chn) * 0x04)
+#define TSADCV2_HIGHT_INT_DEBOUNCE 0x60
+#define TSADCV2_HIGHT_TSHUT_DEBOUNCE 0x64
+#define TSADCV2_AUTO_PERIOD 0x68
+#define TSADCV2_AUTO_PERIOD_HT 0x6c
+
+#define TSADCV2_AUTO_EN BIT(0)
+#define TSADCV2_AUTO_DISABLE ~BIT(0)
+#define TSADCV2_AUTO_SRC_EN(chn) BIT(4 + (chn))
+#define TSADCV2_AUTO_TSHUT_POLARITY_HIGH BIT(8)
+#define TSADCV2_AUTO_TSHUT_POLARITY_LOW ~BIT(8)
+
+#define TSADCV2_INT_SRC_EN(chn) BIT(chn)
+#define TSADCV2_SHUT_2GPIO_SRC_EN(chn) BIT(4 + (chn))
+#define TSADCV2_SHUT_2CRU_SRC_EN(chn) BIT(8 + (chn))
+
+#define TSADCV2_INT_PD_CLEAR ~BIT(8)
+
+#define TSADCV2_DATA_MASK 0xfff
+#define TSADCV2_HIGHT_INT_DEBOUNCE_COUNT 4
+#define TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT 4
+#define TSADCV2_AUTO_PERIOD_TIME 250 /* msec */
+#define TSADCV2_AUTO_PERIOD_HT_TIME 50 /* msec */
+
+struct tsadc_table {
+ unsigned long code;
+ long temp;
+};
+
+static const struct tsadc_table v2_code_table[] = {
+ {TSADCV2_DATA_MASK, -40000},
+ {3800, -40000},
+ {3792, -35000},
+ {3783, -30000},
+ {3774, -25000},
+ {3765, -20000},
+ {3756, -15000},
+ {3747, -10000},
+ {3737, -5000},
+ {3728, 0},
+ {3718, 5000},
+ {3708, 10000},
+ {3698, 15000},
+ {3688, 20000},
+ {3678, 25000},
+ {3667, 30000},
+ {3656, 35000},
+ {3645, 40000},
+ {3634, 45000},
+ {3623, 50000},
+ {3611, 55000},
+ {3600, 60000},
+ {3588, 65000},
+ {3575, 70000},
+ {3563, 75000},
+ {3550, 80000},
+ {3537, 85000},
+ {3524, 90000},
+ {3510, 95000},
+ {3496, 100000},
+ {3482, 105000},
+ {3467, 110000},
+ {3452, 115000},
+ {3437, 120000},
+ {3421, 125000},
+ {0, 125000},
+};
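+
+/*
+ * The table maps a descending raw ADC code to an ascending temperature in
+ * millicelsius: e.g. a code of 3728 decodes to 0 and a code of 3600 to
+ * 60000 millicelsius. A code that falls between two entries is rounded up
+ * to the higher temperature by rk_tsadcv2_code_to_temp() below.
+ */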
+
+static u32 rk_tsadcv2_temp_to_code(long temp)
+{
+ int high, low, mid;
+
+ low = 0;
+ high = ARRAY_SIZE(v2_code_table) - 1;
+ mid = (high + low) / 2;
+
+ if (temp < v2_code_table[low].temp || temp > v2_code_table[high].temp)
+ return 0;
+
+ while (low <= high) {
+ if (temp == v2_code_table[mid].temp)
+ return v2_code_table[mid].code;
+ else if (temp < v2_code_table[mid].temp)
+ high = mid - 1;
+ else
+ low = mid + 1;
+ mid = (low + high) / 2;
+ }
+
+ return 0;
+}
+
+static long rk_tsadcv2_code_to_temp(u32 code)
+{
+ int high, low, mid;
+
+ low = 0;
+ high = ARRAY_SIZE(v2_code_table) - 1;
+ mid = (high + low) / 2;
+
+ if (code > v2_code_table[low].code || code < v2_code_table[high].code)
+ return 125000; /* No code available, return max temperature */
+
+ while (low <= high) {
+ if (code >= v2_code_table[mid].code && code <
+ v2_code_table[mid - 1].code)
+ return v2_code_table[mid].temp;
+ else if (code < v2_code_table[mid].code)
+ low = mid + 1;
+ else
+ high = mid - 1;
+ mid = (low + high) / 2;
+ }
+
+ return 125000;
+}
+
+/**
+ * rk_tsadcv2_initialize - initialize the TSADC controller
+ * (1) Set TSADCV2_AUTO_PERIOD to configure the interval between
+ * two consecutive TSADC accesses in normal operation.
+ * (2) Set TSADCV2_AUTO_PERIOD_HT to configure the interval between
+ * two consecutive TSADC accesses once the temperature is higher
+ * than COMP_SHUT or COMP_INT.
+ * (3) Set TSADCV2_HIGHT_INT_DEBOUNCE and TSADCV2_HIGHT_TSHUT_DEBOUNCE:
+ * if the temperature stays higher than COMP_INT or COMP_SHUT for
+ * "debounce" times, the TSADC controller generates an interrupt or TSHUT.
+ */
+static void rk_tsadcv2_initialize(void __iomem *regs,
+ enum tshut_polarity tshut_polarity)
+{
+ if (tshut_polarity == TSHUT_HIGH_ACTIVE)
+ writel_relaxed(0 | (TSADCV2_AUTO_TSHUT_POLARITY_HIGH),
+ regs + TSADCV2_AUTO_CON);
+ else
+ writel_relaxed(0 | (TSADCV2_AUTO_TSHUT_POLARITY_LOW),
+ regs + TSADCV2_AUTO_CON);
+
+ writel_relaxed(TSADCV2_AUTO_PERIOD_TIME, regs + TSADCV2_AUTO_PERIOD);
+ writel_relaxed(TSADCV2_HIGHT_INT_DEBOUNCE_COUNT,
+ regs + TSADCV2_HIGHT_INT_DEBOUNCE);
+ writel_relaxed(TSADCV2_AUTO_PERIOD_HT_TIME,
+ regs + TSADCV2_AUTO_PERIOD_HT);
+ writel_relaxed(TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT,
+ regs + TSADCV2_HIGHT_TSHUT_DEBOUNCE);
+}
+
+static void rk_tsadcv2_irq_ack(void __iomem *regs)
+{
+ u32 val;
+
+ val = readl_relaxed(regs + TSADCV2_INT_PD);
+ writel_relaxed(val & TSADCV2_INT_PD_CLEAR, regs + TSADCV2_INT_PD);
+}
+
+static void rk_tsadcv2_control(void __iomem *regs, bool enable)
+{
+ u32 val;
+
+ val = readl_relaxed(regs + TSADCV2_AUTO_CON);
+ if (enable)
+ val |= TSADCV2_AUTO_EN;
+ else
+ val &= ~TSADCV2_AUTO_EN;
+
+ writel_relaxed(val, regs + TSADCV2_AUTO_CON);
+}
+
+static int rk_tsadcv2_get_temp(int chn, void __iomem *regs, long *temp)
+{
+ u32 val;
+
+ /* the A/D value of the channel's last conversion needs some time */
+ val = readl_relaxed(regs + TSADCV2_DATA(chn));
+ if (val == 0)
+ return -EAGAIN;
+
+ *temp = rk_tsadcv2_code_to_temp(val);
+
+ return 0;
+}
+
+static void rk_tsadcv2_tshut_temp(int chn, void __iomem *regs, long temp)
+{
+ u32 tshut_value, val;
+
+ tshut_value = rk_tsadcv2_temp_to_code(temp);
+ writel_relaxed(tshut_value, regs + TSADCV2_COMP_SHUT(chn));
+
+ /* TSHUT will be valid */
+ val = readl_relaxed(regs + TSADCV2_AUTO_CON);
+ writel_relaxed(val | TSADCV2_AUTO_SRC_EN(chn), regs + TSADCV2_AUTO_CON);
+}
+
+static void rk_tsadcv2_tshut_mode(int chn, void __iomem *regs,
+ enum tshut_mode mode)
+{
+ u32 val;
+
+ val = readl_relaxed(regs + TSADCV2_INT_EN);
+ if (mode == TSHUT_MODE_GPIO) {
+ val &= ~TSADCV2_SHUT_2CRU_SRC_EN(chn);
+ val |= TSADCV2_SHUT_2GPIO_SRC_EN(chn);
+ } else {
+ val &= ~TSADCV2_SHUT_2GPIO_SRC_EN(chn);
+ val |= TSADCV2_SHUT_2CRU_SRC_EN(chn);
+ }
+
+ writel_relaxed(val, regs + TSADCV2_INT_EN);
+}
+
+static const struct rockchip_tsadc_chip rk3288_tsadc_data = {
+ .tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */
+ .tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
+ .tshut_temp = 95000,
+
+ .initialize = rk_tsadcv2_initialize,
+ .irq_ack = rk_tsadcv2_irq_ack,
+ .control = rk_tsadcv2_control,
+ .get_temp = rk_tsadcv2_get_temp,
+ .set_tshut_temp = rk_tsadcv2_tshut_temp,
+ .set_tshut_mode = rk_tsadcv2_tshut_mode,
+};
+
+static const struct of_device_id of_rockchip_thermal_match[] = {
+ {
+ .compatible = "rockchip,rk3288-tsadc",
+ .data = (void *)&rk3288_tsadc_data,
+ },
+ { /* end */ },
+};
+MODULE_DEVICE_TABLE(of, of_rockchip_thermal_match);
+
+static void
+rockchip_thermal_toggle_sensor(struct rockchip_thermal_sensor *sensor, bool on)
+{
+ struct thermal_zone_device *tzd = sensor->tzd;
+
+ tzd->ops->set_mode(tzd,
+ on ? THERMAL_DEVICE_ENABLED : THERMAL_DEVICE_DISABLED);
+}
+
+static irqreturn_t rockchip_thermal_alarm_irq_thread(int irq, void *dev)
+{
+ struct rockchip_thermal_data *thermal = dev;
+ int i;
+
+ dev_dbg(&thermal->pdev->dev, "thermal alarm\n");
+
+ thermal->chip->irq_ack(thermal->regs);
+
+ for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++)
+ thermal_zone_device_update(thermal->sensors[i].tzd);
+
+ return IRQ_HANDLED;
+}
+
+static int rockchip_thermal_get_temp(void *_sensor, long *out_temp)
+{
+ struct rockchip_thermal_sensor *sensor = _sensor;
+ struct rockchip_thermal_data *thermal = sensor->thermal;
+ const struct rockchip_tsadc_chip *tsadc = sensor->thermal->chip;
+ int retval;
+
+ retval = tsadc->get_temp(sensor->id, thermal->regs, out_temp);
+ dev_dbg(&thermal->pdev->dev, "sensor %d - temp: %ld, retval: %d\n",
+ sensor->id, *out_temp, retval);
+
+ return retval;
+}
+
+static const struct thermal_zone_of_device_ops rockchip_of_thermal_ops = {
+ .get_temp = rockchip_thermal_get_temp,
+};
+
+static int rockchip_configure_from_dt(struct device *dev,
+ struct device_node *np,
+ struct rockchip_thermal_data *thermal)
+{
+ u32 shut_temp, tshut_mode, tshut_polarity;
+
+ if (of_property_read_u32(np, "rockchip,hw-tshut-temp", &shut_temp)) {
+ dev_warn(dev,
+ "Missing tshut temp property, using default %ld\n",
+ thermal->chip->tshut_temp);
+ thermal->tshut_temp = thermal->chip->tshut_temp;
+ } else {
+ thermal->tshut_temp = shut_temp;
+ }
+
+ if (thermal->tshut_temp > INT_MAX) {
+ dev_err(dev, "Invalid tshut temperature specified: %ld\n",
+ thermal->tshut_temp);
+ return -ERANGE;
+ }
+
+ if (of_property_read_u32(np, "rockchip,hw-tshut-mode", &tshut_mode)) {
+ dev_warn(dev,
+ "Missing tshut mode property, using default (%s)\n",
+ thermal->chip->tshut_mode == TSHUT_MODE_GPIO ?
+ "gpio" : "cru");
+ thermal->tshut_mode = thermal->chip->tshut_mode;
+ } else {
+ thermal->tshut_mode = tshut_mode;
+ }
+
+ if (thermal->tshut_mode > 1) {
+ dev_err(dev, "Invalid tshut mode specified: %d\n",
+ thermal->tshut_mode);
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32(np, "rockchip,hw-tshut-polarity",
+ &tshut_polarity)) {
+ dev_warn(dev,
+ "Missing tshut-polarity property, using default (%s)\n",
+ thermal->chip->tshut_polarity == TSHUT_LOW_ACTIVE ?
+ "low" : "high");
+ thermal->tshut_polarity = thermal->chip->tshut_polarity;
+ } else {
+ thermal->tshut_polarity = tshut_polarity;
+ }
+
+ if (thermal->tshut_polarity > 1) {
+ dev_err(dev, "Invalid tshut-polarity specified: %d\n",
+ thermal->tshut_polarity);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+rockchip_thermal_register_sensor(struct platform_device *pdev,
+ struct rockchip_thermal_data *thermal,
+ struct rockchip_thermal_sensor *sensor,
+ enum sensor_id id)
+{
+ const struct rockchip_tsadc_chip *tsadc = thermal->chip;
+ int error;
+
+ tsadc->set_tshut_mode(id, thermal->regs, thermal->tshut_mode);
+ tsadc->set_tshut_temp(id, thermal->regs, thermal->tshut_temp);
+
+ sensor->thermal = thermal;
+ sensor->id = id;
+ sensor->tzd = thermal_zone_of_sensor_register(&pdev->dev, id, sensor,
+ &rockchip_of_thermal_ops);
+ if (IS_ERR(sensor->tzd)) {
+ error = PTR_ERR(sensor->tzd);
+ dev_err(&pdev->dev, "failed to register sensor %d: %d\n",
+ id, error);
+ return error;
+ }
+
+ return 0;
+}
+
+/*
+ * Reset TSADC Controller, reset all tsadc registers.
+ */
+static void rockchip_thermal_reset_controller(struct reset_control *reset)
+{
+ reset_control_assert(reset);
+ usleep_range(10, 20);
+ reset_control_deassert(reset);
+}
+
+static int rockchip_thermal_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct rockchip_thermal_data *thermal;
+ const struct of_device_id *match;
+ struct resource *res;
+ int irq;
+ int i;
+ int error;
+
+ match = of_match_node(of_rockchip_thermal_match, np);
+ if (!match)
+ return -ENXIO;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "no irq resource?\n");
+ return -EINVAL;
+ }
+
+ thermal = devm_kzalloc(&pdev->dev, sizeof(struct rockchip_thermal_data),
+ GFP_KERNEL);
+ if (!thermal)
+ return -ENOMEM;
+
+ thermal->pdev = pdev;
+
+ thermal->chip = (const struct rockchip_tsadc_chip *)match->data;
+ if (!thermal->chip)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ thermal->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(thermal->regs))
+ return PTR_ERR(thermal->regs);
+
+ thermal->reset = devm_reset_control_get(&pdev->dev, "tsadc-apb");
+ if (IS_ERR(thermal->reset)) {
+ error = PTR_ERR(thermal->reset);
+ dev_err(&pdev->dev, "failed to get tsadc reset: %d\n", error);
+ return error;
+ }
+
+ thermal->clk = devm_clk_get(&pdev->dev, "tsadc");
+ if (IS_ERR(thermal->clk)) {
+ error = PTR_ERR(thermal->clk);
+ dev_err(&pdev->dev, "failed to get tsadc clock: %d\n", error);
+ return error;
+ }
+
+ thermal->pclk = devm_clk_get(&pdev->dev, "apb_pclk");
+ if (IS_ERR(thermal->pclk)) {
+ error = PTR_ERR(thermal->clk);
+ dev_err(&pdev->dev, "failed to get apb_pclk clock: %d\n",
+ error);
+ return error;
+ }
+
+ error = clk_prepare_enable(thermal->clk);
+ if (error) {
+ dev_err(&pdev->dev, "failed to enable converter clock: %d\n",
+ error);
+ return error;
+ }
+
+ error = clk_prepare_enable(thermal->pclk);
+ if (error) {
+ dev_err(&pdev->dev, "failed to enable pclk: %d\n", error);
+ goto err_disable_clk;
+ }
+
+ rockchip_thermal_reset_controller(thermal->reset);
+
+ error = rockchip_configure_from_dt(&pdev->dev, np, thermal);
+ if (error) {
+ dev_err(&pdev->dev, "failed to parse device tree data: %d\n",
+ error);
+ goto err_disable_pclk;
+ }
+
+ thermal->chip->initialize(thermal->regs, thermal->tshut_polarity);
+
+ error = rockchip_thermal_register_sensor(pdev, thermal,
+ &thermal->sensors[0],
+ SENSOR_CPU);
+ if (error) {
+ dev_err(&pdev->dev,
+ "failed to register CPU thermal sensor: %d\n", error);
+ goto err_disable_pclk;
+ }
+
+ error = rockchip_thermal_register_sensor(pdev, thermal,
+ &thermal->sensors[1],
+ SENSOR_GPU);
+ if (error) {
+ dev_err(&pdev->dev,
+ "failed to register GPU thermal sensor: %d\n", error);
+ goto err_unregister_cpu_sensor;
+ }
+
+ error = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ &rockchip_thermal_alarm_irq_thread,
+ IRQF_ONESHOT,
+ "rockchip_thermal", thermal);
+ if (error) {
+ dev_err(&pdev->dev,
+ "failed to request tsadc irq: %d\n", error);
+ goto err_unregister_gpu_sensor;
+ }
+
+ thermal->chip->control(thermal->regs, true);
+
+ for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++)
+ rockchip_thermal_toggle_sensor(&thermal->sensors[i], true);
+
+ platform_set_drvdata(pdev, thermal);
+
+ return 0;
+
+err_unregister_gpu_sensor:
+ thermal_zone_of_sensor_unregister(&pdev->dev, thermal->sensors[1].tzd);
+err_unregister_cpu_sensor:
+ thermal_zone_of_sensor_unregister(&pdev->dev, thermal->sensors[0].tzd);
+err_disable_pclk:
+ clk_disable_unprepare(thermal->pclk);
+err_disable_clk:
+ clk_disable_unprepare(thermal->clk);
+
+ return error;
+}
+
+static int rockchip_thermal_remove(struct platform_device *pdev)
+{
+ struct rockchip_thermal_data *thermal = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) {
+ struct rockchip_thermal_sensor *sensor = &thermal->sensors[i];
+
+ rockchip_thermal_toggle_sensor(sensor, false);
+ thermal_zone_of_sensor_unregister(&pdev->dev, sensor->tzd);
+ }
+
+ thermal->chip->control(thermal->regs, false);
+
+ clk_disable_unprepare(thermal->pclk);
+ clk_disable_unprepare(thermal->clk);
+
+ return 0;
+}
+
+static int __maybe_unused rockchip_thermal_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rockchip_thermal_data *thermal = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++)
+ rockchip_thermal_toggle_sensor(&thermal->sensors[i], false);
+
+ thermal->chip->control(thermal->regs, false);
+
+ clk_disable(thermal->pclk);
+ clk_disable(thermal->clk);
+
+ return 0;
+}
+
+static int __maybe_unused rockchip_thermal_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rockchip_thermal_data *thermal = platform_get_drvdata(pdev);
+ int i;
+ int error;
+
+ error = clk_enable(thermal->clk);
+ if (error)
+ return error;
+
+ error = clk_enable(thermal->pclk);
+ if (error)
+ return error;
+
+ rockchip_thermal_reset_controller(thermal->reset);
+
+ thermal->chip->initialize(thermal->regs, thermal->tshut_polarity);
+
+ for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) {
+ enum sensor_id id = thermal->sensors[i].id;
+
+ thermal->chip->set_tshut_mode(id, thermal->regs,
+ thermal->tshut_mode);
+ thermal->chip->set_tshut_temp(id, thermal->regs,
+ thermal->tshut_temp);
+ }
+
+ thermal->chip->control(thermal->regs, true);
+
+ for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++)
+ rockchip_thermal_toggle_sensor(&thermal->sensors[i], true);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(rockchip_thermal_pm_ops,
+ rockchip_thermal_suspend, rockchip_thermal_resume);
+
+static struct platform_driver rockchip_thermal_driver = {
+ .driver = {
+ .name = "rockchip-thermal",
+ .owner = THIS_MODULE,
+ .pm = &rockchip_thermal_pm_ops,
+ .of_match_table = of_rockchip_thermal_match,
+ },
+ .probe = rockchip_thermal_probe,
+ .remove = rockchip_thermal_remove,
+};
+
+module_platform_driver(rockchip_thermal_driver);
+
+MODULE_DESCRIPTION("ROCKCHIP THERMAL Driver");
+MODULE_AUTHOR("Rockchip, Inc.");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:rockchip-thermal");
diff --git a/drivers/thermal/samsung/exynos_thermal_common.h b/drivers/thermal/samsung/exynos_thermal_common.h
index 158f5aa8dc5..cd4471925cd 100644
--- a/drivers/thermal/samsung/exynos_thermal_common.h
+++ b/drivers/thermal/samsung/exynos_thermal_common.h
@@ -27,7 +27,6 @@
#define SENSOR_NAME_LEN 16
#define MAX_TRIP_COUNT 8
#define MAX_COOLING_DEVICE 4
-#define MAX_TRIMINFO_CTRL_REG 2
#define ACTIVE_INTERVAL 500
#define IDLE_INTERVAL 10000
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index 1e7d0736e86..d44d91d681d 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -33,7 +33,87 @@
#include "exynos_thermal_common.h"
#include "exynos_tmu.h"
-#include "exynos_tmu_data.h"
+
+/* Exynos generic registers */
+#define EXYNOS_TMU_REG_TRIMINFO 0x0
+#define EXYNOS_TMU_REG_CONTROL 0x20
+#define EXYNOS_TMU_REG_STATUS 0x28
+#define EXYNOS_TMU_REG_CURRENT_TEMP 0x40
+#define EXYNOS_TMU_REG_INTEN 0x70
+#define EXYNOS_TMU_REG_INTSTAT 0x74
+#define EXYNOS_TMU_REG_INTCLEAR 0x78
+
+#define EXYNOS_TMU_TEMP_MASK 0xff
+#define EXYNOS_TMU_REF_VOLTAGE_SHIFT 24
+#define EXYNOS_TMU_REF_VOLTAGE_MASK 0x1f
+#define EXYNOS_TMU_BUF_SLOPE_SEL_MASK 0xf
+#define EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT 8
+#define EXYNOS_TMU_CORE_EN_SHIFT 0
+
+/* Exynos3250 specific registers */
+#define EXYNOS_TMU_TRIMINFO_CON1 0x10
+
+/* Exynos4210 specific registers */
+#define EXYNOS4210_TMU_REG_THRESHOLD_TEMP 0x44
+#define EXYNOS4210_TMU_REG_TRIG_LEVEL0 0x50
+
+/* Exynos5250, Exynos4412, Exynos3250 specific registers */
+#define EXYNOS_TMU_TRIMINFO_CON2 0x14
+#define EXYNOS_THD_TEMP_RISE 0x50
+#define EXYNOS_THD_TEMP_FALL 0x54
+#define EXYNOS_EMUL_CON 0x80
+
+#define EXYNOS_TRIMINFO_RELOAD_ENABLE 1
+#define EXYNOS_TRIMINFO_25_SHIFT 0
+#define EXYNOS_TRIMINFO_85_SHIFT 8
+#define EXYNOS_TMU_TRIP_MODE_SHIFT 13
+#define EXYNOS_TMU_TRIP_MODE_MASK 0x7
+#define EXYNOS_TMU_THERM_TRIP_EN_SHIFT 12
+
+#define EXYNOS_TMU_INTEN_RISE0_SHIFT 0
+#define EXYNOS_TMU_INTEN_RISE1_SHIFT 4
+#define EXYNOS_TMU_INTEN_RISE2_SHIFT 8
+#define EXYNOS_TMU_INTEN_RISE3_SHIFT 12
+#define EXYNOS_TMU_INTEN_FALL0_SHIFT 16
+
+#define EXYNOS_EMUL_TIME 0x57F0
+#define EXYNOS_EMUL_TIME_MASK 0xffff
+#define EXYNOS_EMUL_TIME_SHIFT 16
+#define EXYNOS_EMUL_DATA_SHIFT 8
+#define EXYNOS_EMUL_DATA_MASK 0xFF
+#define EXYNOS_EMUL_ENABLE 0x1
+
+/* Exynos5260 specific */
+#define EXYNOS5260_TMU_REG_INTEN 0xC0
+#define EXYNOS5260_TMU_REG_INTSTAT 0xC4
+#define EXYNOS5260_TMU_REG_INTCLEAR 0xC8
+#define EXYNOS5260_EMUL_CON 0x100
+
+/* Exynos4412 specific */
+#define EXYNOS4412_MUX_ADDR_VALUE 6
+#define EXYNOS4412_MUX_ADDR_SHIFT 20
+
+/*exynos5440 specific registers*/
+#define EXYNOS5440_TMU_S0_7_TRIM 0x000
+#define EXYNOS5440_TMU_S0_7_CTRL 0x020
+#define EXYNOS5440_TMU_S0_7_DEBUG 0x040
+#define EXYNOS5440_TMU_S0_7_TEMP 0x0f0
+#define EXYNOS5440_TMU_S0_7_TH0 0x110
+#define EXYNOS5440_TMU_S0_7_TH1 0x130
+#define EXYNOS5440_TMU_S0_7_TH2 0x150
+#define EXYNOS5440_TMU_S0_7_IRQEN 0x210
+#define EXYNOS5440_TMU_S0_7_IRQ 0x230
+/* exynos5440 common registers */
+#define EXYNOS5440_TMU_IRQ_STATUS 0x000
+#define EXYNOS5440_TMU_PMIN 0x004
+
+#define EXYNOS5440_TMU_INTEN_RISE0_SHIFT 0
+#define EXYNOS5440_TMU_INTEN_RISE1_SHIFT 1
+#define EXYNOS5440_TMU_INTEN_RISE2_SHIFT 2
+#define EXYNOS5440_TMU_INTEN_RISE3_SHIFT 3
+#define EXYNOS5440_TMU_INTEN_FALL0_SHIFT 4
+#define EXYNOS5440_TMU_TH_RISE4_SHIFT 24
+#define EXYNOS5440_EFUSE_SWAP_OFFSET 8
/**
* struct exynos_tmu_data : A structure to hold the private data of the TMU
@@ -52,6 +132,11 @@
* @temp_error2: fused value of the second point trim.
* @regulator: pointer to the TMU regulator structure.
* @reg_conf: pointer to structure to register with core thermal.
+ * @tmu_initialize: SoC specific TMU initialization method
+ * @tmu_control: SoC specific TMU control method
+ * @tmu_read: SoC specific TMU temperature read method
+ * @tmu_set_emulation: SoC specific TMU emulation setting method
+ * @tmu_clear_irqs: SoC specific TMU interrupts clearing method
*/
struct exynos_tmu_data {
int id;
@@ -66,6 +151,12 @@ struct exynos_tmu_data {
u8 temp_error1, temp_error2;
struct regulator *regulator;
struct thermal_sensor_conf *reg_conf;
+ int (*tmu_initialize)(struct platform_device *pdev);
+ void (*tmu_control)(struct platform_device *pdev, bool on);
+ int (*tmu_read)(struct exynos_tmu_data *data);
+ void (*tmu_set_emulation)(struct exynos_tmu_data *data,
+ unsigned long temp);
+ void (*tmu_clear_irqs)(struct exynos_tmu_data *data);
};
/*
@@ -122,83 +213,10 @@ static int code_to_temp(struct exynos_tmu_data *data, u8 temp_code)
return temp;
}
-static void exynos_tmu_clear_irqs(struct exynos_tmu_data *data)
-{
- const struct exynos_tmu_registers *reg = data->pdata->registers;
- unsigned int val_irq;
-
- val_irq = readl(data->base + reg->tmu_intstat);
- /*
- * Clear the interrupts. Please note that the documentation for
- * Exynos3250, Exynos4412, Exynos5250 and Exynos5260 incorrectly
- * states that INTCLEAR register has a different placing of bits
- * responsible for FALL IRQs than INTSTAT register. Exynos5420
- * and Exynos5440 documentation is correct (Exynos4210 doesn't
- * support FALL IRQs at all).
- */
- writel(val_irq, data->base + reg->tmu_intclear);
-}
-
-static int exynos_tmu_initialize(struct platform_device *pdev)
+static void sanitize_temp_error(struct exynos_tmu_data *data, u32 trim_info)
{
- struct exynos_tmu_data *data = platform_get_drvdata(pdev);
struct exynos_tmu_platform_data *pdata = data->pdata;
- const struct exynos_tmu_registers *reg = pdata->registers;
- unsigned int status, trim_info = 0, con, ctrl;
- unsigned int rising_threshold = 0, falling_threshold = 0;
- int ret = 0, threshold_code, i;
-
- mutex_lock(&data->lock);
- clk_enable(data->clk);
- if (!IS_ERR(data->clk_sec))
- clk_enable(data->clk_sec);
- if (TMU_SUPPORTS(pdata, READY_STATUS)) {
- status = readb(data->base + reg->tmu_status);
- if (!status) {
- ret = -EBUSY;
- goto out;
- }
- }
-
- if (TMU_SUPPORTS(pdata, TRIM_RELOAD)) {
- for (i = 0; i < reg->triminfo_ctrl_count; i++) {
- if (pdata->triminfo_reload[i]) {
- ctrl = readl(data->base +
- reg->triminfo_ctrl[i]);
- ctrl |= pdata->triminfo_reload[i];
- writel(ctrl, data->base +
- reg->triminfo_ctrl[i]);
- }
- }
- }
-
- /* Save trimming info in order to perform calibration */
- if (data->soc == SOC_ARCH_EXYNOS5440) {
- /*
- * For exynos5440 soc triminfo value is swapped between TMU0 and
- * TMU2, so the below logic is needed.
- */
- switch (data->id) {
- case 0:
- trim_info = readl(data->base +
- EXYNOS5440_EFUSE_SWAP_OFFSET + reg->triminfo_data);
- break;
- case 1:
- trim_info = readl(data->base + reg->triminfo_data);
- break;
- case 2:
- trim_info = readl(data->base -
- EXYNOS5440_EFUSE_SWAP_OFFSET + reg->triminfo_data);
- }
- } else {
- /* On exynos5420 the triminfo register is in the shared space */
- if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO)
- trim_info = readl(data->base_second +
- reg->triminfo_data);
- else
- trim_info = readl(data->base + reg->triminfo_data);
- }
data->temp_error1 = trim_info & EXYNOS_TMU_TEMP_MASK;
data->temp_error2 = ((trim_info >> EXYNOS_TRIMINFO_85_SHIFT) &
EXYNOS_TMU_TEMP_MASK);
@@ -212,69 +230,37 @@ static int exynos_tmu_initialize(struct platform_device *pdev)
data->temp_error2 =
(pdata->efuse_value >> EXYNOS_TRIMINFO_85_SHIFT) &
EXYNOS_TMU_TEMP_MASK;
+}
- rising_threshold = readl(data->base + reg->threshold_th0);
+static u32 get_th_reg(struct exynos_tmu_data *data, u32 threshold, bool falling)
+{
+ struct exynos_tmu_platform_data *pdata = data->pdata;
+ int i;
- if (data->soc == SOC_ARCH_EXYNOS4210) {
- /* Write temperature code for threshold */
- threshold_code = temp_to_code(data, pdata->threshold);
- writeb(threshold_code,
- data->base + reg->threshold_temp);
- for (i = 0; i < pdata->non_hw_trigger_levels; i++)
- writeb(pdata->trigger_levels[i], data->base +
- reg->threshold_th0 + i * sizeof(reg->threshold_th0));
+ for (i = 0; i < pdata->non_hw_trigger_levels; i++) {
+ u8 temp = pdata->trigger_levels[i];
- exynos_tmu_clear_irqs(data);
- } else {
- /* Write temperature code for rising and falling threshold */
- for (i = 0; i < pdata->non_hw_trigger_levels; i++) {
- threshold_code = temp_to_code(data,
- pdata->trigger_levels[i]);
- rising_threshold &= ~(0xff << 8 * i);
- rising_threshold |= threshold_code << 8 * i;
- if (pdata->threshold_falling) {
- threshold_code = temp_to_code(data,
- pdata->trigger_levels[i] -
- pdata->threshold_falling);
- falling_threshold |= threshold_code << 8 * i;
- }
- }
+ if (falling)
+ temp -= pdata->threshold_falling;
+ else
+ threshold &= ~(0xff << 8 * i);
- writel(rising_threshold,
- data->base + reg->threshold_th0);
- writel(falling_threshold,
- data->base + reg->threshold_th1);
-
- exynos_tmu_clear_irqs(data);
-
- /* if last threshold limit is also present */
- i = pdata->max_trigger_level - 1;
- if (pdata->trigger_levels[i] &&
- (pdata->trigger_type[i] == HW_TRIP)) {
- threshold_code = temp_to_code(data,
- pdata->trigger_levels[i]);
- if (i == EXYNOS_MAX_TRIGGER_PER_REG - 1) {
- /* 1-4 level to be assigned in th0 reg */
- rising_threshold &= ~(0xff << 8 * i);
- rising_threshold |= threshold_code << 8 * i;
- writel(rising_threshold,
- data->base + reg->threshold_th0);
- } else if (i == EXYNOS_MAX_TRIGGER_PER_REG) {
- /* 5th level to be assigned in th2 reg */
- rising_threshold =
- threshold_code << reg->threshold_th3_l0_shift;
- writel(rising_threshold,
- data->base + reg->threshold_th2);
- }
- con = readl(data->base + reg->tmu_ctrl);
- con |= (1 << reg->therm_trip_en_shift);
- writel(con, data->base + reg->tmu_ctrl);
- }
+ threshold |= temp_to_code(data, temp) << 8 * i;
}
- /*Clear the PMIN in the common TMU register*/
- if (reg->tmu_pmin && !data->id)
- writel(0, data->base_second + reg->tmu_pmin);
-out:
+
+ return threshold;
+}
+
+static int exynos_tmu_initialize(struct platform_device *pdev)
+{
+ struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+ int ret;
+
+ mutex_lock(&data->lock);
+ clk_enable(data->clk);
+ if (!IS_ERR(data->clk_sec))
+ clk_enable(data->clk_sec);
+ ret = data->tmu_initialize(pdev);
clk_disable(data->clk);
mutex_unlock(&data->lock);
if (!IS_ERR(data->clk_sec))
@@ -283,20 +269,13 @@ out:
return ret;
}
-static void exynos_tmu_control(struct platform_device *pdev, bool on)
+static u32 get_con_reg(struct exynos_tmu_data *data, u32 con)
{
- struct exynos_tmu_data *data = platform_get_drvdata(pdev);
struct exynos_tmu_platform_data *pdata = data->pdata;
- const struct exynos_tmu_registers *reg = pdata->registers;
- unsigned int con, interrupt_en;
- mutex_lock(&data->lock);
- clk_enable(data->clk);
-
- con = readl(data->base + reg->tmu_ctrl);
-
- if (pdata->test_mux)
- con |= (pdata->test_mux << reg->test_mux_addr_shift);
+ if (data->soc == SOC_ARCH_EXYNOS4412 ||
+ data->soc == SOC_ARCH_EXYNOS3250)
+ con |= (EXYNOS4412_MUX_ADDR_VALUE << EXYNOS4412_MUX_ADDR_SHIFT);
con &= ~(EXYNOS_TMU_REF_VOLTAGE_MASK << EXYNOS_TMU_REF_VOLTAGE_SHIFT);
con |= pdata->reference_voltage << EXYNOS_TMU_REF_VOLTAGE_SHIFT;
@@ -305,95 +284,287 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on)
con |= (pdata->gain << EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT);
if (pdata->noise_cancel_mode) {
- con &= ~(reg->therm_trip_mode_mask <<
- reg->therm_trip_mode_shift);
- con |= (pdata->noise_cancel_mode << reg->therm_trip_mode_shift);
+ con &= ~(EXYNOS_TMU_TRIP_MODE_MASK << EXYNOS_TMU_TRIP_MODE_SHIFT);
+ con |= (pdata->noise_cancel_mode << EXYNOS_TMU_TRIP_MODE_SHIFT);
}
- if (on) {
- con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
- interrupt_en =
- pdata->trigger_enable[3] << reg->inten_rise3_shift |
- pdata->trigger_enable[2] << reg->inten_rise2_shift |
- pdata->trigger_enable[1] << reg->inten_rise1_shift |
- pdata->trigger_enable[0] << reg->inten_rise0_shift;
- if (TMU_SUPPORTS(pdata, FALLING_TRIP))
- interrupt_en |=
- interrupt_en << reg->inten_fall0_shift;
- } else {
- con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
- interrupt_en = 0; /* Disable all interrupts */
- }
- writel(interrupt_en, data->base + reg->tmu_inten);
- writel(con, data->base + reg->tmu_ctrl);
+ return con;
+}
+
+static void exynos_tmu_control(struct platform_device *pdev, bool on)
+{
+ struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+ mutex_lock(&data->lock);
+ clk_enable(data->clk);
+ data->tmu_control(pdev, on);
clk_disable(data->clk);
mutex_unlock(&data->lock);
}
-static int exynos_tmu_read(struct exynos_tmu_data *data)
+static int exynos4210_tmu_initialize(struct platform_device *pdev)
{
+ struct exynos_tmu_data *data = platform_get_drvdata(pdev);
struct exynos_tmu_platform_data *pdata = data->pdata;
- const struct exynos_tmu_registers *reg = pdata->registers;
- u8 temp_code;
- int temp;
+ unsigned int status;
+ int ret = 0, threshold_code, i;
- mutex_lock(&data->lock);
- clk_enable(data->clk);
+ status = readb(data->base + EXYNOS_TMU_REG_STATUS);
+ if (!status) {
+ ret = -EBUSY;
+ goto out;
+ }
- temp_code = readb(data->base + reg->tmu_cur_temp);
+ sanitize_temp_error(data, readl(data->base + EXYNOS_TMU_REG_TRIMINFO));
- if (data->soc == SOC_ARCH_EXYNOS4210)
- /* temp_code should range between 75 and 175 */
- if (temp_code < 75 || temp_code > 175) {
- temp = -ENODATA;
- goto out;
+ /* Write temperature code for threshold */
+ threshold_code = temp_to_code(data, pdata->threshold);
+ writeb(threshold_code, data->base + EXYNOS4210_TMU_REG_THRESHOLD_TEMP);
+
+ for (i = 0; i < pdata->non_hw_trigger_levels; i++)
+ writeb(pdata->trigger_levels[i], data->base +
+ EXYNOS4210_TMU_REG_TRIG_LEVEL0 + i * 4);
+
+ data->tmu_clear_irqs(data);
+out:
+ return ret;
+}
+
+static int exynos4412_tmu_initialize(struct platform_device *pdev)
+{
+ struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+ struct exynos_tmu_platform_data *pdata = data->pdata;
+ unsigned int status, trim_info, con, ctrl, rising_threshold;
+ int ret = 0, threshold_code, i;
+
+ status = readb(data->base + EXYNOS_TMU_REG_STATUS);
+ if (!status) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (data->soc == SOC_ARCH_EXYNOS3250 ||
+ data->soc == SOC_ARCH_EXYNOS4412 ||
+ data->soc == SOC_ARCH_EXYNOS5250) {
+ if (data->soc == SOC_ARCH_EXYNOS3250) {
+ ctrl = readl(data->base + EXYNOS_TMU_TRIMINFO_CON1);
+ ctrl |= EXYNOS_TRIMINFO_RELOAD_ENABLE;
+ writel(ctrl, data->base + EXYNOS_TMU_TRIMINFO_CON1);
}
+ ctrl = readl(data->base + EXYNOS_TMU_TRIMINFO_CON2);
+ ctrl |= EXYNOS_TRIMINFO_RELOAD_ENABLE;
+ writel(ctrl, data->base + EXYNOS_TMU_TRIMINFO_CON2);
+ }
- temp = code_to_temp(data, temp_code);
+ /* On exynos5420 the triminfo register is in the shared space */
+ if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO)
+ trim_info = readl(data->base_second + EXYNOS_TMU_REG_TRIMINFO);
+ else
+ trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO);
+
+ sanitize_temp_error(data, trim_info);
+
+ /* Write temperature code for rising and falling threshold */
+ rising_threshold = readl(data->base + EXYNOS_THD_TEMP_RISE);
+ rising_threshold = get_th_reg(data, rising_threshold, false);
+ writel(rising_threshold, data->base + EXYNOS_THD_TEMP_RISE);
+ writel(get_th_reg(data, 0, true), data->base + EXYNOS_THD_TEMP_FALL);
+
+ data->tmu_clear_irqs(data);
+
+ /* if last threshold limit is also present */
+ i = pdata->max_trigger_level - 1;
+ if (pdata->trigger_levels[i] && pdata->trigger_type[i] == HW_TRIP) {
+ threshold_code = temp_to_code(data, pdata->trigger_levels[i]);
+ /* 1-4 level to be assigned in th0 reg */
+ rising_threshold &= ~(0xff << 8 * i);
+ rising_threshold |= threshold_code << 8 * i;
+ writel(rising_threshold, data->base + EXYNOS_THD_TEMP_RISE);
+ con = readl(data->base + EXYNOS_TMU_REG_CONTROL);
+ con |= (1 << EXYNOS_TMU_THERM_TRIP_EN_SHIFT);
+ writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
+ }
out:
- clk_disable(data->clk);
- mutex_unlock(&data->lock);
+ return ret;
+}
- return temp;
+static int exynos5440_tmu_initialize(struct platform_device *pdev)
+{
+ struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+ struct exynos_tmu_platform_data *pdata = data->pdata;
+ unsigned int trim_info = 0, con, rising_threshold;
+ int ret = 0, threshold_code, i;
+
+ /*
+	 * For the exynos5440 SoC, the triminfo value is swapped between TMU0 and
+	 * TMU2, so the logic below is needed.
+ */
+ switch (data->id) {
+ case 0:
+ trim_info = readl(data->base + EXYNOS5440_EFUSE_SWAP_OFFSET +
+ EXYNOS5440_TMU_S0_7_TRIM);
+ break;
+ case 1:
+ trim_info = readl(data->base + EXYNOS5440_TMU_S0_7_TRIM);
+ break;
+ case 2:
+ trim_info = readl(data->base - EXYNOS5440_EFUSE_SWAP_OFFSET +
+ EXYNOS5440_TMU_S0_7_TRIM);
+ }
+ sanitize_temp_error(data, trim_info);
+
+ /* Write temperature code for rising and falling threshold */
+ rising_threshold = readl(data->base + EXYNOS5440_TMU_S0_7_TH0);
+ rising_threshold = get_th_reg(data, rising_threshold, false);
+ writel(rising_threshold, data->base + EXYNOS5440_TMU_S0_7_TH0);
+ writel(0, data->base + EXYNOS5440_TMU_S0_7_TH1);
+
+ data->tmu_clear_irqs(data);
+
+ /* if last threshold limit is also present */
+ i = pdata->max_trigger_level - 1;
+ if (pdata->trigger_levels[i] && pdata->trigger_type[i] == HW_TRIP) {
+ threshold_code = temp_to_code(data, pdata->trigger_levels[i]);
+ /* 5th level to be assigned in th2 reg */
+ rising_threshold =
+ threshold_code << EXYNOS5440_TMU_TH_RISE4_SHIFT;
+ writel(rising_threshold, data->base + EXYNOS5440_TMU_S0_7_TH2);
+ con = readl(data->base + EXYNOS5440_TMU_S0_7_CTRL);
+ con |= (1 << EXYNOS_TMU_THERM_TRIP_EN_SHIFT);
+ writel(con, data->base + EXYNOS5440_TMU_S0_7_CTRL);
+ }
+ /* Clear the PMIN in the common TMU register */
+ if (!data->id)
+ writel(0, data->base_second + EXYNOS5440_TMU_PMIN);
+ return ret;
}
-#ifdef CONFIG_THERMAL_EMULATION
-static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
+static void exynos4210_tmu_control(struct platform_device *pdev, bool on)
{
- struct exynos_tmu_data *data = drv_data;
+ struct exynos_tmu_data *data = platform_get_drvdata(pdev);
struct exynos_tmu_platform_data *pdata = data->pdata;
- const struct exynos_tmu_registers *reg = pdata->registers;
- unsigned int val;
- int ret = -EINVAL;
+ unsigned int con, interrupt_en;
- if (!TMU_SUPPORTS(pdata, EMULATION))
- goto out;
+ con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));
- if (temp && temp < MCELSIUS)
- goto out;
+ if (on) {
+ con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
+ interrupt_en =
+ pdata->trigger_enable[3] << EXYNOS_TMU_INTEN_RISE3_SHIFT |
+ pdata->trigger_enable[2] << EXYNOS_TMU_INTEN_RISE2_SHIFT |
+ pdata->trigger_enable[1] << EXYNOS_TMU_INTEN_RISE1_SHIFT |
+ pdata->trigger_enable[0] << EXYNOS_TMU_INTEN_RISE0_SHIFT;
+ if (data->soc != SOC_ARCH_EXYNOS4210)
+ interrupt_en |=
+ interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
+ } else {
+ con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
+ interrupt_en = 0; /* Disable all interrupts */
+ }
+ writel(interrupt_en, data->base + EXYNOS_TMU_REG_INTEN);
+ writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
+}
+
+static void exynos5440_tmu_control(struct platform_device *pdev, bool on)
+{
+ struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+ struct exynos_tmu_platform_data *pdata = data->pdata;
+ unsigned int con, interrupt_en;
+
+ con = get_con_reg(data, readl(data->base + EXYNOS5440_TMU_S0_7_CTRL));
+
+ if (on) {
+ con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
+ interrupt_en =
+ pdata->trigger_enable[3] << EXYNOS5440_TMU_INTEN_RISE3_SHIFT |
+ pdata->trigger_enable[2] << EXYNOS5440_TMU_INTEN_RISE2_SHIFT |
+ pdata->trigger_enable[1] << EXYNOS5440_TMU_INTEN_RISE1_SHIFT |
+ pdata->trigger_enable[0] << EXYNOS5440_TMU_INTEN_RISE0_SHIFT;
+ interrupt_en |= interrupt_en << EXYNOS5440_TMU_INTEN_FALL0_SHIFT;
+ } else {
+ con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
+ interrupt_en = 0; /* Disable all interrupts */
+ }
+ writel(interrupt_en, data->base + EXYNOS5440_TMU_S0_7_IRQEN);
+ writel(con, data->base + EXYNOS5440_TMU_S0_7_CTRL);
+}
+
+static int exynos_tmu_read(struct exynos_tmu_data *data)
+{
+ int ret;
mutex_lock(&data->lock);
clk_enable(data->clk);
+ ret = data->tmu_read(data);
+ if (ret >= 0)
+ ret = code_to_temp(data, ret);
+ clk_disable(data->clk);
+ mutex_unlock(&data->lock);
- val = readl(data->base + reg->emul_con);
+ return ret;
+}
+#ifdef CONFIG_THERMAL_EMULATION
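+/*
+ * Compose the emulation control value: a non-zero temperature programs the
+ * emulated temperature code (and, except on Exynos5440, the sample time)
+ * and sets the enable bit; a zero temperature clears the enable bit.
+ */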
+static u32 get_emul_con_reg(struct exynos_tmu_data *data, unsigned int val,
+ unsigned long temp)
+{
if (temp) {
temp /= MCELSIUS;
- if (TMU_SUPPORTS(pdata, EMUL_TIME)) {
- val &= ~(EXYNOS_EMUL_TIME_MASK << reg->emul_time_shift);
- val |= (EXYNOS_EMUL_TIME << reg->emul_time_shift);
+ if (data->soc != SOC_ARCH_EXYNOS5440) {
+ val &= ~(EXYNOS_EMUL_TIME_MASK << EXYNOS_EMUL_TIME_SHIFT);
+ val |= (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT);
}
- val &= ~(EXYNOS_EMUL_DATA_MASK << reg->emul_temp_shift);
- val |= (temp_to_code(data, temp) << reg->emul_temp_shift) |
+ val &= ~(EXYNOS_EMUL_DATA_MASK << EXYNOS_EMUL_DATA_SHIFT);
+ val |= (temp_to_code(data, temp) << EXYNOS_EMUL_DATA_SHIFT) |
EXYNOS_EMUL_ENABLE;
} else {
val &= ~EXYNOS_EMUL_ENABLE;
}
- writel(val, data->base + reg->emul_con);
+ return val;
+}
+
+static void exynos4412_tmu_set_emulation(struct exynos_tmu_data *data,
+ unsigned long temp)
+{
+ unsigned int val;
+ u32 emul_con;
+
+ if (data->soc == SOC_ARCH_EXYNOS5260)
+ emul_con = EXYNOS5260_EMUL_CON;
+ else
+ emul_con = EXYNOS_EMUL_CON;
+
+ val = readl(data->base + emul_con);
+ val = get_emul_con_reg(data, val, temp);
+ writel(val, data->base + emul_con);
+}
+
+static void exynos5440_tmu_set_emulation(struct exynos_tmu_data *data,
+ unsigned long temp)
+{
+ unsigned int val;
+
+ val = readl(data->base + EXYNOS5440_TMU_S0_7_DEBUG);
+ val = get_emul_con_reg(data, val, temp);
+ writel(val, data->base + EXYNOS5440_TMU_S0_7_DEBUG);
+}
+
+static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
+{
+ struct exynos_tmu_data *data = drv_data;
+ int ret = -EINVAL;
+
+ if (data->soc == SOC_ARCH_EXYNOS4210)
+ goto out;
+ if (temp && temp < MCELSIUS)
+ goto out;
+
+ mutex_lock(&data->lock);
+ clk_enable(data->clk);
+ data->tmu_set_emulation(data, temp);
clk_disable(data->clk);
mutex_unlock(&data->lock);
return 0;
@@ -401,23 +572,41 @@ out:
return ret;
}
#else
+#define exynos4412_tmu_set_emulation NULL
+#define exynos5440_tmu_set_emulation NULL
static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
{ return -EINVAL; }
#endif/*CONFIG_THERMAL_EMULATION*/
+static int exynos4210_tmu_read(struct exynos_tmu_data *data)
+{
+ int ret = readb(data->base + EXYNOS_TMU_REG_CURRENT_TEMP);
+
+ /* "temp_code" should range between 75 and 175 */
+ return (ret < 75 || ret > 175) ? -ENODATA : ret;
+}
+
+static int exynos4412_tmu_read(struct exynos_tmu_data *data)
+{
+ return readb(data->base + EXYNOS_TMU_REG_CURRENT_TEMP);
+}
+
+static int exynos5440_tmu_read(struct exynos_tmu_data *data)
+{
+ return readb(data->base + EXYNOS5440_TMU_S0_7_TEMP);
+}
+
static void exynos_tmu_work(struct work_struct *work)
{
struct exynos_tmu_data *data = container_of(work,
struct exynos_tmu_data, irq_work);
- struct exynos_tmu_platform_data *pdata = data->pdata;
- const struct exynos_tmu_registers *reg = pdata->registers;
unsigned int val_type;
if (!IS_ERR(data->clk_sec))
clk_enable(data->clk_sec);
/* Find which sensor generated this interrupt */
- if (reg->tmu_irqstatus) {
- val_type = readl(data->base_second + reg->tmu_irqstatus);
+ if (data->soc == SOC_ARCH_EXYNOS5440) {
+ val_type = readl(data->base_second + EXYNOS5440_TMU_IRQ_STATUS);
if (!((val_type >> data->id) & 0x1))
goto out;
}
@@ -429,7 +618,7 @@ static void exynos_tmu_work(struct work_struct *work)
clk_enable(data->clk);
/* TODO: take action based on particular interrupt */
- exynos_tmu_clear_irqs(data);
+ data->tmu_clear_irqs(data);
clk_disable(data->clk);
mutex_unlock(&data->lock);
@@ -437,6 +626,40 @@ out:
enable_irq(data->irq);
}
+static void exynos4210_tmu_clear_irqs(struct exynos_tmu_data *data)
+{
+ unsigned int val_irq;
+ u32 tmu_intstat, tmu_intclear;
+
+ if (data->soc == SOC_ARCH_EXYNOS5260) {
+ tmu_intstat = EXYNOS5260_TMU_REG_INTSTAT;
+ tmu_intclear = EXYNOS5260_TMU_REG_INTCLEAR;
+ } else {
+ tmu_intstat = EXYNOS_TMU_REG_INTSTAT;
+ tmu_intclear = EXYNOS_TMU_REG_INTCLEAR;
+ }
+
+ val_irq = readl(data->base + tmu_intstat);
+ /*
+ * Clear the interrupts. Please note that the documentation for
+ * Exynos3250, Exynos4412, Exynos5250 and Exynos5260 incorrectly
+ * states that INTCLEAR register has a different placing of bits
+ * responsible for FALL IRQs than INTSTAT register. Exynos5420
+ * and Exynos5440 documentation is correct (Exynos4210 doesn't
+ * support FALL IRQs at all).
+ */
+ writel(val_irq, data->base + tmu_intclear);
+}
+
+static void exynos5440_tmu_clear_irqs(struct exynos_tmu_data *data)
+{
+ unsigned int val_irq;
+
+ val_irq = readl(data->base + EXYNOS5440_TMU_S0_7_IRQ);
+ /* clear the interrupts */
+ writel(val_irq, data->base + EXYNOS5440_TMU_S0_7_IRQ);
+}
+
static irqreturn_t exynos_tmu_irq(int irq, void *id)
{
struct exynos_tmu_data *data = id;
@@ -450,35 +673,35 @@ static irqreturn_t exynos_tmu_irq(int irq, void *id)
static const struct of_device_id exynos_tmu_match[] = {
{
.compatible = "samsung,exynos3250-tmu",
- .data = (void *)EXYNOS3250_TMU_DRV_DATA,
+ .data = &exynos3250_default_tmu_data,
},
{
.compatible = "samsung,exynos4210-tmu",
- .data = (void *)EXYNOS4210_TMU_DRV_DATA,
+ .data = &exynos4210_default_tmu_data,
},
{
.compatible = "samsung,exynos4412-tmu",
- .data = (void *)EXYNOS4412_TMU_DRV_DATA,
+ .data = &exynos4412_default_tmu_data,
},
{
.compatible = "samsung,exynos5250-tmu",
- .data = (void *)EXYNOS5250_TMU_DRV_DATA,
+ .data = &exynos5250_default_tmu_data,
},
{
.compatible = "samsung,exynos5260-tmu",
- .data = (void *)EXYNOS5260_TMU_DRV_DATA,
+ .data = &exynos5260_default_tmu_data,
},
{
.compatible = "samsung,exynos5420-tmu",
- .data = (void *)EXYNOS5420_TMU_DRV_DATA,
+ .data = &exynos5420_default_tmu_data,
},
{
.compatible = "samsung,exynos5420-tmu-ext-triminfo",
- .data = (void *)EXYNOS5420_TMU_DRV_DATA,
+ .data = &exynos5420_default_tmu_data,
},
{
.compatible = "samsung,exynos5440-tmu",
- .data = (void *)EXYNOS5440_TMU_DRV_DATA,
+ .data = &exynos5440_default_tmu_data,
},
{},
};
@@ -553,12 +776,47 @@ static int exynos_map_dt_data(struct platform_device *pdev)
dev_err(&pdev->dev, "No platform init data supplied.\n");
return -ENODEV;
}
+
data->pdata = pdata;
+ data->soc = pdata->type;
+
+ switch (data->soc) {
+ case SOC_ARCH_EXYNOS4210:
+ data->tmu_initialize = exynos4210_tmu_initialize;
+ data->tmu_control = exynos4210_tmu_control;
+ data->tmu_read = exynos4210_tmu_read;
+ data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
+ break;
+ case SOC_ARCH_EXYNOS3250:
+ case SOC_ARCH_EXYNOS4412:
+ case SOC_ARCH_EXYNOS5250:
+ case SOC_ARCH_EXYNOS5260:
+ case SOC_ARCH_EXYNOS5420:
+ case SOC_ARCH_EXYNOS5420_TRIMINFO:
+ data->tmu_initialize = exynos4412_tmu_initialize;
+ data->tmu_control = exynos4210_tmu_control;
+ data->tmu_read = exynos4412_tmu_read;
+ data->tmu_set_emulation = exynos4412_tmu_set_emulation;
+ data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
+ break;
+ case SOC_ARCH_EXYNOS5440:
+ data->tmu_initialize = exynos5440_tmu_initialize;
+ data->tmu_control = exynos5440_tmu_control;
+ data->tmu_read = exynos5440_tmu_read;
+ data->tmu_set_emulation = exynos5440_tmu_set_emulation;
+ data->tmu_clear_irqs = exynos5440_tmu_clear_irqs;
+ break;
+ default:
+ dev_err(&pdev->dev, "Platform not supported\n");
+ return -EINVAL;
+ }
+
/*
* Check if the TMU shares some registers and then try to map the
* memory of common registers.
*/
- if (!TMU_SUPPORTS(pdata, ADDRESS_MULTIPLE))
+ if (data->soc != SOC_ARCH_EXYNOS5420_TRIMINFO &&
+ data->soc != SOC_ARCH_EXYNOS5440)
return 0;
if (of_address_to_resource(pdev->dev.of_node, 1, &res)) {
@@ -625,20 +883,6 @@ static int exynos_tmu_probe(struct platform_device *pdev)
goto err_clk_sec;
}
- if (pdata->type == SOC_ARCH_EXYNOS3250 ||
- pdata->type == SOC_ARCH_EXYNOS4210 ||
- pdata->type == SOC_ARCH_EXYNOS4412 ||
- pdata->type == SOC_ARCH_EXYNOS5250 ||
- pdata->type == SOC_ARCH_EXYNOS5260 ||
- pdata->type == SOC_ARCH_EXYNOS5420_TRIMINFO ||
- pdata->type == SOC_ARCH_EXYNOS5440)
- data->soc = pdata->type;
- else {
- ret = -EINVAL;
- dev_err(&pdev->dev, "Platform not supported\n");
- goto err_clk;
- }
-
ret = exynos_tmu_initialize(pdev);
if (ret) {
dev_err(&pdev->dev, "Failed to initialize TMU\n");
diff --git a/drivers/thermal/samsung/exynos_tmu.h b/drivers/thermal/samsung/exynos_tmu.h
index c58c7663a3f..da3009bff6c 100644
--- a/drivers/thermal/samsung/exynos_tmu.h
+++ b/drivers/thermal/samsung/exynos_tmu.h
@@ -40,115 +40,12 @@ enum soc_type {
SOC_ARCH_EXYNOS4412,
SOC_ARCH_EXYNOS5250,
SOC_ARCH_EXYNOS5260,
+ SOC_ARCH_EXYNOS5420,
SOC_ARCH_EXYNOS5420_TRIMINFO,
SOC_ARCH_EXYNOS5440,
};
/**
- * EXYNOS TMU supported features.
- * TMU_SUPPORT_EMULATION - This feature is used to set a user defined
- * temperature to the TMU controller.
- * TMU_SUPPORT_MULTI_INST - This feature denotes that the soc
- * has many instances of TMU.
- * TMU_SUPPORT_TRIM_RELOAD - This feature shows that trimming can
- * be reloaded.
- * TMU_SUPPORT_FALLING_TRIP - This feature shows that interrupt can
- * be registered for falling trips also.
- * TMU_SUPPORT_READY_STATUS - This feature tells that the TMU current
- * state (active/idle) can be checked.
- * TMU_SUPPORT_EMUL_TIME - This feature allows setting the next temp emulation
- * sample time.
- * TMU_SUPPORT_ADDRESS_MULTIPLE - This feature tells that the different TMU
- * sensors share some common registers.
- * TMU_SUPPORT - macro to compare the above features with the supplied.
- */
-#define TMU_SUPPORT_EMULATION BIT(0)
-#define TMU_SUPPORT_MULTI_INST BIT(1)
-#define TMU_SUPPORT_TRIM_RELOAD BIT(2)
-#define TMU_SUPPORT_FALLING_TRIP BIT(3)
-#define TMU_SUPPORT_READY_STATUS BIT(4)
-#define TMU_SUPPORT_EMUL_TIME BIT(5)
-#define TMU_SUPPORT_ADDRESS_MULTIPLE BIT(6)
-
-#define TMU_SUPPORTS(a, b) (a->features & TMU_SUPPORT_ ## b)
-
-/**
- * struct exynos_tmu_register - register descriptors to access registers and
- * bitfields. The register validity, offsets and bitfield values may vary
- * slightly across different exynos SOC's.
- * @triminfo_data: register containing 2 point trimming data
- * @triminfo_ctrl: trim info controller register.
- * @triminfo_ctrl_count: the number of trim info controller register.
- * @tmu_ctrl: TMU main controller register.
- * @test_mux_addr_shift: shift bits of test mux address.
- * @therm_trip_mode_shift: shift bits of tripping mode in tmu_ctrl register.
- * @therm_trip_mode_mask: mask bits of tripping mode in tmu_ctrl register.
- * @therm_trip_en_shift: shift bits of tripping enable in tmu_ctrl register.
- * @tmu_status: register describing the TMU status.
- * @tmu_cur_temp: register containing the current temperature of the TMU.
- * @threshold_temp: register containing the base threshold level.
- * @threshold_th0: Register containing first set of rising levels.
- * @threshold_th1: Register containing second set of rising levels.
- * @threshold_th2: Register containing third set of rising levels.
- * @threshold_th3_l0_shift: shift bits of level0 threshold temperature.
- * @tmu_inten: register containing the different threshold interrupt
- enable bits.
- * @inten_rise0_shift: shift bits of rising 0 interrupt bits.
- * @inten_rise1_shift: shift bits of rising 1 interrupt bits.
- * @inten_rise2_shift: shift bits of rising 2 interrupt bits.
- * @inten_rise3_shift: shift bits of rising 3 interrupt bits.
- * @inten_fall0_shift: shift bits of falling 0 interrupt bits.
- * @tmu_intstat: Register containing the interrupt status values.
- * @tmu_intclear: Register for clearing the raised interrupt status.
- * @emul_con: TMU emulation controller register.
- * @emul_temp_shift: shift bits of emulation temperature.
- * @emul_time_shift: shift bits of emulation time.
- * @tmu_irqstatus: register to find which TMU generated interrupts.
- * @tmu_pmin: register to get/set the Pmin value.
- */
-struct exynos_tmu_registers {
- u32 triminfo_data;
-
- u32 triminfo_ctrl[MAX_TRIMINFO_CTRL_REG];
- u32 triminfo_ctrl_count;
-
- u32 tmu_ctrl;
- u32 test_mux_addr_shift;
- u32 therm_trip_mode_shift;
- u32 therm_trip_mode_mask;
- u32 therm_trip_en_shift;
-
- u32 tmu_status;
-
- u32 tmu_cur_temp;
-
- u32 threshold_temp;
-
- u32 threshold_th0;
- u32 threshold_th1;
- u32 threshold_th2;
- u32 threshold_th3_l0_shift;
-
- u32 tmu_inten;
- u32 inten_rise0_shift;
- u32 inten_rise1_shift;
- u32 inten_rise2_shift;
- u32 inten_rise3_shift;
- u32 inten_fall0_shift;
-
- u32 tmu_intstat;
-
- u32 tmu_intclear;
-
- u32 emul_con;
- u32 emul_temp_shift;
- u32 emul_time_shift;
-
- u32 tmu_irqstatus;
- u32 tmu_pmin;
-};
-
-/**
* struct exynos_tmu_platform_data
* @threshold: basic temperature for generating interrupt
* 25 <= threshold <= 125 [unit: degree Celsius]
@@ -192,16 +89,10 @@ struct exynos_tmu_registers {
* @first_point_trim: temp value of the first point trimming
* @second_point_trim: temp value of the second point trimming
* @default_temp_offset: default temperature offset in case of no trimming
- * @test_mux: indicates whether the SoC supports a test MUX
- * @triminfo_reload: reload value to read TRIMINFO register
* @cal_type: calibration type for temperature
* @freq_clip_table: Table representing frequency reduction percentage.
* @freq_tab_count: Count of the above table as frequency reduction may
 *	be applicable to only some of the trigger levels.
- * @registers: Pointer to structure containing all the TMU controller registers
- * and bitfields shifts and masks.
- * @features: a bitfield value indicating the features supported in SOC like
- * emulation, multi instance etc
*
* This structure is required for configuration of exynos_tmu driver.
*/
@@ -223,15 +114,11 @@ struct exynos_tmu_platform_data {
u8 first_point_trim;
u8 second_point_trim;
u8 default_temp_offset;
- u8 test_mux;
- u8 triminfo_reload[MAX_TRIMINFO_CTRL_REG];
enum calibration_type cal_type;
enum soc_type type;
struct freq_clip_table freq_tab[4];
unsigned int freq_tab_count;
- const struct exynos_tmu_registers *registers;
- unsigned int features;
};
/**
@@ -246,4 +133,12 @@ struct exynos_tmu_init_data {
struct exynos_tmu_platform_data tmu_data[];
};
+extern struct exynos_tmu_init_data const exynos3250_default_tmu_data;
+extern struct exynos_tmu_init_data const exynos4210_default_tmu_data;
+extern struct exynos_tmu_init_data const exynos4412_default_tmu_data;
+extern struct exynos_tmu_init_data const exynos5250_default_tmu_data;
+extern struct exynos_tmu_init_data const exynos5260_default_tmu_data;
+extern struct exynos_tmu_init_data const exynos5420_default_tmu_data;
+extern struct exynos_tmu_init_data const exynos5440_default_tmu_data;
+
#endif /* _EXYNOS_TMU_H */
diff --git a/drivers/thermal/samsung/exynos_tmu_data.c b/drivers/thermal/samsung/exynos_tmu_data.c
index 1724f6cdaef..b23910069f6 100644
--- a/drivers/thermal/samsung/exynos_tmu_data.c
+++ b/drivers/thermal/samsung/exynos_tmu_data.c
@@ -22,24 +22,6 @@
#include "exynos_thermal_common.h"
#include "exynos_tmu.h"
-#include "exynos_tmu_data.h"
-
-#if defined(CONFIG_CPU_EXYNOS4210)
-static const struct exynos_tmu_registers exynos4210_tmu_registers = {
- .triminfo_data = EXYNOS_TMU_REG_TRIMINFO,
- .tmu_ctrl = EXYNOS_TMU_REG_CONTROL,
- .tmu_status = EXYNOS_TMU_REG_STATUS,
- .tmu_cur_temp = EXYNOS_TMU_REG_CURRENT_TEMP,
- .threshold_temp = EXYNOS4210_TMU_REG_THRESHOLD_TEMP,
- .threshold_th0 = EXYNOS4210_TMU_REG_TRIG_LEVEL0,
- .tmu_inten = EXYNOS_TMU_REG_INTEN,
- .inten_rise0_shift = EXYNOS_TMU_INTEN_RISE0_SHIFT,
- .inten_rise1_shift = EXYNOS_TMU_INTEN_RISE1_SHIFT,
- .inten_rise2_shift = EXYNOS_TMU_INTEN_RISE2_SHIFT,
- .inten_rise3_shift = EXYNOS_TMU_INTEN_RISE3_SHIFT,
- .tmu_intstat = EXYNOS_TMU_REG_INTSTAT,
- .tmu_intclear = EXYNOS_TMU_REG_INTCLEAR,
-};
struct exynos_tmu_init_data const exynos4210_default_tmu_data = {
.tmu_data = {
@@ -75,40 +57,10 @@ struct exynos_tmu_init_data const exynos4210_default_tmu_data = {
},
.freq_tab_count = 2,
.type = SOC_ARCH_EXYNOS4210,
- .registers = &exynos4210_tmu_registers,
- .features = TMU_SUPPORT_READY_STATUS,
},
},
.tmu_count = 1,
};
-#endif
-
-#if defined(CONFIG_SOC_EXYNOS3250)
-static const struct exynos_tmu_registers exynos3250_tmu_registers = {
- .triminfo_data = EXYNOS_TMU_REG_TRIMINFO,
- .triminfo_ctrl[0] = EXYNOS_TMU_TRIMINFO_CON1,
- .triminfo_ctrl[1] = EXYNOS_TMU_TRIMINFO_CON2,
- .triminfo_ctrl_count = 2,
- .tmu_ctrl = EXYNOS_TMU_REG_CONTROL,
- .test_mux_addr_shift = EXYNOS4412_MUX_ADDR_SHIFT,
- .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT,
- .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK,
- .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT,
- .tmu_status = EXYNOS_TMU_REG_STATUS,
- .tmu_cur_temp = EXYNOS_TMU_REG_CURRENT_TEMP,
- .threshold_th0 = EXYNOS_THD_TEMP_RISE,
- .threshold_th1 = EXYNOS_THD_TEMP_FALL,
- .tmu_inten = EXYNOS_TMU_REG_INTEN,
- .inten_rise0_shift = EXYNOS_TMU_INTEN_RISE0_SHIFT,
- .inten_rise1_shift = EXYNOS_TMU_INTEN_RISE1_SHIFT,
- .inten_rise2_shift = EXYNOS_TMU_INTEN_RISE2_SHIFT,
- .inten_fall0_shift = EXYNOS_TMU_INTEN_FALL0_SHIFT,
- .tmu_intstat = EXYNOS_TMU_REG_INTSTAT,
- .tmu_intclear = EXYNOS_TMU_REG_INTCLEAR,
- .emul_con = EXYNOS_EMUL_CON,
- .emul_temp_shift = EXYNOS_EMUL_DATA_SHIFT,
- .emul_time_shift = EXYNOS_EMUL_TIME_SHIFT,
-};
#define EXYNOS3250_TMU_DATA \
.threshold_falling = 10, \
@@ -144,54 +96,17 @@ static const struct exynos_tmu_registers exynos3250_tmu_registers = {
.freq_clip_max = 400 * 1000, \
.temp_level = 95, \
}, \
- .freq_tab_count = 2, \
- .triminfo_reload[0] = EXYNOS_TRIMINFO_RELOAD_ENABLE, \
- .triminfo_reload[1] = EXYNOS_TRIMINFO_RELOAD_ENABLE, \
- .registers = &exynos3250_tmu_registers, \
- .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_TRIM_RELOAD | \
- TMU_SUPPORT_FALLING_TRIP | TMU_SUPPORT_READY_STATUS | \
- TMU_SUPPORT_EMUL_TIME)
-#endif
+ .freq_tab_count = 2
-#if defined(CONFIG_SOC_EXYNOS3250)
struct exynos_tmu_init_data const exynos3250_default_tmu_data = {
.tmu_data = {
{
EXYNOS3250_TMU_DATA,
.type = SOC_ARCH_EXYNOS3250,
- .test_mux = EXYNOS4412_MUX_ADDR_VALUE,
},
},
.tmu_count = 1,
};
-#endif
-
-#if defined(CONFIG_SOC_EXYNOS4412) || defined(CONFIG_SOC_EXYNOS5250)
-static const struct exynos_tmu_registers exynos4412_tmu_registers = {
- .triminfo_data = EXYNOS_TMU_REG_TRIMINFO,
- .triminfo_ctrl[0] = EXYNOS_TMU_TRIMINFO_CON2,
- .triminfo_ctrl_count = 1,
- .tmu_ctrl = EXYNOS_TMU_REG_CONTROL,
- .test_mux_addr_shift = EXYNOS4412_MUX_ADDR_SHIFT,
- .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT,
- .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK,
- .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT,
- .tmu_status = EXYNOS_TMU_REG_STATUS,
- .tmu_cur_temp = EXYNOS_TMU_REG_CURRENT_TEMP,
- .threshold_th0 = EXYNOS_THD_TEMP_RISE,
- .threshold_th1 = EXYNOS_THD_TEMP_FALL,
- .tmu_inten = EXYNOS_TMU_REG_INTEN,
- .inten_rise0_shift = EXYNOS_TMU_INTEN_RISE0_SHIFT,
- .inten_rise1_shift = EXYNOS_TMU_INTEN_RISE1_SHIFT,
- .inten_rise2_shift = EXYNOS_TMU_INTEN_RISE2_SHIFT,
- .inten_rise3_shift = EXYNOS_TMU_INTEN_RISE3_SHIFT,
- .inten_fall0_shift = EXYNOS_TMU_INTEN_FALL0_SHIFT,
- .tmu_intstat = EXYNOS_TMU_REG_INTSTAT,
- .tmu_intclear = EXYNOS_TMU_REG_INTCLEAR,
- .emul_con = EXYNOS_EMUL_CON,
- .emul_temp_shift = EXYNOS_EMUL_DATA_SHIFT,
- .emul_time_shift = EXYNOS_EMUL_TIME_SHIFT,
-};
#define EXYNOS4412_TMU_DATA \
.threshold_falling = 10, \
@@ -227,28 +142,18 @@ static const struct exynos_tmu_registers exynos4412_tmu_registers = {
.freq_clip_max = 400 * 1000, \
.temp_level = 95, \
}, \
- .freq_tab_count = 2, \
- .triminfo_reload[0] = EXYNOS_TRIMINFO_RELOAD_ENABLE, \
- .registers = &exynos4412_tmu_registers, \
- .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_TRIM_RELOAD | \
- TMU_SUPPORT_FALLING_TRIP | TMU_SUPPORT_READY_STATUS | \
- TMU_SUPPORT_EMUL_TIME)
-#endif
+ .freq_tab_count = 2
-#if defined(CONFIG_SOC_EXYNOS4412)
struct exynos_tmu_init_data const exynos4412_default_tmu_data = {
.tmu_data = {
{
EXYNOS4412_TMU_DATA,
.type = SOC_ARCH_EXYNOS4412,
- .test_mux = EXYNOS4412_MUX_ADDR_VALUE,
},
},
.tmu_count = 1,
};
-#endif
-#if defined(CONFIG_SOC_EXYNOS5250)
struct exynos_tmu_init_data const exynos5250_default_tmu_data = {
.tmu_data = {
{
@@ -258,31 +163,6 @@ struct exynos_tmu_init_data const exynos5250_default_tmu_data = {
},
.tmu_count = 1,
};
-#endif
-
-#if defined(CONFIG_SOC_EXYNOS5260)
-static const struct exynos_tmu_registers exynos5260_tmu_registers = {
- .triminfo_data = EXYNOS_TMU_REG_TRIMINFO,
- .tmu_ctrl = EXYNOS_TMU_REG_CONTROL,
- .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT,
- .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK,
- .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT,
- .tmu_status = EXYNOS_TMU_REG_STATUS,
- .tmu_cur_temp = EXYNOS_TMU_REG_CURRENT_TEMP,
- .threshold_th0 = EXYNOS_THD_TEMP_RISE,
- .threshold_th1 = EXYNOS_THD_TEMP_FALL,
- .tmu_inten = EXYNOS5260_TMU_REG_INTEN,
- .inten_rise0_shift = EXYNOS_TMU_INTEN_RISE0_SHIFT,
- .inten_rise1_shift = EXYNOS_TMU_INTEN_RISE1_SHIFT,
- .inten_rise2_shift = EXYNOS_TMU_INTEN_RISE2_SHIFT,
- .inten_rise3_shift = EXYNOS_TMU_INTEN_RISE3_SHIFT,
- .inten_fall0_shift = EXYNOS_TMU_INTEN_FALL0_SHIFT,
- .tmu_intstat = EXYNOS5260_TMU_REG_INTSTAT,
- .tmu_intclear = EXYNOS5260_TMU_REG_INTCLEAR,
- .emul_con = EXYNOS5260_EMUL_CON,
- .emul_temp_shift = EXYNOS_EMUL_DATA_SHIFT,
- .emul_time_shift = EXYNOS_EMUL_TIME_SHIFT,
-};
#define __EXYNOS5260_TMU_DATA \
.threshold_falling = 10, \
@@ -319,13 +199,10 @@ static const struct exynos_tmu_registers exynos5260_tmu_registers = {
.temp_level = 103, \
}, \
.freq_tab_count = 2, \
- .registers = &exynos5260_tmu_registers, \
#define EXYNOS5260_TMU_DATA \
__EXYNOS5260_TMU_DATA \
- .type = SOC_ARCH_EXYNOS5260, \
- .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_FALLING_TRIP | \
- TMU_SUPPORT_READY_STATUS | TMU_SUPPORT_EMUL_TIME)
+ .type = SOC_ARCH_EXYNOS5260
struct exynos_tmu_init_data const exynos5260_default_tmu_data = {
.tmu_data = {
@@ -337,82 +214,14 @@ struct exynos_tmu_init_data const exynos5260_default_tmu_data = {
},
.tmu_count = 5,
};
-#endif
-
-#if defined(CONFIG_SOC_EXYNOS5420)
-static const struct exynos_tmu_registers exynos5420_tmu_registers = {
- .triminfo_data = EXYNOS_TMU_REG_TRIMINFO,
- .tmu_ctrl = EXYNOS_TMU_REG_CONTROL,
- .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT,
- .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK,
- .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT,
- .tmu_status = EXYNOS_TMU_REG_STATUS,
- .tmu_cur_temp = EXYNOS_TMU_REG_CURRENT_TEMP,
- .threshold_th0 = EXYNOS_THD_TEMP_RISE,
- .threshold_th1 = EXYNOS_THD_TEMP_FALL,
- .tmu_inten = EXYNOS_TMU_REG_INTEN,
- .inten_rise0_shift = EXYNOS_TMU_INTEN_RISE0_SHIFT,
- .inten_rise1_shift = EXYNOS_TMU_INTEN_RISE1_SHIFT,
- .inten_rise2_shift = EXYNOS_TMU_INTEN_RISE2_SHIFT,
-	/* INTEN_RISE3 not available in exynos5420 */
- .inten_rise3_shift = EXYNOS_TMU_INTEN_RISE3_SHIFT,
- .inten_fall0_shift = EXYNOS_TMU_INTEN_FALL0_SHIFT,
- .tmu_intstat = EXYNOS_TMU_REG_INTSTAT,
- .tmu_intclear = EXYNOS_TMU_REG_INTCLEAR,
- .emul_con = EXYNOS_EMUL_CON,
- .emul_temp_shift = EXYNOS_EMUL_DATA_SHIFT,
- .emul_time_shift = EXYNOS_EMUL_TIME_SHIFT,
-};
-
-#define __EXYNOS5420_TMU_DATA \
- .threshold_falling = 10, \
- .trigger_levels[0] = 85, \
- .trigger_levels[1] = 103, \
- .trigger_levels[2] = 110, \
- .trigger_levels[3] = 120, \
- .trigger_enable[0] = true, \
- .trigger_enable[1] = true, \
- .trigger_enable[2] = true, \
- .trigger_enable[3] = false, \
- .trigger_type[0] = THROTTLE_ACTIVE, \
- .trigger_type[1] = THROTTLE_ACTIVE, \
- .trigger_type[2] = SW_TRIP, \
- .trigger_type[3] = HW_TRIP, \
- .max_trigger_level = 4, \
- .non_hw_trigger_levels = 3, \
- .gain = 8, \
- .reference_voltage = 16, \
- .noise_cancel_mode = 4, \
- .cal_type = TYPE_ONE_POINT_TRIMMING, \
- .efuse_value = 55, \
- .min_efuse_value = 40, \
- .max_efuse_value = 100, \
- .first_point_trim = 25, \
- .second_point_trim = 85, \
- .default_temp_offset = 50, \
- .freq_tab[0] = { \
- .freq_clip_max = 800 * 1000, \
- .temp_level = 85, \
- }, \
- .freq_tab[1] = { \
- .freq_clip_max = 200 * 1000, \
- .temp_level = 103, \
- }, \
- .freq_tab_count = 2, \
- .registers = &exynos5420_tmu_registers, \
#define EXYNOS5420_TMU_DATA \
- __EXYNOS5420_TMU_DATA \
- .type = SOC_ARCH_EXYNOS5250, \
- .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_FALLING_TRIP | \
- TMU_SUPPORT_READY_STATUS | TMU_SUPPORT_EMUL_TIME)
+ __EXYNOS5260_TMU_DATA \
+ .type = SOC_ARCH_EXYNOS5420
#define EXYNOS5420_TMU_DATA_SHARED \
- __EXYNOS5420_TMU_DATA \
- .type = SOC_ARCH_EXYNOS5420_TRIMINFO, \
- .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_FALLING_TRIP | \
- TMU_SUPPORT_READY_STATUS | TMU_SUPPORT_EMUL_TIME | \
- TMU_SUPPORT_ADDRESS_MULTIPLE)
+ __EXYNOS5260_TMU_DATA \
+ .type = SOC_ARCH_EXYNOS5420_TRIMINFO
struct exynos_tmu_init_data const exynos5420_default_tmu_data = {
.tmu_data = {
@@ -424,34 +233,6 @@ struct exynos_tmu_init_data const exynos5420_default_tmu_data = {
},
.tmu_count = 5,
};
-#endif
-
-#if defined(CONFIG_SOC_EXYNOS5440)
-static const struct exynos_tmu_registers exynos5440_tmu_registers = {
- .triminfo_data = EXYNOS5440_TMU_S0_7_TRIM,
- .tmu_ctrl = EXYNOS5440_TMU_S0_7_CTRL,
- .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT,
- .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK,
- .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT,
- .tmu_status = EXYNOS5440_TMU_S0_7_STATUS,
- .tmu_cur_temp = EXYNOS5440_TMU_S0_7_TEMP,
- .threshold_th0 = EXYNOS5440_TMU_S0_7_TH0,
- .threshold_th1 = EXYNOS5440_TMU_S0_7_TH1,
- .threshold_th2 = EXYNOS5440_TMU_S0_7_TH2,
- .threshold_th3_l0_shift = EXYNOS5440_TMU_TH_RISE4_SHIFT,
- .tmu_inten = EXYNOS5440_TMU_S0_7_IRQEN,
- .inten_rise0_shift = EXYNOS5440_TMU_INTEN_RISE0_SHIFT,
- .inten_rise1_shift = EXYNOS5440_TMU_INTEN_RISE1_SHIFT,
- .inten_rise2_shift = EXYNOS5440_TMU_INTEN_RISE2_SHIFT,
- .inten_rise3_shift = EXYNOS5440_TMU_INTEN_RISE3_SHIFT,
- .inten_fall0_shift = EXYNOS5440_TMU_INTEN_FALL0_SHIFT,
- .tmu_intstat = EXYNOS5440_TMU_S0_7_IRQ,
- .tmu_intclear = EXYNOS5440_TMU_S0_7_IRQ,
- .tmu_irqstatus = EXYNOS5440_TMU_IRQ_STATUS,
- .emul_con = EXYNOS5440_TMU_S0_7_DEBUG,
- .emul_temp_shift = EXYNOS_EMUL_DATA_SHIFT,
- .tmu_pmin = EXYNOS5440_TMU_PMIN,
-};
#define EXYNOS5440_TMU_DATA \
.trigger_levels[0] = 100, \
@@ -471,10 +252,7 @@ static const struct exynos_tmu_registers exynos5440_tmu_registers = {
.first_point_trim = 25, \
.second_point_trim = 70, \
.default_temp_offset = 25, \
- .type = SOC_ARCH_EXYNOS5440, \
- .registers = &exynos5440_tmu_registers, \
- .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_FALLING_TRIP | \
- TMU_SUPPORT_MULTI_INST | TMU_SUPPORT_ADDRESS_MULTIPLE),
+ .type = SOC_ARCH_EXYNOS5440
struct exynos_tmu_init_data const exynos5440_default_tmu_data = {
.tmu_data = {
@@ -484,4 +262,3 @@ struct exynos_tmu_init_data const exynos5440_default_tmu_data = {
},
.tmu_count = 3,
};
-#endif
diff --git a/drivers/thermal/samsung/exynos_tmu_data.h b/drivers/thermal/samsung/exynos_tmu_data.h
deleted file mode 100644
index 63de598c9c2..00000000000
--- a/drivers/thermal/samsung/exynos_tmu_data.h
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * exynos_tmu_data.h - Samsung EXYNOS tmu data header file
- *
- * Copyright (C) 2013 Samsung Electronics
- * Amit Daniel Kachhap <amit.daniel@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#ifndef _EXYNOS_TMU_DATA_H
-#define _EXYNOS_TMU_DATA_H
-
-/* Exynos generic registers */
-#define EXYNOS_TMU_REG_TRIMINFO 0x0
-#define EXYNOS_TMU_REG_CONTROL 0x20
-#define EXYNOS_TMU_REG_STATUS 0x28
-#define EXYNOS_TMU_REG_CURRENT_TEMP 0x40
-#define EXYNOS_TMU_REG_INTEN 0x70
-#define EXYNOS_TMU_REG_INTSTAT 0x74
-#define EXYNOS_TMU_REG_INTCLEAR 0x78
-
-#define EXYNOS_TMU_TEMP_MASK 0xff
-#define EXYNOS_TMU_REF_VOLTAGE_SHIFT 24
-#define EXYNOS_TMU_REF_VOLTAGE_MASK 0x1f
-#define EXYNOS_TMU_BUF_SLOPE_SEL_MASK 0xf
-#define EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT 8
-#define EXYNOS_TMU_CORE_EN_SHIFT 0
-
-/* Exynos3250 specific registers */
-#define EXYNOS_TMU_TRIMINFO_CON1 0x10
-
-/* Exynos4210 specific registers */
-#define EXYNOS4210_TMU_REG_THRESHOLD_TEMP 0x44
-#define EXYNOS4210_TMU_REG_TRIG_LEVEL0 0x50
-
-/* Exynos5250, Exynos4412, Exynos3250 specific registers */
-#define EXYNOS_TMU_TRIMINFO_CON2 0x14
-#define EXYNOS_THD_TEMP_RISE 0x50
-#define EXYNOS_THD_TEMP_FALL 0x54
-#define EXYNOS_EMUL_CON 0x80
-
-#define EXYNOS_TRIMINFO_RELOAD_ENABLE 1
-#define EXYNOS_TRIMINFO_25_SHIFT 0
-#define EXYNOS_TRIMINFO_85_SHIFT 8
-#define EXYNOS_TMU_TRIP_MODE_SHIFT 13
-#define EXYNOS_TMU_TRIP_MODE_MASK 0x7
-#define EXYNOS_TMU_THERM_TRIP_EN_SHIFT 12
-
-#define EXYNOS_TMU_INTEN_RISE0_SHIFT 0
-#define EXYNOS_TMU_INTEN_RISE1_SHIFT 4
-#define EXYNOS_TMU_INTEN_RISE2_SHIFT 8
-#define EXYNOS_TMU_INTEN_RISE3_SHIFT 12
-#define EXYNOS_TMU_INTEN_FALL0_SHIFT 16
-
-#define EXYNOS_EMUL_TIME 0x57F0
-#define EXYNOS_EMUL_TIME_MASK 0xffff
-#define EXYNOS_EMUL_TIME_SHIFT 16
-#define EXYNOS_EMUL_DATA_SHIFT 8
-#define EXYNOS_EMUL_DATA_MASK 0xFF
-#define EXYNOS_EMUL_ENABLE 0x1
-
-#define EXYNOS_MAX_TRIGGER_PER_REG 4
-
-/* Exynos5260 specific */
-#define EXYNOS5260_TMU_REG_INTEN 0xC0
-#define EXYNOS5260_TMU_REG_INTSTAT 0xC4
-#define EXYNOS5260_TMU_REG_INTCLEAR 0xC8
-#define EXYNOS5260_EMUL_CON 0x100
-
-/* Exynos4412 specific */
-#define EXYNOS4412_MUX_ADDR_VALUE 6
-#define EXYNOS4412_MUX_ADDR_SHIFT 20
-
-/*exynos5440 specific registers*/
-#define EXYNOS5440_TMU_S0_7_TRIM 0x000
-#define EXYNOS5440_TMU_S0_7_CTRL 0x020
-#define EXYNOS5440_TMU_S0_7_DEBUG 0x040
-#define EXYNOS5440_TMU_S0_7_STATUS 0x060
-#define EXYNOS5440_TMU_S0_7_TEMP 0x0f0
-#define EXYNOS5440_TMU_S0_7_TH0 0x110
-#define EXYNOS5440_TMU_S0_7_TH1 0x130
-#define EXYNOS5440_TMU_S0_7_TH2 0x150
-#define EXYNOS5440_TMU_S0_7_IRQEN 0x210
-#define EXYNOS5440_TMU_S0_7_IRQ 0x230
-/* exynos5440 common registers */
-#define EXYNOS5440_TMU_IRQ_STATUS 0x000
-#define EXYNOS5440_TMU_PMIN 0x004
-
-#define EXYNOS5440_TMU_INTEN_RISE0_SHIFT 0
-#define EXYNOS5440_TMU_INTEN_RISE1_SHIFT 1
-#define EXYNOS5440_TMU_INTEN_RISE2_SHIFT 2
-#define EXYNOS5440_TMU_INTEN_RISE3_SHIFT 3
-#define EXYNOS5440_TMU_INTEN_FALL0_SHIFT 4
-#define EXYNOS5440_TMU_TH_RISE4_SHIFT 24
-#define EXYNOS5440_EFUSE_SWAP_OFFSET 8
-
-#if defined(CONFIG_SOC_EXYNOS3250)
-extern struct exynos_tmu_init_data const exynos3250_default_tmu_data;
-#define EXYNOS3250_TMU_DRV_DATA (&exynos3250_default_tmu_data)
-#else
-#define EXYNOS3250_TMU_DRV_DATA (NULL)
-#endif
-
-#if defined(CONFIG_CPU_EXYNOS4210)
-extern struct exynos_tmu_init_data const exynos4210_default_tmu_data;
-#define EXYNOS4210_TMU_DRV_DATA (&exynos4210_default_tmu_data)
-#else
-#define EXYNOS4210_TMU_DRV_DATA (NULL)
-#endif
-
-#if defined(CONFIG_SOC_EXYNOS4412)
-extern struct exynos_tmu_init_data const exynos4412_default_tmu_data;
-#define EXYNOS4412_TMU_DRV_DATA (&exynos4412_default_tmu_data)
-#else
-#define EXYNOS4412_TMU_DRV_DATA (NULL)
-#endif
-
-#if defined(CONFIG_SOC_EXYNOS5250)
-extern struct exynos_tmu_init_data const exynos5250_default_tmu_data;
-#define EXYNOS5250_TMU_DRV_DATA (&exynos5250_default_tmu_data)
-#else
-#define EXYNOS5250_TMU_DRV_DATA (NULL)
-#endif
-
-#if defined(CONFIG_SOC_EXYNOS5260)
-extern struct exynos_tmu_init_data const exynos5260_default_tmu_data;
-#define EXYNOS5260_TMU_DRV_DATA (&exynos5260_default_tmu_data)
-#else
-#define EXYNOS5260_TMU_DRV_DATA (NULL)
-#endif
-
-#if defined(CONFIG_SOC_EXYNOS5420)
-extern struct exynos_tmu_init_data const exynos5420_default_tmu_data;
-#define EXYNOS5420_TMU_DRV_DATA (&exynos5420_default_tmu_data)
-#else
-#define EXYNOS5420_TMU_DRV_DATA (NULL)
-#endif
-
-#if defined(CONFIG_SOC_EXYNOS5440)
-extern struct exynos_tmu_init_data const exynos5440_default_tmu_data;
-#define EXYNOS5440_TMU_DRV_DATA (&exynos5440_default_tmu_data)
-#else
-#define EXYNOS5440_TMU_DRV_DATA (NULL)
-#endif
-
-#endif /*_EXYNOS_TMU_DATA_H*/
diff --git a/drivers/thermal/tegra_soctherm.c b/drivers/thermal/tegra_soctherm.c
new file mode 100644
index 00000000000..9197fc05c5c
--- /dev/null
+++ b/drivers/thermal/tegra_soctherm.c
@@ -0,0 +1,476 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Author:
+ * Mikko Perttunen <mperttunen@nvidia.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/thermal.h>
+
+#include <soc/tegra/fuse.h>
+
+#define SENSOR_CONFIG0 0
+#define SENSOR_CONFIG0_STOP BIT(0)
+#define SENSOR_CONFIG0_TALL_SHIFT 8
+#define SENSOR_CONFIG0_TCALC_OVER BIT(4)
+#define SENSOR_CONFIG0_OVER BIT(3)
+#define SENSOR_CONFIG0_CPTR_OVER BIT(2)
+
+#define SENSOR_CONFIG1 4
+#define SENSOR_CONFIG1_TSAMPLE_SHIFT 0
+#define SENSOR_CONFIG1_TIDDQ_EN_SHIFT 15
+#define SENSOR_CONFIG1_TEN_COUNT_SHIFT 24
+#define SENSOR_CONFIG1_TEMP_ENABLE BIT(31)
+
+#define SENSOR_CONFIG2 8
+#define SENSOR_CONFIG2_THERMA_SHIFT 16
+#define SENSOR_CONFIG2_THERMB_SHIFT 0
+
+#define SENSOR_PDIV 0x1c0
+#define SENSOR_PDIV_T124 0x8888
+#define SENSOR_HOTSPOT_OFF 0x1c4
+#define SENSOR_HOTSPOT_OFF_T124 0x00060600
+#define SENSOR_TEMP1 0x1c8
+#define SENSOR_TEMP2 0x1cc
+
+#define SENSOR_TEMP_MASK 0xffff
+#define READBACK_VALUE_MASK 0xff00
+#define READBACK_VALUE_SHIFT 8
+#define READBACK_ADD_HALF BIT(7)
+#define READBACK_NEGATE BIT(1)
+
+#define FUSE_TSENSOR8_CALIB 0x180
+#define FUSE_SPARE_REALIGNMENT_REG_0 0x1fc
+
+#define FUSE_TSENSOR_CALIB_CP_TS_BASE_MASK 0x1fff
+#define FUSE_TSENSOR_CALIB_FT_TS_BASE_MASK (0x1fff << 13)
+#define FUSE_TSENSOR_CALIB_FT_TS_BASE_SHIFT 13
+
+#define FUSE_TSENSOR8_CALIB_CP_TS_BASE_MASK 0x3ff
+#define FUSE_TSENSOR8_CALIB_FT_TS_BASE_MASK (0x7ff << 10)
+#define FUSE_TSENSOR8_CALIB_FT_TS_BASE_SHIFT 10
+
+#define FUSE_SPARE_REALIGNMENT_REG_SHIFT_CP_MASK 0x3f
+#define FUSE_SPARE_REALIGNMENT_REG_SHIFT_FT_MASK (0x1f << 21)
+#define FUSE_SPARE_REALIGNMENT_REG_SHIFT_FT_SHIFT 21
+
+#define NOMINAL_CALIB_FT_T124 105
+#define NOMINAL_CALIB_CP_T124 25
+
+struct tegra_tsensor_configuration {
+ u32 tall, tsample, tiddq_en, ten_count, pdiv, tsample_ate, pdiv_ate;
+};
+
+struct tegra_tsensor {
+ const struct tegra_tsensor_configuration *config;
+ u32 base, calib_fuse_offset;
+ /* Correction values used to modify values read from calibration fuses */
+ s32 fuse_corr_alpha, fuse_corr_beta;
+};
+
+struct tegra_thermctl_zone {
+ void __iomem *reg;
+ unsigned int shift;
+};
+
+static const struct tegra_tsensor_configuration t124_tsensor_config = {
+ .tall = 16300,
+ .tsample = 120,
+ .tiddq_en = 1,
+ .ten_count = 1,
+ .pdiv = 8,
+ .tsample_ate = 480,
+ .pdiv_ate = 8
+};
+
+static const struct tegra_tsensor t124_tsensors[] = {
+ {
+ .config = &t124_tsensor_config,
+ .base = 0xc0,
+ .calib_fuse_offset = 0x098,
+ .fuse_corr_alpha = 1135400,
+ .fuse_corr_beta = -6266900,
+ },
+ {
+ .config = &t124_tsensor_config,
+ .base = 0xe0,
+ .calib_fuse_offset = 0x084,
+ .fuse_corr_alpha = 1122220,
+ .fuse_corr_beta = -5700700,
+ },
+ {
+ .config = &t124_tsensor_config,
+ .base = 0x100,
+ .calib_fuse_offset = 0x088,
+ .fuse_corr_alpha = 1127000,
+ .fuse_corr_beta = -6768200,
+ },
+ {
+ .config = &t124_tsensor_config,
+ .base = 0x120,
+ .calib_fuse_offset = 0x12c,
+ .fuse_corr_alpha = 1110900,
+ .fuse_corr_beta = -6232000,
+ },
+ {
+ .config = &t124_tsensor_config,
+ .base = 0x140,
+ .calib_fuse_offset = 0x158,
+ .fuse_corr_alpha = 1122300,
+ .fuse_corr_beta = -5936400,
+ },
+ {
+ .config = &t124_tsensor_config,
+ .base = 0x160,
+ .calib_fuse_offset = 0x15c,
+ .fuse_corr_alpha = 1145700,
+ .fuse_corr_beta = -7124600,
+ },
+ {
+ .config = &t124_tsensor_config,
+ .base = 0x180,
+ .calib_fuse_offset = 0x154,
+ .fuse_corr_alpha = 1120100,
+ .fuse_corr_beta = -6000500,
+ },
+ {
+ .config = &t124_tsensor_config,
+ .base = 0x1a0,
+ .calib_fuse_offset = 0x160,
+ .fuse_corr_alpha = 1106500,
+ .fuse_corr_beta = -6729300,
+ },
+};
+
+struct tegra_soctherm {
+ struct reset_control *reset;
+ struct clk *clock_tsensor;
+ struct clk *clock_soctherm;
+ void __iomem *regs;
+
+ struct thermal_zone_device *thermctl_tzs[4];
+};
+
+struct tsensor_shared_calibration {
+ u32 base_cp, base_ft;
+ u32 actual_temp_cp, actual_temp_ft;
+};
+
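+/*
+ * Read the shared calibration fuses: the CP and FT base readings plus the
+ * signed realignment offsets used to derive the actual CP (25C nominal)
+ * and FT (105C nominal) calibration temperatures.
+ */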
+static int calculate_shared_calibration(struct tsensor_shared_calibration *r)
+{
+ u32 val, shifted_cp, shifted_ft;
+ int err;
+
+ err = tegra_fuse_readl(FUSE_TSENSOR8_CALIB, &val);
+ if (err)
+ return err;
+ r->base_cp = val & FUSE_TSENSOR8_CALIB_CP_TS_BASE_MASK;
+ r->base_ft = (val & FUSE_TSENSOR8_CALIB_FT_TS_BASE_MASK)
+ >> FUSE_TSENSOR8_CALIB_FT_TS_BASE_SHIFT;
+ val = ((val & FUSE_SPARE_REALIGNMENT_REG_SHIFT_FT_MASK)
+ >> FUSE_SPARE_REALIGNMENT_REG_SHIFT_FT_SHIFT);
+ shifted_ft = sign_extend32(val, 4);
+
+ err = tegra_fuse_readl(FUSE_SPARE_REALIGNMENT_REG_0, &val);
+ if (err)
+ return err;
+ shifted_cp = sign_extend32(val, 5);
+
+ r->actual_temp_cp = 2 * NOMINAL_CALIB_CP_T124 + shifted_cp;
+ r->actual_temp_ft = 2 * NOMINAL_CALIB_FT_T124 + shifted_ft;
+
+ return 0;
+}
+
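+/*
+ * Signed 64-bit division with reduced truncation error: the dividend is
+ * scaled up by 2^16 before dividing and the quotient is scaled back down.
+ */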
+static s64 div64_s64_precise(s64 a, s64 b)
+{
+ s64 r, al;
+
+ /* Scale up for increased precision division */
+ al = a << 16;
+
+ r = div64_s64(al * 2 + 1, 2 * b);
+ return r >> 16;
+}
+
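+/*
+ * Derive the sensor's THERMA (slope) and THERMB (offset) coefficients from
+ * its calibration fuse and the shared CP/FT calibration points, apply the
+ * per-sensor correction factors, and pack the result in the SENSOR_CONFIG2
+ * layout.
+ */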
+static int
+calculate_tsensor_calibration(const struct tegra_tsensor *sensor,
+ const struct tsensor_shared_calibration *shared,
+ u32 *calib)
+{
+ u32 val;
+ s32 actual_tsensor_ft, actual_tsensor_cp, delta_sens, delta_temp,
+ mult, div;
+ s16 therma, thermb;
+ s64 tmp;
+ int err;
+
+ err = tegra_fuse_readl(sensor->calib_fuse_offset, &val);
+ if (err)
+ return err;
+
+ actual_tsensor_cp = (shared->base_cp * 64) + sign_extend32(val, 12);
+ val = (val & FUSE_TSENSOR_CALIB_FT_TS_BASE_MASK)
+ >> FUSE_TSENSOR_CALIB_FT_TS_BASE_SHIFT;
+ actual_tsensor_ft = (shared->base_ft * 32) + sign_extend32(val, 12);
+
+ delta_sens = actual_tsensor_ft - actual_tsensor_cp;
+ delta_temp = shared->actual_temp_ft - shared->actual_temp_cp;
+
+ mult = sensor->config->pdiv * sensor->config->tsample_ate;
+ div = sensor->config->tsample * sensor->config->pdiv_ate;
+
+ therma = div64_s64_precise((s64) delta_temp * (1LL << 13) * mult,
+ (s64) delta_sens * div);
+
+ tmp = (s64)actual_tsensor_ft * shared->actual_temp_cp -
+ (s64)actual_tsensor_cp * shared->actual_temp_ft;
+ thermb = div64_s64_precise(tmp, (s64)delta_sens);
+
+ therma = div64_s64_precise((s64)therma * sensor->fuse_corr_alpha,
+ (s64)1000000LL);
+ thermb = div64_s64_precise((s64)thermb * sensor->fuse_corr_alpha +
+ sensor->fuse_corr_beta, (s64)1000000LL);
+
+ *calib = ((u16)therma << SENSOR_CONFIG2_THERMA_SHIFT) |
+ ((u16)thermb << SENSOR_CONFIG2_THERMB_SHIFT);
+
+ return 0;
+}
+
+static int enable_tsensor(struct tegra_soctherm *tegra,
+ const struct tegra_tsensor *sensor,
+ const struct tsensor_shared_calibration *shared)
+{
+ void __iomem *base = tegra->regs + sensor->base;
+ unsigned int val;
+ u32 calib;
+ int err;
+
+ err = calculate_tsensor_calibration(sensor, shared, &calib);
+ if (err)
+ return err;
+
+ val = sensor->config->tall << SENSOR_CONFIG0_TALL_SHIFT;
+ writel(val, base + SENSOR_CONFIG0);
+
+ val = (sensor->config->tsample - 1) << SENSOR_CONFIG1_TSAMPLE_SHIFT;
+ val |= sensor->config->tiddq_en << SENSOR_CONFIG1_TIDDQ_EN_SHIFT;
+ val |= sensor->config->ten_count << SENSOR_CONFIG1_TEN_COUNT_SHIFT;
+ val |= SENSOR_CONFIG1_TEMP_ENABLE;
+ writel(val, base + SENSOR_CONFIG1);
+
+ writel(calib, base + SENSOR_CONFIG2);
+
+ return 0;
+}
+
+/*
+ * Translate from soctherm readback format to millicelsius.
+ * The soctherm readback format in bits is as follows:
+ * TTTTTTTT H______N
+ * where T's contain the temperature in Celsius,
+ * H denotes an addition of 0.5 Celsius and N denotes negation
+ * of the final value.
+ */
+static long translate_temp(u16 val)
+{
+ long t;
+
+ t = ((val & READBACK_VALUE_MASK) >> READBACK_VALUE_SHIFT) * 1000;
+ if (val & READBACK_ADD_HALF)
+ t += 500;
+ if (val & READBACK_NEGATE)
+ t *= -1;
+
+ return t;
+}
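+/* Example: a readback value of 0x4180 translates to 65500 (65.5 degrees C). */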
+
+static int tegra_thermctl_get_temp(void *data, long *out_temp)
+{
+ struct tegra_thermctl_zone *zone = data;
+ u32 val;
+
+ val = (readl(zone->reg) >> zone->shift) & SENSOR_TEMP_MASK;
+ *out_temp = translate_temp(val);
+
+ return 0;
+}
+
+static const struct thermal_zone_of_device_ops tegra_of_thermal_ops = {
+ .get_temp = tegra_thermctl_get_temp,
+};
+
+static const struct of_device_id tegra_soctherm_of_match[] = {
+ { .compatible = "nvidia,tegra124-soctherm" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_soctherm_of_match);
+
+struct thermctl_zone_desc {
+ unsigned int offset;
+ unsigned int shift;
+};
+
+static const struct thermctl_zone_desc t124_thermctl_temp_zones[] = {
+ { SENSOR_TEMP1, 16 },
+ { SENSOR_TEMP2, 16 },
+ { SENSOR_TEMP1, 0 },
+ { SENSOR_TEMP2, 0 }
+};
+
+static int tegra_soctherm_probe(struct platform_device *pdev)
+{
+ struct tegra_soctherm *tegra;
+ struct thermal_zone_device *tz;
+ struct tsensor_shared_calibration shared_calib;
+ struct resource *res;
+ unsigned int i;
+ int err;
+
+ const struct tegra_tsensor *tsensors = t124_tsensors;
+
+ tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
+ if (!tegra)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ tegra->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(tegra->regs))
+ return PTR_ERR(tegra->regs);
+
+ tegra->reset = devm_reset_control_get(&pdev->dev, "soctherm");
+ if (IS_ERR(tegra->reset)) {
+ dev_err(&pdev->dev, "can't get soctherm reset\n");
+ return PTR_ERR(tegra->reset);
+ }
+
+ tegra->clock_tsensor = devm_clk_get(&pdev->dev, "tsensor");
+ if (IS_ERR(tegra->clock_tsensor)) {
+ dev_err(&pdev->dev, "can't get tsensor clock\n");
+ return PTR_ERR(tegra->clock_tsensor);
+ }
+
+ tegra->clock_soctherm = devm_clk_get(&pdev->dev, "soctherm");
+ if (IS_ERR(tegra->clock_soctherm)) {
+ dev_err(&pdev->dev, "can't get soctherm clock\n");
+ return PTR_ERR(tegra->clock_soctherm);
+ }
+
+ reset_control_assert(tegra->reset);
+
+ err = clk_prepare_enable(tegra->clock_soctherm);
+ if (err)
+ return err;
+
+ err = clk_prepare_enable(tegra->clock_tsensor);
+ if (err) {
+ clk_disable_unprepare(tegra->clock_soctherm);
+ return err;
+ }
+
+ reset_control_deassert(tegra->reset);
+
+ /* Initialize raw sensors */
+
+ err = calculate_shared_calibration(&shared_calib);
+ if (err)
+ goto disable_clocks;
+
+ for (i = 0; i < ARRAY_SIZE(t124_tsensors); ++i) {
+ err = enable_tsensor(tegra, tsensors + i, &shared_calib);
+ if (err)
+ goto disable_clocks;
+ }
+
+ writel(SENSOR_PDIV_T124, tegra->regs + SENSOR_PDIV);
+ writel(SENSOR_HOTSPOT_OFF_T124, tegra->regs + SENSOR_HOTSPOT_OFF);
+
+ /* Initialize thermctl sensors */
+
+ for (i = 0; i < ARRAY_SIZE(tegra->thermctl_tzs); ++i) {
+ struct tegra_thermctl_zone *zone =
+ devm_kzalloc(&pdev->dev, sizeof(*zone), GFP_KERNEL);
+ if (!zone) {
+ err = -ENOMEM;
+ goto unregister_tzs;
+ }
+
+ zone->reg = tegra->regs + t124_thermctl_temp_zones[i].offset;
+ zone->shift = t124_thermctl_temp_zones[i].shift;
+
+ tz = thermal_zone_of_sensor_register(&pdev->dev, i, zone,
+ &tegra_of_thermal_ops);
+ if (IS_ERR(tz)) {
+ err = PTR_ERR(tz);
+ dev_err(&pdev->dev, "failed to register sensor: %d\n",
+ err);
+ goto unregister_tzs;
+ }
+
+ tegra->thermctl_tzs[i] = tz;
+ }
+
+ return 0;
+
+unregister_tzs:
+ while (i--)
+ thermal_zone_of_sensor_unregister(&pdev->dev,
+ tegra->thermctl_tzs[i]);
+
+disable_clocks:
+ clk_disable_unprepare(tegra->clock_tsensor);
+ clk_disable_unprepare(tegra->clock_soctherm);
+
+ return err;
+}
+
+static int tegra_soctherm_remove(struct platform_device *pdev)
+{
+ struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(tegra->thermctl_tzs); ++i) {
+ thermal_zone_of_sensor_unregister(&pdev->dev,
+ tegra->thermctl_tzs[i]);
+ }
+
+ clk_disable_unprepare(tegra->clock_tsensor);
+ clk_disable_unprepare(tegra->clock_soctherm);
+
+ return 0;
+}
+
+static struct platform_driver tegra_soctherm_driver = {
+ .probe = tegra_soctherm_probe,
+ .remove = tegra_soctherm_remove,
+ .driver = {
+ .name = "tegra-soctherm",
+ .of_match_table = tegra_soctherm_of_match,
+ },
+};
+module_platform_driver(tegra_soctherm_driver);
+
+MODULE_AUTHOR("Mikko Perttunen <mperttunen@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra SOCTHERM thermal management driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 43b90709585..84fdf0792e2 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -368,7 +368,7 @@ static void handle_critical_trips(struct thermal_zone_device *tz,
tz->ops->get_trip_temp(tz, trip, &trip_temp);
/* If we have not crossed the trip_temp, we do not care. */
- if (tz->temperature < trip_temp)
+ if (trip_temp <= 0 || tz->temperature < trip_temp)
return;
trace_thermal_zone_trip(tz, trip, trip_type);
@@ -757,6 +757,7 @@ policy_store(struct device *dev, struct device_attribute *attr,
snprintf(name, sizeof(name), "%s", buf);
mutex_lock(&thermal_governor_lock);
+ mutex_lock(&tz->lock);
gov = __find_governor(strim(name));
if (!gov)
@@ -766,6 +767,7 @@ policy_store(struct device *dev, struct device_attribute *attr,
ret = count;
exit:
+ mutex_unlock(&tz->lock);
mutex_unlock(&thermal_governor_lock);
return ret;
}
@@ -1835,10 +1837,10 @@ static int __init thermal_init(void)
exit_netlink:
genetlink_exit();
-unregister_governors:
- thermal_unregister_governors();
unregister_class:
class_unregister(&thermal_class);
+unregister_governors:
+ thermal_unregister_governors();
error:
idr_destroy(&thermal_tz_idr);
idr_destroy(&thermal_cdev_idr);
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index d15d243de27..9083e752062 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -89,9 +89,27 @@ static inline void thermal_gov_user_space_unregister(void) {}
#ifdef CONFIG_THERMAL_OF
int of_parse_thermal_zones(void);
void of_thermal_destroy_zones(void);
+int of_thermal_get_ntrips(struct thermal_zone_device *);
+bool of_thermal_is_trip_valid(struct thermal_zone_device *, int);
+const struct thermal_trip * const
+of_thermal_get_trip_points(struct thermal_zone_device *);
#else
static inline int of_parse_thermal_zones(void) { return 0; }
static inline void of_thermal_destroy_zones(void) { }
+static inline int of_thermal_get_ntrips(struct thermal_zone_device *tz)
+{
+ return 0;
+}
+static inline bool of_thermal_is_trip_valid(struct thermal_zone_device *tz,
+ int trip)
+{
+ return 0;
+}
+static inline const struct thermal_trip * const
+of_thermal_get_trip_points(struct thermal_zone_device *tz)
+{
+ return NULL;
+}
#endif
#endif /* __THERMAL_CORE_H__ */
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index 9eec26dc044..5fd03865e39 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -286,6 +286,11 @@ static int ti_thermal_get_crit_temp(struct thermal_zone_device *thermal,
return ti_thermal_get_trip_temp(thermal, OMAP_TRIP_NUMBER - 1, temp);
}
+static const struct thermal_zone_of_device_ops ti_of_thermal_ops = {
+ .get_temp = __ti_thermal_get_temp,
+ .get_trend = __ti_thermal_get_trend,
+};
+
static struct thermal_zone_device_ops ti_thermal_ops = {
.get_temp = ti_thermal_get_temp,
.get_trend = ti_thermal_get_trend,
@@ -333,8 +338,7 @@ int ti_thermal_expose_sensor(struct ti_bandgap *bgp, int id,
/* in case this is specified by DT */
data->ti_thermal = thermal_zone_of_sensor_register(bgp->dev, id,
- data, __ti_thermal_get_temp,
- __ti_thermal_get_trend);
+ data, &ti_of_thermal_ops);
if (IS_ERR(data->ti_thermal)) {
/* Create thermal zone */
data->ti_thermal = thermal_zone_device_register(domain,
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index b4b58ae24c6..555de07db59 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -530,7 +530,7 @@ static int dw8250_resume(struct device *dev)
}
#endif /* CONFIG_PM_SLEEP */
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static int dw8250_runtime_suspend(struct device *dev)
{
struct dw8250_data *data = dev_get_drvdata(dev);
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
index 6f93123a428..7a11fac775c 100644
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -244,7 +244,7 @@ static int mtk8250_resume(struct device *dev)
}
#endif /* CONFIG_PM_SLEEP */
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static int mtk8250_runtime_suspend(struct device *dev)
{
struct mtk8250_data *data = dev_get_drvdata(dev);
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 336602eb453..96b69bfd773 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -561,7 +561,7 @@ static int omap_8250_startup(struct uart_port *port)
if (ret)
goto err;
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
up->capabilities |= UART_CAP_RPM;
#endif
@@ -997,12 +997,12 @@ static int omap8250_probe(struct platform_device *pdev)
up.port.fifosize = 64;
up.tx_loadsz = 64;
up.capabilities = UART_CAP_FIFO;
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
/*
- * PM_RUNTIME is mostly transparent. However to do it right we need to a
+ * Runtime PM is mostly transparent. However to do it right we need a
* TX empty interrupt before we can put the device to auto idle. So if
- * PM_RUNTIME is not enabled we don't add that flag and can spare that
- * one extra interrupt in the TX path.
+ * PM is not enabled we don't add that flag and can spare that one extra
+ * interrupt in the TX path.
*/
up.capabilities |= UART_CAP_RPM;
#endif
@@ -1105,7 +1105,7 @@ static int omap8250_remove(struct platform_device *pdev)
return 0;
}
-#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_RUNTIME)
+#ifdef CONFIG_PM
static inline void omap8250_enable_wakeirq(struct omap8250_priv *priv,
bool enable)
@@ -1179,7 +1179,7 @@ static int omap8250_resume(struct device *dev)
#define omap8250_complete NULL
#endif
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static int omap8250_lost_context(struct uart_8250_port *up)
{
u32 val;
diff --git a/drivers/tty/serial/mfd.c b/drivers/tty/serial/mfd.c
index e1f4fdad02c..8fe4501d756 100644
--- a/drivers/tty/serial/mfd.c
+++ b/drivers/tty/serial/mfd.c
@@ -1252,12 +1252,7 @@ static int serial_hsu_resume(struct pci_dev *pdev)
}
return 0;
}
-#else
-#define serial_hsu_suspend NULL
-#define serial_hsu_resume NULL
-#endif
-#ifdef CONFIG_PM_RUNTIME
static int serial_hsu_runtime_idle(struct device *dev)
{
pm_schedule_suspend(dev, 500);
@@ -1274,6 +1269,8 @@ static int serial_hsu_runtime_resume(struct device *dev)
return 0;
}
#else
+#define serial_hsu_suspend NULL
+#define serial_hsu_resume NULL
#define serial_hsu_runtime_idle NULL
#define serial_hsu_runtime_suspend NULL
#define serial_hsu_runtime_resume NULL
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index 8abe8ea6565..62da8534ba7 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -1792,7 +1792,7 @@ static void __exit msm_serial_hs_exit(void)
}
module_exit(msm_serial_hs_exit);
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static int msm_hs_runtime_idle(struct device *dev)
{
/*
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 435478a245d..2e1073da671 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -1776,7 +1776,7 @@ static void serial_omap_mdr1_errataset(struct uart_omap_port *up, u8 mdr1)
}
}
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static void serial_omap_restore_context(struct uart_omap_port *up)
{
if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index 9cfda6a7219..cc0ced08bae 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -43,7 +43,7 @@ config USB_DYNAMIC_MINORS
config USB_OTG
bool "OTG support"
- depends on PM_RUNTIME
+ depends on PM
default n
help
The most notable feature of USB OTG is support for a
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index e752c3098f3..395649f357a 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -1739,7 +1739,7 @@ static int isp1760_hub_status_data(struct usb_hcd *hcd, char *buf)
int retval = 1;
unsigned long flags;
- /* if !PM_RUNTIME, root hub timers won't get shut down ... */
+ /* if !PM, root hub timers won't get shut down ... */
if (!HC_IS_RUNNING(hcd->state))
return 0;
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index 75811dd5a9d..036924e640f 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -3087,7 +3087,7 @@ static int oxu_hub_status_data(struct usb_hcd *hcd, char *buf)
int ports, i, retval = 1;
unsigned long flags;
- /* if !PM_RUNTIME, root hub timers won't get shut down ... */
+ /* if !PM, root hub timers won't get shut down ... */
if (!HC_IS_RUNNING(hcd->state))
return 0;
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 0cd1f44f0ee..c6d0c8e745b 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -20,7 +20,7 @@ config AB8500_USB
config FSL_USB2_OTG
bool "Freescale USB OTG Transceiver Driver"
- depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM && PM_RUNTIME
+ depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM && PM
select USB_OTG
select USB_PHY
help
@@ -153,7 +153,7 @@ config USB_MSM_OTG
config USB_MV_OTG
tristate "Marvell USB OTG support"
- depends on USB_EHCI_MV && USB_MV_UDC && PM_RUNTIME
+ depends on USB_EHCI_MV && USB_MV_UDC && PM
select USB_OTG
select USB_PHY
help
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
index 715f299af6e..ec84758f0e2 100644
--- a/drivers/usb/storage/Kconfig
+++ b/drivers/usb/storage/Kconfig
@@ -41,7 +41,7 @@ config USB_STORAGE_REALTEK
config REALTEK_AUTOPM
bool "Realtek Card Reader autosuspend support"
- depends on USB_STORAGE_REALTEK && PM_RUNTIME
+ depends on USB_STORAGE_REALTEK && PM
default y
config USB_STORAGE_DATAFAB
diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig
index d8c57636b9c..14e27ab3245 100644
--- a/drivers/vfio/Kconfig
+++ b/drivers/vfio/Kconfig
@@ -16,7 +16,7 @@ config VFIO_SPAPR_EEH
menuconfig VFIO
tristate "VFIO Non-Privileged userspace driver framework"
depends on IOMMU_API
- select VFIO_IOMMU_TYPE1 if X86
+ select VFIO_IOMMU_TYPE1 if (X86 || S390 || ARM_SMMU)
select VFIO_IOMMU_SPAPR_TCE if (PPC_POWERNV || PPC_PSERIES)
select VFIO_SPAPR_EEH if (PPC_POWERNV || PPC_PSERIES)
select ANON_INODES
diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
index c41b01e2b69..c6bb5da2d2a 100644
--- a/drivers/vfio/pci/Kconfig
+++ b/drivers/vfio/pci/Kconfig
@@ -16,3 +16,11 @@ config VFIO_PCI_VGA
BIOS and generic video drivers.
If you don't know what to do here, say N.
+
+config VFIO_PCI_MMAP
+ depends on VFIO_PCI
+ def_bool y if !S390
+
+config VFIO_PCI_INTX
+ depends on VFIO_PCI
+ def_bool y if !S390
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 9558da3f06a..255201f2212 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -215,7 +215,7 @@ static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) {
u8 pin;
pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin);
- if (pin)
+ if (IS_ENABLED(CONFIG_VFIO_PCI_INTX) && pin)
return 1;
} else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) {
@@ -406,7 +406,8 @@ static long vfio_pci_ioctl(void *device_data,
info.flags = VFIO_REGION_INFO_FLAG_READ |
VFIO_REGION_INFO_FLAG_WRITE;
- if (pci_resource_flags(pdev, info.index) &
+ if (IS_ENABLED(CONFIG_VFIO_PCI_MMAP) &&
+ pci_resource_flags(pdev, info.index) &
IORESOURCE_MEM && info.size >= PAGE_SIZE)
info.flags |= VFIO_REGION_INFO_FLAG_MMAP;
break;
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 1de3f94aa7d..ff75ca31a19 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -609,6 +609,10 @@ static int __init init_pci_cap_basic_perm(struct perm_bits *perm)
/* Sometimes used by sw, just virtualize */
p_setb(perm, PCI_INTERRUPT_LINE, (u8)ALL_VIRT, (u8)ALL_WRITE);
+
+ /* Virtualize interrupt pin to allow hiding INTx */
+ p_setb(perm, PCI_INTERRUPT_PIN, (u8)ALL_VIRT, (u8)NO_WRITE);
+
return 0;
}
@@ -1445,6 +1449,9 @@ int vfio_config_init(struct vfio_pci_device *vdev)
*(__le16 *)&vconfig[PCI_DEVICE_ID] = cpu_to_le16(pdev->device);
}
+ if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX))
+ vconfig[PCI_INTERRUPT_PIN] = 0;
+
ret = vfio_cap_init(vdev);
if (ret)
goto out;
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
index 5174ebac288..3bb02c60a2f 100644
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -11,6 +11,7 @@
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <uapi/linux/virtio_config.h>
static __printf(1,2) __cold void vringh_bad(const char *fmt, ...)
{
@@ -28,13 +29,14 @@ static __printf(1,2) __cold void vringh_bad(const char *fmt, ...)
/* Returns vring->num if empty, -ve on error. */
static inline int __vringh_get_head(const struct vringh *vrh,
- int (*getu16)(u16 *val, const u16 *p),
+ int (*getu16)(const struct vringh *vrh,
+ u16 *val, const __virtio16 *p),
u16 *last_avail_idx)
{
u16 avail_idx, i, head;
int err;
- err = getu16(&avail_idx, &vrh->vring.avail->idx);
+ err = getu16(vrh, &avail_idx, &vrh->vring.avail->idx);
if (err) {
vringh_bad("Failed to access avail idx at %p",
&vrh->vring.avail->idx);
@@ -49,7 +51,7 @@ static inline int __vringh_get_head(const struct vringh *vrh,
i = *last_avail_idx & (vrh->vring.num - 1);
- err = getu16(&head, &vrh->vring.avail->ring[i]);
+ err = getu16(vrh, &head, &vrh->vring.avail->ring[i]);
if (err) {
vringh_bad("Failed to read head: idx %d address %p",
*last_avail_idx, &vrh->vring.avail->ring[i]);
@@ -144,28 +146,32 @@ static inline bool no_range_check(struct vringh *vrh, u64 addr, size_t *len,
}
/* No reason for this code to be inline. */
-static int move_to_indirect(int *up_next, u16 *i, void *addr,
+static int move_to_indirect(const struct vringh *vrh,
+ int *up_next, u16 *i, void *addr,
const struct vring_desc *desc,
struct vring_desc **descs, int *desc_max)
{
+ u32 len;
+
/* Indirect tables can't have indirect. */
if (*up_next != -1) {
vringh_bad("Multilevel indirect %u->%u", *up_next, *i);
return -EINVAL;
}
- if (unlikely(desc->len % sizeof(struct vring_desc))) {
+ len = vringh32_to_cpu(vrh, desc->len);
+ if (unlikely(len % sizeof(struct vring_desc))) {
vringh_bad("Strange indirect len %u", desc->len);
return -EINVAL;
}
/* We will check this when we follow it! */
- if (desc->flags & VRING_DESC_F_NEXT)
- *up_next = desc->next;
+ if (desc->flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT))
+ *up_next = vringh16_to_cpu(vrh, desc->next);
else
*up_next = -2;
*descs = addr;
- *desc_max = desc->len / sizeof(struct vring_desc);
+ *desc_max = len / sizeof(struct vring_desc);
/* Now, start at the first indirect. */
*i = 0;
@@ -287,22 +293,25 @@ __vringh_iov(struct vringh *vrh, u16 i,
if (unlikely(err))
goto fail;
- if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) {
+ if (unlikely(desc.flags &
+ cpu_to_vringh16(vrh, VRING_DESC_F_INDIRECT))) {
+ u64 a = vringh64_to_cpu(vrh, desc.addr);
+
/* Make sure it's OK, and get offset. */
- len = desc.len;
- if (!rcheck(vrh, desc.addr, &len, &range, getrange)) {
+ len = vringh32_to_cpu(vrh, desc.len);
+ if (!rcheck(vrh, a, &len, &range, getrange)) {
err = -EINVAL;
goto fail;
}
- if (unlikely(len != desc.len)) {
+ if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
slow = true;
/* We need to save this range to use offset */
slowrange = range;
}
- addr = (void *)(long)(desc.addr + range.offset);
- err = move_to_indirect(&up_next, &i, addr, &desc,
+ addr = (void *)(long)(a + range.offset);
+ err = move_to_indirect(vrh, &up_next, &i, addr, &desc,
&descs, &desc_max);
if (err)
goto fail;
@@ -315,7 +324,7 @@ __vringh_iov(struct vringh *vrh, u16 i,
goto fail;
}
- if (desc.flags & VRING_DESC_F_WRITE)
+ if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_WRITE))
iov = wiov;
else {
iov = riov;
@@ -336,12 +345,14 @@ __vringh_iov(struct vringh *vrh, u16 i,
again:
/* Make sure it's OK, and get offset. */
- len = desc.len;
- if (!rcheck(vrh, desc.addr, &len, &range, getrange)) {
+ len = vringh32_to_cpu(vrh, desc.len);
+ if (!rcheck(vrh, vringh64_to_cpu(vrh, desc.addr), &len, &range,
+ getrange)) {
err = -EINVAL;
goto fail;
}
- addr = (void *)(unsigned long)(desc.addr + range.offset);
+ addr = (void *)(unsigned long)(vringh64_to_cpu(vrh, desc.addr) +
+ range.offset);
if (unlikely(iov->used == (iov->max_num & ~VRINGH_IOV_ALLOCATED))) {
err = resize_iovec(iov, gfp);
@@ -353,14 +364,16 @@ __vringh_iov(struct vringh *vrh, u16 i,
iov->iov[iov->used].iov_len = len;
iov->used++;
- if (unlikely(len != desc.len)) {
- desc.len -= len;
- desc.addr += len;
+ if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
+ desc.len = cpu_to_vringh32(vrh,
+ vringh32_to_cpu(vrh, desc.len) - len);
+ desc.addr = cpu_to_vringh64(vrh,
+ vringh64_to_cpu(vrh, desc.addr) + len);
goto again;
}
- if (desc.flags & VRING_DESC_F_NEXT) {
- i = desc.next;
+ if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT)) {
+ i = vringh16_to_cpu(vrh, desc.next);
} else {
/* Just in case we need to finish traversing above. */
if (unlikely(up_next > 0)) {
@@ -387,7 +400,8 @@ fail:
static inline int __vringh_complete(struct vringh *vrh,
const struct vring_used_elem *used,
unsigned int num_used,
- int (*putu16)(u16 *p, u16 val),
+ int (*putu16)(const struct vringh *vrh,
+ __virtio16 *p, u16 val),
int (*putused)(struct vring_used_elem *dst,
const struct vring_used_elem
*src, unsigned num))
@@ -420,7 +434,7 @@ static inline int __vringh_complete(struct vringh *vrh,
/* Make sure buffer is written before we update index. */
virtio_wmb(vrh->weak_barriers);
- err = putu16(&vrh->vring.used->idx, used_idx + num_used);
+ err = putu16(vrh, &vrh->vring.used->idx, used_idx + num_used);
if (err) {
vringh_bad("Failed to update used index at %p",
&vrh->vring.used->idx);
@@ -433,7 +447,9 @@ static inline int __vringh_complete(struct vringh *vrh,
static inline int __vringh_need_notify(struct vringh *vrh,
- int (*getu16)(u16 *val, const u16 *p))
+ int (*getu16)(const struct vringh *vrh,
+ u16 *val,
+ const __virtio16 *p))
{
bool notify;
u16 used_event;
@@ -447,7 +463,7 @@ static inline int __vringh_need_notify(struct vringh *vrh,
/* Old-style, without event indices. */
if (!vrh->event_indices) {
u16 flags;
- err = getu16(&flags, &vrh->vring.avail->flags);
+ err = getu16(vrh, &flags, &vrh->vring.avail->flags);
if (err) {
vringh_bad("Failed to get flags at %p",
&vrh->vring.avail->flags);
@@ -457,7 +473,7 @@ static inline int __vringh_need_notify(struct vringh *vrh,
}
/* Modern: we know when other side wants to know. */
- err = getu16(&used_event, &vring_used_event(&vrh->vring));
+ err = getu16(vrh, &used_event, &vring_used_event(&vrh->vring));
if (err) {
vringh_bad("Failed to get used event idx at %p",
&vring_used_event(&vrh->vring));
@@ -478,20 +494,22 @@ static inline int __vringh_need_notify(struct vringh *vrh,
}
static inline bool __vringh_notify_enable(struct vringh *vrh,
- int (*getu16)(u16 *val, const u16 *p),
- int (*putu16)(u16 *p, u16 val))
+ int (*getu16)(const struct vringh *vrh,
+ u16 *val, const __virtio16 *p),
+ int (*putu16)(const struct vringh *vrh,
+ __virtio16 *p, u16 val))
{
u16 avail;
if (!vrh->event_indices) {
/* Old-school; update flags. */
- if (putu16(&vrh->vring.used->flags, 0) != 0) {
+ if (putu16(vrh, &vrh->vring.used->flags, 0) != 0) {
vringh_bad("Clearing used flags %p",
&vrh->vring.used->flags);
return true;
}
} else {
- if (putu16(&vring_avail_event(&vrh->vring),
+ if (putu16(vrh, &vring_avail_event(&vrh->vring),
vrh->last_avail_idx) != 0) {
vringh_bad("Updating avail event index %p",
&vring_avail_event(&vrh->vring));
@@ -503,7 +521,7 @@ static inline bool __vringh_notify_enable(struct vringh *vrh,
* sure it's written, then check again. */
virtio_mb(vrh->weak_barriers);
- if (getu16(&avail, &vrh->vring.avail->idx) != 0) {
+ if (getu16(vrh, &avail, &vrh->vring.avail->idx) != 0) {
vringh_bad("Failed to check avail idx at %p",
&vrh->vring.avail->idx);
return true;
@@ -516,11 +534,13 @@ static inline bool __vringh_notify_enable(struct vringh *vrh,
}
static inline void __vringh_notify_disable(struct vringh *vrh,
- int (*putu16)(u16 *p, u16 val))
+ int (*putu16)(const struct vringh *vrh,
+ __virtio16 *p, u16 val))
{
if (!vrh->event_indices) {
/* Old-school; update flags. */
- if (putu16(&vrh->vring.used->flags, VRING_USED_F_NO_NOTIFY)) {
+ if (putu16(vrh, &vrh->vring.used->flags,
+ VRING_USED_F_NO_NOTIFY)) {
vringh_bad("Setting used flags %p",
&vrh->vring.used->flags);
}
@@ -528,14 +548,18 @@ static inline void __vringh_notify_disable(struct vringh *vrh,
}
/* Userspace access helpers: in this case, addresses are really userspace. */
-static inline int getu16_user(u16 *val, const u16 *p)
+static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
{
- return get_user(*val, (__force u16 __user *)p);
+ __virtio16 v = 0;
+ int rc = get_user(v, (__force __virtio16 __user *)p);
+ *val = vringh16_to_cpu(vrh, v);
+ return rc;
}
-static inline int putu16_user(u16 *p, u16 val)
+static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
{
- return put_user(val, (__force u16 __user *)p);
+ __virtio16 v = cpu_to_vringh16(vrh, val);
+ return put_user(v, (__force __virtio16 __user *)p);
}
static inline int copydesc_user(void *dst, const void *src, size_t len)
@@ -577,7 +601,7 @@ static inline int xfer_to_user(void *dst, void *src, size_t len)
* Returns an error if num is invalid: you should check pointers
* yourself!
*/
-int vringh_init_user(struct vringh *vrh, u32 features,
+int vringh_init_user(struct vringh *vrh, u64 features,
unsigned int num, bool weak_barriers,
struct vring_desc __user *desc,
struct vring_avail __user *avail,
@@ -589,6 +613,7 @@ int vringh_init_user(struct vringh *vrh, u32 features,
return -EINVAL;
}
+ vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
vrh->weak_barriers = weak_barriers;
vrh->completed = 0;
@@ -729,8 +754,8 @@ int vringh_complete_user(struct vringh *vrh, u16 head, u32 len)
{
struct vring_used_elem used;
- used.id = head;
- used.len = len;
+ used.id = cpu_to_vringh32(vrh, head);
+ used.len = cpu_to_vringh32(vrh, len);
return __vringh_complete(vrh, &used, 1, putu16_user, putused_user);
}
EXPORT_SYMBOL(vringh_complete_user);
@@ -792,15 +817,16 @@ int vringh_need_notify_user(struct vringh *vrh)
EXPORT_SYMBOL(vringh_need_notify_user);
/* Kernelspace access helpers. */
-static inline int getu16_kern(u16 *val, const u16 *p)
+static inline int getu16_kern(const struct vringh *vrh,
+ u16 *val, const __virtio16 *p)
{
- *val = ACCESS_ONCE(*p);
+ *val = vringh16_to_cpu(vrh, ACCESS_ONCE(*p));
return 0;
}
-static inline int putu16_kern(u16 *p, u16 val)
+static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
{
- ACCESS_ONCE(*p) = val;
+ ACCESS_ONCE(*p) = cpu_to_vringh16(vrh, val);
return 0;
}
@@ -836,7 +862,7 @@ static inline int xfer_kern(void *src, void *dst, size_t len)
*
* Returns an error if num is invalid.
*/
-int vringh_init_kern(struct vringh *vrh, u32 features,
+int vringh_init_kern(struct vringh *vrh, u64 features,
unsigned int num, bool weak_barriers,
struct vring_desc *desc,
struct vring_avail *avail,
@@ -848,6 +874,7 @@ int vringh_init_kern(struct vringh *vrh, u32 features,
return -EINVAL;
}
+ vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
vrh->weak_barriers = weak_barriers;
vrh->completed = 0;
@@ -962,8 +989,8 @@ int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len)
{
struct vring_used_elem used;
- used.id = head;
- used.len = len;
+ used.id = cpu_to_vringh32(vrh, head);
+ used.len = cpu_to_vringh32(vrh, len);
return __vringh_complete(vrh, &used, 1, putu16_kern, putused_kern);
}
diff --git a/drivers/video/fbdev/s3c-fb.c b/drivers/video/fbdev/s3c-fb.c
index a623a4d0c94..7e3a05fc47a 100644
--- a/drivers/video/fbdev/s3c-fb.c
+++ b/drivers/video/fbdev/s3c-fb.c
@@ -1630,7 +1630,7 @@ static int s3c_fb_resume(struct device *dev)
}
#endif
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static int s3c_fb_runtime_suspend(struct device *dev)
{
struct s3c_fb *sfb = dev_get_drvdata(dev);
diff --git a/drivers/video/fbdev/sh_mobile_meram.c b/drivers/video/fbdev/sh_mobile_meram.c
index 1d56108dee9..baadfb207b2 100644
--- a/drivers/video/fbdev/sh_mobile_meram.c
+++ b/drivers/video/fbdev/sh_mobile_meram.c
@@ -569,7 +569,7 @@ EXPORT_SYMBOL_GPL(sh_mobile_meram_cache_update);
* Power management
*/
-#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_RUNTIME)
+#ifdef CONFIG_PM
static int sh_mobile_meram_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -612,7 +612,7 @@ static int sh_mobile_meram_resume(struct device *dev)
meram_write_reg(priv->base, common_regs[i], priv->regs[i]);
return 0;
}
-#endif /* CONFIG_PM_SLEEP || CONFIG_PM_RUNTIME */
+#endif /* CONFIG_PM */
static UNIVERSAL_DEV_PM_OPS(sh_mobile_meram_dev_pm_ops,
sh_mobile_meram_suspend,
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index f2266586878..b9f70dfc475 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -162,6 +162,27 @@ static void virtio_config_enable(struct virtio_device *dev)
spin_unlock_irq(&dev->config_lock);
}
+static int virtio_finalize_features(struct virtio_device *dev)
+{
+ int ret = dev->config->finalize_features(dev);
+ unsigned status;
+
+ if (ret)
+ return ret;
+
+ if (!virtio_has_feature(dev, VIRTIO_F_VERSION_1))
+ return 0;
+
+ add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
+ status = dev->config->get_status(dev);
+ if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
+ dev_err(&dev->dev, "virtio: device refuses features: %x\n",
+ status);
+ return -ENODEV;
+ }
+ return 0;
+}
+
static int virtio_dev_probe(struct device *_d)
{
int err, i;
@@ -170,7 +191,6 @@ static int virtio_dev_probe(struct device *_d)
u64 device_features;
u64 driver_features;
u64 driver_features_legacy;
- unsigned status;
/* We have a driver! */
add_status(dev, VIRTIO_CONFIG_S_DRIVER);
@@ -208,21 +228,10 @@ static int virtio_dev_probe(struct device *_d)
if (device_features & (1ULL << i))
__virtio_set_bit(dev, i);
- err = dev->config->finalize_features(dev);
+ err = virtio_finalize_features(dev);
if (err)
goto err;
- if (virtio_has_feature(dev, VIRTIO_F_VERSION_1)) {
- add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
- status = dev->config->get_status(dev);
- if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
- dev_err(_d, "virtio: device refuses features: %x\n",
- status);
- err = -ENODEV;
- goto err;
- }
- }
-
err = drv->probe(dev);
if (err)
goto err;
@@ -372,7 +381,7 @@ int virtio_device_restore(struct virtio_device *dev)
/* We have a driver! */
add_status(dev, VIRTIO_CONFIG_S_DRIVER);
- ret = dev->config->finalize_features(dev);
+ ret = virtio_finalize_features(dev);
if (ret)
goto err;
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index c9703d4d6f6..50c5f42d7a9 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -28,6 +28,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/balloon_compaction.h>
+#include <linux/oom.h>
/*
* Balloon device works in 4K page units. So each page is pointed to by
@@ -36,6 +37,12 @@
*/
#define VIRTIO_BALLOON_PAGES_PER_PAGE (unsigned)(PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT)
#define VIRTIO_BALLOON_ARRAY_PFNS_MAX 256
+#define OOM_VBALLOON_DEFAULT_PAGES 256
+#define VIRTBALLOON_OOM_NOTIFY_PRIORITY 80
+
+static int oom_pages = OOM_VBALLOON_DEFAULT_PAGES;
+module_param(oom_pages, int, S_IRUSR | S_IWUSR);
+MODULE_PARM_DESC(oom_pages, "pages to free on OOM");
struct virtio_balloon
{
@@ -71,6 +78,9 @@ struct virtio_balloon
/* Memory statistics */
int need_stats_update;
struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR];
+
+ /* To register callback in oom notifier call chain */
+ struct notifier_block nb;
};
static struct virtio_device_id id_table[] = {
@@ -168,8 +178,9 @@ static void release_pages_by_pfn(const u32 pfns[], unsigned int num)
}
}
-static void leak_balloon(struct virtio_balloon *vb, size_t num)
+static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
{
+ unsigned num_freed_pages;
struct page *page;
struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info;
@@ -186,6 +197,7 @@ static void leak_balloon(struct virtio_balloon *vb, size_t num)
vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE;
}
+ num_freed_pages = vb->num_pfns;
/*
* Note that if
* virtio_has_feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST);
@@ -195,6 +207,7 @@ static void leak_balloon(struct virtio_balloon *vb, size_t num)
tell_host(vb, vb->deflate_vq);
mutex_unlock(&vb->balloon_lock);
release_pages_by_pfn(vb->pfns, vb->num_pfns);
+ return num_freed_pages;
}
static inline void update_stat(struct virtio_balloon *vb, int idx,
@@ -287,6 +300,38 @@ static void update_balloon_size(struct virtio_balloon *vb)
&actual);
}
+/*
+ * virtballoon_oom_notify - release pages when system is under severe
+ * memory pressure (called from out_of_memory())
+ * @self : notifier block struct
+ * @dummy: not used
+ * @parm : returned - number of freed pages
+ *
+ * The balancing of memory by use of the virtio balloon should not cause
+ * the termination of processes while there are pages in the balloon.
+ * If virtio balloon manages to release some memory, it will make the
+ * system return and retry the allocation that forced the OOM killer
+ * to run.
+ */
+static int virtballoon_oom_notify(struct notifier_block *self,
+ unsigned long dummy, void *parm)
+{
+ struct virtio_balloon *vb;
+ unsigned long *freed;
+ unsigned num_freed_pages;
+
+ vb = container_of(self, struct virtio_balloon, nb);
+ if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
+ return NOTIFY_OK;
+
+ freed = parm;
+ num_freed_pages = leak_balloon(vb, oom_pages);
+ update_balloon_size(vb);
+ *freed += num_freed_pages;
+
+ return NOTIFY_OK;
+}
+
static int balloon(void *_vballoon)
{
struct virtio_balloon *vb = _vballoon;
@@ -443,6 +488,12 @@ static int virtballoon_probe(struct virtio_device *vdev)
if (err)
goto out_free_vb;
+ vb->nb.notifier_call = virtballoon_oom_notify;
+ vb->nb.priority = VIRTBALLOON_OOM_NOTIFY_PRIORITY;
+ err = register_oom_notifier(&vb->nb);
+ if (err < 0)
+ goto out_oom_notify;
+
vb->thread = kthread_run(balloon, vb, "vballoon");
if (IS_ERR(vb->thread)) {
err = PTR_ERR(vb->thread);
@@ -452,6 +503,8 @@ static int virtballoon_probe(struct virtio_device *vdev)
return 0;
out_del_vqs:
+ unregister_oom_notifier(&vb->nb);
+out_oom_notify:
vdev->config->del_vqs(vdev);
out_free_vb:
kfree(vb);
@@ -476,6 +529,7 @@ static void virtballoon_remove(struct virtio_device *vdev)
{
struct virtio_balloon *vb = vdev->priv;
+ unregister_oom_notifier(&vb->nb);
kthread_stop(vb->thread);
remove_common(vb);
kfree(vb);
@@ -515,6 +569,7 @@ static int virtballoon_restore(struct virtio_device *vdev)
static unsigned int features[] = {
VIRTIO_BALLOON_F_MUST_TELL_HOST,
VIRTIO_BALLOON_F_STATS_VQ,
+ VIRTIO_BALLOON_F_DEFLATE_ON_OOM,
};
static struct virtio_driver virtio_balloon_driver = {
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 953057d8418..2ef9529809d 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -458,7 +458,44 @@ static int virtio_pci_restore(struct device *dev)
return virtio_device_restore(&vp_dev->vdev);
}
-const struct dev_pm_ops virtio_pci_pm_ops = {
+static const struct dev_pm_ops virtio_pci_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore)
};
#endif
+
+
+/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
+static const struct pci_device_id virtio_pci_id_table[] = {
+ { PCI_DEVICE(0x1af4, PCI_ANY_ID) },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);
+
+static int virtio_pci_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *id)
+{
+ return virtio_pci_legacy_probe(pci_dev, id);
+}
+
+static void virtio_pci_remove(struct pci_dev *pci_dev)
+{
+ virtio_pci_legacy_remove(pci_dev);
+}
+
+static struct pci_driver virtio_pci_driver = {
+ .name = "virtio-pci",
+ .id_table = virtio_pci_id_table,
+ .probe = virtio_pci_probe,
+ .remove = virtio_pci_remove,
+#ifdef CONFIG_PM_SLEEP
+ .driver.pm = &virtio_pci_pm_ops,
+#endif
+};
+
+module_pci_driver(virtio_pci_driver);
+
+MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>");
+MODULE_DESCRIPTION("virtio-pci");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1");
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index d840dad4149..adddb647b21 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -27,7 +27,6 @@
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
-#define VIRTIO_PCI_NO_LEGACY
#include <linux/virtio_pci.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
@@ -129,8 +128,8 @@ const char *vp_bus_name(struct virtio_device *vdev);
int vp_set_vq_affinity(struct virtqueue *vq, int cpu);
void virtio_pci_release_dev(struct device *);
-#ifdef CONFIG_PM_SLEEP
-extern const struct dev_pm_ops virtio_pci_pm_ops;
-#endif
+int virtio_pci_legacy_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *id);
+void virtio_pci_legacy_remove(struct pci_dev *pci_dev);
#endif
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index 2588252e5c1..6c76f0f5658 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -19,14 +19,6 @@
#include "virtio_pci_common.h"
-/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
-static const struct pci_device_id virtio_pci_id_table[] = {
- { PCI_DEVICE(0x1af4, PCI_ANY_ID) },
- { 0 }
-};
-
-MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);
-
/* virtio config->get_features() implementation */
static u64 vp_get_features(struct virtio_device *vdev)
{
@@ -220,7 +212,7 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
};
/* the PCI probing function */
-static int virtio_pci_probe(struct pci_dev *pci_dev,
+int virtio_pci_legacy_probe(struct pci_dev *pci_dev,
const struct pci_device_id *id)
{
struct virtio_pci_device *vp_dev;
@@ -300,7 +292,7 @@ out:
return err;
}
-static void virtio_pci_remove(struct pci_dev *pci_dev)
+void virtio_pci_legacy_remove(struct pci_dev *pci_dev)
{
struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
@@ -312,15 +304,3 @@ static void virtio_pci_remove(struct pci_dev *pci_dev)
pci_disable_device(pci_dev);
kfree(vp_dev);
}
-
-static struct pci_driver virtio_pci_driver = {
- .name = "virtio-pci",
- .id_table = virtio_pci_id_table,
- .probe = virtio_pci_probe,
- .remove = virtio_pci_remove,
-#ifdef CONFIG_PM_SLEEP
- .driver.pm = &virtio_pci_pm_ops,
-#endif
-};
-
-module_pci_driver(virtio_pci_driver);
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 65b84d8c0b9..d6add516a7a 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -326,6 +326,52 @@ static void imx2_wdt_shutdown(struct platform_device *pdev)
}
}
+#ifdef CONFIG_PM_SLEEP
+/* Disable watchdog if it is active during suspend */
+static int imx2_wdt_suspend(struct device *dev)
+{
+ struct watchdog_device *wdog = dev_get_drvdata(dev);
+ struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
+
+ imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME);
+ imx2_wdt_ping(wdog);
+
+ /* Watchdog has been stopped but IP block is still running */
+ if (!watchdog_active(wdog) && imx2_wdt_is_running(wdev))
+ del_timer_sync(&wdev->timer);
+
+ clk_disable_unprepare(wdev->clk);
+
+ return 0;
+}
+
+/* Enable watchdog and configure it if necessary */
+static int imx2_wdt_resume(struct device *dev)
+{
+ struct watchdog_device *wdog = dev_get_drvdata(dev);
+ struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
+
+ clk_prepare_enable(wdev->clk);
+
+ if (watchdog_active(wdog) && !imx2_wdt_is_running(wdev)) {
+ /* After resuming from deep sleep, we need to restart
+ * the watchdog again.
+ */
+ imx2_wdt_setup(wdog);
+ imx2_wdt_set_timeout(wdog, wdog->timeout);
+ imx2_wdt_ping(wdog);
+ } else if (imx2_wdt_is_running(wdev)) {
+ imx2_wdt_ping(wdog);
+ mod_timer(&wdev->timer, jiffies + wdog->timeout * HZ / 2);
+ }
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(imx2_wdt_pm_ops, imx2_wdt_suspend,
+ imx2_wdt_resume);
+
static const struct of_device_id imx2_wdt_dt_ids[] = {
{ .compatible = "fsl,imx21-wdt", },
{ /* sentinel */ }
@@ -337,6 +383,7 @@ static struct platform_driver imx2_wdt_driver = {
.shutdown = imx2_wdt_shutdown,
.driver = {
.name = DRIVER_NAME,
+ .pm = &imx2_wdt_pm_ops,
.of_match_table = imx2_wdt_dt_ids,
},
};
diff --git a/fs/Makefile b/fs/Makefile
index da0bbb456d3..bedff48e8fd 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -11,7 +11,7 @@ obj-y := open.o read_write.o file_table.o super.o \
attr.o bad_inode.o file.o filesystems.o namespace.o \
seq_file.o xattr.o libfs.o fs-writeback.o \
pnode.o splice.o sync.o utimes.o \
- stack.o fs_struct.o statfs.o fs_pin.o
+ stack.o fs_struct.o statfs.o fs_pin.o nsfs.o
ifeq ($(CONFIG_BLOCK),y)
obj-y += buffer.o block_dev.o direct-io.o mpage.o
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index c04ef1d4f18..97aff2879cd 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -254,6 +254,7 @@ static char *scanarg(char *s, char del)
return NULL;
}
}
+ s[-1] = '\0';
return s;
}
@@ -378,8 +379,7 @@ static Node *create_entry(const char __user *buffer, size_t count)
p = scanarg(p, del);
if (!p)
goto einval;
- p[-1] = '\0';
- if (p == e->magic)
+ if (!e->magic[0])
goto einval;
if (USE_DEBUG)
print_hex_dump_bytes(
@@ -391,8 +391,7 @@ static Node *create_entry(const char __user *buffer, size_t count)
p = scanarg(p, del);
if (!p)
goto einval;
- p[-1] = '\0';
- if (p == e->mask) {
+ if (!e->mask[0]) {
e->mask = NULL;
pr_debug("register: mask[raw]: none\n");
} else if (USE_DEBUG)
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index e6fbbd74b71..7e607416755 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -3481,8 +3481,8 @@ void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
int btrfs_error_unpin_extent_range(struct btrfs_root *root,
u64 start, u64 end);
-int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
- u64 num_bytes, u64 *actual_bytes);
+int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
+ u64 num_bytes, u64 *actual_bytes);
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 type);
int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 30965120772..8c63419a7f7 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -4121,12 +4121,6 @@ again:
if (ret)
break;
- /* opt_discard */
- if (btrfs_test_opt(root, DISCARD))
- ret = btrfs_error_discard_extent(root, start,
- end + 1 - start,
- NULL);
-
clear_extent_dirty(unpin, start, end, GFP_NOFS);
btrfs_error_unpin_extent_range(root, start, end);
cond_resched();
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 222d6aea4a8..a80b97100d9 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -1889,8 +1889,8 @@ static int btrfs_issue_discard(struct block_device *bdev,
return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
}
-static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
- u64 num_bytes, u64 *actual_bytes)
+int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
+ u64 num_bytes, u64 *actual_bytes)
{
int ret;
u64 discarded_bytes = 0;
@@ -5727,7 +5727,8 @@ void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
update_global_block_rsv(fs_info);
}
-static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
+static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
+ const bool return_free_space)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_group_cache *cache = NULL;
@@ -5751,7 +5752,8 @@ static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
if (start < cache->last_byte_to_unpin) {
len = min(len, cache->last_byte_to_unpin - start);
- btrfs_add_free_space(cache, start, len);
+ if (return_free_space)
+ btrfs_add_free_space(cache, start, len);
}
start += len;
@@ -5815,7 +5817,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
end + 1 - start, NULL);
clear_extent_dirty(unpin, start, end, GFP_NOFS);
- unpin_extent_range(root, start, end);
+ unpin_extent_range(root, start, end, true);
cond_resched();
}
@@ -8872,6 +8874,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
cache_node);
rb_erase(&block_group->cache_node,
&info->block_group_cache_tree);
+ RB_CLEAR_NODE(&block_group->cache_node);
spin_unlock(&info->block_group_cache_lock);
down_write(&block_group->space_info->groups_sem);
@@ -9130,6 +9133,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
spin_lock(&info->block_group_cache_lock);
rb_erase(&cache->cache_node,
&info->block_group_cache_tree);
+ RB_CLEAR_NODE(&cache->cache_node);
spin_unlock(&info->block_group_cache_lock);
btrfs_put_block_group(cache);
goto error;
@@ -9271,6 +9275,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
spin_lock(&root->fs_info->block_group_cache_lock);
rb_erase(&cache->cache_node,
&root->fs_info->block_group_cache_tree);
+ RB_CLEAR_NODE(&cache->cache_node);
spin_unlock(&root->fs_info->block_group_cache_lock);
btrfs_put_block_group(cache);
return ret;
@@ -9690,13 +9695,7 @@ out:
int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
{
- return unpin_extent_range(root, start, end);
-}
-
-int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
- u64 num_bytes, u64 *actual_bytes)
-{
- return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
+ return unpin_extent_range(root, start, end, false);
}
int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 030847bf7ce..d6c03f7f136 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -2966,8 +2966,8 @@ static int do_trimming(struct btrfs_block_group_cache *block_group,
spin_unlock(&block_group->lock);
spin_unlock(&space_info->lock);
- ret = btrfs_error_discard_extent(fs_info->extent_root,
- start, bytes, &trimmed);
+ ret = btrfs_discard_extent(fs_info->extent_root,
+ start, bytes, &trimmed);
if (!ret)
*total_trimmed += trimmed;
@@ -3185,16 +3185,18 @@ out:
spin_unlock(&block_group->lock);
+ lock_chunks(block_group->fs_info->chunk_root);
em_tree = &block_group->fs_info->mapping_tree.map_tree;
write_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, block_group->key.objectid,
1);
BUG_ON(!em); /* logic error, can't happen */
+ /*
+ * remove_extent_mapping() will delete us from the pinned_chunks
+ * list, which is protected by the chunk mutex.
+ */
remove_extent_mapping(em_tree, em);
write_unlock(&em_tree->lock);
-
- lock_chunks(block_group->fs_info->chunk_root);
- list_del_init(&em->list);
unlock_chunks(block_group->fs_info->chunk_root);
/* once for us and once for the tree */
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 0144790e296..50c5a8762ae 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1485,7 +1485,7 @@ static void update_dev_time(char *path_name)
struct file *filp;
filp = filp_open(path_name, O_RDWR, 0);
- if (!filp)
+ if (IS_ERR(filp))
return;
file_update_time(filp);
filp_close(filp, NULL);
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 18c06bbaf13..f5013d92a7e 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -192,17 +192,30 @@ static int readpage_nounlock(struct file *filp, struct page *page)
struct ceph_osd_client *osdc =
&ceph_inode_to_client(inode)->client->osdc;
int err = 0;
+ u64 off = page_offset(page);
u64 len = PAGE_CACHE_SIZE;
- err = ceph_readpage_from_fscache(inode, page);
+ if (off >= i_size_read(inode)) {
+ zero_user_segment(page, err, PAGE_CACHE_SIZE);
+ SetPageUptodate(page);
+ return 0;
+ }
+ /*
+ * Uptodate inline data should have been added into page cache
+ * while getting Fcr caps.
+ */
+ if (ci->i_inline_version != CEPH_INLINE_NONE)
+ return -EINVAL;
+
+ err = ceph_readpage_from_fscache(inode, page);
if (err == 0)
goto out;
dout("readpage inode %p file %p page %p index %lu\n",
inode, filp, page, page->index);
err = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout,
- (u64) page_offset(page), &len,
+ off, &len,
ci->i_truncate_seq, ci->i_truncate_size,
&page, 1, 0);
if (err == -ENOENT)
@@ -319,7 +332,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
off, len);
vino = ceph_vino(inode);
req = ceph_osdc_new_request(osdc, &ci->i_layout, vino, off, &len,
- 1, CEPH_OSD_OP_READ,
+ 0, 1, CEPH_OSD_OP_READ,
CEPH_OSD_FLAG_READ, NULL,
ci->i_truncate_seq, ci->i_truncate_size,
false);
@@ -384,6 +397,9 @@ static int ceph_readpages(struct file *file, struct address_space *mapping,
int rc = 0;
int max = 0;
+ if (ceph_inode(inode)->i_inline_version != CEPH_INLINE_NONE)
+ return -EINVAL;
+
rc = ceph_readpages_from_fscache(mapping->host, mapping, page_list,
&nr_pages);
@@ -673,7 +689,7 @@ static int ceph_writepages_start(struct address_space *mapping,
int rc = 0;
unsigned wsize = 1 << inode->i_blkbits;
struct ceph_osd_request *req = NULL;
- int do_sync;
+ int do_sync = 0;
u64 truncate_size, snap_size;
u32 truncate_seq;
@@ -750,7 +766,6 @@ retry:
last_snapc = snapc;
while (!done && index <= end) {
- int num_ops = do_sync ? 2 : 1;
unsigned i;
int first;
pgoff_t next;
@@ -850,7 +865,8 @@ get_more_pages:
len = wsize;
req = ceph_osdc_new_request(&fsc->client->osdc,
&ci->i_layout, vino,
- offset, &len, num_ops,
+ offset, &len, 0,
+ do_sync ? 2 : 1,
CEPH_OSD_OP_WRITE,
CEPH_OSD_FLAG_WRITE |
CEPH_OSD_FLAG_ONDISK,
@@ -862,6 +878,9 @@ get_more_pages:
break;
}
+ if (do_sync)
+ osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC);
+
req->r_callback = writepages_finish;
req->r_inode = inode;
@@ -1204,6 +1223,7 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
struct inode *inode = file_inode(vma->vm_file);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_file_info *fi = vma->vm_file->private_data;
+ struct page *pinned_page = NULL;
loff_t off = vmf->pgoff << PAGE_CACHE_SHIFT;
int want, got, ret;
@@ -1215,7 +1235,8 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
want = CEPH_CAP_FILE_CACHE;
while (1) {
got = 0;
- ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, &got, -1);
+ ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want,
+ -1, &got, &pinned_page);
if (ret == 0)
break;
if (ret != -ERESTARTSYS) {
@@ -1226,12 +1247,54 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
dout("filemap_fault %p %llu~%zd got cap refs on %s\n",
inode, off, (size_t)PAGE_CACHE_SIZE, ceph_cap_string(got));
- ret = filemap_fault(vma, vmf);
+ if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
+ ci->i_inline_version == CEPH_INLINE_NONE)
+ ret = filemap_fault(vma, vmf);
+ else
+ ret = -EAGAIN;
dout("filemap_fault %p %llu~%zd dropping cap refs on %s ret %d\n",
inode, off, (size_t)PAGE_CACHE_SIZE, ceph_cap_string(got), ret);
+ if (pinned_page)
+ page_cache_release(pinned_page);
ceph_put_cap_refs(ci, got);
+ if (ret != -EAGAIN)
+ return ret;
+
+ /* read inline data */
+ if (off >= PAGE_CACHE_SIZE) {
+ /* does not support inline data > PAGE_SIZE */
+ ret = VM_FAULT_SIGBUS;
+ } else {
+ int ret1;
+ struct address_space *mapping = inode->i_mapping;
+ struct page *page = find_or_create_page(mapping, 0,
+ mapping_gfp_mask(mapping) &
+ ~__GFP_FS);
+ if (!page) {
+ ret = VM_FAULT_OOM;
+ goto out;
+ }
+ ret1 = __ceph_do_getattr(inode, page,
+ CEPH_STAT_CAP_INLINE_DATA, true);
+ if (ret1 < 0 || off >= i_size_read(inode)) {
+ unlock_page(page);
+ page_cache_release(page);
+ ret = VM_FAULT_SIGBUS;
+ goto out;
+ }
+ if (ret1 < PAGE_CACHE_SIZE)
+ zero_user_segment(page, ret1, PAGE_CACHE_SIZE);
+ else
+ flush_dcache_page(page);
+ SetPageUptodate(page);
+ vmf->page = page;
+ ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED;
+ }
+out:
+ dout("filemap_fault %p %llu~%zd read inline data ret %d\n",
+ inode, off, (size_t)PAGE_CACHE_SIZE, ret);
return ret;
}
@@ -1250,6 +1313,19 @@ static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
size_t len;
int want, got, ret;
+ if (ci->i_inline_version != CEPH_INLINE_NONE) {
+ struct page *locked_page = NULL;
+ if (off == 0) {
+ lock_page(page);
+ locked_page = page;
+ }
+ ret = ceph_uninline_data(vma->vm_file, locked_page);
+ if (locked_page)
+ unlock_page(locked_page);
+ if (ret < 0)
+ return VM_FAULT_SIGBUS;
+ }
+
if (off + PAGE_CACHE_SIZE <= size)
len = PAGE_CACHE_SIZE;
else
@@ -1263,7 +1339,8 @@ static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
want = CEPH_CAP_FILE_BUFFER;
while (1) {
got = 0;
- ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, &got, off + len);
+ ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, off + len,
+ &got, NULL);
if (ret == 0)
break;
if (ret != -ERESTARTSYS) {
@@ -1297,11 +1374,13 @@ static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
ret = VM_FAULT_SIGBUS;
}
out:
- if (ret != VM_FAULT_LOCKED) {
+ if (ret != VM_FAULT_LOCKED)
unlock_page(page);
- } else {
+ if (ret == VM_FAULT_LOCKED ||
+ ci->i_inline_version != CEPH_INLINE_NONE) {
int dirty;
spin_lock(&ci->i_ceph_lock);
+ ci->i_inline_version = CEPH_INLINE_NONE;
dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
spin_unlock(&ci->i_ceph_lock);
if (dirty)
@@ -1315,6 +1394,178 @@ out:
return ret;
}
+void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
+ char *data, size_t len)
+{
+ struct address_space *mapping = inode->i_mapping;
+ struct page *page;
+
+ if (locked_page) {
+ page = locked_page;
+ } else {
+ if (i_size_read(inode) == 0)
+ return;
+ page = find_or_create_page(mapping, 0,
+ mapping_gfp_mask(mapping) & ~__GFP_FS);
+ if (!page)
+ return;
+ if (PageUptodate(page)) {
+ unlock_page(page);
+ page_cache_release(page);
+ return;
+ }
+ }
+
+ dout("fill_inline_data %p %llx.%llx len %lu locked_page %p\n",
+ inode, ceph_vinop(inode), len, locked_page);
+
+ if (len > 0) {
+ void *kaddr = kmap_atomic(page);
+ memcpy(kaddr, data, len);
+ kunmap_atomic(kaddr);
+ }
+
+ if (page != locked_page) {
+ if (len < PAGE_CACHE_SIZE)
+ zero_user_segment(page, len, PAGE_CACHE_SIZE);
+ else
+ flush_dcache_page(page);
+
+ SetPageUptodate(page);
+ unlock_page(page);
+ page_cache_release(page);
+ }
+}
+
+int ceph_uninline_data(struct file *filp, struct page *locked_page)
+{
+ struct inode *inode = file_inode(filp);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_osd_request *req;
+ struct page *page = NULL;
+ u64 len, inline_version;
+ int err = 0;
+ bool from_pagecache = false;
+
+ spin_lock(&ci->i_ceph_lock);
+ inline_version = ci->i_inline_version;
+ spin_unlock(&ci->i_ceph_lock);
+
+ dout("uninline_data %p %llx.%llx inline_version %llu\n",
+ inode, ceph_vinop(inode), inline_version);
+
+ if (inline_version == 1 || /* initial version, no data */
+ inline_version == CEPH_INLINE_NONE)
+ goto out;
+
+ if (locked_page) {
+ page = locked_page;
+ WARN_ON(!PageUptodate(page));
+ } else if (ceph_caps_issued(ci) &
+ (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) {
+ page = find_get_page(inode->i_mapping, 0);
+ if (page) {
+ if (PageUptodate(page)) {
+ from_pagecache = true;
+ lock_page(page);
+ } else {
+ page_cache_release(page);
+ page = NULL;
+ }
+ }
+ }
+
+ if (page) {
+ len = i_size_read(inode);
+ if (len > PAGE_CACHE_SIZE)
+ len = PAGE_CACHE_SIZE;
+ } else {
+ page = __page_cache_alloc(GFP_NOFS);
+ if (!page) {
+ err = -ENOMEM;
+ goto out;
+ }
+ err = __ceph_do_getattr(inode, page,
+ CEPH_STAT_CAP_INLINE_DATA, true);
+ if (err < 0) {
+ /* no inline data */
+ if (err == -ENODATA)
+ err = 0;
+ goto out;
+ }
+ len = err;
+ }
+
+ req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
+ ceph_vino(inode), 0, &len, 0, 1,
+ CEPH_OSD_OP_CREATE,
+ CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
+ ci->i_snap_realm->cached_context,
+ 0, 0, false);
+ if (IS_ERR(req)) {
+ err = PTR_ERR(req);
+ goto out;
+ }
+
+ ceph_osdc_build_request(req, 0, NULL, CEPH_NOSNAP, &inode->i_mtime);
+ err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
+ if (!err)
+ err = ceph_osdc_wait_request(&fsc->client->osdc, req);
+ ceph_osdc_put_request(req);
+ if (err < 0)
+ goto out;
+
+ req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
+ ceph_vino(inode), 0, &len, 1, 3,
+ CEPH_OSD_OP_WRITE,
+ CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
+ ci->i_snap_realm->cached_context,
+ ci->i_truncate_seq, ci->i_truncate_size,
+ false);
+ if (IS_ERR(req)) {
+ err = PTR_ERR(req);
+ goto out;
+ }
+
+ osd_req_op_extent_osd_data_pages(req, 1, &page, len, 0, false, false);
+
+ err = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_CMPXATTR,
+ "inline_version", &inline_version,
+ sizeof(inline_version),
+ CEPH_OSD_CMPXATTR_OP_GT,
+ CEPH_OSD_CMPXATTR_MODE_U64);
+ if (err)
+ goto out_put;
+
+ err = osd_req_op_xattr_init(req, 2, CEPH_OSD_OP_SETXATTR,
+ "inline_version", &inline_version,
+ sizeof(inline_version), 0, 0);
+ if (err)
+ goto out_put;
+
+ ceph_osdc_build_request(req, 0, NULL, CEPH_NOSNAP, &inode->i_mtime);
+ err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
+ if (!err)
+ err = ceph_osdc_wait_request(&fsc->client->osdc, req);
+out_put:
+ ceph_osdc_put_request(req);
+ if (err == -ECANCELED)
+ err = 0;
+out:
+ if (page && page != locked_page) {
+ if (from_pagecache) {
+ unlock_page(page);
+ page_cache_release(page);
+ } else
+ __free_pages(page, 0);
+ }
+
+ dout("uninline_data %p %llx.%llx inline_version %llu = %d\n",
+ inode, ceph_vinop(inode), inline_version, err);
+ return err;
+}
+
static struct vm_operations_struct ceph_vmops = {
.fault = ceph_filemap_fault,
.page_mkwrite = ceph_page_mkwrite,
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index cefca661464..b93c631c6c8 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -975,10 +975,12 @@ static int send_cap_msg(struct ceph_mds_session *session,
kuid_t uid, kgid_t gid, umode_t mode,
u64 xattr_version,
struct ceph_buffer *xattrs_buf,
- u64 follows)
+ u64 follows, bool inline_data)
{
struct ceph_mds_caps *fc;
struct ceph_msg *msg;
+ void *p;
+ size_t extra_len;
dout("send_cap_msg %s %llx %llx caps %s wanted %s dirty %s"
" seq %u/%u mseq %u follows %lld size %llu/%llu"
@@ -988,7 +990,10 @@ static int send_cap_msg(struct ceph_mds_session *session,
seq, issue_seq, mseq, follows, size, max_size,
xattr_version, xattrs_buf ? (int)xattrs_buf->vec.iov_len : 0);
- msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, sizeof(*fc), GFP_NOFS, false);
+ /* flock buffer size + inline version + inline data size */
+ extra_len = 4 + 8 + 4;
+ msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, sizeof(*fc) + extra_len,
+ GFP_NOFS, false);
if (!msg)
return -ENOMEM;
@@ -1020,6 +1025,14 @@ static int send_cap_msg(struct ceph_mds_session *session,
fc->gid = cpu_to_le32(from_kgid(&init_user_ns, gid));
fc->mode = cpu_to_le32(mode);
+ p = fc + 1;
+ /* flock buffer size */
+ ceph_encode_32(&p, 0);
+ /* inline version */
+ ceph_encode_64(&p, inline_data ? 0 : CEPH_INLINE_NONE);
+ /* inline data size */
+ ceph_encode_32(&p, 0);
+
fc->xattr_version = cpu_to_le64(xattr_version);
if (xattrs_buf) {
msg->middle = ceph_buffer_get(xattrs_buf);
@@ -1126,6 +1139,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
u64 flush_tid = 0;
int i;
int ret;
+ bool inline_data;
held = cap->issued | cap->implemented;
revoking = cap->implemented & ~cap->issued;
@@ -1209,13 +1223,15 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
xattr_version = ci->i_xattrs.version;
}
+ inline_data = ci->i_inline_version != CEPH_INLINE_NONE;
+
spin_unlock(&ci->i_ceph_lock);
ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id,
op, keep, want, flushing, seq, flush_tid, issue_seq, mseq,
size, max_size, &mtime, &atime, time_warp_seq,
uid, gid, mode, xattr_version, xattr_blob,
- follows);
+ follows, inline_data);
if (ret < 0) {
dout("error sending cap msg, must requeue %p\n", inode);
delayed = 1;
@@ -1336,7 +1352,7 @@ retry:
capsnap->time_warp_seq,
capsnap->uid, capsnap->gid, capsnap->mode,
capsnap->xattr_version, capsnap->xattr_blob,
- capsnap->follows);
+ capsnap->follows, capsnap->inline_data);
next_follows = capsnap->follows + 1;
ceph_put_cap_snap(capsnap);
@@ -2057,15 +2073,17 @@ static void __take_cap_refs(struct ceph_inode_info *ci, int got)
* requested from the MDS.
*/
static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
- int *got, loff_t endoff, int *check_max, int *err)
+ loff_t endoff, int *got, struct page **pinned_page,
+ int *check_max, int *err)
{
struct inode *inode = &ci->vfs_inode;
int ret = 0;
- int have, implemented;
+ int have, implemented, _got = 0;
int file_wanted;
dout("get_cap_refs %p need %s want %s\n", inode,
ceph_cap_string(need), ceph_cap_string(want));
+again:
spin_lock(&ci->i_ceph_lock);
/* make sure file is actually open */
@@ -2075,7 +2093,7 @@ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
ceph_cap_string(need), ceph_cap_string(file_wanted));
*err = -EBADF;
ret = 1;
- goto out;
+ goto out_unlock;
}
/* finish pending truncate */
@@ -2095,7 +2113,7 @@ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
*check_max = 1;
ret = 1;
}
- goto out;
+ goto out_unlock;
}
/*
* If a sync write is in progress, we must wait, so that we
@@ -2103,7 +2121,7 @@ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
*/
if (__ceph_have_pending_cap_snap(ci)) {
dout("get_cap_refs %p cap_snap_pending\n", inode);
- goto out;
+ goto out_unlock;
}
}
@@ -2120,18 +2138,50 @@ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
inode, ceph_cap_string(have), ceph_cap_string(not),
ceph_cap_string(revoking));
if ((revoking & not) == 0) {
- *got = need | (have & want);
- __take_cap_refs(ci, *got);
+ _got = need | (have & want);
+ __take_cap_refs(ci, _got);
ret = 1;
}
} else {
dout("get_cap_refs %p have %s needed %s\n", inode,
ceph_cap_string(have), ceph_cap_string(need));
}
-out:
+out_unlock:
spin_unlock(&ci->i_ceph_lock);
+
+ if (ci->i_inline_version != CEPH_INLINE_NONE &&
+ (_got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
+ i_size_read(inode) > 0) {
+ int ret1;
+ struct page *page = find_get_page(inode->i_mapping, 0);
+ if (page) {
+ if (PageUptodate(page)) {
+ *pinned_page = page;
+ goto out;
+ }
+ page_cache_release(page);
+ }
+ /*
+ * drop cap refs first because getattr while holding
+ * caps refs can cause deadlock.
+ */
+ ceph_put_cap_refs(ci, _got);
+ _got = 0;
+
+ /* getattr request will bring inline data into page cache */
+ ret1 = __ceph_do_getattr(inode, NULL,
+ CEPH_STAT_CAP_INLINE_DATA, true);
+ if (ret1 >= 0) {
+ ret = 0;
+ goto again;
+ }
+ *err = ret1;
+ ret = 1;
+ }
+out:
dout("get_cap_refs %p ret %d got %s\n", inode,
- ret, ceph_cap_string(*got));
+ ret, ceph_cap_string(_got));
+ *got = _got;
return ret;
}
@@ -2168,8 +2218,8 @@ static void check_max_size(struct inode *inode, loff_t endoff)
* due to a small max_size, make sure we check_max_size (and possibly
* ask the mds) so we don't get hung up indefinitely.
*/
-int ceph_get_caps(struct ceph_inode_info *ci, int need, int want, int *got,
- loff_t endoff)
+int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
+ loff_t endoff, int *got, struct page **pinned_page)
{
int check_max, ret, err;
@@ -2179,8 +2229,8 @@ retry:
check_max = 0;
err = 0;
ret = wait_event_interruptible(ci->i_cap_wq,
- try_get_cap_refs(ci, need, want,
- got, endoff,
+ try_get_cap_refs(ci, need, want, endoff,
+ got, pinned_page,
&check_max, &err));
if (err)
ret = err;
@@ -2383,6 +2433,8 @@ static void invalidate_aliases(struct inode *inode)
static void handle_cap_grant(struct ceph_mds_client *mdsc,
struct inode *inode, struct ceph_mds_caps *grant,
void *snaptrace, int snaptrace_len,
+ u64 inline_version,
+ void *inline_data, int inline_len,
struct ceph_buffer *xattr_buf,
struct ceph_mds_session *session,
struct ceph_cap *cap, int issued)
@@ -2403,6 +2455,7 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc,
bool queue_invalidate = false;
bool queue_revalidate = false;
bool deleted_inode = false;
+ bool fill_inline = false;
dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n",
inode, cap, mds, seq, ceph_cap_string(newcaps));
@@ -2576,6 +2629,13 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc,
}
BUG_ON(cap->issued & ~cap->implemented);
+ if (inline_version > 0 && inline_version >= ci->i_inline_version) {
+ ci->i_inline_version = inline_version;
+ if (ci->i_inline_version != CEPH_INLINE_NONE &&
+ (newcaps & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)))
+ fill_inline = true;
+ }
+
spin_unlock(&ci->i_ceph_lock);
if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) {
@@ -2589,6 +2649,9 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc,
wake = true;
}
+ if (fill_inline)
+ ceph_fill_inline_data(inode, NULL, inline_data, inline_len);
+
if (queue_trunc) {
ceph_queue_vmtruncate(inode);
ceph_queue_revalidate(inode);
@@ -2996,11 +3059,12 @@ void ceph_handle_caps(struct ceph_mds_session *session,
u64 cap_id;
u64 size, max_size;
u64 tid;
+ u64 inline_version = 0;
+ void *inline_data = NULL;
+ u32 inline_len = 0;
void *snaptrace;
size_t snaptrace_len;
- void *flock;
- void *end;
- u32 flock_len;
+ void *p, *end;
dout("handle_caps from mds%d\n", mds);
@@ -3021,30 +3085,37 @@ void ceph_handle_caps(struct ceph_mds_session *session,
snaptrace = h + 1;
snaptrace_len = le32_to_cpu(h->snap_trace_len);
+ p = snaptrace + snaptrace_len;
if (le16_to_cpu(msg->hdr.version) >= 2) {
- void *p = snaptrace + snaptrace_len;
+ u32 flock_len;
ceph_decode_32_safe(&p, end, flock_len, bad);
if (p + flock_len > end)
goto bad;
- flock = p;
- } else {
- flock = NULL;
- flock_len = 0;
+ p += flock_len;
}
if (le16_to_cpu(msg->hdr.version) >= 3) {
if (op == CEPH_CAP_OP_IMPORT) {
- void *p = flock + flock_len;
if (p + sizeof(*peer) > end)
goto bad;
peer = p;
+ p += sizeof(*peer);
} else if (op == CEPH_CAP_OP_EXPORT) {
/* recorded in unused fields */
peer = (void *)&h->size;
}
}
+ if (le16_to_cpu(msg->hdr.version) >= 4) {
+ ceph_decode_64_safe(&p, end, inline_version, bad);
+ ceph_decode_32_safe(&p, end, inline_len, bad);
+ if (p + inline_len > end)
+ goto bad;
+ inline_data = p;
+ p += inline_len;
+ }
+
/* lookup ino */
inode = ceph_find_inode(sb, vino);
ci = ceph_inode(inode);
@@ -3085,6 +3156,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
handle_cap_import(mdsc, inode, h, peer, session,
&cap, &issued);
handle_cap_grant(mdsc, inode, h, snaptrace, snaptrace_len,
+ inline_version, inline_data, inline_len,
msg->middle, session, cap, issued);
goto done_unlocked;
}
@@ -3105,8 +3177,9 @@ void ceph_handle_caps(struct ceph_mds_session *session,
case CEPH_CAP_OP_GRANT:
__ceph_caps_issued(ci, &issued);
issued |= __ceph_caps_dirty(ci);
- handle_cap_grant(mdsc, inode, h, NULL, 0, msg->middle,
- session, cap, issued);
+ handle_cap_grant(mdsc, inode, h, NULL, 0,
+ inline_version, inline_data, inline_len,
+ msg->middle, session, cap, issued);
goto done_unlocked;
case CEPH_CAP_OP_FLUSH_ACK:
@@ -3137,8 +3210,7 @@ flush_cap_releases:
done:
mutex_unlock(&session->s_mutex);
done_unlocked:
- if (inode)
- iput(inode);
+ iput(inode);
return;
bad:
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 681a8537b64..c241603764f 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -183,7 +183,7 @@ more:
spin_unlock(&parent->d_lock);
/* make sure a dentry wasn't dropped while we didn't have parent lock */
- if (!ceph_dir_is_complete(dir)) {
+ if (!ceph_dir_is_complete_ordered(dir)) {
dout(" lost dir complete on %p; falling back to mds\n", dir);
dput(dentry);
err = -EAGAIN;
@@ -261,10 +261,6 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
/* always start with . and .. */
if (ctx->pos == 0) {
- /* note dir version at start of readdir so we can tell
- * if any dentries get dropped */
- fi->dir_release_count = atomic_read(&ci->i_release_count);
-
dout("readdir off 0 -> '.'\n");
if (!dir_emit(ctx, ".", 1,
ceph_translate_ino(inode->i_sb, inode->i_ino),
@@ -289,7 +285,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
if ((ctx->pos == 2 || fi->dentry) &&
!ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
ceph_snap(inode) != CEPH_SNAPDIR &&
- __ceph_dir_is_complete(ci) &&
+ __ceph_dir_is_complete_ordered(ci) &&
__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
u32 shared_gen = ci->i_shared_gen;
spin_unlock(&ci->i_ceph_lock);
@@ -312,6 +308,13 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
/* proceed with a normal readdir */
+ if (ctx->pos == 2) {
+ /* note dir version at start of readdir so we can tell
+ * if any dentries get dropped */
+ fi->dir_release_count = atomic_read(&ci->i_release_count);
+ fi->dir_ordered_count = ci->i_ordered_count;
+ }
+
more:
/* do we have the correct frag content buffered? */
if (fi->frag != frag || fi->last_readdir == NULL) {
@@ -446,8 +449,12 @@ more:
*/
spin_lock(&ci->i_ceph_lock);
if (atomic_read(&ci->i_release_count) == fi->dir_release_count) {
- dout(" marking %p complete\n", inode);
- __ceph_dir_set_complete(ci, fi->dir_release_count);
+ if (ci->i_ordered_count == fi->dir_ordered_count)
+ dout(" marking %p complete and ordered\n", inode);
+ else
+ dout(" marking %p complete\n", inode);
+ __ceph_dir_set_complete(ci, fi->dir_release_count,
+ fi->dir_ordered_count);
}
spin_unlock(&ci->i_ceph_lock);
@@ -805,7 +812,9 @@ static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
acls.pagelist = NULL;
}
err = ceph_mdsc_do_request(mdsc, dir, req);
- if (!err && !req->r_reply_info.head->is_dentry)
+ if (!err &&
+ !req->r_reply_info.head->is_target &&
+ !req->r_reply_info.head->is_dentry)
err = ceph_handle_notrace_create(dir, dentry);
ceph_mdsc_put_request(req);
out:
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 9f8e3572040..ce74b394b49 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -333,6 +333,11 @@ int ceph_release(struct inode *inode, struct file *file)
return 0;
}
+enum {
+ CHECK_EOF = 1,
+ READ_INLINE = 2,
+};
+
/*
* Read a range of bytes striped over one or more objects. Iterate over
* objects we stripe over. (That's not atomic, but good enough for now.)
@@ -412,7 +417,7 @@ more:
ret = read;
/* did we bounce off eof? */
if (pos + left > inode->i_size)
- *checkeof = 1;
+ *checkeof = CHECK_EOF;
}
dout("striped_read returns %d\n", ret);
@@ -598,7 +603,7 @@ ceph_sync_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
snapc = ci->i_snap_realm->cached_context;
vino = ceph_vino(inode);
req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
- vino, pos, &len,
+ vino, pos, &len, 0,
2,/*include a 'startsync' command*/
CEPH_OSD_OP_WRITE, flags, snapc,
ci->i_truncate_seq,
@@ -609,6 +614,8 @@ ceph_sync_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
break;
}
+ osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC);
+
n = iov_iter_get_pages_alloc(from, &pages, len, &start);
if (unlikely(n < 0)) {
ret = n;
@@ -713,7 +720,7 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
snapc = ci->i_snap_realm->cached_context;
vino = ceph_vino(inode);
req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
- vino, pos, &len, 1,
+ vino, pos, &len, 0, 1,
CEPH_OSD_OP_WRITE, flags, snapc,
ci->i_truncate_seq,
ci->i_truncate_size,
@@ -803,9 +810,10 @@ static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
size_t len = iocb->ki_nbytes;
struct inode *inode = file_inode(filp);
struct ceph_inode_info *ci = ceph_inode(inode);
+ struct page *pinned_page = NULL;
ssize_t ret;
int want, got = 0;
- int checkeof = 0, read = 0;
+ int retry_op = 0, read = 0;
again:
dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
@@ -815,7 +823,7 @@ again:
want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
else
want = CEPH_CAP_FILE_CACHE;
- ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, &got, -1);
+ ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
if (ret < 0)
return ret;
@@ -827,8 +835,12 @@ again:
inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
ceph_cap_string(got));
- /* hmm, this isn't really async... */
- ret = ceph_sync_read(iocb, to, &checkeof);
+ if (ci->i_inline_version == CEPH_INLINE_NONE) {
+ /* hmm, this isn't really async... */
+ ret = ceph_sync_read(iocb, to, &retry_op);
+ } else {
+ retry_op = READ_INLINE;
+ }
} else {
dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
@@ -838,13 +850,55 @@ again:
}
dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
+ if (pinned_page) {
+ page_cache_release(pinned_page);
+ pinned_page = NULL;
+ }
ceph_put_cap_refs(ci, got);
+ if (retry_op && ret >= 0) {
+ int statret;
+ struct page *page = NULL;
+ loff_t i_size;
+ if (retry_op == READ_INLINE) {
+ page = __page_cache_alloc(GFP_NOFS);
+ if (!page)
+ return -ENOMEM;
+ }
- if (checkeof && ret >= 0) {
- int statret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
+ statret = __ceph_do_getattr(inode, page,
+ CEPH_STAT_CAP_INLINE_DATA, !!page);
+ if (statret < 0) {
+ __free_page(page);
+ if (statret == -ENODATA) {
+ BUG_ON(retry_op != READ_INLINE);
+ goto again;
+ }
+ return statret;
+ }
+
+ i_size = i_size_read(inode);
+ if (retry_op == READ_INLINE) {
+ /* does not support inline data > PAGE_SIZE */
+ if (i_size > PAGE_CACHE_SIZE) {
+ ret = -EIO;
+ } else if (iocb->ki_pos < i_size) {
+ loff_t end = min_t(loff_t, i_size,
+ iocb->ki_pos + len);
+ if (statret < end)
+ zero_user_segment(page, statret, end);
+ ret = copy_page_to_iter(page,
+ iocb->ki_pos & ~PAGE_MASK,
+ end - iocb->ki_pos, to);
+ iocb->ki_pos += ret;
+ } else {
+ ret = 0;
+ }
+ __free_pages(page, 0);
+ return ret;
+ }
/* hit EOF or hole? */
- if (statret == 0 && iocb->ki_pos < inode->i_size &&
+ if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
ret < len) {
dout("sync_read hit hole, ppos %lld < size %lld"
", reading more\n", iocb->ki_pos,
@@ -852,7 +906,7 @@ again:
read += ret;
len -= ret;
- checkeof = 0;
+ retry_op = 0;
goto again;
}
}
@@ -909,6 +963,12 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (err)
goto out;
+ if (ci->i_inline_version != CEPH_INLINE_NONE) {
+ err = ceph_uninline_data(file, NULL);
+ if (err < 0)
+ goto out;
+ }
+
retry_snap:
if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) {
err = -ENOSPC;
@@ -922,7 +982,8 @@ retry_snap:
else
want = CEPH_CAP_FILE_BUFFER;
got = 0;
- err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, &got, pos + count);
+ err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count,
+ &got, NULL);
if (err < 0)
goto out;
@@ -969,6 +1030,7 @@ retry_snap:
if (written >= 0) {
int dirty;
spin_lock(&ci->i_ceph_lock);
+ ci->i_inline_version = CEPH_INLINE_NONE;
dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
spin_unlock(&ci->i_ceph_lock);
if (dirty)
@@ -1111,7 +1173,7 @@ static int ceph_zero_partial_object(struct inode *inode,
req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
ceph_vino(inode),
offset, length,
- 1, op,
+ 0, 1, op,
CEPH_OSD_FLAG_WRITE |
CEPH_OSD_FLAG_ONDISK,
NULL, 0, 0, false);
@@ -1214,6 +1276,12 @@ static long ceph_fallocate(struct file *file, int mode,
goto unlock;
}
+ if (ci->i_inline_version != CEPH_INLINE_NONE) {
+ ret = ceph_uninline_data(file, NULL);
+ if (ret < 0)
+ goto unlock;
+ }
+
size = i_size_read(inode);
if (!(mode & FALLOC_FL_KEEP_SIZE))
endoff = offset + length;
@@ -1223,7 +1291,7 @@ static long ceph_fallocate(struct file *file, int mode,
else
want = CEPH_CAP_FILE_BUFFER;
- ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, &got, endoff);
+ ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
if (ret < 0)
goto unlock;
@@ -1240,6 +1308,7 @@ static long ceph_fallocate(struct file *file, int mode,
if (!ret) {
spin_lock(&ci->i_ceph_lock);
+ ci->i_inline_version = CEPH_INLINE_NONE;
dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
spin_unlock(&ci->i_ceph_lock);
if (dirty)
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index a5593d51d03..f61a74115be 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -387,8 +387,10 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
spin_lock_init(&ci->i_ceph_lock);
ci->i_version = 0;
+ ci->i_inline_version = 0;
ci->i_time_warp_seq = 0;
ci->i_ceph_flags = 0;
+ ci->i_ordered_count = 0;
atomic_set(&ci->i_release_count, 1);
atomic_set(&ci->i_complete_count, 0);
ci->i_symlink = NULL;
@@ -657,7 +659,7 @@ void ceph_fill_file_time(struct inode *inode, int issued,
* Populate an inode based on info from mds. May be called on new or
* existing inodes.
*/
-static int fill_inode(struct inode *inode,
+static int fill_inode(struct inode *inode, struct page *locked_page,
struct ceph_mds_reply_info_in *iinfo,
struct ceph_mds_reply_dirfrag *dirinfo,
struct ceph_mds_session *session,
@@ -675,6 +677,7 @@ static int fill_inode(struct inode *inode,
bool wake = false;
bool queue_trunc = false;
bool new_version = false;
+ bool fill_inline = false;
dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
inode, ceph_vinop(inode), le64_to_cpu(info->version),
@@ -845,7 +848,8 @@ static int fill_inode(struct inode *inode,
(issued & CEPH_CAP_FILE_EXCL) == 0 &&
!__ceph_dir_is_complete(ci)) {
dout(" marking %p complete (empty)\n", inode);
- __ceph_dir_set_complete(ci, atomic_read(&ci->i_release_count));
+ __ceph_dir_set_complete(ci, atomic_read(&ci->i_release_count),
+ ci->i_ordered_count);
}
/* were we issued a capability? */
@@ -873,8 +877,23 @@ static int fill_inode(struct inode *inode,
ceph_vinop(inode));
__ceph_get_fmode(ci, cap_fmode);
}
+
+ if (iinfo->inline_version > 0 &&
+ iinfo->inline_version >= ci->i_inline_version) {
+ int cache_caps = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
+ ci->i_inline_version = iinfo->inline_version;
+ if (ci->i_inline_version != CEPH_INLINE_NONE &&
+ (locked_page ||
+ (le32_to_cpu(info->cap.caps) & cache_caps)))
+ fill_inline = true;
+ }
+
spin_unlock(&ci->i_ceph_lock);
+ if (fill_inline)
+ ceph_fill_inline_data(inode, locked_page,
+ iinfo->inline_data, iinfo->inline_len);
+
if (wake)
wake_up_all(&ci->i_cap_wq);
@@ -1062,7 +1081,8 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
struct inode *dir = req->r_locked_dir;
if (dir) {
- err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
+ err = fill_inode(dir, NULL,
+ &rinfo->diri, rinfo->dirfrag,
session, req->r_request_started, -1,
&req->r_caps_reservation);
if (err < 0)
@@ -1132,7 +1152,7 @@ retry_lookup:
}
req->r_target_inode = in;
- err = fill_inode(in, &rinfo->targeti, NULL,
+ err = fill_inode(in, req->r_locked_page, &rinfo->targeti, NULL,
session, req->r_request_started,
(!req->r_aborted && rinfo->head->result == 0) ?
req->r_fmode : -1,
@@ -1204,8 +1224,8 @@ retry_lookup:
ceph_invalidate_dentry_lease(dn);
/* d_move screws up sibling dentries' offsets */
- ceph_dir_clear_complete(dir);
- ceph_dir_clear_complete(olddir);
+ ceph_dir_clear_ordered(dir);
+ ceph_dir_clear_ordered(olddir);
dout("dn %p gets new offset %lld\n", req->r_old_dentry,
ceph_dentry(req->r_old_dentry)->offset);
@@ -1217,6 +1237,7 @@ retry_lookup:
if (!rinfo->head->is_target) {
dout("fill_trace null dentry\n");
if (dn->d_inode) {
+ ceph_dir_clear_ordered(dir);
dout("d_delete %p\n", dn);
d_delete(dn);
} else {
@@ -1233,7 +1254,7 @@ retry_lookup:
/* attach proper inode */
if (!dn->d_inode) {
- ceph_dir_clear_complete(dir);
+ ceph_dir_clear_ordered(dir);
ihold(in);
dn = splice_dentry(dn, in, &have_lease);
if (IS_ERR(dn)) {
@@ -1263,7 +1284,7 @@ retry_lookup:
BUG_ON(!dir);
BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR);
dout(" linking snapped dir %p to dn %p\n", in, dn);
- ceph_dir_clear_complete(dir);
+ ceph_dir_clear_ordered(dir);
ihold(in);
dn = splice_dentry(dn, in, NULL);
if (IS_ERR(dn)) {
@@ -1300,7 +1321,7 @@ static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
dout("new_inode badness got %d\n", err);
continue;
}
- rc = fill_inode(in, &rinfo->dir_in[i], NULL, session,
+ rc = fill_inode(in, NULL, &rinfo->dir_in[i], NULL, session,
req->r_request_started, -1,
&req->r_caps_reservation);
if (rc < 0) {
@@ -1416,7 +1437,7 @@ retry_lookup:
}
}
- if (fill_inode(in, &rinfo->dir_in[i], NULL, session,
+ if (fill_inode(in, NULL, &rinfo->dir_in[i], NULL, session,
req->r_request_started, -1,
&req->r_caps_reservation) < 0) {
pr_err("fill_inode badness on %p\n", in);
@@ -1899,7 +1920,8 @@ out_put:
* Verify that we have a lease on the given mask. If not,
* do a getattr against an mds.
*/
-int ceph_do_getattr(struct inode *inode, int mask, bool force)
+int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
+ int mask, bool force)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
struct ceph_mds_client *mdsc = fsc->mdsc;
@@ -1911,7 +1933,8 @@ int ceph_do_getattr(struct inode *inode, int mask, bool force)
return 0;
}
- dout("do_getattr inode %p mask %s mode 0%o\n", inode, ceph_cap_string(mask), inode->i_mode);
+ dout("do_getattr inode %p mask %s mode 0%o\n",
+ inode, ceph_cap_string(mask), inode->i_mode);
if (!force && ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
return 0;
@@ -1922,7 +1945,19 @@ int ceph_do_getattr(struct inode *inode, int mask, bool force)
ihold(inode);
req->r_num_caps = 1;
req->r_args.getattr.mask = cpu_to_le32(mask);
+ req->r_locked_page = locked_page;
err = ceph_mdsc_do_request(mdsc, NULL, req);
+ if (locked_page && err == 0) {
+ u64 inline_version = req->r_reply_info.targeti.inline_version;
+ if (inline_version == 0) {
+ /* the reply is supposed to contain inline data */
+ err = -EINVAL;
+ } else if (inline_version == CEPH_INLINE_NONE) {
+ err = -ENODATA;
+ } else {
+ err = req->r_reply_info.targeti.inline_len;
+ }
+ }
ceph_mdsc_put_request(req);
dout("do_getattr result=%d\n", err);
return err;
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index fbc39c47bac..c35c5c614e3 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -9,6 +9,8 @@
#include <linux/ceph/pagelist.h>
static u64 lock_secret;
+static int ceph_lock_wait_for_completion(struct ceph_mds_client *mdsc,
+ struct ceph_mds_request *req);
static inline u64 secure_addr(void *addr)
{
@@ -40,6 +42,9 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file,
u64 length = 0;
u64 owner;
+ if (operation != CEPH_MDS_OP_SETFILELOCK || cmd == CEPH_LOCK_UNLOCK)
+ wait = 0;
+
req = ceph_mdsc_create_request(mdsc, operation, USE_AUTH_MDS);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -68,6 +73,9 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file,
req->r_args.filelock_change.length = cpu_to_le64(length);
req->r_args.filelock_change.wait = wait;
+ if (wait)
+ req->r_wait_for_completion = ceph_lock_wait_for_completion;
+
err = ceph_mdsc_do_request(mdsc, inode, req);
if (operation == CEPH_MDS_OP_GETFILELOCK) {
@@ -96,6 +104,52 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file,
return err;
}
+static int ceph_lock_wait_for_completion(struct ceph_mds_client *mdsc,
+ struct ceph_mds_request *req)
+{
+ struct ceph_mds_request *intr_req;
+ struct inode *inode = req->r_inode;
+ int err, lock_type;
+
+ BUG_ON(req->r_op != CEPH_MDS_OP_SETFILELOCK);
+ if (req->r_args.filelock_change.rule == CEPH_LOCK_FCNTL)
+ lock_type = CEPH_LOCK_FCNTL_INTR;
+ else if (req->r_args.filelock_change.rule == CEPH_LOCK_FLOCK)
+ lock_type = CEPH_LOCK_FLOCK_INTR;
+ else
+ BUG_ON(1);
+ BUG_ON(req->r_args.filelock_change.type == CEPH_LOCK_UNLOCK);
+
+ err = wait_for_completion_interruptible(&req->r_completion);
+ if (!err)
+ return 0;
+
+ dout("ceph_lock_wait_for_completion: request %llu was interrupted\n",
+ req->r_tid);
+
+ intr_req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETFILELOCK,
+ USE_AUTH_MDS);
+ if (IS_ERR(intr_req))
+ return PTR_ERR(intr_req);
+
+ intr_req->r_inode = inode;
+ ihold(inode);
+ intr_req->r_num_caps = 1;
+
+ intr_req->r_args.filelock_change = req->r_args.filelock_change;
+ intr_req->r_args.filelock_change.rule = lock_type;
+ intr_req->r_args.filelock_change.type = CEPH_LOCK_UNLOCK;
+
+ err = ceph_mdsc_do_request(mdsc, inode, intr_req);
+ ceph_mdsc_put_request(intr_req);
+
+ if (err && err != -ERESTARTSYS)
+ return err;
+
+ wait_for_completion(&req->r_completion);
+ return 0;
+}
+
/**
* Attempt to set an fcntl lock.
* For now, this just goes away to the server. Later it may be more awesome.
@@ -143,11 +197,6 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
err);
}
}
-
- } else if (err == -ERESTARTSYS) {
- dout("undoing lock\n");
- ceph_lock_message(CEPH_LOCK_FCNTL, op, file,
- CEPH_LOCK_UNLOCK, 0, fl);
}
return err;
}
@@ -186,11 +235,6 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
file, CEPH_LOCK_UNLOCK, 0, fl);
dout("got %d on flock_lock_file_wait, undid lock", err);
}
- } else if (err == -ERESTARTSYS) {
- dout("undoing lock\n");
- ceph_lock_message(CEPH_LOCK_FLOCK,
- CEPH_MDS_OP_SETFILELOCK,
- file, CEPH_LOCK_UNLOCK, 0, fl);
}
return err;
}
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index a92d3f5c6c1..d2171f4a698 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -89,6 +89,16 @@ static int parse_reply_info_in(void **p, void *end,
ceph_decode_need(p, end, info->xattr_len, bad);
info->xattr_data = *p;
*p += info->xattr_len;
+
+ if (features & CEPH_FEATURE_MDS_INLINE_DATA) {
+ ceph_decode_64_safe(p, end, info->inline_version, bad);
+ ceph_decode_32_safe(p, end, info->inline_len, bad);
+ ceph_decode_need(p, end, info->inline_len, bad);
+ info->inline_data = *p;
+ *p += info->inline_len;
+ } else
+ info->inline_version = CEPH_INLINE_NONE;
+
return 0;
bad:
return err;
@@ -524,8 +534,7 @@ void ceph_mdsc_release_request(struct kref *kref)
}
if (req->r_locked_dir)
ceph_put_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN);
- if (req->r_target_inode)
- iput(req->r_target_inode);
+ iput(req->r_target_inode);
if (req->r_dentry)
dput(req->r_dentry);
if (req->r_old_dentry)
@@ -861,8 +870,11 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
/*
* Serialize client metadata into waiting buffer space, using
* the format that userspace expects for map<string, string>
+ *
+ * ClientSession messages with metadata are v2
*/
- msg->hdr.version = 2; /* ClientSession messages with metadata are v2 */
+ msg->hdr.version = cpu_to_le16(2);
+ msg->hdr.compat_version = cpu_to_le16(1);
/* The write pointer, following the session_head structure */
p = msg->front.iov_base + sizeof(*h);
@@ -1066,8 +1078,7 @@ out:
session->s_cap_iterator = NULL;
spin_unlock(&session->s_cap_lock);
- if (last_inode)
- iput(last_inode);
+ iput(last_inode);
if (old_cap)
ceph_put_cap(session->s_mdsc, old_cap);
@@ -1874,7 +1885,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
goto out_free2;
}
- msg->hdr.version = 2;
+ msg->hdr.version = cpu_to_le16(2);
msg->hdr.tid = cpu_to_le64(req->r_tid);
head = msg->front.iov_base;
@@ -2208,6 +2219,8 @@ int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
&req->r_completion, req->r_timeout);
if (err == 0)
err = -EIO;
+ } else if (req->r_wait_for_completion) {
+ err = req->r_wait_for_completion(mdsc, req);
} else {
err = wait_for_completion_killable(&req->r_completion);
}
@@ -3744,6 +3757,20 @@ static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con,
return msg;
}
+static int sign_message(struct ceph_connection *con, struct ceph_msg *msg)
+{
+ struct ceph_mds_session *s = con->private;
+ struct ceph_auth_handshake *auth = &s->s_auth;
+ return ceph_auth_sign_message(auth, msg);
+}
+
+static int check_message_signature(struct ceph_connection *con, struct ceph_msg *msg)
+{
+ struct ceph_mds_session *s = con->private;
+ struct ceph_auth_handshake *auth = &s->s_auth;
+ return ceph_auth_check_message_signature(auth, msg);
+}
+
static const struct ceph_connection_operations mds_con_ops = {
.get = con_get,
.put = con_put,
@@ -3753,6 +3780,8 @@ static const struct ceph_connection_operations mds_con_ops = {
.invalidate_authorizer = invalidate_authorizer,
.peer_reset = peer_reset,
.alloc_msg = mds_alloc_msg,
+ .sign_message = sign_message,
+ .check_message_signature = check_message_signature,
};
/* eof */
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 3288359353e..e2817d00f7d 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -41,6 +41,9 @@ struct ceph_mds_reply_info_in {
char *symlink;
u32 xattr_len;
char *xattr_data;
+ u64 inline_version;
+ u32 inline_len;
+ char *inline_data;
};
/*
@@ -166,6 +169,11 @@ struct ceph_mds_client;
*/
typedef void (*ceph_mds_request_callback_t) (struct ceph_mds_client *mdsc,
struct ceph_mds_request *req);
+/*
+ * wait for request completion callback
+ */
+typedef int (*ceph_mds_request_wait_callback_t) (struct ceph_mds_client *mdsc,
+ struct ceph_mds_request *req);
/*
* an in-flight mds request
@@ -215,6 +223,7 @@ struct ceph_mds_request {
int r_request_release_offset;
struct ceph_msg *r_reply;
struct ceph_mds_reply_info_parsed r_reply_info;
+ struct page *r_locked_page;
int r_err;
bool r_aborted;
@@ -239,6 +248,7 @@ struct ceph_mds_request {
struct completion r_completion;
struct completion r_safe_completion;
ceph_mds_request_callback_t r_callback;
+ ceph_mds_request_wait_callback_t r_wait_for_completion;
struct list_head r_unsafe_item; /* per-session unsafe list item */
bool r_got_unsafe, r_got_safe, r_got_result;
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index f01645a2775..ce35fbd4ba5 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -288,6 +288,9 @@ static int cmpu64_rev(const void *a, const void *b)
return 0;
}
+
+static struct ceph_snap_context *empty_snapc;
+
/*
* build the snap context for a given realm.
*/
@@ -328,6 +331,12 @@ static int build_snap_context(struct ceph_snap_realm *realm)
return 0;
}
+ if (num == 0 && realm->seq == empty_snapc->seq) {
+ ceph_get_snap_context(empty_snapc);
+ snapc = empty_snapc;
+ goto done;
+ }
+
/* alloc new snap context */
err = -ENOMEM;
if (num > (SIZE_MAX - sizeof(*snapc)) / sizeof(u64))
@@ -365,8 +374,8 @@ static int build_snap_context(struct ceph_snap_realm *realm)
realm->ino, realm, snapc, snapc->seq,
(unsigned int) snapc->num_snaps);
- if (realm->cached_context)
- ceph_put_snap_context(realm->cached_context);
+done:
+ ceph_put_snap_context(realm->cached_context);
realm->cached_context = snapc;
return 0;
@@ -466,6 +475,9 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
cap_snap. lucky us. */
dout("queue_cap_snap %p already pending\n", inode);
kfree(capsnap);
+ } else if (ci->i_snap_realm->cached_context == empty_snapc) {
+ dout("queue_cap_snap %p empty snapc\n", inode);
+ kfree(capsnap);
} else if (dirty & (CEPH_CAP_AUTH_EXCL|CEPH_CAP_XATTR_EXCL|
CEPH_CAP_FILE_EXCL|CEPH_CAP_FILE_WR)) {
struct ceph_snap_context *snapc = ci->i_head_snapc;
@@ -504,6 +516,8 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
capsnap->xattr_version = 0;
}
+ capsnap->inline_data = ci->i_inline_version != CEPH_INLINE_NONE;
+
/* dirty page count moved from _head to this cap_snap;
all subsequent writes page dirties occur _after_ this
snapshot. */
@@ -590,15 +604,13 @@ static void queue_realm_cap_snaps(struct ceph_snap_realm *realm)
if (!inode)
continue;
spin_unlock(&realm->inodes_with_caps_lock);
- if (lastinode)
- iput(lastinode);
+ iput(lastinode);
lastinode = inode;
ceph_queue_cap_snap(ci);
spin_lock(&realm->inodes_with_caps_lock);
}
spin_unlock(&realm->inodes_with_caps_lock);
- if (lastinode)
- iput(lastinode);
+ iput(lastinode);
list_for_each_entry(child, &realm->children, child_item) {
dout("queue_realm_cap_snaps %p %llx queue child %p %llx\n",
@@ -928,5 +940,16 @@ out:
return;
}
+int __init ceph_snap_init(void)
+{
+ empty_snapc = ceph_create_snap_context(0, GFP_NOFS);
+ if (!empty_snapc)
+ return -ENOMEM;
+ empty_snapc->seq = 1;
+ return 0;
+}
-
+void ceph_snap_exit(void)
+{
+ ceph_put_snap_context(empty_snapc);
+}
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index f6e12377335..50f06cddc94 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -515,7 +515,8 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
struct ceph_fs_client *fsc;
const u64 supported_features =
CEPH_FEATURE_FLOCK |
- CEPH_FEATURE_DIRLAYOUTHASH;
+ CEPH_FEATURE_DIRLAYOUTHASH |
+ CEPH_FEATURE_MDS_INLINE_DATA;
const u64 required_features = 0;
int page_count;
size_t size;
@@ -1017,9 +1018,6 @@ static struct file_system_type ceph_fs_type = {
};
MODULE_ALIAS_FS("ceph");
-#define _STRINGIFY(x) #x
-#define STRINGIFY(x) _STRINGIFY(x)
-
static int __init init_ceph(void)
{
int ret = init_caches();
@@ -1028,15 +1026,20 @@ static int __init init_ceph(void)
ceph_flock_init();
ceph_xattr_init();
+ ret = ceph_snap_init();
+ if (ret)
+ goto out_xattr;
ret = register_filesystem(&ceph_fs_type);
if (ret)
- goto out_icache;
+ goto out_snap;
pr_info("loaded (mds proto %d)\n", CEPH_MDSC_PROTOCOL);
return 0;
-out_icache:
+out_snap:
+ ceph_snap_exit();
+out_xattr:
ceph_xattr_exit();
destroy_caches();
out:
@@ -1047,6 +1050,7 @@ static void __exit exit_ceph(void)
{
dout("exit_ceph\n");
unregister_filesystem(&ceph_fs_type);
+ ceph_snap_exit();
ceph_xattr_exit();
destroy_caches();
}
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index b82f507979b..e1aa32d0759 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -161,6 +161,7 @@ struct ceph_cap_snap {
u64 time_warp_seq;
int writing; /* a sync write is still in progress */
int dirty_pages; /* dirty pages awaiting writeback */
+ bool inline_data;
};
static inline void ceph_put_cap_snap(struct ceph_cap_snap *capsnap)
@@ -253,9 +254,11 @@ struct ceph_inode_info {
spinlock_t i_ceph_lock;
u64 i_version;
+ u64 i_inline_version;
u32 i_time_warp_seq;
unsigned i_ceph_flags;
+ int i_ordered_count;
atomic_t i_release_count;
atomic_t i_complete_count;
@@ -434,14 +437,19 @@ static inline struct inode *ceph_find_inode(struct super_block *sb,
/*
* Ceph inode.
*/
-#define CEPH_I_NODELAY 4 /* do not delay cap release */
-#define CEPH_I_FLUSH 8 /* do not delay flush of dirty metadata */
-#define CEPH_I_NOFLUSH 16 /* do not flush dirty caps */
+#define CEPH_I_DIR_ORDERED 1 /* dentries in dir are ordered */
+#define CEPH_I_NODELAY 4 /* do not delay cap release */
+#define CEPH_I_FLUSH 8 /* do not delay flush of dirty metadata */
+#define CEPH_I_NOFLUSH 16 /* do not flush dirty caps */
static inline void __ceph_dir_set_complete(struct ceph_inode_info *ci,
- int release_count)
+ int release_count, int ordered_count)
{
atomic_set(&ci->i_complete_count, release_count);
+ if (ci->i_ordered_count == ordered_count)
+ ci->i_ceph_flags |= CEPH_I_DIR_ORDERED;
+ else
+ ci->i_ceph_flags &= ~CEPH_I_DIR_ORDERED;
}
static inline void __ceph_dir_clear_complete(struct ceph_inode_info *ci)
@@ -455,16 +463,35 @@ static inline bool __ceph_dir_is_complete(struct ceph_inode_info *ci)
atomic_read(&ci->i_release_count);
}
+static inline bool __ceph_dir_is_complete_ordered(struct ceph_inode_info *ci)
+{
+ return __ceph_dir_is_complete(ci) &&
+ (ci->i_ceph_flags & CEPH_I_DIR_ORDERED);
+}
+
static inline void ceph_dir_clear_complete(struct inode *inode)
{
__ceph_dir_clear_complete(ceph_inode(inode));
}
-static inline bool ceph_dir_is_complete(struct inode *inode)
+static inline void ceph_dir_clear_ordered(struct inode *inode)
{
- return __ceph_dir_is_complete(ceph_inode(inode));
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ spin_lock(&ci->i_ceph_lock);
+ ci->i_ordered_count++;
+ ci->i_ceph_flags &= ~CEPH_I_DIR_ORDERED;
+ spin_unlock(&ci->i_ceph_lock);
}
+static inline bool ceph_dir_is_complete_ordered(struct inode *inode)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ bool ret;
+ spin_lock(&ci->i_ceph_lock);
+ ret = __ceph_dir_is_complete_ordered(ci);
+ spin_unlock(&ci->i_ceph_lock);
+ return ret;
+}
/* find a specific frag @f */
extern struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci,
@@ -580,6 +607,7 @@ struct ceph_file_info {
char *last_name; /* last entry in previous chunk */
struct dentry *dentry; /* next dentry (for dcache readdir) */
int dir_release_count;
+ int dir_ordered_count;
/* used for -o dirstat read() on directory thing */
char *dir_info;
@@ -673,6 +701,8 @@ extern void ceph_queue_cap_snap(struct ceph_inode_info *ci);
extern int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
struct ceph_cap_snap *capsnap);
extern void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc);
+extern int ceph_snap_init(void);
+extern void ceph_snap_exit(void);
/*
* a cap_snap is "pending" if it is still awaiting an in-progress
@@ -715,7 +745,12 @@ extern void ceph_queue_vmtruncate(struct inode *inode);
extern void ceph_queue_invalidate(struct inode *inode);
extern void ceph_queue_writeback(struct inode *inode);
-extern int ceph_do_getattr(struct inode *inode, int mask, bool force);
+extern int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
+ int mask, bool force);
+static inline int ceph_do_getattr(struct inode *inode, int mask, bool force)
+{
+ return __ceph_do_getattr(inode, NULL, mask, force);
+}
extern int ceph_permission(struct inode *inode, int mask);
extern int ceph_setattr(struct dentry *dentry, struct iattr *attr);
extern int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
@@ -830,7 +865,7 @@ extern int ceph_encode_dentry_release(void **p, struct dentry *dn,
int mds, int drop, int unless);
extern int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
- int *got, loff_t endoff);
+ loff_t endoff, int *got, struct page **pinned_page);
/* for counting open files by mode */
static inline void __ceph_get_fmode(struct ceph_inode_info *ci, int mode)
@@ -852,7 +887,9 @@ extern int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
struct file *file, unsigned flags, umode_t mode,
int *opened);
extern int ceph_release(struct inode *inode, struct file *filp);
-
+extern void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
+ char *data, size_t len);
+int ceph_uninline_data(struct file *filp, struct page *locked_page);
/* dir.c */
extern const struct file_operations ceph_dir_fops;
extern const struct inode_operations ceph_dir_iops;
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 678b0d2bbbc..5a492caf34c 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -854,7 +854,7 @@ static int ceph_sync_setxattr(struct dentry *dentry, const char *name,
struct ceph_pagelist *pagelist = NULL;
int err;
- if (value) {
+ if (size > 0) {
/* copy value into pagelist */
pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
if (!pagelist)
@@ -864,7 +864,7 @@ static int ceph_sync_setxattr(struct dentry *dentry, const char *name,
err = ceph_pagelist_append(pagelist, value, size);
if (err)
goto out;
- } else {
+ } else if (!value) {
flags |= CEPH_XATTR_REMOVE;
}
@@ -1001,6 +1001,9 @@ int ceph_setxattr(struct dentry *dentry, const char *name,
if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
return generic_setxattr(dentry, name, value, size, flags);
+ if (size == 0)
+ value = ""; /* empty EA, do not remove */
+
return __ceph_setxattr(dentry, name, value, size, flags);
}
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index 7ff025966e4..86c893884eb 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -426,7 +426,6 @@ static int coda_venus_readdir(struct file *coda_file, struct dir_context *ctx)
struct coda_file_info *cfi;
struct coda_inode_info *cii;
struct file *host_file;
- struct dentry *de;
struct venus_dirent *vdir;
unsigned long vdir_size = offsetof(struct venus_dirent, d_name);
unsigned int type;
@@ -438,8 +437,7 @@ static int coda_venus_readdir(struct file *coda_file, struct dir_context *ctx)
BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
host_file = cfi->cfi_container;
- de = coda_file->f_path.dentry;
- cii = ITOC(de->d_inode);
+ cii = ITOC(file_inode(coda_file));
vdir = kmalloc(sizeof(*vdir), GFP_KERNEL);
if (!vdir) return -ENOMEM;
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index c2d6604667b..719e1ce1c60 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -1917,7 +1917,6 @@ ecryptfs_decode_from_filename(unsigned char *dst, size_t *dst_size,
break;
case 2:
dst[dst_byte_offset++] |= (src_byte);
- dst[dst_byte_offset] = 0;
current_bit_offset = 0;
break;
}
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index 80154ec4f8c..6f4e659f508 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -190,23 +190,11 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
{
int rc = 0;
struct ecryptfs_crypt_stat *crypt_stat = NULL;
- struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
struct dentry *ecryptfs_dentry = file->f_path.dentry;
/* Private value of ecryptfs_dentry allocated in
* ecryptfs_lookup() */
struct ecryptfs_file_info *file_info;
- mount_crypt_stat = &ecryptfs_superblock_to_private(
- ecryptfs_dentry->d_sb)->mount_crypt_stat;
- if ((mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)
- && ((file->f_flags & O_WRONLY) || (file->f_flags & O_RDWR)
- || (file->f_flags & O_CREAT) || (file->f_flags & O_TRUNC)
- || (file->f_flags & O_APPEND))) {
- printk(KERN_WARNING "Mount has encrypted view enabled; "
- "files may only be read\n");
- rc = -EPERM;
- goto out;
- }
/* Released in ecryptfs_release or end of function if failure */
file_info = kmem_cache_zalloc(ecryptfs_file_info_cache, GFP_KERNEL);
ecryptfs_set_file_private(file, file_info);
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 635e8e16a5b..917bd5c9776 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -100,12 +100,12 @@ int ecryptfs_parse_packet_length(unsigned char *data, size_t *size,
(*size) = 0;
if (data[0] < 192) {
/* One-byte length */
- (*size) = (unsigned char)data[0];
+ (*size) = data[0];
(*length_size) = 1;
} else if (data[0] < 224) {
/* Two-byte length */
- (*size) = (((unsigned char)(data[0]) - 192) * 256);
- (*size) += ((unsigned char)(data[1]) + 192);
+ (*size) = (data[0] - 192) * 256;
+ (*size) += data[1] + 192;
(*length_size) = 2;
} else if (data[0] == 255) {
/* If support is added, adjust ECRYPTFS_MAX_PKT_LEN_SIZE */
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index c4cd1fd86cc..d9eb84bda55 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -493,6 +493,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
{
struct super_block *s;
struct ecryptfs_sb_info *sbi;
+ struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
struct ecryptfs_dentry_info *root_info;
const char *err = "Getting sb failed";
struct inode *inode;
@@ -511,6 +512,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
err = "Error parsing options";
goto out;
}
+ mount_crypt_stat = &sbi->mount_crypt_stat;
s = sget(fs_type, NULL, set_anon_super, flags, NULL);
if (IS_ERR(s)) {
@@ -557,11 +559,19 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
/**
* Set the POSIX ACL flag based on whether they're enabled in the lower
- * mount. Force a read-only eCryptfs mount if the lower mount is ro.
- * Allow a ro eCryptfs mount even when the lower mount is rw.
+ * mount.
*/
s->s_flags = flags & ~MS_POSIXACL;
- s->s_flags |= path.dentry->d_sb->s_flags & (MS_RDONLY | MS_POSIXACL);
+ s->s_flags |= path.dentry->d_sb->s_flags & MS_POSIXACL;
+
+ /**
+ * Force a read-only eCryptfs mount when:
+ * 1) The lower mount is ro
+ * 2) The ecryptfs_encrypted_view mount option is specified
+ */
+ if (path.dentry->d_sb->s_flags & MS_RDONLY ||
+ mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)
+ s->s_flags |= MS_RDONLY;
s->s_maxbytes = path.dentry->d_sb->s_maxbytes;
s->s_blocksize = path.dentry->d_sb->s_blocksize;
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 503ea15dc5d..370420bfae8 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -267,7 +267,6 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
handle_t *handle;
ext4_lblk_t orig_blk_offset, donor_blk_offset;
unsigned long blocksize = orig_inode->i_sb->s_blocksize;
- unsigned int w_flags = 0;
unsigned int tmp_data_size, data_size, replaced_size;
int err2, jblocks, retries = 0;
int replaced_count = 0;
@@ -288,9 +287,6 @@ again:
return 0;
}
- if (segment_eq(get_fs(), KERNEL_DS))
- w_flags |= AOP_FLAG_UNINTERRUPTIBLE;
-
orig_blk_offset = orig_page_offset * blocks_per_page +
data_offset_in_page;
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index 966ace8b243..28d0c7abba1 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -415,7 +415,7 @@ err_unlock:
err_region:
unregister_chrdev_region(devt, 1);
err:
- fuse_conn_kill(fc);
+ fuse_abort_conn(fc);
goto out;
}
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index ca887314aba..ba1107977f2 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -511,6 +511,35 @@ void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
}
EXPORT_SYMBOL_GPL(fuse_request_send);
+ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
+{
+ struct fuse_req *req;
+ ssize_t ret;
+
+ req = fuse_get_req(fc, 0);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ req->in.h.opcode = args->in.h.opcode;
+ req->in.h.nodeid = args->in.h.nodeid;
+ req->in.numargs = args->in.numargs;
+ memcpy(req->in.args, args->in.args,
+ args->in.numargs * sizeof(struct fuse_in_arg));
+ req->out.argvar = args->out.argvar;
+ req->out.numargs = args->out.numargs;
+ memcpy(req->out.args, args->out.args,
+ args->out.numargs * sizeof(struct fuse_arg));
+ fuse_request_send(fc, req);
+ ret = req->out.h.error;
+ if (!ret && args->out.argvar) {
+ BUG_ON(args->out.numargs != 1);
+ ret = req->out.args[0].size;
+ }
+ fuse_put_request(fc, req);
+
+ return ret;
+}
+
static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
struct fuse_req *req)
{
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index df562cc8776..252b8a5de8b 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -145,22 +145,22 @@ static void fuse_invalidate_entry(struct dentry *entry)
fuse_invalidate_entry_cache(entry);
}
-static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_req *req,
+static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_args *args,
u64 nodeid, struct qstr *name,
struct fuse_entry_out *outarg)
{
memset(outarg, 0, sizeof(struct fuse_entry_out));
- req->in.h.opcode = FUSE_LOOKUP;
- req->in.h.nodeid = nodeid;
- req->in.numargs = 1;
- req->in.args[0].size = name->len + 1;
- req->in.args[0].value = name->name;
- req->out.numargs = 1;
+ args->in.h.opcode = FUSE_LOOKUP;
+ args->in.h.nodeid = nodeid;
+ args->in.numargs = 1;
+ args->in.args[0].size = name->len + 1;
+ args->in.args[0].value = name->name;
+ args->out.numargs = 1;
if (fc->minor < 9)
- req->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
+ args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
else
- req->out.args[0].size = sizeof(struct fuse_entry_out);
- req->out.args[0].value = outarg;
+ args->out.args[0].size = sizeof(struct fuse_entry_out);
+ args->out.args[0].value = outarg;
}
u64 fuse_get_attr_version(struct fuse_conn *fc)
@@ -200,9 +200,8 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
goto invalid;
else if (time_before64(fuse_dentry_time(entry), get_jiffies_64()) ||
(flags & LOOKUP_REVAL)) {
- int err;
struct fuse_entry_out outarg;
- struct fuse_req *req;
+ FUSE_ARGS(args);
struct fuse_forget_link *forget;
u64 attr_version;
@@ -215,31 +214,23 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
goto out;
fc = get_fuse_conn(inode);
- req = fuse_get_req_nopages(fc);
- ret = PTR_ERR(req);
- if (IS_ERR(req))
- goto out;
forget = fuse_alloc_forget();
- if (!forget) {
- fuse_put_request(fc, req);
- ret = -ENOMEM;
+ ret = -ENOMEM;
+ if (!forget)
goto out;
- }
attr_version = fuse_get_attr_version(fc);
parent = dget_parent(entry);
- fuse_lookup_init(fc, req, get_node_id(parent->d_inode),
+ fuse_lookup_init(fc, &args, get_node_id(parent->d_inode),
&entry->d_name, &outarg);
- fuse_request_send(fc, req);
+ ret = fuse_simple_request(fc, &args);
dput(parent);
- err = req->out.h.error;
- fuse_put_request(fc, req);
/* Zero nodeid is same as -ENOENT */
- if (!err && !outarg.nodeid)
- err = -ENOENT;
- if (!err) {
+ if (!ret && !outarg.nodeid)
+ ret = -ENOENT;
+ if (!ret) {
fi = get_fuse_inode(inode);
if (outarg.nodeid != get_node_id(inode)) {
fuse_queue_forget(fc, forget, outarg.nodeid, 1);
@@ -250,7 +241,9 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
spin_unlock(&fc->lock);
}
kfree(forget);
- if (err || (outarg.attr.mode ^ inode->i_mode) & S_IFMT)
+ if (ret == -ENOMEM)
+ goto out;
+ if (ret || (outarg.attr.mode ^ inode->i_mode) & S_IFMT)
goto invalid;
fuse_change_attributes(inode, &outarg.attr,
@@ -296,7 +289,7 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, struct qstr *name,
struct fuse_entry_out *outarg, struct inode **inode)
{
struct fuse_conn *fc = get_fuse_conn_super(sb);
- struct fuse_req *req;
+ FUSE_ARGS(args);
struct fuse_forget_link *forget;
u64 attr_version;
int err;
@@ -306,24 +299,16 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, struct qstr *name,
if (name->len > FUSE_NAME_MAX)
goto out;
- req = fuse_get_req_nopages(fc);
- err = PTR_ERR(req);
- if (IS_ERR(req))
- goto out;
forget = fuse_alloc_forget();
err = -ENOMEM;
- if (!forget) {
- fuse_put_request(fc, req);
+ if (!forget)
goto out;
- }
attr_version = fuse_get_attr_version(fc);
- fuse_lookup_init(fc, req, nodeid, name, outarg);
- fuse_request_send(fc, req);
- err = req->out.h.error;
- fuse_put_request(fc, req);
+ fuse_lookup_init(fc, &args, nodeid, name, outarg);
+ err = fuse_simple_request(fc, &args);
/* Zero nodeid is same as -ENOENT, but with valid timeout */
if (err || !outarg->nodeid)
goto out_put_forget;
@@ -405,7 +390,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
int err;
struct inode *inode;
struct fuse_conn *fc = get_fuse_conn(dir);
- struct fuse_req *req;
+ FUSE_ARGS(args);
struct fuse_forget_link *forget;
struct fuse_create_in inarg;
struct fuse_open_out outopen;
@@ -420,15 +405,10 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
if (!forget)
goto out_err;
- req = fuse_get_req_nopages(fc);
- err = PTR_ERR(req);
- if (IS_ERR(req))
- goto out_put_forget_req;
-
err = -ENOMEM;
ff = fuse_file_alloc(fc);
if (!ff)
- goto out_put_request;
+ goto out_put_forget_req;
if (!fc->dont_mask)
mode &= ~current_umask();
@@ -439,24 +419,23 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
inarg.flags = flags;
inarg.mode = mode;
inarg.umask = current_umask();
- req->in.h.opcode = FUSE_CREATE;
- req->in.h.nodeid = get_node_id(dir);
- req->in.numargs = 2;
- req->in.args[0].size = fc->minor < 12 ? sizeof(struct fuse_open_in) :
+ args.in.h.opcode = FUSE_CREATE;
+ args.in.h.nodeid = get_node_id(dir);
+ args.in.numargs = 2;
+ args.in.args[0].size = fc->minor < 12 ? sizeof(struct fuse_open_in) :
sizeof(inarg);
- req->in.args[0].value = &inarg;
- req->in.args[1].size = entry->d_name.len + 1;
- req->in.args[1].value = entry->d_name.name;
- req->out.numargs = 2;
+ args.in.args[0].value = &inarg;
+ args.in.args[1].size = entry->d_name.len + 1;
+ args.in.args[1].value = entry->d_name.name;
+ args.out.numargs = 2;
if (fc->minor < 9)
- req->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
+ args.out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
else
- req->out.args[0].size = sizeof(outentry);
- req->out.args[0].value = &outentry;
- req->out.args[1].size = sizeof(outopen);
- req->out.args[1].value = &outopen;
- fuse_request_send(fc, req);
- err = req->out.h.error;
+ args.out.args[0].size = sizeof(outentry);
+ args.out.args[0].value = &outentry;
+ args.out.args[1].size = sizeof(outopen);
+ args.out.args[1].value = &outopen;
+ err = fuse_simple_request(fc, &args);
if (err)
goto out_free_ff;
@@ -464,7 +443,6 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
if (!S_ISREG(outentry.attr.mode) || invalid_nodeid(outentry.nodeid))
goto out_free_ff;
- fuse_put_request(fc, req);
ff->fh = outopen.fh;
ff->nodeid = outentry.nodeid;
ff->open_flags = outopen.open_flags;
@@ -492,8 +470,6 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
out_free_ff:
fuse_file_free(ff);
-out_put_request:
- fuse_put_request(fc, req);
out_put_forget_req:
kfree(forget);
out_err:
@@ -547,7 +523,7 @@ no_open:
/*
* Code shared between mknod, mkdir, symlink and link
*/
-static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req,
+static int create_new_entry(struct fuse_conn *fc, struct fuse_args *args,
struct inode *dir, struct dentry *entry,
umode_t mode)
{
@@ -557,22 +533,18 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req,
struct fuse_forget_link *forget;
forget = fuse_alloc_forget();
- if (!forget) {
- fuse_put_request(fc, req);
+ if (!forget)
return -ENOMEM;
- }
memset(&outarg, 0, sizeof(outarg));
- req->in.h.nodeid = get_node_id(dir);
- req->out.numargs = 1;
+ args->in.h.nodeid = get_node_id(dir);
+ args->out.numargs = 1;
if (fc->minor < 9)
- req->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
+ args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
else
- req->out.args[0].size = sizeof(outarg);
- req->out.args[0].value = &outarg;
- fuse_request_send(fc, req);
- err = req->out.h.error;
- fuse_put_request(fc, req);
+ args->out.args[0].size = sizeof(outarg);
+ args->out.args[0].value = &outarg;
+ err = fuse_simple_request(fc, args);
if (err)
goto out_put_forget_req;
@@ -609,9 +581,7 @@ static int fuse_mknod(struct inode *dir, struct dentry *entry, umode_t mode,
{
struct fuse_mknod_in inarg;
struct fuse_conn *fc = get_fuse_conn(dir);
- struct fuse_req *req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ FUSE_ARGS(args);
if (!fc->dont_mask)
mode &= ~current_umask();
@@ -620,14 +590,14 @@ static int fuse_mknod(struct inode *dir, struct dentry *entry, umode_t mode,
inarg.mode = mode;
inarg.rdev = new_encode_dev(rdev);
inarg.umask = current_umask();
- req->in.h.opcode = FUSE_MKNOD;
- req->in.numargs = 2;
- req->in.args[0].size = fc->minor < 12 ? FUSE_COMPAT_MKNOD_IN_SIZE :
+ args.in.h.opcode = FUSE_MKNOD;
+ args.in.numargs = 2;
+ args.in.args[0].size = fc->minor < 12 ? FUSE_COMPAT_MKNOD_IN_SIZE :
sizeof(inarg);
- req->in.args[0].value = &inarg;
- req->in.args[1].size = entry->d_name.len + 1;
- req->in.args[1].value = entry->d_name.name;
- return create_new_entry(fc, req, dir, entry, mode);
+ args.in.args[0].value = &inarg;
+ args.in.args[1].size = entry->d_name.len + 1;
+ args.in.args[1].value = entry->d_name.name;
+ return create_new_entry(fc, &args, dir, entry, mode);
}
static int fuse_create(struct inode *dir, struct dentry *entry, umode_t mode,
@@ -640,9 +610,7 @@ static int fuse_mkdir(struct inode *dir, struct dentry *entry, umode_t mode)
{
struct fuse_mkdir_in inarg;
struct fuse_conn *fc = get_fuse_conn(dir);
- struct fuse_req *req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ FUSE_ARGS(args);
if (!fc->dont_mask)
mode &= ~current_umask();
@@ -650,13 +618,13 @@ static int fuse_mkdir(struct inode *dir, struct dentry *entry, umode_t mode)
memset(&inarg, 0, sizeof(inarg));
inarg.mode = mode;
inarg.umask = current_umask();
- req->in.h.opcode = FUSE_MKDIR;
- req->in.numargs = 2;
- req->in.args[0].size = sizeof(inarg);
- req->in.args[0].value = &inarg;
- req->in.args[1].size = entry->d_name.len + 1;
- req->in.args[1].value = entry->d_name.name;
- return create_new_entry(fc, req, dir, entry, S_IFDIR);
+ args.in.h.opcode = FUSE_MKDIR;
+ args.in.numargs = 2;
+ args.in.args[0].size = sizeof(inarg);
+ args.in.args[0].value = &inarg;
+ args.in.args[1].size = entry->d_name.len + 1;
+ args.in.args[1].value = entry->d_name.name;
+ return create_new_entry(fc, &args, dir, entry, S_IFDIR);
}
static int fuse_symlink(struct inode *dir, struct dentry *entry,
@@ -664,17 +632,15 @@ static int fuse_symlink(struct inode *dir, struct dentry *entry,
{
struct fuse_conn *fc = get_fuse_conn(dir);
unsigned len = strlen(link) + 1;
- struct fuse_req *req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ FUSE_ARGS(args);
- req->in.h.opcode = FUSE_SYMLINK;
- req->in.numargs = 2;
- req->in.args[0].size = entry->d_name.len + 1;
- req->in.args[0].value = entry->d_name.name;
- req->in.args[1].size = len;
- req->in.args[1].value = link;
- return create_new_entry(fc, req, dir, entry, S_IFLNK);
+ args.in.h.opcode = FUSE_SYMLINK;
+ args.in.numargs = 2;
+ args.in.args[0].size = entry->d_name.len + 1;
+ args.in.args[0].value = entry->d_name.name;
+ args.in.args[1].size = len;
+ args.in.args[1].value = link;
+ return create_new_entry(fc, &args, dir, entry, S_IFLNK);
}
static inline void fuse_update_ctime(struct inode *inode)
@@ -689,18 +655,14 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry)
{
int err;
struct fuse_conn *fc = get_fuse_conn(dir);
- struct fuse_req *req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
- req->in.h.opcode = FUSE_UNLINK;
- req->in.h.nodeid = get_node_id(dir);
- req->in.numargs = 1;
- req->in.args[0].size = entry->d_name.len + 1;
- req->in.args[0].value = entry->d_name.name;
- fuse_request_send(fc, req);
- err = req->out.h.error;
- fuse_put_request(fc, req);
+ FUSE_ARGS(args);
+
+ args.in.h.opcode = FUSE_UNLINK;
+ args.in.h.nodeid = get_node_id(dir);
+ args.in.numargs = 1;
+ args.in.args[0].size = entry->d_name.len + 1;
+ args.in.args[0].value = entry->d_name.name;
+ err = fuse_simple_request(fc, &args);
if (!err) {
struct inode *inode = entry->d_inode;
struct fuse_inode *fi = get_fuse_inode(inode);
@@ -729,18 +691,14 @@ static int fuse_rmdir(struct inode *dir, struct dentry *entry)
{
int err;
struct fuse_conn *fc = get_fuse_conn(dir);
- struct fuse_req *req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
- req->in.h.opcode = FUSE_RMDIR;
- req->in.h.nodeid = get_node_id(dir);
- req->in.numargs = 1;
- req->in.args[0].size = entry->d_name.len + 1;
- req->in.args[0].value = entry->d_name.name;
- fuse_request_send(fc, req);
- err = req->out.h.error;
- fuse_put_request(fc, req);
+ FUSE_ARGS(args);
+
+ args.in.h.opcode = FUSE_RMDIR;
+ args.in.h.nodeid = get_node_id(dir);
+ args.in.numargs = 1;
+ args.in.args[0].size = entry->d_name.len + 1;
+ args.in.args[0].value = entry->d_name.name;
+ err = fuse_simple_request(fc, &args);
if (!err) {
clear_nlink(entry->d_inode);
fuse_invalidate_attr(dir);
@@ -757,27 +715,21 @@ static int fuse_rename_common(struct inode *olddir, struct dentry *oldent,
int err;
struct fuse_rename2_in inarg;
struct fuse_conn *fc = get_fuse_conn(olddir);
- struct fuse_req *req;
-
- req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ FUSE_ARGS(args);
memset(&inarg, 0, argsize);
inarg.newdir = get_node_id(newdir);
inarg.flags = flags;
- req->in.h.opcode = opcode;
- req->in.h.nodeid = get_node_id(olddir);
- req->in.numargs = 3;
- req->in.args[0].size = argsize;
- req->in.args[0].value = &inarg;
- req->in.args[1].size = oldent->d_name.len + 1;
- req->in.args[1].value = oldent->d_name.name;
- req->in.args[2].size = newent->d_name.len + 1;
- req->in.args[2].value = newent->d_name.name;
- fuse_request_send(fc, req);
- err = req->out.h.error;
- fuse_put_request(fc, req);
+ args.in.h.opcode = opcode;
+ args.in.h.nodeid = get_node_id(olddir);
+ args.in.numargs = 3;
+ args.in.args[0].size = argsize;
+ args.in.args[0].value = &inarg;
+ args.in.args[1].size = oldent->d_name.len + 1;
+ args.in.args[1].value = oldent->d_name.name;
+ args.in.args[2].size = newent->d_name.len + 1;
+ args.in.args[2].value = newent->d_name.name;
+ err = fuse_simple_request(fc, &args);
if (!err) {
/* ctime changes */
fuse_invalidate_attr(oldent->d_inode);
@@ -849,19 +801,17 @@ static int fuse_link(struct dentry *entry, struct inode *newdir,
struct fuse_link_in inarg;
struct inode *inode = entry->d_inode;
struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_req *req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ FUSE_ARGS(args);
memset(&inarg, 0, sizeof(inarg));
inarg.oldnodeid = get_node_id(inode);
- req->in.h.opcode = FUSE_LINK;
- req->in.numargs = 2;
- req->in.args[0].size = sizeof(inarg);
- req->in.args[0].value = &inarg;
- req->in.args[1].size = newent->d_name.len + 1;
- req->in.args[1].value = newent->d_name.name;
- err = create_new_entry(fc, req, newdir, newent, inode->i_mode);
+ args.in.h.opcode = FUSE_LINK;
+ args.in.numargs = 2;
+ args.in.args[0].size = sizeof(inarg);
+ args.in.args[0].value = &inarg;
+ args.in.args[1].size = newent->d_name.len + 1;
+ args.in.args[1].value = newent->d_name.name;
+ err = create_new_entry(fc, &args, newdir, newent, inode->i_mode);
/* Contrary to "normal" filesystems it can happen that link
makes two "logical" inodes point to the same "physical"
inode. We invalidate the attributes of the old one, so it
@@ -929,13 +879,9 @@ static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
struct fuse_getattr_in inarg;
struct fuse_attr_out outarg;
struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_req *req;
+ FUSE_ARGS(args);
u64 attr_version;
- req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
attr_version = fuse_get_attr_version(fc);
memset(&inarg, 0, sizeof(inarg));
@@ -947,20 +893,18 @@ static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
inarg.getattr_flags |= FUSE_GETATTR_FH;
inarg.fh = ff->fh;
}
- req->in.h.opcode = FUSE_GETATTR;
- req->in.h.nodeid = get_node_id(inode);
- req->in.numargs = 1;
- req->in.args[0].size = sizeof(inarg);
- req->in.args[0].value = &inarg;
- req->out.numargs = 1;
+ args.in.h.opcode = FUSE_GETATTR;
+ args.in.h.nodeid = get_node_id(inode);
+ args.in.numargs = 1;
+ args.in.args[0].size = sizeof(inarg);
+ args.in.args[0].value = &inarg;
+ args.out.numargs = 1;
if (fc->minor < 9)
- req->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
+ args.out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
else
- req->out.args[0].size = sizeof(outarg);
- req->out.args[0].value = &outarg;
- fuse_request_send(fc, req);
- err = req->out.h.error;
- fuse_put_request(fc, req);
+ args.out.args[0].size = sizeof(outarg);
+ args.out.args[0].value = &outarg;
+ err = fuse_simple_request(fc, &args);
if (!err) {
if ((inode->i_mode ^ outarg.attr.mode) & S_IFMT) {
make_bad_inode(inode);
@@ -1102,7 +1046,7 @@ int fuse_allow_current_process(struct fuse_conn *fc)
static int fuse_access(struct inode *inode, int mask)
{
struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_req *req;
+ FUSE_ARGS(args);
struct fuse_access_in inarg;
int err;
@@ -1111,20 +1055,14 @@ static int fuse_access(struct inode *inode, int mask)
if (fc->no_access)
return 0;
- req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
memset(&inarg, 0, sizeof(inarg));
inarg.mask = mask & (MAY_READ | MAY_WRITE | MAY_EXEC);
- req->in.h.opcode = FUSE_ACCESS;
- req->in.h.nodeid = get_node_id(inode);
- req->in.numargs = 1;
- req->in.args[0].size = sizeof(inarg);
- req->in.args[0].value = &inarg;
- fuse_request_send(fc, req);
- err = req->out.h.error;
- fuse_put_request(fc, req);
+ args.in.h.opcode = FUSE_ACCESS;
+ args.in.h.nodeid = get_node_id(inode);
+ args.in.numargs = 1;
+ args.in.args[0].size = sizeof(inarg);
+ args.in.args[0].value = &inarg;
+ err = fuse_simple_request(fc, &args);
if (err == -ENOSYS) {
fc->no_access = 1;
err = 0;
@@ -1445,31 +1383,27 @@ static char *read_link(struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_req *req = fuse_get_req_nopages(fc);
+ FUSE_ARGS(args);
char *link;
-
- if (IS_ERR(req))
- return ERR_CAST(req);
+ ssize_t ret;
link = (char *) __get_free_page(GFP_KERNEL);
- if (!link) {
- link = ERR_PTR(-ENOMEM);
- goto out;
- }
- req->in.h.opcode = FUSE_READLINK;
- req->in.h.nodeid = get_node_id(inode);
- req->out.argvar = 1;
- req->out.numargs = 1;
- req->out.args[0].size = PAGE_SIZE - 1;
- req->out.args[0].value = link;
- fuse_request_send(fc, req);
- if (req->out.h.error) {
+ if (!link)
+ return ERR_PTR(-ENOMEM);
+
+ args.in.h.opcode = FUSE_READLINK;
+ args.in.h.nodeid = get_node_id(inode);
+ args.out.argvar = 1;
+ args.out.numargs = 1;
+ args.out.args[0].size = PAGE_SIZE - 1;
+ args.out.args[0].value = link;
+ ret = fuse_simple_request(fc, &args);
+ if (ret < 0) {
free_page((unsigned long) link);
- link = ERR_PTR(req->out.h.error);
- } else
- link[req->out.args[0].size] = '\0';
- out:
- fuse_put_request(fc, req);
+ link = ERR_PTR(ret);
+ } else {
+ link[ret] = '\0';
+ }
fuse_invalidate_atime(inode);
return link;
}
@@ -1629,22 +1563,22 @@ void fuse_release_nowrite(struct inode *inode)
spin_unlock(&fc->lock);
}
-static void fuse_setattr_fill(struct fuse_conn *fc, struct fuse_req *req,
+static void fuse_setattr_fill(struct fuse_conn *fc, struct fuse_args *args,
struct inode *inode,
struct fuse_setattr_in *inarg_p,
struct fuse_attr_out *outarg_p)
{
- req->in.h.opcode = FUSE_SETATTR;
- req->in.h.nodeid = get_node_id(inode);
- req->in.numargs = 1;
- req->in.args[0].size = sizeof(*inarg_p);
- req->in.args[0].value = inarg_p;
- req->out.numargs = 1;
+ args->in.h.opcode = FUSE_SETATTR;
+ args->in.h.nodeid = get_node_id(inode);
+ args->in.numargs = 1;
+ args->in.args[0].size = sizeof(*inarg_p);
+ args->in.args[0].value = inarg_p;
+ args->out.numargs = 1;
if (fc->minor < 9)
- req->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
+ args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
else
- req->out.args[0].size = sizeof(*outarg_p);
- req->out.args[0].value = outarg_p;
+ args->out.args[0].size = sizeof(*outarg_p);
+ args->out.args[0].value = outarg_p;
}
/*
@@ -1653,14 +1587,9 @@ static void fuse_setattr_fill(struct fuse_conn *fc, struct fuse_req *req,
int fuse_flush_times(struct inode *inode, struct fuse_file *ff)
{
struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_req *req;
+ FUSE_ARGS(args);
struct fuse_setattr_in inarg;
struct fuse_attr_out outarg;
- int err;
-
- req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return PTR_ERR(req);
memset(&inarg, 0, sizeof(inarg));
memset(&outarg, 0, sizeof(outarg));
@@ -1677,12 +1606,9 @@ int fuse_flush_times(struct inode *inode, struct fuse_file *ff)
inarg.valid |= FATTR_FH;
inarg.fh = ff->fh;
}
- fuse_setattr_fill(fc, req, inode, &inarg, &outarg);
- fuse_request_send(fc, req);
- err = req->out.h.error;
- fuse_put_request(fc, req);
+ fuse_setattr_fill(fc, &args, inode, &inarg, &outarg);
- return err;
+ return fuse_simple_request(fc, &args);
}
/*
@@ -1698,7 +1624,7 @@ int fuse_do_setattr(struct inode *inode, struct iattr *attr,
{
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
- struct fuse_req *req;
+ FUSE_ARGS(args);
struct fuse_setattr_in inarg;
struct fuse_attr_out outarg;
bool is_truncate = false;
@@ -1723,10 +1649,6 @@ int fuse_do_setattr(struct inode *inode, struct iattr *attr,
if (attr->ia_valid & ATTR_SIZE)
is_truncate = true;
- req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
if (is_truncate) {
fuse_set_nowrite(inode);
set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
@@ -1747,10 +1669,8 @@ int fuse_do_setattr(struct inode *inode, struct iattr *attr,
inarg.valid |= FATTR_LOCKOWNER;
inarg.lock_owner = fuse_lock_owner_id(fc, current->files);
}
- fuse_setattr_fill(fc, req, inode, &inarg, &outarg);
- fuse_request_send(fc, req);
- err = req->out.h.error;
- fuse_put_request(fc, req);
+ fuse_setattr_fill(fc, &args, inode, &inarg, &outarg);
+ err = fuse_simple_request(fc, &args);
if (err) {
if (err == -EINTR)
fuse_invalidate_attr(inode);
@@ -1837,32 +1757,26 @@ static int fuse_setxattr(struct dentry *entry, const char *name,
{
struct inode *inode = entry->d_inode;
struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_req *req;
+ FUSE_ARGS(args);
struct fuse_setxattr_in inarg;
int err;
if (fc->no_setxattr)
return -EOPNOTSUPP;
- req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
memset(&inarg, 0, sizeof(inarg));
inarg.size = size;
inarg.flags = flags;
- req->in.h.opcode = FUSE_SETXATTR;
- req->in.h.nodeid = get_node_id(inode);
- req->in.numargs = 3;
- req->in.args[0].size = sizeof(inarg);
- req->in.args[0].value = &inarg;
- req->in.args[1].size = strlen(name) + 1;
- req->in.args[1].value = name;
- req->in.args[2].size = size;
- req->in.args[2].value = value;
- fuse_request_send(fc, req);
- err = req->out.h.error;
- fuse_put_request(fc, req);
+ args.in.h.opcode = FUSE_SETXATTR;
+ args.in.h.nodeid = get_node_id(inode);
+ args.in.numargs = 3;
+ args.in.args[0].size = sizeof(inarg);
+ args.in.args[0].value = &inarg;
+ args.in.args[1].size = strlen(name) + 1;
+ args.in.args[1].value = name;
+ args.in.args[2].size = size;
+ args.in.args[2].value = value;
+ err = fuse_simple_request(fc, &args);
if (err == -ENOSYS) {
fc->no_setxattr = 1;
err = -EOPNOTSUPP;
@@ -1879,7 +1793,7 @@ static ssize_t fuse_getxattr(struct dentry *entry, const char *name,
{
struct inode *inode = entry->d_inode;
struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_req *req;
+ FUSE_ARGS(args);
struct fuse_getxattr_in inarg;
struct fuse_getxattr_out outarg;
ssize_t ret;
@@ -1887,40 +1801,32 @@ static ssize_t fuse_getxattr(struct dentry *entry, const char *name,
if (fc->no_getxattr)
return -EOPNOTSUPP;
- req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
memset(&inarg, 0, sizeof(inarg));
inarg.size = size;
- req->in.h.opcode = FUSE_GETXATTR;
- req->in.h.nodeid = get_node_id(inode);
- req->in.numargs = 2;
- req->in.args[0].size = sizeof(inarg);
- req->in.args[0].value = &inarg;
- req->in.args[1].size = strlen(name) + 1;
- req->in.args[1].value = name;
+ args.in.h.opcode = FUSE_GETXATTR;
+ args.in.h.nodeid = get_node_id(inode);
+ args.in.numargs = 2;
+ args.in.args[0].size = sizeof(inarg);
+ args.in.args[0].value = &inarg;
+ args.in.args[1].size = strlen(name) + 1;
+ args.in.args[1].value = name;
/* This is really two different operations rolled into one */
- req->out.numargs = 1;
+ args.out.numargs = 1;
if (size) {
- req->out.argvar = 1;
- req->out.args[0].size = size;
- req->out.args[0].value = value;
+ args.out.argvar = 1;
+ args.out.args[0].size = size;
+ args.out.args[0].value = value;
} else {
- req->out.args[0].size = sizeof(outarg);
- req->out.args[0].value = &outarg;
+ args.out.args[0].size = sizeof(outarg);
+ args.out.args[0].value = &outarg;
}
- fuse_request_send(fc, req);
- ret = req->out.h.error;
- if (!ret)
- ret = size ? req->out.args[0].size : outarg.size;
- else {
- if (ret == -ENOSYS) {
- fc->no_getxattr = 1;
- ret = -EOPNOTSUPP;
- }
+ ret = fuse_simple_request(fc, &args);
+ if (!ret && !size)
+ ret = outarg.size;
+ if (ret == -ENOSYS) {
+ fc->no_getxattr = 1;
+ ret = -EOPNOTSUPP;
}
- fuse_put_request(fc, req);
return ret;
}
@@ -1928,7 +1834,7 @@ static ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size)
{
struct inode *inode = entry->d_inode;
struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_req *req;
+ FUSE_ARGS(args);
struct fuse_getxattr_in inarg;
struct fuse_getxattr_out outarg;
ssize_t ret;
@@ -1939,38 +1845,30 @@ static ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size)
if (fc->no_listxattr)
return -EOPNOTSUPP;
- req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
memset(&inarg, 0, sizeof(inarg));
inarg.size = size;
- req->in.h.opcode = FUSE_LISTXATTR;
- req->in.h.nodeid = get_node_id(inode);
- req->in.numargs = 1;
- req->in.args[0].size = sizeof(inarg);
- req->in.args[0].value = &inarg;
+ args.in.h.opcode = FUSE_LISTXATTR;
+ args.in.h.nodeid = get_node_id(inode);
+ args.in.numargs = 1;
+ args.in.args[0].size = sizeof(inarg);
+ args.in.args[0].value = &inarg;
/* This is really two different operations rolled into one */
- req->out.numargs = 1;
+ args.out.numargs = 1;
if (size) {
- req->out.argvar = 1;
- req->out.args[0].size = size;
- req->out.args[0].value = list;
+ args.out.argvar = 1;
+ args.out.args[0].size = size;
+ args.out.args[0].value = list;
} else {
- req->out.args[0].size = sizeof(outarg);
- req->out.args[0].value = &outarg;
+ args.out.args[0].size = sizeof(outarg);
+ args.out.args[0].value = &outarg;
}
- fuse_request_send(fc, req);
- ret = req->out.h.error;
- if (!ret)
- ret = size ? req->out.args[0].size : outarg.size;
- else {
- if (ret == -ENOSYS) {
- fc->no_listxattr = 1;
- ret = -EOPNOTSUPP;
- }
+ ret = fuse_simple_request(fc, &args);
+ if (!ret && !size)
+ ret = outarg.size;
+ if (ret == -ENOSYS) {
+ fc->no_listxattr = 1;
+ ret = -EOPNOTSUPP;
}
- fuse_put_request(fc, req);
return ret;
}
@@ -1978,24 +1876,18 @@ static int fuse_removexattr(struct dentry *entry, const char *name)
{
struct inode *inode = entry->d_inode;
struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_req *req;
+ FUSE_ARGS(args);
int err;
if (fc->no_removexattr)
return -EOPNOTSUPP;
- req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
- req->in.h.opcode = FUSE_REMOVEXATTR;
- req->in.h.nodeid = get_node_id(inode);
- req->in.numargs = 1;
- req->in.args[0].size = strlen(name) + 1;
- req->in.args[0].value = name;
- fuse_request_send(fc, req);
- err = req->out.h.error;
- fuse_put_request(fc, req);
+ args.in.h.opcode = FUSE_REMOVEXATTR;
+ args.in.h.nodeid = get_node_id(inode);
+ args.in.numargs = 1;
+ args.in.args[0].size = strlen(name) + 1;
+ args.in.args[0].value = name;
+ err = fuse_simple_request(fc, &args);
if (err == -ENOSYS) {
fc->no_removexattr = 1;
err = -EOPNOTSUPP;
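
The fs/fuse/dir.c conversion above follows one mechanical pattern: instead of allocating a struct fuse_req, filling req->in/req->out, sending it, reading req->out.h.error and releasing the request, each operation now fills a stack-allocated struct fuse_args and calls fuse_simple_request(), which handles allocation, sending and freeing internally. A minimal sketch of the resulting shape, assuming a made-up opcode and function name (FUSE_FROBNICATE and fuse_frobnicate() are illustrative only):

	/* Sketch only: FUSE_FROBNICATE and fuse_frobnicate() are not real symbols. */
	static int fuse_frobnicate(struct inode *inode, const char *name)
	{
		struct fuse_conn *fc = get_fuse_conn(inode);
		FUSE_ARGS(args);			/* zero-initialized on the stack */

		args.in.h.opcode = FUSE_FROBNICATE;
		args.in.h.nodeid = get_node_id(inode);
		args.in.numargs = 1;
		args.in.args[0].size = strlen(name) + 1;
		args.in.args[0].value = name;

		/* allocates the request, sends it, frees it, returns out.h.error */
		return fuse_simple_request(fc, &args);
	}
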
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index bf50259012a..760b2c55219 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -24,30 +24,22 @@ static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
int opcode, struct fuse_open_out *outargp)
{
struct fuse_open_in inarg;
- struct fuse_req *req;
- int err;
-
- req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ FUSE_ARGS(args);
memset(&inarg, 0, sizeof(inarg));
inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
if (!fc->atomic_o_trunc)
inarg.flags &= ~O_TRUNC;
- req->in.h.opcode = opcode;
- req->in.h.nodeid = nodeid;
- req->in.numargs = 1;
- req->in.args[0].size = sizeof(inarg);
- req->in.args[0].value = &inarg;
- req->out.numargs = 1;
- req->out.args[0].size = sizeof(*outargp);
- req->out.args[0].value = outargp;
- fuse_request_send(fc, req);
- err = req->out.h.error;
- fuse_put_request(fc, req);
+ args.in.h.opcode = opcode;
+ args.in.h.nodeid = nodeid;
+ args.in.numargs = 1;
+ args.in.args[0].size = sizeof(inarg);
+ args.in.args[0].value = &inarg;
+ args.out.numargs = 1;
+ args.out.args[0].size = sizeof(*outargp);
+ args.out.args[0].value = outargp;
- return err;
+ return fuse_simple_request(fc, &args);
}
struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
@@ -89,37 +81,9 @@ struct fuse_file *fuse_file_get(struct fuse_file *ff)
return ff;
}
-static void fuse_release_async(struct work_struct *work)
-{
- struct fuse_req *req;
- struct fuse_conn *fc;
- struct path path;
-
- req = container_of(work, struct fuse_req, misc.release.work);
- path = req->misc.release.path;
- fc = get_fuse_conn(path.dentry->d_inode);
-
- fuse_put_request(fc, req);
- path_put(&path);
-}
-
static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
{
- if (fc->destroy_req) {
- /*
- * If this is a fuseblk mount, then it's possible that
- * releasing the path will result in releasing the
- * super block and sending the DESTROY request. If
- * the server is single threaded, this would hang.
- * For this reason do the path_put() in a separate
- * thread.
- */
- atomic_inc(&req->count);
- INIT_WORK(&req->misc.release.work, fuse_release_async);
- schedule_work(&req->misc.release.work);
- } else {
- path_put(&req->misc.release.path);
- }
+ iput(req->misc.release.inode);
}
static void fuse_file_put(struct fuse_file *ff, bool sync)
@@ -133,12 +97,12 @@ static void fuse_file_put(struct fuse_file *ff, bool sync)
* implement 'open'
*/
req->background = 0;
- path_put(&req->misc.release.path);
+ iput(req->misc.release.inode);
fuse_put_request(ff->fc, req);
} else if (sync) {
req->background = 0;
fuse_request_send(ff->fc, req);
- path_put(&req->misc.release.path);
+ iput(req->misc.release.inode);
fuse_put_request(ff->fc, req);
} else {
req->end = fuse_release_end;
@@ -297,9 +261,8 @@ void fuse_release_common(struct file *file, int opcode)
inarg->lock_owner = fuse_lock_owner_id(ff->fc,
(fl_owner_t) file);
}
- /* Hold vfsmount and dentry until release is finished */
- path_get(&file->f_path);
- req->misc.release.path = file->f_path;
+ /* Hold inode until release is finished */
+ req->misc.release.inode = igrab(file_inode(file));
/*
* Normally this will send the RELEASE request, however if
@@ -480,7 +443,7 @@ int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
struct inode *inode = file->f_mapping->host;
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_file *ff = file->private_data;
- struct fuse_req *req;
+ FUSE_ARGS(args);
struct fuse_fsync_in inarg;
int err;
@@ -506,23 +469,15 @@ int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
goto out;
- req = fuse_get_req_nopages(fc);
- if (IS_ERR(req)) {
- err = PTR_ERR(req);
- goto out;
- }
-
memset(&inarg, 0, sizeof(inarg));
inarg.fh = ff->fh;
inarg.fsync_flags = datasync ? 1 : 0;
- req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
- req->in.h.nodeid = get_node_id(inode);
- req->in.numargs = 1;
- req->in.args[0].size = sizeof(inarg);
- req->in.args[0].value = &inarg;
- fuse_request_send(fc, req);
- err = req->out.h.error;
- fuse_put_request(fc, req);
+ args.in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
+ args.in.h.nodeid = get_node_id(inode);
+ args.in.numargs = 1;
+ args.in.args[0].size = sizeof(inarg);
+ args.in.args[0].value = &inarg;
+ err = fuse_simple_request(fc, &args);
if (err == -ENOSYS) {
if (isdir)
fc->no_fsyncdir = 1;
@@ -2156,49 +2111,44 @@ static int convert_fuse_file_lock(const struct fuse_file_lock *ffl,
return 0;
}
-static void fuse_lk_fill(struct fuse_req *req, struct file *file,
+static void fuse_lk_fill(struct fuse_args *args, struct file *file,
const struct file_lock *fl, int opcode, pid_t pid,
- int flock)
+ int flock, struct fuse_lk_in *inarg)
{
struct inode *inode = file_inode(file);
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_file *ff = file->private_data;
- struct fuse_lk_in *arg = &req->misc.lk_in;
-
- arg->fh = ff->fh;
- arg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
- arg->lk.start = fl->fl_start;
- arg->lk.end = fl->fl_end;
- arg->lk.type = fl->fl_type;
- arg->lk.pid = pid;
+
+ memset(inarg, 0, sizeof(*inarg));
+ inarg->fh = ff->fh;
+ inarg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
+ inarg->lk.start = fl->fl_start;
+ inarg->lk.end = fl->fl_end;
+ inarg->lk.type = fl->fl_type;
+ inarg->lk.pid = pid;
if (flock)
- arg->lk_flags |= FUSE_LK_FLOCK;
- req->in.h.opcode = opcode;
- req->in.h.nodeid = get_node_id(inode);
- req->in.numargs = 1;
- req->in.args[0].size = sizeof(*arg);
- req->in.args[0].value = arg;
+ inarg->lk_flags |= FUSE_LK_FLOCK;
+ args->in.h.opcode = opcode;
+ args->in.h.nodeid = get_node_id(inode);
+ args->in.numargs = 1;
+ args->in.args[0].size = sizeof(*inarg);
+ args->in.args[0].value = inarg;
}
static int fuse_getlk(struct file *file, struct file_lock *fl)
{
struct inode *inode = file_inode(file);
struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_req *req;
+ FUSE_ARGS(args);
+ struct fuse_lk_in inarg;
struct fuse_lk_out outarg;
int err;
- req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
- fuse_lk_fill(req, file, fl, FUSE_GETLK, 0, 0);
- req->out.numargs = 1;
- req->out.args[0].size = sizeof(outarg);
- req->out.args[0].value = &outarg;
- fuse_request_send(fc, req);
- err = req->out.h.error;
- fuse_put_request(fc, req);
+ fuse_lk_fill(&args, file, fl, FUSE_GETLK, 0, 0, &inarg);
+ args.out.numargs = 1;
+ args.out.args[0].size = sizeof(outarg);
+ args.out.args[0].value = &outarg;
+ err = fuse_simple_request(fc, &args);
if (!err)
err = convert_fuse_file_lock(&outarg.lk, fl);
@@ -2209,7 +2159,8 @@ static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
{
struct inode *inode = file_inode(file);
struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_req *req;
+ FUSE_ARGS(args);
+ struct fuse_lk_in inarg;
int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0;
int err;
@@ -2223,17 +2174,13 @@ static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
if (fl->fl_flags & FL_CLOSE)
return 0;
- req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ fuse_lk_fill(&args, file, fl, opcode, pid, flock, &inarg);
+ err = fuse_simple_request(fc, &args);
- fuse_lk_fill(req, file, fl, opcode, pid, flock);
- fuse_request_send(fc, req);
- err = req->out.h.error;
/* locking is restartable */
if (err == -EINTR)
err = -ERESTARTSYS;
- fuse_put_request(fc, req);
+
return err;
}
@@ -2283,7 +2230,7 @@ static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
{
struct inode *inode = mapping->host;
struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_req *req;
+ FUSE_ARGS(args);
struct fuse_bmap_in inarg;
struct fuse_bmap_out outarg;
int err;
@@ -2291,24 +2238,18 @@ static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
if (!inode->i_sb->s_bdev || fc->no_bmap)
return 0;
- req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return 0;
-
memset(&inarg, 0, sizeof(inarg));
inarg.block = block;
inarg.blocksize = inode->i_sb->s_blocksize;
- req->in.h.opcode = FUSE_BMAP;
- req->in.h.nodeid = get_node_id(inode);
- req->in.numargs = 1;
- req->in.args[0].size = sizeof(inarg);
- req->in.args[0].value = &inarg;
- req->out.numargs = 1;
- req->out.args[0].size = sizeof(outarg);
- req->out.args[0].value = &outarg;
- fuse_request_send(fc, req);
- err = req->out.h.error;
- fuse_put_request(fc, req);
+ args.in.h.opcode = FUSE_BMAP;
+ args.in.h.nodeid = get_node_id(inode);
+ args.in.numargs = 1;
+ args.in.args[0].size = sizeof(inarg);
+ args.in.args[0].value = &inarg;
+ args.out.numargs = 1;
+ args.out.args[0].size = sizeof(outarg);
+ args.out.args[0].value = &outarg;
+ err = fuse_simple_request(fc, &args);
if (err == -ENOSYS)
fc->no_bmap = 1;
@@ -2776,7 +2717,7 @@ unsigned fuse_file_poll(struct file *file, poll_table *wait)
struct fuse_conn *fc = ff->fc;
struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
struct fuse_poll_out outarg;
- struct fuse_req *req;
+ FUSE_ARGS(args);
int err;
if (fc->no_poll)
@@ -2794,21 +2735,15 @@ unsigned fuse_file_poll(struct file *file, poll_table *wait)
fuse_register_polled_file(fc, ff);
}
- req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return POLLERR;
-
- req->in.h.opcode = FUSE_POLL;
- req->in.h.nodeid = ff->nodeid;
- req->in.numargs = 1;
- req->in.args[0].size = sizeof(inarg);
- req->in.args[0].value = &inarg;
- req->out.numargs = 1;
- req->out.args[0].size = sizeof(outarg);
- req->out.args[0].value = &outarg;
- fuse_request_send(fc, req);
- err = req->out.h.error;
- fuse_put_request(fc, req);
+ args.in.h.opcode = FUSE_POLL;
+ args.in.h.nodeid = ff->nodeid;
+ args.in.numargs = 1;
+ args.in.args[0].size = sizeof(inarg);
+ args.in.args[0].value = &inarg;
+ args.out.numargs = 1;
+ args.out.args[0].size = sizeof(outarg);
+ args.out.args[0].value = &outarg;
+ err = fuse_simple_request(fc, &args);
if (!err)
return outarg.revents;
@@ -2949,10 +2884,10 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
loff_t length)
{
struct fuse_file *ff = file->private_data;
- struct inode *inode = file->f_inode;
+ struct inode *inode = file_inode(file);
struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_conn *fc = ff->fc;
- struct fuse_req *req;
+ FUSE_ARGS(args);
struct fuse_fallocate_in inarg = {
.fh = ff->fh,
.offset = offset,
@@ -2985,25 +2920,16 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
if (!(mode & FALLOC_FL_KEEP_SIZE))
set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
- req = fuse_get_req_nopages(fc);
- if (IS_ERR(req)) {
- err = PTR_ERR(req);
- goto out;
- }
-
- req->in.h.opcode = FUSE_FALLOCATE;
- req->in.h.nodeid = ff->nodeid;
- req->in.numargs = 1;
- req->in.args[0].size = sizeof(inarg);
- req->in.args[0].value = &inarg;
- fuse_request_send(fc, req);
- err = req->out.h.error;
+ args.in.h.opcode = FUSE_FALLOCATE;
+ args.in.h.nodeid = ff->nodeid;
+ args.in.numargs = 1;
+ args.in.args[0].size = sizeof(inarg);
+ args.in.args[0].value = &inarg;
+ err = fuse_simple_request(fc, &args);
if (err == -ENOSYS) {
fc->no_fallocate = 1;
err = -EOPNOTSUPP;
}
- fuse_put_request(fc, req);
-
if (err)
goto out;
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index e8e47a6ab51..e0fc6725d1d 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -213,7 +213,7 @@ struct fuse_out {
unsigned numargs;
/** Array of arguments */
- struct fuse_arg args[3];
+ struct fuse_arg args[2];
};
/** FUSE page descriptor */
@@ -222,6 +222,25 @@ struct fuse_page_desc {
unsigned int offset;
};
+struct fuse_args {
+ struct {
+ struct {
+ uint32_t opcode;
+ uint64_t nodeid;
+ } h;
+ unsigned numargs;
+ struct fuse_in_arg args[3];
+
+ } in;
+ struct {
+ unsigned argvar:1;
+ unsigned numargs;
+ struct fuse_arg args[2];
+ } out;
+};
+
+#define FUSE_ARGS(args) struct fuse_args args = {}
+
/** The request state */
enum fuse_req_state {
FUSE_REQ_INIT = 0,
@@ -305,11 +324,8 @@ struct fuse_req {
/** Data for asynchronous requests */
union {
struct {
- union {
- struct fuse_release_in in;
- struct work_struct work;
- };
- struct path path;
+ struct fuse_release_in in;
+ struct inode *inode;
} release;
struct fuse_init_in init_in;
struct fuse_init_out init_out;
@@ -324,7 +340,6 @@ struct fuse_req {
struct fuse_req *next;
} write;
struct fuse_notify_retrieve_in retrieve_in;
- struct fuse_lk_in lk_in;
} misc;
/** page vector */
@@ -754,15 +769,6 @@ struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
void __fuse_get_request(struct fuse_req *req);
/**
- * Get a request, may fail with -ENOMEM,
- * useful for callers who doesn't use req->pages[]
- */
-static inline struct fuse_req *fuse_get_req_nopages(struct fuse_conn *fc)
-{
- return fuse_get_req(fc, 0);
-}
-
-/**
* Gets a requests for a file operation, always succeeds
*/
struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
@@ -780,6 +786,11 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req);
void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req);
/**
+ * Simple request sending that does request allocation and freeing
+ */
+ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args);
+
+/**
* Send a request in the background
*/
void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req);
@@ -804,8 +815,6 @@ void fuse_invalidate_atime(struct inode *inode);
*/
struct fuse_conn *fuse_conn_get(struct fuse_conn *fc);
-void fuse_conn_kill(struct fuse_conn *fc);
-
/**
* Initialize fuse_conn
*/
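
fuse_simple_request() is declared to return ssize_t rather than int, which the read_link() and getxattr() hunks earlier in the series rely on: when args.out.argvar is set, a non-negative return value appears to be the actual length of the variable-sized last reply argument instead of 0. A hedged sketch of that usage, modeled on the read_link() hunk (the helper name and buffer handling are illustrative):

	/* Sketch only: variable-length reply; fuse_read_symlink() is not a real helper. */
	static ssize_t fuse_read_symlink(struct inode *inode, char *buf, size_t size)
	{
		struct fuse_conn *fc = get_fuse_conn(inode);
		FUSE_ARGS(args);

		args.in.h.opcode = FUSE_READLINK;
		args.in.h.nodeid = get_node_id(inode);
		args.out.argvar = 1;			/* server may return fewer bytes */
		args.out.numargs = 1;
		args.out.args[0].size = size;
		args.out.args[0].value = buf;

		/* negative errno on failure, otherwise the number of bytes returned */
		return fuse_simple_request(fc, &args);
	}
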
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 03246cd9d47..6749109f255 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -376,28 +376,13 @@ static void fuse_bdi_destroy(struct fuse_conn *fc)
bdi_destroy(&fc->bdi);
}
-void fuse_conn_kill(struct fuse_conn *fc)
-{
- spin_lock(&fc->lock);
- fc->connected = 0;
- fc->blocked = 0;
- fc->initialized = 1;
- spin_unlock(&fc->lock);
- /* Flush all readers on this fs */
- kill_fasync(&fc->fasync, SIGIO, POLL_IN);
- wake_up_all(&fc->waitq);
- wake_up_all(&fc->blocked_waitq);
- wake_up_all(&fc->reserved_req_waitq);
-}
-EXPORT_SYMBOL_GPL(fuse_conn_kill);
-
static void fuse_put_super(struct super_block *sb)
{
struct fuse_conn *fc = get_fuse_conn_super(sb);
fuse_send_destroy(fc);
- fuse_conn_kill(fc);
+ fuse_abort_conn(fc);
mutex_lock(&fuse_mutex);
list_del(&fc->entry);
fuse_ctl_remove_conn(fc);
@@ -425,7 +410,7 @@ static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
struct fuse_conn *fc = get_fuse_conn_super(sb);
- struct fuse_req *req;
+ FUSE_ARGS(args);
struct fuse_statfs_out outarg;
int err;
@@ -434,23 +419,17 @@ static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf)
return 0;
}
- req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
memset(&outarg, 0, sizeof(outarg));
- req->in.numargs = 0;
- req->in.h.opcode = FUSE_STATFS;
- req->in.h.nodeid = get_node_id(dentry->d_inode);
- req->out.numargs = 1;
- req->out.args[0].size =
+ args.in.numargs = 0;
+ args.in.h.opcode = FUSE_STATFS;
+ args.in.h.nodeid = get_node_id(dentry->d_inode);
+ args.out.numargs = 1;
+ args.out.args[0].size =
fc->minor < 4 ? FUSE_COMPAT_STATFS_SIZE : sizeof(outarg);
- req->out.args[0].value = &outarg;
- fuse_request_send(fc, req);
- err = req->out.h.error;
+ args.out.args[0].value = &outarg;
+ err = fuse_simple_request(fc, &args);
if (!err)
convert_fuse_statfs(buf, &outarg.st);
- fuse_put_request(fc, req);
return err;
}
diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
index 32602c667b4..7892e6fddb6 100644
--- a/fs/hfsplus/catalog.c
+++ b/fs/hfsplus/catalog.c
@@ -38,21 +38,30 @@ int hfsplus_cat_bin_cmp_key(const hfsplus_btree_key *k1,
return hfsplus_strcmp(&k1->cat.name, &k2->cat.name);
}
-void hfsplus_cat_build_key(struct super_block *sb, hfsplus_btree_key *key,
- u32 parent, struct qstr *str)
+/* Generates key for catalog file/folders record. */
+int hfsplus_cat_build_key(struct super_block *sb,
+ hfsplus_btree_key *key, u32 parent, struct qstr *str)
{
- int len;
+ int len, err;
key->cat.parent = cpu_to_be32(parent);
- if (str) {
- hfsplus_asc2uni(sb, &key->cat.name, HFSPLUS_MAX_STRLEN,
- str->name, str->len);
- len = be16_to_cpu(key->cat.name.length);
- } else {
- key->cat.name.length = 0;
- len = 0;
- }
+ err = hfsplus_asc2uni(sb, &key->cat.name, HFSPLUS_MAX_STRLEN,
+ str->name, str->len);
+ if (unlikely(err < 0))
+ return err;
+
+ len = be16_to_cpu(key->cat.name.length);
key->key_len = cpu_to_be16(6 + 2 * len);
+ return 0;
+}
+
+/* Generates key for catalog thread record. */
+void hfsplus_cat_build_key_with_cnid(struct super_block *sb,
+ hfsplus_btree_key *key, u32 parent)
+{
+ key->cat.parent = cpu_to_be32(parent);
+ key->cat.name.length = 0;
+ key->key_len = cpu_to_be16(6);
}
static void hfsplus_cat_build_key_uni(hfsplus_btree_key *key, u32 parent,
@@ -167,11 +176,16 @@ static int hfsplus_fill_cat_thread(struct super_block *sb,
hfsplus_cat_entry *entry, int type,
u32 parentid, struct qstr *str)
{
+ int err;
+
entry->type = cpu_to_be16(type);
entry->thread.reserved = 0;
entry->thread.parentID = cpu_to_be32(parentid);
- hfsplus_asc2uni(sb, &entry->thread.nodeName, HFSPLUS_MAX_STRLEN,
+ err = hfsplus_asc2uni(sb, &entry->thread.nodeName, HFSPLUS_MAX_STRLEN,
str->name, str->len);
+ if (unlikely(err < 0))
+ return err;
+
return 10 + be16_to_cpu(entry->thread.nodeName.length) * 2;
}
@@ -183,7 +197,7 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
int err;
u16 type;
- hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
+ hfsplus_cat_build_key_with_cnid(sb, fd->search_key, cnid);
err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
if (err)
return err;
@@ -250,11 +264,16 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir,
if (err)
return err;
- hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL);
+ hfsplus_cat_build_key_with_cnid(sb, fd.search_key, cnid);
entry_size = hfsplus_fill_cat_thread(sb, &entry,
S_ISDIR(inode->i_mode) ?
HFSPLUS_FOLDER_THREAD : HFSPLUS_FILE_THREAD,
dir->i_ino, str);
+ if (unlikely(entry_size < 0)) {
+ err = entry_size;
+ goto err2;
+ }
+
err = hfs_brec_find(&fd, hfs_find_rec_by_key);
if (err != -ENOENT) {
if (!err)
@@ -265,7 +284,10 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir,
if (err)
goto err2;
- hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, str);
+ err = hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, str);
+ if (unlikely(err))
+ goto err1;
+
entry_size = hfsplus_cat_build_record(&entry, cnid, inode);
err = hfs_brec_find(&fd, hfs_find_rec_by_key);
if (err != -ENOENT) {
@@ -288,7 +310,7 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir,
return 0;
err1:
- hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL);
+ hfsplus_cat_build_key_with_cnid(sb, fd.search_key, cnid);
if (!hfs_brec_find(&fd, hfs_find_rec_by_key))
hfs_brec_remove(&fd);
err2:
@@ -313,7 +335,7 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str)
if (!str) {
int len;
- hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL);
+ hfsplus_cat_build_key_with_cnid(sb, fd.search_key, cnid);
err = hfs_brec_find(&fd, hfs_find_rec_by_key);
if (err)
goto out;
@@ -329,7 +351,9 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str)
off + 2, len);
fd.search_key->key_len = cpu_to_be16(6 + len);
} else
- hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, str);
+ err = hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, str);
+ if (unlikely(err))
+ goto out;
err = hfs_brec_find(&fd, hfs_find_rec_by_key);
if (err)
@@ -360,7 +384,7 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str)
if (err)
goto out;
- hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL);
+ hfsplus_cat_build_key_with_cnid(sb, fd.search_key, cnid);
err = hfs_brec_find(&fd, hfs_find_rec_by_key);
if (err)
goto out;
@@ -405,7 +429,11 @@ int hfsplus_rename_cat(u32 cnid,
dst_fd = src_fd;
/* find the old dir entry and read the data */
- hfsplus_cat_build_key(sb, src_fd.search_key, src_dir->i_ino, src_name);
+ err = hfsplus_cat_build_key(sb, src_fd.search_key,
+ src_dir->i_ino, src_name);
+ if (unlikely(err))
+ goto out;
+
err = hfs_brec_find(&src_fd, hfs_find_rec_by_key);
if (err)
goto out;
@@ -419,7 +447,11 @@ int hfsplus_rename_cat(u32 cnid,
type = be16_to_cpu(entry.type);
/* create new dir entry with the data from the old entry */
- hfsplus_cat_build_key(sb, dst_fd.search_key, dst_dir->i_ino, dst_name);
+ err = hfsplus_cat_build_key(sb, dst_fd.search_key,
+ dst_dir->i_ino, dst_name);
+ if (unlikely(err))
+ goto out;
+
err = hfs_brec_find(&dst_fd, hfs_find_rec_by_key);
if (err != -ENOENT) {
if (!err)
@@ -436,7 +468,11 @@ int hfsplus_rename_cat(u32 cnid,
dst_dir->i_mtime = dst_dir->i_ctime = CURRENT_TIME_SEC;
/* finally remove the old entry */
- hfsplus_cat_build_key(sb, src_fd.search_key, src_dir->i_ino, src_name);
+ err = hfsplus_cat_build_key(sb, src_fd.search_key,
+ src_dir->i_ino, src_name);
+ if (unlikely(err))
+ goto out;
+
err = hfs_brec_find(&src_fd, hfs_find_rec_by_key);
if (err)
goto out;
@@ -449,7 +485,7 @@ int hfsplus_rename_cat(u32 cnid,
src_dir->i_mtime = src_dir->i_ctime = CURRENT_TIME_SEC;
/* remove old thread entry */
- hfsplus_cat_build_key(sb, src_fd.search_key, cnid, NULL);
+ hfsplus_cat_build_key_with_cnid(sb, src_fd.search_key, cnid);
err = hfs_brec_find(&src_fd, hfs_find_rec_by_key);
if (err)
goto out;
@@ -459,9 +495,14 @@ int hfsplus_rename_cat(u32 cnid,
goto out;
/* create new thread entry */
- hfsplus_cat_build_key(sb, dst_fd.search_key, cnid, NULL);
+ hfsplus_cat_build_key_with_cnid(sb, dst_fd.search_key, cnid);
entry_size = hfsplus_fill_cat_thread(sb, &entry, type,
dst_dir->i_ino, dst_name);
+ if (unlikely(entry_size < 0)) {
+ err = entry_size;
+ goto out;
+ }
+
err = hfs_brec_find(&dst_fd, hfs_find_rec_by_key);
if (err != -ENOENT) {
if (!err)
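
The catalog changes split the old hfsplus_cat_build_key() in two: the name-based variant now propagates hfsplus_asc2uni() failures to the caller, while thread-record keys (CNID only) use the new hfsplus_cat_build_key_with_cnid(), which cannot fail. A small sketch of the resulting caller pattern, assuming an illustrative wrapper name:

	/* Sketch only: hfsplus_find_by_key() is an illustrative wrapper, not a real helper. */
	static int hfsplus_find_by_key(struct super_block *sb, struct hfs_find_data *fd,
				       u32 cnid, struct qstr *name)
	{
		int err;

		if (name) {
			/* may fail if the name cannot be converted to Unicode */
			err = hfsplus_cat_build_key(sb, fd->search_key, cnid, name);
			if (unlikely(err))
				return err;
		} else {
			/* thread records are keyed by CNID alone; this cannot fail */
			hfsplus_cat_build_key_with_cnid(sb, fd->search_key, cnid);
		}
		return hfs_brec_find(fd, hfs_find_rec_by_key);
	}
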
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index 610a3260bef..435bea231cc 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -44,7 +44,10 @@ static struct dentry *hfsplus_lookup(struct inode *dir, struct dentry *dentry,
err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
if (err)
return ERR_PTR(err);
- hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, &dentry->d_name);
+ err = hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino,
+ &dentry->d_name);
+ if (unlikely(err < 0))
+ goto fail;
again:
err = hfs_brec_read(&fd, &entry, sizeof(entry));
if (err) {
@@ -97,9 +100,11 @@ again:
be32_to_cpu(entry.file.permissions.dev);
str.len = sprintf(name, "iNode%d", linkid);
str.name = name;
- hfsplus_cat_build_key(sb, fd.search_key,
+ err = hfsplus_cat_build_key(sb, fd.search_key,
HFSPLUS_SB(sb)->hidden_dir->i_ino,
&str);
+ if (unlikely(err < 0))
+ goto fail;
goto again;
}
} else if (!dentry->d_fsdata)
@@ -145,7 +150,7 @@ static int hfsplus_readdir(struct file *file, struct dir_context *ctx)
err = -ENOMEM;
goto out;
}
- hfsplus_cat_build_key(sb, fd.search_key, inode->i_ino, NULL);
+ hfsplus_cat_build_key_with_cnid(sb, fd.search_key, inode->i_ino);
err = hfs_brec_find(&fd, hfs_find_rec_by_key);
if (err)
goto out;
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index eb5e059f481..b0441d65fa5 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -443,8 +443,10 @@ int hfsplus_cat_case_cmp_key(const hfsplus_btree_key *k1,
const hfsplus_btree_key *k2);
int hfsplus_cat_bin_cmp_key(const hfsplus_btree_key *k1,
const hfsplus_btree_key *k2);
-void hfsplus_cat_build_key(struct super_block *sb, hfsplus_btree_key *key,
+int hfsplus_cat_build_key(struct super_block *sb, hfsplus_btree_key *key,
u32 parent, struct qstr *str);
+void hfsplus_cat_build_key_with_cnid(struct super_block *sb,
+ hfsplus_btree_key *key, u32 parent);
void hfsplus_cat_set_perms(struct inode *inode, struct hfsplus_perm *perms);
int hfsplus_find_cat(struct super_block *sb, u32 cnid,
struct hfs_find_data *fd);
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 4cf2024b87d..593af2fdcc2 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -515,7 +515,9 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
err = hfs_find_init(sbi->cat_tree, &fd);
if (err)
goto out_put_root;
- hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_ROOT_CNID, &str);
+ err = hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_ROOT_CNID, &str);
+ if (unlikely(err < 0))
+ goto out_put_root;
if (!hfs_brec_read(&fd, &entry, sizeof(entry))) {
hfs_find_exit(&fd);
if (entry.type != cpu_to_be16(HFSPLUS_FOLDER))
diff --git a/fs/inode.c b/fs/inode.c
index ad60555b476..aa149e7262a 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -114,6 +114,11 @@ int proc_nr_inodes(struct ctl_table *table, int write,
}
#endif
+static int no_open(struct inode *inode, struct file *file)
+{
+ return -ENXIO;
+}
+
/**
* inode_init_always - perform inode structure intialisation
* @sb: superblock inode belongs to
@@ -125,7 +130,7 @@ int proc_nr_inodes(struct ctl_table *table, int write,
int inode_init_always(struct super_block *sb, struct inode *inode)
{
static const struct inode_operations empty_iops;
- static const struct file_operations empty_fops;
+ static const struct file_operations no_open_fops = {.open = no_open};
struct address_space *const mapping = &inode->i_data;
inode->i_sb = sb;
@@ -133,7 +138,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
inode->i_flags = 0;
atomic_set(&inode->i_count, 1);
inode->i_op = &empty_iops;
- inode->i_fop = &empty_fops;
+ inode->i_fop = &no_open_fops;
inode->__i_nlink = 1;
inode->i_opflags = 0;
i_uid_write(inode, 0);
@@ -1798,7 +1803,7 @@ void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
} else if (S_ISFIFO(mode))
inode->i_fop = &pipefifo_fops;
else if (S_ISSOCK(mode))
- inode->i_fop = &bad_sock_fops;
+ ; /* leave it no_open_fops */
else
printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
" inode %s:%lu\n", mode, inode->i_sb->s_id,
diff --git a/fs/internal.h b/fs/internal.h
index 757ba2abf21..e9a61fe6757 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -147,3 +147,8 @@ extern const struct file_operations pipefifo_fops;
*/
extern void sb_pin_kill(struct super_block *sb);
extern void mnt_pin_kill(struct mount *m);
+
+/*
+ * fs/nsfs.c
+ */
+extern struct dentry_operations ns_dentry_operations;
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 77c9a781254..214c3c11fbc 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -443,7 +443,7 @@ int ioctl_preallocate(struct file *filp, void __user *argp)
return -EINVAL;
}
- return do_fallocate(filp, FALLOC_FL_KEEP_SIZE, sr.l_start, sr.l_len);
+ return vfs_fallocate(filp, FALLOC_FL_KEEP_SIZE, sr.l_start, sr.l_len);
}
static int file_ioctl(struct file *filp, unsigned int cmd,
diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
index f488bbae541..bb63254ed84 100644
--- a/fs/isofs/rock.c
+++ b/fs/isofs/rock.c
@@ -30,6 +30,7 @@ struct rock_state {
int cont_size;
int cont_extent;
int cont_offset;
+ int cont_loops;
struct inode *inode;
};
@@ -73,6 +74,9 @@ static void init_rock_state(struct rock_state *rs, struct inode *inode)
rs->inode = inode;
}
+/* Maximum number of Rock Ridge continuation entries */
+#define RR_MAX_CE_ENTRIES 32
+
/*
* Returns 0 if the caller should continue scanning, 1 if the scan must end
* and -ve on error.
@@ -105,6 +109,8 @@ static int rock_continue(struct rock_state *rs)
goto out;
}
ret = -EIO;
+ if (++rs->cont_loops >= RR_MAX_CE_ENTRIES)
+ goto out;
bh = sb_bread(rs->inode->i_sb, rs->cont_extent);
if (bh) {
memcpy(rs->buffer, bh->b_data + rs->cont_offset,
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index 386303dca38..dddbde4f56f 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -224,7 +224,7 @@ static int jffs2_add_tn_to_tree(struct jffs2_sb_info *c,
dbg_readinode("insert fragment %#04x-%#04x, ver %u at %08x\n", tn->fn->ofs, fn_end, tn->version, ref_offset(tn->fn->raw));
- /* If a node has zero dsize, we only have to keep if it if it might be the
+ /* If a node has zero dsize, we only have to keep it if it might be the
node with highest version -- i.e. the one which will end up as f->metadata.
Note that such nodes won't be REF_UNCHECKED since there are no data to
check anyway. */
diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c
index c522d098bb4..bc5385471a6 100644
--- a/fs/jffs2/summary.c
+++ b/fs/jffs2/summary.c
@@ -844,6 +844,7 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock
/* Write out summary information - called from jffs2_do_reserve_space */
int jffs2_sum_write_sumnode(struct jffs2_sb_info *c)
+ __must_hold(&c->erase_completion_block)
{
int datasize, infosize, padsize;
struct jffs2_eraseblock *jeb;
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index 697390ea47b..ddc9f9612f1 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -448,27 +448,6 @@ static struct mempolicy *kernfs_vma_get_policy(struct vm_area_struct *vma,
return pol;
}
-static int kernfs_vma_migrate(struct vm_area_struct *vma,
- const nodemask_t *from, const nodemask_t *to,
- unsigned long flags)
-{
- struct file *file = vma->vm_file;
- struct kernfs_open_file *of = kernfs_of(file);
- int ret;
-
- if (!of->vm_ops)
- return 0;
-
- if (!kernfs_get_active(of->kn))
- return 0;
-
- ret = 0;
- if (of->vm_ops->migrate)
- ret = of->vm_ops->migrate(vma, from, to, flags);
-
- kernfs_put_active(of->kn);
- return ret;
-}
#endif
static const struct vm_operations_struct kernfs_vm_ops = {
@@ -479,7 +458,6 @@ static const struct vm_operations_struct kernfs_vm_ops = {
#ifdef CONFIG_NUMA
.set_policy = kernfs_vma_set_policy,
.get_policy = kernfs_vma_get_policy,
- .migrate = kernfs_vma_migrate,
#endif
};
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index 9106f42c472..1cc6ec51e6b 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -214,7 +214,7 @@ int nsm_monitor(const struct nlm_host *host)
if (unlikely(res.status != 0))
status = -EIO;
if (unlikely(status < 0)) {
- printk(KERN_NOTICE "lockd: cannot monitor %s\n", nsm->sm_name);
+ pr_notice_ratelimited("lockd: cannot monitor %s\n", nsm->sm_name);
return status;
}
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index d1bb7ecfd20..e94c887da2d 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -350,7 +350,7 @@ static struct svc_serv *lockd_create_svc(void)
printk(KERN_WARNING
"lockd_up: no pid, %d users??\n", nlmsvc_users);
- serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, NULL);
+ serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, svc_rpcb_cleanup);
if (!serv) {
printk(KERN_WARNING "lockd_up: create service failed\n");
return ERR_PTR(-ENOMEM);
diff --git a/fs/mount.h b/fs/mount.h
index f82c6284090..0ad6f760ce5 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -1,10 +1,11 @@
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
+#include <linux/ns_common.h>
struct mnt_namespace {
atomic_t count;
- unsigned int proc_inum;
+ struct ns_common ns;
struct mount * root;
struct list_head list;
struct user_namespace *user_ns;
diff --git a/fs/namei.c b/fs/namei.c
index ca814165d84..bc35b02883b 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -487,6 +487,19 @@ void path_put(const struct path *path)
}
EXPORT_SYMBOL(path_put);
+struct nameidata {
+ struct path path;
+ struct qstr last;
+ struct path root;
+ struct inode *inode; /* path.dentry.d_inode */
+ unsigned int flags;
+ unsigned seq, m_seq;
+ int last_type;
+ unsigned depth;
+ struct file *base;
+ char *saved_names[MAX_NESTED_LINKS + 1];
+};
+
/*
* Path walking has 2 modes, rcu-walk and ref-walk (see
* Documentation/filesystems/path-lookup.txt). In situations when we can't
@@ -695,6 +708,18 @@ void nd_jump_link(struct nameidata *nd, struct path *path)
nd->flags |= LOOKUP_JUMPED;
}
+void nd_set_link(struct nameidata *nd, char *path)
+{
+ nd->saved_names[nd->depth] = path;
+}
+EXPORT_SYMBOL(nd_set_link);
+
+char *nd_get_link(struct nameidata *nd)
+{
+ return nd->saved_names[nd->depth];
+}
+EXPORT_SYMBOL(nd_get_link);
+
static inline void put_link(struct nameidata *nd, struct path *link, void *cookie)
{
struct inode *inode = link->dentry->d_inode;
@@ -1821,13 +1846,14 @@ static int link_path_walk(const char *name, struct nameidata *nd)
}
static int path_init(int dfd, const char *name, unsigned int flags,
- struct nameidata *nd, struct file **fp)
+ struct nameidata *nd)
{
int retval = 0;
nd->last_type = LAST_ROOT; /* if there are only slashes... */
- nd->flags = flags | LOOKUP_JUMPED;
+ nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT;
nd->depth = 0;
+ nd->base = NULL;
if (flags & LOOKUP_ROOT) {
struct dentry *root = nd->root.dentry;
struct inode *inode = root->d_inode;
@@ -1847,7 +1873,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
} else {
path_get(&nd->path);
}
- return 0;
+ goto done;
}
nd->root.mnt = NULL;
@@ -1897,7 +1923,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
nd->path = f.file->f_path;
if (flags & LOOKUP_RCU) {
if (f.flags & FDPUT_FPUT)
- *fp = f.file;
+ nd->base = f.file;
nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
rcu_read_lock();
} else {
@@ -1908,13 +1934,26 @@ static int path_init(int dfd, const char *name, unsigned int flags,
nd->inode = nd->path.dentry->d_inode;
if (!(flags & LOOKUP_RCU))
- return 0;
+ goto done;
if (likely(!read_seqcount_retry(&nd->path.dentry->d_seq, nd->seq)))
- return 0;
+ goto done;
if (!(nd->flags & LOOKUP_ROOT))
nd->root.mnt = NULL;
rcu_read_unlock();
return -ECHILD;
+done:
+ current->total_link_count = 0;
+ return link_path_walk(name, nd);
+}
+
+static void path_cleanup(struct nameidata *nd)
+{
+ if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
+ path_put(&nd->root);
+ nd->root.mnt = NULL;
+ }
+ if (unlikely(nd->base))
+ fput(nd->base);
}
static inline int lookup_last(struct nameidata *nd, struct path *path)
@@ -1930,7 +1969,6 @@ static inline int lookup_last(struct nameidata *nd, struct path *path)
static int path_lookupat(int dfd, const char *name,
unsigned int flags, struct nameidata *nd)
{
- struct file *base = NULL;
struct path path;
int err;
@@ -1948,14 +1986,7 @@ static int path_lookupat(int dfd, const char *name,
* be handled by restarting a traditional ref-walk (which will always
* be able to complete).
*/
- err = path_init(dfd, name, flags | LOOKUP_PARENT, nd, &base);
-
- if (unlikely(err))
- goto out;
-
- current->total_link_count = 0;
- err = link_path_walk(name, nd);
-
+ err = path_init(dfd, name, flags, nd);
if (!err && !(flags & LOOKUP_PARENT)) {
err = lookup_last(nd, &path);
while (err > 0) {
@@ -1983,14 +2014,7 @@ static int path_lookupat(int dfd, const char *name,
}
}
-out:
- if (base)
- fput(base);
-
- if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
- path_put(&nd->root);
- nd->root.mnt = NULL;
- }
+ path_cleanup(nd);
return err;
}
@@ -2297,19 +2321,13 @@ out:
static int
path_mountpoint(int dfd, const char *name, struct path *path, unsigned int flags)
{
- struct file *base = NULL;
struct nameidata nd;
int err;
- err = path_init(dfd, name, flags | LOOKUP_PARENT, &nd, &base);
+ err = path_init(dfd, name, flags, &nd);
if (unlikely(err))
goto out;
- current->total_link_count = 0;
- err = link_path_walk(name, &nd);
- if (err)
- goto out;
-
err = mountpoint_last(&nd, path);
while (err > 0) {
void *cookie;
@@ -2325,12 +2343,7 @@ path_mountpoint(int dfd, const char *name, struct path *path, unsigned int flags
put_link(&nd, &link, cookie);
}
out:
- if (base)
- fput(base);
-
- if (nd.root.mnt && !(nd.flags & LOOKUP_ROOT))
- path_put(&nd.root);
-
+ path_cleanup(&nd);
return err;
}
@@ -3181,7 +3194,6 @@ out:
static struct file *path_openat(int dfd, struct filename *pathname,
struct nameidata *nd, const struct open_flags *op, int flags)
{
- struct file *base = NULL;
struct file *file;
struct path path;
int opened = 0;
@@ -3198,12 +3210,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
goto out;
}
- error = path_init(dfd, pathname->name, flags | LOOKUP_PARENT, nd, &base);
- if (unlikely(error))
- goto out;
-
- current->total_link_count = 0;
- error = link_path_walk(pathname->name, nd);
+ error = path_init(dfd, pathname->name, flags, nd);
if (unlikely(error))
goto out;
@@ -3229,10 +3236,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
put_link(nd, &link, cookie);
}
out:
- if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT))
- path_put(&nd->root);
- if (base)
- fput(base);
+ path_cleanup(nd);
if (!(opened & FILE_OPENED)) {
BUG_ON(!error);
put_filp(file);
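
In fs/namei.c the three entry points stop open-coding the same setup and teardown: path_init() now also runs link_path_walk() to the parent, and the new path_cleanup() drops nd->root and the pinned base file. A condensed sketch of a caller after the refactor, omitting the symlink-following loop and error handling that the real callers (path_lookupat(), path_mountpoint(), path_openat() above) keep; the function name is illustrative:

	/* Sketch only: condensed caller shape after the path_init()/path_cleanup() split. */
	static int example_lookup(int dfd, const char *name, unsigned int flags,
				  struct nameidata *nd, struct path *path)
	{
		int err;

		err = path_init(dfd, name, flags, nd);	/* init state + walk to the parent */
		if (!err && !(flags & LOOKUP_PARENT))
			err = lookup_last(nd, path);	/* resolve the final component */
		path_cleanup(nd);			/* put nd->root and nd->base if taken */
		return err;
	}
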
diff --git a/fs/namespace.c b/fs/namespace.c
index 5b66b2b3624..cd1e9681a0c 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -963,7 +963,8 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
}
/* Don't allow unprivileged users to reveal what is under a mount */
- if ((flag & CL_UNPRIVILEGED) && list_empty(&old->mnt_expire))
+ if ((flag & CL_UNPRIVILEGED) &&
+ (!(flag & CL_EXPIRE) || list_empty(&old->mnt_expire)))
mnt->mnt.mnt_flags |= MNT_LOCKED;
atomic_inc(&sb->s_active);
@@ -1369,6 +1370,8 @@ void umount_tree(struct mount *mnt, int how)
}
if (last) {
last->mnt_hash.next = unmounted.first;
+ if (unmounted.first)
+ unmounted.first->pprev = &last->mnt_hash.next;
unmounted.first = tmp_list.first;
unmounted.first->pprev = &unmounted.first;
}
@@ -1544,6 +1547,9 @@ SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
goto dput_and_out;
if (mnt->mnt.mnt_flags & MNT_LOCKED)
goto dput_and_out;
+ retval = -EPERM;
+ if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
+ goto dput_and_out;
retval = do_umount(mnt, flags);
dput_and_out:
@@ -1569,17 +1575,13 @@ SYSCALL_DEFINE1(oldumount, char __user *, name)
static bool is_mnt_ns_file(struct dentry *dentry)
{
/* Is this a proxy for a mount namespace? */
- struct inode *inode = dentry->d_inode;
- struct proc_ns *ei;
-
- if (!proc_ns_inode(inode))
- return false;
-
- ei = get_proc_ns(inode);
- if (ei->ns_ops != &mntns_operations)
- return false;
+ return dentry->d_op == &ns_dentry_operations &&
+ dentry->d_fsdata == &mntns_operations;
+}
- return true;
+struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
+{
+ return container_of(ns, struct mnt_namespace, ns);
}
static bool mnt_ns_loop(struct dentry *dentry)
@@ -1591,7 +1593,7 @@ static bool mnt_ns_loop(struct dentry *dentry)
if (!is_mnt_ns_file(dentry))
return false;
- mnt_ns = get_proc_ns(dentry->d_inode)->ns;
+ mnt_ns = to_mnt_ns(get_proc_ns(dentry->d_inode));
return current->nsproxy->mnt_ns->seq >= mnt_ns->seq;
}
@@ -1610,7 +1612,6 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
if (IS_ERR(q))
return q;
- q->mnt.mnt_flags &= ~MNT_LOCKED;
q->mnt_mountpoint = mnt->mnt_mountpoint;
p = mnt;
@@ -2020,7 +2021,10 @@ static int do_loopback(struct path *path, const char *old_name,
if (IS_MNT_UNBINDABLE(old))
goto out2;
- if (!check_mnt(parent) || !check_mnt(old))
+ if (!check_mnt(parent))
+ goto out2;
+
+ if (!check_mnt(old) && old_path.dentry->d_op != &ns_dentry_operations)
goto out2;
if (!recurse && has_locked_children(old, old_path.dentry))
@@ -2098,7 +2102,13 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
}
if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) &&
!(mnt_flags & MNT_NODEV)) {
- return -EPERM;
+ /* Was the nodev implicitly added in mount? */
+ if ((mnt->mnt_ns->user_ns != &init_user_ns) &&
+ !(sb->s_type->fs_flags & FS_USERNS_DEV_MOUNT)) {
+ mnt_flags |= MNT_NODEV;
+ } else {
+ return -EPERM;
+ }
}
if ((mnt->mnt.mnt_flags & MNT_LOCK_NOSUID) &&
!(mnt_flags & MNT_NOSUID)) {
@@ -2640,7 +2650,7 @@ dput_out:
static void free_mnt_ns(struct mnt_namespace *ns)
{
- proc_free_inum(ns->proc_inum);
+ ns_free_inum(&ns->ns);
put_user_ns(ns->user_ns);
kfree(ns);
}
@@ -2662,11 +2672,12 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
if (!new_ns)
return ERR_PTR(-ENOMEM);
- ret = proc_alloc_inum(&new_ns->proc_inum);
+ ret = ns_alloc_inum(&new_ns->ns);
if (ret) {
kfree(new_ns);
return ERR_PTR(ret);
}
+ new_ns->ns.ops = &mntns_operations;
new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
atomic_set(&new_ns->count, 1);
new_ns->root = NULL;
@@ -2958,6 +2969,8 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
/* mount new_root on / */
attach_mnt(new_mnt, real_mount(root_parent.mnt), root_mp);
touch_mnt_namespace(current->nsproxy->mnt_ns);
+ /* A moved mount should not expire automatically */
+ list_del_init(&new_mnt->mnt_expire);
unlock_mount_hash();
chroot_fs_refs(&root, &new);
put_mountpoint(root_mp);
@@ -3002,6 +3015,7 @@ static void __init init_mount_tree(void)
root.mnt = mnt;
root.dentry = mnt->mnt_root;
+ mnt->mnt_flags |= MNT_LOCKED;
set_fs_pwd(current->fs, &root);
set_fs_root(current->fs, &root);
@@ -3144,31 +3158,31 @@ found:
return visible;
}
-static void *mntns_get(struct task_struct *task)
+static struct ns_common *mntns_get(struct task_struct *task)
{
- struct mnt_namespace *ns = NULL;
+ struct ns_common *ns = NULL;
struct nsproxy *nsproxy;
task_lock(task);
nsproxy = task->nsproxy;
if (nsproxy) {
- ns = nsproxy->mnt_ns;
- get_mnt_ns(ns);
+ ns = &nsproxy->mnt_ns->ns;
+ get_mnt_ns(to_mnt_ns(ns));
}
task_unlock(task);
return ns;
}
-static void mntns_put(void *ns)
+static void mntns_put(struct ns_common *ns)
{
- put_mnt_ns(ns);
+ put_mnt_ns(to_mnt_ns(ns));
}
-static int mntns_install(struct nsproxy *nsproxy, void *ns)
+static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
struct fs_struct *fs = current->fs;
- struct mnt_namespace *mnt_ns = ns;
+ struct mnt_namespace *mnt_ns = to_mnt_ns(ns);
struct path root;
if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) ||
@@ -3198,17 +3212,10 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
return 0;
}
-static unsigned int mntns_inum(void *ns)
-{
- struct mnt_namespace *mnt_ns = ns;
- return mnt_ns->proc_inum;
-}
-
const struct proc_ns_operations mntns_operations = {
.name = "mnt",
.type = CLONE_NEWNS,
.get = mntns_get,
.put = mntns_put,
.install = mntns_install,
- .inum = mntns_inum,
};
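
The mount-namespace conversion shows the ns_common pattern the proc_ns_operations callbacks now expect: the namespace struct embeds a struct ns_common, the inode number comes from ns_alloc_inum()/ns_free_inum(), and callers recover the containing object with container_of(). A sketch with made-up names (foo_namespace, foo_ns_operations) to illustrate the embedding; only the field and helper names taken from the hunks above are real:

	/* Sketch only: foo_namespace and foo_ns_operations are illustrative names. */
	extern const struct proc_ns_operations foo_ns_operations;

	struct foo_namespace {
		atomic_t count;
		struct ns_common ns;		/* replaces the old unsigned int proc_inum */
	};

	static inline struct foo_namespace *to_foo_ns(struct ns_common *ns)
	{
		return container_of(ns, struct foo_namespace, ns);
	}

	static int foo_ns_init(struct foo_namespace *fns)
	{
		int ret = ns_alloc_inum(&fns->ns);	/* was proc_alloc_inum(&...->proc_inum) */

		if (ret)
			return ret;
		fns->ns.ops = &foo_ns_operations;	/* each namespace type installs its ops */
		return 0;
	}
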
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 0beb023f25a..ac71d13c69e 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -33,6 +33,7 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/file.h>
+#include <linux/falloc.h>
#include <linux/slab.h>
#include "idmap.h"
@@ -772,7 +773,7 @@ nfsd4_read(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
* the client wants us to do more in this compound:
*/
if (!nfsd4_last_compound_op(rqstp))
- rqstp->rq_splice_ok = false;
+ clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
/* check stateid */
if ((status = nfs4_preprocess_stateid_op(SVC_NET(rqstp),
@@ -1014,6 +1015,44 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
}
static __be32
+nfsd4_fallocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ struct nfsd4_fallocate *fallocate, int flags)
+{
+ __be32 status = nfserr_notsupp;
+ struct file *file;
+
+ status = nfs4_preprocess_stateid_op(SVC_NET(rqstp), cstate,
+ &fallocate->falloc_stateid,
+ WR_STATE, &file);
+ if (status != nfs_ok) {
+ dprintk("NFSD: nfsd4_fallocate: couldn't process stateid!\n");
+ return status;
+ }
+
+ status = nfsd4_vfs_fallocate(rqstp, &cstate->current_fh, file,
+ fallocate->falloc_offset,
+ fallocate->falloc_length,
+ flags);
+ fput(file);
+ return status;
+}
+
+static __be32
+nfsd4_allocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ struct nfsd4_fallocate *fallocate)
+{
+ return nfsd4_fallocate(rqstp, cstate, fallocate, 0);
+}
+
+static __be32
+nfsd4_deallocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ struct nfsd4_fallocate *fallocate)
+{
+ return nfsd4_fallocate(rqstp, cstate, fallocate,
+ FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE);
+}
+
+static __be32
nfsd4_seek(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_seek *seek)
{
@@ -1331,7 +1370,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
* Don't use the deferral mechanism for NFSv4; compounds make it
* too hard to avoid non-idempotency problems.
*/
- rqstp->rq_usedeferral = false;
+ clear_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
/*
* According to RFC3010, this takes precedence over all other errors.
@@ -1447,7 +1486,7 @@ encode_op:
BUG_ON(cstate->replay_owner);
out:
/* Reset deferral mechanism for RPC deferrals */
- rqstp->rq_usedeferral = true;
+ set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
dprintk("nfsv4 compound returned %d\n", ntohl(status));
return status;
}
@@ -1929,6 +1968,18 @@ static struct nfsd4_operation nfsd4_ops[] = {
},
/* NFSv4.2 operations */
+ [OP_ALLOCATE] = {
+ .op_func = (nfsd4op_func)nfsd4_allocate,
+ .op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
+ .op_name = "OP_ALLOCATE",
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_write_rsize,
+ },
+ [OP_DEALLOCATE] = {
+ .op_func = (nfsd4op_func)nfsd4_deallocate,
+ .op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
+ .op_name = "OP_DEALLOCATE",
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_write_rsize,
+ },
[OP_SEEK] = {
.op_func = (nfsd4op_func)nfsd4_seek,
.op_name = "OP_SEEK",
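The two new ops above route ALLOCATE and DEALLOCATE through nfsd4_vfs_fallocate(), with DEALLOCATE mapped to a hole punch that preserves i_size. A minimal user-space sketch of the equivalent local calls (the file name and sizes below are made up for illustration; this is not part of the patch):

/* build: cc -o falloc-demo falloc-demo.c */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("demo.dat", O_RDWR | O_CREAT, 0644);
	if (fd < 0) { perror("open"); return 1; }

	/* ALLOCATE: preallocate 1 MiB at offset 0 (mode 0) */
	if (fallocate(fd, 0, 0, 1 << 20) < 0)
		perror("fallocate(ALLOCATE)");

	/* DEALLOCATE: punch a 64 KiB hole at offset 4096, keeping i_size */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      4096, 64 << 10) < 0)
		perror("fallocate(DEALLOCATE)");

	close(fd);
	return 0;
}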
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 4e1d7268b00..3550a9c8761 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -275,9 +275,11 @@ opaque_hashval(const void *ptr, int nbytes)
return x;
}
-static void nfsd4_free_file(struct nfs4_file *f)
+static void nfsd4_free_file_rcu(struct rcu_head *rcu)
{
- kmem_cache_free(file_slab, f);
+ struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);
+
+ kmem_cache_free(file_slab, fp);
}
static inline void
@@ -286,9 +288,10 @@ put_nfs4_file(struct nfs4_file *fi)
might_lock(&state_lock);
if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) {
- hlist_del(&fi->fi_hash);
+ hlist_del_rcu(&fi->fi_hash);
spin_unlock(&state_lock);
- nfsd4_free_file(fi);
+ WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
+ call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
}
}
@@ -1440,7 +1443,7 @@ static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, stru
list_add(&new->se_perclnt, &clp->cl_sessions);
spin_unlock(&clp->cl_lock);
- if (cses->flags & SESSION4_BACK_CHAN) {
+ {
struct sockaddr *sa = svc_addr(rqstp);
/*
* This is a little silly; with sessions there's no real
@@ -1711,15 +1714,14 @@ static int copy_cred(struct svc_cred *target, struct svc_cred *source)
return 0;
}
-static long long
+static int
compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
{
- long long res;
-
- res = o1->len - o2->len;
- if (res)
- return res;
- return (long long)memcmp(o1->data, o2->data, o1->len);
+ if (o1->len < o2->len)
+ return -1;
+ if (o1->len > o2->len)
+ return 1;
+ return memcmp(o1->data, o2->data, o1->len);
}
static int same_name(const char *n1, const char *n2)
@@ -1907,7 +1909,7 @@ add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
static struct nfs4_client *
find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
{
- long long cmp;
+ int cmp;
struct rb_node *node = root->rb_node;
struct nfs4_client *clp;
@@ -3057,10 +3059,9 @@ static struct nfs4_file *nfsd4_alloc_file(void)
}
/* OPEN Share state helper functions */
-static void nfsd4_init_file(struct nfs4_file *fp, struct knfsd_fh *fh)
+static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
+ struct nfs4_file *fp)
{
- unsigned int hashval = file_hashval(fh);
-
lockdep_assert_held(&state_lock);
atomic_set(&fp->fi_ref, 1);
@@ -3073,7 +3074,7 @@ static void nfsd4_init_file(struct nfs4_file *fp, struct knfsd_fh *fh)
fp->fi_share_deny = 0;
memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
memset(fp->fi_access, 0, sizeof(fp->fi_access));
- hlist_add_head(&fp->fi_hash, &file_hashtbl[hashval]);
+ hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
}
void
@@ -3294,17 +3295,14 @@ move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
/* search file_hashtbl[] for file */
static struct nfs4_file *
-find_file_locked(struct knfsd_fh *fh)
+find_file_locked(struct knfsd_fh *fh, unsigned int hashval)
{
- unsigned int hashval = file_hashval(fh);
struct nfs4_file *fp;
- lockdep_assert_held(&state_lock);
-
- hlist_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) {
+ hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash) {
if (nfsd_fh_match(&fp->fi_fhandle, fh)) {
- get_nfs4_file(fp);
- return fp;
+ if (atomic_inc_not_zero(&fp->fi_ref))
+ return fp;
}
}
return NULL;
@@ -3314,10 +3312,11 @@ static struct nfs4_file *
find_file(struct knfsd_fh *fh)
{
struct nfs4_file *fp;
+ unsigned int hashval = file_hashval(fh);
- spin_lock(&state_lock);
- fp = find_file_locked(fh);
- spin_unlock(&state_lock);
+ rcu_read_lock();
+ fp = find_file_locked(fh, hashval);
+ rcu_read_unlock();
return fp;
}
@@ -3325,11 +3324,18 @@ static struct nfs4_file *
find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh)
{
struct nfs4_file *fp;
+ unsigned int hashval = file_hashval(fh);
+
+ rcu_read_lock();
+ fp = find_file_locked(fh, hashval);
+ rcu_read_unlock();
+ if (fp)
+ return fp;
spin_lock(&state_lock);
- fp = find_file_locked(fh);
- if (fp == NULL) {
- nfsd4_init_file(new, fh);
+ fp = find_file_locked(fh, hashval);
+ if (likely(fp == NULL)) {
+ nfsd4_init_file(fh, hashval, new);
fp = new;
}
spin_unlock(&state_lock);
@@ -4127,7 +4133,7 @@ void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
nfs4_put_stateowner(so);
}
if (open->op_file)
- nfsd4_free_file(open->op_file);
+ kmem_cache_free(file_slab, open->op_file);
if (open->op_stp)
nfs4_put_stid(&open->op_stp->st_stid);
}
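The compare_blob() rewrite above replaces a length subtraction, whose sign can be lost when the wide result is narrowed by callers such as the rb-tree walk in find_clp_in_name_tree(), with explicit ordering. A small stand-alone model of the same comparator (the blob struct below is a simplified stand-in for xdr_netobj, not the kernel type):

#include <stdio.h>
#include <string.h>

struct blob { size_t len; const unsigned char *data; };

/* report ordering explicitly instead of returning a subtracted length */
static int compare_blob(const struct blob *o1, const struct blob *o2)
{
	if (o1->len < o2->len)
		return -1;
	if (o1->len > o2->len)
		return 1;
	return memcmp(o1->data, o2->data, o1->len);
}

int main(void)
{
	struct blob a = { 5, (const unsigned char *)"alpha" };
	struct blob b = { 4, (const unsigned char *)"beta" };

	printf("compare_blob(a, b) = %d\n", compare_blob(&a, &b));
	return 0;
}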
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index b1eed4dd2ea..15f7b73e0c0 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1514,6 +1514,23 @@ static __be32 nfsd4_decode_reclaim_complete(struct nfsd4_compoundargs *argp, str
}
static __be32
+nfsd4_decode_fallocate(struct nfsd4_compoundargs *argp,
+ struct nfsd4_fallocate *fallocate)
+{
+ DECODE_HEAD;
+
+ status = nfsd4_decode_stateid(argp, &fallocate->falloc_stateid);
+ if (status)
+ return status;
+
+ READ_BUF(16);
+ p = xdr_decode_hyper(p, &fallocate->falloc_offset);
+ xdr_decode_hyper(p, &fallocate->falloc_length);
+
+ DECODE_TAIL;
+}
+
+static __be32
nfsd4_decode_seek(struct nfsd4_compoundargs *argp, struct nfsd4_seek *seek)
{
DECODE_HEAD;
@@ -1604,10 +1621,10 @@ static nfsd4_dec nfsd4_dec_ops[] = {
[OP_RECLAIM_COMPLETE] = (nfsd4_dec)nfsd4_decode_reclaim_complete,
/* new operations for NFSv4.2 */
- [OP_ALLOCATE] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_ALLOCATE] = (nfsd4_dec)nfsd4_decode_fallocate,
[OP_COPY] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_COPY_NOTIFY] = (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_DEALLOCATE] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_DEALLOCATE] = (nfsd4_dec)nfsd4_decode_fallocate,
[OP_IO_ADVISE] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_LAYOUTERROR] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_LAYOUTSTATS] = (nfsd4_dec)nfsd4_decode_notsupp,
@@ -1714,7 +1731,7 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
argp->rqstp->rq_cachetype = cachethis ? RC_REPLBUFF : RC_NOCACHE;
if (readcount > 1 || max_reply > PAGE_SIZE - auth_slack)
- argp->rqstp->rq_splice_ok = false;
+ clear_bit(RQ_SPLICE_OK, &argp->rqstp->rq_flags);
DECODE_TAIL;
}
@@ -1795,9 +1812,12 @@ static __be32 nfsd4_encode_components_esc(struct xdr_stream *xdr, char sep,
}
else
end++;
+ if (found_esc)
+ end = next;
+
str = end;
}
- pathlen = htonl(xdr->buf->len - pathlen_offset);
+ pathlen = htonl(count);
write_bytes_to_xdr_buf(xdr->buf, pathlen_offset, &pathlen, 4);
return 0;
}
@@ -3236,10 +3256,10 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
p = xdr_reserve_space(xdr, 8); /* eof flag and byte count */
if (!p) {
- WARN_ON_ONCE(resp->rqstp->rq_splice_ok);
+ WARN_ON_ONCE(test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags));
return nfserr_resource;
}
- if (resp->xdr.buf->page_len && resp->rqstp->rq_splice_ok) {
+ if (resp->xdr.buf->page_len && test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags)) {
WARN_ON_ONCE(1);
return nfserr_resource;
}
@@ -3256,7 +3276,7 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
goto err_truncate;
}
- if (file->f_op->splice_read && resp->rqstp->rq_splice_ok)
+ if (file->f_op->splice_read && test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags))
err = nfsd4_encode_splice_read(resp, read, file, maxcount);
else
err = nfsd4_encode_readv(resp, read, file, maxcount);
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 122f69185ef..83a9694ec48 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -490,7 +490,7 @@ found_entry:
/* From the hall of fame of impractical attacks:
* Is this a user who tries to snoop on the cache? */
rtn = RC_DOIT;
- if (!rqstp->rq_secure && rp->c_secure)
+ if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)
goto out;
/* Compose RPC reply header */
@@ -579,7 +579,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
spin_lock(&b->cache_lock);
drc_mem_usage += bufsize;
lru_put_end(b, rp);
- rp->c_secure = rqstp->rq_secure;
+ rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags);
rp->c_type = cachetype;
rp->c_state = RC_DONE;
spin_unlock(&b->cache_lock);
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 9506ea56561..19ace74d35f 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -608,7 +608,7 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
num);
sep = " ";
- if (len > remaining)
+ if (len >= remaining)
break;
remaining -= len;
buf += len;
@@ -623,7 +623,7 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
'+' : '-',
minor);
- if (len > remaining)
+ if (len >= remaining)
break;
remaining -= len;
buf += len;
@@ -631,7 +631,7 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
}
len = snprintf(buf, remaining, "\n");
- if (len > remaining)
+ if (len >= remaining)
return -EINVAL;
return tlen + len;
}
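The three __write_versions() checks change '>' to '>=' because snprintf() returns the length the formatted string would have had; a return value equal to the remaining space already means the output was truncated to make room for the terminating NUL. A tiny stand-alone demonstration of that boundary case:

#include <stdio.h>

int main(void)
{
	char buf[8];
	int remaining = sizeof(buf);
	/* needs 8 chars plus NUL, so it does not fit in 8 bytes */
	int len = snprintf(buf, remaining, "%s", "12345678");

	if (len >= remaining)
		printf("truncated: wanted %d bytes, had only %d\n", len, remaining);
	else
		printf("ok: \"%s\"\n", buf);
	return 0;
}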
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 88026fc6a98..965b478d50f 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -86,7 +86,7 @@ static __be32 nfsd_setuser_and_check_port(struct svc_rqst *rqstp,
int flags = nfsexp_flags(rqstp, exp);
/* Check if the request originated from a secure port. */
- if (!rqstp->rq_secure && !(flags & NFSEXP_INSECURE_PORT)) {
+ if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && !(flags & NFSEXP_INSECURE_PORT)) {
RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);
dprintk("nfsd: request from insecure port %s!\n",
svc_print_addr(rqstp, buf, sizeof(buf)));
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 752d56bbe0b..314f5c8f8f1 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -692,7 +692,7 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
/* Now call the procedure handler, and encode NFS status. */
nfserr = proc->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);
nfserr = map_new_errors(rqstp->rq_vers, nfserr);
- if (nfserr == nfserr_dropit || rqstp->rq_dropme) {
+ if (nfserr == nfserr_dropit || test_bit(RQ_DROPME, &rqstp->rq_flags)) {
dprintk("nfsd: Dropping request; may be revisited later\n");
nfsd_cache_update(rqstp, RC_NOCACHE, NULL);
return 0;
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 2712042a66b..9d3be371240 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -463,17 +463,24 @@ static inline struct nfs4_lockowner * lockowner(struct nfs4_stateowner *so)
/*
* nfs4_file: a file opened by some number of (open) nfs4_stateowners.
*
- * These objects are global. nfsd only keeps one instance of a nfs4_file per
- * inode (though it may keep multiple file descriptors open per inode). These
- * are tracked in the file_hashtbl which is protected by the state_lock
- * spinlock.
+ * These objects are global. nfsd keeps one instance of a nfs4_file per
+ * filehandle (though it may keep multiple file descriptors for each). Each
+ * inode can have multiple filehandles associated with it, so there is
+ * (potentially) a many-to-one relationship between this struct and struct

+ * inode.
+ *
+ * These are hashed by filehandle in the file_hashtbl, which is protected by
+ * the global state_lock spinlock.
*/
struct nfs4_file {
atomic_t fi_ref;
spinlock_t fi_lock;
- struct hlist_node fi_hash; /* hash by "struct inode *" */
+ struct hlist_node fi_hash; /* hash on fi_fhandle */
struct list_head fi_stateids;
- struct list_head fi_delegations;
+ union {
+ struct list_head fi_delegations;
+ struct rcu_head fi_rcu;
+ };
/* One each for O_RDONLY, O_WRONLY, O_RDWR: */
struct file * fi_fds[3];
/*
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 0a82e3c033e..5685c679dd9 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -16,6 +16,7 @@
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/splice.h>
+#include <linux/falloc.h>
#include <linux/fcntl.h>
#include <linux/namei.h>
#include <linux/delay.h>
@@ -533,6 +534,26 @@ __be32 nfsd4_set_nfs4_label(struct svc_rqst *rqstp, struct svc_fh *fhp,
}
#endif
+__be32 nfsd4_vfs_fallocate(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ struct file *file, loff_t offset, loff_t len,
+ int flags)
+{
+ __be32 err;
+ int error;
+
+ if (!S_ISREG(file_inode(file)->i_mode))
+ return nfserr_inval;
+
+ err = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry, NFSD_MAY_WRITE);
+ if (err)
+ return err;
+
+ error = vfs_fallocate(file, flags, offset, len);
+ if (!error)
+ error = commit_metadata(fhp);
+
+ return nfserrno(error);
+}
#endif /* defined(CONFIG_NFSD_V4) */
#ifdef CONFIG_NFSD_V3
@@ -881,7 +902,7 @@ static __be32
nfsd_vfs_read(struct svc_rqst *rqstp, struct file *file,
loff_t offset, struct kvec *vec, int vlen, unsigned long *count)
{
- if (file->f_op->splice_read && rqstp->rq_splice_ok)
+ if (file->f_op->splice_read && test_bit(RQ_SPLICE_OK, &rqstp->rq_flags))
return nfsd_splice_read(rqstp, file, offset, count);
else
return nfsd_readv(file, offset, vec, vlen, count);
@@ -937,9 +958,10 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
int stable = *stablep;
int use_wgather;
loff_t pos = offset;
+ loff_t end = LLONG_MAX;
unsigned int pflags = current->flags;
- if (rqstp->rq_local)
+ if (test_bit(RQ_LOCAL, &rqstp->rq_flags))
/*
* We want less throttling in balance_dirty_pages()
* and shrink_inactive_list() so that nfs to
@@ -967,10 +989,13 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
fsnotify_modify(file);
if (stable) {
- if (use_wgather)
+ if (use_wgather) {
host_err = wait_for_concurrent_writes(file);
- else
- host_err = vfs_fsync_range(file, offset, offset+*cnt, 0);
+ } else {
+ if (*cnt)
+ end = offset + *cnt - 1;
+ host_err = vfs_fsync_range(file, offset, end, 0);
+ }
}
out_nfserr:
@@ -979,7 +1004,7 @@ out_nfserr:
err = 0;
else
err = nfserrno(host_err);
- if (rqstp->rq_local)
+ if (test_bit(RQ_LOCAL, &rqstp->rq_flags))
tsk_restore_flags(current, pflags, PF_LESS_THROTTLE);
return err;
}
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
index b1796d6ee53..2050cb01699 100644
--- a/fs/nfsd/vfs.h
+++ b/fs/nfsd/vfs.h
@@ -54,6 +54,8 @@ int nfsd_mountpoint(struct dentry *, struct svc_export *);
#ifdef CONFIG_NFSD_V4
__be32 nfsd4_set_nfs4_label(struct svc_rqst *, struct svc_fh *,
struct xdr_netobj *);
+__be32 nfsd4_vfs_fallocate(struct svc_rqst *, struct svc_fh *,
+ struct file *, loff_t, loff_t, int);
#endif /* CONFIG_NFSD_V4 */
__be32 nfsd_create(struct svc_rqst *, struct svc_fh *,
char *name, int len, struct iattr *attrs,
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index 5720e9457f3..90a5925bd6a 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -428,6 +428,13 @@ struct nfsd4_reclaim_complete {
u32 rca_one_fs;
};
+struct nfsd4_fallocate {
+ /* request */
+ stateid_t falloc_stateid;
+ loff_t falloc_offset;
+ u64 falloc_length;
+};
+
struct nfsd4_seek {
/* request */
stateid_t seek_stateid;
@@ -486,6 +493,8 @@ struct nfsd4_op {
struct nfsd4_free_stateid free_stateid;
/* NFSv4.2 */
+ struct nfsd4_fallocate allocate;
+ struct nfsd4_fallocate deallocate;
struct nfsd4_seek seek;
} u;
struct nfs4_replay * replay;
diff --git a/fs/nsfs.c b/fs/nsfs.c
new file mode 100644
index 00000000000..af1b24fa899
--- /dev/null
+++ b/fs/nsfs.c
@@ -0,0 +1,161 @@
+#include <linux/mount.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/proc_ns.h>
+#include <linux/magic.h>
+#include <linux/ktime.h>
+
+static struct vfsmount *nsfs_mnt;
+
+static const struct file_operations ns_file_operations = {
+ .llseek = no_llseek,
+};
+
+static char *ns_dname(struct dentry *dentry, char *buffer, int buflen)
+{
+ struct inode *inode = dentry->d_inode;
+ const struct proc_ns_operations *ns_ops = dentry->d_fsdata;
+
+ return dynamic_dname(dentry, buffer, buflen, "%s:[%lu]",
+ ns_ops->name, inode->i_ino);
+}
+
+static void ns_prune_dentry(struct dentry *dentry)
+{
+ struct inode *inode = dentry->d_inode;
+ if (inode) {
+ struct ns_common *ns = inode->i_private;
+ atomic_long_set(&ns->stashed, 0);
+ }
+}
+
+const struct dentry_operations ns_dentry_operations =
+{
+ .d_prune = ns_prune_dentry,
+ .d_delete = always_delete_dentry,
+ .d_dname = ns_dname,
+};
+
+static void nsfs_evict(struct inode *inode)
+{
+ struct ns_common *ns = inode->i_private;
+ clear_inode(inode);
+ ns->ops->put(ns);
+}
+
+void *ns_get_path(struct path *path, struct task_struct *task,
+ const struct proc_ns_operations *ns_ops)
+{
+ struct vfsmount *mnt = mntget(nsfs_mnt);
+ struct qstr qname = { .name = "", };
+ struct dentry *dentry;
+ struct inode *inode;
+ struct ns_common *ns;
+ unsigned long d;
+
+again:
+ ns = ns_ops->get(task);
+ if (!ns) {
+ mntput(mnt);
+ return ERR_PTR(-ENOENT);
+ }
+ rcu_read_lock();
+ d = atomic_long_read(&ns->stashed);
+ if (!d)
+ goto slow;
+ dentry = (struct dentry *)d;
+ if (!lockref_get_not_dead(&dentry->d_lockref))
+ goto slow;
+ rcu_read_unlock();
+ ns_ops->put(ns);
+got_it:
+ path->mnt = mnt;
+ path->dentry = dentry;
+ return NULL;
+slow:
+ rcu_read_unlock();
+ inode = new_inode_pseudo(mnt->mnt_sb);
+ if (!inode) {
+ ns_ops->put(ns);
+ mntput(mnt);
+ return ERR_PTR(-ENOMEM);
+ }
+ inode->i_ino = ns->inum;
+ inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+ inode->i_flags |= S_IMMUTABLE;
+ inode->i_mode = S_IFREG | S_IRUGO;
+ inode->i_fop = &ns_file_operations;
+ inode->i_private = ns;
+
+ dentry = d_alloc_pseudo(mnt->mnt_sb, &qname);
+ if (!dentry) {
+ iput(inode);
+ mntput(mnt);
+ return ERR_PTR(-ENOMEM);
+ }
+ d_instantiate(dentry, inode);
+ dentry->d_fsdata = (void *)ns_ops;
+ d = atomic_long_cmpxchg(&ns->stashed, 0, (unsigned long)dentry);
+ if (d) {
+ d_delete(dentry); /* make sure ->d_prune() does nothing */
+ dput(dentry);
+ cpu_relax();
+ goto again;
+ }
+ goto got_it;
+}
+
+int ns_get_name(char *buf, size_t size, struct task_struct *task,
+ const struct proc_ns_operations *ns_ops)
+{
+ struct ns_common *ns;
+ int res = -ENOENT;
+ ns = ns_ops->get(task);
+ if (ns) {
+ res = snprintf(buf, size, "%s:[%u]", ns_ops->name, ns->inum);
+ ns_ops->put(ns);
+ }
+ return res;
+}
+
+struct file *proc_ns_fget(int fd)
+{
+ struct file *file;
+
+ file = fget(fd);
+ if (!file)
+ return ERR_PTR(-EBADF);
+
+ if (file->f_op != &ns_file_operations)
+ goto out_invalid;
+
+ return file;
+
+out_invalid:
+ fput(file);
+ return ERR_PTR(-EINVAL);
+}
+
+static const struct super_operations nsfs_ops = {
+ .statfs = simple_statfs,
+ .evict_inode = nsfs_evict,
+};
+static struct dentry *nsfs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
+{
+ return mount_pseudo(fs_type, "nsfs:", &nsfs_ops,
+ &ns_dentry_operations, NSFS_MAGIC);
+}
+static struct file_system_type nsfs = {
+ .name = "nsfs",
+ .mount = nsfs_mount,
+ .kill_sb = kill_anon_super,
+};
+
+void __init nsfs_init(void)
+{
+ nsfs_mnt = kern_mount(&nsfs);
+ if (IS_ERR(nsfs_mnt))
+ panic("can't set nsfs up\n");
+ nsfs_mnt->mnt_sb->s_flags &= ~MS_NOUSER;
+}
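ns_get_path() above caches the dentry it creates in ns->stashed and publishes it with a cmpxchg, so concurrent callers either reuse the cached dentry or lose the race, drop their copy and retry. A simplified user-space model of that publish-or-retry pattern (it omits the lockref/dead-dentry handling and uses stand-in types, not kernel API):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct dentry { int refs; };

static _Atomic(struct dentry *) stashed;

static struct dentry *get_stashed_or_publish(void)
{
	for (;;) {
		struct dentry *d = atomic_load(&stashed);
		if (d)
			return d;               /* fast path: reuse the cached one */

		struct dentry *fresh = calloc(1, sizeof(*fresh));
		struct dentry *expected = NULL;

		if (atomic_compare_exchange_strong(&stashed, &expected, fresh))
			return fresh;           /* we won the race and published it */

		free(fresh);                    /* somebody beat us; retry */
	}
}

int main(void)
{
	printf("stashed dentry at %p\n", (void *)get_stashed_or_publish());
	return 0;
}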
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index a93bf989225..fcae9ef1a32 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -5662,7 +5662,7 @@ int ocfs2_remove_btree_range(struct inode *inode,
struct ocfs2_extent_tree *et,
u32 cpos, u32 phys_cpos, u32 len, int flags,
struct ocfs2_cached_dealloc_ctxt *dealloc,
- u64 refcount_loc)
+ u64 refcount_loc, bool refcount_tree_locked)
{
int ret, credits = 0, extra_blocks = 0;
u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
@@ -5676,11 +5676,13 @@ int ocfs2_remove_btree_range(struct inode *inode,
BUG_ON(!(OCFS2_I(inode)->ip_dyn_features &
OCFS2_HAS_REFCOUNT_FL));
- ret = ocfs2_lock_refcount_tree(osb, refcount_loc, 1,
- &ref_tree, NULL);
- if (ret) {
- mlog_errno(ret);
- goto bail;
+ if (!refcount_tree_locked) {
+ ret = ocfs2_lock_refcount_tree(osb, refcount_loc, 1,
+ &ref_tree, NULL);
+ if (ret) {
+ mlog_errno(ret);
+ goto bail;
+ }
}
ret = ocfs2_prepare_refcount_change_for_del(inode,
@@ -7021,6 +7023,7 @@ int ocfs2_commit_truncate(struct ocfs2_super *osb,
u64 refcount_loc = le64_to_cpu(di->i_refcount_loc);
struct ocfs2_extent_tree et;
struct ocfs2_cached_dealloc_ctxt dealloc;
+ struct ocfs2_refcount_tree *ref_tree = NULL;
ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
ocfs2_init_dealloc_ctxt(&dealloc);
@@ -7130,9 +7133,18 @@ start:
phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb, blkno);
+ if ((flags & OCFS2_EXT_REFCOUNTED) && trunc_len && !ref_tree) {
+ status = ocfs2_lock_refcount_tree(osb, refcount_loc, 1,
+ &ref_tree, NULL);
+ if (status) {
+ mlog_errno(status);
+ goto bail;
+ }
+ }
+
status = ocfs2_remove_btree_range(inode, &et, trunc_cpos,
phys_cpos, trunc_len, flags, &dealloc,
- refcount_loc);
+ refcount_loc, true);
if (status < 0) {
mlog_errno(status);
goto bail;
@@ -7147,6 +7159,8 @@ start:
goto start;
bail:
+ if (ref_tree)
+ ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
ocfs2_schedule_truncate_log_flush(osb, 1);
diff --git a/fs/ocfs2/alloc.h b/fs/ocfs2/alloc.h
index ca381c58412..fb09b97db16 100644
--- a/fs/ocfs2/alloc.h
+++ b/fs/ocfs2/alloc.h
@@ -142,7 +142,7 @@ int ocfs2_remove_btree_range(struct inode *inode,
struct ocfs2_extent_tree *et,
u32 cpos, u32 phys_cpos, u32 len, int flags,
struct ocfs2_cached_dealloc_ctxt *dealloc,
- u64 refcount_loc);
+ u64 refcount_loc, bool refcount_tree_locked);
int ocfs2_num_free_extents(struct ocfs2_super *osb,
struct ocfs2_extent_tree *et);
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index d9f222987f2..46d93e941f3 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -894,7 +894,7 @@ void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
}
}
-static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
+static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc)
{
int i;
@@ -915,7 +915,11 @@ static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
page_cache_release(wc->w_target_page);
}
ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);
+}
+static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
+{
+ ocfs2_unlock_pages(wc);
brelse(wc->w_di_bh);
kfree(wc);
}
@@ -2042,11 +2046,19 @@ out_write_size:
ocfs2_update_inode_fsync_trans(handle, inode, 1);
ocfs2_journal_dirty(handle, wc->w_di_bh);
+	/* Unlock pages before running the deallocs, since dealloc needs to
+	 * acquire the j_trans_barrier lock; otherwise we can deadlock, because
+	 * the journal commit thread holds that lock and asks for the page lock
+	 * when flushing data. Keep the call here to preserve the unlock order.
+	 */
+ ocfs2_unlock_pages(wc);
+
ocfs2_commit_trans(osb, handle);
ocfs2_run_deallocs(osb, &wc->w_dealloc);
- ocfs2_free_write_ctxt(wc);
+ brelse(wc->w_di_bh);
+ kfree(wc);
return copied;
}
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index 79d56dc981b..319e786175a 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -4479,7 +4479,7 @@ int ocfs2_dx_dir_truncate(struct inode *dir, struct buffer_head *di_bh)
p_cpos = ocfs2_blocks_to_clusters(dir->i_sb, blkno);
ret = ocfs2_remove_btree_range(dir, &et, cpos, p_cpos, clen, 0,
- &dealloc, 0);
+ &dealloc, 0, false);
if (ret) {
mlog_errno(ret);
goto out;
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 3689b359204..a6944b25fd5 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -695,14 +695,6 @@ void __dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm,
res->inflight_assert_workers);
}
-static void dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm,
- struct dlm_lock_resource *res)
-{
- spin_lock(&res->spinlock);
- __dlm_lockres_grab_inflight_worker(dlm, res);
- spin_unlock(&res->spinlock);
-}
-
static void __dlm_lockres_drop_inflight_worker(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res)
{
@@ -1646,6 +1638,7 @@ send_response:
}
mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
dlm->node_num, res->lockname.len, res->lockname.name);
+ spin_lock(&res->spinlock);
ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
DLM_ASSERT_MASTER_MLE_CLEANUP);
if (ret < 0) {
@@ -1653,7 +1646,8 @@ send_response:
response = DLM_MASTER_RESP_ERROR;
dlm_lockres_put(res);
} else
- dlm_lockres_grab_inflight_worker(dlm, res);
+ __dlm_lockres_grab_inflight_worker(dlm, res);
+ spin_unlock(&res->spinlock);
} else {
if (res)
dlm_lockres_put(res);
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 69fb9f75b08..3950693dd0f 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1803,7 +1803,7 @@ static int ocfs2_remove_inode_range(struct inode *inode,
ret = ocfs2_remove_btree_range(inode, &et, trunc_cpos,
phys_cpos, trunc_len, flags,
- &dealloc, refcount_loc);
+ &dealloc, refcount_loc, false);
if (ret < 0) {
mlog_errno(ret);
goto out;
diff --git a/fs/open.c b/fs/open.c
index d45bd905d41..813be037b41 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -222,7 +222,7 @@ SYSCALL_DEFINE2(ftruncate64, unsigned int, fd, loff_t, length)
#endif /* BITS_PER_LONG == 32 */
-int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
+int vfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
struct inode *inode = file_inode(file);
long ret;
@@ -309,6 +309,7 @@ int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
sb_end_write(inode->i_sb);
return ret;
}
+EXPORT_SYMBOL_GPL(vfs_fallocate);
SYSCALL_DEFINE4(fallocate, int, fd, int, mode, loff_t, offset, loff_t, len)
{
@@ -316,7 +317,7 @@ SYSCALL_DEFINE4(fallocate, int, fd, int, mode, loff_t, offset, loff_t, len)
int error = -EBADF;
if (f.file) {
- error = do_fallocate(f.file, mode, offset, len);
+ error = vfs_fallocate(f.file, mode, offset, len);
fdput(f);
}
return error;
diff --git a/fs/pnode.c b/fs/pnode.c
index aae331a5d03..260ac8f898a 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -242,6 +242,7 @@ static int propagate_one(struct mount *m)
child = copy_tree(last_source, last_source->mnt.mnt_root, type);
if (IS_ERR(child))
return PTR_ERR(child);
+ child->mnt.mnt_flags &= ~MNT_LOCKED;
mnt_set_mountpoint(m, mp, child);
last_dest = m;
last_source = child;
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 590aeda5af1..3f3d7aeb071 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2464,6 +2464,57 @@ static const struct file_operations proc_projid_map_operations = {
.llseek = seq_lseek,
.release = proc_id_map_release,
};
+
+static int proc_setgroups_open(struct inode *inode, struct file *file)
+{
+ struct user_namespace *ns = NULL;
+ struct task_struct *task;
+ int ret;
+
+ ret = -ESRCH;
+ task = get_proc_task(inode);
+ if (task) {
+ rcu_read_lock();
+ ns = get_user_ns(task_cred_xxx(task, user_ns));
+ rcu_read_unlock();
+ put_task_struct(task);
+ }
+ if (!ns)
+ goto err;
+
+ if (file->f_mode & FMODE_WRITE) {
+ ret = -EACCES;
+ if (!ns_capable(ns, CAP_SYS_ADMIN))
+ goto err_put_ns;
+ }
+
+ ret = single_open(file, &proc_setgroups_show, ns);
+ if (ret)
+ goto err_put_ns;
+
+ return 0;
+err_put_ns:
+ put_user_ns(ns);
+err:
+ return ret;
+}
+
+static int proc_setgroups_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq = file->private_data;
+ struct user_namespace *ns = seq->private;
+ int ret = single_release(inode, file);
+ put_user_ns(ns);
+ return ret;
+}
+
+static const struct file_operations proc_setgroups_operations = {
+ .open = proc_setgroups_open,
+ .write = proc_setgroups_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = proc_setgroups_release,
+};
#endif /* CONFIG_USER_NS */
static int proc_pid_personality(struct seq_file *m, struct pid_namespace *ns,
@@ -2572,6 +2623,7 @@ static const struct pid_entry tgid_base_stuff[] = {
REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations),
+ REG("setgroups", S_IRUGO|S_IWUSR, proc_setgroups_operations),
#endif
#ifdef CONFIG_CHECKPOINT_RESTORE
REG("timers", S_IRUGO, proc_timers_operations),
@@ -2916,6 +2968,7 @@ static const struct pid_entry tid_base_stuff[] = {
REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations),
+ REG("setgroups", S_IRUGO|S_IWUSR, proc_setgroups_operations),
#endif
};
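The new "setgroups" entry gives each task a readable policy file (writable only with CAP_SYS_ADMIN over the task's user namespace, per proc_setgroups_open() above). A hypothetical probe that simply reads the current policy string back on a kernel carrying this change:

#include <stdio.h>

int main(void)
{
	char line[32] = "";
	FILE *f = fopen("/proc/self/setgroups", "r");

	if (!f) {
		perror("open /proc/self/setgroups");
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		printf("setgroups policy: %s", line);   /* short policy keyword */
	fclose(f);
	return 0;
}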
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 333080d7a67..8420a2f8081 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -32,8 +32,6 @@ static void proc_evict_inode(struct inode *inode)
{
struct proc_dir_entry *de;
struct ctl_table_header *head;
- const struct proc_ns_operations *ns_ops;
- void *ns;
truncate_inode_pages_final(&inode->i_data);
clear_inode(inode);
@@ -50,11 +48,6 @@ static void proc_evict_inode(struct inode *inode)
RCU_INIT_POINTER(PROC_I(inode)->sysctl, NULL);
sysctl_head_put(head);
}
- /* Release any associated namespace */
- ns_ops = PROC_I(inode)->ns.ns_ops;
- ns = PROC_I(inode)->ns.ns;
- if (ns_ops && ns)
- ns_ops->put(ns);
}
static struct kmem_cache * proc_inode_cachep;
@@ -73,8 +66,7 @@ static struct inode *proc_alloc_inode(struct super_block *sb)
ei->pde = NULL;
ei->sysctl = NULL;
ei->sysctl_entry = NULL;
- ei->ns.ns = NULL;
- ei->ns.ns_ops = NULL;
+ ei->ns_ops = NULL;
inode = &ei->vfs_inode;
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
return inode;
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 7fb1a4869fd..6fcdba573e0 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -65,7 +65,7 @@ struct proc_inode {
struct proc_dir_entry *pde;
struct ctl_table_header *sysctl;
struct ctl_table *sysctl_entry;
- struct proc_ns ns;
+ const struct proc_ns_operations *ns_ops;
struct inode vfs_inode;
};
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index aa1eee06420..d3ebf2e6185 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -12,6 +12,9 @@
#include <linux/vmstat.h>
#include <linux/atomic.h>
#include <linux/vmalloc.h>
+#ifdef CONFIG_CMA
+#include <linux/cma.h>
+#endif
#include <asm/page.h>
#include <asm/pgtable.h>
#include "internal.h"
@@ -138,6 +141,10 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
"AnonHugePages: %8lu kB\n"
#endif
+#ifdef CONFIG_CMA
+ "CmaTotal: %8lu kB\n"
+ "CmaFree: %8lu kB\n"
+#endif
,
K(i.totalram),
K(i.freeram),
@@ -187,12 +194,16 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
vmi.used >> 10,
vmi.largest_chunk >> 10
#ifdef CONFIG_MEMORY_FAILURE
- ,atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
+ , atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
+ , K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
HPAGE_PMD_NR)
#endif
+#ifdef CONFIG_CMA
+ , K(totalcma_pages)
+ , K(global_page_state(NR_FREE_CMA_PAGES))
+#endif
);
hugetlb_report_meminfo(m);
diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c
index 89026095f2b..c9eac4563fa 100644
--- a/fs/proc/namespaces.c
+++ b/fs/proc/namespaces.c
@@ -1,10 +1,6 @@
#include <linux/proc_fs.h>
#include <linux/nsproxy.h>
-#include <linux/sched.h>
#include <linux/ptrace.h>
-#include <linux/fs_struct.h>
-#include <linux/mount.h>
-#include <linux/path.h>
#include <linux/namei.h>
#include <linux/file.h>
#include <linux/utsname.h>
@@ -34,138 +30,45 @@ static const struct proc_ns_operations *ns_entries[] = {
&mntns_operations,
};
-static const struct file_operations ns_file_operations = {
- .llseek = no_llseek,
-};
-
-static const struct inode_operations ns_inode_operations = {
- .setattr = proc_setattr,
-};
-
-static char *ns_dname(struct dentry *dentry, char *buffer, int buflen)
-{
- struct inode *inode = dentry->d_inode;
- const struct proc_ns_operations *ns_ops = PROC_I(inode)->ns.ns_ops;
-
- return dynamic_dname(dentry, buffer, buflen, "%s:[%lu]",
- ns_ops->name, inode->i_ino);
-}
-
-const struct dentry_operations ns_dentry_operations =
-{
- .d_delete = always_delete_dentry,
- .d_dname = ns_dname,
-};
-
-static struct dentry *proc_ns_get_dentry(struct super_block *sb,
- struct task_struct *task, const struct proc_ns_operations *ns_ops)
-{
- struct dentry *dentry, *result;
- struct inode *inode;
- struct proc_inode *ei;
- struct qstr qname = { .name = "", };
- void *ns;
-
- ns = ns_ops->get(task);
- if (!ns)
- return ERR_PTR(-ENOENT);
-
- dentry = d_alloc_pseudo(sb, &qname);
- if (!dentry) {
- ns_ops->put(ns);
- return ERR_PTR(-ENOMEM);
- }
-
- inode = iget_locked(sb, ns_ops->inum(ns));
- if (!inode) {
- dput(dentry);
- ns_ops->put(ns);
- return ERR_PTR(-ENOMEM);
- }
-
- ei = PROC_I(inode);
- if (inode->i_state & I_NEW) {
- inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
- inode->i_op = &ns_inode_operations;
- inode->i_mode = S_IFREG | S_IRUGO;
- inode->i_fop = &ns_file_operations;
- ei->ns.ns_ops = ns_ops;
- ei->ns.ns = ns;
- unlock_new_inode(inode);
- } else {
- ns_ops->put(ns);
- }
-
- d_set_d_op(dentry, &ns_dentry_operations);
- result = d_instantiate_unique(dentry, inode);
- if (result) {
- dput(dentry);
- dentry = result;
- }
-
- return dentry;
-}
-
static void *proc_ns_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct inode *inode = dentry->d_inode;
- struct super_block *sb = inode->i_sb;
- struct proc_inode *ei = PROC_I(inode);
+ const struct proc_ns_operations *ns_ops = PROC_I(inode)->ns_ops;
struct task_struct *task;
struct path ns_path;
void *error = ERR_PTR(-EACCES);
task = get_proc_task(inode);
if (!task)
- goto out;
+ return error;
- if (!ptrace_may_access(task, PTRACE_MODE_READ))
- goto out_put_task;
-
- ns_path.dentry = proc_ns_get_dentry(sb, task, ei->ns.ns_ops);
- if (IS_ERR(ns_path.dentry)) {
- error = ERR_CAST(ns_path.dentry);
- goto out_put_task;
+ if (ptrace_may_access(task, PTRACE_MODE_READ)) {
+ error = ns_get_path(&ns_path, task, ns_ops);
+ if (!error)
+ nd_jump_link(nd, &ns_path);
}
-
- ns_path.mnt = mntget(nd->path.mnt);
- nd_jump_link(nd, &ns_path);
- error = NULL;
-
-out_put_task:
put_task_struct(task);
-out:
return error;
}
static int proc_ns_readlink(struct dentry *dentry, char __user *buffer, int buflen)
{
struct inode *inode = dentry->d_inode;
- struct proc_inode *ei = PROC_I(inode);
- const struct proc_ns_operations *ns_ops = ei->ns.ns_ops;
+ const struct proc_ns_operations *ns_ops = PROC_I(inode)->ns_ops;
struct task_struct *task;
- void *ns;
char name[50];
int res = -EACCES;
task = get_proc_task(inode);
if (!task)
- goto out;
-
- if (!ptrace_may_access(task, PTRACE_MODE_READ))
- goto out_put_task;
+ return res;
- res = -ENOENT;
- ns = ns_ops->get(task);
- if (!ns)
- goto out_put_task;
-
- snprintf(name, sizeof(name), "%s:[%u]", ns_ops->name, ns_ops->inum(ns));
- res = readlink_copy(buffer, buflen, name);
- ns_ops->put(ns);
-out_put_task:
+ if (ptrace_may_access(task, PTRACE_MODE_READ)) {
+ res = ns_get_name(name, sizeof(name), task, ns_ops);
+ if (res >= 0)
+ res = readlink_copy(buffer, buflen, name);
+ }
put_task_struct(task);
-out:
return res;
}
@@ -189,7 +92,7 @@ static int proc_ns_instantiate(struct inode *dir,
ei = PROC_I(inode);
inode->i_mode = S_IFLNK|S_IRWXUGO;
inode->i_op = &proc_ns_link_inode_operations;
- ei->ns.ns_ops = ns_ops;
+ ei->ns_ops = ns_ops;
d_set_d_op(dentry, &pid_dentry_operations);
d_add(dentry, inode);
@@ -267,31 +170,3 @@ const struct inode_operations proc_ns_dir_inode_operations = {
.getattr = pid_getattr,
.setattr = proc_setattr,
};
-
-struct file *proc_ns_fget(int fd)
-{
- struct file *file;
-
- file = fget(fd);
- if (!file)
- return ERR_PTR(-EBADF);
-
- if (file->f_op != &ns_file_operations)
- goto out_invalid;
-
- return file;
-
-out_invalid:
- fput(file);
- return ERR_PTR(-EINVAL);
-}
-
-struct proc_ns *get_proc_ns(struct inode *inode)
-{
- return &PROC_I(inode)->ns;
-}
-
-bool proc_ns_inode(struct inode *inode)
-{
- return inode->i_fop == &ns_file_operations;
-}
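With the namespace plumbing moved into nsfs, the /proc/<pid>/ns/* entries keep their "%s:[%u]" link text, now produced by ns_get_name(), e.g. something like "mnt:[4026531840]". A quick user-space check of that format:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char target[64];
	ssize_t n = readlink("/proc/self/ns/mnt", target, sizeof(target) - 1);

	if (n < 0) {
		perror("readlink /proc/self/ns/mnt");
		return 1;
	}
	target[n] = '\0';
	printf("/proc/self/ns/mnt -> %s\n", target);
	return 0;
}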
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index bf2d03f8fd3..510413eb25b 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -159,7 +159,7 @@ static int show_stat(struct seq_file *p, void *v)
/* sum again ? it could be updated? */
for_each_irq_nr(j)
- seq_put_decimal_ull(p, ' ', kstat_irqs(j));
+ seq_put_decimal_ull(p, ' ', kstat_irqs_usr(j));
seq_printf(p,
"\nctxt %llu\n"
diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c
index 73ca1740d83..0f96f71ab32 100644
--- a/fs/proc_namespace.c
+++ b/fs/proc_namespace.c
@@ -91,6 +91,7 @@ static void show_type(struct seq_file *m, struct super_block *sb)
static int show_vfsmnt(struct seq_file *m, struct vfsmount *mnt)
{
+ struct proc_mounts *p = proc_mounts(m);
struct mount *r = real_mount(mnt);
int err = 0;
struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
@@ -104,7 +105,10 @@ static int show_vfsmnt(struct seq_file *m, struct vfsmount *mnt)
mangle(m, r->mnt_devname ? r->mnt_devname : "none");
}
seq_putc(m, ' ');
- seq_path(m, &mnt_path, " \t\n\\");
+ /* mountpoints outside of chroot jail will give SEQ_SKIP on this */
+ err = seq_path_root(m, &mnt_path, &p->root, " \t\n\\");
+ if (err)
+ goto out;
seq_putc(m, ' ');
show_type(m, sb);
seq_puts(m, __mnt_is_readonly(mnt) ? " ro" : " rw");
@@ -125,7 +129,6 @@ static int show_mountinfo(struct seq_file *m, struct vfsmount *mnt)
struct mount *r = real_mount(mnt);
struct super_block *sb = mnt->mnt_sb;
struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
- struct path root = p->root;
int err = 0;
seq_printf(m, "%i %i %u:%u ", r->mnt_id, r->mnt_parent->mnt_id,
@@ -139,7 +142,7 @@ static int show_mountinfo(struct seq_file *m, struct vfsmount *mnt)
seq_putc(m, ' ');
/* mountpoints outside of chroot jail will give SEQ_SKIP on this */
- err = seq_path_root(m, &mnt_path, &root, " \t\n\\");
+ err = seq_path_root(m, &mnt_path, &p->root, " \t\n\\");
if (err)
goto out;
@@ -182,6 +185,7 @@ out:
static int show_vfsstat(struct seq_file *m, struct vfsmount *mnt)
{
+ struct proc_mounts *p = proc_mounts(m);
struct mount *r = real_mount(mnt);
struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
struct super_block *sb = mnt_path.dentry->d_sb;
@@ -201,7 +205,10 @@ static int show_vfsstat(struct seq_file *m, struct vfsmount *mnt)
/* mount point */
seq_puts(m, " mounted on ");
- seq_path(m, &mnt_path, " \t\n\\");
+ /* mountpoints outside of chroot jail will give SEQ_SKIP on this */
+ err = seq_path_root(m, &mnt_path, &p->root, " \t\n\\");
+ if (err)
+ goto out;
seq_putc(m, ' ');
/* file system type */
@@ -216,6 +223,7 @@ static int show_vfsstat(struct seq_file *m, struct vfsmount *mnt)
}
seq_putc(m, '\n');
+out:
return err;
}
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index ea63ab13ef9..71fbbe3e2da 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -2172,6 +2172,9 @@ error_unlocked:
reiserfs_write_unlock(s);
}
+ if (sbi->commit_wq)
+ destroy_workqueue(sbi->commit_wq);
+
cancel_delayed_work_sync(&REISERFS_SB(s)->old_work);
reiserfs_free_bitmap_cache(s);
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 7581518e3ef..61e32ec1fc4 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -313,6 +313,7 @@ struct acpi_device_wakeup_flags {
u8 valid:1; /* Can successfully enable wakeup? */
u8 run_wake:1; /* Run-Wake GPE devices */
u8 notifier_present:1; /* Wake-up notify handler has been installed */
+ u8 enabled:1; /* Enabled for wakeup */
};
struct acpi_device_wakeup_context {
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index aa70cbda327..bee5d683074 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -164,6 +164,7 @@
#define CLKSRC_OF_TABLES() OF_TABLE(CONFIG_CLKSRC_OF, clksrc)
#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
#define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk)
+#define IOMMU_OF_TABLES() OF_TABLE(CONFIG_OF_IOMMU, iommu)
#define RESERVEDMEM_OF_TABLES() OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
#define CPU_METHOD_OF_TABLES() OF_TABLE(CONFIG_SMP, cpu_method)
#define EARLYCON_OF_TABLES() OF_TABLE(CONFIG_SERIAL_EARLYCON, earlycon)
@@ -497,6 +498,7 @@
CLK_OF_TABLES() \
RESERVEDMEM_OF_TABLES() \
CLKSRC_OF_TABLES() \
+ IOMMU_OF_TABLES() \
CPU_METHOD_OF_TABLES() \
KERNEL_DTB() \
IRQCHIP_OF_MATCH_TABLE() \
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 53ed87698a7..8ba35c622e2 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -125,8 +125,8 @@ struct dma_buf_attachment;
extern __printf(2, 3)
void drm_ut_debug_printk(const char *function_name,
const char *format, ...);
-extern __printf(2, 3)
-void drm_err(const char *func, const char *format, ...);
+extern __printf(1, 2)
+void drm_err(const char *format, ...);
/***********************************************************************/
/** \name DRM template customization defaults */
@@ -155,7 +155,7 @@ void drm_err(const char *func, const char *format, ...);
* \param arg arguments
*/
#define DRM_ERROR(fmt, ...) \
- drm_err(__func__, fmt, ##__VA_ARGS__)
+ drm_err(fmt, ##__VA_ARGS__)
/**
* Rate limited error output. Like DRM_ERROR() but won't flood the log.
@@ -170,7 +170,7 @@ void drm_err(const char *func, const char *format, ...);
DEFAULT_RATELIMIT_BURST); \
\
if (__ratelimit(&_rs)) \
- drm_err(__func__, fmt, ##__VA_ARGS__); \
+ drm_err(fmt, ##__VA_ARGS__); \
})
#define DRM_INFO(fmt, ...) \
@@ -809,7 +809,7 @@ struct drm_device {
struct drm_local_map *agp_buffer_map;
unsigned int agp_buffer_token;
- struct drm_mode_config mode_config; /**< Current mode config */
+ struct drm_mode_config mode_config; /**< Current mode config */
/** \name GEM information */
/*@{ */
@@ -986,7 +986,7 @@ extern void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
extern int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
dma_addr_t *addrs, int max_pages);
-extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages);
+extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages);
extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg);
@@ -1028,10 +1028,25 @@ void drm_pci_agp_destroy(struct drm_device *dev);
extern int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver);
extern void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver);
+#ifdef CONFIG_PCI
extern int drm_get_pci_dev(struct pci_dev *pdev,
const struct pci_device_id *ent,
struct drm_driver *driver);
extern int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master);
+#else
+static inline int drm_get_pci_dev(struct pci_dev *pdev,
+ const struct pci_device_id *ent,
+ struct drm_driver *driver)
+{
+ return -ENOSYS;
+}
+
+static inline int drm_pci_set_busid(struct drm_device *dev,
+ struct drm_master *master)
+{
+ return -ENOSYS;
+}
+#endif
#define DRM_PCIE_SPEED_25 1
#define DRM_PCIE_SPEED_50 2
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
new file mode 100644
index 00000000000..ad2229574dd
--- /dev/null
+++ b/include/drm/drm_atomic.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2014 Red Hat
+ * Copyright (C) 2014 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ */
+
+#ifndef DRM_ATOMIC_H_
+#define DRM_ATOMIC_H_
+
+#include <drm/drm_crtc.h>
+
+struct drm_atomic_state * __must_check
+drm_atomic_state_alloc(struct drm_device *dev);
+void drm_atomic_state_clear(struct drm_atomic_state *state);
+void drm_atomic_state_free(struct drm_atomic_state *state);
+
+struct drm_crtc_state * __must_check
+drm_atomic_get_crtc_state(struct drm_atomic_state *state,
+ struct drm_crtc *crtc);
+struct drm_plane_state * __must_check
+drm_atomic_get_plane_state(struct drm_atomic_state *state,
+ struct drm_plane *plane);
+struct drm_connector_state * __must_check
+drm_atomic_get_connector_state(struct drm_atomic_state *state,
+ struct drm_connector *connector);
+
+int __must_check
+drm_atomic_set_crtc_for_plane(struct drm_atomic_state *state,
+ struct drm_plane *plane, struct drm_crtc *crtc);
+void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
+ struct drm_framebuffer *fb);
+int __must_check
+drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
+ struct drm_crtc *crtc);
+int __must_check
+drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
+ struct drm_crtc *crtc);
+int
+drm_atomic_connectors_for_crtc(struct drm_atomic_state *state,
+ struct drm_crtc *crtc);
+
+void drm_atomic_legacy_backoff(struct drm_atomic_state *state);
+
+int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
+int __must_check drm_atomic_commit(struct drm_atomic_state *state);
+int __must_check drm_atomic_async_commit(struct drm_atomic_state *state);
+
+#endif /* DRM_ATOMIC_H_ */
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
new file mode 100644
index 00000000000..f956b413311
--- /dev/null
+++ b/include/drm/drm_atomic_helper.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2014 Red Hat
+ * Copyright (C) 2014 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ */
+
+#ifndef DRM_ATOMIC_HELPER_H_
+#define DRM_ATOMIC_HELPER_H_
+
+#include <drm/drm_crtc.h>
+
+int drm_atomic_helper_check(struct drm_device *dev,
+ struct drm_atomic_state *state);
+int drm_atomic_helper_commit(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool async);
+
+void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
+ struct drm_atomic_state *old_state);
+
+void drm_atomic_helper_commit_pre_planes(struct drm_device *dev,
+ struct drm_atomic_state *state);
+void drm_atomic_helper_commit_post_planes(struct drm_device *dev,
+ struct drm_atomic_state *old_state);
+
+int drm_atomic_helper_prepare_planes(struct drm_device *dev,
+ struct drm_atomic_state *state);
+void drm_atomic_helper_commit_planes(struct drm_device *dev,
+ struct drm_atomic_state *state);
+void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
+ struct drm_atomic_state *old_state);
+
+void drm_atomic_helper_swap_state(struct drm_device *dev,
+ struct drm_atomic_state *state);
+
+/* implementations for legacy interfaces */
+int drm_atomic_helper_update_plane(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
+int drm_atomic_helper_disable_plane(struct drm_plane *plane);
+int drm_atomic_helper_set_config(struct drm_mode_set *set);
+
+int drm_atomic_helper_crtc_set_property(struct drm_crtc *crtc,
+ struct drm_property *property,
+ uint64_t val);
+int drm_atomic_helper_plane_set_property(struct drm_plane *plane,
+ struct drm_property *property,
+ uint64_t val);
+int drm_atomic_helper_connector_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val);
+int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t flags);
+
+/* default implementations for state handling */
+void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc);
+struct drm_crtc_state *
+drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc);
+void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+
+void drm_atomic_helper_plane_reset(struct drm_plane *plane);
+struct drm_plane_state *
+drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane);
+void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state);
+
+void drm_atomic_helper_connector_reset(struct drm_connector *connector);
+struct drm_connector_state *
+drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector);
+void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
+ struct drm_connector_state *state);
+
+/**
+ * drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC
+ * @plane: the loop cursor
+ * @crtc: the crtc whose planes are iterated
+ *
+ * This iterates over the current state, useful (for example) when applying
+ * atomic state after it has been checked and swapped. To iterate over the
+ * planes which *will* be attached (for ->atomic_check()) see
+ * drm_atomic_crtc_state_for_each_plane().
+ */
+#define drm_atomic_crtc_for_each_plane(plane, crtc) \
+ drm_for_each_plane_mask(plane, (crtc)->dev, (crtc)->state->plane_mask)
+
+/**
+ * drm_atomic_crtc_state_for_each_plane - iterate over attached planes in new state
+ * @plane: the loop cursor
+ * @crtc_state: the incoming crtc-state
+ *
+ * Similar to drm_atomic_crtc_for_each_plane(), but iterates the planes that
+ * will be attached if the specified state is applied. Useful during (for
+ * example) ->atomic_check() operations, to validate the incoming state.
+ */
+#define drm_atomic_crtc_state_for_each_plane(plane, crtc_state) \
+ drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask)
+
+#endif /* DRM_ATOMIC_HELPER_H_ */
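Both helper macros above expand to drm_for_each_plane_mask(), i.e. a walk over the set bits of a plane_mask where each bit position is a plane index (1 << drm_plane_index(plane)). A generic sketch of that bit walk in plain C (not the DRM macro itself; the mask value is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t plane_mask = (1u << 0) | (1u << 2) | (1u << 5);

	/* clear the lowest set bit each iteration until the mask is empty */
	for (uint32_t m = plane_mask; m; m &= m - 1) {
		unsigned int idx = __builtin_ctz(m);   /* index of lowest set bit */
		printf("plane index %u is attached\n", idx);
	}
	return 0;
}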
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index c40070a92d6..b86329813ad 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -42,6 +42,7 @@ struct drm_object_properties;
struct drm_file;
struct drm_clip_rect;
struct device_node;
+struct fence;
#define DRM_MODE_OBJECT_CRTC 0xcccccccc
#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0
@@ -136,14 +137,22 @@ struct drm_display_info {
u8 cea_rev;
};
+/* data corresponds to displayid vend/prod/serial */
+struct drm_tile_group {
+ struct kref refcount;
+ struct drm_device *dev;
+ int id;
+ u8 group_data[8];
+};
+
struct drm_framebuffer_funcs {
/* note: use drm_framebuffer_remove() */
void (*destroy)(struct drm_framebuffer *framebuffer);
int (*create_handle)(struct drm_framebuffer *fb,
struct drm_file *file_priv,
unsigned int *handle);
- /**
- * Optinal callback for the dirty fb ioctl.
+ /*
+ * Optional callback for the dirty fb ioctl.
*
* Userspace can notify the driver via this callback
* that an area of the framebuffer has changed and should
@@ -196,7 +205,7 @@ struct drm_framebuffer {
struct drm_property_blob {
struct drm_mode_object base;
struct list_head head;
- unsigned int length;
+ size_t length;
unsigned char data[];
};
@@ -215,7 +224,7 @@ struct drm_property {
uint64_t *values;
struct drm_device *dev;
- struct list_head enum_blob_list;
+ struct list_head enum_list;
};
struct drm_crtc;
@@ -224,19 +233,65 @@ struct drm_encoder;
struct drm_pending_vblank_event;
struct drm_plane;
struct drm_bridge;
+struct drm_atomic_state;
/**
- * drm_crtc_funcs - control CRTCs for a given device
+ * struct drm_crtc_state - mutable CRTC state
+ * @enable: whether the CRTC should be enabled, gates all other state
+ * @mode_changed: for use by helpers and drivers when computing state updates
+ * @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes
+ * @last_vblank_count: for helpers and drivers to capture the vblank of the
+ * update to ensure framebuffer cleanup isn't done too early
+ * @planes_changed: for use by helpers and drivers when computing state updates
+ * @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings
+ * @mode: current mode timings
+ * @event: optional pointer to a DRM event to signal upon completion of the
+ * state update
+ * @state: backpointer to global drm_atomic_state
+ */
+struct drm_crtc_state {
+ bool enable;
+
+ /* computed state bits used by helpers and drivers */
+ bool planes_changed : 1;
+ bool mode_changed : 1;
+
+ /* attached planes bitmask:
+ * WARNING: transitional helpers do not maintain plane_mask so
+ * drivers not converted over to atomic helpers should not rely
+ * on plane_mask being accurate!
+ */
+ u32 plane_mask;
+
+ /* last_vblank_count: for vblank waits before cleanup */
+ u32 last_vblank_count;
+
+ /* adjusted_mode: for use by helpers and drivers */
+ struct drm_display_mode adjusted_mode;
+
+ struct drm_display_mode mode;
+
+ struct drm_pending_vblank_event *event;
+
+ struct drm_atomic_state *state;
+};
+
+/**
+ * struct drm_crtc_funcs - control CRTCs for a given device
* @save: save CRTC state
* @restore: restore CRTC state
* @reset: reset CRTC after state has been invalidated (e.g. resume)
* @cursor_set: setup the cursor
+ * @cursor_set2: setup the cursor with hotspot, supersedes @cursor_set if set
* @cursor_move: move the cursor
* @gamma_set: specify color ramp for CRTC
* @destroy: deinit and free object
* @set_property: called when a property is changed
* @set_config: apply a new CRTC configuration
* @page_flip: initiate a page flip
+ * @atomic_duplicate_state: duplicate the atomic state for this CRTC
+ * @atomic_destroy_state: destroy an atomic state for this CRTC
+ * @atomic_set_property: set a property on an atomic state for this CRTC
*
* The drm_crtc_funcs structure is the central CRTC management structure
* in the DRM. Each CRTC controls one or more connectors (note that the name
@@ -287,16 +342,28 @@ struct drm_crtc_funcs {
int (*set_property)(struct drm_crtc *crtc,
struct drm_property *property, uint64_t val);
+
+ /* atomic update handling */
+ struct drm_crtc_state *(*atomic_duplicate_state)(struct drm_crtc *crtc);
+ void (*atomic_destroy_state)(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+ int (*atomic_set_property)(struct drm_crtc *crtc,
+ struct drm_crtc_state *state,
+ struct drm_property *property,
+ uint64_t val);
};
/**
- * drm_crtc - central CRTC control structure
+ * struct drm_crtc - central CRTC control structure
* @dev: parent DRM device
+ * @port: OF node used by drm_of_find_possible_crtcs()
* @head: list management
* @mutex: per-CRTC locking
* @base: base KMS object for ID tracking etc.
* @primary: primary plane for this CRTC
* @cursor: cursor plane for this CRTC
+ * @cursor_x: current x position of the cursor, used for universal cursor planes
+ * @cursor_y: current y position of the cursor, used for universal cursor planes
* @enabled: is this CRTC enabled?
* @mode: current mode timings
* @hwmode: mode timings as programmed to hw regs
@@ -309,10 +376,13 @@ struct drm_crtc_funcs {
* @gamma_size: size of gamma ramp
* @gamma_store: gamma ramp values
* @framedur_ns: precise frame timing
- * @framedur_ns: precise line timing
+ * @linedur_ns: precise line timing
* @pixeldur_ns: precise pixel timing
* @helper_private: mid-layer private data
* @properties: property tracking for this CRTC
+ * @state: current atomic state for this CRTC
+ * @acquire_ctx: per-CRTC implicit acquire context used by atomic drivers for
+ * legacy ioctls
*
* Each CRTC may have one or more connectors associated with it. This structure
* allows the CRTC to be controlled.
@@ -322,7 +392,7 @@ struct drm_crtc {
struct device_node *port;
struct list_head head;
- /**
+ /*
* crtc mutex
*
* This provides a read lock for the overall crtc state (mode, dpms
@@ -368,6 +438,8 @@ struct drm_crtc {
struct drm_object_properties properties;
+ struct drm_crtc_state *state;
+
/*
* For legacy crtc ioctls so that atomic drivers can get at the locking
* acquire context.
@@ -375,9 +447,22 @@ struct drm_crtc {
struct drm_modeset_acquire_ctx *acquire_ctx;
};
+/**
+ * struct drm_connector_state - mutable connector state
+ * @crtc: CRTC to connect connector to, NULL if disabled
+ * @best_encoder: can be used by helpers and drivers to select the encoder
+ * @state: backpointer to global drm_atomic_state
+ */
+struct drm_connector_state {
+ struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_connector() */
+
+ struct drm_encoder *best_encoder;
+
+ struct drm_atomic_state *state;
+};
/**
- * drm_connector_funcs - control connectors on a given device
+ * struct drm_connector_funcs - control connectors on a given device
* @dpms: set power state (see drm_crtc_funcs above)
* @save: save connector state
* @restore: restore connector state
@@ -387,6 +472,9 @@ struct drm_crtc {
* @set_property: property for this connector may need an update
* @destroy: make object go away
* @force: notify the driver that the connector is forced on
+ * @atomic_duplicate_state: duplicate the atomic state for this connector
+ * @atomic_destroy_state: destroy an atomic state for this connector
+ * @atomic_set_property: set a property on an atomic state for this connector
*
* Each CRTC may have one or more connectors attached to it. The functions
* below allow the core DRM code to control connectors, enumerate available modes,
@@ -411,10 +499,19 @@ struct drm_connector_funcs {
uint64_t val);
void (*destroy)(struct drm_connector *connector);
void (*force)(struct drm_connector *connector);
+
+ /* atomic update handling */
+ struct drm_connector_state *(*atomic_duplicate_state)(struct drm_connector *connector);
+ void (*atomic_destroy_state)(struct drm_connector *connector,
+ struct drm_connector_state *state);
+ int (*atomic_set_property)(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t val);
};
/**
- * drm_encoder_funcs - encoder controls
+ * struct drm_encoder_funcs - encoder controls
* @reset: reset state (e.g. at init or resume time)
* @destroy: cleanup and free associated data
*
@@ -428,7 +525,7 @@ struct drm_encoder_funcs {
#define DRM_CONNECTOR_MAX_ENCODER 3
/**
- * drm_encoder - central DRM encoder structure
+ * struct drm_encoder - central DRM encoder structure
* @dev: parent DRM device
* @head: list management
* @base: base KMS object
@@ -472,7 +569,7 @@ struct drm_encoder {
#define MAX_ELD_BYTES 128
/**
- * drm_connector - central DRM connector control structure
+ * struct drm_connector - central DRM connector control structure
* @dev: parent DRM device
* @kdev: kernel device for sysfs attributes
* @attr: sysfs attributes
@@ -483,6 +580,7 @@ struct drm_encoder {
* @connector_type_id: index into connector type enum
* @interlace_allowed: can this connector handle interlaced modes?
* @doublescan_allowed: can this connector handle doublescan?
+ * @stereo_allowed: can this connector handle stereo modes?
* @modes: modes available on this connector (from fill_modes() + user)
* @status: one of the drm_connector_status enums (connected, not, or unknown)
* @probed_modes: list of modes derived directly from the display
@@ -490,10 +588,13 @@ struct drm_encoder {
* @funcs: connector control functions
* @edid_blob_ptr: DRM property containing EDID if present
* @properties: property tracking for this connector
+ * @path_blob_ptr: DRM blob property data for the DP MST path property
* @polled: a %DRM_CONNECTOR_POLL_<foo> value for core driven polling
* @dpms: current dpms state
* @helper_private: mid-layer private data
+ * @cmdline_mode: mode line parsed from the kernel cmdline for this connector
* @force: a %DRM_FORCE_<foo> state for forced mode sets
+ * @override_edid: has the EDID been overwritten through debugfs for testing?
* @encoder_ids: valid encoders for this connector
* @encoder: encoder driving this connector, if any
* @eld: EDID-like data, if present
@@ -503,6 +604,18 @@ struct drm_encoder {
* @video_latency: video latency info from ELD, if found
* @audio_latency: audio latency info from ELD, if found
* @null_edid_counter: track sinks that give us all zeros for the EDID
+ * @bad_edid_counter: track sinks that give us an EDID with invalid checksum
+ * @debugfs_entry: debugfs directory for this connector
+ * @state: current atomic state for this connector
+ * @has_tile: is this connector connected to a tiled monitor
+ * @tile_group: tile group for the connected monitor
+ * @tile_is_single_monitor: whether the tile group sits in a single monitor housing
+ * @num_h_tile: number of horizontal tiles in the tile group
+ * @num_v_tile: number of vertical tiles in the tile group
+ * @tile_h_loc: horizontal location of this tile
+ * @tile_v_loc: vertical location of this tile
+ * @tile_h_size: horizontal size of this tile.
+ * @tile_v_size: vertical size of this tile.
*
* Each connector may be connected to one or more CRTCs, or may be clonable by
* another connector if they can share a CRTC. Each connector also has a specific
@@ -538,6 +651,8 @@ struct drm_connector {
struct drm_property_blob *path_blob_ptr;
+ struct drm_property_blob *tile_blob_ptr;
+
uint8_t polled; /* DRM_CONNECTOR_POLL_* */
/* requested DPMS state */
@@ -563,14 +678,63 @@ struct drm_connector {
unsigned bad_edid_counter;
struct dentry *debugfs_entry;
+
+ struct drm_connector_state *state;
+
+ /* DisplayID bits */
+ bool has_tile;
+ struct drm_tile_group *tile_group;
+ bool tile_is_single_monitor;
+
+ uint8_t num_h_tile, num_v_tile;
+ uint8_t tile_h_loc, tile_v_loc;
+ uint16_t tile_h_size, tile_v_size;
+};
+
+/**
+ * struct drm_plane_state - mutable plane state
+ * @crtc: currently bound CRTC, NULL if disabled
+ * @fb: currently bound framebuffer
+ * @fence: optional fence to wait for before scanning out @fb
+ * @crtc_x: left position of visible portion of plane on crtc
+ * @crtc_y: upper position of visible portion of plane on crtc
+ * @crtc_w: width of visible portion of plane on crtc
+ * @crtc_h: height of visible portion of plane on crtc
+ * @src_x: left position of visible portion of plane within
+ * plane (in 16.16)
+ * @src_y: upper position of visible portion of plane within
+ * plane (in 16.16)
+ * @src_w: width of visible portion of plane (in 16.16)
+ * @src_h: height of visible portion of plane (in 16.16)
+ * @state: backpointer to global drm_atomic_state
+ */
+struct drm_plane_state {
+ struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_plane() */
+ struct drm_framebuffer *fb; /* do not write directly, use drm_atomic_set_fb_for_plane() */
+ struct fence *fence;
+
+ /* Signed dest location allows it to be partially off screen */
+ int32_t crtc_x, crtc_y;
+ uint32_t crtc_w, crtc_h;
+
+ /* Source values are 16.16 fixed point */
+ uint32_t src_x, src_y;
+ uint32_t src_h, src_w;
+
+ struct drm_atomic_state *state;
};
+
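
As a small usage sketch, the 16.16 fixed-point source rectangle can be reduced to whole pixels as shown here; the helper name is made up, and hardware without scaling support would typically also check that no fractional part is being dropped:

static void example_plane_src_to_pixels(const struct drm_plane_state *state,
                                        unsigned int *x, unsigned int *y,
                                        unsigned int *w, unsigned int *h)
{
        /* src_* are in 16.16 fixed point; keep only the integer part */
        *x = state->src_x >> 16;
        *y = state->src_y >> 16;
        *w = state->src_w >> 16;
        *h = state->src_h >> 16;
}
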
/**
- * drm_plane_funcs - driver plane control functions
+ * struct drm_plane_funcs - driver plane control functions
* @update_plane: update the plane configuration
* @disable_plane: shut down the plane
* @destroy: clean up plane resources
+ * @reset: reset plane after state has been invalidated (e.g. resume)
* @set_property: called when a property is changed
+ * @atomic_duplicate_state: duplicate the atomic state for this plane
+ * @atomic_destroy_state: destroy an atomic state for this plane
+ * @atomic_set_property: set a property on an atomic state for this plane
*/
struct drm_plane_funcs {
int (*update_plane)(struct drm_plane *plane,
@@ -585,6 +749,15 @@ struct drm_plane_funcs {
int (*set_property)(struct drm_plane *plane,
struct drm_property *property, uint64_t val);
+
+ /* atomic update handling */
+ struct drm_plane_state *(*atomic_duplicate_state)(struct drm_plane *plane);
+ void (*atomic_destroy_state)(struct drm_plane *plane,
+ struct drm_plane_state *state);
+ int (*atomic_set_property)(struct drm_plane *plane,
+ struct drm_plane_state *state,
+ struct drm_property *property,
+ uint64_t val);
};
enum drm_plane_type {
@@ -594,7 +767,7 @@ enum drm_plane_type {
};
/**
- * drm_plane - central DRM plane control structure
+ * struct drm_plane - central DRM plane control structure
* @dev: DRM device this plane belongs to
* @head: for list management
* @base: base mode object
@@ -603,14 +776,19 @@ enum drm_plane_type {
* @format_count: number of formats supported
* @crtc: currently bound CRTC
* @fb: currently bound fb
+ * @old_fb: Temporary tracking of the old fb while a modeset is ongoing. Used by
+ * drm_mode_set_config_internal() to implement correct refcounting.
* @funcs: helper functions
* @properties: property tracking for this plane
* @type: type of plane (overlay, primary, cursor)
+ * @state: current atomic state for this plane
*/
struct drm_plane {
struct drm_device *dev;
struct list_head head;
+ struct drm_modeset_lock mutex;
+
struct drm_mode_object base;
uint32_t possible_crtcs;
@@ -620,8 +798,6 @@ struct drm_plane {
struct drm_crtc *crtc;
struct drm_framebuffer *fb;
- /* Temporary tracking of the old fb while a modeset is ongoing. Used
- * by drm_mode_set_config_internal to implement correct refcounting. */
struct drm_framebuffer *old_fb;
const struct drm_plane_funcs *funcs;
@@ -629,10 +805,14 @@ struct drm_plane {
struct drm_object_properties properties;
enum drm_plane_type type;
+
+ void *helper_private;
+
+ struct drm_plane_state *state;
};
/**
- * drm_bridge_funcs - drm_bridge control functions
+ * struct drm_bridge_funcs - drm_bridge control functions
* @mode_fixup: Try to fixup (or reject entirely) proposed mode for this bridge
* @disable: Called right before encoder prepare, disables the bridge
* @post_disable: Called right after encoder prepare, for lockstepped disable
@@ -656,7 +836,7 @@ struct drm_bridge_funcs {
};
/**
- * drm_bridge - central DRM bridge control structure
+ * struct drm_bridge - central DRM bridge control structure
* @dev: DRM device this bridge belongs to
* @head: list management
* @base: base mode object
@@ -674,8 +854,35 @@ struct drm_bridge {
};
/**
- * drm_mode_set - new values for a CRTC config change
- * @head: list management
+ * struct drm_atomic_state - the global state object for atomic updates
+ * @dev: parent DRM device
+ * @flags: state flags like async update
+ * @planes: pointer to array of plane pointers
+ * @plane_states: pointer to array of plane states pointers
+ * @crtcs: pointer to array of CRTC pointers
+ * @crtc_states: pointer to array of CRTC states pointers
+ * @num_connector: size of the @connectors and @connector_states arrays
+ * @connectors: pointer to array of connector pointers
+ * @connector_states: pointer to array of connector states pointers
+ * @acquire_ctx: acquire context for this atomic modeset state update
+ */
+struct drm_atomic_state {
+ struct drm_device *dev;
+ uint32_t flags;
+ struct drm_plane **planes;
+ struct drm_plane_state **plane_states;
+ struct drm_crtc **crtcs;
+ struct drm_crtc_state **crtc_states;
+ int num_connector;
+ struct drm_connector **connectors;
+ struct drm_connector_state **connector_states;
+
+ struct drm_modeset_acquire_ctx *acquire_ctx;
+};
+
+
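
A rough sketch of how a driver's commit path might walk the per-CRTC state carried in drm_atomic_state; this assumes the arrays are sized to the device's object counts and that slots for objects untouched by the update are left NULL, and the function name is hypothetical:

static void example_walk_crtc_states(struct drm_atomic_state *state)
{
        int i;

        for (i = 0; i < state->dev->mode_config.num_crtc; i++) {
                struct drm_crtc *crtc = state->crtcs[i];
                struct drm_crtc_state *crtc_state = state->crtc_states[i];

                if (!crtc)
                        continue;       /* CRTC not part of this update */

                /* program crtc_state->mode, signal crtc_state->event, ... */
                (void)crtc_state;
        }
}
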
+/**
+ * struct drm_mode_set - new values for a CRTC config change
* @fb: framebuffer to use for new config
* @crtc: CRTC whose configuration we're about to change
* @mode: mode timings to use
@@ -705,6 +912,9 @@ struct drm_mode_set {
* struct drm_mode_config_funcs - basic driver provided mode setting functions
* @fb_create: create a new framebuffer object
* @output_poll_changed: function to handle output configuration changes
+ * @atomic_check: check whether a given atomic state update is possible
+ * @atomic_commit: commit an atomic state update previously verified with
+ * atomic_check()
*
* Some global (i.e. not per-CRTC, connector, etc) mode setting functions that
* involve drivers.
@@ -714,13 +924,20 @@ struct drm_mode_config_funcs {
struct drm_file *file_priv,
struct drm_mode_fb_cmd2 *mode_cmd);
void (*output_poll_changed)(struct drm_device *dev);
+
+ int (*atomic_check)(struct drm_device *dev,
+ struct drm_atomic_state *a);
+ int (*atomic_commit)(struct drm_device *dev,
+ struct drm_atomic_state *a,
+ bool async);
};
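
A driver opting into the new entry points would typically wire them up next to the existing hooks; the foo_* callbacks below are placeholders, not part of this patch:

static const struct drm_mode_config_funcs foo_mode_config_funcs = {
        .fb_create              = foo_fb_create,
        .output_poll_changed    = foo_output_poll_changed,
        .atomic_check           = foo_atomic_check,
        .atomic_commit          = foo_atomic_commit,
};
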
/**
- * drm_mode_group - group of mode setting resources for potential sub-grouping
+ * struct drm_mode_group - group of mode setting resources for potential sub-grouping
* @num_crtcs: CRTC count
* @num_encoders: encoder count
* @num_connectors: connector count
+ * @num_bridges: bridge count
* @id_list: list of KMS object IDs in this group
*
* Currently this simply tracks the global mode setting state. But in the
@@ -740,10 +957,14 @@ struct drm_mode_group {
};
/**
- * drm_mode_config - Mode configuration control structure
+ * struct drm_mode_config - Mode configuration control structure
* @mutex: mutex protecting KMS related lists and structures
+ * @connection_mutex: ww mutex protecting connector state and routing
+ * @acquire_ctx: global implicit acquire context used by atomic drivers for
+ * legacy ioctls
* @idr_mutex: mutex for KMS ID allocation and management
* @crtc_idr: main KMS ID tracking object
+ * @fb_lock: mutex to protect fb state and lists
* @num_fb: number of fbs available
* @fb_list: list of framebuffers available
* @num_connector: number of connectors on this device
@@ -752,17 +973,28 @@ struct drm_mode_group {
* @bridge_list: list of bridge objects
* @num_encoder: number of encoders on this device
* @encoder_list: list of encoder objects
+ * @num_overlay_plane: number of overlay planes on this device
+ * @num_total_plane: number of universal (i.e. with primary/cursor) planes on this device
+ * @plane_list: list of plane objects
* @num_crtc: number of CRTCs on this device
* @crtc_list: list of CRTC objects
+ * @property_list: list of property objects
* @min_width: minimum pixel width on this device
* @min_height: minimum pixel height on this device
* @max_width: maximum pixel width on this device
* @max_height: maximum pixel height on this device
* @funcs: core driver provided mode setting functions
* @fb_base: base address of the framebuffer
- * @poll_enabled: track polling status for this device
+ * @poll_enabled: track polling support for this device
+ * @poll_running: track polling status for this device
* @output_poll_work: delayed work for polling in process context
+ * @property_blob_list: list of all the blob property objects
* @*_property: core property tracking
+ * @preferred_depth: preferred RGB pixel depth, used by fb helpers
+ * @prefer_shadow: hint to userspace to prefer shadow-fb rendering
+ * @async_page_flip: does this device support async flips on the primary plane?
+ * @cursor_width: hint to userspace for max cursor width
+ * @cursor_height: hint to userspace for max cursor height
*
* Core mode resource tracking structure. All CRTC, encoders, and connectors
* enumerated by the driver are added here, as are global properties. Some
@@ -774,16 +1006,10 @@ struct drm_mode_config {
struct drm_modeset_acquire_ctx *acquire_ctx; /* for legacy _lock_all() / _unlock_all() */
struct mutex idr_mutex; /* for IDR management */
struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
+ struct idr tile_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
/* this is limited to one for now */
-
- /**
- * fb_lock - mutex to protect fb state
- *
- * Besides the global fb list his also protects the fbs list in the
- * file_priv
- */
- struct mutex fb_lock;
+ struct mutex fb_lock; /* protects global and per-file fb lists */
int num_fb;
struct list_head fb_list;
@@ -824,6 +1050,7 @@ struct drm_mode_config {
struct drm_property *edid_property;
struct drm_property *dpms_property;
struct drm_property *path_property;
+ struct drm_property *tile_property;
struct drm_property *plane_type_property;
struct drm_property *rotation_property;
@@ -851,6 +1078,10 @@ struct drm_mode_config {
struct drm_property *aspect_ratio_property;
struct drm_property *dirty_info_property;
+ /* properties for virtual machine layout */
+ struct drm_property *suggested_x_property;
+ struct drm_property *suggested_y_property;
+
/* dumb ioctl parameters */
uint32_t preferred_depth, prefer_shadow;
@@ -861,6 +1092,19 @@ struct drm_mode_config {
uint32_t cursor_width, cursor_height;
};
+/**
+ * drm_for_each_plane_mask - iterate over planes specified by bitmask
+ * @plane: the loop cursor
+ * @dev: the DRM device
+ * @plane_mask: bitmask of plane indices
+ *
+ * Iterate over all planes specified by bitmask.
+ */
+#define drm_for_each_plane_mask(plane, dev, plane_mask) \
+ list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \
+ if ((plane_mask) & (1 << drm_plane_index(plane)))
+
+
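
Usage sketch for the new iterator, pairing it with the plane_mask kept in the CRTC's atomic state (the function name is made up):

static void example_walk_attached_planes(struct drm_crtc *crtc)
{
        struct drm_plane *plane;

        drm_for_each_plane_mask(plane, crtc->dev, crtc->state->plane_mask) {
                /* per-plane work for this CRTC's update goes here */
        }
}
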
#define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
#define obj_to_connector(x) container_of(x, struct drm_connector, base)
#define obj_to_encoder(x) container_of(x, struct drm_encoder, base)
@@ -880,9 +1124,6 @@ extern int drm_crtc_init_with_planes(struct drm_device *dev,
struct drm_plane *primary,
struct drm_plane *cursor,
const struct drm_crtc_funcs *funcs);
-extern int drm_crtc_init(struct drm_device *dev,
- struct drm_crtc *crtc,
- const struct drm_crtc_funcs *funcs);
extern void drm_crtc_cleanup(struct drm_crtc *crtc);
extern unsigned int drm_crtc_index(struct drm_crtc *crtc);
@@ -978,9 +1219,10 @@ extern void drm_mode_config_reset(struct drm_device *dev);
extern void drm_mode_config_cleanup(struct drm_device *dev);
extern int drm_mode_connector_set_path_property(struct drm_connector *connector,
- char *path);
+ const char *path);
+int drm_mode_connector_set_tile_property(struct drm_connector *connector);
extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
- struct edid *edid);
+ const struct edid *edid);
static inline bool drm_property_type_is(struct drm_property *property,
uint32_t type)
@@ -1041,11 +1283,13 @@ extern void drm_property_destroy(struct drm_device *dev, struct drm_property *pr
extern int drm_property_add_enum(struct drm_property *property, int index,
uint64_t value, const char *name);
extern int drm_mode_create_dvi_i_properties(struct drm_device *dev);
-extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats,
- char *formats[]);
+extern int drm_mode_create_tv_properties(struct drm_device *dev,
+ unsigned int num_modes,
+ char *modes[]);
extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
extern int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
+extern int drm_mode_create_suggested_offset_properties(struct drm_device *dev);
extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder);
@@ -1113,6 +1357,13 @@ extern void drm_set_preferred_mode(struct drm_connector *connector,
extern int drm_edid_header_is_valid(const u8 *raw_edid);
extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);
extern bool drm_edid_is_valid(struct edid *edid);
+
+extern struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
+ char topology[8]);
+extern struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
+ char topology[8]);
+extern void drm_mode_put_tile_group(struct drm_device *dev,
+ struct drm_tile_group *tg);
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
int hsize, int vsize, int fresh,
bool rb);
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index a3d75fefd01..7adbb65ea8a 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -68,6 +68,7 @@ struct drm_crtc_helper_funcs {
int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode, int x, int y,
struct drm_framebuffer *old_fb);
+ void (*mode_set_nofb)(struct drm_crtc *crtc);
/* Move the crtc on the current fb to the given position *optional* */
int (*mode_set_base)(struct drm_crtc *crtc, int x, int y,
@@ -81,6 +82,12 @@ struct drm_crtc_helper_funcs {
/* disable crtc when not in use - more explicit than dpms off */
void (*disable)(struct drm_crtc *crtc);
+
+ /* atomic helpers */
+ int (*atomic_check)(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+ void (*atomic_begin)(struct drm_crtc *crtc);
+ void (*atomic_flush)(struct drm_crtc *crtc);
};
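
As with the other new hooks, a driver plugs these in through its CRTC helper vtable; a minimal sketch with hypothetical callback names:

static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
        .mode_set_nofb  = foo_crtc_mode_set_nofb,
        .atomic_check   = foo_crtc_atomic_check,
        .atomic_begin   = foo_crtc_atomic_begin,
        .atomic_flush   = foo_crtc_atomic_flush,
};
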
/**
@@ -161,6 +168,12 @@ static inline void drm_connector_helper_add(struct drm_connector *connector,
extern void drm_helper_resume_force_mode(struct drm_device *dev);
+int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode, int x, int y,
+ struct drm_framebuffer *old_fb);
+int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb);
+
/* drm_probe_helper.c */
extern int drm_helper_probe_single_connector_modes(struct drm_connector
*connector, uint32_t maxX,
diff --git a/include/drm/drm_displayid.h b/include/drm/drm_displayid.h
new file mode 100644
index 00000000000..623b4e98e74
--- /dev/null
+++ b/include/drm/drm_displayid.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright © 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef DRM_DISPLAYID_H
+#define DRM_DISPLAYID_H
+
+#define DATA_BLOCK_PRODUCT_ID 0x00
+#define DATA_BLOCK_DISPLAY_PARAMETERS 0x01
+#define DATA_BLOCK_COLOR_CHARACTERISTICS 0x02
+#define DATA_BLOCK_TYPE_1_DETAILED_TIMING 0x03
+#define DATA_BLOCK_TYPE_2_DETAILED_TIMING 0x04
+#define DATA_BLOCK_TYPE_3_SHORT_TIMING 0x05
+#define DATA_BLOCK_TYPE_4_DMT_TIMING 0x06
+#define DATA_BLOCK_VESA_TIMING 0x07
+#define DATA_BLOCK_CEA_TIMING 0x08
+#define DATA_BLOCK_VIDEO_TIMING_RANGE 0x09
+#define DATA_BLOCK_PRODUCT_SERIAL_NUMBER 0x0a
+#define DATA_BLOCK_GP_ASCII_STRING 0x0b
+#define DATA_BLOCK_DISPLAY_DEVICE_DATA 0x0c
+#define DATA_BLOCK_INTERFACE_POWER_SEQUENCING 0x0d
+#define DATA_BLOCK_TRANSFER_CHARACTERISTICS 0x0e
+#define DATA_BLOCK_DISPLAY_INTERFACE 0x0f
+#define DATA_BLOCK_STEREO_DISPLAY_INTERFACE 0x10
+#define DATA_BLOCK_TILED_DISPLAY 0x12
+
+#define DATA_BLOCK_VENDOR_SPECIFIC 0x7f
+
+#define PRODUCT_TYPE_EXTENSION 0
+#define PRODUCT_TYPE_TEST 1
+#define PRODUCT_TYPE_PANEL 2
+#define PRODUCT_TYPE_MONITOR 3
+#define PRODUCT_TYPE_TV 4
+#define PRODUCT_TYPE_REPEATER 5
+#define PRODUCT_TYPE_DIRECT_DRIVE 6
+
+struct displayid_hdr {
+ u8 rev;
+ u8 bytes;
+ u8 prod_id;
+ u8 ext_count;
+} __packed;
+
+struct displayid_block {
+ u8 tag;
+ u8 rev;
+ u8 num_bytes;
+} __packed;
+
+struct displayid_tiled_block {
+ struct displayid_block base;
+ u8 tile_cap;
+ u8 topo[3];
+ u8 tile_size[4];
+ u8 tile_pixel_bezel[5];
+ u8 topology_id[8];
+} __packed;
+
+#endif
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 9305c718d78..11f8c84f98c 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -303,7 +303,8 @@
#define DP_TEST_CRC_B_CB 0x244
#define DP_TEST_SINK_MISC 0x246
-#define DP_TEST_CRC_SUPPORTED (1 << 5)
+# define DP_TEST_CRC_SUPPORTED (1 << 5)
+# define DP_TEST_COUNT_MASK 0x7
#define DP_TEST_RESPONSE 0x260
# define DP_TEST_ACK (1 << 0)
@@ -313,7 +314,7 @@
#define DP_TEST_EDID_CHECKSUM 0x261
#define DP_TEST_SINK 0x270
-#define DP_TEST_SINK_START (1 << 0)
+# define DP_TEST_SINK_START (1 << 0)
#define DP_PAYLOAD_TABLE_UPDATE_STATUS 0x2c0 /* 1.2 MST */
# define DP_PAYLOAD_TABLE_UPDATED (1 << 0)
@@ -404,26 +405,6 @@
#define MODE_I2C_READ 4
#define MODE_I2C_STOP 8
-/**
- * struct i2c_algo_dp_aux_data - driver interface structure for i2c over dp
- * aux algorithm
- * @running: set by the algo indicating whether an i2c is ongoing or whether
- * the i2c bus is quiescent
- * @address: i2c target address for the currently ongoing transfer
- * @aux_ch: driver callback to transfer a single byte of the i2c payload
- */
-struct i2c_algo_dp_aux_data {
- bool running;
- u16 address;
- int (*aux_ch) (struct i2c_adapter *adapter,
- int mode, uint8_t write_byte,
- uint8_t *read_byte);
-};
-
-int
-i2c_dp_aux_add_bus(struct i2c_adapter *adapter);
-
-
#define DP_LINK_STATUS_SIZE 6
bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count);
@@ -550,6 +531,7 @@ struct drm_dp_aux {
struct mutex hw_mutex;
ssize_t (*transfer)(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg);
+ unsigned i2c_nack_count, i2c_defer_count;
};
ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 338fc105383..00c1da92724 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -28,7 +28,7 @@
struct drm_dp_mst_branch;
/**
- * struct drm_dp_vcpi - Virtual Channel Payload Identifer
+ * struct drm_dp_vcpi - Virtual Channel Payload Identifier
* @vcpi: Virtual channel ID.
* @pbn: Payload Bandwidth Number for this channel
* @aligned_pbn: PBN aligned with slot size
@@ -92,6 +92,8 @@ struct drm_dp_mst_port {
struct drm_dp_vcpi vcpi;
struct drm_connector *connector;
struct drm_dp_mst_topology_mgr *mgr;
+
+ struct edid *cached_edid; /* for DP logical ports - make tiling work */
};
/**
@@ -371,7 +373,7 @@ struct drm_dp_sideband_msg_tx {
struct drm_dp_mst_topology_mgr;
struct drm_dp_mst_topology_cbs {
/* create a connector for a port */
- struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, char *path);
+ struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);
void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr,
struct drm_connector *connector);
void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr);
@@ -474,7 +476,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled);
-enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
+enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index b96031d947a..87d85e81d3a 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -27,12 +27,14 @@
#define EDID_LENGTH 128
#define DDC_ADDR 0x50
+#define DDC_ADDR2 0x52 /* E-DDC 1.2 - where DisplayID can hide */
#define CEA_EXT 0x02
#define VTB_EXT 0x10
#define DI_EXT 0x40
#define LS_EXT 0x50
#define MI_EXT 0x60
+#define DISPLAYID_EXT 0x70
struct est_timings {
u8 t1;
@@ -207,6 +209,61 @@ struct detailed_timing {
#define DRM_EDID_HDMI_DC_30 (1 << 4)
#define DRM_EDID_HDMI_DC_Y444 (1 << 3)
+/* ELD Header Block */
+#define DRM_ELD_HEADER_BLOCK_SIZE 4
+
+#define DRM_ELD_VER 0
+# define DRM_ELD_VER_SHIFT 3
+# define DRM_ELD_VER_MASK (0x1f << 3)
+
+#define DRM_ELD_BASELINE_ELD_LEN 2 /* in dwords! */
+
+/* ELD Baseline Block for ELD_Ver == 2 */
+#define DRM_ELD_CEA_EDID_VER_MNL 4
+# define DRM_ELD_CEA_EDID_VER_SHIFT 5
+# define DRM_ELD_CEA_EDID_VER_MASK (7 << 5)
+# define DRM_ELD_CEA_EDID_VER_NONE (0 << 5)
+# define DRM_ELD_CEA_EDID_VER_CEA861 (1 << 5)
+# define DRM_ELD_CEA_EDID_VER_CEA861A (2 << 5)
+# define DRM_ELD_CEA_EDID_VER_CEA861BCD (3 << 5)
+# define DRM_ELD_MNL_SHIFT 0
+# define DRM_ELD_MNL_MASK (0x1f << 0)
+
+#define DRM_ELD_SAD_COUNT_CONN_TYPE 5
+# define DRM_ELD_SAD_COUNT_SHIFT 4
+# define DRM_ELD_SAD_COUNT_MASK (0xf << 4)
+# define DRM_ELD_CONN_TYPE_SHIFT 2
+# define DRM_ELD_CONN_TYPE_MASK (3 << 2)
+# define DRM_ELD_CONN_TYPE_HDMI (0 << 2)
+# define DRM_ELD_CONN_TYPE_DP (1 << 2)
+# define DRM_ELD_SUPPORTS_AI (1 << 1)
+# define DRM_ELD_SUPPORTS_HDCP (1 << 0)
+
+#define DRM_ELD_AUD_SYNCH_DELAY 6 /* in units of 2 ms */
+# define DRM_ELD_AUD_SYNCH_DELAY_MAX 0xfa /* 500 ms */
+
+#define DRM_ELD_SPEAKER 7
+# define DRM_ELD_SPEAKER_RLRC (1 << 6)
+# define DRM_ELD_SPEAKER_FLRC (1 << 5)
+# define DRM_ELD_SPEAKER_RC (1 << 4)
+# define DRM_ELD_SPEAKER_RLR (1 << 3)
+# define DRM_ELD_SPEAKER_FC (1 << 2)
+# define DRM_ELD_SPEAKER_LFE (1 << 1)
+# define DRM_ELD_SPEAKER_FLR (1 << 0)
+
+#define DRM_ELD_PORT_ID 8 /* offsets 8..15 inclusive */
+# define DRM_ELD_PORT_ID_LEN 8
+
+#define DRM_ELD_MANUFACTURER_NAME0 16
+#define DRM_ELD_MANUFACTURER_NAME1 17
+
+#define DRM_ELD_PRODUCT_CODE0 18
+#define DRM_ELD_PRODUCT_CODE1 19
+
+#define DRM_ELD_MONITOR_NAME_STRING 20 /* offsets 20..(20+mnl-1) inclusive */
+
+#define DRM_ELD_CEA_SAD(mnl, sad) (20 + (mnl) + 3 * (sad))
+
struct edid {
u8 header[8];
/* Vendor & product info */
@@ -279,4 +336,56 @@ int
drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
const struct drm_display_mode *mode);
+/**
+ * drm_eld_mnl - Get ELD monitor name length in bytes.
+ * @eld: pointer to an eld memory structure with mnl set
+ */
+static inline int drm_eld_mnl(const uint8_t *eld)
+{
+ return (eld[DRM_ELD_CEA_EDID_VER_MNL] & DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT;
+}
+
+/**
+ * drm_eld_sad_count - Get ELD SAD count.
+ * @eld: pointer to an eld memory structure with sad_count set
+ */
+static inline int drm_eld_sad_count(const uint8_t *eld)
+{
+ return (eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_SAD_COUNT_MASK) >>
+ DRM_ELD_SAD_COUNT_SHIFT;
+}
+
+/**
+ * drm_eld_calc_baseline_block_size - Calculate baseline block size in bytes
+ * @eld: pointer to an eld memory structure with mnl and sad_count set
+ *
+ * This is a helper for determining the payload size of the baseline block, in
+ * bytes, for e.g. setting the Baseline_ELD_Len field in the ELD header block.
+ */
+static inline int drm_eld_calc_baseline_block_size(const uint8_t *eld)
+{
+ return DRM_ELD_MONITOR_NAME_STRING - DRM_ELD_HEADER_BLOCK_SIZE +
+ drm_eld_mnl(eld) + drm_eld_sad_count(eld) * 3;
+}
+
+/**
+ * drm_eld_size - Get ELD size in bytes
+ * @eld: pointer to a complete eld memory structure
+ *
+ * The returned value does not include the vendor block. It's vendor specific,
+ * and consists of the remaining bytes in the ELD memory buffer after
+ * drm_eld_size() bytes of header and baseline block.
+ *
+ * The returned value is guaranteed to be a multiple of 4.
+ */
+static inline int drm_eld_size(const uint8_t *eld)
+{
+ return DRM_ELD_HEADER_BLOCK_SIZE + eld[DRM_ELD_BASELINE_ELD_LEN] * 4;
+}
+
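
A short sketch of how the new offsets and inline helpers combine when picking apart an ELD buffer; the function is illustrative only:

static void example_parse_eld(const uint8_t *eld)
{
        int mnl = drm_eld_mnl(eld);
        const uint8_t *monitor_name = &eld[DRM_ELD_MONITOR_NAME_STRING];
        const uint8_t *first_sad = NULL;

        /* SADs follow the monitor name string, 3 bytes each */
        if (drm_eld_sad_count(eld) > 0)
                first_sad = &eld[DRM_ELD_CEA_SAD(mnl, 0)];

        (void)monitor_name;
        (void)first_sad;
}
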
+struct edid *drm_do_get_edid(struct drm_connector *connector,
+ int (*get_edid_block)(void *data, u8 *buf, unsigned int block,
+ size_t len),
+ void *data);
+
#endif /* __DRM_EDID_H__ */
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index f4ad254e348..b597068103a 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -34,9 +34,14 @@ struct drm_fb_helper;
#include <linux/kgdb.h>
+struct drm_fb_offset {
+ int x, y;
+};
+
struct drm_fb_helper_crtc {
struct drm_mode_set mode_set;
struct drm_display_mode *desired_mode;
+ int x, y;
};
struct drm_fb_helper_surface_size {
@@ -72,6 +77,7 @@ struct drm_fb_helper_funcs {
bool (*initial_config)(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_crtc **crtcs,
struct drm_display_mode **modes,
+ struct drm_fb_offset *offsets,
bool *enabled, int width, int height);
};
diff --git a/include/drm/drm_flip_work.h b/include/drm/drm_flip_work.h
index 9eed34dcd6a..d387cf06ae0 100644
--- a/include/drm/drm_flip_work.h
+++ b/include/drm/drm_flip_work.h
@@ -25,6 +25,7 @@
#define DRM_FLIP_WORK_H
#include <linux/kfifo.h>
+#include <linux/spinlock.h>
#include <linux/workqueue.h>
/**
@@ -32,9 +33,9 @@
*
* Util to queue up work to run from work-queue context after flip/vblank.
* Typically this can be used to defer unref of framebuffer's, cursor
- * bo's, etc until after vblank. The APIs are all safe (and lockless)
- * for up to one producer and once consumer at a time. The single-consumer
- * aspect is ensured by committing the queued work to a single work-queue.
+ * bo's, etc until after vblank. The APIs are all thread-safe.
+ * Moreover, drm_flip_work_queue_task and drm_flip_work_queue can be called
+ * in atomic context.
*/
struct drm_flip_work;
@@ -51,26 +52,40 @@ struct drm_flip_work;
typedef void (*drm_flip_func_t)(struct drm_flip_work *work, void *val);
/**
+ * struct drm_flip_task - flip work task
+ * @node: list entry element
+ * @data: data to pass to work->func
+ */
+struct drm_flip_task {
+ struct list_head node;
+ void *data;
+};
+
+/**
* struct drm_flip_work - flip work queue
* @name: debug name
- * @pending: number of queued but not committed items
- * @count: number of committed items
* @func: callback fxn called for each committed item
* @worker: worker which calls @func
- * @fifo: queue of committed items
+ * @queued: queued tasks
+ * @commited: committed tasks
+ * @lock: lock to access queued and commited lists
*/
struct drm_flip_work {
const char *name;
- atomic_t pending, count;
drm_flip_func_t func;
struct work_struct worker;
- DECLARE_KFIFO_PTR(fifo, void *);
+ struct list_head queued;
+ struct list_head commited;
+ spinlock_t lock;
};
+struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags);
+void drm_flip_work_queue_task(struct drm_flip_work *work,
+ struct drm_flip_task *task);
void drm_flip_work_queue(struct drm_flip_work *work, void *val);
void drm_flip_work_commit(struct drm_flip_work *work,
struct workqueue_struct *wq);
-int drm_flip_work_init(struct drm_flip_work *work, int size,
+void drm_flip_work_init(struct drm_flip_work *work,
const char *name, drm_flip_func_t func);
void drm_flip_work_cleanup(struct drm_flip_work *work);
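
With the list-based rework the queueing step no longer allocates, so a task can be prepared ahead of time and queued from atomic context; a hedged sketch (deferring a framebuffer unref is just one possible use, and in a real driver the allocation would happen earlier, in process context):

static void example_defer_fb_unref(struct drm_flip_work *work,
                                   struct drm_framebuffer *old_fb)
{
        struct drm_flip_task *task;

        /* allocation may sleep; a real driver would do this ahead of time */
        task = drm_flip_work_allocate_task(old_fb, GFP_KERNEL);
        if (!task)
                return;

        /* queueing is safe from atomic context, e.g. a vblank interrupt */
        drm_flip_work_queue_task(work, task);

        /* hand the queued tasks to the worker after the flip/vblank */
        drm_flip_work_commit(work, system_wq);
}
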
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 1e6ae1458f7..780511a459c 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -119,6 +119,13 @@ struct drm_gem_object {
* simply leave it as NULL.
*/
struct dma_buf_attachment *import_attach;
+
+ /**
+ * dumb - created as dumb buffer
+ * Whether the gem object was created using the dumb buffer interface
+ * as such it may not be used for GPU rendering.
+ */
+ bool dumb;
};
void drm_gem_object_release(struct drm_gem_object *obj);
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
index 2ff35f3de9c..acd6af8a8e6 100644
--- a/include/drm/drm_gem_cma_helper.h
+++ b/include/drm/drm_gem_cma_helper.h
@@ -4,6 +4,13 @@
#include <drm/drmP.h>
#include <drm/drm_gem.h>
+/**
+ * struct drm_gem_cma_object - GEM object backed by CMA memory allocations
+ * @base: base GEM object
+ * @paddr: physical address of the backing memory
+ * @sgt: scatter/gather table for imported PRIME buffers
+ * @vaddr: kernel virtual address of the backing memory
+ */
struct drm_gem_cma_object {
struct drm_gem_object base;
dma_addr_t paddr;
@@ -19,23 +26,30 @@ to_drm_gem_cma_obj(struct drm_gem_object *gem_obj)
return container_of(gem_obj, struct drm_gem_cma_object, base);
}
-/* free gem object. */
+/* free GEM object */
void drm_gem_cma_free_object(struct drm_gem_object *gem_obj);
-/* create memory region for drm framebuffer. */
+/* create memory region for DRM framebuffer */
+int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args);
+
+/* create memory region for DRM framebuffer */
int drm_gem_cma_dumb_create(struct drm_file *file_priv,
- struct drm_device *drm, struct drm_mode_create_dumb *args);
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args);
-/* map memory region for drm framebuffer to user space. */
+/* map memory region for DRM framebuffer to user space */
int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *drm, uint32_t handle, uint64_t *offset);
+ struct drm_device *drm, u32 handle,
+ u64 *offset);
-/* set vm_flags and we can change the vm attribute to other one at here. */
+/* set vm_flags and we can change the VM attribute to other one at here */
int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma);
-/* allocate physical memory. */
+/* allocate physical memory */
struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
- unsigned int size);
+ size_t size);
extern const struct vm_operations_struct drm_gem_cma_vm_ops;
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 8569dc5a102..f1d8d0dbb4f 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -26,6 +26,7 @@ struct mipi_dsi_device;
* struct mipi_dsi_msg - read/write DSI buffer
* @channel: virtual channel id
* @type: payload data type
+ * @flags: flags controlling this message transmission
* @tx_len: length of @tx_buf
* @tx_buf: data to be written
* @rx_len: length of @rx_buf
@@ -43,12 +44,44 @@ struct mipi_dsi_msg {
void *rx_buf;
};
+bool mipi_dsi_packet_format_is_short(u8 type);
+bool mipi_dsi_packet_format_is_long(u8 type);
+
+/**
+ * struct mipi_dsi_packet - represents a MIPI DSI packet in protocol format
+ * @size: size (in bytes) of the packet
+ * @header: the four bytes that make up the header (Data ID, Word Count or
+ * Packet Data, and ECC)
+ * @payload_length: number of bytes in the payload
+ * @payload: a pointer to a buffer containing the payload, if any
+ */
+struct mipi_dsi_packet {
+ size_t size;
+ u8 header[4];
+ size_t payload_length;
+ const u8 *payload;
+};
+
+int mipi_dsi_create_packet(struct mipi_dsi_packet *packet,
+ const struct mipi_dsi_msg *msg);
+
/**
* struct mipi_dsi_host_ops - DSI bus operations
* @attach: attach DSI device to DSI host
* @detach: detach DSI device from DSI host
- * @transfer: send and/or receive DSI packet, return number of received bytes,
- * or error
+ * @transfer: transmit a DSI packet
+ *
+ * DSI packets transmitted by .transfer() are passed in as mipi_dsi_msg
+ * structures. This structure contains information about the type of packet
+ * being transmitted as well as the transmit and receive buffers. When an
+ * error is encountered during transmission, this function will return a
+ * negative error code. On success it shall return the number of bytes
+ * transmitted for write packets or the number of bytes received for read
+ * packets.
+ *
+ * Note that typically DSI packet transmission is atomic, so the .transfer()
+ * function will seldom return anything other than the number of bytes
+ * contained in the transmit buffer on success.
*/
struct mipi_dsi_host_ops {
int (*attach)(struct mipi_dsi_host *host,
@@ -56,7 +89,7 @@ struct mipi_dsi_host_ops {
int (*detach)(struct mipi_dsi_host *host,
struct mipi_dsi_device *dsi);
ssize_t (*transfer)(struct mipi_dsi_host *host,
- struct mipi_dsi_msg *msg);
+ const struct mipi_dsi_msg *msg);
};
/**
@@ -130,12 +163,57 @@ static inline struct mipi_dsi_device *to_mipi_dsi_device(struct device *dev)
return container_of(dev, struct mipi_dsi_device, dev);
}
+struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np);
int mipi_dsi_attach(struct mipi_dsi_device *dsi);
int mipi_dsi_detach(struct mipi_dsi_device *dsi);
-ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, const void *data,
- size_t len);
+int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi,
+ u16 value);
+
+ssize_t mipi_dsi_generic_write(struct mipi_dsi_device *dsi, const void *payload,
+ size_t size);
+ssize_t mipi_dsi_generic_read(struct mipi_dsi_device *dsi, const void *params,
+ size_t num_params, void *data, size_t size);
+
+/**
+ * enum mipi_dsi_dcs_tear_mode - Tearing Effect Output Line mode
+ * @MIPI_DSI_DCS_TEAR_MODE_VBLANK: the TE output line consists of V-Blanking
+ * information only
+ * @MIPI_DSI_DCS_TEAR_MODE_VHBLANK : the TE output line consists of both
+ * V-Blanking and H-Blanking information
+ */
+enum mipi_dsi_dcs_tear_mode {
+ MIPI_DSI_DCS_TEAR_MODE_VBLANK,
+ MIPI_DSI_DCS_TEAR_MODE_VHBLANK,
+};
+
+#define MIPI_DSI_DCS_POWER_MODE_DISPLAY (1 << 2)
+#define MIPI_DSI_DCS_POWER_MODE_NORMAL (1 << 3)
+#define MIPI_DSI_DCS_POWER_MODE_SLEEP (1 << 4)
+#define MIPI_DSI_DCS_POWER_MODE_PARTIAL (1 << 5)
+#define MIPI_DSI_DCS_POWER_MODE_IDLE (1 << 6)
+
+ssize_t mipi_dsi_dcs_write_buffer(struct mipi_dsi_device *dsi,
+ const void *data, size_t len);
+ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, u8 cmd,
+ const void *data, size_t len);
ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data,
size_t len);
+int mipi_dsi_dcs_nop(struct mipi_dsi_device *dsi);
+int mipi_dsi_dcs_soft_reset(struct mipi_dsi_device *dsi);
+int mipi_dsi_dcs_get_power_mode(struct mipi_dsi_device *dsi, u8 *mode);
+int mipi_dsi_dcs_get_pixel_format(struct mipi_dsi_device *dsi, u8 *format);
+int mipi_dsi_dcs_enter_sleep_mode(struct mipi_dsi_device *dsi);
+int mipi_dsi_dcs_exit_sleep_mode(struct mipi_dsi_device *dsi);
+int mipi_dsi_dcs_set_display_off(struct mipi_dsi_device *dsi);
+int mipi_dsi_dcs_set_display_on(struct mipi_dsi_device *dsi);
+int mipi_dsi_dcs_set_column_address(struct mipi_dsi_device *dsi, u16 start,
+ u16 end);
+int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start,
+ u16 end);
+int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi);
+int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
+ enum mipi_dsi_dcs_tear_mode mode);
+int mipi_dsi_dcs_set_pixel_format(struct mipi_dsi_device *dsi, u8 format);
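
A hedged sketch of a panel enable sequence built from the new DCS helpers; the delay and the tear mode are placeholders rather than values from any panel datasheet, and msleep() comes from <linux/delay.h>:

static int example_panel_enable(struct mipi_dsi_device *dsi)
{
        int ret;

        ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
        if (ret < 0)
                return ret;
        msleep(120);    /* placeholder sleep-out delay */

        ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
        if (ret < 0)
                return ret;

        return mipi_dsi_dcs_set_display_on(dsi);
}
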
/**
* struct mipi_dsi_driver - DSI driver
@@ -167,9 +245,13 @@ static inline void mipi_dsi_set_drvdata(struct mipi_dsi_device *dsi, void *data)
dev_set_drvdata(&dsi->dev, data);
}
-int mipi_dsi_driver_register(struct mipi_dsi_driver *driver);
+int mipi_dsi_driver_register_full(struct mipi_dsi_driver *driver,
+ struct module *owner);
void mipi_dsi_driver_unregister(struct mipi_dsi_driver *driver);
+#define mipi_dsi_driver_register(driver) \
+ mipi_dsi_driver_register_full(driver, THIS_MODULE)
+
#define module_mipi_dsi_driver(__mipi_dsi_driver) \
module_driver(__mipi_dsi_driver, mipi_dsi_driver_register, \
mipi_dsi_driver_unregister)
diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
index 75a5c45e21c..70595ff565b 100644
--- a/include/drm/drm_modeset_lock.h
+++ b/include/drm/drm_modeset_lock.h
@@ -33,6 +33,7 @@ struct drm_modeset_lock;
* @ww_ctx: base acquire ctx
* @contended: used internally for -EDEADLK handling
* @locked: list of held locks
+ * @trylock_only: trylock mode used in atomic contexts/panic notifiers
*
* Each thread competing for a set of locks must use one acquire
* ctx. And if any lock fxn returns -EDEADLK, it must backoff and
@@ -126,11 +127,13 @@ void drm_modeset_unlock(struct drm_modeset_lock *lock);
struct drm_device;
struct drm_crtc;
+struct drm_plane;
void drm_modeset_lock_all(struct drm_device *dev);
int __drm_modeset_lock_all(struct drm_device *dev, bool trylock);
void drm_modeset_unlock_all(struct drm_device *dev);
-void drm_modeset_lock_crtc(struct drm_crtc *crtc);
+void drm_modeset_lock_crtc(struct drm_crtc *crtc,
+ struct drm_plane *plane);
void drm_modeset_unlock_crtc(struct drm_crtc *crtc);
void drm_warn_on_modeset_not_all_locked(struct drm_device *dev);
struct drm_modeset_acquire_ctx *
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 52e6870534b..a185392cafe 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -25,6 +25,7 @@
#define DRM_PLANE_HELPER_H
#include <drm/drm_rect.h>
+#include <drm/drm_crtc.h>
/*
* Drivers that don't allow primary plane scaling may pass this macro in place
@@ -42,6 +43,37 @@
* planes.
*/
+extern int drm_crtc_init(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ const struct drm_crtc_funcs *funcs);
+
+/**
+ * struct drm_plane_helper_funcs - helper operations for planes
+ * @prepare_fb: prepare a framebuffer for use by the plane
+ * @cleanup_fb: cleanup a framebuffer when it's no longer used by the plane
+ * @atomic_check: check that a given atomic state is valid and can be applied
+ * @atomic_update: apply an atomic state to the plane
+ *
+ * The helper operations are called by the mid-layer CRTC helper.
+ */
+struct drm_plane_helper_funcs {
+ int (*prepare_fb)(struct drm_plane *plane,
+ struct drm_framebuffer *fb);
+ void (*cleanup_fb)(struct drm_plane *plane,
+ struct drm_framebuffer *fb);
+
+ int (*atomic_check)(struct drm_plane *plane,
+ struct drm_plane_state *state);
+ void (*atomic_update)(struct drm_plane *plane,
+ struct drm_plane_state *old_state);
+};
+
+static inline void drm_plane_helper_add(struct drm_plane *plane,
+ const struct drm_plane_helper_funcs *funcs)
+{
+ plane->helper_private = (void *)funcs;
+}
+
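
A sketch of hooking the per-plane helpers up; the foo_* callbacks are hypothetical and would implement the driver's actual plane programming:

static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
        .prepare_fb     = foo_plane_prepare_fb,
        .cleanup_fb     = foo_plane_cleanup_fb,
        .atomic_check   = foo_plane_atomic_check,
        .atomic_update  = foo_plane_atomic_update,
};

static void foo_plane_init_helpers(struct drm_plane *plane)
{
        /* called once per plane after the plane has been initialized */
        drm_plane_helper_add(plane, &foo_plane_helper_funcs);
}
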
extern int drm_plane_helper_check_update(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@@ -68,4 +100,16 @@ extern struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev,
int num_formats);
+int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
+int drm_plane_helper_disable(struct drm_plane *plane);
+
+/* For use by drm_crtc_helper.c */
+int drm_plane_helper_commit(struct drm_plane *plane,
+ struct drm_plane_state *plane_state,
+ struct drm_framebuffer *old_fb);
#endif
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index a70d4564789..180ad0e6de2 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -259,4 +259,21 @@
INTEL_VGA_DEVICE(0x22b2, info), \
INTEL_VGA_DEVICE(0x22b3, info)
+#define INTEL_SKL_IDS(info) \
+ INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \
+ INTEL_VGA_DEVICE(0x1906, info), /* ULT GT1 */ \
+ INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
+ INTEL_VGA_DEVICE(0x1921, info), /* ULT GT2F */ \
+ INTEL_VGA_DEVICE(0x190E, info), /* ULX GT1 */ \
+ INTEL_VGA_DEVICE(0x191E, info), /* ULX GT2 */ \
+ INTEL_VGA_DEVICE(0x1912, info), /* DT GT2 */ \
+ INTEL_VGA_DEVICE(0x1902, info), /* DT GT1 */ \
+ INTEL_VGA_DEVICE(0x191B, info), /* Halo GT2 */ \
+ INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
+ INTEL_VGA_DEVICE(0x190B, info), /* Halo GT1 */ \
+ INTEL_VGA_DEVICE(0x191A, info), /* SRV GT2 */ \
+ INTEL_VGA_DEVICE(0x192A, info), /* SRV GT3 */ \
+ INTEL_VGA_DEVICE(0x190A, info), /* SRV GT1 */ \
+ INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */
+
#endif /* _I915_PCIIDS_H */
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index 46044171441..b620c317c77 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -68,6 +68,7 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
* non-blocking reserves should be tried.
* @list: thread private list of ttm_validate_buffer structs.
* @intr: should the wait be interruptible
+ * @dups: [out] optional list of duplicates.
*
* Tries to reserve bos pointed to by the list entries for validation.
* If the function returns 0, all buffers are marked as "unfenced",
@@ -83,6 +84,11 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
* calling process receives a signal while waiting. In that case, no
* buffers on the list will be reserved upon return.
*
+ * If dups is non NULL all buffers already reserved by the current thread
+ * (e.g. duplicates) are added to this list, otherwise -EALREADY is returned
+ * on the first already reserved buffer and all buffers from the list are
+ * unreserved again.
+ *
* Buffers reserved by this function should be unreserved by
* a call to either ttm_eu_backoff_reservation() or
* ttm_eu_fence_buffer_objects() when command submission is complete or
@@ -90,7 +96,8 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
*/
extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
- struct list_head *list, bool intr);
+ struct list_head *list, bool intr,
+ struct list_head *dups);
/**
* function ttm_eu_fence_buffer_objects.
diff --git a/include/dt-bindings/thermal/tegra124-soctherm.h b/include/dt-bindings/thermal/tegra124-soctherm.h
new file mode 100644
index 00000000000..85aaf66690f
--- /dev/null
+++ b/include/dt-bindings/thermal/tegra124-soctherm.h
@@ -0,0 +1,13 @@
+/*
+ * This header provides constants for binding nvidia,tegra124-soctherm.
+ */
+
+#ifndef _DT_BINDINGS_THERMAL_TEGRA124_SOCTHERM_H
+#define _DT_BINDINGS_THERMAL_TEGRA124_SOCTHERM_H
+
+#define TEGRA124_SOCTHERM_SENSOR_CPU 0
+#define TEGRA124_SOCTHERM_SENSOR_MEM 1
+#define TEGRA124_SOCTHERM_SENSOR_GPU 2
+#define TEGRA124_SOCTHERM_SENSOR_PLLX 3
+
+#endif
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index ad9db6045b2..b3f45a57834 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -60,7 +60,8 @@ struct arch_timer_cpu {
#ifdef CONFIG_KVM_ARM_TIMER
int kvm_timer_hyp_init(void);
-int kvm_timer_init(struct kvm *kvm);
+void kvm_timer_enable(struct kvm *kvm);
+void kvm_timer_init(struct kvm *kvm);
void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
const struct kvm_irq_level *irq);
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
@@ -77,11 +78,8 @@ static inline int kvm_timer_hyp_init(void)
return 0;
};
-static inline int kvm_timer_init(struct kvm *kvm)
-{
- return 0;
-}
-
+static inline void kvm_timer_enable(struct kvm *kvm) {}
+static inline void kvm_timer_init(struct kvm *kvm) {}
static inline void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
const struct kvm_irq_level *irq) {}
static inline void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) {}
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 206dcc3b3f7..ac4888dc86b 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -274,7 +274,7 @@ struct kvm_exit_mmio;
#ifdef CONFIG_KVM_ARM_VGIC
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
int kvm_vgic_hyp_init(void);
-int kvm_vgic_init(struct kvm *kvm);
+int kvm_vgic_map_resources(struct kvm *kvm);
int kvm_vgic_create(struct kvm *kvm);
void kvm_vgic_destroy(struct kvm *kvm);
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
@@ -287,7 +287,8 @@ bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm_exit_mmio *mmio);
#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
-#define vgic_initialized(k) ((k)->arch.vgic.ready)
+#define vgic_initialized(k) (!!((k)->arch.vgic.nr_cpus))
+#define vgic_ready(k) ((k)->arch.vgic.ready)
int vgic_v2_probe(struct device_node *vgic_node,
const struct vgic_ops **ops,
@@ -321,7 +322,7 @@ static inline int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr,
return -ENXIO;
}
-static inline int kvm_vgic_init(struct kvm *kvm)
+static inline int kvm_vgic_map_resources(struct kvm *kvm)
{
return 0;
}
@@ -373,6 +374,11 @@ static inline bool vgic_initialized(struct kvm *kvm)
{
return true;
}
+
+static inline bool vgic_ready(struct kvm *kvm)
+{
+ return true;
+}
#endif
#endif
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 6bff83b1f29..856d381b1d5 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -153,6 +153,7 @@ int acpi_unmap_lsapic(int cpu);
int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base);
int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base);
+int acpi_ioapic_registered(acpi_handle handle, u32 gsi_base);
void acpi_irq_stats_init(void);
extern u32 acpi_irq_handled;
extern u32 acpi_irq_not_handled;
diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h
index 5f338684413..260d78b587c 100644
--- a/include/linux/ceph/auth.h
+++ b/include/linux/ceph/auth.h
@@ -13,6 +13,7 @@
struct ceph_auth_client;
struct ceph_authorizer;
+struct ceph_msg;
struct ceph_auth_handshake {
struct ceph_authorizer *authorizer;
@@ -20,6 +21,10 @@ struct ceph_auth_handshake {
size_t authorizer_buf_len;
void *authorizer_reply_buf;
size_t authorizer_reply_buf_len;
+ int (*sign_message)(struct ceph_auth_handshake *auth,
+ struct ceph_msg *msg);
+ int (*check_message_signature)(struct ceph_auth_handshake *auth,
+ struct ceph_msg *msg);
};
struct ceph_auth_client_ops {
@@ -66,6 +71,11 @@ struct ceph_auth_client_ops {
void (*reset)(struct ceph_auth_client *ac);
void (*destroy)(struct ceph_auth_client *ac);
+
+ int (*sign_message)(struct ceph_auth_handshake *auth,
+ struct ceph_msg *msg);
+ int (*check_message_signature)(struct ceph_auth_handshake *auth,
+ struct ceph_msg *msg);
};
struct ceph_auth_client {
@@ -113,4 +123,20 @@ extern int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac,
extern void ceph_auth_invalidate_authorizer(struct ceph_auth_client *ac,
int peer_type);
+static inline int ceph_auth_sign_message(struct ceph_auth_handshake *auth,
+ struct ceph_msg *msg)
+{
+ if (auth->sign_message)
+ return auth->sign_message(auth, msg);
+ return 0;
+}
+
+static inline
+int ceph_auth_check_message_signature(struct ceph_auth_handshake *auth,
+ struct ceph_msg *msg)
+{
+ if (auth->check_message_signature)
+ return auth->check_message_signature(auth, msg);
+ return 0;
+}
#endif
diff --git a/include/linux/ceph/buffer.h b/include/linux/ceph/buffer.h
index 07ad423cc37..07ca15e7610 100644
--- a/include/linux/ceph/buffer.h
+++ b/include/linux/ceph/buffer.h
@@ -10,8 +10,7 @@
/*
* a simple reference counted buffer.
*
- * use kmalloc for small sizes (<= one page), vmalloc for larger
- * sizes.
+ * use kmalloc for smaller sizes, vmalloc for larger sizes.
*/
struct ceph_buffer {
struct kref kref;
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
index d12659ce550..71e05bbf8ce 100644
--- a/include/linux/ceph/ceph_features.h
+++ b/include/linux/ceph/ceph_features.h
@@ -84,6 +84,7 @@ static inline u64 ceph_sanitize_features(u64 features)
CEPH_FEATURE_PGPOOL3 | \
CEPH_FEATURE_OSDENC | \
CEPH_FEATURE_CRUSH_TUNABLES | \
+ CEPH_FEATURE_MSG_AUTH | \
CEPH_FEATURE_CRUSH_TUNABLES2 | \
CEPH_FEATURE_REPLY_CREATE_INODE | \
CEPH_FEATURE_OSDHASHPSPOOL | \
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index 3c97d5e9b95..c0dadaac26e 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -522,8 +522,11 @@ struct ceph_mds_reply_dirfrag {
__le32 dist[];
} __attribute__ ((packed));
-#define CEPH_LOCK_FCNTL 1
-#define CEPH_LOCK_FLOCK 2
+#define CEPH_LOCK_FCNTL 1
+#define CEPH_LOCK_FLOCK 2
+#define CEPH_LOCK_FCNTL_INTR 3
+#define CEPH_LOCK_FLOCK_INTR 4
+
#define CEPH_LOCK_SHARED 1
#define CEPH_LOCK_EXCL 2
@@ -549,6 +552,7 @@ struct ceph_filelock {
int ceph_flags_to_mode(int flags);
+#define CEPH_INLINE_NONE ((__u64)-1)
/* capability bits */
#define CEPH_CAP_PIN 1 /* no specific capabilities beyond the pin */
@@ -613,6 +617,8 @@ int ceph_flags_to_mode(int flags);
CEPH_CAP_LINK_SHARED | \
CEPH_CAP_FILE_SHARED | \
CEPH_CAP_XATTR_SHARED)
+#define CEPH_STAT_CAP_INLINE_DATA (CEPH_CAP_FILE_SHARED | \
+ CEPH_CAP_FILE_RD)
#define CEPH_CAP_ANY_SHARED (CEPH_CAP_AUTH_SHARED | \
CEPH_CAP_LINK_SHARED | \
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 07bc359b88a..8b11a79ca1c 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -29,6 +29,7 @@
#define CEPH_OPT_NOSHARE (1<<1) /* don't share client with other sbs */
#define CEPH_OPT_MYIP (1<<2) /* specified my ip */
#define CEPH_OPT_NOCRC (1<<3) /* no data crc on writes */
+#define CEPH_OPT_NOMSGAUTH (1<<4) /* don't require cephx message signatures */
#define CEPH_OPT_DEFAULT (0)
@@ -184,7 +185,6 @@ extern bool libceph_compatible(void *data);
extern const char *ceph_msg_type_name(int type);
extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid);
extern void *ceph_kvmalloc(size_t size, gfp_t flags);
-extern void ceph_kvfree(const void *ptr);
extern struct ceph_options *ceph_parse_options(char *options,
const char *dev_name, const char *dev_name_end,
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 40ae58e3e9d..d9d396c1650 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -42,6 +42,10 @@ struct ceph_connection_operations {
struct ceph_msg * (*alloc_msg) (struct ceph_connection *con,
struct ceph_msg_header *hdr,
int *skip);
+ int (*sign_message) (struct ceph_connection *con, struct ceph_msg *msg);
+
+ int (*check_message_signature) (struct ceph_connection *con,
+ struct ceph_msg *msg);
};
/* use format string %s%d */
@@ -142,7 +146,10 @@ struct ceph_msg_data_cursor {
*/
struct ceph_msg {
struct ceph_msg_header hdr; /* header */
- struct ceph_msg_footer footer; /* footer */
+ union {
+ struct ceph_msg_footer footer; /* footer */
+ struct ceph_msg_footer_old old_footer; /* old format footer */
+ };
struct kvec front; /* unaligned blobs of message */
struct ceph_buffer *middle;
diff --git a/include/linux/ceph/msgr.h b/include/linux/ceph/msgr.h
index 3d94a73b5f3..1c1887206ff 100644
--- a/include/linux/ceph/msgr.h
+++ b/include/linux/ceph/msgr.h
@@ -152,7 +152,8 @@ struct ceph_msg_header {
receiver: mask against ~PAGE_MASK */
struct ceph_entity_name src;
- __le32 reserved;
+ __le16 compat_version;
+ __le16 reserved;
__le32 crc; /* header crc32c */
} __attribute__ ((packed));
@@ -164,13 +165,21 @@ struct ceph_msg_header {
/*
* follows data payload
*/
+struct ceph_msg_footer_old {
+ __le32 front_crc, middle_crc, data_crc;
+ __u8 flags;
+} __attribute__ ((packed));
+
struct ceph_msg_footer {
__le32 front_crc, middle_crc, data_crc;
+ // sig holds the 64 bits of the digital signature for the message PLR
+ __le64 sig;
__u8 flags;
} __attribute__ ((packed));
#define CEPH_MSG_FOOTER_COMPLETE (1<<0) /* msg wasn't aborted */
#define CEPH_MSG_FOOTER_NOCRC (1<<1) /* no data crc */
+#define CEPH_MSG_FOOTER_SIGNED (1<<2) /* msg was signed */
#endif
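
A short sketch of how a receiver might size the footer by peer features, assuming CEPH_FEATURE_MSG_AUTH gates the new format; the helper name is illustrative:

/* Hedged sketch: pick the footer size based on the peer's features.
 * CEPH_FEATURE_MSG_AUTH comes from ceph_features.h. */
static size_t example_footer_len(u64 peer_features)
{
	if (peer_features & CEPH_FEATURE_MSG_AUTH)
		return sizeof(struct ceph_msg_footer);		/* crcs + 64-bit sig + flags */
	return sizeof(struct ceph_msg_footer_old);		/* crcs + flags only */
}
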
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 03aeb27fcc6..5d86416d35f 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -87,6 +87,13 @@ struct ceph_osd_req_op {
struct ceph_osd_data osd_data;
} extent;
struct {
+ __le32 name_len;
+ __le32 value_len;
+ __u8 cmp_op; /* CEPH_OSD_CMPXATTR_OP_* */
+ __u8 cmp_mode; /* CEPH_OSD_CMPXATTR_MODE_* */
+ struct ceph_osd_data osd_data;
+ } xattr;
+ struct {
const char *class_name;
const char *method_name;
struct ceph_osd_data request_info;
@@ -295,6 +302,9 @@ extern void osd_req_op_cls_response_data_pages(struct ceph_osd_request *,
extern void osd_req_op_cls_init(struct ceph_osd_request *osd_req,
unsigned int which, u16 opcode,
const char *class, const char *method);
+extern int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
+ u16 opcode, const char *name, const void *value,
+ size_t size, u8 cmp_op, u8 cmp_mode);
extern void osd_req_op_watch_init(struct ceph_osd_request *osd_req,
unsigned int which, u16 opcode,
u64 cookie, u64 version, int flag);
@@ -318,7 +328,8 @@ extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
struct ceph_file_layout *layout,
struct ceph_vino vino,
u64 offset, u64 *len,
- int num_ops, int opcode, int flags,
+ unsigned int which, int num_ops,
+ int opcode, int flags,
struct ceph_snap_context *snapc,
u32 truncate_seq, u64 truncate_size,
bool use_mempool);
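
A hedged sketch of a caller using the new xattr op initializer for a string-equality compare; the CMPXATTR constants are from rados.h and the wrapper name is made up:

/* Hedged sketch: attach a string-equality compare-xattr op to an
 * already allocated request; error handling is left to the caller. */
static int example_add_cmpxattr(struct ceph_osd_request *req,
				unsigned int which,
				const char *name, const char *want)
{
	return osd_req_op_xattr_init(req, which, CEPH_OSD_OP_CMPXATTR,
				     name, want, strlen(want),
				     CEPH_OSD_CMPXATTR_OP_EQ,
				     CEPH_OSD_CMPXATTR_MODE_STRING);
}
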
diff --git a/include/linux/ceph/pagelist.h b/include/linux/ceph/pagelist.h
index 5f871d84ddc..13d71fe18b0 100644
--- a/include/linux/ceph/pagelist.h
+++ b/include/linux/ceph/pagelist.h
@@ -1,8 +1,10 @@
#ifndef __FS_CEPH_PAGELIST_H
#define __FS_CEPH_PAGELIST_H
-#include <linux/list.h>
+#include <asm/byteorder.h>
#include <linux/atomic.h>
+#include <linux/list.h>
+#include <linux/types.h>
struct ceph_pagelist {
struct list_head head;
diff --git a/include/linux/clock_cooling.h b/include/linux/clock_cooling.h
new file mode 100644
index 00000000000..4d1019d56f7
--- /dev/null
+++ b/include/linux/clock_cooling.h
@@ -0,0 +1,65 @@
+/*
+ * linux/include/linux/clock_cooling.h
+ *
+ * Copyright (C) 2014 Eduardo Valentin <edubezval@gmail.com>
+ *
+ * Copyright (C) 2013 Texas Instruments Inc.
+ * Contact: Eduardo Valentin <eduardo.valentin@ti.com>
+ *
+ * Highly based on cpu_cooling.c.
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
+ * Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __CPU_COOLING_H__
+#define __CPU_COOLING_H__
+
+#include <linux/of.h>
+#include <linux/thermal.h>
+#include <linux/cpumask.h>
+
+#ifdef CONFIG_CLOCK_THERMAL
+/**
+ * clock_cooling_register - function to create clock cooling device.
+ * @dev: struct device pointer to the device used as clock cooling device.
+ * @clock_name: string containing the clock used as cooling mechanism.
+ */
+struct thermal_cooling_device *
+clock_cooling_register(struct device *dev, const char *clock_name);
+
+/**
+ * clock_cooling_unregister - function to remove clock cooling device.
+ * @cdev: thermal cooling device pointer.
+ */
+void clock_cooling_unregister(struct thermal_cooling_device *cdev);
+
+unsigned long clock_cooling_get_level(struct thermal_cooling_device *cdev,
+ unsigned long freq);
+#else /* !CONFIG_CLOCK_THERMAL */
+static inline struct thermal_cooling_device *
+clock_cooling_register(struct device *dev, const char *clock_name)
+{
+ return NULL;
+}
+static inline
+void clock_cooling_unregister(struct thermal_cooling_device *cdev)
+{
+}
+static inline
+unsigned long clock_cooling_get_level(struct thermal_cooling_device *cdev,
+ unsigned long freq)
+{
+ return THERMAL_CSTATE_INVALID;
+}
+#endif /* CONFIG_CLOCK_THERMAL */
+
+#endif /* __CPU_COOLING_H__ */
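
A minimal usage sketch for the new API, assuming a platform driver and a clock named "cpu_clk"; neither is taken from this header:

#include <linux/clock_cooling.h>
#include <linux/platform_device.h>

/* Hedged sketch: register/unregister a clock cooling device from a
 * platform driver. With CONFIG_CLOCK_THERMAL disabled the stub returns
 * NULL, which this simplified check does not distinguish. */
static struct thermal_cooling_device *example_cdev;

static int example_probe(struct platform_device *pdev)
{
	example_cdev = clock_cooling_register(&pdev->dev, "cpu_clk");
	return IS_ERR(example_cdev) ? PTR_ERR(example_cdev) : 0;
}

static int example_remove(struct platform_device *pdev)
{
	clock_cooling_unregister(example_cdev);
	return 0;
}
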
diff --git a/include/linux/cma.h b/include/linux/cma.h
index a93438beb33..9384ba66e97 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -15,6 +15,7 @@
struct cma;
+extern unsigned long totalcma_pages;
extern phys_addr_t cma_get_base(struct cma *cma);
extern unsigned long cma_get_size(struct cma *cma);
diff --git a/include/linux/cred.h b/include/linux/cred.h
index b2d0820837c..2fb2ca2127e 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -68,6 +68,7 @@ extern void groups_free(struct group_info *);
extern int set_current_groups(struct group_info *);
extern void set_groups(struct cred *, struct group_info *);
extern int groups_search(const struct group_info *, kgid_t);
+extern bool may_setgroups(void);
/* access the groups "array" with this macro */
#define GROUP_AT(gi, i) \
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index f1863dcd83e..ce447f0f1ba 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -188,7 +188,7 @@ extern struct devfreq *devm_devfreq_add_device(struct device *dev,
extern void devm_devfreq_remove_device(struct device *dev,
struct devfreq *devfreq);
-/* Supposed to be called by PM_SLEEP/PM_RUNTIME callbacks */
+/* Supposed to be called by PM callbacks */
extern int devfreq_suspend_device(struct devfreq *devfreq);
extern int devfreq_resume_device(struct devfreq *devfreq);
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index d5d388160f4..c3007cb4bfa 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -129,11 +129,14 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
extern u64 dma_get_required_mask(struct device *dev);
-#ifndef set_arch_dma_coherent_ops
-static inline int set_arch_dma_coherent_ops(struct device *dev)
-{
- return 0;
-}
+#ifndef arch_setup_dma_ops
+static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
+ u64 size, struct iommu_ops *iommu,
+ bool coherent) { }
+#endif
+
+#ifndef arch_teardown_dma_ops
+static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif
static inline unsigned int dma_get_max_seg_size(struct device *dev)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 88157253b9e..f90c0282c11 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2086,7 +2086,7 @@ struct filename {
extern long vfs_truncate(struct path *, loff_t);
extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs,
struct file *filp);
-extern int do_fallocate(struct file *file, int mode, loff_t offset,
+extern int vfs_fallocate(struct file *file, int mode, loff_t offset,
loff_t len);
extern long do_sys_open(int dfd, const char __user *filename, int flags,
umode_t mode);
@@ -2176,7 +2176,6 @@ static inline int sb_is_blkdev_sb(struct super_block *sb)
extern int sync_filesystem(struct super_block *);
extern const struct file_operations def_blk_fops;
extern const struct file_operations def_chr_fops;
-extern const struct file_operations bad_sock_fops;
#ifdef CONFIG_BLOCK
extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long);
extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long);
diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h
index 84d60cb841b..bf0321eabbd 100644
--- a/include/linux/fsl_ifc.h
+++ b/include/linux/fsl_ifc.h
@@ -29,7 +29,16 @@
#include <linux/of_platform.h>
#include <linux/interrupt.h>
-#define FSL_IFC_BANK_COUNT 4
+/*
+ * The actual number of banks implemented depends on the IFC version
+ * - IFC version 1.0 implements 4 banks.
+ * - IFC version 1.1 onward implements 8 banks.
+ */
+#define FSL_IFC_BANK_COUNT 8
+
+#define FSL_IFC_VERSION_MASK 0x0F0F0000
+#define FSL_IFC_VERSION_1_0_0 0x01000000
+#define FSL_IFC_VERSION_1_1_0 0x01010000
/*
* CSPR - Chip Select Property Register
@@ -776,23 +785,23 @@ struct fsl_ifc_regs {
__be32 cspr;
u32 res2;
} cspr_cs[FSL_IFC_BANK_COUNT];
- u32 res3[0x19];
+ u32 res3[0xd];
struct {
__be32 amask;
u32 res4[0x2];
} amask_cs[FSL_IFC_BANK_COUNT];
- u32 res5[0x18];
+ u32 res5[0xc];
struct {
__be32 csor;
__be32 csor_ext;
u32 res6;
} csor_cs[FSL_IFC_BANK_COUNT];
- u32 res7[0x18];
+ u32 res7[0xc];
struct {
__be32 ftim[4];
u32 res8[0x8];
} ftim_cs[FSL_IFC_BANK_COUNT];
- u32 res9[0x60];
+ u32 res9[0x30];
__be32 rb_stat;
u32 res10[0x2];
__be32 ifc_gcr;
@@ -827,6 +836,8 @@ struct fsl_ifc_ctrl {
int nand_irq;
spinlock_t lock;
void *nand;
+ int version;
+ int banks;
u32 nand_stat;
wait_queue_head_t nand_wait;
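
A hedged sketch of how a probe routine might fill the new version/banks fields from the IFC revision register; the register pointer plumbing is assumed, not shown by this hunk:

/* Hedged sketch: derive the implemented bank count from the IFC
 * revision register. */
static void example_ifc_detect_banks(struct fsl_ifc_ctrl *ctrl,
				     __be32 __iomem *ifc_rev)
{
	ctrl->version = ioread32be(ifc_rev) & FSL_IFC_VERSION_MASK;
	ctrl->banks = (ctrl->version == FSL_IFC_VERSION_1_0_0) ? 4 : 8;
}
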
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index ed501953f0b..1da602982cf 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -39,6 +39,12 @@
# define FTRACE_FORCE_LIST_FUNC 0
#endif
+/* Main tracing buffer and events set up */
+#ifdef CONFIG_TRACING
+void trace_init(void);
+#else
+static inline void trace_init(void) { }
+#endif
struct module;
struct ftrace_hash;
@@ -873,6 +879,7 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk)
enum ftrace_dump_mode;
extern enum ftrace_dump_mode ftrace_dump_on_oops;
+extern int tracepoint_printk;
extern void disable_trace_on_warning(void);
extern int __disable_trace_on_warning;
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index 11c0182a153..cbb5790a35c 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -1,9 +1,24 @@
/*
* Copyright (C) 2012 Avionic Design GmbH
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
*/
#ifndef __LINUX_HDMI_H_
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index d8257ab60ba..2c476acb87d 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -164,7 +164,7 @@ struct st_sensor_transfer_function {
};
/**
- * struct st_sensors - ST sensors list
+ * struct st_sensor_settings - ST specific sensor settings
* @wai: Contents of WhoAmI register.
* @sensors_supported: List of supported sensors by struct itself.
* @ch: IIO channels for the sensor.
@@ -177,7 +177,7 @@ struct st_sensor_transfer_function {
* @multi_read_bit: Use or not particular bit for [I2C/SPI] multi-read.
* @bootime: samples to discard when sensor passing from power-down to power-up.
*/
-struct st_sensors {
+struct st_sensor_settings {
u8 wai;
char sensors_supported[ST_SENSORS_MAX_4WAI][ST_SENSORS_MAX_NAME];
struct iio_chan_spec *ch;
@@ -196,7 +196,7 @@ struct st_sensors {
* struct st_sensor_data - ST sensor device status
* @dev: Pointer to instance of struct device (I2C or SPI).
* @trig: The trigger in use by the core driver.
- * @sensor: Pointer to the current sensor struct in use.
+ * @sensor_settings: Pointer to the specific sensor settings in use.
* @current_fullscale: Maximum range of measure by the sensor.
* @vdd: Pointer to sensor's Vdd power supply
* @vdd_io: Pointer to sensor's Vdd-IO power supply
@@ -213,7 +213,7 @@ struct st_sensors {
struct st_sensor_data {
struct device *dev;
struct iio_trigger *trig;
- struct st_sensors *sensor;
+ struct st_sensor_settings *sensor_settings;
struct st_sensor_fullscale_avl *current_fullscale;
struct regulator *vdd;
struct regulator *vdd_io;
@@ -279,7 +279,7 @@ int st_sensors_read_info_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *ch, int *val);
int st_sensors_check_device_support(struct iio_dev *indio_dev,
- int num_sensors_list, const struct st_sensors *sensors);
+ int num_sensors_list, const struct st_sensor_settings *sensor_settings);
ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev,
struct device_attribute *attr, char *buf);
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 15dc6bc2bdd..3642ce7ef51 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -13,6 +13,7 @@
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/iio/types.h>
+#include <linux/of.h>
/* IIO TODO LIST */
/*
* Provide means of adjusting timer accuracy.
@@ -326,6 +327,11 @@ struct iio_dev;
* @update_scan_mode: function to configure device and scan buffer when
* channels have changed
* @debugfs_reg_access: function to read or write register value of device
+ * @of_xlate: function pointer to obtain channel specifier index.
+ * When #iio-cells is greater than '0', the driver could
+ * provide a custom of_xlate function that reads the
+ * *args* and returns the appropriate index into the
+ * registered IIO channels array.
**/
struct iio_info {
struct module *driver_module;
@@ -385,6 +391,8 @@ struct iio_info {
int (*debugfs_reg_access)(struct iio_dev *indio_dev,
unsigned reg, unsigned writeval,
unsigned *readval);
+ int (*of_xlate)(struct iio_dev *indio_dev,
+ const struct of_phandle_args *iiospec);
};
/**
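
A minimal sketch of a driver-provided of_xlate for a one-cell specifier that is simply the channel index (the function name is illustrative):

/* Hedged sketch: bounds-check the one-cell specifier and return it as
 * the channel index. */
static int example_of_xlate(struct iio_dev *indio_dev,
			    const struct of_phandle_args *iiospec)
{
	if (iiospec->args[0] >= (u32)indio_dev->num_channels)
		return -EINVAL;
	return iiospec->args[0];
}
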
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 7a7bd15e54f..38daa453f2e 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -21,6 +21,7 @@
#include <linux/errno.h>
#include <linux/err.h>
+#include <linux/of.h>
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <trace/events/iommu.h>
@@ -106,7 +107,9 @@ enum iommu_attr {
* @remove_device: remove device from iommu grouping
* @domain_get_attr: Query domain attributes
* @domain_set_attr: Change domain attributes
+ * @of_xlate: add OF master IDs to iommu grouping
* @pgsize_bitmap: bitmap of supported page sizes
+ * @priv: per-instance data private to the iommu driver
*/
struct iommu_ops {
bool (*capable)(enum iommu_cap);
@@ -138,7 +141,12 @@ struct iommu_ops {
/* Get the number of windows per domain */
u32 (*domain_get_windows)(struct iommu_domain *domain);
+#ifdef CONFIG_OF_IOMMU
+ int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
+#endif
+
unsigned long pgsize_bitmap;
+ void *priv;
};
#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index e365d5ec69c..1eee6bcfcf7 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -6,6 +6,7 @@
#include <linux/rwsem.h>
#include <linux/notifier.h>
#include <linux/nsproxy.h>
+#include <linux/ns_common.h>
struct user_namespace;
@@ -58,7 +59,7 @@ struct ipc_namespace {
/* user_ns which owns the ipc ns */
struct user_namespace *user_ns;
- unsigned int proc_inum;
+ struct ns_common ns;
};
extern struct ipc_namespace init_ipc_ns;
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 03a4ea37ba8..1e8b0cf3079 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -49,6 +49,10 @@
#define GICD_CTLR_ENABLE_G1A (1U << 1)
#define GICD_CTLR_ENABLE_G1 (1U << 0)
+#define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1)
+#define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32)
+#define GICD_TYPER_LPIS (1U << 17)
+
#define GICD_IROUTER_SPI_MODE_ONE (0U << 31)
#define GICD_IROUTER_SPI_MODE_ANY (1U << 31)
@@ -76,9 +80,27 @@
#define GICR_MOVALLR 0x0110
#define GICR_PIDR2 GICD_PIDR2
+#define GICR_CTLR_ENABLE_LPIS (1UL << 0)
+
+#define GICR_TYPER_CPU_NUMBER(r) (((r) >> 8) & 0xffff)
+
#define GICR_WAKER_ProcessorSleep (1U << 1)
#define GICR_WAKER_ChildrenAsleep (1U << 2)
+#define GICR_PROPBASER_NonShareable (0U << 10)
+#define GICR_PROPBASER_InnerShareable (1U << 10)
+#define GICR_PROPBASER_OuterShareable (2U << 10)
+#define GICR_PROPBASER_SHAREABILITY_MASK (3UL << 10)
+#define GICR_PROPBASER_nCnB (0U << 7)
+#define GICR_PROPBASER_nC (1U << 7)
+#define GICR_PROPBASER_RaWt (2U << 7)
+#define GICR_PROPBASER_RaWb (3U << 7)
+#define GICR_PROPBASER_WaWt (4U << 7)
+#define GICR_PROPBASER_WaWb (5U << 7)
+#define GICR_PROPBASER_RaWaWt (6U << 7)
+#define GICR_PROPBASER_RaWaWb (7U << 7)
+#define GICR_PROPBASER_IDBITS_MASK (0x1f)
+
/*
* Re-Distributor registers, offsets from SGI_base
*/
@@ -91,9 +113,93 @@
#define GICR_IPRIORITYR0 GICD_IPRIORITYR
#define GICR_ICFGR0 GICD_ICFGR
+#define GICR_TYPER_PLPIS (1U << 0)
#define GICR_TYPER_VLPIS (1U << 1)
#define GICR_TYPER_LAST (1U << 4)
+#define LPI_PROP_GROUP1 (1 << 1)
+#define LPI_PROP_ENABLED (1 << 0)
+
+/*
+ * ITS registers, offsets from ITS_base
+ */
+#define GITS_CTLR 0x0000
+#define GITS_IIDR 0x0004
+#define GITS_TYPER 0x0008
+#define GITS_CBASER 0x0080
+#define GITS_CWRITER 0x0088
+#define GITS_CREADR 0x0090
+#define GITS_BASER 0x0100
+#define GITS_PIDR2 GICR_PIDR2
+
+#define GITS_TRANSLATER 0x10040
+
+#define GITS_TYPER_PTA (1UL << 19)
+
+#define GITS_CBASER_VALID (1UL << 63)
+#define GITS_CBASER_nCnB (0UL << 59)
+#define GITS_CBASER_nC (1UL << 59)
+#define GITS_CBASER_RaWt (2UL << 59)
+#define GITS_CBASER_RaWb (3UL << 59)
+#define GITS_CBASER_WaWt (4UL << 59)
+#define GITS_CBASER_WaWb (5UL << 59)
+#define GITS_CBASER_RaWaWt (6UL << 59)
+#define GITS_CBASER_RaWaWb (7UL << 59)
+#define GITS_CBASER_NonShareable (0UL << 10)
+#define GITS_CBASER_InnerShareable (1UL << 10)
+#define GITS_CBASER_OuterShareable (2UL << 10)
+#define GITS_CBASER_SHAREABILITY_MASK (3UL << 10)
+
+#define GITS_BASER_NR_REGS 8
+
+#define GITS_BASER_VALID (1UL << 63)
+#define GITS_BASER_nCnB (0UL << 59)
+#define GITS_BASER_nC (1UL << 59)
+#define GITS_BASER_RaWt (2UL << 59)
+#define GITS_BASER_RaWb (3UL << 59)
+#define GITS_BASER_WaWt (4UL << 59)
+#define GITS_BASER_WaWb (5UL << 59)
+#define GITS_BASER_RaWaWt (6UL << 59)
+#define GITS_BASER_RaWaWb (7UL << 59)
+#define GITS_BASER_TYPE_SHIFT (56)
+#define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7)
+#define GITS_BASER_ENTRY_SIZE_SHIFT (48)
+#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0xff) + 1)
+#define GITS_BASER_NonShareable (0UL << 10)
+#define GITS_BASER_InnerShareable (1UL << 10)
+#define GITS_BASER_OuterShareable (2UL << 10)
+#define GITS_BASER_SHAREABILITY_SHIFT (10)
+#define GITS_BASER_SHAREABILITY_MASK (3UL << GITS_BASER_SHAREABILITY_SHIFT)
+#define GITS_BASER_PAGE_SIZE_SHIFT (8)
+#define GITS_BASER_PAGE_SIZE_4K (0UL << GITS_BASER_PAGE_SIZE_SHIFT)
+#define GITS_BASER_PAGE_SIZE_16K (1UL << GITS_BASER_PAGE_SIZE_SHIFT)
+#define GITS_BASER_PAGE_SIZE_64K (2UL << GITS_BASER_PAGE_SIZE_SHIFT)
+#define GITS_BASER_PAGE_SIZE_MASK (3UL << GITS_BASER_PAGE_SIZE_SHIFT)
+
+#define GITS_BASER_TYPE_NONE 0
+#define GITS_BASER_TYPE_DEVICE 1
+#define GITS_BASER_TYPE_VCPU 2
+#define GITS_BASER_TYPE_CPU 3
+#define GITS_BASER_TYPE_COLLECTION 4
+#define GITS_BASER_TYPE_RESERVED5 5
+#define GITS_BASER_TYPE_RESERVED6 6
+#define GITS_BASER_TYPE_RESERVED7 7
+
+/*
+ * ITS commands
+ */
+#define GITS_CMD_MAPD 0x08
+#define GITS_CMD_MAPC 0x09
+#define GITS_CMD_MAPVI 0x0a
+#define GITS_CMD_MOVI 0x01
+#define GITS_CMD_DISCARD 0x0f
+#define GITS_CMD_INV 0x0c
+#define GITS_CMD_MOVALL 0x0e
+#define GITS_CMD_INVALL 0x0d
+#define GITS_CMD_INT 0x03
+#define GITS_CMD_CLEAR 0x04
+#define GITS_CMD_SYNC 0x05
+
/*
* CPU interface registers
*/
@@ -189,12 +295,34 @@
#include <linux/stringify.h>
+/*
+ * We need a value to serve as an irq-type for LPIs. Choose one that will
+ * hopefully pique the interest of the reviewer.
+ */
+#define GIC_IRQ_TYPE_LPI 0xa110c8ed
+
+struct rdists {
+ struct {
+ void __iomem *rd_base;
+ struct page *pend_page;
+ phys_addr_t phys_base;
+ } __percpu *rdist;
+ struct page *prop_page;
+ int id_bits;
+ u64 flags;
+};
+
static inline void gic_write_eoir(u64 irq)
{
asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq));
isb();
}
+struct irq_domain;
+int its_cpu_init(void);
+int its_init(struct device_node *node, struct rdists *rdists,
+ struct irq_domain *domain);
+
#endif
#endif
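
A small sketch showing the new GITS_BASER field helpers in use; the function is illustrative only:

/* Hedged sketch: decode one GITS_BASER value with the new macros. */
static void example_describe_baser(u64 val)
{
	pr_info("ITS table: type %llu, %llu bytes/entry, %s\n",
		(unsigned long long)GITS_BASER_TYPE(val),
		(unsigned long long)GITS_BASER_ENTRY_SIZE(val),
		(val & GITS_BASER_VALID) ? "valid" : "invalid");
}
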
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 13eed92c7d2..71d706d5f16 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -91,6 +91,8 @@
#ifndef __ASSEMBLY__
+#include <linux/irqdomain.h>
+
struct device_node;
extern struct irq_chip gic_arch_extn;
@@ -106,6 +108,8 @@ static inline void gic_init(unsigned int nr, int start,
gic_init_bases(nr, start, dist, cpu, 0, NULL);
}
+int gicv2m_of_init(struct device_node *node, struct irq_domain *parent);
+
void gic_send_sgi(unsigned int cpu_id, unsigned int irq);
int gic_get_cpu_id(unsigned int cpu);
void gic_migrate_target(unsigned int new_cpu_id);
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index b9376cd5a18..25a822f6f00 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -68,6 +68,7 @@ static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
* Number of interrupts per specific IRQ source, since bootup
*/
extern unsigned int kstat_irqs(unsigned int irq);
+extern unsigned int kstat_irqs_usr(unsigned int irq);
/*
* Number of interrupts per cpu, since bootup
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index a6059bdf7b0..26f106022c8 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -43,6 +43,7 @@
* include/linux/kvm_h.
*/
#define KVM_MEMSLOT_INVALID (1UL << 16)
+#define KVM_MEMSLOT_INCOHERENT (1UL << 17)
/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS 2
@@ -353,6 +354,8 @@ struct kvm_memslots {
struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
/* The mapping table from slot id to the index in memslots[]. */
short id_to_index[KVM_MEM_SLOTS_NUM];
+ atomic_t lru_slot;
+ int used_slots;
};
struct kvm {
@@ -395,7 +398,6 @@ struct kvm {
* Update side is protected by irq_lock.
*/
struct kvm_irq_routing_table __rcu *irq_routing;
- struct hlist_head mask_notifier_list;
#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
struct hlist_head irq_ack_notifier_list;
@@ -447,6 +449,14 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
int __must_check vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);
+#ifdef __KVM_HAVE_IOAPIC
+void kvm_vcpu_request_scan_ioapic(struct kvm *kvm);
+#else
+static inline void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
+{
+}
+#endif
+
#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
@@ -711,44 +721,6 @@ struct kvm_irq_ack_notifier {
void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};
-struct kvm_assigned_dev_kernel {
- struct kvm_irq_ack_notifier ack_notifier;
- struct list_head list;
- int assigned_dev_id;
- int host_segnr;
- int host_busnr;
- int host_devfn;
- unsigned int entries_nr;
- int host_irq;
- bool host_irq_disabled;
- bool pci_2_3;
- struct msix_entry *host_msix_entries;
- int guest_irq;
- struct msix_entry *guest_msix_entries;
- unsigned long irq_requested_type;
- int irq_source_id;
- int flags;
- struct pci_dev *dev;
- struct kvm *kvm;
- spinlock_t intx_lock;
- spinlock_t intx_mask_lock;
- char irq_name[32];
- struct pci_saved_state *pci_saved_state;
-};
-
-struct kvm_irq_mask_notifier {
- void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
- int irq;
- struct hlist_node link;
-};
-
-void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
- struct kvm_irq_mask_notifier *kimn);
-void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
- struct kvm_irq_mask_notifier *kimn);
-void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
- bool mask);
-
int kvm_irq_map_gsi(struct kvm *kvm,
struct kvm_kernel_irq_routing_entry *entries, int gsi);
int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);
@@ -770,12 +742,6 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
-int kvm_iommu_map_guest(struct kvm *kvm);
-int kvm_iommu_unmap_guest(struct kvm *kvm);
-int kvm_assign_device(struct kvm *kvm,
- struct kvm_assigned_dev_kernel *assigned_dev);
-int kvm_deassign_device(struct kvm *kvm,
- struct kvm_assigned_dev_kernel *assigned_dev);
#else
static inline int kvm_iommu_map_pages(struct kvm *kvm,
struct kvm_memory_slot *slot)
@@ -787,11 +753,6 @@ static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
struct kvm_memory_slot *slot)
{
}
-
-static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
-{
- return 0;
-}
#endif
static inline void kvm_guest_enter(void)
@@ -832,12 +793,28 @@ static inline void kvm_guest_exit(void)
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
- struct kvm_memory_slot *memslot;
+ int start = 0, end = slots->used_slots;
+ int slot = atomic_read(&slots->lru_slot);
+ struct kvm_memory_slot *memslots = slots->memslots;
+
+ if (gfn >= memslots[slot].base_gfn &&
+ gfn < memslots[slot].base_gfn + memslots[slot].npages)
+ return &memslots[slot];
- kvm_for_each_memslot(memslot, slots)
- if (gfn >= memslot->base_gfn &&
- gfn < memslot->base_gfn + memslot->npages)
- return memslot;
+ while (start < end) {
+ slot = start + (end - start) / 2;
+
+ if (gfn >= memslots[slot].base_gfn)
+ end = slot;
+ else
+ start = slot + 1;
+ }
+
+ if (gfn >= memslots[start].base_gfn &&
+ gfn < memslots[start].base_gfn + memslots[start].npages) {
+ atomic_set(&slots->lru_slot, start);
+ return &memslots[start];
+ }
return NULL;
}
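
The rewritten lookup assumes memslots[] is sorted by base_gfn in descending order: the cached lru_slot is tried first, then a binary search finds the first slot whose base_gfn is not above gfn. A stand-alone illustration of that search, with invented values:

/* Hedged illustration (user-space): binary search over a descending
 * array of base addresses, mirroring the kernel loop above. */
#include <stdio.h>

int main(void)
{
	unsigned long base[] = { 0x8000, 0x4000, 0x1000, 0x0 }; /* descending */
	unsigned long gfn = 0x4200;
	int start = 0, end = 4, slot;

	while (start < end) {
		slot = start + (end - start) / 2;
		if (gfn >= base[slot])
			end = slot;
		else
			start = slot + 1;
	}
	printf("gfn 0x%lx falls in slot %d (base 0x%lx)\n",
	       gfn, start, base[start]);
	return 0;
}
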
@@ -1011,25 +988,6 @@ static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }
#endif
-#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
-
-long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
- unsigned long arg);
-
-void kvm_free_all_assigned_devices(struct kvm *kvm);
-
-#else
-
-static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
- unsigned long arg)
-{
- return -ENOTTY;
-}
-
-static inline void kvm_free_all_assigned_devices(struct kvm *kvm) {}
-
-#endif
-
static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
set_bit(req, &vcpu->requests);
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index b606bb689a3..931da7e917c 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -54,33 +54,6 @@ typedef u64 hfn_t;
typedef hfn_t pfn_t;
-union kvm_ioapic_redirect_entry {
- u64 bits;
- struct {
- u8 vector;
- u8 delivery_mode:3;
- u8 dest_mode:1;
- u8 delivery_status:1;
- u8 polarity:1;
- u8 remote_irr:1;
- u8 trig_mode:1;
- u8 mask:1;
- u8 reserve:7;
- u8 reserved[4];
- u8 dest_id;
- } fields;
-};
-
-struct kvm_lapic_irq {
- u32 vector;
- u32 delivery_mode;
- u32 dest_mode;
- u32 level;
- u32 trig_mode;
- u32 shorthand;
- u32 dest_id;
-};
-
struct gfn_to_hva_cache {
u64 generation;
gpa_t gpa;
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 361101fef27..cfceef32c9b 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -13,6 +13,7 @@
#define __LINUX_LEDS_H_INCLUDED
#include <linux/list.h>
+#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
@@ -42,11 +43,20 @@ struct led_classdev {
#define LED_BLINK_ONESHOT (1 << 17)
#define LED_BLINK_ONESHOT_STOP (1 << 18)
#define LED_BLINK_INVERT (1 << 19)
+#define LED_SYSFS_DISABLE (1 << 20)
+#define SET_BRIGHTNESS_ASYNC (1 << 21)
+#define SET_BRIGHTNESS_SYNC (1 << 22)
/* Set LED brightness level */
/* Must not sleep, use a workqueue if needed */
void (*brightness_set)(struct led_classdev *led_cdev,
enum led_brightness brightness);
+ /*
+ * Set LED brightness level immediately - it can block the caller for
+ * the time required for accessing a LED device register.
+ */
+ int (*brightness_set_sync)(struct led_classdev *led_cdev,
+ enum led_brightness brightness);
/* Get LED brightness level */
enum led_brightness (*brightness_get)(struct led_classdev *led_cdev);
@@ -85,6 +95,9 @@ struct led_classdev {
/* true if activated - deactivate routine uses it to do cleanup */
bool activated;
#endif
+
+ /* Ensures consistent access to the LED Flash Class device */
+ struct mutex led_access;
};
extern int led_classdev_register(struct device *parent,
@@ -151,6 +164,33 @@ extern void led_set_brightness(struct led_classdev *led_cdev,
*/
extern int led_update_brightness(struct led_classdev *led_cdev);
+/**
+ * led_sysfs_disable - disable LED sysfs interface
+ * @led_cdev: the LED to set
+ *
+ * Disable the led_cdev's sysfs interface.
+ */
+extern void led_sysfs_disable(struct led_classdev *led_cdev);
+
+/**
+ * led_sysfs_enable - enable LED sysfs interface
+ * @led_cdev: the LED to set
+ *
+ * Enable the led_cdev's sysfs interface.
+ */
+extern void led_sysfs_enable(struct led_classdev *led_cdev);
+
+/**
+ * led_sysfs_is_disabled - check if LED sysfs interface is disabled
+ * @led_cdev: the LED to query
+ *
+ * Returns: true if the led_cdev's sysfs interface is disabled.
+ */
+static inline bool led_sysfs_is_disabled(struct led_classdev *led_cdev)
+{
+ return led_cdev->flags & LED_SYSFS_DISABLE;
+}
+
/*
* LED Triggers
*/
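
A minimal sketch of a driver taking the LED away from sysfs while it has exclusive control, using the new flag, helper and mutex; the function name is made up:

/* Hedged sketch: hide the LED's sysfs interface during exclusive use;
 * a matching led_sysfs_enable() is expected when control is released. */
static void example_grab_led(struct led_classdev *led_cdev)
{
	mutex_lock(&led_cdev->led_access);
	led_sysfs_disable(led_cdev);
	mutex_unlock(&led_cdev->led_access);
}
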
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 01aad3ed89e..fab9b32ace8 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -36,9 +36,6 @@ extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
extern int migrate_prep(void);
extern int migrate_prep_local(void);
-extern int migrate_vmas(struct mm_struct *mm,
- const nodemask_t *from, const nodemask_t *to,
- unsigned long flags);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page);
@@ -57,13 +54,6 @@ static inline int migrate_pages(struct list_head *l, new_page_t new,
static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }
-static inline int migrate_vmas(struct mm_struct *mm,
- const nodemask_t *from, const nodemask_t *to,
- unsigned long flags)
-{
- return -ENOSYS;
-}
-
static inline void migrate_page_copy(struct page *newpage,
struct page *page) {}
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index ea4f1c46f76..4e5bd813bb9 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -120,6 +120,15 @@ enum {
};
enum {
+ MLX5_MKEY_INBOX_PG_ACCESS = 1 << 31
+};
+
+enum {
+ MLX5_PFAULT_SUBTYPE_WQE = 0,
+ MLX5_PFAULT_SUBTYPE_RDMA = 1,
+};
+
+enum {
MLX5_PERM_LOCAL_READ = 1 << 2,
MLX5_PERM_LOCAL_WRITE = 1 << 3,
MLX5_PERM_REMOTE_READ = 1 << 4,
@@ -180,6 +189,19 @@ enum {
MLX5_MKEY_MASK_FREE = 1ull << 29,
};
+enum {
+ MLX5_UMR_TRANSLATION_OFFSET_EN = (1 << 4),
+
+ MLX5_UMR_CHECK_NOT_FREE = (1 << 5),
+ MLX5_UMR_CHECK_FREE = (2 << 5),
+
+ MLX5_UMR_INLINE = (1 << 7),
+};
+
+#define MLX5_UMR_MTT_ALIGNMENT 0x40
+#define MLX5_UMR_MTT_MASK (MLX5_UMR_MTT_ALIGNMENT - 1)
+#define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT
+
enum mlx5_event {
MLX5_EVENT_TYPE_COMP = 0x0,
@@ -206,6 +228,8 @@ enum mlx5_event {
MLX5_EVENT_TYPE_CMD = 0x0a,
MLX5_EVENT_TYPE_PAGE_REQUEST = 0xb,
+
+ MLX5_EVENT_TYPE_PAGE_FAULT = 0xc,
};
enum {
@@ -225,6 +249,7 @@ enum {
MLX5_DEV_CAP_FLAG_APM = 1LL << 17,
MLX5_DEV_CAP_FLAG_ATOMIC = 1LL << 18,
MLX5_DEV_CAP_FLAG_BLOCK_MCAST = 1LL << 23,
+ MLX5_DEV_CAP_FLAG_ON_DMND_PG = 1LL << 24,
MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29,
MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30,
MLX5_DEV_CAP_FLAG_DCT = 1LL << 37,
@@ -290,6 +315,8 @@ enum {
enum {
HCA_CAP_OPMOD_GET_MAX = 0,
HCA_CAP_OPMOD_GET_CUR = 1,
+ HCA_CAP_OPMOD_GET_ODP_MAX = 4,
+ HCA_CAP_OPMOD_GET_ODP_CUR = 5
};
struct mlx5_inbox_hdr {
@@ -319,6 +346,23 @@ struct mlx5_cmd_query_adapter_mbox_out {
u8 vsd_psid[16];
};
+enum mlx5_odp_transport_cap_bits {
+ MLX5_ODP_SUPPORT_SEND = 1 << 31,
+ MLX5_ODP_SUPPORT_RECV = 1 << 30,
+ MLX5_ODP_SUPPORT_WRITE = 1 << 29,
+ MLX5_ODP_SUPPORT_READ = 1 << 28,
+};
+
+struct mlx5_odp_caps {
+ char reserved[0x10];
+ struct {
+ __be32 rc_odp_caps;
+ __be32 uc_odp_caps;
+ __be32 ud_odp_caps;
+ } per_transport_caps;
+ char reserved2[0xe4];
+};
+
struct mlx5_cmd_init_hca_mbox_in {
struct mlx5_inbox_hdr hdr;
u8 rsvd0[2];
@@ -439,6 +483,27 @@ struct mlx5_eqe_page_req {
__be32 rsvd1[5];
};
+struct mlx5_eqe_page_fault {
+ __be32 bytes_committed;
+ union {
+ struct {
+ u16 reserved1;
+ __be16 wqe_index;
+ u16 reserved2;
+ __be16 packet_length;
+ u8 reserved3[12];
+ } __packed wqe;
+ struct {
+ __be32 r_key;
+ u16 reserved1;
+ __be16 packet_length;
+ __be32 rdma_op_len;
+ __be64 rdma_va;
+ } __packed rdma;
+ } __packed;
+ __be32 flags_qpn;
+} __packed;
+
union ev_data {
__be32 raw[7];
struct mlx5_eqe_cmd cmd;
@@ -450,6 +515,7 @@ union ev_data {
struct mlx5_eqe_congestion cong;
struct mlx5_eqe_stall_vl stall_vl;
struct mlx5_eqe_page_req req_pages;
+ struct mlx5_eqe_page_fault page_fault;
} __packed;
struct mlx5_eqe {
@@ -776,6 +842,10 @@ struct mlx5_query_eq_mbox_out {
struct mlx5_eq_context ctx;
};
+enum {
+ MLX5_MKEY_STATUS_FREE = 1 << 6,
+};
+
struct mlx5_mkey_seg {
/* This is a two bit field occupying bits 31-30.
* bit 31 is always 0,
@@ -812,7 +882,7 @@ struct mlx5_query_special_ctxs_mbox_out {
struct mlx5_create_mkey_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 input_mkey_index;
- u8 rsvd0[4];
+ __be32 flags;
struct mlx5_mkey_seg seg;
u8 rsvd1[16];
__be32 xlat_oct_act_size;
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index b1bf41556b3..166d9315fe4 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -113,6 +113,13 @@ enum {
MLX5_REG_HOST_ENDIANNESS = 0x7004,
};
+enum mlx5_page_fault_resume_flags {
+ MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0,
+ MLX5_PAGE_FAULT_RESUME_WRITE = 1 << 1,
+ MLX5_PAGE_FAULT_RESUME_RDMA = 1 << 2,
+ MLX5_PAGE_FAULT_RESUME_ERROR = 1 << 7,
+};
+
enum dbg_rsc_type {
MLX5_DBG_RSC_QP,
MLX5_DBG_RSC_EQ,
@@ -467,7 +474,7 @@ struct mlx5_priv {
struct workqueue_struct *pg_wq;
struct rb_root page_root;
int fw_pages;
- int reg_pages;
+ atomic_t reg_pages;
struct list_head free_list;
struct mlx5_core_health health;
@@ -703,6 +710,9 @@ void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
+#endif
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector);
@@ -740,6 +750,8 @@ int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
int npsvs, u32 *sig_index);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
+int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
+ struct mlx5_odp_caps *odp_caps);
static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 3fa075daeb1..61f7a342d1b 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -50,6 +50,9 @@
#define MLX5_BSF_APPTAG_ESCAPE 0x1
#define MLX5_BSF_APPREF_ESCAPE 0x2
+#define MLX5_QPN_BITS 24
+#define MLX5_QPN_MASK ((1 << MLX5_QPN_BITS) - 1)
+
enum mlx5_qp_optpar {
MLX5_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0,
MLX5_QP_OPTPAR_RRE = 1 << 1,
@@ -189,6 +192,14 @@ struct mlx5_wqe_ctrl_seg {
__be32 imm;
};
+#define MLX5_WQE_CTRL_DS_MASK 0x3f
+#define MLX5_WQE_CTRL_QPN_MASK 0xffffff00
+#define MLX5_WQE_CTRL_QPN_SHIFT 8
+#define MLX5_WQE_DS_UNITS 16
+#define MLX5_WQE_CTRL_OPCODE_MASK 0xff
+#define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00
+#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8
+
struct mlx5_wqe_xrc_seg {
__be32 xrc_srqn;
u8 rsvd[12];
@@ -292,6 +303,8 @@ struct mlx5_wqe_signature_seg {
u8 rsvd1[11];
};
+#define MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK 0x3ff
+
struct mlx5_wqe_inline_seg {
__be32 byte_count;
};
@@ -360,9 +373,46 @@ struct mlx5_stride_block_ctrl_seg {
__be16 num_entries;
};
+enum mlx5_pagefault_flags {
+ MLX5_PFAULT_REQUESTOR = 1 << 0,
+ MLX5_PFAULT_WRITE = 1 << 1,
+ MLX5_PFAULT_RDMA = 1 << 2,
+};
+
+/* Contains the details of a pagefault. */
+struct mlx5_pagefault {
+ u32 bytes_committed;
+ u8 event_subtype;
+ enum mlx5_pagefault_flags flags;
+ union {
+ /* Initiator or send message responder pagefault details. */
+ struct {
+ /* Received packet size, only valid for responders. */
+ u32 packet_size;
+ /*
+ * WQE index. Refers to either the send queue or
+ * receive queue, according to event_subtype.
+ */
+ u16 wqe_index;
+ } wqe;
+ /* RDMA responder pagefault details */
+ struct {
+ u32 r_key;
+ /*
+ * Received packet size, minimal size page fault
+ * resolution required for forward progress.
+ */
+ u32 packet_size;
+ u32 rdma_op_len;
+ u64 rdma_va;
+ } rdma;
+ };
+};
+
struct mlx5_core_qp {
struct mlx5_core_rsc_common common; /* must be first */
void (*event) (struct mlx5_core_qp *, int);
+ void (*pfault_handler)(struct mlx5_core_qp *, struct mlx5_pagefault *);
int qpn;
struct mlx5_rsc_debug *dbg;
int pid;
@@ -530,6 +580,17 @@ static inline struct mlx5_core_mr *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u
return radix_tree_lookup(&dev->priv.mr_table.tree, key);
}
+struct mlx5_page_fault_resume_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 flags_qpn;
+ u8 reserved[4];
+};
+
+struct mlx5_page_fault_resume_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd[8];
+};
+
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp,
struct mlx5_create_qp_mbox_in *in,
@@ -549,6 +610,10 @@ void mlx5_init_qp_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev);
int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
+ u8 context, int error);
+#endif
static inline const char *mlx5_qp_type_str(int type)
{
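
A hedged sketch of a pfault_handler implementation that resumes the QP afterwards; the dev lookup helper and the flag choice are assumptions, not part of this patch:

/* Hedged sketch: a per-QP ODP page-fault handler. The dev lookup and
 * the actual page-in work are elided. */
extern struct mlx5_core_dev *example_qp_to_dev(struct mlx5_core_qp *qp);

static void example_pfault_handler(struct mlx5_core_qp *qp,
				   struct mlx5_pagefault *pfault)
{
	struct mlx5_core_dev *dev = example_qp_to_dev(qp);
	u8 flags = (pfault->flags & MLX5_PFAULT_RDMA) ?
		   MLX5_PAGE_FAULT_RESUME_RDMA : 0;

	/* ... resolve the faulting pages for pfault->wqe or pfault->rdma ... */

	if (mlx5_core_page_fault_resume(dev, qp->qpn, flags, 0))
		pr_err("QP 0x%x: page fault resume failed\n", qp->qpn);
}
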
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c0a67b894c4..f80d0194c9b 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -286,8 +286,6 @@ struct vm_operations_struct {
*/
struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
unsigned long addr);
- int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
- const nodemask_t *to, unsigned long flags);
#endif
/* called by sys_remap_file_pages() to populate non-linear mapping */
int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index ab8564b0346..95243d28a0e 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -98,11 +98,11 @@ struct mmu_notifier_ops {
/*
* invalidate_range_start() and invalidate_range_end() must be
* paired and are called only when the mmap_sem and/or the
- * locks protecting the reverse maps are held. The subsystem
- * must guarantee that no additional references are taken to
- * the pages in the range established between the call to
- * invalidate_range_start() and the matching call to
- * invalidate_range_end().
+ * locks protecting the reverse maps are held. If the subsystem
+ * can't guarantee that no additional references are taken to
+ * the pages in the range, it has to implement the
+ * invalidate_range() notifier to remove any references taken
+ * after invalidate_range_start().
*
* Invalidation of multiple concurrent ranges may be
* optionally permitted by the driver. Either way the
@@ -144,6 +144,29 @@ struct mmu_notifier_ops {
void (*invalidate_range_end)(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start, unsigned long end);
+
+ /*
+ * invalidate_range() is either called between
+ * invalidate_range_start() and invalidate_range_end() when the
+ * VM has to free pages that were unmapped, but before the
+ * pages are actually freed, or outside of _start()/_end() when
+ * a (remote) TLB flush is necessary.
+ *
+ * If invalidate_range() is used to manage a non-CPU TLB with
+ * shared page-tables, it is not necessary to implement the
+ * invalidate_range_start()/end() notifiers, as
+ * invalidate_range() already catches the points in time when an
+ * external TLB range needs to be flushed.
+ *
+ * The invalidate_range() function is called under the ptl
+ * spin-lock and not allowed to sleep.
+ *
+ * Note that this function might be called with just a sub-range
+ * of what was passed to invalidate_range_start()/end(), if
+ * called between those functions.
+ */
+ void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
+ unsigned long start, unsigned long end);
};
/*
@@ -190,6 +213,8 @@ extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
unsigned long start, unsigned long end);
extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
unsigned long start, unsigned long end);
+extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
+ unsigned long start, unsigned long end);
static inline void mmu_notifier_release(struct mm_struct *mm)
{
@@ -242,6 +267,13 @@ static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
__mmu_notifier_invalidate_range_end(mm, start, end);
}
+static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ if (mm_has_notifiers(mm))
+ __mmu_notifier_invalidate_range(mm, start, end);
+}
+
static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
mm->mmu_notifier_mm = NULL;
@@ -279,6 +311,44 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
__young; \
})
+#define ptep_clear_flush_notify(__vma, __address, __ptep) \
+({ \
+ unsigned long ___addr = __address & PAGE_MASK; \
+ struct mm_struct *___mm = (__vma)->vm_mm; \
+ pte_t ___pte; \
+ \
+ ___pte = ptep_clear_flush(__vma, __address, __ptep); \
+ mmu_notifier_invalidate_range(___mm, ___addr, \
+ ___addr + PAGE_SIZE); \
+ \
+ ___pte; \
+})
+
+#define pmdp_clear_flush_notify(__vma, __haddr, __pmd) \
+({ \
+ unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \
+ struct mm_struct *___mm = (__vma)->vm_mm; \
+ pmd_t ___pmd; \
+ \
+ ___pmd = pmdp_clear_flush(__vma, __haddr, __pmd); \
+ mmu_notifier_invalidate_range(___mm, ___haddr, \
+ ___haddr + HPAGE_PMD_SIZE); \
+ \
+ ___pmd; \
+})
+
+#define pmdp_get_and_clear_notify(__mm, __haddr, __pmd) \
+({ \
+ unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \
+ pmd_t ___pmd; \
+ \
+ ___pmd = pmdp_get_and_clear(__mm, __haddr, __pmd); \
+ mmu_notifier_invalidate_range(__mm, ___haddr, \
+ ___haddr + HPAGE_PMD_SIZE); \
+ \
+ ___pmd; \
+})
+
/*
* set_pte_at_notify() sets the pte _after_ running the notifier.
* This is safe to start by updating the secondary MMUs, because the primary MMU
@@ -342,6 +412,11 @@ static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
{
}
+static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+}
+
static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
}
@@ -352,6 +427,9 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
+#define ptep_clear_flush_notify ptep_clear_flush
+#define pmdp_clear_flush_notify pmdp_clear_flush
+#define pmdp_get_and_clear_notify pmdp_get_and_clear
#define set_pte_at_notify set_pte_at
#endif /* CONFIG_MMU_NOTIFIER */
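
A hedged sketch of an ops table for the case the new comment describes, a non-CPU TLB sharing the CPU page tables, where only invalidate_range() is implemented:

/* Hedged sketch: a notifier for a device TLB that walks the CPU page
 * tables directly, so only the new invalidate_range() hook is needed. */
static void example_dev_tlb_inval(struct mmu_notifier *mn,
				  struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	/* issue the device-side TLB invalidation for [start, end) here */
}

static const struct mmu_notifier_ops example_dev_tlb_ops = {
	.invalidate_range = example_dev_tlb_inval,
};
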
diff --git a/include/linux/module.h b/include/linux/module.h
index 71f282a4e30..ebfb0e153c6 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -210,20 +210,6 @@ enum module_state {
MODULE_STATE_UNFORMED, /* Still setting it up. */
};
-/**
- * struct module_ref - per cpu module reference counts
- * @incs: number of module get on this cpu
- * @decs: number of module put on this cpu
- *
- * We force an alignment on 8 or 16 bytes, so that alloc_percpu()
- * put @incs/@decs in same cache line, with no extra memory cost,
- * since alloc_percpu() is fine grained.
- */
-struct module_ref {
- unsigned long incs;
- unsigned long decs;
-} __attribute((aligned(2 * sizeof(unsigned long))));
-
struct module {
enum module_state state;
@@ -367,7 +353,7 @@ struct module {
/* Destruction function. */
void (*exit)(void);
- struct module_ref __percpu *refptr;
+ atomic_t refcnt;
#endif
#ifdef CONFIG_CONSTRUCTORS
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index e4d451e4600..3d4ea7eb2b6 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -455,8 +455,21 @@ struct nand_hw_control {
* be provided if an hardware ECC is available
* @calculate: function for ECC calculation or readback from ECC hardware
* @correct: function for ECC correction, matching to ECC generator (sw/hw)
- * @read_page_raw: function to read a raw page without ECC
- * @write_page_raw: function to write a raw page without ECC
+ * @read_page_raw: function to read a raw page without ECC. This function
+ * should hide the specific layout used by the ECC
+ * controller and always return contiguous in-band and
+ * out-of-band data even if they're not stored
+ * contiguously on the NAND chip (e.g.
+ * NAND_ECC_HW_SYNDROME interleaves in-band and
+ * out-of-band data).
+ * @write_page_raw: function to write a raw page without ECC. This function
+ * should hide the specific layout used by the ECC
+ * controller and consider the passed data as contiguous
+ * in-band and out-of-band data. ECC controller is
+ * responsible for doing the appropriate transformations
+ * to adapt to its specific layout (e.g.
+ * NAND_ECC_HW_SYNDROME interleaves in-band and
+ * out-of-band data).
* @read_page: function to read a page according to the ECC generator
* requirements; returns maximum number of bitflips corrected in
* any single ECC step, 0 if bitflips uncorrectable, -EIO hw error
@@ -723,6 +736,7 @@ struct nand_chip {
#define NAND_MFR_EON 0x92
#define NAND_MFR_SANDISK 0x45
#define NAND_MFR_INTEL 0x89
+#define NAND_MFR_ATO 0x9b
/* The maximum expected count of bytes in the NAND ID sequence */
#define NAND_MAX_ID_LEN 8
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 046a0a2e4c4..63aeccf9ddc 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -116,6 +116,10 @@ enum spi_nor_ops {
SPI_NOR_OPS_UNLOCK,
};
+enum spi_nor_option_flags {
+ SNOR_F_USE_FSR = BIT(0),
+};
+
/**
 * struct spi_nor - Structure for defining the SPI NOR layer
 * @mtd: points to an mtd_info structure
@@ -129,6 +133,7 @@ enum spi_nor_ops {
* @program_opcode: the program opcode
* @flash_read: the mode of the read
* @sst_write_second: used by the SST write operation
+ * @flags: flag options for the current SPI-NOR (SNOR_F_*)
* @cfg: used by the read_xfer/write_xfer
* @cmd_buf: used by the write_reg
* @prepare: [OPTIONAL] do some preparations for the
@@ -139,9 +144,6 @@ enum spi_nor_ops {
 * @write_xfer: [OPTIONAL] the write fundamental primitive
* @read_reg: [DRIVER-SPECIFIC] read out the register
* @write_reg: [DRIVER-SPECIFIC] write data to the register
- * @read_id: [REPLACEABLE] read out the ID data, and find
- * the proper spi_device_id
- * @wait_till_ready: [REPLACEABLE] wait till the NOR becomes ready
* @read: [DRIVER-SPECIFIC] read data from the SPI NOR
* @write: [DRIVER-SPECIFIC] write data to the SPI NOR
* @erase: [DRIVER-SPECIFIC] erase a sector of the SPI NOR
@@ -160,6 +162,7 @@ struct spi_nor {
u8 program_opcode;
enum read_mode flash_read;
bool sst_write_second;
+ u32 flags;
struct spi_nor_xfer_cfg cfg;
u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE];
@@ -172,8 +175,6 @@ struct spi_nor {
int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len);
int (*write_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len,
int write_enable);
- const struct spi_device_id *(*read_id)(struct spi_nor *nor);
- int (*wait_till_ready)(struct spi_nor *nor);
int (*read)(struct spi_nor *nor, loff_t from,
size_t len, size_t *retlen, u_char *read_buf);
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 492de72560f..c8990779f0c 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -7,21 +7,10 @@
#include <linux/path.h>
struct vfsmount;
+struct nameidata;
enum { MAX_NESTED_LINKS = 8 };
-struct nameidata {
- struct path path;
- struct qstr last;
- struct path root;
- struct inode *inode; /* path.dentry.d_inode */
- unsigned int flags;
- unsigned seq, m_seq;
- int last_type;
- unsigned depth;
- char *saved_names[MAX_NESTED_LINKS + 1];
-};
-
/*
* Type of the last component on LOOKUP_PARENT
*/
@@ -82,16 +71,8 @@ extern struct dentry *lock_rename(struct dentry *, struct dentry *);
extern void unlock_rename(struct dentry *, struct dentry *);
extern void nd_jump_link(struct nameidata *nd, struct path *path);
-
-static inline void nd_set_link(struct nameidata *nd, char *path)
-{
- nd->saved_names[nd->depth] = path;
-}
-
-static inline char *nd_get_link(struct nameidata *nd)
-{
- return nd->saved_names[nd->depth];
-}
+extern void nd_set_link(struct nameidata *nd, char *path);
+extern char *nd_get_link(struct nameidata *nd);
static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
{
diff --git a/include/linux/ns_common.h b/include/linux/ns_common.h
new file mode 100644
index 00000000000..85a5c8c16be
--- /dev/null
+++ b/include/linux/ns_common.h
@@ -0,0 +1,12 @@
+#ifndef _LINUX_NS_COMMON_H
+#define _LINUX_NS_COMMON_H
+
+struct proc_ns_operations;
+
+struct ns_common {
+ atomic_long_t stashed;
+ const struct proc_ns_operations *ops;
+ unsigned int inum;
+};
+
+#endif
diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h
index 51a560f34bc..16c75547d72 100644
--- a/include/linux/of_iommu.h
+++ b/include/linux/of_iommu.h
@@ -1,12 +1,19 @@
#ifndef __OF_IOMMU_H
#define __OF_IOMMU_H
+#include <linux/device.h>
+#include <linux/iommu.h>
+#include <linux/of.h>
+
#ifdef CONFIG_OF_IOMMU
extern int of_get_dma_window(struct device_node *dn, const char *prefix,
int index, unsigned long *busno, dma_addr_t *addr,
size_t *size);
+extern void of_iommu_init(void);
+extern struct iommu_ops *of_iommu_configure(struct device *dev);
+
#else
static inline int of_get_dma_window(struct device_node *dn, const char *prefix,
@@ -16,6 +23,22 @@ static inline int of_get_dma_window(struct device_node *dn, const char *prefix,
return -EINVAL;
}
+static inline void of_iommu_init(void) { }
+static inline struct iommu_ops *of_iommu_configure(struct device *dev)
+{
+ return NULL;
+}
+
#endif /* CONFIG_OF_IOMMU */
+void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops);
+struct iommu_ops *of_iommu_get_ops(struct device_node *np);
+
+extern struct of_device_id __iommu_of_table;
+
+typedef int (*of_iommu_init_fn)(struct device_node *);
+
+#define IOMMU_OF_DECLARE(name, compat, fn) \
+ _OF_DECLARE(iommu, name, compat, fn, of_iommu_init_fn)
+
#endif /* __OF_IOMMU_H */
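
A minimal sketch of the new declaration macro in use; the compatible string and the init body are assumptions:

/* Hedged sketch: an early, OF-declared IOMMU init hook. */
static int example_iommu_of_init(struct device_node *np)
{
	/* map registers, prepare a struct iommu_ops, then: */
	/* of_iommu_set_ops(np, &example_iommu_ops); */
	return 0;
}
IOMMU_OF_DECLARE(example_iommu, "vendor,example-iommu", example_iommu_of_init);
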
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 44a27696ab6..360a966a97a 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -349,6 +349,7 @@ struct pci_dev {
unsigned int __aer_firmware_first:1;
unsigned int broken_intx_masking:1;
unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */
+ unsigned int irq_managed:1;
pci_dev_flags_t dev_flags;
atomic_t enable_cnt; /* pci_enable_device has been called */
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
index f2ca1b45937..7e75bfe37cc 100644
--- a/include/linux/phy_fixed.h
+++ b/include/linux/phy_fixed.h
@@ -11,7 +11,7 @@ struct fixed_phy_status {
struct device_node;
-#ifdef CONFIG_FIXED_PHY
+#if IS_ENABLED(CONFIG_FIXED_PHY)
extern int fixed_phy_add(unsigned int irq, int phy_id,
struct fixed_phy_status *status);
extern struct phy_device *fixed_phy_register(unsigned int irq,
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index 1997ffc295a..b9cf6c51b18 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -8,6 +8,7 @@
#include <linux/threads.h>
#include <linux/nsproxy.h>
#include <linux/kref.h>
+#include <linux/ns_common.h>
struct pidmap {
atomic_t nr_free;
@@ -43,7 +44,7 @@ struct pid_namespace {
kgid_t pid_gid;
int hide_pid;
int reboot; /* group exit code if this pidns was rebooted */
- unsigned int proc_inum;
+ struct ns_common ns;
};
extern struct pid_namespace init_pid_ns;
diff --git a/include/linux/platform_data/rcar-du.h b/include/linux/platform_data/rcar-du.h
deleted file mode 100644
index a5f045e1d8f..00000000000
--- a/include/linux/platform_data/rcar-du.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * rcar_du.h -- R-Car Display Unit DRM driver
- *
- * Copyright (C) 2013 Renesas Corporation
- *
- * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __RCAR_DU_H__
-#define __RCAR_DU_H__
-
-#include <video/videomode.h>
-
-enum rcar_du_output {
- RCAR_DU_OUTPUT_DPAD0,
- RCAR_DU_OUTPUT_DPAD1,
- RCAR_DU_OUTPUT_LVDS0,
- RCAR_DU_OUTPUT_LVDS1,
- RCAR_DU_OUTPUT_TCON,
- RCAR_DU_OUTPUT_MAX,
-};
-
-enum rcar_du_encoder_type {
- RCAR_DU_ENCODER_UNUSED = 0,
- RCAR_DU_ENCODER_NONE,
- RCAR_DU_ENCODER_VGA,
- RCAR_DU_ENCODER_LVDS,
-};
-
-struct rcar_du_panel_data {
- unsigned int width_mm; /* Panel width in mm */
- unsigned int height_mm; /* Panel height in mm */
- struct videomode mode;
-};
-
-struct rcar_du_connector_lvds_data {
- struct rcar_du_panel_data panel;
-};
-
-struct rcar_du_connector_vga_data {
- /* TODO: Add DDC information for EDID retrieval */
-};
-
-/*
- * struct rcar_du_encoder_data - Encoder platform data
- * @type: the encoder type (RCAR_DU_ENCODER_*)
- * @output: the DU output the connector is connected to (RCAR_DU_OUTPUT_*)
- * @connector.lvds: platform data for LVDS connectors
- * @connector.vga: platform data for VGA connectors
- *
- * Encoder platform data describes an on-board encoder, its associated DU SoC
- * output, and the connector.
- */
-struct rcar_du_encoder_data {
- enum rcar_du_encoder_type type;
- enum rcar_du_output output;
-
- union {
- struct rcar_du_connector_lvds_data lvds;
- struct rcar_du_connector_vga_data vga;
- } connector;
-};
-
-struct rcar_du_platform_data {
- struct rcar_du_encoder_data *encoders;
- unsigned int num_encoders;
-};
-
-#endif /* __RCAR_DU_H__ */
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 66a656eb335..8b597636461 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -351,8 +351,6 @@ struct dev_pm_ops {
#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn)
#endif
-#define SET_PM_RUNTIME_PM_OPS SET_RUNTIME_PM_OPS
-
/*
* Use this if you want to use the same suspend and resume callbacks for suspend
* to RAM and hibernation.
diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
index 34a1e105bef..42dfc615dbf 100644
--- a/include/linux/proc_ns.h
+++ b/include/linux/proc_ns.h
@@ -4,21 +4,18 @@
#ifndef _LINUX_PROC_NS_H
#define _LINUX_PROC_NS_H
+#include <linux/ns_common.h>
+
struct pid_namespace;
struct nsproxy;
+struct path;
struct proc_ns_operations {
const char *name;
int type;
- void *(*get)(struct task_struct *task);
- void (*put)(void *ns);
- int (*install)(struct nsproxy *nsproxy, void *ns);
- unsigned int (*inum)(void *ns);
-};
-
-struct proc_ns {
- void *ns;
- const struct proc_ns_operations *ns_ops;
+ struct ns_common *(*get)(struct task_struct *task);
+ void (*put)(struct ns_common *ns);
+ int (*install)(struct nsproxy *nsproxy, struct ns_common *ns);
};
extern const struct proc_ns_operations netns_operations;
@@ -43,32 +40,38 @@ enum {
extern int pid_ns_prepare_proc(struct pid_namespace *ns);
extern void pid_ns_release_proc(struct pid_namespace *ns);
-extern struct file *proc_ns_fget(int fd);
-extern struct proc_ns *get_proc_ns(struct inode *);
extern int proc_alloc_inum(unsigned int *pino);
extern void proc_free_inum(unsigned int inum);
-extern bool proc_ns_inode(struct inode *inode);
#else /* CONFIG_PROC_FS */
static inline int pid_ns_prepare_proc(struct pid_namespace *ns) { return 0; }
static inline void pid_ns_release_proc(struct pid_namespace *ns) {}
-static inline struct file *proc_ns_fget(int fd)
-{
- return ERR_PTR(-EINVAL);
-}
-
-static inline struct proc_ns *get_proc_ns(struct inode *inode) { return NULL; }
-
static inline int proc_alloc_inum(unsigned int *inum)
{
*inum = 1;
return 0;
}
static inline void proc_free_inum(unsigned int inum) {}
-static inline bool proc_ns_inode(struct inode *inode) { return false; }
#endif /* CONFIG_PROC_FS */
+static inline int ns_alloc_inum(struct ns_common *ns)
+{
+ atomic_long_set(&ns->stashed, 0);
+ return proc_alloc_inum(&ns->inum);
+}
+
+#define ns_free_inum(ns) proc_free_inum((ns)->inum)
+
+extern struct file *proc_ns_fget(int fd);
+#define get_proc_ns(inode) ((struct ns_common *)(inode)->i_private)
+extern void *ns_get_path(struct path *path, struct task_struct *task,
+ const struct proc_ns_operations *ns_ops);
+
+extern int ns_get_name(char *buf, size_t size, struct task_struct *task,
+ const struct proc_ns_operations *ns_ops);
+extern void nsfs_init(void);
+
#endif /* _LINUX_PROC_NS_H */
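
With the per-namespace proc_inum replaced by an embedded struct ns_common, each namespace type allocates and releases its inode number through the new wrappers. A hedged sketch with a hypothetical foo_namespace; only ns_alloc_inum()/ns_free_inum() come from this header, the rest is illustrative:

#include <linux/err.h>
#include <linux/ns_common.h>
#include <linux/proc_ns.h>
#include <linux/slab.h>

struct foo_namespace {
	struct ns_common ns;	/* replaces the old "unsigned int proc_inum" */
	/* ... namespace-specific state ... */
};

static struct foo_namespace *create_foo_ns(void)
{
	struct foo_namespace *new_ns;
	int err;

	new_ns = kzalloc(sizeof(*new_ns), GFP_KERNEL);
	if (!new_ns)
		return ERR_PTR(-ENOMEM);

	err = ns_alloc_inum(&new_ns->ns);	/* was proc_alloc_inum(&...->proc_inum) */
	if (err) {
		kfree(new_ns);
		return ERR_PTR(err);
	}
	return new_ns;
}

static void free_foo_ns(struct foo_namespace *ns)
{
	ns_free_inum(&ns->ns);			/* was proc_free_inum(...->proc_inum) */
	kfree(ns);
}
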
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 21678464883..6f22cfeef5e 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -26,10 +26,10 @@ typedef int (*svc_thread_fn)(void *);
/* statistics for svc_pool structures */
struct svc_pool_stats {
- unsigned long packets;
+ atomic_long_t packets;
unsigned long sockets_queued;
- unsigned long threads_woken;
- unsigned long threads_timedout;
+ atomic_long_t threads_woken;
+ atomic_long_t threads_timedout;
};
/*
@@ -45,12 +45,13 @@ struct svc_pool_stats {
struct svc_pool {
unsigned int sp_id; /* pool id; also node id on NUMA */
spinlock_t sp_lock; /* protects all fields */
- struct list_head sp_threads; /* idle server threads */
struct list_head sp_sockets; /* pending sockets */
unsigned int sp_nrthreads; /* # of threads in pool */
struct list_head sp_all_threads; /* all server threads */
struct svc_pool_stats sp_stats; /* statistics on pool operation */
- int sp_task_pending;/* has pending task */
+#define SP_TASK_PENDING (0) /* still work to do even if no
+ * xprt is queued. */
+ unsigned long sp_flags;
} ____cacheline_aligned_in_smp;
/*
@@ -219,8 +220,8 @@ static inline void svc_putu32(struct kvec *iov, __be32 val)
* processed.
*/
struct svc_rqst {
- struct list_head rq_list; /* idle list */
struct list_head rq_all; /* all threads list */
+ struct rcu_head rq_rcu_head; /* for RCU deferred kfree */
struct svc_xprt * rq_xprt; /* transport ptr */
struct sockaddr_storage rq_addr; /* peer address */
@@ -236,7 +237,6 @@ struct svc_rqst {
struct svc_cred rq_cred; /* auth info */
void * rq_xprt_ctxt; /* transport specific context ptr */
struct svc_deferred_req*rq_deferred; /* deferred request we are replaying */
- bool rq_usedeferral; /* use deferral */
size_t rq_xprt_hlen; /* xprt header len */
struct xdr_buf rq_arg;
@@ -253,9 +253,17 @@ struct svc_rqst {
u32 rq_vers; /* program version */
u32 rq_proc; /* procedure number */
u32 rq_prot; /* IP protocol */
- unsigned short
- rq_secure : 1; /* secure port */
- unsigned short rq_local : 1; /* local request */
+ int rq_cachetype; /* catering to nfsd */
+#define RQ_SECURE (0) /* secure port */
+#define RQ_LOCAL (1) /* local request */
+#define RQ_USEDEFERRAL (2) /* use deferral */
+#define RQ_DROPME (3) /* drop current reply */
+#define RQ_SPLICE_OK (4) /* turned off in gss privacy
+ * to prevent encrypting page
+ * cache pages */
+#define RQ_VICTIM (5) /* about to be shut down */
+#define RQ_BUSY (6) /* request is busy */
+ unsigned long rq_flags; /* flags field */
void * rq_argp; /* decoded arguments */
void * rq_resp; /* xdr'd results */
@@ -271,16 +279,12 @@ struct svc_rqst {
struct cache_req rq_chandle; /* handle passed to caches for
* request delaying
*/
- bool rq_dropme;
/* Catering to nfsd */
struct auth_domain * rq_client; /* RPC peer info */
struct auth_domain * rq_gssclient; /* "gss/"-style peer info */
- int rq_cachetype;
struct svc_cacherep * rq_cacherep; /* cache info */
- bool rq_splice_ok; /* turned off in gss privacy
- * to prevent encrypting page
- * cache pages */
struct task_struct *rq_task; /* service thread */
+ spinlock_t rq_lock; /* per-request lock */
};
#define SVC_NET(svc_rqst) (svc_rqst->rq_xprt->xpt_net)
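
Because rq_secure, rq_local, rq_usedeferral, rq_dropme and rq_splice_ok collapse into bit numbers within the atomic rq_flags word, callers move from direct assignments to the bitops API. An illustrative sketch (the function is invented; only the RQ_* bits come from this header):

#include <linux/bitops.h>
#include <linux/sunrpc/svc.h>

static bool example_rqst_flag_usage(struct svc_rqst *rqstp)
{
	/* was: if (!rqstp->rq_secure) ... */
	if (!test_bit(RQ_SECURE, &rqstp->rq_flags))
		return false;

	/* was: rqstp->rq_splice_ok = false; (gss privacy path) */
	clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);

	/* was: rqstp->rq_dropme = true; */
	set_bit(RQ_DROPME, &rqstp->rq_flags);

	return true;
}
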
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index ce6e4182a5b..79f6f8f3dc0 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -63,10 +63,9 @@ struct svc_xprt {
#define XPT_CHNGBUF 7 /* need to change snd/rcv buf sizes */
#define XPT_DEFERRED 8 /* deferred request pending */
#define XPT_OLD 9 /* used for xprt aging mark+sweep */
-#define XPT_DETACHED 10 /* detached from tempsocks list */
-#define XPT_LISTENER 11 /* listening endpoint */
-#define XPT_CACHE_AUTH 12 /* cache auth info */
-#define XPT_LOCAL 13 /* connection from loopback interface */
+#define XPT_LISTENER 10 /* listening endpoint */
+#define XPT_CACHE_AUTH 11 /* cache auth info */
+#define XPT_LOCAL 12 /* connection from loopback interface */
struct svc_serv *xpt_server; /* service for transport */
atomic_t xpt_reserved; /* space on outq that is rsvd */
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index ef90838b36a..c611a02fbc5 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -29,10 +29,10 @@
#include <linux/idr.h>
#include <linux/device.h>
#include <linux/workqueue.h>
+#include <uapi/linux/thermal.h>
#define THERMAL_TRIPS_NONE -1
#define THERMAL_MAX_TRIPS 12
-#define THERMAL_NAME_LENGTH 20
/* invalid cooling state */
#define THERMAL_CSTATE_INVALID -1UL
@@ -49,11 +49,6 @@
#define MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, off) (((t) / 100) + (off))
#define MILLICELSIUS_TO_DECI_KELVIN(t) MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, 2732)
-/* Adding event notification support elements */
-#define THERMAL_GENL_FAMILY_NAME "thermal_event"
-#define THERMAL_GENL_VERSION 0x01
-#define THERMAL_GENL_MCAST_GROUP_NAME "thermal_mc_grp"
-
/* Default Thermal Governor */
#if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE)
#define DEFAULT_THERMAL_GOVERNOR "step_wise"
@@ -86,30 +81,6 @@ enum thermal_trend {
THERMAL_TREND_DROP_FULL, /* apply lowest cooling action */
};
-/* Events supported by Thermal Netlink */
-enum events {
- THERMAL_AUX0,
- THERMAL_AUX1,
- THERMAL_CRITICAL,
- THERMAL_DEV_FAULT,
-};
-
-/* attributes of thermal_genl_family */
-enum {
- THERMAL_GENL_ATTR_UNSPEC,
- THERMAL_GENL_ATTR_EVENT,
- __THERMAL_GENL_ATTR_MAX,
-};
-#define THERMAL_GENL_ATTR_MAX (__THERMAL_GENL_ATTR_MAX - 1)
-
-/* commands supported by the thermal_genl_family */
-enum {
- THERMAL_GENL_CMD_UNSPEC,
- THERMAL_GENL_CMD_EVENT,
- __THERMAL_GENL_CMD_MAX,
-};
-#define THERMAL_GENL_CMD_MAX (__THERMAL_GENL_CMD_MAX - 1)
-
struct thermal_zone_device_ops {
int (*bind) (struct thermal_zone_device *,
struct thermal_cooling_device *);
@@ -289,19 +260,49 @@ struct thermal_genl_event {
enum events event;
};
+/**
+ * struct thermal_zone_of_device_ops - callbacks for handling DT-based zones
+ *
+ * Mandatory:
+ * @get_temp: a pointer to a function that reads the sensor temperature.
+ *
+ * Optional:
+ * @get_trend: a pointer to a function that reads the sensor temperature trend.
+ * @set_emul_temp: a pointer to a function that sets sensor emulated
+ * temperature.
+ */
+struct thermal_zone_of_device_ops {
+ int (*get_temp)(void *, long *);
+ int (*get_trend)(void *, long *);
+ int (*set_emul_temp)(void *, unsigned long);
+};
+
+/**
+ * struct thermal_trip - representation of a point in temperature domain
+ * @np: pointer to struct device_node that this trip point was created from
+ * @temperature: temperature value in millicelsius
+ * @hysteresis: relative hysteresis in millicelsius
+ * @type: trip point type
+ */
+
+struct thermal_trip {
+ struct device_node *np;
+ unsigned long int temperature;
+ unsigned long int hysteresis;
+ enum thermal_trip_type type;
+};
+
/* Function declarations */
#ifdef CONFIG_THERMAL_OF
struct thermal_zone_device *
-thermal_zone_of_sensor_register(struct device *dev, int id,
- void *data, int (*get_temp)(void *, long *),
- int (*get_trend)(void *, long *));
+thermal_zone_of_sensor_register(struct device *dev, int id, void *data,
+ const struct thermal_zone_of_device_ops *ops);
void thermal_zone_of_sensor_unregister(struct device *dev,
struct thermal_zone_device *tz);
#else
static inline struct thermal_zone_device *
-thermal_zone_of_sensor_register(struct device *dev, int id,
- void *data, int (*get_temp)(void *, long *),
- int (*get_trend)(void *, long *))
+thermal_zone_of_sensor_register(struct device *dev, int id, void *data,
+ const struct thermal_zone_of_device_ops *ops)
{
return NULL;
}
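
thermal_zone_of_sensor_register() now takes a const struct thermal_zone_of_device_ops rather than bare get_temp/get_trend pointers. A minimal sketch of a DT sensor driver adapting to the new prototype; the register layout, scaling and names are assumptions, only the ops struct and registration call come from this header:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/thermal.h>

struct my_sensor {
	void __iomem *base;
	struct thermal_zone_device *tzd;
};

static int my_sensor_get_temp(void *data, long *temp)
{
	struct my_sensor *s = data;

	*temp = readl(s->base) * 1000;	/* report millicelsius */
	return 0;
}

static const struct thermal_zone_of_device_ops my_sensor_ops = {
	.get_temp = my_sensor_get_temp,	/* the only mandatory callback */
};

static int my_sensor_register(struct device *dev, struct my_sensor *s)
{
	s->tzd = thermal_zone_of_sensor_register(dev, 0, s, &my_sensor_ops);
	return PTR_ERR_OR_ZERO(s->tzd);
}
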
diff --git a/include/linux/uio.h b/include/linux/uio.h
index a41e252396c..1c5e453f7ea 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -101,6 +101,11 @@ static inline size_t iov_iter_count(struct iov_iter *i)
return i->count;
}
+static inline bool iter_is_iovec(struct iov_iter *i)
+{
+ return !(i->type & (ITER_BVEC | ITER_KVEC));
+}
+
/*
* Cap the iov_iter by given limit; note that the second argument is
* *not* the new size - it's upper limit for such. Passing it a value
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index e95372654f0..8297e5b341d 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -3,6 +3,7 @@
#include <linux/kref.h>
#include <linux/nsproxy.h>
+#include <linux/ns_common.h>
#include <linux/sched.h>
#include <linux/err.h>
@@ -17,6 +18,10 @@ struct uid_gid_map { /* 64 bytes -- 1 cache line */
} extent[UID_GID_MAP_MAX_EXTENTS];
};
+#define USERNS_SETGROUPS_ALLOWED 1UL
+
+#define USERNS_INIT_FLAGS USERNS_SETGROUPS_ALLOWED
+
struct user_namespace {
struct uid_gid_map uid_map;
struct uid_gid_map gid_map;
@@ -26,7 +31,8 @@ struct user_namespace {
int level;
kuid_t owner;
kgid_t group;
- unsigned int proc_inum;
+ struct ns_common ns;
+ unsigned long flags;
/* Register of per-UID persistent keyrings for this namespace */
#ifdef CONFIG_PERSISTENT_KEYRINGS
@@ -63,6 +69,9 @@ extern const struct seq_operations proc_projid_seq_operations;
extern ssize_t proc_uid_map_write(struct file *, const char __user *, size_t, loff_t *);
extern ssize_t proc_gid_map_write(struct file *, const char __user *, size_t, loff_t *);
extern ssize_t proc_projid_map_write(struct file *, const char __user *, size_t, loff_t *);
+extern ssize_t proc_setgroups_write(struct file *, const char __user *, size_t, loff_t *);
+extern int proc_setgroups_show(struct seq_file *m, void *v);
+extern bool userns_may_setgroups(const struct user_namespace *ns);
#else
static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
@@ -87,6 +96,10 @@ static inline void put_user_ns(struct user_namespace *ns)
{
}
+static inline bool userns_may_setgroups(const struct user_namespace *ns)
+{
+ return true;
+}
#endif
#endif /* _LINUX_USER_H */
diff --git a/include/linux/utsname.h b/include/linux/utsname.h
index 239e27733d6..5093f58ae19 100644
--- a/include/linux/utsname.h
+++ b/include/linux/utsname.h
@@ -5,6 +5,7 @@
#include <linux/sched.h>
#include <linux/kref.h>
#include <linux/nsproxy.h>
+#include <linux/ns_common.h>
#include <linux/err.h>
#include <uapi/linux/utsname.h>
@@ -23,7 +24,7 @@ struct uts_namespace {
struct kref kref;
struct new_utsname name;
struct user_namespace *user_ns;
- unsigned int proc_inum;
+ struct ns_common ns;
};
extern struct uts_namespace init_uts_ns;
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index d09e0938fd6..28f0e65b9a1 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -81,7 +81,7 @@ void *virtqueue_get_used(struct virtqueue *vq);
/**
* virtio_device - representation of a device using virtio
* @index: unique position on the virtio bus
- * @failed: saved value for CONFIG_S_FAILED bit (for restore)
+ * @failed: saved value for VIRTIO_CONFIG_S_FAILED bit (for restore)
* @config_enabled: configuration change reporting enabled
* @config_change_pending: configuration change reported while disabled
* @config_lock: protects configuration change reporting
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 7979f850e7a..ca3ed78e5ec 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -19,6 +19,9 @@
* offset: the offset of the configuration field
* buf: the buffer to read the field value from.
* len: the length of the buffer
+ * @generation: config generation counter
+ * vdev: the virtio_device
+ * Returns the config generation counter
* @get_status: read the status byte
* vdev: the virtio_device
* Returns the status byte
@@ -60,6 +63,7 @@ struct virtio_config_ops {
void *buf, unsigned len);
void (*set)(struct virtio_device *vdev, unsigned offset,
const void *buf, unsigned len);
+ u32 (*generation)(struct virtio_device *vdev);
u8 (*get_status)(struct virtio_device *vdev);
void (*set_status)(struct virtio_device *vdev, u8 status);
void (*reset)(struct virtio_device *vdev);
@@ -301,11 +305,33 @@ static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset)
return ret;
}
+/* Read @count fields, @bytes each. */
+static inline void __virtio_cread_many(struct virtio_device *vdev,
+ unsigned int offset,
+ void *buf, size_t count, size_t bytes)
+{
+ u32 old, gen = vdev->config->generation ?
+ vdev->config->generation(vdev) : 0;
+ int i;
+
+ do {
+ old = gen;
+
+ for (i = 0; i < count; i++)
+ vdev->config->get(vdev, offset + bytes * i,
+ buf + i * bytes, bytes);
+
+ gen = vdev->config->generation ?
+ vdev->config->generation(vdev) : 0;
+ } while (gen != old);
+}
+
+
static inline void virtio_cread_bytes(struct virtio_device *vdev,
unsigned int offset,
void *buf, size_t len)
{
- vdev->config->get(vdev, offset, buf, len);
+ __virtio_cread_many(vdev, offset, buf, len, 1);
}
static inline void virtio_cwrite8(struct virtio_device *vdev,
@@ -349,6 +375,7 @@ static inline u64 virtio_cread64(struct virtio_device *vdev,
{
u64 ret;
vdev->config->get(vdev, offset, &ret, sizeof(ret));
+ __virtio_cread_many(vdev, offset, &ret, 1, sizeof(ret));
return virtio64_to_cpu(vdev, (__force __virtio64)ret);
}
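
The new generation callback lets __virtio_cread_many() retry until the device reports a stable config generation, so multi-byte reads through virtio_cread_bytes() can no longer observe a half-updated field. A short hedged example; the offset and field layout are illustrative:

#include <linux/virtio_config.h>

/* Read a 6-byte field (e.g. a MAC address) in one generation-checked pass:
 * if the device changes its config between the first and last byte, the
 * loop in __virtio_cread_many() re-reads the whole range. */
static void example_read_mac(struct virtio_device *vdev, u8 *mac)
{
	virtio_cread_bytes(vdev, 0 /* offsetof(struct cfg, mac) */, mac, 6);
}
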
diff --git a/include/linux/vringh.h b/include/linux/vringh.h
index 749cde28728..a3fa537e717 100644
--- a/include/linux/vringh.h
+++ b/include/linux/vringh.h
@@ -24,12 +24,16 @@
#ifndef _LINUX_VRINGH_H
#define _LINUX_VRINGH_H
#include <uapi/linux/virtio_ring.h>
+#include <linux/virtio_byteorder.h>
#include <linux/uio.h>
#include <linux/slab.h>
#include <asm/barrier.h>
/* virtio_ring with information needed for host access. */
struct vringh {
+ /* Everything is little endian */
+ bool little_endian;
+
/* Guest publishes used event idx (note: we always do). */
bool event_indices;
@@ -105,7 +109,7 @@ struct vringh_kiov {
#define VRINGH_IOV_ALLOCATED 0x8000000
/* Helpers for userspace vrings. */
-int vringh_init_user(struct vringh *vrh, u32 features,
+int vringh_init_user(struct vringh *vrh, u64 features,
unsigned int num, bool weak_barriers,
struct vring_desc __user *desc,
struct vring_avail __user *avail,
@@ -167,7 +171,7 @@ bool vringh_notify_enable_user(struct vringh *vrh);
void vringh_notify_disable_user(struct vringh *vrh);
/* Helpers for kernelspace vrings. */
-int vringh_init_kern(struct vringh *vrh, u32 features,
+int vringh_init_kern(struct vringh *vrh, u64 features,
unsigned int num, bool weak_barriers,
struct vring_desc *desc,
struct vring_avail *avail,
@@ -222,4 +226,33 @@ static inline void vringh_notify(struct vringh *vrh)
vrh->notify(vrh);
}
+static inline u16 vringh16_to_cpu(const struct vringh *vrh, __virtio16 val)
+{
+ return __virtio16_to_cpu(vrh->little_endian, val);
+}
+
+static inline __virtio16 cpu_to_vringh16(const struct vringh *vrh, u16 val)
+{
+ return __cpu_to_virtio16(vrh->little_endian, val);
+}
+
+static inline u32 vringh32_to_cpu(const struct vringh *vrh, __virtio32 val)
+{
+ return __virtio32_to_cpu(vrh->little_endian, val);
+}
+
+static inline __virtio32 cpu_to_vringh32(const struct vringh *vrh, u32 val)
+{
+ return __cpu_to_virtio32(vrh->little_endian, val);
+}
+
+static inline u64 vringh64_to_cpu(const struct vringh *vrh, __virtio64 val)
+{
+ return __virtio64_to_cpu(vrh->little_endian, val);
+}
+
+static inline __virtio64 cpu_to_vringh64(const struct vringh *vrh, u64 val)
+{
+ return __cpu_to_virtio64(vrh->little_endian, val);
+}
#endif /* _LINUX_VRINGH_H */
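
With the little_endian flag and the new conversion helpers, host-side ring code can parse guest descriptors whether the ring uses the legacy native-endian layout or the virtio 1.0 little-endian one. A hedged sketch, assuming vring_desc.flags is a __virtio16 as in the uapi header:

#include <linux/vringh.h>

static bool example_desc_has_next(const struct vringh *vrh,
				  const struct vring_desc *desc)
{
	/* Convert from ring endianness to CPU endianness before testing. */
	u16 flags = vringh16_to_cpu(vrh, desc->flags);

	return flags & VRING_DESC_F_NEXT;
}
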
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index e0d64667a4b..2e8756b8c77 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -26,6 +26,7 @@
#endif
#include <net/netns/nftables.h>
#include <net/netns/xfrm.h>
+#include <linux/ns_common.h>
struct user_namespace;
struct proc_dir_entry;
@@ -60,7 +61,7 @@ struct net {
struct user_namespace *user_ns; /* Owning user namespace */
- unsigned int proc_inum;
+ struct ns_common ns;
struct proc_dir_entry *proc_net;
struct proc_dir_entry *proc_net_stat;
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index a2bf41e0bde..2d83cfd7e6c 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -38,11 +38,12 @@
#include <linux/workqueue.h>
struct ib_ucontext;
+struct ib_umem_odp;
struct ib_umem {
struct ib_ucontext *context;
size_t length;
- int offset;
+ unsigned long address;
int page_size;
int writable;
int hugetlb;
@@ -50,17 +51,43 @@ struct ib_umem {
struct pid *pid;
struct mm_struct *mm;
unsigned long diff;
+ struct ib_umem_odp *odp_data;
struct sg_table sg_head;
int nmap;
int npages;
};
+/* Returns the offset of the umem start relative to the first page. */
+static inline int ib_umem_offset(struct ib_umem *umem)
+{
+ return umem->address & ((unsigned long)umem->page_size - 1);
+}
+
+/* Returns the address of the first page of an ODP umem. */
+static inline unsigned long ib_umem_start(struct ib_umem *umem)
+{
+ return umem->address - ib_umem_offset(umem);
+}
+
+/* Returns the address of the page after the last one of an ODP umem. */
+static inline unsigned long ib_umem_end(struct ib_umem *umem)
+{
+ return PAGE_ALIGN(umem->address + umem->length);
+}
+
+static inline size_t ib_umem_num_pages(struct ib_umem *umem)
+{
+ return (ib_umem_end(umem) - ib_umem_start(umem)) >> PAGE_SHIFT;
+}
+
#ifdef CONFIG_INFINIBAND_USER_MEM
struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
size_t size, int access, int dmasync);
void ib_umem_release(struct ib_umem *umem);
int ib_umem_page_count(struct ib_umem *umem);
+int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
+ size_t length);
#else /* CONFIG_INFINIBAND_USER_MEM */
@@ -73,7 +100,10 @@ static inline struct ib_umem *ib_umem_get(struct ib_ucontext *context,
}
static inline void ib_umem_release(struct ib_umem *umem) { }
static inline int ib_umem_page_count(struct ib_umem *umem) { return 0; }
-
+static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
+ size_t length) {
+ return -EINVAL;
+}
#endif /* CONFIG_INFINIBAND_USER_MEM */
#endif /* IB_UMEM_H */
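
Since struct ib_umem now records the untruncated start address instead of a page offset, drivers derive their page bookkeeping from the helpers above. A small illustrative sketch (the function is invented):

#include <linux/printk.h>
#include <rdma/ib_umem.h>

static void example_umem_geometry(struct ib_umem *umem)
{
	unsigned long first = ib_umem_start(umem);	/* page-aligned base */
	unsigned long last = ib_umem_end(umem);		/* end of the last page */
	size_t npages = ib_umem_num_pages(umem);
	int offset = ib_umem_offset(umem);		/* intra-page start offset */

	pr_debug("umem %p: [%#lx, %#lx) spans %zu pages, offset %d\n",
		 umem, first, last, npages, offset);
}
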
diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h
new file mode 100644
index 00000000000..3da0b167041
--- /dev/null
+++ b/include/rdma/ib_umem_odp.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef IB_UMEM_ODP_H
+#define IB_UMEM_ODP_H
+
+#include <rdma/ib_umem.h>
+#include <rdma/ib_verbs.h>
+#include <linux/interval_tree.h>
+
+struct umem_odp_node {
+ u64 __subtree_last;
+ struct rb_node rb;
+};
+
+struct ib_umem_odp {
+ /*
+ * An array of the pages included in the on-demand paging umem.
+ * Indices of pages that are currently not mapped into the device will
+ * contain NULL.
+ */
+ struct page **page_list;
+ /*
+ * An array of the same size as page_list, with DMA addresses mapped
+ * for the pages in page_list. The lower two bits designate
+ * access permissions. See ODP_READ_ALLOWED_BIT and
+ * ODP_WRITE_ALLOWED_BIT.
+ */
+ dma_addr_t *dma_list;
+ /*
+ * The umem_mutex protects the page_list and dma_list fields of an ODP
+ * umem, allowing only a single thread to map/unmap pages. The mutex
+ * also protects access to the mmu notifier counters.
+ */
+ struct mutex umem_mutex;
+ void *private; /* for the HW driver to use. */
+
+ /* When false, use the notifier counter in the ucontext struct. */
+ bool mn_counters_active;
+ int notifiers_seq;
+ int notifiers_count;
+
+ /* A linked list of umems that don't have private mmu notifier
+ * counters yet. */
+ struct list_head no_private_counters;
+ struct ib_umem *umem;
+
+ /* Tree tracking */
+ struct umem_odp_node interval_tree;
+
+ struct completion notifier_completion;
+ int dying;
+};
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+
+int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem);
+
+void ib_umem_odp_release(struct ib_umem *umem);
+
+/*
+ * The lower 2 bits of the DMA address signal the R/W permissions for
+ * the entry. To upgrade the permissions, provide the appropriate
+ * bitmask to the map_dma_pages function.
+ *
+ * Be aware that upgrading a mapped address might result in a change of
+ * the DMA address for the page.
+ */
+#define ODP_READ_ALLOWED_BIT (1<<0ULL)
+#define ODP_WRITE_ALLOWED_BIT (1<<1ULL)
+
+#define ODP_DMA_ADDR_MASK (~(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT))
+
+int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 start_offset, u64 bcnt,
+ u64 access_mask, unsigned long current_seq);
+
+void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 start_offset,
+ u64 bound);
+
+void rbt_ib_umem_insert(struct umem_odp_node *node, struct rb_root *root);
+void rbt_ib_umem_remove(struct umem_odp_node *node, struct rb_root *root);
+typedef int (*umem_call_back)(struct ib_umem *item, u64 start, u64 end,
+ void *cookie);
+/*
+ * Call the callback on each ib_umem in the range. Returns the logical or of
+ * the return values of the functions called.
+ */
+int rbt_ib_umem_for_each_in_range(struct rb_root *root, u64 start, u64 end,
+ umem_call_back cb, void *cookie);
+
+struct umem_odp_node *rbt_ib_umem_iter_first(struct rb_root *root,
+ u64 start, u64 last);
+struct umem_odp_node *rbt_ib_umem_iter_next(struct umem_odp_node *node,
+ u64 start, u64 last);
+
+static inline int ib_umem_mmu_notifier_retry(struct ib_umem *item,
+ unsigned long mmu_seq)
+{
+ /*
+ * This code is strongly based on the KVM code from
+ * mmu_notifier_retry. Should be called with
+ * the relevant locks taken (item->odp_data->umem_mutex
+ * and the ucontext umem_mutex semaphore locked for read).
+ */
+
+ /* Do not allow page faults while the new ib_umem hasn't seen a state
+ * with zero notifiers yet, and doesn't have its own valid set of
+ * private counters. */
+ if (!item->odp_data->mn_counters_active)
+ return 1;
+
+ if (unlikely(item->odp_data->notifiers_count))
+ return 1;
+ if (item->odp_data->notifiers_seq != mmu_seq)
+ return 1;
+ return 0;
+}
+
+#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
+
+static inline int ib_umem_odp_get(struct ib_ucontext *context,
+ struct ib_umem *umem)
+{
+ return -EINVAL;
+}
+
+static inline void ib_umem_odp_release(struct ib_umem *umem) {}
+
+#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
+
+#endif /* IB_UMEM_ODP_H */
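
ib_umem_mmu_notifier_retry() is intended for the same sample-map-recheck pattern as KVM's mmu_notifier_retry. A hedged sketch of a page-fault path, with locking simplified and every hardware-specific step omitted (the function name and error handling are assumptions):

#include <linux/mutex.h>
#include <rdma/ib_umem_odp.h>

static int example_odp_fault(struct ib_umem *umem, u64 io_virt, u64 bcnt)
{
	unsigned long current_seq;
	int npages;

	/* Sample the notifier sequence before touching the page tables. */
	current_seq = ACCESS_ONCE(umem->odp_data->notifiers_seq);

	npages = ib_umem_odp_map_dma_pages(umem, io_virt, bcnt,
					   ODP_READ_ALLOWED_BIT |
					   ODP_WRITE_ALLOWED_BIT,
					   current_seq);
	if (npages < 0)
		return npages;

	mutex_lock(&umem->odp_data->umem_mutex);
	if (ib_umem_mmu_notifier_retry(umem, current_seq)) {
		/* An invalidation raced with the mapping; caller retries. */
		mutex_unlock(&umem->odp_data->umem_mutex);
		return -EAGAIN;
	}
	/* ... program the device page tables from umem->odp_data->dma_list ... */
	mutex_unlock(&umem->odp_data->umem_mutex);

	return npages;
}
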
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 470a011d6fa..0d74f1de99a 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -51,6 +51,7 @@
#include <uapi/linux/if_ether.h>
#include <linux/atomic.h>
+#include <linux/mmu_notifier.h>
#include <asm/uaccess.h>
extern struct workqueue_struct *ib_wq;
@@ -123,7 +124,8 @@ enum ib_device_cap_flags {
IB_DEVICE_MEM_WINDOW_TYPE_2A = (1<<23),
IB_DEVICE_MEM_WINDOW_TYPE_2B = (1<<24),
IB_DEVICE_MANAGED_FLOW_STEERING = (1<<29),
- IB_DEVICE_SIGNATURE_HANDOVER = (1<<30)
+ IB_DEVICE_SIGNATURE_HANDOVER = (1<<30),
+ IB_DEVICE_ON_DEMAND_PAGING = (1<<31),
};
enum ib_signature_prot_cap {
@@ -143,6 +145,27 @@ enum ib_atomic_cap {
IB_ATOMIC_GLOB
};
+enum ib_odp_general_cap_bits {
+ IB_ODP_SUPPORT = 1 << 0,
+};
+
+enum ib_odp_transport_cap_bits {
+ IB_ODP_SUPPORT_SEND = 1 << 0,
+ IB_ODP_SUPPORT_RECV = 1 << 1,
+ IB_ODP_SUPPORT_WRITE = 1 << 2,
+ IB_ODP_SUPPORT_READ = 1 << 3,
+ IB_ODP_SUPPORT_ATOMIC = 1 << 4,
+};
+
+struct ib_odp_caps {
+ uint64_t general_caps;
+ struct {
+ uint32_t rc_odp_caps;
+ uint32_t uc_odp_caps;
+ uint32_t ud_odp_caps;
+ } per_transport_caps;
+};
+
struct ib_device_attr {
u64 fw_ver;
__be64 sys_image_guid;
@@ -186,6 +209,7 @@ struct ib_device_attr {
u8 local_ca_ack_delay;
int sig_prot_cap;
int sig_guard_cap;
+ struct ib_odp_caps odp_caps;
};
enum ib_mtu {
@@ -1073,7 +1097,8 @@ enum ib_access_flags {
IB_ACCESS_REMOTE_READ = (1<<2),
IB_ACCESS_REMOTE_ATOMIC = (1<<3),
IB_ACCESS_MW_BIND = (1<<4),
- IB_ZERO_BASED = (1<<5)
+ IB_ZERO_BASED = (1<<5),
+ IB_ACCESS_ON_DEMAND = (1<<6),
};
struct ib_phys_buf {
@@ -1115,6 +1140,8 @@ struct ib_fmr_attr {
u8 page_shift;
};
+struct ib_umem;
+
struct ib_ucontext {
struct ib_device *device;
struct list_head pd_list;
@@ -1127,6 +1154,24 @@ struct ib_ucontext {
struct list_head xrcd_list;
struct list_head rule_list;
int closing;
+
+ struct pid *tgid;
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ struct rb_root umem_tree;
+ /*
+ * Protects the umem_tree, as well as odp_mrs_count and
+ * mmu notifier registration.
+ */
+ struct rw_semaphore umem_rwsem;
+ void (*invalidate_range)(struct ib_umem *umem,
+ unsigned long start, unsigned long end);
+
+ struct mmu_notifier mn;
+ atomic_t notifier_count;
+ /* A list of umems that don't have private mmu notifier counters yet. */
+ struct list_head no_private_counters;
+ int odp_mrs_count;
+#endif
};
struct ib_uobject {
@@ -1662,7 +1707,10 @@ static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t
static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
- return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
+ size_t copy_sz;
+
+ copy_sz = min_t(size_t, len, udata->outlen);
+ return copy_to_user(udata->outbuf, src, copy_sz) ? -EFAULT : 0;
}
/**
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 6364e23454d..3a4edd1f7db 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -441,13 +441,13 @@ static inline int scsi_execute_req(struct scsi_device *sdev,
extern void sdev_disable_disk_events(struct scsi_device *sdev);
extern void sdev_enable_disk_events(struct scsi_device *sdev);
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
extern int scsi_autopm_get_device(struct scsi_device *);
extern void scsi_autopm_put_device(struct scsi_device *);
#else
static inline int scsi_autopm_get_device(struct scsi_device *d) { return 0; }
static inline void scsi_autopm_put_device(struct scsi_device *d) {}
-#endif /* CONFIG_PM_RUNTIME */
+#endif /* CONFIG_PM */
static inline int __must_check scsi_device_reprobe(struct scsi_device *sdev)
{
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 9adc1bca117..430cfaf9228 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -5,6 +5,15 @@
#define TRANSPORT_PLUGIN_VHBA_PDEV 2
#define TRANSPORT_PLUGIN_VHBA_VDEV 3
+struct target_backend_cits {
+ struct config_item_type tb_dev_cit;
+ struct config_item_type tb_dev_attrib_cit;
+ struct config_item_type tb_dev_pr_cit;
+ struct config_item_type tb_dev_wwn_cit;
+ struct config_item_type tb_dev_alua_tg_pt_gps_cit;
+ struct config_item_type tb_dev_stat_cit;
+};
+
struct se_subsystem_api {
struct list_head sub_api_list;
@@ -44,6 +53,8 @@ struct se_subsystem_api {
int (*init_prot)(struct se_device *);
int (*format_prot)(struct se_device *);
void (*free_prot)(struct se_device *);
+
+ struct target_backend_cits tb_cits;
};
struct sbc_ops {
@@ -96,4 +107,36 @@ sense_reason_t transport_generic_map_mem_to_cmd(struct se_cmd *,
void array_free(void *array, int n);
+/* From target_core_configfs.c to setup default backend config_item_types */
+void target_core_setup_sub_cits(struct se_subsystem_api *);
+
+/* attribute helpers from target_core_device.c for backend drivers */
+int se_dev_set_max_unmap_lba_count(struct se_device *, u32);
+int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
+int se_dev_set_unmap_granularity(struct se_device *, u32);
+int se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
+int se_dev_set_max_write_same_len(struct se_device *, u32);
+int se_dev_set_emulate_model_alias(struct se_device *, int);
+int se_dev_set_emulate_dpo(struct se_device *, int);
+int se_dev_set_emulate_fua_write(struct se_device *, int);
+int se_dev_set_emulate_fua_read(struct se_device *, int);
+int se_dev_set_emulate_write_cache(struct se_device *, int);
+int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int);
+int se_dev_set_emulate_tas(struct se_device *, int);
+int se_dev_set_emulate_tpu(struct se_device *, int);
+int se_dev_set_emulate_tpws(struct se_device *, int);
+int se_dev_set_emulate_caw(struct se_device *, int);
+int se_dev_set_emulate_3pc(struct se_device *, int);
+int se_dev_set_pi_prot_type(struct se_device *, int);
+int se_dev_set_pi_prot_format(struct se_device *, int);
+int se_dev_set_enforce_pr_isids(struct se_device *, int);
+int se_dev_set_force_pr_aptpl(struct se_device *, int);
+int se_dev_set_is_nonrot(struct se_device *, int);
+int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
+int se_dev_set_queue_depth(struct se_device *, u32);
+int se_dev_set_max_sectors(struct se_device *, u32);
+int se_dev_set_fabric_max_sectors(struct se_device *, u32);
+int se_dev_set_optimal_sectors(struct se_device *, u32);
+int se_dev_set_block_size(struct se_device *, u32);
+
#endif /* TARGET_CORE_BACKEND_H */
diff --git a/include/target/target_core_backend_configfs.h b/include/target/target_core_backend_configfs.h
new file mode 100644
index 00000000000..3247d753010
--- /dev/null
+++ b/include/target/target_core_backend_configfs.h
@@ -0,0 +1,120 @@
+#ifndef TARGET_CORE_BACKEND_CONFIGFS_H
+#define TARGET_CORE_BACKEND_CONFIGFS_H
+
+#include <target/configfs_macros.h>
+
+#define DEF_TB_DEV_ATTRIB_SHOW(_backend, _name) \
+static ssize_t _backend##_dev_show_attr_##_name( \
+ struct se_dev_attrib *da, \
+ char *page) \
+{ \
+ return snprintf(page, PAGE_SIZE, "%u\n", \
+ (u32)da->da_dev->dev_attrib._name); \
+}
+
+#define DEF_TB_DEV_ATTRIB_STORE(_backend, _name) \
+static ssize_t _backend##_dev_store_attr_##_name( \
+ struct se_dev_attrib *da, \
+ const char *page, \
+ size_t count) \
+{ \
+ unsigned long val; \
+ int ret; \
+ \
+ ret = kstrtoul(page, 0, &val); \
+ if (ret < 0) { \
+ pr_err("kstrtoul() failed with ret: %d\n", ret); \
+ return -EINVAL; \
+ } \
+ ret = se_dev_set_##_name(da->da_dev, (u32)val); \
+ \
+ return (!ret) ? count : -EINVAL; \
+}
+
+#define DEF_TB_DEV_ATTRIB(_backend, _name) \
+DEF_TB_DEV_ATTRIB_SHOW(_backend, _name); \
+DEF_TB_DEV_ATTRIB_STORE(_backend, _name);
+
+#define DEF_TB_DEV_ATTRIB_RO(_backend, name) \
+DEF_TB_DEV_ATTRIB_SHOW(_backend, name);
+
+CONFIGFS_EATTR_STRUCT(target_backend_dev_attrib, se_dev_attrib);
+#define TB_DEV_ATTR(_backend, _name, _mode) \
+static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ _backend##_dev_show_attr_##_name, \
+ _backend##_dev_store_attr_##_name);
+
+#define TB_DEV_ATTR_RO(_backend, _name) \
+static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ _backend##_dev_show_attr_##_name);
+
+/*
+ * Default list of target backend device attributes as defined by
+ * struct se_dev_attrib
+ */
+
+#define DEF_TB_DEFAULT_ATTRIBS(_backend) \
+ DEF_TB_DEV_ATTRIB(_backend, emulate_model_alias); \
+ TB_DEV_ATTR(_backend, emulate_model_alias, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, emulate_dpo); \
+ TB_DEV_ATTR(_backend, emulate_dpo, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, emulate_fua_write); \
+ TB_DEV_ATTR(_backend, emulate_fua_write, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, emulate_fua_read); \
+ TB_DEV_ATTR(_backend, emulate_fua_read, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, emulate_write_cache); \
+ TB_DEV_ATTR(_backend, emulate_write_cache, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, emulate_ua_intlck_ctrl); \
+ TB_DEV_ATTR(_backend, emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, emulate_tas); \
+ TB_DEV_ATTR(_backend, emulate_tas, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, emulate_tpu); \
+ TB_DEV_ATTR(_backend, emulate_tpu, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, emulate_tpws); \
+ TB_DEV_ATTR(_backend, emulate_tpws, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, emulate_caw); \
+ TB_DEV_ATTR(_backend, emulate_caw, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, emulate_3pc); \
+ TB_DEV_ATTR(_backend, emulate_3pc, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, pi_prot_type); \
+ TB_DEV_ATTR(_backend, pi_prot_type, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB_RO(_backend, hw_pi_prot_type); \
+ TB_DEV_ATTR_RO(_backend, hw_pi_prot_type); \
+ DEF_TB_DEV_ATTRIB(_backend, pi_prot_format); \
+ TB_DEV_ATTR(_backend, pi_prot_format, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, enforce_pr_isids); \
+ TB_DEV_ATTR(_backend, enforce_pr_isids, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, is_nonrot); \
+ TB_DEV_ATTR(_backend, is_nonrot, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, emulate_rest_reord); \
+ TB_DEV_ATTR(_backend, emulate_rest_reord, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, force_pr_aptpl); \
+ TB_DEV_ATTR(_backend, force_pr_aptpl, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB_RO(_backend, hw_block_size); \
+ TB_DEV_ATTR_RO(_backend, hw_block_size); \
+ DEF_TB_DEV_ATTRIB(_backend, block_size); \
+ TB_DEV_ATTR(_backend, block_size, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB_RO(_backend, hw_max_sectors); \
+ TB_DEV_ATTR_RO(_backend, hw_max_sectors); \
+ DEF_TB_DEV_ATTRIB(_backend, fabric_max_sectors); \
+ TB_DEV_ATTR(_backend, fabric_max_sectors, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, optimal_sectors); \
+ TB_DEV_ATTR(_backend, optimal_sectors, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB_RO(_backend, hw_queue_depth); \
+ TB_DEV_ATTR_RO(_backend, hw_queue_depth); \
+ DEF_TB_DEV_ATTRIB(_backend, queue_depth); \
+ TB_DEV_ATTR(_backend, queue_depth, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, max_unmap_lba_count); \
+ TB_DEV_ATTR(_backend, max_unmap_lba_count, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, max_unmap_block_desc_count); \
+ TB_DEV_ATTR(_backend, max_unmap_block_desc_count, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, unmap_granularity); \
+ TB_DEV_ATTR(_backend, unmap_granularity, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, unmap_granularity_alignment); \
+ TB_DEV_ATTR(_backend, unmap_granularity_alignment, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, max_write_same_len); \
+ TB_DEV_ATTR(_backend, max_write_same_len, S_IRUGO | S_IWUSR);
+
+#endif /* TARGET_CORE_BACKEND_CONFIGFS_H */
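
A hypothetical backend "foo" showing how these macros are meant to combine with target_core_setup_sub_cits() from target_core_backend.h: expand the default attribute list, collect the generated configfs attributes, and override the dev_attrib config_item_type the core pre-populates. The .name field and transport_subsystem_register() are assumptions modeled on existing backends, not part of this patch:

#include <target/target_core_backend.h>
#include <target/target_core_backend_configfs.h>

DEF_TB_DEFAULT_ATTRIBS(foo);

static struct configfs_attribute *foo_backend_dev_attrs[] = {
	&foo_dev_attrib_emulate_model_alias.attr,
	&foo_dev_attrib_block_size.attr,
	/* ... the remaining attributes generated above ... */
	NULL,
};

static struct se_subsystem_api foo_template = {
	.name		= "foo",
	/* ... backend callbacks ... */
};

static int __init foo_backend_init(void)
{
	struct target_backend_cits *tbc = &foo_template.tb_cits;

	/* Let the core install its default config_item_types ... */
	target_core_setup_sub_cits(&foo_template);
	/* ... then point the dev_attrib group at this backend's list. */
	tbc->tb_dev_attrib_cit.ct_attrs = foo_backend_dev_attrs;

	return transport_subsystem_register(&foo_template);
}
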
diff --git a/include/trace/events/host1x.h b/include/trace/events/host1x.h
index 94db6a2c354..63116362543 100644
--- a/include/trace/events/host1x.h
+++ b/include/trace/events/host1x.h
@@ -29,6 +29,8 @@
#include <linux/ktime.h>
#include <linux/tracepoint.h>
+struct host1x_bo;
+
DECLARE_EVENT_CLASS(host1x,
TP_PROTO(const char *name),
TP_ARGS(name),
@@ -79,14 +81,14 @@ TRACE_EVENT(host1x_cdma_push,
);
TRACE_EVENT(host1x_cdma_push_gather,
- TP_PROTO(const char *name, u32 mem_id,
+ TP_PROTO(const char *name, struct host1x_bo *bo,
u32 words, u32 offset, void *cmdbuf),
- TP_ARGS(name, mem_id, words, offset, cmdbuf),
+ TP_ARGS(name, bo, words, offset, cmdbuf),
TP_STRUCT__entry(
__field(const char *, name)
- __field(u32, mem_id)
+ __field(struct host1x_bo *, bo)
__field(u32, words)
__field(u32, offset)
__field(bool, cmdbuf)
@@ -100,13 +102,13 @@ TRACE_EVENT(host1x_cdma_push_gather,
}
__entry->cmdbuf = cmdbuf;
__entry->name = name;
- __entry->mem_id = mem_id;
+ __entry->bo = bo;
__entry->words = words;
__entry->offset = offset;
),
- TP_printk("name=%s, mem_id=%08x, words=%u, offset=%d, contents=[%s]",
- __entry->name, __entry->mem_id,
+ TP_printk("name=%s, bo=%p, words=%u, offset=%d, contents=[%s]",
+ __entry->name, __entry->bo,
__entry->words, __entry->offset,
__print_hex(__get_dynamic_array(cmdbuf),
__entry->cmdbuf ? __entry->words * 4 : 0))
@@ -221,12 +223,13 @@ TRACE_EVENT(host1x_syncpt_load_min,
);
TRACE_EVENT(host1x_syncpt_wait_check,
- TP_PROTO(void *mem_id, u32 offset, u32 syncpt_id, u32 thresh, u32 min),
+ TP_PROTO(struct host1x_bo *bo, u32 offset, u32 syncpt_id, u32 thresh,
+ u32 min),
- TP_ARGS(mem_id, offset, syncpt_id, thresh, min),
+ TP_ARGS(bo, offset, syncpt_id, thresh, min),
TP_STRUCT__entry(
- __field(void *, mem_id)
+ __field(struct host1x_bo *, bo)
__field(u32, offset)
__field(u32, syncpt_id)
__field(u32, thresh)
@@ -234,15 +237,15 @@ TRACE_EVENT(host1x_syncpt_wait_check,
),
TP_fast_assign(
- __entry->mem_id = mem_id;
+ __entry->bo = bo;
__entry->offset = offset;
__entry->syncpt_id = syncpt_id;
__entry->thresh = thresh;
__entry->min = min;
),
- TP_printk("mem_id=%p, offset=%05x, id=%d, thresh=%d, current=%d",
- __entry->mem_id, __entry->offset,
+ TP_printk("bo=%p, offset=%05x, id=%d, thresh=%d, current=%d",
+ __entry->bo, __entry->offset,
__entry->syncpt_id, __entry->thresh,
__entry->min)
);
diff --git a/include/trace/events/module.h b/include/trace/events/module.h
index 7c5cbfe3fc4..81c4c183d34 100644
--- a/include/trace/events/module.h
+++ b/include/trace/events/module.h
@@ -80,7 +80,7 @@ DECLARE_EVENT_CLASS(module_refcnt,
TP_fast_assign(
__entry->ip = ip;
- __entry->refcnt = __this_cpu_read(mod->refptr->incs) - __this_cpu_read(mod->refptr->decs);
+ __entry->refcnt = atomic_read(&mod->refcnt);
__assign_str(name, mod->name);
),
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 171ca4ff6d9..b9c1dc6c825 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -8,6 +8,7 @@
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/xprtsock.h>
+#include <linux/sunrpc/svc_xprt.h>
#include <net/tcp_states.h>
#include <linux/net.h>
#include <linux/tracepoint.h>
@@ -412,6 +413,16 @@ TRACE_EVENT(xs_tcp_data_recv,
__entry->copied, __entry->reclen, __entry->offset)
);
+#define show_rqstp_flags(flags) \
+ __print_flags(flags, "|", \
+ { (1UL << RQ_SECURE), "RQ_SECURE"}, \
+ { (1UL << RQ_LOCAL), "RQ_LOCAL"}, \
+ { (1UL << RQ_USEDEFERRAL), "RQ_USEDEFERRAL"}, \
+ { (1UL << RQ_DROPME), "RQ_DROPME"}, \
+ { (1UL << RQ_SPLICE_OK), "RQ_SPLICE_OK"}, \
+ { (1UL << RQ_VICTIM), "RQ_VICTIM"}, \
+ { (1UL << RQ_BUSY), "RQ_BUSY"})
+
TRACE_EVENT(svc_recv,
TP_PROTO(struct svc_rqst *rqst, int status),
@@ -421,16 +432,19 @@ TRACE_EVENT(svc_recv,
__field(struct sockaddr *, addr)
__field(__be32, xid)
__field(int, status)
+ __field(unsigned long, flags)
),
TP_fast_assign(
__entry->addr = (struct sockaddr *)&rqst->rq_addr;
__entry->xid = status > 0 ? rqst->rq_xid : 0;
__entry->status = status;
+ __entry->flags = rqst->rq_flags;
),
- TP_printk("addr=%pIScp xid=0x%x status=%d", __entry->addr,
- be32_to_cpu(__entry->xid), __entry->status)
+ TP_printk("addr=%pIScp xid=0x%x status=%d flags=%s", __entry->addr,
+ be32_to_cpu(__entry->xid), __entry->status,
+ show_rqstp_flags(__entry->flags))
);
DECLARE_EVENT_CLASS(svc_rqst_status,
@@ -444,18 +458,19 @@ DECLARE_EVENT_CLASS(svc_rqst_status,
__field(__be32, xid)
__field(int, dropme)
__field(int, status)
+ __field(unsigned long, flags)
),
TP_fast_assign(
__entry->addr = (struct sockaddr *)&rqst->rq_addr;
__entry->xid = rqst->rq_xid;
- __entry->dropme = (int)rqst->rq_dropme;
__entry->status = status;
+ __entry->flags = rqst->rq_flags;
),
- TP_printk("addr=%pIScp rq_xid=0x%x dropme=%d status=%d",
- __entry->addr, be32_to_cpu(__entry->xid), __entry->dropme,
- __entry->status)
+ TP_printk("addr=%pIScp rq_xid=0x%x status=%d flags=%s",
+ __entry->addr, be32_to_cpu(__entry->xid),
+ __entry->status, show_rqstp_flags(__entry->flags))
);
DEFINE_EVENT(svc_rqst_status, svc_process,
@@ -466,6 +481,99 @@ DEFINE_EVENT(svc_rqst_status, svc_send,
TP_PROTO(struct svc_rqst *rqst, int status),
TP_ARGS(rqst, status));
+#define show_svc_xprt_flags(flags) \
+ __print_flags(flags, "|", \
+ { (1UL << XPT_BUSY), "XPT_BUSY"}, \
+ { (1UL << XPT_CONN), "XPT_CONN"}, \
+ { (1UL << XPT_CLOSE), "XPT_CLOSE"}, \
+ { (1UL << XPT_DATA), "XPT_DATA"}, \
+ { (1UL << XPT_TEMP), "XPT_TEMP"}, \
+ { (1UL << XPT_DEAD), "XPT_DEAD"}, \
+ { (1UL << XPT_CHNGBUF), "XPT_CHNGBUF"}, \
+ { (1UL << XPT_DEFERRED), "XPT_DEFERRED"}, \
+ { (1UL << XPT_OLD), "XPT_OLD"}, \
+ { (1UL << XPT_LISTENER), "XPT_LISTENER"}, \
+ { (1UL << XPT_CACHE_AUTH), "XPT_CACHE_AUTH"}, \
+ { (1UL << XPT_LOCAL), "XPT_LOCAL"})
+
+TRACE_EVENT(svc_xprt_do_enqueue,
+ TP_PROTO(struct svc_xprt *xprt, struct svc_rqst *rqst),
+
+ TP_ARGS(xprt, rqst),
+
+ TP_STRUCT__entry(
+ __field(struct svc_xprt *, xprt)
+ __field(struct svc_rqst *, rqst)
+ ),
+
+ TP_fast_assign(
+ __entry->xprt = xprt;
+ __entry->rqst = rqst;
+ ),
+
+ TP_printk("xprt=0x%p addr=%pIScp pid=%d flags=%s", __entry->xprt,
+ (struct sockaddr *)&__entry->xprt->xpt_remote,
+ __entry->rqst ? __entry->rqst->rq_task->pid : 0,
+ show_svc_xprt_flags(__entry->xprt->xpt_flags))
+);
+
+TRACE_EVENT(svc_xprt_dequeue,
+ TP_PROTO(struct svc_xprt *xprt),
+
+ TP_ARGS(xprt),
+
+ TP_STRUCT__entry(
+ __field(struct svc_xprt *, xprt)
+ __field_struct(struct sockaddr_storage, ss)
+ __field(unsigned long, flags)
+ ),
+
+ TP_fast_assign(
+ __entry->xprt = xprt,
+ xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss));
+ __entry->flags = xprt ? xprt->xpt_flags : 0;
+ ),
+
+ TP_printk("xprt=0x%p addr=%pIScp flags=%s", __entry->xprt,
+ (struct sockaddr *)&__entry->ss,
+ show_svc_xprt_flags(__entry->flags))
+);
+
+TRACE_EVENT(svc_wake_up,
+ TP_PROTO(int pid),
+
+ TP_ARGS(pid),
+
+ TP_STRUCT__entry(
+ __field(int, pid)
+ ),
+
+ TP_fast_assign(
+ __entry->pid = pid;
+ ),
+
+ TP_printk("pid=%d", __entry->pid)
+);
+
+TRACE_EVENT(svc_handle_xprt,
+ TP_PROTO(struct svc_xprt *xprt, int len),
+
+ TP_ARGS(xprt, len),
+
+ TP_STRUCT__entry(
+ __field(struct svc_xprt *, xprt)
+ __field(int, len)
+ ),
+
+ TP_fast_assign(
+ __entry->xprt = xprt;
+ __entry->len = len;
+ ),
+
+ TP_printk("xprt=0x%p addr=%pIScp len=%d flags=%s", __entry->xprt,
+ (struct sockaddr *)&__entry->xprt->xpt_remote, __entry->len,
+ show_svc_xprt_flags(__entry->xprt->xpt_flags))
+);
#endif /* _TRACE_SUNRPC_H */
#include <trace/define_trace.h>
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index a0db2d4aa5f..86574b0005f 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -286,6 +286,8 @@ struct drm_mode_get_property {
char name[DRM_PROP_NAME_LEN];
__u32 count_values;
+ /* This is only used to count enum values, not blobs. The _blobs
+ * suffix is kept for historical reasons, i.e. backwards compat. */
__u32 count_enum_blobs;
};
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index ff57f07c324..250262265ee 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -340,6 +340,7 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_EXEC_HANDLE_LUT 26
#define I915_PARAM_HAS_WT 27
#define I915_PARAM_CMD_PARSER_VERSION 28
+#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
typedef struct drm_i915_getparam {
int param;
@@ -876,6 +877,12 @@ struct drm_i915_gem_get_tiling {
* mmap mapping.
*/
__u32 swizzle_mode;
+
+ /**
+ * Returned address bit 6 swizzling required for CPU access through
+ * mmap mapping whilst bound.
+ */
+ __u32 phys_swizzle_mode;
};
struct drm_i915_gem_get_aperture {
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index d8e1716707b..00b100023c4 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -1,4 +1,5 @@
# UAPI Header export list
+header-y += android/
header-y += byteorder/
header-y += can/
header-y += caif/
@@ -211,6 +212,7 @@ header-y += ivtv.h
header-y += ixjuser.h
header-y += jffs2.h
header-y += joystick.h
+header-y += kcmp.h
header-y += kdev_t.h
header-y += kd.h
header-y += kernelcapi.h
@@ -385,6 +387,7 @@ header-y += tcp.h
header-y += tcp_metrics.h
header-y += telephony.h
header-y += termios.h
+header-y += thermal.h
header-y += time.h
header-y += times.h
header-y += timex.h
diff --git a/include/uapi/linux/android/Kbuild b/include/uapi/linux/android/Kbuild
new file mode 100644
index 00000000000..ca011eec252
--- /dev/null
+++ b/include/uapi/linux/android/Kbuild
@@ -0,0 +1,2 @@
+# UAPI Header export list
+header-y += binder.h
diff --git a/drivers/staging/android/uapi/binder.h b/include/uapi/linux/android/binder.h
index dba4cef3a8d..41420e341e7 100644
--- a/drivers/staging/android/uapi/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -20,6 +20,7 @@
#ifndef _UAPI_LINUX_BINDER_H
#define _UAPI_LINUX_BINDER_H
+#include <linux/types.h>
#include <linux/ioctl.h>
#define B_PACK_CHARS(c1, c2, c3, c4) \
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index 12e26683c70..d3475e1f15e 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -371,7 +371,9 @@ enum {
#define AUDIT_ARCH_PARISC (EM_PARISC)
#define AUDIT_ARCH_PARISC64 (EM_PARISC|__AUDIT_ARCH_64BIT)
#define AUDIT_ARCH_PPC (EM_PPC)
+/* do not define AUDIT_ARCH_PPCLE since it is not supported by audit */
#define AUDIT_ARCH_PPC64 (EM_PPC64|__AUDIT_ARCH_64BIT)
+#define AUDIT_ARCH_PPC64LE (EM_PPC64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_S390 (EM_S390)
#define AUDIT_ARCH_S390X (EM_S390|__AUDIT_ARCH_64BIT)
#define AUDIT_ARCH_SH (EM_SH)
diff --git a/include/uapi/linux/if_tun.h b/include/uapi/linux/if_tun.h
index 18b2403982f..50ae2433544 100644
--- a/include/uapi/linux/if_tun.h
+++ b/include/uapi/linux/if_tun.h
@@ -48,6 +48,8 @@
#define TUNSETQUEUE _IOW('T', 217, int)
#define TUNSETIFINDEX _IOW('T', 218, unsigned int)
#define TUNGETFILTER _IOR('T', 219, struct sock_fprog)
+#define TUNSETVNETLE _IOW('T', 220, int)
+#define TUNGETVNETLE _IOR('T', 221, int)
/* TUNSETIFF ifr flags */
#define IFF_TUN 0x0001
@@ -57,7 +59,6 @@
#define IFF_ONE_QUEUE 0x2000
#define IFF_VNET_HDR 0x4000
#define IFF_TUN_EXCL 0x8000
-#define IFF_VNET_LE 0x10000
#define IFF_MULTI_QUEUE 0x0100
#define IFF_ATTACH_QUEUE 0x0200
#define IFF_DETACH_QUEUE 0x0400
diff --git a/include/linux/kcmp.h b/include/uapi/linux/kcmp.h
index 2dcd1b3aafc..84df14b3736 100644
--- a/include/linux/kcmp.h
+++ b/include/uapi/linux/kcmp.h
@@ -1,5 +1,5 @@
-#ifndef _LINUX_KCMP_H
-#define _LINUX_KCMP_H
+#ifndef _UAPI_LINUX_KCMP_H
+#define _UAPI_LINUX_KCMP_H
/* Comparison type */
enum kcmp_type {
@@ -14,4 +14,4 @@ enum kcmp_type {
KCMP_TYPES,
};
-#endif /* _LINUX_KCMP_H */
+#endif /* _UAPI_LINUX_KCMP_H */
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
new file mode 100644
index 00000000000..7acef41fc20
--- /dev/null
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef KFD_IOCTL_H_INCLUDED
+#define KFD_IOCTL_H_INCLUDED
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define KFD_IOCTL_MAJOR_VERSION 1
+#define KFD_IOCTL_MINOR_VERSION 0
+
+struct kfd_ioctl_get_version_args {
+ uint32_t major_version; /* from KFD */
+ uint32_t minor_version; /* from KFD */
+};
+
+/* For kfd_ioctl_create_queue_args.queue_type. */
+#define KFD_IOC_QUEUE_TYPE_COMPUTE 0
+#define KFD_IOC_QUEUE_TYPE_SDMA 1
+#define KFD_IOC_QUEUE_TYPE_COMPUTE_AQL 2
+
+#define KFD_MAX_QUEUE_PERCENTAGE 100
+#define KFD_MAX_QUEUE_PRIORITY 15
+
+struct kfd_ioctl_create_queue_args {
+ uint64_t ring_base_address; /* to KFD */
+ uint64_t write_pointer_address; /* from KFD */
+ uint64_t read_pointer_address; /* from KFD */
+ uint64_t doorbell_offset; /* from KFD */
+
+ uint32_t ring_size; /* to KFD */
+ uint32_t gpu_id; /* to KFD */
+ uint32_t queue_type; /* to KFD */
+ uint32_t queue_percentage; /* to KFD */
+ uint32_t queue_priority; /* to KFD */
+ uint32_t queue_id; /* from KFD */
+
+ uint64_t eop_buffer_address; /* to KFD */
+ uint64_t eop_buffer_size; /* to KFD */
+ uint64_t ctx_save_restore_address; /* to KFD */
+ uint64_t ctx_save_restore_size; /* to KFD */
+};
+
+struct kfd_ioctl_destroy_queue_args {
+ uint32_t queue_id; /* to KFD */
+ uint32_t pad;
+};
+
+struct kfd_ioctl_update_queue_args {
+ uint64_t ring_base_address; /* to KFD */
+
+ uint32_t queue_id; /* to KFD */
+ uint32_t ring_size; /* to KFD */
+ uint32_t queue_percentage; /* to KFD */
+ uint32_t queue_priority; /* to KFD */
+};
+
+/* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
+#define KFD_IOC_CACHE_POLICY_COHERENT 0
+#define KFD_IOC_CACHE_POLICY_NONCOHERENT 1
+
+struct kfd_ioctl_set_memory_policy_args {
+ uint64_t alternate_aperture_base; /* to KFD */
+ uint64_t alternate_aperture_size; /* to KFD */
+
+ uint32_t gpu_id; /* to KFD */
+ uint32_t default_policy; /* to KFD */
+ uint32_t alternate_policy; /* to KFD */
+ uint32_t pad;
+};
+
+/*
+ * All counters are monotonic. They are used for profiling of compute jobs.
+ * The profiling is done by userspace.
+ *
+ * In case of GPU reset, the counters should not be affected.
+ */
+
+struct kfd_ioctl_get_clock_counters_args {
+ uint64_t gpu_clock_counter; /* from KFD */
+ uint64_t cpu_clock_counter; /* from KFD */
+ uint64_t system_clock_counter; /* from KFD */
+ uint64_t system_clock_freq; /* from KFD */
+
+ uint32_t gpu_id; /* to KFD */
+ uint32_t pad;
+};
+
+#define NUM_OF_SUPPORTED_GPUS 7
+
+struct kfd_process_device_apertures {
+ uint64_t lds_base; /* from KFD */
+ uint64_t lds_limit; /* from KFD */
+ uint64_t scratch_base; /* from KFD */
+ uint64_t scratch_limit; /* from KFD */
+ uint64_t gpuvm_base; /* from KFD */
+ uint64_t gpuvm_limit; /* from KFD */
+ uint32_t gpu_id; /* from KFD */
+ uint32_t pad;
+};
+
+struct kfd_ioctl_get_process_apertures_args {
+ struct kfd_process_device_apertures
+ process_apertures[NUM_OF_SUPPORTED_GPUS];/* from KFD */
+
+ /* from KFD, should be in the range [1 - NUM_OF_SUPPORTED_GPUS] */
+ uint32_t num_of_nodes;
+ uint32_t pad;
+};
+
+#define KFD_IOC_MAGIC 'K'
+
+#define KFD_IOC_GET_VERSION \
+ _IOR(KFD_IOC_MAGIC, 1, struct kfd_ioctl_get_version_args)
+
+#define KFD_IOC_CREATE_QUEUE \
+ _IOWR(KFD_IOC_MAGIC, 2, struct kfd_ioctl_create_queue_args)
+
+#define KFD_IOC_DESTROY_QUEUE \
+ _IOWR(KFD_IOC_MAGIC, 3, struct kfd_ioctl_destroy_queue_args)
+
+#define KFD_IOC_SET_MEMORY_POLICY \
+ _IOW(KFD_IOC_MAGIC, 4, struct kfd_ioctl_set_memory_policy_args)
+
+#define KFD_IOC_GET_CLOCK_COUNTERS \
+ _IOWR(KFD_IOC_MAGIC, 5, struct kfd_ioctl_get_clock_counters_args)
+
+#define KFD_IOC_GET_PROCESS_APERTURES \
+ _IOR(KFD_IOC_MAGIC, 6, struct kfd_ioctl_get_process_apertures_args)
+
+#define KFD_IOC_UPDATE_QUEUE \
+ _IOW(KFD_IOC_MAGIC, 7, struct kfd_ioctl_update_queue_args)
+
+#endif
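
A hedged userspace sketch exercising the new ABI end to end: only the ioctl number and argument struct come from this header, while the /dev/kfd node path is an assumption about the amdkfd character device.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kfd_ioctl.h>

int main(void)
{
	struct kfd_ioctl_get_version_args args = {0};
	int fd = open("/dev/kfd", O_RDWR);

	if (fd < 0) {
		perror("open /dev/kfd");
		return 1;
	}
	if (ioctl(fd, KFD_IOC_GET_VERSION, &args) < 0) {
		perror("KFD_IOC_GET_VERSION");
		close(fd);
		return 1;
	}
	printf("KFD ioctl interface %u.%u\n",
	       args.major_version, args.minor_version);
	close(fd);
	return 0;
}
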
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 60768822b14..a37fd1224f3 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -647,11 +647,7 @@ struct kvm_ppc_smmu_info {
#define KVM_CAP_MP_STATE 14
#define KVM_CAP_COALESCED_MMIO 15
#define KVM_CAP_SYNC_MMU 16 /* Changes to host mmap are reflected in guest */
-#define KVM_CAP_DEVICE_ASSIGNMENT 17
#define KVM_CAP_IOMMU 18
-#ifdef __KVM_HAVE_MSI
-#define KVM_CAP_DEVICE_MSI 20
-#endif
/* Bug in KVM_SET_USER_MEMORY_REGION fixed: */
#define KVM_CAP_DESTROY_MEMORY_REGION_WORKS 21
#define KVM_CAP_USER_NMI 22
@@ -663,10 +659,6 @@ struct kvm_ppc_smmu_info {
#endif
#define KVM_CAP_IRQ_ROUTING 25
#define KVM_CAP_IRQ_INJECT_STATUS 26
-#define KVM_CAP_DEVICE_DEASSIGNMENT 27
-#ifdef __KVM_HAVE_MSIX
-#define KVM_CAP_DEVICE_MSIX 28
-#endif
#define KVM_CAP_ASSIGN_DEV_IRQ 29
/* Another bug in KVM_SET_USER_MEMORY_REGION fixed: */
#define KVM_CAP_JOIN_MEMORY_REGIONS_WORKS 30
@@ -1107,9 +1099,6 @@ struct kvm_s390_ucas_mapping {
#define KVM_X86_SETUP_MCE _IOW(KVMIO, 0x9c, __u64)
#define KVM_X86_GET_MCE_CAP_SUPPORTED _IOR(KVMIO, 0x9d, __u64)
#define KVM_X86_SET_MCE _IOW(KVMIO, 0x9e, struct kvm_x86_mce)
-/* IA64 stack access */
-#define KVM_IA64_VCPU_GET_STACK _IOR(KVMIO, 0x9a, void *)
-#define KVM_IA64_VCPU_SET_STACK _IOW(KVMIO, 0x9b, void *)
/* Available with KVM_CAP_VCPU_EVENTS */
#define KVM_GET_VCPU_EVENTS _IOR(KVMIO, 0x9f, struct kvm_vcpu_events)
#define KVM_SET_VCPU_EVENTS _IOW(KVMIO, 0xa0, struct kvm_vcpu_events)
diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
index 77c60311a6c..7d664ea85eb 100644
--- a/include/uapi/linux/magic.h
+++ b/include/uapi/linux/magic.h
@@ -72,5 +72,6 @@
#define MTD_INODE_FS_MAGIC 0x11307854
#define ANON_INODE_FS_MAGIC 0x09041934
#define BTRFS_TEST_MAGIC 0x73727279
+#define NSFS_MAGIC 0x6e736673
#endif /* __LINUX_MAGIC_H__ */
diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h
index 7dcfbe6771b..b483d1909d3 100644
--- a/include/uapi/linux/target_core_user.h
+++ b/include/uapi/linux/target_core_user.h
@@ -6,10 +6,6 @@
#include <linux/types.h>
#include <linux/uio.h>
-#ifndef __packed
-#define __packed __attribute__((packed))
-#endif
-
#define TCMU_VERSION "1.0"
/*
diff --git a/include/uapi/linux/thermal.h b/include/uapi/linux/thermal.h
new file mode 100644
index 00000000000..ac553585598
--- /dev/null
+++ b/include/uapi/linux/thermal.h
@@ -0,0 +1,35 @@
+#ifndef _UAPI_LINUX_THERMAL_H
+#define _UAPI_LINUX_THERMAL_H
+
+#define THERMAL_NAME_LENGTH 20
+
+/* Adding event notification support elements */
+#define THERMAL_GENL_FAMILY_NAME "thermal_event"
+#define THERMAL_GENL_VERSION 0x01
+#define THERMAL_GENL_MCAST_GROUP_NAME "thermal_mc_grp"
+
+/* Events supported by Thermal Netlink */
+enum events {
+ THERMAL_AUX0,
+ THERMAL_AUX1,
+ THERMAL_CRITICAL,
+ THERMAL_DEV_FAULT,
+};
+
+/* attributes of thermal_genl_family */
+enum {
+ THERMAL_GENL_ATTR_UNSPEC,
+ THERMAL_GENL_ATTR_EVENT,
+ __THERMAL_GENL_ATTR_MAX,
+};
+#define THERMAL_GENL_ATTR_MAX (__THERMAL_GENL_ATTR_MAX - 1)
+
+/* commands supported by the thermal_genl_family */
+enum {
+ THERMAL_GENL_CMD_UNSPEC,
+ THERMAL_GENL_CMD_EVENT,
+ __THERMAL_GENL_CMD_MAX,
+};
+#define THERMAL_GENL_CMD_MAX (__THERMAL_GENL_CMD_MAX - 1)
+
+#endif /* _UAPI_LINUX_THERMAL_H */
diff --git a/include/uapi/linux/v4l2-mediabus.h b/include/uapi/linux/v4l2-mediabus.h
index 5a86d8ede09..26db20647e6 100644
--- a/include/uapi/linux/v4l2-mediabus.h
+++ b/include/uapi/linux/v4l2-mediabus.h
@@ -31,9 +31,9 @@ struct v4l2_mbus_framefmt {
__u32 code;
__u32 field;
__u32 colorspace;
- __u32 ycbcr_enc;
- __u32 quantization;
- __u32 reserved[5];
+ __u16 ycbcr_enc;
+ __u16 quantization;
+ __u32 reserved[6];
};
#ifndef __KERNEL__
diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h
index 5e26f61b5df..be40f7059e9 100644
--- a/include/uapi/linux/virtio_balloon.h
+++ b/include/uapi/linux/virtio_balloon.h
@@ -31,6 +31,7 @@
/* The feature bitmap for virtio balloon */
#define VIRTIO_BALLOON_F_MUST_TELL_HOST 0 /* Tell before reclaiming pages */
#define VIRTIO_BALLOON_F_STATS_VQ 1 /* Memory Stats virtqueue */
+#define VIRTIO_BALLOON_F_DEFLATE_ON_OOM 2 /* Deflate balloon on OOM */
/* Size of a PFN in the balloon interface. */
#define VIRTIO_BALLOON_PFN_SHIFT 12
diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h
index e5ec1caab82..35b552c7f33 100644
--- a/include/uapi/linux/virtio_pci.h
+++ b/include/uapi/linux/virtio_pci.h
@@ -41,6 +41,8 @@
#include <linux/virtio_config.h>
+#ifndef VIRTIO_PCI_NO_LEGACY
+
/* A 32-bit r/o bitmask of the features supported by the host */
#define VIRTIO_PCI_HOST_FEATURES 0
@@ -67,16 +69,11 @@
* a read-and-acknowledge. */
#define VIRTIO_PCI_ISR 19
-/* The bit of the ISR which indicates a device configuration change. */
-#define VIRTIO_PCI_ISR_CONFIG 0x2
-
/* MSI-X registers: only enabled if MSI-X is enabled. */
/* A 16-bit vector for configuration changes. */
#define VIRTIO_MSI_CONFIG_VECTOR 20
/* A 16-bit vector for selected queue notifications. */
#define VIRTIO_MSI_QUEUE_VECTOR 22
-/* Vector value used to disable MSI for queue */
-#define VIRTIO_MSI_NO_VECTOR 0xffff
/* The remaining space is defined by each driver as the per-driver
* configuration space */
@@ -94,4 +91,12 @@
/* The alignment to use between consumer and producer parts of vring.
* x86 pagesize again. */
#define VIRTIO_PCI_VRING_ALIGN 4096
+
+#endif /* VIRTIO_PCI_NO_LEGACY */
+
+/* The bit of the ISR which indicates a device configuration change. */
+#define VIRTIO_PCI_ISR_CONFIG 0x2
+/* Vector value used to disable MSI for queue */
+#define VIRTIO_MSI_NO_VECTOR 0xffff
+
#endif
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 26daf55ff76..4275b961bf6 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -90,8 +90,9 @@ enum {
};
enum {
+ IB_USER_VERBS_EX_CMD_QUERY_DEVICE = IB_USER_VERBS_CMD_QUERY_DEVICE,
IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD,
- IB_USER_VERBS_EX_CMD_DESTROY_FLOW
+ IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
};
/*
@@ -201,6 +202,32 @@ struct ib_uverbs_query_device_resp {
__u8 reserved[4];
};
+enum {
+ IB_USER_VERBS_EX_QUERY_DEVICE_ODP = 1ULL << 0,
+};
+
+struct ib_uverbs_ex_query_device {
+ __u32 comp_mask;
+ __u32 reserved;
+};
+
+struct ib_uverbs_odp_caps {
+ __u64 general_caps;
+ struct {
+ __u32 rc_odp_caps;
+ __u32 uc_odp_caps;
+ __u32 ud_odp_caps;
+ } per_transport_caps;
+ __u32 reserved;
+};
+
+struct ib_uverbs_ex_query_device_resp {
+ struct ib_uverbs_query_device_resp base;
+ __u32 comp_mask;
+ __u32 reserved;
+ struct ib_uverbs_odp_caps odp_caps;
+};
+
struct ib_uverbs_query_port {
__u64 response;
__u8 port_num;
diff --git a/init/do_mounts.c b/init/do_mounts.c
index 9b3565c4150..eb410083e8e 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -395,8 +395,6 @@ retry:
case 0:
goto out;
case -EACCES:
- flags |= MS_RDONLY;
- goto retry;
case -EINVAL:
continue;
}
@@ -419,6 +417,10 @@ retry:
#endif
panic("VFS: Unable to mount root fs on %s", b);
}
+ if (!(flags & MS_RDONLY)) {
+ flags |= MS_RDONLY;
+ goto retry;
+ }
printk("List of all partitions:\n");
printk_all_partitions();
diff --git a/init/main.c b/init/main.c
index 8a914b758e5..61b993767db 100644
--- a/init/main.c
+++ b/init/main.c
@@ -79,6 +79,7 @@
#include <linux/random.h>
#include <linux/list.h>
#include <linux/integrity.h>
+#include <linux/proc_ns.h>
#include <asm/io.h>
#include <asm/bugs.h>
@@ -578,6 +579,10 @@ asmlinkage __visible void __init start_kernel(void)
local_irq_disable();
idr_init_cache();
rcu_init();
+
+ /* trace_printk() and trace points may be used after this */
+ trace_init();
+
context_tracking_init();
radix_tree_init();
/* init some links before init_ISA_irqs() */
@@ -661,6 +666,7 @@ asmlinkage __visible void __init start_kernel(void)
/* rootfs populating might need page-writeback */
page_writeback_init();
proc_root_init();
+ nsfs_init();
cgroup_init();
cpuset_init();
taskstats_init_early();
diff --git a/init/version.c b/init/version.c
index 1a4718e500f..fe41a63efed 100644
--- a/init/version.c
+++ b/init/version.c
@@ -35,7 +35,10 @@ struct uts_namespace init_uts_ns = {
.domainname = UTS_DOMAINNAME,
},
.user_ns = &init_user_ns,
- .proc_inum = PROC_UTS_INIT_INO,
+ .ns.inum = PROC_UTS_INIT_INO,
+#ifdef CONFIG_UTS_NS
+ .ns.ops = &utsns_operations,
+#endif
};
EXPORT_SYMBOL_GPL(init_uts_ns);
diff --git a/ipc/msgutil.c b/ipc/msgutil.c
index 7e7095974d5..2b491590eba 100644
--- a/ipc/msgutil.c
+++ b/ipc/msgutil.c
@@ -31,7 +31,10 @@ DEFINE_SPINLOCK(mq_lock);
struct ipc_namespace init_ipc_ns = {
.count = ATOMIC_INIT(1),
.user_ns = &init_user_ns,
- .proc_inum = PROC_IPC_INIT_INO,
+ .ns.inum = PROC_IPC_INIT_INO,
+#ifdef CONFIG_IPC_NS
+ .ns.ops = &ipcns_operations,
+#endif
};
atomic_t nr_ipc_ns = ATOMIC_INIT(1);
diff --git a/ipc/namespace.c b/ipc/namespace.c
index 1a3ffd40356..068caf18d56 100644
--- a/ipc/namespace.c
+++ b/ipc/namespace.c
@@ -26,16 +26,17 @@ static struct ipc_namespace *create_ipc_ns(struct user_namespace *user_ns,
if (ns == NULL)
return ERR_PTR(-ENOMEM);
- err = proc_alloc_inum(&ns->proc_inum);
+ err = ns_alloc_inum(&ns->ns);
if (err) {
kfree(ns);
return ERR_PTR(err);
}
+ ns->ns.ops = &ipcns_operations;
atomic_set(&ns->count, 1);
err = mq_init_ns(ns);
if (err) {
- proc_free_inum(ns->proc_inum);
+ ns_free_inum(&ns->ns);
kfree(ns);
return ERR_PTR(err);
}
@@ -97,7 +98,7 @@ static void free_ipc_ns(struct ipc_namespace *ns)
atomic_dec(&nr_ipc_ns);
put_user_ns(ns->user_ns);
- proc_free_inum(ns->proc_inum);
+ ns_free_inum(&ns->ns);
kfree(ns);
}
@@ -127,7 +128,12 @@ void put_ipc_ns(struct ipc_namespace *ns)
}
}
-static void *ipcns_get(struct task_struct *task)
+static inline struct ipc_namespace *to_ipc_ns(struct ns_common *ns)
+{
+ return container_of(ns, struct ipc_namespace, ns);
+}
+
+static struct ns_common *ipcns_get(struct task_struct *task)
{
struct ipc_namespace *ns = NULL;
struct nsproxy *nsproxy;
@@ -138,17 +144,17 @@ static void *ipcns_get(struct task_struct *task)
ns = get_ipc_ns(nsproxy->ipc_ns);
task_unlock(task);
- return ns;
+ return ns ? &ns->ns : NULL;
}
-static void ipcns_put(void *ns)
+static void ipcns_put(struct ns_common *ns)
{
- return put_ipc_ns(ns);
+ return put_ipc_ns(to_ipc_ns(ns));
}
-static int ipcns_install(struct nsproxy *nsproxy, void *new)
+static int ipcns_install(struct nsproxy *nsproxy, struct ns_common *new)
{
- struct ipc_namespace *ns = new;
+ struct ipc_namespace *ns = to_ipc_ns(new);
if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN) ||
!ns_capable(current_user_ns(), CAP_SYS_ADMIN))
return -EPERM;
@@ -160,18 +166,10 @@ static int ipcns_install(struct nsproxy *nsproxy, void *new)
return 0;
}
-static unsigned int ipcns_inum(void *vp)
-{
- struct ipc_namespace *ns = vp;
-
- return ns->proc_inum;
-}
-
const struct proc_ns_operations ipcns_operations = {
.name = "ipc",
.type = CLONE_NEWIPC,
.get = ipcns_get,
.put = ipcns_put,
.install = ipcns_install,
- .inum = ipcns_inum,
};
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 113b837470c..4c1ee7f2beb 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7477,11 +7477,11 @@ SYSCALL_DEFINE5(perf_event_open,
if (move_group) {
synchronize_rcu();
- perf_install_in_context(ctx, group_leader, event->cpu);
+ perf_install_in_context(ctx, group_leader, group_leader->cpu);
get_ctx(ctx);
list_for_each_entry(sibling, &group_leader->sibling_list,
group_entry) {
- perf_install_in_context(ctx, sibling, event->cpu);
+ perf_install_in_context(ctx, sibling, sibling->cpu);
get_ctx(ctx);
}
}
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 995a95f61a1..cb346f26a22 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -193,7 +193,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
}
flush_cache_page(vma, addr, pte_pfn(*ptep));
- ptep_clear_flush(vma, addr, ptep);
+ ptep_clear_flush_notify(vma, addr, ptep);
set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
page_remove_rmap(page);
diff --git a/kernel/groups.c b/kernel/groups.c
index 451698f86cf..664411f171b 100644
--- a/kernel/groups.c
+++ b/kernel/groups.c
@@ -6,6 +6,7 @@
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/syscalls.h>
+#include <linux/user_namespace.h>
#include <asm/uaccess.h>
/* init to 2 - one for init_task, one to ensure it is never freed */
@@ -213,6 +214,14 @@ out:
return i;
}
+bool may_setgroups(void)
+{
+ struct user_namespace *user_ns = current_user_ns();
+
+ return ns_capable(user_ns, CAP_SETGID) &&
+ userns_may_setgroups(user_ns);
+}
+
/*
* SMP: Our groups are copy-on-write. We can set them safely
* without another task interfering.
@@ -223,7 +232,7 @@ SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist)
struct group_info *group_info;
int retval;
- if (!ns_capable(current_user_ns(), CAP_SETGID))
+ if (!may_setgroups())
return -EPERM;
if ((unsigned)gidsetsize > NGROUPS_MAX)
return -EINVAL;
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 4332d766619..df553b0af93 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -78,8 +78,12 @@ extern void unmask_threaded_irq(struct irq_desc *desc);
#ifdef CONFIG_SPARSE_IRQ
static inline void irq_mark_irq(unsigned int irq) { }
+extern void irq_lock_sparse(void);
+extern void irq_unlock_sparse(void);
#else
extern void irq_mark_irq(unsigned int irq);
+static inline void irq_lock_sparse(void) { }
+static inline void irq_unlock_sparse(void) { }
#endif
extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index a1782f88f0a..99793b9b6d2 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -132,6 +132,16 @@ static void free_masks(struct irq_desc *desc)
static inline void free_masks(struct irq_desc *desc) { }
#endif
+void irq_lock_sparse(void)
+{
+ mutex_lock(&sparse_irq_lock);
+}
+
+void irq_unlock_sparse(void)
+{
+ mutex_unlock(&sparse_irq_lock);
+}
+
static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
{
struct irq_desc *desc;
@@ -168,6 +178,12 @@ static void free_desc(unsigned int irq)
unregister_irq_proc(irq, desc);
+ /*
+ * sparse_irq_lock protects also show_interrupts() and
+ * kstat_irq_usr(). Once we deleted the descriptor from the
+ * sparse tree we can free it. Access in proc will fail to
+ * lookup the descriptor.
+ */
mutex_lock(&sparse_irq_lock);
delete_irq_desc(irq);
mutex_unlock(&sparse_irq_lock);
@@ -574,6 +590,15 @@ void kstat_incr_irq_this_cpu(unsigned int irq)
kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
}
+/**
+ * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
+ * @irq: The interrupt number
+ * @cpu: The cpu number
+ *
+ * Returns the sum of interrupt counts on @cpu since boot for
+ * @irq. The caller must ensure that the interrupt is not removed
+ * concurrently.
+ */
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
struct irq_desc *desc = irq_to_desc(irq);
@@ -582,6 +607,14 @@ unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}
+/**
+ * kstat_irqs - Get the statistics for an interrupt
+ * @irq: The interrupt number
+ *
+ * Returns the sum of interrupt counts on all cpus since boot for
+ * @irq. The caller must ensure that the interrupt is not removed
+ * concurrently.
+ */
unsigned int kstat_irqs(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
@@ -594,3 +627,22 @@ unsigned int kstat_irqs(unsigned int irq)
sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
return sum;
}
+
+/**
+ * kstat_irqs_usr - Get the statistics for an interrupt
+ * @irq: The interrupt number
+ *
+ * Returns the sum of interrupt counts on all cpus since boot for
+ * @irq. Contrary to kstat_irqs() this can be called from any
+ * preemptible context. It's protected against concurrent removal of
+ * an interrupt descriptor when sparse irqs are enabled.
+ */
+unsigned int kstat_irqs_usr(unsigned int irq)
+{
+ int sum;
+
+ irq_lock_sparse();
+ sum = kstat_irqs(irq);
+ irq_unlock_sparse();
+ return sum;
+}
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index ac1ba2f1103..9dc9bfd8a67 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -15,6 +15,23 @@
#include "internals.h"
+/*
+ * Access rules:
+ *
+ * procfs protects read/write of /proc/irq/N/ files against a
+ * concurrent free of the interrupt descriptor. remove_proc_entry()
+ * immediately prevents new read/writes to happen and waits for
+ * already running read/write functions to complete.
+ *
+ * We remove the proc entries first and then delete the interrupt
+ * descriptor from the radix tree and free it. So it is guaranteed
+ * that irq_to_desc(N) is valid as long as the read/writes are
+ * permitted by procfs.
+ *
+ * The read from /proc/interrupts is a different problem because there
+ * is no protection. So the lookup and the access to irqdesc
+ * information must be protected by sparse_irq_lock.
+ */
static struct proc_dir_entry *root_irq_dir;
#ifdef CONFIG_SMP
@@ -437,9 +454,10 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
}
+ irq_lock_sparse();
desc = irq_to_desc(i);
if (!desc)
- return 0;
+ goto outsparse;
raw_spin_lock_irqsave(&desc->lock, flags);
for_each_online_cpu(j)
@@ -479,6 +497,8 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
out:
raw_spin_unlock_irqrestore(&desc->lock, flags);
+outsparse:
+ irq_unlock_sparse();
return 0;
}
#endif
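
The access-rules comment above protects exactly the consumers that read /proc/interrupts. As a purely illustrative userspace counterpart of kstat_irqs_usr() (nothing here is defined by the patch), the snippet below sums the per-CPU counts of one IRQ row from /proc/interrupts.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Sum the per-CPU counts shown in /proc/interrupts for one IRQ number. */
int main(int argc, char **argv)
{
	char line[4096], label[32];
	FILE *f;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <irq-number>\n", argv[0]);
		return 1;
	}
	snprintf(label, sizeof(label), "%s:", argv[1]);

	f = fopen("/proc/interrupts", "r");
	if (!f) {
		perror("/proc/interrupts");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		char *p = line;

		while (*p == ' ')
			p++;
		if (strncmp(p, label, strlen(label)) != 0)
			continue;

		unsigned long long sum = 0;
		p += strlen(label);
		for (;;) {
			char *end;
			unsigned long long v = strtoull(p, &end, 10);

			if (end == p)
				break;	/* reached the chip/handler names */
			sum += v;
			p = end;
		}
		printf("irq %s: %llu\n", argv[1], sum);
		break;
	}
	fclose(f);
	return 0;
}
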
diff --git a/kernel/module.c b/kernel/module.c
index e52a8739361..3965511ae13 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -42,7 +42,6 @@
#include <linux/vermagic.h>
#include <linux/notifier.h>
#include <linux/sched.h>
-#include <linux/stop_machine.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/mutex.h>
@@ -98,7 +97,7 @@
* 1) List of modules (also safely readable with preempt_disable),
* 2) module_use links,
* 3) module_addr_min/module_addr_max.
- * (delete uses stop_machine/add uses RCU list operations). */
+ * (delete and add uses RCU list operations). */
DEFINE_MUTEX(module_mutex);
EXPORT_SYMBOL_GPL(module_mutex);
static LIST_HEAD(modules);
@@ -158,13 +157,13 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
* Protected by module_mutex. */
static unsigned long module_addr_min = -1UL, module_addr_max = 0;
-int register_module_notifier(struct notifier_block * nb)
+int register_module_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&module_notify_list, nb);
}
EXPORT_SYMBOL(register_module_notifier);
-int unregister_module_notifier(struct notifier_block * nb)
+int unregister_module_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_unregister(&module_notify_list, nb);
}
@@ -628,18 +627,23 @@ static char last_unloaded_module[MODULE_NAME_LEN+1];
EXPORT_TRACEPOINT_SYMBOL(module_get);
+/* MODULE_REF_BASE is the base reference count by kmodule loader. */
+#define MODULE_REF_BASE 1
+
/* Init the unload section of the module. */
static int module_unload_init(struct module *mod)
{
- mod->refptr = alloc_percpu(struct module_ref);
- if (!mod->refptr)
- return -ENOMEM;
+ /*
+ * Initialize reference counter to MODULE_REF_BASE.
+ * refcnt == 0 means module is going.
+ */
+ atomic_set(&mod->refcnt, MODULE_REF_BASE);
INIT_LIST_HEAD(&mod->source_list);
INIT_LIST_HEAD(&mod->target_list);
/* Hold reference count during initialization. */
- raw_cpu_write(mod->refptr->incs, 1);
+ atomic_inc(&mod->refcnt);
return 0;
}
@@ -721,8 +725,6 @@ static void module_unload_free(struct module *mod)
kfree(use);
}
mutex_unlock(&module_mutex);
-
- free_percpu(mod->refptr);
}
#ifdef CONFIG_MODULE_FORCE_UNLOAD
@@ -740,60 +742,39 @@ static inline int try_force_unload(unsigned int flags)
}
#endif /* CONFIG_MODULE_FORCE_UNLOAD */
-struct stopref
+/* Try to release refcount of module, 0 means success. */
+static int try_release_module_ref(struct module *mod)
{
- struct module *mod;
- int flags;
- int *forced;
-};
+ int ret;
-/* Whole machine is stopped with interrupts off when this runs. */
-static int __try_stop_module(void *_sref)
-{
- struct stopref *sref = _sref;
+ /* Try to decrement refcnt which we set at loading */
+ ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt);
+ BUG_ON(ret < 0);
+ if (ret)
+ /* Someone can put this right now, recover with checking */
+ ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0);
+
+ return ret;
+}
+static int try_stop_module(struct module *mod, int flags, int *forced)
+{
/* If it's not unused, quit unless we're forcing. */
- if (module_refcount(sref->mod) != 0) {
- if (!(*sref->forced = try_force_unload(sref->flags)))
+ if (try_release_module_ref(mod) != 0) {
+ *forced = try_force_unload(flags);
+ if (!(*forced))
return -EWOULDBLOCK;
}
/* Mark it as dying. */
- sref->mod->state = MODULE_STATE_GOING;
- return 0;
-}
-
-static int try_stop_module(struct module *mod, int flags, int *forced)
-{
- struct stopref sref = { mod, flags, forced };
+ mod->state = MODULE_STATE_GOING;
- return stop_machine(__try_stop_module, &sref, NULL);
+ return 0;
}
unsigned long module_refcount(struct module *mod)
{
- unsigned long incs = 0, decs = 0;
- int cpu;
-
- for_each_possible_cpu(cpu)
- decs += per_cpu_ptr(mod->refptr, cpu)->decs;
- /*
- * ensure the incs are added up after the decs.
- * module_put ensures incs are visible before decs with smp_wmb.
- *
- * This 2-count scheme avoids the situation where the refcount
- * for CPU0 is read, then CPU0 increments the module refcount,
- * then CPU1 drops that refcount, then the refcount for CPU1 is
- * read. We would record a decrement but not its corresponding
- * increment so we would see a low count (disaster).
- *
- * Rare situation? But module_refcount can be preempted, and we
- * might be tallying up 4096+ CPUs. So it is not impossible.
- */
- smp_rmb();
- for_each_possible_cpu(cpu)
- incs += per_cpu_ptr(mod->refptr, cpu)->incs;
- return incs - decs;
+ return (unsigned long)atomic_read(&mod->refcnt) - MODULE_REF_BASE;
}
EXPORT_SYMBOL(module_refcount);
@@ -877,8 +858,10 @@ static inline void print_unload_info(struct seq_file *m, struct module *mod)
seq_printf(m, " %lu ", module_refcount(mod));
- /* Always include a trailing , so userspace can differentiate
- between this and the old multi-field proc format. */
+ /*
+ * Always include a trailing , so userspace can differentiate
+ * between this and the old multi-field proc format.
+ */
list_for_each_entry(use, &mod->source_list, source_list) {
printed_something = 1;
seq_printf(m, "%s,", use->source->name);
@@ -886,11 +869,11 @@ static inline void print_unload_info(struct seq_file *m, struct module *mod)
if (mod->init != NULL && mod->exit == NULL) {
printed_something = 1;
- seq_printf(m, "[permanent],");
+ seq_puts(m, "[permanent],");
}
if (!printed_something)
- seq_printf(m, "-");
+ seq_puts(m, "-");
}
void __symbol_put(const char *symbol)
@@ -935,7 +918,7 @@ void __module_get(struct module *module)
{
if (module) {
preempt_disable();
- __this_cpu_inc(module->refptr->incs);
+ atomic_inc(&module->refcnt);
trace_module_get(module, _RET_IP_);
preempt_enable();
}
@@ -948,11 +931,11 @@ bool try_module_get(struct module *module)
if (module) {
preempt_disable();
-
- if (likely(module_is_live(module))) {
- __this_cpu_inc(module->refptr->incs);
+ /* Note: here, we can fail to get a reference */
+ if (likely(module_is_live(module) &&
+ atomic_inc_not_zero(&module->refcnt) != 0))
trace_module_get(module, _RET_IP_);
- } else
+ else
ret = false;
preempt_enable();
@@ -963,11 +946,12 @@ EXPORT_SYMBOL(try_module_get);
void module_put(struct module *module)
{
+ int ret;
+
if (module) {
preempt_disable();
- smp_wmb(); /* see comment in module_refcount */
- __this_cpu_inc(module->refptr->decs);
-
+ ret = atomic_dec_if_positive(&module->refcnt);
+ WARN_ON(ret < 0); /* Failed to put refcount */
trace_module_put(module, _RET_IP_);
preempt_enable();
}
@@ -978,7 +962,7 @@ EXPORT_SYMBOL(module_put);
static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
/* We don't know the usage count, or what modules are using. */
- seq_printf(m, " - -");
+ seq_puts(m, " - -");
}
static inline void module_unload_free(struct module *mod)
@@ -1131,7 +1115,7 @@ static unsigned long maybe_relocated(unsigned long crc,
static int check_version(Elf_Shdr *sechdrs,
unsigned int versindex,
const char *symname,
- struct module *mod,
+ struct module *mod,
const unsigned long *crc,
const struct module *crc_owner)
{
@@ -1165,7 +1149,7 @@ static int check_version(Elf_Shdr *sechdrs,
return 0;
bad_version:
- printk("%s: disagrees about version of symbol %s\n",
+ pr_warn("%s: disagrees about version of symbol %s\n",
mod->name, symname);
return 0;
}
@@ -1200,7 +1184,7 @@ static inline int same_magic(const char *amagic, const char *bmagic,
static inline int check_version(Elf_Shdr *sechdrs,
unsigned int versindex,
const char *symname,
- struct module *mod,
+ struct module *mod,
const unsigned long *crc,
const struct module *crc_owner)
{
@@ -1288,15 +1272,13 @@ static inline bool sect_empty(const Elf_Shdr *sect)
return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
}
-struct module_sect_attr
-{
+struct module_sect_attr {
struct module_attribute mattr;
char *name;
unsigned long address;
};
-struct module_sect_attrs
-{
+struct module_sect_attrs {
struct attribute_group grp;
unsigned int nsections;
struct module_sect_attr attrs[0];
@@ -1550,7 +1532,8 @@ static int module_add_modinfo_attrs(struct module *mod)
(attr->test && attr->test(mod))) {
memcpy(temp_attr, attr, sizeof(*temp_attr));
sysfs_attr_init(&temp_attr->attr);
- error = sysfs_create_file(&mod->mkobj.kobj,&temp_attr->attr);
+ error = sysfs_create_file(&mod->mkobj.kobj,
+ &temp_attr->attr);
++temp_attr;
}
}
@@ -1566,7 +1549,7 @@ static void module_remove_modinfo_attrs(struct module *mod)
/* pick a field to test for end of list */
if (!attr->attr.name)
break;
- sysfs_remove_file(&mod->mkobj.kobj,&attr->attr);
+ sysfs_remove_file(&mod->mkobj.kobj, &attr->attr);
if (attr->free)
attr->free(mod);
}
@@ -1697,18 +1680,6 @@ static void mod_sysfs_teardown(struct module *mod)
mod_sysfs_fini(mod);
}
-/*
- * unlink the module with the whole machine is stopped with interrupts off
- * - this defends against kallsyms not taking locks
- */
-static int __unlink_module(void *_mod)
-{
- struct module *mod = _mod;
- list_del(&mod->list);
- module_bug_cleanup(mod);
- return 0;
-}
-
#ifdef CONFIG_DEBUG_SET_MODULE_RONX
/*
* LKM RO/NX protection: protect module's text/ro-data
@@ -1860,7 +1831,12 @@ static void free_module(struct module *mod)
/* Now we can delete it from the lists */
mutex_lock(&module_mutex);
- stop_machine(__unlink_module, mod, NULL);
+ /* Unlink carefully: kallsyms could be walking list. */
+ list_del_rcu(&mod->list);
+ /* Remove this module from bug list, this uses list_del_rcu */
+ module_bug_cleanup(mod);
+ /* Wait for RCU synchronizing before releasing mod->list and buglist. */
+ synchronize_rcu();
mutex_unlock(&module_mutex);
/* This may be NULL, but that's OK */
@@ -1955,7 +1931,7 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
/* We compiled with -fno-common. These are not
supposed to happen. */
pr_debug("Common symbol: %s\n", name);
- printk("%s: please compile with -fno-common\n",
+ pr_warn("%s: please compile with -fno-common\n",
mod->name);
ret = -ENOEXEC;
break;
@@ -2259,7 +2235,7 @@ static char elf_type(const Elf_Sym *sym, const struct load_info *info)
}
static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs,
- unsigned int shnum)
+ unsigned int shnum)
{
const Elf_Shdr *sec;
@@ -2735,7 +2711,7 @@ static int find_module_sections(struct module *mod, struct load_info *info)
* This shouldn't happen with same compiler and binutils
* building all parts of the module.
*/
- printk(KERN_WARNING "%s: has both .ctors and .init_array.\n",
+ pr_warn("%s: has both .ctors and .init_array.\n",
mod->name);
return -EINVAL;
}
@@ -3023,8 +2999,10 @@ static int do_init_module(struct module *mod)
if (mod->init != NULL)
ret = do_one_initcall(mod->init);
if (ret < 0) {
- /* Init routine failed: abort. Try to protect us from
- buggy refcounters. */
+ /*
+ * Init routine failed: abort. Try to protect us from
+ * buggy refcounters.
+ */
mod->state = MODULE_STATE_GOING;
synchronize_sched();
module_put(mod);
@@ -3202,7 +3180,7 @@ out:
static int unknown_module_param_cb(char *param, char *val, const char *modname)
{
- /* Check for magic 'dyndbg' arg */
+ /* Check for magic 'dyndbg' arg */
int ret = ddebug_dyndbg_module_param_cb(param, val, modname);
if (ret != 0)
pr_warn("%s: unknown parameter '%s' ignored\n", modname, param);
@@ -3352,6 +3330,8 @@ static int load_module(struct load_info *info, const char __user *uargs,
/* Unlink carefully: kallsyms could be walking list. */
list_del_rcu(&mod->list);
wake_up_all(&module_wq);
+ /* Wait for RCU synchronizing before releasing mod->list. */
+ synchronize_rcu();
mutex_unlock(&module_mutex);
free_module:
module_deallocate(mod, info);
@@ -3685,8 +3665,8 @@ static int m_show(struct seq_file *m, void *p)
/* Informative for users. */
seq_printf(m, " %s",
- mod->state == MODULE_STATE_GOING ? "Unloading":
- mod->state == MODULE_STATE_COMING ? "Loading":
+ mod->state == MODULE_STATE_GOING ? "Unloading" :
+ mod->state == MODULE_STATE_COMING ? "Loading" :
"Live");
/* Used by oprofile and other similar tools. */
seq_printf(m, " 0x%pK", mod->module_core);
@@ -3695,7 +3675,7 @@ static int m_show(struct seq_file *m, void *p)
if (mod->taints)
seq_printf(m, " %s", module_flags(mod, buf));
- seq_printf(m, "\n");
+ seq_puts(m, "\n");
return 0;
}
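
The hunks above replace the per-cpu incs/decs module refcount (which needed stop_machine() for a consistent read) with a single atomic_t carrying a base reference. Below is a hedged userspace analogue of the try_release_module_ref() pattern using C11 atomics; REF_BASE and the function names are illustrative, not kernel API.

#include <stdatomic.h>
#include <stdio.h>

#define REF_BASE 1	/* base reference taken when the object is set up */

/*
 * Drop the base reference; if other references remain, try to restore
 * it unless the count already fell to zero concurrently.
 * Returns 0 when the caller may tear the object down.
 */
static int try_release_base_ref(atomic_int *refcnt)
{
	int remaining = atomic_fetch_sub(refcnt, REF_BASE) - REF_BASE;

	if (remaining == 0)
		return 0;		/* no other holders */

	int old = atomic_load(refcnt);
	while (old != 0) {
		if (atomic_compare_exchange_weak(refcnt, &old, old + REF_BASE))
			return old;	/* base ref restored: removal refused */
	}
	return 0;			/* dropped to zero while we looked */
}

int main(void)
{
	atomic_int refcnt = REF_BASE;	/* as set at "load" time */

	atomic_fetch_add(&refcnt, 1);	/* simulated try_module_get() */
	printf("busy: %d\n", try_release_base_ref(&refcnt));	/* non-zero */
	atomic_fetch_sub(&refcnt, 1);	/* simulated module_put() */
	printf("idle: %d\n", try_release_base_ref(&refcnt));	/* 0 */
	return 0;
}
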
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index ef42d0ab311..49746c81ad8 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
@@ -220,11 +220,10 @@ void exit_task_namespaces(struct task_struct *p)
SYSCALL_DEFINE2(setns, int, fd, int, nstype)
{
- const struct proc_ns_operations *ops;
struct task_struct *tsk = current;
struct nsproxy *new_nsproxy;
- struct proc_ns *ei;
struct file *file;
+ struct ns_common *ns;
int err;
file = proc_ns_fget(fd);
@@ -232,9 +231,8 @@ SYSCALL_DEFINE2(setns, int, fd, int, nstype)
return PTR_ERR(file);
err = -EINVAL;
- ei = get_proc_ns(file_inode(file));
- ops = ei->ns_ops;
- if (nstype && (ops->type != nstype))
+ ns = get_proc_ns(file_inode(file));
+ if (nstype && (ns->ops->type != nstype))
goto out;
new_nsproxy = create_new_namespaces(0, tsk, current_user_ns(), tsk->fs);
@@ -243,7 +241,7 @@ SYSCALL_DEFINE2(setns, int, fd, int, nstype)
goto out;
}
- err = ops->install(new_nsproxy, ei->ns);
+ err = ns->ops->install(new_nsproxy, ns);
if (err) {
free_nsproxy(new_nsproxy);
goto out;
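
With this change setns(2) pulls a struct ns_common from the proc inode and dispatches through ns->ops; the userspace interface is unchanged. For reference, a minimal illustrative caller (requires CAP_SYS_ADMIN in the relevant namespaces) that joins another process's IPC namespace:

#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	char path[64];
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <pid>\n", argv[0]);
		return 1;
	}
	snprintf(path, sizeof(path), "/proc/%s/ns/ipc", argv[1]);

	fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror(path);
		return 1;
	}
	/* CLONE_NEWIPC makes the kernel verify that the fd really refers
	 * to an ipc namespace (the ns->ops->type check in setns()). */
	if (setns(fd, CLONE_NEWIPC) < 0) {
		perror("setns");
		return 1;
	}
	close(fd);
	execlp("ipcs", "ipcs", (char *)NULL);
	perror("execlp");
	return 1;
}
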
diff --git a/kernel/params.c b/kernel/params.c
index db97b791390..0af9b2c4e56 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -603,74 +603,67 @@ static __modinit int add_sysfs_param(struct module_kobject *mk,
const struct kernel_param *kp,
const char *name)
{
- struct module_param_attrs *new;
- struct attribute **attrs;
- int err, num;
+ struct module_param_attrs *new_mp;
+ struct attribute **new_attrs;
+ unsigned int i;
/* We don't bother calling this with invisible parameters. */
BUG_ON(!kp->perm);
if (!mk->mp) {
- num = 0;
- attrs = NULL;
- } else {
- num = mk->mp->num;
- attrs = mk->mp->grp.attrs;
+ /* First allocation. */
+ mk->mp = kzalloc(sizeof(*mk->mp), GFP_KERNEL);
+ if (!mk->mp)
+ return -ENOMEM;
+ mk->mp->grp.name = "parameters";
+ /* NULL-terminated attribute array. */
+ mk->mp->grp.attrs = kzalloc(sizeof(mk->mp->grp.attrs[0]),
+ GFP_KERNEL);
+ /* Caller will cleanup via free_module_param_attrs */
+ if (!mk->mp->grp.attrs)
+ return -ENOMEM;
}
- /* Enlarge. */
- new = krealloc(mk->mp,
- sizeof(*mk->mp) + sizeof(mk->mp->attrs[0]) * (num+1),
- GFP_KERNEL);
- if (!new) {
- kfree(attrs);
- err = -ENOMEM;
- goto fail;
- }
- /* Despite looking like the typical realloc() bug, this is safe.
- * We *want* the old 'attrs' to be freed either way, and we'll store
- * the new one in the success case. */
- attrs = krealloc(attrs, sizeof(new->grp.attrs[0])*(num+2), GFP_KERNEL);
- if (!attrs) {
- err = -ENOMEM;
- goto fail_free_new;
- }
+ /* Enlarge allocations. */
+ new_mp = krealloc(mk->mp,
+ sizeof(*mk->mp) +
+ sizeof(mk->mp->attrs[0]) * (mk->mp->num + 1),
+ GFP_KERNEL);
+ if (!new_mp)
+ return -ENOMEM;
+ mk->mp = new_mp;
- /* Sysfs wants everything zeroed. */
- memset(new, 0, sizeof(*new));
- memset(&new->attrs[num], 0, sizeof(new->attrs[num]));
- memset(&attrs[num], 0, sizeof(attrs[num]));
- new->grp.name = "parameters";
- new->grp.attrs = attrs;
+ /* Extra pointer for NULL terminator */
+ new_attrs = krealloc(mk->mp->grp.attrs,
+ sizeof(mk->mp->grp.attrs[0]) * (mk->mp->num + 2),
+ GFP_KERNEL);
+ if (!new_attrs)
+ return -ENOMEM;
+ mk->mp->grp.attrs = new_attrs;
/* Tack new one on the end. */
- sysfs_attr_init(&new->attrs[num].mattr.attr);
- new->attrs[num].param = kp;
- new->attrs[num].mattr.show = param_attr_show;
- new->attrs[num].mattr.store = param_attr_store;
- new->attrs[num].mattr.attr.name = (char *)name;
- new->attrs[num].mattr.attr.mode = kp->perm;
- new->num = num+1;
+ sysfs_attr_init(&mk->mp->attrs[mk->mp->num].mattr.attr);
+ mk->mp->attrs[mk->mp->num].param = kp;
+ mk->mp->attrs[mk->mp->num].mattr.show = param_attr_show;
+ /* Do not allow runtime DAC changes to make param writable. */
+ if ((kp->perm & (S_IWUSR | S_IWGRP | S_IWOTH)) != 0)
+ mk->mp->attrs[mk->mp->num].mattr.store = param_attr_store;
+ mk->mp->attrs[mk->mp->num].mattr.attr.name = (char *)name;
+ mk->mp->attrs[mk->mp->num].mattr.attr.mode = kp->perm;
+ mk->mp->num++;
/* Fix up all the pointers, since krealloc can move us */
- for (num = 0; num < new->num; num++)
- new->grp.attrs[num] = &new->attrs[num].mattr.attr;
- new->grp.attrs[num] = NULL;
-
- mk->mp = new;
+ for (i = 0; i < mk->mp->num; i++)
+ mk->mp->grp.attrs[i] = &mk->mp->attrs[i].mattr.attr;
+ mk->mp->grp.attrs[mk->mp->num] = NULL;
return 0;
-
-fail_free_new:
- kfree(new);
-fail:
- mk->mp = NULL;
- return err;
}
#ifdef CONFIG_MODULES
static void free_module_param_attrs(struct module_kobject *mk)
{
- kfree(mk->mp->grp.attrs);
+ if (mk->mp)
+ kfree(mk->mp->grp.attrs);
kfree(mk->mp);
mk->mp = NULL;
}
@@ -695,8 +688,10 @@ int module_param_sysfs_setup(struct module *mod,
if (kparam[i].perm == 0)
continue;
err = add_sysfs_param(&mod->mkobj, &kparam[i], kparam[i].name);
- if (err)
+ if (err) {
+ free_module_param_attrs(&mod->mkobj);
return err;
+ }
params = true;
}
diff --git a/kernel/pid.c b/kernel/pid.c
index 82430c858d6..cd36a5e0d17 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -79,7 +79,10 @@ struct pid_namespace init_pid_ns = {
.level = 0,
.child_reaper = &init_task,
.user_ns = &init_user_ns,
- .proc_inum = PROC_PID_INIT_INO,
+ .ns.inum = PROC_PID_INIT_INO,
+#ifdef CONFIG_PID_NS
+ .ns.ops = &pidns_operations,
+#endif
};
EXPORT_SYMBOL_GPL(init_pid_ns);
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index bc6d6a89b6e..a65ba137fd1 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -105,9 +105,10 @@ static struct pid_namespace *create_pid_namespace(struct user_namespace *user_ns
if (ns->pid_cachep == NULL)
goto out_free_map;
- err = proc_alloc_inum(&ns->proc_inum);
+ err = ns_alloc_inum(&ns->ns);
if (err)
goto out_free_map;
+ ns->ns.ops = &pidns_operations;
kref_init(&ns->kref);
ns->level = level;
@@ -142,7 +143,7 @@ static void destroy_pid_namespace(struct pid_namespace *ns)
{
int i;
- proc_free_inum(ns->proc_inum);
+ ns_free_inum(&ns->ns);
for (i = 0; i < PIDMAP_ENTRIES; i++)
kfree(ns->pidmap[i].page);
put_user_ns(ns->user_ns);
@@ -333,7 +334,12 @@ int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
return 0;
}
-static void *pidns_get(struct task_struct *task)
+static inline struct pid_namespace *to_pid_ns(struct ns_common *ns)
+{
+ return container_of(ns, struct pid_namespace, ns);
+}
+
+static struct ns_common *pidns_get(struct task_struct *task)
{
struct pid_namespace *ns;
@@ -343,18 +349,18 @@ static void *pidns_get(struct task_struct *task)
get_pid_ns(ns);
rcu_read_unlock();
- return ns;
+ return ns ? &ns->ns : NULL;
}
-static void pidns_put(void *ns)
+static void pidns_put(struct ns_common *ns)
{
- put_pid_ns(ns);
+ put_pid_ns(to_pid_ns(ns));
}
-static int pidns_install(struct nsproxy *nsproxy, void *ns)
+static int pidns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
struct pid_namespace *active = task_active_pid_ns(current);
- struct pid_namespace *ancestor, *new = ns;
+ struct pid_namespace *ancestor, *new = to_pid_ns(ns);
if (!ns_capable(new->user_ns, CAP_SYS_ADMIN) ||
!ns_capable(current_user_ns(), CAP_SYS_ADMIN))
@@ -382,19 +388,12 @@ static int pidns_install(struct nsproxy *nsproxy, void *ns)
return 0;
}
-static unsigned int pidns_inum(void *ns)
-{
- struct pid_namespace *pid_ns = ns;
- return pid_ns->proc_inum;
-}
-
const struct proc_ns_operations pidns_operations = {
.name = "pid",
.type = CLONE_NEWPID,
.get = pidns_get,
.put = pidns_put,
.install = pidns_install,
- .inum = pidns_inum,
};
static __init int pid_namespaces_init(void)
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 6e7708c2c21..48b28d387c7 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -94,7 +94,7 @@ config PM_STD_PARTITION
config PM_SLEEP
def_bool y
depends on SUSPEND || HIBERNATE_CALLBACKS
- select PM_RUNTIME
+ select PM
config PM_SLEEP_SMP
def_bool y
@@ -130,23 +130,19 @@ config PM_WAKELOCKS_GC
depends on PM_WAKELOCKS
default y
-config PM_RUNTIME
- bool "Run-time PM core functionality"
+config PM
+ bool "Device power management core functionality"
---help---
Enable functionality allowing I/O devices to be put into energy-saving
- (low power) states at run time (or autosuspended) after a specified
- period of inactivity and woken up in response to a hardware-generated
+ (low power) states, for example after a specified period of inactivity
+ (autosuspended), and woken up in response to a hardware-generated
wake-up event or a driver's request.
Hardware support is generally required for this functionality to work
and the bus type drivers of the buses the devices are on are
- responsible for the actual handling of the autosuspend requests and
+ responsible for the actual handling of device suspend requests and
wake-up events.
-config PM
- def_bool y
- depends on PM_SLEEP || PM_RUNTIME
-
config PM_DEBUG
bool "Power Management Debug Support"
depends on PM
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 7c54ff79afd..137c7f69b26 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -623,6 +623,13 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
+ {
+ .procname = "tracepoint_printk",
+ .data = &tracepoint_printk,
+ .maxlen = sizeof(tracepoint_printk),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
#endif
#ifdef CONFIG_KEXEC
{
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 4d54b754058..1363d58f07e 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -847,7 +847,6 @@ void tick_nohz_idle_enter(void)
local_irq_enable();
}
-EXPORT_SYMBOL_GPL(tick_nohz_idle_enter);
/**
* tick_nohz_irq_exit - update next tick event from interrupt exit
@@ -974,7 +973,6 @@ void tick_nohz_idle_exit(void)
local_irq_enable();
}
-EXPORT_SYMBOL_GPL(tick_nohz_idle_exit);
static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
{
diff --git a/kernel/time/time.c b/kernel/time/time.c
index 65015ff2f07..6390517e77d 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -741,6 +741,7 @@ u64 nsecs_to_jiffies64(u64 n)
return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ);
#endif
}
+EXPORT_SYMBOL(nsecs_to_jiffies64);
/**
* nsecs_to_jiffies - Convert nsecs in u64 to jiffies
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 67d6369ddf8..979ccde2672 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -55,7 +55,7 @@ obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
obj-$(CONFIG_EVENT_TRACING) += trace_events_trigger.o
obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
obj-$(CONFIG_TRACEPOINTS) += power-traces.o
-ifeq ($(CONFIG_PM_RUNTIME),y)
+ifeq ($(CONFIG_PM),y)
obj-$(CONFIG_TRACEPOINTS) += rpm-traces.o
endif
ifeq ($(CONFIG_TRACING),y)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ab76b7bcb36..2e767972e99 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -63,6 +63,10 @@ static bool __read_mostly tracing_selftest_running;
*/
bool __read_mostly tracing_selftest_disabled;
+/* Pipe tracepoints to printk */
+struct trace_iterator *tracepoint_print_iter;
+int tracepoint_printk;
+
/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
{ }
@@ -193,6 +197,13 @@ static int __init set_trace_boot_clock(char *str)
}
__setup("trace_clock=", set_trace_boot_clock);
+static int __init set_tracepoint_printk(char *str)
+{
+ if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
+ tracepoint_printk = 1;
+ return 1;
+}
+__setup("tp_printk", set_tracepoint_printk);
unsigned long long ns2usecs(cycle_t nsec)
{
@@ -6898,6 +6909,19 @@ out:
return ret;
}
+void __init trace_init(void)
+{
+ if (tracepoint_printk) {
+ tracepoint_print_iter =
+ kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
+ if (WARN_ON(!tracepoint_print_iter))
+ tracepoint_printk = 0;
+ }
+ tracer_alloc_buffers();
+ init_ftrace_syscalls();
+ trace_event_init();
+}
+
__init static int clear_boot_tracer(void)
{
/*
@@ -6917,6 +6941,5 @@ __init static int clear_boot_tracer(void)
return 0;
}
-early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 3255dfb054a..8de48bac1ce 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -1301,4 +1301,18 @@ int perf_ftrace_event_register(struct ftrace_event_call *call,
#define perf_ftrace_event_register NULL
#endif
+#ifdef CONFIG_FTRACE_SYSCALLS
+void init_ftrace_syscalls(void);
+#else
+static inline void init_ftrace_syscalls(void) { }
+#endif
+
+#ifdef CONFIG_EVENT_TRACING
+void trace_event_init(void);
+#else
+static inline void __init trace_event_init(void) { }
+#endif
+
+extern struct trace_iterator *tracepoint_print_iter;
+
#endif /* _LINUX_KERNEL_TRACE_H */
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index d0e4f92b5eb..366a78a3e61 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -212,8 +212,40 @@ void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer,
}
EXPORT_SYMBOL_GPL(ftrace_event_buffer_reserve);
+static DEFINE_SPINLOCK(tracepoint_iter_lock);
+
+static void output_printk(struct ftrace_event_buffer *fbuffer)
+{
+ struct ftrace_event_call *event_call;
+ struct trace_event *event;
+ unsigned long flags;
+ struct trace_iterator *iter = tracepoint_print_iter;
+
+ if (!iter)
+ return;
+
+ event_call = fbuffer->ftrace_file->event_call;
+ if (!event_call || !event_call->event.funcs ||
+ !event_call->event.funcs->trace)
+ return;
+
+ event = &fbuffer->ftrace_file->event_call->event;
+
+ spin_lock_irqsave(&tracepoint_iter_lock, flags);
+ trace_seq_init(&iter->seq);
+ iter->ent = fbuffer->entry;
+ event_call->event.funcs->trace(iter, 0, event);
+ trace_seq_putc(&iter->seq, 0);
+ printk("%s", iter->seq.buffer);
+
+ spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
+}
+
void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer)
{
+ if (tracepoint_printk)
+ output_printk(fbuffer);
+
event_trigger_unlock_commit(fbuffer->ftrace_file, fbuffer->buffer,
fbuffer->event, fbuffer->entry,
fbuffer->flags, fbuffer->pc);
@@ -2480,8 +2512,14 @@ static __init int event_trace_init(void)
#endif
return 0;
}
-early_initcall(event_trace_memsetup);
-core_initcall(event_trace_enable);
+
+void __init trace_event_init(void)
+{
+ event_trace_memsetup();
+ init_ftrace_syscalls();
+ event_trace_enable();
+}
+
fs_initcall(event_trace_init);
#ifdef CONFIG_FTRACE_STARTUP_TEST
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index dfe00a4f3f3..c6ee36fcbf9 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -514,7 +514,7 @@ unsigned long __init __weak arch_syscall_addr(int nr)
return (unsigned long)sys_call_table[nr];
}
-static int __init init_ftrace_syscalls(void)
+void __init init_ftrace_syscalls(void)
{
struct syscall_metadata *meta;
unsigned long addr;
@@ -524,7 +524,7 @@ static int __init init_ftrace_syscalls(void)
GFP_KERNEL);
if (!syscalls_metadata) {
WARN_ON(1);
- return -ENOMEM;
+ return;
}
for (i = 0; i < NR_syscalls; i++) {
@@ -536,10 +536,7 @@ static int __init init_ftrace_syscalls(void)
meta->syscall_nr = i;
syscalls_metadata[i] = meta;
}
-
- return 0;
}
-early_initcall(init_ftrace_syscalls);
#ifdef CONFIG_PERF_EVENTS
diff --git a/kernel/uid16.c b/kernel/uid16.c
index 602e5bbbcef..d58cc4d8f0d 100644
--- a/kernel/uid16.c
+++ b/kernel/uid16.c
@@ -176,7 +176,7 @@ SYSCALL_DEFINE2(setgroups16, int, gidsetsize, old_gid_t __user *, grouplist)
struct group_info *group_info;
int retval;
- if (!ns_capable(current_user_ns(), CAP_SETGID))
+ if (!may_setgroups())
return -EPERM;
if ((unsigned)gidsetsize > NGROUPS_MAX)
return -EINVAL;
diff --git a/kernel/user.c b/kernel/user.c
index 4efa39350e4..b069ccbfb0b 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -50,7 +50,11 @@ struct user_namespace init_user_ns = {
.count = ATOMIC_INIT(3),
.owner = GLOBAL_ROOT_UID,
.group = GLOBAL_ROOT_GID,
- .proc_inum = PROC_USER_INIT_INO,
+ .ns.inum = PROC_USER_INIT_INO,
+#ifdef CONFIG_USER_NS
+ .ns.ops = &userns_operations,
+#endif
+ .flags = USERNS_INIT_FLAGS,
#ifdef CONFIG_PERSISTENT_KEYRINGS
.persistent_keyring_register_sem =
__RWSEM_INITIALIZER(init_user_ns.persistent_keyring_register_sem),
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index aa312b0dc3e..4109f832068 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -24,6 +24,7 @@
#include <linux/fs_struct.h>
static struct kmem_cache *user_ns_cachep __read_mostly;
+static DEFINE_MUTEX(userns_state_mutex);
static bool new_idmap_permitted(const struct file *file,
struct user_namespace *ns, int cap_setid,
@@ -86,11 +87,12 @@ int create_user_ns(struct cred *new)
if (!ns)
return -ENOMEM;
- ret = proc_alloc_inum(&ns->proc_inum);
+ ret = ns_alloc_inum(&ns->ns);
if (ret) {
kmem_cache_free(user_ns_cachep, ns);
return ret;
}
+ ns->ns.ops = &userns_operations;
atomic_set(&ns->count, 1);
/* Leave the new->user_ns reference with the new user namespace. */
@@ -99,6 +101,11 @@ int create_user_ns(struct cred *new)
ns->owner = owner;
ns->group = group;
+ /* Inherit USERNS_SETGROUPS_ALLOWED from our parent */
+ mutex_lock(&userns_state_mutex);
+ ns->flags = parent_ns->flags;
+ mutex_unlock(&userns_state_mutex);
+
set_cred_user_ns(new, ns);
#ifdef CONFIG_PERSISTENT_KEYRINGS
@@ -136,7 +143,7 @@ void free_user_ns(struct user_namespace *ns)
#ifdef CONFIG_PERSISTENT_KEYRINGS
key_put(ns->persistent_keyring_register);
#endif
- proc_free_inum(ns->proc_inum);
+ ns_free_inum(&ns->ns);
kmem_cache_free(user_ns_cachep, ns);
ns = parent;
} while (atomic_dec_and_test(&parent->count));
@@ -583,9 +590,6 @@ static bool mappings_overlap(struct uid_gid_map *new_map,
return false;
}
-
-static DEFINE_MUTEX(id_map_mutex);
-
static ssize_t map_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos,
int cap_setid,
@@ -602,7 +606,7 @@ static ssize_t map_write(struct file *file, const char __user *buf,
ssize_t ret = -EINVAL;
/*
- * The id_map_mutex serializes all writes to any given map.
+ * The userns_state_mutex serializes all writes to any given map.
*
* Any map is only ever written once.
*
@@ -620,7 +624,7 @@ static ssize_t map_write(struct file *file, const char __user *buf,
* order and smp_rmb() is guaranteed that we don't have crazy
* architectures returning stale data.
*/
- mutex_lock(&id_map_mutex);
+ mutex_lock(&userns_state_mutex);
ret = -EPERM;
/* Only allow one successful write to the map */
@@ -640,7 +644,7 @@ static ssize_t map_write(struct file *file, const char __user *buf,
if (!page)
goto out;
- /* Only allow <= page size writes at the beginning of the file */
+ /* Only allow < page size writes at the beginning of the file */
ret = -EINVAL;
if ((*ppos != 0) || (count >= PAGE_SIZE))
goto out;
@@ -750,7 +754,7 @@ static ssize_t map_write(struct file *file, const char __user *buf,
*ppos = count;
ret = count;
out:
- mutex_unlock(&id_map_mutex);
+ mutex_unlock(&userns_state_mutex);
if (page)
free_page(page);
return ret;
@@ -812,16 +816,21 @@ static bool new_idmap_permitted(const struct file *file,
struct user_namespace *ns, int cap_setid,
struct uid_gid_map *new_map)
{
- /* Allow mapping to your own filesystem ids */
- if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1)) {
+ const struct cred *cred = file->f_cred;
+ /* Don't allow mappings that would allow anything that wouldn't
+ * be allowed without the establishment of unprivileged mappings.
+ */
+ if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1) &&
+ uid_eq(ns->owner, cred->euid)) {
u32 id = new_map->extent[0].lower_first;
if (cap_setid == CAP_SETUID) {
kuid_t uid = make_kuid(ns->parent, id);
- if (uid_eq(uid, file->f_cred->fsuid))
+ if (uid_eq(uid, cred->euid))
return true;
} else if (cap_setid == CAP_SETGID) {
kgid_t gid = make_kgid(ns->parent, id);
- if (gid_eq(gid, file->f_cred->fsgid))
+ if (!(ns->flags & USERNS_SETGROUPS_ALLOWED) &&
+ gid_eq(gid, cred->egid))
return true;
}
}
@@ -841,7 +850,106 @@ static bool new_idmap_permitted(const struct file *file,
return false;
}
-static void *userns_get(struct task_struct *task)
+int proc_setgroups_show(struct seq_file *seq, void *v)
+{
+ struct user_namespace *ns = seq->private;
+ unsigned long userns_flags = ACCESS_ONCE(ns->flags);
+
+ seq_printf(seq, "%s\n",
+ (userns_flags & USERNS_SETGROUPS_ALLOWED) ?
+ "allow" : "deny");
+ return 0;
+}
+
+ssize_t proc_setgroups_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *seq = file->private_data;
+ struct user_namespace *ns = seq->private;
+ char kbuf[8], *pos;
+ bool setgroups_allowed;
+ ssize_t ret;
+
+ /* Only allow a very narrow range of strings to be written */
+ ret = -EINVAL;
+ if ((*ppos != 0) || (count >= sizeof(kbuf)))
+ goto out;
+
+ /* What was written? */
+ ret = -EFAULT;
+ if (copy_from_user(kbuf, buf, count))
+ goto out;
+ kbuf[count] = '\0';
+ pos = kbuf;
+
+ /* What is being requested? */
+ ret = -EINVAL;
+ if (strncmp(pos, "allow", 5) == 0) {
+ pos += 5;
+ setgroups_allowed = true;
+ }
+ else if (strncmp(pos, "deny", 4) == 0) {
+ pos += 4;
+ setgroups_allowed = false;
+ }
+ else
+ goto out;
+
+ /* Verify there is not trailing junk on the line */
+ pos = skip_spaces(pos);
+ if (*pos != '\0')
+ goto out;
+
+ ret = -EPERM;
+ mutex_lock(&userns_state_mutex);
+ if (setgroups_allowed) {
+ /* Enabling setgroups after setgroups has been disabled
+ * is not allowed.
+ */
+ if (!(ns->flags & USERNS_SETGROUPS_ALLOWED))
+ goto out_unlock;
+ } else {
+ /* Permanently disabling setgroups after setgroups has
+ * been enabled by writing the gid_map is not allowed.
+ */
+ if (ns->gid_map.nr_extents != 0)
+ goto out_unlock;
+ ns->flags &= ~USERNS_SETGROUPS_ALLOWED;
+ }
+ mutex_unlock(&userns_state_mutex);
+
+ /* Report a successful write */
+ *ppos = count;
+ ret = count;
+out:
+ return ret;
+out_unlock:
+ mutex_unlock(&userns_state_mutex);
+ goto out;
+}
+
+bool userns_may_setgroups(const struct user_namespace *ns)
+{
+ bool allowed;
+
+ mutex_lock(&userns_state_mutex);
+ /* It is not safe to use setgroups until a gid mapping in
+ * the user namespace has been established.
+ */
+ allowed = ns->gid_map.nr_extents != 0;
+ /* Is setgroups allowed? */
+ allowed = allowed && (ns->flags & USERNS_SETGROUPS_ALLOWED);
+ mutex_unlock(&userns_state_mutex);
+
+ return allowed;
+}
+
+static inline struct user_namespace *to_user_ns(struct ns_common *ns)
+{
+ return container_of(ns, struct user_namespace, ns);
+}
+
+static struct ns_common *userns_get(struct task_struct *task)
{
struct user_namespace *user_ns;
@@ -849,17 +957,17 @@ static void *userns_get(struct task_struct *task)
user_ns = get_user_ns(__task_cred(task)->user_ns);
rcu_read_unlock();
- return user_ns;
+ return user_ns ? &user_ns->ns : NULL;
}
-static void userns_put(void *ns)
+static void userns_put(struct ns_common *ns)
{
- put_user_ns(ns);
+ put_user_ns(to_user_ns(ns));
}
-static int userns_install(struct nsproxy *nsproxy, void *ns)
+static int userns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
- struct user_namespace *user_ns = ns;
+ struct user_namespace *user_ns = to_user_ns(ns);
struct cred *cred;
/* Don't allow gaining capabilities by reentering
@@ -888,19 +996,12 @@ static int userns_install(struct nsproxy *nsproxy, void *ns)
return commit_creds(cred);
}
-static unsigned int userns_inum(void *ns)
-{
- struct user_namespace *user_ns = ns;
- return user_ns->proc_inum;
-}
-
const struct proc_ns_operations userns_operations = {
.name = "user",
.type = CLONE_NEWUSER,
.get = userns_get,
.put = userns_put,
.install = userns_install,
- .inum = userns_inum,
};
static __init int user_namespaces_init(void)
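
After this change, an unprivileged process that creates a user namespace must write "deny" to /proc/<pid>/setgroups before it may install a gid_map that maps its own egid: new_idmap_permitted() now requires USERNS_SETGROUPS_ALLOWED to be cleared for that case, and may_setgroups() refuses setgroups(2) until a gid mapping exists. A rough single-process illustration with error handling trimmed:

#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void write_file(const char *path, const char *buf)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0 || write(fd, buf, strlen(buf)) < 0)
		perror(path);
	if (fd >= 0)
		close(fd);
}

int main(void)
{
	unsigned int uid = geteuid();
	unsigned int gid = getegid();
	char map[64];

	if (unshare(CLONE_NEWUSER) < 0) {
		perror("unshare(CLONE_NEWUSER)");
		return 1;
	}

	snprintf(map, sizeof(map), "0 %u 1", uid);
	write_file("/proc/self/uid_map", map);

	/* Without this, the gid_map write below is rejected with -EPERM. */
	write_file("/proc/self/setgroups", "deny");

	snprintf(map, sizeof(map), "0 %u 1", gid);
	write_file("/proc/self/gid_map", map);

	printf("in new userns: uid=%u gid=%u\n",
	       (unsigned int)getuid(), (unsigned int)getgid());
	return 0;
}
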
diff --git a/kernel/utsname.c b/kernel/utsname.c
index 883aaaa7de8..831ea710823 100644
--- a/kernel/utsname.c
+++ b/kernel/utsname.c
@@ -42,12 +42,14 @@ static struct uts_namespace *clone_uts_ns(struct user_namespace *user_ns,
if (!ns)
return ERR_PTR(-ENOMEM);
- err = proc_alloc_inum(&ns->proc_inum);
+ err = ns_alloc_inum(&ns->ns);
if (err) {
kfree(ns);
return ERR_PTR(err);
}
+ ns->ns.ops = &utsns_operations;
+
down_read(&uts_sem);
memcpy(&ns->name, &old_ns->name, sizeof(ns->name));
ns->user_ns = get_user_ns(user_ns);
@@ -84,11 +86,16 @@ void free_uts_ns(struct kref *kref)
ns = container_of(kref, struct uts_namespace, kref);
put_user_ns(ns->user_ns);
- proc_free_inum(ns->proc_inum);
+ ns_free_inum(&ns->ns);
kfree(ns);
}
-static void *utsns_get(struct task_struct *task)
+static inline struct uts_namespace *to_uts_ns(struct ns_common *ns)
+{
+ return container_of(ns, struct uts_namespace, ns);
+}
+
+static struct ns_common *utsns_get(struct task_struct *task)
{
struct uts_namespace *ns = NULL;
struct nsproxy *nsproxy;
@@ -101,17 +108,17 @@ static void *utsns_get(struct task_struct *task)
}
task_unlock(task);
- return ns;
+ return ns ? &ns->ns : NULL;
}
-static void utsns_put(void *ns)
+static void utsns_put(struct ns_common *ns)
{
- put_uts_ns(ns);
+ put_uts_ns(to_uts_ns(ns));
}
-static int utsns_install(struct nsproxy *nsproxy, void *new)
+static int utsns_install(struct nsproxy *nsproxy, struct ns_common *new)
{
- struct uts_namespace *ns = new;
+ struct uts_namespace *ns = to_uts_ns(new);
if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN) ||
!ns_capable(current_user_ns(), CAP_SYS_ADMIN))
@@ -123,18 +130,10 @@ static int utsns_install(struct nsproxy *nsproxy, void *new)
return 0;
}
-static unsigned int utsns_inum(void *vp)
-{
- struct uts_namespace *ns = vp;
-
- return ns->proc_inum;
-}
-
const struct proc_ns_operations utsns_operations = {
.name = "uts",
.type = CLONE_NEWUTS,
.get = utsns_get,
.put = utsns_put,
.install = utsns_install,
- .inum = utsns_inum,
};
diff --git a/lib/bug.c b/lib/bug.c
index d1d7c787890..0c3bd9552b6 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -64,16 +64,22 @@ static LIST_HEAD(module_bug_list);
static const struct bug_entry *module_find_bug(unsigned long bugaddr)
{
struct module *mod;
+ const struct bug_entry *bug = NULL;
- list_for_each_entry(mod, &module_bug_list, bug_list) {
- const struct bug_entry *bug = mod->bug_table;
+ rcu_read_lock();
+ list_for_each_entry_rcu(mod, &module_bug_list, bug_list) {
unsigned i;
+ bug = mod->bug_table;
for (i = 0; i < mod->num_bugs; ++i, ++bug)
if (bugaddr == bug_addr(bug))
- return bug;
+ goto out;
}
- return NULL;
+ bug = NULL;
+out:
+ rcu_read_unlock();
+
+ return bug;
}
void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
@@ -99,13 +105,15 @@ void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
* Strictly speaking this should have a spinlock to protect against
* traversals, but since we only traverse on BUG()s, a spinlock
* could potentially lead to deadlock and thus be counter-productive.
+ * Thus, this uses RCU to safely manipulate the bug list, since BUG
+ * must run in non-interruptive state.
*/
- list_add(&mod->bug_list, &module_bug_list);
+ list_add_rcu(&mod->bug_list, &module_bug_list);
}
void module_bug_cleanup(struct module *mod)
{
- list_del(&mod->bug_list);
+ list_del_rcu(&mod->bug_list);
}
#else
diff --git a/lib/show_mem.c b/lib/show_mem.c
index 5e256271b47..7de89f4a36c 100644
--- a/lib/show_mem.c
+++ b/lib/show_mem.c
@@ -8,6 +8,7 @@
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/quicklist.h>
+#include <linux/cma.h>
void show_mem(unsigned int filter)
{
@@ -38,7 +39,12 @@ void show_mem(unsigned int filter)
printk("%lu pages RAM\n", total);
printk("%lu pages HighMem/MovableOnly\n", highmem);
+#ifdef CONFIG_CMA
+ printk("%lu pages reserved\n", (reserved - totalcma_pages));
+ printk("%lu pages cma reserved\n", totalcma_pages);
+#else
printk("%lu pages reserved\n", reserved);
+#endif
#ifdef CONFIG_QUICKLIST
printk("%lu pages in pagetable cache\n",
quicklist_total_size());
diff --git a/mm/cma.c b/mm/cma.c
index f8917629cbd..a85ae28709a 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -337,6 +337,7 @@ int __init cma_declare_contiguous(phys_addr_t base,
if (ret)
goto err;
+ totalcma_pages += (size / PAGE_SIZE);
pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
&base);
return 0;
diff --git a/mm/filemap.c b/mm/filemap.c
index e8905bc3cbd..bd8543c6508 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2464,7 +2464,7 @@ ssize_t generic_perform_write(struct file *file,
/*
* Copies from kernel address space cannot fail (NFSD is a big user).
*/
- if (segment_eq(get_fs(), KERNEL_DS))
+ if (!iter_is_iovec(i))
flags |= AOP_FLAG_UNINTERRUPTIBLE;
do {
diff --git a/mm/fremap.c b/mm/fremap.c
index 11ef7ec40d1..2805d71cf47 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -37,7 +37,7 @@ static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
if (pte_present(pte)) {
flush_cache_page(vma, addr, pte_pfn(pte));
- pte = ptep_clear_flush(vma, addr, ptep);
+ pte = ptep_clear_flush_notify(vma, addr, ptep);
page = vm_normal_page(vma, addr, pte);
if (page) {
if (pte_dirty(pte))
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 46f96c23cc2..817a875f2b8 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1035,7 +1035,7 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
goto out_free_pages;
VM_BUG_ON_PAGE(!PageHead(page), page);
- pmdp_clear_flush(vma, haddr, pmd);
+ pmdp_clear_flush_notify(vma, haddr, pmd);
/* leave pmd empty until pte is filled */
pgtable = pgtable_trans_huge_withdraw(mm, pmd);
@@ -1178,7 +1178,7 @@ alloc:
pmd_t entry;
entry = mk_huge_pmd(new_page, vma->vm_page_prot);
entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
- pmdp_clear_flush(vma, haddr, pmd);
+ pmdp_clear_flush_notify(vma, haddr, pmd);
page_add_new_anon_rmap(new_page, vma, haddr);
mem_cgroup_commit_charge(new_page, memcg, false);
lru_cache_add_active_or_unevictable(new_page, vma);
@@ -1512,7 +1512,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
pmd_t entry;
ret = 1;
if (!prot_numa) {
- entry = pmdp_get_and_clear(mm, addr, pmd);
+ entry = pmdp_get_and_clear_notify(mm, addr, pmd);
if (pmd_numa(entry))
entry = pmd_mknonnuma(entry);
entry = pmd_modify(entry, newprot);
@@ -1644,6 +1644,7 @@ static int __split_huge_page_splitting(struct page *page,
* serialize against split_huge_page*.
*/
pmdp_splitting_flush(vma, address, pmd);
+
ret = 1;
spin_unlock(ptl);
}
@@ -2834,7 +2835,7 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
pmd_t _pmd;
int i;
- pmdp_clear_flush(vma, haddr, pmd);
+ pmdp_clear_flush_notify(vma, haddr, pmd);
/* leave pmd empty until pte is filled */
pgtable = pgtable_trans_huge_withdraw(mm, pmd);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 47f6070d7c4..85032de5e20 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2598,8 +2598,11 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
}
set_huge_pte_at(dst, addr, dst_pte, entry);
} else {
- if (cow)
+ if (cow) {
huge_ptep_set_wrprotect(src, addr, src_pte);
+ mmu_notifier_invalidate_range(src, mmun_start,
+ mmun_end);
+ }
entry = huge_ptep_get(src_pte);
ptepage = pte_page(entry);
get_page(ptepage);
@@ -2901,6 +2904,7 @@ retry_avoidcopy:
/* Break COW */
huge_ptep_clear_flush(vma, address, ptep);
+ mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
set_huge_pte_at(mm, address, ptep,
make_huge_pte(vma, new_page, 1));
page_remove_rmap(old_page);
@@ -3376,6 +3380,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
* and that page table be reused and filled with junk.
*/
flush_tlb_range(vma, start, end);
+ mmu_notifier_invalidate_range(mm, start, end);
i_mmap_unlock_write(vma->vm_file->f_mapping);
mmu_notifier_invalidate_range_end(mm, start, end);
diff --git a/mm/ksm.c b/mm/ksm.c
index 6b2e337bc03..d247efab507 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -892,7 +892,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
* this assure us that no O_DIRECT can happen after the check
* or in the middle of the check.
*/
- entry = ptep_clear_flush(vma, addr, ptep);
+ entry = ptep_clear_flush_notify(vma, addr, ptep);
/*
* Check that no O_DIRECT or similar I/O is in progress on the
* page
@@ -960,7 +960,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
page_add_anon_rmap(kpage, vma, addr);
flush_cache_page(vma, addr, pte_pfn(*ptep));
- ptep_clear_flush(vma, addr, ptep);
+ ptep_clear_flush_notify(vma, addr, ptep);
set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
page_remove_rmap(page);
diff --git a/mm/madvise.c b/mm/madvise.c
index 0938b30da4a..a271adc9328 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -326,7 +326,7 @@ static long madvise_remove(struct vm_area_struct *vma,
*/
get_file(f);
up_read(&current->mm->mmap_sem);
- error = do_fallocate(f,
+ error = vfs_fallocate(f,
FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
offset, end - start);
fput(f);
diff --git a/mm/memory.c b/mm/memory.c
index fbf74112de5..d8aebc52265 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -235,10 +235,8 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
- if (!tlb->end)
- return;
-
tlb_flush(tlb);
+ mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
tlb_table_flush(tlb);
#endif
@@ -258,6 +256,9 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb)
void tlb_flush_mmu(struct mmu_gather *tlb)
{
+ if (!tlb->end)
+ return;
+
tlb_flush_mmu_tlbonly(tlb);
tlb_flush_mmu_free(tlb);
}
@@ -2220,7 +2221,7 @@ gotten:
* seen in the presence of one thread doing SMC and another
* thread doing COW.
*/
- ptep_clear_flush(vma, address, page_table);
+ ptep_clear_flush_notify(vma, address, page_table);
page_add_new_anon_rmap(new_page, vma, address);
mem_cgroup_commit_charge(new_page, memcg, false);
lru_cache_add_active_or_unevictable(new_page, vma);
@@ -2995,6 +2996,12 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
if (set_page_dirty(fault_page))
dirtied = 1;
+ /*
+ * Take a local copy of the address_space - page.mapping may be zeroed
+ * by truncate after unlock_page(). The address_space itself remains
+ * pinned by vma->vm_file's reference. We rely on unlock_page()'s
+ * release semantics to prevent the compiler from undoing this copying.
+ */
mapping = fault_page->mapping;
unlock_page(fault_page);
if ((dirtied || vma->vm_ops->page_mkwrite) && mapping) {
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index e58725aff7e..0e0961b8c39 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -162,12 +162,6 @@ static const struct mempolicy_operations {
enum mpol_rebind_step step);
} mpol_ops[MPOL_MAX];
-/* Check that the nodemask contains at least one populated zone */
-static int is_valid_nodemask(const nodemask_t *nodemask)
-{
- return nodes_intersects(*nodemask, node_states[N_MEMORY]);
-}
-
static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
return pol->flags & MPOL_MODE_FLAGS;
@@ -202,7 +196,7 @@ static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
- if (!is_valid_nodemask(nodes))
+ if (nodes_empty(*nodes))
return -EINVAL;
pol->v.nodes = *nodes;
return 0;
@@ -234,7 +228,7 @@ static int mpol_set_nodemask(struct mempolicy *pol,
nodes = NULL; /* explicit local allocation */
else {
if (pol->flags & MPOL_F_RELATIVE_NODES)
- mpol_relative_nodemask(&nsc->mask2, nodes,&nsc->mask1);
+ mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
else
nodes_and(nsc->mask2, *nodes, nsc->mask1);
@@ -1047,10 +1041,6 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
down_read(&mm->mmap_sem);
- err = migrate_vmas(mm, from, to, flags);
- if (err)
- goto out;
-
/*
* Find a 'source' bit set in 'tmp' whose corresponding 'dest'
* bit in 'to' is not also set in 'tmp'. Clear the found 'source'
@@ -1130,7 +1120,6 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
if (err < 0)
break;
}
-out:
up_read(&mm->mmap_sem);
if (err < 0)
return err;
diff --git a/mm/migrate.c b/mm/migrate.c
index 253474c2223..344cdf692fc 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1536,27 +1536,6 @@ out:
return err;
}
-/*
- * Call migration functions in the vma_ops that may prepare
- * memory in a vm for migration. migration functions may perform
- * the migration for vmas that do not have an underlying page struct.
- */
-int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
- const nodemask_t *from, unsigned long flags)
-{
- struct vm_area_struct *vma;
- int err = 0;
-
- for (vma = mm->mmap; vma && !err; vma = vma->vm_next) {
- if (vma->vm_ops && vma->vm_ops->migrate) {
- err = vma->vm_ops->migrate(vma, to, from, flags);
- if (err)
- break;
- }
- }
- return err;
-}
-
#ifdef CONFIG_NUMA_BALANCING
/*
* Returns true if this is a safe migration target node for misplaced NUMA
@@ -1862,7 +1841,7 @@ fail_putback:
*/
flush_cache_range(vma, mmun_start, mmun_end);
page_add_anon_rmap(new_page, vma, mmun_start);
- pmdp_clear_flush(vma, mmun_start, pmd);
+ pmdp_clear_flush_notify(vma, mmun_start, pmd);
set_pmd_at(mm, mmun_start, pmd, entry);
flush_tlb_range(vma, mmun_start, mmun_end);
update_mmu_cache_pmd(vma, address, &entry);
@@ -1870,6 +1849,7 @@ fail_putback:
if (page_count(page) != 2) {
set_pmd_at(mm, mmun_start, pmd, orig_entry);
flush_tlb_range(vma, mmun_start, mmun_end);
+ mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
update_mmu_cache_pmd(vma, address, &entry);
page_remove_rmap(new_page);
goto fail_putback;
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 2c8da9825fe..3b9b3d0741b 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -193,6 +193,16 @@ void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
id = srcu_read_lock(&srcu);
hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
+ /*
+ * Call invalidate_range() here too, so that a subsystem which
+ * already implements invalidate_range() does not also have to
+ * register an invalidate_range_end() call-back. Usually a
+ * subsystem registers either invalidate_range_start()/end() or
+ * invalidate_range(), so this adds no overhead beyond the
+ * pointer check.
+ */
+ if (mn->ops->invalidate_range)
+ mn->ops->invalidate_range(mn, mm, start, end);
if (mn->ops->invalidate_range_end)
mn->ops->invalidate_range_end(mn, mm, start, end);
}
@@ -200,6 +210,21 @@ void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_end);
+void __mmu_notifier_invalidate_range(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ struct mmu_notifier *mn;
+ int id;
+
+ id = srcu_read_lock(&srcu);
+ hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
+ if (mn->ops->invalidate_range)
+ mn->ops->invalidate_range(mn, mm, start, end);
+ }
+ srcu_read_unlock(&srcu, id);
+}
+EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range);
+
static int do_mmu_notifier_register(struct mmu_notifier *mn,
struct mm_struct *mm,
int take_mmap_sem)
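
The new __mmu_notifier_invalidate_range() (and the extra call in _range_end()) follows the usual optional-callback walk: visit every registered notifier and invoke the hook only when the subsystem provided one. A reduced userspace sketch of that pattern; the kernel iterates an SRCU-protected hlist, whereas here a plain array stands in and the ops layout is simplified.

/* "Call the hook only if it is registered" walk, as used by
 * __mmu_notifier_invalidate_range(). A fixed array replaces the
 * SRCU-protected notifier list. */
#include <stdio.h>

struct notifier_ops {
    void (*invalidate_range)(unsigned long start, unsigned long end);
    void (*invalidate_range_end)(unsigned long start, unsigned long end);
};

static void log_range(unsigned long start, unsigned long end)
{
    printf("invalidate_range: %#lx-%#lx\n", start, end);
}

static void log_range_end(unsigned long start, unsigned long end)
{
    printf("invalidate_range_end: %#lx-%#lx\n", start, end);
}

/* One listener implements only invalidate_range(), the other only _end(). */
static const struct notifier_ops listeners[] = {
    { .invalidate_range = log_range },
    { .invalidate_range_end = log_range_end },
};

static void invalidate_range(unsigned long start, unsigned long end)
{
    for (unsigned i = 0; i < sizeof(listeners) / sizeof(listeners[0]); i++)
        if (listeners[i].invalidate_range)
            listeners[i].invalidate_range(start, end);
}

int main(void)
{
    invalidate_range(0x1000, 0x2000);   /* only the first listener fires */
    return 0;
}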
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index fa974d87f60..7633c503a11 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -111,6 +111,7 @@ static DEFINE_SPINLOCK(managed_page_count_lock);
unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
+unsigned long totalcma_pages __read_mostly;
/*
* When calculating the number of globally allowed dirty pages, there
* is a certain number of per-zone reserves that should not be
@@ -5586,7 +5587,7 @@ void __init mem_init_print_info(const char *str)
pr_info("Memory: %luK/%luK available "
"(%luK kernel code, %luK rwdata, %luK rodata, "
- "%luK init, %luK bss, %luK reserved"
+ "%luK init, %luK bss, %luK reserved, %luK cma-reserved"
#ifdef CONFIG_HIGHMEM
", %luK highmem"
#endif
@@ -5594,7 +5595,8 @@ void __init mem_init_print_info(const char *str)
nr_free_pages() << (PAGE_SHIFT-10), physpages << (PAGE_SHIFT-10),
codesize >> 10, datasize >> 10, rosize >> 10,
(init_data_size + init_code_size) >> 10, bss_size >> 10,
- (physpages - totalram_pages) << (PAGE_SHIFT-10),
+ (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT-10),
+ totalcma_pages << (PAGE_SHIFT-10),
#ifdef CONFIG_HIGHMEM
totalhigh_pages << (PAGE_SHIFT-10),
#endif
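
The boot banner now subtracts the CMA pages from the generic reserved figure and reports them separately, converting page counts to KiB with a shift by PAGE_SHIFT - 10. A small sketch of that arithmetic; all figures are invented and 4 KiB pages are assumed.

/* mem_init_print_info() arithmetic after the patch: reserved excludes
 * CMA, and << (PAGE_SHIFT - 10) turns pages into KiB. */
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
    unsigned long physpages      = 262144;   /* 1 GiB of 4 KiB pages */
    unsigned long totalram_pages = 250000;
    unsigned long totalcma_pages = 4096;

    printf("%luK reserved, %luK cma-reserved\n",
           (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT - 10),
           totalcma_pages << (PAGE_SHIFT - 10));
    return 0;
}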
diff --git a/mm/rmap.c b/mm/rmap.c
index c52f43a69ee..45ba250babd 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1380,7 +1380,7 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
/* Nuke the page table entry. */
flush_cache_page(vma, address, pte_pfn(*pte));
- pteval = ptep_clear_flush(vma, address, pte);
+ pteval = ptep_clear_flush_notify(vma, address, pte);
/* If nonlinear, store the file page offset in the pte. */
if (page->index != linear_page_index(vma, address)) {
diff --git a/mm/shmem.c b/mm/shmem.c
index 185836ba53e..73ba1df7c8b 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1536,7 +1536,7 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
* holes of a sparse file, we actually need to allocate those pages,
* and even mark them dirty, so it cannot exceed the max_blocks limit.
*/
- if (segment_eq(get_fs(), KERNEL_DS))
+ if (!iter_is_iovec(to))
sgp = SGP_DIRTY;
index = *ppos >> PAGE_CACHE_SHIFT;
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 4d0a063145e..b72403927aa 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -884,19 +884,6 @@ static struct notifier_block zs_cpu_nb = {
.notifier_call = zs_cpu_notifier
};
-static void zs_unregister_cpu_notifier(void)
-{
- int cpu;
-
- cpu_notifier_register_begin();
-
- for_each_online_cpu(cpu)
- zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
- __unregister_cpu_notifier(&zs_cpu_nb);
-
- cpu_notifier_register_done();
-}
-
static int zs_register_cpu_notifier(void)
{
int cpu, uninitialized_var(ret);
@@ -914,40 +901,28 @@ static int zs_register_cpu_notifier(void)
return notifier_to_errno(ret);
}
-static void init_zs_size_classes(void)
+static void zs_unregister_cpu_notifier(void)
{
- int nr;
+ int cpu;
- nr = (ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / ZS_SIZE_CLASS_DELTA + 1;
- if ((ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) % ZS_SIZE_CLASS_DELTA)
- nr += 1;
+ cpu_notifier_register_begin();
- zs_size_classes = nr;
-}
+ for_each_online_cpu(cpu)
+ zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
+ __unregister_cpu_notifier(&zs_cpu_nb);
-static void __exit zs_exit(void)
-{
-#ifdef CONFIG_ZPOOL
- zpool_unregister_driver(&zs_zpool_driver);
-#endif
- zs_unregister_cpu_notifier();
+ cpu_notifier_register_done();
}
-static int __init zs_init(void)
+static void init_zs_size_classes(void)
{
- int ret = zs_register_cpu_notifier();
-
- if (ret) {
- zs_unregister_cpu_notifier();
- return ret;
- }
+ int nr;
- init_zs_size_classes();
+ nr = (ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / ZS_SIZE_CLASS_DELTA + 1;
+ if ((ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) % ZS_SIZE_CLASS_DELTA)
+ nr += 1;
-#ifdef CONFIG_ZPOOL
- zpool_register_driver(&zs_zpool_driver);
-#endif
- return 0;
+ zs_size_classes = nr;
}
static unsigned int get_maxobj_per_zspage(int size, int pages_per_zspage)
@@ -967,113 +942,101 @@ static bool can_merge(struct size_class *prev, int size, int pages_per_zspage)
return true;
}
+unsigned long zs_get_total_pages(struct zs_pool *pool)
+{
+ return atomic_long_read(&pool->pages_allocated);
+}
+EXPORT_SYMBOL_GPL(zs_get_total_pages);
+
/**
- * zs_create_pool - Creates an allocation pool to work from.
- * @flags: allocation flags used to allocate pool metadata
+ * zs_map_object - get address of allocated object from handle.
+ * @pool: pool from which the object was allocated
+ * @handle: handle returned from zs_malloc
*
- * This function must be called before anything when using
- * the zsmalloc allocator.
+ * Before using an object allocated from zs_malloc, it must be mapped using
+ * this function. When done with the object, it must be unmapped using
+ * zs_unmap_object.
*
- * On success, a pointer to the newly created pool is returned,
- * otherwise NULL.
+ * Only one object can be mapped per cpu at a time. There is no protection
+ * against nested mappings.
+ *
+ * This function returns with preemption and page faults disabled.
*/
-struct zs_pool *zs_create_pool(gfp_t flags)
+void *zs_map_object(struct zs_pool *pool, unsigned long handle,
+ enum zs_mapmode mm)
{
- int i;
- struct zs_pool *pool;
- struct size_class *prev_class = NULL;
+ struct page *page;
+ unsigned long obj_idx, off;
- pool = kzalloc(sizeof(*pool), GFP_KERNEL);
- if (!pool)
- return NULL;
+ unsigned int class_idx;
+ enum fullness_group fg;
+ struct size_class *class;
+ struct mapping_area *area;
+ struct page *pages[2];
- pool->size_class = kcalloc(zs_size_classes, sizeof(struct size_class *),
- GFP_KERNEL);
- if (!pool->size_class) {
- kfree(pool);
- return NULL;
- }
+ BUG_ON(!handle);
/*
- * Iterate reversly, because, size of size_class that we want to use
- * for merging should be larger or equal to current size.
+ * Because we use per-cpu mapping areas shared among the
+ * pools/users, we can't allow mapping in interrupt context
+ * because it can corrupt another user's mappings.
*/
- for (i = zs_size_classes - 1; i >= 0; i--) {
- int size;
- int pages_per_zspage;
- struct size_class *class;
-
- size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
- if (size > ZS_MAX_ALLOC_SIZE)
- size = ZS_MAX_ALLOC_SIZE;
- pages_per_zspage = get_pages_per_zspage(size);
-
- /*
- * size_class is used for normal zsmalloc operation such
- * as alloc/free for that size. Although it is natural that we
- * have one size_class for each size, there is a chance that we
- * can get more memory utilization if we use one size_class for
- * many different sizes whose size_class have same
- * characteristics. So, we makes size_class point to
- * previous size_class if possible.
- */
- if (prev_class) {
- if (can_merge(prev_class, size, pages_per_zspage)) {
- pool->size_class[i] = prev_class;
- continue;
- }
- }
-
- class = kzalloc(sizeof(struct size_class), GFP_KERNEL);
- if (!class)
- goto err;
+ BUG_ON(in_interrupt());
- class->size = size;
- class->index = i;
- class->pages_per_zspage = pages_per_zspage;
- spin_lock_init(&class->lock);
- pool->size_class[i] = class;
+ obj_handle_to_location(handle, &page, &obj_idx);
+ get_zspage_mapping(get_first_page(page), &class_idx, &fg);
+ class = pool->size_class[class_idx];
+ off = obj_idx_to_offset(page, obj_idx, class->size);
- prev_class = class;
+ area = &get_cpu_var(zs_map_area);
+ area->vm_mm = mm;
+ if (off + class->size <= PAGE_SIZE) {
+ /* this object is contained entirely within a page */
+ area->vm_addr = kmap_atomic(page);
+ return area->vm_addr + off;
}
- pool->flags = flags;
-
- return pool;
+ /* this object spans two pages */
+ pages[0] = page;
+ pages[1] = get_next_page(page);
+ BUG_ON(!pages[1]);
-err:
- zs_destroy_pool(pool);
- return NULL;
+ return __zs_map_object(area, pages, off, class->size);
}
-EXPORT_SYMBOL_GPL(zs_create_pool);
+EXPORT_SYMBOL_GPL(zs_map_object);
-void zs_destroy_pool(struct zs_pool *pool)
+void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
{
- int i;
+ struct page *page;
+ unsigned long obj_idx, off;
- for (i = 0; i < zs_size_classes; i++) {
- int fg;
- struct size_class *class = pool->size_class[i];
+ unsigned int class_idx;
+ enum fullness_group fg;
+ struct size_class *class;
+ struct mapping_area *area;
- if (!class)
- continue;
+ BUG_ON(!handle);
- if (class->index != i)
- continue;
+ obj_handle_to_location(handle, &page, &obj_idx);
+ get_zspage_mapping(get_first_page(page), &class_idx, &fg);
+ class = pool->size_class[class_idx];
+ off = obj_idx_to_offset(page, obj_idx, class->size);
- for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) {
- if (class->fullness_list[fg]) {
- pr_info("Freeing non-empty class with size %db, fullness group %d\n",
- class->size, fg);
- }
- }
- kfree(class);
- }
+ area = this_cpu_ptr(&zs_map_area);
+ if (off + class->size <= PAGE_SIZE)
+ kunmap_atomic(area->vm_addr);
+ else {
+ struct page *pages[2];
- kfree(pool->size_class);
- kfree(pool);
+ pages[0] = page;
+ pages[1] = get_next_page(page);
+ BUG_ON(!pages[1]);
+
+ __zs_unmap_object(area, pages, off, class->size);
+ }
+ put_cpu_var(zs_map_area);
}
-EXPORT_SYMBOL_GPL(zs_destroy_pool);
+EXPORT_SYMBOL_GPL(zs_unmap_object);
/**
* zs_malloc - Allocate block of given size from pool.
@@ -1176,100 +1139,137 @@ void zs_free(struct zs_pool *pool, unsigned long obj)
EXPORT_SYMBOL_GPL(zs_free);
/**
- * zs_map_object - get address of allocated object from handle.
- * @pool: pool from which the object was allocated
- * @handle: handle returned from zs_malloc
- *
- * Before using an object allocated from zs_malloc, it must be mapped using
- * this function. When done with the object, it must be unmapped using
- * zs_unmap_object.
+ * zs_create_pool - Creates an allocation pool to work from.
+ * @flags: allocation flags used to allocate pool metadata
*
- * Only one object can be mapped per cpu at a time. There is no protection
- * against nested mappings.
+ * This function must be called before anything when using
+ * the zsmalloc allocator.
*
- * This function returns with preemption and page faults disabled.
+ * On success, a pointer to the newly created pool is returned,
+ * otherwise NULL.
*/
-void *zs_map_object(struct zs_pool *pool, unsigned long handle,
- enum zs_mapmode mm)
+struct zs_pool *zs_create_pool(gfp_t flags)
{
- struct page *page;
- unsigned long obj_idx, off;
+ int i;
+ struct zs_pool *pool;
+ struct size_class *prev_class = NULL;
- unsigned int class_idx;
- enum fullness_group fg;
- struct size_class *class;
- struct mapping_area *area;
- struct page *pages[2];
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return NULL;
- BUG_ON(!handle);
+ pool->size_class = kcalloc(zs_size_classes, sizeof(struct size_class *),
+ GFP_KERNEL);
+ if (!pool->size_class) {
+ kfree(pool);
+ return NULL;
+ }
/*
- * Because we use per-cpu mapping areas shared among the
- * pools/users, we can't allow mapping in interrupt context
- * because it can corrupt another users mappings.
+ * Iterate in reverse, because the size of the size_class we want to
+ * use for merging should be larger than or equal to the current size.
*/
- BUG_ON(in_interrupt());
+ for (i = zs_size_classes - 1; i >= 0; i--) {
+ int size;
+ int pages_per_zspage;
+ struct size_class *class;
- obj_handle_to_location(handle, &page, &obj_idx);
- get_zspage_mapping(get_first_page(page), &class_idx, &fg);
- class = pool->size_class[class_idx];
- off = obj_idx_to_offset(page, obj_idx, class->size);
+ size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
+ if (size > ZS_MAX_ALLOC_SIZE)
+ size = ZS_MAX_ALLOC_SIZE;
+ pages_per_zspage = get_pages_per_zspage(size);
- area = &get_cpu_var(zs_map_area);
- area->vm_mm = mm;
- if (off + class->size <= PAGE_SIZE) {
- /* this object is contained entirely within a page */
- area->vm_addr = kmap_atomic(page);
- return area->vm_addr + off;
+ /*
+ * size_class is used for normal zsmalloc operation such
+ * as alloc/free for that size. Although it is natural that we
+ * have one size_class for each size, there is a chance that we
+ * can get more memory utilization if we use one size_class for
+ * many different sizes whose size_class entries share the same
+ * characteristics. So we make size_class point to the
+ * previous size_class if possible.
+ */
+ if (prev_class) {
+ if (can_merge(prev_class, size, pages_per_zspage)) {
+ pool->size_class[i] = prev_class;
+ continue;
+ }
+ }
+
+ class = kzalloc(sizeof(struct size_class), GFP_KERNEL);
+ if (!class)
+ goto err;
+
+ class->size = size;
+ class->index = i;
+ class->pages_per_zspage = pages_per_zspage;
+ spin_lock_init(&class->lock);
+ pool->size_class[i] = class;
+
+ prev_class = class;
}
- /* this object spans two pages */
- pages[0] = page;
- pages[1] = get_next_page(page);
- BUG_ON(!pages[1]);
+ pool->flags = flags;
- return __zs_map_object(area, pages, off, class->size);
+ return pool;
+
+err:
+ zs_destroy_pool(pool);
+ return NULL;
}
-EXPORT_SYMBOL_GPL(zs_map_object);
+EXPORT_SYMBOL_GPL(zs_create_pool);
-void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
+void zs_destroy_pool(struct zs_pool *pool)
{
- struct page *page;
- unsigned long obj_idx, off;
+ int i;
- unsigned int class_idx;
- enum fullness_group fg;
- struct size_class *class;
- struct mapping_area *area;
+ for (i = 0; i < zs_size_classes; i++) {
+ int fg;
+ struct size_class *class = pool->size_class[i];
- BUG_ON(!handle);
+ if (!class)
+ continue;
- obj_handle_to_location(handle, &page, &obj_idx);
- get_zspage_mapping(get_first_page(page), &class_idx, &fg);
- class = pool->size_class[class_idx];
- off = obj_idx_to_offset(page, obj_idx, class->size);
+ if (class->index != i)
+ continue;
- area = this_cpu_ptr(&zs_map_area);
- if (off + class->size <= PAGE_SIZE)
- kunmap_atomic(area->vm_addr);
- else {
- struct page *pages[2];
+ for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) {
+ if (class->fullness_list[fg]) {
+ pr_info("Freeing non-empty class with size %db, fullness group %d\n",
+ class->size, fg);
+ }
+ }
+ kfree(class);
+ }
- pages[0] = page;
- pages[1] = get_next_page(page);
- BUG_ON(!pages[1]);
+ kfree(pool->size_class);
+ kfree(pool);
+}
+EXPORT_SYMBOL_GPL(zs_destroy_pool);
- __zs_unmap_object(area, pages, off, class->size);
+static int __init zs_init(void)
+{
+ int ret = zs_register_cpu_notifier();
+
+ if (ret) {
+ zs_unregister_cpu_notifier();
+ return ret;
}
- put_cpu_var(zs_map_area);
+
+ init_zs_size_classes();
+
+#ifdef CONFIG_ZPOOL
+ zpool_register_driver(&zs_zpool_driver);
+#endif
+ return 0;
}
-EXPORT_SYMBOL_GPL(zs_unmap_object);
-unsigned long zs_get_total_pages(struct zs_pool *pool)
+static void __exit zs_exit(void)
{
- return atomic_long_read(&pool->pages_allocated);
+#ifdef CONFIG_ZPOOL
+ zpool_unregister_driver(&zs_zpool_driver);
+#endif
+ zs_unregister_cpu_notifier();
}
-EXPORT_SYMBOL_GPL(zs_get_total_pages);
module_init(zs_init);
module_exit(zs_exit);
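
Beyond reordering the functions, the zsmalloc hunks carry two small pieces of arithmetic worth seeing in isolation: the size-class count computed by init_zs_size_classes(), and the off + size <= PAGE_SIZE test zs_map_object() uses to decide whether a mapping needs one page or two. The constants below mirror common zsmalloc defaults but are assumptions of this sketch, not values taken from the patch.

/* zsmalloc arithmetic: number of size classes, and the one-page vs
 * two-page decision in zs_map_object(). Constants are assumed. */
#include <stdio.h>

#define PAGE_SIZE            4096UL
#define ZS_MIN_ALLOC_SIZE    32UL
#define ZS_MAX_ALLOC_SIZE    PAGE_SIZE
#define ZS_SIZE_CLASS_DELTA  16UL

int main(void)
{
    unsigned long nr = (ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / ZS_SIZE_CLASS_DELTA + 1;
    unsigned long off = 4000, size = 208;

    if ((ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) % ZS_SIZE_CLASS_DELTA)
        nr += 1;
    printf("zs_size_classes = %lu\n", nr);

    /* An object of 'size' bytes placed at page offset 'off' either fits
     * in that page or spills into the next one. */
    if (off + size <= PAGE_SIZE)
        printf("object at offset %lu fits in one page\n", off);
    else
        printf("object at offset %lu spans two pages\n", off);
    return 0;
}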
diff --git a/net/Makefile b/net/Makefile
index 95fc694e4dd..38704bdf941 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -5,8 +5,6 @@
# Rewritten to use lists instead of if-statements.
#
-obj-y := nonet.o
-
obj-$(CONFIG_NET) := socket.o core/
tmp-$(CONFIG_COMPAT) := compat.o
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 79d84b88b8f..fe18825cc8a 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -661,7 +661,7 @@ static void hci_req_add_le_create_conn(struct hci_request *req,
memset(&cp, 0, sizeof(cp));
/* Update random address, but set require_privacy to false so
- * that we never connect with an unresolvable address.
+ * that we never connect with a non-resolvable address.
*/
if (hci_update_random_address(req, false, &own_addr_type))
return;
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 93f92a08550..5dcacf9607e 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1373,8 +1373,6 @@ static void hci_init1_req(struct hci_request *req, unsigned long opt)
static void bredr_setup(struct hci_request *req)
{
- struct hci_dev *hdev = req->hdev;
-
__le16 param;
__u8 flt_type;
@@ -1403,14 +1401,6 @@ static void bredr_setup(struct hci_request *req)
/* Connection accept timeout ~20 secs */
param = cpu_to_le16(0x7d00);
hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
-
- /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
- * but it does not support page scan related HCI commands.
- */
- if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
- hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
- hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
- }
}
static void le_setup(struct hci_request *req)
@@ -1718,6 +1708,16 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
if (hdev->commands[5] & 0x10)
hci_setup_link_policy(req);
+ if (hdev->commands[8] & 0x01)
+ hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
+
+ /* Some older Broadcom based Bluetooth 1.2 controllers do not
+ * support the Read Page Scan Type command. Check support for
+ * this command in the bit mask of supported commands.
+ */
+ if (hdev->commands[13] & 0x01)
+ hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
+
if (lmp_le_capable(hdev)) {
u8 events[8];
@@ -2634,6 +2634,12 @@ static int hci_dev_do_close(struct hci_dev *hdev)
drain_workqueue(hdev->workqueue);
hci_dev_lock(hdev);
+
+ if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
+ if (hdev->dev_type == HCI_BREDR)
+ mgmt_powered(hdev, 0);
+ }
+
hci_inquiry_cache_flush(hdev);
hci_pend_le_actions_clear(hdev);
hci_conn_hash_flush(hdev);
@@ -2681,14 +2687,6 @@ static int hci_dev_do_close(struct hci_dev *hdev)
hdev->flags &= BIT(HCI_RAW);
hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
- if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
- if (hdev->dev_type == HCI_BREDR) {
- hci_dev_lock(hdev);
- mgmt_powered(hdev, 0);
- hci_dev_unlock(hdev);
- }
- }
-
/* Controller radio is available but is currently powered down */
hdev->amp_status = AMP_STATUS_POWERED_DOWN;
@@ -3083,7 +3081,9 @@ static void hci_power_on(struct work_struct *work)
err = hci_dev_do_open(hdev);
if (err < 0) {
+ hci_dev_lock(hdev);
mgmt_set_powered_failed(hdev, err);
+ hci_dev_unlock(hdev);
return;
}
@@ -3959,17 +3959,29 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,
}
/* In case of required privacy without resolvable private address,
- * use an unresolvable private address. This is useful for active
+ * use a non-resolvable private address. This is useful for active
* scanning and non-connectable advertising.
*/
if (require_privacy) {
- bdaddr_t urpa;
+ bdaddr_t nrpa;
+
+ while (true) {
+ /* The non-resolvable private address is generated
+ * from six random bytes with the two most significant
+ * bits cleared.
+ */
+ get_random_bytes(&nrpa, 6);
+ nrpa.b[5] &= 0x3f;
- get_random_bytes(&urpa, 6);
- urpa.b[5] &= 0x3f; /* Clear two most significant bits */
+ /* The non-resolvable private address shall not be
+ * equal to the public address.
+ */
+ if (bacmp(&hdev->bdaddr, &nrpa))
+ break;
+ }
*own_addr_type = ADDR_LE_DEV_RANDOM;
- set_random_addr(req, &urpa);
+ set_random_addr(req, &nrpa);
return 0;
}
@@ -5625,7 +5637,7 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
u8 filter_policy;
/* Set require_privacy to false since no SCAN_REQ are send
- * during passive scanning. Not using an unresolvable address
+ * during passive scanning. Not using a non-resolvable address
* here is important so that peer devices using direct
* advertising with our address will be correctly reported
* by the controller.
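
The renamed nrpa path above encodes the Bluetooth rule for non-resolvable private addresses: six random bytes, the two most significant bits of the top byte cleared, regenerated if the result collides with the controller's public address. A userspace sketch of just that rule; rand() stands in for get_random_bytes() and the bdaddr_t layout is reduced to the six bytes.

/* Non-resolvable private address generation as added above: random
 * bytes, top two bits cleared, retry on collision with the public
 * address. rand() is a stand-in for get_random_bytes(). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

typedef struct { unsigned char b[6]; } bdaddr_t;

static void make_nrpa(bdaddr_t *nrpa, const bdaddr_t *public_addr)
{
    do {
        for (int i = 0; i < 6; i++)
            nrpa->b[i] = (unsigned char)(rand() & 0xff);
        nrpa->b[5] &= 0x3f;    /* two most significant bits must be zero */
    } while (memcmp(nrpa, public_addr, sizeof(*nrpa)) == 0);
}

int main(void)
{
    bdaddr_t public_addr = { { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 } };
    bdaddr_t nrpa;

    srand((unsigned)time(NULL));
    make_nrpa(&nrpa, &public_addr);
    printf("nrpa %02x:%02x:%02x:%02x:%02x:%02x\n",
           nrpa.b[5], nrpa.b[4], nrpa.b[3], nrpa.b[2], nrpa.b[1], nrpa.b[0]);
    return 0;
}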
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 322abbbbcef..39a5c8a0172 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -257,6 +257,8 @@ static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
if (!sent)
return;
+ hci_dev_lock(hdev);
+
if (!status) {
__u8 param = *((__u8 *) sent);
@@ -268,6 +270,8 @@ static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
if (test_bit(HCI_MGMT, &hdev->dev_flags))
mgmt_auth_enable_complete(hdev, status);
+
+ hci_dev_unlock(hdev);
}
static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
@@ -443,6 +447,8 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
if (!sent)
return;
+ hci_dev_lock(hdev);
+
if (!status) {
if (sent->mode)
hdev->features[1][0] |= LMP_HOST_SSP;
@@ -458,6 +464,8 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
else
clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
}
+
+ hci_dev_unlock(hdev);
}
static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
@@ -471,6 +479,8 @@ static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
if (!sent)
return;
+ hci_dev_lock(hdev);
+
if (!status) {
if (sent->support)
hdev->features[1][0] |= LMP_HOST_SC;
@@ -486,6 +496,8 @@ static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
else
clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
}
+
+ hci_dev_unlock(hdev);
}
static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1135,6 +1147,8 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
if (!cp)
return;
+ hci_dev_lock(hdev);
+
switch (cp->enable) {
case LE_SCAN_ENABLE:
set_bit(HCI_LE_SCAN, &hdev->dev_flags);
@@ -1184,6 +1198,8 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
break;
}
+
+ hci_dev_unlock(hdev);
}
static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
@@ -1278,6 +1294,8 @@ static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
if (!sent)
return;
+ hci_dev_lock(hdev);
+
if (sent->le) {
hdev->features[1][0] |= LMP_HOST_LE;
set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
@@ -1291,6 +1309,8 @@ static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
hdev->features[1][0] |= LMP_HOST_LE_BREDR;
else
hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
+
+ hci_dev_unlock(hdev);
}
static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index a2b6dfa38a0..d04dc009573 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -6966,8 +6966,9 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
test_bit(HCI_HS_ENABLED, &hcon->hdev->dev_flags))
conn->local_fixed_chan |= L2CAP_FC_A2MP;
- if (bredr_sc_enabled(hcon->hdev) &&
- test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags))
+ if (test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags) &&
+ (bredr_sc_enabled(hcon->hdev) ||
+ test_bit(HCI_FORCE_LESC, &hcon->hdev->dbg_flags)))
conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
mutex_init(&conn->ident_lock);
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 7384f116133..693ce8bcd06 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -2199,12 +2199,14 @@ static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
struct cmd_lookup match = { NULL, hdev };
+ hci_dev_lock(hdev);
+
if (status) {
u8 mgmt_err = mgmt_status(status);
mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
&mgmt_err);
- return;
+ goto unlock;
}
mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
@@ -2222,17 +2224,16 @@ static void le_enable_complete(struct hci_dev *hdev, u8 status)
if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
struct hci_request req;
- hci_dev_lock(hdev);
-
hci_req_init(&req, hdev);
update_adv_data(&req);
update_scan_rsp_data(&req);
hci_req_run(&req, NULL);
hci_update_background_scan(hdev);
-
- hci_dev_unlock(hdev);
}
+
+unlock:
+ hci_dev_unlock(hdev);
}
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
@@ -3114,14 +3115,13 @@ static void pairing_complete(struct pending_cmd *cmd, u8 status)
conn->disconn_cfm_cb = NULL;
hci_conn_drop(conn);
- hci_conn_put(conn);
-
- mgmt_pending_remove(cmd);
/* The device is paired so there is no need to remove
* its connection parameters anymore.
*/
clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
+
+ hci_conn_put(conn);
}
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
@@ -3130,8 +3130,10 @@ void mgmt_smp_complete(struct hci_conn *conn, bool complete)
struct pending_cmd *cmd;
cmd = find_pairing(conn);
- if (cmd)
+ if (cmd) {
cmd->cmd_complete(cmd, status);
+ mgmt_pending_remove(cmd);
+ }
}
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
@@ -3141,10 +3143,13 @@ static void pairing_complete_cb(struct hci_conn *conn, u8 status)
BT_DBG("status %u", status);
cmd = find_pairing(conn);
- if (!cmd)
+ if (!cmd) {
BT_DBG("Unable to find a pending command");
- else
- cmd->cmd_complete(cmd, mgmt_status(status));
+ return;
+ }
+
+ cmd->cmd_complete(cmd, mgmt_status(status));
+ mgmt_pending_remove(cmd);
}
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
@@ -3157,10 +3162,13 @@ static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
return;
cmd = find_pairing(conn);
- if (!cmd)
+ if (!cmd) {
BT_DBG("Unable to find a pending command");
- else
- cmd->cmd_complete(cmd, mgmt_status(status));
+ return;
+ }
+
+ cmd->cmd_complete(cmd, mgmt_status(status));
+ mgmt_pending_remove(cmd);
}
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
@@ -3274,8 +3282,10 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
cmd->user_data = hci_conn_get(conn);
if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
- hci_conn_security(conn, sec_level, auth_type, true))
- pairing_complete(cmd, 0);
+ hci_conn_security(conn, sec_level, auth_type, true)) {
+ cmd->cmd_complete(cmd, 0);
+ mgmt_pending_remove(cmd);
+ }
err = 0;
@@ -3317,7 +3327,8 @@ static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}
- pairing_complete(cmd, MGMT_STATUS_CANCELLED);
+ cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
+ mgmt_pending_remove(cmd);
err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
addr, sizeof(*addr));
@@ -3791,7 +3802,7 @@ static bool trigger_discovery(struct hci_request *req, u8 *status)
/* All active scans will be done with either a resolvable
* private address (when privacy feature has been enabled)
- * or unresolvable private address.
+ * or non-resolvable private address.
*/
err = hci_update_random_address(req, true, &own_addr_type);
if (err < 0) {
@@ -4279,12 +4290,14 @@ static void set_advertising_complete(struct hci_dev *hdev, u8 status)
{
struct cmd_lookup match = { NULL, hdev };
+ hci_dev_lock(hdev);
+
if (status) {
u8 mgmt_err = mgmt_status(status);
mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
cmd_status_rsp, &mgmt_err);
- return;
+ goto unlock;
}
if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
@@ -4299,6 +4312,9 @@ static void set_advertising_complete(struct hci_dev *hdev, u8 status)
if (match.sk)
sock_put(match.sk);
+
+unlock:
+ hci_dev_unlock(hdev);
}
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
@@ -6081,6 +6097,11 @@ static int powered_update_hci(struct hci_dev *hdev)
hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
}
+ if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
+ u8 sc = 0x01;
+ hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, sizeof(sc), &sc);
+ }
+
if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
lmp_bredr_capable(hdev)) {
struct hci_cp_write_le_host_supported cp;
@@ -6130,8 +6151,7 @@ static int powered_update_hci(struct hci_dev *hdev)
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
struct cmd_lookup match = { NULL, hdev };
- u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
- u8 zero_cod[] = { 0, 0, 0 };
+ u8 status, zero_cod[] = { 0, 0, 0 };
int err;
if (!test_bit(HCI_MGMT, &hdev->dev_flags))
@@ -6147,7 +6167,20 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered)
}
mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
- mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status_not_powered);
+
+ /* If the power off is because of hdev unregistration let us
+ * use the appropriate INVALID_INDEX status. Otherwise use
+ * NOT_POWERED. We cover both scenarios here since later in
+ * mgmt_index_removed() any hci_conn callbacks will have already
+ * been triggered, potentially causing misleading DISCONNECTED
+ * status responses.
+ */
+ if (test_bit(HCI_UNREGISTER, &hdev->dev_flags))
+ status = MGMT_STATUS_INVALID_INDEX;
+ else
+ status = MGMT_STATUS_NOT_POWERED;
+
+ mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
@@ -6681,8 +6714,10 @@ void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
cmd ? cmd->sk : NULL);
- if (cmd)
- pairing_complete(cmd, status);
+ if (cmd) {
+ cmd->cmd_complete(cmd, status);
+ mgmt_pending_remove(cmd);
+ }
}
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
@@ -7046,13 +7081,15 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
* kept and checking possible scan response data
* will be skipped.
*/
- if (hdev->discovery.uuid_count > 0) {
+ if (hdev->discovery.uuid_count > 0)
match = eir_has_uuids(eir, eir_len,
hdev->discovery.uuid_count,
hdev->discovery.uuids);
- if (!match)
- return;
- }
+ else
+ match = true;
+
+ if (!match && !scan_rsp_len)
+ return;
/* Copy EIR or advertising data into event */
memcpy(ev->eir, eir, eir_len);
@@ -7061,8 +7098,10 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
* provided, results with empty EIR or advertising data
* should be dropped since they do not match any UUID.
*/
- if (hdev->discovery.uuid_count > 0)
+ if (hdev->discovery.uuid_count > 0 && !scan_rsp_len)
return;
+
+ match = false;
}
if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
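
Several of the mgmt.c hunks make the same change: take the hdev lock at the top of a completion handler and route every exit, including the error path, through one unlock label. A minimal sketch of that shape; the handler name and status check are hypothetical, and a pthread mutex plays the role of hci_dev_lock()/hci_dev_unlock().

/* Lock-at-entry, single-unlock-label shape used by le_enable_complete()
 * and set_advertising_complete() after the patch. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

static void enable_complete(int status)
{
    pthread_mutex_lock(&dev_lock);

    if (status) {
        printf("command failed: %d\n", status);
        goto unlock;            /* was an early return before the change */
    }

    printf("command succeeded, updating state under the lock\n");

unlock:
    pthread_mutex_unlock(&dev_lock);
}

int main(void)
{
    enable_complete(0);
    enable_complete(-1);
    return 0;
}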
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 6a46252fe66..b67749bb55b 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -1673,7 +1673,8 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
/* SMP over BR/EDR requires special treatment */
if (conn->hcon->type == ACL_LINK) {
/* We must have a BR/EDR SC link */
- if (!test_bit(HCI_CONN_AES_CCM, &conn->hcon->flags))
+ if (!test_bit(HCI_CONN_AES_CCM, &conn->hcon->flags) &&
+ !test_bit(HCI_FORCE_LESC, &hdev->dbg_flags))
return SMP_CROSS_TRANSP_NOT_ALLOWED;
set_bit(SMP_FLAG_SC, &smp->flags);
@@ -2927,7 +2928,7 @@ static struct l2cap_chan *smp_add_cid(struct hci_dev *hdev, u16 cid)
tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, 0);
if (IS_ERR(tfm_aes)) {
BT_ERR("Unable to create crypto context");
- return ERR_PTR(PTR_ERR(tfm_aes));
+ return ERR_CAST(tfm_aes);
}
create_chan:
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
index 7e38b729696..15845814a0f 100644
--- a/net/ceph/auth_x.c
+++ b/net/ceph/auth_x.c
@@ -8,6 +8,7 @@
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
+#include <linux/ceph/messenger.h>
#include "crypto.h"
#include "auth_x.h"
@@ -293,6 +294,11 @@ static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
dout("build_authorizer for %s %p\n",
ceph_entity_type_name(th->service), au);
+ ceph_crypto_key_destroy(&au->session_key);
+ ret = ceph_crypto_key_clone(&au->session_key, &th->session_key);
+ if (ret)
+ return ret;
+
maxlen = sizeof(*msg_a) + sizeof(msg_b) +
ceph_x_encrypt_buflen(ticket_blob_len);
dout(" need len %d\n", maxlen);
@@ -302,8 +308,10 @@ static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
}
if (!au->buf) {
au->buf = ceph_buffer_new(maxlen, GFP_NOFS);
- if (!au->buf)
+ if (!au->buf) {
+ ceph_crypto_key_destroy(&au->session_key);
return -ENOMEM;
+ }
}
au->service = th->service;
au->secret_id = th->secret_id;
@@ -329,7 +337,7 @@ static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
get_random_bytes(&au->nonce, sizeof(au->nonce));
msg_b.struct_v = 1;
msg_b.nonce = cpu_to_le64(au->nonce);
- ret = ceph_x_encrypt(&th->session_key, &msg_b, sizeof(msg_b),
+ ret = ceph_x_encrypt(&au->session_key, &msg_b, sizeof(msg_b),
p, end - p);
if (ret < 0)
goto out_buf;
@@ -560,6 +568,8 @@ static int ceph_x_create_authorizer(
auth->authorizer_buf_len = au->buf->vec.iov_len;
auth->authorizer_reply_buf = au->reply_buf;
auth->authorizer_reply_buf_len = sizeof (au->reply_buf);
+ auth->sign_message = ac->ops->sign_message;
+ auth->check_message_signature = ac->ops->check_message_signature;
return 0;
}
@@ -588,17 +598,13 @@ static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac,
struct ceph_authorizer *a, size_t len)
{
struct ceph_x_authorizer *au = (void *)a;
- struct ceph_x_ticket_handler *th;
int ret = 0;
struct ceph_x_authorize_reply reply;
void *preply = &reply;
void *p = au->reply_buf;
void *end = p + sizeof(au->reply_buf);
- th = get_ticket_handler(ac, au->service);
- if (IS_ERR(th))
- return PTR_ERR(th);
- ret = ceph_x_decrypt(&th->session_key, &p, end, &preply, sizeof(reply));
+ ret = ceph_x_decrypt(&au->session_key, &p, end, &preply, sizeof(reply));
if (ret < 0)
return ret;
if (ret != sizeof(reply))
@@ -618,6 +624,7 @@ static void ceph_x_destroy_authorizer(struct ceph_auth_client *ac,
{
struct ceph_x_authorizer *au = (void *)a;
+ ceph_crypto_key_destroy(&au->session_key);
ceph_buffer_put(au->buf);
kfree(au);
}
@@ -663,6 +670,59 @@ static void ceph_x_invalidate_authorizer(struct ceph_auth_client *ac,
memset(&th->validity, 0, sizeof(th->validity));
}
+static int calcu_signature(struct ceph_x_authorizer *au,
+ struct ceph_msg *msg, __le64 *sig)
+{
+ int ret;
+ char tmp_enc[40];
+ __le32 tmp[5] = {
+ 16u, msg->hdr.crc, msg->footer.front_crc,
+ msg->footer.middle_crc, msg->footer.data_crc,
+ };
+ ret = ceph_x_encrypt(&au->session_key, &tmp, sizeof(tmp),
+ tmp_enc, sizeof(tmp_enc));
+ if (ret < 0)
+ return ret;
+ *sig = *(__le64*)(tmp_enc + 4);
+ return 0;
+}
+
+static int ceph_x_sign_message(struct ceph_auth_handshake *auth,
+ struct ceph_msg *msg)
+{
+ int ret;
+ if (!auth->authorizer)
+ return 0;
+ ret = calcu_signature((struct ceph_x_authorizer *)auth->authorizer,
+ msg, &msg->footer.sig);
+ if (ret < 0)
+ return ret;
+ msg->footer.flags |= CEPH_MSG_FOOTER_SIGNED;
+ return 0;
+}
+
+static int ceph_x_check_message_signature(struct ceph_auth_handshake *auth,
+ struct ceph_msg *msg)
+{
+ __le64 sig_check;
+ int ret;
+
+ if (!auth->authorizer)
+ return 0;
+ ret = calcu_signature((struct ceph_x_authorizer *)auth->authorizer,
+ msg, &sig_check);
+ if (ret < 0)
+ return ret;
+ if (sig_check == msg->footer.sig)
+ return 0;
+ if (msg->footer.flags & CEPH_MSG_FOOTER_SIGNED)
+ dout("ceph_x_check_message_signature %p has signature %llx "
+ "expect %llx\n", msg, msg->footer.sig, sig_check);
+ else
+ dout("ceph_x_check_message_signature %p sender did not set "
+ "CEPH_MSG_FOOTER_SIGNED\n", msg);
+ return -EBADMSG;
+}
static const struct ceph_auth_client_ops ceph_x_ops = {
.name = "x",
@@ -677,6 +737,8 @@ static const struct ceph_auth_client_ops ceph_x_ops = {
.invalidate_authorizer = ceph_x_invalidate_authorizer,
.reset = ceph_x_reset,
.destroy = ceph_x_destroy,
+ .sign_message = ceph_x_sign_message,
+ .check_message_signature = ceph_x_check_message_signature,
};
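
calcu_signature() above packs a length tag and four CRCs into five little-endian 32-bit words, encrypts them with the authorizer's session key, and takes eight bytes starting at offset 4 of the ciphertext as the footer signature. The sketch below only shows that buffer layout; xor_encrypt() is a toy placeholder, not the real ceph_x_encrypt(), and the CRC values are invented.

/* Buffer layout of calcu_signature(): five 32-bit words in, 64-bit
 * signature taken from offset 4 of the (here fake) ciphertext. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void xor_encrypt(const void *in, size_t len, uint8_t key, uint8_t *out)
{
    const uint8_t *p = in;

    for (size_t i = 0; i < len; i++)
        out[i] = p[i] ^ key;    /* placeholder for encryption with the session key */
}

int main(void)
{
    uint32_t hdr_crc = 0x11111111, front_crc = 0x22222222;
    uint32_t middle_crc = 0x33333333, data_crc = 0x44444444;
    uint32_t tmp[5] = { 16u, hdr_crc, front_crc, middle_crc, data_crc };
    uint8_t tmp_enc[40] = { 0 };
    uint64_t sig;

    xor_encrypt(tmp, sizeof(tmp), 0x5a, tmp_enc);
    memcpy(&sig, tmp_enc + 4, sizeof(sig));   /* *(__le64 *)(tmp_enc + 4) */
    printf("footer.sig = %#llx\n", (unsigned long long)sig);
    return 0;
}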
diff --git a/net/ceph/auth_x.h b/net/ceph/auth_x.h
index 65ee72082d9..e8b7c6917d4 100644
--- a/net/ceph/auth_x.h
+++ b/net/ceph/auth_x.h
@@ -26,6 +26,7 @@ struct ceph_x_ticket_handler {
struct ceph_x_authorizer {
+ struct ceph_crypto_key session_key;
struct ceph_buffer *buf;
unsigned int service;
u64 nonce;
diff --git a/net/ceph/buffer.c b/net/ceph/buffer.c
index 621b5f65407..add5f921a0f 100644
--- a/net/ceph/buffer.c
+++ b/net/ceph/buffer.c
@@ -6,7 +6,7 @@
#include <linux/ceph/buffer.h>
#include <linux/ceph/decode.h>
-#include <linux/ceph/libceph.h> /* for ceph_kv{malloc,free} */
+#include <linux/ceph/libceph.h> /* for ceph_kvmalloc */
struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp)
{
@@ -35,7 +35,7 @@ void ceph_buffer_release(struct kref *kref)
struct ceph_buffer *b = container_of(kref, struct ceph_buffer, kref);
dout("buffer_release %p\n", b);
- ceph_kvfree(b->vec.iov_base);
+ kvfree(b->vec.iov_base);
kfree(b);
}
EXPORT_SYMBOL(ceph_buffer_release);
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 58fbfe134f9..5d5ab67f516 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -184,14 +184,6 @@ void *ceph_kvmalloc(size_t size, gfp_t flags)
return __vmalloc(size, flags | __GFP_HIGHMEM, PAGE_KERNEL);
}
-void ceph_kvfree(const void *ptr)
-{
- if (is_vmalloc_addr(ptr))
- vfree(ptr);
- else
- kfree(ptr);
-}
-
static int parse_fsid(const char *str, struct ceph_fsid *fsid)
{
@@ -245,6 +237,8 @@ enum {
Opt_noshare,
Opt_crc,
Opt_nocrc,
+ Opt_cephx_require_signatures,
+ Opt_nocephx_require_signatures,
};
static match_table_t opt_tokens = {
@@ -263,6 +257,8 @@ static match_table_t opt_tokens = {
{Opt_noshare, "noshare"},
{Opt_crc, "crc"},
{Opt_nocrc, "nocrc"},
+ {Opt_cephx_require_signatures, "cephx_require_signatures"},
+ {Opt_nocephx_require_signatures, "nocephx_require_signatures"},
{-1, NULL}
};
@@ -461,6 +457,12 @@ ceph_parse_options(char *options, const char *dev_name,
case Opt_nocrc:
opt->flags |= CEPH_OPT_NOCRC;
break;
+ case Opt_cephx_require_signatures:
+ opt->flags &= ~CEPH_OPT_NOMSGAUTH;
+ break;
+ case Opt_nocephx_require_signatures:
+ opt->flags |= CEPH_OPT_NOMSGAUTH;
+ break;
default:
BUG_ON(token);
@@ -504,6 +506,9 @@ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private,
init_waitqueue_head(&client->auth_wq);
client->auth_err = 0;
+ if (!ceph_test_opt(client, NOMSGAUTH))
+ required_features |= CEPH_FEATURE_MSG_AUTH;
+
client->extra_mon_dispatch = NULL;
client->supported_features = CEPH_FEATURES_SUPPORTED_DEFAULT |
supported_features;
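
The two new mount-style options simply toggle one flag bit, and leaving NOMSGAUTH clear later causes CEPH_FEATURE_MSG_AUTH to be added to the client's required features. A small sketch of the option-to-flag mapping; the bit position and the string-comparison parser are simplifications of the kernel's match_token() handling.

/* cephx_require_signatures / nocephx_require_signatures map onto one
 * flag bit. Bit value and parsing are simplified stand-ins. */
#include <stdio.h>
#include <string.h>

#define CEPH_OPT_NOMSGAUTH  (1u << 4)   /* hypothetical bit position */

static unsigned parse_opt(unsigned flags, const char *opt)
{
    if (strcmp(opt, "cephx_require_signatures") == 0)
        flags &= ~CEPH_OPT_NOMSGAUTH;
    else if (strcmp(opt, "nocephx_require_signatures") == 0)
        flags |= CEPH_OPT_NOMSGAUTH;
    return flags;
}

int main(void)
{
    unsigned flags = 0;

    flags = parse_opt(flags, "nocephx_require_signatures");
    printf("NOMSGAUTH %s -> signatures not required\n",
           (flags & CEPH_OPT_NOMSGAUTH) ? "set" : "clear");
    flags = parse_opt(flags, "cephx_require_signatures");
    printf("NOMSGAUTH %s -> MSG_AUTH becomes a required feature\n",
           (flags & CEPH_OPT_NOMSGAUTH) ? "set" : "clear");
    return 0;
}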
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 8d1653caffd..33a2f201e46 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -1196,8 +1196,18 @@ static void prepare_write_message_footer(struct ceph_connection *con)
dout("prepare_write_message_footer %p\n", con);
con->out_kvec_is_msg = true;
con->out_kvec[v].iov_base = &m->footer;
- con->out_kvec[v].iov_len = sizeof(m->footer);
- con->out_kvec_bytes += sizeof(m->footer);
+ if (con->peer_features & CEPH_FEATURE_MSG_AUTH) {
+ if (con->ops->sign_message)
+ con->ops->sign_message(con, m);
+ else
+ m->footer.sig = 0;
+ con->out_kvec[v].iov_len = sizeof(m->footer);
+ con->out_kvec_bytes += sizeof(m->footer);
+ } else {
+ m->old_footer.flags = m->footer.flags;
+ con->out_kvec[v].iov_len = sizeof(m->old_footer);
+ con->out_kvec_bytes += sizeof(m->old_footer);
+ }
con->out_kvec_left++;
con->out_more = m->more_to_follow;
con->out_msg_done = true;
@@ -2249,6 +2259,7 @@ static int read_partial_message(struct ceph_connection *con)
int ret;
unsigned int front_len, middle_len, data_len;
bool do_datacrc = !con->msgr->nocrc;
+ bool need_sign = (con->peer_features & CEPH_FEATURE_MSG_AUTH);
u64 seq;
u32 crc;
@@ -2361,12 +2372,21 @@ static int read_partial_message(struct ceph_connection *con)
}
/* footer */
- size = sizeof (m->footer);
+ if (need_sign)
+ size = sizeof(m->footer);
+ else
+ size = sizeof(m->old_footer);
+
end += size;
ret = read_partial(con, end, size, &m->footer);
if (ret <= 0)
return ret;
+ if (!need_sign) {
+ m->footer.flags = m->old_footer.flags;
+ m->footer.sig = 0;
+ }
+
dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
m, front_len, m->footer.front_crc, middle_len,
m->footer.middle_crc, data_len, m->footer.data_crc);
@@ -2390,6 +2410,12 @@ static int read_partial_message(struct ceph_connection *con)
return -EBADMSG;
}
+ if (need_sign && con->ops->check_message_signature &&
+ con->ops->check_message_signature(con, m)) {
+ pr_err("read_partial_message %p signature check failed\n", m);
+ return -EBADMSG;
+ }
+
return 1; /* done! */
}
@@ -3288,7 +3314,7 @@ static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip)
static void ceph_msg_free(struct ceph_msg *m)
{
dout("%s %p\n", __func__, m);
- ceph_kvfree(m->front.iov_base);
+ kvfree(m->front.iov_base);
kmem_cache_free(ceph_msg_cache, m);
}
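
prepare_write_message_footer() and read_partial_message() now pick the footer size from the peer's feature bits: a peer advertising MSG_AUTH exchanges the larger footer carrying a signature, while an old peer keeps the legacy layout and the signature is treated as zero. A sketch of that selection; the struct layouts and the feature-bit value here are simplified assumptions, not the wire format.

/* Feature-gated footer sizing: signed footer for MSG_AUTH peers,
 * legacy footer otherwise. Layouts and the bit value are assumed. */
#include <stdint.h>
#include <stdio.h>

#define FEATURE_MSG_AUTH  (1ULL << 15)   /* hypothetical bit position */

struct old_footer { uint32_t front_crc, middle_crc, data_crc; uint8_t flags; };
struct new_footer { uint32_t front_crc, middle_crc, data_crc; uint64_t sig; uint8_t flags; };

static size_t footer_size(uint64_t peer_features)
{
    if (peer_features & FEATURE_MSG_AUTH)
        return sizeof(struct new_footer);   /* signed footer */
    return sizeof(struct old_footer);       /* legacy footer, sig treated as 0 */
}

int main(void)
{
    printf("old peer: %zu bytes, msg-auth peer: %zu bytes\n",
           footer_size(0), footer_size(FEATURE_MSG_AUTH));
    return 0;
}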
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 6f164289bde..53299c7b0ca 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -292,6 +292,10 @@ static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
ceph_osd_data_release(&op->cls.request_data);
ceph_osd_data_release(&op->cls.response_data);
break;
+ case CEPH_OSD_OP_SETXATTR:
+ case CEPH_OSD_OP_CMPXATTR:
+ ceph_osd_data_release(&op->xattr.osd_data);
+ break;
default:
break;
}
@@ -476,8 +480,7 @@ void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
size_t payload_len = 0;
BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
- opcode != CEPH_OSD_OP_DELETE && opcode != CEPH_OSD_OP_ZERO &&
- opcode != CEPH_OSD_OP_TRUNCATE);
+ opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE);
op->extent.offset = offset;
op->extent.length = length;
@@ -545,6 +548,39 @@ void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
}
EXPORT_SYMBOL(osd_req_op_cls_init);
+int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
+ u16 opcode, const char *name, const void *value,
+ size_t size, u8 cmp_op, u8 cmp_mode)
+{
+ struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, opcode);
+ struct ceph_pagelist *pagelist;
+ size_t payload_len;
+
+ BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);
+
+ pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
+ if (!pagelist)
+ return -ENOMEM;
+
+ ceph_pagelist_init(pagelist);
+
+ payload_len = strlen(name);
+ op->xattr.name_len = payload_len;
+ ceph_pagelist_append(pagelist, name, payload_len);
+
+ op->xattr.value_len = size;
+ ceph_pagelist_append(pagelist, value, size);
+ payload_len += size;
+
+ op->xattr.cmp_op = cmp_op;
+ op->xattr.cmp_mode = cmp_mode;
+
+ ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
+ op->payload_len = payload_len;
+ return 0;
+}
+EXPORT_SYMBOL(osd_req_op_xattr_init);
+
void osd_req_op_watch_init(struct ceph_osd_request *osd_req,
unsigned int which, u16 opcode,
u64 cookie, u64 version, int flag)
@@ -626,7 +662,6 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req,
case CEPH_OSD_OP_READ:
case CEPH_OSD_OP_WRITE:
case CEPH_OSD_OP_ZERO:
- case CEPH_OSD_OP_DELETE:
case CEPH_OSD_OP_TRUNCATE:
if (src->op == CEPH_OSD_OP_WRITE)
request_data_len = src->extent.length;
@@ -676,6 +711,19 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req,
dst->alloc_hint.expected_write_size =
cpu_to_le64(src->alloc_hint.expected_write_size);
break;
+ case CEPH_OSD_OP_SETXATTR:
+ case CEPH_OSD_OP_CMPXATTR:
+ dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
+ dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
+ dst->xattr.cmp_op = src->xattr.cmp_op;
+ dst->xattr.cmp_mode = src->xattr.cmp_mode;
+ osd_data = &src->xattr.osd_data;
+ ceph_osdc_msg_data_add(req->r_request, osd_data);
+ request_data_len = osd_data->pagelist->length;
+ break;
+ case CEPH_OSD_OP_CREATE:
+ case CEPH_OSD_OP_DELETE:
+ break;
default:
pr_err("unsupported osd opcode %s\n",
ceph_osd_op_name(src->op));
@@ -705,7 +753,8 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req,
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
struct ceph_file_layout *layout,
struct ceph_vino vino,
- u64 off, u64 *plen, int num_ops,
+ u64 off, u64 *plen,
+ unsigned int which, int num_ops,
int opcode, int flags,
struct ceph_snap_context *snapc,
u32 truncate_seq,
@@ -716,13 +765,11 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
u64 objnum = 0;
u64 objoff = 0;
u64 objlen = 0;
- u32 object_size;
- u64 object_base;
int r;
BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
- opcode != CEPH_OSD_OP_DELETE && opcode != CEPH_OSD_OP_ZERO &&
- opcode != CEPH_OSD_OP_TRUNCATE);
+ opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
+ opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);
req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
GFP_NOFS);
@@ -738,29 +785,24 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
return ERR_PTR(r);
}
- object_size = le32_to_cpu(layout->fl_object_size);
- object_base = off - objoff;
- if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
- if (truncate_size <= object_base) {
- truncate_size = 0;
- } else {
- truncate_size -= object_base;
- if (truncate_size > object_size)
- truncate_size = object_size;
+ if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
+ osd_req_op_init(req, which, opcode);
+ } else {
+ u32 object_size = le32_to_cpu(layout->fl_object_size);
+ u32 object_base = off - objoff;
+ if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
+ if (truncate_size <= object_base) {
+ truncate_size = 0;
+ } else {
+ truncate_size -= object_base;
+ if (truncate_size > object_size)
+ truncate_size = object_size;
+ }
}
+ osd_req_op_extent_init(req, which, opcode, objoff, objlen,
+ truncate_size, truncate_seq);
}
- osd_req_op_extent_init(req, 0, opcode, objoff, objlen,
- truncate_size, truncate_seq);
-
- /*
- * A second op in the ops array means the caller wants to
- * also issue a include a 'startsync' command so that the
- * osd will flush data quickly.
- */
- if (num_ops > 1)
- osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC);
-
req->r_base_oloc.pool = ceph_file_layout_pg_pool(*layout);
snprintf(req->r_base_oid.name, sizeof(req->r_base_oid.name),
@@ -2626,7 +2668,7 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc,
dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
vino.snap, off, *plen);
- req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 1,
+ req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 0, 1,
CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
NULL, truncate_seq, truncate_size,
false);
@@ -2669,7 +2711,7 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
int page_align = off & ~PAGE_MASK;
BUG_ON(vino.snap != CEPH_NOSNAP); /* snapshots aren't writeable */
- req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 1,
+ req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
CEPH_OSD_OP_WRITE,
CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
snapc, truncate_seq, truncate_size,
@@ -2920,6 +2962,20 @@ static int invalidate_authorizer(struct ceph_connection *con)
return ceph_monc_validate_auth(&osdc->client->monc);
}
+static int sign_message(struct ceph_connection *con, struct ceph_msg *msg)
+{
+ struct ceph_osd *o = con->private;
+ struct ceph_auth_handshake *auth = &o->o_auth;
+ return ceph_auth_sign_message(auth, msg);
+}
+
+static int check_message_signature(struct ceph_connection *con, struct ceph_msg *msg)
+{
+ struct ceph_osd *o = con->private;
+ struct ceph_auth_handshake *auth = &o->o_auth;
+ return ceph_auth_check_message_signature(auth, msg);
+}
+
static const struct ceph_connection_operations osd_con_ops = {
.get = get_osd_con,
.put = put_osd_con,
@@ -2928,5 +2984,7 @@ static const struct ceph_connection_operations osd_con_ops = {
.verify_authorizer_reply = verify_authorizer_reply,
.invalidate_authorizer = invalidate_authorizer,
.alloc_msg = alloc_msg,
+ .sign_message = sign_message,
+ .check_message_signature = check_message_signature,
.fault = osd_reset,
};
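Editor's note: the hunks above add optional sign_message/check_message_signature callbacks to osd_con_ops so the messenger can ask the auth layer to sign outgoing messages and verify incoming ones when the handshake supports it. Below is a minimal userspace sketch of that "optional hook in an ops table" shape; the names (msg_ops, send_msg, toy_sign) are invented for illustration, the toy signature is not cryptographic, and this is not the ceph messenger code.

#include <stdio.h>
#include <string.h>

struct msg {
    const char *payload;
    unsigned int sig;        /* fake signature slot */
};

struct conn;

struct msg_ops {
    /* optional hooks: a NULL pointer simply disables signing */
    int (*sign_message)(struct conn *c, struct msg *m);
    int (*check_message_signature)(struct conn *c, struct msg *m);
};

struct conn {
    const struct msg_ops *ops;
    unsigned int key;        /* toy shared secret */
};

/* toy signer: only demonstrates the hook shape, not real auth */
static int toy_sign(struct conn *c, struct msg *m)
{
    m->sig = c->key ^ (unsigned int)strlen(m->payload);
    return 0;
}

static int toy_check(struct conn *c, struct msg *m)
{
    return m->sig == (c->key ^ (unsigned int)strlen(m->payload)) ? 0 : -1;
}

static const struct msg_ops signed_ops = {
    .sign_message = toy_sign,
    .check_message_signature = toy_check,
};

static int send_msg(struct conn *c, struct msg *m)
{
    /* sign only when the connection provides a hook, as the messenger does */
    if (c->ops && c->ops->sign_message) {
        int ret = c->ops->sign_message(c, m);
        if (ret)
            return ret;
    }
    printf("sent \"%s\" sig=%u\n", m->payload, m->sig);
    return 0;
}

int main(void)
{
    struct conn c = { .ops = &signed_ops, .key = 0x5a };
    struct msg m = { .payload = "hello osd" };

    if (send_msg(&c, &m) == 0 &&
        c.ops->check_message_signature(&c, &m) == 0)
        puts("signature verified");
    return 0;
}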
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 7f155175bba..ce780c722e4 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -337,17 +337,17 @@ EXPORT_SYMBOL_GPL(__put_net);
struct net *get_net_ns_by_fd(int fd)
{
- struct proc_ns *ei;
struct file *file;
+ struct ns_common *ns;
struct net *net;
file = proc_ns_fget(fd);
if (IS_ERR(file))
return ERR_CAST(file);
- ei = get_proc_ns(file_inode(file));
- if (ei->ns_ops == &netns_operations)
- net = get_net(ei->ns);
+ ns = get_proc_ns(file_inode(file));
+ if (ns->ops == &netns_operations)
+ net = get_net(container_of(ns, struct net, ns));
else
net = ERR_PTR(-EINVAL);
@@ -386,12 +386,15 @@ EXPORT_SYMBOL_GPL(get_net_ns_by_pid);
static __net_init int net_ns_net_init(struct net *net)
{
- return proc_alloc_inum(&net->proc_inum);
+#ifdef CONFIG_NET_NS
+ net->ns.ops = &netns_operations;
+#endif
+ return ns_alloc_inum(&net->ns);
}
static __net_exit void net_ns_net_exit(struct net *net)
{
- proc_free_inum(net->proc_inum);
+ ns_free_inum(&net->ns);
}
static struct pernet_operations __net_initdata net_ns_ops = {
@@ -629,7 +632,7 @@ void unregister_pernet_device(struct pernet_operations *ops)
EXPORT_SYMBOL_GPL(unregister_pernet_device);
#ifdef CONFIG_NET_NS
-static void *netns_get(struct task_struct *task)
+static struct ns_common *netns_get(struct task_struct *task)
{
struct net *net = NULL;
struct nsproxy *nsproxy;
@@ -640,17 +643,22 @@ static void *netns_get(struct task_struct *task)
net = get_net(nsproxy->net_ns);
task_unlock(task);
- return net;
+ return net ? &net->ns : NULL;
}
-static void netns_put(void *ns)
+static inline struct net *to_net_ns(struct ns_common *ns)
{
- put_net(ns);
+ return container_of(ns, struct net, ns);
}
-static int netns_install(struct nsproxy *nsproxy, void *ns)
+static void netns_put(struct ns_common *ns)
{
- struct net *net = ns;
+ put_net(to_net_ns(ns));
+}
+
+static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns)
+{
+ struct net *net = to_net_ns(ns);
if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
!ns_capable(current_user_ns(), CAP_SYS_ADMIN))
@@ -661,18 +669,11 @@ static int netns_install(struct nsproxy *nsproxy, void *ns)
return 0;
}
-static unsigned int netns_inum(void *ns)
-{
- struct net *net = ns;
- return net->proc_inum;
-}
-
const struct proc_ns_operations netns_operations = {
.name = "net",
.type = CLONE_NEWNET,
.get = netns_get,
.put = netns_put,
.install = netns_install,
- .inum = netns_inum,
};
#endif
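Editor's note: the net_namespace.c hunks replace the old void-pointer namespace plumbing with a struct ns_common embedded in struct net; netns_get/put/install now take struct ns_common * and recover the enclosing struct net with container_of(). A self-contained sketch of that embedding pattern follows, using toy struct names (my_ns_common, my_net); only the container_of arithmetic mirrors the kernel code.

#include <stdio.h>
#include <stddef.h>

/* offsetof-based container_of, as used throughout the kernel */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct my_ns_common {
    const char *name;
    unsigned int inum;
};

struct my_net {
    int ifindex_seed;
    struct my_ns_common ns;   /* embedded, not pointed-to */
};

static struct my_net *to_net(struct my_ns_common *ns)
{
    return container_of(ns, struct my_net, ns);
}

int main(void)
{
    struct my_net net = { .ifindex_seed = 42, .ns = { "net", 0xf0000001 } };
    struct my_ns_common *ns = &net.ns;   /* what a generic API would hand back */

    /* recover the enclosing object without any void * casts */
    printf("seed=%d inum=%u\n", to_net(ns)->ifindex_seed, ns->inum);
    return 0;
}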
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index d06107d36ec..9cf6fe9ddc0 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2368,6 +2368,11 @@ int ndo_dflt_fdb_add(struct ndmsg *ndm,
return err;
}
+ if (vid) {
+ pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
+ return err;
+ }
+
if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
err = dev_uc_add_excl(dev, addr);
else if (is_multicast_ether_addr(addr))
diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c
index a457232f013..95e47c97585 100644
--- a/net/ipv4/geneve.c
+++ b/net/ipv4/geneve.c
@@ -159,6 +159,15 @@ static void geneve_notify_add_rx_port(struct geneve_sock *gs)
}
}
+static void geneve_notify_del_rx_port(struct geneve_sock *gs)
+{
+ struct sock *sk = gs->sock->sk;
+ sa_family_t sa_family = sk->sk_family;
+
+ if (sa_family == AF_INET)
+ udp_del_offload(&gs->udp_offloads);
+}
+
/* Callback from net/ipv4/udp.c to receive packets */
static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
@@ -287,6 +296,7 @@ struct geneve_sock *geneve_sock_add(struct net *net, __be16 port,
geneve_rcv_t *rcv, void *data,
bool no_share, bool ipv6)
{
+ struct geneve_net *gn = net_generic(net, geneve_net_id);
struct geneve_sock *gs;
gs = geneve_socket_create(net, port, rcv, data, ipv6);
@@ -296,15 +306,15 @@ struct geneve_sock *geneve_sock_add(struct net *net, __be16 port,
if (no_share) /* Return error if sharing is not allowed. */
return ERR_PTR(-EINVAL);
+ spin_lock(&gn->sock_lock);
gs = geneve_find_sock(net, port);
- if (gs) {
- if (gs->rcv == rcv)
- atomic_inc(&gs->refcnt);
- else
+ if (gs && ((gs->rcv != rcv) ||
+ !atomic_add_unless(&gs->refcnt, 1, 0)))
gs = ERR_PTR(-EBUSY);
- } else {
+ spin_unlock(&gn->sock_lock);
+
+ if (!gs)
gs = ERR_PTR(-EINVAL);
- }
return gs;
}
@@ -312,9 +322,17 @@ EXPORT_SYMBOL_GPL(geneve_sock_add);
void geneve_sock_release(struct geneve_sock *gs)
{
+ struct net *net = sock_net(gs->sock->sk);
+ struct geneve_net *gn = net_generic(net, geneve_net_id);
+
if (!atomic_dec_and_test(&gs->refcnt))
return;
+ spin_lock(&gn->sock_lock);
+ hlist_del_rcu(&gs->hlist);
+ geneve_notify_del_rx_port(gs);
+ spin_unlock(&gn->sock_lock);
+
queue_work(geneve_wq, &gs->del_work);
}
EXPORT_SYMBOL_GPL(geneve_sock_release);
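Editor's note: the geneve hunks close a race between sock lookup and teardown by taking gn->sock_lock around the lookup and by bumping the refcount with atomic_add_unless(&gs->refcnt, 1, 0), so a socket whose count has already hit zero is never resurrected. A userspace sketch of that "get unless zero" idiom with C11 atomics; the helper name get_unless_zero is an invention for illustration, not a geneve function.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
    atomic_int refcnt;
    bool dead;
};

/* bump the refcount only if it is still non-zero; mirrors atomic_add_unless(v, 1, 0) */
static bool get_unless_zero(struct obj *o)
{
    int old = atomic_load(&o->refcnt);

    while (old != 0) {
        if (atomic_compare_exchange_weak(&o->refcnt, &old, old + 1))
            return true;          /* we now hold a reference */
        /* old was reloaded by the failed CAS; loop and retry */
    }
    return false;                 /* object is already on its way out */
}

static void put(struct obj *o)
{
    if (atomic_fetch_sub(&o->refcnt, 1) == 1)
        o->dead = true;           /* last ref gone: tear down */
}

int main(void)
{
    struct obj o = { .refcnt = 1 };

    if (get_unless_zero(&o))
        puts("reused existing object");
    put(&o);                      /* drop our extra ref */
    put(&o);                      /* drop the original ref, object dies */
    printf("get after death: %s\n", get_unless_zero(&o) ? "yes" : "no");
    return 0;
}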
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index ac8491245e5..4f4bf5b9968 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -252,10 +252,6 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
struct ip_tunnel *tunnel = netdev_priv(dev);
const struct iphdr *tnl_params;
- skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
- if (IS_ERR(skb))
- goto out;
-
if (dev->header_ops) {
/* Need space for new headers */
if (skb_cow_head(skb, dev->needed_headroom -
@@ -268,6 +264,7 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
* to gre header.
*/
skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
+ skb_reset_mac_header(skb);
} else {
if (skb_cow_head(skb, dev->needed_headroom))
goto free_skb;
@@ -275,6 +272,10 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
tnl_params = &tunnel->parms.iph;
}
+ skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
+ if (IS_ERR(skb))
+ goto out;
+
__gre_xmit(skb, dev, tnl_params, skb->protocol);
return NETDEV_TX_OK;
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 63e745aadab..d3e44793672 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -514,6 +514,9 @@ const struct ip_tunnel_encap_ops __rcu *
int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *ops,
unsigned int num)
{
+ if (num >= MAX_IPTUN_ENCAP_OPS)
+ return -ERANGE;
+
return !cmpxchg((const struct ip_tunnel_encap_ops **)
&iptun_encaps[num],
NULL, ops) ? 0 : -1;
@@ -525,6 +528,9 @@ int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *ops,
{
int ret;
+ if (num >= MAX_IPTUN_ENCAP_OPS)
+ return -ERANGE;
+
ret = (cmpxchg((const struct ip_tunnel_encap_ops **)
&iptun_encaps[num],
ops, NULL) == ops) ? 0 : -1;
@@ -567,6 +573,9 @@ int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
if (t->encap.type == TUNNEL_ENCAP_NONE)
return 0;
+ if (t->encap.type >= MAX_IPTUN_ENCAP_OPS)
+ return -EINVAL;
+
rcu_read_lock();
ops = rcu_dereference(iptun_encaps[t->encap.type]);
if (likely(ops && ops->build_header))
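Editor's note: the ip_tunnel.c hunks add range checks on num and t->encap.type before indexing iptun_encaps[], alongside the existing lock-free cmpxchg() registration that only claims a slot if it is currently NULL. A userspace sketch of the same pattern, assuming a toy ops table and MAX_OPS constant rather than the real iptun_encaps array.

#include <stdatomic.h>
#include <errno.h>
#include <stdio.h>

#define MAX_OPS 8

struct encap_ops {
    const char *name;
    int (*build_header)(void);
};

static _Atomic(const struct encap_ops *) encaps[MAX_OPS];

static int register_ops(const struct encap_ops *ops, unsigned int num)
{
    const struct encap_ops *expected = NULL;

    if (num >= MAX_OPS)                 /* reject out-of-range types up front */
        return -ERANGE;

    /* lock-free "claim the slot if empty", like cmpxchg(&iptun_encaps[num], NULL, ops) */
    return atomic_compare_exchange_strong(&encaps[num], &expected, ops) ? 0 : -1;
}

static int unregister_ops(const struct encap_ops *ops, unsigned int num)
{
    const struct encap_ops *expected = ops;

    if (num >= MAX_OPS)
        return -ERANGE;
    return atomic_compare_exchange_strong(&encaps[num], &expected, NULL) ? 0 : -1;
}

static int dummy_header(void) { return 0; }

int main(void)
{
    static const struct encap_ops fou = { "fou", dummy_header };

    printf("register in range:  %d\n", register_ops(&fou, 1));
    printf("register twice:     %d\n", register_ops(&fou, 1));
    printf("register oob:       %d\n", register_ops(&fou, 99));
    printf("unregister:         %d\n", unregister_ops(&fou, 1));
    return 0;
}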
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 5d6dae9e4aa..da1c12c3448 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -1011,6 +1011,10 @@ ieee80211_vif_use_reserved_reassign(struct ieee80211_sub_if_data *sdata)
ieee80211_vif_update_chandef(sdata, &sdata->reserved_chandef);
+ ieee80211_recalc_smps_chanctx(local, new_ctx);
+ ieee80211_recalc_radar_chanctx(local, new_ctx);
+ ieee80211_recalc_chanctx_min_def(local, new_ctx);
+
if (changed)
ieee80211_bss_info_change_notify(sdata, changed);
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 434a91ad12c..0bb7038121a 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -656,7 +656,7 @@ void ieee80211_free_sta_keys(struct ieee80211_local *local,
int i;
mutex_lock(&local->key_mtx);
- for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
+ for (i = 0; i < ARRAY_SIZE(sta->gtk); i++) {
key = key_mtx_dereference(local, sta->gtk[i]);
if (!key)
continue;
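Editor's note: the key.c hunk frees station keys by walking ARRAY_SIZE(sta->gtk) entries instead of the unrelated NUM_DEFAULT_KEYS constant, so slots beyond the default keys are no longer skipped. The usual ARRAY_SIZE macro, shown standalone with a toy array:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
    int gtk[6] = { 0 };   /* e.g. default-key slots plus a couple of extras */

    /* a hard-coded bound (say, 4) would silently skip the last slots;
     * ARRAY_SIZE always tracks the array's real length */
    for (size_t i = 0; i < ARRAY_SIZE(gtk); i++)
        gtk[i] = (int)i;

    printf("slots visited: %zu\n", ARRAY_SIZE(gtk));
    return 0;
}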
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 75a9bf50207..2c36c4765f4 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -174,6 +174,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
if (!(ht_cap->cap_info &
cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40))) {
ret = IEEE80211_STA_DISABLE_40MHZ;
+ vht_chandef = *chandef;
goto out;
}
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 49c23bdf08b..683b10f4650 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1761,14 +1761,14 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
sc = le16_to_cpu(hdr->seq_ctrl);
frag = sc & IEEE80211_SCTL_FRAG;
- if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
- goto out;
-
if (is_multicast_ether_addr(hdr->addr1)) {
rx->local->dot11MulticastReceivedFrameCount++;
- goto out;
+ goto out_no_led;
}
+ if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
+ goto out;
+
I802_DEBUG_INC(rx->local->rx_handlers_fragments);
if (skb_linearize(rx->skb))
@@ -1859,9 +1859,10 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
status->rx_flags |= IEEE80211_RX_FRAGMENTED;
out:
+ ieee80211_led_rx(rx->local);
+ out_no_led:
if (rx->sta)
rx->sta->rx_packets++;
- ieee80211_led_rx(rx->local);
return RX_CONTINUE;
}
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index ef5f77b44ec..074cf3e91c6 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -525,14 +525,14 @@ out:
return err;
}
-static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr)
+static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr, unsigned int nm_len)
{
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
struct page *p_start, *p_end;
/* First page is flushed through netlink_{get,set}_status */
p_start = pgvec_to_page(hdr + PAGE_SIZE);
- p_end = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + hdr->nm_len - 1);
+ p_end = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1);
while (p_start <= p_end) {
flush_dcache_page(p_start);
p_start++;
@@ -550,9 +550,9 @@ static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
static void netlink_set_status(struct nl_mmap_hdr *hdr,
enum nl_mmap_status status)
{
+ smp_mb();
hdr->nm_status = status;
flush_dcache_page(pgvec_to_page(hdr));
- smp_wmb();
}
static struct nl_mmap_hdr *
@@ -714,24 +714,16 @@ static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
struct nl_mmap_hdr *hdr;
struct sk_buff *skb;
unsigned int maxlen;
- bool excl = true;
int err = 0, len = 0;
- /* Netlink messages are validated by the receiver before processing.
- * In order to avoid userspace changing the contents of the message
- * after validation, the socket and the ring may only be used by a
- * single process, otherwise we fall back to copying.
- */
- if (atomic_long_read(&sk->sk_socket->file->f_count) > 1 ||
- atomic_read(&nlk->mapped) > 1)
- excl = false;
-
mutex_lock(&nlk->pg_vec_lock);
ring = &nlk->tx_ring;
maxlen = ring->frame_size - NL_MMAP_HDRLEN;
do {
+ unsigned int nm_len;
+
hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
if (hdr == NULL) {
if (!(msg->msg_flags & MSG_DONTWAIT) &&
@@ -739,35 +731,23 @@ static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
schedule();
continue;
}
- if (hdr->nm_len > maxlen) {
+
+ nm_len = ACCESS_ONCE(hdr->nm_len);
+ if (nm_len > maxlen) {
err = -EINVAL;
goto out;
}
- netlink_frame_flush_dcache(hdr);
+ netlink_frame_flush_dcache(hdr, nm_len);
- if (likely(dst_portid == 0 && dst_group == 0 && excl)) {
- skb = alloc_skb_head(GFP_KERNEL);
- if (skb == NULL) {
- err = -ENOBUFS;
- goto out;
- }
- sock_hold(sk);
- netlink_ring_setup_skb(skb, sk, ring, hdr);
- NETLINK_CB(skb).flags |= NETLINK_SKB_TX;
- __skb_put(skb, hdr->nm_len);
- netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
- atomic_inc(&ring->pending);
- } else {
- skb = alloc_skb(hdr->nm_len, GFP_KERNEL);
- if (skb == NULL) {
- err = -ENOBUFS;
- goto out;
- }
- __skb_put(skb, hdr->nm_len);
- memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, hdr->nm_len);
- netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
+ skb = alloc_skb(nm_len, GFP_KERNEL);
+ if (skb == NULL) {
+ err = -ENOBUFS;
+ goto out;
}
+ __skb_put(skb, nm_len);
+ memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);
+ netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
netlink_increment_head(ring);
@@ -813,7 +793,7 @@ static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
hdr->nm_pid = NETLINK_CB(skb).creds.pid;
hdr->nm_uid = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
hdr->nm_gid = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
- netlink_frame_flush_dcache(hdr);
+ netlink_frame_flush_dcache(hdr, hdr->nm_len);
netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
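Editor's note: the af_netlink.c hunks fix a time-of-check/time-of-use race in the mmaped TX ring. hdr->nm_len lives in memory shared with userspace, so the patch reads it exactly once via ACCESS_ONCE() into nm_len and uses that snapshot for the bounds check, the dcache flush and the copy. A hedged userspace sketch of the same "snapshot a shared length before validating and copying" rule, using a volatile read in place of ACCESS_ONCE; the struct and function names are invented.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define FRAME_SIZE 64u

struct frame_hdr {
    volatile uint32_t nm_len;       /* written by an untrusted peer */
    unsigned char data[FRAME_SIZE];
};

/* copy out one frame; returns copied bytes or -1 on a bogus length */
static int copy_frame(const struct frame_hdr *hdr, unsigned char *dst, size_t maxlen)
{
    /* read the shared length exactly once */
    uint32_t nm_len = hdr->nm_len;

    if (nm_len > maxlen)            /* validate the snapshot ... */
        return -1;

    memcpy(dst, hdr->data, nm_len); /* ... and copy the same snapshot */
    return (int)nm_len;
}

int main(void)
{
    struct frame_hdr hdr = { .nm_len = 5 };
    unsigned char buf[FRAME_SIZE];

    memcpy((void *)hdr.data, "hello", 5);
    printf("copied %d bytes\n", copy_frame(&hdr, buf, sizeof(buf)));
    return 0;
}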
diff --git a/net/nonet.c b/net/nonet.c
deleted file mode 100644
index b1a73fda9c1..00000000000
--- a/net/nonet.c
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * net/nonet.c
- *
- * Dummy functions to allow us to configure network support entirely
- * out of the kernel.
- *
- * Distributed under the terms of the GNU GPL version 2.
- * Copyright (c) Matthew Wilcox 2003
- */
-
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-
-static int sock_no_open(struct inode *irrelevant, struct file *dontcare)
-{
- return -ENXIO;
-}
-
-const struct file_operations bad_sock_fops = {
- .owner = THIS_MODULE,
- .open = sock_no_open,
- .llseek = noop_llseek,
-};
diff --git a/net/rds/message.c b/net/rds/message.c
index ff220221818..5a21e6f5986 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -325,7 +325,8 @@ int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
copied = 0;
while (iov_iter_count(to) && copied < len) {
- to_copy = min(iov_iter_count(to), sg->length - vec_off);
+ to_copy = min_t(unsigned long, iov_iter_count(to),
+ sg->length - vec_off);
to_copy = min_t(unsigned long, to_copy, len - copied);
rds_stats_add(s_copy_to_user, to_copy);
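Editor's note: the RDS hunk switches from min() to min_t(unsigned long, ...) because iov_iter_count() returns size_t while sg->length - vec_off is an unsigned int, and the kernel's min() refuses to compare mismatched types. A tiny illustration of forcing a common type; the macro below is a plain cast-both-sides stand-in, not the kernel's type-checked min_t.

#include <stdio.h>
#include <stddef.h>

/* simplified: cast both operands to one type before comparing */
#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
    size_t remaining = 70000;        /* what iov_iter_count() might return */
    unsigned int seg_left = 1500;    /* e.g. sg->length - vec_off */

    unsigned long to_copy = min_t(unsigned long, remaining, seg_left);

    printf("copy %lu bytes this round\n", to_copy);
    return 0;
}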
diff --git a/net/socket.c b/net/socket.c
index 8809afccf7f..a2c33a4dc7b 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -113,7 +113,6 @@ unsigned int sysctl_net_busy_read __read_mostly;
unsigned int sysctl_net_busy_poll __read_mostly;
#endif
-static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos);
static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov,
@@ -151,7 +150,6 @@ static const struct file_operations socket_file_ops = {
.compat_ioctl = compat_sock_ioctl,
#endif
.mmap = sock_mmap,
- .open = sock_no_open, /* special open code to disallow open via /proc */
.release = sock_close,
.fasync = sock_fasync,
.sendpage = sock_sendpage,
@@ -374,7 +372,6 @@ struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname)
path.mnt = mntget(sock_mnt);
d_instantiate(path.dentry, SOCK_INODE(sock));
- SOCK_INODE(sock)->i_fop = &socket_file_ops;
file = alloc_file(&path, FMODE_READ | FMODE_WRITE,
&socket_file_ops);
@@ -559,23 +556,6 @@ static struct socket *sock_alloc(void)
return sock;
}
-/*
- * In theory you can't get an open on this inode, but /proc provides
- * a back door. Remember to keep it shut otherwise you'll let the
- * creepy crawlies in.
- */
-
-static int sock_no_open(struct inode *irrelevant, struct file *dontcare)
-{
- return -ENXIO;
-}
-
-const struct file_operations bad_sock_fops = {
- .owner = THIS_MODULE,
- .open = sock_no_open,
- .llseek = noop_llseek,
-};
-
/**
* sock_release - close a socket
* @sock: socket to close
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index de856ddf5fe..224a82f24d3 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -886,7 +886,7 @@ unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gs
u32 priv_len, maj_stat;
int pad, saved_len, remaining_len, offset;
- rqstp->rq_splice_ok = false;
+ clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
priv_len = svc_getnl(&buf->head[0]);
if (rqstp->rq_deferred) {
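Editor's note: this hunk and the later sunrpc ones convert per-request booleans (rq_splice_ok, rq_usedeferral, rq_dropme, ...) into bits of rqstp->rq_flags manipulated with set_bit/clear_bit/test_bit, so each flag can be flipped atomically without extra locking. A userspace sketch of the flag-word idiom using C11 atomics; the RQ_* names mirror the patch but the helpers below are simplified stand-ins for the kernel bitops.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { RQ_SECURE, RQ_SPLICE_OK, RQ_USEDEFERRAL, RQ_DROPME, RQ_BUSY };

struct rqst {
    atomic_ulong flags;
};

static void set_bit_at(int nr, atomic_ulong *w)   { atomic_fetch_or(w, 1UL << nr); }
static void clear_bit_at(int nr, atomic_ulong *w) { atomic_fetch_and(w, ~(1UL << nr)); }
static bool test_bit_at(int nr, atomic_ulong *w)  { return atomic_load(w) & (1UL << nr); }

/* like test_and_set_bit(): returns the previous value of the bit */
static bool test_and_set_bit_at(int nr, atomic_ulong *w)
{
    return atomic_fetch_or(w, 1UL << nr) & (1UL << nr);
}

int main(void)
{
    struct rqst rq = { .flags = 0 };

    set_bit_at(RQ_SPLICE_OK, &rq.flags);
    set_bit_at(RQ_USEDEFERRAL, &rq.flags);
    clear_bit_at(RQ_DROPME, &rq.flags);

    /* gss privacy path: turn splice off again */
    clear_bit_at(RQ_SPLICE_OK, &rq.flags);

    printf("splice_ok=%d dropme=%d first-claim-busy=%d second-claim-busy=%d\n",
           test_bit_at(RQ_SPLICE_OK, &rq.flags),
           test_bit_at(RQ_DROPME, &rq.flags),
           test_and_set_bit_at(RQ_BUSY, &rq.flags),
           test_and_set_bit_at(RQ_BUSY, &rq.flags));
    return 0;
}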
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 06636214113..33fb105d435 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -20,6 +20,7 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
+#include <linux/string_helpers.h>
#include <asm/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
@@ -1067,30 +1068,15 @@ void qword_add(char **bpp, int *lp, char *str)
{
char *bp = *bpp;
int len = *lp;
- char c;
+ int ret;
if (len < 0) return;
- while ((c=*str++) && len)
- switch(c) {
- case ' ':
- case '\t':
- case '\n':
- case '\\':
- if (len >= 4) {
- *bp++ = '\\';
- *bp++ = '0' + ((c & 0300)>>6);
- *bp++ = '0' + ((c & 0070)>>3);
- *bp++ = '0' + ((c & 0007)>>0);
- }
- len -= 4;
- break;
- default:
- *bp++ = c;
- len--;
- }
- if (c || len <1) len = -1;
+ ret = string_escape_str(str, &bp, len, ESCAPE_OCTAL, "\\ \n\t");
+ if (ret < 0 || ret == len)
+ len = -1;
else {
+ len -= ret;
*bp++ = ' ';
len--;
}
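Editor's note: the cache.c hunk drops a hand-rolled loop in qword_add() in favour of string_escape_str(..., ESCAPE_OCTAL, "\\ \n\t"), which octal-escapes backslash and whitespace. For reference, a small userspace routine with the same semantics; the name escape_octal is invented and this is a sketch, not the lib/string_helpers.c implementation.

#include <stdio.h>
#include <string.h>

/* escape any byte found in `only` as \ooo; returns chars written, or -1 if dst is too small */
static int escape_octal(const char *src, char *dst, size_t dstlen, const char *only)
{
    size_t out = 0;

    for (; *src; src++) {
        unsigned char c = (unsigned char)*src;

        if (strchr(only, c)) {
            if (out + 4 > dstlen)
                return -1;
            dst[out++] = '\\';
            dst[out++] = '0' + ((c >> 6) & 7);
            dst[out++] = '0' + ((c >> 3) & 7);
            dst[out++] = '0' + (c & 7);
        } else {
            if (out + 1 > dstlen)
                return -1;
            dst[out++] = (char)c;
        }
    }
    return (int)out;
}

int main(void)
{
    char buf[64];
    int n = escape_octal("user name\twith\\odd chars", buf, sizeof(buf) - 1, "\\ \n\t");

    if (n >= 0) {
        buf[n] = '\0';
        printf("%s\n", buf);   /* user\040name\011with\134odd\040chars */
    }
    return 0;
}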
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 2783fd80c22..91eaef1844c 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -191,7 +191,7 @@ svc_pool_map_init_percpu(struct svc_pool_map *m)
return err;
for_each_online_cpu(cpu) {
- BUG_ON(pidx > maxpools);
+ BUG_ON(pidx >= maxpools);
m->to_pool[cpu] = pidx;
m->pool_to[pidx] = cpu;
pidx++;
@@ -476,15 +476,11 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
i, serv->sv_name);
pool->sp_id = i;
- INIT_LIST_HEAD(&pool->sp_threads);
INIT_LIST_HEAD(&pool->sp_sockets);
INIT_LIST_HEAD(&pool->sp_all_threads);
spin_lock_init(&pool->sp_lock);
}
- if (svc_uses_rpcbind(serv) && (!serv->sv_shutdown))
- serv->sv_shutdown = svc_rpcb_cleanup;
-
return serv;
}
@@ -505,13 +501,15 @@ svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
unsigned int npools = svc_pool_map_get();
serv = __svc_create(prog, bufsize, npools, shutdown);
+ if (!serv)
+ goto out_err;
- if (serv != NULL) {
- serv->sv_function = func;
- serv->sv_module = mod;
- }
-
+ serv->sv_function = func;
+ serv->sv_module = mod;
return serv;
+out_err:
+ svc_pool_map_put();
+ return NULL;
}
EXPORT_SYMBOL_GPL(svc_create_pooled);
@@ -615,12 +613,14 @@ svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
goto out_enomem;
serv->sv_nrthreads++;
+ __set_bit(RQ_BUSY, &rqstp->rq_flags);
+ spin_lock_init(&rqstp->rq_lock);
+ rqstp->rq_server = serv;
+ rqstp->rq_pool = pool;
spin_lock_bh(&pool->sp_lock);
pool->sp_nrthreads++;
- list_add(&rqstp->rq_all, &pool->sp_all_threads);
+ list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads);
spin_unlock_bh(&pool->sp_lock);
- rqstp->rq_server = serv;
- rqstp->rq_pool = pool;
rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
if (!rqstp->rq_argp)
@@ -685,7 +685,8 @@ found_pool:
* so we don't try to kill it again.
*/
rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
- list_del_init(&rqstp->rq_all);
+ set_bit(RQ_VICTIM, &rqstp->rq_flags);
+ list_del_rcu(&rqstp->rq_all);
task = rqstp->rq_task;
}
spin_unlock_bh(&pool->sp_lock);
@@ -783,10 +784,11 @@ svc_exit_thread(struct svc_rqst *rqstp)
spin_lock_bh(&pool->sp_lock);
pool->sp_nrthreads--;
- list_del(&rqstp->rq_all);
+ if (!test_and_set_bit(RQ_VICTIM, &rqstp->rq_flags))
+ list_del_rcu(&rqstp->rq_all);
spin_unlock_bh(&pool->sp_lock);
- kfree(rqstp);
+ kfree_rcu(rqstp, rq_rcu_head);
/* Release the server */
if (serv)
@@ -1086,10 +1088,10 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
goto err_short_len;
/* Will be turned off only in gss privacy case: */
- rqstp->rq_splice_ok = true;
+ set_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
/* Will be turned off only when NFSv4 Sessions are used */
- rqstp->rq_usedeferral = true;
- rqstp->rq_dropme = false;
+ set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
+ clear_bit(RQ_DROPME, &rqstp->rq_flags);
/* Setup reply header */
rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);
@@ -1189,7 +1191,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
*statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);
/* Encode reply */
- if (rqstp->rq_dropme) {
+ if (test_bit(RQ_DROPME, &rqstp->rq_flags)) {
if (procp->pc_release)
procp->pc_release(rqstp, NULL, rqstp->rq_resp);
goto dropit;
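Editor's note: the svc.c hunks move sp_all_threads to RCU list primitives: entries are added with list_add_rcu(), unlinked with list_del_rcu() only by whoever wins the RQ_VICTIM test_and_set_bit() (so shutdown and svc_exit_thread cannot both unlink the same rqstp), and freed with kfree_rcu() once readers are done. A compact sketch of that "claim victim, unlink once, defer the free" control flow; real RCU grace periods are replaced by a simple flag here, so this only illustrates the ordering, not the synchronisation.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct rqst {
    atomic_bool victim;          /* stands in for the RQ_VICTIM bit        */
    struct rqst *next;           /* stands in for the RCU-protected list   */
    bool pending_free;           /* stands in for the kfree_rcu() deferral */
};

/* claim the right to unlink; only the first caller gets true */
static bool claim_victim(struct rqst *r)
{
    return !atomic_exchange(&r->victim, true);
}

static void remove_and_defer_free(struct rqst **head, struct rqst *r)
{
    if (!claim_victim(r))
        return;                              /* already unlinked elsewhere */

    for (struct rqst **pp = head; *pp; pp = &(*pp)->next) {
        if (*pp == r) {
            *pp = r->next;                   /* list_del_rcu() analogue */
            break;
        }
    }
    r->pending_free = true;                  /* actual free waits for readers */
}

int main(void)
{
    struct rqst a = { 0 }, b = { 0 };
    struct rqst *head = &a;

    a.next = &b;
    remove_and_defer_free(&head, &b);        /* e.g. svc_exit_thread()     */
    remove_and_defer_free(&head, &b);        /* e.g. shutdown: now a no-op */

    printf("b pending_free=%d, list head still %s\n",
           b.pending_free, head == &a ? "a" : "?");
    return 0;
}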
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index bbb3b044b87..c69358b3cf7 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -220,9 +220,11 @@ static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
*/
static void svc_xprt_received(struct svc_xprt *xprt)
{
- WARN_ON_ONCE(!test_bit(XPT_BUSY, &xprt->xpt_flags));
- if (!test_bit(XPT_BUSY, &xprt->xpt_flags))
+ if (!test_bit(XPT_BUSY, &xprt->xpt_flags)) {
+ WARN_ONCE(1, "xprt=0x%p already busy!", xprt);
return;
+ }
+
/* As soon as we clear busy, the xprt could be closed and
* 'put', so we need a reference to call svc_xprt_do_enqueue with:
*/
@@ -310,25 +312,6 @@ char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
}
EXPORT_SYMBOL_GPL(svc_print_addr);
-/*
- * Queue up an idle server thread. Must have pool->sp_lock held.
- * Note: this is really a stack rather than a queue, so that we only
- * use as many different threads as we need, and the rest don't pollute
- * the cache.
- */
-static void svc_thread_enqueue(struct svc_pool *pool, struct svc_rqst *rqstp)
-{
- list_add(&rqstp->rq_list, &pool->sp_threads);
-}
-
-/*
- * Dequeue an nfsd thread. Must have pool->sp_lock held.
- */
-static void svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp)
-{
- list_del(&rqstp->rq_list);
-}
-
static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
{
if (xprt->xpt_flags & ((1<<XPT_CONN)|(1<<XPT_CLOSE)))
@@ -341,11 +324,12 @@ static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
static void svc_xprt_do_enqueue(struct svc_xprt *xprt)
{
struct svc_pool *pool;
- struct svc_rqst *rqstp;
+ struct svc_rqst *rqstp = NULL;
int cpu;
+ bool queued = false;
if (!svc_xprt_has_something_to_do(xprt))
- return;
+ goto out;
/* Mark transport as busy. It will remain in this state until
* the provider calls svc_xprt_received. We update XPT_BUSY
@@ -355,43 +339,69 @@ static void svc_xprt_do_enqueue(struct svc_xprt *xprt)
if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) {
/* Don't enqueue transport while already enqueued */
dprintk("svc: transport %p busy, not enqueued\n", xprt);
- return;
+ goto out;
}
cpu = get_cpu();
pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
- spin_lock_bh(&pool->sp_lock);
- pool->sp_stats.packets++;
-
- if (!list_empty(&pool->sp_threads)) {
- rqstp = list_entry(pool->sp_threads.next,
- struct svc_rqst,
- rq_list);
- dprintk("svc: transport %p served by daemon %p\n",
- xprt, rqstp);
- svc_thread_dequeue(pool, rqstp);
- if (rqstp->rq_xprt)
- printk(KERN_ERR
- "svc_xprt_enqueue: server %p, rq_xprt=%p!\n",
- rqstp, rqstp->rq_xprt);
- /* Note the order of the following 3 lines:
- * We want to assign xprt to rqstp->rq_xprt only _after_
- * we've woken up the process, so that we don't race with
- * the lockless check in svc_get_next_xprt().
+ atomic_long_inc(&pool->sp_stats.packets);
+
+redo_search:
+ /* find a thread for this xprt */
+ rcu_read_lock();
+ list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
+ /* Do a lockless check first */
+ if (test_bit(RQ_BUSY, &rqstp->rq_flags))
+ continue;
+
+ /*
+ * Once the xprt has been queued, it can only be dequeued by
+ * the task that intends to service it. All we can do at that
+ * point is to try to wake this thread back up so that it can
+ * do so.
*/
- svc_xprt_get(xprt);
+ if (!queued) {
+ spin_lock_bh(&rqstp->rq_lock);
+ if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags)) {
+ /* already busy, move on... */
+ spin_unlock_bh(&rqstp->rq_lock);
+ continue;
+ }
+
+ /* this one will do */
+ rqstp->rq_xprt = xprt;
+ svc_xprt_get(xprt);
+ spin_unlock_bh(&rqstp->rq_lock);
+ }
+ rcu_read_unlock();
+
+ atomic_long_inc(&pool->sp_stats.threads_woken);
wake_up_process(rqstp->rq_task);
- rqstp->rq_xprt = xprt;
- pool->sp_stats.threads_woken++;
- } else {
+ put_cpu();
+ goto out;
+ }
+ rcu_read_unlock();
+
+ /*
+ * We didn't find an idle thread to use, so we need to queue the xprt.
+ * Do so and then search again. If we find one, we can't hook this one
+ * up to it directly but we can wake the thread up in the hopes that it
+ * will pick it up once it searches for a xprt to service.
+ */
+ if (!queued) {
+ queued = true;
dprintk("svc: transport %p put into queue\n", xprt);
+ spin_lock_bh(&pool->sp_lock);
list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
pool->sp_stats.sockets_queued++;
+ spin_unlock_bh(&pool->sp_lock);
+ goto redo_search;
}
-
- spin_unlock_bh(&pool->sp_lock);
+ rqstp = NULL;
put_cpu();
+out:
+ trace_svc_xprt_do_enqueue(xprt, rqstp);
}
/*
@@ -408,22 +418,28 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
EXPORT_SYMBOL_GPL(svc_xprt_enqueue);
/*
- * Dequeue the first transport. Must be called with the pool->sp_lock held.
+ * Dequeue the first transport, if there is one.
*/
static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
{
- struct svc_xprt *xprt;
+ struct svc_xprt *xprt = NULL;
if (list_empty(&pool->sp_sockets))
- return NULL;
-
- xprt = list_entry(pool->sp_sockets.next,
- struct svc_xprt, xpt_ready);
- list_del_init(&xprt->xpt_ready);
+ goto out;
- dprintk("svc: transport %p dequeued, inuse=%d\n",
- xprt, atomic_read(&xprt->xpt_ref.refcount));
+ spin_lock_bh(&pool->sp_lock);
+ if (likely(!list_empty(&pool->sp_sockets))) {
+ xprt = list_first_entry(&pool->sp_sockets,
+ struct svc_xprt, xpt_ready);
+ list_del_init(&xprt->xpt_ready);
+ svc_xprt_get(xprt);
+ dprintk("svc: transport %p dequeued, inuse=%d\n",
+ xprt, atomic_read(&xprt->xpt_ref.refcount));
+ }
+ spin_unlock_bh(&pool->sp_lock);
+out:
+ trace_svc_xprt_dequeue(xprt);
return xprt;
}
@@ -484,34 +500,36 @@ static void svc_xprt_release(struct svc_rqst *rqstp)
}
/*
- * External function to wake up a server waiting for data
- * This really only makes sense for services like lockd
- * which have exactly one thread anyway.
+ * Some svc_serv's will have occasional work to do, even when a xprt is not
+ * waiting to be serviced. This function is there to "kick" a task in one of
+ * those services so that it can wake up and do that work. Note that we only
+ * bother with pool 0 as we don't need to wake up more than one thread for
+ * this purpose.
*/
void svc_wake_up(struct svc_serv *serv)
{
struct svc_rqst *rqstp;
- unsigned int i;
struct svc_pool *pool;
- for (i = 0; i < serv->sv_nrpools; i++) {
- pool = &serv->sv_pools[i];
+ pool = &serv->sv_pools[0];
- spin_lock_bh(&pool->sp_lock);
- if (!list_empty(&pool->sp_threads)) {
- rqstp = list_entry(pool->sp_threads.next,
- struct svc_rqst,
- rq_list);
- dprintk("svc: daemon %p woken up.\n", rqstp);
- /*
- svc_thread_dequeue(pool, rqstp);
- rqstp->rq_xprt = NULL;
- */
- wake_up_process(rqstp->rq_task);
- } else
- pool->sp_task_pending = 1;
- spin_unlock_bh(&pool->sp_lock);
+ rcu_read_lock();
+ list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
+ /* skip any that aren't queued */
+ if (test_bit(RQ_BUSY, &rqstp->rq_flags))
+ continue;
+ rcu_read_unlock();
+ dprintk("svc: daemon %p woken up.\n", rqstp);
+ wake_up_process(rqstp->rq_task);
+ trace_svc_wake_up(rqstp->rq_task->pid);
+ return;
}
+ rcu_read_unlock();
+
+ /* No free entries available */
+ set_bit(SP_TASK_PENDING, &pool->sp_flags);
+ smp_wmb();
+ trace_svc_wake_up(0);
}
EXPORT_SYMBOL_GPL(svc_wake_up);
@@ -622,75 +640,86 @@ static int svc_alloc_arg(struct svc_rqst *rqstp)
return 0;
}
+static bool
+rqst_should_sleep(struct svc_rqst *rqstp)
+{
+ struct svc_pool *pool = rqstp->rq_pool;
+
+ /* did someone call svc_wake_up? */
+ if (test_and_clear_bit(SP_TASK_PENDING, &pool->sp_flags))
+ return false;
+
+ /* was a socket queued? */
+ if (!list_empty(&pool->sp_sockets))
+ return false;
+
+ /* are we shutting down? */
+ if (signalled() || kthread_should_stop())
+ return false;
+
+ /* are we freezing? */
+ if (freezing(current))
+ return false;
+
+ return true;
+}
+
static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
{
struct svc_xprt *xprt;
struct svc_pool *pool = rqstp->rq_pool;
long time_left = 0;
+ /* rq_xprt should be clear on entry */
+ WARN_ON_ONCE(rqstp->rq_xprt);
+
/* Normally we will wait up to 5 seconds for any required
* cache information to be provided.
*/
rqstp->rq_chandle.thread_wait = 5*HZ;
- spin_lock_bh(&pool->sp_lock);
xprt = svc_xprt_dequeue(pool);
if (xprt) {
rqstp->rq_xprt = xprt;
- svc_xprt_get(xprt);
/* As there is a shortage of threads and this request
* had to be queued, don't allow the thread to wait so
* long for cache updates.
*/
rqstp->rq_chandle.thread_wait = 1*HZ;
- pool->sp_task_pending = 0;
- } else {
- if (pool->sp_task_pending) {
- pool->sp_task_pending = 0;
- xprt = ERR_PTR(-EAGAIN);
- goto out;
- }
- /*
- * We have to be able to interrupt this wait
- * to bring down the daemons ...
- */
- set_current_state(TASK_INTERRUPTIBLE);
+ clear_bit(SP_TASK_PENDING, &pool->sp_flags);
+ return xprt;
+ }
- /* No data pending. Go to sleep */
- svc_thread_enqueue(pool, rqstp);
- spin_unlock_bh(&pool->sp_lock);
+ /*
+ * We have to be able to interrupt this wait
+ * to bring down the daemons ...
+ */
+ set_current_state(TASK_INTERRUPTIBLE);
+ clear_bit(RQ_BUSY, &rqstp->rq_flags);
+ smp_mb();
- if (!(signalled() || kthread_should_stop())) {
- time_left = schedule_timeout(timeout);
- __set_current_state(TASK_RUNNING);
+ if (likely(rqst_should_sleep(rqstp)))
+ time_left = schedule_timeout(timeout);
+ else
+ __set_current_state(TASK_RUNNING);
- try_to_freeze();
+ try_to_freeze();
- xprt = rqstp->rq_xprt;
- if (xprt != NULL)
- return xprt;
- } else
- __set_current_state(TASK_RUNNING);
+ spin_lock_bh(&rqstp->rq_lock);
+ set_bit(RQ_BUSY, &rqstp->rq_flags);
+ spin_unlock_bh(&rqstp->rq_lock);
- spin_lock_bh(&pool->sp_lock);
- if (!time_left)
- pool->sp_stats.threads_timedout++;
+ xprt = rqstp->rq_xprt;
+ if (xprt != NULL)
+ return xprt;
- xprt = rqstp->rq_xprt;
- if (!xprt) {
- svc_thread_dequeue(pool, rqstp);
- spin_unlock_bh(&pool->sp_lock);
- dprintk("svc: server %p, no data yet\n", rqstp);
- if (signalled() || kthread_should_stop())
- return ERR_PTR(-EINTR);
- else
- return ERR_PTR(-EAGAIN);
- }
- }
-out:
- spin_unlock_bh(&pool->sp_lock);
- return xprt;
+ if (!time_left)
+ atomic_long_inc(&pool->sp_stats.threads_timedout);
+
+ if (signalled() || kthread_should_stop())
+ return ERR_PTR(-EINTR);
+ return ERR_PTR(-EAGAIN);
}
static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt)
@@ -719,7 +748,7 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
dprintk("svc_recv: found XPT_CLOSE\n");
svc_delete_xprt(xprt);
/* Leave XPT_BUSY set on the dead xprt: */
- return 0;
+ goto out;
}
if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
struct svc_xprt *newxpt;
@@ -750,6 +779,8 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
}
/* clear XPT_BUSY: */
svc_xprt_received(xprt);
+out:
+ trace_svc_handle_xprt(xprt, len);
return len;
}
@@ -797,7 +828,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
clear_bit(XPT_OLD, &xprt->xpt_flags);
- rqstp->rq_secure = xprt->xpt_ops->xpo_secure_port(rqstp);
+ if (xprt->xpt_ops->xpo_secure_port(rqstp))
+ set_bit(RQ_SECURE, &rqstp->rq_flags);
+ else
+ clear_bit(RQ_SECURE, &rqstp->rq_flags);
rqstp->rq_chandle.defer = svc_defer;
rqstp->rq_xid = svc_getu32(&rqstp->rq_arg.head[0]);
@@ -895,7 +929,6 @@ static void svc_age_temp_xprts(unsigned long closure)
continue;
list_del_init(le);
set_bit(XPT_CLOSE, &xprt->xpt_flags);
- set_bit(XPT_DETACHED, &xprt->xpt_flags);
dprintk("queuing xprt %p for closing\n", xprt);
/* a thread will dequeue and close it soon */
@@ -935,8 +968,7 @@ static void svc_delete_xprt(struct svc_xprt *xprt)
xprt->xpt_ops->xpo_detach(xprt);
spin_lock_bh(&serv->sv_lock);
- if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags))
- list_del_init(&xprt->xpt_list);
+ list_del_init(&xprt->xpt_list);
WARN_ON_ONCE(!list_empty(&xprt->xpt_ready));
if (test_bit(XPT_TEMP, &xprt->xpt_flags))
serv->sv_tmpcnt--;
@@ -1080,7 +1112,7 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req)
struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
struct svc_deferred_req *dr;
- if (rqstp->rq_arg.page_len || !rqstp->rq_usedeferral)
+ if (rqstp->rq_arg.page_len || !test_bit(RQ_USEDEFERRAL, &rqstp->rq_flags))
return NULL; /* if more than a page, give up FIXME */
if (rqstp->rq_deferred) {
dr = rqstp->rq_deferred;
@@ -1109,7 +1141,7 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req)
}
svc_xprt_get(rqstp->rq_xprt);
dr->xprt = rqstp->rq_xprt;
- rqstp->rq_dropme = true;
+ set_bit(RQ_DROPME, &rqstp->rq_flags);
dr->handle.revisit = svc_revisit;
return &dr->handle;
@@ -1311,10 +1343,10 @@ static int svc_pool_stats_show(struct seq_file *m, void *p)
seq_printf(m, "%u %lu %lu %lu %lu\n",
pool->sp_id,
- pool->sp_stats.packets,
+ (unsigned long)atomic_long_read(&pool->sp_stats.packets),
pool->sp_stats.sockets_queued,
- pool->sp_stats.threads_woken,
- pool->sp_stats.threads_timedout);
+ (unsigned long)atomic_long_read(&pool->sp_stats.threads_woken),
+ (unsigned long)atomic_long_read(&pool->sp_stats.threads_timedout));
return 0;
}
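Editor's note: the svc_xprt.c rewrite drops the per-pool idle-thread stack. svc_xprt_do_enqueue() now walks sp_all_threads under RCU, hands the transport directly to the first thread whose RQ_BUSY bit it wins, and only queues the transport on sp_sockets (and retries) when no idle thread was found; stats counters become atomic_long_t so they no longer need sp_lock. A simplified, lock-free sketch of that "hand work to an idle worker, else enqueue it" decision using C11 atomics and plain arrays; the wake-up and retry details are compressed away and all names are invented.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_WORKERS 4

struct worker {
    atomic_bool busy;
    int assigned;              /* -1 = nothing to do */
};

static struct worker workers[NR_WORKERS];
static int backlog[16];
static int backlog_len;

/* returns the worker index the item went to, or -1 if it was queued */
static int enqueue(int item)
{
    for (int i = 0; i < NR_WORKERS; i++) {
        /* lockless peek first, as the RQ_BUSY check does */
        if (atomic_load(&workers[i].busy))
            continue;
        /* try to win the worker; losing just means someone else claimed it */
        if (atomic_exchange(&workers[i].busy, true))
            continue;
        workers[i].assigned = item;     /* hand the transport over directly */
        /* here the real code would wake_up_process() the worker */
        return i;
    }
    backlog[backlog_len++] = item;      /* no idle worker: queue for later */
    return -1;
}

int main(void)
{
    for (int i = 0; i < NR_WORKERS; i++)
        workers[i].assigned = -1;

    for (int item = 100; item < 106; item++) {
        int w = enqueue(item);
        if (w >= 0)
            printf("item %d -> worker %d\n", item, w);
        else
            printf("item %d -> backlog (len %d)\n", item, backlog_len);
    }
    return 0;
}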
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index f9c052d508f..cc331b6cf57 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1145,7 +1145,10 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
rqstp->rq_xprt_ctxt = NULL;
rqstp->rq_prot = IPPROTO_TCP;
- rqstp->rq_local = !!test_bit(XPT_LOCAL, &svsk->sk_xprt.xpt_flags);
+ if (test_bit(XPT_LOCAL, &svsk->sk_xprt.xpt_flags))
+ set_bit(RQ_LOCAL, &rqstp->rq_flags);
+ else
+ clear_bit(RQ_LOCAL, &rqstp->rq_flags);
p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
calldir = p[1];
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 290af97bf6f..1cb61242e55 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -617,9 +617,10 @@ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
fraglen = min_t(int, buf->len - len, tail->iov_len);
tail->iov_len -= fraglen;
buf->len -= fraglen;
- if (tail->iov_len && buf->len == len) {
+ if (tail->iov_len) {
xdr->p = tail->iov_base + tail->iov_len;
- /* xdr->end, xdr->iov should be set already */
+ WARN_ON_ONCE(!xdr->end);
+ WARN_ON_ONCE(!xdr->iov);
return;
}
WARN_ON_ONCE(fraglen);
@@ -631,11 +632,11 @@ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
old = new + fraglen;
xdr->page_ptr -= (old >> PAGE_SHIFT) - (new >> PAGE_SHIFT);
- if (buf->page_len && buf->len == len) {
+ if (buf->page_len) {
xdr->p = page_address(*xdr->page_ptr);
xdr->end = (void *)xdr->p + PAGE_SIZE;
xdr->p = (void *)xdr->p + (new % PAGE_SIZE);
- /* xdr->iov should already be NULL */
+ WARN_ON_ONCE(xdr->iov);
return;
}
if (fraglen) {
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 85506f1d078..7aaf7415dc4 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -603,7 +603,7 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
{
struct ieee80211_sta_ht_cap *ht_cap;
struct ieee80211_sta_vht_cap *vht_cap;
- u32 width, control_freq;
+ u32 width, control_freq, cap;
if (WARN_ON(!cfg80211_chandef_valid(chandef)))
return false;
@@ -643,7 +643,8 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
return false;
break;
case NL80211_CHAN_WIDTH_80P80:
- if (!(vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ))
+ cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
+ if (cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
return false;
case NL80211_CHAN_WIDTH_80:
if (!vht_cap->vht_supported)
@@ -654,7 +655,9 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
case NL80211_CHAN_WIDTH_160:
if (!vht_cap->vht_supported)
return false;
- if (!(vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ))
+ cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
+ if (cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ &&
+ cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
return false;
prohibited_flags |= IEEE80211_CHAN_NO_160MHZ;
width = 160;
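Editor's note: the chan.c hunk fixes the 160/80+80 MHz capability checks. The supported-channel-width capability is a two-bit encoded field, so testing it with a plain bitwise AND misreads it (a device advertising "160 and 80+80" was rejected for plain 160 MHz); the patch masks the field out and compares against the specific encodings. A small sketch of the difference, using invented WIDTH_* names that mimic a two-bit field at bits 2-3 rather than the real IEEE80211_VHT_CAP_* values.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* a two-bit "supported channel width" field at bits 2..3:
 *   0 = neither, 1 = 160 MHz, 2 = 160 MHz and 80+80 MHz */
#define WIDTH_MASK          (0x3u << 2)
#define WIDTH_160MHZ        (0x1u << 2)
#define WIDTH_160_80P80MHZ  (0x2u << 2)

/* buggy: treats the encoded value as independent flag bits */
static bool has_160_buggy(uint32_t cap)
{
    return cap & WIDTH_160MHZ;
}

/* fixed: extract the field and compare against every encoding that implies 160 MHz */
static bool has_160_fixed(uint32_t cap)
{
    uint32_t w = cap & WIDTH_MASK;

    return w == WIDTH_160MHZ || w == WIDTH_160_80P80MHZ;
}

int main(void)
{
    uint32_t cap = WIDTH_160_80P80MHZ;   /* device supports both 160 and 80+80 */

    printf("buggy says 160 MHz supported: %d\n", has_160_buggy(cap));  /* 0 - wrong */
    printf("fixed says 160 MHz supported: %d\n", has_160_fixed(cap));  /* 1 - right */
    return 0;
}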
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index a17d6bc6b22..7ca4b513312 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -6002,7 +6002,7 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
}
/* there was no other matchset, so the RSSI one is alone */
- if (i == 0)
+ if (i == 0 && n_match_sets)
request->match_sets[0].rssi_thold = default_match_rssi;
request->min_rssi_thold = INT_MAX;
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 47be6163381..7b8309840d4 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1549,9 +1549,15 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
ret = cfg80211_reg_can_beacon(wiphy,
&wdev->chandef, wdev->iftype);
break;
+ case NL80211_IFTYPE_ADHOC:
+ if (!wdev->ssid_len)
+ goto out;
+
+ ret = cfg80211_reg_can_beacon(wiphy,
+ &wdev->chandef, wdev->iftype);
+ break;
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
- case NL80211_IFTYPE_ADHOC:
if (!wdev->current_bss ||
!wdev->current_bss->pub.channel)
goto out;
@@ -1907,7 +1913,7 @@ static enum reg_request_treatment
reg_process_hint_driver(struct wiphy *wiphy,
struct regulatory_request *driver_request)
{
- const struct ieee80211_regdomain *regd;
+ const struct ieee80211_regdomain *regd, *tmp;
enum reg_request_treatment treatment;
treatment = __reg_process_hint_driver(driver_request);
@@ -1927,7 +1933,10 @@ reg_process_hint_driver(struct wiphy *wiphy,
reg_free_request(driver_request);
return REG_REQ_IGNORE;
}
+
+ tmp = get_wiphy_regdom(wiphy);
rcu_assign_pointer(wiphy->regd, regd);
+ rcu_free_regdom(tmp);
}
@@ -1986,11 +1995,8 @@ __reg_process_hint_country_ie(struct wiphy *wiphy,
return REG_REQ_IGNORE;
return REG_REQ_ALREADY_SET;
}
- /*
- * Two consecutive Country IE hints on the same wiphy.
- * This should be picked up early by the driver/stack
- */
- if (WARN_ON(regdom_changes(country_ie_request->alpha2)))
+
+ if (regdom_changes(country_ie_request->alpha2))
return REG_REQ_OK;
return REG_REQ_ALREADY_SET;
}
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index 5374b1bdf02..edd2794569d 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -185,6 +185,18 @@ modbuiltin := -f $(srctree)/scripts/Makefile.modbuiltin obj
# $(Q)$(MAKE) $(dtbinst)=dir
dtbinst := -f $(if $(KBUILD_SRC),$(srctree)/)scripts/Makefile.dtbinst obj
+###
+# Shorthand for $(Q)$(MAKE) -f scripts/Makefile.clean obj=
+# Usage:
+# $(Q)$(MAKE) $(clean)=dir
+clean := -f $(srctree)/scripts/Makefile.clean obj
+
+###
+# Shorthand for $(Q)$(MAKE) -f scripts/Makefile.headersinst obj=
+# Usage:
+# $(Q)$(MAKE) $(hdr-inst)=dir
+hdr-inst := -f $(srctree)/scripts/Makefile.headersinst obj
+
# Prefix -I with $(srctree) if it is not an absolute path.
# skip if -I has no parameter
addtree = $(if $(patsubst -I%,%,$(1)), \
diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
index b1c668dc681..1bca180db8a 100644
--- a/scripts/Makefile.clean
+++ b/scripts/Makefile.clean
@@ -7,10 +7,7 @@ src := $(obj)
PHONY := __clean
__clean:
-# Shorthand for $(Q)$(MAKE) scripts/Makefile.clean obj=dir
-# Usage:
-# $(Q)$(MAKE) $(clean)=dir
-clean := -f $(srctree)/scripts/Makefile.clean obj
+include scripts/Kbuild.include
# The filename Kbuild has precedence over Makefile
kbuild-dir := $(if $(filter /%,$(src)),$(src),$(srctree)/$(src))
@@ -91,11 +88,6 @@ PHONY += $(subdir-ymn)
$(subdir-ymn):
$(Q)$(MAKE) $(clean)=$@
-# If quiet is set, only print short version of command
-
-cmd = @$(if $($(quiet)cmd_$(1)),echo ' $($(quiet)cmd_$(1))' &&) $(cmd_$(1))
-
-
# Declare the contents of the .PHONY variable as phony. We keep that
information in a variable so we can use it in if_changed and friends.
diff --git a/scripts/Makefile.headersinst b/scripts/Makefile.headersinst
index 8ccf83056a7..1106d6ca3a3 100644
--- a/scripts/Makefile.headersinst
+++ b/scripts/Makefile.headersinst
@@ -122,7 +122,6 @@ $(check-file): scripts/headers_check.pl $(output-files) FORCE
endif
# Recursion
-hdr-inst := -rR -f $(srctree)/scripts/Makefile.headersinst obj
.PHONY: $(subdirs)
$(subdirs):
$(Q)$(MAKE) $(hdr-inst)=$(obj)/$@ dst=$(_dst)/$@
diff --git a/scripts/coccinelle/misc/bugon.cocci b/scripts/coccinelle/misc/bugon.cocci
index 556456ca761..3b7eec24fb5 100644
--- a/scripts/coccinelle/misc/bugon.cocci
+++ b/scripts/coccinelle/misc/bugon.cocci
@@ -8,7 +8,7 @@
// Confidence: High
// Copyright: (C) 2014 Himangi Saraogi. GPLv2.
// Comments:
-// Options: --no-includes, --include-headers
+// Options: --no-includes --include-headers
virtual patch
virtual context
diff --git a/scripts/headers.sh b/scripts/headers.sh
index 95ece06599a..d4dc4de5cea 100755
--- a/scripts/headers.sh
+++ b/scripts/headers.sh
@@ -19,8 +19,6 @@ for arch in ${archs}; do
case ${arch} in
um) # no userspace export
;;
- cris) # headers export are known broken
- ;;
*)
if [ -d ${srctree}/arch/${arch} ]; then
do_command $1 ${arch}
diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c
index 14cea7463a6..4dd37552abc 100644
--- a/scripts/kconfig/mconf.c
+++ b/scripts/kconfig/mconf.c
@@ -330,10 +330,10 @@ static void set_subtitle(void)
list_for_each_entry(sp, &trail, entries) {
if (sp->text) {
if (pos) {
- pos->next = xcalloc(sizeof(*pos), 1);
+ pos->next = xcalloc(1, sizeof(*pos));
pos = pos->next;
} else {
- subtitles = pos = xcalloc(sizeof(*pos), 1);
+ subtitles = pos = xcalloc(1, sizeof(*pos));
}
pos->text = sp->text;
}
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
index a26cc5d2a9b..72c9dba84c5 100644
--- a/scripts/kconfig/menu.c
+++ b/scripts/kconfig/menu.c
@@ -548,7 +548,7 @@ static void get_prompt_str(struct gstr *r, struct property *prop,
{
int i, j;
struct menu *submenu[8], *menu, *location = NULL;
- struct jump_key *jump;
+ struct jump_key *jump = NULL;
str_printf(r, _("Prompt: %s\n"), _(prop->text));
menu = prop->menu->parent;
@@ -586,7 +586,7 @@ static void get_prompt_str(struct gstr *r, struct property *prop,
str_printf(r, _(" Location:\n"));
for (j = 4; --i >= 0; j += 2) {
menu = submenu[i];
- if (head && location && menu == location)
+ if (jump && menu == location)
jump->offset = strlen(r->s);
str_printf(r, "%*c-> %s", j, ' ',
_(menu_get_prompt(menu)));
diff --git a/scripts/package/mkspec b/scripts/package/mkspec
index 13957602f7c..d9ab94b17de 100755
--- a/scripts/package/mkspec
+++ b/scripts/package/mkspec
@@ -117,6 +117,7 @@ echo 'mv vmlinux.bz2 $RPM_BUILD_ROOT'"/boot/vmlinux-$KERNELRELEASE.bz2"
echo 'mv vmlinux.orig vmlinux'
echo "%endif"
+if ! $PREBUILT; then
echo 'rm -f $RPM_BUILD_ROOT'"/lib/modules/$KERNELRELEASE/{build,source}"
echo "mkdir -p "'$RPM_BUILD_ROOT'"/usr/src/kernels/$KERNELRELEASE"
echo "EXCLUDES=\"$RCS_TAR_IGNORE --exclude .tmp_versions --exclude=*vmlinux* --exclude=*.o --exclude=*.ko --exclude=*.cmd --exclude=Documentation --exclude=firmware --exclude .config.old --exclude .missing-syscalls.d\""
@@ -124,6 +125,7 @@ echo "tar "'$EXCLUDES'" -cf- . | (cd "'$RPM_BUILD_ROOT'"/usr/src/kernels/$KERNEL
echo 'cd $RPM_BUILD_ROOT'"/lib/modules/$KERNELRELEASE"
echo "ln -sf /usr/src/kernels/$KERNELRELEASE build"
echo "ln -sf /usr/src/kernels/$KERNELRELEASE source"
+fi
echo ""
echo "%clean"
@@ -151,9 +153,11 @@ echo "%files headers"
echo '%defattr (-, root, root)'
echo "/usr/include"
echo ""
+if ! $PREBUILT; then
echo "%files devel"
echo '%defattr (-, root, root)'
echo "/usr/src/kernels/$KERNELRELEASE"
echo "/lib/modules/$KERNELRELEASE/build"
echo "/lib/modules/$KERNELRELEASE/source"
echo ""
+fi
diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
index b80a93ec1cc..57515bc915c 100644
--- a/security/integrity/ima/Kconfig
+++ b/security/integrity/ima/Kconfig
@@ -10,7 +10,7 @@ config IMA
select CRYPTO_HASH_INFO
select TCG_TPM if HAS_IOMEM && !UML
select TCG_TIS if TCG_TPM && X86
- select TCG_IBMVTPM if TCG_TPM && PPC64
+ select TCG_IBMVTPM if TCG_TPM && PPC_PSERIES
help
The Trusted Computing Group(TCG) runtime Integrity
Measurement Architecture(IMA) maintains a list of hash
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index db9675db102..7bed4ad7cd7 100644
--- a/security/keys/encrypted-keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -1017,10 +1017,13 @@ static int __init init_encrypted(void)
ret = encrypted_shash_alloc();
if (ret < 0)
return ret;
+ ret = aes_get_sizes();
+ if (ret < 0)
+ goto out;
ret = register_key_type(&key_type_encrypted);
if (ret < 0)
goto out;
- return aes_get_sizes();
+ return 0;
out:
encrypted_shash_release();
return ret;
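Editor's note: the encrypted-keys hunk probes aes_get_sizes() before registering the key type, so a cipher-lookup failure unwinds through the same out: label instead of leaving a registered key type behind. A generic sketch of that goto-based init/unwind ordering, with invented step names: acquire in order, probe prerequisites before publishing anything, release in reverse on the first failure.

#include <stdio.h>

static int resource_held;

static int grab_resource(void)     { resource_held = 1; return 0; }
static void release_resource(void) { resource_held = 0; }
static int probe_prereq(int fail)  { return fail ? -1 : 0; }   /* e.g. a cipher probe */
static int register_thing(void)    { return 0; }               /* e.g. a key-type registration */

static int init_like(int fail_probe)
{
    int ret;

    ret = grab_resource();
    if (ret < 0)
        return ret;

    ret = probe_prereq(fail_probe);  /* probe prerequisites *before* registering */
    if (ret < 0)
        goto out;

    ret = register_thing();
    if (ret < 0)
        goto out;

    return 0;

out:
    release_resource();              /* unwind only what was actually set up */
    return ret;
}

int main(void)
{
    printf("happy path:  %d (resource_held=%d)\n", init_like(0), resource_held);
    printf("probe fails: %d (resource_held=%d)\n", init_like(1), resource_held);
    return 0;
}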
diff --git a/security/keys/key.c b/security/keys/key.c
index e17ba6aefdc..aee2ec5a18f 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -276,12 +276,10 @@ struct key *key_alloc(struct key_type *type, const char *desc,
if (!key)
goto no_memory_2;
- if (desc) {
- key->index_key.desc_len = desclen;
- key->index_key.description = kmemdup(desc, desclen + 1, GFP_KERNEL);
- if (!key->description)
- goto no_memory_3;
- }
+ key->index_key.desc_len = desclen;
+ key->index_key.description = kmemdup(desc, desclen + 1, GFP_KERNEL);
+ if (!key->description)
+ goto no_memory_3;
atomic_set(&key->usage, 1);
init_rwsem(&key->sem);
diff --git a/sound/firewire/oxfw/oxfw-pcm.c b/sound/firewire/oxfw/oxfw-pcm.c
index 9bc556b15a9..67ade0775a5 100644
--- a/sound/firewire/oxfw/oxfw-pcm.c
+++ b/sound/firewire/oxfw/oxfw-pcm.c
@@ -19,7 +19,7 @@ static int hw_rule_rate(struct snd_pcm_hw_params *params,
.min = UINT_MAX, .max = 0, .integer = 1
};
struct snd_oxfw_stream_formation formation;
- unsigned int i, err;
+ int i, err;
for (i = 0; i < SND_OXFW_STREAM_FORMAT_ENTRIES; i++) {
if (formats[i] == NULL)
@@ -47,7 +47,7 @@ static int hw_rule_channels(struct snd_pcm_hw_params *params,
const struct snd_interval *r =
hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
struct snd_oxfw_stream_formation formation;
- unsigned int i, j, err;
+ int i, j, err;
unsigned int count, list[SND_OXFW_STREAM_FORMAT_ENTRIES] = {0};
count = 0;
@@ -80,7 +80,7 @@ static int hw_rule_channels(struct snd_pcm_hw_params *params,
static void limit_channels_and_rates(struct snd_pcm_hardware *hw, u8 **formats)
{
struct snd_oxfw_stream_formation formation;
- unsigned int i, err;
+ int i, err;
hw->channels_min = UINT_MAX;
hw->channels_max = 0;
diff --git a/sound/firewire/oxfw/oxfw-proc.c b/sound/firewire/oxfw/oxfw-proc.c
index 604808e5526..8ba4f9f262b 100644
--- a/sound/firewire/oxfw/oxfw-proc.c
+++ b/sound/firewire/oxfw/oxfw-proc.c
@@ -15,7 +15,7 @@ static void proc_read_formation(struct snd_info_entry *entry,
struct snd_oxfw_stream_formation formation, curr;
u8 *format;
char flag;
- unsigned int i, err;
+ int i, err;
/* Show input. */
err = snd_oxfw_stream_get_current_formation(oxfw,
diff --git a/sound/firewire/oxfw/oxfw-stream.c b/sound/firewire/oxfw/oxfw-stream.c
index b77cf80f167..bda845afb47 100644
--- a/sound/firewire/oxfw/oxfw-stream.c
+++ b/sound/firewire/oxfw/oxfw-stream.c
@@ -61,7 +61,8 @@ static int set_stream_format(struct snd_oxfw *oxfw, struct amdtp_stream *s,
u8 **formats;
struct snd_oxfw_stream_formation formation;
enum avc_general_plug_dir dir;
- unsigned int i, err, len;
+ unsigned int len;
+ int i, err;
if (s == &oxfw->tx_stream) {
formats = oxfw->tx_stream_formats;
diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
index cf1d0b55e82..60e5cad0531 100644
--- a/sound/firewire/oxfw/oxfw.c
+++ b/sound/firewire/oxfw/oxfw.c
@@ -43,7 +43,7 @@ static bool detect_loud_models(struct fw_unit *unit)
err = fw_csr_string(unit->directory, CSR_MODEL,
model, sizeof(model));
if (err < 0)
- return err;
+ return false;
for (i = 0; i < ARRAY_SIZE(models); i++) {
if (strcmp(models[i], model) == 0)
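Editor's note: the oxfw hunks above change loop/error variables from unsigned int to int because these functions store negative error codes in err and later test err < 0 or return err; with an unsigned variable the test can never be true and -EINVAL becomes a huge positive value. A two-function illustration of the trap (the compiler may even warn that the unsigned comparison is always false):

#include <stdio.h>

static int might_fail(void) { return -22; }   /* e.g. -EINVAL */

int main(void)
{
    unsigned int err_u = might_fail();
    int err_s = might_fail();

    /* the unsigned comparison is always false, so the error is silently ignored */
    printf("unsigned: err < 0 is %s (value %u)\n", err_u < 0 ? "true" : "false", err_u);
    printf("signed:   err < 0 is %s (value %d)\n", err_s < 0 ? "true" : "false", err_s);
    return 0;
}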
diff --git a/sound/pci/asihpi/hpi_internal.h b/sound/pci/asihpi/hpi_internal.h
index 48380ce2c81..aeea679b228 100644
--- a/sound/pci/asihpi/hpi_internal.h
+++ b/sound/pci/asihpi/hpi_internal.h
@@ -1367,9 +1367,9 @@ struct hpi_control_cache_single {
struct hpi_control_cache_pad {
struct hpi_control_cache_info i;
u32 field_valid_flags;
- u8 c_channel[8];
- u8 c_artist[40];
- u8 c_title[40];
+ u8 c_channel[40];
+ u8 c_artist[100];
+ u8 c_title[100];
u8 c_comment[200];
u32 pTY;
u32 pI;
diff --git a/sound/pci/asihpi/hpi_version.h b/sound/pci/asihpi/hpi_version.h
index e9146e53bd5..6623ab11003 100644
--- a/sound/pci/asihpi/hpi_version.h
+++ b/sound/pci/asihpi/hpi_version.h
@@ -11,13 +11,13 @@ Production releases have even minor version.
/* Use single digits for versions less that 10 to avoid octal. */
/* *** HPI_VER is the only edit required to update version *** */
/** HPI version */
-#define HPI_VER HPI_VERSION_CONSTRUCTOR(4, 10, 1)
+#define HPI_VER HPI_VERSION_CONSTRUCTOR(4, 14, 3)
/** HPI version string in dotted decimal format */
-#define HPI_VER_STRING "4.10.01"
+#define HPI_VER_STRING "4.14.03"
/** Library version as documented in hpi-api-versions.txt */
-#define HPI_LIB_VER HPI_VERSION_CONSTRUCTOR(10, 2, 0)
+#define HPI_LIB_VER HPI_VERSION_CONSTRUCTOR(10, 4, 0)
/** Construct hpi version number from major, minor, release numbers */
#define HPI_VERSION_CONSTRUCTOR(maj, min, r) ((maj << 16) + (min << 8) + r)
diff --git a/sound/pci/asihpi/hpidspcd.c b/sound/pci/asihpi/hpidspcd.c
index ac916377001..3603c24f34d 100644
--- a/sound/pci/asihpi/hpidspcd.c
+++ b/sound/pci/asihpi/hpidspcd.c
@@ -1,8 +1,9 @@
-/***********************************************************************/
-/**
+/***********************************************************************
AudioScience HPI driver
- Copyright (C) 1997-2011 AudioScience Inc. <support@audioscience.com>
+ Functions for reading DSP code using hotplug firmware loader
+
+ Copyright (C) 1997-2014 AudioScience Inc. <support@audioscience.com>
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
@@ -17,11 +18,7 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-\file
-Functions for reading DSP code using
-hotplug firmware loader from individual dsp code files
-*/
-/***********************************************************************/
+***********************************************************************/
#define SOURCEFILE_NAME "hpidspcd.c"
#include "hpidspcd.h"
#include "hpidebug.h"
@@ -68,17 +65,18 @@ short hpi_dsp_code_open(u32 adapter, void *os_data, struct dsp_code *dsp_code,
goto error2;
}
- if ((header.version >> 9) != (HPI_VER >> 9)) {
- /* Consider even and subsequent odd minor versions to be compatible */
- dev_err(&dev->dev, "Incompatible firmware version DSP image %X != Driver %X\n",
+ if (HPI_VER_MAJOR(header.version) != HPI_VER_MAJOR(HPI_VER)) {
+ /* Major version change probably means Host-DSP protocol change */
+ dev_err(&dev->dev,
+ "Incompatible firmware version DSP image %X != Driver %X\n",
header.version, HPI_VER);
goto error2;
}
if (header.version != HPI_VER) {
- dev_info(&dev->dev,
- "Firmware: release version mismatch DSP image %X != Driver %X\n",
- header.version, HPI_VER);
+ dev_warn(&dev->dev,
+ "Firmware version mismatch: DSP image %X != Driver %X\n",
+ header.version, HPI_VER);
}
HPI_DEBUG_LOG(DEBUG, "dsp code %s opened\n", fw_name);
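Editor's note: hpi_version.h packs major/minor/release into one integer via HPI_VERSION_CONSTRUCTOR ((maj << 16) + (min << 8) + r), and the hpidspcd.c hunk now rejects firmware only when the major fields differ, downgrading other mismatches to a warning. A standalone sketch of that packing and major-only compatibility check; the VERSION_MAJOR helper below is written out locally and only assumed to match the driver's own HPI_VER_MAJOR macro.

#include <stdio.h>

#define VERSION_CONSTRUCTOR(maj, min, r) (((maj) << 16) + ((min) << 8) + (r))
#define VERSION_MAJOR(v)                 ((v) >> 16)

int main(void)
{
    unsigned int driver = VERSION_CONSTRUCTOR(4, 14, 3);
    unsigned int image  = VERSION_CONSTRUCTOR(4, 10, 1);
    unsigned int old    = VERSION_CONSTRUCTOR(3, 18, 0);

    /* same major: load with a warning about the exact-version mismatch */
    if (VERSION_MAJOR(image) == VERSION_MAJOR(driver) && image != driver)
        printf("warn: firmware %06X != driver %06X, loading anyway\n", image, driver);

    /* different major: protocol may have changed, refuse to load */
    if (VERSION_MAJOR(old) != VERSION_MAJOR(driver))
        printf("error: firmware %06X incompatible with driver %06X\n", old, driver);

    return 0;
}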
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index 8337645aa7a..8276a743e22 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -1676,7 +1676,7 @@ irqreturn_t azx_interrupt(int irq, void *dev_id)
u8 sd_status;
int i;
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
if (chip->driver_caps & AZX_DCAPS_PM_RUNTIME)
if (!pm_runtime_active(chip->card->dev))
return IRQ_NONE;
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 63b69f750d8..b680b4ec633 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -3218,12 +3218,13 @@ static int create_input_ctls(struct hda_codec *codec)
}
/* add stereo mix when explicitly enabled via hint */
- if (mixer && spec->add_stereo_mix_input &&
- snd_hda_get_bool_hint(codec, "add_stereo_mix_input") > 0) {
+ if (mixer && spec->add_stereo_mix_input == HDA_HINT_STEREO_MIX_ENABLE) {
err = parse_capture_source(codec, mixer, CFG_IDX_MIX, num_adcs,
"Stereo Mix", 0);
if (err < 0)
return err;
+ else
+ spec->suppress_auto_mic = 1;
}
return 0;
@@ -4542,9 +4543,8 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec,
/* add stereo mix if available and not enabled yet */
if (!spec->auto_mic && spec->mixer_nid &&
- spec->add_stereo_mix_input &&
- spec->input_mux.num_items > 1 &&
- snd_hda_get_bool_hint(codec, "add_stereo_mix_input") < 0) {
+ spec->add_stereo_mix_input == HDA_HINT_STEREO_MIX_AUTO &&
+ spec->input_mux.num_items > 1) {
err = parse_capture_source(codec, spec->mixer_nid,
CFG_IDX_MIX, spec->num_all_adcs,
"Stereo Mix", 0);
diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
index 61dd5153f51..3d852660443 100644
--- a/sound/pci/hda/hda_generic.h
+++ b/sound/pci/hda/hda_generic.h
@@ -222,7 +222,7 @@ struct hda_gen_spec {
unsigned int vmaster_mute_enum:1; /* add vmaster mute mode enum */
unsigned int indep_hp:1; /* independent HP supported */
unsigned int prefer_hp_amp:1; /* enable HP amp for speaker if any */
- unsigned int add_stereo_mix_input:1; /* add aamix as a capture src */
+ unsigned int add_stereo_mix_input:2; /* add aamix as a capture src */
unsigned int add_jack_modes:1; /* add i/o jack mode enum ctls */
unsigned int power_down_unused:1; /* power down unused widgets */
unsigned int dac_min_mute:1; /* minimal = mute for DACs */
@@ -291,6 +291,13 @@ struct hda_gen_spec {
struct hda_jack_callback *cb);
};
+/* values for add_stereo_mix_input flag */
+enum {
+ HDA_HINT_STEREO_MIX_DISABLE, /* No stereo mix input */
+ HDA_HINT_STEREO_MIX_ENABLE, /* Add stereo mix input */
+ HDA_HINT_STEREO_MIX_AUTO, /* Add only if auto-mic is disabled */
+};
+
int snd_hda_gen_spec_init(struct hda_gen_spec *spec);
int snd_hda_gen_init(struct hda_codec *codec);
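Editor's note: the hda_generic change widens add_stereo_mix_input from a 1-bit to a 2-bit bitfield so it can hold the three HDA_HINT_STEREO_MIX_* states (disable/enable/auto) instead of a plain on/off. A minimal sketch of a tri-state packed into a two-bit bitfield, with toy names; a 1-bit field would silently truncate the AUTO value.

#include <stdio.h>

enum { MIX_DISABLE, MIX_ENABLE, MIX_AUTO };   /* three states need two bits */

struct spec {
    unsigned int indep_hp             : 1;
    unsigned int add_stereo_mix_input : 2;    /* was :1 -- too narrow for MIX_AUTO */
};

int main(void)
{
    struct spec s = { .add_stereo_mix_input = MIX_AUTO };

    switch (s.add_stereo_mix_input) {
    case MIX_ENABLE:
        puts("always add stereo mix");
        break;
    case MIX_AUTO:
        puts("add stereo mix only when auto-mic is off");
        break;
    default:
        puts("no stereo mix input");
    }
    return 0;
}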
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 5ac0d39d59b..2bf0b568e3d 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -872,7 +872,7 @@ static int azx_resume(struct device *dev)
}
#endif /* CONFIG_PM_SLEEP || SUPPORT_VGA_SWITCHEROO */
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static int azx_runtime_suspend(struct device *dev)
{
struct snd_card *card = dev_get_drvdata(dev);
@@ -970,9 +970,6 @@ static int azx_runtime_idle(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_RUNTIME */
-
-#ifdef CONFIG_PM
static const struct dev_pm_ops azx_pm = {
SET_SYSTEM_SLEEP_PM_OPS(azx_suspend, azx_resume)
SET_RUNTIME_PM_OPS(azx_runtime_suspend, azx_runtime_resume, azx_runtime_idle)
diff --git a/sound/pci/hda/hda_sysfs.c b/sound/pci/hda/hda_sysfs.c
index bef721592c3..ccc962a1699 100644
--- a/sound/pci/hda/hda_sysfs.c
+++ b/sound/pci/hda/hda_sysfs.c
@@ -468,7 +468,7 @@ int snd_hda_get_bool_hint(struct hda_codec *codec, const char *key)
EXPORT_SYMBOL_GPL(snd_hda_get_bool_hint);
/**
- * snd_hda_get_bool_hint - Get a boolean hint value
+ * snd_hda_get_int_hint - Get an integer hint value
* @codec: the HDA codec
* @key: the hint key string
* @valp: pointer to store a value
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index c81b715d6c9..a9d78e27513 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -195,7 +195,8 @@ static int ad198x_parse_auto_config(struct hda_codec *codec, bool indep_hp)
codec->no_sticky_stream = 1;
spec->gen.indep_hp = indep_hp;
- spec->gen.add_stereo_mix_input = 1;
+ if (!spec->gen.add_stereo_mix_input)
+ spec->gen.add_stereo_mix_input = HDA_HINT_STEREO_MIX_AUTO;
err = snd_hda_parse_pin_defcfg(codec, cfg, NULL, 0);
if (err < 0)
@@ -256,6 +257,18 @@ static void ad1986a_fixup_eapd(struct hda_codec *codec,
}
}
+/* enable stereo-mix input for avoiding regression on KDE (bko#88251) */
+static void ad1986a_fixup_eapd_mix_in(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ struct ad198x_spec *spec = codec->spec;
+
+ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ ad1986a_fixup_eapd(codec, fix, action);
+ spec->gen.add_stereo_mix_input = HDA_HINT_STEREO_MIX_ENABLE;
+ }
+}
+
enum {
AD1986A_FIXUP_INV_JACK_DETECT,
AD1986A_FIXUP_ULTRA,
@@ -264,6 +277,8 @@ enum {
AD1986A_FIXUP_LAPTOP,
AD1986A_FIXUP_LAPTOP_IMIC,
AD1986A_FIXUP_EAPD,
+ AD1986A_FIXUP_EAPD_MIX_IN,
+ AD1986A_FIXUP_EASYNOTE,
};
static const struct hda_fixup ad1986a_fixups[] = {
@@ -328,6 +343,30 @@ static const struct hda_fixup ad1986a_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = ad1986a_fixup_eapd,
},
+ [AD1986A_FIXUP_EAPD_MIX_IN] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = ad1986a_fixup_eapd_mix_in,
+ },
+ [AD1986A_FIXUP_EASYNOTE] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x1a, 0x0421402f }, /* headphone */
+ { 0x1b, 0x90170110 }, /* speaker */
+ { 0x1c, 0x411111f0 }, /* N/A */
+ { 0x1d, 0x90a70130 }, /* int mic */
+ { 0x1e, 0x411111f0 }, /* N/A */
+ { 0x1f, 0x04a19040 }, /* mic */
+ { 0x20, 0x411111f0 }, /* N/A */
+ { 0x21, 0x411111f0 }, /* N/A */
+ { 0x22, 0x411111f0 }, /* N/A */
+ { 0x23, 0x411111f0 }, /* N/A */
+ { 0x24, 0x411111f0 }, /* N/A */
+ { 0x25, 0x411111f0 }, /* N/A */
+ {}
+ },
+ .chained = true,
+ .chain_id = AD1986A_FIXUP_EAPD_MIX_IN,
+ },
};
static const struct snd_pci_quirk ad1986a_fixup_tbl[] = {
@@ -341,6 +380,7 @@ static const struct snd_pci_quirk ad1986a_fixup_tbl[] = {
SND_PCI_QUIRK(0x144d, 0xc01e, "FSC V2060", AD1986A_FIXUP_LAPTOP),
SND_PCI_QUIRK_MASK(0x144d, 0xff00, 0xc000, "Samsung", AD1986A_FIXUP_SAMSUNG),
SND_PCI_QUIRK(0x144d, 0xc027, "Samsung Q1", AD1986A_FIXUP_ULTRA),
+ SND_PCI_QUIRK(0x1631, 0xc022, "PackardBell EasyNote MX65", AD1986A_FIXUP_EASYNOTE),
SND_PCI_QUIRK(0x17aa, 0x2066, "Lenovo N100", AD1986A_FIXUP_INV_JACK_DETECT),
SND_PCI_QUIRK(0x17aa, 0x1011, "Lenovo M55", AD1986A_FIXUP_3STACK),
SND_PCI_QUIRK(0x17aa, 0x1017, "Lenovo A60", AD1986A_FIXUP_3STACK),
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index e9ebc7bd752..fd3ed18670e 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -855,14 +855,14 @@ static int patch_conexant_auto(struct hda_codec *codec)
case 0x14f15045:
codec->single_adc_amp = 1;
spec->gen.mixer_nid = 0x17;
- spec->gen.add_stereo_mix_input = 1;
+ spec->gen.add_stereo_mix_input = HDA_HINT_STEREO_MIX_AUTO;
snd_hda_pick_fixup(codec, cxt5045_fixup_models,
cxt5045_fixups, cxt_fixups);
break;
case 0x14f15047:
codec->pin_amp_workaround = 1;
spec->gen.mixer_nid = 0x19;
- spec->gen.add_stereo_mix_input = 1;
+ spec->gen.add_stereo_mix_input = HDA_HINT_STEREO_MIX_AUTO;
snd_hda_pick_fixup(codec, cxt5047_fixup_models,
cxt5047_fixups, cxt_fixups);
break;
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 9dc9cf8c90e..5f13d2d1807 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -47,7 +47,9 @@ MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
#define is_haswell(codec) ((codec)->vendor_id == 0x80862807)
#define is_broadwell(codec) ((codec)->vendor_id == 0x80862808)
-#define is_haswell_plus(codec) (is_haswell(codec) || is_broadwell(codec))
+#define is_skylake(codec) ((codec)->vendor_id == 0x80862809)
+#define is_haswell_plus(codec) (is_haswell(codec) || is_broadwell(codec) \
+ || is_skylake(codec))
#define is_valleyview(codec) ((codec)->vendor_id == 0x80862882)
#define is_cherryview(codec) ((codec)->vendor_id == 0x80862883)
@@ -3365,6 +3367,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
{ .id = 0x80862806, .name = "PantherPoint HDMI", .patch = patch_generic_hdmi },
{ .id = 0x80862807, .name = "Haswell HDMI", .patch = patch_generic_hdmi },
{ .id = 0x80862808, .name = "Broadwell HDMI", .patch = patch_generic_hdmi },
+{ .id = 0x80862809, .name = "Skylake HDMI", .patch = patch_generic_hdmi },
{ .id = 0x80862880, .name = "CedarTrail HDMI", .patch = patch_generic_hdmi },
{ .id = 0x80862882, .name = "Valleyview2 HDMI", .patch = patch_generic_hdmi },
{ .id = 0x80862883, .name = "Braswell HDMI", .patch = patch_generic_hdmi },
@@ -3425,6 +3428,7 @@ MODULE_ALIAS("snd-hda-codec-id:80862805");
MODULE_ALIAS("snd-hda-codec-id:80862806");
MODULE_ALIAS("snd-hda-codec-id:80862807");
MODULE_ALIAS("snd-hda-codec-id:80862808");
+MODULE_ALIAS("snd-hda-codec-id:80862809");
MODULE_ALIAS("snd-hda-codec-id:80862880");
MODULE_ALIAS("snd-hda-codec-id:80862882");
MODULE_ALIAS("snd-hda-codec-id:80862883");
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index a722067c491..65f1f4e18ea 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -321,10 +321,12 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
break;
case 0x10ec0233:
case 0x10ec0255:
+ case 0x10ec0256:
case 0x10ec0282:
case 0x10ec0283:
case 0x10ec0286:
case 0x10ec0288:
+ case 0x10ec0298:
alc_update_coef_idx(codec, 0x10, 1<<9, 0);
break;
case 0x10ec0285:
@@ -2659,7 +2661,9 @@ enum {
ALC269_TYPE_ALC284,
ALC269_TYPE_ALC285,
ALC269_TYPE_ALC286,
+ ALC269_TYPE_ALC298,
ALC269_TYPE_ALC255,
+ ALC269_TYPE_ALC256,
};
/*
@@ -2686,7 +2690,9 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
case ALC269_TYPE_ALC282:
case ALC269_TYPE_ALC283:
case ALC269_TYPE_ALC286:
+ case ALC269_TYPE_ALC298:
case ALC269_TYPE_ALC255:
+ case ALC269_TYPE_ALC256:
ssids = alc269_ssids;
break;
default:
@@ -4829,6 +4835,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS_HSJACK),
SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
@@ -5417,9 +5424,15 @@ static int patch_alc269(struct hda_codec *codec)
spec->codec_variant = ALC269_TYPE_ALC286;
spec->shutup = alc286_shutup;
break;
+ case 0x10ec0298:
+ spec->codec_variant = ALC269_TYPE_ALC298;
+ break;
case 0x10ec0255:
spec->codec_variant = ALC269_TYPE_ALC255;
break;
+ case 0x10ec0256:
+ spec->codec_variant = ALC269_TYPE_ALC256;
+ break;
}
if (snd_hda_codec_read(codec, 0x51, 0, AC_VERB_PARAMETERS, 0) == 0x10ec5505) {
@@ -6341,6 +6354,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
{ .id = 0x10ec0233, .name = "ALC233", .patch = patch_alc269 },
{ .id = 0x10ec0235, .name = "ALC233", .patch = patch_alc269 },
{ .id = 0x10ec0255, .name = "ALC255", .patch = patch_alc269 },
+ { .id = 0x10ec0256, .name = "ALC256", .patch = patch_alc269 },
{ .id = 0x10ec0260, .name = "ALC260", .patch = patch_alc260 },
{ .id = 0x10ec0262, .name = "ALC262", .patch = patch_alc262 },
{ .id = 0x10ec0267, .name = "ALC267", .patch = patch_alc268 },
@@ -6360,6 +6374,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
{ .id = 0x10ec0290, .name = "ALC290", .patch = patch_alc269 },
{ .id = 0x10ec0292, .name = "ALC292", .patch = patch_alc269 },
{ .id = 0x10ec0293, .name = "ALC293", .patch = patch_alc269 },
+ { .id = 0x10ec0298, .name = "ALC298", .patch = patch_alc269 },
{ .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660",
.patch = patch_alc861 },
{ .id = 0x10ec0660, .name = "ALC660-VD", .patch = patch_alc861vd },
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index 6c206b6c8d6..3de6d3d779c 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -137,7 +137,7 @@ static struct via_spec *via_new_spec(struct hda_codec *codec)
spec->gen.indep_hp = 1;
spec->gen.keep_eapd_on = 1;
spec->gen.pcm_playback_hook = via_playback_pcm_hook;
- spec->gen.add_stereo_mix_input = 1;
+ spec->gen.add_stereo_mix_input = HDA_HINT_STEREO_MIX_AUTO;
return spec;
}
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
index b1cc2a4a7fc..99ff35e2a25 100644
--- a/sound/soc/atmel/atmel_ssc_dai.c
+++ b/sound/soc/atmel/atmel_ssc_dai.c
@@ -267,7 +267,7 @@ static void atmel_ssc_shutdown(struct snd_pcm_substream *substream,
if (!ssc_p->dir_mask) {
if (ssc_p->initialized) {
/* Shutdown the SSC clock. */
- pr_debug("atmel_ssc_dau: Stopping clock\n");
+ pr_debug("atmel_ssc_dai: Stopping clock\n");
clk_disable(ssc_p->ssc->clk);
free_irq(ssc_p->ssc->irq, ssc_p);
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 883c5778b30..8349f982a58 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -520,6 +520,8 @@ config SND_SOC_RT5670
config SND_SOC_RT5677
tristate
+ select REGMAP_I2C
+ select REGMAP_IRQ
config SND_SOC_RT5677_SPI
tristate
diff --git a/sound/soc/codecs/cs35l32.c b/sound/soc/codecs/cs35l32.c
index c125925da92..ec55c590afd 100644
--- a/sound/soc/codecs/cs35l32.c
+++ b/sound/soc/codecs/cs35l32.c
@@ -550,7 +550,7 @@ static int cs35l32_i2c_remove(struct i2c_client *i2c_client)
return 0;
}
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static int cs35l32_runtime_suspend(struct device *dev)
{
struct cs35l32_private *cs35l32 = dev_get_drvdata(dev);
diff --git a/sound/soc/codecs/cs42xx8.c b/sound/soc/codecs/cs42xx8.c
index 02b1520ae0b..670ebfe1290 100644
--- a/sound/soc/codecs/cs42xx8.c
+++ b/sound/soc/codecs/cs42xx8.c
@@ -537,7 +537,7 @@ err_enable:
}
EXPORT_SYMBOL_GPL(cs42xx8_probe);
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static int cs42xx8_runtime_resume(struct device *dev)
{
struct cs42xx8_priv *cs42xx8 = dev_get_drvdata(dev);
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
index 151f718241e..b112b1c2c39 100644
--- a/sound/soc/codecs/max98090.c
+++ b/sound/soc/codecs/max98090.c
@@ -2611,7 +2611,7 @@ static int max98090_i2c_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static int max98090_runtime_resume(struct device *dev)
{
struct max98090_priv *max98090 = dev_get_drvdata(dev);
diff --git a/sound/soc/codecs/pcm512x-i2c.c b/sound/soc/codecs/pcm512x-i2c.c
index 4d62230bd37..d0547fa275f 100644
--- a/sound/soc/codecs/pcm512x-i2c.c
+++ b/sound/soc/codecs/pcm512x-i2c.c
@@ -24,8 +24,13 @@ static int pcm512x_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct regmap *regmap;
+ struct regmap_config config = pcm512x_regmap;
- regmap = devm_regmap_init_i2c(i2c, &pcm512x_regmap);
+ /* msb needs to be set to enable auto-increment of addresses */
+ config.read_flag_mask = 0x80;
+ config.write_flag_mask = 0x80;
+
+ regmap = devm_regmap_init_i2c(i2c, &config);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
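regmap ORs read_flag_mask/write_flag_mask into the register-address byte of every transfer, so setting bit 7 here makes the PCM512x auto-increment its register pointer on multi-byte accesses. A minimal sketch of the resulting config (example_config is an illustrative name, not from the patch):

	/* with write_flag_mask = 0x80, writing value V to register R puts
	 * { R | 0x80, V } on the I2C bus; the chip reads bit 7 of the
	 * address byte as its auto-increment flag
	 */
	static const struct regmap_config example_config = {
		.reg_bits        = 8,
		.val_bits        = 8,
		.read_flag_mask  = 0x80,
		.write_flag_mask = 0x80,
	};
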
diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
index 0c8aefab404..e5f2fb884bf 100644
--- a/sound/soc/codecs/pcm512x.c
+++ b/sound/soc/codecs/pcm512x.c
@@ -517,7 +517,7 @@ void pcm512x_remove(struct device *dev)
}
EXPORT_SYMBOL_GPL(pcm512x_remove);
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static int pcm512x_suspend(struct device *dev)
{
struct pcm512x_priv *pcm512x = dev_get_drvdata(dev);
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index a7789a8726e..27141e2df87 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -2209,6 +2209,10 @@ static int rt5645_jack_detect(struct snd_soc_codec *codec)
int gpio_state, jack_type = 0;
unsigned int val;
+ if (!gpio_is_valid(rt5645->pdata.hp_det_gpio)) {
+ dev_err(codec->dev, "invalid gpio\n");
+ return -EINVAL;
+ }
gpio_state = gpio_get_value(rt5645->pdata.hp_det_gpio);
dev_dbg(codec->dev, "gpio = %d(%d)\n", rt5645->pdata.hp_det_gpio,
diff --git a/sound/soc/codecs/tas2552.c b/sound/soc/codecs/tas2552.c
index b505212019e..ae23acdd270 100644
--- a/sound/soc/codecs/tas2552.c
+++ b/sound/soc/codecs/tas2552.c
@@ -115,7 +115,7 @@ static const struct snd_soc_dapm_route tas2552_audio_map[] = {
{"ClassD", NULL, "PLL"},
};
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static void tas2552_sw_shutdown(struct tas2552_data *tas_data, int sw_shutdown)
{
u8 cfg1_reg;
@@ -264,7 +264,7 @@ static int tas2552_mute(struct snd_soc_dai *dai, int mute)
return 0;
}
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static int tas2552_runtime_suspend(struct device *dev)
{
struct tas2552_data *tas2552 = dev_get_drvdata(dev);
diff --git a/sound/soc/codecs/wm2200.c b/sound/soc/codecs/wm2200.c
index cdea9d9c163..15599845a66 100644
--- a/sound/soc/codecs/wm2200.c
+++ b/sound/soc/codecs/wm2200.c
@@ -2440,7 +2440,7 @@ static int wm2200_i2c_remove(struct i2c_client *i2c)
return 0;
}
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static int wm2200_runtime_suspend(struct device *dev)
{
struct wm2200_priv *wm2200 = dev_get_drvdata(dev);
diff --git a/sound/soc/codecs/wm5100.c b/sound/soc/codecs/wm5100.c
index a01ad629ed6..b80970dc2d2 100644
--- a/sound/soc/codecs/wm5100.c
+++ b/sound/soc/codecs/wm5100.c
@@ -2664,7 +2664,7 @@ static int wm5100_i2c_remove(struct i2c_client *i2c)
return 0;
}
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static int wm5100_runtime_suspend(struct device *dev)
{
struct wm5100_priv *wm5100 = dev_get_drvdata(dev);
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 1534d88a66e..d32d554f5b3 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -3785,7 +3785,7 @@ static int wm8962_i2c_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static int wm8962_runtime_resume(struct device *dev)
{
struct wm8962_priv *wm8962 = dev_get_drvdata(dev);
diff --git a/sound/soc/fsl/fsl_asrc.c b/sound/soc/fsl/fsl_asrc.c
index 9deabdd2b1a..026a8011754 100644
--- a/sound/soc/fsl/fsl_asrc.c
+++ b/sound/soc/fsl/fsl_asrc.c
@@ -928,7 +928,7 @@ static int fsl_asrc_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static int fsl_asrc_runtime_resume(struct device *dev)
{
struct fsl_asrc *asrc_priv = dev_get_drvdata(dev);
@@ -954,7 +954,7 @@ static int fsl_asrc_runtime_suspend(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_RUNTIME */
+#endif /* CONFIG_PM */
#ifdef CONFIG_PM_SLEEP
static int fsl_asrc_suspend(struct device *dev)
diff --git a/sound/soc/intel/sst-haswell-pcm.c b/sound/soc/intel/sst-haswell-pcm.c
index b8a782c0d4c..61952520070 100644
--- a/sound/soc/intel/sst-haswell-pcm.c
+++ b/sound/soc/intel/sst-haswell-pcm.c
@@ -998,7 +998,7 @@ static int hsw_pcm_dev_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static int hsw_pcm_runtime_idle(struct device *dev)
{
@@ -1057,7 +1057,7 @@ static int hsw_pcm_runtime_resume(struct device *dev)
#define hsw_pcm_runtime_resume NULL
#endif
-#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_PM_RUNTIME)
+#ifdef CONFIG_PM
static void hsw_pcm_complete(struct device *dev)
{
diff --git a/sound/soc/intel/sst/sst_acpi.c b/sound/soc/intel/sst/sst_acpi.c
index 31124aa4434..3abc29e8a92 100644
--- a/sound/soc/intel/sst/sst_acpi.c
+++ b/sound/soc/intel/sst/sst_acpi.c
@@ -43,7 +43,7 @@
#include "sst.h"
struct sst_machines {
- char codec_id[32];
+ char *codec_id;
char board[32];
char machine[32];
void (*machine_quirk)(void);
@@ -277,16 +277,16 @@ int sst_acpi_probe(struct platform_device *pdev)
dev_dbg(dev, "ACPI device id: %x\n", dev_id);
plat_dev = platform_device_register_data(dev, mach->pdata->platform, -1, NULL, 0);
- if (plat_dev == NULL) {
+ if (IS_ERR(plat_dev)) {
dev_err(dev, "Failed to create machine device: %s\n", mach->pdata->platform);
- return -ENODEV;
+ return PTR_ERR(plat_dev);
}
/* Create platform device for sst machine driver */
mdev = platform_device_register_data(dev, mach->machine, -1, NULL, 0);
- if (mdev == NULL) {
+ if (IS_ERR(mdev)) {
dev_err(dev, "Failed to create machine device: %s\n", mach->machine);
- return -ENODEV;
+ return PTR_ERR(mdev);
}
ret = sst_alloc_drv_context(&ctx, dev, dev_id);
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index 95340ba415c..b5a80c528d8 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -1135,7 +1135,7 @@ static inline const struct samsung_i2s_dai_data *samsung_i2s_get_driver_data(
platform_get_device_id(pdev)->driver_data;
}
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static int i2s_runtime_suspend(struct device *dev)
{
struct i2s_dai *i2s = dev_get_drvdata(dev);
@@ -1153,7 +1153,7 @@ static int i2s_runtime_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_RUNTIME */
+#endif /* CONFIG_PM */
static int samsung_i2s_probe(struct platform_device *pdev)
{
@@ -1261,6 +1261,8 @@ static int samsung_i2s_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto err;
}
+
+ sec_dai->variant_regs = pri_dai->variant_regs;
sec_dai->dma_playback.dma_addr = regs_base + I2STXDS;
sec_dai->dma_playback.ch_name = "tx-sec";
diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
index 1994d41348f..b703cb3cda1 100644
--- a/sound/usb/mixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -333,8 +333,11 @@ static struct usbmix_name_map gamecom780_map[] = {
{}
};
-static const struct usbmix_name_map kef_x300a_map[] = {
- { 10, NULL }, /* firmware locks up (?) when we try to access this FU */
+/* some (all?) SCMS USB3318 devices are affected by a firmware lock up
+ * when anything attempts to access FU 10 (control)
+ */
+static const struct usbmix_name_map scms_usb3318_map[] = {
+ { 10, NULL },
{ 0 }
};
@@ -434,8 +437,14 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
.map = ebox44_map,
},
{
+ /* KEF X300A */
.id = USB_ID(0x27ac, 0x1000),
- .map = kef_x300a_map,
+ .map = scms_usb3318_map,
+ },
+ {
+ /* Arcam rPAC */
+ .id = USB_ID(0x25c4, 0x0003),
+ .map = scms_usb3318_map,
},
{ 0 } /* terminator */
};
diff --git a/sound/usb/mixer_scarlett.c b/sound/usb/mixer_scarlett.c
index 9109652b88b..7438e7c4a84 100644
--- a/sound/usb/mixer_scarlett.c
+++ b/sound/usb/mixer_scarlett.c
@@ -655,7 +655,7 @@ static struct scarlett_device_info s6i6_info = {
.names = NULL
},
- .num_controls = 0,
+ .num_controls = 9,
.controls = {
{ .num = 0, .type = SCARLETT_OUTPUTS, .name = "Monitor" },
{ .num = 1, .type = SCARLETT_OUTPUTS, .name = "Headphone" },
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 4dbfb3d18ee..a7398412310 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1245,8 +1245,9 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
/* XMOS based USB DACs */
switch (chip->usb_id) {
- /* iFi Audio micro/nano iDSD */
- case USB_ID(0x20b1, 0x3008):
+ case USB_ID(0x20b1, 0x3008): /* iFi Audio micro/nano iDSD */
+ case USB_ID(0x20b1, 0x2008): /* Matrix Audio X-Sabre */
+ case USB_ID(0x20b1, 0x300a): /* Matrix Audio Mini-i Pro */
if (fp->altsetting == 2)
return SNDRV_PCM_FMTBIT_DSD_U32_BE;
break;
diff --git a/tools/include/asm-generic/bitops.h b/tools/include/asm-generic/bitops.h
new file mode 100644
index 00000000000..6eedba1f773
--- /dev/null
+++ b/tools/include/asm-generic/bitops.h
@@ -0,0 +1,27 @@
+#ifndef __TOOLS_ASM_GENERIC_BITOPS_H
+#define __TOOLS_ASM_GENERIC_BITOPS_H
+
+/*
+ * tools/ copied this from include/asm-generic/bitops.h, bit by bit as it needed
+ * some functions.
+ *
+ * For the benefit of those who are trying to port Linux to another
+ * architecture, here are some C-language equivalents. You should
+ * recode these in the native assembly language, if at all possible.
+ *
+ * C language equivalents written by Theodore Ts'o, 9/26/92
+ */
+
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__fls.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/find.h>
+
+#ifndef _TOOLS_LINUX_BITOPS_H_
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <asm-generic/bitops/atomic.h>
+
+#endif /* __TOOLS_ASM_GENERIC_BITOPS_H */
diff --git a/tools/include/asm-generic/bitops/__ffs.h b/tools/include/asm-generic/bitops/__ffs.h
new file mode 100644
index 00000000000..c94175015a8
--- /dev/null
+++ b/tools/include/asm-generic/bitops/__ffs.h
@@ -0,0 +1,43 @@
+#ifndef _TOOLS_LINUX_ASM_GENERIC_BITOPS___FFS_H_
+#define _TOOLS_LINUX_ASM_GENERIC_BITOPS___FFS_H_
+
+#include <asm/types.h>
+
+/**
+ * __ffs - find first bit in word.
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static __always_inline unsigned long __ffs(unsigned long word)
+{
+ int num = 0;
+
+#if __BITS_PER_LONG == 64
+ if ((word & 0xffffffff) == 0) {
+ num += 32;
+ word >>= 32;
+ }
+#endif
+ if ((word & 0xffff) == 0) {
+ num += 16;
+ word >>= 16;
+ }
+ if ((word & 0xff) == 0) {
+ num += 8;
+ word >>= 8;
+ }
+ if ((word & 0xf) == 0) {
+ num += 4;
+ word >>= 4;
+ }
+ if ((word & 0x3) == 0) {
+ num += 2;
+ word >>= 2;
+ }
+ if ((word & 0x1) == 0)
+ num += 1;
+ return num;
+}
+
+#endif /* _TOOLS_LINUX_ASM_GENERIC_BITOPS___FFS_H_ */
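__ffs() halves the search width at each step (32, 16, 8, 4, 2, 1) and is undefined for a zero input, so callers must check first. A small usage sketch under those assumptions (lowest_bit is an illustrative helper):

	/* returns the index of the lowest set bit, or -1 for an empty word;
	 * e.g. lowest_bit(0x50) == 4 because 0x50 == 0b01010000
	 */
	static int lowest_bit(unsigned long word)
	{
		return word ? (int)__ffs(word) : -1;
	}
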
diff --git a/tools/include/asm-generic/bitops/__fls.h b/tools/include/asm-generic/bitops/__fls.h
new file mode 100644
index 00000000000..2218b9add4c
--- /dev/null
+++ b/tools/include/asm-generic/bitops/__fls.h
@@ -0,0 +1 @@
+#include <../../../../include/asm-generic/bitops/__fls.h>
diff --git a/tools/include/asm-generic/bitops/atomic.h b/tools/include/asm-generic/bitops/atomic.h
new file mode 100644
index 00000000000..4bccd7c3d5d
--- /dev/null
+++ b/tools/include/asm-generic/bitops/atomic.h
@@ -0,0 +1,22 @@
+#ifndef _TOOLS_LINUX_ASM_GENERIC_BITOPS_ATOMIC_H_
+#define _TOOLS_LINUX_ASM_GENERIC_BITOPS_ATOMIC_H_
+
+#include <asm/types.h>
+
+static inline void set_bit(int nr, unsigned long *addr)
+{
+ addr[nr / __BITS_PER_LONG] |= 1UL << (nr % __BITS_PER_LONG);
+}
+
+static inline void clear_bit(int nr, unsigned long *addr)
+{
+ addr[nr / __BITS_PER_LONG] &= ~(1UL << (nr % __BITS_PER_LONG));
+}
+
+static __always_inline int test_bit(unsigned int nr, const unsigned long *addr)
+{
+ return ((1UL << (nr % __BITS_PER_LONG)) &
+ (((unsigned long *)addr)[nr / __BITS_PER_LONG])) != 0;
+}
+
+#endif /* _TOOLS_LINUX_ASM_GENERIC_BITOPS_ATOMIC_H_ */
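Despite the "atomic" name, these tools/ helpers are plain read-modify-write operations, which is sufficient for perf's single-threaded bitmap handling. A usage sketch (BITS_TO_LONGS comes from the tools/include/linux/bitops.h added below):

	unsigned long map[BITS_TO_LONGS(128)] = { 0 };

	set_bit(3, map);		/* mark bit 3 */
	if (test_bit(3, map))		/* query it */
		clear_bit(3, map);	/* and clear it again */
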
diff --git a/tools/include/asm-generic/bitops/find.h b/tools/include/asm-generic/bitops/find.h
new file mode 100644
index 00000000000..31f51547fcd
--- /dev/null
+++ b/tools/include/asm-generic/bitops/find.h
@@ -0,0 +1,33 @@
+#ifndef _TOOLS_LINUX_ASM_GENERIC_BITOPS_FIND_H_
+#define _TOOLS_LINUX_ASM_GENERIC_BITOPS_FIND_H_
+
+#ifndef find_next_bit
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The bitmap size in bits
+ *
+ * Returns the bit number for the next set bit
+ * If no bits are set, returns @size.
+ */
+extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
+ size, unsigned long offset);
+#endif
+
+#ifndef find_first_bit
+
+/**
+ * find_first_bit - find the first set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum number of bits to search
+ *
+ * Returns the bit number of the first set bit.
+ * If no bits are set, returns @size.
+ */
+extern unsigned long find_first_bit(const unsigned long *addr,
+ unsigned long size);
+
+#endif /* find_first_bit */
+
+#endif /*_TOOLS_LINUX_ASM_GENERIC_BITOPS_FIND_H_ */
diff --git a/tools/include/asm-generic/bitops/fls.h b/tools/include/asm-generic/bitops/fls.h
new file mode 100644
index 00000000000..dbf711a28f7
--- /dev/null
+++ b/tools/include/asm-generic/bitops/fls.h
@@ -0,0 +1 @@
+#include <../../../../include/asm-generic/bitops/fls.h>
diff --git a/tools/include/asm-generic/bitops/fls64.h b/tools/include/asm-generic/bitops/fls64.h
new file mode 100644
index 00000000000..980b1f63c04
--- /dev/null
+++ b/tools/include/asm-generic/bitops/fls64.h
@@ -0,0 +1 @@
+#include <../../../../include/asm-generic/bitops/fls64.h>
diff --git a/tools/include/linux/bitops.h b/tools/include/linux/bitops.h
new file mode 100644
index 00000000000..26005a15e7e
--- /dev/null
+++ b/tools/include/linux/bitops.h
@@ -0,0 +1,53 @@
+#ifndef _TOOLS_LINUX_BITOPS_H_
+#define _TOOLS_LINUX_BITOPS_H_
+
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <asm/hweight.h>
+
+#ifndef __WORDSIZE
+#define __WORDSIZE (__SIZEOF_LONG__ * 8)
+#endif
+
+#define BITS_PER_LONG __WORDSIZE
+
+#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
+#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
+#define BITS_PER_BYTE 8
+#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
+#define BITS_TO_U64(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64))
+#define BITS_TO_U32(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32))
+#define BITS_TO_BYTES(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE)
+
+/*
+ * Include this here because some architectures need generic_ffs/fls in
+ * scope
+ *
+ * XXX: this needs to be asm/bitops.h, when we get to per arch optimizations
+ */
+#include <asm-generic/bitops.h>
+
+#define for_each_set_bit(bit, addr, size) \
+ for ((bit) = find_first_bit((addr), (size)); \
+ (bit) < (size); \
+ (bit) = find_next_bit((addr), (size), (bit) + 1))
+
+/* same as for_each_set_bit() but use bit as value to start with */
+#define for_each_set_bit_from(bit, addr, size) \
+ for ((bit) = find_next_bit((addr), (size), (bit)); \
+ (bit) < (size); \
+ (bit) = find_next_bit((addr), (size), (bit) + 1))
+
+static inline unsigned long hweight_long(unsigned long w)
+{
+ return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
+}
+
+static inline unsigned fls_long(unsigned long l)
+{
+ if (sizeof(l) == 4)
+ return fls(l);
+ return fls64(l);
+}
+
+#endif
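The two iteration macros rely on find_first_bit()/find_next_bit(), supplied by the fallback in tools/lib/util/find_next_bit.c added below. A usage sketch:

	/* walks the set bits of a 64-bit bitmap; prints 0 and then 4 */
	unsigned long bits[BITS_TO_LONGS(64)] = { 0x11 };
	int bit;

	for_each_set_bit(bit, bits, 64)
		printf("bit %d is set\n", bit);
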
diff --git a/tools/include/linux/log2.h b/tools/include/linux/log2.h
new file mode 100644
index 00000000000..41446668ccc
--- /dev/null
+++ b/tools/include/linux/log2.h
@@ -0,0 +1,185 @@
+/* Integer base 2 logarithm calculation
+ *
+ * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _TOOLS_LINUX_LOG2_H
+#define _TOOLS_LINUX_LOG2_H
+
+/*
+ * deal with unrepresentable constant logarithms
+ */
+extern __attribute__((const, noreturn))
+int ____ilog2_NaN(void);
+
+/*
+ * non-constant log of base 2 calculators
+ * - the arch may override these in asm/bitops.h if they can be implemented
+ * more efficiently than using fls() and fls64()
+ * - the arch is not required to handle n==0 if implementing the fallback
+ */
+static inline __attribute__((const))
+int __ilog2_u32(u32 n)
+{
+ return fls(n) - 1;
+}
+
+static inline __attribute__((const))
+int __ilog2_u64(u64 n)
+{
+ return fls64(n) - 1;
+}
+
+/*
+ * Determine whether some value is a power of two, where zero is
+ * *not* considered a power of two.
+ */
+
+static inline __attribute__((const))
+bool is_power_of_2(unsigned long n)
+{
+ return (n != 0 && ((n & (n - 1)) == 0));
+}
+
+/*
+ * round up to nearest power of two
+ */
+static inline __attribute__((const))
+unsigned long __roundup_pow_of_two(unsigned long n)
+{
+ return 1UL << fls_long(n - 1);
+}
+
+/*
+ * round down to nearest power of two
+ */
+static inline __attribute__((const))
+unsigned long __rounddown_pow_of_two(unsigned long n)
+{
+ return 1UL << (fls_long(n) - 1);
+}
+
+/**
+ * ilog2 - log of base 2 of 32-bit or a 64-bit unsigned value
+ * @n - parameter
+ *
+ * constant-capable log of base 2 calculation
+ * - this can be used to initialise global variables from constant data, hence
+ * the massive ternary operator construction
+ *
+ * selects the appropriately-sized optimised version depending on sizeof(n)
+ */
+#define ilog2(n) \
+( \
+ __builtin_constant_p(n) ? ( \
+ (n) < 1 ? ____ilog2_NaN() : \
+ (n) & (1ULL << 63) ? 63 : \
+ (n) & (1ULL << 62) ? 62 : \
+ (n) & (1ULL << 61) ? 61 : \
+ (n) & (1ULL << 60) ? 60 : \
+ (n) & (1ULL << 59) ? 59 : \
+ (n) & (1ULL << 58) ? 58 : \
+ (n) & (1ULL << 57) ? 57 : \
+ (n) & (1ULL << 56) ? 56 : \
+ (n) & (1ULL << 55) ? 55 : \
+ (n) & (1ULL << 54) ? 54 : \
+ (n) & (1ULL << 53) ? 53 : \
+ (n) & (1ULL << 52) ? 52 : \
+ (n) & (1ULL << 51) ? 51 : \
+ (n) & (1ULL << 50) ? 50 : \
+ (n) & (1ULL << 49) ? 49 : \
+ (n) & (1ULL << 48) ? 48 : \
+ (n) & (1ULL << 47) ? 47 : \
+ (n) & (1ULL << 46) ? 46 : \
+ (n) & (1ULL << 45) ? 45 : \
+ (n) & (1ULL << 44) ? 44 : \
+ (n) & (1ULL << 43) ? 43 : \
+ (n) & (1ULL << 42) ? 42 : \
+ (n) & (1ULL << 41) ? 41 : \
+ (n) & (1ULL << 40) ? 40 : \
+ (n) & (1ULL << 39) ? 39 : \
+ (n) & (1ULL << 38) ? 38 : \
+ (n) & (1ULL << 37) ? 37 : \
+ (n) & (1ULL << 36) ? 36 : \
+ (n) & (1ULL << 35) ? 35 : \
+ (n) & (1ULL << 34) ? 34 : \
+ (n) & (1ULL << 33) ? 33 : \
+ (n) & (1ULL << 32) ? 32 : \
+ (n) & (1ULL << 31) ? 31 : \
+ (n) & (1ULL << 30) ? 30 : \
+ (n) & (1ULL << 29) ? 29 : \
+ (n) & (1ULL << 28) ? 28 : \
+ (n) & (1ULL << 27) ? 27 : \
+ (n) & (1ULL << 26) ? 26 : \
+ (n) & (1ULL << 25) ? 25 : \
+ (n) & (1ULL << 24) ? 24 : \
+ (n) & (1ULL << 23) ? 23 : \
+ (n) & (1ULL << 22) ? 22 : \
+ (n) & (1ULL << 21) ? 21 : \
+ (n) & (1ULL << 20) ? 20 : \
+ (n) & (1ULL << 19) ? 19 : \
+ (n) & (1ULL << 18) ? 18 : \
+ (n) & (1ULL << 17) ? 17 : \
+ (n) & (1ULL << 16) ? 16 : \
+ (n) & (1ULL << 15) ? 15 : \
+ (n) & (1ULL << 14) ? 14 : \
+ (n) & (1ULL << 13) ? 13 : \
+ (n) & (1ULL << 12) ? 12 : \
+ (n) & (1ULL << 11) ? 11 : \
+ (n) & (1ULL << 10) ? 10 : \
+ (n) & (1ULL << 9) ? 9 : \
+ (n) & (1ULL << 8) ? 8 : \
+ (n) & (1ULL << 7) ? 7 : \
+ (n) & (1ULL << 6) ? 6 : \
+ (n) & (1ULL << 5) ? 5 : \
+ (n) & (1ULL << 4) ? 4 : \
+ (n) & (1ULL << 3) ? 3 : \
+ (n) & (1ULL << 2) ? 2 : \
+ (n) & (1ULL << 1) ? 1 : \
+ (n) & (1ULL << 0) ? 0 : \
+ ____ilog2_NaN() \
+ ) : \
+ (sizeof(n) <= 4) ? \
+ __ilog2_u32(n) : \
+ __ilog2_u64(n) \
+ )
+
+/**
+ * roundup_pow_of_two - round the given value up to nearest power of two
+ * @n - parameter
+ *
+ * round the given value up to the nearest power of two
+ * - the result is undefined when n == 0
+ * - this can be used to initialise global variables from constant data
+ */
+#define roundup_pow_of_two(n) \
+( \
+ __builtin_constant_p(n) ? ( \
+ (n == 1) ? 1 : \
+ (1UL << (ilog2((n) - 1) + 1)) \
+ ) : \
+ __roundup_pow_of_two(n) \
+ )
+
+/**
+ * rounddown_pow_of_two - round the given value down to nearest power of two
+ * @n - parameter
+ *
+ * round the given value down to the nearest power of two
+ * - the result is undefined when n == 0
+ * - this can be used to initialise global variables from constant data
+ */
+#define rounddown_pow_of_two(n) \
+( \
+ __builtin_constant_p(n) ? ( \
+ (1UL << ilog2(n))) : \
+ __rounddown_pow_of_two(n) \
+ )
+
+#endif /* _TOOLS_LINUX_LOG2_H */
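The large ternary chain keeps ilog2() usable as a constant expression when its argument is a compile-time constant (e.g. in array sizes or static initializers); run-time values fall back to the fls()-based helpers. A sketch of both cases (runtime_len is an illustrative variable):

	char ring[1 << ilog2(4096)];	/* constant-folded: char ring[4096] */
	int order = ilog2(runtime_len);	/* run-time: via __ilog2_u32/__ilog2_u64 */
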
diff --git a/tools/lib/api/fs/fs.c b/tools/lib/api/fs/fs.c
index c1b49c36a95..65d9be3f988 100644
--- a/tools/lib/api/fs/fs.c
+++ b/tools/lib/api/fs/fs.c
@@ -7,6 +7,10 @@
#include <stdlib.h>
#include <string.h>
#include <sys/vfs.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
#include "debugfs.h"
#include "fs.h"
@@ -163,3 +167,33 @@ const char *name##__mountpoint(void) \
FS__MOUNTPOINT(sysfs, FS__SYSFS);
FS__MOUNTPOINT(procfs, FS__PROCFS);
+
+int filename__read_int(const char *filename, int *value)
+{
+ char line[64];
+ int fd = open(filename, O_RDONLY), err = -1;
+
+ if (fd < 0)
+ return -1;
+
+ if (read(fd, line, sizeof(line)) > 0) {
+ *value = atoi(line);
+ err = 0;
+ }
+
+ close(fd);
+ return err;
+}
+
+int sysctl__read_int(const char *sysctl, int *value)
+{
+ char path[PATH_MAX];
+ const char *procfs = procfs__mountpoint();
+
+ if (!procfs)
+ return -1;
+
+ snprintf(path, sizeof(path), "%s/sys/%s", procfs, sysctl);
+
+ return filename__read_int(path, value);
+}
diff --git a/tools/lib/api/fs/fs.h b/tools/lib/api/fs/fs.h
index cb7049551f3..6caa2bbc6ce 100644
--- a/tools/lib/api/fs/fs.h
+++ b/tools/lib/api/fs/fs.h
@@ -11,4 +11,7 @@
const char *sysfs__mountpoint(void);
const char *procfs__mountpoint(void);
+
+int filename__read_int(const char *filename, int *value);
+int sysctl__read_int(const char *sysctl, int *value);
#endif /* __API_FS__ */
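sysctl__read_int() joins the mounted procfs path with the sysctl name (using '/' separators rather than '.') and parses the file as an integer. A usage sketch:

	int paranoid;

	if (sysctl__read_int("kernel/perf_event_paranoid", &paranoid) == 0)
		printf("perf_event_paranoid = %d\n", paranoid);
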
diff --git a/tools/lib/util/find_next_bit.c b/tools/lib/util/find_next_bit.c
new file mode 100644
index 00000000000..41b44f65a79
--- /dev/null
+++ b/tools/lib/util/find_next_bit.c
@@ -0,0 +1,89 @@
+/* find_next_bit.c: fallback find next bit implementation
+ *
+ * Copied from lib/find_next_bit.c to tools/lib/next_bit.c
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/bitops.h>
+#include <asm/types.h>
+#include <asm/byteorder.h>
+
+#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
+
+#ifndef find_next_bit
+/*
+ * Find the next set bit in a memory region.
+ */
+unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
+ unsigned long offset)
+{
+ const unsigned long *p = addr + BITOP_WORD(offset);
+ unsigned long result = offset & ~(BITS_PER_LONG-1);
+ unsigned long tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset %= BITS_PER_LONG;
+ if (offset) {
+ tmp = *(p++);
+ tmp &= (~0UL << offset);
+ if (size < BITS_PER_LONG)
+ goto found_first;
+ if (tmp)
+ goto found_middle;
+ size -= BITS_PER_LONG;
+ result += BITS_PER_LONG;
+ }
+ while (size & ~(BITS_PER_LONG-1)) {
+ if ((tmp = *(p++)))
+ goto found_middle;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+ tmp = *p;
+
+found_first:
+ tmp &= (~0UL >> (BITS_PER_LONG - size));
+ if (tmp == 0UL) /* Are any bits set? */
+ return result + size; /* Nope. */
+found_middle:
+ return result + __ffs(tmp);
+}
+#endif
+
+#ifndef find_first_bit
+/*
+ * Find the first set bit in a memory region.
+ */
+unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
+{
+ const unsigned long *p = addr;
+ unsigned long result = 0;
+ unsigned long tmp;
+
+ while (size & ~(BITS_PER_LONG-1)) {
+ if ((tmp = *(p++)))
+ goto found;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+
+ tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
+ if (tmp == 0UL) /* Are any bits set? */
+ return result + size; /* Nope. */
+found:
+ return result + __ffs(tmp);
+}
+#endif
diff --git a/tools/perf/Documentation/perf.txt b/tools/perf/Documentation/perf.txt
index d240bb2e5b2..1e8e400b449 100644
--- a/tools/perf/Documentation/perf.txt
+++ b/tools/perf/Documentation/perf.txt
@@ -18,6 +18,10 @@ OPTIONS
--debug verbose # sets verbose = 1
--debug verbose=2 # sets verbose = 2
+--buildid-dir::
+ Setup buildid cache directory. It has higher priority than
+ buildid.dir config file option.
+
DESCRIPTION
-----------
Performance counters for Linux are a new kernel-based subsystem
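A usage sketch for the new flag (the directory path is illustrative); it must precede the subcommand so that option handling sees it:

	perf --buildid-dir /tmp/buildids record -a sleep 1
	perf --buildid-dir /tmp/buildids report
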
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index 344c4d3d0a4..83e2887f91a 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -4,17 +4,31 @@ tools/lib/traceevent
tools/lib/api
tools/lib/symbol/kallsyms.c
tools/lib/symbol/kallsyms.h
+tools/lib/util/find_next_bit.c
tools/include/asm/bug.h
+tools/include/asm-generic/bitops/atomic.h
+tools/include/asm-generic/bitops/__ffs.h
+tools/include/asm-generic/bitops/__fls.h
+tools/include/asm-generic/bitops/find.h
+tools/include/asm-generic/bitops/fls64.h
+tools/include/asm-generic/bitops/fls.h
+tools/include/asm-generic/bitops.h
+tools/include/linux/bitops.h
tools/include/linux/compiler.h
-tools/include/linux/hash.h
tools/include/linux/export.h
+tools/include/linux/hash.h
+tools/include/linux/log2.h
tools/include/linux/types.h
+include/asm-generic/bitops/fls64.h
+include/asm-generic/bitops/__fls.h
+include/asm-generic/bitops/fls.h
include/linux/const.h
include/linux/perf_event.h
include/linux/rbtree.h
include/linux/list.h
include/linux/hash.h
include/linux/stringify.h
+lib/find_next_bit.c
lib/rbtree.c
include/linux/swab.h
arch/*/include/asm/unistd*.h
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 478efa9b236..67a03a825b3 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -231,8 +231,16 @@ LIB_H += ../../include/uapi/linux/const.h
LIB_H += ../include/linux/hash.h
LIB_H += ../../include/linux/stringify.h
LIB_H += util/include/linux/bitmap.h
-LIB_H += util/include/linux/bitops.h
+LIB_H += ../include/linux/bitops.h
+LIB_H += ../include/asm-generic/bitops/atomic.h
+LIB_H += ../include/asm-generic/bitops/find.h
+LIB_H += ../include/asm-generic/bitops/fls64.h
+LIB_H += ../include/asm-generic/bitops/fls.h
+LIB_H += ../include/asm-generic/bitops/__ffs.h
+LIB_H += ../include/asm-generic/bitops/__fls.h
+LIB_H += ../include/asm-generic/bitops.h
LIB_H += ../include/linux/compiler.h
+LIB_H += ../include/linux/log2.h
LIB_H += util/include/linux/const.h
LIB_H += util/include/linux/ctype.h
LIB_H += util/include/linux/kernel.h
@@ -335,6 +343,7 @@ LIB_OBJS += $(OUTPUT)util/event.o
LIB_OBJS += $(OUTPUT)util/evlist.o
LIB_OBJS += $(OUTPUT)util/evsel.o
LIB_OBJS += $(OUTPUT)util/exec_cmd.o
+LIB_OBJS += $(OUTPUT)util/find_next_bit.o
LIB_OBJS += $(OUTPUT)util/help.o
LIB_OBJS += $(OUTPUT)util/kallsyms.o
LIB_OBJS += $(OUTPUT)util/levenshtein.o
@@ -458,7 +467,6 @@ BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy-x86-64-asm.o
BUILTIN_OBJS += $(OUTPUT)bench/mem-memset-x86-64-asm.o
endif
BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy.o
-BUILTIN_OBJS += $(OUTPUT)bench/mem-memset.o
BUILTIN_OBJS += $(OUTPUT)bench/futex-hash.o
BUILTIN_OBJS += $(OUTPUT)bench/futex-wake.o
BUILTIN_OBJS += $(OUTPUT)bench/futex-requeue.o
@@ -735,6 +743,9 @@ $(OUTPUT)util/kallsyms.o: ../lib/symbol/kallsyms.c $(OUTPUT)PERF-CFLAGS
$(OUTPUT)util/rbtree.o: ../../lib/rbtree.c $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-unused-parameter -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
+$(OUTPUT)util/find_next_bit.o: ../lib/util/find_next_bit.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-unused-parameter -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
+
$(OUTPUT)util/parse-events.o: util/parse-events.c $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-redundant-decls $<
diff --git a/tools/perf/bench/mem-memcpy.c b/tools/perf/bench/mem-memcpy.c
index 2465141b554..6c14afe8c1b 100644
--- a/tools/perf/bench/mem-memcpy.c
+++ b/tools/perf/bench/mem-memcpy.c
@@ -13,6 +13,7 @@
#include "../util/cloexec.h"
#include "bench.h"
#include "mem-memcpy-arch.h"
+#include "mem-memset-arch.h"
#include <stdio.h>
#include <stdlib.h>
@@ -48,20 +49,24 @@ static const struct option options[] = {
};
typedef void *(*memcpy_t)(void *, const void *, size_t);
+typedef void *(*memset_t)(void *, int, size_t);
struct routine {
const char *name;
const char *desc;
- memcpy_t fn;
+ union {
+ memcpy_t memcpy;
+ memset_t memset;
+ } fn;
};
-struct routine routines[] = {
- { "default",
- "Default memcpy() provided by glibc",
- memcpy },
+struct routine memcpy_routines[] = {
+ { .name = "default",
+ .desc = "Default memcpy() provided by glibc",
+ .fn.memcpy = memcpy },
#ifdef HAVE_ARCH_X86_64_SUPPORT
-#define MEMCPY_FN(fn, name, desc) { name, desc, fn },
+#define MEMCPY_FN(_fn, _name, _desc) {.name = _name, .desc = _desc, .fn.memcpy = _fn},
#include "mem-memcpy-x86-64-asm-def.h"
#undef MEMCPY_FN
@@ -69,7 +74,7 @@ struct routine routines[] = {
{ NULL,
NULL,
- NULL }
+ {NULL} }
};
static const char * const bench_mem_memcpy_usage[] = {
@@ -110,63 +115,6 @@ static double timeval2double(struct timeval *ts)
(double)ts->tv_usec / (double)1000000;
}
-static void alloc_mem(void **dst, void **src, size_t length)
-{
- *dst = zalloc(length);
- if (!*dst)
- die("memory allocation failed - maybe length is too large?\n");
-
- *src = zalloc(length);
- if (!*src)
- die("memory allocation failed - maybe length is too large?\n");
- /* Make sure to always replace the zero pages even if MMAP_THRESH is crossed */
- memset(*src, 0, length);
-}
-
-static u64 do_memcpy_cycle(memcpy_t fn, size_t len, bool prefault)
-{
- u64 cycle_start = 0ULL, cycle_end = 0ULL;
- void *src = NULL, *dst = NULL;
- int i;
-
- alloc_mem(&src, &dst, len);
-
- if (prefault)
- fn(dst, src, len);
-
- cycle_start = get_cycle();
- for (i = 0; i < iterations; ++i)
- fn(dst, src, len);
- cycle_end = get_cycle();
-
- free(src);
- free(dst);
- return cycle_end - cycle_start;
-}
-
-static double do_memcpy_gettimeofday(memcpy_t fn, size_t len, bool prefault)
-{
- struct timeval tv_start, tv_end, tv_diff;
- void *src = NULL, *dst = NULL;
- int i;
-
- alloc_mem(&src, &dst, len);
-
- if (prefault)
- fn(dst, src, len);
-
- BUG_ON(gettimeofday(&tv_start, NULL));
- for (i = 0; i < iterations; ++i)
- fn(dst, src, len);
- BUG_ON(gettimeofday(&tv_end, NULL));
-
- timersub(&tv_end, &tv_start, &tv_diff);
-
- free(src);
- free(dst);
- return (double)((double)len / timeval2double(&tv_diff));
-}
-
#define pf (no_prefault ? 0 : 1)
#define print_bps(x) do { \
@@ -180,16 +128,25 @@ static double do_memcpy_gettimeofday(memcpy_t fn, size_t len, bool prefault)
printf(" %14lf GB/Sec", x / K / K / K); \
} while (0)
-int bench_mem_memcpy(int argc, const char **argv,
- const char *prefix __maybe_unused)
+struct bench_mem_info {
+ const struct routine *routines;
+ u64 (*do_cycle)(const struct routine *r, size_t len, bool prefault);
+ double (*do_gettimeofday)(const struct routine *r, size_t len, bool prefault);
+ const char *const *usage;
+};
+
+static int bench_mem_common(int argc, const char **argv,
+ const char *prefix __maybe_unused,
+ struct bench_mem_info *info)
{
int i;
size_t len;
+ double totallen;
double result_bps[2];
u64 result_cycle[2];
argc = parse_options(argc, argv, options,
- bench_mem_memcpy_usage, 0);
+ info->usage, 0);
if (no_prefault && only_prefault) {
fprintf(stderr, "Invalid options: -o and -n are mutually exclusive\n");
@@ -200,6 +157,7 @@ int bench_mem_memcpy(int argc, const char **argv,
init_cycle();
len = (size_t)perf_atoll((char *)length_str);
+ totallen = (double)len * iterations;
result_cycle[0] = result_cycle[1] = 0ULL;
result_bps[0] = result_bps[1] = 0.0;
@@ -213,16 +171,16 @@ int bench_mem_memcpy(int argc, const char **argv,
if (only_prefault && no_prefault)
only_prefault = no_prefault = false;
- for (i = 0; routines[i].name; i++) {
- if (!strcmp(routines[i].name, routine))
+ for (i = 0; info->routines[i].name; i++) {
+ if (!strcmp(info->routines[i].name, routine))
break;
}
- if (!routines[i].name) {
+ if (!info->routines[i].name) {
printf("Unknown routine:%s\n", routine);
printf("Available routines...\n");
- for (i = 0; routines[i].name; i++) {
+ for (i = 0; info->routines[i].name; i++) {
printf("\t%s ... %s\n",
- routines[i].name, routines[i].desc);
+ info->routines[i].name, info->routines[i].desc);
}
return 1;
}
@@ -234,25 +192,25 @@ int bench_mem_memcpy(int argc, const char **argv,
/* show both of results */
if (use_cycle) {
result_cycle[0] =
- do_memcpy_cycle(routines[i].fn, len, false);
+ info->do_cycle(&info->routines[i], len, false);
result_cycle[1] =
- do_memcpy_cycle(routines[i].fn, len, true);
+ info->do_cycle(&info->routines[i], len, true);
} else {
result_bps[0] =
- do_memcpy_gettimeofday(routines[i].fn,
+ info->do_gettimeofday(&info->routines[i],
len, false);
result_bps[1] =
- do_memcpy_gettimeofday(routines[i].fn,
+ info->do_gettimeofday(&info->routines[i],
len, true);
}
} else {
if (use_cycle) {
result_cycle[pf] =
- do_memcpy_cycle(routines[i].fn,
+ info->do_cycle(&info->routines[i],
len, only_prefault);
} else {
result_bps[pf] =
- do_memcpy_gettimeofday(routines[i].fn,
+ info->do_gettimeofday(&info->routines[i],
len, only_prefault);
}
}
@@ -263,10 +221,10 @@ int bench_mem_memcpy(int argc, const char **argv,
if (use_cycle) {
printf(" %14lf Cycle/Byte\n",
(double)result_cycle[0]
- / (double)len);
+ / totallen);
printf(" %14lf Cycle/Byte (with prefault)\n",
(double)result_cycle[1]
- / (double)len);
+ / totallen);
} else {
print_bps(result_bps[0]);
printf("\n");
@@ -277,7 +235,7 @@ int bench_mem_memcpy(int argc, const char **argv,
if (use_cycle) {
printf(" %14lf Cycle/Byte",
(double)result_cycle[pf]
- / (double)len);
+ / totallen);
} else
print_bps(result_bps[pf]);
@@ -288,8 +246,8 @@ int bench_mem_memcpy(int argc, const char **argv,
if (!only_prefault && !no_prefault) {
if (use_cycle) {
printf("%lf %lf\n",
- (double)result_cycle[0] / (double)len,
- (double)result_cycle[1] / (double)len);
+ (double)result_cycle[0] / totallen,
+ (double)result_cycle[1] / totallen);
} else {
printf("%lf %lf\n",
result_bps[0], result_bps[1]);
@@ -297,7 +255,7 @@ int bench_mem_memcpy(int argc, const char **argv,
} else {
if (use_cycle) {
printf("%lf\n", (double)result_cycle[pf]
- / (double)len);
+ / totallen);
} else
printf("%lf\n", result_bps[pf]);
}
@@ -310,3 +268,163 @@ int bench_mem_memcpy(int argc, const char **argv,
return 0;
}
+
+static void memcpy_alloc_mem(void **dst, void **src, size_t length)
+{
+ *dst = zalloc(length);
+ if (!*dst)
+ die("memory allocation failed - maybe length is too large?\n");
+
+ *src = zalloc(length);
+ if (!*src)
+ die("memory allocation failed - maybe length is too large?\n");
+ /* Make sure to always replace the zero pages even if MMAP_THRESH is crossed */
+ memset(*src, 0, length);
+}
+
+static u64 do_memcpy_cycle(const struct routine *r, size_t len, bool prefault)
+{
+ u64 cycle_start = 0ULL, cycle_end = 0ULL;
+ void *src = NULL, *dst = NULL;
+ memcpy_t fn = r->fn.memcpy;
+ int i;
+
+ memcpy_alloc_mem(&src, &dst, len);
+
+ if (prefault)
+ fn(dst, src, len);
+
+ cycle_start = get_cycle();
+ for (i = 0; i < iterations; ++i)
+ fn(dst, src, len);
+ cycle_end = get_cycle();
+
+ free(src);
+ free(dst);
+ return cycle_end - cycle_start;
+}
+
+static double do_memcpy_gettimeofday(const struct routine *r, size_t len,
+ bool prefault)
+{
+ struct timeval tv_start, tv_end, tv_diff;
+ memcpy_t fn = r->fn.memcpy;
+ void *src = NULL, *dst = NULL;
+ int i;
+
+ memcpy_alloc_mem(&src, &dst, len);
+
+ if (prefault)
+ fn(dst, src, len);
+
+ BUG_ON(gettimeofday(&tv_start, NULL));
+ for (i = 0; i < iterations; ++i)
+ fn(dst, src, len);
+ BUG_ON(gettimeofday(&tv_end, NULL));
+
+ timersub(&tv_end, &tv_start, &tv_diff);
+
+ free(src);
+ free(dst);
+ return (double)(((double)len * iterations) / timeval2double(&tv_diff));
+}
+
+int bench_mem_memcpy(int argc, const char **argv,
+ const char *prefix __maybe_unused)
+{
+ struct bench_mem_info info = {
+ .routines = memcpy_routines,
+ .do_cycle = do_memcpy_cycle,
+ .do_gettimeofday = do_memcpy_gettimeofday,
+ .usage = bench_mem_memcpy_usage,
+ };
+
+ return bench_mem_common(argc, argv, prefix, &info);
+}
+
+static void memset_alloc_mem(void **dst, size_t length)
+{
+ *dst = zalloc(length);
+ if (!*dst)
+ die("memory allocation failed - maybe length is too large?\n");
+}
+
+static u64 do_memset_cycle(const struct routine *r, size_t len, bool prefault)
+{
+ u64 cycle_start = 0ULL, cycle_end = 0ULL;
+ memset_t fn = r->fn.memset;
+ void *dst = NULL;
+ int i;
+
+ memset_alloc_mem(&dst, len);
+
+ if (prefault)
+ fn(dst, -1, len);
+
+ cycle_start = get_cycle();
+ for (i = 0; i < iterations; ++i)
+ fn(dst, i, len);
+ cycle_end = get_cycle();
+
+ free(dst);
+ return cycle_end - cycle_start;
+}
+
+static double do_memset_gettimeofday(const struct routine *r, size_t len,
+ bool prefault)
+{
+ struct timeval tv_start, tv_end, tv_diff;
+ memset_t fn = r->fn.memset;
+ void *dst = NULL;
+ int i;
+
+ memset_alloc_mem(&dst, len);
+
+ if (prefault)
+ fn(dst, -1, len);
+
+ BUG_ON(gettimeofday(&tv_start, NULL));
+ for (i = 0; i < iterations; ++i)
+ fn(dst, i, len);
+ BUG_ON(gettimeofday(&tv_end, NULL));
+
+ timersub(&tv_end, &tv_start, &tv_diff);
+
+ free(dst);
+ return (double)(((double)len * iterations) / timeval2double(&tv_diff));
+}
+
+static const char * const bench_mem_memset_usage[] = {
+ "perf bench mem memset <options>",
+ NULL
+};
+
+static const struct routine memset_routines[] = {
+ { .name ="default",
+ .desc = "Default memset() provided by glibc",
+ .fn.memset = memset },
+#ifdef HAVE_ARCH_X86_64_SUPPORT
+
+#define MEMSET_FN(_fn, _name, _desc) { .name = _name, .desc = _desc, .fn.memset = _fn },
+#include "mem-memset-x86-64-asm-def.h"
+#undef MEMSET_FN
+
+#endif
+
+ { .name = NULL,
+ .desc = NULL,
+ .fn.memset = NULL }
+};
+
+int bench_mem_memset(int argc, const char **argv,
+ const char *prefix __maybe_unused)
+{
+ struct bench_mem_info info = {
+ .routines = memset_routines,
+ .do_cycle = do_memset_cycle,
+ .do_gettimeofday = do_memset_gettimeofday,
+ .usage = bench_mem_memset_usage,
+ };
+
+ return bench_mem_common(argc, argv, prefix, &info);
+}
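The consolidation into bench_mem_common() also corrects the reported numbers: throughput and Cycle/Byte are now computed over len * iterations instead of a single pass, so, for example, copying a 1 MB buffer 10 times in 0.01 s is reported as (10 * 1 MB) / 0.01 s ≈ 1 GB/s rather than a figure ten times too low.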
diff --git a/tools/perf/bench/mem-memset.c b/tools/perf/bench/mem-memset.c
deleted file mode 100644
index 75fc3e65fb2..00000000000
--- a/tools/perf/bench/mem-memset.c
+++ /dev/null
@@ -1,304 +0,0 @@
-/*
- * mem-memset.c
- *
- * memset: Simple memory set in various ways
- *
- * Trivial clone of mem-memcpy.c.
- */
-
-#include "../perf.h"
-#include "../util/util.h"
-#include "../util/parse-options.h"
-#include "../util/header.h"
-#include "../util/cloexec.h"
-#include "bench.h"
-#include "mem-memset-arch.h"
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/time.h>
-#include <errno.h>
-
-#define K 1024
-
-static const char *length_str = "1MB";
-static const char *routine = "default";
-static int iterations = 1;
-static bool use_cycle;
-static int cycle_fd;
-static bool only_prefault;
-static bool no_prefault;
-
-static const struct option options[] = {
- OPT_STRING('l', "length", &length_str, "1MB",
- "Specify length of memory to set. "
- "Available units: B, KB, MB, GB and TB (upper and lower)"),
- OPT_STRING('r', "routine", &routine, "default",
- "Specify routine to set"),
- OPT_INTEGER('i', "iterations", &iterations,
- "repeat memset() invocation this number of times"),
- OPT_BOOLEAN('c', "cycle", &use_cycle,
- "Use cycles event instead of gettimeofday() for measuring"),
- OPT_BOOLEAN('o', "only-prefault", &only_prefault,
- "Show only the result with page faults before memset()"),
- OPT_BOOLEAN('n', "no-prefault", &no_prefault,
- "Show only the result without page faults before memset()"),
- OPT_END()
-};
-
-typedef void *(*memset_t)(void *, int, size_t);
-
-struct routine {
- const char *name;
- const char *desc;
- memset_t fn;
-};
-
-static const struct routine routines[] = {
- { "default",
- "Default memset() provided by glibc",
- memset },
-#ifdef HAVE_ARCH_X86_64_SUPPORT
-
-#define MEMSET_FN(fn, name, desc) { name, desc, fn },
-#include "mem-memset-x86-64-asm-def.h"
-#undef MEMSET_FN
-
-#endif
-
- { NULL,
- NULL,
- NULL }
-};
-
-static const char * const bench_mem_memset_usage[] = {
- "perf bench mem memset <options>",
- NULL
-};
-
-static struct perf_event_attr cycle_attr = {
- .type = PERF_TYPE_HARDWARE,
- .config = PERF_COUNT_HW_CPU_CYCLES
-};
-
-static void init_cycle(void)
-{
- cycle_fd = sys_perf_event_open(&cycle_attr, getpid(), -1, -1,
- perf_event_open_cloexec_flag());
-
- if (cycle_fd < 0 && errno == ENOSYS)
- die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
- else
- BUG_ON(cycle_fd < 0);
-}
-
-static u64 get_cycle(void)
-{
- int ret;
- u64 clk;
-
- ret = read(cycle_fd, &clk, sizeof(u64));
- BUG_ON(ret != sizeof(u64));
-
- return clk;
-}
-
-static double timeval2double(struct timeval *ts)
-{
- return (double)ts->tv_sec +
- (double)ts->tv_usec / (double)1000000;
-}
-
-static void alloc_mem(void **dst, size_t length)
-{
- *dst = zalloc(length);
- if (!*dst)
- die("memory allocation failed - maybe length is too large?\n");
-}
-
-static u64 do_memset_cycle(memset_t fn, size_t len, bool prefault)
-{
- u64 cycle_start = 0ULL, cycle_end = 0ULL;
- void *dst = NULL;
- int i;
-
- alloc_mem(&dst, len);
-
- if (prefault)
- fn(dst, -1, len);
-
- cycle_start = get_cycle();
- for (i = 0; i < iterations; ++i)
- fn(dst, i, len);
- cycle_end = get_cycle();
-
- free(dst);
- return cycle_end - cycle_start;
-}
-
-static double do_memset_gettimeofday(memset_t fn, size_t len, bool prefault)
-{
- struct timeval tv_start, tv_end, tv_diff;
- void *dst = NULL;
- int i;
-
- alloc_mem(&dst, len);
-
- if (prefault)
- fn(dst, -1, len);
-
- BUG_ON(gettimeofday(&tv_start, NULL));
- for (i = 0; i < iterations; ++i)
- fn(dst, i, len);
- BUG_ON(gettimeofday(&tv_end, NULL));
-
- timersub(&tv_end, &tv_start, &tv_diff);
-
- free(dst);
- return (double)((double)len / timeval2double(&tv_diff));
-}
-
-#define pf (no_prefault ? 0 : 1)
-
-#define print_bps(x) do { \
- if (x < K) \
- printf(" %14lf B/Sec", x); \
- else if (x < K * K) \
- printf(" %14lfd KB/Sec", x / K); \
- else if (x < K * K * K) \
- printf(" %14lf MB/Sec", x / K / K); \
- else \
- printf(" %14lf GB/Sec", x / K / K / K); \
- } while (0)
-
-int bench_mem_memset(int argc, const char **argv,
- const char *prefix __maybe_unused)
-{
- int i;
- size_t len;
- double result_bps[2];
- u64 result_cycle[2];
-
- argc = parse_options(argc, argv, options,
- bench_mem_memset_usage, 0);
-
- if (no_prefault && only_prefault) {
- fprintf(stderr, "Invalid options: -o and -n are mutually exclusive\n");
- return 1;
- }
-
- if (use_cycle)
- init_cycle();
-
- len = (size_t)perf_atoll((char *)length_str);
-
- result_cycle[0] = result_cycle[1] = 0ULL;
- result_bps[0] = result_bps[1] = 0.0;
-
- if ((s64)len <= 0) {
- fprintf(stderr, "Invalid length:%s\n", length_str);
- return 1;
- }
-
- /* same to without specifying either of prefault and no-prefault */
- if (only_prefault && no_prefault)
- only_prefault = no_prefault = false;
-
- for (i = 0; routines[i].name; i++) {
- if (!strcmp(routines[i].name, routine))
- break;
- }
- if (!routines[i].name) {
- printf("Unknown routine:%s\n", routine);
- printf("Available routines...\n");
- for (i = 0; routines[i].name; i++) {
- printf("\t%s ... %s\n",
- routines[i].name, routines[i].desc);
- }
- return 1;
- }
-
- if (bench_format == BENCH_FORMAT_DEFAULT)
- printf("# Copying %s Bytes ...\n\n", length_str);
-
- if (!only_prefault && !no_prefault) {
- /* show both of results */
- if (use_cycle) {
- result_cycle[0] =
- do_memset_cycle(routines[i].fn, len, false);
- result_cycle[1] =
- do_memset_cycle(routines[i].fn, len, true);
- } else {
- result_bps[0] =
- do_memset_gettimeofday(routines[i].fn,
- len, false);
- result_bps[1] =
- do_memset_gettimeofday(routines[i].fn,
- len, true);
- }
- } else {
- if (use_cycle) {
- result_cycle[pf] =
- do_memset_cycle(routines[i].fn,
- len, only_prefault);
- } else {
- result_bps[pf] =
- do_memset_gettimeofday(routines[i].fn,
- len, only_prefault);
- }
- }
-
- switch (bench_format) {
- case BENCH_FORMAT_DEFAULT:
- if (!only_prefault && !no_prefault) {
- if (use_cycle) {
- printf(" %14lf Cycle/Byte\n",
- (double)result_cycle[0]
- / (double)len);
- printf(" %14lf Cycle/Byte (with prefault)\n ",
- (double)result_cycle[1]
- / (double)len);
- } else {
- print_bps(result_bps[0]);
- printf("\n");
- print_bps(result_bps[1]);
- printf(" (with prefault)\n");
- }
- } else {
- if (use_cycle) {
- printf(" %14lf Cycle/Byte",
- (double)result_cycle[pf]
- / (double)len);
- } else
- print_bps(result_bps[pf]);
-
- printf("%s\n", only_prefault ? " (with prefault)" : "");
- }
- break;
- case BENCH_FORMAT_SIMPLE:
- if (!only_prefault && !no_prefault) {
- if (use_cycle) {
- printf("%lf %lf\n",
- (double)result_cycle[0] / (double)len,
- (double)result_cycle[1] / (double)len);
- } else {
- printf("%lf %lf\n",
- result_bps[0], result_bps[1]);
- }
- } else {
- if (use_cycle) {
- printf("%lf\n", (double)result_cycle[pf]
- / (double)len);
- } else
- printf("%lf\n", result_bps[pf]);
- }
- break;
- default:
- /* reaching this means there's some disaster: */
- die("unknown format: %d\n", bench_format);
- break;
- }
-
- return 0;
-}
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index 70385756da6..77d5cae54c6 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -285,12 +285,11 @@ int cmd_buildid_cache(int argc, const char **argv,
struct str_node *pos;
int ret = 0;
bool force = false;
- char debugdir[PATH_MAX];
char const *add_name_list_str = NULL,
*remove_name_list_str = NULL,
*missing_filename = NULL,
*update_name_list_str = NULL,
- *kcore_filename;
+ *kcore_filename = NULL;
char sbuf[STRERR_BUFSIZE];
struct perf_data_file file = {
@@ -335,13 +334,11 @@ int cmd_buildid_cache(int argc, const char **argv,
setup_pager();
- snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir);
-
if (add_name_list_str) {
list = strlist__new(true, add_name_list_str);
if (list) {
strlist__for_each(pos, list)
- if (build_id_cache__add_file(pos->s, debugdir)) {
+ if (build_id_cache__add_file(pos->s, buildid_dir)) {
if (errno == EEXIST) {
pr_debug("%s already in the cache\n",
pos->s);
@@ -359,7 +356,7 @@ int cmd_buildid_cache(int argc, const char **argv,
list = strlist__new(true, remove_name_list_str);
if (list) {
strlist__for_each(pos, list)
- if (build_id_cache__remove_file(pos->s, debugdir)) {
+ if (build_id_cache__remove_file(pos->s, buildid_dir)) {
if (errno == ENOENT) {
pr_debug("%s wasn't in the cache\n",
pos->s);
@@ -380,7 +377,7 @@ int cmd_buildid_cache(int argc, const char **argv,
list = strlist__new(true, update_name_list_str);
if (list) {
strlist__for_each(pos, list)
- if (build_id_cache__update_file(pos->s, debugdir)) {
+ if (build_id_cache__update_file(pos->s, buildid_dir)) {
if (errno == ENOENT) {
pr_debug("%s wasn't in the cache\n",
pos->s);
@@ -395,7 +392,7 @@ int cmd_buildid_cache(int argc, const char **argv,
}
if (kcore_filename &&
- build_id_cache__add_kcore(kcore_filename, debugdir, force))
+ build_id_cache__add_kcore(kcore_filename, buildid_dir, force))
pr_warning("Couldn't add %s\n", kcore_filename);
out:
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 3c0f3d4fb02..0894a817f67 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -1293,7 +1293,8 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
OPT_UINTEGER('d', "display", &kvm->display_time,
"time in seconds between display updates"),
OPT_STRING(0, "event", &kvm->report_event, "report event",
- "event for reporting: vmexit, mmio, ioport"),
+ "event for reporting: "
+ "vmexit, mmio (x86 only), ioport (x86 only)"),
OPT_INTEGER(0, "vcpu", &kvm->trace_vcpu,
"vcpu id to report"),
OPT_STRING('k', "key", &kvm->sort_key, "sort-key",
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 83a4835c811..badfabc6a01 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -2045,7 +2045,6 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
unsigned long before;
const bool forks = argc > 0;
bool draining = false;
- char sbuf[STRERR_BUFSIZE];
trace->live = true;
@@ -2106,11 +2105,8 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
goto out_error_open;
err = perf_evlist__mmap(evlist, trace->opts.mmap_pages, false);
- if (err < 0) {
- fprintf(trace->output, "Couldn't mmap the events: %s\n",
- strerror_r(errno, sbuf, sizeof(sbuf)));
- goto out_delete_evlist;
- }
+ if (err < 0)
+ goto out_error_mmap;
perf_evlist__enable(evlist);
@@ -2210,6 +2206,10 @@ out_error_tp:
perf_evlist__strerror_tp(evlist, errno, errbuf, sizeof(errbuf));
goto out_error;
+out_error_mmap:
+ perf_evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf));
+ goto out_error;
+
out_error_open:
perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
@@ -2485,7 +2485,7 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
.user_freq = UINT_MAX,
.user_interval = ULLONG_MAX,
.no_buffering = true,
- .mmap_pages = 1024,
+ .mmap_pages = UINT_MAX,
},
.output = stdout,
.show_comm = true,
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 452a8474d29..3700a7faca6 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -200,6 +200,16 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
*envchanged = 1;
(*argv)++;
(*argc)--;
+ } else if (!strcmp(cmd, "--buildid-dir")) {
+ if (*argc < 2) {
+ fprintf(stderr, "No directory given for --buildid-dir.\n");
+ usage(perf_usage_string);
+ }
+ set_buildid_dir((*argv)[1]);
+ if (envchanged)
+ *envchanged = 1;
+ (*argv)++;
+ (*argc)--;
} else if (!prefixcmp(cmd, CMD_DEBUGFS_DIR)) {
perf_debugfs_set_path(cmd + strlen(CMD_DEBUGFS_DIR));
fprintf(stderr, "dir: %s\n", debugfs_mountpoint);
@@ -499,7 +509,7 @@ int main(int argc, const char **argv)
}
if (!prefixcmp(cmd, "trace")) {
#ifdef HAVE_LIBAUDIT_SUPPORT
- set_buildid_dir();
+ set_buildid_dir(NULL);
setup_path();
argv[0] = "trace";
return cmd_trace(argc, argv, NULL);
@@ -514,7 +524,7 @@ int main(int argc, const char **argv)
argc--;
handle_options(&argv, &argc, NULL);
commit_pager_choice();
- set_buildid_dir();
+ set_buildid_dir(NULL);
if (argc > 0) {
if (!prefixcmp(argv[0], "--"))
diff --git a/tools/perf/tests/attr/base-record b/tools/perf/tests/attr/base-record
index f710b92ccff..d3095dafed3 100644
--- a/tools/perf/tests/attr/base-record
+++ b/tools/perf/tests/attr/base-record
@@ -5,7 +5,7 @@ group_fd=-1
flags=0|8
cpu=*
type=0|1
-size=96
+size=104
config=0
sample_period=4000
sample_type=263
diff --git a/tools/perf/tests/attr/base-stat b/tools/perf/tests/attr/base-stat
index dc3ada2470c..872ed7e24c7 100644
--- a/tools/perf/tests/attr/base-stat
+++ b/tools/perf/tests/attr/base-stat
@@ -5,7 +5,7 @@ group_fd=-1
flags=0|8
cpu=*
type=0
-size=96
+size=104
config=0
sample_period=0
sample_type=0
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 502daff76ce..e6bb04b5b09 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -1252,7 +1252,7 @@ static int hists__browser_title(struct hists *hists,
nr_samples = convert_unit(nr_samples, &unit);
printed = scnprintf(bf, size,
- "Samples: %lu%c of event '%s', Event count (approx.): %lu",
+ "Samples: %lu%c of event '%s', Event count (approx.): %" PRIu64,
nr_samples, unit, ev_name, nr_events);
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index 2af18376b07..dc0d095f318 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -162,8 +162,8 @@ static int __hpp__sort(struct hist_entry *a, struct hist_entry *b,
return ret;
nr_members = evsel->nr_members;
- fields_a = calloc(sizeof(*fields_a), nr_members);
- fields_b = calloc(sizeof(*fields_b), nr_members);
+ fields_a = calloc(nr_members, sizeof(*fields_a));
+ fields_b = calloc(nr_members, sizeof(*fields_b));
if (!fields_a || !fields_b)
goto out;
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index e8d79e5bfaf..0c72680a977 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -410,21 +410,18 @@ int perf_session__cache_build_ids(struct perf_session *session)
{
struct rb_node *nd;
int ret;
- char debugdir[PATH_MAX];
if (no_buildid_cache)
return 0;
- snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir);
-
- if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
+ if (mkdir(buildid_dir, 0755) != 0 && errno != EEXIST)
return -1;
- ret = machine__cache_build_ids(&session->machines.host, debugdir);
+ ret = machine__cache_build_ids(&session->machines.host, buildid_dir);
for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
- ret |= machine__cache_build_ids(pos, debugdir);
+ ret |= machine__cache_build_ids(pos, buildid_dir);
}
return ret ? -1 : 0;
}
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index cf524a35cc8..64b377e591e 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -77,7 +77,7 @@ int parse_callchain_record_opt(const char *arg)
ret = 0;
} else
pr_err("callchain: No more arguments "
- "needed for -g fp\n");
+ "needed for --call-graph fp\n");
break;
#ifdef HAVE_DWARF_UNWIND_SUPPORT
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 57ff826f150..e18f653cd7d 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -522,7 +522,7 @@ static int buildid_dir_command_config(const char *var, const char *value,
const char *v;
/* same dir for all commands */
- if (!prefixcmp(var, "buildid.") && !strcmp(var + 8, "dir")) {
+ if (!strcmp(var, "buildid.dir")) {
v = perf_config_dirname(var, value);
if (!v)
return -1;
@@ -539,12 +539,14 @@ static void check_buildid_dir_config(void)
perf_config(buildid_dir_command_config, &c);
}
-void set_buildid_dir(void)
+void set_buildid_dir(const char *dir)
{
- buildid_dir[0] = '\0';
+ if (dir)
+ scnprintf(buildid_dir, MAXPATHLEN-1, "%s", dir);
/* try config file */
- check_buildid_dir_config();
+ if (buildid_dir[0] == '\0')
+ check_buildid_dir_config();
/* default to $HOME/.debug */
if (buildid_dir[0] == '\0') {
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index cfbe2b99b9a..cbab1fb77b1 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -8,6 +8,7 @@
*/
#include "util.h"
#include <api/fs/debugfs.h>
+#include <api/fs/fs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
@@ -24,6 +25,7 @@
#include <linux/bitops.h>
#include <linux/hash.h>
+#include <linux/log2.h>
static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx);
static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx);
@@ -892,10 +894,24 @@ out_unmap:
static size_t perf_evlist__mmap_size(unsigned long pages)
{
- /* 512 kiB: default amount of unprivileged mlocked memory */
- if (pages == UINT_MAX)
- pages = (512 * 1024) / page_size;
- else if (!is_power_of_2(pages))
+ if (pages == UINT_MAX) {
+ int max;
+
+ if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
+ /*
+				 * Fall back to a value that was a reasonable default once upon a
+				 * time; things look strange since we can't read the sysctl value,
+				 * but let's not die yet...
+ */
+ max = 512;
+ } else {
+ max -= (page_size / 1024);
+ }
+
+ pages = (max * 1024) / page_size;
+ if (!is_power_of_2(pages))
+ pages = rounddown_pow_of_two(pages);
+ } else if (!is_power_of_2(pages))
return 0;
return (pages + 1) * page_size;
@@ -932,7 +948,7 @@ static long parse_pages_arg(const char *str, unsigned long min,
/* leave number of pages at 0 */
} else if (!is_power_of_2(pages)) {
/* round pages up to next power of 2 */
- pages = next_pow2_l(pages);
+ pages = roundup_pow_of_two(pages);
if (!pages)
return -EINVAL;
pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n",
@@ -1483,6 +1499,37 @@ int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused,
return 0;
}
+int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size)
+{
+ char sbuf[STRERR_BUFSIZE], *emsg = strerror_r(err, sbuf, sizeof(sbuf));
+ int pages_attempted = evlist->mmap_len / 1024, pages_max_per_user, printed = 0;
+
+ switch (err) {
+ case EPERM:
+ sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
+ printed += scnprintf(buf + printed, size - printed,
+ "Error:\t%s.\n"
+ "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
+				     "Hint:\tTried using %d kB.\n",
+ emsg, pages_max_per_user, pages_attempted);
+
+ if (pages_attempted >= pages_max_per_user) {
+ printed += scnprintf(buf + printed, size - printed,
+ "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
+ pages_max_per_user + pages_attempted);
+ }
+
+ printed += scnprintf(buf + printed, size - printed,
+ "Hint:\tTry using a smaller -m/--mmap-pages value.");
+ break;
+ default:
+ scnprintf(buf, size, "%s", emsg);
+ break;
+ }
+
+ return 0;
+}
+
void perf_evlist__to_front(struct perf_evlist *evlist,
struct perf_evsel *move_evsel)
{
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 649b0c59728..0ba93f67ab9 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -185,6 +185,7 @@ size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp);
int perf_evlist__strerror_tp(struct perf_evlist *evlist, int err, char *buf, size_t size);
int perf_evlist__strerror_open(struct perf_evlist *evlist, int err, char *buf, size_t size);
+int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size);
static inline unsigned int perf_mmap__read_head(struct perf_mmap *mm)
{
diff --git a/tools/perf/util/include/linux/bitops.h b/tools/perf/util/include/linux/bitops.h
deleted file mode 100644
index c3294163de1..00000000000
--- a/tools/perf/util/include/linux/bitops.h
+++ /dev/null
@@ -1,162 +0,0 @@
-#ifndef _PERF_LINUX_BITOPS_H_
-#define _PERF_LINUX_BITOPS_H_
-
-#include <linux/kernel.h>
-#include <linux/compiler.h>
-#include <asm/hweight.h>
-
-#ifndef __WORDSIZE
-#define __WORDSIZE (__SIZEOF_LONG__ * 8)
-#endif
-
-#define BITS_PER_LONG __WORDSIZE
-#define BITS_PER_BYTE 8
-#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
-#define BITS_TO_U64(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64))
-#define BITS_TO_U32(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32))
-#define BITS_TO_BYTES(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE)
-#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
-#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
-
-#define for_each_set_bit(bit, addr, size) \
- for ((bit) = find_first_bit((addr), (size)); \
- (bit) < (size); \
- (bit) = find_next_bit((addr), (size), (bit) + 1))
-
-/* same as for_each_set_bit() but use bit as value to start with */
-#define for_each_set_bit_from(bit, addr, size) \
- for ((bit) = find_next_bit((addr), (size), (bit)); \
- (bit) < (size); \
- (bit) = find_next_bit((addr), (size), (bit) + 1))
-
-static inline void set_bit(int nr, unsigned long *addr)
-{
- addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
-}
-
-static inline void clear_bit(int nr, unsigned long *addr)
-{
- addr[nr / BITS_PER_LONG] &= ~(1UL << (nr % BITS_PER_LONG));
-}
-
-static __always_inline int test_bit(unsigned int nr, const unsigned long *addr)
-{
- return ((1UL << (nr % BITS_PER_LONG)) &
- (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
-}
-
-static inline unsigned long hweight_long(unsigned long w)
-{
- return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
-}
-
-#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
-
-/**
- * __ffs - find first bit in word.
- * @word: The word to search
- *
- * Undefined if no bit exists, so code should check against 0 first.
- */
-static __always_inline unsigned long __ffs(unsigned long word)
-{
- int num = 0;
-
-#if BITS_PER_LONG == 64
- if ((word & 0xffffffff) == 0) {
- num += 32;
- word >>= 32;
- }
-#endif
- if ((word & 0xffff) == 0) {
- num += 16;
- word >>= 16;
- }
- if ((word & 0xff) == 0) {
- num += 8;
- word >>= 8;
- }
- if ((word & 0xf) == 0) {
- num += 4;
- word >>= 4;
- }
- if ((word & 0x3) == 0) {
- num += 2;
- word >>= 2;
- }
- if ((word & 0x1) == 0)
- num += 1;
- return num;
-}
-
-typedef const unsigned long __attribute__((__may_alias__)) long_alias_t;
-
-/*
- * Find the first set bit in a memory region.
- */
-static inline unsigned long
-find_first_bit(const unsigned long *addr, unsigned long size)
-{
- long_alias_t *p = (long_alias_t *) addr;
- unsigned long result = 0;
- unsigned long tmp;
-
- while (size & ~(BITS_PER_LONG-1)) {
- if ((tmp = *(p++)))
- goto found;
- result += BITS_PER_LONG;
- size -= BITS_PER_LONG;
- }
- if (!size)
- return result;
-
- tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
- if (tmp == 0UL) /* Are any bits set? */
- return result + size; /* Nope. */
-found:
- return result + __ffs(tmp);
-}
-
-/*
- * Find the next set bit in a memory region.
- */
-static inline unsigned long
-find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
-{
- const unsigned long *p = addr + BITOP_WORD(offset);
- unsigned long result = offset & ~(BITS_PER_LONG-1);
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset %= BITS_PER_LONG;
- if (offset) {
- tmp = *(p++);
- tmp &= (~0UL << offset);
- if (size < BITS_PER_LONG)
- goto found_first;
- if (tmp)
- goto found_middle;
- size -= BITS_PER_LONG;
- result += BITS_PER_LONG;
- }
- while (size & ~(BITS_PER_LONG-1)) {
- if ((tmp = *(p++)))
- goto found_middle;
- result += BITS_PER_LONG;
- size -= BITS_PER_LONG;
- }
- if (!size)
- return result;
- tmp = *p;
-
-found_first:
- tmp &= (~0UL >> (BITS_PER_LONG - size));
- if (tmp == 0UL) /* Are any bits set? */
- return result + size; /* Nope. */
-found_middle:
- return result + __ffs(tmp);
-}
-
-#endif
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 15dd0a9691c..94de3e48b49 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -1385,19 +1385,46 @@ struct mem_info *sample__resolve_mem(struct perf_sample *sample,
static int add_callchain_ip(struct thread *thread,
struct symbol **parent,
struct addr_location *root_al,
- int cpumode,
+ bool branch_history,
u64 ip)
{
struct addr_location al;
al.filtered = 0;
al.sym = NULL;
- if (cpumode == -1)
+ if (branch_history)
thread__find_cpumode_addr_location(thread, MAP__FUNCTION,
ip, &al);
- else
+ else {
+ u8 cpumode = PERF_RECORD_MISC_USER;
+
+ if (ip >= PERF_CONTEXT_MAX) {
+ switch (ip) {
+ case PERF_CONTEXT_HV:
+ cpumode = PERF_RECORD_MISC_HYPERVISOR;
+ break;
+ case PERF_CONTEXT_KERNEL:
+ cpumode = PERF_RECORD_MISC_KERNEL;
+ break;
+ case PERF_CONTEXT_USER:
+ cpumode = PERF_RECORD_MISC_USER;
+ break;
+ default:
+ pr_debug("invalid callchain context: "
+ "%"PRId64"\n", (s64) ip);
+ /*
+ * It seems the callchain is corrupted.
+ * Discard all.
+ */
+ callchain_cursor_reset(&callchain_cursor);
+ return 1;
+ }
+ return 0;
+ }
thread__find_addr_location(thread, cpumode, MAP__FUNCTION,
ip, &al);
+ }
+
if (al.sym != NULL) {
if (sort__has_parent && !*parent &&
symbol__match_regex(al.sym, &parent_regex))
@@ -1480,11 +1507,8 @@ static int thread__resolve_callchain_sample(struct thread *thread,
struct addr_location *root_al,
int max_stack)
{
- u8 cpumode = PERF_RECORD_MISC_USER;
int chain_nr = min(max_stack, (int)chain->nr);
- int i;
- int j;
- int err;
+ int i, j, err;
int skip_idx = -1;
int first_call = 0;
@@ -1542,10 +1566,10 @@ static int thread__resolve_callchain_sample(struct thread *thread,
for (i = 0; i < nr; i++) {
err = add_callchain_ip(thread, parent, root_al,
- -1, be[i].to);
+ true, be[i].to);
if (!err)
err = add_callchain_ip(thread, parent, root_al,
- -1, be[i].from);
+ true, be[i].from);
if (err == -EINVAL)
break;
if (err)
@@ -1574,36 +1598,10 @@ check_calls:
#endif
ip = chain->ips[j];
- if (ip >= PERF_CONTEXT_MAX) {
- switch (ip) {
- case PERF_CONTEXT_HV:
- cpumode = PERF_RECORD_MISC_HYPERVISOR;
- break;
- case PERF_CONTEXT_KERNEL:
- cpumode = PERF_RECORD_MISC_KERNEL;
- break;
- case PERF_CONTEXT_USER:
- cpumode = PERF_RECORD_MISC_USER;
- break;
- default:
- pr_debug("invalid callchain context: "
- "%"PRId64"\n", (s64) ip);
- /*
- * It seems the callchain is corrupted.
- * Discard all.
- */
- callchain_cursor_reset(&callchain_cursor);
- return 0;
- }
- continue;
- }
+ err = add_callchain_ip(thread, parent, root_al, false, ip);
- err = add_callchain_ip(thread, parent, root_al,
- cpumode, ip);
- if (err == -EINVAL)
- break;
if (err)
- return err;
+ return (err < 0) ? err : 0;
}
return 0;
diff --git a/tools/perf/util/record.c b/tools/perf/util/record.c
index cf69325b985..8acd0df88b5 100644
--- a/tools/perf/util/record.c
+++ b/tools/perf/util/record.c
@@ -137,16 +137,7 @@ void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts)
static int get_max_rate(unsigned int *rate)
{
- char path[PATH_MAX];
- const char *procfs = procfs__mountpoint();
-
- if (!procfs)
- return -1;
-
- snprintf(path, PATH_MAX,
- "%s/sys/kernel/perf_event_max_sample_rate", procfs);
-
- return filename__read_int(path, (int *) rate);
+ return sysctl__read_int("kernel/perf_event_max_sample_rate", (int *)rate);
}
static int record_opts__config_freq(struct record_opts *opts)
diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c
index e73b6a5c9e0..c93fb0c5bd0 100644
--- a/tools/perf/util/srcline.c
+++ b/tools/perf/util/srcline.c
@@ -20,7 +20,7 @@
struct a2l_data {
const char *input;
- unsigned long addr;
+ u64 addr;
bool found;
const char *filename;
@@ -147,7 +147,7 @@ static void addr2line_cleanup(struct a2l_data *a2l)
free(a2l);
}
-static int addr2line(const char *dso_name, unsigned long addr,
+static int addr2line(const char *dso_name, u64 addr,
char **file, unsigned int *line, struct dso *dso)
{
int ret = 0;
@@ -193,7 +193,7 @@ void dso__free_a2l(struct dso *dso)
#else /* HAVE_LIBBFD_SUPPORT */
-static int addr2line(const char *dso_name, unsigned long addr,
+static int addr2line(const char *dso_name, u64 addr,
char **file, unsigned int *line_nr,
struct dso *dso __maybe_unused)
{
@@ -252,7 +252,7 @@ void dso__free_a2l(struct dso *dso __maybe_unused)
*/
#define A2L_FAIL_LIMIT 123
-char *get_srcline(struct dso *dso, unsigned long addr, struct symbol *sym,
+char *get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
bool show_sym)
{
char *file = NULL;
@@ -293,10 +293,10 @@ out:
dso__free_a2l(dso);
}
if (sym) {
- if (asprintf(&srcline, "%s+%ld", show_sym ? sym->name : "",
+ if (asprintf(&srcline, "%s+%" PRIu64, show_sym ? sym->name : "",
addr - sym->start) < 0)
return SRCLINE_UNKNOWN;
- } else if (asprintf(&srcline, "%s[%lx]", dso->short_name, addr) < 0)
+ } else if (asprintf(&srcline, "%s[%" PRIx64 "]", dso->short_name, addr) < 0)
return SRCLINE_UNKNOWN;
return srcline;
}
diff --git a/tools/perf/util/symbol-minimal.c b/tools/perf/util/symbol-minimal.c
index fa585c63f56..d7efb03b3f9 100644
--- a/tools/perf/util/symbol-minimal.c
+++ b/tools/perf/util/symbol-minimal.c
@@ -129,6 +129,7 @@ int filename__read_build_id(const char *filename, void *bf, size_t size)
for (i = 0, phdr = buf; i < ehdr.e_phnum; i++, phdr++) {
void *tmp;
+ long offset;
if (need_swap) {
phdr->p_type = bswap_32(phdr->p_type);
@@ -140,12 +141,13 @@ int filename__read_build_id(const char *filename, void *bf, size_t size)
continue;
buf_size = phdr->p_filesz;
+ offset = phdr->p_offset;
tmp = realloc(buf, buf_size);
if (tmp == NULL)
goto out_free;
buf = tmp;
- fseek(fp, phdr->p_offset, SEEK_SET);
+ fseek(fp, offset, SEEK_SET);
if (fread(buf, buf_size, 1, fp) != 1)
goto out_free;
@@ -178,6 +180,7 @@ int filename__read_build_id(const char *filename, void *bf, size_t size)
for (i = 0, phdr = buf; i < ehdr.e_phnum; i++, phdr++) {
void *tmp;
+ long offset;
if (need_swap) {
phdr->p_type = bswap_32(phdr->p_type);
@@ -189,12 +192,13 @@ int filename__read_build_id(const char *filename, void *bf, size_t size)
continue;
buf_size = phdr->p_filesz;
+ offset = phdr->p_offset;
tmp = realloc(buf, buf_size);
if (tmp == NULL)
goto out_free;
buf = tmp;
- fseek(fp, phdr->p_offset, SEEK_SET);
+ fseek(fp, offset, SEEK_SET);
if (fread(buf, buf_size, 1, fp) != 1)
goto out_free;
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index d5eab3f3323..b86744f29ee 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -442,23 +442,6 @@ unsigned long parse_tag_value(const char *str, struct parse_tag *tags)
return (unsigned long) -1;
}
-int filename__read_int(const char *filename, int *value)
-{
- char line[64];
- int fd = open(filename, O_RDONLY), err = -1;
-
- if (fd < 0)
- return -1;
-
- if (read(fd, line, sizeof(line)) > 0) {
- *value = atoi(line);
- err = 0;
- }
-
- close(fd);
- return err;
-}
-
int filename__read_str(const char *filename, char **buf, size_t *sizep)
{
size_t size = 0, alloc_size = 0;
@@ -523,16 +506,9 @@ const char *get_filename_for_perf_kvm(void)
int perf_event_paranoid(void)
{
- char path[PATH_MAX];
- const char *procfs = procfs__mountpoint();
int value;
- if (!procfs)
- return INT_MAX;
-
- scnprintf(path, PATH_MAX, "%s/sys/kernel/perf_event_paranoid", procfs);
-
- if (filename__read_int(path, &value))
+ if (sysctl__read_int("kernel/perf_event_paranoid", &value))
return INT_MAX;
return value;
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 419bee030f8..027a5153495 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -153,7 +153,7 @@ extern void warning(const char *err, ...) __attribute__((format (printf, 1, 2)))
extern void set_die_routine(void (*routine)(const char *err, va_list params) NORETURN);
extern int prefixcmp(const char *str, const char *prefix);
-extern void set_buildid_dir(void);
+extern void set_buildid_dir(const char *dir);
static inline const char *skip_prefix(const char *str, const char *prefix)
{
@@ -269,35 +269,6 @@ void event_attr_init(struct perf_event_attr *attr);
#define _STR(x) #x
#define STR(x) _STR(x)
-/*
- * Determine whether some value is a power of two, where zero is
- * *not* considered a power of two.
- */
-
-static inline __attribute__((const))
-bool is_power_of_2(unsigned long n)
-{
- return (n != 0 && ((n & (n - 1)) == 0));
-}
-
-static inline unsigned next_pow2(unsigned x)
-{
- if (!x)
- return 1;
- return 1ULL << (32 - __builtin_clz(x - 1));
-}
-
-static inline unsigned long next_pow2_l(unsigned long x)
-{
-#if BITS_PER_LONG == 64
- if (x <= (1UL << 31))
- return next_pow2(x);
- return (unsigned long)next_pow2(x >> 32) << 32;
-#else
- return next_pow2(x);
-#endif
-}
-
size_t hex_width(u64 v);
int hex2u64(const char *ptr, u64 *val);
@@ -339,11 +310,10 @@ static inline int path__join3(char *bf, size_t size,
struct dso;
struct symbol;
-char *get_srcline(struct dso *dso, unsigned long addr, struct symbol *sym,
+char *get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
bool show_sym);
void free_srcline(char *srcline);
-int filename__read_int(const char *filename, int *value);
int filename__read_str(const char *filename, char **buf, size_t *sizep);
int perf_event_paranoid(void);
diff --git a/tools/power/cpupower/utils/cpuidle-info.c b/tools/power/cpupower/utils/cpuidle-info.c
index 458d69b444a..75e66de7e7a 100644
--- a/tools/power/cpupower/utils/cpuidle-info.c
+++ b/tools/power/cpupower/utils/cpuidle-info.c
@@ -22,13 +22,13 @@
static void cpuidle_cpu_output(unsigned int cpu, int verbose)
{
- int idlestates, idlestate;
+ unsigned int idlestates, idlestate;
char *tmp;
printf(_ ("Analyzing CPU %d:\n"), cpu);
idlestates = sysfs_get_idlestate_count(cpu);
- if (idlestates < 1) {
+ if (idlestates == 0) {
printf(_("CPU %u: No idle states\n"), cpu);
return;
}
@@ -100,10 +100,10 @@ static void cpuidle_general_output(void)
static void proc_cpuidle_cpu_output(unsigned int cpu)
{
long max_allowed_cstate = 2000000000;
- int cstate, cstates;
+ unsigned int cstate, cstates;
cstates = sysfs_get_idlestate_count(cpu);
- if (cstates < 1) {
+ if (cstates == 0) {
printf(_("CPU %u: No C-states info\n"), cpu);
return;
}
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index c14893b501a..4e511221a0c 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -1,21 +1,23 @@
TARGETS = breakpoints
TARGETS += cpu-hotplug
TARGETS += efivarfs
+TARGETS += exec
+TARGETS += firmware
+TARGETS += ftrace
TARGETS += kcmp
TARGETS += memfd
TARGETS += memory-hotplug
-TARGETS += mqueue
TARGETS += mount
+TARGETS += mqueue
TARGETS += net
+TARGETS += powerpc
TARGETS += ptrace
+TARGETS += size
+TARGETS += sysctl
TARGETS += timers
-TARGETS += vm
-TARGETS += powerpc
TARGETS += user
-TARGETS += sysctl
-TARGETS += firmware
-TARGETS += ftrace
-TARGETS += exec
+TARGETS += vm
+#Please keep the TARGETS list alphabetically sorted
TARGETS_HOTPLUG = cpu-hotplug
TARGETS_HOTPLUG += memory-hotplug
diff --git a/tools/testing/selftests/breakpoints/breakpoint_test.c b/tools/testing/selftests/breakpoints/breakpoint_test.c
index a0743f3b2b5..120895ab550 100644
--- a/tools/testing/selftests/breakpoints/breakpoint_test.c
+++ b/tools/testing/selftests/breakpoints/breakpoint_test.c
@@ -17,6 +17,8 @@
#include <sys/types.h>
#include <sys/wait.h>
+#include "../kselftest.h"
+
/* Breakpoint access modes */
enum {
@@ -42,7 +44,7 @@ static void set_breakpoint_addr(void *addr, int n)
offsetof(struct user, u_debugreg[n]), addr);
if (ret) {
perror("Can't set breakpoint addr\n");
- exit(-1);
+ ksft_exit_fail();
}
}
@@ -105,7 +107,7 @@ static void toggle_breakpoint(int n, int type, int len,
offsetof(struct user, u_debugreg[7]), dr7);
if (ret) {
perror("Can't set dr7");
- exit(-1);
+ ksft_exit_fail();
}
}
@@ -275,7 +277,7 @@ static void check_success(const char *msg)
msg2 = "Ok";
if (ptrace(PTRACE_POKEDATA, child_pid, &trapped, 1)) {
perror("Can't poke\n");
- exit(-1);
+ ksft_exit_fail();
}
}
@@ -390,5 +392,5 @@ int main(int argc, char **argv)
wait(NULL);
- return 0;
+ return ksft_exit_pass();
}
diff --git a/tools/testing/selftests/ipc/msgque.c b/tools/testing/selftests/ipc/msgque.c
index 552f0810bff..1b2ce334bb3 100644
--- a/tools/testing/selftests/ipc/msgque.c
+++ b/tools/testing/selftests/ipc/msgque.c
@@ -5,6 +5,8 @@
#include <linux/msg.h>
#include <fcntl.h>
+#include "../kselftest.h"
+
#define MAX_MSG_SIZE 32
struct msg1 {
@@ -195,58 +197,58 @@ int main(int argc, char **argv)
if (getuid() != 0) {
printf("Please run the test as root - Exiting.\n");
- exit(1);
+ return ksft_exit_fail();
}
msgque.key = ftok(argv[0], 822155650);
if (msgque.key == -1) {
- printf("Can't make key\n");
- return -errno;
+ printf("Can't make key: %d\n", -errno);
+ return ksft_exit_fail();
}
msgque.msq_id = msgget(msgque.key, IPC_CREAT | IPC_EXCL | 0666);
if (msgque.msq_id == -1) {
err = -errno;
- printf("Can't create queue\n");
+ printf("Can't create queue: %d\n", err);
goto err_out;
}
err = fill_msgque(&msgque);
if (err) {
- printf("Failed to fill queue\n");
+ printf("Failed to fill queue: %d\n", err);
goto err_destroy;
}
err = dump_queue(&msgque);
if (err) {
- printf("Failed to dump queue\n");
+ printf("Failed to dump queue: %d\n", err);
goto err_destroy;
}
err = check_and_destroy_queue(&msgque);
if (err) {
- printf("Failed to check and destroy queue\n");
+ printf("Failed to check and destroy queue: %d\n", err);
goto err_out;
}
err = restore_queue(&msgque);
if (err) {
- printf("Failed to restore queue\n");
+ printf("Failed to restore queue: %d\n", err);
goto err_destroy;
}
err = check_and_destroy_queue(&msgque);
if (err) {
- printf("Failed to test queue\n");
+ printf("Failed to test queue: %d\n", err);
goto err_out;
}
- return 0;
+ return ksft_exit_pass();
err_destroy:
if (msgctl(msgque.msq_id, IPC_RMID, 0)) {
printf("Failed to destroy queue: %d\n", -errno);
- return -errno;
+ return ksft_exit_fail();
}
err_out:
- return err;
+ return ksft_exit_fail();
}
diff --git a/tools/testing/selftests/kcmp/Makefile b/tools/testing/selftests/kcmp/Makefile
index 8aabd82db9e..ff0eefdc6ce 100644
--- a/tools/testing/selftests/kcmp/Makefile
+++ b/tools/testing/selftests/kcmp/Makefile
@@ -1,25 +1,7 @@
-uname_M := $(shell uname -m 2>/dev/null || echo not)
-ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/)
-ifeq ($(ARCH),i386)
- ARCH := x86
- CFLAGS := -DCONFIG_X86_32 -D__i386__
-endif
-ifeq ($(ARCH),x86_64)
- ARCH := x86
- CFLAGS := -DCONFIG_X86_64 -D__x86_64__
-endif
-
-CFLAGS += -I../../../../arch/x86/include/generated/
-CFLAGS += -I../../../../include/
+CC := $(CROSS_COMPILE)$(CC)
CFLAGS += -I../../../../usr/include/
-CFLAGS += -I../../../../arch/x86/include/
-all:
-ifeq ($(ARCH),x86)
- gcc $(CFLAGS) kcmp_test.c -o kcmp_test
-else
- echo "Not an x86 target, can't build kcmp selftest"
-endif
+all: kcmp_test
run_tests: all
@./kcmp_test || echo "kcmp_test: [FAIL]"
diff --git a/tools/testing/selftests/kcmp/kcmp_test.c b/tools/testing/selftests/kcmp/kcmp_test.c
index dbba4084869..a5a4da856df 100644
--- a/tools/testing/selftests/kcmp/kcmp_test.c
+++ b/tools/testing/selftests/kcmp/kcmp_test.c
@@ -17,6 +17,8 @@
#include <sys/stat.h>
#include <sys/wait.h>
+#include "../kselftest.h"
+
static long sys_kcmp(int pid1, int pid2, int type, int fd1, int fd2)
{
return syscall(__NR_kcmp, pid1, pid2, type, fd1, fd2);
@@ -34,13 +36,13 @@ int main(int argc, char **argv)
if (fd1 < 0) {
perror("Can't create file");
- exit(1);
+ ksft_exit_fail();
}
pid2 = fork();
if (pid2 < 0) {
perror("fork failed");
- exit(1);
+ ksft_exit_fail();
}
if (!pid2) {
@@ -50,7 +52,7 @@ int main(int argc, char **argv)
fd2 = open(kpath, O_RDWR, 0644);
if (fd2 < 0) {
perror("Can't open file");
- exit(1);
+ ksft_exit_fail();
}
/* An example of output and arguments */
@@ -74,23 +76,34 @@ int main(int argc, char **argv)
if (ret) {
printf("FAIL: 0 expected but %d returned (%s)\n",
ret, strerror(errno));
+ ksft_inc_fail_cnt();
ret = -1;
- } else
+ } else {
printf("PASS: 0 returned as expected\n");
+ ksft_inc_pass_cnt();
+ }
/* Compare with self */
ret = sys_kcmp(pid1, pid1, KCMP_VM, 0, 0);
if (ret) {
printf("FAIL: 0 expected but %d returned (%s)\n",
ret, strerror(errno));
+ ksft_inc_fail_cnt();
ret = -1;
- } else
+ } else {
printf("PASS: 0 returned as expected\n");
+ ksft_inc_pass_cnt();
+ }
+
+ ksft_print_cnts();
- exit(ret);
+ if (ret)
+ ksft_exit_fail();
+ else
+ ksft_exit_pass();
}
waitpid(pid2, &status, P_ALL);
- return 0;
+ return ksft_exit_pass();
}
diff --git a/tools/testing/selftests/kselftest.h b/tools/testing/selftests/kselftest.h
new file mode 100644
index 00000000000..572c8888167
--- /dev/null
+++ b/tools/testing/selftests/kselftest.h
@@ -0,0 +1,62 @@
+/*
+ * kselftest.h: kselftest framework return codes to include from
+ * selftests.
+ *
+ * Copyright (c) 2014 Shuah Khan <shuahkh@osg.samsung.com>
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ *
+ * This file is released under the GPLv2.
+ */
+#ifndef __KSELFTEST_H
+#define __KSELFTEST_H
+
+#include <stdlib.h>
+#include <unistd.h>
+
+/* counters */
+struct ksft_count {
+ unsigned int ksft_pass;
+ unsigned int ksft_fail;
+ unsigned int ksft_xfail;
+ unsigned int ksft_xpass;
+ unsigned int ksft_xskip;
+};
+
+static struct ksft_count ksft_cnt;
+
+static inline void ksft_inc_pass_cnt(void) { ksft_cnt.ksft_pass++; }
+static inline void ksft_inc_fail_cnt(void) { ksft_cnt.ksft_fail++; }
+static inline void ksft_inc_xfail_cnt(void) { ksft_cnt.ksft_xfail++; }
+static inline void ksft_inc_xpass_cnt(void) { ksft_cnt.ksft_xpass++; }
+static inline void ksft_inc_xskip_cnt(void) { ksft_cnt.ksft_xskip++; }
+
+static inline void ksft_print_cnts(void)
+{
+	printf("Pass: %d Fail: %d Xfail: %d Xpass: %d Xskip: %d\n",
+ ksft_cnt.ksft_pass, ksft_cnt.ksft_fail,
+ ksft_cnt.ksft_xfail, ksft_cnt.ksft_xpass,
+ ksft_cnt.ksft_xskip);
+}
+
+static inline int ksft_exit_pass(void)
+{
+ exit(0);
+}
+static inline int ksft_exit_fail(void)
+{
+ exit(1);
+}
+static inline int ksft_exit_xfail(void)
+{
+ exit(2);
+}
+static inline int ksft_exit_xpass(void)
+{
+ exit(3);
+}
+static inline int ksft_exit_skip(void)
+{
+ exit(4);
+}
+
+#endif /* __KSELFTEST_H */
diff --git a/tools/testing/selftests/mount/unprivileged-remount-test.c b/tools/testing/selftests/mount/unprivileged-remount-test.c
index 1b3ff2fda4d..517785052f1 100644
--- a/tools/testing/selftests/mount/unprivileged-remount-test.c
+++ b/tools/testing/selftests/mount/unprivileged-remount-test.c
@@ -6,6 +6,8 @@
#include <sys/types.h>
#include <sys/mount.h>
#include <sys/wait.h>
+#include <sys/vfs.h>
+#include <sys/statvfs.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
@@ -32,11 +34,14 @@
# define CLONE_NEWPID 0x20000000
#endif
+#ifndef MS_REC
+# define MS_REC 16384
+#endif
#ifndef MS_RELATIME
-#define MS_RELATIME (1 << 21)
+# define MS_RELATIME (1 << 21)
#endif
#ifndef MS_STRICTATIME
-#define MS_STRICTATIME (1 << 24)
+# define MS_STRICTATIME (1 << 24)
#endif
static void die(char *fmt, ...)
@@ -48,17 +53,14 @@ static void die(char *fmt, ...)
exit(EXIT_FAILURE);
}
-static void write_file(char *filename, char *fmt, ...)
+static void vmaybe_write_file(bool enoent_ok, char *filename, char *fmt, va_list ap)
{
char buf[4096];
int fd;
ssize_t written;
int buf_len;
- va_list ap;
- va_start(ap, fmt);
buf_len = vsnprintf(buf, sizeof(buf), fmt, ap);
- va_end(ap);
if (buf_len < 0) {
die("vsnprintf failed: %s\n",
strerror(errno));
@@ -69,6 +71,8 @@ static void write_file(char *filename, char *fmt, ...)
fd = open(filename, O_WRONLY);
if (fd < 0) {
+ if ((errno == ENOENT) && enoent_ok)
+ return;
die("open of %s failed: %s\n",
filename, strerror(errno));
}
@@ -87,6 +91,65 @@ static void write_file(char *filename, char *fmt, ...)
}
}
+static void maybe_write_file(char *filename, char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ vmaybe_write_file(true, filename, fmt, ap);
+ va_end(ap);
+
+}
+
+static void write_file(char *filename, char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ vmaybe_write_file(false, filename, fmt, ap);
+ va_end(ap);
+
+}
+
+static int read_mnt_flags(const char *path)
+{
+ int ret;
+ struct statvfs stat;
+ int mnt_flags;
+
+ ret = statvfs(path, &stat);
+ if (ret != 0) {
+ die("statvfs of %s failed: %s\n",
+ path, strerror(errno));
+ }
+ if (stat.f_flag & ~(ST_RDONLY | ST_NOSUID | ST_NODEV | \
+ ST_NOEXEC | ST_NOATIME | ST_NODIRATIME | ST_RELATIME | \
+ ST_SYNCHRONOUS | ST_MANDLOCK)) {
+ die("Unrecognized mount flags\n");
+ }
+ mnt_flags = 0;
+ if (stat.f_flag & ST_RDONLY)
+ mnt_flags |= MS_RDONLY;
+ if (stat.f_flag & ST_NOSUID)
+ mnt_flags |= MS_NOSUID;
+ if (stat.f_flag & ST_NODEV)
+ mnt_flags |= MS_NODEV;
+ if (stat.f_flag & ST_NOEXEC)
+ mnt_flags |= MS_NOEXEC;
+ if (stat.f_flag & ST_NOATIME)
+ mnt_flags |= MS_NOATIME;
+ if (stat.f_flag & ST_NODIRATIME)
+ mnt_flags |= MS_NODIRATIME;
+ if (stat.f_flag & ST_RELATIME)
+ mnt_flags |= MS_RELATIME;
+ if (stat.f_flag & ST_SYNCHRONOUS)
+ mnt_flags |= MS_SYNCHRONOUS;
+ if (stat.f_flag & ST_MANDLOCK)
+		mnt_flags |= MS_MANDLOCK;
+
+ return mnt_flags;
+}
+
static void create_and_enter_userns(void)
{
uid_t uid;
@@ -100,13 +163,10 @@ static void create_and_enter_userns(void)
strerror(errno));
}
+ maybe_write_file("/proc/self/setgroups", "deny");
write_file("/proc/self/uid_map", "0 %d 1", uid);
write_file("/proc/self/gid_map", "0 %d 1", gid);
- if (setgroups(0, NULL) != 0) {
- die("setgroups failed: %s\n",
- strerror(errno));
- }
if (setgid(0) != 0) {
die ("setgid(0) failed %s\n",
strerror(errno));
@@ -118,7 +178,8 @@ static void create_and_enter_userns(void)
}
static
-bool test_unpriv_remount(int mount_flags, int remount_flags, int invalid_flags)
+bool test_unpriv_remount(const char *fstype, const char *mount_options,
+ int mount_flags, int remount_flags, int invalid_flags)
{
pid_t child;
@@ -151,9 +212,11 @@ bool test_unpriv_remount(int mount_flags, int remount_flags, int invalid_flags)
strerror(errno));
}
- if (mount("testing", "/tmp", "ramfs", mount_flags, NULL) != 0) {
- die("mount of /tmp failed: %s\n",
- strerror(errno));
+ if (mount("testing", "/tmp", fstype, mount_flags, mount_options) != 0) {
+ die("mount of %s with options '%s' on /tmp failed: %s\n",
+ fstype,
+ mount_options? mount_options : "",
+ strerror(errno));
}
create_and_enter_userns();
@@ -181,62 +244,127 @@ bool test_unpriv_remount(int mount_flags, int remount_flags, int invalid_flags)
static bool test_unpriv_remount_simple(int mount_flags)
{
- return test_unpriv_remount(mount_flags, mount_flags, 0);
+ return test_unpriv_remount("ramfs", NULL, mount_flags, mount_flags, 0);
}
static bool test_unpriv_remount_atime(int mount_flags, int invalid_flags)
{
- return test_unpriv_remount(mount_flags, mount_flags, invalid_flags);
+ return test_unpriv_remount("ramfs", NULL, mount_flags, mount_flags,
+ invalid_flags);
+}
+
+static bool test_priv_mount_unpriv_remount(void)
+{
+ pid_t child;
+ int ret;
+ const char *orig_path = "/dev";
+ const char *dest_path = "/tmp";
+ int orig_mnt_flags, remount_mnt_flags;
+
+ child = fork();
+ if (child == -1) {
+ die("fork failed: %s\n",
+ strerror(errno));
+ }
+ if (child != 0) { /* parent */
+ pid_t pid;
+ int status;
+ pid = waitpid(child, &status, 0);
+ if (pid == -1) {
+ die("waitpid failed: %s\n",
+ strerror(errno));
+ }
+ if (pid != child) {
+ die("waited for %d got %d\n",
+ child, pid);
+ }
+ if (!WIFEXITED(status)) {
+ die("child did not terminate cleanly\n");
+ }
+ return WEXITSTATUS(status) == EXIT_SUCCESS ? true : false;
+ }
+
+ orig_mnt_flags = read_mnt_flags(orig_path);
+
+ create_and_enter_userns();
+ ret = unshare(CLONE_NEWNS);
+ if (ret != 0) {
+ die("unshare(CLONE_NEWNS) failed: %s\n",
+ strerror(errno));
+ }
+
+ ret = mount(orig_path, dest_path, "bind", MS_BIND | MS_REC, NULL);
+ if (ret != 0) {
+ die("recursive bind mount of %s onto %s failed: %s\n",
+ orig_path, dest_path, strerror(errno));
+ }
+
+ ret = mount(dest_path, dest_path, "none",
+ MS_REMOUNT | MS_BIND | orig_mnt_flags , NULL);
+ if (ret != 0) {
+ /* system("cat /proc/self/mounts"); */
+ die("remount of /tmp failed: %s\n",
+ strerror(errno));
+ }
+
+ remount_mnt_flags = read_mnt_flags(dest_path);
+ if (orig_mnt_flags != remount_mnt_flags) {
+ die("Mount flags unexpectedly changed during remount of %s originally mounted on %s\n",
+ dest_path, orig_path);
+ }
+ exit(EXIT_SUCCESS);
}
int main(int argc, char **argv)
{
- if (!test_unpriv_remount_simple(MS_RDONLY|MS_NODEV)) {
+ if (!test_unpriv_remount_simple(MS_RDONLY)) {
die("MS_RDONLY malfunctions\n");
}
- if (!test_unpriv_remount_simple(MS_NODEV)) {
+ if (!test_unpriv_remount("devpts", "newinstance", MS_NODEV, MS_NODEV, 0)) {
die("MS_NODEV malfunctions\n");
}
- if (!test_unpriv_remount_simple(MS_NOSUID|MS_NODEV)) {
+ if (!test_unpriv_remount_simple(MS_NOSUID)) {
die("MS_NOSUID malfunctions\n");
}
- if (!test_unpriv_remount_simple(MS_NOEXEC|MS_NODEV)) {
+ if (!test_unpriv_remount_simple(MS_NOEXEC)) {
die("MS_NOEXEC malfunctions\n");
}
- if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODEV,
- MS_NOATIME|MS_NODEV))
+ if (!test_unpriv_remount_atime(MS_RELATIME,
+ MS_NOATIME))
{
die("MS_RELATIME malfunctions\n");
}
- if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODEV,
- MS_NOATIME|MS_NODEV))
+ if (!test_unpriv_remount_atime(MS_STRICTATIME,
+ MS_NOATIME))
{
die("MS_STRICTATIME malfunctions\n");
}
- if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODEV,
- MS_STRICTATIME|MS_NODEV))
+ if (!test_unpriv_remount_atime(MS_NOATIME,
+ MS_STRICTATIME))
{
- die("MS_RELATIME malfunctions\n");
+ die("MS_NOATIME malfunctions\n");
}
- if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODIRATIME|MS_NODEV,
- MS_NOATIME|MS_NODEV))
+ if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODIRATIME,
+ MS_NOATIME))
{
- die("MS_RELATIME malfunctions\n");
+ die("MS_RELATIME|MS_NODIRATIME malfunctions\n");
}
- if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODIRATIME|MS_NODEV,
- MS_NOATIME|MS_NODEV))
+ if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODIRATIME,
+ MS_NOATIME))
{
- die("MS_RELATIME malfunctions\n");
+ die("MS_STRICTATIME|MS_NODIRATIME malfunctions\n");
}
- if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODIRATIME|MS_NODEV,
- MS_STRICTATIME|MS_NODEV))
+ if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODIRATIME,
+ MS_STRICTATIME))
{
- die("MS_RELATIME malfunctions\n");
+		die("MS_NOATIME|MS_NODIRATIME malfunctions\n");
}
- if (!test_unpriv_remount(MS_STRICTATIME|MS_NODEV, MS_NODEV,
- MS_NOATIME|MS_NODEV))
+ if (!test_unpriv_remount("ramfs", NULL, MS_STRICTATIME, 0, MS_NOATIME))
{
die("Default atime malfunctions\n");
}
+ if (!test_priv_mount_unpriv_remount()) {
+ die("Mount flags unexpectedly changed after remount\n");
+ }
return EXIT_SUCCESS;
}
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index c7493b8f9b0..62f22cc9941 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -14,12 +14,6 @@ all: $(NET_PROGS)
run_tests: all
@/bin/sh ./run_netsocktests || echo "sockettests: [FAIL]"
@/bin/sh ./run_afpackettests || echo "afpackettests: [FAIL]"
- @if /sbin/modprobe test_bpf ; then \
- /sbin/rmmod test_bpf; \
- echo "test_bpf: ok"; \
- else \
- echo "test_bpf: [FAIL]"; \
- exit 1; \
- fi
+ ./test_bpf.sh
clean:
$(RM) $(NET_PROGS)
diff --git a/tools/testing/selftests/net/test_bpf.sh b/tools/testing/selftests/net/test_bpf.sh
new file mode 100755
index 00000000000..8b29796d46a
--- /dev/null
+++ b/tools/testing/selftests/net/test_bpf.sh
@@ -0,0 +1,10 @@
+#!/bin/sh
+# Runs bpf test using test_bpf kernel module
+
+if /sbin/modprobe -q test_bpf ; then
+ /sbin/modprobe -q -r test_bpf;
+ echo "test_bpf: ok";
+else
+ echo "test_bpf: [FAIL]";
+ exit 1;
+fi
diff --git a/tools/testing/selftests/size/.gitignore b/tools/testing/selftests/size/.gitignore
new file mode 100644
index 00000000000..189b7818de3
--- /dev/null
+++ b/tools/testing/selftests/size/.gitignore
@@ -0,0 +1 @@
+get_size
diff --git a/tools/testing/selftests/size/Makefile b/tools/testing/selftests/size/Makefile
new file mode 100644
index 00000000000..04dc25e4fa9
--- /dev/null
+++ b/tools/testing/selftests/size/Makefile
@@ -0,0 +1,12 @@
+CC = $(CROSS_COMPILE)gcc
+
+all: get_size
+
+get_size: get_size.c
+ $(CC) -static -ffreestanding -nostartfiles -s $< -o $@
+
+run_tests: all
+ ./get_size
+
+clean:
+ $(RM) get_size
diff --git a/tools/testing/selftests/size/get_size.c b/tools/testing/selftests/size/get_size.c
new file mode 100644
index 00000000000..2d1af7cca46
--- /dev/null
+++ b/tools/testing/selftests/size/get_size.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2014 Sony Mobile Communications Inc.
+ *
+ * Licensed under the terms of the GNU GPL License version 2
+ *
+ * Selftest for runtime system size
+ *
+ * Prints the amount of RAM that the currently running system is using.
+ *
+ * This program tries to be as small as possible itself, to
+ * avoid perturbing the system memory utilization with its
+ * own execution. It also attempts to have as few dependencies
+ * on kernel features as possible.
+ *
+ * It should be statically linked, with startup libs avoided.
+ * It uses no library calls, and only the following 3 syscalls:
+ * sysinfo(), write(), and _exit()
+ *
+ * For output, it avoids printf (which in some C libraries
+ * has large external dependencies) by implementing it's own
+ * number output and print routines, and using __builtin_strlen()
+ */
+
+#include <sys/sysinfo.h>
+#include <unistd.h>
+
+#define STDOUT_FILENO 1
+
+static int print(const char *s)
+{
+ return write(STDOUT_FILENO, s, __builtin_strlen(s));
+}
+
+static inline char *num_to_str(unsigned long num, char *buf, int len)
+{
+ unsigned int digit;
+
+ /* put digits in buffer from back to front */
+ buf += len - 1;
+ *buf = 0;
+ do {
+ digit = num % 10;
+ *(--buf) = digit + '0';
+ num /= 10;
+ } while (num > 0);
+
+ return buf;
+}
+
+static int print_num(unsigned long num)
+{
+ char num_buf[30];
+
+ return print(num_to_str(num, num_buf, sizeof(num_buf)));
+}
+
+static int print_k_value(const char *s, unsigned long num, unsigned long units)
+{
+ unsigned long long temp;
+ int ccode;
+
+ print(s);
+
+ temp = num;
+ temp = (temp * units)/1024;
+ num = temp;
+ ccode = print_num(num);
+ print("\n");
+ return ccode;
+}
+
+/* this program has no main(), as startup libraries are not used */
+void _start(void)
+{
+ int ccode;
+ struct sysinfo info;
+ unsigned long used;
+
+ print("Testing system size.\n");
+ print("1..1\n");
+
+ ccode = sysinfo(&info);
+ if (ccode < 0) {
+ print("not ok 1 get runtime memory use\n");
+ print("# could not get sysinfo\n");
+ _exit(ccode);
+ }
+ /* ignore cache complexities for now */
+ used = info.totalram - info.freeram - info.bufferram;
+ print_k_value("ok 1 get runtime memory use # size = ", used,
+ info.mem_unit);
+
+ print("# System runtime memory report (units in Kilobytes):\n");
+ print_k_value("# Total: ", info.totalram, info.mem_unit);
+ print_k_value("# Free: ", info.freeram, info.mem_unit);
+ print_k_value("# Buffer: ", info.bufferram, info.mem_unit);
+ print_k_value("# In use: ", used, info.mem_unit);
+
+ _exit(0);
+}
diff --git a/tools/testing/selftests/timers/posix_timers.c b/tools/testing/selftests/timers/posix_timers.c
index 41bd85559d4..f87d970a485 100644
--- a/tools/testing/selftests/timers/posix_timers.c
+++ b/tools/testing/selftests/timers/posix_timers.c
@@ -15,6 +15,8 @@
#include <time.h>
#include <pthread.h>
+#include "../kselftest.h"
+
#define DELAY 2
#define USECS_PER_SEC 1000000
@@ -194,16 +196,16 @@ int main(int argc, char **argv)
printf("based timers if other threads run on the CPU...\n");
if (check_itimer(ITIMER_VIRTUAL) < 0)
- return -1;
+ return ksft_exit_fail();
if (check_itimer(ITIMER_PROF) < 0)
- return -1;
+ return ksft_exit_fail();
if (check_itimer(ITIMER_REAL) < 0)
- return -1;
+ return ksft_exit_fail();
if (check_timer_create(CLOCK_THREAD_CPUTIME_ID) < 0)
- return -1;
+ return ksft_exit_fail();
/*
* It's unfortunately hard to reliably test a timer expiration
@@ -215,7 +217,7 @@ int main(int argc, char **argv)
* find a better solution.
*/
if (check_timer_create(CLOCK_PROCESS_CPUTIME_ID) < 0)
- return -1;
+ return ksft_exit_fail();
- return 0;
+ return ksft_exit_pass();
}
diff --git a/tools/testing/selftests/user/Makefile b/tools/testing/selftests/user/Makefile
index 396255bd720..12c9d15bab0 100644
--- a/tools/testing/selftests/user/Makefile
+++ b/tools/testing/selftests/user/Makefile
@@ -4,10 +4,4 @@
all:
run_tests: all
- @if /sbin/modprobe test_user_copy ; then \
- rmmod test_user_copy; \
- echo "user_copy: ok"; \
- else \
- echo "user_copy: [FAIL]"; \
- exit 1; \
- fi
+ ./test_user_copy.sh
diff --git a/tools/testing/selftests/user/test_user_copy.sh b/tools/testing/selftests/user/test_user_copy.sh
new file mode 100755
index 00000000000..350107f40c1
--- /dev/null
+++ b/tools/testing/selftests/user/test_user_copy.sh
@@ -0,0 +1,10 @@
+#!/bin/sh
+# Runs copy_to/from_user infrastructure tests using the test_user_copy kernel module
+
+if /sbin/modprobe -q test_user_copy; then
+ /sbin/modprobe -q -r test_user_copy
+ echo "user_copy: ok"
+else
+ echo "user_copy: [FAIL]"
+ exit 1
+fi
diff --git a/tools/thermal/tmon/sysfs.c b/tools/thermal/tmon/sysfs.c
index dfe454855cd..1c12536f208 100644
--- a/tools/thermal/tmon/sysfs.c
+++ b/tools/thermal/tmon/sysfs.c
@@ -446,7 +446,7 @@ int probe_thermal_sysfs(void)
return -1;
}
- ptdata.tzi = calloc(sizeof(struct tz_info), ptdata.max_tz_instance+1);
+ ptdata.tzi = calloc(ptdata.max_tz_instance+1, sizeof(struct tz_info));
if (!ptdata.tzi) {
fprintf(stderr, "Err: allocate tz_info\n");
return -1;
@@ -454,8 +454,8 @@ int probe_thermal_sysfs(void)
/* we still show thermal zone information if there is no cdev */
if (ptdata.nr_cooling_dev) {
- ptdata.cdi = calloc(sizeof(struct cdev_info),
- ptdata.max_cdev_instance + 1);
+ ptdata.cdi = calloc(ptdata.max_cdev_instance + 1,
+ sizeof(struct cdev_info));
if (!ptdata.cdi) {
free(ptdata.tzi);
fprintf(stderr, "Err: allocate cdev_info\n");
diff --git a/tools/virtio/Makefile b/tools/virtio/Makefile
index 9325f469382..505ad51b3b5 100644
--- a/tools/virtio/Makefile
+++ b/tools/virtio/Makefile
@@ -3,7 +3,7 @@ test: virtio_test vringh_test
virtio_test: virtio_ring.o virtio_test.o
vringh_test: vringh_test.o vringh.o virtio_ring.o
-CFLAGS += -g -O2 -Wall -I. -I../include/ -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE
+CFLAGS += -g -O2 -Werror -Wall -I. -I../include/ -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE
vpath %.c ../../drivers/virtio ../../drivers/vhost
mod:
${MAKE} -C `pwd`/../.. M=`pwd`/vhost_test
diff --git a/tools/virtio/linux/virtio.h b/tools/virtio/linux/virtio.h
index 8eb6421761c..a3e07016a44 100644
--- a/tools/virtio/linux/virtio.h
+++ b/tools/virtio/linux/virtio.h
@@ -6,6 +6,7 @@
/* TODO: empty stubs for now. Broken but enough for virtio_ring.c */
#define list_add_tail(a, b) do {} while (0)
#define list_del(a) do {} while (0)
+#define list_for_each_entry(a, b, c) while (0)
/* end of stubs */
struct virtio_device {
diff --git a/tools/virtio/linux/virtio_byteorder.h b/tools/virtio/linux/virtio_byteorder.h
new file mode 100644
index 00000000000..9de9e6ac1d1
--- /dev/null
+++ b/tools/virtio/linux/virtio_byteorder.h
@@ -0,0 +1,8 @@
+#ifndef _LINUX_VIRTIO_BYTEORDER_STUB_H
+#define _LINUX_VIRTIO_BYTEORDER_STUB_H
+
+#include <asm/byteorder.h>
+#include "../../include/linux/byteorder/generic.h"
+#include "../../include/linux/virtio_byteorder.h"
+
+#endif
diff --git a/tools/virtio/linux/virtio_config.h b/tools/virtio/linux/virtio_config.h
index 83b27e8e9d7..806d683ab10 100644
--- a/tools/virtio/linux/virtio_config.h
+++ b/tools/virtio/linux/virtio_config.h
@@ -1,6 +1,72 @@
-#define VIRTIO_TRANSPORT_F_START 28
-#define VIRTIO_TRANSPORT_F_END 32
+#include <linux/virtio_byteorder.h>
+#include <linux/virtio.h>
+#include <uapi/linux/virtio_config.h>
+
+/*
+ * __virtio_test_bit - helper to test feature bits. For use by transports.
+ * Devices should normally use virtio_has_feature,
+ * which includes more checks.
+ * @vdev: the device
+ * @fbit: the feature bit
+ */
+static inline bool __virtio_test_bit(const struct virtio_device *vdev,
+ unsigned int fbit)
+{
+ return vdev->features & (1ULL << fbit);
+}
+
+/**
+ * __virtio_set_bit - helper to set feature bits. For use by transports.
+ * @vdev: the device
+ * @fbit: the feature bit
+ */
+static inline void __virtio_set_bit(struct virtio_device *vdev,
+ unsigned int fbit)
+{
+ vdev->features |= (1ULL << fbit);
+}
+
+/**
+ * __virtio_clear_bit - helper to clear feature bits. For use by transports.
+ * @vdev: the device
+ * @fbit: the feature bit
+ */
+static inline void __virtio_clear_bit(struct virtio_device *vdev,
+ unsigned int fbit)
+{
+ vdev->features &= ~(1ULL << fbit);
+}
#define virtio_has_feature(dev, feature) \
(__virtio_test_bit((dev), feature))
+static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val)
+{
+ return __virtio16_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+}
+
+static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val)
+{
+ return __cpu_to_virtio16(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+}
+
+static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val)
+{
+ return __virtio32_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+}
+
+static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val)
+{
+ return __cpu_to_virtio32(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+}
+
+static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val)
+{
+ return __virtio64_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+}
+
+static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
+{
+ return __cpu_to_virtio64(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+}
+
diff --git a/tools/virtio/uapi/linux/virtio_types.h b/tools/virtio/uapi/linux/virtio_types.h
new file mode 100644
index 00000000000..e7a1096e7c9
--- /dev/null
+++ b/tools/virtio/uapi/linux/virtio_types.h
@@ -0,0 +1 @@
+#include "../../include/uapi/linux/virtio_types.h"
diff --git a/tools/virtio/virtio_test.c b/tools/virtio/virtio_test.c
index db3437c641a..e0445898f08 100644
--- a/tools/virtio/virtio_test.c
+++ b/tools/virtio/virtio_test.c
@@ -11,6 +11,7 @@
#include <sys/types.h>
#include <fcntl.h>
#include <stdbool.h>
+#include <linux/virtio_types.h>
#include <linux/vhost.h>
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
@@ -227,6 +228,14 @@ const struct option longopts[] = {
.val = 'i',
},
{
+ .name = "virtio-1",
+ .val = '1',
+ },
+ {
+ .name = "no-virtio-1",
+ .val = '0',
+ },
+ {
.name = "delayed-interrupt",
.val = 'D',
},
@@ -243,6 +252,7 @@ static void help(void)
fprintf(stderr, "Usage: virtio_test [--help]"
" [--no-indirect]"
" [--no-event-idx]"
+ " [--no-virtio-1]"
" [--delayed-interrupt]"
"\n");
}
@@ -251,7 +261,7 @@ int main(int argc, char **argv)
{
struct vdev_info dev;
unsigned long long features = (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
- (1ULL << VIRTIO_RING_F_EVENT_IDX);
+ (1ULL << VIRTIO_RING_F_EVENT_IDX) | (1ULL << VIRTIO_F_VERSION_1);
int o;
bool delayed = false;
@@ -272,6 +282,9 @@ int main(int argc, char **argv)
case 'i':
features &= ~(1ULL << VIRTIO_RING_F_INDIRECT_DESC);
break;
+ case '0':
+ features &= ~(1ULL << VIRTIO_F_VERSION_1);
+ break;
case 'D':
delayed = true;
break;
diff --git a/tools/virtio/vringh_test.c b/tools/virtio/vringh_test.c
index 9d4b1bca54b..5f94f510567 100644
--- a/tools/virtio/vringh_test.c
+++ b/tools/virtio/vringh_test.c
@@ -7,6 +7,7 @@
#include <linux/virtio.h>
#include <linux/vringh.h>
#include <linux/virtio_ring.h>
+#include <linux/virtio_config.h>
#include <linux/uaccess.h>
#include <sys/types.h>
#include <sys/stat.h>
@@ -131,7 +132,7 @@ static inline int vringh_get_head(struct vringh *vrh, u16 *head)
return 1;
}
-static int parallel_test(unsigned long features,
+static int parallel_test(u64 features,
bool (*getrange)(struct vringh *vrh,
u64 addr, struct vringh_range *r),
bool fast_vringh)
@@ -456,6 +457,8 @@ int main(int argc, char *argv[])
__virtio_set_bit(&vdev, VIRTIO_RING_F_INDIRECT_DESC);
else if (strcmp(argv[1], "--eventidx") == 0)
__virtio_set_bit(&vdev, VIRTIO_RING_F_EVENT_IDX);
+ else if (strcmp(argv[1], "--virtio-1") == 0)
+ __virtio_set_bit(&vdev, VIRTIO_F_VERSION_1);
else if (strcmp(argv[1], "--slow-range") == 0)
getrange = getrange_slow;
else if (strcmp(argv[1], "--fast-vringh") == 0)
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 22fa819a9b6..1c0772b340d 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -61,12 +61,14 @@ static void timer_disarm(struct arch_timer_cpu *timer)
static void kvm_timer_inject_irq(struct kvm_vcpu *vcpu)
{
+ int ret;
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
timer->cntv_ctl |= ARCH_TIMER_CTRL_IT_MASK;
- kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
- timer->irq->irq,
- timer->irq->level);
+ ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
+ timer->irq->irq,
+ timer->irq->level);
+ WARN_ON(ret);
}
static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
@@ -307,12 +309,24 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
timer_disarm(timer);
}
-int kvm_timer_init(struct kvm *kvm)
+void kvm_timer_enable(struct kvm *kvm)
{
- if (timecounter && wqueue) {
- kvm->arch.timer.cntvoff = kvm_phys_timer_read();
+ if (kvm->arch.timer.enabled)
+ return;
+
+ /*
+ * There is a potential race here between VCPUs starting for the first
+ * time, which may be enabling the timer multiple times. That doesn't
+	 * hurt though, because we're just setting a variable to the same
+	 * value it already had. The important thing is that all
+ * VCPUs have the enabled variable set, before entering the guest, if
+ * the arch timers are enabled.
+ */
+ if (timecounter && wqueue)
kvm->arch.timer.enabled = 1;
- }
+}
- return 0;
+void kvm_timer_init(struct kvm *kvm)
+{
+ kvm->arch.timer.cntvoff = kvm_phys_timer_read();
}
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index aacdb59f30d..03affc7bf45 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -91,6 +91,7 @@
#define ACCESS_WRITE_VALUE (3 << 1)
#define ACCESS_WRITE_MASK(x) ((x) & (3 << 1))
+static int vgic_init(struct kvm *kvm);
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
static void vgic_update_state(struct kvm *kvm);
@@ -1607,7 +1608,7 @@ static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
}
}
-static bool vgic_update_irq_pending(struct kvm *kvm, int cpuid,
+static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
unsigned int irq_num, bool level)
{
struct vgic_dist *dist = &kvm->arch.vgic;
@@ -1643,9 +1644,10 @@ static bool vgic_update_irq_pending(struct kvm *kvm, int cpuid,
vgic_dist_irq_clear_level(vcpu, irq_num);
if (!vgic_dist_irq_soft_pend(vcpu, irq_num))
vgic_dist_irq_clear_pending(vcpu, irq_num);
- } else {
- vgic_dist_irq_clear_pending(vcpu, irq_num);
}
+
+ ret = false;
+ goto out;
}
enabled = vgic_irq_is_enabled(vcpu, irq_num);
@@ -1672,7 +1674,7 @@ static bool vgic_update_irq_pending(struct kvm *kvm, int cpuid,
out:
spin_unlock(&dist->lock);
- return ret;
+ return ret ? cpuid : -EINVAL;
}
/**
@@ -1692,11 +1694,26 @@ out:
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
bool level)
{
- if (likely(vgic_initialized(kvm)) &&
- vgic_update_irq_pending(kvm, cpuid, irq_num, level))
- vgic_kick_vcpus(kvm);
+ int ret = 0;
+ int vcpu_id;
- return 0;
+ if (unlikely(!vgic_initialized(kvm))) {
+ mutex_lock(&kvm->lock);
+ ret = vgic_init(kvm);
+ mutex_unlock(&kvm->lock);
+
+ if (ret)
+ goto out;
+ }
+
+ vcpu_id = vgic_update_irq_pending(kvm, cpuid, irq_num, level);
+ if (vcpu_id >= 0) {
+ /* kick the specified vcpu */
+ kvm_vcpu_kick(kvm_get_vcpu(kvm, vcpu_id));
+ }
+
+out:
+ return ret;
}
static irqreturn_t vgic_maintenance_handler(int irq, void *data)
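
kvm_vgic_inject_irq() now initialises the vgic on demand (under kvm->lock) instead of assuming it already is, and vgic_update_irq_pending() reports the target VCPU id (or -EINVAL) so only that VCPU gets kicked, rather than going through vgic_kick_vcpus(). A compact user-space model of the new control flow, with stubbed helpers standing in for the real vgic machinery:

    #include <stdio.h>
    #include <stdbool.h>
    #include <errno.h>

    #define NR_VCPUS 4

    static bool vgic_is_init;          /* models vgic_initialized(kvm) */
    static bool kicked[NR_VCPUS];

    static int vgic_init_model(void)   /* models vgic_init(kvm) */
    {
        vgic_is_init = true;           /* allocation steps elided */
        return 0;
    }

    /* Models vgic_update_irq_pending(): returns a vcpu id, or -EINVAL. */
    static int update_irq_pending(int cpuid, unsigned int irq, bool level)
    {
        (void)irq;                     /* validity/level tracking elided */
        return level ? cpuid : -EINVAL;
    }

    static int inject_irq(int cpuid, unsigned int irq, bool level)
    {
        int vcpu_id;

        if (!vgic_is_init) {
            int ret = vgic_init_model();   /* under kvm->lock in the kernel */
            if (ret)
                return ret;
        }

        vcpu_id = update_irq_pending(cpuid, irq, level);
        if (vcpu_id >= 0)
            kicked[vcpu_id] = true;        /* kick only the target VCPU */

        return 0;
    }

    int main(void)
    {
        inject_irq(2, 27, true);
        for (int i = 0; i < NR_VCPUS; i++)
            printf("vcpu %d kicked: %d\n", i, kicked[i]);
        return 0;
    }
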
@@ -1726,39 +1743,14 @@ static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
- vgic_cpu->vgic_irq_lr_map = kzalloc(nr_irqs, GFP_KERNEL);
+ vgic_cpu->vgic_irq_lr_map = kmalloc(nr_irqs, GFP_KERNEL);
if (!vgic_cpu->pending_shared || !vgic_cpu->vgic_irq_lr_map) {
kvm_vgic_vcpu_destroy(vcpu);
return -ENOMEM;
}
- return 0;
-}
-
-/**
- * kvm_vgic_vcpu_init - Initialize per-vcpu VGIC state
- * @vcpu: pointer to the vcpu struct
- *
- * Initialize the vgic_cpu struct and vgic_dist struct fields pertaining to
- * this vcpu and enable the VGIC for this VCPU
- */
-static void kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
-{
- struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- int i;
-
- for (i = 0; i < dist->nr_irqs; i++) {
- if (i < VGIC_NR_PPIS)
- vgic_bitmap_set_irq_val(&dist->irq_enabled,
- vcpu->vcpu_id, i, 1);
- if (i < VGIC_NR_PRIVATE_IRQS)
- vgic_bitmap_set_irq_val(&dist->irq_cfg,
- vcpu->vcpu_id, i, VGIC_CFG_EDGE);
-
- vgic_cpu->vgic_irq_lr_map[i] = LR_EMPTY;
- }
+ memset(vgic_cpu->vgic_irq_lr_map, LR_EMPTY, nr_irqs);
/*
* Store the number of LRs per vcpu, so we don't have to go
@@ -1767,7 +1759,7 @@ static void kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
*/
vgic_cpu->nr_lr = vgic->nr_lr;
- vgic_enable(vcpu);
+ return 0;
}
void kvm_vgic_destroy(struct kvm *kvm)
@@ -1798,20 +1790,21 @@ void kvm_vgic_destroy(struct kvm *kvm)
dist->irq_spi_cpu = NULL;
dist->irq_spi_target = NULL;
dist->irq_pending_on_cpu = NULL;
+ dist->nr_cpus = 0;
}
/*
* Allocate and initialize the various data structures. Must be called
* with kvm->lock held!
*/
-static int vgic_init_maps(struct kvm *kvm)
+static int vgic_init(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
struct kvm_vcpu *vcpu;
int nr_cpus, nr_irqs;
- int ret, i;
+ int ret, i, vcpu_id;
- if (dist->nr_cpus) /* Already allocated */
+ if (vgic_initialized(kvm))
return 0;
nr_cpus = dist->nr_cpus = atomic_read(&kvm->online_vcpus);
@@ -1859,16 +1852,28 @@ static int vgic_init_maps(struct kvm *kvm)
if (ret)
goto out;
- kvm_for_each_vcpu(i, vcpu, kvm) {
+ for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i += 4)
+ vgic_set_target_reg(kvm, 0, i);
+
+ kvm_for_each_vcpu(vcpu_id, vcpu, kvm) {
ret = vgic_vcpu_init_maps(vcpu, nr_irqs);
if (ret) {
kvm_err("VGIC: Failed to allocate vcpu memory\n");
break;
}
- }
- for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i += 4)
- vgic_set_target_reg(kvm, 0, i);
+ for (i = 0; i < dist->nr_irqs; i++) {
+ if (i < VGIC_NR_PPIS)
+ vgic_bitmap_set_irq_val(&dist->irq_enabled,
+ vcpu->vcpu_id, i, 1);
+ if (i < VGIC_NR_PRIVATE_IRQS)
+ vgic_bitmap_set_irq_val(&dist->irq_cfg,
+ vcpu->vcpu_id, i,
+ VGIC_CFG_EDGE);
+ }
+
+ vgic_enable(vcpu);
+ }
out:
if (ret)
@@ -1878,25 +1883,23 @@ out:
}
/**
- * kvm_vgic_init - Initialize global VGIC state before running any VCPUs
+ * kvm_vgic_map_resources - Configure global VGIC state before running any VCPUs
* @kvm: pointer to the kvm struct
*
* Map the virtual CPU interface into the VM before running any VCPUs. We
* can't do this at creation time, because user space must first set the
- * virtual CPU interface address in the guest physical address space. Also
- * initialize the ITARGETSRn regs to 0 on the emulated distributor.
+ * virtual CPU interface address in the guest physical address space.
*/
-int kvm_vgic_init(struct kvm *kvm)
+int kvm_vgic_map_resources(struct kvm *kvm)
{
- struct kvm_vcpu *vcpu;
- int ret = 0, i;
+ int ret = 0;
if (!irqchip_in_kernel(kvm))
return 0;
mutex_lock(&kvm->lock);
- if (vgic_initialized(kvm))
+ if (vgic_ready(kvm))
goto out;
if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
@@ -1906,7 +1909,11 @@ int kvm_vgic_init(struct kvm *kvm)
goto out;
}
- ret = vgic_init_maps(kvm);
+ /*
+ * Initialize the vgic if this hasn't already been done on demand by
+ * accessing the vgic state from userspace.
+ */
+ ret = vgic_init(kvm);
if (ret) {
kvm_err("Unable to allocate maps\n");
goto out;
@@ -1920,9 +1927,6 @@ int kvm_vgic_init(struct kvm *kvm)
goto out;
}
- kvm_for_each_vcpu(i, vcpu, kvm)
- kvm_vgic_vcpu_init(vcpu);
-
kvm->arch.vgic.ready = true;
out:
if (ret)
@@ -2167,7 +2171,7 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
mutex_lock(&dev->kvm->lock);
- ret = vgic_init_maps(dev->kvm);
+ ret = vgic_init(dev->kvm);
if (ret)
goto out;
@@ -2289,7 +2293,7 @@ static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
mutex_lock(&dev->kvm->lock);
- if (vgic_initialized(dev->kvm) || dev->kvm->arch.vgic.nr_irqs)
+ if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_irqs)
ret = -EBUSY;
else
dev->kvm->arch.vgic.nr_irqs = val;
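
Taken together, the vgic.c hunks separate two states that used to be conflated: vgic_initialized() (the per-VM structures have been allocated, possibly on demand from an injection or a userspace device-attribute access) and vgic_ready() (kvm_vgic_map_resources() has also mapped the virtual CPU interface so the VM may run). A tiny sketch of that two-stage progression, with invented function names:

    #include <stdio.h>
    #include <stdbool.h>

    struct vgic_state {
        bool initialized;   /* structures allocated: vgic_initialized() */
        bool ready;         /* resources mapped, VM may run: vgic_ready() */
    };

    static int vgic_init_model(struct vgic_state *s)
    {
        if (s->initialized)
            return 0;       /* idempotent, may be triggered on demand */
        s->initialized = true;
        return 0;
    }

    static int vgic_map_resources_model(struct vgic_state *s)
    {
        if (s->ready)
            return 0;
        if (vgic_init_model(s))  /* init happens here if nothing else did it */
            return -1;
        s->ready = true;         /* set once the CPU interface is mapped */
        return 0;
    }

    int main(void)
    {
        struct vgic_state s = { 0 };

        vgic_init_model(&s);           /* e.g. first interrupt injection */
        vgic_map_resources_model(&s);  /* first VCPU run */
        printf("initialized=%d ready=%d\n", s.initialized, s.ready);
        return 0;
    }
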
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index b0fb390943c..148b2392c76 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -36,9 +36,6 @@
#include <linux/seqlock.h>
#include <trace/events/kvm.h>
-#ifdef __KVM_HAVE_IOAPIC
-#include "ioapic.h"
-#endif
#include "iodev.h"
#ifdef CONFIG_HAVE_KVM_IRQFD
@@ -492,9 +489,7 @@ void kvm_register_irq_ack_notifier(struct kvm *kvm,
mutex_lock(&kvm->irq_lock);
hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
mutex_unlock(&kvm->irq_lock);
-#ifdef __KVM_HAVE_IOAPIC
kvm_vcpu_request_scan_ioapic(kvm);
-#endif
}
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
@@ -504,9 +499,7 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
hlist_del_init_rcu(&kian->link);
mutex_unlock(&kvm->irq_lock);
synchronize_srcu(&kvm->irq_srcu);
-#ifdef __KVM_HAVE_IOAPIC
kvm_vcpu_request_scan_ioapic(kvm);
-#endif
}
#endif
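
The eventfd.c change drops the #ifdef __KVM_HAVE_IOAPIC guards around kvm_vcpu_request_scan_ioapic(); that can only be safe if the call compiles away on architectures without an ioapic, presumably via a stub added elsewhere in this series. The pattern would look roughly like the following (purely illustrative, not the actual header change):

    #include <stdio.h>

    struct kvm;    /* opaque here; the real struct lives in kvm_host.h */

    #ifdef __KVM_HAVE_IOAPIC
    void kvm_vcpu_request_scan_ioapic(struct kvm *kvm);
    #else
    static inline void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
    {
        (void)kvm;    /* no ioapic in this configuration: nothing to rescan */
    }
    #endif

    int main(void)
    {
        kvm_vcpu_request_scan_ioapic(NULL);    /* compiles to a no-op here */
        printf("stub called\n");
        return 0;
    }
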
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 3cee7b16705..f5283438ee0 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -124,15 +124,6 @@ int vcpu_load(struct kvm_vcpu *vcpu)
if (mutex_lock_killable(&vcpu->mutex))
return -EINTR;
- if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) {
- /* The thread running this VCPU changed. */
- struct pid *oldpid = vcpu->pid;
- struct pid *newpid = get_task_pid(current, PIDTYPE_PID);
- rcu_assign_pointer(vcpu->pid, newpid);
- if (oldpid)
- synchronize_rcu();
- put_pid(oldpid);
- }
cpu = get_cpu();
preempt_notifier_register(&vcpu->preempt_notifier);
kvm_arch_vcpu_load(vcpu, cpu);
@@ -468,9 +459,6 @@ static struct kvm *kvm_create_vm(unsigned long type)
if (r)
goto out_err_no_disable;
-#ifdef CONFIG_HAVE_KVM_IRQCHIP
- INIT_HLIST_HEAD(&kvm->mask_notifier_list);
-#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif
@@ -668,48 +656,46 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
return 0;
}
-static int cmp_memslot(const void *slot1, const void *slot2)
-{
- struct kvm_memory_slot *s1, *s2;
-
- s1 = (struct kvm_memory_slot *)slot1;
- s2 = (struct kvm_memory_slot *)slot2;
-
- if (s1->npages < s2->npages)
- return 1;
- if (s1->npages > s2->npages)
- return -1;
-
- return 0;
-}
-
/*
- * Sort the memslots base on its size, so the larger slots
- * will get better fit.
+ * Insert memslot and re-sort memslots based on their GFN,
+ * so binary search could be used to lookup GFN.
+ * Sorting algorithm takes advantage of having initially
+ * sorted array and known changed memslot position.
*/
-static void sort_memslots(struct kvm_memslots *slots)
-{
- int i;
-
- sort(slots->memslots, KVM_MEM_SLOTS_NUM,
- sizeof(struct kvm_memory_slot), cmp_memslot, NULL);
-
- for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
- slots->id_to_index[slots->memslots[i].id] = i;
-}
-
static void update_memslots(struct kvm_memslots *slots,
struct kvm_memory_slot *new)
{
- if (new) {
- int id = new->id;
- struct kvm_memory_slot *old = id_to_memslot(slots, id);
- unsigned long npages = old->npages;
+ int id = new->id;
+ int i = slots->id_to_index[id];
+ struct kvm_memory_slot *mslots = slots->memslots;
- *old = *new;
- if (new->npages != npages)
- sort_memslots(slots);
+ WARN_ON(mslots[i].id != id);
+ if (!new->npages) {
+ new->base_gfn = 0;
+ if (mslots[i].npages)
+ slots->used_slots--;
+ } else {
+ if (!mslots[i].npages)
+ slots->used_slots++;
}
+
+ while (i < KVM_MEM_SLOTS_NUM - 1 &&
+ new->base_gfn <= mslots[i + 1].base_gfn) {
+ if (!mslots[i + 1].npages)
+ break;
+ mslots[i] = mslots[i + 1];
+ slots->id_to_index[mslots[i].id] = i;
+ i++;
+ }
+ while (i > 0 &&
+ new->base_gfn > mslots[i - 1].base_gfn) {
+ mslots[i] = mslots[i - 1];
+ slots->id_to_index[mslots[i].id] = i;
+ i--;
+ }
+
+ mslots[i] = *new;
+ slots->id_to_index[mslots[i].id] = i;
}
static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
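
update_memslots() no longer sorts the whole array by size with sort(); it keeps the memslots ordered by base_gfn (largest first, judging from the comparisons above, with empty slots parked at the tail) and slides the changed slot into place in a single insertion pass, fixing up id_to_index as it goes, so GFN lookups can binary-search. A stand-alone user-space model of that insertion step, with simplified types and a fixed-size array:

    #include <stdio.h>

    #define NR_SLOTS 8

    struct slot {
        int id;
        unsigned long base_gfn;
        unsigned long npages;
    };

    static struct slot slots[NR_SLOTS];
    static int id_to_index[NR_SLOTS];

    /* Keep slots sorted by base_gfn, largest first, empty slots at the tail. */
    static void update_slot(struct slot *new)
    {
        int i = id_to_index[new->id];

        if (!new->npages)
            new->base_gfn = 0;    /* deleted slots drift to the tail */

        while (i < NR_SLOTS - 1 && new->base_gfn <= slots[i + 1].base_gfn) {
            if (!slots[i + 1].npages)    /* stop at the empty region */
                break;
            slots[i] = slots[i + 1];
            id_to_index[slots[i].id] = i;
            i++;
        }
        while (i > 0 && new->base_gfn > slots[i - 1].base_gfn) {
            slots[i] = slots[i - 1];
            id_to_index[slots[i].id] = i;
            i--;
        }

        slots[i] = *new;
        id_to_index[new->id] = i;
    }

    int main(void)
    {
        for (int i = 0; i < NR_SLOTS; i++) {
            slots[i].id = i;
            id_to_index[i] = i;
        }

        update_slot(&(struct slot){ .id = 0, .base_gfn = 0x100, .npages = 16 });
        update_slot(&(struct slot){ .id = 1, .base_gfn = 0x400, .npages = 32 });
        update_slot(&(struct slot){ .id = 2, .base_gfn = 0x200, .npages = 8 });

        for (int i = 0; i < NR_SLOTS; i++)
            printf("index %d: id %d base_gfn 0x%lx npages %lu\n",
                   i, slots[i].id, slots[i].base_gfn, slots[i].npages);
        return 0;
    }
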
@@ -727,7 +713,7 @@ static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
}
static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
- struct kvm_memslots *slots, struct kvm_memory_slot *new)
+ struct kvm_memslots *slots)
{
struct kvm_memslots *old_memslots = kvm->memslots;
@@ -738,7 +724,6 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
WARN_ON(old_memslots->generation & 1);
slots->generation = old_memslots->generation + 1;
- update_memslots(slots, new);
rcu_assign_pointer(kvm->memslots, slots);
synchronize_srcu_expedited(&kvm->srcu);
@@ -760,7 +745,7 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
*
* Discontiguous memory is allowed, mostly for framebuffers.
*
- * Must be called holding mmap_sem for write.
+ * Must be called holding kvm->slots_lock for write.
*/
int __kvm_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem)
@@ -866,15 +851,16 @@ int __kvm_set_memory_region(struct kvm *kvm,
goto out_free;
}
+ slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
+ GFP_KERNEL);
+ if (!slots)
+ goto out_free;
+
if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
- slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
- GFP_KERNEL);
- if (!slots)
- goto out_free;
slot = id_to_memslot(slots, mem->slot);
slot->flags |= KVM_MEMSLOT_INVALID;
- old_memslots = install_new_memslots(kvm, slots, NULL);
+ old_memslots = install_new_memslots(kvm, slots);
/* slot was deleted or moved, clear iommu mapping */
kvm_iommu_unmap_pages(kvm, &old);
@@ -886,6 +872,12 @@ int __kvm_set_memory_region(struct kvm *kvm,
* - kvm_is_visible_gfn (mmu_check_roots)
*/
kvm_arch_flush_shadow_memslot(kvm, slot);
+
+ /*
+ * We can re-use the old_memslots from above, the only difference
+ * from the currently installed memslots is the invalid flag. This
+ * will get overwritten by update_memslots anyway.
+ */
slots = old_memslots;
}
@@ -893,26 +885,14 @@ int __kvm_set_memory_region(struct kvm *kvm,
if (r)
goto out_slots;
- r = -ENOMEM;
- /*
- * We can re-use the old_memslots from above, the only difference
- * from the currently installed memslots is the invalid flag. This
- * will get overwritten by update_memslots anyway.
- */
- if (!slots) {
- slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
- GFP_KERNEL);
- if (!slots)
- goto out_free;
- }
-
/* actual memory is freed via old in kvm_free_physmem_slot below */
if (change == KVM_MR_DELETE) {
new.dirty_bitmap = NULL;
memset(&new.arch, 0, sizeof(new.arch));
}
- old_memslots = install_new_memslots(kvm, slots, &new);
+ update_memslots(slots, &new);
+ old_memslots = install_new_memslots(kvm, slots);
kvm_arch_commit_memory_region(kvm, mem, &old, change);
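
With update_memslots() reduced to that insertion helper, __kvm_set_memory_region() now duplicates the memslot array exactly once, up front, for every kind of change; for DELETE and MOVE it first installs the copy with the slot flagged KVM_MEMSLOT_INVALID, flushes the shadow mappings, then reuses the previous array for the final update and install. A rough model of that ordering (stub types, no SRCU, a single slot):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Stand-in for struct kvm_memslots: just enough state to show the flow. */
    struct slots_copy { int generation; int slot_invalid; };

    static struct slots_copy *current_slots;

    /* Models install_new_memslots(): publish the copy, hand back the old one. */
    static struct slots_copy *install(struct slots_copy *new)
    {
        struct slots_copy *old = current_slots;

        new->generation = old->generation + 1;
        current_slots = new;
        /* the kernel does synchronize_srcu_expedited() here */
        return old;
    }

    static void set_memory_region(int delete_or_move)
    {
        struct slots_copy *slots = malloc(sizeof(*slots));
        struct slots_copy *old;

        memcpy(slots, current_slots, sizeof(*slots));  /* the single kmemdup() */

        if (delete_or_move) {
            slots->slot_invalid = 1;   /* KVM_MEMSLOT_INVALID */
            old = install(slots);
            /* unmap iommu + flush shadow pages for the dying slot here */
            slots = old;   /* reuse the previous copy for the final step */
        }

        slots->slot_invalid = 0;   /* update_memslots(slots, &new) in the kernel */
        old = install(slots);
        free(old);
    }

    int main(void)
    {
        current_slots = calloc(1, sizeof(*current_slots));
        set_memory_region(1);
        printf("generation=%d slot_invalid=%d\n",
               current_slots->generation, current_slots->slot_invalid);
        free(current_slots);
        return 0;
    }
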
@@ -1799,10 +1779,6 @@ int kvm_vcpu_yield_to(struct kvm_vcpu *target)
rcu_read_unlock();
if (!task)
return ret;
- if (task->flags & PF_VCPU) {
- put_task_struct(task);
- return ret;
- }
ret = yield_to(task, 1);
put_task_struct(task);
@@ -2065,6 +2041,15 @@ static long kvm_vcpu_ioctl(struct file *filp,
r = -EINVAL;
if (arg)
goto out;
+ if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) {
+ /* The thread running this VCPU changed. */
+ struct pid *oldpid = vcpu->pid;
+ struct pid *newpid = get_task_pid(current, PIDTYPE_PID);
+ rcu_assign_pointer(vcpu->pid, newpid);
+ if (oldpid)
+ synchronize_rcu();
+ put_pid(oldpid);
+ }
r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
break;
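
The pid bookkeeping that used to run in vcpu_load(), i.e. on every vcpu ioctl, now happens only on KVM_RUN. That is presumably the one place it needs to be fresh: kvm_vcpu_yield_to() uses vcpu->pid to direct a yield at the thread that last ran the VCPU, and updating it on every ioctl forced a synchronize_rcu() whenever an unrelated ioctl arrived from a different thread. A stripped-down model of the producer/consumer pair (plain assignment instead of the RCU swap, names invented):

    #include <stdio.h>

    /* Minimal stand-in: a "pid" is just an int, no RCU, no refcounting. */
    struct vcpu_model {
        int pid;    /* thread that last entered KVM_RUN */
    };

    /* What the KVM_RUN path now does: refresh the owner lazily. */
    static void vcpu_run(struct vcpu_model *vcpu, int current_pid)
    {
        if (vcpu->pid != current_pid)
            vcpu->pid = current_pid;    /* RCU-protected swap in the kernel */
        /* enter the guest here */
    }

    /* What kvm_vcpu_yield_to() consumes: yield toward that thread. */
    static void yield_to_vcpu(const struct vcpu_model *vcpu)
    {
        if (vcpu->pid)
            printf("directed yield to thread %d\n", vcpu->pid);
    }

    int main(void)
    {
        struct vcpu_model vcpu = { 0 };

        vcpu_run(&vcpu, 4242);    /* 4242: hypothetical VCPU thread id */
        yield_to_vcpu(&vcpu);
        return 0;
    }
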
@@ -2599,8 +2584,6 @@ static long kvm_vm_ioctl(struct file *filp,
break;
default:
r = kvm_arch_vm_ioctl(filp, ioctl, arg);
- if (r == -ENOTTY)
- r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
}
out:
return r;